diff --git a/.gitattributes b/.gitattributes index cb58051f720e38288dbea9e39ad1aefb39114cc8..7a66e31b67cc7309ad541ffaaa55d1c0501070b8 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2607,3 +2607,58 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text 2025/pFedMxF_[[:space:]]Personalized[[:space:]]Federated[[:space:]]Class-Incremental[[:space:]]Learning[[:space:]]with[[:space:]]Mixture[[:space:]]of[[:space:]]Frequency[[:space:]]Aggregation/af5803f4-69bd-43df-bcea-1df37a7dcf4e_origin.pdf filter=lfs diff=lfs merge=lfs -text 2025/v-CLR_[[:space:]]View-Consistent[[:space:]]Learning[[:space:]]for[[:space:]]Open-World[[:space:]]Instance[[:space:]]Segmentation/ef2f822f-1ae9-4363-af6b-d621ee5b7ef6_origin.pdf filter=lfs diff=lfs merge=lfs -text 2025/vesselFM_[[:space:]]A[[:space:]]Foundation[[:space:]]Model[[:space:]]for[[:space:]]Universal[[:space:]]3D[[:space:]]Blood[[:space:]]Vessel[[:space:]]Segmentation/ae7b8187-15d3-4443-9213-c4542fccaa23_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/1-Lipschitz[[:space:]]Layers[[:space:]]Compared_[[:space:]]Memory[[:space:]]Speed[[:space:]]and[[:space:]]Certifiable[[:space:]]Robustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/2S-UDF_[[:space:]]A[[:space:]]Novel[[:space:]]Two-stage[[:space:]]UDF[[:space:]]Learning[[:space:]]Method[[:space:]]for[[:space:]]Robust[[:space:]]Non-watertight[[:space:]]Model[[:space:]]Reconstruction[[:space:]]from[[:space:]]Multi-view[[:space:]]Images/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/360+x_[[:space:]]A[[:space:]]Panoptic[[:space:]]Multi-modal[[:space:]]Scene[[:space:]]Understanding[[:space:]]Dataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/360DVD_[[:space:]]Controllable[[:space:]]Panorama[[:space:]]Video[[:space:]]Generation[[:space:]]with[[:space:]]360-Degree[[:space:]]Video[[:space:]]Diffusion[[:space:]]Model/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/360Loc_[[:space:]]A[[:space:]]Dataset[[:space:]]and[[:space:]]Benchmark[[:space:]]for[[:space:]]Omnidirectional[[:space:]]Visual[[:space:]]Localization[[:space:]]with[[:space:]]Cross-device[[:space:]]Queries/15eb225d-3032-419c-84b0-35d6ec576cbc_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Building[[:space:]]Reconstruction[[:space:]]from[[:space:]]Monocular[[:space:]]Remote[[:space:]]Sensing[[:space:]]Images[[:space:]]with[[:space:]]Multi-level[[:space:]]Supervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Face[[:space:]]Reconstruction[[:space:]]with[[:space:]]the[[:space:]]Geometric[[:space:]]Guidance[[:space:]]of[[:space:]]Facial[[:space:]]Part[[:space:]]Segmentation/29911afb-57cf-4105-bc3b-b432a117add8_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Face[[:space:]]Tracking[[:space:]]from[[:space:]]2D[[:space:]]Video[[:space:]]through[[:space:]]Iterative[[:space:]]Dense[[:space:]]UV[[:space:]]to[[:space:]]Image[[:space:]]Flow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Facial[[:space:]]Expressions[[:space:]]through[[:space:]]Analysis-by-Neural-Synthesis/444ecb6f-5ab4-45bb-9d08-b2b359c08da3_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Feature[[:space:]]Tracking[[:space:]]via[[:space:]]Event[[:space:]]Camera/a6809d22-03e7-4639-b845-8393b79ecc8d_origin.pdf filter=lfs diff=lfs merge=lfs -text 
+2024/3D[[:space:]]Geometry-Aware[[:space:]]Deformable[[:space:]]Gaussian[[:space:]]Splatting[[:space:]]for[[:space:]]Dynamic[[:space:]]View[[:space:]]Synthesis/f455d128-070e-4b9d-a550-262379c7f3f3_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Human[[:space:]]Pose[[:space:]]Perception[[:space:]]from[[:space:]]Egocentric[[:space:]]Stereo[[:space:]]Videos/95770e99-65fc-4fd7-9de3-96977a97b4b8_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]LiDAR[[:space:]]Mapping[[:space:]]in[[:space:]]Dynamic[[:space:]]Environments[[:space:]]using[[:space:]]a[[:space:]]4D[[:space:]]Implicit[[:space:]]Neural[[:space:]]Representation/024f2dc1-2c03-4b1e-a716-0e0aea35b1de_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Multi-frame[[:space:]]Fusion[[:space:]]for[[:space:]]Video[[:space:]]Stabilization/475676e5-4dd7-4a8c-bd05-cc44ef21267a_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Neural[[:space:]]Edge[[:space:]]Reconstruction/12034c9b-4470-4339-9189-38596581605f_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Paintbrush_[[:space:]]Local[[:space:]]Stylization[[:space:]]of[[:space:]]3D[[:space:]]Shapes[[:space:]]with[[:space:]]Cascaded[[:space:]]Score[[:space:]]Distillation/669e6bfe-eb9e-4f5b-a53c-23335eda80fe_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D-Aware[[:space:]]Face[[:space:]]Editing[[:space:]]via[[:space:]]Warping-Guided[[:space:]]Latent[[:space:]]Direction[[:space:]]Learning/d47f630a-17d8-4298-a368-699d1959d603_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D-LFM_[[:space:]]Lifting[[:space:]]Foundation[[:space:]]Model/5d227142-e6b0-440e-bad4-facab1940a16_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D-SceneDreamer_[[:space:]]Text-Driven[[:space:]]3D-Consistent[[:space:]]Scene[[:space:]]Generation/e514998b-c539-47e4-bf66-6c5fccc605eb_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3DFIRES_[[:space:]]Few[[:space:]]Image[[:space:]]3D[[:space:]]REconstruction[[:space:]]for[[:space:]]Scenes[[:space:]]with[[:space:]]Hidden[[:space:]]Surfaces/9acb5370-2e99-4481-9b63-bbd93724edf4_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3DGS-Avatar_[[:space:]]Animatable[[:space:]]Avatars[[:space:]]via[[:space:]]Deformable[[:space:]]3D[[:space:]]Gaussian[[:space:]]Splatting/0f8abe9e-31c6-4dc4-9520-66dabe1eb0cf_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3DGStream_[[:space:]]On-the-Fly[[:space:]]Training[[:space:]]of[[:space:]]3D[[:space:]]Gaussians[[:space:]]for[[:space:]]Efficient[[:space:]]Streaming[[:space:]]of[[:space:]]Photo-Realistic[[:space:]]Free-Viewpoint[[:space:]]Videos/94b406dc-a25e-4b49-8259-ce68b53e5886_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3DInAction_[[:space:]]Understanding[[:space:]]Human[[:space:]]Actions[[:space:]]in[[:space:]]3D[[:space:]]Point[[:space:]]Clouds/5d416e0e-fbb0-491c-8e72-ffffff1eb68b_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3DSFLabelling_[[:space:]]Boosting[[:space:]]3D[[:space:]]Scene[[:space:]]Flow[[:space:]]Estimation[[:space:]]by[[:space:]]Pseudo[[:space:]]Auto-labelling/128911ab-0f2f-4697-8895-080e5b45c36b_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3DToonify_[[:space:]]Creating[[:space:]]Your[[:space:]]High-Fidelity[[:space:]]3D[[:space:]]Stylized[[:space:]]Avatar[[:space:]]Easily[[:space:]]from[[:space:]]2D[[:space:]]Portrait[[:space:]]Images/2c81e75a-1abf-4d0b-aa9d-80d61a8cb264_origin.pdf filter=lfs diff=lfs merge=lfs -text 
+2024/3DiffTection_[[:space:]]3D[[:space:]]Object[[:space:]]Detection[[:space:]]with[[:space:]]Geometry-Aware[[:space:]]Diffusion[[:space:]]Features/0c8075a5-3d90-4e50-a4ef-fbf63dd9f1bc_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/4D[[:space:]]Gaussian[[:space:]]Splatting[[:space:]]for[[:space:]]Real-Time[[:space:]]Dynamic[[:space:]]Scene[[:space:]]Rendering/c4bfa810-f46e-49e4-9e19-ae1a9e3dcad6_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/4D-DRESS_[[:space:]]A[[:space:]]4D[[:space:]]Dataset[[:space:]]of[[:space:]]Real-World[[:space:]]Human[[:space:]]Clothing[[:space:]]With[[:space:]]Semantic[[:space:]]Annotations/cd2548ca-a539-45a3-adaf-36a364d6da68_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/4D-fy_[[:space:]]Text-to-4D[[:space:]]Generation[[:space:]]Using[[:space:]]Hybrid[[:space:]]Score[[:space:]]Distillation[[:space:]]Sampling/7295c7f1-f21d-431e-aa65-0a0fb95fe12c_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/4K4D_[[:space:]]Real-Time[[:space:]]4D[[:space:]]View[[:space:]]Synthesis[[:space:]]at[[:space:]]4K[[:space:]]Resolution/6d74ca33-515d-4b03-96e1-8cccfa68be60_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/6D-Diff_[[:space:]]A[[:space:]]Keypoint[[:space:]]Diffusion[[:space:]]Framework[[:space:]]for[[:space:]]6D[[:space:]]Object[[:space:]]Pose[[:space:]]Estimation/1d3927f2-2533-4713-91b0-b3f9e13c8aed_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Backpack[[:space:]]Full[[:space:]]of[[:space:]]Skills_[[:space:]]Egocentric[[:space:]]Video[[:space:]]Understanding[[:space:]]with[[:space:]]Diverse[[:space:]]Task[[:space:]]Perspectives/b19cc746-13a8-4ac2-b79c-a1691351681c_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Bayesian[[:space:]]Approach[[:space:]]to[[:space:]]OOD[[:space:]]Robustness[[:space:]]in[[:space:]]Image[[:space:]]Classification/d0e61d97-e025-4ae4-a494-3d44cf79404b_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Call[[:space:]]to[[:space:]]Reflect[[:space:]]on[[:space:]]Evaluation[[:space:]]Practices[[:space:]]for[[:space:]]Age[[:space:]]Estimation_[[:space:]]Comparative[[:space:]]Analysis[[:space:]]of[[:space:]]the[[:space:]]State-of-the-Art[[:space:]]and[[:space:]]a[[:space:]]Unified[[:space:]]Benchmark/393f0825-ecb8-44e6-bdfa-5dde4b82ecdb_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Category[[:space:]]Agnostic[[:space:]]Model[[:space:]]for[[:space:]]Visual[[:space:]]Rearrangment/4b328694-69ab-47a2-83d3-ce2efe00b0f0_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Closer[[:space:]]Look[[:space:]]at[[:space:]]the[[:space:]]Few-Shot[[:space:]]Adaptation[[:space:]]of[[:space:]]Large[[:space:]]Vision-Language[[:space:]]Models/69aa9b91-03a6-4c14-a53b-96602951c67b_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Conditional[[:space:]]Denoising[[:space:]]Diffusion[[:space:]]Probabilistic[[:space:]]Model[[:space:]]for[[:space:]]Point[[:space:]]Cloud[[:space:]]Upsampling/80eefa2b-3d90-4d98-ab03-f2521d12efac_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Dual-Augmentor[[:space:]]Framework[[:space:]]for[[:space:]]Domain[[:space:]]Generalization[[:space:]]in[[:space:]]3D[[:space:]]Human[[:space:]]Pose[[:space:]]Estimation/2e309a58-2e8d-4563-8890-368854bbd34f_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Dynamic[[:space:]]Kernel[[:space:]]Prior[[:space:]]Model[[:space:]]for[[:space:]]Unsupervised[[:space:]]Blind[[:space:]]Image[[:space:]]Super-Resolution/eb377d27-ee40-49e4-9796-048cc8e1c35d_origin.pdf 
filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]General[[:space:]]and[[:space:]]Efficient[[:space:]]Training[[:space:]]for[[:space:]]Transformer[[:space:]]via[[:space:]]Token[[:space:]]Expansion/6b319bca-e10d-4650-be77-29bcc4ffd8dd_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Generative[[:space:]]Approach[[:space:]]for[[:space:]]Wikipedia-Scale[[:space:]]Visual[[:space:]]Entity[[:space:]]Recognition/25668f69-d21b-4819-83f7-a45db4e2f055_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Noisy[[:space:]]Elephant[[:space:]]in[[:space:]]the[[:space:]]Room_[[:space:]]Is[[:space:]]Your[[:space:]]Out-of-Distribution[[:space:]]Detector[[:space:]]Robust[[:space:]]to[[:space:]]Label[[:space:]]Noise_/8eab7491-89d9-4dd1-8ee1-40b9bd851b01_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Pedestrian[[:space:]]is[[:space:]]Worth[[:space:]]One[[:space:]]Prompt_[[:space:]]Towards[[:space:]]Language[[:space:]]Guidance[[:space:]]Person[[:space:]]Re-Identification/85746221-1e2b-4579-be8b-1626ff544e58_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Physics-informed[[:space:]]Low-rank[[:space:]]Deep[[:space:]]Neural[[:space:]]Network[[:space:]]for[[:space:]]Blind[[:space:]]and[[:space:]]Universal[[:space:]]Lens[[:space:]]Aberration[[:space:]]Correction/aeee8578-b512-4a62-8ec3-b06e011ce338_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Picture[[:space:]]is[[:space:]]Worth[[:space:]]More[[:space:]]Than[[:space:]]77[[:space:]]Text[[:space:]]Tokens_[[:space:]]Evaluating[[:space:]]CLIP-Style[[:space:]]Models[[:space:]]on[[:space:]]Dense[[:space:]]Captions/f5f3c319-3887-4d17-8f43-fce0198c0c77_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Recipe[[:space:]]for[[:space:]]Scaling[[:space:]]up[[:space:]]Text-to-Video[[:space:]]Generation[[:space:]]with[[:space:]]Text-free[[:space:]]Videos/873ecf7b-8814-4ac3-a70f-20982249ac1d_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Semi-supervised[[:space:]]Nighttime[[:space:]]Dehazing[[:space:]]Baseline[[:space:]]with[[:space:]]Spatial-Frequency[[:space:]]Aware[[:space:]]and[[:space:]]Realistic[[:space:]]Brightness[[:space:]]Constraint/4a280801-3209-4899-b345-f6dbc9c9ec52_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Simple[[:space:]]Baseline[[:space:]]for[[:space:]]Efficient[[:space:]]Hand[[:space:]]Mesh[[:space:]]Reconstruction/edb8cbac-0e71-45cd-8cdb-45284f946ab7_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Simple[[:space:]]Recipe[[:space:]]for[[:space:]]Contrastively[[:space:]]Pre-training[[:space:]]Video-First[[:space:]]Encoders[[:space:]]Beyond[[:space:]]16[[:space:]]Frames/e81a3abe-11ba-459a-b183-aa765dce41a0_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Simple[[:space:]]Recipe[[:space:]]for[[:space:]]Language-guided[[:space:]]Domain[[:space:]]Generalized[[:space:]]Segmentation/b9db7707-a86d-4d4c-b962-58bd8f08eecd_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Simple[[:space:]]and[[:space:]]Effective[[:space:]]Point-based[[:space:]]Network[[:space:]]for[[:space:]]Event[[:space:]]Camera[[:space:]]6-DOFs[[:space:]]Pose[[:space:]]Relocalization/1bfdaa4b-618c-45a6-9de6-a1e062fefbcd_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Stealthy[[:space:]]Wrongdoer_[[:space:]]Feature-Oriented[[:space:]]Reconstruction[[:space:]]Attack[[:space:]]against[[:space:]]Split[[:space:]]Learning/de2cb66c-4154-4b26-aa15-91e83e19d783_origin.pdf filter=lfs diff=lfs merge=lfs -text 
+2024/A[[:space:]]Study[[:space:]]of[[:space:]]Dropout-Induced[[:space:]]Modality[[:space:]]Bias[[:space:]]on[[:space:]]Robustness[[:space:]]to[[:space:]]Missing[[:space:]]Video[[:space:]]Frames[[:space:]]for[[:space:]]Audio-Visual[[:space:]]Speech[[:space:]]Recognition/6897fec6-4bb7-4167-a28e-16a34134af6a_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Subspace-Constrained[[:space:]]Tyler's[[:space:]]Estimator[[:space:]]and[[:space:]]its[[:space:]]Applications[[:space:]]to[[:space:]]Structure[[:space:]]from[[:space:]]Motion/02a10508-95ff-4550-b14e-3121c8c91065_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Theory[[:space:]]of[[:space:]]Joint[[:space:]]Light[[:space:]]and[[:space:]]Heat[[:space:]]Transport[[:space:]]for[[:space:]]Lambertian[[:space:]]Scenes/c36c78d9-fdc0-45ae-af17-5820282f52ff_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_content_list.json b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..c5f7abbe47fc6a8c078555eb94dfeb885de4e36a --- /dev/null +++ b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_content_list.json @@ -0,0 +1,1572 @@ +[ + { + "type": "text", + "text": "1-Lipschitz Layers Compared: Memory, Speed, and Certifiable Robustness", + "text_level": 1, + "bbox": [ + 107, + 130, + 864, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Bernd Prach, $^{1,*}$ Fabio Brau, $^{2,*}$ Giorgio Buttazzo, $^{2}$ Christoph H. Lampert $^{1}$ $^{1}$ ISTA, Klosterneuburg, Austria \n $^{2}$ Scuola Superiore Sant'Anna, Pisa, Italy", + "bbox": [ + 194, + 179, + 772, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{bprach, chl}@ist.ac.at, {fabio.brau, giorgio.butazzo}@santannapisa.it", + "bbox": [ + 163, + 236, + 789, + 252 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 286, + 313, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The robustness of neural networks against input perturbations with bounded magnitude represents a serious concern in the deployment of deep learning models in safety-critical systems. Recently, the scientific community has focused on enhancing certifiable robustness guarantees by crafting 1-Lipschitz neural networks that leverage Lipschitz bounded dense and convolutional layers. Different methods have been proposed in the literature to achieve this goal, however, comparing the performance of such methods is not straightforward, since different metrics can be relevant (e.g., training time, memory usage, accuracy, certifiable robustness) for different applications. Therefore, this work provides a thorough comparison between different methods, covering theoretical aspects such as computational complexity and memory requirements, as well as empirical measurements of time per epoch, required memory, accuracy and certifiable robust accuracy. The paper also provides some guidelines and recommendations to support the user in selecting the methods that work best depending on the available resources. We provide code at github.com/berndprach/lLipschitzLayersCompared.", + "bbox": [ + 75, + 316, + 473, + 636 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 679, + 209, + 695 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Modern artificial neural networks achieve high accuracy and sometimes superhuman performance in many different tasks, but it is widely recognized that they are not robust to tiny and imperceptible input perturbations [4, 39] that, if properly crafted, can cause a model to produce the wrong output. Such inputs, known as Adversarial Examples, represent a serious concern for the deployment of machine learning models in safety-critical systems [26]. To overcome this issue, adversarial training has been proposed in", + "bbox": [ + 75, + 705, + 468, + 843 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/69a05b6ec4757e9c0f0d25d880c87173ad5c33a6c11a21abaab965be18a82987.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 286, + 684, + 401 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0380e230b8fd8a86f4be4561be7161ea42f27cb9c0adba33a8e941b2ad86d467.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 709, + 286, + 880, + 401 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8d5e5a868592dbfae08c67f66da1cfd8fca757ca127a7cbf23475339ed25ddca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 407, + 684, + 522 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/911b46d71b8d81a9ae74632e144c531a6ae5102ddf35953e6ce0f6141de61705.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 709, + 407, + 880, + 522 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ca9b87b573f226fc3fcf6fe66c7dfe4f183453a8207d4eee723ae1d31af61d04.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 529, + 683, + 643 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7a405ab227722501cfae03f134934faea1e665de1516715e62f2d0d11d91bd20.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 710, + 530, + 880, + 645 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9cf0b86ff8d8734c58196e37a75ef5a3075410825a59d662e572fc060a967e74.jpg", + "image_caption": [ + "Figure 1. Evaluation of 1-Lipschitz methods on different metrics. Scores are assigned from 1 (worst) to 5 (best) to every method based on the results reported in Sections 3 and 5." + ], + "image_footnote": [], + "bbox": [ + 511, + 652, + 683, + 767 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/136ab83e8fb6e86acee4ed2e9092d692d1eb036fdbf10c1af14e99ff7134787f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Legend
RA | Robust Accuracy
A | Accuracy
TT | Training Time
IT | Inference Time
TM | Train Memory
IM | Inference Memory
", + "bbox": [ + 699, + 652, + 883, + 757 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Joined first authors.", + "bbox": [ + 75, + 851, + 215, + 862 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "This work was partially supported by project SERICS (PE00000014) under the MUR National Recovery and Resilience Plan funded by the European Union - NextGenerationEU.", + "bbox": [ + 75, + 862, + 468, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "[14, 30, 39]. It uses adversarial examples during the training to correct the model prediction. This strategy does improves the empirical robustness of the model, however, it does not", + "bbox": [ + 498, + 854, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "24574", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "provide any guarantees of robustness.", + "bbox": [ + 76, + 90, + 326, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, for many applications a guarantee of robustness is desired. Roughly speaking, a model $f$ is said to be $\\varepsilon$ -robust for a given input $x$ if no perturbation of magnitude bounded by $\\varepsilon$ can change its prediction. Recently, in the context of image classification, various approaches have been proposed to achieve certifiable robustness, including Verification, Randomized Smoothing, and Lipschitz Bounded Neural Networks.", + "bbox": [ + 75, + 106, + 467, + 226 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Verification strategies aim to establish, for any given model, whether all samples contained in a $l_{2}$ -ball with radius $\\varepsilon$ and centered in the tested input $x$ are classified with the same class as $x$ . In the exact formulation, verification strategies involve the solution of an NP-hard problem [20]. Nevertheless, even in a relaxed formulation, [44], these strategies require a huge computational effort [43].", + "bbox": [ + 75, + 237, + 467, + 342 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Randomized smoothing strategies, initially presented in [10], represent an effective way of crafting a certifiable-robust classifier $g$ based on a base classifier $f$ . If combined with an additional denoising step, they can achieve state-of-the-art levels of robustness, [7]. However, since they require multiple evaluations of the base model (up to 100k evaluations) for the classification of a single input, they cannot be used for real-time applications.", + "bbox": [ + 75, + 352, + 467, + 472 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Finally, Lipschitz Bounded Neural Networks [6, 9, 24, 27, 29, 34, 40] represent a valid alternative to produce certifiable classifiers, since they only require a single forward pass of the model at inference time to deduce guarantees of robustness. 
Indeed, for such models, a lower-bound of the minimal adversarial perturbation capable of fooling the classifier can be evaluated by considering the difference between the two largest class scores predicted by the model.", + "bbox": [ + 75, + 482, + 467, + 603 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Lipschitz-bounded neural networks can be obtained by the composition of 1-Lipschitz layers [2]. The process of parameterizing 1-Lipschitz layers is fairly straightforward for fully connected layers. However, for convolutions — with overlapping kernels — deducing an effective parameterization is a hard problem. Indeed, the Lipschitz condition can be essentially thought of as a condition on the Jacobian of the layer. However, the Jacobian matrix can not be efficiently computed.", + "bbox": [ + 75, + 604, + 467, + 739 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In order to avoid the explicit computation of the Jacobian, various methods have been proposed, including parameterizations that cause the Jacobian to be (very close to) orthogonal [27, 36, 40, 46] and methods that rely on an upper bound on the Jacobian instead [34]. Those different methods differ drastically in training and validation requirements (in particular time and memory) as well as empirical performance. Furthermore, increasing training time or model sizes very often also increases the empirical performance. This makes it hard to judge from the existing", + "bbox": [ + 75, + 750, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "literature which methods are the most promising. This becomes even worse when working with specific computation requirements, such as restrictions on the available memory. In this case, it is important to choose the method that better suits the characteristics of the system in terms of evaluation time, memory usage as well and certifiable-robust-accuracy.", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This work aims at giving a comprehensive comparison of different strategies for crafting 1-Lipschitz layers from both a theoretical and practical perspective. For the sake of fairness, we consider several metrics such as Time and Memory requirements for both training and inference, Accuracy, as well as Certified Robust Accuracy. The main contributions are the following:", + "bbox": [ + 496, + 204, + 890, + 309 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- An empirical comparison of 1-Lipschitz layers based on six different metrics, and four different datasets on four architecture sizes with three time constraints.", + "- A theoretical comparison of the runtime complexity and the memory usage of existing methods.", + "- A review of the most recent methods in the literature, including implementations with a revised code that we will release publicly for other researchers to build on." + ], + "bbox": [ + 500, + 325, + 890, + 444 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Existing Works and Background", + "text_level": 1, + "bbox": [ + 500, + 458, + 795, + 474 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In recent years, various methods have been proposed for creating artificial neural networks with a bounded Lipschitz constant. 
The Lipschitz constant of a function $f: \\mathbb{R}^n \\to \\mathbb{R}^m$ with respect to the $l_2$ norm is the smallest $L$ such that for all $x, y \\in \\mathbb{R}^n$", + "bbox": [ + 496, + 484, + 890, + 559 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| f (x) - f (y) \\right\\| _ {2} \\leq L \\| x - y \\| _ {2}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 570, + 890, + 587 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We also extend this definition to networks and layers, by considering the $l_{2}$ norms of the flattened input and output tensors in Equation (1). A layer is called 1-Lipschitz if its Lipschitz constant is at most 1. For linear layers, the Lipschitz constant is equal to the spectral norm of the weight matrix that is given as", + "bbox": [ + 496, + 598, + 890, + 688 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\| M \\| _ {2} = \\sup _ {\\mathbf {v} \\neq 0} \\frac {\\| M \\mathbf {v} \\| _ {2}}{\\| \\mathbf {v} \\| _ {2}}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 619, + 698, + 890, + 732 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A particular class of linear 1-Lipschitz layers are ones with an orthogonal Jacobian matrix. The Jacobian matrix of a layer is the matrix of partial derivatives of the flattened outputs with respect to the flattened inputs. A matrix $M$ is orthogonal if $MM^{\\top} = I$ , where $I$ is the identity matrix. For layers with an orthogonal Jacobian, Equation (1) always holds with equality and, because of this, a lot of methods aim at constructing such 1-Lipschitz layers.", + "bbox": [ + 496, + 742, + 890, + 863 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "All the neural networks analyzed in this paper consist of 1-Lipschitz parameterized layers and 1-Lipschitz activation", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "24575", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "functions, with no skip connections and no batch normalization. Even though the commonly used ReLU activation function is 1-Lipschitz, Anil et al. [2] showed that it reduces the expressive capability of the model. Hence, we adopt the MaxMin activation proposed by the authors and commonly used in 1-Lipschitz models. Concatenations of 1-Lipschitz functions are 1-Lipschitz, so the networks analyzed are 1-Lipschitz by construction.", + "bbox": [ + 75, + 90, + 472, + 212 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1. Parameterized 1-Lipschitz Layers", + "text_level": 1, + "bbox": [ + 76, + 223, + 372, + 239 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This section provides an overview of the existing methods for providing 1-Lipschitz layers. We discuss fundamental methods for estimating the spectral norms of linear and convolutional layers, i.e. Power Method [32] and Fantistica4 [35], and for crafting orthogonal matrices, i.e. Bjorck & Bowie [5], in Appendix A. The rest of this section describes 7 methods from the literature that construct 1-Lipschitz convolutions: BCOP, Cayley, SOC, AOL, LOT, CPL, and SLL. 
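Before turning to the individual layers, the spectral-norm machinery that several of them rely on (the power method mentioned above, detailed in Appendix A of the paper) can be illustrated in a few lines. The following PyTorch sketch estimates $\|W\|_2$ from Equation (2) for a dense weight matrix by power iteration; the function name, iteration count, and random initialization are our own assumptions for illustration, not the implementation used in the paper.

```python
import torch

def spectral_norm_power_iteration(W: torch.Tensor, n_iters: int = 50) -> torch.Tensor:
    """Estimate the spectral norm ||W||_2 of a weight matrix by power iteration.

    Illustrative sketch only; the iteration count and initialization are assumptions.
    """
    v = torch.randn(W.shape[1])
    v = v / v.norm()
    for _ in range(n_iters):
        u = W @ v            # multiply by W
        v = W.T @ u          # multiply by W^T
        v = v / v.norm()     # re-normalize to keep the iterate bounded
    # v now approximates the top right singular vector, so ||W v|| ~ sigma_max(W).
    return (W @ v).norm()

# Dividing a linear layer's weight by this estimate makes the layer (approximately) 1-Lipschitz.
W = torch.randn(64, 128)
print(spectral_norm_power_iteration(W))
```

For convolutional layers the same idea is applied implicitly to the Jacobian, by alternating the convolution and its transpose instead of forming explicit matrix products.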
Further 1-Lipschitz methods, [19, 42, 47], and the reasons why they were not included in our main comparison can be found in Appendix B.", + "bbox": [ + 75, + 247, + 472, + 414 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "BCOP Block Orthogonal Convolution Parameterization (BCOP) was introduced by Li et al. in [27] to extend a previous work by Xiao et al. [45] that focused on the importance of orthogonal initialization of the weights. For a $k \\times k$ convolution, BCOP uses a set of $(2k - 1)$ parameter matrices. Each of these matrices is orthogonalized using the algorithm by Bjorck & Bowie [5] (see also Appendix A). Then, a $k \\times k$ kernel is constructed from those matrices in a way that guarantees that the resulting layer is orthogonal.", + "bbox": [ + 75, + 435, + 472, + 571 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Cayley Another family of orthogonal convolutional and fully connected layers has been proposed by Trockman and Kolter [40] by leveraging the Cayley Transform [8], which maps a skew-symmetric matrix $A$ into an orthogonal matrix $Q$ using the relation", + "bbox": [ + 75, + 593, + 470, + 670 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nQ = (I - A) (I + A) ^ {- 1}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 681, + 468, + 699 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The transformation can be used to parameterize orthogonal weight matrices for linear layers in a straightforward way. For convolutions, the authors make use of the fact that circular padded convolutions are vector-matrix products in the Fourier domain. As long as all those vector-matrix products have orthogonal matrices, the full convolution will have an orthogonal Jacobian. For Cayley Convolutions, those matrices are orthogonalized using the Cayley transform.", + "bbox": [ + 75, + 712, + 468, + 834 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "SOC Skew Orthogonal Convolution is an orthogonal convolutional layer presented by Singla et al. [36], obtained by leveraging the exponential convolution [15]. Analogously", + "bbox": [ + 75, + 854, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "to the matrix case, given a kernel $L\\in \\mathbb{R}^{c\\times c\\times k\\times k}$ , the exponential convolution can be defined as", + "bbox": [ + 496, + 90, + 890, + 119 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\exp (L) (x) := x + \\frac {L \\star x}{1} + \\frac {L \\star^ {2} x}{2 !} + \\dots + \\frac {L \\star^ {k} x}{k !} + \\dots , \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 498, + 126, + 890, + 169 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\star^k$ denotes a convolution applied $k$ -times. The authors proved that any exponential convolution has an orthogonal Jacobian matrix as long as $L$ is skew-symmetric, providing a way of parameterizing 1-Lipschitz layers. In their work, the sum of the infinite series is approximated by computing only the first 5 terms during training and the first 12 terms during the inference, and $L$ is normalized to have unitary spectral norm following the method presented in [35] (see Appendix A).", + "bbox": [ + 496, + 169, + 890, + 306 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "AOL Prach and Lampert [34] introduced Almost Orthogonal Lipschitz (AOL) layers. 
For any matrix $P$ , they defined a diagonal rescaling matrix $D$ with", + "bbox": [ + 496, + 323, + 890, + 368 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nD _ {i i} = \\left(\\sum_ {j} \\left| P ^ {\\top} P \\right| _ {i j}\\right) ^ {- 1 / 2} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 376, + 890, + 414 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and proved that the spectral norm of $PD$ is bounded by 1. This result was used to show that the linear layer given by $l(x) = PDx + b$ (where $P$ is the learnable matrix and $D$ is given by Eq. (5)) is 1-Lipschitz. Furthermore, the authors extended the idea so that it can also be efficiently applied to convolutions. This is done by calculating the rescaling in Equation (5) with the Jacobian $J$ of a convolution instead of $P$ . In order to evaluate it efficiently the authors express the elements of $J^{\\top}J$ explicitly in terms of the kernel values.", + "bbox": [ + 496, + 421, + 890, + 556 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LOT The layer presented by Xu et al. [46] extends the idea of [19] to use the Inverse Square Root of a matrix in order to orthogonalize it. Indeed, for any matrix $V$ , the matrix $Q = V(V^T V)^{-\\frac{1}{2}}$ is orthogonal. Similarly to the Cayley method, for the layer-wise orthogonal training (LOT) the convolution is applied in the Fourier frequency domain. To find the inverse square root, the authors relay on an iterative Newton Method. In details, defining $Y_0 = V^T V$ , $Z_0 = I$ , and", + "bbox": [ + 496, + 575, + 890, + 712 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nY _ {i + 1} = \\frac {1}{2} Y _ {i} \\left(3 I - Z _ {i} Y _ {i}\\right), Z _ {i + 1} = \\frac {1}{2} \\left(3 I - Z _ {i} Y _ {i}\\right) Z _ {i}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 506, + 717, + 890, + 747 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "it can be shown that $Y_{i}$ converges to $(V^{T}V)^{-\\frac{1}{2}}$ . In their proposed layer, the authors apply 10 iterations of the method for both training and evaluation.", + "bbox": [ + 496, + 755, + 890, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "CPL Meunier et al. [31] proposed the Convex Potential Layer. Given a non-decreasing 1-Lipschitz function $\\sigma$ (usually ReLU), the layer is constructed as", + "bbox": [ + 496, + 819, + 890, + 864 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nl (x) = x - \\frac {2}{\\| W \\| _ {2} ^ {2}} W ^ {\\top} \\sigma (W x + b), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 573, + 871, + 890, + 905 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "24576", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "which is 1-Lipschitz by design. The spectral norm required to calculate $l(x)$ is approximated using the power method (see Appendix A).", + "bbox": [ + 76, + 90, + 468, + 137 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "SLL The SDP-based Lipschitz Layers (SLL) proposed by Araujo et al. [3] combine the CPL layer with the upper bound on the spectral norm from AOL. 
The layer can be written as", + "bbox": [ + 76, + 157, + 467, + 217 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nl (x) = x - 2 W ^ {\\top} Q ^ {- 2} D ^ {2} \\sigma (W x + b), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 142, + 229, + 467, + 248 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $Q$ is a learnable diagonal matrix with positive entries and $D$ is deduced by applying Equation (5) to $P = W^{\\top}Q^{-1}$ .", + "bbox": [ + 76, + 260, + 467, + 305 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Remark 1. Both CPL and SLL are non-linear by construction, so they can be used to construct a network without any further use of activation functions. However, carrying out some preliminary experiments, we empirically found that alternating CPL (and SLL) layers with MaxMin activation layers allows achieving a better performance.", + "bbox": [ + 76, + 316, + 467, + 407 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Theoretical Comparison", + "text_level": 1, + "bbox": [ + 76, + 434, + 303, + 450 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As illustrated in the last section, various ideas and methods have been proposed to parameterize 1-Lipschitz layers. This causes the different methods to have very different properties and requirements. This section aims at highlighting the properties of the different algorithms, focusing on the algorithmic complexity and the required memory.", + "bbox": [ + 76, + 459, + 467, + 551 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1 provides an overview of the computational complexity and memory requirements for the different layers considered in the previous section. For the sake of clarity, the analysis is performed by considering separately the transformations applied to the input of the layers and those applied to the weights to ensure the 1-Lipschitz constraint. Each of the two sides of the table contains three columns: i) Operations contains the most costly transformations applied to the input as well as to the parameters of different layers; ii) MACS reports the computational complexity expressed in multiply-accumulate operations (MACS) involved in the transformations (only leading terms are presented); iii) Memory reports the memory required by the transformation during the training phase.", + "bbox": [ + 76, + 559, + 467, + 771 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "At training time, both input and weight transformations are required, thus the training complexity of the forward pass can be computed as the sum of the two corresponding MACS columns of the table. Similarly, the training memory requirements can be computed as the sum of the two corresponding Memory columns of the table. For the considered operations, the cost of the backward pass during training has the same computational complexity as the forward pass, and", + "bbox": [ + 76, + 780, + 467, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "therefore increases the overall complexity by a constant factor. At inference time, all the parameter transformations can be computed just once and cached afterward. Therefore, the inference complexity is equal to the complexity due to the input transformation (column 3 in the table). At inference time, the intermediate variables are not stored in memory, hence, the memory requirements are much lower than during training. 
The values cannot directly be inferred from Table 1, we reported them separately in Appendix C.1.", + "bbox": [ + 496, + 90, + 890, + 227 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that all the terms reported in Table 1 depend on the batch size $b$ , the input size $s \\times s \\times c$ , the number of inner iterations of a method $t$ , and the kernel size $k \\times k$ . (Often, $t$ is different at training and inference time.) For the sake of clarity, the MACS of a naive convolution implementation is denoted by $C$ ( $C = bs^2c^2k^2$ ), the number of inputs of a layer is denoted by $M$ ( $M = bs^2c$ ), and the size of the kernel of a standard convolution is denoted by $P$ ( $P = c^2k^2$ ). Only the leading terms of the computations are reported in Table 1. In order to simplify some terms, we assume that $c > \\log_2(s)$ and that rescaling a tensor (by a scalar) as well as adding two tensors does not require any memory in order to do backpropagation. We also assume that each additional activation does require extra memory. All these assumptions have been verified to hold within PyTorch, [33]. Also, when the algorithm described in the paper and the version provided in the supplied code differed, we considered the algorithm implemented in the code.", + "bbox": [ + 496, + 234, + 890, + 505 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The transformations reported in the table are convolutions (CONV), Fast Fourier Transformations (FFT), matrix-vector multiplications (MV), matrix-matrix multiplications (MM), matrix inversions (INV), as well as applications of an activation function (ACT). The application of algorithms such as BJORCK & Bowie (BnB), power method, and Fantastic 4 (F4) is also reported (see Appendix A for descriptions).", + "bbox": [ + 496, + 513, + 890, + 619 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Analysis of the computational complexity", + "text_level": 1, + "bbox": [ + 500, + 628, + 851, + 643 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "It is worth noting that the complexity of the input transformations (in Table 1) is similar for all methods. This implies that a similar scaling behaviour is expected at inference time for the models. Cayley and LOT apply an FFT-based convolution and have computational complexity independent of the kernel size. CPL and SLL require two convolutions, which make them slightly more expensive at inference time. Notably, SOC requires multiple convolutions, making this method more expensive at inference time.", + "bbox": [ + 496, + 651, + 890, + 787 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "At training time, parameter transformations need to be applied in addition to the input transformations during every forward pass. For SOC and CPL, the input transformations always dominate the parameter transformations in terms of computational complexity. This means the complexity scales like $c^2$ , just like a regular convolution, with a further factor of 2 and 5 respectively. All other methods", + "bbox": [ + 496, + 795, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "24577", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/afd9557e2704dcddf637eddd610b79ffc83740ab5a1cc54457a6b1e4c5f5f682.jpg", + "table_caption": [ + "Table 1. Computational complexity and memory requirements of different methods. 
We report multiply-accumulate operations (MACS) as well as memory requirements (per layer) for batch size $b$ , image size $s \\times s \\times c$ , kernel size $k \\times k$ and number of inner iterations $t$ . We use $C = bs^2c^2k^2$ , $M = bs^2c$ and $P = c^2k^2$ . For a detailed explanation on what is reported see Section 3. For some explanation on how the entries of this table were derived, see Appendix C." + ], + "table_footnote": [], + "table_body": "
Method | Input Transformations | Parameter Transformations
 | Operations | MACS O(·) | Memory | Operations | MACS O(·) | Memory O(·)
Standard | CONV | $C$ | $M$ | - | - | $P$
AOL | CONV | $C$ | $M$ | CONV | $c^3k^4$ | $5P$
BCOP | CONV | $C$ | $M$ | BnB & MMs | $c^3kt + c^3k^3$ | $c^2kt + c^2k^3$
Cayley | FFTs & MVs | $bs^2c^2$ | $\frac{5}{2}M$ | FFTs & INVs | $s^2c^3$ | $\frac{3}{2}s^2c^2$
CPL | CONVs & ACT | $2C$ | $3M$ | power method | $s^2c^2k^2$ | $P + s^2c$
LOT | FFTs & MVs | $bs^2c^2$ | $3M$ | FFTs & MMs | $4s^2c^3t$ | $4s^2c^2t$
SLL | CONVs & ACT | $2C$ | $3M$ | CONVs | $c^3k^4$ | $5P$
SOC | CONVs | $Ct_1$ | $Mt_1$ | F4 | $c^2k^2t_2$ | $P$
", + "bbox": [ + 132, + 156, + 839, + 327 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "require parameter transformations that scale like $c^3$ , making them more expensive for larger architectures. In particular, we do expect Cayley and LOT to require long training times for larger models, since the complexity of their parameter transformations further depends on the input size.", + "bbox": [ + 75, + 367, + 470, + 445 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2. Analysis of the training memory requirements", + "text_level": 1, + "bbox": [ + 76, + 452, + 468, + 470 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The memory requirements of the different layers are important, since they determine the maximum batch size and the type of models we can train on a particular infrastructure. At training time, typically all intermediate results are kept in memory to perform backpropagation. This includes intermediate results for both input and parameter transformations. The input transformations usually preserve the size, and therefore the memory required is usually of $\\mathcal{O}(M)$ . Therefore, for the input transformations, all methods require memory not more than a constant factor worse than standard convolutions, with the worst method being SOC, with a constant $t_1$ , typically equal to 5.", + "bbox": [ + 75, + 476, + 468, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In addition to the input transformation, we also need to store intermediate results of the parameter transformations in memory in order to evaluate the gradients. Again, most methods approximately preserve the sizes during the parameter transformations, and therefore the memory required is usually of order $\\mathcal{O}(P)$ . Exceptions to this rule are Cayley and LOT, with a larger $\\mathcal{O}(s^2 c^2)$ term, as well as BCOP.", + "bbox": [ + 75, + 665, + 468, + 772 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experimental Setup", + "text_level": 1, + "bbox": [ + 76, + 784, + 269, + 801 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This section presents an experimental study aimed at comparing the performance of the considered layers with respect to different metrics. Before presenting the results, we first summarize the setup used in our experiments. For a detailed description see Appendix E. To have a fair and meaningful comparison among the various models, all the", + "bbox": [ + 75, + 809, + 470, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "proposed layers have been evaluated using the same architecture, loss function, and optimizer. Since, according to the data reported in Table 1, different layers may have different throughput, to have a fair comparison with respect to the tested metrics, we limited the total training time instead of fixing the number of training epochs. Results are reported for training times of $2\\mathrm{h}$ , $10\\mathrm{h}$ , and $24\\mathrm{h}$ on one A100 GPU.", + "bbox": [ + 496, + 367, + 893, + 474 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our architecture is a standard convolutional network that doubles the number of channels whenever the resolution is reduced [6, 40]. For each method, we tested architectures of different sizes. 
We denoted them as XS, S, M and L, depending on the number of parameters, according to the criteria in Table 7, ranging from 1.5M to 100M parameters.", + "bbox": [ + 496, + 484, + 893, + 578 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Since different methods benefit from different learning rates and weight decay, for each setting (model size, method and dataset), we used the best values resulting from a random search performed on multiple training runs on a validation set composed of $10\\%$ of the original training set. More specifically, 16 runs were performed for each configuration of randomly sampled hyperparameters, and we selected the configuration maximizing the certified robust accuracy w.r.t. $\\epsilon = 36 / 255$ (see Appendix E.4 for details).", + "bbox": [ + 496, + 601, + 893, + 739 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The evaluation was carried out using four different datasets: CIFAR-10, CIFAR-100 [21], Tiny ImageNet [23], and Imagenette [16] for large images. Augmentation was used during the training (Random crops and flips on CIFAR-10 and CIFAR-100, RandAugment [11] on Tiny ImageNet, and random crop as well as RandAugment on Imagenette), details in Appendix E.5. We use the loss function proposed by [34], with same temperature 0.25, and where we tuned the margin to maximize the robust accuracy for $\\epsilon = \\frac{36}{255}$ . In detail, we considered a margin of $2\\sqrt{2}\\epsilon$ where", + "bbox": [ + 496, + 750, + 895, + 904 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "24578", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "the $\\sqrt{2}$ factor comes from the $L_{2}$ norm [41], and the factor 2 has been added to help with generalization.", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Metrics", + "text_level": 1, + "bbox": [ + 76, + 132, + 171, + 148 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "All the considered models were evaluated based on three main metrics: the throughput, the required memory, and the certified robust accuracy.", + "bbox": [ + 76, + 156, + 467, + 202 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Throughput and epoch time The throughput of a model is the average number of examples that the model can process per second. It determines how many epochs are processed in a given time frame. The evaluation of the throughput was performed on an 80GB-A100-GPU based on the average time of 100 mini-batches. We measured the inference throughput with cached parameter transformations.", + "bbox": [ + 75, + 224, + 467, + 330 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Memory required Layers that require less memory allow for larger batch size, and the memory requirements also determine the type of hardware we can train a model on. For each model, we measured and reported the maximal GPU memory occupied by tensors using the function torch.cuda.max_memory_allocated() provided by the PyTorch framework. This is not exactly equal to the overall GPU memory requirement but gives a fairly good approximation of it. Note that the model memory measured in this way also includes additional memory required by the optimizer (e.g. to store the momentum term) as well as by the activation layers in the forward pass. However, this additional memory should be at most of order $\\mathcal{O}(M + P)$ . 
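Since the paper reports peak tensor memory via torch.cuda.max_memory_allocated(), a minimal sketch of how such a measurement could be taken is given below; the wrapper function, the dummy sum() loss, and the exact call ordering are our own assumptions rather than the authors' measurement code.

```python
import torch

def peak_training_memory_bytes(model: torch.nn.Module, batch: torch.Tensor) -> int:
    """Peak GPU memory (in bytes) held by tensors during one forward/backward pass.

    Sketch under assumed names; only torch.cuda.max_memory_allocated() is the
    function referenced in the paper.
    """
    model = model.cuda()
    torch.cuda.reset_peak_memory_stats()      # forget peaks from earlier runs
    out = model(batch.cuda())
    out.sum().backward()                      # dummy loss so activations are stored for backprop
    torch.cuda.synchronize()                  # make sure all kernels have finished
    return torch.cuda.max_memory_allocated()
```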
As for the throughput, we evaluated and cached all calculations independent of the input at inference time.", + "bbox": [ + 75, + 353, + 467, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Certified robust accuracy In order to evaluate the performance of a 1-Lipschitz network, the standard metric is the certified robust accuracy. An input is classified certifiably robustly with radius $\\epsilon$ by a model, if no perturbations of the input with norm bounded by $\\epsilon$ can change the prediction of the model. Certified robust accuracy measures the proportion of examples that are classified correctly as well as certifiably robustly. For 1-Lipschitz models, a lower bound of the certified $\\epsilon$ -robust accuracy is the portion of correctly classified inputs such that $\\mathcal{M}_f(x_i, l_i) > \\epsilon \\sqrt{2}$ where the margin $\\mathcal{M}_f(x, l)$ of a model $f$ at input $x$ with label $l$ , given as $\\mathcal{M}_f(x, l) = f(x)_l - \\max_{j \\neq l} f_j(x)$ , is the difference between target class score and the highest score of a different class. For details, see [41].", + "bbox": [ + 75, + 601, + 467, + 813 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experimental Results", + "text_level": 1, + "bbox": [ + 76, + 828, + 282, + 847 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This section presents the results of the comparison performed by applying the methodology discussed in Section 4. The results related to the different metrics are dis", + "bbox": [ + 75, + 854, + 467, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "cussed in dedicated subsections and the key takeaways are summarized in the radar-plot illustrated in Figure 1.", + "bbox": [ + 498, + 90, + 890, + 121 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Training and inference times", + "text_level": 1, + "bbox": [ + 500, + 131, + 756, + 147 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 2 plots the training time per epoch of the different models as a function of their size, and Figure 3 plots the corresponding inference throughput for the various sizes as described in Section 4. As described in Table 5, the model base width, referred to as $w$ , is doubled from one model size to the next. We expect the training and inference time to scale with $w$ similarly to how individual layers scale with their number of channels, $c$ (in Table 1). This is because the width of each of the 5 blocks of our architecture is a constant multiple of the base width, $w$ .", + "bbox": [ + 496, + 154, + 890, + 306 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The training time increases (at most) about linearly with $w$ for standard convolutions, whereas the computational complexity of each single convolution scales like $c^2$ . This suggests that parallelism on the GPU and the overhead from other operations (activations, parameter updates, etc.) are important factors determining the training time. This also explains why CPL (doing two convolutions, with identical kernel parameters) is only slightly slower than a standard", + "bbox": [ + 496, + 325, + 890, + 446 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/57d3c18451c1ea846b814cee2491a28c67e9a17297d4c0a67667e8c25ce058f9.jpg", + "image_caption": [ + "Figure 2. Training time per epoch (on CIFAR-10) for different methods and different model sizes." 
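To make the certified-robust-accuracy criterion from Section 4.1 concrete, the sketch below counts the inputs that are classified correctly and whose margin exceeds $\epsilon\sqrt{2}$; the function name, tensor layout, and batching are assumptions for illustration, not the evaluation code released with the paper.

```python
import torch

def certified_robust_accuracy(logits: torch.Tensor, labels: torch.Tensor,
                              eps: float = 36 / 255) -> float:
    """Fraction of examples classified correctly AND with margin > eps * sqrt(2),
    i.e. the lower bound on certified robust accuracy for a 1-Lipschitz model.
    Assumed helper: logits has shape (N, classes), labels has shape (N,)."""
    correct = logits.argmax(dim=1) == labels
    # Margin: score of the true class minus the largest other-class score.
    true_score = logits.gather(1, labels.unsqueeze(1)).squeeze(1)
    others = logits.clone()
    others.scatter_(1, labels.unsqueeze(1), float("-inf"))
    margin = true_score - others.max(dim=1).values
    certified = margin > eps * (2 ** 0.5)
    return (correct & certified).float().mean().item()
```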
+ ], + "image_footnote": [], + "bbox": [ + 504, + 470, + 885, + 621 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e182984c7e9505b9a017c7b606b0085ad930a7ceb922c7e77489e2d91a497b54.jpg", + "image_caption": [ + "Figure 3. Inference throughput for different methods as a function of their size for CIFAR-10 sizes input images. All parameter transformations have been evaluated and cached beforehand" + ], + "image_footnote": [], + "bbox": [ + 504, + 681, + 885, + 839 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "24579", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "convolution, and SOC (doing 5 convolutions) is only about 3 times slower than the standard convolution. The AOL and SLL methods also require times comparable to a standard convolution for small models, although eventually, the $c^3$ term in the computation of the rescaling makes them slower for larger models. Finally, Cayley, LOT, and BCOP methods take much longer training times per epoch. For Cayley and LOT this behavior was expected, as they have a large $\\mathcal{O}(s^2c^3)$ term in their computational complexity. See Table 1 for further details.", + "bbox": [ + 75, + 90, + 472, + 241 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "At inference time transformations of the weights are cached, therefore some methods (AOL, BCOP) do not have any overhead compared to a standard convolution. As expected, other methods (CPL, SLL, and SOC) that apply additional convolutions to the input suffer from a corresponding overhead. Finally, Cayley and LOT have a slightly different throughput due to their FFT-based convolution. Among them, Cayley is about twice as fast because it involves a real-valued FFT rather than a complex-valued one. From Figure 3, it can be noted that cached Cayley and CPL have the same inference time, even though CPL uses twice the number of convolutions. We believe this is due to the fact that the conventional FFT-based convolution is quite efficient for large kernel sizes, but for smaller ones PyTorch implements a faster algorithm, i.e., Winograd, [22], that can be up to 2.5 times faster.", + "bbox": [ + 75, + 260, + 472, + 501 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Training memory requirements", + "text_level": 1, + "bbox": [ + 76, + 508, + 354, + 525 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The training and inference memory requirements of the various models (measured as described in Section 4.1) are reported in Figure 4 as a function of the model size. The results of the theoretical analysis reported in Table 1 suggest that the training memory requirements always have a term linear in the number of channels $c$ (usually the activations from the forward pass), as well as a term quadratic in $c$ (usually the weights and all transformations applied to the weights during the forward pass). This behavior can also be observed from Figure 4. For some of the models, the memory required approximately doubles from one model size to the next one, just like the width. This means that the linear term dominates (for those sizes), which makes those models relatively cheap to scale up. For the BCOP, LOT, and Cayley methods, the larger coefficients in the $c^2$ term (for LOT and Cayley the coefficient is even dependent on the input size, $s^2$ ) cause this term to dominate. This makes it much harder to scale those methods to more parameters. 
Method LOT requires huge amounts of memory, in particular LOT-L is too large to fit in 80GB GPU memory.", + "bbox": [ + 75, + 532, + 468, + 834 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Note that at test time, the memory requirements are much lower, because the intermediate activation values do not need to be stored, as there is no backward pass. Therefore, at inference time, most methods require a very similar", + "bbox": [ + 75, + 840, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0bc1c9dba0306e02c68b9d7cd804a7f5810379f0550caa35bcec7add3e5d0843.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 90, + 888, + 242 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/91276288aaa8dec025d6135b4f23eb42d943ec060f40240a92d2d8d7c9b1f44f.jpg", + "image_caption": [ + "Figure 4. Memory required at training and inference time for input size $32 \\times 32$ ." + ], + "image_footnote": [], + "bbox": [ + 504, + 247, + 887, + 397 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "amount of memory as a standard convolution. The Cayley and LOT methods require more memory since they perform the calculation in the Fourier space, creating an intermediate representation of the weight matrices of size $\\mathcal{O}(s^2 c^2)$ .", + "bbox": [ + 498, + 465, + 890, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3. Certified robust accuracy", + "text_level": 1, + "bbox": [ + 500, + 537, + 730, + 553 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The results related to the accuracy and the certified robust accuracy for the different methods, model sizes, and datasets measured on a 24h training budget are summarized in Table 2. The differences among the various model sizes are also highlighted in Figure 6 by reporting the sorted values of the certified robust accuracy. Further tables and plots relative to different training budgets can be found in Appendix G. The reader can compare our results with the state-of-the-art certified robust accuracy summarized in Appendix D. However, it is worth noting that, to reach state-of-the-art performance, authors often carry out experiments using large model sizes and long training times, which makes it hard to compare the methods themselves. On the other hand, the evaluation proposed in this paper allows a fairer comparison among the different methods, since it also considers timing and memory aspects. This restriction based on time, rather than the number of epochs, ensures that merely enlarging the model size does not lead to improved performance, as bigger models typically process fewer epochs of data. Indeed, in our results in Figure 6 it is usually the M (and not the L) model that performs best.", + "bbox": [ + 496, + 560, + 892, + 878 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Experiments show that SOC performs best, reaching the", + "bbox": [ + 519, + 885, + 890, + 901 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "24580", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/a4bc951066e0f4307779b0046d12277da20d1f6f2f589d4e1c500650d10b3f5f.jpg", + "table_caption": [ + "Table 2. Certified robust accuracy for radius $\\epsilon = 36 / 255$ on the evaluated datasets. Training is performed for 24 hours." + ], + "table_footnote": [], + "table_body": "
Methods | Accuracy [%] | Robust Accuracy [%]
 | XS | S | M | L | XS | S | M | L
CIFAR-10
AOL | 71.7 | 73.6 | 73.4 | 73.7 | 59.1 | 60.8 | 61.0 | 61.5
BCOP | 71.7 | 73.1 | 74.0 | 74.6 | 58.5 | 59.3 | 60.5 | 61.5
CPL | 74.9 | 76.1 | 76.6 | 76.8 | 62.5 | 64.2 | 65.1 | 65.2
Cayley | 73.1 | 74.2 | 74.4 | 73.6 | 59.5 | 61.1 | 61.0 | 60.1
LOT | 75.5 | 76.6 | 72.0 | - | 63.4 | 64.6 | 58.7 | -
SLL | 73.7 | 74.2 | 75.3 | 74.3 | 61.0 | 62.0 | 62.8 | 62.3
SOC | 74.1 | 75.0 | 76.9 | 76.9 | 61.3 | 62.9 | 66.3 | 65.4
CIFAR-100
AOL | 40.3 | 43.4 | 44.3 | 41.9 | 27.9 | 31.0 | 31.4 | 29.7
BCOP | 41.4 | 42.8 | 43.7 | 42.2 | 28.4 | 30.1 | 31.2 | 29.2
CPL | 42.3 | - | 45.2 | 44.3 | 30.1 | - | 33.2 | 32.1
Cayley | 42.3 | 43.9 | 43.5 | 42.9 | 29.2 | 30.5 | 30.5 | 29.5
LOT | 43.5 | 45.2 | 42.8 | - | 30.8 | 32.5 | 29.6 | -
SLL | 41.4 | 42.8 | 42.4 | 42.1 | 28.9 | 30.5 | 29.9 | 29.6
SOC | 43.1 | 45.2 | 47.3 | 46.2 | 30.6 | 32.6 | 34.9 | 33.5
Tiny ImageNet
AOL | 26.6 | 29.3 | 30.3 | 30.0 | 18.1 | 19.7 | 21.0 | 20.6
BCOP | 22.4 | 26.2 | 27.6 | 27.0 | 13.8 | 16.9 | 17.2 | 16.8
CPL | 28.3 | 29.3 | 29.8 | 30.3 | 18.9 | 19.7 | 20.3 | 20.1
Cayley | 27.8 | 29.6 | 30.1 | 27.2 | 17.9 | 19.5 | 19.3 | 16.7
LOT | 30.7 | 32.5 | 28.8 | - | 20.8 | 21.9 | 18.1 | -
SLL | 25.1 | 27.0 | 26.5 | 27.9 | 16.6 | 18.4 | 17.7 | 18.8
SOC | 28.9 | 28.8 | 32.1 | 32.1 | 18.9 | 18.8 | 21.2 | 21.1
Imagenette
AOL | 80.8 | 83.7 | 82.8 | 76.8 | 79.9 | 78.5
BCOP | 81.2 | 84.5 | 9.8 | 75.6 | 80.1 | 9.8
CPL | 85.5 | 86.5 | 86.4 | 80.8 | 82.4 | 82.3
Cayley | 81.2 | 77.9 | - | 75.8 | 71.7 | -
SLL | 80.8 | 83.4 | 79.3 | 75.4 | 78.0 | 72.8
SOC | 80.6 | 83.6 | 79.0 | 74.7 | 78.4 | 73.5
", + "bbox": [ + 81, + 128, + 465, + 666 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "highest certified robust accuracy on two datasets. CPL models consistently rank in top-10 position among the three datasets. LOT performed well, in particular on Tiny ImageNet dataset where it performs the best. AOL did not reach high accuracy on CIFAR-10, but reached more competitive results on Tiny ImageNet. An opposite effect can be observed for SLL, which performance seems to strongly depend on the number of classes. BCOP only reach the top-10 once, while Cayley is consistently outperformed by the other methods. The very same analysis can be applied to the clean accuracy, whose sorted bar-plots are reported in Appendix G, where the main difference is that Cayley performs slightly better for that metric. Furthermore, it is worth", + "bbox": [ + 75, + 703, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "highlighting that CPL is sensitive to weight initialization. We faced numerical errors during the 10h and 24h training of the small model on CIFAR-100. On Imagenette, CPL clearly performs best, followed by BCOP and AOL. Note that these methods all construct a kernel so that the convolution is 1-Lipschitz. This seems to be good strategy for higher resolution datasets. E.g. SOC, that instead applies multiple convolutions has a drop in performs compared to other datasets.", + "bbox": [ + 496, + 90, + 890, + 226 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3.1 Interpretation of the results", + "text_level": 1, + "bbox": [ + 498, + 253, + 746, + 268 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We confirm empirically what suspected in [46]: layers that naturally include a skip connections (CPL, SLL, SOC) generally perform better than layers that do not have this ability. Furthermore, we noticed that layers with an identity initialization (AOL, LOT) perform better than layers that do neither (BCOP, Cayley). Presumably this is due to the MaxMin activation reducing the variance in the forward pass when alternated with non-identity layers.", + "bbox": [ + 496, + 277, + 890, + 401 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our results also allow ruling out some other possible explanation: one might suspect that pure contractive layers (AOL, CPL, and SLL) would suffer from vanishing gradients, differently from orthogonal ones, however, our experiments do not show any evidence of this fact. Furthermore, one might suspect that slower methods perform worse, because they allow fewer epochs for a given time budget, however, our experiments do not support this fact; two relative slow methods (SOC, LOT) are among the best ones.", + "bbox": [ + 496, + 411, + 890, + 547 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusions and Guidelines", + "text_level": 1, + "bbox": [ + 500, + 575, + 756, + 590 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work presented a comparative study of state-of-the-art 1-Lipschitz layers under the lens of different metrics, such as time and memory requirements, accuracy, and certified robust accuracy, all evaluated at training and inference time. 
A theoretical comparison of the methods in terms of time and memory complexity was also presented and validated by experiments.", + "bbox": [ + 496, + 602, + 890, + 708 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Taking all metrics into account (summarized in Figure 1), the results are in favor of CPL, due to its highest performance and lower consumption of computational resources. When large computational resources are available and the application does not impose stringent timing constraints during inference and training, the SOC layer could be used, due to its slightly better performance. Finally, those applications in which the inference time is crucial may take advantage of AOL or BCOP, which do not introduce additional runtime overhead (during inference) compared to a standard convolution. For higher resolution images, it also seems that CPL is the most promising method.", + "bbox": [ + 496, + 719, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "24581", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Thomas Altstidl, David Dobre, Björn Eskofier, Gauthier Gidel, and Leo Schwinn. Raising the bar for certified adversarial robustness with diffusion models. arXiv preprint arXiv:2305.10388, 2023. 14, 22", + "[2] Cem Anil, James Lucas, and Roger Grosse. Sorting out Lipschitz function approximation. In International Conference on Machine Learning (ICML), 2019. 2, 3, 11", + "[3] Alexandre Araujo, Aaron J Havens, Blaise Delattre, Alexandre Allauzen, and Bin Hu. A unified algebraic perspective on Lipschitz neural networks. In International Conference on Learning Representations (ICLR), 2023. 4, 14, 22", + "[4] Battista Biggio, Igino Corona, Davide Maiorca, Blaine Nelson, Nedim Šrndić, Pavel Laskov, Giorgio Giacinto, and Fabio Roli. Evasion attacks against machine learning at test time. In Machine Learning and Knowledge Discovery in Databases, 2013. 1", + "[5] Å. Björck and C. Bowie. An iterative algorithm for computing the best estimate of an orthogonal matrix. SIAM Journal on Numerical Analysis, 1971. 3, 11", + "[6] Fabio Brau, Giulio Rossolini, Alessandro Biondi, and Giorgio Buttazzo. Robust-by-design classification via unitary-gradient neural networks. Proceedings of the AAAI Conference on Artificial Intelligence, 2023. 2, 5", + "[7] Nicholas Carlini, Florian Tramer, Krishnamurthy Dj Dvijotham, Leslie Rice, Mingjie Sun, and J Zico Kolter. (Certified!!) adversarial robustness for free! In International Conference on Learning Representations (ICLR), 2023. 2", + "[8] Arthur Cayley. About the algebraic structure of the orthogonal group and the other classical groups in a field of characteristic zero or a prime characteristic. Journal für die reine und angewandte Mathematik, 1846. 3", + "[9] Moustapha Cisse, Piotr Bojanowski, Edouard Grave, Yann Dauphin, and Nicolas Usunier. Parseval networks: Improving robustness to adversarial examples. In International conference on machine learning, 2017. 2, 11", + "[10] Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. Certified adversarial robustness via randomized smoothing. In Proceedings of the 36th International Conference on Machine Learning, 2019. 2", + "[11] Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops, 2020. 5, 16", + "[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Conference on Computer Vision and Pattern Recognition (CVPR), 2009. 16", + "[13] Farzan Farnia, Jesse Zhang, and David Tse. Generalizable adversarial training via spectral normalization. In International Conference on Learning Representations, 2018. 11", + "[14] Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. stat, 2015. 1", + "[15] Emiel Hoogeboom, Victor Garcia Satorras, Jakub Tomczak, and Max Welling. The convolution exponential and generalized Sylvester flows. In Advances in Neural Information Processing Systems, 2020. 3" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] Jeremy Howard. Imagenette. https://github.com/fastai/imagenette/. Accessed: 01.02.2024. 5, 16", + "[17] Kai Hu, Klas Leino, Zifan Wang, and Matt Fredrikson. Effectively leveraging capacity for improved deterministic robustness certification. In International Conference on Learning Representations (ICLR), 2024. 14, 22", + "[18] Kai Hu, Andy Zou, Zifan Wang, Klas Leino, and Matt Fredrikson. Unlocking deterministic robustness certification on imagenet. Conference on Neural Information Processing Systems (NeurIPS), 2024. 14", + "[19] Lei Huang, Li Liu, Fan Zhu, Diwen Wan, Zehuan Yuan, Bo Li, and Ling Shao. Controllable orthogonalization in training DNNs. In Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3, 11, 12", + "[20] Guy Katz, Clark Barrett, David L Dill, Kyle Julian, and Mykel J Kochenderfer. Reluplex: An efficient SMT solver for verifying deep neural networks. In International conference on computer aided verification, 2017. 2", + "[21] Alex Krizhevsky. Learning multiple layers of features from tiny images. Technical report, 2009. 5, 16", + "[22] Andrew Lavin and Scott Gray. Fast algorithms for convolutional neural networks. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 7", + "[23] Ya Le and Xuan Yang. Tiny imagenet visual recognition challenge. CS 231N, 2015. 5, 16", + "[24] Klas Leino, Zifan Wang, and Matt Fredrikson. Globally-robust neural networks. In International Conference on Machine Learning, 2021. 2, 11", + "[25] Mario Lezcano-Casado and David Martínez-Rubio. Cheap orthogonal constraints in neural networks: A simple parametrization of the orthogonal and unitary group. In International Conference on Machine Learning (ICML), 2019. 11", + "[26] Linyi Li, Tao Xie, and Bo Li. Sok: Certified robustness for deep neural networks. In 2023 IEEE Symposium on Security and Privacy (SP), 2023. 1", + "[27] Qiyang Li, Saminul Haque, Cem Anil, James Lucas, Roger B Grosse, and Joern-Henrik Jacobsen. Preventing gradient attenuation in Lipschitz constrained convolutional networks. In Conference on Neural Information Processing Systems (NeurIPS), 2019. 2, 3, 11, 14", + "[28] Shuai Li, Kui Jia, Yuxin Wen, Tongliang Liu, and Dacheng Tao. Orthogonal deep neural networks. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 11", + "[29] Max Losch, David Stutz, Bernt Schiele, and Mario Fritz. Certified robust models with slack control and large Lipschitz constants. arXiv preprint arXiv:2309.06166, 2023. 
2", + "[30] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In International Conference on Learning Representations (ICLR), 2018. 1", + "[31] Laurent Meunier, Blaise J Delattre, Alexandre Araujo, and Alexandre Allauzen. A dynamical system perspective for Lipschitz neural networks. In International Conference on Machine Learning (ICML), 2022. 3, 11, 14, 22", + "[32] Takeru Miyato, Toshiki Kataoka, Masanori Koyama, and Yuichi Yoshida. Spectral normalization for generative adversarial networks. In International Conference on Learning Representations (ICLR), 2018. 3, 11" + ], + "bbox": [ + 501, + 92, + 890, + 888 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "24582", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[33] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In Conference on Neural Information Processing Systems (NeurIPS). 2019. 4", + "[34] Bernd Prach and Christoph H Lampert. Almost-orthogonal layers for efficient general-purpose Lipschitz networks. In European Conference on Computer Vision (ECCV), 2022. 2, 3, 5, 14", + "[35] S Singla and S Feizi. Fantastic four: Differentiable bounds on singular values of convolution layers. In International Conference on Learning Representations (ICLR), 2021. 3, 11", + "[36] Sahil Singla and Soheil Feizi. Skew orthogonal convolutions. In International Conference on Machine Learning (ICML), 2021. 2, 3, 14", + "[37] Sahil Singla and Soheil Feizi. Improved techniques for deterministic 12 robustness. Conference on Neural Information Processing Systems (NeurIPS), 2022. 14", + "[38] Leslie N Smith and Nicholay Topin. Super-convergence: Very fast training of neural networks using large learning rates. In Artificial intelligence and machine learning for multi-domain operations applications, 2019. 15", + "[39] Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In International Conference on Learning Representations (ICLR), 2014. 1", + "[40] Asher Trockman and J Zico Kolter. Orthogonalizing convolutional layers with the Cayley transform. In International Conference on Learning Representations (ICLR), 2021. 2, 3, 5, 14, 23", + "[41] Yusuke Tsuzuku, Issei Sato, and Masashi Sugiyama. Lipschitz-margin training: Scalable certification of perturbation invariance for deep neural networks. Conference on Neural Information Processing Systems (NeurIPS), 2018. 6", + "[42] Ruigang Wang and Ian Manchester. Direct parameterization of Lipschitz-bounded deep networks. In International Conference on Machine Learning (ICML), 2023. 3, 11, 12", + "[43] Lily Weng, Huan Zhang, Hongge Chen, Zhao Song, Chojui Hsieh, Luca Daniel, Duane Boning, and Inderjit Dhillon. Towards fast computation of certified robustness for relu networks. In International Conference on Machine Learning (ICML), 2018. 2", + "[44] Eric Wong and Zico Kolter. Provable defenses against adversarial examples via the convex outer adversarial polytope. 
In International Conference on Machine Learning (ICML), 2018. 2", + "[45] Lechao Xiao, Yasaman Bahri, Jascha Sohl-Dickstein, Samuel Schoenholz, and Jeffrey Pennington. Dynamical isometry and a mean field theory of CNNs: How to train 10,000-layer vanilla convolutional neural networks. In International Conference on Machine Learning (ICML), 2018. 3", + "[46] Xiaojun Xu, Linyi Li, and Bo Li. Lot: Layer-wise orthogonal training on improving 12 certified robustness. Conference on" + ], + "bbox": [ + 78, + 90, + 468, + 892 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Neural Information Processing Systems (NeurIPS), 2022. 2, 3, 8, 14, 23", + "[47] Tan Yu, Jun Li, Yunfeng Cai, and Ping Li. Constructing orthogonal convolutions in an explicit manner. In International Conference on Learning Representations (ICLR), 2021. 3, 11, 12" + ], + "bbox": [ + 501, + 92, + 893, + 172 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "24583", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_model.json b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_model.json new file mode 100644 index 0000000000000000000000000000000000000000..03b3ee31e07abf3ce735de8730fb07cce5bf3b61 --- /dev/null +++ b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_model.json @@ -0,0 +1,2101 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.044 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.108, + 0.131, + 0.865, + 0.154 + ], + "angle": 0, + "content": "1-Lipschitz Layers Compared: Memory, Speed, and Certifiable Robustness" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.18, + 0.773, + 0.235 + ], + "angle": 0, + "content": "Bernd Prach,\\(^{1,*}\\) Fabio Brau,\\(^{2,*}\\) Giorgio Buttazzo,\\(^{2}\\) Christoph H. Lampert\\(^{1}\\) \n\\(^{1}\\) ISTA, Klosterneuburg, Austria \n\\(^{2}\\) Scuola Superiore Sant'Anna, Pisa, Italy" + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.237, + 0.79, + 0.253 + ], + "angle": 0, + "content": "{bprach, chl}@ist.ac.at, {fabio.brau, giorgio.butazzo}@santannapisa.it" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.287, + 0.314, + 0.303 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.318, + 0.474, + 0.637 + ], + "angle": 0, + "content": "The robustness of neural networks against input perturbations with bounded magnitude represents a serious concern in the deployment of deep learning models in safety-critical systems. Recently, the scientific community has focused on enhancing certifiable robustness guarantees by crafting 1-Lipschitz neural networks that leverage Lipschitz bounded dense and convolutional layers. 
Different methods have been proposed in the literature to achieve this goal, however, comparing the performance of such methods is not straightforward, since different metrics can be relevant (e.g., training time, memory usage, accuracy, certifiable robustness) for different applications. Therefore, this work provides a thorough comparison between different methods, covering theoretical aspects such as computational complexity and memory requirements, as well as empirical measurements of time per epoch, required memory, accuracy and certifiable robust accuracy. The paper also provides some guidelines and recommendations to support the user in selecting the methods that work best depending on the available resources. We provide code at github.com/berndprach/lLipschitzLayersCompared." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.68, + 0.21, + 0.696 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.706, + 0.47, + 0.844 + ], + "angle": 0, + "content": "Modern artificial neural networks achieve high accuracy and sometimes superhuman performance in many different tasks, but it is widely recognized that they are not robust to tiny and imperceptible input perturbations [4, 39] that, if properly crafted, can cause a model to produce the wrong output. Such inputs, known as Adversarial Examples, represent a serious concern for the deployment of machine learning models in safety-critical systems [26]. To overcome this issue, adversarial training has been proposed in" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.287, + 0.685, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.71, + 0.287, + 0.882, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.409, + 0.685, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.71, + 0.409, + 0.882, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.53, + 0.684, + 0.645 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.711, + 0.531, + 0.882, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.654, + 0.684, + 0.768 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.7, + 0.654, + 0.884, + 0.758 + ], + "angle": 0, + "content": "
Legend
RA | Robust Accuracy
A | Accuracy
TT | Training Time
IT | Inference Time
TM | Train Memory
IM | Inference Memory
" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.781, + 0.895, + 0.824 + ], + "angle": 0, + "content": "Figure 1. Evaluation of 1-Lipschitz methods on different metrics. Scores are assigned from 1 (worst) to 5 (best) to every method based on the results reported in Sections 3 and 5." + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.852, + 0.216, + 0.863 + ], + "angle": 0, + "content": "*Joined first authors." + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.863, + 0.469, + 0.9 + ], + "angle": 0, + "content": "This work was partially supported by project SERICS (PE00000014) under the MUR National Recovery and Resilience Plan funded by the European Union - NextGenerationEU." + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.852, + 0.469, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.499, + 0.856, + 0.895, + 0.901 + ], + "angle": 0, + "content": "[14, 30, 39]. It uses adversarial examples during the training to correct the model prediction. This strategy does improves the empirical robustness of the model, however, it does not" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24574" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.327, + 0.106 + ], + "angle": 0, + "content": "provide any guarantees of robustness." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.107, + 0.468, + 0.227 + ], + "angle": 0, + "content": "However, for many applications a guarantee of robustness is desired. Roughly speaking, a model \\( f \\) is said to be \\( \\varepsilon \\)-robust for a given input \\( x \\) if no perturbation of magnitude bounded by \\( \\varepsilon \\) can change its prediction. Recently, in the context of image classification, various approaches have been proposed to achieve certifiable robustness, including Verification, Randomized Smoothing, and Lipschitz Bounded Neural Networks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.238, + 0.468, + 0.343 + ], + "angle": 0, + "content": "Verification strategies aim to establish, for any given model, whether all samples contained in a \\( l_{2} \\)-ball with radius \\( \\varepsilon \\) and centered in the tested input \\( x \\) are classified with the same class as \\( x \\). In the exact formulation, verification strategies involve the solution of an NP-hard problem [20]. Nevertheless, even in a relaxed formulation, [44], these strategies require a huge computational effort [43]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.353, + 0.468, + 0.473 + ], + "angle": 0, + "content": "Randomized smoothing strategies, initially presented in [10], represent an effective way of crafting a certifiable-robust classifier \\( g \\) based on a base classifier \\( f \\). If combined with an additional denoising step, they can achieve state-of-the-art levels of robustness, [7]. However, since they require multiple evaluations of the base model (up to 100k evaluations) for the classification of a single input, they cannot be used for real-time applications." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.483, + 0.468, + 0.604 + ], + "angle": 0, + "content": "Finally, Lipschitz Bounded Neural Networks [6, 9, 24, 27, 29, 34, 40] represent a valid alternative to produce certifiable classifiers, since they only require a single forward pass of the model at inference time to deduce guarantees of robustness. 
Indeed, for such models, a lower-bound of the minimal adversarial perturbation capable of fooling the classifier can be evaluated by considering the difference between the two largest class scores predicted by the model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.605, + 0.468, + 0.741 + ], + "angle": 0, + "content": "Lipschitz-bounded neural networks can be obtained by the composition of 1-Lipschitz layers [2]. The process of parameterizing 1-Lipschitz layers is fairly straightforward for fully connected layers. However, for convolutions — with overlapping kernels — deducing an effective parameterization is a hard problem. Indeed, the Lipschitz condition can be essentially thought of as a condition on the Jacobian of the layer. However, the Jacobian matrix can not be efficiently computed." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.468, + 0.901 + ], + "angle": 0, + "content": "In order to avoid the explicit computation of the Jacobian, various methods have been proposed, including parameterizations that cause the Jacobian to be (very close to) orthogonal [27, 36, 40, 46] and methods that rely on an upper bound on the Jacobian instead [34]. Those different methods differ drastically in training and validation requirements (in particular time and memory) as well as empirical performance. Furthermore, increasing training time or model sizes very often also increases the empirical performance. This makes it hard to judge from the existing" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.182 + ], + "angle": 0, + "content": "literature which methods are the most promising. This becomes even worse when working with specific computation requirements, such as restrictions on the available memory. In this case, it is important to choose the method that better suits the characteristics of the system in terms of evaluation time, memory usage as well and certifiable-robust-accuracy." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.205, + 0.892, + 0.31 + ], + "angle": 0, + "content": "This work aims at giving a comprehensive comparison of different strategies for crafting 1-Lipschitz layers from both a theoretical and practical perspective. For the sake of fairness, we consider several metrics such as Time and Memory requirements for both training and inference, Accuracy, as well as Certified Robust Accuracy. The main contributions are the following:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.326, + 0.892, + 0.369 + ], + "angle": 0, + "content": "- An empirical comparison of 1-Lipschitz layers based on six different metrics, and four different datasets on four architecture sizes with three time constraints." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.371, + 0.892, + 0.4 + ], + "angle": 0, + "content": "- A theoretical comparison of the runtime complexity and the memory usage of existing methods." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.401, + 0.892, + 0.445 + ], + "angle": 0, + "content": "- A review of the most recent methods in the literature, including implementations with a revised code that we will release publicly for other researchers to build on." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.326, + 0.892, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.459, + 0.797, + 0.476 + ], + "angle": 0, + "content": "2. 
Existing Works and Background" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.485, + 0.892, + 0.56 + ], + "angle": 0, + "content": "In recent years, various methods have been proposed for creating artificial neural networks with a bounded Lipschitz constant. The Lipschitz constant of a function \\( f: \\mathbb{R}^n \\to \\mathbb{R}^m \\) with respect to the \\( l_2 \\) norm is the smallest \\( L \\) such that for all \\( x, y \\in \\mathbb{R}^n \\)" + }, + { + "type": "equation", + "bbox": [ + 0.591, + 0.571, + 0.891, + 0.588 + ], + "angle": 0, + "content": "\\[\n\\left\\| f (x) - f (y) \\right\\| _ {2} \\leq L \\| x - y \\| _ {2}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.599, + 0.892, + 0.689 + ], + "angle": 0, + "content": "We also extend this definition to networks and layers, by considering the \\(l_{2}\\) norms of the flattened input and output tensors in Equation (1). A layer is called 1-Lipschitz if its Lipschitz constant is at most 1. For linear layers, the Lipschitz constant is equal to the spectral norm of the weight matrix that is given as" + }, + { + "type": "equation", + "bbox": [ + 0.62, + 0.699, + 0.891, + 0.733 + ], + "angle": 0, + "content": "\\[\n\\| M \\| _ {2} = \\sup _ {\\mathbf {v} \\neq 0} \\frac {\\| M \\mathbf {v} \\| _ {2}}{\\| \\mathbf {v} \\| _ {2}}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.743, + 0.892, + 0.864 + ], + "angle": 0, + "content": "A particular class of linear 1-Lipschitz layers are ones with an orthogonal Jacobian matrix. The Jacobian matrix of a layer is the matrix of partial derivatives of the flattened outputs with respect to the flattened inputs. A matrix \\( M \\) is orthogonal if \\( MM^{\\top} = I \\), where \\( I \\) is the identity matrix. For layers with an orthogonal Jacobian, Equation (1) always holds with equality and, because of this, a lot of methods aim at constructing such 1-Lipschitz layers." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "All the neural networks analyzed in this paper consist of 1-Lipschitz parameterized layers and 1-Lipschitz activation" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "24575" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.213 + ], + "angle": 0, + "content": "functions, with no skip connections and no batch normalization. Even though the commonly used ReLU activation function is 1-Lipschitz, Anil et al. [2] showed that it reduces the expressive capability of the model. Hence, we adopt the MaxMin activation proposed by the authors and commonly used in 1-Lipschitz models. Concatenations of 1-Lipschitz functions are 1-Lipschitz, so the networks analyzed are 1-Lipschitz by construction." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.224, + 0.373, + 0.24 + ], + "angle": 0, + "content": "2.1. Parameterized 1-Lipschitz Layers" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.248, + 0.473, + 0.415 + ], + "angle": 0, + "content": "This section provides an overview of the existing methods for providing 1-Lipschitz layers. We discuss fundamental methods for estimating the spectral norms of linear and convolutional layers, i.e. Power Method [32] and Fantistica4 [35], and for crafting orthogonal matrices, i.e. Bjorck & Bowie [5], in Appendix A. 
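For reference, a minimal power-method sketch for a dense weight matrix is given below. It only illustrates the spectral-norm estimate mentioned above (the convolutional variants and the versions actually used by the compared layers are described in Appendix A); the function name and the iteration count are our own choices.

```python
import torch

def spectral_norm_power_method(W: torch.Tensor, iters: int = 50) -> torch.Tensor:
    """Estimate ||W||_2 of a dense matrix by power iteration (illustrative only)."""
    u = torch.randn(W.shape[0], device=W.device)
    for _ in range(iters):
        v = W.t() @ u
        v = v / (v.norm() + 1e-12)
        u = W @ v
        u = u / (u.norm() + 1e-12)
    # Rayleigh quotient: approximately the largest singular value of W.
    return u @ (W @ v)
```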
The rest of this section describes 7 methods from the literature that construct 1-Lipschitz convolutions: BCOP, Cayley, SOC, AOL, LOT, CPL, and SLL. Further 1-Lipschitz methods, [19, 42, 47], and the reasons why they were not included in our main comparison can be found in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.436, + 0.473, + 0.573 + ], + "angle": 0, + "content": "BCOP Block Orthogonal Convolution Parameterization (BCOP) was introduced by Li et al. in [27] to extend a previous work by Xiao et al. [45] that focused on the importance of orthogonal initialization of the weights. For a \\( k \\times k \\) convolution, BCOP uses a set of \\( (2k - 1) \\) parameter matrices. Each of these matrices is orthogonalized using the algorithm by Bjorck & Bowie [5] (see also Appendix A). Then, a \\( k \\times k \\) kernel is constructed from those matrices in a way that guarantees that the resulting layer is orthogonal." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.594, + 0.471, + 0.671 + ], + "angle": 0, + "content": "Cayley Another family of orthogonal convolutional and fully connected layers has been proposed by Trockman and Kolter [40] by leveraging the Cayley Transform [8], which maps a skew-symmetric matrix \\(A\\) into an orthogonal matrix \\(Q\\) using the relation" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.682, + 0.47, + 0.7 + ], + "angle": 0, + "content": "\\[\nQ = (I - A) (I + A) ^ {- 1}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.713, + 0.47, + 0.835 + ], + "angle": 0, + "content": "The transformation can be used to parameterize orthogonal weight matrices for linear layers in a straightforward way. For convolutions, the authors make use of the fact that circular padded convolutions are vector-matrix products in the Fourier domain. As long as all those vector-matrix products have orthogonal matrices, the full convolution will have an orthogonal Jacobian. For Cayley Convolutions, those matrices are orthogonalized using the Cayley transform." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.903 + ], + "angle": 0, + "content": "SOC Skew Orthogonal Convolution is an orthogonal convolutional layer presented by Singla et al. [36], obtained by leveraging the exponential convolution [15]. Analogously" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.091, + 0.892, + 0.121 + ], + "angle": 0, + "content": "to the matrix case, given a kernel \\(L\\in \\mathbb{R}^{c\\times c\\times k\\times k}\\), the exponential convolution can be defined as" + }, + { + "type": "equation", + "bbox": [ + 0.499, + 0.127, + 0.892, + 0.17 + ], + "angle": 0, + "content": "\\[\n\\exp (L) (x) := x + \\frac {L \\star x}{1} + \\frac {L \\star^ {2} x}{2 !} + \\dots + \\frac {L \\star^ {k} x}{k !} + \\dots , \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.17, + 0.892, + 0.307 + ], + "angle": 0, + "content": "where \\(\\star^k\\) denotes a convolution applied \\(k\\)-times. The authors proved that any exponential convolution has an orthogonal Jacobian matrix as long as \\(L\\) is skew-symmetric, providing a way of parameterizing 1-Lipschitz layers. In their work, the sum of the infinite series is approximated by computing only the first 5 terms during training and the first 12 terms during the inference, and \\(L\\) is normalized to have unitary spectral norm following the method presented in [35] (see Appendix A)." 
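To make the construction above concrete, here is a hedged PyTorch sketch of a SOC-style forward pass: a skew-symmetric kernel is obtained by subtracting the channel-transposed, spatially flipped kernel, and the exponential series is truncated after a few terms. The spectral normalization of the kernel (Fantastic Four) and the padding details are simplified, and the helper name `soc_forward` is ours, not from the authors' code.

```python
import torch
import torch.nn.functional as F

def soc_forward(x: torch.Tensor, kernel: torch.Tensor, terms: int = 5) -> torch.Tensor:
    """Truncated exponential convolution exp(L)(x) for a skew-symmetric kernel L.

    x: (batch, c, h, w); kernel: (c, c, k, k) with odd k. Sketch only: the
    kernel normalisation described in the text is omitted here.
    """
    # Skew-symmetric parameterisation: subtract the channel-transposed,
    # spatially flipped kernel so the convolution's Jacobian is (approximately) skew.
    skew = kernel - kernel.transpose(0, 1).flip(-2, -1)
    pad = kernel.shape[-1] // 2
    out, power, factorial = x, x, 1.0
    for i in range(1, terms + 1):
        power = F.conv2d(power, skew, padding=pad)  # i-fold application of L
        factorial *= i
        out = out + power / factorial
    return out
```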
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.324, + 0.892, + 0.369 + ], + "angle": 0, + "content": "AOL Prach and Lampert [34] introduced Almost Orthogonal Lipschitz (AOL) layers. For any matrix \\(P\\), they defined a diagonal rescaling matrix \\(D\\) with" + }, + { + "type": "equation", + "bbox": [ + 0.6, + 0.377, + 0.892, + 0.415 + ], + "angle": 0, + "content": "\\[\nD _ {i i} = \\left(\\sum_ {j} \\left| P ^ {\\top} P \\right| _ {i j}\\right) ^ {- 1 / 2} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.422, + 0.892, + 0.558 + ], + "angle": 0, + "content": "and proved that the spectral norm of \\( PD \\) is bounded by 1. This result was used to show that the linear layer given by \\( l(x) = PDx + b \\) (where \\( P \\) is the learnable matrix and \\( D \\) is given by Eq. (5)) is 1-Lipschitz. Furthermore, the authors extended the idea so that it can also be efficiently applied to convolutions. This is done by calculating the rescaling in Equation (5) with the Jacobian \\( J \\) of a convolution instead of \\( P \\). In order to evaluate it efficiently the authors express the elements of \\( J^{\\top}J \\) explicitly in terms of the kernel values." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.576, + 0.892, + 0.713 + ], + "angle": 0, + "content": "LOT The layer presented by Xu et al. [46] extends the idea of [19] to use the Inverse Square Root of a matrix in order to orthogonalize it. Indeed, for any matrix \\(V\\), the matrix \\(Q = V(V^T V)^{-\\frac{1}{2}}\\) is orthogonal. Similarly to the Cayley method, for the layer-wise orthogonal training (LOT) the convolution is applied in the Fourier frequency domain. To find the inverse square root, the authors relay on an iterative Newton Method. In details, defining \\(Y_0 = V^T V\\), \\(Z_0 = I\\), and" + }, + { + "type": "equation", + "bbox": [ + 0.508, + 0.718, + 0.892, + 0.748 + ], + "angle": 0, + "content": "\\[\nY _ {i + 1} = \\frac {1}{2} Y _ {i} \\left(3 I - Z _ {i} Y _ {i}\\right), Z _ {i + 1} = \\frac {1}{2} \\left(3 I - Z _ {i} Y _ {i}\\right) Z _ {i}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.756, + 0.892, + 0.803 + ], + "angle": 0, + "content": "it can be shown that \\( Y_{i} \\) converges to \\( (V^{T}V)^{-\\frac{1}{2}} \\). In their proposed layer, the authors apply 10 iterations of the method for both training and evaluation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.82, + 0.892, + 0.866 + ], + "angle": 0, + "content": "CPL Meunier et al. [31] proposed the Convex Potential Layer. Given a non-decreasing 1-Lipschitz function \\(\\sigma\\) (usually ReLU), the layer is constructed as" + }, + { + "type": "equation", + "bbox": [ + 0.575, + 0.872, + 0.892, + 0.906 + ], + "angle": 0, + "content": "\\[\nl (x) = x - \\frac {2}{\\| W \\| _ {2} ^ {2}} W ^ {\\top} \\sigma (W x + b), \\tag {7}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "24576" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.138 + ], + "angle": 0, + "content": "which is 1-Lipschitz by design. The spectral norm required to calculate \\( l(x) \\) is approximated using the power method (see Appendix A)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.158, + 0.468, + 0.218 + ], + "angle": 0, + "content": "SLL The SDP-based Lipschitz Layers (SLL) proposed by Araujo et al. [3] combine the CPL layer with the upper bound on the spectral norm from AOL. 
The layer can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.143, + 0.23, + 0.468, + 0.249 + ], + "angle": 0, + "content": "\\[\nl (x) = x - 2 W ^ {\\top} Q ^ {- 2} D ^ {2} \\sigma (W x + b), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.261, + 0.468, + 0.306 + ], + "angle": 0, + "content": "where \\(Q\\) is a learnable diagonal matrix with positive entries and \\(D\\) is deduced by applying Equation (5) to \\(P = W^{\\top}Q^{-1}\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.318, + 0.468, + 0.409 + ], + "angle": 0, + "content": "Remark 1. Both CPL and SLL are non-linear by construction, so they can be used to construct a network without any further use of activation functions. However, carrying out some preliminary experiments, we empirically found that alternating CPL (and SLL) layers with MaxMin activation layers allows achieving a better performance." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.435, + 0.305, + 0.452 + ], + "angle": 0, + "content": "3. Theoretical Comparison" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.46, + 0.468, + 0.552 + ], + "angle": 0, + "content": "As illustrated in the last section, various ideas and methods have been proposed to parameterize 1-Lipschitz layers. This causes the different methods to have very different properties and requirements. This section aims at highlighting the properties of the different algorithms, focusing on the algorithmic complexity and the required memory." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.56, + 0.468, + 0.772 + ], + "angle": 0, + "content": "Table 1 provides an overview of the computational complexity and memory requirements for the different layers considered in the previous section. For the sake of clarity, the analysis is performed by considering separately the transformations applied to the input of the layers and those applied to the weights to ensure the 1-Lipschitz constraint. Each of the two sides of the table contains three columns: i) Operations contains the most costly transformations applied to the input as well as to the parameters of different layers; ii) MACS reports the computational complexity expressed in multiply-accumulate operations (MACS) involved in the transformations (only leading terms are presented); iii) Memory reports the memory required by the transformation during the training phase." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.781, + 0.468, + 0.901 + ], + "angle": 0, + "content": "At training time, both input and weight transformations are required, thus the training complexity of the forward pass can be computed as the sum of the two corresponding MACS columns of the table. Similarly, the training memory requirements can be computed as the sum of the two corresponding Memory columns of the table. For the considered operations, the cost of the backward pass during training has the same computational complexity as the forward pass, and" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.228 + ], + "angle": 0, + "content": "therefore increases the overall complexity by a constant factor. At inference time, all the parameter transformations can be computed just once and cached afterward. Therefore, the inference complexity is equal to the complexity due to the input transformation (column 3 in the table). At inference time, the intermediate variables are not stored in memory, hence, the memory requirements are much lower than during training. 
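As a worked example of how the quantities in Table 1 combine, the snippet below plugs illustrative numbers (not taken from the paper) into C = b s^2 c^2 k^2, M = b s^2 c and P = c^2 k^2, and contrasts the input-transformation cost with a parameter-transformation term that is paid in every training step but cached at inference time.

```python
# Illustrative numbers only: one 3x3 convolution with batch b = 256,
# feature-map size s = 32, channels c = 64, kernel size k = 3.
b, s, c, k = 256, 32, 64, 3

C = b * s**2 * c**2 * k**2   # MACS of the input transformation (a plain convolution)
M = b * s**2 * c             # number of layer inputs
P = c**2 * k**2              # parameters of a standard convolution kernel

# AOL's parameter transformation costs on the order of c^3 k^4 MACS (Table 1);
# it is input-independent, so at inference time it is computed once and cached,
# while during training it is added to C in every forward pass.
aol_param_macs = c**3 * k**4
print(f"C={C:.2e}  M={M:.2e}  P={P:.2e}  AOL parameter term={aol_param_macs:.2e}")
```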
The values cannot directly be inferred from Table 1, we reported them separately in Appendix C.1." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.235, + 0.892, + 0.506 + ], + "angle": 0, + "content": "Note that all the terms reported in Table 1 depend on the batch size \\( b \\), the input size \\( s \\times s \\times c \\), the number of inner iterations of a method \\( t \\), and the kernel size \\( k \\times k \\). (Often, \\( t \\) is different at training and inference time.) For the sake of clarity, the MACS of a naive convolution implementation is denoted by \\( C \\) (\\( C = bs^2c^2k^2 \\)), the number of inputs of a layer is denoted by \\( M \\) (\\( M = bs^2c \\)), and the size of the kernel of a standard convolution is denoted by \\( P \\) (\\( P = c^2k^2 \\)). Only the leading terms of the computations are reported in Table 1. In order to simplify some terms, we assume that \\( c > \\log_2(s) \\) and that rescaling a tensor (by a scalar) as well as adding two tensors does not require any memory in order to do backpropagation. We also assume that each additional activation does require extra memory. All these assumptions have been verified to hold within PyTorch, [33]. Also, when the algorithm described in the paper and the version provided in the supplied code differed, we considered the algorithm implemented in the code." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.514, + 0.892, + 0.62 + ], + "angle": 0, + "content": "The transformations reported in the table are convolutions (CONV), Fast Fourier Transformations (FFT), matrix-vector multiplications (MV), matrix-matrix multiplications (MM), matrix inversions (INV), as well as applications of an activation function (ACT). The application of algorithms such as BJORCK & Bowie (BnB), power method, and Fantastic 4 (F4) is also reported (see Appendix A for descriptions)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.629, + 0.852, + 0.645 + ], + "angle": 0, + "content": "3.1. Analysis of the computational complexity" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.652, + 0.892, + 0.788 + ], + "angle": 0, + "content": "It is worth noting that the complexity of the input transformations (in Table 1) is similar for all methods. This implies that a similar scaling behaviour is expected at inference time for the models. Cayley and LOT apply an FFT-based convolution and have computational complexity independent of the kernel size. CPL and SLL require two convolutions, which make them slightly more expensive at inference time. Notably, SOC requires multiple convolutions, making this method more expensive at inference time." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.901 + ], + "angle": 0, + "content": "At training time, parameter transformations need to be applied in addition to the input transformations during every forward pass. For SOC and CPL, the input transformations always dominate the parameter transformations in terms of computational complexity. This means the complexity scales like \\( c^2 \\), just like a regular convolution, with a further factor of 2 and 5 respectively. All other methods" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "24577" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.895, + 0.148 + ], + "angle": 0, + "content": "Table 1. Computational complexity and memory requirements of different methods. 
We report multiply-accumulate operations (MACS) as well as memory requirements (per layer) for batch size \\( b \\), image size \\( s \\times s \\times c \\), kernel size \\( k \\times k \\) and number of inner iterations \\( t \\). We use \\( C = bs^2c^2k^2 \\), \\( M = bs^2c \\) and \\( P = c^2k^2 \\). For a detailed explanation on what is reported see Section 3. For some explanation on how the entries of this table were derived, see Appendix C." + }, + { + "type": "table", + "bbox": [ + 0.133, + 0.157, + 0.84, + 0.328 + ], + "angle": 0, + "content": "
Method | Input Transformations | Parameter Transformations
 | Operations | MACS O(·) | Memory | Operations | MACS O(·) | Memory O(·)
Standard | CONV | C | M | - | - | P
AOL | CONV | C | M | CONV | c^3 k^4 | 5P
BCOP | CONV | C | M | BnB & MMs | c^3 k t + c^3 k^3 | c^2 k t + c^2 k^3
Cayley | FFTs & MVs | b s^2 c^2 | 5/2 M | FFTs & INVs | s^2 c^3 | 3/2 s^2 c^2
CPL | CONVs & ACT | 2C | 3M | power method | s^2 c^2 k^2 | P + s^2 c
LOT | FFTs & MVs | b s^2 c^2 | 3M | FFTs & MMs | 4 s^2 c^3 t | 4 s^2 c^2 t
SLL | CONVs & ACT | 2C | 3M | CONVs | c^3 k^4 | 5P
SOC | CONVs | C t_1 | M t_1 | F4 | c^2 k^2 t_2 | P
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.368, + 0.472, + 0.446 + ], + "angle": 0, + "content": "require parameter transformations that scale like \\( c^3 \\), making them more expensive for larger architectures. In particular, we do expect Cayley and LOT to require long training times for larger models, since the complexity of their parameter transformations further depends on the input size." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.453, + 0.469, + 0.471 + ], + "angle": 0, + "content": "3.2. Analysis of the training memory requirements" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.477, + 0.47, + 0.659 + ], + "angle": 0, + "content": "The memory requirements of the different layers are important, since they determine the maximum batch size and the type of models we can train on a particular infrastructure. At training time, typically all intermediate results are kept in memory to perform backpropagation. This includes intermediate results for both input and parameter transformations. The input transformations usually preserve the size, and therefore the memory required is usually of \\(\\mathcal{O}(M)\\). Therefore, for the input transformations, all methods require memory not more than a constant factor worse than standard convolutions, with the worst method being SOC, with a constant \\(t_1\\), typically equal to 5." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.666, + 0.47, + 0.773 + ], + "angle": 0, + "content": "In addition to the input transformation, we also need to store intermediate results of the parameter transformations in memory in order to evaluate the gradients. Again, most methods approximately preserve the sizes during the parameter transformations, and therefore the memory required is usually of order \\(\\mathcal{O}(P)\\). Exceptions to this rule are Cayley and LOT, with a larger \\(\\mathcal{O}(s^2 c^2)\\) term, as well as BCOP." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.785, + 0.27, + 0.803 + ], + "angle": 0, + "content": "4. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.471, + 0.903 + ], + "angle": 0, + "content": "This section presents an experimental study aimed at comparing the performance of the considered layers with respect to different metrics. Before presenting the results, we first summarize the setup used in our experiments. For a detailed description see Appendix E. To have a fair and meaningful comparison among the various models, all the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.368, + 0.895, + 0.476 + ], + "angle": 0, + "content": "proposed layers have been evaluated using the same architecture, loss function, and optimizer. Since, according to the data reported in Table 1, different layers may have different throughput, to have a fair comparison with respect to the tested metrics, we limited the total training time instead of fixing the number of training epochs. Results are reported for training times of \\(2\\mathrm{h}\\), \\(10\\mathrm{h}\\), and \\(24\\mathrm{h}\\) on one A100 GPU." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.486, + 0.895, + 0.579 + ], + "angle": 0, + "content": "Our architecture is a standard convolutional network that doubles the number of channels whenever the resolution is reduced [6, 40]. For each method, we tested architectures of different sizes. We denoted them as XS, S, M and L, depending on the number of parameters, according to the criteria in Table 7, ranging from 1.5M to 100M parameters." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.602, + 0.895, + 0.741 + ], + "angle": 0, + "content": "Since different methods benefit from different learning rates and weight decay, for each setting (model size, method and dataset), we used the best values resulting from a random search performed on multiple training runs on a validation set composed of \\(10\\%\\) of the original training set. More specifically, 16 runs were performed for each configuration of randomly sampled hyperparameters, and we selected the configuration maximizing the certified robust accuracy w.r.t. \\(\\epsilon = 36 / 255\\) (see Appendix E.4 for details)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.897, + 0.905 + ], + "angle": 0, + "content": "The evaluation was carried out using four different datasets: CIFAR-10, CIFAR-100 [21], Tiny ImageNet [23], and Imagenette [16] for large images. Augmentation was used during the training (Random crops and flips on CIFAR-10 and CIFAR-100, RandAugment [11] on Tiny ImageNet, and random crop as well as RandAugment on Imagenette), details in Appendix E.5. We use the loss function proposed by [34], with same temperature 0.25, and where we tuned the margin to maximize the robust accuracy for \\(\\epsilon = \\frac{36}{255}\\). In detail, we considered a margin of \\(2\\sqrt{2}\\epsilon\\) where" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24578" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.091, + 0.47, + 0.122 + ], + "angle": 0, + "content": "the \\(\\sqrt{2}\\) factor comes from the \\(L_{2}\\) norm [41], and the factor 2 has been added to help with generalization." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.133, + 0.173, + 0.149 + ], + "angle": 0, + "content": "4.1. Metrics" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.157, + 0.468, + 0.203 + ], + "angle": 0, + "content": "All the considered models were evaluated based on three main metrics: the throughput, the required memory, and the certified robust accuracy." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.225, + 0.468, + 0.332 + ], + "angle": 0, + "content": "Throughput and epoch time The throughput of a model is the average number of examples that the model can process per second. It determines how many epochs are processed in a given time frame. The evaluation of the throughput was performed on an 80GB-A100-GPU based on the average time of 100 mini-batches. We measured the inference throughput with cached parameter transformations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.354, + 0.468, + 0.581 + ], + "angle": 0, + "content": "Memory required Layers that require less memory allow for larger batch size, and the memory requirements also determine the type of hardware we can train a model on. For each model, we measured and reported the maximal GPU memory occupied by tensors using the function torch.cuda.max_memory_allocated() provided by the PyTorch framework. This is not exactly equal to the overall GPU memory requirement but gives a fairly good approximation of it. Note that the model memory measured in this way also includes additional memory required by the optimizer (e.g. to store the momentum term) as well as by the activation layers in the forward pass. However, this additional memory should be at most of order \\(\\mathcal{O}(M + P)\\). As for the throughput, we evaluated and cached all calculations independent of the input at inference time." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.602, + 0.468, + 0.814 + ], + "angle": 0, + "content": "Certified robust accuracy In order to evaluate the performance of a 1-Lipschitz network, the standard metric is the certified robust accuracy. An input is classified certifiably robustly with radius \\(\\epsilon\\) by a model, if no perturbations of the input with norm bounded by \\(\\epsilon\\) can change the prediction of the model. Certified robust accuracy measures the proportion of examples that are classified correctly as well as certifiably robustly. For 1-Lipschitz models, a lower bound of the certified \\(\\epsilon\\)-robust accuracy is the portion of correctly classified inputs such that \\(\\mathcal{M}_f(x_i, l_i) > \\epsilon \\sqrt{2}\\) where the margin \\(\\mathcal{M}_f(x, l)\\) of a model \\(f\\) at input \\(x\\) with label \\(l\\), given as \\(\\mathcal{M}_f(x, l) = f(x)_l - \\max_{j \\neq l} f_j(x)\\), is the difference between target class score and the highest score of a different class. For details, see [41]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.829, + 0.283, + 0.848 + ], + "angle": 0, + "content": "5. Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.468, + 0.901 + ], + "angle": 0, + "content": "This section presents the results of the comparison performed by applying the methodology discussed in Section 4. The results related to the different metrics are dis" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.122 + ], + "angle": 0, + "content": "cussed in dedicated subsections and the key takeaways are summarized in the radar-plot illustrated in Figure 1." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.132, + 0.757, + 0.148 + ], + "angle": 0, + "content": "5.1. Training and inference times" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.155, + 0.892, + 0.307 + ], + "angle": 0, + "content": "Figure 2 plots the training time per epoch of the different models as a function of their size, and Figure 3 plots the corresponding inference throughput for the various sizes as described in Section 4. As described in Table 5, the model base width, referred to as \\( w \\), is doubled from one model size to the next. We expect the training and inference time to scale with \\( w \\) similarly to how individual layers scale with their number of channels, \\( c \\) (in Table 1). This is because the width of each of the 5 blocks of our architecture is a constant multiple of the base width, \\( w \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.326, + 0.892, + 0.448 + ], + "angle": 0, + "content": "The training time increases (at most) about linearly with \\( w \\) for standard convolutions, whereas the computational complexity of each single convolution scales like \\( c^2 \\). This suggests that parallelism on the GPU and the overhead from other operations (activations, parameter updates, etc.) are important factors determining the training time. This also explains why CPL (doing two convolutions, with identical kernel parameters) is only slightly slower than a standard" + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.472, + 0.887, + 0.622 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.637, + 0.892, + 0.664 + ], + "angle": 0, + "content": "Figure 2. Training time per epoch (on CIFAR-10) for different methods and different model sizes." 
+ }, + { + "type": "image", + "bbox": [ + 0.505, + 0.682, + 0.887, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.855, + 0.892, + 0.896 + ], + "angle": 0, + "content": "Figure 3. Inference throughput for different methods as a function of their size for CIFAR-10 sizes input images. All parameter transformations have been evaluated and cached beforehand" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "24579" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.242 + ], + "angle": 0, + "content": "convolution, and SOC (doing 5 convolutions) is only about 3 times slower than the standard convolution. The AOL and SLL methods also require times comparable to a standard convolution for small models, although eventually, the \\( c^3 \\) term in the computation of the rescaling makes them slower for larger models. Finally, Cayley, LOT, and BCOP methods take much longer training times per epoch. For Cayley and LOT this behavior was expected, as they have a large \\( \\mathcal{O}(s^2c^3) \\) term in their computational complexity. See Table 1 for further details." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.261, + 0.473, + 0.502 + ], + "angle": 0, + "content": "At inference time transformations of the weights are cached, therefore some methods (AOL, BCOP) do not have any overhead compared to a standard convolution. As expected, other methods (CPL, SLL, and SOC) that apply additional convolutions to the input suffer from a corresponding overhead. Finally, Cayley and LOT have a slightly different throughput due to their FFT-based convolution. Among them, Cayley is about twice as fast because it involves a real-valued FFT rather than a complex-valued one. From Figure 3, it can be noted that cached Cayley and CPL have the same inference time, even though CPL uses twice the number of convolutions. We believe this is due to the fact that the conventional FFT-based convolution is quite efficient for large kernel sizes, but for smaller ones PyTorch implements a faster algorithm, i.e., Winograd, [22], that can be up to 2.5 times faster." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.51, + 0.356, + 0.526 + ], + "angle": 0, + "content": "5.2. Training memory requirements" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.533, + 0.47, + 0.835 + ], + "angle": 0, + "content": "The training and inference memory requirements of the various models (measured as described in Section 4.1) are reported in Figure 4 as a function of the model size. The results of the theoretical analysis reported in Table 1 suggest that the training memory requirements always have a term linear in the number of channels \\( c \\) (usually the activations from the forward pass), as well as a term quadratic in \\( c \\) (usually the weights and all transformations applied to the weights during the forward pass). This behavior can also be observed from Figure 4. For some of the models, the memory required approximately doubles from one model size to the next one, just like the width. This means that the linear term dominates (for those sizes), which makes those models relatively cheap to scale up. For the BCOP, LOT, and Cayley methods, the larger coefficients in the \\( c^2 \\) term (for LOT and Cayley the coefficient is even dependent on the input size, \\( s^2 \\)) cause this term to dominate. This makes it much harder to scale those methods to more parameters. 
Method LOT requires huge amounts of memory, in particular LOT-L is too large to fit in 80GB GPU memory." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.841, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Note that at test time, the memory requirements are much lower, because the intermediate activation values do not need to be stored, as there is no backward pass. Therefore, at inference time, most methods require a very similar" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.092, + 0.889, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.248, + 0.888, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.412, + 0.893, + 0.44 + ], + "angle": 0, + "content": "Figure 4. Memory required at training and inference time for input size \\(32 \\times 32\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.467, + 0.892, + 0.529 + ], + "angle": 0, + "content": "amount of memory as a standard convolution. The Cayley and LOT methods require more memory since they perform the calculation in the Fourier space, creating an intermediate representation of the weight matrices of size \\(\\mathcal{O}(s^2 c^2)\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.538, + 0.731, + 0.554 + ], + "angle": 0, + "content": "5.3. Certified robust accuracy" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.561, + 0.893, + 0.879 + ], + "angle": 0, + "content": "The results related to the accuracy and the certified robust accuracy for the different methods, model sizes, and datasets measured on a 24h training budget are summarized in Table 2. The differences among the various model sizes are also highlighted in Figure 6 by reporting the sorted values of the certified robust accuracy. Further tables and plots relative to different training budgets can be found in Appendix G. The reader can compare our results with the state-of-the-art certified robust accuracy summarized in Appendix D. However, it is worth noting that, to reach state-of-the-art performance, authors often carry out experiments using large model sizes and long training times, which makes it hard to compare the methods themselves. On the other hand, the evaluation proposed in this paper allows a fairer comparison among the different methods, since it also considers timing and memory aspects. This restriction based on time, rather than the number of epochs, ensures that merely enlarging the model size does not lead to improved performance, as bigger models typically process fewer epochs of data. Indeed, in our results in Figure 6 it is usually the M (and not the L) model that performs best." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.886, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Experiments show that SOC performs best, reaching the" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24580" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.47, + 0.118 + ], + "angle": 0, + "content": "Table 2. Certified robust accuracy for radius \\(\\epsilon = 36 / 255\\) on the evaluated datasets. Training is performed for 24 hours." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.129, + 0.467, + 0.667 + ], + "angle": 0, + "content": "
MethodsAccuracy [%]Robust Accuracy [%]
XSSMLXSSML
CIFAR-10
AOL71.773.673.473.759.160.861.061.5
BCOP71.773.174.074.658.559.360.561.5
CPL74.976.176.676.862.564.265.165.2
Cayley73.174.274.473.659.561.161.060.1
LOT75.576.672.0-63.464.658.7-
SLL73.774.275.374.361.062.062.862.3
SOC74.175.076.976.961.362.966.365.4
CIFAR-100
AOL40.343.444.341.927.931.031.429.7
BCOP41.442.843.742.228.430.131.229.2
CPL42.3-45.244.330.1-33.232.1
Cayley42.343.943.542.929.230.530.529.5
LOT43.545.242.8-30.832.529.6-
SLL41.442.842.442.128.930.529.929.6
SOC43.145.247.346.230.632.634.933.5
Tiny ImageNet
AOL26.629.330.330.018.119.721.020.6
BCOP22.426.227.627.013.816.917.216.8
CPL28.329.329.830.318.919.720.320.1
Cayley27.829.630.127.217.919.519.316.7
LOT30.732.528.8-20.821.918.1-
SLL25.127.026.527.916.618.417.718.8
SOC28.928.832.132.118.918.821.221.1
Imagenette
AOL80.883.782.876.879.978.5
BCOP81.284.59.875.680.19.8
CPL85.586.586.480.882.482.3
Cayley81.277.9-75.871.7-
SLL80.883.479.375.478.072.8
SOC80.683.679.074.778.473.5
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.704, + 0.471, + 0.901 + ], + "angle": 0, + "content": "highest certified robust accuracy on two datasets. CPL models consistently rank in top-10 position among the three datasets. LOT performed well, in particular on Tiny ImageNet dataset where it performs the best. AOL did not reach high accuracy on CIFAR-10, but reached more competitive results on Tiny ImageNet. An opposite effect can be observed for SLL, which performance seems to strongly depend on the number of classes. BCOP only reach the top-10 once, while Cayley is consistently outperformed by the other methods. The very same analysis can be applied to the clean accuracy, whose sorted bar-plots are reported in Appendix G, where the main difference is that Cayley performs slightly better for that metric. Furthermore, it is worth" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.227 + ], + "angle": 0, + "content": "highlighting that CPL is sensitive to weight initialization. We faced numerical errors during the 10h and 24h training of the small model on CIFAR-100. On Imagenette, CPL clearly performs best, followed by BCOP and AOL. Note that these methods all construct a kernel so that the convolution is 1-Lipschitz. This seems to be good strategy for higher resolution datasets. E.g. SOC, that instead applies multiple convolutions has a drop in performs compared to other datasets." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.254, + 0.747, + 0.269 + ], + "angle": 0, + "content": "5.3.1 Interpretation of the results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.279, + 0.892, + 0.402 + ], + "angle": 0, + "content": "We confirm empirically what suspected in [46]: layers that naturally include a skip connections (CPL, SLL, SOC) generally perform better than layers that do not have this ability. Furthermore, we noticed that layers with an identity initialization (AOL, LOT) perform better than layers that do neither (BCOP, Cayley). Presumably this is due to the MaxMin activation reducing the variance in the forward pass when alternated with non-identity layers." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.412, + 0.892, + 0.548 + ], + "angle": 0, + "content": "Our results also allow ruling out some other possible explanation: one might suspect that pure contractive layers (AOL, CPL, and SLL) would suffer from vanishing gradients, differently from orthogonal ones, however, our experiments do not show any evidence of this fact. Furthermore, one might suspect that slower methods perform worse, because they allow fewer epochs for a given time budget, however, our experiments do not support this fact; two relative slow methods (SOC, LOT) are among the best ones." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.576, + 0.757, + 0.591 + ], + "angle": 0, + "content": "6. Conclusions and Guidelines" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.603, + 0.892, + 0.709 + ], + "angle": 0, + "content": "This work presented a comparative study of state-of-the-art 1-Lipschitz layers under the lens of different metrics, such as time and memory requirements, accuracy, and certified robust accuracy, all evaluated at training and inference time. A theoretical comparison of the methods in terms of time and memory complexity was also presented and validated by experiments." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Taking all metrics into account (summarized in Figure 1), the results are in favor of CPL, due to its highest performance and lower consumption of computational resources. When large computational resources are available and the application does not impose stringent timing constraints during inference and training, the SOC layer could be used, due to its slightly better performance. Finally, those applications in which the inference time is crucial may take advantage of AOL or BCOP, which do not introduce additional runtime overhead (during inference) compared to a standard convolution. For higher resolution images, it also seems that CPL is the most promising method." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "24581" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.169 + ], + "angle": 0, + "content": "[1] Thomas Altstidl, David Dobre, Björn Eskofier, Gauthier Gidel, and Leo Schwinn. Raising the bar for certified adversarial robustness with diffusion models. arXiv preprint arXiv:2305.10388, 2023. 14, 22" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.17, + 0.47, + 0.21 + ], + "angle": 0, + "content": "[2] Cem Anil, James Lucas, and Roger Grosse. Sorting out Lipschitz function approximation. In International Conference on Machine Learning (ICML), 2019. 2, 3, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.21, + 0.471, + 0.264 + ], + "angle": 0, + "content": "[3] Alexandre Araujo, Aaron J Havens, Blaise Delattre, Alexandre Allauzen, and Bin Hu. A unified algebraic perspective on Lipschitz neural networks. In International Conference on Learning Representations (ICLR), 2023. 4, 14, 22" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.265, + 0.471, + 0.331 + ], + "angle": 0, + "content": "[4] Battista Biggio, Igino Corona, Davide Maiorca, Blaine Nelson, Nedim Šrndić, Pavel Laskov, Giorgio Giacinto, and Fabio Roli. Evasion attacks against machine learning at test time. In Machine Learning and Knowledge Discovery in Databases, 2013. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.332, + 0.471, + 0.372 + ], + "angle": 0, + "content": "[5] Å. Björck and C. Bowie. An iterative algorithm for computing the best estimate of an orthogonal matrix. SIAM Journal on Numerical Analysis, 1971. 3, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.372, + 0.471, + 0.427 + ], + "angle": 0, + "content": "[6] Fabio Brau, Giulio Rossolini, Alessandro Biondi, and Giorgio Buttazzo. Robust-by-design classification via unitary-gradient neural networks. Proceedings of the AAAI Conference on Artificial Intelligence, 2023. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.427, + 0.471, + 0.481 + ], + "angle": 0, + "content": "[7] Nicholas Carlini, Florian Tramer, Krishnamurthy Dj Dvijotham, Leslie Rice, Mingjie Sun, and J Zico Kolter. (Certified!!) adversarial robustness for free! In International Conference on Learning Representations (ICLR), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.481, + 0.471, + 0.535 + ], + "angle": 0, + "content": "[8] Arthur Cayley. About the algebraic structure of the orthogonal group and the other classical groups in a field of characteristic zero or a prime characteristic. 
Journal für die reine und angewandte Mathematik, 1846. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.535, + 0.471, + 0.589 + ], + "angle": 0, + "content": "[9] Moustapha Cisse, Piotr Bojanowski, Edouard Grave, Yann Dauphin, and Nicolas Usunier. Parseval networks: Improving robustness to adversarial examples. In International conference on machine learning, 2017. 2, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.589, + 0.471, + 0.643 + ], + "angle": 0, + "content": "[10] Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. Certified adversarial robustness via randomized smoothing. In Proceedings of the 36th International Conference on Machine Learning, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.643, + 0.471, + 0.711 + ], + "angle": 0, + "content": "[11] Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops, 2020. 5, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.711, + 0.471, + 0.765 + ], + "angle": 0, + "content": "[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Conference on Computer Vision and Pattern Recognition (CVPR), 2009. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.765, + 0.471, + 0.806 + ], + "angle": 0, + "content": "[13] Farzan Farnia, Jesse Zhang, and David Tse. Generalizable adversarial training via spectral normalization. In International Conference on Learning Representations, 2018. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.806, + 0.471, + 0.845 + ], + "angle": 0, + "content": "[14] Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. stat, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.845, + 0.471, + 0.9 + ], + "angle": 0, + "content": "[15] Emiel Hoogeboom, Victor Garcia Satorras, Jakub Tomczak, and Max Welling. The convolution exponential and generalized Sylvester flows. In Advances in Neural Information Processing Systems, 2020. 3" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.471, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "[16] Jeremy Howard. Imagenette. https://github.com/fastai/imagenette/. Accessed: 01.02.2024. 5, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.12, + 0.892, + 0.174 + ], + "angle": 0, + "content": "[17] Kai Hu, Klas Leino, Zifan Wang, and Matt Fredrikson. Effectively leveraging capacity for improved deterministic robustness certification. In International Conference on Learning Representations (ICLR), 2024. 14, 22" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.174, + 0.892, + 0.228 + ], + "angle": 0, + "content": "[18] Kai Hu, Andy Zou, Zifan Wang, Klas Leino, and Matt Fredrikson. Unlocking deterministic robustness certification on imagenet. Conference on Neural Information Processing Systems (NeurIPS), 2024. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.228, + 0.892, + 0.282 + ], + "angle": 0, + "content": "[19] Lei Huang, Li Liu, Fan Zhu, Diwen Wan, Zehuan Yuan, Bo Li, and Ling Shao. Controllable orthogonalization in training DNNs. In Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 
3, 11, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.282, + 0.892, + 0.337 + ], + "angle": 0, + "content": "[20] Guy Katz, Clark Barrett, David L Dill, Kyle Julian, and Mykel J Kochenderfer. Reluplex: An efficient SMT solver for verifying deep neural networks. In International conference on computer aided verification, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.336, + 0.892, + 0.363 + ], + "angle": 0, + "content": "[21] Alex Krizhevsky. Learning multiple layers of features from tiny images. Technical report, 2009. 5, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.363, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[22] Andrew Lavin and Scott Gray. Fast algorithms for convolutional neural networks. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.403, + 0.892, + 0.43 + ], + "angle": 0, + "content": "[23] Ya Le and Xuan Yang. Tiny imagenet visual recognition challenge. CS 231N, 2015. 5, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.43, + 0.892, + 0.469 + ], + "angle": 0, + "content": "[24] Klas Leino, Zifan Wang, and Matt Fredrikson. Globally-robust neural networks. In International Conference on Machine Learning, 2021. 2, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.469, + 0.892, + 0.537 + ], + "angle": 0, + "content": "[25] Mario Lezcano-Casado and David Martínez-Rubio. Cheap orthogonal constraints in neural networks: A simple parametrization of the orthogonal and unitary group. In International Conference on Machine Learning (ICML), 2019. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.537, + 0.892, + 0.578 + ], + "angle": 0, + "content": "[26] Linyi Li, Tao Xie, and Bo Li. Sok: Certified robustness for deep neural networks. In 2023 IEEE Symposium on Security and Privacy (SP), 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.578, + 0.892, + 0.647 + ], + "angle": 0, + "content": "[27] Qiyang Li, Saminul Haque, Cem Anil, James Lucas, Roger B Grosse, and Joern-Henrik Jacobsen. Preventing gradient attenuation in Lipschitz constrained convolutional networks. In Conference on Neural Information Processing Systems (NeurIPS), 2019. 2, 3, 11, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.647, + 0.892, + 0.686 + ], + "angle": 0, + "content": "[28] Shuai Li, Kui Jia, Yuxin Wen, Tongliang Liu, and Dacheng Tao. Orthogonal deep neural networks. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.686, + 0.892, + 0.727 + ], + "angle": 0, + "content": "[29] Max Losch, David Stutz, Bernt Schiele, and Mario Fritz. Certified robust models with slack control and large Lipschitz constants. arXiv preprint arXiv:2309.06166, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.727, + 0.892, + 0.781 + ], + "angle": 0, + "content": "[30] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In International Conference on Learning Representations (ICLR), 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.781, + 0.892, + 0.835 + ], + "angle": 0, + "content": "[31] Laurent Meunier, Blaise J Delattre, Alexandre Araujo, and Alexandre Allauzen. A dynamical system perspective for Lipschitz neural networks. In International Conference on Machine Learning (ICML), 2022. 
3, 11, 14, 22" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.835, + 0.892, + 0.889 + ], + "angle": 0, + "content": "[32] Takeru Miyato, Toshiki Kataoka, Masanori Koyama, and Yuichi Yoshida. Spectral normalization for generative adversarial networks. In International Conference on Learning Representations (ICLR), 2018. 3, 11" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.889 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24582" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.217 + ], + "angle": 0, + "content": "[33] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In Conference on Neural Information Processing Systems (NeurIPS). 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.217, + 0.47, + 0.271 + ], + "angle": 0, + "content": "[34] Bernd Prach and Christoph H Lampert. Almost-orthogonal layers for efficient general-purpose Lipschitz networks. In European Conference on Computer Vision (ECCV), 2022. 2, 3, 5, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.271, + 0.47, + 0.324 + ], + "angle": 0, + "content": "[35] S Singla and S Feizi. Fantastic four: Differentiable bounds on singular values of convolution layers. In International Conference on Learning Representations (ICLR), 2021. 3, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.325, + 0.469, + 0.365 + ], + "angle": 0, + "content": "[36] Sahil Singla and Soheil Feizi. Skew orthogonal convolutions. In International Conference on Machine Learning (ICML), 2021. 2, 3, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.365, + 0.469, + 0.405 + ], + "angle": 0, + "content": "[37] Sahil Singla and Soheil Feizi. Improved techniques for deterministic 12 robustness. Conference on Neural Information Processing Systems (NeurIPS), 2022. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.405, + 0.469, + 0.46 + ], + "angle": 0, + "content": "[38] Leslie N Smith and Nicholay Topin. Super-convergence: Very fast training of neural networks using large learning rates. In Artificial intelligence and machine learning for multi-domain operations applications, 2019. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.46, + 0.469, + 0.514 + ], + "angle": 0, + "content": "[39] Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In International Conference on Learning Representations (ICLR), 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.514, + 0.469, + 0.567 + ], + "angle": 0, + "content": "[40] Asher Trockman and J Zico Kolter. Orthogonalizing convolutional layers with the Cayley transform. In International Conference on Learning Representations (ICLR), 2021. 2, 3, 5, 14, 23" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.568, + 0.469, + 0.622 + ], + "angle": 0, + "content": "[41] Yusuke Tsuzuku, Issei Sato, and Masashi Sugiyama. Lipschitz-margin training: Scalable certification of perturbation invariance for deep neural networks. 
Conference on Neural Information Processing Systems (NeurIPS), 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.622, + 0.469, + 0.662 + ], + "angle": 0, + "content": "[42] Ruigang Wang and Ian Manchester. Direct parameterization of Lipschitz-bounded deep networks. In International Conference on Machine Learning (ICML), 2023. 3, 11, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.662, + 0.469, + 0.73 + ], + "angle": 0, + "content": "[43] Lily Weng, Huan Zhang, Hongge Chen, Zhao Song, Chojui Hsieh, Luca Daniel, Duane Boning, and Inderjit Dhillon. Towards fast computation of certified robustness for relu networks. In International Conference on Machine Learning (ICML), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.73, + 0.469, + 0.783 + ], + "angle": 0, + "content": "[44] Eric Wong and Zico Kolter. Provable defenses against adversarial examples via the convex outer adversarial polytope. In International Conference on Machine Learning (ICML), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.784, + 0.469, + 0.865 + ], + "angle": 0, + "content": "[45] Lechao Xiao, Yasaman Bahri, Jascha Sohl-Dickstein, Samuel Schoenholz, and Jeffrey Pennington. Dynamical isometry and a mean field theory of CNNs: How to train 10,000-layer vanilla convolutional neural networks. In International Conference on Machine Learning (ICML), 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.866, + 0.469, + 0.893 + ], + "angle": 0, + "content": "[46] Xiaojun Xu, Linyi Li, and Bo Li. Lot: Layer-wise orthogonal training on improving 12 certified robustness. Conference on" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.893 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.119 + ], + "angle": 0, + "content": "Neural Information Processing Systems (NeurIPS), 2022. 2, 3, 8, 14, 23" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.12, + 0.894, + 0.173 + ], + "angle": 0, + "content": "[47] Tan Yu, Jun Li, Yunfeng Cai, and Ping Li. Constructing orthogonal convolutions in an explicit manner. In International Conference on Learning Representations (ICLR), 2021. 
3, 11, 12" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "24583" + } + ] +] \ No newline at end of file diff --git a/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_origin.pdf b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..aadddb7e96c3ea36dbc8e3209f93a85b1bba88f2 --- /dev/null +++ b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52a23d2d9ef5277b4ae4843ca0b046cf3f854f103b0222acfd0e07579881958d +size 358709 diff --git a/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/full.md b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/full.md new file mode 100644 index 0000000000000000000000000000000000000000..20058918a5e4a7fdf160b4bd1c5b764164a43fd5 --- /dev/null +++ b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/full.md @@ -0,0 +1,302 @@ +# 1-Lipschitz Layers Compared: Memory, Speed, and Certifiable Robustness + +Bernd Prach, $^{1,*}$ Fabio Brau, $^{2,*}$ Giorgio Buttazzo, $^{2}$ Christoph H. Lampert $^{1}$ $^{1}$ ISTA, Klosterneuburg, Austria + $^{2}$ Scuola Superiore Sant'Anna, Pisa, Italy + +{bprach, chl}@ist.ac.at, {fabio.brau, giorgio.butazzo}@santannapisa.it + +# Abstract + +The robustness of neural networks against input perturbations with bounded magnitude represents a serious concern in the deployment of deep learning models in safety-critical systems. Recently, the scientific community has focused on enhancing certifiable robustness guarantees by crafting 1-Lipschitz neural networks that leverage Lipschitz bounded dense and convolutional layers. Different methods have been proposed in the literature to achieve this goal, however, comparing the performance of such methods is not straightforward, since different metrics can be relevant (e.g., training time, memory usage, accuracy, certifiable robustness) for different applications. Therefore, this work provides a thorough comparison between different methods, covering theoretical aspects such as computational complexity and memory requirements, as well as empirical measurements of time per epoch, required memory, accuracy and certifiable robust accuracy. The paper also provides some guidelines and recommendations to support the user in selecting the methods that work best depending on the available resources. We provide code at github.com/berndprach/lLipschitzLayersCompared. + +# 1. Introduction + +Modern artificial neural networks achieve high accuracy and sometimes superhuman performance in many different tasks, but it is widely recognized that they are not robust to tiny and imperceptible input perturbations [4, 39] that, if properly crafted, can cause a model to produce the wrong output. Such inputs, known as Adversarial Examples, represent a serious concern for the deployment of machine learning models in safety-critical systems [26]. 
To overcome this issue, adversarial training has been proposed in + +![](images/69a05b6ec4757e9c0f0d25d880c87173ad5c33a6c11a21abaab965be18a82987.jpg) + +![](images/0380e230b8fd8a86f4be4561be7161ea42f27cb9c0adba33a8e941b2ad86d467.jpg) + +![](images/8d5e5a868592dbfae08c67f66da1cfd8fca757ca127a7cbf23475339ed25ddca.jpg) + +![](images/911b46d71b8d81a9ae74632e144c531a6ae5102ddf35953e6ce0f6141de61705.jpg) + +![](images/ca9b87b573f226fc3fcf6fe66c7dfe4f183453a8207d4eee723ae1d31af61d04.jpg) + +![](images/7a405ab227722501cfae03f134934faea1e665de1516715e62f2d0d11d91bd20.jpg) + +![](images/9cf0b86ff8d8734c58196e37a75ef5a3075410825a59d662e572fc060a967e74.jpg) +Figure 1. Evaluation of 1-Lipschitz methods on different metrics. Scores are assigned from 1 (worst) to 5 (best) to every method based on the results reported in Sections 3 and 5. + +
Legend
RARobust Accuracy
AAccuracy
TTTraining Time
ITInference Time
TMTrain Memory
IMInference Memory
+ +provide any guarantees of robustness. + +However, for many applications a guarantee of robustness is desired. Roughly speaking, a model $f$ is said to be $\varepsilon$ -robust for a given input $x$ if no perturbation of magnitude bounded by $\varepsilon$ can change its prediction. Recently, in the context of image classification, various approaches have been proposed to achieve certifiable robustness, including Verification, Randomized Smoothing, and Lipschitz Bounded Neural Networks. + +Verification strategies aim to establish, for any given model, whether all samples contained in a $l_{2}$ -ball with radius $\varepsilon$ and centered in the tested input $x$ are classified with the same class as $x$ . In the exact formulation, verification strategies involve the solution of an NP-hard problem [20]. Nevertheless, even in a relaxed formulation, [44], these strategies require a huge computational effort [43]. + +Randomized smoothing strategies, initially presented in [10], represent an effective way of crafting a certifiable-robust classifier $g$ based on a base classifier $f$ . If combined with an additional denoising step, they can achieve state-of-the-art levels of robustness, [7]. However, since they require multiple evaluations of the base model (up to 100k evaluations) for the classification of a single input, they cannot be used for real-time applications. + +Finally, Lipschitz Bounded Neural Networks [6, 9, 24, 27, 29, 34, 40] represent a valid alternative to produce certifiable classifiers, since they only require a single forward pass of the model at inference time to deduce guarantees of robustness. Indeed, for such models, a lower-bound of the minimal adversarial perturbation capable of fooling the classifier can be evaluated by considering the difference between the two largest class scores predicted by the model. + +Lipschitz-bounded neural networks can be obtained by the composition of 1-Lipschitz layers [2]. The process of parameterizing 1-Lipschitz layers is fairly straightforward for fully connected layers. However, for convolutions — with overlapping kernels — deducing an effective parameterization is a hard problem. Indeed, the Lipschitz condition can be essentially thought of as a condition on the Jacobian of the layer. However, the Jacobian matrix can not be efficiently computed. + +In order to avoid the explicit computation of the Jacobian, various methods have been proposed, including parameterizations that cause the Jacobian to be (very close to) orthogonal [27, 36, 40, 46] and methods that rely on an upper bound on the Jacobian instead [34]. Those different methods differ drastically in training and validation requirements (in particular time and memory) as well as empirical performance. Furthermore, increasing training time or model sizes very often also increases the empirical performance. This makes it hard to judge from the existing + +literature which methods are the most promising. This becomes even worse when working with specific computation requirements, such as restrictions on the available memory. In this case, it is important to choose the method that better suits the characteristics of the system in terms of evaluation time, memory usage as well and certifiable-robust-accuracy. + +This work aims at giving a comprehensive comparison of different strategies for crafting 1-Lipschitz layers from both a theoretical and practical perspective. 
For the sake of fairness, we consider several metrics such as Time and Memory requirements for both training and inference, Accuracy, as well as Certified Robust Accuracy. The main contributions are the following: + +- An empirical comparison of 1-Lipschitz layers based on six different metrics, and four different datasets on four architecture sizes with three time constraints. +- A theoretical comparison of the runtime complexity and the memory usage of existing methods. +- A review of the most recent methods in the literature, including implementations with a revised code that we will release publicly for other researchers to build on. + +# 2. Existing Works and Background + +In recent years, various methods have been proposed for creating artificial neural networks with a bounded Lipschitz constant. The Lipschitz constant of a function $f: \mathbb{R}^n \to \mathbb{R}^m$ with respect to the $l_2$ norm is the smallest $L$ such that for all $x, y \in \mathbb{R}^n$ + +$$ +\left\| f (x) - f (y) \right\| _ {2} \leq L \| x - y \| _ {2}. \tag {1} +$$ + +We also extend this definition to networks and layers, by considering the $l_{2}$ norms of the flattened input and output tensors in Equation (1). A layer is called 1-Lipschitz if its Lipschitz constant is at most 1. For linear layers, the Lipschitz constant is equal to the spectral norm of the weight matrix that is given as + +$$ +\| M \| _ {2} = \sup _ {\mathbf {v} \neq 0} \frac {\| M \mathbf {v} \| _ {2}}{\| \mathbf {v} \| _ {2}}. \tag {2} +$$ + +A particular class of linear 1-Lipschitz layers are ones with an orthogonal Jacobian matrix. The Jacobian matrix of a layer is the matrix of partial derivatives of the flattened outputs with respect to the flattened inputs. A matrix $M$ is orthogonal if $MM^{\top} = I$ , where $I$ is the identity matrix. For layers with an orthogonal Jacobian, Equation (1) always holds with equality and, because of this, a lot of methods aim at constructing such 1-Lipschitz layers. + +All the neural networks analyzed in this paper consist of 1-Lipschitz parameterized layers and 1-Lipschitz activation + +functions, with no skip connections and no batch normalization. Even though the commonly used ReLU activation function is 1-Lipschitz, Anil et al. [2] showed that it reduces the expressive capability of the model. Hence, we adopt the MaxMin activation proposed by the authors and commonly used in 1-Lipschitz models. Concatenations of 1-Lipschitz functions are 1-Lipschitz, so the networks analyzed are 1-Lipschitz by construction. + +# 2.1. Parameterized 1-Lipschitz Layers + +This section provides an overview of the existing methods for providing 1-Lipschitz layers. We discuss fundamental methods for estimating the spectral norms of linear and convolutional layers, i.e. Power Method [32] and Fantistica4 [35], and for crafting orthogonal matrices, i.e. Bjorck & Bowie [5], in Appendix A. The rest of this section describes 7 methods from the literature that construct 1-Lipschitz convolutions: BCOP, Cayley, SOC, AOL, LOT, CPL, and SLL. Further 1-Lipschitz methods, [19, 42, 47], and the reasons why they were not included in our main comparison can be found in Appendix B. + +BCOP Block Orthogonal Convolution Parameterization (BCOP) was introduced by Li et al. in [27] to extend a previous work by Xiao et al. [45] that focused on the importance of orthogonal initialization of the weights. For a $k \times k$ convolution, BCOP uses a set of $(2k - 1)$ parameter matrices. 
Each of these matrices is orthogonalized using the algorithm by Bjorck & Bowie [5] (see also Appendix A). Then, a $k \times k$ kernel is constructed from those matrices in a way that guarantees that the resulting layer is orthogonal. + +Cayley Another family of orthogonal convolutional and fully connected layers has been proposed by Trockman and Kolter [40] by leveraging the Cayley Transform [8], which maps a skew-symmetric matrix $A$ into an orthogonal matrix $Q$ using the relation + +$$ +Q = (I - A) (I + A) ^ {- 1}. \tag {3} +$$ + +The transformation can be used to parameterize orthogonal weight matrices for linear layers in a straightforward way. For convolutions, the authors make use of the fact that circular padded convolutions are vector-matrix products in the Fourier domain. As long as all those vector-matrix products have orthogonal matrices, the full convolution will have an orthogonal Jacobian. For Cayley Convolutions, those matrices are orthogonalized using the Cayley transform. + +SOC Skew Orthogonal Convolution is an orthogonal convolutional layer presented by Singla et al. [36], obtained by leveraging the exponential convolution [15]. Analogously + +to the matrix case, given a kernel $L\in \mathbb{R}^{c\times c\times k\times k}$ , the exponential convolution can be defined as + +$$ +\exp (L) (x) := x + \frac {L \star x}{1} + \frac {L \star^ {2} x}{2 !} + \dots + \frac {L \star^ {k} x}{k !} + \dots , \tag {4} +$$ + +where $\star^k$ denotes a convolution applied $k$ -times. The authors proved that any exponential convolution has an orthogonal Jacobian matrix as long as $L$ is skew-symmetric, providing a way of parameterizing 1-Lipschitz layers. In their work, the sum of the infinite series is approximated by computing only the first 5 terms during training and the first 12 terms during the inference, and $L$ is normalized to have unitary spectral norm following the method presented in [35] (see Appendix A). + +AOL Prach and Lampert [34] introduced Almost Orthogonal Lipschitz (AOL) layers. For any matrix $P$ , they defined a diagonal rescaling matrix $D$ with + +$$ +D _ {i i} = \left(\sum_ {j} \left| P ^ {\top} P \right| _ {i j}\right) ^ {- 1 / 2} \tag {5} +$$ + +and proved that the spectral norm of $PD$ is bounded by 1. This result was used to show that the linear layer given by $l(x) = PDx + b$ (where $P$ is the learnable matrix and $D$ is given by Eq. (5)) is 1-Lipschitz. Furthermore, the authors extended the idea so that it can also be efficiently applied to convolutions. This is done by calculating the rescaling in Equation (5) with the Jacobian $J$ of a convolution instead of $P$ . In order to evaluate it efficiently the authors express the elements of $J^{\top}J$ explicitly in terms of the kernel values. + +LOT The layer presented by Xu et al. [46] extends the idea of [19] to use the Inverse Square Root of a matrix in order to orthogonalize it. Indeed, for any matrix $V$ , the matrix $Q = V(V^T V)^{-\frac{1}{2}}$ is orthogonal. Similarly to the Cayley method, for the layer-wise orthogonal training (LOT) the convolution is applied in the Fourier frequency domain. To find the inverse square root, the authors relay on an iterative Newton Method. In details, defining $Y_0 = V^T V$ , $Z_0 = I$ , and + +$$ +Y _ {i + 1} = \frac {1}{2} Y _ {i} \left(3 I - Z _ {i} Y _ {i}\right), Z _ {i + 1} = \frac {1}{2} \left(3 I - Z _ {i} Y _ {i}\right) Z _ {i}, \tag {6} +$$ + +it can be shown that $Y_{i}$ converges to $(V^{T}V)^{-\frac{1}{2}}$ . 
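As a minimal illustration, the coupled iteration of Eq. (6) can be sketched in PyTorch for a dense, full-rank matrix $V$. The function name, the initial rescaling of $V^{\top}V$ (used here so that the unnormalized example converges), and the iteration counts in the demo are our own illustrative choices and are not taken from the LOT implementation:

```python
import torch

def newton_inverse_sqrt(V, iterations=10):
    # Coupled Newton iteration of Eq. (6), approximating (V^T V)^(-1/2).
    # A = V^T V is rescaled so that its eigenvalues lie in (0, 1], which is
    # sufficient for convergence when V has full column rank.
    A = V.t() @ V
    scale = torch.linalg.matrix_norm(A, ord=2)
    Y = A / scale
    Z = torch.eye(A.shape[0], dtype=A.dtype, device=A.device)
    for _ in range(iterations):
        T = 0.5 * (3.0 * torch.eye(A.shape[0], dtype=A.dtype, device=A.device) - Z @ Y)
        Y, Z = Y @ T, T @ Z        # Y_i -> (A/scale)^(1/2), Z_i -> (A/scale)^(-1/2)
    return Z / scale.sqrt()        # undo the rescaling: approximately (V^T V)^(-1/2)

# Q = V (V^T V)^(-1/2) is then (approximately) orthogonal:
V = torch.randn(16, 16)
Q = V @ newton_inverse_sqrt(V, iterations=30)  # extra iterations, since V is not normalized here
print(torch.linalg.matrix_norm(Q @ Q.t() - torch.eye(16)))  # small for well-conditioned V
```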
In their proposed layer, the authors apply 10 iterations of the method for both training and evaluation. + +CPL Meunier et al. [31] proposed the Convex Potential Layer. Given a non-decreasing 1-Lipschitz function $\sigma$ (usually ReLU), the layer is constructed as + +$$ +l (x) = x - \frac {2}{\| W \| _ {2} ^ {2}} W ^ {\top} \sigma (W x + b), \tag {7} +$$ + +which is 1-Lipschitz by design. The spectral norm required to calculate $l(x)$ is approximated using the power method (see Appendix A). + +SLL The SDP-based Lipschitz Layers (SLL) proposed by Araujo et al. [3] combine the CPL layer with the upper bound on the spectral norm from AOL. The layer can be written as + +$$ +l (x) = x - 2 W ^ {\top} Q ^ {- 2} D ^ {2} \sigma (W x + b), \tag {8} +$$ + +where $Q$ is a learnable diagonal matrix with positive entries and $D$ is deduced by applying Equation (5) to $P = W^{\top}Q^{-1}$ . + +Remark 1. Both CPL and SLL are non-linear by construction, so they can be used to construct a network without any further use of activation functions. However, carrying out some preliminary experiments, we empirically found that alternating CPL (and SLL) layers with MaxMin activation layers allows achieving a better performance. + +# 3. Theoretical Comparison + +As illustrated in the last section, various ideas and methods have been proposed to parameterize 1-Lipschitz layers. This causes the different methods to have very different properties and requirements. This section aims at highlighting the properties of the different algorithms, focusing on the algorithmic complexity and the required memory. + +Table 1 provides an overview of the computational complexity and memory requirements for the different layers considered in the previous section. For the sake of clarity, the analysis is performed by considering separately the transformations applied to the input of the layers and those applied to the weights to ensure the 1-Lipschitz constraint. Each of the two sides of the table contains three columns: i) Operations contains the most costly transformations applied to the input as well as to the parameters of different layers; ii) MACS reports the computational complexity expressed in multiply-accumulate operations (MACS) involved in the transformations (only leading terms are presented); iii) Memory reports the memory required by the transformation during the training phase. + +At training time, both input and weight transformations are required, thus the training complexity of the forward pass can be computed as the sum of the two corresponding MACS columns of the table. Similarly, the training memory requirements can be computed as the sum of the two corresponding Memory columns of the table. For the considered operations, the cost of the backward pass during training has the same computational complexity as the forward pass, and + +therefore increases the overall complexity by a constant factor. At inference time, all the parameter transformations can be computed just once and cached afterward. Therefore, the inference complexity is equal to the complexity due to the input transformation (column 3 in the table). At inference time, the intermediate variables are not stored in memory, hence, the memory requirements are much lower than during training. The values cannot directly be inferred from Table 1, we reported them separately in Appendix C.1. 
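To make this training-versus-inference distinction concrete, the sketch below shows the pattern shared by these layers: a parameter transformation (here the dense-case AOL rescaling of Eq. (5)) that is recomputed in every training forward pass, but computed once and cached for inference. The class name and caching interface are illustrative only and do not correspond to any of the released implementations:

```python
import torch
import torch.nn.functional as F

def aol_rescale(P):
    # Eq. (5): D_ii = (sum_j |P^T P|_ij)^(-1/2); the spectral norm of P D is at most 1.
    d = (P.t() @ P).abs().sum(dim=1).clamp(min=1e-12) ** -0.5
    return P * d  # equal to P @ torch.diag(d)

class CachedLipschitzLinear(torch.nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.randn(out_features, in_features) / in_features ** 0.5)
        self.bias = torch.nn.Parameter(torch.zeros(out_features))
        self.register_buffer("cached_weight", torch.zeros(out_features, in_features))
        self.has_cache = False

    def cache(self):
        # Called once before deployment: the parameter transformation is frozen.
        with torch.no_grad():
            self.cached_weight.copy_(aol_rescale(self.weight))
        self.has_cache = True

    def forward(self, x):
        if self.training or not self.has_cache:
            w = aol_rescale(self.weight)   # transformation cost paid in every training step
        else:
            w = self.cached_weight         # no transformation cost at inference
        return F.linear(x, w, self.bias)
```

The same caching argument is what removes the inference-time overhead of methods such as AOL and BCOP discussed later in Section 5.1.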
+ +Note that all the terms reported in Table 1 depend on the batch size $b$ , the input size $s \times s \times c$ , the number of inner iterations of a method $t$ , and the kernel size $k \times k$ . (Often, $t$ is different at training and inference time.) For the sake of clarity, the MACS of a naive convolution implementation is denoted by $C$ ( $C = bs^2c^2k^2$ ), the number of inputs of a layer is denoted by $M$ ( $M = bs^2c$ ), and the size of the kernel of a standard convolution is denoted by $P$ ( $P = c^2k^2$ ). Only the leading terms of the computations are reported in Table 1. In order to simplify some terms, we assume that $c > \log_2(s)$ and that rescaling a tensor (by a scalar) as well as adding two tensors does not require any memory in order to do backpropagation. We also assume that each additional activation does require extra memory. All these assumptions have been verified to hold within PyTorch, [33]. Also, when the algorithm described in the paper and the version provided in the supplied code differed, we considered the algorithm implemented in the code. + +The transformations reported in the table are convolutions (CONV), Fast Fourier Transformations (FFT), matrix-vector multiplications (MV), matrix-matrix multiplications (MM), matrix inversions (INV), as well as applications of an activation function (ACT). The application of algorithms such as BJORCK & Bowie (BnB), power method, and Fantastic 4 (F4) is also reported (see Appendix A for descriptions). + +# 3.1. Analysis of the computational complexity + +It is worth noting that the complexity of the input transformations (in Table 1) is similar for all methods. This implies that a similar scaling behaviour is expected at inference time for the models. Cayley and LOT apply an FFT-based convolution and have computational complexity independent of the kernel size. CPL and SLL require two convolutions, which make them slightly more expensive at inference time. Notably, SOC requires multiple convolutions, making this method more expensive at inference time. + +At training time, parameter transformations need to be applied in addition to the input transformations during every forward pass. For SOC and CPL, the input transformations always dominate the parameter transformations in terms of computational complexity. This means the complexity scales like $c^2$ , just like a regular convolution, with a further factor of 2 and 5 respectively. All other methods + +Table 1. Computational complexity and memory requirements of different methods. We report multiply-accumulate operations (MACS) as well as memory requirements (per layer) for batch size $b$ , image size $s \times s \times c$ , kernel size $k \times k$ and number of inner iterations $t$ . We use $C = bs^2c^2k^2$ , $M = bs^2c$ and $P = c^2k^2$ . For a detailed explanation on what is reported see Section 3. For some explanation on how the entries of this table were derived, see Appendix C. + +
MethodInput TransformationsParameter Transformations
OperationsMACS O(·)MemoryOperationsMACS O(·)Memory O(·)
StandardCONVCM--P
AOLCONVCMCONVc3k45P
BCOPCONVCMBnB & MMsc3kt + c3k3c2kt + c2k3
CayleyFFTs & MVsbs2c25/2MFFTs & INVss2c33/2s2c2
CPLCONVs & ACT2C3Mpower methods2c2k2P + s2c
LOTFFTs & MVsbs2c23MFFTs & MMs4s2c3t4s2c2t
SLLCONVs & ACT2C3MCONVsc3k45P
SOCCONVsCt1Mt1F4c2k2t2P
+ +require parameter transformations that scale like $c^3$ , making them more expensive for larger architectures. In particular, we do expect Cayley and LOT to require long training times for larger models, since the complexity of their parameter transformations further depends on the input size. + +# 3.2. Analysis of the training memory requirements + +The memory requirements of the different layers are important, since they determine the maximum batch size and the type of models we can train on a particular infrastructure. At training time, typically all intermediate results are kept in memory to perform backpropagation. This includes intermediate results for both input and parameter transformations. The input transformations usually preserve the size, and therefore the memory required is usually of $\mathcal{O}(M)$ . Therefore, for the input transformations, all methods require memory not more than a constant factor worse than standard convolutions, with the worst method being SOC, with a constant $t_1$ , typically equal to 5. + +In addition to the input transformation, we also need to store intermediate results of the parameter transformations in memory in order to evaluate the gradients. Again, most methods approximately preserve the sizes during the parameter transformations, and therefore the memory required is usually of order $\mathcal{O}(P)$ . Exceptions to this rule are Cayley and LOT, with a larger $\mathcal{O}(s^2 c^2)$ term, as well as BCOP. + +# 4. Experimental Setup + +This section presents an experimental study aimed at comparing the performance of the considered layers with respect to different metrics. Before presenting the results, we first summarize the setup used in our experiments. For a detailed description see Appendix E. To have a fair and meaningful comparison among the various models, all the + +proposed layers have been evaluated using the same architecture, loss function, and optimizer. Since, according to the data reported in Table 1, different layers may have different throughput, to have a fair comparison with respect to the tested metrics, we limited the total training time instead of fixing the number of training epochs. Results are reported for training times of $2\mathrm{h}$ , $10\mathrm{h}$ , and $24\mathrm{h}$ on one A100 GPU. + +Our architecture is a standard convolutional network that doubles the number of channels whenever the resolution is reduced [6, 40]. For each method, we tested architectures of different sizes. We denoted them as XS, S, M and L, depending on the number of parameters, according to the criteria in Table 7, ranging from 1.5M to 100M parameters. + +Since different methods benefit from different learning rates and weight decay, for each setting (model size, method and dataset), we used the best values resulting from a random search performed on multiple training runs on a validation set composed of $10\%$ of the original training set. More specifically, 16 runs were performed for each configuration of randomly sampled hyperparameters, and we selected the configuration maximizing the certified robust accuracy w.r.t. $\epsilon = 36 / 255$ (see Appendix E.4 for details). + +The evaluation was carried out using four different datasets: CIFAR-10, CIFAR-100 [21], Tiny ImageNet [23], and Imagenette [16] for large images. Augmentation was used during the training (Random crops and flips on CIFAR-10 and CIFAR-100, RandAugment [11] on Tiny ImageNet, and random crop as well as RandAugment on Imagenette), details in Appendix E.5. 
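For reference, an illustrative version of such an augmentation pipeline in torchvision is sketched below; the crop padding shown is an assumption, and the exact parameters are those listed in Appendix E.5:

```python
from torchvision import transforms

# CIFAR-10 / CIFAR-100: random crops and horizontal flips.
cifar_augmentation = transforms.Compose([
    transforms.RandomCrop(32, padding=4),   # padding value is illustrative
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])

# Tiny ImageNet: RandAugment [11] instead of crops and flips.
tiny_imagenet_augmentation = transforms.Compose([
    transforms.RandAugment(),
    transforms.ToTensor(),
])
```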
We use the loss function proposed by [34], with same temperature 0.25, and where we tuned the margin to maximize the robust accuracy for $\epsilon = \frac{36}{255}$ . In detail, we considered a margin of $2\sqrt{2}\epsilon$ where + +the $\sqrt{2}$ factor comes from the $L_{2}$ norm [41], and the factor 2 has been added to help with generalization. + +# 4.1. Metrics + +All the considered models were evaluated based on three main metrics: the throughput, the required memory, and the certified robust accuracy. + +Throughput and epoch time The throughput of a model is the average number of examples that the model can process per second. It determines how many epochs are processed in a given time frame. The evaluation of the throughput was performed on an 80GB-A100-GPU based on the average time of 100 mini-batches. We measured the inference throughput with cached parameter transformations. + +Memory required Layers that require less memory allow for larger batch size, and the memory requirements also determine the type of hardware we can train a model on. For each model, we measured and reported the maximal GPU memory occupied by tensors using the function torch.cuda.max_memory_allocated() provided by the PyTorch framework. This is not exactly equal to the overall GPU memory requirement but gives a fairly good approximation of it. Note that the model memory measured in this way also includes additional memory required by the optimizer (e.g. to store the momentum term) as well as by the activation layers in the forward pass. However, this additional memory should be at most of order $\mathcal{O}(M + P)$ . As for the throughput, we evaluated and cached all calculations independent of the input at inference time. + +Certified robust accuracy In order to evaluate the performance of a 1-Lipschitz network, the standard metric is the certified robust accuracy. An input is classified certifiably robustly with radius $\epsilon$ by a model, if no perturbations of the input with norm bounded by $\epsilon$ can change the prediction of the model. Certified robust accuracy measures the proportion of examples that are classified correctly as well as certifiably robustly. For 1-Lipschitz models, a lower bound of the certified $\epsilon$ -robust accuracy is the portion of correctly classified inputs such that $\mathcal{M}_f(x_i, l_i) > \epsilon \sqrt{2}$ where the margin $\mathcal{M}_f(x, l)$ of a model $f$ at input $x$ with label $l$ , given as $\mathcal{M}_f(x, l) = f(x)_l - \max_{j \neq l} f_j(x)$ , is the difference between target class score and the highest score of a different class. For details, see [41]. + +# 5. Experimental Results + +This section presents the results of the comparison performed by applying the methodology discussed in Section 4. The results related to the different metrics are dis + +cussed in dedicated subsections and the key takeaways are summarized in the radar-plot illustrated in Figure 1. + +# 5.1. Training and inference times + +Figure 2 plots the training time per epoch of the different models as a function of their size, and Figure 3 plots the corresponding inference throughput for the various sizes as described in Section 4. As described in Table 5, the model base width, referred to as $w$ , is doubled from one model size to the next. We expect the training and inference time to scale with $w$ similarly to how individual layers scale with their number of channels, $c$ (in Table 1). 
This is because the width of each of the 5 blocks of our architecture is a constant multiple of the base width, $w$ . + +The training time increases (at most) about linearly with $w$ for standard convolutions, whereas the computational complexity of each single convolution scales like $c^2$ . This suggests that parallelism on the GPU and the overhead from other operations (activations, parameter updates, etc.) are important factors determining the training time. This also explains why CPL (doing two convolutions, with identical kernel parameters) is only slightly slower than a standard + +![](images/57d3c18451c1ea846b814cee2491a28c67e9a17297d4c0a67667e8c25ce058f9.jpg) +Figure 2. Training time per epoch (on CIFAR-10) for different methods and different model sizes. + +![](images/e182984c7e9505b9a017c7b606b0085ad930a7ceb922c7e77489e2d91a497b54.jpg) +Figure 3. Inference throughput for different methods as a function of their size for CIFAR-10 sizes input images. All parameter transformations have been evaluated and cached beforehand + +convolution, and SOC (doing 5 convolutions) is only about 3 times slower than the standard convolution. The AOL and SLL methods also require times comparable to a standard convolution for small models, although eventually, the $c^3$ term in the computation of the rescaling makes them slower for larger models. Finally, Cayley, LOT, and BCOP methods take much longer training times per epoch. For Cayley and LOT this behavior was expected, as they have a large $\mathcal{O}(s^2c^3)$ term in their computational complexity. See Table 1 for further details. + +At inference time transformations of the weights are cached, therefore some methods (AOL, BCOP) do not have any overhead compared to a standard convolution. As expected, other methods (CPL, SLL, and SOC) that apply additional convolutions to the input suffer from a corresponding overhead. Finally, Cayley and LOT have a slightly different throughput due to their FFT-based convolution. Among them, Cayley is about twice as fast because it involves a real-valued FFT rather than a complex-valued one. From Figure 3, it can be noted that cached Cayley and CPL have the same inference time, even though CPL uses twice the number of convolutions. We believe this is due to the fact that the conventional FFT-based convolution is quite efficient for large kernel sizes, but for smaller ones PyTorch implements a faster algorithm, i.e., Winograd, [22], that can be up to 2.5 times faster. + +# 5.2. Training memory requirements + +The training and inference memory requirements of the various models (measured as described in Section 4.1) are reported in Figure 4 as a function of the model size. The results of the theoretical analysis reported in Table 1 suggest that the training memory requirements always have a term linear in the number of channels $c$ (usually the activations from the forward pass), as well as a term quadratic in $c$ (usually the weights and all transformations applied to the weights during the forward pass). This behavior can also be observed from Figure 4. For some of the models, the memory required approximately doubles from one model size to the next one, just like the width. This means that the linear term dominates (for those sizes), which makes those models relatively cheap to scale up. For the BCOP, LOT, and Cayley methods, the larger coefficients in the $c^2$ term (for LOT and Cayley the coefficient is even dependent on the input size, $s^2$ ) cause this term to dominate. 
Note that at test time the memory requirements are much lower, because the intermediate activation values do not need to be stored, as there is no backward pass. Therefore, at inference time, most methods require a very similar amount of memory as a standard convolution. The Cayley and LOT methods require more memory, since they perform the calculation in the Fourier space, creating an intermediate representation of the weight matrices of size $\mathcal{O}(s^2 c^2)$.

![](images/0bc1c9dba0306e02c68b9d7cd804a7f5810379f0550caa35bcec7add3e5d0843.jpg)

![](images/91276288aaa8dec025d6135b4f23eb42d943ec060f40240a92d2d8d7c9b1f44f.jpg)
Figure 4. Memory required at training and inference time for input size $32 \times 32$.

# 5.3. Certified robust accuracy

The results related to the accuracy and the certified robust accuracy for the different methods, model sizes, and datasets, measured with a 24 h training budget, are summarized in Table 2. The differences among the various model sizes are also highlighted in Figure 6, which reports the sorted values of the certified robust accuracy. Further tables and plots related to different training budgets can be found in Appendix G. The reader can compare our results with the state-of-the-art certified robust accuracy summarized in Appendix D. However, it is worth noting that, to reach state-of-the-art performance, authors often carry out experiments using large model sizes and long training times, which makes it hard to compare the methods themselves. On the other hand, the evaluation proposed in this paper allows a fairer comparison among the different methods, since it also considers timing and memory aspects. This restriction based on time, rather than on the number of epochs, ensures that merely enlarging the model size does not lead to improved performance, as bigger models typically process fewer epochs of data. Indeed, in our results in Figure 6 it is usually the M (and not the L) model that performs best.

Table 2. Certified robust accuracy for radius $\epsilon = 36 / 255$ on the evaluated datasets. Training is performed for 24 hours.
| Methods | Acc. [%] XS | Acc. [%] S | Acc. [%] M | Acc. [%] L | Rob. Acc. [%] XS | Rob. Acc. [%] S | Rob. Acc. [%] M | Rob. Acc. [%] L |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| **CIFAR-10** | | | | | | | | |
| AOL | 71.7 | 73.6 | 73.4 | 73.7 | 59.1 | 60.8 | 61.0 | 61.5 |
| BCOP | 71.7 | 73.1 | 74.0 | 74.6 | 58.5 | 59.3 | 60.5 | 61.5 |
| CPL | 74.9 | 76.1 | 76.6 | 76.8 | 62.5 | 64.2 | 65.1 | 65.2 |
| Cayley | 73.1 | 74.2 | 74.4 | 73.6 | 59.5 | 61.1 | 61.0 | 60.1 |
| LOT | 75.5 | 76.6 | 72.0 | - | 63.4 | 64.6 | 58.7 | - |
| SLL | 73.7 | 74.2 | 75.3 | 74.3 | 61.0 | 62.0 | 62.8 | 62.3 |
| SOC | 74.1 | 75.0 | 76.9 | 76.9 | 61.3 | 62.9 | 66.3 | 65.4 |
| **CIFAR-100** | | | | | | | | |
| AOL | 40.3 | 43.4 | 44.3 | 41.9 | 27.9 | 31.0 | 31.4 | 29.7 |
| BCOP | 41.4 | 42.8 | 43.7 | 42.2 | 28.4 | 30.1 | 31.2 | 29.2 |
| CPL | 42.3 | - | 45.2 | 44.3 | 30.1 | - | 33.2 | 32.1 |
| Cayley | 42.3 | 43.9 | 43.5 | 42.9 | 29.2 | 30.5 | 30.5 | 29.5 |
| LOT | 43.5 | 45.2 | 42.8 | - | 30.8 | 32.5 | 29.6 | - |
| SLL | 41.4 | 42.8 | 42.4 | 42.1 | 28.9 | 30.5 | 29.9 | 29.6 |
| SOC | 43.1 | 45.2 | 47.3 | 46.2 | 30.6 | 32.6 | 34.9 | 33.5 |
| **Tiny ImageNet** | | | | | | | | |
| AOL | 26.6 | 29.3 | 30.3 | 30.0 | 18.1 | 19.7 | 21.0 | 20.6 |
| BCOP | 22.4 | 26.2 | 27.6 | 27.0 | 13.8 | 16.9 | 17.2 | 16.8 |
| CPL | 28.3 | 29.3 | 29.8 | 30.3 | 18.9 | 19.7 | 20.3 | 20.1 |
| Cayley | 27.8 | 29.6 | 30.1 | 27.2 | 17.9 | 19.5 | 19.3 | 16.7 |
| LOT | 30.7 | 32.5 | 28.8 | - | 20.8 | 21.9 | 18.1 | - |
| SLL | 25.1 | 27.0 | 26.5 | 27.9 | 16.6 | 18.4 | 17.7 | 18.8 |
| SOC | 28.9 | 28.8 | 32.1 | 32.1 | 18.9 | 18.8 | 21.2 | 21.1 |
| **Imagenette** | | | | | | | | |
| AOL | 80.8 | 83.7 | 82.8 | | 76.8 | 79.9 | 78.5 | |
| BCOP | 81.2 | 84.5 | 9.8 | | 75.6 | 80.1 | 9.8 | |
| CPL | 85.5 | 86.5 | 86.4 | | 80.8 | 82.4 | 82.3 | |
| Cayley | 81.2 | 77.9 | - | | 75.8 | 71.7 | - | |
| SLL | 80.8 | 83.4 | 79.3 | | 75.4 | 78.0 | 72.8 | |
| SOC | 80.6 | 83.6 | 79.0 | | 74.7 | 78.4 | 73.5 | |
Experiments show that SOC performs best, reaching the highest certified robust accuracy on two datasets. CPL models consistently rank in the top-10 positions across the three datasets. LOT performed well, in particular on the Tiny ImageNet dataset, where it performs best. AOL did not reach high accuracy on CIFAR-10, but reached more competitive results on Tiny ImageNet. The opposite effect can be observed for SLL, whose performance seems to depend strongly on the number of classes. BCOP only reaches the top 10 once, while Cayley is consistently outperformed by the other methods. The very same analysis can be applied to the clean accuracy, whose sorted bar plots are reported in Appendix G; the main difference is that Cayley performs slightly better on that metric. Furthermore, it is worth highlighting that CPL is sensitive to weight initialization: we faced numerical errors during the 10 h and 24 h training of the small model on CIFAR-100. On Imagenette, CPL clearly performs best, followed by BCOP and AOL. Note that these methods all construct a kernel such that the convolution is 1-Lipschitz. This seems to be a good strategy for higher-resolution datasets. For example, SOC, which instead applies multiple convolutions, shows a drop in performance compared to the other datasets.

# 5.3.1 Interpretation of the results

We confirm empirically what was suspected in [46]: layers that naturally include a skip connection (CPL, SLL, SOC) generally perform better than layers that do not have this ability. Furthermore, we noticed that layers with an identity initialization (AOL, LOT) perform better than layers that have neither property (BCOP, Cayley). Presumably this is due to the MaxMin activation reducing the variance in the forward pass when alternated with non-identity layers.

Our results also allow us to rule out some other possible explanations: one might suspect that purely contractive layers (AOL, CPL, and SLL) would suffer from vanishing gradients, unlike orthogonal ones; however, our experiments do not show any evidence of this. Furthermore, one might suspect that slower methods perform worse because they allow fewer epochs within a given time budget; however, our experiments do not support this either, since two relatively slow methods (SOC, LOT) are among the best ones.

# 6. Conclusions and Guidelines

This work presented a comparative study of state-of-the-art 1-Lipschitz layers under the lens of different metrics, such as time and memory requirements, accuracy, and certified robust accuracy, all evaluated at training and inference time. A theoretical comparison of the methods in terms of time and memory complexity was also presented and validated by experiments.

Taking all metrics into account (summarized in Figure 1), the results are in favor of CPL, due to its high performance and comparatively low consumption of computational resources. When large computational resources are available and the application does not impose stringent timing constraints during inference and training, the SOC layer could be used, due to its slightly better performance. Finally, applications in which the inference time is crucial may take advantage of AOL or BCOP, which do not introduce additional runtime overhead (during inference) compared to a standard convolution. For higher-resolution images, CPL also seems to be the most promising method.

# References

[1] Thomas Altstidl, David Dobre, Björn Eskofier, Gauthier Gidel, and Leo Schwinn. Raising the bar for certified adversarial robustness with diffusion models. arXiv preprint arXiv:2305.10388, 2023.
14, 22 +[2] Cem Anil, James Lucas, and Roger Grosse. Sorting out Lipschitz function approximation. In International Conference on Machine Learning (ICML), 2019. 2, 3, 11 +[3] Alexandre Araujo, Aaron J Havens, Blaise Delattre, Alexandre Allauzen, and Bin Hu. A unified algebraic perspective on Lipschitz neural networks. In International Conference on Learning Representations (ICLR), 2023. 4, 14, 22 +[4] Battista Biggio, Igino Corona, Davide Maiorca, Blaine Nelson, Nedim Šrndić, Pavel Laskov, Giorgio Giacinto, and Fabio Roli. Evasion attacks against machine learning at test time. In Machine Learning and Knowledge Discovery in Databases, 2013. 1 +[5] Å. Björck and C. Bowie. An iterative algorithm for computing the best estimate of an orthogonal matrix. SIAM Journal on Numerical Analysis, 1971. 3, 11 +[6] Fabio Brau, Giulio Rossolini, Alessandro Biondi, and Giorgio Buttazzo. Robust-by-design classification via unitary-gradient neural networks. Proceedings of the AAAI Conference on Artificial Intelligence, 2023. 2, 5 +[7] Nicholas Carlini, Florian Tramer, Krishnamurthy Dj Dvijotham, Leslie Rice, Mingjie Sun, and J Zico Kolter. (Certified!!) adversarial robustness for free! In International Conference on Learning Representations (ICLR), 2023. 2 +[8] Arthur Cayley. About the algebraic structure of the orthogonal group and the other classical groups in a field of characteristic zero or a prime characteristic. Journal für die reine und angewandte Mathematik, 1846. 3 +[9] Moustapha Cisse, Piotr Bojanowski, Edouard Grave, Yann Dauphin, and Nicolas Usunier. Parseval networks: Improving robustness to adversarial examples. In International conference on machine learning, 2017. 2, 11 +[10] Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. Certified adversarial robustness via randomized smoothing. In Proceedings of the 36th International Conference on Machine Learning, 2019. 2 +[11] Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops, 2020. 5, 16 +[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Conference on Computer Vision and Pattern Recognition (CVPR), 2009. 16 +[13] Farzan Farnia, Jesse Zhang, and David Tse. Generalizable adversarial training via spectral normalization. In International Conference on Learning Representations, 2018. 11 +[14] Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. stat, 2015. 1 +[15] Emiel Hoogeboom, Victor Garcia Satorras, Jakub Tomczak, and Max Welling. The convolution exponential and generalized Sylvester flows. In Advances in Neural Information Processing Systems, 2020. 3 + +[16] Jeremy Howard. Imagenette. https://github.com/fastai/imagenette/. Accessed: 01.02.2024. 5, 16 +[17] Kai Hu, Klas Leino, Zifan Wang, and Matt Fredrikson. Effectively leveraging capacity for improved deterministic robustness certification. In International Conference on Learning Representations (ICLR), 2024. 14, 22 +[18] Kai Hu, Andy Zou, Zifan Wang, Klas Leino, and Matt Fredrikson. Unlocking deterministic robustness certification on imagenet. Conference on Neural Information Processing Systems (NeurIPS), 2024. 14 +[19] Lei Huang, Li Liu, Fan Zhu, Diwen Wan, Zehuan Yuan, Bo Li, and Ling Shao. Controllable orthogonalization in training DNNs. 
In Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3, 11, 12 +[20] Guy Katz, Clark Barrett, David L Dill, Kyle Julian, and Mykel J Kochenderfer. Reluplex: An efficient SMT solver for verifying deep neural networks. In International conference on computer aided verification, 2017. 2 +[21] Alex Krizhevsky. Learning multiple layers of features from tiny images. Technical report, 2009. 5, 16 +[22] Andrew Lavin and Scott Gray. Fast algorithms for convolutional neural networks. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 7 +[23] Ya Le and Xuan Yang. Tiny imagenet visual recognition challenge. CS 231N, 2015. 5, 16 +[24] Klas Leino, Zifan Wang, and Matt Fredrikson. Globally-robust neural networks. In International Conference on Machine Learning, 2021. 2, 11 +[25] Mario Lezcano-Casado and David Martínez-Rubio. Cheap orthogonal constraints in neural networks: A simple parametrization of the orthogonal and unitary group. In International Conference on Machine Learning (ICML), 2019. 11 +[26] Linyi Li, Tao Xie, and Bo Li. Sok: Certified robustness for deep neural networks. In 2023 IEEE Symposium on Security and Privacy (SP), 2023. 1 +[27] Qiyang Li, Saminul Haque, Cem Anil, James Lucas, Roger B Grosse, and Joern-Henrik Jacobsen. Preventing gradient attenuation in Lipschitz constrained convolutional networks. In Conference on Neural Information Processing Systems (NeurIPS), 2019. 2, 3, 11, 14 +[28] Shuai Li, Kui Jia, Yuxin Wen, Tongliang Liu, and Dacheng Tao. Orthogonal deep neural networks. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 11 +[29] Max Losch, David Stutz, Bernt Schiele, and Mario Fritz. Certified robust models with slack control and large Lipschitz constants. arXiv preprint arXiv:2309.06166, 2023. 2 +[30] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In International Conference on Learning Representations (ICLR), 2018. 1 +[31] Laurent Meunier, Blaise J Delattre, Alexandre Araujo, and Alexandre Allauzen. A dynamical system perspective for Lipschitz neural networks. In International Conference on Machine Learning (ICML), 2022. 3, 11, 14, 22 +[32] Takeru Miyato, Toshiki Kataoka, Masanori Koyama, and Yuichi Yoshida. Spectral normalization for generative adversarial networks. In International Conference on Learning Representations (ICLR), 2018. 3, 11 + +[33] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In Conference on Neural Information Processing Systems (NeurIPS). 2019. 4 +[34] Bernd Prach and Christoph H Lampert. Almost-orthogonal layers for efficient general-purpose Lipschitz networks. In European Conference on Computer Vision (ECCV), 2022. 2, 3, 5, 14 +[35] S Singla and S Feizi. Fantastic four: Differentiable bounds on singular values of convolution layers. In International Conference on Learning Representations (ICLR), 2021. 3, 11 +[36] Sahil Singla and Soheil Feizi. Skew orthogonal convolutions. In International Conference on Machine Learning (ICML), 2021. 2, 3, 14 +[37] Sahil Singla and Soheil Feizi. Improved techniques for deterministic 12 robustness. 
Conference on Neural Information Processing Systems (NeurIPS), 2022. 14 +[38] Leslie N Smith and Nicholay Topin. Super-convergence: Very fast training of neural networks using large learning rates. In Artificial intelligence and machine learning for multi-domain operations applications, 2019. 15 +[39] Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In International Conference on Learning Representations (ICLR), 2014. 1 +[40] Asher Trockman and J Zico Kolter. Orthogonalizing convolutional layers with the Cayley transform. In International Conference on Learning Representations (ICLR), 2021. 2, 3, 5, 14, 23 +[41] Yusuke Tsuzuku, Issei Sato, and Masashi Sugiyama. Lipschitz-margin training: Scalable certification of perturbation invariance for deep neural networks. Conference on Neural Information Processing Systems (NeurIPS), 2018. 6 +[42] Ruigang Wang and Ian Manchester. Direct parameterization of Lipschitz-bounded deep networks. In International Conference on Machine Learning (ICML), 2023. 3, 11, 12 +[43] Lily Weng, Huan Zhang, Hongge Chen, Zhao Song, Chojui Hsieh, Luca Daniel, Duane Boning, and Inderjit Dhillon. Towards fast computation of certified robustness for relu networks. In International Conference on Machine Learning (ICML), 2018. 2 +[44] Eric Wong and Zico Kolter. Provable defenses against adversarial examples via the convex outer adversarial polytope. In International Conference on Machine Learning (ICML), 2018. 2 +[45] Lechao Xiao, Yasaman Bahri, Jascha Sohl-Dickstein, Samuel Schoenholz, and Jeffrey Pennington. Dynamical isometry and a mean field theory of CNNs: How to train 10,000-layer vanilla convolutional neural networks. In International Conference on Machine Learning (ICML), 2018. 3 +[46] Xiaojun Xu, Linyi Li, and Bo Li. Lot: Layer-wise orthogonal training on improving 12 certified robustness. Conference on + +Neural Information Processing Systems (NeurIPS), 2022. 2, 3, 8, 14, 23 +[47] Tan Yu, Jun Li, Yunfeng Cai, and Ping Li. Constructing orthogonal convolutions in an explicit manner. In International Conference on Learning Representations (ICLR), 2021. 
3, 11, 12 \ No newline at end of file diff --git a/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/images.zip b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..5cc00f9305e391ae6395c378c6c95d604c4c07db --- /dev/null +++ b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afa409f75213da2035fb9969a31e7cde975d81109d909bcc49ec82cb062ac27e +size 431983 diff --git a/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/layout.json b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..42f9e8d844da195e2ca5fe022edf21154e2782d0 --- /dev/null +++ b/2024/1-Lipschitz Layers Compared_ Memory Speed and Certifiable Robustness/layout.json @@ -0,0 +1,8644 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 66, + 103, + 529, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 103, + 529, + 121 + ], + "spans": [ + { + "bbox": [ + 66, + 103, + 529, + 121 + ], + "type": "text", + "content": "1-Lipschitz Layers Compared: Memory, Speed, and Certifiable Robustness" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "spans": [ + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "text", + "content": "Bernd Prach," + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "inline_equation", + "content": "^{1,*}" + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "text", + "content": " Fabio Brau," + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "inline_equation", + "content": "^{2,*}" + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "text", + "content": " Giorgio Buttazzo," + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "text", + "content": " Christoph H. 
Lampert" + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "text", + "content": " ISTA, Klosterneuburg, Austria \n" + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 119, + 142, + 473, + 186 + ], + "type": "text", + "content": " Scuola Superiore Sant'Anna, Pisa, Italy" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 100, + 187, + 483, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 187, + 483, + 200 + ], + "spans": [ + { + "bbox": [ + 100, + 187, + 483, + 200 + ], + "type": "text", + "content": "{bprach, chl}@ist.ac.at, {fabio.brau, giorgio.butazzo}@santannapisa.it" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "spans": [ + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 251, + 290, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 251, + 290, + 504 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 290, + 504 + ], + "type": "text", + "content": "The robustness of neural networks against input perturbations with bounded magnitude represents a serious concern in the deployment of deep learning models in safety-critical systems. Recently, the scientific community has focused on enhancing certifiable robustness guarantees by crafting 1-Lipschitz neural networks that leverage Lipschitz bounded dense and convolutional layers. Different methods have been proposed in the literature to achieve this goal, however, comparing the performance of such methods is not straightforward, since different metrics can be relevant (e.g., training time, memory usage, accuracy, certifiable robustness) for different applications. Therefore, this work provides a thorough comparison between different methods, covering theoretical aspects such as computational complexity and memory requirements, as well as empirical measurements of time per epoch, required memory, accuracy and certifiable robust accuracy. The paper also provides some guidelines and recommendations to support the user in selecting the methods that work best depending on the available resources. We provide code at github.com/berndprach/lLipschitzLayersCompared." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 538, + 128, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 538, + 128, + 551 + ], + "spans": [ + { + "bbox": [ + 47, + 538, + 128, + 551 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 559, + 287, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 559, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 46, + 559, + 287, + 668 + ], + "type": "text", + "content": "Modern artificial neural networks achieve high accuracy and sometimes superhuman performance in many different tasks, but it is widely recognized that they are not robust to tiny and imperceptible input perturbations [4, 39] that, if properly crafted, can cause a model to produce the wrong output. 
Such inputs, known as Adversarial Examples, represent a serious concern for the deployment of machine learning models in safety-critical systems [26]. To overcome this issue, adversarial training has been proposed in" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 313, + 227, + 419, + 318 + ], + "blocks": [ + { + "bbox": [ + 313, + 227, + 419, + 318 + ], + "lines": [ + { + "bbox": [ + 313, + 227, + 419, + 318 + ], + "spans": [ + { + "bbox": [ + 313, + 227, + 419, + 318 + ], + "type": "image", + "image_path": "69a05b6ec4757e9c0f0d25d880c87173ad5c33a6c11a21abaab965be18a82987.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 434, + 227, + 539, + 318 + ], + "blocks": [ + { + "bbox": [ + 434, + 227, + 539, + 318 + ], + "lines": [ + { + "bbox": [ + 434, + 227, + 539, + 318 + ], + "spans": [ + { + "bbox": [ + 434, + 227, + 539, + 318 + ], + "type": "image", + "image_path": "0380e230b8fd8a86f4be4561be7161ea42f27cb9c0adba33a8e941b2ad86d467.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 313, + 323, + 419, + 414 + ], + "blocks": [ + { + "bbox": [ + 313, + 323, + 419, + 414 + ], + "lines": [ + { + "bbox": [ + 313, + 323, + 419, + 414 + ], + "spans": [ + { + "bbox": [ + 313, + 323, + 419, + 414 + ], + "type": "image", + "image_path": "8d5e5a868592dbfae08c67f66da1cfd8fca757ca127a7cbf23475339ed25ddca.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 434, + 323, + 539, + 414 + ], + "blocks": [ + { + "bbox": [ + 434, + 323, + 539, + 414 + ], + "lines": [ + { + "bbox": [ + 434, + 323, + 539, + 414 + ], + "spans": [ + { + "bbox": [ + 434, + 323, + 539, + 414 + ], + "type": "image", + "image_path": "911b46d71b8d81a9ae74632e144c531a6ae5102ddf35953e6ce0f6141de61705.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 313, + 419, + 418, + 510 + ], + "blocks": [ + { + "bbox": [ + 313, + 419, + 418, + 510 + ], + "lines": [ + { + "bbox": [ + 313, + 419, + 418, + 510 + ], + "spans": [ + { + "bbox": [ + 313, + 419, + 418, + 510 + ], + "type": "image", + "image_path": "ca9b87b573f226fc3fcf6fe66c7dfe4f183453a8207d4eee723ae1d31af61d04.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 435, + 420, + 539, + 511 + ], + "blocks": [ + { + "bbox": [ + 435, + 420, + 539, + 511 + ], + "lines": [ + { + "bbox": [ + 435, + 420, + 539, + 511 + ], + "spans": [ + { + "bbox": [ + 435, + 420, + 539, + 511 + ], + "type": "image", + "image_path": "7a405ab227722501cfae03f134934faea1e665de1516715e62f2d0d11d91bd20.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 313, + 517, + 418, + 608 + ], + "blocks": [ + { + "bbox": [ + 313, + 517, + 418, + 608 + ], + "lines": [ + { + "bbox": [ + 313, + 517, + 418, + 608 + ], + "spans": [ + { + "bbox": [ + 313, + 517, + 418, + 608 + ], + "type": "image", + "image_path": "9cf0b86ff8d8734c58196e37a75ef5a3075410825a59d662e572fc060a967e74.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 618, + 547, + 652 + ], + "lines": [ + { + "bbox": [ + 304, + 618, + 547, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 
618, + 547, + 652 + ], + "type": "text", + "content": "Figure 1. Evaluation of 1-Lipschitz methods on different metrics. Scores are assigned from 1 (worst) to 5 (best) to every method based on the results reported in Sections 3 and 5." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 428, + 517, + 541, + 600 + ], + "blocks": [ + { + "bbox": [ + 428, + 517, + 541, + 600 + ], + "lines": [ + { + "bbox": [ + 428, + 517, + 541, + 600 + ], + "spans": [ + { + "bbox": [ + 428, + 517, + 541, + 600 + ], + "type": "table", + "html": "
Legend
RARobust Accuracy
AAccuracy
TTTraining Time
ITInference Time
TMTrain Memory
IMInference Memory
", + "image_path": "136ab83e8fb6e86acee4ed2e9092d692d1eb036fdbf10c1af14e99ff7134787f.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 674, + 132, + 683 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 674, + 132, + 683 + ], + "spans": [ + { + "bbox": [ + 46, + 674, + 132, + 683 + ], + "type": "text", + "content": "*Joined first authors." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 46, + 683, + 287, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 683, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 683, + 287, + 712 + ], + "type": "text", + "content": "This work was partially supported by project SERICS (PE00000014) under the MUR National Recovery and Resilience Plan funded by the European Union - NextGenerationEU." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "type": "text", + "content": "[14, 30, 39]. It uses adversarial examples during the training to correct the model prediction. This strategy does improves the empirical robustness of the model, however, it does not" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24574" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 200, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 200, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 200, + 83 + ], + "type": "text", + "content": "provide any guarantees of robustness." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 84, + 286, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 84, + 286, + 179 + ], + "spans": [ + { + "bbox": [ + 46, + 84, + 286, + 179 + ], + "type": "text", + "content": "However, for many applications a guarantee of robustness is desired. 
Roughly speaking, a model " + }, + { + "bbox": [ + 46, + 84, + 286, + 179 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 84, + 286, + 179 + ], + "type": "text", + "content": " is said to be " + }, + { + "bbox": [ + 46, + 84, + 286, + 179 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 46, + 84, + 286, + 179 + ], + "type": "text", + "content": "-robust for a given input " + }, + { + "bbox": [ + 46, + 84, + 286, + 179 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 84, + 286, + 179 + ], + "type": "text", + "content": " if no perturbation of magnitude bounded by " + }, + { + "bbox": [ + 46, + 84, + 286, + 179 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 46, + 84, + 286, + 179 + ], + "type": "text", + "content": " can change its prediction. Recently, in the context of image classification, various approaches have been proposed to achieve certifiable robustness, including Verification, Randomized Smoothing, and Lipschitz Bounded Neural Networks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 188, + 286, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 188, + 286, + 271 + ], + "spans": [ + { + "bbox": [ + 46, + 188, + 286, + 271 + ], + "type": "text", + "content": "Verification strategies aim to establish, for any given model, whether all samples contained in a " + }, + { + "bbox": [ + 46, + 188, + 286, + 271 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 46, + 188, + 286, + 271 + ], + "type": "text", + "content": "-ball with radius " + }, + { + "bbox": [ + 46, + 188, + 286, + 271 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 46, + 188, + 286, + 271 + ], + "type": "text", + "content": " and centered in the tested input " + }, + { + "bbox": [ + 46, + 188, + 286, + 271 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 188, + 286, + 271 + ], + "type": "text", + "content": " are classified with the same class as " + }, + { + "bbox": [ + 46, + 188, + 286, + 271 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 188, + 286, + 271 + ], + "type": "text", + "content": ". In the exact formulation, verification strategies involve the solution of an NP-hard problem [20]. Nevertheless, even in a relaxed formulation, [44], these strategies require a huge computational effort [43]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 279, + 286, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 279, + 286, + 374 + ], + "spans": [ + { + "bbox": [ + 46, + 279, + 286, + 374 + ], + "type": "text", + "content": "Randomized smoothing strategies, initially presented in [10], represent an effective way of crafting a certifiable-robust classifier " + }, + { + "bbox": [ + 46, + 279, + 286, + 374 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 46, + 279, + 286, + 374 + ], + "type": "text", + "content": " based on a base classifier " + }, + { + "bbox": [ + 46, + 279, + 286, + 374 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 279, + 286, + 374 + ], + "type": "text", + "content": ". If combined with an additional denoising step, they can achieve state-of-the-art levels of robustness, [7]. 
However, since they require multiple evaluations of the base model (up to 100k evaluations) for the classification of a single input, they cannot be used for real-time applications." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 382, + 286, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 382, + 286, + 478 + ], + "spans": [ + { + "bbox": [ + 46, + 382, + 286, + 478 + ], + "type": "text", + "content": "Finally, Lipschitz Bounded Neural Networks [6, 9, 24, 27, 29, 34, 40] represent a valid alternative to produce certifiable classifiers, since they only require a single forward pass of the model at inference time to deduce guarantees of robustness. Indeed, for such models, a lower-bound of the minimal adversarial perturbation capable of fooling the classifier can be evaluated by considering the difference between the two largest class scores predicted by the model." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 479, + 286, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 479, + 286, + 586 + ], + "spans": [ + { + "bbox": [ + 46, + 479, + 286, + 586 + ], + "type": "text", + "content": "Lipschitz-bounded neural networks can be obtained by the composition of 1-Lipschitz layers [2]. The process of parameterizing 1-Lipschitz layers is fairly straightforward for fully connected layers. However, for convolutions — with overlapping kernels — deducing an effective parameterization is a hard problem. Indeed, the Lipschitz condition can be essentially thought of as a condition on the Jacobian of the layer. However, the Jacobian matrix can not be efficiently computed." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "content": "In order to avoid the explicit computation of the Jacobian, various methods have been proposed, including parameterizations that cause the Jacobian to be (very close to) orthogonal [27, 36, 40, 46] and methods that rely on an upper bound on the Jacobian instead [34]. Those different methods differ drastically in training and validation requirements (in particular time and memory) as well as empirical performance. Furthermore, increasing training time or model sizes very often also increases the empirical performance. This makes it hard to judge from the existing" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "literature which methods are the most promising. This becomes even worse when working with specific computation requirements, such as restrictions on the available memory. In this case, it is important to choose the method that better suits the characteristics of the system in terms of evaluation time, memory usage as well and certifiable-robust-accuracy." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 162, + 545, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 162, + 545, + 245 + ], + "spans": [ + { + "bbox": [ + 304, + 162, + 545, + 245 + ], + "type": "text", + "content": "This work aims at giving a comprehensive comparison of different strategies for crafting 1-Lipschitz layers from both a theoretical and practical perspective. For the sake of fairness, we consider several metrics such as Time and Memory requirements for both training and inference, Accuracy, as well as Certified Robust Accuracy. The main contributions are the following:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 258, + 545, + 352 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 306, + 258, + 545, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 258, + 545, + 292 + ], + "spans": [ + { + "bbox": [ + 306, + 258, + 545, + 292 + ], + "type": "text", + "content": "- An empirical comparison of 1-Lipschitz layers based on six different metrics, and four different datasets on four architecture sizes with three time constraints." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 293, + 545, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 293, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 306, + 293, + 545, + 316 + ], + "type": "text", + "content": "- A theoretical comparison of the runtime complexity and the memory usage of existing methods." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 317, + 545, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 317, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 306, + 317, + 545, + 352 + ], + "type": "text", + "content": "- A review of the most recent methods in the literature, including implementations with a revised code that we will release publicly for other researchers to build on." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 363, + 487, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 363, + 487, + 376 + ], + "spans": [ + { + "bbox": [ + 306, + 363, + 487, + 376 + ], + "type": "text", + "content": "2. Existing Works and Background" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "text", + "content": "In recent years, various methods have been proposed for creating artificial neural networks with a bounded Lipschitz constant. 
The Lipschitz constant of a function " + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "inline_equation", + "content": "f: \\mathbb{R}^n \\to \\mathbb{R}^m" + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "text", + "content": " with respect to the " + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "inline_equation", + "content": "l_2" + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "text", + "content": " norm is the smallest " + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "text", + "content": " such that for all " + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "inline_equation", + "content": "x, y \\in \\mathbb{R}^n" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 361, + 452, + 545, + 465 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 452, + 545, + 465 + ], + "spans": [ + { + "bbox": [ + 361, + 452, + 545, + 465 + ], + "type": "interline_equation", + "content": "\\left\\| f (x) - f (y) \\right\\| _ {2} \\leq L \\| x - y \\| _ {2}. \\tag {1}", + "image_path": "b3cb12dc14994e9e47c0b936aa3a6caa64448dfe22c6db39fe8c9f61b8710af5.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 474, + 545, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 474, + 545, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 474, + 545, + 545 + ], + "type": "text", + "content": "We also extend this definition to networks and layers, by considering the " + }, + { + "bbox": [ + 304, + 474, + 545, + 545 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 304, + 474, + 545, + 545 + ], + "type": "text", + "content": " norms of the flattened input and output tensors in Equation (1). A layer is called 1-Lipschitz if its Lipschitz constant is at most 1. For linear layers, the Lipschitz constant is equal to the spectral norm of the weight matrix that is given as" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 379, + 553, + 545, + 580 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 379, + 553, + 545, + 580 + ], + "spans": [ + { + "bbox": [ + 379, + 553, + 545, + 580 + ], + "type": "interline_equation", + "content": "\\| M \\| _ {2} = \\sup _ {\\mathbf {v} \\neq 0} \\frac {\\| M \\mathbf {v} \\| _ {2}}{\\| \\mathbf {v} \\| _ {2}}. \\tag {2}", + "image_path": "aace07611672378534cc023f6eb1fedbcbec6b75d8382aab2c44d22e65c0f579.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 588, + 545, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 588, + 545, + 684 + ], + "spans": [ + { + "bbox": [ + 304, + 588, + 545, + 684 + ], + "type": "text", + "content": "A particular class of linear 1-Lipschitz layers are ones with an orthogonal Jacobian matrix. The Jacobian matrix of a layer is the matrix of partial derivatives of the flattened outputs with respect to the flattened inputs. 
A matrix " + }, + { + "bbox": [ + 304, + 588, + 545, + 684 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 588, + 545, + 684 + ], + "type": "text", + "content": " is orthogonal if " + }, + { + "bbox": [ + 304, + 588, + 545, + 684 + ], + "type": "inline_equation", + "content": "MM^{\\top} = I" + }, + { + "bbox": [ + 304, + 588, + 545, + 684 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 588, + 545, + 684 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 588, + 545, + 684 + ], + "type": "text", + "content": " is the identity matrix. For layers with an orthogonal Jacobian, Equation (1) always holds with equality and, because of this, a lot of methods aim at constructing such 1-Lipschitz layers." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "All the neural networks analyzed in this paper consist of 1-Lipschitz parameterized layers and 1-Lipschitz activation" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24575" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "content": "functions, with no skip connections and no batch normalization. Even though the commonly used ReLU activation function is 1-Lipschitz, Anil et al. [2] showed that it reduces the expressive capability of the model. Hence, we adopt the MaxMin activation proposed by the authors and commonly used in 1-Lipschitz models. Concatenations of 1-Lipschitz functions are 1-Lipschitz, so the networks analyzed are 1-Lipschitz by construction." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 177, + 228, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 177, + 228, + 190 + ], + "spans": [ + { + "bbox": [ + 47, + 177, + 228, + 190 + ], + "type": "text", + "content": "2.1. Parameterized 1-Lipschitz Layers" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 196, + 289, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 196, + 289, + 328 + ], + "spans": [ + { + "bbox": [ + 46, + 196, + 289, + 328 + ], + "type": "text", + "content": "This section provides an overview of the existing methods for providing 1-Lipschitz layers. We discuss fundamental methods for estimating the spectral norms of linear and convolutional layers, i.e. Power Method [32] and Fantistica4 [35], and for crafting orthogonal matrices, i.e. Bjorck & Bowie [5], in Appendix A. The rest of this section describes 7 methods from the literature that construct 1-Lipschitz convolutions: BCOP, Cayley, SOC, AOL, LOT, CPL, and SLL. Further 1-Lipschitz methods, [19, 42, 47], and the reasons why they were not included in our main comparison can be found in Appendix B." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 345, + 289, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 345, + 289, + 453 + ], + "spans": [ + { + "bbox": [ + 46, + 345, + 289, + 453 + ], + "type": "text", + "content": "BCOP Block Orthogonal Convolution Parameterization (BCOP) was introduced by Li et al. in [27] to extend a previous work by Xiao et al. [45] that focused on the importance of orthogonal initialization of the weights. For a " + }, + { + "bbox": [ + 46, + 345, + 289, + 453 + ], + "type": "inline_equation", + "content": "k \\times k" + }, + { + "bbox": [ + 46, + 345, + 289, + 453 + ], + "type": "text", + "content": " convolution, BCOP uses a set of " + }, + { + "bbox": [ + 46, + 345, + 289, + 453 + ], + "type": "inline_equation", + "content": "(2k - 1)" + }, + { + "bbox": [ + 46, + 345, + 289, + 453 + ], + "type": "text", + "content": " parameter matrices. Each of these matrices is orthogonalized using the algorithm by Bjorck & Bowie [5] (see also Appendix A). Then, a " + }, + { + "bbox": [ + 46, + 345, + 289, + 453 + ], + "type": "inline_equation", + "content": "k \\times k" + }, + { + "bbox": [ + 46, + 345, + 289, + 453 + ], + "type": "text", + "content": " kernel is constructed from those matrices in a way that guarantees that the resulting layer is orthogonal." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 470, + 288, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 470, + 288, + 531 + ], + "spans": [ + { + "bbox": [ + 46, + 470, + 288, + 531 + ], + "type": "text", + "content": "Cayley Another family of orthogonal convolutional and fully connected layers has been proposed by Trockman and Kolter [40] by leveraging the Cayley Transform [8], which maps a skew-symmetric matrix " + }, + { + "bbox": [ + 46, + 470, + 288, + 531 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 470, + 288, + 531 + ], + "type": "text", + "content": " into an orthogonal matrix " + }, + { + "bbox": [ + 46, + 470, + 288, + 531 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 46, + 470, + 288, + 531 + ], + "type": "text", + "content": " using the relation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 540, + 287, + 554 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 540, + 287, + 554 + ], + "spans": [ + { + "bbox": [ + 115, + 540, + 287, + 554 + ], + "type": "interline_equation", + "content": "Q = (I - A) (I + A) ^ {- 1}. \\tag {3}", + "image_path": "2553a3fc9494a5571d54c97425beb38e87568da867c68ead9fea4b9841459b3e.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 564, + 287, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 564, + 287, + 661 + ], + "spans": [ + { + "bbox": [ + 46, + 564, + 287, + 661 + ], + "type": "text", + "content": "The transformation can be used to parameterize orthogonal weight matrices for linear layers in a straightforward way. For convolutions, the authors make use of the fact that circular padded convolutions are vector-matrix products in the Fourier domain. As long as all those vector-matrix products have orthogonal matrices, the full convolution will have an orthogonal Jacobian. For Cayley Convolutions, those matrices are orthogonalized using the Cayley transform." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": "SOC Skew Orthogonal Convolution is an orthogonal convolutional layer presented by Singla et al. [36], obtained by leveraging the exponential convolution [15]. Analogously" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 95 + ], + "type": "text", + "content": "to the matrix case, given a kernel " + }, + { + "bbox": [ + 304, + 72, + 545, + 95 + ], + "type": "inline_equation", + "content": "L\\in \\mathbb{R}^{c\\times c\\times k\\times k}" + }, + { + "bbox": [ + 304, + 72, + 545, + 95 + ], + "type": "text", + "content": ", the exponential convolution can be defined as" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 100, + 545, + 134 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 100, + 545, + 134 + ], + "spans": [ + { + "bbox": [ + 305, + 100, + 545, + 134 + ], + "type": "interline_equation", + "content": "\\exp (L) (x) := x + \\frac {L \\star x}{1} + \\frac {L \\star^ {2} x}{2 !} + \\dots + \\frac {L \\star^ {k} x}{k !} + \\dots , \\tag {4}", + "image_path": "a5c0d48402dad98fed1c19faeca6249712c99fce763649021ddb515c59139c4d.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 134, + 545, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 134, + 545, + 243 + ], + "spans": [ + { + "bbox": [ + 304, + 134, + 545, + 243 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 134, + 545, + 243 + ], + "type": "inline_equation", + "content": "\\star^k" + }, + { + "bbox": [ + 304, + 134, + 545, + 243 + ], + "type": "text", + "content": " denotes a convolution applied " + }, + { + "bbox": [ + 304, + 134, + 545, + 243 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 134, + 545, + 243 + ], + "type": "text", + "content": "-times. The authors proved that any exponential convolution has an orthogonal Jacobian matrix as long as " + }, + { + "bbox": [ + 304, + 134, + 545, + 243 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 134, + 545, + 243 + ], + "type": "text", + "content": " is skew-symmetric, providing a way of parameterizing 1-Lipschitz layers. In their work, the sum of the infinite series is approximated by computing only the first 5 terms during training and the first 12 terms during the inference, and " + }, + { + "bbox": [ + 304, + 134, + 545, + 243 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 134, + 545, + 243 + ], + "type": "text", + "content": " is normalized to have unitary spectral norm following the method presented in [35] (see Appendix A)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 256, + 545, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 256, + 545, + 292 + ], + "spans": [ + { + "bbox": [ + 304, + 256, + 545, + 292 + ], + "type": "text", + "content": "AOL Prach and Lampert [34] introduced Almost Orthogonal Lipschitz (AOL) layers. 
For any matrix " + }, + { + "bbox": [ + 304, + 256, + 545, + 292 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 304, + 256, + 545, + 292 + ], + "type": "text", + "content": ", they defined a diagonal rescaling matrix " + }, + { + "bbox": [ + 304, + 256, + 545, + 292 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 256, + 545, + 292 + ], + "type": "text", + "content": " with" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 367, + 298, + 545, + 328 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 298, + 545, + 328 + ], + "spans": [ + { + "bbox": [ + 367, + 298, + 545, + 328 + ], + "type": "interline_equation", + "content": "D _ {i i} = \\left(\\sum_ {j} \\left| P ^ {\\top} P \\right| _ {i j}\\right) ^ {- 1 / 2} \\tag {5}", + "image_path": "dd17fe6bd8e02ce96bc6827b9535ed2261dcdd37c0e46c0de67f36aa6e9ffc34.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "text", + "content": "and proved that the spectral norm of " + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "inline_equation", + "content": "PD" + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "text", + "content": " is bounded by 1. This result was used to show that the linear layer given by " + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "inline_equation", + "content": "l(x) = PDx + b" + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "text", + "content": " (where " + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "text", + "content": " is the learnable matrix and " + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "text", + "content": " is given by Eq. (5)) is 1-Lipschitz. Furthermore, the authors extended the idea so that it can also be efficiently applied to convolutions. This is done by calculating the rescaling in Equation (5) with the Jacobian " + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "text", + "content": " of a convolution instead of " + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "text", + "content": ". In order to evaluate it efficiently the authors express the elements of " + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "inline_equation", + "content": "J^{\\top}J" + }, + { + "bbox": [ + 304, + 334, + 545, + 441 + ], + "type": "text", + "content": " explicitly in terms of the kernel values." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 456, + 545, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 456, + 545, + 564 + ], + "spans": [ + { + "bbox": [ + 304, + 456, + 545, + 564 + ], + "type": "text", + "content": "LOT The layer presented by Xu et al. [46] extends the idea of [19] to use the Inverse Square Root of a matrix in order to orthogonalize it. 
Indeed, for any matrix " + }, + { + "bbox": [ + 304, + 456, + 545, + 564 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 304, + 456, + 545, + 564 + ], + "type": "text", + "content": ", the matrix " + }, + { + "bbox": [ + 304, + 456, + 545, + 564 + ], + "type": "inline_equation", + "content": "Q = V(V^T V)^{-\\frac{1}{2}}" + }, + { + "bbox": [ + 304, + 456, + 545, + 564 + ], + "type": "text", + "content": " is orthogonal. Similarly to the Cayley method, for the layer-wise orthogonal training (LOT) the convolution is applied in the Fourier frequency domain. To find the inverse square root, the authors relay on an iterative Newton Method. In details, defining " + }, + { + "bbox": [ + 304, + 456, + 545, + 564 + ], + "type": "inline_equation", + "content": "Y_0 = V^T V" + }, + { + "bbox": [ + 304, + 456, + 545, + 564 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 456, + 545, + 564 + ], + "type": "inline_equation", + "content": "Z_0 = I" + }, + { + "bbox": [ + 304, + 456, + 545, + 564 + ], + "type": "text", + "content": ", and" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 310, + 568, + 545, + 592 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 568, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 310, + 568, + 545, + 592 + ], + "type": "interline_equation", + "content": "Y _ {i + 1} = \\frac {1}{2} Y _ {i} \\left(3 I - Z _ {i} Y _ {i}\\right), Z _ {i + 1} = \\frac {1}{2} \\left(3 I - Z _ {i} Y _ {i}\\right) Z _ {i}, \\tag {6}", + "image_path": "d311f7147a23e82ba031e26e7a5e2e2e9798dc051d9b01c6f49cbeccc130ab05.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 598, + 545, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 598, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 304, + 598, + 545, + 635 + ], + "type": "text", + "content": "it can be shown that " + }, + { + "bbox": [ + 304, + 598, + 545, + 635 + ], + "type": "inline_equation", + "content": "Y_{i}" + }, + { + "bbox": [ + 304, + 598, + 545, + 635 + ], + "type": "text", + "content": " converges to " + }, + { + "bbox": [ + 304, + 598, + 545, + 635 + ], + "type": "inline_equation", + "content": "(V^{T}V)^{-\\frac{1}{2}}" + }, + { + "bbox": [ + 304, + 598, + 545, + 635 + ], + "type": "text", + "content": ". In their proposed layer, the authors apply 10 iterations of the method for both training and evaluation." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 649, + 545, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 649, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 304, + 649, + 545, + 685 + ], + "type": "text", + "content": "CPL Meunier et al. [31] proposed the Convex Potential Layer. 
Given a non-decreasing 1-Lipschitz function " + }, + { + "bbox": [ + 304, + 649, + 545, + 685 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 304, + 649, + 545, + 685 + ], + "type": "text", + "content": " (usually ReLU), the layer is constructed as" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 351, + 690, + 545, + 717 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 351, + 690, + 545, + 717 + ], + "spans": [ + { + "bbox": [ + 351, + 690, + 545, + 717 + ], + "type": "interline_equation", + "content": "l (x) = x - \\frac {2}{\\| W \\| _ {2} ^ {2}} W ^ {\\top} \\sigma (W x + b), \\tag {7}", + "image_path": "fcbf75787530640c5b17d6c7c41591c9ac266817faa79997a0454fbedbb1cba1.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "24576" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": "which is 1-Lipschitz by design. The spectral norm required to calculate " + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "inline_equation", + "content": "l(x)" + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": " is approximated using the power method (see Appendix A)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 125, + 286, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 125, + 286, + 172 + ], + "spans": [ + { + "bbox": [ + 47, + 125, + 286, + 172 + ], + "type": "text", + "content": "SLL The SDP-based Lipschitz Layers (SLL) proposed by Araujo et al. [3] combine the CPL layer with the upper bound on the spectral norm from AOL. 
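A minimal dense-layer sketch of the CPL update in Eq. (7), with the spectral norm estimated by the power method as mentioned above (hypothetical code, not the reference implementation); the SLL layer that combines this construction with the rescaling of Eq. (5) is written out next.

import torch

def spectral_norm_sq(W: torch.Tensor, power_iters: int = 50) -> torch.Tensor:
    u = torch.randn(W.shape[1])
    for _ in range(power_iters):
        u = W.t() @ (W @ u)
        u = u / u.norm()
    return (W @ u).norm() ** 2                   # estimate of the squared spectral norm

def cpl_dense(x, W, b, sq_norm):
    # Eq. (7): l(x) = x - 2 / ||W||_2^2 * W^T sigma(W x + b), with sigma = ReLU
    return x - (2.0 / sq_norm) * (W.t() @ torch.relu(W @ x + b))

W, b = torch.randn(8, 16), torch.randn(8)
s2 = spectral_norm_sq(W)
x1 = torch.randn(16)
x2 = x1 + 1e-2 * torch.randn(16)
print((cpl_dense(x1, W, b, s2) - cpl_dense(x2, W, b, s2)).norm() / (x1 - x2).norm())  # at most 1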
The layer can be written as" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 87, + 182, + 286, + 197 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 182, + 286, + 197 + ], + "spans": [ + { + "bbox": [ + 87, + 182, + 286, + 197 + ], + "type": "interline_equation", + "content": "l (x) = x - 2 W ^ {\\top} Q ^ {- 2} D ^ {2} \\sigma (W x + b), \\tag {8}", + "image_path": "26cf0b2cc7aa51fc295a98af3a9bdd9d54520fe1243fb280236fcd8f5cc69c52.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 206, + 286, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 206, + 286, + 242 + ], + "spans": [ + { + "bbox": [ + 47, + 206, + 286, + 242 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 206, + 286, + 242 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 47, + 206, + 286, + 242 + ], + "type": "text", + "content": " is a learnable diagonal matrix with positive entries and " + }, + { + "bbox": [ + 47, + 206, + 286, + 242 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 47, + 206, + 286, + 242 + ], + "type": "text", + "content": " is deduced by applying Equation (5) to " + }, + { + "bbox": [ + 47, + 206, + 286, + 242 + ], + "type": "inline_equation", + "content": "P = W^{\\top}Q^{-1}" + }, + { + "bbox": [ + 47, + 206, + 286, + 242 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 251, + 286, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 286, + 323 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 286, + 323 + ], + "type": "text", + "content": "Remark 1. Both CPL and SLL are non-linear by construction, so they can be used to construct a network without any further use of activation functions. However, carrying out some preliminary experiments, we empirically found that alternating CPL (and SLL) layers with MaxMin activation layers allows achieving a better performance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 344, + 186, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 344, + 186, + 357 + ], + "spans": [ + { + "bbox": [ + 47, + 344, + 186, + 357 + ], + "type": "text", + "content": "3. Theoretical Comparison" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 364, + 286, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 364, + 286, + 437 + ], + "spans": [ + { + "bbox": [ + 47, + 364, + 286, + 437 + ], + "type": "text", + "content": "As illustrated in the last section, various ideas and methods have been proposed to parameterize 1-Lipschitz layers. This causes the different methods to have very different properties and requirements. This section aims at highlighting the properties of the different algorithms, focusing on the algorithmic complexity and the required memory." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 443, + 286, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 443, + 286, + 611 + ], + "spans": [ + { + "bbox": [ + 47, + 443, + 286, + 611 + ], + "type": "text", + "content": "Table 1 provides an overview of the computational complexity and memory requirements for the different layers considered in the previous section. For the sake of clarity, the analysis is performed by considering separately the transformations applied to the input of the layers and those applied to the weights to ensure the 1-Lipschitz constraint. 
Each of the two sides of the table contains three columns: i) Operations contains the most costly transformations applied to the input as well as to the parameters of different layers; ii) MACS reports the computational complexity expressed in multiply-accumulate operations (MACS) involved in the transformations (only leading terms are presented); iii) Memory reports the memory required by the transformation during the training phase." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 618, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 618, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 618, + 286, + 713 + ], + "type": "text", + "content": "At training time, both input and weight transformations are required, thus the training complexity of the forward pass can be computed as the sum of the two corresponding MACS columns of the table. Similarly, the training memory requirements can be computed as the sum of the two corresponding Memory columns of the table. For the considered operations, the cost of the backward pass during training has the same computational complexity as the forward pass, and" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": "therefore increases the overall complexity by a constant factor. At inference time, all the parameter transformations can be computed just once and cached afterward. Therefore, the inference complexity is equal to the complexity due to the input transformation (column 3 in the table). At inference time, the intermediate variables are not stored in memory, hence, the memory requirements are much lower than during training. The values cannot directly be inferred from Table 1, we reported them separately in Appendix C.1." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "spans": [ + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": "Note that all the terms reported in Table 1 depend on the batch size " + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": ", the input size " + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "s \\times s \\times c" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": ", the number of inner iterations of a method " + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": ", and the kernel size " + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "k \\times k" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": ". (Often, " + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": " is different at training and inference time.) 
For the sake of clarity, the MACS of a naive convolution implementation is denoted by " + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "C = bs^2c^2k^2" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": "), the number of inputs of a layer is denoted by " + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "M = bs^2c" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": "), and the size of the kernel of a standard convolution is denoted by " + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "P = c^2k^2" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": "). Only the leading terms of the computations are reported in Table 1. In order to simplify some terms, we assume that " + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "inline_equation", + "content": "c > \\log_2(s)" + }, + { + "bbox": [ + 304, + 186, + 545, + 400 + ], + "type": "text", + "content": " and that rescaling a tensor (by a scalar) as well as adding two tensors does not require any memory in order to do backpropagation. We also assume that each additional activation does require extra memory. All these assumptions have been verified to hold within PyTorch, [33]. Also, when the algorithm described in the paper and the version provided in the supplied code differed, we considered the algorithm implemented in the code." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 407, + 545, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 407, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 304, + 407, + 545, + 491 + ], + "type": "text", + "content": "The transformations reported in the table are convolutions (CONV), Fast Fourier Transformations (FFT), matrix-vector multiplications (MV), matrix-matrix multiplications (MM), matrix inversions (INV), as well as applications of an activation function (ACT). The application of algorithms such as BJORCK & Bowie (BnB), power method, and Fantastic 4 (F4) is also reported (see Appendix A for descriptions)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 498, + 521, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 498, + 521, + 510 + ], + "spans": [ + { + "bbox": [ + 306, + 498, + 521, + 510 + ], + "type": "text", + "content": "3.1. Analysis of the computational complexity" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 516, + 545, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 516, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 304, + 516, + 545, + 624 + ], + "type": "text", + "content": "It is worth noting that the complexity of the input transformations (in Table 1) is similar for all methods. 
This implies that a similar scaling behaviour is expected at inference time for the models. Cayley and LOT apply an FFT-based convolution and have computational complexity independent of the kernel size. CPL and SLL require two convolutions, which make them slightly more expensive at inference time. Notably, SOC requires multiple convolutions, making this method more expensive at inference time." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": "At training time, parameter transformations need to be applied in addition to the input transformations during every forward pass. For SOC and CPL, the input transformations always dominate the parameter transformations in terms of computational complexity. This means the complexity scales like " + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "inline_equation", + "content": "c^2" + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": ", just like a regular convolution, with a further factor of 2 and 5 respectively. All other methods" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24577" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 81, + 124, + 514, + 259 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "text", + "content": "Table 1. Computational complexity and memory requirements of different methods. We report multiply-accumulate operations (MACS) as well as memory requirements (per layer) for batch size " + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "text", + "content": ", image size " + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "inline_equation", + "content": "s \\times s \\times c" + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "text", + "content": ", kernel size " + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "inline_equation", + "content": "k \\times k" + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "text", + "content": " and number of inner iterations " + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "text", + "content": ". 
We use " + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "inline_equation", + "content": "C = bs^2c^2k^2" + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "inline_equation", + "content": "M = bs^2c" + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "inline_equation", + "content": "P = c^2k^2" + }, + { + "bbox": [ + 46, + 70, + 547, + 117 + ], + "type": "text", + "content": ". For a detailed explanation on what is reported see Section 3. For some explanation on how the entries of this table were derived, see Appendix C." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 81, + 124, + 514, + 259 + ], + "lines": [ + { + "bbox": [ + 81, + 124, + 514, + 259 + ], + "spans": [ + { + "bbox": [ + 81, + 124, + 514, + 259 + ], + "type": "table", + "html": "
<table>
<tr><th rowspan="2">Method</th><th colspan="3">Input Transformations</th><th colspan="3">Parameter Transformations</th></tr>
<tr><th>Operations</th><th>MACS O(·)</th><th>Memory</th><th>Operations</th><th>MACS O(·)</th><th>Memory O(·)</th></tr>
<tr><td>Standard</td><td>CONV</td><td>C</td><td>M</td><td>-</td><td>-</td><td>P</td></tr>
<tr><td>AOL</td><td>CONV</td><td>C</td><td>M</td><td>CONV</td><td>c³k⁴</td><td>5P</td></tr>
<tr><td>BCOP</td><td>CONV</td><td>C</td><td>M</td><td>BnB & MMs</td><td>c³kt + c³k³</td><td>c²kt + c²k³</td></tr>
<tr><td>Cayley</td><td>FFTs & MVs</td><td>bs²c²</td><td>5/2 M</td><td>FFTs & INVs</td><td>s²c³</td><td>3/2 s²c²</td></tr>
<tr><td>CPL</td><td>CONVs & ACT</td><td>2C</td><td>3M</td><td>power method</td><td>s²c²k²</td><td>P + s²c</td></tr>
<tr><td>LOT</td><td>FFTs & MVs</td><td>bs²c²</td><td>3M</td><td>FFTs & MMs</td><td>4s²c³t</td><td>4s²c²t</td></tr>
<tr><td>SLL</td><td>CONVs & ACT</td><td>2C</td><td>3M</td><td>CONVs</td><td>c³k⁴</td><td>5P</td></tr>
<tr><td>SOC</td><td>CONVs</td><td>Ct₁</td><td>Mt₁</td><td>F4</td><td>c²k²t₂</td><td>P</td></tr>
</table>
", + "image_path": "afd9557e2704dcddf637eddd610b79ffc83740ab5a1cc54457a6b1e4c5f5f682.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 291, + 288, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 291, + 288, + 353 + ], + "spans": [ + { + "bbox": [ + 46, + 291, + 288, + 353 + ], + "type": "text", + "content": "require parameter transformations that scale like " + }, + { + "bbox": [ + 46, + 291, + 288, + 353 + ], + "type": "inline_equation", + "content": "c^3" + }, + { + "bbox": [ + 46, + 291, + 288, + 353 + ], + "type": "text", + "content": ", making them more expensive for larger architectures. In particular, we do expect Cayley and LOT to require long training times for larger models, since the complexity of their parameter transformations further depends on the input size." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 358, + 287, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 358, + 287, + 373 + ], + "spans": [ + { + "bbox": [ + 47, + 358, + 287, + 373 + ], + "type": "text", + "content": "3.2. Analysis of the training memory requirements" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 377, + 287, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 377, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 377, + 287, + 521 + ], + "type": "text", + "content": "The memory requirements of the different layers are important, since they determine the maximum batch size and the type of models we can train on a particular infrastructure. At training time, typically all intermediate results are kept in memory to perform backpropagation. This includes intermediate results for both input and parameter transformations. The input transformations usually preserve the size, and therefore the memory required is usually of " + }, + { + "bbox": [ + 46, + 377, + 287, + 521 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(M)" + }, + { + "bbox": [ + 46, + 377, + 287, + 521 + ], + "type": "text", + "content": ". Therefore, for the input transformations, all methods require memory not more than a constant factor worse than standard convolutions, with the worst method being SOC, with a constant " + }, + { + "bbox": [ + 46, + 377, + 287, + 521 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 46, + 377, + 287, + 521 + ], + "type": "text", + "content": ", typically equal to 5." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 527, + 287, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 527, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 46, + 527, + 287, + 612 + ], + "type": "text", + "content": "In addition to the input transformation, we also need to store intermediate results of the parameter transformations in memory in order to evaluate the gradients. Again, most methods approximately preserve the sizes during the parameter transformations, and therefore the memory required is usually of order " + }, + { + "bbox": [ + 46, + 527, + 287, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(P)" + }, + { + "bbox": [ + 46, + 527, + 287, + 612 + ], + "type": "text", + "content": ". 
Exceptions to this rule are Cayley and LOT, with a larger " + }, + { + "bbox": [ + 46, + 527, + 287, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(s^2 c^2)" + }, + { + "bbox": [ + 46, + 527, + 287, + 612 + ], + "type": "text", + "content": " term, as well as BCOP." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 621, + 165, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 621, + 165, + 635 + ], + "spans": [ + { + "bbox": [ + 47, + 621, + 165, + 635 + ], + "type": "text", + "content": "4. Experimental Setup" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 641, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 288, + 715 + ], + "type": "text", + "content": "This section presents an experimental study aimed at comparing the performance of the considered layers with respect to different metrics. Before presenting the results, we first summarize the setup used in our experiments. For a detailed description see Appendix E. To have a fair and meaningful comparison among the various models, all the" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 291, + 547, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 291, + 547, + 376 + ], + "spans": [ + { + "bbox": [ + 304, + 291, + 547, + 376 + ], + "type": "text", + "content": "proposed layers have been evaluated using the same architecture, loss function, and optimizer. Since, according to the data reported in Table 1, different layers may have different throughput, to have a fair comparison with respect to the tested metrics, we limited the total training time instead of fixing the number of training epochs. Results are reported for training times of " + }, + { + "bbox": [ + 304, + 291, + 547, + 376 + ], + "type": "inline_equation", + "content": "2\\mathrm{h}" + }, + { + "bbox": [ + 304, + 291, + 547, + 376 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 291, + 547, + 376 + ], + "type": "inline_equation", + "content": "10\\mathrm{h}" + }, + { + "bbox": [ + 304, + 291, + 547, + 376 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 291, + 547, + 376 + ], + "type": "inline_equation", + "content": "24\\mathrm{h}" + }, + { + "bbox": [ + 304, + 291, + 547, + 376 + ], + "type": "text", + "content": " on one A100 GPU." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 384, + 547, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 384, + 547, + 458 + ], + "spans": [ + { + "bbox": [ + 304, + 384, + 547, + 458 + ], + "type": "text", + "content": "Our architecture is a standard convolutional network that doubles the number of channels whenever the resolution is reduced [6, 40]. For each method, we tested architectures of different sizes. We denoted them as XS, S, M and L, depending on the number of parameters, according to the criteria in Table 7, ranging from 1.5M to 100M parameters." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 476, + 547, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 476, + 547, + 586 + ], + "spans": [ + { + "bbox": [ + 304, + 476, + 547, + 586 + ], + "type": "text", + "content": "Since different methods benefit from different learning rates and weight decay, for each setting (model size, method and dataset), we used the best values resulting from a random search performed on multiple training runs on a validation set composed of " + }, + { + "bbox": [ + 304, + 476, + 547, + 586 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 304, + 476, + 547, + 586 + ], + "type": "text", + "content": " of the original training set. More specifically, 16 runs were performed for each configuration of randomly sampled hyperparameters, and we selected the configuration maximizing the certified robust accuracy w.r.t. " + }, + { + "bbox": [ + 304, + 476, + 547, + 586 + ], + "type": "inline_equation", + "content": "\\epsilon = 36 / 255" + }, + { + "bbox": [ + 304, + 476, + 547, + 586 + ], + "type": "text", + "content": " (see Appendix E.4 for details)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 594, + 548, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 548, + 716 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 548, + 716 + ], + "type": "text", + "content": "The evaluation was carried out using four different datasets: CIFAR-10, CIFAR-100 [21], Tiny ImageNet [23], and Imagenette [16] for large images. Augmentation was used during the training (Random crops and flips on CIFAR-10 and CIFAR-100, RandAugment [11] on Tiny ImageNet, and random crop as well as RandAugment on Imagenette), details in Appendix E.5. We use the loss function proposed by [34], with same temperature 0.25, and where we tuned the margin to maximize the robust accuracy for " + }, + { + "bbox": [ + 304, + 594, + 548, + 716 + ], + "type": "inline_equation", + "content": "\\epsilon = \\frac{36}{255}" + }, + { + "bbox": [ + 304, + 594, + 548, + 716 + ], + "type": "text", + "content": ". In detail, we considered a margin of " + }, + { + "bbox": [ + 304, + 594, + 548, + 716 + ], + "type": "inline_equation", + "content": "2\\sqrt{2}\\epsilon" + }, + { + "bbox": [ + 304, + 594, + 548, + 716 + ], + "type": "text", + "content": " where" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24578" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": "the " + }, + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "inline_equation", + "content": "\\sqrt{2}" + }, + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": " factor comes from the " + }, + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": " norm [41], and the factor 2 has been added to help with generalization." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 105, + 105, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 105, + 105, + 118 + ], + "spans": [ + { + "bbox": [ + 47, + 105, + 105, + 118 + ], + "type": "text", + "content": "4.1. Metrics" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 124, + 286, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 124, + 286, + 160 + ], + "spans": [ + { + "bbox": [ + 47, + 124, + 286, + 160 + ], + "type": "text", + "content": "All the considered models were evaluated based on three main metrics: the throughput, the required memory, and the certified robust accuracy." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 178, + 286, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 178, + 286, + 262 + ], + "spans": [ + { + "bbox": [ + 46, + 178, + 286, + 262 + ], + "type": "text", + "content": "Throughput and epoch time The throughput of a model is the average number of examples that the model can process per second. It determines how many epochs are processed in a given time frame. The evaluation of the throughput was performed on an 80GB-A100-GPU based on the average time of 100 mini-batches. We measured the inference throughput with cached parameter transformations." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 280, + 286, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 280, + 286, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 280, + 286, + 460 + ], + "type": "text", + "content": "Memory required Layers that require less memory allow for larger batch size, and the memory requirements also determine the type of hardware we can train a model on. For each model, we measured and reported the maximal GPU memory occupied by tensors using the function torch.cuda.max_memory_allocated() provided by the PyTorch framework. This is not exactly equal to the overall GPU memory requirement but gives a fairly good approximation of it. Note that the model memory measured in this way also includes additional memory required by the optimizer (e.g. to store the momentum term) as well as by the activation layers in the forward pass. However, this additional memory should be at most of order " + }, + { + "bbox": [ + 46, + 280, + 286, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(M + P)" + }, + { + "bbox": [ + 46, + 280, + 286, + 460 + ], + "type": "text", + "content": ". As for the throughput, we evaluated and cached all calculations independent of the input at inference time." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "spans": [ + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "text", + "content": "Certified robust accuracy In order to evaluate the performance of a 1-Lipschitz network, the standard metric is the certified robust accuracy. 
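The peak-memory measurement described above can be reproduced roughly as follows, using the torch.cuda.max_memory_allocated() call named in the text (the model and batch below are placeholders, not the authors' benchmark script); the certified robust accuracy metric is made precise next.

import torch

def peak_training_memory_bytes(model, batch):
    torch.cuda.reset_peak_memory_stats()
    loss = model(batch).sum()                    # any scalar surrogate loss
    loss.backward()                              # include the backward pass in the measurement
    return torch.cuda.max_memory_allocated()

# Hypothetical usage, assuming a CUDA device and some 1-Lipschitz model:
# model = build_model().cuda()
# batch = torch.randn(256, 3, 32, 32, device="cuda")
# print(peak_training_memory_bytes(model, batch) / 2**20, "MiB")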
An input is classified certifiably robustly with radius " + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "text", + "content": " by a model, if no perturbations of the input with norm bounded by " + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "text", + "content": " can change the prediction of the model. Certified robust accuracy measures the proportion of examples that are classified correctly as well as certifiably robustly. For 1-Lipschitz models, a lower bound of the certified " + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "text", + "content": "-robust accuracy is the portion of correctly classified inputs such that " + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_f(x_i, l_i) > \\epsilon \\sqrt{2}" + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "text", + "content": " where the margin " + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_f(x, l)" + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "text", + "content": " of a model " + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "text", + "content": " at input " + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "text", + "content": " with label " + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "text", + "content": ", given as " + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_f(x, l) = f(x)_l - \\max_{j \\neq l} f_j(x)" + }, + { + "bbox": [ + 46, + 476, + 286, + 644 + ], + "type": "text", + "content": ", is the difference between target class score and the highest score of a different class. For details, see [41]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 656, + 173, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 656, + 173, + 671 + ], + "spans": [ + { + "bbox": [ + 47, + 656, + 173, + 671 + ], + "type": "text", + "content": "5. Experimental Results" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 677, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 286, + 713 + ], + "type": "text", + "content": "This section presents the results of the comparison performed by applying the methodology discussed in Section 4. The results related to the different metrics are dis" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "text", + "content": "cussed in dedicated subsections and the key takeaways are summarized in the radar-plot illustrated in Figure 1." 
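A small sketch of the lower bound on certified robust accuracy described above: the fraction of inputs that are classified correctly with margin larger than ε√2 (hypothetical code following the stated definition, not the authors' evaluation script).

import torch

def certified_robust_accuracy(logits: torch.Tensor, labels: torch.Tensor, eps: float = 36 / 255) -> float:
    correct = logits.argmax(dim=1) == labels
    target = logits.gather(1, labels.unsqueeze(1)).squeeze(1)
    others = logits.clone()
    others.scatter_(1, labels.unsqueeze(1), float("-inf"))
    margin = target - others.max(dim=1).values   # M_f(x, l) = f(x)_l - max_{j != l} f_j(x)
    return (correct & (margin > eps * 2 ** 0.5)).float().mean().item()

logits = torch.randn(128, 10)                    # stand-in for the scores f(x) on a batch
labels = torch.randint(0, 10, (128,))
print(certified_robust_accuracy(logits, labels))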
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 104, + 463, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 104, + 463, + 117 + ], + "spans": [ + { + "bbox": [ + 306, + 104, + 463, + 117 + ], + "type": "text", + "content": "5.1. Training and inference times" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 122, + 545, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 122, + 545, + 243 + ], + "spans": [ + { + "bbox": [ + 304, + 122, + 545, + 243 + ], + "type": "text", + "content": "Figure 2 plots the training time per epoch of the different models as a function of their size, and Figure 3 plots the corresponding inference throughput for the various sizes as described in Section 4. As described in Table 5, the model base width, referred to as " + }, + { + "bbox": [ + 304, + 122, + 545, + 243 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 122, + 545, + 243 + ], + "type": "text", + "content": ", is doubled from one model size to the next. We expect the training and inference time to scale with " + }, + { + "bbox": [ + 304, + 122, + 545, + 243 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 122, + 545, + 243 + ], + "type": "text", + "content": " similarly to how individual layers scale with their number of channels, " + }, + { + "bbox": [ + 304, + 122, + 545, + 243 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 122, + 545, + 243 + ], + "type": "text", + "content": " (in Table 1). This is because the width of each of the 5 blocks of our architecture is a constant multiple of the base width, " + }, + { + "bbox": [ + 304, + 122, + 545, + 243 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 122, + 545, + 243 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 258, + 545, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 258, + 545, + 354 + ], + "spans": [ + { + "bbox": [ + 304, + 258, + 545, + 354 + ], + "type": "text", + "content": "The training time increases (at most) about linearly with " + }, + { + "bbox": [ + 304, + 258, + 545, + 354 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 258, + 545, + 354 + ], + "type": "text", + "content": " for standard convolutions, whereas the computational complexity of each single convolution scales like " + }, + { + "bbox": [ + 304, + 258, + 545, + 354 + ], + "type": "inline_equation", + "content": "c^2" + }, + { + "bbox": [ + 304, + 258, + 545, + 354 + ], + "type": "text", + "content": ". This suggests that parallelism on the GPU and the overhead from other operations (activations, parameter updates, etc.) are important factors determining the training time. 
This also explains why CPL (doing two convolutions, with identical kernel parameters) is only slightly slower than a standard" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 309, + 373, + 542, + 492 + ], + "blocks": [ + { + "bbox": [ + 309, + 373, + 542, + 492 + ], + "lines": [ + { + "bbox": [ + 309, + 373, + 542, + 492 + ], + "spans": [ + { + "bbox": [ + 309, + 373, + 542, + 492 + ], + "type": "image", + "image_path": "57d3c18451c1ea846b814cee2491a28c67e9a17297d4c0a67667e8c25ce058f9.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 504, + 545, + 525 + ], + "lines": [ + { + "bbox": [ + 306, + 504, + 545, + 525 + ], + "spans": [ + { + "bbox": [ + 306, + 504, + 545, + 525 + ], + "type": "text", + "content": "Figure 2. Training time per epoch (on CIFAR-10) for different methods and different model sizes." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 309, + 540, + 542, + 665 + ], + "blocks": [ + { + "bbox": [ + 309, + 540, + 542, + 665 + ], + "lines": [ + { + "bbox": [ + 309, + 540, + 542, + 665 + ], + "spans": [ + { + "bbox": [ + 309, + 540, + 542, + 665 + ], + "type": "image", + "image_path": "e182984c7e9505b9a017c7b606b0085ad930a7ceb922c7e77489e2d91a497b54.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 677, + 545, + 709 + ], + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 709 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 709 + ], + "type": "text", + "content": "Figure 3. Inference throughput for different methods as a function of their size for CIFAR-10 sizes input images. All parameter transformations have been evaluated and cached beforehand" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24579" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "content": "convolution, and SOC (doing 5 convolutions) is only about 3 times slower than the standard convolution. The AOL and SLL methods also require times comparable to a standard convolution for small models, although eventually, the " + }, + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "inline_equation", + "content": "c^3" + }, + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "content": " term in the computation of the rescaling makes them slower for larger models. Finally, Cayley, LOT, and BCOP methods take much longer training times per epoch. For Cayley and LOT this behavior was expected, as they have a large " + }, + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(s^2c^3)" + }, + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "content": " term in their computational complexity. See Table 1 for further details." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 206, + 289, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 206, + 289, + 397 + ], + "spans": [ + { + "bbox": [ + 46, + 206, + 289, + 397 + ], + "type": "text", + "content": "At inference time transformations of the weights are cached, therefore some methods (AOL, BCOP) do not have any overhead compared to a standard convolution. As expected, other methods (CPL, SLL, and SOC) that apply additional convolutions to the input suffer from a corresponding overhead. Finally, Cayley and LOT have a slightly different throughput due to their FFT-based convolution. Among them, Cayley is about twice as fast because it involves a real-valued FFT rather than a complex-valued one. From Figure 3, it can be noted that cached Cayley and CPL have the same inference time, even though CPL uses twice the number of convolutions. We believe this is due to the fact that the conventional FFT-based convolution is quite efficient for large kernel sizes, but for smaller ones PyTorch implements a faster algorithm, i.e., Winograd, [22], that can be up to 2.5 times faster." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 403, + 217, + 416 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 403, + 217, + 416 + ], + "spans": [ + { + "bbox": [ + 47, + 403, + 217, + 416 + ], + "type": "text", + "content": "5.2. Training memory requirements" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 422, + 287, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 422, + 287, + 661 + ], + "spans": [ + { + "bbox": [ + 46, + 422, + 287, + 661 + ], + "type": "text", + "content": "The training and inference memory requirements of the various models (measured as described in Section 4.1) are reported in Figure 4 as a function of the model size. The results of the theoretical analysis reported in Table 1 suggest that the training memory requirements always have a term linear in the number of channels " + }, + { + "bbox": [ + 46, + 422, + 287, + 661 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 46, + 422, + 287, + 661 + ], + "type": "text", + "content": " (usually the activations from the forward pass), as well as a term quadratic in " + }, + { + "bbox": [ + 46, + 422, + 287, + 661 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 46, + 422, + 287, + 661 + ], + "type": "text", + "content": " (usually the weights and all transformations applied to the weights during the forward pass). This behavior can also be observed from Figure 4. For some of the models, the memory required approximately doubles from one model size to the next one, just like the width. This means that the linear term dominates (for those sizes), which makes those models relatively cheap to scale up. For the BCOP, LOT, and Cayley methods, the larger coefficients in the " + }, + { + "bbox": [ + 46, + 422, + 287, + 661 + ], + "type": "inline_equation", + "content": "c^2" + }, + { + "bbox": [ + 46, + 422, + 287, + 661 + ], + "type": "text", + "content": " term (for LOT and Cayley the coefficient is even dependent on the input size, " + }, + { + "bbox": [ + 46, + 422, + 287, + 661 + ], + "type": "inline_equation", + "content": "s^2" + }, + { + "bbox": [ + 46, + 422, + 287, + 661 + ], + "type": "text", + "content": ") cause this term to dominate. This makes it much harder to scale those methods to more parameters. 
Method LOT requires huge amounts of memory, in particular LOT-L is too large to fit in 80GB GPU memory." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 666, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 666, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 666, + 288, + 714 + ], + "type": "text", + "content": "Note that at test time, the memory requirements are much lower, because the intermediate activation values do not need to be stored, as there is no backward pass. Therefore, at inference time, most methods require a very similar" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 309, + 72, + 544, + 192 + ], + "blocks": [ + { + "bbox": [ + 309, + 72, + 544, + 192 + ], + "lines": [ + { + "bbox": [ + 309, + 72, + 544, + 192 + ], + "spans": [ + { + "bbox": [ + 309, + 72, + 544, + 192 + ], + "type": "image", + "image_path": "0bc1c9dba0306e02c68b9d7cd804a7f5810379f0550caa35bcec7add3e5d0843.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 309, + 196, + 543, + 315 + ], + "blocks": [ + { + "bbox": [ + 309, + 196, + 543, + 315 + ], + "lines": [ + { + "bbox": [ + 309, + 196, + 543, + 315 + ], + "spans": [ + { + "bbox": [ + 309, + 196, + 543, + 315 + ], + "type": "image", + "image_path": "91276288aaa8dec025d6135b4f23eb42d943ec060f40240a92d2d8d7c9b1f44f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 326, + 546, + 348 + ], + "lines": [ + { + "bbox": [ + 305, + 326, + 546, + 348 + ], + "spans": [ + { + "bbox": [ + 305, + 326, + 546, + 348 + ], + "type": "text", + "content": "Figure 4. Memory required at training and inference time for input size " + }, + { + "bbox": [ + 305, + 326, + 546, + 348 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 305, + 326, + 546, + 348 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 369, + 545, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 369, + 545, + 418 + ], + "spans": [ + { + "bbox": [ + 305, + 369, + 545, + 418 + ], + "type": "text", + "content": "amount of memory as a standard convolution. The Cayley and LOT methods require more memory since they perform the calculation in the Fourier space, creating an intermediate representation of the weight matrices of size " + }, + { + "bbox": [ + 305, + 369, + 545, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(s^2 c^2)" + }, + { + "bbox": [ + 305, + 369, + 545, + 418 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 426, + 447, + 438 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 426, + 447, + 438 + ], + "spans": [ + { + "bbox": [ + 306, + 426, + 447, + 438 + ], + "type": "text", + "content": "5.3. Certified robust accuracy" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 444, + 546, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 444, + 546, + 696 + ], + "spans": [ + { + "bbox": [ + 304, + 444, + 546, + 696 + ], + "type": "text", + "content": "The results related to the accuracy and the certified robust accuracy for the different methods, model sizes, and datasets measured on a 24h training budget are summarized in Table 2. 
The differences among the various model sizes are also highlighted in Figure 6 by reporting the sorted values of the certified robust accuracy. Further tables and plots relative to different training budgets can be found in Appendix G. The reader can compare our results with the state-of-the-art certified robust accuracy summarized in Appendix D. However, it is worth noting that, to reach state-of-the-art performance, authors often carry out experiments using large model sizes and long training times, which makes it hard to compare the methods themselves. On the other hand, the evaluation proposed in this paper allows a fairer comparison among the different methods, since it also considers timing and memory aspects. This restriction based on time, rather than the number of epochs, ensures that merely enlarging the model size does not lead to improved performance, as bigger models typically process fewer epochs of data. Indeed, in our results in Figure 6 it is usually the M (and not the L) model that performs best." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 318, + 701, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 701, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 318, + 701, + 545, + 714 + ], + "type": "text", + "content": "Experiments show that SOC performs best, reaching the" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24580" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 102, + 285, + 528 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 287, + 93 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 287, + 93 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 287, + 93 + ], + "type": "text", + "content": "Table 2. Certified robust accuracy for radius " + }, + { + "bbox": [ + 47, + 70, + 287, + 93 + ], + "type": "inline_equation", + "content": "\\epsilon = 36 / 255" + }, + { + "bbox": [ + 47, + 70, + 287, + 93 + ], + "type": "text", + "content": " on the evaluated datasets. Training is performed for 24 hours." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 102, + 285, + 528 + ], + "lines": [ + { + "bbox": [ + 50, + 102, + 285, + 528 + ], + "spans": [ + { + "bbox": [ + 50, + 102, + 285, + 528 + ], + "type": "table", + "html": "
<table>
<tr><th rowspan="2">Methods</th><th colspan="4">Accuracy [%]</th><th colspan="4">Robust Accuracy [%]</th></tr>
<tr><th>XS</th><th>S</th><th>M</th><th>L</th><th>XS</th><th>S</th><th>M</th><th>L</th></tr>
<tr><td colspan="9">CIFAR-10</td></tr>
<tr><td>AOL</td><td>71.7</td><td>73.6</td><td>73.4</td><td>73.7</td><td>59.1</td><td>60.8</td><td>61.0</td><td>61.5</td></tr>
<tr><td>BCOP</td><td>71.7</td><td>73.1</td><td>74.0</td><td>74.6</td><td>58.5</td><td>59.3</td><td>60.5</td><td>61.5</td></tr>
<tr><td>CPL</td><td>74.9</td><td>76.1</td><td>76.6</td><td>76.8</td><td>62.5</td><td>64.2</td><td>65.1</td><td>65.2</td></tr>
<tr><td>Cayley</td><td>73.1</td><td>74.2</td><td>74.4</td><td>73.6</td><td>59.5</td><td>61.1</td><td>61.0</td><td>60.1</td></tr>
<tr><td>LOT</td><td>75.5</td><td>76.6</td><td>72.0</td><td>-</td><td>63.4</td><td>64.6</td><td>58.7</td><td>-</td></tr>
<tr><td>SLL</td><td>73.7</td><td>74.2</td><td>75.3</td><td>74.3</td><td>61.0</td><td>62.0</td><td>62.8</td><td>62.3</td></tr>
<tr><td>SOC</td><td>74.1</td><td>75.0</td><td>76.9</td><td>76.9</td><td>61.3</td><td>62.9</td><td>66.3</td><td>65.4</td></tr>
<tr><td colspan="9">CIFAR-100</td></tr>
<tr><td>AOL</td><td>40.3</td><td>43.4</td><td>44.3</td><td>41.9</td><td>27.9</td><td>31.0</td><td>31.4</td><td>29.7</td></tr>
<tr><td>BCOP</td><td>41.4</td><td>42.8</td><td>43.7</td><td>42.2</td><td>28.4</td><td>30.1</td><td>31.2</td><td>29.2</td></tr>
<tr><td>CPL</td><td>42.3</td><td>-</td><td>45.2</td><td>44.3</td><td>30.1</td><td>-</td><td>33.2</td><td>32.1</td></tr>
<tr><td>Cayley</td><td>42.3</td><td>43.9</td><td>43.5</td><td>42.9</td><td>29.2</td><td>30.5</td><td>30.5</td><td>29.5</td></tr>
<tr><td>LOT</td><td>43.5</td><td>45.2</td><td>42.8</td><td>-</td><td>30.8</td><td>32.5</td><td>29.6</td><td>-</td></tr>
<tr><td>SLL</td><td>41.4</td><td>42.8</td><td>42.4</td><td>42.1</td><td>28.9</td><td>30.5</td><td>29.9</td><td>29.6</td></tr>
<tr><td>SOC</td><td>43.1</td><td>45.2</td><td>47.3</td><td>46.2</td><td>30.6</td><td>32.6</td><td>34.9</td><td>33.5</td></tr>
<tr><td colspan="9">Tiny ImageNet</td></tr>
<tr><td>AOL</td><td>26.6</td><td>29.3</td><td>30.3</td><td>30.0</td><td>18.1</td><td>19.7</td><td>21.0</td><td>20.6</td></tr>
<tr><td>BCOP</td><td>22.4</td><td>26.2</td><td>27.6</td><td>27.0</td><td>13.8</td><td>16.9</td><td>17.2</td><td>16.8</td></tr>
<tr><td>CPL</td><td>28.3</td><td>29.3</td><td>29.8</td><td>30.3</td><td>18.9</td><td>19.7</td><td>20.3</td><td>20.1</td></tr>
<tr><td>Cayley</td><td>27.8</td><td>29.6</td><td>30.1</td><td>27.2</td><td>17.9</td><td>19.5</td><td>19.3</td><td>16.7</td></tr>
<tr><td>LOT</td><td>30.7</td><td>32.5</td><td>28.8</td><td>-</td><td>20.8</td><td>21.9</td><td>18.1</td><td>-</td></tr>
<tr><td>SLL</td><td>25.1</td><td>27.0</td><td>26.5</td><td>27.9</td><td>16.6</td><td>18.4</td><td>17.7</td><td>18.8</td></tr>
<tr><td>SOC</td><td>28.9</td><td>28.8</td><td>32.1</td><td>32.1</td><td>18.9</td><td>18.8</td><td>21.2</td><td>21.1</td></tr>
<tr><td colspan="9">Imagenette</td></tr>
<tr><td>AOL</td><td>80.8</td><td>83.7</td><td>82.8</td><td>76.8</td><td>79.9</td><td>78.5</td></tr>
<tr><td>BCOP</td><td>81.2</td><td>84.5</td><td>9.8</td><td>75.6</td><td>80.1</td><td>9.8</td></tr>
<tr><td>CPL</td><td>85.5</td><td>86.5</td><td>86.4</td><td>80.8</td><td>82.4</td><td>82.3</td></tr>
<tr><td>Cayley</td><td>81.2</td><td>77.9</td><td>-</td><td>75.8</td><td>71.7</td><td>-</td></tr>
<tr><td>SLL</td><td>80.8</td><td>83.4</td><td>79.3</td><td>75.4</td><td>78.0</td><td>72.8</td></tr>
<tr><td>SOC</td><td>80.6</td><td>83.6</td><td>79.0</td><td>74.7</td><td>78.4</td><td>73.5</td></tr>
</table>
", + "image_path": "a4bc951066e0f4307779b0046d12277da20d1f6f2f589d4e1c500650d10b3f5f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 557, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 557, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 557, + 288, + 713 + ], + "type": "text", + "content": "highest certified robust accuracy on two datasets. CPL models consistently rank in top-10 position among the three datasets. LOT performed well, in particular on Tiny ImageNet dataset where it performs the best. AOL did not reach high accuracy on CIFAR-10, but reached more competitive results on Tiny ImageNet. An opposite effect can be observed for SLL, which performance seems to strongly depend on the number of classes. BCOP only reach the top-10 once, while Cayley is consistently outperformed by the other methods. The very same analysis can be applied to the clean accuracy, whose sorted bar-plots are reported in Appendix G, where the main difference is that Cayley performs slightly better for that metric. Furthermore, it is worth" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": "highlighting that CPL is sensitive to weight initialization. We faced numerical errors during the 10h and 24h training of the small model on CIFAR-100. On Imagenette, CPL clearly performs best, followed by BCOP and AOL. Note that these methods all construct a kernel so that the convolution is 1-Lipschitz. This seems to be good strategy for higher resolution datasets. E.g. SOC, that instead applies multiple convolutions has a drop in performs compared to other datasets." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 305, + 201, + 457, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 201, + 457, + 213 + ], + "spans": [ + { + "bbox": [ + 305, + 201, + 457, + 213 + ], + "type": "text", + "content": "5.3.1 Interpretation of the results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 220, + 545, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 220, + 545, + 318 + ], + "spans": [ + { + "bbox": [ + 304, + 220, + 545, + 318 + ], + "type": "text", + "content": "We confirm empirically what suspected in [46]: layers that naturally include a skip connections (CPL, SLL, SOC) generally perform better than layers that do not have this ability. Furthermore, we noticed that layers with an identity initialization (AOL, LOT) perform better than layers that do neither (BCOP, Cayley). Presumably this is due to the MaxMin activation reducing the variance in the forward pass when alternated with non-identity layers." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 326, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 326, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 304, + 326, + 545, + 434 + ], + "type": "text", + "content": "Our results also allow ruling out some other possible explanation: one might suspect that pure contractive layers (AOL, CPL, and SLL) would suffer from vanishing gradients, differently from orthogonal ones, however, our experiments do not show any evidence of this fact. 
Furthermore, one might suspect that slower methods perform worse, because they allow fewer epochs for a given time budget, however, our experiments do not support this fact; two relative slow methods (SOC, LOT) are among the best ones." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 456, + 463, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 456, + 463, + 468 + ], + "spans": [ + { + "bbox": [ + 306, + 456, + 463, + 468 + ], + "type": "text", + "content": "6. Conclusions and Guidelines" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 477, + 545, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 477, + 545, + 561 + ], + "spans": [ + { + "bbox": [ + 304, + 477, + 545, + 561 + ], + "type": "text", + "content": "This work presented a comparative study of state-of-the-art 1-Lipschitz layers under the lens of different metrics, such as time and memory requirements, accuracy, and certified robust accuracy, all evaluated at training and inference time. A theoretical comparison of the methods in terms of time and memory complexity was also presented and validated by experiments." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": "Taking all metrics into account (summarized in Figure 1), the results are in favor of CPL, due to its highest performance and lower consumption of computational resources. When large computational resources are available and the application does not impose stringent timing constraints during inference and training, the SOC layer could be used, due to its slightly better performance. Finally, those applications in which the inference time is crucial may take advantage of AOL or BCOP, which do not introduce additional runtime overhead (during inference) compared to a standard convolution. For higher resolution images, it also seems that CPL is the most promising method." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24581" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "text", + "content": "[1] Thomas Altstidl, David Dobre, Björn Eskofier, Gauthier Gidel, and Leo Schwinn. Raising the bar for certified adversarial robustness with diffusion models. arXiv preprint arXiv:2305.10388, 2023. 
14, 22" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 134, + 287, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 134, + 287, + 166 + ], + "spans": [ + { + "bbox": [ + 53, + 134, + 287, + 166 + ], + "type": "text", + "content": "[2] Cem Anil, James Lucas, and Roger Grosse. Sorting out Lipschitz function approximation. In International Conference on Machine Learning (ICML), 2019. 2, 3, 11" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 166, + 288, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 166, + 288, + 209 + ], + "spans": [ + { + "bbox": [ + 53, + 166, + 288, + 209 + ], + "type": "text", + "content": "[3] Alexandre Araujo, Aaron J Havens, Blaise Delattre, Alexandre Allauzen, and Bin Hu. A unified algebraic perspective on Lipschitz neural networks. In International Conference on Learning Representations (ICLR), 2023. 4, 14, 22" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 209, + 288, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 209, + 288, + 262 + ], + "spans": [ + { + "bbox": [ + 53, + 209, + 288, + 262 + ], + "type": "text", + "content": "[4] Battista Biggio, Igino Corona, Davide Maiorca, Blaine Nelson, Nedim Šrndić, Pavel Laskov, Giorgio Giacinto, and Fabio Roli. Evasion attacks against machine learning at test time. In Machine Learning and Knowledge Discovery in Databases, 2013. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 262, + 288, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 262, + 288, + 294 + ], + "spans": [ + { + "bbox": [ + 53, + 262, + 288, + 294 + ], + "type": "text", + "content": "[5] Å. Björck and C. Bowie. An iterative algorithm for computing the best estimate of an orthogonal matrix. SIAM Journal on Numerical Analysis, 1971. 3, 11" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 294, + 288, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 294, + 288, + 338 + ], + "spans": [ + { + "bbox": [ + 53, + 294, + 288, + 338 + ], + "type": "text", + "content": "[6] Fabio Brau, Giulio Rossolini, Alessandro Biondi, and Giorgio Buttazzo. Robust-by-design classification via unitary-gradient neural networks. Proceedings of the AAAI Conference on Artificial Intelligence, 2023. 2, 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 338, + 288, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 338, + 288, + 380 + ], + "spans": [ + { + "bbox": [ + 53, + 338, + 288, + 380 + ], + "type": "text", + "content": "[7] Nicholas Carlini, Florian Tramer, Krishnamurthy Dj Dvijotham, Leslie Rice, Mingjie Sun, and J Zico Kolter. (Certified!!) adversarial robustness for free! In International Conference on Learning Representations (ICLR), 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 380, + 288, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 380, + 288, + 423 + ], + "spans": [ + { + "bbox": [ + 53, + 380, + 288, + 423 + ], + "type": "text", + "content": "[8] Arthur Cayley. About the algebraic structure of the orthogonal group and the other classical groups in a field of characteristic zero or a prime characteristic. Journal für die reine und angewandte Mathematik, 1846. 
3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 423, + 288, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 423, + 288, + 466 + ], + "spans": [ + { + "bbox": [ + 53, + 423, + 288, + 466 + ], + "type": "text", + "content": "[9] Moustapha Cisse, Piotr Bojanowski, Edouard Grave, Yann Dauphin, and Nicolas Usunier. Parseval networks: Improving robustness to adversarial examples. In International conference on machine learning, 2017. 2, 11" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 466, + 288, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 466, + 288, + 509 + ], + "spans": [ + { + "bbox": [ + 48, + 466, + 288, + 509 + ], + "type": "text", + "content": "[10] Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. Certified adversarial robustness via randomized smoothing. In Proceedings of the 36th International Conference on Machine Learning, 2019. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 509, + 288, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 509, + 288, + 563 + ], + "spans": [ + { + "bbox": [ + 48, + 509, + 288, + 563 + ], + "type": "text", + "content": "[11] Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops, 2020. 5, 16" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 563, + 288, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 563, + 288, + 605 + ], + "spans": [ + { + "bbox": [ + 48, + 563, + 288, + 605 + ], + "type": "text", + "content": "[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Conference on Computer Vision and Pattern Recognition (CVPR), 2009. 16" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 605, + 288, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 605, + 288, + 638 + ], + "spans": [ + { + "bbox": [ + 48, + 605, + 288, + 638 + ], + "type": "text", + "content": "[13] Farzan Farnia, Jesse Zhang, and David Tse. Generalizable adversarial training via spectral normalization. In International Conference on Learning Representations, 2018. 11" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 638, + 288, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 638, + 288, + 669 + ], + "spans": [ + { + "bbox": [ + 48, + 638, + 288, + 669 + ], + "type": "text", + "content": "[14] Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. stat, 2015. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 669, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 288, + 712 + ], + "type": "text", + "content": "[15] Emiel Hoogeboom, Victor Garcia Satorras, Jakub Tomczak, and Max Welling. The convolution exponential and generalized Sylvester flows. In Advances in Neural Information Processing Systems, 2020. 
3" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 704 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "type": "text", + "content": "[16] Jeremy Howard. Imagenette. https://github.com/fastai/imagenette/. Accessed: 01.02.2024. 5, 16" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 95, + 545, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 95, + 545, + 137 + ], + "spans": [ + { + "bbox": [ + 307, + 95, + 545, + 137 + ], + "type": "text", + "content": "[17] Kai Hu, Klas Leino, Zifan Wang, and Matt Fredrikson. Effectively leveraging capacity for improved deterministic robustness certification. In International Conference on Learning Representations (ICLR), 2024. 14, 22" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 137, + 545, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 137, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 307, + 137, + 545, + 180 + ], + "type": "text", + "content": "[18] Kai Hu, Andy Zou, Zifan Wang, Klas Leino, and Matt Fredrikson. Unlocking deterministic robustness certification on imagenet. Conference on Neural Information Processing Systems (NeurIPS), 2024. 14" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 180, + 545, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 180, + 545, + 223 + ], + "spans": [ + { + "bbox": [ + 307, + 180, + 545, + 223 + ], + "type": "text", + "content": "[19] Lei Huang, Li Liu, Fan Zhu, Diwen Wan, Zehuan Yuan, Bo Li, and Ling Shao. Controllable orthogonalization in training DNNs. In Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3, 11, 12" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 223, + 545, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 223, + 545, + 266 + ], + "spans": [ + { + "bbox": [ + 307, + 223, + 545, + 266 + ], + "type": "text", + "content": "[20] Guy Katz, Clark Barrett, David L Dill, Kyle Julian, and Mykel J Kochenderfer. Reluplex: An efficient SMT solver for verifying deep neural networks. In International conference on computer aided verification, 2017. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 266, + 545, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 266, + 545, + 287 + ], + "spans": [ + { + "bbox": [ + 307, + 266, + 545, + 287 + ], + "type": "text", + "content": "[21] Alex Krizhevsky. Learning multiple layers of features from tiny images. Technical report, 2009. 5, 16" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 287, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 287, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 287, + 545, + 319 + ], + "type": "text", + "content": "[22] Andrew Lavin and Scott Gray. Fast algorithms for convolutional neural networks. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 
7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 319, + 545, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 319, + 545, + 340 + ], + "spans": [ + { + "bbox": [ + 307, + 319, + 545, + 340 + ], + "type": "text", + "content": "[23] Ya Le and Xuan Yang. Tiny imagenet visual recognition challenge. CS 231N, 2015. 5, 16" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 340, + 545, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 340, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 307, + 340, + 545, + 371 + ], + "type": "text", + "content": "[24] Klas Leino, Zifan Wang, and Matt Fredrikson. Globally-robust neural networks. In International Conference on Machine Learning, 2021. 2, 11" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 371, + 545, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 371, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 307, + 371, + 545, + 425 + ], + "type": "text", + "content": "[25] Mario Lezcano-Casado and David Martínez-Rubio. Cheap orthogonal constraints in neural networks: A simple parametrization of the orthogonal and unitary group. In International Conference on Machine Learning (ICML), 2019. 11" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 425, + 545, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 425, + 545, + 457 + ], + "spans": [ + { + "bbox": [ + 307, + 425, + 545, + 457 + ], + "type": "text", + "content": "[26] Linyi Li, Tao Xie, and Bo Li. Sok: Certified robustness for deep neural networks. In 2023 IEEE Symposium on Security and Privacy (SP), 2023. 1" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 457, + 545, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 457, + 545, + 512 + ], + "spans": [ + { + "bbox": [ + 307, + 457, + 545, + 512 + ], + "type": "text", + "content": "[27] Qiyang Li, Saminul Haque, Cem Anil, James Lucas, Roger B Grosse, and Joern-Henrik Jacobsen. Preventing gradient attenuation in Lipschitz constrained convolutional networks. In Conference on Neural Information Processing Systems (NeurIPS), 2019. 2, 3, 11, 14" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 512, + 545, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 512, + 545, + 543 + ], + "spans": [ + { + "bbox": [ + 307, + 512, + 545, + 543 + ], + "type": "text", + "content": "[28] Shuai Li, Kui Jia, Yuxin Wen, Tongliang Liu, and Dacheng Tao. Orthogonal deep neural networks. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 11" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 543, + 545, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 543, + 545, + 575 + ], + "spans": [ + { + "bbox": [ + 307, + 543, + 545, + 575 + ], + "type": "text", + "content": "[29] Max Losch, David Stutz, Bernt Schiele, and Mario Fritz. Certified robust models with slack control and large Lipschitz constants. arXiv preprint arXiv:2309.06166, 2023. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 575, + 545, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 575, + 545, + 618 + ], + "spans": [ + { + "bbox": [ + 307, + 575, + 545, + 618 + ], + "type": "text", + "content": "[30] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. 
Towards deep learning models resistant to adversarial attacks. In International Conference on Learning Representations (ICLR), 2018. 1" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 618, + 545, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 618, + 545, + 661 + ], + "spans": [ + { + "bbox": [ + 307, + 618, + 545, + 661 + ], + "type": "text", + "content": "[31] Laurent Meunier, Blaise J Delattre, Alexandre Araujo, and Alexandre Allauzen. A dynamical system perspective for Lipschitz neural networks. In International Conference on Machine Learning (ICML), 2022. 3, 11, 14, 22" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 661, + 545, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 661, + 545, + 704 + ], + "spans": [ + { + "bbox": [ + 307, + 661, + 545, + 704 + ], + "type": "text", + "content": "[32] Takeru Miyato, Toshiki Kataoka, Masanori Koyama, and Yuichi Yoshida. Spectral normalization for generative adversarial networks. In International Conference on Learning Representations (ICLR), 2018. 3, 11" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24582" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 707 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 171 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 171 + ], + "type": "text", + "content": "[33] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In Conference on Neural Information Processing Systems (NeurIPS). 2019. 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 171, + 287, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 171, + 287, + 214 + ], + "spans": [ + { + "bbox": [ + 48, + 171, + 287, + 214 + ], + "type": "text", + "content": "[34] Bernd Prach and Christoph H Lampert. Almost-orthogonal layers for efficient general-purpose Lipschitz networks. In European Conference on Computer Vision (ECCV), 2022. 2, 3, 5, 14" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 214, + 287, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 214, + 287, + 256 + ], + "spans": [ + { + "bbox": [ + 48, + 214, + 287, + 256 + ], + "type": "text", + "content": "[35] S Singla and S Feizi. Fantastic four: Differentiable bounds on singular values of convolution layers. In International Conference on Learning Representations (ICLR), 2021. 
3, 11" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 257, + 287, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 257, + 287, + 289 + ], + "spans": [ + { + "bbox": [ + 48, + 257, + 287, + 289 + ], + "type": "text", + "content": "[36] Sahil Singla and Soheil Feizi. Skew orthogonal convolutions. In International Conference on Machine Learning (ICML), 2021. 2, 3, 14" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 289, + 287, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 289, + 287, + 320 + ], + "spans": [ + { + "bbox": [ + 48, + 289, + 287, + 320 + ], + "type": "text", + "content": "[37] Sahil Singla and Soheil Feizi. Improved techniques for deterministic 12 robustness. Conference on Neural Information Processing Systems (NeurIPS), 2022. 14" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 320, + 287, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 320, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 48, + 320, + 287, + 364 + ], + "type": "text", + "content": "[38] Leslie N Smith and Nicholay Topin. Super-convergence: Very fast training of neural networks using large learning rates. In Artificial intelligence and machine learning for multi-domain operations applications, 2019. 15" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 364, + 287, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 364, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 48, + 364, + 287, + 407 + ], + "type": "text", + "content": "[39] Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In International Conference on Learning Representations (ICLR), 2014. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 407, + 287, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 407, + 287, + 449 + ], + "spans": [ + { + "bbox": [ + 48, + 407, + 287, + 449 + ], + "type": "text", + "content": "[40] Asher Trockman and J Zico Kolter. Orthogonalizing convolutional layers with the Cayley transform. In International Conference on Learning Representations (ICLR), 2021. 2, 3, 5, 14, 23" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 449, + 287, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 287, + 492 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 287, + 492 + ], + "type": "text", + "content": "[41] Yusuke Tsuzuku, Issei Sato, and Masashi Sugiyama. Lipschitz-margin training: Scalable certification of perturbation invariance for deep neural networks. Conference on Neural Information Processing Systems (NeurIPS), 2018. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 492, + 287, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 492, + 287, + 524 + ], + "spans": [ + { + "bbox": [ + 48, + 492, + 287, + 524 + ], + "type": "text", + "content": "[42] Ruigang Wang and Ian Manchester. Direct parameterization of Lipschitz-bounded deep networks. In International Conference on Machine Learning (ICML), 2023. 
3, 11, 12" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 524, + 287, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 524, + 287, + 578 + ], + "spans": [ + { + "bbox": [ + 48, + 524, + 287, + 578 + ], + "type": "text", + "content": "[43] Lily Weng, Huan Zhang, Hongge Chen, Zhao Song, Chojui Hsieh, Luca Daniel, Duane Boning, and Inderjit Dhillon. Towards fast computation of certified robustness for relu networks. In International Conference on Machine Learning (ICML), 2018. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 578, + 287, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 287, + 620 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 287, + 620 + ], + "type": "text", + "content": "[44] Eric Wong and Zico Kolter. Provable defenses against adversarial examples via the convex outer adversarial polytope. In International Conference on Machine Learning (ICML), 2018. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 620, + 287, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 620, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 48, + 620, + 287, + 685 + ], + "type": "text", + "content": "[45] Lechao Xiao, Yasaman Bahri, Jascha Sohl-Dickstein, Samuel Schoenholz, and Jeffrey Pennington. Dynamical isometry and a mean field theory of CNNs: How to train 10,000-layer vanilla convolutional neural networks. In International Conference on Machine Learning (ICML), 2018. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 685, + 287, + 707 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 685, + 287, + 707 + ], + "spans": [ + { + "bbox": [ + 48, + 685, + 287, + 707 + ], + "type": "text", + "content": "[46] Xiaojun Xu, Linyi Li, and Bo Li. Lot: Layer-wise orthogonal training on improving 12 certified robustness. Conference on" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 137 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 94 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 94 + ], + "type": "text", + "content": "Neural Information Processing Systems (NeurIPS), 2022. 2, 3, 8, 14, 23" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 95, + 547, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 95, + 547, + 137 + ], + "spans": [ + { + "bbox": [ + 307, + 95, + 547, + 137 + ], + "type": "text", + "content": "[47] Tan Yu, Jun Li, Yunfeng Cai, and Ping Li. Constructing orthogonal convolutions in an explicit manner. In International Conference on Learning Representations (ICLR), 2021. 
3, 11, 12" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24583" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_content_list.json b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..fb3e9989b79add339a5047a416b8a58ccf577283 --- /dev/null +++ b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_content_list.json @@ -0,0 +1,1644 @@ +[ + { + "type": "text", + "text": "2S-UDF: A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images", + "text_level": 1, + "bbox": [ + 86, + 128, + 880, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Junkai Deng1,2 Fei Hou1,2* Xuhui Chen1,2 Wencheng Wang1,2 Ying He3", + "bbox": [ + 153, + 202, + 813, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences", + "bbox": [ + 102, + 220, + 867, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ University of Chinese Academy of Sciences", + "bbox": [ + 305, + 238, + 661, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ School of Computer Science and Engineering, Nanyang Technological University", + "bbox": [ + 156, + 256, + 808, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{dengjk,houfei,chenxh,whn}@ios.ac.cn yhe@ntu.edu.sg", + "bbox": [ + 225, + 277, + 718, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 325, + 312, + 342 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, building on the foundation of neural radiance field, various techniques have emerged to learn unsigned distance fields (UDF) to reconstruct 3D non-watertight models from multi-view images. Yet, a central challenge in UDF-based volume rendering is formulating a proper way to convert unsigned distance values into volume density, ensuring that the resulting weight function remains unbiased and sensitive to occlusions. Falling short on these requirements often results in incorrect topology or large reconstruction errors in resulting models. This paper addresses this challenge by presenting a novel two-stage algorithm, 2S-UDF, for learning a high-quality UDF from multi-view images. Initially, the method applies an easily trainable density function that, while slightly biased and transparent, aids in coarse reconstruction. The subsequent stage then refines the geometry and appearance of the object to achieve a high-quality reconstruction by directly adjusting the weight function used in volume rendering to ensure that it is unbiased and occlusion-aware. 
Decoupling density and weight in two stages makes our training stable and robust, distinguishing our technique from existing UDF learning approaches. Evaluations on the DeepFashion3D, DTU, and BlendedMVS datasets validate the robustness and effectiveness of our proposed approach. In both quantitative metrics and visual quality, the results indicate our superior performance over other UDF learning techniques in reconstructing 3D non-watertight models from multi-view images. Our code is available at https://bitbucket.org/jkdeng/2sudf/.", + "bbox": [ + 73, + 358, + 473, + 797 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 823, + 209, + 838 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As the success of neural radiance field (NeRF) [29], numerous volume rendering based 3D modeling methods are pro", + "bbox": [ + 76, + 848, + 468, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/867bc421a97576f520a3274ecbdd5e9eba4ec439278a4d750a3d66c3827ea2c9.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 522, + 343, + 594, + 405 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/85e962c770c891a98f6b56af5d215f5073176408910292bed8e6f4a91d2c8ea6.jpg", + "image_caption": [ + "Figure 1. We learn a UDF from multiview images for nonwatertight model reconstruction. As illustrated in the cross sections of learned UDFs, our learned UDF approximates to the ground truth. In contrast, the learned UDF of NeuralUDF [25] is choppy leading to significant artifacts, e.g., unexpected pit. The learned UDF of NeUDF [23] is almost closed struggling to generate open surface." + ], + "image_footnote": [], + "bbox": [ + 522, + 410, + 594, + 454 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/dabe67eff785b81ed83779618b589b9cdfe1376a1193b3914728b850dc1466e1.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 614, + 342, + 684, + 405 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a42faf2f7448712bdc6de433b3d8c9e170355c4c40c61ea4bc1244b5a9d9d5db.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 612, + 410, + 684, + 453 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a59c2a6b379ff109af501cfcca8bad9f94ff27431c6799491729fa0cd5e3fd08.jpg", + "image_caption": [ + "NeuralUDF" + ], + "image_footnote": [], + "bbox": [ + 705, + 342, + 776, + 405 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1f6e183a63714d014a9d2906c74a7296fa1748e9aebee81c5db5fcde94239134.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 410, + 776, + 453 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f2aa3375ed2b1b37854099d1c96a2e52adb2801ce536a43fc9776bdaa551545f.jpg", + "image_caption": [ + "NeUDF" + ], + "image_footnote": [], + "bbox": [ + 800, + 342, + 870, + 405 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a8099774a3e49a585d31d7d5f2388893a45d5cebce77d9fc5502bcace31fa767.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 799, + 410, + 870, + 453 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "posed to learn signed distance fields (SDF) for 3D model reconstruction from multi-view images [7, 34, 36, 40]. These approaches map signed distance value to a density function, thereby enabling the use of volume rendering to learn an implicit SDF representation. 
To calculate pixel colors, they compute the weighted sum of radiances along each light ray. Achieving an accurate surface depiction requires the density function to meet three essential criteria. Firstly, the weights, which are derived from the density function, must reach their maximum value when the distance is zero, ensuring unbiasedness. Secondly, as a ray traverses through the surface, the accumulated density should tend towards infinity, rendering the surface opaque — a property referred to as occlusion-awareness. Finally, the density function should be bounded to prevent numerical issues. The popular SDF approaches, such as NeuS [34] and VolSDF [40], adopt an S-shaped density function that meets all these requirements.", + "bbox": [ + 496, + 597, + 892, + 853 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While SDF-based methods excel at reconstructing watertight models, they have limitations in representing open models. This is due to the intrinsic nature of SDF, which", + "bbox": [ + 498, + 854, + 893, + 898 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation.", + "bbox": [ + 236, + 1, + 807, + 18 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Except for this watermark, it is identical to the accepted version;", + "bbox": [ + 323, + 16, + 722, + 30 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 292, + 31, + 753, + 45 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author", + "bbox": [ + 94, + 887, + 220, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "5084", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "differentiates between the interior and exterior of a model, thus failing to accommodate open boundaries. Recent advances have attempted to mitigate this constraint by employing unsigned distance fields (UDF) [23, 25, 27]. Unlike signed distance fields, UDFs have non-negative distance values, making them suitable for representing nonwatertight models. However, learning a UDF from multiview images is a challenging task since the gradients of the UDF are unstable due to directional changes near the zero level-set, making it difficult to train the neural network. Another major challenge lies in formulating a UDF-induced density function that can simultaneously meet the above-mentioned three requirements. Unlike SDFs, UDFs cannot distinguish between the front and back of a surface based on distance values, thus, directly using an S-shaped density function is off the table. Opting for a bell-shaped density function brings its own issues. It is impossible for these integrations to approach infinity, so as to be occlusion-aware, unless the density becomes boundless at zero distance values. These conflicting requirements make UDF learning a non-trivial task, forcing existing methods to sacrifice at least one of these conditions. 
As shown in Figure 1, the existing methods NeuralUDF [25] and NeUDF [23] result in either choppy or nearly closed UDFs.", + "bbox": [ + 80, + 90, + 467, + 452 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As designing a UDF-induced density function that simultaneously fulfills the three aforementioned conditions remains an unresolved challenge, we propose a novel approach that learns a UDF from multi-view images in two separate stages. In the first stage, we apply an easily trainable but slightly biased and transparent density function for coarse reconstruction. Such a UDF, although being approximate, provides an important clue so that we can determine where to truncate the light rays. This accounts for the occlusion effect, where points behind the surface are not visible and should not contribute to the output color. With truncated light rays, we are able to derive the weights from UDF directly bypassing the density function, to further refine the geometry and appearance in the second stage. Our two-stage learning method, called 2S-UDF, leads to an unbiased and occlusion-aware weight function. Furthermore, by sidestepping density function learning in Stage 2, we effectively bypass the challenges associated with ensuring its boundedness. This strategy enhances the numerical stability of our method. Evaluations on benchmark datasets DeepFashion3D [43] and DTU [19] show that 2S-UDF outperforms existing UDF learning methods in terms of both reconstruction accuracy and visual quality. Additionally, we observe that the training stability of 2S-UDF is notably superior compared to other UDF learning neural networks.", + "bbox": [ + 80, + 454, + 467, + 830 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 80, + 845, + 215, + 859 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D Reconstruction from Multi-View Images. Surface reconstruction from multi-view images has been a subject of", + "bbox": [ + 80, + 871, + 467, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "study for several decades, and can generally be classified into two categories: voxel-based and point-based methods. Voxel-based methods [3, 8, 20, 21, 33] divide the 3D space into voxels and determine which ones belong to the object. These methods can be computationally expensive and may not be suitable for reconstructing complex surfaces. Point-based methods [13, 31, 38] use structure-from-motion [16] to calibrate the images and generate a dense point cloud using multi-view stereo [12]. Finally, surface reconstruction methods (e.g., [2, 17, 22]) are used to generate a mesh. Since multi-view stereo requires dense correspondences to generate a dense point cloud, which are often difficult to compute, its results often contain various types of artifacts, such as noise, holes, and incomplete structures.", + "bbox": [ + 503, + 90, + 888, + 301 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural Volume Rendering. Neural network-based 3D surface reconstruction has received attention in recent years with the emergence of neural rendering [29]. Several methods have been proposed for volume rendering and surface reconstruction using neural networks. VolSDF [40] uses the cumulative distribution function of Laplacian distribution to evaluate the density function from SDF for volume rendering and surface reconstruction. 
NeuS [34] adopts an unbiased density function to the first-order approximation of SDFs for more accurate reconstruction. SparseNeuS [24] extends NeuS to use fewer images for reconstruction. HFNeuS [36] improves NeuS by proposing a simplified and unbiased density function and using hierarchical multilayer perceptrons (MLPs) for detail reconstruction. GeoNeuS [10] incorporates structure-from-motion to add more constraints. NeuralWarp [7] improves the accuracy by optimizing consistency between warped views of different images. PET-NeuS [37] further improves the accuracy by introducing tri-planes into the SDF prediction module, incorporating with MLP. All these methods learn SDFs, which can only reconstruct watertight models. Recently, Long et al. proposed NeuralUDF [25] for learning UDF for reconstructing open models. It adapts the S-shaped density function for learning SDF to UDFs by introducing an indicator function. However, the indicator function is complicated to learn, and also introduces biases. Liu et al. proposed NeUDF [23] adopting a bell-shaped density. However, to make it occlusion-aware, the density has to be unbounded resulting in an improper integral, which reduces accuracy. Meng et al. proposed NeAT [27] to learn SDF with validity so as to reconstruct open models from SDF. However, it needs foreground masks for data.", + "bbox": [ + 503, + 306, + 888, + 787 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D Reconstruction from Point Clouds. There has been recent interest in surface representation using signed distance fields (SDFs) and occupation fields. Several methods have been proposed for learning SDFs [4, 26, 30, 32, 35], while occupation fields have been used in methods such as [5, 28]. However, both SDFs and occupation fields can only represent watertight models. To represent non-watertight", + "bbox": [ + 503, + 795, + 888, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "5085", + "bbox": [ + 483, + 946, + 513, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "models, some methods are proposed to learn UDF from 3D point clouds [6, 41, 42]. Our proposed method also uses UDF for non-watertight models representation, but we learn it directly from multi-view images, which is a challenging problem.", + "bbox": [ + 75, + 90, + 468, + 167 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 178, + 166, + 193 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "At the foundation of UDF-based learning approaches is the task of crafting a density function that converts unsigned distance values into volume density, ensuring that the resulting weight function is unbiased and responsive to occlusions. None of the existing UDF learning methods [23, 25] can simultaneously meet the three critical requirements, i.e., ensuring the density function is bounded, and that the weight function remains both unbiased and occlusion aware.", + "bbox": [ + 75, + 203, + 468, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We tackle these challenges by decoupling the density function and weight function across two stages. In the initial stage (Section 3.1), we utilize an easy-to-train, bell-shaped density function (which is inherently bounded) to learn a coarse UDF. While the resulting weight function is not theoretically unbiased or occlusion-aware, we can make it practically usable by choosing a proper parameter. 
Moving into the second stage (Section 3.2), we sidestep the density function entirely, focusing instead on refining the UDF by directly adjusting the weight function within the neural volume rendering framework. Specifically, we truncate light rays after they hit the front side of the object and obtain a weight function that is both unbiased and sensitive to occlusions, without the overhang of density function boundedness concerns. Finally, Section 3.3 presents the training details.", + "bbox": [ + 75, + 339, + 468, + 580 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Stage 1: Coarse UDF Learning via a Simple Density Function", + "text_level": 1, + "bbox": [ + 76, + 587, + 468, + 619 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We consider the scenario of a single planar plane $\\mathcal{M}$ and a single ray-plane intersection. Inspired by HF-NeuS [36], we propose an easy-to-learn density function $\\sigma_{1}$ that maps unsigned distance $f$ to density", + "bbox": [ + 75, + 626, + 468, + 686 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma_ {1} (f (t)) = \\frac {c s e ^ {- s f (t)}}{1 + e ^ {- s f (t)}}, s > 0, c > 0, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 691, + 468, + 724 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $c > 0$ is a fixed, user-specified parameter and $s > 0$ is a learnable parameter controlling the width of the bell-shaped curve. Straightforward calculation shows that the weight function $w_{1}(f(t)) = e^{-\\int_{0}^{t}\\sigma_{1}(f(u))\\mathrm{d}u}\\sigma_{1}(f(t))$ is monotonically decreasing behind the plane $\\mathcal{M}$ and the maximum value occurs at a point $t^*$ in front of $\\mathcal{M}$ with an unsigned distance value of $f(t^{*}) = \\frac{1}{s}\\ln \\frac{c}{|\\cos(\\theta)|}, (c > |\\cos (\\theta)|)$ or $f(t^{*}) = 0, (0 < c \\leq |\\cos (\\theta)|)$ , where $\\theta$ is the incident angle between the light ray and the surface normal. This means that the weight function $w_{1}$ is not unbiased. Furthermore, the line integral $\\int_0^t\\sigma_1(f(u))\\mathrm{d}u$ does", + "bbox": [ + 75, + 729, + 468, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "not approach infinity when a light ray passes through the front-most layer of the surface, indicating $w_{1}$ is only partially occlusion-aware.", + "bbox": [ + 496, + 90, + 890, + 135 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "While the density function $\\sigma_{1}$ is not perfect in theory, by selecting an appropriate $c$ , we can practically minimize bias and enhance opacity. Clearly, a smaller $c$ value decreases $f(t^{*})$ , thereby reducing bias. To gauge the effect of $c$ on opacity, we now consider the most extreme scenario where the incident light ray is perpendicular to the planar surface $\\mathcal{M}$ , and assume that the intersection point is located at $t = 1$ . In such a situation, the unsigned distance function is $f(t) = 1 - t$ for points in front of $\\mathcal{M}$ . Since $\\sigma_{1}$ is symmetrical on either side of $\\mathcal{M}$ , the surface transparency is the square of the transparency of the front side. 
The theoretic transparency is,", + "bbox": [ + 496, + 136, + 890, + 316 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left(e ^ {- \\int_ {0} ^ {1} \\hat {\\sigma} _ {1} (f (t)) \\mathrm {d} t}\\right) ^ {2} = \\left[ \\exp \\left(- \\int_ {0} ^ {1} \\frac {c s e ^ {- s (1 - t)}}{1 + e ^ {- s (1 - t)}} \\mathrm {d} t\\right) \\right] ^ {2} \\\\ = \\left(\\frac {1 + e ^ {- s}}{2}\\right) ^ {2 c}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 329, + 880, + 404 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Therefore, we should choose a relatively large $c$ to reduce transparency. In our implementation, we set the constant $c = 5$ based on the typical value of the learned parameter $s$ which usually ranges between 1000 and 2000. Calculations of bias and translucency show that this setting offers a good balance between occlusion-awareness and unbiasedness in the first stage training. Please refer to the supplementary material for a detailed analysis.", + "bbox": [ + 496, + 415, + 890, + 536 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Stage 2: UDF Refinement through Weight Adjustment", + "text_level": 1, + "bbox": [ + 498, + 546, + 890, + 578 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this stage, we refine the UDF learned in Stage 1 to improve the quality of geometry and appearance. Unlike Stage 1 and all other UDF-learning methods, inspired by [1], we truncate light rays based on the approximated UDF learned in Stage 1 and learn the weight function $w(t)$ directly instead of the density function $\\sigma(t)$ to refine the UDF.", + "bbox": [ + 496, + 585, + 890, + 675 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Ideally, for a single ray-plane intersection, we want a bell-shaped function $w(t)$ that attains its maximum at the points with zero distance values, and satisfies partition of unity. Therefore, we adopt the derivative of the sigmoid function as the weight function [1], defined as", + "bbox": [ + 496, + 676, + 890, + 752 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nw _ {2} (f (t)) = \\frac {s e ^ {- s f (t)}}{(1 + e ^ {- s f (t)}) ^ {2}} \\cdot | \\cos (\\theta) |. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 763, + 890, + 799 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "with $\\theta$ being the incident angle between the light ray and the surface normal.", + "bbox": [ + 496, + 809, + 890, + 838 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Intuitively speaking, learning such a weight function $w_{2}$ in Stage 2 of our UDF method is similar to learning an S-shaped density function in SDF-based approaches, such as [36]. As a result, the learning process in Stage 2 is as", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "5086", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "stable as those SDF approaches. Furthermore, it can totally avoid using the visibility indicator function, which is necessary in NeuralUDF [25].", + "bbox": [ + 75, + 90, + 468, + 136 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Calculation shows that the weight $w_{2}$ attains its maximum at zero distance values, therefore it is unbiased. However, if we naively predict the weight function directly, it will not be occlusion-aware, so we introduce the ray truncation. 
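As a rough illustration of the two mappings above, a minimal NumPy sketch of the Stage-1 density in Eq. (1) and the Stage-2 weight in Eq. (2) could look as follows; the function and variable names are hypothetical and not taken from the paper's released code.

import numpy as np

def sigma_1(f, s, c=5.0):
    # Stage-1 bell-shaped density of Eq. (1): c*s*e^{-s f} / (1 + e^{-s f}),
    # where f >= 0 is the unsigned distance, s the learnable sharpness, c the fixed constant (5 in the paper).
    e = np.exp(-s * f)
    return c * s * e / (1.0 + e)

def w_2(f, s, cos_theta):
    # Stage-2 weight of Eq. (2): the sigmoid derivative scaled by |cos(theta)| of the incident angle.
    e = np.exp(-s * f)
    return s * e / (1.0 + e) ** 2 * np.abs(cos_theta)

Here s plays the role of the learnable parameter that typically converges to values between 1000 and 2000 during training.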
To make $w_{2}$ occlusion-aware, we can truncate the light rays after they pass through the frontmost layer of the surface, thereby preventing rendering the interior of the object. Note that we do not expect the truncation to be exactly on the frontmost layer of the surface. In fact, as long as it occurs between the frontmost layer and the second layer, we consider the truncation valid. This means that the approximate UDF learned in the first stage, which can capture the main topological features (such as boundaries) and provide a fairly good representation of the target object, is sufficient for us to determine where to cut off the light rays.", + "bbox": [ + 75, + 138, + 468, + 364 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In our implementation, we adopt a simple strategy to determine the truncation point for each light ray. Specifically, the truncation point of ray $\\mathbf{r}$ is the first sample point along $\\mathbf{r}$ such that", + "bbox": [ + 75, + 366, + 468, + 425 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The unsigned distance value at the point is a local maxima. To avoid distance vibration interference, it should be the maximum in a window centered at the point. And", + "- The accumulated weight up to this point is greater than $\\delta_{\\text{thres}}$ ." + ], + "bbox": [ + 76, + 428, + 467, + 503 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The accumulated weight threshold $\\delta_{\\text{thres}}$ is intuitively set to 0.5. This choice is based on the assumption that if the Stage 1 training is performed well enough, the accumulated weights at each sample point along the ray would be either 0 (for not reaching a surface) or 1 (for having intersected with a surface). Hence, we intuitively select 0.5 for $\\delta_{\\text{thres}}$ because it is the midpoint between 0 and 1. With the cutoff mechanism, only the first ray-surface intersection contributes to the color of the ray, effectively achieving occlusion-awareness. Given these properties, we conclude that,", + "bbox": [ + 75, + 505, + 468, + 670 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Theorem 1 The weight $w_{2}$ with light cutting off is unbiased and occlusion-aware.", + "bbox": [ + 75, + 688, + 468, + 718 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 2 is an intuitive illustration of our Stage 2 weight learning and truncation strategy. The UDF maxima point $A$ in front of the intersection surface would not affect the cutting point selection as the accumulated weight is below $\\delta_{\\text{thres}}$ (0.5). The local maxima $B$ due to UDF oscillation also would not affect it since it's not the maximum in a large enough neighborhood. The light is cut at maxima point $C$ , and thus the weight of point $D$ is zero without contributions to the rendering. As illustrated in Figure 2, the cutting process is robust against UDF oscillation, open boundaries, and local maxima in front of the intersection surface.", + "bbox": [ + 75, + 734, + 468, + 898 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/75a6bd4f9512b87da68e33f3321462eac43b0aa68cbe0f9128dac677398021a8.jpg", + "image_caption": [ + "Figure 2. An intuitive illustration of our ray cutting algorithm, best viewed in color and magnified. A ray shoots from left to right, approaching the boundary of the first surface, and going through another two surfaces (gray boxes). 
The violet solid line represents the UDF values along the ray; the orange dashed line represents the corresponding color weight." + ], + "image_footnote": [], + "bbox": [ + 517, + 98, + 877, + 207 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Training", + "text_level": 1, + "bbox": [ + 500, + 325, + 602, + 340 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Differentiable UDFs. NeuS uses an MLP network to learn the signed distance function $f$ , which is a differentiable function. In contrast, UDF is not differentiable at the zero level set, making the network difficult to learn the values and gradients of the UDF close to the zero level set.", + "bbox": [ + 496, + 348, + 890, + 422 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Another crucial requirement is to ensure non-negative values for the computed distances, which seems like a trivial task as one may simply apply absolute value or normalization such as ReLU [11] to the MLP output. However, applying the absolute value to the distance is not viable due to its non-differentiability at zero. Similarly, normalizing the output value using ReLU is not feasible as it is also non-differentiable at zero and its gradient vanishes for negative inputs. This can be particularly problematic for learning UDFs, since when the MLP returns a negative distance value, the ReLU gradient vanishes, hindering the update of the distance to a positive value in the subsequent iterations.", + "bbox": [ + 496, + 424, + 892, + 604 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We add a softplus [9] function after the output layer of the MLP [23]. The softplus function is a smooth and differentiable approximation of the ReLU function, which is defined as $\\mathrm{softplus}(x) = \\frac{1}{\\beta}\\ln (1 + e^{\\beta x})$ . Softplus has the same shape as ReLU, but it is continuous and differentiable at every point and its gradients do not vanish anywhere. Using the softplus function allows us to ensure that the output of the MLP is non-negative and differentiable, making it suitable for learning the UDF. Similar to NeUDF [23], we set $\\beta = 100$ in our experiments.", + "bbox": [ + 496, + 604, + 890, + 755 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Loss functions. Following NeuralUDF [25], we adopt an iso-surface regularizer to penalize the UDF values of the non-surface points from being zero, therefore encouraging smooth and clean UDFs. The regularization loss is defined as [25]", + "bbox": [ + 496, + 756, + 890, + 830 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {r e g} = \\frac {1}{M N} \\sum_ {i, k} \\exp (- \\tau \\cdot f (t _ {i, k})),\n$$\n", + "text_format": "latex", + "bbox": [ + 571, + 839, + 818, + 875 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\tau$ is a constant scalar that scales the learned UDF", + "bbox": [ + 500, + 885, + 890, + 898 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "5087", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "values, $M$ is the total number of sampled rays per training iteration, and $N$ is the number of sampled points on a single ray. $\\tau$ is set to 5.0 in the first stage and 50.0 in the second stage.", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The value of $s$ , which is learnable in our method, significantly affects the quality of the reconstruction. 
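As a concrete illustration of the softplus output head and the iso-surface regularizer described above, a small PyTorch-style sketch (with hypothetical function names, not taken from the released code) might read:

import torch
import torch.nn.functional as F

def udf_from_mlp(raw_out, beta=100.0):
    # Softplus head: smooth and differentiable everywhere, keeps the predicted distance non-negative.
    return F.softplus(raw_out, beta=beta)

def iso_surface_reg(udf_vals, tau):
    # L_reg: penalize near-zero UDF values at non-surface sample points; udf_vals has shape (M, N).
    return torch.exp(-tau * udf_vals).mean()

Here tau would be set to 5.0 in Stage 1 and 50.0 in Stage 2, matching the values stated above.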
When $s$ is small, it introduces a larger bias and leads to a more blurred output. We observe that $s$ typically converges to a relatively large value between 1000 and 2000, leading to visually pleasing results. However, in rare cases when $s$ stops increasing during training, we apply a penalty to force it to increase. The penalty is defined as follows", + "bbox": [ + 75, + 151, + 470, + 272 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s} = \\frac {1}{M} \\sum_ {i, k} \\frac {1}{s _ {i , k}},\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 280, + 339, + 316 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $M$ is the number of rays during a training epoch. This term $\\mathcal{L}_s$ aggregates the reciprocals of all $s$ values used for the point $t_{i,k}$ on ray $r_i$ . Intuitively speaking, it encourages a larger $s$ during the early stage of training. In our implementation, we make this term optional since $s$ generally increases with a decreasing rate during training, and the penalty term is only necessary in rare cases when $s$ stops at a relatively low value.", + "bbox": [ + 75, + 327, + 468, + 446 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As in other SDF- and UDF-based methods [25, 34, 36], we adopt color loss and Eikonal loss in our approach. Specifically, the color loss $\\mathcal{L}_{color}$ is the $L_{1}$ loss between the predicted color and the ground truth color of a single pixel as used in [34]. The Eikonal loss $\\mathcal{L}_{eik}$ is used to regularize the learned distance field to have a unit gradient [14]. Users may also choose to adopt object masks for supervision as introduced in other SDF- and UDF-based methods [25, 34]. Putting it all together, we define the combined loss function as a weighted sum,", + "bbox": [ + 75, + 448, + 470, + 599 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {\\text {c o l o r}} + \\lambda_ {1} \\mathcal {L} _ {\\text {e i k}} + \\lambda_ {2} \\mathcal {L} _ {\\text {r e g}} + \\lambda_ {3} \\mathcal {L} _ {s} (+ \\lambda_ {m} \\mathcal {L} _ {\\text {m a s k}}),\n$$\n", + "text_format": "latex", + "bbox": [ + 86, + 609, + 457, + 626 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda_1, \\lambda_2, \\lambda_3$ and the optional $\\lambda_m$ are hyperparameters that control the weight of each loss term.", + "bbox": [ + 75, + 635, + 468, + 666 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 679, + 209, + 695 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. To evaluate our method, we use three datasets: DeepFashion3D [43], DTU [19] and BlendedMVS [39]. The DeepFashion3D dataset consists of clothing models, which are open models with boundaries. As only 3D points are available, we render 72 images of resolution $1024 \\times 1024$ with a white background from different viewpoints for each model. In addition to DeepFashion3D images rendered by us most of which are texture-less, we also take the image data from NeuralUDF [25] most of which are texture-rich into our experiments. We call them DF3D#Ours and DF3D#NeuralUDF, respectively. The DTU dataset consists of models captured in a studio, all of which are watertight. We use this dataset to validate that our method also works", + "bbox": [ + 75, + 704, + 470, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "well for watertight models. 
These datasets have been widely used in previous works such as [34, 36, 40]. In our experiments, open models such as in DeepFashion3D are trained without mask supervision; DTU is trained with mask supervision.", + "bbox": [ + 496, + 90, + 890, + 165 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines. To validate the effectiveness of our method, we compare it with state-of-the-art UDF learning methods: NeuralUDF [25], NeUDF [23] and NeAT [27]; and SDF learning methods: VolSDF [40] and NeuS [34].", + "bbox": [ + 496, + 167, + 893, + 228 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Comparisons on Open Models", + "text_level": 1, + "bbox": [ + 498, + 238, + 769, + 255 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/a88aa4fc13a72c8f95751da90c1b3d14e94a9ccaae9c16f0138b5db0a7ffd86c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method#1#2#3#4#5#6#7#8#9Mean
NeuS6.6913.5010.3215.018.9912.9212.949.939.4911.09
VolSDF6.369.4411.8716.0310.7814.9115.0611.348.9611.64
NeAT10.5413.897.3013.1213.1812.448.2210.3011.3311.15
NeuralUDF6.0711.587.6810.9611.169.766.986.136.418.53
NeUDF4.398.294.9419.567.528.183.813.815.767.36
Ours4.555.774.277.436.594.772.883.215.735.02
MethodLS-C0SS-D0LS-D0NS-D1LS-C1Skirt1SS-C0Mean
NeuS3.184.825.712.213.602.445.133.87
VolSDF5.924.795.964.368.737.748.846.62
NeAT3.064.335.923.528.843.914.304.84
NeuralUDF1.922.054.111.502.472.162.152.34
NeUDF1.952.93N.A.1.482.662.741.772.26
Ours1.921.972.461.472.141.841.911.96
", + "bbox": [ + 501, + 275, + 906, + 452 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1. Chamfer distances $(\\times 10^{-3})$ on DF3D#Ours (top) and DF3D#NeuralUDF (bottom). NeAT requires mask supervision and others do not need.", + "bbox": [ + 496, + 460, + 893, + 503 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluate our method and compare it with baselines using the garments from DeepFashion3D [43], where the models have multiple open boundaries. VolSDF and NeuS always close the boundaries since they learn SDFs.", + "bbox": [ + 496, + 521, + 890, + 580 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "NeuralUDF, NeUDF and NeAT are designed to learn non-watertight models. NeAT learns SDFs for open models, and requires mask supervision to produce reasonable results, but other methods do not require mask supervision for DeepFashion3D. The released codebase of NeuralUDF indicates that it also has a two-stage training process. We evaluate the results of NeuralUDF at the end of both stages, and present whichever is better.", + "bbox": [ + 496, + 582, + 890, + 702 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In contrast, NeuralUDF, NeUDF and our method learn UDFs, which can generate open models. Table 1 shows the Chamfer distances of the results on DeepFashion3D. Some of the Chamfer distances of the compared methods are large because the open holes are closed or the model is over-smoothed, resulting in significant errors.", + "bbox": [ + 496, + 703, + 890, + 794 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As demonstrated in Figure 3, we test various types of garments, some of which have rich textures, while others are nearly a single color. Learning UDFs for textureless models is more challenging since various regions of a model are ambiguous without clear color differences. However, our 2S-UDF generates satisfactory results even without masks. Though with mask supervision, the results of", + "bbox": [ + 496, + 795, + 893, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5088", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6e75436779e812106317753dd55b5af603b8f9b6bdd3d73e9de62d92abfb5d60.jpg", + "image_caption": [ + "Figure 3. Visual comparisons on selected models of the DeepFashion3D [43] dataset. The surfaces produced by NeuS and VolSDF are closed watertight models, thereby post-processing is required to remove the unnecessary parts. NeAT can produce open models by learning an SDF and predicting which surfaces in the extracted meshes should be removed, but it needs mask for supervision. NeuralUDF can generate open surfaces, but struggles with textureless inputs, leading to double-layered regions and large reconstruction errors. NeUDF generally performs well, but its training is unstable and may stumble on less distinguished, darker models like LS-D0. In contrast, our 2S-UDF consistently delivers effective reconstructions of non-watertight models. See the supplementary material for additional results." + ], + "image_footnote": [], + "bbox": [ + 99, + 85, + 880, + 324 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/04d8dfc5034a66bbcc23c3a95227ad5925a06feb7ff94eb62433c342d2e373e8.jpg", + "image_caption": [ + "Figure 4. Visualization of the learned UDFs on cross sections. 
Compared with NeuralUDF and NeUDF, our method learns UDFs that most closely resemble the ground truth. NeAT is omitted in this visualization because it learns SDFs instead of UDFs. Note that for LS-D0, NeUDF completely collapses without learning a reasonable UDF." + ], + "image_footnote": [], + "bbox": [ + 76, + 445, + 472, + 691 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "NeAT [27] are over-smoothed and missing details, resulting in large Chamfer distance errors. NeuralUDF [25] fails to properly reconstruct most of the textureless models, possibly because its complex density function is difficult to converge. Some of the NeUDF [23] models", + "bbox": [ + 75, + 824, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "become watertight. To analyze the reasons, we illustrate the cross sections of these UDFs in Figure 4. To compute the ground truth UDFs, we sample 30,000 points from every input point model and compute the distance to the nearest sample point for every point in a 3D grid of resolution $512 \times 512 \times 512$. All other UDFs are extracted by querying the distance neural network in a 3D grid of the same resolution. Our learned UDFs closely resemble the ground truth. In contrast, the UDFs of NeuralUDF deviate significantly from the ground truth, which explains its difficulty in converging. The UDFs of NeUDF are better, but the distances approach zero around open holes. As a result, it struggles to generate non-watertight models, and some of them are even closed. NeAT learns SDFs, so we do not show its distance fields.", + "bbox": [ + 496, + 449, + 893, + 676 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As illustrated in Figure 5, perhaps because it takes the absolute value of an MLP output as the UDF representation, NeuralUDF may generate two zero level-sets on either side of the surface, resulting in double-layered regions after Stage 1 learning. However, in its Stage 2 refinement, the surface is broken into pieces and the Chamfer distance error surges suddenly.", + "bbox": [ + 496, + 681, + 893, + 789 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Figure 6, we conduct additional experiments on open-model data provided by NeUDF [23]. For the rack model, the thin structures reconstructed by NeuralUDF [25] and NeUDF [23] appear eroded, whereas ours do not. The thin structures reconstructed by NeAT [27] are the closest to the reference image, but the surface is dented inward with visible artifacts due to imperfect SDF validity learning.", + "bbox": [ + 496, + 795, + 893, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "5089", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e250f6fc48e46af907d9111a4bdbb8adc50d42105537c6b4c5b5759c2c597d83.jpg", + "image_caption": [ + "Figure 5. Plots of the Chamfer distance throughout the training process. Our method consistently reduces CD across both stages. In contrast, NeuralUDF, which also adopts a two-stage learning strategy, exhibits instability and yields a fragmented output following the second stage. The first-stage output of NeuralUDF, however, contains double-layered regions as marked above. In this figure, both methods start their stage 2 training at 250k iterations."
+ ], + "image_footnote": [], + "bbox": [ + 81, + 133, + 243, + 220 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b08c5db4015974cecf2dd55bf7ef7ac0fa66b41467f17aa49dc19ac8215b6cb7.jpg", + "image_caption": [ + "Stage 1 NeuralUDF" + ], + "image_footnote": [], + "bbox": [ + 250, + 89, + 473, + 143 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5722455419a86704be598c7e0c84a72095e1ecc7e2c9d1c1b6116450a8c1e8bc.jpg", + "image_caption": [ + "Stage 1 2S-UDF" + ], + "image_footnote": [], + "bbox": [ + 250, + 166, + 473, + 220 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/78fc93ab36c6a9e2d739a90287bbe009216f95a90a4d62ec4efcea94810eac1f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method3755656997105106114118122Mean
NeuralUDF1.180.440.660.670.940.950.570.370.560.550.69
NeAT1.180.470.820.841.090.750.760.380.560.550.74
NeUDF0.900.650.730.971.070.630.940.590.720.620.78
Ours0.890.550.680.881.150.700.740.410.610.510.71
", + "bbox": [ + 84, + 364, + 457, + 431 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Chamfer distances on DTU dataset.", + "bbox": [ + 140, + 441, + 403, + 454 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The plant model does not have an object mask, making NeAT [27] impractical for training. NeuralUDF [25] completely fails to reconstruct a reasonable surface. Between our method and NeUDF [23] which can reconstruct a sensible model, the flower pot region marked in red is missing in NeUDF but not in ours. These show our method's ability to reconstruct non-watertight models more robustly compared to other methods.", + "bbox": [ + 75, + 479, + 468, + 599 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/43c4ebd9f38166d8e2c76d98ad88d6c9be7b273fb8040d09c8e0c49ee1277594.jpg", + "image_caption": [ + "Figure 6. Qualitative comparisons with NeAT [27], NeuralUDF [25] and NeUDF [23] on some example data released by NeUDF [23]. Note that NeAT cannot reconstruct \"plant\" dataset because the ground truth mask for \"plant\" is unavailable." + ], + "image_footnote": [], + "bbox": [ + 81, + 609, + 464, + 720 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Comparisons on Watertight Models", + "text_level": 1, + "bbox": [ + 76, + 816, + 387, + 832 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Other methods can also be used as the first stage of our 2S- UDF. We use NeUDF for the first stage training on the DTU dataset [19]. As detailed in Table 2, we compare the Chamfer distances of the reconstruction results with NeuralUDF,", + "bbox": [ + 75, + 839, + 468, + 898 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/625df05cda1eda0a5e170c400280861942e31769d997e9d5fe339a30328e2fd3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 88, + 887, + 157 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f475c8258dde381aa4725b94c3d0d8d675c4cf3f4e724f95df61deb96e55b5b8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 160, + 879, + 219 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0c5a010e174d1de796f8b345f9f3dbc0f36961289e5f10773f5eb798b2dda984.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 220, + 877, + 273 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5f6ce4e47ba03086b925e6e9a32a8198afad462ab494e647eede489bfa366ce8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 277, + 879, + 334 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e1bbcb01c969473cdd14228cef52d5c8678e3e921f30ed07c5c0b5a1d379b533.jpg", + "image_caption": [ + "Figure 7. Qualitative comparisons with NeAT, NeuralUDF and NeUDF on the DTU [19] dataset and close-up comparisons against NeUDF. Our method can reconstruct surfaces closer to the ground truth point clouds in various places such as the marked region, generally improving the reconstruction accuracy of NeUDF by around $10\\%$ , on a par with NeuralUDF and NeAT at the bottom two rows." + ], + "image_footnote": [], + "bbox": [ + 504, + 335, + 877, + 392 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "NeAT and NeUDF without our second-stage training. SDFs generally excel at learning watertight models, and it is worth pointing out that NeuralUDF takes the absolute value of the output of MLP as the UDF value of a given point. 
Therefore, for closed models, it can easily learn an SDF and take its absolute value to produce a UDF. NeAT, on the other hand, explicitly learns an SDF. NeUDF and our method truly learn UDFs. Although UDF learning is much more complicated than SDF learning, because the UDF gradient is ambiguous near zero and undefined at zero, our method still improves the reconstruction quality of NeUDF by around $10\%$ as shown in Figure 7. We further provide close-up views of specific parts of the models for detailed comparisons in Figure 7. These local callouts show the ground truth points located on both sides of our surfaces, whereas most of the points lie on only one side of the surfaces of NeUDF. This illustrates that our reconstructed surfaces are closer to the ground truth points, improving the resulting quality over NeUDF and putting it on a par with NeuralUDF and NeAT.", + "bbox": [ + 496, + 517, + 890, + 805 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Ablation Studies", + "text_level": 1, + "bbox": [ + 500, + 815, + 663, + 830 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we present the main ablation studies. We refer interested readers to the supplementary material for additional ablation studies.", + "bbox": [ + 498, + 839, + 890, + 883 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effect of the two-stage training. We conduct an ablation", + "bbox": [ + 519, + 885, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "5090", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/fccdf2f8004311c88d16e956b8ba4ca5b13a0550885a50583c39c822ce1b3b15.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method#1#7#8LS-D0
S1 & S24.552.883.212.46
S17.222.463.386.04
S25.754.005.963.65
MethodNS-D1LS-C1DTU 114DTU 122
S1 & S21.472.140.410.51
S11.466.230.590.62
S21.642.980.630.60
", + "bbox": [ + 78, + 88, + 470, + 215 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3. Chamfer distances of models learned by both Stage 1 and 2 (S1 & S2), only Stage 1 (S1) and only Stage 2 (S2) on selected datasets. Models learned by two stages yield similar Chamfer distances, but when trained with only Stage 1 or Stage 2, the Chamfer distances generally become significantly higher.", + "bbox": [ + 75, + 224, + 470, + 295 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "study on the effect of the two-stage learning. We compare the Chamfer distances among both two stages, only Stage 1 and only Stage 2 training, shown in Table 3. Our results show that two-stage training improves the Chamfer distance (lower is better) compared to training with only Stage 1 or 2, under most circumstances.", + "bbox": [ + 75, + 321, + 468, + 411 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "It should be noted that training by the second stage from scratch is also capable of generating a generally reasonable result. However, the Chamfer distances, as shown in Table 3, indicate that its learning ability is limited. Therefore, the second refinement learning stage should cooperate with the first coarse learning stage to generate the best results.", + "bbox": [ + 75, + 412, + 468, + 503 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Choice of accumulated weight threshold $\\delta_{thres}$ . In Stage 2, being a ray truncate point requires the accumulated weight up until that point to be greater than $\\delta_{thres}$ , where we intuitively select $\\delta_{thres} = 0.5$ . Figure 8 shows the reconstruction results for other choices of $\\delta_{thres}$ , namely 0.3 and 0.7, respectively. We observe that all threshold choices successfully reconstruct the model. Setting the threshold $\\delta_{thres}$ up to 0.7 produces visually similar results. Setting the threshold $\\delta_{thres}$ down to 0.3 also works fine generally despite that it may introduce more holes to the reconstructed meshes. We deduce that setting a lower threshold increases the possibility that a ray may be truncated prematurely, leading to less desirable results. Nevertheless, we still have a considerable range of $\\delta_{thres}$ from 0.3 to 0.7 without major result regression, indicating that our Stage 2 training exhibits robustness against $\\delta_{thres}$ .", + "bbox": [ + 75, + 503, + 468, + 746 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Limitations", + "text_level": 1, + "bbox": [ + 76, + 756, + 204, + 768 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Since the light is cut off after going through a layer of surface, our method relinquishes the ability to model planes with transparency. Occasionally, due to learning uncertainty, the Chamfer distance may increase slightly in the second stage, but the difference is quite small without visual impact. Overall, the two-stage learning improves the quality significantly. For watertight models, SDF learning is more suitable than UDF learning, since UDF learning is", + "bbox": [ + 75, + 779, + 468, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/769c7f1b165581d56d8ee2b70ab1418a2cfb555702c6ff80bad0ce9640a5c48a.jpg", + "image_caption": [ + "Figure 8. Qualitative comparisons on different choices of accumulated weight $\\delta_{\\text{thres}}$ . 
Setting a higher threshold works well, with little visual difference; setting a lower threshold generally works fine, but may introduce more holes in the reconstructed meshes." + ], + "image_footnote": [], + "bbox": [ + 552, + 89, + 843, + 253 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "more complicated than SDF learning. We still advise using SDF learning, e.g., NeuS [34], HF-NeuS [36] or PET-NeuS [37], for watertight model reconstruction. Also, the mesh extraction of MeshUDF [15] tends to generate holes and "staircase" artifacts affecting the mesh reconstruction quality. Adopting a more robust extraction method, e.g., DoubleCoverUDF [18], could alleviate the problem, but we use MeshUDF here for all methods for a fair comparison.", + "bbox": [ + 496, + 351, + 890, + 472 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 492, + 625, + 508 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Overall, 2S-UDF offers a promising approach to the problem of reconstructing both open and watertight models from multi-view images. Its advantages over existing methods lie in the use of a simple and more accurate density function, and a smooth differentiable UDF representation, so that the learned UDF approximates the ground truth as much as possible. A two-stage learning strategy further eliminates bias and improves UDF accuracy. Results from our experiments on the DeepFashion3D, DTU and BlendedMVS datasets demonstrate the effectiveness of our method, particularly in learning smooth and stable open UDFs, revealing the robustness of 2S-UDF. Moreover, our method does not rely on object masks for open model reconstruction, making it more practical in real-world applications.", + "bbox": [ + 496, + 520, + 892, + 731 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 500, + 752, + 658, + 768 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This project was supported in part by the National Natural Science Foundation of China under Grants (61872347, 62072446), in part by the National Key R&D Program of China under Grant 2023YFB3002901, in part by the Basic Research Project of ISCAS under Grant ISCAS-JCMS-202303 and in part by the Ministry of Education, Singapore, under its Academic Research Fund Grants (MOET2EP20220-0005, RG20/20 & RT19/22).", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "5091", + "bbox": [ + 482, + 944, + 513, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Dejan Azinović, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural RGB-D Surface Reconstruction. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6280-6291, 2022. 3", + "[2] Fausto Bernardini, Joshua Mittleman, Holly Rushmeier, Cláudio Silva, and Gabriel Taubin. The ball-pivoting algorithm for surface reconstruction. IEEE Trans. Vis. Comput. Graph., 5(4):349-359, 1999. 2", + "[3] A. Broadhurst, T.W. Drummond, and R. Cipolla. A probabilistic framework for space carving. In Int. Conf. Comput. Vis., pages 388-393 vol.1, 2001. 2", + "[4] Rohan Chabra, Jan E. Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep Local Shapes: Learning Local SDF Priors for Detailed 3D Reconstruction.
In Eur. Conf. Comput. Vis., pages 608-625, Cham, 2020. Springer International Publishing. 2", + "[5] Julian Chibane, Thiemo Alldieck, and Gerard Pons-Moll. Implicit Functions in Feature Space for 3D Shape Reconstruction and Completion. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6968-6979, 2020. 2", + "[6] Julian Chibane, Mohamad Aymen mir, and Gerard Pons-Moll. Neural Unsigned Distance Fields for Implicit Function Learning. In Adv. Neural Inform. Process. Syst., pages 21638-21652. Curran Associates, Inc., 2020. 3", + "[7] François Darmon, Bénédicte Bascle, Jean-Clement Devaux, Pascal Monasse, and Mathieu Aubry. Improving neural implicit surfaces geometry with patch warping. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6250-6259, 2022. 1, 2", + "[8] J. De Bonet and P. Viola. Roxels: responsibility weighted 3D volume reconstruction. In Int. Conf. Comput. Vis., pages 418-425 vol.1, 1999. 2", + "[9] Charles Dugas, Yoshua Bengio, François Bélisle, Claude Nadeau, and René Garcia. Incorporating Second-Order Functional Knowledge for Better Option Pricing. In Adv. Neural Inform. Process. Syst. MIT Press, 2000. 4", + "[10] Qiancheng Fu, Qingshan Xu, Yew Soon Ong, and Wenbing Tao. Geo-Neus: Geometry-Consistent Neural Implicit Surfaces Learning for Multi-view Reconstruction. In Adv. Neural Inform. Process. Syst., pages 3403–3416. Curran Associates, Inc., 2022. 2", + "[11] Kunihiko Fukushima. Cognitron: a self-organizing multilayered neural network. Biological Cybernetics, 20(3-4):121-136, 1975. 4", + "[12] Yasutaka Furukawa and Carlos Hernández. Multi-View Stereo: A Tutorial. Found. Trends. Comput. Graph. Vis., 9 (1-2):1-148, 2015. 2", + "[13] Silvano Galliani, Katrin Lasinger, and Konrad Schindler. Massively Parallel Multiview Stereopsis by Surface Normal Diffusion. In Int. Conf. Comput. Vis., pages 873-881, 2015. 2", + "[14] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit Geometric Regularization for Learning Shapes. In Proceedings of the 37th International Conference on Machine Learning, pages 3789-3799. PMLR, 2020. 5" + ], + "bbox": [ + 78, + 114, + 468, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Benoit Guillard, Federico Stella, and Pascal Fua. MeshUDF: Fast and Differentiable Meshing of Unsigned Distance Field Networks. In Eur. Conf. Comput. Vis., pages 576-592, Cham, 2022. Springer Nature Switzerland. 8", + "[16] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, 2 edition, 2004. 2", + "[17] Fei Hou, Chiyu Wang, Wencheng Wang, Hong Qin, Chen Qian, and Ying He. Iterative poisson surface reconstruction (iPSR) for unoriented points. ACM Trans. Graph., 41(4), 2022. 2", + "[18] Fei Hou, Xuhui Chen, Wencheng Wang, Hong Qin, and Ying He. Robust Zero Level-Set Extraction from Unsigned Distance Fields Based on Double Covering. ACM Trans. Graph., 42(6), 2023. 8", + "[19] Rasmus Jensen, Anders Dahl, George Vogiatzis, Engil Tola, and Henrik Aanæs. Large Scale Multi-view Stereopsis Evaluation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 406-413, 2014. 2, 5, 7", + "[20] Mengqi Ji, Jinzhi Zhang, Qionghai Dai, and Lu Fang. SurfaceNet+: An End-to-end 3D Neural Network for Very Sparse Multi-View Stereopsis. IEEE Trans. Pattern Anal. Mach. Intell., 43(11):4078-4093, 2021. 2", + "[21] Abhishek Kar, Christian Hane, and Jitendra Malik. Learning a Multi-View Stereo Machine. In Adv. Neural Inform. Process. Syst. Curran Associates, Inc., 2017. 
2", + "[22] Michael Kazhdan and Hugues Hoppe. Screenedoisson surface reconstruction. ACM Trans. Graph., 32(3), 2013. 2", + "[23] Yu-Tao Liu, Li Wang, Jie Yang, Weikai Chen, Xiaoxu Meng, Bo Yang, and Lin Gao. NeUDF: Leaning Neural Unsigned Distance Fields with Volume Rendering. In IEEE Conf. Comput. Vis. Pattern Recog., pages 237-247, 2023. 1, 2, 3, 4, 5, 6, 7", + "[24] Xiaoxiao Long, Cheng Lin, Peng Wang, Taku Komura, and Wenping Wang. SparseNeuS: Fast Generalizable Neural Surface Reconstruction from Sparse Views. In Eur. Conf. Comput. Vis., pages 210-227, Cham, 2022. Springer Nature Switzerland. 2", + "[25] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Yuan Liu, Peng Wang, Christian Theobalt, Taku Komura, and Wenping Wang. NeuralUDF: Learning Unsigned Distance Fields for Multi-View Reconstruction of Surfaces with Arbitrary Topologies. In IEEE Conf. Comput. Vis. Pattern Recog., pages 20834–20843, 2023. 1, 2, 3, 4, 5, 6, 7", + "[26] Baorui Ma, Zhizhong Han, Yu-Shen Liu, and Matthias Zwicker. Neural-Pull: Learning Signed Distance Function from Point clouds by Learning to Pull Space onto Surface. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, pages 7246-7257. PMLR, 2021. 2", + "[27] Xiaoxu Meng, Weikai Chen, and Bo Yang. NeAT: Learning Neural Implicit Surfaces with Arbitrary Topologies from Multi-View Images. In IEEE Conf. Comput. Vis. Pattern Recog., pages 248–258, 2023. 2, 5, 6, 7", + "[28] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy Networks: Learning 3D Reconstruction in Function Space. In" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "5092", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "IEEE Conf. Comput. Vis. Pattern Recog., pages 4455-4465, 2019. 2", + "[29] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In Eur. Conf. Comput. Vis., pages 405-421, Cham, 2020. Springer International Publishing. 1, 2", + "[30] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning Continuous Signed Distance Functions for Shape Representation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 165-174, 2019. 2", + "[31] Johannes L. Schonberger, Enliang Zheng, Jan-Michael Frahm, and Marc Pollefeys. Pixelwise View Selection for Unstructured Multi-View Stereo. In Eur. Conf. Comput. Vis., pages 501–518, Cham, 2016. Springer International Publishing. 2", + "[32] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit Neural Representations with Periodic Activation Functions. In Adv. Neural Inform. Process. Syst., pages 7462-7473. Curran Associates, Inc., 2020. 2", + "[33] Jiaming Sun, Yiming Xie, Linghao Chen, Xiaowei Zhou, and Hujun Bao. NeuralRecon: Real-Time Coherent 3D Reconstruction from Monocular Video. In IEEE Conf. Comput. Vis. Pattern Recog., pages 15593-15602, 2021. 2", + "[34] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. NeuS: Learning Neural Implicit Surfaces by Volume Rendering for Multi-view Reconstruction. In Adv. Neural Inform. Process. Syst., pages 27171-27183. Curran Associates, Inc., 2021. 1, 2, 5, 8", + "[35] Yifan Wang, Lukas Rahmann, and Olga Sorkine-Hornung. 
Geometry-Consistent Neural Shape Representation with Implicit Displacement Fields. In Int. Conf. Learn. Represent. OpenReview.net, 2022. 2", + "[36] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. HF-NeuS: Improved Surface Reconstruction Using High-Frequency Details. In Adv. Neural Inform. Process. Syst., pages 1966–1978. Curran Associates, Inc., 2022. 1, 2, 3, 5, 8", + "[37] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. PETNeuS: Positional Encoding Tri-Planes for Neural Surfaces. In IEEE Conf. Comput. Vis. Pattern Recog., pages 12598–12607, 2023. 2, 8", + "[38] Yao Yao, Zixin Luo, Shiwei Li, Tianwei Shen, Tian Fang, and Long Quan. Recurrent MVSNet for High-Resolution Multi-View Stereo Depth Inference. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5520–5529, 2019. 2", + "[39] Yao Yao, Zixin Luo, Shiwei Li, Jingyang Zhang, Yufan Ren, Lei Zhou, Tian Fang, and Long Quan. BlendedMVS: A Large-Scale Dataset for Generalized Multi-View Stereo Networks. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1787–1796, 2020. 5", + "[40] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume Rendering of Neural Implicit Surfaces. In Adv. Neural Inform. Process. Syst., pages 4805-4815. Curran Associates, Inc., 2021. 1, 2, 5" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[41] Fang Zhao, Wenhao Wang, Shengcai Liao, and Ling Shao. Learning Anchored Unsigned Distance Functions with Gradient Direction Alignment for Single-view Garment Reconstruction. In Int. Conf. Comput. Vis., pages 12654-12663, 2021. 3", + "[42] Junsheng Zhou, Baorui Ma, Yu-Shen Liu, Yi Fang, and Zhizhong Han. Learning Consistency-Aware Unsigned Distance Functions Progressively from Raw Point Clouds. In Adv. Neural Inform. Process. Syst., pages 16481-16494. Curran Associates, Inc., 2022. 3", + "[43] Heming Zhu, Yu Cao, Hang Jin, Weikai Chen, Dong Du, Zhangye Wang, Shuguang Cui, and Xiaoguang Han. Deep Fashion3D: A Dataset and Benchmark for 3D Garment Reconstruction from Single Images. In Eur. Conf. Comput. Vis., pages 512-530, Cham, 2020. Springer International Publishing. 2, 5, 6" + ], + "bbox": [ + 501, + 92, + 890, + 316 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "5093", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_model.json b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c5602a2bd6e10098df2796728a09d91a1bbb29a9 --- /dev/null +++ b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_model.json @@ -0,0 +1,2167 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.002, + 0.808, + 0.02 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation." 
+ }, + { + "type": "header", + "bbox": [ + 0.325, + 0.017, + 0.723, + 0.031 + ], + "angle": 0, + "content": "Except for this watermark, it is identical to the accepted version;" + }, + { + "type": "header", + "bbox": [ + 0.294, + 0.032, + 0.754, + 0.046 + ], + "angle": 0, + "content": "the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.13, + 0.882, + 0.177 + ], + "angle": 0, + "content": "2S-UDF: A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.203, + 0.815, + 0.222 + ], + "angle": 0, + "content": "Junkai Deng1,2 Fei Hou1,2* Xuhui Chen1,2 Wencheng Wang1,2 Ying He3" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.222, + 0.868, + 0.24 + ], + "angle": 0, + "content": "\\(^{1}\\)State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.307, + 0.239, + 0.663, + 0.257 + ], + "angle": 0, + "content": "\\(^{2}\\)University of Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.257, + 0.81, + 0.275 + ], + "angle": 0, + "content": "\\(^{3}\\)School of Computer Science and Engineering, Nanyang Technological University" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.278, + 0.72, + 0.293 + ], + "angle": 0, + "content": "{dengjk,houfei,chenxh,whn}@ios.ac.cn yhe@ntu.edu.sg" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.327, + 0.313, + 0.343 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.359, + 0.474, + 0.798 + ], + "angle": 0, + "content": "Recently, building on the foundation of neural radiance field, various techniques have emerged to learn unsigned distance fields (UDF) to reconstruct 3D non-watertight models from multi-view images. Yet, a central challenge in UDF-based volume rendering is formulating a proper way to convert unsigned distance values into volume density, ensuring that the resulting weight function remains unbiased and sensitive to occlusions. Falling short on these requirements often results in incorrect topology or large reconstruction errors in resulting models. This paper addresses this challenge by presenting a novel two-stage algorithm, 2S-UDF, for learning a high-quality UDF from multi-view images. Initially, the method applies an easily trainable density function that, while slightly biased and transparent, aids in coarse reconstruction. The subsequent stage then refines the geometry and appearance of the object to achieve a high-quality reconstruction by directly adjusting the weight function used in volume rendering to ensure that it is unbiased and occlusion-aware. Decoupling density and weight in two stages makes our training stable and robust, distinguishing our technique from existing UDF learning approaches. Evaluations on the DeepFashion3D, DTU, and BlendedMVS datasets validate the robustness and effectiveness of our proposed approach. In both quantitative metrics and visual quality, the results indicate our superior performance over other UDF learning techniques in reconstructing 3D non-watertight models from multi-view images. Our code is available at https://bitbucket.org/jkdeng/2sudf/." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.824, + 0.21, + 0.839 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.849, + 0.47, + 0.88 + ], + "angle": 0, + "content": "As the success of neural radiance field (NeRF) [29], numerous volume rendering based 3D modeling methods are pro" + }, + { + "type": "image_caption", + "bbox": [ + 0.546, + 0.327, + 0.571, + 0.339 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.344, + 0.596, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.411, + 0.596, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.632, + 0.327, + 0.666, + 0.34 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.343, + 0.685, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.614, + 0.411, + 0.685, + 0.454 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.701, + 0.327, + 0.783, + 0.34 + ], + "angle": 0, + "content": "NeuralUDF" + }, + { + "type": "image", + "bbox": [ + 0.707, + 0.343, + 0.777, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.411, + 0.777, + 0.454 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.807, + 0.327, + 0.862, + 0.34 + ], + "angle": 0, + "content": "NeUDF" + }, + { + "type": "image", + "bbox": [ + 0.801, + 0.343, + 0.871, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.8, + 0.411, + 0.871, + 0.454 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.472, + 0.892, + 0.57 + ], + "angle": 0, + "content": "Figure 1. We learn a UDF from multiview images for nonwatertight model reconstruction. As illustrated in the cross sections of learned UDFs, our learned UDF approximates to the ground truth. In contrast, the learned UDF of NeuralUDF [25] is choppy leading to significant artifacts, e.g., unexpected pit. The learned UDF of NeUDF [23] is almost closed struggling to generate open surface." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.893, + 0.854 + ], + "angle": 0, + "content": "posed to learn signed distance fields (SDF) for 3D model reconstruction from multi-view images [7, 34, 36, 40]. These approaches map signed distance value to a density function, thereby enabling the use of volume rendering to learn an implicit SDF representation. To calculate pixel colors, they compute the weighted sum of radiances along each light ray. Achieving an accurate surface depiction requires the density function to meet three essential criteria. Firstly, the weights, which are derived from the density function, must reach their maximum value when the distance is zero, ensuring unbiasedness. Secondly, as a ray traverses through the surface, the accumulated density should tend towards infinity, rendering the surface opaque — a property referred to as occlusion-awareness. Finally, the density function should be bounded to prevent numerical issues. The popular SDF approaches, such as NeuS [34] and VolSDF [40], adopt an S-shaped density function that meets all these requirements." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.894, + 0.9 + ], + "angle": 0, + "content": "While SDF-based methods excel at reconstructing watertight models, they have limitations in representing open models. 
This is due to the intrinsic nature of SDF, which" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.888, + 0.221, + 0.9 + ], + "angle": 0, + "content": "*Corresponding author" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5084" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.081, + 0.092, + 0.468, + 0.453 + ], + "angle": 0, + "content": "differentiates between the interior and exterior of a model, thus failing to accommodate open boundaries. Recent advances have attempted to mitigate this constraint by employing unsigned distance fields (UDF) [23, 25, 27]. Unlike signed distance fields, UDFs have non-negative distance values, making them suitable for representing nonwatertight models. However, learning a UDF from multiview images is a challenging task since the gradients of the UDF are unstable due to directional changes near the zero level-set, making it difficult to train the neural network. Another major challenge lies in formulating a UDF-induced density function that can simultaneously meet the above-mentioned three requirements. Unlike SDFs, UDFs cannot distinguish between the front and back of a surface based on distance values, thus, directly using an S-shaped density function is off the table. Opting for a bell-shaped density function brings its own issues. It is impossible for these integrations to approach infinity, so as to be occlusion-aware, unless the density becomes boundless at zero distance values. These conflicting requirements make UDF learning a non-trivial task, forcing existing methods to sacrifice at least one of these conditions. As shown in Figure 1, the existing methods NeuralUDF [25] and NeUDF [23] result in either choppy or nearly closed UDFs." + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.455, + 0.468, + 0.831 + ], + "angle": 0, + "content": "As designing a UDF-induced density function that simultaneously fulfills the three aforementioned conditions remains an unresolved challenge, we propose a novel approach that learns a UDF from multi-view images in two separate stages. In the first stage, we apply an easily trainable but slightly biased and transparent density function for coarse reconstruction. Such a UDF, although being approximate, provides an important clue so that we can determine where to truncate the light rays. This accounts for the occlusion effect, where points behind the surface are not visible and should not contribute to the output color. With truncated light rays, we are able to derive the weights from UDF directly bypassing the density function, to further refine the geometry and appearance in the second stage. Our two-stage learning method, called 2S-UDF, leads to an unbiased and occlusion-aware weight function. Furthermore, by sidestepping density function learning in Stage 2, we effectively bypass the challenges associated with ensuring its boundedness. This strategy enhances the numerical stability of our method. Evaluations on benchmark datasets DeepFashion3D [43] and DTU [19] show that 2S-UDF outperforms existing UDF learning methods in terms of both reconstruction accuracy and visual quality. Additionally, we observe that the training stability of 2S-UDF is notably superior compared to other UDF learning neural networks." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.846, + 0.217, + 0.86 + ], + "angle": 0, + "content": "2. 
Related Work" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.872, + 0.468, + 0.9 + ], + "angle": 0, + "content": "3D Reconstruction from Multi-View Images. Surface reconstruction from multi-view images has been a subject of" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.092, + 0.89, + 0.302 + ], + "angle": 0, + "content": "study for several decades, and can generally be classified into two categories: voxel-based and point-based methods. Voxel-based methods [3, 8, 20, 21, 33] divide the 3D space into voxels and determine which ones belong to the object. These methods can be computationally expensive and may not be suitable for reconstructing complex surfaces. Point-based methods [13, 31, 38] use structure-from-motion [16] to calibrate the images and generate a dense point cloud using multi-view stereo [12]. Finally, surface reconstruction methods (e.g., [2, 17, 22]) are used to generate a mesh. Since multi-view stereo requires dense correspondences to generate a dense point cloud, which are often difficult to compute, its results often contain various types of artifacts, such as noise, holes, and incomplete structures." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.308, + 0.89, + 0.789 + ], + "angle": 0, + "content": "Neural Volume Rendering. Neural network-based 3D surface reconstruction has received attention in recent years with the emergence of neural rendering [29]. Several methods have been proposed for volume rendering and surface reconstruction using neural networks. VolSDF [40] uses the cumulative distribution function of Laplacian distribution to evaluate the density function from SDF for volume rendering and surface reconstruction. NeuS [34] adopts an unbiased density function to the first-order approximation of SDFs for more accurate reconstruction. SparseNeuS [24] extends NeuS to use fewer images for reconstruction. HFNeuS [36] improves NeuS by proposing a simplified and unbiased density function and using hierarchical multilayer perceptrons (MLPs) for detail reconstruction. GeoNeuS [10] incorporates structure-from-motion to add more constraints. NeuralWarp [7] improves the accuracy by optimizing consistency between warped views of different images. PET-NeuS [37] further improves the accuracy by introducing tri-planes into the SDF prediction module, incorporating with MLP. All these methods learn SDFs, which can only reconstruct watertight models. Recently, Long et al. proposed NeuralUDF [25] for learning UDF for reconstructing open models. It adapts the S-shaped density function for learning SDF to UDFs by introducing an indicator function. However, the indicator function is complicated to learn, and also introduces biases. Liu et al. proposed NeUDF [23] adopting a bell-shaped density. However, to make it occlusion-aware, the density has to be unbounded resulting in an improper integral, which reduces accuracy. Meng et al. proposed NeAT [27] to learn SDF with validity so as to reconstruct open models from SDF. However, it needs foreground masks for data." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.796, + 0.89, + 0.9 + ], + "angle": 0, + "content": "3D Reconstruction from Point Clouds. There has been recent interest in surface representation using signed distance fields (SDFs) and occupation fields. Several methods have been proposed for learning SDFs [4, 26, 30, 32, 35], while occupation fields have been used in methods such as [5, 28]. However, both SDFs and occupation fields can only represent watertight models. 
To represent non-watertight" + }, + { + "type": "page_number", + "bbox": [ + 0.485, + 0.947, + 0.514, + 0.957 + ], + "angle": 0, + "content": "5085" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.168 + ], + "angle": 0, + "content": "models, some methods are proposed to learn UDF from 3D point clouds [6, 41, 42]. Our proposed method also uses UDF for non-watertight models representation, but we learn it directly from multi-view images, which is a challenging problem." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.179, + 0.168, + 0.194 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.204, + 0.469, + 0.337 + ], + "angle": 0, + "content": "At the foundation of UDF-based learning approaches is the task of crafting a density function that converts unsigned distance values into volume density, ensuring that the resulting weight function is unbiased and responsive to occlusions. None of the existing UDF learning methods [23, 25] can simultaneously meet the three critical requirements, i.e., ensuring the density function is bounded, and that the weight function remains both unbiased and occlusion aware." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.34, + 0.47, + 0.581 + ], + "angle": 0, + "content": "We tackle these challenges by decoupling the density function and weight function across two stages. In the initial stage (Section 3.1), we utilize an easy-to-train, bell-shaped density function (which is inherently bounded) to learn a coarse UDF. While the resulting weight function is not theoretically unbiased or occlusion-aware, we can make it practically usable by choosing a proper parameter. Moving into the second stage (Section 3.2), we sidestep the density function entirely, focusing instead on refining the UDF by directly adjusting the weight function within the neural volume rendering framework. Specifically, we truncate light rays after they hit the front side of the object and obtain a weight function that is both unbiased and sensitive to occlusions, without the overhang of density function boundedness concerns. Finally, Section 3.3 presents the training details." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.588, + 0.47, + 0.62 + ], + "angle": 0, + "content": "3.1. Stage 1: Coarse UDF Learning via a Simple Density Function" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.627, + 0.469, + 0.687 + ], + "angle": 0, + "content": "We consider the scenario of a single planar plane \\(\\mathcal{M}\\) and a single ray-plane intersection. Inspired by HF-NeuS [36], we propose an easy-to-learn density function \\(\\sigma_{1}\\) that maps unsigned distance \\(f\\) to density" + }, + { + "type": "equation", + "bbox": [ + 0.14, + 0.693, + 0.469, + 0.725 + ], + "angle": 0, + "content": "\\[\n\\sigma_ {1} (f (t)) = \\frac {c s e ^ {- s f (t)}}{1 + e ^ {- s f (t)}}, s > 0, c > 0, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.73, + 0.47, + 0.903 + ], + "angle": 0, + "content": "where \\( c > 0 \\) is a fixed, user-specified parameter and \\( s > 0 \\) is a learnable parameter controlling the width of the bell-shaped curve. 
Straightforward calculation shows that the weight function \\( w_{1}(f(t)) = e^{-\\int_{0}^{t}\\sigma_{1}(f(u))\\mathrm{d}u}\\sigma_{1}(f(t)) \\) is monotonically decreasing behind the plane \\( \\mathcal{M} \\) and the maximum value occurs at a point \\( t^* \\) in front of \\( \\mathcal{M} \\) with an unsigned distance value of \\( f(t^{*}) = \\frac{1}{s}\\ln \\frac{c}{|\\cos(\\theta)|}, (c > |\\cos (\\theta)|) \\) or \\( f(t^{*}) = 0, (0 < c \\leq |\\cos (\\theta)|) \\), where \\( \\theta \\) is the incident angle between the light ray and the surface normal. This means that the weight function \\( w_{1} \\) is not unbiased. Furthermore, the line integral \\( \\int_0^t\\sigma_1(f(u))\\mathrm{d}u \\) does" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.891, + 0.136 + ], + "angle": 0, + "content": "not approach infinity when a light ray passes through the front-most layer of the surface, indicating \\( w_{1} \\) is only partially occlusion-aware." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.137, + 0.892, + 0.318 + ], + "angle": 0, + "content": "While the density function \\(\\sigma_{1}\\) is not perfect in theory, by selecting an appropriate \\(c\\), we can practically minimize bias and enhance opacity. Clearly, a smaller \\(c\\) value decreases \\(f(t^{*})\\), thereby reducing bias. To gauge the effect of \\(c\\) on opacity, we now consider the most extreme scenario where the incident light ray is perpendicular to the planar surface \\(\\mathcal{M}\\), and assume that the intersection point is located at \\(t = 1\\). In such a situation, the unsigned distance function is \\(f(t) = 1 - t\\) for points in front of \\(\\mathcal{M}\\). Since \\(\\sigma_{1}\\) is symmetrical on either side of \\(\\mathcal{M}\\), the surface transparency is the square of the transparency of the front side. The theoretic transparency is," + }, + { + "type": "equation", + "bbox": [ + 0.511, + 0.33, + 0.882, + 0.405 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left(e ^ {- \\int_ {0} ^ {1} \\hat {\\sigma} _ {1} (f (t)) \\mathrm {d} t}\\right) ^ {2} = \\left[ \\exp \\left(- \\int_ {0} ^ {1} \\frac {c s e ^ {- s (1 - t)}}{1 + e ^ {- s (1 - t)}} \\mathrm {d} t\\right) \\right] ^ {2} \\\\ = \\left(\\frac {1 + e ^ {- s}}{2}\\right) ^ {2 c}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.416, + 0.892, + 0.537 + ], + "angle": 0, + "content": "Therefore, we should choose a relatively large \\( c \\) to reduce transparency. In our implementation, we set the constant \\( c = 5 \\) based on the typical value of the learned parameter \\( s \\) which usually ranges between 1000 and 2000. Calculations of bias and translucency show that this setting offers a good balance between occlusion-awareness and unbiasedness in the first stage training. Please refer to the supplementary material for a detailed analysis." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.547, + 0.891, + 0.579 + ], + "angle": 0, + "content": "3.2. Stage 2: UDF Refinement through Weight Adjustment" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.586, + 0.891, + 0.676 + ], + "angle": 0, + "content": "In this stage, we refine the UDF learned in Stage 1 to improve the quality of geometry and appearance. Unlike Stage 1 and all other UDF-learning methods, inspired by [1], we truncate light rays based on the approximated UDF learned in Stage 1 and learn the weight function \\( w(t) \\) directly instead of the density function \\( \\sigma(t) \\) to refine the UDF." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.678, + 0.892, + 0.753 + ], + "angle": 0, + "content": "Ideally, for a single ray-plane intersection, we want a bell-shaped function \\( w(t) \\) that attains its maximum at the points with zero distance values, and satisfies partition of unity. Therefore, we adopt the derivative of the sigmoid function as the weight function [1], defined as" + }, + { + "type": "equation", + "bbox": [ + 0.567, + 0.765, + 0.891, + 0.8 + ], + "angle": 0, + "content": "\\[\nw _ {2} (f (t)) = \\frac {s e ^ {- s f (t)}}{(1 + e ^ {- s f (t)}) ^ {2}} \\cdot | \\cos (\\theta) |. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.891, + 0.839 + ], + "angle": 0, + "content": "with \\(\\theta\\) being the incident angle between the light ray and the surface normal." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Intuitively speaking, learning such a weight function \\( w_{2} \\) in Stage 2 of our UDF method is similar to learning an S-shaped density function in SDF-based approaches, such as [36]. As a result, the learning process in Stage 2 is as" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "5086" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.137 + ], + "angle": 0, + "content": "stable as those SDF approaches. Furthermore, it can totally avoid using the visibility indicator function, which is necessary in NeuralUDF [25]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.139, + 0.47, + 0.365 + ], + "angle": 0, + "content": "Calculation shows that the weight \\( w_{2} \\) attains its maximum at zero distance values, therefore it is unbiased. However, if we naively predict the weight function directly, it will not be occlusion-aware, so we introduce the ray truncation. To make \\( w_{2} \\) occlusion-aware, we can truncate the light rays after they pass through the frontmost layer of the surface, thereby preventing rendering the interior of the object. Note that we do not expect the truncation to be exactly on the frontmost layer of the surface. In fact, as long as it occurs between the frontmost layer and the second layer, we consider the truncation valid. This means that the approximate UDF learned in the first stage, which can capture the main topological features (such as boundaries) and provide a fairly good representation of the target object, is sufficient for us to determine where to cut off the light rays." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.367, + 0.469, + 0.426 + ], + "angle": 0, + "content": "In our implementation, we adopt a simple strategy to determine the truncation point for each light ray. Specifically, the truncation point of ray \\(\\mathbf{r}\\) is the first sample point along \\(\\mathbf{r}\\) such that" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.429, + 0.468, + 0.473 + ], + "angle": 0, + "content": "- The unsigned distance value at the point is a local maxima. To avoid distance vibration interference, it should be the maximum in a window centered at the point. And" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.475, + 0.468, + 0.504 + ], + "angle": 0, + "content": "- The accumulated weight up to this point is greater than \\(\\delta_{\\text{thres}}\\)." 
+ }, + { + "type": "list", + "bbox": [ + 0.078, + 0.429, + 0.468, + 0.504 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.506, + 0.469, + 0.671 + ], + "angle": 0, + "content": "The accumulated weight threshold \\(\\delta_{\\text{thres}}\\) is intuitively set to 0.5. This choice is based on the assumption that if the Stage 1 training is performed well enough, the accumulated weights at each sample point along the ray would be either 0 (for not reaching a surface) or 1 (for having intersected with a surface). Hence, we intuitively select 0.5 for \\(\\delta_{\\text{thres}}\\) because it is the midpoint between 0 and 1. With the cutoff mechanism, only the first ray-surface intersection contributes to the color of the ray, effectively achieving occlusion-awareness. Given these properties, we conclude that," + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.47, + 0.719 + ], + "angle": 0, + "content": "Theorem 1 The weight \\( w_{2} \\) with light cutting off is unbiased and occlusion-aware." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.469, + 0.9 + ], + "angle": 0, + "content": "Figure 2 is an intuitive illustration of our Stage 2 weight learning and truncation strategy. The UDF maxima point \\( A \\) in front of the intersection surface would not affect the cutting point selection as the accumulated weight is below \\( \\delta_{\\text{thres}} \\) (0.5). The local maxima \\( B \\) due to UDF oscillation also would not affect it since it's not the maximum in a large enough neighborhood. The light is cut at maxima point \\( C \\), and thus the weight of point \\( D \\) is zero without contributions to the rendering. As illustrated in Figure 2, the cutting process is robust against UDF oscillation, open boundaries, and local maxima in front of the intersection surface." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.099, + 0.878, + 0.208 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.218, + 0.892, + 0.302 + ], + "angle": 0, + "content": "Figure 2. An intuitive illustration of our ray cutting algorithm, best viewed in color and magnified. A ray shoots from left to right, approaching the boundary of the first surface, and going through another two surfaces (gray boxes). The violet solid line represents the UDF values along the ray; the orange dashed line represents the corresponding color weight." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.326, + 0.604, + 0.342 + ], + "angle": 0, + "content": "3.3. Training" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.349, + 0.892, + 0.424 + ], + "angle": 0, + "content": "Differentiable UDFs. NeuS uses an MLP network to learn the signed distance function \\( f \\), which is a differentiable function. In contrast, UDF is not differentiable at the zero level set, making the network difficult to learn the values and gradients of the UDF close to the zero level set." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.425, + 0.893, + 0.605 + ], + "angle": 0, + "content": "Another crucial requirement is to ensure non-negative values for the computed distances, which seems like a trivial task as one may simply apply absolute value or normalization such as ReLU [11] to the MLP output. However, applying the absolute value to the distance is not viable due to its non-differentiability at zero. 
Similarly, normalizing the output value using ReLU is not feasible as it is also non-differentiable at zero and its gradient vanishes for negative inputs. This can be particularly problematic for learning UDFs, since when the MLP returns a negative distance value, the ReLU gradient vanishes, hindering the update of the distance to a positive value in the subsequent iterations." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.606, + 0.892, + 0.756 + ], + "angle": 0, + "content": "We add a softplus [9] function after the output layer of the MLP [23]. The softplus function is a smooth and differentiable approximation of the ReLU function, which is defined as \\(\\mathrm{softplus}(x) = \\frac{1}{\\beta}\\ln (1 + e^{\\beta x})\\). Softplus has the same shape as ReLU, but it is continuous and differentiable at every point and its gradients do not vanish anywhere. Using the softplus function allows us to ensure that the output of the MLP is non-negative and differentiable, making it suitable for learning the UDF. Similar to NeUDF [23], we set \\(\\beta = 100\\) in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.757, + 0.892, + 0.832 + ], + "angle": 0, + "content": "Loss functions. Following NeuralUDF [25], we adopt an iso-surface regularizer to penalize the UDF values of the non-surface points from being zero, therefore encouraging smooth and clean UDFs. The regularization loss is defined as [25]" + }, + { + "type": "equation", + "bbox": [ + 0.572, + 0.84, + 0.819, + 0.876 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {r e g} = \\frac {1}{M N} \\sum_ {i, k} \\exp (- \\tau \\cdot f (t _ {i, k})),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.886, + 0.892, + 0.9 + ], + "angle": 0, + "content": "where \\(\\tau\\) is a constant scalar that scales the learned UDF" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5087" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "values, \\( M \\) is the total number of sampled rays per training iteration, and \\( N \\) is the number of sampled points on a single ray. \\( \\tau \\) is set to 5.0 in the first stage and 50.0 in the second stage." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.152, + 0.471, + 0.273 + ], + "angle": 0, + "content": "The value of \\( s \\), which is learnable in our method, significantly affects the quality of the reconstruction. When \\( s \\) is small, it introduces a larger bias and leads to a more blurred output. We observe that \\( s \\) typically converges to a relatively large value between 1000 and 2000, leading to visually pleasing results. However, in rare cases when \\( s \\) stops increasing during training, we apply a penalty to force it to increase. The penalty is defined as follows" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.281, + 0.341, + 0.318 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {s} = \\frac {1}{M} \\sum_ {i, k} \\frac {1}{s _ {i , k}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.328, + 0.47, + 0.448 + ], + "angle": 0, + "content": "where \\(M\\) is the number of rays during a training epoch. This term \\(\\mathcal{L}_s\\) aggregates the reciprocals of all \\(s\\) values used for the point \\(t_{i,k}\\) on ray \\(r_i\\). Intuitively speaking, it encourages a larger \\(s\\) during the early stage of training. 
In our implementation, we make this term optional since \\(s\\) generally increases with a decreasing rate during training, and the penalty term is only necessary in rare cases when \\(s\\) stops at a relatively low value." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.449, + 0.471, + 0.601 + ], + "angle": 0, + "content": "As in other SDF- and UDF-based methods [25, 34, 36], we adopt color loss and Eikonal loss in our approach. Specifically, the color loss \\(\\mathcal{L}_{color}\\) is the \\(L_{1}\\) loss between the predicted color and the ground truth color of a single pixel as used in [34]. The Eikonal loss \\(\\mathcal{L}_{eik}\\) is used to regularize the learned distance field to have a unit gradient [14]. Users may also choose to adopt object masks for supervision as introduced in other SDF- and UDF-based methods [25, 34]. Putting it all together, we define the combined loss function as a weighted sum," + }, + { + "type": "equation", + "bbox": [ + 0.088, + 0.61, + 0.459, + 0.627 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {\\text {c o l o r}} + \\lambda_ {1} \\mathcal {L} _ {\\text {e i k}} + \\lambda_ {2} \\mathcal {L} _ {\\text {r e g}} + \\lambda_ {3} \\mathcal {L} _ {s} (+ \\lambda_ {m} \\mathcal {L} _ {\\text {m a s k}}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.636, + 0.47, + 0.667 + ], + "angle": 0, + "content": "where \\(\\lambda_1, \\lambda_2, \\lambda_3\\) and the optional \\(\\lambda_m\\) are hyperparameters that control the weight of each loss term." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.68, + 0.21, + 0.696 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Datasets. To evaluate our method, we use three datasets: DeepFashion3D [43], DTU [19] and BlendedMVS [39]. The DeepFashion3D dataset consists of clothing models, which are open models with boundaries. As only 3D points are available, we render 72 images of resolution \\(1024 \\times 1024\\) with a white background from different viewpoints for each model. In addition to DeepFashion3D images rendered by us most of which are texture-less, we also take the image data from NeuralUDF [25] most of which are texture-rich into our experiments. We call them DF3D#Ours and DF3D#NeuralUDF, respectively. The DTU dataset consists of models captured in a studio, all of which are watertight. We use this dataset to validate that our method also works" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.166 + ], + "angle": 0, + "content": "well for watertight models. These datasets have been widely used in previous works such as [34, 36, 40]. In our experiments, open models such as in DeepFashion3D are trained without mask supervision; DTU is trained with mask supervision." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.168, + 0.894, + 0.229 + ], + "angle": 0, + "content": "Baselines. To validate the effectiveness of our method, we compare it with state-of-the-art UDF learning methods: NeuralUDF [25], NeUDF [23] and NeAT [27]; and SDF learning methods: VolSDF [40] and NeuS [34]." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.239, + 0.771, + 0.256 + ], + "angle": 0, + "content": "4.1. Comparisons on Open Models" + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.276, + 0.907, + 0.453 + ], + "angle": 0, + "content": "
<table><tr><td>Method</td><td>#1</td><td>#2</td><td>#3</td><td>#4</td><td>#5</td><td>#6</td><td>#7</td><td>#8</td><td>#9</td><td>Mean</td></tr>
<tr><td>NeuS</td><td>6.69</td><td>13.50</td><td>10.32</td><td>15.01</td><td>8.99</td><td>12.92</td><td>12.94</td><td>9.93</td><td>9.49</td><td>11.09</td></tr>
<tr><td>VolSDF</td><td>6.36</td><td>9.44</td><td>11.87</td><td>16.03</td><td>10.78</td><td>14.91</td><td>15.06</td><td>11.34</td><td>8.96</td><td>11.64</td></tr>
<tr><td>NeAT</td><td>10.54</td><td>13.89</td><td>7.30</td><td>13.12</td><td>13.18</td><td>12.44</td><td>8.22</td><td>10.30</td><td>11.33</td><td>11.15</td></tr>
<tr><td>NeuralUDF</td><td>6.07</td><td>11.58</td><td>7.68</td><td>10.96</td><td>11.16</td><td>9.76</td><td>6.98</td><td>6.13</td><td>6.41</td><td>8.53</td></tr>
<tr><td>NeUDF</td><td>4.39</td><td>8.29</td><td>4.94</td><td>19.56</td><td>7.52</td><td>8.18</td><td>3.81</td><td>3.81</td><td>5.76</td><td>7.36</td></tr>
<tr><td>Ours</td><td>4.55</td><td>5.77</td><td>4.27</td><td>7.43</td><td>6.59</td><td>4.77</td><td>2.88</td><td>3.21</td><td>5.73</td><td>5.02</td></tr>
<tr><td>Method</td><td>LS-C0</td><td>SS-D0</td><td>LS-D0</td><td>NS-D1</td><td>LS-C1</td><td>Skirt1</td><td>SS-C0</td><td>Mean</td></tr>
<tr><td>NeuS</td><td>3.18</td><td>4.82</td><td>5.71</td><td>2.21</td><td>3.60</td><td>2.44</td><td>5.13</td><td>3.87</td></tr>
<tr><td>VolSDF</td><td>5.92</td><td>4.79</td><td>5.96</td><td>4.36</td><td>8.73</td><td>7.74</td><td>8.84</td><td>6.62</td></tr>
<tr><td>NeAT</td><td>3.06</td><td>4.33</td><td>5.92</td><td>3.52</td><td>8.84</td><td>3.91</td><td>4.30</td><td>4.84</td></tr>
<tr><td>NeuralUDF</td><td>1.92</td><td>2.05</td><td>4.11</td><td>1.50</td><td>2.47</td><td>2.16</td><td>2.15</td><td>2.34</td></tr>
<tr><td>NeUDF</td><td>1.95</td><td>2.93</td><td>N.A.</td><td>1.48</td><td>2.66</td><td>2.74</td><td>1.77</td><td>2.26</td></tr>
<tr><td>Ours</td><td>1.92</td><td>1.97</td><td>2.46</td><td>1.47</td><td>2.14</td><td>1.84</td><td>1.91</td><td>1.96</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.462, + 0.894, + 0.504 + ], + "angle": 0, + "content": "Table 1. Chamfer distances \\((\\times 10^{-3})\\) on DF3D#Ours (top) and DF3D#NeuralUDF (bottom). NeAT requires mask supervision and others do not need." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.522, + 0.892, + 0.582 + ], + "angle": 0, + "content": "We evaluate our method and compare it with baselines using the garments from DeepFashion3D [43], where the models have multiple open boundaries. VolSDF and NeuS always close the boundaries since they learn SDFs." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.583, + 0.892, + 0.703 + ], + "angle": 0, + "content": "NeuralUDF, NeUDF and NeAT are designed to learn non-watertight models. NeAT learns SDFs for open models, and requires mask supervision to produce reasonable results, but other methods do not require mask supervision for DeepFashion3D. The released codebase of NeuralUDF indicates that it also has a two-stage training process. We evaluate the results of NeuralUDF at the end of both stages, and present whichever is better." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.704, + 0.892, + 0.795 + ], + "angle": 0, + "content": "In contrast, NeuralUDF, NeUDF and our method learn UDFs, which can generate open models. Table 1 shows the Chamfer distances of the results on DeepFashion3D. Some of the Chamfer distances of the compared methods are large because the open holes are closed or the model is over-smoothed, resulting in significant errors." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.895, + 0.901 + ], + "angle": 0, + "content": "As demonstrated in Figure 3, we test various types of garments, some of which have rich textures, while others are nearly a single color. Learning UDFs for textureless models is more challenging since various regions of a model are ambiguous without clear color differences. However, our 2S-UDF generates satisfactory results even without masks. Though with mask supervision, the results of" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5088" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.1, + 0.086, + 0.882, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.339, + 0.896, + 0.427 + ], + "angle": 0, + "content": "Figure 3. Visual comparisons on selected models of the DeepFashion3D [43] dataset. The surfaces produced by NeuS and VolSDF are closed watertight models, thereby post-processing is required to remove the unnecessary parts. NeAT can produce open models by learning an SDF and predicting which surfaces in the extracted meshes should be removed, but it needs mask for supervision. NeuralUDF can generate open surfaces, but struggles with textureless inputs, leading to double-layered regions and large reconstruction errors. NeUDF generally performs well, but its training is unstable and may stumble on less distinguished, darker models like LS-D0. In contrast, our 2S-UDF consistently delivers effective reconstructions of non-watertight models. See the supplementary material for additional results." + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.446, + 0.473, + 0.692 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.707, + 0.471, + 0.79 + ], + "angle": 0, + "content": "Figure 4. Visualization of the learned UDFs on cross sections. 
Among our method, NeuralUDF, and NeUDF, our method learns UDFs that most closely resemble the ground truth. NeAT is omitted from this visualization because it learns SDFs in lieu of UDFs. Note that for LS-D0, NeUDF completely collapses without learning a reasonable UDF." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.471, + 0.903 + ], + "angle": 0, + "content": "NeAT [27] are over-smoothed, missing details, resulting in large Chamfer distance errors. NeuralUDF [25] is unable to properly reconstruct most textureless models, possibly due to its complex density function, which makes convergence difficult. Some of the NeUDF [23] models" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.45, + 0.895, + 0.678 + ], + "angle": 0, + "content": "become watertight. To analyze the reasons, we illustrate the cross sections of these UDFs in Figure 4. To compute the ground truth UDFs, we sample 30,000 points from every input point model and compute the distances to the nearest sample point for every point in a 3D grid of resolution \\(512 \\times 512 \\times 512\\). All other UDFs are extracted by querying the distance neural network in a 3D grid of the same resolution. Our learned UDFs resemble the ground truth with little difference. In contrast, the UDFs of NeuralUDF deviate from the ground truth significantly, explaining its difficulty in converging. The UDFs of NeUDF are better, but the distances approach zero around open holes. As a result, it is challenging for NeUDF to generate non-watertight models, and some of them are even closed. NeAT learns SDFs, so we do not show its distance fields." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.683, + 0.895, + 0.79 + ], + "angle": 0, + "content": "As illustrated in Figure 5, perhaps due to taking the absolute value of the MLP output for UDF representation, NeuralUDF possibly generates two layers of zero level-sets on both sides of the surface, resulting in double-layered regions after Stage 1 learning. However, in its Stage 2 refinement, the surface is crushed into pieces and the Chamfer distance errors surge suddenly." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.895, + 0.903 + ], + "angle": 0, + "content": "In Figure 6, we conduct additional experiments on an open-model dataset provided by NeUDF [23]. For the rack model, the thin structures reconstructed by NeuralUDF [25] and NeUDF [23] seem eroded, but ours do not. The thin structures reconstructed by NeAT [27] are the closest to the reference image, but the surface is dented inward with visible artifacts due to imperfect SDF validity learning." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5089" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.083, + 0.135, + 0.245, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.25, + 0.09, + 0.474, + 0.145 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.306, + 0.146, + 0.39, + 0.165 + ], + "angle": 0, + "content": "Stage 1 NeuralUDF" + }, + { + "type": "image", + "bbox": [ + 0.25, + 0.167, + 0.474, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.306, + 0.223, + 0.383, + 0.242 + ], + "angle": 0, + "content": "Stage 1 2S-UDF" + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.256, + 0.47, + 0.354 + ], + "angle": 0, + "content": "Figure 5. Plots of the Chamfer distance throughout the training process.
Our method consistently reduces CD across both stages. In contrast, NeuralUDF, which also adopts a two-stage learning strategy, exhibits instability and yields a fragmented output following the second stage. The first-stage output of NeuralUDF, however, contains double-layered regions as marked above. In this figure, both methods start their stage 2 training at 250k iterations." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.366, + 0.458, + 0.432 + ], + "angle": 0, + "content": "
<table><tr><td>Method</td><td>37</td><td>55</td><td>65</td><td>69</td><td>97</td><td>105</td><td>106</td><td>114</td><td>118</td><td>122</td><td>Mean</td></tr>
<tr><td>NeuralUDF</td><td>1.18</td><td>0.44</td><td>0.66</td><td>0.67</td><td>0.94</td><td>0.95</td><td>0.57</td><td>0.37</td><td>0.56</td><td>0.55</td><td>0.69</td></tr>
<tr><td>NeAT</td><td>1.18</td><td>0.47</td><td>0.82</td><td>0.84</td><td>1.09</td><td>0.75</td><td>0.76</td><td>0.38</td><td>0.56</td><td>0.55</td><td>0.74</td></tr>
<tr><td>NeUDF</td><td>0.90</td><td>0.65</td><td>0.73</td><td>0.97</td><td>1.07</td><td>0.63</td><td>0.94</td><td>0.59</td><td>0.72</td><td>0.62</td><td>0.78</td></tr>
<tr><td>Ours</td><td>0.89</td><td>0.55</td><td>0.68</td><td>0.88</td><td>1.15</td><td>0.70</td><td>0.74</td><td>0.41</td><td>0.61</td><td>0.51</td><td>0.71</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.141, + 0.442, + 0.405, + 0.455 + ], + "angle": 0, + "content": "Table 2. Chamfer distances on DTU dataset." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.48, + 0.469, + 0.601 + ], + "angle": 0, + "content": "The plant model does not have an object mask, making NeAT [27] impractical for training. NeuralUDF [25] completely fails to reconstruct a reasonable surface. Between our method and NeUDF [23] which can reconstruct a sensible model, the flower pot region marked in red is missing in NeUDF but not in ours. These show our method's ability to reconstruct non-watertight models more robustly compared to other methods." + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.61, + 0.465, + 0.721 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.791 + ], + "angle": 0, + "content": "Figure 6. Qualitative comparisons with NeAT [27], NeuralUDF [25] and NeUDF [23] on some example data released by NeUDF [23]. Note that NeAT cannot reconstruct \"plant\" dataset because the ground truth mask for \"plant\" is unavailable." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.818, + 0.388, + 0.833 + ], + "angle": 0, + "content": "4.2. Comparisons on Watertight Models" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.469, + 0.9 + ], + "angle": 0, + "content": "Other methods can also be used as the first stage of our 2S- UDF. We use NeUDF for the first stage training on the DTU dataset [19]. As detailed in Table 2, we compare the Chamfer distances of the reconstruction results with NeuralUDF," + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.089, + 0.888, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.161, + 0.88, + 0.22 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.222, + 0.879, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.279, + 0.88, + 0.335 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.336, + 0.879, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.407, + 0.892, + 0.492 + ], + "angle": 0, + "content": "Figure 7. Qualitative comparisons with NeAT, NeuralUDF and NeUDF on the DTU [19] dataset and close-up comparisons against NeUDF. Our method can reconstruct surfaces closer to the ground truth point clouds in various places such as the marked region, generally improving the reconstruction accuracy of NeUDF by around \\(10\\%\\), on a par with NeuralUDF and NeAT at the bottom two rows." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.518, + 0.892, + 0.806 + ], + "angle": 0, + "content": "NeAT and NeUDF without our second-stage training. SDFs generally excel at learning watertight models, and it is worth pointing out that NeuralUDF takes the absolute value of the output of MLP as the UDF value of a given point. Therefore for closed models, they can easily learn an SDF and take its absolute value to produce a UDF. NeAT, on the other hand, explicitly learns an SDF. NeUDF and our method truly learn UDFs. While UDF learning is much more complicated than SDF learning because the UDF gradient nearby 0 is blurry and the gradient is not available at 0, our method still improves the reconstruction quality of NeUDF by around \\(10\\%\\) as shown in Figure 7. 
We further provide a close-up view of specific parts of the models for detailed comparisons in Figure 7. These local callouts show the ground truth points located on both sides of our surfaces, whereas most of the points lie on only one side of the surfaces of NeUDF. This illustrates that our reconstructed surfaces are closer to the ground truth points, thus improving the resulting quality over NeUDF and putting it on a par with NeuralUDF and NeAT." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.816, + 0.665, + 0.831 + ], + "angle": 0, + "content": "4.3. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.84, + 0.891, + 0.884 + ], + "angle": 0, + "content": "In this section, we present the main ablation studies. We refer interested readers to the supplementary material for additional ablation studies." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.886, + 0.891, + 0.901 + ], + "angle": 0, + "content": "Effect of the two-stage training. We conduct an ablation" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5090" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.079, + 0.089, + 0.472, + 0.216 + ], + "angle": 0, + "content": "
<table><tr><td>Method</td><td>#1</td><td>#7</td><td>#8</td><td>LS-D0</td></tr>
<tr><td>S1 & S2</td><td>4.55</td><td>2.88</td><td>3.21</td><td>2.46</td></tr>
<tr><td>S1</td><td>7.22</td><td>2.46</td><td>3.38</td><td>6.04</td></tr>
<tr><td>S2</td><td>5.75</td><td>4.00</td><td>5.96</td><td>3.65</td></tr>
<tr><td>Method</td><td>NS-D1</td><td>LS-C1</td><td>DTU 114</td><td>DTU 122</td></tr>
<tr><td>S1 & S2</td><td>1.47</td><td>2.14</td><td>0.41</td><td>0.51</td></tr>
<tr><td>S1</td><td>1.46</td><td>6.23</td><td>0.59</td><td>0.62</td></tr>
<tr><td>S2</td><td>1.64</td><td>2.98</td><td>0.63</td><td>0.60</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.225, + 0.471, + 0.296 + ], + "angle": 0, + "content": "Table 3. Chamfer distances of models learned by both Stage 1 and 2 (S1 & S2), only Stage 1 (S1) and only Stage 2 (S2) on selected datasets. Models learned by two stages yield similar Chamfer distances, but when trained with only Stage 1 or Stage 2, the Chamfer distances generally become significantly higher." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.322, + 0.469, + 0.412 + ], + "angle": 0, + "content": "study on the effect of the two-stage learning. We compare the Chamfer distances among both two stages, only Stage 1 and only Stage 2 training, shown in Table 3. Our results show that two-stage training improves the Chamfer distance (lower is better) compared to training with only Stage 1 or 2, under most circumstances." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.413, + 0.469, + 0.504 + ], + "angle": 0, + "content": "It should be noted that training by the second stage from scratch is also capable of generating a generally reasonable result. However, the Chamfer distances, as shown in Table 3, indicate that its learning ability is limited. Therefore, the second refinement learning stage should cooperate with the first coarse learning stage to generate the best results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.505, + 0.47, + 0.747 + ], + "angle": 0, + "content": "Choice of accumulated weight threshold \\(\\delta_{thres}\\). In Stage 2, being a ray truncate point requires the accumulated weight up until that point to be greater than \\(\\delta_{thres}\\), where we intuitively select \\(\\delta_{thres} = 0.5\\). Figure 8 shows the reconstruction results for other choices of \\(\\delta_{thres}\\), namely 0.3 and 0.7, respectively. We observe that all threshold choices successfully reconstruct the model. Setting the threshold \\(\\delta_{thres}\\) up to 0.7 produces visually similar results. Setting the threshold \\(\\delta_{thres}\\) down to 0.3 also works fine generally despite that it may introduce more holes to the reconstructed meshes. We deduce that setting a lower threshold increases the possibility that a ray may be truncated prematurely, leading to less desirable results. Nevertheless, we still have a considerable range of \\(\\delta_{thres}\\) from 0.3 to 0.7 without major result regression, indicating that our Stage 2 training exhibits robustness against \\(\\delta_{thres}\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.757, + 0.205, + 0.77 + ], + "angle": 0, + "content": "4.4. Limitations" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Since the light is cut off after going through a layer of surface, our method relinquishes the ability to model planes with transparency. Occasionally, due to learning uncertainty, the Chamfer distance may increase slightly in the second stage, but the difference is quite small without visual impact. Overall, the two-stage learning improves the quality significantly. For watertight models, SDF learning is more suitable than UDF learning, since UDF learning is" + }, + { + "type": "image", + "bbox": [ + 0.553, + 0.09, + 0.844, + 0.254 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.264, + 0.892, + 0.321 + ], + "angle": 0, + "content": "Figure 8. Qualitative comparisons on different choices of accumulated weight \\(\\delta_{\\text{thres}}\\). 
Setting a higher threshold works well few little visual differences; Setting a lower threshold generally works fine, but may introduce more holes in reconstructed meshes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.352, + 0.892, + 0.473 + ], + "angle": 0, + "content": "more complicated than SDF learning. We still advise using SDF learning, e.g., NeuS [34], HF-NeuS [36] or PET-NeuS [37], for watertight model reconstruction. Also, the mesh extraction of MeshUDF [15] tends to generate holes and \"staircase\" artifacts affecting the mesh reconstruction quality. Adopting a more robust extraction method, e.g., DoubleCoverUDF [18], could alleviate the problem, but we use MeshUDF here for all methods for a fair comparison." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.493, + 0.627, + 0.509 + ], + "angle": 0, + "content": "5. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.521, + 0.893, + 0.732 + ], + "angle": 0, + "content": "Overall, 2S-UDF offers a promising approach to the problem of reconstructing both open and watertight models from multi-view images. Its advantages over existing methods lie in the use of a simple and more accurate density function, and a smooth differentiable UDF representation, so that the learned UDF approximates the ground truth as much as possible. A two-stage learning strategy further eliminates bias and improves UDF accuracy. Results from our experiments on the DeepFashion3D, DTU and BlendedMVS datasets demonstrate the effectiveness of our method, particularly in learning smooth and stably open UDFs revealing the robustness of 2S-UDF. Moreover, our method does not rely on object masks for open model reconstruction, making it more practical in real-world applications." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.753, + 0.66, + 0.77 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This project was supported in part by the National Natural Science Foundation of China under Grants (61872347, 62072446), in part by the National Key R&D Program of China under Grant 2023YFB3002901, in part by the Basic Research Project of ISCAS under Grant ISCAS-JCMS-202303 and in part by the Ministry of Education, Singapore, under its Academic Research Fund Grants (MOET2EP20220-0005, RG20/20 & RT19/22)." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.958 + ], + "angle": 0, + "content": "5091" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Dejan Azinović, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural RGB-D Surface Reconstruction. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6280-6291, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.47, + 0.227 + ], + "angle": 0, + "content": "[2] Fausto Bernardini, Joshua Mittleman, Holly Rushmeier, Cláudio Silva, and Gabriel Taubin. The ball-pivoting algorithm for surface reconstruction. IEEE Trans. Vis. Comput. Graph., 5(4):349-359, 1999. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.23, + 0.468, + 0.271 + ], + "angle": 0, + "content": "[3] A. Broadhurst, T.W. Drummond, and R. Cipolla. A probabilistic framework for space carving. In Int. Conf. Comput. Vis., pages 388-393 vol.1, 2001. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.273, + 0.468, + 0.342 + ], + "angle": 0, + "content": "[4] Rohan Chabra, Jan E. Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep Local Shapes: Learning Local SDF Priors for Detailed 3D Reconstruction. In Eur. Conf. Comput. Vis., pages 608-625, Cham, 2020. Springer International Publishing. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.344, + 0.468, + 0.4 + ], + "angle": 0, + "content": "[5] Julian Chibane, Thiemo Alldieck, and Gerard Pons-Moll. Implicit Functions in Feature Space for 3D Shape Reconstruction and Completion. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6968-6979, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.402, + 0.468, + 0.457 + ], + "angle": 0, + "content": "[6] Julian Chibane, Mohamad Aymen mir, and Gerard Pons-Moll. Neural Unsigned Distance Fields for Implicit Function Learning. In Adv. Neural Inform. Process. Syst., pages 21638-21652. Curran Associates, Inc., 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.459, + 0.468, + 0.514 + ], + "angle": 0, + "content": "[7] François Darmon, Bénédicte Bascle, Jean-Clement Devaux, Pascal Monasse, and Mathieu Aubry. Improving neural implicit surfaces geometry with patch warping. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6250-6259, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.516, + 0.468, + 0.557 + ], + "angle": 0, + "content": "[8] J. De Bonet and P. Viola. Roxels: responsibility weighted 3D volume reconstruction. In Int. Conf. Comput. Vis., pages 418-425 vol.1, 1999. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.56, + 0.468, + 0.615 + ], + "angle": 0, + "content": "[9] Charles Dugas, Yoshua Bengio, François Bélisle, Claude Nadeau, and René Garcia. Incorporating Second-Order Functional Knowledge for Better Option Pricing. In Adv. Neural Inform. Process. Syst. MIT Press, 2000. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.617, + 0.468, + 0.685 + ], + "angle": 0, + "content": "[10] Qiancheng Fu, Qingshan Xu, Yew Soon Ong, and Wenbing Tao. Geo-Neus: Geometry-Consistent Neural Implicit Surfaces Learning for Multi-view Reconstruction. In Adv. Neural Inform. Process. Syst., pages 3403–3416. Curran Associates, Inc., 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.688, + 0.468, + 0.728 + ], + "angle": 0, + "content": "[11] Kunihiko Fukushima. Cognitron: a self-organizing multilayered neural network. Biological Cybernetics, 20(3-4):121-136, 1975. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.731, + 0.468, + 0.772 + ], + "angle": 0, + "content": "[12] Yasutaka Furukawa and Carlos Hernández. Multi-View Stereo: A Tutorial. Found. Trends. Comput. Graph. Vis., 9 (1-2):1-148, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.468, + 0.829 + ], + "angle": 0, + "content": "[13] Silvano Galliani, Katrin Lasinger, and Konrad Schindler. Massively Parallel Multiview Stereopsis by Surface Normal Diffusion. In Int. Conf. Comput. Vis., pages 873-881, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.468, + 0.899 + ], + "angle": 0, + "content": "[14] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit Geometric Regularization for Learning Shapes. In Proceedings of the 37th International Conference on Machine Learning, pages 3789-3799. PMLR, 2020. 
5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[15] Benoit Guillard, Federico Stella, and Pascal Fua. MeshUDF: Fast and Differentiable Meshing of Unsigned Distance Field Networks. In Eur. Conf. Comput. Vis., pages 576-592, Cham, 2022. Springer Nature Switzerland. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.15, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[16] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, 2 edition, 2004. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.193, + 0.892, + 0.246 + ], + "angle": 0, + "content": "[17] Fei Hou, Chiyu Wang, Wencheng Wang, Hong Qin, Chen Qian, and Ying He. Iterative poisson surface reconstruction (iPSR) for unoriented points. ACM Trans. Graph., 41(4), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.892, + 0.304 + ], + "angle": 0, + "content": "[18] Fei Hou, Xuhui Chen, Wencheng Wang, Hong Qin, and Ying He. Robust Zero Level-Set Extraction from Unsigned Distance Fields Based on Double Covering. ACM Trans. Graph., 42(6), 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.307, + 0.892, + 0.36 + ], + "angle": 0, + "content": "[19] Rasmus Jensen, Anders Dahl, George Vogiatzis, Engil Tola, and Henrik Aanæs. Large Scale Multi-view Stereopsis Evaluation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 406-413, 2014. 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.363, + 0.892, + 0.418 + ], + "angle": 0, + "content": "[20] Mengqi Ji, Jinzhi Zhang, Qionghai Dai, and Lu Fang. SurfaceNet+: An End-to-end 3D Neural Network for Very Sparse Multi-View Stereopsis. IEEE Trans. Pattern Anal. Mach. Intell., 43(11):4078-4093, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.42, + 0.892, + 0.461 + ], + "angle": 0, + "content": "[21] Abhishek Kar, Christian Hane, and Jitendra Malik. Learning a Multi-View Stereo Machine. In Adv. Neural Inform. Process. Syst. Curran Associates, Inc., 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.463, + 0.892, + 0.49 + ], + "angle": 0, + "content": "[22] Michael Kazhdan and Hugues Hoppe. Screenedoisson surface reconstruction. ACM Trans. Graph., 32(3), 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.492, + 0.892, + 0.56 + ], + "angle": 0, + "content": "[23] Yu-Tao Liu, Li Wang, Jie Yang, Weikai Chen, Xiaoxu Meng, Bo Yang, and Lin Gao. NeUDF: Leaning Neural Unsigned Distance Fields with Volume Rendering. In IEEE Conf. Comput. Vis. Pattern Recog., pages 237-247, 2023. 1, 2, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.563, + 0.892, + 0.631 + ], + "angle": 0, + "content": "[24] Xiaoxiao Long, Cheng Lin, Peng Wang, Taku Komura, and Wenping Wang. SparseNeuS: Fast Generalizable Neural Surface Reconstruction from Sparse Views. In Eur. Conf. Comput. Vis., pages 210-227, Cham, 2022. Springer Nature Switzerland. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.634, + 0.892, + 0.716 + ], + "angle": 0, + "content": "[25] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Yuan Liu, Peng Wang, Christian Theobalt, Taku Komura, and Wenping Wang. NeuralUDF: Learning Unsigned Distance Fields for Multi-View Reconstruction of Surfaces with Arbitrary Topologies. In IEEE Conf. Comput. Vis. Pattern Recog., pages 20834–20843, 2023. 
1, 2, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.718, + 0.892, + 0.8 + ], + "angle": 0, + "content": "[26] Baorui Ma, Zhizhong Han, Yu-Shen Liu, and Matthias Zwicker. Neural-Pull: Learning Signed Distance Function from Point clouds by Learning to Pull Space onto Surface. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, pages 7246-7257. PMLR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.802, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[27] Xiaoxu Meng, Weikai Chen, and Bo Yang. NeAT: Learning Neural Implicit Surfaces with Arbitrary Topologies from Multi-View Images. In IEEE Conf. Comput. Vis. Pattern Recog., pages 248–258, 2023. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[28] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy Networks: Learning 3D Reconstruction in Function Space. In" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5092" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.468, + 0.119 + ], + "angle": 0, + "content": "IEEE Conf. Comput. Vis. Pattern Recog., pages 4455-4465, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.191 + ], + "angle": 0, + "content": "[29] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In Eur. Conf. Comput. Vis., pages 405-421, Cham, 2020. Springer International Publishing. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.193, + 0.469, + 0.261 + ], + "angle": 0, + "content": "[30] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning Continuous Signed Distance Functions for Shape Representation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 165-174, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.264, + 0.469, + 0.332 + ], + "angle": 0, + "content": "[31] Johannes L. Schonberger, Enliang Zheng, Jan-Michael Frahm, and Marc Pollefeys. Pixelwise View Selection for Unstructured Multi-View Stereo. In Eur. Conf. Comput. Vis., pages 501–518, Cham, 2016. Springer International Publishing. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.334, + 0.469, + 0.403 + ], + "angle": 0, + "content": "[32] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit Neural Representations with Periodic Activation Functions. In Adv. Neural Inform. Process. Syst., pages 7462-7473. Curran Associates, Inc., 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.406, + 0.469, + 0.46 + ], + "angle": 0, + "content": "[33] Jiaming Sun, Yiming Xie, Linghao Chen, Xiaowei Zhou, and Hujun Bao. NeuralRecon: Real-Time Coherent 3D Reconstruction from Monocular Video. In IEEE Conf. Comput. Vis. Pattern Recog., pages 15593-15602, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.462, + 0.469, + 0.531 + ], + "angle": 0, + "content": "[34] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. NeuS: Learning Neural Implicit Surfaces by Volume Rendering for Multi-view Reconstruction. In Adv. Neural Inform. 
Process. Syst., pages 27171-27183. Curran Associates, Inc., 2021. 1, 2, 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.533, + 0.469, + 0.588 + ], + "angle": 0, + "content": "[35] Yifan Wang, Lukas Rahmann, and Olga Sorkine-Hornung. Geometry-Consistent Neural Shape Representation with Implicit Displacement Fields. In Int. Conf. Learn. Represent. OpenReview.net, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.591, + 0.469, + 0.657 + ], + "angle": 0, + "content": "[36] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. HF-NeuS: Improved Surface Reconstruction Using High-Frequency Details. In Adv. Neural Inform. Process. Syst., pages 1966–1978. Curran Associates, Inc., 2022. 1, 2, 3, 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.661, + 0.469, + 0.716 + ], + "angle": 0, + "content": "[37] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. PETNeuS: Positional Encoding Tri-Planes for Neural Surfaces. In IEEE Conf. Comput. Vis. Pattern Recog., pages 12598–12607, 2023. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.718, + 0.469, + 0.773 + ], + "angle": 0, + "content": "[38] Yao Yao, Zixin Luo, Shiwei Li, Tianwei Shen, Tian Fang, and Long Quan. Recurrent MVSNet for High-Resolution Multi-View Stereo Depth Inference. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5520–5529, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.469, + 0.843 + ], + "angle": 0, + "content": "[39] Yao Yao, Zixin Luo, Shiwei Li, Jingyang Zhang, Yufan Ren, Lei Zhou, Tian Fang, and Long Quan. BlendedMVS: A Large-Scale Dataset for Generalized Multi-View Stereo Networks. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1787–1796, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[40] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume Rendering of Neural Implicit Surfaces. In Adv. Neural Inform. Process. Syst., pages 4805-4815. Curran Associates, Inc., 2021. 1, 2, 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.161 + ], + "angle": 0, + "content": "[41] Fang Zhao, Wenhao Wang, Shengcai Liao, and Ling Shao. Learning Anchored Unsigned Distance Functions with Gradient Direction Alignment for Single-view Garment Reconstruction. In Int. Conf. Comput. Vis., pages 12654-12663, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.892, + 0.232 + ], + "angle": 0, + "content": "[42] Junsheng Zhou, Baorui Ma, Yu-Shen Liu, Yi Fang, and Zhizhong Han. Learning Consistency-Aware Unsigned Distance Functions Progressively from Raw Point Clouds. In Adv. Neural Inform. Process. Syst., pages 16481-16494. Curran Associates, Inc., 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.234, + 0.892, + 0.317 + ], + "angle": 0, + "content": "[43] Heming Zhu, Yu Cao, Hang Jin, Weikai Chen, Dong Du, Zhangye Wang, Shuguang Cui, and Xiaoguang Han. Deep Fashion3D: A Dataset and Benchmark for 3D Garment Reconstruction from Single Images. In Eur. Conf. Comput. Vis., pages 512-530, Cham, 2020. Springer International Publishing. 
2, 5, 6" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5093" + } + ] +] \ No newline at end of file diff --git a/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_origin.pdf b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..5402b2e7980f20eb25257899294126c1368809f2 --- /dev/null +++ b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7227de63ce0cfea56439fffcbe428ec5228a39572541bc1bdc66a4cd8f433e97 +size 7094836 diff --git a/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/full.md b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0cbe9603a223c9021d4e2541f3c7c8e2352c1029 --- /dev/null +++ b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/full.md @@ -0,0 +1,306 @@ +# 2S-UDF: A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images + +Junkai Deng1,2 Fei Hou1,2* Xuhui Chen1,2 Wencheng Wang1,2 Ying He3 + +$^{1}$ State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences + +$^{2}$ University of Chinese Academy of Sciences + +$^{3}$ School of Computer Science and Engineering, Nanyang Technological University + +{dengjk,houfei,chenxh,whn}@ios.ac.cn yhe@ntu.edu.sg + +# Abstract + +Recently, building on the foundation of neural radiance field, various techniques have emerged to learn unsigned distance fields (UDF) to reconstruct 3D non-watertight models from multi-view images. Yet, a central challenge in UDF-based volume rendering is formulating a proper way to convert unsigned distance values into volume density, ensuring that the resulting weight function remains unbiased and sensitive to occlusions. Falling short on these requirements often results in incorrect topology or large reconstruction errors in resulting models. This paper addresses this challenge by presenting a novel two-stage algorithm, 2S-UDF, for learning a high-quality UDF from multi-view images. Initially, the method applies an easily trainable density function that, while slightly biased and transparent, aids in coarse reconstruction. The subsequent stage then refines the geometry and appearance of the object to achieve a high-quality reconstruction by directly adjusting the weight function used in volume rendering to ensure that it is unbiased and occlusion-aware. Decoupling density and weight in two stages makes our training stable and robust, distinguishing our technique from existing UDF learning approaches. Evaluations on the DeepFashion3D, DTU, and BlendedMVS datasets validate the robustness and effectiveness of our proposed approach. 
In both quantitative metrics and visual quality, the results indicate our superior performance over other UDF learning techniques in reconstructing 3D non-watertight models from multi-view images. Our code is available at https://bitbucket.org/jkdeng/2sudf/. + +# 1. Introduction + +As the success of neural radiance field (NeRF) [29], numerous volume rendering based 3D modeling methods are pro + +![](images/867bc421a97576f520a3274ecbdd5e9eba4ec439278a4d750a3d66c3827ea2c9.jpg) +GT + +![](images/85e962c770c891a98f6b56af5d215f5073176408910292bed8e6f4a91d2c8ea6.jpg) +Figure 1. We learn a UDF from multiview images for nonwatertight model reconstruction. As illustrated in the cross sections of learned UDFs, our learned UDF approximates to the ground truth. In contrast, the learned UDF of NeuralUDF [25] is choppy leading to significant artifacts, e.g., unexpected pit. The learned UDF of NeUDF [23] is almost closed struggling to generate open surface. + +![](images/dabe67eff785b81ed83779618b589b9cdfe1376a1193b3914728b850dc1466e1.jpg) +Ours + +![](images/a42faf2f7448712bdc6de433b3d8c9e170355c4c40c61ea4bc1244b5a9d9d5db.jpg) + +![](images/a59c2a6b379ff109af501cfcca8bad9f94ff27431c6799491729fa0cd5e3fd08.jpg) +NeuralUDF + +![](images/1f6e183a63714d014a9d2906c74a7296fa1748e9aebee81c5db5fcde94239134.jpg) + +![](images/f2aa3375ed2b1b37854099d1c96a2e52adb2801ce536a43fc9776bdaa551545f.jpg) +NeUDF + +![](images/a8099774a3e49a585d31d7d5f2388893a45d5cebce77d9fc5502bcace31fa767.jpg) + +posed to learn signed distance fields (SDF) for 3D model reconstruction from multi-view images [7, 34, 36, 40]. These approaches map signed distance value to a density function, thereby enabling the use of volume rendering to learn an implicit SDF representation. To calculate pixel colors, they compute the weighted sum of radiances along each light ray. Achieving an accurate surface depiction requires the density function to meet three essential criteria. Firstly, the weights, which are derived from the density function, must reach their maximum value when the distance is zero, ensuring unbiasedness. Secondly, as a ray traverses through the surface, the accumulated density should tend towards infinity, rendering the surface opaque — a property referred to as occlusion-awareness. Finally, the density function should be bounded to prevent numerical issues. The popular SDF approaches, such as NeuS [34] and VolSDF [40], adopt an S-shaped density function that meets all these requirements. + +While SDF-based methods excel at reconstructing watertight models, they have limitations in representing open models. This is due to the intrinsic nature of SDF, which + +differentiates between the interior and exterior of a model, thus failing to accommodate open boundaries. Recent advances have attempted to mitigate this constraint by employing unsigned distance fields (UDF) [23, 25, 27]. Unlike signed distance fields, UDFs have non-negative distance values, making them suitable for representing nonwatertight models. However, learning a UDF from multiview images is a challenging task since the gradients of the UDF are unstable due to directional changes near the zero level-set, making it difficult to train the neural network. Another major challenge lies in formulating a UDF-induced density function that can simultaneously meet the above-mentioned three requirements. Unlike SDFs, UDFs cannot distinguish between the front and back of a surface based on distance values, thus, directly using an S-shaped density function is off the table. 
Opting for a bell-shaped density function brings its own issues. It is impossible for these integrations to approach infinity, so as to be occlusion-aware, unless the density becomes boundless at zero distance values. These conflicting requirements make UDF learning a non-trivial task, forcing existing methods to sacrifice at least one of these conditions. As shown in Figure 1, the existing methods NeuralUDF [25] and NeUDF [23] result in either choppy or nearly closed UDFs. + +As designing a UDF-induced density function that simultaneously fulfills the three aforementioned conditions remains an unresolved challenge, we propose a novel approach that learns a UDF from multi-view images in two separate stages. In the first stage, we apply an easily trainable but slightly biased and transparent density function for coarse reconstruction. Such a UDF, although being approximate, provides an important clue so that we can determine where to truncate the light rays. This accounts for the occlusion effect, where points behind the surface are not visible and should not contribute to the output color. With truncated light rays, we are able to derive the weights from UDF directly bypassing the density function, to further refine the geometry and appearance in the second stage. Our two-stage learning method, called 2S-UDF, leads to an unbiased and occlusion-aware weight function. Furthermore, by sidestepping density function learning in Stage 2, we effectively bypass the challenges associated with ensuring its boundedness. This strategy enhances the numerical stability of our method. Evaluations on benchmark datasets DeepFashion3D [43] and DTU [19] show that 2S-UDF outperforms existing UDF learning methods in terms of both reconstruction accuracy and visual quality. Additionally, we observe that the training stability of 2S-UDF is notably superior compared to other UDF learning neural networks. + +# 2. Related Work + +3D Reconstruction from Multi-View Images. Surface reconstruction from multi-view images has been a subject of + +study for several decades, and can generally be classified into two categories: voxel-based and point-based methods. Voxel-based methods [3, 8, 20, 21, 33] divide the 3D space into voxels and determine which ones belong to the object. These methods can be computationally expensive and may not be suitable for reconstructing complex surfaces. Point-based methods [13, 31, 38] use structure-from-motion [16] to calibrate the images and generate a dense point cloud using multi-view stereo [12]. Finally, surface reconstruction methods (e.g., [2, 17, 22]) are used to generate a mesh. Since multi-view stereo requires dense correspondences to generate a dense point cloud, which are often difficult to compute, its results often contain various types of artifacts, such as noise, holes, and incomplete structures. + +Neural Volume Rendering. Neural network-based 3D surface reconstruction has received attention in recent years with the emergence of neural rendering [29]. Several methods have been proposed for volume rendering and surface reconstruction using neural networks. VolSDF [40] uses the cumulative distribution function of Laplacian distribution to evaluate the density function from SDF for volume rendering and surface reconstruction. NeuS [34] adopts an unbiased density function to the first-order approximation of SDFs for more accurate reconstruction. SparseNeuS [24] extends NeuS to use fewer images for reconstruction. 
HFNeuS [36] improves NeuS by proposing a simplified and unbiased density function and using hierarchical multilayer perceptrons (MLPs) for detail reconstruction. GeoNeuS [10] incorporates structure-from-motion to add more constraints. NeuralWarp [7] improves the accuracy by optimizing consistency between warped views of different images. PET-NeuS [37] further improves the accuracy by introducing tri-planes into the SDF prediction module, incorporating with MLP. All these methods learn SDFs, which can only reconstruct watertight models. Recently, Long et al. proposed NeuralUDF [25] for learning UDF for reconstructing open models. It adapts the S-shaped density function for learning SDF to UDFs by introducing an indicator function. However, the indicator function is complicated to learn, and also introduces biases. Liu et al. proposed NeUDF [23] adopting a bell-shaped density. However, to make it occlusion-aware, the density has to be unbounded resulting in an improper integral, which reduces accuracy. Meng et al. proposed NeAT [27] to learn SDF with validity so as to reconstruct open models from SDF. However, it needs foreground masks for data. + +3D Reconstruction from Point Clouds. There has been recent interest in surface representation using signed distance fields (SDFs) and occupation fields. Several methods have been proposed for learning SDFs [4, 26, 30, 32, 35], while occupation fields have been used in methods such as [5, 28]. However, both SDFs and occupation fields can only represent watertight models. To represent non-watertight + +models, some methods are proposed to learn UDF from 3D point clouds [6, 41, 42]. Our proposed method also uses UDF for non-watertight models representation, but we learn it directly from multi-view images, which is a challenging problem. + +# 3. Method + +At the foundation of UDF-based learning approaches is the task of crafting a density function that converts unsigned distance values into volume density, ensuring that the resulting weight function is unbiased and responsive to occlusions. None of the existing UDF learning methods [23, 25] can simultaneously meet the three critical requirements, i.e., ensuring the density function is bounded, and that the weight function remains both unbiased and occlusion aware. + +We tackle these challenges by decoupling the density function and weight function across two stages. In the initial stage (Section 3.1), we utilize an easy-to-train, bell-shaped density function (which is inherently bounded) to learn a coarse UDF. While the resulting weight function is not theoretically unbiased or occlusion-aware, we can make it practically usable by choosing a proper parameter. Moving into the second stage (Section 3.2), we sidestep the density function entirely, focusing instead on refining the UDF by directly adjusting the weight function within the neural volume rendering framework. Specifically, we truncate light rays after they hit the front side of the object and obtain a weight function that is both unbiased and sensitive to occlusions, without the overhang of density function boundedness concerns. Finally, Section 3.3 presents the training details. + +# 3.1. Stage 1: Coarse UDF Learning via a Simple Density Function + +We consider the scenario of a single planar plane $\mathcal{M}$ and a single ray-plane intersection. 
Inspired by HF-NeuS [36], we propose an easy-to-learn density function $\sigma_{1}$ that maps unsigned distance $f$ to density + +$$ +\sigma_ {1} (f (t)) = \frac {c s e ^ {- s f (t)}}{1 + e ^ {- s f (t)}}, s > 0, c > 0, \tag {1} +$$ + +where $c > 0$ is a fixed, user-specified parameter and $s > 0$ is a learnable parameter controlling the width of the bell-shaped curve. Straightforward calculation shows that the weight function $w_{1}(f(t)) = e^{-\int_{0}^{t}\sigma_{1}(f(u))\mathrm{d}u}\sigma_{1}(f(t))$ is monotonically decreasing behind the plane $\mathcal{M}$ and the maximum value occurs at a point $t^*$ in front of $\mathcal{M}$ with an unsigned distance value of $f(t^{*}) = \frac{1}{s}\ln \frac{c}{|\cos(\theta)|}, (c > |\cos (\theta)|)$ or $f(t^{*}) = 0, (0 < c \leq |\cos (\theta)|)$ , where $\theta$ is the incident angle between the light ray and the surface normal. This means that the weight function $w_{1}$ is not unbiased. Furthermore, the line integral $\int_0^t\sigma_1(f(u))\mathrm{d}u$ does + +not approach infinity when a light ray passes through the front-most layer of the surface, indicating $w_{1}$ is only partially occlusion-aware. + +While the density function $\sigma_{1}$ is not perfect in theory, by selecting an appropriate $c$ , we can practically minimize bias and enhance opacity. Clearly, a smaller $c$ value decreases $f(t^{*})$ , thereby reducing bias. To gauge the effect of $c$ on opacity, we now consider the most extreme scenario where the incident light ray is perpendicular to the planar surface $\mathcal{M}$ , and assume that the intersection point is located at $t = 1$ . In such a situation, the unsigned distance function is $f(t) = 1 - t$ for points in front of $\mathcal{M}$ . Since $\sigma_{1}$ is symmetrical on either side of $\mathcal{M}$ , the surface transparency is the square of the transparency of the front side. The theoretic transparency is, + +$$ +\begin{array}{l} \left(e ^ {- \int_ {0} ^ {1} \hat {\sigma} _ {1} (f (t)) \mathrm {d} t}\right) ^ {2} = \left[ \exp \left(- \int_ {0} ^ {1} \frac {c s e ^ {- s (1 - t)}}{1 + e ^ {- s (1 - t)}} \mathrm {d} t\right) \right] ^ {2} \\ = \left(\frac {1 + e ^ {- s}}{2}\right) ^ {2 c}. \\ \end{array} +$$ + +Therefore, we should choose a relatively large $c$ to reduce transparency. In our implementation, we set the constant $c = 5$ based on the typical value of the learned parameter $s$ which usually ranges between 1000 and 2000. Calculations of bias and translucency show that this setting offers a good balance between occlusion-awareness and unbiasedness in the first stage training. Please refer to the supplementary material for a detailed analysis. + +# 3.2. Stage 2: UDF Refinement through Weight Adjustment + +In this stage, we refine the UDF learned in Stage 1 to improve the quality of geometry and appearance. Unlike Stage 1 and all other UDF-learning methods, inspired by [1], we truncate light rays based on the approximated UDF learned in Stage 1 and learn the weight function $w(t)$ directly instead of the density function $\sigma(t)$ to refine the UDF. + +Ideally, for a single ray-plane intersection, we want a bell-shaped function $w(t)$ that attains its maximum at the points with zero distance values, and satisfies partition of unity. Therefore, we adopt the derivative of the sigmoid function as the weight function [1], defined as + +$$ +w _ {2} (f (t)) = \frac {s e ^ {- s f (t)}}{(1 + e ^ {- s f (t)}) ^ {2}} \cdot | \cos (\theta) |. 
# 3.2. Stage 2: UDF Refinement through Weight Adjustment

In this stage, we refine the UDF learned in Stage 1 to improve the quality of geometry and appearance. Unlike Stage 1 and all other UDF-learning methods, inspired by [1], we truncate light rays based on the approximate UDF learned in Stage 1 and learn the weight function $w(t)$ directly, instead of the density function $\sigma(t)$, to refine the UDF.

Ideally, for a single ray-plane intersection, we want a bell-shaped function $w(t)$ that attains its maximum at the points with zero distance values and satisfies partition of unity. Therefore, we adopt the derivative of the sigmoid function as the weight function [1], defined as

$$
w_2(f(t)) = \frac{s\,e^{-s f(t)}}{\left(1 + e^{-s f(t)}\right)^{2}} \cdot |\cos(\theta)|, \tag{2}
$$

where $\theta$ is the incident angle between the light ray and the surface normal.

Intuitively speaking, learning such a weight function $w_{2}$ in Stage 2 of our UDF method is similar to learning an S-shaped density function in SDF-based approaches such as [36]. As a result, the learning process in Stage 2 is as stable as in those SDF-based approaches. Furthermore, it entirely avoids the visibility indicator function, which is necessary in NeuralUDF [25].

Calculation shows that the weight $w_{2}$ attains its maximum at zero distance values; therefore, it is unbiased. However, if we naively predict the weight function directly, it will not be occlusion-aware, so we introduce ray truncation. To make $w_{2}$ occlusion-aware, we truncate the light rays after they pass through the front-most layer of the surface, thereby preventing the interior of the object from being rendered. Note that we do not expect the truncation to be exactly on the front-most layer of the surface. In fact, as long as it occurs between the front-most layer and the second layer, we consider the truncation valid. This means that the approximate UDF learned in the first stage, which captures the main topological features (such as boundaries) and provides a fairly good representation of the target object, is sufficient for us to determine where to cut off the light rays.

In our implementation, we adopt a simple strategy to determine the truncation point for each light ray. Specifically, the truncation point of ray $\mathbf{r}$ is the first sample point along $\mathbf{r}$ such that

- The unsigned distance value at the point is a local maximum; to avoid interference from distance oscillation, it must be the maximum within a window centered at the point; and
- The accumulated weight up to this point is greater than $\delta_{\text{thres}}$.

The accumulated weight threshold $\delta_{\text{thres}}$ is intuitively set to 0.5. This choice is based on the assumption that, if the Stage 1 training is performed well enough, the accumulated weight at each sample point along the ray would be either 0 (the ray has not reached a surface) or 1 (the ray has intersected a surface). Hence, we select 0.5 for $\delta_{\text{thres}}$ because it is the midpoint between 0 and 1. With this cutoff mechanism, only the first ray-surface intersection contributes to the color of the ray, effectively achieving occlusion-awareness. Given these properties, we conclude that:

Theorem 1. With ray truncation, the weight $w_{2}$ is unbiased and occlusion-aware.
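A simplified sketch of the Stage-2 weight of Eq. (2) and of the truncation rule is given below; the window size, array layout, and function names are our own assumptions for illustration rather than the released implementation.

```python
import numpy as np

def w2(f, s, cos_theta):
    """Eq. (2): derivative-of-sigmoid weight, scaled by |cos(theta)|."""
    e = np.exp(-s * f)
    return s * e / (1.0 + e) ** 2 * np.abs(cos_theta)

def truncation_index(udf, acc_weight, window=5, delta_thres=0.5):
    """First sample index along a ray whose UDF value is the maximum within a
    centered window (ignoring small oscillations) and whose accumulated weight
    exceeds delta_thres; samples after this index are discarded."""
    half, n = window // 2, len(udf)
    for i in range(n):
        lo, hi = max(0, i - half), min(n, i + half + 1)
        if udf[i] == udf[lo:hi].max() and acc_weight[i] > delta_thres:
            return i
    return n - 1  # no valid truncation point: keep the whole ray
```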
Figure 2 gives an intuitive illustration of our Stage 2 weight learning and truncation strategy. The UDF maximum at point $A$ in front of the intersection surface does not affect the selection of the cutting point, as the accumulated weight there is below $\delta_{\text{thres}}$ (0.5). The local maximum at $B$, caused by UDF oscillation, does not affect it either, since it is not the maximum in a sufficiently large neighborhood. The ray is cut at the maximum point $C$, and thus the weight at point $D$ is zero and contributes nothing to the rendering. As illustrated in Figure 2, the cutting process is robust against UDF oscillation, open boundaries, and local maxima in front of the intersection surface.

![](images/75a6bd4f9512b87da68e33f3321462eac43b0aa68cbe0f9128dac677398021a8.jpg)
Figure 2. An intuitive illustration of our ray cutting algorithm, best viewed in color and magnified. A ray shoots from left to right, approaching the boundary of the first surface and going through another two surfaces (gray boxes). The violet solid line represents the UDF values along the ray; the orange dashed line represents the corresponding color weight.

# 3.3. Training

Differentiable UDFs. NeuS uses an MLP network to learn the signed distance function $f$, which is a differentiable function. In contrast, a UDF is not differentiable at the zero level set, making it difficult for the network to learn the values and gradients of the UDF close to the zero level set.

Another crucial requirement is to ensure non-negative values for the computed distances, which seems like a trivial task, as one may simply apply an absolute value or a rectifier such as ReLU [11] to the MLP output. However, applying the absolute value to the distance is not viable due to its non-differentiability at zero. Similarly, rectifying the output value using ReLU is not feasible, as it is also non-differentiable at zero and its gradient vanishes for negative inputs. This can be particularly problematic for learning UDFs: when the MLP returns a negative distance value, the ReLU gradient vanishes, hindering the update of the distance to a positive value in subsequent iterations.

We therefore add a softplus [9] function after the output layer of the MLP [23]. The softplus function is a smooth and differentiable approximation of ReLU, defined as $\mathrm{softplus}(x) = \frac{1}{\beta}\ln (1 + e^{\beta x})$. Softplus has a similar shape to ReLU, but it is continuous and differentiable at every point, and its gradients do not vanish anywhere. Using the softplus function ensures that the output of the MLP is non-negative and differentiable, making it suitable for learning the UDF. Similar to NeUDF [23], we set $\beta = 100$ in our experiments.

Loss functions. Following NeuralUDF [25], we adopt an iso-surface regularizer that prevents the UDF values of non-surface points from approaching zero, thereby encouraging smooth and clean UDFs. The regularization loss is defined as [25]

$$
\mathcal{L}_{reg} = \frac{1}{MN} \sum_{i,k} \exp\left(-\tau \cdot f(t_{i,k})\right),
$$

where $\tau$ is a constant scalar that scales the learned UDF values, $M$ is the total number of sampled rays per training iteration, and $N$ is the number of sampled points on a single ray. $\tau$ is set to 5.0 in the first stage and 50.0 in the second stage.

The value of $s$, which is learnable in our method, significantly affects the quality of the reconstruction. When $s$ is small, it introduces a larger bias and leads to a more blurred output. We observe that $s$ typically converges to a relatively large value between 1000 and 2000, leading to visually pleasing results. However, in rare cases where $s$ stops increasing during training, we apply a penalty to force it to increase. The penalty is defined as

$$
\mathcal{L}_{s} = \frac{1}{M} \sum_{i,k} \frac{1}{s_{i,k}},
$$

where $M$ is the number of rays during a training epoch. This term $\mathcal{L}_s$ aggregates the reciprocals of all $s$ values used for the points $t_{i,k}$ on rays $r_i$. Intuitively speaking, it encourages a larger $s$ during the early stage of training. In our implementation, we make this term optional, since $s$ generally increases at a decreasing rate during training, and the penalty term is only necessary in rare cases when $s$ stops at a relatively low value.
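A minimal PyTorch sketch of the softplus output head and the two regularization terms described above is shown below; the function names are ours, and the color, Eikonal, and optional mask terms introduced next are omitted.

```python
import torch
import torch.nn.functional as F

def udf_from_mlp(raw):
    """Non-negative, everywhere-differentiable UDF from the raw MLP output,
    using softplus with beta = 100 as in NeUDF [23]."""
    return F.softplus(raw, beta=100)

def iso_surface_reg(udf_samples, tau):
    """L_reg: mean of exp(-tau * f) over all M*N sampled points
    (tau = 5.0 in Stage 1, 50.0 in Stage 2)."""
    return torch.exp(-tau * udf_samples).mean()

def s_penalty(s_values, num_rays):
    """Optional L_s: sum of 1/s over all samples, divided by the number of
    rays M, encouraging the sharpness parameter s to keep increasing."""
    return (1.0 / s_values).sum() / num_rays
```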
As in other SDF- and UDF-based methods [25, 34, 36], we adopt a color loss and an Eikonal loss in our approach. Specifically, the color loss $\mathcal{L}_{color}$ is the $L_{1}$ loss between the predicted color and the ground truth color of a single pixel, as used in [34]. The Eikonal loss $\mathcal{L}_{eik}$ regularizes the learned distance field to have a unit gradient [14]. Users may also choose to adopt object masks for supervision, as introduced in other SDF- and UDF-based methods [25, 34]. Putting it all together, we define the combined loss function as a weighted sum,

$$
\mathcal{L} = \mathcal{L}_{color} + \lambda_1 \mathcal{L}_{eik} + \lambda_2 \mathcal{L}_{reg} + \lambda_3 \mathcal{L}_{s}\ (+\ \lambda_m \mathcal{L}_{mask}),
$$

where $\lambda_1$, $\lambda_2$, $\lambda_3$ and the optional $\lambda_m$ are hyperparameters that control the weight of each loss term.

# 4. Experiments

Datasets. To evaluate our method, we use three datasets: DeepFashion3D [43], DTU [19] and BlendedMVS [39]. The DeepFashion3D dataset consists of clothing models, which are open models with boundaries. As only 3D points are available, we render 72 images of resolution $1024 \times 1024$ with a white background from different viewpoints for each model. In addition to the DeepFashion3D images rendered by us, most of which are texture-less, we also include the image data from NeuralUDF [25], most of which are texture-rich, in our experiments. We call them DF3D#Ours and DF3D#NeuralUDF, respectively. The DTU dataset consists of models captured in a studio, all of which are watertight. We use this dataset to validate that our method also works well for watertight models. These datasets have been widely used in previous works such as [34, 36, 40]. In our experiments, open models such as those in DeepFashion3D are trained without mask supervision; DTU is trained with mask supervision.

Baselines. To validate the effectiveness of our method, we compare it with state-of-the-art UDF learning methods, NeuralUDF [25], NeUDF [23] and NeAT [27], and SDF learning methods, VolSDF [40] and NeuS [34].

# 4.1. Comparisons on Open Models
| Method | #1 | #2 | #3 | #4 | #5 | #6 | #7 | #8 | #9 | Mean |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| NeuS | 6.69 | 13.50 | 10.32 | 15.01 | 8.99 | 12.92 | 12.94 | 9.93 | 9.49 | 11.09 |
| VolSDF | 6.36 | 9.44 | 11.87 | 16.03 | 10.78 | 14.91 | 15.06 | 11.34 | 8.96 | 11.64 |
| NeAT | 10.54 | 13.89 | 7.30 | 13.12 | 13.18 | 12.44 | 8.22 | 10.30 | 11.33 | 11.15 |
| NeuralUDF | 6.07 | 11.58 | 7.68 | 10.96 | 11.16 | 9.76 | 6.98 | 6.13 | 6.41 | 8.53 |
| NeUDF | 4.39 | 8.29 | 4.94 | 19.56 | 7.52 | 8.18 | 3.81 | 3.81 | 5.76 | 7.36 |
| Ours | 4.55 | 5.77 | 4.27 | 7.43 | 6.59 | 4.77 | 2.88 | 3.21 | 5.73 | 5.02 |

| Method | LS-C0 | SS-D0 | LS-D0 | NS-D1 | LS-C1 | Skirt1 | SS-C0 | Mean |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| NeuS | 3.18 | 4.82 | 5.71 | 2.21 | 3.60 | 2.44 | 5.13 | 3.87 |
| VolSDF | 5.92 | 4.79 | 5.96 | 4.36 | 8.73 | 7.74 | 8.84 | 6.62 |
| NeAT | 3.06 | 4.33 | 5.92 | 3.52 | 8.84 | 3.91 | 4.30 | 4.84 |
| NeuralUDF | 1.92 | 2.05 | 4.11 | 1.50 | 2.47 | 2.16 | 2.15 | 2.34 |
| NeUDF | 1.95 | 2.93 | N.A. | 1.48 | 2.66 | 2.74 | 1.77 | 2.26 |
| Ours | 1.92 | 1.97 | 2.46 | 1.47 | 2.14 | 1.84 | 1.91 | 1.96 |
Table 1. Chamfer distances $(\times 10^{-3})$ on DF3D#Ours (top) and DF3D#NeuralUDF (bottom). NeAT requires mask supervision; the other methods do not.

We evaluate our method and compare it with the baselines using the garments from DeepFashion3D [43], where the models have multiple open boundaries. VolSDF and NeuS always close the boundaries since they learn SDFs.

NeuralUDF, NeUDF and NeAT are designed to learn non-watertight models. NeAT learns SDFs for open models and requires mask supervision to produce reasonable results, whereas the other methods do not require mask supervision on DeepFashion3D. The released codebase of NeuralUDF indicates that it also has a two-stage training process. We evaluate the results of NeuralUDF at the end of both stages and present whichever is better.

In contrast, NeuralUDF, NeUDF and our method learn UDFs, which can generate open models. Table 1 shows the Chamfer distances of the results on DeepFashion3D. Some of the Chamfer distances of the compared methods are large because the open holes are closed or the model is over-smoothed, resulting in significant errors.

As demonstrated in Figure 3, we test various types of garments, some of which have rich textures, while others are nearly a single color. Learning UDFs for texture-less models is more challenging since various regions of a model are ambiguous without clear color differences. However, our 2S-UDF generates satisfactory results even without masks.

![](images/6e75436779e812106317753dd55b5af603b8f9b6bdd3d73e9de62d92abfb5d60.jpg)
Figure 3. Visual comparisons on selected models of the DeepFashion3D [43] dataset. The surfaces produced by NeuS and VolSDF are closed watertight models, so post-processing is required to remove the unnecessary parts. NeAT can produce open models by learning an SDF and predicting which surfaces in the extracted meshes should be removed, but it needs masks for supervision. NeuralUDF can generate open surfaces, but struggles with texture-less inputs, leading to double-layered regions and large reconstruction errors. NeUDF generally performs well, but its training is unstable and may stumble on less distinctive, darker models such as LS-D0. In contrast, our 2S-UDF consistently delivers effective reconstructions of non-watertight models. See the supplementary material for additional results.

![](images/04d8dfc5034a66bbcc23c3a95227ad5925a06feb7ff94eb62433c342d2e373e8.jpg)
Figure 4. Visualization of the learned UDFs on cross sections. Among our method, NeuralUDF, and NeUDF, our learned UDFs most closely resemble the ground truth. NeAT is omitted in this visualization because it learns SDFs in lieu of UDFs. Note that for LS-D0, NeUDF completely collapses without a reasonable UDF learned.

Even with mask supervision, the results of NeAT [27] are over-smoothed and miss details, resulting in large Chamfer distance errors. NeuralUDF [25] is unable to properly reconstruct most texture-less models, possibly due to its complex density function, which is difficult to converge. Some of the NeUDF [23] models become watertight. To analyze the reasons, we illustrate cross sections of these UDFs in Figure 4. To compute the ground truth UDFs, we sample 30,000 points from every input point model and compute the distance to the nearest sample point for every point in a 3D grid of resolution $512 \times 512 \times 512$, as sketched below.
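The ground-truth UDF grid used for this visualization can be computed with a nearest-neighbor query, roughly as in the SciPy sketch below; the normalized bounding box and the single (unchunked) query are simplifying assumptions — for a full $512^3$ grid the query should be chunked to limit memory.

```python
import numpy as np
from scipy.spatial import cKDTree

def gt_udf_grid(surface_points, res=512, bound=1.0):
    """Unsigned distance from every grid point in [-bound, bound]^3 to the
    nearest of the sampled surface points (e.g., 30,000 samples per model)."""
    axis = np.linspace(-bound, bound, res)
    grid = np.stack(np.meshgrid(axis, axis, axis, indexing="ij"), -1).reshape(-1, 3)
    dist, _ = cKDTree(surface_points).query(grid, k=1)
    return dist.reshape(res, res, res)
```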
All other UDFs are extracted by querying the distance neural network on a 3D grid of the same resolution. Our learned UDFs resemble the ground truth with little difference. In contrast, the UDFs of NeuralUDF deviate significantly from the ground truth, explaining its difficulty in converging. The UDFs of NeUDF are better, but the distances approach zero around open holes. As a result, it is challenging to generate non-watertight models, and some of them are even closed. NeAT learns SDFs, so we do not show its distance fields.

As illustrated in Figure 5, perhaps because NeuralUDF takes the absolute value of an MLP output for the UDF representation, it may generate two layers of zero level sets on both sides of the surface, resulting in double-layered regions after Stage 1 learning. However, in its Stage 2 refinement, the surface is crushed into pieces and the Chamfer distance errors surge suddenly.

In Figure 6, we conduct additional experiments on open-model data provided by NeUDF [23]. For the rack model, the thin structures reconstructed by NeuralUDF [25] and NeUDF [23] appear eroded, whereas ours do not. The thin structures reconstructed by NeAT [27] are the closest to the reference image, but the surface is dented inward with visible artifacts due to imperfect SDF validity learning.

![](images/e250f6fc48e46af907d9111a4bdbb8adc50d42105537c6b4c5b5759c2c597d83.jpg)
Figure 5. Plots of the Chamfer distance throughout the training process. Our method consistently reduces the Chamfer distance across both stages. In contrast, NeuralUDF, which also adopts a two-stage learning strategy, exhibits instability and yields a fragmented output following the second stage. The first-stage output of NeuralUDF, however, contains double-layered regions as marked above. In this figure, both methods start their Stage 2 training at 250k iterations.

![](images/b08c5db4015974cecf2dd55bf7ef7ac0fa66b41467f17aa49dc19ac8215b6cb7.jpg)
Stage 1 NeuralUDF

![](images/5722455419a86704be598c7e0c84a72095e1ecc7e2c9d1c1b6116450a8c1e8bc.jpg)
Stage 1 2S-UDF
| Method | 37 | 55 | 65 | 69 | 97 | 105 | 106 | 114 | 118 | 122 | Mean |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| NeuralUDF | 1.18 | 0.44 | 0.66 | 0.67 | 0.94 | 0.95 | 0.57 | 0.37 | 0.56 | 0.55 | 0.69 |
| NeAT | 1.18 | 0.47 | 0.82 | 0.84 | 1.09 | 0.75 | 0.76 | 0.38 | 0.56 | 0.55 | 0.74 |
| NeUDF | 0.90 | 0.65 | 0.73 | 0.97 | 1.07 | 0.63 | 0.94 | 0.59 | 0.72 | 0.62 | 0.78 |
| Ours | 0.89 | 0.55 | 0.68 | 0.88 | 1.15 | 0.70 | 0.74 | 0.41 | 0.61 | 0.51 | 0.71 |
Table 2. Chamfer distances on the DTU dataset.

The plant model does not have an object mask, making NeAT [27] impractical to train. NeuralUDF [25] completely fails to reconstruct a reasonable surface. Between our method and NeUDF [23], both of which reconstruct a sensible model, the flower-pot region marked in red is missing in NeUDF's result but not in ours. These results show that our method reconstructs non-watertight models more robustly than the other methods.

![](images/43c4ebd9f38166d8e2c76d98ad88d6c9be7b273fb8040d09c8e0c49ee1277594.jpg)
Figure 6. Qualitative comparisons with NeAT [27], NeuralUDF [25] and NeUDF [23] on example data released by NeUDF [23]. Note that NeAT cannot reconstruct the "plant" dataset because the ground truth mask for "plant" is unavailable.

# 4.2. Comparisons on Watertight Models

Other methods can also be used as the first stage of our 2S-UDF. We use NeUDF for the first-stage training on the DTU dataset [19]. As detailed in Table 2, we compare the Chamfer distances of the reconstruction results with NeuralUDF, NeAT and NeUDF without our second-stage training. SDFs generally excel at learning watertight models, and it is worth pointing out that NeuralUDF takes the absolute value of the MLP output as the UDF value of a given point. Therefore, for closed models, it can easily learn an SDF and take its absolute value to produce a UDF. NeAT, on the other hand, explicitly learns an SDF. NeUDF and our method truly learn UDFs. While UDF learning is much more complicated than SDF learning, because the UDF gradient is ambiguous near the zero level set and is not available at zero, our method still improves the reconstruction quality of NeUDF by around $10\%$, as shown in Figure 7. We further provide close-up views of specific parts of the models for detailed comparisons in Figure 7. These local callouts show the ground truth points located on both sides of our surfaces, whereas most of the points lie on only one side of the surfaces of NeUDF. This illustrates that our reconstructed surfaces are closer to the ground truth points, thus improving the resulting quality over NeUDF, on a par with NeuralUDF and NeAT.

![](images/625df05cda1eda0a5e170c400280861942e31769d997e9d5fe339a30328e2fd3.jpg)

![](images/f475c8258dde381aa4725b94c3d0d8d675c4cf3f4e724f95df61deb96e55b5b8.jpg)

![](images/0c5a010e174d1de796f8b345f9f3dbc0f36961289e5f10773f5eb798b2dda984.jpg)

![](images/5f6ce4e47ba03086b925e6e9a32a8198afad462ab494e647eede489bfa366ce8.jpg)

![](images/e1bbcb01c969473cdd14228cef52d5c8678e3e921f30ed07c5c0b5a1d379b533.jpg)
Figure 7. Qualitative comparisons with NeAT, NeuralUDF and NeUDF on the DTU [19] dataset and close-up comparisons against NeUDF. Our method can reconstruct surfaces closer to the ground truth point clouds in various places, such as the marked region, generally improving the reconstruction accuracy of NeUDF by around $10\%$, on a par with NeuralUDF and NeAT in the bottom two rows.

# 4.3. Ablation Studies

In this section, we present the main ablation studies. We refer interested readers to the supplementary material for additional ablation studies.

Effect of the two-stage training. We conduct an ablation study on the effect of the two-stage learning.
| Method | #1 | #7 | #8 | LS-D0 |
| --- | --- | --- | --- | --- |
| S1 & S2 | 4.55 | 2.88 | 3.21 | 2.46 |
| S1 | 7.22 | 2.46 | 3.38 | 6.04 |
| S2 | 5.75 | 4.00 | 5.96 | 3.65 |

| Method | NS-D1 | LS-C1 | DTU 114 | DTU 122 |
| --- | --- | --- | --- | --- |
| S1 & S2 | 1.47 | 2.14 | 0.41 | 0.51 |
| S1 | 1.46 | 6.23 | 0.59 | 0.62 |
| S2 | 1.64 | 2.98 | 0.63 | 0.60 |
Table 3. Chamfer distances of models learned with both Stage 1 and Stage 2 (S1 & S2), only Stage 1 (S1), and only Stage 2 (S2) on selected datasets. Models learned with both stages consistently yield low Chamfer distances, but when trained with only Stage 1 or only Stage 2, the Chamfer distances generally become significantly higher.

We compare the Chamfer distances among training with both stages, only Stage 1, and only Stage 2, as shown in Table 3. Our results show that two-stage training improves the Chamfer distance (lower is better) compared to training with only Stage 1 or Stage 2 under most circumstances.

It should be noted that training with only the second stage from scratch is also capable of generating a generally reasonable result. However, the Chamfer distances in Table 3 indicate that its learning ability is limited. Therefore, the second, refinement stage should cooperate with the first, coarse stage to generate the best results.

Choice of accumulated weight threshold $\delta_{\text{thres}}$. In Stage 2, a ray truncation point requires the accumulated weight up to that point to be greater than $\delta_{\text{thres}}$, where we intuitively select $\delta_{\text{thres}} = 0.5$. Figure 8 shows the reconstruction results for other choices of $\delta_{\text{thres}}$, namely 0.3 and 0.7. We observe that all threshold choices successfully reconstruct the model. Raising the threshold to 0.7 produces visually similar results. Lowering it to 0.3 also works in general, although it may introduce more holes into the reconstructed meshes. We deduce that a lower threshold increases the possibility that a ray is truncated prematurely, leading to less desirable results. Nevertheless, a considerable range of $\delta_{\text{thres}}$ from 0.3 to 0.7 works without major regression, indicating that our Stage 2 training is robust against the choice of $\delta_{\text{thres}}$.

![](images/769c7f1b165581d56d8ee2b70ab1418a2cfb555702c6ff80bad0ce9640a5c48a.jpg)
Figure 8. Qualitative comparisons among different choices of the accumulated weight threshold $\delta_{\text{thres}}$. Setting a higher threshold works well with few visual differences; setting a lower threshold generally works fine but may introduce more holes into the reconstructed meshes.

# 4.4. Limitations

Since a light ray is cut off after it passes through one layer of surface, our method relinquishes the ability to model surfaces with transparency. Occasionally, due to learning uncertainty, the Chamfer distance may increase slightly in the second stage, but the difference is quite small and has no visual impact. Overall, the two-stage learning improves the quality significantly. For watertight models, SDF learning is more suitable than UDF learning, since UDF learning is more complicated than SDF learning. We still advise using SDF learning, e.g., NeuS [34], HF-NeuS [36] or PET-NeuS [37], for watertight model reconstruction. Also, the mesh extraction of MeshUDF [15] tends to generate holes and "staircase" artifacts, affecting the mesh reconstruction quality. Adopting a more robust extraction method, e.g., DoubleCoverUDF [18], could alleviate the problem, but we use MeshUDF for all methods here for a fair comparison.

# 5. Conclusions

Overall, 2S-UDF offers a promising approach to the problem of reconstructing both open and watertight models from multi-view images.
Its advantages over existing methods lie in the use of a simple and more accurate density function and a smooth, differentiable UDF representation, so that the learned UDF approximates the ground truth as closely as possible. A two-stage learning strategy further eliminates bias and improves UDF accuracy. Results from our experiments on the DeepFashion3D, DTU and BlendedMVS datasets demonstrate the effectiveness of our method, particularly in learning smooth UDFs and stably reconstructing open surfaces, revealing the robustness of 2S-UDF. Moreover, our method does not rely on object masks for open model reconstruction, making it more practical in real-world applications.

# Acknowledgments

This project was supported in part by the National Natural Science Foundation of China under Grants (61872347, 62072446), in part by the National Key R&D Program of China under Grant 2023YFB3002901, in part by the Basic Research Project of ISCAS under Grant ISCAS-JCMS-202303, and in part by the Ministry of Education, Singapore, under its Academic Research Fund Grants (MOET2EP20220-0005, RG20/20 & RT19/22).

# References

[1] Dejan Azinović, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural RGB-D Surface Reconstruction. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6280-6291, 2022. 3
[2] Fausto Bernardini, Joshua Mittleman, Holly Rushmeier, Cláudio Silva, and Gabriel Taubin. The ball-pivoting algorithm for surface reconstruction. IEEE Trans. Vis. Comput. Graph., 5(4):349-359, 1999. 2
[3] A. Broadhurst, T.W. Drummond, and R. Cipolla. A probabilistic framework for space carving. In Int. Conf. Comput. Vis., pages 388-393 vol.1, 2001. 2
[4] Rohan Chabra, Jan E. Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep Local Shapes: Learning Local SDF Priors for Detailed 3D Reconstruction. In Eur. Conf. Comput. Vis., pages 608-625, Cham, 2020. Springer International Publishing. 2
[5] Julian Chibane, Thiemo Alldieck, and Gerard Pons-Moll. Implicit Functions in Feature Space for 3D Shape Reconstruction and Completion. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6968-6979, 2020. 2
[6] Julian Chibane, Mohamad Aymen Mir, and Gerard Pons-Moll. Neural Unsigned Distance Fields for Implicit Function Learning. In Adv. Neural Inform. Process. Syst., pages 21638-21652. Curran Associates, Inc., 2020. 3
[7] François Darmon, Bénédicte Bascle, Jean-Clement Devaux, Pascal Monasse, and Mathieu Aubry. Improving neural implicit surfaces geometry with patch warping. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6250-6259, 2022. 1, 2
[8] J. De Bonet and P. Viola. Roxels: responsibility weighted 3D volume reconstruction. In Int. Conf. Comput. Vis., pages 418-425 vol.1, 1999. 2
[9] Charles Dugas, Yoshua Bengio, François Bélisle, Claude Nadeau, and René Garcia. Incorporating Second-Order Functional Knowledge for Better Option Pricing. In Adv. Neural Inform. Process. Syst. MIT Press, 2000. 4
[10] Qiancheng Fu, Qingshan Xu, Yew Soon Ong, and Wenbing Tao. Geo-Neus: Geometry-Consistent Neural Implicit Surfaces Learning for Multi-view Reconstruction. In Adv. Neural Inform. Process. Syst., pages 3403-3416. Curran Associates, Inc., 2022. 2
[11] Kunihiko Fukushima. Cognitron: a self-organizing multilayered neural network. Biological Cybernetics, 20(3-4):121-136, 1975. 4
[12] Yasutaka Furukawa and Carlos Hernández. Multi-View Stereo: A Tutorial. Found. Trends. Comput. Graph. Vis., 9(1-2):1-148, 2015. 2
[13] Silvano Galliani, Katrin Lasinger, and Konrad Schindler.
Massively Parallel Multiview Stereopsis by Surface Normal Diffusion. In Int. Conf. Comput. Vis., pages 873-881, 2015. 2
[14] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit Geometric Regularization for Learning Shapes. In Proceedings of the 37th International Conference on Machine Learning, pages 3789-3799. PMLR, 2020. 5
[15] Benoit Guillard, Federico Stella, and Pascal Fua. MeshUDF: Fast and Differentiable Meshing of Unsigned Distance Field Networks. In Eur. Conf. Comput. Vis., pages 576-592, Cham, 2022. Springer Nature Switzerland. 8
[16] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, 2 edition, 2004. 2
[17] Fei Hou, Chiyu Wang, Wencheng Wang, Hong Qin, Chen Qian, and Ying He. Iterative Poisson surface reconstruction (iPSR) for unoriented points. ACM Trans. Graph., 41(4), 2022. 2
[18] Fei Hou, Xuhui Chen, Wencheng Wang, Hong Qin, and Ying He. Robust Zero Level-Set Extraction from Unsigned Distance Fields Based on Double Covering. ACM Trans. Graph., 42(6), 2023. 8
[19] Rasmus Jensen, Anders Dahl, George Vogiatzis, Engin Tola, and Henrik Aanæs. Large Scale Multi-view Stereopsis Evaluation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 406-413, 2014. 2, 5, 7
[20] Mengqi Ji, Jinzhi Zhang, Qionghai Dai, and Lu Fang. SurfaceNet+: An End-to-end 3D Neural Network for Very Sparse Multi-View Stereopsis. IEEE Trans. Pattern Anal. Mach. Intell., 43(11):4078-4093, 2021. 2
[21] Abhishek Kar, Christian Hane, and Jitendra Malik. Learning a Multi-View Stereo Machine. In Adv. Neural Inform. Process. Syst. Curran Associates, Inc., 2017. 2
[22] Michael Kazhdan and Hugues Hoppe. Screened Poisson surface reconstruction. ACM Trans. Graph., 32(3), 2013. 2
[23] Yu-Tao Liu, Li Wang, Jie Yang, Weikai Chen, Xiaoxu Meng, Bo Yang, and Lin Gao. NeUDF: Learning Neural Unsigned Distance Fields with Volume Rendering. In IEEE Conf. Comput. Vis. Pattern Recog., pages 237-247, 2023. 1, 2, 3, 4, 5, 6, 7
[24] Xiaoxiao Long, Cheng Lin, Peng Wang, Taku Komura, and Wenping Wang. SparseNeuS: Fast Generalizable Neural Surface Reconstruction from Sparse Views. In Eur. Conf. Comput. Vis., pages 210-227, Cham, 2022. Springer Nature Switzerland. 2
[25] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Yuan Liu, Peng Wang, Christian Theobalt, Taku Komura, and Wenping Wang. NeuralUDF: Learning Unsigned Distance Fields for Multi-View Reconstruction of Surfaces with Arbitrary Topologies. In IEEE Conf. Comput. Vis. Pattern Recog., pages 20834-20843, 2023. 1, 2, 3, 4, 5, 6, 7
[26] Baorui Ma, Zhizhong Han, Yu-Shen Liu, and Matthias Zwicker. Neural-Pull: Learning Signed Distance Function from Point Clouds by Learning to Pull Space onto Surface. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, pages 7246-7257. PMLR, 2021. 2
[27] Xiaoxu Meng, Weikai Chen, and Bo Yang. NeAT: Learning Neural Implicit Surfaces with Arbitrary Topologies from Multi-View Images. In IEEE Conf. Comput. Vis. Pattern Recog., pages 248-258, 2023. 2, 5, 6, 7
[28] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy Networks: Learning 3D Reconstruction in Function Space. In IEEE Conf. Comput. Vis. Pattern Recog., pages 4455-4465, 2019. 2
[29] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In Eur. Conf. Comput.
Vis., pages 405-421, Cham, 2020. Springer International Publishing. 1, 2 +[30] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning Continuous Signed Distance Functions for Shape Representation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 165-174, 2019. 2 +[31] Johannes L. Schonberger, Enliang Zheng, Jan-Michael Frahm, and Marc Pollefeys. Pixelwise View Selection for Unstructured Multi-View Stereo. In Eur. Conf. Comput. Vis., pages 501–518, Cham, 2016. Springer International Publishing. 2 +[32] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit Neural Representations with Periodic Activation Functions. In Adv. Neural Inform. Process. Syst., pages 7462-7473. Curran Associates, Inc., 2020. 2 +[33] Jiaming Sun, Yiming Xie, Linghao Chen, Xiaowei Zhou, and Hujun Bao. NeuralRecon: Real-Time Coherent 3D Reconstruction from Monocular Video. In IEEE Conf. Comput. Vis. Pattern Recog., pages 15593-15602, 2021. 2 +[34] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. NeuS: Learning Neural Implicit Surfaces by Volume Rendering for Multi-view Reconstruction. In Adv. Neural Inform. Process. Syst., pages 27171-27183. Curran Associates, Inc., 2021. 1, 2, 5, 8 +[35] Yifan Wang, Lukas Rahmann, and Olga Sorkine-Hornung. Geometry-Consistent Neural Shape Representation with Implicit Displacement Fields. In Int. Conf. Learn. Represent. OpenReview.net, 2022. 2 +[36] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. HF-NeuS: Improved Surface Reconstruction Using High-Frequency Details. In Adv. Neural Inform. Process. Syst., pages 1966–1978. Curran Associates, Inc., 2022. 1, 2, 3, 5, 8 +[37] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. PETNeuS: Positional Encoding Tri-Planes for Neural Surfaces. In IEEE Conf. Comput. Vis. Pattern Recog., pages 12598–12607, 2023. 2, 8 +[38] Yao Yao, Zixin Luo, Shiwei Li, Tianwei Shen, Tian Fang, and Long Quan. Recurrent MVSNet for High-Resolution Multi-View Stereo Depth Inference. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5520–5529, 2019. 2 +[39] Yao Yao, Zixin Luo, Shiwei Li, Jingyang Zhang, Yufan Ren, Lei Zhou, Tian Fang, and Long Quan. BlendedMVS: A Large-Scale Dataset for Generalized Multi-View Stereo Networks. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1787–1796, 2020. 5 +[40] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume Rendering of Neural Implicit Surfaces. In Adv. Neural Inform. Process. Syst., pages 4805-4815. Curran Associates, Inc., 2021. 1, 2, 5 + +[41] Fang Zhao, Wenhao Wang, Shengcai Liao, and Ling Shao. Learning Anchored Unsigned Distance Functions with Gradient Direction Alignment for Single-view Garment Reconstruction. In Int. Conf. Comput. Vis., pages 12654-12663, 2021. 3 +[42] Junsheng Zhou, Baorui Ma, Yu-Shen Liu, Yi Fang, and Zhizhong Han. Learning Consistency-Aware Unsigned Distance Functions Progressively from Raw Point Clouds. In Adv. Neural Inform. Process. Syst., pages 16481-16494. Curran Associates, Inc., 2022. 3 +[43] Heming Zhu, Yu Cao, Hang Jin, Weikai Chen, Dong Du, Zhangye Wang, Shuguang Cui, and Xiaoguang Han. Deep Fashion3D: A Dataset and Benchmark for 3D Garment Reconstruction from Single Images. In Eur. Conf. Comput. Vis., pages 512-530, Cham, 2020. Springer International Publishing. 
2, 5, 6 \ No newline at end of file diff --git a/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/images.zip b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..1505787ec58cfc25f45d6e7269b53163955f974d --- /dev/null +++ b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9260a8943b028acf3cbfb20c98ae84f8c80e22660f53caade91d782879c6f4b1 +size 466709 diff --git a/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/layout.json b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..724d587240edc706e78c7242cc8bd85efd376d63 --- /dev/null +++ b/2024/2S-UDF_ A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images/layout.json @@ -0,0 +1,8483 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 53, + 102, + 539, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 102, + 539, + 140 + ], + "spans": [ + { + "bbox": [ + 53, + 102, + 539, + 140 + ], + "type": "text", + "content": "2S-UDF: A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 94, + 160, + 498, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 160, + 498, + 175 + ], + "spans": [ + { + "bbox": [ + 94, + 160, + 498, + 175 + ], + "type": "text", + "content": "Junkai Deng1,2 Fei Hou1,2* Xuhui Chen1,2 Wencheng Wang1,2 Ying He3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 63, + 175, + 531, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 175, + 531, + 190 + ], + "spans": [ + { + "bbox": [ + 63, + 175, + 531, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 63, + 175, + 531, + 190 + ], + "type": "text", + "content": "State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 187, + 189, + 405, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 189, + 405, + 203 + ], + "spans": [ + { + "bbox": [ + 187, + 189, + 405, + 203 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 187, + 189, + 405, + 203 + ], + "type": "text", + "content": "University of Chinese Academy of Sciences" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 96, + 203, + 495, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 203, + 495, + 217 + ], + "spans": [ + { + "bbox": [ + 96, + 203, + 495, + 217 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 96, + 203, + 495, + 217 + ], + "type": "text", + "content": "School of Computer Science and Engineering, Nanyang Technological University" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 220, + 440, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 220, + 440, + 232 + ], + "spans": [ + { + "bbox": [ + 138, + 220, 
+ 440, + 232 + ], + "type": "text", + "content": "{dengjk,houfei,chenxh,whn}@ios.ac.cn yhe@ntu.edu.sg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 143, + 258, + 191, + 271 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 258, + 191, + 271 + ], + "spans": [ + { + "bbox": [ + 143, + 258, + 191, + 271 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 284, + 290, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 284, + 290, + 632 + ], + "spans": [ + { + "bbox": [ + 45, + 284, + 290, + 632 + ], + "type": "text", + "content": "Recently, building on the foundation of neural radiance field, various techniques have emerged to learn unsigned distance fields (UDF) to reconstruct 3D non-watertight models from multi-view images. Yet, a central challenge in UDF-based volume rendering is formulating a proper way to convert unsigned distance values into volume density, ensuring that the resulting weight function remains unbiased and sensitive to occlusions. Falling short on these requirements often results in incorrect topology or large reconstruction errors in resulting models. This paper addresses this challenge by presenting a novel two-stage algorithm, 2S-UDF, for learning a high-quality UDF from multi-view images. Initially, the method applies an easily trainable density function that, while slightly biased and transparent, aids in coarse reconstruction. The subsequent stage then refines the geometry and appearance of the object to achieve a high-quality reconstruction by directly adjusting the weight function used in volume rendering to ensure that it is unbiased and occlusion-aware. Decoupling density and weight in two stages makes our training stable and robust, distinguishing our technique from existing UDF learning approaches. Evaluations on the DeepFashion3D, DTU, and BlendedMVS datasets validate the robustness and effectiveness of our proposed approach. In both quantitative metrics and visual quality, the results indicate our superior performance over other UDF learning techniques in reconstructing 3D non-watertight models from multi-view images. Our code is available at https://bitbucket.org/jkdeng/2sudf/." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 652, + 128, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 652, + 128, + 664 + ], + "spans": [ + { + "bbox": [ + 47, + 652, + 128, + 664 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 672, + 287, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 672, + 287, + 696 + ], + "spans": [ + { + "bbox": [ + 47, + 672, + 287, + 696 + ], + "type": "text", + "content": "As the success of neural radiance field (NeRF) [29], numerous volume rendering based 3D modeling methods are pro" + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 320, + 272, + 364, + 321 + ], + "blocks": [ + { + "bbox": [ + 334, + 258, + 349, + 268 + ], + "lines": [ + { + "bbox": [ + 334, + 258, + 349, + 268 + ], + "spans": [ + { + "bbox": [ + 334, + 258, + 349, + 268 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 320, + 272, + 364, + 321 + ], + "lines": [ + { + "bbox": [ + 320, + 272, + 364, + 321 + ], + "spans": [ + { + "bbox": [ + 320, + 272, + 364, + 321 + ], + "type": "image", + "image_path": "867bc421a97576f520a3274ecbdd5e9eba4ec439278a4d750a3d66c3827ea2c9.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 320, + 325, + 364, + 360 + ], + "blocks": [ + { + "bbox": [ + 320, + 325, + 364, + 360 + ], + "lines": [ + { + "bbox": [ + 320, + 325, + 364, + 360 + ], + "spans": [ + { + "bbox": [ + 320, + 325, + 364, + 360 + ], + "type": "image", + "image_path": "85e962c770c891a98f6b56af5d215f5073176408910292bed8e6f4a91d2c8ea6.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 373, + 545, + 451 + ], + "lines": [ + { + "bbox": [ + 305, + 373, + 545, + 451 + ], + "spans": [ + { + "bbox": [ + 305, + 373, + 545, + 451 + ], + "type": "text", + "content": "Figure 1. We learn a UDF from multiview images for nonwatertight model reconstruction. As illustrated in the cross sections of learned UDFs, our learned UDF approximates to the ground truth. In contrast, the learned UDF of NeuralUDF [25] is choppy leading to significant artifacts, e.g., unexpected pit. The learned UDF of NeUDF [23] is almost closed struggling to generate open surface." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 376, + 271, + 419, + 321 + ], + "blocks": [ + { + "bbox": [ + 386, + 258, + 407, + 269 + ], + "lines": [ + { + "bbox": [ + 386, + 258, + 407, + 269 + ], + "spans": [ + { + "bbox": [ + 386, + 258, + 407, + 269 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 376, + 271, + 419, + 321 + ], + "lines": [ + { + "bbox": [ + 376, + 271, + 419, + 321 + ], + "spans": [ + { + "bbox": [ + 376, + 271, + 419, + 321 + ], + "type": "image", + "image_path": "dabe67eff785b81ed83779618b589b9cdfe1376a1193b3914728b850dc1466e1.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 375, + 325, + 419, + 359 + ], + "blocks": [ + { + "bbox": [ + 375, + 325, + 419, + 359 + ], + "lines": [ + { + "bbox": [ + 375, + 325, + 419, + 359 + ], + "spans": [ + { + "bbox": [ + 375, + 325, + 419, + 359 + ], + "type": "image", + "image_path": "a42faf2f7448712bdc6de433b3d8c9e170355c4c40c61ea4bc1244b5a9d9d5db.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 432, + 271, + 475, + 321 + ], + "blocks": [ + { + "bbox": [ + 429, + 258, + 479, + 269 + ], + "lines": [ + { + "bbox": [ + 429, + 258, + 479, + 269 + ], + "spans": [ + { + "bbox": [ + 429, + 258, + 479, + 269 + ], + "type": "text", + "content": "NeuralUDF" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 432, + 271, + 475, + 321 + ], + "lines": [ + { + "bbox": [ + 432, + 271, + 475, + 321 + ], + "spans": [ + { + "bbox": [ + 432, + 271, + 475, + 321 + ], + "type": "image", + "image_path": "a59c2a6b379ff109af501cfcca8bad9f94ff27431c6799491729fa0cd5e3fd08.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 433, + 325, + 475, + 359 + ], + "blocks": [ + { + "bbox": [ + 433, + 325, + 475, + 359 + ], + "lines": [ + { + "bbox": [ + 433, + 325, + 475, + 359 + ], + "spans": [ + { + "bbox": [ + 433, + 325, + 475, + 359 + ], + "type": "image", + "image_path": "1f6e183a63714d014a9d2906c74a7296fa1748e9aebee81c5db5fcde94239134.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 490, + 271, + 533, + 321 + ], + "blocks": [ + { + "bbox": [ + 493, + 258, + 527, + 269 + ], + "lines": [ + { + "bbox": [ + 493, + 258, + 527, + 269 + ], + "spans": [ + { + "bbox": [ + 493, + 258, + 527, + 269 + ], + "type": "text", + "content": "NeUDF" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 490, + 271, + 533, + 321 + ], + "lines": [ + { + "bbox": [ + 490, + 271, + 533, + 321 + ], + "spans": [ + { + "bbox": [ + 490, + 271, + 533, + 321 + ], + "type": "image", + "image_path": "f2aa3375ed2b1b37854099d1c96a2e52adb2801ce536a43fc9776bdaa551545f.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 489, + 325, + 533, + 359 + ], + "blocks": [ + { + "bbox": [ + 489, + 325, + 533, + 359 + ], + "lines": [ + { + "bbox": [ + 489, + 325, + 533, + 359 + ], + "spans": [ + { + "bbox": [ + 489, + 325, + 533, + 359 + ], + "type": "image", + "image_path": 
"a8099774a3e49a585d31d7d5f2388893a45d5cebce77d9fc5502bcace31fa767.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 473, + 546, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 546, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 546, + 676 + ], + "type": "text", + "content": "posed to learn signed distance fields (SDF) for 3D model reconstruction from multi-view images [7, 34, 36, 40]. These approaches map signed distance value to a density function, thereby enabling the use of volume rendering to learn an implicit SDF representation. To calculate pixel colors, they compute the weighted sum of radiances along each light ray. Achieving an accurate surface depiction requires the density function to meet three essential criteria. Firstly, the weights, which are derived from the density function, must reach their maximum value when the distance is zero, ensuring unbiasedness. Secondly, as a ray traverses through the surface, the accumulated density should tend towards infinity, rendering the surface opaque — a property referred to as occlusion-awareness. Finally, the density function should be bounded to prevent numerical issues. The popular SDF approaches, such as NeuS [34] and VolSDF [40], adopt an S-shaped density function that meets all these requirements." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 305, + 677, + 547, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 547, + 712 + ], + "type": "text", + "content": "While SDF-based methods excel at reconstructing watertight models, they have limitations in representing open models. This is due to the intrinsic nature of SDF, which" + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 1, + 494, + 15 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 1, + 494, + 15 + ], + "spans": [ + { + "bbox": [ + 145, + 1, + 494, + 15 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 198, + 13, + 442, + 24 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 13, + 442, + 24 + ], + "spans": [ + { + "bbox": [ + 198, + 13, + 442, + 24 + ], + "type": "text", + "content": "Except for this watermark, it is identical to the accepted version;" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 179, + 25, + 461, + 36 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 25, + 461, + 36 + ], + "spans": [ + { + "bbox": [ + 179, + 25, + 461, + 36 + ], + "type": "text", + "content": "the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "type": "text", + "content": "*Corresponding author" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5084" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 49, + 72, + 286, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 72, + 286, + 358 + ], + "spans": [ + { + "bbox": [ + 49, + 72, + 286, + 358 + ], + "type": "text", + "content": "differentiates between the interior and exterior of a model, thus failing to accommodate open boundaries. Recent advances have attempted to mitigate this constraint by employing unsigned distance fields (UDF) [23, 25, 27]. Unlike signed distance fields, UDFs have non-negative distance values, making them suitable for representing nonwatertight models. However, learning a UDF from multiview images is a challenging task since the gradients of the UDF are unstable due to directional changes near the zero level-set, making it difficult to train the neural network. Another major challenge lies in formulating a UDF-induced density function that can simultaneously meet the above-mentioned three requirements. Unlike SDFs, UDFs cannot distinguish between the front and back of a surface based on distance values, thus, directly using an S-shaped density function is off the table. Opting for a bell-shaped density function brings its own issues. It is impossible for these integrations to approach infinity, so as to be occlusion-aware, unless the density becomes boundless at zero distance values. These conflicting requirements make UDF learning a non-trivial task, forcing existing methods to sacrifice at least one of these conditions. As shown in Figure 1, the existing methods NeuralUDF [25] and NeUDF [23] result in either choppy or nearly closed UDFs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 360, + 286, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 360, + 286, + 658 + ], + "spans": [ + { + "bbox": [ + 49, + 360, + 286, + 658 + ], + "type": "text", + "content": "As designing a UDF-induced density function that simultaneously fulfills the three aforementioned conditions remains an unresolved challenge, we propose a novel approach that learns a UDF from multi-view images in two separate stages. In the first stage, we apply an easily trainable but slightly biased and transparent density function for coarse reconstruction. Such a UDF, although being approximate, provides an important clue so that we can determine where to truncate the light rays. This accounts for the occlusion effect, where points behind the surface are not visible and should not contribute to the output color. With truncated light rays, we are able to derive the weights from UDF directly bypassing the density function, to further refine the geometry and appearance in the second stage. Our two-stage learning method, called 2S-UDF, leads to an unbiased and occlusion-aware weight function. 
Furthermore, by sidestepping density function learning in Stage 2, we effectively bypass the challenges associated with ensuring its boundedness. This strategy enhances the numerical stability of our method. Evaluations on benchmark datasets DeepFashion3D [43] and DTU [19] show that 2S-UDF outperforms existing UDF learning methods in terms of both reconstruction accuracy and visual quality. Additionally, we observe that the training stability of 2S-UDF is notably superior compared to other UDF learning neural networks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 670, + 132, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 670, + 132, + 681 + ], + "spans": [ + { + "bbox": [ + 49, + 670, + 132, + 681 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 690, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 690, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 690, + 286, + 712 + ], + "type": "text", + "content": "3D Reconstruction from Multi-View Images. Surface reconstruction from multi-view images has been a subject of" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 308, + 72, + 544, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 72, + 544, + 239 + ], + "spans": [ + { + "bbox": [ + 308, + 72, + 544, + 239 + ], + "type": "text", + "content": "study for several decades, and can generally be classified into two categories: voxel-based and point-based methods. Voxel-based methods [3, 8, 20, 21, 33] divide the 3D space into voxels and determine which ones belong to the object. These methods can be computationally expensive and may not be suitable for reconstructing complex surfaces. Point-based methods [13, 31, 38] use structure-from-motion [16] to calibrate the images and generate a dense point cloud using multi-view stereo [12]. Finally, surface reconstruction methods (e.g., [2, 17, 22]) are used to generate a mesh. Since multi-view stereo requires dense correspondences to generate a dense point cloud, which are often difficult to compute, its results often contain various types of artifacts, such as noise, holes, and incomplete structures." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 308, + 243, + 544, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 243, + 544, + 624 + ], + "spans": [ + { + "bbox": [ + 308, + 243, + 544, + 624 + ], + "type": "text", + "content": "Neural Volume Rendering. Neural network-based 3D surface reconstruction has received attention in recent years with the emergence of neural rendering [29]. Several methods have been proposed for volume rendering and surface reconstruction using neural networks. VolSDF [40] uses the cumulative distribution function of Laplacian distribution to evaluate the density function from SDF for volume rendering and surface reconstruction. NeuS [34] adopts an unbiased density function to the first-order approximation of SDFs for more accurate reconstruction. SparseNeuS [24] extends NeuS to use fewer images for reconstruction. HFNeuS [36] improves NeuS by proposing a simplified and unbiased density function and using hierarchical multilayer perceptrons (MLPs) for detail reconstruction. GeoNeuS [10] incorporates structure-from-motion to add more constraints. NeuralWarp [7] improves the accuracy by optimizing consistency between warped views of different images. 
PET-NeuS [37] further improves the accuracy by introducing tri-planes into the SDF prediction module, incorporating with MLP. All these methods learn SDFs, which can only reconstruct watertight models. Recently, Long et al. proposed NeuralUDF [25] for learning UDF for reconstructing open models. It adapts the S-shaped density function for learning SDF to UDFs by introducing an indicator function. However, the indicator function is complicated to learn, and also introduces biases. Liu et al. proposed NeUDF [23] adopting a bell-shaped density. However, to make it occlusion-aware, the density has to be unbounded resulting in an improper integral, which reduces accuracy. Meng et al. proposed NeAT [27] to learn SDF with validity so as to reconstruct open models from SDF. However, it needs foreground masks for data." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 630, + 544, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 630, + 544, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 630, + 544, + 712 + ], + "type": "text", + "content": "3D Reconstruction from Point Clouds. There has been recent interest in surface representation using signed distance fields (SDFs) and occupation fields. Several methods have been proposed for learning SDFs [4, 26, 30, 32, 35], while occupation fields have been used in methods such as [5, 28]. However, both SDFs and occupation fields can only represent watertight models. To represent non-watertight" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 750, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 750, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 750, + 314, + 757 + ], + "type": "text", + "content": "5085" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": "models, some methods are proposed to learn UDF from 3D point clouds [6, 41, 42]. Our proposed method also uses UDF for non-watertight models representation, but we learn it directly from multi-view images, which is a challenging problem." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 141, + 102, + 153 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 141, + 102, + 153 + ], + "spans": [ + { + "bbox": [ + 47, + 141, + 102, + 153 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 161, + 287, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 161, + 287, + 266 + ], + "spans": [ + { + "bbox": [ + 46, + 161, + 287, + 266 + ], + "type": "text", + "content": "At the foundation of UDF-based learning approaches is the task of crafting a density function that converts unsigned distance values into volume density, ensuring that the resulting weight function is unbiased and responsive to occlusions. None of the existing UDF learning methods [23, 25] can simultaneously meet the three critical requirements, i.e., ensuring the density function is bounded, and that the weight function remains both unbiased and occlusion aware." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 269, + 287, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 269, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 269, + 287, + 460 + ], + "type": "text", + "content": "We tackle these challenges by decoupling the density function and weight function across two stages. In the initial stage (Section 3.1), we utilize an easy-to-train, bell-shaped density function (which is inherently bounded) to learn a coarse UDF. While the resulting weight function is not theoretically unbiased or occlusion-aware, we can make it practically usable by choosing a proper parameter. Moving into the second stage (Section 3.2), we sidestep the density function entirely, focusing instead on refining the UDF by directly adjusting the weight function within the neural volume rendering framework. Specifically, we truncate light rays after they hit the front side of the object and obtain a weight function that is both unbiased and sensitive to occlusions, without the overhang of density function boundedness concerns. Finally, Section 3.3 presents the training details." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 465, + 287, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 465, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 47, + 465, + 287, + 491 + ], + "type": "text", + "content": "3.1. Stage 1: Coarse UDF Learning via a Simple Density Function" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 496, + 287, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 496, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 496, + 287, + 544 + ], + "type": "text", + "content": "We consider the scenario of a single planar plane " + }, + { + "bbox": [ + 46, + 496, + 287, + 544 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 46, + 496, + 287, + 544 + ], + "type": "text", + "content": " and a single ray-plane intersection. 
Inspired by HF-NeuS [36], we propose an easy-to-learn density function " + }, + { + "bbox": [ + 46, + 496, + 287, + 544 + ], + "type": "inline_equation", + "content": "\\sigma_{1}" + }, + { + "bbox": [ + 46, + 496, + 287, + 544 + ], + "type": "text", + "content": " that maps unsigned distance " + }, + { + "bbox": [ + 46, + 496, + 287, + 544 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 496, + 287, + 544 + ], + "type": "text", + "content": " to density" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 85, + 548, + 287, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 548, + 287, + 574 + ], + "spans": [ + { + "bbox": [ + 85, + 548, + 287, + 574 + ], + "type": "interline_equation", + "content": "\\sigma_ {1} (f (t)) = \\frac {c s e ^ {- s f (t)}}{1 + e ^ {- s f (t)}}, s > 0, c > 0, \\tag {1}", + "image_path": "5f1bbdad694f8930775264eb2d5db9092e254f9e0df89cea54a59ce616eda8ec.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "inline_equation", + "content": "c > 0" + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": " is a fixed, user-specified parameter and " + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "inline_equation", + "content": "s > 0" + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": " is a learnable parameter controlling the width of the bell-shaped curve. Straightforward calculation shows that the weight function " + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "inline_equation", + "content": "w_{1}(f(t)) = e^{-\\int_{0}^{t}\\sigma_{1}(f(u))\\mathrm{d}u}\\sigma_{1}(f(t))" + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": " is monotonically decreasing behind the plane " + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": " and the maximum value occurs at a point " + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "inline_equation", + "content": "t^*" + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": " in front of " + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": " with an unsigned distance value of " + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "inline_equation", + "content": "f(t^{*}) = \\frac{1}{s}\\ln \\frac{c}{|\\cos(\\theta)|}, (c > |\\cos (\\theta)|)" + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "inline_equation", + "content": "f(t^{*}) = 0, (0 < c \\leq |\\cos (\\theta)|)" + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": " is the incident angle between the light ray and the surface normal. 
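As a quick numerical check of Eq. (1), the following NumPy snippet (our own illustration, not the released code; the small s is chosen only so the curve is resolvable on a coarse grid) evaluates the density along a ray that hits a plane at t = 1 and locates the peak of the resulting weight.

```python
import numpy as np

c, s = 5.0, 50.0                          # the paper fixes c = 5; the learned s is ~1000-2000
t = np.linspace(0.0, 2.0, 4001)           # samples along the ray
f = np.abs(1.0 - t)                       # UDF of a plane hit at t = 1, normal incidence

sigma1 = c * s * np.exp(-s * f) / (1.0 + np.exp(-s * f))    # Eq. (1)

dt = t[1] - t[0]
T = np.exp(-np.cumsum(sigma1) * dt)       # transmittance along the ray
w1 = T * sigma1                           # weight function w_1

print(t[np.argmax(w1)])                   # slightly less than 1: the peak lies in front of
                                          # the surface, at f(t*) = ln(c)/s for normal incidence
```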
This means that the weight function " + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "inline_equation", + "content": "w_{1}" + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": " is not unbiased. Furthermore, the line integral " + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\int_0^t\\sigma_1(f(u))\\mathrm{d}u" + }, + { + "bbox": [ + 46, + 578, + 287, + 715 + ], + "type": "text", + "content": " does" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 107 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 107 + ], + "type": "text", + "content": "not approach infinity when a light ray passes through the front-most layer of the surface, indicating " + }, + { + "bbox": [ + 304, + 72, + 545, + 107 + ], + "type": "inline_equation", + "content": "w_{1}" + }, + { + "bbox": [ + 304, + 72, + 545, + 107 + ], + "type": "text", + "content": " is only partially occlusion-aware." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": "While the density function " + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "inline_equation", + "content": "\\sigma_{1}" + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": " is not perfect in theory, by selecting an appropriate " + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": ", we can practically minimize bias and enhance opacity. Clearly, a smaller " + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": " value decreases " + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "inline_equation", + "content": "f(t^{*})" + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": ", thereby reducing bias. To gauge the effect of " + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": " on opacity, we now consider the most extreme scenario where the incident light ray is perpendicular to the planar surface " + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": ", and assume that the intersection point is located at " + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "inline_equation", + "content": "t = 1" + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": ". 
In such a situation, the unsigned distance function is " + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "inline_equation", + "content": "f(t) = 1 - t" + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": " for points in front of " + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "inline_equation", + "content": "\\sigma_{1}" + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": " is symmetrical on either side of " + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 304, + 108, + 545, + 251 + ], + "type": "text", + "content": ", the surface transparency is the square of the transparency of the front side. The theoretic transparency is," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 312, + 261, + 539, + 320 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 261, + 539, + 320 + ], + "spans": [ + { + "bbox": [ + 312, + 261, + 539, + 320 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left(e ^ {- \\int_ {0} ^ {1} \\hat {\\sigma} _ {1} (f (t)) \\mathrm {d} t}\\right) ^ {2} = \\left[ \\exp \\left(- \\int_ {0} ^ {1} \\frac {c s e ^ {- s (1 - t)}}{1 + e ^ {- s (1 - t)}} \\mathrm {d} t\\right) \\right] ^ {2} \\\\ = \\left(\\frac {1 + e ^ {- s}}{2}\\right) ^ {2 c}. \\\\ \\end{array}", + "image_path": "3766ba56aa2188a3f5a9d32b6dd9afe704422a889ea6e8d0baced0997a4cb26c.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 329, + 545, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 329, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 304, + 329, + 545, + 425 + ], + "type": "text", + "content": "Therefore, we should choose a relatively large " + }, + { + "bbox": [ + 304, + 329, + 545, + 425 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 329, + 545, + 425 + ], + "type": "text", + "content": " to reduce transparency. In our implementation, we set the constant " + }, + { + "bbox": [ + 304, + 329, + 545, + 425 + ], + "type": "inline_equation", + "content": "c = 5" + }, + { + "bbox": [ + 304, + 329, + 545, + 425 + ], + "type": "text", + "content": " based on the typical value of the learned parameter " + }, + { + "bbox": [ + 304, + 329, + 545, + 425 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 304, + 329, + 545, + 425 + ], + "type": "text", + "content": " which usually ranges between 1000 and 2000. Calculations of bias and translucency show that this setting offers a good balance between occlusion-awareness and unbiasedness in the first stage training. Please refer to the supplementary material for a detailed analysis." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 433, + 545, + 458 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 433, + 545, + 458 + ], + "spans": [ + { + "bbox": [ + 305, + 433, + 545, + 458 + ], + "type": "text", + "content": "3.2. 
Stage 2: UDF Refinement through Weight Adjustment" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 464, + 545, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 464, + 545, + 535 + ], + "spans": [ + { + "bbox": [ + 304, + 464, + 545, + 535 + ], + "type": "text", + "content": "In this stage, we refine the UDF learned in Stage 1 to improve the quality of geometry and appearance. Unlike Stage 1 and all other UDF-learning methods, inspired by [1], we truncate light rays based on the approximated UDF learned in Stage 1 and learn the weight function " + }, + { + "bbox": [ + 304, + 464, + 545, + 535 + ], + "type": "inline_equation", + "content": "w(t)" + }, + { + "bbox": [ + 304, + 464, + 545, + 535 + ], + "type": "text", + "content": " directly instead of the density function " + }, + { + "bbox": [ + 304, + 464, + 545, + 535 + ], + "type": "inline_equation", + "content": "\\sigma(t)" + }, + { + "bbox": [ + 304, + 464, + 545, + 535 + ], + "type": "text", + "content": " to refine the UDF." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 536, + 545, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 536, + 545, + 596 + ], + "spans": [ + { + "bbox": [ + 304, + 536, + 545, + 596 + ], + "type": "text", + "content": "Ideally, for a single ray-plane intersection, we want a bell-shaped function " + }, + { + "bbox": [ + 304, + 536, + 545, + 596 + ], + "type": "inline_equation", + "content": "w(t)" + }, + { + "bbox": [ + 304, + 536, + 545, + 596 + ], + "type": "text", + "content": " that attains its maximum at the points with zero distance values, and satisfies partition of unity. Therefore, we adopt the derivative of the sigmoid function as the weight function [1], defined as" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 347, + 605, + 545, + 633 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 605, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 347, + 605, + 545, + 633 + ], + "type": "interline_equation", + "content": "w _ {2} (f (t)) = \\frac {s e ^ {- s f (t)}}{(1 + e ^ {- s f (t)}) ^ {2}} \\cdot | \\cos (\\theta) |. \\tag {2}", + "image_path": "e78659945f9c358162dd5f414d69b195676abfb6794f62320daff5ff13f92e13.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 641, + 545, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 545, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 545, + 664 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 304, + 641, + 545, + 664 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 641, + 545, + 664 + ], + "type": "text", + "content": " being the incident angle between the light ray and the surface normal." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Intuitively speaking, learning such a weight function " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "w_{2}" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": " in Stage 2 of our UDF method is similar to learning an S-shaped density function in SDF-based approaches, such as [36]. 
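A minimal sketch of the Stage 2 weight of Eq. (2), written by us for illustration; it confirms numerically that the weight peaks exactly at f = 0, the unbiasedness property used below (occlusion-awareness is handled separately by ray truncation).

```python
import numpy as np

def stage2_weight(f, s, cos_theta):
    """Eq. (2): derivative-of-sigmoid weight evaluated on unsigned distances f >= 0."""
    e = np.exp(-s * f)
    return s * e / (1.0 + e) ** 2 * np.abs(cos_theta)

f = np.linspace(0.0, 0.1, 1001)
w2 = stage2_weight(f, s=100.0, cos_theta=1.0)
print(f[np.argmax(w2)])    # 0.0 -- maximal weight exactly on the surface
```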
As a result, the learning process in Stage 2 is as" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5086" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "content": "stable as those SDF approaches. Furthermore, it can totally avoid using the visibility indicator function, which is necessary in NeuralUDF [25]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 110, + 287, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 110, + 287, + 289 + ], + "spans": [ + { + "bbox": [ + 46, + 110, + 287, + 289 + ], + "type": "text", + "content": "Calculation shows that the weight " + }, + { + "bbox": [ + 46, + 110, + 287, + 289 + ], + "type": "inline_equation", + "content": "w_{2}" + }, + { + "bbox": [ + 46, + 110, + 287, + 289 + ], + "type": "text", + "content": " attains its maximum at zero distance values, therefore it is unbiased. However, if we naively predict the weight function directly, it will not be occlusion-aware, so we introduce the ray truncation. To make " + }, + { + "bbox": [ + 46, + 110, + 287, + 289 + ], + "type": "inline_equation", + "content": "w_{2}" + }, + { + "bbox": [ + 46, + 110, + 287, + 289 + ], + "type": "text", + "content": " occlusion-aware, we can truncate the light rays after they pass through the frontmost layer of the surface, thereby preventing rendering the interior of the object. Note that we do not expect the truncation to be exactly on the frontmost layer of the surface. In fact, as long as it occurs between the frontmost layer and the second layer, we consider the truncation valid. This means that the approximate UDF learned in the first stage, which can capture the main topological features (such as boundaries) and provide a fairly good representation of the target object, is sufficient for us to determine where to cut off the light rays." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 290, + 287, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 290, + 287, + 337 + ], + "spans": [ + { + "bbox": [ + 46, + 290, + 287, + 337 + ], + "type": "text", + "content": "In our implementation, we adopt a simple strategy to determine the truncation point for each light ray. 
Specifically, the truncation point of ray " + }, + { + "bbox": [ + 46, + 290, + 287, + 337 + ], + "type": "inline_equation", + "content": "\mathbf{r}" + }, + { + "bbox": [ + 46, + 290, + 287, + 337 + ], + "type": "text", + "content": " is the first sample point along " + }, + { + "bbox": [ + 46, + 290, + 287, + 337 + ], + "type": "inline_equation", + "content": "\mathbf{r}" + }, + { + "bbox": [ + 46, + 290, + 287, + 337 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 339, + 286, + 399 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 47, + 339, + 286, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 339, + 286, + 374 + ], + "spans": [ + { + "bbox": [ + 47, + 339, + 286, + 374 + ], + "type": "text", + "content": "- The unsigned distance value at the point is a local maximum. To avoid interference from small oscillations in the distance values, it should be the maximum within a window centered at the point. And" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 376, + 286, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 376, + 286, + 399 + ], + "spans": [ + { + "bbox": [ + 47, + 376, + 286, + 399 + ], + "type": "text", + "content": "- The accumulated weight up to this point is greater than " + }, + { + "bbox": [ + 47, + 376, + 286, + 399 + ], + "type": "inline_equation", + "content": "\delta_{\text{thres}}" + }, + { + "bbox": [ + 47, + 376, + 286, + 399 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 400, + 287, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 400, + 287, + 531 + ], + "spans": [ + { + "bbox": [ + 46, + 400, + 287, + 531 + ], + "type": "text", + "content": "The accumulated weight threshold " + }, + { + "bbox": [ + 46, + 400, + 287, + 531 + ], + "type": "inline_equation", + "content": "\delta_{\text{thres}}" + }, + { + "bbox": [ + 46, + 400, + 287, + 531 + ], + "type": "text", + "content": " is intuitively set to 0.5. This choice is based on the assumption that if the Stage 1 training is performed well enough, the accumulated weight at each sample point along the ray would be either 0 (before reaching a surface) or 1 (after intersecting a surface). Hence, we select 0.5 for " + }, + { + "bbox": [ + 46, + 400, + 287, + 531 + ], + "type": "inline_equation", + "content": "\delta_{\text{thres}}" + }, + { + "bbox": [ + 46, + 400, + 287, + 531 + ], + "type": "text", + "content": " because it is the midpoint between 0 and 1. With the cutoff mechanism, only the first ray-surface intersection contributes to the color of the ray, effectively achieving occlusion-awareness. Given these properties, we conclude that," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 545, + 287, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 287, + 569 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 287, + 569 + ], + "type": "text", + "content": "Theorem 1 The weight " + }, + { + "bbox": [ + 46, + 545, + 287, + 569 + ], + "type": "inline_equation", + "content": "w_{2}" + }, + { + "bbox": [ + 46, + 545, + 287, + 569 + ], + "type": "text", + "content": " with ray truncation is unbiased and occlusion-aware."
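The truncation rule above translates into a few lines of code; the sketch below is our paraphrase of the described procedure (the window size is a hypothetical hyperparameter, as the paper does not state one), operating on the sampled UDF values and per-sample weights of a single ray.

```python
import numpy as np

def truncation_index(f, w, delta_thres=0.5, half_window=3):
    """Index of the first sample satisfying both truncation criteria, or None.

    f : (N,) unsigned distance values at the samples along the ray
    w : (N,) per-sample weights (e.g., Eq. (2)); their running sum is the
        accumulated weight used in the second criterion.
    """
    acc = np.cumsum(w)
    for i in range(len(f)):
        lo, hi = max(0, i - half_window), min(len(f), i + half_window + 1)
        local_max = f[i] >= f[lo:hi].max()          # maximum within a window, so small
                                                    # UDF oscillations are ignored
        if local_max and acc[i] > delta_thres:      # the ray has passed the front surface
            return i                                # cut here; later samples get weight 0
    return None
```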
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "text", + "content": "Figure 2 is an intuitive illustration of our Stage 2 weight learning and truncation strategy. The UDF maximum point " + }, + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "text", + "content": " in front of the intersection surface would not affect the cutting point selection as the accumulated weight is below " + }, + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "inline_equation", + "content": "\delta_{\text{thres}}" + }, + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "text", + "content": " (0.5). The local maximum " + }, + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "text", + "content": " caused by UDF oscillation also would not affect it, since it is not the maximum in a large enough neighborhood. The ray is cut at the maximum point " + }, + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "text", + "content": ", and thus the weight of point " + }, + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 46, + 582, + 287, + 712 + ], + "type": "text", + "content": " is zero and contributes nothing to the rendering. As illustrated in Figure 2, the cutting process is robust against UDF oscillation, open boundaries, and local maxima in front of the intersection surface." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 317, + 78, + 537, + 164 + ], + "blocks": [ + { + "bbox": [ + 317, + 78, + 537, + 164 + ], + "lines": [ + { + "bbox": [ + 317, + 78, + 537, + 164 + ], + "spans": [ + { + "bbox": [ + 317, + 78, + 537, + 164 + ], + "type": "image", + "image_path": "75a6bd4f9512b87da68e33f3321462eac43b0aa68cbe0f9128dac677398021a8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 172, + 545, + 239 + ], + "lines": [ + { + "bbox": [ + 304, + 172, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 304, + 172, + 545, + 239 + ], + "type": "text", + "content": "Figure 2. An intuitive illustration of our ray cutting algorithm, best viewed in color and magnified. A ray shoots from left to right, approaching the boundary of the first surface, and going through another two surfaces (gray boxes). The violet solid line represents the UDF values along the ray; the orange dashed line represents the corresponding color weight." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 258, + 369, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 258, + 369, + 270 + ], + "spans": [ + { + "bbox": [ + 306, + 258, + 369, + 270 + ], + "type": "text", + "content": "3.3. Training" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 276, + 545, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 276, + 545, + 335 + ], + "spans": [ + { + "bbox": [ + 304, + 276, + 545, + 335 + ], + "type": "text", + "content": "Differentiable UDFs. 
NeuS uses an MLP network to learn the signed distance function " + }, + { + "bbox": [ + 304, + 276, + 545, + 335 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 276, + 545, + 335 + ], + "type": "text", + "content": ", which is a differentiable function. In contrast, UDF is not differentiable at the zero level set, making the network difficult to learn the values and gradients of the UDF close to the zero level set." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 336, + 546, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 336, + 546, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 336, + 546, + 479 + ], + "type": "text", + "content": "Another crucial requirement is to ensure non-negative values for the computed distances, which seems like a trivial task as one may simply apply absolute value or normalization such as ReLU [11] to the MLP output. However, applying the absolute value to the distance is not viable due to its non-differentiability at zero. Similarly, normalizing the output value using ReLU is not feasible as it is also non-differentiable at zero and its gradient vanishes for negative inputs. This can be particularly problematic for learning UDFs, since when the MLP returns a negative distance value, the ReLU gradient vanishes, hindering the update of the distance to a positive value in the subsequent iterations." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 479, + 545, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 479, + 545, + 598 + ], + "spans": [ + { + "bbox": [ + 304, + 479, + 545, + 598 + ], + "type": "text", + "content": "We add a softplus [9] function after the output layer of the MLP [23]. The softplus function is a smooth and differentiable approximation of the ReLU function, which is defined as " + }, + { + "bbox": [ + 304, + 479, + 545, + 598 + ], + "type": "inline_equation", + "content": "\\mathrm{softplus}(x) = \\frac{1}{\\beta}\\ln (1 + e^{\\beta x})" + }, + { + "bbox": [ + 304, + 479, + 545, + 598 + ], + "type": "text", + "content": ". Softplus has the same shape as ReLU, but it is continuous and differentiable at every point and its gradients do not vanish anywhere. Using the softplus function allows us to ensure that the output of the MLP is non-negative and differentiable, making it suitable for learning the UDF. Similar to NeUDF [23], we set " + }, + { + "bbox": [ + 304, + 479, + 545, + 598 + ], + "type": "inline_equation", + "content": "\\beta = 100" + }, + { + "bbox": [ + 304, + 479, + 545, + 598 + ], + "type": "text", + "content": " in our experiments." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 599, + 545, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 599, + 545, + 658 + ], + "spans": [ + { + "bbox": [ + 304, + 599, + 545, + 658 + ], + "type": "text", + "content": "Loss functions. Following NeuralUDF [25], we adopt an iso-surface regularizer to penalize the UDF values of the non-surface points from being zero, therefore encouraging smooth and clean UDFs. 
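As an aside, here is a minimal PyTorch-style sketch of the softplus-activated distance head described a paragraph earlier; the class name, layer sizes, and hidden activation are placeholders of ours, not the released implementation.

```python
import torch.nn as nn

class UDFHead(nn.Module):
    """Distance head whose raw output is passed through softplus (beta = 100, as in
    NeUDF), so the predicted distance is non-negative while keeping a non-vanishing
    gradient everywhere -- unlike abs() or ReLU applied to the output."""

    def __init__(self, d_in=3, d_hidden=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(d_in, d_hidden), nn.ReLU(),
            nn.Linear(d_hidden, 1),
        )
        # softplus(x) = (1/beta) * ln(1 + exp(beta * x))
        self.out_act = nn.Softplus(beta=100)

    def forward(self, x):
        return self.out_act(self.mlp(x))   # non-negative UDF value per query point
```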
The regularization loss is defined as [25]" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 350, + 665, + 501, + 693 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 665, + 501, + 693 + ], + "spans": [ + { + "bbox": [ + 350, + 665, + 501, + 693 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {r e g} = \\frac {1}{M N} \\sum_ {i, k} \\exp (- \\tau \\cdot f (t _ {i, k})),", + "image_path": "b40f170a4270284285d4e34139768b03681dee5591775ee064370457f95ea081.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 701, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 701, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 306, + 701, + 545, + 712 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 701, + 545, + 712 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 306, + 701, + 545, + 712 + ], + "type": "text", + "content": " is a constant scalar that scales the learned UDF" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5087" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "values, " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " is the total number of sampled rays per training iteration, and " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " is the number of sampled points on a single ray. " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " is set to 5.0 in the first stage and 50.0 in the second stage." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 120, + 288, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 120, + 288, + 216 + ], + "spans": [ + { + "bbox": [ + 46, + 120, + 288, + 216 + ], + "type": "text", + "content": "The value of " + }, + { + "bbox": [ + 46, + 120, + 288, + 216 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 120, + 288, + 216 + ], + "type": "text", + "content": ", which is learnable in our method, significantly affects the quality of the reconstruction. When " + }, + { + "bbox": [ + 46, + 120, + 288, + 216 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 120, + 288, + 216 + ], + "type": "text", + "content": " is small, it introduces a larger bias and leads to a more blurred output. 
We observe that " + }, + { + "bbox": [ + 46, + 120, + 288, + 216 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 120, + 288, + 216 + ], + "type": "text", + "content": " typically converges to a relatively large value between 1000 and 2000, leading to visually pleasing results. However, in rare cases when " + }, + { + "bbox": [ + 46, + 120, + 288, + 216 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 120, + 288, + 216 + ], + "type": "text", + "content": " stops increasing during training, we apply a penalty to force it to increase. The penalty is defined as follows" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 127, + 222, + 208, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 222, + 208, + 251 + ], + "spans": [ + { + "bbox": [ + 127, + 222, + 208, + 251 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s} = \\frac {1}{M} \\sum_ {i, k} \\frac {1}{s _ {i , k}},", + "image_path": "111fa935545d056b34d530a6cc12eeaa5673f1f6bc650261f039142bbef91042.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "text", + "content": " is the number of rays during a training epoch. This term " + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_s" + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "text", + "content": " aggregates the reciprocals of all " + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "text", + "content": " values used for the point " + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "inline_equation", + "content": "t_{i,k}" + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "text", + "content": " on ray " + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "inline_equation", + "content": "r_i" + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "text", + "content": ". Intuitively speaking, it encourages a larger " + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "text", + "content": " during the early stage of training. In our implementation, we make this term optional since " + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "text", + "content": " generally increases with a decreasing rate during training, and the penalty term is only necessary in rare cases when " + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 259, + 287, + 354 + ], + "type": "text", + "content": " stops at a relatively low value." 
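For concreteness, the two regularizers just defined can be written as the following PyTorch sketch (ours; tensor shapes and names are assumptions, and in practice s is typically a single shared learnable scalar broadcast over the samples).

```python
import torch

def iso_surface_reg(udf_vals, tau):
    """L_reg: mean of exp(-tau * f) over all M*N sampled points.
    tau is 5.0 in the first stage and 50.0 in the second stage."""
    # udf_vals: (M, N) unsigned distances, M rays with N samples each
    return torch.exp(-tau * udf_vals).mean()

def s_penalty(s_vals):
    """Optional L_s: sum of reciprocals of s over the samples, divided by the number
    of rays M; it shrinks as s grows, nudging s upward when it stalls."""
    # s_vals: (M, N) sharpness values used at each sample point
    return (1.0 / s_vals).sum() / s_vals.shape[0]
```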
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 355, + 288, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 355, + 288, + 475 + ], + "spans": [ + { + "bbox": [ + 46, + 355, + 288, + 475 + ], + "type": "text", + "content": "As in other SDF- and UDF-based methods [25, 34, 36], we adopt color loss and Eikonal loss in our approach. Specifically, the color loss " + }, + { + "bbox": [ + 46, + 355, + 288, + 475 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{color}" + }, + { + "bbox": [ + 46, + 355, + 288, + 475 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 46, + 355, + 288, + 475 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 46, + 355, + 288, + 475 + ], + "type": "text", + "content": " loss between the predicted color and the ground truth color of a single pixel as used in [34]. The Eikonal loss " + }, + { + "bbox": [ + 46, + 355, + 288, + 475 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{eik}" + }, + { + "bbox": [ + 46, + 355, + 288, + 475 + ], + "type": "text", + "content": " is used to regularize the learned distance field to have a unit gradient [14]. Users may also choose to adopt object masks for supervision as introduced in other SDF- and UDF-based methods [25, 34]. Putting it all together, we define the combined loss function as a weighted sum," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 483, + 280, + 496 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 483, + 280, + 496 + ], + "spans": [ + { + "bbox": [ + 53, + 483, + 280, + 496 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {\\text {c o l o r}} + \\lambda_ {1} \\mathcal {L} _ {\\text {e i k}} + \\lambda_ {2} \\mathcal {L} _ {\\text {r e g}} + \\lambda_ {3} \\mathcal {L} _ {s} (+ \\lambda_ {m} \\mathcal {L} _ {\\text {m a s k}}),", + "image_path": "75e36738c08fe2ae0fa5b573784cc9d874291680e51a4c936fd65d2d17a47381.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 503, + 287, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 503, + 287, + 528 + ], + "spans": [ + { + "bbox": [ + 46, + 503, + 287, + 528 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 503, + 287, + 528 + ], + "type": "inline_equation", + "content": "\\lambda_1, \\lambda_2, \\lambda_3" + }, + { + "bbox": [ + 46, + 503, + 287, + 528 + ], + "type": "text", + "content": " and the optional " + }, + { + "bbox": [ + 46, + 503, + 287, + 528 + ], + "type": "inline_equation", + "content": "\\lambda_m" + }, + { + "bbox": [ + 46, + 503, + 287, + 528 + ], + "type": "text", + "content": " are hyperparameters that control the weight of each loss term." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 538, + 128, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 538, + 128, + 551 + ], + "spans": [ + { + "bbox": [ + 47, + 538, + 128, + 551 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 558, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 288, + 713 + ], + "type": "text", + "content": "Datasets. To evaluate our method, we use three datasets: DeepFashion3D [43], DTU [19] and BlendedMVS [39]. The DeepFashion3D dataset consists of clothing models, which are open models with boundaries. 
As only 3D points are available, we render 72 images of resolution " + }, + { + "bbox": [ + 46, + 558, + 288, + 713 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 46, + 558, + 288, + 713 + ], + "type": "text", + "content": " with a white background from different viewpoints for each model. In addition to DeepFashion3D images rendered by us most of which are texture-less, we also take the image data from NeuralUDF [25] most of which are texture-rich into our experiments. We call them DF3D#Ours and DF3D#NeuralUDF, respectively. The DTU dataset consists of models captured in a studio, all of which are watertight. We use this dataset to validate that our method also works" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": "well for watertight models. These datasets have been widely used in previous works such as [34, 36, 40]. In our experiments, open models such as in DeepFashion3D are trained without mask supervision; DTU is trained with mask supervision." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 133, + 547, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 133, + 547, + 181 + ], + "spans": [ + { + "bbox": [ + 304, + 133, + 547, + 181 + ], + "type": "text", + "content": "Baselines. To validate the effectiveness of our method, we compare it with state-of-the-art UDF learning methods: NeuralUDF [25], NeUDF [23] and NeAT [27]; and SDF learning methods: VolSDF [40] and NeuS [34]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 189, + 471, + 202 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 189, + 471, + 202 + ], + "spans": [ + { + "bbox": [ + 305, + 189, + 471, + 202 + ], + "type": "text", + "content": "4.1. Comparisons on Open Models" + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 307, + 218, + 555, + 358 + ], + "blocks": [ + { + "bbox": [ + 307, + 218, + 555, + 358 + ], + "lines": [ + { + "bbox": [ + 307, + 218, + 555, + 358 + ], + "spans": [ + { + "bbox": [ + 307, + 218, + 555, + 358 + ], + "type": "table", + "html": "
<table>
<tr><th>Method</th><th>#1</th><th>#2</th><th>#3</th><th>#4</th><th>#5</th><th>#6</th><th>#7</th><th>#8</th><th>#9</th><th>Mean</th></tr>
<tr><td>NeuS</td><td>6.69</td><td>13.50</td><td>10.32</td><td>15.01</td><td>8.99</td><td>12.92</td><td>12.94</td><td>9.93</td><td>9.49</td><td>11.09</td></tr>
<tr><td>VolSDF</td><td>6.36</td><td>9.44</td><td>11.87</td><td>16.03</td><td>10.78</td><td>14.91</td><td>15.06</td><td>11.34</td><td>8.96</td><td>11.64</td></tr>
<tr><td>NeAT</td><td>10.54</td><td>13.89</td><td>7.30</td><td>13.12</td><td>13.18</td><td>12.44</td><td>8.22</td><td>10.30</td><td>11.33</td><td>11.15</td></tr>
<tr><td>NeuralUDF</td><td>6.07</td><td>11.58</td><td>7.68</td><td>10.96</td><td>11.16</td><td>9.76</td><td>6.98</td><td>6.13</td><td>6.41</td><td>8.53</td></tr>
<tr><td>NeUDF</td><td>4.39</td><td>8.29</td><td>4.94</td><td>19.56</td><td>7.52</td><td>8.18</td><td>3.81</td><td>3.81</td><td>5.76</td><td>7.36</td></tr>
<tr><td>Ours</td><td>4.55</td><td>5.77</td><td>4.27</td><td>7.43</td><td>6.59</td><td>4.77</td><td>2.88</td><td>3.21</td><td>5.73</td><td>5.02</td></tr>
<tr><th>Method</th><th>LS-C0</th><th>SS-D0</th><th>LS-D0</th><th>NS-D1</th><th>LS-C1</th><th>Skirt1</th><th>SS-C0</th><th>Mean</th></tr>
<tr><td>NeuS</td><td>3.18</td><td>4.82</td><td>5.71</td><td>2.21</td><td>3.60</td><td>2.44</td><td>5.13</td><td>3.87</td></tr>
<tr><td>VolSDF</td><td>5.92</td><td>4.79</td><td>5.96</td><td>4.36</td><td>8.73</td><td>7.74</td><td>8.84</td><td>6.62</td></tr>
<tr><td>NeAT</td><td>3.06</td><td>4.33</td><td>5.92</td><td>3.52</td><td>8.84</td><td>3.91</td><td>4.30</td><td>4.84</td></tr>
<tr><td>NeuralUDF</td><td>1.92</td><td>2.05</td><td>4.11</td><td>1.50</td><td>2.47</td><td>2.16</td><td>2.15</td><td>2.34</td></tr>
<tr><td>NeUDF</td><td>1.95</td><td>2.93</td><td>N.A.</td><td>1.48</td><td>2.66</td><td>2.74</td><td>1.77</td><td>2.26</td></tr>
<tr><td>Ours</td><td>1.92</td><td>1.97</td><td>2.46</td><td>1.47</td><td>2.14</td><td>1.84</td><td>1.91</td><td>1.96</td></tr>
</table>
", + "image_path": "a88aa4fc13a72c8f95751da90c1b3d14e94a9ccaae9c16f0138b5db0a7ffd86c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 365, + 547, + 399 + ], + "lines": [ + { + "bbox": [ + 304, + 365, + 547, + 399 + ], + "spans": [ + { + "bbox": [ + 304, + 365, + 547, + 399 + ], + "type": "text", + "content": "Table 1. Chamfer distances " + }, + { + "bbox": [ + 304, + 365, + 547, + 399 + ], + "type": "inline_equation", + "content": "(\\times 10^{-3})" + }, + { + "bbox": [ + 304, + 365, + 547, + 399 + ], + "type": "text", + "content": " on DF3D#Ours (top) and DF3D#NeuralUDF (bottom). NeAT requires mask supervision and others do not need." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 413, + 545, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 413, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 304, + 413, + 545, + 460 + ], + "type": "text", + "content": "We evaluate our method and compare it with baselines using the garments from DeepFashion3D [43], where the models have multiple open boundaries. VolSDF and NeuS always close the boundaries since they learn SDFs." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 461, + 545, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 461, + 545, + 556 + ], + "spans": [ + { + "bbox": [ + 304, + 461, + 545, + 556 + ], + "type": "text", + "content": "NeuralUDF, NeUDF and NeAT are designed to learn non-watertight models. NeAT learns SDFs for open models, and requires mask supervision to produce reasonable results, but other methods do not require mask supervision for DeepFashion3D. The released codebase of NeuralUDF indicates that it also has a two-stage training process. We evaluate the results of NeuralUDF at the end of both stages, and present whichever is better." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 557, + 545, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 545, + 629 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 545, + 629 + ], + "type": "text", + "content": "In contrast, NeuralUDF, NeUDF and our method learn UDFs, which can generate open models. Table 1 shows the Chamfer distances of the results on DeepFashion3D. Some of the Chamfer distances of the compared methods are large because the open holes are closed or the model is over-smoothed, resulting in significant errors." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 630, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 547, + 713 + ], + "type": "text", + "content": "As demonstrated in Figure 3, we test various types of garments, some of which have rich textures, while others are nearly a single color. Learning UDFs for textureless models is more challenging since various regions of a model are ambiguous without clear color differences. However, our 2S-UDF generates satisfactory results even without masks. 
Though with mask supervision, the results of" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5088" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 68, + 539, + 257 + ], + "blocks": [ + { + "bbox": [ + 61, + 68, + 539, + 257 + ], + "lines": [ + { + "bbox": [ + 61, + 68, + 539, + 257 + ], + "spans": [ + { + "bbox": [ + 61, + 68, + 539, + 257 + ], + "type": "image", + "image_path": "6e75436779e812106317753dd55b5af603b8f9b6bdd3d73e9de62d92abfb5d60.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 268, + 548, + 338 + ], + "lines": [ + { + "bbox": [ + 46, + 268, + 548, + 338 + ], + "spans": [ + { + "bbox": [ + 46, + 268, + 548, + 338 + ], + "type": "text", + "content": "Figure 3. Visual comparisons on selected models of the DeepFashion3D [43] dataset. The surfaces produced by NeuS and VolSDF are closed watertight models, thereby post-processing is required to remove the unnecessary parts. NeAT can produce open models by learning an SDF and predicting which surfaces in the extracted meshes should be removed, but it needs mask for supervision. NeuralUDF can generate open surfaces, but struggles with textureless inputs, leading to double-layered regions and large reconstruction errors. NeUDF generally performs well, but its training is unstable and may stumble on less distinguished, darker models like LS-D0. In contrast, our 2S-UDF consistently delivers effective reconstructions of non-watertight models. See the supplementary material for additional results." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 47, + 353, + 289, + 548 + ], + "blocks": [ + { + "bbox": [ + 47, + 353, + 289, + 548 + ], + "lines": [ + { + "bbox": [ + 47, + 353, + 289, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 353, + 289, + 548 + ], + "type": "image", + "image_path": "04d8dfc5034a66bbcc23c3a95227ad5925a06feb7ff94eb62433c342d2e373e8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 559, + 288, + 625 + ], + "lines": [ + { + "bbox": [ + 46, + 559, + 288, + 625 + ], + "spans": [ + { + "bbox": [ + 46, + 559, + 288, + 625 + ], + "type": "text", + "content": "Figure 4. Visualization of the learned UDFs on cross sections. Compared with the ground truth, our method can learn a UDFs that most closely resemble the ground truth, among our method, NeuralUDF, and NeUDF. NeAT is omitted in this visualization, because it learns SDFs in lieu of UDFs. Note that for LS-D0, NeUDF completely collapses without a reasonable UDF learned." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": "NeAT [27] are over-smoothed, missing details, resulting in large Chamfer distance errors. 
NeuralUDF [25] is unable to properly reconstruct most of the textureless models, possibly due to its complex density function, which is difficult to converge. Some of the NeUDF [23] models" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 356, + 547, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 356, + 547, + 536 + ], + "spans": [ + { + "bbox": [ + 304, + 356, + 547, + 536 + ], + "type": "text", + "content": "become watertight. To analyze the reasons, we illustrate cross sections of these UDFs in Figure 4. To compute the ground truth UDFs, we sample 30,000 points from every input point model and compute the distance to the nearest sample point for every point in a 3D grid of resolution " + }, + { + "bbox": [ + 304, + 356, + 547, + 536 + ], + "type": "inline_equation", + "content": "512 \times 512 \times 512" + }, + { + "bbox": [ + 304, + 356, + 547, + 536 + ], + "type": "text", + "content": ". All other UDFs are extracted by querying the distance neural network in a 3D grid of the same resolution. Our learned UDFs closely resemble the ground truth. In contrast, the UDFs of NeuralUDF deviate significantly from the ground truth, which explains its difficulty in converging. The UDFs of NeUDF are better, but the distances approach zero around open holes. As a result, it struggles to generate non-watertight models, and some of its results are even closed. NeAT learns SDFs, so we do not show its distance fields." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 540, + 547, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 540, + 547, + 625 + ], + "spans": [ + { + "bbox": [ + 304, + 540, + 547, + 625 + ], + "type": "text", + "content": "As illustrated in Figure 5, perhaps because it takes the absolute value of an MLP output as the UDF representation, NeuralUDF may generate two layers of zero level-sets on both sides of the surface, resulting in double-layered regions after Stage 1 learning. However, in its Stage 2 refinement, the surface is crushed into pieces and the Chamfer distance errors surge suddenly." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "content": "In Figure 6, we conduct additional experiments on open-model data provided by NeUDF [23]. For the rack model, the thin structures reconstructed by NeuralUDF [25] and NeUDF [23] appear eroded, but ours do not. The thin structures reconstructed by NeAT [27] are the closest to the reference image, but the surface is dented inward with visible artifacts due to imperfect SDF validity learning."
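For reference, ground-truth UDF grids of the kind visualized in Figure 4 can be produced along the lines described above; the sketch below is ours, uses a KD-tree for the nearest-point queries, and defaults to a 128^3 grid rather than 512^3 only to keep the example lightweight.

```python
import numpy as np
from scipy.spatial import cKDTree

def gt_udf_grid(points, res=128, lo=-1.0, hi=1.0):
    """Unsigned distance from every grid node to the nearest sampled surface point.

    points : (P, 3) points sampled from the ground-truth model
             (the paper samples 30,000 per model and uses a 512^3 grid).
    """
    tree = cKDTree(points)
    axis = np.linspace(lo, hi, res)
    grid = np.stack(np.meshgrid(axis, axis, axis, indexing="ij"), axis=-1)
    dist, _ = tree.query(grid.reshape(-1, 3))
    return dist.reshape(res, res, res)
```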
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5089" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 106, + 149, + 175 + ], + "blocks": [ + { + "bbox": [ + 50, + 106, + 149, + 175 + ], + "lines": [ + { + "bbox": [ + 50, + 106, + 149, + 175 + ], + "spans": [ + { + "bbox": [ + 50, + 106, + 149, + 175 + ], + "type": "image", + "image_path": "e250f6fc48e46af907d9111a4bdbb8adc50d42105537c6b4c5b5759c2c597d83.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 202, + 287, + 280 + ], + "lines": [ + { + "bbox": [ + 47, + 202, + 287, + 280 + ], + "spans": [ + { + "bbox": [ + 47, + 202, + 287, + 280 + ], + "type": "text", + "content": "Figure 5. Plots of the Chamfer distance throughout the training process. Our method consistently reduces CD across both stages. In contrast, NeuralUDF, which also adopts a two-stage learning strategy, exhibits instability and yields a fragmented output following the second stage. The first-stage output of NeuralUDF, however, contains double-layered regions as marked above. In this figure, both methods start their stage 2 training at 250k iterations." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 153, + 71, + 290, + 114 + ], + "blocks": [ + { + "bbox": [ + 153, + 71, + 290, + 114 + ], + "lines": [ + { + "bbox": [ + 153, + 71, + 290, + 114 + ], + "spans": [ + { + "bbox": [ + 153, + 71, + 290, + 114 + ], + "type": "image", + "image_path": "b08c5db4015974cecf2dd55bf7ef7ac0fa66b41467f17aa49dc19ac8215b6cb7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 115, + 238, + 130 + ], + "lines": [ + { + "bbox": [ + 187, + 115, + 238, + 130 + ], + "spans": [ + { + "bbox": [ + 187, + 115, + 238, + 130 + ], + "type": "text", + "content": "Stage 1 NeuralUDF" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 153, + 132, + 290, + 175 + ], + "blocks": [ + { + "bbox": [ + 153, + 132, + 290, + 175 + ], + "lines": [ + { + "bbox": [ + 153, + 132, + 290, + 175 + ], + "spans": [ + { + "bbox": [ + 153, + 132, + 290, + 175 + ], + "type": "image", + "image_path": "5722455419a86704be598c7e0c84a72095e1ecc7e2c9d1c1b6116450a8c1e8bc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 176, + 234, + 191 + ], + "lines": [ + { + "bbox": [ + 187, + 176, + 234, + 191 + ], + "spans": [ + { + "bbox": [ + 187, + 176, + 234, + 191 + ], + "type": "text", + "content": "Stage 1 2S-UDF" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 52, + 289, + 280, + 342 + ], + "blocks": [ + { + "bbox": [ + 52, + 289, + 280, + 342 + ], + "lines": [ + { + "bbox": [ + 52, + 289, + 280, + 342 + ], + "spans": [ + { + "bbox": [ + 52, + 289, + 280, + 342 + ], + "type": "table", + "html": "
<table>
<tr><th>Method</th><th>37</th><th>55</th><th>65</th><th>69</th><th>97</th><th>105</th><th>106</th><th>114</th><th>118</th><th>122</th><th>Mean</th></tr>
<tr><td>NeuralUDF</td><td>1.18</td><td>0.44</td><td>0.66</td><td>0.67</td><td>0.94</td><td>0.95</td><td>0.57</td><td>0.37</td><td>0.56</td><td>0.55</td><td>0.69</td></tr>
<tr><td>NeAT</td><td>1.18</td><td>0.47</td><td>0.82</td><td>0.84</td><td>1.09</td><td>0.75</td><td>0.76</td><td>0.38</td><td>0.56</td><td>0.55</td><td>0.74</td></tr>
<tr><td>NeUDF</td><td>0.90</td><td>0.65</td><td>0.73</td><td>0.97</td><td>1.07</td><td>0.63</td><td>0.94</td><td>0.59</td><td>0.72</td><td>0.62</td><td>0.78</td></tr>
<tr><td>Ours</td><td>0.89</td><td>0.55</td><td>0.68</td><td>0.88</td><td>1.15</td><td>0.70</td><td>0.74</td><td>0.41</td><td>0.61</td><td>0.51</td><td>0.71</td></tr>
</table>
", + "image_path": "78fc93ab36c6a9e2d739a90287bbe009216f95a90a4d62ec4efcea94810eac1f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 86, + 350, + 247, + 360 + ], + "lines": [ + { + "bbox": [ + 86, + 350, + 247, + 360 + ], + "spans": [ + { + "bbox": [ + 86, + 350, + 247, + 360 + ], + "type": "text", + "content": "Table 2. Chamfer distances on DTU dataset." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 380, + 287, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 380, + 287, + 475 + ], + "spans": [ + { + "bbox": [ + 46, + 380, + 287, + 475 + ], + "type": "text", + "content": "The plant model does not have an object mask, making NeAT [27] impractical for training. NeuralUDF [25] completely fails to reconstruct a reasonable surface. Between our method and NeUDF [23] which can reconstruct a sensible model, the flower pot region marked in red is missing in NeUDF but not in ours. These show our method's ability to reconstruct non-watertight models more robustly compared to other methods." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 50, + 483, + 284, + 571 + ], + "blocks": [ + { + "bbox": [ + 50, + 483, + 284, + 571 + ], + "lines": [ + { + "bbox": [ + 50, + 483, + 284, + 571 + ], + "spans": [ + { + "bbox": [ + 50, + 483, + 284, + 571 + ], + "type": "image", + "image_path": "43c4ebd9f38166d8e2c76d98ad88d6c9be7b273fb8040d09c8e0c49ee1277594.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 582, + 287, + 626 + ], + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 626 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 626 + ], + "type": "text", + "content": "Figure 6. Qualitative comparisons with NeAT [27], NeuralUDF [25] and NeUDF [23] on some example data released by NeUDF [23]. Note that NeAT cannot reconstruct \"plant\" dataset because the ground truth mask for \"plant\" is unavailable." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 647, + 237, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 647, + 237, + 659 + ], + "spans": [ + { + "bbox": [ + 47, + 647, + 237, + 659 + ], + "type": "text", + "content": "4.2. Comparisons on Watertight Models" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 665, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 712 + ], + "type": "text", + "content": "Other methods can also be used as the first stage of our 2S- UDF. We use NeUDF for the first stage training on the DTU dataset [19]. 
As detailed in Table 2, we compare the Chamfer distances of the reconstruction results with NeuralUDF," + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 309, + 70, + 543, + 125 + ], + "blocks": [ + { + "bbox": [ + 309, + 70, + 543, + 125 + ], + "lines": [ + { + "bbox": [ + 309, + 70, + 543, + 125 + ], + "spans": [ + { + "bbox": [ + 309, + 70, + 543, + 125 + ], + "type": "image", + "image_path": "625df05cda1eda0a5e170c400280861942e31769d997e9d5fe339a30328e2fd3.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 308, + 127, + 538, + 174 + ], + "blocks": [ + { + "bbox": [ + 308, + 127, + 538, + 174 + ], + "lines": [ + { + "bbox": [ + 308, + 127, + 538, + 174 + ], + "spans": [ + { + "bbox": [ + 308, + 127, + 538, + 174 + ], + "type": "image", + "image_path": "f475c8258dde381aa4725b94c3d0d8d675c4cf3f4e724f95df61deb96e55b5b8.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 309, + 175, + 537, + 217 + ], + "blocks": [ + { + "bbox": [ + 309, + 175, + 537, + 217 + ], + "lines": [ + { + "bbox": [ + 309, + 175, + 537, + 217 + ], + "spans": [ + { + "bbox": [ + 309, + 175, + 537, + 217 + ], + "type": "image", + "image_path": "0c5a010e174d1de796f8b345f9f3dbc0f36961289e5f10773f5eb798b2dda984.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 309, + 220, + 538, + 265 + ], + "blocks": [ + { + "bbox": [ + 309, + 220, + 538, + 265 + ], + "lines": [ + { + "bbox": [ + 309, + 220, + 538, + 265 + ], + "spans": [ + { + "bbox": [ + 309, + 220, + 538, + 265 + ], + "type": "image", + "image_path": "5f6ce4e47ba03086b925e6e9a32a8198afad462ab494e647eede489bfa366ce8.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 309, + 266, + 537, + 311 + ], + "blocks": [ + { + "bbox": [ + 309, + 266, + 537, + 311 + ], + "lines": [ + { + "bbox": [ + 309, + 266, + 537, + 311 + ], + "spans": [ + { + "bbox": [ + 309, + 266, + 537, + 311 + ], + "type": "image", + "image_path": "e1bbcb01c969473cdd14228cef52d5c8678e3e921f30ed07c5c0b5a1d379b533.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 322, + 545, + 389 + ], + "lines": [ + { + "bbox": [ + 305, + 322, + 545, + 389 + ], + "spans": [ + { + "bbox": [ + 305, + 322, + 545, + 389 + ], + "type": "text", + "content": "Figure 7. Qualitative comparisons with NeAT, NeuralUDF and NeUDF on the DTU [19] dataset and close-up comparisons against NeUDF. Our method can reconstruct surfaces closer to the ground truth point clouds in various places such as the marked region, generally improving the reconstruction accuracy of NeUDF by around " + }, + { + "bbox": [ + 305, + 322, + 545, + 389 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 305, + 322, + 545, + 389 + ], + "type": "text", + "content": ", on a par with NeuralUDF and NeAT at the bottom two rows." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 410, + 545, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 410, + 545, + 638 + ], + "spans": [ + { + "bbox": [ + 304, + 410, + 545, + 638 + ], + "type": "text", + "content": "NeAT and NeUDF without our second-stage training. 
SDFs generally excel at learning watertight models, and it is worth pointing out that NeuralUDF takes the absolute value of the output of MLP as the UDF value of a given point. Therefore for closed models, they can easily learn an SDF and take its absolute value to produce a UDF. NeAT, on the other hand, explicitly learns an SDF. NeUDF and our method truly learn UDFs. While UDF learning is much more complicated than SDF learning because the UDF gradient nearby 0 is blurry and the gradient is not available at 0, our method still improves the reconstruction quality of NeUDF by around " + }, + { + "bbox": [ + 304, + 410, + 545, + 638 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 304, + 410, + 545, + 638 + ], + "type": "text", + "content": " as shown in Figure 7. We further provide a close-up view of specific parts of the models for detailed comparisons in Figure 7. These local callouts exhibit the ground truth points located on both sides of our surfaces, whereas most of the points are only on one side of the surfaces of NeUDF. These illustrate our reconstructed surfaces are closer to the ground truth points and thus improving the resulting quality over NeUDF, on a par with NeuralUDF and NeAT." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 646, + 406, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 646, + 406, + 658 + ], + "spans": [ + { + "bbox": [ + 306, + 646, + 406, + 658 + ], + "type": "text", + "content": "4.3. Ablation Studies" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 665, + 545, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 665, + 545, + 700 + ], + "spans": [ + { + "bbox": [ + 305, + 665, + 545, + 700 + ], + "type": "text", + "content": "In this section, we present main ablation studies. We refer interested readers to the supplementary material for additional ablation studies." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 318, + 701, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 701, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 318, + 701, + 545, + 713 + ], + "type": "text", + "content": "Effect of the two-stage training. We conduct an ablation" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5090" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 70, + 288, + 171 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 288, + 171 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 288, + 171 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 288, + 171 + ], + "type": "table", + "html": "
Method#1#7#8LS-D0
S1 & S24.552.883.212.46
S17.222.463.386.04
S25.754.005.963.65
MethodNS-D1LS-C1DTU 114DTU 122
S1 & S21.472.140.410.51
S11.466.230.590.62
S21.642.980.630.60
", + "image_path": "fccdf2f8004311c88d16e956b8ba4ca5b13a0550885a50583c39c822ce1b3b15.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 178, + 288, + 234 + ], + "lines": [ + { + "bbox": [ + 46, + 178, + 288, + 234 + ], + "spans": [ + { + "bbox": [ + 46, + 178, + 288, + 234 + ], + "type": "text", + "content": "Table 3. Chamfer distances of models learned by both Stage 1 and 2 (S1 & S2), only Stage 1 (S1) and only Stage 2 (S2) on selected datasets. Models learned by two stages yield similar Chamfer distances, but when trained with only Stage 1 or Stage 2, the Chamfer distances generally become significantly higher." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 255, + 287, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 255, + 287, + 326 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 287, + 326 + ], + "type": "text", + "content": "study on the effect of the two-stage learning. We compare the Chamfer distances among both two stages, only Stage 1 and only Stage 2 training, shown in Table 3. Our results show that two-stage training improves the Chamfer distance (lower is better) compared to training with only Stage 1 or 2, under most circumstances." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 327, + 287, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 327, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 46, + 327, + 287, + 399 + ], + "type": "text", + "content": "It should be noted that training by the second stage from scratch is also capable of generating a generally reasonable result. However, the Chamfer distances, as shown in Table 3, indicate that its learning ability is limited. Therefore, the second refinement learning stage should cooperate with the first coarse learning stage to generate the best results." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "spans": [ + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "text", + "content": "Choice of accumulated weight threshold " + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\delta_{thres}" + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "text", + "content": ". In Stage 2, being a ray truncate point requires the accumulated weight up until that point to be greater than " + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\delta_{thres}" + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "text", + "content": ", where we intuitively select " + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\delta_{thres} = 0.5" + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "text", + "content": ". Figure 8 shows the reconstruction results for other choices of " + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\delta_{thres}" + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "text", + "content": ", namely 0.3 and 0.7, respectively. We observe that all threshold choices successfully reconstruct the model. 
Setting the threshold " + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\delta_{thres}" + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "text", + "content": " up to 0.7 produces visually similar results. Setting the threshold " + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\delta_{thres}" + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "text", + "content": " down to 0.3 also works fine generally despite that it may introduce more holes to the reconstructed meshes. We deduce that setting a lower threshold increases the possibility that a ray may be truncated prematurely, leading to less desirable results. Nevertheless, we still have a considerable range of " + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\delta_{thres}" + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "text", + "content": " from 0.3 to 0.7 without major result regression, indicating that our Stage 2 training exhibits robustness against " + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\delta_{thres}" + }, + { + "bbox": [ + 46, + 399, + 287, + 591 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 599, + 125, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 599, + 125, + 609 + ], + "spans": [ + { + "bbox": [ + 47, + 599, + 125, + 609 + ], + "type": "text", + "content": "4.4. Limitations" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 617, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 287, + 715 + ], + "type": "text", + "content": "Since the light is cut off after going through a layer of surface, our method relinquishes the ability to model planes with transparency. Occasionally, due to learning uncertainty, the Chamfer distance may increase slightly in the second stage, but the difference is quite small without visual impact. Overall, the two-stage learning improves the quality significantly. For watertight models, SDF learning is more suitable than UDF learning, since UDF learning is" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 338, + 71, + 516, + 201 + ], + "blocks": [ + { + "bbox": [ + 338, + 71, + 516, + 201 + ], + "lines": [ + { + "bbox": [ + 338, + 71, + 516, + 201 + ], + "spans": [ + { + "bbox": [ + 338, + 71, + 516, + 201 + ], + "type": "image", + "image_path": "769c7f1b165581d56d8ee2b70ab1418a2cfb555702c6ff80bad0ce9640a5c48a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 209, + 545, + 254 + ], + "lines": [ + { + "bbox": [ + 305, + 209, + 545, + 254 + ], + "spans": [ + { + "bbox": [ + 305, + 209, + 545, + 254 + ], + "type": "text", + "content": "Figure 8. Qualitative comparisons on different choices of accumulated weight " + }, + { + "bbox": [ + 305, + 209, + 545, + 254 + ], + "type": "inline_equation", + "content": "\\delta_{\\text{thres}}" + }, + { + "bbox": [ + 305, + 209, + 545, + 254 + ], + "type": "text", + "content": ". Setting a higher threshold works well few little visual differences; Setting a lower threshold generally works fine, but may introduce more holes in reconstructed meshes." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 278, + 545, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 278, + 545, + 374 + ], + "spans": [ + { + "bbox": [ + 304, + 278, + 545, + 374 + ], + "type": "text", + "content": "more complicated than SDF learning. We still advise using SDF learning, e.g., NeuS [34], HF-NeuS [36] or PET-NeuS [37], for watertight model reconstruction. Also, the mesh extraction of MeshUDF [15] tends to generate holes and \"staircase\" artifacts affecting the mesh reconstruction quality. Adopting a more robust extraction method, e.g., DoubleCoverUDF [18], could alleviate the problem, but we use MeshUDF here for all methods for a fair comparison." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 390, + 383, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 390, + 383, + 403 + ], + "spans": [ + { + "bbox": [ + 306, + 390, + 383, + 403 + ], + "type": "text", + "content": "5. Conclusions" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 412, + 546, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 412, + 546, + 579 + ], + "spans": [ + { + "bbox": [ + 304, + 412, + 546, + 579 + ], + "type": "text", + "content": "Overall, 2S-UDF offers a promising approach to the problem of reconstructing both open and watertight models from multi-view images. Its advantages over existing methods lie in the use of a simple and more accurate density function, and a smooth differentiable UDF representation, so that the learned UDF approximates the ground truth as much as possible. A two-stage learning strategy further eliminates bias and improves UDF accuracy. Results from our experiments on the DeepFashion3D, DTU and BlendedMVS datasets demonstrate the effectiveness of our method, particularly in learning smooth and stably open UDFs revealing the robustness of 2S-UDF. Moreover, our method does not rely on object masks for open model reconstruction, making it more practical in real-world applications." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 596, + 403, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 596, + 403, + 609 + ], + "spans": [ + { + "bbox": [ + 306, + 596, + 403, + 609 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "This project was supported in part by the National Natural Science Foundation of China under Grants (61872347, 62072446), in part by the National Key R&D Program of China under Grant 2023YFB3002901, in part by the Basic Research Project of ISCAS under Grant ISCAS-JCMS-202303 and in part by the Ministry of Education, Singapore, under its Academic Research Fund Grants (MOET2EP20220-0005, RG20/20 & RT19/22)." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "type": "text", + "content": "5091" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Dejan Azinović, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural RGB-D Surface Reconstruction. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6280-6291, 2022. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 287, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 287, + 179 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 287, + 179 + ], + "type": "text", + "content": "[2] Fausto Bernardini, Joshua Mittleman, Holly Rushmeier, Cláudio Silva, and Gabriel Taubin. The ball-pivoting algorithm for surface reconstruction. IEEE Trans. Vis. Comput. Graph., 5(4):349-359, 1999. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 182, + 286, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 286, + 214 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 286, + 214 + ], + "type": "text", + "content": "[3] A. Broadhurst, T.W. Drummond, and R. Cipolla. A probabilistic framework for space carving. In Int. Conf. Comput. Vis., pages 388-393 vol.1, 2001. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 216, + 286, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 216, + 286, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 216, + 286, + 270 + ], + "type": "text", + "content": "[4] Rohan Chabra, Jan E. Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep Local Shapes: Learning Local SDF Priors for Detailed 3D Reconstruction. In Eur. Conf. Comput. Vis., pages 608-625, Cham, 2020. Springer International Publishing. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 272, + 286, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 272, + 286, + 316 + ], + "spans": [ + { + "bbox": [ + 53, + 272, + 286, + 316 + ], + "type": "text", + "content": "[5] Julian Chibane, Thiemo Alldieck, and Gerard Pons-Moll. Implicit Functions in Feature Space for 3D Shape Reconstruction and Completion. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6968-6979, 2020. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 318, + 286, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 318, + 286, + 361 + ], + "spans": [ + { + "bbox": [ + 53, + 318, + 286, + 361 + ], + "type": "text", + "content": "[6] Julian Chibane, Mohamad Aymen mir, and Gerard Pons-Moll. 
Neural Unsigned Distance Fields for Implicit Function Learning. In Adv. Neural Inform. Process. Syst., pages 21638-21652. Curran Associates, Inc., 2020. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 363, + 286, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 363, + 286, + 407 + ], + "spans": [ + { + "bbox": [ + 53, + 363, + 286, + 407 + ], + "type": "text", + "content": "[7] François Darmon, Bénédicte Bascle, Jean-Clement Devaux, Pascal Monasse, and Mathieu Aubry. Improving neural implicit surfaces geometry with patch warping. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6250-6259, 2022. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 408, + 286, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 408, + 286, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 408, + 286, + 441 + ], + "type": "text", + "content": "[8] J. De Bonet and P. Viola. Roxels: responsibility weighted 3D volume reconstruction. In Int. Conf. Comput. Vis., pages 418-425 vol.1, 1999. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 443, + 286, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 443, + 286, + 487 + ], + "spans": [ + { + "bbox": [ + 53, + 443, + 286, + 487 + ], + "type": "text", + "content": "[9] Charles Dugas, Yoshua Bengio, François Bélisle, Claude Nadeau, and René Garcia. Incorporating Second-Order Functional Knowledge for Better Option Pricing. In Adv. Neural Inform. Process. Syst. MIT Press, 2000. 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 488, + 286, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 488, + 286, + 542 + ], + "spans": [ + { + "bbox": [ + 48, + 488, + 286, + 542 + ], + "type": "text", + "content": "[10] Qiancheng Fu, Qingshan Xu, Yew Soon Ong, and Wenbing Tao. Geo-Neus: Geometry-Consistent Neural Implicit Surfaces Learning for Multi-view Reconstruction. In Adv. Neural Inform. Process. Syst., pages 3403–3416. Curran Associates, Inc., 2022. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 544, + 286, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 286, + 576 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 286, + 576 + ], + "type": "text", + "content": "[11] Kunihiko Fukushima. Cognitron: a self-organizing multilayered neural network. Biological Cybernetics, 20(3-4):121-136, 1975. 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 578, + 286, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 286, + 611 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 286, + 611 + ], + "type": "text", + "content": "[12] Yasutaka Furukawa and Carlos Hernández. Multi-View Stereo: A Tutorial. Found. Trends. Comput. Graph. Vis., 9 (1-2):1-148, 2015. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 613, + 286, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 286, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 286, + 656 + ], + "type": "text", + "content": "[13] Silvano Galliani, Katrin Lasinger, and Konrad Schindler. Massively Parallel Multiview Stereopsis by Surface Normal Diffusion. In Int. Conf. Comput. Vis., pages 873-881, 2015. 
2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 658, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 286, + 712 + ], + "type": "text", + "content": "[14] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit Geometric Regularization for Learning Shapes. In Proceedings of the 37th International Conference on Machine Learning, pages 3789-3799. PMLR, 2020. 5" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[15] Benoit Guillard, Federico Stella, and Pascal Fua. MeshUDF: Fast and Differentiable Meshing of Unsigned Distance Field Networks. In Eur. Conf. Comput. Vis., pages 576-592, Cham, 2022. Springer Nature Switzerland. 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "type": "text", + "content": "[16] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, 2 edition, 2004. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 152, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 194 + ], + "type": "text", + "content": "[17] Fei Hou, Chiyu Wang, Wencheng Wang, Hong Qin, Chen Qian, and Ying He. Iterative poisson surface reconstruction (iPSR) for unoriented points. ACM Trans. Graph., 41(4), 2022. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 197, + 545, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 545, + 240 + ], + "type": "text", + "content": "[18] Fei Hou, Xuhui Chen, Wencheng Wang, Hong Qin, and Ying He. Robust Zero Level-Set Extraction from Unsigned Distance Fields Based on Double Covering. ACM Trans. Graph., 42(6), 2023. 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 243, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 243, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 243, + 545, + 285 + ], + "type": "text", + "content": "[19] Rasmus Jensen, Anders Dahl, George Vogiatzis, Engil Tola, and Henrik Aanæs. Large Scale Multi-view Stereopsis Evaluation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 406-413, 2014. 2, 5, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 287, + 545, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 287, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 307, + 287, + 545, + 331 + ], + "type": "text", + "content": "[20] Mengqi Ji, Jinzhi Zhang, Qionghai Dai, and Lu Fang. SurfaceNet+: An End-to-end 3D Neural Network for Very Sparse Multi-View Stereopsis. IEEE Trans. Pattern Anal. Mach. Intell., 43(11):4078-4093, 2021. 
2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 332, + 545, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 332, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 307, + 332, + 545, + 365 + ], + "type": "text", + "content": "[21] Abhishek Kar, Christian Hane, and Jitendra Malik. Learning a Multi-View Stereo Machine. In Adv. Neural Inform. Process. Syst. Curran Associates, Inc., 2017. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 366, + 545, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 366, + 545, + 388 + ], + "spans": [ + { + "bbox": [ + 307, + 366, + 545, + 388 + ], + "type": "text", + "content": "[22] Michael Kazhdan and Hugues Hoppe. Screenedoisson surface reconstruction. ACM Trans. Graph., 32(3), 2013. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 389, + 545, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 389, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 307, + 389, + 545, + 443 + ], + "type": "text", + "content": "[23] Yu-Tao Liu, Li Wang, Jie Yang, Weikai Chen, Xiaoxu Meng, Bo Yang, and Lin Gao. NeUDF: Leaning Neural Unsigned Distance Fields with Volume Rendering. In IEEE Conf. Comput. Vis. Pattern Recog., pages 237-247, 2023. 1, 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 445, + 545, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 445, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 307, + 445, + 545, + 499 + ], + "type": "text", + "content": "[24] Xiaoxiao Long, Cheng Lin, Peng Wang, Taku Komura, and Wenping Wang. SparseNeuS: Fast Generalizable Neural Surface Reconstruction from Sparse Views. In Eur. Conf. Comput. Vis., pages 210-227, Cham, 2022. Springer Nature Switzerland. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 502, + 545, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 502, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 307, + 502, + 545, + 567 + ], + "type": "text", + "content": "[25] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Yuan Liu, Peng Wang, Christian Theobalt, Taku Komura, and Wenping Wang. NeuralUDF: Learning Unsigned Distance Fields for Multi-View Reconstruction of Surfaces with Arbitrary Topologies. In IEEE Conf. Comput. Vis. Pattern Recog., pages 20834–20843, 2023. 1, 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 568, + 545, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 545, + 633 + ], + "type": "text", + "content": "[26] Baorui Ma, Zhizhong Han, Yu-Shen Liu, and Matthias Zwicker. Neural-Pull: Learning Signed Distance Function from Point clouds by Learning to Pull Space onto Surface. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, pages 7246-7257. PMLR, 2021. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 635, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 678 + ], + "type": "text", + "content": "[27] Xiaoxu Meng, Weikai Chen, and Bo Yang. NeAT: Learning Neural Implicit Surfaces with Arbitrary Topologies from Multi-View Images. In IEEE Conf. Comput. Vis. Pattern Recog., pages 248–258, 2023. 
2, 5, 6, 7" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "text", + "content": "[28] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy Networks: Learning 3D Reconstruction in Function Space. In" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5092" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 286, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 286, + 94 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 286, + 94 + ], + "type": "text", + "content": "IEEE Conf. Comput. Vis. Pattern Recog., pages 4455-4465, 2019. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 151 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 151 + ], + "type": "text", + "content": "[29] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In Eur. Conf. Comput. Vis., pages 405-421, Cham, 2020. Springer International Publishing. 1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 287, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 287, + 206 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 287, + 206 + ], + "type": "text", + "content": "[30] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning Continuous Signed Distance Functions for Shape Representation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 165-174, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 209, + 287, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 209, + 287, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 209, + 287, + 262 + ], + "type": "text", + "content": "[31] Johannes L. Schonberger, Enliang Zheng, Jan-Michael Frahm, and Marc Pollefeys. Pixelwise View Selection for Unstructured Multi-View Stereo. In Eur. Conf. Comput. Vis., pages 501–518, Cham, 2016. Springer International Publishing. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 264, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 264, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 264, + 287, + 319 + ], + "type": "text", + "content": "[32] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit Neural Representations with Periodic Activation Functions. In Adv. Neural Inform. Process. Syst., pages 7462-7473. Curran Associates, Inc., 2020. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "type": "text", + "content": "[33] Jiaming Sun, Yiming Xie, Linghao Chen, Xiaowei Zhou, and Hujun Bao. NeuralRecon: Real-Time Coherent 3D Reconstruction from Monocular Video. In IEEE Conf. Comput. Vis. Pattern Recog., pages 15593-15602, 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 365, + 287, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 287, + 420 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 287, + 420 + ], + "type": "text", + "content": "[34] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. NeuS: Learning Neural Implicit Surfaces by Volume Rendering for Multi-view Reconstruction. In Adv. Neural Inform. Process. Syst., pages 27171-27183. Curran Associates, Inc., 2021. 1, 2, 5, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 422, + 287, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 422, + 287, + 465 + ], + "spans": [ + { + "bbox": [ + 48, + 422, + 287, + 465 + ], + "type": "text", + "content": "[35] Yifan Wang, Lukas Rahmann, and Olga Sorkine-Hornung. Geometry-Consistent Neural Shape Representation with Implicit Displacement Fields. In Int. Conf. Learn. Represent. OpenReview.net, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 468, + 287, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 468, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 48, + 468, + 287, + 520 + ], + "type": "text", + "content": "[36] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. HF-NeuS: Improved Surface Reconstruction Using High-Frequency Details. In Adv. Neural Inform. Process. Syst., pages 1966–1978. Curran Associates, Inc., 2022. 1, 2, 3, 5, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 523, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 523, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 523, + 287, + 567 + ], + "type": "text", + "content": "[37] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. PETNeuS: Positional Encoding Tri-Planes for Neural Surfaces. In IEEE Conf. Comput. Vis. Pattern Recog., pages 12598–12607, 2023. 2, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 568, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 287, + 612 + ], + "type": "text", + "content": "[38] Yao Yao, Zixin Luo, Shiwei Li, Tianwei Shen, Tian Fang, and Long Quan. Recurrent MVSNet for High-Resolution Multi-View Stereo Depth Inference. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5520–5529, 2019. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 614, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 287, + 667 + ], + "type": "text", + "content": "[39] Yao Yao, Zixin Luo, Shiwei Li, Jingyang Zhang, Yufan Ren, Lei Zhou, Tian Fang, and Long Quan. BlendedMVS: A Large-Scale Dataset for Generalized Multi-View Stereo Networks. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1787–1796, 2020. 
5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "text", + "content": "[40] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume Rendering of Neural Implicit Surfaces. In Adv. Neural Inform. Process. Syst., pages 4805-4815. Curran Associates, Inc., 2021. 1, 2, 5" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 251 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "text", + "content": "[41] Fang Zhao, Wenhao Wang, Shengcai Liao, and Ling Shao. Learning Anchored Unsigned Distance Functions with Gradient Direction Alignment for Single-view Garment Reconstruction. In Int. Conf. Comput. Vis., pages 12654-12663, 2021. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 129, + 545, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 183 + ], + "type": "text", + "content": "[42] Junsheng Zhou, Baorui Ma, Yu-Shen Liu, Yi Fang, and Zhizhong Han. Learning Consistency-Aware Unsigned Distance Functions Progressively from Raw Point Clouds. In Adv. Neural Inform. Process. Syst., pages 16481-16494. Curran Associates, Inc., 2022. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 185, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 185, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 308, + 185, + 545, + 251 + ], + "type": "text", + "content": "[43] Heming Zhu, Yu Cao, Hang Jin, Weikai Chen, Dong Du, Zhangye Wang, Shuguang Cui, and Xiaoguang Han. Deep Fashion3D: A Dataset and Benchmark for 3D Garment Reconstruction from Single Images. In Eur. Conf. Comput. Vis., pages 512-530, Cham, 2020. Springer International Publishing. 
2, 5, 6" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5093" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_content_list.json b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9d15fde5f86dc47c42e3f9a0815872bb7898fc39 --- /dev/null +++ b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_content_list.json @@ -0,0 +1,1360 @@ +[ + { + "type": "text", + "text": "$360 + x$ : A Panoptic Multi-modal Scene Understanding Dataset", + "text_level": 1, + "bbox": [ + 171, + 130, + 797, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hao Chen Yuqi Hou Chenyuan Qu Irene Testini Xiaohan Hong Jianbo Jiao", + "bbox": [ + 142, + 180, + 825, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The Machine Intelligence $+x$ Group, University of Birmingham, UK", + "bbox": [ + 210, + 205, + 756, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project page: https://x360dataset.github.io/", + "bbox": [ + 310, + 229, + 653, + 248 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 282, + 313, + 299 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Human perception of the world is shaped by a multitude of viewpoints and modalities. While many existing datasets focus on scene understanding from a certain perspective (e.g. egocentric or third-person views), our dataset offers a panoptic perspective (i.e. multiple viewpoints with multiple data modalities). Specifically, we encapsulate third-person panoramic and front views, as well as egocentric monocular/binocular views with rich modalities including video, multi-channel audio, directional binaural delay, location data and textual scene descriptions within each scene captured, presenting comprehensive observation of the world. To the best of our knowledge, this is the first database that covers multiple viewpoints with multiple data modalities to mimic how daily information is accessed in the real world. Through our benchmark analysis, we presented 5 different scene understanding tasks on the proposed $360 + x$ dataset to evaluate the impact and benefit of each data modality and perspective in panoptic scene understanding. We hope this unique dataset could broaden the scope of comprehensive scene understanding and encourage the community to approach these problems from more diverse perspectives.", + "bbox": [ + 75, + 316, + 472, + 633 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 661, + 209, + 678 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Scene understanding is crucial for robotics and artificial intelligent systems to perceive the environment around them. 
As humans, we intuitively understand the world through primarily visual inputs, as well as auditory and other sensory inputs (e.g. touch and smell). The community has made remarkable progress in mimicking human perception with contributions from various datasets and benchmarks [4, 5, 7, 9, 13, 15, 23]. These efforts have approached scene understanding from a diverse range of perspectives, such as normal frontal-view vision [5, 13, 23], panoramic view [22, 28], binocular/stereo view [20, 30], egocentric monocular view [4, 9], and audio [2, 7].", + "bbox": [ + 75, + 686, + 468, + 868 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While there has been exciting progress in understanding scenes from a limited number of perspectives, it is notable", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "that humans understand the world by incorporating a combination of viewpoints, in a holistic manner. This includes an egocentric view for activities we are involved in and a third-person view for activities we are observing. In addition to visual cues, we also rely on a range of modalities, including hearing and binaural delay, to fully comprehend our surroundings and track movements. Our prior knowledge of the scene, such as localisation information and scene descriptions, has also supported our understanding of the environment (e.g. the cafe in the city centre may be different from a similar cafe on a university campus).", + "bbox": [ + 496, + 284, + 893, + 450 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Taking the above observations into consideration, a new dataset covering all these aforementioned aspects is presented in this work, to provide a panoptic scene understanding, termed $360 + x$ dataset. This new dataset offers a diverse selection of perspectives, including a $360^{\\circ}$ panoramic view providing a complete panoptic view of the environment, and a third-person front view that highlights the region of interest that has the most movements in front of the camera. Additionally, we have included egocentric monocular and binocular videos to capture the first-person perspective of individuals in the environment. These viewpoints are complemented by aligned multi-channel audio with directional binaural delay information, as well as location information and scene descriptions as metadata. An illustration of the presented dataset collection system is shown in Figure 1.", + "bbox": [ + 496, + 454, + 893, + 681 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Based on this newly collected dataset, we perform 5 visual-audio scene understanding tasks to analyse the contribution and effectiveness of each data viewpoint and modality. Particularly, we look at video classification, temporal action localisation, self-supervised representation learning, cross-modality retrieval and pre-training model migration for dataset adaptation, with interesting findings and insights from extensive experimental analysis. 
The main contributions of this work are summarised as follows:", + "bbox": [ + 496, + 684, + 893, + 820 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose to our knowledge the first and probably the most authentic panoptic scene understanding dataset covering multiple viewpoints and data modalities in the wild.", + "- We perform extensive experimental analysis to validate the effectiveness of the proposed dataset on different tasks" + ], + "bbox": [ + 500, + 825, + 890, + 898 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "19373", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/cff7a8284de8415f6a93cc4e75a54533058f0cdc1222c6e3ea0e80912a4ca199.jpg", + "image_caption": [ + "Figure 1. Illustration of the proposed $360 + x$ dataset. The $360^{\\circ}$ camera records fish-eye raw videos with front and back lenses. These videos are merged to create a spherical $360^{\\circ}$ panorama (middle-up figure, zoom in for details), which is then transformed to (a) $360^{\\circ}$ panoramic data using equirectangular projection. The (b) third-person front view is obtained by de-warping the rich movements region highlighted red in the spherical field of $360^{\\circ}$ panorama (the middle-left figure). By wearing stereo cameras, the capturers record (c) egocentric clips while staying visible to the fixed $360^{\\circ}$ camera (central ellipse). (e) Directional audio time delay data is generated from left and right audio inputs (d) from the $360^{\\circ}$ camera by interaural time delay process [3]. This helps locate sound sources in the $360^{\\circ}$ panorama." + ], + "image_footnote": [], + "bbox": [ + 81, + 88, + 893, + 440 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "from various perspectives and modalities.", + "bbox": [ + 89, + 573, + 369, + 587 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Interesting findings are derived from the analysis, suggesting the effectiveness of each viewpoint and data modality. Learning from this new dataset without supervision even shows a better performance than that from a model trained in a supervised manner.", + "bbox": [ + 76, + 588, + 470, + 662 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 76, + 678, + 227, + 694 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Video understanding and analysis. Video analysis has been extensively studied in the literature. Existing datasets such as UCF101 [23], ActivityNet [5] and Kinetics [13] have provided large-scale video data for activity understanding tasks. However, these datasets often exhibit lower complexity compared to real-world scenes. Some datasets, like MultiThumos [31], aim to increase complexity but are limited to specific scenarios with domain-specific actions, deviating from real-life daily activities. In contrast, our dataset builds upon the activity labels from ActivityNet [5] and strives to capture data that closely simulates real-life scenarios. 
Apart from that, we also include multiple data viewpoints and modalities as compared to existing datasets.", + "bbox": [ + 75, + 703, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Panoramic scene understanding. In recent years, panoramic scene understanding has gained significant attention due to its holistic reflection of the environment. Several datasets have been introduced to facilitate research in this area. For instance, the KITTI-360 [16] provides a collection of panoramic images for urban scene analysis. EGOK360 [1] has been introduced to address the need for video data with a panoramic view. Im2Pano3D [22] presents a panoramic dataset for indoor scenarios with semantic segmentation and focuses on the prediction from a partial observation. However, these datasets primarily focus on panoramic visual data while lacking the incorporation of other viewpoints (e.g. egocentric) and data modalities (e.g. audio), limiting their potential for comprehensive scene understanding and analysis.", + "bbox": [ + 496, + 571, + 893, + 800 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Egocentric video analysis. Focusing on understanding scenes from a first-person perspective, existing datasets such as EPIC-Kitchens [4] and Ego4D [9] provide egocentric video data collected during daily activities. They have contributed to research on activity recognition and object detection in egocentric scenes. Unlike these datasets fo", + "bbox": [ + 496, + 809, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "19374", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "cusing on egocentric views, our dataset also covers other viewpoints and modalities aiming at supporting scene understanding research in a more panoptic manner.", + "bbox": [ + 76, + 90, + 468, + 137 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Visual-audio analysis. Integrating visual and audio information often enhances the performance of models in scene understanding tasks, as it provides richer contextual information. There are some existing datasets available to support research in audio-visual analysis, e.g. AVA [10], AudioSet [6] and VGGSound [2], to name a few. However, these datasets are lacking in multiple viewpoints and the directional property of audio signals, which are provided in the proposed new dataset.", + "bbox": [ + 76, + 143, + 468, + 280 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. $360 + x$ Dataset", + "text_level": 1, + "bbox": [ + 76, + 294, + 220, + 309 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Data Acquisition and Alignment", + "text_level": 1, + "bbox": [ + 76, + 318, + 361, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Two main devices were used for our data collection: the Insta 360 One X2 and Snapchat Spectacles 3 cameras. The 360 One X2 has two fish-eye cameras that collect $360^{\\circ}$ panoramic visual information in the scene with $5760 \\times 2880$ resolution and a frame rate of 25 FPS. Additionally, directional audio was recorded using four microphones in directional audio mode. 
While the Spectacles 3 has a stereo camera attached to a pair of glasses used to capture the egocentric binocular vision within the scene at a resolution of $2432 \\times 1216$ and a frame rate of 60 FPS.", + "bbox": [ + 76, + 340, + 468, + 491 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Once we obtained the raw data, we aligned the different viewpoints and modalities through a specific process. The initial raw footage captured by the two fish-eye cameras on the $360^{\\circ}$ camera was in the form of two circular videos, which were then stitched and de-warped into a spherical panorama. This panorama can be projected into an equirectangular format to produce a panoramic video. However, this direct compression of the spherical view into a rectangular format can introduce unnatural distortions. In order to provide a more natural and informative view, we inversely project a rectangular region into equirectangular space and use it to crop the spherical panorama. We use optical flow to determine the crop region with the most motion activity in the spherical panorama field. This crop region is then projected back to rectangular, resulting in an informative video view with minimal distortions.", + "bbox": [ + 75, + 492, + 468, + 733 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Egocentric binocular videos, as shown in Figure 1(c), were captured ranging from approximately 30 seconds to 1 minute in duration for each clip. A total of 1 to 5 stereo clips were recorded, scattered throughout the duration of the average 6 mins $360^{\\circ}$ video. In addition to stereo videos, we also provide the corresponding monocular videos for the egocentric view.", + "bbox": [ + 76, + 734, + 468, + 839 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The audio recordings were temporally aligned with their corresponding videos with left/right channel modality. The four-channel audios with the $360^{\\circ}$ panoramic video are provided as well for further exploration. Moreover, we also", + "bbox": [ + 76, + 839, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "provide the directional information of the audio which was presented using the estimated interaural time delay of the sound obtained from the method introduced in [3]. The GPS information and weather information were also provided.", + "bbox": [ + 496, + 90, + 890, + 151 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the possibility of occlusions in regions visible to the egocentric camera but not to the $360^{\\circ}$ camera, we ensured during data collection that the cameras were positioned in close proximity. This setup, with clear mutual visibility, allowed both cameras to capture a similar overall scene.", + "bbox": [ + 496, + 151, + 890, + 241 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Scene Selection", + "text_level": 1, + "bbox": [ + 500, + 252, + 653, + 267 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To broaden scene coverage and promote multi-modal collaborative learning, we integrated a strategic selection process for captured scenes, governed by three key criteria:", + "bbox": [ + 496, + 276, + 890, + 321 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "i) Scene categories must be carefully crafted to be comprehensive, yet concise, while also being authoritative and reflective of everyday life. 
The location where a scene unfolds plays a crucial role in providing essential environmental context to the activities within it [17]. Distinct scenes can impart unique meanings or emotional nuances to identical events. For instance, the act of chatting could convey divergent implications in a school setting as compared to a home environment. Such nuances are critical as they offer deeper insights into the contextual interpretation of behaviours and interactions in varied settings.", + "bbox": [ + 496, + 321, + 890, + 487 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ii) The data should ideally span a wide array of weather and lighting conditions. This criterion aims to ensure the inclusion of both indoor and outdoor activities under various environmental scenarios. Such diversity is important in accurately representing the multifaceted nature of daily life and the various conditions in which these activities occur.", + "bbox": [ + 496, + 487, + 890, + 577 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "iii) Our third criterion is the inclusion of scenarios rich in distinctive sound sources, particularly those where multiple activities co-occur. It is essential for the dataset to not only visually represent these activities but also to capture the corresponding auditory elements. The goal is to present the complexity and realism of real-world environments as much as possible, marked by simultaneous and various actions and behaviours.", + "bbox": [ + 496, + 578, + 890, + 698 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "It is worth noting that our dataset was collected across several countries, including the United Kingdom (e.g. London, Birmingham, Cardiff and Jersey), France (Paris), Spain (e.g. Oviedo and Picos de Europa), China (e.g. Guangzhou and Shenzhen), and Japan (e.g. Kyoto and Osaka). During the data collection, the $360^{\\circ}$ Camera was placed statically to record the scene, while a capturer wearing the Spectacles glasses recorded first-person interactions with the scene.", + "bbox": [ + 496, + 699, + 890, + 820 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Sensitive data handling. Our dataset was collected in a real-world setting and may contain sensitive personal information (e.g. human faces). To ensure ethical and responsible research, the video capture was conducted with proper", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "19375", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/ff9d14f6190e582495ba4849c002e12299ba6bd533ce6c87f4b0afa652b81c1b.jpg", + "image_caption": [ + "(a). Distribution of the scene categories (number)." + ], + "image_footnote": [], + "bbox": [ + 40, + 85, + 354, + 277 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/df7877990d8be77e8a12fc7f8df19206e525faa0882e849874cba44e7a1b34d3.jpg", + "image_caption": [ + "(d). Distribution comparison of the number of action instances per video." + ], + "image_footnote": [], + "bbox": [ + 83, + 295, + 344, + 419 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/650c19bd920fad706ffcab352e0f1d1fc3f168472a5e5867c094a0845e440f36.jpg", + "image_caption": [ + "(b). Geographical distribution of actions." 
+ ], + "image_footnote": [], + "bbox": [ + 364, + 84, + 640, + 273 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d8d992fb3e4b2a6a9391bb2d6502753c64cc6c931b0b97c2d2d032ea162a90ee.jpg", + "image_caption": [ + "(e). Capture time of the day." + ], + "image_footnote": [], + "bbox": [ + 367, + 294, + 547, + 430 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/8b68580db91837f64da6a68edf3c21b70b7c70d54ca34f90f9f346aca235a498.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 653, + 97, + 910, + 266 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/0a1eba5ead79a9e63953d95dda3e60b66d877b72f3bab94187dea5c4d8614333.jpg", + "image_caption": [ + "(c). Overall distribution of actions duration.", + "(f). Binaural delay per clip.", + "Figure 2. Dataset statistics analysis, on the distributions of (a) the scene category, (b) action distribution per cities, (c) temporal action instance duration, and (d) number of actions per video, (e) capturing time, (f) binaural delay per clip." + ], + "image_footnote": [], + "bbox": [ + 571, + 294, + 890, + 420 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "consent. Additionally, we have taken measures to protect privacy by anonymising the data. This includes applying a face detection mechanism to outline predicted face locations in each frame and applying blurring filters to maintain meaningful details while ensuring information security. More detailed information on our privacy protection measures can be found in the supplementary material.", + "bbox": [ + 75, + 515, + 468, + 621 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Data Annotation", + "text_level": 1, + "bbox": [ + 76, + 633, + 248, + 648 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Scene label rationale. The $360 + x$ dataset comprises a total of 28 scene categories (15 indoor scenes and 13 outdoor scenes), as illustrated in Figure 2(a). To establish comprehensive and authoritative scene categories that reflect daily life, we referred to the Places Database [34], which is derived from WordNet [18], as our primary basis. We then leverage the sophisticated semantic analysis capabilities of large language models, to conduct a thorough filtering and classification of a multitude of everyday scenes. This curation resulted in a refined set of 28 scene categories, each symbolising aspects of daily life. Simultaneously, the recordings concentrate on capturing common occurrences within conventional settings, providing a realistic depiction of everyday life. Detailed descriptions defining each category, along with discussions regarding these constraints and potential sampling biases, are presented in the supplimen", + "bbox": [ + 75, + 659, + 470, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "tary material.", + "bbox": [ + 500, + 516, + 591, + 531 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Temporal segmentation label. We also provide temporal segment labelling for the understanding of activities in the shooting scenes. We follow the activity hierarchy standard defined by ActivityNet [5], which provides a comprehensive categorisation of human activities, consisting of seven top-level categories (Personal Care, Eating and Drinking, Household, Caring and Helping, Working, Socialising and Leisure, and Sports and Exercises). 
To capture the diversity and granularity of activities within each category, we defined a total of 38 action instances, covering specific actions and behaviours. To ensure high-quality annotations, the temporal segmentation labelling was annotated by three experienced annotators. Each annotator independently annotated the temporal segments corresponding to the activities in the videos. To obtain a consensus, we merged the individual annotations and resolved any discrepancies according to discussion and consensus among the annotators.", + "bbox": [ + 496, + 544, + 890, + 801 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Dataset Statistics and Analysis", + "text_level": 1, + "bbox": [ + 500, + 815, + 769, + 830 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Overview. Existing publicly available datasets primarily focus on visual unimodality [4, 5, 13, 15, 23]. In contrast, our dataset introduces a novel approach by collecting different views or modalities, as presented in Table 1, including", + "bbox": [ + 496, + 839, + 892, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "19376", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/ab4b351576efb8a898937522aa6c25be7769c22977ada0c4e8bf27926d1f46e5.jpg", + "table_caption": [ + "Table 1. Dataset comparison. Ego: Egocentric, V: Video, A: Audio, A+V: Audio-visual events." + ], + "table_footnote": [], + "table_body": "
Dataset | Video Viewpoints | Other Modalities | Statistics | Attributions
Third-person Front View | 360° Panoramic | Ego Monocular | Ego Binocular | Normal Audio | Directional Binaural Delay | GPS Info | Avg Duration | Total Duration(s) | Frames Count(K) | Annotations Source | Multiple Events
UCF101 [23] | XXXXX | 7.21 s | 96,000 | 2,400 | V | X
Kinetics [13] | XXXXXX | 10 s | 2,998,800 | 74,970 | V | X
HMDB51 [14] | XXXXXX | 3 s | 21,426 | 643 | V | X
ActivityNet [5] | XXXXXX | 2 min | 2,332,800 | 11,664 | V
EPIC-Kitchens [4] | XXXXX | 7.6 min | 198,000 | 11,500 | V | X
Ego4D [9] | XXXX | 8 min | 13,212,000 | - | A+V
360+x (Ours) | 6.2 min | 244,000 | 8,579 | A+V
", + "bbox": [ + 80, + 114, + 888, + 242 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$360^{\\circ}$ panoramic video, third-person front view video, egocentric monocular video, egocentric binocular video, normal audio, directional binaural delay, location and textual scene description. This diverse range of modalities provides multiple dimensions and clues for understanding and analysing complex scenes. Our dataset consists of 2,152 videos representing 232 data examples, with 464 videos captured using the 360 camera and the remaining 1,688 recorded with the Spectacles camera.", + "bbox": [ + 75, + 268, + 468, + 404 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Figure 2(a) presents the distribution of video counts across each of the 28 scene categories. Our dataset is characterised by a balanced distribution of data across these scenes. Notably, it diverges from conventional databases like UCF101 [23], Kinetics [13], HMDB [15], and ActivityNet [5], particularly in terms of average video duration, which is approximately 6.2 minutes. This longer duration is crucial for maintaining the integrity and coherence of actions within each scene, allowing for a comprehensive temporal analysis of the activities.", + "bbox": [ + 75, + 404, + 468, + 555 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Temporal segment label. The annotations of temporal segment labels in our dataset contribute to the fine-grained analysis of activities. We defined 38 action instances representing specific actions and behaviours. The length of each segment labelled with a specific activity varies across the dataset, as depicted in Figure 2(c). Note we acknowledge the significance of audio in accurately identifying certain actions, such as 'coughing' or 'clapping'. Therefore, our dataset combines audio information to enhance accuracy in action recognition [4, 5, 13, 15, 23], as shown in Table 1.", + "bbox": [ + 75, + 569, + 468, + 720 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Comparative complexity. Due to its realistic scene simulation, our dataset offers more complexity compared to previous datasets. This complexity arises from the diverse range of activities and interactions captured, resulting in a more challenging and realistic setting for scene understanding and activity recognition. As shown in Figure 2(d), most existing datasets, such as UCF101 [23], Kinetics [13], and HMDB51 [14], typically consist of one action instance per video. While datasets like Ego4D [9] and ActivityNet [5] have large volumes and broad coverage, they often contain a limited number of action instances per individual video.", + "bbox": [ + 75, + 734, + 468, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The HACS dataset [33] contains more multiple action instances per video but still pales in comparison to the richness of the proposed dataset. Our dataset surpasses these existing datasets in terms of the number of action instances per video, showcasing the extensive variety of activities captured. The improved complexity and richness of our dataset enable follow-up research to explore and develop more robust algorithms, pushing the boundaries of scene understanding in real-world contexts.", + "bbox": [ + 496, + 267, + 890, + 405 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Data distribution. We have ensured a balanced distribution across various dimensions, including scene categories, action instances, binaural delay, etc. 
Figure 2(a) depicts the scene number distribution across 28 scene categories, demonstrating a comprehensive coverage of scene categories. Notably, the dataset achieves an almost equal proportion of indoor and outdoor scenes, accounting for $54.7\\%$ and $45.3\\%$ respectively. Our dataset allows each scene to conclude multiple diverse action instances naturally, and also enables different scenes to share common action instances. Notably, in Figure 2(b), it displays the 'types of action per location' that can be observed in the geographic distribution and the diversity of the data, where the inner circle shows the location and the outer circle shows the action types captured in each location. As illustrated in Figure 2(c), the distribution of action duration shows our dataset has captured extensive and realistic human behaviours across natural scenes. One interesting observation from our dataset is the high-frequency occurrence of action 'operating phone', which contributes $17.54\\%$ of the whole duration, providing a reflection of mobile usage in modern daily life. Additionally, the dataset offers valuable directional audio to supplement visual understanding. The distribution of data capture times in the dataset corresponds with natural human activities, as shown in Figure 2(e). Human activities throughout the day are mainly concentrated during the daytime (more in the afternoon and evening). Figure 2(f) illustrates the diversity of binaural delay for each clip. The positive point means the audio is directed towards the left direction while the negative the right. In summary, the presented $360 + x$ dataset covers broad modalities and diversity with an authentic distribution from different per", + "bbox": [ + 496, + 417, + 892, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "19377", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "spectives, mimicking real daily life.", + "bbox": [ + 76, + 90, + 315, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Benchmark and Experiments", + "text_level": 1, + "bbox": [ + 76, + 125, + 346, + 142 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To establish a comprehensive benchmark for the presented $360 + x$ dataset, we choose five visual understanding tasks to delve into the exploration of multiple viewpoints and modalities usage, including: video scene classification, temporal action localisation, cross-modality retrieval, self-supervised representation learning, and dataset adaptation.", + "bbox": [ + 76, + 150, + 467, + 241 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Remark: Unless specifically stated otherwise, the experiments on $360 + x$ will utilise three views: the $360^{\\circ}$ view, egocentric binocular view, and the third-person front view.", + "bbox": [ + 76, + 242, + 467, + 287 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experimental Setting", + "text_level": 1, + "bbox": [ + 76, + 301, + 277, + 316 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Models. We employed a consistent set of model backbones across different tasks to minimise model interference, except for temporal action localisation task (detailed in section 4.3). We followed the commonly used setup and selected the backbone I3D [13] as our video model. To handle audio-related aspects, we chose the VGGish [12] as our audio model. Additionally, for directional binaural feature extraction, we utilised the ResNet-18 model [11]. 
A linear layer is positioned after the backbones to carry out each specific task based on backbone output features.", + "bbox": [ + 76, + 325, + 467, + 477 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "It is important to note that a simple concatenation of all modalities features can diminish the potential information derived from multi-modality [26]. Therefore, instead of solely concatenating modality features, we leverage a hierarchical attention mechanism for multi-modality integration. In this approach, the directional binaural feature serves as an attention query to direct focused attention towards the audio feature, enabling it to encapsulate the directional information into the audio feature. At the same time, the audio feature is also leveraged by acting as a query itself, enabling it to attentively interact with the video feature. This mechanism allows for creating a synergistic representation of the underlying data that integrates the features of all modalities. For more details and in-depth analysis, please refer to the supplementary material.", + "bbox": [ + 76, + 478, + 467, + 705 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training and verification setup. For each temporal action localisation model, we follow their original training settings. For I3D, VGGish, and ResNet-18 networks, the training settings are 200 epochs with the parameters described in [19]. The training process utilises the AdamW optimiser with a learning rate of $1 \\times 10^{-5}$ and a decay rate of 0.1 at the 80th and 120th epochs. We also apply data augmentation techniques such as rotation, scaling, and colour jittering. The dataset was divided into training, validation, and test sets, following an 80/10/10 split. To ensure a balanced representation of scene categories, the examples were stratified probabilistically across the sets.", + "bbox": [ + 76, + 719, + 467, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/410f6847b750728d14e5ecd7b880836dd41521e883b67d793395003cd75ca657.jpg", + "table_caption": [ + "Table 2. Video classification performance across different views (Ego: egocentric binocular view, Front: third-person front view, and $360^{\\circ}$ .. $360^{\\circ}$ view) and data modalities (V: Video, A: Audio, D: Directional binaural delay). Reported in Avg. Prec. $(\\%)$" + ], + "table_footnote": [], + "table_body": "
Selected Views | Modalities
V | V + A | V + A + D
Egocentric Only | 51.95 (±0.0) | 55.24 (±0.0) | 58.92
Front Only | 54.05 (+2.1) | 65.33 (+10.1) | 67.19
360° Only | 56.33 (+4.4) | 67.14 (+11.9) | 70.95
360° + Egocentric | 58.99 (+7.0) | 70.48 (+15.2) | 72.11
360° + Front | 59.70 (+7.8) | 75.06 (+19.8) | 77.69
360° + Front + Ego | 63.73 (+11.8) | 77.32 (+22.1) | 80.62
", + "bbox": [ + 504, + 156, + 888, + 265 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Video Scene Classification", + "text_level": 1, + "bbox": [ + 500, + 290, + 736, + 306 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Video scene classification assigns scene labels to videos based on their frames, enabling analysis of visual content and determining the subject matter.", + "bbox": [ + 500, + 314, + 890, + 359 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Single view vs. multi-view. First, we are interested in the influence of different combinations of video views on the classification performance. The results, representing each combination, are summarised in Table 2. The results for single views are presented in the first three rows, indicating that using a single $360^{\\circ}$ panoramic view outperforms using either an egocentric binocular view or a third-person front view only. When employing multiple views, it is noted that better performance can be achieved compared to using a single view. Specifically, utilising all three views leads to the best performance. Such a performance can be attributed to the fact that although these three views describe the same scene, each different view offers a unique perspective that contributes to a more comprehensive understanding of the scene, resulting in improved performance.", + "bbox": [ + 496, + 369, + 890, + 597 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Single-modality vs. multi-modality and more. We further investigate the impact of modalities on the model's performance. Various combinations of modalities are analysed, and the results are summarised in Table 2 on a column-wise basis. In particular, the first column represents the visual modality alone, the second column combines video with audio, and the last column incorporates visual, audio, and directional binaural information modalities.", + "bbox": [ + 496, + 608, + 890, + 728 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The inclusion of additional modalities leads to average precision improvements. For example, when all three views are utilised, incorporating more modalities results in improvements of $13.59\\%$ and $16.89\\%$ , respectively. This underscores the benefits of leveraging multiple modalities for a more comprehensive understanding of the scene and enhancing overall performance.", + "bbox": [ + 496, + 729, + 890, + 835 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Temporal Action Localisation", + "text_level": 1, + "bbox": [ + 500, + 845, + 764, + 863 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Temporal Action Localisation (TAL) is a video understanding task that involves the dense identification and temporal", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "19378", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/e8b7df263babb295d9f03fb834d6f88104ff122f660c20873ee10874619d9150.jpg", + "table_caption": [ + "Table 3. Temporal action localisation results. Baseline extractors are used in [2, 21, 24, 32]. The mAP@σ represents the mean average precision (%) at a threshold of σ. The best performance is achieved by employing $V + A + D$ modalities with extractors pre-trained on $360 + x$ ." + ], + "table_footnote": [], + "table_body": "
Extractors | Modalities | Actionformer [32]: mAP@0.5 / mAP@0.75 / mAP@0.95 / Avg. | TemporalMaxer [24]: mAP@0.5 / mAP@0.75 / mAP@0.95 / Avg. | TriDet [21]: mAP@0.5 / mAP@0.75 / mAP@0.95 / Avg.
Baseline Extractors | V | 11.9 (±0.0) / 7.8 (±0.0) / 3.3 (±0.0) / 7.7 (±0.0) | 13.1 (±0.0) / 8.8 (±0.0) / 3.7 (±0.0) / 8.6 (±0.0) | 16.7 (±0.0) / 10.1 (±0.0) / 4.8 (±0.0) / 10.5 (±0.0)
Baseline Extractors | V + A | 19.1 (+7.2) / 11.3 (+3.5) / 4.2 (+0.9) / 11.5 (+3.8) | 21.0 (+7.9) / 14.8 (+6.0) / 5.6 (+1.9) / 13.8 (+5.2) | 23.6 (+6.9) / 17.2 (+7.1) / 6.4 (+1.6) / 15.7 (+5.2)
Pre-trained on 360+x | V | 16.4 (+4.5) / 9.8 (+2.0) / 3.9 (+0.6) / 10.0 (+2.3) | 20.4 (+7.3) / 14.3 (+5.5) / 5.2 (+1.5) / 13.3 (+4.7) | 21.1 (+4.4) / 15.3 (+5.2) / 5.5 (+0.7) / 14.0 (+3.5)
Pre-trained on 360+x | V + A | 23.6 (+11.7) / 16.9 (+9.1) / 5.7 (+2.4) / 15.4 (+7.7) | 25.8 (+12.7) / 18.0 (+9.2) / 6.4 (+2.7) / 16.7 (+8.1) | 26.4 (+8.7) / 18.5 (+8.4) / 6.9 (+2.1) / 17.3 (+6.8)
Pre-trained on 360+x | V + A + D | 24.9 (+13.0) / 17.4 (+9.6) / 6.1 (+2.8) / 16.1 (+8.4) | 26.6 (+13.5) / 18.3 (+9.5) / 6.5 (+2.8) / 17.1 (+8.5) | 27.1 (+10.4) / 18.7 (+8.6) / 7.0 (+2.2) / 17.6 (+7.1)
", + "bbox": [ + 78, + 128, + 890, + 219 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "segmentation of activities within a video stream over a specific time period. Current TAL approaches typically employ a two-stage paradigm [27, 32]. The first stage extracts features from the entire video, and the second stage predicts temporal segmentation based on these features.", + "bbox": [ + 75, + 244, + 470, + 320 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Feature extractors. Baseline extractors are widely utilised for various datasets, e.g. ActivityNet [5] and Ego4D [9], on the TAL task. The baseline video features are obtained from an I3D model pre-trained on the Kinetics400 dataset [13]. The baseline audio features are derived from the pre-classification layer following activation of the VG-Gish model, pre-trained on AudioSet [7]. There is no baseline extractor for directional binaural delay feature, so the $\\mathrm{V + A + D}$ modality was not included accordingly. For a fair comparison, we reused our video classification models in section 4.2 as Pre-trained on $360 + x$ extractors, following the same baseline extraction setup for both video and audio features. Additionally, the ResNet-18 feature extractor was used for directional binaural delay feature extraction.", + "bbox": [ + 75, + 330, + 470, + 542 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Experimental results. We provide a concise overview of the performance comparison for various temporal action localisation methods, including ActionFormer [32], TriDet [21] and TemporalMaxer [24], between the baseline extractors and our Pre-trained on $360 + x$ extractors. The summarised results are presented in Table 3, from which we can see that the introduction of additional modalities (i.e. audio and direction binaural delay) has a prominent positive impact on the TAL task, leading to performance improvements for both sets of extractors. This result highlights the importance of leveraging multiple modalities in enhancing the accuracy and effectiveness of temporal activity localisation techniques. Using our custom extractors can provide additional improvements, as the baseline extractors may not be optimised for our specific binocular or $360^{\\circ}$ views.", + "bbox": [ + 75, + 551, + 468, + 779 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Cross-modality Retrieval", + "text_level": 1, + "bbox": [ + 76, + 787, + 307, + 803 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this context, we focus on a series of retrieval tasks that across modalities including audio, video and directional binaural delay. In a modality-specific retrieval scenario, the query modality (Q) serves as the input for retrieving the key modality (K) in the Q-to-K retrieval task. The performance evaluation metric $\\mathrm{R}\\theta$ represents the recall at ranks $\\theta$ .", + "bbox": [ + 75, + 811, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/59992e95a4eaf03692782c57ba9c64aac734f475b86b6fa8cec47788baf79eb7.jpg", + "table_caption": [ + "Table 4. Q-to-Video retrieval results. The superscript* indicates modalities are co-trained. Recall reported with rank in $\\{1,5,10\\}$ ." + ], + "table_footnote": [], + "table_body": "
Query Modality | R1 (%) | R5 (%) | R10 (%)
A | 39.14 (±0.0) | 62.76 (±0.0) | 79.21 (±0.0)
A + D | 44.30 (+5.16) | 66.92 (+4.16) | 84.78 (+5.57)
(A + D)* | 55.88 (+16.74) | 72.53 (+9.77) | 86.6 (+7.39)
", + "bbox": [ + 501, + 281, + 890, + 349 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Q-to-Video retrieval results. Table 4 illustrates the retrieval results for the Query modality retrieve videos. In this table, $A + D$ denotes a set of independently trained audio and directional binaural features employed as query features. Moreover, $(A + D)^*$ signifies the collaborative training of these features instead of treating them independently. The inter-modality retrieval results shown in Table 4 clearly show the modality compliance quality of the $360 + x$ dataset. Besides Q-to-Video retrieval, we also performed Q-to-Audio and Q-to-Directional binaural delay experiments, details can be found in the supplementary material.", + "bbox": [ + 496, + 375, + 890, + 542 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Self-supervised Representation Learning", + "text_level": 1, + "bbox": [ + 498, + 554, + 849, + 570 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Experiment setup. In this section, we investigated the impact of different self-supervised learning (SSL) methods using two engaging video pretext tasks: video pace (VP) prediction [25] and clip order (CO) shuffle prediction [29]. The VP task challenges the model to determine the pace of a video, while the CO task asks the model to rearrange shuffled video clips into their correct chronological order. The original VP and CO primarily concentrated on video data, but to capitalise on the advantages of multi-modality, we expanded these approaches to include audio and directional binaural delay modalities. This extension was done to align modality with the temporal coherence and dynamics observed in the video. For more comprehensive explanations, please refer to the supplementary material.", + "bbox": [ + 496, + 578, + 890, + 790 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Experimental results. We first examined the impact of self-supervised learning models for video classification. Table 5 demonstrates the consistent precision gains achieved by utilising SSL pre-trained models. Notably, leveraging both video pace and clip order SSL techniques resulted in an average performance improvement of $\\sim 7\\%$ .", + "bbox": [ + 496, + 810, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "19379", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/71c9eff591b34ce4bf3c47bc40bab9906b301543fb41036a33f0101c82f4b65c.jpg", + "table_caption": [ + "Table 5. Models with different pre-train methods were fine-tuned and tested on video classification. The experiments use all three video views. Reported in Avg. Prec. $(\\%)$ ." + ], + "table_footnote": [], + "table_body": "
Pre-train Method | Modalities
V | V + A | V + A + D
From Scratch | 63.73 (±0.0) | 77.32 (±0.0) | 80.62
Video Pace [25] | 69.27 (+5.5) | 79.56 (+2.2) | 81.97
Clip Order [29] | 69.91 (+6.2) | 80.14 (+2.8) | 82.18
VP [25] + CO [29] | 76.84 (+13.1) | 82.66 (+5.3) | 83.32
", + "bbox": [ + 84, + 127, + 450, + 215 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/9582193217b6737bffede79e2961e5f793c3469dc64c93c19231ae0f741e753b.jpg", + "table_caption": [ + "Table 6. Comparison between supervised pre-trained extractors with SSL pretrained counterparts on TAL task. The experiments use all three video views with modalities (V+A+D)." + ], + "table_footnote": [], + "table_body": "
Pre-train Method | mAP@0.5 | mAP@0.75 | mAP@0.95 | Avg.
Supervised | 27.1 (±0.0) | 18.7 (±0.0) | 7.0 (±0.0) | 17.6 (±0.0)
Video Pace [25] | 29.4 (+2.3) | 19.6 (+0.9) | 7.4 (+0.4) | 18.8 (+1.2)
Clip Order [29] | 28.9 (+1.8) | 19.3 (+0.6) | 7.3 (+0.3) | 18.5 (+0.9)
VP [25] + CO [29] | 30.3 (+3.2) | 20.2 (+1.5) | 7.9 (+0.9) | 19.5 (+1.9)
", + "bbox": [ + 465, + 127, + 885, + 215 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/30dc64e8faac9dc2b3e985d75a68e16572dbb2d3a0b84bfa741b8e10461e36fd.jpg", + "table_caption": [ + "Table 7. Following original setup of THUMOS14 dataset [8], our dataset adaptation task uses video modality only." + ], + "table_footnote": [], + "table_body": "
Feature Extractor | mAP@0.3 | mAP@0.4 | mAP@0.5 | mAP@0.6 | mAP@0.7 | Avg.
Kinetics400 [13] (Pre-train) | 83.7 (±0.0) | 80.2 (±0.0) | 72.8 (±0.0) | 62.4 (±0.0) | 47.4 (±0.0) | 69.5 (±0.0)
360+x (Pre-train) | 84.5 (+0.8) | 81.0 (+0.8) | 73.4 (+0.6) | 65.9 (+3.5) | 54.6 (+7.2) | 71.9 (+2.4)
Kinetics400 [13] (Pre-train) and 360+x (Fine-tune) | 85.3 (+1.6) | 81.8 (+1.6) | 74.9 (+2.1) | 68.1 (+5.7) | 58.2 (+10.8) | 73.7 (+4.2)
", + "bbox": [ + 80, + 247, + 888, + 316 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We proceeded to perform experiments using SSL pretrained models as feature extractors for the temporal action localisation task incorporating all three modalities $(\\mathrm{V} + \\mathrm{A} + \\mathrm{D})$ with the TriDet framework [21]. Since a training-from-scratch model cannot serve as the first-stage extractor, we employed the supervised extractors from section 4.2 as a comparison. The summarised results in Table 6 indicate that pre-training with video pace (VP) or clip order (CO) individually leads to an average performance improvement of $\\sim 1.2\\%$ and $\\sim 0.9\\%$ respectively on average, compared to the supervised baseline. The combination of both SSL methods yields the highest performance gain of $\\sim 1.9\\%$ .", + "bbox": [ + 75, + 340, + 472, + 523 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.6. Pre-training Model for Dataset Adaptation", + "text_level": 1, + "bbox": [ + 76, + 539, + 442, + 556 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This section explores the efficacy of leveraging models pretrained on the $360 + x$ dataset for adaptation to other datasets like THUMOS14 [8]. By adhering to THUMOS14 setup, the experiments use TriDet framework [21] for conducting Temporal Action Localisation (TAL).", + "bbox": [ + 75, + 565, + 468, + 642 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The performance of this experiment, specifically the mean average precision (mAP) scores covering IoU thresholds from 0.3 to 0.7, are presented in Table 7. As outlined by the results, exclusive reliance on $360 + x$ video data for training showcases the potential for enhanced performance as compared to training solely based on the Kinetics400 dataset [13]. Remarkably, this performance improvement becomes more prominent at higher IoU thresholds. The utmost optimal performance, however, emerges through a two-step approach, commencing with pre-training on the Kinetics400 dataset followed by fine-tuning on the $360 + x$ dataset with an average $\\sim 4.2\\%$ improvement compared to solely Kinetics400 pre-trained extractor. This finding showcases that the employment of the $360 + x$ dataset for feature extractor training can be beneficial for dataset adaptation in sub-stream tasks. More experimental results on dataset integration are available in the supplementary materials.", + "bbox": [ + 75, + 643, + 472, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusions", + "text_level": 1, + "bbox": [ + 498, + 339, + 627, + 356 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we studied the problem of panoptic scene understanding and presented, to our knowledge, the first-of-its-kind dataset $-360 + x$ to support the study. The proposed $360 + x$ is a large-scale multi-modal dataset that consists of several different viewpoints (e.g. egocentric, third-person-view, and panoramic view) and covers various real-world activities in real daily life. With the most possibly available perspectives describing a real-world scene, $360 + x$ aims to support the research in understanding the world around us in a way that humans understand (and even beyond). Additionally, we also presented a benchmark study of several scene understanding tasks based on this newly collected dataset, with a comparison to other existing datasets. 
Extensive experimental analysis validated the effectiveness of each of the perspectives within our dataset, and also suggested interesting insights, confirming that with more viewpoints or data modalities, the understanding of a scene could be more comprehensive. Surprisingly, models trained without manual annotation (i.e. self-supervised learning) on our dataset even perform better than those trained with human annotations in a fully supervised manner. We hope this new dataset could bring in new directions towards scene understanding and look forward to the research on them.", + "bbox": [ + 496, + 364, + 893, + 712 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement", + "text_level": 1, + "bbox": [ + 500, + 720, + 648, + 736 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This project was partially supported by the Ramsay Research Fund, and the Royal Society Short Industry Fellowship (SIF\\R1\\231009). Y. Hou and C. Qu were partially supported by the CSC grant (No.202308060328) and Allsee Technologies Ltd., respectively. The computations described in this research were performed using the Baskerville Tier 2 HPC service1 (funded by EP/T022221/1 and EP/W032244/1) and is operated by Advanced Research Computing at the University of Birmingham.", + "bbox": [ + 496, + 743, + 893, + 881 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "1https://www.baskerville.ac.uk/", + "bbox": [ + 517, + 886, + 687, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "19380", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Keshav Bhandari, Mario A DeLaGarza, Ziliang Zong, Hugo Latapie, and Yan Yan. Egok360: A 360 egocentric kinetic human activity video dataset. In 2020 IEEE International Conference on Image Processing (ICIP), pages 266-270. IEEE, 2020. 2", + "[2] Honglie Chen, Weidi Xie, Andrea Vedaldi, and Andrew Zisserman. Vggsound: A large-scale audio-visual dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 721-725. IEEE, 2020. 1, 3, 7", + "[3] Ziyang Chen, David F Fouhey, and Andrew Owens. Sound localization by self-supervised time delay estimation. In European Conference on Computer Vision, pages 489-508. Springer, 2022. 2, 3", + "[4] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, et al. Scaling egocentric vision: The epic-kitchens dataset. In Proceedings of the European Conference on Computer Vision (ECCV), pages 720-736, 2018. 1, 2, 4, 5", + "[5] Bernard Ghanem Fabian Caba Heilbron, Victor Escorcia and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 961-970, 2015. 1, 2, 4, 5, 7", + "[6] Jort F. Gemmeke, Daniel P. W. Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R. Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780, 2017. 3", + "[7] Jort F. Gemmeke, Daniel P. W. Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R. 
Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), page 776-780. IEEE Press, 2017. 1, 7", + "[8] A. Gorban, H. Idrees, Y.-G. Jiang, A. Roshan Zamir, I. Laptev, M. Shah, and R. Sukthankar. Thumos challenge: Action recognition with a large number of classes. http://www.thumos.info, 2015. 8", + "[9] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18995-19012, 2022. 1, 2, 5, 7", + "[10] Chunhui Gu, Chen Sun, David A Ross, Carl Vondrick, Caroline Pantofaru, Yeqing Li, Sudheendra Vijayanarasimhan, George Toderici, Susanna Ricco, Rahul Sukthankar, et al. Ava: A video dataset of spatio-temporally localized atomic visual actions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6047-6056, 2018. 3" + ], + "bbox": [ + 78, + 116, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6", + "[12] Shawn Hershey, Sourish Chaudhuri, Daniel PW Ellis, Jort F Gemmeke, Aren Jansen, R Channing Moore, Manoj Plakal, Devin Platt, Rif A Saurous, Bryan Seybold, et al. Cnn architectures for large-scale audio classification. In 2017 iee international conference on acoustics, speech and signal processing (icassp), pages 131-135. IEEE, 2017. 6", + "[13] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. 1, 2, 4, 5, 6, 7, 8", + "[14] H. Kuehne, H. Jhuang, E. Garrote, T. Poggio, and T. Serre. HMDB: a large video database for human motion recognition. In Proceedings of the International Conference on Computer Vision (ICCV), 2011. 5", + "[15] Hildegard Kuehne, Hueihan Jhuang, Estíbaliz Garrote, Tomaso Poggio, and Thomas Serre. Hmdb: a large video database for human motion recognition. In 2011 International conference on computer vision, pages 2556-2563. IEEE, 2011. 1, 4, 5", + "[16] Yiyi Liao, Jun Xie, and Andreas Geiger. Kitti-360: A novel dataset and benchmarks for urban scene understanding in 2d and 3d. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2", + "[17] Benjamin R Meagher. Ecologizing social psychology: The physical environment as a necessary constituent of social processes. *Personality and social psychology review*, 24(1): 3-23, 2020. 3", + "[18] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41, 1995. 4", + "[19] Xiaokang Peng, Yake Wei, Andong Deng, Dong Wang, and Di Hu. Balanced multimodal learning via on-the-fly gradient modulation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022. 6", + "[20] Daniel Scharstein, Heiko Hirschmüller, York Kitajima, Greg Krathwohl, Nera Nesic, Xi Wang, and Porter Westling. High-resolution stereo datasets with subpixel-accurate ground truth. 
In Pattern Recognition: 36th German Conference, GCPR 2014, Münster, Germany, September 2-5, 2014, Proceedings 36, pages 31-42. Springer, 2014. 1", + "[21] Dingfeng Shi, Yujie Zhong, Qiong Cao, Lin Ma, Jia Li, and Dacheng Tao. Tridet: Temporal action detection with relative boundary modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18857-18866, 2023. 7, 8", + "[22] Shuran Song, Andy Zeng, Angel X Chang, Manolis Savva, Silvio Savarese, and Thomas Funkhouser. Im2pano3d: Extrapolating 360 structure and semantics beyond the field of view. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3847-3856, 2018. 1, 2", + "[23] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402, 2012. 1, 2, 4, 5" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "19381", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[24] Tuan N Tang, Kwonyoung Kim, and Kwanghoon Sohn. Temporalmaxer: Maximize temporal context with only max pooling for temporal action localization. arXiv preprint arXiv:2303.09055, 2023. 7", + "[25] Jiangliu Wang, Jianbo Jiao, and Yunhui Liu. Self-supervised video representation learning by pace prediction. In European Conference on Computer Vision, 2020. 7, 8", + "[26] Weiyao Wang, Du Tran, and Matt Feiszli. What makes training multi-modal classification networks hard? In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12695-12705, 2020. 6", + "[27] Xiang Wang, Zhiwu Qing, Ziyuan Huang, Yutong Feng, Shiwei Zhang, Jianwen Jiang, Mingqian Tang, Changxin Gao, and Nong Sang. Proposal relation network for temporal action detection. arXiv preprint arXiv:2106.11812, 2021. 7", + "[28] Jianxiong Xiao, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Recognizing scene viewpoint using panoramic place representation. In 2012 IEEE Conference on Computer Vision and Pattern Recognition, pages 2695-2702. IEEE, 2012. 1", + "[29] Dejing Xu, Jun Xiao, Zhou Zhao, Jian Shao, Di Xie, and Yueting Zhuang. Self-supervised spatiotemporal learning via video clip order prediction. In Computer Vision and Pattern Recognition (CVPR), 2019. 7, 8", + "[30] Guorun Yang, Xiao Song, Chaoqin Huang, Zhidong Deng, Jianping Shi, and Bolei Zhou. Drivingstereo: A large-scale dataset for stereo matching in autonomous driving scenarios. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 899–908, 2019. 1", + "[31] Serena Yeung, Olga Russakovsky, Ning Jin, Mykhaylo Andriluka, Greg Mori, and Li Fei-Fei. Every moment counts: Dense detailed labeling of actions in complex videos. International Journal of Computer Vision, 126:375–389, 2018. 2", + "[32] Chen-Lin Zhang, Jianxin Wu, and Yin Li. Actionformer: Localizing moments of actions with transformers. In European Conference on Computer Vision, pages 492-510. Springer, 2022. 7", + "[33] Hang Zhao, Antonio Torralba, Lorenzo Torresani, and Zhicheng Yan. Hacs: Human action clips and segments dataset for recognition and temporal localization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8668-8678, 2019. 5", + "[34] Bolei Zhou, Agata Lapedriza, Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 million image database for scene recognition. 
IEEE transactions on pattern analysis and machine intelligence, 40(6):1452-1464, 2017. 4" + ], + "bbox": [ + 78, + 90, + 470, + 739 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "19382", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_model.json b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_model.json new file mode 100644 index 0000000000000000000000000000000000000000..178431c9414963b02dfea638d6a4b45e3fb0ddc9 --- /dev/null +++ b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_model.json @@ -0,0 +1,1815 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.131, + 0.798, + 0.153 + ], + "angle": 0, + "content": "\\(360 + x\\): A Panoptic Multi-modal Scene Understanding Dataset" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.181, + 0.826, + 0.2 + ], + "angle": 0, + "content": "Hao Chen Yuqi Hou Chenyuan Qu Irene Testini Xiaohan Hong Jianbo Jiao" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.206, + 0.757, + 0.224 + ], + "angle": 0, + "content": "The Machine Intelligence \\(+x\\) Group, University of Birmingham, UK" + }, + { + "type": "text", + "bbox": [ + 0.311, + 0.231, + 0.655, + 0.249 + ], + "angle": 0, + "content": "Project page: https://x360dataset.github.io/" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.283, + 0.314, + 0.3 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.317, + 0.473, + 0.635 + ], + "angle": 0, + "content": "Human perception of the world is shaped by a multitude of viewpoints and modalities. While many existing datasets focus on scene understanding from a certain perspective (e.g. egocentric or third-person views), our dataset offers a panoptic perspective (i.e. multiple viewpoints with multiple data modalities). Specifically, we encapsulate third-person panoramic and front views, as well as egocentric monocular/binocular views with rich modalities including video, multi-channel audio, directional binaural delay, location data and textual scene descriptions within each scene captured, presenting comprehensive observation of the world. To the best of our knowledge, this is the first database that covers multiple viewpoints with multiple data modalities to mimic how daily information is accessed in the real world. Through our benchmark analysis, we presented 5 different scene understanding tasks on the proposed \\(360 + x\\) dataset to evaluate the impact and benefit of each data modality and perspective in panoptic scene understanding. We hope this unique dataset could broaden the scope of comprehensive scene understanding and encourage the community to approach these problems from more diverse perspectives." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.662, + 0.21, + 0.679 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.688, + 0.47, + 0.869 + ], + "angle": 0, + "content": "Scene understanding is crucial for robotics and artificial intelligent systems to perceive the environment around them. As humans, we intuitively understand the world through primarily visual inputs, as well as auditory and other sensory inputs (e.g. touch and smell). The community has made remarkable progress in mimicking human perception with contributions from various datasets and benchmarks [4, 5, 7, 9, 13, 15, 23]. These efforts have approached scene understanding from a diverse range of perspectives, such as normal frontal-view vision [5, 13, 23], panoramic view [22, 28], binocular/stereo view [20, 30], egocentric monocular view [4, 9], and audio [2, 7]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.901 + ], + "angle": 0, + "content": "While there has been exciting progress in understanding scenes from a limited number of perspectives, it is notable" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.285, + 0.895, + 0.452 + ], + "angle": 0, + "content": "that humans understand the world by incorporating a combination of viewpoints, in a holistic manner. This includes an egocentric view for activities we are involved in and a third-person view for activities we are observing. In addition to visual cues, we also rely on a range of modalities, including hearing and binaural delay, to fully comprehend our surroundings and track movements. Our prior knowledge of the scene, such as localisation information and scene descriptions, has also supported our understanding of the environment (e.g. the cafe in the city centre may be different from a similar cafe on a university campus)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.455, + 0.895, + 0.682 + ], + "angle": 0, + "content": "Taking the above observations into consideration, a new dataset covering all these aforementioned aspects is presented in this work, to provide a panoptic scene understanding, termed \\(360 + x\\) dataset. This new dataset offers a diverse selection of perspectives, including a \\(360^{\\circ}\\) panoramic view providing a complete panoptic view of the environment, and a third-person front view that highlights the region of interest that has the most movements in front of the camera. Additionally, we have included egocentric monocular and binocular videos to capture the first-person perspective of individuals in the environment. These viewpoints are complemented by aligned multi-channel audio with directional binaural delay information, as well as location information and scene descriptions as metadata. An illustration of the presented dataset collection system is shown in Figure 1." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.685, + 0.895, + 0.821 + ], + "angle": 0, + "content": "Based on this newly collected dataset, we perform 5 visual-audio scene understanding tasks to analyse the contribution and effectiveness of each data viewpoint and modality. Particularly, we look at video classification, temporal action localisation, self-supervised representation learning, cross-modality retrieval and pre-training model migration for dataset adaptation, with interesting findings and insights from extensive experimental analysis. 
The main contributions of this work are summarised as follows:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.826, + 0.891, + 0.87 + ], + "angle": 0, + "content": "- We propose to our knowledge the first and probably the most authentic panoptic scene understanding dataset covering multiple viewpoints and data modalities in the wild." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.891, + 0.9 + ], + "angle": 0, + "content": "- We perform extensive experimental analysis to validate the effectiveness of the proposed dataset on different tasks" + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.826, + 0.891, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19373" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.089, + 0.895, + 0.441 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.463, + 0.895, + 0.548 + ], + "angle": 0, + "content": "Figure 1. Illustration of the proposed \\(360 + x\\) dataset. The \\(360^{\\circ}\\) camera records fish-eye raw videos with front and back lenses. These videos are merged to create a spherical \\(360^{\\circ}\\) panorama (middle-up figure, zoom in for details), which is then transformed to (a) \\(360^{\\circ}\\) panoramic data using equirectangular projection. The (b) third-person front view is obtained by de-warping the rich movements region highlighted red in the spherical field of \\(360^{\\circ}\\) panorama (the middle-left figure). By wearing stereo cameras, the capturers record (c) egocentric clips while staying visible to the fixed \\(360^{\\circ}\\) camera (central ellipse). (e) Directional audio time delay data is generated from left and right audio inputs (d) from the \\(360^{\\circ}\\) camera by interaural time delay process [3]. This helps locate sound sources in the \\(360^{\\circ}\\) panorama." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.574, + 0.37, + 0.588 + ], + "angle": 0, + "content": "from various perspectives and modalities." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.589, + 0.471, + 0.664 + ], + "angle": 0, + "content": "- Interesting findings are derived from the analysis, suggesting the effectiveness of each viewpoint and data modality. Learning from this new dataset without supervision even shows a better performance than that from a model trained in a supervised manner." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.679, + 0.228, + 0.695 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.704, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Video understanding and analysis. Video analysis has been extensively studied in the literature. Existing datasets such as UCF101 [23], ActivityNet [5] and Kinetics [13] have provided large-scale video data for activity understanding tasks. However, these datasets often exhibit lower complexity compared to real-world scenes. Some datasets, like MultiThumos [31], aim to increase complexity but are limited to specific scenarios with domain-specific actions, deviating from real-life daily activities. In contrast, our dataset builds upon the activity labels from ActivityNet [5] and strives to capture data that closely simulates real-life scenarios. Apart from that, we also include multiple data viewpoints and modalities as compared to existing datasets." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.573, + 0.895, + 0.801 + ], + "angle": 0, + "content": "Panoramic scene understanding. In recent years, panoramic scene understanding has gained significant attention due to its holistic reflection of the environment. Several datasets have been introduced to facilitate research in this area. For instance, the KITTI-360 [16] provides a collection of panoramic images for urban scene analysis. EGOK360 [1] has been introduced to address the need for video data with a panoramic view. Im2Pano3D [22] presents a panoramic dataset for indoor scenarios with semantic segmentation and focuses on the prediction from a partial observation. However, these datasets primarily focus on panoramic visual data while lacking the incorporation of other viewpoints (e.g. egocentric) and data modalities (e.g. audio), limiting their potential for comprehensive scene understanding and analysis." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Egocentric video analysis. Focusing on understanding scenes from a first-person perspective, existing datasets such as EPIC-Kitchens [4] and Ego4D [9] provide egocentric video data collected during daily activities. They have contributed to research on activity recognition and object detection in egocentric scenes. Unlike these datasets fo" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "19374" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.138 + ], + "angle": 0, + "content": "cusing on egocentric views, our dataset also covers other viewpoints and modalities aiming at supporting scene understanding research in a more panoptic manner." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.144, + 0.47, + 0.281 + ], + "angle": 0, + "content": "Visual-audio analysis. Integrating visual and audio information often enhances the performance of models in scene understanding tasks, as it provides richer contextual information. There are some existing datasets available to support research in audio-visual analysis, e.g. AVA [10], AudioSet [6] and VGGSound [2], to name a few. However, these datasets are lacking in multiple viewpoints and the directional property of audio signals, which are provided in the proposed new dataset." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.295, + 0.222, + 0.31 + ], + "angle": 0, + "content": "3. \\(360 + x\\) Dataset" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.319, + 0.362, + 0.336 + ], + "angle": 0, + "content": "3.1. Data Acquisition and Alignment" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.342, + 0.47, + 0.492 + ], + "angle": 0, + "content": "Two main devices were used for our data collection: the Insta 360 One X2 and Snapchat Spectacles 3 cameras. The 360 One X2 has two fish-eye cameras that collect \\(360^{\\circ}\\) panoramic visual information in the scene with \\(5760 \\times 2880\\) resolution and a frame rate of 25 FPS. Additionally, directional audio was recorded using four microphones in directional audio mode. While the Spectacles 3 has a stereo camera attached to a pair of glasses used to capture the egocentric binocular vision within the scene at a resolution of \\(2432 \\times 1216\\) and a frame rate of 60 FPS." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.493, + 0.469, + 0.734 + ], + "angle": 0, + "content": "Once we obtained the raw data, we aligned the different viewpoints and modalities through a specific process. The initial raw footage captured by the two fish-eye cameras on the \\(360^{\\circ}\\) camera was in the form of two circular videos, which were then stitched and de-warped into a spherical panorama. This panorama can be projected into an equirectangular format to produce a panoramic video. However, this direct compression of the spherical view into a rectangular format can introduce unnatural distortions. In order to provide a more natural and informative view, we inversely project a rectangular region into equirectangular space and use it to crop the spherical panorama. We use optical flow to determine the crop region with the most motion activity in the spherical panorama field. This crop region is then projected back to rectangular, resulting in an informative video view with minimal distortions." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.735, + 0.469, + 0.84 + ], + "angle": 0, + "content": "Egocentric binocular videos, as shown in Figure 1(c), were captured ranging from approximately 30 seconds to 1 minute in duration for each clip. A total of 1 to 5 stereo clips were recorded, scattered throughout the duration of the average 6 mins \\(360^{\\circ}\\) video. In addition to stereo videos, we also provide the corresponding monocular videos for the egocentric view." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.84, + 0.469, + 0.901 + ], + "angle": 0, + "content": "The audio recordings were temporally aligned with their corresponding videos with left/right channel modality. The four-channel audios with the \\(360^{\\circ}\\) panoramic video are provided as well for further exploration. Moreover, we also" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.152 + ], + "angle": 0, + "content": "provide the directional information of the audio which was presented using the estimated interaural time delay of the sound obtained from the method introduced in [3]. The GPS information and weather information were also provided." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.152, + 0.892, + 0.242 + ], + "angle": 0, + "content": "Given the possibility of occlusions in regions visible to the egocentric camera but not to the \\(360^{\\circ}\\) camera, we ensured during data collection that the cameras were positioned in close proximity. This setup, with clear mutual visibility, allowed both cameras to capture a similar overall scene." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.253, + 0.655, + 0.268 + ], + "angle": 0, + "content": "3.2. Scene Selection" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.277, + 0.892, + 0.322 + ], + "angle": 0, + "content": "To broaden scene coverage and promote multi-modal collaborative learning, we integrated a strategic selection process for captured scenes, governed by three key criteria:" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.323, + 0.892, + 0.488 + ], + "angle": 0, + "content": "i) Scene categories must be carefully crafted to be comprehensive, yet concise, while also being authoritative and reflective of everyday life. The location where a scene unfolds plays a crucial role in providing essential environmental context to the activities within it [17]. Distinct scenes can impart unique meanings or emotional nuances to identical events. 
For instance, the act of chatting could convey divergent implications in a school setting as compared to a home environment. Such nuances are critical as they offer deeper insights into the contextual interpretation of behaviours and interactions in varied settings." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.488, + 0.892, + 0.578 + ], + "angle": 0, + "content": "ii) The data should ideally span a wide array of weather and lighting conditions. This criterion aims to ensure the inclusion of both indoor and outdoor activities under various environmental scenarios. Such diversity is important in accurately representing the multifaceted nature of daily life and the various conditions in which these activities occur." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.579, + 0.892, + 0.699 + ], + "angle": 0, + "content": "iii) Our third criterion is the inclusion of scenarios rich in distinctive sound sources, particularly those where multiple activities co-occur. It is essential for the dataset to not only visually represent these activities but also to capture the corresponding auditory elements. The goal is to present the complexity and realism of real-world environments as much as possible, marked by simultaneous and various actions and behaviours." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.7, + 0.892, + 0.821 + ], + "angle": 0, + "content": "It is worth noting that our dataset was collected across several countries, including the United Kingdom (e.g. London, Birmingham, Cardiff and Jersey), France (Paris), Spain (e.g. Oviedo and Picos de Europa), China (e.g. Guangzhou and Shenzhen), and Japan (e.g. Kyoto and Osaka). During the data collection, the \\(360^{\\circ}\\) Camera was placed statically to record the scene, while a capturer wearing the Spectacles glasses recorded first-person interactions with the scene." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Sensitive data handling. Our dataset was collected in a real-world setting and may contain sensitive personal information (e.g. human faces). To ensure ethical and responsible research, the video capture was conducted with proper" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19375" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.042, + 0.087, + 0.355, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.28, + 0.347, + 0.293 + ], + "angle": 0, + "content": "(a). Distribution of the scene categories (number)." + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.296, + 0.345, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.078, + 0.429, + 0.35, + 0.453 + ], + "angle": 0, + "content": "(d). Distribution comparison of the number of action instances per video." + }, + { + "type": "image", + "bbox": [ + 0.366, + 0.085, + 0.642, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.377, + 0.279, + 0.593, + 0.292 + ], + "angle": 0, + "content": "(b). Geographical distribution of actions." + }, + { + "type": "image", + "bbox": [ + 0.369, + 0.295, + 0.549, + 0.431 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.385, + 0.44, + 0.536, + 0.454 + ], + "angle": 0, + "content": "(e). Capture time of the day." 
+ }, + { + "type": "image", + "bbox": [ + 0.655, + 0.098, + 0.911, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.641, + 0.279, + 0.871, + 0.292 + ], + "angle": 0, + "content": "(c). Overall distribution of actions duration." + }, + { + "type": "image", + "bbox": [ + 0.573, + 0.295, + 0.891, + 0.421 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.659, + 0.44, + 0.804, + 0.454 + ], + "angle": 0, + "content": "(f). Binaural delay per clip." + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.461, + 0.893, + 0.491 + ], + "angle": 0, + "content": "Figure 2. Dataset statistics analysis, on the distributions of (a) the scene category, (b) action distribution per cities, (c) temporal action instance duration, and (d) number of actions per video, (e) capturing time, (f) binaural delay per clip." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.516, + 0.47, + 0.622 + ], + "angle": 0, + "content": "consent. Additionally, we have taken measures to protect privacy by anonymising the data. This includes applying a face detection mechanism to outline predicted face locations in each frame and applying blurring filters to maintain meaningful details while ensuring information security. More detailed information on our privacy protection measures can be found in the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.635, + 0.249, + 0.649 + ], + "angle": 0, + "content": "3.3. Data Annotation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Scene label rationale. The \\(360 + x\\) dataset comprises a total of 28 scene categories (15 indoor scenes and 13 outdoor scenes), as illustrated in Figure 2(a). To establish comprehensive and authoritative scene categories that reflect daily life, we referred to the Places Database [34], which is derived from WordNet [18], as our primary basis. We then leverage the sophisticated semantic analysis capabilities of large language models, to conduct a thorough filtering and classification of a multitude of everyday scenes. This curation resulted in a refined set of 28 scene categories, each symbolising aspects of daily life. Simultaneously, the recordings concentrate on capturing common occurrences within conventional settings, providing a realistic depiction of everyday life. Detailed descriptions defining each category, along with discussions regarding these constraints and potential sampling biases, are presented in the supplimen" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.517, + 0.592, + 0.532 + ], + "angle": 0, + "content": "tary material." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.545, + 0.892, + 0.803 + ], + "angle": 0, + "content": "Temporal segmentation label. We also provide temporal segment labelling for the understanding of activities in the shooting scenes. We follow the activity hierarchy standard defined by ActivityNet [5], which provides a comprehensive categorisation of human activities, consisting of seven top-level categories (Personal Care, Eating and Drinking, Household, Caring and Helping, Working, Socialising and Leisure, and Sports and Exercises). To capture the diversity and granularity of activities within each category, we defined a total of 38 action instances, covering specific actions and behaviours. To ensure high-quality annotations, the temporal segmentation labelling was annotated by three experienced annotators. 
Each annotator independently annotated the temporal segments corresponding to the activities in the videos. To obtain a consensus, we merged the individual annotations and resolved any discrepancies according to discussion and consensus among the annotators." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.816, + 0.771, + 0.832 + ], + "angle": 0, + "content": "3.4. Dataset Statistics and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Overview. Existing publicly available datasets primarily focus on visual unimodality [4, 5, 13, 15, 23]. In contrast, our dataset introduces a novel approach by collecting different views or modalities, as presented in Table 1, including" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19376" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.2, + 0.09, + 0.768, + 0.104 + ], + "angle": 0, + "content": "Table 1. Dataset comparison. Ego: Egocentric, V: Video, A: Audio, A+V: Audio-visual events." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.115, + 0.89, + 0.243 + ], + "angle": 0, + "content": "
Dataset | Video Viewpoints | Other Modalities | Statistics | Attributions
 | Third-person Front View | 360° Panoramic | Ego Monocular | Ego Binocular | Normal Audio | Directional Binaural Delay | GPS Info | Avg Duration | Total Duration(s) | Frames Count(K) | Annotations Source | Multiple Events
UCF101 [23] | X | X | X | X | X | 7.21 s | 96,000 | 2,400 | V | X
Kinetics [13] | X | X | X | X | X | X | 10 s | 2,998,800 | 74,970 | V | X
HMDB51 [14] | X | X | X | X | X | X | 3 s | 21,426 | 643 | V | X
ActivityNet [5] | X | X | X | X | X | X | 2 min | 2,332,800 | 11,664 | V
EPIC-Kitchens [4] | X | X | X | X | X | 7.6 min | 198,000 | 11,500 | V | X
Ego4D [9] | X | X | X | X | 8 min | 13,212,000 | - | A+V
360+x (Ours) | 6.2 min | 244,000 | 8,579 | A+V
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.269, + 0.47, + 0.405 + ], + "angle": 0, + "content": "\\(360^{\\circ}\\) panoramic video, third-person front view video, egocentric monocular video, egocentric binocular video, normal audio, directional binaural delay, location and textual scene description. This diverse range of modalities provides multiple dimensions and clues for understanding and analysing complex scenes. Our dataset consists of 2,152 videos representing 232 data examples, with 464 videos captured using the 360 camera and the remaining 1,688 recorded with the Spectacles camera." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.405, + 0.47, + 0.556 + ], + "angle": 0, + "content": "Figure 2(a) presents the distribution of video counts across each of the 28 scene categories. Our dataset is characterised by a balanced distribution of data across these scenes. Notably, it diverges from conventional databases like UCF101 [23], Kinetics [13], HMDB [15], and ActivityNet [5], particularly in terms of average video duration, which is approximately 6.2 minutes. This longer duration is crucial for maintaining the integrity and coherence of actions within each scene, allowing for a comprehensive temporal analysis of the activities." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.57, + 0.47, + 0.722 + ], + "angle": 0, + "content": "Temporal segment label. The annotations of temporal segment labels in our dataset contribute to the fine-grained analysis of activities. We defined 38 action instances representing specific actions and behaviours. The length of each segment labelled with a specific activity varies across the dataset, as depicted in Figure 2(c). Note we acknowledge the significance of audio in accurately identifying certain actions, such as 'coughing' or 'clapping'. Therefore, our dataset combines audio information to enhance accuracy in action recognition [4, 5, 13, 15, 23], as shown in Table 1." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Comparative complexity. Due to its realistic scene simulation, our dataset offers more complexity compared to previous datasets. This complexity arises from the diverse range of activities and interactions captured, resulting in a more challenging and realistic setting for scene understanding and activity recognition. As shown in Figure 2(d), most existing datasets, such as UCF101 [23], Kinetics [13], and HMDB51 [14], typically consist of one action instance per video. While datasets like Ego4D [9] and ActivityNet [5] have large volumes and broad coverage, they often contain a limited number of action instances per individual video." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.268, + 0.892, + 0.406 + ], + "angle": 0, + "content": "The HACS dataset [33] contains more multiple action instances per video but still pales in comparison to the richness of the proposed dataset. Our dataset surpasses these existing datasets in terms of the number of action instances per video, showcasing the extensive variety of activities captured. The improved complexity and richness of our dataset enable follow-up research to explore and develop more robust algorithms, pushing the boundaries of scene understanding in real-world contexts." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.418, + 0.893, + 0.903 + ], + "angle": 0, + "content": "Data distribution. 
We have ensured a balanced distribution across various dimensions, including scene categories, action instances, binaural delay, etc. Figure 2(a) depicts the scene number distribution across 28 scene categories, demonstrating a comprehensive coverage of scene categories. Notably, the dataset achieves an almost equal proportion of indoor and outdoor scenes, accounting for \\(54.7\\%\\) and \\(45.3\\%\\) respectively. Our dataset allows each scene to conclude multiple diverse action instances naturally, and also enables different scenes to share common action instances. Notably, in Figure 2(b), it displays the 'types of action per location' that can be observed in the geographic distribution and the diversity of the data, where the inner circle shows the location and the outer circle shows the action types captured in each location. As illustrated in Figure 2(c), the distribution of action duration shows our dataset has captured extensive and realistic human behaviours across natural scenes. One interesting observation from our dataset is the high-frequency occurrence of action 'operating phone', which contributes \\(17.54\\%\\) of the whole duration, providing a reflection of mobile usage in modern daily life. Additionally, the dataset offers valuable directional audio to supplement visual understanding. The distribution of data capture times in the dataset corresponds with natural human activities, as shown in Figure 2(e). Human activities throughout the day are mainly concentrated during the daytime (more in the afternoon and evening). Figure 2(f) illustrates the diversity of binaural delay for each clip. The positive point means the audio is directed towards the left direction while the negative the right. In summary, the presented \\(360 + x\\) dataset covers broad modalities and diversity with an authentic distribution from different per" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19377" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.078, + 0.092, + 0.316, + 0.108 + ], + "angle": 0, + "content": "spectives, mimicking real daily life." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.125, + 0.348, + 0.143 + ], + "angle": 0, + "content": "4. Benchmark and Experiments" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.151, + 0.468, + 0.242 + ], + "angle": 0, + "content": "To establish a comprehensive benchmark for the presented \\(360 + x\\) dataset, we choose five visual understanding tasks to delve into the exploration of multiple viewpoints and modalities usage, including: video scene classification, temporal action localisation, cross-modality retrieval, self-supervised representation learning, and dataset adaptation." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.243, + 0.468, + 0.289 + ], + "angle": 0, + "content": "Remark: Unless specifically stated otherwise, the experiments on \\(360 + x\\) will utilise three views: the \\(360^{\\circ}\\) view, egocentric binocular view, and the third-person front view." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.302, + 0.279, + 0.318 + ], + "angle": 0, + "content": "4.1. Experimental Setting" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.326, + 0.468, + 0.478 + ], + "angle": 0, + "content": "Models. We employed a consistent set of model backbones across different tasks to minimise model interference, except for temporal action localisation task (detailed in section 4.3). 
We followed the commonly used setup and selected the backbone I3D [13] as our video model. To handle audio-related aspects, we chose the VGGish [12] as our audio model. Additionally, for directional binaural feature extraction, we utilised the ResNet-18 model [11]. A linear layer is positioned after the backbones to carry out each specific task based on backbone output features." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.479, + 0.468, + 0.706 + ], + "angle": 0, + "content": "It is important to note that a simple concatenation of all modalities features can diminish the potential information derived from multi-modality [26]. Therefore, instead of solely concatenating modality features, we leverage a hierarchical attention mechanism for multi-modality integration. In this approach, the directional binaural feature serves as an attention query to direct focused attention towards the audio feature, enabling it to encapsulate the directional information into the audio feature. At the same time, the audio feature is also leveraged by acting as a query itself, enabling it to attentively interact with the video feature. This mechanism allows for creating a synergistic representation of the underlying data that integrates the features of all modalities. For more details and in-depth analysis, please refer to the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.72, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Training and verification setup. For each temporal action localisation model, we follow their original training settings. For I3D, VGGish, and ResNet-18 networks, the training settings are 200 epochs with the parameters described in [19]. The training process utilises the AdamW optimiser with a learning rate of \\(1 \\times 10^{-5}\\) and a decay rate of 0.1 at the 80th and 120th epochs. We also apply data augmentation techniques such as rotation, scaling, and colour jittering. The dataset was divided into training, validation, and test sets, following an 80/10/10 split. To ensure a balanced representation of scene categories, the examples were stratified probabilistically across the sets." + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.09, + 0.892, + 0.146 + ], + "angle": 0, + "content": "Table 2. Video classification performance across different views (Ego: egocentric binocular view, Front: third-person front view, and \\(360^{\\circ}\\) .. \\(360^{\\circ}\\) view) and data modalities (V: Video, A: Audio, D: Directional binaural delay). Reported in Avg. Prec. \\((\\%)\\)" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.157, + 0.89, + 0.266 + ], + "angle": 0, + "content": "
Selected Views | Modalities
 | V | V + A | V + A + D
Egocentric Only | 51.95(±0.0) | 55.24(±0.0) | 58.92
Front Only | 54.05(+2.1) | 65.33(+10.1) | 67.19
360° Only | 56.33(+4.4) | 67.14(+11.9) | 70.95
360° + Egocentric | 58.99(+7.0) | 70.48(+15.2) | 72.11
360° + Front | 59.70(+7.8) | 75.06(+19.8) | 77.69
360° + Front + Ego | 63.73(+11.8) | 77.32(+22.1) | 80.62
" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.291, + 0.737, + 0.307 + ], + "angle": 0, + "content": "4.2. Video Scene Classification" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.315, + 0.892, + 0.36 + ], + "angle": 0, + "content": "Video scene classification assigns scene labels to videos based on their frames, enabling analysis of visual content and determining the subject matter." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.371, + 0.892, + 0.598 + ], + "angle": 0, + "content": "Single view vs. multi-view. First, we are interested in the influence of different combinations of video views on the classification performance. The results, representing each combination, are summarised in Table 2. The results for single views are presented in the first three rows, indicating that using a single \\(360^{\\circ}\\) panoramic view outperforms using either an egocentric binocular view or a third-person front view only. When employing multiple views, it is noted that better performance can be achieved compared to using a single view. Specifically, utilising all three views leads to the best performance. Such a performance can be attributed to the fact that although these three views describe the same scene, each different view offers a unique perspective that contributes to a more comprehensive understanding of the scene, resulting in improved performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.609, + 0.892, + 0.729 + ], + "angle": 0, + "content": "Single-modality vs. multi-modality and more. We further investigate the impact of modalities on the model's performance. Various combinations of modalities are analysed, and the results are summarised in Table 2 on a column-wise basis. In particular, the first column represents the visual modality alone, the second column combines video with audio, and the last column incorporates visual, audio, and directional binaural information modalities." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.73, + 0.892, + 0.836 + ], + "angle": 0, + "content": "The inclusion of additional modalities leads to average precision improvements. For example, when all three views are utilised, incorporating more modalities results in improvements of \\(13.59\\%\\) and \\(16.89\\%\\), respectively. This underscores the benefits of leveraging multiple modalities for a more comprehensive understanding of the scene and enhancing overall performance." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.847, + 0.765, + 0.864 + ], + "angle": 0, + "content": "4.3. Temporal Action Localisation" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Temporal Action Localisation (TAL) is a video understanding task that involves the dense identification and temporal" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19378" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.895, + 0.12 + ], + "angle": 0, + "content": "Table 3. Temporal action localisation results. Baseline extractors are used in [2, 21, 24, 32]. The mAP@σ represents the mean average precision (%) at a threshold of σ. The best performance is achieved by employing \\( V + A + D \\) modalities with extractors pre-trained on \\( 360 + x \\)." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.129, + 0.891, + 0.22 + ], + "angle": 0, + "content": "
Extractors | Modalities | Actionformer [32] | TemporalMaxer [24] | TriDet [21]
 |  | mAP @0.5 | mAP @0.75 | mAP @0.95 | Avg. | mAP @0.5 | mAP @0.75 | mAP @0.95 | Avg. | mAP @0.5 | mAP @0.75 | mAP @0.95 | Avg.
Baseline Extractors | V | 11.9 (±0.0) | 7.8 (±0.0) | 3.3 (±0.0) | 7.7 (±0.0) | 13.1 (±0.0) | 8.8 (±0.0) | 3.7 (±0.0) | 8.6 (±0.0) | 16.7 (±0.0) | 10.1 (±0.0) | 4.8 (±0.0) | 10.5 (±0.0)
 | V + A | 19.1 (+7.2) | 11.3 (+3.5) | 4.2 (+0.9) | 11.5 (+3.8) | 21.0 (+7.9) | 14.8 (+6.0) | 5.6 (+1.9) | 13.8 (+5.2) | 23.6 (+6.9) | 17.2 (+7.1) | 6.4 (+1.6) | 15.7 (+5.2)
Pre-trained on 360+x | V | 16.4 (+4.5) | 9.8 (+2.0) | 3.9 (+0.6) | 10.0 (+2.3) | 20.4 (+7.3) | 14.3 (+5.5) | 5.2 (+1.5) | 13.3 (+4.7) | 21.1 (+4.4) | 15.3 (+5.2) | 5.5 (+0.7) | 14.0 (+3.5)
 | V + A | 23.6 (+11.7) | 16.9 (+9.1) | 5.7 (+2.4) | 15.4 (+7.7) | 25.8 (+12.7) | 18.0 (+9.2) | 6.4 (+2.7) | 16.7 (+8.1) | 26.4 (+8.7) | 18.5 (+8.4) | 6.9 (+2.1) | 17.3 (+6.8)
 | V + A + D | 24.9 (+13.0) | 17.4 (+9.6) | 6.1 (+2.8) | 16.1 (+8.4) | 26.6 (+13.5) | 18.3 (+9.5) | 6.5 (+2.8) | 17.1 (+8.5) | 27.1 (+10.4) | 18.7 (+8.6) | 7.0 (+2.2) | 17.6 (+7.1)
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.246, + 0.471, + 0.321 + ], + "angle": 0, + "content": "segmentation of activities within a video stream over a specific time period. Current TAL approaches typically employ a two-stage paradigm [27, 32]. The first stage extracts features from the entire video, and the second stage predicts temporal segmentation based on these features." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.332, + 0.471, + 0.543 + ], + "angle": 0, + "content": "Feature extractors. Baseline extractors are widely utilised for various datasets, e.g. ActivityNet [5] and Ego4D [9], on the TAL task. The baseline video features are obtained from an I3D model pre-trained on the Kinetics400 dataset [13]. The baseline audio features are derived from the pre-classification layer following activation of the VG-Gish model, pre-trained on AudioSet [7]. There is no baseline extractor for directional binaural delay feature, so the \\(\\mathrm{V + A + D}\\) modality was not included accordingly. For a fair comparison, we reused our video classification models in section 4.2 as Pre-trained on \\(360 + x\\) extractors, following the same baseline extraction setup for both video and audio features. Additionally, the ResNet-18 feature extractor was used for directional binaural delay feature extraction." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.553, + 0.47, + 0.78 + ], + "angle": 0, + "content": "Experimental results. We provide a concise overview of the performance comparison for various temporal action localisation methods, including ActionFormer [32], TriDet [21] and TemporalMaxer [24], between the baseline extractors and our Pre-trained on \\( 360 + x \\) extractors. The summarised results are presented in Table 3, from which we can see that the introduction of additional modalities (i.e. audio and direction binaural delay) has a prominent positive impact on the TAL task, leading to performance improvements for both sets of extractors. This result highlights the importance of leveraging multiple modalities in enhancing the accuracy and effectiveness of temporal activity localisation techniques. Using our custom extractors can provide additional improvements, as the baseline extractors may not be optimised for our specific binocular or \\( 360^{\\circ} \\) views." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.788, + 0.308, + 0.804 + ], + "angle": 0, + "content": "4.4. Cross-modality Retrieval" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.812, + 0.471, + 0.903 + ], + "angle": 0, + "content": "In this context, we focus on a series of retrieval tasks that across modalities including audio, video and directional binaural delay. In a modality-specific retrieval scenario, the query modality (Q) serves as the input for retrieving the key modality (K) in the Q-to-K retrieval task. The performance evaluation metric \\( \\mathrm{R}\\theta \\) represents the recall at ranks \\( \\theta \\)." + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.244, + 0.892, + 0.272 + ], + "angle": 0, + "content": "Table 4. Q-to-Video retrieval results. The superscript* indicates modalities are co-trained. Recall reported with rank in \\(\\{1,5,10\\}\\)." + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.282, + 0.891, + 0.35 + ], + "angle": 0, + "content": "
Query Modality | R1 (%) | R5 (%) | R10 (%)
A | 39.14(±0.0) | 62.76(±0.0) | 79.21(±0.0)
A + D | 44.30(+5.16) | 66.92(+4.16) | 84.78(+5.57)
(A + D)* | 55.88(+16.74) | 72.53(+9.77) | 86.6(+7.39)
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.376, + 0.892, + 0.543 + ], + "angle": 0, + "content": "Q-to-Video retrieval results. Table 4 illustrates the retrieval results for the Query modality retrieve videos. In this table, \\( A + D \\) denotes a set of independently trained audio and directional binaural features employed as query features. Moreover, \\( (A + D)^* \\) signifies the collaborative training of these features instead of treating them independently. The inter-modality retrieval results shown in Table 4 clearly show the modality compliance quality of the \\( 360 + x \\) dataset. Besides Q-to-Video retrieval, we also performed Q-to-Audio and Q-to-Directional binaural delay experiments, details can be found in the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.555, + 0.851, + 0.571 + ], + "angle": 0, + "content": "4.5. Self-supervised Representation Learning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.579, + 0.892, + 0.791 + ], + "angle": 0, + "content": "Experiment setup. In this section, we investigated the impact of different self-supervised learning (SSL) methods using two engaging video pretext tasks: video pace (VP) prediction [25] and clip order (CO) shuffle prediction [29]. The VP task challenges the model to determine the pace of a video, while the CO task asks the model to rearrange shuffled video clips into their correct chronological order. The original VP and CO primarily concentrated on video data, but to capitalise on the advantages of multi-modality, we expanded these approaches to include audio and directional binaural delay modalities. This extension was done to align modality with the temporal coherence and dynamics observed in the video. For more comprehensive explanations, please refer to the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Experimental results. We first examined the impact of self-supervised learning models for video classification. Table 5 demonstrates the consistent precision gains achieved by utilising SSL pre-trained models. Notably, leveraging both video pace and clip order SSL techniques resulted in an average performance improvement of \\(\\sim 7\\%\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "19379" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.089, + 0.454, + 0.126 + ], + "angle": 0, + "content": "Table 5. Models with different pre-train methods were fine-tuned and tested on video classification. The experiments use all three video views. Reported in Avg. Prec. \\((\\%)\\)." + }, + { + "type": "table", + "bbox": [ + 0.085, + 0.128, + 0.451, + 0.216 + ], + "angle": 0, + "content": "
Pre-train Method | Modalities
 | V | V + A | V + A + D
From Scratch | 63.73(±0.0) | 77.32(±0.0) | 80.62
Video Pace [25] | 69.27(+5.5) | 79.56(+2.2) | 81.97
Clip Order [29] | 69.91(+6.2) | 80.14(+2.8) | 82.18
VP [25] + CO [29] | 76.84(+13.1) | 82.66(+5.3) | 83.32
" + }, + { + "type": "table_caption", + "bbox": [ + 0.464, + 0.089, + 0.887, + 0.126 + ], + "angle": 0, + "content": "Table 6. Comparison between supervised pre-trained extractors with SSL pretrained counterparts on TAL task. The experiments use all three video views with modalities (V+A+D)." + }, + { + "type": "table", + "bbox": [ + 0.467, + 0.128, + 0.887, + 0.217 + ], + "angle": 0, + "content": "
Pre-train Method | mAP @0.5 | mAP @0.75 | mAP @0.95 | Avg.
Supervised | 27.1 (±0.0) | 18.7 (±0.0) | 7.0 (±0.0) | 17.6 (±0.0)
Video Pace [25] | 29.4 (+2.3) | 19.6 (+0.9) | 7.4 (+0.4) | 18.8 (+1.2)
Clip Order [29] | 28.9 (+1.8) | 19.3 (+0.6) | 7.3 (+0.3) | 18.5 (+0.9)
VP [25] + CO [29] | 30.3 (+3.2) | 20.2 (+1.5) | 7.9 (+0.9) | 19.5 (+1.9)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.145, + 0.23, + 0.825, + 0.246 + ], + "angle": 0, + "content": "Table 7. Following original setup of THUMOS14 dataset [8], our dataset adaptation task uses video modality only." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.248, + 0.89, + 0.318 + ], + "angle": 0, + "content": "
Feature Extractor | mAP@0.3 | mAP@0.4 | mAP@0.5 | mAP@0.6 | mAP@0.7 | Avg.
Kinetics400 [13] (Pre-train) | 83.7 (±0.0) | 80.2 (±0.0) | 72.8 (±0.0) | 62.4 (±0.0) | 47.4 (±0.0) | 69.5 (±0.0)
360+x (Pre-train) | 84.5 (+0.8) | 81.0 (+0.8) | 73.4 (+0.6) | 65.9 (+3.5) | 54.6 (+7.2) | 71.9 (+2.4)
Kinetics400 [13] (Pre-train) and 360+x (Fine-tune) | 85.3 (+1.6) | 81.8 (+1.6) | 74.9 (+2.1) | 68.1 (+5.7) | 58.2 (+10.8) | 73.7 (+4.2)
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.342, + 0.473, + 0.525 + ], + "angle": 0, + "content": "We proceeded to perform experiments using SSL pretrained models as feature extractors for the temporal action localisation task incorporating all three modalities \\((\\mathrm{V} + \\mathrm{A} + \\mathrm{D})\\) with the TriDet framework [21]. Since a training-from-scratch model cannot serve as the first-stage extractor, we employed the supervised extractors from section 4.2 as a comparison. The summarised results in Table 6 indicate that pre-training with video pace (VP) or clip order (CO) individually leads to an average performance improvement of \\(\\sim 1.2\\%\\) and \\(\\sim 0.9\\%\\) respectively on average, compared to the supervised baseline. The combination of both SSL methods yields the highest performance gain of \\(\\sim 1.9\\%\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.54, + 0.443, + 0.558 + ], + "angle": 0, + "content": "4.6. Pre-training Model for Dataset Adaptation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.566, + 0.47, + 0.643 + ], + "angle": 0, + "content": "This section explores the efficacy of leveraging models pretrained on the \\(360 + x\\) dataset for adaptation to other datasets like THUMOS14 [8]. By adhering to THUMOS14 setup, the experiments use TriDet framework [21] for conducting Temporal Action Localisation (TAL)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.644, + 0.473, + 0.903 + ], + "angle": 0, + "content": "The performance of this experiment, specifically the mean average precision (mAP) scores covering IoU thresholds from 0.3 to 0.7, are presented in Table 7. As outlined by the results, exclusive reliance on \\(360 + x\\) video data for training showcases the potential for enhanced performance as compared to training solely based on the Kinetics400 dataset [13]. Remarkably, this performance improvement becomes more prominent at higher IoU thresholds. The utmost optimal performance, however, emerges through a two-step approach, commencing with pre-training on the Kinetics400 dataset followed by fine-tuning on the \\(360 + x\\) dataset with an average \\(\\sim 4.2\\%\\) improvement compared to solely Kinetics400 pre-trained extractor. This finding showcases that the employment of the \\(360 + x\\) dataset for feature extractor training can be beneficial for dataset adaptation in sub-stream tasks. More experimental results on dataset integration are available in the supplementary materials." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.34, + 0.629, + 0.357 + ], + "angle": 0, + "content": "5. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.366, + 0.895, + 0.713 + ], + "angle": 0, + "content": "In this work, we studied the problem of panoptic scene understanding and presented, to our knowledge, the first-of-its-kind dataset \\( -360 + x \\) to support the study. The proposed \\( 360 + x \\) is a large-scale multi-modal dataset that consists of several different viewpoints (e.g. egocentric, third-person-view, and panoramic view) and covers various real-world activities in real daily life. With the most possibly available perspectives describing a real-world scene, \\( 360 + x \\) aims to support the research in understanding the world around us in a way that humans understand (and even beyond). Additionally, we also presented a benchmark study of several scene understanding tasks based on this newly collected dataset, with a comparison to other existing datasets. 
Extensive experimental analysis validated the effectiveness of each of the perspectives within our dataset, and also suggested interesting insights, confirming that with more viewpoints or data modalities, the understanding of a scene could be more comprehensive. Surprisingly, models trained without manual annotation (i.e. self-supervised learning) on our dataset even perform better than those trained with human annotations in a fully supervised manner. We hope this new dataset could bring in new directions towards scene understanding and look forward to the research on them." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.721, + 0.649, + 0.737 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.744, + 0.894, + 0.882 + ], + "angle": 0, + "content": "This project was partially supported by the Ramsay Research Fund, and the Royal Society Short Industry Fellowship (SIF\\R1\\231009). Y. Hou and C. Qu were partially supported by the CSC grant (No.202308060328) and Allsee Technologies Ltd., respectively. The computations described in this research were performed using the Baskerville Tier 2 HPC service1 (funded by EP/T022221/1 and EP/W032244/1) and is operated by Advanced Research Computing at the University of Birmingham." + }, + { + "type": "page_footnote", + "bbox": [ + 0.518, + 0.887, + 0.689, + 0.901 + ], + "angle": 0, + "content": "1https://www.baskerville.ac.uk/" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "19380" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.117, + 0.47, + 0.185 + ], + "angle": 0, + "content": "[1] Keshav Bhandari, Mario A DeLaGarza, Ziliang Zong, Hugo Latapie, and Yan Yan. Egok360: A 360 egocentric kinetic human activity video dataset. In 2020 IEEE International Conference on Image Processing (ICIP), pages 266-270. IEEE, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.188, + 0.472, + 0.257 + ], + "angle": 0, + "content": "[2] Honglie Chen, Weidi Xie, Andrea Vedaldi, and Andrew Zisserman. Vggsound: A large-scale audio-visual dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 721-725. IEEE, 2020. 1, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.26, + 0.47, + 0.316 + ], + "angle": 0, + "content": "[3] Ziyang Chen, David F Fouhey, and Andrew Owens. Sound localization by self-supervised time delay estimation. In European Conference on Computer Vision, pages 489-508. Springer, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.318, + 0.47, + 0.401 + ], + "angle": 0, + "content": "[4] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, et al. Scaling egocentric vision: The epic-kitchens dataset. In Proceedings of the European Conference on Computer Vision (ECCV), pages 720-736, 2018. 1, 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.404, + 0.47, + 0.473 + ], + "angle": 0, + "content": "[5] Bernard Ghanem Fabian Caba Heilbron, Victor Escorcia and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 961-970, 2015. 
1, 2, 4, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.476, + 0.47, + 0.559 + ], + "angle": 0, + "content": "[6] Jort F. Gemmeke, Daniel P. W. Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R. Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.562, + 0.47, + 0.644 + ], + "angle": 0, + "content": "[7] Jort F. Gemmeke, Daniel P. W. Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R. Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), page 776-780. IEEE Press, 2017. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.647, + 0.469, + 0.702 + ], + "angle": 0, + "content": "[8] A. Gorban, H. Idrees, Y.-G. Jiang, A. Roshan Zamir, I. Laptev, M. Shah, and R. Sukthankar. Thumos challenge: Action recognition with a large number of classes. http://www.thumos.info, 2015. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.705, + 0.47, + 0.8 + ], + "angle": 0, + "content": "[9] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18995-19012, 2022. 1, 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.804, + 0.47, + 0.899 + ], + "angle": 0, + "content": "[10] Chunhui Gu, Chen Sun, David A Ross, Carl Vondrick, Caroline Pantofaru, Yeqing Li, Sudheendra Vijayanarasimhan, George Toderici, Susanna Ricco, Rahul Sukthankar, et al. Ava: A video dataset of spatio-temporally localized atomic visual actions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6047-6056, 2018. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.117, + 0.472, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.894, + 0.232 + ], + "angle": 0, + "content": "[12] Shawn Hershey, Sourish Chaudhuri, Daniel PW Ellis, Jort F Gemmeke, Aren Jansen, R Channing Moore, Manoj Plakal, Devin Platt, Rif A Saurous, Bryan Seybold, et al. Cnn architectures for large-scale audio classification. In 2017 iee international conference on acoustics, speech and signal processing (icassp), pages 131-135. IEEE, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.233, + 0.894, + 0.3 + ], + "angle": 0, + "content": "[13] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. 1, 2, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.302, + 0.892, + 0.357 + ], + "angle": 0, + "content": "[14] H. Kuehne, H. Jhuang, E. Garrote, T. Poggio, and T. Serre. 
HMDB: a large video database for human motion recognition. In Proceedings of the International Conference on Computer Vision (ICCV), 2011. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.358, + 0.892, + 0.425 + ], + "angle": 0, + "content": "[15] Hildegard Kuehne, Hueihan Jhuang, Estíbaliz Garrote, Tomaso Poggio, and Thomas Serre. Hmdb: a large video database for human motion recognition. In 2011 International conference on computer vision, pages 2556-2563. IEEE, 2011. 1, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.428, + 0.892, + 0.482 + ], + "angle": 0, + "content": "[16] Yiyi Liao, Jun Xie, and Andreas Geiger. Kitti-360: A novel dataset and benchmarks for urban scene understanding in 2d and 3d. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.484, + 0.892, + 0.537 + ], + "angle": 0, + "content": "[17] Benjamin R Meagher. Ecologizing social psychology: The physical environment as a necessary constituent of social processes. *Personality and social psychology review*, 24(1): 3-23, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.539, + 0.892, + 0.566 + ], + "angle": 0, + "content": "[18] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41, 1995. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.568, + 0.892, + 0.622 + ], + "angle": 0, + "content": "[19] Xiaokang Peng, Yake Wei, Andong Deng, Dong Wang, and Di Hu. Balanced multimodal learning via on-the-fly gradient modulation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.624, + 0.892, + 0.706 + ], + "angle": 0, + "content": "[20] Daniel Scharstein, Heiko Hirschmüller, York Kitajima, Greg Krathwohl, Nera Nesic, Xi Wang, and Porter Westling. High-resolution stereo datasets with subpixel-accurate ground truth. In Pattern Recognition: 36th German Conference, GCPR 2014, Münster, Germany, September 2-5, 2014, Proceedings 36, pages 31-42. Springer, 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.707, + 0.892, + 0.774 + ], + "angle": 0, + "content": "[21] Dingfeng Shi, Yujie Zhong, Qiong Cao, Lin Ma, Jia Li, and Dacheng Tao. Tridet: Temporal action detection with relative boundary modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18857-18866, 2023. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.776, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[22] Shuran Song, Andy Zeng, Angel X Chang, Manolis Savva, Silvio Savarese, and Thomas Funkhouser. Im2pano3d: Extrapolating 360 structure and semantics beyond the field of view. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3847-3856, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.859, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[23] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402, 2012. 
1, 2, 4, 5" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "19381" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[24] Tuan N Tang, Kwonyoung Kim, and Kwanghoon Sohn. Temporalmaxer: Maximize temporal context with only max pooling for temporal action localization. arXiv preprint arXiv:2303.09055, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.47, + 0.19 + ], + "angle": 0, + "content": "[25] Jiangliu Wang, Jianbo Jiao, and Yunhui Liu. Self-supervised video representation learning by pace prediction. In European Conference on Computer Vision, 2020. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.192, + 0.471, + 0.247 + ], + "angle": 0, + "content": "[26] Weiyao Wang, Du Tran, and Matt Feiszli. What makes training multi-modal classification networks hard? In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12695-12705, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.249, + 0.47, + 0.303 + ], + "angle": 0, + "content": "[27] Xiang Wang, Zhiwu Qing, Ziyuan Huang, Yutong Feng, Shiwei Zhang, Jianwen Jiang, Mingqian Tang, Changxin Gao, and Nong Sang. Proposal relation network for temporal action detection. arXiv preprint arXiv:2106.11812, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.305, + 0.47, + 0.373 + ], + "angle": 0, + "content": "[28] Jianxiong Xiao, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Recognizing scene viewpoint using panoramic place representation. In 2012 IEEE Conference on Computer Vision and Pattern Recognition, pages 2695-2702. IEEE, 2012. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.375, + 0.47, + 0.431 + ], + "angle": 0, + "content": "[29] Dejing Xu, Jun Xiao, Zhou Zhao, Jian Shao, Di Xie, and Yueting Zhuang. Self-supervised spatiotemporal learning via video clip order prediction. In Computer Vision and Pattern Recognition (CVPR), 2019. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.433, + 0.47, + 0.501 + ], + "angle": 0, + "content": "[30] Guorun Yang, Xiao Song, Chaoqin Huang, Zhidong Deng, Jianping Shi, and Bolei Zhou. Drivingstereo: A large-scale dataset for stereo matching in autonomous driving scenarios. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 899–908, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.503, + 0.47, + 0.558 + ], + "angle": 0, + "content": "[31] Serena Yeung, Olga Russakovsky, Ning Jin, Mykhaylo Andriluka, Greg Mori, and Li Fei-Fei. Every moment counts: Dense detailed labeling of actions in complex videos. International Journal of Computer Vision, 126:375–389, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.56, + 0.47, + 0.613 + ], + "angle": 0, + "content": "[32] Chen-Lin Zhang, Jianxin Wu, and Yin Li. Actionformer: Localizing moments of actions with transformers. In European Conference on Computer Vision, pages 492-510. Springer, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.616, + 0.47, + 0.685 + ], + "angle": 0, + "content": "[33] Hang Zhao, Antonio Torralba, Lorenzo Torresani, and Zhicheng Yan. Hacs: Human action clips and segments dataset for recognition and temporal localization. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8668-8678, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.687, + 0.47, + 0.741 + ], + "angle": 0, + "content": "[34] Bolei Zhou, Agata Lapedriza, Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 million image database for scene recognition. IEEE transactions on pattern analysis and machine intelligence, 40(6):1452-1464, 2017. 4" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "19382" + } + ] +] \ No newline at end of file diff --git a/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_origin.pdf b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cf7d5dffec6895bbdcfb757f10d70f8fb3407229 --- /dev/null +++ b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43351f3e0b4a971218148f732f3953cef594fe528c6ee4da25b9b730a699e7a0 +size 1039950 diff --git a/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/full.md b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/full.md new file mode 100644 index 0000000000000000000000000000000000000000..4e84ffc70bd0abc706277ace47ce5ba1097e59bd --- /dev/null +++ b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/full.md @@ -0,0 +1,251 @@ +# $360 + x$ : A Panoptic Multi-modal Scene Understanding Dataset + +Hao Chen Yuqi Hou Chenyuan Qu Irene Testini Xiaohan Hong Jianbo Jiao + +The Machine Intelligence $+x$ Group, University of Birmingham, UK + +Project page: https://x360dataset.github.io/ + +# Abstract + +Human perception of the world is shaped by a multitude of viewpoints and modalities. While many existing datasets focus on scene understanding from a certain perspective (e.g. egocentric or third-person views), our dataset offers a panoptic perspective (i.e. multiple viewpoints with multiple data modalities). Specifically, we encapsulate third-person panoramic and front views, as well as egocentric monocular/binocular views with rich modalities including video, multi-channel audio, directional binaural delay, location data and textual scene descriptions within each scene captured, presenting comprehensive observation of the world. To the best of our knowledge, this is the first database that covers multiple viewpoints with multiple data modalities to mimic how daily information is accessed in the real world. Through our benchmark analysis, we presented 5 different scene understanding tasks on the proposed $360 + x$ dataset to evaluate the impact and benefit of each data modality and perspective in panoptic scene understanding. We hope this unique dataset could broaden the scope of comprehensive scene understanding and encourage the community to approach these problems from more diverse perspectives. + +# 1. Introduction + +Scene understanding is crucial for robotics and artificial intelligent systems to perceive the environment around them. As humans, we intuitively understand the world through primarily visual inputs, as well as auditory and other sensory inputs (e.g. touch and smell). 
The community has made remarkable progress in mimicking human perception with contributions from various datasets and benchmarks [4, 5, 7, 9, 13, 15, 23]. These efforts have approached scene understanding from a diverse range of perspectives, such as normal frontal-view vision [5, 13, 23], panoramic view [22, 28], binocular/stereo view [20, 30], egocentric monocular view [4, 9], and audio [2, 7]. + +While there has been exciting progress in understanding scenes from a limited number of perspectives, it is notable + +that humans understand the world by incorporating a combination of viewpoints, in a holistic manner. This includes an egocentric view for activities we are involved in and a third-person view for activities we are observing. In addition to visual cues, we also rely on a range of modalities, including hearing and binaural delay, to fully comprehend our surroundings and track movements. Our prior knowledge of the scene, such as localisation information and scene descriptions, has also supported our understanding of the environment (e.g. the cafe in the city centre may be different from a similar cafe on a university campus). + +Taking the above observations into consideration, a new dataset covering all these aforementioned aspects is presented in this work, to provide a panoptic scene understanding, termed $360 + x$ dataset. This new dataset offers a diverse selection of perspectives, including a $360^{\circ}$ panoramic view providing a complete panoptic view of the environment, and a third-person front view that highlights the region of interest that has the most movements in front of the camera. Additionally, we have included egocentric monocular and binocular videos to capture the first-person perspective of individuals in the environment. These viewpoints are complemented by aligned multi-channel audio with directional binaural delay information, as well as location information and scene descriptions as metadata. An illustration of the presented dataset collection system is shown in Figure 1. + +Based on this newly collected dataset, we perform 5 visual-audio scene understanding tasks to analyse the contribution and effectiveness of each data viewpoint and modality. Particularly, we look at video classification, temporal action localisation, self-supervised representation learning, cross-modality retrieval and pre-training model migration for dataset adaptation, with interesting findings and insights from extensive experimental analysis. The main contributions of this work are summarised as follows: + +- We propose to our knowledge the first and probably the most authentic panoptic scene understanding dataset covering multiple viewpoints and data modalities in the wild. +- We perform extensive experimental analysis to validate the effectiveness of the proposed dataset on different tasks + +![](images/cff7a8284de8415f6a93cc4e75a54533058f0cdc1222c6e3ea0e80912a4ca199.jpg) +Figure 1. Illustration of the proposed $360 + x$ dataset. The $360^{\circ}$ camera records fish-eye raw videos with front and back lenses. These videos are merged to create a spherical $360^{\circ}$ panorama (middle-up figure, zoom in for details), which is then transformed to (a) $360^{\circ}$ panoramic data using equirectangular projection. The (b) third-person front view is obtained by de-warping the rich movements region highlighted red in the spherical field of $360^{\circ}$ panorama (the middle-left figure). 
By wearing stereo cameras, the capturers record (c) egocentric clips while staying visible to the fixed $360^{\circ}$ camera (central ellipse). (e) Directional audio time delay data is generated from left and right audio inputs (d) from the $360^{\circ}$ camera by interaural time delay process [3]. This helps locate sound sources in the $360^{\circ}$ panorama. + +from various perspectives and modalities. + +- Interesting findings are derived from the analysis, suggesting the effectiveness of each viewpoint and data modality. Learning from this new dataset without supervision even shows a better performance than that from a model trained in a supervised manner. + +# 2. Related Works + +Video understanding and analysis. Video analysis has been extensively studied in the literature. Existing datasets such as UCF101 [23], ActivityNet [5] and Kinetics [13] have provided large-scale video data for activity understanding tasks. However, these datasets often exhibit lower complexity compared to real-world scenes. Some datasets, like MultiThumos [31], aim to increase complexity but are limited to specific scenarios with domain-specific actions, deviating from real-life daily activities. In contrast, our dataset builds upon the activity labels from ActivityNet [5] and strives to capture data that closely simulates real-life scenarios. Apart from that, we also include multiple data viewpoints and modalities as compared to existing datasets. + +Panoramic scene understanding. In recent years, panoramic scene understanding has gained significant attention due to its holistic reflection of the environment. Several datasets have been introduced to facilitate research in this area. For instance, the KITTI-360 [16] provides a collection of panoramic images for urban scene analysis. EGOK360 [1] has been introduced to address the need for video data with a panoramic view. Im2Pano3D [22] presents a panoramic dataset for indoor scenarios with semantic segmentation and focuses on the prediction from a partial observation. However, these datasets primarily focus on panoramic visual data while lacking the incorporation of other viewpoints (e.g. egocentric) and data modalities (e.g. audio), limiting their potential for comprehensive scene understanding and analysis. + +Egocentric video analysis. Focusing on understanding scenes from a first-person perspective, existing datasets such as EPIC-Kitchens [4] and Ego4D [9] provide egocentric video data collected during daily activities. They have contributed to research on activity recognition and object detection in egocentric scenes. Unlike these datasets fo + +cusing on egocentric views, our dataset also covers other viewpoints and modalities aiming at supporting scene understanding research in a more panoptic manner. + +Visual-audio analysis. Integrating visual and audio information often enhances the performance of models in scene understanding tasks, as it provides richer contextual information. There are some existing datasets available to support research in audio-visual analysis, e.g. AVA [10], AudioSet [6] and VGGSound [2], to name a few. However, these datasets are lacking in multiple viewpoints and the directional property of audio signals, which are provided in the proposed new dataset. + +# 3. $360 + x$ Dataset + +# 3.1. Data Acquisition and Alignment + +Two main devices were used for our data collection: the Insta 360 One X2 and Snapchat Spectacles 3 cameras. 
The 360 One X2 has two fish-eye cameras that capture $360^{\circ}$ panoramic visual information of the scene at a resolution of $5760 \times 2880$ and a frame rate of 25 FPS. Additionally, directional audio was recorded using four microphones in directional audio mode. The Spectacles 3 has a stereo camera attached to a pair of glasses, used to capture the egocentric binocular view of the scene at a resolution of $2432 \times 1216$ and a frame rate of 60 FPS.

Once we obtained the raw data, we aligned the different viewpoints and modalities as follows. The initial raw footage captured by the two fish-eye cameras on the $360^{\circ}$ camera takes the form of two circular videos, which are stitched and de-warped into a spherical panorama. This panorama can be projected into an equirectangular format to produce a panoramic video. However, this direct compression of the spherical view into a rectangular format can introduce unnatural distortions. In order to provide a more natural and informative view, we inversely project a rectangular region into equirectangular space and use it to crop the spherical panorama. We use optical flow to determine the crop region with the most motion activity in the spherical panorama field. This crop region is then projected back to a rectangular image, resulting in an informative video view with minimal distortion.

Egocentric binocular videos, as shown in Figure 1(c), were captured in clips of approximately 30 seconds to 1 minute each. A total of 1 to 5 stereo clips were recorded for each scene, scattered throughout the corresponding $360^{\circ}$ video, which lasts 6 minutes on average. In addition to the stereo videos, we also provide the corresponding monocular videos for the egocentric view.

The audio recordings, with left/right channels, were temporally aligned with their corresponding videos. The four-channel audio accompanying the $360^{\circ}$ panoramic video is provided as well for further exploration. Moreover, we also provide the directional information of the audio, represented as the interaural time delay of the sound estimated with the method introduced in [3]. GPS and weather information are also provided.

Given the possibility of occlusions in regions visible to the egocentric camera but not to the $360^{\circ}$ camera, we ensured during data collection that the cameras were positioned in close proximity. This setup, with clear mutual visibility, allowed both cameras to capture a similar overall scene.
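The directional cue distributed with each clip is this estimated left/right interaural time delay. While [3] uses a learned, self-supervised estimator, the underlying idea can be illustrated with a classical cross-correlation baseline. The sketch below is illustrative only; the function name, the 48 kHz sample rate and the delay cap are assumptions rather than part of the dataset tooling.

```python
import numpy as np

def interaural_time_delay(left, right, sample_rate=48_000, max_delay_s=0.001):
    """Estimate the left/right arrival-time difference (in seconds) by
    cross-correlating the two channels. With this convention, a positive lag
    means the left channel lags the right one, i.e. the source is closer to
    the right microphone. A learned estimator (as in [3]) would replace this."""
    n = len(left)
    size = 2 * n  # zero-pad so the circular correlation equals the linear one
    L = np.fft.rfft(left, size)
    R = np.fft.rfft(right, size)
    corr = np.fft.irfft(L * np.conj(R), size)
    # Re-order so the entries correspond to lags -(n-1) .. (n-1).
    corr = np.concatenate((corr[-(n - 1):], corr[:n]))
    lags = np.arange(-(n - 1), n)
    # Only a few milliseconds of delay are physically plausible for a small rig.
    mask = np.abs(lags) <= int(max_delay_s * sample_rate)
    best_lag = lags[mask][np.argmax(corr[mask])]
    return best_lag / sample_rate
```

Such a per-clip delay is what Figure 2(f) later summarises, with the sign indicating whether the dominant sound source sits to the left or to the right of the camera.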
# 3.2. Scene Selection

To broaden scene coverage and promote multi-modal collaborative learning, we adopted a strategic selection process for captured scenes, governed by three key criteria:

i) Scene categories must be carefully crafted to be comprehensive yet concise, while also being authoritative and reflective of everyday life. The location where a scene unfolds plays a crucial role in providing essential environmental context to the activities within it [17]. Distinct scenes can impart unique meanings or emotional nuances to identical events. For instance, the act of chatting could convey divergent implications in a school setting as compared to a home environment. Such nuances are critical as they offer deeper insights into the contextual interpretation of behaviours and interactions in varied settings.

ii) The data should ideally span a wide array of weather and lighting conditions. This criterion aims to ensure the inclusion of both indoor and outdoor activities under various environmental scenarios. Such diversity is important in accurately representing the multifaceted nature of daily life and the various conditions in which these activities occur.

iii) Our third criterion is the inclusion of scenarios rich in distinctive sound sources, particularly those where multiple activities co-occur. It is essential for the dataset to not only visually represent these activities but also to capture the corresponding auditory elements. The goal is to reflect, as much as possible, the complexity and realism of real-world environments, marked by simultaneous and varied actions and behaviours.

It is worth noting that our dataset was collected across several countries, including the United Kingdom (e.g. London, Birmingham, Cardiff and Jersey), France (Paris), Spain (e.g. Oviedo and Picos de Europa), China (e.g. Guangzhou and Shenzhen), and Japan (e.g. Kyoto and Osaka). During the data collection, the $360^{\circ}$ camera was placed statically to record the scene, while a capturer wearing the Spectacles glasses recorded first-person interactions with the scene.

![](images/ff9d14f6190e582495ba4849c002e12299ba6bd533ce6c87f4b0afa652b81c1b.jpg)
(a). Distribution of the scene categories (number).

![](images/650c19bd920fad706ffcab352e0f1d1fc3f168472a5e5867c094a0845e440f36.jpg)
(b). Geographical distribution of actions.

![](images/8b68580db91837f64da6a68edf3c21b70b7c70d54ca34f90f9f346aca235a498.jpg)
(c). Overall distribution of action durations.

![](images/df7877990d8be77e8a12fc7f8df19206e525faa0882e849874cba44e7a1b34d3.jpg)
(d). Distribution comparison of the number of action instances per video.

![](images/d8d992fb3e4b2a6a9391bb2d6502753c64cc6c931b0b97c2d2d032ea162a90ee.jpg)
(e). Capture time of the day.

![](images/0a1eba5ead79a9e63953d95dda3e60b66d877b72f3bab94187dea5c4d8614333.jpg)
(f). Binaural delay per clip.

Figure 2. Dataset statistics: distributions of (a) scene categories, (b) actions per city, (c) temporal action instance durations, (d) the number of action instances per video, (e) capture time of day, and (f) binaural delay per clip.

Sensitive data handling. Our dataset was collected in a real-world setting and may contain sensitive personal information (e.g. human faces). To ensure ethical and responsible research, the video capture was conducted with proper consent. Additionally, we have taken measures to protect privacy by anonymising the data. This includes applying a face detection mechanism to outline predicted face locations in each frame and applying blurring filters, maintaining meaningful details while ensuring information security. More detailed information on our privacy protection measures can be found in the supplementary material; an illustrative detect-and-blur pass is sketched below.
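As a rough illustration of the anonymisation step described above, a per-frame detect-and-blur pass could look like the following. OpenCV's stock Haar face detector is used here purely as a stand-in; the detector and blurring settings actually used are described in the supplementary material and may differ.

```python
import cv2

# Stand-in detector: the paper's actual face-detection mechanism may differ.
_face_detector = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

def anonymise_frame(frame_bgr):
    """Blur every detected face region in a single video frame."""
    gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
    faces = _face_detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    out = frame_bgr.copy()
    for (x, y, w, h) in faces:
        roi = out[y:y + h, x:x + w]
        # A heavy Gaussian blur removes identity while keeping coarse context.
        out[y:y + h, x:x + w] = cv2.GaussianBlur(roi, (51, 51), 0)
    return out
```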
# 3.3. Data Annotation

Scene label rationale. The $360 + x$ dataset comprises a total of 28 scene categories (15 indoor scenes and 13 outdoor scenes), as illustrated in Figure 2(a). To establish comprehensive and authoritative scene categories that reflect daily life, we referred to the Places Database [34], which is derived from WordNet [18], as our primary basis. We then leveraged the semantic analysis capabilities of large language models to filter and classify a multitude of everyday scenes. This curation resulted in a refined set of 28 scene categories, each symbolising an aspect of daily life. The recordings themselves concentrate on capturing common occurrences within conventional settings, providing a realistic depiction of everyday life. Detailed descriptions defining each category, along with discussions regarding these constraints and potential sampling biases, are presented in the supplementary material.

Temporal segmentation label. We also provide temporal segment labelling for understanding the activities in the captured scenes. We follow the activity hierarchy standard defined by ActivityNet [5], which provides a comprehensive categorisation of human activities consisting of seven top-level categories (Personal Care, Eating and Drinking, Household, Caring and Helping, Working, Socialising and Leisure, and Sports and Exercises). To capture the diversity and granularity of activities within each category, we defined a total of 38 action classes covering specific actions and behaviours. To ensure high-quality annotations, the temporal segments were labelled by three experienced annotators, each of whom independently annotated the temporal segments corresponding to the activities in the videos. We then merged the individual annotations and resolved any discrepancies through discussion until the annotators reached consensus.

# 3.4. Dataset Statistics and Analysis

Overview. Existing publicly available datasets primarily focus on a single visual modality [4, 5, 13, 15, 23]. In contrast, our dataset introduces a novel approach by collecting multiple views and modalities, as presented in Table 1 and detailed below.

Table 1. Dataset comparison. Ego: Egocentric, V: Video, A: Audio, A+V: Audio-visual events.
DatasetVideo ViewpointsOther ModalitiesStatisticsAttributions
Third-person Front View360° PanoramicEgo MonocularEgo BinocularNormal AudioDirectional Binaural DelayGPS InfoAvg DurationTotal Duration(s)Frames Count(K)Annotations SourceMultiple Events
UCF101 [23]XXXXX7.21 s96,0002,400VX
Kinetics [13]XXXXXX10 s2,998,80074,970VX
HMDB51 [14]XXXXXX3 s21,426643VX
ActivityNet [5]XXXXXX2 min2,332,80011,664V
EPIC-Kitchens [4]XXXXX7.6 min198,00011,500VX
Ego4D [9]XXXX8 min13,212,000-A+V
360+x (Ours)6.2 min244,0008,579A+V
These views and modalities comprise $360^{\circ}$ panoramic video, third-person front view video, egocentric monocular video, egocentric binocular video, normal audio, directional binaural delay, and location information with textual scene descriptions. This diverse range of modalities provides multiple dimensions and clues for understanding and analysing complex scenes. Our dataset consists of 2,152 videos representing 232 data examples, with 464 videos captured using the $360^{\circ}$ camera and the remaining 1,688 recorded with the Spectacles camera.

Figure 2(a) presents the distribution of video counts across each of the 28 scene categories. Our dataset is characterised by a balanced distribution of data across these scenes. Notably, it diverges from conventional databases like UCF101 [23], Kinetics [13], HMDB [15], and ActivityNet [5], particularly in terms of average video duration, which is approximately 6.2 minutes. This longer duration is crucial for maintaining the integrity and coherence of actions within each scene, allowing for a comprehensive temporal analysis of the activities.

Temporal segment label. The temporal segment annotations in our dataset support fine-grained analysis of activities. We defined 38 action classes representing specific actions and behaviours. The length of each segment labelled with a specific activity varies across the dataset, as depicted in Figure 2(c). We acknowledge the significance of audio in accurately identifying certain actions, such as 'coughing' or 'clapping'; our dataset therefore incorporates audio information to enhance action recognition accuracy [4, 5, 13, 15, 23], as shown in Table 1.

Comparative complexity. Because it closely reflects real scenes, our dataset offers more complexity than previous datasets. This complexity arises from the diverse range of activities and interactions captured, resulting in a more challenging and realistic setting for scene understanding and activity recognition. As shown in Figure 2(d), most existing datasets, such as UCF101 [23], Kinetics [13], and HMDB51 [14], typically contain one action instance per video. While datasets like Ego4D [9] and ActivityNet [5] have large volumes and broad coverage, they often contain a limited number of action instances per individual video.

The HACS dataset [33] contains more action instances per video, but still pales in comparison to the richness of the proposed dataset. Our dataset surpasses these existing datasets in terms of the number of action instances per video, showcasing the extensive variety of activities captured. The improved complexity and richness of our dataset enable follow-up research to explore and develop more robust algorithms, pushing the boundaries of scene understanding in real-world contexts.

Data distribution. We have ensured a balanced distribution across various dimensions, including scene categories, action instances, and binaural delay. Figure 2(a) depicts the number of videos across the 28 scene categories, demonstrating comprehensive coverage. Notably, the dataset achieves an almost equal proportion of indoor and outdoor scenes, accounting for $54.7\%$ and $45.3\%$ respectively. Each scene naturally contains multiple diverse action instances, and different scenes can share common action instances.
Figure 2(b) shows the types of action per location, reflecting the geographic distribution and diversity of the data: the inner circle shows the location and the outer circle shows the action types captured at each location. As illustrated in Figure 2(c), the distribution of action durations shows that our dataset captures extensive and realistic human behaviours across natural scenes. One interesting observation is the high frequency of the action 'operating phone', which accounts for $17.54\%$ of the total duration, reflecting mobile phone usage in modern daily life. Additionally, the dataset offers valuable directional audio to supplement visual understanding. The distribution of capture times corresponds with natural human activity, as shown in Figure 2(e): activities are mainly concentrated during the daytime, especially in the afternoon and evening. Figure 2(f) illustrates the diversity of the binaural delay of each clip, where positive values indicate that the sound is directed towards the left and negative values towards the right. In summary, the presented $360 + x$ dataset covers broad modalities and diversity with an authentic distribution from different perspectives, mimicking real daily life.

# 4. Benchmark and Experiments

To establish a comprehensive benchmark for the presented $360 + x$ dataset, we choose five scene understanding tasks to explore the use of multiple viewpoints and modalities: video scene classification, temporal action localisation, cross-modality retrieval, self-supervised representation learning, and dataset adaptation.

Remark: Unless specifically stated otherwise, the experiments on $360 + x$ utilise three views: the $360^{\circ}$ view, the egocentric binocular view, and the third-person front view.

# 4.1. Experimental Setting

Models. We employed a consistent set of model backbones across different tasks to minimise model interference, except for the temporal action localisation task (detailed in Section 4.3). We followed the commonly used setup and selected I3D [13] as our video backbone. To handle audio, we chose VGGish [12] as our audio model. Additionally, for directional binaural feature extraction, we utilised a ResNet-18 model [11]. A linear layer is placed after the backbones to carry out each specific task based on the backbone output features.

It is important to note that simply concatenating the features of all modalities can diminish the information that multi-modality could provide [26]. Therefore, instead of solely concatenating modality features, we leverage a hierarchical attention mechanism for multi-modality integration. In this approach, the directional binaural feature serves as an attention query over the audio feature, encapsulating the directional information into the audio representation; the direction-aware audio feature in turn acts as a query that attends over the video feature. This mechanism creates a synergistic representation that integrates the features of all modalities. For more details and in-depth analysis, please refer to the supplementary material.
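To make the fusion described above concrete, the following is a minimal sketch of such a hierarchical, query-driven integration using standard multi-head attention. The feature dimension, number of heads and module names are illustrative assumptions, not the authors' exact implementation (which is given in the supplementary material).

```python
import torch
import torch.nn as nn

class HierarchicalFusion(nn.Module):
    """Directional-delay features query the audio features, and the resulting
    direction-aware audio features then query the video features."""

    def __init__(self, dim=512, heads=8):
        super().__init__()
        self.delay_to_audio = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.audio_to_video = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.norm_audio = nn.LayerNorm(dim)
        self.norm_out = nn.LayerNorm(dim)

    def forward(self, video_feat, audio_feat, delay_feat):
        # video_feat: (B, Tv, dim), audio_feat: (B, Ta, dim), delay_feat: (B, Td, dim)
        # 1) Inject directional information into the audio representation.
        a, _ = self.delay_to_audio(query=delay_feat, key=audio_feat, value=audio_feat)
        audio_dir = self.norm_audio(audio_feat + a.mean(dim=1, keepdim=True))
        # 2) Let the direction-aware audio attend over the video features.
        v, _ = self.audio_to_video(query=audio_dir, key=video_feat, value=video_feat)
        return self.norm_out(v).mean(dim=1)  # (B, dim) joint representation

# A linear task head (as described above) would then operate on this fused vector.
```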
Training and verification setup. For each temporal action localisation model, we follow its original training settings. For the I3D, VGGish, and ResNet-18 networks, we train for 200 epochs with the parameters described in [19]. Training uses the AdamW optimiser with a learning rate of $1 \times 10^{-5}$, decayed by a factor of 0.1 at the 80th and 120th epochs. We also apply data augmentation techniques such as rotation, scaling, and colour jittering. The dataset was divided into training, validation, and test sets following an 80/10/10 split, stratified by scene category to ensure a balanced representation of scenes (a minimal sketch of this split and schedule is given after Table 2).

Table 2. Video classification performance across different views (Ego: egocentric binocular view, Front: third-person front view, $360^{\circ}$: $360^{\circ}$ view) and data modalities (V: Video, A: Audio, D: Directional binaural delay). Reported in Avg. Prec. (%).
| Selected Views | V | V + A | V + A + D |
| --- | --- | --- | --- |
| Egocentric Only | 51.95 (±0.0) | 55.24 (±0.0) | 58.92 |
| Front Only | 54.05 (+2.1) | 65.33 (+10.1) | 67.19 |
| 360° Only | 56.33 (+4.4) | 67.14 (+11.9) | 70.95 |
| 360° + Egocentric | 58.99 (+7.0) | 70.48 (+15.2) | 72.11 |
| 360° + Front | 59.70 (+7.8) | 75.06 (+19.8) | 77.69 |
| 360° + Front + Ego | 63.73 (+11.8) | 77.32 (+22.1) | 80.62 |
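As referenced in Section 4.1, the data split and optimisation schedule can be sketched as follows. scikit-learn and PyTorch are assumed here purely for illustration; the exact tooling is not specified in the paper.

```python
import torch
from sklearn.model_selection import train_test_split

def make_splits(example_ids, scene_labels, seed=0):
    """80/10/10 train/val/test split, stratified by scene category."""
    train_ids, rest_ids, _, rest_labels = train_test_split(
        example_ids, scene_labels, test_size=0.2,
        stratify=scene_labels, random_state=seed)
    val_ids, test_ids = train_test_split(
        rest_ids, test_size=0.5, stratify=rest_labels, random_state=seed)
    return train_ids, val_ids, test_ids

def make_optimiser(model):
    """AdamW at 1e-5, decayed by 0.1 at epochs 80 and 120 (200 epochs in total)."""
    optimiser = torch.optim.AdamW(model.parameters(), lr=1e-5)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimiser, milestones=[80, 120], gamma=0.1)
    return optimiser, scheduler
```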
# 4.2. Video Scene Classification

Video scene classification assigns scene labels to videos based on their frames, enabling analysis of visual content and determination of the subject matter.

Single view vs. multi-view. First, we are interested in the influence of different combinations of video views on classification performance. The results for each combination are summarised in Table 2. The results for single views are presented in the first three rows, indicating that a single $360^{\circ}$ panoramic view outperforms either an egocentric binocular view or a third-person front view alone. Employing multiple views achieves better performance than any single view, and utilising all three views leads to the best performance. This can be attributed to the fact that, although the three views describe the same scene, each offers a unique perspective that contributes to a more comprehensive understanding of the scene.

Single-modality vs. multi-modality and more. We further investigate the impact of modalities on the model's performance. Various combinations of modalities are analysed, and the results are summarised column-wise in Table 2. The first column represents the visual modality alone, the second column combines video with audio, and the last column incorporates the visual, audio, and directional binaural delay modalities.

The inclusion of additional modalities leads to consistent gains in average precision. For example, when all three views are utilised, adding audio and then directional binaural delay improves performance by $13.59\%$ and $16.89\%$ over the video-only result, respectively. This underscores the benefits of leveraging multiple modalities for a more comprehensive understanding of the scene.

# 4.3. Temporal Action Localisation

Temporal Action Localisation (TAL) is a video understanding task that involves the dense identification and temporal segmentation of activities within a video stream over a specific time period.

Table 3. Temporal action localisation results. Baseline extractors are those used in [2, 21, 24, 32]. mAP@σ denotes the mean average precision (%) at an IoU threshold of σ. The best performance is achieved by employing the V + A + D modalities with extractors pre-trained on $360 + x$.
| Extractors | Modalities | ActionFormer [32] mAP@0.5 | mAP@0.75 | mAP@0.95 | Avg. | TemporalMaxer [24] mAP@0.5 | mAP@0.75 | mAP@0.95 | Avg. | TriDet [21] mAP@0.5 | mAP@0.75 | mAP@0.95 | Avg. |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Baseline Extractors | V | 11.9 (±0.0) | 7.8 (±0.0) | 3.3 (±0.0) | 7.7 (±0.0) | 13.1 (±0.0) | 8.8 (±0.0) | 3.7 (±0.0) | 8.6 (±0.0) | 16.7 (±0.0) | 10.1 (±0.0) | 4.8 (±0.0) | 10.5 (±0.0) |
|  | V + A | 19.1 (+7.2) | 11.3 (+3.5) | 4.2 (+0.9) | 11.5 (+3.8) | 21.0 (+7.9) | 14.8 (+6.0) | 5.6 (+1.9) | 13.8 (+5.2) | 23.6 (+6.9) | 17.2 (+7.1) | 6.4 (+1.6) | 15.7 (+5.2) |
| Pre-trained on 360+x | V | 16.4 (+4.5) | 9.8 (+2.0) | 3.9 (+0.6) | 10.0 (+2.3) | 20.4 (+7.3) | 14.3 (+5.5) | 5.2 (+1.5) | 13.3 (+4.7) | 21.1 (+4.4) | 15.3 (+5.2) | 5.5 (+0.7) | 14.0 (+3.5) |
|  | V + A | 23.6 (+11.7) | 16.9 (+9.1) | 5.7 (+2.4) | 15.4 (+7.7) | 25.8 (+12.7) | 18.0 (+9.2) | 6.4 (+2.7) | 16.7 (+8.1) | 26.4 (+8.7) | 18.5 (+8.4) | 6.9 (+2.1) | 17.3 (+6.8) |
|  | V + A + D | 24.9 (+13.0) | 17.4 (+9.6) | 6.1 (+2.8) | 16.1 (+8.4) | 26.6 (+13.5) | 18.3 (+9.5) | 6.5 (+2.8) | 17.1 (+8.5) | 27.1 (+10.4) | 18.7 (+8.6) | 7.0 (+2.2) | 17.6 (+7.1) |
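The mAP@σ numbers in Table 3 follow the standard temporal-detection protocol: score-sorted predictions are greedily matched to unmatched ground-truth segments, and a prediction counts as a true positive only if its temporal IoU with the matched segment reaches σ. The helper below is an illustrative sketch of that matching (the dictionary format for predictions is an assumption), not the evaluation code used for the table.

```python
def temporal_iou(pred, gt):
    """IoU of two 1-D segments, each given as (start, end) in seconds."""
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = (pred[1] - pred[0]) + (gt[1] - gt[0]) - inter
    return inter / union if union > 0 else 0.0

def match_predictions(preds, gts, threshold):
    """Greedily match predictions (dicts with 'segment' and 'score') to
    ground-truth segments of one class; returns true/false-positive flags,
    from which the precision-recall curve and AP at this threshold follow."""
    taken = [False] * len(gts)
    flags = []
    for p in sorted(preds, key=lambda s: -s["score"]):
        ious = [0.0 if taken[i] else temporal_iou(p["segment"], g)
                for i, g in enumerate(gts)]
        best = max(range(len(gts)), key=ious.__getitem__) if gts else -1
        if best >= 0 and ious[best] >= threshold:
            taken[best] = True
            flags.append(1)
        else:
            flags.append(0)
    return flags
```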
Current TAL approaches typically employ a two-stage paradigm [27, 32]: the first stage extracts features from the entire video, and the second stage predicts temporal segments based on these features.

Feature extractors. Baseline extractors are those widely used on the TAL task for datasets such as ActivityNet [5] and Ego4D [9]. The baseline video features are obtained from an I3D model pre-trained on the Kinetics400 dataset [13]. The baseline audio features are derived from the activations of the pre-classification layer of a VGGish model pre-trained on AudioSet [7]. There is no baseline extractor for the directional binaural delay feature, so the V + A + D combination is not reported for the baseline. For a fair comparison, we reused our video classification models from Section 4.2 as the 'Pre-trained on $360 + x$' extractors, following the same extraction setup for both video and audio features; the ResNet-18 extractor was additionally used for the directional binaural delay features.

Experimental results. We compare several temporal action localisation methods, including ActionFormer [32], TriDet [21] and TemporalMaxer [24], under the baseline extractors and our 'Pre-trained on $360 + x$' extractors. The summarised results are presented in Table 3, from which we can see that introducing additional modalities (i.e. audio and directional binaural delay) has a prominent positive impact on the TAL task, leading to performance improvements for both sets of extractors. This highlights the importance of leveraging multiple modalities for accurate and effective temporal action localisation. Using our own extractors provides additional improvements, as the baseline extractors may not be optimised for our specific binocular or $360^{\circ}$ views.

# 4.4. Cross-modality Retrieval

We next focus on a series of retrieval tasks across modalities, including audio, video and directional binaural delay. In a modality-specific retrieval scenario, the query modality (Q) serves as the input for retrieving the key modality (K) in the Q-to-K retrieval task. The evaluation metric R$k$ denotes the recall at rank $k$ (a minimal sketch of this metric follows Table 4).

Table 4. Q-to-Video retrieval results. The superscript * indicates that the modalities are co-trained. Recall is reported at ranks $\{1,5,10\}$.
| Query Modality | R1 (%) | R5 (%) | R10 (%) |
| --- | --- | --- | --- |
| A | 39.14 (±0.0) | 62.76 (±0.0) | 79.21 (±0.0) |
| A + D | 44.30 (+5.16) | 66.92 (+4.16) | 84.78 (+5.57) |
| (A + D)* | 55.88 (+16.74) | 72.53 (+9.77) | 86.6 (+7.39) |
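As referenced above, R$k$ is the fraction of queries whose paired video appears among the $k$ most similar gallery items. A minimal sketch over pre-computed embeddings follows; cosine similarity and the array layout are assumptions made for illustration.

```python
import numpy as np

def recall_at_k(query_emb, video_emb, ks=(1, 5, 10)):
    """query_emb[i] and video_emb[i] form an aligned (query, video) pair;
    both arrays have shape (N, d)."""
    q = query_emb / np.linalg.norm(query_emb, axis=1, keepdims=True)
    v = video_emb / np.linalg.norm(video_emb, axis=1, keepdims=True)
    sim = q @ v.T                     # (N, N) cosine-similarity matrix
    ranks = np.argsort(-sim, axis=1)  # most similar gallery item first
    hits = {k: 0 for k in ks}
    for i in range(len(q)):
        position = int(np.where(ranks[i] == i)[0][0])
        for k in ks:
            hits[k] += position < k
    return {k: 100.0 * hits[k] / len(q) for k in ks}
```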
Q-to-Video retrieval results. Table 4 reports the results of using the query modality to retrieve videos. In this table, A + D denotes independently trained audio and directional binaural features used together as query features, whereas $(A + D)^*$ denotes the collaborative (co-)training of these features instead of treating them independently. The inter-modality retrieval results in Table 4 demonstrate how well the modalities of the $360 + x$ dataset are aligned with one another. Besides Q-to-Video retrieval, we also performed Q-to-Audio and Q-to-Directional binaural delay experiments; details can be found in the supplementary material.

# 4.5. Self-supervised Representation Learning

Experiment setup. In this section, we investigate the impact of different self-supervised learning (SSL) methods using two video pretext tasks: video pace (VP) prediction [25] and clip order (CO) shuffle prediction [29]. The VP task challenges the model to determine the playback pace of a video, while the CO task asks the model to rearrange shuffled video clips into their correct chronological order (a simplified sketch of how these pretext labels are generated follows Table 5). The original VP and CO tasks operate on video data only; to capitalise on the advantages of multi-modality, we extended them to include the audio and directional binaural delay modalities, aligning each modality with the temporal coherence and dynamics observed in the video. For more comprehensive explanations, please refer to the supplementary material.

Experimental results. We first examined the impact of SSL pre-training on video classification. Table 5 demonstrates the consistent precision gains achieved by utilising SSL pre-trained models. Notably, leveraging both the video pace and clip order SSL techniques results in an average performance improvement of $\sim 7\%$.

Table 5. Models with different pre-training methods, fine-tuned and tested on video classification. The experiments use all three video views. Reported in Avg. Prec. (%).
| Pre-train Method | V | V + A | V + A + D |
| --- | --- | --- | --- |
| From Scratch | 63.73 (±0.0) | 77.32 (±0.0) | 80.62 |
| Video Pace [25] | 69.27 (+5.5) | 79.56 (+2.2) | 81.97 |
| Clip Order [29] | 69.91 (+6.2) | 80.14 (+2.8) | 82.18 |
| VP [25] + CO [29] | 76.84 (+13.1) | 82.66 (+5.3) | 83.32 |
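As referenced in the setup above, both pretext tasks derive their labels directly from unlabelled video. The sketch below follows the general recipes of [25] and [29]; the clip length, the candidate sampling strides and the frame-indexing scheme are illustrative assumptions.

```python
import random

PACES = (1, 2, 4, 8)  # candidate frame-sampling strides (one class per pace)

def pace_sample(num_frames, clip_len=16):
    """Video pace: sample a clip at a random stride; the label is the stride index.
    Assumes num_frames >= clip_len * max(PACES)."""
    label = random.randrange(len(PACES))
    stride = PACES[label]
    start = random.randint(0, num_frames - clip_len * stride)
    frame_ids = [start + i * stride for i in range(clip_len)]
    return frame_ids, label

def clip_order_sample(num_frames, num_clips=3, clip_len=16):
    """Clip order: cut consecutive clips and shuffle them; the label is the
    permutation the model must recover."""
    start = random.randint(0, num_frames - num_clips * clip_len)
    clips = [list(range(start + c * clip_len, start + (c + 1) * clip_len))
             for c in range(num_clips)]
    order = list(range(num_clips))
    random.shuffle(order)
    return [clips[i] for i in order], tuple(order)
```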
Table 6. Comparison of supervised pre-trained extractors with their SSL pre-trained counterparts on the TAL task. The experiments use all three video views with modalities (V + A + D).
| Pre-train Method | mAP@0.5 | mAP@0.75 | mAP@0.95 | Avg. |
| --- | --- | --- | --- | --- |
| Supervised | 27.1 (±0.0) | 18.7 (±0.0) | 7.0 (±0.0) | 17.6 (±0.0) |
| Video Pace [25] | 29.4 (+2.3) | 19.6 (+0.9) | 7.4 (+0.4) | 18.8 (+1.2) |
| Clip Order [29] | 28.9 (+1.8) | 19.3 (+0.6) | 7.3 (+0.3) | 18.5 (+0.9) |
| VP [25] + CO [29] | 30.3 (+3.2) | 20.2 (+1.5) | 7.9 (+0.9) | 19.5 (+1.9) |
Table 7. Dataset adaptation results on THUMOS14. Following the original setup of the THUMOS14 dataset [8], this task uses the video modality only.
| Feature Extractor | mAP@0.3 | mAP@0.4 | mAP@0.5 | mAP@0.6 | mAP@0.7 | Avg. |
| --- | --- | --- | --- | --- | --- | --- |
| Kinetics400 [13] (Pre-train) | 83.7 (±0.0) | 80.2 (±0.0) | 72.8 (±0.0) | 62.4 (±0.0) | 47.4 (±0.0) | 69.5 (±0.0) |
| 360+x (Pre-train) | 84.5 (+0.8) | 81.0 (+0.8) | 73.4 (+0.6) | 65.9 (+3.5) | 54.6 (+7.2) | 71.9 (+2.4) |
| Kinetics400 [13] (Pre-train) and 360+x (Fine-tune) | 85.3 (+1.6) | 81.8 (+1.6) | 74.9 (+2.1) | 68.1 (+5.7) | 58.2 (+10.8) | 73.7 (+4.2) |
We then used the SSL pre-trained models as feature extractors for the temporal action localisation task, incorporating all three modalities (V + A + D) with the TriDet framework [21]. Since a model trained from scratch cannot serve as the first-stage extractor, we employed the supervised extractors from Section 4.2 as the comparison baseline. The results in Table 6 indicate that pre-training with video pace (VP) or clip order (CO) individually leads to average performance improvements of $\sim 1.2\%$ and $\sim 0.9\%$ respectively, compared to the supervised baseline. The combination of both SSL methods yields the highest gain of $\sim 1.9\%$.

# 4.6. Pre-training Model for Dataset Adaptation

This section explores the efficacy of leveraging models pre-trained on the $360 + x$ dataset for adaptation to other datasets such as THUMOS14 [8]. Adhering to the THUMOS14 setup, the experiments use the TriDet framework [21] for Temporal Action Localisation (TAL).

Table 7 reports the mean average precision (mAP) at IoU thresholds from 0.3 to 0.7. Training exclusively on $360 + x$ video data already improves performance over training solely on the Kinetics400 dataset [13], and this improvement becomes more prominent at higher IoU thresholds. The best performance, however, comes from a two-step approach: pre-training on Kinetics400 followed by fine-tuning on the $360 + x$ dataset, which yields an average $\sim 4.2\%$ improvement over the extractor pre-trained on Kinetics400 alone. This finding shows that training feature extractors on the $360 + x$ dataset can be beneficial for dataset adaptation in downstream tasks. More experimental results on dataset integration are available in the supplementary material.

# 5. Conclusions

In this work, we studied the problem of panoptic scene understanding and presented, to our knowledge, a first-of-its-kind dataset, $360 + x$, to support this study. The proposed $360 + x$ is a large-scale multi-modal dataset that comprises several different viewpoints (egocentric, third-person and panoramic views) and covers a wide range of real-world daily activities. By providing as many as possible of the perspectives that describe a real-world scene, $360 + x$ aims to support research on understanding the world around us in the way humans do (and even beyond). We also presented a benchmark study of several scene understanding tasks based on this newly collected dataset, with comparisons to other existing datasets. Extensive experimental analysis validated the effectiveness of each of the perspectives within our dataset and suggested interesting insights, confirming that with more viewpoints or data modalities, the understanding of a scene becomes more comprehensive. Surprisingly, models trained on our dataset without manual annotation (i.e. with self-supervised learning) even outperform those trained with human annotations in a fully supervised manner. We hope this new dataset will open up new directions for scene understanding research, and we look forward to the work it enables.
# Acknowledgement

This project was partially supported by the Ramsay Research Fund and the Royal Society Short Industry Fellowship (SIF\R1\231009). Y. Hou and C. Qu were partially supported by the CSC grant (No. 202308060328) and Allsee Technologies Ltd., respectively. The computations described in this research were performed using the Baskerville Tier 2 HPC service (funded by EP/T022221/1 and EP/W032244/1), which is operated by Advanced Research Computing at the University of Birmingham.

# References

[1] Keshav Bhandari, Mario A DeLaGarza, Ziliang Zong, Hugo Latapie, and Yan Yan. Egok360: A 360 egocentric kinetic human activity video dataset. In 2020 IEEE International Conference on Image Processing (ICIP), pages 266-270. IEEE, 2020. 2
[2] Honglie Chen, Weidi Xie, Andrea Vedaldi, and Andrew Zisserman. Vggsound: A large-scale audio-visual dataset. In ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 721-725. IEEE, 2020. 1, 3, 7
[3] Ziyang Chen, David F Fouhey, and Andrew Owens. Sound localization by self-supervised time delay estimation. In European Conference on Computer Vision, pages 489-508. Springer, 2022. 2, 3
[4] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, et al. Scaling egocentric vision: The epic-kitchens dataset. In Proceedings of the European Conference on Computer Vision (ECCV), pages 720-736, 2018. 1, 2, 4, 5
[5] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 961-970, 2015. 1, 2, 4, 5, 7
[6] Jort F. Gemmeke, Daniel P. W. Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R. Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780, 2017. 3
[7] Jort F. Gemmeke, Daniel P. W. Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R. Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780. IEEE Press, 2017. 1, 7
[8] A. Gorban, H. Idrees, Y.-G. Jiang, A. Roshan Zamir, I. Laptev, M. Shah, and R. Sukthankar. Thumos challenge: Action recognition with a large number of classes. http://www.thumos.info, 2015. 8
[9] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18995-19012, 2022. 1, 2, 5, 7
[10] Chunhui Gu, Chen Sun, David A Ross, Carl Vondrick, Caroline Pantofaru, Yeqing Li, Sudheendra Vijayanarasimhan, George Toderici, Susanna Ricco, Rahul Sukthankar, et al. Ava: A video dataset of spatio-temporally localized atomic visual actions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6047-6056, 2018. 3

[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition.
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6 +[12] Shawn Hershey, Sourish Chaudhuri, Daniel PW Ellis, Jort F Gemmeke, Aren Jansen, R Channing Moore, Manoj Plakal, Devin Platt, Rif A Saurous, Bryan Seybold, et al. Cnn architectures for large-scale audio classification. In 2017 iee international conference on acoustics, speech and signal processing (icassp), pages 131-135. IEEE, 2017. 6 +[13] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. 1, 2, 4, 5, 6, 7, 8 +[14] H. Kuehne, H. Jhuang, E. Garrote, T. Poggio, and T. Serre. HMDB: a large video database for human motion recognition. In Proceedings of the International Conference on Computer Vision (ICCV), 2011. 5 +[15] Hildegard Kuehne, Hueihan Jhuang, Estíbaliz Garrote, Tomaso Poggio, and Thomas Serre. Hmdb: a large video database for human motion recognition. In 2011 International conference on computer vision, pages 2556-2563. IEEE, 2011. 1, 4, 5 +[16] Yiyi Liao, Jun Xie, and Andreas Geiger. Kitti-360: A novel dataset and benchmarks for urban scene understanding in 2d and 3d. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2 +[17] Benjamin R Meagher. Ecologizing social psychology: The physical environment as a necessary constituent of social processes. *Personality and social psychology review*, 24(1): 3-23, 2020. 3 +[18] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41, 1995. 4 +[19] Xiaokang Peng, Yake Wei, Andong Deng, Dong Wang, and Di Hu. Balanced multimodal learning via on-the-fly gradient modulation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022. 6 +[20] Daniel Scharstein, Heiko Hirschmüller, York Kitajima, Greg Krathwohl, Nera Nesic, Xi Wang, and Porter Westling. High-resolution stereo datasets with subpixel-accurate ground truth. In Pattern Recognition: 36th German Conference, GCPR 2014, Münster, Germany, September 2-5, 2014, Proceedings 36, pages 31-42. Springer, 2014. 1 +[21] Dingfeng Shi, Yujie Zhong, Qiong Cao, Lin Ma, Jia Li, and Dacheng Tao. Tridet: Temporal action detection with relative boundary modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18857-18866, 2023. 7, 8 +[22] Shuran Song, Andy Zeng, Angel X Chang, Manolis Savva, Silvio Savarese, and Thomas Funkhouser. Im2pano3d: Extrapolating 360 structure and semantics beyond the field of view. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3847-3856, 2018. 1, 2 +[23] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402, 2012. 1, 2, 4, 5 + +[24] Tuan N Tang, Kwonyoung Kim, and Kwanghoon Sohn. Temporalmaxer: Maximize temporal context with only max pooling for temporal action localization. arXiv preprint arXiv:2303.09055, 2023. 7 +[25] Jiangliu Wang, Jianbo Jiao, and Yunhui Liu. Self-supervised video representation learning by pace prediction. In European Conference on Computer Vision, 2020. 7, 8 +[26] Weiyao Wang, Du Tran, and Matt Feiszli. What makes training multi-modal classification networks hard? In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12695-12705, 2020. 
6 +[27] Xiang Wang, Zhiwu Qing, Ziyuan Huang, Yutong Feng, Shiwei Zhang, Jianwen Jiang, Mingqian Tang, Changxin Gao, and Nong Sang. Proposal relation network for temporal action detection. arXiv preprint arXiv:2106.11812, 2021. 7 +[28] Jianxiong Xiao, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Recognizing scene viewpoint using panoramic place representation. In 2012 IEEE Conference on Computer Vision and Pattern Recognition, pages 2695-2702. IEEE, 2012. 1 +[29] Dejing Xu, Jun Xiao, Zhou Zhao, Jian Shao, Di Xie, and Yueting Zhuang. Self-supervised spatiotemporal learning via video clip order prediction. In Computer Vision and Pattern Recognition (CVPR), 2019. 7, 8 +[30] Guorun Yang, Xiao Song, Chaoqin Huang, Zhidong Deng, Jianping Shi, and Bolei Zhou. Drivingstereo: A large-scale dataset for stereo matching in autonomous driving scenarios. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 899–908, 2019. 1 +[31] Serena Yeung, Olga Russakovsky, Ning Jin, Mykhaylo Andriluka, Greg Mori, and Li Fei-Fei. Every moment counts: Dense detailed labeling of actions in complex videos. International Journal of Computer Vision, 126:375–389, 2018. 2 +[32] Chen-Lin Zhang, Jianxin Wu, and Yin Li. Actionformer: Localizing moments of actions with transformers. In European Conference on Computer Vision, pages 492-510. Springer, 2022. 7 +[33] Hang Zhao, Antonio Torralba, Lorenzo Torresani, and Zhicheng Yan. Hacs: Human action clips and segments dataset for recognition and temporal localization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8668-8678, 2019. 5 +[34] Bolei Zhou, Agata Lapedriza, Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 million image database for scene recognition. IEEE transactions on pattern analysis and machine intelligence, 40(6):1452-1464, 2017. 
4 \ No newline at end of file diff --git a/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/images.zip b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..d27f40b38b618c2794764eb4256b86702a9f978b --- /dev/null +++ b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bccadc09b08a35becaf7753da7a603cd100a50f307716d0f2e1a7f1c47a52a48 +size 474050 diff --git a/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/layout.json b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ffae53124134077d1d37aa7318844490cea4b57c --- /dev/null +++ b/2024/360+x_ A Panoptic Multi-modal Scene Understanding Dataset/layout.json @@ -0,0 +1,6844 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 103, + 488, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 103, + 488, + 121 + ], + "spans": [ + { + "bbox": [ + 105, + 103, + 488, + 121 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 105, + 103, + 488, + 121 + ], + "type": "text", + "content": ": A Panoptic Multi-modal Scene Understanding Dataset" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 87, + 143, + 505, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 143, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 87, + 143, + 505, + 158 + ], + "type": "text", + "content": "Hao Chen Yuqi Hou Chenyuan Qu Irene Testini Xiaohan Hong Jianbo Jiao" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 129, + 163, + 463, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 163, + 463, + 177 + ], + "spans": [ + { + "bbox": [ + 129, + 163, + 463, + 177 + ], + "type": "text", + "content": "The Machine Intelligence " + }, + { + "bbox": [ + 129, + 163, + 463, + 177 + ], + "type": "inline_equation", + "content": "+x" + }, + { + "bbox": [ + 129, + 163, + 463, + 177 + ], + "type": "text", + "content": " Group, University of Birmingham, UK" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 190, + 182, + 400, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 182, + 400, + 197 + ], + "spans": [ + { + "bbox": [ + 190, + 182, + 400, + 197 + ], + "type": "text", + "content": "Project page: https://x360dataset.github.io/" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 224, + 192, + 237 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 224, + 192, + 237 + ], + "spans": [ + { + "bbox": [ + 143, + 224, + 192, + 237 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 251, + 289, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 251, + 289, + 502 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 289, + 502 + ], + "type": "text", + "content": "Human perception of the world is shaped by a multitude of viewpoints and modalities. While many existing datasets focus on scene understanding from a certain perspective (e.g. egocentric or third-person views), our dataset offers a panoptic perspective (i.e. multiple viewpoints with multiple data modalities). 
Specifically, we encapsulate third-person panoramic and front views, as well as egocentric monocular/binocular views with rich modalities including video, multi-channel audio, directional binaural delay, location data and textual scene descriptions within each scene captured, presenting comprehensive observation of the world. To the best of our knowledge, this is the first database that covers multiple viewpoints with multiple data modalities to mimic how daily information is accessed in the real world. Through our benchmark analysis, we presented 5 different scene understanding tasks on the proposed " + }, + { + "bbox": [ + 46, + 251, + 289, + 502 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 46, + 251, + 289, + 502 + ], + "type": "text", + "content": " dataset to evaluate the impact and benefit of each data modality and perspective in panoptic scene understanding. We hope this unique dataset could broaden the scope of comprehensive scene understanding and encourage the community to approach these problems from more diverse perspectives." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 524, + 128, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 524, + 128, + 537 + ], + "spans": [ + { + "bbox": [ + 47, + 524, + 128, + 537 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 544, + 287, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 688 + ], + "type": "text", + "content": "Scene understanding is crucial for robotics and artificial intelligent systems to perceive the environment around them. As humans, we intuitively understand the world through primarily visual inputs, as well as auditory and other sensory inputs (e.g. touch and smell). The community has made remarkable progress in mimicking human perception with contributions from various datasets and benchmarks [4, 5, 7, 9, 13, 15, 23]. These efforts have approached scene understanding from a diverse range of perspectives, such as normal frontal-view vision [5, 13, 23], panoramic view [22, 28], binocular/stereo view [20, 30], egocentric monocular view [4, 9], and audio [2, 7]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "While there has been exciting progress in understanding scenes from a limited number of perspectives, it is notable" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 225, + 547, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 225, + 547, + 357 + ], + "spans": [ + { + "bbox": [ + 304, + 225, + 547, + 357 + ], + "type": "text", + "content": "that humans understand the world by incorporating a combination of viewpoints, in a holistic manner. This includes an egocentric view for activities we are involved in and a third-person view for activities we are observing. In addition to visual cues, we also rely on a range of modalities, including hearing and binaural delay, to fully comprehend our surroundings and track movements. Our prior knowledge of the scene, such as localisation information and scene descriptions, has also supported our understanding of the environment (e.g. 
the cafe in the city centre may be different from a similar cafe on a university campus)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 360, + 547, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 360, + 547, + 540 + ], + "spans": [ + { + "bbox": [ + 304, + 360, + 547, + 540 + ], + "type": "text", + "content": "Taking the above observations into consideration, a new dataset covering all these aforementioned aspects is presented in this work, to provide a panoptic scene understanding, termed " + }, + { + "bbox": [ + 304, + 360, + 547, + 540 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 304, + 360, + 547, + 540 + ], + "type": "text", + "content": " dataset. This new dataset offers a diverse selection of perspectives, including a " + }, + { + "bbox": [ + 304, + 360, + 547, + 540 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 360, + 547, + 540 + ], + "type": "text", + "content": " panoramic view providing a complete panoptic view of the environment, and a third-person front view that highlights the region of interest that has the most movements in front of the camera. Additionally, we have included egocentric monocular and binocular videos to capture the first-person perspective of individuals in the environment. These viewpoints are complemented by aligned multi-channel audio with directional binaural delay information, as well as location information and scene descriptions as metadata. An illustration of the presented dataset collection system is shown in Figure 1." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 542, + 547, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 542, + 547, + 650 + ], + "spans": [ + { + "bbox": [ + 304, + 542, + 547, + 650 + ], + "type": "text", + "content": "Based on this newly collected dataset, we perform 5 visual-audio scene understanding tasks to analyse the contribution and effectiveness of each data viewpoint and modality. Particularly, we look at video classification, temporal action localisation, self-supervised representation learning, cross-modality retrieval and pre-training model migration for dataset adaptation, with interesting findings and insights from extensive experimental analysis. The main contributions of this work are summarised as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 654, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 306, + 654, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 654, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 306, + 654, + 545, + 689 + ], + "type": "text", + "content": "- We propose to our knowledge the first and probably the most authentic panoptic scene understanding dataset covering multiple viewpoints and data modalities in the wild." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "type": "text", + "content": "- We perform extensive experimental analysis to validate the effectiveness of the proposed dataset on different tasks" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19373" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 70, + 547, + 349 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 547, + 349 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 547, + 349 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 547, + 349 + ], + "type": "image", + "image_path": "cff7a8284de8415f6a93cc4e75a54533058f0cdc1222c6e3ea0e80912a4ca199.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "lines": [ + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "spans": [ + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "text", + "content": "Figure 1. Illustration of the proposed " + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "text", + "content": " dataset. The " + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "text", + "content": " camera records fish-eye raw videos with front and back lenses. These videos are merged to create a spherical " + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "text", + "content": " panorama (middle-up figure, zoom in for details), which is then transformed to (a) " + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "text", + "content": " panoramic data using equirectangular projection. 
The (b) third-person front view is obtained by de-warping the rich movements region highlighted red in the spherical field of " + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "text", + "content": " panorama (the middle-left figure). By wearing stereo cameras, the capturers record (c) egocentric clips while staying visible to the fixed " + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "text", + "content": " camera (central ellipse). (e) Directional audio time delay data is generated from left and right audio inputs (d) from the " + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "text", + "content": " camera by interaural time delay process [3]. This helps locate sound sources in the " + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 366, + 547, + 434 + ], + "type": "text", + "content": " panorama." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 454, + 226, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 454, + 226, + 465 + ], + "spans": [ + { + "bbox": [ + 55, + 454, + 226, + 465 + ], + "type": "text", + "content": "from various perspectives and modalities." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 466, + 288, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 466, + 288, + 525 + ], + "spans": [ + { + "bbox": [ + 47, + 466, + 288, + 525 + ], + "type": "text", + "content": "- Interesting findings are derived from the analysis, suggesting the effectiveness of each viewpoint and data modality. Learning from this new dataset without supervision even shows a better performance than that from a model trained in a supervised manner." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 537, + 139, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 537, + 139, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 537, + 139, + 550 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 557, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 557, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 557, + 289, + 715 + ], + "type": "text", + "content": "Video understanding and analysis. Video analysis has been extensively studied in the literature. Existing datasets such as UCF101 [23], ActivityNet [5] and Kinetics [13] have provided large-scale video data for activity understanding tasks. However, these datasets often exhibit lower complexity compared to real-world scenes. Some datasets, like MultiThumos [31], aim to increase complexity but are limited to specific scenarios with domain-specific actions, deviating from real-life daily activities. In contrast, our dataset builds upon the activity labels from ActivityNet [5] and strives to capture data that closely simulates real-life scenarios. Apart from that, we also include multiple data viewpoints and modalities as compared to existing datasets." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 453, + 547, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 453, + 547, + 634 + ], + "spans": [ + { + "bbox": [ + 304, + 453, + 547, + 634 + ], + "type": "text", + "content": "Panoramic scene understanding. In recent years, panoramic scene understanding has gained significant attention due to its holistic reflection of the environment. Several datasets have been introduced to facilitate research in this area. For instance, the KITTI-360 [16] provides a collection of panoramic images for urban scene analysis. EGOK360 [1] has been introduced to address the need for video data with a panoramic view. Im2Pano3D [22] presents a panoramic dataset for indoor scenarios with semantic segmentation and focuses on the prediction from a partial observation. However, these datasets primarily focus on panoramic visual data while lacking the incorporation of other viewpoints (e.g. egocentric) and data modalities (e.g. audio), limiting their potential for comprehensive scene understanding and analysis." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 641, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 715 + ], + "type": "text", + "content": "Egocentric video analysis. Focusing on understanding scenes from a first-person perspective, existing datasets such as EPIC-Kitchens [4] and Ego4D [9] provide egocentric video data collected during daily activities. They have contributed to research on activity recognition and object detection in egocentric scenes. Unlike these datasets fo" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "19374" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": "cusing on egocentric views, our dataset also covers other viewpoints and modalities aiming at supporting scene understanding research in a more panoptic manner." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 114, + 287, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 114, + 287, + 222 + ], + "spans": [ + { + "bbox": [ + 47, + 114, + 287, + 222 + ], + "type": "text", + "content": "Visual-audio analysis. Integrating visual and audio information often enhances the performance of models in scene understanding tasks, as it provides richer contextual information. There are some existing datasets available to support research in audio-visual analysis, e.g. AVA [10], AudioSet [6] and VGGSound [2], to name a few. However, these datasets are lacking in multiple viewpoints and the directional property of audio signals, which are provided in the proposed new dataset." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 233, + 135, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 233, + 135, + 245 + ], + "spans": [ + { + "bbox": [ + 47, + 233, + 135, + 245 + ], + "type": "text", + "content": "3. 
" + }, + { + "bbox": [ + 47, + 233, + 135, + 245 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 47, + 233, + 135, + 245 + ], + "type": "text", + "content": " Dataset" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 252, + 221, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 252, + 221, + 266 + ], + "spans": [ + { + "bbox": [ + 47, + 252, + 221, + 266 + ], + "type": "text", + "content": "3.1. Data Acquisition and Alignment" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 270, + 287, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 270, + 287, + 389 + ], + "spans": [ + { + "bbox": [ + 47, + 270, + 287, + 389 + ], + "type": "text", + "content": "Two main devices were used for our data collection: the Insta 360 One X2 and Snapchat Spectacles 3 cameras. The 360 One X2 has two fish-eye cameras that collect " + }, + { + "bbox": [ + 47, + 270, + 287, + 389 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 270, + 287, + 389 + ], + "type": "text", + "content": " panoramic visual information in the scene with " + }, + { + "bbox": [ + 47, + 270, + 287, + 389 + ], + "type": "inline_equation", + "content": "5760 \\times 2880" + }, + { + "bbox": [ + 47, + 270, + 287, + 389 + ], + "type": "text", + "content": " resolution and a frame rate of 25 FPS. Additionally, directional audio was recorded using four microphones in directional audio mode. While the Spectacles 3 has a stereo camera attached to a pair of glasses used to capture the egocentric binocular vision within the scene at a resolution of " + }, + { + "bbox": [ + 47, + 270, + 287, + 389 + ], + "type": "inline_equation", + "content": "2432 \\times 1216" + }, + { + "bbox": [ + 47, + 270, + 287, + 389 + ], + "type": "text", + "content": " and a frame rate of 60 FPS." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 390, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 390, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 390, + 287, + 581 + ], + "type": "text", + "content": "Once we obtained the raw data, we aligned the different viewpoints and modalities through a specific process. The initial raw footage captured by the two fish-eye cameras on the " + }, + { + "bbox": [ + 46, + 390, + 287, + 581 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 390, + 287, + 581 + ], + "type": "text", + "content": " camera was in the form of two circular videos, which were then stitched and de-warped into a spherical panorama. This panorama can be projected into an equirectangular format to produce a panoramic video. However, this direct compression of the spherical view into a rectangular format can introduce unnatural distortions. In order to provide a more natural and informative view, we inversely project a rectangular region into equirectangular space and use it to crop the spherical panorama. We use optical flow to determine the crop region with the most motion activity in the spherical panorama field. This crop region is then projected back to rectangular, resulting in an informative video view with minimal distortions." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 582, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 582, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 47, + 582, + 287, + 665 + ], + "type": "text", + "content": "Egocentric binocular videos, as shown in Figure 1(c), were captured ranging from approximately 30 seconds to 1 minute in duration for each clip. A total of 1 to 5 stereo clips were recorded, scattered throughout the duration of the average 6 mins " + }, + { + "bbox": [ + 47, + 582, + 287, + 665 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 582, + 287, + 665 + ], + "type": "text", + "content": " video. In addition to stereo videos, we also provide the corresponding monocular videos for the egocentric view." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 665, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 665, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 665, + 287, + 713 + ], + "type": "text", + "content": "The audio recordings were temporally aligned with their corresponding videos with left/right channel modality. The four-channel audios with the " + }, + { + "bbox": [ + 47, + 665, + 287, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 665, + 287, + 713 + ], + "type": "text", + "content": " panoramic video are provided as well for further exploration. Moreover, we also" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "type": "text", + "content": "provide the directional information of the audio which was presented using the estimated interaural time delay of the sound obtained from the method introduced in [3]. The GPS information and weather information were also provided." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 120, + 545, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 120, + 545, + 191 + ], + "spans": [ + { + "bbox": [ + 304, + 120, + 545, + 191 + ], + "type": "text", + "content": "Given the possibility of occlusions in regions visible to the egocentric camera but not to the " + }, + { + "bbox": [ + 304, + 120, + 545, + 191 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 120, + 545, + 191 + ], + "type": "text", + "content": " camera, we ensured during data collection that the cameras were positioned in close proximity. This setup, with clear mutual visibility, allowed both cameras to capture a similar overall scene." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 200, + 400, + 212 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 200, + 400, + 212 + ], + "spans": [ + { + "bbox": [ + 306, + 200, + 400, + 212 + ], + "type": "text", + "content": "3.2. 
Scene Selection" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 219, + 545, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 219, + 545, + 255 + ], + "spans": [ + { + "bbox": [ + 304, + 219, + 545, + 255 + ], + "type": "text", + "content": "To broaden scene coverage and promote multi-modal collaborative learning, we integrated a strategic selection process for captured scenes, governed by three key criteria:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 255, + 545, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 255, + 545, + 386 + ], + "spans": [ + { + "bbox": [ + 304, + 255, + 545, + 386 + ], + "type": "text", + "content": "i) Scene categories must be carefully crafted to be comprehensive, yet concise, while also being authoritative and reflective of everyday life. The location where a scene unfolds plays a crucial role in providing essential environmental context to the activities within it [17]. Distinct scenes can impart unique meanings or emotional nuances to identical events. For instance, the act of chatting could convey divergent implications in a school setting as compared to a home environment. Such nuances are critical as they offer deeper insights into the contextual interpretation of behaviours and interactions in varied settings." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 386, + 545, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 386, + 545, + 457 + ], + "spans": [ + { + "bbox": [ + 304, + 386, + 545, + 457 + ], + "type": "text", + "content": "ii) The data should ideally span a wide array of weather and lighting conditions. This criterion aims to ensure the inclusion of both indoor and outdoor activities under various environmental scenarios. Such diversity is important in accurately representing the multifaceted nature of daily life and the various conditions in which these activities occur." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 458, + 545, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 458, + 545, + 553 + ], + "spans": [ + { + "bbox": [ + 304, + 458, + 545, + 553 + ], + "type": "text", + "content": "iii) Our third criterion is the inclusion of scenarios rich in distinctive sound sources, particularly those where multiple activities co-occur. It is essential for the dataset to not only visually represent these activities but also to capture the corresponding auditory elements. The goal is to present the complexity and realism of real-world environments as much as possible, marked by simultaneous and various actions and behaviours." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 554, + 545, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 554, + 545, + 650 + ], + "spans": [ + { + "bbox": [ + 304, + 554, + 545, + 650 + ], + "type": "text", + "content": "It is worth noting that our dataset was collected across several countries, including the United Kingdom (e.g. London, Birmingham, Cardiff and Jersey), France (Paris), Spain (e.g. Oviedo and Picos de Europa), China (e.g. Guangzhou and Shenzhen), and Japan (e.g. Kyoto and Osaka). 
During the data collection, the " + }, + { + "bbox": [ + 304, + 554, + 545, + 650 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 554, + 545, + 650 + ], + "type": "text", + "content": " Camera was placed statically to record the scene, while a capturer wearing the Spectacles glasses recorded first-person interactions with the scene." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Sensitive data handling. Our dataset was collected in a real-world setting and may contain sensitive personal information (e.g. human faces). To ensure ethical and responsible research, the video capture was conducted with proper" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19375" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 25, + 68, + 217, + 220 + ], + "blocks": [ + { + "bbox": [ + 25, + 68, + 217, + 220 + ], + "lines": [ + { + "bbox": [ + 25, + 68, + 217, + 220 + ], + "spans": [ + { + "bbox": [ + 25, + 68, + 217, + 220 + ], + "type": "image", + "image_path": "ff9d14f6190e582495ba4849c002e12299ba6bd533ce6c87f4b0afa652b81c1b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 221, + 212, + 232 + ], + "lines": [ + { + "bbox": [ + 50, + 221, + 212, + 232 + ], + "spans": [ + { + "bbox": [ + 50, + 221, + 212, + 232 + ], + "type": "text", + "content": "(a). Distribution of the scene categories (number)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 51, + 234, + 211, + 332 + ], + "blocks": [ + { + "bbox": [ + 51, + 234, + 211, + 332 + ], + "lines": [ + { + "bbox": [ + 51, + 234, + 211, + 332 + ], + "spans": [ + { + "bbox": [ + 51, + 234, + 211, + 332 + ], + "type": "image", + "image_path": "df7877990d8be77e8a12fc7f8df19206e525faa0882e849874cba44e7a1b34d3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 339, + 214, + 358 + ], + "lines": [ + { + "bbox": [ + 47, + 339, + 214, + 358 + ], + "spans": [ + { + "bbox": [ + 47, + 339, + 214, + 358 + ], + "type": "text", + "content": "(d). Distribution comparison of the number of action instances per video." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 223, + 67, + 392, + 217 + ], + "blocks": [ + { + "bbox": [ + 223, + 67, + 392, + 217 + ], + "lines": [ + { + "bbox": [ + 223, + 67, + 392, + 217 + ], + "spans": [ + { + "bbox": [ + 223, + 67, + 392, + 217 + ], + "type": "image", + "image_path": "650c19bd920fad706ffcab352e0f1d1fc3f168472a5e5867c094a0845e440f36.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 220, + 362, + 231 + ], + "lines": [ + { + "bbox": [ + 230, + 220, + 362, + 231 + ], + "spans": [ + { + "bbox": [ + 230, + 220, + 362, + 231 + ], + "type": "text", + "content": "(b). Geographical distribution of actions." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 225, + 233, + 335, + 341 + ], + "blocks": [ + { + "bbox": [ + 225, + 233, + 335, + 341 + ], + "lines": [ + { + "bbox": [ + 225, + 233, + 335, + 341 + ], + "spans": [ + { + "bbox": [ + 225, + 233, + 335, + 341 + ], + "type": "image", + "image_path": "d8d992fb3e4b2a6a9391bb2d6502753c64cc6c931b0b97c2d2d032ea162a90ee.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 235, + 348, + 328, + 359 + ], + "lines": [ + { + "bbox": [ + 235, + 348, + 328, + 359 + ], + "spans": [ + { + "bbox": [ + 235, + 348, + 328, + 359 + ], + "type": "text", + "content": "(e). Capture time of the day." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 400, + 77, + 557, + 211 + ], + "blocks": [ + { + "bbox": [ + 400, + 77, + 557, + 211 + ], + "lines": [ + { + "bbox": [ + 400, + 77, + 557, + 211 + ], + "spans": [ + { + "bbox": [ + 400, + 77, + 557, + 211 + ], + "type": "image", + "image_path": "8b68580db91837f64da6a68edf3c21b70b7c70d54ca34f90f9f346aca235a498.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 350, + 233, + 545, + 333 + ], + "blocks": [ + { + "bbox": [ + 392, + 220, + 533, + 231 + ], + "lines": [ + { + "bbox": [ + 392, + 220, + 533, + 231 + ], + "spans": [ + { + "bbox": [ + 392, + 220, + 533, + 231 + ], + "type": "text", + "content": "(c). Overall distribution of actions duration." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 350, + 233, + 545, + 333 + ], + "lines": [ + { + "bbox": [ + 350, + 233, + 545, + 333 + ], + "spans": [ + { + "bbox": [ + 350, + 233, + 545, + 333 + ], + "type": "image", + "image_path": "0a1eba5ead79a9e63953d95dda3e60b66d877b72f3bab94187dea5c4d8614333.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 403, + 348, + 492, + 359 + ], + "lines": [ + { + "bbox": [ + 403, + 348, + 492, + 359 + ], + "spans": [ + { + "bbox": [ + 403, + 348, + 492, + 359 + ], + "type": "text", + "content": "(f). Binaural delay per clip." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 365, + 546, + 388 + ], + "lines": [ + { + "bbox": [ + 46, + 365, + 546, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 546, + 388 + ], + "type": "text", + "content": "Figure 2. Dataset statistics analysis, on the distributions of (a) the scene category, (b) action distribution per cities, (c) temporal action instance duration, and (d) number of actions per video, (e) capturing time, (f) binaural delay per clip." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 408, + 287, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 408, + 287, + 492 + ], + "spans": [ + { + "bbox": [ + 46, + 408, + 287, + 492 + ], + "type": "text", + "content": "consent. Additionally, we have taken measures to protect privacy by anonymising the data. This includes applying a face detection mechanism to outline predicted face locations in each frame and applying blurring filters to maintain meaningful details while ensuring information security. More detailed information on our privacy protection measures can be found in the supplementary material." 
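A minimal sketch of the detect-and-blur anonymisation described above, assuming OpenCV's stock Haar cascade face detector and a Gaussian blur as stand-ins; the dataset's actual detector and filter settings are described in the supplementary material, not here.

# Placeholder sketch (not the dataset's actual tooling): detect faces with a stock
# Haar cascade and Gaussian-blur each detected region.
import cv2

face_detector = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

def anonymise_frame(frame_bgr):
    """Return a copy of the frame with every detected face region blurred."""
    gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    out = frame_bgr.copy()
    for (x, y, w, h) in faces:
        out[y:y + h, x:x + w] = cv2.GaussianBlur(out[y:y + h, x:x + w], (51, 51), 0)
    return out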
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 502, + 152, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 502, + 152, + 514 + ], + "spans": [ + { + "bbox": [ + 47, + 502, + 152, + 514 + ], + "type": "text", + "content": "3.3. Data Annotation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": "Scene label rationale. The " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": " dataset comprises a total of 28 scene categories (15 indoor scenes and 13 outdoor scenes), as illustrated in Figure 2(a). To establish comprehensive and authoritative scene categories that reflect daily life, we referred to the Places Database [34], which is derived from WordNet [18], as our primary basis. We then leverage the sophisticated semantic analysis capabilities of large language models, to conduct a thorough filtering and classification of a multitude of everyday scenes. This curation resulted in a refined set of 28 scene categories, each symbolising aspects of daily life. Simultaneously, the recordings concentrate on capturing common occurrences within conventional settings, providing a realistic depiction of everyday life. Detailed descriptions defining each category, along with discussions regarding these constraints and potential sampling biases, are presented in the supplimen" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 409, + 362, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 409, + 362, + 421 + ], + "spans": [ + { + "bbox": [ + 306, + 409, + 362, + 421 + ], + "type": "text", + "content": "tary material." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 431, + 545, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 431, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 304, + 431, + 545, + 635 + ], + "type": "text", + "content": "Temporal segmentation label. We also provide temporal segment labelling for the understanding of activities in the shooting scenes. We follow the activity hierarchy standard defined by ActivityNet [5], which provides a comprehensive categorisation of human activities, consisting of seven top-level categories (Personal Care, Eating and Drinking, Household, Caring and Helping, Working, Socialising and Leisure, and Sports and Exercises). To capture the diversity and granularity of activities within each category, we defined a total of 38 action instances, covering specific actions and behaviours. To ensure high-quality annotations, the temporal segmentation labelling was annotated by three experienced annotators. Each annotator independently annotated the temporal segments corresponding to the activities in the videos. To obtain a consensus, we merged the individual annotations and resolved any discrepancies according to discussion and consensus among the annotators." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 646, + 471, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 646, + 471, + 658 + ], + "spans": [ + { + "bbox": [ + 306, + 646, + 471, + 658 + ], + "type": "text", + "content": "3.4. 
Dataset Statistics and Analysis" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 665, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 546, + 713 + ], + "type": "text", + "content": "Overview. Existing publicly available datasets primarily focus on visual unimodality [4, 5, 13, 15, 23]. In contrast, our dataset introduces a novel approach by collecting different views or modalities, as presented in Table 1, including" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19376" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 91, + 544, + 192 + ], + "blocks": [ + { + "bbox": [ + 122, + 71, + 470, + 82 + ], + "lines": [ + { + "bbox": [ + 122, + 71, + 470, + 82 + ], + "spans": [ + { + "bbox": [ + 122, + 71, + 470, + 82 + ], + "type": "text", + "content": "Table 1. Dataset comparison. Ego: Egocentric, V: Video, A: Audio, A+V: Audio-visual events." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 91, + 544, + 192 + ], + "lines": [ + { + "bbox": [ + 49, + 91, + 544, + 192 + ], + "spans": [ + { + "bbox": [ + 49, + 91, + 544, + 192 + ], + "type": "table", + "html": "
Dataset | Video Viewpoints (Third-person Front View, 360° Panoramic, Ego Monocular, Ego Binocular) | Other Modalities (Normal Audio, Directional Binaural Delay, GPS Info) | Statistics: Avg Duration | Total Duration (s) | Frames Count (K) | Attributions: Annotations Source | Multiple Events
UCF101 [23] | XXXXX | 7.21 s | 96,000 | 2,400 | V | X
Kinetics [13] | XXXXXX | 10 s | 2,998,800 | 74,970 | V | X
HMDB51 [14] | XXXXXX | 3 s | 21,426 | 643 | V | X
ActivityNet [5] | XXXXXX | 2 min | 2,332,800 | 11,664 | V
EPIC-Kitchens [4] | XXXXX | 7.6 min | 198,000 | 11,500 | V | X
Ego4D [9] | XXXX | 8 min | 13,212,000 | - | A+V
360+x (Ours) | 6.2 min | 244,000 | 8,579 | A+V
", + "image_path": "ab4b351576efb8a898937522aa6c25be7769c22977ada0c4e8bf27926d1f46e5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 213, + 287, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 213, + 287, + 320 + ], + "spans": [ + { + "bbox": [ + 46, + 213, + 287, + 320 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 213, + 287, + 320 + ], + "type": "text", + "content": " panoramic video, third-person front view video, egocentric monocular video, egocentric binocular video, normal audio, directional binaural delay, location and textual scene description. This diverse range of modalities provides multiple dimensions and clues for understanding and analysing complex scenes. Our dataset consists of 2,152 videos representing 232 data examples, with 464 videos captured using the 360 camera and the remaining 1,688 recorded with the Spectacles camera." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 320, + 287, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 320, + 287, + 440 + ], + "spans": [ + { + "bbox": [ + 46, + 320, + 287, + 440 + ], + "type": "text", + "content": "Figure 2(a) presents the distribution of video counts across each of the 28 scene categories. Our dataset is characterised by a balanced distribution of data across these scenes. Notably, it diverges from conventional databases like UCF101 [23], Kinetics [13], HMDB [15], and ActivityNet [5], particularly in terms of average video duration, which is approximately 6.2 minutes. This longer duration is crucial for maintaining the integrity and coherence of actions within each scene, allowing for a comprehensive temporal analysis of the activities." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 451, + 287, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 451, + 287, + 571 + ], + "spans": [ + { + "bbox": [ + 46, + 451, + 287, + 571 + ], + "type": "text", + "content": "Temporal segment label. The annotations of temporal segment labels in our dataset contribute to the fine-grained analysis of activities. We defined 38 action instances representing specific actions and behaviours. The length of each segment labelled with a specific activity varies across the dataset, as depicted in Figure 2(c). Note we acknowledge the significance of audio in accurately identifying certain actions, such as 'coughing' or 'clapping'. Therefore, our dataset combines audio information to enhance accuracy in action recognition [4, 5, 13, 15, 23], as shown in Table 1." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 582, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 715 + ], + "type": "text", + "content": "Comparative complexity. Due to its realistic scene simulation, our dataset offers more complexity compared to previous datasets. This complexity arises from the diverse range of activities and interactions captured, resulting in a more challenging and realistic setting for scene understanding and activity recognition. As shown in Figure 2(d), most existing datasets, such as UCF101 [23], Kinetics [13], and HMDB51 [14], typically consist of one action instance per video. 
While datasets like Ego4D [9] and ActivityNet [5] have large volumes and broad coverage, they often contain a limited number of action instances per individual video." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 212, + 545, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 212, + 545, + 321 + ], + "spans": [ + { + "bbox": [ + 304, + 212, + 545, + 321 + ], + "type": "text", + "content": "The HACS dataset [33] contains more multiple action instances per video but still pales in comparison to the richness of the proposed dataset. Our dataset surpasses these existing datasets in terms of the number of action instances per video, showcasing the extensive variety of activities captured. The improved complexity and richness of our dataset enable follow-up research to explore and develop more robust algorithms, pushing the boundaries of scene understanding in real-world contexts." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 331, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 546, + 715 + ], + "type": "text", + "content": "Data distribution. We have ensured a balanced distribution across various dimensions, including scene categories, action instances, binaural delay, etc. Figure 2(a) depicts the scene number distribution across 28 scene categories, demonstrating a comprehensive coverage of scene categories. Notably, the dataset achieves an almost equal proportion of indoor and outdoor scenes, accounting for " + }, + { + "bbox": [ + 304, + 331, + 546, + 715 + ], + "type": "inline_equation", + "content": "54.7\\%" + }, + { + "bbox": [ + 304, + 331, + 546, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 331, + 546, + 715 + ], + "type": "inline_equation", + "content": "45.3\\%" + }, + { + "bbox": [ + 304, + 331, + 546, + 715 + ], + "type": "text", + "content": " respectively. Our dataset allows each scene to conclude multiple diverse action instances naturally, and also enables different scenes to share common action instances. Notably, in Figure 2(b), it displays the 'types of action per location' that can be observed in the geographic distribution and the diversity of the data, where the inner circle shows the location and the outer circle shows the action types captured in each location. As illustrated in Figure 2(c), the distribution of action duration shows our dataset has captured extensive and realistic human behaviours across natural scenes. One interesting observation from our dataset is the high-frequency occurrence of action 'operating phone', which contributes " + }, + { + "bbox": [ + 304, + 331, + 546, + 715 + ], + "type": "inline_equation", + "content": "17.54\\%" + }, + { + "bbox": [ + 304, + 331, + 546, + 715 + ], + "type": "text", + "content": " of the whole duration, providing a reflection of mobile usage in modern daily life. Additionally, the dataset offers valuable directional audio to supplement visual understanding. The distribution of data capture times in the dataset corresponds with natural human activities, as shown in Figure 2(e). Human activities throughout the day are mainly concentrated during the daytime (more in the afternoon and evening). Figure 2(f) illustrates the diversity of binaural delay for each clip. The positive point means the audio is directed towards the left direction while the negative the right. 
In summary, the presented " + }, + { + "bbox": [ + 304, + 331, + 546, + 715 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 304, + 331, + 546, + 715 + ], + "type": "text", + "content": " dataset covers broad modalities and diversity with an authentic distribution from different per" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19377" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 193, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 193, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 193, + 85 + ], + "type": "text", + "content": "spectives, mimicking real daily life." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 99, + 212, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 99, + 212, + 113 + ], + "spans": [ + { + "bbox": [ + 47, + 99, + 212, + 113 + ], + "type": "text", + "content": "4. Benchmark and Experiments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 119, + 286, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 119, + 286, + 191 + ], + "spans": [ + { + "bbox": [ + 47, + 119, + 286, + 191 + ], + "type": "text", + "content": "To establish a comprehensive benchmark for the presented " + }, + { + "bbox": [ + 47, + 119, + 286, + 191 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 47, + 119, + 286, + 191 + ], + "type": "text", + "content": " dataset, we choose five visual understanding tasks to delve into the exploration of multiple viewpoints and modalities usage, including: video scene classification, temporal action localisation, cross-modality retrieval, self-supervised representation learning, and dataset adaptation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 192, + 286, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 192, + 286, + 228 + ], + "spans": [ + { + "bbox": [ + 47, + 192, + 286, + 228 + ], + "type": "text", + "content": "Remark: Unless specifically stated otherwise, the experiments on " + }, + { + "bbox": [ + 47, + 192, + 286, + 228 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 47, + 192, + 286, + 228 + ], + "type": "text", + "content": " will utilise three views: the " + }, + { + "bbox": [ + 47, + 192, + 286, + 228 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 192, + 286, + 228 + ], + "type": "text", + "content": " view, egocentric binocular view, and the third-person front view." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 239, + 170, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 170, + 251 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 170, + 251 + ], + "type": "text", + "content": "4.1. Experimental Setting" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 258, + 286, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 258, + 286, + 378 + ], + "spans": [ + { + "bbox": [ + 47, + 258, + 286, + 378 + ], + "type": "text", + "content": "Models. 
We employed a consistent set of model backbones across different tasks to minimise model interference, except for temporal action localisation task (detailed in section 4.3). We followed the commonly used setup and selected the backbone I3D [13] as our video model. To handle audio-related aspects, we chose the VGGish [12] as our audio model. Additionally, for directional binaural feature extraction, we utilised the ResNet-18 model [11]. A linear layer is positioned after the backbones to carry out each specific task based on backbone output features." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 379, + 286, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 379, + 286, + 559 + ], + "spans": [ + { + "bbox": [ + 47, + 379, + 286, + 559 + ], + "type": "text", + "content": "It is important to note that a simple concatenation of all modalities features can diminish the potential information derived from multi-modality [26]. Therefore, instead of solely concatenating modality features, we leverage a hierarchical attention mechanism for multi-modality integration. In this approach, the directional binaural feature serves as an attention query to direct focused attention towards the audio feature, enabling it to encapsulate the directional information into the audio feature. At the same time, the audio feature is also leveraged by acting as a query itself, enabling it to attentively interact with the video feature. This mechanism allows for creating a synergistic representation of the underlying data that integrates the features of all modalities. For more details and in-depth analysis, please refer to the supplementary material." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 570, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 570, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 570, + 286, + 713 + ], + "type": "text", + "content": "Training and verification setup. For each temporal action localisation model, we follow their original training settings. For I3D, VGGish, and ResNet-18 networks, the training settings are 200 epochs with the parameters described in [19]. The training process utilises the AdamW optimiser with a learning rate of " + }, + { + "bbox": [ + 47, + 570, + 286, + 713 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 47, + 570, + 286, + 713 + ], + "type": "text", + "content": " and a decay rate of 0.1 at the 80th and 120th epochs. We also apply data augmentation techniques such as rotation, scaling, and colour jittering. The dataset was divided into training, validation, and test sets, following an 80/10/10 split. To ensure a balanced representation of scene categories, the examples were stratified probabilistically across the sets." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 309, + 124, + 544, + 210 + ], + "blocks": [ + { + "bbox": [ + 306, + 71, + 545, + 115 + ], + "lines": [ + { + "bbox": [ + 306, + 71, + 545, + 115 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 545, + 115 + ], + "type": "text", + "content": "Table 2. Video classification performance across different views (Ego: egocentric binocular view, Front: third-person front view, and " + }, + { + "bbox": [ + 306, + 71, + 545, + 115 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 306, + 71, + 545, + 115 + ], + "type": "text", + "content": " .. 
" + }, + { + "bbox": [ + 306, + 71, + 545, + 115 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 306, + 71, + 545, + 115 + ], + "type": "text", + "content": " view) and data modalities (V: Video, A: Audio, D: Directional binaural delay). Reported in Avg. Prec. " + }, + { + "bbox": [ + 306, + 71, + 545, + 115 + ], + "type": "inline_equation", + "content": "(\\%)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 124, + 544, + 210 + ], + "lines": [ + { + "bbox": [ + 309, + 124, + 544, + 210 + ], + "spans": [ + { + "bbox": [ + 309, + 124, + 544, + 210 + ], + "type": "table", + "html": "
Selected Views | Modalities: V | V + A | V + A + D
Egocentric Only | 51.95 (±0.0) | 55.24 (±0.0) | 58.92
Front Only | 54.05 (+2.1) | 65.33 (+10.1) | 67.19
360° Only | 56.33 (+4.4) | 67.14 (+11.9) | 70.95
360° + Egocentric | 58.99 (+7.0) | 70.48 (+15.2) | 72.11
360° + Front | 59.70 (+7.8) | 75.06 (+19.8) | 77.69
360° + Front + Ego | 63.73 (+11.8) | 77.32 (+22.1) | 80.62
", + "image_path": "410f6847b750728d14e5ecd7b880836dd41521e883b67d793395003cd75ca657.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 230, + 451, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 230, + 451, + 243 + ], + "spans": [ + { + "bbox": [ + 306, + 230, + 451, + 243 + ], + "type": "text", + "content": "4.2. Video Scene Classification" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 249, + 545, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 249, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 306, + 249, + 545, + 285 + ], + "type": "text", + "content": "Video scene classification assigns scene labels to videos based on their frames, enabling analysis of visual content and determining the subject matter." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 293, + 545, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 293, + 545, + 473 + ], + "spans": [ + { + "bbox": [ + 304, + 293, + 545, + 473 + ], + "type": "text", + "content": "Single view vs. multi-view. First, we are interested in the influence of different combinations of video views on the classification performance. The results, representing each combination, are summarised in Table 2. The results for single views are presented in the first three rows, indicating that using a single " + }, + { + "bbox": [ + 304, + 293, + 545, + 473 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 293, + 545, + 473 + ], + "type": "text", + "content": " panoramic view outperforms using either an egocentric binocular view or a third-person front view only. When employing multiple views, it is noted that better performance can be achieved compared to using a single view. Specifically, utilising all three views leads to the best performance. Such a performance can be attributed to the fact that although these three views describe the same scene, each different view offers a unique perspective that contributes to a more comprehensive understanding of the scene, resulting in improved performance." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 482, + 545, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 482, + 545, + 577 + ], + "spans": [ + { + "bbox": [ + 304, + 482, + 545, + 577 + ], + "type": "text", + "content": "Single-modality vs. multi-modality and more. We further investigate the impact of modalities on the model's performance. Various combinations of modalities are analysed, and the results are summarised in Table 2 on a column-wise basis. In particular, the first column represents the visual modality alone, the second column combines video with audio, and the last column incorporates visual, audio, and directional binaural information modalities." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 578, + 545, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 578, + 545, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 578, + 545, + 662 + ], + "type": "text", + "content": "The inclusion of additional modalities leads to average precision improvements. 
For example, when all three views are utilised, incorporating more modalities results in improvements of " + }, + { + "bbox": [ + 304, + 578, + 545, + 662 + ], + "type": "inline_equation", + "content": "13.59\\%" + }, + { + "bbox": [ + 304, + 578, + 545, + 662 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 578, + 545, + 662 + ], + "type": "inline_equation", + "content": "16.89\\%" + }, + { + "bbox": [ + 304, + 578, + 545, + 662 + ], + "type": "text", + "content": ", respectively. This underscores the benefits of leveraging multiple modalities for a more comprehensive understanding of the scene and enhancing overall performance." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 670, + 468, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 468, + 684 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 468, + 684 + ], + "type": "text", + "content": "4.3. Temporal Action Localisation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "Temporal Action Localisation (TAL) is a video understanding task that involves the dense identification and temporal" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19378" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 102, + 545, + 174 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 547, + 95 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 547, + 95 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 547, + 95 + ], + "type": "text", + "content": "Table 3. Temporal action localisation results. Baseline extractors are used in [2, 21, 24, 32]. The mAP@σ represents the mean average precision (%) at a threshold of σ. The best performance is achieved by employing " + }, + { + "bbox": [ + 47, + 70, + 547, + 95 + ], + "type": "inline_equation", + "content": "V + A + D" + }, + { + "bbox": [ + 47, + 70, + 547, + 95 + ], + "type": "text", + "content": " modalities with extractors pre-trained on " + }, + { + "bbox": [ + 47, + 70, + 547, + 95 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 47, + 70, + 547, + 95 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 102, + 545, + 174 + ], + "lines": [ + { + "bbox": [ + 48, + 102, + 545, + 174 + ], + "spans": [ + { + "bbox": [ + 48, + 102, + 545, + 174 + ], + "type": "table", + "html": "
Extractors | Modalities | Actionformer [32]: mAP@0.5 | mAP@0.75 | mAP@0.95 | Avg. | TemporalMaxer [24]: mAP@0.5 | mAP@0.75 | mAP@0.95 | Avg. | TriDet [21]: mAP@0.5 | mAP@0.75 | mAP@0.95 | Avg.
Baseline Extractors | V | 11.9 (±0.0) | 7.8 (±0.0) | 3.3 (±0.0) | 7.7 (±0.0) | 13.1 (±0.0) | 8.8 (±0.0) | 3.7 (±0.0) | 8.6 (±0.0) | 16.7 (±0.0) | 10.1 (±0.0) | 4.8 (±0.0) | 10.5 (±0.0)
Baseline Extractors | V + A | 19.1 (+7.2) | 11.3 (+3.5) | 4.2 (+0.9) | 11.5 (+3.8) | 21.0 (+7.9) | 14.8 (+6.0) | 5.6 (+1.9) | 13.8 (+5.2) | 23.6 (+6.9) | 17.2 (+7.1) | 6.4 (+1.6) | 15.7 (+5.2)
Pre-trained on 360+x | V | 16.4 (+4.5) | 9.8 (+2.0) | 3.9 (+0.6) | 10.0 (+2.3) | 20.4 (+7.3) | 14.3 (+5.5) | 5.2 (+1.5) | 13.3 (+4.7) | 21.1 (+4.4) | 15.3 (+5.2) | 5.5 (+0.7) | 14.0 (+3.5)
Pre-trained on 360+x | V + A | 23.6 (+11.7) | 16.9 (+9.1) | 5.7 (+2.4) | 15.4 (+7.7) | 25.8 (+12.7) | 18.0 (+9.2) | 6.4 (+2.7) | 16.7 (+8.1) | 26.4 (+8.7) | 18.5 (+8.4) | 6.9 (+2.1) | 17.3 (+6.8)
Pre-trained on 360+x | V + A + D | 24.9 (+13.0) | 17.4 (+9.6) | 6.1 (+2.8) | 16.1 (+8.4) | 26.6 (+13.5) | 18.3 (+9.5) | 6.5 (+2.8) | 17.1 (+8.5) | 27.1 (+10.4) | 18.7 (+8.6) | 7.0 (+2.2) | 17.6 (+7.1)
", + "image_path": "e8b7df263babb295d9f03fb834d6f88104ff122f660c20873ee10874619d9150.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 194, + 288, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 194, + 288, + 254 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 288, + 254 + ], + "type": "text", + "content": "segmentation of activities within a video stream over a specific time period. Current TAL approaches typically employ a two-stage paradigm [27, 32]. The first stage extracts features from the entire video, and the second stage predicts temporal segmentation based on these features." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 262, + 288, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 262, + 288, + 430 + ], + "spans": [ + { + "bbox": [ + 46, + 262, + 288, + 430 + ], + "type": "text", + "content": "Feature extractors. Baseline extractors are widely utilised for various datasets, e.g. ActivityNet [5] and Ego4D [9], on the TAL task. The baseline video features are obtained from an I3D model pre-trained on the Kinetics400 dataset [13]. The baseline audio features are derived from the pre-classification layer following activation of the VG-Gish model, pre-trained on AudioSet [7]. There is no baseline extractor for directional binaural delay feature, so the " + }, + { + "bbox": [ + 46, + 262, + 288, + 430 + ], + "type": "inline_equation", + "content": "\\mathrm{V + A + D}" + }, + { + "bbox": [ + 46, + 262, + 288, + 430 + ], + "type": "text", + "content": " modality was not included accordingly. For a fair comparison, we reused our video classification models in section 4.2 as Pre-trained on " + }, + { + "bbox": [ + 46, + 262, + 288, + 430 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 46, + 262, + 288, + 430 + ], + "type": "text", + "content": " extractors, following the same baseline extraction setup for both video and audio features. Additionally, the ResNet-18 feature extractor was used for directional binaural delay feature extraction." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 437, + 287, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 287, + 617 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 287, + 617 + ], + "type": "text", + "content": "Experimental results. We provide a concise overview of the performance comparison for various temporal action localisation methods, including ActionFormer [32], TriDet [21] and TemporalMaxer [24], between the baseline extractors and our Pre-trained on " + }, + { + "bbox": [ + 46, + 437, + 287, + 617 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 46, + 437, + 287, + 617 + ], + "type": "text", + "content": " extractors. The summarised results are presented in Table 3, from which we can see that the introduction of additional modalities (i.e. audio and direction binaural delay) has a prominent positive impact on the TAL task, leading to performance improvements for both sets of extractors. This result highlights the importance of leveraging multiple modalities in enhancing the accuracy and effectiveness of temporal activity localisation techniques. 
Using our custom extractors can provide additional improvements, as the baseline extractors may not be optimised for our specific binocular or " + }, + { + "bbox": [ + 46, + 437, + 287, + 617 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 437, + 287, + 617 + ], + "type": "text", + "content": " views." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 624, + 188, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 624, + 188, + 636 + ], + "spans": [ + { + "bbox": [ + 47, + 624, + 188, + 636 + ], + "type": "text", + "content": "4.4. Cross-modality Retrieval" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 643, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 643, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 643, + 288, + 715 + ], + "type": "text", + "content": "In this context, we focus on a series of retrieval tasks that across modalities including audio, video and directional binaural delay. In a modality-specific retrieval scenario, the query modality (Q) serves as the input for retrieving the key modality (K) in the Q-to-K retrieval task. The performance evaluation metric " + }, + { + "bbox": [ + 46, + 643, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{R}\\theta" + }, + { + "bbox": [ + 46, + 643, + 288, + 715 + ], + "type": "text", + "content": " represents the recall at ranks " + }, + { + "bbox": [ + 46, + 643, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 643, + 288, + 715 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 307, + 223, + 545, + 277 + ], + "blocks": [ + { + "bbox": [ + 305, + 193, + 545, + 215 + ], + "lines": [ + { + "bbox": [ + 305, + 193, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 305, + 193, + 545, + 215 + ], + "type": "text", + "content": "Table 4. Q-to-Video retrieval results. The superscript* indicates modalities are co-trained. Recall reported with rank in " + }, + { + "bbox": [ + 305, + 193, + 545, + 215 + ], + "type": "inline_equation", + "content": "\\{1,5,10\\}" + }, + { + "bbox": [ + 305, + 193, + 545, + 215 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 223, + 545, + 277 + ], + "lines": [ + { + "bbox": [ + 307, + 223, + 545, + 277 + ], + "spans": [ + { + "bbox": [ + 307, + 223, + 545, + 277 + ], + "type": "table", + "html": "
Query Modality | R1 (%) | R5 (%) | R10 (%)
A | 39.14 (±0.0) | 62.76 (±0.0) | 79.21 (±0.0)
A + D | 44.30 (+5.16) | 66.92 (+4.16) | 84.78 (+5.57)
(A + D)* | 55.88 (+16.74) | 72.53 (+9.77) | 86.6 (+7.39)
", + "image_path": "59992e95a4eaf03692782c57ba9c64aac734f475b86b6fa8cec47788baf79eb7.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 297, + 545, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 297, + 545, + 430 + ], + "spans": [ + { + "bbox": [ + 304, + 297, + 545, + 430 + ], + "type": "text", + "content": "Q-to-Video retrieval results. Table 4 illustrates the retrieval results for the Query modality retrieve videos. In this table, " + }, + { + "bbox": [ + 304, + 297, + 545, + 430 + ], + "type": "inline_equation", + "content": "A + D" + }, + { + "bbox": [ + 304, + 297, + 545, + 430 + ], + "type": "text", + "content": " denotes a set of independently trained audio and directional binaural features employed as query features. Moreover, " + }, + { + "bbox": [ + 304, + 297, + 545, + 430 + ], + "type": "inline_equation", + "content": "(A + D)^*" + }, + { + "bbox": [ + 304, + 297, + 545, + 430 + ], + "type": "text", + "content": " signifies the collaborative training of these features instead of treating them independently. The inter-modality retrieval results shown in Table 4 clearly show the modality compliance quality of the " + }, + { + "bbox": [ + 304, + 297, + 545, + 430 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 304, + 297, + 545, + 430 + ], + "type": "text", + "content": " dataset. Besides Q-to-Video retrieval, we also performed Q-to-Audio and Q-to-Directional binaural delay experiments, details can be found in the supplementary material." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 439, + 520, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 439, + 520, + 452 + ], + "spans": [ + { + "bbox": [ + 305, + 439, + 520, + 452 + ], + "type": "text", + "content": "4.5. Self-supervised Representation Learning" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 458, + 545, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 458, + 545, + 626 + ], + "spans": [ + { + "bbox": [ + 304, + 458, + 545, + 626 + ], + "type": "text", + "content": "Experiment setup. In this section, we investigated the impact of different self-supervised learning (SSL) methods using two engaging video pretext tasks: video pace (VP) prediction [25] and clip order (CO) shuffle prediction [29]. The VP task challenges the model to determine the pace of a video, while the CO task asks the model to rearrange shuffled video clips into their correct chronological order. The original VP and CO primarily concentrated on video data, but to capitalise on the advantages of multi-modality, we expanded these approaches to include audio and directional binaural delay modalities. This extension was done to align modality with the temporal coherence and dynamics observed in the video. For more comprehensive explanations, please refer to the supplementary material." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "content": "Experimental results. We first examined the impact of self-supervised learning models for video classification. Table 5 demonstrates the consistent precision gains achieved by utilising SSL pre-trained models. 
Notably, leveraging both video pace and clip order SSL techniques resulted in an average performance improvement of " + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\sim 7\\%" + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "19379" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 101, + 276, + 171 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 277, + 99 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 277, + 99 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 277, + 99 + ], + "type": "text", + "content": "Table 5. Models with different pre-train methods were fine-tuned and tested on video classification. The experiments use all three video views. Reported in Avg. Prec. " + }, + { + "bbox": [ + 50, + 70, + 277, + 99 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 50, + 70, + 277, + 99 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 101, + 276, + 171 + ], + "lines": [ + { + "bbox": [ + 52, + 101, + 276, + 171 + ], + "spans": [ + { + "bbox": [ + 52, + 101, + 276, + 171 + ], + "type": "table", + "html": "
Pre-train Method | Modalities: V | V + A | V + A + D
From Scratch | 63.73 (±0.0) | 77.32 (±0.0) | 80.62
Video Pace [25] | 69.27 (+5.5) | 79.56 (+2.2) | 81.97
Clip Order [29] | 69.91 (+6.2) | 80.14 (+2.8) | 82.18
VP [25] + CO [29] | 76.84 (+13.1) | 82.66 (+5.3) | 83.32
", + "image_path": "71c9eff591b34ce4bf3c47bc40bab9906b301543fb41036a33f0101c82f4b65c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 285, + 101, + 542, + 171 + ], + "blocks": [ + { + "bbox": [ + 283, + 70, + 542, + 99 + ], + "lines": [ + { + "bbox": [ + 283, + 70, + 542, + 99 + ], + "spans": [ + { + "bbox": [ + 283, + 70, + 542, + 99 + ], + "type": "text", + "content": "Table 6. Comparison between supervised pre-trained extractors with SSL pretrained counterparts on TAL task. The experiments use all three video views with modalities (V+A+D)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 285, + 101, + 542, + 171 + ], + "lines": [ + { + "bbox": [ + 285, + 101, + 542, + 171 + ], + "spans": [ + { + "bbox": [ + 285, + 101, + 542, + 171 + ], + "type": "table", + "html": "
Pre-train Method | mAP@0.5 | mAP@0.75 | mAP@0.95 | Avg.
Supervised | 27.1 (±0.0) | 18.7 (±0.0) | 7.0 (±0.0) | 17.6 (±0.0)
Video Pace [25] | 29.4 (+2.3) | 19.6 (+0.9) | 7.4 (+0.4) | 18.8 (+1.2)
Clip Order [29] | 28.9 (+1.8) | 19.3 (+0.6) | 7.3 (+0.3) | 18.5 (+0.9)
VP [25] + CO [29] | 30.3 (+3.2) | 20.2 (+1.5) | 7.9 (+0.9) | 19.5 (+1.9)
", + "image_path": "9582193217b6737bffede79e2961e5f793c3469dc64c93c19231ae0f741e753b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 49, + 196, + 544, + 251 + ], + "blocks": [ + { + "bbox": [ + 88, + 182, + 504, + 194 + ], + "lines": [ + { + "bbox": [ + 88, + 182, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 88, + 182, + 504, + 194 + ], + "type": "text", + "content": "Table 7. Following original setup of THUMOS14 dataset [8], our dataset adaptation task uses video modality only." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 196, + 544, + 251 + ], + "lines": [ + { + "bbox": [ + 49, + 196, + 544, + 251 + ], + "spans": [ + { + "bbox": [ + 49, + 196, + 544, + 251 + ], + "type": "table", + "html": "
Feature ExtractormAP@0.3mAP@0.4mAP@0.5mAP@0.6mAP0.7Avg.
Kinetics400 [13] (Pre-train)83.7 (±0.0)80.2 (±0.0)72.8 (±0.0)62.4 (±0.0)47.4 (±0.0)69.5 (±0.0)
360+x (Pre-train)84.5 (+0.8)81.0 (+0.8)73.4 (+0.6)65.9 (+3.5)54.6 (+7.2)71.9 (+2.4)
Kinetics400 [13] (Pre-train) and 360+x (Fine-tune)85.3 (+1.6)81.8 (+1.6)74.9 (+2.1)68.1 (+5.7)58.2 (+10.8)73.7 (+4.2)
", + "image_path": "30dc64e8faac9dc2b3e985d75a68e16572dbb2d3a0b84bfa741b8e10461e36fd.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 270, + 289, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 270, + 289, + 415 + ], + "spans": [ + { + "bbox": [ + 46, + 270, + 289, + 415 + ], + "type": "text", + "content": "We proceeded to perform experiments using SSL pretrained models as feature extractors for the temporal action localisation task incorporating all three modalities " + }, + { + "bbox": [ + 46, + 270, + 289, + 415 + ], + "type": "inline_equation", + "content": "(\\mathrm{V} + \\mathrm{A} + \\mathrm{D})" + }, + { + "bbox": [ + 46, + 270, + 289, + 415 + ], + "type": "text", + "content": " with the TriDet framework [21]. Since a training-from-scratch model cannot serve as the first-stage extractor, we employed the supervised extractors from section 4.2 as a comparison. The summarised results in Table 6 indicate that pre-training with video pace (VP) or clip order (CO) individually leads to an average performance improvement of " + }, + { + "bbox": [ + 46, + 270, + 289, + 415 + ], + "type": "inline_equation", + "content": "\\sim 1.2\\%" + }, + { + "bbox": [ + 46, + 270, + 289, + 415 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 270, + 289, + 415 + ], + "type": "inline_equation", + "content": "\\sim 0.9\\%" + }, + { + "bbox": [ + 46, + 270, + 289, + 415 + ], + "type": "text", + "content": " respectively on average, compared to the supervised baseline. The combination of both SSL methods yields the highest performance gain of " + }, + { + "bbox": [ + 46, + 270, + 289, + 415 + ], + "type": "inline_equation", + "content": "\\sim 1.9\\%" + }, + { + "bbox": [ + 46, + 270, + 289, + 415 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 427, + 271, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 427, + 271, + 441 + ], + "spans": [ + { + "bbox": [ + 47, + 427, + 271, + 441 + ], + "type": "text", + "content": "4.6. Pre-training Model for Dataset Adaptation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 448, + 287, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 448, + 287, + 509 + ], + "spans": [ + { + "bbox": [ + 46, + 448, + 287, + 509 + ], + "type": "text", + "content": "This section explores the efficacy of leveraging models pretrained on the " + }, + { + "bbox": [ + 46, + 448, + 287, + 509 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 46, + 448, + 287, + 509 + ], + "type": "text", + "content": " dataset for adaptation to other datasets like THUMOS14 [8]. By adhering to THUMOS14 setup, the experiments use TriDet framework [21] for conducting Temporal Action Localisation (TAL)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "text", + "content": "The performance of this experiment, specifically the mean average precision (mAP) scores covering IoU thresholds from 0.3 to 0.7, are presented in Table 7. 
As outlined by the results, exclusive reliance on " + }, + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "text", + "content": " video data for training showcases the potential for enhanced performance as compared to training solely based on the Kinetics400 dataset [13]. Remarkably, this performance improvement becomes more prominent at higher IoU thresholds. The utmost optimal performance, however, emerges through a two-step approach, commencing with pre-training on the Kinetics400 dataset followed by fine-tuning on the " + }, + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "text", + "content": " dataset with an average " + }, + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\sim 4.2\\%" + }, + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "text", + "content": " improvement compared to solely Kinetics400 pre-trained extractor. This finding showcases that the employment of the " + }, + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "text", + "content": " dataset for feature extractor training can be beneficial for dataset adaptation in sub-stream tasks. More experimental results on dataset integration are available in the supplementary materials." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 269, + 384, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 269, + 384, + 282 + ], + "spans": [ + { + "bbox": [ + 305, + 269, + 384, + 282 + ], + "type": "text", + "content": "5. Conclusions" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 289, + 547, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 289, + 547, + 564 + ], + "spans": [ + { + "bbox": [ + 304, + 289, + 547, + 564 + ], + "type": "text", + "content": "In this work, we studied the problem of panoptic scene understanding and presented, to our knowledge, the first-of-its-kind dataset " + }, + { + "bbox": [ + 304, + 289, + 547, + 564 + ], + "type": "inline_equation", + "content": "-360 + x" + }, + { + "bbox": [ + 304, + 289, + 547, + 564 + ], + "type": "text", + "content": " to support the study. The proposed " + }, + { + "bbox": [ + 304, + 289, + 547, + 564 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 304, + 289, + 547, + 564 + ], + "type": "text", + "content": " is a large-scale multi-modal dataset that consists of several different viewpoints (e.g. egocentric, third-person-view, and panoramic view) and covers various real-world activities in real daily life. With the most possibly available perspectives describing a real-world scene, " + }, + { + "bbox": [ + 304, + 289, + 547, + 564 + ], + "type": "inline_equation", + "content": "360 + x" + }, + { + "bbox": [ + 304, + 289, + 547, + 564 + ], + "type": "text", + "content": " aims to support the research in understanding the world around us in a way that humans understand (and even beyond). Additionally, we also presented a benchmark study of several scene understanding tasks based on this newly collected dataset, with a comparison to other existing datasets. 
Extensive experimental analysis validated the effectiveness of each of the perspectives within our dataset, and also suggested interesting insights, confirming that with more viewpoints or data modalities, the understanding of a scene could be more comprehensive. Surprisingly, models trained without manual annotation (i.e. self-supervised learning) on our dataset even perform better than those trained with human annotations in a fully supervised manner. We hope this new dataset could bring in new directions towards scene understanding and look forward to the research on them." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 571, + 397, + 583 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 571, + 397, + 583 + ], + "spans": [ + { + "bbox": [ + 306, + 571, + 397, + 583 + ], + "type": "text", + "content": "Acknowledgement" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 589, + 547, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 589, + 547, + 698 + ], + "spans": [ + { + "bbox": [ + 304, + 589, + 547, + 698 + ], + "type": "text", + "content": "This project was partially supported by the Ramsay Research Fund, and the Royal Society Short Industry Fellowship (SIF\\R1\\231009). Y. Hou and C. Qu were partially supported by the CSC grant (No.202308060328) and Allsee Technologies Ltd., respectively. The computations described in this research were performed using the Baskerville Tier 2 HPC service1 (funded by EP/T022221/1 and EP/W032244/1) and is operated by Advanced Research Computing at the University of Birmingham." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 317, + 702, + 421, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 702, + 421, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 702, + 421, + 713 + ], + "type": "text", + "content": "1https://www.baskerville.ac.uk/" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "19380" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 92, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 53, + 92, + 287, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 92, + 287, + 146 + ], + "spans": [ + { + "bbox": [ + 53, + 92, + 287, + 146 + ], + "type": "text", + "content": "[1] Keshav Bhandari, Mario A DeLaGarza, Ziliang Zong, Hugo Latapie, and Yan Yan. Egok360: A 360 egocentric kinetic human activity video dataset. In 2020 IEEE International Conference on Image Processing (ICIP), pages 266-270. IEEE, 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 148, + 288, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 148, + 288, + 203 + ], + "spans": [ + { + "bbox": [ + 53, + 148, + 288, + 203 + ], + "type": "text", + "content": "[2] Honglie Chen, Weidi Xie, Andrea Vedaldi, and Andrew Zisserman. 
Vggsound: A large-scale audio-visual dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 721-725. IEEE, 2020. 1, 3, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 205, + 287, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 287, + 250 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 287, + 250 + ], + "type": "text", + "content": "[3] Ziyang Chen, David F Fouhey, and Andrew Owens. Sound localization by self-supervised time delay estimation. In European Conference on Computer Vision, pages 489-508. Springer, 2022. 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 251, + 287, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 251, + 287, + 317 + ], + "spans": [ + { + "bbox": [ + 53, + 251, + 287, + 317 + ], + "type": "text", + "content": "[4] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, et al. Scaling egocentric vision: The epic-kitchens dataset. In Proceedings of the European Conference on Computer Vision (ECCV), pages 720-736, 2018. 1, 2, 4, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 319, + 287, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 319, + 287, + 374 + ], + "spans": [ + { + "bbox": [ + 53, + 319, + 287, + 374 + ], + "type": "text", + "content": "[5] Bernard Ghanem Fabian Caba Heilbron, Victor Escorcia and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 961-970, 2015. 1, 2, 4, 5, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 376, + 287, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 376, + 287, + 442 + ], + "spans": [ + { + "bbox": [ + 53, + 376, + 287, + 442 + ], + "type": "text", + "content": "[6] Jort F. Gemmeke, Daniel P. W. Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R. Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780, 2017. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 445, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 445, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 53, + 445, + 287, + 510 + ], + "type": "text", + "content": "[7] Jort F. Gemmeke, Daniel P. W. Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R. Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), page 776-780. IEEE Press, 2017. 1, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 512, + 287, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 512, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 53, + 512, + 287, + 555 + ], + "type": "text", + "content": "[8] A. Gorban, H. Idrees, Y.-G. Jiang, A. Roshan Zamir, I. Laptev, M. Shah, and R. Sukthankar. Thumos challenge: Action recognition with a large number of classes. http://www.thumos.info, 2015. 
8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 558, + 287, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 558, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 53, + 558, + 287, + 633 + ], + "type": "text", + "content": "[9] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18995-19012, 2022. 1, 2, 5, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 636, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 636, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 636, + 287, + 712 + ], + "type": "text", + "content": "[10] Chunhui Gu, Chen Sun, David A Ross, Carl Vondrick, Caroline Pantofaru, Yeqing Li, Sudheendra Vijayanarasimhan, George Toderici, Susanna Ricco, Rahul Sukthankar, et al. Ava: A video dataset of spatio-temporally localized atomic visual actions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6047-6056, 2018. 3" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 118, + 547, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 547, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 547, + 183 + ], + "type": "text", + "content": "[12] Shawn Hershey, Sourish Chaudhuri, Daniel PW Ellis, Jort F Gemmeke, Aren Jansen, R Channing Moore, Manoj Plakal, Devin Platt, Rif A Saurous, Bryan Seybold, et al. Cnn architectures for large-scale audio classification. In 2017 iee international conference on acoustics, speech and signal processing (icassp), pages 131-135. IEEE, 2017. 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 184, + 547, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 184, + 547, + 237 + ], + "spans": [ + { + "bbox": [ + 308, + 184, + 547, + 237 + ], + "type": "text", + "content": "[13] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. 1, 2, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 239, + 545, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 239, + 545, + 282 + ], + "spans": [ + { + "bbox": [ + 308, + 239, + 545, + 282 + ], + "type": "text", + "content": "[14] H. Kuehne, H. Jhuang, E. Garrote, T. Poggio, and T. Serre. HMDB: a large video database for human motion recognition. In Proceedings of the International Conference on Computer Vision (ICCV), 2011. 
5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 283, + 545, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 283, + 545, + 336 + ], + "spans": [ + { + "bbox": [ + 308, + 283, + 545, + 336 + ], + "type": "text", + "content": "[15] Hildegard Kuehne, Hueihan Jhuang, Estíbaliz Garrote, Tomaso Poggio, and Thomas Serre. Hmdb: a large video database for human motion recognition. In 2011 International conference on computer vision, pages 2556-2563. IEEE, 2011. 1, 4, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 338, + 545, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 338, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 308, + 338, + 545, + 381 + ], + "type": "text", + "content": "[16] Yiyi Liao, Jun Xie, and Andreas Geiger. Kitti-360: A novel dataset and benchmarks for urban scene understanding in 2d and 3d. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 383, + 545, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 383, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 308, + 383, + 545, + 425 + ], + "type": "text", + "content": "[17] Benjamin R Meagher. Ecologizing social psychology: The physical environment as a necessary constituent of social processes. *Personality and social psychology review*, 24(1): 3-23, 2020. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 426, + 545, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 426, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 308, + 426, + 545, + 448 + ], + "type": "text", + "content": "[18] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41, 1995. 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 449, + 545, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 449, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 308, + 449, + 545, + 492 + ], + "type": "text", + "content": "[19] Xiaokang Peng, Yake Wei, Andong Deng, Dong Wang, and Di Hu. Balanced multimodal learning via on-the-fly gradient modulation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 494, + 545, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 494, + 545, + 559 + ], + "spans": [ + { + "bbox": [ + 308, + 494, + 545, + 559 + ], + "type": "text", + "content": "[20] Daniel Scharstein, Heiko Hirschmüller, York Kitajima, Greg Krathwohl, Nera Nesic, Xi Wang, and Porter Westling. High-resolution stereo datasets with subpixel-accurate ground truth. In Pattern Recognition: 36th German Conference, GCPR 2014, Münster, Germany, September 2-5, 2014, Proceedings 36, pages 31-42. Springer, 2014. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 559, + 545, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 559, + 545, + 613 + ], + "spans": [ + { + "bbox": [ + 308, + 559, + 545, + 613 + ], + "type": "text", + "content": "[21] Dingfeng Shi, Yujie Zhong, Qiong Cao, Lin Ma, Jia Li, and Dacheng Tao. Tridet: Temporal action detection with relative boundary modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18857-18866, 2023. 
7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 614, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 614, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 308, + 614, + 545, + 678 + ], + "type": "text", + "content": "[22] Shuran Song, Andy Zeng, Angel X Chang, Manolis Savva, Silvio Savarese, and Thomas Funkhouser. Im2pano3d: Extrapolating 360 structure and semantics beyond the field of view. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3847-3856, 2018. 1, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 680, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 680, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 680, + 545, + 713 + ], + "type": "text", + "content": "[23] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402, 2012. 1, 2, 4, 5" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19381" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 586 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[24] Tuan N Tang, Kwonyoung Kim, and Kwanghoon Sohn. Temporalmaxer: Maximize temporal context with only max pooling for temporal action localization. arXiv preprint arXiv:2303.09055, 2023. 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 287, + 150 + ], + "type": "text", + "content": "[25] Jiangliu Wang, Jianbo Jiao, and Yunhui Liu. Self-supervised video representation learning by pace prediction. In European Conference on Computer Vision, 2020. 7, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 152, + 288, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 152, + 288, + 195 + ], + "spans": [ + { + "bbox": [ + 49, + 152, + 288, + 195 + ], + "type": "text", + "content": "[26] Weiyao Wang, Du Tran, and Matt Feiszli. What makes training multi-modal classification networks hard? In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12695-12705, 2020. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 197, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 197, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 49, + 197, + 287, + 239 + ], + "type": "text", + "content": "[27] Xiang Wang, Zhiwu Qing, Ziyuan Huang, Yutong Feng, Shiwei Zhang, Jianwen Jiang, Mingqian Tang, Changxin Gao, and Nong Sang. Proposal relation network for temporal action detection. arXiv preprint arXiv:2106.11812, 2021. 
7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 241, + 287, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 241, + 287, + 295 + ], + "spans": [ + { + "bbox": [ + 49, + 241, + 287, + 295 + ], + "type": "text", + "content": "[28] Jianxiong Xiao, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Recognizing scene viewpoint using panoramic place representation. In 2012 IEEE Conference on Computer Vision and Pattern Recognition, pages 2695-2702. IEEE, 2012. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 297, + 287, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 297, + 287, + 341 + ], + "spans": [ + { + "bbox": [ + 49, + 297, + 287, + 341 + ], + "type": "text", + "content": "[29] Dejing Xu, Jun Xiao, Zhou Zhao, Jian Shao, Di Xie, and Yueting Zhuang. Self-supervised spatiotemporal learning via video clip order prediction. In Computer Vision and Pattern Recognition (CVPR), 2019. 7, 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 342, + 287, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 342, + 287, + 396 + ], + "spans": [ + { + "bbox": [ + 49, + 342, + 287, + 396 + ], + "type": "text", + "content": "[30] Guorun Yang, Xiao Song, Chaoqin Huang, Zhidong Deng, Jianping Shi, and Bolei Zhou. Drivingstereo: A large-scale dataset for stereo matching in autonomous driving scenarios. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 899–908, 2019. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 398, + 287, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 398, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 49, + 398, + 287, + 441 + ], + "type": "text", + "content": "[31] Serena Yeung, Olga Russakovsky, Ning Jin, Mykhaylo Andriluka, Greg Mori, and Li Fei-Fei. Every moment counts: Dense detailed labeling of actions in complex videos. International Journal of Computer Vision, 126:375–389, 2018. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 443, + 287, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 443, + 287, + 485 + ], + "spans": [ + { + "bbox": [ + 49, + 443, + 287, + 485 + ], + "type": "text", + "content": "[32] Chen-Lin Zhang, Jianxin Wu, and Yin Li. Actionformer: Localizing moments of actions with transformers. In European Conference on Computer Vision, pages 492-510. Springer, 2022. 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 487, + 287, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 487, + 287, + 542 + ], + "spans": [ + { + "bbox": [ + 49, + 487, + 287, + 542 + ], + "type": "text", + "content": "[33] Hang Zhao, Antonio Torralba, Lorenzo Torresani, and Zhicheng Yan. Hacs: Human action clips and segments dataset for recognition and temporal localization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8668-8678, 2019. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 544, + 287, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 544, + 287, + 586 + ], + "spans": [ + { + "bbox": [ + 49, + 544, + 287, + 586 + ], + "type": "text", + "content": "[34] Bolei Zhou, Agata Lapedriza, Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 million image database for scene recognition. IEEE transactions on pattern analysis and machine intelligence, 40(6):1452-1464, 2017. 
4" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "19382" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_content_list.json b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..472aacaec492ece2c1b86c6b89b68f4ad38b7332 --- /dev/null +++ b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_content_list.json @@ -0,0 +1,1414 @@ +[ + { + "type": "text", + "text": "360DVD: Controllable Panorama Video Generation with 360-Degree Video Diffusion Model", + "text_level": 1, + "bbox": [ + 197, + 130, + 772, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Qian Wang $^{1,2}$ , Weiqi Li $^{1}$ , Chong Mou $^{1,2}$ , Xinhua Cheng $^{1,2}$ , Jian Zhang $^{1,2}$ $^{1}$ School of Electronic and Computer Engineering, Peking University \n $^{2}$ Peking University Shenzhen Graduate School-Rabbitpre AIGC Joint Research Laboratory \n{qianwang, liweiqi, eechongm, chengxinhua}@stu.pku.edu.cn, zhangjian.sz@pku.edu.cn", + "bbox": [ + 125, + 202, + 843, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 308, + 313, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Panorama video recently attracts more interest in both study and application, courtesy of its immersive experience. Due to the expensive cost of capturing $360^{\\circ}$ panoramic videos, generating desirable panorama videos by prompts is urgently required. Lately, the emerging text-to-video (T2V) diffusion methods demonstrate notable effectiveness in standard video generation. However, due to the significant gap in content and motion patterns between panoramic and standard videos, these methods encounter challenges in yielding satisfactory $360^{\\circ}$ panoramic videos. In this paper, we propose a pipeline named 360-Degree Video Diffusion model (360DVD) for generating $360^{\\circ}$ panoramic videos based on the given prompts and motion conditions. Specifically, we introduce a lightweight 360-Adapter accompanied by 360 Enhancement Techniques to transform pre-trained T2V models for panorama video generation. We further propose a new panorama dataset named WEB360 consisting of panoramic video-text pairs for training 360DVD, addressing the absence of captioned panoramic video datasets. Extensive experiments demonstrate the superiority and effectiveness of 360DVD for panorama video generation. Our project page is at https://akaneqwq.github.io/360DVD/.", + "bbox": [ + 76, + 340, + 472, + 690 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 717, + 209, + 734 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "With the recent advancements in VR technology, 360-degree panoramic videos have been gaining increasing popularity. This video format which offers audiences an immersive experience, is helpful for various applications, including entertainment, education, and communication. To capture details of the entire scene, $360^{\\circ}$ videos are typically recorded using an array of high-resolution fisheye cameras that yields a $360^{\\circ} \\times 180^{\\circ}$ field-of-view (FoV) [1], which", + "bbox": [ + 75, + 743, + 468, + 864 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This work was supported by National Natural Science Foundation of China under Grant 62372016. (Corresponding author: Jian Zhang)", + "bbox": [ + 75, + 875, + 470, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "is quite costly in both time and resources. Therefore, the generation of $360^{\\circ}$ panoramic videos is urgently required for border applications, while panoramic video generation receives little attention in studies to date.", + "bbox": [ + 496, + 310, + 893, + 369 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Thanks to the emerging theory and training strategies, text-to-image (T2I) diffusion models [26, 27, 31, 32, 35] demonstrate remarkable image generation capacity from prompts given by users, and such impressive achievement in image generation is further extended to text-to-video (T2V) generation. Various T2V diffusion models [3, 16, 37, 46, 52, 60] are recently proposed with adopting space-time separable architectures, wherein spatial operations are inherited from the pre-trained T2I models to reduce the complexity of constructing space-time models from scratch. Among these,AnimateDiff [16] enables the capability to generate animated images for various personalized T2I models, which alleviates the requirement for model-specific tuning and achieves compelling content consistency over time.", + "bbox": [ + 496, + 371, + 893, + 582 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although T2V methods on standard videos are widely studied, there is no method proposed for panorama video generation. One potential approach is to leverage existing powerful T2V models, e.g.,AnimateDiff to directly generate the equirectangular projection (ERP) of panoramic videos. Since ERP is a commonly adopted format for storing and transmitting panoramic videos, each frame is treated by ERP as a rectangular image with an aspect ratio of 1:2, which aligns well with the output format of existing standard T2V models. However, due to the significant differences between panoramic videos and standard videos, existing methods suffer challenges in directly producing satisfactory $360^{\\circ}$ panoramic videos. Concretely, the main challenges include three aspects: (1) The content distribution of ERPs differs from standard videos. ERPs require a wider FoV, reaching $360^{\\circ} \\times 180^{\\circ}$ . (2) The motion patterns of ERPs are different from standard videos, with movements often following curves rather than straight lines. 
(3) The left and right ends of ERPs should exhibit continuity since they correspond to the same meridian on the Earth.", + "bbox": [ + 496, + 583, + 895, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Therefore, we propose a specifically designed method", + "bbox": [ + 519, + 885, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "6913", + "bbox": [ + 482, + 944, + 514, + 957 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8e32efc6bee76ad52d5a48a7b9d8d07d1b7a95fd16b08df5393246543e02fcfa.jpg", + "image_caption": [ + "Figure 1. Main results. Our 360DVD creates text-aligned, coherent, and high-quality $360^{\\circ}$ panorama videos. Furthermore, 360DVD can cooperate with multiple personalized text-to-image models and consistently generate stylized panorama videos." + ], + "image_footnote": [], + "bbox": [ + 91, + 88, + 480, + 433 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/95f33fe15aab58457293c435e6c1db7089b31e786666d0b74f687c56cb4bfe60.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 488, + 89, + 875, + 434 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "named 360-Degree Video Diffusion (360DVD) for generating panorama videos. We first introduce a plug-and-play module named 360-Adapter to address challenge mentioned above. Our 360-Adapter receives zero values or motion conditions (e.g., optical flow) as input and outputs motion features, which are fed into the frozen denoising U-Net at different levels of the encoder. This transformation is aimed at converting the T2V model into a panoramic video generation without altering the foundational generative capabilities. In addition, we introduce 360 Enhancement Techniques including two mechanisms to enhance continuity at both ends of ERPs from both macro and micro perspectives, and a latitude-aware loss function for encouraging the model to focus more on low-latitude regions. Cooperated with carefully designed techniques, our 360DVD generates text-aligned, coherent, high-quality, $360^{\\circ}$ panorama videos with various styles, as shown in Fig. 1.", + "bbox": [ + 75, + 500, + 472, + 758 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Furthermore, we collect a panorama dataset named WEB360 including ERP-formatted videos from the internet and games for training our method. WEB360 involves approximately 2,000 video clips with each clip consisting of 100 frames. Considering the domain gap between panoramic and standard images, to enhance the accuracy and granularity of captions, we introduce a GPT-based 360 Text Fusion module for obtaining detailed captions. Our contributions can be summarized as follows:", + "bbox": [ + 75, + 763, + 472, + 900 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce a controllable $360^{\\circ}$ panorama video generation diffusion model named 360DVD, achieved by adopting a controllable standard T2V model with a trainable lightweight 360-Adapter. 
Our model can generate text-guided panorama videos conditioned on desired motions.", + "- We design 360 Enhancement Techniques including a latitude-aware loss and two mechanisms to enhance the content and motion quality of generated panorama videos.", + "- We propose a new high-quality dataset named WEB360 comprising approximately 2,000 panoramic videos, with each video accompanied by a detailed caption enhanced through 360 Text Fusion.", + "- Experiments demonstrate that our 360DVD is capable of generating high-quality, high-diversity, and more consistent $360^{\\circ}$ panorama videos." + ], + "bbox": [ + 500, + 500, + 890, + 726 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 500, + 744, + 650, + 760 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Text-to-Image Diffusion Model", + "text_level": 1, + "bbox": [ + 500, + 770, + 772, + 786 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The Denoising Diffusion Probabilistic Model [9, 17, 39] has proven to be highly successful in generating high-quality images, outperforming previous approaches such as generative adversarial networks (GANs)[11, 57], variational autoencoders (VAEs)[20, 38], and flow-based methods [5]. With text guidance during training, users can generate images based on textual input. Noteworthy examples include", + "bbox": [ + 496, + 794, + 890, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "6914", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "GLIDE [27], DALLE-2 [31], Imagen [35]. To address the computational burden of the iterative denoising process, LDM [32] conducts the diffusion process on a compressed latent space rather than the original pixel space. This accomplishment has prompted further exploration in extending customization [14, 34], image guidance [53, 55], precise control [25, 26, 58] and protection [56].", + "bbox": [ + 75, + 90, + 472, + 196 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Text-to-Video Diffusion Model", + "text_level": 1, + "bbox": [ + 76, + 207, + 346, + 222 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Despite significant advancements in Text-to-Image (T2I) generation, Text-to-Video (T2V) generation faces challenges, including the absence of large-scale, high-quality paired text-video datasets, the inherent complexity in modeling temporal consistency, and the resource-intensive nature of training. To address these challenges, many works leverage the knowledge from pre-trained T2I models, and they manage training costs by executing the diffusion process in the latent space. Some methods [15, 29, 48, 49, 54] utilize T2I models in zero-shot or few-shot ways. However, these methods often suffer from suboptimal frame consistency due to insufficient training. To address this limitation, another category of T2V diffusion models typically adopts space-time separable architectures. These models [3, 37, 46, 60] inherit spatial operations from pre-trained T2I models, reducing the complexity of constructing space-time models from scratch. Given that most personalized T2I models are derived from the same base one (e.g. Stable Diffusion [32]),AnimateDiff [16] designs a motion modeling module that trained with a base T2I model and could animate most of derived personalized T2I models once for all. There are also efforts focused on enhancing control in T2V models. 
Gen-1 [13], MCDiff [6], LaMD [18] and VideoComposer [47] introduce diverse conditions to T2V models. Despite these advancements, the aforementioned methods demand extensive training and lack a plug-and-play nature, making it challenging to apply them to a diverse range of personalized T2I models.", + "bbox": [ + 75, + 231, + 472, + 654 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Panorama Generation", + "text_level": 1, + "bbox": [ + 76, + 665, + 284, + 679 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "GAN-based methods for generating panoramic images have been widely studied [2, 4, 7, 10, 12, 23, 24, 28, 40, 41, 43, 50]. For instance, OmniDreamer [2] accepts a single NFoV image as an input condition and introduces a cyclic inference scheme to meet the inherent horizontal cyclicity of 360-degree images. ImmenseGAN [12] fine-tunes the generative model using a large-scale private text-image pair dataset, making the generation more controllable. Text2Light [7] introduces a zero-shot text-guided 360-image synthesis pipeline by utilizing the CLIP model. Very recently, diffusion models have achieved promising results in panoramic image generation. DiffCollage [59] uses semantic maps as conditions and generates images based on complex factor graphs using retrained diffusion mod", + "bbox": [ + 75, + 688, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "els. PanoGen [21] employs a latent diffusion model and synthesizes new indoor panoramic images through recursive image drawing techniques based on multiple text descriptions. PanoDiff [45] achieves a multi-NFoV synthesis of panoramic images through a two-stage pose estimation module. IPO-LDM [51] uses a dual-modal diffusion structure of RGB-D to better learn the spatial distribution and patterns of panoramic images. StitchDiffusion [44] employs a T2I diffusion model, ensuring continuity at both ends through stitching. However, to date, panoramic video generation has received limited attention. To the best of our knowledge, we are the first to leverage diffusion models for panoramic video generation.", + "bbox": [ + 496, + 90, + 893, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 300, + 591, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we begin with a concise review of the latent diffusion fusion model andAnimateDiff [16]. Following that, we introduce the construction method of the WEB360 dataset. We then provide an overview of 360DVD and elaborate on the implementation details of 360-Adapter. Finally, we describe the 360 enhancement techniques aimed at enriching the panoramic nature of the video.", + "bbox": [ + 496, + 325, + 893, + 431 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Preliminaries", + "text_level": 1, + "bbox": [ + 500, + 440, + 640, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Latent Diffusion Model. 
Given an input signal $\\mathbf{x}_0$ , a diffusion forward process in DDPM [17] is defined as:", + "bbox": [ + 498, + 464, + 890, + 494 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\theta} \\left(\\mathbf {x} _ {t} \\mid \\mathbf {x} _ {t - 1}\\right) = \\mathcal {N} \\left(\\mathbf {x} _ {t}; \\sqrt {1 - \\beta_ {t}} \\mathbf {x} _ {t - 1}, \\beta_ {t} \\mathbf {I}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 550, + 506, + 890, + 525 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "for $t = 1,\\ldots ,T$ , where $T$ is the total timestep of the diffusion process. A noise depending on the variance $\\beta_{t}$ is gradually added to $\\mathbf{x}_{t - 1}$ to obtain $\\mathbf{x}_t$ at the next timestep and finally reach $\\mathbf{x}_T\\in \\mathcal{N}(0,\\mathbf{I})$ . The goal of the diffusion model is to learn to reverse the diffusion process (denoising). Given a random noise $\\mathbf{x}_t$ , the model predicts the added noise at the next timestep $\\mathbf{x}_{t - 1}$ until the origin signal $\\mathbf{x}_0$ :", + "bbox": [ + 496, + 534, + 893, + 642 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\theta} (\\mathbf {x} _ {t - 1} | \\mathbf {x} _ {t}) = \\mathcal {N} (\\mathbf {x} _ {t - 1}; \\boldsymbol {\\mu} _ {\\theta} (\\mathbf {x} _ {t}, t), \\boldsymbol {\\Sigma} _ {\\theta} (\\mathbf {x} _ {t}, t)), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 539, + 652, + 890, + 670 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "for $t = T,\\ldots ,1$ . We fix the variance $\\Sigma_{\\theta}(\\mathbf{x}_t,t)$ and utilize the diffusion model with parameter $\\theta$ to predict the mean of the inverse process $\\pmb{\\mu}_{\\theta}(\\mathbf{x}_t,t)$ . The model can be simplified as denoising models $\\epsilon_{\\theta}(\\mathbf{x}_t,t)$ , which are trained to predict the noise of $\\mathbf{x}_t$ with a noise prediction loss:", + "bbox": [ + 496, + 679, + 893, + 755 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathbb {E} _ {\\mathbf {x} _ {0}, \\mathbf {y}, \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I}), t} [ \\| \\epsilon - \\epsilon_ {\\theta} (\\mathbf {x} _ {t}, t, \\boldsymbol {\\tau} _ {\\theta} (\\mathbf {y})) \\| _ {2} ^ {2} ], \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 537, + 765, + 890, + 785 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\epsilon$ is the added noise to the input image $\\mathbf{x}_0$ , $\\mathbf{y}$ is the corresponding textual description, $\\tau_{\\theta}(\\cdot)$ is a text encoder mapping the string to a sequence of vectors.", + "bbox": [ + 496, + 794, + 890, + 839 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Latent Diffusion Model (LDM) [32] executes the denoising process in the latent space of an autoencoder, namely $\\mathcal{E}(\\cdot)$ and $\\mathcal{D}(\\cdot)$ , implemented as VQ-GAN [19] or VQ-VAE [42] pre-trained on large image datasets. During the", + "bbox": [ + 498, + 839, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "6915", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "training of the latent diffusion networks, an input image $\\mathbf{x}_0$ is initially mapped to the latent space by the frozen encoder, yielding $\\mathbf{z}_0 = \\mathcal{E}(\\mathbf{x}_0)$ . 
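To make the preliminaries concrete, the following is a minimal sketch of the noise-prediction training step of Eqs. (1)–(3), applied to a latent code $\mathbf{z}_0 = \mathcal{E}(\mathbf{x}_0)$ as in LDM. It uses the standard closed-form corruption of $\mathbf{z}_0$ at timestep $t$; the linear beta schedule, the latent shapes, and the toy convolutional denoiser are placeholder assumptions, not the actual Stable Diffusion components.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

T = 1000
betas = torch.linspace(1e-4, 0.02, T)                 # assumed linear variance schedule beta_t
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)    # cumulative product \bar{alpha}_t

# Toy stand-in for the text-conditioned denoiser eps_theta(z_t, t, tau_theta(y)).
denoiser = nn.Conv2d(4, 4, kernel_size=3, padding=1)

def training_step(z0: torch.Tensor) -> torch.Tensor:
    """Corrupt the latent z0 with the closed-form forward process and regress the injected noise."""
    b = z0.shape[0]
    t = torch.randint(0, T, (b,))
    eps = torch.randn_like(z0)
    a_bar = alphas_cumprod[t].view(b, 1, 1, 1)
    z_t = a_bar.sqrt() * z0 + (1.0 - a_bar).sqrt() * eps     # sample from q(z_t | z_0)
    eps_pred = denoiser(z_t)                                  # eps_theta(z_t, t, tau_theta(y)) in the full model
    return F.mse_loss(eps_pred, eps)                          # noise-prediction loss, cf. Eq. (3)/(4)

loss = training_step(torch.randn(2, 4, 64, 128))  # e.g. VAE latents of 512x1024 ERP frames (8x downsampled)
```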
Thus, the training objective can be formulated as follows:", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathbb {E} _ {\\mathcal {E} (\\mathbf {x} _ {0}), \\mathbf {y}, \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (0, \\mathbf {I}), t} [ \\| \\boldsymbol {\\epsilon} - \\boldsymbol {\\epsilon} _ {\\theta} (\\mathbf {z} _ {t}, t, \\boldsymbol {\\tau} _ {\\theta} (\\mathbf {y})) \\| _ {2} ^ {2} ]. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 98, + 161, + 468, + 181 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In widely-used LDM Stable Diffusion (SD), which our method is based on, $\\epsilon_{\\theta}(\\cdot)$ is implemented with a modified UNet [33] that incorporates four downsample/upsample blocks and one middle block, resulting in four resolution levels within the networks' latent space. Each resolution level integrates 2D convolution layers as well as self- and cross-attention mechanisms. Text model $\\tau_{\\theta}(\\cdot)$ is implemented using the CLIP [30] ViT-L/14 text encoder.", + "bbox": [ + 75, + 191, + 468, + 311 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "AnimateDiff.AnimateDiff inflates base SD by adding temporal-aware structures and learning reasonable motion priors from large-scale video datasets. Since the original SD can only process 4D image data batches, while T2V task takes a 5D video tensor as input. It transforms each 2D convolution and attention layer in the original image model into spatial-only pseudo-3D layers. The motion module is inserted at every resolution level of the U-shaped diffusion network, using vanilla temporal transformers consisting of several self-attention blocks operating along the temporal axis. The training objective ofAnimateDiff can be written as:", + "bbox": [ + 75, + 311, + 468, + 492 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathbb {E} _ {\\mathcal {E} \\left(\\mathbf {x} _ {0} ^ {1: N}\\right), \\mathbf {y}, \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (0, \\mathbf {I}), t} [ \\| \\boldsymbol {\\epsilon} - \\boldsymbol {\\epsilon} _ {\\theta} \\left(\\mathbf {z} _ {t} ^ {1: N}, t, \\boldsymbol {\\tau} _ {\\theta} (\\mathbf {y})\\right) \\| _ {2} ^ {2} ], \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 503, + 468, + 525 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{x}_0^{1:N}$ is the sampled video data, $\\mathbf{z}_0^{1:N}$ is the latent code which $\\mathbf{x}_0^{1:N}$ are encoded into via the pre-trained autoencoder, $\\mathbf{z}_t^{1:N}$ is the latent code obtained by perturbing the initial latent code $\\mathbf{z}_0^{1:N}$ with noise at timestep $t$ . During training, the pre-trained weights of the base T2I model are frozen to keep its feature space unchanged.", + "bbox": [ + 75, + 534, + 468, + 627 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. WEB360 Dataset", + "text_level": 1, + "bbox": [ + 76, + 635, + 243, + 648 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Diverse text-video pairs datasets are essential for training open-domain text-to-video generation models. However, existing $360^{\\circ}$ panorama video datasets lack corresponding textual annotations. 
Moreover, these datasets are often constrained either in scale or quality, thereby impeding the upper limit of high-quality video generation.", + "bbox": [ + 75, + 659, + 468, + 750 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To address the aforementioned challenges and achieve high-quality 360 panorama video generation, we introduce a novel text-video dataset named WEB360. This dataset comprises 2114 text-video pairs sourced from open-domain content, presented in high-definition (720p) ERP format. Our dataset creation process involved extracting 210 high-resolution panoramic video clips from the ODV360 [4] training set. Additionally, we collected over 400 original videos from YouTube. Due to the complex scene transitions present in the original videos, which pose challenges", + "bbox": [ + 75, + 750, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/dfbc63c0901348edca027e0e3bea0da40cec19d80163ef05c694b8e4f9e76b1f.jpg", + "image_caption": [ + "Figure 2. 360 Text Fusion. The captions of four images with a FoV of 90 are fed into ChatGPT to generate a new $360^{\\circ}$ summarization. Compared to the caption of ERP at the bottom right, 360 Text Fusion allows for more fine-grained captions." + ], + "image_footnote": [], + "bbox": [ + 506, + 89, + 890, + 234 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "for models in learning temporal correlations, we perform a manual screening process to split the original videos into 1904 single-scene video clips. We employ BLIP [22] to annotate the first frame of the 2104 video clips. However, we observed that direct application of BLIP to ERP images often resulted in bad captions. Therefore, we propose a panoramic image caption method named 360 Text Fusion, based on ChatGPT.", + "bbox": [ + 496, + 330, + 890, + 450 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "360 Text Fusion. We find that directly using BLIP [22] to label ERP has drawbacks. On one hand, errors may arise due to the distortion caused by the polarities, leading to misidentifications such as labeling \"person\" as \"dog\". On the other hand, the captions generated by BLIP lack granularity, making them insufficient for providing a detailed description of the current scene. Thus, we propose 360 Text Fusion (360TF) method, as shown in Fig. 2. To deal with the irregular distortion of ERP, we turn to less-distorted perspective images. We first project the original ERP image to four non-overlapping perspective images at 0 degrees longitude, with a FoV of 90. The four images are then fed into BLIP to be captioned. By pre-informing ChatGPT about the task and providing examples, these four captions are collectively input to ChatGPT, which then generates a summary of the scene as our final caption. In comparison to directly using BLIP to label the entire image, our 360TF demonstrates a significant advantage in granularity.", + "bbox": [ + 496, + 453, + 892, + 726 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. 360-degree Video Diffusion Model", + "text_level": 1, + "bbox": [ + 500, + 738, + 795, + 753 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "An overview of the 360-degree Video Diffusion Model (360 DVD) is presented in Fig. 3, which is composed of a pretrained denoising U-Net and 360-Adapter. The pre-trained denoising U-Net adopts a structure identical to that ofAnimateDiff. 
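As a rough, self-contained illustration of the projection step in 360 Text Fusion described above (before the per-view BLIP captioning and the ChatGPT summarization), the sketch below extracts four non-overlapping perspective views with a $90^{\circ}$ FoV at zero pitch from an ERP frame via gnomonic projection. The axis conventions, output resolution, and nearest-neighbour sampling are simplifying assumptions, not the authors' implementation.

```python
import numpy as np

def erp_to_perspective(erp: np.ndarray, yaw_deg: float, fov_deg: float = 90.0, size: int = 512) -> np.ndarray:
    """Sample one pinhole view (zero pitch) from an equirectangular frame by gnomonic projection."""
    H, W = erp.shape[:2]
    half = np.tan(np.radians(fov_deg) / 2.0)
    # Normalised image-plane coordinates of the output view (camera looks along +z, x right, y down).
    xs = (2.0 * (np.arange(size) + 0.5) / size - 1.0) * half
    ys = (2.0 * (np.arange(size) + 0.5) / size - 1.0) * half
    x, y = np.meshgrid(xs, ys)
    z = np.ones_like(x)
    # Rotate the viewing rays about the vertical axis by the requested yaw.
    yaw = np.radians(yaw_deg)
    xw = np.cos(yaw) * x + np.sin(yaw) * z
    zw = -np.sin(yaw) * x + np.cos(yaw) * z
    yw = y
    # Ray direction -> longitude/latitude -> ERP pixel coordinates (nearest neighbour).
    lon = np.arctan2(xw, zw)
    lat = np.arcsin(yw / np.sqrt(xw ** 2 + yw ** 2 + zw ** 2))
    u = ((lon / (2.0 * np.pi) + 0.5) * W).astype(np.int64) % W
    v = np.clip(((lat / np.pi + 0.5) * H).astype(np.int64), 0, H - 1)
    return erp[v, u]

# Four non-overlapping 90-degree views around the horizon, one per caption in 360 Text Fusion.
dummy_erp = np.zeros((720, 1440, 3), dtype=np.uint8)
views = [erp_to_perspective(dummy_erp, yaw) for yaw in (0.0, 90.0, 180.0, 270.0)]
```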
In every resolution level of the U-Net, the spatial layer unfolds pre-trained weights from SD, while the temporal layer incorporates the motion module ofAnimateDiff trained on a large-scale text-video dataset.", + "bbox": [ + 496, + 762, + 890, + 883 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During the training process, we first sample a video $\\mathbf{x}_0^{1:N}$", + "bbox": [ + 517, + 883, + 888, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "6916", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/9fdb99ac86341b86153ac301dbe00976f41c74a1b06897891b29cd12c743a14b.jpg", + "image_caption": [ + "Figure 3. Overview of 360DVD. 360DVD leverages a trainable 360-Adapter to extend standard T2V models to the panorama domain and is able to generate high-quality panorama videos with given prompts and optional motion conditions. In addition, 360 Enhancement Techniques are proposed for quality improvement in the panorama perspective." + ], + "image_footnote": [], + "bbox": [ + 86, + 89, + 893, + 270 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "from the dataset. The video is encoded into latent code $\\mathbf{z}_0^{1:N}$ through pre-trained VAE encoder $\\mathcal{E}(\\cdot)$ and noised to $\\mathbf{z}_t^{1:N}$ . Simultaneously, the corresponding text $\\mathbf{y}$ for the video is encoded using the text encoder $\\pmb{\\tau}_{\\theta}(\\cdot)$ of the CLIP. The video is also input into a motion estimation network to generate corresponding motion conditions $\\mathbf{c}$ , which are then fed into the 360-Adapter $\\mathcal{F}_{360}(\\cdot)$ . Finally, noised latent code $\\mathbf{z}_t^{1:N}$ , timestep $t$ , text embedding $\\pmb{\\tau}_{\\theta}(\\mathbf{y})$ , and the feature maps $\\mathbf{f}_{360}$ generated by 360-Adapter are collectively input into the U-Net $\\epsilon(\\cdot)$ to predict the noise strength added to the latent code. As we aim to preserve the priors learned by SD andAnimateDiff on large datasets, we freeze their weights during the training process. If we use a simple L2 loss term, the training objective is given as follows:", + "bbox": [ + 75, + 352, + 472, + 565 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathbb {E} _ {\\mathcal {E} (\\mathbf {x} _ {0} ^ {1: N}), \\mathbf {y}, \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I}), t} [ \\| \\boldsymbol {\\epsilon} - \\boldsymbol {\\epsilon} _ {\\theta} (\\mathbf {z} _ {t} ^ {1: N}, t, \\boldsymbol {\\tau} _ {\\theta} (\\mathbf {y}), \\mathbf {f} _ {3 6 0}) \\| _ {2} ^ {2} ]. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 568, + 468, + 597 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To ensure satisfactory generation of $360^{\\circ}$ panoramic videos without motion control input, we set the input of the 360-Adapter to zero with a probability $P$ during training. This strategy aims to encourage the model to learn representations that are not solely reliant on motion conditions, enhancing its ability to generate compelling panoramic videos without explicit motion guidance.", + "bbox": [ + 75, + 599, + 468, + 703 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In inference, users have the option to selectively provide text prompts and motion guidance to carry out denoising over a total of $T$ steps. Here, we employ DDIM [39] to accelerate the sampling process. 
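The zero-input strategy described above, dropping the motion condition with probability $P$ so that generation does not become solely reliant on motion guidance, can be sketched as follows; the probability value, the condition layout, and the commented call pattern are illustrative assumptions.

```python
import torch

def prepare_motion_condition(motion: torch.Tensor, p_drop: float = 0.3) -> torch.Tensor:
    """With probability P, feed zeros to the 360-Adapter so the model also learns motion-free generation."""
    if torch.rand(()).item() < p_drop:
        return torch.zeros_like(motion)
    return motion

# Assumed layout of the motion condition c: batch x channels x frames x height x width.
c = prepare_motion_condition(torch.randn(1, 3, 16, 512, 1024))
# f_360 = adapter(c); eps_pred = unet(z_t, t, text_emb, f_360); the loss then follows Eq. (6).
```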
The estimated latent code $\\hat{\\mathbf{z}}_0^{1:N}$ is then input into a pre-trained VAE decoder to decode the desired $360^\\circ$ panoramic videos $\\hat{\\mathbf{x}}_0^{1:N}$ . Due to constraints such as resolution limitations imposed by existing SD and considerations regarding GPU memory usage, the experimental results presented in this paper showcase a resolution of $512 \\times 1024$ . In practical applications, super-resolution methods [8, 40] can be employed to upscale the generated results to the desired size.", + "bbox": [ + 75, + 704, + 470, + 883 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "360-Adapter. Our proposed 360-Adapter is simple and", + "bbox": [ + 76, + 885, + 470, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/bcff2f4fddc23bfaae1cc06eb9c8e3cceb676329129ea2ba8f60b036e2116d27.jpg", + "image_caption": [ + "Figure 4. Overview of 360-Adapter. 360-Adapter is a simple but effective module in which intermediate features are fed into the U-Net encoder blocks for modulation." + ], + "image_footnote": [], + "bbox": [ + 542, + 352, + 852, + 597 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "lightweight, as shown in Fig. 4. The original condition input has the same resolution as the video of $H \\times W$ . Here, we utilize the pixel unshuffle [36] operation to downsample it to $H / 8 \\times W / 8$ . Following that are four 360-Adapter blocks, we depict only one for simplification in Fig. 4. To maintain consistency with the U-Net architecture, the first three 360-Adapter blocks each include a downsampling block. In each 360-Adapter block, one 2D convolution layer and a residual block (RB) with pseudo-3D convolution layers are utilized to extract the condition feature $\\mathbf{f}_{360}^{k}$ . Finally, multiscale condition features $\\mathbf{f}_{360} = \\{\\mathbf{f}_{360}^{1}, \\mathbf{f}_{360}^{2}, \\mathbf{f}_{360}^{3}, \\mathbf{f}_{360}^{4}\\}$ are formed. Suppose the intermediate features in the U-Net encoder block is $\\mathbf{f}_{enc} = \\{\\mathbf{f}_{enc}^{1}, \\mathbf{f}_{enc}^{2}, \\mathbf{f}_{enc}^{3}, \\mathbf{f}_{enc}^{4}\\}$ . $\\mathbf{f}_{360}$ is then added with $\\mathbf{f}_{enc}$ at each scale. In summary, the condition", + "bbox": [ + 496, + 688, + 893, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "6917", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "feature extraction and conditioning operation of the 360-Adapter can be defined as the following formulation:", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {f} _ {3 6 0} = \\mathcal {F} _ {3 6 0} (\\mathbf {c}), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 217, + 131, + 468, + 147 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {f}} _ {e n c} ^ {i} = \\mathbf {f} _ {e n c} ^ {i} + \\mathbf {f} _ {3 6 0} ^ {i}, i \\in \\{1, 2, 3, 4 \\}. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 155, + 169, + 468, + 188 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the previous description, we omit some details. Our motion condition $\\mathbf{c}$ is a 5D tensor, assuming its size is batch $\\times$ channels $\\times$ frames $\\times$ height $\\times$ width. 
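A lightweight PyTorch sketch of the 360-Adapter structure and the feature addition of Eqs. (7)-(8) described above. The motion-condition channel count, the channel widths, and the plain 2D stand-in for the pseudo-3D residual block are assumptions for illustration only.

```python
import torch
import torch.nn as nn

class Adapter360(nn.Module):
    """Illustrative 360-Adapter: pixel-unshuffle the condition, then four blocks emit f_360^1..4."""

    def __init__(self, cond_channels=2, widths=(320, 640, 1280, 1280)):
        super().__init__()
        self.unshuffle = nn.PixelUnshuffle(8)                    # H x W -> H/8 x W/8, channels x64
        in_ch = cond_channels * 64
        blocks = []
        for k, out_ch in enumerate(widths):
            stride = 2 if k < 3 else 1                           # first three blocks downsample
            blocks.append(nn.Sequential(
                nn.Conv2d(in_ch, out_ch, 3, stride=stride, padding=1),
                nn.Conv2d(out_ch, out_ch, 3, padding=1),         # stand-in for the pseudo-3D residual block
                nn.SiLU(),
            ))
            in_ch = out_ch
        self.blocks = nn.ModuleList(blocks)

    def forward(self, cond):                                     # cond: (B*N) x C x H x W motion frames
        x = self.unshuffle(cond)
        feats = []
        for block in self.blocks:
            x = block(x)
            feats.append(x)                                      # one condition feature per scale
        return feats

# Eq. (8): the adapter features modulate the U-Net encoder features by simple addition.
# f_enc_hat = [f_enc[k] + f_360[k] for k in range(4)]
```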
We first reshape it into a 4D tensor of size (batch $\\times$ frames) $\\times$ channels $\\times$ height $\\times$ width to allow it to be fed into the 2D convolution layer and restore it to 5D to go through the RB with pseudo-3D convolution layers. Subsequently, in the RB, we employ a $1 \\times 3 \\times 3$ pseudo-3D convolution to extract features in the spatial dimension, followed by a $3 \\times 1 \\times 1$ pseudo-3D convolution to model information along the temporal dimension. The resulting features are reshaped back to (batch $\\times$ frames) $\\times$ channels $\\times$ height $\\times$ width to add the output of the skip connection. Finally, condition features are reshaped back into a 5D vector of size batch $\\times$ channels $\\times$ frames $\\times$ height $\\times$ width to align with the U-Net encoder intermediate features.", + "bbox": [ + 75, + 193, + 470, + 434 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4. 360 Enhancement Techniques", + "text_level": 1, + "bbox": [ + 76, + 443, + 341, + 458 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Latitude-aware Loss. When projecting panoramic videos into ERPs, meridians are mapped as vertically spaced lines with a constant interval, while parallels are mapped as horizontally spaced lines with a constant interval. This projection method establishes a straightforward mapping relationship, but it is neither equal-area nor conformal, introducing significant distortion, particularly in the polar regions. To make the denoiser pay more attention to low-latitude regions with less distortion, which is more crucial for human visual perception, we introduce a latitude-aware loss:", + "bbox": [ + 75, + 465, + 468, + 617 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathbb {E} _ {\\mathcal {E} \\left(\\mathbf {x} _ {0} ^ {1: N}\\right), \\mathbf {y}, \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I}), t} [ \\| \\left| \\mathbf {W} \\odot (\\epsilon - \\hat {\\epsilon} _ {\\theta}) \\right| | _ {2} ^ {2} ], \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 625, + 468, + 645 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\hat{\\epsilon}_{\\theta} = \\epsilon_{\\theta}(\\mathbf{z}_t^{1:N}, t, \\boldsymbol{\\tau}_{\\theta}(\\mathbf{y}), \\mathbf{f}_{360})$ , and $\\mathbf{W}$ is a weight matrix used to perform element-wise product, defined as:", + "bbox": [ + 76, + 652, + 468, + 685 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {W} _ {i, j} = \\cos \\left(\\frac {2 i - H / 8 + 1}{H / 4} \\pi\\right), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 693, + 468, + 724 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $i \\in [0, H/8)$ , $j \\in [0, W/8)$ , $H/8$ and $W/8$ is the height and width of latent code $\\mathbf{z}_t^{1:N}$ . The visualized result of $\\mathbf{W}$ is shown in Fig. 5, where pixels in low and middle latitudes are given more weight during training.", + "bbox": [ + 76, + 733, + 468, + 795 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Latent Rotation Mechanism. Because ERPs can be considered as the unfolding of a spherical surface along a meridian, they are meant to be wraparound consistent, implying that their left and right sides are continuous. However, during the process of video generation, the left and right sides are physically separated. 
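Before moving on, a small sketch of the latitude-aware weighting of Eqs. (9)-(10) above, assuming the latent has H/8 rows and W/8 columns; the helper names are illustrative.

```python
import math
import torch

def latitude_weight(h, w, device=None):
    """Weight matrix W of Eq. (10): close to 1 near the equator rows, close to 0 near the poles."""
    i = torch.arange(h, device=device, dtype=torch.float32)      # h = H/8 latent rows, i in [0, h)
    row = torch.cos((2 * i - h + 1) / (2 * h) * math.pi)
    return row.view(h, 1).expand(h, w)                           # the same weight for every column j

def latitude_aware_loss(noise, noise_pred):
    """Eq. (9): L2 loss on the element-wise product W * (eps - eps_hat)."""
    h, w = noise.shape[-2:]
    W = latitude_weight(h, w, device=noise.device)
    return ((W * (noise - noise_pred)) ** 2).mean()
```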
Inspired by PanoDiff [45], we employ a latent rotation mechanism to enhance", + "bbox": [ + 75, + 795, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7859becf69cbcc15915e74e7e242374adb457af96d8f73541d153cabbf97abf5.jpg", + "image_caption": [ + "Figure 5. Left: the visualization of weight matrix $\\mathbf{W}$ , brighter colors indicate values closer to 1, while darker colors suggest values closer to 0. Right: a schematic diagram of the latent rotation mechanism. In each iteration, the far left portion of angle $\\theta$ is shifted to the far right." + ], + "image_footnote": [], + "bbox": [ + 506, + 90, + 883, + 176 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the macroscopic coherence between the left and right ends of the video. During the inference process, we perform a horizontal rotation at an angle of $\\theta$ on $\\mathbf{z}_t^{1:N}$ and motion condition $\\mathbf{c}$ , at each denoising step. As illustrated in Fig. 5, the content on the far left is shifted to the far right, where we use $\\mathbf{x}_0^1$ to replace $\\mathbf{z}_t^{1:N}$ for a better visual effect of its continuity. During the training process, we also randomly rotate the training videos along with the motion condition by a random angle as a data augmentation strategy.", + "bbox": [ + 496, + 289, + 890, + 425 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Circular Padding Mechanism. Although the previous latent rotation mechanism achieves semantic continuity at a macroscopic level, achieving pixel-level continuity is challenging. Therefore, in the inference process, we adopt a mechanism of circular padding by modifying the padding method of the convolution layers. We observe that the early stages of $360^{\\circ}$ video generation often involve layout modeling, while the later stages focus on detail completion. To maintain the stable video generation quality of 360DVD, we only implement the circular padding mechanism in the late $\\left\\lfloor \\frac{T}{2} \\right\\rfloor$ steps of a total of $T$ denoising steps.", + "bbox": [ + 496, + 428, + 892, + 595 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 500, + 617, + 624, + 633 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 643, + 717, + 660 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training Settings. We choose Stable Diffusion v1.5 and Motion Module v14 as our base model. We utilize the panoramic optical flow estimator PanoFlow [45] to generate motion conditions. We train the 360-Adapter using the proposed WEB360 dataset. The resolution is set to $512 \\times 1024$ , the length of frames to 16, the batch size to 1, the learning rate to $1 \\times 10^{-5}$ , and the total number of training steps to $100k$ , probability $P = 0.2$ . We use a linear beta schedule as animateDiff, where $\\beta_{start} = 0.00085$ and $\\beta_{end} = 0.012$ .", + "bbox": [ + 496, + 670, + 890, + 806 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Inference Settings. We use DDIM with 25 sampling steps, and the scale for text guidance is 7.5, the angle $\\theta = \\pi /2$ . 
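The two inference-time mechanisms above (latent rotation and late-stage circular padding) reduce to a few lines in PyTorch. The loop skeleton and `ddim_step` are placeholders, and note that PyTorch's "circular" padding wraps both spatial axes, whereas only horizontal wrap-around is strictly needed for ERP.

```python
import math
import torch
import torch.nn as nn

def rotate_latent(z, angle):
    """Horizontally roll an ERP latent (or motion condition) by `angle` radians (width is the last dim)."""
    shift = int(round(z.shape[-1] * angle / (2 * math.pi)))
    return torch.roll(z, shifts=shift, dims=-1)

def set_circular_padding(unet, enabled):
    """Toggle wrap-around padding on every Conv2d of the denoiser."""
    for m in unet.modules():
        if isinstance(m, nn.Conv2d):
            m.padding_mode = "circular" if enabled else "zeros"

# How the two mechanisms slot into the DDIM loop (theta = pi/2, 25 DDIM steps as in the settings):
# for step, t in enumerate(timesteps):
#     set_circular_padding(unet, enabled=(step >= len(timesteps) // 2))   # late T/2 steps only
#     z = rotate_latent(z, math.pi / 2)
#     cond = rotate_latent(cond, math.pi / 2)
#     z = ddim_step(unet, z, t, text_emb, adapter_360(cond))              # placeholder update
```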
We collect several personalized Stable Diffusion models from CivitAI to verify the effectiveness and generalizability of our method, including Realistic Vision, Lyriel, ToonYou, and RCNZ Cartoon.", + "bbox": [ + 496, + 809, + 890, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6918", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5875d91e0c3dc9a487f99f4eda4b1e1f59e7fab5215c1ea864dd2c37a2223e06.jpg", + "image_caption": [ + "Figure 6. Qualitative comparisons with baseline methods. 360DVD successfully produces stable and high-quality panorama video over various prompts while other methods are failed." + ], + "image_footnote": [], + "bbox": [ + 86, + 92, + 880, + 412 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b2302c66f49956ac65b576c168d7edb4feeeddd6f588df6d2a6834ee2e55a578.jpg", + "image_caption": [ + "Figure 7. Qualitative comparisons of optical flow. 360DVD generates panorama videos with reasonable motion patterns consistent with the conditioned optical flow." + ], + "image_footnote": [], + "bbox": [ + 84, + 470, + 888, + 631 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Qualitative Results", + "text_level": 1, + "bbox": [ + 76, + 696, + 261, + 713 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Due to space limitations, we only display several frames of each video. We strongly recommend readers refer to our project page for more results and better visual quality.", + "bbox": [ + 75, + 727, + 470, + 773 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Prompt-guided Panorama Video Generation. We present several prompt-guided $360^{\\circ}$ panorama video generation results across different personalized models in Fig. 1. The figure shows that our method successfully turns personalized T2I models into panorama video generators. Our method can produce impressive generation results ranging from real to cartoon styles, from natural landscapes to cultural scenery. This success is attributed to the fact that our", + "bbox": [ + 75, + 780, + 472, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "method preserves the image generation priors and temporal modeling priors learned by SD andAnimateDiff on large-scale datasets.", + "bbox": [ + 498, + 696, + 890, + 742 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Motion-guided Panorama Video Generation. We showcase panoramic video generation results guided by three typical optical flow maps, as shown in Fig. 7. The optical flow maps in the first row indicate the primary motion areas in the Arctic, where we can observe significant movement of clouds in the sky. The optical flow maps in the second row and third row indicate motion areas primarily in the Antarctic, where we can see the movement of trees and hot air balloons near the Antarctic.", + "bbox": [ + 496, + 763, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "6919", + "bbox": [ + 482, + 944, + 514, + 957 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/de39fcbd411b9fb9c7acc6ef381bafe4af99c7432c50b2f376805cde7aa31cd5.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><thead><tr><td rowspan="2">Index</td><td rowspan="2">Methods</td><td colspan="2">Video Criteria</td><td colspan="3">Panorama Criteria</td></tr><tr><td>Graphics Quality</td><td>Frame Consistency</td><td>End Continuity</td><td>Content Distribution</td><td>Motion Pattern</td></tr></thead><tbody><tr><td>A</td><td>AnimateDiff</td><td>11.3%</td><td>15.3%</td><td>5.3%</td><td>4.8%</td><td>4.4%</td></tr><tr><td>B</td><td>A+LoRA</td><td>14.1%</td><td>10.5%</td><td>6.0%</td><td>12.1%</td><td>6.5%</td></tr><tr><td>C</td><td>B+360ET</td><td>23.0%</td><td>9.7%</td><td>16.9%</td><td>16.1%</td><td>14.5%</td></tr><tr><td>D</td><td>Ours</td><td>51.6%</td><td>64.5%</td><td>71.8%</td><td>67.0%</td><td>74.6%</td></tr></tbody></table>
", + "bbox": [ + 81, + 88, + 890, + 202 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 1. User preference studies. More raters prefer videos generated by our 360DVD, especially over panorama criteria including if generated videos have left-to-right continuity, the panorama content distribution, and the panorama motion pattern.", + "bbox": [ + 75, + 210, + 892, + 241 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. Comparison", + "text_level": 1, + "bbox": [ + 76, + 266, + 209, + 282 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We compare our results with nativeAnimateDiff,AnimateDiff with a LoRA for panorama image generation from CivitAI named LatentLabs360,AnimateDiff with panoramic LoRA, and our proposed 360 Enhancement Techniques (loss excepted). We can observe that the results generated by the nativeAnimateDiff have a very narrow field of view, which does not align with the content distribution of panoramic videos. WhenAnimateDiff is augmented with panoramic LoRA, it produces videos with a broader field of view; however, the two ends of videos lack continuity, and object movements are highly random. Our proposed 360ET method significantly enhances the continuity between two ends of the videos but fails to address issues such as non-compliance with panoramic motion patterns and poor cross-frame consistency. Notably, our 360DVD can generate videos that best adhere to the content distribution and motion patterns of panoramic videos. We are pleased to discover that, thanks to the high-quality training data provided by WEB360, the videos generated by 360DVD exhibit more realistic colors and nuanced lighting, providing an immersive experience.", + "bbox": [ + 75, + 289, + 472, + 608 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 617, + 232, + 633 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We primarily conducted ablation studies on the proposed 360 Text Fusion strategy, the pseudo-3D layer in the 360-Adapter, and the latitude-aware loss, as illustrated in Fig. 8. Given the prompt \"a car driving down a street next to a forest\", the first row without 360TF can not generate the car because of low-quality captions in the training process. The second row without pseudo-3D layer can generate a car, but due to the lack of temporal modeling, the results exhibit flickering. The third row without latitude-aware loss can produce relatively good results, but it still falls slightly short in terms of clarity, field of view, and other aspects compared to the last row with the complete 360DVD.", + "bbox": [ + 75, + 640, + 472, + 823 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. User Study", + "text_level": 1, + "bbox": [ + 76, + 830, + 199, + 848 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "31 participants were surveyed to evaluate the graphics quality, cross-frame consistency, left-right continuity, content distribution, and motion patterns of 8 sets of generated", + "bbox": [ + 75, + 854, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a24ad557b7827453850145863261f8d88df87d01a8dca7dd403fb8f9d7a38cbe.jpg", + "image_caption": [ + "Figure 8. Ablation studies on 360 Text Fusion (360TF), pseudo-3D layer in 360-Adapter (Pseudo-3D), and latitude-aware loss (Lat. Loss)." + ], + "image_footnote": [], + "bbox": [ + 506, + 268, + 883, + 473 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "results. 
For each criterion, they selected the video they deemed most fitting for the theme of high-quality 360-degree panoramic videos. The data presented in Table 1 indicates that our model outperforms the other three methods significantly across all five dimensions. Simultaneously, our proposed 360ET can remarkably improve video quality, and left-right continuity, solely based on the nativeAnimateDiff and panoramic LoRA.", + "bbox": [ + 496, + 546, + 893, + 667 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 681, + 619, + 696 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we introduce 360DVD, a pipeline for controllable $360^{\\circ}$ panorama video generation. Our framework leverages text prompts and motion guidance to animate personalized T2I models. Utilizing the proposed WEB360 dataset, 360-Adapter, and 360 Enhancement Techniques, our framework can generate videos that adhere to the content distribution and motion patterns in real captured panoramic videos. Extensive experiments demonstrate our effectiveness in creating high-quality panorama videos with various prompts and styles. We believe that our framework provides a simple but effective solution for panoramic video generation, and leads to inspiration for possible future works.", + "bbox": [ + 496, + 705, + 893, + 888 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "6920", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Hao Ai, Zidong Cao, Jinjing Zhu, Haotian Bai, Yucheng Chen, and Lin Wang. Deep learning for omnidirectional vision: A survey and new perspectives. arXiv preprint arXiv:2205.10468, 2022. 1", + "[2] Naofumi Akimoto, Yuhi Matsuo, and Yoshimitsu Aoki. *Diverse plausible 360-degree image outpainting for efficient 3dgc background creation.* In *Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition*, pages 11441–11450, 2022. 3", + "[3] Jie An, Songyang Zhang, Harry Yang, Sonal Gupta, Jia-Bin Huang, Jiebo Luo, and Xi Yin. Latent-shift: Latent diffusion with temporal shift for efficient text-to-video generation. arXiv preprint arXiv:2304.08477, 2023. 1, 3", + "[4] Mingdeng Cao, Chong Mou, Fanghua Yu, Xintao Wang, Yinqiang Zheng, Jian Zhang, Chao Dong, Gen Li, Ying Shan, Radu Timofte, Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Xuhan Sheng, Bin Chen, Haoyu Ma, Ming Cheng, Shijie Zhao, Wanwan Cui, Tianyu Xu, Chunyang Li, Long Bao, Heng Sun, Huaibo Huang, Xiaoqiang Zhou, Yang Ai, Ran He, Renlong Wu, Yi Yang, Zhilu Zhang, Shuo-hao Zhang, Junyi Li, Yunjin Chen, Dongwei Ren, Wang-meng Zuo, Qian Wang, Hao-Hsiang Yang, Yi-Chung Chen, Zhi-Kai Huang, Wei-Ting Chen, Yuan-Chun Chiang, Hua-En Chang, I-Hsiang Chen, Chia-Hsuan Hsieh, Sy-Yen Kuo, Zebin Zhang, Jiaqi Zhang, Yuhui Wang, Shuhao Cui, Junshi Huang, Li Zhu, Shuman Tian, Wei Yu, and Bingchun Luo. Ntire 2023 challenge on 360deg omnidirectional image and video super-resolution: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 1731-1745, 2023. 3, 4", + "[5] Ricky TQ Chen, Jens Behrmann, David K Duvenaud, and Jorn-Henrik Jacobsen. Residual flows for invertible generative modeling. Advances in Neural Information Processing Systems, 32, 2019. 
2", + "[6] Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. Motion-conditioned diffusion model for controllable video synthesis. arXiv preprint arXiv:2304.14404, 2023. 3", + "[7] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG), 41(6):1-16, 2022. 3", + "[8] Ming Cheng, Haoyu Ma, Qiufang Ma, Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Xuhan Sheng, Shijie Zhao, Junlin Li, and Li Zhang. Hybrid transformer and cnn attention network for stereo image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1702-1711, 2023. 5", + "[9] Xinhua Cheng, Nan Zhang, Jiwen Yu, Yinhuai Wang, Ge Li, and Jian Zhang. Null-space diffusion sampling for zero-shot point cloud completion. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence (IJ-CAI), 2023. 2", + "[10] Yen-Chi Cheng, Chieh Hubert Lin, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, and Ming-Hsuan Yang. Inout: Diverse image outpainting via gan inversion. In Proceedings of" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11431-11440, 2022. 3", + "[11] Antonia Creswell, Tom White, Vincent Dumoulin, Kai Arulkumaran, Biswa Sengupta, and Anil A Bharath. Generative adversarial networks: An overview. IEEE signal processing magazine, 35(1):53-65, 2018. 2", + "[12] Mohammad Reza Karimi Dastjerdi, Yannick Hold-Geoffroy, Jonathan Eisenmann, Siavash Khodadadeh, and Jean-François Lalonde. Guided co-modulated gan for $360^{\\circ}$ field of view extrapolation. In 2022 International Conference on 3D Vision (3DV), pages 475–485. IEEE, 2022. 3", + "[13] Patrick Esser, Johnathan Chiu, Parmida Atighechian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7346-7356, 2023. 3", + "[14] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022.3", + "[15] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 3", + "[16] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 1, 3", + "[17] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 2, 3", + "[18] Yaosi Hu, Zhenzhong Chen, and Chong Luo. Lamd: Latent motion diffusion for video generation. arXiv preprint arXiv:2304.11603, 2023. 3", + "[19] Xuhui Jia, Yang Zhao, Kelvin CK Chan, Yandong Li, Han Zhang, Boqing Gong, Tingbo Hou, Huisheng Wang, and Yu-Chuan Su. Taming encoder for zero fine-tuning image customization with text-to-image diffusion models. arXiv preprint arXiv:2304.02642, 2023. 3", + "[20] Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 2", + "[21] Jialu Li and Mohit Bansal. 
Panogen: Text-conditioned panoramic environment generation for vision-and-language navigation. Advances in Neural Information Processing Systems, 36, 2024. 3", + "[22] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning, pages 12888-12900. PMLR, 2022. 4", + "[23] Chieh Hubert Lin, Chia-Che Chang, Yu-Sheng Chen, Da-Cheng Juan, Wei Wei, and Hwann-Tzong Chen. Coco-gan: Generation by parts via conditional coordinating. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4512-4521, 2019. 3", + "[24] Chieh Hubert Lin, Hsin-Ying Lee, Yen-Chi Cheng, Sergey Tulyakov, and Ming-Hsuan Yang. Infinitygan: To" + ], + "bbox": [ + 501, + 92, + 893, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "6921", + "bbox": [ + 482, + 945, + 513, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "wards infinite-pixel image synthesis. arXiv preprint arXiv:2104.03963, 2021. 3", + "[25] Chong Mou, Xintao Wang, Jiechong Song, Ying Shan, and Jian Zhang. Dragondiffusion: Enabling drag-style manipulation on diffusion models. In The Twelfth International Conference on Learning Representations, 2024. 3", + "[26] Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, and Ying Shan. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4296–4304, 2024. 1, 3", + "[27] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 1, 3", + "[28] Changgyoon Oh, Wonjune Cho, Yujeong Chae, Daehee Park, Lin Wang, and Kuk-Jin Yoon. Bips: Bi-modal indoor panorama synthesis via residual depth-aided adversarial learning. In European Conference on Computer Vision, pages 352–371. Springer, 2022. 3", + "[29] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing attentions for zero-shot text-based video editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15932-15942, 2023. 3", + "[30] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 4", + "[31] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 1, 3", + "[32] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Bjorn Ommer. High-resolution image synthesis with latent diffusion models. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 3", + "[33] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional Networks for Biomedical Image Segmentation, page 234–241. 2015. 4", + "[34] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 3", + "[35] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 1, 3", + "[36] Wenzhe Shi, Jose Caballero, Ferenc Huszar, Johannes Totz, Andrew P. Aitken, Rob Bishop, Daniel Rueckert, and Zehan" + ], + "bbox": [ + 78, + 92, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 5", + "[37] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022. 1, 3", + "[38] Kihyuk Sohn, Honglak Lee, and Xinchen Yan. Learning structured output representation using deep conditional generative models. Advances in neural information processing systems, 28, 2015. 2", + "[39] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502, 2020. 2, 5", + "[40] Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Qiufang Ma, Xuhan Sheng, Ming Cheng, Haoyu Ma, Shijie Zhao, Jian Zhang, Junlin Li, et al. Opdn: Omnidirectional position-aware deformable network for omnidirectional image superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1293-1301, 2023. 3, 5", + "[41] Piotr Teterwak, Aaron Sarna, Dilip Krishnan, Aaron Maschinot, David Belanger, Ce Liu, and William T Freeman. Boundless: Generative adversarial networks for image extension. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10521-10530, 2019. 3", + "[42] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 3", + "[43] Guangcong Wang, Yinuo Yang, Chen Change Loy, and Zwei Liu. Stylelight: HDR panorama generation for lighting estimation and editing. In European Conference on Computer Vision, pages 477-492. Springer, 2022. 3", + "[44] Hai Wang, Xiaoyu Xiang, Yuchen Fan, and Jing-Hao Xue. Customizing 360-degree panoramas through text-to-image diffusion models. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 4933-4943, 2024. 3", + "[45] Jionghao Wang, Ziyu Chen, Jun Ling, Rong Xie, and Li Song. 360-degree panorama generation from few unregistered nfov images. arXiv preprint arXiv:2308.14686, 2023. 3, 6", + "[46] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 1, 3", + "[47] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems, 36, 2024. 
3", + "[48] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "6922", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "of image diffusion models for text-to-video generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7623-7633, 2023. 3", + "[49] Ruiqi Wu, Liangyu Chen, Tong Yang, Chunle Guo, Chongyi Li, and Xiangyu Zhang. Lamp: Learn a motion pattern for few-shot-based video generation. arXiv preprint arXiv:2310.10769, 2023. 3", + "[50] Songsong Wu, Hao Tang, Xiao-Yuan Jing, Haifeng Zhao, Jianjun Qian, Nicu Sebe, and Yan Yan. Cross-view panorama image synthesis. IEEE Transactions on Multimedia, 2022. 3", + "[51] Tianhao Wu, Chuanxia Zheng, and Tat-Jen Cham. IPO-ldm: Depth-aided 360-degree indoor rgb panorama outpainting via latent diffusion model. arXiv preprint arXiv:2307.03177, 2023. 3", + "[52] Jinbo Xing, Menghan Xia, Yuxin Liu, Yuechen Zhang, Y He, H Liu, H Chen, X Cun, X Wang, Y Shan, et al. Makeyour-video: Customized video generation using textual and structural guidance. IEEE Transactions on Visualization and Computer Graphics, 2024. 1", + "[53] Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, and Fang Wen. Paint by example: Exemplar-based image editing with diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18381-18391, 2023. 3", + "[54] Shuai Yang, Yifan Zhou, Ziwei Liu, and Chen Change Loy. Rerender a video: Zero-shot text-guided video-to-video translation. In SIGGRAPH Asia 2023 Conference Papers, pages 1-11, 2023. 3", + "[55] Jiwen Yu, Yinhuai Wang, Chen Zhao, Bernard Ghanem, and Jian Zhang. Freedom: Training-free energy-guided conditional diffusion model. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23174-23184, 2023. 3", + "[56] Jiwen Yu, Xuanyu Zhang, Youmin Xu, and Jian Zhang. CRoSS: Diffusion model makes controllable, robust and secure image steganography. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 3", + "[57] Han Zhang, Tao Xu, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, and Dimitris N Metaxas. Stackgan: Text to photo-realistic image synthesis with stacked generative adversarial networks. In Proceedings of the IEEE international conference on computer vision, pages 5907-5915, 2017. 2", + "[58] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 3", + "[59] Qinsheng Zhang, Jiaming Song, Xun Huang, Yongxin Chen, and Ming-Yu Liu. Diffcollage: Parallel generation of large content with diffusion models. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10188-10198. IEEE, 2023. 3", + "[60] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 
1, 3" + ], + "bbox": [ + 78, + 90, + 468, + 896 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "6923", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_model.json b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..0150b74139e44bf9d39d3b40bc84a42c31006081 --- /dev/null +++ b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_model.json @@ -0,0 +1,2125 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.131, + 0.773, + 0.175 + ], + "angle": 0, + "content": "360DVD: Controllable Panorama Video Generation with 360-Degree Video Diffusion Model" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.203, + 0.844, + 0.275 + ], + "angle": 0, + "content": "Qian Wang\\(^{1,2}\\), Weiqi Li\\(^{1}\\), Chong Mou\\(^{1,2}\\), Xinhua Cheng\\(^{1,2}\\), Jian Zhang\\(^{1,2}\\) \n\\(^{1}\\)School of Electronic and Computer Engineering, Peking University \n\\(^{2}\\)Peking University Shenzhen Graduate School-Rabbitpre AIGC Joint Research Laboratory \n{qianwang, liweiqi, eechongm, chengxinhua}@stu.pku.edu.cn, zhangjian.sz@pku.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.309, + 0.314, + 0.327 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.342, + 0.473, + 0.691 + ], + "angle": 0, + "content": "Panorama video recently attracts more interest in both study and application, courtesy of its immersive experience. Due to the expensive cost of capturing \\(360^{\\circ}\\) panoramic videos, generating desirable panorama videos by prompts is urgently required. Lately, the emerging text-to-video (T2V) diffusion methods demonstrate notable effectiveness in standard video generation. However, due to the significant gap in content and motion patterns between panoramic and standard videos, these methods encounter challenges in yielding satisfactory \\(360^{\\circ}\\) panoramic videos. In this paper, we propose a pipeline named 360-Degree Video Diffusion model (360DVD) for generating \\(360^{\\circ}\\) panoramic videos based on the given prompts and motion conditions. Specifically, we introduce a lightweight 360-Adapter accompanied by 360 Enhancement Techniques to transform pre-trained T2V models for panorama video generation. We further propose a new panorama dataset named WEB360 consisting of panoramic video-text pairs for training 360DVD, addressing the absence of captioned panoramic video datasets. Extensive experiments demonstrate the superiority and effectiveness of 360DVD for panorama video generation. Our project page is at https://akaneqwq.github.io/360DVD/." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.718, + 0.21, + 0.735 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.744, + 0.47, + 0.866 + ], + "angle": 0, + "content": "With the recent advancements in VR technology, 360-degree panoramic videos have been gaining increasing popularity. This video format which offers audiences an immersive experience, is helpful for various applications, including entertainment, education, and communication. To capture details of the entire scene, \\(360^{\\circ}\\) videos are typically recorded using an array of high-resolution fisheye cameras that yields a \\(360^{\\circ} \\times 180^{\\circ}\\) field-of-view (FoV) [1], which" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.875, + 0.472, + 0.903 + ], + "angle": 0, + "content": "This work was supported by National Natural Science Foundation of China under Grant 62372016. (Corresponding author: Jian Zhang)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.311, + 0.895, + 0.37 + ], + "angle": 0, + "content": "is quite costly in both time and resources. Therefore, the generation of \\(360^{\\circ}\\) panoramic videos is urgently required for border applications, while panoramic video generation receives little attention in studies to date." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.372, + 0.895, + 0.583 + ], + "angle": 0, + "content": "Thanks to the emerging theory and training strategies, text-to-image (T2I) diffusion models [26, 27, 31, 32, 35] demonstrate remarkable image generation capacity from prompts given by users, and such impressive achievement in image generation is further extended to text-to-video (T2V) generation. Various T2V diffusion models [3, 16, 37, 46, 52, 60] are recently proposed with adopting space-time separable architectures, wherein spatial operations are inherited from the pre-trained T2I models to reduce the complexity of constructing space-time models from scratch. Among these,AnimateDiff [16] enables the capability to generate animated images for various personalized T2I models, which alleviates the requirement for model-specific tuning and achieves compelling content consistency over time." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.584, + 0.896, + 0.885 + ], + "angle": 0, + "content": "Although T2V methods on standard videos are widely studied, there is no method proposed for panorama video generation. One potential approach is to leverage existing powerful T2V models, e.g.,AnimateDiff to directly generate the equirectangular projection (ERP) of panoramic videos. Since ERP is a commonly adopted format for storing and transmitting panoramic videos, each frame is treated by ERP as a rectangular image with an aspect ratio of 1:2, which aligns well with the output format of existing standard T2V models. However, due to the significant differences between panoramic videos and standard videos, existing methods suffer challenges in directly producing satisfactory \\(360^{\\circ}\\) panoramic videos. Concretely, the main challenges include three aspects: (1) The content distribution of ERPs differs from standard videos. ERPs require a wider FoV, reaching \\(360^{\\circ} \\times 180^{\\circ}\\). (2) The motion patterns of ERPs are different from standard videos, with movements often following curves rather than straight lines. (3) The left and right ends of ERPs should exhibit continuity since they correspond to the same meridian on the Earth." 
+ }, + { + "type": "text", + "bbox": [ + 0.52, + 0.886, + 0.895, + 0.902 + ], + "angle": 0, + "content": "Therefore, we propose a specifically designed method" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.958 + ], + "angle": 0, + "content": "6913" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.089, + 0.482, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.489, + 0.09, + 0.877, + 0.435 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.446, + 0.893, + 0.476 + ], + "angle": 0, + "content": "Figure 1. Main results. Our 360DVD creates text-aligned, coherent, and high-quality \\(360^{\\circ}\\) panorama videos. Furthermore, 360DVD can cooperate with multiple personalized text-to-image models and consistently generate stylized panorama videos." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.501, + 0.473, + 0.759 + ], + "angle": 0, + "content": "named 360-Degree Video Diffusion (360DVD) for generating panorama videos. We first introduce a plug-and-play module named 360-Adapter to address challenge mentioned above. Our 360-Adapter receives zero values or motion conditions (e.g., optical flow) as input and outputs motion features, which are fed into the frozen denoising U-Net at different levels of the encoder. This transformation is aimed at converting the T2V model into a panoramic video generation without altering the foundational generative capabilities. In addition, we introduce 360 Enhancement Techniques including two mechanisms to enhance continuity at both ends of ERPs from both macro and micro perspectives, and a latitude-aware loss function for encouraging the model to focus more on low-latitude regions. Cooperated with carefully designed techniques, our 360DVD generates text-aligned, coherent, high-quality, \\(360^{\\circ}\\) panorama videos with various styles, as shown in Fig. 1." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.473, + 0.901 + ], + "angle": 0, + "content": "Furthermore, we collect a panorama dataset named WEB360 including ERP-formatted videos from the internet and games for training our method. WEB360 involves approximately 2,000 video clips with each clip consisting of 100 frames. Considering the domain gap between panoramic and standard images, to enhance the accuracy and granularity of captions, we introduce a GPT-based 360 Text Fusion module for obtaining detailed captions. Our contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.501, + 0.892, + 0.576 + ], + "angle": 0, + "content": "- We introduce a controllable \\(360^{\\circ}\\) panorama video generation diffusion model named 360DVD, achieved by adopting a controllable standard T2V model with a trainable lightweight 360-Adapter. Our model can generate text-guided panorama videos conditioned on desired motions." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.577, + 0.892, + 0.621 + ], + "angle": 0, + "content": "- We design 360 Enhancement Techniques including a latitude-aware loss and two mechanisms to enhance the content and motion quality of generated panorama videos." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.622, + 0.892, + 0.681 + ], + "angle": 0, + "content": "- We propose a new high-quality dataset named WEB360 comprising approximately 2,000 panoramic videos, with each video accompanied by a detailed caption enhanced through 360 Text Fusion." 
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.682, + 0.892, + 0.727 + ], + "angle": 0, + "content": "- Experiments demonstrate that our 360DVD is capable of generating high-quality, high-diversity, and more consistent \\(360^{\\circ}\\) panorama videos." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.501, + 0.892, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.745, + 0.651, + 0.761 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.771, + 0.774, + 0.787 + ], + "angle": 0, + "content": "2.1. Text-to-Image Diffusion Model" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.892, + 0.903 + ], + "angle": 0, + "content": "The Denoising Diffusion Probabilistic Model [9, 17, 39] has proven to be highly successful in generating high-quality images, outperforming previous approaches such as generative adversarial networks (GANs)[11, 57], variational autoencoders (VAEs)[20, 38], and flow-based methods [5]. With text guidance during training, users can generate images based on textual input. Noteworthy examples include" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6914" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.473, + 0.198 + ], + "angle": 0, + "content": "GLIDE [27], DALLE-2 [31], Imagen [35]. To address the computational burden of the iterative denoising process, LDM [32] conducts the diffusion process on a compressed latent space rather than the original pixel space. This accomplishment has prompted further exploration in extending customization [14, 34], image guidance [53, 55], precise control [25, 26, 58] and protection [56]." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.208, + 0.347, + 0.223 + ], + "angle": 0, + "content": "2.2. Text-to-Video Diffusion Model" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.232, + 0.473, + 0.655 + ], + "angle": 0, + "content": "Despite significant advancements in Text-to-Image (T2I) generation, Text-to-Video (T2V) generation faces challenges, including the absence of large-scale, high-quality paired text-video datasets, the inherent complexity in modeling temporal consistency, and the resource-intensive nature of training. To address these challenges, many works leverage the knowledge from pre-trained T2I models, and they manage training costs by executing the diffusion process in the latent space. Some methods [15, 29, 48, 49, 54] utilize T2I models in zero-shot or few-shot ways. However, these methods often suffer from suboptimal frame consistency due to insufficient training. To address this limitation, another category of T2V diffusion models typically adopts space-time separable architectures. These models [3, 37, 46, 60] inherit spatial operations from pre-trained T2I models, reducing the complexity of constructing space-time models from scratch. Given that most personalized T2I models are derived from the same base one (e.g. Stable Diffusion [32]),AnimateDiff [16] designs a motion modeling module that trained with a base T2I model and could animate most of derived personalized T2I models once for all. There are also efforts focused on enhancing control in T2V models. Gen-1 [13], MCDiff [6], LaMD [18] and VideoComposer [47] introduce diverse conditions to T2V models. 
Despite these advancements, the aforementioned methods demand extensive training and lack a plug-and-play nature, making it challenging to apply them to a diverse range of personalized T2I models." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.666, + 0.285, + 0.68 + ], + "angle": 0, + "content": "2.3. Panorama Generation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.472, + 0.901 + ], + "angle": 0, + "content": "GAN-based methods for generating panoramic images have been widely studied [2, 4, 7, 10, 12, 23, 24, 28, 40, 41, 43, 50]. For instance, OmniDreamer [2] accepts a single NFoV image as an input condition and introduces a cyclic inference scheme to meet the inherent horizontal cyclicity of 360-degree images. ImmenseGAN [12] fine-tunes the generative model using a large-scale private text-image pair dataset, making the generation more controllable. Text2Light [7] introduces a zero-shot text-guided 360-image synthesis pipeline by utilizing the CLIP model. Very recently, diffusion models have achieved promising results in panoramic image generation. DiffCollage [59] uses semantic maps as conditions and generates images based on complex factor graphs using retrained diffusion mod" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.091, + 0.895, + 0.289 + ], + "angle": 0, + "content": "els. PanoGen [21] employs a latent diffusion model and synthesizes new indoor panoramic images through recursive image drawing techniques based on multiple text descriptions. PanoDiff [45] achieves a multi-NFoV synthesis of panoramic images through a two-stage pose estimation module. IPO-LDM [51] uses a dual-modal diffusion structure of RGB-D to better learn the spatial distribution and patterns of panoramic images. StitchDiffusion [44] employs a T2I diffusion model, ensuring continuity at both ends through stitching. However, to date, panoramic video generation has received limited attention. To the best of our knowledge, we are the first to leverage diffusion models for panoramic video generation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.301, + 0.593, + 0.317 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.327, + 0.894, + 0.432 + ], + "angle": 0, + "content": "In this section, we begin with a concise review of the latent diffusion fusion model andAnimateDiff [16]. Following that, we introduce the construction method of the WEB360 dataset. We then provide an overview of 360DVD and elaborate on the implementation details of 360-Adapter. Finally, we describe the 360 enhancement techniques aimed at enriching the panoramic nature of the video." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.441, + 0.642, + 0.456 + ], + "angle": 0, + "content": "3.1. Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.465, + 0.892, + 0.495 + ], + "angle": 0, + "content": "Latent Diffusion Model. Given an input signal \\(\\mathbf{x}_0\\), a diffusion forward process in DDPM [17] is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.552, + 0.507, + 0.892, + 0.526 + ], + "angle": 0, + "content": "\\[\np _ {\\theta} \\left(\\mathbf {x} _ {t} \\mid \\mathbf {x} _ {t - 1}\\right) = \\mathcal {N} \\left(\\mathbf {x} _ {t}; \\sqrt {1 - \\beta_ {t}} \\mathbf {x} _ {t - 1}, \\beta_ {t} \\mathbf {I}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.535, + 0.894, + 0.643 + ], + "angle": 0, + "content": "for \\(t = 1,\\ldots ,T\\), where \\(T\\) is the total timestep of the diffusion process. 
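For readers less familiar with DDPM, the forward transition in Eq. (1) amounts to a single reparameterized Gaussian draw per step; a minimal sketch follows (the schedule values are only examples).

```python
import torch

def ddpm_forward_step(x_prev, beta_t):
    """One forward step of Eq. (1): x_t ~ N(sqrt(1 - beta_t) * x_{t-1}, beta_t * I)."""
    noise = torch.randn_like(x_prev)
    return (1.0 - beta_t) ** 0.5 * x_prev + beta_t ** 0.5 * noise

# Applying this for t = 1..T with a small, increasing beta schedule drives x_T toward N(0, I).
```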
A noise depending on the variance \\(\\beta_{t}\\) is gradually added to \\(\\mathbf{x}_{t - 1}\\) to obtain \\(\\mathbf{x}_t\\) at the next timestep and finally reach \\(\\mathbf{x}_T\\in \\mathcal{N}(0,\\mathbf{I})\\). The goal of the diffusion model is to learn to reverse the diffusion process (denoising). Given a random noise \\(\\mathbf{x}_t\\), the model predicts the added noise at the next timestep \\(\\mathbf{x}_{t - 1}\\) until the origin signal \\(\\mathbf{x}_0\\):" + }, + { + "type": "equation", + "bbox": [ + 0.54, + 0.653, + 0.892, + 0.671 + ], + "angle": 0, + "content": "\\[\np _ {\\theta} (\\mathbf {x} _ {t - 1} | \\mathbf {x} _ {t}) = \\mathcal {N} (\\mathbf {x} _ {t - 1}; \\boldsymbol {\\mu} _ {\\theta} (\\mathbf {x} _ {t}, t), \\boldsymbol {\\Sigma} _ {\\theta} (\\mathbf {x} _ {t}, t)), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.68, + 0.895, + 0.756 + ], + "angle": 0, + "content": "for \\(t = T,\\ldots ,1\\). We fix the variance \\(\\Sigma_{\\theta}(\\mathbf{x}_t,t)\\) and utilize the diffusion model with parameter \\(\\theta\\) to predict the mean of the inverse process \\(\\pmb{\\mu}_{\\theta}(\\mathbf{x}_t,t)\\). The model can be simplified as denoising models \\(\\epsilon_{\\theta}(\\mathbf{x}_t,t)\\), which are trained to predict the noise of \\(\\mathbf{x}_t\\) with a noise prediction loss:" + }, + { + "type": "equation", + "bbox": [ + 0.539, + 0.766, + 0.892, + 0.786 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathbb {E} _ {\\mathbf {x} _ {0}, \\mathbf {y}, \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I}), t} [ \\| \\epsilon - \\epsilon_ {\\theta} (\\mathbf {x} _ {t}, t, \\boldsymbol {\\tau} _ {\\theta} (\\mathbf {y})) \\| _ {2} ^ {2} ], \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.892, + 0.84 + ], + "angle": 0, + "content": "where \\(\\epsilon\\) is the added noise to the input image \\(\\mathbf{x}_0\\), \\(\\mathbf{y}\\) is the corresponding textual description, \\(\\tau_{\\theta}(\\cdot)\\) is a text encoder mapping the string to a sequence of vectors." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.84, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Latent Diffusion Model (LDM) [32] executes the denoising process in the latent space of an autoencoder, namely \\(\\mathcal{E}(\\cdot)\\) and \\(\\mathcal{D}(\\cdot)\\), implemented as VQ-GAN [19] or VQ-VAE [42] pre-trained on large image datasets. During the" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6915" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "training of the latent diffusion networks, an input image \\(\\mathbf{x}_0\\) is initially mapped to the latent space by the frozen encoder, yielding \\(\\mathbf{z}_0 = \\mathcal{E}(\\mathbf{x}_0)\\). Thus, the training objective can be formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.099, + 0.162, + 0.47, + 0.183 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathbb {E} _ {\\mathcal {E} (\\mathbf {x} _ {0}), \\mathbf {y}, \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (0, \\mathbf {I}), t} [ \\| \\boldsymbol {\\epsilon} - \\boldsymbol {\\epsilon} _ {\\theta} (\\mathbf {z} _ {t}, t, \\boldsymbol {\\tau} _ {\\theta} (\\mathbf {y})) \\| _ {2} ^ {2} ]. 
\\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.192, + 0.469, + 0.312 + ], + "angle": 0, + "content": "In widely-used LDM Stable Diffusion (SD), which our method is based on, \\(\\epsilon_{\\theta}(\\cdot)\\) is implemented with a modified UNet [33] that incorporates four downsample/upsample blocks and one middle block, resulting in four resolution levels within the networks' latent space. Each resolution level integrates 2D convolution layers as well as self- and cross-attention mechanisms. Text model \\(\\tau_{\\theta}(\\cdot)\\) is implemented using the CLIP [30] ViT-L/14 text encoder." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.313, + 0.47, + 0.493 + ], + "angle": 0, + "content": "AnimateDiff.AnimateDiff inflates base SD by adding temporal-aware structures and learning reasonable motion priors from large-scale video datasets. Since the original SD can only process 4D image data batches, while T2V task takes a 5D video tensor as input. It transforms each 2D convolution and attention layer in the original image model into spatial-only pseudo-3D layers. The motion module is inserted at every resolution level of the U-shaped diffusion network, using vanilla temporal transformers consisting of several self-attention blocks operating along the temporal axis. The training objective ofAnimateDiff can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.504, + 0.47, + 0.526 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathbb {E} _ {\\mathcal {E} \\left(\\mathbf {x} _ {0} ^ {1: N}\\right), \\mathbf {y}, \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (0, \\mathbf {I}), t} [ \\| \\boldsymbol {\\epsilon} - \\boldsymbol {\\epsilon} _ {\\theta} \\left(\\mathbf {z} _ {t} ^ {1: N}, t, \\boldsymbol {\\tau} _ {\\theta} (\\mathbf {y})\\right) \\| _ {2} ^ {2} ], \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.535, + 0.469, + 0.628 + ], + "angle": 0, + "content": "where \\(\\mathbf{x}_0^{1:N}\\) is the sampled video data, \\(\\mathbf{z}_0^{1:N}\\) is the latent code which \\(\\mathbf{x}_0^{1:N}\\) are encoded into via the pre-trained autoencoder, \\(\\mathbf{z}_t^{1:N}\\) is the latent code obtained by perturbing the initial latent code \\(\\mathbf{z}_0^{1:N}\\) with noise at timestep \\(t\\). During training, the pre-trained weights of the base T2I model are frozen to keep its feature space unchanged." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.636, + 0.245, + 0.65 + ], + "angle": 0, + "content": "3.2. WEB360 Dataset" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.469, + 0.75 + ], + "angle": 0, + "content": "Diverse text-video pairs datasets are essential for training open-domain text-to-video generation models. However, existing \\(360^{\\circ}\\) panorama video datasets lack corresponding textual annotations. Moreover, these datasets are often constrained either in scale or quality, thereby impeding the upper limit of high-quality video generation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.47, + 0.902 + ], + "angle": 0, + "content": "To address the aforementioned challenges and achieve high-quality 360 panorama video generation, we introduce a novel text-video dataset named WEB360. This dataset comprises 2114 text-video pairs sourced from open-domain content, presented in high-definition (720p) ERP format. Our dataset creation process involved extracting 210 high-resolution panoramic video clips from the ODV360 [4] training set. 
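The space-time separable processing summarized above (pre-trained spatial layers applied per frame, the motion module applied along the frame axis) can be sketched with two reshapes. The einops patterns below mirror that description and are not the actual AnimateDiff code.

```python
import torch
from einops import rearrange

def apply_spatial(layer2d, video):
    """Apply a pre-trained 2D (image) layer frame-by-frame to a video latent B x C x N x H x W."""
    x = rearrange(video, "b c n h w -> (b n) c h w")             # fold frames into the batch axis
    x = layer2d(x)
    return rearrange(x, "(b n) c h w -> b c n h w", b=video.shape[0])

def apply_temporal(temporal_attn, video):
    """Apply a motion module along the frame axis at every spatial location."""
    b, c, n, h, w = video.shape
    x = rearrange(video, "b c n h w -> (b h w) n c")             # one length-N sequence per pixel
    x = temporal_attn(x)                                          # e.g. a temporal self-attention block
    return rearrange(x, "(b h w) n c -> b c n h w", b=b, h=h, w=w)
```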
Additionally, we collected over 400 original videos from YouTube. Due to the complex scene transitions present in the original videos, which pose challenges" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.09, + 0.892, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.247, + 0.894, + 0.304 + ], + "angle": 0, + "content": "Figure 2. 360 Text Fusion. The captions of four images with a FoV of 90 are fed into ChatGPT to generate a new \\(360^{\\circ}\\) summarization. Compared to the caption of ERP at the bottom right, 360 Text Fusion allows for more fine-grained captions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.332, + 0.892, + 0.452 + ], + "angle": 0, + "content": "for models in learning temporal correlations, we perform a manual screening process to split the original videos into 1904 single-scene video clips. We employ BLIP [22] to annotate the first frame of the 2104 video clips. However, we observed that direct application of BLIP to ERP images often resulted in bad captions. Therefore, we propose a panoramic image caption method named 360 Text Fusion, based on ChatGPT." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.454, + 0.893, + 0.727 + ], + "angle": 0, + "content": "360 Text Fusion. We find that directly using BLIP [22] to label ERP has drawbacks. On one hand, errors may arise due to the distortion caused by the polarities, leading to misidentifications such as labeling \"person\" as \"dog\". On the other hand, the captions generated by BLIP lack granularity, making them insufficient for providing a detailed description of the current scene. Thus, we propose 360 Text Fusion (360TF) method, as shown in Fig. 2. To deal with the irregular distortion of ERP, we turn to less-distorted perspective images. We first project the original ERP image to four non-overlapping perspective images at 0 degrees longitude, with a FoV of 90. The four images are then fed into BLIP to be captioned. By pre-informing ChatGPT about the task and providing examples, these four captions are collectively input to ChatGPT, which then generates a summary of the scene as our final caption. In comparison to directly using BLIP to label the entire image, our 360TF demonstrates a significant advantage in granularity." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.739, + 0.797, + 0.755 + ], + "angle": 0, + "content": "3.3. 360-degree Video Diffusion Model" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.763, + 0.892, + 0.884 + ], + "angle": 0, + "content": "An overview of the 360-degree Video Diffusion Model (360 DVD) is presented in Fig. 3, which is composed of a pretrained denoising U-Net and 360-Adapter. The pre-trained denoising U-Net adopts a structure identical to that ofAnimateDiff. In every resolution level of the U-Net, the spatial layer unfolds pre-trained weights from SD, while the temporal layer incorporates the motion module ofAnimateDiff trained on a large-scale text-video dataset." 
+ }, + { + "type": "text", + "bbox": [ + 0.519, + 0.885, + 0.89, + 0.903 + ], + "angle": 0, + "content": "During the training process, we first sample a video \\(\\mathbf{x}_0^{1:N}\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6916" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.087, + 0.09, + 0.895, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.284, + 0.895, + 0.329 + ], + "angle": 0, + "content": "Figure 3. Overview of 360DVD. 360DVD leverages a trainable 360-Adapter to extend standard T2V models to the panorama domain and is able to generate high-quality panorama videos with given prompts and optional motion conditions. In addition, 360 Enhancement Techniques are proposed for quality improvement in the panorama perspective." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.353, + 0.473, + 0.566 + ], + "angle": 0, + "content": "from the dataset. The video is encoded into latent code \\(\\mathbf{z}_0^{1:N}\\) through pre-trained VAE encoder \\(\\mathcal{E}(\\cdot)\\) and noised to \\(\\mathbf{z}_t^{1:N}\\). Simultaneously, the corresponding text \\(\\mathbf{y}\\) for the video is encoded using the text encoder \\(\\pmb{\\tau}_{\\theta}(\\cdot)\\) of the CLIP. The video is also input into a motion estimation network to generate corresponding motion conditions \\(\\mathbf{c}\\), which are then fed into the 360-Adapter \\(\\mathcal{F}_{360}(\\cdot)\\). Finally, noised latent code \\(\\mathbf{z}_t^{1:N}\\), timestep \\(t\\), text embedding \\(\\pmb{\\tau}_{\\theta}(\\mathbf{y})\\), and the feature maps \\(\\mathbf{f}_{360}\\) generated by 360-Adapter are collectively input into the U-Net \\(\\epsilon(\\cdot)\\) to predict the noise strength added to the latent code. As we aim to preserve the priors learned by SD andAnimateDiff on large datasets, we freeze their weights during the training process. If we use a simple L2 loss term, the training objective is given as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.569, + 0.47, + 0.598 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathbb {E} _ {\\mathcal {E} (\\mathbf {x} _ {0} ^ {1: N}), \\mathbf {y}, \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I}), t} [ \\| \\boldsymbol {\\epsilon} - \\boldsymbol {\\epsilon} _ {\\theta} (\\mathbf {z} _ {t} ^ {1: N}, t, \\boldsymbol {\\tau} _ {\\theta} (\\mathbf {y}), \\mathbf {f} _ {3 6 0}) \\| _ {2} ^ {2} ]. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.6, + 0.47, + 0.704 + ], + "angle": 0, + "content": "To ensure satisfactory generation of \\(360^{\\circ}\\) panoramic videos without motion control input, we set the input of the 360-Adapter to zero with a probability \\(P\\) during training. This strategy aims to encourage the model to learn representations that are not solely reliant on motion conditions, enhancing its ability to generate compelling panoramic videos without explicit motion guidance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.471, + 0.884 + ], + "angle": 0, + "content": "In inference, users have the option to selectively provide text prompts and motion guidance to carry out denoising over a total of \\( T \\) steps. Here, we employ DDIM [39] to accelerate the sampling process. The estimated latent code \\( \\hat{\\mathbf{z}}_0^{1:N} \\) is then input into a pre-trained VAE decoder to decode the desired \\( 360^\\circ \\) panoramic videos \\( \\hat{\\mathbf{x}}_0^{1:N} \\). 
Due to constraints such as resolution limitations imposed by existing SD and considerations regarding GPU memory usage, the experimental results presented in this paper showcase a resolution of \\( 512 \\times 1024 \\). In practical applications, super-resolution methods [8, 40] can be employed to upscale the generated results to the desired size." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.886, + 0.471, + 0.902 + ], + "angle": 0, + "content": "360-Adapter. Our proposed 360-Adapter is simple and" + }, + { + "type": "image", + "bbox": [ + 0.543, + 0.353, + 0.853, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.608, + 0.895, + 0.651 + ], + "angle": 0, + "content": "Figure 4. Overview of 360-Adapter. 360-Adapter is a simple but effective module in which intermediate features are fed into the U-Net encoder blocks for modulation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.895, + 0.903 + ], + "angle": 0, + "content": "lightweight, as shown in Fig. 4. The original condition input has the same resolution as the video of \\( H \\times W \\). Here, we utilize the pixel unshuffle [36] operation to downsample it to \\( H / 8 \\times W / 8 \\). Following that are four 360-Adapter blocks, we depict only one for simplification in Fig. 4. To maintain consistency with the U-Net architecture, the first three 360-Adapter blocks each include a downsampling block. In each 360-Adapter block, one 2D convolution layer and a residual block (RB) with pseudo-3D convolution layers are utilized to extract the condition feature \\( \\mathbf{f}_{360}^{k} \\). Finally, multiscale condition features \\( \\mathbf{f}_{360} = \\{\\mathbf{f}_{360}^{1}, \\mathbf{f}_{360}^{2}, \\mathbf{f}_{360}^{3}, \\mathbf{f}_{360}^{4}\\} \\) are formed. Suppose the intermediate features in the U-Net encoder block is \\( \\mathbf{f}_{enc} = \\{\\mathbf{f}_{enc}^{1}, \\mathbf{f}_{enc}^{2}, \\mathbf{f}_{enc}^{3}, \\mathbf{f}_{enc}^{4}\\} \\). \\( \\mathbf{f}_{360} \\) is then added with \\( \\mathbf{f}_{enc} \\) at each scale. In summary, the condition" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6917" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "feature extraction and conditioning operation of the 360-Adapter can be defined as the following formulation:" + }, + { + "type": "equation", + "bbox": [ + 0.218, + 0.132, + 0.47, + 0.148 + ], + "angle": 0, + "content": "\\[\n\\mathbf {f} _ {3 6 0} = \\mathcal {F} _ {3 6 0} (\\mathbf {c}), \\tag {7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.156, + 0.17, + 0.469, + 0.189 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {f}} _ {e n c} ^ {i} = \\mathbf {f} _ {e n c} ^ {i} + \\mathbf {f} _ {3 6 0} ^ {i}, i \\in \\{1, 2, 3, 4 \\}. \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.194, + 0.471, + 0.435 + ], + "angle": 0, + "content": "In the previous description, we omit some details. Our motion condition \\(\\mathbf{c}\\) is a 5D tensor, assuming its size is batch \\(\\times\\) channels \\(\\times\\) frames \\(\\times\\) height \\(\\times\\) width. We first reshape it into a 4D tensor of size (batch \\(\\times\\) frames) \\(\\times\\) channels \\(\\times\\) height \\(\\times\\) width to allow it to be fed into the 2D convolution layer and restore it to 5D to go through the RB with pseudo-3D convolution layers. 
Subsequently, in the RB, we employ a \\(1 \\times 3 \\times 3\\) pseudo-3D convolution to extract features in the spatial dimension, followed by a \\(3 \\times 1 \\times 1\\) pseudo-3D convolution to model information along the temporal dimension. The resulting features are reshaped back to (batch \\(\\times\\) frames) \\(\\times\\) channels \\(\\times\\) height \\(\\times\\) width to add the output of the skip connection. Finally, condition features are reshaped back into a 5D vector of size batch \\(\\times\\) channels \\(\\times\\) frames \\(\\times\\) height \\(\\times\\) width to align with the U-Net encoder intermediate features." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.444, + 0.342, + 0.459 + ], + "angle": 0, + "content": "3.4. 360 Enhancement Techniques" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.467, + 0.469, + 0.618 + ], + "angle": 0, + "content": "Latitude-aware Loss. When projecting panoramic videos into ERPs, meridians are mapped as vertically spaced lines with a constant interval, while parallels are mapped as horizontally spaced lines with a constant interval. This projection method establishes a straightforward mapping relationship, but it is neither equal-area nor conformal, introducing significant distortion, particularly in the polar regions. To make the denoiser pay more attention to low-latitude regions with less distortion, which is more crucial for human visual perception, we introduce a latitude-aware loss:" + }, + { + "type": "equation", + "bbox": [ + 0.118, + 0.626, + 0.469, + 0.646 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathbb {E} _ {\\mathcal {E} \\left(\\mathbf {x} _ {0} ^ {1: N}\\right), \\mathbf {y}, \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I}), t} [ \\| \\left| \\mathbf {W} \\odot (\\epsilon - \\hat {\\epsilon} _ {\\theta}) \\right| | _ {2} ^ {2} ], \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.654, + 0.469, + 0.686 + ], + "angle": 0, + "content": "where \\(\\hat{\\epsilon}_{\\theta} = \\epsilon_{\\theta}(\\mathbf{z}_t^{1:N}, t, \\boldsymbol{\\tau}_{\\theta}(\\mathbf{y}), \\mathbf{f}_{360})\\), and \\(\\mathbf{W}\\) is a weight matrix used to perform element-wise product, defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.694, + 0.469, + 0.726 + ], + "angle": 0, + "content": "\\[\n\\mathbf {W} _ {i, j} = \\cos \\left(\\frac {2 i - H / 8 + 1}{H / 4} \\pi\\right), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.734, + 0.469, + 0.796 + ], + "angle": 0, + "content": "where \\( i \\in [0, H/8) \\), \\( j \\in [0, W/8) \\), \\( H/8 \\) and \\( W/8 \\) is the height and width of latent code \\( \\mathbf{z}_t^{1:N} \\). The visualized result of \\( \\mathbf{W} \\) is shown in Fig. 5, where pixels in low and middle latitudes are given more weight during training." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Latent Rotation Mechanism. Because ERPs can be considered as the unfolding of a spherical surface along a meridian, they are meant to be wraparound consistent, implying that their left and right sides are continuous. However, during the process of video generation, the left and right sides are physically separated. 
Inspired by PanoDiff [45], we employ a latent rotation mechanism to enhance" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.092, + 0.885, + 0.177 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.187, + 0.892, + 0.258 + ], + "angle": 0, + "content": "Figure 5. Left: the visualization of weight matrix \\(\\mathbf{W}\\), brighter colors indicate values closer to 1, while darker colors suggest values closer to 0. Right: a schematic diagram of the latent rotation mechanism. In each iteration, the far left portion of angle \\(\\theta\\) is shifted to the far right." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.29, + 0.892, + 0.426 + ], + "angle": 0, + "content": "the macroscopic coherence between the left and right ends of the video. During the inference process, we perform a horizontal rotation at an angle of \\(\\theta\\) on \\(\\mathbf{z}_t^{1:N}\\) and motion condition \\(\\mathbf{c}\\), at each denoising step. As illustrated in Fig. 5, the content on the far left is shifted to the far right, where we use \\(\\mathbf{x}_0^1\\) to replace \\(\\mathbf{z}_t^{1:N}\\) for a better visual effect of its continuity. During the training process, we also randomly rotate the training videos along with the motion condition by a random angle as a data augmentation strategy." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.429, + 0.893, + 0.596 + ], + "angle": 0, + "content": "Circular Padding Mechanism. Although the previous latent rotation mechanism achieves semantic continuity at a macroscopic level, achieving pixel-level continuity is challenging. Therefore, in the inference process, we adopt a mechanism of circular padding by modifying the padding method of the convolution layers. We observe that the early stages of \\(360^{\\circ}\\) video generation often involve layout modeling, while the later stages focus on detail completion. To maintain the stable video generation quality of 360DVD, we only implement the circular padding mechanism in the late \\(\\left\\lfloor \\frac{T}{2} \\right\\rfloor\\) steps of a total of \\(T\\) denoising steps." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.618, + 0.625, + 0.635 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.645, + 0.718, + 0.661 + ], + "angle": 0, + "content": "4.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.671, + 0.892, + 0.808 + ], + "angle": 0, + "content": "Training Settings. We choose Stable Diffusion v1.5 and Motion Module v14 as our base model. We utilize the panoramic optical flow estimator PanoFlow [45] to generate motion conditions. We train the 360-Adapter using the proposed WEB360 dataset. The resolution is set to \\(512 \\times 1024\\), the length of frames to 16, the batch size to 1, the learning rate to \\(1 \\times 10^{-5}\\), and the total number of training steps to \\(100k\\), probability \\(P = 0.2\\). We use a linear beta schedule as animateDiff, where \\(\\beta_{start} = 0.00085\\) and \\(\\beta_{end} = 0.012\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Inference Settings. We use DDIM with 25 sampling steps, and the scale for text guidance is 7.5, the angle \\(\\theta = \\pi /2\\). We collect several personalized Stable Diffusion models from CivitAI to verify the effectiveness and generalizability of our method, including Realistic Vision, Lyriel, ToonYou, and RCNZ Cartoon." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6918" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.093, + 0.881, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.427, + 0.895, + 0.457 + ], + "angle": 0, + "content": "Figure 6. Qualitative comparisons with baseline methods. 360DVD successfully produces stable and high-quality panorama video over various prompts while other methods are failed." + }, + { + "type": "image", + "bbox": [ + 0.085, + 0.471, + 0.889, + 0.632 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.643, + 0.895, + 0.675 + ], + "angle": 0, + "content": "Figure 7. Qualitative comparisons of optical flow. 360DVD generates panorama videos with reasonable motion patterns consistent with the conditioned optical flow." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.697, + 0.262, + 0.714 + ], + "angle": 0, + "content": "4.2. Qualitative Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.728, + 0.471, + 0.775 + ], + "angle": 0, + "content": "Due to space limitations, we only display several frames of each video. We strongly recommend readers refer to our project page for more results and better visual quality." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Prompt-guided Panorama Video Generation. We present several prompt-guided \\(360^{\\circ}\\) panorama video generation results across different personalized models in Fig. 1. The figure shows that our method successfully turns personalized T2I models into panorama video generators. Our method can produce impressive generation results ranging from real to cartoon styles, from natural landscapes to cultural scenery. This success is attributed to the fact that our" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.697, + 0.892, + 0.743 + ], + "angle": 0, + "content": "method preserves the image generation priors and temporal modeling priors learned by SD andAnimateDiff on large-scale datasets." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Motion-guided Panorama Video Generation. We showcase panoramic video generation results guided by three typical optical flow maps, as shown in Fig. 7. The optical flow maps in the first row indicate the primary motion areas in the Arctic, where we can observe significant movement of clouds in the sky. The optical flow maps in the second row and third row indicate motion areas primarily in the Antarctic, where we can see the movement of trees and hot air balloons near the Antarctic." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.958 + ], + "angle": 0, + "content": "6919" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.089, + 0.891, + 0.203 + ], + "angle": 0, + "content": "
IndexMethodsVideo CriteriaPanorama Criteria
Graphics QualityFrame ConsistencyEnd ContinuityContent DistributionMotion Pattern
AAnimateDiff11.3%15.3%5.3%4.8%4.4%
BA+LoRA14.1%10.5%6.0%12.1%6.5%
CB+360ET23.0%9.7%16.9%16.1%14.5%
DOurs51.6%64.5%71.8%67.0%74.6%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.212, + 0.893, + 0.242 + ], + "angle": 0, + "content": "Table 1. User preference studies. More raters prefer videos generated by our 360DVD, especially over panorama criteria including if generated videos have left-to-right continuity, the panorama content distribution, and the panorama motion pattern." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.267, + 0.21, + 0.284 + ], + "angle": 0, + "content": "4.3. Comparison" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.29, + 0.473, + 0.609 + ], + "angle": 0, + "content": "We compare our results with nativeAnimateDiff,AnimateDiff with a LoRA for panorama image generation from CivitAI named LatentLabs360,AnimateDiff with panoramic LoRA, and our proposed 360 Enhancement Techniques (loss excepted). We can observe that the results generated by the nativeAnimateDiff have a very narrow field of view, which does not align with the content distribution of panoramic videos. WhenAnimateDiff is augmented with panoramic LoRA, it produces videos with a broader field of view; however, the two ends of videos lack continuity, and object movements are highly random. Our proposed 360ET method significantly enhances the continuity between two ends of the videos but fails to address issues such as non-compliance with panoramic motion patterns and poor cross-frame consistency. Notably, our 360DVD can generate videos that best adhere to the content distribution and motion patterns of panoramic videos. We are pleased to discover that, thanks to the high-quality training data provided by WEB360, the videos generated by 360DVD exhibit more realistic colors and nuanced lighting, providing an immersive experience." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.618, + 0.233, + 0.634 + ], + "angle": 0, + "content": "4.4. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.641, + 0.473, + 0.824 + ], + "angle": 0, + "content": "We primarily conducted ablation studies on the proposed 360 Text Fusion strategy, the pseudo-3D layer in the 360-Adapter, and the latitude-aware loss, as illustrated in Fig. 8. Given the prompt \"a car driving down a street next to a forest\", the first row without 360TF can not generate the car because of low-quality captions in the training process. The second row without pseudo-3D layer can generate a car, but due to the lack of temporal modeling, the results exhibit flickering. The third row without latitude-aware loss can produce relatively good results, but it still falls slightly short in terms of clarity, field of view, and other aspects compared to the last row with the complete 360DVD." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.832, + 0.2, + 0.849 + ], + "angle": 0, + "content": "4.5. User Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.472, + 0.903 + ], + "angle": 0, + "content": "31 participants were surveyed to evaluate the graphics quality, cross-frame consistency, left-right continuity, content distribution, and motion patterns of 8 sets of generated" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.269, + 0.885, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.485, + 0.893, + 0.527 + ], + "angle": 0, + "content": "Figure 8. Ablation studies on 360 Text Fusion (360TF), pseudo-3D layer in 360-Adapter (Pseudo-3D), and latitude-aware loss (Lat. Loss)." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.547, + 0.895, + 0.669 + ], + "angle": 0, + "content": "results. For each criterion, they selected the video they deemed most fitting for the theme of high-quality 360-degree panoramic videos. The data presented in Table 1 indicates that our model outperforms the other three methods significantly across all five dimensions. Simultaneously, our proposed 360ET can remarkably improve video quality, and left-right continuity, solely based on the nativeAnimateDiff and panoramic LoRA." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.682, + 0.62, + 0.698 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.706, + 0.895, + 0.89 + ], + "angle": 0, + "content": "In this paper, we introduce 360DVD, a pipeline for controllable \\(360^{\\circ}\\) panorama video generation. Our framework leverages text prompts and motion guidance to animate personalized T2I models. Utilizing the proposed WEB360 dataset, 360-Adapter, and 360 Enhancement Techniques, our framework can generate videos that adhere to the content distribution and motion patterns in real captured panoramic videos. Extensive experiments demonstrate our effectiveness in creating high-quality panorama videos with various prompts and styles. We believe that our framework provides a simple but effective solution for panoramic video generation, and leads to inspiration for possible future works." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "6920" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.17 + ], + "angle": 0, + "content": "[1] Hao Ai, Zidong Cao, Jinjing Zhu, Haotian Bai, Yucheng Chen, and Lin Wang. Deep learning for omnidirectional vision: A survey and new perspectives. arXiv preprint arXiv:2205.10468, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.172, + 0.472, + 0.241 + ], + "angle": 0, + "content": "[2] Naofumi Akimoto, Yuhi Matsuo, and Yoshimitsu Aoki. *Diverse plausible 360-degree image outpainting for efficient 3dgc background creation.* In *Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition*, pages 11441–11450, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.243, + 0.471, + 0.297 + ], + "angle": 0, + "content": "[3] Jie An, Songyang Zhang, Harry Yang, Sonal Gupta, Jia-Bin Huang, Jiebo Luo, and Xi Yin. Latent-shift: Latent diffusion with temporal shift for efficient text-to-video generation. arXiv preprint arXiv:2304.08477, 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.299, + 0.471, + 0.547 + ], + "angle": 0, + "content": "[4] Mingdeng Cao, Chong Mou, Fanghua Yu, Xintao Wang, Yinqiang Zheng, Jian Zhang, Chao Dong, Gen Li, Ying Shan, Radu Timofte, Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Xuhan Sheng, Bin Chen, Haoyu Ma, Ming Cheng, Shijie Zhao, Wanwan Cui, Tianyu Xu, Chunyang Li, Long Bao, Heng Sun, Huaibo Huang, Xiaoqiang Zhou, Yang Ai, Ran He, Renlong Wu, Yi Yang, Zhilu Zhang, Shuo-hao Zhang, Junyi Li, Yunjin Chen, Dongwei Ren, Wang-meng Zuo, Qian Wang, Hao-Hsiang Yang, Yi-Chung Chen, Zhi-Kai Huang, Wei-Ting Chen, Yuan-Chun Chiang, Hua-En Chang, I-Hsiang Chen, Chia-Hsuan Hsieh, Sy-Yen Kuo, Zebin Zhang, Jiaqi Zhang, Yuhui Wang, Shuhao Cui, Junshi Huang, Li Zhu, Shuman Tian, Wei Yu, and Bingchun Luo. 
Ntire 2023 challenge on 360deg omnidirectional image and video super-resolution: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 1731-1745, 2023. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.549, + 0.47, + 0.604 + ], + "angle": 0, + "content": "[5] Ricky TQ Chen, Jens Behrmann, David K Duvenaud, and Jorn-Henrik Jacobsen. Residual flows for invertible generative modeling. Advances in Neural Information Processing Systems, 32, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.606, + 0.47, + 0.66 + ], + "angle": 0, + "content": "[6] Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. Motion-conditioned diffusion model for controllable video synthesis. arXiv preprint arXiv:2304.14404, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.662, + 0.469, + 0.703 + ], + "angle": 0, + "content": "[7] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG), 41(6):1-16, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.705, + 0.471, + 0.788 + ], + "angle": 0, + "content": "[8] Ming Cheng, Haoyu Ma, Qiufang Ma, Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Xuhan Sheng, Shijie Zhao, Junlin Li, and Li Zhang. Hybrid transformer and cnn attention network for stereo image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1702-1711, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.789, + 0.471, + 0.857 + ], + "angle": 0, + "content": "[9] Xinhua Cheng, Nan Zhang, Jiwen Yu, Yinhuai Wang, Ge Li, and Jian Zhang. Null-space diffusion sampling for zero-shot point cloud completion. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence (IJ-CAI), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.471, + 0.902 + ], + "angle": 0, + "content": "[10] Yen-Chi Cheng, Chieh Hubert Lin, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, and Ming-Hsuan Yang. Inout: Diverse image outpainting via gan inversion. In Proceedings of" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11431-11440, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.894, + 0.178 + ], + "angle": 0, + "content": "[11] Antonia Creswell, Tom White, Vincent Dumoulin, Kai Arulkumaran, Biswa Sengupta, and Anil A Bharath. Generative adversarial networks: An overview. IEEE signal processing magazine, 35(1):53-65, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.894, + 0.248 + ], + "angle": 0, + "content": "[12] Mohammad Reza Karimi Dastjerdi, Yannick Hold-Geoffroy, Jonathan Eisenmann, Siavash Khodadadeh, and Jean-François Lalonde. Guided co-modulated gan for \\(360^{\\circ}\\) field of view extrapolation. In 2022 International Conference on 3D Vision (3DV), pages 475–485. IEEE, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.25, + 0.894, + 0.318 + ], + "angle": 0, + "content": "[13] Patrick Esser, Johnathan Chiu, Parmida Atighechian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7346-7356, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.32, + 0.894, + 0.387 + ], + "angle": 0, + "content": "[14] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.39, + 0.892, + 0.432 + ], + "angle": 0, + "content": "[15] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.434, + 0.892, + 0.489 + ], + "angle": 0, + "content": "[16] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.49, + 0.892, + 0.532 + ], + "angle": 0, + "content": "[17] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.533, + 0.892, + 0.574 + ], + "angle": 0, + "content": "[18] Yaosi Hu, Zhenzhong Chen, and Chong Luo. Lamd: Latent motion diffusion for video generation. arXiv preprint arXiv:2304.11603, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.576, + 0.892, + 0.645 + ], + "angle": 0, + "content": "[19] Xuhui Jia, Yang Zhao, Kelvin CK Chan, Yandong Li, Han Zhang, Boqing Gong, Tingbo Hou, Huisheng Wang, and Yu-Chuan Su. Taming encoder for zero fine-tuning image customization with text-to-image diffusion models. arXiv preprint arXiv:2304.02642, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.647, + 0.892, + 0.674 + ], + "angle": 0, + "content": "[20] Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.676, + 0.892, + 0.731 + ], + "angle": 0, + "content": "[21] Jialu Li and Mohit Bansal. Panogen: Text-conditioned panoramic environment generation for vision-and-language navigation. Advances in Neural Information Processing Systems, 36, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.733, + 0.892, + 0.8 + ], + "angle": 0, + "content": "[22] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning, pages 12888-12900. PMLR, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.802, + 0.892, + 0.872 + ], + "angle": 0, + "content": "[23] Chieh Hubert Lin, Chia-Che Chang, Yu-Sheng Chen, Da-Cheng Juan, Wei Wei, and Hwann-Tzong Chen. Coco-gan: Generation by parts via conditional coordinating. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4512-4521, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.874, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[24] Chieh Hubert Lin, Hsin-Ying Lee, Yen-Chi Cheng, Sergey Tulyakov, and Ming-Hsuan Yang. 
Infinitygan: To" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.514, + 0.957 + ], + "angle": 0, + "content": "6921" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.093, + 0.47, + 0.12 + ], + "angle": 0, + "content": "wards infinite-pixel image synthesis. arXiv preprint arXiv:2104.03963, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[25] Chong Mou, Xintao Wang, Jiechong Song, Ying Shan, and Jian Zhang. Dragondiffusion: Enabling drag-style manipulation on diffusion models. In The Twelfth International Conference on Learning Representations, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.178, + 0.469, + 0.246 + ], + "angle": 0, + "content": "[26] Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, and Ying Shan. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4296–4304, 2024. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.247, + 0.469, + 0.315 + ], + "angle": 0, + "content": "[27] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.317, + 0.469, + 0.385 + ], + "angle": 0, + "content": "[28] Changgyoon Oh, Wonjune Cho, Yujeong Chae, Daehee Park, Lin Wang, and Kuk-Jin Yoon. Bips: Bi-modal indoor panorama synthesis via residual depth-aided adversarial learning. In European Conference on Computer Vision, pages 352–371. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.387, + 0.469, + 0.455 + ], + "angle": 0, + "content": "[29] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing attentions for zero-shot text-based video editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15932-15942, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.457, + 0.469, + 0.537 + ], + "angle": 0, + "content": "[30] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.539, + 0.469, + 0.593 + ], + "angle": 0, + "content": "[31] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.596, + 0.469, + 0.663 + ], + "angle": 0, + "content": "[32] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Bjorn Ommer. High-resolution image synthesis with latent diffusion models. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.665, + 0.469, + 0.706 + ], + "angle": 0, + "content": "[33] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. 
U-Net: Convolutional Networks for Biomedical Image Segmentation, page 234–241. 2015. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.707, + 0.469, + 0.788 + ], + "angle": 0, + "content": "[34] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.469, + 0.872 + ], + "angle": 0, + "content": "[35] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.874, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[36] Wenzhe Shi, Jose Caballero, Ferenc Huszar, Johannes Totz, Andrew P. Aitken, Rob Bishop, Daniel Rueckert, and Zehan" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.148 + ], + "angle": 0, + "content": "Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.151, + 0.892, + 0.218 + ], + "angle": 0, + "content": "[37] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[38] Kihyuk Sohn, Honglak Lee, and Xinchen Yan. Learning structured output representation using deep conditional generative models. Advances in neural information processing systems, 28, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[39] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502, 2020. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.321, + 0.892, + 0.417 + ], + "angle": 0, + "content": "[40] Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Qiufang Ma, Xuhan Sheng, Ming Cheng, Haoyu Ma, Shijie Zhao, Jian Zhang, Junlin Li, et al. Opdn: Omnidirectional position-aware deformable network for omnidirectional image superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1293-1301, 2023. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.42, + 0.892, + 0.5 + ], + "angle": 0, + "content": "[41] Piotr Teterwak, Aaron Sarna, Dilip Krishnan, Aaron Maschinot, David Belanger, Ce Liu, and William T Freeman. Boundless: Generative adversarial networks for image extension. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10521-10530, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.504, + 0.892, + 0.545 + ], + "angle": 0, + "content": "[42] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. 
Advances in neural information processing systems, 30, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.548, + 0.892, + 0.603 + ], + "angle": 0, + "content": "[43] Guangcong Wang, Yinuo Yang, Chen Change Loy, and Zwei Liu. Stylelight: HDR panorama generation for lighting estimation and editing. In European Conference on Computer Vision, pages 477-492. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.605, + 0.892, + 0.672 + ], + "angle": 0, + "content": "[44] Hai Wang, Xiaoyu Xiang, Yuchen Fan, and Jing-Hao Xue. Customizing 360-degree panoramas through text-to-image diffusion models. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 4933-4943, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.675, + 0.892, + 0.729 + ], + "angle": 0, + "content": "[45] Jionghao Wang, Ziyu Chen, Jun Ling, Rong Xie, and Li Song. 360-degree panorama generation from few unregistered nfov images. arXiv preprint arXiv:2308.14686, 2023. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.732, + 0.892, + 0.787 + ], + "angle": 0, + "content": "[46] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.789, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[47] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems, 36, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[48] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6922" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.469, + 0.134 + ], + "angle": 0, + "content": "of image diffusion models for text-to-video generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7623-7633, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.469, + 0.19 + ], + "angle": 0, + "content": "[49] Ruiqi Wu, Liangyu Chen, Tong Yang, Chunle Guo, Chongyi Li, and Xiangyu Zhang. Lamp: Learn a motion pattern for few-shot-based video generation. arXiv preprint arXiv:2310.10769, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.469, + 0.234 + ], + "angle": 0, + "content": "[50] Songsong Wu, Hao Tang, Xiao-Yuan Jing, Haifeng Zhao, Jianjun Qian, Nicu Sebe, and Yan Yan. Cross-view panorama image synthesis. IEEE Transactions on Multimedia, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.235, + 0.469, + 0.289 + ], + "angle": 0, + "content": "[51] Tianhao Wu, Chuanxia Zheng, and Tat-Jen Cham. IPO-ldm: Depth-aided 360-degree indoor rgb panorama outpainting via latent diffusion model. arXiv preprint arXiv:2307.03177, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.291, + 0.469, + 0.36 + ], + "angle": 0, + "content": "[52] Jinbo Xing, Menghan Xia, Yuxin Liu, Yuechen Zhang, Y He, H Liu, H Chen, X Cun, X Wang, Y Shan, et al. Makeyour-video: Customized video generation using textual and structural guidance. IEEE Transactions on Visualization and Computer Graphics, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.362, + 0.469, + 0.444 + ], + "angle": 0, + "content": "[53] Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, and Fang Wen. Paint by example: Exemplar-based image editing with diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18381-18391, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.446, + 0.469, + 0.502 + ], + "angle": 0, + "content": "[54] Shuai Yang, Yifan Zhou, Ziwei Liu, and Chen Change Loy. Rerender a video: Zero-shot text-guided video-to-video translation. In SIGGRAPH Asia 2023 Conference Papers, pages 1-11, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.504, + 0.469, + 0.572 + ], + "angle": 0, + "content": "[55] Jiwen Yu, Yinhuai Wang, Chen Zhao, Bernard Ghanem, and Jian Zhang. Freedom: Training-free energy-guided conditional diffusion model. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23174-23184, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.574, + 0.469, + 0.628 + ], + "angle": 0, + "content": "[56] Jiwen Yu, Xuanyu Zhang, Youmin Xu, and Jian Zhang. CRoSS: Diffusion model makes controllable, robust and secure image steganography. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.63, + 0.469, + 0.713 + ], + "angle": 0, + "content": "[57] Han Zhang, Tao Xu, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, and Dimitris N Metaxas. Stackgan: Text to photo-realistic image synthesis with stacked generative adversarial networks. In Proceedings of the IEEE international conference on computer vision, pages 5907-5915, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.715, + 0.469, + 0.769 + ], + "angle": 0, + "content": "[58] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.771, + 0.469, + 0.84 + ], + "angle": 0, + "content": "[59] Qinsheng Zhang, Jiaming Song, Xun Huang, Yongxin Chen, and Ming-Yu Liu. Diffcollage: Parallel generation of large content with diffusion models. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10188-10198. IEEE, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.842, + 0.469, + 0.897 + ], + "angle": 0, + "content": "[60] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 
1, 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.515, + 0.957 + ], + "angle": 0, + "content": "6923" + } + ] +] \ No newline at end of file diff --git a/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_origin.pdf b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ed169ed4ad9fa86c29fb2df761cc31b9c8dbfeda --- /dev/null +++ b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01d96ce145bbf44f2adf1c5d78e08990550d928553a9f19f322e401d5c9e2fd8 +size 3434502 diff --git a/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/full.md b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0414566b819509d53a30c6f8113608a0b1380c37 --- /dev/null +++ b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/full.md @@ -0,0 +1,295 @@ +# 360DVD: Controllable Panorama Video Generation with 360-Degree Video Diffusion Model + +Qian Wang $^{1,2}$ , Weiqi Li $^{1}$ , Chong Mou $^{1,2}$ , Xinhua Cheng $^{1,2}$ , Jian Zhang $^{1,2}$ $^{1}$ School of Electronic and Computer Engineering, Peking University + $^{2}$ Peking University Shenzhen Graduate School-Rabbitpre AIGC Joint Research Laboratory +{qianwang, liweiqi, eechongm, chengxinhua}@stu.pku.edu.cn, zhangjian.sz@pku.edu.cn + +# Abstract + +Panorama video recently attracts more interest in both study and application, courtesy of its immersive experience. Due to the expensive cost of capturing $360^{\circ}$ panoramic videos, generating desirable panorama videos by prompts is urgently required. Lately, the emerging text-to-video (T2V) diffusion methods demonstrate notable effectiveness in standard video generation. However, due to the significant gap in content and motion patterns between panoramic and standard videos, these methods encounter challenges in yielding satisfactory $360^{\circ}$ panoramic videos. In this paper, we propose a pipeline named 360-Degree Video Diffusion model (360DVD) for generating $360^{\circ}$ panoramic videos based on the given prompts and motion conditions. Specifically, we introduce a lightweight 360-Adapter accompanied by 360 Enhancement Techniques to transform pre-trained T2V models for panorama video generation. We further propose a new panorama dataset named WEB360 consisting of panoramic video-text pairs for training 360DVD, addressing the absence of captioned panoramic video datasets. Extensive experiments demonstrate the superiority and effectiveness of 360DVD for panorama video generation. Our project page is at https://akaneqwq.github.io/360DVD/. + +# 1. Introduction + +With the recent advancements in VR technology, 360-degree panoramic videos have been gaining increasing popularity. This video format which offers audiences an immersive experience, is helpful for various applications, including entertainment, education, and communication. 
To capture details of the entire scene, $360^{\circ}$ videos are typically recorded using an array of high-resolution fisheye cameras that yields a $360^{\circ} \times 180^{\circ}$ field-of-view (FoV) [1], which is quite costly in both time and resources. Therefore, the generation of $360^{\circ}$ panoramic videos is urgently required for broader applications, while panoramic video generation has received little attention in studies to date.

This work was supported by National Natural Science Foundation of China under Grant 62372016. (Corresponding author: Jian Zhang)

Thanks to emerging theory and training strategies, text-to-image (T2I) diffusion models [26, 27, 31, 32, 35] demonstrate remarkable image generation capacity from user-given prompts, and this impressive achievement has been further extended to text-to-video (T2V) generation. Various recently proposed T2V diffusion models [3, 16, 37, 46, 52, 60] adopt space-time separable architectures, wherein spatial operations are inherited from pre-trained T2I models to reduce the complexity of constructing space-time models from scratch. Among these, AnimateDiff [16] enables various personalized T2I models to generate animated images, which alleviates the requirement for model-specific tuning and achieves compelling content consistency over time.

Although T2V methods for standard videos are widely studied, no method has been proposed for panorama video generation. One potential approach is to leverage existing powerful T2V models, e.g., AnimateDiff, to directly generate the equirectangular projection (ERP) of panoramic videos. ERP is a commonly adopted format for storing and transmitting panoramic videos, in which each frame is treated as a rectangular image with an aspect ratio of 1:2, aligning well with the output format of existing standard T2V models. However, due to the significant differences between panoramic and standard videos, existing methods face challenges in directly producing satisfactory $360^{\circ}$ panoramic videos. Concretely, the main challenges include three aspects: (1) The content distribution of ERPs differs from standard videos: ERPs require a wider FoV, reaching $360^{\circ} \times 180^{\circ}$. (2) The motion patterns of ERPs differ from standard videos, with movements often following curves rather than straight lines. (3) The left and right ends of ERPs should exhibit continuity since they correspond to the same meridian on the Earth.

![](images/8e32efc6bee76ad52d5a48a7b9d8d07d1b7a95fd16b08df5393246543e02fcfa.jpg)
Figure 1. Main results. Our 360DVD creates text-aligned, coherent, and high-quality $360^{\circ}$ panorama videos. Furthermore, 360DVD can cooperate with multiple personalized text-to-image models and consistently generate stylized panorama videos.

![](images/95f33fe15aab58457293c435e6c1db7089b31e786666d0b74f687c56cb4bfe60.jpg)

Therefore, we propose a specifically designed method named 360-Degree Video Diffusion (360DVD) for generating panorama videos. We first introduce a plug-and-play module named 360-Adapter to address the challenges mentioned above. Our 360-Adapter receives zero values or motion conditions (e.g., optical flow) as input and outputs motion features, which are fed into the frozen denoising U-Net at different levels of the encoder. This transformation converts the T2V model into a panoramic video generator without altering its foundational generative capabilities.
In addition, we introduce 360 Enhancement Techniques, including two mechanisms to enhance continuity at both ends of ERPs from both macro and micro perspectives, and a latitude-aware loss function that encourages the model to focus more on low-latitude regions. Combined with these carefully designed techniques, our 360DVD generates text-aligned, coherent, high-quality $360^{\circ}$ panorama videos in various styles, as shown in Fig. 1.

Furthermore, we collect a panorama dataset named WEB360, including ERP-formatted videos from the internet and games, for training our method. WEB360 contains approximately 2,000 video clips, with each clip consisting of 100 frames. Considering the domain gap between panoramic and standard images, we introduce a GPT-based 360 Text Fusion module to enhance the accuracy and granularity of captions and obtain detailed descriptions. Our contributions can be summarized as follows:

- We introduce a controllable $360^{\circ}$ panorama video generation diffusion model named 360DVD, achieved by adapting a standard T2V model with a trainable lightweight 360-Adapter. Our model can generate text-guided panorama videos conditioned on desired motions.
- We design 360 Enhancement Techniques, including a latitude-aware loss and two mechanisms, to enhance the content and motion quality of generated panorama videos.
- We propose a new high-quality dataset named WEB360 comprising approximately 2,000 panoramic videos, with each video accompanied by a detailed caption enhanced through 360 Text Fusion.
- Experiments demonstrate that our 360DVD is capable of generating high-quality, high-diversity, and more consistent $360^{\circ}$ panorama videos.

# 2. Related Works

# 2.1. Text-to-Image Diffusion Model

The Denoising Diffusion Probabilistic Model [9, 17, 39] has proven to be highly successful in generating high-quality images, outperforming previous approaches such as generative adversarial networks (GANs) [11, 57], variational autoencoders (VAEs) [20, 38], and flow-based methods [5]. With text guidance during training, users can generate images based on textual input; noteworthy examples include GLIDE [27], DALLE-2 [31], and Imagen [35]. To address the computational burden of the iterative denoising process, LDM [32] conducts the diffusion process in a compressed latent space rather than the original pixel space. This accomplishment has prompted further exploration in customization [14, 34], image guidance [53, 55], precise control [25, 26, 58], and protection [56].

# 2.2. Text-to-Video Diffusion Model

Despite significant advancements in Text-to-Image (T2I) generation, Text-to-Video (T2V) generation faces challenges, including the absence of large-scale, high-quality paired text-video datasets, the inherent complexity of modeling temporal consistency, and the resource-intensive nature of training. To address these challenges, many works leverage knowledge from pre-trained T2I models and manage training costs by executing the diffusion process in the latent space. Some methods [15, 29, 48, 49, 54] utilize T2I models in zero-shot or few-shot ways. However, these methods often suffer from suboptimal frame consistency due to insufficient training. To address this limitation, another category of T2V diffusion models typically adopts space-time separable architectures. These models [3, 37, 46, 60] inherit spatial operations from pre-trained T2I models, reducing the complexity of constructing space-time models from scratch.
Given that most personalized T2I models are derived from the same base model (e.g., Stable Diffusion [32]), AnimateDiff [16] designs a motion modeling module that is trained once with the base T2I model and can then animate most of the derived personalized T2I models. There are also efforts focused on enhancing control in T2V models. Gen-1 [13], MCDiff [6], LaMD [18], and VideoComposer [47] introduce diverse conditions to T2V models. Despite these advancements, the aforementioned methods demand extensive training and lack a plug-and-play nature, making it challenging to apply them to a diverse range of personalized T2I models.

# 2.3. Panorama Generation

GAN-based methods for generating panoramic images have been widely studied [2, 4, 7, 10, 12, 23, 24, 28, 40, 41, 43, 50]. For instance, OmniDreamer [2] accepts a single NFoV image as an input condition and introduces a cyclic inference scheme to meet the inherent horizontal cyclicity of 360-degree images. ImmenseGAN [12] fine-tunes the generative model using a large-scale private text-image pair dataset, making the generation more controllable. Text2Light [7] introduces a zero-shot text-guided 360-image synthesis pipeline by utilizing the CLIP model. Very recently, diffusion models have achieved promising results in panoramic image generation. DiffCollage [59] uses semantic maps as conditions and generates images based on complex factor graphs using retrained diffusion models. PanoGen [21] employs a latent diffusion model and synthesizes new indoor panoramic images through recursive image drawing techniques based on multiple text descriptions. PanoDiff [45] achieves multi-NFoV synthesis of panoramic images through a two-stage pose estimation module. IPO-LDM [51] uses a dual-modal RGB-D diffusion structure to better learn the spatial distribution and patterns of panoramic images. StitchDiffusion [44] employs a T2I diffusion model, ensuring continuity at both ends through stitching. However, to date, panoramic video generation has received limited attention. To the best of our knowledge, we are the first to leverage diffusion models for panoramic video generation.

# 3. Method

In this section, we begin with a concise review of the latent diffusion model and AnimateDiff [16]. Following that, we introduce the construction of the WEB360 dataset. We then provide an overview of 360DVD and elaborate on the implementation details of the 360-Adapter. Finally, we describe the 360 Enhancement Techniques aimed at enriching the panoramic nature of the video.

# 3.1. Preliminaries

Latent Diffusion Model. Given an input signal $\mathbf{x}_0$, the diffusion forward process in DDPM [17] is defined as:

$$
q\left(\mathbf{x}_{t} \mid \mathbf{x}_{t-1}\right) = \mathcal{N}\left(\mathbf{x}_{t}; \sqrt{1-\beta_{t}}\, \mathbf{x}_{t-1}, \beta_{t} \mathbf{I}\right), \tag{1}
$$

for $t = 1,\ldots,T$, where $T$ is the total number of timesteps of the diffusion process. Gaussian noise with variance $\beta_{t}$ is gradually added to $\mathbf{x}_{t-1}$ to obtain $\mathbf{x}_t$ at the next timestep, so that $\mathbf{x}_T$ finally approaches $\mathcal{N}(0,\mathbf{I})$. The goal of the diffusion model is to learn to reverse the diffusion process (denoising).
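
As a concrete companion to Eq. (1), the snippet below simulates the forward process using the standard closed-form reparameterization $\mathbf{x}_t = \sqrt{\bar{\alpha}_t}\,\mathbf{x}_0 + \sqrt{1-\bar{\alpha}_t}\,\epsilon$ with $\bar{\alpha}_t = \prod_{s \le t}(1-\beta_s)$, which follows from Eq. (1) but is not spelled out in the text. The schedule endpoints mirror the linear beta schedule reported later in Sec. 4.1; this is generic DDPM background, not code from the paper.

```python
import torch

def make_alpha_bar(beta_start=0.00085, beta_end=0.012, T=1000):
    """Linear beta schedule; alpha_bar_t = prod_{s<=t} (1 - beta_s)."""
    betas = torch.linspace(beta_start, beta_end, T)
    return torch.cumprod(1.0 - betas, dim=0)

def q_sample(x0, t, alpha_bar):
    """Draw x_t ~ q(x_t | x_0) in a single step and return the noise target.

    x0: clean signal of shape (B, ...); t: integer timesteps of shape (B,)."""
    eps = torch.randn_like(x0)
    a = alpha_bar[t].view(-1, *([1] * (x0.dim() - 1)))   # broadcast over remaining dims
    x_t = a.sqrt() * x0 + (1.0 - a).sqrt() * eps
    return x_t, eps   # eps is the regression target of the noise-prediction loss in Eq. (3)
```
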
Starting from random noise $\mathbf{x}_T$, the model iteratively predicts the sample $\mathbf{x}_{t-1}$ at the previous timestep until the original signal $\mathbf{x}_0$ is recovered:

$$
p_{\theta}(\mathbf{x}_{t-1} \mid \mathbf{x}_{t}) = \mathcal{N}(\mathbf{x}_{t-1}; \boldsymbol{\mu}_{\theta}(\mathbf{x}_{t}, t), \boldsymbol{\Sigma}_{\theta}(\mathbf{x}_{t}, t)), \tag{2}
$$

for $t = T,\ldots,1$. We fix the variance $\boldsymbol{\Sigma}_{\theta}(\mathbf{x}_t,t)$ and utilize the diffusion model with parameters $\theta$ to predict the mean of the reverse process $\boldsymbol{\mu}_{\theta}(\mathbf{x}_t,t)$. The model can be simplified to a denoising network $\epsilon_{\theta}(\mathbf{x}_t,t)$, which is trained to predict the noise added to $\mathbf{x}_t$ with a noise prediction loss:

$$
\mathcal{L} = \mathbb{E}_{\mathbf{x}_{0}, \mathbf{y}, \epsilon \sim \mathcal{N}(0, \mathbf{I}), t}\left[ \| \epsilon - \epsilon_{\theta}(\mathbf{x}_{t}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y})) \|_{2}^{2} \right], \tag{3}
$$

where $\epsilon$ is the noise added to the input image $\mathbf{x}_0$, $\mathbf{y}$ is the corresponding textual description, and $\tau_{\theta}(\cdot)$ is a text encoder that maps the text string to a sequence of vectors.

Latent Diffusion Model (LDM) [32] executes the denoising process in the latent space of an autoencoder, namely $\mathcal{E}(\cdot)$ and $\mathcal{D}(\cdot)$, implemented as a VQ-GAN [19] or VQ-VAE [42] pre-trained on large image datasets. During the training of the latent diffusion network, an input image $\mathbf{x}_0$ is first mapped to the latent space by the frozen encoder, yielding $\mathbf{z}_0 = \mathcal{E}(\mathbf{x}_0)$. Thus, the training objective can be formulated as follows:

$$
\mathcal{L} = \mathbb{E}_{\mathcal{E}(\mathbf{x}_{0}), \mathbf{y}, \boldsymbol{\epsilon} \sim \mathcal{N}(0, \mathbf{I}), t}\left[ \| \boldsymbol{\epsilon} - \boldsymbol{\epsilon}_{\theta}(\mathbf{z}_{t}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y})) \|_{2}^{2} \right]. \tag{4}
$$

In the widely used LDM Stable Diffusion (SD), on which our method is based, $\epsilon_{\theta}(\cdot)$ is implemented with a modified U-Net [33] that incorporates four downsample/upsample blocks and one middle block, resulting in four resolution levels within the network's latent space. Each resolution level integrates 2D convolution layers as well as self- and cross-attention mechanisms. The text model $\tau_{\theta}(\cdot)$ is implemented using the CLIP [30] ViT-L/14 text encoder.

AnimateDiff. AnimateDiff inflates the base SD by adding temporal-aware structures and learning reasonable motion priors from large-scale video datasets. Since the original SD can only process 4D image batches while the T2V task takes a 5D video tensor as input, AnimateDiff transforms each 2D convolution and attention layer in the original image model into a spatial-only pseudo-3D layer. The motion module is inserted at every resolution level of the U-shaped diffusion network, using vanilla temporal transformers consisting of several self-attention blocks that operate along the temporal axis.
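
To make the tensor bookkeeping behind this inflation concrete, here is a minimal sketch of a frame-wise "pseudo-3D" spatial layer and a temporal self-attention block operating along the frame axis of a 5D video latent. The class names and shapes are illustrative assumptions, not AnimateDiff's actual implementation.

```python
import torch
import torch.nn as nn

class FrameWiseSpatial(nn.Module):
    """Applies a pre-trained 2D (image-space) layer independently to every frame."""

    def __init__(self, spatial_layer):
        super().__init__()
        self.spatial_layer = spatial_layer            # e.g. a pre-trained 2D convolution from the T2I model

    def forward(self, x):                             # x: (B, C, F, H, W)
        b, c, f, h, w = x.shape
        x = x.permute(0, 2, 1, 3, 4).reshape(b * f, c, h, w)
        x = self.spatial_layer(x)
        return x.reshape(b, f, c, h, w).permute(0, 2, 1, 3, 4)

class TemporalSelfAttention(nn.Module):
    """Vanilla self-attention along the frame axis, one sequence per spatial location."""

    def __init__(self, channels, heads=8):
        super().__init__()
        self.attn = nn.MultiheadAttention(channels, heads, batch_first=True)

    def forward(self, x):                              # x: (B, C, F, H, W)
        b, c, f, h, w = x.shape
        seq = x.permute(0, 3, 4, 2, 1).reshape(b * h * w, f, c)   # (B*H*W, F, C)
        out, _ = self.attn(seq, seq, seq)
        return out.reshape(b, h, w, f, c).permute(0, 4, 3, 1, 2)  # back to (B, C, F, H, W)
```

For example, `FrameWiseSpatial(nn.Conv2d(320, 320, 3, padding=1))` would wrap a spatial convolution so that it ignores the frame axis, while `TemporalSelfAttention(320)` exchanges information only across frames.
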
The training objective of AnimateDiff can be written as:

$$
\mathcal{L} = \mathbb{E}_{\mathcal{E}(\mathbf{x}_{0}^{1:N}), \mathbf{y}, \boldsymbol{\epsilon} \sim \mathcal{N}(0, \mathbf{I}), t}\left[ \| \boldsymbol{\epsilon} - \boldsymbol{\epsilon}_{\theta}(\mathbf{z}_{t}^{1:N}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y})) \|_{2}^{2} \right], \tag{5}
$$

where $\mathbf{x}_0^{1:N}$ is the sampled video data, $\mathbf{z}_0^{1:N}$ is the latent code into which $\mathbf{x}_0^{1:N}$ is encoded via the pre-trained autoencoder, and $\mathbf{z}_t^{1:N}$ is the latent code obtained by perturbing the initial latent code $\mathbf{z}_0^{1:N}$ with noise at timestep $t$. During training, the pre-trained weights of the base T2I model are frozen to keep its feature space unchanged.

# 3.2. WEB360 Dataset

Diverse text-video pair datasets are essential for training open-domain text-to-video generation models. However, existing $360^{\circ}$ panorama video datasets lack corresponding textual annotations. Moreover, these datasets are often constrained in either scale or quality, thereby limiting the achievable quality of generated videos.

To address the aforementioned challenges and achieve high-quality 360 panorama video generation, we introduce a novel text-video dataset named WEB360. This dataset comprises 2114 text-video pairs sourced from open-domain content, presented in high-definition (720p) ERP format. Our dataset creation process involved extracting 210 high-resolution panoramic video clips from the ODV360 [4] training set. Additionally, we collected over 400 original videos from YouTube. Because the complex scene transitions in the original videos pose challenges for models in learning temporal correlations, we performed a manual screening process to split the original videos into 1904 single-scene video clips. We employ BLIP [22] to annotate the first frame of each of the 2114 video clips. However, we observed that directly applying BLIP to ERP images often results in poor captions. Therefore, we propose a panoramic image captioning method named 360 Text Fusion, based on ChatGPT.

![](images/dfbc63c0901348edca027e0e3bea0da40cec19d80163ef05c694b8e4f9e76b1f.jpg)
Figure 2. 360 Text Fusion. The captions of four images with a FoV of 90° are fed into ChatGPT to generate a new $360^{\circ}$ summarization. Compared to the caption of the ERP at the bottom right, 360 Text Fusion allows for more fine-grained captions.

360 Text Fusion. We find that directly using BLIP [22] to label ERPs has drawbacks. On one hand, errors may arise due to the distortion near the poles, leading to misidentifications such as labeling a "person" as a "dog". On the other hand, the captions generated by BLIP lack granularity, making them insufficient for providing a detailed description of the current scene. Thus, we propose the 360 Text Fusion (360TF) method, as shown in Fig. 2. To deal with the irregular distortion of ERP, we turn to less-distorted perspective images. We first project the original ERP image into four non-overlapping perspective images at $0^{\circ}$ latitude, each with a FoV of $90^{\circ}$. The four images are then fed into BLIP to be captioned. By pre-informing ChatGPT about the task and providing examples, these four captions are collectively input to ChatGPT, which then generates a summary of the scene as our final caption. In comparison to directly using BLIP to label the entire image, our 360TF demonstrates a significant advantage in granularity.
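
The 360TF procedure is essentially a project-caption-summarize loop. The sketch below outlines it with placeholder callables: `erp_to_perspective` stands in for an equirectangular-to-perspective projection helper, `caption_image` for a BLIP captioner, and `summarize_captions` for the ChatGPT call. None of these names come from the paper's released code; they are assumptions for illustration.

```python
def fuse_panorama_caption(erp_frame, erp_to_perspective, caption_image, summarize_captions):
    """Caption an ERP frame via four 90-degree-FoV perspective views (hypothetical helpers).

    erp_frame:            first frame of a clip in equirectangular format (H x W x 3).
    erp_to_perspective:   callable(img, yaw_deg, pitch_deg, fov_deg) -> perspective view.
    caption_image:        callable(img) -> str, e.g. a wrapper around a BLIP model.
    summarize_captions:   callable(list_of_str) -> str, an LLM prompt that fuses the
                          four view captions into one scene-level description.
    """
    yaws = [0, 90, 180, 270]                       # four non-overlapping views around the equator
    views = [erp_to_perspective(erp_frame, yaw, 0, 90) for yaw in yaws]
    view_captions = [caption_image(view) for view in views]
    return summarize_captions(view_captions)
```
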
# 3.3. 360-degree Video Diffusion Model

An overview of the 360-degree Video Diffusion Model (360DVD) is presented in Fig. 3. It is composed of a pre-trained denoising U-Net and a 360-Adapter. The pre-trained denoising U-Net adopts a structure identical to that of AnimateDiff. At every resolution level of the U-Net, the spatial layer inherits pre-trained weights from SD, while the temporal layer incorporates the motion module of AnimateDiff trained on a large-scale text-video dataset.

![](images/9fdb99ac86341b86153ac301dbe00976f41c74a1b06897891b29cd12c743a14b.jpg)
Figure 3. Overview of 360DVD. 360DVD leverages a trainable 360-Adapter to extend standard T2V models to the panorama domain and is able to generate high-quality panorama videos with given prompts and optional motion conditions. In addition, 360 Enhancement Techniques are proposed for quality improvement in the panorama perspective.

During the training process, we first sample a video $\mathbf{x}_0^{1:N}$ from the dataset. The video is encoded into the latent code $\mathbf{z}_0^{1:N}$ through the pre-trained VAE encoder $\mathcal{E}(\cdot)$ and noised to $\mathbf{z}_t^{1:N}$. Simultaneously, the corresponding text $\mathbf{y}$ for the video is encoded using the CLIP text encoder $\pmb{\tau}_{\theta}(\cdot)$. The video is also fed into a motion estimation network to generate the corresponding motion conditions $\mathbf{c}$, which are then fed into the 360-Adapter $\mathcal{F}_{360}(\cdot)$. Finally, the noised latent code $\mathbf{z}_t^{1:N}$, timestep $t$, text embedding $\pmb{\tau}_{\theta}(\mathbf{y})$, and the feature maps $\mathbf{f}_{360}$ generated by the 360-Adapter are collectively input into the U-Net $\epsilon_{\theta}(\cdot)$ to predict the noise added to the latent code. As we aim to preserve the priors learned by SD and AnimateDiff on large datasets, we freeze their weights during training. Using a simple L2 loss term, the training objective is given as follows:

$$
\mathcal{L} = \mathbb{E}_{\mathcal{E}(\mathbf{x}_{0}^{1:N}), \mathbf{y}, \epsilon \sim \mathcal{N}(0, \mathbf{I}), t}\left[ \| \boldsymbol{\epsilon} - \boldsymbol{\epsilon}_{\theta}(\mathbf{z}_{t}^{1:N}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y}), \mathbf{f}_{360}) \|_{2}^{2} \right]. \tag{6}
$$

To ensure satisfactory generation of $360^{\circ}$ panoramic videos without motion control input, we set the input of the 360-Adapter to zero with a probability $P$ during training. This strategy encourages the model to learn representations that are not solely reliant on motion conditions, enhancing its ability to generate compelling panoramic videos without explicit motion guidance.

At inference time, users can optionally provide text prompts and motion guidance to carry out denoising over a total of $T$ steps. Here, we employ DDIM [39] to accelerate the sampling process. The estimated latent code $\hat{\mathbf{z}}_0^{1:N}$ is then input into the pre-trained VAE decoder to decode the desired $360^\circ$ panoramic videos $\hat{\mathbf{x}}_0^{1:N}$. Due to constraints such as the resolution limitations of existing SD and GPU memory usage, the experimental results presented in this paper use a resolution of $512 \times 1024$. In practical applications, super-resolution methods [8, 40] can be employed to upscale the generated results to the desired size.

360-Adapter.
Our proposed 360-Adapter is simple and lightweight, as shown in Fig. 4. The original condition input has the same resolution $H \times W$ as the video. Here, we utilize the pixel unshuffle [36] operation to downsample it to $H/8 \times W/8$. Following that are four 360-Adapter blocks; for simplicity, only one is depicted in Fig. 4. To maintain consistency with the U-Net architecture, the first three 360-Adapter blocks each include a downsampling block. In each 360-Adapter block, one 2D convolution layer and a residual block (RB) with pseudo-3D convolution layers are utilized to extract the condition feature $\mathbf{f}_{360}^{k}$. Finally, the multi-scale condition features $\mathbf{f}_{360} = \{\mathbf{f}_{360}^{1}, \mathbf{f}_{360}^{2}, \mathbf{f}_{360}^{3}, \mathbf{f}_{360}^{4}\}$ are formed. Suppose the intermediate features in the U-Net encoder blocks are $\mathbf{f}_{enc} = \{\mathbf{f}_{enc}^{1}, \mathbf{f}_{enc}^{2}, \mathbf{f}_{enc}^{3}, \mathbf{f}_{enc}^{4}\}$; $\mathbf{f}_{360}$ is then added to $\mathbf{f}_{enc}$ at each scale. In summary, the condition feature extraction and conditioning operation of the 360-Adapter can be formulated as:

$$
\mathbf{f}_{360} = \mathcal{F}_{360}(\mathbf{c}), \tag{7}
$$

$$
\hat{\mathbf{f}}_{enc}^{i} = \mathbf{f}_{enc}^{i} + \mathbf{f}_{360}^{i}, \quad i \in \{1, 2, 3, 4\}. \tag{8}
$$

![](images/bcff2f4fddc23bfaae1cc06eb9c8e3cceb676329129ea2ba8f60b036e2116d27.jpg)
Figure 4. Overview of 360-Adapter. 360-Adapter is a simple but effective module in which intermediate features are fed into the U-Net encoder blocks for modulation.

The previous description omits some details. Our motion condition $\mathbf{c}$ is a 5D tensor whose size is batch $\times$ channels $\times$ frames $\times$ height $\times$ width. We first reshape it into a 4D tensor of size (batch $\times$ frames) $\times$ channels $\times$ height $\times$ width so that it can be fed into the 2D convolution layer, and then restore it to 5D to pass through the RB with pseudo-3D convolution layers. In the RB, we employ a $1 \times 3 \times 3$ pseudo-3D convolution to extract features in the spatial dimension, followed by a $3 \times 1 \times 1$ pseudo-3D convolution to model information along the temporal dimension. The resulting features are reshaped back to (batch $\times$ frames) $\times$ channels $\times$ height $\times$ width and added to the output of the skip connection. Finally, the condition features are reshaped back into a 5D tensor of size batch $\times$ channels $\times$ frames $\times$ height $\times$ width to align with the U-Net encoder intermediate features.

# 3.4. 360 Enhancement Techniques

Latitude-aware Loss. When projecting panoramic videos into ERPs, meridians are mapped to equally spaced vertical lines, while parallels are mapped to equally spaced horizontal lines. This projection establishes a straightforward mapping relationship, but it is neither equal-area nor conformal, introducing significant distortion, particularly in the polar regions.
To make the denoiser pay more attention to low-latitude regions, which suffer less distortion and are more crucial for human visual perception, we introduce a latitude-aware loss:

$$
\mathcal{L} = \mathbb{E}_{\mathcal{E}(\mathbf{x}_{0}^{1:N}), \mathbf{y}, \epsilon \sim \mathcal{N}(0, \mathbf{I}), t}\left[ \| \mathbf{W} \odot (\epsilon - \hat{\epsilon}_{\theta}) \|_{2}^{2} \right], \tag{9}
$$

where $\hat{\epsilon}_{\theta} = \epsilon_{\theta}(\mathbf{z}_t^{1:N}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y}), \mathbf{f}_{360})$, and $\mathbf{W}$ is a weight matrix applied via element-wise product, defined as:

$$
\mathbf{W}_{i, j} = \cos\left(\frac{2i - H/8 + 1}{H/4} \pi\right), \tag{10}
$$

where $i \in [0, H/8)$, $j \in [0, W/8)$, and $H/8$ and $W/8$ are the height and width of the latent code $\mathbf{z}_t^{1:N}$. The visualization of $\mathbf{W}$ is shown in Fig. 5, where pixels at low and middle latitudes are given more weight during training.

Latent Rotation Mechanism. Because ERPs can be considered the unfolding of a spherical surface along a meridian, they are meant to be wraparound-consistent, implying that their left and right sides are continuous. However, during video generation, the left and right sides are physically separated. Inspired by PanoDiff [45], we employ a latent rotation mechanism to enhance the macroscopic coherence between the left and right ends of the video. During inference, we perform a horizontal rotation by an angle $\theta$ on $\mathbf{z}_t^{1:N}$ and the motion condition $\mathbf{c}$ at each denoising step. As illustrated in Fig. 5, the content on the far left is shifted to the far right; in the figure we show $\mathbf{x}_0^1$ instead of $\mathbf{z}_t^{1:N}$ to better visualize the continuity. During training, we also rotate the training videos together with their motion conditions by a random angle as a data augmentation strategy.

![](images/7859becf69cbcc15915e74e7e242374adb457af96d8f73541d153cabbf97abf5.jpg)
Figure 5. Left: the visualization of the weight matrix $\mathbf{W}$; brighter colors indicate values closer to 1, while darker colors indicate values closer to 0. Right: a schematic diagram of the latent rotation mechanism. In each iteration, the far-left portion of angle $\theta$ is shifted to the far right.

Circular Padding Mechanism. Although the latent rotation mechanism achieves semantic continuity at a macroscopic level, achieving pixel-level continuity remains challenging. Therefore, during inference, we adopt a circular padding mechanism by modifying the padding mode of the convolution layers. We observe that the early stages of $360^{\circ}$ video generation often involve layout modeling, while the later stages focus on detail completion. To maintain the stable video generation quality of 360DVD, we only apply the circular padding mechanism in the last $\left\lfloor \frac{T}{2} \right\rfloor$ of the $T$ denoising steps.

# 4. Experiment

# 4.1. Implementation Details

Training Settings. We choose Stable Diffusion v1.5 and Motion Module v14 as our base model. We utilize the panoramic optical flow estimator PanoFlow [45] to generate motion conditions. We train the 360-Adapter on the proposed WEB360 dataset.
The resolution is set to $512 \times 1024$, the number of frames to 16, the batch size to 1, the learning rate to $1 \times 10^{-5}$, the total number of training steps to $100k$, and the probability $P$ to 0.2. We use a linear beta schedule as in AnimateDiff, with $\beta_{start} = 0.00085$ and $\beta_{end} = 0.012$.

Inference Settings. We use DDIM with 25 sampling steps, a text guidance scale of 7.5, and the rotation angle $\theta = \pi/2$. We collect several personalized Stable Diffusion models from CivitAI to verify the effectiveness and generalizability of our method, including Realistic Vision, Lyriel, ToonYou, and RCNZ Cartoon.

![](images/5875d91e0c3dc9a487f99f4eda4b1e1f59e7fab5215c1ea864dd2c37a2223e06.jpg)
Figure 6. Qualitative comparisons with baseline methods. 360DVD successfully produces stable and high-quality panorama videos over various prompts, while the other methods fail.

![](images/b2302c66f49956ac65b576c168d7edb4feeeddd6f588df6d2a6834e2e55a578.jpg)
Figure 7. Qualitative comparisons of optical flow. 360DVD generates panorama videos with reasonable motion patterns consistent with the conditioned optical flow.

# 4.2. Qualitative Results

Due to space limitations, we only display several frames of each video. We strongly recommend that readers refer to our project page for more results and better visual quality.

Prompt-guided Panorama Video Generation. We present several prompt-guided $360^{\circ}$ panorama video generation results across different personalized models in Fig. 1. The figure shows that our method successfully turns personalized T2I models into panorama video generators. Our method can produce impressive generation results ranging from realistic to cartoon styles, and from natural landscapes to cultural scenery. This success is attributed to the fact that our method preserves the image generation priors and temporal modeling priors learned by SD and AnimateDiff on large-scale datasets.

Motion-guided Panorama Video Generation. We showcase panoramic video generation results guided by three typical optical flow maps, as shown in Fig. 7. The optical flow maps in the first row indicate primary motion areas in the Arctic region, where we can observe significant movement of clouds in the sky. The optical flow maps in the second and third rows indicate motion areas primarily in the Antarctic region, where we can see the movement of trees and hot air balloons.
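
Before turning to the quantitative comparison in Table 1, the snippet below gives a minimal sketch of two of the 360 Enhancement Techniques from Section 3.4: the latitude weight map of Eq. (10) with the weighted loss of Eq. (9), and the latent rotation step realized as a horizontal roll. Function names are illustrative, not from the released code, and the circular-padding helper is an approximation (PyTorch's `circular` padding mode wraps both spatial axes, whereas only the width axis needs wrapping for ERPs).

```python
import torch

def latitude_weight(latent_h, latent_w):
    """Eq. (10): weights close to 1 near the equator rows, close to 0 near the poles."""
    i = torch.arange(latent_h, dtype=torch.float32)
    col = torch.cos((2 * i - latent_h + 1) / (2 * latent_h) * torch.pi)
    return col.view(latent_h, 1).expand(latent_h, latent_w)          # shape (H/8, W/8)

def latitude_aware_loss(eps, eps_pred):
    """Eq. (9): latitude-weighted noise-prediction error; eps, eps_pred: (B, C, F, h, w)."""
    w = latitude_weight(eps.shape[-2], eps.shape[-1]).to(eps)
    return ((w * (eps - eps_pred)) ** 2).mean()

def rotate_latent(z, cond, theta=torch.pi / 2):
    """Latent rotation: shift the far-left slice of angle theta to the far right.

    z: (B, C, F, h, w) noisy latent; cond: motion condition with its own width."""
    shift_z = int(round(z.shape[-1] * float(theta) / (2 * torch.pi)))
    shift_c = int(round(cond.shape[-1] * float(theta) / (2 * torch.pi)))
    return torch.roll(z, shifts=-shift_z, dims=-1), torch.roll(cond, shifts=-shift_c, dims=-1)

def enable_circular_padding(unet):
    """Switch Conv2d layers to circular padding for the late denoising steps
    (approximate: wraps both spatial axes rather than only the horizontal one)."""
    for module in unet.modules():
        if isinstance(module, torch.nn.Conv2d):
            module.padding_mode = "circular"
```
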
| Index | Methods | Graphics Quality | Frame Consistency | End Continuity | Content Distribution | Motion Pattern |
| --- | --- | --- | --- | --- | --- | --- |
| A | AnimateDiff | 11.3% | 15.3% | 5.3% | 4.8% | 4.4% |
| B | A + LoRA | 14.1% | 10.5% | 6.0% | 12.1% | 6.5% |
| C | B + 360ET | 23.0% | 9.7% | 16.9% | 16.1% | 14.5% |
| D | Ours | 51.6% | 64.5% | 71.8% | 67.0% | 74.6% |

+ +Table 1. User preference studies. More raters prefer videos generated by our 360DVD, especially over panorama criteria including if generated videos have left-to-right continuity, the panorama content distribution, and the panorama motion pattern. + +# 4.3. Comparison + +We compare our results with nativeAnimateDiff,AnimateDiff with a LoRA for panorama image generation from CivitAI named LatentLabs360,AnimateDiff with panoramic LoRA, and our proposed 360 Enhancement Techniques (loss excepted). We can observe that the results generated by the nativeAnimateDiff have a very narrow field of view, which does not align with the content distribution of panoramic videos. WhenAnimateDiff is augmented with panoramic LoRA, it produces videos with a broader field of view; however, the two ends of videos lack continuity, and object movements are highly random. Our proposed 360ET method significantly enhances the continuity between two ends of the videos but fails to address issues such as non-compliance with panoramic motion patterns and poor cross-frame consistency. Notably, our 360DVD can generate videos that best adhere to the content distribution and motion patterns of panoramic videos. We are pleased to discover that, thanks to the high-quality training data provided by WEB360, the videos generated by 360DVD exhibit more realistic colors and nuanced lighting, providing an immersive experience. + +# 4.4. Ablation Study + +We primarily conducted ablation studies on the proposed 360 Text Fusion strategy, the pseudo-3D layer in the 360-Adapter, and the latitude-aware loss, as illustrated in Fig. 8. Given the prompt "a car driving down a street next to a forest", the first row without 360TF can not generate the car because of low-quality captions in the training process. The second row without pseudo-3D layer can generate a car, but due to the lack of temporal modeling, the results exhibit flickering. The third row without latitude-aware loss can produce relatively good results, but it still falls slightly short in terms of clarity, field of view, and other aspects compared to the last row with the complete 360DVD. + +# 4.5. User Study + +31 participants were surveyed to evaluate the graphics quality, cross-frame consistency, left-right continuity, content distribution, and motion patterns of 8 sets of generated + +![](images/a24ad557b7827453850145863261f8d88df87d01a8dca7dd403fb8f9d7a38cbe.jpg) +Figure 8. Ablation studies on 360 Text Fusion (360TF), pseudo-3D layer in 360-Adapter (Pseudo-3D), and latitude-aware loss (Lat. Loss). + +results. For each criterion, they selected the video they deemed most fitting for the theme of high-quality 360-degree panoramic videos. The data presented in Table 1 indicates that our model outperforms the other three methods significantly across all five dimensions. Simultaneously, our proposed 360ET can remarkably improve video quality, and left-right continuity, solely based on the nativeAnimateDiff and panoramic LoRA. + +# 5. Conclusion + +In this paper, we introduce 360DVD, a pipeline for controllable $360^{\circ}$ panorama video generation. Our framework leverages text prompts and motion guidance to animate personalized T2I models. Utilizing the proposed WEB360 dataset, 360-Adapter, and 360 Enhancement Techniques, our framework can generate videos that adhere to the content distribution and motion patterns in real captured panoramic videos. Extensive experiments demonstrate our effectiveness in creating high-quality panorama videos with various prompts and styles. 
We believe that our framework provides a simple but effective solution for panoramic video generation, and leads to inspiration for possible future works. + +# References + +[1] Hao Ai, Zidong Cao, Jinjing Zhu, Haotian Bai, Yucheng Chen, and Lin Wang. Deep learning for omnidirectional vision: A survey and new perspectives. arXiv preprint arXiv:2205.10468, 2022. 1 +[2] Naofumi Akimoto, Yuhi Matsuo, and Yoshimitsu Aoki. *Diverse plausible 360-degree image outpainting for efficient 3dgc background creation.* In *Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition*, pages 11441–11450, 2022. 3 +[3] Jie An, Songyang Zhang, Harry Yang, Sonal Gupta, Jia-Bin Huang, Jiebo Luo, and Xi Yin. Latent-shift: Latent diffusion with temporal shift for efficient text-to-video generation. arXiv preprint arXiv:2304.08477, 2023. 1, 3 +[4] Mingdeng Cao, Chong Mou, Fanghua Yu, Xintao Wang, Yinqiang Zheng, Jian Zhang, Chao Dong, Gen Li, Ying Shan, Radu Timofte, Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Xuhan Sheng, Bin Chen, Haoyu Ma, Ming Cheng, Shijie Zhao, Wanwan Cui, Tianyu Xu, Chunyang Li, Long Bao, Heng Sun, Huaibo Huang, Xiaoqiang Zhou, Yang Ai, Ran He, Renlong Wu, Yi Yang, Zhilu Zhang, Shuo-hao Zhang, Junyi Li, Yunjin Chen, Dongwei Ren, Wang-meng Zuo, Qian Wang, Hao-Hsiang Yang, Yi-Chung Chen, Zhi-Kai Huang, Wei-Ting Chen, Yuan-Chun Chiang, Hua-En Chang, I-Hsiang Chen, Chia-Hsuan Hsieh, Sy-Yen Kuo, Zebin Zhang, Jiaqi Zhang, Yuhui Wang, Shuhao Cui, Junshi Huang, Li Zhu, Shuman Tian, Wei Yu, and Bingchun Luo. Ntire 2023 challenge on 360deg omnidirectional image and video super-resolution: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 1731-1745, 2023. 3, 4 +[5] Ricky TQ Chen, Jens Behrmann, David K Duvenaud, and Jorn-Henrik Jacobsen. Residual flows for invertible generative modeling. Advances in Neural Information Processing Systems, 32, 2019. 2 +[6] Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. Motion-conditioned diffusion model for controllable video synthesis. arXiv preprint arXiv:2304.14404, 2023. 3 +[7] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG), 41(6):1-16, 2022. 3 +[8] Ming Cheng, Haoyu Ma, Qiufang Ma, Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Xuhan Sheng, Shijie Zhao, Junlin Li, and Li Zhang. Hybrid transformer and cnn attention network for stereo image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1702-1711, 2023. 5 +[9] Xinhua Cheng, Nan Zhang, Jiwen Yu, Yinhuai Wang, Ge Li, and Jian Zhang. Null-space diffusion sampling for zero-shot point cloud completion. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence (IJ-CAI), 2023. 2 +[10] Yen-Chi Cheng, Chieh Hubert Lin, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, and Ming-Hsuan Yang. Inout: Diverse image outpainting via gan inversion. In Proceedings of + +the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11431-11440, 2022. 3 +[11] Antonia Creswell, Tom White, Vincent Dumoulin, Kai Arulkumaran, Biswa Sengupta, and Anil A Bharath. Generative adversarial networks: An overview. IEEE signal processing magazine, 35(1):53-65, 2018. 2 +[12] Mohammad Reza Karimi Dastjerdi, Yannick Hold-Geoffroy, Jonathan Eisenmann, Siavash Khodadadeh, and Jean-François Lalonde. 
Guided co-modulated gan for $360^{\circ}$ field of view extrapolation. In 2022 International Conference on 3D Vision (3DV), pages 475–485. IEEE, 2022. 3 +[13] Patrick Esser, Johnathan Chiu, Parmida Atighechian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7346-7356, 2023. 3 +[14] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022.3 +[15] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 3 +[16] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 1, 3 +[17] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 2, 3 +[18] Yaosi Hu, Zhenzhong Chen, and Chong Luo. Lamd: Latent motion diffusion for video generation. arXiv preprint arXiv:2304.11603, 2023. 3 +[19] Xuhui Jia, Yang Zhao, Kelvin CK Chan, Yandong Li, Han Zhang, Boqing Gong, Tingbo Hou, Huisheng Wang, and Yu-Chuan Su. Taming encoder for zero fine-tuning image customization with text-to-image diffusion models. arXiv preprint arXiv:2304.02642, 2023. 3 +[20] Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 2 +[21] Jialu Li and Mohit Bansal. Panogen: Text-conditioned panoramic environment generation for vision-and-language navigation. Advances in Neural Information Processing Systems, 36, 2024. 3 +[22] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning, pages 12888-12900. PMLR, 2022. 4 +[23] Chieh Hubert Lin, Chia-Che Chang, Yu-Sheng Chen, Da-Cheng Juan, Wei Wei, and Hwann-Tzong Chen. Coco-gan: Generation by parts via conditional coordinating. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4512-4521, 2019. 3 +[24] Chieh Hubert Lin, Hsin-Ying Lee, Yen-Chi Cheng, Sergey Tulyakov, and Ming-Hsuan Yang. Infinitygan: To + +wards infinite-pixel image synthesis. arXiv preprint arXiv:2104.03963, 2021. 3 +[25] Chong Mou, Xintao Wang, Jiechong Song, Ying Shan, and Jian Zhang. Dragondiffusion: Enabling drag-style manipulation on diffusion models. In The Twelfth International Conference on Learning Representations, 2024. 3 +[26] Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, and Ying Shan. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4296–4304, 2024. 1, 3 +[27] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 1, 3 +[28] Changgyoon Oh, Wonjune Cho, Yujeong Chae, Daehee Park, Lin Wang, and Kuk-Jin Yoon. 
Bips: Bi-modal indoor panorama synthesis via residual depth-aided adversarial learning. In European Conference on Computer Vision, pages 352–371. Springer, 2022. 3 +[29] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing attentions for zero-shot text-based video editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15932-15942, 2023. 3 +[30] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 4 +[31] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 1, 3 +[32] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Bjorn Ommer. High-resolution image synthesis with latent diffusion models. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 3 +[33] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional Networks for Biomedical Image Segmentation, page 234–241. 2015. 4 +[34] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 3 +[35] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 1, 3 +[36] Wenzhe Shi, Jose Caballero, Ferenc Huszar, Johannes Totz, Andrew P. Aitken, Rob Bishop, Daniel Rueckert, and Zehan + +Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 5 +[37] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022. 1, 3 +[38] Kihyuk Sohn, Honglak Lee, and Xinchen Yan. Learning structured output representation using deep conditional generative models. Advances in neural information processing systems, 28, 2015. 2 +[39] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502, 2020. 2, 5 +[40] Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Qiufang Ma, Xuhan Sheng, Ming Cheng, Haoyu Ma, Shijie Zhao, Jian Zhang, Junlin Li, et al. Opdn: Omnidirectional position-aware deformable network for omnidirectional image superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1293-1301, 2023. 3, 5 +[41] Piotr Teterwak, Aaron Sarna, Dilip Krishnan, Aaron Maschinot, David Belanger, Ce Liu, and William T Freeman. Boundless: Generative adversarial networks for image extension. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10521-10530, 2019. 3 +[42] Aaron Van Den Oord, Oriol Vinyals, et al. 
Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 3 +[43] Guangcong Wang, Yinuo Yang, Chen Change Loy, and Zwei Liu. Stylelight: HDR panorama generation for lighting estimation and editing. In European Conference on Computer Vision, pages 477-492. Springer, 2022. 3 +[44] Hai Wang, Xiaoyu Xiang, Yuchen Fan, and Jing-Hao Xue. Customizing 360-degree panoramas through text-to-image diffusion models. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 4933-4943, 2024. 3 +[45] Jionghao Wang, Ziyu Chen, Jun Ling, Rong Xie, and Li Song. 360-degree panorama generation from few unregistered nfov images. arXiv preprint arXiv:2308.14686, 2023. 3, 6 +[46] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 1, 3 +[47] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems, 36, 2024. 3 +[48] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning + +of image diffusion models for text-to-video generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7623-7633, 2023. 3 +[49] Ruiqi Wu, Liangyu Chen, Tong Yang, Chunle Guo, Chongyi Li, and Xiangyu Zhang. Lamp: Learn a motion pattern for few-shot-based video generation. arXiv preprint arXiv:2310.10769, 2023. 3 +[50] Songsong Wu, Hao Tang, Xiao-Yuan Jing, Haifeng Zhao, Jianjun Qian, Nicu Sebe, and Yan Yan. Cross-view panorama image synthesis. IEEE Transactions on Multimedia, 2022. 3 +[51] Tianhao Wu, Chuanxia Zheng, and Tat-Jen Cham. IPO-ldm: Depth-aided 360-degree indoor rgb panorama outpainting via latent diffusion model. arXiv preprint arXiv:2307.03177, 2023. 3 +[52] Jinbo Xing, Menghan Xia, Yuxin Liu, Yuechen Zhang, Y He, H Liu, H Chen, X Cun, X Wang, Y Shan, et al. Makeyour-video: Customized video generation using textual and structural guidance. IEEE Transactions on Visualization and Computer Graphics, 2024. 1 +[53] Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, and Fang Wen. Paint by example: Exemplar-based image editing with diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18381-18391, 2023. 3 +[54] Shuai Yang, Yifan Zhou, Ziwei Liu, and Chen Change Loy. Rerender a video: Zero-shot text-guided video-to-video translation. In SIGGRAPH Asia 2023 Conference Papers, pages 1-11, 2023. 3 +[55] Jiwen Yu, Yinhuai Wang, Chen Zhao, Bernard Ghanem, and Jian Zhang. Freedom: Training-free energy-guided conditional diffusion model. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23174-23184, 2023. 3 +[56] Jiwen Yu, Xuanyu Zhang, Youmin Xu, and Jian Zhang. CRoSS: Diffusion model makes controllable, robust and secure image steganography. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 3 +[57] Han Zhang, Tao Xu, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, and Dimitris N Metaxas. Stackgan: Text to photo-realistic image synthesis with stacked generative adversarial networks. 
In Proceedings of the IEEE international conference on computer vision, pages 5907-5915, 2017. 2 +[58] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 3 +[59] Qinsheng Zhang, Jiaming Song, Xun Huang, Yongxin Chen, and Ming-Yu Liu. Diffcollage: Parallel generation of large content with diffusion models. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10188-10198. IEEE, 2023. 3 +[60] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 1, 3 \ No newline at end of file diff --git a/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/images.zip b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..921b1717784208f99d8cb376608be104156b70c9 --- /dev/null +++ b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b58b30f370c31a751fee0b7b81dec9fd7874ff9b5289a520d3aeececb56191f0 +size 838967 diff --git a/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/layout.json b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..92cf9c672667d28e9c28eec75322986f5d6cfc15 --- /dev/null +++ b/2024/360DVD_ Controllable Panorama Video Generation with 360-Degree Video Diffusion Model/layout.json @@ -0,0 +1,8967 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 121, + 103, + 473, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 103, + 473, + 138 + ], + "spans": [ + { + "bbox": [ + 121, + 103, + 473, + 138 + ], + "type": "text", + "content": "360DVD: Controllable Panorama Video Generation with 360-Degree Video Diffusion Model" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "spans": [ + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "text", + "content": "Qian Wang" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "text", + "content": ", Weiqi Li" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "text", + "content": ", Chong Mou" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "text", + "content": ", Xinhua Cheng" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "text", + "content": ", Jian Zhang" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 77, + 160, + 516, + 
217 + ], + "type": "text", + "content": "School of Electronic and Computer Engineering, Peking University \n" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 77, + 160, + 516, + 217 + ], + "type": "text", + "content": "Peking University Shenzhen Graduate School-Rabbitpre AIGC Joint Research Laboratory \n{qianwang, liweiqi, eechongm, chengxinhua}@stu.pku.edu.cn, zhangjian.sz@pku.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "spans": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "spans": [ + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "type": "text", + "content": "Panorama video recently attracts more interest in both study and application, courtesy of its immersive experience. Due to the expensive cost of capturing " + }, + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "type": "text", + "content": " panoramic videos, generating desirable panorama videos by prompts is urgently required. Lately, the emerging text-to-video (T2V) diffusion methods demonstrate notable effectiveness in standard video generation. However, due to the significant gap in content and motion patterns between panoramic and standard videos, these methods encounter challenges in yielding satisfactory " + }, + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "type": "text", + "content": " panoramic videos. In this paper, we propose a pipeline named 360-Degree Video Diffusion model (360DVD) for generating " + }, + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "type": "text", + "content": " panoramic videos based on the given prompts and motion conditions. Specifically, we introduce a lightweight 360-Adapter accompanied by 360 Enhancement Techniques to transform pre-trained T2V models for panorama video generation. We further propose a new panorama dataset named WEB360 consisting of panoramic video-text pairs for training 360DVD, addressing the absence of captioned panoramic video datasets. Extensive experiments demonstrate the superiority and effectiveness of 360DVD for panorama video generation. Our project page is at https://akaneqwq.github.io/360DVD/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 568, + 128, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 568, + 128, + 582 + ], + "spans": [ + { + "bbox": [ + 47, + 568, + 128, + 582 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 589, + 287, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 589, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 589, + 287, + 685 + ], + "type": "text", + "content": "With the recent advancements in VR technology, 360-degree panoramic videos have been gaining increasing popularity. 
This video format which offers audiences an immersive experience, is helpful for various applications, including entertainment, education, and communication. To capture details of the entire scene, " + }, + { + "bbox": [ + 46, + 589, + 287, + 685 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 589, + 287, + 685 + ], + "type": "text", + "content": " videos are typically recorded using an array of high-resolution fisheye cameras that yields a " + }, + { + "bbox": [ + 46, + 589, + 287, + 685 + ], + "type": "inline_equation", + "content": "360^{\\circ} \\times 180^{\\circ}" + }, + { + "bbox": [ + 46, + 589, + 287, + 685 + ], + "type": "text", + "content": " field-of-view (FoV) [1], which" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 693, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 693, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 693, + 288, + 715 + ], + "type": "text", + "content": "This work was supported by National Natural Science Foundation of China under Grant 62372016. (Corresponding author: Jian Zhang)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 246, + 547, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 246, + 547, + 293 + ], + "spans": [ + { + "bbox": [ + 304, + 246, + 547, + 293 + ], + "type": "text", + "content": "is quite costly in both time and resources. Therefore, the generation of " + }, + { + "bbox": [ + 304, + 246, + 547, + 293 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 246, + 547, + 293 + ], + "type": "text", + "content": " panoramic videos is urgently required for border applications, while panoramic video generation receives little attention in studies to date." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 294, + 547, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 294, + 547, + 461 + ], + "spans": [ + { + "bbox": [ + 304, + 294, + 547, + 461 + ], + "type": "text", + "content": "Thanks to the emerging theory and training strategies, text-to-image (T2I) diffusion models [26, 27, 31, 32, 35] demonstrate remarkable image generation capacity from prompts given by users, and such impressive achievement in image generation is further extended to text-to-video (T2V) generation. Various T2V diffusion models [3, 16, 37, 46, 52, 60] are recently proposed with adopting space-time separable architectures, wherein spatial operations are inherited from the pre-trained T2I models to reduce the complexity of constructing space-time models from scratch. Among these,AnimateDiff [16] enables the capability to generate animated images for various personalized T2I models, which alleviates the requirement for model-specific tuning and achieves compelling content consistency over time." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 462, + 548, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 462, + 548, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 462, + 548, + 700 + ], + "type": "text", + "content": "Although T2V methods on standard videos are widely studied, there is no method proposed for panorama video generation. One potential approach is to leverage existing powerful T2V models, e.g.,AnimateDiff to directly generate the equirectangular projection (ERP) of panoramic videos. 
Since ERP is a commonly adopted format for storing and transmitting panoramic videos, each frame is treated by ERP as a rectangular image with an aspect ratio of 1:2, which aligns well with the output format of existing standard T2V models. However, due to the significant differences between panoramic videos and standard videos, existing methods suffer challenges in directly producing satisfactory " + }, + { + "bbox": [ + 304, + 462, + 548, + 700 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 462, + 548, + 700 + ], + "type": "text", + "content": " panoramic videos. Concretely, the main challenges include three aspects: (1) The content distribution of ERPs differs from standard videos. ERPs require a wider FoV, reaching " + }, + { + "bbox": [ + 304, + 462, + 548, + 700 + ], + "type": "inline_equation", + "content": "360^{\\circ} \\times 180^{\\circ}" + }, + { + "bbox": [ + 304, + 462, + 548, + 700 + ], + "type": "text", + "content": ". (2) The motion patterns of ERPs are different from standard videos, with movements often following curves rather than straight lines. (3) The left and right ends of ERPs should exhibit continuity since they correspond to the same meridian on the Earth." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 318, + 701, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 701, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 318, + 701, + 547, + 714 + ], + "type": "text", + "content": "Therefore, we propose a specifically designed method" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "text", + "content": "6913" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 70, + 294, + 343 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 294, + 343 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 294, + 343 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 294, + 343 + ], + "type": "image", + "image_path": "8e32efc6bee76ad52d5a48a7b9d8d07d1b7a95fd16b08df5393246543e02fcfa.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 353, + 546, + 376 + ], + "lines": [ + { + "bbox": [ + 46, + 353, + 546, + 376 + ], + "spans": [ + { + "bbox": [ + 46, + 353, + 546, + 376 + ], + "type": "text", + "content": "Figure 1. Main results. 
Our 360DVD creates text-aligned, coherent, and high-quality " + }, + { + "bbox": [ + 46, + 353, + 546, + 376 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 353, + 546, + 376 + ], + "type": "text", + "content": " panorama videos. Furthermore, 360DVD can cooperate with multiple personalized text-to-image models and consistently generate stylized panorama videos." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 299, + 71, + 536, + 344 + ], + "blocks": [ + { + "bbox": [ + 299, + 71, + 536, + 344 + ], + "lines": [ + { + "bbox": [ + 299, + 71, + 536, + 344 + ], + "spans": [ + { + "bbox": [ + 299, + 71, + 536, + 344 + ], + "type": "image", + "image_path": "95f33fe15aab58457293c435e6c1db7089b31e786666d0b74f687c56cb4bfe60.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 396, + 289, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 396, + 289, + 601 + ], + "spans": [ + { + "bbox": [ + 46, + 396, + 289, + 601 + ], + "type": "text", + "content": "named 360-Degree Video Diffusion (360DVD) for generating panorama videos. We first introduce a plug-and-play module named 360-Adapter to address challenge mentioned above. Our 360-Adapter receives zero values or motion conditions (e.g., optical flow) as input and outputs motion features, which are fed into the frozen denoising U-Net at different levels of the encoder. This transformation is aimed at converting the T2V model into a panoramic video generation without altering the foundational generative capabilities. In addition, we introduce 360 Enhancement Techniques including two mechanisms to enhance continuity at both ends of ERPs from both macro and micro perspectives, and a latitude-aware loss function for encouraging the model to focus more on low-latitude regions. Cooperated with carefully designed techniques, our 360DVD generates text-aligned, coherent, high-quality, " + }, + { + "bbox": [ + 46, + 396, + 289, + 601 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 396, + 289, + 601 + ], + "type": "text", + "content": " panorama videos with various styles, as shown in Fig. 1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 605, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 289, + 713 + ], + "type": "text", + "content": "Furthermore, we collect a panorama dataset named WEB360 including ERP-formatted videos from the internet and games for training our method. WEB360 involves approximately 2,000 video clips with each clip consisting of 100 frames. Considering the domain gap between panoramic and standard images, to enhance the accuracy and granularity of captions, we introduce a GPT-based 360 Text Fusion module for obtaining detailed captions. 
Our contributions can be summarized as follows:
- We introduce a controllable $360^{\circ}$ panorama video generation diffusion model named 360DVD, achieved by adapting a standard T2V model with a trainable, lightweight 360-Adapter. Our model can generate text-guided panorama videos conditioned on desired motions.
- We design 360 Enhancement Techniques, including a latitude-aware loss and two mechanisms, to enhance the content and motion quality of generated panorama videos.
- We propose a new high-quality dataset named WEB360, comprising approximately 2,000 panoramic videos, each accompanied by a detailed caption enhanced through 360 Text Fusion.
- Experiments demonstrate that our 360DVD is capable of generating high-quality, high-diversity, and more consistent $360^{\circ}$ panorama videos.

2. Related Works
2.1. Text-to-Image Diffusion Model

The Denoising Diffusion Probabilistic Model [9, 17, 39] has proven to be highly successful in generating high-quality images, outperforming previous approaches such as generative adversarial networks (GANs) [11, 57], variational autoencoders (VAEs) [20, 38], and flow-based methods [5]. With text guidance during training, users can generate images based on textual input. Noteworthy examples include GLIDE [27], DALL-E 2 [31], and Imagen [35]. To address the computational burden of the iterative denoising process, LDM [32] conducts the diffusion process in a compressed latent space rather than the original pixel space. This accomplishment has prompted further exploration into extending customization [14, 34], image guidance [53, 55], precise control [25, 26, 58], and protection [56].

2.2. Text-to-Video Diffusion Model

Despite significant advancements in Text-to-Image (T2I) generation, Text-to-Video (T2V) generation faces challenges, including the absence of large-scale, high-quality paired text-video datasets, the inherent complexity of modeling temporal consistency, and the resource-intensive nature of training. To address these challenges, many works leverage the knowledge of pre-trained T2I models and manage training costs by executing the diffusion process in the latent space. Some methods [15, 29, 48, 49, 54] utilize T2I models in zero-shot or few-shot ways. However, these methods often suffer from suboptimal frame consistency due to insufficient training. To address this limitation, another category of T2V diffusion models typically adopts space-time separable architectures. These models [3, 37, 46, 60] inherit spatial operations from pre-trained T2I models, reducing the complexity of constructing space-time models from scratch. Given that most personalized T2I models are derived from the same base model (e.g., Stable Diffusion [32]), AnimateDiff [16] designs a motion modeling module that is trained with a base T2I model and can animate most derived personalized T2I models once and for all.
There are also efforts focused on enhancing control in T2V models. Gen-1 [13], MCDiff [6], LaMD [18], and VideoComposer [47] introduce diverse conditions into T2V models. Despite these advancements, the aforementioned methods demand extensive training and lack a plug-and-play nature, making it challenging to apply them to a diverse range of personalized T2I models.

2.3. Panorama Generation

GAN-based methods for generating panoramic images have been widely studied [2, 4, 7, 10, 12, 23, 24, 28, 40, 41, 43, 50]. For instance, OmniDreamer [2] accepts a single NFoV image as an input condition and introduces a cyclic inference scheme to satisfy the inherent horizontal cyclicity of 360-degree images. ImmenseGAN [12] fine-tunes the generative model using a large-scale private text-image pair dataset, making the generation more controllable. Text2Light [7] introduces a zero-shot text-guided 360-image synthesis pipeline by utilizing the CLIP model. Very recently, diffusion models have achieved promising results in panoramic image generation. DiffCollage [59] uses semantic maps as conditions and generates images based on complex factor graphs using retrained diffusion models. PanoGen [21] employs a latent diffusion model and synthesizes new indoor panoramic images through recursive image drawing based on multiple text descriptions. PanoDiff [45] achieves multi-NFoV synthesis of panoramic images through a two-stage pose estimation module. IPO-LDM [51] uses a dual-modal RGB-D diffusion structure to better learn the spatial distribution and patterns of panoramic images. StitchDiffusion [44] employs a T2I diffusion model, ensuring continuity at both ends through stitching. However, to date, panoramic video generation has received limited attention. To the best of our knowledge, we are the first to leverage diffusion models for panoramic video generation.

3. Method

In this section, we begin with a concise review of the latent diffusion model and AnimateDiff [16]. Following that, we introduce the construction of the WEB360 dataset. We then provide an overview of 360DVD and elaborate on the implementation details of the 360-Adapter.
Finally, we describe the 360 Enhancement Techniques aimed at enriching the panoramic nature of the video.

3.1. Preliminaries

Latent Diffusion Model. Given an input signal $\mathbf{x}_0$, the diffusion forward process in DDPM [17] is defined as:

$$q\left(\mathbf{x}_{t} \mid \mathbf{x}_{t-1}\right) = \mathcal{N}\left(\mathbf{x}_{t};\ \sqrt{1-\beta_{t}}\,\mathbf{x}_{t-1},\ \beta_{t}\mathbf{I}\right), \tag{1}$$

for $t = 1,\ldots,T$, where $T$ is the total number of timesteps of the diffusion process. Noise whose scale depends on the variance $\beta_{t}$ is gradually added to $\mathbf{x}_{t-1}$ to obtain $\mathbf{x}_{t}$ at the next timestep, finally reaching $\mathbf{x}_{T} \sim \mathcal{N}(0,\mathbf{I})$. The goal of the diffusion model is to learn to reverse the diffusion process (denoising).
Given random noise $\mathbf{x}_{t}$, the model predicts the noise to be removed to obtain $\mathbf{x}_{t-1}$, iterating until the original signal $\mathbf{x}_0$ is recovered:

$$p_{\theta}\left(\mathbf{x}_{t-1} \mid \mathbf{x}_{t}\right) = \mathcal{N}\left(\mathbf{x}_{t-1};\ \boldsymbol{\mu}_{\theta}(\mathbf{x}_{t}, t),\ \boldsymbol{\Sigma}_{\theta}(\mathbf{x}_{t}, t)\right), \tag{2}$$

for $t = T,\ldots,1$. We fix the variance $\boldsymbol{\Sigma}_{\theta}(\mathbf{x}_{t}, t)$ and utilize the diffusion model with parameters $\theta$ to predict the mean of the reverse process, $\boldsymbol{\mu}_{\theta}(\mathbf{x}_{t}, t)$.
The model can be simplified to a denoising model $\epsilon_{\theta}(\mathbf{x}_{t}, t)$, which is trained to predict the noise in $\mathbf{x}_{t}$ with a noise prediction loss:

$$\mathcal{L} = \mathbb{E}_{\mathbf{x}_{0},\,\mathbf{y},\,\epsilon \sim \mathcal{N}(0,\mathbf{I}),\,t}\left[\left\|\epsilon - \epsilon_{\theta}\left(\mathbf{x}_{t}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y})\right)\right\|_{2}^{2}\right], \tag{3}$$

where $\epsilon$ is the noise added to the input image $\mathbf{x}_0$, $\mathbf{y}$ is the corresponding textual description, and $\boldsymbol{\tau}_{\theta}(\cdot)$ is a text encoder mapping the string to a sequence of vectors.

The Latent Diffusion Model (LDM) [32] executes the denoising process in the latent space of an autoencoder, namely $\mathcal{E}(\cdot)$ and $\mathcal{D}(\cdot)$, implemented as a VQ-GAN [19] or VQ-VAE [42] pre-trained on large image datasets.
During the training of the latent diffusion network, an input image $\mathbf{x}_0$ is first mapped to the latent space by the frozen encoder, yielding $\mathbf{z}_0 = \mathcal{E}(\mathbf{x}_0)$. Thus, the training objective can be formulated as follows:

$$\mathcal{L} = \mathbb{E}_{\mathcal{E}(\mathbf{x}_{0}),\,\mathbf{y},\,\boldsymbol{\epsilon} \sim \mathcal{N}(0,\mathbf{I}),\,t}\left[\left\|\boldsymbol{\epsilon} - \boldsymbol{\epsilon}_{\theta}\left(\mathbf{z}_{t}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y})\right)\right\|_{2}^{2}\right]. \tag{4}$$

In the widely used LDM Stable Diffusion (SD), on which our method is based, $\epsilon_{\theta}(\cdot)$ is implemented with a modified U-Net [33] that incorporates four downsample/upsample blocks and one middle block, resulting in four resolution levels within the network's latent space. Each resolution level integrates 2D convolution layers as well as self- and cross-attention mechanisms. The text model $\boldsymbol{\tau}_{\theta}(\cdot)$ is implemented using the CLIP [30] ViT-L/14 text encoder.
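To make the objective in Eqs. (3) and (4) concrete, the following is a minimal PyTorch sketch of one latent noise-prediction training step. The callables `vae_encoder`, `text_encoder`, and `unet` are placeholders for the frozen SD components, and the closed-form noising step is the standard DDPM formulation rather than anything specific to this paper.

```python
import torch
import torch.nn.functional as F

def latent_noise_prediction_loss(unet, vae_encoder, text_encoder,
                                 x0, prompt_ids, alphas_cumprod):
    """Sketch of the latent noise-prediction objective (Eq. 4)."""
    with torch.no_grad():
        z0 = vae_encoder(x0)                 # z_0 = E(x_0), frozen encoder
        text_emb = text_encoder(prompt_ids)  # tau_theta(y)

    t = torch.randint(0, len(alphas_cumprod), (z0.shape[0],), device=z0.device)
    eps = torch.randn_like(z0)               # eps ~ N(0, I)

    # Closed form of the forward process: z_t = sqrt(abar_t) z_0 + sqrt(1 - abar_t) eps.
    abar = alphas_cumprod[t].view(-1, 1, 1, 1)
    zt = abar.sqrt() * z0 + (1.0 - abar).sqrt() * eps

    eps_pred = unet(zt, t, text_emb)         # eps_theta(z_t, t, tau_theta(y))
    return F.mse_loss(eps_pred, eps)         # || eps - eps_theta ||_2^2
```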
AnimateDiff. AnimateDiff inflates the base SD by adding temporal-aware structures and learning reasonable motion priors from large-scale video datasets. Since the original SD can only process 4D image batches while the T2V task takes a 5D video tensor as input, AnimateDiff transforms each 2D convolution and attention layer of the original image model into a spatial-only pseudo-3D layer. The motion module is inserted at every resolution level of the U-shaped diffusion network, using vanilla temporal transformers consisting of several self-attention blocks operating along the temporal axis. The training objective of AnimateDiff can be written as:

$$\mathcal{L} = \mathbb{E}_{\mathcal{E}(\mathbf{x}_{0}^{1:N}),\,\mathbf{y},\,\boldsymbol{\epsilon} \sim \mathcal{N}(0,\mathbf{I}),\,t}\left[\left\|\boldsymbol{\epsilon} - \boldsymbol{\epsilon}_{\theta}\left(\mathbf{z}_{t}^{1:N}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y})\right)\right\|_{2}^{2}\right], \tag{5}$$

where $\mathbf{x}_{0}^{1:N}$ is the sampled video data, $\mathbf{z}_{0}^{1:N}$ is the latent code into which $\mathbf{x}_{0}^{1:N}$ is encoded via the pre-trained autoencoder, and $\mathbf{z}_{t}^{1:N}$ is the latent code obtained by perturbing the initial latent code $\mathbf{z}_{0}^{1:N}$ with noise at timestep $t$. During training, the pre-trained weights of the base T2I model are frozen to keep its feature space unchanged.
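As a side note on how such spatial-only pseudo-3D layers are commonly implemented, the snippet below folds the frame axis into the batch axis before a 2D layer and unfolds it afterwards; this is a generic sketch of the technique, not AnimateDiff's actual code.

```python
import torch

def apply_spatial_layer(layer_2d, video):
    """Run an image (2D) layer frame-by-frame on a video tensor of shape (B, C, N, H, W)."""
    b, c, n, h, w = video.shape
    frames = video.permute(0, 2, 1, 3, 4).reshape(b * n, c, h, w)  # fold frames into the batch
    out = layer_2d(frames)                                         # unchanged pre-trained 2D layer
    _, c2, h2, w2 = out.shape
    return out.reshape(b, n, c2, h2, w2).permute(0, 2, 1, 3, 4)    # unfold back to (B, C2, N, H2, W2)
```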
3.2. WEB360 Dataset

Diverse text-video pair datasets are essential for training open-domain text-to-video generation models. However, existing $360^{\circ}$ panorama video datasets lack corresponding textual annotations. Moreover, these datasets are often constrained in either scale or quality, thereby limiting the upper bound of high-quality video generation.

To address the aforementioned challenges and achieve high-quality $360^{\circ}$ panorama video generation, we introduce a novel text-video dataset named WEB360. This dataset comprises 2114 text-video pairs sourced from open-domain content, presented in high-definition (720p) ERP format. Our dataset creation process involved extracting 210 high-resolution panoramic video clips from the ODV360 [4] training set. Additionally, we collected over 400 original videos from YouTube. Because the complex scene transitions in these original videos pose challenges for models learning temporal correlations, we performed a manual screening process to split them into 1904 single-scene video clips. We employ BLIP [22] to annotate the first frame of the 2114 video clips. However, we observed that directly applying BLIP to ERP images often results in poor captions. Therefore, we propose a panoramic image captioning method named 360 Text Fusion, based on ChatGPT.

Figure 2. 360 Text Fusion. The captions of four images with a FoV of $90^{\circ}$ are fed into ChatGPT to generate a new $360^{\circ}$ summarization. Compared to the caption of the ERP at the bottom right, 360 Text Fusion allows for more fine-grained captions.

360 Text Fusion. We find that directly using BLIP [22] to label ERPs has drawbacks. On one hand, errors may arise due to the distortion near the poles, leading to misidentifications such as labeling a "person" as a "dog". On the other hand, the captions generated by BLIP lack granularity, making them insufficient for providing a detailed description of the current scene. Thus, we propose the 360 Text Fusion (360TF) method, as shown in Fig. 2. To deal with the irregular distortion of ERPs, we turn to less-distorted perspective images. We first project the original ERP image to four non-overlapping perspective images at $0^{\circ}$ latitude, each with a FoV of $90^{\circ}$. The four images are then fed into BLIP to be captioned. By pre-informing ChatGPT about the task and providing examples, these four captions are collectively input to ChatGPT, which then generates a summary of the scene as our final caption. In comparison to directly using BLIP to caption the entire image, our 360TF demonstrates a significant advantage in granularity.
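A rough sketch of this captioning pipeline is given below. `erp_to_perspective`, `blip_caption`, and `chat_summarize` are hypothetical stand-ins for an ERP-to-perspective projection, a BLIP captioner, and a ChatGPT call; the prompt wording is illustrative, not the one used by the authors.

```python
def fuse_360_caption(erp_frame, erp_to_perspective, blip_caption, chat_summarize):
    """Sketch of 360 Text Fusion: caption four 90-degree views, then fuse them with an LLM."""
    # Four non-overlapping horizontal views (FoV 90) that together cover 360 degrees.
    views = [erp_to_perspective(erp_frame, yaw_deg=yaw, pitch_deg=0, fov_deg=90)
             for yaw in (0, 90, 180, 270)]
    captions = [blip_caption(v) for v in views]       # per-view captions from BLIP
    prompt = ("These are captions of four views of the same panoramic scene: "
              + "; ".join(captions)
              + ". Merge them into one detailed description of the whole scene.")
    return chat_summarize(prompt)                     # fused caption for the ERP frame
```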
3.3. 360-degree Video Diffusion Model

An overview of the 360-degree Video Diffusion Model (360DVD) is presented in Fig. 3. It is composed of a pre-trained denoising U-Net and the 360-Adapter. The pre-trained denoising U-Net adopts a structure identical to that of AnimateDiff: at every resolution level of the U-Net, the spatial layer reuses pre-trained weights from SD, while the temporal layer incorporates the motion module of AnimateDiff trained on a large-scale text-video dataset.

Figure 3. Overview of 360DVD. 360DVD leverages a trainable 360-Adapter to extend standard T2V models to the panorama domain and is able to generate high-quality panorama videos from given prompts and optional motion conditions. In addition, 360 Enhancement Techniques are proposed for quality improvement from the panorama perspective.

During the training process, we first sample a video $\mathbf{x}_{0}^{1:N}$ from the dataset. The video is encoded into the latent code $\mathbf{z}_{0}^{1:N}$ through the pre-trained VAE encoder $\mathcal{E}(\cdot)$ and noised to $\mathbf{z}_{t}^{1:N}$. Simultaneously, the corresponding text $\mathbf{y}$ for the video is encoded using the CLIP text encoder $\boldsymbol{\tau}_{\theta}(\cdot)$. The video is also fed into a motion estimation network to generate the corresponding motion condition $\mathbf{c}$, which is then passed to the 360-Adapter $\mathcal{F}_{360}(\cdot)$. Finally, the noised latent code $\mathbf{z}_{t}^{1:N}$, timestep $t$, text embedding $\boldsymbol{\tau}_{\theta}(\mathbf{y})$, and the feature maps $\mathbf{f}_{360}$ generated by the 360-Adapter are collectively input into the U-Net $\boldsymbol{\epsilon}_{\theta}(\cdot)$ to predict the noise added to the latent code. As we aim to preserve the priors learned by SD and AnimateDiff on large datasets, we freeze their weights during training. With a simple L2 loss term, the training objective is given as follows:

$$\mathcal{L} = \mathbb{E}_{\mathcal{E}(\mathbf{x}_{0}^{1:N}),\,\mathbf{y},\,\boldsymbol{\epsilon} \sim \mathcal{N}(0,\mathbf{I}),\,t}\left[\left\|\boldsymbol{\epsilon} - \boldsymbol{\epsilon}_{\theta}\left(\mathbf{z}_{t}^{1:N}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y}), \mathbf{f}_{360}\right)\right\|_{2}^{2}\right]. \tag{6}$$

To ensure satisfactory generation of $360^{\circ}$ panoramic videos without motion control input, we set the input of the 360-Adapter to zero with probability $P$ during training. This strategy encourages the model to learn representations that are not solely reliant on motion conditions, enhancing its ability to generate compelling panoramic videos without explicit motion guidance.
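Putting Eq. (6) and the zero-condition strategy together, a hedged PyTorch sketch of one 360DVD training step might look as follows; `unet`, `adapter`, `vae_encoder`, and `text_encoder` are placeholders, and only the adapter is assumed to carry trainable parameters.

```python
import torch
import torch.nn.functional as F

def dvd360_training_step(unet, adapter, vae_encoder, text_encoder,
                         video, prompt_ids, motion_cond, alphas_cumprod, p_drop=0.2):
    """Sketch of one training step of Eq. (6) with motion-condition dropout."""
    with torch.no_grad():
        z0 = vae_encoder(video)                    # latent video, shape (B, C, N, h, w)
        text_emb = text_encoder(prompt_ids)

    if torch.rand(()) < p_drop:                    # zero the motion condition with probability P
        motion_cond = torch.zeros_like(motion_cond)

    t = torch.randint(0, len(alphas_cumprod), (z0.shape[0],), device=z0.device)
    eps = torch.randn_like(z0)
    abar = alphas_cumprod[t].view(-1, 1, 1, 1, 1)
    zt = abar.sqrt() * z0 + (1.0 - abar).sqrt() * eps

    f360 = adapter(motion_cond)                    # multi-scale features {f_360^1, ..., f_360^4}
    eps_pred = unet(zt, t, text_emb, f360)         # frozen U-Net modulated by f_360
    return F.mse_loss(eps_pred, eps)
```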
At inference time, users can optionally provide text prompts and motion guidance to carry out denoising over a total of $T$ steps. Here, we employ DDIM [39] to accelerate the sampling process. The estimated latent code $\hat{\mathbf{z}}_{0}^{1:N}$ is then fed into the pre-trained VAE decoder to produce the desired $360^{\circ}$ panoramic video $\hat{\mathbf{x}}_{0}^{1:N}$. Due to constraints such as the resolution limitations of existing SD and GPU memory usage, the experimental results presented in this paper are generated at a resolution of $512 \times 1024$. In practical applications, super-resolution methods [8, 40] can be employed to upscale the generated results to the desired size.

Figure 4. Overview of 360-Adapter. 360-Adapter is a simple but effective module whose intermediate features are fed into the U-Net encoder blocks for modulation.
360-Adapter. Our proposed 360-Adapter is simple and lightweight, as shown in Fig. 4. The original condition input has the same resolution as the video, $H \times W$. We first apply the pixel unshuffle [36] operation to downsample it to $H/8 \times W/8$. This is followed by four 360-Adapter blocks (only one is depicted in Fig. 4 for simplicity). To maintain consistency with the U-Net architecture, the first three 360-Adapter blocks each include a downsampling block. In each 360-Adapter block, one 2D convolution layer and a residual block (RB) with pseudo-3D convolution layers are used to extract the condition feature $\mathbf{f}_{360}^{k}$. Finally, the multi-scale condition features $\mathbf{f}_{360} = \{\mathbf{f}_{360}^{1}, \mathbf{f}_{360}^{2}, \mathbf{f}_{360}^{3}, \mathbf{f}_{360}^{4}\}$ are formed. Let the intermediate features of the U-Net encoder blocks be $\mathbf{f}_{enc} = \{\mathbf{f}_{enc}^{1}, \mathbf{f}_{enc}^{2}, \mathbf{f}_{enc}^{3}, \mathbf{f}_{enc}^{4}\}$; $\mathbf{f}_{360}$ is then added to $\mathbf{f}_{enc}$ at each scale. In summary, the condition feature extraction and conditioning operation of the 360-Adapter can be defined as:

$$\mathbf{f}_{360} = \mathcal{F}_{360}(\mathbf{c}), \tag{7}$$

$$\hat{\mathbf{f}}_{enc}^{i} = \mathbf{f}_{enc}^{i} + \mathbf{f}_{360}^{i},\quad i \in \{1, 2, 3, 4\}. \tag{8}$$

In the description above, we omitted some details. Our motion condition $\mathbf{c}$ is a 5D tensor; assume its size is batch $\times$ channels $\times$ frames $\times$ height $\times$ width. We first reshape it into a 4D tensor of size (batch $\times$ frames) $\times$ channels $\times$ height $\times$ width so that it can be fed into the 2D convolution layer, and then restore it to 5D to pass through the RB with pseudo-3D convolution layers. In the RB, we employ a $1 \times 3 \times 3$ pseudo-3D convolution to extract features along the spatial dimensions, followed by a $3 \times 1 \times 1$ pseudo-3D convolution to model information along the temporal dimension. The resulting features are reshaped back to (batch $\times$ frames) $\times$ channels $\times$ height $\times$ width and added to the output of the skip connection. Finally, the condition features are reshaped back into a 5D tensor of size batch $\times$ channels $\times$ frames $\times$ height $\times$ width to align with the U-Net encoder intermediate features.
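The module described above can be sketched in PyTorch as follows. Channel widths, the exact placement of downsampling, and activation choices are assumptions for illustration; only the overall structure (pixel unshuffle, a 2D convolution on folded frames, and a pseudo-3D residual block per scale) follows the text.

```python
import torch
import torch.nn as nn

class Pseudo3DResBlock(nn.Module):
    """Residual block: a 1x3x3 spatial conv followed by a 3x1x1 temporal conv."""
    def __init__(self, ch):
        super().__init__()
        self.spatial = nn.Conv3d(ch, ch, (1, 3, 3), padding=(0, 1, 1))
        self.temporal = nn.Conv3d(ch, ch, (3, 1, 1), padding=(1, 0, 0))

    def forward(self, x):                              # x: (B, C, N, H, W)
        return x + self.temporal(torch.relu(self.spatial(x)))

class Adapter360Block(nn.Module):
    """One 360-Adapter block: 2D conv on folded frames, then a pseudo-3D residual block."""
    def __init__(self, in_ch, out_ch, downsample):
        super().__init__()
        self.conv2d = nn.Conv2d(in_ch, out_ch, 3, stride=2 if downsample else 1, padding=1)
        self.res3d = Pseudo3DResBlock(out_ch)

    def forward(self, x):                              # x: (B, C, N, H, W)
        b, c, n, h, w = x.shape
        y = self.conv2d(x.permute(0, 2, 1, 3, 4).reshape(b * n, c, h, w))
        _, c2, h2, w2 = y.shape
        y = y.reshape(b, n, c2, h2, w2).permute(0, 2, 1, 3, 4)
        return self.res3d(y)

class Adapter360(nn.Module):
    """Sketch of F_360: pixel-unshuffled condition -> four multi-scale features f_360^{1..4}."""
    def __init__(self, cond_ch=2, widths=(320, 640, 1280, 1280)):   # illustrative widths
        super().__init__()
        self.unshuffle = nn.PixelUnshuffle(8)          # H x W -> H/8 x W/8
        chans = [cond_ch * 64] + list(widths)
        self.blocks = nn.ModuleList(
            Adapter360Block(chans[i], chans[i + 1], downsample=(i < 3)) for i in range(4))

    def forward(self, c):                              # c: (B, C, N, H, W) motion condition
        b, ch, n, h, w = c.shape
        x = self.unshuffle(c.permute(0, 2, 1, 3, 4).reshape(b * n, ch, h, w))
        x = x.reshape(b, n, ch * 64, h // 8, w // 8).permute(0, 2, 1, 3, 4)
        feats = []
        for blk in self.blocks:
            x = blk(x)
            feats.append(x)                            # one scale of f_360 per block
        return feats
```

Each returned feature would then be added to the corresponding U-Net encoder feature as in Eq. (8).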
3.4. 360 Enhancement Techniques

Latitude-aware Loss. When projecting panoramic videos into ERPs, meridians are mapped to vertical lines with constant spacing, and parallels are mapped to horizontal lines with constant spacing. This projection establishes a straightforward mapping relationship, but it is neither equal-area nor conformal, introducing significant distortion, particularly in the polar regions. To make the denoiser pay more attention to the less-distorted low-latitude regions, which are more crucial for human visual perception, we introduce a latitude-aware loss:

$$\mathcal{L} = \mathbb{E}_{\mathcal{E}(\mathbf{x}_{0}^{1:N}),\,\mathbf{y},\,\boldsymbol{\epsilon} \sim \mathcal{N}(0,\mathbf{I}),\,t}\left[\left\|\mathbf{W} \odot (\boldsymbol{\epsilon} - \hat{\boldsymbol{\epsilon}}_{\theta})\right\|_{2}^{2}\right], \tag{9}$$

where $\hat{\boldsymbol{\epsilon}}_{\theta} = \boldsymbol{\epsilon}_{\theta}(\mathbf{z}_{t}^{1:N}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y}), \mathbf{f}_{360})$, and $\mathbf{W}$ is a weight matrix applied via the element-wise product, defined as:

$$\mathbf{W}_{i,j} = \cos\left(\frac{2i - H/8 + 1}{H/4}\,\pi\right), \tag{10}$$

where $i \in [0, H/8)$, $j \in [0, W/8)$, and $H/8$ and $W/8$ are the height and width of the latent code $\mathbf{z}_{t}^{1:N}$. The visualization of $\mathbf{W}$ is shown in Fig. 5, where pixels at low and middle latitudes are given more weight during training.

Figure 5. Left: the visualization of the weight matrix $\mathbf{W}$; brighter colors indicate values closer to 1, while darker colors indicate values closer to 0. Right: a schematic diagram of the latent rotation mechanism. In each iteration, the far-left portion spanning angle $\theta$ is shifted to the far right.
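The weight matrix of Eq. (10) and the weighted objective of Eq. (9) can be written in a few lines of PyTorch; this is a direct transcription of the formulas, with the latent height and width taken from the noise tensor.

```python
import torch

def latitude_weight(h_lat, w_lat, device="cpu"):
    """Weight matrix W of Eq. (10) on an (h_lat x w_lat) latent grid (h_lat = H/8)."""
    i = torch.arange(h_lat, device=device, dtype=torch.float32)
    row = torch.cos((2 * i - h_lat + 1) / (2 * h_lat) * torch.pi)  # ~1 at the equator, ~0 at the poles
    return row.view(h_lat, 1).expand(h_lat, w_lat)

def latitude_aware_loss(eps, eps_pred):
    """Eq. (9): latitude-weighted noise-prediction loss on latents of shape (..., h, w)."""
    w = latitude_weight(eps.shape[-2], eps.shape[-1], device=eps.device)
    return ((w * (eps - eps_pred)) ** 2).mean()
```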
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "spans": [ + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "text", + "content": "the macroscopic coherence between the left and right ends of the video. During the inference process, we perform a horizontal rotation at an angle of " + }, + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t^{1:N}" + }, + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "text", + "content": " and motion condition " + }, + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "text", + "content": ", at each denoising step. As illustrated in Fig. 5, the content on the far left is shifted to the far right, where we use " + }, + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_0^1" + }, + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "text", + "content": " to replace " + }, + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t^{1:N}" + }, + { + "bbox": [ + 304, + 229, + 545, + 337 + ], + "type": "text", + "content": " for a better visual effect of its continuity. During the training process, we also randomly rotate the training videos along with the motion condition by a random angle as a data augmentation strategy." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 339, + 546, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 339, + 546, + 472 + ], + "spans": [ + { + "bbox": [ + 304, + 339, + 546, + 472 + ], + "type": "text", + "content": "Circular Padding Mechanism. Although the previous latent rotation mechanism achieves semantic continuity at a macroscopic level, achieving pixel-level continuity is challenging. Therefore, in the inference process, we adopt a mechanism of circular padding by modifying the padding method of the convolution layers. We observe that the early stages of " + }, + { + "bbox": [ + 304, + 339, + 546, + 472 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 339, + 546, + 472 + ], + "type": "text", + "content": " video generation often involve layout modeling, while the later stages focus on detail completion. To maintain the stable video generation quality of 360DVD, we only implement the circular padding mechanism in the late " + }, + { + "bbox": [ + 304, + 339, + 546, + 472 + ], + "type": "inline_equation", + "content": "\\left\\lfloor \\frac{T}{2} \\right\\rfloor" + }, + { + "bbox": [ + 304, + 339, + 546, + 472 + ], + "type": "text", + "content": " steps of a total of " + }, + { + "bbox": [ + 304, + 339, + 546, + 472 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 339, + 546, + 472 + ], + "type": "text", + "content": " denoising steps." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 489, + 382, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 489, + 382, + 502 + ], + "spans": [ + { + "bbox": [ + 306, + 489, + 382, + 502 + ], + "type": "text", + "content": "4. Experiment" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 510, + 439, + 523 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 510, + 439, + 523 + ], + "spans": [ + { + "bbox": [ + 306, + 510, + 439, + 523 + ], + "type": "text", + "content": "4.1. Implementation Details" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "text", + "content": "Training Settings. We choose Stable Diffusion v1.5 and Motion Module v14 as our base model. We utilize the panoramic optical flow estimator PanoFlow [45] to generate motion conditions. We train the 360-Adapter using the proposed WEB360 dataset. The resolution is set to " + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "inline_equation", + "content": "512 \\times 1024" + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "text", + "content": ", the length of frames to 16, the batch size to 1, the learning rate to " + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "text", + "content": ", and the total number of training steps to " + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "inline_equation", + "content": "100k" + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "text", + "content": ", probability " + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "inline_equation", + "content": "P = 0.2" + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "text", + "content": ". We use a linear beta schedule as animateDiff, where " + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "inline_equation", + "content": "\\beta_{start} = 0.00085" + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "inline_equation", + "content": "\\beta_{end} = 0.012" + }, + { + "bbox": [ + 304, + 531, + 545, + 639 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 641, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 545, + 712 + ], + "type": "text", + "content": "Inference Settings. We use DDIM with 25 sampling steps, and the scale for text guidance is 7.5, the angle " + }, + { + "bbox": [ + 304, + 641, + 545, + 712 + ], + "type": "inline_equation", + "content": "\\theta = \\pi /2" + }, + { + "bbox": [ + 304, + 641, + 545, + 712 + ], + "type": "text", + "content": ". We collect several personalized Stable Diffusion models from CivitAI to verify the effectiveness and generalizability of our method, including Realistic Vision, Lyriel, ToonYou, and RCNZ Cartoon." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6918" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 73, + 539, + 327 + ], + "blocks": [ + { + "bbox": [ + 53, + 73, + 539, + 327 + ], + "lines": [ + { + "bbox": [ + 53, + 73, + 539, + 327 + ], + "spans": [ + { + "bbox": [ + 53, + 73, + 539, + 327 + ], + "type": "image", + "image_path": "5875d91e0c3dc9a487f99f4eda4b1e1f59e7fab5215c1ea864dd2c37a2223e06.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 338, + 547, + 361 + ], + "lines": [ + { + "bbox": [ + 47, + 338, + 547, + 361 + ], + "spans": [ + { + "bbox": [ + 47, + 338, + 547, + 361 + ], + "type": "text", + "content": "Figure 6. Qualitative comparisons with baseline methods. 360DVD successfully produces stable and high-quality panorama video over various prompts while other methods are failed." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 52, + 373, + 544, + 500 + ], + "blocks": [ + { + "bbox": [ + 52, + 373, + 544, + 500 + ], + "lines": [ + { + "bbox": [ + 52, + 373, + 544, + 500 + ], + "spans": [ + { + "bbox": [ + 52, + 373, + 544, + 500 + ], + "type": "image", + "image_path": "b2302c66f49956ac65b576c168d7edb4feeeddd6f588df6d2a6834ee2e55a578.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 509, + 547, + 534 + ], + "lines": [ + { + "bbox": [ + 46, + 509, + 547, + 534 + ], + "spans": [ + { + "bbox": [ + 46, + 509, + 547, + 534 + ], + "type": "text", + "content": "Figure 7. Qualitative comparisons of optical flow. 360DVD generates panorama videos with reasonable motion patterns consistent with the conditioned optical flow." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 552, + 160, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 552, + 160, + 565 + ], + "spans": [ + { + "bbox": [ + 47, + 552, + 160, + 565 + ], + "type": "text", + "content": "4.2. Qualitative Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 576, + 288, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 576, + 288, + 613 + ], + "spans": [ + { + "bbox": [ + 46, + 576, + 288, + 613 + ], + "type": "text", + "content": "Due to space limitations, we only display several frames of each video. We strongly recommend readers refer to our project page for more results and better visual quality." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 618, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 289, + 715 + ], + "type": "text", + "content": "Prompt-guided Panorama Video Generation. We present several prompt-guided " + }, + { + "bbox": [ + 46, + 618, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 618, + 289, + 715 + ], + "type": "text", + "content": " panorama video generation results across different personalized models in Fig. 1. 
The figure shows that our method successfully turns personalized T2I models into panorama video generators. Our method can produce impressive generation results ranging from real to cartoon styles, from natural landscapes to cultural scenery. This success is attributed to the fact that our" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 552, + 545, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 552, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 305, + 552, + 545, + 588 + ], + "type": "text", + "content": "method preserves the image generation priors and temporal modeling priors learned by SD andAnimateDiff on large-scale datasets." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": "Motion-guided Panorama Video Generation. We showcase panoramic video generation results guided by three typical optical flow maps, as shown in Fig. 7. The optical flow maps in the first row indicate the primary motion areas in the Arctic, where we can observe significant movement of clouds in the sky. The optical flow maps in the second row and third row indicate motion areas primarily in the Antarctic, where we can see the movement of trees and hot air balloons near the Antarctic." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "text", + "content": "6919" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 70, + 545, + 160 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 545, + 160 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 545, + 160 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 545, + 160 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Index</td><td rowspan="2">Methods</td><td colspan="2">Video Criteria</td><td colspan="3">Panorama Criteria</td></tr>
<tr><td>Graphics Quality</td><td>Frame Consistency</td><td>End Continuity</td><td>Content Distribution</td><td>Motion Pattern</td></tr>
<tr><td>A</td><td>AnimateDiff</td><td>11.3%</td><td>15.3%</td><td>5.3%</td><td>4.8%</td><td>4.4%</td></tr>
<tr><td>B</td><td>A+LoRA</td><td>14.1%</td><td>10.5%</td><td>6.0%</td><td>12.1%</td><td>6.5%</td></tr>
<tr><td>C</td><td>B+360ET</td><td>23.0%</td><td>9.7%</td><td>16.9%</td><td>16.1%</td><td>14.5%</td></tr>
<tr><td>D</td><td>Ours</td><td>51.6%</td><td>64.5%</td><td>71.8%</td><td>67.0%</td><td>74.6%</td></tr></table>
", + "image_path": "de39fcbd411b9fb9c7acc6ef381bafe4af99c7432c50b2f376805cde7aa31cd5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 167, + 546, + 191 + ], + "lines": [ + { + "bbox": [ + 46, + 167, + 546, + 191 + ], + "spans": [ + { + "bbox": [ + 46, + 167, + 546, + 191 + ], + "type": "text", + "content": "Table 1. User preference studies. More raters prefer videos generated by our 360DVD, especially over panorama criteria including if generated videos have left-to-right continuity, the panorama content distribution, and the panorama motion pattern." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 211, + 128, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 211, + 128, + 224 + ], + "spans": [ + { + "bbox": [ + 47, + 211, + 128, + 224 + ], + "type": "text", + "content": "4.3. Comparison" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 229, + 289, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 229, + 289, + 482 + ], + "spans": [ + { + "bbox": [ + 46, + 229, + 289, + 482 + ], + "type": "text", + "content": "We compare our results with nativeAnimateDiff,AnimateDiff with a LoRA for panorama image generation from CivitAI named LatentLabs360,AnimateDiff with panoramic LoRA, and our proposed 360 Enhancement Techniques (loss excepted). We can observe that the results generated by the nativeAnimateDiff have a very narrow field of view, which does not align with the content distribution of panoramic videos. WhenAnimateDiff is augmented with panoramic LoRA, it produces videos with a broader field of view; however, the two ends of videos lack continuity, and object movements are highly random. Our proposed 360ET method significantly enhances the continuity between two ends of the videos but fails to address issues such as non-compliance with panoramic motion patterns and poor cross-frame consistency. Notably, our 360DVD can generate videos that best adhere to the content distribution and motion patterns of panoramic videos. We are pleased to discover that, thanks to the high-quality training data provided by WEB360, the videos generated by 360DVD exhibit more realistic colors and nuanced lighting, providing an immersive experience." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 489, + 142, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 489, + 142, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 489, + 142, + 502 + ], + "type": "text", + "content": "4.4. Ablation Study" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 507, + 289, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 507, + 289, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 507, + 289, + 652 + ], + "type": "text", + "content": "We primarily conducted ablation studies on the proposed 360 Text Fusion strategy, the pseudo-3D layer in the 360-Adapter, and the latitude-aware loss, as illustrated in Fig. 8. Given the prompt \"a car driving down a street next to a forest\", the first row without 360TF can not generate the car because of low-quality captions in the training process. The second row without pseudo-3D layer can generate a car, but due to the lack of temporal modeling, the results exhibit flickering. 
The third row without latitude-aware loss can produce relatively good results, but it still falls slightly short in terms of clarity, field of view, and other aspects compared to the last row with the complete 360DVD." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 658, + 122, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 658, + 122, + 672 + ], + "spans": [ + { + "bbox": [ + 47, + 658, + 122, + 672 + ], + "type": "text", + "content": "4.5. User Study" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": "31 participants were surveyed to evaluate the graphics quality, cross-frame consistency, left-right continuity, content distribution, and motion patterns of 8 sets of generated" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 213, + 541, + 375 + ], + "blocks": [ + { + "bbox": [ + 310, + 213, + 541, + 375 + ], + "lines": [ + { + "bbox": [ + 310, + 213, + 541, + 375 + ], + "spans": [ + { + "bbox": [ + 310, + 213, + 541, + 375 + ], + "type": "image", + "image_path": "a24ad557b7827453850145863261f8d88df87d01a8dca7dd403fb8f9d7a38cbe.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 384, + 546, + 417 + ], + "lines": [ + { + "bbox": [ + 305, + 384, + 546, + 417 + ], + "spans": [ + { + "bbox": [ + 305, + 384, + 546, + 417 + ], + "type": "text", + "content": "Figure 8. Ablation studies on 360 Text Fusion (360TF), pseudo-3D layer in 360-Adapter (Pseudo-3D), and latitude-aware loss (Lat. Loss)." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 433, + 547, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 433, + 547, + 529 + ], + "spans": [ + { + "bbox": [ + 304, + 433, + 547, + 529 + ], + "type": "text", + "content": "results. For each criterion, they selected the video they deemed most fitting for the theme of high-quality 360-degree panoramic videos. The data presented in Table 1 indicates that our model outperforms the other three methods significantly across all five dimensions. Simultaneously, our proposed 360ET can remarkably improve video quality, and left-right continuity, solely based on the nativeAnimateDiff and panoramic LoRA." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 540, + 379, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 540, + 379, + 552 + ], + "spans": [ + { + "bbox": [ + 306, + 540, + 379, + 552 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 559, + 547, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 559, + 547, + 704 + ], + "spans": [ + { + "bbox": [ + 304, + 559, + 547, + 704 + ], + "type": "text", + "content": "In this paper, we introduce 360DVD, a pipeline for controllable " + }, + { + "bbox": [ + 304, + 559, + 547, + 704 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 559, + 547, + 704 + ], + "type": "text", + "content": " panorama video generation. Our framework leverages text prompts and motion guidance to animate personalized T2I models. 
Utilizing the proposed WEB360 dataset, 360-Adapter, and 360 Enhancement Techniques, our framework can generate videos that adhere to the content distribution and motion patterns in real captured panoramic videos. Extensive experiments demonstrate our effectiveness in creating high-quality panorama videos with various prompts and styles. We believe that our framework provides a simple but effective solution for panoramic video generation, and leads to inspiration for possible future works." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "6920" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "text", + "content": "[1] Hao Ai, Zidong Cao, Jinjing Zhu, Haotian Bai, Yucheng Chen, and Lin Wang. Deep learning for omnidirectional vision: A survey and new perspectives. arXiv preprint arXiv:2205.10468, 2022. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 136, + 288, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 136, + 288, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 136, + 288, + 190 + ], + "type": "text", + "content": "[2] Naofumi Akimoto, Yuhi Matsuo, and Yoshimitsu Aoki. *Diverse plausible 360-degree image outpainting for efficient 3dgc background creation.* In *Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition*, pages 11441–11450, 2022. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 192, + 288, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 192, + 288, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 192, + 288, + 235 + ], + "type": "text", + "content": "[3] Jie An, Songyang Zhang, Harry Yang, Sonal Gupta, Jia-Bin Huang, Jiebo Luo, and Xi Yin. Latent-shift: Latent diffusion with temporal shift for efficient text-to-video generation. arXiv preprint arXiv:2304.08477, 2023. 
1, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 236, + 288, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 288, + 433 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 288, + 433 + ], + "type": "text", + "content": "[4] Mingdeng Cao, Chong Mou, Fanghua Yu, Xintao Wang, Yinqiang Zheng, Jian Zhang, Chao Dong, Gen Li, Ying Shan, Radu Timofte, Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Xuhan Sheng, Bin Chen, Haoyu Ma, Ming Cheng, Shijie Zhao, Wanwan Cui, Tianyu Xu, Chunyang Li, Long Bao, Heng Sun, Huaibo Huang, Xiaoqiang Zhou, Yang Ai, Ran He, Renlong Wu, Yi Yang, Zhilu Zhang, Shuo-hao Zhang, Junyi Li, Yunjin Chen, Dongwei Ren, Wang-meng Zuo, Qian Wang, Hao-Hsiang Yang, Yi-Chung Chen, Zhi-Kai Huang, Wei-Ting Chen, Yuan-Chun Chiang, Hua-En Chang, I-Hsiang Chen, Chia-Hsuan Hsieh, Sy-Yen Kuo, Zebin Zhang, Jiaqi Zhang, Yuhui Wang, Shuhao Cui, Junshi Huang, Li Zhu, Shuman Tian, Wei Yu, and Bingchun Luo. Ntire 2023 challenge on 360deg omnidirectional image and video super-resolution: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 1731-1745, 2023. 3, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 434, + 287, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 434, + 287, + 478 + ], + "spans": [ + { + "bbox": [ + 53, + 434, + 287, + 478 + ], + "type": "text", + "content": "[5] Ricky TQ Chen, Jens Behrmann, David K Duvenaud, and Jorn-Henrik Jacobsen. Residual flows for invertible generative modeling. Advances in Neural Information Processing Systems, 32, 2019. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 479, + 287, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 479, + 287, + 522 + ], + "spans": [ + { + "bbox": [ + 53, + 479, + 287, + 522 + ], + "type": "text", + "content": "[6] Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. Motion-conditioned diffusion model for controllable video synthesis. arXiv preprint arXiv:2304.14404, 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 524, + 287, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 524, + 287, + 556 + ], + "spans": [ + { + "bbox": [ + 53, + 524, + 287, + 556 + ], + "type": "text", + "content": "[7] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG), 41(6):1-16, 2022. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 558, + 288, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 558, + 288, + 624 + ], + "spans": [ + { + "bbox": [ + 53, + 558, + 288, + 624 + ], + "type": "text", + "content": "[8] Ming Cheng, Haoyu Ma, Qiufang Ma, Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Xuhan Sheng, Shijie Zhao, Junlin Li, and Li Zhang. Hybrid transformer and cnn attention network for stereo image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1702-1711, 2023. 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 624, + 288, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 624, + 288, + 678 + ], + "spans": [ + { + "bbox": [ + 53, + 624, + 288, + 678 + ], + "type": "text", + "content": "[9] Xinhua Cheng, Nan Zhang, Jiwen Yu, Yinhuai Wang, Ge Li, and Jian Zhang. 
Null-space diffusion sampling for zero-shot point cloud completion. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence (IJ-CAI), 2023. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 680, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 288, + 714 + ], + "type": "text", + "content": "[10] Yen-Chi Cheng, Chieh Hubert Lin, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, and Ming-Hsuan Yang. Inout: Diverse image outpainting via gan inversion. In Proceedings of" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 714 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11431-11440, 2022. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 96, + 547, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 547, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 547, + 140 + ], + "type": "text", + "content": "[11] Antonia Creswell, Tom White, Vincent Dumoulin, Kai Arulkumaran, Biswa Sengupta, and Anil A Bharath. Generative adversarial networks: An overview. IEEE signal processing magazine, 35(1):53-65, 2018. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 141, + 547, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 547, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 547, + 196 + ], + "type": "text", + "content": "[12] Mohammad Reza Karimi Dastjerdi, Yannick Hold-Geoffroy, Jonathan Eisenmann, Siavash Khodadadeh, and Jean-François Lalonde. Guided co-modulated gan for " + }, + { + "bbox": [ + 307, + 141, + 547, + 196 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 307, + 141, + 547, + 196 + ], + "type": "text", + "content": " field of view extrapolation. In 2022 International Conference on 3D Vision (3DV), pages 475–485. IEEE, 2022. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 198, + 547, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 198, + 547, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 198, + 547, + 251 + ], + "type": "text", + "content": "[13] Patrick Esser, Johnathan Chiu, Parmida Atighechian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7346-7356, 2023. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 253, + 547, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 253, + 547, + 306 + ], + "spans": [ + { + "bbox": [ + 307, + 253, + 547, + 306 + ], + "type": "text", + "content": "[14] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. 
arXiv preprint arXiv:2208.01618, 2022.3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 308, + 545, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 308, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 308, + 545, + 342 + ], + "type": "text", + "content": "[15] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 343, + 545, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 343, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 307, + 343, + 545, + 387 + ], + "type": "text", + "content": "[16] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 1, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 388, + 545, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 421 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 421 + ], + "type": "text", + "content": "[17] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 2, 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 422, + 545, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 454 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 454 + ], + "type": "text", + "content": "[18] Yaosi Hu, Zhenzhong Chen, and Chong Luo. Lamd: Latent motion diffusion for video generation. arXiv preprint arXiv:2304.11603, 2023. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 456, + 545, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 545, + 510 + ], + "type": "text", + "content": "[19] Xuhui Jia, Yang Zhao, Kelvin CK Chan, Yandong Li, Han Zhang, Boqing Gong, Tingbo Hou, Huisheng Wang, and Yu-Chuan Su. Taming encoder for zero fine-tuning image customization with text-to-image diffusion models. arXiv preprint arXiv:2304.02642, 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 512, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 512, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 512, + 545, + 533 + ], + "type": "text", + "content": "[20] Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 535, + 545, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 535, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 307, + 535, + 545, + 578 + ], + "type": "text", + "content": "[21] Jialu Li and Mohit Bansal. Panogen: Text-conditioned panoramic environment generation for vision-and-language navigation. Advances in Neural Information Processing Systems, 36, 2024. 
3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 580, + 545, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 580, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 307, + 580, + 545, + 633 + ], + "type": "text", + "content": "[22] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning, pages 12888-12900. PMLR, 2022. 4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 635, + 545, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 690 + ], + "type": "text", + "content": "[23] Chieh Hubert Lin, Chia-Che Chang, Yu-Sheng Chen, Da-Cheng Juan, Wei Wei, and Hwann-Tzong Chen. Coco-gan: Generation by parts via conditional coordinating. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4512-4521, 2019. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 692, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 692, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 692, + 545, + 714 + ], + "type": "text", + "content": "[24] Chieh Hubert Lin, Hsin-Ying Lee, Yen-Chi Cheng, Sergey Tulyakov, and Ming-Hsuan Yang. Infinitygan: To" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "6921" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "text", + "content": "wards infinite-pixel image synthesis. arXiv preprint arXiv:2104.03963, 2021. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 139 + ], + "type": "text", + "content": "[25] Chong Mou, Xintao Wang, Jiechong Song, Ying Shan, and Jian Zhang. Dragondiffusion: Enabling drag-style manipulation on diffusion models. In The Twelfth International Conference on Learning Representations, 2024. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 140, + 287, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 287, + 194 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 287, + 194 + ], + "type": "text", + "content": "[26] Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, and Ying Shan. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4296–4304, 2024. 
1, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 195, + 287, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 195, + 287, + 249 + ], + "spans": [ + { + "bbox": [ + 48, + 195, + 287, + 249 + ], + "type": "text", + "content": "[27] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 1, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 251, + 287, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 251, + 287, + 304 + ], + "spans": [ + { + "bbox": [ + 48, + 251, + 287, + 304 + ], + "type": "text", + "content": "[28] Changgyoon Oh, Wonjune Cho, Yujeong Chae, Daehee Park, Lin Wang, and Kuk-Jin Yoon. Bips: Bi-modal indoor panorama synthesis via residual depth-aided adversarial learning. In European Conference on Computer Vision, pages 352–371. Springer, 2022. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 306, + 287, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 306, + 287, + 360 + ], + "spans": [ + { + "bbox": [ + 48, + 306, + 287, + 360 + ], + "type": "text", + "content": "[29] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing attentions for zero-shot text-based video editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15932-15942, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 361, + 287, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 361, + 287, + 425 + ], + "spans": [ + { + "bbox": [ + 48, + 361, + 287, + 425 + ], + "type": "text", + "content": "[30] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 426, + 287, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 426, + 287, + 469 + ], + "spans": [ + { + "bbox": [ + 48, + 426, + 287, + 469 + ], + "type": "text", + "content": "[31] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 1, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 472, + 287, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 472, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 48, + 472, + 287, + 525 + ], + "type": "text", + "content": "[32] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Bjorn Ommer. High-resolution image synthesis with latent diffusion models. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 526, + 287, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 526, + 287, + 559 + ], + "spans": [ + { + "bbox": [ + 48, + 526, + 287, + 559 + ], + "type": "text", + "content": "[33] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. 
U-Net: Convolutional Networks for Biomedical Image Segmentation, page 234–241. 2015. 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 559, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 287, + 624 + ], + "type": "text", + "content": "[34] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 625, + 287, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 287, + 690 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 287, + 690 + ], + "type": "text", + "content": "[35] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 1, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "type": "text", + "content": "[36] Wenzhe Shi, Jose Caballero, Ferenc Huszar, Johannes Totz, Andrew P. Aitken, Rob Bishop, Daniel Rueckert, and Zehan" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 117 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 117 + ], + "type": "text", + "content": "Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 119, + 545, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 119, + 545, + 172 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 545, + 172 + ], + "type": "text", + "content": "[37] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022. 1, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "text", + "content": "[38] Kihyuk Sohn, Honglak Lee, and Xinchen Yan. Learning structured output representation using deep conditional generative models. Advances in neural information processing systems, 28, 2015. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 220, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 251 + ], + "type": "text", + "content": "[39] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502, 2020. 2, 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 254, + 545, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 254, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 307, + 254, + 545, + 330 + ], + "type": "text", + "content": "[40] Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Qiufang Ma, Xuhan Sheng, Ming Cheng, Haoyu Ma, Shijie Zhao, Jian Zhang, Junlin Li, et al. Opdn: Omnidirectional position-aware deformable network for omnidirectional image superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1293-1301, 2023. 3, 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 332, + 545, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 332, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 307, + 332, + 545, + 396 + ], + "type": "text", + "content": "[41] Piotr Teterwak, Aaron Sarna, Dilip Krishnan, Aaron Maschinot, David Belanger, Ce Liu, and William T Freeman. Boundless: Generative adversarial networks for image extension. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10521-10530, 2019. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 399, + 545, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 545, + 431 + ], + "type": "text", + "content": "[42] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 434, + 545, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 434, + 545, + 477 + ], + "spans": [ + { + "bbox": [ + 307, + 434, + 545, + 477 + ], + "type": "text", + "content": "[43] Guangcong Wang, Yinuo Yang, Chen Change Loy, and Zwei Liu. Stylelight: HDR panorama generation for lighting estimation and editing. In European Conference on Computer Vision, pages 477-492. Springer, 2022. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 479, + 545, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 479, + 545, + 532 + ], + "spans": [ + { + "bbox": [ + 307, + 479, + 545, + 532 + ], + "type": "text", + "content": "[44] Hai Wang, Xiaoyu Xiang, Yuchen Fan, and Jing-Hao Xue. Customizing 360-degree panoramas through text-to-image diffusion models. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 4933-4943, 2024. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 534, + 545, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 534, + 545, + 577 + ], + "spans": [ + { + "bbox": [ + 307, + 534, + 545, + 577 + ], + "type": "text", + "content": "[45] Jionghao Wang, Ziyu Chen, Jun Ling, Rong Xie, and Li Song. 360-degree panorama generation from few unregistered nfov images. arXiv preprint arXiv:2308.14686, 2023. 
3, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 579, + 545, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 579, + 545, + 623 + ], + "spans": [ + { + "bbox": [ + 307, + 579, + 545, + 623 + ], + "type": "text", + "content": "[46] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 1, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 624, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 545, + 678 + ], + "type": "text", + "content": "[47] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems, 36, 2024. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "text", + "content": "[48] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6922" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 710 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 106 + ], + "type": "text", + "content": "of image diffusion models for text-to-video generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7623-7633, 2023. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "type": "text", + "content": "[49] Ruiqi Wu, Liangyu Chen, Tong Yang, Chunle Guo, Chongyi Li, and Xiangyu Zhang. Lamp: Learn a motion pattern for few-shot-based video generation. arXiv preprint arXiv:2310.10769, 2023. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 287, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 287, + 185 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 287, + 185 + ], + "type": "text", + "content": "[50] Songsong Wu, Hao Tang, Xiao-Yuan Jing, Haifeng Zhao, Jianjun Qian, Nicu Sebe, and Yan Yan. Cross-view panorama image synthesis. IEEE Transactions on Multimedia, 2022. 
3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 287, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 228 + ], + "type": "text", + "content": "[51] Tianhao Wu, Chuanxia Zheng, and Tat-Jen Cham. IPO-ldm: Depth-aided 360-degree indoor rgb panorama outpainting via latent diffusion model. arXiv preprint arXiv:2307.03177, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 230, + 287, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 230, + 287, + 285 + ], + "spans": [ + { + "bbox": [ + 48, + 230, + 287, + 285 + ], + "type": "text", + "content": "[52] Jinbo Xing, Menghan Xia, Yuxin Liu, Yuechen Zhang, Y He, H Liu, H Chen, X Cun, X Wang, Y Shan, et al. Makeyour-video: Customized video generation using textual and structural guidance. IEEE Transactions on Visualization and Computer Graphics, 2024. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 286, + 287, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 286, + 287, + 351 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 287, + 351 + ], + "type": "text", + "content": "[53] Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, and Fang Wen. Paint by example: Exemplar-based image editing with diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18381-18391, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 353, + 287, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 353, + 287, + 397 + ], + "spans": [ + { + "bbox": [ + 48, + 353, + 287, + 397 + ], + "type": "text", + "content": "[54] Shuai Yang, Yifan Zhou, Ziwei Liu, and Chen Change Loy. Rerender a video: Zero-shot text-guided video-to-video translation. In SIGGRAPH Asia 2023 Conference Papers, pages 1-11, 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 399, + 287, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 399, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 399, + 287, + 453 + ], + "type": "text", + "content": "[55] Jiwen Yu, Yinhuai Wang, Chen Zhao, Bernard Ghanem, and Jian Zhang. Freedom: Training-free energy-guided conditional diffusion model. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23174-23184, 2023. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 454, + 287, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 454, + 287, + 497 + ], + "spans": [ + { + "bbox": [ + 48, + 454, + 287, + 497 + ], + "type": "text", + "content": "[56] Jiwen Yu, Xuanyu Zhang, Youmin Xu, and Jian Zhang. CRoSS: Diffusion model makes controllable, robust and secure image steganography. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 498, + 287, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 498, + 287, + 564 + ], + "spans": [ + { + "bbox": [ + 48, + 498, + 287, + 564 + ], + "type": "text", + "content": "[57] Han Zhang, Tao Xu, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, and Dimitris N Metaxas. Stackgan: Text to photo-realistic image synthesis with stacked generative adversarial networks. 
In Proceedings of the IEEE international conference on computer vision, pages 5907-5915, 2017. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 566, + 287, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 566, + 287, + 609 + ], + "spans": [ + { + "bbox": [ + 48, + 566, + 287, + 609 + ], + "type": "text", + "content": "[58] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 610, + 287, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 610, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 48, + 610, + 287, + 665 + ], + "type": "text", + "content": "[59] Qinsheng Zhang, Jiaming Song, Xun Huang, Yongxin Chen, and Ming-Yu Liu. Diffcollage: Parallel generation of large content with diffusion models. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10188-10198. IEEE, 2023. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 666, + 287, + 710 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 666, + 287, + 710 + ], + "spans": [ + { + "bbox": [ + 48, + 666, + 287, + 710 + ], + "type": "text", + "content": "[60] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 1, 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6923" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/15eb225d-3032-419c-84b0-35d6ec576cbc_content_list.json b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/15eb225d-3032-419c-84b0-35d6ec576cbc_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..1435525e79d0534d04c8779a1507ee6ab73d0d34 --- /dev/null +++ b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/15eb225d-3032-419c-84b0-35d6ec576cbc_content_list.json @@ -0,0 +1,1453 @@ +[ + { + "type": "text", + "text": "360Loc: A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries", + "text_level": 1, + "bbox": [ + 78, + 130, + 890, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Huajian Huang $^{1*}$ Changkun Liu $^{1*}$ Yipeng Zhu $^{1}$ Hui Cheng $^{2}$ Tristan Braud $^{1}$ Sai-Kit Yeung $^{1}$ $^{1}$ The Hong Kong University of Science and Technology * equal contribution Sun Yat-sen University", + "bbox": [ + 86, + 202, + 898, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{hhuangbg, cliudg, yzhudg}@connect.ust.hk, chengh9@mail.sysu.edu.cn, {braudit, saikit}@ust.hk", + "bbox": [ + 96, + 258, + 887, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ 
+ 233, + 309, + 313, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Portable $360^{\\circ}$ cameras are becoming a cheap and efficient tool to establish large visual databases. By capturing omnidirectional views of a scene, these cameras could expedite building environment models that are essential for visual localization. However, such an advantage is often overlooked due to the lack of valuable datasets. This paper introduces a new benchmark dataset, 360Loc, composed of $360^{\\circ}$ images with ground truth poses for visual localization. We present a practical implementation of $360^{\\circ}$ mapping combining $360^{\\circ}$ images with lidar data to generate the ground truth 6DoF poses. 360Loc is the first dataset and benchmark that explores the challenge of cross-device visual positioning, involving $360^{\\circ}$ reference frames, and query frames from pinhole, ultra-wide FoV fisheye, and $360^{\\circ}$ cameras. We propose a virtual camera approach to generate lower-FoV query frames from $360^{\\circ}$ images, which ensures a fair comparison of performance among different query types in visual localization tasks. We also extend this virtual camera approach to feature matching-based and pose regression-based methods to alleviate the performance loss caused by the cross-device domain gap, and evaluate its effectiveness against state-of-the-art baselines. We demonstrate that omnidirectional visual localization is more robust in challenging large-scale scenes with symmetries and repetitive structures. These results provide new insights into 360-camera mapping and omnidirectional visual localization with cross-device queries. Project Page and dataset: https://huajianup.github.io/research/360Loc/.", + "bbox": [ + 76, + 342, + 472, + 780 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 813, + 209, + 829 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Visual localization refers to predicting the 6DoF absolute pose (translation and rotation) of query images in a known scene. Accurate visual localization has wide applications in augmented reality (AR), navigation, and robotics.", + "bbox": [ + 75, + 839, + 468, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Over the last decade, many visual localization methods have been proposed, including feature matching-based approaches [17, 33, 42, 45, 54], scene coordinate regression [5-7] and absolute pose regressors (APRs) [23, 24, 49]. Much of this progress has been driven by the availability of numerous datasets and benchmarks targeting different challenges, as shown in Table 1. However, existing methods and datasets focus on localization and mapping using pinhole images. Although the merits of $360^{\\circ}$ camera on visual perception have been recognized [22, 60, 62], the application of $360^{\\circ}$ cameras for visual localization is still under-explored. Recently, SensLoc [61] started to apply $360^{\\circ}$ cameras to facilitate data collection, but their pipeline cannot perform omnidirectional localization directly from the $360^{\\circ}$ images.", + "bbox": [ + 496, + 310, + 893, + 521 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper introduces 360Loc, a new challenging benchmark dataset to facilitate research on omnidirectional visual localization. 
The dataset contains $360^{\\circ}$ images captured in diverse campus-scale indoor and outdoor environments, featuring highly symmetrical and repetitive features, as well as interference of dynamic objects. To capture this dataset, we present a practical pipeline using a portable 360-cameras platform to obtain reliable pose estimations of $360^{\\circ}$ cameras as ground truth. Although $360^{\\circ}$ cameras present significant advantages for capturing reference data, real-life applications applying visual localization often rely on traditional cameras. Examples include robots equipped with fisheye cameras and phone-based AR applications using the embedded pinhole camera. This raises the problem of cross-device visual localization on image databases captured with $360^{\\circ}$ cameras. We thus supplement the reference database composed of $360^{\\circ}$ images with query frames including pin-hole, fisheye and $360^{\\circ}$ cameras.", + "bbox": [ + 496, + 522, + 895, + 792 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We introduce the concept of virtual camera to generate high-quality lower-FoV images with different camera parameters from $360^{\\circ}$ images. This enables a fair comparison of performance among queries from different devices in cross-device visual localization. We adapt existing feature-matching-based methods and APRs to support $360^{\\circ}$ image queries and benchmark these methods for 360-based cross-", + "bbox": [ + 496, + 795, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "22314", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/db604ce410d40d69b14afc1112843cff89686cce30fc31d6967b76f9a479c2a0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 83, + 89, + 890, + 295 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b04c5ebe9bb3f7b53d057b8ed2eecf41bd550b802b96fd8124e25e075458ecc4.jpg", + "image_caption": [ + "Figure 1. Overview of dataset collection and ground truth generation: 1) Use the platform to collect $360^{\\circ}$ images and frame-by-frame point clouds. Obtain real-time camera poses; 2) Apply optimization methodology to achieve data registration, resulting in a globally reconstructed point cloud model. Then, align the models in daytime and nighttime to get consistent poses; 3) Perform cropping to get virtual camera images and generate corresponding depth images. As a result, 360Loc takes advantage of $360^{\\circ}$ images for efficient mapping while providing query images in five different camera models in order to analyze the challenge of cross-domain visual localization." 
+ ], + "image_footnote": [], + "bbox": [ + 83, + 297, + 271, + 383 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/49886da25d424f9dd2d7cd4d9c452062c8af942c39c265d2b7ac498dce54b55d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 274, + 297, + 452, + 383 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5a0f63ab7919017480006a31cc712dd989bce962a717e19ed0f54edc146729a3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 454, + 297, + 890, + 383 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "device visual localization. Since different cameras present different imaging patterns, the cross-device domain gap is expected to lead to performance loss. We extend the virtual camera approach to data augmentation for end-to-end solutions such as image retrieval (IR) and APRs.", + "bbox": [ + 75, + 496, + 468, + 571 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By conducting exhaustive evaluations, we demonstrate the advantages of $360^{\\circ}$ cameras in reducing ambiguity in visual localization on scenes featuring symmetric or repetitive features. We also show improvements against state-of-the-art (SOTA) baselines using the virtual camera method for cross-device visual localization on images databases captured with $360^{\\circ}$ cameras. These results provide novel insights on mapping using $360^{\\circ}$ images, enhancing the anti-ambiguity capability of query images, reducing domain gap cross-device in visual localization, and improving the generalization ability of APRs by applying virtual cameras.", + "bbox": [ + 75, + 575, + 468, + 743 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contribution can be summarized as follows:", + "bbox": [ + 94, + 746, + 416, + 760 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a practical implementation of $360^{\\circ}$ mapping combining lidar data with $360^{\\circ}$ images for establishing the ground truth 6DoF poses.", + "- A virtual camera approach to generate high-quality lower-FoV images with different camera parameters from $360^{\\circ}$ views.", + "- A novel dataset for cross-device visual localization based on $360^{\\circ}$ reference images with pinhole, fisheye, and $360^{\\circ}$ query images." + ], + "bbox": [ + 76, + 763, + 468, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Demonstration of our approach's efficacy over state-of-the-art solutions for visual localization using $360^{\\circ}$ image databases, resulting in decreased localization ambiguity, reduced cross-device domain gap, and improved generalization ability of APRs.", + "bbox": [ + 500, + 496, + 890, + 571 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related work", + "text_level": 1, + "bbox": [ + 500, + 590, + 638, + 607 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Visual Localization", + "text_level": 1, + "bbox": [ + 500, + 618, + 684, + 633 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Structure-based methods predict camera poses by establishing 2D-3D correspondences indirectly with local feature extractors and matchers [16, 35, 42, 43, 52, 55] or directly with scene coordinate regression [5-7]. HLoc [42, 43] pipeline scales up to large scenes using image retrieval [1, 3, 18, 20] as an intermediate step, which achieves SOTA accuracy on many benchmarks. 
This type of approach usually supports pinhole cameras with different intrinsic parameters. However, the performance of $360^{\\circ}$ and fisheye cameras has not been evaluated before due to the lack of support for $360^{\\circ}$ cameras in the Structure from Motion (SfM) tools like COLMAP [45] and the lack of datasets for fisheye and $360^{\\circ}$ cameras. [25-27] are point-cloud-based panorama localization methods for $360^{\\circ}$ queries but they do not consider cross-device visual localization.", + "bbox": [ + 496, + 643, + 890, + 869 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Absolute Pose Regressors (APRs) are end-to-end learning-based methods that directly regress the absolute camera", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "22315", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "pose from input images without the knowledge of 3D models and establish 2D-3D correspondences. APRs [4, 8, 12, 13, 23, 24, 36, 37, 49, 59] provide faster inference than structure-based methods at the cost of accuracy and robustness [47]. Besides, APRs have generally only been tested on the [9], 7Scenes [50], and Cambridge Landmarks [24] datasets in previous studies. A notable characteristic of these datasets is that the training set and test set images were taken from the same camera. In this paper, we enhance cross-device pose regression for APRs by introducing virtual cameras as a data augmentation technique.", + "bbox": [ + 76, + 90, + 472, + 257 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Datasets", + "text_level": 1, + "bbox": [ + 76, + 271, + 179, + 286 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The existing dataset has the following limitations. 1). Most datasets [9, 10, 24, 50, 54, 58] do not consider the need for cross-device localization, i.e., query images come from the same camera. Even though some datasets [11, 14, 30, 44, 46, 48, 53, 61] take into account cross-device localization, these devices are only pinhole cameras with different camera intrinsic parameters and do not have particularly large domain-gaps. Compared to [32], our pinhole and fisheye images are extracted from $360^{\\circ}$ images via virtual cameras, which makes less demands on the device and allows for a fair and more flexible comparison of the effects of different FoVs. In this paper, our 360Loc datasets provide five kinds of queries from pinhole, fisheye and $360^{\\circ}$ cameras to promote the research of cross-device localization. 2). Now there is no 6DoF visual localization dataset and benchmark considering $360^{\\circ}$ reference images and $360^{\\circ}$ query images, even though [2, 25, 38] contain $360^{\\circ}$ images with 6DoF pose labels, they are not standard visual localization datasets with independent mapping/reference sequences and query sequences like datasets in Table 1. Other datasets [11, 61] use $360^{\\circ}$ cameras for data collection, in the end they cropped $360^{\\circ}$ to perspective images and then tailor these images to the classical visual localization pipeline of pinhole cameras. The academic community is mainly driven by benchmarks where all training, reference, and query images are pinhole images because they rely on SfM tools [45] which does not support $360^{\\circ}$ cameras to obtain ground-truth (GT) and get sparse 3D point cloud models for recovering camera poses. 
However, we note that the $360^{\\circ}$ camera can cover the scene with greater efficiency than normal pinhole cameras with narrow Field-of-View (FoV), which makes $360^{\\circ}$ images particularly suitable as reference images. 3) Although the current dataset has explored the challenges of visual localization from various aspects such as weather variations, daynight transitions, scene changes, and moving individuals and objects [24, 30, 44, 46, 58, 61], there is still insufficient research specifically targeting highly ambiguous environments which contain symmetries, repetitive structures and insufficient textures. Only two indoor datasets [9, 53]", + "bbox": [ + 76, + 296, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/5a21dc87b08877d924835e56c6a68dbaca2e7f703bb022dd57a74f2331ae6cad.jpg", + "image_caption": [ + "Figure 2. The four scenes in 360Loc, all four scenes contain symmetrical, repetitive structures and moving objects. The camera trajectories are visualized as spheres." + ], + "image_footnote": [], + "bbox": [ + 506, + 90, + 887, + 252 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and LaMAR [44] consider challenges in ambiguous environments. In this paper, we studied 4 ambiguous scenes from both indoor and outdoor environments with a scale much larger than dataset [9] (See Figure 2). We conduct exhaustive assessments of image retrieval, local matching localization, and absolute pose regression to show that queries from the $360^{\\circ}$ camera are harder to obtain plausible solutions than other queries from cameras with narrower FoV.", + "bbox": [ + 498, + 330, + 890, + 452 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. The 360Loc Dataset", + "text_level": 1, + "bbox": [ + 500, + 465, + 691, + 479 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The 360Loc dataset contains 4 locations from a local university. Figure 2 displays the reference point cloud and example frames from each scene. Atrium is inside a building with a surrounding structure that exhibits a high degree of symmetry and repetition, making it a highly ambiguous environment. Concourse is a large indoor scene with many moving people, which can be used for evaluating the robustness of any localization algorithm in scenes with many moving objects. Piatrium is a scene containing both indoor Atrium and outdoor environments, covering an outdoor piazza with coffee shops, bookstores, and souvenir shops. Hall is a modern building of a student dormitory.", + "bbox": [ + 498, + 489, + 890, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 360 Mapping Platform", + "text_level": 1, + "bbox": [ + 500, + 681, + 710, + 696 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We utilized the handheld multimodal data acquisition platform depicted in Figure 1 for data collection. This platform incorporates a $360^{\\circ}$ camera, a Velodyne VLP-16 multi-line lidar, an NUC mini-computer, and a display screen. Figure 1 also illustrates the relative relationship among the $360^{\\circ}$ camera coordinate system $\\mathbf{O}_{\\mathrm{c}}$ -XYZ, the lidar coordinate system $\\mathbf{O}_{\\mathrm{l}}$ -XYZ as well as the world coordinate $\\mathbf{O}_{\\mathrm{w}}$ -XYZ. The portable 360 camera equipped on this device can capture high-resolution omnidirectional images with a resolution of $6144 \\times 3072$ (2:1 aspect ratio). 
It also features a built-in six-axis gyroscope that provides stabilization support, making it suitable for handheld mobile data capture. The Velodyne VLP-16 multi-line lidar has", + "bbox": [ + 498, + 703, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "22316", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/64616107b0ee699395a44ca51e2990724bd19ce9bdd7a53fe865f446ab0eed11.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset | Scale and Environment | Challenges | Reference/Query type | Groundtruth Solution | Accuracy
7Scenes [50] | Small Indoor | None | pinhole / pinhole | RGB-D | ≈ cm
RIO10 [58] | Small Indoor | Changes | pinhole / pinhole | VIO | > dm
Baidu Mall [53] | Medium Indoor | People, Ambiguous | pinhole / pinhole | lidar+Manual | ≈ dm
Naver Labs [30] | Medium Indoor | People, Changes | pinhole / pinhole | lidar+SfM | ≈ dm
InLoc [54] | Medium Indoor | None | pinhole / pinhole | lidar+Manual | > dm
AmbiguousLoc [9] | Small Indoor | Ambiguous | pinhole / pinhole | SLAM | ≈ cm
Aachen [46] | Large Outdoor | People, Day-Night | pinhole / pinhole | SfM | > dm
Cambridge [24] | Medium Outdoor | People, Weather | pinhole / pinhole | SfM | > dm
San Francisco [11] | Large Outdoor | People, Construction | pinhole / pinhole | SfM+GPS | ≈ m
NCLT [10] | Medium Outdoor + Indoor | Weather | pinhole / pinhole | GPS+SLAM+lidar | ≈ dm
ADVIO [14] | Medium Outdoor + Indoor | People | pinhole / pinhole | VIO+Manual | ≈ m
ETH3D [48] | Medium Outdoor + Indoor | None | pinhole / pinhole | lidar+Manual | ≈ mm
LaMAR [44] | Medium Outdoor + Indoor | People, Weather, Day-Night, Construction, Changes, Ambiguous | pinhole / pinhole | lidar+SfM+VIO | ≈ cm
SensLoc [61] | Large Outdoor | People, Weather, Day-Night, Construction, Changes | pinhole / pinhole | SL+VIO+RTK+Gravity | < dm
360Loc (ours) | Medium Outdoor + Indoor | People, Weather, Day-Night, Construction, Changes, Ambiguous | 360 / (360 + pinhole + fisheye) | lidar+VIO | ≈ cm
", + "bbox": [ + 86, + 88, + 880, + 287 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/5dc53a18d7cebc7fc1336a8883adbc08eaf5bf8557fabc99a4e01a10d3d68356.jpg", + "table_caption": [ + "Table 1. Overview of popular visual localization datasets. No dataset, besides ours, consider $360^{\\circ}$ images as reference and query frames from pinhole, ultra-wide FoV fisheye, and $360^{\\circ}$ cameras." + ], + "table_footnote": [], + "table_body": "
Symbol | Name | Field of View | Resolution | Type
c0 | 360 | 360° | 6144×3072 | reference/query
c1 | fisheye1 | 120° | 1280×1024 | query
c2 | fisheye2 | 150° | 1280×1024 | query
c3 | fisheye3 | 195° | 1280×1024 | query
c4 | pinhole | 85° | 1920×1200 | query
", + "bbox": [ + 107, + 344, + 431, + 433 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/11966165d1a0d4bd16dd6e085a06137ba23aadacd68190719a8b45209749a8fd.jpg", + "table_caption": [ + "Table 2. The representation and parameters of 5 cameras." + ], + "table_footnote": [], + "table_body": "
Scene | # Frames Reference (360) | # Frames Query (day / night): 360 | Pinhole | Fisheye1 | Fisheye2 | Fisheye3 | Spatial Extent (m)
Concourse | 491 | 593/514 | 1186/1028 | 1186/1028 | 1186/1028 | 1186/1028 | 93 × 15
Hall | 540 | 1123/1061 | 2246/2122 | 2246/2122 | 2246/2122 | 2246/2122 | 105 × 52
Atrium | 581 | 875/1219 | 1750/2438 | 1750/2438 | 1750/2438 | 1750/2438 | 65 × 36
Piatrium | 632 | 1008/697 | 2016/1394 | 2016/1394 | 2016/1394 | 2016/1394 | 98 × 70
", + "bbox": [ + 78, + 465, + 468, + 536 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 3. 360Loc dataset description.", + "bbox": [ + 163, + 540, + 380, + 554 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "a FoV of $360^{\\circ} \\times 30^{\\circ}$ , angular resolution of $0.2^{\\circ} \\times 2.0^{\\circ}$ , and rotation rate of $10\\mathrm{Hz}$ , offering a comprehensive $360^{\\circ}$ environmental view. Regarding the calibration of the extrinsic poses between the lidar and the $360^{\\circ}$ camera, we employed a calibration toolbox [29] that applies to both lidar and camera projection models. This toolbox utilizes the SuperGlue [43] image matching pipeline to establish 2D-3D correspondences between the lidar and camera image. We perform pseudo-registration by synchronizing the two data modalities, images, and point clouds. Eventually, we use graph-based SLAM techniques for continuous pose estimations. In the four scenes, a total of 18 independent sequences of $360^{\\circ}$ images were captured (12 daytime, and 6 nighttime), resulting in a total number of 9334 images. For each scene, we selected a specific sequence captured during the daytime as the reference images, while the remaining images were defined as query images of the $360^{\\circ}$ image type. We provide more details and show why $360^{\\circ}$ mapping is superior to pinhole SfM in ambiguous scenes with repetitive and symmetric structures in the supplementary material.", + "bbox": [ + 75, + 583, + 472, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/60b85e647fd37d209120f74b536d02e8c0e48bff2ad2d0d153036a52694d43f3.jpg", + "image_caption": [ + "Figure 3. Illustration of obtaining virtual camera images through random poses and image cropping." + ], + "image_footnote": [], + "bbox": [ + 532, + 345, + 854, + 536 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1.1 Cross-device Queries", + "text_level": 1, + "bbox": [ + 500, + 594, + 700, + 609 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To enable a rigorous comparison of the difference in the performance of different FoV queries for visual localization tasks, we created four virtual cameras with diverse FoV from $360^{\\circ}$ cameras, which are shown in Figure 2. Given a $360^{\\circ}$ image $\\mathcal{I}_{c_0}$ , the corresponding virtual camera with preconfigured intrinsic parameters is extracted by", + "bbox": [ + 496, + 617, + 890, + 709 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {I} _ {c _ {n}} = \\Psi_ {c _ {n}} \\left(\\mathcal {I} _ {c _ {0}}\\right) = \\pi_ {c _ {n}} ^ {- 1} \\left(\\pi_ {c _ {0}} \\left(\\boldsymbol {R} \\mathcal {I} _ {c _ {0}}\\right)\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 565, + 719, + 890, + 739 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\pi_{c_n}$ denote the projection function of virtual camera and $\\pi_{c_0}$ is the projection function of $360^{\\circ}$ camera. $R\\in SO(3)$ is a random relative rotation matrix to increase the diversity of views representing the scenes. Moreover, the inversed operation $\\Psi_{c_n}^{-1}$ can convert the $c_{n}$ image back to a $360^{\\circ}$ image. As reported in Table 2, the virtual cameras include an undistorted pinhole model with $85^{\\circ}$ FoV and three fisheye cameras in Dual Sphere mode [56] with $120^{\\circ}$ , $150^{\\circ}$ , and $195^{\\circ}$ FoV respectively. 
Table 3 presents the number of image frames in the 360Loc dataset.", + "bbox": [ + 496, + 750, + 893, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "22317", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Ground Truth Generation", + "text_level": 1, + "bbox": [ + 76, + 90, + 313, + 104 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Besides the graph-based optimization in SLAM, we designed a set of offline optimization strategies to further improve the accuracy of camera pose estimation. After the acquisition of precise dense point cloud reconstructions and poses of $360^{\\circ}$ cameras, an Iterative Closest Point (ICP) algorithm is applied to align models between reference and the query sequences in the same scene. Moreover, we reconstructed the mesh model of the scenes and generated corresponding depth maps of $360^{\\circ}$ cameras.", + "bbox": [ + 75, + 113, + 468, + 250 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Bundle Adjustment (BA) of lidar mapping. Incremental map construction can suffer from accumulating errors due to environmental degradation. We utilized a BA framework based on feature points extracted from lidar to refine the map and the poses. The optimization process involved minimizing the covariance matrix to constrain the distances between feature points and edge lines or plane features that are mutually matched.", + "bbox": [ + 75, + 250, + 468, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "First, we utilize an octree data structure to perform adaptive voxelization-based feature extraction. In this method, the point cloud map is segmented into voxels of predetermined size. Each voxel is checked to determine if its points $P_{u}^{f}$ lie on a plane or a line, where $u \\in \\{1,2,\\dots ,U\\}$ , obtained from the $u$ -th frame of lidar scans. If not, the voxel is recursively subdivided using an octree structure until each voxel contains points $P_{u}^{f}$ belonging to the same feature. Let's assume that the pose of the lidar in each frame is $\\pmb{\\eta} = \\{\\pmb{\\eta}_1,\\pmb{\\eta}_2,\\dots ,\\pmb{\\eta}_M\\}$ , where $\\pmb{\\eta}_{u} = (R_{u},t_{u}|R_{u} \\in SO(3), t_{u} \\in \\mathbb{R}^{3})$ . In that case, the feature points in the global map can be represented as follows:", + "bbox": [ + 76, + 369, + 468, + 551 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {P} _ {u} = \\boldsymbol {R} _ {u} \\times \\boldsymbol {P} _ {u} ^ {f} + \\boldsymbol {t} _ {u}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 196, + 556, + 468, + 574 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After simplifying the lidar map to edge or plane features, the process of BA becomes focused on determining the pose $\\pmb{\\eta}$ and the location of the single feature, which can be represented as $(\\pmb{n}_f,\\pmb{q})$ , where $\\pmb{q}$ represents the location of a specific feature, $\\pmb{n}_f$ is the direction vector of an edge line or the normal vector of a plane. 
To minimize the distance between each feature point and the corresponding feature, we can utilize the BA:", + "bbox": [ + 75, + 580, + 468, + 700 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\boldsymbol {\\eta} ^ {*}, \\boldsymbol {n} _ {f} ^ {*}, \\boldsymbol {q} ^ {*}\\right) = \\underset {\\boldsymbol {\\eta}, \\boldsymbol {n} _ {f}, \\boldsymbol {q}} {\\arg \\min } \\frac {1}{U} \\sum_ {u = 1} ^ {U} \\left(\\boldsymbol {n} _ {f} ^ {T} \\left(\\boldsymbol {P} _ {\\boldsymbol {u}} - \\boldsymbol {q}\\right)\\right) ^ {2}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 705, + 468, + 744 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "It has been proved that when the plane's normal vector is set to the minimum eigenvector, and $\\mathbf{q}$ is set to the centroid of the feature, i.e. $\\mathbf{q} = \\hat{\\mathbf{P}} = \\frac{1}{U}\\sum_{u=1}^{U}\\mathbf{P}_{u}$ , Eq. 3 reaches its minimum value. Additionally, the BA problem in lidar mapping has a closed-form solution that is independent of the features $(\\mathbf{n}_f,\\mathbf{q})$ [34]. It can be simplified to the following problem:", + "bbox": [ + 75, + 750, + 468, + 872 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\eta} ^ {*} = \\underset {\\boldsymbol {\\eta}} {\\arg \\min } \\lambda_ {\\min } (\\boldsymbol {A}), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 878, + 468, + 902 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/0c7f60c6616090658bc51805166bafe8f52d16397e1ad2cdb4eef4425ed8a6f7.jpg", + "image_caption": [ + "Figure 4. Overview of GT generation." + ], + "image_footnote": [], + "bbox": [ + 506, + 89, + 885, + 256 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where, $\\lambda$ represents the eigenvalue of $A$ , and", + "bbox": [ + 500, + 308, + 795, + 323 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {A} = \\frac {1}{U} \\sum_ {u = 1} ^ {U} \\left(\\boldsymbol {P} _ {u} - \\hat {\\boldsymbol {P}}\\right) \\left(\\boldsymbol {P} _ {u} - \\hat {\\boldsymbol {P}}\\right) ^ {T}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 335, + 890, + 376 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Now, the BA problem is simplified by adjusting the lidar pose $\\pmb{\\eta}$ to minimize the smallest eigenvalue $\\lambda_3$ of the point covariance matrix $\\mathbf{A}$ defined in Eq. 5. By employing this strategy, we refined the pose $\\pmb{\\eta}$ of each frame and the edge or plane features in the lidar map.", + "bbox": [ + 496, + 387, + 890, + 462 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Refined cameras poses. The poses of $360^{\\circ}$ camera obtained from online SLAM are further optimized by the registration with respect to the dense refined point cloud model. Taking the pre-calibrated extrinsic parameters as the initial guess, we used the RANSAC to refine the lidar-camera transformation [29]. This registration process is based on the normalized information distance (NID) [51], which serves as a mutual information-based cross-modal distance metric. Finally, we align the reference models and query models into the same coordinate system to generate the ground truth for the query sequences. Specifically, we utilize the CloudCompare toolbox [19] to manually select feature points across multiple point cloud models as initial values. Then, we employ the ICP algorithm to register the point cloud models together. 
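To make the plane-feature cost of Eqs. (2)-(5) concrete, the snippet below transforms each scan's feature points with candidate lidar poses and scores the feature by the smallest eigenvalue of the resulting point covariance. It is a simplified sketch with assumed variable names, not the BA implementation used for the dataset: the full BA sums this cost over all edge and plane voxels and minimizes it jointly over every pose (cf. BALM [34]), and the sketch pools all points of a scan rather than one representative point per frame.

```python
import numpy as np

def plane_feature_cost(points_per_scan, rotations, translations):
    """points_per_scan: list of (N_u, 3) arrays of feature points P_u^f seen in each lidar scan.
    rotations, translations: per-scan poses (R_u, t_u). Returns lambda_min(A), the cost of Eq. (4)."""
    # Eq. (2): move every scan's points into the world frame with its pose.
    world = [pts @ R.T + t for pts, R, t in zip(points_per_scan, rotations, translations)]
    P = np.concatenate(world, axis=0)
    centroid = P.mean(axis=0)                 # q = P_hat, the feature centroid
    diffs = P - centroid
    A = diffs.T @ diffs / len(P)              # point covariance matrix of Eq. (5)
    return np.linalg.eigvalsh(A)[0]           # smallest eigenvalue: small when the points are planar
```

Minimizing the sum of these smallest eigenvalues over all voxels with respect to the poses flattens each plane feature, which is the objective stated in Eq. (4).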
Afterwards, we employed a practical approach to volumetric surface reconstruction called Truncated Signed Distance Functions (TSDFs) [57] to achieve the reconstruction from point clouds to meshes with an efficient and sparse data structure called Voxel Data Base (VDB) [39]. At this stage, we can utilize the ray-mesh intersection method [15] to cast rays from cameras onto the mesh model. By intersecting the rays with the mesh, we can determine the depths of the corresponding points on the mesh surface. After a series of joint optimizations between multiple modalities, we have generated a set of GT data. Figure 2 shows some instances. This GT data includes reference images $\\mathcal{I}_{c_0}^r$ , the depth maps $D_{c_0}^r$ of the reference images, and the reference maps containing the point cloud models $\\mathcal{P}$ , mesh models $M$ , as well as camera pose odom", + "bbox": [ + 496, + 463, + 892, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "22318", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "etry $\\{\\xi \\}$ . Figure 4 summarizes the GT generation.", + "bbox": [ + 76, + 90, + 410, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Omnidirectional Visual Localization", + "text_level": 1, + "bbox": [ + 76, + 119, + 405, + 137 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We extend the current feature-matching-based and absolute pose regression pipelines for omnidirectional visual localization. Given a query image $\\mathcal{I}^q$ in any camera model, we seek to estimate its poses within the environment modeled by $360^{\\circ}$ images $\\mathbf{I}^r$ . To minimize the domain gap between the query image from $c_{1}, c_{2}, c_{3}, c_{4}$ and reference images, we explore visual cameras (VC) in two ways: VC1, remapping query images to 360 domain using $\\Psi_{c_n}^{-1}$ ; VC2, rectifying $360^{\\circ}$ images into queries' domains using $\\Psi_{c_n}$ .", + "bbox": [ + 75, + 146, + 468, + 282 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Feature-matching-based Localization", + "text_level": 1, + "bbox": [ + 76, + 292, + 398, + 309 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Most feature-matching-based techniques first perform IR to reduce the search space before estimating the pose.", + "bbox": [ + 76, + 316, + 468, + 347 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.1 Image Retrieval", + "text_level": 1, + "bbox": [ + 76, + 367, + 241, + 382 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For method VC1, if query $\\mathcal{I}^q$ captured from $c_{0}$ , we retrieve the $k$ most similar images from $\\mathbf{I}^r$ by calculating and sorting simi $_{\\mathrm{cos}}(\\mathcal{F}(\\mathcal{I}^q), \\mathcal{F}(\\mathcal{I}^r))$ , $\\mathcal{I}^r \\in \\mathbf{I}^r$ and $\\mathcal{F}(\\cdot)$ denotes the function to map each image to the global feature domain. simi $_{\\mathrm{cos}}(\\cdot)$ is cosine similarity for two feature embeddings. 
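As a small illustration of the retrieval scoring just defined, the sketch below ranks 360° reference images by cosine similarity against a query descriptor; it assumes the global features F(·) (e.g., NetVLAD vectors) have already been extracted, and the helper name and array shapes are invented for the example.

```python
import numpy as np

def retrieve_topk(query_feat, ref_feats, k=10):
    """query_feat: (D,) global descriptor of the query; ref_feats: (N, D) descriptors of the 360 references."""
    q = query_feat / np.linalg.norm(query_feat)
    refs = ref_feats / np.linalg.norm(ref_feats, axis=1, keepdims=True)
    sims = refs @ q                       # cosine similarity against every reference image
    top = np.argsort(-sims)[:k]           # indices of the k most similar 360 reference frames
    return top, sims[top]
```

Under VC2 the same scoring is evaluated against every descriptor in a reference's group and the maximum is kept, as formalized in Eq. (6) below.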
If query $\\mathcal{I}^q$ captured from $c_{1}, c_{2}, c_{3}, c_{4}$ , we then retrieve top- $k$ reference images based on simi $_{\\mathrm{cos}}(\\mathcal{F}(\\Psi_{c_n}^{-1}(\\mathcal{I}^q)), \\mathcal{F}(\\mathcal{I}^r)), \\mathcal{I}^r \\in \\mathbf{I}^r$ .", + "bbox": [ + 75, + 391, + 468, + 513 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In method VC2, we expand the global features for each $360^{\\circ}$ reference image by cameras $c$ including virtual pin-hole cameras forming a cube map and virtual fisheye cameras. We define the similarity score between $\\mathcal{I}^q$ and $\\mathcal{I}^r$ as:", + "bbox": [ + 75, + 513, + 468, + 573 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\max \\left(\\operatorname {s i m i} _ {\\cos} \\left(\\mathcal {F} \\left(\\mathcal {I} ^ {q}\\right), \\mathcal {G} _ {\\mathcal {F}} \\left(\\mathcal {I} ^ {r}\\right)\\right), \\right. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 589, + 468, + 606 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where global feature group of reference is $\\mathcal{G}_{\\mathcal{F}}(\\mathcal{I}^r) = \\{\\mathcal{F}(\\Psi_c(\\mathcal{I}^r)),\\ldots \\}$ . We use the highest similarity value calculated from $\\mathcal{F}(\\mathcal{I}^q)$ and $\\mathcal{G}_{\\mathcal{F}}(\\mathcal{I}^r)$ as the similarity score for each $\\mathcal{I}^r$ to ensure retrieve $k$ most similar $360^{\\circ}$ reference images because some rectified images are from the same $\\mathcal{I}^r$ . Note that we can eliminate the domain gap during the image retrieval step in this way.", + "bbox": [ + 75, + 614, + 468, + 719 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.2 Local Feature Matching and Pose Estimation", + "text_level": 1, + "bbox": [ + 76, + 739, + 444, + 755 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For each pinhole query frame, we retrieve relevant reference images, match their local features, leverage the depth maps $D_{c_0}$ to establish the 2D-3D correspondences, and finally estimate a pose with $\\mathrm{PnP + RANSAC}$ . Unlike [11, 61], we directly match query image with retrieved $360^{\\circ}$ reference images described in Section 4.1.1. For query images from $c_{0}, c_{1}, c_{2}, c_{3}$ , i.e., fisheye and $360^{\\circ}$ query frames, we utilize the function that calculates pose error in sphere camera model in OpenGV [28] library for $\\mathrm{PnP + RANSAC}$ .", + "bbox": [ + 75, + 763, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Absolute Pose Regression", + "text_level": 1, + "bbox": [ + 500, + 90, + 730, + 106 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "APRs train deep neural networks to regress the 6DoF camera pose of a query image.", + "bbox": [ + 498, + 114, + 890, + 143 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "PN. PoseNet (PN) is the first APR model. Since there is no open source code [23, 24], we follow the modification in [8, 36] and use ResNet34 [21] as the backbone network.", + "bbox": [ + 498, + 145, + 890, + 189 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "MS-T. MS-Transformer [49] is an APR model incorporating attention and implementing transformers as backbone. We note APR methods using our virtual camera method, VC2, as $\\mathbf{APR}^{vc2}$ . The difference between APR and $\\mathbf{APR}^{vc2}$ is the training stage. For APR baselines, the training set is $\\mathbf{I}^r$ . 
For $\\mathbf{APR}^{vc2}$ , they are trained with $360^\\circ$ images, cropped pinhole images, and cropped fisheye images, i.e., $\\mathbf{I}^r \\cup \\Psi_c(\\mathbf{I}^r)$ introduced in Section 4.1.1 and Eq. 1.", + "bbox": [ + 498, + 190, + 890, + 311 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "All APR models are implemented in Python using PyTorch [41]. During training, all input images are resized to $256 \\times 256$ and then randomly cropped to $224 \\times 224$ . For both PN and MS-T, we set an initial learning rate of $\\lambda = 10^{-4}$ and a batch size of 32 for 300 epochs of each scene. Training and evaluation in Section 5 are performed on an NVIDIA GeForce GTX 3090 GPU.", + "bbox": [ + 498, + 311, + 890, + 415 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Evaluation", + "text_level": 1, + "bbox": [ + 500, + 431, + 614, + 446 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We provide detailed results for each scene in the dataset and more settings in supplementary material.", + "bbox": [ + 498, + 458, + 890, + 488 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Image Retrieval", + "text_level": 1, + "bbox": [ + 500, + 498, + 658, + 513 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate global descriptors computed by NetVLAD [1], CosPlace [3], OpenIBL [18] and AP-GeM [20]. The query image is deemed correctly localized if at least one of the top $k$ retrieved database images is within $d = 5m$ from the ground truth position of the query for Concourse and $d = 10m$ for the other three scenes. The image retrieval results are shown in Table 4. Among all global feature descriptor methods, the $360^{\\circ}$ query exhibits the best precision and recall in most cases, while the pinhole query performs the worst. The remap method (VC1) provides limited improvement for pinhole queries but yields higher improvement for fisheye1, fisheye2, and fisheye3 queries. The reason is that the FoV of pinhole cameras is only $85^{\\circ}$ . Consequently, VC1 results in significant black borders when converting to a $360^{\\circ}$ image due to the limited coverage.", + "bbox": [ + 496, + 522, + 890, + 748 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The rectify method (VC2) significantly improves pin-hole, fisheye1, fisheye2, and fisheye3 queries by eliminating the domain gap in IR. However, the pinhole, fisheye1, and fisheye2 queries' recall and precision are still much lower than those of the $360^{\\circ}$ query. Only the query from fisheye3 (widest FoV) approaches the performance of $360^{\\circ}$ query. The domain gap mainly affects the precision and recall of fisheye3. Both remap (VC1) and crop (VC2) significantly improve IR performance for fisheye3. On the other hand, pinhole queries are more prone to being mistaken as error", + "bbox": [ + 496, + 750, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "22319", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/6349f03ae60a961836f7fcc2bd0d099af2615d353aab2468b696a1d23d6d1929.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Query | NetVLAD [1] | CosPlace [3] | OpenIBL [18] | AP-GeM [20]
 | R@1 | R@5 | P@5 | R@10 | P@10 | R@1 | R@5 | P@5 | R@10 | P@10 | R@1 | R@5 | P@5 | R@10 | P@10 | R@1 | R@5 | P@5 | R@10 | P@10
pinhole | 0.23 | 0.45 | 0.22 | 0.58 | 0.22 | 0.15 | 0.26 | 0.15 | 0.33 | 0.15 | 0.18 | 0.36 | 0.18 | 0.48 | 0.18 | 0.2 | 0.37 | 0.2 | 0.47 | 0.2
+VC1 | 0.24 | 0.45 | 0.24 | 0.57 | 0.23 | 0.21 | 0.33 | 0.21 | 0.41 | 0.21 | 0.21 | 0.39 | 0.21 | 0.5 | 0.2 | 0.25 | 0.42 | 0.25 | 0.53 | 0.24
+VC2 | 0.5 | 0.67 | 0.48 | 0.75 | 0.47 | 0.32 | 0.41 | 0.32 | 0.48 | 0.31 | 0.51 | 0.67 | 0.49 | 0.75 | 0.47 | 0.5 | 0.68 | 0.49 | 0.77 | 0.47
fisheye1 | 0.42 | 0.67 | 0.41 | 0.77 | 0.39 | 0.28 | 0.43 | 0.28 | 0.52 | 0.28 | 0.37 | 0.58 | 0.36 | 0.69 | 0.34 | 0.35 | 0.55 | 0.34 | 0.66 | 0.33
+VC1 | 0.51 | 0.72 | 0.49 | 0.8 | 0.47 | 0.36 | 0.48 | 0.35 | 0.56 | 0.34 | 0.52 | 0.7 | 0.5 | 0.79 | 0.48 | 0.43 | 0.62 | 0.42 | 0.72 | 0.4
+VC2 | 0.73 | 0.91 | 0.63 | 0.95 | 0.57 | 0.63 | 0.85 | 0.51 | 0.92 | 0.43 | 0.74 | 0.91 | 0.62 | 0.95 | 0.54 | 0.65 | 0.88 | 0.57 | 0.94 | 0.51
fisheye2 | 0.45 | 0.7 | 0.44 | 0.8 | 0.42 | 0.3 | 0.46 | 0.31 | 0.55 | 0.31 | 0.41 | 0.62 | 0.4 | 0.73 | 0.38 | 0.38 | 0.59 | 0.36 | 0.68 | 0.35
+VC1 | 0.54 | 0.74 | 0.52 | 0.83 | 0.49 | 0.37 | 0.49 | 0.36 | 0.57 | 0.35 | 0.56 | 0.73 | 0.54 | 0.81 | 0.51 | 0.46 | 0.65 | 0.45 | 0.74 | 0.43
+VC2 | 0.74 | 0.92 | 0.65 | 0.95 | 0.58 | 0.64 | 0.87 | 0.53 | 0.93 | 0.45 | 0.76 | 0.92 | 0.65 | 0.96 | 0.56 | 0.67 | 0.89 | 0.58 | 0.94 | 0.52
fisheye3 | 0.57 | 0.79 | 0.55 | 0.86 | 0.52 | 0.4 | 0.56 | 0.4 | 0.65 | 0.4 | 0.53 | 0.74 | 0.51 | 0.83 | 0.49 | 0.45 | 0.66 | 0.43 | 0.75 | 0.41
+VC1 | 0.63 | 0.81 | 0.61 | 0.88 | 0.58 | 0.48 | 0.61 | 0.48 | 0.68 | 0.47 | 0.67 | 0.82 | 0.65 | 0.88 | 0.61 | 0.55 | 0.73 | 0.53 | 0.81 | 0.51
+VC2 | 0.77 | 0.93 | 0.68 | 0.96 | 0.61 | 0.69 | 0.89 | 0.58 | 0.94 | 0.5 | 0.79 | 0.93 | 0.68 | 0.96 | 0.6 | 0.67 | 0.9 | 0.59 | 0.94 | 0.54
360 | 0.79 | 0.86 | 0.77 | 0.88 | 0.73 | 0.92 | 0.95 | 0.91 | 0.96 | 0.89 | 0.89 | 0.94 | 0.88 | 0.95 | 0.83 | 0.79 | 0.9 | 0.77 | 0.94 | 0.72
", + "bbox": [ + 99, + 88, + 874, + 311 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/fd3a704f2bb60f2215640277a6f5b1f8d157efc582757a2802a9ac3a696ee3e0.jpg", + "table_caption": [ + "Table 4. Image retrieval results based on $360^{\\circ}$ reference database average over four scenes, the recall, and precision for the top $k$ retrieved images, $k = 1,5,10$ . $\\#$ indicates the highest value of R@k and P@k for each device w and w/o virtual cameras (VC1, VC2). Best results for all devices of R@k and P@k are in bold with $\\#$ ." + ], + "table_footnote": [], + "table_body": "
 | NetVLAD [1] | CosPlace [3]
 | DISK+LG | SP+LG | SP+SG | DISK+LG | SP+LG | SP+SG
Query | Day | Night | Day | Night | Day | Night | Day | Night | Day | Night | Day | Night
pinhole | 6.0/11.3/24.6 | 1.7/4.4/10.3 | 8.0/14.9/30.9 | 2.2/5.5/13.5 | 8.4/15.2/30.7 | 2.3/5.6/12.3 | 4.2/7.8/18.0 | 1.6/3.5/8.6 | 4.8/10.2/22.1 | 1.9/4.7/11.1 | 5.4/10.4/21.1 | 2.1/4.7/10.4
+VC1 | 8.5/14.0/23.5 | 2.2/4.1/7.9 | 10.4/17.0/27.5 | 2.9/5.3/10.1 | 10.9/17.8/28.5 | 2.8/5.6/9.9 | 6.1/10.8/21.1 | 1.7/3.6/8.2 | 7.5/13.2/22.5 | 2.0/4.5/9.6 | 7.6/13.5/22.8 | 2.1/4.7/9.6
+VC2 | 14.2/22.2/35.5 | 4.1/7.8/13.6 | 19.8/29.7/42.9 | 6.1/10.4/16.9 | 21.6/33.2/49.7 | 5.9/11.0/18.4 | 8.0/13.1/23.5 | 2.5/4.6/9.1 | 10.7/16.4/26.6 | 3.0/5.7/11.4 | 11.6/18.5/30.5 | 3.5/6.8/12.8
fisheye1 | 1.6/4.4/17.7 | 0.5/1.8/7.4 | 1.9/5.4/20.1 | 0.7/2.3/10.5 | 1.6/4.7/18.4 | 0.5/1.9/8.2 | 0.8/2.5/11.8 | 0.4/1.4/5.8 | 1.0/3.5/13.0 | 0.5/1.4/8.2 | 0.9/3.4/12.1 | 0.3/1.4/7.0
+VC1 | 3.3/9.2/27.6 | 0.8/2.7/9.6 | 4.1/10.6/32.2 | 1.4/4.4/14.9 | 3.0/9.5/29.6 | 0.9/3.1/11.7 | 2.3/5.5/19.4 | 0.5/1.6/7.3 | 2.1/6.1/19.9 | 0.7/2.2/9.0 | 1.9/5.5/19.1 | 0.5/1.9/7.3
+VC2 | 3.9/10.5/33.0 | 1.0/4.0/14.6 | 4.3/12.4/38.2 | 1.9/6.4/21.8 | 3.6/11.0/34.5 | 1.1/5.3/19.4 | 2.5/6.9/25.3 | 0.8/2.8/12.2 | 2.8/8.2/29.0 | 1.3/4.6/18.0 | 2.1/7.1/26.7 | 1.0/4.0/16.2
fisheye2 | 1.6/4.9/20.9 | 0.5/2.0/8.7 | 1.9/6.7/23.2 | 0.8/3.0/11.8 | 1.7/5.2/19.5 | 0.7/2.5/9.9 | 1.3/3.5/14.2 | 0.4/1.6/6.9 | 1.2/3.8/15.2 | 0.5/1.5/9.1 | 1.2/3.9/12.9 | 0.6/1.6/7.2
+VC1 | 4.3/10.8/30.9 | 0.8/3.0/11.2 | 4.7/12.4/34.1 | 1.8/5.4/15.8 | 4.1/10.6/31.5 | 1.1/3.6/13.7 | 2.5/6.5/20.6 | 0.5/1.7/7.4 | 2.5/7.0/22.1 | 0.8/2.4/9.4 | 2.2/6.8/20.2 | 0.5/2.1/8.0
+VC2 | 4.3/11.0/34.4 | 1.1/4.7/17.3 | 5.1/14.0/41.1 | 2.0/7.2/24.8 | 3.7/11.5/36.8 | 1.5/5.9/21.2 | 2.8/7.3/27.1 | 0.8/2.9/13.4 | 2.9/8.9/32.0 | 1.6/5.3/20.1 | 2.5/8.0/27.9 | 1.1/4.2/17.7
fisheye3 | 3.8/9.5/29.8 | 1.0/3.6/13.8 | 4.0/10.5/31.6 | 1.3/4.6/16.4 | 3.4/9.1/28.4 | 0.8/3.8/13.8 | 2.5/6.3/21.9 | 0.6/2.4/10.1 | 2.8/7.2/22.3 | 0.9/2.9/12.4 | 2.0/5.9/20.0 | 1.3/4.2/15.0
+VC1 | 5.9/14.7/39.5 | 1.5/5.2/17.7 | 6.0/16.2/43.5 | 2.0/6.8/21.9 | 5.8/14.7/39.1 | 1.8/5.5/18.3 | 4.4/10.2/30.1 | 1.1/3.3/12.8 | 4.6/11.6/32.0 | 1.4/4.1/14.4 | 4.3/10.5/29.7 | 1.2/3.8/12.3
+VC2 | 5.2/13.9/41.8 | 2.1/6.5/22.5 | 5.9/16.5/46.3 | 2.5/8.6/29.1 | 5.4/14.2/40.5 | 2.1/7.3/25.9 | 4.3/9.8/34.6 | 1.7/5.2/19.5 | 4.7/12.6/36.8 | 2.2/7.1/23.8 | 3.8/10.5/32.5 | 1.6/5.1/20.7
360 | 17.1/30.8/66.1 | 8.5/20.1/47.5 | 18.2/34.6/64.2 | 7.0/18.7/45.3 | 15.8/31.2/60.4 | 7.0/17.8/42.8 | 17.6/31.8/68.1 | 8.7/22.0/56.0 | 18.7/34.9/68.1 | 7.3/20.0/53.4 | 16.6/32.6/65.7 | 7.1/18.7/50.4
", + "bbox": [ + 86, + 382, + 883, + 560 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. Local matching localization results. The average percentage of predictions with high (0.25m, $2^{\\circ}$ ), medium (0.5m, $5^{\\circ}$ ), and low (5m, $10^{\\circ}$ ) accuracy [46] (higher is better) over four scenes. # indicates the highest value for each device w and w/o virtual cameras (VC1, VC2) of each accuracy level. The best results for all devices of each accuracy level are in bold with # .", + "bbox": [ + 75, + 569, + 892, + 617 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "neous locations with similar structures due to their narrower FoV even there is no cross-device domain gap during IR by applying VC2 (Some figures in supplementary material).", + "bbox": [ + 75, + 643, + 470, + 691 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Visual Localization", + "text_level": 1, + "bbox": [ + 76, + 705, + 261, + 722 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We compare our approach with the following baselines in two categories: 1) Local feature matching pipelines tailored from HLoc [42], using different keypoint descriptors (Superpoint (SP) [16] and DISK [55]), and matchers (SuperGlue (SG) [43], follow-up SOTA LightGlue (LG) [31]). 2) The end-to-end APRs: PN [23, 24] and MS-T [49].", + "bbox": [ + 75, + 731, + 468, + 823 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Local feature matching: During local feature matching, all $360^{\\circ}$ images are cropped to $1228 \\times 614$ because of the tradeoff of time and computation. We report the average results over four scenes in Table 5. The $360^{\\circ}$ query achieves the best performance in three accuracy levels in most cases", + "bbox": [ + 75, + 824, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "across all IR, keypoint descriptors, and matchers settings. It is especially more robust in challenging nighttime conditions. VC1 and VC2 techniques improve the recall and precision of IR, increasing the accuracy of 2D-2D matching for all cameras. In most cases, the performance at the low accuracy level $(5m, 10^{\\circ})$ is correlated with the FoV, where a larger FoV results in higher performance. However, the pin-hole query with VC2 during IR performs comparably to the $360^{\\circ}$ queries at the high $(0.25m, 2^{\\circ})$ and median $(0.5m, 5^{\\circ})$ accuracy levels. In contrast, query frames from $c_{1}, c_{2}$ and $c_{3}$ demonstrate relatively lower performance at the high and medium accuracy levels.", + "bbox": [ + 496, + 643, + 890, + 827 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As observed in Table 4, different IR methods display different performances depending on the type of camera. We thus consider both NetVLAD and CosPlace in visual localization. In most cases, $360^{\\circ}$ query frames achieve higher", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "22320", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "accuracy with CosPlace while pinhole and fisheye query frames have lower accuracy than NetVLAD as shown in Table 5. These results match the precision and recall difference noted in Table 4. We believe that the FoV not only affects the robustness of IR but also has an impact on local 2D-2D matching performance. 
Pinhole queries suffer from erroneous matches due to interference from symmetrical and repetitive structures, while the larger FoV of fisheye and $360^{\\circ}$ query frames capture more unique visual features. We provide examples in the supplementary material.", + "bbox": [ + 75, + 90, + 472, + 242 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "APR: APRs cannot extrapolate well beyond the training set [40, 47]. cross-device queries further complicate this challenge by introducing an additional dimension of FoV. Due to the high efficiency of $360^{\\circ}$ mapping, the training set $\\mathbf{I}^r$ in 360Loc contains only around one-third of the images compared to datasets [24]. Figure 5 shows that when PN and MS-T are trained solely on $\\mathbf{I}^r$ with only $360^{\\circ}$ images, a smaller domain gap between the query and the $360^{\\circ}$ image yields a lower error. However, when we introduce images from virtual cameras for data augmentation, $\\mathrm{PN}^{vc2}$ and MS- $\\mathrm{T}^{vc2}$ exhibit significantly reduced translation and rotation errors across all queries, particularly during daytime. MS- $\\mathrm{T}^{vc2}$ reduces translation error by up to $79\\%$ and rotation error by up to $72\\%$ compared to MS-T. $\\mathrm{PN}^{vc2}$ displays similar improvement over PN. In most cases, except for $\\mathrm{PN}^{vc2}$ , s rotation error for the $360^{\\circ}$ queries during daytime, both the $360^{\\circ}$ and fisheye queries exhibit higher accuracy than the pinhole query on $\\mathrm{PN}^{vc2}$ and MS- $\\mathrm{T}^{vc2}$ . This suggests that a larger FoV still helps improve visual localization accuracy in challenging scenes. Another interesting finding is that even though the augmented training set $\\mathbf{I}^r \\cup \\Psi_c(\\mathbf{I}^r)$ , which includes virtual camera images, does not increase the number of $360^{\\circ}$ images, the error for the $360^{\\circ}$ query still decreases. This reduction is particularly noticeable in the case of translation errors during daytime. The result fully demonstrates the utility of employing virtual cameras for data augmentation.", + "bbox": [ + 75, + 243, + 472, + 652 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3. Analysis", + "text_level": 1, + "bbox": [ + 76, + 664, + 181, + 680 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Cross-device visual positioning presents significant challenges for IR, local matching, and APRs. Our VC1 and VC2 methods demonstrate practical enhancements in the performance of IR and APR for cross-device scenarios. However, it is essential to note that during the local matching process, the accuracy of matches and the recall and precision of IR for query frames from different cameras may not align perfectly. The chosen IR method and its training noticeably affect accuracy for similar cameras. Fisheye cameras exhibit better performance in IR compared to pinhole cameras. However, pinhole cameras outperform fisheye cameras for high accuracy and median accuracy levels in local matching. 
This is likely due to existing feature extraction and matching models lacking training data on $360^{\\circ}$ and fisheye", + "bbox": [ + 75, + 688, + 472, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/21c1a101e9d74cf3e9bf39c3e37a7244e02cfa1ea47cb78c1884559328819e15.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 87, + 696, + 172 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2365dd97b73b1cf7536dfc86daa8dbe38f6b210cb8b770a05402721a59c5619b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 696, + 88, + 872, + 172 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2d3e8e2a36dd03710f89023fc7e0eb829fc8eb8577459e039b7c2f10874df89c.jpg", + "image_caption": [ + "(a) Trans. (day)", + "(c) Trans. (night)", + "Figure 5. The average of median translation/rotation errors in $(m / ^{\\circ})$ over 4 scenes." + ], + "image_footnote": [], + "bbox": [ + 517, + 186, + 699, + 272 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/87b3ba1233e73dec1da977ae657f32a3189dbcfdc569c031b4207dde3f22d3d3.jpg", + "image_caption": [ + "(b) Rot. (day)", + "(d) Rot. (night)" + ], + "image_footnote": [], + "bbox": [ + 699, + 186, + 872, + 272 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "cameras, resulting in less accurate matching. We attribute the inferior performance of pinhole query frames at the low accuracy level to IR's insufficient recall and precision. Additionally, pinhole queries are more susceptible to interference when there are many repetitive and symmetrical features in the scene, even when the retrieved reference image is correct (some example figures in the supplementary material). By utilizing VC2 to augment IR and APR's training data, we eliminate the cross-device domain gap. We demonstrate that panoramic perspective and a larger FoV can significantly improve the performance of IR and APRs and find that query frames from $360^{\\circ}$ camera and ultra-wide FoV cameras are less prone to being misidentified as erroneous locations with similar structures. This result suggests the promising potential of fisheye and $360^{\\circ}$ cameras as viable sensors for localization tasks in indoor environments with low GPS accuracy.", + "bbox": [ + 496, + 354, + 890, + 611 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 626, + 619, + 641 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "360Loc is the first dataset and benchmark that explores the challenge of cross-device visual positioning, involving $360^{\\circ}$ reference frames, and query frames from pinhole, ultra-wide FoV fisheye, and $360^{\\circ}$ cameras. We first identified the absence of datasets with ground truth 6DoF poses for $360^{\\circ}$ images, and the limited research on cross-device localization and the robustness of different cameras in ambiguous scenes. To address these limitations, we build a dataset with $360^{\\circ}$ images as reference and query frames from pinhole, ultra-wide FoV fisheye camera and $360^{\\circ}$ cameras via a virtual camera solution. This method enables fair comparisons in cross-device visual localization tasks and helps reduce the domain gap between different cameras. 
By evaluating feature-matching-based and pose regression-based methods, we demonstrate the effectiveness of our virtual camera approach and the increased robustness of $360^{\\circ}$ cameras in visual localization for challenging and ambiguous scenes.", + "bbox": [ + 496, + 648, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "22321", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 118, + 174, + 133 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Relja Arandjelovic, Petr Gronat, Akihiko Torii, Tomas Pajdla, and Josef Sivic. Netvlad: Cnn architecture for weakly supervised place recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5297-5307, 2016. 2, 6, 7", + "[2] Iro Armeni, Sasha Sax, Amir R Zamir, and Silvio Savarese. Joint 2d-3d-semantic data for indoor scene understanding. arXiv preprint arXiv:1702.01105, 2017. 3", + "[3] Gabriele Berton, Carlo Masone, and Barbara Caputo. Rethinking visual geo-localization for large-scale applications. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4878-4888, 2022. 2, 6, 7", + "[4] Hunter Blanton, Connor Greenwell, Scott Workman, and Nathan Jacobs. Extending absolute pose regression to multiple scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2020. 3", + "[5] Eric Brachmann and Carsten Rother. Learning less is more - 6D camera localization via 3D surface regression. In CVPR, 2018. 1, 2", + "[6] Eric Brachmann and Carsten Rother. Visual camera relocalization from RGB and RGB-D images using DSAC. TPAMI, 2021.", + "[7] Eric Brachmann, Alexander Krull, Sebastian Nowozin, Jamie Shotton, Frank Michel, Stefan Gumhold, and Carsten Rother. DSAC-Differentiable RANSAC for camera localization. In CVPR, 2017. 1, 2", + "[8] Samarth Brahmbhatt, Jinwei Gu, Kihwan Kim, James Hays, and Jan Kautz. Geometry-aware learning of maps for camera localization. In IEEE conference on computer vision and pattern recognition, 2018. 3, 6", + "[9] Mai Bui, Tolga Birdal, Haowen Deng, Shadi Albarqouni, Leonidas Guibas, Slobodan Ilic, and Nassir Navab. 6d camera relocalization in ambiguous scenes via continuous multi-modal inference. 2020. 3, 4", + "[10] Nicholas Carlevaris-Bianco, Arash K Ushani, and Ryan M Eustice. University of michigan north campus long-term vision and lidar dataset. The International Journal of Robotics Research, 35(9):1023-1035, 2016. 3, 4", + "[11] David M Chen, Georges Baatz, Kevin Koser, Sam S Tsai, Ramakrishna Vedantham, Timo Pylvanäinen, Kimmo Roimela, Xin Chen, Jeff Bach, Marc Pollefeys, et al. City-scale landmark identification on mobile devices. In CVPR 2011, pages 737-744. IEEE, 2011. 3, 4, 6", + "[12] Shuai Chen, Zirui Wang, and Victor Prisacariu. Directposenet: absolute pose regression with photometric consistency. In 2021 International Conference on 3D Vision (3DV), pages 1175-1185. IEEE, 2021. 3", + "[13] Shuai Chen, Xinghui Li, Zirui Wang, and Victor A Prisacariu. Dfnet: Enhance absolute pose regression with direct feature matching. In ECCV 2022. Tel Aviv, Israel, October 23-27, 2022, Part X. Springer, 2022. 3", + "[14] Santiago Cortés, Arno Solin, Esa Rahtu, and Juho Kannala. Advio: An authentic dataset for visual-inertial odometry. 
In" + ], + "bbox": [ + 78, + 143, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Proceedings of the European Conference on Computer Vision (ECCV), pages 419-434, 2018. 3, 4", + "[15] Dawson-Haggerty et al. trimesh. 5", + "[16] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 224-236, 2018. 2, 7", + "[17] Mihai Dusmanu, Ignacio Rocco, Tomas Pajdla, Marc Pollefeys, Josef Sivic, Akihiko Torii, and Torsten Sattler. D2-net: A trainable cnn for joint description and detection of local features. In Proceedings of the IEEE/cvf conference on computer vision and pattern recognition, pages 8092-8101, 2019. 1", + "[18] Yixiao Ge, Haibo Wang, Feng Zhu, Rui Zhao, and Hongsheng Li. Self-supervising fine-grained region similarities for large-scale image localization. In European Conference on Computer Vision, 2020. 2, 6, 7", + "[19] Daniel Girardeau-Montaut. Cloudcompare. France: EDF R&D Telecom ParisTech, 11, 2016. 5", + "[20] A. Gordo, J. Almazan, J. Revaud, and D. Larlus. End-to-end learning of deep visual representations for image retrieval. *IJCV*, 2017. 2, 6, 7", + "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6", + "[22] Huajian Huang, Yinzhe Xu, Yingshu Chen, and Sai-Kit Yeung. 360vot: A new benchmark dataset for omnidirectional visual object tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20566–20576, 2023. 1", + "[23] Alex Kendall and Roberto Cipolla. Geometric loss functions for camera pose regression with deep learning. In IEEE conference on computer vision and pattern recognition, pages 5974-5983, 2017. 1, 3, 6, 7", + "[24] Alex Kendall, Matthew Grimes, and Roberto Cipolla. Posenet: A convolutional network for real-time 6-dof camera relocalization. In Proceedings of the IEEE international conference on computer vision, pages 2938-2946, 2015. 1, 3, 4, 6, 7, 8", + "[25] Junho Kim, Changwoon Choi, Hojun Jang, and Young Min Kim. Piccolo: point cloud-centric omnidirectional localization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3313-3323, 2021. 2, 3", + "[26] Junho Kim, Hojun Jang, Changwoon Choi, and Young Min Kim. Cpo: Change robust panorama to point cloud localization. In European Conference on Computer Vision, pages 176-192. Springer, 2022.", + "[27] Junho Kim, Eun Sun Lee, and Young Min Kim. Calibrating panoramic depth estimation for practical localization and mapping. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8830-8840, 2023. 2", + "[28] Laurent Kneip and Paul Furgale. Opengv: A unified and generalized approach to real-time calibrated geometric vision. In 2014 IEEE international conference on robotics and automation (ICRA), pages 1-8. IEEE, 2014. 6" + ], + "bbox": [ + 503, + 92, + 890, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "22322", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[29] Kenji Koide, Shuji Oishi, Masashi Yokozuka, and Atsuhiko Banno. General, single-shot, target-less, and automatic lidar-camera extrinsic calibration toolbox. 
arXiv preprint arXiv:2302.05094, 2023. 4, 5", + "[30] Donghwan Lee, Soohyun Ryu, Suyong Yeon, Yonghan Lee, Deokhwa Kim, Cheolho Han, Yohann Cabon, Philippe Weinzaepfel, Nicolas Guérin, Gabriela Csurka, et al. Large-scale localization datasets in crowded indoor spaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3227-3236, 2021. 3, 4", + "[31] Philipp Lindenberger, Paul-Edouard Sarlin, and Marc Pollefeys. LightGlue: Local Feature Matching at Light Speed. In ICCV, 2023. 7", + "[32] Haomin Liu, Linsheng Zhao, Zhen Peng, Weijian Xie, Mingxuan Jiang, Hongbin Zha, Hujun Bao, and Guofeng Zhang. A low-cost and scalable framework to build large-scale localization benchmark for augmented reality. IEEE Transactions on Circuits and Systems for Video Technology, 2023. 3", + "[33] Liu Liu, Hongdong Li, and Yuchao Dai. Efficient global 2d-3d matching for camera localization in a large-scale 3d map. In Proceedings of the IEEE International Conference on Computer Vision, pages 2372-2381, 2017. 1", + "[34] Zheng Liu and Fu Zhang. Balm: Bundle adjustment for lidar mapping. IEEE Robotics and Automation Letters, 6(2): 3184-3191, 2021. 5", + "[35] David G Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60:91-110, 2004. 2", + "[36] Iaroslav Melekhov, Juha Ylioinas, Juho Kannala, and Esa Rahtu. Image-based localization using hourglass networks. In IEEE international conference on computer vision workshops, 2017. 3, 6", + "[37] Arthur Moreau, Nathan Piasco, Dzmitry Tsishkou, Bogdan Stanciulescu, and Arnaud de La Fortelle. Coordinet: uncertainty-aware pose regressor for reliable vehicle localization. In IEEE/CVF Winter Conference on Applications of Computer Vision, 2022. 3", + "[38] Jeffri Murragarra-Llerena, Thiago LT Da Silveira, and Claudio R Jung. Pose estimation for two-view panoramas based on keypoint matching: A comparative study and critical analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5202-5211, 2022. 3", + "[39] Ken Museth. Vdb: High-resolution sparse volumes with dynamic topology. ACM transactions on graphics (TOG), 32 (3):1-22, 2013. 5", + "[40] Tony Ng, Adrian Lopez-Rodriguez, Vassileios Balntas, and Krystian Mikolajczyk. Reassessing the limitations of cnn methods for camera pose regression. arXiv preprint arXiv:2108.07260, 2021. 8", + "[41] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019. 6" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[42] Paul-Edouard Sarlin, Cesar Cadena, Roland Siegwart, and Marcin Dymczyk. From coarse to fine: Robust hierarchical localization at large scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12716–12725, 2019. 1, 2, 7", + "[43] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4938–4947, 2020. 2, 4, 7", + "[44] Paul-Edouard Sarlin, Mihai Dusmanu, Johannes L Schonberger, Pablo Speciale, Lukas Gruber, Viktor Larsson, Ondrej Miksik, and Marc Pollefeys. 
Lamar: Benchmarking localization and mapping for augmented reality. In European Conference on Computer Vision, pages 686-704. Springer, 2022. 3, 4", + "[45] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. Efficient & effective prioritized matching for large-scale image-based localization. IEEE transactions on pattern analysis and machine intelligence, 39(9):1744-1756, 2016. 1, 2, 3", + "[46] Torsten Sattler, Will Maddern, Carl Toft, Akihiko Torii, Lars Hammarstrand, Erik Stenborg, Daniel Safari, Masatoshi Okutomi, Marc Pollefeys, Josef Sivic, et al. Benchmarking 6dof outdoor visual localization in changing conditions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8601-8610, 2018. 3, 4, 7", + "[47] Torsten Sattler, Qunjie Zhou, Marc Pollefeys, and Laura Leal-Taixe. Understanding the limitations of cnn-based absolute camera pose regression. In IEEE/CVF conference on computer vision and pattern recognition, 2019. 3, 8", + "[48] Thomas Schops, Johannes L Schonberger, Silvano Galliani, Torsten Sattler, Konrad Schindler, Marc Pollefeys, and Andreas Geiger. A multi-view stereo benchmark with high-resolution images and multi-camera videos. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3260-3269, 2017. 3, 4", + "[49] Yoli Shavit, Ron Ferens, and Yoshi Keller. Learning multiscene absolute pose regression with transformers. In IEEE/CVF International Conference on Computer Vision, pages 2733-2742, 2021. 1, 3, 6, 7", + "[50] Jamie Shotton, Ben Glocker, Christopher Zach, Shahram Izadi, Antonio Criminisi, and Andrew Fitzgibbon. Scene coordinate regression forests for camera relocalization in rgb-d images. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2930-2937, 2013. 3, 4", + "[51] Alexander D Stewart. Localisation using the appearance of prior structure. PhD thesis, University of Oxford, 2014. 5", + "[52] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8922-8931, 2021. 2", + "[53] Xun Sun, Yuanfan Xie, Pei Luo, and Liang Wang. A dataset for benchmarking image-based localization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7436-7444, 2017. 3, 4", + "[54] Hajime Taira, Masatoshi Okutomi, Torsten Sattler, Mircea Cimpoi, Marc Pollefeys, Josef Sivic, Tomas Pajdla, and Ak" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "22323", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ihiko Torii. Inloc: Indoor visual localization with dense matching and view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7199-7209, 2018. 1, 3, 4", + "[55] Michal Tyszkiiewicz, Pascal Fua, and Eduard Trulls. Disk: Learning local features with policy gradient. Advances in Neural Information Processing Systems, 33:14254-14265, 2020. 2, 7", + "[56] Vladyslav Usenko, Nikolaus Demmel, and Daniel Cremers. The double sphere camera model. In 2018 International Conference on 3D Vision (3DV), pages 552-560. IEEE, 2018. 4", + "[57] Ignacio Vizzo, Tiziano Guadagnino, Jens Behley, and Cyril Stachniss. Vdbfusion: Flexible and efficient tsdf integration of range sensor data. Sensors, 22(3):1296, 2022. 
5", + "[58] Johanna Wald, Torsten Sattler, Stuart Golodetz, Tommaso Cavallari, and Federico Tombari. Beyond controlled environments: 3d camera re-localization in changing indoor scenes. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VII 16, pages 467-487. Springer, 2020. 3, 4", + "[59] Jian Wu, Liwei Ma, and Xiaolin Hu. Delving deeper into convolutional neural networks for camera relocalization. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 5644-5651. IEEE, 2017. 3", + "[60] Hang Xu, Qiang Zhao, Yike Ma, Xiaodong Li, Peng Yuan, Bailan Feng, Chenggang Yan, and Feng Dai. Pandora: A panoramic detection dataset for object with orientation. In ECCV, 2022. 1", + "[61] Shen Yan, Yu Liu, Long Wang, Zehong Shen, Zhen Peng, Haomin Liu, Maojun Zhang, Guofeng Zhang, and Xiaowei Zhou. Long-term visual localization with mobile sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17245-17255, 2023. 1, 3, 4, 6", + "[62] Dawen Yu and Shunping Ji. Grid based spherical cnn for object detection from panoramic images. Sensors, 19(11): 2622, 2019. 1" + ], + "bbox": [ + 78, + 90, + 470, + 627 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "22324", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/15eb225d-3032-419c-84b0-35d6ec576cbc_model.json b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/15eb225d-3032-419c-84b0-35d6ec576cbc_model.json new file mode 100644 index 0000000000000000000000000000000000000000..5d98ab83d8b7f16be299ac497017006511a325bb --- /dev/null +++ b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/15eb225d-3032-419c-84b0-35d6ec576cbc_model.json @@ -0,0 +1,2180 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.08, + 0.131, + 0.892, + 0.175 + ], + "angle": 0, + "content": "360Loc: A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.203, + 0.9, + 0.256 + ], + "angle": 0, + "content": "Huajian Huang\\(^{1*}\\) Changkun Liu\\(^{1*}\\) Yipeng Zhu\\(^{1}\\) Hui Cheng\\(^{2}\\) Tristan Braud\\(^{1}\\) Sai-Kit Yeung\\(^{1}\\) \n\\(^{1}\\)The Hong Kong University of Science and Technology * equal contribution Sun Yat-sen University" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.26, + 0.888, + 0.275 + ], + "angle": 0, + "content": "{hhuangbg, cliudg, yzhudg}@connect.ust.hk, chengh9@mail.sysu.edu.cn, {braudit, saikit}@ust.hk" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.31, + 0.314, + 0.327 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.343, + 0.473, + 0.781 + ], + "angle": 0, + "content": "Portable \\(360^{\\circ}\\) cameras are becoming a cheap and efficient tool to establish large visual databases. By capturing omnidirectional views of a scene, these cameras could expedite building environment models that are essential for visual localization. However, such an advantage is often overlooked due to the lack of valuable datasets. This paper introduces a new benchmark dataset, 360Loc, composed of \\(360^{\\circ}\\) images with ground truth poses for visual localization. We present a practical implementation of \\(360^{\\circ}\\) mapping combining \\(360^{\\circ}\\) images with lidar data to generate the ground truth 6DoF poses. 360Loc is the first dataset and benchmark that explores the challenge of cross-device visual positioning, involving \\(360^{\\circ}\\) reference frames, and query frames from pinhole, ultra-wide FoV fisheye, and \\(360^{\\circ}\\) cameras. We propose a virtual camera approach to generate lower-FoV query frames from \\(360^{\\circ}\\) images, which ensures a fair comparison of performance among different query types in visual localization tasks. We also extend this virtual camera approach to feature matching-based and pose regression-based methods to alleviate the performance loss caused by the cross-device domain gap, and evaluate its effectiveness against state-of-the-art baselines. We demonstrate that omnidirectional visual localization is more robust in challenging large-scale scenes with symmetries and repetitive structures. These results provide new insights into 360-camera mapping and omnidirectional visual localization with cross-device queries. Project Page and dataset: https://huajianup.github.io/research/360Loc/." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.814, + 0.21, + 0.83 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Visual localization refers to predicting the 6DoF absolute pose (translation and rotation) of query images in a known scene. Accurate visual localization has wide applications in augmented reality (AR), navigation, and robotics." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.311, + 0.895, + 0.522 + ], + "angle": 0, + "content": "Over the last decade, many visual localization methods have been proposed, including feature matching-based approaches [17, 33, 42, 45, 54], scene coordinate regression [5-7] and absolute pose regressors (APRs) [23, 24, 49]. 
Much of this progress has been driven by the availability of numerous datasets and benchmarks targeting different challenges, as shown in Table 1. However, existing methods and datasets focus on localization and mapping using pinhole images. Although the merits of \\(360^{\\circ}\\) camera on visual perception have been recognized [22, 60, 62], the application of \\(360^{\\circ}\\) cameras for visual localization is still under-explored. Recently, SensLoc [61] started to apply \\(360^{\\circ}\\) cameras to facilitate data collection, but their pipeline cannot perform omnidirectional localization directly from the \\(360^{\\circ}\\) images." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.523, + 0.896, + 0.794 + ], + "angle": 0, + "content": "This paper introduces 360Loc, a new challenging benchmark dataset to facilitate research on omnidirectional visual localization. The dataset contains \\(360^{\\circ}\\) images captured in diverse campus-scale indoor and outdoor environments, featuring highly symmetrical and repetitive features, as well as interference of dynamic objects. To capture this dataset, we present a practical pipeline using a portable 360-cameras platform to obtain reliable pose estimations of \\(360^{\\circ}\\) cameras as ground truth. Although \\(360^{\\circ}\\) cameras present significant advantages for capturing reference data, real-life applications applying visual localization often rely on traditional cameras. Examples include robots equipped with fisheye cameras and phone-based AR applications using the embedded pinhole camera. This raises the problem of cross-device visual localization on image databases captured with \\(360^{\\circ}\\) cameras. We thus supplement the reference database composed of \\(360^{\\circ}\\) images with query frames including pin-hole, fisheye and \\(360^{\\circ}\\) cameras." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.895, + 0.903 + ], + "angle": 0, + "content": "We introduce the concept of virtual camera to generate high-quality lower-FoV images with different camera parameters from \\(360^{\\circ}\\) images. This enables a fair comparison of performance among queries from different devices in cross-device visual localization. We adapt existing feature-matching-based methods and APRs to support \\(360^{\\circ}\\) image queries and benchmark these methods for 360-based cross-" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "22314" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.09, + 0.891, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.298, + 0.272, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.298, + 0.453, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.455, + 0.298, + 0.891, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.4, + 0.897, + 0.471 + ], + "angle": 0, + "content": "Figure 1. Overview of dataset collection and ground truth generation: 1) Use the platform to collect \\(360^{\\circ}\\) images and frame-by-frame point clouds. Obtain real-time camera poses; 2) Apply optimization methodology to achieve data registration, resulting in a globally reconstructed point cloud model. Then, align the models in daytime and nighttime to get consistent poses; 3) Perform cropping to get virtual camera images and generate corresponding depth images. 
As a result, 360Loc takes advantage of \\(360^{\\circ}\\) images for efficient mapping while providing query images in five different camera models in order to analyze the challenge of cross-domain visual localization." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.497, + 0.47, + 0.573 + ], + "angle": 0, + "content": "device visual localization. Since different cameras present different imaging patterns, the cross-device domain gap is expected to lead to performance loss. We extend the virtual camera approach to data augmentation for end-to-end solutions such as image retrieval (IR) and APRs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.577, + 0.47, + 0.744 + ], + "angle": 0, + "content": "By conducting exhaustive evaluations, we demonstrate the advantages of \\(360^{\\circ}\\) cameras in reducing ambiguity in visual localization on scenes featuring symmetric or repetitive features. We also show improvements against state-of-the-art (SOTA) baselines using the virtual camera method for cross-device visual localization on images databases captured with \\(360^{\\circ}\\) cameras. These results provide novel insights on mapping using \\(360^{\\circ}\\) images, enhancing the anti-ambiguity capability of query images, reducing domain gap cross-device in visual localization, and improving the generalization ability of APRs by applying virtual cameras." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.747, + 0.418, + 0.761 + ], + "angle": 0, + "content": "Our contribution can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.765, + 0.47, + 0.81 + ], + "angle": 0, + "content": "- We propose a practical implementation of \\(360^{\\circ}\\) mapping combining lidar data with \\(360^{\\circ}\\) images for establishing the ground truth 6DoF poses." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.811, + 0.469, + 0.855 + ], + "angle": 0, + "content": "- A virtual camera approach to generate high-quality lower-FoV images with different camera parameters from \\(360^{\\circ}\\) views." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.856, + 0.469, + 0.902 + ], + "angle": 0, + "content": "- A novel dataset for cross-device visual localization based on \\(360^{\\circ}\\) reference images with pinhole, fisheye, and \\(360^{\\circ}\\) query images." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.765, + 0.47, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.497, + 0.892, + 0.573 + ], + "angle": 0, + "content": "- Demonstration of our approach's efficacy over state-of-the-art solutions for visual localization using \\(360^{\\circ}\\) image databases, resulting in decreased localization ambiguity, reduced cross-device domain gap, and improved generalization ability of APRs." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.592, + 0.64, + 0.608 + ], + "angle": 0, + "content": "2. Related work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.619, + 0.686, + 0.634 + ], + "angle": 0, + "content": "2.1. Visual Localization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.87 + ], + "angle": 0, + "content": "Structure-based methods predict camera poses by establishing 2D-3D correspondences indirectly with local feature extractors and matchers [16, 35, 42, 43, 52, 55] or directly with scene coordinate regression [5-7]. HLoc [42, 43] pipeline scales up to large scenes using image retrieval [1, 3, 18, 20] as an intermediate step, which achieves SOTA accuracy on many benchmarks. 
This type of approach usually supports pinhole cameras with different intrinsic parameters. However, the performance of \\(360^{\\circ}\\) and fisheye cameras has not been evaluated before due to the lack of support for \\(360^{\\circ}\\) cameras in the Structure from Motion (SfM) tools like COLMAP [45] and the lack of datasets for fisheye and \\(360^{\\circ}\\) cameras. [25-27] are point-cloud-based panorama localization methods for \\(360^{\\circ}\\) queries but they do not consider cross-device visual localization." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Absolute Pose Regressors (APRs) are end-to-end learning-based methods that directly regress the absolute camera" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "22315" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.258 + ], + "angle": 0, + "content": "pose from input images without the knowledge of 3D models and establish 2D-3D correspondences. APRs [4, 8, 12, 13, 23, 24, 36, 37, 49, 59] provide faster inference than structure-based methods at the cost of accuracy and robustness [47]. Besides, APRs have generally only been tested on the [9], 7Scenes [50], and Cambridge Landmarks [24] datasets in previous studies. A notable characteristic of these datasets is that the training set and test set images were taken from the same camera. In this paper, we enhance cross-device pose regression for APRs by introducing virtual cameras as a data augmentation technique." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.272, + 0.181, + 0.287 + ], + "angle": 0, + "content": "2.2. Datasets" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.297, + 0.473, + 0.904 + ], + "angle": 0, + "content": "The existing dataset has the following limitations. 1). Most datasets [9, 10, 24, 50, 54, 58] do not consider the need for cross-device localization, i.e., query images come from the same camera. Even though some datasets [11, 14, 30, 44, 46, 48, 53, 61] take into account cross-device localization, these devices are only pinhole cameras with different camera intrinsic parameters and do not have particularly large domain-gaps. Compared to [32], our pinhole and fisheye images are extracted from \\(360^{\\circ}\\) images via virtual cameras, which makes less demands on the device and allows for a fair and more flexible comparison of the effects of different FoVs. In this paper, our 360Loc datasets provide five kinds of queries from pinhole, fisheye and \\(360^{\\circ}\\) cameras to promote the research of cross-device localization. 2). Now there is no 6DoF visual localization dataset and benchmark considering \\(360^{\\circ}\\) reference images and \\(360^{\\circ}\\) query images, even though [2, 25, 38] contain \\(360^{\\circ}\\) images with 6DoF pose labels, they are not standard visual localization datasets with independent mapping/reference sequences and query sequences like datasets in Table 1. Other datasets [11, 61] use \\(360^{\\circ}\\) cameras for data collection, in the end they cropped \\(360^{\\circ}\\) to perspective images and then tailor these images to the classical visual localization pipeline of pinhole cameras. 
The academic community is mainly driven by benchmarks where all training, reference, and query images are pinhole images because they rely on SfM tools [45] which does not support \\(360^{\\circ}\\) cameras to obtain ground-truth (GT) and get sparse 3D point cloud models for recovering camera poses. However, we note that the \\(360^{\\circ}\\) camera can cover the scene with greater efficiency than normal pinhole cameras with narrow Field-of-View (FoV), which makes \\(360^{\\circ}\\) images particularly suitable as reference images. 3) Although the current dataset has explored the challenges of visual localization from various aspects such as weather variations, daynight transitions, scene changes, and moving individuals and objects [24, 30, 44, 46, 58, 61], there is still insufficient research specifically targeting highly ambiguous environments which contain symmetries, repetitive structures and insufficient textures. Only two indoor datasets [9, 53]" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.091, + 0.888, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.264, + 0.892, + 0.307 + ], + "angle": 0, + "content": "Figure 2. The four scenes in 360Loc, all four scenes contain symmetrical, repetitive structures and moving objects. The camera trajectories are visualized as spheres." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.332, + 0.892, + 0.453 + ], + "angle": 0, + "content": "and LaMAR [44] consider challenges in ambiguous environments. In this paper, we studied 4 ambiguous scenes from both indoor and outdoor environments with a scale much larger than dataset [9] (See Figure 2). We conduct exhaustive assessments of image retrieval, local matching localization, and absolute pose regression to show that queries from the \\(360^{\\circ}\\) camera are harder to obtain plausible solutions than other queries from cameras with narrower FoV." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.466, + 0.692, + 0.481 + ], + "angle": 0, + "content": "3. The 360Loc Dataset" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.491, + 0.892, + 0.673 + ], + "angle": 0, + "content": "The 360Loc dataset contains 4 locations from a local university. Figure 2 displays the reference point cloud and example frames from each scene. Atrium is inside a building with a surrounding structure that exhibits a high degree of symmetry and repetition, making it a highly ambiguous environment. Concourse is a large indoor scene with many moving people, which can be used for evaluating the robustness of any localization algorithm in scenes with many moving objects. Piatrium is a scene containing both indoor Atrium and outdoor environments, covering an outdoor piazza with coffee shops, bookstores, and souvenir shops. Hall is a modern building of a student dormitory." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.682, + 0.712, + 0.698 + ], + "angle": 0, + "content": "3.1. 360 Mapping Platform" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.704, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We utilized the handheld multimodal data acquisition platform depicted in Figure 1 for data collection. This platform incorporates a \\(360^{\\circ}\\) camera, a Velodyne VLP-16 multi-line lidar, an NUC mini-computer, and a display screen. 
Figure 1 also illustrates the relative relationship among the \\(360^{\\circ}\\) camera coordinate system \\(\\mathbf{O}_{\\mathrm{c}}\\)-XYZ, the lidar coordinate system \\(\\mathbf{O}_{\\mathrm{l}}\\)-XYZ as well as the world coordinate \\(\\mathbf{O}_{\\mathrm{w}}\\)-XYZ. The portable 360 camera equipped on this device can capture high-resolution omnidirectional images with a resolution of \\(6144 \\times 3072\\) (2:1 aspect ratio). It also features a built-in six-axis gyroscope that provides stabilization support, making it suitable for handheld mobile data capture. The Velodyne VLP-16 multi-line lidar has" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "22316" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.088, + 0.089, + 0.881, + 0.289 + ], + "angle": 0, + "content": "
DatasetScale and EnvironmentChallengesReference/Query typeGroundtruth SolutionAccuracy
7Scenes [50]Small IndoorNonepinhole / pinholeRGB-D≈ cm
RIO10 [58]Small IndoorChangespinhole / pinholeVIO> d m
Baidu Mall [53]Medium IndoorPeople, Ambiguouspinhole / pinholelidar+Manual≈ d m
Naver Labs [30]Medium IndoorPeople, Changespinhole / pinholelidar+SfM≈ d m
InLoc [54]Medium IndoorNonepinhole / pinholelidar+Manual> d m
AmbiguousLoc [9]Small IndoorAmbiguouspinhole / pinholeSLAM≈ cm
Aachen [46]Large outdoorPeople, Day-Nightpinhole / pinholeSfM> d m
Cambridge [24]Medium outdoorPeople, Weatherpinhole / pinholeSfM> d m
San Francisco [11]Large outdoorPeople, Constructionpinhole / pinholeSfM+GPS≈ m
NCLT [10]Medium Outdoor + IndoorWeatherpinhole / pinholeGPS+SLAM+lidar≈ d m
ADVIO [14]Medium Outdoor+IndoorPeoplepinhole / pinholeVIO+Manual≈ m
ETH3D [48]Medium Outdoor + IndoorNonepinhole / pinholelidar+Manual≈ mm
LaMAR [44]Medium Outdoor+IndoorPeople, Weather, Day-Night, Construction, Changes, Ambiguouspinhole / pinholelidar+SfM+VIO≈ cm
SensLoc [61]Large OutdoorPeople, Weather, Day-Night, Construction, Changespinhole / pinholeSL+VIO+RTK+Gravity< dm
360Loc (ours)Medium Outdoor+IndoorPeople, Weather, Day-Night, Construction, Changes, Ambiguous360 / (360 + pinhole + fisheye)lidar+VIO≈ cm
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.293, + 0.893, + 0.322 + ], + "angle": 0, + "content": "Table 1. Overview of popular visual localization datasets. No dataset, besides ours, consider \\(360^{\\circ}\\) images as reference and query frames from pinhole, ultra-wide FoV fisheye, and \\(360^{\\circ}\\) cameras." + }, + { + "type": "table", + "bbox": [ + 0.109, + 0.345, + 0.432, + 0.434 + ], + "angle": 0, + "content": "
SymbolNameField of ViewResolutionType
c0360360°6144×3072reference/query
c1fisheye1120°1280×1024query
c2fisheye2150°1280×1024query
c3fisheye3195°1280×1024query
c4pinhole85°1920×1200query
" + }, + { + "type": "table_caption", + "bbox": [ + 0.103, + 0.437, + 0.443, + 0.451 + ], + "angle": 0, + "content": "Table 2. The representation and parameters of 5 cameras." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.467, + 0.47, + 0.537 + ], + "angle": 0, + "content": "
Scene# Frames Reference 360# Frames Query (day / night)Spatial Extent (m)
360PinholeFisheye1Fisheye2Fisheye3
Concourse491593/5141186/10281186/10281186/10281186/102893 × 15
Hall5401123/10612246/21222246/21222246/21222246/2122105 × 52
Atrium581875/12191750/24381750/24381750/24381750/243865 × 36
Piatrium6321008/6972016/13942016/13942016/13942016/139498 × 70
" + }, + { + "type": "table_caption", + "bbox": [ + 0.165, + 0.541, + 0.382, + 0.555 + ], + "angle": 0, + "content": "Table 3. 360Loc dataset description." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.584, + 0.473, + 0.902 + ], + "angle": 0, + "content": "a FoV of \\(360^{\\circ} \\times 30^{\\circ}\\), angular resolution of \\(0.2^{\\circ} \\times 2.0^{\\circ}\\), and rotation rate of \\(10\\mathrm{Hz}\\), offering a comprehensive \\(360^{\\circ}\\) environmental view. Regarding the calibration of the extrinsic poses between the lidar and the \\(360^{\\circ}\\) camera, we employed a calibration toolbox [29] that applies to both lidar and camera projection models. This toolbox utilizes the SuperGlue [43] image matching pipeline to establish 2D-3D correspondences between the lidar and camera image. We perform pseudo-registration by synchronizing the two data modalities, images, and point clouds. Eventually, we use graph-based SLAM techniques for continuous pose estimations. In the four scenes, a total of 18 independent sequences of \\(360^{\\circ}\\) images were captured (12 daytime, and 6 nighttime), resulting in a total number of 9334 images. For each scene, we selected a specific sequence captured during the daytime as the reference images, while the remaining images were defined as query images of the \\(360^{\\circ}\\) image type. We provide more details and show why \\(360^{\\circ}\\) mapping is superior to pinhole SfM in ambiguous scenes with repetitive and symmetric structures in the supplementary material." + }, + { + "type": "image", + "bbox": [ + 0.534, + 0.346, + 0.856, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.54, + 0.892, + 0.569 + ], + "angle": 0, + "content": "Figure 3. Illustration of obtaining virtual camera images through random poses and image cropping." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.595, + 0.702, + 0.61 + ], + "angle": 0, + "content": "3.1.1 Cross-device Queries" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.618, + 0.892, + 0.71 + ], + "angle": 0, + "content": "To enable a rigorous comparison of the difference in the performance of different FoV queries for visual localization tasks, we created four virtual cameras with diverse FoV from \\(360^{\\circ}\\) cameras, which are shown in Figure 2. Given a \\(360^{\\circ}\\) image \\(\\mathcal{I}_{c_0}\\), the corresponding virtual camera with preconfigured intrinsic parameters is extracted by" + }, + { + "type": "equation", + "bbox": [ + 0.566, + 0.72, + 0.892, + 0.74 + ], + "angle": 0, + "content": "\\[\n\\mathcal {I} _ {c _ {n}} = \\Psi_ {c _ {n}} \\left(\\mathcal {I} _ {c _ {0}}\\right) = \\pi_ {c _ {n}} ^ {- 1} \\left(\\pi_ {c _ {0}} \\left(\\boldsymbol {R} \\mathcal {I} _ {c _ {0}}\\right)\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.895, + 0.901 + ], + "angle": 0, + "content": "where \\(\\pi_{c_n}\\) denote the projection function of virtual camera and \\(\\pi_{c_0}\\) is the projection function of \\(360^{\\circ}\\) camera. \\(R\\in SO(3)\\) is a random relative rotation matrix to increase the diversity of views representing the scenes. Moreover, the inversed operation \\(\\Psi_{c_n}^{-1}\\) can convert the \\(c_{n}\\) image back to a \\(360^{\\circ}\\) image. 
As reported in Table 2, the virtual cameras include an undistorted pinhole model with \\(85^{\\circ}\\) FoV and three fisheye cameras in Dual Sphere mode [56] with \\(120^{\\circ}\\), \\(150^{\\circ}\\), and \\(195^{\\circ}\\) FoV respectively. Table 3 presents the number of image frames in the 360Loc dataset." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22317" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.091, + 0.315, + 0.106 + ], + "angle": 0, + "content": "3.2. Ground Truth Generation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.114, + 0.47, + 0.25 + ], + "angle": 0, + "content": "Besides the graph-based optimization in SLAM, we designed a set of offline optimization strategies to further improve the accuracy of camera pose estimation. After the acquisition of precise dense point cloud reconstructions and poses of \\(360^{\\circ}\\) cameras, an Iterative Closest Point (ICP) algorithm is applied to align models between reference and the query sequences in the same scene. Moreover, we reconstructed the mesh model of the scenes and generated corresponding depth maps of \\(360^{\\circ}\\) cameras." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.25, + 0.469, + 0.37 + ], + "angle": 0, + "content": "Bundle Adjustment (BA) of lidar mapping. Incremental map construction can suffer from accumulating errors due to environmental degradation. We utilized a BA framework based on feature points extracted from lidar to refine the map and the poses. The optimization process involved minimizing the covariance matrix to constrain the distances between feature points and edge lines or plane features that are mutually matched." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.371, + 0.47, + 0.552 + ], + "angle": 0, + "content": "First, we utilize an octree data structure to perform adaptive voxelization-based feature extraction. In this method, the point cloud map is segmented into voxels of predetermined size. Each voxel is checked to determine if its points \\( P_{u}^{f} \\) lie on a plane or a line, where \\( u \\in \\{1,2,\\dots ,U\\} \\), obtained from the \\( u \\)-th frame of lidar scans. If not, the voxel is recursively subdivided using an octree structure until each voxel contains points \\( P_{u}^{f} \\) belonging to the same feature. Let's assume that the pose of the lidar in each frame is \\( \\pmb{\\eta} = \\{\\pmb{\\eta}_1,\\pmb{\\eta}_2,\\dots ,\\pmb{\\eta}_M\\} \\), where \\( \\pmb{\\eta}_{u} = (R_{u},t_{u}|R_{u} \\in SO(3), t_{u} \\in \\mathbb{R}^{3}) \\). In that case, the feature points in the global map can be represented as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.197, + 0.558, + 0.469, + 0.575 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {P} _ {u} = \\boldsymbol {R} _ {u} \\times \\boldsymbol {P} _ {u} ^ {f} + \\boldsymbol {t} _ {u}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.581, + 0.469, + 0.701 + ], + "angle": 0, + "content": "After simplifying the lidar map to edge or plane features, the process of BA becomes focused on determining the pose \\(\\pmb{\\eta}\\) and the location of the single feature, which can be represented as \\((\\pmb{n}_f,\\pmb{q})\\), where \\(\\pmb{q}\\) represents the location of a specific feature, \\(\\pmb{n}_f\\) is the direction vector of an edge line or the normal vector of a plane. 
To minimize the distance between each feature point and the corresponding feature, we can utilize the BA:" + }, + { + "type": "equation", + "bbox": [ + 0.1, + 0.706, + 0.469, + 0.746 + ], + "angle": 0, + "content": "\\[\n\\left(\\boldsymbol {\\eta} ^ {*}, \\boldsymbol {n} _ {f} ^ {*}, \\boldsymbol {q} ^ {*}\\right) = \\underset {\\boldsymbol {\\eta}, \\boldsymbol {n} _ {f}, \\boldsymbol {q}} {\\arg \\min } \\frac {1}{U} \\sum_ {u = 1} ^ {U} \\left(\\boldsymbol {n} _ {f} ^ {T} \\left(\\boldsymbol {P} _ {\\boldsymbol {u}} - \\boldsymbol {q}\\right)\\right) ^ {2}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.751, + 0.47, + 0.873 + ], + "angle": 0, + "content": "It has been proved that when the plane's normal vector is set to the minimum eigenvector, and \\( \\mathbf{q} \\) is set to the centroid of the feature, i.e. \\( \\mathbf{q} = \\hat{\\mathbf{P}} = \\frac{1}{U}\\sum_{u=1}^{U}\\mathbf{P}_{u} \\), Eq. 3 reaches its minimum value. Additionally, the BA problem in lidar mapping has a closed-form solution that is independent of the features \\( (\\mathbf{n}_f,\\mathbf{q}) \\) [34]. It can be simplified to the following problem:" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.879, + 0.469, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\eta} ^ {*} = \\underset {\\boldsymbol {\\eta}} {\\arg \\min } \\lambda_ {\\min } (\\boldsymbol {A}), \\tag {4}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.09, + 0.887, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.581, + 0.268, + 0.808, + 0.281 + ], + "angle": 0, + "content": "Figure 4. Overview of GT generation." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.309, + 0.796, + 0.324 + ], + "angle": 0, + "content": "where, \\(\\lambda\\) represents the eigenvalue of \\(A\\), and" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.336, + 0.892, + 0.377 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {A} = \\frac {1}{U} \\sum_ {u = 1} ^ {U} \\left(\\boldsymbol {P} _ {u} - \\hat {\\boldsymbol {P}}\\right) \\left(\\boldsymbol {P} _ {u} - \\hat {\\boldsymbol {P}}\\right) ^ {T}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.388, + 0.892, + 0.463 + ], + "angle": 0, + "content": "Now, the BA problem is simplified by adjusting the lidar pose \\(\\pmb{\\eta}\\) to minimize the smallest eigenvalue \\(\\lambda_3\\) of the point covariance matrix \\(\\mathbf{A}\\) defined in Eq. 5. By employing this strategy, we refined the pose \\(\\pmb{\\eta}\\) of each frame and the edge or plane features in the lidar map." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.464, + 0.893, + 0.902 + ], + "angle": 0, + "content": "Refined cameras poses. The poses of \\(360^{\\circ}\\) camera obtained from online SLAM are further optimized by the registration with respect to the dense refined point cloud model. Taking the pre-calibrated extrinsic parameters as the initial guess, we used the RANSAC to refine the lidar-camera transformation [29]. This registration process is based on the normalized information distance (NID) [51], which serves as a mutual information-based cross-modal distance metric. Finally, we align the reference models and query models into the same coordinate system to generate the ground truth for the query sequences. Specifically, we utilize the CloudCompare toolbox [19] to manually select feature points across multiple point cloud models as initial values. 
Then, we employ the ICP algorithm to register the point cloud models together. Afterwards, we employed a practical approach to volumetric surface reconstruction called Truncated Signed Distance Functions (TSDFs) [57] to achieve the reconstruction from point clouds to meshes with an efficient and sparse data structure called Voxel Data Base (VDB) [39]. At this stage, we can utilize the ray-mesh intersection method [15] to cast rays from cameras onto the mesh model. By intersecting the rays with the mesh, we can determine the depths of the corresponding points on the mesh surface. After a series of joint optimizations between multiple modalities, we have generated a set of GT data. Figure 2 shows some instances. This GT data includes reference images \\(\\mathcal{I}_{c_0}^r\\), the depth maps \\(D_{c_0}^r\\) of the reference images, and the reference maps containing the point cloud models \\(\\mathcal{P}\\), mesh models \\(M\\), as well as camera pose odom" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "22318" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.411, + 0.108 + ], + "angle": 0, + "content": "etry \\(\\{\\xi \\}\\) . Figure 4 summarizes the GT generation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.121, + 0.406, + 0.138 + ], + "angle": 0, + "content": "4. Omnidirectional Visual Localization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.147, + 0.47, + 0.284 + ], + "angle": 0, + "content": "We extend the current feature-matching-based and absolute pose regression pipelines for omnidirectional visual localization. Given a query image \\(\\mathcal{I}^q\\) in any camera model, we seek to estimate its poses within the environment modeled by \\(360^{\\circ}\\) images \\(\\mathbf{I}^r\\). To minimize the domain gap between the query image from \\(c_{1}, c_{2}, c_{3}, c_{4}\\) and reference images, we explore visual cameras (VC) in two ways: VC1, remapping query images to 360 domain using \\(\\Psi_{c_n}^{-1}\\); VC2, rectifying \\(360^{\\circ}\\) images into queries' domains using \\(\\Psi_{c_n}\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.294, + 0.4, + 0.31 + ], + "angle": 0, + "content": "4.1. Feature-matching-based Localization" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.318, + 0.469, + 0.348 + ], + "angle": 0, + "content": "Most feature-matching-based techniques first perform IR to reduce the search space before estimating the pose." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.368, + 0.243, + 0.383 + ], + "angle": 0, + "content": "4.1.1 Image Retrieval" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.392, + 0.469, + 0.514 + ], + "angle": 0, + "content": "For method VC1, if query \\(\\mathcal{I}^q\\) captured from \\(c_{0}\\), we retrieve the \\(k\\) most similar images from \\(\\mathbf{I}^r\\) by calculating and sorting simi\\(_{\\mathrm{cos}}(\\mathcal{F}(\\mathcal{I}^q), \\mathcal{F}(\\mathcal{I}^r))\\), \\(\\mathcal{I}^r \\in \\mathbf{I}^r\\) and \\(\\mathcal{F}(\\cdot)\\) denotes the function to map each image to the global feature domain. simi\\(_{\\mathrm{cos}}(\\cdot)\\) is cosine similarity for two feature embeddings. If query \\(\\mathcal{I}^q\\) captured from \\(c_{1}, c_{2}, c_{3}, c_{4}\\), we then retrieve top-\\(k\\) reference images based on simi\\(_{\\mathrm{cos}}(\\mathcal{F}(\\Psi_{c_n}^{-1}(\\mathcal{I}^q)), \\mathcal{F}(\\mathcal{I}^r)), \\mathcal{I}^r \\in \\mathbf{I}^r\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.515, + 0.469, + 0.574 + ], + "angle": 0, + "content": "In method VC2, we expand the global features for each \\(360^{\\circ}\\) reference image by cameras \\(c\\) including virtual pin-hole cameras forming a cube map and virtual fisheye cameras. We define the similarity score between \\(\\mathcal{I}^q\\) and \\(\\mathcal{I}^r\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.167, + 0.59, + 0.469, + 0.607 + ], + "angle": 0, + "content": "\\[\n\\max \\left(\\operatorname {s i m i} _ {\\cos} \\left(\\mathcal {F} \\left(\\mathcal {I} ^ {q}\\right), \\mathcal {G} _ {\\mathcal {F}} \\left(\\mathcal {I} ^ {r}\\right)\\right), \\right. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.615, + 0.469, + 0.72 + ], + "angle": 0, + "content": "where global feature group of reference is \\(\\mathcal{G}_{\\mathcal{F}}(\\mathcal{I}^r) = \\{\\mathcal{F}(\\Psi_c(\\mathcal{I}^r)),\\ldots \\}\\). We use the highest similarity value calculated from \\(\\mathcal{F}(\\mathcal{I}^q)\\) and \\(\\mathcal{G}_{\\mathcal{F}}(\\mathcal{I}^r)\\) as the similarity score for each \\(\\mathcal{I}^r\\) to ensure retrieve \\(k\\) most similar \\(360^{\\circ}\\) reference images because some rectified images are from the same \\(\\mathcal{I}^r\\). Note that we can eliminate the domain gap during the image retrieval step in this way." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.741, + 0.446, + 0.756 + ], + "angle": 0, + "content": "4.1.2 Local Feature Matching and Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.469, + 0.901 + ], + "angle": 0, + "content": "For each pinhole query frame, we retrieve relevant reference images, match their local features, leverage the depth maps \\(D_{c_0}\\) to establish the 2D-3D correspondences, and finally estimate a pose with \\(\\mathrm{PnP + RANSAC}\\). Unlike [11, 61], we directly match query image with retrieved \\(360^{\\circ}\\) reference images described in Section 4.1.1. For query images from \\(c_{0}, c_{1}, c_{2}, c_{3}\\), i.e., fisheye and \\(360^{\\circ}\\) query frames, we utilize the function that calculates pose error in sphere camera model in OpenGV [28] library for \\(\\mathrm{PnP + RANSAC}\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.731, + 0.107 + ], + "angle": 0, + "content": "4.2. Absolute Pose Regression" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.115, + 0.891, + 0.145 + ], + "angle": 0, + "content": "APRs train deep neural networks to regress the 6DoF camera pose of a query image." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.146, + 0.892, + 0.19 + ], + "angle": 0, + "content": "PN. PoseNet (PN) is the first APR model. Since there is no open source code [23, 24], we follow the modification in [8, 36] and use ResNet34 [21] as the backbone network." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.191, + 0.892, + 0.312 + ], + "angle": 0, + "content": "MS-T. MS-Transformer [49] is an APR model incorporating attention and implementing transformers as backbone. We note APR methods using our virtual camera method, VC2, as \\(\\mathbf{APR}^{vc2}\\). The difference between APR and \\(\\mathbf{APR}^{vc2}\\) is the training stage. For APR baselines, the training set is \\(\\mathbf{I}^r\\). For \\(\\mathbf{APR}^{vc2}\\), they are trained with \\(360^\\circ\\) images, cropped pinhole images, and cropped fisheye images, i.e., \\(\\mathbf{I}^r \\cup \\Psi_c(\\mathbf{I}^r)\\) introduced in Section 4.1.1 and Eq. 1." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.313, + 0.892, + 0.416 + ], + "angle": 0, + "content": "All APR models are implemented in Python using PyTorch [41]. During training, all input images are resized to \\(256 \\times 256\\) and then randomly cropped to \\(224 \\times 224\\). For both PN and MS-T, we set an initial learning rate of \\(\\lambda = 10^{-4}\\) and a batch size of 32 for 300 epochs of each scene. Training and evaluation in Section 5 are performed on an NVIDIA GeForce GTX 3090 GPU." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.433, + 0.616, + 0.448 + ], + "angle": 0, + "content": "5. Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.459, + 0.891, + 0.489 + ], + "angle": 0, + "content": "We provide detailed results for each scene in the dataset and more settings in supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.499, + 0.66, + 0.515 + ], + "angle": 0, + "content": "5.1. Image Retrieval" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.523, + 0.892, + 0.749 + ], + "angle": 0, + "content": "We evaluate global descriptors computed by NetVLAD [1], CosPlace [3], OpenIBL [18] and AP-GeM [20]. The query image is deemed correctly localized if at least one of the top \\(k\\) retrieved database images is within \\(d = 5m\\) from the ground truth position of the query for Concourse and \\(d = 10m\\) for the other three scenes. The image retrieval results are shown in Table 4. Among all global feature descriptor methods, the \\(360^{\\circ}\\) query exhibits the best precision and recall in most cases, while the pinhole query performs the worst. The remap method (VC1) provides limited improvement for pinhole queries but yields higher improvement for fisheye1, fisheye2, and fisheye3 queries. The reason is that the FoV of pinhole cameras is only \\(85^{\\circ}\\). Consequently, VC1 results in significant black borders when converting to a \\(360^{\\circ}\\) image due to the limited coverage." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.901 + ], + "angle": 0, + "content": "The rectify method (VC2) significantly improves pin-hole, fisheye1, fisheye2, and fisheye3 queries by eliminating the domain gap in IR. However, the pinhole, fisheye1, and fisheye2 queries' recall and precision are still much lower than those of the \\(360^{\\circ}\\) query. Only the query from fisheye3 (widest FoV) approaches the performance of \\(360^{\\circ}\\) query. The domain gap mainly affects the precision and recall of fisheye3. Both remap (VC1) and crop (VC2) significantly improve IR performance for fisheye3. On the other hand, pinhole queries are more prone to being mistaken as error" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22319" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.1, + 0.089, + 0.875, + 0.313 + ], + "angle": 0, + "content": "
QueryNetVLAD [1]CosPlace [3]OpenIBL [18]AP-GeM [20]
R@1R@5P@5R@10P@10R@1R@5P@5R@10P@10R@1R@5P@5R@10P@10R@1R@5P@5R@10P@10
pinhole0.230.450.220.580.220.150.260.150.330.150.180.360.180.480.180.20.370.20.470.2
+VC10.240.450.240.570.230.210.330.210.410.210.210.390.210.50.20.250.420.250.530.24
+VC20.50.670.480.750.470.320.410.320.480.310.510.670.490.750.470.50.680.490.770.47
fisheye10.420.670.410.770.390.280.430.280.520.280.370.580.360.690.340.350.550.340.660.33
+VC10.510.720.490.80.470.360.480.350.560.340.520.70.50.790.480.430.620.420.720.4
+VC20.730.910.630.950.570.630.850.510.920.430.740.910.620.950.540.650.880.570.940.51
fisheye20.450.70.440.80.420.30.460.310.550.310.410.620.40.730.380.380.590.360.680.35
+VC10.540.740.520.830.490.370.490.360.570.350.560.730.540.810.510.460.650.450.740.43
+VC20.740.920.650.950.580.640.870.530.930.450.760.920.650.960.560.670.890.580.940.52
fisheye30.570.790.550.860.520.40.560.40.650.40.530.740.510.830.490.450.660.430.750.41
+VC10.630.810.610.880.580.480.610.480.680.470.670.820.650.880.610.550.730.530.810.51
+VC20.770.930.680.960.610.690.890.580.940.50.790.930.680.960.60.670.90.590.940.54
3600.790.860.770.880.730.920.950.910.960.890.890.940.880.950.830.790.90.770.940.72
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.322, + 0.895, + 0.371 + ], + "angle": 0, + "content": "Table 4. Image retrieval results based on \\(360^{\\circ}\\) reference database average over four scenes, the recall, and precision for the top \\(k\\) retrieved images, \\(k = 1,5,10\\). \\(\\#\\) indicates the highest value of R@k and P@k for each device w and w/o virtual cameras (VC1, VC2). Best results for all devices of R@k and P@k are in bold with \\(\\#\\)." + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.383, + 0.885, + 0.561 + ], + "angle": 0, + "content": "
NetVLAD [1]CosPlace [3]
DISK + LGSP + LGSP + SGDISK + LGSP + LGSP + SG
DayNightDayNightDayNightDayNightDayNightDayNight
pinhole6.0/11.3/24.61.7/4.4/10.38.0/14.9/30.92.2/5.5/13.58.4/15.2/30.72.3/5.6/12.34.2/7.8/18.01.6/3.5/8.64.8/10.2/22.11.9/4.7/11.15.4/10.4/21.12.1/4.7/10.4
+VC18.5/14.0/23.52.2/4.1/7.910.4/17.0/27.52.9/5.3/10.110.9/17.8/28.52.8/5.6/9.96.1/10.8/21.11.7/3.6/8.27.5/13.2/22.52.0/4.5/9.67.6/13.5/22.82.1/4.7/9.6
+VC214.2/22.2/35.54.1/7.8/13.619.8 / 29.7/42.96.1/10.4/16.921.6/33.2 / 49.75.9 / 11.0 / 18.48.0/13.1/23.52.5/4.6/9.110.7/16.4/26.63.0/5.7/11.411.6/18.5/30.53.5/6.8/12.8
fisheye11.6/4.4/17.70.5/1.8/7.41.9/5.4/20.10.7/2.3/10.51.6/4.7/18.40.5/1.9/8.20.8/2.5/11.80.4/1.4/5.81.0/3.5/13.00.5/1.4/8.20.9/3.4/12.10.3/1.4/7.0
+VC13.3/9.2/27.60.8/2.7/9.64.1/10.6/32.21.4/4.4/14.93.0/9.5/29.60.9/3.1/11.72.3/5.5/19.40.5/1.6/7.32.1/6.1/19.90.7/2.2/9.01.9/5.5/19.10.5/1.9/7.3
+VC23.9/10.5/33.01.0/4.0/14.64.3/12.4/38.21.9/6.4/21.83.6/11.0/34.51.1/5.3/19.42.5/6.9/25.30.8/2.8/12.22.8/8.2/29.01.3/4.6/18.02.1/7.1/26.71.0/4.0/16.2
fisheye21.6/4.9/20.90.5/2.0/8.71.9/6.7/23.20.8/3.0/11.81.7/5.2/19.50.7/2.5/9.91.3/3.5/14.20.4/1.6/6.91.2/3.8/15.20.5/1.5/9.11.2/3.9/12.90.6/1.6/7.2
+VC14.3 / 10.8/30.90.8/3.0/11.24.7/12.4/34.11.8/5.4/15.84.1 / 10.6/31.51.1/3.6/13.72.5/6.5/20.60.5/1.77.42.5/7.0/22.10.8/2.4/9.42.2/6.8/20.20.5/2.1/8.0
+VC24.3/11.0/34.41.1/4.7/17.35.1/14.0/41.12.0/7.2/24.83.7 / 11.5/36.81.5/5.9/21.22.8/7.3/27.10.8/2.9/13.42.9/8.9/32.01.6/5.3/20.12.5/8.0/27.91.1/4.2/17.7
fisheye33.8/9.5/29.81.0/3.6/13.84.0/10.5/31.61.3/4.6/16.43.4/9.1/28.40.8/3.8/13.82.5/6.3/21.90.6/2.4/10.12.8/7.2/22.30.9/2.9/12.42.0/5.9/20.01.3/4.2/15.0
+VC15.9/14.7 / 39.51.5/5.2/17.76.0 / 16.2/43.52.0/6.8/21.95.8/14.7 / 39.11.8/5.5/18.34.4/10.2 / 30.11.1/3.3/12.84.6/11.6/32.01.4/4.1/14.44.3/10.5 / 29.71.2/3.8/12.3
+VC25.2/13.9 / 41.82.1/6.5/22.55.9 / 16.5/46.32.5/8.6/29.15.4/14.2 / 40.52.1/7.3/25.94.3/9.8 / 34.61.7/5.2/19.54.7/12.6 / 36.82.2/7.1/23.83.8 / 10.5 / 32.51.6/5.1/20.7
36017.1 / 30.8 / 66.18.5 / 20.1 / 47.518.2 / 34.6 / 64.27.0 / 18.7 / 45.315.8 / 31.2 /60.4 / 7.0 / 17.8 / 42.817.6 / 31.8 / 68.18.7 / 22.0 / 56.018.7 / 34.9 / 68.17.3 / 20.0 / 53.416.6 / 32.6 / 65.77.1 / 18.7 / 50.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.57, + 0.893, + 0.618 + ], + "angle": 0, + "content": "Table 5. Local matching localization results. The average percentage of predictions with high (0.25m, \\(2^{\\circ}\\)), medium (0.5m, \\(5^{\\circ}\\)), and low (5m, \\(10^{\\circ}\\)) accuracy [46] (higher is better) over four scenes. # indicates the highest value for each device w and w/o virtual cameras (VC1, VC2) of each accuracy level. The best results for all devices of each accuracy level are in bold with # ." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.645, + 0.471, + 0.692 + ], + "angle": 0, + "content": "neous locations with similar structures due to their narrower FoV even there is no cross-device domain gap during IR by applying VC2 (Some figures in supplementary material)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.707, + 0.262, + 0.723 + ], + "angle": 0, + "content": "5.2. Visual Localization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.732, + 0.47, + 0.824 + ], + "angle": 0, + "content": "We compare our approach with the following baselines in two categories: 1) Local feature matching pipelines tailored from HLoc [42], using different keypoint descriptors (Superpoint (SP) [16] and DISK [55]), and matchers (SuperGlue (SG) [43], follow-up SOTA LightGlue (LG) [31]). 2) The end-to-end APRs: PN [23, 24] and MS-T [49]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Local feature matching: During local feature matching, all \\(360^{\\circ}\\) images are cropped to \\(1228 \\times 614\\) because of the tradeoff of time and computation. We report the average results over four scenes in Table 5. The \\(360^{\\circ}\\) query achieves the best performance in three accuracy levels in most cases" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.645, + 0.892, + 0.828 + ], + "angle": 0, + "content": "across all IR, keypoint descriptors, and matchers settings. It is especially more robust in challenging nighttime conditions. VC1 and VC2 techniques improve the recall and precision of IR, increasing the accuracy of 2D-2D matching for all cameras. In most cases, the performance at the low accuracy level \\((5m, 10^{\\circ})\\) is correlated with the FoV, where a larger FoV results in higher performance. However, the pin-hole query with VC2 during IR performs comparably to the \\(360^{\\circ}\\) queries at the high \\((0.25m, 2^{\\circ})\\) and median \\((0.5m, 5^{\\circ})\\) accuracy levels. In contrast, query frames from \\(c_{1}, c_{2}\\) and \\(c_{3}\\) demonstrate relatively lower performance at the high and medium accuracy levels." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.895, + 0.903 + ], + "angle": 0, + "content": "As observed in Table 4, different IR methods display different performances depending on the type of camera. We thus consider both NetVLAD and CosPlace in visual localization. In most cases, \\(360^{\\circ}\\) query frames achieve higher" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "22320" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.243 + ], + "angle": 0, + "content": "accuracy with CosPlace while pinhole and fisheye query frames have lower accuracy than NetVLAD as shown in Table 5. These results match the precision and recall difference noted in Table 4. 
We believe that the FoV not only affects the robustness of IR but also has an impact on local 2D-2D matching performance. Pinhole queries suffer from erroneous matches due to interference from symmetrical and repetitive structures, while the larger FoV of fisheye and \\(360^{\\circ}\\) query frames capture more unique visual features. We provide examples in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.244, + 0.473, + 0.653 + ], + "angle": 0, + "content": "APR: APRs cannot extrapolate well beyond the training set [40, 47]. cross-device queries further complicate this challenge by introducing an additional dimension of FoV. Due to the high efficiency of \\(360^{\\circ}\\) mapping, the training set \\(\\mathbf{I}^r\\) in 360Loc contains only around one-third of the images compared to datasets [24]. Figure 5 shows that when PN and MS-T are trained solely on \\(\\mathbf{I}^r\\) with only \\(360^{\\circ}\\) images, a smaller domain gap between the query and the \\(360^{\\circ}\\) image yields a lower error. However, when we introduce images from virtual cameras for data augmentation, \\(\\mathrm{PN}^{vc2}\\) and MS-\\(\\mathrm{T}^{vc2}\\) exhibit significantly reduced translation and rotation errors across all queries, particularly during daytime. MS-\\(\\mathrm{T}^{vc2}\\) reduces translation error by up to \\(79\\%\\) and rotation error by up to \\(72\\%\\) compared to MS-T. \\(\\mathrm{PN}^{vc2}\\) displays similar improvement over PN. In most cases, except for \\(\\mathrm{PN}^{vc2}\\), s rotation error for the \\(360^{\\circ}\\) queries during daytime, both the \\(360^{\\circ}\\) and fisheye queries exhibit higher accuracy than the pinhole query on \\(\\mathrm{PN}^{vc2}\\) and MS-\\(\\mathrm{T}^{vc2}\\). This suggests that a larger FoV still helps improve visual localization accuracy in challenging scenes. Another interesting finding is that even though the augmented training set \\(\\mathbf{I}^r \\cup \\Psi_c(\\mathbf{I}^r)\\), which includes virtual camera images, does not increase the number of \\(360^{\\circ}\\) images, the error for the \\(360^{\\circ}\\) query still decreases. This reduction is particularly noticeable in the case of translation errors during daytime. The result fully demonstrates the utility of employing virtual cameras for data augmentation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.665, + 0.182, + 0.681 + ], + "angle": 0, + "content": "5.3. Analysis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Cross-device visual positioning presents significant challenges for IR, local matching, and APRs. Our VC1 and VC2 methods demonstrate practical enhancements in the performance of IR and APR for cross-device scenarios. However, it is essential to note that during the local matching process, the accuracy of matches and the recall and precision of IR for query frames from different cameras may not align perfectly. The chosen IR method and its training noticeably affect accuracy for similar cameras. Fisheye cameras exhibit better performance in IR compared to pinhole cameras. However, pinhole cameras outperform fisheye cameras for high accuracy and median accuracy levels in local matching. 
This is likely due to existing feature extraction and matching models lacking training data on \\(360^{\\circ}\\) and fisheye" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.088, + 0.697, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.568, + 0.175, + 0.652, + 0.188 + ], + "angle": 0, + "content": "(a) Trans. (day)" + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.089, + 0.874, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.747, + 0.175, + 0.821, + 0.188 + ], + "angle": 0, + "content": "(b) Rot. (day)" + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.188, + 0.7, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.564, + 0.275, + 0.656, + 0.287 + ], + "angle": 0, + "content": "(c) Trans. (night)" + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.188, + 0.874, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.743, + 0.275, + 0.825, + 0.287 + ], + "angle": 0, + "content": "(d) Rot. (night)" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.299, + 0.892, + 0.328 + ], + "angle": 0, + "content": "Figure 5. The average of median translation/rotation errors in \\((m / ^{\\circ})\\) over 4 scenes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.355, + 0.892, + 0.612 + ], + "angle": 0, + "content": "cameras, resulting in less accurate matching. We attribute the inferior performance of pinhole query frames at the low accuracy level to IR's insufficient recall and precision. Additionally, pinhole queries are more susceptible to interference when there are many repetitive and symmetrical features in the scene, even when the retrieved reference image is correct (some example figures in the supplementary material). By utilizing VC2 to augment IR and APR's training data, we eliminate the cross-device domain gap. We demonstrate that panoramic perspective and a larger FoV can significantly improve the performance of IR and APRs and find that query frames from \\(360^{\\circ}\\) camera and ultra-wide FoV cameras are less prone to being misidentified as erroneous locations with similar structures. This result suggests the promising potential of fisheye and \\(360^{\\circ}\\) cameras as viable sensors for localization tasks in indoor environments with low GPS accuracy." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.627, + 0.62, + 0.642 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.65, + 0.892, + 0.901 + ], + "angle": 0, + "content": "360Loc is the first dataset and benchmark that explores the challenge of cross-device visual positioning, involving \\(360^{\\circ}\\) reference frames, and query frames from pinhole, ultra-wide FoV fisheye, and \\(360^{\\circ}\\) cameras. We first identified the absence of datasets with ground truth 6DoF poses for \\(360^{\\circ}\\) images, and the limited research on cross-device localization and the robustness of different cameras in ambiguous scenes. To address these limitations, we build a dataset with \\(360^{\\circ}\\) images as reference and query frames from pinhole, ultra-wide FoV fisheye camera and \\(360^{\\circ}\\) cameras via a virtual camera solution. This method enables fair comparisons in cross-device visual localization tasks and helps reduce the domain gap between different cameras. 
By evaluating feature-matching-based and pose regression-based methods, we demonstrate the effectiveness of our virtual camera approach and the increased robustness of \\(360^{\\circ}\\) cameras in visual localization for challenging and ambiguous scenes." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "22321" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.119, + 0.176, + 0.135 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.144, + 0.47, + 0.214 + ], + "angle": 0, + "content": "[1] Relja Arandjelovic, Petr Gronat, Akihiko Torii, Tomas Pajdla, and Josef Sivic. Netvlad: Cnn architecture for weakly supervised place recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5297-5307, 2016. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.215, + 0.47, + 0.257 + ], + "angle": 0, + "content": "[2] Iro Armeni, Sasha Sax, Amir R Zamir, and Silvio Savarese. Joint 2d-3d-semantic data for indoor scene understanding. arXiv preprint arXiv:1702.01105, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.257, + 0.471, + 0.325 + ], + "angle": 0, + "content": "[3] Gabriele Berton, Carlo Masone, and Barbara Caputo. Rethinking visual geo-localization for large-scale applications. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4878-4888, 2022. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.326, + 0.471, + 0.383 + ], + "angle": 0, + "content": "[4] Hunter Blanton, Connor Greenwell, Scott Workman, and Nathan Jacobs. Extending absolute pose regression to multiple scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.383, + 0.471, + 0.422 + ], + "angle": 0, + "content": "[5] Eric Brachmann and Carsten Rother. Learning less is more - 6D camera localization via 3D surface regression. In CVPR, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.424, + 0.471, + 0.465 + ], + "angle": 0, + "content": "[6] Eric Brachmann and Carsten Rother. Visual camera relocalization from RGB and RGB-D images using DSAC. TPAMI, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.467, + 0.471, + 0.522 + ], + "angle": 0, + "content": "[7] Eric Brachmann, Alexander Krull, Sebastian Nowozin, Jamie Shotton, Frank Michel, Stefan Gumhold, and Carsten Rother. DSAC-Differentiable RANSAC for camera localization. In CVPR, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.523, + 0.471, + 0.579 + ], + "angle": 0, + "content": "[8] Samarth Brahmbhatt, Jinwei Gu, Kihwan Kim, James Hays, and Jan Kautz. Geometry-aware learning of maps for camera localization. In IEEE conference on computer vision and pattern recognition, 2018. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.58, + 0.471, + 0.634 + ], + "angle": 0, + "content": "[9] Mai Bui, Tolga Birdal, Haowen Deng, Shadi Albarqouni, Leonidas Guibas, Slobodan Ilic, and Nassir Navab. 6d camera relocalization in ambiguous scenes via continuous multi-modal inference. 2020. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.635, + 0.471, + 0.69 + ], + "angle": 0, + "content": "[10] Nicholas Carlevaris-Bianco, Arash K Ushani, and Ryan M Eustice. University of michigan north campus long-term vision and lidar dataset. 
The International Journal of Robotics Research, 35(9):1023-1035, 2016. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.691, + 0.471, + 0.761 + ], + "angle": 0, + "content": "[11] David M Chen, Georges Baatz, Kevin Koser, Sam S Tsai, Ramakrishna Vedantham, Timo Pylvanäinen, Kimmo Roimela, Xin Chen, Jeff Bach, Marc Pollefeys, et al. City-scale landmark identification on mobile devices. In CVPR 2011, pages 737-744. IEEE, 2011. 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.761, + 0.471, + 0.816 + ], + "angle": 0, + "content": "[12] Shuai Chen, Zirui Wang, and Victor Prisacariu. Directposenet: absolute pose regression with photometric consistency. In 2021 International Conference on 3D Vision (3DV), pages 1175-1185. IEEE, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.817, + 0.471, + 0.873 + ], + "angle": 0, + "content": "[13] Shuai Chen, Xinghui Li, Zirui Wang, and Victor A Prisacariu. Dfnet: Enhance absolute pose regression with direct feature matching. In ECCV 2022. Tel Aviv, Israel, October 23-27, 2022, Part X. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.873, + 0.471, + 0.902 + ], + "angle": 0, + "content": "[14] Santiago Cortés, Arno Solin, Esa Rahtu, and Juho Kannala. Advio: An authentic dataset for visual-inertial odometry. In" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.144, + 0.471, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.121 + ], + "angle": 0, + "content": "Proceedings of the European Conference on Computer Vision (ECCV), pages 419-434, 2018. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.743, + 0.136 + ], + "angle": 0, + "content": "[15] Dawson-Haggerty et al. trimesh. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.137, + 0.892, + 0.207 + ], + "angle": 0, + "content": "[16] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 224-236, 2018. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.208, + 0.892, + 0.29 + ], + "angle": 0, + "content": "[17] Mihai Dusmanu, Ignacio Rocco, Tomas Pajdla, Marc Pollefeys, Josef Sivic, Akihiko Torii, and Torsten Sattler. D2-net: A trainable cnn for joint description and detection of local features. In Proceedings of the IEEE/cvf conference on computer vision and pattern recognition, pages 8092-8101, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.291, + 0.892, + 0.348 + ], + "angle": 0, + "content": "[18] Yixiao Ge, Haibo Wang, Feng Zhu, Rui Zhao, and Hongsheng Li. Self-supervising fine-grained region similarities for large-scale image localization. In European Conference on Computer Vision, 2020. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.349, + 0.892, + 0.376 + ], + "angle": 0, + "content": "[19] Daniel Girardeau-Montaut. Cloudcompare. France: EDF R&D Telecom ParisTech, 11, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.377, + 0.892, + 0.419 + ], + "angle": 0, + "content": "[20] A. Gordo, J. Almazan, J. Revaud, and D. Larlus. End-to-end learning of deep visual representations for image retrieval. *IJCV*, 2017. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.421, + 0.892, + 0.476 + ], + "angle": 0, + "content": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 
Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.477, + 0.892, + 0.546 + ], + "angle": 0, + "content": "[22] Huajian Huang, Yinzhe Xu, Yingshu Chen, and Sai-Kit Yeung. 360vot: A new benchmark dataset for omnidirectional visual object tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20566–20576, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.548, + 0.892, + 0.603 + ], + "angle": 0, + "content": "[23] Alex Kendall and Roberto Cipolla. Geometric loss functions for camera pose regression with deep learning. In IEEE conference on computer vision and pattern recognition, pages 5974-5983, 2017. 1, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.604, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[24] Alex Kendall, Matthew Grimes, and Roberto Cipolla. Posenet: A convolutional network for real-time 6-dof camera relocalization. In Proceedings of the IEEE international conference on computer vision, pages 2938-2946, 2015. 1, 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.675, + 0.892, + 0.732 + ], + "angle": 0, + "content": "[25] Junho Kim, Changwoon Choi, Hojun Jang, and Young Min Kim. Piccolo: point cloud-centric omnidirectional localization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3313-3323, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.733, + 0.892, + 0.788 + ], + "angle": 0, + "content": "[26] Junho Kim, Hojun Jang, Changwoon Choi, and Young Min Kim. Cpo: Change robust panorama to point cloud localization. In European Conference on Computer Vision, pages 176-192. Springer, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.789, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[27] Junho Kim, Eun Sun Lee, and Young Min Kim. Calibrating panoramic depth estimation for practical localization and mapping. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8830-8840, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[28] Laurent Kneip and Paul Furgale. Opengv: A unified and generalized approach to real-time calibrated geometric vision. In 2014 IEEE international conference on robotics and automation (ICRA), pages 1-8. IEEE, 2014. 6" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "22322" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[29] Kenji Koide, Shuji Oishi, Masashi Yokozuka, and Atsuhiko Banno. General, single-shot, target-less, and automatic lidar-camera extrinsic calibration toolbox. arXiv preprint arXiv:2302.05094, 2023. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.47, + 0.233 + ], + "angle": 0, + "content": "[30] Donghwan Lee, Soohyun Ryu, Suyong Yeon, Yonghan Lee, Deokhwa Kim, Cheolho Han, Yohann Cabon, Philippe Weinzaepfel, Nicolas Guérin, Gabriela Csurka, et al. Large-scale localization datasets in crowded indoor spaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3227-3236, 2021. 
3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.234, + 0.47, + 0.275 + ], + "angle": 0, + "content": "[31] Philipp Lindenberger, Paul-Edouard Sarlin, and Marc Pollefeys. LightGlue: Local Feature Matching at Light Speed. In ICCV, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.278, + 0.47, + 0.359 + ], + "angle": 0, + "content": "[32] Haomin Liu, Linsheng Zhao, Zhen Peng, Weijian Xie, Mingxuan Jiang, Hongbin Zha, Hujun Bao, and Guofeng Zhang. A low-cost and scalable framework to build large-scale localization benchmark for augmented reality. IEEE Transactions on Circuits and Systems for Video Technology, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.362, + 0.469, + 0.417 + ], + "angle": 0, + "content": "[33] Liu Liu, Hongdong Li, and Yuchao Dai. Efficient global 2d-3d matching for camera localization in a large-scale 3d map. In Proceedings of the IEEE International Conference on Computer Vision, pages 2372-2381, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.42, + 0.469, + 0.46 + ], + "angle": 0, + "content": "[34] Zheng Liu and Fu Zhang. Balm: Bundle adjustment for lidar mapping. IEEE Robotics and Automation Letters, 6(2): 3184-3191, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.462, + 0.469, + 0.503 + ], + "angle": 0, + "content": "[35] David G Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60:91-110, 2004. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.505, + 0.469, + 0.56 + ], + "angle": 0, + "content": "[36] Iaroslav Melekhov, Juha Ylioinas, Juho Kannala, and Esa Rahtu. Image-based localization using hourglass networks. In IEEE international conference on computer vision workshops, 2017. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.563, + 0.47, + 0.631 + ], + "angle": 0, + "content": "[37] Arthur Moreau, Nathan Piasco, Dzmitry Tsishkou, Bogdan Stanciulescu, and Arnaud de La Fortelle. Coordinet: uncertainty-aware pose regressor for reliable vehicle localization. In IEEE/CVF Winter Conference on Applications of Computer Vision, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.633, + 0.469, + 0.715 + ], + "angle": 0, + "content": "[38] Jeffri Murragarra-Llerena, Thiago LT Da Silveira, and Claudio R Jung. Pose estimation for two-view panoramas based on keypoint matching: A comparative study and critical analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5202-5211, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.718, + 0.469, + 0.759 + ], + "angle": 0, + "content": "[39] Ken Museth. Vdb: High-resolution sparse volumes with dynamic topology. ACM transactions on graphics (TOG), 32 (3):1-22, 2013. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.469, + 0.815 + ], + "angle": 0, + "content": "[40] Tony Ng, Adrian Lopez-Rodriguez, Vassileios Balntas, and Krystian Mikolajczyk. Reassessing the limitations of cnn methods for camera pose regression. arXiv preprint arXiv:2108.07260, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[41] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019. 
6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.161 + ], + "angle": 0, + "content": "[42] Paul-Edouard Sarlin, Cesar Cadena, Roland Siegwart, and Marcin Dymczyk. From coarse to fine: Robust hierarchical localization at large scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12716–12725, 2019. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.163, + 0.894, + 0.232 + ], + "angle": 0, + "content": "[43] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4938–4947, 2020. 2, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.233, + 0.892, + 0.315 + ], + "angle": 0, + "content": "[44] Paul-Edouard Sarlin, Mihai Dusmanu, Johannes L Schonberger, Pablo Speciale, Lukas Gruber, Viktor Larsson, Ondrej Miksik, and Marc Pollefeys. Lamar: Benchmarking localization and mapping for augmented reality. In European Conference on Computer Vision, pages 686-704. Springer, 2022. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.316, + 0.892, + 0.371 + ], + "angle": 0, + "content": "[45] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. Efficient & effective prioritized matching for large-scale image-based localization. IEEE transactions on pattern analysis and machine intelligence, 39(9):1744-1756, 2016. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.372, + 0.892, + 0.455 + ], + "angle": 0, + "content": "[46] Torsten Sattler, Will Maddern, Carl Toft, Akihiko Torii, Lars Hammarstrand, Erik Stenborg, Daniel Safari, Masatoshi Okutomi, Marc Pollefeys, Josef Sivic, et al. Benchmarking 6dof outdoor visual localization in changing conditions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8601-8610, 2018. 3, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.456, + 0.892, + 0.51 + ], + "angle": 0, + "content": "[47] Torsten Sattler, Qunjie Zhou, Marc Pollefeys, and Laura Leal-Taixe. Understanding the limitations of cnn-based absolute camera pose regression. In IEEE/CVF conference on computer vision and pattern recognition, 2019. 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.511, + 0.892, + 0.593 + ], + "angle": 0, + "content": "[48] Thomas Schops, Johannes L Schonberger, Silvano Galliani, Torsten Sattler, Konrad Schindler, Marc Pollefeys, and Andreas Geiger. A multi-view stereo benchmark with high-resolution images and multi-camera videos. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3260-3269, 2017. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.595, + 0.892, + 0.649 + ], + "angle": 0, + "content": "[49] Yoli Shavit, Ron Ferens, and Yoshi Keller. Learning multiscene absolute pose regression with transformers. In IEEE/CVF International Conference on Computer Vision, pages 2733-2742, 2021. 1, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.65, + 0.892, + 0.718 + ], + "angle": 0, + "content": "[50] Jamie Shotton, Ben Glocker, Christopher Zach, Shahram Izadi, Antonio Criminisi, and Andrew Fitzgibbon. Scene coordinate regression forests for camera relocalization in rgb-d images. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2930-2937, 2013. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.72, + 0.892, + 0.748 + ], + "angle": 0, + "content": "[51] Alexander D Stewart. Localisation using the appearance of prior structure. PhD thesis, University of Oxford, 2014. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.749, + 0.892, + 0.815 + ], + "angle": 0, + "content": "[52] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8922-8931, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.818, + 0.892, + 0.872 + ], + "angle": 0, + "content": "[53] Xun Sun, Yuanfan Xie, Pei Luo, and Liang Wang. A dataset for benchmarking image-based localization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7436-7444, 2017. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[54] Hajime Taira, Masatoshi Okutomi, Torsten Sattler, Mircea Cimpoi, Marc Pollefeys, Josef Sivic, Tomas Pajdla, and Ak" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22323" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "ihiko Torii. Inloc: Indoor visual localization with dense matching and view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7199-7209, 2018. 1, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.47, + 0.204 + ], + "angle": 0, + "content": "[55] Michal Tyszkiiewicz, Pascal Fua, and Eduard Trulls. Disk: Learning local features with policy gradient. Advances in Neural Information Processing Systems, 33:14254-14265, 2020. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.207, + 0.471, + 0.26 + ], + "angle": 0, + "content": "[56] Vladyslav Usenko, Nikolaus Demmel, and Daniel Cremers. The double sphere camera model. In 2018 International Conference on 3D Vision (3DV), pages 552-560. IEEE, 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.263, + 0.471, + 0.304 + ], + "angle": 0, + "content": "[57] Ignacio Vizzo, Tiziano Guadagnino, Jens Behley, and Cyril Stachniss. Vdbfusion: Flexible and efficient tsdf integration of range sensor data. Sensors, 22(3):1296, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.471, + 0.388 + ], + "angle": 0, + "content": "[58] Johanna Wald, Torsten Sattler, Stuart Golodetz, Tommaso Cavallari, and Federico Tombari. Beyond controlled environments: 3d camera re-localization in changing indoor scenes. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VII 16, pages 467-487. Springer, 2020. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.39, + 0.471, + 0.445 + ], + "angle": 0, + "content": "[59] Jian Wu, Liwei Ma, and Xiaolin Hu. Delving deeper into convolutional neural networks for camera relocalization. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 5644-5651. IEEE, 2017. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.446, + 0.47, + 0.5 + ], + "angle": 0, + "content": "[60] Hang Xu, Qiang Zhao, Yike Ma, Xiaodong Li, Peng Yuan, Bailan Feng, Chenggang Yan, and Feng Dai. Pandora: A panoramic detection dataset for object with orientation. In ECCV, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.503, + 0.471, + 0.585 + ], + "angle": 0, + "content": "[61] Shen Yan, Yu Liu, Long Wang, Zehong Shen, Zhen Peng, Haomin Liu, Maojun Zhang, Guofeng Zhang, and Xiaowei Zhou. Long-term visual localization with mobile sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17245-17255, 2023. 1, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.587, + 0.471, + 0.628 + ], + "angle": 0, + "content": "[62] Dawen Yu and Shunping Ji. Grid based spherical cnn for object detection from panoramic images. Sensors, 19(11): 2622, 2019. 1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.628 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "22324" + } + ] +] \ No newline at end of file diff --git a/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/15eb225d-3032-419c-84b0-35d6ec576cbc_origin.pdf b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/15eb225d-3032-419c-84b0-35d6ec576cbc_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ef312db5963464d152be6ab636204d542ceca501 --- /dev/null +++ b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/15eb225d-3032-419c-84b0-35d6ec576cbc_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2abcc7cbf6a0fe4c6079ceeaf4fc07e39a5fffed2cfb3ef1a507f409d831f68a +size 8065437 diff --git a/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/full.md b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e1868a90a54ed45b7ac48f5fcbb918207780c523 --- /dev/null +++ b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/full.md @@ -0,0 +1,295 @@ +# 360Loc: A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries + +Huajian Huang $^{1*}$ Changkun Liu $^{1*}$ Yipeng Zhu $^{1}$ Hui Cheng $^{2}$ Tristan Braud $^{1}$ Sai-Kit Yeung $^{1}$ $^{1}$ The Hong Kong University of Science and Technology * equal contribution Sun Yat-sen University + +{hhuangbg, cliudg, yzhudg}@connect.ust.hk, chengh9@mail.sysu.edu.cn, {braudit, saikit}@ust.hk + +# Abstract + +Portable $360^{\circ}$ cameras are becoming a cheap and efficient tool to establish large visual databases. By capturing omnidirectional views of a scene, these cameras could expedite building environment models that are essential for visual localization. However, such an advantage is often overlooked due to the lack of valuable datasets. This paper introduces a new benchmark dataset, 360Loc, composed of $360^{\circ}$ images with ground truth poses for visual localization. We present a practical implementation of $360^{\circ}$ mapping combining $360^{\circ}$ images with lidar data to generate the ground truth 6DoF poses. 
360Loc is the first dataset and benchmark that explores the challenge of cross-device visual positioning, involving $360^{\circ}$ reference frames, and query frames from pinhole, ultra-wide FoV fisheye, and $360^{\circ}$ cameras. We propose a virtual camera approach to generate lower-FoV query frames from $360^{\circ}$ images, which ensures a fair comparison of performance among different query types in visual localization tasks. We also extend this virtual camera approach to feature matching-based and pose regression-based methods to alleviate the performance loss caused by the cross-device domain gap, and evaluate its effectiveness against state-of-the-art baselines. We demonstrate that omnidirectional visual localization is more robust in challenging large-scale scenes with symmetries and repetitive structures. These results provide new insights into 360-camera mapping and omnidirectional visual localization with cross-device queries. Project Page and dataset: https://huajianup.github.io/research/360Loc/. + +# 1. Introduction + +Visual localization refers to predicting the 6DoF absolute pose (translation and rotation) of query images in a known scene. Accurate visual localization has wide applications in augmented reality (AR), navigation, and robotics. + +Over the last decade, many visual localization methods have been proposed, including feature matching-based approaches [17, 33, 42, 45, 54], scene coordinate regression [5-7] and absolute pose regressors (APRs) [23, 24, 49]. Much of this progress has been driven by the availability of numerous datasets and benchmarks targeting different challenges, as shown in Table 1. However, existing methods and datasets focus on localization and mapping using pinhole images. Although the merits of $360^{\circ}$ camera on visual perception have been recognized [22, 60, 62], the application of $360^{\circ}$ cameras for visual localization is still under-explored. Recently, SensLoc [61] started to apply $360^{\circ}$ cameras to facilitate data collection, but their pipeline cannot perform omnidirectional localization directly from the $360^{\circ}$ images. + +This paper introduces 360Loc, a new challenging benchmark dataset to facilitate research on omnidirectional visual localization. The dataset contains $360^{\circ}$ images captured in diverse campus-scale indoor and outdoor environments, featuring highly symmetrical and repetitive features, as well as interference of dynamic objects. To capture this dataset, we present a practical pipeline using a portable 360-cameras platform to obtain reliable pose estimations of $360^{\circ}$ cameras as ground truth. Although $360^{\circ}$ cameras present significant advantages for capturing reference data, real-life applications applying visual localization often rely on traditional cameras. Examples include robots equipped with fisheye cameras and phone-based AR applications using the embedded pinhole camera. This raises the problem of cross-device visual localization on image databases captured with $360^{\circ}$ cameras. We thus supplement the reference database composed of $360^{\circ}$ images with query frames including pin-hole, fisheye and $360^{\circ}$ cameras. + +We introduce the concept of virtual camera to generate high-quality lower-FoV images with different camera parameters from $360^{\circ}$ images. This enables a fair comparison of performance among queries from different devices in cross-device visual localization. 
We adapt existing feature-matching-based methods and APRs to support $360^{\circ}$ image queries and benchmark these methods for 360-based cross- + +![](images/db604ce410d40d69b14afc1112843cff89686cce30fc31d6967b76f9a479c2a0.jpg) + +![](images/b04c5ebe9bb3f7b53d057b8ed2eecf41bd550b802b96fd8124e25e075458ecc4.jpg) +Figure 1. Overview of dataset collection and ground truth generation: 1) Use the platform to collect $360^{\circ}$ images and frame-by-frame point clouds. Obtain real-time camera poses; 2) Apply optimization methodology to achieve data registration, resulting in a globally reconstructed point cloud model. Then, align the models in daytime and nighttime to get consistent poses; 3) Perform cropping to get virtual camera images and generate corresponding depth images. As a result, 360Loc takes advantage of $360^{\circ}$ images for efficient mapping while providing query images in five different camera models in order to analyze the challenge of cross-domain visual localization. + +![](images/49886da25d424f9dd2d7cd4d9c452062c8af942c39c265d2b7ac498dce54b55d.jpg) + +![](images/5a0f63ab7919017480006a31cc712dd989bce962a717e19ed0f54edc146729a3.jpg) + +device visual localization. Since different cameras present different imaging patterns, the cross-device domain gap is expected to lead to performance loss. We extend the virtual camera approach to data augmentation for end-to-end solutions such as image retrieval (IR) and APRs. + +By conducting exhaustive evaluations, we demonstrate the advantages of $360^{\circ}$ cameras in reducing ambiguity in visual localization on scenes featuring symmetric or repetitive features. We also show improvements against state-of-the-art (SOTA) baselines using the virtual camera method for cross-device visual localization on images databases captured with $360^{\circ}$ cameras. These results provide novel insights on mapping using $360^{\circ}$ images, enhancing the anti-ambiguity capability of query images, reducing domain gap cross-device in visual localization, and improving the generalization ability of APRs by applying virtual cameras. + +Our contribution can be summarized as follows: + +- We propose a practical implementation of $360^{\circ}$ mapping combining lidar data with $360^{\circ}$ images for establishing the ground truth 6DoF poses. +- A virtual camera approach to generate high-quality lower-FoV images with different camera parameters from $360^{\circ}$ views. +- A novel dataset for cross-device visual localization based on $360^{\circ}$ reference images with pinhole, fisheye, and $360^{\circ}$ query images. + +- Demonstration of our approach's efficacy over state-of-the-art solutions for visual localization using $360^{\circ}$ image databases, resulting in decreased localization ambiguity, reduced cross-device domain gap, and improved generalization ability of APRs. + +# 2. Related work + +# 2.1. Visual Localization + +Structure-based methods predict camera poses by establishing 2D-3D correspondences indirectly with local feature extractors and matchers [16, 35, 42, 43, 52, 55] or directly with scene coordinate regression [5-7]. HLoc [42, 43] pipeline scales up to large scenes using image retrieval [1, 3, 18, 20] as an intermediate step, which achieves SOTA accuracy on many benchmarks. This type of approach usually supports pinhole cameras with different intrinsic parameters. 
However, the performance of $360^{\circ}$ and fisheye cameras has not been evaluated before due to the lack of support for $360^{\circ}$ cameras in the Structure from Motion (SfM) tools like COLMAP [45] and the lack of datasets for fisheye and $360^{\circ}$ cameras. [25-27] are point-cloud-based panorama localization methods for $360^{\circ}$ queries but they do not consider cross-device visual localization. + +Absolute Pose Regressors (APRs) are end-to-end learning-based methods that directly regress the absolute camera + +pose from input images without the knowledge of 3D models and establish 2D-3D correspondences. APRs [4, 8, 12, 13, 23, 24, 36, 37, 49, 59] provide faster inference than structure-based methods at the cost of accuracy and robustness [47]. Besides, APRs have generally only been tested on the [9], 7Scenes [50], and Cambridge Landmarks [24] datasets in previous studies. A notable characteristic of these datasets is that the training set and test set images were taken from the same camera. In this paper, we enhance cross-device pose regression for APRs by introducing virtual cameras as a data augmentation technique. + +# 2.2. Datasets + +The existing dataset has the following limitations. 1). Most datasets [9, 10, 24, 50, 54, 58] do not consider the need for cross-device localization, i.e., query images come from the same camera. Even though some datasets [11, 14, 30, 44, 46, 48, 53, 61] take into account cross-device localization, these devices are only pinhole cameras with different camera intrinsic parameters and do not have particularly large domain-gaps. Compared to [32], our pinhole and fisheye images are extracted from $360^{\circ}$ images via virtual cameras, which makes less demands on the device and allows for a fair and more flexible comparison of the effects of different FoVs. In this paper, our 360Loc datasets provide five kinds of queries from pinhole, fisheye and $360^{\circ}$ cameras to promote the research of cross-device localization. 2). Now there is no 6DoF visual localization dataset and benchmark considering $360^{\circ}$ reference images and $360^{\circ}$ query images, even though [2, 25, 38] contain $360^{\circ}$ images with 6DoF pose labels, they are not standard visual localization datasets with independent mapping/reference sequences and query sequences like datasets in Table 1. Other datasets [11, 61] use $360^{\circ}$ cameras for data collection, in the end they cropped $360^{\circ}$ to perspective images and then tailor these images to the classical visual localization pipeline of pinhole cameras. The academic community is mainly driven by benchmarks where all training, reference, and query images are pinhole images because they rely on SfM tools [45] which does not support $360^{\circ}$ cameras to obtain ground-truth (GT) and get sparse 3D point cloud models for recovering camera poses. However, we note that the $360^{\circ}$ camera can cover the scene with greater efficiency than normal pinhole cameras with narrow Field-of-View (FoV), which makes $360^{\circ}$ images particularly suitable as reference images. 3) Although the current dataset has explored the challenges of visual localization from various aspects such as weather variations, daynight transitions, scene changes, and moving individuals and objects [24, 30, 44, 46, 58, 61], there is still insufficient research specifically targeting highly ambiguous environments which contain symmetries, repetitive structures and insufficient textures. 
Only two indoor datasets [9, 53] + +![](images/5a21dc87b08877d924835e56c6a68dbaca2e7f703bb022dd57a74f2331ae6cad.jpg) +Figure 2. The four scenes in 360Loc, all four scenes contain symmetrical, repetitive structures and moving objects. The camera trajectories are visualized as spheres. + +and LaMAR [44] consider challenges in ambiguous environments. In this paper, we studied 4 ambiguous scenes from both indoor and outdoor environments with a scale much larger than dataset [9] (See Figure 2). We conduct exhaustive assessments of image retrieval, local matching localization, and absolute pose regression to show that queries from the $360^{\circ}$ camera are harder to obtain plausible solutions than other queries from cameras with narrower FoV. + +# 3. The 360Loc Dataset + +The 360Loc dataset contains 4 locations from a local university. Figure 2 displays the reference point cloud and example frames from each scene. Atrium is inside a building with a surrounding structure that exhibits a high degree of symmetry and repetition, making it a highly ambiguous environment. Concourse is a large indoor scene with many moving people, which can be used for evaluating the robustness of any localization algorithm in scenes with many moving objects. Piatrium is a scene containing both indoor Atrium and outdoor environments, covering an outdoor piazza with coffee shops, bookstores, and souvenir shops. Hall is a modern building of a student dormitory. + +# 3.1. 360 Mapping Platform + +We utilized the handheld multimodal data acquisition platform depicted in Figure 1 for data collection. This platform incorporates a $360^{\circ}$ camera, a Velodyne VLP-16 multi-line lidar, an NUC mini-computer, and a display screen. Figure 1 also illustrates the relative relationship among the $360^{\circ}$ camera coordinate system $\mathbf{O}_{\mathrm{c}}$ -XYZ, the lidar coordinate system $\mathbf{O}_{\mathrm{l}}$ -XYZ as well as the world coordinate $\mathbf{O}_{\mathrm{w}}$ -XYZ. The portable 360 camera equipped on this device can capture high-resolution omnidirectional images with a resolution of $6144 \times 3072$ (2:1 aspect ratio). It also features a built-in six-axis gyroscope that provides stabilization support, making it suitable for handheld mobile data capture. The Velodyne VLP-16 multi-line lidar has + +

| Dataset | Scale and Environment | Challenges | Reference / Query type | Groundtruth Solution | Accuracy |
| --- | --- | --- | --- | --- | --- |
| 7Scenes [50] | Small Indoor | None | pinhole / pinhole | RGB-D | ≈ cm |
| RIO10 [58] | Small Indoor | Changes | pinhole / pinhole | VIO | > dm |
| Baidu Mall [53] | Medium Indoor | People, Ambiguous | pinhole / pinhole | lidar+Manual | ≈ dm |
| Naver Labs [30] | Medium Indoor | People, Changes | pinhole / pinhole | lidar+SfM | ≈ dm |
| InLoc [54] | Medium Indoor | None | pinhole / pinhole | lidar+Manual | > dm |
| AmbiguousLoc [9] | Small Indoor | Ambiguous | pinhole / pinhole | SLAM | ≈ cm |
| Aachen [46] | Large Outdoor | People, Day-Night | pinhole / pinhole | SfM | > dm |
| Cambridge [24] | Medium Outdoor | People, Weather | pinhole / pinhole | SfM | > dm |
| San Francisco [11] | Large Outdoor | People, Construction | pinhole / pinhole | SfM+GPS | ≈ m |
| NCLT [10] | Medium Outdoor + Indoor | Weather | pinhole / pinhole | GPS+SLAM+lidar | ≈ dm |
| ADVIO [14] | Medium Outdoor + Indoor | People | pinhole / pinhole | VIO+Manual | ≈ m |
| ETH3D [48] | Medium Outdoor + Indoor | None | pinhole / pinhole | lidar+Manual | ≈ mm |
| LaMAR [44] | Medium Outdoor + Indoor | People, Weather, Day-Night, Construction, Changes, Ambiguous | pinhole / pinhole | lidar+SfM+VIO | ≈ cm |
| SensLoc [61] | Large Outdoor | People, Weather, Day-Night, Construction, Changes | pinhole / pinhole | SL+VIO+RTK+Gravity | < dm |
| 360Loc (ours) | Medium Outdoor + Indoor | People, Weather, Day-Night, Construction, Changes, Ambiguous | 360 / (360 + pinhole + fisheye) | lidar+VIO | ≈ cm |
+ +Table 1. Overview of popular visual localization datasets. No dataset, besides ours, consider $360^{\circ}$ images as reference and query frames from pinhole, ultra-wide FoV fisheye, and $360^{\circ}$ cameras. + +

| Symbol | Name | Field of View | Resolution | Type |
| --- | --- | --- | --- | --- |
| $c_0$ | 360 | 360° | 6144×3072 | reference/query |
| $c_1$ | fisheye1 | 120° | 1280×1024 | query |
| $c_2$ | fisheye2 | 150° | 1280×1024 | query |
| $c_3$ | fisheye3 | 195° | 1280×1024 | query |
| $c_4$ | pinhole | 85° | 1920×1200 | query |
+ +Table 2. The representation and parameters of 5 cameras. + +

| Scene | # Reference Frames (360) | # Query Frames 360 (day/night) | Pinhole (day/night) | Fisheye1 (day/night) | Fisheye2 (day/night) | Fisheye3 (day/night) | Spatial Extent (m) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Concourse | 491 | 593/514 | 1186/1028 | 1186/1028 | 1186/1028 | 1186/1028 | 93 × 15 |
| Hall | 540 | 1123/1061 | 2246/2122 | 2246/2122 | 2246/2122 | 2246/2122 | 105 × 52 |
| Atrium | 581 | 875/1219 | 1750/2438 | 1750/2438 | 1750/2438 | 1750/2438 | 65 × 36 |
| Piatrium | 632 | 1008/697 | 2016/1394 | 2016/1394 | 2016/1394 | 2016/1394 | 98 × 70 |
+ +Table 3. 360Loc dataset description. + +a FoV of $360^{\circ} \times 30^{\circ}$ , angular resolution of $0.2^{\circ} \times 2.0^{\circ}$ , and rotation rate of $10\mathrm{Hz}$ , offering a comprehensive $360^{\circ}$ environmental view. Regarding the calibration of the extrinsic poses between the lidar and the $360^{\circ}$ camera, we employed a calibration toolbox [29] that applies to both lidar and camera projection models. This toolbox utilizes the SuperGlue [43] image matching pipeline to establish 2D-3D correspondences between the lidar and camera image. We perform pseudo-registration by synchronizing the two data modalities, images, and point clouds. Eventually, we use graph-based SLAM techniques for continuous pose estimations. In the four scenes, a total of 18 independent sequences of $360^{\circ}$ images were captured (12 daytime, and 6 nighttime), resulting in a total number of 9334 images. For each scene, we selected a specific sequence captured during the daytime as the reference images, while the remaining images were defined as query images of the $360^{\circ}$ image type. We provide more details and show why $360^{\circ}$ mapping is superior to pinhole SfM in ambiguous scenes with repetitive and symmetric structures in the supplementary material. + +![](images/60b85e647fd37d209120f74b536d02e8c0e48bff2ad2d0d153036a52694d43f3.jpg) +Figure 3. Illustration of obtaining virtual camera images through random poses and image cropping. + +# 3.1.1 Cross-device Queries + +To enable a rigorous comparison of the difference in the performance of different FoV queries for visual localization tasks, we created four virtual cameras with diverse FoV from $360^{\circ}$ cameras, which are shown in Figure 2. Given a $360^{\circ}$ image $\mathcal{I}_{c_0}$ , the corresponding virtual camera with preconfigured intrinsic parameters is extracted by + +$$ +\mathcal {I} _ {c _ {n}} = \Psi_ {c _ {n}} \left(\mathcal {I} _ {c _ {0}}\right) = \pi_ {c _ {n}} ^ {- 1} \left(\pi_ {c _ {0}} \left(\boldsymbol {R} \mathcal {I} _ {c _ {0}}\right)\right), \tag {1} +$$ + +where $\pi_{c_n}$ denote the projection function of virtual camera and $\pi_{c_0}$ is the projection function of $360^{\circ}$ camera. $R\in SO(3)$ is a random relative rotation matrix to increase the diversity of views representing the scenes. Moreover, the inversed operation $\Psi_{c_n}^{-1}$ can convert the $c_{n}$ image back to a $360^{\circ}$ image. As reported in Table 2, the virtual cameras include an undistorted pinhole model with $85^{\circ}$ FoV and three fisheye cameras in Dual Sphere mode [56] with $120^{\circ}$ , $150^{\circ}$ , and $195^{\circ}$ FoV respectively. Table 3 presents the number of image frames in the 360Loc dataset. + +# 3.2. Ground Truth Generation + +Besides the graph-based optimization in SLAM, we designed a set of offline optimization strategies to further improve the accuracy of camera pose estimation. After the acquisition of precise dense point cloud reconstructions and poses of $360^{\circ}$ cameras, an Iterative Closest Point (ICP) algorithm is applied to align models between reference and the query sequences in the same scene. Moreover, we reconstructed the mesh model of the scenes and generated corresponding depth maps of $360^{\circ}$ cameras. + +Bundle Adjustment (BA) of lidar mapping. Incremental map construction can suffer from accumulating errors due to environmental degradation. We utilized a BA framework based on feature points extracted from lidar to refine the map and the poses. 
The optimization process involved minimizing the covariance matrix to constrain the distances between feature points and edge lines or plane features that are mutually matched. + +First, we utilize an octree data structure to perform adaptive voxelization-based feature extraction. In this method, the point cloud map is segmented into voxels of predetermined size. Each voxel is checked to determine if its points $P_{u}^{f}$ lie on a plane or a line, where $u \in \{1,2,\dots ,U\}$ , obtained from the $u$ -th frame of lidar scans. If not, the voxel is recursively subdivided using an octree structure until each voxel contains points $P_{u}^{f}$ belonging to the same feature. Let's assume that the pose of the lidar in each frame is $\pmb{\eta} = \{\pmb{\eta}_1,\pmb{\eta}_2,\dots ,\pmb{\eta}_M\}$ , where $\pmb{\eta}_{u} = (R_{u},t_{u}|R_{u} \in SO(3), t_{u} \in \mathbb{R}^{3})$ . In that case, the feature points in the global map can be represented as follows: + +$$ +\boldsymbol {P} _ {u} = \boldsymbol {R} _ {u} \times \boldsymbol {P} _ {u} ^ {f} + \boldsymbol {t} _ {u}. \tag {2} +$$ + +After simplifying the lidar map to edge or plane features, the process of BA becomes focused on determining the pose $\pmb{\eta}$ and the location of the single feature, which can be represented as $(\pmb{n}_f,\pmb{q})$ , where $\pmb{q}$ represents the location of a specific feature, $\pmb{n}_f$ is the direction vector of an edge line or the normal vector of a plane. To minimize the distance between each feature point and the corresponding feature, we can utilize the BA: + +$$ +\left(\boldsymbol {\eta} ^ {*}, \boldsymbol {n} _ {f} ^ {*}, \boldsymbol {q} ^ {*}\right) = \underset {\boldsymbol {\eta}, \boldsymbol {n} _ {f}, \boldsymbol {q}} {\arg \min } \frac {1}{U} \sum_ {u = 1} ^ {U} \left(\boldsymbol {n} _ {f} ^ {T} \left(\boldsymbol {P} _ {\boldsymbol {u}} - \boldsymbol {q}\right)\right) ^ {2}. \tag {3} +$$ + +It has been proved that when the plane's normal vector is set to the minimum eigenvector, and $\mathbf{q}$ is set to the centroid of the feature, i.e. $\mathbf{q} = \hat{\mathbf{P}} = \frac{1}{U}\sum_{u=1}^{U}\mathbf{P}_{u}$ , Eq. 3 reaches its minimum value. Additionally, the BA problem in lidar mapping has a closed-form solution that is independent of the features $(\mathbf{n}_f,\mathbf{q})$ [34]. It can be simplified to the following problem: + +$$ +\boldsymbol {\eta} ^ {*} = \underset {\boldsymbol {\eta}} {\arg \min } \lambda_ {\min } (\boldsymbol {A}), \tag {4} +$$ + +![](images/0c7f60c6616090658bc51805166bafe8f52d16397e1ad2cdb4eef4425ed8a6f7.jpg) +Figure 4. Overview of GT generation. + +where, $\lambda$ represents the eigenvalue of $A$ , and + +$$ +\boldsymbol {A} = \frac {1}{U} \sum_ {u = 1} ^ {U} \left(\boldsymbol {P} _ {u} - \hat {\boldsymbol {P}}\right) \left(\boldsymbol {P} _ {u} - \hat {\boldsymbol {P}}\right) ^ {T}. \tag {5} +$$ + +Now, the BA problem is simplified by adjusting the lidar pose $\pmb{\eta}$ to minimize the smallest eigenvalue $\lambda_3$ of the point covariance matrix $\mathbf{A}$ defined in Eq. 5. By employing this strategy, we refined the pose $\pmb{\eta}$ of each frame and the edge or plane features in the lidar map. + +Refined cameras poses. The poses of $360^{\circ}$ camera obtained from online SLAM are further optimized by the registration with respect to the dense refined point cloud model. Taking the pre-calibrated extrinsic parameters as the initial guess, we used the RANSAC to refine the lidar-camera transformation [29]. 
This registration process is based on the normalized information distance (NID) [51], which serves as a mutual-information-based cross-modal distance metric. Finally, we align the reference models and the query models into the same coordinate system to generate the ground truth for the query sequences. Specifically, we utilize the CloudCompare toolbox [19] to manually select feature points across multiple point cloud models as initial values, and then employ the ICP algorithm to register the point cloud models together. Afterwards, we apply Truncated Signed Distance Functions (TSDFs) [57], a practical approach to volumetric surface reconstruction, to reconstruct meshes from the point clouds using an efficient and sparse data structure called VDB (Voxel Data Base) [39]. At this stage, we can use the ray-mesh intersection method [15] to cast rays from the cameras onto the mesh model. By intersecting the rays with the mesh, we determine the depths of the corresponding points on the mesh surface. After a series of joint optimizations across multiple modalities, we obtain a set of GT data; Figure 2 shows some instances. This GT data includes the reference images $\mathcal{I}_{c_0}^r$, the depth maps $D_{c_0}^r$ of the reference images, and the reference maps containing the point cloud models $\mathcal{P}$, the mesh models $M$, and the camera pose odometry $\{\xi\}$. Figure 4 summarizes the GT generation.

# 4. Omnidirectional Visual Localization

We extend current feature-matching-based and absolute pose regression pipelines to omnidirectional visual localization. Given a query image $\mathcal{I}^q$ from any camera model, we seek to estimate its pose within the environment modeled by the $360^{\circ}$ images $\mathbf{I}^r$. To minimize the domain gap between query images from $c_{1}, c_{2}, c_{3}, c_{4}$ and the reference images, we explore virtual cameras (VC) in two ways: VC1 remaps query images to the 360 domain using $\Psi_{c_n}^{-1}$; VC2 rectifies $360^{\circ}$ images into the queries' domains using $\Psi_{c_n}$.

# 4.1. Feature-matching-based Localization

Most feature-matching-based techniques first perform IR to reduce the search space before estimating the pose.

# 4.1.1 Image Retrieval

For method VC1, if the query $\mathcal{I}^q$ is captured by $c_{0}$, we retrieve the $k$ most similar images from $\mathbf{I}^r$ by computing and sorting $\mathrm{simi}_{\cos}(\mathcal{F}(\mathcal{I}^q), \mathcal{F}(\mathcal{I}^r))$, $\mathcal{I}^r \in \mathbf{I}^r$, where $\mathcal{F}(\cdot)$ denotes the function that maps an image to the global feature domain and $\mathrm{simi}_{\cos}(\cdot)$ is the cosine similarity between two feature embeddings. If the query $\mathcal{I}^q$ is captured by $c_{1}, c_{2}, c_{3}, c_{4}$, we instead retrieve the top-$k$ reference images based on $\mathrm{simi}_{\cos}(\mathcal{F}(\Psi_{c_n}^{-1}(\mathcal{I}^q)), \mathcal{F}(\mathcal{I}^r))$, $\mathcal{I}^r \in \mathbf{I}^r$.

In method VC2, we expand the global features of each $360^{\circ}$ reference image with a set of virtual cameras $c$, including virtual pinhole cameras forming a cube map and virtual fisheye cameras. We define the similarity score between $\mathcal{I}^q$ and $\mathcal{I}^r$ as:

$$
\max \left( \operatorname{simi}_{\cos} \left( \mathcal{F}\left(\mathcal{I}^{q}\right), \mathcal{G}_{\mathcal{F}}\left(\mathcal{I}^{r}\right) \right) \right), \tag{6}
$$

where the global feature group of a reference image is $\mathcal{G}_{\mathcal{F}}(\mathcal{I}^r) = \{\mathcal{F}(\Psi_c(\mathcal{I}^r)), \ldots\}$. We use the highest similarity between $\mathcal{F}(\mathcal{I}^q)$ and the members of $\mathcal{G}_{\mathcal{F}}(\mathcal{I}^r)$ as the similarity score of each $\mathcal{I}^r$, which ensures that we still retrieve the $k$ most similar $360^{\circ}$ reference images even though several rectified images originate from the same $\mathcal{I}^r$. Note that this eliminates the domain gap during the image retrieval step.

# 4.1.2 Local Feature Matching and Pose Estimation

For each pinhole query frame, we retrieve the relevant reference images, match their local features, leverage the depth maps $D_{c_0}$ to establish 2D-3D correspondences, and finally estimate a pose with PnP + RANSAC. Unlike [11, 61], we directly match the query image with the retrieved $360^{\circ}$ reference images described in Section 4.1.1. For query images from $c_{0}, c_{1}, c_{2}, c_{3}$, i.e., fisheye and $360^{\circ}$ query frames, we rely on the pose error function for the sphere camera model provided by the OpenGV [28] library for PnP + RANSAC.

# 4.2. Absolute Pose Regression

APRs train deep neural networks to regress the 6DoF camera pose of a query image.

PN. PoseNet (PN) is the first APR model. Since there is no official open-source code [23, 24], we follow the modification in [8, 36] and use ResNet34 [21] as the backbone network.

MS-T. MS-Transformer [49] is an APR model that incorporates attention and uses transformers as the backbone. We denote APR methods trained with our virtual camera method VC2 as $\mathbf{APR}^{vc2}$. The only difference between APR and $\mathbf{APR}^{vc2}$ is the training stage: the APR baselines are trained on $\mathbf{I}^r$, whereas the $\mathbf{APR}^{vc2}$ models are trained on $360^\circ$ images together with cropped pinhole and fisheye images, i.e., $\mathbf{I}^r \cup \Psi_c(\mathbf{I}^r)$ as introduced in Section 4.1.1 and Eq. 1.

All APR models are implemented in Python using PyTorch [41]. During training, all input images are resized to $256 \times 256$ and then randomly cropped to $224 \times 224$. For both PN and MS-T, we use an initial learning rate of $\lambda = 10^{-4}$ and a batch size of 32 for 300 epochs on each scene. Training and evaluation in Section 5 are performed on an NVIDIA GeForce GTX 3090 GPU.

# 5. Evaluation

We provide detailed results for each scene in the dataset, as well as additional settings, in the supplementary material.

# 5.1. Image Retrieval

We evaluate global descriptors computed by NetVLAD [1], CosPlace [3], OpenIBL [18] and AP-GeM [20]. A query image is deemed correctly localized if at least one of the top $k$ retrieved database images is within $d = 5m$ of the ground truth position of the query for Concourse, and within $d = 10m$ for the other three scenes. The image retrieval results are shown in Table 4. Among all global feature descriptor methods, the $360^{\circ}$ query exhibits the best precision and recall in most cases, while the pinhole query performs the worst. The remap method (VC1) provides limited improvement for pinhole queries but a larger improvement for fisheye1, fisheye2, and fisheye3 queries. The reason is that the FoV of the pinhole camera is only $85^{\circ}$; consequently, VC1 produces large black borders when converting to a $360^{\circ}$ image due to the limited coverage.
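To make the retrieval step used throughout this evaluation concrete, here is a minimal sketch of the VC2 group-wise scoring from Section 4.1.1 and Eq. 6; it is not the released 360Loc code. The helper names `rectify_views` (standing in for the virtual-camera projection $\Psi_c$ of Eq. 1) and `global_descriptor` (any global feature extractor $\mathcal{F}$, e.g. a NetVLAD or CosPlace network) are our own placeholders and are assumed to be provided elsewhere.

```python
# Sketch of VC2 retrieval: score each 360-degree reference by the best cosine
# similarity between the query descriptor and the descriptors of its rectified
# virtual-camera views (Eq. 6). `rectify_views` and `global_descriptor` are
# assumed helpers, not part of the released dataset tooling.
import numpy as np

def cosine_similarity(a, b):
    # simi_cos between two global feature embeddings.
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))

def build_reference_groups(ref_images, rectify_views, global_descriptor):
    # G_F(I^r) = {F(Psi_c(I^r)), ...}: one descriptor per rectified view.
    return [[global_descriptor(view) for view in rectify_views(img)]
            for img in ref_images]

def retrieve_topk(query_image, ref_groups, global_descriptor, k=10):
    # Rank references by the maximum similarity over their descriptor group.
    f_q = global_descriptor(query_image)
    scores = np.array([max(cosine_similarity(f_q, f_r) for f_r in group)
                       for group in ref_groups])
    order = np.argsort(-scores)  # best first
    return order[:k], scores[order[:k]]
```

Because every reference is represented by its best-matching virtual view, a pinhole or fisheye query is compared against imagery in its own domain, which is why VC2 removes the cross-device gap at the retrieval stage.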
# 4.2. Absolute Pose Regression

APRs train deep neural networks to regress the 6DoF camera pose of a query image.

PN. PoseNet (PN) is the first APR model. Since no official open-source code is available [23, 24], we follow the modification in [8, 36] and use ResNet34 [21] as the backbone network.

MS-T. MS-Transformer [49] is an APR model that incorporates attention and uses transformers as its backbone. We denote APR methods that use our virtual camera method VC2 as $\mathbf{APR}^{vc2}$. The difference between APR and $\mathbf{APR}^{vc2}$ lies in the training stage: the APR baselines are trained on $\mathbf{I}^r$, whereas the $\mathbf{APR}^{vc2}$ models are trained with $360^\circ$ images, cropped pinhole images, and cropped fisheye images, i.e., $\mathbf{I}^r \cup \Psi_c(\mathbf{I}^r)$ introduced in Section 4.1.1 and Eq. 1.

All APR models are implemented in Python using PyTorch [41]. During training, all input images are resized to $256 \times 256$ and then randomly cropped to $224 \times 224$. For both PN and MS-T, we set an initial learning rate of $\lambda = 10^{-4}$ and a batch size of 32 for 300 epochs per scene. Training and evaluation in Section 5 are performed on an NVIDIA GeForce RTX 3090 GPU.

# 5. Evaluation

We provide detailed results for each scene in the dataset, as well as additional settings, in the supplementary material.

# 5.1. Image Retrieval

We evaluate global descriptors computed by NetVLAD [1], CosPlace [3], OpenIBL [18], and AP-GeM [20]. A query image is deemed correctly localized if at least one of the top $k$ retrieved database images lies within $d = 5m$ of the ground truth position of the query for Concourse, and within $d = 10m$ for the other three scenes. The image retrieval results are shown in Table 4. Across all global feature descriptors, the $360^{\circ}$ query exhibits the best precision and recall in most cases, while the pinhole query performs the worst. The remap method (VC1) provides limited improvement for pinhole queries but yields larger gains for the fisheye1, fisheye2, and fisheye3 queries. The reason is that the FoV of the pinhole camera is only $85^{\circ}$; consequently, VC1 produces large black borders when converting a pinhole query to a $360^{\circ}$ image due to the limited coverage.

The rectify method (VC2) significantly improves the pinhole, fisheye1, fisheye2, and fisheye3 queries by eliminating the domain gap in IR. However, the recall and precision of the pinhole, fisheye1, and fisheye2 queries are still much lower than those of the $360^{\circ}$ query. Only the fisheye3 query (widest FoV) approaches the performance of the $360^{\circ}$ query; its precision and recall are mainly affected by the domain gap, and both remap (VC1) and crop (VC2) significantly improve its IR performance. On the other hand, pinhole queries are more prone to being mistaken for erroneous locations with similar structures due to their narrower FoV, even when there is no cross-device domain gap during IR after applying VC2 (see figures in the supplementary material).
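The numbers in Table 4 follow the protocol above: a query counts as correctly localized if at least one of its top-$k$ references lies within the distance threshold $d$. A minimal sketch of how such R@k and P@k values can be computed is shown below; here P@k is taken as the fraction of the top-$k$ references within $d$, averaged over queries, which is an assumption about the exact definition used.

```python
import numpy as np

def retrieval_recall_precision(q_pos, ref_pos, ranked_idx, k=5, d=10.0):
    """q_pos: (Q, 3) ground-truth query positions; ref_pos: (R, 3) reference
    positions; ranked_idx: (Q, R) reference indices sorted by similarity."""
    topk = ranked_idx[:, :k]                                        # (Q, k)
    dists = np.linalg.norm(ref_pos[topk] - q_pos[:, None, :], axis=-1)
    correct = dists <= d                                            # (Q, k)
    recall_at_k = correct.any(axis=1).mean()      # >= 1 hit among the top-k
    precision_at_k = correct.mean(axis=1).mean()  # share of top-k within d
    return recall_at_k, precision_at_k
```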
| Query | NetVLAD [1] R@1 | R@5 | P@5 | R@10 | P@10 | CosPlace [3] R@1 | R@5 | P@5 | R@10 | P@10 | OpenIBL [18] R@1 | R@5 | P@5 | R@10 | P@10 | AP-GeM [20] R@1 | R@5 | P@5 | R@10 | P@10 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| pinhole | 0.23 | 0.45 | 0.22 | 0.58 | 0.22 | 0.15 | 0.26 | 0.15 | 0.33 | 0.15 | 0.18 | 0.36 | 0.18 | 0.48 | 0.18 | 0.2 | 0.37 | 0.2 | 0.47 | 0.2 |
| +VC1 | 0.24 | 0.45 | 0.24 | 0.57 | 0.23 | 0.21 | 0.33 | 0.21 | 0.41 | 0.21 | 0.21 | 0.39 | 0.21 | 0.5 | 0.2 | 0.25 | 0.42 | 0.25 | 0.53 | 0.24 |
| +VC2 | 0.5 | 0.67 | 0.48 | 0.75 | 0.47 | 0.32 | 0.41 | 0.32 | 0.48 | 0.31 | 0.51 | 0.67 | 0.49 | 0.75 | 0.47 | 0.5 | 0.68 | 0.49 | 0.77 | 0.47 |
| fisheye1 | 0.42 | 0.67 | 0.41 | 0.77 | 0.39 | 0.28 | 0.43 | 0.28 | 0.52 | 0.28 | 0.37 | 0.58 | 0.36 | 0.69 | 0.34 | 0.35 | 0.55 | 0.34 | 0.66 | 0.33 |
| +VC1 | 0.51 | 0.72 | 0.49 | 0.8 | 0.47 | 0.36 | 0.48 | 0.35 | 0.56 | 0.34 | 0.52 | 0.7 | 0.5 | 0.79 | 0.48 | 0.43 | 0.62 | 0.42 | 0.72 | 0.4 |
| +VC2 | 0.73 | 0.91 | 0.63 | 0.95 | 0.57 | 0.63 | 0.85 | 0.51 | 0.92 | 0.43 | 0.74 | 0.91 | 0.62 | 0.95 | 0.54 | 0.65 | 0.88 | 0.57 | 0.94 | 0.51 |
| fisheye2 | 0.45 | 0.7 | 0.44 | 0.8 | 0.42 | 0.3 | 0.46 | 0.31 | 0.55 | 0.31 | 0.41 | 0.62 | 0.4 | 0.73 | 0.38 | 0.38 | 0.59 | 0.36 | 0.68 | 0.35 |
| +VC1 | 0.54 | 0.74 | 0.52 | 0.83 | 0.49 | 0.37 | 0.49 | 0.36 | 0.57 | 0.35 | 0.56 | 0.73 | 0.54 | 0.81 | 0.51 | 0.46 | 0.65 | 0.45 | 0.74 | 0.43 |
| +VC2 | 0.74 | 0.92 | 0.65 | 0.95 | 0.58 | 0.64 | 0.87 | 0.53 | 0.93 | 0.45 | 0.76 | 0.92 | 0.65 | 0.96 | 0.56 | 0.67 | 0.89 | 0.58 | 0.94 | 0.52 |
| fisheye3 | 0.57 | 0.79 | 0.55 | 0.86 | 0.52 | 0.4 | 0.56 | 0.4 | 0.65 | 0.4 | 0.53 | 0.74 | 0.51 | 0.83 | 0.49 | 0.45 | 0.66 | 0.43 | 0.75 | 0.41 |
| +VC1 | 0.63 | 0.81 | 0.61 | 0.88 | 0.58 | 0.48 | 0.61 | 0.48 | 0.68 | 0.47 | 0.67 | 0.82 | 0.65 | 0.88 | 0.61 | 0.55 | 0.73 | 0.53 | 0.81 | 0.51 |
| +VC2 | 0.77 | 0.93 | 0.68 | 0.96 | 0.61 | 0.69 | 0.89 | 0.58 | 0.94 | 0.5 | 0.79 | 0.93 | 0.68 | 0.96 | 0.6 | 0.67 | 0.9 | 0.59 | 0.94 | 0.54 |
| 360 | 0.79 | 0.86 | 0.77 | 0.88 | 0.73 | 0.92 | 0.95 | 0.91 | 0.96 | 0.89 | 0.89 | 0.94 | 0.88 | 0.95 | 0.83 | 0.79 | 0.9 | 0.77 | 0.94 | 0.72 |
Table 4. Image retrieval results based on the $360^{\circ}$ reference database, averaged over four scenes: recall and precision for the top $k$ retrieved images, $k = 1, 5, 10$. $\#$ indicates the highest value of R@k and P@k for each device with and without virtual cameras (VC1, VC2). The best results over all devices for R@k and P@k are in bold with $\#$.
| Query | NetVLAD [1] DISK+LG Day | Night | SP+LG Day | Night | SP+SG Day | Night | CosPlace [3] DISK+LG Day | Night | SP+LG Day | Night | SP+SG Day | Night |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| pinhole | 6.0/11.3/24.6 | 1.7/4.4/10.3 | 8.0/14.9/30.9 | 2.2/5.5/13.5 | 8.4/15.2/30.7 | 2.3/5.6/12.3 | 4.2/7.8/18.0 | 1.6/3.5/8.6 | 4.8/10.2/22.1 | 1.9/4.7/11.1 | 5.4/10.4/21.1 | 2.1/4.7/10.4 |
| +VC1 | 8.5/14.0/23.5 | 2.2/4.1/7.9 | 10.4/17.0/27.5 | 2.9/5.3/10.1 | 10.9/17.8/28.5 | 2.8/5.6/9.9 | 6.1/10.8/21.1 | 1.7/3.6/8.2 | 7.5/13.2/22.5 | 2.0/4.5/9.6 | 7.6/13.5/22.8 | 2.1/4.7/9.6 |
| +VC2 | 14.2/22.2/35.5 | 4.1/7.8/13.6 | 19.8/29.7/42.9 | 6.1/10.4/16.9 | 21.6/33.2/49.7 | 5.9/11.0/18.4 | 8.0/13.1/23.5 | 2.5/4.6/9.1 | 10.7/16.4/26.6 | 3.0/5.7/11.4 | 11.6/18.5/30.5 | 3.5/6.8/12.8 |
| fisheye1 | 1.6/4.4/17.7 | 0.5/1.8/7.4 | 1.9/5.4/20.1 | 0.7/2.3/10.5 | 1.6/4.7/18.4 | 0.5/1.9/8.2 | 0.8/2.5/11.8 | 0.4/1.4/5.8 | 1.0/3.5/13.0 | 0.5/1.4/8.2 | 0.9/3.4/12.1 | 0.3/1.4/7.0 |
| +VC1 | 3.3/9.2/27.6 | 0.8/2.7/9.6 | 4.1/10.6/32.2 | 1.4/4.4/14.9 | 3.0/9.5/29.6 | 0.9/3.1/11.7 | 2.3/5.5/19.4 | 0.5/1.6/7.3 | 2.1/6.1/19.9 | 0.7/2.2/9.0 | 1.9/5.5/19.1 | 0.5/1.9/7.3 |
| +VC2 | 3.9/10.5/33.0 | 1.0/4.0/14.6 | 4.3/12.4/38.2 | 1.9/6.4/21.8 | 3.6/11.0/34.5 | 1.1/5.3/19.4 | 2.5/6.9/25.3 | 0.8/2.8/12.2 | 2.8/8.2/29.0 | 1.3/4.6/18.0 | 2.1/7.1/26.7 | 1.0/4.0/16.2 |
| fisheye2 | 1.6/4.9/20.9 | 0.5/2.0/8.7 | 1.9/6.7/23.2 | 0.8/3.0/11.8 | 1.7/5.2/19.5 | 0.7/2.5/9.9 | 1.3/3.5/14.2 | 0.4/1.6/6.9 | 1.2/3.8/15.2 | 0.5/1.5/9.1 | 1.2/3.9/12.9 | 0.6/1.6/7.2 |
| +VC1 | 4.3/10.8/30.9 | 0.8/3.0/11.2 | 4.7/12.4/34.1 | 1.8/5.4/15.8 | 4.1/10.6/31.5 | 1.1/3.6/13.7 | 2.5/6.5/20.6 | 0.5/1.7/7.4 | 2.5/7.0/22.1 | 0.8/2.4/9.4 | 2.2/6.8/20.2 | 0.5/2.1/8.0 |
| +VC2 | 4.3/11.0/34.4 | 1.1/4.7/17.3 | 5.1/14.0/41.1 | 2.0/7.2/24.8 | 3.7/11.5/36.8 | 1.5/5.9/21.2 | 2.8/7.3/27.1 | 0.8/2.9/13.4 | 2.9/8.9/32.0 | 1.6/5.3/20.1 | 2.5/8.0/27.9 | 1.1/4.2/17.7 |
| fisheye3 | 3.8/9.5/29.8 | 1.0/3.6/13.8 | 4.0/10.5/31.6 | 1.3/4.6/16.4 | 3.4/9.1/28.4 | 0.8/3.8/13.8 | 2.5/6.3/21.9 | 0.6/2.4/10.1 | 2.8/7.2/22.3 | 0.9/2.9/12.4 | 2.0/5.9/20.0 | 1.3/4.2/15.0 |
| +VC1 | 5.9/14.7/39.5 | 1.5/5.2/17.7 | 6.0/16.2/43.5 | 2.0/6.8/21.9 | 5.8/14.7/39.1 | 1.8/5.5/18.3 | 4.4/10.2/30.1 | 1.1/3.3/12.8 | 4.6/11.6/32.0 | 1.4/4.1/14.4 | 4.3/10.5/29.7 | 1.2/3.8/12.3 |
| +VC2 | 5.2/13.9/41.8 | 2.1/6.5/22.5 | 5.9/16.5/46.3 | 2.5/8.6/29.1 | 5.4/14.2/40.5 | 2.1/7.3/25.9 | 4.3/9.8/34.6 | 1.7/5.2/19.5 | 4.7/12.6/36.8 | 2.2/7.1/23.8 | 3.8/10.5/32.5 | 1.6/5.1/20.7 |
| 360 | 17.1/30.8/66.1 | 8.5/20.1/47.5 | 18.2/34.6/64.2 | 7.0/18.7/45.3 | 15.8/31.2/60.4 | 7.0/17.8/42.8 | 17.6/31.8/68.1 | 8.7/22.0/56.0 | 18.7/34.9/68.1 | 7.3/20.0/53.4 | 16.6/32.6/65.7 | 7.1/18.7/50.4 |
Table 5. Local matching localization results: the average percentage of predictions with high (0.25m, $2^{\circ}$), medium (0.5m, $5^{\circ}$), and low (5m, $10^{\circ}$) accuracy [46] (higher is better) over four scenes. # indicates the highest value for each device with and without virtual cameras (VC1, VC2) at each accuracy level. The best results over all devices at each accuracy level are in bold with #.

# 5.2. Visual Localization

We compare our approach with the following baselines in two categories: 1) local feature matching pipelines tailored from HLoc [42], using different keypoint descriptors (SuperPoint (SP) [16] and DISK [55]) and matchers (SuperGlue (SG) [43] and the follow-up SOTA LightGlue (LG) [31]); 2) the end-to-end APRs: PN [23, 24] and MS-T [49].

Local feature matching: During local feature matching, all $360^{\circ}$ images are cropped to $1228 \times 614$ as a trade-off between time and computation. We report the average results over four scenes in Table 5. The $360^{\circ}$ query achieves the best performance at all three accuracy levels in most cases, across all IR, keypoint descriptor, and matcher settings; it is especially more robust in challenging nighttime conditions. The VC1 and VC2 techniques improve the recall and precision of IR, increasing the accuracy of 2D-2D matching for all cameras. In most cases, performance at the low accuracy level $(5m, 10^{\circ})$ correlates with the FoV: a larger FoV yields higher performance. However, the pinhole query with VC2 during IR performs comparably to the $360^{\circ}$ queries at the high $(0.25m, 2^{\circ})$ and medium $(0.5m, 5^{\circ})$ accuracy levels. In contrast, query frames from $c_{1}$, $c_{2}$, and $c_{3}$ show relatively lower performance at the high and medium accuracy levels.

As observed in Table 4, different IR methods perform differently depending on the type of camera. We therefore consider both NetVLAD and CosPlace in visual localization. In most cases, $360^{\circ}$ query frames achieve higher accuracy with CosPlace, while pinhole and fisheye query frames achieve higher accuracy with NetVLAD, as shown in Table 5. These results match the precision and recall differences noted in Table 4. We believe that the FoV not only affects the robustness of IR but also impacts local 2D-2D matching performance: pinhole queries suffer from erroneous matches due to interference from symmetrical and repetitive structures, while the larger FoV of fisheye and $360^{\circ}$ query frames captures more unique visual features. We provide examples in the supplementary material.
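The accuracy levels reported in Table 5 combine a translation and a rotation threshold. A small sketch of how such percentages can be computed from estimated and ground-truth poses is shown below; the rotation-matrix convention and helper names are illustrative assumptions, not the benchmark's released evaluation code.

```python
import numpy as np

def pose_error(R_est, t_est, R_gt, t_gt):
    # Translation error (meters) and rotation error (degrees) between an
    # estimated and a ground-truth camera pose, both given as (R, t).
    t_err = np.linalg.norm(t_est - t_gt)
    cos_angle = (np.trace(R_gt.T @ R_est) - 1.0) / 2.0
    r_err = np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))
    return t_err, r_err

def accuracy_levels(errors, thresholds=((0.25, 2.0), (0.5, 5.0), (5.0, 10.0))):
    # errors: iterable of (t_err_m, r_err_deg); returns the percentage of
    # poses within each (translation, rotation) threshold pair, as in Table 5.
    errs = np.asarray(list(errors), dtype=float)
    return [100.0 * np.mean((errs[:, 0] <= t) & (errs[:, 1] <= r))
            for t, r in thresholds]
```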
APR: APRs cannot extrapolate well beyond the training set [40, 47]. Cross-device queries further complicate this challenge by introducing an additional dimension of FoV. Due to the high efficiency of $360^{\circ}$ mapping, the training set $\mathbf{I}^r$ in 360Loc contains only around one-third as many images as datasets such as [24]. Figure 5 shows that when PN and MS-T are trained solely on $\mathbf{I}^r$, i.e., only $360^{\circ}$ images, a smaller domain gap between the query and the $360^{\circ}$ images yields a lower error. However, when we introduce images from virtual cameras for data augmentation, $\mathrm{PN}^{vc2}$ and MS-T$^{vc2}$ exhibit significantly reduced translation and rotation errors across all queries, particularly during daytime. MS-T$^{vc2}$ reduces translation error by up to $79\%$ and rotation error by up to $72\%$ compared to MS-T, and $\mathrm{PN}^{vc2}$ shows similar improvement over PN. In most cases, except for $\mathrm{PN}^{vc2}$'s rotation error on the $360^{\circ}$ queries during daytime, both the $360^{\circ}$ and fisheye queries achieve higher accuracy than the pinhole query with $\mathrm{PN}^{vc2}$ and MS-T$^{vc2}$. This suggests that a larger FoV still helps improve visual localization accuracy in challenging scenes. Another interesting finding is that even though the augmented training set $\mathbf{I}^r \cup \Psi_c(\mathbf{I}^r)$, which includes virtual camera images, does not increase the number of $360^{\circ}$ images, the error for the $360^{\circ}$ query still decreases; this reduction is particularly noticeable for translation errors during daytime. This result demonstrates the utility of employing virtual cameras for data augmentation.

![](images/21c1a101e9d74cf3e9bf39c3e37a7244e02cfa1ea47cb78c1884559328819e15.jpg)

![](images/2365dd97b73b1cf7536dfc86daa8dbe38f6b210cb8b770a05402721a59c5619b.jpg)

![](images/2d3e8e2a36dd03710f89023fc7e0eb829fc8eb8577459e039b7c2f10874df89c.jpg)

![](images/87b3ba1233e73dec1da977ae657f32a3189dbcfdc569c031b4207dde3f22d3d3.jpg)

Figure 5. The average of median translation/rotation errors in $(m/^{\circ})$ over 4 scenes: (a) translation (day), (b) rotation (day), (c) translation (night), (d) rotation (night).

# 5.3. Analysis

Cross-device visual positioning presents significant challenges for IR, local matching, and APRs. Our VC1 and VC2 methods deliver practical improvements in IR and APR performance for cross-device scenarios. However, it is essential to note that during local matching, the accuracy of matches and the recall and precision of IR for query frames from different cameras may not align perfectly. The chosen IR method and its training noticeably affect accuracy for similar cameras. Fisheye cameras perform better than pinhole cameras in IR, yet pinhole cameras outperform fisheye cameras at the high and medium accuracy levels in local matching. This is likely because existing feature extraction and matching models lack training data on $360^{\circ}$ and fisheye cameras, resulting in less accurate matching. We attribute the inferior performance of pinhole query frames at the low accuracy level to the insufficient recall and precision of IR. Additionally, pinhole queries are more susceptible to interference when the scene contains many repetitive and symmetrical features, even when the retrieved reference image is correct (see example figures in the supplementary material). By utilizing VC2 to augment IR and the APRs' training data, we eliminate the cross-device domain gap. We demonstrate that a panoramic perspective and a larger FoV can significantly improve the performance of IR and APRs, and we find that query frames from $360^{\circ}$ and ultra-wide FoV cameras are less prone to being misidentified as erroneous locations with similar structures. This result suggests the promising potential of fisheye and $360^{\circ}$ cameras as viable sensors for localization tasks in indoor environments with low GPS accuracy.
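Both the $\mathbf{APR}^{vc2}$ augmentation and the VC2 retrieval discussed above rely on rendering perspective views from equirectangular $360^{\circ}$ images. The sketch below shows one common way to generate such a virtual pinhole view with OpenCV's remap; the field of view, viewing angles, and axis conventions are illustrative assumptions rather than the dataset's exact $\Psi_c$ definition.

```python
import numpy as np
import cv2

def virtual_pinhole_view(equi, fov_deg=85.0, yaw_deg=0.0, pitch_deg=0.0,
                         out_w=640, out_h=480):
    """Render a virtual pinhole view from an equirectangular image (H, W, 3)."""
    H, W = equi.shape[:2]
    f = 0.5 * out_w / np.tan(0.5 * np.radians(fov_deg))          # focal length
    x, y = np.meshgrid(np.arange(out_w), np.arange(out_h))
    rays = np.stack([(x - 0.5 * out_w) / f,
                     (y - 0.5 * out_h) / f,
                     np.ones_like(x, dtype=np.float64)], axis=-1)
    rays /= np.linalg.norm(rays, axis=-1, keepdims=True)
    yaw, pitch = np.radians(yaw_deg), np.radians(pitch_deg)
    Ry = np.array([[np.cos(yaw), 0, np.sin(yaw)],
                   [0, 1, 0],
                   [-np.sin(yaw), 0, np.cos(yaw)]])
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(pitch), -np.sin(pitch)],
                   [0, np.sin(pitch), np.cos(pitch)]])
    d = rays @ (Ry @ Rx).T                       # rotate rays into the 360 frame
    lon = np.arctan2(d[..., 0], d[..., 2])
    lat = np.arcsin(np.clip(d[..., 1], -1.0, 1.0))
    map_x = ((lon / (2.0 * np.pi) + 0.5) * W).astype(np.float32)
    map_y = ((lat / np.pi + 0.5) * H).astype(np.float32)
    return cv2.remap(equi, map_x, map_y, cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_WRAP)
```

In practice one such view per cube-map face (plus analogous fisheye projections) would be rendered to build $\Psi_c(\mathbf{I}^r)$, as described in Section 4.1.1.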
# 6. Conclusion

360Loc is the first dataset and benchmark that explores the challenge of cross-device visual positioning, involving $360^{\circ}$ reference frames and query frames from pinhole, ultra-wide FoV fisheye, and $360^{\circ}$ cameras. We first identified the absence of datasets with ground truth 6DoF poses for $360^{\circ}$ images, as well as the limited research on cross-device localization and on the robustness of different cameras in ambiguous scenes. To address these limitations, we built a dataset with $360^{\circ}$ images as references and with query frames from pinhole, ultra-wide FoV fisheye, and $360^{\circ}$ cameras generated via a virtual camera solution. This method enables fair comparisons in cross-device visual localization tasks and helps reduce the domain gap between different cameras. By evaluating feature-matching-based and pose-regression-based methods, we demonstrate the effectiveness of our virtual camera approach and the increased robustness of $360^{\circ}$ cameras in visual localization for challenging and ambiguous scenes.

# References

[1] Relja Arandjelovic, Petr Gronat, Akihiko Torii, Tomas Pajdla, and Josef Sivic. Netvlad: Cnn architecture for weakly supervised place recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5297-5307, 2016. 2, 6, 7
[2] Iro Armeni, Sasha Sax, Amir R Zamir, and Silvio Savarese. Joint 2d-3d-semantic data for indoor scene understanding. arXiv preprint arXiv:1702.01105, 2017. 3
[3] Gabriele Berton, Carlo Masone, and Barbara Caputo. Rethinking visual geo-localization for large-scale applications. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4878-4888, 2022. 2, 6, 7
[4] Hunter Blanton, Connor Greenwell, Scott Workman, and Nathan Jacobs. Extending absolute pose regression to multiple scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2020. 3
[5] Eric Brachmann and Carsten Rother. Learning less is more - 6D camera localization via 3D surface regression. In CVPR, 2018. 1, 2
[6] Eric Brachmann and Carsten Rother. Visual camera relocalization from RGB and RGB-D images using DSAC. TPAMI, 2021.
[7] Eric Brachmann, Alexander Krull, Sebastian Nowozin, Jamie Shotton, Frank Michel, Stefan Gumhold, and Carsten Rother. DSAC-Differentiable RANSAC for camera localization. In CVPR, 2017. 1, 2
[8] Samarth Brahmbhatt, Jinwei Gu, Kihwan Kim, James Hays, and Jan Kautz. Geometry-aware learning of maps for camera localization. In IEEE conference on computer vision and pattern recognition, 2018. 3, 6
[9] Mai Bui, Tolga Birdal, Haowen Deng, Shadi Albarqouni, Leonidas Guibas, Slobodan Ilic, and Nassir Navab. 6d camera relocalization in ambiguous scenes via continuous multi-modal inference. 2020. 3, 4
[10] Nicholas Carlevaris-Bianco, Arash K Ushani, and Ryan M Eustice. University of michigan north campus long-term vision and lidar dataset. The International Journal of Robotics Research, 35(9):1023-1035, 2016. 3, 4
[11] David M Chen, Georges Baatz, Kevin Koser, Sam S Tsai, Ramakrishna Vedantham, Timo Pylvanäinen, Kimmo Roimela, Xin Chen, Jeff Bach, Marc Pollefeys, et al. City-scale landmark identification on mobile devices. In CVPR 2011, pages 737-744. IEEE, 2011. 3, 4, 6
[12] Shuai Chen, Zirui Wang, and Victor Prisacariu. Directposenet: absolute pose regression with photometric consistency. In 2021 International Conference on 3D Vision (3DV), pages 1175-1185. IEEE, 2021.
3 +[13] Shuai Chen, Xinghui Li, Zirui Wang, and Victor A Prisacariu. Dfnet: Enhance absolute pose regression with direct feature matching. In ECCV 2022. Tel Aviv, Israel, October 23-27, 2022, Part X. Springer, 2022. 3 +[14] Santiago Cortés, Arno Solin, Esa Rahtu, and Juho Kannala. Advio: An authentic dataset for visual-inertial odometry. In + +Proceedings of the European Conference on Computer Vision (ECCV), pages 419-434, 2018. 3, 4 +[15] Dawson-Haggerty et al. trimesh. 5 +[16] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 224-236, 2018. 2, 7 +[17] Mihai Dusmanu, Ignacio Rocco, Tomas Pajdla, Marc Pollefeys, Josef Sivic, Akihiko Torii, and Torsten Sattler. D2-net: A trainable cnn for joint description and detection of local features. In Proceedings of the IEEE/cvf conference on computer vision and pattern recognition, pages 8092-8101, 2019. 1 +[18] Yixiao Ge, Haibo Wang, Feng Zhu, Rui Zhao, and Hongsheng Li. Self-supervising fine-grained region similarities for large-scale image localization. In European Conference on Computer Vision, 2020. 2, 6, 7 +[19] Daniel Girardeau-Montaut. Cloudcompare. France: EDF R&D Telecom ParisTech, 11, 2016. 5 +[20] A. Gordo, J. Almazan, J. Revaud, and D. Larlus. End-to-end learning of deep visual representations for image retrieval. *IJCV*, 2017. 2, 6, 7 +[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6 +[22] Huajian Huang, Yinzhe Xu, Yingshu Chen, and Sai-Kit Yeung. 360vot: A new benchmark dataset for omnidirectional visual object tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20566–20576, 2023. 1 +[23] Alex Kendall and Roberto Cipolla. Geometric loss functions for camera pose regression with deep learning. In IEEE conference on computer vision and pattern recognition, pages 5974-5983, 2017. 1, 3, 6, 7 +[24] Alex Kendall, Matthew Grimes, and Roberto Cipolla. Posenet: A convolutional network for real-time 6-dof camera relocalization. In Proceedings of the IEEE international conference on computer vision, pages 2938-2946, 2015. 1, 3, 4, 6, 7, 8 +[25] Junho Kim, Changwoon Choi, Hojun Jang, and Young Min Kim. Piccolo: point cloud-centric omnidirectional localization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3313-3323, 2021. 2, 3 +[26] Junho Kim, Hojun Jang, Changwoon Choi, and Young Min Kim. Cpo: Change robust panorama to point cloud localization. In European Conference on Computer Vision, pages 176-192. Springer, 2022. +[27] Junho Kim, Eun Sun Lee, and Young Min Kim. Calibrating panoramic depth estimation for practical localization and mapping. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8830-8840, 2023. 2 +[28] Laurent Kneip and Paul Furgale. Opengv: A unified and generalized approach to real-time calibrated geometric vision. In 2014 IEEE international conference on robotics and automation (ICRA), pages 1-8. IEEE, 2014. 6 + +[29] Kenji Koide, Shuji Oishi, Masashi Yokozuka, and Atsuhiko Banno. General, single-shot, target-less, and automatic lidar-camera extrinsic calibration toolbox. arXiv preprint arXiv:2302.05094, 2023. 
4, 5 +[30] Donghwan Lee, Soohyun Ryu, Suyong Yeon, Yonghan Lee, Deokhwa Kim, Cheolho Han, Yohann Cabon, Philippe Weinzaepfel, Nicolas Guérin, Gabriela Csurka, et al. Large-scale localization datasets in crowded indoor spaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3227-3236, 2021. 3, 4 +[31] Philipp Lindenberger, Paul-Edouard Sarlin, and Marc Pollefeys. LightGlue: Local Feature Matching at Light Speed. In ICCV, 2023. 7 +[32] Haomin Liu, Linsheng Zhao, Zhen Peng, Weijian Xie, Mingxuan Jiang, Hongbin Zha, Hujun Bao, and Guofeng Zhang. A low-cost and scalable framework to build large-scale localization benchmark for augmented reality. IEEE Transactions on Circuits and Systems for Video Technology, 2023. 3 +[33] Liu Liu, Hongdong Li, and Yuchao Dai. Efficient global 2d-3d matching for camera localization in a large-scale 3d map. In Proceedings of the IEEE International Conference on Computer Vision, pages 2372-2381, 2017. 1 +[34] Zheng Liu and Fu Zhang. Balm: Bundle adjustment for lidar mapping. IEEE Robotics and Automation Letters, 6(2): 3184-3191, 2021. 5 +[35] David G Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60:91-110, 2004. 2 +[36] Iaroslav Melekhov, Juha Ylioinas, Juho Kannala, and Esa Rahtu. Image-based localization using hourglass networks. In IEEE international conference on computer vision workshops, 2017. 3, 6 +[37] Arthur Moreau, Nathan Piasco, Dzmitry Tsishkou, Bogdan Stanciulescu, and Arnaud de La Fortelle. Coordinet: uncertainty-aware pose regressor for reliable vehicle localization. In IEEE/CVF Winter Conference on Applications of Computer Vision, 2022. 3 +[38] Jeffri Murragarra-Llerena, Thiago LT Da Silveira, and Claudio R Jung. Pose estimation for two-view panoramas based on keypoint matching: A comparative study and critical analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5202-5211, 2022. 3 +[39] Ken Museth. Vdb: High-resolution sparse volumes with dynamic topology. ACM transactions on graphics (TOG), 32 (3):1-22, 2013. 5 +[40] Tony Ng, Adrian Lopez-Rodriguez, Vassileios Balntas, and Krystian Mikolajczyk. Reassessing the limitations of cnn methods for camera pose regression. arXiv preprint arXiv:2108.07260, 2021. 8 +[41] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019. 6 + +[42] Paul-Edouard Sarlin, Cesar Cadena, Roland Siegwart, and Marcin Dymczyk. From coarse to fine: Robust hierarchical localization at large scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12716–12725, 2019. 1, 2, 7 +[43] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4938–4947, 2020. 2, 4, 7 +[44] Paul-Edouard Sarlin, Mihai Dusmanu, Johannes L Schonberger, Pablo Speciale, Lukas Gruber, Viktor Larsson, Ondrej Miksik, and Marc Pollefeys. Lamar: Benchmarking localization and mapping for augmented reality. In European Conference on Computer Vision, pages 686-704. Springer, 2022. 3, 4 +[45] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. 
Efficient & effective prioritized matching for large-scale image-based localization. IEEE transactions on pattern analysis and machine intelligence, 39(9):1744-1756, 2016. 1, 2, 3 +[46] Torsten Sattler, Will Maddern, Carl Toft, Akihiko Torii, Lars Hammarstrand, Erik Stenborg, Daniel Safari, Masatoshi Okutomi, Marc Pollefeys, Josef Sivic, et al. Benchmarking 6dof outdoor visual localization in changing conditions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8601-8610, 2018. 3, 4, 7 +[47] Torsten Sattler, Qunjie Zhou, Marc Pollefeys, and Laura Leal-Taixe. Understanding the limitations of cnn-based absolute camera pose regression. In IEEE/CVF conference on computer vision and pattern recognition, 2019. 3, 8 +[48] Thomas Schops, Johannes L Schonberger, Silvano Galliani, Torsten Sattler, Konrad Schindler, Marc Pollefeys, and Andreas Geiger. A multi-view stereo benchmark with high-resolution images and multi-camera videos. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3260-3269, 2017. 3, 4 +[49] Yoli Shavit, Ron Ferens, and Yoshi Keller. Learning multiscene absolute pose regression with transformers. In IEEE/CVF International Conference on Computer Vision, pages 2733-2742, 2021. 1, 3, 6, 7 +[50] Jamie Shotton, Ben Glocker, Christopher Zach, Shahram Izadi, Antonio Criminisi, and Andrew Fitzgibbon. Scene coordinate regression forests for camera relocalization in rgb-d images. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2930-2937, 2013. 3, 4 +[51] Alexander D Stewart. Localisation using the appearance of prior structure. PhD thesis, University of Oxford, 2014. 5 +[52] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8922-8931, 2021. 2 +[53] Xun Sun, Yuanfan Xie, Pei Luo, and Liang Wang. A dataset for benchmarking image-based localization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7436-7444, 2017. 3, 4 +[54] Hajime Taira, Masatoshi Okutomi, Torsten Sattler, Mircea Cimpoi, Marc Pollefeys, Josef Sivic, Tomas Pajdla, and Ak + +ihiko Torii. Inloc: Indoor visual localization with dense matching and view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7199-7209, 2018. 1, 3, 4 +[55] Michal Tyszkiiewicz, Pascal Fua, and Eduard Trulls. Disk: Learning local features with policy gradient. Advances in Neural Information Processing Systems, 33:14254-14265, 2020. 2, 7 +[56] Vladyslav Usenko, Nikolaus Demmel, and Daniel Cremers. The double sphere camera model. In 2018 International Conference on 3D Vision (3DV), pages 552-560. IEEE, 2018. 4 +[57] Ignacio Vizzo, Tiziano Guadagnino, Jens Behley, and Cyril Stachniss. Vdbfusion: Flexible and efficient tsdf integration of range sensor data. Sensors, 22(3):1296, 2022. 5 +[58] Johanna Wald, Torsten Sattler, Stuart Golodetz, Tommaso Cavallari, and Federico Tombari. Beyond controlled environments: 3d camera re-localization in changing indoor scenes. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VII 16, pages 467-487. Springer, 2020. 3, 4 +[59] Jian Wu, Liwei Ma, and Xiaolin Hu. Delving deeper into convolutional neural networks for camera relocalization. 
In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 5644-5651. IEEE, 2017. 3 +[60] Hang Xu, Qiang Zhao, Yike Ma, Xiaodong Li, Peng Yuan, Bailan Feng, Chenggang Yan, and Feng Dai. Pandora: A panoramic detection dataset for object with orientation. In ECCV, 2022. 1 +[61] Shen Yan, Yu Liu, Long Wang, Zehong Shen, Zhen Peng, Haomin Liu, Maojun Zhang, Guofeng Zhang, and Xiaowei Zhou. Long-term visual localization with mobile sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17245-17255, 2023. 1, 3, 4, 6 +[62] Dawen Yu and Shunping Ji. Grid based spherical cnn for object detection from panoramic images. Sensors, 19(11): 2622, 2019. 1 \ No newline at end of file diff --git a/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/images.zip b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..57f742a44877651d3add4285b9481a57d7beb8a2 --- /dev/null +++ b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b82f3eab1d21ceb05dacfdefc81a95464674be637432e661ad49e0195b1221b5 +size 728977 diff --git a/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/layout.json b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..6ba49b7facf4dbf3a7b3374f6bb610d8381bd1c4 --- /dev/null +++ b/2024/360Loc_ A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries/layout.json @@ -0,0 +1,10874 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 48, + 103, + 545, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 103, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 48, + 103, + 545, + 138 + ], + "type": "text", + "content": "360Loc: A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "spans": [ + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "text", + "content": "Huajian Huang" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "text", + "content": " Changkun Liu" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "text", + "content": " Yipeng Zhu" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "text", + "content": " Hui Cheng" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "text", + "content": " Tristan Braud" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "text", + "content": " Sai-Kit Yeung" + }, + { + 
"bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 53, + 160, + 550, + 202 + ], + "type": "text", + "content": "The Hong Kong University of Science and Technology * equal contribution Sun Yat-sen University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 205, + 543, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 205, + 543, + 217 + ], + "spans": [ + { + "bbox": [ + 59, + 205, + 543, + 217 + ], + "type": "text", + "content": "{hhuangbg, cliudg, yzhudg}@connect.ust.hk, chengh9@mail.sysu.edu.cn, {braudit, saikit}@ust.hk" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 245, + 192, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 245, + 192, + 258 + ], + "spans": [ + { + "bbox": [ + 143, + 245, + 192, + 258 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "spans": [ + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "text", + "content": "Portable " + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "text", + "content": " cameras are becoming a cheap and efficient tool to establish large visual databases. By capturing omnidirectional views of a scene, these cameras could expedite building environment models that are essential for visual localization. However, such an advantage is often overlooked due to the lack of valuable datasets. This paper introduces a new benchmark dataset, 360Loc, composed of " + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "text", + "content": " images with ground truth poses for visual localization. We present a practical implementation of " + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "text", + "content": " mapping combining " + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "text", + "content": " images with lidar data to generate the ground truth 6DoF poses. 360Loc is the first dataset and benchmark that explores the challenge of cross-device visual positioning, involving " + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "text", + "content": " reference frames, and query frames from pinhole, ultra-wide FoV fisheye, and " + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "text", + "content": " cameras. 
We propose a virtual camera approach to generate lower-FoV query frames from " + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 271, + 289, + 618 + ], + "type": "text", + "content": " images, which ensures a fair comparison of performance among different query types in visual localization tasks. We also extend this virtual camera approach to feature matching-based and pose regression-based methods to alleviate the performance loss caused by the cross-device domain gap, and evaluate its effectiveness against state-of-the-art baselines. We demonstrate that omnidirectional visual localization is more robust in challenging large-scale scenes with symmetries and repetitive structures. These results provide new insights into 360-camera mapping and omnidirectional visual localization with cross-device queries. Project Page and dataset: https://huajianup.github.io/research/360Loc/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 644, + 128, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 644, + 128, + 657 + ], + "spans": [ + { + "bbox": [ + 47, + 644, + 128, + 657 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "content": "Visual localization refers to predicting the 6DoF absolute pose (translation and rotation) of query images in a known scene. Accurate visual localization has wide applications in augmented reality (AR), navigation, and robotics." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 246, + 547, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 246, + 547, + 413 + ], + "spans": [ + { + "bbox": [ + 304, + 246, + 547, + 413 + ], + "type": "text", + "content": "Over the last decade, many visual localization methods have been proposed, including feature matching-based approaches [17, 33, 42, 45, 54], scene coordinate regression [5-7] and absolute pose regressors (APRs) [23, 24, 49]. Much of this progress has been driven by the availability of numerous datasets and benchmarks targeting different challenges, as shown in Table 1. However, existing methods and datasets focus on localization and mapping using pinhole images. Although the merits of " + }, + { + "bbox": [ + 304, + 246, + 547, + 413 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 246, + 547, + 413 + ], + "type": "text", + "content": " camera on visual perception have been recognized [22, 60, 62], the application of " + }, + { + "bbox": [ + 304, + 246, + 547, + 413 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 246, + 547, + 413 + ], + "type": "text", + "content": " cameras for visual localization is still under-explored. 
Recently, SensLoc [61] started to apply " + }, + { + "bbox": [ + 304, + 246, + 547, + 413 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 246, + 547, + 413 + ], + "type": "text", + "content": " cameras to facilitate data collection, but their pipeline cannot perform omnidirectional localization directly from the " + }, + { + "bbox": [ + 304, + 246, + 547, + 413 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 246, + 547, + 413 + ], + "type": "text", + "content": " images." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "text", + "content": "This paper introduces 360Loc, a new challenging benchmark dataset to facilitate research on omnidirectional visual localization. The dataset contains " + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "text", + "content": " images captured in diverse campus-scale indoor and outdoor environments, featuring highly symmetrical and repetitive features, as well as interference of dynamic objects. To capture this dataset, we present a practical pipeline using a portable 360-cameras platform to obtain reliable pose estimations of " + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "text", + "content": " cameras as ground truth. Although " + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "text", + "content": " cameras present significant advantages for capturing reference data, real-life applications applying visual localization often rely on traditional cameras. Examples include robots equipped with fisheye cameras and phone-based AR applications using the embedded pinhole camera. This raises the problem of cross-device visual localization on image databases captured with " + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "text", + "content": " cameras. We thus supplement the reference database composed of " + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "text", + "content": " images with query frames including pin-hole, fisheye and " + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 414, + 548, + 628 + ], + "type": "text", + "content": " cameras." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "content": "We introduce the concept of virtual camera to generate high-quality lower-FoV images with different camera parameters from " + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "content": " images. This enables a fair comparison of performance among queries from different devices in cross-device visual localization. We adapt existing feature-matching-based methods and APRs to support " + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "content": " image queries and benchmark these methods for 360-based cross-" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "22314" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 71, + 545, + 234 + ], + "blocks": [ + { + "bbox": [ + 51, + 71, + 545, + 234 + ], + "lines": [ + { + "bbox": [ + 51, + 71, + 545, + 234 + ], + "spans": [ + { + "bbox": [ + 51, + 71, + 545, + 234 + ], + "type": "image", + "image_path": "db604ce410d40d69b14afc1112843cff89686cce30fc31d6967b76f9a479c2a0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 51, + 236, + 166, + 304 + ], + "blocks": [ + { + "bbox": [ + 51, + 236, + 166, + 304 + ], + "lines": [ + { + "bbox": [ + 51, + 236, + 166, + 304 + ], + "spans": [ + { + "bbox": [ + 51, + 236, + 166, + 304 + ], + "type": "image", + "image_path": "b04c5ebe9bb3f7b53d057b8ed2eecf41bd550b802b96fd8124e25e075458ecc4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 316, + 548, + 373 + ], + "lines": [ + { + "bbox": [ + 46, + 316, + 548, + 373 + ], + "spans": [ + { + "bbox": [ + 46, + 316, + 548, + 373 + ], + "type": "text", + "content": "Figure 1. 
Overview of dataset collection and ground truth generation: 1) Use the platform to collect " + }, + { + "bbox": [ + 46, + 316, + 548, + 373 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 316, + 548, + 373 + ], + "type": "text", + "content": " images and frame-by-frame point clouds. Obtain real-time camera poses; 2) Apply optimization methodology to achieve data registration, resulting in a globally reconstructed point cloud model. Then, align the models in daytime and nighttime to get consistent poses; 3) Perform cropping to get virtual camera images and generate corresponding depth images. As a result, 360Loc takes advantage of " + }, + { + "bbox": [ + 46, + 316, + 548, + 373 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 316, + 548, + 373 + ], + "type": "text", + "content": " images for efficient mapping while providing query images in five different camera models in order to analyze the challenge of cross-domain visual localization." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 168, + 236, + 277, + 304 + ], + "blocks": [ + { + "bbox": [ + 168, + 236, + 277, + 304 + ], + "lines": [ + { + "bbox": [ + 168, + 236, + 277, + 304 + ], + "spans": [ + { + "bbox": [ + 168, + 236, + 277, + 304 + ], + "type": "image", + "image_path": "49886da25d424f9dd2d7cd4d9c452062c8af942c39c265d2b7ac498dce54b55d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 278, + 236, + 545, + 304 + ], + "blocks": [ + { + "bbox": [ + 278, + 236, + 545, + 304 + ], + "lines": [ + { + "bbox": [ + 278, + 236, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 278, + 236, + 545, + 304 + ], + "type": "image", + "image_path": "5a0f63ab7919017480006a31cc712dd989bce962a717e19ed0f54edc146729a3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 393, + 287, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 393, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 46, + 393, + 287, + 453 + ], + "type": "text", + "content": "device visual localization. Since different cameras present different imaging patterns, the cross-device domain gap is expected to lead to performance loss. We extend the virtual camera approach to data augmentation for end-to-end solutions such as image retrieval (IR) and APRs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 456, + 287, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 456, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 46, + 456, + 287, + 589 + ], + "type": "text", + "content": "By conducting exhaustive evaluations, we demonstrate the advantages of " + }, + { + "bbox": [ + 46, + 456, + 287, + 589 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 456, + 287, + 589 + ], + "type": "text", + "content": " cameras in reducing ambiguity in visual localization on scenes featuring symmetric or repetitive features. 
We also show improvements against state-of-the-art (SOTA) baselines using the virtual camera method for cross-device visual localization on images databases captured with " + }, + { + "bbox": [ + 46, + 456, + 287, + 589 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 456, + 287, + 589 + ], + "type": "text", + "content": " cameras. These results provide novel insights on mapping using " + }, + { + "bbox": [ + 46, + 456, + 287, + 589 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 456, + 287, + 589 + ], + "type": "text", + "content": " images, enhancing the anti-ambiguity capability of query images, reducing domain gap cross-device in visual localization, and improving the generalization ability of APRs by applying virtual cameras." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 58, + 591, + 255, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 591, + 255, + 602 + ], + "spans": [ + { + "bbox": [ + 58, + 591, + 255, + 602 + ], + "type": "text", + "content": "Our contribution can be summarized as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 47, + 605, + 287, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 605, + 287, + 641 + ], + "spans": [ + { + "bbox": [ + 47, + 605, + 287, + 641 + ], + "type": "text", + "content": "- We propose a practical implementation of " + }, + { + "bbox": [ + 47, + 605, + 287, + 641 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 605, + 287, + 641 + ], + "type": "text", + "content": " mapping combining lidar data with " + }, + { + "bbox": [ + 47, + 605, + 287, + 641 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 605, + 287, + 641 + ], + "type": "text", + "content": " images for establishing the ground truth 6DoF poses." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 642, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 642, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 642, + 287, + 677 + ], + "type": "text", + "content": "- A virtual camera approach to generate high-quality lower-FoV images with different camera parameters from " + }, + { + "bbox": [ + 47, + 642, + 287, + 677 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 642, + 287, + 677 + ], + "type": "text", + "content": " views." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": "- A novel dataset for cross-device visual localization based on " + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": " reference images with pinhole, fisheye, and " + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": " query images." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 393, + 545, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 393, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 306, + 393, + 545, + 453 + ], + "type": "text", + "content": "- Demonstration of our approach's efficacy over state-of-the-art solutions for visual localization using " + }, + { + "bbox": [ + 306, + 393, + 545, + 453 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 306, + 393, + 545, + 453 + ], + "type": "text", + "content": " image databases, resulting in decreased localization ambiguity, reduced cross-device domain gap, and improved generalization ability of APRs." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 468, + 391, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 468, + 391, + 481 + ], + "spans": [ + { + "bbox": [ + 306, + 468, + 391, + 481 + ], + "type": "text", + "content": "2. Related work" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 490, + 419, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 490, + 419, + 502 + ], + "spans": [ + { + "bbox": [ + 306, + 490, + 419, + 502 + ], + "type": "text", + "content": "2.1. Visual Localization" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 510, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 689 + ], + "type": "text", + "content": "Structure-based methods predict camera poses by establishing 2D-3D correspondences indirectly with local feature extractors and matchers [16, 35, 42, 43, 52, 55] or directly with scene coordinate regression [5-7]. HLoc [42, 43] pipeline scales up to large scenes using image retrieval [1, 3, 18, 20] as an intermediate step, which achieves SOTA accuracy on many benchmarks. This type of approach usually supports pinhole cameras with different intrinsic parameters. However, the performance of " + }, + { + "bbox": [ + 304, + 510, + 545, + 689 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 510, + 545, + 689 + ], + "type": "text", + "content": " and fisheye cameras has not been evaluated before due to the lack of support for " + }, + { + "bbox": [ + 304, + 510, + 545, + 689 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 510, + 545, + 689 + ], + "type": "text", + "content": " cameras in the Structure from Motion (SfM) tools like COLMAP [45] and the lack of datasets for fisheye and " + }, + { + "bbox": [ + 304, + 510, + 545, + 689 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 510, + 545, + 689 + ], + "type": "text", + "content": " cameras. [25-27] are point-cloud-based panorama localization methods for " + }, + { + "bbox": [ + 304, + 510, + 545, + 689 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 510, + 545, + 689 + ], + "type": "text", + "content": " queries but they do not consider cross-device visual localization." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "Absolute Pose Regressors (APRs) are end-to-end learning-based methods that directly regress the absolute camera" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "22315" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 204 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 204 + ], + "type": "text", + "content": "pose from input images without the knowledge of 3D models and establish 2D-3D correspondences. APRs [4, 8, 12, 13, 23, 24, 36, 37, 49, 59] provide faster inference than structure-based methods at the cost of accuracy and robustness [47]. Besides, APRs have generally only been tested on the [9], 7Scenes [50], and Cambridge Landmarks [24] datasets in previous studies. A notable characteristic of these datasets is that the training set and test set images were taken from the same camera. In this paper, we enhance cross-device pose regression for APRs by introducing virtual cameras as a data augmentation technique." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 215, + 110, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 215, + 110, + 227 + ], + "spans": [ + { + "bbox": [ + 47, + 215, + 110, + 227 + ], + "type": "text", + "content": "2.2. Datasets" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "content": "The existing dataset has the following limitations. 1). Most datasets [9, 10, 24, 50, 54, 58] do not consider the need for cross-device localization, i.e., query images come from the same camera. Even though some datasets [11, 14, 30, 44, 46, 48, 53, 61] take into account cross-device localization, these devices are only pinhole cameras with different camera intrinsic parameters and do not have particularly large domain-gaps. Compared to [32], our pinhole and fisheye images are extracted from " + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "content": " images via virtual cameras, which makes less demands on the device and allows for a fair and more flexible comparison of the effects of different FoVs. In this paper, our 360Loc datasets provide five kinds of queries from pinhole, fisheye and " + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "content": " cameras to promote the research of cross-device localization. 2). 
Now there is no 6DoF visual localization dataset and benchmark considering " + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "content": " reference images and " + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "content": " query images, even though [2, 25, 38] contain " + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "content": " images with 6DoF pose labels, they are not standard visual localization datasets with independent mapping/reference sequences and query sequences like datasets in Table 1. Other datasets [11, 61] use " + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "content": " cameras for data collection, in the end they cropped " + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "content": " to perspective images and then tailor these images to the classical visual localization pipeline of pinhole cameras. The academic community is mainly driven by benchmarks where all training, reference, and query images are pinhole images because they rely on SfM tools [45] which does not support " + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "content": " cameras to obtain ground-truth (GT) and get sparse 3D point cloud models for recovering camera poses. However, we note that the " + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "content": " camera can cover the scene with greater efficiency than normal pinhole cameras with narrow Field-of-View (FoV), which makes " + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 235, + 289, + 715 + ], + "type": "text", + "content": " images particularly suitable as reference images. 3) Although the current dataset has explored the challenges of visual localization from various aspects such as weather variations, daynight transitions, scene changes, and moving individuals and objects [24, 30, 44, 46, 58, 61], there is still insufficient research specifically targeting highly ambiguous environments which contain symmetries, repetitive structures and insufficient textures. 
Only two indoor datasets [9, 53]" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 310, + 72, + 543, + 200 + ], + "blocks": [ + { + "bbox": [ + 310, + 72, + 543, + 200 + ], + "lines": [ + { + "bbox": [ + 310, + 72, + 543, + 200 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 543, + 200 + ], + "type": "image", + "image_path": "5a21dc87b08877d924835e56c6a68dbaca2e7f703bb022dd57a74f2331ae6cad.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 209, + 545, + 243 + ], + "lines": [ + { + "bbox": [ + 305, + 209, + 545, + 243 + ], + "spans": [ + { + "bbox": [ + 305, + 209, + 545, + 243 + ], + "type": "text", + "content": "Figure 2. The four scenes in 360Loc, all four scenes contain symmetrical, repetitive structures and moving objects. The camera trajectories are visualized as spheres." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 305, + 262, + 545, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 262, + 545, + 358 + ], + "spans": [ + { + "bbox": [ + 305, + 262, + 545, + 358 + ], + "type": "text", + "content": "and LaMAR [44] consider challenges in ambiguous environments. In this paper, we studied 4 ambiguous scenes from both indoor and outdoor environments with a scale much larger than dataset [9] (See Figure 2). We conduct exhaustive assessments of image retrieval, local matching localization, and absolute pose regression to show that queries from the " + }, + { + "bbox": [ + 305, + 262, + 545, + 358 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 305, + 262, + 545, + 358 + ], + "type": "text", + "content": " camera are harder to obtain plausible solutions than other queries from cameras with narrower FoV." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 369, + 423, + 380 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 369, + 423, + 380 + ], + "spans": [ + { + "bbox": [ + 306, + 369, + 423, + 380 + ], + "type": "text", + "content": "3. The 360Loc Dataset" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 388, + 545, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 388, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 305, + 388, + 545, + 533 + ], + "type": "text", + "content": "The 360Loc dataset contains 4 locations from a local university. Figure 2 displays the reference point cloud and example frames from each scene. Atrium is inside a building with a surrounding structure that exhibits a high degree of symmetry and repetition, making it a highly ambiguous environment. Concourse is a large indoor scene with many moving people, which can be used for evaluating the robustness of any localization algorithm in scenes with many moving objects. Piatrium is a scene containing both indoor Atrium and outdoor environments, covering an outdoor piazza with coffee shops, bookstores, and souvenir shops. Hall is a modern building of a student dormitory." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 540, + 435, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 540, + 435, + 552 + ], + "spans": [ + { + "bbox": [ + 306, + 540, + 435, + 552 + ], + "type": "text", + "content": "3.1. 
360 Mapping Platform" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "text", + "content": "We utilized the handheld multimodal data acquisition platform depicted in Figure 1 for data collection. This platform incorporates a " + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "text", + "content": " camera, a Velodyne VLP-16 multi-line lidar, an NUC mini-computer, and a display screen. Figure 1 also illustrates the relative relationship among the " + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "text", + "content": " camera coordinate system " + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{c}}" + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "text", + "content": "-XYZ, the lidar coordinate system " + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{l}}" + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "text", + "content": "-XYZ as well as the world coordinate " + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{w}}" + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "text", + "content": "-XYZ. The portable 360 camera equipped on this device can capture high-resolution omnidirectional images with a resolution of " + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "6144 \\times 3072" + }, + { + "bbox": [ + 305, + 557, + 545, + 713 + ], + "type": "text", + "content": " (2:1 aspect ratio). It also features a built-in six-axis gyroscope that provides stabilization support, making it suitable for handheld mobile data capture. The Velodyne VLP-16 multi-line lidar has" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "22316" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 70, + 539, + 228 + ], + "blocks": [ + { + "bbox": [ + 53, + 70, + 539, + 228 + ], + "lines": [ + { + "bbox": [ + 53, + 70, + 539, + 228 + ], + "spans": [ + { + "bbox": [ + 53, + 70, + 539, + 228 + ], + "type": "table", + "html": "
DatasetScale and EnvironmentChallengesReference/Query typeGroundtruth SolutionAccuracy
7Scenes [50]Small IndoorNonepinhole / pinholeRGB-D≈ cm
RIO10 [58]Small IndoorChangespinhole / pinholeVIO> dm
Baidu Mall [53]Medium IndoorPeople, Ambiguouspinhole / pinholelidar+Manual≈ dm
Naver Labs [30]Medium IndoorPeople, Changespinhole / pinholelidar+SfM≈ dm
InLoc [54]Medium IndoorNonepinhole / pinholelidar+Manual> dm
AmbiguousLoc [9]Small IndoorAmbiguouspinhole / pinholeSLAM≈ cm
Aachen [46]Large outdoorPeople, Day-Nightpinhole / pinholeSfM> dm
Cambridge [24]Medium outdoorPeople, Weatherpinhole / pinholeSfM> dm
San Francisco [11]Large outdoorPeople, Constructionpinhole / pinholeSfM+GPS≈ m
NCLT [10]Medium Outdoor + IndoorWeatherpinhole / pinholeGPS+SLAM+lidar≈ dm
ADVIO [14]Medium Outdoor+IndoorPeoplepinhole / pinholeVIO+Manual≈ m
ETH3D [48]Medium Outdoor + IndoorNonepinhole / pinholelidar+Manual≈ mm
LaMAR [44]Medium Outdoor+IndoorPeople, Weather, Day-Night, Construction, Changes, Ambiguouspinhole / pinholelidar+SfM+VIO≈ cm
SensLoc [61]Large OutdoorPeople, Weather, Day-Night, Construction, Changespinhole / pinholeSL+VIO+RTK+Gravity< dm
360Loc (ours)Medium Outdoor+IndoorPeople, Weather, Day-Night, Construction, Changes, Ambiguous360 / (360 + pinhole + fisheye)lidar+VIO≈ cm
", + "image_path": "64616107b0ee699395a44ca51e2990724bd19ce9bdd7a53fe865f446ab0eed11.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 66, + 273, + 264, + 343 + ], + "blocks": [ + { + "bbox": [ + 47, + 232, + 546, + 255 + ], + "lines": [ + { + "bbox": [ + 47, + 232, + 546, + 255 + ], + "spans": [ + { + "bbox": [ + 47, + 232, + 546, + 255 + ], + "type": "text", + "content": "Table 1. Overview of popular visual localization datasets. No dataset, besides ours, consider " + }, + { + "bbox": [ + 47, + 232, + 546, + 255 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 232, + 546, + 255 + ], + "type": "text", + "content": " images as reference and query frames from pinhole, ultra-wide FoV fisheye, and " + }, + { + "bbox": [ + 47, + 232, + 546, + 255 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 232, + 546, + 255 + ], + "type": "text", + "content": " cameras." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 66, + 273, + 264, + 343 + ], + "lines": [ + { + "bbox": [ + 66, + 273, + 264, + 343 + ], + "spans": [ + { + "bbox": [ + 66, + 273, + 264, + 343 + ], + "type": "table", + "html": "
SymbolNameField of ViewResolutionType
c0360360°6144×3072reference/query
c1fisheye1120°1280×1024query
c2fisheye2150°1280×1024query
c3fisheye3195°1280×1024query
c4pinhole85°1920×1200query
", + "image_path": "5dc53a18d7cebc7fc1336a8883adbc08eaf5bf8557fabc99a4e01a10d3d68356.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 48, + 369, + 287, + 425 + ], + "blocks": [ + { + "bbox": [ + 63, + 346, + 271, + 357 + ], + "lines": [ + { + "bbox": [ + 63, + 346, + 271, + 357 + ], + "spans": [ + { + "bbox": [ + 63, + 346, + 271, + 357 + ], + "type": "text", + "content": "Table 2. The representation and parameters of 5 cameras." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 369, + 287, + 425 + ], + "lines": [ + { + "bbox": [ + 48, + 369, + 287, + 425 + ], + "spans": [ + { + "bbox": [ + 48, + 369, + 287, + 425 + ], + "type": "table", + "html": "
Scene# Frames Reference 360# Frames Query (day / night)Spatial Extent (m)
360PinholeFisheye1Fisheye2Fisheye3
Concourse491593/5141186/10281186/10281186/10281186/102893 × 15
Hall5401123/10612246/21222246/21222246/21222246/2122105 × 52
Atrium581875/12191750/24381750/24381750/24381750/243865 × 36
Piatrium6321008/6972016/13942016/13942016/13942016/139498 × 70
", + "image_path": "11966165d1a0d4bd16dd6e085a06137ba23aadacd68190719a8b45209749a8fd.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 100, + 428, + 233, + 439 + ], + "lines": [ + { + "bbox": [ + 100, + 428, + 233, + 439 + ], + "spans": [ + { + "bbox": [ + 100, + 428, + 233, + 439 + ], + "type": "text", + "content": "Table 3. 360Loc dataset description." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "text", + "content": "a FoV of " + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "inline_equation", + "content": "360^{\\circ} \\times 30^{\\circ}" + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "text", + "content": ", angular resolution of " + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "inline_equation", + "content": "0.2^{\\circ} \\times 2.0^{\\circ}" + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "text", + "content": ", and rotation rate of " + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "inline_equation", + "content": "10\\mathrm{Hz}" + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "text", + "content": ", offering a comprehensive " + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "text", + "content": " environmental view. Regarding the calibration of the extrinsic poses between the lidar and the " + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "text", + "content": " camera, we employed a calibration toolbox [29] that applies to both lidar and camera projection models. This toolbox utilizes the SuperGlue [43] image matching pipeline to establish 2D-3D correspondences between the lidar and camera image. We perform pseudo-registration by synchronizing the two data modalities, images, and point clouds. Eventually, we use graph-based SLAM techniques for continuous pose estimations. In the four scenes, a total of 18 independent sequences of " + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "text", + "content": " images were captured (12 daytime, and 6 nighttime), resulting in a total number of 9334 images. For each scene, we selected a specific sequence captured during the daytime as the reference images, while the remaining images were defined as query images of the " + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "text", + "content": " image type. We provide more details and show why " + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 462, + 289, + 714 + ], + "type": "text", + "content": " mapping is superior to pinhole SfM in ambiguous scenes with repetitive and symmetric structures in the supplementary material." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 326, + 274, + 523, + 425 + ], + "blocks": [ + { + "bbox": [ + 326, + 274, + 523, + 425 + ], + "lines": [ + { + "bbox": [ + 326, + 274, + 523, + 425 + ], + "spans": [ + { + "bbox": [ + 326, + 274, + 523, + 425 + ], + "type": "image", + "image_path": "60b85e647fd37d209120f74b536d02e8c0e48bff2ad2d0d153036a52694d43f3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 427, + 545, + 450 + ], + "lines": [ + { + "bbox": [ + 305, + 427, + 545, + 450 + ], + "spans": [ + { + "bbox": [ + 305, + 427, + 545, + 450 + ], + "type": "text", + "content": "Figure 3. Illustration of obtaining virtual camera images through random poses and image cropping." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 471, + 429, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 471, + 429, + 483 + ], + "spans": [ + { + "bbox": [ + 306, + 471, + 429, + 483 + ], + "type": "text", + "content": "3.1.1 Cross-device Queries" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 489, + 545, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 489, + 545, + 562 + ], + "spans": [ + { + "bbox": [ + 304, + 489, + 545, + 562 + ], + "type": "text", + "content": "To enable a rigorous comparison of the difference in the performance of different FoV queries for visual localization tasks, we created four virtual cameras with diverse FoV from " + }, + { + "bbox": [ + 304, + 489, + 545, + 562 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 489, + 545, + 562 + ], + "type": "text", + "content": " cameras, which are shown in Figure 2. 
Given a " + }, + { + "bbox": [ + 304, + 489, + 545, + 562 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 489, + 545, + 562 + ], + "type": "text", + "content": " image " + }, + { + "bbox": [ + 304, + 489, + 545, + 562 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{c_0}" + }, + { + "bbox": [ + 304, + 489, + 545, + 562 + ], + "type": "text", + "content": ", the corresponding virtual camera with preconfigured intrinsic parameters is extracted by" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 346, + 570, + 545, + 586 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 570, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 346, + 570, + 545, + 586 + ], + "type": "interline_equation", + "content": "\\mathcal {I} _ {c _ {n}} = \\Psi_ {c _ {n}} \\left(\\mathcal {I} _ {c _ {0}}\\right) = \\pi_ {c _ {n}} ^ {- 1} \\left(\\pi_ {c _ {0}} \\left(\\boldsymbol {R} \\mathcal {I} _ {c _ {0}}\\right)\\right), \\tag {1}", + "image_path": "a751cd97b12f1af05c7fc20dce54d5400d63af70f35fa0fc9f0b155cbe23320d.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\pi_{c_n}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": " denote the projection function of virtual camera and " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\pi_{c_0}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": " is the projection function of " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": " camera. " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "R\\in SO(3)" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": " is a random relative rotation matrix to increase the diversity of views representing the scenes. Moreover, the inversed operation " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\Psi_{c_n}^{-1}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": " can convert the " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "c_{n}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": " image back to a " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": " image. 
As reported in Table 2, the virtual cameras include an undistorted pinhole model with " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "85^{\\circ}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": " FoV and three fisheye cameras in Dual Sphere mode [56] with " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "120^{\\circ}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "150^{\\circ}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "195^{\\circ}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": " FoV respectively. Table 3 presents the number of image frames in the 360Loc dataset." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22317" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 192, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 192, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 192, + 83 + ], + "type": "text", + "content": "3.2. Ground Truth Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 90, + 287, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 90, + 287, + 198 + ], + "spans": [ + { + "bbox": [ + 46, + 90, + 287, + 198 + ], + "type": "text", + "content": "Besides the graph-based optimization in SLAM, we designed a set of offline optimization strategies to further improve the accuracy of camera pose estimation. After the acquisition of precise dense point cloud reconstructions and poses of " + }, + { + "bbox": [ + 46, + 90, + 287, + 198 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 90, + 287, + 198 + ], + "type": "text", + "content": " cameras, an Iterative Closest Point (ICP) algorithm is applied to align models between reference and the query sequences in the same scene. Moreover, we reconstructed the mesh model of the scenes and generated corresponding depth maps of " + }, + { + "bbox": [ + 46, + 90, + 287, + 198 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 90, + 287, + 198 + ], + "type": "text", + "content": " cameras." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 198, + 287, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 198, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 46, + 198, + 287, + 293 + ], + "type": "text", + "content": "Bundle Adjustment (BA) of lidar mapping. Incremental map construction can suffer from accumulating errors due to environmental degradation. We utilized a BA framework based on feature points extracted from lidar to refine the map and the poses. The optimization process involved minimizing the covariance matrix to constrain the distances between feature points and edge lines or plane features that are mutually matched." 
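The virtual-camera extraction of Eq. 1 above (Section 3.1.1) back-projects the pixels of a target view to rays, rotates them by a random R ∈ SO(3), and samples the equirectangular 360° frame. The sketch below is a minimal illustration of that remapping, assuming Python with NumPy and OpenCV (neither is named in the extracted text) and a standard equirectangular longitude/latitude convention; the exact projection models Ψ_{c_n} used by 360Loc may differ.

```python
import numpy as np
import cv2  # assumed; used only for the random rotation and the final remap

def equirect_sampling_map(W, H, fov_deg, R, eq_w, eq_h):
    """Eq. 1 as a lookup table: back-project pinhole pixels (pi_cn^-1), rotate by R,
    project with the 360-degree camera's spherical model (pi_c0)."""
    f = 0.5 * W / np.tan(0.5 * np.radians(fov_deg))          # focal length from FoV
    xs, ys = np.meshgrid(np.arange(W), np.arange(H))
    rays = np.stack([(xs - W / 2) / f, (ys - H / 2) / f,
                     np.ones_like(xs, dtype=float)], axis=-1)
    rays /= np.linalg.norm(rays, axis=-1, keepdims=True)
    rays = rays @ R.T                                        # random view rotation
    lon = np.arctan2(rays[..., 0], rays[..., 2])             # [-pi, pi]
    lat = np.arcsin(np.clip(rays[..., 1], -1.0, 1.0))        # [-pi/2, pi/2]
    u = (lon / (2 * np.pi) + 0.5) * eq_w                     # equirectangular column
    v = (lat / np.pi + 0.5) * eq_h                           # equirectangular row
    return u.astype(np.float32), v.astype(np.float32)

# Random R in SO(3) from a random axis-angle vector
rng = np.random.default_rng(0)
axis = rng.normal(size=3)
rvec = axis / np.linalg.norm(axis) * rng.uniform(0, np.pi)
R, _ = cv2.Rodrigues(rvec)

# pano = cv2.imread("pano.jpg")                              # 6144x3072 equirectangular frame
# u, v = equirect_sampling_map(1920, 1200, 85.0, R, pano.shape[1], pano.shape[0])
# pinhole_view = cv2.remap(pano, u, v, cv2.INTER_LINEAR, borderMode=cv2.BORDER_WRAP)
```

The inverse map Ψ_{c_n}^{-1} mentioned in the text would scatter the cropped pixels back onto the panorama grid with the same ray geometry.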
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "text", + "content": "First, we utilize an octree data structure to perform adaptive voxelization-based feature extraction. In this method, the point cloud map is segmented into voxels of predetermined size. Each voxel is checked to determine if its points " + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "inline_equation", + "content": "P_{u}^{f}" + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "text", + "content": " lie on a plane or a line, where " + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "inline_equation", + "content": "u \\in \\{1,2,\\dots ,U\\}" + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "text", + "content": ", obtained from the " + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "text", + "content": "-th frame of lidar scans. If not, the voxel is recursively subdivided using an octree structure until each voxel contains points " + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "inline_equation", + "content": "P_{u}^{f}" + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "text", + "content": " belonging to the same feature. Let's assume that the pose of the lidar in each frame is " + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "inline_equation", + "content": "\\pmb{\\eta} = \\{\\pmb{\\eta}_1,\\pmb{\\eta}_2,\\dots ,\\pmb{\\eta}_M\\}" + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "inline_equation", + "content": "\\pmb{\\eta}_{u} = (R_{u},t_{u}|R_{u} \\in SO(3), t_{u} \\in \\mathbb{R}^{3})" + }, + { + "bbox": [ + 47, + 293, + 287, + 437 + ], + "type": "text", + "content": ". In that case, the feature points in the global map can be represented as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 120, + 441, + 287, + 455 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 441, + 287, + 455 + ], + "spans": [ + { + "bbox": [ + 120, + 441, + 287, + 455 + ], + "type": "interline_equation", + "content": "\\boldsymbol {P} _ {u} = \\boldsymbol {R} _ {u} \\times \\boldsymbol {P} _ {u} ^ {f} + \\boldsymbol {t} _ {u}. 
\\tag {2}", + "image_path": "4d4e5668ad04d3d502f9b5a1b783b2119cc9fdaf53edfc9218fdfe6ad1559af3.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 460, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 460, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 460, + 287, + 555 + ], + "type": "text", + "content": "After simplifying the lidar map to edge or plane features, the process of BA becomes focused on determining the pose " + }, + { + "bbox": [ + 46, + 460, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\pmb{\\eta}" + }, + { + "bbox": [ + 46, + 460, + 287, + 555 + ], + "type": "text", + "content": " and the location of the single feature, which can be represented as " + }, + { + "bbox": [ + 46, + 460, + 287, + 555 + ], + "type": "inline_equation", + "content": "(\\pmb{n}_f,\\pmb{q})" + }, + { + "bbox": [ + 46, + 460, + 287, + 555 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 460, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\pmb{q}" + }, + { + "bbox": [ + 46, + 460, + 287, + 555 + ], + "type": "text", + "content": " represents the location of a specific feature, " + }, + { + "bbox": [ + 46, + 460, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\pmb{n}_f" + }, + { + "bbox": [ + 46, + 460, + 287, + 555 + ], + "type": "text", + "content": " is the direction vector of an edge line or the normal vector of a plane. To minimize the distance between each feature point and the corresponding feature, we can utilize the BA:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 559, + 287, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 559, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 61, + 559, + 287, + 590 + ], + "type": "interline_equation", + "content": "\\left(\\boldsymbol {\\eta} ^ {*}, \\boldsymbol {n} _ {f} ^ {*}, \\boldsymbol {q} ^ {*}\\right) = \\underset {\\boldsymbol {\\eta}, \\boldsymbol {n} _ {f}, \\boldsymbol {q}} {\\arg \\min } \\frac {1}{U} \\sum_ {u = 1} ^ {U} \\left(\\boldsymbol {n} _ {f} ^ {T} \\left(\\boldsymbol {P} _ {\\boldsymbol {u}} - \\boldsymbol {q}\\right)\\right) ^ {2}. \\tag {3}", + "image_path": "75f6001039fa3170f055d821a815cdce9d57b59a173513fa3a748b6fa7bbfd1c.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 594, + 287, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 287, + 691 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 287, + 691 + ], + "type": "text", + "content": "It has been proved that when the plane's normal vector is set to the minimum eigenvector, and " + }, + { + "bbox": [ + 46, + 594, + 287, + 691 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 46, + 594, + 287, + 691 + ], + "type": "text", + "content": " is set to the centroid of the feature, i.e. " + }, + { + "bbox": [ + 46, + 594, + 287, + 691 + ], + "type": "inline_equation", + "content": "\\mathbf{q} = \\hat{\\mathbf{P}} = \\frac{1}{U}\\sum_{u=1}^{U}\\mathbf{P}_{u}" + }, + { + "bbox": [ + 46, + 594, + 287, + 691 + ], + "type": "text", + "content": ", Eq. 3 reaches its minimum value. Additionally, the BA problem in lidar mapping has a closed-form solution that is independent of the features " + }, + { + "bbox": [ + 46, + 594, + 287, + 691 + ], + "type": "inline_equation", + "content": "(\\mathbf{n}_f,\\mathbf{q})" + }, + { + "bbox": [ + 46, + 594, + 287, + 691 + ], + "type": "text", + "content": " [34]. 
It can be simplified to the following problem:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 696, + 287, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 696, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 115, + 696, + 287, + 715 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\eta} ^ {*} = \\underset {\\boldsymbol {\\eta}} {\\arg \\min } \\lambda_ {\\min } (\\boldsymbol {A}), \\tag {4}", + "image_path": "9668c0f04273eed9d9b89705e808da5db2301c97459d30fbeac2d15d4356fbce.jpg" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 71, + 542, + 203 + ], + "blocks": [ + { + "bbox": [ + 310, + 71, + 542, + 203 + ], + "lines": [ + { + "bbox": [ + 310, + 71, + 542, + 203 + ], + "spans": [ + { + "bbox": [ + 310, + 71, + 542, + 203 + ], + "type": "image", + "image_path": "0c7f60c6616090658bc51805166bafe8f52d16397e1ad2cdb4eef4425ed8a6f7.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 355, + 212, + 494, + 222 + ], + "lines": [ + { + "bbox": [ + 355, + 212, + 494, + 222 + ], + "spans": [ + { + "bbox": [ + 355, + 212, + 494, + 222 + ], + "type": "text", + "content": "Figure 4. Overview of GT generation." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 244, + 487, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 244, + 487, + 256 + ], + "spans": [ + { + "bbox": [ + 306, + 244, + 487, + 256 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 306, + 244, + 487, + 256 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 306, + 244, + 487, + 256 + ], + "type": "text", + "content": " represents the eigenvalue of " + }, + { + "bbox": [ + 306, + 244, + 487, + 256 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 306, + 244, + 487, + 256 + ], + "type": "text", + "content": ", and" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 347, + 266, + 545, + 298 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 266, + 545, + 298 + ], + "spans": [ + { + "bbox": [ + 347, + 266, + 545, + 298 + ], + "type": "interline_equation", + "content": "\\boldsymbol {A} = \\frac {1}{U} \\sum_ {u = 1} ^ {U} \\left(\\boldsymbol {P} _ {u} - \\hat {\\boldsymbol {P}}\\right) \\left(\\boldsymbol {P} _ {u} - \\hat {\\boldsymbol {P}}\\right) ^ {T}. 
\\tag {5}", + "image_path": "4b86b7fc13aeadf006720a2b8fe9ed6681e04296da7cc8487d7702498abf90e0.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 307, + 545, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 307, + 545, + 366 + ], + "spans": [ + { + "bbox": [ + 304, + 307, + 545, + 366 + ], + "type": "text", + "content": "Now, the BA problem is simplified by adjusting the lidar pose " + }, + { + "bbox": [ + 304, + 307, + 545, + 366 + ], + "type": "inline_equation", + "content": "\\pmb{\\eta}" + }, + { + "bbox": [ + 304, + 307, + 545, + 366 + ], + "type": "text", + "content": " to minimize the smallest eigenvalue " + }, + { + "bbox": [ + 304, + 307, + 545, + 366 + ], + "type": "inline_equation", + "content": "\\lambda_3" + }, + { + "bbox": [ + 304, + 307, + 545, + 366 + ], + "type": "text", + "content": " of the point covariance matrix " + }, + { + "bbox": [ + 304, + 307, + 545, + 366 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 304, + 307, + 545, + 366 + ], + "type": "text", + "content": " defined in Eq. 5. By employing this strategy, we refined the pose " + }, + { + "bbox": [ + 304, + 307, + 545, + 366 + ], + "type": "inline_equation", + "content": "\\pmb{\\eta}" + }, + { + "bbox": [ + 304, + 307, + 545, + 366 + ], + "type": "text", + "content": " of each frame and the edge or plane features in the lidar map." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "text", + "content": "Refined cameras poses. The poses of " + }, + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "text", + "content": " camera obtained from online SLAM are further optimized by the registration with respect to the dense refined point cloud model. Taking the pre-calibrated extrinsic parameters as the initial guess, we used the RANSAC to refine the lidar-camera transformation [29]. This registration process is based on the normalized information distance (NID) [51], which serves as a mutual information-based cross-modal distance metric. Finally, we align the reference models and query models into the same coordinate system to generate the ground truth for the query sequences. Specifically, we utilize the CloudCompare toolbox [19] to manually select feature points across multiple point cloud models as initial values. Then, we employ the ICP algorithm to register the point cloud models together. Afterwards, we employed a practical approach to volumetric surface reconstruction called Truncated Signed Distance Functions (TSDFs) [57] to achieve the reconstruction from point clouds to meshes with an efficient and sparse data structure called Voxel Data Base (VDB) [39]. At this stage, we can utilize the ray-mesh intersection method [15] to cast rays from cameras onto the mesh model. By intersecting the rays with the mesh, we can determine the depths of the corresponding points on the mesh surface. After a series of joint optimizations between multiple modalities, we have generated a set of GT data. Figure 2 shows some instances. 
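As a concrete check of the closed-form observation in Eqs. 3-5 above, the small sketch below (Python/NumPy, my own illustration rather than the authors' implementation) transforms a feature's points into the map frame (Eq. 2), forms the covariance matrix A about the centroid (Eq. 5), and evaluates λ_min(A), which equals the minimum of the per-feature BA cost in Eq. 3.

```python
import numpy as np

def plane_feature_cost(points_local, rotations, translations):
    """Per-feature BA cost of Eqs. 3-5: the smallest eigenvalue of the covariance
    of the feature points after mapping them into the global frame (Eq. 2)."""
    P = np.stack([R @ p + t for p, R, t in zip(points_local, rotations, translations)])
    centered = P - P.mean(axis=0)
    A = centered.T @ centered / len(P)          # Eq. 5
    return np.linalg.eigvalsh(A)[0]             # lambda_min(A), the Eq. 4 objective

# Toy check: near-planar points observed from identity poses give a tiny cost
rng = np.random.default_rng(0)
pts = [np.array([x, y, 1e-3 * rng.standard_normal()]) for x, y in rng.uniform(-1, 1, (20, 2))]
poses = ([np.eye(3)] * 20, [np.zeros(3)] * 20)
print(plane_feature_cost(pts, *poses))          # ~1e-6: the points almost lie on z = 0
```

In the actual BA of Eq. 4, the lidar poses η would be optimized to drive this λ_min down jointly over all edge and plane features.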
This GT data includes reference images " + }, + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{c_0}^r" + }, + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "text", + "content": ", the depth maps " + }, + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "inline_equation", + "content": "D_{c_0}^r" + }, + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "text", + "content": " of the reference images, and the reference maps containing the point cloud models " + }, + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "text", + "content": ", mesh models " + }, + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 367, + 546, + 714 + ], + "type": "text", + "content": ", as well as camera pose odom" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22318" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 251, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 251, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 251, + 85 + ], + "type": "text", + "content": "etry " + }, + { + "bbox": [ + 47, + 72, + 251, + 85 + ], + "type": "inline_equation", + "content": "\\{\\xi \\}" + }, + { + "bbox": [ + 47, + 72, + 251, + 85 + ], + "type": "text", + "content": " . Figure 4 summarizes the GT generation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 95, + 248, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 95, + 248, + 109 + ], + "spans": [ + { + "bbox": [ + 47, + 95, + 248, + 109 + ], + "type": "text", + "content": "4. Omnidirectional Visual Localization" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "spans": [ + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "text", + "content": "We extend the current feature-matching-based and absolute pose regression pipelines for omnidirectional visual localization. Given a query image " + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "inline_equation", + "content": "\\mathcal{I}^q" + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "text", + "content": " in any camera model, we seek to estimate its poses within the environment modeled by " + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "text", + "content": " images " + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^r" + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "text", + "content": ". 
To minimize the domain gap between the query image from " + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "inline_equation", + "content": "c_{1}, c_{2}, c_{3}, c_{4}" + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "text", + "content": " and reference images, we explore visual cameras (VC) in two ways: VC1, remapping query images to 360 domain using " + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "inline_equation", + "content": "\\Psi_{c_n}^{-1}" + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "text", + "content": "; VC2, rectifying " + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "text", + "content": " images into queries' domains using " + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "inline_equation", + "content": "\\Psi_{c_n}" + }, + { + "bbox": [ + 46, + 116, + 287, + 224 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 232, + 244, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 232, + 244, + 245 + ], + "spans": [ + { + "bbox": [ + 47, + 232, + 244, + 245 + ], + "type": "text", + "content": "4.1. Feature-matching-based Localization" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 251, + 287, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 287, + 275 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 287, + 275 + ], + "type": "text", + "content": "Most feature-matching-based techniques first perform IR to reduce the search space before estimating the pose." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 291, + 148, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 291, + 148, + 303 + ], + "spans": [ + { + "bbox": [ + 47, + 291, + 148, + 303 + ], + "type": "text", + "content": "4.1.1 Image Retrieval" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": "For method VC1, if query " + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{I}^q" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": " captured from " + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "c_{0}" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": ", we retrieve the " + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": " most similar images from " + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^r" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": " by calculating and sorting simi" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "_{\\mathrm{cos}}(\\mathcal{F}(\\mathcal{I}^q), \\mathcal{F}(\\mathcal{I}^r))" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{I}^r \\in \\mathbf{I}^r" + }, + { + 
"bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{F}(\\cdot)" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": " denotes the function to map each image to the global feature domain. simi" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "_{\\mathrm{cos}}(\\cdot)" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": " is cosine similarity for two feature embeddings. If query " + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{I}^q" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": " captured from " + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "c_{1}, c_{2}, c_{3}, c_{4}" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": ", we then retrieve top-" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": " reference images based on simi" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "inline_equation", + "content": "_{\\mathrm{cos}}(\\mathcal{F}(\\Psi_{c_n}^{-1}(\\mathcal{I}^q)), \\mathcal{F}(\\mathcal{I}^r)), \\mathcal{I}^r \\in \\mathbf{I}^r" + }, + { + "bbox": [ + 46, + 310, + 287, + 407 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 407, + 287, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 407, + 287, + 454 + ], + "spans": [ + { + "bbox": [ + 46, + 407, + 287, + 454 + ], + "type": "text", + "content": "In method VC2, we expand the global features for each " + }, + { + "bbox": [ + 46, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 407, + 287, + 454 + ], + "type": "text", + "content": " reference image by cameras " + }, + { + "bbox": [ + 46, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 46, + 407, + 287, + 454 + ], + "type": "text", + "content": " including virtual pin-hole cameras forming a cube map and virtual fisheye cameras. We define the similarity score between " + }, + { + "bbox": [ + 46, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "\\mathcal{I}^q" + }, + { + "bbox": [ + 46, + 407, + 287, + 454 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "\\mathcal{I}^r" + }, + { + "bbox": [ + 46, + 407, + 287, + 454 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 102, + 467, + 287, + 480 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 467, + 287, + 480 + ], + "spans": [ + { + "bbox": [ + 102, + 467, + 287, + 480 + ], + "type": "interline_equation", + "content": "\\max \\left(\\operatorname {s i m i} _ {\\cos} \\left(\\mathcal {F} \\left(\\mathcal {I} ^ {q}\\right), \\mathcal {G} _ {\\mathcal {F}} \\left(\\mathcal {I} ^ {r}\\right)\\right), \\right. 
\\tag {6}", + "image_path": "723be7bca2b4660c479c82921c80313caf69ca03dc5a635f252c248d2aa1bbe9.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "spans": [ + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "text", + "content": "where global feature group of reference is " + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathcal{F}}(\\mathcal{I}^r) = \\{\\mathcal{F}(\\Psi_c(\\mathcal{I}^r)),\\ldots \\}" + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "text", + "content": ". We use the highest similarity value calculated from " + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "inline_equation", + "content": "\\mathcal{F}(\\mathcal{I}^q)" + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathcal{F}}(\\mathcal{I}^r)" + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "text", + "content": " as the similarity score for each " + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "inline_equation", + "content": "\\mathcal{I}^r" + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "text", + "content": " to ensure retrieve " + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "text", + "content": " most similar " + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "text", + "content": " reference images because some rectified images are from the same " + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "inline_equation", + "content": "\\mathcal{I}^r" + }, + { + "bbox": [ + 46, + 487, + 287, + 570 + ], + "type": "text", + "content": ". Note that we can eliminate the domain gap during the image retrieval step in this way." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 586, + 272, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 586, + 272, + 598 + ], + "spans": [ + { + "bbox": [ + 47, + 586, + 272, + 598 + ], + "type": "text", + "content": "4.1.2 Local Feature Matching and Pose Estimation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": "For each pinhole query frame, we retrieve relevant reference images, match their local features, leverage the depth maps " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "D_{c_0}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " to establish the 2D-3D correspondences, and finally estimate a pose with " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{PnP + RANSAC}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": ". 
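A minimal sketch of the retrieval scoring described above (Section 4.1.1, Eq. 6), assuming Python/NumPy and precomputed global descriptors (NetVLAD-style vectors, here just arrays): each 360° reference is scored by the maximum cosine similarity between the query descriptor and the descriptors of that reference's rectified virtual-camera crops, and the top-k references are returned.

```python
import numpy as np

def cosine_sim(query, refs):
    """Cosine similarity between one query descriptor and the rows of refs."""
    q = query / np.linalg.norm(query)
    r = refs / np.linalg.norm(refs, axis=-1, keepdims=True)
    return r @ q

def retrieve_topk_vc2(query_desc, ref_desc_groups, k=5):
    """Eq. 6: score each 360 reference by the best-matching crop in its descriptor
    group G_F(I^r), then return the indices of the k highest-scoring references."""
    scores = np.array([cosine_sim(query_desc, group).max() for group in ref_desc_groups])
    return np.argsort(-scores)[:k], scores

# Toy usage: 100 references x 6 crops x 256-D descriptors, query close to reference 42
rng = np.random.default_rng(0)
refs = [rng.normal(size=(6, 256)) for _ in range(100)]
query = refs[42][3] + 0.1 * rng.normal(size=256)
topk, _ = retrieve_topk_vc2(query, refs, k=5)
print(topk)   # reference 42 should rank first
```

Taking the per-reference maximum, rather than scoring every crop independently, is what guarantees that the k retrieved items are k distinct 360° references, as the text above notes.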
Unlike [11, 61], we directly match query image with retrieved " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " reference images described in Section 4.1.1. For query images from " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "c_{0}, c_{1}, c_{2}, c_{3}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": ", i.e., fisheye and " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " query frames, we utilize the function that calculates pose error in sphere camera model in OpenGV [28] library for " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{PnP + RANSAC}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 72, + 447, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 447, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 447, + 84 + ], + "type": "text", + "content": "4.2. Absolute Pose Regression" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 91, + 545, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 91, + 545, + 114 + ], + "spans": [ + { + "bbox": [ + 305, + 91, + 545, + 114 + ], + "type": "text", + "content": "APRs train deep neural networks to regress the 6DoF camera pose of a query image." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 115, + 545, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 115, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 305, + 115, + 545, + 150 + ], + "type": "text", + "content": "PN. PoseNet (PN) is the first APR model. Since there is no open source code [23, 24], we follow the modification in [8, 36] and use ResNet34 [21] as the backbone network." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "spans": [ + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "text", + "content": "MS-T. MS-Transformer [49] is an APR model incorporating attention and implementing transformers as backbone. We note APR methods using our virtual camera method, VC2, as " + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{APR}^{vc2}" + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "text", + "content": ". The difference between APR and " + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{APR}^{vc2}" + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "text", + "content": " is the training stage. For APR baselines, the training set is " + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^r" + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "text", + "content": ". 
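For the pinhole-query pipeline described above (match local features against retrieved references, lift the reference keypoints to 3D with the depth maps, then PnP + RANSAC), the sketch below shows only the final pose-solving step with OpenCV; the feature matcher and the depth lifting are assumed inputs and this is not the authors' code. As the text notes, fisheye and 360° queries would instead go through OpenGV's sphere-camera solvers.

```python
import numpy as np
import cv2

def pose_from_matches(pts3d, pts2d, fx, fy, cx, cy):
    """Pinhole-query pose from 2D-3D correspondences via PnP + RANSAC.
    pts3d: Nx3 map points lifted from the reference depth map D_c0;
    pts2d: Nx2 matched keypoints in the query image."""
    K = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
    ok, rvec, tvec, inliers = cv2.solvePnPRansac(
        pts3d.astype(np.float64), pts2d.astype(np.float64), K, None,
        iterationsCount=1000, reprojectionError=4.0)
    if not ok:
        return None
    R, _ = cv2.Rodrigues(rvec)                  # world-to-camera rotation
    return R, tvec.reshape(3), inliers          # pose and RANSAC inlier indices
```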
For " + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{APR}^{vc2}" + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "text", + "content": ", they are trained with " + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "inline_equation", + "content": "360^\\circ" + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "text", + "content": " images, cropped pinhole images, and cropped fisheye images, i.e., " + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^r \\cup \\Psi_c(\\mathbf{I}^r)" + }, + { + "bbox": [ + 305, + 151, + 545, + 247 + ], + "type": "text", + "content": " introduced in Section 4.1.1 and Eq. 1." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 247, + 545, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 247, + 545, + 329 + ], + "spans": [ + { + "bbox": [ + 305, + 247, + 545, + 329 + ], + "type": "text", + "content": "All APR models are implemented in Python using PyTorch [41]. During training, all input images are resized to " + }, + { + "bbox": [ + 305, + 247, + 545, + 329 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 305, + 247, + 545, + 329 + ], + "type": "text", + "content": " and then randomly cropped to " + }, + { + "bbox": [ + 305, + 247, + 545, + 329 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 305, + 247, + 545, + 329 + ], + "type": "text", + "content": ". For both PN and MS-T, we set an initial learning rate of " + }, + { + "bbox": [ + 305, + 247, + 545, + 329 + ], + "type": "inline_equation", + "content": "\\lambda = 10^{-4}" + }, + { + "bbox": [ + 305, + 247, + 545, + 329 + ], + "type": "text", + "content": " and a batch size of 32 for 300 epochs of each scene. Training and evaluation in Section 5 are performed on an NVIDIA GeForce GTX 3090 GPU." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 342, + 376, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 342, + 376, + 354 + ], + "spans": [ + { + "bbox": [ + 306, + 342, + 376, + 354 + ], + "type": "text", + "content": "5. Evaluation" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 363, + 545, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 363, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 305, + 363, + 545, + 387 + ], + "type": "text", + "content": "We provide detailed results for each scene in the dataset and more settings in supplementary material." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 395, + 403, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 395, + 403, + 407 + ], + "spans": [ + { + "bbox": [ + 306, + 395, + 403, + 407 + ], + "type": "text", + "content": "5.1. Image Retrieval" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "spans": [ + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "text", + "content": "We evaluate global descriptors computed by NetVLAD [1], CosPlace [3], OpenIBL [18] and AP-GeM [20]. 
The query image is deemed correctly localized if at least one of the top " + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "text", + "content": " retrieved database images is within " + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "inline_equation", + "content": "d = 5m" + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "text", + "content": " from the ground truth position of the query for Concourse and " + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "inline_equation", + "content": "d = 10m" + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "text", + "content": " for the other three scenes. The image retrieval results are shown in Table 4. Among all global feature descriptor methods, the " + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "text", + "content": " query exhibits the best precision and recall in most cases, while the pinhole query performs the worst. The remap method (VC1) provides limited improvement for pinhole queries but yields higher improvement for fisheye1, fisheye2, and fisheye3 queries. The reason is that the FoV of pinhole cameras is only " + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "inline_equation", + "content": "85^{\\circ}" + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "text", + "content": ". Consequently, VC1 results in significant black borders when converting to a " + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 414, + 545, + 593 + ], + "type": "text", + "content": " image due to the limited coverage." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": "The rectify method (VC2) significantly improves pin-hole, fisheye1, fisheye2, and fisheye3 queries by eliminating the domain gap in IR. However, the pinhole, fisheye1, and fisheye2 queries' recall and precision are still much lower than those of the " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " query. Only the query from fisheye3 (widest FoV) approaches the performance of " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " query. The domain gap mainly affects the precision and recall of fisheye3. Both remap (VC1) and crop (VC2) significantly improve IR performance for fisheye3. 
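The localization criterion quoted above (a query counts as correctly retrieved if at least one of its top-k references lies within d of the ground-truth position, with d = 5 m for Concourse and 10 m for the other scenes) can be scored as below. This is one plausible reading of the R@k and P@k columns of Table 4, written in Python/NumPy; it is not the authors' evaluation script.

```python
import numpy as np

def recall_precision_at_k(query_pos, ref_pos, topk_idx, d):
    """query_pos: (Q, 3) GT query positions; ref_pos: (N, 3) reference positions;
    topk_idx: (Q, k) retrieved reference indices; d: distance threshold in metres."""
    dists = np.linalg.norm(ref_pos[topk_idx] - query_pos[:, None, :], axis=-1)
    hits = dists <= d
    recall = hits.any(axis=1).mean()      # at least one correct reference in the top k
    precision = hits.mean(axis=1).mean()  # average fraction of correct references
    return recall, precision

# r, p = recall_precision_at_k(query_pos, ref_pos, topk_idx, d=10.0)  # d=5.0 for Concourse
```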
On the other hand, pinhole queries are more prone to being mistaken as error" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22319" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 61, + 70, + 535, + 247 + ], + "blocks": [ + { + "bbox": [ + 61, + 70, + 535, + 247 + ], + "lines": [ + { + "bbox": [ + 61, + 70, + 535, + 247 + ], + "spans": [ + { + "bbox": [ + 61, + 70, + 535, + 247 + ], + "type": "table", + "html": "
Query | NetVLAD [1] | Cosplace [3] | OpenIBL [18] | AP-GeM [20]
R@1 | R@5 | P@5 | R@10 | P@10 | R@1 | R@5 | P@5 | R@10 | P@10 | R@1 | R@5 | P@5 | R@10 | P@10 | R@1 | R@5 | P@5 | R@10 | P@10
pinhole | 0.23 | 0.45 | 0.22 | 0.58 | 0.22 | 0.15 | 0.26 | 0.15 | 0.33 | 0.15 | 0.18 | 0.36 | 0.18 | 0.48 | 0.18 | 0.2 | 0.37 | 0.2 | 0.47 | 0.2
+VC1 | 0.24 | 0.45 | 0.24 | 0.57 | 0.23 | 0.21 | 0.33 | 0.21 | 0.41 | 0.21 | 0.21 | 0.39 | 0.21 | 0.5 | 0.2 | 0.25 | 0.42 | 0.25 | 0.53 | 0.24
+VC2 | 0.5 | 0.67 | 0.48 | 0.75 | 0.47 | 0.32 | 0.41 | 0.32 | 0.48 | 0.31 | 0.51 | 0.67 | 0.49 | 0.75 | 0.47 | 0.5 | 0.68 | 0.49 | 0.77 | 0.47
fisheye1 | 0.42 | 0.67 | 0.41 | 0.77 | 0.39 | 0.28 | 0.43 | 0.28 | 0.52 | 0.28 | 0.37 | 0.58 | 0.36 | 0.69 | 0.34 | 0.35 | 0.55 | 0.34 | 0.66 | 0.33
+VC1 | 0.51 | 0.72 | 0.49 | 0.8 | 0.47 | 0.36 | 0.48 | 0.35 | 0.56 | 0.34 | 0.52 | 0.7 | 0.5 | 0.79 | 0.48 | 0.43 | 0.62 | 0.42 | 0.72 | 0.4
+VC2 | 0.73 | 0.91 | 0.63 | 0.95 | 0.57 | 0.63 | 0.85 | 0.51 | 0.92 | 0.43 | 0.74 | 0.91 | 0.62 | 0.95 | 0.54 | 0.65 | 0.88 | 0.57 | 0.94 | 0.51
fisheye2 | 0.45 | 0.7 | 0.44 | 0.8 | 0.42 | 0.3 | 0.46 | 0.31 | 0.55 | 0.31 | 0.41 | 0.62 | 0.4 | 0.73 | 0.38 | 0.38 | 0.59 | 0.36 | 0.68 | 0.35
+VC1 | 0.54 | 0.74 | 0.52 | 0.83 | 0.49 | 0.37 | 0.49 | 0.36 | 0.57 | 0.35 | 0.56 | 0.73 | 0.54 | 0.81 | 0.51 | 0.46 | 0.65 | 0.45 | 0.74 | 0.43
+VC2 | 0.74 | 0.92 | 0.65 | 0.95 | 0.58 | 0.64 | 0.87 | 0.53 | 0.93 | 0.45 | 0.76 | 0.92 | 0.65 | 0.96 | 0.56 | 0.67 | 0.89 | 0.58 | 0.94 | 0.52
fisheye3 | 0.57 | 0.79 | 0.55 | 0.86 | 0.52 | 0.4 | 0.56 | 0.4 | 0.65 | 0.4 | 0.53 | 0.74 | 0.51 | 0.83 | 0.49 | 0.45 | 0.66 | 0.43 | 0.75 | 0.41
+VC1 | 0.63 | 0.81 | 0.61 | 0.88 | 0.58 | 0.48 | 0.61 | 0.48 | 0.68 | 0.47 | 0.67 | 0.82 | 0.65 | 0.88 | 0.61 | 0.55 | 0.73 | 0.53 | 0.81 | 0.51
+VC2 | 0.77 | 0.93 | 0.68 | 0.96 | 0.61 | 0.69 | 0.89 | 0.58 | 0.94 | 0.5 | 0.79 | 0.93 | 0.68 | 0.96 | 0.6 | 0.67 | 0.9 | 0.59 | 0.94 | 0.54
360 | 0.79 | 0.86 | 0.77 | 0.88 | 0.73 | 0.92 | 0.95 | 0.91 | 0.96 | 0.89 | 0.89 | 0.94 | 0.88 | 0.95 | 0.83 | 0.79 | 0.9 | 0.77 | 0.94 | 0.72
", + "image_path": "6349f03ae60a961836f7fcc2bd0d099af2615d353aab2468b696a1d23d6d1929.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 53, + 303, + 541, + 444 + ], + "blocks": [ + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "lines": [ + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "type": "text", + "content": "Table 4. Image retrieval results based on " + }, + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "type": "text", + "content": " reference database average over four scenes, the recall, and precision for the top " + }, + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "type": "text", + "content": " retrieved images, " + }, + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "type": "inline_equation", + "content": "k = 1,5,10" + }, + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "type": "inline_equation", + "content": "\\#" + }, + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "type": "text", + "content": " indicates the highest value of R@k and P@k for each device w and w/o virtual cameras (VC1, VC2). Best results for all devices of R@k and P@k are in bold with " + }, + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "type": "inline_equation", + "content": "\\#" + }, + { + "bbox": [ + 46, + 255, + 547, + 293 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 303, + 541, + 444 + ], + "lines": [ + { + "bbox": [ + 53, + 303, + 541, + 444 + ], + "spans": [ + { + "bbox": [ + 53, + 303, + 541, + 444 + ], + "type": "table", + "html": "
NetVLAD [1] | CosPlace [3]
DISK + LG | SP + LG | SP + SG | DISK + LG | SP + LG | SP + SG
Day | Night | Day | Night | Day | Night | Day | Night | Day | Night | Day | Night
pinhole | 6.0/11.3/24.6 | 1.7/4.4/10.3 | 8.0/14.9/30.9 | 2.2/5.5/13.5 | 8.4/15.2/30.7 | 2.3/5.6/12.3 | 4.2/7.8/18.0 | 1.6/3.5/8.6 | 4.8/10.2/22.1 | 1.9/4.7/11.1 | 5.4/10.4/21.1 | 2.1/4.7/10.4
+VC1 | 8.5/14.0/23.5 | 2.2/4.1/7.9 | 10.4/17.0/27.5 | 2.9/5.3/10.1 | 10.9/17.8/28.5 | 2.8/5.6/9.9 | 6.1/10.8/21.1 | 1.7/3.6/8.2 | 7.5/13.2/22.5 | 2.0/4.5/9.6 | 7.6/13.5/22.8 | 2.1/4.7/9.6
+VC2 | 14.2/22.2/35.5 | 4.1/7.8/13.6 | 19.8/29.7/42.9 | 6.1/10.4/16.9 | 21.6/33.2/49.7 | 5.9/11.0/18.4 | 8.0/13.1/23.5 | 2.5/4.6/9.1 | 10.7/16.4/26.6 | 3.0/5.7/11.4 | 11.6/18.5/30.5 | 3.5/6.8/12.8
fisheye1 | 1.6/4.4/17.7 | 0.5/1.8/7.4 | 1.9/5.4/20.1 | 0.7/2.3/10.5 | 1.6/4.7/18.4 | 0.5/1.9/8.2 | 0.8/2.5/11.8 | 0.4/1.4/5.8 | 1.0/3.5/13.0 | 0.5/1.4/8.2 | 0.9/3.4/12.1 | 0.3/1.4/7.0
+VC1 | 3.3/9.2/27.6 | 0.8/2.7/9.6 | 4.1/10.6/32.2 | 1.4/4.4/14.9 | 3.0/9.5/29.6 | 0.9/3.1/11.7 | 2.3/5.5/19.4 | 0.5/1.6/7.3 | 2.1/6.1/19.9 | 0.7/2.2/9.0 | 1.9/5.5/19.1 | 0.5/1.9/7.3
+VC2 | 3.9/10.5/33.0 | 1.0/4.0/14.6 | 4.3/12.4/38.2 | 1.9/6.4/21.8 | 3.6/11.0/34.5 | 1.1/5.3/19.4 | 2.5/6.9/25.3 | 0.8/2.8/12.2 | 2.8/8.2/29.0 | 1.3/4.6/18.0 | 2.1/7.1/26.7 | 1.0/4.0/16.2
fisheye2 | 1.6/4.9/20.9 | 0.5/2.0/8.7 | 1.9/6.7/23.2 | 0.8/3.0/11.8 | 1.7/5.2/19.5 | 0.7/2.5/9.9 | 1.3/3.5/14.2 | 0.4/1.6/6.9 | 1.2/3.8/15.2 | 0.5/1.5/9.1 | 1.2/3.9/12.9 | 0.6/1.6/7.2
+VC1 | 4.3/10.8/30.9 | 0.8/3.0/11.2 | 4.7/12.4/34.1 | 1.8/5.4/15.8 | 4.1/10.6/31.5 | 1.1/3.6/13.7 | 2.5/6.5/20.6 | 0.5/1.7/7.4 | 2.5/7.0/22.1 | 0.8/2.4/9.4 | 2.2/6.8/20.2 | 0.5/2.1/8.0
+VC2 | 4.3/11.0/34.4 | 1.1/4.7/17.3 | 5.1/14.0/41.1 | 2.0/7.2/24.8 | 3.7/11.5/36.8 | 1.5/5.9/21.2 | 2.8/7.3/27.1 | 0.8/2.9/13.4 | 2.9/8.9/32.0 | 1.6/5.3/20.1 | 2.5/8.0/27.9 | 1.1/4.2/17.7
fisheye3 | 3.8/9.5/29.8 | 1.0/3.6/13.8 | 4.0/10.5/31.6 | 1.3/4.6/16.4 | 3.4/9.1/28.4 | 0.8/3.8/13.8 | 2.5/6.3/21.9 | 0.6/2.4/10.1 | 2.8/7.2/22.3 | 0.9/2.9/12.4 | 2.0/5.9/20.0 | 1.3/4.2/15.0
+VC1 | 5.9/14.7/39.5 | 1.5/5.2/17.7 | 6.0/16.2/43.5 | 2.0/6.8/21.9 | 5.8/14.7/39.1 | 1.8/5.5/18.3 | 4.4/10.2/30.1 | 1.1/3.3/12.8 | 4.6/11.6/32.0 | 1.4/4.1/14.4 | 4.3/10.5/29.7 | 1.2/3.8/12.3
+VC2 | 5.2/13.9/41.8 | 2.1/6.5/22.5 | 5.9/16.5/46.3 | 2.5/8.6/29.1 | 5.4/14.2/40.5 | 2.1/7.3/25.9 | 4.3/9.8/34.6 | 1.7/5.2/19.5 | 4.7/12.6/36.8 | 2.2/7.1/23.8 | 3.8/10.5/32.5 | 1.6/5.1/20.7
360 | 17.1/30.8/66.1 | 8.5/20.1/47.5 | 18.2/34.6/64.2 | 7.0/18.7/45.3 | 15.8/31.2/60.4 | 7.0/17.8/42.8 | 17.6/31.8/68.1 | 8.7/22.0/56.0 | 18.7/34.9/68.1 | 7.3/20.0/53.4 | 16.6/32.6/65.7 | 7.1/18.7/50.4
", + "image_path": "fd3a704f2bb60f2215640277a6f5b1f8d157efc582757a2802a9ac3a696ee3e0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 451, + 546, + 489 + ], + "lines": [ + { + "bbox": [ + 46, + 451, + 546, + 489 + ], + "spans": [ + { + "bbox": [ + 46, + 451, + 546, + 489 + ], + "type": "text", + "content": "Table 5. Local matching localization results. The average percentage of predictions with high (0.25m, " + }, + { + "bbox": [ + 46, + 451, + 546, + 489 + ], + "type": "inline_equation", + "content": "2^{\\circ}" + }, + { + "bbox": [ + 46, + 451, + 546, + 489 + ], + "type": "text", + "content": "), medium (0.5m, " + }, + { + "bbox": [ + 46, + 451, + 546, + 489 + ], + "type": "inline_equation", + "content": "5^{\\circ}" + }, + { + "bbox": [ + 46, + 451, + 546, + 489 + ], + "type": "text", + "content": "), and low (5m, " + }, + { + "bbox": [ + 46, + 451, + 546, + 489 + ], + "type": "inline_equation", + "content": "10^{\\circ}" + }, + { + "bbox": [ + 46, + 451, + 546, + 489 + ], + "type": "text", + "content": ") accuracy [46] (higher is better) over four scenes. # indicates the highest value for each device w and w/o virtual cameras (VC1, VC2) of each accuracy level. The best results for all devices of each accuracy level are in bold with # ." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 510, + 288, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 288, + 548 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 288, + 548 + ], + "type": "text", + "content": "neous locations with similar structures due to their narrower FoV even there is no cross-device domain gap during IR by applying VC2 (Some figures in supplementary material)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 559, + 160, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 559, + 160, + 572 + ], + "spans": [ + { + "bbox": [ + 47, + 559, + 160, + 572 + ], + "type": "text", + "content": "5.2. Visual Localization" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 579, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 579, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 579, + 287, + 652 + ], + "type": "text", + "content": "We compare our approach with the following baselines in two categories: 1) Local feature matching pipelines tailored from HLoc [42], using different keypoint descriptors (Superpoint (SP) [16] and DISK [55]), and matchers (SuperGlue (SG) [43], follow-up SOTA LightGlue (LG) [31]). 2) The end-to-end APRs: PN [23, 24] and MS-T [49]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": "Local feature matching: During local feature matching, all " + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": " images are cropped to " + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "1228 \\times 614" + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": " because of the tradeoff of time and computation. We report the average results over four scenes in Table 5. 
The " + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": " query achieves the best performance in three accuracy levels in most cases" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "text", + "content": "across all IR, keypoint descriptors, and matchers settings. It is especially more robust in challenging nighttime conditions. VC1 and VC2 techniques improve the recall and precision of IR, increasing the accuracy of 2D-2D matching for all cameras. In most cases, the performance at the low accuracy level " + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "inline_equation", + "content": "(5m, 10^{\\circ})" + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "text", + "content": " is correlated with the FoV, where a larger FoV results in higher performance. However, the pin-hole query with VC2 during IR performs comparably to the " + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "text", + "content": " queries at the high " + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "inline_equation", + "content": "(0.25m, 2^{\\circ})" + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "text", + "content": " and median " + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "inline_equation", + "content": "(0.5m, 5^{\\circ})" + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "text", + "content": " accuracy levels. In contrast, query frames from " + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "inline_equation", + "content": "c_{1}, c_{2}" + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "inline_equation", + "content": "c_{3}" + }, + { + "bbox": [ + 304, + 510, + 545, + 655 + ], + "type": "text", + "content": " demonstrate relatively lower performance at the high and medium accuracy levels." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "As observed in Table 4, different IR methods display different performances depending on the type of camera. We thus consider both NetVLAD and CosPlace in visual localization. 
In most cases, " + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": " query frames achieve higher" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "22320" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 192 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 192 + ], + "type": "text", + "content": "accuracy with CosPlace while pinhole and fisheye query frames have lower accuracy than NetVLAD as shown in Table 5. These results match the precision and recall difference noted in Table 4. We believe that the FoV not only affects the robustness of IR but also has an impact on local 2D-2D matching performance. Pinhole queries suffer from erroneous matches due to interference from symmetrical and repetitive structures, while the larger FoV of fisheye and " + }, + { + "bbox": [ + 46, + 72, + 289, + 192 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 72, + 289, + 192 + ], + "type": "text", + "content": " query frames capture more unique visual features. We provide examples in the supplementary material." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "spans": [ + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": "APR: APRs cannot extrapolate well beyond the training set [40, 47]. cross-device queries further complicate this challenge by introducing an additional dimension of FoV. Due to the high efficiency of " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " mapping, the training set " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^r" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " in 360Loc contains only around one-third of the images compared to datasets [24]. Figure 5 shows that when PN and MS-T are trained solely on " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^r" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " with only " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " images, a smaller domain gap between the query and the " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " image yields a lower error. 
However, when we introduce images from virtual cameras for data augmentation, " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "\\mathrm{PN}^{vc2}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " and MS-" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "\\mathrm{T}^{vc2}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " exhibit significantly reduced translation and rotation errors across all queries, particularly during daytime. MS-" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "\\mathrm{T}^{vc2}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " reduces translation error by up to " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "79\\%" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " and rotation error by up to " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "72\\%" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " compared to MS-T. " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "\\mathrm{PN}^{vc2}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " displays similar improvement over PN. In most cases, except for " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "\\mathrm{PN}^{vc2}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": ", s rotation error for the " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " queries during daytime, both the " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " and fisheye queries exhibit higher accuracy than the pinhole query on " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "\\mathrm{PN}^{vc2}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " and MS-" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "\\mathrm{T}^{vc2}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": ". This suggests that a larger FoV still helps improve visual localization accuracy in challenging scenes. 
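To make the virtual-camera augmentation discussed above concrete, the sketch below samples a perspective (pinhole-like) crop from an equirectangular 360-degree image; it is a rough illustration of the idea behind Psi_c, not the paper's implementation (which also renders fisheye views), and the FoV, output size, yaw angle and nearest-neighbour sampling are illustrative choices.

```python
# Rough sketch: crop a virtual pinhole view from an equirectangular panorama.
import numpy as np

def equirect_to_pinhole(pano, fov_deg=85.0, out_hw=(224, 224), yaw_deg=0.0):
    """pano: (H, W, 3) equirectangular image; returns an (h, w, 3) perspective crop."""
    H, W = pano.shape[:2]
    h, w = out_hw
    f = 0.5 * w / np.tan(np.radians(fov_deg) / 2.0)        # focal length in pixels
    xs = np.arange(w) - (w - 1) / 2.0
    ys = np.arange(h) - (h - 1) / 2.0
    x, y = np.meshgrid(xs, ys)
    rays = np.stack([x, y, np.full_like(x, f)], axis=-1)   # camera-frame ray directions
    rays /= np.linalg.norm(rays, axis=-1, keepdims=True)
    a = np.radians(yaw_deg)                                 # rotate the view about the vertical axis
    R_yaw = np.array([[np.cos(a), 0, np.sin(a)],
                      [0, 1, 0],
                      [-np.sin(a), 0, np.cos(a)]])
    rays = rays @ R_yaw.T
    lon = np.arctan2(rays[..., 0], rays[..., 2])            # longitude in [-pi, pi]
    lat = np.arcsin(np.clip(rays[..., 1], -1.0, 1.0))       # latitude in [-pi/2, pi/2]
    u = np.round((lon / (2 * np.pi) + 0.5) * (W - 1)).astype(int)
    v = np.round((lat / np.pi + 0.5) * (H - 1)).astype(int)
    return pano[v, u]                                       # nearest-neighbour lookup
```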
Another interesting finding is that even though the augmented training set " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^r \\cup \\Psi_c(\\mathbf{I}^r)" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": ", which includes virtual camera images, does not increase the number of " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " images, the error for the " + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 193, + 289, + 517 + ], + "type": "text", + "content": " query still decreases. This reduction is particularly noticeable in the case of translation errors during daytime. The result fully demonstrates the utility of employing virtual cameras for data augmentation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 526, + 111, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 526, + 111, + 539 + ], + "spans": [ + { + "bbox": [ + 47, + 526, + 111, + 539 + ], + "type": "text", + "content": "5.3. Analysis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "content": "Cross-device visual positioning presents significant challenges for IR, local matching, and APRs. Our VC1 and VC2 methods demonstrate practical enhancements in the performance of IR and APR for cross-device scenarios. However, it is essential to note that during the local matching process, the accuracy of matches and the recall and precision of IR for query frames from different cameras may not align perfectly. The chosen IR method and its training noticeably affect accuracy for similar cameras. Fisheye cameras exhibit better performance in IR compared to pinhole cameras. However, pinhole cameras outperform fisheye cameras for high accuracy and median accuracy levels in local matching. 
This is likely due to existing feature extraction and matching models lacking training data on " + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "content": " and fisheye" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 317, + 69, + 426, + 137 + ], + "blocks": [ + { + "bbox": [ + 317, + 69, + 426, + 137 + ], + "lines": [ + { + "bbox": [ + 317, + 69, + 426, + 137 + ], + "spans": [ + { + "bbox": [ + 317, + 69, + 426, + 137 + ], + "type": "image", + "image_path": "21c1a101e9d74cf3e9bf39c3e37a7244e02cfa1ea47cb78c1884559328819e15.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 426, + 70, + 534, + 137 + ], + "blocks": [ + { + "bbox": [ + 426, + 70, + 534, + 137 + ], + "lines": [ + { + "bbox": [ + 426, + 70, + 534, + 137 + ], + "spans": [ + { + "bbox": [ + 426, + 70, + 534, + 137 + ], + "type": "image", + "image_path": "2365dd97b73b1cf7536dfc86daa8dbe38f6b210cb8b770a05402721a59c5619b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 317, + 148, + 428, + 216 + ], + "blocks": [ + { + "bbox": [ + 347, + 138, + 399, + 148 + ], + "lines": [ + { + "bbox": [ + 347, + 138, + 399, + 148 + ], + "spans": [ + { + "bbox": [ + 347, + 138, + 399, + 148 + ], + "type": "text", + "content": "(a) Trans. (day)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 317, + 148, + 428, + 216 + ], + "lines": [ + { + "bbox": [ + 317, + 148, + 428, + 216 + ], + "spans": [ + { + "bbox": [ + 317, + 148, + 428, + 216 + ], + "type": "image", + "image_path": "2d3e8e2a36dd03710f89023fc7e0eb829fc8eb8577459e039b7c2f10874df89c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 345, + 217, + 401, + 227 + ], + "lines": [ + { + "bbox": [ + 345, + 217, + 401, + 227 + ], + "spans": [ + { + "bbox": [ + 345, + 217, + 401, + 227 + ], + "type": "text", + "content": "(c) Trans. (night)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 236, + 545, + 259 + ], + "lines": [ + { + "bbox": [ + 305, + 236, + 545, + 259 + ], + "spans": [ + { + "bbox": [ + 305, + 236, + 545, + 259 + ], + "type": "text", + "content": "Figure 5. The average of median translation/rotation errors in " + }, + { + "bbox": [ + 305, + 236, + 545, + 259 + ], + "type": "inline_equation", + "content": "(m / ^{\\circ})" + }, + { + "bbox": [ + 305, + 236, + 545, + 259 + ], + "type": "text", + "content": " over 4 scenes." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 428, + 148, + 534, + 216 + ], + "blocks": [ + { + "bbox": [ + 457, + 138, + 502, + 148 + ], + "lines": [ + { + "bbox": [ + 457, + 138, + 502, + 148 + ], + "spans": [ + { + "bbox": [ + 457, + 138, + 502, + 148 + ], + "type": "text", + "content": "(b) Rot. 
(day)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 428, + 148, + 534, + 216 + ], + "lines": [ + { + "bbox": [ + 428, + 148, + 534, + 216 + ], + "spans": [ + { + "bbox": [ + 428, + 148, + 534, + 216 + ], + "type": "image", + "image_path": "87b3ba1233e73dec1da977ae657f32a3189dbcfdc569c031b4207dde3f22d3d3.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 454, + 217, + 504, + 227 + ], + "lines": [ + { + "bbox": [ + 454, + 217, + 504, + 227 + ], + "spans": [ + { + "bbox": [ + 454, + 217, + 504, + 227 + ], + "type": "text", + "content": "(d) Rot. (night)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 281, + 545, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 281, + 545, + 484 + ], + "spans": [ + { + "bbox": [ + 304, + 281, + 545, + 484 + ], + "type": "text", + "content": "cameras, resulting in less accurate matching. We attribute the inferior performance of pinhole query frames at the low accuracy level to IR's insufficient recall and precision. Additionally, pinhole queries are more susceptible to interference when there are many repetitive and symmetrical features in the scene, even when the retrieved reference image is correct (some example figures in the supplementary material). By utilizing VC2 to augment IR and APR's training data, we eliminate the cross-device domain gap. We demonstrate that panoramic perspective and a larger FoV can significantly improve the performance of IR and APRs and find that query frames from " + }, + { + "bbox": [ + 304, + 281, + 545, + 484 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 281, + 545, + 484 + ], + "type": "text", + "content": " camera and ultra-wide FoV cameras are less prone to being misidentified as erroneous locations with similar structures. This result suggests the promising potential of fisheye and " + }, + { + "bbox": [ + 304, + 281, + 545, + 484 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 281, + 545, + 484 + ], + "type": "text", + "content": " cameras as viable sensors for localization tasks in indoor environments with low GPS accuracy." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 496, + 379, + 508 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 496, + 379, + 508 + ], + "spans": [ + { + "bbox": [ + 306, + 496, + 379, + 508 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "text", + "content": "360Loc is the first dataset and benchmark that explores the challenge of cross-device visual positioning, involving " + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "text", + "content": " reference frames, and query frames from pinhole, ultra-wide FoV fisheye, and " + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "text", + "content": " cameras. 
We first identified the absence of datasets with ground truth 6DoF poses for " + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "text", + "content": " images, and the limited research on cross-device localization and the robustness of different cameras in ambiguous scenes. To address these limitations, we build a dataset with " + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "text", + "content": " images as reference and query frames from pinhole, ultra-wide FoV fisheye camera and " + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "text", + "content": " cameras via a virtual camera solution. This method enables fair comparisons in cross-device visual localization tasks and helps reduce the domain gap between different cameras. By evaluating feature-matching-based and pose regression-based methods, we demonstrate the effectiveness of our virtual camera approach and the increased robustness of " + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 514, + 545, + 713 + ], + "type": "text", + "content": " cameras in visual localization for challenging and ambiguous scenes." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22321" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 94, + 107, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 94, + 107, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 94, + 107, + 106 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 114, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 114, + 287, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 114, + 287, + 169 + ], + "spans": [ + { + "bbox": [ + 53, + 114, + 287, + 169 + ], + "type": "text", + "content": "[1] Relja Arandjelovic, Petr Gronat, Akihiko Torii, Tomas Pajdla, and Josef Sivic. Netvlad: Cnn architecture for weakly supervised place recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5297-5307, 2016. 2, 6, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 170, + 287, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 170, + 287, + 203 + ], + "spans": [ + { + "bbox": [ + 53, + 170, + 287, + 203 + ], + "type": "text", + "content": "[2] Iro Armeni, Sasha Sax, Amir R Zamir, and Silvio Savarese. Joint 2d-3d-semantic data for indoor scene understanding. arXiv preprint arXiv:1702.01105, 2017. 
3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 203, + 288, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 203, + 288, + 257 + ], + "spans": [ + { + "bbox": [ + 53, + 203, + 288, + 257 + ], + "type": "text", + "content": "[3] Gabriele Berton, Carlo Masone, and Barbara Caputo. Rethinking visual geo-localization for large-scale applications. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4878-4888, 2022. 2, 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 258, + 288, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 288, + 303 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 288, + 303 + ], + "type": "text", + "content": "[4] Hunter Blanton, Connor Greenwell, Scott Workman, and Nathan Jacobs. Extending absolute pose regression to multiple scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2020. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 303, + 288, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 303, + 288, + 334 + ], + "spans": [ + { + "bbox": [ + 53, + 303, + 288, + 334 + ], + "type": "text", + "content": "[5] Eric Brachmann and Carsten Rother. Learning less is more - 6D camera localization via 3D surface regression. In CVPR, 2018. 1, 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 335, + 288, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 335, + 288, + 368 + ], + "spans": [ + { + "bbox": [ + 53, + 335, + 288, + 368 + ], + "type": "text", + "content": "[6] Eric Brachmann and Carsten Rother. Visual camera relocalization from RGB and RGB-D images using DSAC. TPAMI, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 369, + 288, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 369, + 288, + 413 + ], + "spans": [ + { + "bbox": [ + 53, + 369, + 288, + 413 + ], + "type": "text", + "content": "[7] Eric Brachmann, Alexander Krull, Sebastian Nowozin, Jamie Shotton, Frank Michel, Stefan Gumhold, and Carsten Rother. DSAC-Differentiable RANSAC for camera localization. In CVPR, 2017. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 414, + 288, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 414, + 288, + 458 + ], + "spans": [ + { + "bbox": [ + 53, + 414, + 288, + 458 + ], + "type": "text", + "content": "[8] Samarth Brahmbhatt, Jinwei Gu, Kihwan Kim, James Hays, and Jan Kautz. Geometry-aware learning of maps for camera localization. In IEEE conference on computer vision and pattern recognition, 2018. 3, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 459, + 288, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 459, + 288, + 502 + ], + "spans": [ + { + "bbox": [ + 53, + 459, + 288, + 502 + ], + "type": "text", + "content": "[9] Mai Bui, Tolga Birdal, Haowen Deng, Shadi Albarqouni, Leonidas Guibas, Slobodan Ilic, and Nassir Navab. 6d camera relocalization in ambiguous scenes via continuous multi-modal inference. 2020. 3, 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 502, + 288, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 502, + 288, + 546 + ], + "spans": [ + { + "bbox": [ + 48, + 502, + 288, + 546 + ], + "type": "text", + "content": "[10] Nicholas Carlevaris-Bianco, Arash K Ushani, and Ryan M Eustice. 
University of michigan north campus long-term vision and lidar dataset. The International Journal of Robotics Research, 35(9):1023-1035, 2016. 3, 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 547, + 288, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 547, + 288, + 602 + ], + "spans": [ + { + "bbox": [ + 48, + 547, + 288, + 602 + ], + "type": "text", + "content": "[11] David M Chen, Georges Baatz, Kevin Koser, Sam S Tsai, Ramakrishna Vedantham, Timo Pylvanäinen, Kimmo Roimela, Xin Chen, Jeff Bach, Marc Pollefeys, et al. City-scale landmark identification on mobile devices. In CVPR 2011, pages 737-744. IEEE, 2011. 3, 4, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 602, + 288, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 288, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 288, + 646 + ], + "type": "text", + "content": "[12] Shuai Chen, Zirui Wang, and Victor Prisacariu. Directposenet: absolute pose regression with photometric consistency. In 2021 International Conference on 3D Vision (3DV), pages 1175-1185. IEEE, 2021. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 647, + 288, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 691 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 691 + ], + "type": "text", + "content": "[13] Shuai Chen, Xinghui Li, Zirui Wang, and Victor A Prisacariu. Dfnet: Enhance absolute pose regression with direct feature matching. In ECCV 2022. Tel Aviv, Israel, October 23-27, 2022, Part X. Springer, 2022. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "type": "text", + "content": "[14] Santiago Cortés, Arno Solin, Esa Rahtu, and Juho Kannala. Advio: An authentic dataset for visual-inertial odometry. In" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 714 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "Proceedings of the European Conference on Computer Vision (ECCV), pages 419-434, 2018. 3, 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 96, + 454, + 107 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 454, + 107 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 454, + 107 + ], + "type": "text", + "content": "[15] Dawson-Haggerty et al. trimesh. 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 108, + 545, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 108, + 545, + 163 + ], + "spans": [ + { + "bbox": [ + 308, + 108, + 545, + 163 + ], + "type": "text", + "content": "[16] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 224-236, 2018. 
2, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 164, + 545, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 164, + 545, + 229 + ], + "spans": [ + { + "bbox": [ + 308, + 164, + 545, + 229 + ], + "type": "text", + "content": "[17] Mihai Dusmanu, Ignacio Rocco, Tomas Pajdla, Marc Pollefeys, Josef Sivic, Akihiko Torii, and Torsten Sattler. D2-net: A trainable cnn for joint description and detection of local features. In Proceedings of the IEEE/cvf conference on computer vision and pattern recognition, pages 8092-8101, 2019. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 230, + 545, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 230, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 308, + 230, + 545, + 275 + ], + "type": "text", + "content": "[18] Yixiao Ge, Haibo Wang, Feng Zhu, Rui Zhao, and Hongsheng Li. Self-supervising fine-grained region similarities for large-scale image localization. In European Conference on Computer Vision, 2020. 2, 6, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 276, + 545, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 276, + 545, + 297 + ], + "spans": [ + { + "bbox": [ + 308, + 276, + 545, + 297 + ], + "type": "text", + "content": "[19] Daniel Girardeau-Montaut. Cloudcompare. France: EDF R&D Telecom ParisTech, 11, 2016. 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 298, + 545, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 298, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 308, + 298, + 545, + 331 + ], + "type": "text", + "content": "[20] A. Gordo, J. Almazan, J. Revaud, and D. Larlus. End-to-end learning of deep visual representations for image retrieval. *IJCV*, 2017. 2, 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 333, + 545, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 333, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 308, + 333, + 545, + 376 + ], + "type": "text", + "content": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 377, + 545, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 377, + 545, + 432 + ], + "spans": [ + { + "bbox": [ + 308, + 377, + 545, + 432 + ], + "type": "text", + "content": "[22] Huajian Huang, Yinzhe Xu, Yingshu Chen, and Sai-Kit Yeung. 360vot: A new benchmark dataset for omnidirectional visual object tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20566–20576, 2023. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 434, + 545, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 434, + 545, + 477 + ], + "spans": [ + { + "bbox": [ + 308, + 434, + 545, + 477 + ], + "type": "text", + "content": "[23] Alex Kendall and Roberto Cipolla. Geometric loss functions for camera pose regression with deep learning. In IEEE conference on computer vision and pattern recognition, pages 5974-5983, 2017. 
1, 3, 6, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 478, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 478, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 308, + 478, + 545, + 533 + ], + "type": "text", + "content": "[24] Alex Kendall, Matthew Grimes, and Roberto Cipolla. Posenet: A convolutional network for real-time 6-dof camera relocalization. In Proceedings of the IEEE international conference on computer vision, pages 2938-2946, 2015. 1, 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 534, + 545, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 534, + 545, + 579 + ], + "spans": [ + { + "bbox": [ + 308, + 534, + 545, + 579 + ], + "type": "text", + "content": "[25] Junho Kim, Changwoon Choi, Hojun Jang, and Young Min Kim. Piccolo: point cloud-centric omnidirectional localization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3313-3323, 2021. 2, 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 580, + 545, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 580, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 308, + 580, + 545, + 624 + ], + "type": "text", + "content": "[26] Junho Kim, Hojun Jang, Changwoon Choi, and Young Min Kim. Cpo: Change robust panorama to point cloud localization. In European Conference on Computer Vision, pages 176-192. Springer, 2022." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 624, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 545, + 669 + ], + "type": "text", + "content": "[27] Junho Kim, Eun Sun Lee, and Young Min Kim. Calibrating panoramic depth estimation for practical localization and mapping. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8830-8840, 2023. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "type": "text", + "content": "[28] Laurent Kneip and Paul Furgale. Opengv: A unified and generalized approach to real-time calibrated geometric vision. In 2014 IEEE international conference on robotics and automation (ICRA), pages 1-8. IEEE, 2014. 6" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "22322" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[29] Kenji Koide, Shuji Oishi, Masashi Yokozuka, and Atsuhiko Banno. General, single-shot, target-less, and automatic lidar-camera extrinsic calibration toolbox. arXiv preprint arXiv:2302.05094, 2023. 
4, 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 184 + ], + "type": "text", + "content": "[30] Donghwan Lee, Soohyun Ryu, Suyong Yeon, Yonghan Lee, Deokhwa Kim, Cheolho Han, Yohann Cabon, Philippe Weinzaepfel, Nicolas Guérin, Gabriela Csurka, et al. Large-scale localization datasets in crowded indoor spaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3227-3236, 2021. 3, 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 185, + 287, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 185, + 287, + 217 + ], + "spans": [ + { + "bbox": [ + 48, + 185, + 287, + 217 + ], + "type": "text", + "content": "[31] Philipp Lindenberger, Paul-Edouard Sarlin, and Marc Pollefeys. LightGlue: Local Feature Matching at Light Speed. In ICCV, 2023. 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 220, + 287, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 220, + 287, + 284 + ], + "spans": [ + { + "bbox": [ + 48, + 220, + 287, + 284 + ], + "type": "text", + "content": "[32] Haomin Liu, Linsheng Zhao, Zhen Peng, Weijian Xie, Mingxuan Jiang, Hongbin Zha, Hujun Bao, and Guofeng Zhang. A low-cost and scalable framework to build large-scale localization benchmark for augmented reality. IEEE Transactions on Circuits and Systems for Video Technology, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 286, + 287, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 286, + 287, + 330 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 287, + 330 + ], + "type": "text", + "content": "[33] Liu Liu, Hongdong Li, and Yuchao Dai. Efficient global 2d-3d matching for camera localization in a large-scale 3d map. In Proceedings of the IEEE International Conference on Computer Vision, pages 2372-2381, 2017. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 332, + 287, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 332, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 48, + 332, + 287, + 364 + ], + "type": "text", + "content": "[34] Zheng Liu and Fu Zhang. Balm: Bundle adjustment for lidar mapping. IEEE Robotics and Automation Letters, 6(2): 3184-3191, 2021. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 365, + 287, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 287, + 398 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 287, + 398 + ], + "type": "text", + "content": "[35] David G Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60:91-110, 2004. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 399, + 287, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 399, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 48, + 399, + 287, + 443 + ], + "type": "text", + "content": "[36] Iaroslav Melekhov, Juha Ylioinas, Juho Kannala, and Esa Rahtu. Image-based localization using hourglass networks. In IEEE international conference on computer vision workshops, 2017. 
3, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 445, + 287, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 445, + 287, + 499 + ], + "spans": [ + { + "bbox": [ + 48, + 445, + 287, + 499 + ], + "type": "text", + "content": "[37] Arthur Moreau, Nathan Piasco, Dzmitry Tsishkou, Bogdan Stanciulescu, and Arnaud de La Fortelle. Coordinet: uncertainty-aware pose regressor for reliable vehicle localization. In IEEE/CVF Winter Conference on Applications of Computer Vision, 2022. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 501, + 287, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 501, + 287, + 566 + ], + "spans": [ + { + "bbox": [ + 48, + 501, + 287, + 566 + ], + "type": "text", + "content": "[38] Jeffri Murragarra-Llerena, Thiago LT Da Silveira, and Claudio R Jung. Pose estimation for two-view panoramas based on keypoint matching: A comparative study and critical analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5202-5211, 2022. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 568, + 287, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 287, + 601 + ], + "type": "text", + "content": "[39] Ken Museth. Vdb: High-resolution sparse volumes with dynamic topology. ACM transactions on graphics (TOG), 32 (3):1-22, 2013. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 602, + 287, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 287, + 645 + ], + "type": "text", + "content": "[40] Tony Ng, Adrian Lopez-Rodriguez, Vassileios Balntas, and Krystian Mikolajczyk. Reassessing the limitations of cnn methods for camera pose regression. arXiv preprint arXiv:2108.07260, 2021. 8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "text", + "content": "[41] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019. 6" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "text", + "content": "[42] Paul-Edouard Sarlin, Cesar Cadena, Roland Siegwart, and Marcin Dymczyk. From coarse to fine: Robust hierarchical localization at large scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12716–12725, 2019. 
1, 2, 7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 129, + 547, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 547, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 547, + 183 + ], + "type": "text", + "content": "[43] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4938–4947, 2020. 2, 4, 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 184, + 545, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 184, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 308, + 184, + 545, + 249 + ], + "type": "text", + "content": "[44] Paul-Edouard Sarlin, Mihai Dusmanu, Johannes L Schonberger, Pablo Speciale, Lukas Gruber, Viktor Larsson, Ondrej Miksik, and Marc Pollefeys. Lamar: Benchmarking localization and mapping for augmented reality. In European Conference on Computer Vision, pages 686-704. Springer, 2022. 3, 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 250, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 250, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 308, + 250, + 545, + 293 + ], + "type": "text", + "content": "[45] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. Efficient & effective prioritized matching for large-scale image-based localization. IEEE transactions on pattern analysis and machine intelligence, 39(9):1744-1756, 2016. 1, 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 294, + 545, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 294, + 545, + 360 + ], + "spans": [ + { + "bbox": [ + 308, + 294, + 545, + 360 + ], + "type": "text", + "content": "[46] Torsten Sattler, Will Maddern, Carl Toft, Akihiko Torii, Lars Hammarstrand, Erik Stenborg, Daniel Safari, Masatoshi Okutomi, Marc Pollefeys, Josef Sivic, et al. Benchmarking 6dof outdoor visual localization in changing conditions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8601-8610, 2018. 3, 4, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 361, + 545, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 361, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 361, + 545, + 403 + ], + "type": "text", + "content": "[47] Torsten Sattler, Qunjie Zhou, Marc Pollefeys, and Laura Leal-Taixe. Understanding the limitations of cnn-based absolute camera pose regression. In IEEE/CVF conference on computer vision and pattern recognition, 2019. 3, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 404, + 545, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 404, + 545, + 469 + ], + "spans": [ + { + "bbox": [ + 308, + 404, + 545, + 469 + ], + "type": "text", + "content": "[48] Thomas Schops, Johannes L Schonberger, Silvano Galliani, Torsten Sattler, Konrad Schindler, Marc Pollefeys, and Andreas Geiger. A multi-view stereo benchmark with high-resolution images and multi-camera videos. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3260-3269, 2017. 
3, 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 471, + 545, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 471, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 308, + 471, + 545, + 514 + ], + "type": "text", + "content": "[49] Yoli Shavit, Ron Ferens, and Yoshi Keller. Learning multiscene absolute pose regression with transformers. In IEEE/CVF International Conference on Computer Vision, pages 2733-2742, 2021. 1, 3, 6, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 514, + 545, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 514, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 308, + 514, + 545, + 568 + ], + "type": "text", + "content": "[50] Jamie Shotton, Ben Glocker, Christopher Zach, Shahram Izadi, Antonio Criminisi, and Andrew Fitzgibbon. Scene coordinate regression forests for camera relocalization in rgb-d images. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2930-2937, 2013. 3, 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 570, + 545, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 570, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 308, + 570, + 545, + 592 + ], + "type": "text", + "content": "[51] Alexander D Stewart. Localisation using the appearance of prior structure. PhD thesis, University of Oxford, 2014. 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 593, + 545, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 593, + 545, + 645 + ], + "spans": [ + { + "bbox": [ + 308, + 593, + 545, + 645 + ], + "type": "text", + "content": "[52] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8922-8931, 2021. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 647, + 545, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 545, + 690 + ], + "type": "text", + "content": "[53] Xun Sun, Yuanfan Xie, Pei Luo, and Liang Wang. A dataset for benchmarking image-based localization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7436-7444, 2017. 
3, 4" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "text", + "content": "[54] Hajime Taira, Masatoshi Okutomi, Torsten Sattler, Mircea Cimpoi, Marc Pollefeys, Josef Sivic, Tomas Pajdla, and Ak" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "22323" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 497 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "text", + "content": "ihiko Torii. Inloc: Indoor visual localization with dense matching and view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7199-7209, 2018. 1, 3, 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 161 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 161 + ], + "type": "text", + "content": "[55] Michal Tyszkiiewicz, Pascal Fua, and Eduard Trulls. Disk: Learning local features with policy gradient. Advances in Neural Information Processing Systems, 33:14254-14265, 2020. 2, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 288, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 288, + 205 + ], + "type": "text", + "content": "[56] Vladyslav Usenko, Nikolaus Demmel, and Daniel Cremers. The double sphere camera model. In 2018 International Conference on 3D Vision (3DV), pages 552-560. IEEE, 2018. 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 208, + 288, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 208, + 288, + 240 + ], + "spans": [ + { + "bbox": [ + 48, + 208, + 288, + 240 + ], + "type": "text", + "content": "[57] Ignacio Vizzo, Tiziano Guadagnino, Jens Behley, and Cyril Stachniss. Vdbfusion: Flexible and efficient tsdf integration of range sensor data. Sensors, 22(3):1296, 2022. 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 241, + 288, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 288, + 307 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 288, + 307 + ], + "type": "text", + "content": "[58] Johanna Wald, Torsten Sattler, Stuart Golodetz, Tommaso Cavallari, and Federico Tombari. Beyond controlled environments: 3d camera re-localization in changing indoor scenes. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VII 16, pages 467-487. Springer, 2020. 
3, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 308, + 288, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 308, + 288, + 352 + ], + "spans": [ + { + "bbox": [ + 48, + 308, + 288, + 352 + ], + "type": "text", + "content": "[59] Jian Wu, Liwei Ma, and Xiaolin Hu. Delving deeper into convolutional neural networks for camera relocalization. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 5644-5651. IEEE, 2017. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 353, + 287, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 353, + 287, + 396 + ], + "spans": [ + { + "bbox": [ + 48, + 353, + 287, + 396 + ], + "type": "text", + "content": "[60] Hang Xu, Qiang Zhao, Yike Ma, Xiaodong Li, Peng Yuan, Bailan Feng, Chenggang Yan, and Feng Dai. Pandora: A panoramic detection dataset for object with orientation. In ECCV, 2022. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 398, + 288, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 398, + 288, + 463 + ], + "spans": [ + { + "bbox": [ + 48, + 398, + 288, + 463 + ], + "type": "text", + "content": "[61] Shen Yan, Yu Liu, Long Wang, Zehong Shen, Zhen Peng, Haomin Liu, Maojun Zhang, Guofeng Zhang, and Xiaowei Zhou. Long-term visual localization with mobile sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17245-17255, 2023. 1, 3, 4, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 464, + 288, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 464, + 288, + 497 + ], + "spans": [ + { + "bbox": [ + 48, + 464, + 288, + 497 + ], + "type": "text", + "content": "[62] Dawen Yu and Shunping Ji. Grid based spherical cnn for object detection from panoramic images. Sensors, 19(11): 2622, 2019. 
1" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "22324" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_content_list.json b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..15191483461dadcadefec165e8987be70e8a2c8e --- /dev/null +++ b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_content_list.json @@ -0,0 +1,1608 @@ +[ + { + "type": "text", + "text": "3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions", + "text_level": 1, + "bbox": [ + 135, + 128, + 833, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Weijia Li $^{1*}$ , Haote Yang $^{2*}$ , Zhenghao Hu $^{1}$ , Juepeng Zheng $^{1}$ , Gui-Song Xia $^{3}$ , Conghui He $^{2,4\\dagger}$ , Sun Yat-Sen University, Shanghai AI Laboratory, Wuhan University, SenseTime Research", + "bbox": [ + 114, + 202, + 854, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{liweij29, zhengjp8}@mail.sysu.edu.cn, {yanghaote, heconghui}@pjlab.org.cn, huzhh9@mail2.sysu.edu.cn, guisong.xia@whu.edu.cn", + "bbox": [ + 148, + 241, + 808, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 308, + 313, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D building reconstruction from monocular remote sensing images is an important and challenging research problem that has received increasing attention in recent years, owing to its low cost of data acquisition and availability for large-scale applications. However, existing methods rely on expensive 3D-annotated samples for fully-supervised training, restricting their application to large-scale cross-city scenarios. In this work, we propose MLS-BRN, a multi-level supervised building reconstruction network that can flexibly utilize training samples with different annotation levels to achieve better reconstruction results in an end-to-end manner. To alleviate the demand on full 3D supervision, we design two new modules, Pseudo Building Bbox Calculator and Roof-Offset guided Footprint Extractor, as well as new tasks and training strategies for different types of samples. Experimental results on several public and new datasets demonstrate that our proposed MLS-BRN achieves competitive performance using much fewer 3D-annotated samples, and significantly improves the footprint extraction and 3D reconstruction performance compared with current state-of-the-art. The code and datasets of this work will be released at https://github.com/opendatalabMLS-BRN.git.", + "bbox": [ + 76, + 340, + 473, + 674 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 703, + 209, + 718 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D building reconstruction is a fundamental task for large-scale city modeling and has received increasing attention in recent studies. Among these studies, monocular 3D building reconstruction has become a promising and economic solution for large-scale real-world applications, owing to its lower data acquisition cost and larger data coverage compared to multi-view stereo imagery and LiDAR data [6, 31]. Meanwhile, the limited information of monocular images as well as the diversity of building structures also result in", + "bbox": [ + 75, + 728, + 468, + 864 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9f5db6e39225d5b826e5067100e20cdd9d49b0d7ed32878666e38393ca52c82b.jpg", + "image_caption": [ + "Training samples of different annotation levels" + ], + "image_footnote": [], + "bbox": [ + 506, + 327, + 658, + 428 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0260dd48f10a2defdbcaac4633276157e283388fa4c2ee3ee3a66550bd18a6b9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 329, + 767, + 428 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/07979d40fbfcae765e8fd4a67ecae21039709debc7748947828dab30f41bd727.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 777, + 329, + 879, + 428 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/aace82f59c79bbf32be3bc4991239091e7407c18a3b94d51158ee9397f190c3a.jpg", + "image_caption": [ + "Monocular 3D building reconstruction", + "Figure 1. Our proposed method achieves 3D building reconstruction by training samples of different annotation levels. Large quantity of samples only include building footprint annotations, whereas a small quantity of samples contain extra roof-to-footprint offset and building height annotations." + ], + "image_footnote": [], + "bbox": [ + 506, + 452, + 888, + 520 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "great challenges for large-scale 3D building reconstruction.", + "bbox": [ + 500, + 638, + 890, + 655 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Inspired by the progress of supervised monocular depth estimation methods, deep neural networks have been broadly applied to monocular 3D building reconstruction studies. Most studies utilize building footprints or other types of semantic labels as prior information to facilitate building height estimation from near-nadir images [15, 24, 25, 29, 37]. Off-nadir images, by contrast, constitute a larger proportion of the remote sensing images and provide additional useful information for building height estimation, which have demonstrated significant potential in several recent studies [4, 5, 19, 32, 33]. Some studies designed geocentric pose estimation task considering the parallax effect of building roof and footprint [4, 5], aiming at estimating the height values instead of reconstruct a 3D model. Other studies leveraged the relation between different components of a building instance (e.g. roof, footprint,", + "bbox": [ + 496, + 657, + 892, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*These authors contributed equally to this work.", + "bbox": [ + 94, + 875, + 351, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding author.", + "bbox": [ + 96, + 887, + 222, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "27728", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "and facade) as well as the offset between roof and footprint, which has proven to be an effective solution for 3D building reconstruction and accurate extraction of building footprints [19, 32].", + "bbox": [ + 76, + 90, + 468, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In general, existing monocular building reconstruction methods are designed for fully-supervised learning, requiring a large number of fully-annotated 3D labels for network training. However, due to the expensive annotation cost, the available datasets for 3D building reconstruction are still very insufficient, restricting existing 3D reconstruction methods to single city or single dataset scenarios. By contrast, owing to the low annotation cost and the increase of open map data, public building footprints have an extremely large coverage and quantity. Additionally, existing building datasets provide different levels of annotations, such as footprint only, footprint and pixel-wise height [4], footprint and offset vector [19, 32], etc. The large-scale 2D footprints and different levels of annotated datasets can provide new opportunities for enlarging 3D building reconstruction application scenarios and reducing the annotation cost if they are effectively utilized.", + "bbox": [ + 75, + 164, + 470, + 421 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we propose MLS-BRN, a Multi-Level Supervised Building Reconstruction Network based on monocular remote sensing images, which is a unified and flexible framework that is capable of utilizing the training samples with different annotation levels. To alleviate the demand on 3D annotations and enhance the building reconstruction performance, we design new tasks regarding the meta information of off-nadir images and two new modules, i.e., Pseudo Building Bbox Calculator and Roof-Offset guided Footprint Extractor, as well as a new training strategy based on different types of samples. Experimental results on several public and new datasets demonstrate that our method achieves competitive performance when only using a small proportion of 3D-annotated samples, and significantly improves the building segmentation and height estimation performance compared with current state-of-the-art. 
Our main contributions are summarized as follows:", + "bbox": [ + 75, + 434, + 468, + 690 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We design MLS-BRN, a multi-level supervised building reconstruction network, which consists of new tasks and modules to enhance the relation between different components of a building instance and alleviate the demand on 3D annotations.", + "- We propose a multi-level training strategy that enables the training of MLS-BRN with different supervision levels to further improve the 3D reconstruction performance.", + "- We extend the monocular building reconstruction datasets to more cities. Comprehensive experiments under different settings demonstrate the potential of MLS-BRN in large-scale cross-city scenarios." + ], + "bbox": [ + 76, + 719, + 467, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related work", + "text_level": 1, + "bbox": [ + 500, + 90, + 635, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Building footprint extraction", + "text_level": 1, + "bbox": [ + 500, + 114, + 756, + 131 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Building footprint extraction is an important prerequisite for monocular 3D building reconstruction. Various instance and semantic segmentation networks have been broadly applied to building extraction tasks. Many studies utilize multi-task segmentation network to improve the building segmentation performance. For instance, Yuan [35] proposed the signed distance representation for building footprint extraction, achieving better performance compared with the single-task fully-connected network. Similarly, in [24], a modified signed distance function was introduced and jointly learned with other tasks for predicting building footprint outlines and heights. To improve the geometry shapes of building extraction results, several methods directly predicted the vertices of a building polygon based on Recurrent Neural Network or Graph Neural Network [22, 36, 39], or combined the pixel-based multi-task segmentation network with a graph-based polygon refinement network using a rule-based module [20]. In addition, some recent studies converted building footprint extraction into roof segmentation and roof-to-footprint offset estimation tasks, which achieved promising performance for building footprint extraction, especially for high-rise buildings in off-nadir images [19, 32].", + "bbox": [ + 496, + 138, + 890, + 484 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, most existing methods directly extract the building footprints and perform worse for high-rise buildings in off-nadir images. Offset-based methods can effectively alleviate this problem, but the expensive offset annotation efforts and the post-processing process are still inevitable. On the contrary, our work proposes a multi-level supervised solution that is capable of leveraging different types of samples to reduce the demand for offset annotation, achieving promising footprint extraction results in an end-to-end manner.", + "bbox": [ + 496, + 487, + 890, + 636 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Monocular 3D building reconstruction", + "text_level": 1, + "bbox": [ + 500, + 650, + 830, + 666 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Inspired by the progress of monocular depth estimation, deep neural networks have been widely used for monocular building height estimation in recent studies [8, 18, 33]. 
Most of these studies are designed for height estimation from near-nadir images, in which the building roof and footprint are almost overlapped. Some methods used an encoder-decoder network to regress the height values [25], or used a generative adversarial network to simulate a height map [9]. Moreover, the semantic labels have been utilized as effective priors in many existing methods considering the limited information provided from the near-nadir images for height estimation. Some studies designed a multitask network for joint footprint extraction and height estimation [8, 29, 37], while others exploit the semantic labels as prior information for height estimation [15]. In actual", + "bbox": [ + 496, + 674, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "27729", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d3ca67c098577710a9ae026ded680c76d81571b9dc0d7e6ad63a70eeb02f4c79.jpg", + "image_caption": [ + "Figure 2. An overview of our proposed method. Taking a monocular remote sensing image as input, our MLS-BRN generates a set of building bboxes, roof-to-footprint offsets, building heights, and pixel-wise roof masks. The predicted roof masks and their corresponding offsets are further integrated to predict pixel-wise footprint masks. The predicted footprint mask and building height are used to produce the final vectorized 3D model. Two novel modules are introduced: (1) the ROFE predicts footprint masks guided by the predicted roof masks and offsets; (2) the PBC predicts off-nadir and offset angles to calculate pseudo building bboxes for buildingbbox-unknown samples." + ], + "image_footnote": [], + "bbox": [ + 102, + 89, + 867, + 303 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "scenarios, off-nadir images constitute a large proportion of the remote sensing images, in which the parallax effect of roof and footprint results in more challenges for extracting footprints but provides additional information for height estimation as well. Some recent studies [4, 5] design methods to learn the geocentric pose of buildings in off-nadir images for monocular height estimation [28], while others leverage the offset between building roof and footprint and the relation between different components to reconstruct a 3D building model [19, 32].", + "bbox": [ + 75, + 409, + 470, + 559 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In summary, the monocular building reconstruction methods in existing studies require expensive and fully-annotated 3D labels for supervised learning. Our proposed method, by contrast, is a unified and flexible framework for 3D building reconstruction with different supervision levels, which effectively reduces the demand for the large-scale 3D annotations.", + "bbox": [ + 75, + 561, + 468, + 667 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Monocular 3D reconstruction with fewer labels", + "text_level": 1, + "bbox": [ + 76, + 679, + 468, + 695 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In monocular 3D reconstruction in the general computer vision domain, several methods have been proposed for reducing the 3D annotation demand via weakly-supervised or semi-supervised learning [3, 11, 14, 16, 26]. In Yang et al. [34], a unified framework combining two types of supervisions was proposed, i.e., a small number of camera pose annotations and a large number of unlabeled images. In Neverova et al. 
[27], an intermediate representation containing important topological and structural information of hand was introduced to enable the weakly-supervised training for hand pose estimation. Concurrently, Gwak et al. [10] effectually leveraged a weak supervision type, i.e., foreground mask, as a substitute for costly 3D CAD annota", + "bbox": [ + 75, + 704, + 470, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tions, which incorporates a raytrace pooling layer to enable perspective projection and backpropagation.", + "bbox": [ + 498, + 409, + 890, + 440 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In contrast to the aforementioned studies, our proposed method leverages prior knowledge about the 3D structure of a building instance and the monocular remote sensing image, including the relation between roof, footprint, height, offset angle, and off-nadir angle, enabling multi-level supervised 3D reconstruction with fewer annotation efforts.", + "bbox": [ + 496, + 441, + 892, + 531 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methods", + "text_level": 1, + "bbox": [ + 500, + 547, + 598, + 561 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Problem statement", + "text_level": 1, + "bbox": [ + 500, + 573, + 681, + 587 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given an off-nadir remote sensing image $I$ that includes buildings $B = \\{b_{1}, b_{2}, \\ldots, b_{N}\\}$ , the objective of monocular 3D building reconstruction is to identify all the footprints $F = \\{f_{1}, f_{2}, \\ldots, f_{N}\\}$ and roofs $R = \\{r_{1}, r_{2}, \\ldots, r_{N}\\}$ corresponding to $B$ . The difficulty is that the footprints of buildings may be partially visible from an off-nadir viewing angle. Thus, previous studies, including [19] and [32], typically solve this issue by training a deep neural network with samples annotated with both $F$ and roof-to-footprint offsets $\\vec{V} = \\{v_{1}, v_{2}, \\ldots, v_{N}\\}$ .", + "bbox": [ + 496, + 595, + 890, + 747 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "However, the cost of annotating remote sensing images is still high, particularly for offset annotations. Therefore, we suggest addressing this issue by training a deep model that effectively uses samples containing both $F$ and $\\vec{V}$ annotations, alongside samples only annotated with $F$ .", + "bbox": [ + 496, + 750, + 890, + 824 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To facilitate training with offset-unknown samples, two tasks are included; one for predicting the off-nadir angle $\\theta_{I}$ and the other for the offset angle $\\varphi_{I}$ . Additionally, an instance-wise footprint segmentation task is included to predict the footprint conditioned on the predicted roof and off", + "bbox": [ + 496, + 825, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "27730", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "set. Finally, a task for predicting real-world height is introduced to enhance the comprehension of the correlation between footprint and roof placement. In summary, four additional tasks are added to the original three tasks in LOFT-FOA [32]: (1) off-nadir angle prediction task; (2) offset angle prediction task; (3) footprint segmentation task; (4) real-world height prediction task.", + "bbox": [ + 75, + 90, + 472, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. 
Network structure", + "text_level": 1, + "bbox": [ + 76, + 207, + 256, + 222 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fig. 2 illustrates the proposed architecture of our MLS-BRN. To facilitate multi-level supervised learning, two novel modules are introduced, namely the Pseudo Building Bbox Calculator (PBC) and the Roof-Offset guided Footprint Extractor (ROFE). The PBC module provides pseudo building boxes to determine the positivity/negativity of the region proposals from the RPN module when offset-unknown (i.e. building bbox-unknown) samples are processed in the MLS-BRN. The ROFE module has two significant functions. Firstly, it provides a more straightforward method to supervise the building footprint segmentation task. Secondly, it offers an indirect method of supervising offset prediction and roof segmentation for offset-unknown samples as they pass through the MLS-BRN. Additionally, a building height prediction task has been included in order to predict the real-world building height.", + "bbox": [ + 75, + 231, + 472, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 Pseudo Building Bbox Calculator (PBC)", + "text_level": 1, + "bbox": [ + 76, + 494, + 411, + 508 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Samples without the ground truth for building bounding box $b$ -bbox $_{gt}$ cannot be utilized by previous models, like LOFT-FOA [32]. To address this issue, we propose a module that predicts pseudo building bounding boxes to substitute $b$ -bbox $_{gt}$ . For a provided off-nadir remote sensing image $I$ and one building $b$ contained by $I$ , we can describe the connection between the image-wise off-nadir angle $\\theta_{I}$ , the offset angle $\\varphi_{I}$ , the factor for scaling real-world height to pixel scale $s_{I}$ , and the building's height $h_{b}$ and offset $\\vec{v}_{b}$ using the following equation:", + "bbox": [ + 75, + 518, + 468, + 670 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\vec {v} _ {b} = | | \\vec {v} _ {b} | | _ {2} \\times \\vec {e} \\\\ = \\left\\| \\vec {v} _ {b} \\right\\| _ {2} \\times \\left[ e _ {x}, e _ {y} \\right] \\tag {1} \\\\ = h _ {b} \\times s _ {I} \\times \\tan \\theta_ {I} \\times [ \\cos \\varphi_ {I}, \\sin \\varphi_ {I} ] \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 681, + 468, + 736 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $||\\vec{v}_b||_2$ is the $L2$ norm of the offset, $\\vec{e}$ is the unit normal vector of $\\vec{v}_b$ . The PBC module uses an off-nadir angle head to predict an image-wise off-nadir angle $\\theta_{pred}$ and an offset angle head to predict an image-wise offset angle $\\varphi_{pred}$ . Then, following Eq. (1), they are combined with the instance-wise building height ground truth $h_{gt}$ , and scale factor $s_{gt}$ to compute the pseudo offset $\\vec{v}_{pred}$ . Finally, $f_{gt}$ is translated to get the pseudo building bbox $b$ -bbox $_{pred}$ guided by $\\vec{v}_{pred}$ . $b$ -bbox $_{pred}$ will play the role of $b$ -bbox $_{gt}$ during the training of the building bbox-unknown samples.", + "bbox": [ + 75, + 750, + 472, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "From the perspective of weak supervision, the PBC module extracts the image-wise angle information, i.e. the offset angle and the off-nadir angle, and uses it to supervise the instance-wise task. 
Note that for building height-unknown samples, the pseudo bounding boxes are calculated by directly enlarge the footprint boxes.", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 Roof-Offset guided Footprint Extractor (ROFE)", + "text_level": 1, + "bbox": [ + 498, + 200, + 888, + 215 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Previous works calculate the footprint mask in the inference stage by translating the inferred roof guided by the inferred offset. The ROFE module, however, predicts the footprint mask directly. It trains a convolutional network to learn the translation process, using the inferred roof mask and offset as inputs. For offset-aware (i.e. roof-aware) samples, this end-to-end training process adds more supervision on the offset head and the roof head. And for offset-unknown samples, which cannot contribute to the training of the offset head and the roof head due to lack of ground truth, ROFE provides an indirect way to supervise these two heads.", + "bbox": [ + 496, + 224, + 890, + 391 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Network training", + "text_level": 1, + "bbox": [ + 498, + 400, + 671, + 416 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we first introduce the loss functions in our MLS-BRN. Then we introduce our three levels of training samples graded by their level of supervision and their training strategies. The total hybrid loss is presented at the end of this section.", + "bbox": [ + 496, + 424, + 890, + 498 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.1 Loss definition", + "text_level": 1, + "bbox": [ + 498, + 518, + 656, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The LOFT-FOA [32] is trained by minimising Eq. (2), where $\\mathcal{L}_{rp}$ , $\\mathcal{L}_{rc}$ , $\\mathcal{L}_{mh}$ are the same as those in Mask R-CNN [13], i.e., the losses for the RPN, R-CNN, and mask head, respectively; $\\mathcal{L}_o$ is the loss for the offset head, which is a standard smooth L1 Loss.", + "bbox": [ + 496, + 542, + 890, + 617 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {L F} = \\mathcal {L} _ {r p} + \\beta_ {1} \\mathcal {L} _ {r c} + \\beta_ {2} \\mathcal {L} _ {m h} + \\beta_ {3} \\mathcal {L} _ {o} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 562, + 630, + 890, + 647 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The MLS-BRN model keeps the four losses the same as LOFT-FOA [32] and introduces new losses to train the newly added modules. The footprint mask loss of the ROFE module is the same as $\\mathcal{L}_{mh}$ , which is a standard cross entropy loss (Eq. (3)).", + "bbox": [ + 496, + 659, + 890, + 734 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {f} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\sum_ {c = 1} ^ {C} y _ {i, c} \\times \\log (p \\left(y _ {i, c}\\right)) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 571, + 744, + 890, + 787 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The loss of the offset angle head of the PBC module is calculated according to Eq. 
(4), in which $\\mathcal{L}_{\\text{ova}}$ denotes the offset angle loss; $\\vec{v}_{pred}$ denotes the predicted unit normal vector of the offset.", + "bbox": [ + 496, + 797, + 890, + 857 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {o v a} = \\mathcal {L} _ {a n g} + \\lambda_ {1} \\mathcal {L} _ {r e g} \\tag {4} \\\\ = | | \\vec {v} _ {p r e d} - \\vec {v} _ {g t} | | _ {1} + \\lambda_ {1} | | | | \\vec {v} _ {p r e d} | | _ {2} - 1 | | _ {1} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 539, + 867, + 890, + 904 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "27731", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The nadir angle head of the PBC module is trained following Eq. (5), where $\\mathcal{L}_{ona}$ is the off-nadir angle loss; $\\theta_{pred}$ is the predicted tangent of the off-nadir angle.", + "bbox": [ + 76, + 90, + 468, + 137 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {o n a}} = \\left\\| \\tan \\theta_ {\\text {p r e d}} - \\tan \\theta_ {g t} \\right\\| _ {1} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 165, + 150, + 468, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The height head loss of our MLS-BRN is calculated by Eq. (6), in which $\\mathcal{L}_h$ denotes the height loss; $h_{pred}$ denotes the predicted building height.", + "bbox": [ + 76, + 180, + 468, + 226 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {h} = \\left| \\left| h _ {p r e d} - h _ {g t} \\right| \\right| _ {1} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 239, + 468, + 257 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.2 Multi-level training strategy", + "text_level": 1, + "bbox": [ + 76, + 277, + 326, + 292 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In our proposed unified framework, all the training samples can be graded into three levels according to their level of supervision (Fig. 1):", + "bbox": [ + 76, + 303, + 468, + 348 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Level 1 samples: samples with only instance-wise footprint annotation, which are denoted by $\\mathcal{X}^N = \\{x_1^N, x_2^N, \\dots, x_{n_3}^N\\}$ . $N$ means no additional supervision.", + "- Level 2 samples: samples with instance-wise footprint and building height annotation, which are denoted by $\\mathcal{X}^H = \\{x_1^H,x_2^H,\\dots,x_{n_2}^H\\}$ .", + "- Level 3 samples: samples with instance-wise footprint, offset, and building height annotation, which are denoted by $\\mathcal{X}^{OH} = \\{x_1^{OH}, x_2^{OH}, \\dots, x_{n_1}^{OH}\\}$ ." + ], + "bbox": [ + 76, + 349, + 467, + 484 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Different levels of samples are supervised by different training strategies. As defined in Eq. (7), the loss function for $\\mathcal{X}^N$ is only based on $\\mathcal{L}_f$ .", + "bbox": [ + 76, + 486, + 468, + 531 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathcal {X} ^ {N}} = \\mathcal {L} _ {f} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 232, + 545, + 468, + 561 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The loss function for $\\mathcal{X}^H$ is defined in Eq. (8). 
In $\\mathcal{L}_{\\mathcal{X}^H}$ , the $\\mathcal{L}_{rp}$ is activated since the PBC module can predict a high-quality pseudo building bbox, which is good enough to supervise the RPN module.", + "bbox": [ + 76, + 575, + 468, + 635 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathcal {X} ^ {H}} = \\mathcal {L} _ {\\mathcal {X} ^ {N}} + \\alpha_ {1} \\mathcal {L} _ {r p} + \\alpha_ {2} \\mathcal {L} _ {h} \\tag {8} \\\\ = \\mathcal {L} _ {f} + \\alpha_ {1} \\mathcal {L} _ {r p} + \\alpha_ {2} \\mathcal {L} _ {h} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 165, + 648, + 467, + 684 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The loss function for $\\mathcal{X}^{OH}$ is defined in Eq. (9). Compared with the original $\\mathcal{L}_{LF}$ , $\\mathcal{L}_{\\mathcal{X}^{OH}}$ adds four more losses: $\\mathcal{L}_f$ , $\\mathcal{L}_h$ , $\\mathcal{L}_{ona}$ , $\\mathcal{L}_{ova}$ . The $\\mathcal{L}_{ona}$ and $\\mathcal{L}_{ova}$ are used for training the two angle heads of the PBC module.", + "bbox": [ + 76, + 699, + 468, + 758 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\chi^ {O H}} = \\mathcal {L} _ {\\chi^ {H}} + \\alpha_ {3} \\mathcal {L} _ {r c} + \\alpha_ {4} \\mathcal {L} _ {m h} \\\\ + \\alpha_ {5} \\mathcal {L} _ {o} + \\alpha_ {6} \\mathcal {L} _ {o n a} + \\alpha_ {7} \\mathcal {L} _ {o v a} \\tag {9} \\\\ = \\mathcal {L} _ {L F} + \\mathcal {L} _ {f} + \\alpha_ {2} \\mathcal {L} _ {h} + \\alpha_ {6} \\mathcal {L} _ {o n a} + \\alpha_ {7} \\mathcal {L} _ {o v a} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 773, + 467, + 827 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The final hybrid loss is defined as the total loss of the three levels of training samples according to Eq. (10).", + "bbox": [ + 76, + 840, + 468, + 871 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {\\mathcal {X} ^ {N}} + \\mathcal {L} _ {\\mathcal {X} ^ {H}} + \\mathcal {L} _ {\\mathcal {X} ^ {O H}} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 885, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Implementation details", + "text_level": 1, + "bbox": [ + 500, + 90, + 712, + 106 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As mentioned in Fig. 2, we use ResNet-50 [12] with FPN [23] pre-trained on the ImageNet as the backbone. All the models are trained with a batch size of 4 using NVIDIA 3090 GPUs. To align with LOFT-FOA [32], we train 24 epochs for all the models, with the learning rate starting from 0.01 and decaying by a factor of 0.1 at the $16^{th}$ and $22^{nd}$ epochs. The SGD algorithm with a weight decay of 0.0001 and a momentum of 0.9 is used for all experiments. LOFT-FOA [32] is used as the basic architecture of the MLS-BRN model, and all the hyperparameters that occur in both LOFT-FOA [32] and MLS-BRN are the same, except for the learning rate mentioned above. All models are built in PyTorch.", + "bbox": [ + 496, + 113, + 890, + 308 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Eq. (4), we set $\\lambda_{1} = 0.1$ to balance the two loss items. In Eq. (8), we set $\\alpha_{1} = 1$ to keep the loss weight of ROFE the same as the roof mask head, and set $\\alpha_{2} = 32$ since the absolute building height loss value is relatively small. In Eq. 
(9), we set $\\alpha_{3} = \\alpha_{4} = 1, \\alpha_{5} = 16$ to keep them the same as LOFT-FOA [32], and set $\\alpha_{6} = 1, \\alpha_{7} = 8$ to balance the effects of the magnitude of these two losses.", + "bbox": [ + 496, + 310, + 890, + 415 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 430, + 630, + 446 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Datasets", + "text_level": 1, + "bbox": [ + 500, + 454, + 601, + 468 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In our experiments, we employ multi-supervised datasets for training our methods: (1) BONAI [32] provides building footprint segmentation, offset, and height annotations, which contains 3,000 and 300 images for train-val and test respectively; (2) OmniCity-view3 [21] originally provides satellite images with annotations for footprint segmentation and building height. We add additional offset annotations for 17,092 and 4,929 images from train-val and test sets respectively; (3) Additionally, we release a new dataset named HK, which includes 500 and 119 satellite images specifically captured from Hong Kong for train-val and test sets, along with annotations for footprint segmentation, offset and height.", + "bbox": [ + 496, + 478, + 890, + 672 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As detailed in Sec. 3, all our training samples are graded into three levels: samples from $\\mathcal{X}^N$ , $\\mathcal{X}^H$ , and $\\mathcal{X}^{OH}$ . To create different levels of training samples, we extract samples from the datasets mentioned above, reorganizing their annotations as necessary. We randomly choose $30\\%$ of the samples from the BONAI dataset [32] as a smaller $\\mathcal{X}^{OH}$ dataset, which we call $BN_{30}$ . We randomly drop the offset annotations of $70\\%$ of the samples in the BONAI dataset [32], regard the entire BONAI [32] dataset as a $\\mathcal{X}^{OH} + \\mathcal{X}^H$ dataset, and name it $BN_{30/70}$ . Similarly, the original BONAI dataset [32] is regarded as a large $\\mathcal{X}^{OH}$ and is named $BN_{100}$ . We use $OC$ to designate the OmniCity-view3 dataset [21]. Naturally, the abbreviations $OC_{30}$ , $OC_{30/70}$ , and $OC_{100}$ have the similar meaning with $BN_{30}$ , $BN_{30/70}$ , and $BN_{100}$ respectively. Moreover, we use $BH$", + "bbox": [ + 496, + 674, + 890, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "27732", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "to refer to the combination of BONAI [32] and HK. It is important to note that in $BH_{30/70}$ , $30\\%$ of BONAI's [32] samples are $\\mathcal{X}^{OH}$ type while the remaining $70\\%$ are $\\mathcal{X}^H$ type. Additionally, $30\\%$ of HK's samples belong to $\\mathcal{X}^{OH}$ type and the remaining $70\\%$ belong to $\\mathcal{X}^N$ type.", + "bbox": [ + 75, + 90, + 468, + 167 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Performance comparison", + "text_level": 1, + "bbox": [ + 76, + 179, + 307, + 194 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we evaluate our method's performance in footprint segmentation, offset prediction, and height prediction against several competitive methods for the single-level supervised learning scenario. In a Multi-level supervised learning scenario, we mainly compare our method with LOFT-FOA [32]. 
Additionally, we present our method's offset and off-nadir angles prediction performance. More results will be provided in the supplementary materials.", + "bbox": [ + 75, + 202, + 468, + 323 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Single-level supervised learning. The performance of footprint segmentation and offset prediction for different methods trained on $BN_{100}$ and $OC_{100}$ are listed in Tab. 1 and Tab. 2, respectively. Additionally, Fig. 3 provides a qualitative comparison of footprint segmentation results on the BONAI [32] test set. Note that all the experimental results in this section are obtained using $\\mathcal{X}^{OH}$ samples, and the results obtained using $\\mathcal{X}^H$ and $\\mathcal{X}^N$ samples will be analysed in the following paragraph. For the footprint segmentation task, experimental results tested on $BN_{100}$ demonstrate that our method improves the F1-score by $5.42\\% - 8.30\\%$ compared with the instance segmentation methods that directly extract the building footprints. Furthermore, our method enhances the F1-score by $2.05\\% - 2.76\\%$ relative to MTBR-Net [19] and LOFT-FOA [32], which are specifically designed for extracting off-nadir building footprints based on predicted roof and offset, tested on $BN_{100}$ . Regarding the offset prediction task, our experimental findings indicate that our approach betters the EPE by 0.18 - 0.93 in comparison to MTBR-Net [19] and LOFT-FOA [32] tested on $BN_{100}$ . The results show that the direct supervision of the footprint segmentation, the constraint on the building height, and the encouragement of the angular feature extraction can help to achieve better performance in the footprint segmentation and offset prediction tasks in the single-level supervised learning scenario.", + "bbox": [ + 75, + 325, + 468, + 717 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/004c1992528d9ca91b2c900e335ff8100a31d27139f945d70e0c784465aa879f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
method | F1 | Precision | Recall | EPE
PANet [17] | 58.06 | 59.26 | 56.91 | -
HRNetv2 [30] | 60.81 | 61.20 | 60.42 | -
M R-CNN [13] | 58.12 | 59.26 | 57.03 | -
CM R-CNN [1] | 60.94 | 67.09 | 55.83 | -
MTBR-Net [19] | 63.60 | 64.34 | 62.87 | 5.69
LOFT-FOA [32] | 64.31 | 63.37 | 65.29 | 4.94
Ours | 66.36 | 65.90 | 66.83 | 4.76
", + "bbox": [ + 122, + 733, + 424, + 843 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/a48e93cbac87a30de3c80d8d64c776a7e53fd4d4273a1f7c68db22d9ff7d0494.jpg", + "table_caption": [ + "Table 1. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE trained on $BN_{100}$ ." + ], + "table_footnote": [], + "table_body": "
method | F1 | Precision | Recall | EPE
M R-CNN [13] | 69.75 | 69.74 | 69.76 | -
LOFT-FOA [32] | 70.46 | 68.77 | 72.23 | 6.08
Ours | 72.25 | 69.57 | 75.14 | 5.38
", + "bbox": [ + 517, + 88, + 877, + 162 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4256aa5620a724c611023dfd0b2d6cf2b2e317d7b1b0eb38edda2feadea7f029.jpg", + "image_caption": [ + "Figure 3. The results of the baselines and our method trained on $BN_{100}$ and tested on the BONAI test set in terms of the footprint segmentation performance. The yellow, cyan, and red polygons denote the TP, FP, and FN." + ], + "image_footnote": [], + "bbox": [ + 501, + 243, + 890, + 558 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/86e9236639ba8ef560f0507f155dffc8b476818489bed2e4a535b0b18c8e1ede.jpg", + "table_caption": [ + "Table 2. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE trained on $OC_{100}$ ." + ], + "table_footnote": [], + "table_body": "
method | dataset | sample | F1-score | EPE
LOFT-FOA [32] | BN30 | XOH | 61.35 | 5.70
Ours | BN30/70 | XOH+XH | 65.49 | 5.39
LOFT-FOA [32] | BN100 | XOH | 64.31 | 4.94
Ours | BN100 | XOH | 66.36 | 4.76
LOFT-FOA [32] | OC30 | XOH | 67.09 | 6.08
Ours | OC30/70 | XOH+XH | 70.53 | 5.92
LOFT-FOA [32] | OC100 | XOH | 70.46 | 5.38
Ours | OC100 | XOH | 72.25 | 5.38
LOFT-FOA [32] | BH30 | XOH | 54.96 | 5.78
Ours | BH30/70 | XOH+XH+XN | 58.57 | 5.60
LOFT-FOA [32] | BH100 | XOH | 60.85 | 4.74
Ours | BH100 | XOH | 60.92 | 4.69
", + "bbox": [ + 504, + 656, + 890, + 843 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3. Building footprint segmentation results of different methods in terms of F1-score (\\%) and offset prediction results in terms of EPE trained on different datasets.", + "bbox": [ + 498, + 854, + 890, + 895 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "27733", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Multi-level supervised learning. Tab. 3 displays the footprint segmentation and offset prediction performance of LOFT-FOA [32] and our method when trained and tested on multi-level supervision datasets. Our approach's experiment outcomes, trained on $BN_{30/70}$ , $OC_{30/70}$ and $BH_{30/70}$ , demonstrate a $4.14\\%$ , $3.44\\%$ and $3.61\\%$ improvement in F1-score compared to LOFT-FOA [32] trained on $BN_{30}$ , $OC_{30}$ and $BH_{30}$ . Additionally, our method's experimental results, trained on samples from $BN_{30/70}$ , $OC_{30/70}$ and $BH_{30/70}$ exhibit similar performance to LOFT-FOA [32], which is trained on samples from $BN_{100}$ , $OC_{100}$ and $BH_{100}$ . These findings demonstrate the effectiveness of MLS-BRN in combining samples from $\\mathcal{X}^{OH}$ , $\\mathcal{X}^H$ and $\\mathcal{X}^N$ levels to address the building reconstruction task.", + "bbox": [ + 76, + 90, + 472, + 301 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Building height and angles prediction. Tab. 4 displays the results of building height prediction performance. The experimental findings indicate that our method enhances the height MAE by 0.22 - 4.33 and the height RMSE by 0.51 - 7.60 in comparison to SARPN [2], DORN [7], and LOFT-FOA+H. It's worth noting that SARPN [2], DORN [7] predicts pixel-wise building height, and MSL-BRN predicts instance-wise building height. As far as we know, MSL-BRN is the first-ever method to predict instance-wise real-world building height. Thus, we add a building height head directly to LOFT-FOA [32] (i.e. LOFT-FOA+H) and compare its prediction results with our own method. Fig. 4 presents the qualitative building height prediction results from our method and LOFT-FOA+H. Regarding the angle prediction tasks, when trained on $BN_{100}$ , the PBC module results in an MAE of 9.92 for offset angle prediction and an MAE of 1.22 for off-nadir angle prediction. The performance increase demonstrates the efficacy of the PBC, ROFE, and the building height prediction module in a single-level supervised learning scenario.", + "bbox": [ + 76, + 301, + 472, + 604 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c7a2aac3827cc9653346b778e97806ea835a730d9d92c82ca5bc7d0945522d58.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
method | height MAE | height RMSE
SARPN [2] | 15.23 | 28.69
DORN [7] | 13.40 | 27.03
LOFT-FOA+H | 11.12 | 21.60
Ours | 10.90 | 21.09
", + "bbox": [ + 140, + 614, + 406, + 691 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/845c322b3c0a2bd0a627f6f754de5452a7686ba6b02c3dcfdb3b0e2859804871.jpg", + "table_caption": [ + "Table 4. Building height prediction results of different methods in terms of MAE and MSE trained on $OC_{100}$ and tested on the OmniCity-view3 test set." + ], + "table_footnote": [], + "table_body": "
method | F1-score | Precision | Recall | EPE
baseline | 61.35 | 61.84 | 61.65 | 5.70
+PBC | 62.32 | 62.28 | 62.35 | 5.53
+ROFE | 62.87 | 63.89 | 62.15 | 5.63
+PBC+ROFE | 65.40 | 66.74 | 64.12 | 5.49
", + "bbox": [ + 120, + 768, + 426, + 844 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. Footprint segmentation results of different modules in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE.", + "bbox": [ + 75, + 854, + 470, + 897 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2b98b8cbe3821137140de9b550528a797c021425e10653772dbd86d93fbb2c32.jpg", + "image_caption": [ + "Figure 4. The visualization results of building height prediction from our method and LOFT-FOA+H on the OmniCity-view3 test set." + ], + "image_footnote": [], + "bbox": [ + 553, + 87, + 839, + 305 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Ablation study", + "text_level": 1, + "bbox": [ + 498, + 390, + 650, + 407 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we examine the impact of the principal new components of our method: (1) the PBC module; (2) the ROFE module; and (3) the building height head. Additionally, we will analyze the outcome of the data ablation experiment in the multi-level supervised learning setting.", + "bbox": [ + 496, + 415, + 890, + 491 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Module ablation. The outcomes acquired by implementing the aforementioned modules successively on $BN_{30/70}$ are detailed in Tab. 5. The table provides information on F1-score for footprint segmentation and EPE for offset prediction. LOFT-FOA [32] is trained on $BN_{30}$ and serves as the baseline. The second row (+PBC) illustrates the results obtained by applying the PBC module to LOFT-FOA [32]. The results indicate that incorporating the two-angle prediction tasks enhances the F1-score of the footprint extraction by $0.97\\%$ . It should be noted that the added offset-unknown $70\\%$ samples in $BN_{30/70}$ , which lacks angle ground truth, does not contribute to PBC's training. The third row (+ROFE) displays the outcomes achieved by applying the ROFE module to LOFT-FOA [32]. Results demonstrate that, compared with the baseline, prediction of the footprint segmentation guided by predicted offset and roof, coupled with additional $70\\%$ offset-unknown samples from $BN_{30/70}$ , leads to a $1.52\\%$ improvement in the F1-score. The fourth row (+PBC+ROFE) indicates that the simultaneous inclusion of the PBC and ROFE modules can improve the F1-score of the footprint extraction by $4.05\\%$ . The aforementioned results show that PBC and ROFE modules can help to enhance the accuracy of footprint segmentation and offset prediction.", + "bbox": [ + 496, + 491, + 892, + 852 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Data ablation. 
The outcomes of our approach trained on various dataset combinations concerning F1-score for footprint segmentation, and EPE for offset prediction are", + "bbox": [ + 496, + 854, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "27734", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d09418cf9fe9a336ba00b7358533bf0ad5d0e0bc36e680a03651d73b91b781b0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 153, + 89, + 316, + 218 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bbdf1f274d2dae90c301224ae4326d94fef0b200c52dcf87e089cc7e0019a5f3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 318, + 90, + 482, + 217 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b77dffcb8e12fbe8d10f207bd4c4afe040fd186c7c2cc12ed37a307ed8cdc271.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 485, + 90, + 650, + 218 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/628fe83b128a2348c00d3d6176c870c4830402d97500f2e9f195303bf3641fb7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 90, + 816, + 218 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7a01ae5ddb52c7b543be1e6ad8cb89c54a8844c178546b5b9cec74b89e489023.jpg", + "image_caption": [ + "Shanghai" + ], + "image_footnote": [], + "bbox": [ + 151, + 229, + 316, + 330 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0daf4c90c98acb9c43668a1321fcdc6a89f44ad8e224348fac6b356a29c04a65.jpg", + "image_caption": [ + "Xi'an" + ], + "image_footnote": [], + "bbox": [ + 320, + 239, + 483, + 329 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f4981fd0461ca0f0987164acf84c46b7c4488af7a04d5697eda8e8bae4450307.jpg", + "image_caption": [ + "Hong Kong", + "Figure 5. 3D reconstruction results of Shanghai, Xi'an, Hong Kong, and New York obtained using our method. The remote sensing images for Shanghai and Xi'an are chosen from the BONAI test set, whereas the remote sensing image for New York is chosen from the OmniCity-view3 test set." + ], + "image_footnote": [], + "bbox": [ + 486, + 227, + 650, + 329 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dbb35c1ef77098f9dcb0791f28b97b38d1cd99ea818f79145c1e493594bb5c4f.jpg", + "image_caption": [ + "New York" + ], + "image_footnote": [], + "bbox": [ + 653, + 229, + 818, + 329 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "shown in Tab. 6. The first line $(\\mathcal{X}^{OH})$ displays the results of training LOFT-FOA [32] on $30\\%$ of OmniCity-view3 [21] $\\mathcal{X}^{OH}$ samples $(OC_{30})$ . The second row $(\\mathcal{X}^{OH} + \\mathcal{X}^{H})$ shows the results of our method trained on a mix of $30\\%$ of OmniCity-view3 [21] $\\mathcal{X}^{OH}$ samples $(OC_{30})$ and $30\\%$ of the OmniCity-view3 $\\mathcal{X}^{H}$ samples. The results demonstrate a $3.28\\%$ improvement in F1-score for footprint extraction compared to LOFT-FOA [32] trained solely on $OC_{30}$ . The third row $(\\mathcal{X}^{OH} + \\mathcal{X}^{H} + \\mathcal{X}^{N})$ presents the outcomes of our methodology, trained on a mix of $30\\%$ of OmniCity-view3 [21] $\\mathcal{X}^{OH}$ samples, $30\\%$ of OmniCity-view3 [21] $\\mathcal{X}^{H}$ samples, and the rest $40\\%$ of OmniCity-view3 [21] $\\mathcal{X}^{N}$ samples. 
The results demonstrate a $0.44\\%$ increase in F1-score compared to our method trained on $\\mathcal{X}^{OH} + \\mathcal{X}^{H}$ , indicating the effectiveness of including $\\mathcal{X}^{N}$ samples. The reason for training LOFT-FOA [32] instead of our method on $OC_{30}$ (first row) is to evaluate the gain in a scenario where $\\mathcal{X}^{H}$ and $\\mathcal{X}^{N}$ samples are available by using our method.", + "bbox": [ + 75, + 430, + 472, + 705 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c72e98212733f346370b487ed341318320e2225621a12f91e20d6dc3cc41982a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>data</td><td>F1</td><td>Precision</td><td>Recall</td><td>EPE</td></tr>
<tr><td>XOH</td><td>67.09</td><td>63.23</td><td>71.47</td><td>6.08</td></tr>
<tr><td>XOH+XH</td><td>70.37</td><td>65.35</td><td>76.24</td><td>5.99</td></tr>
<tr><td>XOH+XH+XN</td><td>70.81</td><td>66.15</td><td>76.18</td><td>5.84</td></tr></table>
", + "bbox": [ + 122, + 714, + 424, + 779 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 6. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE trained on different dataset combinations.", + "bbox": [ + 75, + 787, + 470, + 830 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. 3D reconstruction results of different cities", + "text_level": 1, + "bbox": [ + 76, + 847, + 441, + 862 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Fig. 5 shows the 3D reconstruction results of four cities (i.e. Shanghai, Xi'an, Hong Kong, and New York) obtained from", + "bbox": [ + 75, + 869, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "our method. The results demonstrate the effectiveness of our method on 3D building reconstruction across different cities. Note that we use the method in [38] to regularize the predicted building footprint masks.", + "bbox": [ + 496, + 431, + 893, + 494 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 506, + 619, + 522 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we have presented a new method for multi-level supervised building reconstruction from monocular remote sensing images, which is capable of reconstructing the accurate 3D building models using samples of different annotation levels. Qualitative and quantitative evaluations confirm that our method achieves competitive performance and significantly enhances the 3D building reconstruction capability in comparison to the current state-of-the-art across diverse experimental settings. The effect of the Pseudo Building Bbox Calculator and the Roof-Offset guided Footprint Extractor, as well as the annotation levels of the samples were also analyzed in the ablation study. Furthermore, we expanded the monocular building reconstruction datasets to encompass additional cities. We believe that our approach offers efficient and cost-effective solutions for 3D building reconstruction in complex real-world scenes. In our future work, we would like to investigate more effective strategies to improve the 3D building reconstruction performance whilst exploring more adaptable and practical techniques for large-scale city modeling.", + "bbox": [ + 496, + 530, + 890, + 833 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. This project was funded in part by National Natural Science Foundation of China (Grant No. 42201358 and No. 62325111) and Shanghai Artificial Intelligence Laboratory.", + "bbox": [ + 496, + 833, + 893, + 893 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "27735", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Kai Chen, Jiangmiao Pang, Jiaqi Wang, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jianping Shi, Wanli Ouyang, et al. Hybrid task cascade for instance segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4974-4983, 2019. 6", + "[2] Xiaotian Chen, Xuejin Chen, and Zheng-Jun Zha. Structure-aware residual pyramid network for monocular depth estimation. 
In Proceedings of the 28th International Joint Conference on Artificial Intelligence, pages 694-700, 2019. 7", + "[3] Yujin Chen, Zhigang Tu, Liuhao Ge, Dejun Zhang, Ruizhi Chen, and Junsong Yuan. So-handnet: Self-organizing network for 3d hand pose estimation with semi-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6961–6970, 2019. 3", + "[4] Gordon Christie, Rodrigo Rene Rai Munoz Abujder, Kevin Foster, Shea Hagstrom, Gregory D Hager, and Myron Z Brown. Learning geocentric object pose in oblique monocular images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14512-14520, 2020. 1, 2, 3", + "[5] Gordon Christie, Kevin Foster, Shea Hagstrom, Gregory D Hager, and Myron Z Brown. Single view geocentric pose in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1162-1171, 2021. 1, 3", + "[6] Liuyun Duan and Florent Lafarge. Towards large-scale city reconstruction from satellites. In European Conference on Computer Vision (ECCV), 2016. 1", + "[7] Huan Fu, Mingming Gong, Chaohui Wang, Kayhan Bat-manghelich, and Dacheng Tao. Deep ordinal regression network for monocular depth estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2002-2011, 2018. 7", + "[8] Zhi Gao, Wenbo Sun, Yao Lu, Yichen Zhang, Weiwei Song, Yongjun Zhang, and Ruifang Zhai. Joint learning of semantic segmentation and height estimation for remote sensing image leveraging contrastive learning. IEEE Transactions on Geoscience and Remote Sensing, 2023. 2", + "[9] Pedram Ghamisi and Naoto Yokoya. Img2dsm: Height simulation from single imagery using conditional generative adversarial net. IEEE Geoence Remote Sensing Letters, pages 1-5, 2018. 2", + "[10] JunYoung Gwak, Christopher B Choy, Manmohan Chandraker, Animesh Garg, and Silvio Savarese. Weakly supervised 3d reconstruction with adversarial constraint. In 2017 International Conference on 3D Vision (3DV), pages 263-272. IEEE, 2017. 3", + "[11] Junwei Han, Yang Yang, Dingwen Zhang, Dong Huang, Dong Xu, and Fernando De La Torre. Weakly-supervised learning of category-specific 3d object shapes. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(4): 1423-1437, 2021. 3", + "[12] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision (CVPR), pages 2961-2969, 2017. 4, 6", + "[14] Rongrong Ji, Ke Li, Yan Wang, Xiaoshuai Sun, Feng Guo, Xiaowei Guo, Yongjian Wu, Feiyue Huang, and Jiebo Luo. Semi-supervised adversarial monocular depth estimation. IEEE transactions on pattern analysis and machine intelligence, 42(10):2410-2422, 2019. 3", + "[15] Saket Kunwar. U-net ensemble for semantic and height estimation using coarse-map initialization. In IGARSS 2019-2019 IEEE International Geoscience and Remote Sensing Symposium, pages 4959-4962. IEEE, 2019. 1, 2", + "[16] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. Robust model-based face reconstruction through weakly-supervised outlier segmentation. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 372–381, 2023. 3", + "[17] Muxingzi Li, Florent Lafarge, and Renaud Marlet. Approximating shapes in images with low-complexity polygons. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6", + "[18] Qingyu Li, Lichao Mou, Yuansheng Hua, Yilei Shi, Sining Chen, Yao Sun, and Xiao Xiang Zhu. 3dcentripetalnet: Building height retrieval from monocular remote sensing imagery. International Journal of Applied Earth Observation and Geoinformation, 120:103311, 2023. 2", + "[19] Weijia Li, Lingxuan Meng, Jinwang Wang, Conghui He, Gui-Song Xia, and Dahua Lin. 3d building reconstruction from monocular remote sensing images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12548-12557, 2021. 1, 2, 3, 6", + "[20] Weijia Li, Wenqian Zhao, Huaping Zhong, Conghui He, and Dahua Lin. Joint semantic-geometric learning for polygonal building segmentation. In AAAI, 2021. 2", + "[21] Weijia Li, Yawen Lai, Linning Xu, Yuanbo Xiangli, Jinhua Yu, Conghui He, Gui-Song Xia, and Dahua Lin. Omnicity: Omnipotent city understanding with multi-level and multiview images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17397-17407, 2023. 5, 8", + "[22] Zuoyue Li, Jan Dirk Wegner, and Aurélien Lucchi. Topological map extraction from overhead images. In Proceedings of the IEEE International Conference on Computer Vision (CVPR), pages 1715-1724, 2019. 2", + "[23] Tsung-Yi Lin, Piotr Dólar, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 2117-2125, 2017. 5", + "[24] Jisan Mahmud, True Price, Akash Bapat, and Jan Michael Frahm. Boundary-aware 3d building reconstruction from a single overhead image. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 1, 2", + "[25] Yongqiang Mao, Kaiqiang Chen, Liangjin Zhao, Wei Chen, Deke Tang, Wenjie Liu, Zhirui Wang, Wenhui Diao, Xian" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "27736", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Sun, and Kun Fu. Elevation estimation-driven building 3d reconstruction from single-view remote sensing imagery. IEEE Transactions on Geoscience and Remote Sensing, 2023. 1, 2", + "[26] Rahul Mitra, Nitesh B Gundavarapu, Abhishek Sharma, and Arjun Jain. Multiview-consistent semi-supervised learning for 3d human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6907-6916, 2020. 3", + "[27] Natalia Neverova, Christian Wolf, Florian Nebout, and Graham W Taylor. Hand pose estimation through semi-supervised and weakly-supervised learning. Computer Vision and Image Understanding, 164:56-67, 2017. 3", + "[28] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234-241. Springer, 2015. 3", + "[29] Shivangi Srivastava, Michele Volpi, and Devis Tuia. Joint height estimation and semantic labeling of monocular aerial images with cnns. In Igarss IEEE International Geoscience Remote Sensing Symposium, 2017. 
1, 2", + "[30] Ke Sun, Yang Zhao, Borui Jiang, Tianheng Cheng, Bin Xiao, Dong Liu, Yadong Mu, Xinggang Wang, Wenyu Liu, and Jingdong Wang. High-resolution representations for labeling pixels and regions. arXiv preprint arXiv:1904.04514, 2019. 6", + "[31] Vivek Verma, Rakesh Kumar, and Stephen Hsu. 3d building detection and modeling from aerial lidar data. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2006. 1", + "[32] Jinwang Wang, Lingxuan Meng, Weijia Li, Wen Yang, Lei Yu, and Gui-Song Xia. Learning to extract building footprints from off-nadir aerial images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(1):1294-1301, 2022. 1, 2, 3, 4, 5, 6, 7, 8", + "[33] Zhitong Xiong, Wei Huang, Jingtao Hu, and Xiao Xiang Zhu. The benchmark: Transferable representation learning for monocular height estimation. IEEE Transactions on Geoscience and Remote Sensing, 2023. 1, 2", + "[34] Guandao Yang, Yin Cui, Serge Belongie, and Bharath Hariharan. Learning single-view 3d reconstruction with limited pose supervision. In Proceedings of the European Conference on Computer Vision (ECCV), pages 86-101, 2018. 3", + "[35] Jiangye Yuan. Learning building extraction in aerial scenes with convolutional networks. IEEE transactions on pattern analysis and machine intelligence, 40(11):2793-2798, 2017. 2", + "[36] Wufan Zhao, Claudio Persello, and Alfred Stein. Building outline delineation: From aerial images to polygons with an improved end-to-end learning framework. ISPRS journal of photogrammetry and remote sensing, 175:119-131, 2021. 2", + "[37] Zhuo Zheng, Yanfei Zhong, and Junjue Wang. Pop-net: Encoder-dual decoder for semantic segmentation and single-view height estimation. In IGARSS 2019-2019 IEEE International Geoscience and Remote Sensing Symposium, pages 4963-4966. IEEE, 2019. 1, 2" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[38] Stefano Zorzi, Ksenia Bittner, and Friedrich Fraundorfer. Machine-learned regularization and polygonization of building segmentation masks. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 3098–3105. IEEE, 2021. 8", + "[39] Stefano Zorzi, Shabab Bazrafkan, Stefan Habenschuss, and Friedrich Fraundorfer. *Polyworld: Polygonal building extraction with graph neural networks in satellite images*. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1848-1857, 2022. 
2" + ], + "bbox": [ + 501, + 90, + 892, + 232 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "27737", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_model.json b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_model.json new file mode 100644 index 0000000000000000000000000000000000000000..26dd0c72159b60fd82f26537ae15fc1c15c3c2fb --- /dev/null +++ b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_model.json @@ -0,0 +1,2134 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.044 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.13, + 0.834, + 0.177 + ], + "angle": 0, + "content": "3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.203, + 0.856, + 0.24 + ], + "angle": 0, + "content": "Weijia Li\\(^{1*}\\), Haote Yang\\(^{2*}\\), Zhenghao Hu\\(^{1}\\), Juepeng Zheng\\(^{1}\\), Gui-Song Xia\\(^{3}\\), Conghui He\\(^{2,4\\dagger}\\), Sun Yat-Sen University, Shanghai AI Laboratory, Wuhan University, SenseTime Research" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.242, + 0.81, + 0.275 + ], + "angle": 0, + "content": "{liweij29, zhengjp8}@mail.sysu.edu.cn, {yanghaote, heconghui}@pjlab.org.cn, huzhh9@mail2.sysu.edu.cn, guisong.xia@whu.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.309, + 0.314, + 0.327 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.342, + 0.474, + 0.675 + ], + "angle": 0, + "content": "3D building reconstruction from monocular remote sensing images is an important and challenging research problem that has received increasing attention in recent years, owing to its low cost of data acquisition and availability for large-scale applications. However, existing methods rely on expensive 3D-annotated samples for fully-supervised training, restricting their application to large-scale cross-city scenarios. In this work, we propose MLS-BRN, a multi-level supervised building reconstruction network that can flexibly utilize training samples with different annotation levels to achieve better reconstruction results in an end-to-end manner. To alleviate the demand on full 3D supervision, we design two new modules, Pseudo Building Bbox Calculator and Roof-Offset guided Footprint Extractor, as well as new tasks and training strategies for different types of samples. Experimental results on several public and new datasets demonstrate that our proposed MLS-BRN achieves competitive performance using much fewer 3D-annotated samples, and significantly improves the footprint extraction and 3D reconstruction performance compared with current state-of-the-art. The code and datasets of this work will be released at https://github.com/opendatalabMLS-BRN.git." 
+ }, + { + "type": "title", + "bbox": [ + 0.078, + 0.704, + 0.21, + 0.719 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.729, + 0.47, + 0.866 + ], + "angle": 0, + "content": "3D building reconstruction is a fundamental task for large-scale city modeling and has received increasing attention in recent studies. Among these studies, monocular 3D building reconstruction has become a promising and economic solution for large-scale real-world applications, owing to its lower data acquisition cost and larger data coverage compared to multi-view stereo imagery and LiDAR data [6, 31]. Meanwhile, the limited information of monocular images as well as the diversity of building structures also result in" + }, + { + "type": "image_caption", + "bbox": [ + 0.572, + 0.313, + 0.823, + 0.326 + ], + "angle": 0, + "content": "Training samples of different annotation levels" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.328, + 0.66, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.33, + 0.768, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.779, + 0.33, + 0.88, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.587, + 0.438, + 0.796, + 0.451 + ], + "angle": 0, + "content": "Monocular 3D building reconstruction" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.453, + 0.89, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.533, + 0.894, + 0.604 + ], + "angle": 0, + "content": "Figure 1. Our proposed method achieves 3D building reconstruction by training samples of different annotation levels. Large quantity of samples only include building footprint annotations, whereas a small quantity of samples contain extra roof-to-footprint offset and building height annotations." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.639, + 0.892, + 0.656 + ], + "angle": 0, + "content": "great challenges for large-scale 3D building reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.893, + 0.903 + ], + "angle": 0, + "content": "Inspired by the progress of supervised monocular depth estimation methods, deep neural networks have been broadly applied to monocular 3D building reconstruction studies. Most studies utilize building footprints or other types of semantic labels as prior information to facilitate building height estimation from near-nadir images [15, 24, 25, 29, 37]. Off-nadir images, by contrast, constitute a larger proportion of the remote sensing images and provide additional useful information for building height estimation, which have demonstrated significant potential in several recent studies [4, 5, 19, 32, 33]. Some studies designed geocentric pose estimation task considering the parallax effect of building roof and footprint [4, 5], aiming at estimating the height values instead of reconstruct a 3D model. Other studies leveraged the relation between different components of a building instance (e.g. roof, footprint," + }, + { + "type": "page_footnote", + "bbox": [ + 0.095, + 0.875, + 0.352, + 0.888 + ], + "angle": 0, + "content": "*These authors contributed equally to this work." + }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.888, + 0.223, + 0.9 + ], + "angle": 0, + "content": "† Corresponding author." 
+ }, + { + "type": "list", + "bbox": [ + 0.095, + 0.875, + 0.352, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "27728" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "and facade) as well as the offset between roof and footprint, which has proven to be an effective solution for 3D building reconstruction and accurate extraction of building footprints [19, 32]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.165, + 0.471, + 0.422 + ], + "angle": 0, + "content": "In general, existing monocular building reconstruction methods are designed for fully-supervised learning, requiring a large number of fully-annotated 3D labels for network training. However, due to the expensive annotation cost, the available datasets for 3D building reconstruction are still very insufficient, restricting existing 3D reconstruction methods to single city or single dataset scenarios. By contrast, owing to the low annotation cost and the increase of open map data, public building footprints have an extremely large coverage and quantity. Additionally, existing building datasets provide different levels of annotations, such as footprint only, footprint and pixel-wise height [4], footprint and offset vector [19, 32], etc. The large-scale 2D footprints and different levels of annotated datasets can provide new opportunities for enlarging 3D building reconstruction application scenarios and reducing the annotation cost if they are effectively utilized." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.435, + 0.47, + 0.691 + ], + "angle": 0, + "content": "In this work, we propose MLS-BRN, a Multi-Level Supervised Building Reconstruction Network based on monocular remote sensing images, which is a unified and flexible framework that is capable of utilizing the training samples with different annotation levels. To alleviate the demand on 3D annotations and enhance the building reconstruction performance, we design new tasks regarding the meta information of off-nadir images and two new modules, i.e., Pseudo Building Bbox Calculator and Roof-Offset guided Footprint Extractor, as well as a new training strategy based on different types of samples. Experimental results on several public and new datasets demonstrate that our method achieves competitive performance when only using a small proportion of 3D-annotated samples, and significantly improves the building segmentation and height estimation performance compared with current state-of-the-art. Our main contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.72, + 0.468, + 0.794 + ], + "angle": 0, + "content": "- We design MLS-BRN, a multi-level supervised building reconstruction network, which consists of new tasks and modules to enhance the relation between different components of a building instance and alleviate the demand on 3D annotations." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.796, + 0.468, + 0.84 + ], + "angle": 0, + "content": "- We propose a multi-level training strategy that enables the training of MLS-BRN with different supervision levels to further improve the 3D reconstruction performance." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.84, + 0.468, + 0.9 + ], + "angle": 0, + "content": "- We extend the monocular building reconstruction datasets to more cities. 
Comprehensive experiments under different settings demonstrate the potential of MLS-BRN in large-scale cross-city scenarios." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.72, + 0.468, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.637, + 0.105 + ], + "angle": 0, + "content": "2. Related work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.116, + 0.757, + 0.132 + ], + "angle": 0, + "content": "2.1. Building footprint extraction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.14, + 0.892, + 0.486 + ], + "angle": 0, + "content": "Building footprint extraction is an important prerequisite for monocular 3D building reconstruction. Various instance and semantic segmentation networks have been broadly applied to building extraction tasks. Many studies utilize multi-task segmentation network to improve the building segmentation performance. For instance, Yuan [35] proposed the signed distance representation for building footprint extraction, achieving better performance compared with the single-task fully-connected network. Similarly, in [24], a modified signed distance function was introduced and jointly learned with other tasks for predicting building footprint outlines and heights. To improve the geometry shapes of building extraction results, several methods directly predicted the vertices of a building polygon based on Recurrent Neural Network or Graph Neural Network [22, 36, 39], or combined the pixel-based multi-task segmentation network with a graph-based polygon refinement network using a rule-based module [20]. In addition, some recent studies converted building footprint extraction into roof segmentation and roof-to-footprint offset estimation tasks, which achieved promising performance for building footprint extraction, especially for high-rise buildings in off-nadir images [19, 32]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.488, + 0.892, + 0.637 + ], + "angle": 0, + "content": "In summary, most existing methods directly extract the building footprints and perform worse for high-rise buildings in off-nadir images. Offset-based methods can effectively alleviate this problem, but the expensive offset annotation efforts and the post-processing process are still inevitable. On the contrary, our work proposes a multi-level supervised solution that is capable of leveraging different types of samples to reduce the demand for offset annotation, achieving promising footprint extraction results in an end-to-end manner." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.651, + 0.831, + 0.667 + ], + "angle": 0, + "content": "2.2. Monocular 3D building reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Inspired by the progress of monocular depth estimation, deep neural networks have been widely used for monocular building height estimation in recent studies [8, 18, 33]. Most of these studies are designed for height estimation from near-nadir images, in which the building roof and footprint are almost overlapped. Some methods used an encoder-decoder network to regress the height values [25], or used a generative adversarial network to simulate a height map [9]. Moreover, the semantic labels have been utilized as effective priors in many existing methods considering the limited information provided from the near-nadir images for height estimation. 
Some studies designed a multitask network for joint footprint extraction and height estimation [8, 29, 37], while others exploit the semantic labels as prior information for height estimation [15]. In actual" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27729" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.09, + 0.868, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.314, + 0.896, + 0.385 + ], + "angle": 0, + "content": "Figure 2. An overview of our proposed method. Taking a monocular remote sensing image as input, our MLS-BRN generates a set of building bboxes, roof-to-footprint offsets, building heights, and pixel-wise roof masks. The predicted roof masks and their corresponding offsets are further integrated to predict pixel-wise footprint masks. The predicted footprint mask and building height are used to produce the final vectorized 3D model. Two novel modules are introduced: (1) the ROFE predicts footprint masks guided by the predicted roof masks and offsets; (2) the PBC predicts off-nadir and offset angles to calculate pseudo building bboxes for buildingbbox-unknown samples." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.41, + 0.471, + 0.56 + ], + "angle": 0, + "content": "scenarios, off-nadir images constitute a large proportion of the remote sensing images, in which the parallax effect of roof and footprint results in more challenges for extracting footprints but provides additional information for height estimation as well. Some recent studies [4, 5] design methods to learn the geocentric pose of buildings in off-nadir images for monocular height estimation [28], while others leverage the offset between building roof and footprint and the relation between different components to reconstruct a 3D building model [19, 32]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.563, + 0.47, + 0.668 + ], + "angle": 0, + "content": "In summary, the monocular building reconstruction methods in existing studies require expensive and fully-annotated 3D labels for supervised learning. Our proposed method, by contrast, is a unified and flexible framework for 3D building reconstruction with different supervision levels, which effectively reduces the demand for the large-scale 3D annotations." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.68, + 0.47, + 0.696 + ], + "angle": 0, + "content": "2.3. Monocular 3D reconstruction with fewer labels" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.471, + 0.902 + ], + "angle": 0, + "content": "In monocular 3D reconstruction in the general computer vision domain, several methods have been proposed for reducing the 3D annotation demand via weakly-supervised or semi-supervised learning [3, 11, 14, 16, 26]. In Yang et al. [34], a unified framework combining two types of supervisions was proposed, i.e., a small number of camera pose annotations and a large number of unlabeled images. In Neverova et al. [27], an intermediate representation containing important topological and structural information of hand was introduced to enable the weakly-supervised training for hand pose estimation. Concurrently, Gwak et al. 
[10] effectually leveraged a weak supervision type, i.e., foreground mask, as a substitute for costly 3D CAD annota" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.41, + 0.892, + 0.441 + ], + "angle": 0, + "content": "tions, which incorporates a raytrace pooling layer to enable perspective projection and backpropagation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.442, + 0.893, + 0.532 + ], + "angle": 0, + "content": "In contrast to the aforementioned studies, our proposed method leverages prior knowledge about the 3D structure of a building instance and the monocular remote sensing image, including the relation between roof, footprint, height, offset angle, and off-nadir angle, enabling multi-level supervised 3D reconstruction with fewer annotation efforts." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.548, + 0.599, + 0.563 + ], + "angle": 0, + "content": "3. Methods" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.574, + 0.682, + 0.588 + ], + "angle": 0, + "content": "3.1. Problem statement" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.597, + 0.892, + 0.748 + ], + "angle": 0, + "content": "Given an off-nadir remote sensing image \\(I\\) that includes buildings \\(B = \\{b_{1}, b_{2}, \\ldots, b_{N}\\}\\), the objective of monocular 3D building reconstruction is to identify all the footprints \\(F = \\{f_{1}, f_{2}, \\ldots, f_{N}\\}\\) and roofs \\(R = \\{r_{1}, r_{2}, \\ldots, r_{N}\\}\\) corresponding to \\(B\\). The difficulty is that the footprints of buildings may be partially visible from an off-nadir viewing angle. Thus, previous studies, including [19] and [32], typically solve this issue by training a deep neural network with samples annotated with both \\(F\\) and roof-to-footprint offsets \\(\\vec{V} = \\{v_{1}, v_{2}, \\ldots, v_{N}\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.825 + ], + "angle": 0, + "content": "However, the cost of annotating remote sensing images is still high, particularly for offset annotations. Therefore, we suggest addressing this issue by training a deep model that effectively uses samples containing both \\( F \\) and \\( \\vec{V} \\) annotations, alongside samples only annotated with \\( F \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.901 + ], + "angle": 0, + "content": "To facilitate training with offset-unknown samples, two tasks are included; one for predicting the off-nadir angle \\(\\theta_{I}\\) and the other for the offset angle \\(\\varphi_{I}\\). Additionally, an instance-wise footprint segmentation task is included to predict the footprint conditioned on the predicted roof and off" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "27730" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.198 + ], + "angle": 0, + "content": "set. Finally, a task for predicting real-world height is introduced to enhance the comprehension of the correlation between footprint and roof placement. In summary, four additional tasks are added to the original three tasks in LOFT-FOA [32]: (1) off-nadir angle prediction task; (2) offset angle prediction task; (3) footprint segmentation task; (4) real-world height prediction task." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.208, + 0.258, + 0.223 + ], + "angle": 0, + "content": "3.2. Network structure" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.232, + 0.473, + 0.475 + ], + "angle": 0, + "content": "Fig. 
2 illustrates the proposed architecture of our MLS-BRN. To facilitate multi-level supervised learning, two novel modules are introduced, namely the Pseudo Building Bbox Calculator (PBC) and the Roof-Offset guided Footprint Extractor (ROFE). The PBC module provides pseudo building boxes to determine the positivity/negativity of the region proposals from the RPN module when offset-unknown (i.e. building bbox-unknown) samples are processed in the MLS-BRN. The ROFE module has two significant functions. Firstly, it provides a more straightforward method to supervise the building footprint segmentation task. Secondly, it offers an indirect method of supervising offset prediction and roof segmentation for offset-unknown samples as they pass through the MLS-BRN. Additionally, a building height prediction task has been included in order to predict the real-world building height." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.495, + 0.413, + 0.51 + ], + "angle": 0, + "content": "3.2.1 Pseudo Building Bbox Calculator (PBC)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.52, + 0.47, + 0.671 + ], + "angle": 0, + "content": "Samples without the ground truth for building bounding box \\( b \\)-bbox\\(_{gt}\\) cannot be utilized by previous models, like LOFT-FOA [32]. To address this issue, we propose a module that predicts pseudo building bounding boxes to substitute \\( b \\)-bbox\\(_{gt}\\). For a provided off-nadir remote sensing image \\( I \\) and one building \\( b \\) contained by \\( I \\), we can describe the connection between the image-wise off-nadir angle \\( \\theta_{I} \\), the offset angle \\( \\varphi_{I} \\), the factor for scaling real-world height to pixel scale \\( s_{I} \\), and the building's height \\( h_{b} \\) and offset \\( \\vec{v}_{b} \\) using the following equation:" + }, + { + "type": "equation", + "bbox": [ + 0.136, + 0.683, + 0.469, + 0.737 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\vec {v} _ {b} = | | \\vec {v} _ {b} | | _ {2} \\times \\vec {e} \\\\ = \\left\\| \\vec {v} _ {b} \\right\\| _ {2} \\times \\left[ e _ {x}, e _ {y} \\right] \\tag {1} \\\\ = h _ {b} \\times s _ {I} \\times \\tan \\theta_ {I} \\times [ \\cos \\varphi_ {I}, \\sin \\varphi_ {I} ] \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.473, + 0.902 + ], + "angle": 0, + "content": "where \\(||\\vec{v}_b||_2\\) is the \\(L2\\) norm of the offset, \\(\\vec{e}\\) is the unit normal vector of \\(\\vec{v}_b\\). The PBC module uses an off-nadir angle head to predict an image-wise off-nadir angle \\(\\theta_{pred}\\) and an offset angle head to predict an image-wise offset angle \\(\\varphi_{pred}\\). Then, following Eq. (1), they are combined with the instance-wise building height ground truth \\(h_{gt}\\), and scale factor \\(s_{gt}\\) to compute the pseudo offset \\(\\vec{v}_{pred}\\). Finally, \\(f_{gt}\\) is translated to get the pseudo building bbox \\(b\\)-bbox \\(_{pred}\\) guided by \\(\\vec{v}_{pred}\\). \\(b\\)-bbox \\(_{pred}\\) will play the role of \\(b\\)-bbox \\(_{gt}\\) during the training of the building bbox-unknown samples." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.183 + ], + "angle": 0, + "content": "From the perspective of weak supervision, the PBC module extracts the image-wise angle information, i.e. the offset angle and the off-nadir angle, and uses it to supervise the instance-wise task. 
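As a reading aid for Eq. (1) and the pseudo-bbox step described above, here is a minimal Python sketch of how the PBC computation could be reproduced. The function names, the box-union reading of 'translating' the footprint box, and the toy numbers are illustrative assumptions, not the authors' released code.

```python
import math

def pseudo_offset(height_m, scale_px_per_m, off_nadir_rad, offset_angle_rad):
    # Eq. (1): offset length in pixels = h_b * s_I * tan(theta_I),
    # decomposed along the image-wise offset angle phi_I.
    length = height_m * scale_px_per_m * math.tan(off_nadir_rad)
    return (length * math.cos(offset_angle_rad), length * math.sin(offset_angle_rad))

def pseudo_building_bbox(footprint_bbox, offset):
    # Translate the footprint box by the pseudo offset and take the union with the
    # original box, so the pseudo box roughly covers both footprint and roof
    # (an assumption; the paper only states that the footprint is translated).
    x1, y1, x2, y2 = footprint_bbox
    dx, dy = offset
    return (min(x1, x1 + dx), min(y1, y1 + dy), max(x2, x2 + dx), max(y2, y2 + dy))

# Toy example: a 30 m building, 1.7 px/m scale, 40 deg off-nadir, 15 deg offset angle.
v = pseudo_offset(30.0, 1.7, math.radians(40.0), math.radians(15.0))
print(pseudo_building_bbox((100.0, 100.0, 160.0, 150.0), v))
```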
Note that for building height-unknown samples, the pseudo bounding boxes are calculated by directly enlarge the footprint boxes." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.201, + 0.89, + 0.216 + ], + "angle": 0, + "content": "3.2.2 Roof-Offset guided Footprint Extractor (ROFE)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.226, + 0.892, + 0.392 + ], + "angle": 0, + "content": "Previous works calculate the footprint mask in the inference stage by translating the inferred roof guided by the inferred offset. The ROFE module, however, predicts the footprint mask directly. It trains a convolutional network to learn the translation process, using the inferred roof mask and offset as inputs. For offset-aware (i.e. roof-aware) samples, this end-to-end training process adds more supervision on the offset head and the roof head. And for offset-unknown samples, which cannot contribute to the training of the offset head and the roof head due to lack of ground truth, ROFE provides an indirect way to supervise these two heads." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.401, + 0.673, + 0.417 + ], + "angle": 0, + "content": "3.3. Network training" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.425, + 0.892, + 0.499 + ], + "angle": 0, + "content": "In this section, we first introduce the loss functions in our MLS-BRN. Then we introduce our three levels of training samples graded by their level of supervision and their training strategies. The total hybrid loss is presented at the end of this section." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.519, + 0.658, + 0.533 + ], + "angle": 0, + "content": "3.3.1 Loss definition" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.543, + 0.892, + 0.618 + ], + "angle": 0, + "content": "The LOFT-FOA [32] is trained by minimising Eq. (2), where \\(\\mathcal{L}_{rp}\\), \\(\\mathcal{L}_{rc}\\), \\(\\mathcal{L}_{mh}\\) are the same as those in Mask R-CNN [13], i.e., the losses for the RPN, R-CNN, and mask head, respectively; \\(\\mathcal{L}_o\\) is the loss for the offset head, which is a standard smooth L1 Loss." + }, + { + "type": "equation", + "bbox": [ + 0.563, + 0.631, + 0.892, + 0.648 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {L F} = \\mathcal {L} _ {r p} + \\beta_ {1} \\mathcal {L} _ {r c} + \\beta_ {2} \\mathcal {L} _ {m h} + \\beta_ {3} \\mathcal {L} _ {o} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.892, + 0.736 + ], + "angle": 0, + "content": "The MLS-BRN model keeps the four losses the same as LOFT-FOA [32] and introduces new losses to train the newly added modules. The footprint mask loss of the ROFE module is the same as \\(\\mathcal{L}_{mh}\\), which is a standard cross entropy loss (Eq. (3))." + }, + { + "type": "equation", + "bbox": [ + 0.573, + 0.746, + 0.892, + 0.789 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {f} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\sum_ {c = 1} ^ {C} y _ {i, c} \\times \\log (p \\left(y _ {i, c}\\right)) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.799, + 0.892, + 0.858 + ], + "angle": 0, + "content": "The loss of the offset angle head of the PBC module is calculated according to Eq. (4), in which \\(\\mathcal{L}_{\\text{ova}}\\) denotes the offset angle loss; \\(\\vec{v}_{pred}\\) denotes the predicted unit normal vector of the offset." 
+ }, + { + "type": "equation", + "bbox": [ + 0.54, + 0.868, + 0.892, + 0.905 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {o v a} = \\mathcal {L} _ {a n g} + \\lambda_ {1} \\mathcal {L} _ {r e g} \\tag {4} \\\\ = | | \\vec {v} _ {p r e d} - \\vec {v} _ {g t} | | _ {1} + \\lambda_ {1} | | | | \\vec {v} _ {p r e d} | | _ {2} - 1 | | _ {1} \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "27731" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.138 + ], + "angle": 0, + "content": "The nadir angle head of the PBC module is trained following Eq. (5), where \\(\\mathcal{L}_{ona}\\) is the off-nadir angle loss; \\(\\theta_{pred}\\) is the predicted tangent of the off-nadir angle." + }, + { + "type": "equation", + "bbox": [ + 0.166, + 0.151, + 0.469, + 0.168 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {o n a}} = \\left\\| \\tan \\theta_ {\\text {p r e d}} - \\tan \\theta_ {g t} \\right\\| _ {1} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.181, + 0.469, + 0.227 + ], + "angle": 0, + "content": "The height head loss of our MLS-BRN is calculated by Eq. (6), in which \\(\\mathcal{L}_h\\) denotes the height loss; \\(h_{pred}\\) denotes the predicted building height." + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.241, + 0.469, + 0.258 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {h} = \\left| \\left| h _ {p r e d} - h _ {g t} \\right| \\right| _ {1} \\tag {6}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.279, + 0.327, + 0.294 + ], + "angle": 0, + "content": "3.3.2 Multi-level training strategy" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.304, + 0.469, + 0.349 + ], + "angle": 0, + "content": "In our proposed unified framework, all the training samples can be graded into three levels according to their level of supervision (Fig. 1):" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.351, + 0.468, + 0.395 + ], + "angle": 0, + "content": "- Level 1 samples: samples with only instance-wise footprint annotation, which are denoted by \\(\\mathcal{X}^N = \\{x_1^N, x_2^N, \\dots, x_{n_3}^N\\}\\). \\(N\\) means no additional supervision." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.396, + 0.468, + 0.441 + ], + "angle": 0, + "content": "- Level 2 samples: samples with instance-wise footprint and building height annotation, which are denoted by \\(\\mathcal{X}^H = \\{x_1^H,x_2^H,\\dots,x_{n_2}^H\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.441, + 0.468, + 0.486 + ], + "angle": 0, + "content": "- Level 3 samples: samples with instance-wise footprint, offset, and building height annotation, which are denoted by \\(\\mathcal{X}^{OH} = \\{x_1^{OH}, x_2^{OH}, \\dots, x_{n_1}^{OH}\\}\\)." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.351, + 0.468, + 0.486 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.487, + 0.469, + 0.532 + ], + "angle": 0, + "content": "Different levels of samples are supervised by different training strategies. As defined in Eq. (7), the loss function for \\(\\mathcal{X}^N\\) is only based on \\(\\mathcal{L}_f\\)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.233, + 0.546, + 0.469, + 0.562 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathcal {X} ^ {N}} = \\mathcal {L} _ {f} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.576, + 0.469, + 0.636 + ], + "angle": 0, + "content": "The loss function for \\(\\mathcal{X}^H\\) is defined in Eq. (8). In \\(\\mathcal{L}_{\\mathcal{X}^H}\\), the \\(\\mathcal{L}_{rp}\\) is activated since the PBC module can predict a high-quality pseudo building bbox, which is good enough to supervise the RPN module." + }, + { + "type": "equation", + "bbox": [ + 0.166, + 0.65, + 0.468, + 0.685 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\mathcal {X} ^ {H}} = \\mathcal {L} _ {\\mathcal {X} ^ {N}} + \\alpha_ {1} \\mathcal {L} _ {r p} + \\alpha_ {2} \\mathcal {L} _ {h} \\tag {8} \\\\ = \\mathcal {L} _ {f} + \\alpha_ {1} \\mathcal {L} _ {r p} + \\alpha_ {2} \\mathcal {L} _ {h} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.7, + 0.469, + 0.76 + ], + "angle": 0, + "content": "The loss function for \\(\\mathcal{X}^{OH}\\) is defined in Eq. (9). Compared with the original \\(\\mathcal{L}_{LF}\\), \\(\\mathcal{L}_{\\mathcal{X}^{OH}}\\) adds four more losses: \\(\\mathcal{L}_f\\), \\(\\mathcal{L}_h\\), \\(\\mathcal{L}_{ona}\\), \\(\\mathcal{L}_{ova}\\). The \\(\\mathcal{L}_{ona}\\) and \\(\\mathcal{L}_{ova}\\) are used for training the two angle heads of the PBC module." + }, + { + "type": "equation", + "bbox": [ + 0.096, + 0.775, + 0.468, + 0.828 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\chi^ {O H}} = \\mathcal {L} _ {\\chi^ {H}} + \\alpha_ {3} \\mathcal {L} _ {r c} + \\alpha_ {4} \\mathcal {L} _ {m h} \\\\ + \\alpha_ {5} \\mathcal {L} _ {o} + \\alpha_ {6} \\mathcal {L} _ {o n a} + \\alpha_ {7} \\mathcal {L} _ {o v a} \\tag {9} \\\\ = \\mathcal {L} _ {L F} + \\mathcal {L} _ {f} + \\alpha_ {2} \\mathcal {L} _ {h} + \\alpha_ {6} \\mathcal {L} _ {o n a} + \\alpha_ {7} \\mathcal {L} _ {o v a} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.841, + 0.469, + 0.872 + ], + "angle": 0, + "content": "The final hybrid loss is defined as the total loss of the three levels of training samples according to Eq. (10)." + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.886, + 0.469, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {\\mathcal {X} ^ {N}} + \\mathcal {L} _ {\\mathcal {X} ^ {H}} + \\mathcal {L} _ {\\mathcal {X} ^ {O H}} \\tag {10}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.092, + 0.714, + 0.107 + ], + "angle": 0, + "content": "3.4. Implementation details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.114, + 0.892, + 0.309 + ], + "angle": 0, + "content": "As mentioned in Fig. 2, we use ResNet-50 [12] with FPN [23] pre-trained on the ImageNet as the backbone. All the models are trained with a batch size of 4 using NVIDIA 3090 GPUs. To align with LOFT-FOA [32], we train 24 epochs for all the models, with the learning rate starting from 0.01 and decaying by a factor of 0.1 at the \\(16^{th}\\) and \\(22^{nd}\\) epochs. The SGD algorithm with a weight decay of 0.0001 and a momentum of 0.9 is used for all experiments. LOFT-FOA [32] is used as the basic architecture of the MLS-BRN model, and all the hyperparameters that occur in both LOFT-FOA [32] and MLS-BRN are the same, except for the learning rate mentioned above. All models are built in PyTorch." 
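To make the level-dependent objective of Eqs. (7)-(10) easier to follow, here is a minimal Python sketch of how the per-sample loss could be assembled for a mixed batch. The function name, the dictionary-of-terms interface and the toy numbers are illustrative assumptions (the default weights follow the values reported in Sec. 3.4); this is not the released implementation.

```python
def multi_level_loss(terms, level,
                     a1=1.0, a2=32.0, a3=1.0, a4=1.0, a5=16.0, a6=1.0, a7=8.0):
    # `terms` maps loss names ('f', 'rp', 'h', 'rc', 'mh', 'o', 'ona', 'ova')
    # to scalar values; `level` is the supervision level ('N', 'H' or 'OH').
    loss = terms["f"]                               # Eq. (7): footprint mask loss only
    if level in ("H", "OH"):
        loss += a1 * terms["rp"] + a2 * terms["h"]  # Eq. (8): add RPN and height losses
    if level == "OH":
        loss += (a3 * terms["rc"] + a4 * terms["mh"] + a5 * terms["o"]
                 + a6 * terms["ona"] + a7 * terms["ova"])  # Eq. (9): fully annotated
    return loss

# Eq. (10): the hybrid loss sums the contributions of all three sample levels.
batch = [({"f": 0.7}, "N"),
         ({"f": 0.6, "rp": 0.2, "h": 0.05}, "H"),
         ({"f": 0.5, "rp": 0.1, "h": 0.04, "rc": 0.3, "mh": 0.4,
           "o": 0.02, "ona": 0.1, "ova": 0.2}, "OH")]
print(sum(multi_level_loss(t, lvl) for t, lvl in batch))
```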
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.311, + 0.892, + 0.416 + ], + "angle": 0, + "content": "In Eq. (4), we set \\(\\lambda_{1} = 0.1\\) to balance the two loss items. In Eq. (8), we set \\(\\alpha_{1} = 1\\) to keep the loss weight of ROFE the same as the roof mask head, and set \\(\\alpha_{2} = 32\\) since the absolute building height loss value is relatively small. In Eq. (9), we set \\(\\alpha_{3} = \\alpha_{4} = 1, \\alpha_{5} = 16\\) to keep them the same as LOFT-FOA [32], and set \\(\\alpha_{6} = 1, \\alpha_{7} = 8\\) to balance the effects of the magnitude of these two losses." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.431, + 0.632, + 0.447 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.455, + 0.602, + 0.469 + ], + "angle": 0, + "content": "4.1. Datasets" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.479, + 0.892, + 0.674 + ], + "angle": 0, + "content": "In our experiments, we employ multi-supervised datasets for training our methods: (1) BONAI [32] provides building footprint segmentation, offset, and height annotations, which contains 3,000 and 300 images for train-val and test respectively; (2) OmniCity-view3 [21] originally provides satellite images with annotations for footprint segmentation and building height. We add additional offset annotations for 17,092 and 4,929 images from train-val and test sets respectively; (3) Additionally, we release a new dataset named HK, which includes 500 and 119 satellite images specifically captured from Hong Kong for train-val and test sets, along with annotations for footprint segmentation, offset and height." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.902 + ], + "angle": 0, + "content": "As detailed in Sec. 3, all our training samples are graded into three levels: samples from \\(\\mathcal{X}^N\\), \\(\\mathcal{X}^H\\), and \\(\\mathcal{X}^{OH}\\). To create different levels of training samples, we extract samples from the datasets mentioned above, reorganizing their annotations as necessary. We randomly choose \\(30\\%\\) of the samples from the BONAI dataset [32] as a smaller \\(\\mathcal{X}^{OH}\\) dataset, which we call \\(BN_{30}\\). We randomly drop the offset annotations of \\(70\\%\\) of the samples in the BONAI dataset [32], regard the entire BONAI [32] dataset as a \\(\\mathcal{X}^{OH} + \\mathcal{X}^H\\) dataset, and name it \\(BN_{30/70}\\). Similarly, the original BONAI dataset [32] is regarded as a large \\(\\mathcal{X}^{OH}\\) and is named \\(BN_{100}\\). We use \\(OC\\) to designate the OmniCity-view3 dataset [21]. Naturally, the abbreviations \\(OC_{30}\\), \\(OC_{30/70}\\), and \\(OC_{100}\\) have the similar meaning with \\(BN_{30}\\), \\(BN_{30/70}\\), and \\(BN_{100}\\) respectively. Moreover, we use \\(BH\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27732" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.47, + 0.168 + ], + "angle": 0, + "content": "to refer to the combination of BONAI [32] and HK. It is important to note that in \\( BH_{30/70} \\), \\( 30\\% \\) of BONAI's [32] samples are \\( \\mathcal{X}^{OH} \\) type while the remaining \\( 70\\% \\) are \\( \\mathcal{X}^H \\) type. Additionally, \\( 30\\% \\) of HK's samples belong to \\( \\mathcal{X}^{OH} \\) type and the remaining \\( 70\\% \\) belong to \\( \\mathcal{X}^N \\) type." 
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.18, + 0.308, + 0.195 + ], + "angle": 0, + "content": "4.2. Performance comparison" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.203, + 0.469, + 0.324 + ], + "angle": 0, + "content": "In this section, we evaluate our method's performance in footprint segmentation, offset prediction, and height prediction against several competitive methods for the single-level supervised learning scenario. In a Multi-level supervised learning scenario, we mainly compare our method with LOFT-FOA [32]. Additionally, we present our method's offset and off-nadir angles prediction performance. More results will be provided in the supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.326, + 0.47, + 0.718 + ], + "angle": 0, + "content": "Single-level supervised learning. The performance of footprint segmentation and offset prediction for different methods trained on \\( BN_{100} \\) and \\( OC_{100} \\) are listed in Tab. 1 and Tab. 2, respectively. Additionally, Fig. 3 provides a qualitative comparison of footprint segmentation results on the BONAI [32] test set. Note that all the experimental results in this section are obtained using \\( \\mathcal{X}^{OH} \\) samples, and the results obtained using \\( \\mathcal{X}^H \\) and \\( \\mathcal{X}^N \\) samples will be analysed in the following paragraph. For the footprint segmentation task, experimental results tested on \\( BN_{100} \\) demonstrate that our method improves the F1-score by \\( 5.42\\% - 8.30\\% \\) compared with the instance segmentation methods that directly extract the building footprints. Furthermore, our method enhances the F1-score by \\( 2.05\\% - 2.76\\% \\) relative to MTBR-Net [19] and LOFT-FOA [32], which are specifically designed for extracting off-nadir building footprints based on predicted roof and offset, tested on \\( BN_{100} \\). Regarding the offset prediction task, our experimental findings indicate that our approach betters the EPE by 0.18 - 0.93 in comparison to MTBR-Net [19] and LOFT-FOA [32] tested on \\( BN_{100} \\). The results show that the direct supervision of the footprint segmentation, the constraint on the building height, and the encouragement of the angular feature extraction can help to achieve better performance in the footprint segmentation and offset prediction tasks in the single-level supervised learning scenario." + }, + { + "type": "table", + "bbox": [ + 0.123, + 0.734, + 0.426, + 0.844 + ], + "angle": 0, + "content": "
<table><tr><td>method</td><td>F1</td><td>Precision</td><td>Recall</td><td>EPE</td></tr>
<tr><td>PANet [17]</td><td>58.06</td><td>59.26</td><td>56.91</td><td>-</td></tr>
<tr><td>HRNetv2 [30]</td><td>60.81</td><td>61.20</td><td>60.42</td><td>-</td></tr>
<tr><td>M R-CNN [13]</td><td>58.12</td><td>59.26</td><td>57.03</td><td>-</td></tr>
<tr><td>CM R-CNN [1]</td><td>60.94</td><td>67.09</td><td>55.83</td><td>-</td></tr>
<tr><td>MTBR-Net [19]</td><td>63.60</td><td>64.34</td><td>62.87</td><td>5.69</td></tr>
<tr><td>LOFT-FOA [32]</td><td>64.31</td><td>63.37</td><td>65.29</td><td>4.94</td></tr>
<tr><td>Ours</td><td>66.36</td><td>65.90</td><td>66.83</td><td>4.76</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.855, + 0.469, + 0.896 + ], + "angle": 0, + "content": "Table 1. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE trained on \\(BN_{100}\\)." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.089, + 0.878, + 0.163 + ], + "angle": 0, + "content": "
<table><tr><td>method</td><td>F1</td><td>Precision</td><td>Recall</td><td>EPE</td></tr>
<tr><td>M R-CNN [13]</td><td>69.75</td><td>69.74</td><td>69.76</td><td>-</td></tr>
<tr><td>LOFT-FOA [32]</td><td>70.46</td><td>68.77</td><td>72.23</td><td>6.08</td></tr>
<tr><td>Ours</td><td>72.25</td><td>69.57</td><td>75.14</td><td>5.38</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.172, + 0.892, + 0.214 + ], + "angle": 0, + "content": "Table 2. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE trained on \\(OC_{100}\\)." + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.244, + 0.892, + 0.559 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.571, + 0.892, + 0.626 + ], + "angle": 0, + "content": "Figure 3. The results of the baselines and our method trained on \\(BN_{100}\\) and tested on the BONAI test set in terms of the footprint segmentation performance. The yellow, cyan, and red polygons denote the TP, FP, and FN." + }, + { + "type": "table", + "bbox": [ + 0.506, + 0.657, + 0.892, + 0.844 + ], + "angle": 0, + "content": "
<table><tr><td>method</td><td>dataset</td><td>sample</td><td>F1-score</td><td>EPE</td></tr>
<tr><td>LOFT-FOA [32]</td><td>BN30</td><td>XOH</td><td>61.35</td><td>5.70</td></tr>
<tr><td>Ours</td><td>BN30/70</td><td>XOH+XH</td><td>65.49</td><td>5.39</td></tr>
<tr><td>LOFT-FOA [32]</td><td>BN100</td><td>XOH</td><td>64.31</td><td>4.94</td></tr>
<tr><td>Ours</td><td>BN100</td><td>XOH</td><td>66.36</td><td>4.76</td></tr>
<tr><td>LOFT-FOA [32]</td><td>OC30</td><td>XOH</td><td>67.09</td><td>6.08</td></tr>
<tr><td>Ours</td><td>OC30/70</td><td>XOH+XH</td><td>70.53</td><td>5.92</td></tr>
<tr><td>LOFT-FOA [32]</td><td>OC100</td><td>XOH</td><td>70.46</td><td>5.38</td></tr>
<tr><td>Ours</td><td>OC100</td><td>XOH</td><td>72.25</td><td>5.38</td></tr>
<tr><td>LOFT-FOA [32]</td><td>BH30</td><td>XOH</td><td>54.96</td><td>5.78</td></tr>
<tr><td>Ours</td><td>BH30/70</td><td>XOH+XH+XN</td><td>58.57</td><td>5.60</td></tr>
<tr><td>LOFT-FOA [32]</td><td>BH100</td><td>XOH</td><td>60.85</td><td>4.74</td></tr>
<tr><td>Ours</td><td>BH100</td><td>XOH</td><td>60.92</td><td>4.69</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.855, + 0.892, + 0.896 + ], + "angle": 0, + "content": "Table 3. Building footprint segmentation results of different methods in terms of F1-score (\\%) and offset prediction results in terms of EPE trained on different datasets." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "27733" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.302 + ], + "angle": 0, + "content": "Multi-level supervised learning. Tab. 3 displays the footprint segmentation and offset prediction performance of LOFT-FOA [32] and our method when trained and tested on multi-level supervision datasets. Our approach's experiment outcomes, trained on \\(BN_{30/70}\\), \\(OC_{30/70}\\) and \\(BH_{30/70}\\), demonstrate a \\(4.14\\%\\), \\(3.44\\%\\) and \\(3.61\\%\\) improvement in F1-score compared to LOFT-FOA [32] trained on \\(BN_{30}\\), \\(OC_{30}\\) and \\(BH_{30}\\). Additionally, our method's experimental results, trained on samples from \\(BN_{30/70}\\), \\(OC_{30/70}\\) and \\(BH_{30/70}\\) exhibit similar performance to LOFT-FOA [32], which is trained on samples from \\(BN_{100}\\), \\(OC_{100}\\) and \\(BH_{100}\\). These findings demonstrate the effectiveness of MLS-BRN in combining samples from \\(\\mathcal{X}^{OH}\\), \\(\\mathcal{X}^H\\) and \\(\\mathcal{X}^N\\) levels to address the building reconstruction task." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.303, + 0.473, + 0.606 + ], + "angle": 0, + "content": "Building height and angles prediction. Tab. 4 displays the results of building height prediction performance. The experimental findings indicate that our method enhances the height MAE by 0.22 - 4.33 and the height RMSE by 0.51 - 7.60 in comparison to SARPN [2], DORN [7], and LOFT-FOA+H. It's worth noting that SARPN [2], DORN [7] predicts pixel-wise building height, and MSL-BRN predicts instance-wise building height. As far as we know, MSL-BRN is the first-ever method to predict instance-wise real-world building height. Thus, we add a building height head directly to LOFT-FOA [32] (i.e. LOFT-FOA+H) and compare its prediction results with our own method. Fig. 4 presents the qualitative building height prediction results from our method and LOFT-FOA+H. Regarding the angle prediction tasks, when trained on \\(BN_{100}\\), the PBC module results in an MAE of 9.92 for offset angle prediction and an MAE of 1.22 for off-nadir angle prediction. The performance increase demonstrates the efficacy of the PBC, ROFE, and the building height prediction module in a single-level supervised learning scenario." + }, + { + "type": "table", + "bbox": [ + 0.141, + 0.616, + 0.408, + 0.692 + ], + "angle": 0, + "content": "
| method | height MAE | height RMSE |
| --- | --- | --- |
| SARPN [2] | 15.23 | 28.69 |
| DORN [7] | 13.40 | 27.03 |
| LOFT-FOA+H | 11.12 | 21.60 |
| Ours | 10.90 | 21.09 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.702, + 0.471, + 0.745 + ], + "angle": 0, + "content": "Table 4. Building height prediction results of different methods in terms of MAE and MSE trained on \\(OC_{100}\\) and tested on the OmniCity-view3 test set." + }, + { + "type": "table", + "bbox": [ + 0.122, + 0.769, + 0.428, + 0.845 + ], + "angle": 0, + "content": "
| method | F1-score | Precision | Recall | EPE |
| --- | --- | --- | --- | --- |
| baseline | 61.35 | 61.84 | 61.65 | 5.70 |
| +PBC | 62.32 | 62.28 | 62.35 | 5.53 |
| +ROFE | 62.87 | 63.89 | 62.15 | 5.63 |
| +PBC+ROFE | 65.40 | 66.74 | 64.12 | 5.49 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.855, + 0.471, + 0.898 + ], + "angle": 0, + "content": "Table 5. Footprint segmentation results of different modules in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE." + }, + { + "type": "image", + "bbox": [ + 0.554, + 0.088, + 0.841, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.316, + 0.895, + 0.358 + ], + "angle": 0, + "content": "Figure 4. The visualization results of building height prediction from our method and LOFT-FOA+H on the OmniCity-view3 test set." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.391, + 0.651, + 0.408 + ], + "angle": 0, + "content": "4.3. Ablation study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.416, + 0.892, + 0.492 + ], + "angle": 0, + "content": "In this section, we examine the impact of the principal new components of our method: (1) the PBC module; (2) the ROFE module; and (3) the building height head. Additionally, we will analyze the outcome of the data ablation experiment in the multi-level supervised learning setting." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.492, + 0.893, + 0.853 + ], + "angle": 0, + "content": "Module ablation. The outcomes acquired by implementing the aforementioned modules successively on \\(BN_{30/70}\\) are detailed in Tab. 5. The table provides information on F1-score for footprint segmentation and EPE for offset prediction. LOFT-FOA [32] is trained on \\(BN_{30}\\) and serves as the baseline. The second row (+PBC) illustrates the results obtained by applying the PBC module to LOFT-FOA [32]. The results indicate that incorporating the two-angle prediction tasks enhances the F1-score of the footprint extraction by \\(0.97\\%\\). It should be noted that the added offset-unknown \\(70\\%\\) samples in \\(BN_{30/70}\\), which lacks angle ground truth, does not contribute to PBC's training. The third row (+ROFE) displays the outcomes achieved by applying the ROFE module to LOFT-FOA [32]. Results demonstrate that, compared with the baseline, prediction of the footprint segmentation guided by predicted offset and roof, coupled with additional \\(70\\%\\) offset-unknown samples from \\(BN_{30/70}\\), leads to a \\(1.52\\%\\) improvement in the F1-score. The fourth row (+PBC+ROFE) indicates that the simultaneous inclusion of the PBC and ROFE modules can improve the F1-score of the footprint extraction by \\(4.05\\%\\). The aforementioned results show that PBC and ROFE modules can help to enhance the accuracy of footprint segmentation and offset prediction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Data ablation. 
The outcomes of our approach trained on various dataset combinations concerning F1-score for footprint segmentation, and EPE for offset prediction are" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "27734" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.154, + 0.09, + 0.318, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.319, + 0.091, + 0.483, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.486, + 0.091, + 0.651, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.091, + 0.817, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.153, + 0.23, + 0.318, + 0.331 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.205, + 0.336, + 0.26, + 0.349 + ], + "angle": 0, + "content": "Shanghai" + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.24, + 0.484, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.382, + 0.336, + 0.416, + 0.348 + ], + "angle": 0, + "content": "Xi'an" + }, + { + "type": "image", + "bbox": [ + 0.488, + 0.228, + 0.651, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.532, + 0.336, + 0.6, + 0.35 + ], + "angle": 0, + "content": "Hong Kong" + }, + { + "type": "image", + "bbox": [ + 0.655, + 0.23, + 0.82, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.703, + 0.336, + 0.762, + 0.348 + ], + "angle": 0, + "content": "New York" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.363, + 0.895, + 0.406 + ], + "angle": 0, + "content": "Figure 5. 3D reconstruction results of Shanghai, Xi'an, Hong Kong, and New York obtained using our method. The remote sensing images for Shanghai and Xi'an are chosen from the BONAI test set, whereas the remote sensing image for New York is chosen from the OmniCity-view3 test set." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.431, + 0.473, + 0.706 + ], + "angle": 0, + "content": "shown in Tab. 6. The first line \\((\\mathcal{X}^{OH})\\) displays the results of training LOFT-FOA [32] on \\(30\\%\\) of OmniCity-view3 [21] \\(\\mathcal{X}^{OH}\\) samples \\((OC_{30})\\). The second row \\((\\mathcal{X}^{OH} + \\mathcal{X}^{H})\\) shows the results of our method trained on a mix of \\(30\\%\\) of OmniCity-view3 [21] \\(\\mathcal{X}^{OH}\\) samples \\((OC_{30})\\) and \\(30\\%\\) of the OmniCity-view3 \\(\\mathcal{X}^{H}\\) samples. The results demonstrate a \\(3.28\\%\\) improvement in F1-score for footprint extraction compared to LOFT-FOA [32] trained solely on \\(OC_{30}\\). The third row \\((\\mathcal{X}^{OH} + \\mathcal{X}^{H} + \\mathcal{X}^{N})\\) presents the outcomes of our methodology, trained on a mix of \\(30\\%\\) of OmniCity-view3 [21] \\(\\mathcal{X}^{OH}\\) samples, \\(30\\%\\) of OmniCity-view3 [21] \\(\\mathcal{X}^{H}\\) samples, and the rest \\(40\\%\\) of OmniCity-view3 [21] \\(\\mathcal{X}^{N}\\) samples. The results demonstrate a \\(0.44\\%\\) increase in F1-score compared to our method trained on \\(\\mathcal{X}^{OH} + \\mathcal{X}^{H}\\), indicating the effectiveness of including \\(\\mathcal{X}^{N}\\) samples. 
The reason for training LOFT-FOA [32] instead of our method on \\(OC_{30}\\) (first row) is to evaluate the gain in a scenario where \\(\\mathcal{X}^{H}\\) and \\(\\mathcal{X}^{N}\\) samples are available by using our method." + }, + { + "type": "table", + "bbox": [ + 0.123, + 0.715, + 0.426, + 0.78 + ], + "angle": 0, + "content": "
| data | F1 | Precision | Recall | EPE |
| --- | --- | --- | --- | --- |
| XOH | 67.09 | 63.23 | 71.47 | 6.08 |
| XOH+XH | 70.37 | 65.35 | 76.24 | 5.99 |
| XOH+XH+XN | 70.81 | 66.15 | 76.18 | 5.84 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.789, + 0.471, + 0.832 + ], + "angle": 0, + "content": "Table 6. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE trained on different dataset combinations." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.848, + 0.442, + 0.863 + ], + "angle": 0, + "content": "4.4. 3D reconstruction results of different cities" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Fig. 5 shows the 3D reconstruction results of four cities (i.e. Shanghai, Xi'an, Hong Kong, and New York) obtained from" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.432, + 0.895, + 0.495 + ], + "angle": 0, + "content": "our method. The results demonstrate the effectiveness of our method on 3D building reconstruction across different cities. Note that we use the method in [38] to regularize the predicted building footprint masks." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.507, + 0.62, + 0.523 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.531, + 0.892, + 0.834 + ], + "angle": 0, + "content": "In this paper, we have presented a new method for multi-level supervised building reconstruction from monocular remote sensing images, which is capable of reconstructing the accurate 3D building models using samples of different annotation levels. Qualitative and quantitative evaluations confirm that our method achieves competitive performance and significantly enhances the 3D building reconstruction capability in comparison to the current state-of-the-art across diverse experimental settings. The effect of the Pseudo Building Bbox Calculator and the Roof-Offset guided Footprint Extractor, as well as the annotation levels of the samples were also analyzed in the ablation study. Furthermore, we expanded the monocular building reconstruction datasets to encompass additional cities. We believe that our approach offers efficient and cost-effective solutions for 3D building reconstruction in complex real-world scenes. In our future work, we would like to investigate more effective strategies to improve the 3D building reconstruction performance whilst exploring more adaptable and practical techniques for large-scale city modeling." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.834, + 0.894, + 0.895 + ], + "angle": 0, + "content": "Acknowledgements. This project was funded in part by National Natural Science Foundation of China (Grant No. 42201358 and No. 62325111) and Shanghai Artificial Intelligence Laboratory." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "27735" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Kai Chen, Jiangmiao Pang, Jiaqi Wang, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jianping Shi, Wanli Ouyang, et al. Hybrid task cascade for instance segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4974-4983, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.186, + 0.472, + 0.242 + ], + "angle": 0, + "content": "[2] Xiaotian Chen, Xuejin Chen, and Zheng-Jun Zha. 
Structure-aware residual pyramid network for monocular depth estimation. In Proceedings of the 28th International Joint Conference on Artificial Intelligence, pages 694-700, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.243, + 0.471, + 0.311 + ], + "angle": 0, + "content": "[3] Yujin Chen, Zhigang Tu, Liuhao Ge, Dejun Zhang, Ruizhi Chen, and Junsong Yuan. So-handnet: Self-organizing network for 3d hand pose estimation with semi-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6961–6970, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.312, + 0.471, + 0.395 + ], + "angle": 0, + "content": "[4] Gordon Christie, Rodrigo Rene Rai Munoz Abujder, Kevin Foster, Shea Hagstrom, Gregory D Hager, and Myron Z Brown. Learning geocentric object pose in oblique monocular images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14512-14520, 2020. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.397, + 0.471, + 0.464 + ], + "angle": 0, + "content": "[5] Gordon Christie, Kevin Foster, Shea Hagstrom, Gregory D Hager, and Myron Z Brown. Single view geocentric pose in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1162-1171, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.466, + 0.47, + 0.508 + ], + "angle": 0, + "content": "[6] Liuyun Duan and Florent Lafarge. Towards large-scale city reconstruction from satellites. In European Conference on Computer Vision (ECCV), 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.509, + 0.471, + 0.578 + ], + "angle": 0, + "content": "[7] Huan Fu, Mingming Gong, Chaohui Wang, Kayhan Bat-manghelich, and Dacheng Tao. Deep ordinal regression network for monocular depth estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2002-2011, 2018. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.579, + 0.47, + 0.648 + ], + "angle": 0, + "content": "[8] Zhi Gao, Wenbo Sun, Yao Lu, Yichen Zhang, Weiwei Song, Yongjun Zhang, and Ruifang Zhai. Joint learning of semantic segmentation and height estimation for remote sensing image leveraging contrastive learning. IEEE Transactions on Geoscience and Remote Sensing, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.649, + 0.47, + 0.703 + ], + "angle": 0, + "content": "[9] Pedram Ghamisi and Naoto Yokoya. Img2dsm: Height simulation from single imagery using conditional generative adversarial net. IEEE Geoence Remote Sensing Letters, pages 1-5, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.47, + 0.773 + ], + "angle": 0, + "content": "[10] JunYoung Gwak, Christopher B Choy, Manmohan Chandraker, Animesh Garg, and Silvio Savarese. Weakly supervised 3d reconstruction with adversarial constraint. In 2017 International Conference on 3D Vision (3DV), pages 263-272. IEEE, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.47, + 0.844 + ], + "angle": 0, + "content": "[11] Junwei Han, Yang Yang, Dingwen Zhang, Dong Huang, Dong Xu, and Fernando De La Torre. Weakly-supervised learning of category-specific 3d object shapes. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(4): 1423-1437, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[12] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 
Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[13] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision (CVPR), pages 2961-2969, 2017. 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.15, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[14] Rongrong Ji, Ke Li, Yan Wang, Xiaoshuai Sun, Feng Guo, Xiaowei Guo, Yongjian Wu, Feiyue Huang, and Jiebo Luo. Semi-supervised adversarial monocular depth estimation. IEEE transactions on pattern analysis and machine intelligence, 42(10):2410-2422, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.276 + ], + "angle": 0, + "content": "[15] Saket Kunwar. U-net ensemble for semantic and height estimation using coarse-map initialization. In IGARSS 2019-2019 IEEE International Geoscience and Remote Sensing Symposium, pages 4959-4962. IEEE, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.358 + ], + "angle": 0, + "content": "[16] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. Robust model-based face reconstruction through weakly-supervised outlier segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 372–381, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.362, + 0.892, + 0.417 + ], + "angle": 0, + "content": "[17] Muxingzi Li, Florent Lafarge, and Renaud Marlet. Approximating shapes in images with low-complexity polygons. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.42, + 0.892, + 0.488 + ], + "angle": 0, + "content": "[18] Qingyu Li, Lichao Mou, Yuansheng Hua, Yilei Shi, Sining Chen, Yao Sun, and Xiao Xiang Zhu. 3dcentripetalnet: Building height retrieval from monocular remote sensing imagery. International Journal of Applied Earth Observation and Geoinformation, 120:103311, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.49, + 0.892, + 0.559 + ], + "angle": 0, + "content": "[19] Weijia Li, Lingxuan Meng, Jinwang Wang, Conghui He, Gui-Song Xia, and Dahua Lin. 3d building reconstruction from monocular remote sensing images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12548-12557, 2021. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.561, + 0.892, + 0.602 + ], + "angle": 0, + "content": "[20] Weijia Li, Wenqian Zhao, Huaping Zhong, Conghui He, and Dahua Lin. Joint semantic-geometric learning for polygonal building segmentation. In AAAI, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.604, + 0.892, + 0.686 + ], + "angle": 0, + "content": "[21] Weijia Li, Yawen Lai, Linning Xu, Yuanbo Xiangli, Jinhua Yu, Conghui He, Gui-Song Xia, and Dahua Lin. Omnicity: Omnipotent city understanding with multi-level and multiview images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17397-17407, 2023. 
5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.689, + 0.892, + 0.744 + ], + "angle": 0, + "content": "[22] Zuoyue Li, Jan Dirk Wegner, and Aurélien Lucchi. Topological map extraction from overhead images. In Proceedings of the IEEE International Conference on Computer Vision (CVPR), pages 1715-1724, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.746, + 0.892, + 0.814 + ], + "angle": 0, + "content": "[23] Tsung-Yi Lin, Piotr Dólar, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 2117-2125, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.892, + 0.872 + ], + "angle": 0, + "content": "[24] Jisan Mahmud, True Price, Akash Bapat, and Jan Michael Frahm. Boundary-aware 3d building reconstruction from a single overhead image. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.874, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[25] Yongqiang Mao, Kaiqiang Chen, Liangjin Zhao, Wei Chen, Deke Tang, Wenjie Liu, Zhirui Wang, Wenhui Diao, Xian" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "27736" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.47, + 0.135 + ], + "angle": 0, + "content": "Sun, and Kun Fu. Elevation estimation-driven building 3d reconstruction from single-view remote sensing imagery. IEEE Transactions on Geoscience and Remote Sensing, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.137, + 0.472, + 0.207 + ], + "angle": 0, + "content": "[26] Rahul Mitra, Nitesh B Gundavarapu, Abhishek Sharma, and Arjun Jain. Multiview-consistent semi-supervised learning for 3d human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6907-6916, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.209, + 0.471, + 0.264 + ], + "angle": 0, + "content": "[27] Natalia Neverova, Christian Wolf, Florian Nebout, and Graham W Taylor. Hand pose estimation through semi-supervised and weakly-supervised learning. Computer Vision and Image Understanding, 164:56-67, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.267, + 0.47, + 0.336 + ], + "angle": 0, + "content": "[28] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234-241. Springer, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.339, + 0.47, + 0.395 + ], + "angle": 0, + "content": "[29] Shivangi Srivastava, Michele Volpi, and Devis Tuia. Joint height estimation and semantic labeling of monocular aerial images with cnns. In Igarss IEEE International Geoscience Remote Sensing Symposium, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.398, + 0.47, + 0.464 + ], + "angle": 0, + "content": "[30] Ke Sun, Yang Zhao, Borui Jiang, Tianheng Cheng, Bin Xiao, Dong Liu, Yadong Mu, Xinggang Wang, Wenyu Liu, and Jingdong Wang. High-resolution representations for labeling pixels and regions. arXiv preprint arXiv:1904.04514, 2019. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.469, + 0.47, + 0.524 + ], + "angle": 0, + "content": "[31] Vivek Verma, Rakesh Kumar, and Stephen Hsu. 3d building detection and modeling from aerial lidar data. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2006. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.527, + 0.47, + 0.595 + ], + "angle": 0, + "content": "[32] Jinwang Wang, Lingxuan Meng, Weijia Li, Wen Yang, Lei Yu, and Gui-Song Xia. Learning to extract building footprints from off-nadir aerial images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(1):1294-1301, 2022. 1, 2, 3, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.599, + 0.47, + 0.655 + ], + "angle": 0, + "content": "[33] Zhitong Xiong, Wei Huang, Jingtao Hu, and Xiao Xiang Zhu. The benchmark: Transferable representation learning for monocular height estimation. IEEE Transactions on Geoscience and Remote Sensing, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.657, + 0.47, + 0.713 + ], + "angle": 0, + "content": "[34] Guandao Yang, Yin Cui, Serge Belongie, and Bharath Hariharan. Learning single-view 3d reconstruction with limited pose supervision. In Proceedings of the European Conference on Computer Vision (ECCV), pages 86-101, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.716, + 0.47, + 0.769 + ], + "angle": 0, + "content": "[35] Jiangye Yuan. Learning building extraction in aerial scenes with convolutional networks. IEEE transactions on pattern analysis and machine intelligence, 40(11):2793-2798, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.773, + 0.47, + 0.829 + ], + "angle": 0, + "content": "[36] Wufan Zhao, Claudio Persello, and Alfred Stein. Building outline delineation: From aerial images to polygons with an improved end-to-end learning framework. ISPRS journal of photogrammetry and remote sensing, 175:119-131, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.47, + 0.9 + ], + "angle": 0, + "content": "[37] Zhuo Zheng, Yanfei Zhong, and Junjue Wang. Pop-net: Encoder-dual decoder for semantic segmentation and single-view height estimation. In IGARSS 2019-2019 IEEE International Geoscience and Remote Sensing Symposium, pages 4963-4966. IEEE, 2019. 1, 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.092, + 0.893, + 0.161 + ], + "angle": 0, + "content": "[38] Stefano Zorzi, Ksenia Bittner, and Friedrich Fraundorfer. Machine-learned regularization and polygonization of building segmentation masks. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 3098–3105. IEEE, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.893, + 0.233 + ], + "angle": 0, + "content": "[39] Stefano Zorzi, Shabab Bazrafkan, Stefan Habenschuss, and Friedrich Fraundorfer. *Polyworld: Polygonal building extraction with graph neural networks in satellite images*. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1848-1857, 2022. 
2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.092, + 0.893, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27737" + } + ] +] \ No newline at end of file diff --git a/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_origin.pdf b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..474450319a12f61dcc112d0e18061c1895c131cf --- /dev/null +++ b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ad91f98e89e47eb43ca423d633acc5855d7ba57c7efd5b55394622b1a0c8b05 +size 5391341 diff --git a/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/full.md b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/full.md new file mode 100644 index 0000000000000000000000000000000000000000..35455b73f4d5c69e640379403c24d2e8dae3e0ae --- /dev/null +++ b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/full.md @@ -0,0 +1,310 @@ +# 3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions + +Weijia Li $^{1*}$ , Haote Yang $^{2*}$ , Zhenghao Hu $^{1}$ , Juepeng Zheng $^{1}$ , Gui-Song Xia $^{3}$ , Conghui He $^{2,4\dagger}$ , Sun Yat-Sen University, Shanghai AI Laboratory, Wuhan University, SenseTime Research + +{liweij29, zhengjp8}@mail.sysu.edu.cn, {yanghaote, heconghui}@pjlab.org.cn, huzhh9@mail2.sysu.edu.cn, guisong.xia@whu.edu.cn + +# Abstract + +3D building reconstruction from monocular remote sensing images is an important and challenging research problem that has received increasing attention in recent years, owing to its low cost of data acquisition and availability for large-scale applications. However, existing methods rely on expensive 3D-annotated samples for fully-supervised training, restricting their application to large-scale cross-city scenarios. In this work, we propose MLS-BRN, a multi-level supervised building reconstruction network that can flexibly utilize training samples with different annotation levels to achieve better reconstruction results in an end-to-end manner. To alleviate the demand on full 3D supervision, we design two new modules, Pseudo Building Bbox Calculator and Roof-Offset guided Footprint Extractor, as well as new tasks and training strategies for different types of samples. Experimental results on several public and new datasets demonstrate that our proposed MLS-BRN achieves competitive performance using much fewer 3D-annotated samples, and significantly improves the footprint extraction and 3D reconstruction performance compared with current state-of-the-art. The code and datasets of this work will be released at https://github.com/opendatalabMLS-BRN.git. + +# 1. Introduction + +3D building reconstruction is a fundamental task for large-scale city modeling and has received increasing attention in recent studies. 
Among these studies, monocular 3D building reconstruction has become a promising and economic solution for large-scale real-world applications, owing to its lower data acquisition cost and larger data coverage compared to multi-view stereo imagery and LiDAR data [6, 31]. Meanwhile, the limited information of monocular images as well as the diversity of building structures also result in + +![](images/9f5db6e39225d5b826e5067100e20cdd9d49b0d7ed32878666e38393ca52c82b.jpg) +Training samples of different annotation levels + +![](images/0260dd48f10a2defdbcaac4633276157e283388fa4c2ee3ee3a66550bd18a6b9.jpg) + +![](images/07979d40fbfcae765e8fd4a67ecae21039709debc7748947828dab30f41bd727.jpg) + +![](images/aace82f59c79bbf32be3bc4991239091e7407c18a3b94d51158ee9397f190c3a.jpg) +Monocular 3D building reconstruction +Figure 1. Our proposed method achieves 3D building reconstruction by training samples of different annotation levels. Large quantity of samples only include building footprint annotations, whereas a small quantity of samples contain extra roof-to-footprint offset and building height annotations. + +great challenges for large-scale 3D building reconstruction. + +Inspired by the progress of supervised monocular depth estimation methods, deep neural networks have been broadly applied to monocular 3D building reconstruction studies. Most studies utilize building footprints or other types of semantic labels as prior information to facilitate building height estimation from near-nadir images [15, 24, 25, 29, 37]. Off-nadir images, by contrast, constitute a larger proportion of the remote sensing images and provide additional useful information for building height estimation, which have demonstrated significant potential in several recent studies [4, 5, 19, 32, 33]. Some studies designed geocentric pose estimation task considering the parallax effect of building roof and footprint [4, 5], aiming at estimating the height values instead of reconstruct a 3D model. Other studies leveraged the relation between different components of a building instance (e.g. roof, footprint, + +and facade) as well as the offset between roof and footprint, which has proven to be an effective solution for 3D building reconstruction and accurate extraction of building footprints [19, 32]. + +In general, existing monocular building reconstruction methods are designed for fully-supervised learning, requiring a large number of fully-annotated 3D labels for network training. However, due to the expensive annotation cost, the available datasets for 3D building reconstruction are still very insufficient, restricting existing 3D reconstruction methods to single city or single dataset scenarios. By contrast, owing to the low annotation cost and the increase of open map data, public building footprints have an extremely large coverage and quantity. Additionally, existing building datasets provide different levels of annotations, such as footprint only, footprint and pixel-wise height [4], footprint and offset vector [19, 32], etc. The large-scale 2D footprints and different levels of annotated datasets can provide new opportunities for enlarging 3D building reconstruction application scenarios and reducing the annotation cost if they are effectively utilized. + +In this work, we propose MLS-BRN, a Multi-Level Supervised Building Reconstruction Network based on monocular remote sensing images, which is a unified and flexible framework that is capable of utilizing the training samples with different annotation levels. 
To alleviate the demand on 3D annotations and enhance the building reconstruction performance, we design new tasks regarding the meta information of off-nadir images and two new modules, i.e., Pseudo Building Bbox Calculator and Roof-Offset guided Footprint Extractor, as well as a new training strategy based on different types of samples. Experimental results on several public and new datasets demonstrate that our method achieves competitive performance when only using a small proportion of 3D-annotated samples, and significantly improves the building segmentation and height estimation performance compared with current state-of-the-art. Our main contributions are summarized as follows: + +- We design MLS-BRN, a multi-level supervised building reconstruction network, which consists of new tasks and modules to enhance the relation between different components of a building instance and alleviate the demand on 3D annotations. +- We propose a multi-level training strategy that enables the training of MLS-BRN with different supervision levels to further improve the 3D reconstruction performance. +- We extend the monocular building reconstruction datasets to more cities. Comprehensive experiments under different settings demonstrate the potential of MLS-BRN in large-scale cross-city scenarios. + +# 2. Related work + +# 2.1. Building footprint extraction + +Building footprint extraction is an important prerequisite for monocular 3D building reconstruction. Various instance and semantic segmentation networks have been broadly applied to building extraction tasks. Many studies utilize multi-task segmentation network to improve the building segmentation performance. For instance, Yuan [35] proposed the signed distance representation for building footprint extraction, achieving better performance compared with the single-task fully-connected network. Similarly, in [24], a modified signed distance function was introduced and jointly learned with other tasks for predicting building footprint outlines and heights. To improve the geometry shapes of building extraction results, several methods directly predicted the vertices of a building polygon based on Recurrent Neural Network or Graph Neural Network [22, 36, 39], or combined the pixel-based multi-task segmentation network with a graph-based polygon refinement network using a rule-based module [20]. In addition, some recent studies converted building footprint extraction into roof segmentation and roof-to-footprint offset estimation tasks, which achieved promising performance for building footprint extraction, especially for high-rise buildings in off-nadir images [19, 32]. + +In summary, most existing methods directly extract the building footprints and perform worse for high-rise buildings in off-nadir images. Offset-based methods can effectively alleviate this problem, but the expensive offset annotation efforts and the post-processing process are still inevitable. On the contrary, our work proposes a multi-level supervised solution that is capable of leveraging different types of samples to reduce the demand for offset annotation, achieving promising footprint extraction results in an end-to-end manner. + +# 2.2. Monocular 3D building reconstruction + +Inspired by the progress of monocular depth estimation, deep neural networks have been widely used for monocular building height estimation in recent studies [8, 18, 33]. 
Most of these studies are designed for height estimation from near-nadir images, in which the building roof and footprint are almost overlapped. Some methods used an encoder-decoder network to regress the height values [25], or used a generative adversarial network to simulate a height map [9]. Moreover, the semantic labels have been utilized as effective priors in many existing methods considering the limited information provided from the near-nadir images for height estimation. Some studies designed a multitask network for joint footprint extraction and height estimation [8, 29, 37], while others exploit the semantic labels as prior information for height estimation [15]. In actual + +![](images/d3ca67c098577710a9ae026ded680c76d81571b9dc0d7e6ad63a70eeb02f4c79.jpg) +Figure 2. An overview of our proposed method. Taking a monocular remote sensing image as input, our MLS-BRN generates a set of building bboxes, roof-to-footprint offsets, building heights, and pixel-wise roof masks. The predicted roof masks and their corresponding offsets are further integrated to predict pixel-wise footprint masks. The predicted footprint mask and building height are used to produce the final vectorized 3D model. Two novel modules are introduced: (1) the ROFE predicts footprint masks guided by the predicted roof masks and offsets; (2) the PBC predicts off-nadir and offset angles to calculate pseudo building bboxes for buildingbbox-unknown samples. + +scenarios, off-nadir images constitute a large proportion of the remote sensing images, in which the parallax effect of roof and footprint results in more challenges for extracting footprints but provides additional information for height estimation as well. Some recent studies [4, 5] design methods to learn the geocentric pose of buildings in off-nadir images for monocular height estimation [28], while others leverage the offset between building roof and footprint and the relation between different components to reconstruct a 3D building model [19, 32]. + +In summary, the monocular building reconstruction methods in existing studies require expensive and fully-annotated 3D labels for supervised learning. Our proposed method, by contrast, is a unified and flexible framework for 3D building reconstruction with different supervision levels, which effectively reduces the demand for the large-scale 3D annotations. + +# 2.3. Monocular 3D reconstruction with fewer labels + +In monocular 3D reconstruction in the general computer vision domain, several methods have been proposed for reducing the 3D annotation demand via weakly-supervised or semi-supervised learning [3, 11, 14, 16, 26]. In Yang et al. [34], a unified framework combining two types of supervisions was proposed, i.e., a small number of camera pose annotations and a large number of unlabeled images. In Neverova et al. [27], an intermediate representation containing important topological and structural information of hand was introduced to enable the weakly-supervised training for hand pose estimation. Concurrently, Gwak et al. [10] effectually leveraged a weak supervision type, i.e., foreground mask, as a substitute for costly 3D CAD annota + +tions, which incorporates a raytrace pooling layer to enable perspective projection and backpropagation. 
+ +In contrast to the aforementioned studies, our proposed method leverages prior knowledge about the 3D structure of a building instance and the monocular remote sensing image, including the relation between roof, footprint, height, offset angle, and off-nadir angle, enabling multi-level supervised 3D reconstruction with fewer annotation efforts. + +# 3. Methods + +# 3.1. Problem statement + +Given an off-nadir remote sensing image $I$ that includes buildings $B = \{b_{1}, b_{2}, \ldots, b_{N}\}$ , the objective of monocular 3D building reconstruction is to identify all the footprints $F = \{f_{1}, f_{2}, \ldots, f_{N}\}$ and roofs $R = \{r_{1}, r_{2}, \ldots, r_{N}\}$ corresponding to $B$ . The difficulty is that the footprints of buildings may be partially visible from an off-nadir viewing angle. Thus, previous studies, including [19] and [32], typically solve this issue by training a deep neural network with samples annotated with both $F$ and roof-to-footprint offsets $\vec{V} = \{v_{1}, v_{2}, \ldots, v_{N}\}$ . + +However, the cost of annotating remote sensing images is still high, particularly for offset annotations. Therefore, we suggest addressing this issue by training a deep model that effectively uses samples containing both $F$ and $\vec{V}$ annotations, alongside samples only annotated with $F$ . + +To facilitate training with offset-unknown samples, two tasks are included; one for predicting the off-nadir angle $\theta_{I}$ and the other for the offset angle $\varphi_{I}$ . Additionally, an instance-wise footprint segmentation task is included to predict the footprint conditioned on the predicted roof and off + +set. Finally, a task for predicting real-world height is introduced to enhance the comprehension of the correlation between footprint and roof placement. In summary, four additional tasks are added to the original three tasks in LOFT-FOA [32]: (1) off-nadir angle prediction task; (2) offset angle prediction task; (3) footprint segmentation task; (4) real-world height prediction task. + +# 3.2. Network structure + +Fig. 2 illustrates the proposed architecture of our MLS-BRN. To facilitate multi-level supervised learning, two novel modules are introduced, namely the Pseudo Building Bbox Calculator (PBC) and the Roof-Offset guided Footprint Extractor (ROFE). The PBC module provides pseudo building boxes to determine the positivity/negativity of the region proposals from the RPN module when offset-unknown (i.e. building bbox-unknown) samples are processed in the MLS-BRN. The ROFE module has two significant functions. Firstly, it provides a more straightforward method to supervise the building footprint segmentation task. Secondly, it offers an indirect method of supervising offset prediction and roof segmentation for offset-unknown samples as they pass through the MLS-BRN. Additionally, a building height prediction task has been included in order to predict the real-world building height. + +# 3.2.1 Pseudo Building Bbox Calculator (PBC) + +Samples without the ground truth for building bounding box $b$ -bbox $_{gt}$ cannot be utilized by previous models, like LOFT-FOA [32]. To address this issue, we propose a module that predicts pseudo building bounding boxes to substitute $b$ -bbox $_{gt}$ . 
For a provided off-nadir remote sensing image $I$ and one building $b$ contained by $I$ , we can describe the connection between the image-wise off-nadir angle $\theta_{I}$ , the offset angle $\varphi_{I}$ , the factor for scaling real-world height to pixel scale $s_{I}$ , and the building's height $h_{b}$ and offset $\vec{v}_{b}$ using the following equation: + +$$ +\begin{array}{l} \vec {v} _ {b} = | | \vec {v} _ {b} | | _ {2} \times \vec {e} \\ = \left\| \vec {v} _ {b} \right\| _ {2} \times \left[ e _ {x}, e _ {y} \right] \tag {1} \\ = h _ {b} \times s _ {I} \times \tan \theta_ {I} \times [ \cos \varphi_ {I}, \sin \varphi_ {I} ] \\ \end{array} +$$ + +where $||\vec{v}_b||_2$ is the $L2$ norm of the offset, $\vec{e}$ is the unit normal vector of $\vec{v}_b$ . The PBC module uses an off-nadir angle head to predict an image-wise off-nadir angle $\theta_{pred}$ and an offset angle head to predict an image-wise offset angle $\varphi_{pred}$ . Then, following Eq. (1), they are combined with the instance-wise building height ground truth $h_{gt}$ , and scale factor $s_{gt}$ to compute the pseudo offset $\vec{v}_{pred}$ . Finally, $f_{gt}$ is translated to get the pseudo building bbox $b$ -bbox $_{pred}$ guided by $\vec{v}_{pred}$ . $b$ -bbox $_{pred}$ will play the role of $b$ -bbox $_{gt}$ during the training of the building bbox-unknown samples. + +From the perspective of weak supervision, the PBC module extracts the image-wise angle information, i.e. the offset angle and the off-nadir angle, and uses it to supervise the instance-wise task. Note that for building height-unknown samples, the pseudo bounding boxes are calculated by directly enlarge the footprint boxes. + +# 3.2.2 Roof-Offset guided Footprint Extractor (ROFE) + +Previous works calculate the footprint mask in the inference stage by translating the inferred roof guided by the inferred offset. The ROFE module, however, predicts the footprint mask directly. It trains a convolutional network to learn the translation process, using the inferred roof mask and offset as inputs. For offset-aware (i.e. roof-aware) samples, this end-to-end training process adds more supervision on the offset head and the roof head. And for offset-unknown samples, which cannot contribute to the training of the offset head and the roof head due to lack of ground truth, ROFE provides an indirect way to supervise these two heads. + +# 3.3. Network training + +In this section, we first introduce the loss functions in our MLS-BRN. Then we introduce our three levels of training samples graded by their level of supervision and their training strategies. The total hybrid loss is presented at the end of this section. + +# 3.3.1 Loss definition + +The LOFT-FOA [32] is trained by minimising Eq. (2), where $\mathcal{L}_{rp}$ , $\mathcal{L}_{rc}$ , $\mathcal{L}_{mh}$ are the same as those in Mask R-CNN [13], i.e., the losses for the RPN, R-CNN, and mask head, respectively; $\mathcal{L}_o$ is the loss for the offset head, which is a standard smooth L1 Loss. + +$$ +\mathcal {L} _ {L F} = \mathcal {L} _ {r p} + \beta_ {1} \mathcal {L} _ {r c} + \beta_ {2} \mathcal {L} _ {m h} + \beta_ {3} \mathcal {L} _ {o} \tag {2} +$$ + +The MLS-BRN model keeps the four losses the same as LOFT-FOA [32] and introduces new losses to train the newly added modules. The footprint mask loss of the ROFE module is the same as $\mathcal{L}_{mh}$ , which is a standard cross entropy loss (Eq. (3)). 
+ +$$ +\mathcal {L} _ {f} = \frac {1}{N} \sum_ {i = 1} ^ {N} \sum_ {c = 1} ^ {C} y _ {i, c} \times \log (p \left(y _ {i, c}\right)) \tag {3} +$$ + +The loss of the offset angle head of the PBC module is calculated according to Eq. (4), in which $\mathcal{L}_{\text{ova}}$ denotes the offset angle loss; $\vec{v}_{pred}$ denotes the predicted unit normal vector of the offset. + +$$ +\begin{array}{l} \mathcal {L} _ {o v a} = \mathcal {L} _ {a n g} + \lambda_ {1} \mathcal {L} _ {r e g} \tag {4} \\ = | | \vec {v} _ {p r e d} - \vec {v} _ {g t} | | _ {1} + \lambda_ {1} | | | | \vec {v} _ {p r e d} | | _ {2} - 1 | | _ {1} \\ \end{array} +$$ + +The nadir angle head of the PBC module is trained following Eq. (5), where $\mathcal{L}_{ona}$ is the off-nadir angle loss; $\theta_{pred}$ is the predicted tangent of the off-nadir angle. + +$$ +\mathcal {L} _ {\text {o n a}} = \left\| \tan \theta_ {\text {p r e d}} - \tan \theta_ {g t} \right\| _ {1} \tag {5} +$$ + +The height head loss of our MLS-BRN is calculated by Eq. (6), in which $\mathcal{L}_h$ denotes the height loss; $h_{pred}$ denotes the predicted building height. + +$$ +\mathcal {L} _ {h} = \left| \left| h _ {p r e d} - h _ {g t} \right| \right| _ {1} \tag {6} +$$ + +# 3.3.2 Multi-level training strategy + +In our proposed unified framework, all the training samples can be graded into three levels according to their level of supervision (Fig. 1): + +- Level 1 samples: samples with only instance-wise footprint annotation, which are denoted by $\mathcal{X}^N = \{x_1^N, x_2^N, \dots, x_{n_3}^N\}$ . $N$ means no additional supervision. +- Level 2 samples: samples with instance-wise footprint and building height annotation, which are denoted by $\mathcal{X}^H = \{x_1^H,x_2^H,\dots,x_{n_2}^H\}$ . +- Level 3 samples: samples with instance-wise footprint, offset, and building height annotation, which are denoted by $\mathcal{X}^{OH} = \{x_1^{OH}, x_2^{OH}, \dots, x_{n_1}^{OH}\}$ . + +Different levels of samples are supervised by different training strategies. As defined in Eq. (7), the loss function for $\mathcal{X}^N$ is only based on $\mathcal{L}_f$ . + +$$ +\mathcal {L} _ {\mathcal {X} ^ {N}} = \mathcal {L} _ {f} \tag {7} +$$ + +The loss function for $\mathcal{X}^H$ is defined in Eq. (8). In $\mathcal{L}_{\mathcal{X}^H}$ , the $\mathcal{L}_{rp}$ is activated since the PBC module can predict a high-quality pseudo building bbox, which is good enough to supervise the RPN module. + +$$ +\begin{array}{l} \mathcal {L} _ {\mathcal {X} ^ {H}} = \mathcal {L} _ {\mathcal {X} ^ {N}} + \alpha_ {1} \mathcal {L} _ {r p} + \alpha_ {2} \mathcal {L} _ {h} \tag {8} \\ = \mathcal {L} _ {f} + \alpha_ {1} \mathcal {L} _ {r p} + \alpha_ {2} \mathcal {L} _ {h} \\ \end{array} +$$ + +The loss function for $\mathcal{X}^{OH}$ is defined in Eq. (9). Compared with the original $\mathcal{L}_{LF}$ , $\mathcal{L}_{\mathcal{X}^{OH}}$ adds four more losses: $\mathcal{L}_f$ , $\mathcal{L}_h$ , $\mathcal{L}_{ona}$ , $\mathcal{L}_{ova}$ . The $\mathcal{L}_{ona}$ and $\mathcal{L}_{ova}$ are used for training the two angle heads of the PBC module. 
+ +$$ +\begin{array}{l} \mathcal {L} _ {\chi^ {O H}} = \mathcal {L} _ {\chi^ {H}} + \alpha_ {3} \mathcal {L} _ {r c} + \alpha_ {4} \mathcal {L} _ {m h} \\ + \alpha_ {5} \mathcal {L} _ {o} + \alpha_ {6} \mathcal {L} _ {o n a} + \alpha_ {7} \mathcal {L} _ {o v a} \tag {9} \\ = \mathcal {L} _ {L F} + \mathcal {L} _ {f} + \alpha_ {2} \mathcal {L} _ {h} + \alpha_ {6} \mathcal {L} _ {o n a} + \alpha_ {7} \mathcal {L} _ {o v a} \\ \end{array} +$$ + +The final hybrid loss is defined as the total loss of the three levels of training samples according to Eq. (10). + +$$ +\mathcal {L} = \mathcal {L} _ {\mathcal {X} ^ {N}} + \mathcal {L} _ {\mathcal {X} ^ {H}} + \mathcal {L} _ {\mathcal {X} ^ {O H}} \tag {10} +$$ + +# 3.4. Implementation details + +As mentioned in Fig. 2, we use ResNet-50 [12] with FPN [23] pre-trained on the ImageNet as the backbone. All the models are trained with a batch size of 4 using NVIDIA 3090 GPUs. To align with LOFT-FOA [32], we train 24 epochs for all the models, with the learning rate starting from 0.01 and decaying by a factor of 0.1 at the $16^{th}$ and $22^{nd}$ epochs. The SGD algorithm with a weight decay of 0.0001 and a momentum of 0.9 is used for all experiments. LOFT-FOA [32] is used as the basic architecture of the MLS-BRN model, and all the hyperparameters that occur in both LOFT-FOA [32] and MLS-BRN are the same, except for the learning rate mentioned above. All models are built in PyTorch. + +In Eq. (4), we set $\lambda_{1} = 0.1$ to balance the two loss items. In Eq. (8), we set $\alpha_{1} = 1$ to keep the loss weight of ROFE the same as the roof mask head, and set $\alpha_{2} = 32$ since the absolute building height loss value is relatively small. In Eq. (9), we set $\alpha_{3} = \alpha_{4} = 1, \alpha_{5} = 16$ to keep them the same as LOFT-FOA [32], and set $\alpha_{6} = 1, \alpha_{7} = 8$ to balance the effects of the magnitude of these two losses. + +# 4. Experiments + +# 4.1. Datasets + +In our experiments, we employ multi-supervised datasets for training our methods: (1) BONAI [32] provides building footprint segmentation, offset, and height annotations, which contains 3,000 and 300 images for train-val and test respectively; (2) OmniCity-view3 [21] originally provides satellite images with annotations for footprint segmentation and building height. We add additional offset annotations for 17,092 and 4,929 images from train-val and test sets respectively; (3) Additionally, we release a new dataset named HK, which includes 500 and 119 satellite images specifically captured from Hong Kong for train-val and test sets, along with annotations for footprint segmentation, offset and height. + +As detailed in Sec. 3, all our training samples are graded into three levels: samples from $\mathcal{X}^N$ , $\mathcal{X}^H$ , and $\mathcal{X}^{OH}$ . To create different levels of training samples, we extract samples from the datasets mentioned above, reorganizing their annotations as necessary. We randomly choose $30\%$ of the samples from the BONAI dataset [32] as a smaller $\mathcal{X}^{OH}$ dataset, which we call $BN_{30}$ . We randomly drop the offset annotations of $70\%$ of the samples in the BONAI dataset [32], regard the entire BONAI [32] dataset as a $\mathcal{X}^{OH} + \mathcal{X}^H$ dataset, and name it $BN_{30/70}$ . Similarly, the original BONAI dataset [32] is regarded as a large $\mathcal{X}^{OH}$ and is named $BN_{100}$ . We use $OC$ to designate the OmniCity-view3 dataset [21]. 
Naturally, the abbreviations $OC_{30}$ , $OC_{30/70}$ , and $OC_{100}$ have the similar meaning with $BN_{30}$ , $BN_{30/70}$ , and $BN_{100}$ respectively. Moreover, we use $BH$ + +to refer to the combination of BONAI [32] and HK. It is important to note that in $BH_{30/70}$ , $30\%$ of BONAI's [32] samples are $\mathcal{X}^{OH}$ type while the remaining $70\%$ are $\mathcal{X}^H$ type. Additionally, $30\%$ of HK's samples belong to $\mathcal{X}^{OH}$ type and the remaining $70\%$ belong to $\mathcal{X}^N$ type. + +# 4.2. Performance comparison + +In this section, we evaluate our method's performance in footprint segmentation, offset prediction, and height prediction against several competitive methods for the single-level supervised learning scenario. In a Multi-level supervised learning scenario, we mainly compare our method with LOFT-FOA [32]. Additionally, we present our method's offset and off-nadir angles prediction performance. More results will be provided in the supplementary materials. + +Single-level supervised learning. The performance of footprint segmentation and offset prediction for different methods trained on $BN_{100}$ and $OC_{100}$ are listed in Tab. 1 and Tab. 2, respectively. Additionally, Fig. 3 provides a qualitative comparison of footprint segmentation results on the BONAI [32] test set. Note that all the experimental results in this section are obtained using $\mathcal{X}^{OH}$ samples, and the results obtained using $\mathcal{X}^H$ and $\mathcal{X}^N$ samples will be analysed in the following paragraph. For the footprint segmentation task, experimental results tested on $BN_{100}$ demonstrate that our method improves the F1-score by $5.42\% - 8.30\%$ compared with the instance segmentation methods that directly extract the building footprints. Furthermore, our method enhances the F1-score by $2.05\% - 2.76\%$ relative to MTBR-Net [19] and LOFT-FOA [32], which are specifically designed for extracting off-nadir building footprints based on predicted roof and offset, tested on $BN_{100}$ . Regarding the offset prediction task, our experimental findings indicate that our approach betters the EPE by 0.18 - 0.93 in comparison to MTBR-Net [19] and LOFT-FOA [32] tested on $BN_{100}$ . The results show that the direct supervision of the footprint segmentation, the constraint on the building height, and the encouragement of the angular feature extraction can help to achieve better performance in the footprint segmentation and offset prediction tasks in the single-level supervised learning scenario. + +
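The gains from the height constraint and the angle prediction tasks are easier to interpret through Eq. (1), which ties the roof-to-footprint offset to the building height and the two image-wise angles. The following is a minimal sketch of that relation and of how the PBC module turns a ground-truth footprint box into a pseudo building bbox; the function names, the radian convention, and the axis-aligned box handling are illustrative assumptions rather than the released implementation.

```python
import math

def pseudo_offset(h_b, s_I, theta_I, varphi_I):
    """Eq. (1): pseudo roof-to-footprint offset from height and image-wise angles.

    h_b      : real-world building height
    s_I      : factor scaling real-world height to pixel scale
    theta_I  : off-nadir angle, in radians
    varphi_I : offset angle, in radians
    """
    length = h_b * s_I * math.tan(theta_I)            # ||v_b||_2
    return (length * math.cos(varphi_I), length * math.sin(varphi_I))

def pseudo_building_bbox(footprint_bbox, offset):
    """Translate the footprint box along the pseudo offset and keep the union,
    so the resulting box covers both the footprint and the (approximate) roof.
    The sign convention of the offset must match the dataset's definition."""
    x0, y0, x1, y1 = footprint_bbox
    dx, dy = offset
    return (min(x0, x0 + dx), min(y0, y0 + dy),
            max(x1, x1 + dx), max(y1, y1 + dy))

# Toy example: a 30 m building, scale 2 px/m, 30-degree off-nadir view.
v = pseudo_offset(30.0, 2.0, math.radians(30.0), math.radians(45.0))
print(pseudo_building_bbox((100.0, 100.0, 140.0, 150.0), v))
```
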
| method | F1 | Precision | Recall | EPE |
| --- | --- | --- | --- | --- |
| PANet [17] | 58.06 | 59.26 | 56.91 | - |
| HRNetv2 [30] | 60.81 | 61.20 | 60.42 | - |
| M R-CNN [13] | 58.12 | 59.26 | 57.03 | - |
| CM R-CNN [1] | 60.94 | 67.09 | 55.83 | - |
| MTBR-Net [19] | 63.60 | 64.34 | 62.87 | 5.69 |
| LOFT-FOA [32] | 64.31 | 63.37 | 65.29 | 4.94 |
| Ours | 66.36 | 65.90 | 66.83 | 4.76 |
+ +Table 1. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\%) and offset prediction results in terms of EPE trained on $BN_{100}$ . + +
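The offset prediction columns in Tab. 1 and Tab. 2 are reported as EPE (end-point error). As a point of reference, the sketch below shows how such a metric is typically computed over matched buildings, assuming per-building 2D offset vectors in pixel units; the function name and array layout are assumptions of this sketch, not the evaluation code behind the tables.

```python
import numpy as np

def offset_epe(pred_offsets, gt_offsets):
    """Mean end-point error between predicted and ground-truth offsets.

    pred_offsets, gt_offsets : (N, 2) arrays of roof-to-footprint offsets
    (pixel units), matched building by building.
    """
    pred = np.asarray(pred_offsets, dtype=float)
    gt = np.asarray(gt_offsets, dtype=float)
    # Euclidean distance between the two offset end points, averaged over buildings.
    return float(np.linalg.norm(pred - gt, axis=1).mean())

# Toy example with two matched buildings.
print(offset_epe([[10.0, 4.0], [-3.0, 7.5]], [[9.0, 5.0], [-2.0, 7.0]]))
```
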
| method | F1 | Precision | Recall | EPE |
| --- | --- | --- | --- | --- |
| M R-CNN [13] | 69.75 | 69.74 | 69.76 | - |
| LOFT-FOA [32] | 70.46 | 68.77 | 72.23 | 6.08 |
| Ours | 72.25 | 69.57 | 75.14 | 5.38 |
+ +![](images/4256aa5620a724c611023dfd0b2d6cf2b2e317d7b1b0eb38edda2feadea7f029.jpg) +Figure 3. The results of the baselines and our method trained on $BN_{100}$ and tested on the BONAI test set in terms of the footprint segmentation performance. The yellow, cyan, and red polygons denote the TP, FP, and FN. + +Table 2. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\%) and offset prediction results in terms of EPE trained on $OC_{100}$ . + +
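Tab. 3 below compares single-level and multi-level training. As a reminder of how the multi-level strategy of Sec. 3.3.2 assembles the per-sample loss (Eqs. (7)-(9), with the weights from Sec. 3.4), here is a schematic sketch; the dictionary-based interface and function name are illustrative assumptions, not the released training code.

```python
def sample_loss(level, L, a=(1.0, 32.0, 1.0, 1.0, 16.0, 1.0, 8.0)):
    """Assemble the per-sample training loss by annotation level (Sec. 3.3.2).

    level : "N" (footprint only), "H" (footprint + height),
            or "OH" (footprint + offset + height)
    L     : dict of individual loss terms, e.g.
            {"f": ..., "rp": ..., "h": ..., "rc": ..., "mh": ..., "o": ...,
             "ona": ..., "ova": ...}
    a     : weights (alpha_1 .. alpha_7) as set in Sec. 3.4
    """
    a1, a2, a3, a4, a5, a6, a7 = a
    if level == "N":                               # Eq. (7)
        return L["f"]
    if level == "H":                               # Eq. (8)
        return L["f"] + a1 * L["rp"] + a2 * L["h"]
    # Eq. (9): level == "OH"
    return (L["f"] + a1 * L["rp"] + a2 * L["h"]
            + a3 * L["rc"] + a4 * L["mh"] + a5 * L["o"]
            + a6 * L["ona"] + a7 * L["ova"])

# Toy values: an X^H sample contributes only the footprint, RPN, and height terms.
toy = {"f": 0.4, "rp": 0.2, "h": 0.05, "rc": 0.3, "mh": 0.5, "o": 0.1, "ona": 0.02, "ova": 0.03}
print(sample_loss("H", toy))   # 0.4 + 1.0*0.2 + 32.0*0.05 = 2.2
# The hybrid loss of Eq. (10) is simply the sum of these per-level losses.
```
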
| method | dataset | sample | F1-score | EPE |
| --- | --- | --- | --- | --- |
| LOFT-FOA [32] | $BN_{30}$ | $\mathcal{X}^{OH}$ | 61.35 | 5.70 |
| Ours | $BN_{30/70}$ | $\mathcal{X}^{OH}+\mathcal{X}^H$ | 65.49 | 5.39 |
| LOFT-FOA [32] | $BN_{100}$ | $\mathcal{X}^{OH}$ | 64.31 | 4.94 |
| Ours | $BN_{100}$ | $\mathcal{X}^{OH}$ | 66.36 | 4.76 |
| LOFT-FOA [32] | $OC_{30}$ | $\mathcal{X}^{OH}$ | 67.09 | 6.08 |
| Ours | $OC_{30/70}$ | $\mathcal{X}^{OH}+\mathcal{X}^H$ | 70.53 | 5.92 |
| LOFT-FOA [32] | $OC_{100}$ | $\mathcal{X}^{OH}$ | 70.46 | 5.38 |
| Ours | $OC_{100}$ | $\mathcal{X}^{OH}$ | 72.25 | 5.38 |
| LOFT-FOA [32] | $BH_{30}$ | $\mathcal{X}^{OH}$ | 54.96 | 5.78 |
| Ours | $BH_{30/70}$ | $\mathcal{X}^{OH}+\mathcal{X}^H+\mathcal{X}^N$ | 58.57 | 5.60 |
| LOFT-FOA [32] | $BH_{100}$ | $\mathcal{X}^{OH}$ | 60.85 | 4.74 |
| Ours | $BH_{100}$ | $\mathcal{X}^{OH}$ | 60.92 | 4.69 |
Table 3. Building footprint segmentation results of different methods in terms of F1-score (\%) and offset prediction results in terms of EPE trained on different datasets.

Multi-level supervised learning. Tab. 3 displays the footprint segmentation and offset prediction performance of LOFT-FOA [32] and our method when trained and tested on multi-level supervision datasets. Our method, trained on $BN_{30/70}$, $OC_{30/70}$ and $BH_{30/70}$, improves the F1-score by $4.14\%$, $3.44\%$ and $3.61\%$ over LOFT-FOA [32] trained on $BN_{30}$, $OC_{30}$ and $BH_{30}$. Additionally, our method trained on samples from $BN_{30/70}$, $OC_{30/70}$ and $BH_{30/70}$ performs on par with LOFT-FOA [32] trained on samples from $BN_{100}$, $OC_{100}$ and $BH_{100}$. These findings demonstrate the effectiveness of MLS-BRN in combining samples from the $\mathcal{X}^{OH}$, $\mathcal{X}^H$ and $\mathcal{X}^N$ levels to address the building reconstruction task.

Building height and angles prediction. Tab. 4 reports the building height prediction performance. The experimental findings indicate that our method reduces the height MAE by 0.22 - 4.33 and the height RMSE by 0.51 - 7.60 in comparison to SARPN [2], DORN [7], and LOFT-FOA+H. It is worth noting that SARPN [2] and DORN [7] predict pixel-wise building height, whereas MLS-BRN predicts instance-wise building height. To the best of our knowledge, MLS-BRN is the first method to predict instance-wise real-world building height. We therefore add a building height head directly to LOFT-FOA [32] (i.e. LOFT-FOA+H) and compare its prediction results with those of our method. Fig. 4 presents the qualitative building height prediction results of our method and LOFT-FOA+H. Regarding the angle prediction tasks, when trained on $BN_{100}$, the PBC module achieves an MAE of 9.92 for offset angle prediction and an MAE of 1.22 for off-nadir angle prediction. These improvements demonstrate the efficacy of the PBC, the ROFE, and the building height prediction module in the single-level supervised learning scenario.
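The height errors in Tab. 4 below are instance-wise MAE and RMSE over matched buildings. A minimal sketch of these two metrics is given here for clarity, assuming aligned arrays of predicted and ground-truth heights; the interface is an assumption of this sketch rather than the evaluation code used for the table.

```python
import numpy as np

def height_errors(pred_heights, gt_heights):
    """Instance-wise MAE and RMSE between predicted and ground-truth building heights."""
    pred = np.asarray(pred_heights, dtype=float)
    gt = np.asarray(gt_heights, dtype=float)
    mae = float(np.mean(np.abs(pred - gt)))
    rmse = float(np.sqrt(np.mean((pred - gt) ** 2)))
    return mae, rmse

# Toy example with three matched buildings (heights in metres).
print(height_errors([25.0, 80.0, 12.0], [30.0, 75.0, 10.0]))
```
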
| method | height MAE | height RMSE |
| --- | --- | --- |
| SARPN [2] | 15.23 | 28.69 |
| DORN [7] | 13.40 | 27.03 |
| LOFT-FOA+H | 11.12 | 21.60 |
| Ours | 10.90 | 21.09 |
Table 4. Building height prediction results of different methods in terms of MAE and RMSE, trained on $OC_{100}$ and tested on the OmniCity-view3 test set.
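The instance-wise height prediction and the two image-wise angle tasks are tied together by the geometric relation of Eq. (1): a building's roof-to-footprint offset equals its real-world height, scaled to pixels, projected along the offset direction with magnitude governed by the off-nadir angle. The sketch below only illustrates this geometry with hypothetical variable names and values; it is a simplified view of the relation exploited by the PBC module, not the released implementation.

```python
import math

def offset_from_height(height_m: float, scale_px_per_m: float,
                       off_nadir_deg: float, offset_angle_deg: float) -> tuple[float, float]:
    """Roof-to-footprint offset (in pixels) implied by Eq. (1):
    v = h * s * tan(theta) * [cos(phi), sin(phi)]."""
    theta = math.radians(off_nadir_deg)
    phi = math.radians(offset_angle_deg)
    length = height_m * scale_px_per_m * math.tan(theta)
    return (length * math.cos(phi), length * math.sin(phi))

def height_from_offset(offset_px: tuple[float, float], scale_px_per_m: float,
                       off_nadir_deg: float) -> float:
    """Inverse relation: recover an instance-wise height from an offset vector."""
    length = math.hypot(*offset_px)
    return length / (scale_px_per_m * math.tan(math.radians(off_nadir_deg)))

# Hypothetical example: a 30 m building, 0.5 px/m scale, 20 deg off-nadir, 45 deg offset angle
v = offset_from_height(30.0, 0.5, 20.0, 45.0)
print(v, height_from_offset(v, 0.5, 20.0))  # offset of roughly (3.86, 3.86) px, height back to 30 m
```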
| method | F1-score | Precision | Recall | EPE |
| --- | --- | --- | --- | --- |
| baseline | 61.35 | 61.84 | 61.65 | 5.70 |
| +PBC | 62.32 | 62.28 | 62.35 | 5.53 |
| +ROFE | 62.87 | 63.89 | 62.15 | 5.63 |
| +PBC+ROFE | 65.40 | 66.74 | 64.12 | 5.49 |
Table 5. Footprint segmentation results of different modules in terms of F1-score, precision, recall (\%) and offset prediction results in terms of EPE.

![](images/2b98b8cbe3821137140de9b550528a797c021425e10653772dbd86d93fbb2c32.jpg)
Figure 4. The visualization results of building height prediction from our method and LOFT-FOA+H on the OmniCity-view3 test set.

# 4.3. Ablation study

In this section, we examine the impact of the principal new components of our method: (1) the PBC module; (2) the ROFE module; and (3) the building height head. We also analyze the outcome of the data ablation experiment in the multi-level supervised learning setting.

Module ablation. The results obtained by adding the aforementioned modules successively on $BN_{30/70}$ are detailed in Tab. 5, which reports the F1-score for footprint segmentation and the EPE for offset prediction. LOFT-FOA [32] trained on $BN_{30}$ serves as the baseline. The second row (+PBC) shows the results of applying the PBC module to LOFT-FOA [32]: incorporating the two angle prediction tasks improves the F1-score of footprint extraction by $0.97\%$. Note that the added offset-unknown $70\%$ of samples in $BN_{30/70}$, which lack angle ground truth, do not contribute to PBC's training. The third row (+ROFE) shows the results of applying the ROFE module to LOFT-FOA [32]: compared with the baseline, predicting the footprint segmentation guided by the predicted offset and roof, coupled with the additional $70\%$ offset-unknown samples from $BN_{30/70}$, leads to a $1.52\%$ improvement in F1-score. The fourth row (+PBC+ROFE) shows that including the PBC and ROFE modules simultaneously improves the F1-score of footprint extraction by $4.05\%$. These results show that the PBC and ROFE modules help to enhance the accuracy of both footprint segmentation and offset prediction.

![](images/d09418cf9fe9a336ba00b7358533bf0ad5d0e0bc36e680a03651d73b91b781b0.jpg)

![](images/bbdf1f274d2dae90c301224ae4326d94fef0b200c52dcf87e089cc7e0019a5f3.jpg)

![](images/b77dffcb8e12fbe8d10f207bd4c4afe040fd186c7c2cc12ed37a307ed8cdc271.jpg)

![](images/628fe83b128a2348c00d3d6176c870c4830402d97500f2e9f195303bf3641fb7.jpg)

![](images/7a01ae5ddb52c7b543be1e6ad8cb89c54a8844c178546b5b9cec74b89e489023.jpg)
Shanghai

![](images/0daf4c90c98acb9c43668a1321fcdc6a89f44ad8e224348fac6b356a29c04a65.jpg)
Xi'an

![](images/f4981fd0461ca0f0987164acf84c46b7c4488af7a04d5697eda8e8bae4450307.jpg)
Hong Kong

![](images/dbb35c1ef77098f9dcb0791f28b97b38d1cd99ea818f79145c1e493594bb5c4f.jpg)
New York

Figure 5. 3D reconstruction results of Shanghai, Xi'an, Hong Kong, and New York obtained using our method. The remote sensing images for Shanghai and Xi'an are chosen from the BONAI test set, whereas the remote sensing image for New York is chosen from the OmniCity-view3 test set.

Data ablation. The results of our approach trained on various dataset combinations, in terms of F1-score for footprint segmentation and EPE for offset prediction, are shown in Tab. 6. The first row $(\mathcal{X}^{OH})$ displays the results of training LOFT-FOA [32] on $30\%$ of the OmniCity-view3 [21] $\mathcal{X}^{OH}$ samples $(OC_{30})$.
The second row $(\mathcal{X}^{OH} + \mathcal{X}^{H})$ shows the results of our method trained on a mix of $30\%$ of the OmniCity-view3 [21] $\mathcal{X}^{OH}$ samples $(OC_{30})$ and $30\%$ of the OmniCity-view3 $\mathcal{X}^{H}$ samples, which yields a $3.28\%$ improvement in F1-score for footprint extraction over LOFT-FOA [32] trained solely on $OC_{30}$. The third row $(\mathcal{X}^{OH} + \mathcal{X}^{H} + \mathcal{X}^{N})$ presents the results of our method trained on a mix of $30\%$ of the OmniCity-view3 [21] $\mathcal{X}^{OH}$ samples, $30\%$ of the $\mathcal{X}^{H}$ samples, and the remaining $40\%$ as $\mathcal{X}^{N}$ samples, which brings a further $0.44\%$ increase in F1-score over training on $\mathcal{X}^{OH} + \mathcal{X}^{H}$, indicating the effectiveness of including $\mathcal{X}^{N}$ samples. We train LOFT-FOA [32] instead of our method on $OC_{30}$ (first row) in order to evaluate the gain obtained by our method in a scenario where $\mathcal{X}^{H}$ and $\mathcal{X}^{N}$ samples are available.
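To make the sample mixes in Tab. 6 concrete, the sketch below shows one way such multi-level splits could be assembled from a fully annotated dataset. The `make_splits` helper and its field names are hypothetical and only illustrate the $\mathcal{X}^{OH}$ / $\mathcal{X}^{H}$ / $\mathcal{X}^{N}$ grouping, not the authors' data pipeline.

```python
import random

def make_splits(samples: list[dict], p_oh: float = 0.3, p_h: float = 0.3, seed: int = 0) -> list[dict]:
    """Assign annotation levels to fully annotated samples (hypothetical helper).

    X^OH samples keep footprint, roof-to-footprint offset, and height labels;
    X^H samples keep footprint and height only; the remaining X^N samples
    keep the footprint label only.
    """
    rng = random.Random(seed)
    shuffled = rng.sample(samples, len(samples))  # shuffled copy of the sample list
    n_oh = int(len(shuffled) * p_oh)
    n_h = int(len(shuffled) * p_h)
    for i, s in enumerate(shuffled):
        if i < n_oh:
            s["level"] = "OH"                                    # footprint + offset + height
        elif i < n_oh + n_h:
            s["level"], s["offset"] = "H", None                  # footprint + height only
        else:
            s["level"], s["offset"], s["height"] = "N", None, None  # footprint only
    return shuffled

# Toy example of a 30%/30%/40% split, as in the last row of Tab. 6
toy = [{"footprint": ..., "offset": ..., "height": ...} for _ in range(10)]
levels = [s["level"] for s in make_splits(toy)]
print(levels.count("OH"), levels.count("H"), levels.count("N"))  # 3 3 4
```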
| data | F1 | Precision | Recall | EPE |
| --- | --- | --- | --- | --- |
| $\mathcal{X}^{OH}$ | 67.09 | 63.23 | 71.47 | 6.08 |
| $\mathcal{X}^{OH}+\mathcal{X}^{H}$ | 70.37 | 65.35 | 76.24 | 5.99 |
| $\mathcal{X}^{OH}+\mathcal{X}^{H}+\mathcal{X}^{N}$ | 70.81 | 66.15 | 76.18 | 5.84 |
Table 6. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\%) and offset prediction results in terms of EPE, trained on different dataset combinations.

# 4.4. 3D reconstruction results of different cities

Fig. 5 shows the 3D reconstruction results of four cities (i.e. Shanghai, Xi'an, Hong Kong, and New York) obtained using our method. The results demonstrate the effectiveness of our method for 3D building reconstruction across different cities. Note that we use the method in [38] to regularize the predicted building footprint masks.

# 5. Conclusion

In this paper, we have presented a new method for multi-level supervised building reconstruction from monocular remote sensing images, which is capable of reconstructing accurate 3D building models using samples of different annotation levels. Qualitative and quantitative evaluations confirm that our method achieves competitive performance and significantly enhances the 3D building reconstruction capability compared with the current state-of-the-art across diverse experimental settings. The effects of the Pseudo Building Bbox Calculator and the Roof-Offset guided Footprint Extractor, as well as of the annotation levels of the samples, were also analyzed in the ablation study. Furthermore, we expanded the monocular building reconstruction datasets to encompass additional cities. We believe that our approach offers an efficient and cost-effective solution for 3D building reconstruction in complex real-world scenes. In our future work, we would like to investigate more effective strategies to improve the 3D building reconstruction performance while exploring more adaptable and practical techniques for large-scale city modeling.

Acknowledgements. This project was funded in part by the National Natural Science Foundation of China (Grant No. 42201358 and No. 62325111) and Shanghai Artificial Intelligence Laboratory.

# References

[1] Kai Chen, Jiangmiao Pang, Jiaqi Wang, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jianping Shi, Wanli Ouyang, et al. Hybrid task cascade for instance segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4974-4983, 2019. 6
[2] Xiaotian Chen, Xuejin Chen, and Zheng-Jun Zha. Structure-aware residual pyramid network for monocular depth estimation. In Proceedings of the 28th International Joint Conference on Artificial Intelligence, pages 694-700, 2019. 7
[3] Yujin Chen, Zhigang Tu, Liuhao Ge, Dejun Zhang, Ruizhi Chen, and Junsong Yuan. So-handnet: Self-organizing network for 3d hand pose estimation with semi-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6961-6970, 2019. 3
[4] Gordon Christie, Rodrigo Rene Rai Munoz Abujder, Kevin Foster, Shea Hagstrom, Gregory D Hager, and Myron Z Brown. Learning geocentric object pose in oblique monocular images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14512-14520, 2020. 1, 2, 3
[5] Gordon Christie, Kevin Foster, Shea Hagstrom, Gregory D Hager, and Myron Z Brown. Single view geocentric pose in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1162-1171, 2021. 1, 3
[6] Liuyun Duan and Florent Lafarge. Towards large-scale city reconstruction from satellites. In European Conference on Computer Vision (ECCV), 2016. 1
[7] Huan Fu, Mingming Gong, Chaohui Wang, Kayhan Batmanghelich, and Dacheng Tao. Deep ordinal regression network for monocular depth estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2002-2011, 2018. 7
[8] Zhi Gao, Wenbo Sun, Yao Lu, Yichen Zhang, Weiwei Song, Yongjun Zhang, and Ruifang Zhai. Joint learning of semantic segmentation and height estimation for remote sensing image leveraging contrastive learning. IEEE Transactions on Geoscience and Remote Sensing, 2023. 2
[9] Pedram Ghamisi and Naoto Yokoya. Img2dsm: Height simulation from single imagery using conditional generative adversarial net. IEEE Geoscience and Remote Sensing Letters, pages 1-5, 2018. 2
[10] JunYoung Gwak, Christopher B Choy, Manmohan Chandraker, Animesh Garg, and Silvio Savarese. Weakly supervised 3d reconstruction with adversarial constraint. In 2017 International Conference on 3D Vision (3DV), pages 263-272. IEEE, 2017. 3
[11] Junwei Han, Yang Yang, Dingwen Zhang, Dong Huang, Dong Xu, and Fernando De La Torre. Weakly-supervised learning of category-specific 3d object shapes. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(4):1423-1437, 2021. 3
[12] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770-778, 2016. 5
[13] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 2961-2969, 2017. 4, 6
[14] Rongrong Ji, Ke Li, Yan Wang, Xiaoshuai Sun, Feng Guo, Xiaowei Guo, Yongjian Wu, Feiyue Huang, and Jiebo Luo. Semi-supervised adversarial monocular depth estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42(10):2410-2422, 2019. 3
[15] Saket Kunwar. U-net ensemble for semantic and height estimation using coarse-map initialization. In IGARSS 2019 - 2019 IEEE International Geoscience and Remote Sensing Symposium, pages 4959-4962. IEEE, 2019. 1, 2
[16] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. Robust model-based face reconstruction through weakly-supervised outlier segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 372-381, 2023. 3
[17] Muxingzi Li, Florent Lafarge, and Renaud Marlet. Approximating shapes in images with low-complexity polygons. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6
[18] Qingyu Li, Lichao Mou, Yuansheng Hua, Yilei Shi, Sining Chen, Yao Sun, and Xiao Xiang Zhu. 3dcentripetalnet: Building height retrieval from monocular remote sensing imagery. International Journal of Applied Earth Observation and Geoinformation, 120:103311, 2023. 2
[19] Weijia Li, Lingxuan Meng, Jinwang Wang, Conghui He, Gui-Song Xia, and Dahua Lin. 3d building reconstruction from monocular remote sensing images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12548-12557, 2021. 1, 2, 3, 6
[20] Weijia Li, Wenqian Zhao, Huaping Zhong, Conghui He, and Dahua Lin. Joint semantic-geometric learning for polygonal building segmentation. In AAAI, 2021. 2
[21] Weijia Li, Yawen Lai, Linning Xu, Yuanbo Xiangli, Jinhua Yu, Conghui He, Gui-Song Xia, and Dahua Lin. Omnicity: Omnipotent city understanding with multi-level and multiview images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17397-17407, 2023. 5, 8
[22] Zuoyue Li, Jan Dirk Wegner, and Aurélien Lucchi. Topological map extraction from overhead images. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 1715-1724, 2019. 2
[23] Tsung-Yi Lin, Piotr Dollár, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2117-2125, 2017. 5
[24] Jisan Mahmud, True Price, Akash Bapat, and Jan-Michael Frahm. Boundary-aware 3d building reconstruction from a single overhead image. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 1, 2
[25] Yongqiang Mao, Kaiqiang Chen, Liangjin Zhao, Wei Chen, Deke Tang, Wenjie Liu, Zhirui Wang, Wenhui Diao, Xian Sun, and Kun Fu. Elevation estimation-driven building 3d reconstruction from single-view remote sensing imagery. IEEE Transactions on Geoscience and Remote Sensing, 2023. 1, 2
[26] Rahul Mitra, Nitesh B Gundavarapu, Abhishek Sharma, and Arjun Jain. Multiview-consistent semi-supervised learning for 3d human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6907-6916, 2020. 3
[27] Natalia Neverova, Christian Wolf, Florian Nebout, and Graham W Taylor. Hand pose estimation through semi-supervised and weakly-supervised learning. Computer Vision and Image Understanding, 164:56-67, 2017. 3
[28] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 234-241. Springer, 2015. 3
[29] Shivangi Srivastava, Michele Volpi, and Devis Tuia. Joint height estimation and semantic labeling of monocular aerial images with cnns. In IGARSS IEEE International Geoscience and Remote Sensing Symposium, 2017. 1, 2
[30] Ke Sun, Yang Zhao, Borui Jiang, Tianheng Cheng, Bin Xiao, Dong Liu, Yadong Mu, Xinggang Wang, Wenyu Liu, and Jingdong Wang. High-resolution representations for labeling pixels and regions. arXiv preprint arXiv:1904.04514, 2019. 6
[31] Vivek Verma, Rakesh Kumar, and Stephen Hsu. 3d building detection and modeling from aerial lidar data. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2006. 1
[32] Jinwang Wang, Lingxuan Meng, Weijia Li, Wen Yang, Lei Yu, and Gui-Song Xia. Learning to extract building footprints from off-nadir aerial images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(1):1294-1301, 2022. 1, 2, 3, 4, 5, 6, 7, 8
[33] Zhitong Xiong, Wei Huang, Jingtao Hu, and Xiao Xiang Zhu. THE benchmark: Transferable representation learning for monocular height estimation. IEEE Transactions on Geoscience and Remote Sensing, 2023. 1, 2
[34] Guandao Yang, Yin Cui, Serge Belongie, and Bharath Hariharan. Learning single-view 3d reconstruction with limited pose supervision. In Proceedings of the European Conference on Computer Vision (ECCV), pages 86-101, 2018. 3
[35] Jiangye Yuan. Learning building extraction in aerial scenes with convolutional networks. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(11):2793-2798, 2017. 2
[36] Wufan Zhao, Claudio Persello, and Alfred Stein. Building outline delineation: From aerial images to polygons with an improved end-to-end learning framework.
ISPRS journal of photogrammetry and remote sensing, 175:119-131, 2021. 2 +[37] Zhuo Zheng, Yanfei Zhong, and Junjue Wang. Pop-net: Encoder-dual decoder for semantic segmentation and single-view height estimation. In IGARSS 2019-2019 IEEE International Geoscience and Remote Sensing Symposium, pages 4963-4966. IEEE, 2019. 1, 2 + +[38] Stefano Zorzi, Ksenia Bittner, and Friedrich Fraundorfer. Machine-learned regularization and polygonization of building segmentation masks. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 3098–3105. IEEE, 2021. 8 +[39] Stefano Zorzi, Shabab Bazrafkan, Stefan Habenschuss, and Friedrich Fraundorfer. *Polyworld: Polygonal building extraction with graph neural networks in satellite images*. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1848-1857, 2022. 2 \ No newline at end of file diff --git a/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/images.zip b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..e3d39d9821e19bba4bca48e8c3c64c5301e9e236 --- /dev/null +++ b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0162535b338c3aeb31b4e0ccc16a6a3b8320579b33a874d9e9f5aac568ce24b +size 624987 diff --git a/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/layout.json b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d9ec84c64b313454c616fe7badfc9fcad997855a --- /dev/null +++ b/2024/3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions/layout.json @@ -0,0 +1,10123 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 83, + 102, + 510, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 102, + 510, + 140 + ], + "spans": [ + { + "bbox": [ + 83, + 102, + 510, + 140 + ], + "type": "text", + "content": "3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "spans": [ + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "text", + "content": "Weijia Li" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "text", + "content": ", Haote Yang" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "text", + "content": ", Zhenghao Hu" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "text", + "content": ", Juepeng Zheng" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "text", + "content": ", Gui-Song Xia" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "inline_equation", + 
"content": "^{3}" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "text", + "content": ", Conghui He" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "inline_equation", + "content": "^{2,4\\dagger}" + }, + { + "bbox": [ + 70, + 160, + 523, + 190 + ], + "type": "text", + "content": ", Sun Yat-Sen University, Shanghai AI Laboratory, Wuhan University, SenseTime Research" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 91, + 191, + 495, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 191, + 495, + 217 + ], + "spans": [ + { + "bbox": [ + 91, + 191, + 495, + 217 + ], + "type": "text", + "content": "{liweij29, zhengjp8}@mail.sysu.edu.cn, {yanghaote, heconghui}@pjlab.org.cn, huzhh9@mail2.sysu.edu.cn, guisong.xia@whu.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "spans": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 270, + 290, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 270, + 290, + 534 + ], + "spans": [ + { + "bbox": [ + 47, + 270, + 290, + 534 + ], + "type": "text", + "content": "3D building reconstruction from monocular remote sensing images is an important and challenging research problem that has received increasing attention in recent years, owing to its low cost of data acquisition and availability for large-scale applications. However, existing methods rely on expensive 3D-annotated samples for fully-supervised training, restricting their application to large-scale cross-city scenarios. In this work, we propose MLS-BRN, a multi-level supervised building reconstruction network that can flexibly utilize training samples with different annotation levels to achieve better reconstruction results in an end-to-end manner. To alleviate the demand on full 3D supervision, we design two new modules, Pseudo Building Bbox Calculator and Roof-Offset guided Footprint Extractor, as well as new tasks and training strategies for different types of samples. Experimental results on several public and new datasets demonstrate that our proposed MLS-BRN achieves competitive performance using much fewer 3D-annotated samples, and significantly improves the footprint extraction and 3D reconstruction performance compared with current state-of-the-art. The code and datasets of this work will be released at https://github.com/opendatalabMLS-BRN.git." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 557, + 128, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 557, + 128, + 569 + ], + "spans": [ + { + "bbox": [ + 47, + 557, + 128, + 569 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 577, + 287, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 577, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 577, + 287, + 685 + ], + "type": "text", + "content": "3D building reconstruction is a fundamental task for large-scale city modeling and has received increasing attention in recent studies. 
Among these studies, monocular 3D building reconstruction has become a promising and economic solution for large-scale real-world applications, owing to its lower data acquisition cost and larger data coverage compared to multi-view stereo imagery and LiDAR data [6, 31]. Meanwhile, the limited information of monocular images as well as the diversity of building structures also result in" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 259, + 403, + 339 + ], + "blocks": [ + { + "bbox": [ + 350, + 247, + 503, + 258 + ], + "lines": [ + { + "bbox": [ + 350, + 247, + 503, + 258 + ], + "spans": [ + { + "bbox": [ + 350, + 247, + 503, + 258 + ], + "type": "text", + "content": "Training samples of different annotation levels" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 259, + 403, + 339 + ], + "lines": [ + { + "bbox": [ + 310, + 259, + 403, + 339 + ], + "spans": [ + { + "bbox": [ + 310, + 259, + 403, + 339 + ], + "type": "image", + "image_path": "9f5db6e39225d5b826e5067100e20cdd9d49b0d7ed32878666e38393ca52c82b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 408, + 261, + 470, + 339 + ], + "blocks": [ + { + "bbox": [ + 408, + 261, + 470, + 339 + ], + "lines": [ + { + "bbox": [ + 408, + 261, + 470, + 339 + ], + "spans": [ + { + "bbox": [ + 408, + 261, + 470, + 339 + ], + "type": "image", + "image_path": "0260dd48f10a2defdbcaac4633276157e283388fa4c2ee3ee3a66550bd18a6b9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 476, + 261, + 538, + 339 + ], + "blocks": [ + { + "bbox": [ + 476, + 261, + 538, + 339 + ], + "lines": [ + { + "bbox": [ + 476, + 261, + 538, + 339 + ], + "spans": [ + { + "bbox": [ + 476, + 261, + 538, + 339 + ], + "type": "image", + "image_path": "07979d40fbfcae765e8fd4a67ecae21039709debc7748947828dab30f41bd727.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 310, + 358, + 544, + 412 + ], + "blocks": [ + { + "bbox": [ + 359, + 346, + 487, + 357 + ], + "lines": [ + { + "bbox": [ + 359, + 346, + 487, + 357 + ], + "spans": [ + { + "bbox": [ + 359, + 346, + 487, + 357 + ], + "type": "text", + "content": "Monocular 3D building reconstruction" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 358, + 544, + 412 + ], + "lines": [ + { + "bbox": [ + 310, + 358, + 544, + 412 + ], + "spans": [ + { + "bbox": [ + 310, + 358, + 544, + 412 + ], + "type": "image", + "image_path": "aace82f59c79bbf32be3bc4991239091e7407c18a3b94d51158ee9397f190c3a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 422, + 547, + 478 + ], + "lines": [ + { + "bbox": [ + 305, + 422, + 547, + 478 + ], + "spans": [ + { + "bbox": [ + 305, + 422, + 547, + 478 + ], + "type": "text", + "content": "Figure 1. Our proposed method achieves 3D building reconstruction by training samples of different annotation levels. Large quantity of samples only include building footprint annotations, whereas a small quantity of samples contain extra roof-to-footprint offset and building height annotations." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 506, + 545, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 506, + 545, + 519 + ], + "spans": [ + { + "bbox": [ + 306, + 506, + 545, + 519 + ], + "type": "text", + "content": "great challenges for large-scale 3D building reconstruction." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 521, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 546, + 715 + ], + "type": "text", + "content": "Inspired by the progress of supervised monocular depth estimation methods, deep neural networks have been broadly applied to monocular 3D building reconstruction studies. Most studies utilize building footprints or other types of semantic labels as prior information to facilitate building height estimation from near-nadir images [15, 24, 25, 29, 37]. Off-nadir images, by contrast, constitute a larger proportion of the remote sensing images and provide additional useful information for building height estimation, which have demonstrated significant potential in several recent studies [4, 5, 19, 32, 33]. Some studies designed geocentric pose estimation task considering the parallax effect of building roof and footprint [4, 5], aiming at estimating the height values instead of reconstruct a 3D model. Other studies leveraged the relation between different components of a building instance (e.g. roof, footprint," + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 693, + 215, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 693, + 215, + 703 + ], + "spans": [ + { + "bbox": [ + 58, + 693, + 215, + 703 + ], + "type": "text", + "content": "*These authors contributed equally to this work." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 59, + 703, + 136, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 136, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 136, + 712 + ], + "type": "text", + "content": "† Corresponding author." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "27728" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": "and facade) as well as the offset between roof and footprint, which has proven to be an effective solution for 3D building reconstruction and accurate extraction of building footprints [19, 32]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 130, + 288, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 130, + 288, + 334 + ], + "spans": [ + { + "bbox": [ + 46, + 130, + 288, + 334 + ], + "type": "text", + "content": "In general, existing monocular building reconstruction methods are designed for fully-supervised learning, requiring a large number of fully-annotated 3D labels for network training. However, due to the expensive annotation cost, the available datasets for 3D building reconstruction are still very insufficient, restricting existing 3D reconstruction methods to single city or single dataset scenarios. By contrast, owing to the low annotation cost and the increase of open map data, public building footprints have an extremely large coverage and quantity. Additionally, existing building datasets provide different levels of annotations, such as footprint only, footprint and pixel-wise height [4], footprint and offset vector [19, 32], etc. The large-scale 2D footprints and different levels of annotated datasets can provide new opportunities for enlarging 3D building reconstruction application scenarios and reducing the annotation cost if they are effectively utilized." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 344, + 287, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 344, + 287, + 547 + ], + "spans": [ + { + "bbox": [ + 46, + 344, + 287, + 547 + ], + "type": "text", + "content": "In this work, we propose MLS-BRN, a Multi-Level Supervised Building Reconstruction Network based on monocular remote sensing images, which is a unified and flexible framework that is capable of utilizing the training samples with different annotation levels. To alleviate the demand on 3D annotations and enhance the building reconstruction performance, we design new tasks regarding the meta information of off-nadir images and two new modules, i.e., Pseudo Building Bbox Calculator and Roof-Offset guided Footprint Extractor, as well as a new training strategy based on different types of samples. Experimental results on several public and new datasets demonstrate that our method achieves competitive performance when only using a small proportion of 3D-annotated samples, and significantly improves the building segmentation and height estimation performance compared with current state-of-the-art. 
Our main contributions are summarized as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 570, + 286, + 712 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 47, + 570, + 286, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 570, + 286, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 570, + 286, + 628 + ], + "type": "text", + "content": "- We design MLS-BRN, a multi-level supervised building reconstruction network, which consists of new tasks and modules to enhance the relation between different components of a building instance and alleviate the demand on 3D annotations." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 630, + 286, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 630, + 286, + 665 + ], + "spans": [ + { + "bbox": [ + 47, + 630, + 286, + 665 + ], + "type": "text", + "content": "- We propose a multi-level training strategy that enables the training of MLS-BRN with different supervision levels to further improve the 3D reconstruction performance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 665, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 665, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 665, + 286, + 712 + ], + "type": "text", + "content": "- We extend the monocular building reconstruction datasets to more cities. Comprehensive experiments under different settings demonstrate the potential of MLS-BRN in large-scale cross-city scenarios." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 72, + 389, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 389, + 83 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 389, + 83 + ], + "type": "text", + "content": "2. Related work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 91, + 463, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 91, + 463, + 104 + ], + "spans": [ + { + "bbox": [ + 306, + 91, + 463, + 104 + ], + "type": "text", + "content": "2.1. Building footprint extraction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 110, + 545, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 110, + 545, + 384 + ], + "spans": [ + { + "bbox": [ + 304, + 110, + 545, + 384 + ], + "type": "text", + "content": "Building footprint extraction is an important prerequisite for monocular 3D building reconstruction. Various instance and semantic segmentation networks have been broadly applied to building extraction tasks. Many studies utilize multi-task segmentation network to improve the building segmentation performance. For instance, Yuan [35] proposed the signed distance representation for building footprint extraction, achieving better performance compared with the single-task fully-connected network. Similarly, in [24], a modified signed distance function was introduced and jointly learned with other tasks for predicting building footprint outlines and heights. To improve the geometry shapes of building extraction results, several methods directly predicted the vertices of a building polygon based on Recurrent Neural Network or Graph Neural Network [22, 36, 39], or combined the pixel-based multi-task segmentation network with a graph-based polygon refinement network using a rule-based module [20]. 
In addition, some recent studies converted building footprint extraction into roof segmentation and roof-to-footprint offset estimation tasks, which achieved promising performance for building footprint extraction, especially for high-rise buildings in off-nadir images [19, 32]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 386, + 545, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 386, + 545, + 504 + ], + "spans": [ + { + "bbox": [ + 304, + 386, + 545, + 504 + ], + "type": "text", + "content": "In summary, most existing methods directly extract the building footprints and perform worse for high-rise buildings in off-nadir images. Offset-based methods can effectively alleviate this problem, but the expensive offset annotation efforts and the post-processing process are still inevitable. On the contrary, our work proposes a multi-level supervised solution that is capable of leveraging different types of samples to reduce the demand for offset annotation, achieving promising footprint extraction results in an end-to-end manner." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 515, + 508, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 515, + 508, + 528 + ], + "spans": [ + { + "bbox": [ + 306, + 515, + 508, + 528 + ], + "type": "text", + "content": "2.2. Monocular 3D building reconstruction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "type": "text", + "content": "Inspired by the progress of monocular depth estimation, deep neural networks have been widely used for monocular building height estimation in recent studies [8, 18, 33]. Most of these studies are designed for height estimation from near-nadir images, in which the building roof and footprint are almost overlapped. Some methods used an encoder-decoder network to regress the height values [25], or used a generative adversarial network to simulate a height map [9]. Moreover, the semantic labels have been utilized as effective priors in many existing methods considering the limited information provided from the near-nadir images for height estimation. Some studies designed a multitask network for joint footprint extraction and height estimation [8, 29, 37], while others exploit the semantic labels as prior information for height estimation [15]. 
In actual" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27729" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 71, + 531, + 240 + ], + "blocks": [ + { + "bbox": [ + 63, + 71, + 531, + 240 + ], + "lines": [ + { + "bbox": [ + 63, + 71, + 531, + 240 + ], + "spans": [ + { + "bbox": [ + 63, + 71, + 531, + 240 + ], + "type": "image", + "image_path": "d3ca67c098577710a9ae026ded680c76d81571b9dc0d7e6ad63a70eeb02f4c79.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 248, + 548, + 304 + ], + "lines": [ + { + "bbox": [ + 46, + 248, + 548, + 304 + ], + "spans": [ + { + "bbox": [ + 46, + 248, + 548, + 304 + ], + "type": "text", + "content": "Figure 2. An overview of our proposed method. Taking a monocular remote sensing image as input, our MLS-BRN generates a set of building bboxes, roof-to-footprint offsets, building heights, and pixel-wise roof masks. The predicted roof masks and their corresponding offsets are further integrated to predict pixel-wise footprint masks. The predicted footprint mask and building height are used to produce the final vectorized 3D model. Two novel modules are introduced: (1) the ROFE predicts footprint masks guided by the predicted roof masks and offsets; (2) the PBC predicts off-nadir and offset angles to calculate pseudo building bboxes for buildingbbox-unknown samples." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 324, + 288, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 324, + 288, + 443 + ], + "spans": [ + { + "bbox": [ + 46, + 324, + 288, + 443 + ], + "type": "text", + "content": "scenarios, off-nadir images constitute a large proportion of the remote sensing images, in which the parallax effect of roof and footprint results in more challenges for extracting footprints but provides additional information for height estimation as well. Some recent studies [4, 5] design methods to learn the geocentric pose of buildings in off-nadir images for monocular height estimation [28], while others leverage the offset between building roof and footprint and the relation between different components to reconstruct a 3D building model [19, 32]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "spans": [ + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "text", + "content": "In summary, the monocular building reconstruction methods in existing studies require expensive and fully-annotated 3D labels for supervised learning. Our proposed method, by contrast, is a unified and flexible framework for 3D building reconstruction with different supervision levels, which effectively reduces the demand for the large-scale 3D annotations." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 538, + 287, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 538, + 287, + 551 + ], + "spans": [ + { + "bbox": [ + 47, + 538, + 287, + 551 + ], + "type": "text", + "content": "2.3. 
Monocular 3D reconstruction with fewer labels" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 558, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 288, + 714 + ], + "type": "text", + "content": "In monocular 3D reconstruction in the general computer vision domain, several methods have been proposed for reducing the 3D annotation demand via weakly-supervised or semi-supervised learning [3, 11, 14, 16, 26]. In Yang et al. [34], a unified framework combining two types of supervisions was proposed, i.e., a small number of camera pose annotations and a large number of unlabeled images. In Neverova et al. [27], an intermediate representation containing important topological and structural information of hand was introduced to enable the weakly-supervised training for hand pose estimation. Concurrently, Gwak et al. [10] effectually leveraged a weak supervision type, i.e., foreground mask, as a substitute for costly 3D CAD annota" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 324, + 545, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 324, + 545, + 349 + ], + "spans": [ + { + "bbox": [ + 305, + 324, + 545, + 349 + ], + "type": "text", + "content": "tions, which incorporates a raytrace pooling layer to enable perspective projection and backpropagation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 350, + 546, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 546, + 421 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 546, + 421 + ], + "type": "text", + "content": "In contrast to the aforementioned studies, our proposed method leverages prior knowledge about the 3D structure of a building instance and the monocular remote sensing image, including the relation between roof, footprint, height, offset angle, and off-nadir angle, enabling multi-level supervised 3D reconstruction with fewer annotation efforts." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 434, + 366, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 434, + 366, + 445 + ], + "spans": [ + { + "bbox": [ + 306, + 434, + 366, + 445 + ], + "type": "text", + "content": "3. Methods" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 454, + 417, + 465 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 454, + 417, + 465 + ], + "spans": [ + { + "bbox": [ + 306, + 454, + 417, + 465 + ], + "type": "text", + "content": "3.1. 
Problem statement" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "text", + "content": "Given an off-nadir remote sensing image " + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "text", + "content": " that includes buildings " + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "inline_equation", + "content": "B = \\{b_{1}, b_{2}, \\ldots, b_{N}\\}" + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "text", + "content": ", the objective of monocular 3D building reconstruction is to identify all the footprints " + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "inline_equation", + "content": "F = \\{f_{1}, f_{2}, \\ldots, f_{N}\\}" + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "text", + "content": " and roofs " + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "inline_equation", + "content": "R = \\{r_{1}, r_{2}, \\ldots, r_{N}\\}" + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "text", + "content": " corresponding to " + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "text", + "content": ". The difficulty is that the footprints of buildings may be partially visible from an off-nadir viewing angle. Thus, previous studies, including [19] and [32], typically solve this issue by training a deep neural network with samples annotated with both " + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "text", + "content": " and roof-to-footprint offsets " + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "inline_equation", + "content": "\\vec{V} = \\{v_{1}, v_{2}, \\ldots, v_{N}\\}" + }, + { + "bbox": [ + 304, + 472, + 545, + 592 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 594, + 545, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 653 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 653 + ], + "type": "text", + "content": "However, the cost of annotating remote sensing images is still high, particularly for offset annotations. Therefore, we suggest addressing this issue by training a deep model that effectively uses samples containing both " + }, + { + "bbox": [ + 304, + 594, + 545, + 653 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 594, + 545, + 653 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 594, + 545, + 653 + ], + "type": "inline_equation", + "content": "\\vec{V}" + }, + { + "bbox": [ + 304, + 594, + 545, + 653 + ], + "type": "text", + "content": " annotations, alongside samples only annotated with " + }, + { + "bbox": [ + 304, + 594, + 545, + 653 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 594, + 545, + 653 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": "To facilitate training with offset-unknown samples, two tasks are included; one for predicting the off-nadir angle " + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\theta_{I}" + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": " and the other for the offset angle " + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\varphi_{I}" + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": ". Additionally, an instance-wise footprint segmentation task is included to predict the footprint conditioned on the predicted roof and off" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "27730" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "content": "set. Finally, a task for predicting real-world height is introduced to enhance the comprehension of the correlation between footprint and roof placement. In summary, four additional tasks are added to the original three tasks in LOFT-FOA [32]: (1) off-nadir angle prediction task; (2) offset angle prediction task; (3) footprint segmentation task; (4) real-world height prediction task." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 164, + 157, + 176 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 164, + 157, + 176 + ], + "spans": [ + { + "bbox": [ + 47, + 164, + 157, + 176 + ], + "type": "text", + "content": "3.2. Network structure" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 183, + 289, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 183, + 289, + 376 + ], + "spans": [ + { + "bbox": [ + 46, + 183, + 289, + 376 + ], + "type": "text", + "content": "Fig. 2 illustrates the proposed architecture of our MLS-BRN. To facilitate multi-level supervised learning, two novel modules are introduced, namely the Pseudo Building Bbox Calculator (PBC) and the Roof-Offset guided Footprint Extractor (ROFE). The PBC module provides pseudo building boxes to determine the positivity/negativity of the region proposals from the RPN module when offset-unknown (i.e. building bbox-unknown) samples are processed in the MLS-BRN. The ROFE module has two significant functions. Firstly, it provides a more straightforward method to supervise the building footprint segmentation task. Secondly, it offers an indirect method of supervising offset prediction and roof segmentation for offset-unknown samples as they pass through the MLS-BRN. Additionally, a building height prediction task has been included in order to predict the real-world building height." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 392, + 252, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 392, + 252, + 403 + ], + "spans": [ + { + "bbox": [ + 47, + 392, + 252, + 403 + ], + "type": "text", + "content": "3.2.1 Pseudo Building Bbox Calculator (PBC)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "spans": [ + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": "Samples without the ground truth for building bounding box " + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": "-bbox" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "_{gt}" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": " cannot be utilized by previous models, like LOFT-FOA [32]. To address this issue, we propose a module that predicts pseudo building bounding boxes to substitute " + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": "-bbox" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "_{gt}" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": ". For a provided off-nadir remote sensing image " + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": " and one building " + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": " contained by " + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": ", we can describe the connection between the image-wise off-nadir angle " + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "\\theta_{I}" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": ", the offset angle " + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "\\varphi_{I}" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": ", the factor for scaling real-world height to pixel scale " + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "s_{I}" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": ", and the building's height " + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "h_{b}" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": " and offset " + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "inline_equation", + "content": "\\vec{v}_{b}" + }, + { + "bbox": [ + 46, + 411, + 287, + 531 + ], + "type": "text", + "content": " using the following equation:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 83, + 540, + 287, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 540, + 287, + 583 + ], 
+ "spans": [ + { + "bbox": [ + 83, + 540, + 287, + 583 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\vec {v} _ {b} = | | \\vec {v} _ {b} | | _ {2} \\times \\vec {e} \\\\ = \\left\\| \\vec {v} _ {b} \\right\\| _ {2} \\times \\left[ e _ {x}, e _ {y} \\right] \\tag {1} \\\\ = h _ {b} \\times s _ {I} \\times \\tan \\theta_ {I} \\times [ \\cos \\varphi_ {I}, \\sin \\varphi_ {I} ] \\\\ \\end{array}", + "image_path": "d44c0faae03a5f5cb05bf294d9281ae7efce23cb82a0c9b3b6d06e7b10026a40.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "||\\vec{v}_b||_2" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "L2" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": " norm of the offset, " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\vec{e}" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": " is the unit normal vector of " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\vec{v}_b" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": ". The PBC module uses an off-nadir angle head to predict an image-wise off-nadir angle " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\theta_{pred}" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": " and an offset angle head to predict an image-wise offset angle " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\varphi_{pred}" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": ". Then, following Eq. (1), they are combined with the instance-wise building height ground truth " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "h_{gt}" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": ", and scale factor " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "s_{gt}" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": " to compute the pseudo offset " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\vec{v}_{pred}" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": ". 
Finally, " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "f_{gt}" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": " is translated to get the pseudo building bbox " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": "-bbox " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "_{pred}" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": " guided by " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\vec{v}_{pred}" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": "-bbox " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "_{pred}" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": " will play the role of " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": "-bbox " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "_{gt}" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": " during the training of the building bbox-unknown samples." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "From the perspective of weak supervision, the PBC module extracts the image-wise angle information, i.e. the offset angle and the off-nadir angle, and uses it to supervise the instance-wise task. Note that for building height-unknown samples, the pseudo bounding boxes are calculated by directly enlarge the footprint boxes." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 159, + 544, + 171 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 159, + 544, + 171 + ], + "spans": [ + { + "bbox": [ + 305, + 159, + 544, + 171 + ], + "type": "text", + "content": "3.2.2 Roof-Offset guided Footprint Extractor (ROFE)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 178, + 545, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 178, + 545, + 310 + ], + "spans": [ + { + "bbox": [ + 304, + 178, + 545, + 310 + ], + "type": "text", + "content": "Previous works calculate the footprint mask in the inference stage by translating the inferred roof guided by the inferred offset. The ROFE module, however, predicts the footprint mask directly. It trains a convolutional network to learn the translation process, using the inferred roof mask and offset as inputs. For offset-aware (i.e. roof-aware) samples, this end-to-end training process adds more supervision on the offset head and the roof head. And for offset-unknown samples, which cannot contribute to the training of the offset head and the roof head due to lack of ground truth, ROFE provides an indirect way to supervise these two heads." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 317, + 411, + 330 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 317, + 411, + 330 + ], + "spans": [ + { + "bbox": [ + 305, + 317, + 411, + 330 + ], + "type": "text", + "content": "3.3. Network training" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 336, + 545, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 336, + 545, + 395 + ], + "spans": [ + { + "bbox": [ + 304, + 336, + 545, + 395 + ], + "type": "text", + "content": "In this section, we first introduce the loss functions in our MLS-BRN. Then we introduce our three levels of training samples graded by their level of supervision and their training strategies. The total hybrid loss is presented at the end of this section." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 411, + 402, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 411, + 402, + 422 + ], + "spans": [ + { + "bbox": [ + 305, + 411, + 402, + 422 + ], + "type": "text", + "content": "3.3.1 Loss definition" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 430, + 545, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 430, + 545, + 489 + ], + "spans": [ + { + "bbox": [ + 304, + 430, + 545, + 489 + ], + "type": "text", + "content": "The LOFT-FOA [32] is trained by minimising Eq. (2), where " + }, + { + "bbox": [ + 304, + 430, + 545, + 489 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{rp}" + }, + { + "bbox": [ + 304, + 430, + 545, + 489 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 430, + 545, + 489 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{rc}" + }, + { + "bbox": [ + 304, + 430, + 545, + 489 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 430, + 545, + 489 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{mh}" + }, + { + "bbox": [ + 304, + 430, + 545, + 489 + ], + "type": "text", + "content": " are the same as those in Mask R-CNN [13], i.e., the losses for the RPN, R-CNN, and mask head, respectively; " + }, + { + "bbox": [ + 304, + 430, + 545, + 489 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_o" + }, + { + "bbox": [ + 304, + 430, + 545, + 489 + ], + "type": "text", + "content": " is the loss for the offset head, which is a standard smooth L1 Loss." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 344, + 499, + 545, + 513 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 499, + 545, + 513 + ], + "spans": [ + { + "bbox": [ + 344, + 499, + 545, + 513 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {L F} = \\mathcal {L} _ {r p} + \\beta_ {1} \\mathcal {L} _ {r c} + \\beta_ {2} \\mathcal {L} _ {m h} + \\beta_ {3} \\mathcal {L} _ {o} \\tag {2}", + "image_path": "5a88b582fc6614401b3caa9b3e584d706fca261a8fc0957d5dea0cd45895f52e.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 522, + 545, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 545, + 582 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 545, + 582 + ], + "type": "text", + "content": "The MLS-BRN model keeps the four losses the same as LOFT-FOA [32] and introduces new losses to train the newly added modules. 
The footprint mask loss of the ROFE module is the same as " + }, + { + "bbox": [ + 304, + 522, + 545, + 582 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{mh}" + }, + { + "bbox": [ + 304, + 522, + 545, + 582 + ], + "type": "text", + "content": ", which is a standard cross entropy loss (Eq. (3))." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 350, + 590, + 545, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 590, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 350, + 590, + 545, + 624 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {f} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\sum_ {c = 1} ^ {C} y _ {i, c} \\times \\log (p \\left(y _ {i, c}\\right)) \\tag {3}", + "image_path": "968a292eb8f0b1883d4f7941e44d72bd1bfa3650ed358d534319d31c4cdd4a7a.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 632, + 545, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 632, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 304, + 632, + 545, + 679 + ], + "type": "text", + "content": "The loss of the offset angle head of the PBC module is calculated according to Eq. (4), in which " + }, + { + "bbox": [ + 304, + 632, + 545, + 679 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text{ova}}" + }, + { + "bbox": [ + 304, + 632, + 545, + 679 + ], + "type": "text", + "content": " denotes the offset angle loss; " + }, + { + "bbox": [ + 304, + 632, + 545, + 679 + ], + "type": "inline_equation", + "content": "\\vec{v}_{pred}" + }, + { + "bbox": [ + 304, + 632, + 545, + 679 + ], + "type": "text", + "content": " denotes the predicted unit normal vector of the offset." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 330, + 687, + 545, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 687, + 545, + 716 + ], + "spans": [ + { + "bbox": [ + 330, + 687, + 545, + 716 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {o v a} = \\mathcal {L} _ {a n g} + \\lambda_ {1} \\mathcal {L} _ {r e g} \\tag {4} \\\\ = | | \\vec {v} _ {p r e d} - \\vec {v} _ {g t} | | _ {1} + \\lambda_ {1} | | | | \\vec {v} _ {p r e d} | | _ {2} - 1 | | _ {1} \\\\ \\end{array}", + "image_path": "fb2495a65851272458ab3537e9b0afefd7328fd741cfccda467e2f4aa51ef129.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27731" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": "The nadir angle head of the PBC module is trained following Eq. 
(5), where " + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ona}" + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": " is the off-nadir angle loss; " + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "inline_equation", + "content": "\\theta_{pred}" + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": " is the predicted tangent of the off-nadir angle." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 101, + 119, + 287, + 133 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 119, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 101, + 119, + 287, + 133 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {o n a}} = \\left\\| \\tan \\theta_ {\\text {p r e d}} - \\tan \\theta_ {g t} \\right\\| _ {1} \\tag {5}", + "image_path": "1ac5340b27ffe69def244309ffb12e1132ab69f00713634186c0e08e1ba83ad7.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 143, + 287, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 143, + 287, + 179 + ], + "spans": [ + { + "bbox": [ + 47, + 143, + 287, + 179 + ], + "type": "text", + "content": "The height head loss of our MLS-BRN is calculated by Eq. (6), in which " + }, + { + "bbox": [ + 47, + 143, + 287, + 179 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_h" + }, + { + "bbox": [ + 47, + 143, + 287, + 179 + ], + "type": "text", + "content": " denotes the height loss; " + }, + { + "bbox": [ + 47, + 143, + 287, + 179 + ], + "type": "inline_equation", + "content": "h_{pred}" + }, + { + "bbox": [ + 47, + 143, + 287, + 179 + ], + "type": "text", + "content": " denotes the predicted building height." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 190, + 287, + 204 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 190, + 287, + 204 + ], + "spans": [ + { + "bbox": [ + 121, + 190, + 287, + 204 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {h} = \\left| \\left| h _ {p r e d} - h _ {g t} \\right| \\right| _ {1} \\tag {6}", + "image_path": "2d7dc5534bbbfac9fe6efbfc4311997f3a40e40802ab200555b71f5f3dac7c36.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 220, + 200, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 220, + 200, + 232 + ], + "spans": [ + { + "bbox": [ + 47, + 220, + 200, + 232 + ], + "type": "text", + "content": "3.3.2 Multi-level training strategy" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 240, + 287, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 240, + 287, + 276 + ], + "spans": [ + { + "bbox": [ + 47, + 240, + 287, + 276 + ], + "type": "text", + "content": "In our proposed unified framework, all the training samples can be graded into three levels according to their level of supervision (Fig. 
1):" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 277, + 286, + 384 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 47, + 277, + 286, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 277, + 286, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 277, + 286, + 312 + ], + "type": "text", + "content": "- Level 1 samples: samples with only instance-wise footprint annotation, which are denoted by " + }, + { + "bbox": [ + 47, + 277, + 286, + 312 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^N = \\{x_1^N, x_2^N, \\dots, x_{n_3}^N\\}" + }, + { + "bbox": [ + 47, + 277, + 286, + 312 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 47, + 277, + 286, + 312 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 47, + 277, + 286, + 312 + ], + "type": "text", + "content": " means no additional supervision." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 313, + 286, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 313, + 286, + 349 + ], + "spans": [ + { + "bbox": [ + 47, + 313, + 286, + 349 + ], + "type": "text", + "content": "- Level 2 samples: samples with instance-wise footprint and building height annotation, which are denoted by " + }, + { + "bbox": [ + 47, + 313, + 286, + 349 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^H = \\{x_1^H,x_2^H,\\dots,x_{n_2}^H\\}" + }, + { + "bbox": [ + 47, + 313, + 286, + 349 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 349, + 286, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 349, + 286, + 384 + ], + "spans": [ + { + "bbox": [ + 47, + 349, + 286, + 384 + ], + "type": "text", + "content": "- Level 3 samples: samples with instance-wise footprint, offset, and building height annotation, which are denoted by " + }, + { + "bbox": [ + 47, + 349, + 286, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH} = \\{x_1^{OH}, x_2^{OH}, \\dots, x_{n_1}^{OH}\\}" + }, + { + "bbox": [ + 47, + 349, + 286, + 384 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 385, + 287, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 385, + 287, + 421 + ], + "spans": [ + { + "bbox": [ + 47, + 385, + 287, + 421 + ], + "type": "text", + "content": "Different levels of samples are supervised by different training strategies. As defined in Eq. (7), the loss function for " + }, + { + "bbox": [ + 47, + 385, + 287, + 421 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^N" + }, + { + "bbox": [ + 47, + 385, + 287, + 421 + ], + "type": "text", + "content": " is only based on " + }, + { + "bbox": [ + 47, + 385, + 287, + 421 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_f" + }, + { + "bbox": [ + 47, + 385, + 287, + 421 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 142, + 432, + 287, + 445 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 432, + 287, + 445 + ], + "spans": [ + { + "bbox": [ + 142, + 432, + 287, + 445 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathcal {X} ^ {N}} = \\mathcal {L} _ {f} \\tag {7}", + "image_path": "551ebdbedf8acbc2ccf985ec6654541649d1674428235398a8d2d695a08b7dd0.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 456, + 287, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 456, + 287, + 503 + ], + "spans": [ + { + "bbox": [ + 47, + 456, + 287, + 503 + ], + "type": "text", + "content": "The loss function for " + }, + { + "bbox": [ + 47, + 456, + 287, + 503 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^H" + }, + { + "bbox": [ + 47, + 456, + 287, + 503 + ], + "type": "text", + "content": " is defined in Eq. (8). In " + }, + { + "bbox": [ + 47, + 456, + 287, + 503 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathcal{X}^H}" + }, + { + "bbox": [ + 47, + 456, + 287, + 503 + ], + "type": "text", + "content": ", the " + }, + { + "bbox": [ + 47, + 456, + 287, + 503 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{rp}" + }, + { + "bbox": [ + 47, + 456, + 287, + 503 + ], + "type": "text", + "content": " is activated since the PBC module can predict a high-quality pseudo building bbox, which is good enough to supervise the RPN module." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 101, + 514, + 286, + 542 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 514, + 286, + 542 + ], + "spans": [ + { + "bbox": [ + 101, + 514, + 286, + 542 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathcal {X} ^ {H}} = \\mathcal {L} _ {\\mathcal {X} ^ {N}} + \\alpha_ {1} \\mathcal {L} _ {r p} + \\alpha_ {2} \\mathcal {L} _ {h} \\tag {8} \\\\ = \\mathcal {L} _ {f} + \\alpha_ {1} \\mathcal {L} _ {r p} + \\alpha_ {2} \\mathcal {L} _ {h} \\\\ \\end{array}", + "image_path": "b3b14000318d12f97fa8a5eb0122b84b2a2b20ed297336bd0fb1207518b8d35d.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "text", + "content": "The loss function for " + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH}" + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "text", + "content": " is defined in Eq. (9). 
Compared with the original " + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{LF}" + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathcal{X}^{OH}}" + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "text", + "content": " adds four more losses: " + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_f" + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_h" + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ona}" + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ova}" + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "text", + "content": ". The " + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ona}" + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ova}" + }, + { + "bbox": [ + 47, + 554, + 287, + 601 + ], + "type": "text", + "content": " are used for training the two angle heads of the PBC module." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 58, + 613, + 286, + 655 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 613, + 286, + 655 + ], + "spans": [ + { + "bbox": [ + 58, + 613, + 286, + 655 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\chi^ {O H}} = \\mathcal {L} _ {\\chi^ {H}} + \\alpha_ {3} \\mathcal {L} _ {r c} + \\alpha_ {4} \\mathcal {L} _ {m h} \\\\ + \\alpha_ {5} \\mathcal {L} _ {o} + \\alpha_ {6} \\mathcal {L} _ {o n a} + \\alpha_ {7} \\mathcal {L} _ {o v a} \\tag {9} \\\\ = \\mathcal {L} _ {L F} + \\mathcal {L} _ {f} + \\alpha_ {2} \\mathcal {L} _ {h} + \\alpha_ {6} \\mathcal {L} _ {o n a} + \\alpha_ {7} \\mathcal {L} _ {o v a} \\\\ \\end{array}", + "image_path": "4399f702f6e2574bb08f6eb550f4591707276b44121337cdef822c080891d651.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 666, + 287, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 287, + 690 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 287, + 690 + ], + "type": "text", + "content": "The final hybrid loss is defined as the total loss of the three levels of training samples according to Eq. (10)." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 110, + 701, + 287, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 701, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 110, + 701, + 287, + 714 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {\\mathcal {X} ^ {N}} + \\mathcal {L} _ {\\mathcal {X} ^ {H}} + \\mathcal {L} _ {\\mathcal {X} ^ {O H}} \\tag {10}", + "image_path": "425aa3d08d12777d49499fae4ff00138e00144b47da7fb1642cf7c92916a47f5.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 72, + 436, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 436, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 436, + 84 + ], + "type": "text", + "content": "3.4. Implementation details" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 90, + 545, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 90, + 545, + 244 + ], + "spans": [ + { + "bbox": [ + 304, + 90, + 545, + 244 + ], + "type": "text", + "content": "As mentioned in Fig. 2, we use ResNet-50 [12] with FPN [23] pre-trained on the ImageNet as the backbone. All the models are trained with a batch size of 4 using NVIDIA 3090 GPUs. To align with LOFT-FOA [32], we train 24 epochs for all the models, with the learning rate starting from 0.01 and decaying by a factor of 0.1 at the " + }, + { + "bbox": [ + 304, + 90, + 545, + 244 + ], + "type": "inline_equation", + "content": "16^{th}" + }, + { + "bbox": [ + 304, + 90, + 545, + 244 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 90, + 545, + 244 + ], + "type": "inline_equation", + "content": "22^{nd}" + }, + { + "bbox": [ + 304, + 90, + 545, + 244 + ], + "type": "text", + "content": " epochs. The SGD algorithm with a weight decay of 0.0001 and a momentum of 0.9 is used for all experiments. LOFT-FOA [32] is used as the basic architecture of the MLS-BRN model, and all the hyperparameters that occur in both LOFT-FOA [32] and MLS-BRN are the same, except for the learning rate mentioned above. All models are built in PyTorch." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "spans": [ + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "text", + "content": "In Eq. (4), we set " + }, + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "inline_equation", + "content": "\\lambda_{1} = 0.1" + }, + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "text", + "content": " to balance the two loss items. In Eq. (8), we set " + }, + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "inline_equation", + "content": "\\alpha_{1} = 1" + }, + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "text", + "content": " to keep the loss weight of ROFE the same as the roof mask head, and set " + }, + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "inline_equation", + "content": "\\alpha_{2} = 32" + }, + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "text", + "content": " since the absolute building height loss value is relatively small. In Eq. 
(9), we set " + }, + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "inline_equation", + "content": "\\alpha_{3} = \\alpha_{4} = 1, \\alpha_{5} = 16" + }, + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "text", + "content": " to keep them the same as LOFT-FOA [32], and set " + }, + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "inline_equation", + "content": "\\alpha_{6} = 1, \\alpha_{7} = 8" + }, + { + "bbox": [ + 304, + 246, + 545, + 329 + ], + "type": "text", + "content": " to balance the effects of the magnitude of these two losses." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 341, + 386, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 341, + 386, + 354 + ], + "spans": [ + { + "bbox": [ + 306, + 341, + 386, + 354 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 360, + 368, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 360, + 368, + 371 + ], + "spans": [ + { + "bbox": [ + 306, + 360, + 368, + 371 + ], + "type": "text", + "content": "4.1. Datasets" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 379, + 545, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 379, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 379, + 545, + 533 + ], + "type": "text", + "content": "In our experiments, we employ multi-supervised datasets for training our methods: (1) BONAI [32] provides building footprint segmentation, offset, and height annotations, which contains 3,000 and 300 images for train-val and test respectively; (2) OmniCity-view3 [21] originally provides satellite images with annotations for footprint segmentation and building height. We add additional offset annotations for 17,092 and 4,929 images from train-val and test sets respectively; (3) Additionally, we release a new dataset named HK, which includes 500 and 119 satellite images specifically captured from Hong Kong for train-val and test sets, along with annotations for footprint segmentation, offset and height." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": "As detailed in Sec. 3, all our training samples are graded into three levels: samples from " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^N" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^H" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": ". To create different levels of training samples, we extract samples from the datasets mentioned above, reorganizing their annotations as necessary. 
We randomly choose " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": " of the samples from the BONAI dataset [32] as a smaller " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": " dataset, which we call " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "BN_{30}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": ". We randomly drop the offset annotations of " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": " of the samples in the BONAI dataset [32], regard the entire BONAI [32] dataset as a " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH} + \\mathcal{X}^H" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": " dataset, and name it " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "BN_{30/70}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": ". Similarly, the original BONAI dataset [32] is regarded as a large " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": " and is named " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "BN_{100}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": ". We use " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "OC" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": " to designate the OmniCity-view3 dataset [21]. Naturally, the abbreviations " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "OC_{30}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "OC_{30/70}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "OC_{100}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": " have the similar meaning with " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "BN_{30}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "BN_{30/70}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "BN_{100}" + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "text", + "content": " respectively. 
Moreover, we use " + }, + { + "bbox": [ + 304, + 534, + 545, + 714 + ], + "type": "inline_equation", + "content": "BH" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27732" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": "to refer to the combination of BONAI [32] and HK. It is important to note that in " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "BH_{30/70}" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " of BONAI's [32] samples are " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH}" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " type while the remaining " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^H" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " type. Additionally, " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " of HK's samples belong to " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH}" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " type and the remaining " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " belong to " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^N" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " type." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 142, + 188, + 154 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 142, + 188, + 154 + ], + "spans": [ + { + "bbox": [ + 47, + 142, + 188, + 154 + ], + "type": "text", + "content": "4.2. Performance comparison" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 160, + 287, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 160, + 287, + 256 + ], + "spans": [ + { + "bbox": [ + 46, + 160, + 287, + 256 + ], + "type": "text", + "content": "In this section, we evaluate our method's performance in footprint segmentation, offset prediction, and height prediction against several competitive methods for the single-level supervised learning scenario. 
In a Multi-level supervised learning scenario, we mainly compare our method with LOFT-FOA [32]. Additionally, we present our method's offset and off-nadir angles prediction performance. More results will be provided in the supplementary materials." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "content": "Single-level supervised learning. The performance of footprint segmentation and offset prediction for different methods trained on " + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "inline_equation", + "content": "BN_{100}" + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "inline_equation", + "content": "OC_{100}" + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "content": " are listed in Tab. 1 and Tab. 2, respectively. Additionally, Fig. 3 provides a qualitative comparison of footprint segmentation results on the BONAI [32] test set. Note that all the experimental results in this section are obtained using " + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH}" + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "content": " samples, and the results obtained using " + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^H" + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^N" + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "content": " samples will be analysed in the following paragraph. For the footprint segmentation task, experimental results tested on " + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "inline_equation", + "content": "BN_{100}" + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "content": " demonstrate that our method improves the F1-score by " + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "inline_equation", + "content": "5.42\\% - 8.30\\%" + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "content": " compared with the instance segmentation methods that directly extract the building footprints. Furthermore, our method enhances the F1-score by " + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "inline_equation", + "content": "2.05\\% - 2.76\\%" + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "content": " relative to MTBR-Net [19] and LOFT-FOA [32], which are specifically designed for extracting off-nadir building footprints based on predicted roof and offset, tested on " + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "inline_equation", + "content": "BN_{100}" + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "content": ". 
Regarding the offset prediction task, our experimental findings indicate that our approach betters the EPE by 0.18 - 0.93 in comparison to MTBR-Net [19] and LOFT-FOA [32] tested on " + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "inline_equation", + "content": "BN_{100}" + }, + { + "bbox": [ + 46, + 258, + 287, + 568 + ], + "type": "text", + "content": ". The results show that the direct supervision of the footprint segmentation, the constraint on the building height, and the encouragement of the angular feature extraction can help to achieve better performance in the footprint segmentation and offset prediction tasks in the single-level supervised learning scenario." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 75, + 581, + 260, + 668 + ], + "blocks": [ + { + "bbox": [ + 75, + 581, + 260, + 668 + ], + "lines": [ + { + "bbox": [ + 75, + 581, + 260, + 668 + ], + "spans": [ + { + "bbox": [ + 75, + 581, + 260, + 668 + ], + "type": "table", + "html": "
methodF1PrecisionRecallEPE
PANet [17]58.0659.2656.91-
HRNetv2 [30]60.8161.2060.42-
M R-CNN [13]58.1259.2657.03-
CM R-CNN [1]60.9467.0955.83-
MTBR-Net [19]63.6064.3462.875.69
LOFT-FOA [32]64.3163.3765.294.94
Ours66.3665.9066.834.76
", + "image_path": "004c1992528d9ca91b2c900e335ff8100a31d27139f945d70e0c784465aa879f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 317, + 70, + 537, + 129 + ], + "blocks": [ + { + "bbox": [ + 46, + 677, + 287, + 709 + ], + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 709 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 709 + ], + "type": "text", + "content": "Table 1. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE trained on " + }, + { + "bbox": [ + 46, + 677, + 287, + 709 + ], + "type": "inline_equation", + "content": "BN_{100}" + }, + { + "bbox": [ + 46, + 677, + 287, + 709 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 70, + 537, + 129 + ], + "lines": [ + { + "bbox": [ + 317, + 70, + 537, + 129 + ], + "spans": [ + { + "bbox": [ + 317, + 70, + 537, + 129 + ], + "type": "table", + "html": "
methodF1PrecisionRecallEPE
M R-CNN [13]69.7569.7469.76-
LOFT-FOA [32]70.4668.7772.236.08
Ours72.2569.5775.145.38
", + "image_path": "a48e93cbac87a30de3c80d8d64c776a7e53fd4d4273a1f7c68db22d9ff7d0494.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 193, + 545, + 442 + ], + "blocks": [ + { + "bbox": [ + 307, + 193, + 545, + 442 + ], + "lines": [ + { + "bbox": [ + 307, + 193, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 307, + 193, + 545, + 442 + ], + "type": "image", + "image_path": "4256aa5620a724c611023dfd0b2d6cf2b2e317d7b1b0eb38edda2feadea7f029.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 452, + 545, + 495 + ], + "lines": [ + { + "bbox": [ + 305, + 452, + 545, + 495 + ], + "spans": [ + { + "bbox": [ + 305, + 452, + 545, + 495 + ], + "type": "text", + "content": "Figure 3. The results of the baselines and our method trained on " + }, + { + "bbox": [ + 305, + 452, + 545, + 495 + ], + "type": "inline_equation", + "content": "BN_{100}" + }, + { + "bbox": [ + 305, + 452, + 545, + 495 + ], + "type": "text", + "content": " and tested on the BONAI test set in terms of the footprint segmentation performance. The yellow, cyan, and red polygons denote the TP, FP, and FN." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 309, + 520, + 545, + 668 + ], + "blocks": [ + { + "bbox": [ + 305, + 136, + 545, + 169 + ], + "lines": [ + { + "bbox": [ + 305, + 136, + 545, + 169 + ], + "spans": [ + { + "bbox": [ + 305, + 136, + 545, + 169 + ], + "type": "text", + "content": "Table 2. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE trained on " + }, + { + "bbox": [ + 305, + 136, + 545, + 169 + ], + "type": "inline_equation", + "content": "OC_{100}" + }, + { + "bbox": [ + 305, + 136, + 545, + 169 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 520, + 545, + 668 + ], + "lines": [ + { + "bbox": [ + 309, + 520, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 309, + 520, + 545, + 668 + ], + "type": "table", + "html": "
methoddatasetsampleF1-scoreEPE
LOFT-FOA [32]BN30XOH61.355.70
OursBN30/70XOH+XH65.495.39
LOFT-FOA [32]BN100XOH64.314.94
OursBN100XOH66.364.76
LOFT-FOA [32]OC30XOH67.096.08
OursOC30/70XOH+XH70.535.92
LOFT-FOA [32]OC100XOH70.465.38
OursOC100XOH72.255.38
LOFT-FOA [32]BH30XOH54.965.78
OursBH30/70XOH+XH+XN58.575.60
LOFT-FOA [32]BH100XOH60.854.74
OursBH100XOH60.924.69
", + "image_path": "86e9236639ba8ef560f0507f155dffc8b476818489bed2e4a535b0b18c8e1ede.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 677, + 545, + 709 + ], + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 709 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 709 + ], + "type": "text", + "content": "Table 3. Building footprint segmentation results of different methods in terms of F1-score (\\%) and offset prediction results in terms of EPE trained on different datasets." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27733" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": "Multi-level supervised learning. Tab. 3 displays the footprint segmentation and offset prediction performance of LOFT-FOA [32] and our method when trained and tested on multi-level supervision datasets. Our approach's experiment outcomes, trained on " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "BN_{30/70}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "OC_{30/70}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "BH_{30/70}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": ", demonstrate a " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "4.14\\%" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "3.44\\%" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "3.61\\%" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": " improvement in F1-score compared to LOFT-FOA [32] trained on " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "BN_{30}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "OC_{30}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "BH_{30}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": ". 
Additionally, our method's experimental results, trained on samples from " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "BN_{30/70}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "OC_{30/70}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "BH_{30/70}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": " exhibit similar performance to LOFT-FOA [32], which is trained on samples from " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "BN_{100}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "OC_{100}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "BH_{100}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": ". These findings demonstrate the effectiveness of MLS-BRN in combining samples from " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH}" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^H" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^N" + }, + { + "bbox": [ + 47, + 72, + 289, + 239 + ], + "type": "text", + "content": " levels to address the building reconstruction task." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 239, + 289, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 289, + 479 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 289, + 479 + ], + "type": "text", + "content": "Building height and angles prediction. Tab. 4 displays the results of building height prediction performance. The experimental findings indicate that our method enhances the height MAE by 0.22 - 4.33 and the height RMSE by 0.51 - 7.60 in comparison to SARPN [2], DORN [7], and LOFT-FOA+H. It's worth noting that SARPN [2], DORN [7] predicts pixel-wise building height, and MSL-BRN predicts instance-wise building height. As far as we know, MSL-BRN is the first-ever method to predict instance-wise real-world building height. Thus, we add a building height head directly to LOFT-FOA [32] (i.e. LOFT-FOA+H) and compare its prediction results with our own method. Fig. 4 presents the qualitative building height prediction results from our method and LOFT-FOA+H. Regarding the angle prediction tasks, when trained on " + }, + { + "bbox": [ + 47, + 239, + 289, + 479 + ], + "type": "inline_equation", + "content": "BN_{100}" + }, + { + "bbox": [ + 47, + 239, + 289, + 479 + ], + "type": "text", + "content": ", the PBC module results in an MAE of 9.92 for offset angle prediction and an MAE of 1.22 for off-nadir angle prediction. 
The performance increase demonstrates the efficacy of the PBC, ROFE, and the building height prediction module in a single-level supervised learning scenario." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 86, + 487, + 249, + 548 + ], + "blocks": [ + { + "bbox": [ + 86, + 487, + 249, + 548 + ], + "lines": [ + { + "bbox": [ + 86, + 487, + 249, + 548 + ], + "spans": [ + { + "bbox": [ + 86, + 487, + 249, + 548 + ], + "type": "table", + "html": "
methodheight MAEheight RMSE
SARPN [2]15.2328.69
DORN [7]13.4027.03
LOFT-FOA+H11.1221.60
Ours10.9021.09
", + "image_path": "c7a2aac3827cc9653346b778e97806ea835a730d9d92c82ca5bc7d0945522d58.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 74, + 609, + 261, + 669 + ], + "blocks": [ + { + "bbox": [ + 46, + 555, + 288, + 590 + ], + "lines": [ + { + "bbox": [ + 46, + 555, + 288, + 590 + ], + "spans": [ + { + "bbox": [ + 46, + 555, + 288, + 590 + ], + "type": "text", + "content": "Table 4. Building height prediction results of different methods in terms of MAE and MSE trained on " + }, + { + "bbox": [ + 46, + 555, + 288, + 590 + ], + "type": "inline_equation", + "content": "OC_{100}" + }, + { + "bbox": [ + 46, + 555, + 288, + 590 + ], + "type": "text", + "content": " and tested on the OmniCity-view3 test set." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 74, + 609, + 261, + 669 + ], + "lines": [ + { + "bbox": [ + 74, + 609, + 261, + 669 + ], + "spans": [ + { + "bbox": [ + 74, + 609, + 261, + 669 + ], + "type": "table", + "html": "
methodF1-scorePrecisionRecallEPE
baseline61.3561.8461.655.70
+PBC62.3262.2862.355.53
+ROFE62.8763.8962.155.63
+PBC+ROFE65.4066.7464.125.49
", + "image_path": "845c322b3c0a2bd0a627f6f754de5452a7686ba6b02c3dcfdb3b0e2859804871.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 677, + 288, + 711 + ], + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 711 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 711 + ], + "type": "text", + "content": "Table 5. Footprint segmentation results of different modules in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 339, + 69, + 514, + 242 + ], + "blocks": [ + { + "bbox": [ + 339, + 69, + 514, + 242 + ], + "lines": [ + { + "bbox": [ + 339, + 69, + 514, + 242 + ], + "spans": [ + { + "bbox": [ + 339, + 69, + 514, + 242 + ], + "type": "image", + "image_path": "2b98b8cbe3821137140de9b550528a797c021425e10653772dbd86d93fbb2c32.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 250, + 547, + 283 + ], + "lines": [ + { + "bbox": [ + 304, + 250, + 547, + 283 + ], + "spans": [ + { + "bbox": [ + 304, + 250, + 547, + 283 + ], + "type": "text", + "content": "Figure 4. The visualization results of building height prediction from our method and LOFT-FOA+H on the OmniCity-view3 test set." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 309, + 398, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 309, + 398, + 323 + ], + "spans": [ + { + "bbox": [ + 305, + 309, + 398, + 323 + ], + "type": "text", + "content": "4.3. Ablation study" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 329, + 545, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 329, + 545, + 389 + ], + "spans": [ + { + "bbox": [ + 304, + 329, + 545, + 389 + ], + "type": "text", + "content": "In this section, we examine the impact of the principal new components of our method: (1) the PBC module; (2) the ROFE module; and (3) the building height head. Additionally, we will analyze the outcome of the data ablation experiment in the multi-level supervised learning setting." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "spans": [ + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "text", + "content": "Module ablation. The outcomes acquired by implementing the aforementioned modules successively on " + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "inline_equation", + "content": "BN_{30/70}" + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "text", + "content": " are detailed in Tab. 5. The table provides information on F1-score for footprint segmentation and EPE for offset prediction. LOFT-FOA [32] is trained on " + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "inline_equation", + "content": "BN_{30}" + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "text", + "content": " and serves as the baseline. The second row (+PBC) illustrates the results obtained by applying the PBC module to LOFT-FOA [32]. 
The results indicate that incorporating the two-angle prediction tasks enhances the F1-score of the footprint extraction by " + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "inline_equation", + "content": "0.97\\%" + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "text", + "content": ". It should be noted that the added offset-unknown " + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "text", + "content": " samples in " + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "inline_equation", + "content": "BN_{30/70}" + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "text", + "content": ", which lacks angle ground truth, does not contribute to PBC's training. The third row (+ROFE) displays the outcomes achieved by applying the ROFE module to LOFT-FOA [32]. Results demonstrate that, compared with the baseline, prediction of the footprint segmentation guided by predicted offset and roof, coupled with additional " + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "text", + "content": " offset-unknown samples from " + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "inline_equation", + "content": "BN_{30/70}" + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "text", + "content": ", leads to a " + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "inline_equation", + "content": "1.52\\%" + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "text", + "content": " improvement in the F1-score. The fourth row (+PBC+ROFE) indicates that the simultaneous inclusion of the PBC and ROFE modules can improve the F1-score of the footprint extraction by " + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "inline_equation", + "content": "4.05\\%" + }, + { + "bbox": [ + 304, + 389, + 546, + 675 + ], + "type": "text", + "content": ". The aforementioned results show that PBC and ROFE modules can help to enhance the accuracy of footprint segmentation and offset prediction." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "content": "Data ablation. 
The outcomes of our approach trained on various dataset combinations concerning F1-score for footprint segmentation, and EPE for offset prediction are" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "27734" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 94, + 71, + 194, + 173 + ], + "blocks": [ + { + "bbox": [ + 94, + 71, + 194, + 173 + ], + "lines": [ + { + "bbox": [ + 94, + 71, + 194, + 173 + ], + "spans": [ + { + "bbox": [ + 94, + 71, + 194, + 173 + ], + "type": "image", + "image_path": "d09418cf9fe9a336ba00b7358533bf0ad5d0e0bc36e680a03651d73b91b781b0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 195, + 72, + 295, + 172 + ], + "blocks": [ + { + "bbox": [ + 195, + 72, + 295, + 172 + ], + "lines": [ + { + "bbox": [ + 195, + 72, + 295, + 172 + ], + "spans": [ + { + "bbox": [ + 195, + 72, + 295, + 172 + ], + "type": "image", + "image_path": "bbdf1f274d2dae90c301224ae4326d94fef0b200c52dcf87e089cc7e0019a5f3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 297, + 72, + 398, + 173 + ], + "blocks": [ + { + "bbox": [ + 297, + 72, + 398, + 173 + ], + "lines": [ + { + "bbox": [ + 297, + 72, + 398, + 173 + ], + "spans": [ + { + "bbox": [ + 297, + 72, + 398, + 173 + ], + "type": "image", + "image_path": "b77dffcb8e12fbe8d10f207bd4c4afe040fd186c7c2cc12ed37a307ed8cdc271.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 399, + 72, + 500, + 173 + ], + "blocks": [ + { + "bbox": [ + 399, + 72, + 500, + 173 + ], + "lines": [ + { + "bbox": [ + 399, + 72, + 500, + 173 + ], + "spans": [ + { + "bbox": [ + 399, + 72, + 500, + 173 + ], + "type": "image", + "image_path": "628fe83b128a2348c00d3d6176c870c4830402d97500f2e9f195303bf3641fb7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 93, + 182, + 194, + 262 + ], + "blocks": [ + { + "bbox": [ + 93, + 182, + 194, + 262 + ], + "lines": [ + { + "bbox": [ + 93, + 182, + 194, + 262 + ], + "spans": [ + { + "bbox": [ + 93, + 182, + 194, + 262 + ], + "type": "image", + "image_path": "7a01ae5ddb52c7b543be1e6ad8cb89c54a8844c178546b5b9cec74b89e489023.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 125, + 266, + 159, + 276 + ], + "lines": [ + { + "bbox": [ + 125, + 266, + 159, + 276 + ], + "spans": [ + { + "bbox": [ + 125, + 266, + 159, + 276 + ], + "type": "text", + "content": "Shanghai" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 196, + 190, + 296, + 261 + ], + "blocks": [ + { + "bbox": [ + 196, + 190, + 296, + 261 + ], + "lines": [ + { + "bbox": [ + 196, + 190, + 296, + 261 + ], + "spans": [ + { + "bbox": [ + 196, + 190, + 296, + 261 + ], + "type": "image", + "image_path": "0daf4c90c98acb9c43668a1321fcdc6a89f44ad8e224348fac6b356a29c04a65.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 
233, + 266, + 254, + 275 + ], + "lines": [ + { + "bbox": [ + 233, + 266, + 254, + 275 + ], + "spans": [ + { + "bbox": [ + 233, + 266, + 254, + 275 + ], + "type": "text", + "content": "Xi'an" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 298, + 180, + 398, + 261 + ], + "blocks": [ + { + "bbox": [ + 298, + 180, + 398, + 261 + ], + "lines": [ + { + "bbox": [ + 298, + 180, + 398, + 261 + ], + "spans": [ + { + "bbox": [ + 298, + 180, + 398, + 261 + ], + "type": "image", + "image_path": "f4981fd0461ca0f0987164acf84c46b7c4488af7a04d5697eda8e8bae4450307.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 325, + 266, + 367, + 277 + ], + "lines": [ + { + "bbox": [ + 325, + 266, + 367, + 277 + ], + "spans": [ + { + "bbox": [ + 325, + 266, + 367, + 277 + ], + "type": "text", + "content": "Hong Kong" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 287, + 547, + 321 + ], + "lines": [ + { + "bbox": [ + 46, + 287, + 547, + 321 + ], + "spans": [ + { + "bbox": [ + 46, + 287, + 547, + 321 + ], + "type": "text", + "content": "Figure 5. 3D reconstruction results of Shanghai, Xi'an, Hong Kong, and New York obtained using our method. The remote sensing images for Shanghai and Xi'an are chosen from the BONAI test set, whereas the remote sensing image for New York is chosen from the OmniCity-view3 test set." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 400, + 182, + 501, + 261 + ], + "blocks": [ + { + "bbox": [ + 400, + 182, + 501, + 261 + ], + "lines": [ + { + "bbox": [ + 400, + 182, + 501, + 261 + ], + "spans": [ + { + "bbox": [ + 400, + 182, + 501, + 261 + ], + "type": "image", + "image_path": "dbb35c1ef77098f9dcb0791f28b97b38d1cd99ea818f79145c1e493594bb5c4f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 430, + 266, + 466, + 275 + ], + "lines": [ + { + "bbox": [ + 430, + 266, + 466, + 275 + ], + "spans": [ + { + "bbox": [ + 430, + 266, + 466, + 275 + ], + "type": "text", + "content": "New York" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "spans": [ + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": "shown in Tab. 6. The first line " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "(\\mathcal{X}^{OH})" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " displays the results of training LOFT-FOA [32] on " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " of OmniCity-view3 [21] " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " samples " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "(OC_{30})" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": ". 
The second row " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "(\\mathcal{X}^{OH} + \\mathcal{X}^{H})" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " shows the results of our method trained on a mix of " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " of OmniCity-view3 [21] " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " samples " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "(OC_{30})" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " of the OmniCity-view3 " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{H}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " samples. The results demonstrate a " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "3.28\\%" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " improvement in F1-score for footprint extraction compared to LOFT-FOA [32] trained solely on " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "OC_{30}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": ". The third row " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "(\\mathcal{X}^{OH} + \\mathcal{X}^{H} + \\mathcal{X}^{N})" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " presents the outcomes of our methodology, trained on a mix of " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " of OmniCity-view3 [21] " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " samples, " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " of OmniCity-view3 [21] " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{H}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " samples, and the rest " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " of OmniCity-view3 [21] " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{N}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " samples. 
The results demonstrate a " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "0.44\\%" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " increase in F1-score compared to our method trained on " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{OH} + \\mathcal{X}^{H}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": ", indicating the effectiveness of including " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{N}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " samples. The reason for training LOFT-FOA [32] instead of our method on " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "OC_{30}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " (first row) is to evaluate the gain in a scenario where " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{H}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{N}" + }, + { + "bbox": [ + 46, + 341, + 289, + 559 + ], + "type": "text", + "content": " samples are available by using our method." + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 75, + 566, + 260, + 617 + ], + "blocks": [ + { + "bbox": [ + 75, + 566, + 260, + 617 + ], + "lines": [ + { + "bbox": [ + 75, + 566, + 260, + 617 + ], + "spans": [ + { + "bbox": [ + 75, + 566, + 260, + 617 + ], + "type": "table", + "html": "
dataF1PrecisionRecallEPE
XOH67.0963.2371.476.08
XOH+XH70.3765.3576.245.99
XOH+XH+XN70.8166.1576.185.84
", + "image_path": "c72e98212733f346370b487ed341318320e2225621a12f91e20d6dc3cc41982a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 624, + 288, + 658 + ], + "lines": [ + { + "bbox": [ + 46, + 624, + 288, + 658 + ], + "spans": [ + { + "bbox": [ + 46, + 624, + 288, + 658 + ], + "type": "text", + "content": "Table 6. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\\%) and offset prediction results in terms of EPE trained on different dataset combinations." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 671, + 270, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 671, + 270, + 683 + ], + "spans": [ + { + "bbox": [ + 47, + 671, + 270, + 683 + ], + "type": "text", + "content": "4.4. 3D reconstruction results of different cities" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "content": "Fig. 5 shows the 3D reconstruction results of four cities (i.e. Shanghai, Xi'an, Hong Kong, and New York) obtained from" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 342, + 547, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 547, + 392 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 547, + 392 + ], + "type": "text", + "content": "our method. The results demonstrate the effectiveness of our method on 3D building reconstruction across different cities. Note that we use the method in [38] to regularize the predicted building footprint masks." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 401, + 379, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 401, + 379, + 414 + ], + "spans": [ + { + "bbox": [ + 306, + 401, + 379, + 414 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 420, + 545, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 420, + 545, + 660 + ], + "spans": [ + { + "bbox": [ + 304, + 420, + 545, + 660 + ], + "type": "text", + "content": "In this paper, we have presented a new method for multi-level supervised building reconstruction from monocular remote sensing images, which is capable of reconstructing the accurate 3D building models using samples of different annotation levels. Qualitative and quantitative evaluations confirm that our method achieves competitive performance and significantly enhances the 3D building reconstruction capability in comparison to the current state-of-the-art across diverse experimental settings. The effect of the Pseudo Building Bbox Calculator and the Roof-Offset guided Footprint Extractor, as well as the annotation levels of the samples were also analyzed in the ablation study. Furthermore, we expanded the monocular building reconstruction datasets to encompass additional cities. We believe that our approach offers efficient and cost-effective solutions for 3D building reconstruction in complex real-world scenes. In our future work, we would like to investigate more effective strategies to improve the 3D building reconstruction performance whilst exploring more adaptable and practical techniques for large-scale city modeling." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 660, + 547, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 660, + 547, + 708 + ], + "spans": [ + { + "bbox": [ + 304, + 660, + 547, + 708 + ], + "type": "text", + "content": "Acknowledgements. This project was funded in part by National Natural Science Foundation of China (Grant No. 42201358 and No. 62325111) and Shanghai Artificial Intelligence Laboratory." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "27735" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Kai Chen, Jiangmiao Pang, Jiaqi Wang, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jianping Shi, Wanli Ouyang, et al. Hybrid task cascade for instance segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4974-4983, 2019. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 147, + 288, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 147, + 288, + 191 + ], + "spans": [ + { + "bbox": [ + 53, + 147, + 288, + 191 + ], + "type": "text", + "content": "[2] Xiaotian Chen, Xuejin Chen, and Zheng-Jun Zha. Structure-aware residual pyramid network for monocular depth estimation. In Proceedings of the 28th International Joint Conference on Artificial Intelligence, pages 694-700, 2019. 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 192, + 288, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 192, + 288, + 246 + ], + "spans": [ + { + "bbox": [ + 53, + 192, + 288, + 246 + ], + "type": "text", + "content": "[3] Yujin Chen, Zhigang Tu, Liuhao Ge, Dejun Zhang, Ruizhi Chen, and Junsong Yuan. So-handnet: Self-organizing network for 3d hand pose estimation with semi-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6961–6970, 2019. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 247, + 288, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 247, + 288, + 312 + ], + "spans": [ + { + "bbox": [ + 53, + 247, + 288, + 312 + ], + "type": "text", + "content": "[4] Gordon Christie, Rodrigo Rene Rai Munoz Abujder, Kevin Foster, Shea Hagstrom, Gregory D Hager, and Myron Z Brown. Learning geocentric object pose in oblique monocular images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14512-14520, 2020. 
1, 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 314, + 288, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 314, + 288, + 367 + ], + "spans": [ + { + "bbox": [ + 53, + 314, + 288, + 367 + ], + "type": "text", + "content": "[5] Gordon Christie, Kevin Foster, Shea Hagstrom, Gregory D Hager, and Myron Z Brown. Single view geocentric pose in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1162-1171, 2021. 1, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 369, + 287, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 369, + 287, + 402 + ], + "spans": [ + { + "bbox": [ + 53, + 369, + 287, + 402 + ], + "type": "text", + "content": "[6] Liuyun Duan and Florent Lafarge. Towards large-scale city reconstruction from satellites. In European Conference on Computer Vision (ECCV), 2016. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 403, + 288, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 403, + 288, + 457 + ], + "spans": [ + { + "bbox": [ + 53, + 403, + 288, + 457 + ], + "type": "text", + "content": "[7] Huan Fu, Mingming Gong, Chaohui Wang, Kayhan Bat-manghelich, and Dacheng Tao. Deep ordinal regression network for monocular depth estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2002-2011, 2018. 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 458, + 287, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 458, + 287, + 513 + ], + "spans": [ + { + "bbox": [ + 53, + 458, + 287, + 513 + ], + "type": "text", + "content": "[8] Zhi Gao, Wenbo Sun, Yao Lu, Yichen Zhang, Weiwei Song, Yongjun Zhang, and Ruifang Zhai. Joint learning of semantic segmentation and height estimation for remote sensing image leveraging contrastive learning. IEEE Transactions on Geoscience and Remote Sensing, 2023. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 514, + 287, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 514, + 287, + 556 + ], + "spans": [ + { + "bbox": [ + 53, + 514, + 287, + 556 + ], + "type": "text", + "content": "[9] Pedram Ghamisi and Naoto Yokoya. Img2dsm: Height simulation from single imagery using conditional generative adversarial net. IEEE Geoence Remote Sensing Letters, pages 1-5, 2018. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "type": "text", + "content": "[10] JunYoung Gwak, Christopher B Choy, Manmohan Chandraker, Animesh Garg, and Silvio Savarese. Weakly supervised 3d reconstruction with adversarial constraint. In 2017 International Conference on 3D Vision (3DV), pages 263-272. IEEE, 2017. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 613, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 668 + ], + "type": "text", + "content": "[11] Junwei Han, Yang Yang, Dingwen Zhang, Dong Huang, Dong Xu, and Fernando De La Torre. Weakly-supervised learning of category-specific 3d object shapes. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(4): 1423-1437, 2021. 
3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "type": "text", + "content": "[12] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[13] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision (CVPR), pages 2961-2969, 2017. 4, 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "type": "text", + "content": "[14] Rongrong Ji, Ke Li, Yan Wang, Xiaoshuai Sun, Feng Guo, Xiaowei Guo, Yongjian Wu, Feiyue Huang, and Jiebo Luo. Semi-supervised adversarial monocular depth estimation. IEEE transactions on pattern analysis and machine intelligence, 42(10):2410-2422, 2019. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 175, + 545, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 218 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 218 + ], + "type": "text", + "content": "[15] Saket Kunwar. U-net ensemble for semantic and height estimation using coarse-map initialization. In IGARSS 2019-2019 IEEE International Geoscience and Remote Sensing Symposium, pages 4959-4962. IEEE, 2019. 1, 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 220, + 545, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 283 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 283 + ], + "type": "text", + "content": "[16] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. Robust model-based face reconstruction through weakly-supervised outlier segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 372–381, 2023. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 286, + 545, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 286, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 307, + 286, + 545, + 330 + ], + "type": "text", + "content": "[17] Muxingzi Li, Florent Lafarge, and Renaud Marlet. Approximating shapes in images with low-complexity polygons. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 332, + 545, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 332, + 545, + 386 + ], + "spans": [ + { + "bbox": [ + 307, + 332, + 545, + 386 + ], + "type": "text", + "content": "[18] Qingyu Li, Lichao Mou, Yuansheng Hua, Yilei Shi, Sining Chen, Yao Sun, and Xiao Xiang Zhu. 
3dcentripetalnet: Building height retrieval from monocular remote sensing imagery. International Journal of Applied Earth Observation and Geoinformation, 120:103311, 2023. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 388, + 545, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 442 + ], + "type": "text", + "content": "[19] Weijia Li, Lingxuan Meng, Jinwang Wang, Conghui He, Gui-Song Xia, and Dahua Lin. 3d building reconstruction from monocular remote sensing images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12548-12557, 2021. 1, 2, 3, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 444, + 545, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 444, + 545, + 476 + ], + "spans": [ + { + "bbox": [ + 307, + 444, + 545, + 476 + ], + "type": "text", + "content": "[20] Weijia Li, Wenqian Zhao, Huaping Zhong, Conghui He, and Dahua Lin. Joint semantic-geometric learning for polygonal building segmentation. In AAAI, 2021. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 478, + 545, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 478, + 545, + 543 + ], + "spans": [ + { + "bbox": [ + 307, + 478, + 545, + 543 + ], + "type": "text", + "content": "[21] Weijia Li, Yawen Lai, Linning Xu, Yuanbo Xiangli, Jinhua Yu, Conghui He, Gui-Song Xia, and Dahua Lin. Omnicity: Omnipotent city understanding with multi-level and multiview images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17397-17407, 2023. 5, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "spans": [ + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "type": "text", + "content": "[22] Zuoyue Li, Jan Dirk Wegner, and Aurélien Lucchi. Topological map extraction from overhead images. In Proceedings of the IEEE International Conference on Computer Vision (CVPR), pages 1715-1724, 2019. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "type": "text", + "content": "[23] Tsung-Yi Lin, Piotr Dólar, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 2117-2125, 2017. 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 647, + 545, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 690 + ], + "type": "text", + "content": "[24] Jisan Mahmud, True Price, Akash Bapat, and Jan Michael Frahm. Boundary-aware 3d building reconstruction from a single overhead image. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 
1, 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 692, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 692, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 692, + 545, + 713 + ], + "type": "text", + "content": "[25] Yongqiang Mao, Kaiqiang Chen, Liangjin Zhao, Wei Chen, Deke Tang, Wenjie Liu, Zhirui Wang, Wenhui Diao, Xian" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "27736" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "text", + "content": "Sun, and Kun Fu. Elevation estimation-driven building 3d reconstruction from single-view remote sensing imagery. IEEE Transactions on Geoscience and Remote Sensing, 2023. 1, 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 108, + 288, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 108, + 288, + 163 + ], + "spans": [ + { + "bbox": [ + 48, + 108, + 288, + 163 + ], + "type": "text", + "content": "[26] Rahul Mitra, Nitesh B Gundavarapu, Abhishek Sharma, and Arjun Jain. Multiview-consistent semi-supervised learning for 3d human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6907-6916, 2020. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 165, + 288, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 165, + 288, + 209 + ], + "spans": [ + { + "bbox": [ + 48, + 165, + 288, + 209 + ], + "type": "text", + "content": "[27] Natalia Neverova, Christian Wolf, Florian Nebout, and Graham W Taylor. Hand pose estimation through semi-supervised and weakly-supervised learning. Computer Vision and Image Understanding, 164:56-67, 2017. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 211, + 287, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 211, + 287, + 266 + ], + "spans": [ + { + "bbox": [ + 48, + 211, + 287, + 266 + ], + "type": "text", + "content": "[28] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234-241. Springer, 2015. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 268, + 287, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 268, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 48, + 268, + 287, + 312 + ], + "type": "text", + "content": "[29] Shivangi Srivastava, Michele Volpi, and Devis Tuia. Joint height estimation and semantic labeling of monocular aerial images with cnns. In Igarss IEEE International Geoscience Remote Sensing Symposium, 2017. 
1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 315, + 287, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 315, + 287, + 367 + ], + "spans": [ + { + "bbox": [ + 48, + 315, + 287, + 367 + ], + "type": "text", + "content": "[30] Ke Sun, Yang Zhao, Borui Jiang, Tianheng Cheng, Bin Xiao, Dong Liu, Yadong Mu, Xinggang Wang, Wenyu Liu, and Jingdong Wang. High-resolution representations for labeling pixels and regions. arXiv preprint arXiv:1904.04514, 2019. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 371, + 287, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 371, + 287, + 415 + ], + "spans": [ + { + "bbox": [ + 48, + 371, + 287, + 415 + ], + "type": "text", + "content": "[31] Vivek Verma, Rakesh Kumar, and Stephen Hsu. 3d building detection and modeling from aerial lidar data. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2006. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 417, + 287, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 417, + 287, + 471 + ], + "spans": [ + { + "bbox": [ + 48, + 417, + 287, + 471 + ], + "type": "text", + "content": "[32] Jinwang Wang, Lingxuan Meng, Weijia Li, Wen Yang, Lei Yu, and Gui-Song Xia. Learning to extract building footprints from off-nadir aerial images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(1):1294-1301, 2022. 1, 2, 3, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 474, + 287, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 474, + 287, + 518 + ], + "spans": [ + { + "bbox": [ + 48, + 474, + 287, + 518 + ], + "type": "text", + "content": "[33] Zhitong Xiong, Wei Huang, Jingtao Hu, and Xiao Xiang Zhu. The benchmark: Transferable representation learning for monocular height estimation. IEEE Transactions on Geoscience and Remote Sensing, 2023. 1, 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 520, + 287, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 520, + 287, + 564 + ], + "spans": [ + { + "bbox": [ + 48, + 520, + 287, + 564 + ], + "type": "text", + "content": "[34] Guandao Yang, Yin Cui, Serge Belongie, and Bharath Hariharan. Learning single-view 3d reconstruction with limited pose supervision. In Proceedings of the European Conference on Computer Vision (ECCV), pages 86-101, 2018. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 567, + 287, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 287, + 609 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 287, + 609 + ], + "type": "text", + "content": "[35] Jiangye Yuan. Learning building extraction in aerial scenes with convolutional networks. IEEE transactions on pattern analysis and machine intelligence, 40(11):2793-2798, 2017. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 612, + 287, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 612, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 612, + 287, + 656 + ], + "type": "text", + "content": "[36] Wufan Zhao, Claudio Persello, and Alfred Stein. Building outline delineation: From aerial images to polygons with an improved end-to-end learning framework. ISPRS journal of photogrammetry and remote sensing, 175:119-131, 2021. 
2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[37] Zhuo Zheng, Yanfei Zhong, and Junjue Wang. Pop-net: Encoder-dual decoder for semantic segmentation and single-view height estimation. In IGARSS 2019-2019 IEEE International Geoscience and Remote Sensing Symposium, pages 4963-4966. IEEE, 2019. 1, 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 72, + 546, + 184 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 307, + 72, + 546, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 72, + 546, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 546, + 127 + ], + "type": "text", + "content": "[38] Stefano Zorzi, Ksenia Bittner, and Friedrich Fraundorfer. Machine-learned regularization and polygonization of building segmentation masks. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 3098–3105. IEEE, 2021. 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 129, + 546, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 546, + 184 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 546, + 184 + ], + "type": "text", + "content": "[39] Stefano Zorzi, Shabab Bazrafkan, Stefan Habenschuss, and Friedrich Fraundorfer. *Polyworld: Polygonal building extraction with graph neural networks in satellite images*. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1848-1857, 2022. 2" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "27737" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/29911afb-57cf-4105-bc3b-b432a117add8_content_list.json b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/29911afb-57cf-4105-bc3b-b432a117add8_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9369280b35da46fff95617518ac7a36f5b124c17 --- /dev/null +++ b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/29911afb-57cf-4105-bc3b-b432a117add8_content_list.json @@ -0,0 +1,1829 @@ +[ + { + "type": "text", + "text": "3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation", + "text_level": 1, + "bbox": [ + 197, + 130, + 772, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zidu Wang $^{1,2}$ , Xiangyu Zhu $^{1,2*}$ , Tianshuo Zhang $^{1,2}$ , Baiqin Wang $^{1,2}$ , Zhen Lei $^{1,2,3}$", + "bbox": [ + 163, + 202, + 803, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation, Chinese Academy of Sciences", + "bbox": [ + 212, + 220, + 756, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ School of 
Artificial Intelligence, University of Chinese Academy of Sciences", + "bbox": [ + 176, + 256, + 792, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3 Centre for Artificial Intelligence and Robotics, Hong Kong Institute of Science & Innovation, Chinese Academy of Sciences", + "bbox": [ + 109, + 273, + 857, + 308 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{wangzidu2022, wangbaiqin2024}@ia.ac.cn,{xiangyu.zhu, tianshuo.zhang, zlei}@nlpr.ia.ac.cn", + "bbox": [ + 93, + 311, + 877, + 327 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 361, + 313, + 378 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D Morphable Models (3DMMs) provide promising 3D face reconstructions in various applications. However, existing methods struggle to reconstruct faces with extreme expressions due to deficiencies in supervisory signals, such as sparse or inaccurate landmarks. Segmentation information contains effective geometric contexts for face reconstruction. Certain attempts intuitively depend on differentiable renderers to compare the rendered silhouettes of reconstruction with segmentation, which is prone to issues like local optima and gradient instability. In this paper, we fully utilize the facial part segmentation geometry by introducing Part Re-projection Distance Loss (PRDL). Specifically, PRDL transforms facial part segmentation into 2D points and re-projects the reconstruction onto the image plane. Subsequently, by introducing grid anchors and computing different statistical distances from these anchors to the point sets, PRDL establishes geometry descriptors to optimize the distribution of the point sets for face reconstruction. PRDL exhibits a clear gradient compared to the renderer-based methods and presents state-of-the-art reconstruction performance in extensive quantitative and qualitative experiments. Our project is available at https://github.com/wang-zidu/3DDFA-V3.", + "bbox": [ + 75, + 393, + 473, + 741 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 772, + 209, + 787 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Reconstructing 3D faces from 2D images is an essential task in computer vision and graphics, finding diverse applications in fields such as Virtual Reality (VR), Augmented Reality (AR), and Computer-generated Imagery (CGI), etc. In applications like VR makeup and AR emoji, 3DMMs", + "bbox": [ + 75, + 797, + 468, + 875 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d6637cfd28f40026409df73e4475d5cc70b7d2fa6f410fb461882c562ae98f02.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 361, + 624, + 454 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/98ab93bdca526f27de508e1d5f90995e02eb38b906fe0b041eb360221623ef89.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 361, + 754, + 454 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/252cd538d0639398cd84839dafd613e9906a1a8a4e04fa765e8c37a5eee7e369.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 767, + 361, + 888, + 454 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/69fa143e20d25b4688b2fdb83092ff9d68b5b6dad5c8745f4b1c700df16992da.jpg", + "image_caption": [ + "Figure 1. 
We introduce Part Re-projection Distance Loss (PRDL) for 3D face reconstruction, leveraging the geometric guidance provided by facial part segmentation. PRDL enhances the alignment of reconstructed facial features with the original image and excels in capturing extreme expressions." + ], + "image_footnote": [], + "bbox": [ + 503, + 455, + 624, + 547 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b5c36c3144c4e2a08d39de04771a49ce356cb286c18fa683d329733688a98bd2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 455, + 754, + 547 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4fcf17a949d1dbecf6a69ed0acd4b17a73476b43141ab566851a7be2a810026c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 767, + 455, + 888, + 547 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "[5] are commonly employed for precise facial feature positioning and capturing expressions. One of the most critical concerns is ensuring that the reconstructed facial components, including the eyes, eyebrows, lips, etc., seamlessly align with their corresponding regions in the input image with pixel-level accuracy, particularly when dealing with extreme facial expressions, as shown in Fig. 1.", + "bbox": [ + 496, + 643, + 890, + 748 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although current methods [11, 14, 17, 19, 25] have made notable strides in face reconstruction, some issues persist. On the one hand, existing works often rely on landmarks [17, 60] and photometric-texture [12, 45] to guide face reconstruction. In the case of extreme facial expressions, landmarks are sparse or inaccurate and the gradient from the texture loss cannot directly constrain the shape [59], posing a challenge for existing methods to achieve precise alignment of facial features in 3D face reconstruction, as depicted in Fig. 2(a). On the other hand, many methods", + "bbox": [ + 496, + 750, + 892, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author: Xiangyu Zhu", + "bbox": [ + 94, + 886, + 297, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1672", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "primarily adopt 3D errors as a quality metric, overlooking the precise alignment of facial parts. As shown in Fig. 2(b), when evaluating the REALY [7] benchmark in the eye region, comparing the results of 3DDFA-v2 [17] and DECA [14], a lower 3D region error may not lead to better 2D region alignment. We believe in the potential for a more comprehensive utilization of the geometry information inherent in each facial part segmentation to guide 3D face reconstruction, addressing the issues mentioned above.", + "bbox": [ + 75, + 90, + 472, + 227 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Facial part segmentation [24, 31, 32, 34] has been well developed, offering precise geometry for each facial feature with pixel-level accuracy. 
Compared with commonly used landmarks, part segmentation provides denser labels covering the whole image. Compared with photometric texture, part segmentation is less susceptible to lighting or shadow interference. Although facial part segmentation occasionally appears in the process of 3D face reconstruction, it is not fully utilized. For instance, it only serves to enhance the reconstruction quality of specific regions [25, 48], or to distinguish the overall texture location for photometric-texture-loss [26], without delving into the specifics of facial parts. Attempts [33, 56] to fit 3D parts with the guidance of segmentation information rely on differentiable renderers [15, 42, 46] to generate the silhouettes of the predicted 3D facial regions and optimize the difference between the rendered silhouettes and the 2D segmentation through Intersection over Union (IoU) loss. However, these renderers fail to provide sufficient and stable geometric signals for face reconstruction due to local optima, rendering error propagation, and gradient instability [22].", + "bbox": [ + 75, + 231, + 472, + 549 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This paper leverages the precise and rich geometric information in facial part silhouettes to guide face reconstruction, thereby improving the alignment of reconstructed facial features with the original image and excelling in reconstructing extreme facial expressions. Fig.1 provides an overview of the proposed Part Re-projection Distance Loss (PRDL). Firstly, PRDL samples points within the segmented region and transforms the segmentation information into a 2D point set for each facial part. The 3D face reconstruction is also re-projected onto the image plane and transformed into 2D point sets for different regions. Secondly, PRDL samples the image grid anchors and establishes geometric descriptors. These descriptors are constructed by using various statistical distances from the anchors to the point set. Finally, PRDL optimizes the distribution of the same semantic point sets, leading to improved overlap between the regions covered by the target and predicted point sets. In contrast to renderer-based methods, PRDL exhibits a clear gradient. To facilitate the use of PRDL, we provide a new 3D mesh part annotation aligned with semantic regions in 2D face segmentation [24, 55], which differs from the existing annotations [30, 49], as shown in Fig.2(c). Besides the drawbacks of supervisory signals, the challenge of han", + "bbox": [ + 75, + 553, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ac68d5bbb39a20f8fcd0a8f124eba73e61b88118bb3adbf8e07621e45cd10413.jpg", + "image_caption": [ + "(a) Performance on extreme expressions" + ], + "image_footnote": [], + "bbox": [ + 501, + 90, + 732, + 150 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/be08dc941b4c38b740950507e6cdff094eed3a01abb3719f5d81e47ca113dd96.jpg", + "image_caption": [ + "(b) 3D error vs. 2D alignment", + "Figure 2. Drawbacks of existing research and our results. (a) Present researches fail to reconstruct extreme expressions and perform bad region alignment. (b) Inconsistencies between 3D errors and 2D alignments, such as the eye region in this case. (c) Geometric optimization of each semantically consistent part is only achievable through our annotations." 
+ ], + "image_footnote": [], + "bbox": [ + 501, + 160, + 732, + 224 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5c93af5dd693181cbe20a1c67826b7893bcffb0abc7a8f736e2feec185b172a2.jpg", + "image_caption": [ + "(c) 3D face model annotations" + ], + "image_footnote": [], + "bbox": [ + 735, + 90, + 887, + 224 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "dling extreme expressions arises from data limitations. To boost studies and address the lack of emotional expression (e.g., closed-eye, open-mouth, frown, etc.), we synthesize a face dataset using the GAN-based method [24]. To highlight the performance of region overlapping, we propose a new benchmark to quantify the accuracy of 3D reconstruction parts cling to their corresponding image components on the 2D image plane. Our main contributions are as follows:", + "bbox": [ + 496, + 335, + 890, + 457 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce a novel Part Re-projection Distance Loss (PRDL) to comprehensively utilize segmentation information for face reconstruction. PRDL transforms the target and prediction into semantic point sets, optimizing the distribution of point sets to ensure that the reconstructed regions and the target share the same geometry.", + "- We introduce a new synthetic face dataset including closed-eye, open-mouth, and frown expressions, with more than $200K$ images.", + "- Extensive experiments show that the results with PRDL achieve excellent performance and outperform the existing methods. The data and code are available at https://github.com/wang-zidu/3DDFA-V3." + ], + "bbox": [ + 500, + 459, + 890, + 656 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 671, + 640, + 686 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2D-to-3D Losses for 3D Face Reconstruction. Landmark loss [11, 17, 60] stands out as the most widely employed and effective supervised way for face reconstruction. Some studies [20, 37] reveal that it can generate 3D faces under the guidance of sufficient hundreds or thousands landmarks. Photometric loss is another commonly used loss involving rendering the reconstructed mesh with texture into an image and comparing it to the original input. Some researchers focus on predicting the facial features that need to be fitted while excluding occlusions [12, 45]. The photometric loss is susceptible to factors like texture basis, skin masks, and rendering modes. It emphasizes overall visualization and may not effectively constrain local details. Perception loss", + "bbox": [ + 496, + 704, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "1673", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/9993a79f4147ffc2959550ceabcd52c36438bcd60ffeb38a1fa0351a35b53af9.jpg", + "image_caption": [ + "Figure 3. Overview of Part Re-projection Distance Loss (PRDL). (a): Transforming facial part segmentation into target point sets $\\{C_p\\}$ . (b): Re-projecting $V_{3d}(\\alpha)$ onto the image plane to obtain predicted point sets $\\{V_{2d}^p (\\alpha)\\}$ . 
(c): Given anchors $\\mathbf{A}$ and distance functions $\\mathcal{F}$ , the core idea of PRDL is to minimize the difference of every statistical distance from any $\\pmb{a}_i\\in \\pmb{A}$ to the $V_{2d}^{p}(\\alpha)$ or $C_p$ , leading to enhanced overlap between the regions covered by the target and predicted point sets." + ], + "image_footnote": [], + "bbox": [ + 81, + 89, + 478, + 258 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c2bf4c7eb6a15c3244b1665bdca0276d122e58692fb2aefc44d7fb8ec13f107a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 478, + 89, + 885, + 258 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "[11, 14, 16] distinguishes itself from image-level methods by employing pre-trained deep face recognition networks [9] to extract high-level features from the rendered reconstruction results. These features are then compared with the features from the input. Lip segmentation consistency loss [48] employs mouth segmentation to help reconstruction.", + "bbox": [ + 75, + 335, + 470, + 428 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Differentiable Silhouette Renderers. The development of differentiable renderers [15, 42, 46] has enriched the supervised methods for 3D face reconstruction. These pipelines make the rasterization process differentiable, allowing for the computation of gradients for every pixel in the rendered results. By combining IoU loss with segmentation information, the silhouettes produced by these renderers have been shown to optimize 3D shapes [8, 33, 56]. These rasterization processes typically rely on either local [21, 36] or global [8, 33] geometric distance-based weighted aggregation, generating silhouettes by computing a probability related to the distance from pixels to mesh faces. However, to obtain a suitable sharp silhouette, the weight contribution of each position to the rendered pixel will decrease sharply with the increase of distance, and the gradient generated by the shape difference at the large distance will be small or zero, which makes it difficult to retain accurate geometry guidance. These renderers also encounter issues such as rendering error propagation and gradient instability [22].", + "bbox": [ + 75, + 436, + 468, + 724 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Synthetic Dataset. Synthetic data [41, 52, 58] is commonly used to train 3D face reconstruction models [11, 17, 25]. However, these synthetic faces either prioritize the diversification of background, illumination, and identities [41, 52], or concentrate on pose variation [58], contributing to achieve good results in reconstructing natural facial expressions but struggling to reconstruct extreme expressions. To overcome these limitations and facilitate the related research, this paper adopts a GAN-based method [24] to synthesize realistic and diverse facial expression data, including closed eyes, open mouths, and frowns.", + "bbox": [ + 75, + 734, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methodology", + "text_level": 1, + "bbox": [ + 500, + 334, + 633, + 351 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Preliminaries", + "text_level": 1, + "bbox": [ + 500, + 359, + 640, + 373 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We conduct a face model, an illumination model, and a camera model based on [6, 11, 14, 17].", + "bbox": [ + 498, + 382, + 892, + 412 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Face Model. 
The vertices and albedo of a 3D face are determined by the following formula:", + "bbox": [ + 498, + 420, + 890, + 450 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nV _ {3 d} (\boldsymbol {\alpha}) = \boldsymbol {R} \left(\boldsymbol {\alpha} _ {a}\right) \left(\bar {\boldsymbol {V}} + \boldsymbol {\alpha} _ {i d} \boldsymbol {A} _ {i d} + \boldsymbol {\alpha} _ {\exp} \boldsymbol {A} _ {\exp}\right) + \boldsymbol {\alpha} _ {t}, \tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 521, + 460, + 890, + 486 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nT _ {a l b} (\boldsymbol {\alpha}) = \overline {{\boldsymbol {T}}} + \boldsymbol {\alpha} _ {a l b} \boldsymbol {A} _ {a l b}\n$$\n", + "text_format": "latex", + "bbox": [ + 522, + 481, + 683, + 496 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $V_{3d}(\alpha) \in \mathbb{R}^{3 \times 35709}$ denotes the 3D face vertices and $\overline{\boldsymbol{V}}$ is the mean shape. $T_{alb}(\alpha) \in \mathbb{R}^{3 \times 35709}$ is the albedo and $\overline{T}$ is the mean albedo. $A_{id}$ , $A_{exp}$ and $A_{alb}$ are the face identity vector bases, the expression vector bases and the albedo vector bases, respectively. $\alpha_{id} \in \mathbb{R}^{80}$ , $\alpha_{exp} \in \mathbb{R}^{64}$ and $\alpha_{alb} \in \mathbb{R}^{80}$ are the identity parameter, the expression parameter and the albedo parameter, respectively. $\alpha_{t} \in \mathbb{R}^{3}$ is the translation parameter. $\pmb{R}(\pmb{\alpha}_{a}) \in \mathbb{R}^{3 \times 3}$ is the rotation matrix corresponding to pitch/yaw/roll angles $\alpha_{a} \in \mathbb{R}^{3}$ .", + "bbox": [ + 498, + 511, + 890, + 648 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Camera. We employ a camera with a fixed perspective projection, which is the same as in [11, 25]. Using this camera to re-project $V_{3d}(\alpha)$ onto the 2D image plane yields $V_{2d}(\alpha) \in \mathbb{R}^{2 \times 35709}$ .", + "bbox": [ + 498, + 654, + 890, + 715 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Illumination Model. Following [14], we adopt Spherical Harmonics (SH) [40] for the estimation of the shaded texture $T_{tex}(\alpha)$ :", + "bbox": [ + 498, + 722, + 890, + 768 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nT _ {t e x} (\boldsymbol {\alpha}) = T _ {a l b} (\boldsymbol {\alpha}) \odot \sum_ {k = 1} ^ {9} \boldsymbol {\alpha} _ {s h} ^ {k} \boldsymbol {\Psi} _ {k} (\boldsymbol {N}), \tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 562, + 779, + 890, + 815 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\odot$ denotes the Hadamard product, $N$ is the surface normal of $V_{3d}(\alpha)$ , $\Psi : \mathbb{R}^3 \to \mathbb{R}$ is the SH basis function and $\alpha_{sh} \in \mathbb{R}^9$ is the corresponding SH parameter. In summary, $\alpha = [\alpha_{id}, \alpha_{\mathrm{exp}}, \alpha_a, \alpha_t, \alpha_{sh}]$ is the undetermined parameter.", + "bbox": [ + 498, + 824, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "1674", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. 
Point Transformation on the Image Plane", + "text_level": 1, + "bbox": [ + 76, + 90, + 431, + 107 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Transforming Segmentation to 2D Points. For an input RGB face image $I \in \mathbb{R}^{H \times W \times 3}$ , the prediction of a face segmentation method can be represented by a set of binary tensors $M = \{M_p | p \in P\}$ , where $P = \{\text{left\_eye, right\_eye, left\_eyebrow, right\_eyebrow, up\_lip, down\_lip, nose, skin}\}$ and $M_p \in \{0,1\}^{H \times W}$ . Specifically, $M_p^{(x,y)} = 1$ only if the 2D pixel position $(x,y)$ of $M_p$ belongs to a certain face part $p$ , and otherwise $M_p^{(x,y)} = 0$ . $M$ can be transformed into a set of point sets $C = \{C_p | p \in P\}$ , where $C_p = \{(x,y) \,|\, M_p^{(x,y)} = 1\}$ . In this step, we employ DML-CSR [55] for face segmentation, excluding the ear regions, filtering out noise from the segmentation, and dynamically removing the forehead region above the eyebrows based on their position. This procedure is illustrated in Fig. 3(a). More implementation details are provided in the supplemental materials.", + "bbox": [ + 76, + 119, + 472, + 371 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Facial Part Annotation on 3D Face Model. Our objective is to leverage $\{C_p\}$ for guiding 3D face reconstruction. Thus, we should ensure that the reconstructed mesh can be divided into regions consistent with the semantics of the 2D segmentation. Due to the topological consistency of the face model, every vertex on the mesh can be annotated for a specific region. However, existing annotations [27, 30, 49] do not conform to widely accepted 2D face segmentation definitions [24, 32], as shown in Fig.2(c). To address this misalignment, we introduce new part annotations on both BFM [5] and FaceVerse [51]. We partition the vertices based on their indices. $i \in Ind_p$ indicates that the $i$ -th vertex (denoted as $\mathbf{v}$ ) on the mesh belongs to part $p$ . $\{Ind_p|p \in P\}$ can be obtained by:", + "bbox": [ + 76, + 377, + 472, + 590 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\begin{array}{l} I ^ {s e g} = \operatorname {S e g} (\operatorname {R e n d e r} (V _ {3 d}, T e x)) \\ i \in I n d _ {p}, \ \mathrm{if} \ I ^ {s e g} (\boldsymbol {v}) \in p \end{array} , \tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 599, + 468, + 631 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\text{Render}(\cdot)$ generates an image by applying texture on the mesh, and $\text{Seg}(\cdot)$ is responsible for segmenting the rendered result. We employ different shapes $V_{3d}$ and varying textures $Tex$ to label every $v \in V_{3d}$ , with hand-crafted modifications. The annotation $\{Ind_p\}$ is completed offline before the training process. Consequently, we utilize $\{Ind_p\}$ to transform the re-projection $V_{2d}(\alpha)$ into semantic point sets $\{V_{2d}^p (\alpha)|p \in P\}$ . Besides, the upper forehead region situated above the eyebrows is dynamically excluded to ensure consistency with the target. Points obstructed by hair are removed based on $\{C_p\}$ , as shown in Fig. 3(b). 
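As an illustration only (a minimal sketch, not the released implementation), the two point-set transformations above can be written in a few lines of PyTorch, where `part_masks` and `ind_p` are placeholder names for the binary masks $\{M_p\}$ and the offline vertex annotations $\{Ind_p\}$:

```python
import torch

def masks_to_point_sets(part_masks):
    # C_p = {(x, y) | M_p^(x, y) = 1}: collect the pixel coordinates of every
    # foreground pixel of each binary part mask M_p (H x W).
    return {p: torch.nonzero(m, as_tuple=False).float() for p, m in part_masks.items()}

def reprojection_to_point_sets(v2d, ind_p):
    # Slice the re-projected vertices V_2d(alpha) (N x 2) into semantic point sets
    # {V_2d^p(alpha)} using the per-part vertex indices Ind_p annotated offline.
    return {p: v2d[idx] for p, idx in ind_p.items()}
```
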
Please refer to supplemental materials for annotation details.", + "bbox": [ + 76, + 641, + 472, + 824 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Part Re-projection Distance Loss (PRDL)", + "text_level": 1, + "bbox": [ + 76, + 830, + 431, + 848 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This section describes the design of PRDL, focusing on constructing geometric descriptors and establishing the relation between the prediction $\\{V_{2d}^p (\\alpha)\\}$ and the ground", + "bbox": [ + 76, + 854, + 470, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "truth $\\{C_p\\}$ for a given $p \\in P$ , which is proved instrumental for face reconstruction.", + "bbox": [ + 498, + 90, + 890, + 119 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In a more generalized formulation, considering two point sets $C = \\{c_1, c_2, \\dots, c_{|C|}\\}$ and $C^* = \\{c_1^*, c_2^*, \\dots, c_{|C^*|}^*\\}$ , we aim to establish geometry descriptions by quantifying shape alignment between them for reconstruction. $C$ and $C^*$ may not possess the same number of points, and their points lack correspondence. Instead of directly searching the correspondence between the two sets, we use a set of fixed points as anchors $A = \\{a_1, a_2, \\dots, a_{|A|}\\}$ and a collection of statistical distance functions $\\mathcal{F} = \\{f_1, f_2, \\dots, f_{|\\mathcal{F}|}\\}$ to construct geometry description tensors $\\Gamma(C, A, \\mathcal{F}) \\in \\mathbb{R}^{|\\mathcal{A}| \\times |\\mathcal{F}|}$ and $\\Gamma(C^*, A, \\mathcal{F}) \\in \\mathbb{R}^{|\\mathcal{A}| \\times |\\mathcal{F}|}$ for $C$ and $C^*$ , respectively (denoted as $\\Gamma$ and $\\Gamma^*$ for brevity). The value $\\Gamma(i, j)$ and $\\Gamma^*(i, j)$ at the position $(i, j)$ are determined by:", + "bbox": [ + 498, + 125, + 893, + 323 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{ \\begin{array}{l} \\boldsymbol {\\Gamma} (i, j) = f _ {j} (\\boldsymbol {C}, \\boldsymbol {a} _ {i}) \\\\ \\boldsymbol {\\Gamma} ^ {*} (i, j) = f _ {j} (\\boldsymbol {C} ^ {*}, \\boldsymbol {a} _ {i}), \\end{array} \\right. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 609, + 342, + 890, + 383 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where every function $f_{j}(\\pmb {B},\\pmb {b})\\in \\mathcal{F}$ describes the distance from a single point $\\pmb{b}$ to a set of points $\\pmb{B}$ , and $f_{j}(\\pmb {B},\\pmb {b})$ can be any statistically meaningful distance.", + "bbox": [ + 498, + 398, + 890, + 444 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "When fitting 3DMM to the segmented silhouettes for part $p$ , we set $\\boldsymbol{C} = V_{2d}^{p}(\\boldsymbol{\\alpha})$ and $C^* = C_p$ with specified anchors $\\mathbf{A}$ and a set of distance functions $\\mathcal{F}$ . Then we calculate their corresponding geometry descriptor tensors $\\Gamma_p = \\Gamma(V_{2d}^p(\\boldsymbol{\\alpha}), \\boldsymbol{A}, \\mathcal{F})$ and $\\Gamma_p^* = \\Gamma(C_p, \\boldsymbol{A}, \\mathcal{F})$ . 
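For intuition, a possible PyTorch sketch of the descriptor $\Gamma(\cdot, \boldsymbol{A}, \mathcal{F})$, with $\mathcal{F}$ instantiated as the furthest, nearest, and average distances used later in this paper, is shown below; it is illustrative only and may differ from the released code:

```python
import torch

def geometry_descriptor(points, anchors):
    # points:  (N, 2) point set, e.g. C_p or the re-projected V_2d^p(alpha).
    # anchors: (K, 2) fixed grid anchors A.
    # Returns Gamma of shape (K, 3): per-anchor [furthest, nearest, average] distance.
    d = torch.cdist(anchors, points)             # (K, N) pairwise Euclidean distances
    return torch.stack((d.max(dim=1).values,     # f_max
                        d.min(dim=1).values,     # f_min
                        d.mean(dim=1)), dim=1)   # f_ave
```

The loss defined next then reduces to the squared difference between such descriptors computed for the predicted and target point sets of each part.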
Part Re-projection Distance Loss (PRDL) $\mathcal{L}_{prdl}$ is defined as:", + "bbox": [ + 498, + 446, + 893, + 539 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\mathcal {L} _ {p r d l} = \sum_ {p \in P} w _ {p r d l} ^ {p} \left\| \boldsymbol {\Gamma} _ {p} - \boldsymbol {\Gamma} _ {p} ^ {*} \right\| _ {2} ^ {2}, \tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 556, + 890, + 583 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $w_{prdl}^p$ is the weight of each part $p$ . In this paper, we set $\mathcal{F}$ as a collection of the nearest $(f_{min})$ , furthest $(f_{max})$ , and average $(f_{ave})$ distance, i.e. $\mathcal{F} = \{f_{max}, f_{min}, f_{ave}\}$ . We set $\mathbf{A}$ as an $H \times W$ mesh grid. Then, for every $\mathbf{a}_i \in \mathbf{A}$ , the optimization objective of $\mathcal{L}_{prdl}$ is to:", + "bbox": [ + 498, + 602, + 890, + 679 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\left\{ \begin{array}{l} \min | | f _ {m i n} (\boldsymbol {C} _ {p}, \boldsymbol {a} _ {i}) - f _ {m i n} (V _ {2 d} ^ {p} (\boldsymbol {\alpha}), \boldsymbol {a} _ {i}) | | _ {2} ^ {2} \\ \min | | f _ {m a x} (\boldsymbol {C} _ {p}, \boldsymbol {a} _ {i}) - f _ {m a x} (V _ {2 d} ^ {p} (\boldsymbol {\alpha}), \boldsymbol {a} _ {i}) | | _ {2} ^ {2} \\ \min | | f _ {a v e} (\boldsymbol {C} _ {p}, \boldsymbol {a} _ {i}) - f _ {a v e} (V _ {2 d} ^ {p} (\boldsymbol {\alpha}), \boldsymbol {a} _ {i}) | | _ {2} ^ {2} \end{array} . \right. \tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 696, + 890, + 746 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This process is shown in Fig. 3(c). When $p =$ left_eye, PRDL minimizes the length difference between the indigo and orange lines (also as shown in Fig. 6(a) when $p =$ right_eyebrow). The upper right corner of Fig. 3(c) is a visualization of $\Gamma_{left\_eye}$ , shown channel by channel along the last dimension after reshaping it from $\mathbb{R}^{|A| \times |\mathcal{F}|}$ to $\mathbb{R}^{H \times W \times |\mathcal{F}|}$ . It is worth noting that the number of points in $V_{2d}^{p}(\alpha)$ , $C_p$ and $A$ can be reduced using Farthest Point Sampling (FPS) [38] to decrease computational costs.", + "bbox": [ + 496, + 763, + 893, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "1675", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/766e73ee8c9c97609c8c05e2e3524c9f09ab61c9881f32c6004b1b618c9cdf79.jpg", + "image_caption": [ + "Figure 4. Synthesizing emotional expression data." + ], + "image_footnote": [], + "bbox": [ + 78, + 88, + 472, + 239 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d19110c11f631ebae74ef38ce7b33c14019e99529639079797d13881cd02ee48.jpg", + "image_caption": [ + "Figure 5. Examples of our synthetic face dataset." + ], + "image_footnote": [], + "bbox": [ + 78, + 262, + 468, + 345 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. 
Overall Losses", + "text_level": 1, + "bbox": [ + 76, + 378, + 225, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To reconstruct a 3D face from image $I$ , we build frameworks to minimize the total loss $\\mathcal{L}$ as follows:", + "bbox": [ + 76, + 402, + 468, + 431 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} = \\lambda_ {p r d l} \\mathcal {L} _ {p r d l} + \\lambda_ {l m k} \\mathcal {L} _ {l m k} + \\lambda_ {p h o} \\mathcal {L} _ {p h o} \\tag {7} \\\\ + \\lambda_ {p e r} \\mathcal {L} _ {p e r} + \\lambda_ {r e g} \\mathcal {L} _ {r e g}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 132, + 441, + 468, + 479 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_{lmk}$ is the landmark loss, we use detectors to locate 240 2D landmarks for $\\mathcal{L}_{lmk}$ and adopt the dynamic landmark marching [57] to handle the non-correspondence between 2D and 3D cheek contour landmarks arising from pose variations. The photometric loss $\\mathcal{L}_{pho}$ and the perceptual loss $\\mathcal{L}_{per}$ are based on [11, 14]. $\\mathcal{L}_{reg}$ is the regularization loss for $\\alpha$ . $\\lambda_{prdl} = 0.8e - 3$ , $\\lambda_{lmk} = 1.6e - 3$ , $\\lambda_{pho} = 1.9$ , $\\lambda_{per} = 0.2$ , and $\\lambda_{reg} = 3e - 4$ are the balance weights. $\\mathcal{L}_{prdl}$ and $\\mathcal{L}_{lmk}$ are normalized by $H\\times W$ .", + "bbox": [ + 76, + 489, + 468, + 626 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Synthetic Emotional Expression Data", + "text_level": 1, + "bbox": [ + 76, + 635, + 398, + 651 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Benefiting from recent developments in face editing research [24, 47], we can generate realistic faces through segmentation $M$ . We aim to mass-produce realistic and diverse facial expression data. To achieve this, we start by obtaining the segmentation $M$ and landmarks $lmk$ of the original image $I$ with a segmentation method [55] and a landmark detector, respectively. Leveraging the location of landmarks $lmk$ , we apply affine transformation with various patterns onto the segmentation $M$ , resulting in $M'$ . Subsequently, $M'$ is fed into the generative network [24] to produce a new facial expression image $I'$ , as depicted in Fig. 4. Based on CelebA [35] and CelebAMask-HQ [24], we have generated a dataset comprising more than $200K$ images, including expressions such as closed-eye, open-mouth, and frown, as depicted in Fig. 5. This dataset will be publicly available to facilitate research.", + "bbox": [ + 75, + 659, + 470, + 900 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5fc61e55e55d6e9c3c8976dad38e3a3e67baaf1d815ebc5ab83b4ea4584e3946.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 504, + 90, + 656, + 208 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d6479ca4c8bb2e358c69b3f9a3551d1def43942ce23c5a305e1a506de1f537a6.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 661, + 90, + 813, + 208 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5e1b49134b57f58610cec69f9dded9de03593de84919e22809059b2ef388c1f0.jpg", + "image_caption": [ + "(d)", + "Figure 6. (a): $p =$ right_eyebrow when the closest distance $(f_{min})$ is compared. (b): The gradient descent of PRDL for (a). (c): $\\mathbf{\\Gamma}_p^*$ is the regression target of PRDL in $f_{min}$ channel. 
(d): $M_p$ is the regression target of renderer-based methods. $\\mathbf{\\Gamma}_p^*$ is more informative than $M_p$ ." + ], + "image_footnote": [], + "bbox": [ + 818, + 90, + 890, + 208 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Analysis of PRDL and Related Methods", + "text_level": 1, + "bbox": [ + 500, + 306, + 857, + 323 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The Gradient of PRDL. With anchors and distance functions as the bridge, PRDL establishes the geometry descriptions of the two point sets. In Fig. 6, we take $p =$ right_eyebrow as an example to analyze the gradient of PRDL. When considering $f_{min}$ and a specific anchor $\\pmb{a}_i \\in \\pmb{A}$ , $f_{min}$ identifies $\\pmb{c}_m$ and $\\pmb{v}_n$ from $C_p$ and $V_{2d}^p(\\alpha)$ , respectively, by selecting the ones closest to $\\pmb{a}_i$ :", + "bbox": [ + 498, + 339, + 890, + 445 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nm = \\underset {j} {\\arg \\min } \\| \\boldsymbol {a} _ {i} - \\boldsymbol {c} _ {j} \\| _ {2}, \\quad \\boldsymbol {c} _ {j} \\in C _ {p}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 455, + 890, + 481 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nn = \\underset {j} {\\arg \\min } \\| \\boldsymbol {a} _ {i} - \\boldsymbol {v} _ {j} \\| _ {2}, \\quad \\boldsymbol {v} _ {j} \\in V _ {2 d} ^ {p} (\\boldsymbol {\\alpha}). \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 493, + 890, + 518 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Under the definition of PRDL, the corresponding energy function $E_{i,m,n}$ for $\\pmb{a}_i,\\pmb{c}_m$ and $\\pmb{v}_n$ is:", + "bbox": [ + 500, + 523, + 890, + 554 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} E _ {i, m, n} = \\left(\\left\\| \\boldsymbol {a} _ {i} - \\boldsymbol {c} _ {m} \\right\\| _ {2} - \\left\\| \\boldsymbol {a} _ {i} - \\boldsymbol {v} _ {n} \\right\\| _ {2}\\right) ^ {2} \\tag {10} \\\\ = \\left(d _ {i, m} - d _ {i, n}\\right) ^ {2}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 560, + 563, + 890, + 602 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $d_{i,m} = ||\\pmb{a}_i - \\pmb{c}_m||_2, d_{i,n} = ||\\pmb{a}_i - \\pmb{v}_n||_2$ . The gradient descent of $E_{i,m,n}$ on $\\pmb{v}_n$ is:", + "bbox": [ + 498, + 609, + 890, + 642 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n- \\frac {\\partial E _ {i , m , n}}{\\partial \\boldsymbol {v} _ {n}} = 2 (\\boldsymbol {v} _ {n} - \\boldsymbol {a} _ {i}) \\left(\\frac {d _ {i , m}}{d _ {i , n}} - 1\\right). \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 575, + 650, + 890, + 672 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The physical explanation of Eqn. 11 is comprehensible and concise: the direction of $-\\nabla E_{i,m,n}$ always aligns with the line connecting $\\pmb{a}_i$ and $\\pmb{v}_n$ , if $d_{i,n} > d_{i,m}$ , the direction of $-\\nabla E_{i,m,n}$ is from $\\pmb{v}_n$ to $\\pmb{a}_i$ (as shown in Fig. 6(b)), and vice versa. In the context of gradient descent, the effect of $-\\nabla E_{i,m,n}$ is to make $d_{i,n} = d_{i,m}$ as much as possible. 
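This relation is easy to verify numerically; the toy snippet below (with arbitrary illustrative coordinates, not values from the paper) checks the analytic form of Eqn. 11 against PyTorch autograd:

```python
import torch

a = torch.tensor([3.0, 4.0])                      # anchor a_i
c = torch.tensor([0.0, 0.0])                      # closest target point c_m
v = torch.tensor([1.0, 7.0], requires_grad=True)  # closest predicted point v_n

d_m, d_n = (a - c).norm(), (a - v).norm()
E = (d_m - d_n) ** 2                              # energy of Eqn. 10
E.backward()

with torch.no_grad():
    analytic = 2 * (v - a) * (d_m / d_n - 1)      # right-hand side of Eqn. 11
print(torch.allclose(-v.grad, analytic))          # expected: True
```
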
Given $\\pmb{A}$ and $f_{min}$ , the gradient descent of $\\mathcal{L}_{prdl}$ on $\\pmb{v}_n$ is the aggregation of all anchors:", + "bbox": [ + 498, + 679, + 890, + 799 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} - \\frac {\\partial \\mathcal {L} _ {p r d l}}{\\partial \\boldsymbol {v} _ {n}} = - w _ {p r d l} ^ {p} \\sum \\frac {\\partial E _ {i , m , n}}{\\partial \\boldsymbol {v} _ {n}} \\\\ = - w _ {p r d l} ^ {p} \\sum_ {i, m} ^ {i, m} \\nabla E _ {i, m, n}. \\tag {12} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 806, + 890, + 861 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The scenario with $f_{max}$ is similar to that of $f_{min}$ , with the only distinction lying in the selection of points. $f_{max}$", + "bbox": [ + 498, + 869, + 890, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "1676", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/a3746a0e263eb2f59e75c06cb3803df38c8b23f75f635aa7fd8595b269610798.jpg", + "table_caption": [ + "Table 1. Quantitative comparison on Part IoU benchmark. The best and runner-up are highlighted in bold and underlined, respectively. R_eye denotes the right eye, and similar definitions for the rest are omitted." + ], + "table_footnote": [], + "table_body": "
MethodsPart IoU(%)↑
R_eye (avg. ± std.)L_eye (avg. ± std.)R_brow (avg. ± std.)L_brow (avg. ± std.)Nose (avg. ± std.)Up_lip (avg. ± std.)Down_lip (avg. ± std.)avg.
PRNet [13]65.87±16.3666.73±14.7461.46±15.8959.18±16.3183.34±4.5750.88±18.3558.16±17.7263.66
MGCNet [45]64.42±16.0264.81±16.9155.25±15.2961.30±15.5887.40±3.5141.16±19.7066.22±13.8362.94
Deep3D [11]71.87±12.0070.52±12.1964.66±11.3164.70±11.9887.69±3.5161.21±15.6065.95±13.0869.51
3DDFA-v2 [17]61.39±15.9857.51±18.0943.38±25.2538.85±24.3880.83±4.9250.20±17.1759.01±15.2355.88
HRN [25]73.31±11.3973.61±11.5067.91±8.2666.78±10.2790.00±2.6063.80±14.1666.40±11.9471.69
DECA [14]58.09±21.4062.56±19.4155.27±19.4951.86±19.9386.54±9.1156.39±16.9662.81±17.6661.93
Ours (w/o Lprdl)70.72±9.4475.69±10.7971.11±8.5871.69±8.7388.35±4.6057.26±15.9769.71±10.6872.08
Ours (w/o Syn. Data)73.81±10.1272.55±10.6872.24±9.2370.90±8.5588.71±4.1157.43±14.3769.87±10.5472.22
Ours74.55±11.4676.06±10.3274.00±7.7274.05±7.7089.06±3.5358.16±12.7670.86±10.3473.82
", + "bbox": [ + 80, + 121, + 890, + 273 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/e263c5edec0776988387ee454347fd06794f38b88a204b5793f26c84d935602f.jpg", + "table_caption": [ + "Table 2. Quantitative comparison on Realty benchmark. Lower values indicate better results. The best and runner-up are highlighted in bold and underlined, respectively." + ], + "table_footnote": [], + "table_body": "
MethodsFrontal-view (mm) ↓Side-view (mm) ↓
Nose avg.± std.Mouth avg.± std.Forehead avg.± std.Cheek avg.± std.avg.Nose avg.± std.Mouth avg.± std.Forehead avg.± std.Cheek avg.± std.avg.
PRNet [13]1.923±0.5181.838±0.6372.429±0.5881.863±0.6982.0131.868±0.5101.856±0.6072.445±0.5701.960±0.7312.032
MGCNet [45]1.771±0.3801.417±0.4092.268±0.5031.639±0.6501.7741.827±0.3831.409±0.4182.248±0.5081.665±0.6441.787
Deep3D[11]1.719±0.3541.368±0.4392.015±0.4491.528±0.5011.6571.749±0.3431.411±0.3952.074±0.4861.528±0.5171.691
3DDFA-v2 [17]1.903±0.5171.597±0.4782.447±0.6471.757±0.6421.9261.883±0.4991.642±0.5012.465±0.6221.781±0.6361.943
HRN [25]1.722±0.3301.357±0.5231.995±0.4761.072±0.3331.5371.642±0.3101.285±0.5281.906±0.4791.038±0.3221.468
DECA [14]1.694±0.3552.516±0.8392.394±0.5761.479±0.5352.0101.903±1.0502.472±1.0792.423±0.7201.630±1.1352.107
Ours (w/o Lprdl)1.671±0.3321.460±0.4742.001±0.4281.142±0.3151.5681.665±0.3491.297±0.4002.016±0.4481.134±0.3421.528
Ours (w/o Syn. Data)1.592±0.3271.339±0.4331.823±0.4071.119±0.3321.4681.628±0.3201.229±0.4331.872±0.4071.091±0.3121.455
Ours1.586±0.3061.238±0.3731.810±0.3941.111±0.3271.4361.623±0.3131.205±0.3661.864±0.4241.076±0.3151.442
", + "bbox": [ + 80, + 311, + 890, + 444 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "also has the capability to constrain $V_{2d}^{p}(\\alpha)$ within the confines of $C_p$ . $f_{ave}$ acts on the entire $V_{2d}^{p}(\\alpha)$ , striving to bring its centroid as close as possible to the centroid of $C_p$ . The introduction of additional anchors and the integration of diverse statistical distances in PRDL prevent the optimization from local optima and provide sufficient geometric signals. Please refer to supplementary materials for more details.", + "bbox": [ + 75, + 454, + 468, + 561 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "PRDL vs. Renderer-Based Loss: An intuitive approach for fitting segmentation is to use the renderer-based IoU loss, where differentiable silhouette renderers play a crucial role. Consequently, we delve into the distinctions between PRDL and renderers. We can reshape $\\Gamma_p^*$ ( $\\mathbb{R}^{|A| \\times |\\mathcal{F}|} \\to \\mathbb{R}^{H \\times W \\times |\\mathcal{F}|}$ ) to visualize it with the last channel separately. Fig. 6(c) illustrates the visualization of the $f_{min}$ channel for $p =$ right_eyebrow, while Fig. 6(d) represents the silhouette rendered by [33] or [8]. In comparison with the regression target $M_p$ utilized in renderer-based methods, $\\Gamma_p^*$ in PRDL is more informative and more conducive to fitting. Please refer to supplementary materials for more details.", + "bbox": [ + 75, + 574, + 468, + 758 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Furthermore, considering existing theoretical analyses [8, 22, 56], PRDL exhibits several notable advantages. First, in these renderers, all triangles constituting the object influence every pixel within the silhouettes, making it intricate to isolate specific geometric features. In contrast, $f_{min}$ or $f_{max}$ in PRDL matches the nearest or furthest point on the object, allowing for a more straightforward measurement of the shape's boundary characteristics. Secondly, these renderers either neglect pixels outside any triangles of", + "bbox": [ + 75, + 763, + 470, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the 3D object or assign minimal weights to them, emphasizing the rendered object region. However, this operation is equivalent to selectively choosing anchors $A$ in the interior of the rendered shape, while the external anchors are either not chosen or treated differently by assigning small weights, thereby diminishing descriptive power. In Eqn. 11, Eqn. 12 and Fig. 6(b), we have analyzed that external anchors play a significant role in the fitting process. Ablation study (Fig.8) also proves that PRDL is more effective than renderer-based methods like [8, 33, 56].", + "bbox": [ + 496, + 454, + 890, + 606 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 500, + 619, + 633, + 636 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 500, + 643, + 705, + 660 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Reconstruction Frameworks. We implement PRDL based on PyTorch [39] and PyTorch3D [42]. We use ResNet-50 [18] as the backbone to predict $\\alpha$ . The input image is cropped and aligned by [10], and resized into $224 \\times 224$ .", + "bbox": [ + 496, + 674, + 890, + 736 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Data. 
The face images are from publicly available datasets: Dad-3dheads [37], CelebA [35], RAF-ML [28], RAF-DB [29] and 300W [43]. Our synthetic images are mainly from [24, 35]. We use [58] for face pose augmentation. In total, our training data contained about $600K$ face images. We employ DML-CSR [55] to predict 2D face segmentation.", + "bbox": [ + 496, + 741, + 890, + 833 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation Details. Considering the inherent feature of 2D segmentation, if part $p$ of a face is invisible or occluded, it may lead to $C_p = \\varnothing$ . In such a situation during training, we set $w_{prdl}^p = 0$ for these samples. We use Adam", + "bbox": [ + 496, + 839, + 890, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "1677", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3c56e60d7b03eb66c8ea5c6a50b08deb753cc6e193d82074e49255858a1239de.jpg", + "image_caption": [ + "Figure 7. Qualitative comparison with the other methods. Our method achieves realistic reconstructions, particularly in the eye region." + ], + "image_footnote": [], + "bbox": [ + 80, + 90, + 890, + 534 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "[23] as the optimizer with an initial learning rate of $1e - 4$ . We use Farthest Point Sampling (FPS) [38] to reduce the point number of $V_{2d}^{skin}(\\alpha)$ and $C_{skin}$ to 3000, reducing computational consumption. Please refer to supplemental materials for more details.", + "bbox": [ + 75, + 561, + 468, + 638 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Metric", + "text_level": 1, + "bbox": [ + 76, + 652, + 168, + 667 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In various VR/AR applications, 3DMMs are crucial in capturing facial motions or providing fine-grained regions covering facial features. One crucial objective in such applications is to ensure the alignment of overlapping facial parts between prediction and input. Widely used benchmarks [7, 44] typically rely on the 3D accuracy performance of reconstructions. However, there are instances where inconsistencies arise between 3D errors and 2D alignments. As shown in Fig.2(b), comparing with 3DDFA-v2 [17], DECA [14] have better 2D eye region overlapping IoU (70.29% vs. 39.37%) but a higher 3D forehead error (1.88mm vs. 1.75mm). To address this, we introduce Part IoU to emphasize the performance of overlap.", + "bbox": [ + 75, + 679, + 470, + 876 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Part IoU is a new benchmark to quantify how well the part", + "bbox": [ + 76, + 885, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "reconstruction $V_{3d}^{p}(\\alpha)$ aligns with their corresponding parts from the original face. The core idea is to measure the overlap of facial components between the reconstruction and the original image using IoU. The ground truth is a binary tensor $\\{M_p\\}$ (as defined above). We render $V_{3d}(\\alpha)$ with a mean texture as an image, generate the predicted segmentation $\\{M_p^{pred}\\}$ with [55]. The use of mean texture focuses the metric more on overlap effects than other factors, making it applicable to methods without texture-fitting [13, 17]. Part IoU $IoU_p$ of part $p$ can be obtained by:", + "bbox": [ + 496, + 561, + 890, + 714 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nI o U _ {p} = I o U \\left(M _ {p} ^ {p r e d}, M _ {p}\\right). 
\\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 718, + 890, + 738 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "MEAD [50] is an emotional talking-face dataset. We test Part IoU by selecting 10 individuals from MEAD, each contributing 50 random different images. Part IoU measures the overlap performance between each part of the reconstruction and the ground truth. More detail is in the supplemental materials.", + "bbox": [ + 496, + 742, + 890, + 830 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "REALY [7] benchmark consists of 100 scanned neutral expression faces, which are divided into four parts: nose, mouth, forehead (eyes and eyebrows), and cheek for 3D alignment and distance error calculation.", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "1678", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/335aac99e095e85b2b5f743ce9f2f159e659c4351b9b50c58e35d9730dbb8379.jpg", + "image_caption": [ + "Input", + "Figure 8. Comparison with the renderer-based geometric guidance of segmentation." + ], + "image_footnote": [], + "bbox": [ + 80, + 89, + 158, + 210 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/532f7c06be93b30dd9daf6d2f4933e89bd0f6a0c7c3c133dbf13f218c668edba.jpg", + "image_caption": [ + "SoftRas" + ], + "image_footnote": [], + "bbox": [ + 158, + 89, + 223, + 210 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/cb36a202880902ae1a564cbed054916650bc8838671697dae42b18e61c55bcc7.jpg", + "image_caption": [ + "DIB-R" + ], + "image_footnote": [], + "bbox": [ + 246, + 89, + 300, + 210 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/aafd61dbd91e68cfd655195d7c0a665dc81caf9ec3972a2060c317920a56085a.jpg", + "image_caption": [ + "ReDA" + ], + "image_footnote": [], + "bbox": [ + 320, + 89, + 375, + 210 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8e806851a899441bf975b9e6061cb36ba18c5f5048dcb0ddf458e37cb60dcf1a.jpg", + "image_caption": [ + "PRDL" + ], + "image_footnote": [], + "bbox": [ + 400, + 89, + 465, + 210 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3. Qualitative Comparison", + "text_level": 1, + "bbox": [ + 76, + 253, + 297, + 271 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conduct a comprehensive evaluation of our method with the state-of-the-art approaches, including PRNet [13], MGCNet [45], Deep3D [11], 3DDFA-V2 [17], HRN [25] and DECA [14]. The visualization of HRN and DECA uses the mid-frequency details and coarse shape (denoted as HRN-m and DECA-c) since their further steps only change the renderer's normal map, while no 3D refinement is made. As shown in Fig. 7, our results excel in capturing extreme expressions, even better than HRN-m which has fine reconstruction steps.", + "bbox": [ + 75, + 277, + 468, + 429 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4.Quantitative Comparison", + "text_level": 1, + "bbox": [ + 76, + 436, + 308, + 454 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "On both the Part IoU and REALY [7] benchmarks, our results outperforms the existing state-of-the-art methods. As shown in Tab. 1, our method is almost always the highest overlap IoU across various facial parts with $73.82\\%$ total average, demonstrating PRDL enhances the part alignment of reconstruction. 
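For reference, the per-part IoU of Eqn. 13 reduces to a standard binary-mask IoU; a minimal sketch (not the official evaluation script) is:

```python
import torch

def part_iou(pred_mask, gt_mask):
    # IoU_p = IoU(M_p^pred, M_p) for one facial part; both masks are binary (H x W).
    pred, gt = pred_mask.bool(), gt_mask.bool()
    inter = (pred & gt).sum().float()
    union = (pred | gt).sum().float()
    return (inter / union.clamp(min=1)).item()
```
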
PRDL also performs the best average 3D error on the REALY benchmark (1.436mm in frontal-view and 1.442mm in side-view), as shown in Tab. 2.", + "bbox": [ + 75, + 460, + 468, + 580 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.5. Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 590, + 228, + 607 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation for PRDL and Synthetic Data. We conduct quantitative ablation experiments for PRDL and synthetic data on REALY and Part IoU. As depicted in Table 1 and Table 2, only introducing PRDL already yields superior results compared to all other methods (72.22%, 1.468mm, and 1.455mm). Introducing synthetic data without PRDL demonstrates a significant improvement in Part IoU, but not as effectively as PRDL (72.08% vs. 72.22%). Using both synthetic data and PRDL could lead to the best result.", + "bbox": [ + 75, + 621, + 468, + 756 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Compare with the Differentiable Silhouette Renderers. SoftRas [33] and DIB-R [8] are the two most widely used renderers, which serve as the basis for PyTorch3D [42] and Kaolin [15], respectively. Based on the image-fitting framework [1], we use them to render a silhouette of each face part and calculate the IoU loss with the ground truth. ReDA [56] is also a renderer-based method using the geometric guidance of segmentation. Fig.8 shows that PRDL is significantly better than these methods. It is essential to em", + "bbox": [ + 75, + 763, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7dae62bebef5ed7d9b06c30517d6279b3cf317fdb955a559770a77ff028362e3.jpg", + "image_caption": [ + "Input", + "Chamfer Distance", + "Figure 9. Comparison with the other point-driven-based geometric guidance of segmentation." + ], + "image_footnote": [], + "bbox": [ + 503, + 89, + 571, + 189 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/aa40965bc2b915017141f8e4086f6c645fead550126d0fd7cae293b49063d06f.jpg", + "image_caption": [ + "Density Aware" + ], + "image_footnote": [], + "bbox": [ + 571, + 89, + 658, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ff57dc0994d61bd7f37a4b027bebeb3b5e95d186a16772797141a3dad3b7eb54.jpg", + "image_caption": [ + "ICP" + ], + "image_footnote": [], + "bbox": [ + 666, + 89, + 751, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fe60e4798093524f74c52637a4cecbb3c65bbc778834bb35585115f8b99b6660.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 754, + 89, + 810, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c66e5da9aec31e8ea104677bc7f911f2b4c293c398c8c44524427de817bbe9ef.jpg", + "image_caption": [ + "PRDL" + ], + "image_footnote": [], + "bbox": [ + 810, + 89, + 885, + 188 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "phasize that all the results in Fig.8 and Fig.9 do not include $\\mathcal{L}_{lmk}$ , $\\mathcal{L}_{pho}$ , and $\\mathcal{L}_{per}$ .", + "bbox": [ + 500, + 239, + 890, + 271 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Compare with the Other Point-Driven Optimization Methods. One of the key insights of PRDL is transforming segmentation into points. Thus the 3DMM fitting becomes an optimization of two 2D point clouds until they share the same geometry. 
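For context, the nearest-neighbour baseline discussed next, a plain symmetric chamfer distance between two 2D point sets, can be sketched as follows (a simplified version for illustration only):

```python
import torch

def chamfer_2d(pred_pts, target_pts):
    # Symmetric chamfer distance between (N, 2) and (M, 2) point sets: each point is
    # pulled only toward its single nearest counterpart, in contrast to PRDL's
    # anchor-based statistical distances.
    d = torch.cdist(pred_pts, target_pts)   # (N, M) pairwise distances
    return d.min(dim=1).values.mean() + d.min(dim=0).values.mean()
```
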
While an intuitive idea is incorporating the point-driven optimization methods like iterative closest points (ICP) [2-4] or chamfer distance [53], these methods are predominantly rooted in nearest-neighbor principles, and solely opting for the minimum distance potentially leads to local optima. We compare PRDL with ICP [54], chamfer distance and density aware chamfer distance [53] based on [1]. Since the ICP distance can be calculated from target to prediction or vice versa, we provide both methods. As depicted in Fig.9, PRDL outperforms other methods, producing outputs that align more accurately with the desired geometry. This superiority is attributed to the use of additional anchors and diverse statistical distances in PRDL. Referring to Fig.8 and Fig.9, PRDL stands out as the only loss capable of reconstructing effective results when the segmentation information is used independently. More comparison is in the supplemental materials.", + "bbox": [ + 498, + 277, + 890, + 594 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 609, + 625, + 625 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This paper proposes a novel Part Re-projection Distance Loss (PRDL) to reconstruct 3D faces with the geometric guidance of facial part segmentation. Analysis proves that PRDL is superior to renderer-based and other point-driven optimization methods. We also provide a new emotional face expression dataset and a new 3D mesh part annotation to facilitate studies. Experiments further highlight the state-of-the-art performance of PRDL in achieving high-fidelity and better part alignment in 3D face reconstruction.", + "bbox": [ + 498, + 633, + 890, + 771 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement", + "text_level": 1, + "bbox": [ + 500, + 784, + 658, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported in part by Chinese National Natural Science Foundation Projects 62176256, U23B2054, 62276254, 62206280, the Beijing Science and Technology Plan Project Z231100005923033, Beijing Natural Science Foundation L221013, the Youth Innovation Promotion Association CAS Y2021131 and InnoHK program.", + "bbox": [ + 498, + 809, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "1679", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] 3dmm model fitting using pytorch. https://github.com/ascust/3DMM-Fitting-Pytorch, 2021.8", + "[2] Brian Amberg, Sami Romdhani, and Thomas Vetter. Optimal step nonrigid icp algorithms for surface registration. In 2007 IEEE conference on computer vision and pattern recognition, pages 1-8. IEEE, 2007. 8", + "[3] K. S. Arun, T. S. Huang, and S. D. Blostein. Least-squares fitting of two 3-d point sets. IEEE Transactions on Pattern Analysis and Machine Intelligence, PAMI-9(5):698-700, 1987.", + "[4] P.J. Besl and Neil D. McKay. A method for registration of 3-d shapes. IEEE Transactions on Pattern Analysis and Machine Intelligence, 14(2):239-256, 1992. 8", + "[5] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 187-194, 1999. 1, 4", + "[6] Volker Blanz and Thomas Vetter. 
Face recognition based on fitting a 3d morphable model. IEEE Transactions on pattern analysis and machine intelligence, 25(9):1063-1074, 2003. 3", + "[7] Zenghao Chai, Haoxian Zhang, Jing Ren, Di Kang, Zhengzhuo Xu, Xuefei Zhe, Chun Yuan, and Linchao Bao. Really: Rethinking the evaluation of 3d face reconstruction. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part VIII, pages 74-92. Springer, 2022. 2, 7, 8", + "[8] Wenzheng Chen, Huan Ling, Jun Gao, Edward Smith, Jaakko Lehtinen, Alec Jacobson, and Sanja Fidler. Learning to predict 3d objects with an interpolation-based differentiable renderer. Advances in neural information processing systems, 32, 2019. 3, 6, 8", + "[9] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2019. 3", + "[10] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In CVPR, 2020. 6", + "[11] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops, pages 0–0, 2019. 1, 2, 3, 5, 6, 8", + "[12] Bernhard Egger, Sandro Schonborn, Andreas Schneider, Adam Kortylewski, Andreas Morel-Forster, Clemens Blumer, and Thomas Vetter. Occlusion-aware 3d morphable models and an illumination prior for face image analysis. International Journal of Computer Vision, 126:1269-1287, 2018. 1, 2", + "[13] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In Proceedings of the European conference on computer vision (ECCV), pages 534-551, 2018. 6, 7, 8" + ], + "bbox": [ + 80, + 114, + 468, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3D face model from in-the-wild images. 2021. 1, 2, 3, 5, 6, 7, 8", + "[15] Clement Fuji Tsang, Maria Shugrina, Jean Francois Lafleche, Towaki Takikawa, Jiehan Wang, Charles Loop, Wenzheng Chen, Krishna Murthy Jatavallabhula, Edward Smith, Artem Rozantsev, Or Perel, Tianchang Shen, Jun Gao, Sanja Fidler, Gavriel State, Jason Gorski, Tommy Xiang, Jianing Li, Michael Li, and Rev Lebaredian. Kaolin: A pytorch library for accelerating 3d deep learning research. https://github.com/NVIDIAGames/kaolin, 2022.2,3,8", + "[16] Kyle Genova, Forrester Cole, Aaron Maschinot, Aaron Sarna, Daniel Vlasic, and William T Freeman. Unsupervised training for 3d morphable model regression. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8377-8386, 2018. 3", + "[17] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. pages 152-168, 2020. 1, 2, 3, 6, 7, 8", + "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6", + "[19] Yueying Kao, Bowen Pan, Miao Xu, Jiangjing Lyu, Xiangyu Zhu, Yuanzhang Chang, Xiaobo Li, and Zhen Lei. 
Toward 3d face reconstruction in perspective projection: Estimating 6dof face pose from monocular image. IEEE Transactions on Image Processing, 32:3080-3091, 2023. 1", + "[20] Yury Kartynnik, Artsiom Ablavatski, Ivan Grishchenko, and Matthias Grundmann. Real-time facial surface geometry from monocular video on mobile gpus. arXiv preprint arXiv:1907.06724, 2019. 2", + "[21] Hiroharu Kato, Yoshitaka Ushiku, and Tatsuya Harada. Neural 3d mesh renderer. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3907-3916, 2018. 3", + "[22] Hiroharu Kato, Deniz Beker, Mihai Morariu, Takahiro Ando, Toru Matsuoka, Wadim Kehl, and Adrien Gaidon. Differentiable rendering: A survey. arXiv preprint arXiv:2006.12057, 2020. 2, 3, 6", + "[23] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 7", + "[24] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 3, 4, 5, 6", + "[25] Biwen Lei, Jianqiang Ren, Mengyang Feng, Miaomiao Cui, and Xuansong Xie. A hierarchical representation network for accurate and detailed face reconstruction from in-the-wild images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 394-403, 2023. 1, 2, 3, 6, 8", + "[26] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. To fit or not to fit: Model-based face reconstruction and occlusion segmentation from" + ], + "bbox": [ + 503, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "1680", + "bbox": [ + 483, + 945, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "weak supervision. arXiv preprint arXiv:2106.09614, 2021. 2", + "[27] Ruilong Li, Karl Bladin, Yajie Zhao, Chinmay Chinara, Owen Ingraham, Pengda Xiang, Xinglei Ren, Pratusha Prasad, Bipin Kishore, Jun Xing, and Hao Li. Learning formation of physically-based face attributes. 2020. 4", + "[28] Shan Li and Weihong Deng. Blended emotion in-the-wild: Multi-label facial expression recognition using crowdsourced annotations and deep locality feature learning. International Journal of Computer Vision, 127(6-7):884–906, 2019. 6", + "[29] Shan Li and Weihong Deng. Reliable crowdsourcing and deep locality-preserving learning for unconstrained facial expression recognition. IEEE Transactions on Image Processing, 28(1):356-370, 2019. 6", + "[30] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 2, 4", + "[31] Jinpeng Lin, Hao Yang, Dong Chen, Ming Zeng, Fang Wen, and Lu Yuan. Face parsing with roi tanh-warping. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5654-5663, 2019. 2", + "[32] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. Roi tanh-polar transformer network for face parsing in the wild. Image and Vision Computing, 112:104190, 2021. 2, 4", + "[33] Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7708-7717, 2019. 2, 3, 6, 8", + "[34] Yinglu Liu, Hailin Shi, Hao Shen, Yue Si, Xiaobo Wang, and Tao Mei. 
A new dataset and boundary-attention semantic segmentation for face parsing. In AAAI, pages 11637–11644, 2020. 2", + "[35] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 5, 6", + "[36] Matthew M Loper and Michael J Black. Opendr: An approximate differentiable renderer. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pages 154-169. Springer, 2014. 3", + "[37] Tetiana Martyniuk, Orest Kupyn, Yana Kurlyak, Igor Krashenyi, Jiri Matas, and Viktoriya Sharmanska. Dad-3heads: A large-scale dense, accurate and diverse dataset for 3d head alignment from a single image. In Proc. IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 6", + "[38] Carsten Moenning and Neil A Dodgson. Fast marching farthest point sampling. Technical report, University of Cambridge, Computer Laboratory, 2003. 4, 7", + "[39] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019. 6" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[40] Ravi Ramamoorthi and Pat Hanrahan. An efficient representation for irradiance environment maps. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 497-500, 2001. 3", + "[41] Chirag Raman, Charlie Hewitt, Erroll Wood, and Tadas Baltrusaitis. Mesh-tension driven expression-based wrinkles for synthetic faces. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3515-3525, 2023. 3", + "[42] Nikhila Ravi, Jeremy Reizenstein, David Novotny, Taylor Gordon, Wan-Yen Lo, Justin Johnson, and Georgia Gkioxari. Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501, 2020. 2, 3, 6, 8", + "[43] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE international conference on computer vision workshops, pages 397-403, 2013. 6", + "[44] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael J Black. Learning to regress 3d face shape and expression from an image without 3d supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7763-7772, 2019. 7", + "[45] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Mingmin Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3d face reconstruction by occlusion-aware multiview geometry consistency. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XV, pages 53-70. Springer, 2020. 1, 2, 6, 8", + "[46] Dave Shreiner, Bill The Khronos OpenGL ARB Working Group, et al. OpenGL programming guide: the official guide to learning OpenGL, versions 3.0 and 3.1. Pearson Education, 2009. 2, 3", + "[47] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. ACM Transactions on Graphics (TOG), 41(6):1-10, 2022. 5", + "[48] Ayush Tewari, Hans-Peter Seidel, Mohamed Elgharib, Christian Theobalt, et al. 
Learning complete 3d morphable face models from images and videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3361-3371, 2021. 2, 3", + "[49] Graphics University of Basel and Vision Research. parametric-face-image-generator. https://github.com/unibas-gravis/parametric-face-image-generator, 2017.2,4", + "[50] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 7", + "[51] Lizhen Wang, Zhiyuan Chen, Tao Yu, Chenguang Ma, Liang Li, and Yebin Liu. Faceverse: a fine-grained and detail-controllable 3d face morphable model from a hybrid dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20333-20342, 2022. 4", + "[52] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. Fake it till" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "1681", + "bbox": [ + 483, + 945, + 513, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "you make it: face analysis in the wild using synthetic data alone. In Proceedings of the IEEE/CVF international conference on computer vision, pages 3681-3691, 2021. 3", + "[53] Tong Wu, Liang Pan, Junzhe Zhang, Tai Wang, Ziwei Liu, and Dahua Lin. Density-aware chamfer distance as a comprehensive metric for point cloud completion. arXiv preprint arXiv:2111.12702, 2021. 8", + "[54] Jiaolong Yang, Hongdong Li, Dylan Campbell, and Yunde Jia. Go-icp: A globally optimal solution to 3d icp point-set registration. IEEE transactions on pattern analysis and machine intelligence, 38(11):2241–2254, 2015. 8", + "[55] Qi Zheng, Jiankang Deng, Zheng Zhu, Ying Li, and Stefanos Zafeiriou. Decoupled multi-task learning with cyclical self-regulation for face parsing. In Computer Vision and Pattern Recognition, 2022. 2, 4, 5, 6, 7", + "[56] Wenbin Zhu, HsiangTao Wu, Zeyu Chen, Noranart Vesdapunt, and Baoyuan Wang. Reda: reinforced differentiable attribute for 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4958-4967, 2020. 2, 3, 6, 8", + "[57] Xiangyu Zhu, Zhen Lei, Junjie Yan, Dong Yi, and Stan Z Li. High-fidelity pose and expression normalization for face recognition in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 787-796, 2015. 5", + "[58] Xiangyu Zhu, Xiaoming Liu, Zhen Lei, and Stan Z Li. Face alignment in full pose range: A 3d total solution. IEEE transactions on pattern analysis and machine intelligence, 41(1): 78-92, 2017. 3, 6", + "[59] Xiangyu Zhu, Chang Yu, Di Huang, Zhen Lei, Hao Wang, and Stan Z Li. Beyond 3dmm: Learning to capture high-fidelity 3d face shape. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 1", + "[60] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces. In European Conference on Computer Vision, pages 250–269. Springer, 2022. 
1, 2" + ], + "bbox": [ + 78, + 92, + 468, + 612 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "1682", + "bbox": [ + 483, + 945, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/29911afb-57cf-4105-bc3b-b432a117add8_model.json b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/29911afb-57cf-4105-bc3b-b432a117add8_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a4d551f78312c12947c66a851671e56f6f2cf804 --- /dev/null +++ b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/29911afb-57cf-4105-bc3b-b432a117add8_model.json @@ -0,0 +1,2642 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.044 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.131, + 0.773, + 0.177 + ], + "angle": 0, + "content": "3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.203, + 0.804, + 0.222 + ], + "angle": 0, + "content": "Zidu Wang\\(^{1,2}\\), Xiangyu Zhu\\(^{1,2*}\\), Tianshuo Zhang\\(^{1,2}\\), Baiqin Wang\\(^{1,2}\\), Zhen Lei\\(^{1,2,3}\\)" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.222, + 0.758, + 0.256 + ], + "angle": 0, + "content": "\\(^{1}\\)State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation, Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.257, + 0.793, + 0.275 + ], + "angle": 0, + "content": "\\(^{2}\\)School of Artificial Intelligence, University of Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.275, + 0.858, + 0.309 + ], + "angle": 0, + "content": "3 Centre for Artificial Intelligence and Robotics, Hong Kong Institute of Science & Innovation, Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.313, + 0.878, + 0.328 + ], + "angle": 0, + "content": "{wangzidu2022, wangbaiqin2024}@ia.ac.cn,{xiangyu.zhu, tianshuo.zhang, zlei}@nlpr.ia.ac.cn" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.362, + 0.314, + 0.379 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.395, + 0.474, + 0.742 + ], + "angle": 0, + "content": "3D Morphable Models (3DMMs) provide promising 3D face reconstructions in various applications. However, existing methods struggle to reconstruct faces with extreme expressions due to deficiencies in supervisory signals, such as sparse or inaccurate landmarks. Segmentation information contains effective geometric contexts for face reconstruction. Certain attempts intuitively depend on differentiable renderers to compare the rendered silhouettes of reconstruction with segmentation, which is prone to issues like local optima and gradient instability. In this paper, we fully utilize the facial part segmentation geometry by introducing Part Re-projection Distance Loss (PRDL). Specifically, PRDL transforms facial part segmentation into 2D points and re-projects the reconstruction onto the image plane. 
Subsequently, by introducing grid anchors and computing different statistical distances from these anchors to the point sets, PRDL establishes geometry descriptors to optimize the distribution of the point sets for face reconstruction. PRDL exhibits a clear gradient compared to the renderer-based methods and presents state-of-the-art reconstruction performance in extensive quantitative and qualitative experiments. Our project is available at https://github.com/wang-zidu/3DDFA-V3." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.773, + 0.21, + 0.789 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.799, + 0.47, + 0.876 + ], + "angle": 0, + "content": "Reconstructing 3D faces from 2D images is an essential task in computer vision and graphics, finding diverse applications in fields such as Virtual Reality (VR), Augmented Reality (AR), and Computer-generated Imagery (CGI), etc. In applications like VR makeup and AR emoji, 3DMMs" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.362, + 0.625, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.362, + 0.755, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.768, + 0.362, + 0.89, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.457, + 0.625, + 0.549 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.457, + 0.755, + 0.549 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.768, + 0.457, + 0.89, + 0.549 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.561, + 0.894, + 0.631 + ], + "angle": 0, + "content": "Figure 1. We introduce Part Re-projection Distance Loss (PRDL) for 3D face reconstruction, leveraging the geometric guidance provided by facial part segmentation. PRDL enhances the alignment of reconstructed facial features with the original image and excels in capturing extreme expressions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.749 + ], + "angle": 0, + "content": "[5] are commonly employed for precise facial feature positioning and capturing expressions. One of the most critical concerns is ensuring that the reconstructed facial components, including the eyes, eyebrows, lips, etc., seamlessly align with their corresponding regions in the input image with pixel-level accuracy, particularly when dealing with extreme facial expressions, as shown in Fig. 1." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Although current methods [11, 14, 17, 19, 25] have made notable strides in face reconstruction, some issues persist. On the one hand, existing works often rely on landmarks [17, 60] and photometric-texture [12, 45] to guide face reconstruction. In the case of extreme facial expressions, landmarks are sparse or inaccurate and the gradient from the texture loss cannot directly constrain the shape [59], posing a challenge for existing methods to achieve precise alignment of facial features in 3D face reconstruction, as depicted in Fig. 2(a). 
On the other hand, many methods" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.298, + 0.901 + ], + "angle": 0, + "content": "*Corresponding author: Xiangyu Zhu" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1672" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.228 + ], + "angle": 0, + "content": "primarily adopt 3D errors as a quality metric, overlooking the precise alignment of facial parts. As shown in Fig. 2(b), when evaluating the REALY [7] benchmark in the eye region, comparing the results of 3DDFA-v2 [17] and DECA [14], a lower 3D region error may not lead to better 2D region alignment. We believe in the potential for a more comprehensive utilization of the geometry information inherent in each facial part segmentation to guide 3D face reconstruction, addressing the issues mentioned above." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.232, + 0.473, + 0.55 + ], + "angle": 0, + "content": "Facial part segmentation [24, 31, 32, 34] has been well developed, offering precise geometry for each facial feature with pixel-level accuracy. Compared with commonly used landmarks, part segmentation provides denser labels covering the whole image. Compared with photometric texture, part segmentation is less susceptible to lighting or shadow interference. Although facial part segmentation occasionally appears in the process of 3D face reconstruction, it is not fully utilized. For instance, it only serves to enhance the reconstruction quality of specific regions [25, 48], or to distinguish the overall texture location for photometric-texture-loss [26], without delving into the specifics of facial parts. Attempts [33, 56] to fit 3D parts with the guidance of segmentation information rely on differentiable renderers [15, 42, 46] to generate the silhouettes of the predicted 3D facial regions and optimize the difference between the rendered silhouettes and the 2D segmentation through Intersection over Union (IoU) loss. However, these renderers fail to provide sufficient and stable geometric signals for face reconstruction due to local optima, rendering error propagation, and gradient instability [22]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.554, + 0.473, + 0.903 + ], + "angle": 0, + "content": "This paper leverages the precise and rich geometric information in facial part silhouettes to guide face reconstruction, thereby improving the alignment of reconstructed facial features with the original image and excelling in reconstructing extreme facial expressions. Fig.1 provides an overview of the proposed Part Re-projection Distance Loss (PRDL). Firstly, PRDL samples points within the segmented region and transforms the segmentation information into a 2D point set for each facial part. The 3D face reconstruction is also re-projected onto the image plane and transformed into 2D point sets for different regions. Secondly, PRDL samples the image grid anchors and establishes geometric descriptors. These descriptors are constructed by using various statistical distances from the anchors to the point set. Finally, PRDL optimizes the distribution of the same semantic point sets, leading to improved overlap between the regions covered by the target and predicted point sets. In contrast to renderer-based methods, PRDL exhibits a clear gradient. 
To facilitate the use of PRDL, we provide a new 3D mesh part annotation aligned with semantic regions in 2D face segmentation [24, 55], which differs from the existing annotations [30, 49], as shown in Fig.2(c). Besides the drawbacks of supervisory signals, the challenge of han" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.091, + 0.733, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.523, + 0.151, + 0.712, + 0.16 + ], + "angle": 0, + "content": "(a) Performance on extreme expressions" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.161, + 0.733, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.545, + 0.227, + 0.69, + 0.237 + ], + "angle": 0, + "content": "(b) 3D error vs. 2D alignment" + }, + { + "type": "image", + "bbox": [ + 0.736, + 0.091, + 0.888, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.741, + 0.227, + 0.885, + 0.237 + ], + "angle": 0, + "content": "(c) 3D face model annotations" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.242, + 0.892, + 0.325 + ], + "angle": 0, + "content": "Figure 2. Drawbacks of existing research and our results. (a) Present researches fail to reconstruct extreme expressions and perform bad region alignment. (b) Inconsistencies between 3D errors and 2D alignments, such as the eye region in this case. (c) Geometric optimization of each semantically consistent part is only achievable through our annotations." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.337, + 0.892, + 0.458 + ], + "angle": 0, + "content": "dling extreme expressions arises from data limitations. To boost studies and address the lack of emotional expression (e.g., closed-eye, open-mouth, frown, etc.), we synthesize a face dataset using the GAN-based method [24]. To highlight the performance of region overlapping, we propose a new benchmark to quantify the accuracy of 3D reconstruction parts cling to their corresponding image components on the 2D image plane. Our main contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.46, + 0.892, + 0.55 + ], + "angle": 0, + "content": "- We introduce a novel Part Re-projection Distance Loss (PRDL) to comprehensively utilize segmentation information for face reconstruction. PRDL transforms the target and prediction into semantic point sets, optimizing the distribution of point sets to ensure that the reconstructed regions and the target share the same geometry." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.551, + 0.892, + 0.596 + ], + "angle": 0, + "content": "- We introduce a new synthetic face dataset including closed-eye, open-mouth, and frown expressions, with more than \\(200K\\) images." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.596, + 0.892, + 0.657 + ], + "angle": 0, + "content": "- Extensive experiments show that the results with PRDL achieve excellent performance and outperform the existing methods. The data and code are available at https://github.com/wang-zidu/3DDFA-V3." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.46, + 0.892, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.672, + 0.642, + 0.687 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.892, + 0.901 + ], + "angle": 0, + "content": "2D-to-3D Losses for 3D Face Reconstruction. 
Landmark loss [11, 17, 60] stands out as the most widely employed and effective supervised way for face reconstruction. Some studies [20, 37] reveal that it can generate 3D faces under the guidance of sufficient hundreds or thousands landmarks. Photometric loss is another commonly used loss involving rendering the reconstructed mesh with texture into an image and comparing it to the original input. Some researchers focus on predicting the facial features that need to be fitted while excluding occlusions [12, 45]. The photometric loss is susceptible to factors like texture basis, skin masks, and rendering modes. It emphasizes overall visualization and may not effectively constrain local details. Perception loss" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1673" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.09, + 0.48, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.48, + 0.09, + 0.887, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.267, + 0.894, + 0.325 + ], + "angle": 0, + "content": "Figure 3. Overview of Part Re-projection Distance Loss (PRDL). (a): Transforming facial part segmentation into target point sets \\(\\{C_p\\}\\). (b): Re-projecting \\(V_{3d}(\\alpha)\\) onto the image plane to obtain predicted point sets \\(\\{V_{2d}^p (\\alpha)\\}\\). (c): Given anchors \\(\\mathbf{A}\\) and distance functions \\(\\mathcal{F}\\), the core idea of PRDL is to minimize the difference of every statistical distance from any \\(\\pmb{a}_i\\in \\pmb{A}\\) to the \\(V_{2d}^{p}(\\alpha)\\) or \\(C_p\\), leading to enhanced overlap between the regions covered by the target and predicted point sets." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.336, + 0.471, + 0.429 + ], + "angle": 0, + "content": "[11, 14, 16] distinguishes itself from image-level methods by employing pre-trained deep face recognition networks [9] to extract high-level features from the rendered reconstruction results. These features are then compared with the features from the input. Lip segmentation consistency loss [48] employs mouth segmentation to help reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.437, + 0.47, + 0.726 + ], + "angle": 0, + "content": "Differentiable Silhouette Renderers. The development of differentiable renderers [15, 42, 46] has enriched the supervised methods for 3D face reconstruction. These pipelines make the rasterization process differentiable, allowing for the computation of gradients for every pixel in the rendered results. By combining IoU loss with segmentation information, the silhouettes produced by these renderers have been shown to optimize 3D shapes [8, 33, 56]. These rasterization processes typically rely on either local [21, 36] or global [8, 33] geometric distance-based weighted aggregation, generating silhouettes by computing a probability related to the distance from pixels to mesh faces. However, to obtain a suitable sharp silhouette, the weight contribution of each position to the rendered pixel will decrease sharply with the increase of distance, and the gradient generated by the shape difference at the large distance will be small or zero, which makes it difficult to retain accurate geometry guidance. These renderers also encounter issues such as rendering error propagation and gradient instability [22]." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Synthetic Dataset. Synthetic data [41, 52, 58] is commonly used to train 3D face reconstruction models [11, 17, 25]. However, these synthetic faces either prioritize the diversification of background, illumination, and identities [41, 52], or concentrate on pose variation [58], so models trained on them achieve good results in reconstructing natural facial expressions but struggle to reconstruct extreme expressions. To overcome these limitations and facilitate the related research, this paper adopts a GAN-based method [24] to synthesize realistic and diverse facial expression data, including closed eyes, open mouths, and frowns." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.335, + 0.635, + 0.352 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.36, + 0.642, + 0.374 + ], + "angle": 0, + "content": "3.1. Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.383, + 0.893, + 0.414 + ], + "angle": 0, + "content": "We construct a face model, an illumination model, and a camera model based on [6, 11, 14, 17]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.421, + 0.892, + 0.452 + ], + "angle": 0, + "content": "Face Model. The vertices and albedo of a 3D face are determined by the following formulas:" + }, + { + "type": "equation", + "bbox": [ + 0.522, + 0.462, + 0.892, + 0.487 + ], + "angle": 0, + "content": "\\[\nV _ {3 d} (\\boldsymbol {\\alpha}) = \\boldsymbol {R} \\left(\\boldsymbol {\\alpha} _ {a}\\right) \\left(\\bar {\\boldsymbol {V}} + \\boldsymbol {\\alpha} _ {i d} \\boldsymbol {A} _ {i d} + \\boldsymbol {\\alpha} _ {\\exp} \\boldsymbol {A} _ {\\exp}\\right) + \\boldsymbol {\\alpha} _ {t} , \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.523, + 0.482, + 0.684, + 0.497 + ], + "angle": 0, + "content": "\\[\nT _ {a l b} (\\boldsymbol {\\alpha}) = \\overline {{\\boldsymbol {T}}} + \\boldsymbol {\\alpha} _ {a l b} \\boldsymbol {A} _ {a l b}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.512, + 0.892, + 0.65 + ], + "angle": 0, + "content": "where \\(V_{3d}(\\alpha) \\in \\mathbb{R}^{3 \\times 35709}\\) are the 3D face vertices, \\(\\overline{\\boldsymbol{V}}\\) is the mean shape. \\(T_{alb}(\\alpha) \\in \\mathbb{R}^{3 \\times 35709}\\) is the albedo, \\(\\overline{T}\\) is the mean albedo. \\(A_{id}\\), \\(A_{exp}\\) and \\(A_{alb}\\) are the face identity vector bases, the expression vector bases and the albedo vector bases, respectively. \\(\\alpha_{id} \\in \\mathbb{R}^{80}\\), \\(\\alpha_{exp} \\in \\mathbb{R}^{64}\\) and \\(\\alpha_{alb} \\in \\mathbb{R}^{80}\\) are the identity parameter, the expression parameter and the albedo parameter, respectively. \\(\\alpha_{t} \\in \\mathbb{R}^{3}\\) is the translation parameter. \\(\\pmb{R}(\\pmb{\\alpha}_{a}) \\in \\mathbb{R}^{3 \\times 3}\\) is the rotation matrix corresponding to pitch/yaw/roll angles \\(\\alpha_{a} \\in \\mathbb{R}^{3}\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.655, + 0.892, + 0.717 + ], + "angle": 0, + "content": "Camera. We employ a camera with a fixed perspective projection, which is the same as [11, 25]. Using this camera to re-project \\( V_{3d}(\\alpha) \\) into the 2D image plane yields \\( V_{2d}(\\alpha) \\in \\mathbb{R}^{2 \\times 35709} \\)."
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.723, + 0.892, + 0.77 + ], + "angle": 0, + "content": "Illumination Model. Following [14], we adopt Spherical Harmonics (SH) [40] for the estimation of the shaded texture \\( T_{tex}(\\alpha) \\):" + }, + { + "type": "equation", + "bbox": [ + 0.563, + 0.78, + 0.892, + 0.816 + ], + "angle": 0, + "content": "\\[\nT _ {t e x} (\\boldsymbol {\\alpha}) = T _ {a l b} (\\boldsymbol {\\alpha}) \\odot \\sum_ {k = 1} ^ {9} \\boldsymbol {\\alpha} _ {s h} ^ {k} \\boldsymbol {\\Psi} _ {k} (\\boldsymbol {N}), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.825, + 0.892, + 0.902 + ], + "angle": 0, + "content": "where \\(\\odot\\) denotes the Hadamard product, \\(N\\) is the surface normal of \\(V_{3d}(\\alpha)\\), \\(\\Psi : \\mathbb{R}^3 \\to \\mathbb{R}\\) is the SH basis function and \\(\\alpha_{sh} \\in \\mathbb{R}^9\\) is the corresponding SH parameter. In summary, \\(\\alpha = [\\alpha_{id}, \\alpha_{\\mathrm{exp}}, \\alpha_a, \\alpha_t, \\alpha_{sh}]\\) is the undetermined parameter." + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1674" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.091, + 0.432, + 0.108 + ], + "angle": 0, + "content": "3.2. Point Transformation on the Image Plane" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.121, + 0.473, + 0.372 + ], + "angle": 0, + "content": "Transforming Segmentation to 2D Points. For an input RGB face image \\( I \\in \\mathbb{R}^{H \\times W \\times 3} \\), the prediction of a face segmentation method can be represented by a set of binary tensors \\( M = \\{M_p | p \\in P\\} \\), where \\( P = \\{\\text{left-eye, right-eye, left_eyebrow, right_eyebrow, up\\_lip, down\\_lip, nose, skin}\\} \\) and \\( M_p \\in \\{0,1\\}^{H \\times W} \\). Specifically, \\( M_p^{(x,y)} = 1 \\) only if the 2D pixel position \\( (x,y) \\) of \\( M_p \\) belongs to a certain face part \\( p \\), and otherwise \\( M_p^{(x,y)} = 0 \\). \\( M \\) can be transformed into a set of point sets \\( C = \\{C_p | p \\in P\\} \\), where \\( C_p = \\{(x,y) | if M_p^{(x,y)} = 1\\} \\). In this step, we employ DML-CSR [55] for face segmentation, excluding the ear regions, filtering out noise from the segmentation, and dynamically removing the forehead region above the eyebrows based on their position. This procedure is illustrated in Fig. 3(a). More implementation details are provided in the supplemental materials." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.378, + 0.473, + 0.592 + ], + "angle": 0, + "content": "Facial Part Annotation on 3D Face Model. Our objective is to leverage \\(\\{C_p\\}\\) for guiding 3D face reconstruction. Thus, we should ensure that the reconstructed mesh can be divided into regions consistent with the semantics of the 2D segmentation. Due to the topological consistency of the face model, every vertex on the mesh can be annotated for a specific region. However, existing annotations [27, 30, 49] do not conform to widely accepted 2D face segmentation definitions [24, 32], as shown in Fig.2(c). To address this misalignment, we introduce new part annotations on both BFM [5] and FaceVerse [51]. We partition the vertices based on their indices. \\(i \\in Ind_p\\) indicates that the \\(i\\)-th vertex (denoted as \\(\\mathbf{v}\\)) on the mesh belongs to part \\(p\\). 
\\(\\{Ind_p|p \\in P\\}\\) can be obtained by:" + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.6, + 0.47, + 0.624 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} I ^ {s e g} = \\operatorname {S e g} (\\operatorname {R e n d e r} (V _ {3 d}, T e x)) \\\\ i \\in I n d _ {p}, i f I ^ {s e g} (\\boldsymbol {v}) \\in p \\end{array} , \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.642, + 0.473, + 0.825 + ], + "angle": 0, + "content": "where \\( \\text{Render}(\\cdot) \\) generates an image by applying texture on the mesh, and \\( \\text{Seg}(\\cdot) \\) is responsible for segmenting the rendered result. We employ different shapes \\( V_{3d} \\) and varying textures \\( Tex \\) to label every \\( v \\in V_{3d} \\) with hand-crafted modification. The annotation \\( \\{Ind_p\\} \\) is pre-computed offline before the training process. Consequently, we utilize \\( \\{Ind_p\\} \\) to transform the re-projection \\( V_{2d}(\\alpha) \\) into semantic point sets \\( \\{V_{2d}^p (\\alpha)|p \\in P\\} \\). Besides, the upper forehead region situated above the eyebrows is dynamically excluded to ensure consistency with the target. Points obstructed by hair are removed based on \\( \\{C_p\\} \\), as shown in Fig. 3(b). Please refer to supplemental materials for annotation details." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.832, + 0.433, + 0.849 + ], + "angle": 0, + "content": "3.3. Part Re-projection Distance Loss (PRDL)" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.472, + 0.903 + ], + "angle": 0, + "content": "This section describes the design of PRDL, focusing on constructing geometric descriptors and establishing the relation between the prediction \\(\\{V_{2d}^p (\\alpha)\\}\\) and the ground" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.091, + 0.892, + 0.121 + ], + "angle": 0, + "content": "truth \\(\\{C_p\\}\\) for a given \\(p \\in P\\), which proves instrumental for face reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.125, + 0.895, + 0.324 + ], + "angle": 0, + "content": "In a more generalized formulation, considering two point sets \\(C = \\{c_1, c_2, \\dots, c_{|C|}\\}\\) and \\(C^* = \\{c_1^*, c_2^*, \\dots, c_{|C^*|}^*\\}\\), we aim to establish geometry descriptions by quantifying shape alignment between them for reconstruction. \\(C\\) and \\(C^*\\) may not possess the same number of points, and their points lack correspondence. Instead of directly searching for the correspondence between the two sets, we use a set of fixed points as anchors \\(A = \\{a_1, a_2, \\dots, a_{|A|}\\}\\) and a collection of statistical distance functions \\(\\mathcal{F} = \\{f_1, f_2, \\dots, f_{|\\mathcal{F}|}\\}\\) to construct geometry description tensors \\(\\Gamma(C, A, \\mathcal{F}) \\in \\mathbb{R}^{|\\mathcal{A}| \\times |\\mathcal{F}|}\\) and \\(\\Gamma(C^*, A, \\mathcal{F}) \\in \\mathbb{R}^{|\\mathcal{A}| \\times |\\mathcal{F}|}\\) for \\(C\\) and \\(C^*\\), respectively (denoted as \\(\\Gamma\\) and \\(\\Gamma^*\\) for brevity). 
The value \\(\\Gamma(i, j)\\) and \\(\\Gamma^*(i, j)\\) at the position \\((i, j)\\) are determined by:" + }, + { + "type": "equation", + "bbox": [ + 0.611, + 0.343, + 0.892, + 0.385 + ], + "angle": 0, + "content": "\\[\n\\left\\{ \\begin{array}{l} \\boldsymbol {\\Gamma} (i, j) = f _ {j} (\\boldsymbol {C}, \\boldsymbol {a} _ {i}) \\\\ \\boldsymbol {\\Gamma} ^ {*} (i, j) = f _ {j} (\\boldsymbol {C} ^ {*}, \\boldsymbol {a} _ {i}), \\end{array} \\right. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.399, + 0.892, + 0.445 + ], + "angle": 0, + "content": "where every function \\(f_{j}(\\pmb {B},\\pmb {b})\\in \\mathcal{F}\\) describes the distance from a single point \\(\\pmb{b}\\) to a set of points \\(\\pmb{B}\\), and \\(f_{j}(\\pmb {B},\\pmb {b})\\) can be any statistically meaningful distance." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.448, + 0.894, + 0.54 + ], + "angle": 0, + "content": "When fitting 3DMM to the segmented silhouettes for part \\(p\\), we set \\(\\boldsymbol{C} = V_{2d}^{p}(\\boldsymbol{\\alpha})\\) and \\(C^* = C_p\\) with specified anchors \\(\\mathbf{A}\\) and a set of distance functions \\(\\mathcal{F}\\). Then we calculate their corresponding geometry descriptor tensors \\(\\Gamma_p = \\Gamma(V_{2d}^p(\\boldsymbol{\\alpha}), \\boldsymbol{A}, \\mathcal{F})\\) and \\(\\Gamma_p^* = \\Gamma(C_p, \\boldsymbol{A}, \\mathcal{F})\\). Part Re-projection Distance Loss (PRDL) \\(\\mathcal{L}_{prdl}\\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.588, + 0.557, + 0.892, + 0.584 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {p r d l} = \\sum_ {p \\in P} w _ {p r d l} ^ {p} \\left\\| \\boldsymbol {\\Gamma} _ {p} - \\boldsymbol {\\Gamma} _ {p} ^ {*} \\right\\| _ {2} ^ {2}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.603, + 0.892, + 0.68 + ], + "angle": 0, + "content": "where \\(w_{prdl}^p\\) is the weight of each part \\(p\\). In this paper, we set \\(\\mathcal{F}\\) as a collection of the nearest \\((f_{min})\\), furthest \\((f_{max})\\), and average \\((f_{ave})\\) distance, i.e. \\(\\mathcal{F} = \\{f_{max}, f_{min}, f_{ave}\\}\\). We set \\(\\mathbf{A}\\) as a \\(H \\times W\\) mesh grid. Then for \\(\\forall \\mathbf{a}_i \\in \\mathbf{A}\\), the optimization objective of \\(\\mathcal{L}_{prdl}\\) is to:" + }, + { + "type": "equation", + "bbox": [ + 0.525, + 0.697, + 0.892, + 0.747 + ], + "angle": 0, + "content": "\\[\n\\left\\{ \\begin{array}{l} \\min | | f _ {m i n} (\\boldsymbol {C} _ {p}, \\boldsymbol {a} _ {i}) - f _ {m i n} (V _ {2 d} ^ {p} (\\boldsymbol {\\alpha}), \\boldsymbol {a} _ {i}) | | _ {2} ^ {2} \\\\ \\min | | f _ {m a x} (\\boldsymbol {C} _ {p}, \\boldsymbol {a} _ {i}) - f _ {m a x} (V _ {2 d} ^ {p} (\\boldsymbol {\\alpha}), \\boldsymbol {a} _ {i}) | | _ {2} ^ {2} \\\\ \\min | | f _ {a v e} (\\boldsymbol {C} _ {p}, \\boldsymbol {a} _ {i}) - f _ {a v e} (V _ {2 d} ^ {p} (\\boldsymbol {\\alpha}), \\boldsymbol {a} _ {i}) | | _ {2} ^ {2} \\end{array} . \\right. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.902 + ], + "angle": 0, + "content": "This process is shown in Fig. 3(c). When \\( p = \\) left_eye, PRDL minimizes the length difference between the indigo and orange lines (also as shown in Fig. 6(a) when \\( p = \\) right_eybrow). The upper right corner of Fig. 
3(c) is a visualization of \\( \\Gamma_{left\\_eye} \\) with the last channel separately by reshaping it from \\( \\mathbb{R}^{|A| \\times |\\mathcal{F}|} \\) to \\( \\mathbb{R}^{H \\times W \\times |\\mathcal{F}|} \\). It is worth note that, the points number in \\( V_{2d}^{p}(\\alpha) \\), \\( C_p \\) and \\( A \\) can be reduced by using Farthest Point Sampling (FPS) [38] to decrease computational costs." + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1675" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.079, + 0.089, + 0.473, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.13, + 0.242, + 0.416, + 0.256 + ], + "angle": 0, + "content": "Figure 4. Synthesize emotional expression data." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.263, + 0.47, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.351, + 0.419, + 0.364 + ], + "angle": 0, + "content": "Figure 5. Examples of our synthetic face dataset." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.38, + 0.226, + 0.395 + ], + "angle": 0, + "content": "3.4. Overall Losses" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.403, + 0.47, + 0.433 + ], + "angle": 0, + "content": "To reconstruct a 3D face from image \\(I\\), we build frameworks to minimize the total loss \\(\\mathcal{L}\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.133, + 0.443, + 0.469, + 0.48 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} = \\lambda_ {p r d l} \\mathcal {L} _ {p r d l} + \\lambda_ {l m k} \\mathcal {L} _ {l m k} + \\lambda_ {p h o} \\mathcal {L} _ {p h o} \\tag {7} \\\\ + \\lambda_ {p e r} \\mathcal {L} _ {p e r} + \\lambda_ {r e g} \\mathcal {L} _ {r e g}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.49, + 0.47, + 0.627 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_{lmk}\\) is the landmark loss, we use detectors to locate 240 2D landmarks for \\(\\mathcal{L}_{lmk}\\) and adopt the dynamic landmark marching [57] to handle the non-correspondence between 2D and 3D cheek contour landmarks arising from pose variations. The photometric loss \\(\\mathcal{L}_{pho}\\) and the perceptual loss \\(\\mathcal{L}_{per}\\) are based on [11, 14]. \\(\\mathcal{L}_{reg}\\) is the regularization loss for \\(\\alpha\\). \\(\\lambda_{prdl} = 0.8e - 3\\), \\(\\lambda_{lmk} = 1.6e - 3\\), \\(\\lambda_{pho} = 1.9\\), \\(\\lambda_{per} = 0.2\\), and \\(\\lambda_{reg} = 3e - 4\\) are the balance weights. \\(\\mathcal{L}_{prdl}\\) and \\(\\mathcal{L}_{lmk}\\) are normalized by \\(H\\times W\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.636, + 0.4, + 0.652 + ], + "angle": 0, + "content": "3.5. Synthetic Emotional Expression Data" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Benefiting from recent developments in face editing research [24, 47], we can generate realistic faces through segmentation \\( M \\). We aim to mass-produce realistic and diverse facial expression data. To achieve this, we start by obtaining the segmentation \\( M \\) and landmarks \\( lmk \\) of the original image \\( I \\) with a segmentation method [55] and a landmark detector, respectively. Leveraging the location of landmarks \\( lmk \\), we apply affine transformation with various patterns onto the segmentation \\( M \\), resulting in \\( M' \\). 
Subsequently, \\( M' \\) is fed into the generative network [24] to produce a new facial expression image \\( I' \\), as depicted in Fig. 4. Based on CelebA [35] and CelebAMask-HQ [24], we have generated a dataset comprising more than \\( 200K \\) images, including expressions such as closed-eye, open-mouth, and frown, as depicted in Fig. 5. This dataset will be publicly available to facilitate research." + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.091, + 0.657, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.56, + 0.21, + 0.585, + 0.222 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.091, + 0.815, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.724, + 0.21, + 0.749, + 0.222 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.819, + 0.091, + 0.891, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.841, + 0.21, + 0.865, + 0.221 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.225, + 0.892, + 0.295 + ], + "angle": 0, + "content": "Figure 6. (a): \\( p = \\) right_eyebrow when the closest distance \\( (f_{min}) \\) is compared. (b): The gradient descent of PRDL for (a). (c): \\( \\mathbf{\\Gamma}_p^* \\) is the regression target of PRDL in \\( f_{min} \\) channel. (d): \\( M_p \\) is the regression target of renderer-based methods. \\( \\mathbf{\\Gamma}_p^* \\) is more informative than \\( M_p \\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.308, + 0.858, + 0.324 + ], + "angle": 0, + "content": "4. Analysis of PRDL and Related Methods" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.34, + 0.892, + 0.446 + ], + "angle": 0, + "content": "The Gradient of PRDL. With anchors and distance functions as the bridge, PRDL establishes the geometry descriptions of the two point sets. In Fig. 6, we take \\( p = \\) right_eyebrow as an example to analyze the gradient of PRDL. When considering \\( f_{min} \\) and a specific anchor \\( \\pmb{a}_i \\in \\pmb{A} \\), \\( f_{min} \\) identifies \\( \\pmb{c}_m \\) and \\( \\pmb{v}_n \\) from \\( C_p \\) and \\( V_{2d}^p(\\alpha) \\), respectively, by selecting the ones closest to \\( \\pmb{a}_i \\):" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.457, + 0.891, + 0.482 + ], + "angle": 0, + "content": "\\[\nm = \\underset {j} {\\arg \\min } \\| \\boldsymbol {a} _ {i} - \\boldsymbol {c} _ {j} \\| _ {2}, \\quad \\boldsymbol {c} _ {j} \\in C _ {p}, \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.556, + 0.494, + 0.891, + 0.52 + ], + "angle": 0, + "content": "\\[\nn = \\underset {j} {\\arg \\min } \\| \\boldsymbol {a} _ {i} - \\boldsymbol {v} _ {j} \\| _ {2}, \\quad \\boldsymbol {v} _ {j} \\in V _ {2 d} ^ {p} (\\boldsymbol {\\alpha}). 
\\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.524, + 0.891, + 0.555 + ], + "angle": 0, + "content": "Under the definition of PRDL, the corresponding energy function \\(E_{i,m,n}\\) for \\(\\pmb{a}_i,\\pmb{c}_m\\) and \\(\\pmb{v}_n\\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.562, + 0.564, + 0.891, + 0.603 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} E _ {i, m, n} = \\left(\\left\\| \\boldsymbol {a} _ {i} - \\boldsymbol {c} _ {m} \\right\\| _ {2} - \\left\\| \\boldsymbol {a} _ {i} - \\boldsymbol {v} _ {n} \\right\\| _ {2}\\right) ^ {2} \\tag {10} \\\\ = \\left(d _ {i, m} - d _ {i, n}\\right) ^ {2}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.611, + 0.891, + 0.643 + ], + "angle": 0, + "content": "where \\(d_{i,m} = ||\\pmb{a}_i - \\pmb{c}_m||_2, d_{i,n} = ||\\pmb{a}_i - \\pmb{v}_n||_2\\). The gradient descent of \\(E_{i,m,n}\\) on \\(\\pmb{v}_n\\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.576, + 0.651, + 0.891, + 0.674 + ], + "angle": 0, + "content": "\\[\n- \\frac {\\partial E _ {i , m , n}}{\\partial \\boldsymbol {v} _ {n}} = 2 (\\boldsymbol {v} _ {n} - \\boldsymbol {a} _ {i}) \\left(\\frac {d _ {i , m}}{d _ {i , n}} - 1\\right). \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.68, + 0.892, + 0.8 + ], + "angle": 0, + "content": "The physical explanation of Eqn. 11 is comprehensible and concise: the direction of \\(-\\nabla E_{i,m,n}\\) always aligns with the line connecting \\(\\pmb{a}_i\\) and \\(\\pmb{v}_n\\), if \\(d_{i,n} > d_{i,m}\\), the direction of \\(-\\nabla E_{i,m,n}\\) is from \\(\\pmb{v}_n\\) to \\(\\pmb{a}_i\\) (as shown in Fig. 6(b)), and vice versa. In the context of gradient descent, the effect of \\(-\\nabla E_{i,m,n}\\) is to make \\(d_{i,n} = d_{i,m}\\) as much as possible. Given \\(\\pmb{A}\\) and \\(f_{min}\\), the gradient descent of \\(\\mathcal{L}_{prdl}\\) on \\(\\pmb{v}_n\\) is the aggregation of all anchors:" + }, + { + "type": "equation", + "bbox": [ + 0.586, + 0.808, + 0.891, + 0.862 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} - \\frac {\\partial \\mathcal {L} _ {p r d l}}{\\partial \\boldsymbol {v} _ {n}} = - w _ {p r d l} ^ {p} \\sum \\frac {\\partial E _ {i , m , n}}{\\partial \\boldsymbol {v} _ {n}} \\\\ = - w _ {p r d l} ^ {p} \\sum_ {i, m} ^ {i, m} \\nabla E _ {i, m, n}. \\tag {12} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "The scenario with \\( f_{max} \\) is similar to that of \\( f_{min} \\), with the only distinction lying in the selection of points. \\( f_{max} \\)" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1676" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.892, + 0.118 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison on Part IoU benchmark. The best and runner-up are highlighted in bold and underlined, respectively. R_eye denotes the right eye, and similar definitions for the rest are omitted." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.122, + 0.892, + 0.274 + ], + "angle": 0, + "content": "
MethodsPart IoU(%)↑
R_eyeavg.± std.L_eyeavg.± std.R_browavg.± std.L_browavg.± std.Noseavg.± std.Up_lipavg.± std.Down_lipavg.± std.avg.
PRNet [13]65.87±16.3666.73±14.7461.46±15.8959.18±16.3183.34±4.5750.88±18.3558.16±17.7263.66
MGCNet [45]64.42±16.0264.81±16.9155.25±15.2961.30±15.5887.40±3.5141.16±19.7066.22±13.8362.94
Deep3D [11]71.87±12.0070.52±12.1964.66±11.3164.70±11.9887.69±3.5161.21±15.6065.95±13.0869.51
3DDFA-v2 [17]61.39±15.9857.51±18.0943.38±25.2538.85±24.3880.83±4.9250.20±17.1759.01±15.2355.88
HRN [25]73.31±11.3973.61±11.5067.91±8.2666.78±10.2790.00±2.6063.80±14.1666.40±11.9471.69
DECA [14]58.09±21.4062.56±19.4155.27±19.4951.86±19.9386.54±9.1156.39±16.9662.81±17.6661.93
Ours (w/o Lprdl)70.72±9.4475.69±10.7971.11±8.5871.69±8.7388.35±4.6057.26±15.9769.71±10.6872.08
Ours (w/o Syn. Data)73.81±10.1272.55±10.6872.24±9.2370.90±8.5588.71±4.1157.43±14.3769.87±10.5472.22
Ours74.55±11.4676.06±10.3274.00±7.7274.05±7.7089.06±3.5358.16±12.7670.86±10.3473.82
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.28, + 0.892, + 0.308 + ], + "angle": 0, + "content": "Table 2. Quantitative comparison on Realty benchmark. Lower values indicate better results. The best and runner-up are highlighted in bold and underlined, respectively." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.312, + 0.892, + 0.445 + ], + "angle": 0, + "content": "
MethodsFrontal-view (mm) ↓Side-view (mm) ↓
Nose avg.± std.Mouth avg.± std.Forehead avg.± std.Cheek avg.± std.avg.Nose avg.± std.Mouth avg.± std.Forehead avg.± std.Cheek avg.± std.avg.
PRNet [13]1.923±0.5181.838±0.6372.429±0.5881.863±0.6982.0131.868±0.5101.856±0.6072.445±0.5701.960±0.7312.032
MGCNet [45]1.771±0.3801.417±0.4092.268±0.5031.639±0.6501.7741.827±0.3831.409±0.4182.248±0.5081.665±0.6441.787
Deep3D[11]1.719±0.3541.368±0.4392.015±0.4491.528±0.5011.6571.749±0.3431.411±0.3952.074±0.4861.528±0.5171.691
3DDFA-v2 [17]1.903±0.5171.597±0.4782.447±0.6471.757±0.6421.9261.883±0.4991.642±0.5012.465±0.6221.781±0.6361.943
HRN [25]1.722±0.3301.357±0.5231.995±0.4761.072±0.3331.5371.642±0.3101.285±0.5281.906±0.4791.038±0.3221.468
DECA [14]1.694±0.3552.516±0.8392.394±0.5761.479±0.5352.0101.903±1.0502.472±1.0792.423±0.7201.630±1.1352.107
Ours (w/o Lpr dl)1.671±0.3321.460±0.4742.001±0.4281.142±0.3151.5681.665±0.3491.297±0.4002.016±0.4481.134±0.3421.528
Ours (w/o Syn. Data)1.592±0.3271.339±0.4331.823±0.4071.119±0.3321.4681.628±0.3201.229±0.4331.872±0.4071.091±0.3121.455
Ours1.586±0.3061.238±0.3731.810±0.3941.111±0.3271.4361.623±0.3131.205±0.3661.864±0.4241.076±0.3151.442
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.455, + 0.47, + 0.562 + ], + "angle": 0, + "content": "also has the capability to constrain \\( V_{2d}^{p}(\\alpha) \\) within the confines of \\( C_p \\). \\( f_{ave} \\) acts on the entire \\( V_{2d}^{p}(\\alpha) \\), striving to bring its centroid as close as possible to the centroid of \\( C_p \\). The introduction of additional anchors and the integration of diverse statistical distances in PRDL prevent the optimization from local optima and provide sufficient geometric signals. Please refer to supplementary materials for more details." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.575, + 0.47, + 0.759 + ], + "angle": 0, + "content": "PRDL vs. Renderer-Based Loss: An intuitive approach for fitting segmentation is to use the renderer-based IoU loss, where differentiable silhouette renderers play a crucial role. Consequently, we delve into the distinctions between PRDL and renderers. We can reshape \\(\\Gamma_p^*\\) (\\(\\mathbb{R}^{|A| \\times |\\mathcal{F}|} \\to \\mathbb{R}^{H \\times W \\times |\\mathcal{F}|}\\)) to visualize it with the last channel separately. Fig. 6(c) illustrates the visualization of the \\(f_{min}\\) channel for \\(p =\\) right_eyebrow, while Fig. 6(d) represents the silhouette rendered by [33] or [8]. In comparison with the regression target \\(M_p\\) utilized in renderer-based methods, \\(\\Gamma_p^*\\) in PRDL is more informative and more conducive to fitting. Please refer to supplementary materials for more details." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Furthermore, considering existing theoretical analyses [8, 22, 56], PRDL exhibits several notable advantages. First, in these renderers, all triangles constituting the object influence every pixel within the silhouettes, making it intricate to isolate specific geometric features. In contrast, \\( f_{min} \\) or \\( f_{max} \\) in PRDL matches the nearest or furthest point on the object, allowing for a more straightforward measurement of the shape's boundary characteristics. Secondly, these renderers either neglect pixels outside any triangles of" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.455, + 0.892, + 0.607 + ], + "angle": 0, + "content": "the 3D object or assign minimal weights to them, emphasizing the rendered object region. However, this operation is equivalent to selectively choosing anchors \\( A \\) in the interior of the rendered shape, while the external anchors are either not chosen or treated differently by assigning small weights, thereby diminishing descriptive power. In Eqn. 11, Eqn. 12 and Fig. 6(b), we have analyzed that external anchors play a significant role in the fitting process. Ablation study (Fig.8) also proves that PRDL is more effective than renderer-based methods like [8, 33, 56]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.62, + 0.634, + 0.637 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.645, + 0.707, + 0.661 + ], + "angle": 0, + "content": "5.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.737 + ], + "angle": 0, + "content": "Reconstruction Frameworks. We implement PRDL based on PyTorch [39] and PyTorch3D [42]. We use ResNet-50 [18] as the backbone to predict \\(\\alpha\\). The input image is cropped and aligned by [10], and resized into \\(224 \\times 224\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.742, + 0.892, + 0.834 + ], + "angle": 0, + "content": "Data. The face images are from publicly available datasets: Dad-3dheads [37], CelebA [35], RAF-ML [28], RAF-DB [29] and 300W [43]. Our synthetic images are mainly from [24, 35]. We use [58] for face pose augmentation. In total, our training data contained about \\(600K\\) face images. We employ DML-CSR [55] to predict 2D face segmentation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Implementation Details. Considering the inherent feature of 2D segmentation, if part \\( p \\) of a face is invisible or occluded, it may lead to \\( C_p = \\varnothing \\). In such a situation during training, we set \\( w_{prdl}^p = 0 \\) for these samples. We use Adam" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1677" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.091, + 0.891, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.086, + 0.538, + 0.882, + 0.553 + ], + "angle": 0, + "content": "Figure 7. Qualitative comparison with the other methods. Our method achieves realistic reconstructions, particularly in the eye region." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.563, + 0.47, + 0.639 + ], + "angle": 0, + "content": "[23] as the optimizer with an initial learning rate of \\( 1e - 4 \\). We use Farthest Point Sampling (FPS) [38] to reduce the point number of \\( V_{2d}^{skin}(\\alpha) \\) and \\( C_{skin} \\) to 3000, reducing computational consumption. Please refer to supplemental materials for more details." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.654, + 0.169, + 0.669 + ], + "angle": 0, + "content": "5.2. Metric" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.68, + 0.471, + 0.877 + ], + "angle": 0, + "content": "In various VR/AR applications, 3DMMs are crucial in capturing facial motions or providing fine-grained regions covering facial features. One crucial objective in such applications is to ensure the alignment of overlapping facial parts between prediction and input. Widely used benchmarks [7, 44] typically rely on the 3D accuracy performance of reconstructions. However, there are instances where inconsistencies arise between 3D errors and 2D alignments. As shown in Fig.2(b), comparing with 3DDFA-v2 [17], DECA [14] have better 2D eye region overlapping IoU (70.29% vs. 39.37%) but a higher 3D forehead error (1.88mm vs. 1.75mm). To address this, we introduce Part IoU to emphasize the performance of overlap." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.886, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Part IoU is a new benchmark to quantify how well the part" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.563, + 0.892, + 0.715 + ], + "angle": 0, + "content": "reconstruction \\( V_{3d}^{p}(\\alpha) \\) aligns with their corresponding parts from the original face. The core idea is to measure the overlap of facial components between the reconstruction and the original image using IoU. The ground truth is a binary tensor \\( \\{M_p\\} \\) (as defined above). We render \\( V_{3d}(\\alpha) \\) with a mean texture as an image, generate the predicted segmentation \\( \\{M_p^{pred}\\} \\) with [55]. The use of mean texture focuses the metric more on overlap effects than other factors, making it applicable to methods without texture-fitting [13, 17]. 
Part IoU \\( IoU_p \\) of part \\( p \\) can be obtained by:" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.719, + 0.891, + 0.739 + ], + "angle": 0, + "content": "\\[\nI o U _ {p} = I o U \\left(M _ {p} ^ {p r e d}, M _ {p}\\right). \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.743, + 0.892, + 0.832 + ], + "angle": 0, + "content": "MEAD [50] is an emotional talking-face dataset. We test Part IoU by selecting 10 individuals from MEAD, each contributing 50 random different images. Part IoU measures the overlap performance between each part of the reconstruction and the ground truth. More detail is in the supplemental materials." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "REALY [7] benchmark consists of 100 scanned neutral expression faces, which are divided into four parts: nose, mouth, forehead (eyes and eyebrows), and cheek for 3D alignment and distance error calculation." + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1678" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.09, + 0.159, + 0.211 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.108, + 0.212, + 0.132, + 0.219 + ], + "angle": 0, + "content": "Input" + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.09, + 0.225, + 0.211 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.182, + 0.212, + 0.212, + 0.218 + ], + "angle": 0, + "content": "SoftRas" + }, + { + "type": "image", + "bbox": [ + 0.248, + 0.09, + 0.301, + 0.211 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.262, + 0.212, + 0.288, + 0.219 + ], + "angle": 0, + "content": "DIB-R" + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.09, + 0.377, + 0.211 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.338, + 0.212, + 0.364, + 0.219 + ], + "angle": 0, + "content": "ReDA" + }, + { + "type": "image", + "bbox": [ + 0.401, + 0.09, + 0.467, + 0.211 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.415, + 0.212, + 0.441, + 0.219 + ], + "angle": 0, + "content": "PRDL" + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.221, + 0.468, + 0.249 + ], + "angle": 0, + "content": "Figure 8. Comparison with the renderer-based geometric guidance of segmentation." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.255, + 0.299, + 0.272 + ], + "angle": 0, + "content": "5.3. Qualitative Comparison" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.279, + 0.47, + 0.43 + ], + "angle": 0, + "content": "We conduct a comprehensive evaluation of our method with the state-of-the-art approaches, including PRNet [13], MGCNet [45], Deep3D [11], 3DDFA-V2 [17], HRN [25] and DECA [14]. The visualization of HRN and DECA uses the mid-frequency details and coarse shape (denoted as HRN-m and DECA-c) since their further steps only change the renderer's normal map, while no 3D refinement is made. As shown in Fig. 7, our results excel in capturing extreme expressions, even better than HRN-m which has fine reconstruction steps." 
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.438, + 0.31, + 0.455 + ], + "angle": 0, + "content": "5.4. Quantitative Comparison" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.462, + 0.469, + 0.581 + ], + "angle": 0, + "content": "On both the Part IoU and REALY [7] benchmarks, our results outperform the existing state-of-the-art methods. As shown in Tab. 1, our method almost always achieves the highest overlap IoU across the various facial parts, with a \\(73.82\\%\\) overall average, demonstrating that PRDL enhances the part alignment of the reconstruction. PRDL also achieves the best average 3D error on the REALY benchmark (1.436mm in frontal-view and 1.442mm in side-view), as shown in Tab. 2." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.592, + 0.229, + 0.608 + ], + "angle": 0, + "content": "5.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.622, + 0.469, + 0.757 + ], + "angle": 0, + "content": "Ablation for PRDL and Synthetic Data. We conduct quantitative ablation experiments for PRDL and synthetic data on REALY and Part IoU. As depicted in Table 1 and Table 2, introducing PRDL alone already yields superior results compared to all other methods (72.22%, 1.468mm, and 1.455mm). Introducing synthetic data without PRDL also yields a significant improvement in Part IoU, but not as effectively as PRDL (72.08% vs. 72.22%). Using both synthetic data and PRDL leads to the best results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Compare with the Differentiable Silhouette Renderers. SoftRas [33] and DIB-R [8] are the two most widely used renderers, which serve as the basis for PyTorch3D [42] and Kaolin [15], respectively. Based on the image-fitting framework [1], we use them to render a silhouette of each face part and calculate the IoU loss with the ground truth. ReDA [56] is also a renderer-based method using the geometric guidance of segmentation. Fig.8 shows that PRDL is significantly better than these methods. It is essential to em" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.09, + 0.572, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.527, + 0.193, + 0.547, + 0.201 + ], + "angle": 0, + "content": "Input" + }, + { + "type": "image", + "bbox": [ + 0.572, + 0.09, + 0.659, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.572, + 0.193, + 0.63, + 0.2 + ], + "angle": 0, + "content": "Chamfer Distance" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.09, + 0.753, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.639, + 0.193, + 0.69, + 0.199 + ], + "angle": 0, + "content": "Density Aware" + }, + { + "type": "image", + "bbox": [ + 0.755, + 0.09, + 0.812, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.748, + 0.193, + 0.797, + 0.2 + ], + "angle": 0, + "content": "ICP" + }, + { + "type": "image", + "bbox": [ + 0.812, + 0.09, + 0.887, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.846, + 0.193, + 0.867, + 0.2 + ], + "angle": 0, + "content": "PRDL" + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.206, + 0.891, + 0.233 + ], + "angle": 0, + "content": "Figure 9. Comparison with the other point-driven geometric guidance of segmentation."
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.241, + 0.891, + 0.272 + ], + "angle": 0, + "content": "phasize that all the results in Fig.8 and Fig.9 do not include \\(\\mathcal{L}_{lmk}\\), \\(\\mathcal{L}_{pho}\\), and \\(\\mathcal{L}_{per}\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.278, + 0.892, + 0.595 + ], + "angle": 0, + "content": "Compare with the Other Point-Driven Optimization Methods. One of the key insights of PRDL is transforming segmentation into points. Thus the 3DMM fitting becomes an optimization of two 2D point clouds until they share the same geometry. While an intuitive idea is incorporating the point-driven optimization methods like iterative closest points (ICP) [2-4] or chamfer distance [53], these methods are predominantly rooted in nearest-neighbor principles, and solely opting for the minimum distance potentially leads to local optima. We compare PRDL with ICP [54], chamfer distance and density aware chamfer distance [53] based on [1]. Since the ICP distance can be calculated from target to prediction or vice versa, we provide both methods. As depicted in Fig.9, PRDL outperforms other methods, producing outputs that align more accurately with the desired geometry. This superiority is attributed to the use of additional anchors and diverse statistical distances in PRDL. Referring to Fig.8 and Fig.9, PRDL stands out as the only loss capable of reconstructing effective results when the segmentation information is used independently. More comparison is in the supplemental materials." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.61, + 0.627, + 0.625 + ], + "angle": 0, + "content": "6. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.635, + 0.892, + 0.772 + ], + "angle": 0, + "content": "This paper proposes a novel Part Re-projection Distance Loss (PRDL) to reconstruct 3D faces with the geometric guidance of facial part segmentation. Analysis proves that PRDL is superior to renderer-based and other point-driven optimization methods. We also provide a new emotional face expression dataset and a new 3D mesh part annotation to facilitate studies. Experiments further highlight the state-of-the-art performance of PRDL in achieving high-fidelity and better part alignment in 3D face reconstruction." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.785, + 0.66, + 0.802 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.81, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This work was supported in part by Chinese National Natural Science Foundation Projects 62176256, U23B2054, 62276254, 62206280, the Beijing Science and Technology Plan Project Z231100005923033, Beijing Natural Science Foundation L221013, the Youth Innovation Promotion Association CAS Y2021131 and InnoHK program." + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1679" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.116, + 0.466, + 0.143 + ], + "angle": 0, + "content": "[1] 3dmm model fitting using pytorch. https://github.com/ascust/3DMM-Fitting-Pytorch, 2021.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.146, + 0.469, + 0.2 + ], + "angle": 0, + "content": "[2] Brian Amberg, Sami Romdhani, and Thomas Vetter. Optimal step nonrigid icp algorithms for surface registration. 
In 2007 IEEE conference on computer vision and pattern recognition, pages 1-8. IEEE, 2007. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.203, + 0.469, + 0.244 + ], + "angle": 0, + "content": "[3] K. S. Arun, T. S. Huang, and S. D. Blostein. Least-squares fitting of two 3-d point sets. IEEE Transactions on Pattern Analysis and Machine Intelligence, PAMI-9(5):698-700, 1987." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.247, + 0.469, + 0.287 + ], + "angle": 0, + "content": "[4] P.J. Besl and Neil D. McKay. A method for registration of 3-d shapes. IEEE Transactions on Pattern Analysis and Machine Intelligence, 14(2):239-256, 1992. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.29, + 0.469, + 0.345 + ], + "angle": 0, + "content": "[5] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 187-194, 1999. 1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.348, + 0.469, + 0.389 + ], + "angle": 0, + "content": "[6] Volker Blanz and Thomas Vetter. Face recognition based on fitting a 3d morphable model. IEEE Transactions on pattern analysis and machine intelligence, 25(9):1063-1074, 2003. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.391, + 0.469, + 0.473 + ], + "angle": 0, + "content": "[7] Zenghao Chai, Haoxian Zhang, Jing Ren, Di Kang, Zhengzhuo Xu, Xuefei Zhe, Chun Yuan, and Linchao Bao. Really: Rethinking the evaluation of 3d face reconstruction. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part VIII, pages 74-92. Springer, 2022. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.476, + 0.469, + 0.544 + ], + "angle": 0, + "content": "[8] Wenzheng Chen, Huan Ling, Jun Gao, Edward Smith, Jaakko Lehtinen, Alec Jacobson, and Sanja Fidler. Learning to predict 3d objects with an interpolation-based differentiable renderer. Advances in neural information processing systems, 32, 2019. 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.548, + 0.469, + 0.614 + ], + "angle": 0, + "content": "[9] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.618, + 0.469, + 0.659 + ], + "angle": 0, + "content": "[10] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In CVPR, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.662, + 0.469, + 0.743 + ], + "angle": 0, + "content": "[11] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops, pages 0–0, 2019. 1, 2, 3, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.747, + 0.469, + 0.828 + ], + "angle": 0, + "content": "[12] Bernhard Egger, Sandro Schonborn, Andreas Schneider, Adam Kortylewski, Andreas Morel-Forster, Clemens Blumer, and Thomas Vetter. Occlusion-aware 3d morphable models and an illumination prior for face image analysis. International Journal of Computer Vision, 126:1269-1287, 2018. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.832, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[13] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In Proceedings of the European conference on computer vision (ECCV), pages 534-551, 2018. 6, 7, 8" + }, + { + "type": "list", + "bbox": [ + 0.081, + 0.116, + 0.469, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[14] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3D face model from in-the-wild images. 2021. 1, 2, 3, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.137, + 0.892, + 0.259 + ], + "angle": 0, + "content": "[15] Clement Fuji Tsang, Maria Shugrina, Jean Francois Lafleche, Towaki Takikawa, Jiehan Wang, Charles Loop, Wenzheng Chen, Krishna Murthy Jatavallabhula, Edward Smith, Artem Rozantsev, Or Perel, Tianchang Shen, Jun Gao, Sanja Fidler, Gavriel State, Jason Gorski, Tommy Xiang, Jianing Li, Michael Li, and Rev Lebaredian. Kaolin: A pytorch library for accelerating 3d deep learning research. https://github.com/NVIDIAGames/kaolin, 2022.2,3,8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.263, + 0.892, + 0.331 + ], + "angle": 0, + "content": "[16] Kyle Genova, Forrester Cole, Aaron Maschinot, Aaron Sarna, Daniel Vlasic, and William T Freeman. Unsupervised training for 3d morphable model regression. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8377-8386, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.334, + 0.892, + 0.374 + ], + "angle": 0, + "content": "[17] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. pages 152-168, 2020. 1, 2, 3, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.377, + 0.892, + 0.431 + ], + "angle": 0, + "content": "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.434, + 0.892, + 0.501 + ], + "angle": 0, + "content": "[19] Yueying Kao, Bowen Pan, Miao Xu, Jiangjing Lyu, Xiangyu Zhu, Yuanzhang Chang, Xiaobo Li, and Zhen Lei. Toward 3d face reconstruction in perspective projection: Estimating 6dof face pose from monocular image. IEEE Transactions on Image Processing, 32:3080-3091, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.504, + 0.892, + 0.558 + ], + "angle": 0, + "content": "[20] Yury Kartynnik, Artsiom Ablavatski, Ivan Grishchenko, and Matthias Grundmann. Real-time facial surface geometry from monocular video on mobile gpus. arXiv preprint arXiv:1907.06724, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.562, + 0.892, + 0.614 + ], + "angle": 0, + "content": "[21] Hiroharu Kato, Yoshitaka Ushiku, and Tatsuya Harada. Neural 3d mesh renderer. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3907-3916, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.618, + 0.892, + 0.672 + ], + "angle": 0, + "content": "[22] Hiroharu Kato, Deniz Beker, Mihai Morariu, Takahiro Ando, Toru Matsuoka, Wadim Kehl, and Adrien Gaidon. Differentiable rendering: A survey. 
arXiv preprint arXiv:2006.12057, 2020. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.676, + 0.892, + 0.715 + ], + "angle": 0, + "content": "[23] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.719, + 0.892, + 0.772 + ], + "angle": 0, + "content": "[24] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.776, + 0.892, + 0.856 + ], + "angle": 0, + "content": "[25] Biwen Lei, Jianqiang Ren, Mengyang Feng, Miaomiao Cui, and Xuansong Xie. A hierarchical representation network for accurate and detailed face reconstruction from in-the-wild images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 394-403, 2023. 1, 2, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.86, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[26] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. To fit or not to fit: Model-based face reconstruction and occlusion segmentation from" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "1680" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.104, + 0.092, + 0.468, + 0.119 + ], + "angle": 0, + "content": "weak supervision. arXiv preprint arXiv:2106.09614, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.177 + ], + "angle": 0, + "content": "[27] Ruilong Li, Karl Bladin, Yajie Zhao, Chinmay Chinara, Owen Ingraham, Pengda Xiang, Xinglei Ren, Pratusha Prasad, Bipin Kishore, Jun Xing, and Hao Li. Learning formation of physically-based face attributes. 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.234 + ], + "angle": 0, + "content": "[28] Shan Li and Weihong Deng. Blended emotion in-the-wild: Multi-label facial expression recognition using crowdsourced annotations and deep locality feature learning. International Journal of Computer Vision, 127(6-7):884–906, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.236, + 0.469, + 0.291 + ], + "angle": 0, + "content": "[29] Shan Li and Weihong Deng. Reliable crowdsourcing and deep locality-preserving learning for unconstrained facial expression recognition. IEEE Transactions on Image Processing, 28(1):356-370, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.293, + 0.469, + 0.347 + ], + "angle": 0, + "content": "[30] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.35, + 0.469, + 0.404 + ], + "angle": 0, + "content": "[31] Jinpeng Lin, Hao Yang, Dong Chen, Ming Zeng, Fang Wen, and Lu Yuan. Face parsing with roi tanh-warping. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5654-5663, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.407, + 0.469, + 0.448 + ], + "angle": 0, + "content": "[32] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. 
Roi tanh-polar transformer network for face parsing in the wild. Image and Vision Computing, 112:104190, 2021. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.45, + 0.469, + 0.516 + ], + "angle": 0, + "content": "[33] Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7708-7717, 2019. 2, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.52, + 0.469, + 0.573 + ], + "angle": 0, + "content": "[34] Yinglu Liu, Hailin Shi, Hao Shen, Yue Si, Xiaobo Wang, and Tao Mei. A new dataset and boundary-attention semantic segmentation for face parsing. In AAAI, pages 11637–11644, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.577, + 0.47, + 0.631 + ], + "angle": 0, + "content": "[35] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.634, + 0.469, + 0.702 + ], + "angle": 0, + "content": "[36] Matthew M Loper and Michael J Black. Opendr: An approximate differentiable renderer. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pages 154-169. Springer, 2014. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.469, + 0.786 + ], + "angle": 0, + "content": "[37] Tetiana Martyniuk, Orest Kupyn, Yana Kurlyak, Igor Krashenyi, Jiri Matas, and Viktoriya Sharmanska. Dad-3heads: A large-scale dense, accurate and diverse dataset for 3d head alignment from a single image. In Proc. IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.469, + 0.83 + ], + "angle": 0, + "content": "[38] Carsten Moenning and Neil A Dodgson. Fast marching farthest point sampling. Technical report, University of Cambridge, Computer Laboratory, 2003. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[39] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019. 6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[40] Ravi Ramamoorthi and Pat Hanrahan. An efficient representation for irradiance environment maps. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 497-500, 2001. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.216 + ], + "angle": 0, + "content": "[41] Chirag Raman, Charlie Hewitt, Erroll Wood, and Tadas Baltrusaitis. Mesh-tension driven expression-based wrinkles for synthetic faces. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3515-3525, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.219, + 0.892, + 0.272 + ], + "angle": 0, + "content": "[42] Nikhila Ravi, Jeremy Reizenstein, David Novotny, Taylor Gordon, Wan-Yen Lo, Justin Johnson, and Georgia Gkioxari. 
Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501, 2020. 2, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.274, + 0.892, + 0.343 + ], + "angle": 0, + "content": "[43] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE international conference on computer vision workshops, pages 397-403, 2013. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.345, + 0.892, + 0.412 + ], + "angle": 0, + "content": "[44] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael J Black. Learning to regress 3d face shape and expression from an image without 3d supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7763-7772, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.414, + 0.892, + 0.509 + ], + "angle": 0, + "content": "[45] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Mingmin Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3d face reconstruction by occlusion-aware multiview geometry consistency. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XV, pages 53-70. Springer, 2020. 1, 2, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.511, + 0.892, + 0.564 + ], + "angle": 0, + "content": "[46] Dave Shreiner, Bill The Khronos OpenGL ARB Working Group, et al. OpenGL programming guide: the official guide to learning OpenGL, versions 3.0 and 3.1. Pearson Education, 2009. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.567, + 0.892, + 0.621 + ], + "angle": 0, + "content": "[47] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. ACM Transactions on Graphics (TOG), 41(6):1-10, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.623, + 0.892, + 0.691 + ], + "angle": 0, + "content": "[48] Ayush Tewari, Hans-Peter Seidel, Mohamed Elgharib, Christian Theobalt, et al. Learning complete 3d morphable face models from images and videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3361-3371, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.692, + 0.892, + 0.747 + ], + "angle": 0, + "content": "[49] Graphics University of Basel and Vision Research. parametric-face-image-generator. https://github.com/unibas-gravis/parametric-face-image-generator, 2017.2,4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.749, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[50] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.804, + 0.892, + 0.872 + ], + "angle": 0, + "content": "[51] Lizhen Wang, Zhiyuan Chen, Tao Yu, Chenguang Ma, Liang Li, and Yebin Liu. Faceverse: a fine-grained and detail-controllable 3d face morphable model from a hybrid dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20333-20342, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.874, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[52] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. 
Fake it till" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.946, + 0.514, + 0.957 + ], + "angle": 0, + "content": "1681" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.104, + 0.093, + 0.469, + 0.134 + ], + "angle": 0, + "content": "you make it: face analysis in the wild using synthetic data alone. In Proceedings of the IEEE/CVF international conference on computer vision, pages 3681-3691, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.47, + 0.189 + ], + "angle": 0, + "content": "[53] Tong Wu, Liang Pan, Junzhe Zhang, Tai Wang, Ziwei Liu, and Dahua Lin. Density-aware chamfer distance as a comprehensive metric for point cloud completion. arXiv preprint arXiv:2111.12702, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.192, + 0.469, + 0.247 + ], + "angle": 0, + "content": "[54] Jiaolong Yang, Hongdong Li, Dylan Campbell, and Yunde Jia. Go-icp: A globally optimal solution to 3d icp point-set registration. IEEE transactions on pattern analysis and machine intelligence, 38(11):2241–2254, 2015. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.249, + 0.469, + 0.303 + ], + "angle": 0, + "content": "[55] Qi Zheng, Jiankang Deng, Zheng Zhu, Ying Li, and Stefanos Zafeiriou. Decoupled multi-task learning with cyclical self-regulation for face parsing. In Computer Vision and Pattern Recognition, 2022. 2, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.305, + 0.47, + 0.374 + ], + "angle": 0, + "content": "[56] Wenbin Zhu, HsiangTao Wu, Zeyu Chen, Noranart Vesdapunt, and Baoyuan Wang. Reda: reinforced differentiable attribute for 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4958-4967, 2020. 2, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.376, + 0.469, + 0.443 + ], + "angle": 0, + "content": "[57] Xiangyu Zhu, Zhen Lei, Junjie Yan, Dong Yi, and Stan Z Li. High-fidelity pose and expression normalization for face recognition in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 787-796, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.446, + 0.469, + 0.5 + ], + "angle": 0, + "content": "[58] Xiangyu Zhu, Xiaoming Liu, Zhen Lei, and Stan Z Li. Face alignment in full pose range: A 3d total solution. IEEE transactions on pattern analysis and machine intelligence, 41(1): 78-92, 2017. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.503, + 0.469, + 0.557 + ], + "angle": 0, + "content": "[59] Xiangyu Zhu, Chang Yu, Di Huang, Zhen Lei, Hao Wang, and Stan Z Li. Beyond 3dmm: Learning to capture high-fidelity 3d face shape. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.56, + 0.469, + 0.613 + ], + "angle": 0, + "content": "[60] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces. In European Conference on Computer Vision, pages 250–269. Springer, 2022. 
1, 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.47, + 0.613 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "1682" + } + ] +] \ No newline at end of file diff --git a/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/29911afb-57cf-4105-bc3b-b432a117add8_origin.pdf b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/29911afb-57cf-4105-bc3b-b432a117add8_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f718c623d81f77288cb5588bcd5be6dfa1ea4d7e --- /dev/null +++ b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/29911afb-57cf-4105-bc3b-b432a117add8_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:688025ef9dd8f345b585c21c97048baf46635525338d14b9dd96a8a356d19c0c +size 10150991 diff --git a/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/full.md b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..40ae92bca8875b5ec427835d1982e37ca7347fde --- /dev/null +++ b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/full.md @@ -0,0 +1,381 @@ +# 3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation + +Zidu Wang $^{1,2}$ , Xiangyu Zhu $^{1,2*}$ , Tianshuo Zhang $^{1,2}$ , Baiqin Wang $^{1,2}$ , Zhen Lei $^{1,2,3}$ + +$^{1}$ State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation, Chinese Academy of Sciences + +$^{2}$ School of Artificial Intelligence, University of Chinese Academy of Sciences + +3 Centre for Artificial Intelligence and Robotics, Hong Kong Institute of Science & Innovation, Chinese Academy of Sciences + +{wangzidu2022, wangbaiqin2024}@ia.ac.cn,{xiangyu.zhu, tianshuo.zhang, zlei}@nlpr.ia.ac.cn + +# Abstract + +3D Morphable Models (3DMMs) provide promising 3D face reconstructions in various applications. However, existing methods struggle to reconstruct faces with extreme expressions due to deficiencies in supervisory signals, such as sparse or inaccurate landmarks. Segmentation information contains effective geometric contexts for face reconstruction. Certain attempts intuitively depend on differentiable renderers to compare the rendered silhouettes of reconstruction with segmentation, which is prone to issues like local optima and gradient instability. In this paper, we fully utilize the facial part segmentation geometry by introducing Part Re-projection Distance Loss (PRDL). Specifically, PRDL transforms facial part segmentation into 2D points and re-projects the reconstruction onto the image plane. Subsequently, by introducing grid anchors and computing different statistical distances from these anchors to the point sets, PRDL establishes geometry descriptors to optimize the distribution of the point sets for face reconstruction. PRDL exhibits a clear gradient compared to the renderer-based methods and presents state-of-the-art reconstruction performance in extensive quantitative and qualitative experiments. Our project is available at https://github.com/wang-zidu/3DDFA-V3. + +# 1. 
Introduction + +Reconstructing 3D faces from 2D images is an essential task in computer vision and graphics, finding diverse applications in fields such as Virtual Reality (VR), Augmented Reality (AR), and Computer-generated Imagery (CGI), etc. In applications like VR makeup and AR emoji, 3DMMs + +![](images/d6637cfd28f40026409df73e4475d5cc70b7d2fa6f410fb461882c562ae98f02.jpg) + +![](images/98ab93bdca526f27de508e1d5f90995e02eb38b906fe0b041eb360221623ef89.jpg) + +![](images/252cd538d0639398cd84839dafd613e9906a1a8a4e04fa765e8c37a5eee7e369.jpg) + +![](images/69fa143e20d25b4688b2fdb83092ff9d68b5b6dad5c8745f4b1c700df16992da.jpg) +Figure 1. We introduce Part Re-projection Distance Loss (PRDL) for 3D face reconstruction, leveraging the geometric guidance provided by facial part segmentation. PRDL enhances the alignment of reconstructed facial features with the original image and excels in capturing extreme expressions. + +![](images/b5c36c3144c4e2a08d39de04771a49ce356cb286c18fa683d329733688a98bd2.jpg) + +![](images/4fcf17a949d1dbecf6a69ed0acd4b17a73476b43141ab566851a7be2a810026c.jpg) + +[5] are commonly employed for precise facial feature positioning and capturing expressions. One of the most critical concerns is ensuring that the reconstructed facial components, including the eyes, eyebrows, lips, etc., seamlessly align with their corresponding regions in the input image with pixel-level accuracy, particularly when dealing with extreme facial expressions, as shown in Fig. 1. + +Although current methods [11, 14, 17, 19, 25] have made notable strides in face reconstruction, some issues persist. On the one hand, existing works often rely on landmarks [17, 60] and photometric-texture [12, 45] to guide face reconstruction. In the case of extreme facial expressions, landmarks are sparse or inaccurate and the gradient from the texture loss cannot directly constrain the shape [59], posing a challenge for existing methods to achieve precise alignment of facial features in 3D face reconstruction, as depicted in Fig. 2(a). On the other hand, many methods + +primarily adopt 3D errors as a quality metric, overlooking the precise alignment of facial parts. As shown in Fig. 2(b), when evaluating the REALY [7] benchmark in the eye region, comparing the results of 3DDFA-v2 [17] and DECA [14], a lower 3D region error may not lead to better 2D region alignment. We believe in the potential for a more comprehensive utilization of the geometry information inherent in each facial part segmentation to guide 3D face reconstruction, addressing the issues mentioned above. + +Facial part segmentation [24, 31, 32, 34] has been well developed, offering precise geometry for each facial feature with pixel-level accuracy. Compared with commonly used landmarks, part segmentation provides denser labels covering the whole image. Compared with photometric texture, part segmentation is less susceptible to lighting or shadow interference. Although facial part segmentation occasionally appears in the process of 3D face reconstruction, it is not fully utilized. For instance, it only serves to enhance the reconstruction quality of specific regions [25, 48], or to distinguish the overall texture location for photometric-texture-loss [26], without delving into the specifics of facial parts. 
Attempts [33, 56] to fit 3D parts with the guidance of segmentation information rely on differentiable renderers [15, 42, 46] to generate the silhouettes of the predicted 3D facial regions and optimize the difference between the rendered silhouettes and the 2D segmentation through Intersection over Union (IoU) loss. However, these renderers fail to provide sufficient and stable geometric signals for face reconstruction due to local optima, rendering error propagation, and gradient instability [22]. + +This paper leverages the precise and rich geometric information in facial part silhouettes to guide face reconstruction, thereby improving the alignment of reconstructed facial features with the original image and excelling in reconstructing extreme facial expressions. Fig.1 provides an overview of the proposed Part Re-projection Distance Loss (PRDL). Firstly, PRDL samples points within the segmented region and transforms the segmentation information into a 2D point set for each facial part. The 3D face reconstruction is also re-projected onto the image plane and transformed into 2D point sets for different regions. Secondly, PRDL samples the image grid anchors and establishes geometric descriptors. These descriptors are constructed by using various statistical distances from the anchors to the point set. Finally, PRDL optimizes the distribution of the same semantic point sets, leading to improved overlap between the regions covered by the target and predicted point sets. In contrast to renderer-based methods, PRDL exhibits a clear gradient. To facilitate the use of PRDL, we provide a new 3D mesh part annotation aligned with semantic regions in 2D face segmentation [24, 55], which differs from the existing annotations [30, 49], as shown in Fig.2(c). Besides the drawbacks of supervisory signals, the challenge of han + +![](images/ac68d5bbb39a20f8fcd0a8f124eba73e61b88118bb3adbf8e07621e45cd10413.jpg) +(a) Performance on extreme expressions + +![](images/be08dc941b4c38b740950507e6cdff094eed3a01abb3719f5d81e47ca113dd96.jpg) +(b) 3D error vs. 2D alignment +Figure 2. Drawbacks of existing research and our results. (a) Present researches fail to reconstruct extreme expressions and perform bad region alignment. (b) Inconsistencies between 3D errors and 2D alignments, such as the eye region in this case. (c) Geometric optimization of each semantically consistent part is only achievable through our annotations. + +![](images/5c93af5dd693181cbe20a1c67826b7893bcffb0abc7a8f736e2feec185b172a2.jpg) +(c) 3D face model annotations + +dling extreme expressions arises from data limitations. To boost studies and address the lack of emotional expression (e.g., closed-eye, open-mouth, frown, etc.), we synthesize a face dataset using the GAN-based method [24]. To highlight the performance of region overlapping, we propose a new benchmark to quantify the accuracy of 3D reconstruction parts cling to their corresponding image components on the 2D image plane. Our main contributions are as follows: + +- We introduce a novel Part Re-projection Distance Loss (PRDL) to comprehensively utilize segmentation information for face reconstruction. PRDL transforms the target and prediction into semantic point sets, optimizing the distribution of point sets to ensure that the reconstructed regions and the target share the same geometry. +- We introduce a new synthetic face dataset including closed-eye, open-mouth, and frown expressions, with more than $200K$ images. 
+- Extensive experiments show that the results with PRDL achieve excellent performance and outperform the existing methods. The data and code are available at https://github.com/wang-zidu/3DDFA-V3. + +# 2. Related Work + +2D-to-3D Losses for 3D Face Reconstruction. Landmark loss [11, 17, 60] stands out as the most widely employed and effective supervised way for face reconstruction. Some studies [20, 37] reveal that it can generate 3D faces under the guidance of sufficient hundreds or thousands landmarks. Photometric loss is another commonly used loss involving rendering the reconstructed mesh with texture into an image and comparing it to the original input. Some researchers focus on predicting the facial features that need to be fitted while excluding occlusions [12, 45]. The photometric loss is susceptible to factors like texture basis, skin masks, and rendering modes. It emphasizes overall visualization and may not effectively constrain local details. Perception loss + +![](images/9993a79f4147ffc2959550ceabcd52c36438bcd60ffeb38a1fa0351a35b53af9.jpg) +Figure 3. Overview of Part Re-projection Distance Loss (PRDL). (a): Transforming facial part segmentation into target point sets $\{C_p\}$ . (b): Re-projecting $V_{3d}(\alpha)$ onto the image plane to obtain predicted point sets $\{V_{2d}^p (\alpha)\}$ . (c): Given anchors $\mathbf{A}$ and distance functions $\mathcal{F}$ , the core idea of PRDL is to minimize the difference of every statistical distance from any $\pmb{a}_i\in \pmb{A}$ to the $V_{2d}^{p}(\alpha)$ or $C_p$ , leading to enhanced overlap between the regions covered by the target and predicted point sets. + +![](images/c2bf4c7eb6a15c3244b1665bdca0276d122e58692fb2aefc44d7fb8ec13f107a.jpg) + +[11, 14, 16] distinguishes itself from image-level methods by employing pre-trained deep face recognition networks [9] to extract high-level features from the rendered reconstruction results. These features are then compared with the features from the input. Lip segmentation consistency loss [48] employs mouth segmentation to help reconstruction. + +Differentiable Silhouette Renderers. The development of differentiable renderers [15, 42, 46] has enriched the supervised methods for 3D face reconstruction. These pipelines make the rasterization process differentiable, allowing for the computation of gradients for every pixel in the rendered results. By combining IoU loss with segmentation information, the silhouettes produced by these renderers have been shown to optimize 3D shapes [8, 33, 56]. These rasterization processes typically rely on either local [21, 36] or global [8, 33] geometric distance-based weighted aggregation, generating silhouettes by computing a probability related to the distance from pixels to mesh faces. However, to obtain a suitable sharp silhouette, the weight contribution of each position to the rendered pixel will decrease sharply with the increase of distance, and the gradient generated by the shape difference at the large distance will be small or zero, which makes it difficult to retain accurate geometry guidance. These renderers also encounter issues such as rendering error propagation and gradient instability [22]. + +Synthetic Dataset. Synthetic data [41, 52, 58] is commonly used to train 3D face reconstruction models [11, 17, 25]. 
However, these synthetic faces either prioritize the diversification of background, illumination, and identities [41, 52], or concentrate on pose variation [58], so they achieve good results in reconstructing natural facial expressions but struggle to reconstruct extreme expressions. To overcome these limitations and facilitate the related research, this paper adopts a GAN-based method [24] to synthesize realistic and diverse facial expression data, including closed eyes, open mouths, and frowns.

# 3. Methodology

# 3.1. Preliminaries

We build our face model, illumination model, and camera model on [6, 11, 14, 17].

Face Model. The vertices and albedo of a 3D face are determined by the following formulas:

$$
V_{3d}(\boldsymbol{\alpha}) = \boldsymbol{R}\left(\boldsymbol{\alpha}_{a}\right)\left(\bar{\boldsymbol{V}} + \boldsymbol{\alpha}_{id}\boldsymbol{A}_{id} + \boldsymbol{\alpha}_{\exp}\boldsymbol{A}_{\exp}\right) + \boldsymbol{\alpha}_{t}, \tag{1}
$$

$$
T_{alb}(\boldsymbol{\alpha}) = \overline{\boldsymbol{T}} + \boldsymbol{\alpha}_{alb}\boldsymbol{A}_{alb},
$$

where $V_{3d}(\alpha) \in \mathbb{R}^{3 \times 35709}$ denotes the 3D face vertices and $\overline{\boldsymbol{V}}$ is the mean shape. $T_{alb}(\alpha) \in \mathbb{R}^{3 \times 35709}$ is the albedo and $\overline{T}$ is the mean albedo. $A_{id}$, $A_{exp}$ and $A_{alb}$ are the face identity vector bases, the expression vector bases and the albedo vector bases, respectively. $\alpha_{id} \in \mathbb{R}^{80}$, $\alpha_{exp} \in \mathbb{R}^{64}$ and $\alpha_{alb} \in \mathbb{R}^{80}$ are the identity parameter, the expression parameter and the albedo parameter, respectively. $\alpha_{t} \in \mathbb{R}^{3}$ is the translation parameter. $\boldsymbol{R}(\boldsymbol{\alpha}_{a}) \in \mathbb{R}^{3 \times 3}$ is the rotation matrix corresponding to the pitch/yaw/roll angles $\alpha_{a} \in \mathbb{R}^{3}$.

Camera. We employ a camera with a fixed perspective projection, the same as in [11, 25]. Using this camera to re-project $V_{3d}(\alpha)$ onto the 2D image plane yields $V_{2d}(\alpha) \in \mathbb{R}^{2 \times 35709}$.

Illumination Model. Following [14], we adopt Spherical Harmonics (SH) [40] for the estimation of the shaded texture $T_{tex}(\alpha)$:

$$
T_{tex}(\boldsymbol{\alpha}) = T_{alb}(\boldsymbol{\alpha}) \odot \sum_{k=1}^{9} \boldsymbol{\alpha}_{sh}^{k} \boldsymbol{\Psi}_{k}(\boldsymbol{N}), \tag{2}
$$

where $\odot$ denotes the Hadamard product, $N$ is the surface normal of $V_{3d}(\alpha)$, $\Psi : \mathbb{R}^3 \to \mathbb{R}$ is the SH basis function and $\alpha_{sh} \in \mathbb{R}^9$ is the corresponding SH parameter. In summary, $\alpha = [\alpha_{id}, \alpha_{\mathrm{exp}}, \alpha_a, \alpha_t, \alpha_{sh}]$ is the parameter to be estimated.

# 3.2. Point Transformation on the Image Plane

Transforming Segmentation to 2D Points. For an input RGB face image $I \in \mathbb{R}^{H \times W \times 3}$, the prediction of a face segmentation method can be represented by a set of binary tensors $M = \{M_p \mid p \in P\}$, where $P = \{\text{left\_eye, right\_eye, left\_eyebrow, right\_eyebrow, up\_lip, down\_lip, nose, skin}\}$ and $M_p \in \{0,1\}^{H \times W}$. Specifically, $M_p^{(x,y)} = 1$ if and only if the 2D pixel position $(x,y)$ of $M_p$ belongs to face part $p$, and $M_p^{(x,y)} = 0$ otherwise.
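As a concrete illustration, the masks $M_p$ and the 2D point sets derived from them (formalized in the next paragraph) can be obtained with a few lines of array code. The following minimal NumPy sketch assumes a hypothetical integer label map and part-id scheme, both of which depend on the face parser in use:

```python
import numpy as np

# Hypothetical integer ids for the face-parsing label map; the actual id
# scheme depends on the segmentation network (DML-CSR in this work).
PART_IDS = {"left_eye": 1, "right_eye": 2, "left_eyebrow": 3, "right_eyebrow": 4,
            "up_lip": 5, "down_lip": 6, "nose": 7, "skin": 8}

def masks_to_point_sets(label_map: np.ndarray) -> dict:
    """Turn an H x W integer label map into binary masks M_p and 2D point sets."""
    point_sets = {}
    for part, pid in PART_IDS.items():
        mask = (label_map == pid)                  # binary tensor M_p
        ys, xs = np.nonzero(mask)                  # pixels with M_p^(x, y) = 1
        point_sets[part] = np.stack([xs, ys], 1)   # (x, y) coordinates, shape N x 2
    return point_sets

# Toy example: a 4 x 4 label map containing a small "nose" region.
toy = np.zeros((4, 4), dtype=np.int64)
toy[1:3, 1:3] = PART_IDS["nose"]
print(masks_to_point_sets(toy)["nose"])   # four (x, y) positions
```

In practice the label map comes from the face parser and additional filtering is applied, as described next.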
$M$ can be transformed into a set of point sets $C = \{C_p \mid p \in P\}$, where $C_p = \{(x,y) \mid M_p^{(x,y)} = 1\}$. In this step, we employ DML-CSR [55] for face segmentation, excluding the ear regions, filtering out noise from the segmentation, and dynamically removing the forehead region above the eyebrows based on their position. This procedure is illustrated in Fig. 3(a). More implementation details are provided in the supplemental materials.

Facial Part Annotation on 3D Face Model. Our objective is to leverage $\{C_p\}$ for guiding 3D face reconstruction. Thus, we should ensure that the reconstructed mesh can be divided into regions consistent with the semantics of the 2D segmentation. Due to the topological consistency of the face model, every vertex on the mesh can be annotated with a specific region. However, existing annotations [27, 30, 49] do not conform to widely accepted 2D face segmentation definitions [24, 32], as shown in Fig.2(c). To address this misalignment, we introduce new part annotations on both BFM [5] and FaceVerse [51]. We partition the vertices based on their indices: $i \in Ind_p$ indicates that the $i$-th vertex (denoted as $\boldsymbol{v}$) on the mesh belongs to part $p$. $\{Ind_p \mid p \in P\}$ can be obtained by:

$$
\begin{array}{l} I^{seg} = \operatorname{Seg}(\operatorname{Render}(V_{3d}, Tex)) \\ i \in Ind_{p}, \ \mathrm{if} \ I^{seg}(\boldsymbol{v}) \in p, \end{array} \tag{3}
$$

where $\text{Render}(\cdot)$ generates an image by applying texture to the mesh, and $\text{Seg}(\cdot)$ segments the rendered result. We employ different shapes $V_{3d}$ and varying textures $Tex$ to label every $\boldsymbol{v} \in V_{3d}$, with hand-crafted modification. The annotation $\{Ind_p\}$ is pre-computed offline and fixed during training. Consequently, we utilize $\{Ind_p\}$ to transform the re-projection $V_{2d}(\alpha)$ into semantic point sets $\{V_{2d}^p (\alpha) \mid p \in P\}$. Besides, the upper forehead region situated above the eyebrows is dynamically excluded to ensure consistency with the target, and points obstructed by hair are removed based on $\{C_p\}$, as shown in Fig. 3(b). Please refer to the supplemental materials for annotation details.

# 3.3. Part Re-projection Distance Loss (PRDL)

This section describes the design of PRDL, focusing on constructing geometric descriptors and establishing the relation between the prediction $\{V_{2d}^p (\alpha)\}$ and the ground truth $\{C_p\}$ for a given $p \in P$, which proves instrumental for face reconstruction.

In a more generalized formulation, considering two point sets $C = \{c_1, c_2, \dots, c_{|C|}\}$ and $C^* = \{c_1^*, c_2^*, \dots, c_{|C^*|}^*\}$, we aim to establish geometry descriptions that quantify the shape alignment between them for reconstruction. $C$ and $C^*$ may not possess the same number of points, and their points lack correspondence. Instead of directly searching for correspondences between the two sets, we use a set of fixed points as anchors $A = \{a_1, a_2, \dots, a_{|A|}\}$ and a collection of statistical distance functions $\mathcal{F} = \{f_1, f_2, \dots, f_{|\mathcal{F}|}\}$ to construct geometry description tensors $\Gamma(C, A, \mathcal{F}) \in \mathbb{R}^{|A| \times |\mathcal{F}|}$ and $\Gamma(C^*, A, \mathcal{F}) \in \mathbb{R}^{|A| \times |\mathcal{F}|}$ for $C$ and $C^*$, respectively (denoted as $\Gamma$ and $\Gamma^*$ for brevity).
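Before the exact definition is stated in Eqn. 4-6, the construction can be sketched concretely. The following minimal NumPy example (anchor spacing, image size, and point sets are illustrative only) builds $\Gamma$ for a single point set from a regular grid of anchors, using the nearest, furthest, and average distances adopted later in this section:

```python
import numpy as np

def geometry_descriptor(points: np.ndarray, anchors: np.ndarray) -> np.ndarray:
    """Gamma(C, A, F): nearest, furthest and average Euclidean distance from
    each anchor to the point set, returned as an |A| x 3 array."""
    d = np.linalg.norm(anchors[:, None, :] - points[None, :, :], axis=-1)  # |A| x |C|
    return np.stack([d.min(axis=1), d.max(axis=1), d.mean(axis=1)], axis=1)

# Illustrative anchors: a regular grid on a 224 x 224 image plane.
ys, xs = np.meshgrid(np.arange(0, 224, 16), np.arange(0, 224, 16), indexing="ij")
anchors = np.stack([xs.ravel(), ys.ravel()], axis=1).astype(np.float64)

# Illustrative target and predicted point sets for a single facial part.
C_target = np.random.rand(500, 2) * 224.0
C_pred = np.random.rand(400, 2) * 224.0

gamma_target = geometry_descriptor(C_target, anchors)   # Gamma*
gamma_pred = geometry_descriptor(C_pred, anchors)       # Gamma
part_term = np.sum((gamma_pred - gamma_target) ** 2)    # one unweighted term of Eqn. 5
print(gamma_target.shape, part_term)
```

Comparing the two descriptors anchor by anchor, rather than matching points directly, is what lets the loss describe the whole region covered by each point set.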
The value $\Gamma(i, j)$ and $\Gamma^*(i, j)$ at the position $(i, j)$ are determined by: + +$$ +\left\{ \begin{array}{l} \boldsymbol {\Gamma} (i, j) = f _ {j} (\boldsymbol {C}, \boldsymbol {a} _ {i}) \\ \boldsymbol {\Gamma} ^ {*} (i, j) = f _ {j} (\boldsymbol {C} ^ {*}, \boldsymbol {a} _ {i}), \end{array} \right. \tag {4} +$$ + +where every function $f_{j}(\pmb {B},\pmb {b})\in \mathcal{F}$ describes the distance from a single point $\pmb{b}$ to a set of points $\pmb{B}$ , and $f_{j}(\pmb {B},\pmb {b})$ can be any statistically meaningful distance. + +When fitting 3DMM to the segmented silhouettes for part $p$ , we set $\boldsymbol{C} = V_{2d}^{p}(\boldsymbol{\alpha})$ and $C^* = C_p$ with specified anchors $\mathbf{A}$ and a set of distance functions $\mathcal{F}$ . Then we calculate their corresponding geometry descriptor tensors $\Gamma_p = \Gamma(V_{2d}^p(\boldsymbol{\alpha}), \boldsymbol{A}, \mathcal{F})$ and $\Gamma_p^* = \Gamma(C_p, \boldsymbol{A}, \mathcal{F})$ . Part Re-projection Distance Loss (PRDL) $\mathcal{L}_{prdl}$ is defined as: + +$$ +\mathcal {L} _ {p r d l} = \sum_ {p \in P} w _ {p r d l} ^ {p} \left\| \boldsymbol {\Gamma} _ {p} - \boldsymbol {\Gamma} _ {p} ^ {*} \right\| _ {2} ^ {2}, \tag {5} +$$ + +where $w_{prdl}^p$ is the weight of each part $p$ . In this paper, we set $\mathcal{F}$ as a collection of the nearest $(f_{min})$ , furthest $(f_{max})$ , and average $(f_{ave})$ distance, i.e. $\mathcal{F} = \{f_{max}, f_{min}, f_{ave}\}$ . We set $\mathbf{A}$ as a $H \times W$ mesh grid. Then for $\forall \mathbf{a}_i \in \mathbf{A}$ , the optimization objective of $\mathcal{L}_{prdl}$ is to: + +$$ +\left\{ \begin{array}{l} \min | | f _ {m i n} (\boldsymbol {C} _ {p}, \boldsymbol {a} _ {i}) - f _ {m i n} (V _ {2 d} ^ {p} (\boldsymbol {\alpha}), \boldsymbol {a} _ {i}) | | _ {2} ^ {2} \\ \min | | f _ {m a x} (\boldsymbol {C} _ {p}, \boldsymbol {a} _ {i}) - f _ {m a x} (V _ {2 d} ^ {p} (\boldsymbol {\alpha}), \boldsymbol {a} _ {i}) | | _ {2} ^ {2} \\ \min | | f _ {a v e} (\boldsymbol {C} _ {p}, \boldsymbol {a} _ {i}) - f _ {a v e} (V _ {2 d} ^ {p} (\boldsymbol {\alpha}), \boldsymbol {a} _ {i}) | | _ {2} ^ {2} \end{array} . \right. \tag {6} +$$ + +This process is shown in Fig. 3(c). When $p =$ left_eye, PRDL minimizes the length difference between the indigo and orange lines (also as shown in Fig. 6(a) when $p =$ right_eybrow). The upper right corner of Fig. 3(c) is a visualization of $\Gamma_{left\_eye}$ with the last channel separately by reshaping it from $\mathbb{R}^{|A| \times |\mathcal{F}|}$ to $\mathbb{R}^{H \times W \times |\mathcal{F}|}$ . It is worth note that, the points number in $V_{2d}^{p}(\alpha)$ , $C_p$ and $A$ can be reduced by using Farthest Point Sampling (FPS) [38] to decrease computational costs. + +![](images/766e73ee8c9c97609c8c05e2e3524c9f09ab61c9881f32c6004b1b618c9cdf79.jpg) +Figure 4. Synthesize emotional expression data. + +![](images/d19110c11f631ebae74ef38ce7b33c14019e99529639079797d13881cd02ee48.jpg) +Figure 5. Examples of our synthetic face dataset. + +# 3.4. 
Overall Losses + +To reconstruct a 3D face from image $I$ , we build frameworks to minimize the total loss $\mathcal{L}$ as follows: + +$$ +\begin{array}{l} \mathcal {L} = \lambda_ {p r d l} \mathcal {L} _ {p r d l} + \lambda_ {l m k} \mathcal {L} _ {l m k} + \lambda_ {p h o} \mathcal {L} _ {p h o} \tag {7} \\ + \lambda_ {p e r} \mathcal {L} _ {p e r} + \lambda_ {r e g} \mathcal {L} _ {r e g}, \\ \end{array} +$$ + +where $\mathcal{L}_{lmk}$ is the landmark loss, we use detectors to locate 240 2D landmarks for $\mathcal{L}_{lmk}$ and adopt the dynamic landmark marching [57] to handle the non-correspondence between 2D and 3D cheek contour landmarks arising from pose variations. The photometric loss $\mathcal{L}_{pho}$ and the perceptual loss $\mathcal{L}_{per}$ are based on [11, 14]. $\mathcal{L}_{reg}$ is the regularization loss for $\alpha$ . $\lambda_{prdl} = 0.8e - 3$ , $\lambda_{lmk} = 1.6e - 3$ , $\lambda_{pho} = 1.9$ , $\lambda_{per} = 0.2$ , and $\lambda_{reg} = 3e - 4$ are the balance weights. $\mathcal{L}_{prdl}$ and $\mathcal{L}_{lmk}$ are normalized by $H\times W$ . + +# 3.5. Synthetic Emotional Expression Data + +Benefiting from recent developments in face editing research [24, 47], we can generate realistic faces through segmentation $M$ . We aim to mass-produce realistic and diverse facial expression data. To achieve this, we start by obtaining the segmentation $M$ and landmarks $lmk$ of the original image $I$ with a segmentation method [55] and a landmark detector, respectively. Leveraging the location of landmarks $lmk$ , we apply affine transformation with various patterns onto the segmentation $M$ , resulting in $M'$ . Subsequently, $M'$ is fed into the generative network [24] to produce a new facial expression image $I'$ , as depicted in Fig. 4. Based on CelebA [35] and CelebAMask-HQ [24], we have generated a dataset comprising more than $200K$ images, including expressions such as closed-eye, open-mouth, and frown, as depicted in Fig. 5. This dataset will be publicly available to facilitate research. + +![](images/5fc61e55e55d6e9c3c8976dad38e3a3e67baaf1d815ebc5ab83b4ea4584e3946.jpg) +(a) + +![](images/d6479ca4c8bb2e358c69b3f9a3551d1def43942ce23c5a305e1a506de1f537a6.jpg) +(b) + +![](images/5e1b49134b57f58610cec69f9dded9de03593de84919e22809059b2ef388c1f0.jpg) +(d) +Figure 6. (a): $p =$ right_eyebrow when the closest distance $(f_{min})$ is compared. (b): The gradient descent of PRDL for (a). (c): $\mathbf{\Gamma}_p^*$ is the regression target of PRDL in $f_{min}$ channel. (d): $M_p$ is the regression target of renderer-based methods. $\mathbf{\Gamma}_p^*$ is more informative than $M_p$ . + +# 4. Analysis of PRDL and Related Methods + +The Gradient of PRDL. With anchors and distance functions as the bridge, PRDL establishes the geometry descriptions of the two point sets. In Fig. 6, we take $p =$ right_eyebrow as an example to analyze the gradient of PRDL. When considering $f_{min}$ and a specific anchor $\pmb{a}_i \in \pmb{A}$ , $f_{min}$ identifies $\pmb{c}_m$ and $\pmb{v}_n$ from $C_p$ and $V_{2d}^p(\alpha)$ , respectively, by selecting the ones closest to $\pmb{a}_i$ : + +$$ +m = \underset {j} {\arg \min } \| \boldsymbol {a} _ {i} - \boldsymbol {c} _ {j} \| _ {2}, \quad \boldsymbol {c} _ {j} \in C _ {p}, \tag {8} +$$ + +$$ +n = \underset {j} {\arg \min } \| \boldsymbol {a} _ {i} - \boldsymbol {v} _ {j} \| _ {2}, \quad \boldsymbol {v} _ {j} \in V _ {2 d} ^ {p} (\boldsymbol {\alpha}). 
\tag {9} +$$ + +Under the definition of PRDL, the corresponding energy function $E_{i,m,n}$ for $\pmb{a}_i,\pmb{c}_m$ and $\pmb{v}_n$ is: + +$$ +\begin{array}{l} E _ {i, m, n} = \left(\left\| \boldsymbol {a} _ {i} - \boldsymbol {c} _ {m} \right\| _ {2} - \left\| \boldsymbol {a} _ {i} - \boldsymbol {v} _ {n} \right\| _ {2}\right) ^ {2} \tag {10} \\ = \left(d _ {i, m} - d _ {i, n}\right) ^ {2}, \\ \end{array} +$$ + +where $d_{i,m} = ||\pmb{a}_i - \pmb{c}_m||_2, d_{i,n} = ||\pmb{a}_i - \pmb{v}_n||_2$ . The gradient descent of $E_{i,m,n}$ on $\pmb{v}_n$ is: + +$$ +- \frac {\partial E _ {i , m , n}}{\partial \boldsymbol {v} _ {n}} = 2 (\boldsymbol {v} _ {n} - \boldsymbol {a} _ {i}) \left(\frac {d _ {i , m}}{d _ {i , n}} - 1\right). \tag {11} +$$ + +The physical explanation of Eqn. 11 is comprehensible and concise: the direction of $-\nabla E_{i,m,n}$ always aligns with the line connecting $\pmb{a}_i$ and $\pmb{v}_n$ , if $d_{i,n} > d_{i,m}$ , the direction of $-\nabla E_{i,m,n}$ is from $\pmb{v}_n$ to $\pmb{a}_i$ (as shown in Fig. 6(b)), and vice versa. In the context of gradient descent, the effect of $-\nabla E_{i,m,n}$ is to make $d_{i,n} = d_{i,m}$ as much as possible. Given $\pmb{A}$ and $f_{min}$ , the gradient descent of $\mathcal{L}_{prdl}$ on $\pmb{v}_n$ is the aggregation of all anchors: + +$$ +\begin{array}{l} - \frac {\partial \mathcal {L} _ {p r d l}}{\partial \boldsymbol {v} _ {n}} = - w _ {p r d l} ^ {p} \sum \frac {\partial E _ {i , m , n}}{\partial \boldsymbol {v} _ {n}} \\ = - w _ {p r d l} ^ {p} \sum_ {i, m} ^ {i, m} \nabla E _ {i, m, n}. \tag {12} \\ \end{array} +$$ + +The scenario with $f_{max}$ is similar to that of $f_{min}$ , with the only distinction lying in the selection of points. $f_{max}$ + +Table 1. Quantitative comparison on Part IoU benchmark. The best and runner-up are highlighted in bold and underlined, respectively. R_eye denotes the right eye, and similar definitions for the rest are omitted. + +
| Methods | Part IoU (%) ↑ |  |  |  |  |  |  |  |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
|  | R_eye avg.±std. | L_eye avg.±std. | R_brow avg.±std. | L_brow avg.±std. | Nose avg.±std. | Up_lip avg.±std. | Down_lip avg.±std. | avg. |
| PRNet [13] | 65.87±16.36 | 66.73±14.74 | 61.46±15.89 | 59.18±16.31 | 83.34±4.57 | 50.88±18.35 | 58.16±17.72 | 63.66 |
| MGCNet [45] | 64.42±16.02 | 64.81±16.91 | 55.25±15.29 | 61.30±15.58 | 87.40±3.51 | 41.16±19.70 | 66.22±13.83 | 62.94 |
| Deep3D [11] | 71.87±12.00 | 70.52±12.19 | 64.66±11.31 | 64.70±11.98 | 87.69±3.51 | 61.21±15.60 | 65.95±13.08 | 69.51 |
| 3DDFA-v2 [17] | 61.39±15.98 | 57.51±18.09 | 43.38±25.25 | 38.85±24.38 | 80.83±4.92 | 50.20±17.17 | 59.01±15.23 | 55.88 |
| HRN [25] | 73.31±11.39 | 73.61±11.50 | 67.91±8.26 | 66.78±10.27 | 90.00±2.60 | 63.80±14.16 | 66.40±11.94 | 71.69 |
| DECA [14] | 58.09±21.40 | 62.56±19.41 | 55.27±19.49 | 51.86±19.93 | 86.54±9.11 | 56.39±16.96 | 62.81±17.66 | 61.93 |
| Ours (w/o $\mathcal{L}_{prdl}$) | 70.72±9.44 | 75.69±10.79 | 71.11±8.58 | 71.69±8.73 | 88.35±4.60 | 57.26±15.97 | 69.71±10.68 | 72.08 |
| Ours (w/o Syn. Data) | 73.81±10.12 | 72.55±10.68 | 72.24±9.23 | 70.90±8.55 | 88.71±4.11 | 57.43±14.37 | 69.87±10.54 | 72.22 |
| Ours | 74.55±11.46 | 76.06±10.32 | 74.00±7.72 | 74.05±7.70 | 89.06±3.53 | 58.16±12.76 | 70.86±10.34 | 73.82 |

Table 2. Quantitative comparison on the REALY benchmark. Lower values indicate better results. The best and runner-up are highlighted in bold and underlined, respectively.
| Methods | Frontal-view (mm) ↓ |  |  |  |  | Side-view (mm) ↓ |  |  |  |  |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
|  | Nose avg.±std. | Mouth avg.±std. | Forehead avg.±std. | Cheek avg.±std. | avg. | Nose avg.±std. | Mouth avg.±std. | Forehead avg.±std. | Cheek avg.±std. | avg. |
| PRNet [13] | 1.923±0.518 | 1.838±0.637 | 2.429±0.588 | 1.863±0.698 | 2.013 | 1.868±0.510 | 1.856±0.607 | 2.445±0.570 | 1.960±0.731 | 2.032 |
| MGCNet [45] | 1.771±0.380 | 1.417±0.409 | 2.268±0.503 | 1.639±0.650 | 1.774 | 1.827±0.383 | 1.409±0.418 | 2.248±0.508 | 1.665±0.644 | 1.787 |
| Deep3D [11] | 1.719±0.354 | 1.368±0.439 | 2.015±0.449 | 1.528±0.501 | 1.657 | 1.749±0.343 | 1.411±0.395 | 2.074±0.486 | 1.528±0.517 | 1.691 |
| 3DDFA-v2 [17] | 1.903±0.517 | 1.597±0.478 | 2.447±0.647 | 1.757±0.642 | 1.926 | 1.883±0.499 | 1.642±0.501 | 2.465±0.622 | 1.781±0.636 | 1.943 |
| HRN [25] | 1.722±0.330 | 1.357±0.523 | 1.995±0.476 | 1.072±0.333 | 1.537 | 1.642±0.310 | 1.285±0.528 | 1.906±0.479 | 1.038±0.322 | 1.468 |
| DECA [14] | 1.694±0.355 | 2.516±0.839 | 2.394±0.576 | 1.479±0.535 | 2.010 | 1.903±1.050 | 2.472±1.079 | 2.423±0.720 | 1.630±1.135 | 2.107 |
| Ours (w/o $\mathcal{L}_{prdl}$) | 1.671±0.332 | 1.460±0.474 | 2.001±0.428 | 1.142±0.315 | 1.568 | 1.665±0.349 | 1.297±0.400 | 2.016±0.448 | 1.134±0.342 | 1.528 |
| Ours (w/o Syn. Data) | 1.592±0.327 | 1.339±0.433 | 1.823±0.407 | 1.119±0.332 | 1.468 | 1.628±0.320 | 1.229±0.433 | 1.872±0.407 | 1.091±0.312 | 1.455 |
| Ours | 1.586±0.306 | 1.238±0.373 | 1.810±0.394 | 1.111±0.327 | 1.436 | 1.623±0.313 | 1.205±0.366 | 1.864±0.424 | 1.076±0.315 | 1.442 |
+ +also has the capability to constrain $V_{2d}^{p}(\alpha)$ within the confines of $C_p$ . $f_{ave}$ acts on the entire $V_{2d}^{p}(\alpha)$ , striving to bring its centroid as close as possible to the centroid of $C_p$ . The introduction of additional anchors and the integration of diverse statistical distances in PRDL prevent the optimization from local optima and provide sufficient geometric signals. Please refer to supplementary materials for more details. + +PRDL vs. Renderer-Based Loss: An intuitive approach for fitting segmentation is to use the renderer-based IoU loss, where differentiable silhouette renderers play a crucial role. Consequently, we delve into the distinctions between PRDL and renderers. We can reshape $\Gamma_p^*$ ( $\mathbb{R}^{|A| \times |\mathcal{F}|} \to \mathbb{R}^{H \times W \times |\mathcal{F}|}$ ) to visualize it with the last channel separately. Fig. 6(c) illustrates the visualization of the $f_{min}$ channel for $p =$ right_eyebrow, while Fig. 6(d) represents the silhouette rendered by [33] or [8]. In comparison with the regression target $M_p$ utilized in renderer-based methods, $\Gamma_p^*$ in PRDL is more informative and more conducive to fitting. Please refer to supplementary materials for more details. + +Furthermore, considering existing theoretical analyses [8, 22, 56], PRDL exhibits several notable advantages. First, in these renderers, all triangles constituting the object influence every pixel within the silhouettes, making it intricate to isolate specific geometric features. In contrast, $f_{min}$ or $f_{max}$ in PRDL matches the nearest or furthest point on the object, allowing for a more straightforward measurement of the shape's boundary characteristics. Secondly, these renderers either neglect pixels outside any triangles of + +the 3D object or assign minimal weights to them, emphasizing the rendered object region. However, this operation is equivalent to selectively choosing anchors $A$ in the interior of the rendered shape, while the external anchors are either not chosen or treated differently by assigning small weights, thereby diminishing descriptive power. In Eqn. 11, Eqn. 12 and Fig. 6(b), we have analyzed that external anchors play a significant role in the fitting process. Ablation study (Fig.8) also proves that PRDL is more effective than renderer-based methods like [8, 33, 56]. + +# 5. Experiments + +# 5.1. Experimental Settings + +Reconstruction Frameworks. We implement PRDL based on PyTorch [39] and PyTorch3D [42]. We use ResNet-50 [18] as the backbone to predict $\alpha$ . The input image is cropped and aligned by [10], and resized into $224 \times 224$ . + +Data. The face images are from publicly available datasets: Dad-3dheads [37], CelebA [35], RAF-ML [28], RAF-DB [29] and 300W [43]. Our synthetic images are mainly from [24, 35]. We use [58] for face pose augmentation. In total, our training data contained about $600K$ face images. We employ DML-CSR [55] to predict 2D face segmentation. + +Implementation Details. Considering the inherent feature of 2D segmentation, if part $p$ of a face is invisible or occluded, it may lead to $C_p = \varnothing$ . In such a situation during training, we set $w_{prdl}^p = 0$ for these samples. We use Adam + +![](images/3c56e60d7b03eb66c8ea5c6a50b08deb753cc6e193d82074e49255858a1239de.jpg) +Figure 7. Qualitative comparison with the other methods. Our method achieves realistic reconstructions, particularly in the eye region. + +[23] as the optimizer with an initial learning rate of $1e - 4$ . 
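For reference, a minimal PyTorch-style sketch of how $\mathcal{L}_{prdl}$ can be assembled during training is given below; the tensor shapes, part names, anchor spacing, and helper functions are illustrative assumptions rather than the released implementation. Skipping a part with an empty target set corresponds to setting $w_{prdl}^p = 0$, and gradients flow back to the re-projected points as analyzed in the previous section:

```python
import torch

def part_descriptor(points: torch.Tensor, anchors: torch.Tensor) -> torch.Tensor:
    """Nearest / furthest / average distance from every anchor to a 2D point set (|A| x 3)."""
    d = torch.cdist(anchors, points)  # |A| x |points| pairwise distances
    return torch.stack([d.min(dim=1).values, d.max(dim=1).values, d.mean(dim=1)], dim=1)

def prdl_loss(pred_parts, target_parts, anchors, weights):
    """Sum over parts of w_p * ||Gamma_p - Gamma_p*||^2 (cf. Eqn. 5); parts whose
    target point set is empty (invisible or occluded) are skipped, i.e. w_p = 0."""
    loss = anchors.new_zeros(())
    for part, pred_pts in pred_parts.items():
        tgt_pts = target_parts.get(part)
        if tgt_pts is None or tgt_pts.numel() == 0:
            continue
        diff = part_descriptor(pred_pts, anchors) - part_descriptor(tgt_pts, anchors)
        loss = loss + weights[part] * (diff ** 2).sum()
    return loss

# Toy usage on one part; gradients reach the predicted (re-projected) points.
gy, gx = torch.meshgrid(torch.arange(0, 224, 16.0), torch.arange(0, 224, 16.0), indexing="ij")
anchors = torch.stack([gx.reshape(-1), gy.reshape(-1)], dim=1)
pred_pts = (torch.rand(300, 2) * 224).requires_grad_(True)
target_pts = torch.rand(350, 2) * 224
loss = prdl_loss({"left_eye": pred_pts}, {"left_eye": target_pts}, anchors, {"left_eye": 1.0})
loss.backward()
print(float(loss), pred_pts.grad.shape)
```

Because every grid anchor, including those outside the predicted region, contributes to the statistical distances, the loss keeps providing useful gradients even when the predicted and target regions barely overlap.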
We use Farthest Point Sampling (FPS) [38] to reduce the point number of $V_{2d}^{skin}(\alpha)$ and $C_{skin}$ to 3000, reducing computational consumption. Please refer to supplemental materials for more details. + +# 5.2. Metric + +In various VR/AR applications, 3DMMs are crucial in capturing facial motions or providing fine-grained regions covering facial features. One crucial objective in such applications is to ensure the alignment of overlapping facial parts between prediction and input. Widely used benchmarks [7, 44] typically rely on the 3D accuracy performance of reconstructions. However, there are instances where inconsistencies arise between 3D errors and 2D alignments. As shown in Fig.2(b), comparing with 3DDFA-v2 [17], DECA [14] have better 2D eye region overlapping IoU (70.29% vs. 39.37%) but a higher 3D forehead error (1.88mm vs. 1.75mm). To address this, we introduce Part IoU to emphasize the performance of overlap. + +Part IoU is a new benchmark to quantify how well the part + +reconstruction $V_{3d}^{p}(\alpha)$ aligns with their corresponding parts from the original face. The core idea is to measure the overlap of facial components between the reconstruction and the original image using IoU. The ground truth is a binary tensor $\{M_p\}$ (as defined above). We render $V_{3d}(\alpha)$ with a mean texture as an image, generate the predicted segmentation $\{M_p^{pred}\}$ with [55]. The use of mean texture focuses the metric more on overlap effects than other factors, making it applicable to methods without texture-fitting [13, 17]. Part IoU $IoU_p$ of part $p$ can be obtained by: + +$$ +I o U _ {p} = I o U \left(M _ {p} ^ {p r e d}, M _ {p}\right). \tag {13} +$$ + +MEAD [50] is an emotional talking-face dataset. We test Part IoU by selecting 10 individuals from MEAD, each contributing 50 random different images. Part IoU measures the overlap performance between each part of the reconstruction and the ground truth. More detail is in the supplemental materials. + +REALY [7] benchmark consists of 100 scanned neutral expression faces, which are divided into four parts: nose, mouth, forehead (eyes and eyebrows), and cheek for 3D alignment and distance error calculation. + +![](images/335aac99e095e85b2b5f743ce9f2f159e659c4351b9b50c58e35d9730dbb8379.jpg) +Input +Figure 8. Comparison with the renderer-based geometric guidance of segmentation. + +![](images/532f7c06be93b30dd9daf6d2f4933e89bd0f6a0c7c3c133dbf13f218c668edba.jpg) +SoftRas + +![](images/cb36a202880902ae1a564cbed054916650bc8838671697dae42b18e61c55bcc7.jpg) +DIB-R + +![](images/aafd61dbd91e68cfd655195d7c0a665dc81caf9ec3972a2060c317920a56085a.jpg) +ReDA + +![](images/8e806851a899441bf975b9e6061cb36ba18c5f5048dcb0ddf458e37cb60dcf1a.jpg) +PRDL + +# 5.3. Qualitative Comparison + +We conduct a comprehensive evaluation of our method with the state-of-the-art approaches, including PRNet [13], MGCNet [45], Deep3D [11], 3DDFA-V2 [17], HRN [25] and DECA [14]. The visualization of HRN and DECA uses the mid-frequency details and coarse shape (denoted as HRN-m and DECA-c) since their further steps only change the renderer's normal map, while no 3D refinement is made. As shown in Fig. 7, our results excel in capturing extreme expressions, even better than HRN-m which has fine reconstruction steps. + +# 5.4.Quantitative Comparison + +On both the Part IoU and REALY [7] benchmarks, our results outperforms the existing state-of-the-art methods. As shown in Tab. 
1, our method is almost always the highest overlap IoU across various facial parts with $73.82\%$ total average, demonstrating PRDL enhances the part alignment of reconstruction. PRDL also performs the best average 3D error on the REALY benchmark (1.436mm in frontal-view and 1.442mm in side-view), as shown in Tab. 2. + +# 5.5. Ablation Study + +Ablation for PRDL and Synthetic Data. We conduct quantitative ablation experiments for PRDL and synthetic data on REALY and Part IoU. As depicted in Table 1 and Table 2, only introducing PRDL already yields superior results compared to all other methods (72.22%, 1.468mm, and 1.455mm). Introducing synthetic data without PRDL demonstrates a significant improvement in Part IoU, but not as effectively as PRDL (72.08% vs. 72.22%). Using both synthetic data and PRDL could lead to the best result. + +Compare with the Differentiable Silhouette Renderers. SoftRas [33] and DIB-R [8] are the two most widely used renderers, which serve as the basis for PyTorch3D [42] and Kaolin [15], respectively. Based on the image-fitting framework [1], we use them to render a silhouette of each face part and calculate the IoU loss with the ground truth. ReDA [56] is also a renderer-based method using the geometric guidance of segmentation. Fig.8 shows that PRDL is significantly better than these methods. It is essential to em + +![](images/7dae62bebef5ed7d9b06c30517d6279b3cf317fdb955a559770a77ff028362e3.jpg) +Input +Chamfer Distance +Figure 9. Comparison with the other point-driven-based geometric guidance of segmentation. + +![](images/aa40965bc2b915017141f8e4086f6c645fead550126d0fd7cae293b49063d06f.jpg) +Density Aware + +![](images/ff57dc0994d61bd7f37a4b027bebeb3b5e95d186a16772797141a3dad3b7eb54.jpg) +ICP + +![](images/fe60e4798093524f74c52637a4cecbb3c65bbc778834bb35585115f8b99b6660.jpg) + +![](images/c66e5da9aec31e8ea104677bc7f911f2b4c293c398c8c44524427de817bbe9ef.jpg) +PRDL + +phasize that all the results in Fig.8 and Fig.9 do not include $\mathcal{L}_{lmk}$ , $\mathcal{L}_{pho}$ , and $\mathcal{L}_{per}$ . + +Compare with the Other Point-Driven Optimization Methods. One of the key insights of PRDL is transforming segmentation into points. Thus the 3DMM fitting becomes an optimization of two 2D point clouds until they share the same geometry. While an intuitive idea is incorporating the point-driven optimization methods like iterative closest points (ICP) [2-4] or chamfer distance [53], these methods are predominantly rooted in nearest-neighbor principles, and solely opting for the minimum distance potentially leads to local optima. We compare PRDL with ICP [54], chamfer distance and density aware chamfer distance [53] based on [1]. Since the ICP distance can be calculated from target to prediction or vice versa, we provide both methods. As depicted in Fig.9, PRDL outperforms other methods, producing outputs that align more accurately with the desired geometry. This superiority is attributed to the use of additional anchors and diverse statistical distances in PRDL. Referring to Fig.8 and Fig.9, PRDL stands out as the only loss capable of reconstructing effective results when the segmentation information is used independently. More comparison is in the supplemental materials. + +# 6. Conclusions + +This paper proposes a novel Part Re-projection Distance Loss (PRDL) to reconstruct 3D faces with the geometric guidance of facial part segmentation. Analysis proves that PRDL is superior to renderer-based and other point-driven optimization methods. 
# 6. Conclusions

This paper proposes a novel Part Re-projection Distance Loss (PRDL) to reconstruct 3D faces with the geometric guidance of facial part segmentation. Our analysis shows that PRDL is superior to renderer-based and other point-driven optimization methods. We also provide a new emotional facial expression dataset and a new 3D mesh part annotation to facilitate further studies. Experiments further highlight the state-of-the-art performance of PRDL, achieving high fidelity and better part alignment in 3D face reconstruction.

# Acknowledgement

This work was supported in part by Chinese National Natural Science Foundation Projects 62176256, U23B2054, 62276254, 62206280, the Beijing Science and Technology Plan Project Z231100005923033, Beijing Natural Science Foundation L221013, the Youth Innovation Promotion Association CAS Y2021131, and the InnoHK program.

# References

[1] 3dmm model fitting using pytorch. https://github.com/ascust/3DMM-Fitting-Pytorch, 2021. 8
[2] Brian Amberg, Sami Romdhani, and Thomas Vetter. Optimal step nonrigid icp algorithms for surface registration. In 2007 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2007. 8
[3] K. S. Arun, T. S. Huang, and S. D. Blostein. Least-squares fitting of two 3-d point sets. IEEE Transactions on Pattern Analysis and Machine Intelligence, PAMI-9(5):698-700, 1987. 8
[4] P. J. Besl and Neil D. McKay. A method for registration of 3-d shapes. IEEE Transactions on Pattern Analysis and Machine Intelligence, 14(2):239-256, 1992. 8
[5] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques, pages 187-194, 1999. 1, 4
[6] Volker Blanz and Thomas Vetter. Face recognition based on fitting a 3d morphable model. IEEE Transactions on Pattern Analysis and Machine Intelligence, 25(9):1063-1074, 2003. 3
[7] Zenghao Chai, Haoxian Zhang, Jing Ren, Di Kang, Zhengzhuo Xu, Xuefei Zhe, Chun Yuan, and Linchao Bao. REALY: Rethinking the evaluation of 3d face reconstruction. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part VIII, pages 74-92. Springer, 2022. 2, 7, 8
[8] Wenzheng Chen, Huan Ling, Jun Gao, Edward Smith, Jaakko Lehtinen, Alec Jacobson, and Sanja Fidler. Learning to predict 3d objects with an interpolation-based differentiable renderer. Advances in Neural Information Processing Systems, 32, 2019. 3, 6, 8
[9] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2019. 3
[10] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In CVPR, 2020. 6
[11] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2019. 1, 2, 3, 5, 6, 8
[12] Bernhard Egger, Sandro Schonborn, Andreas Schneider, Adam Kortylewski, Andreas Morel-Forster, Clemens Blumer, and Thomas Vetter. Occlusion-aware 3d morphable models and an illumination prior for face image analysis. International Journal of Computer Vision, 126:1269-1287, 2018. 1, 2
[13] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In Proceedings of the European Conference on Computer Vision (ECCV), pages 534-551, 2018. 6, 7, 8
[14] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3D face model from in-the-wild images. 2021. 1, 2, 3, 5, 6, 7, 8
[15] Clement Fuji Tsang, Maria Shugrina, Jean Francois Lafleche, Towaki Takikawa, Jiehan Wang, Charles Loop, Wenzheng Chen, Krishna Murthy Jatavallabhula, Edward Smith, Artem Rozantsev, Or Perel, Tianchang Shen, Jun Gao, Sanja Fidler, Gavriel State, Jason Gorski, Tommy Xiang, Jianing Li, Michael Li, and Rev Lebaredian. Kaolin: A pytorch library for accelerating 3d deep learning research. https://github.com/NVIDIAGameWorks/kaolin, 2022. 2, 3, 8
[16] Kyle Genova, Forrester Cole, Aaron Maschinot, Aaron Sarna, Daniel Vlasic, and William T Freeman. Unsupervised training for 3d morphable model regression. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8377-8386, 2018. 3
[17] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. In Proceedings of the European Conference on Computer Vision (ECCV), pages 152-168, 2020. 1, 2, 3, 6, 7, 8
[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770-778, 2016. 6
[19] Yueying Kao, Bowen Pan, Miao Xu, Jiangjing Lyu, Xiangyu Zhu, Yuanzhang Chang, Xiaobo Li, and Zhen Lei. Toward 3d face reconstruction in perspective projection: Estimating 6dof face pose from monocular image. IEEE Transactions on Image Processing, 32:3080-3091, 2023. 1
[20] Yury Kartynnik, Artsiom Ablavatski, Ivan Grishchenko, and Matthias Grundmann. Real-time facial surface geometry from monocular video on mobile gpus. arXiv preprint arXiv:1907.06724, 2019. 2
[21] Hiroharu Kato, Yoshitaka Ushiku, and Tatsuya Harada. Neural 3d mesh renderer. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3907-3916, 2018. 3
[22] Hiroharu Kato, Deniz Beker, Mihai Morariu, Takahiro Ando, Toru Matsuoka, Wadim Kehl, and Adrien Gaidon. Differentiable rendering: A survey. arXiv preprint arXiv:2006.12057, 2020. 2, 3, 6
[23] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 7
[24] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 3, 4, 5, 6
[25] Biwen Lei, Jianqiang Ren, Mengyang Feng, Miaomiao Cui, and Xuansong Xie. A hierarchical representation network for accurate and detailed face reconstruction from in-the-wild images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 394-403, 2023. 1, 2, 3, 6, 8
[26] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. To fit or not to fit: Model-based face reconstruction and occlusion segmentation from weak supervision. arXiv preprint arXiv:2106.09614, 2021. 2
[27] Ruilong Li, Karl Bladin, Yajie Zhao, Chinmay Chinara, Owen Ingraham, Pengda Xiang, Xinglei Ren, Pratusha Prasad, Bipin Kishore, Jun Xing, and Hao Li. Learning formation of physically-based face attributes. 2020. 4
[28] Shan Li and Weihong Deng. Blended emotion in-the-wild: Multi-label facial expression recognition using crowdsourced annotations and deep locality feature learning. International Journal of Computer Vision, 127(6-7):884-906, 2019. 6
[29] Shan Li and Weihong Deng. Reliable crowdsourcing and deep locality-preserving learning for unconstrained facial expression recognition. IEEE Transactions on Image Processing, 28(1):356-370, 2019. 6
[30] Tianye Li, Timo Bolkart, Michael J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 2, 4
[31] Jinpeng Lin, Hao Yang, Dong Chen, Ming Zeng, Fang Wen, and Lu Yuan. Face parsing with roi tanh-warping. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5654-5663, 2019. 2
[32] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. Roi tanh-polar transformer network for face parsing in the wild. Image and Vision Computing, 112:104190, 2021. 2, 4
[33] Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7708-7717, 2019. 2, 3, 6, 8
[34] Yinglu Liu, Hailin Shi, Hao Shen, Yue Si, Xiaobo Wang, and Tao Mei. A new dataset and boundary-attention semantic segmentation for face parsing. In AAAI, pages 11637-11644, 2020. 2
[35] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaoou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 5, 6
[36] Matthew M Loper and Michael J Black. Opendr: An approximate differentiable renderer. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pages 154-169. Springer, 2014. 3
[37] Tetiana Martyniuk, Orest Kupyn, Yana Kurlyak, Igor Krashenyi, Jiri Matas, and Viktoriya Sharmanska. DAD-3DHeads: A large-scale dense, accurate and diverse dataset for 3d head alignment from a single image. In Proc. IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 6
[38] Carsten Moenning and Neil A Dodgson. Fast marching farthest point sampling. Technical report, University of Cambridge, Computer Laboratory, 2003. 4, 7
[39] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in Neural Information Processing Systems, 32, 2019. 6
[40] Ravi Ramamoorthi and Pat Hanrahan. An efficient representation for irradiance environment maps. In Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, pages 497-500, 2001. 3
[41] Chirag Raman, Charlie Hewitt, Erroll Wood, and Tadas Baltrusaitis. Mesh-tension driven expression-based wrinkles for synthetic faces. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3515-3525, 2023. 3
[42] Nikhila Ravi, Jeremy Reizenstein, David Novotny, Taylor Gordon, Wan-Yen Lo, Justin Johnson, and Georgia Gkioxari. Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501, 2020. 2, 3, 6, 8
[43] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 397-403, 2013. 6
[44] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael J Black. Learning to regress 3d face shape and expression from an image without 3d supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7763-7772, 2019. 7
[45] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Mingmin Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3d face reconstruction by occlusion-aware multi-view geometry consistency. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XV, pages 53-70. Springer, 2020. 1, 2, 6, 8
[46] Dave Shreiner, Bill The Khronos OpenGL ARB Working Group, et al. OpenGL programming guide: the official guide to learning OpenGL, versions 3.0 and 3.1. Pearson Education, 2009. 2, 3
[47] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. ACM Transactions on Graphics (TOG), 41(6):1-10, 2022. 5
[48] Ayush Tewari, Hans-Peter Seidel, Mohamed Elgharib, Christian Theobalt, et al. Learning complete 3d morphable face models from images and videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3361-3371, 2021. 2, 3
[49] University of Basel Graphics and Vision Research. parametric-face-image-generator. https://github.com/unibas-gravis/parametric-face-image-generator, 2017. 2, 4
[50] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 7
[51] Lizhen Wang, Zhiyuan Chen, Tao Yu, Chenguang Ma, Liang Li, and Yebin Liu. Faceverse: A fine-grained and detail-controllable 3d face morphable model from a hybrid dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20333-20342, 2022. 4
[52] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. Fake it till you make it: Face analysis in the wild using synthetic data alone. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3681-3691, 2021. 3
[53] Tong Wu, Liang Pan, Junzhe Zhang, Tai Wang, Ziwei Liu, and Dahua Lin. Density-aware chamfer distance as a comprehensive metric for point cloud completion. arXiv preprint arXiv:2111.12702, 2021. 8
[54] Jiaolong Yang, Hongdong Li, Dylan Campbell, and Yunde Jia. Go-icp: A globally optimal solution to 3d icp point-set registration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(11):2241-2254, 2015. 8
[55] Qi Zheng, Jiankang Deng, Zheng Zhu, Ying Li, and Stefanos Zafeiriou. Decoupled multi-task learning with cyclical self-regulation for face parsing. In Computer Vision and Pattern Recognition, 2022. 2, 4, 5, 6, 7
[56] Wenbin Zhu, HsiangTao Wu, Zeyu Chen, Noranart Vesdapunt, and Baoyuan Wang. Reda: Reinforced differentiable attribute for 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4958-4967, 2020. 2, 3, 6, 8
[57] Xiangyu Zhu, Zhen Lei, Junjie Yan, Dong Yi, and Stan Z Li. High-fidelity pose and expression normalization for face recognition in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 787-796, 2015. 5
[58] Xiangyu Zhu, Xiaoming Liu, Zhen Lei, and Stan Z Li. Face alignment in full pose range: A 3d total solution. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41(1):78-92, 2017. 3, 6
[59] Xiangyu Zhu, Chang Yu, Di Huang, Zhen Lei, Hao Wang, and Stan Z Li.
Beyond 3dmm: Learning to capture high-fidelity 3d face shape. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 1 +[60] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces. In European Conference on Computer Vision, pages 250–269. Springer, 2022. 1, 2 \ No newline at end of file diff --git a/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/images.zip b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..a707c4c6e33fae69a8a5889b418ce27c6e4704ce --- /dev/null +++ b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bee8e322de15541f3516f7ab9423be3637c427232571cab6fcc4a231df0a1dc +size 782763 diff --git a/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/layout.json b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b129dd64848ac17c3f4796a3b2bd6982c9327cc9 --- /dev/null +++ b/2024/3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation/layout.json @@ -0,0 +1,12432 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 121, + 103, + 473, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 103, + 473, + 140 + ], + "spans": [ + { + "bbox": [ + 121, + 103, + 473, + 140 + ], + "type": "text", + "content": "3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "spans": [ + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "type": "text", + "content": "Zidu Wang" + }, + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "type": "text", + "content": ", Xiangyu Zhu" + }, + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "type": "inline_equation", + "content": "^{1,2*}" + }, + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "type": "text", + "content": ", Tianshuo Zhang" + }, + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "type": "text", + "content": ", Baiqin Wang" + }, + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "type": "text", + "content": ", Zhen Lei" + }, + { + "bbox": [ + 100, + 160, + 492, + 175 + ], + "type": "inline_equation", + "content": "^{1,2,3}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 175, + 463, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 175, + 463, + 202 + ], + "spans": [ + { + "bbox": [ + 130, + 175, + 463, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 130, + 175, + 463, + 202 + ], + "type": "text", + "content": "State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation, Chinese Academy of Sciences" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 203, + 485, + 217 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 108, + 203, + 485, + 217 + ], + "spans": [ + { + "bbox": [ + 108, + 203, + 485, + 217 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 108, + 203, + 485, + 217 + ], + "type": "text", + "content": "School of Artificial Intelligence, University of Chinese Academy of Sciences" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 217, + 525, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 217, + 525, + 244 + ], + "spans": [ + { + "bbox": [ + 67, + 217, + 525, + 244 + ], + "type": "text", + "content": "3 Centre for Artificial Intelligence and Robotics, Hong Kong Institute of Science & Innovation, Chinese Academy of Sciences" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 57, + 247, + 537, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 247, + 537, + 259 + ], + "spans": [ + { + "bbox": [ + 57, + 247, + 537, + 259 + ], + "type": "text", + "content": "{wangzidu2022, wangbaiqin2024}@ia.ac.cn,{xiangyu.zhu, tianshuo.zhang, zlei}@nlpr.ia.ac.cn" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 286, + 192, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 286, + 192, + 300 + ], + "spans": [ + { + "bbox": [ + 143, + 286, + 192, + 300 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 312, + 290, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 312, + 290, + 587 + ], + "spans": [ + { + "bbox": [ + 46, + 312, + 290, + 587 + ], + "type": "text", + "content": "3D Morphable Models (3DMMs) provide promising 3D face reconstructions in various applications. However, existing methods struggle to reconstruct faces with extreme expressions due to deficiencies in supervisory signals, such as sparse or inaccurate landmarks. Segmentation information contains effective geometric contexts for face reconstruction. Certain attempts intuitively depend on differentiable renderers to compare the rendered silhouettes of reconstruction with segmentation, which is prone to issues like local optima and gradient instability. In this paper, we fully utilize the facial part segmentation geometry by introducing Part Re-projection Distance Loss (PRDL). Specifically, PRDL transforms facial part segmentation into 2D points and re-projects the reconstruction onto the image plane. Subsequently, by introducing grid anchors and computing different statistical distances from these anchors to the point sets, PRDL establishes geometry descriptors to optimize the distribution of the point sets for face reconstruction. PRDL exhibits a clear gradient compared to the renderer-based methods and presents state-of-the-art reconstruction performance in extensive quantitative and qualitative experiments. Our project is available at https://github.com/wang-zidu/3DDFA-V3." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 612, + 128, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 612, + 128, + 624 + ], + "spans": [ + { + "bbox": [ + 47, + 612, + 128, + 624 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 632, + 287, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 632, + 287, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 632, + 287, + 693 + ], + "type": "text", + "content": "Reconstructing 3D faces from 2D images is an essential task in computer vision and graphics, finding diverse applications in fields such as Virtual Reality (VR), Augmented Reality (AR), and Computer-generated Imagery (CGI), etc. In applications like VR makeup and AR emoji, 3DMMs" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 309, + 286, + 382, + 360 + ], + "blocks": [ + { + "bbox": [ + 309, + 286, + 382, + 360 + ], + "lines": [ + { + "bbox": [ + 309, + 286, + 382, + 360 + ], + "spans": [ + { + "bbox": [ + 309, + 286, + 382, + 360 + ], + "type": "image", + "image_path": "d6637cfd28f40026409df73e4475d5cc70b7d2fa6f410fb461882c562ae98f02.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 389, + 286, + 462, + 360 + ], + "blocks": [ + { + "bbox": [ + 389, + 286, + 462, + 360 + ], + "lines": [ + { + "bbox": [ + 389, + 286, + 462, + 360 + ], + "spans": [ + { + "bbox": [ + 389, + 286, + 462, + 360 + ], + "type": "image", + "image_path": "98ab93bdca526f27de508e1d5f90995e02eb38b906fe0b041eb360221623ef89.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 470, + 286, + 544, + 360 + ], + "blocks": [ + { + "bbox": [ + 470, + 286, + 544, + 360 + ], + "lines": [ + { + "bbox": [ + 470, + 286, + 544, + 360 + ], + "spans": [ + { + "bbox": [ + 470, + 286, + 544, + 360 + ], + "type": "image", + "image_path": "252cd538d0639398cd84839dafd613e9906a1a8a4e04fa765e8c37a5eee7e369.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 308, + 361, + 382, + 434 + ], + "blocks": [ + { + "bbox": [ + 308, + 361, + 382, + 434 + ], + "lines": [ + { + "bbox": [ + 308, + 361, + 382, + 434 + ], + "spans": [ + { + "bbox": [ + 308, + 361, + 382, + 434 + ], + "type": "image", + "image_path": "69fa143e20d25b4688b2fdb83092ff9d68b5b6dad5c8745f4b1c700df16992da.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 444, + 547, + 499 + ], + "lines": [ + { + "bbox": [ + 305, + 444, + 547, + 499 + ], + "spans": [ + { + "bbox": [ + 305, + 444, + 547, + 499 + ], + "type": "text", + "content": "Figure 1. We introduce Part Re-projection Distance Loss (PRDL) for 3D face reconstruction, leveraging the geometric guidance provided by facial part segmentation. PRDL enhances the alignment of reconstructed facial features with the original image and excels in capturing extreme expressions." 
+ } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 389, + 361, + 462, + 434 + ], + "blocks": [ + { + "bbox": [ + 389, + 361, + 462, + 434 + ], + "lines": [ + { + "bbox": [ + 389, + 361, + 462, + 434 + ], + "spans": [ + { + "bbox": [ + 389, + 361, + 462, + 434 + ], + "type": "image", + "image_path": "b5c36c3144c4e2a08d39de04771a49ce356cb286c18fa683d329733688a98bd2.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 470, + 361, + 544, + 434 + ], + "blocks": [ + { + "bbox": [ + 470, + 361, + 544, + 434 + ], + "lines": [ + { + "bbox": [ + 470, + 361, + 544, + 434 + ], + "spans": [ + { + "bbox": [ + 470, + 361, + 544, + 434 + ], + "type": "image", + "image_path": "4fcf17a949d1dbecf6a69ed0acd4b17a73476b43141ab566851a7be2a810026c.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 510, + 545, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 593 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 593 + ], + "type": "text", + "content": "[5] are commonly employed for precise facial feature positioning and capturing expressions. One of the most critical concerns is ensuring that the reconstructed facial components, including the eyes, eyebrows, lips, etc., seamlessly align with their corresponding regions in the input image with pixel-level accuracy, particularly when dealing with extreme facial expressions, as shown in Fig. 1." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 594, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 546, + 713 + ], + "type": "text", + "content": "Although current methods [11, 14, 17, 19, 25] have made notable strides in face reconstruction, some issues persist. On the one hand, existing works often rely on landmarks [17, 60] and photometric-texture [12, 45] to guide face reconstruction. In the case of extreme facial expressions, landmarks are sparse or inaccurate and the gradient from the texture loss cannot directly constrain the shape [59], posing a challenge for existing methods to achieve precise alignment of facial features in 3D face reconstruction, as depicted in Fig. 2(a). On the other hand, many methods" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 182, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 182, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 182, + 713 + ], + "type": "text", + "content": "*Corresponding author: Xiangyu Zhu" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1672" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": "primarily adopt 3D errors as a quality metric, overlooking the precise alignment of facial parts. As shown in Fig. 2(b), when evaluating the REALY [7] benchmark in the eye region, comparing the results of 3DDFA-v2 [17] and DECA [14], a lower 3D region error may not lead to better 2D region alignment. We believe in the potential for a more comprehensive utilization of the geometry information inherent in each facial part segmentation to guide 3D face reconstruction, addressing the issues mentioned above." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 183, + 289, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 183, + 289, + 435 + ], + "spans": [ + { + "bbox": [ + 46, + 183, + 289, + 435 + ], + "type": "text", + "content": "Facial part segmentation [24, 31, 32, 34] has been well developed, offering precise geometry for each facial feature with pixel-level accuracy. Compared with commonly used landmarks, part segmentation provides denser labels covering the whole image. Compared with photometric texture, part segmentation is less susceptible to lighting or shadow interference. Although facial part segmentation occasionally appears in the process of 3D face reconstruction, it is not fully utilized. For instance, it only serves to enhance the reconstruction quality of specific regions [25, 48], or to distinguish the overall texture location for photometric-texture-loss [26], without delving into the specifics of facial parts. Attempts [33, 56] to fit 3D parts with the guidance of segmentation information rely on differentiable renderers [15, 42, 46] to generate the silhouettes of the predicted 3D facial regions and optimize the difference between the rendered silhouettes and the 2D segmentation through Intersection over Union (IoU) loss. However, these renderers fail to provide sufficient and stable geometric signals for face reconstruction due to local optima, rendering error propagation, and gradient instability [22]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 438, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 438, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 438, + 289, + 715 + ], + "type": "text", + "content": "This paper leverages the precise and rich geometric information in facial part silhouettes to guide face reconstruction, thereby improving the alignment of reconstructed facial features with the original image and excelling in reconstructing extreme facial expressions. Fig.1 provides an overview of the proposed Part Re-projection Distance Loss (PRDL). 
Firstly, PRDL samples points within the segmented region and transforms the segmentation information into a 2D point set for each facial part. The 3D face reconstruction is also re-projected onto the image plane and transformed into 2D point sets for different regions. Secondly, PRDL samples the image grid anchors and establishes geometric descriptors. These descriptors are constructed by using various statistical distances from the anchors to the point set. Finally, PRDL optimizes the distribution of the same semantic point sets, leading to improved overlap between the regions covered by the target and predicted point sets. In contrast to renderer-based methods, PRDL exhibits a clear gradient. To facilitate the use of PRDL, we provide a new 3D mesh part annotation aligned with semantic regions in 2D face segmentation [24, 55], which differs from the existing annotations [30, 49], as shown in Fig.2(c). Besides the drawbacks of supervisory signals, the challenge of han" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 307, + 72, + 448, + 119 + ], + "blocks": [ + { + "bbox": [ + 307, + 72, + 448, + 119 + ], + "lines": [ + { + "bbox": [ + 307, + 72, + 448, + 119 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 448, + 119 + ], + "type": "image", + "image_path": "ac68d5bbb39a20f8fcd0a8f124eba73e61b88118bb3adbf8e07621e45cd10413.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 320, + 119, + 435, + 126 + ], + "lines": [ + { + "bbox": [ + 320, + 119, + 435, + 126 + ], + "spans": [ + { + "bbox": [ + 320, + 119, + 435, + 126 + ], + "type": "text", + "content": "(a) Performance on extreme expressions" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 127, + 448, + 178 + ], + "blocks": [ + { + "bbox": [ + 307, + 127, + 448, + 178 + ], + "lines": [ + { + "bbox": [ + 307, + 127, + 448, + 178 + ], + "spans": [ + { + "bbox": [ + 307, + 127, + 448, + 178 + ], + "type": "image", + "image_path": "be08dc941b4c38b740950507e6cdff094eed3a01abb3719f5d81e47ca113dd96.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 333, + 179, + 422, + 187 + ], + "lines": [ + { + "bbox": [ + 333, + 179, + 422, + 187 + ], + "spans": [ + { + "bbox": [ + 333, + 179, + 422, + 187 + ], + "type": "text", + "content": "(b) 3D error vs. 2D alignment" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 191, + 545, + 257 + ], + "lines": [ + { + "bbox": [ + 304, + 191, + 545, + 257 + ], + "spans": [ + { + "bbox": [ + 304, + 191, + 545, + 257 + ], + "type": "text", + "content": "Figure 2. Drawbacks of existing research and our results. (a) Present researches fail to reconstruct extreme expressions and perform bad region alignment. (b) Inconsistencies between 3D errors and 2D alignments, such as the eye region in this case. (c) Geometric optimization of each semantically consistent part is only achievable through our annotations." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 450, + 72, + 543, + 178 + ], + "blocks": [ + { + "bbox": [ + 450, + 72, + 543, + 178 + ], + "lines": [ + { + "bbox": [ + 450, + 72, + 543, + 178 + ], + "spans": [ + { + "bbox": [ + 450, + 72, + 543, + 178 + ], + "type": "image", + "image_path": "5c93af5dd693181cbe20a1c67826b7893bcffb0abc7a8f736e2feec185b172a2.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 453, + 179, + 541, + 187 + ], + "lines": [ + { + "bbox": [ + 453, + 179, + 541, + 187 + ], + "spans": [ + { + "bbox": [ + 453, + 179, + 541, + 187 + ], + "type": "text", + "content": "(c) 3D face model annotations" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 266, + 545, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 266, + 545, + 362 + ], + "spans": [ + { + "bbox": [ + 304, + 266, + 545, + 362 + ], + "type": "text", + "content": "dling extreme expressions arises from data limitations. To boost studies and address the lack of emotional expression (e.g., closed-eye, open-mouth, frown, etc.), we synthesize a face dataset using the GAN-based method [24]. To highlight the performance of region overlapping, we propose a new benchmark to quantify the accuracy of 3D reconstruction parts cling to their corresponding image components on the 2D image plane. Our main contributions are as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 364, + 545, + 520 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 306, + 364, + 545, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 364, + 545, + 435 + ], + "spans": [ + { + "bbox": [ + 306, + 364, + 545, + 435 + ], + "type": "text", + "content": "- We introduce a novel Part Re-projection Distance Loss (PRDL) to comprehensively utilize segmentation information for face reconstruction. PRDL transforms the target and prediction into semantic point sets, optimizing the distribution of point sets to ensure that the reconstructed regions and the target share the same geometry." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 436, + 545, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 436, + 545, + 472 + ], + "spans": [ + { + "bbox": [ + 306, + 436, + 545, + 472 + ], + "type": "text", + "content": "- We introduce a new synthetic face dataset including closed-eye, open-mouth, and frown expressions, with more than " + }, + { + "bbox": [ + 306, + 436, + 545, + 472 + ], + "type": "inline_equation", + "content": "200K" + }, + { + "bbox": [ + 306, + 436, + 545, + 472 + ], + "type": "text", + "content": " images." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 472, + 545, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 472, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 306, + 472, + 545, + 520 + ], + "type": "text", + "content": "- Extensive experiments show that the results with PRDL achieve excellent performance and outperform the existing methods. The data and code are available at https://github.com/wang-zidu/3DDFA-V3." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 532, + 392, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 532, + 392, + 544 + ], + "spans": [ + { + "bbox": [ + 306, + 532, + 392, + 544 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "content": "2D-to-3D Losses for 3D Face Reconstruction. Landmark loss [11, 17, 60] stands out as the most widely employed and effective supervised way for face reconstruction. Some studies [20, 37] reveal that it can generate 3D faces under the guidance of sufficient hundreds or thousands landmarks. Photometric loss is another commonly used loss involving rendering the reconstructed mesh with texture into an image and comparing it to the original input. Some researchers focus on predicting the facial features that need to be fitted while excluding occlusions [12, 45]. The photometric loss is susceptible to factors like texture basis, skin masks, and rendering modes. It emphasizes overall visualization and may not effectively constrain local details. Perception loss" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1673" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 71, + 293, + 205 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 293, + 205 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 293, + 205 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 293, + 205 + ], + "type": "image", + "image_path": "9993a79f4147ffc2959550ceabcd52c36438bcd60ffeb38a1fa0351a35b53af9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "lines": [ + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "spans": [ + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "text", + "content": "Figure 3. Overview of Part Re-projection Distance Loss (PRDL). (a): Transforming facial part segmentation into target point sets " + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "inline_equation", + "content": "\\{C_p\\}" + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "text", + "content": ". (b): Re-projecting " + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "inline_equation", + "content": "V_{3d}(\\alpha)" + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "text", + "content": " onto the image plane to obtain predicted point sets " + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "inline_equation", + "content": "\\{V_{2d}^p (\\alpha)\\}" + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "text", + "content": ". 
(c): Given anchors " + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "text", + "content": " and distance functions " + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "text", + "content": ", the core idea of PRDL is to minimize the difference of every statistical distance from any " + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "inline_equation", + "content": "\\pmb{a}_i\\in \\pmb{A}" + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "text", + "content": " to the " + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "inline_equation", + "content": "V_{2d}^{p}(\\alpha)" + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "inline_equation", + "content": "C_p" + }, + { + "bbox": [ + 46, + 211, + 547, + 257 + ], + "type": "text", + "content": ", leading to enhanced overlap between the regions covered by the target and predicted point sets." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 293, + 71, + 542, + 205 + ], + "blocks": [ + { + "bbox": [ + 293, + 71, + 542, + 205 + ], + "lines": [ + { + "bbox": [ + 293, + 71, + 542, + 205 + ], + "spans": [ + { + "bbox": [ + 293, + 71, + 542, + 205 + ], + "type": "image", + "image_path": "c2bf4c7eb6a15c3244b1665bdca0276d122e58692fb2aefc44d7fb8ec13f107a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 266, + 288, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 288, + 339 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 288, + 339 + ], + "type": "text", + "content": "[11, 14, 16] distinguishes itself from image-level methods by employing pre-trained deep face recognition networks [9] to extract high-level features from the rendered reconstruction results. These features are then compared with the features from the input. Lip segmentation consistency loss [48] employs mouth segmentation to help reconstruction." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 346, + 287, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 346, + 287, + 574 + ], + "spans": [ + { + "bbox": [ + 46, + 346, + 287, + 574 + ], + "type": "text", + "content": "Differentiable Silhouette Renderers. The development of differentiable renderers [15, 42, 46] has enriched the supervised methods for 3D face reconstruction. These pipelines make the rasterization process differentiable, allowing for the computation of gradients for every pixel in the rendered results. By combining IoU loss with segmentation information, the silhouettes produced by these renderers have been shown to optimize 3D shapes [8, 33, 56]. These rasterization processes typically rely on either local [21, 36] or global [8, 33] geometric distance-based weighted aggregation, generating silhouettes by computing a probability related to the distance from pixels to mesh faces. 
However, to obtain a suitable sharp silhouette, the weight contribution of each position to the rendered pixel will decrease sharply with the increase of distance, and the gradient generated by the shape difference at the large distance will be small or zero, which makes it difficult to retain accurate geometry guidance. These renderers also encounter issues such as rendering error propagation and gradient instability [22]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": "Synthetic Dataset. Synthetic data [41, 52, 58] is commonly used to train 3D face reconstruction models [11, 17, 25]. However, these synthetic faces either prioritize the diversification of background, illumination, and identities [41, 52], or concentrate on pose variation [58], contributing to achieve good results in reconstructing natural facial expressions but struggling to reconstruct extreme expressions. To overcome these limitations and facilitate the related research, this paper adopts a GAN-based method [24] to synthesize realistic and diverse facial expression data, including closed eyes, open mouths, and frowns." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 265, + 388, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 265, + 388, + 278 + ], + "spans": [ + { + "bbox": [ + 306, + 265, + 388, + 278 + ], + "type": "text", + "content": "3. Methodology" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 285, + 392, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 285, + 392, + 296 + ], + "spans": [ + { + "bbox": [ + 306, + 285, + 392, + 296 + ], + "type": "text", + "content": "3.1. Preliminaries" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 303, + 546, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 303, + 546, + 327 + ], + "spans": [ + { + "bbox": [ + 305, + 303, + 546, + 327 + ], + "type": "text", + "content": "We conduct a face model, an illumination model, and a camera model based on [6, 11, 14, 17]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 333, + 545, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 333, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 305, + 333, + 545, + 357 + ], + "type": "text", + "content": "Face Model. 
The vertices and albedo of a 3D face is determined by the following formula:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 319, + 365, + 545, + 385 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 365, + 545, + 385 + ], + "spans": [ + { + "bbox": [ + 319, + 365, + 545, + 385 + ], + "type": "interline_equation", + "content": "V _ {3 d} (\\boldsymbol {\\alpha}) = \\boldsymbol {R} \\left(\\boldsymbol {\\alpha} _ {a}\\right) \\left(\\bar {\\boldsymbol {V}} + \\boldsymbol {\\alpha} _ {i d} \\boldsymbol {A} _ {i d} + \\boldsymbol {\\alpha} _ {\\exp} \\boldsymbol {A} _ {\\exp}\\right) + \\boldsymbol {\\alpha} _ {t} \\\\ - \\left(\\frac {\\partial}{\\partial t}\\right) \\quad , \\tag {1}", + "image_path": "02cc55626e087f603eb7b2a5bc742114f21e266d7d22a622e4be51d925c65d8b.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 320, + 381, + 418, + 393 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 381, + 418, + 393 + ], + "spans": [ + { + "bbox": [ + 320, + 381, + 418, + 393 + ], + "type": "interline_equation", + "content": "T _ {a l b} (\\boldsymbol {\\alpha}) = \\overline {{\\boldsymbol {T}}} + \\boldsymbol {\\alpha} _ {a l b} \\boldsymbol {A} _ {a l b}", + "image_path": "83434ecc70d39177ec2504b9b16da94df41eca4e746ccf26705cf65eb6c82b69.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "V_{3d}(\\alpha) \\in \\mathbb{R}^{3 \\times 35709}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": " is the 3D face vertices, " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\overline{\\boldsymbol{V}}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": " is the mean shape. " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "T_{alb}(\\alpha) \\in \\mathbb{R}^{3 \\times 35709}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": " is the albedo, " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\overline{T}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": " is the mean albedo. " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "A_{id}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "A_{exp}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "A_{alb}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": " are the face identity vector bases, the expression vector bases and the albedo vector bases, respectively. 
" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\alpha_{id} \\in \\mathbb{R}^{80}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\alpha_{exp} \\in \\mathbb{R}^{64}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\alpha_{alb} \\in \\mathbb{R}^{80}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": " are the identity parameter, the expression parameter and the albedo parameter, respectively. " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\alpha_{t} \\in \\mathbb{R}^{3}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": " is the translation parameter. " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\pmb{R}(\\pmb{\\alpha}_{a}) \\in \\mathbb{R}^{3 \\times 3}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": " is the rotation matrix corresponding to pitch/raw/roll angles " + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\alpha_{a} \\in \\mathbb{R}^{3}" + }, + { + "bbox": [ + 305, + 405, + 545, + 514 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 518, + 545, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 518, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 305, + 518, + 545, + 567 + ], + "type": "text", + "content": "Camera. We employ a camera with a fixed perspective projection, which is same as [11, 25]. Using this camera to re-project " + }, + { + "bbox": [ + 305, + 518, + 545, + 567 + ], + "type": "inline_equation", + "content": "V_{3d}(\\alpha)" + }, + { + "bbox": [ + 305, + 518, + 545, + 567 + ], + "type": "text", + "content": " into the 2D image plane yields " + }, + { + "bbox": [ + 305, + 518, + 545, + 567 + ], + "type": "inline_equation", + "content": "V_{2d}(\\alpha) \\in \\mathbb{R}^{2 \\times 35709}" + }, + { + "bbox": [ + 305, + 518, + 545, + 567 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 572, + 545, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 572, + 545, + 609 + ], + "spans": [ + { + "bbox": [ + 305, + 572, + 545, + 609 + ], + "type": "text", + "content": "Illumination Model. 
Following [14], we adopt Spherical Harmonics (SH) [40] for the estimation of the shaded texture " + }, + { + "bbox": [ + 305, + 572, + 545, + 609 + ], + "type": "inline_equation", + "content": "T_{tex}(\\alpha)" + }, + { + "bbox": [ + 305, + 572, + 545, + 609 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 344, + 617, + 545, + 646 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 617, + 545, + 646 + ], + "spans": [ + { + "bbox": [ + 344, + 617, + 545, + 646 + ], + "type": "interline_equation", + "content": "T _ {t e x} (\\boldsymbol {\\alpha}) = T _ {a l b} (\\boldsymbol {\\alpha}) \\odot \\sum_ {k = 1} ^ {9} \\boldsymbol {\\alpha} _ {s h} ^ {k} \\boldsymbol {\\Psi} _ {k} (\\boldsymbol {N}), \\tag {2}", + "image_path": "bce45b2946a230d6e3a7aa839dd79ab81f9741a1ea05a87738a6603712da46d5.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "text", + "content": " denotes the Hadamard product, " + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "text", + "content": " is the surface normal of " + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "inline_equation", + "content": "V_{3d}(\\alpha)" + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\Psi : \\mathbb{R}^3 \\to \\mathbb{R}" + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "text", + "content": " is the SH basis function and " + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\alpha_{sh} \\in \\mathbb{R}^9" + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "text", + "content": " is the corresponding SH parameter. In summary, " + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\alpha = [\\alpha_{id}, \\alpha_{\\mathrm{exp}}, \\alpha_a, \\alpha_t, \\alpha_{sh}]" + }, + { + "bbox": [ + 305, + 653, + 545, + 714 + ], + "type": "text", + "content": " is the undetermined parameter." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1674" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 264, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 264, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 264, + 85 + ], + "type": "text", + "content": "3.2. 
Point Transformation on the Image Plane" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "spans": [ + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": "Transforming Segmentation to 2D Points. For an input RGB face image " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "I \\in \\mathbb{R}^{H \\times W \\times 3}" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": ", the prediction of a face segmentation method can be represented by a set of binary tensors " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "M = \\{M_p | p \\in P\\}" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "P = \\{\\text{left-eye, right-eye, left_eyebrow, right_eyebrow, up\\_lip, down\\_lip, nose, skin}\\}" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "M_p \\in \\{0,1\\}^{H \\times W}" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": ". Specifically, " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "M_p^{(x,y)} = 1" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": " only if the 2D pixel position " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "(x,y)" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "M_p" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": " belongs to a certain face part " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": ", and otherwise " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "M_p^{(x,y)} = 0" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": " can be transformed into a set of point sets " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "C = \\{C_p | p \\in P\\}" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "inline_equation", + "content": "C_p = \\{(x,y) | if M_p^{(x,y)} = 1\\}" + }, + { + "bbox": [ + 47, + 95, + 289, + 294 + ], + "type": "text", + "content": ". In this step, we employ DML-CSR [55] for face segmentation, excluding the ear regions, filtering out noise from the segmentation, and dynamically removing the forehead region above the eyebrows based on their position. This procedure is illustrated in Fig. 3(a). More implementation details are provided in the supplemental materials." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "spans": [ + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "text", + "content": "Facial Part Annotation on 3D Face Model. Our objective is to leverage " + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "inline_equation", + "content": "\\{C_p\\}" + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "text", + "content": " for guiding 3D face reconstruction. Thus, we should ensure that the reconstructed mesh can be divided into regions consistent with the semantics of the 2D segmentation. Due to the topological consistency of the face model, every vertex on the mesh can be annotated for a specific region. However, existing annotations [27, 30, 49] do not conform to widely accepted 2D face segmentation definitions [24, 32], as shown in Fig.2(c). To address this misalignment, we introduce new part annotations on both BFM [5] and FaceVerse [51]. We partition the vertices based on their indices. " + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "inline_equation", + "content": "i \\in Ind_p" + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "text", + "content": " indicates that the " + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "text", + "content": "-th vertex (denoted as " + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "text", + "content": ") on the mesh belongs to part " + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "inline_equation", + "content": "\\{Ind_p|p \\in P\\}" + }, + { + "bbox": [ + 47, + 299, + 289, + 468 + ], + "type": "text", + "content": " can be obtained by:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 97, + 475, + 287, + 494 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 475, + 287, + 494 + ], + "spans": [ + { + "bbox": [ + 97, + 475, + 287, + 494 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} I ^ {s e g} = \\operatorname {S e g} (\\operatorname {R e n d e r} (V _ {3 d}, T e x)) \\\\ i \\subset I _ {i d d} - i f, I ^ {s e g} (v) \\subset v \\end{array} , \\tag {3}", + "image_path": "2c3c9b00bba20d07149db58cf82941bedc88c109044333899627d04cde288420.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 99, + 487, + 211, + 500 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 487, + 211, + 500 + ], + "spans": [ + { + "bbox": [ + 99, + 487, + 211, + 500 + ], + "type": "interline_equation", + "content": "i \\in I n d _ {p}, i f I ^ {s e g} (\\boldsymbol {v}) \\in p", + "image_path": "d7b35b335d06cc4bfe5960af28d4e2ae64397684dba80c3cc6b0896e4ab426e6.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "spans": [ + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "inline_equation", + "content": "\\text{Render}(\\cdot)" + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "content": " generates an image by applying texture on the mesh, and " + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "inline_equation", + "content": "\\text{Seg}(\\cdot)" + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "content": " is responsible for segmenting the rendered result. We employ different shape " + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "inline_equation", + "content": "V_{3d}" + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "content": " and varying textures " + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "inline_equation", + "content": "Tex" + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "content": " to label every " + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "inline_equation", + "content": "v \\in V_{3d}" + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "content": " with hand-crafted modification. The annotation " + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "inline_equation", + "content": "\\{Ind_p\\}" + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "content": " is pre-completed offline in the training process. 
Consequently, we utilize " + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "inline_equation", + "content": "\\{Ind_p\\}" + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "content": " to transform the re-projection " + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "inline_equation", + "content": "V_{2d}(\\alpha)" + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "content": " into semantic point sets " + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "inline_equation", + "content": "\\{V_{2d}^p (\\alpha)|p \\in P\\}" + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "content": ". Besides, the upper forehead region situated above the eyebrows is dynamically excluded to ensure consistency with target. Points obstructed by hair are removed based on " + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "inline_equation", + "content": "\\{C_p\\}" + }, + { + "bbox": [ + 47, + 508, + 289, + 653 + ], + "type": "text", + "content": ", as shown in Fig. 3(b). Please refer to supplemental materials for annotation details." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 658, + 264, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 658, + 264, + 672 + ], + "spans": [ + { + "bbox": [ + 47, + 658, + 264, + 672 + ], + "type": "text", + "content": "3.3. Part Re-projection Distance Loss (PRDL)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "text", + "content": "This section describes the design of PRDL, focusing on constructing geometric descriptors and establishing the relation between the prediction " + }, + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\{V_{2d}^p (\\alpha)\\}" + }, + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "text", + "content": " and the ground" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "content": "truth " + }, + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "inline_equation", + "content": "\\{C_p\\}" + }, + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "content": " for a given " + }, + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "inline_equation", + "content": "p \\in P" + }, + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "content": ", which is proved instrumental for face reconstruction." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "spans": [ + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": "In a more generalized formulation, considering two point sets " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "C = \\{c_1, c_2, \\dots, c_{|C|}\\}" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "C^* = \\{c_1^*, c_2^*, \\dots, c_{|C^*|}^*\\}" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": ", we aim to establish geometry descriptions by quantifying shape alignment between them for reconstruction. " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "C^*" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " may not possess the same number of points, and their points lack correspondence. Instead of directly searching the correspondence between the two sets, we use a set of fixed points as anchors " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "A = \\{a_1, a_2, \\dots, a_{|A|}\\}" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " and a collection of statistical distance functions " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "\\mathcal{F} = \\{f_1, f_2, \\dots, f_{|\\mathcal{F}|}\\}" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " to construct geometry description tensors " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "\\Gamma(C, A, \\mathcal{F}) \\in \\mathbb{R}^{|\\mathcal{A}| \\times |\\mathcal{F}|}" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "\\Gamma(C^*, A, \\mathcal{F}) \\in \\mathbb{R}^{|\\mathcal{A}| \\times |\\mathcal{F}|}" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "C^*" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": ", respectively (denoted as " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "\\Gamma^*" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " for brevity). 
The value " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "\\Gamma(i, j)" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "\\Gamma^*(i, j)" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " at the position " + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "inline_equation", + "content": "(i, j)" + }, + { + "bbox": [ + 305, + 99, + 547, + 256 + ], + "type": "text", + "content": " are determined by:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 373, + 271, + 545, + 304 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 271, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 373, + 271, + 545, + 304 + ], + "type": "interline_equation", + "content": "\\left\\{ \\begin{array}{l} \\boldsymbol {\\Gamma} (i, j) = f _ {j} (\\boldsymbol {C}, \\boldsymbol {a} _ {i}) \\\\ \\boldsymbol {\\Gamma} ^ {*} (i, j) = f _ {j} (\\boldsymbol {C} ^ {*}, \\boldsymbol {a} _ {i}), \\end{array} \\right. \\tag {4}", + "image_path": "27f9841cc6f44786f4882e32e0243aaa22f21073c2d3a141a61ffaee8a251237.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 316, + 545, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 316, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 305, + 316, + 545, + 352 + ], + "type": "text", + "content": "where every function " + }, + { + "bbox": [ + 305, + 316, + 545, + 352 + ], + "type": "inline_equation", + "content": "f_{j}(\\pmb {B},\\pmb {b})\\in \\mathcal{F}" + }, + { + "bbox": [ + 305, + 316, + 545, + 352 + ], + "type": "text", + "content": " describes the distance from a single point " + }, + { + "bbox": [ + 305, + 316, + 545, + 352 + ], + "type": "inline_equation", + "content": "\\pmb{b}" + }, + { + "bbox": [ + 305, + 316, + 545, + 352 + ], + "type": "text", + "content": " to a set of points " + }, + { + "bbox": [ + 305, + 316, + 545, + 352 + ], + "type": "inline_equation", + "content": "\\pmb{B}" + }, + { + "bbox": [ + 305, + 316, + 545, + 352 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 305, + 316, + 545, + 352 + ], + "type": "inline_equation", + "content": "f_{j}(\\pmb {B},\\pmb {b})" + }, + { + "bbox": [ + 305, + 316, + 545, + 352 + ], + "type": "text", + "content": " can be any statistically meaningful distance." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "spans": [ + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "text", + "content": "When fitting 3DMM to the segmented silhouettes for part " + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "text", + "content": ", we set " + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "\\boldsymbol{C} = V_{2d}^{p}(\\boldsymbol{\\alpha})" + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "C^* = C_p" + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "text", + "content": " with specified anchors " + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "text", + "content": " and a set of distance functions " + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "text", + "content": ". Then we calculate their corresponding geometry descriptor tensors " + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "\\Gamma_p = \\Gamma(V_{2d}^p(\\boldsymbol{\\alpha}), \\boldsymbol{A}, \\mathcal{F})" + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "\\Gamma_p^* = \\Gamma(C_p, \\boldsymbol{A}, \\mathcal{F})" + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "text", + "content": ". Part Re-projection Distance Loss (PRDL) " + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{prdl}" + }, + { + "bbox": [ + 305, + 354, + 547, + 427 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 359, + 441, + 545, + 462 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 441, + 545, + 462 + ], + "spans": [ + { + "bbox": [ + 359, + 441, + 545, + 462 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {p r d l} = \\sum_ {p \\in P} w _ {p r d l} ^ {p} \\left\\| \\boldsymbol {\\Gamma} _ {p} - \\boldsymbol {\\Gamma} _ {p} ^ {*} \\right\\| _ {2} ^ {2}, \\tag {5}", + "image_path": "f30ada2a4020fd70d9c66cbfa0e8784dce669e7b7f166b79fba6979bd327f4bd.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "spans": [ + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "inline_equation", + "content": "w_{prdl}^p" + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": " is the weight of each part " + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": ". 
In this paper, we set " + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": " as a collection of the nearest " + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "inline_equation", + "content": "(f_{min})" + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": ", furthest " + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "inline_equation", + "content": "(f_{max})" + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": ", and average " + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "inline_equation", + "content": "(f_{ave})" + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": " distance, i.e. " + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "inline_equation", + "content": "\\mathcal{F} = \\{f_{max}, f_{min}, f_{ave}\\}" + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": ". We set " + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": " as a " + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "inline_equation", + "content": "H \\times W" + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": " mesh grid. Then for " + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "inline_equation", + "content": "\\forall \\mathbf{a}_i \\in \\mathbf{A}" + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": ", the optimization objective of " + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{prdl}" + }, + { + "bbox": [ + 305, + 477, + 545, + 538 + ], + "type": "text", + "content": " is to:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 321, + 552, + 545, + 591 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 552, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 321, + 552, + 545, + 591 + ], + "type": "interline_equation", + "content": "\\left\\{ \\begin{array}{l} \\min | | f _ {m i n} (\\boldsymbol {C} _ {p}, \\boldsymbol {a} _ {i}) - f _ {m i n} (V _ {2 d} ^ {p} (\\boldsymbol {\\alpha}), \\boldsymbol {a} _ {i}) | | _ {2} ^ {2} \\\\ \\min | | f _ {m a x} (\\boldsymbol {C} _ {p}, \\boldsymbol {a} _ {i}) - f _ {m a x} (V _ {2 d} ^ {p} (\\boldsymbol {\\alpha}), \\boldsymbol {a} _ {i}) | | _ {2} ^ {2} \\\\ \\min | | f _ {a v e} (\\boldsymbol {C} _ {p}, \\boldsymbol {a} _ {i}) - f _ {a v e} (V _ {2 d} ^ {p} (\\boldsymbol {\\alpha}), \\boldsymbol {a} _ {i}) | | _ {2} ^ {2} \\end{array} . \\right. \\tag {6}", + "image_path": "f471427c84b6b150032d9566dfcc77b0aa5a5cb77a9323cab596b000f59b91e8.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "content": "This process is shown in Fig. 3(c). 
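To make the descriptor of Eq. (4) and the objective of Eqs. (5)-(6) concrete, here is a minimal NumPy sketch assuming the nearest/furthest/average distances and the H x W mesh-grid anchors described above; build_descriptor, prdl_loss and the toy point sets are illustrative names and data, not the released implementation.

```python
import numpy as np

def build_descriptor(points, anchors):
    # points: (N, 2) 2D point set, anchors: (M, 2) fixed anchor grid
    d = np.linalg.norm(anchors[:, None, :] - points[None, :, :], axis=-1)  # (M, N) pairwise distances
    # f_min, f_max, f_ave per anchor -> Gamma in R^{|A| x |F|}
    return np.stack([d.min(axis=1), d.max(axis=1), d.mean(axis=1)], axis=1)

def prdl_loss(pred_parts, gt_parts, anchors, weights):
    # Eq. (5): weighted squared L2 distance between the descriptors of the
    # re-projected part points and the segmentation-derived points
    loss = 0.0
    for p, v2d in pred_parts.items():
        gamma = build_descriptor(v2d, anchors)
        gamma_star = build_descriptor(gt_parts[p], anchors)
        loss += weights[p] * np.sum((gamma - gamma_star) ** 2)
    return loss

# toy usage: anchors on a small H x W mesh grid, one face part
H, W = 8, 8
ys, xs = np.mgrid[0:H, 0:W]
anchors = np.stack([xs.ravel(), ys.ravel()], axis=1).astype(float)
pred = {"left_eye": np.array([[2.0, 3.0], [3.0, 3.0]])}
gt = {"left_eye": np.array([[2.5, 3.0], [3.5, 3.0], [3.0, 3.5]])}
print(prdl_loss(pred, gt, anchors, {"left_eye": 1.0}))
```

Reshaping the first axis of the descriptor back to H x W gives per-anchor distance maps of the kind visualized in Fig. 3(c).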
When " + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "inline_equation", + "content": "p =" + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "content": " left_eye, PRDL minimizes the length difference between the indigo and orange lines (also as shown in Fig. 6(a) when " + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "inline_equation", + "content": "p =" + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "content": " right_eybrow). The upper right corner of Fig. 3(c) is a visualization of " + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\Gamma_{left\\_eye}" + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "content": " with the last channel separately by reshaping it from " + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{|A| \\times |\\mathcal{F}|}" + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{H \\times W \\times |\\mathcal{F}|}" + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "content": ". It is worth note that, the points number in " + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "inline_equation", + "content": "V_{2d}^{p}(\\alpha)" + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "inline_equation", + "content": "C_p" + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "content": " can be reduced by using Farthest Point Sampling (FPS) [38] to decrease computational costs." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1675" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 70, + 289, + 190 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 289, + 190 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 289, + 190 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 289, + 190 + ], + "type": "image", + "image_path": "766e73ee8c9c97609c8c05e2e3524c9f09ab61c9881f32c6004b1b618c9cdf79.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 79, + 191, + 254, + 202 + ], + "lines": [ + { + "bbox": [ + 79, + 191, + 254, + 202 + ], + "spans": [ + { + "bbox": [ + 79, + 191, + 254, + 202 + ], + "type": "text", + "content": "Figure 4. Synthesize emotional expression data." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 48, + 208, + 287, + 274 + ], + "blocks": [ + { + "bbox": [ + 48, + 208, + 287, + 274 + ], + "lines": [ + { + "bbox": [ + 48, + 208, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 208, + 287, + 274 + ], + "type": "image", + "image_path": "d19110c11f631ebae74ef38ce7b33c14019e99529639079797d13881cd02ee48.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 277, + 256, + 288 + ], + "lines": [ + { + "bbox": [ + 77, + 277, + 256, + 288 + ], + "spans": [ + { + "bbox": [ + 77, + 277, + 256, + 288 + ], + "type": "text", + "content": "Figure 5. Examples of our synthetic face dataset." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 300, + 138, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 300, + 138, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 300, + 138, + 312 + ], + "type": "text", + "content": "3.4. Overall Losses" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 319, + 287, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 319, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 47, + 319, + 287, + 342 + ], + "type": "text", + "content": "To reconstruct a 3D face from image " + }, + { + "bbox": [ + 47, + 319, + 287, + 342 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 47, + 319, + 287, + 342 + ], + "type": "text", + "content": ", we build frameworks to minimize the total loss " + }, + { + "bbox": [ + 47, + 319, + 287, + 342 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 47, + 319, + 287, + 342 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 81, + 350, + 287, + 380 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 350, + 287, + 380 + ], + "spans": [ + { + "bbox": [ + 81, + 350, + 287, + 380 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} = \\lambda_ {p r d l} \\mathcal {L} _ {p r d l} + \\lambda_ {l m k} \\mathcal {L} _ {l m k} + \\lambda_ {p h o} \\mathcal {L} _ {p h o} \\tag {7} \\\\ + \\lambda_ {p e r} \\mathcal {L} _ {p e r} + \\lambda_ {r e g} \\mathcal {L} _ {r e g}, \\\\ \\end{array}", + "image_path": "ddeb8674007e2f219ac402b859427dff125843f358954ac4ac48c9597d73072e.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "spans": [ + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{lmk}" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": " is the landmark loss, we use detectors to locate 240 2D landmarks for " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{lmk}" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": " and adopt the dynamic landmark marching [57] to handle the non-correspondence between 2D and 3D cheek contour landmarks arising from pose variations. 
The photometric loss " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{pho}" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": " and the perceptual loss " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{per}" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": " are based on [11, 14]. " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reg}" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": " is the regularization loss for " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\lambda_{prdl} = 0.8e - 3" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\lambda_{lmk} = 1.6e - 3" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\lambda_{pho} = 1.9" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\lambda_{per} = 0.2" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\lambda_{reg} = 3e - 4" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": " are the balance weights. " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{prdl}" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{lmk}" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": " are normalized by " + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "inline_equation", + "content": "H\\times W" + }, + { + "bbox": [ + 47, + 388, + 287, + 496 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 503, + 244, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 503, + 244, + 516 + ], + "spans": [ + { + "bbox": [ + 47, + 503, + 244, + 516 + ], + "type": "text", + "content": "3.5. Synthetic Emotional Expression Data" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": "Benefiting from recent developments in face editing research [24, 47], we can generate realistic faces through segmentation " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": ". We aim to mass-produce realistic and diverse facial expression data. 
To achieve this, we start by obtaining the segmentation " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": " and landmarks " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "lmk" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": " of the original image " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": " with a segmentation method [55] and a landmark detector, respectively. Leveraging the location of landmarks " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "lmk" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": ", we apply affine transformation with various patterns onto the segmentation " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": ", resulting in " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "M'" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": ". Subsequently, " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "M'" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": " is fed into the generative network [24] to produce a new facial expression image " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "I'" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": ", as depicted in Fig. 4. Based on CelebA [35] and CelebAMask-HQ [24], we have generated a dataset comprising more than " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "200K" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": " images, including expressions such as closed-eye, open-mouth, and frown, as depicted in Fig. 5. This dataset will be publicly available to facilitate research." 
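As a rough illustration of how a part mask can be warped before being passed to the mask-conditioned generator, the sketch below applies a landmark-centred affine scaling with OpenCV; the scaling pattern, the centroid-based center and the generator call are assumptions for illustration and do not reproduce the released pipeline.

```python
import numpy as np
import cv2  # assumption: OpenCV is available for the affine warp

def edit_part_mask(mask, center, scale=1.4, shift=(0, 0)):
    # scale one binary part mask about a landmark-derived center,
    # e.g. enlarging the lip masks to fake an open mouth
    cx, cy = center
    M = np.array([[scale, 0.0, (1.0 - scale) * cx + shift[0]],
                  [0.0, scale, (1.0 - scale) * cy + shift[1]]], dtype=np.float32)
    h, w = mask.shape
    return cv2.warpAffine(mask, M, (w, h), flags=cv2.INTER_NEAREST)

# toy usage with a stand-in lip mask; in practice the center comes from detected landmarks
mask = np.zeros((256, 256), dtype=np.uint8)
mask[150:170, 100:160] = 1
ys, xs = np.nonzero(mask)
edited = edit_part_mask(mask, (xs.mean(), ys.mean()))
# 'edited' replaces the original part inside M', which is then fed to the
# segmentation-conditioned generator to synthesize the new expression image I'
```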
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 72, + 402, + 165 + ], + "blocks": [ + { + "bbox": [ + 309, + 72, + 402, + 165 + ], + "lines": [ + { + "bbox": [ + 309, + 72, + 402, + 165 + ], + "spans": [ + { + "bbox": [ + 309, + 72, + 402, + 165 + ], + "type": "image", + "image_path": "5fc61e55e55d6e9c3c8976dad38e3a3e67baaf1d815ebc5ab83b4ea4584e3946.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 166, + 358, + 175 + ], + "lines": [ + { + "bbox": [ + 342, + 166, + 358, + 175 + ], + "spans": [ + { + "bbox": [ + 342, + 166, + 358, + 175 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 405, + 72, + 498, + 165 + ], + "blocks": [ + { + "bbox": [ + 405, + 72, + 498, + 165 + ], + "lines": [ + { + "bbox": [ + 405, + 72, + 498, + 165 + ], + "spans": [ + { + "bbox": [ + 405, + 72, + 498, + 165 + ], + "type": "image", + "image_path": "d6479ca4c8bb2e358c69b3f9a3551d1def43942ce23c5a305e1a506de1f537a6.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 443, + 166, + 458, + 175 + ], + "lines": [ + { + "bbox": [ + 443, + 166, + 458, + 175 + ], + "spans": [ + { + "bbox": [ + 443, + 166, + 458, + 175 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 501, + 72, + 545, + 165 + ], + "blocks": [ + { + "bbox": [ + 501, + 72, + 545, + 165 + ], + "lines": [ + { + "bbox": [ + 501, + 72, + 545, + 165 + ], + "spans": [ + { + "bbox": [ + 501, + 72, + 545, + 165 + ], + "type": "image", + "image_path": "5e1b49134b57f58610cec69f9dded9de03593de84919e22809059b2ef388c1f0.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 514, + 166, + 529, + 175 + ], + "lines": [ + { + "bbox": [ + 514, + 166, + 529, + 175 + ], + "spans": [ + { + "bbox": [ + 514, + 166, + 529, + 175 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "lines": [ + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "spans": [ + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "text", + "content": "Figure 6. (a): " + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "inline_equation", + "content": "p =" + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "text", + "content": " right_eyebrow when the closest distance " + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "inline_equation", + "content": "(f_{min})" + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "text", + "content": " is compared. (b): The gradient descent of PRDL for (a). (c): " + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "inline_equation", + "content": "\\mathbf{\\Gamma}_p^*" + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "text", + "content": " is the regression target of PRDL in " + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "inline_equation", + "content": "f_{min}" + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "text", + "content": " channel. 
(d): " + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "inline_equation", + "content": "M_p" + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "text", + "content": " is the regression target of renderer-based methods. " + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "inline_equation", + "content": "\\mathbf{\\Gamma}_p^*" + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "text", + "content": " is more informative than " + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "inline_equation", + "content": "M_p" + }, + { + "bbox": [ + 305, + 178, + 545, + 233 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 243, + 525, + 256 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 243, + 525, + 256 + ], + "spans": [ + { + "bbox": [ + 306, + 243, + 525, + 256 + ], + "type": "text", + "content": "4. Analysis of PRDL and Related Methods" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "text", + "content": "The Gradient of PRDL. With anchors and distance functions as the bridge, PRDL establishes the geometry descriptions of the two point sets. In Fig. 6, we take " + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "inline_equation", + "content": "p =" + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "text", + "content": " right_eyebrow as an example to analyze the gradient of PRDL. When considering " + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "inline_equation", + "content": "f_{min}" + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "text", + "content": " and a specific anchor " + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "inline_equation", + "content": "\\pmb{a}_i \\in \\pmb{A}" + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "inline_equation", + "content": "f_{min}" + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "text", + "content": " identifies " + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "inline_equation", + "content": "\\pmb{c}_m" + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "inline_equation", + "content": "\\pmb{v}_n" + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "inline_equation", + "content": "C_p" + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "inline_equation", + "content": "V_{2d}^p(\\alpha)" + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "text", + "content": ", respectively, by selecting the ones closest to " + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "inline_equation", + "content": "\\pmb{a}_i" + }, + { + "bbox": [ + 305, + 269, + 545, + 353 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 347, + 361, + 545, + 381 + ], + "type": "interline_equation", + "angle": 0, + "lines": 
[ + { + "bbox": [ + 347, + 361, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 347, + 361, + 545, + 381 + ], + "type": "interline_equation", + "content": "m = \\underset {j} {\\arg \\min } \\| \\boldsymbol {a} _ {i} - \\boldsymbol {c} _ {j} \\| _ {2}, \\quad \\boldsymbol {c} _ {j} \\in C _ {p}, \\tag {8}", + "image_path": "42b59afc5ce43a2ff830a88535a2cb1066c32ff6c7c39abbb2263c3bafba4561.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 340, + 391, + 545, + 411 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 391, + 545, + 411 + ], + "spans": [ + { + "bbox": [ + 340, + 391, + 545, + 411 + ], + "type": "interline_equation", + "content": "n = \\underset {j} {\\arg \\min } \\| \\boldsymbol {a} _ {i} - \\boldsymbol {v} _ {j} \\| _ {2}, \\quad \\boldsymbol {v} _ {j} \\in V _ {2 d} ^ {p} (\\boldsymbol {\\alpha}). \\tag {9}", + "image_path": "afd0db0557c431d57cd58e4b028c38eaff4fe682b83c02d2484bb90f5fea2b33.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 415, + 545, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 415, + 545, + 439 + ], + "spans": [ + { + "bbox": [ + 306, + 415, + 545, + 439 + ], + "type": "text", + "content": "Under the definition of PRDL, the corresponding energy function " + }, + { + "bbox": [ + 306, + 415, + 545, + 439 + ], + "type": "inline_equation", + "content": "E_{i,m,n}" + }, + { + "bbox": [ + 306, + 415, + 545, + 439 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 306, + 415, + 545, + 439 + ], + "type": "inline_equation", + "content": "\\pmb{a}_i,\\pmb{c}_m" + }, + { + "bbox": [ + 306, + 415, + 545, + 439 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 415, + 545, + 439 + ], + "type": "inline_equation", + "content": "\\pmb{v}_n" + }, + { + "bbox": [ + 306, + 415, + 545, + 439 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 343, + 446, + 545, + 477 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 446, + 545, + 477 + ], + "spans": [ + { + "bbox": [ + 343, + 446, + 545, + 477 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} E _ {i, m, n} = \\left(\\left\\| \\boldsymbol {a} _ {i} - \\boldsymbol {c} _ {m} \\right\\| _ {2} - \\left\\| \\boldsymbol {a} _ {i} - \\boldsymbol {v} _ {n} \\right\\| _ {2}\\right) ^ {2} \\tag {10} \\\\ = \\left(d _ {i, m} - d _ {i, n}\\right) ^ {2}, \\\\ \\end{array}", + "image_path": "ab092de264d32a2b1d1330f45c7e18a177e3abc7e9c85a9568ba16056600c526.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 483, + 545, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 483, + 545, + 509 + ], + "spans": [ + { + "bbox": [ + 305, + 483, + 545, + 509 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 483, + 545, + 509 + ], + "type": "inline_equation", + "content": "d_{i,m} = ||\\pmb{a}_i - \\pmb{c}_m||_2, d_{i,n} = ||\\pmb{a}_i - \\pmb{v}_n||_2" + }, + { + "bbox": [ + 305, + 483, + 545, + 509 + ], + "type": "text", + "content": ". 
The gradient descent of " + }, + { + "bbox": [ + 305, + 483, + 545, + 509 + ], + "type": "inline_equation", + "content": "E_{i,m,n}" + }, + { + "bbox": [ + 305, + 483, + 545, + 509 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 305, + 483, + 545, + 509 + ], + "type": "inline_equation", + "content": "\\pmb{v}_n" + }, + { + "bbox": [ + 305, + 483, + 545, + 509 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 352, + 515, + 545, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 352, + 515, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 352, + 515, + 545, + 533 + ], + "type": "interline_equation", + "content": "- \\frac {\\partial E _ {i , m , n}}{\\partial \\boldsymbol {v} _ {n}} = 2 (\\boldsymbol {v} _ {n} - \\boldsymbol {a} _ {i}) \\left(\\frac {d _ {i , m}}{d _ {i , n}} - 1\\right). \\tag {11}", + "image_path": "655e381531837c4370e57c245b7b80ee22d0fa8588b59ec0d47dbd0f11be58be.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": "The physical explanation of Eqn. 11 is comprehensible and concise: the direction of " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "-\\nabla E_{i,m,n}" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": " always aligns with the line connecting " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "\\pmb{a}_i" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "\\pmb{v}_n" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": ", if " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "d_{i,n} > d_{i,m}" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": ", the direction of " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "-\\nabla E_{i,m,n}" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": " is from " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "\\pmb{v}_n" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "\\pmb{a}_i" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": " (as shown in Fig. 6(b)), and vice versa. In the context of gradient descent, the effect of " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "-\\nabla E_{i,m,n}" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": " is to make " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "d_{i,n} = d_{i,m}" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": " as much as possible. 
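As a quick numerical sanity check of Eq. (11) (a sketch with made-up coordinates, not part of the paper's code), the PyTorch snippet below differentiates the energy of Eq. (10) and compares it against the closed form; with d_{i,n} > d_{i,m}, the negative gradient indeed points from v_n toward a_i.

```python
import torch

a_i = torch.tensor([0.0, 0.0])                      # anchor
c_m = torch.tensor([1.0, 0.0])                      # matched segmentation point, d_im = 1
v_n = torch.tensor([3.0, 0.0], requires_grad=True)  # matched re-projected vertex, d_in = 3

d_im = torch.norm(a_i - c_m)
d_in = torch.norm(a_i - v_n)
E = (d_im - d_in) ** 2                              # energy of Eq. (10)
E.backward()

# closed form of Eq. (11): -dE/dv_n = 2 (v_n - a_i) (d_im / d_in - 1)
closed_form = 2 * (v_n.detach() - a_i) * (d_im / d_in.detach() - 1)
print(-v_n.grad, closed_form)  # both equal (-4, 0): the step pulls v_n toward a_i
```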
Given " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "\\pmb{A}" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "f_{min}" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": ", the gradient descent of " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{prdl}" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "inline_equation", + "content": "\\pmb{v}_n" + }, + { + "bbox": [ + 305, + 538, + 545, + 633 + ], + "type": "text", + "content": " is the aggregation of all anchors:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 358, + 639, + 545, + 682 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 639, + 545, + 682 + ], + "spans": [ + { + "bbox": [ + 358, + 639, + 545, + 682 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} - \\frac {\\partial \\mathcal {L} _ {p r d l}}{\\partial \\boldsymbol {v} _ {n}} = - w _ {p r d l} ^ {p} \\sum \\frac {\\partial E _ {i , m , n}}{\\partial \\boldsymbol {v} _ {n}} \\\\ = - w _ {p r d l} ^ {p} \\sum_ {i, m} ^ {i, m} \\nabla E _ {i, m, n}. \\tag {12} \\\\ \\end{array}", + "image_path": "7e43317f52b49f76667cef566ec7e84b768c90f65dab6fb1d4cbf0f3145db793.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "content": "The scenario with " + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "f_{max}" + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "content": " is similar to that of " + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "f_{min}" + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "content": ", with the only distinction lying in the selection of points. " + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "f_{max}" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1676" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 96, + 545, + 217 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 545, + 93 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 545, + 93 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 545, + 93 + ], + "type": "text", + "content": "Table 1. Quantitative comparison on Part IoU benchmark. The best and runner-up are highlighted in bold and underlined, respectively. R_eye denotes the right eye, and similar definitions for the rest are omitted." 
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 96, + 545, + 217 + ], + "lines": [ + { + "bbox": [ + 49, + 96, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 49, + 96, + 545, + 217 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="2">Methods</td><td colspan="8">Part IoU(%)↑</td></tr>
<tr><td>R_eye avg.± std.</td><td>L_eye avg.± std.</td><td>R_brow avg.± std.</td><td>L_brow avg.± std.</td><td>Nose avg.± std.</td><td>Up_lip avg.± std.</td><td>Down_lip avg.± std.</td><td>avg.</td></tr>
<tr><td>PRNet [13]</td><td>65.87±16.36</td><td>66.73±14.74</td><td>61.46±15.89</td><td>59.18±16.31</td><td>83.34±4.57</td><td>50.88±18.35</td><td>58.16±17.72</td><td>63.66</td></tr>
<tr><td>MGCNet [45]</td><td>64.42±16.02</td><td>64.81±16.91</td><td>55.25±15.29</td><td>61.30±15.58</td><td>87.40±3.51</td><td>41.16±19.70</td><td>66.22±13.83</td><td>62.94</td></tr>
<tr><td>Deep3D [11]</td><td>71.87±12.00</td><td>70.52±12.19</td><td>64.66±11.31</td><td>64.70±11.98</td><td>87.69±3.51</td><td>61.21±15.60</td><td>65.95±13.08</td><td>69.51</td></tr>
<tr><td>3DDFA-v2 [17]</td><td>61.39±15.98</td><td>57.51±18.09</td><td>43.38±25.25</td><td>38.85±24.38</td><td>80.83±4.92</td><td>50.20±17.17</td><td>59.01±15.23</td><td>55.88</td></tr>
<tr><td>HRN [25]</td><td>73.31±11.39</td><td>73.61±11.50</td><td>67.91±8.26</td><td>66.78±10.27</td><td>90.00±2.60</td><td>63.80±14.16</td><td>66.40±11.94</td><td>71.69</td></tr>
<tr><td>DECA [14]</td><td>58.09±21.40</td><td>62.56±19.41</td><td>55.27±19.49</td><td>51.86±19.93</td><td>86.54±9.11</td><td>56.39±16.96</td><td>62.81±17.66</td><td>61.93</td></tr>
<tr><td>Ours (w/o Lprdl)</td><td>70.72±9.44</td><td>75.69±10.79</td><td>71.11±8.58</td><td>71.69±8.73</td><td>88.35±4.60</td><td>57.26±15.97</td><td>69.71±10.68</td><td>72.08</td></tr>
<tr><td>Ours (w/o Syn. Data)</td><td>73.81±10.12</td><td>72.55±10.68</td><td>72.24±9.23</td><td>70.90±8.55</td><td>88.71±4.11</td><td>57.43±14.37</td><td>69.87±10.54</td><td>72.22</td></tr>
<tr><td>Ours</td><td>74.55±11.46</td><td>76.06±10.32</td><td>74.00±7.72</td><td>74.05±7.70</td><td>89.06±3.53</td><td>58.16±12.76</td><td>70.86±10.34</td><td>73.82</td></tr>
</table>
", + "image_path": "a3746a0e263eb2f59e75c06cb3803df38c8b23f75f635aa7fd8595b269610798.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 49, + 247, + 545, + 352 + ], + "blocks": [ + { + "bbox": [ + 47, + 221, + 545, + 243 + ], + "lines": [ + { + "bbox": [ + 47, + 221, + 545, + 243 + ], + "spans": [ + { + "bbox": [ + 47, + 221, + 545, + 243 + ], + "type": "text", + "content": "Table 2. Quantitative comparison on Realty benchmark. Lower values indicate better results. The best and runner-up are highlighted in bold and underlined, respectively." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 247, + 545, + 352 + ], + "lines": [ + { + "bbox": [ + 49, + 247, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 49, + 247, + 545, + 352 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="2">Methods</td><td colspan="5">Frontal-view (mm) ↓</td><td colspan="5">Side-view (mm) ↓</td></tr>
<tr><td>Nose avg.± std.</td><td>Mouth avg.± std.</td><td>Forehead avg.± std.</td><td>Cheek avg.± std.</td><td>avg.</td><td>Nose avg.± std.</td><td>Mouth avg.± std.</td><td>Forehead avg.± std.</td><td>Cheek avg.± std.</td><td>avg.</td></tr>
<tr><td>PRNet [13]</td><td>1.923±0.518</td><td>1.838±0.637</td><td>2.429±0.588</td><td>1.863±0.698</td><td>2.013</td><td>1.868±0.510</td><td>1.856±0.607</td><td>2.445±0.570</td><td>1.960±0.731</td><td>2.032</td></tr>
<tr><td>MGCNet [45]</td><td>1.771±0.380</td><td>1.417±0.409</td><td>2.268±0.503</td><td>1.639±0.650</td><td>1.774</td><td>1.827±0.383</td><td>1.409±0.418</td><td>2.248±0.508</td><td>1.665±0.644</td><td>1.787</td></tr>
<tr><td>Deep3D [11]</td><td>1.719±0.354</td><td>1.368±0.439</td><td>2.015±0.449</td><td>1.528±0.501</td><td>1.657</td><td>1.749±0.343</td><td>1.411±0.395</td><td>2.074±0.486</td><td>1.528±0.517</td><td>1.691</td></tr>
<tr><td>3DDFA-v2 [17]</td><td>1.903±0.517</td><td>1.597±0.478</td><td>2.447±0.647</td><td>1.757±0.642</td><td>1.926</td><td>1.883±0.499</td><td>1.642±0.501</td><td>2.465±0.622</td><td>1.781±0.636</td><td>1.943</td></tr>
<tr><td>HRN [25]</td><td>1.722±0.330</td><td>1.357±0.523</td><td>1.995±0.476</td><td>1.072±0.333</td><td>1.537</td><td>1.642±0.310</td><td>1.285±0.528</td><td>1.906±0.479</td><td>1.038±0.322</td><td>1.468</td></tr>
<tr><td>DECA [14]</td><td>1.694±0.355</td><td>2.516±0.839</td><td>2.394±0.576</td><td>1.479±0.535</td><td>2.010</td><td>1.903±1.050</td><td>2.472±1.079</td><td>2.423±0.720</td><td>1.630±1.135</td><td>2.107</td></tr>
<tr><td>Ours (w/o Lprdl)</td><td>1.671±0.332</td><td>1.460±0.474</td><td>2.001±0.428</td><td>1.142±0.315</td><td>1.568</td><td>1.665±0.349</td><td>1.297±0.400</td><td>2.016±0.448</td><td>1.134±0.342</td><td>1.528</td></tr>
<tr><td>Ours (w/o Syn. Data)</td><td>1.592±0.327</td><td>1.339±0.433</td><td>1.823±0.407</td><td>1.119±0.332</td><td>1.468</td><td>1.628±0.320</td><td>1.229±0.433</td><td>1.872±0.407</td><td>1.091±0.312</td><td>1.455</td></tr>
<tr><td>Ours</td><td>1.586±0.306</td><td>1.238±0.373</td><td>1.810±0.394</td><td>1.111±0.327</td><td>1.436</td><td>1.623±0.313</td><td>1.205±0.366</td><td>1.864±0.424</td><td>1.076±0.315</td><td>1.442</td></tr>
</table>
", + "image_path": "e263c5edec0776988387ee454347fd06794f38b88a204b5793f26c84d935602f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "spans": [ + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "text", + "content": "also has the capability to constrain " + }, + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "inline_equation", + "content": "V_{2d}^{p}(\\alpha)" + }, + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "text", + "content": " within the confines of " + }, + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "inline_equation", + "content": "C_p" + }, + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "inline_equation", + "content": "f_{ave}" + }, + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "text", + "content": " acts on the entire " + }, + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "inline_equation", + "content": "V_{2d}^{p}(\\alpha)" + }, + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "text", + "content": ", striving to bring its centroid as close as possible to the centroid of " + }, + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "inline_equation", + "content": "C_p" + }, + { + "bbox": [ + 46, + 360, + 287, + 445 + ], + "type": "text", + "content": ". The introduction of additional anchors and the integration of diverse statistical distances in PRDL prevent the optimization from local optima and provide sufficient geometric signals. Please refer to supplementary materials for more details." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "text", + "content": "PRDL vs. Renderer-Based Loss: An intuitive approach for fitting segmentation is to use the renderer-based IoU loss, where differentiable silhouette renderers play a crucial role. Consequently, we delve into the distinctions between PRDL and renderers. We can reshape " + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\Gamma_p^*" + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{|A| \\times |\\mathcal{F}|} \\to \\mathbb{R}^{H \\times W \\times |\\mathcal{F}|}" + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "text", + "content": ") to visualize it with the last channel separately. Fig. 6(c) illustrates the visualization of the " + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "inline_equation", + "content": "f_{min}" + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "text", + "content": " channel for " + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "inline_equation", + "content": "p =" + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "text", + "content": " right_eyebrow, while Fig. 6(d) represents the silhouette rendered by [33] or [8]. 
In comparison with the regression target " + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "inline_equation", + "content": "M_p" + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "text", + "content": " utilized in renderer-based methods, " + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "inline_equation", + "content": "\\Gamma_p^*" + }, + { + "bbox": [ + 46, + 455, + 287, + 601 + ], + "type": "text", + "content": " in PRDL is more informative and more conducive to fitting. Please refer to supplementary materials for more details." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": "Furthermore, considering existing theoretical analyses [8, 22, 56], PRDL exhibits several notable advantages. First, in these renderers, all triangles constituting the object influence every pixel within the silhouettes, making it intricate to isolate specific geometric features. In contrast, " + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "inline_equation", + "content": "f_{min}" + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "inline_equation", + "content": "f_{max}" + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": " in PRDL matches the nearest or furthest point on the object, allowing for a more straightforward measurement of the shape's boundary characteristics. Secondly, these renderers either neglect pixels outside any triangles of" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 360, + 545, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 360, + 545, + 480 + ], + "spans": [ + { + "bbox": [ + 304, + 360, + 545, + 480 + ], + "type": "text", + "content": "the 3D object or assign minimal weights to them, emphasizing the rendered object region. However, this operation is equivalent to selectively choosing anchors " + }, + { + "bbox": [ + 304, + 360, + 545, + 480 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 304, + 360, + 545, + 480 + ], + "type": "text", + "content": " in the interior of the rendered shape, while the external anchors are either not chosen or treated differently by assigning small weights, thereby diminishing descriptive power. In Eqn. 11, Eqn. 12 and Fig. 6(b), we have analyzed that external anchors play a significant role in the fitting process. Ablation study (Fig.8) also proves that PRDL is more effective than renderer-based methods like [8, 33, 56]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 491, + 388, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 491, + 388, + 504 + ], + "spans": [ + { + "bbox": [ + 306, + 491, + 388, + 504 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 510, + 432, + 523 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 510, + 432, + 523 + ], + "spans": [ + { + "bbox": [ + 306, + 510, + 432, + 523 + ], + "type": "text", + "content": "5.1. 
Experimental Settings" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 534, + 545, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 583 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 583 + ], + "type": "text", + "content": "Reconstruction Frameworks. We implement PRDL based on PyTorch [39] and PyTorch3D [42]. We use ResNet-50 [18] as the backbone to predict " + }, + { + "bbox": [ + 304, + 534, + 545, + 583 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 534, + 545, + 583 + ], + "type": "text", + "content": ". The input image is cropped and aligned by [10], and resized into " + }, + { + "bbox": [ + 304, + 534, + 545, + 583 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 304, + 534, + 545, + 583 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 587, + 545, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 587, + 545, + 660 + ], + "spans": [ + { + "bbox": [ + 304, + 587, + 545, + 660 + ], + "type": "text", + "content": "Data. The face images are from publicly available datasets: Dad-3dheads [37], CelebA [35], RAF-ML [28], RAF-DB [29] and 300W [43]. Our synthetic images are mainly from [24, 35]. We use [58] for face pose augmentation. In total, our training data contained about " + }, + { + "bbox": [ + 304, + 587, + 545, + 660 + ], + "type": "inline_equation", + "content": "600K" + }, + { + "bbox": [ + 304, + 587, + 545, + 660 + ], + "type": "text", + "content": " face images. We employ DML-CSR [55] to predict 2D face segmentation." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "type": "text", + "content": "Implementation Details. Considering the inherent feature of 2D segmentation, if part " + }, + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "type": "text", + "content": " of a face is invisible or occluded, it may lead to " + }, + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "type": "inline_equation", + "content": "C_p = \\varnothing" + }, + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "type": "text", + "content": ". In such a situation during training, we set " + }, + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "type": "inline_equation", + "content": "w_{prdl}^p = 0" + }, + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "type": "text", + "content": " for these samples. 
We use Adam" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1677" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 72, + 545, + 423 + ], + "blocks": [ + { + "bbox": [ + 49, + 72, + 545, + 423 + ], + "lines": [ + { + "bbox": [ + 49, + 72, + 545, + 423 + ], + "spans": [ + { + "bbox": [ + 49, + 72, + 545, + 423 + ], + "type": "image", + "image_path": "3c56e60d7b03eb66c8ea5c6a50b08deb753cc6e193d82074e49255858a1239de.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 426, + 539, + 437 + ], + "lines": [ + { + "bbox": [ + 52, + 426, + 539, + 437 + ], + "spans": [ + { + "bbox": [ + 52, + 426, + 539, + 437 + ], + "type": "text", + "content": "Figure 7. Qualitative comparison with the other methods. Our method achieves realistic reconstructions, particularly in the eye region." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 445, + 287, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 445, + 287, + 506 + ], + "spans": [ + { + "bbox": [ + 46, + 445, + 287, + 506 + ], + "type": "text", + "content": "[23] as the optimizer with an initial learning rate of " + }, + { + "bbox": [ + 46, + 445, + 287, + 506 + ], + "type": "inline_equation", + "content": "1e - 4" + }, + { + "bbox": [ + 46, + 445, + 287, + 506 + ], + "type": "text", + "content": ". We use Farthest Point Sampling (FPS) [38] to reduce the point number of " + }, + { + "bbox": [ + 46, + 445, + 287, + 506 + ], + "type": "inline_equation", + "content": "V_{2d}^{skin}(\\alpha)" + }, + { + "bbox": [ + 46, + 445, + 287, + 506 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 445, + 287, + 506 + ], + "type": "inline_equation", + "content": "C_{skin}" + }, + { + "bbox": [ + 46, + 445, + 287, + 506 + ], + "type": "text", + "content": " to 3000, reducing computational consumption. Please refer to supplemental materials for more details." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 517, + 103, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 517, + 103, + 529 + ], + "spans": [ + { + "bbox": [ + 47, + 517, + 103, + 529 + ], + "type": "text", + "content": "5.2. Metric" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 538, + 288, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 538, + 288, + 694 + ], + "spans": [ + { + "bbox": [ + 46, + 538, + 288, + 694 + ], + "type": "text", + "content": "In various VR/AR applications, 3DMMs are crucial in capturing facial motions or providing fine-grained regions covering facial features. One crucial objective in such applications is to ensure the alignment of overlapping facial parts between prediction and input. Widely used benchmarks [7, 44] typically rely on the 3D accuracy performance of reconstructions. However, there are instances where inconsistencies arise between 3D errors and 2D alignments. As shown in Fig.2(b), comparing with 3DDFA-v2 [17], DECA [14] have better 2D eye region overlapping IoU (70.29% vs. 39.37%) but a higher 3D forehead error (1.88mm vs. 1.75mm). 
To address this, we introduce Part IoU to emphasize the performance of overlap." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "type": "text", + "content": "Part IoU is a new benchmark to quantify how well the part" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "text", + "content": "reconstruction " + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "inline_equation", + "content": "V_{3d}^{p}(\\alpha)" + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "text", + "content": " aligns with their corresponding parts from the original face. The core idea is to measure the overlap of facial components between the reconstruction and the original image using IoU. The ground truth is a binary tensor " + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "inline_equation", + "content": "\\{M_p\\}" + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "text", + "content": " (as defined above). We render " + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "inline_equation", + "content": "V_{3d}(\\alpha)" + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "text", + "content": " with a mean texture as an image, generate the predicted segmentation " + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "inline_equation", + "content": "\\{M_p^{pred}\\}" + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "text", + "content": " with [55]. The use of mean texture focuses the metric more on overlap effects than other factors, making it applicable to methods without texture-fitting [13, 17]. Part IoU " + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "inline_equation", + "content": "IoU_p" + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "text", + "content": " of part " + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 445, + 545, + 566 + ], + "type": "text", + "content": " can be obtained by:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 367, + 569, + 545, + 585 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 569, + 545, + 585 + ], + "spans": [ + { + "bbox": [ + 367, + 569, + 545, + 585 + ], + "type": "interline_equation", + "content": "I o U _ {p} = I o U \\left(M _ {p} ^ {p r e d}, M _ {p}\\right). \\tag {13}", + "image_path": "9fa9181041c30e5b15f836585d260fe722e178c936405ebf022432267d0d8448.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 588, + 545, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 588, + 545, + 658 + ], + "spans": [ + { + "bbox": [ + 304, + 588, + 545, + 658 + ], + "type": "text", + "content": "MEAD [50] is an emotional talking-face dataset. We test Part IoU by selecting 10 individuals from MEAD, each contributing 50 random different images. Part IoU measures the overlap performance between each part of the reconstruction and the ground truth. More detail is in the supplemental materials." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "REALY [7] benchmark consists of 100 scanned neutral expression faces, which are divided into four parts: nose, mouth, forehead (eyes and eyebrows), and cheek for 3D alignment and distance error calculation." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1678" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 71, + 97, + 167 + ], + "blocks": [ + { + "bbox": [ + 49, + 71, + 97, + 167 + ], + "lines": [ + { + "bbox": [ + 49, + 71, + 97, + 167 + ], + "spans": [ + { + "bbox": [ + 49, + 71, + 97, + 167 + ], + "type": "image", + "image_path": "335aac99e095e85b2b5f743ce9f2f159e659c4351b9b50c58e35d9730dbb8379.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 167, + 80, + 173 + ], + "lines": [ + { + "bbox": [ + 66, + 167, + 80, + 173 + ], + "spans": [ + { + "bbox": [ + 66, + 167, + 80, + 173 + ], + "type": "text", + "content": "Input" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 175, + 286, + 197 + ], + "lines": [ + { + "bbox": [ + 47, + 175, + 286, + 197 + ], + "spans": [ + { + "bbox": [ + 47, + 175, + 286, + 197 + ], + "type": "text", + "content": "Figure 8. Comparison with the renderer-based geometric guidance of segmentation." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 97, + 71, + 137, + 167 + ], + "blocks": [ + { + "bbox": [ + 97, + 71, + 137, + 167 + ], + "lines": [ + { + "bbox": [ + 97, + 71, + 137, + 167 + ], + "spans": [ + { + "bbox": [ + 97, + 71, + 137, + 167 + ], + "type": "image", + "image_path": "532f7c06be93b30dd9daf6d2f4933e89bd0f6a0c7c3c133dbf13f218c668edba.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 167, + 129, + 172 + ], + "lines": [ + { + "bbox": [ + 111, + 167, + 129, + 172 + ], + "spans": [ + { + "bbox": [ + 111, + 167, + 129, + 172 + ], + "type": "text", + "content": "SoftRas" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 151, + 71, + 184, + 167 + ], + "blocks": [ + { + "bbox": [ + 151, + 71, + 184, + 167 + ], + "lines": [ + { + "bbox": [ + 151, + 71, + 184, + 167 + ], + "spans": [ + { + "bbox": [ + 151, + 71, + 184, + 167 + ], + "type": "image", + "image_path": "cb36a202880902ae1a564cbed054916650bc8838671697dae42b18e61c55bcc7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 160, + 167, + 176, + 173 + ], + "lines": [ + { + "bbox": [ + 160, + 167, + 176, + 173 + ], + "spans": [ + { + "bbox": [ + 160, + 167, + 176, + 173 + ], + "type": "text", + "content": "DIB-R" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 196, + 71, + 230, + 167 + ], + "blocks": [ + { + "bbox": [ + 196, + 71, + 230, + 167 + ], + "lines": [ + { + "bbox": [ + 196, + 71, + 230, + 167 + ], + "spans": [ + { + "bbox": [ + 196, + 71, + 230, + 167 + ], + "type": "image", + "image_path": "aafd61dbd91e68cfd655195d7c0a665dc81caf9ec3972a2060c317920a56085a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 206, + 167, + 222, + 173 + ], + "lines": [ + { + "bbox": [ + 206, + 167, + 222, + 173 + ], + "spans": [ + { + "bbox": [ + 206, + 167, + 222, + 173 + ], + "type": "text", + "content": "ReDA" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 245, + 71, + 285, + 167 + ], + "blocks": [ + { + "bbox": [ + 245, + 71, + 285, + 167 + ], + "lines": [ + { + "bbox": [ + 245, + 71, + 285, + 167 + ], + "spans": [ + { + "bbox": [ + 245, + 71, + 285, + 167 + ], + "type": "image", + "image_path": "8e806851a899441bf975b9e6061cb36ba18c5f5048dcb0ddf458e37cb60dcf1a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 253, + 167, + 269, + 173 + ], + "lines": [ + { + "bbox": [ + 253, + 167, + 269, + 173 + ], + "spans": [ + { + "bbox": [ + 253, + 167, + 269, + 173 + ], + "type": "text", + "content": "PRDL" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 201, + 182, + 215 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 201, + 182, + 215 + ], + "spans": [ + { + "bbox": [ + 47, + 201, + 182, + 215 + ], + "type": "text", + "content": "5.3. 
Qualitative Comparison" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 220, + 287, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 220, + 287, + 340 + ], + "spans": [ + { + "bbox": [ + 46, + 220, + 287, + 340 + ], + "type": "text", + "content": "We conduct a comprehensive evaluation of our method with the state-of-the-art approaches, including PRNet [13], MGCNet [45], Deep3D [11], 3DDFA-V2 [17], HRN [25] and DECA [14]. The visualization of HRN and DECA uses the mid-frequency details and coarse shape (denoted as HRN-m and DECA-c) since their further steps only change the renderer's normal map, while no 3D refinement is made. As shown in Fig. 7, our results excel in capturing extreme expressions, even better than HRN-m which has fine reconstruction steps." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 346, + 189, + 360 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 346, + 189, + 360 + ], + "spans": [ + { + "bbox": [ + 47, + 346, + 189, + 360 + ], + "type": "text", + "content": "5.4.Quantitative Comparison" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": "On both the Part IoU and REALY [7] benchmarks, our results outperforms the existing state-of-the-art methods. As shown in Tab. 1, our method is almost always the highest overlap IoU across various facial parts with " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "73.82\\%" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " total average, demonstrating PRDL enhances the part alignment of reconstruction. PRDL also performs the best average 3D error on the REALY benchmark (1.436mm in frontal-view and 1.442mm in side-view), as shown in Tab. 2." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 468, + 140, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 468, + 140, + 481 + ], + "spans": [ + { + "bbox": [ + 47, + 468, + 140, + 481 + ], + "type": "text", + "content": "5.5. Ablation Study" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 492, + 287, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 492, + 287, + 599 + ], + "spans": [ + { + "bbox": [ + 46, + 492, + 287, + 599 + ], + "type": "text", + "content": "Ablation for PRDL and Synthetic Data. We conduct quantitative ablation experiments for PRDL and synthetic data on REALY and Part IoU. As depicted in Table 1 and Table 2, only introducing PRDL already yields superior results compared to all other methods (72.22%, 1.468mm, and 1.455mm). Introducing synthetic data without PRDL demonstrates a significant improvement in Part IoU, but not as effectively as PRDL (72.08% vs. 72.22%). Using both synthetic data and PRDL could lead to the best result." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": "Compare with the Differentiable Silhouette Renderers. SoftRas [33] and DIB-R [8] are the two most widely used renderers, which serve as the basis for PyTorch3D [42] and Kaolin [15], respectively. 
Based on the image-fitting framework [1], we use them to render a silhouette of each face part and calculate the IoU loss with the ground truth. ReDA [56] is also a renderer-based method using the geometric guidance of segmentation. Fig.8 shows that PRDL is significantly better than these methods. It is essential to em" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 308, + 71, + 350, + 150 + ], + "blocks": [ + { + "bbox": [ + 308, + 71, + 350, + 150 + ], + "lines": [ + { + "bbox": [ + 308, + 71, + 350, + 150 + ], + "spans": [ + { + "bbox": [ + 308, + 71, + 350, + 150 + ], + "type": "image", + "image_path": "7dae62bebef5ed7d9b06c30517d6279b3cf317fdb955a559770a77ff028362e3.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 322, + 152, + 334, + 159 + ], + "lines": [ + { + "bbox": [ + 322, + 152, + 334, + 159 + ], + "spans": [ + { + "bbox": [ + 322, + 152, + 334, + 159 + ], + "type": "text", + "content": "Input" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 350, + 152, + 385, + 158 + ], + "lines": [ + { + "bbox": [ + 350, + 152, + 385, + 158 + ], + "spans": [ + { + "bbox": [ + 350, + 152, + 385, + 158 + ], + "type": "text", + "content": "Chamfer Distance" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 163, + 545, + 184 + ], + "lines": [ + { + "bbox": [ + 306, + 163, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 306, + 163, + 545, + 184 + ], + "type": "text", + "content": "Figure 9. Comparison with the other point-driven-based geometric guidance of segmentation." + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 350, + 71, + 403, + 149 + ], + "blocks": [ + { + "bbox": [ + 350, + 71, + 403, + 149 + ], + "lines": [ + { + "bbox": [ + 350, + 71, + 403, + 149 + ], + "spans": [ + { + "bbox": [ + 350, + 71, + 403, + 149 + ], + "type": "image", + "image_path": "aa40965bc2b915017141f8e4086f6c645fead550126d0fd7cae293b49063d06f.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 391, + 152, + 422, + 157 + ], + "lines": [ + { + "bbox": [ + 391, + 152, + 422, + 157 + ], + "spans": [ + { + "bbox": [ + 391, + 152, + 422, + 157 + ], + "type": "text", + "content": "Density Aware" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 408, + 71, + 460, + 149 + ], + "blocks": [ + { + "bbox": [ + 408, + 71, + 460, + 149 + ], + "lines": [ + { + "bbox": [ + 408, + 71, + 460, + 149 + ], + "spans": [ + { + "bbox": [ + 408, + 71, + 460, + 149 + ], + "type": "image", + "image_path": "ff57dc0994d61bd7f37a4b027bebeb3b5e95d186a16772797141a3dad3b7eb54.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 457, + 152, + 487, + 158 + ], + "lines": [ + { + "bbox": [ + 457, + 152, + 487, + 158 + ], + "spans": [ + { + "bbox": [ + 457, + 152, + 487, + 158 + ], + "type": "text", + "content": "ICP" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 462, + 71, + 496, + 149 + ], + "blocks": [ + { + "bbox": [ + 462, + 71, + 496, + 149 + ], + "lines": [ + { + "bbox": [ + 462, + 71, + 496, + 149 + ], + "spans": [ + { + "bbox": [ + 462, + 71, + 496, + 149 + ], + "type": "image", + "image_path": 
"fe60e4798093524f74c52637a4cecbb3c65bbc778834bb35585115f8b99b6660.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 496, + 71, + 542, + 149 + ], + "blocks": [ + { + "bbox": [ + 496, + 71, + 542, + 149 + ], + "lines": [ + { + "bbox": [ + 496, + 71, + 542, + 149 + ], + "spans": [ + { + "bbox": [ + 496, + 71, + 542, + 149 + ], + "type": "image", + "image_path": "c66e5da9aec31e8ea104677bc7f911f2b4c293c398c8c44524427de817bbe9ef.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 517, + 152, + 530, + 158 + ], + "lines": [ + { + "bbox": [ + 517, + 152, + 530, + 158 + ], + "spans": [ + { + "bbox": [ + 517, + 152, + 530, + 158 + ], + "type": "text", + "content": "PRDL" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "bbox": [ + 306, + 190, + 545, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 190, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 306, + 190, + 545, + 215 + ], + "type": "text", + "content": "phasize that all the results in Fig.8 and Fig.9 do not include " + }, + { + "bbox": [ + 306, + 190, + 545, + 215 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{lmk}" + }, + { + "bbox": [ + 306, + 190, + 545, + 215 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 306, + 190, + 545, + 215 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{pho}" + }, + { + "bbox": [ + 306, + 190, + 545, + 215 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 306, + 190, + 545, + 215 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{per}" + }, + { + "bbox": [ + 306, + 190, + 545, + 215 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 305, + 220, + 545, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 220, + 545, + 471 + ], + "spans": [ + { + "bbox": [ + 305, + 220, + 545, + 471 + ], + "type": "text", + "content": "Compare with the Other Point-Driven Optimization Methods. One of the key insights of PRDL is transforming segmentation into points. Thus the 3DMM fitting becomes an optimization of two 2D point clouds until they share the same geometry. While an intuitive idea is incorporating the point-driven optimization methods like iterative closest points (ICP) [2-4] or chamfer distance [53], these methods are predominantly rooted in nearest-neighbor principles, and solely opting for the minimum distance potentially leads to local optima. We compare PRDL with ICP [54], chamfer distance and density aware chamfer distance [53] based on [1]. Since the ICP distance can be calculated from target to prediction or vice versa, we provide both methods. As depicted in Fig.9, PRDL outperforms other methods, producing outputs that align more accurately with the desired geometry. This superiority is attributed to the use of additional anchors and diverse statistical distances in PRDL. Referring to Fig.8 and Fig.9, PRDL stands out as the only loss capable of reconstructing effective results when the segmentation information is used independently. More comparison is in the supplemental materials." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 306, + 483, + 383, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 483, + 383, + 495 + ], + "spans": [ + { + "bbox": [ + 306, + 483, + 383, + 495 + ], + "type": "text", + "content": "6. 
Conclusions" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 305, + 502, + 545, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 502, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 305, + 502, + 545, + 611 + ], + "type": "text", + "content": "This paper proposes a novel Part Re-projection Distance Loss (PRDL) to reconstruct 3D faces with the geometric guidance of facial part segmentation. Analysis proves that PRDL is superior to renderer-based and other point-driven optimization methods. We also provide a new emotional face expression dataset and a new 3D mesh part annotation to facilitate studies. Experiments further highlight the state-of-the-art performance of PRDL in achieving high-fidelity and better part alignment in 3D face reconstruction." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 306, + 621, + 403, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 621, + 403, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 621, + 403, + 635 + ], + "type": "text", + "content": "Acknowledgement" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 305, + 641, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 641, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 641, + 545, + 713 + ], + "type": "text", + "content": "This work was supported in part by Chinese National Natural Science Foundation Projects 62176256, U23B2054, 62276254, 62206280, the Beijing Science and Technology Plan Project Z231100005923033, Beijing Natural Science Foundation L221013, the Youth Innovation Promotion Association CAS Y2021131 and InnoHK program." + } + ] + } + ], + "index": 34 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1679" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 91, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 49, + 91, + 285, + 113 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 91, + 285, + 113 + ], + "spans": [ + { + "bbox": [ + 49, + 91, + 285, + 113 + ], + "type": "text", + "content": "[1] 3dmm model fitting using pytorch. https://github.com/ascust/3DMM-Fitting-Pytorch, 2021.8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 115, + 287, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 115, + 287, + 158 + ], + "spans": [ + { + "bbox": [ + 49, + 115, + 287, + 158 + ], + "type": "text", + "content": "[2] Brian Amberg, Sami Romdhani, and Thomas Vetter. Optimal step nonrigid icp algorithms for surface registration. In 2007 IEEE conference on computer vision and pattern recognition, pages 1-8. IEEE, 2007. 
8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 160, + 287, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 160, + 287, + 193 + ], + "spans": [ + { + "bbox": [ + 49, + 160, + 287, + 193 + ], + "type": "text", + "content": "[3] K. S. Arun, T. S. Huang, and S. D. Blostein. Least-squares fitting of two 3-d point sets. IEEE Transactions on Pattern Analysis and Machine Intelligence, PAMI-9(5):698-700, 1987." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 195, + 287, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 195, + 287, + 227 + ], + "spans": [ + { + "bbox": [ + 49, + 195, + 287, + 227 + ], + "type": "text", + "content": "[4] P.J. Besl and Neil D. McKay. A method for registration of 3-d shapes. IEEE Transactions on Pattern Analysis and Machine Intelligence, 14(2):239-256, 1992. 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 229, + 287, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 229, + 287, + 273 + ], + "spans": [ + { + "bbox": [ + 49, + 229, + 287, + 273 + ], + "type": "text", + "content": "[5] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 187-194, 1999. 1, 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 275, + 287, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 275, + 287, + 308 + ], + "spans": [ + { + "bbox": [ + 49, + 275, + 287, + 308 + ], + "type": "text", + "content": "[6] Volker Blanz and Thomas Vetter. Face recognition based on fitting a 3d morphable model. IEEE Transactions on pattern analysis and machine intelligence, 25(9):1063-1074, 2003. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 309, + 287, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 309, + 287, + 374 + ], + "spans": [ + { + "bbox": [ + 49, + 309, + 287, + 374 + ], + "type": "text", + "content": "[7] Zenghao Chai, Haoxian Zhang, Jing Ren, Di Kang, Zhengzhuo Xu, Xuefei Zhe, Chun Yuan, and Linchao Bao. Really: Rethinking the evaluation of 3d face reconstruction. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part VIII, pages 74-92. Springer, 2022. 2, 7, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 376, + 287, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 376, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 49, + 376, + 287, + 430 + ], + "type": "text", + "content": "[8] Wenzheng Chen, Huan Ling, Jun Gao, Edward Smith, Jaakko Lehtinen, Alec Jacobson, and Sanja Fidler. Learning to predict 3d objects with an interpolation-based differentiable renderer. Advances in neural information processing systems, 32, 2019. 3, 6, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 434, + 287, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 434, + 287, + 486 + ], + "spans": [ + { + "bbox": [ + 49, + 434, + 287, + 486 + ], + "type": "text", + "content": "[9] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2019. 
3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 489, + 287, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 489, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 49, + 489, + 287, + 521 + ], + "type": "text", + "content": "[10] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In CVPR, 2020. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 524, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 524, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 49, + 524, + 287, + 588 + ], + "type": "text", + "content": "[11] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops, pages 0–0, 2019. 1, 2, 3, 5, 6, 8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 591, + 287, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 591, + 287, + 655 + ], + "spans": [ + { + "bbox": [ + 49, + 591, + 287, + 655 + ], + "type": "text", + "content": "[12] Bernhard Egger, Sandro Schonborn, Andreas Schneider, Adam Kortylewski, Andreas Morel-Forster, Clemens Blumer, and Thomas Vetter. Occlusion-aware 3d morphable models and an illumination prior for face image analysis. International Journal of Computer Vision, 126:1269-1287, 2018. 1, 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 658, + 287, + 712 + ], + "type": "text", + "content": "[13] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In Proceedings of the European conference on computer vision (ECCV), pages 534-551, 2018. 6, 7, 8" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 308, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 308, + 73, + 545, + 106 + ], + "type": "text", + "content": "[14] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3D face model from in-the-wild images. 2021. 1, 2, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 108, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 108, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 108, + 545, + 205 + ], + "type": "text", + "content": "[15] Clement Fuji Tsang, Maria Shugrina, Jean Francois Lafleche, Towaki Takikawa, Jiehan Wang, Charles Loop, Wenzheng Chen, Krishna Murthy Jatavallabhula, Edward Smith, Artem Rozantsev, Or Perel, Tianchang Shen, Jun Gao, Sanja Fidler, Gavriel State, Jason Gorski, Tommy Xiang, Jianing Li, Michael Li, and Rev Lebaredian. Kaolin: A pytorch library for accelerating 3d deep learning research. 
https://github.com/NVIDIAGames/kaolin, 2022.2,3,8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 208, + 545, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 208, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 308, + 208, + 545, + 262 + ], + "type": "text", + "content": "[16] Kyle Genova, Forrester Cole, Aaron Maschinot, Aaron Sarna, Daniel Vlasic, and William T Freeman. Unsupervised training for 3d morphable model regression. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8377-8386, 2018. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 264, + 545, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 264, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 308, + 264, + 545, + 296 + ], + "type": "text", + "content": "[17] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. pages 152-168, 2020. 1, 2, 3, 6, 7, 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 298, + 545, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 298, + 545, + 341 + ], + "spans": [ + { + "bbox": [ + 308, + 298, + 545, + 341 + ], + "type": "text", + "content": "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 343, + 545, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 343, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 308, + 343, + 545, + 396 + ], + "type": "text", + "content": "[19] Yueying Kao, Bowen Pan, Miao Xu, Jiangjing Lyu, Xiangyu Zhu, Yuanzhang Chang, Xiaobo Li, and Zhen Lei. Toward 3d face reconstruction in perspective projection: Estimating 6dof face pose from monocular image. IEEE Transactions on Image Processing, 32:3080-3091, 2023. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 399, + 545, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 399, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 308, + 399, + 545, + 441 + ], + "type": "text", + "content": "[20] Yury Kartynnik, Artsiom Ablavatski, Ivan Grishchenko, and Matthias Grundmann. Real-time facial surface geometry from monocular video on mobile gpus. arXiv preprint arXiv:1907.06724, 2019. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 445, + 545, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 445, + 545, + 486 + ], + "spans": [ + { + "bbox": [ + 308, + 445, + 545, + 486 + ], + "type": "text", + "content": "[21] Hiroharu Kato, Yoshitaka Ushiku, and Tatsuya Harada. Neural 3d mesh renderer. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3907-3916, 2018. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 489, + 545, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 489, + 545, + 532 + ], + "spans": [ + { + "bbox": [ + 308, + 489, + 545, + 532 + ], + "type": "text", + "content": "[22] Hiroharu Kato, Deniz Beker, Mihai Morariu, Takahiro Ando, Toru Matsuoka, Wadim Kehl, and Adrien Gaidon. Differentiable rendering: A survey. arXiv preprint arXiv:2006.12057, 2020. 
2, 3, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 535, + 545, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 535, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 308, + 535, + 545, + 566 + ], + "type": "text", + "content": "[23] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 569, + 545, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 569, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 308, + 569, + 545, + 611 + ], + "type": "text", + "content": "[24] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 614, + 545, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 614, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 308, + 614, + 545, + 677 + ], + "type": "text", + "content": "[25] Biwen Lei, Jianqiang Ren, Mengyang Feng, Miaomiao Cui, and Xuansong Xie. A hierarchical representation network for accurate and detailed face reconstruction from in-the-wild images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 394-403, 2023. 1, 2, 3, 6, 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 681, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 681, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 681, + 545, + 712 + ], + "type": "text", + "content": "[26] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. To fit or not to fit: Model-based face reconstruction and occlusion segmentation from" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "text", + "content": "1680" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 63, + 72, + 286, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 72, + 286, + 94 + ], + "spans": [ + { + "bbox": [ + 63, + 72, + 286, + 94 + ], + "type": "text", + "content": "weak supervision. arXiv preprint arXiv:2106.09614, 2021. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "text", + "content": "[27] Ruilong Li, Karl Bladin, Yajie Zhao, Chinmay Chinara, Owen Ingraham, Pengda Xiang, Xinglei Ren, Pratusha Prasad, Bipin Kishore, Jun Xing, and Hao Li. Learning formation of physically-based face attributes. 2020. 
4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 185 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 185 + ], + "type": "text", + "content": "[28] Shan Li and Weihong Deng. Blended emotion in-the-wild: Multi-label facial expression recognition using crowdsourced annotations and deep locality feature learning. International Journal of Computer Vision, 127(6-7):884–906, 2019. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "type": "text", + "content": "[29] Shan Li and Weihong Deng. Reliable crowdsourcing and deep locality-preserving learning for unconstrained facial expression recognition. IEEE Transactions on Image Processing, 28(1):356-370, 2019. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 232, + 287, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 232, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 232, + 287, + 274 + ], + "type": "text", + "content": "[30] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 2, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 277, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 277, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 277, + 287, + 319 + ], + "type": "text", + "content": "[31] Jinpeng Lin, Hao Yang, Dong Chen, Ming Zeng, Fang Wen, and Lu Yuan. Face parsing with roi tanh-warping. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5654-5663, 2019. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 322, + 287, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 322, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 48, + 322, + 287, + 354 + ], + "type": "text", + "content": "[32] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. Roi tanh-polar transformer network for face parsing in the wild. Image and Vision Computing, 112:104190, 2021. 2, 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 356, + 287, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 356, + 287, + 408 + ], + "spans": [ + { + "bbox": [ + 48, + 356, + 287, + 408 + ], + "type": "text", + "content": "[33] Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7708-7717, 2019. 2, 3, 6, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 411, + 287, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 411, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 411, + 287, + 453 + ], + "type": "text", + "content": "[34] Yinglu Liu, Hailin Shi, Hao Shen, Yue Si, Xiaobo Wang, and Tao Mei. A new dataset and boundary-attention semantic segmentation for face parsing. In AAAI, pages 11637–11644, 2020. 
2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 456, + 287, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 456, + 287, + 499 + ], + "spans": [ + { + "bbox": [ + 48, + 456, + 287, + 499 + ], + "type": "text", + "content": "[35] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 5, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 502, + 287, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 502, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 48, + 502, + 287, + 555 + ], + "type": "text", + "content": "[36] Matthew M Loper and Michael J Black. Opendr: An approximate differentiable renderer. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pages 154-169. Springer, 2014. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 558, + 287, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 622 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 622 + ], + "type": "text", + "content": "[37] Tetiana Martyniuk, Orest Kupyn, Yana Kurlyak, Igor Krashenyi, Jiri Matas, and Viktoriya Sharmanska. Dad-3heads: A large-scale dense, accurate and diverse dataset for 3d head alignment from a single image. In Proc. IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 287, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 657 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 657 + ], + "type": "text", + "content": "[38] Carsten Moenning and Neil A Dodgson. Fast marching farthest point sampling. Technical report, University of Cambridge, Computer Laboratory, 2003. 4, 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 658, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 713 + ], + "type": "text", + "content": "[39] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019. 6" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[40] Ravi Ramamoorthi and Pat Hanrahan. An efficient representation for irradiance environment maps. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 497-500, 2001. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "type": "text", + "content": "[41] Chirag Raman, Charlie Hewitt, Erroll Wood, and Tadas Baltrusaitis. 
Mesh-tension driven expression-based wrinkles for synthetic faces. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3515-3525, 2023. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 173, + 545, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 173, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 307, + 173, + 545, + 215 + ], + "type": "text", + "content": "[42] Nikhila Ravi, Jeremy Reizenstein, David Novotny, Taylor Gordon, Wan-Yen Lo, Justin Johnson, and Georgia Gkioxari. Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501, 2020. 2, 3, 6, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 217, + 545, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 217, + 545, + 271 + ], + "spans": [ + { + "bbox": [ + 307, + 217, + 545, + 271 + ], + "type": "text", + "content": "[43] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE international conference on computer vision workshops, pages 397-403, 2013. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 273, + 545, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 273, + 545, + 326 + ], + "spans": [ + { + "bbox": [ + 307, + 273, + 545, + 326 + ], + "type": "text", + "content": "[44] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael J Black. Learning to regress 3d face shape and expression from an image without 3d supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7763-7772, 2019. 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 327, + 545, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 327, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 307, + 327, + 545, + 403 + ], + "type": "text", + "content": "[45] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Mingmin Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3d face reconstruction by occlusion-aware multiview geometry consistency. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XV, pages 53-70. Springer, 2020. 1, 2, 6, 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 404, + 545, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 404, + 545, + 446 + ], + "spans": [ + { + "bbox": [ + 307, + 404, + 545, + 446 + ], + "type": "text", + "content": "[46] Dave Shreiner, Bill The Khronos OpenGL ARB Working Group, et al. OpenGL programming guide: the official guide to learning OpenGL, versions 3.0 and 3.1. Pearson Education, 2009. 2, 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 449, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 449, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 307, + 449, + 545, + 491 + ], + "type": "text", + "content": "[47] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. ACM Transactions on Graphics (TOG), 41(6):1-10, 2022. 
5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 493, + 545, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 493, + 545, + 547 + ], + "spans": [ + { + "bbox": [ + 307, + 493, + 545, + 547 + ], + "type": "text", + "content": "[48] Ayush Tewari, Hans-Peter Seidel, Mohamed Elgharib, Christian Theobalt, et al. Learning complete 3d morphable face models from images and videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3361-3371, 2021. 2, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 548, + 545, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 548, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 307, + 548, + 545, + 591 + ], + "type": "text", + "content": "[49] Graphics University of Basel and Vision Research. parametric-face-image-generator. https://github.com/unibas-gravis/parametric-face-image-generator, 2017.2,4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 593, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 593, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 593, + 545, + 635 + ], + "type": "text", + "content": "[50] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 636, + 545, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 636, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 307, + 636, + 545, + 690 + ], + "type": "text", + "content": "[51] Lizhen Wang, Zhiyuan Chen, Tao Yu, Chenguang Ma, Liang Li, and Yebin Liu. Faceverse: a fine-grained and detail-controllable 3d face morphable model from a hybrid dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20333-20342, 2022. 4" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 692, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 692, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 692, + 545, + 712 + ], + "type": "text", + "content": "[52] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. Fake it till" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 749, + 314, + 757 + ], + "type": "text", + "content": "1681" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 485 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 63, + 73, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 73, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 63, + 73, + 287, + 106 + ], + "type": "text", + "content": "you make it: face analysis in the wild using synthetic data alone. In Proceedings of the IEEE/CVF international conference on computer vision, pages 3681-3691, 2021. 
3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 287, + 149 + ], + "type": "text", + "content": "[53] Tong Wu, Liang Pan, Junzhe Zhang, Tai Wang, Ziwei Liu, and Dahua Lin. Density-aware chamfer distance as a comprehensive metric for point cloud completion. arXiv preprint arXiv:2111.12702, 2021. 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 152, + 287, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 152, + 287, + 195 + ], + "spans": [ + { + "bbox": [ + 49, + 152, + 287, + 195 + ], + "type": "text", + "content": "[54] Jiaolong Yang, Hongdong Li, Dylan Campbell, and Yunde Jia. Go-icp: A globally optimal solution to 3d icp point-set registration. IEEE transactions on pattern analysis and machine intelligence, 38(11):2241–2254, 2015. 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 197, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 197, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 49, + 197, + 287, + 239 + ], + "type": "text", + "content": "[55] Qi Zheng, Jiankang Deng, Zheng Zhu, Ying Li, and Stefanos Zafeiriou. Decoupled multi-task learning with cyclical self-regulation for face parsing. In Computer Vision and Pattern Recognition, 2022. 2, 4, 5, 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 241, + 287, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 241, + 287, + 296 + ], + "spans": [ + { + "bbox": [ + 49, + 241, + 287, + 296 + ], + "type": "text", + "content": "[56] Wenbin Zhu, HsiangTao Wu, Zeyu Chen, Noranart Vesdapunt, and Baoyuan Wang. Reda: reinforced differentiable attribute for 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4958-4967, 2020. 2, 3, 6, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 297, + 287, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 297, + 287, + 350 + ], + "spans": [ + { + "bbox": [ + 49, + 297, + 287, + 350 + ], + "type": "text", + "content": "[57] Xiangyu Zhu, Zhen Lei, Junjie Yan, Dong Yi, and Stan Z Li. High-fidelity pose and expression normalization for face recognition in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 787-796, 2015. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 353, + 287, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 353, + 287, + 396 + ], + "spans": [ + { + "bbox": [ + 49, + 353, + 287, + 396 + ], + "type": "text", + "content": "[58] Xiangyu Zhu, Xiaoming Liu, Zhen Lei, and Stan Z Li. Face alignment in full pose range: A 3d total solution. IEEE transactions on pattern analysis and machine intelligence, 41(1): 78-92, 2017. 3, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 398, + 287, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 398, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 49, + 398, + 287, + 441 + ], + "type": "text", + "content": "[59] Xiangyu Zhu, Chang Yu, Di Huang, Zhen Lei, Hao Wang, and Stan Z Li. Beyond 3dmm: Learning to capture high-fidelity 3d face shape. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 
1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 443, + 287, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 443, + 287, + 485 + ], + "spans": [ + { + "bbox": [ + 49, + 443, + 287, + 485 + ], + "type": "text", + "content": "[60] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces. In European Conference on Computer Vision, pages 250–269. Springer, 2022. 1, 2" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "text", + "content": "1682" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_content_list.json b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..737a386d30d7a8018716fe686a27f064097f12f4 --- /dev/null +++ b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_content_list.json @@ -0,0 +1,1759 @@ +[ + { + "type": "text", + "text": "3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow", + "text_level": 1, + "bbox": [ + 99, + 130, + 870, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Felix Taubner", + "bbox": [ + 88, + 180, + 202, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Prashant Raina", + "bbox": [ + 230, + 181, + 352, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mathieu Tuli", + "bbox": [ + 380, + 181, + 486, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Eu Wern Teh", + "bbox": [ + 516, + 181, + 622, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chul Lee", + "bbox": [ + 651, + 181, + 727, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jinmiao Huang", + "bbox": [ + 756, + 181, + 879, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "LG Electronics", + "bbox": [ + 423, + 199, + 545, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{prashant.raina, mathieu.tuli, euwern.teh, clee.lee}@lge.com", + "bbox": [ + 217, + 219, + 746, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "When working with 3D facial data, improving fidelity and avoiding the uncanny valley effect is critically dependent on accurate 3D facial performance capture. Because such methods are expensive and due to the widespread availability of 2D videos, recent methods have focused on how to perform monocular 3D face tracking. However, these methods often fall short in capturing precise facial movements due to limitations in their network architecture, training, and evaluation processes. Addressing these challenges, we propose a novel face tracker, FlowFace, that introduces an innovative 2D alignment network for dense pervertex alignment. 
Unlike prior work, FlowFace is trained on high-quality 3D scan annotations rather than weak supervision or synthetic data. Our 3D model fitting module jointly fits a 3D face model from one or many observations, integrating existing neutral shape priors for enhanced identity and expression disentanglement and per-vertex deformations for detailed facial feature reconstruction. Additionally, we propose a novel metric and benchmark for assessing tracking accuracy. Our method exhibits superior performance on both custom and publicly available benchmarks. We further validate the effectiveness of our tracker by generating high-quality 3D data from 2D videos, which leads to performance gains on downstream tasks.", + "bbox": [ + 75, + 301, + 473, + 664 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 678, + 209, + 694 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Access to 3D face tracking data lays the foundation for many computer graphics tasks such as 3D facial animation, 3D human avatar reconstruction, and expression transfer. Obtaining high visual fidelity, portraying subtle emotional cues, and preventing the uncanny valley effect in these downstream tasks is reliant on high motion capture accuracy. As a result, a common approach to generating 3D face tracking data is to use 3D scans and visual markers however, this process is cost-intensive. To alleviate this burden, building computational models to obtain 3D faces from monocular 2D videos and images has cemented its importance in recent years and seen great progress [10, 14, 19, 24, 37, 42, 57]. Nevertheless, three", + "bbox": [ + 75, + 703, + 470, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "issues persist: First, current methods rely heavily on sparse landmarks and photometric similarity, which is computationally expensive and ineffective in ensuring accurate face motion. Second, the monocular face tracking problem is both ill-posed and contains a large solution space dependent on camera intrinsics, pose, head shape, and expression [58]. Third, current benchmarks for this task neglect the temporal aspect of face tracking and do not adequately evaluate facial motion capture accuracy.", + "bbox": [ + 496, + 268, + 893, + 407 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address the aforementioned issues, we introduce a novel 3D face tracking model called FlowFace, consisting of a versatile two-stage pipeline: A 2D alignment network that predicts the screen-space positions of each vertex of a 3D morphable model [2] (3DMM) and an optimization module that jointly fits this model across multiple views by minimizing an alignment energy function. Unlike traditional methods that rely on sparse landmarks and photometric consistency, FlowFace uses only 2D alignment as input signal, similar to recent work [42]. This alleviates the computational burden of inverse rendering and allows joint reconstruction using a very large number of observations. We enhance previous work in four ways: (1) The 2D alignment network features a novel architecture with a vision-transformer backbone and an iterative, recurrent refinement block. (2) In contrast to previous methods that use weak supervision or synthetic data, the alignment network is trained using high-quality annotations from 3D scans. (3) The alignment network predicts dense, per-vertex alignment instead of key-points, which enables the reconstruction of finer details. 
(4) We integrate an off-the-shelf neutral shape prediction model to improve identity and expression disentanglement.", + "bbox": [ + 496, + 411, + 895, + 760 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In addition, we present the screen-space motion error (SSME) as a novel face tracking metric. Based on optical flow, SSME computes and contrasts screen-space motion, aiming to resolve the limitation observed in existing evaluation methods. These often rely on sparse key points, synthetic annotations, or RGB/3D reconstruction errors, and lack a thorough and comprehensive measurement of temporal consistency. Using the Multiface [44] dataset, we develop a 3D face tracking benchmark around this metric.", + "bbox": [ + 496, + 763, + 895, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1227", + "bbox": [ + 482, + 924, + 514, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Finally, through extensive experiments on available benchmarks, we show that our method significantly outperforms the state-of-the-art on various tasks. To round off our work, we demonstrate how our face tracker can positively affect the performance of downstream tasks, including speech-driven 3D facial animation and 3D head avatar synthesis. Specifically, we demonstrate how our method can be used to generate high-quality data — comparable to studio-captured data — for both these tasks by using it to augment existing models to achieve state-of-the-art results.", + "bbox": [ + 75, + 90, + 472, + 243 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 257, + 218, + 273 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Uncalibrated 3D Face Reconstruction. Previous work reconstructing 3D face shapes from uncalibrated 2D images or video fall into two broad categories:", + "bbox": [ + 75, + 282, + 468, + 328 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Optimization-based methods recover face shape and motion by jointly optimizing 3D model parameters to fit the 2D observations. They traditionally treat this optimization as an inverse rendering problem [15, 16, 37, 43, 48, 52, 57], using sparse key-points as guidance. Typically, they employ geometric priors such as 3DMMs [2, 6, 22, 26, 47], texture models, simplified illumination models, and temporal priors. Some methods use additional constraints such as depth [37] or optical flow [5]. [58] and [28] present detailed surveys of such methods. Most methods use 3DMMs to disentangle shape and expression components. MPT [57] is the first method to integrate metrical head shape priors predicted by a deep neural network (DNN). However, photometric and sparse landmark supervision is not sufficient to obtain consistent and accurate face alignment, especially in areas not covered by landmarks and or of low visual saliency. More recently, [42] proposes to use only 2D face alignment (dense landmarks) as supervision, avoiding the computationally expensive inverse rendering process. 
Our method extends this idea with an improved 2D alignment module, better shape priors, and per-vertex deformation.", + "bbox": [ + 75, + 329, + 472, + 647 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Regression-based methods train DNNs to directly predict face reconstructions from single images [7, 10, 12, 19, 24, 31, 32, 34, 35]. This reconstruction includes information such as pose, 3DMM components, and sometimes texture. Typically, convolutional networks like image classification networks [21, 33] or encoder-decoder networks [41] are used. Due to the lack of large-scale 2D to 3D annotations, these methods typically rely on photometric supervision for their training. Some methods propose complex multi-step network architectures [24, 32] to improve reconstruction. [24] use additional handcrafted losses to improve alignment, whereas [7] use synthetic data and numerous of landmarks. More recently, [38] proposes to use vision-transformers to improve face reconstruction.", + "bbox": [ + 75, + 647, + 472, + 859 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2D Face Alignment. Traditional 2D face alignment methods predict a sparse set of manually defined landmarks.", + "bbox": [ + 75, + 869, + 470, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "These methods typically involve convolutional DNNs to predict heat maps for each landmark [4, 30, 54]. Sparse key-points are not sufficient to describe full face motion, and heat maps make it computationally infeasible to predict a larger number of key-points. [42] and [18] achieve pseudo-dense alignment by using classifier networks to directly predict a very large number of landmarks. [20] predict the UV coordinates in image space and then map the vertices onto the image. Just like [41] and [32], our method predicts a per-pixel dense mapping between the UV space of a face model and the image space. However, we set our method apart by using better network architectures with vision-transformers and real instead of synthetic data.", + "bbox": [ + 496, + 90, + 893, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Evaluation of Face Trackers. Prior work evaluates face tracking and reconstruction using key-point accuracy [19, 32, 41, 42, 55], depth [37, 57], photometric [37, 57] or 3D reconstruction [5, 6, 47] errors. Sparse key-points are usually manually-annotated, difficult to define without ambiguities [54], and insufficient to describe the full motion of the face. Dense key-points [55] are difficult to compare between models using different mesh topologies. Photometric errors [37, 38, 57] are unsuitable since a perfect solution already exists within the input data, and areas with low visual saliency are neglected. A fair comparison of depth errors [37, 57] is only possible for methods using a pre-calibrated, perspective camera model. Methods that evaluate 3D reconstruction errors have to rigidly align the target and predicted mesh to fairly evaluate results [6, 34, 47], which causes valuable tracking information such as pose and intrinsics to be lost. Most importantly, depth and 3D reconstruction metrics neglect motion tangential to the surface normal. In contrast, our proposed metric measures the dense face motion in screen space, which is topology-independent and eliminates the need for rigid alignment.", + "bbox": [ + 496, + 299, + 893, + 618 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. 
Method", + "text_level": 1, + "bbox": [ + 500, + 632, + 591, + 648 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our 3D face tracking pipeline consists of two stages: The first stage is predicting a dense 2D alignment of the face model, and the second stage is fitting a parametric 3D model to this alignment.", + "bbox": [ + 496, + 657, + 893, + 719 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1. Dense 2D Face Alignment Network", + "text_level": 1, + "bbox": [ + 498, + 729, + 805, + 746 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1.1 Network Architecture", + "text_level": 1, + "bbox": [ + 500, + 753, + 707, + 768 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The 2D alignment module is responsible for predicting the probabilistic location — in image space — of each vertex of our face model. As in [42], the 2D alignment of each vertex is represented as a random variable $A_{i} = \\{\\mu_{i},\\sigma_{i}\\}$ . $\\mu_{i} = [x_{i},y_{i}]\\in \\mathcal{I}$ is the expected vertex position in image space $\\mathcal{I}\\in [0,D_{img}]^2$ , and $\\sigma_{i}\\in \\mathbb{R}_{>0}$ is its uncertainty, modeled as the standard deviation of a circular 2D Gaussian density function. As an intermediate step, for each iteration", + "bbox": [ + 496, + 779, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "1228", + "bbox": [ + 483, + 924, + 514, + 936 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/069fad4bb0552718c5ca1181c746f8a2602905b4515f8fce047934d32644ac6a.jpg", + "image_caption": [ + "Figure 1. An overview of the proposed 2D alignment network architecture. A feature encoder transforms the image into a latent feature map that is then iteratively aligned with a learned UV positional embedding map by the recurrent update block." + ], + "image_footnote": [], + "bbox": [ + 98, + 90, + 877, + 250 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$k$ , the alignment network predicts a dense UV to image correspondence map $\\mathbf{F}_k: \\mathcal{U} \\to \\mathcal{I}$ and uncertainty map $\\mathbf{S}_k$ . $\\mathbf{F}_k$ maps any point in UV space $\\mathcal{U} \\in [0, D_{uv}]^2$ to a position in image space through a pixel-wise offset, which we call UV-image flow. This network consists of three parts (Fig. 1):", + "bbox": [ + 76, + 292, + 468, + 369 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. An image feature encoder producing a latent feature map of the target image.", + "2. A positional encoding module that produces learned positional embeddings in UV space.", + "3. An iterative, recurrent optical flow module that predicts the probabilistic UV-image flow." + ], + "bbox": [ + 76, + 369, + 468, + 459 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The image space position and uncertainty of each vertex is then bi-linearly sampled from the intermediate correspondence and uncertainty map for each iteration:", + "bbox": [ + 75, + 460, + 468, + 505 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mu_ {i, k} = \\nu_ {i} + \\mathbf {F} _ {k} (\\nu_ {i}) \\quad \\text {a n d} \\quad \\sigma_ {i, k} = \\mathbf {S} _ {k} (\\nu_ {i}) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 130, + 518, + 468, + 534 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\nu_{i}\\in \\mathcal{U}$ denotes the pre-defined UV coordinate of each vertex. 
These are manually defined by a 3D artist.", + "bbox": [ + 75, + 546, + 468, + 575 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Image feature encoder. To obtain the input to the image encoder $\\mathcal{F}$ , we use SFD [51] to detect a square face bounding box from the target image and enlarge it by $20\\%$ . We then crop the image to the bounding box and resize it to $D_{img}$ . We use Segformer [45] as the backbone, and replace the final classification layer with a linear layer to produce a 128-dimensional feature encoding. We further down-sample it to attain a final image feature map $Z_{img} \\in \\mathbb{R}^{D_{uv} \\times D_{uv} \\times 128}$ through average pooling. With image $\\mathbf{I}$ and network parameters $\\theta_{\\mathcal{F}}$ , this is defined as:", + "bbox": [ + 75, + 585, + 470, + 736 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nZ _ {i m g} = \\mathcal {F} (\\mathbf {I}, \\theta_ {\\mathcal {F}}) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 750, + 468, + 766 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "UV positional encoding module. We use a set of modules $\\mathcal{G}$ with identical architecture to generate learned positional embeddings in UV-space. Each module is comprised of a multi-scale texture pyramid and a pixel-wise linear layer. This pyramid consists of four trainable textures with 32 channels and squared resolutions of $D_{uv}$ , $\\frac{D_{uv}}{2}$ , $\\frac{D_{uv}}{4}$ , and $\\frac{D_{uv}}{8}$ respectively. Each texture is upsampled to $D_{uv}$ through bi-linear interpolation before concatenating them", + "bbox": [ + 75, + 777, + 468, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "along the channel dimension. The concatenated textures are then passed through a pixel-wise linear layer to produce the UV positional embeddings. The multi-scale setup ensures structural consistency in UV space (closer pixels in UV should have similar features). We use 3 of these modules: $\\mathcal{G}_{Z_{uv}}$ to generate a UV feature map $Z_{uv}$ , $\\mathcal{G}_c$ to generator a context map $c$ , and $\\mathcal{G}_{h_0}$ to generate an initial hidden state $h_0$ . With corresponding network parameters $\\theta_{\\mathcal{G}_{Z_{uv}}}, \\theta_{\\mathcal{G}_c}$ and $\\theta_{\\mathcal{G}_{h_0}}$ , this is described as:", + "bbox": [ + 496, + 292, + 893, + 431 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nZ _ {u v} = \\mathcal {G} \\left(\\theta_ {\\mathcal {G} _ {Z _ {u v}}}\\right); c = \\mathcal {G} \\left(\\theta_ {\\mathcal {G} _ {c}}\\right); h _ {0} = \\mathcal {G} \\left(\\theta_ {\\mathcal {G} _ {h _ {0}}}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 542, + 439, + 890, + 458 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "UV-image flow. The RAFT [36] network is designed to predict the optical flow between two images. It consists of a correlation block that maps the latent features encoded from each image into a 4D correlation volume. A context encoder initializes the hidden state of a recurrent update block and provides it with additional context information. The update block then iteratively refines a flow estimate while sampling the correlation volume.", + "bbox": [ + 496, + 465, + 890, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We adapt this network to predict the UV-image flow $\\mathbf{F} \\in \\mathbb{R}^{D_{uv} \\times D_{uv} \\times 2}$ . 
We directly pass $Z_{uv}$ and $Z_{img}$ to the correlation block $\\mathbf{C}$ . We use the context map $c$ and initial hidden state $h_0$ from the positional encoding modules for the update module $\\mathbf{U}$ . We modify the update module to also predict a per-iteration uncertainty in addition to the flow estimate, by duplicating the flow prediction head to predict a 1-channel uncertainty map $\\mathbf{S} \\in \\mathbb{R}_{>0}^{D_{uv} \\times D_{uv}}$ . An exponential operation is applied to ensure positive values. The motion encoder head is adjusted to accept the uncertainty as an input. The modified RAFT network then works as follows: For each iteration $k$ , the recurrent update module performs a look-up in the correlation volume, context map $c$ , previous hidden state $h_{k-1}$ , previous flow $\\mathbf{F}_{k-1}$ and previous uncertainty $\\mathbf{S}_{k-1}$ . It outputs the refined flow estimate $\\mathbf{F}_k$ and uncertainty $\\mathbf{S}_k$ and the subsequent hidden state $h_k$ . Formally,", + "bbox": [ + 496, + 587, + 892, + 829 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {k}, \\mathbf {S} _ {k}, h _ {k} = \\mathbf {U} \\left(\\mathbf {C} \\left(Z _ {u v}, Z _ {i m g}\\right), c, \\mathbf {F} _ {k - 1}, \\mathbf {S} _ {k - 1}, h _ {k - 1}, \\theta_ {\\mathbf {U}}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 504, + 839, + 890, + 869 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "with update module weights $\\theta_{\\mathbf{U}}$ . For a detailed explanation of our modified RAFT, we defer to [36] and Appendix B.", + "bbox": [ + 496, + 869, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "1229", + "bbox": [ + 483, + 926, + 514, + 936 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.2 Loss Functions", + "text_level": 1, + "bbox": [ + 76, + 90, + 236, + 104 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We supervise our network with Gaussian negative log-likelihood (GNLL) both on the probabilistic per-vertex positions and the dense UV-image flow. For each iteration $k$ of the update module, we apply the per-vertex loss function:", + "bbox": [ + 75, + 114, + 468, + 175 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {k} ^ {\\text {v e r t e x}} = \\sum_ {i = 1} ^ {N _ {v}} \\lambda_ {i} \\left(\\log \\left(\\sigma_ {i, k} ^ {2}\\right) + \\frac {\\left\\| \\mu_ {i , k} - \\mu_ {i} ^ {\\prime} \\right\\| ^ {2}}{2 \\sigma_ {i , k} ^ {2}}\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 122, + 179, + 468, + 219 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\lambda_{i}$ is a pre-defined vertex weight and $\\mu_i^\\prime$ is the ground truth vertex position. 
We encourage our network to predict coherent flow and uncertainty maps in areas with no vertices by applying the GNLL loss for each pixel $p$ in UV space:", + "bbox": [ + 75, + 223, + 468, + 284 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {k} ^ {\\text {d e n s e}} = \\sum_ {p \\in | \\mathcal {U} |} \\lambda_ {p} \\left(\\log \\left(\\mathbf {S} _ {k, p} ^ {2}\\right) + \\frac {\\| \\mathbf {F} _ {k , p} - \\mathbf {F} _ {p} ^ {\\prime} \\| ^ {2}}{2 \\mathbf {S} _ {k , p} ^ {2}}\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 102, + 286, + 468, + 327 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\lambda_{p}$ is a pre-defined per-pixel weight and $\\mathbf{F}'$ is the ground truth UV-image flow. The final loss is a weighted sum of these losses, with a decay factor for each iteration of $\\alpha = 0.8$ and a dense weight of $\\lambda_{dense} = 0.01$ :", + "bbox": [ + 75, + 332, + 468, + 393 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {L o s s} = \\sum_ {k = 1} ^ {N _ {\\text {i t e r}}} \\alpha^ {N _ {\\text {i t e r}} - k} \\left(L _ {k} ^ {\\text {v e r t e x}} + \\lambda_ {\\text {d e n s e}} L _ {k} ^ {\\text {d e n s e}}\\right) \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 132, + 397, + 468, + 439 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. 3D Model Fitting", + "text_level": 1, + "bbox": [ + 76, + 449, + 246, + 465 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As in [42], the 3D reconstruction is obtained by jointly fitting a 3D head model and camera parameters to the predicted 2D alignment observations for the entire sequence. This is done by optimizing the energy function $E(\\Phi; A)$ w.r.t to the model parameters $\\Phi$ and alignment $A$ (see Fig. 2). These parameters and the energy terms are defined below.", + "bbox": [ + 75, + 472, + 468, + 564 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e6e44bfea6c668c34742af0b14986f3a32f71da681fe64c2eb8f77dee6bf2d22.jpg", + "image_caption": [ + "Figure 2. An illustration of the 3D model fitting process." + ], + "image_footnote": [], + "bbox": [ + 78, + 578, + 478, + 686 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 Tracking Model and Parameters", + "text_level": 1, + "bbox": [ + 76, + 717, + 357, + 732 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The tracking model consists of a 3D head model and a camera model. A tracking sequence contains $C$ cameras, $F$ frames with a total of $C \\times F$ images.", + "bbox": [ + 75, + 739, + 468, + 787 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D head model. We use FLAME [26] as our 3D head model M. This model consists of $N_{\\nu} = 5023$ vertices, which are controlled by identity shape parameters $\\beta \\in \\mathbb{R}^{300}$ , expression shape parameters $\\phi \\in \\mathbb{R}^{100}$ and $K = 5$ skeletal joint poses $\\theta \\in \\mathbb{R}^{3K + 3}$ (including the root translation) through linear blend skinning [25]. We ignore root, neck and jaw pose and use the FLAME2023 model, which", + "bbox": [ + 75, + 794, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "includes deformations due to jaw rotation within the expression blend-shapes. We also introduce additional static pervertex deformations $\\delta_d\\in \\mathbb{R}^{N_v\\times 3}$ to enhance identity shape detail. 
The local head model vertices can be expressed using its parameters as follows:", + "bbox": [ + 496, + 90, + 890, + 167 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {M} (\\boldsymbol {\\beta}, \\boldsymbol {\\delta} _ {d}, \\boldsymbol {\\phi}, \\boldsymbol {\\theta}) = F L A M E (\\boldsymbol {\\beta}, \\boldsymbol {\\phi}, \\boldsymbol {\\theta}) + \\boldsymbol {\\delta} _ {d} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 557, + 175, + 890, + 191 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The rigid transform $\\mathbf{T}^{\\mathbf{M}}\\in \\mathbb{R}^{3\\times 4}$ represents the head pose, which transforms head model vertices $i$ into world space for each frame $t$ :", + "bbox": [ + 496, + 200, + 890, + 244 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {i, t} ^ {\\mathrm {3 D}} = \\mathbf {T} _ {t} ^ {\\mathbf {M}} \\mathbf {M} _ {i} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 642, + 244, + 890, + 263 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Camera model. The cameras are described by the world-to-camera rigid transform $\\mathbf{T}_{cam} \\in \\mathbb{R}^{3 \\times 4}$ and the pinhole camera projection matrix $\\mathbf{K} \\in \\mathbb{R}^{3 \\times 3}$ defined by a single focal length $f \\in \\mathbb{R}$ parameter. The camera model defines the image-space projection of the 3D vertices in camera $j$ :", + "bbox": [ + 496, + 270, + 890, + 347 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {i, j, t} ^ {\\mathrm {2 D}} = \\mathbf {K} _ {j} \\mathbf {T} _ {j} ^ {c a m} \\mathbf {x} _ {i, t} ^ {\\mathrm {3 D}} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 624, + 354, + 890, + 375 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Parameters. The parameters $\\Psi$ consist of the head model and camera parameters, which are optimized to minimize $E(\\Phi; A)$ . The camera parameters can be fixed to known values, if the calibration is available. 
Expression and poses vary for each frame $t$ , whereas camera, identity shape, and deformation parameters are shared over the sequence.", + "bbox": [ + 496, + 381, + 890, + 472 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\Psi = \\left\\{\\beta , \\Phi_ {F \\times | \\phi |}, \\Theta_ {F \\times | \\theta |}, \\delta_ {\\mathrm {d}}; \\mathbf {T} _ {F \\times 3 \\times 4} ^ {\\mathbf {M}}; \\mathbf {T} _ {C \\times 3 \\times 4} ^ {c a m}, f _ {C} \\right\\} \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 479, + 890, + 500 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 Energy Terms", + "text_level": 1, + "bbox": [ + 500, + 515, + 653, + 531 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The energy function is defined as:", + "bbox": [ + 500, + 539, + 725, + 554 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nE (\\Phi ; A) = E _ {A} + E _ {F L A M E} + E _ {\\text {t e m p}} + E _ {M I C A} + E _ {\\text {d e f o r m}} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 564, + 890, + 580 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$E_{A}$ encourages 2D alignment:", + "bbox": [ + 517, + 590, + 720, + 604 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nE _ {A} = \\sum_ {i, j, t} ^ {N _ {\\nu}, C, F} \\lambda_ {i} \\frac {\\left\\| \\mathbf {x} _ {i , j , t} ^ {\\mathrm {2 D}} - \\mu_ {i , j , t} \\right\\| ^ {2}}{2 \\sigma_ {i , j , t} ^ {2}} \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 581, + 606, + 890, + 650 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where for vertex $i$ seen by camera $j$ in frame $t$ . $\\mu_{i,j,t}$ and $\\sigma_{i,j,t}$ is the 2D location and uncertainty predicted by the final iteration of our 2D alignment network, and $\\mathbf{x}_{i,j,t}^{2D}$ (Eq. (10)) is the 2D camera projection of that vertex.", + "bbox": [ + 496, + 655, + 890, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$E_{FLAME} = \\lambda_{FLAME}(\\| \\beta \\|^{2} + \\| \\Phi \\|^{2})$ encourages the optimizer to explain the data with smaller identity and expression parameters. This leads to face shapes that are statistically more likely [10, 14, 26, 57] and a more accurate 3D reconstruction. We do not penalize joint rotation, face translation or rotation.", + "bbox": [ + 496, + 715, + 890, + 804 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$E_{\\text {temp }}$ applies a loss on the acceleration of the 3D position $\\mathbf{x}_{i,t}^{3\\mathrm{D}}$ of every vertex of the 3D model to prevent jitter and encourage a smoother, more natural face motion:", + "bbox": [ + 496, + 806, + 890, + 849 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nE _ {\\text {t e m p}} = \\lambda_ {\\text {t e m p}} \\sum_ {i, j, t = 2} ^ {N _ {v}, C, F - 1} \\| \\mathbf {x} _ {j, t - 1} ^ {\\mathrm {3 D}} - 2 \\mathbf {x} _ {j, t} ^ {\\mathrm {3 D}} + \\mathbf {x} _ {j, t + 1} ^ {\\mathrm {3 D}} \\| ^ {2} \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 511, + 859, + 890, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "1230", + "bbox": [ + 483, + 924, + 514, + 936 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$E_{MICA} = \\lambda_{MICA} \\| \\mathbf{M}_{\\Phi=0,\\theta=0} - \\mathbf{M}_{MICA} \\|^2$ provides a 3D neutral geometry prior for the optimizer to enable a better disentanglement between identity and expression components. 
It consists of the L2 distance of the neutral head model vertices to the MICA [57] template $\\mathbf{M}_{MICA}$ . This template is computed by predicting the average neutral head vertices using the MICA model [57] for all frames of the sequence. The term also enables a more accurate 3D reconstruction since the model can rely on MICA predictions where the alignment is uncertain, such as in the depth direction or for occluded vertices. In areas of confident alignment, the MICA prediction can be refined.", + "bbox": [ + 76, + 90, + 472, + 271 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$E_{\\text{deform}} = \\lambda_{\\text{deform}} \\| \\delta_{\\mathrm{d}} \\|^2$ encourages per-vertex deformations to be small w.r.t. the FLAME model.", + "bbox": [ + 76, + 272, + 470, + 301 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Multiface Face Tracking Benchmark", + "text_level": 1, + "bbox": [ + 76, + 311, + 395, + 328 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our monocular 3D face tracking benchmark focuses on 3D reconstruction and motion capture accuracy. To evaluate these, we use our proposed screen space motion error (SSME) and the scan-to-mesh chamfer distance (CD).", + "bbox": [ + 76, + 335, + 468, + 397 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/63eba31d72bebffaeb9cd79123be37fd3f1e90c4e64be42185a730efb8d91c24.jpg", + "image_caption": [ + "Figure 3. An illustration of the EPE computation for each frame." + ], + "image_footnote": [], + "bbox": [ + 81, + 404, + 472, + 641 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Screen Space Motion Error. To define the Screen Space Motion Error (SSME), we reformulate face tracking as an optical flow prediction problem over a set of time windows. First, we project the ground truth mesh and predicted mesh into screen space using the respective camera model. Then, we use the screen space coordinates to compute the ground truth optical flow $\\mathbf{f}_{t:t + h}^{\\prime}$ and predicted optical flow $\\mathbf{f}_{t:t + h}$ from frame $t$ to frame $t + h$ for each frame $t\\in [1,\\dots ,F]$ and a sequence of frame windows $h = [1,\\dots ,N_H]$ . For each frame and frame window, the average end-point-error $EPE_{t:t + h}$ is computed by averaging the L2-distance between ground truth and predicted optical flow for each pixel (see Fig. 3).", + "bbox": [ + 76, + 676, + 468, + 872 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nE P E _ {t: t + h} = \\left\\| V \\odot \\left(\\mathbf {f} _ {t: t + h} - \\mathbf {f} _ {t: t + h} ^ {\\prime}\\right) \\right\\| ^ {2} \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 883, + 468, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $V$ is a mask to separate different face regions and $\\odot$ is the Hadamard product. See Fig. 
3 for a visual reference.", + "bbox": [ + 498, + 90, + 890, + 119 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The screen space motion error $SSME_{h}$ for frame window $h$ is then defined as the mean of all EPEs over all frames $t$ where frame $t + h$ exists:", + "bbox": [ + 498, + 121, + 890, + 165 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS S M E _ {h} = \\frac {1}{F - h} \\sum_ {t = 1} ^ {t + h \\leq F} E P E _ {t: t + h} \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 581, + 166, + 890, + 207 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, to summarize tracking performance in one value, we compute the average screen space motion error $\\overline{SSME}$ over all frame windows as", + "bbox": [ + 498, + 210, + 890, + 255 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\overline {{S S M E}} = \\sum_ {h = 1} ^ {N _ {H}} S S M E _ {h} \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 622, + 256, + 890, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In other words, $\\overline{SSME}$ measures the average trajectory accuracy of each pixel over a time horizon of $N_{H}$ frames. We choose a maximum frame window of $N_{H} = 30$ (1 second) since most human expressions are performed within this time frame. Because the screen space motion is directly affected by most face-tracking parameters such as intrinsics, pose, and face shape, it also measures their precision in a holistic manner. In contrast to prior works and benchmarks that use sparse key-points, SSME covers the motion of all visible face regions and is invariant to mesh topology. As it operates in screen space, it does not require additional alignment and works with all camera models, unlike 3D reconstruction or depth errors. In our benchmark, we evaluate SSME over a set of masks for semantically meaningful face regions (face, eyes, nose, mouth, and ears) (Fig. 3), permitting a more nuanced analysis of the tracking performance.", + "bbox": [ + 496, + 303, + 892, + 546 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3D Reconstruction. To complete our benchmark, we additionally measure the chamfer distance (CD) to account for the depth dimension. Similar to [34], the tracked mesh is rigidly aligned to the ground truth mesh using 7 key-points and ICP. Then, the distance of each ground truth vertex with respect to the predicted mesh is computed and averaged. For a detailed explanation, we defer to the NoW benchmark [34]. Just like the SSME, we evaluate the CD for the same set of face regions to provide a more detailed analysis of reconstruction accuracy, similar to [6].", + "bbox": [ + 496, + 551, + 892, + 704 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multiface Dataset. We build our benchmark around the Multiface dataset [44]. Multiface consists of multi-view videos with high quality topologically consistent 3D registrations. High-resolution videos are captured at 30 FPS from a large variety of calibrated views. We limit the evaluation data to a manageable size by carefully selecting a subset of 86 sequences with a diverse set of view directions and facial performances (see Appendix C).", + "bbox": [ + 496, + 710, + 890, + 833 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 844, + 633, + 861 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training data. 
To train the 2D alignment network, we use a combined dataset made up of FaceScape [47], Stirling [1],", + "bbox": [ + 500, + 869, + 890, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "1231", + "bbox": [ + 483, + 926, + 513, + 936 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and FaMoS [3]. Where a FLAME [26] registration is not available, we fit the FLAME template mesh to the 3D scan through semi-automatic key-point annotation and commercial topology fitting software. For an accurate capture of face motion, we auto-announce expression scans with additional key-points propagated with optical flow (more information in Appendix D). The ground truth image space vertex positions $\\mu^{\\prime}$ are obtained by projecting the vertices of the fitted FLAME mesh into screen space using the available camera calibrations.", + "bbox": [ + 75, + 90, + 472, + 241 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training strategy for 2D alignment network. We use Segformer-b5 (pre-trained on ImageNet [11]) as our backbone, with $D_{img} = 512$ , $D_{uv} = 64$ and $N_{iter} = 3$ . We use the RAFT-L configuration for the update module and keep its hyperparameters when possible [36]. We optimize the model for 6 epochs using the AdamW optimizer [27], an initial learning rate of $1 \\times 10^{-4}$ and a decay of 0.1 every 2 epochs. We use image augmentation such as random scaling, rotation, and color corruption [42], synthetic occlusions [39] and synthetic backgrounds (see Appendix D).", + "bbox": [ + 75, + 253, + 470, + 405 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3D model fitting. To minimize the energy function and obtain tracking parameters, we use the AdamW optimizer with an initial learning rate of $1 \\times 10^{-2}$ and a automatic learning rate scheduler with a decay factor of 0.5 and patience of 30 steps, until convergence. We enable $\\delta_{d}$ only for multi-view reconstruction, and only for the nose region.", + "bbox": [ + 75, + 416, + 470, + 507 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We implement and test against the most recent publicly available methods for single image regression-based approaches 3DDFAv2 [19], SADRNet [32], PRNet [41], DECA (coarse) [14], EMOCA (coarse) [10], and HRN [24]. We extend the ability of these methods to use temporal priors by applying a simple temporal Gaussian filter to the screen-space vertices. We also include the popular photometric optimization-based approach MPT [57]. Lastly, we compare against the key-point-only optimization-based method Dense proposed by [42] on public benchmarks.", + "bbox": [ + 75, + 518, + 468, + 670 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Multiface Benchmark", + "text_level": 1, + "bbox": [ + 76, + 680, + 282, + 695 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We divide our Multiface benchmark into two categories: Without temporal information sharing, where each method is restricted to operate on single images, and with (both forward and backward) temporal information sharing, where each method is allowed to use the entire sequence as observations. Our method significantly outperforms the best publicly available method by $54\\%$ w.r.t. face-region SSME on both on single-image and by $46\\%$ on sequence prediction. This confirms the superior 2D alignment accuracy of our method. 
Despite using only 2D alignment as supervision, our method performs $8\\%$ better in terms of 3D reconstruction (CD) than the photometric optimization approach MPT [57] (see Tab. 2. To our surprise, MPT performs in", + "bbox": [ + 75, + 703, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/56e140f3608727c83214744d118f0fe14a861d5fb215c184fc93d6b0ed9865fc.jpg", + "image_caption": [ + "Figure 4. $SSME_h$ plotted over all frame horizons for each evaluated tracker for single-image and full sequence tracking (right). Lower $SSME_h$ in smaller frame horizons $h$ (left in the graph) means short-term temporal stability while lower $SSME_h$ in larger frame horizons (right in the graph) means better long-term tracking consistency. Our tracker performs significantly better over every time horizon." + ], + "image_footnote": [], + "bbox": [ + 498, + 97, + 694, + 247 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/02092b215babc05f1b379f4a21721f8f37cdf01bde2c8c8349351a72b42e2fce.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 700, + 98, + 893, + 247 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ferior w.r.t. motion error than some regression-based models — this is likely due to uniform lighting and texture in the Multiface dataset. Qualitative results Fig. 5 confirm that methods using photometric errors (DECA, HRN, MPT) perform inferior w.r.t. screen space motion in areas without key-point supervision such as cheeks and forehead. Plotting the $SSME_h$ over different time windows $h$ (see Fig. 4) gives a previously unseen overview of temporal stability. Regression-based methods suffer from high short-term error $(SSME_1)$ which is due to temporal instability and jitter. As expected, introducing temporal smoothing improves this issue and the overall $SSME$ for these methods. Our method achieves very low short-term SSME even with single image prediction, which indicates the high robustness and accuracy of the alignment network. As expected, introducing temporal priors reduces $SSME$ .", + "bbox": [ + 496, + 373, + 893, + 616 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. FaceScape Benchmark", + "text_level": 1, + "bbox": [ + 500, + 625, + 710, + 640 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/d8beaa7926536b02b6c5f4112d27ef9b2453ca2da73da2a5a1b5a0f276e9de4e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | CD ↓ (mm) | NME ↓ (rad)
MGCNet [35] | 4.00 | 0.093
PRNet [41] | 3.56 | 0.126
SADRNet [32] | 6.75 | 0.133
DECA [14] | 4.69 | 0.108
3DDFAv2 [19] | 3.60 | 0.096
HRN [24] | 3.67 | 0.087
Ours | 2.21 | 0.083
", + "bbox": [ + 573, + 652, + 820, + 761 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Results on the FaceScape benchmark [47].", + "bbox": [ + 540, + 771, + 846, + 786 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We also compare our method on the FaceScape benchmark [47], which measures 3D reconstruction accuracy from 2D images under large view (up to $90^{\\circ}$ ) and expression variations. On this benchmark, we outperform the best previous regression-based methods by $38\\%$ in terms of CD and $4.6\\%$ in terms of mean normal error (NME) Tab. 1. This shows that our method can accurately reconstruct faces even", + "bbox": [ + 496, + 794, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "1232", + "bbox": [ + 483, + 925, + 514, + 936 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/771b770c20257cbf42592d1d6888a33d7794b9e4137f3b996e0cf10857f23ad1.jpg", + "image_caption": [ + "Figure 5. Qualitative results on two sequences (top and bottom 3 rows) of our Multiface benchmark. Warmer colors represent high error, while colder colors represent low error. DECA [14], HRN [24], and MPT [57] struggle with motion in the cheek and forehead region, which is visible in the SSME error plot (right columns). Despite using only 2D alignment as supervision, our method achieves a better 3D reconstruction (CD) (center columns)." + ], + "image_footnote": [], + "bbox": [ + 133, + 85, + 834, + 334 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/91d3e82e28e9b8e96467d726c70e25caf99982bf905198fd0516bc9da58cce3a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 78, + 398, + 910, + 542 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/79249166a7cd09452e670256781ede41d1f959a61379f989c54cd1dd57ec8960.jpg", + "table_caption": [ + "Table 2. Results on our Multiface tracking benchmark with and without temporal information sharing. Our method consistently outperforms previous methods on every single category, metric and face region." + ], + "table_footnote": [], + "table_body": "
 | Single-view | Multi-view
Method | Error (mm) ↓ | Error (mm) ↓
 | Median | Mean | Std | Median | Mean | Std
MGCNet [35] | 1.31 | 1.87 | 2.63 | - | - | -
PRNet [41] | 1.50 | 1.98 | 1.88 | - | - | -
DECA [14] | 1.09 | 1.38 | 1.18 | - | - | -
Deep3D [12] | 1.11 | 1.41 | 1.21 | 1.08 | 1.35 | 1.15
Dense [42] | 1.02 | 1.28 | 1.08 | 0.81 | 1.01 | 0.84
MICA [57] | 0.90 | 1.11 | 0.92 | - | - | -
TokenFace [38] | 0.76 | 0.95 | 0.82 | - | - | -
Ours | 0.87 | 1.07 | 0.88 | 0.71 | 0.88 | 0.73
", + "bbox": [ + 106, + 585, + 442, + 729 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. Results on the NoW Challenge [34]. Multi-view evaluation is done as in [42]. Multi-view results for [12] and [42] are reported by [42].", + "bbox": [ + 75, + 738, + 468, + 781 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "under large view deviations.", + "bbox": [ + 76, + 790, + 264, + 806 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Now Challenge", + "text_level": 1, + "bbox": [ + 76, + 816, + 230, + 832 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The NoW benchmark is a public benchmark for evaluating neutral head reconstruction from 2D images captured indoors and outdoors, with different expressions, and under variations in lighting conditions and occlusions. We", + "bbox": [ + 75, + 839, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "evaluate our method on the non-metrical challenge (Tab. 3). For single-view reconstruction, our model outperforms our neutral shape predictor MICA [57] by $4\\%$ on mean scan-to-mesh distance. For the multi-view case, we outperform the baseline Dense [42] by $13\\%$ , likely due to our method's high 2D alignment accuracy, better neutral shape priors, and per-vertex deformations. TokenFace [38] performs better for the single-view case, however, their predictions could be integrated into our pipeline since they use the FLAME topology. Importantly, our network is able to generalize to these in-the-wild images despite being trained only on in-the-lab data captured under controlled lighting conditions. An important sub-task for 3D face trackers is to disentangle the identity and expression components of the face shape. The outstanding results on the NoW benchmark indicate the ability of our tracker to accomplish this.", + "bbox": [ + 496, + 590, + 893, + 832 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Downstream Tasks", + "text_level": 1, + "bbox": [ + 500, + 845, + 681, + 859 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In the following, we show how we enhance downstream models using our face tracker.", + "bbox": [ + 498, + 869, + 890, + 901 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "1233", + "bbox": [ + 483, + 925, + 514, + 936 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3D Head Avatar Synthesis. Recent head avatar synthesis methods heavily rely on photometric head trackers to generate face alignment priors [17, 53, 56]. INSTA [56], a top-performing model, uses MPT [57]. We modify INSTA by replacing their tracker with ours. We compare our enhanced FlowFace-INSTA to the baseline MPT-INSTA. On their publicly available dataset, we outperform MPT-INSTA by $10.5\\%$ on perceptual visual fidelity (LPIPS). On our Multiface benchmark videos, we outperform MPT-INSTA by $20.3\\%$ on LPIPS. Detailed results can be viewed in Appendix G. These results demonstrate how better face trackers can directly improve performance on down-stream tasks which highlights the importance of our research.", + "bbox": [ + 75, + 90, + 472, + 287 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Speech-driven 3D facial animation. The field of speech-driven facial animation often suffers from data sparsity [9, 13, 46]. To alleviate this issue, we generate 3D face meshes using the multi-view video dataset MEAD [40]. 
In using this generated dataset to augment the training of the state-of-the-art model CodeTalker [46] (see Appendix H), we are able to improve from a lip vertex error of $3.13 \\times 10^{-5}$ to $2.85 \\times 10^{-5}$ on the VOCASET benchmark [9], an $8.8\\%$ improvement. This underlines the benefit of high-accuracy video face trackers for large-scale data generation.", + "bbox": [ + 75, + 300, + 470, + 450 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5.2D Alignment", + "text_level": 1, + "bbox": [ + 76, + 463, + 220, + 478 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To show the benefit of our 2D alignment model architecture, we conduct an evaluation on our validation set, which consists of 84 subjects of our dataset. We implement the dense landmark model of [42] (ResNet-101 backbone) and adapt it to output FLAME vertex alignment and uncertainty. We also implement PRNet [41] and modify it in the same way. We retrain each method on our training set. In evaluate the 2D alignment accuracy with respect to normalized mean error (NME) of every vertex in the face area (Fig. 14, green vertices). With an NME of 1.30, our method performs significantly better than the ResNet architecture of Dense [42] (NME = 1.63), and PRNet (NME = 2.52). We note that the accuracy of uncertainty cannot be evaluated with NME. A qualitative comparison can be viewed in Fig. 17.", + "bbox": [ + 75, + 487, + 468, + 698 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.6. Ablation Studies", + "text_level": 1, + "bbox": [ + 76, + 710, + 240, + 724 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "2D alignment network. To analyze the effect of different feature encoder backbones, we replace our backbone with different variations of the Segformer model and also test the CNN-based backbone BiSeNet-v2 [49] (see Tab. 4). As expected, vision-transformer-based networks show better performance. Experimenting with the number of iterations $N_{iter}$ for the update module, we find that multiple iterations instead of one improves the performance. Finally, we confirm the superior performance of our 2D alignment network compared to the ResNet-101-based network of [42] mentioned in Sec. 4.5.", + "bbox": [ + 75, + 734, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/a1ffb61ed11762e1f606889e9cef44b24a2d5f37ee8cf3aaed3b788572941ffa.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Backbone | Niter | #Param | latency (ms) | CD↓ | SSME↓
ResNet-101 | - | 73.4M | 9 | 1.54 | 3.90
BiSeNet-v2 | 3 | 17.6M | 23 | 1.21 | 3.52
MiT-b1 | 3 | 17.3M | 29 | 1.22 | 3.21
MiT-b2 | 3 | 31.0M | 46 | 1.20 | 2.78
MiT-b5 | 1 | 88.2M | 66 | 1.25 | 2.70
MiT-b5 | 2 | 88.2M | 71 | 1.21 | 2.61
MiT-b5 | 3 | 88.2M | 75 | 1.18 | 2.58
MiT-b5 | 4 | 88.2M | 80 | 1.23 | 2.62
", + "bbox": [ + 501, + 90, + 893, + 204 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3D model fitting. We show in Tab. 5 the benefit of integrating the MICA neutral shape prediction on the NoW Challenge validation set. The significant performance gain on single-image predictions shows that our 3D tracking pipeline can integrate MICA predictions very well, even improving them. We also show the benefit of predicting a dense face alignment in conjunction with per-vertex deformations in multi-view settings. This shows that our 2D alignment is precise enough to predict face shapes that lie outside of the FLAME blend-shape space, which previous optimization-based methods [42, 57] cannot achieve. For a qualitative analysis, see Appendix E.", + "bbox": [ + 496, + 276, + 890, + 458 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/9781b5ce269c03d9518a5cde998fc7341c467a44016f5a6661ea04d506a483ce.jpg", + "table_caption": [ + "Table 4. Ablations for backbone architectures and hyperparameters of the 2D alignment network on our Multiface benchmark. Latency is evaluated on a Quadro RTX 5000 GPU." + ], + "table_footnote": [], + "table_body": "
 | Single-view | Multi-view
Method | Error (mm) | Error (mm)
 | Median | Mean | Std | Median | Mean | Std
Ours w/o MICA | 0.99 | 1.23 | 1.03 | 0.71 | 0.88 | 0.76
MICA only | 0.91 | 1.13 | 0.94 | - | - | -
Ours w/o δd | - | - | - | 0.68 | 0.84 | 0.72
Ours | 0.82 | 1.02 | 0.85 | 0.67 | 0.83 | 0.71
", + "bbox": [ + 514, + 472, + 877, + 566 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 5. Ablations for the 3D model fitting module on single and multi-view reconstruction on the NoW validation set.", + "bbox": [ + 498, + 575, + 890, + 603 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion and Future Work", + "text_level": 1, + "bbox": [ + 500, + 630, + 769, + 645 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This paper presents a state-of-the-art face tracking pipeline with a highly robust and accurate 2D alignment module. Its performance is thoroughly validated on a variety of benchmarks and downstream tasks. However, the proposed two-stage pipeline is not fully differentiable, which prevents end-to-end learning. Furthermore, our training data is limited to data captured in-the-lab. In future work, we intend to extend the alignment network to directly predict depth as well, obviating the need for the 3D model fitting step. Synthetic datasets [42] could alleviate the data issue.", + "bbox": [ + 496, + 657, + 890, + 808 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We're confident that our tracker will accelerate research in downstream tasks by generating large-scale face capture data using readily available video datasets [8, 29, 50]. We also believe that our novel motion capture evaluation benchmark will focus and align future research efforts to create even more accurate methods.", + "bbox": [ + 496, + 810, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "1234", + "bbox": [ + 483, + 926, + 514, + 936 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Stirling/esrc 3d face database. https://pics.stir.ac.uk/ESRC/. Accessed: 2023-10-25. 5, 2, 4", + "[2] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques, page 187-194, USA, 1999. ACM Press/Addison-Wesley Publishing Co. 1, 2", + "[3] Timo Bolkart, Tianye Li, and Michael J. Black. Instant multi-view head capture through learnable registration. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 768-779, 2023. 6, 2", + "[4] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In International Conference on Computer Vision, 2017. 2", + "[5] Chen Cao, Mengei Chai, Oliver Woodford, and Linjie Luo. Stabilized real-time face tracking via a learned dynamic rigidity prior. ACM Trans. Graph., 37(6), 2018. 2", + "[6] Zenghao Chai, Haoxian Zhang, Jing Ren, Di Kang, Zhengzhuo Xu, Xuefei Zhe, Chun Yuan, and Linchao Bao. Really: Rethinking the evaluation of 3d face reconstruction, 2022. 2, 5", + "[7] Zenghao Chai, Tianke Zhang, Tianyu He, Xu Tan, Tadas Baltrusaitis, HsiangTao Wu, Runnan Li, Sheng Zhao, Chun Yuan, and Jiang Bian. Hiface: High-fidelity 3d face reconstruction by learning static and dynamic details, 2023. 2", + "[8] J. S. Chung, A. Nagrani, and A. Zisserman. Voxceleb2: Deep speaker recognition. In INTERSPEECH, 2018. 8", + "[9] Daniel Cudeiro, Timo Bolkart, Cassidy Laidlaw, Anurag Ranjan, and Michael Black. Capture, learning, and synthesis of 3D speaking styles. In Proceedings IEEE Conf. 
on Computer Vision and Pattern Recognition (CVPR), pages 10101-10111, 2019. 8, 7", + "[10] Radek Danecek, Michael J. Black, and Timo Bolkart. Emoca: Emotion driven monocular face capture and animation, 2022. 1, 2, 4, 6, 7", + "[11] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 6, 1", + "[12] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In IEEE Computer Vision and Pattern Recognition Workshops, 2019. 2, 7", + "[13] Yingruo Fan, Zhaojiang Lin, Jun Saito, Wenping Wang, and Taku Komura. Faceformer: Speech-driven 3d facial animation with transformers. arXiv preprint arXiv:2112.05329, 2021.8", + "[14] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3d face model from in-the-wild images. CoRR, abs/2012.04012, 2020. 1, 4, 6, 7, 10, 11", + "[15] Pablo Garrido, Michael Zollhöfer, Dan Casas, Levi Valgaerts, Kiran Varanasi, Patrick Pérez, and Christian" + ], + "bbox": [ + 78, + 114, + 468, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Theobalt. Reconstruction of personalized 3d face rigs from monocular video. ACM Trans. Graph., 35(3), 2016. 2", + "[16] Pablo Garrido, Michael Zollhöfer, Chenglei Wu, Derek Bradley, Patrick Pérez, Thabo Beeler, and Christian Theobalt. Corrective 3d reconstruction of lips from monococular video. ACM Trans. Graph., 35(6), 2016. 2", + "[17] Philip-William Grassal, Malte Prinzler, Titus Leistner, Carsten Rother, Matthias Nießner, and Justus Thies. Neural head avatars from monocular rgb videos. arXiv preprint arXiv:2112.01554, 2021. 8", + "[18] Ivan Grishchenko, Artsiom Ablavatski, Yury Kartynnik, Karthik Raveendran, and Matthias Grundmann. Attention mesh: High-fidelity face mesh prediction in real-time. CoRR, abs/2006.10962, 2020. 2", + "[19] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Yang Fan, Zhen Lei, and Stan Li. Towards Fast, Accurate and Stable 3D Dense Face Alignment, pages 152-168. 2020. 1, 2, 6, 7, 10, 11", + "[20] Riza Alp Güler, George Trigeorgis, Epameinondas Antonakos, Patrick Snape, Stefanos Zafeiriou, and Iasonas Kokkinos. Densereg: Fully convolutional dense shape regression in-the-wild, 2017. 2", + "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015. 2", + "[22] A 3D Face Model for Pose and Illumination Invariant Face Recognition, Genova, Italy, 2009. IEEE. 2", + "[23] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. 7", + "[24] Biwen Lei, Jianqiang Ren, Mengyang Feng, Miaomiao Cui, and Xuansong Xie. A hierarchical representation network for accurate and detailed face reconstruction from in-the-wild images, 2023. 1, 2, 6, 7, 10, 11", + "[25] J. P. Lewis, Matt Cordner, and Nickson Fong. Pose space deformation: A unified approach to shape interpolation and skeleton-driven deformation. In Proceedings of the 27th Annual Conference on Computer Graphics and Interactive Techniques, page 165-172, USA, 2000. ACM Press/Addison-Wesley Publishing Co. 4", + "[26] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. 
Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 2, 4, 6, 3, 5, 7", + "[27] Ilya Loshchilov and Frank Hutter. Fixing weight decay regularization in adam. CoRR, abs/1711.05101, 2017. 6", + "[28] Araceli Morales, Gemma Piella, and Federico M. Sukno. Survey on 3d face reconstruction from uncalibrated images. CoRR, abs/2011.05740, 2020. 2", + "[29] Arsha Nagrani, Joon Son Chung, Weidi Xie, and Andrew Zisserman. Voxceleb: Large-scale speaker verification in the wild. Computer Science and Language, 2019. 8", + "[30] Andrés Prados-Torreblanca, José M Buenaposada, and Luis Baumela. Shape preserving facial landmarks with graph attention networks. In 33rd British Machine Vision Conference 2022, BMVC 2022, London, UK, November 21-24, 2022. BMVA Press, 2022. 2" + ], + "bbox": [ + 503, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "1235", + "bbox": [ + 483, + 926, + 514, + 936 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[31] Aashish Rai, Hiresh Gupta, Ayush Pandey, Francisco Vicente Carrasco, Shingo Jason Takagi, Amaury Aubel, Daeil Kim, Aayush Prakash, and Fernando de la Torre. Towards realistic generative 3d face models, 2023. 2", + "[32] Zeyu Ruan, Changqing Zou, Longhai Wu, Gangshan Wu, and Limin Wang. SADRNet: Self-aligned dual face regression networks for robust 3d dense face alignment and reconstruction. IEEE Transactions on Image Processing, 30: 5793-5806, 2021. 2, 6, 7, 10, 11", + "[33] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zh-moginov, and Liang-Chieh Chen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019. 2", + "[34] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael Black. Learning to regress 3d face shape and expression from an image without 3d supervision. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2019, 2, 5, 7, 3", + "[35] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Mingmin Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3d face reconstruction by occlusion-aware multi-view geometry consistency. arXiv preprint arXiv:2007.12494, 2020. 2, 6, 7", + "[36] Zachary Teed and Jia Deng. RAFT: recurrent all-pairs field transforms for optical flow. CoRR, abs/2003.12039, 2020. 3, 6, 1", + "[37] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2face: Real-time face capture and reenactment of rgb videos, 2020. 1, 2", + "[38] Zhang Tianke, Chu Xuangeng, Liu Yunfei, Lin Lijian, Yang Zhendong, Xu Zhengzhuo, Cao Chengkun, Yu Fei, Zhou Changyin, Yuan Chun, and Yu Li. Accurate 3d face reconstruction with facial component tokens. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2023. 2, 7", + "[39] Kenny T. R. Voo, Liming Jiang, and Chen Change Loy. Delving into high-quality synthetic face occlusion segmentation datasets. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2022. 6, 3", + "[40] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 8, 6", + "[41] Yue Wang and Justin M. Solomon. Prnet: Self-supervised learning for partial-to-partial registration, 2019. 
2, 6, 7, 8", + "[42] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Matthew Johnson, Jingjing Shen, Nikola Milosavljevic, Daniel Wilde, Stephan Garbin, Chirag Raman, Jamie Shotton, Toby Sharp, Ivan Stojiljkovic, Tom Cashman, and Julien Valentin. 3d face reconstruction with dense landmarks, 2022. 1, 2, 4, 6, 7, 8, 3, 5", + "[43] Chenglei Wu, Derek Bradley, Markus Gross, and Thabo Beeler. An anatomically-constrained local deformation model for monocular face capture. ACM Trans. Graph., 35 (4), 2016. 2", + "[44] Cheng-hsin Wu, Ningyuan Zheng, Scott Ardisson, Rohan Bali, Danielle Belko, Eric Brockmeyer, Lucas Evans, Tim" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Othy Godisart, Hyowon Ha, Xuhua Huang, Alexander Hypes, Taylor Koska, Steven Krenn, Stephen Lombardi, Xiaomin Luo, Kevyn McPhail, Laura Millerschoen, Michal Perdoch, Mark Pitts, Alexander Richard, Jason Saragih, Junko Saragih, Takaaki Shiratori, Tomas Simon, Matt Stewart, Autumn Trimble, Xinshuo Weng, David Whitewolf, Chenglei Wu, Shouou-I Yu, and Yaser Sheikh. Multiface: A dataset for neural face rendering. In arXiv, 2022. 1, 5, 2, 6", + "[45] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. In Neural Information Processing Systems (NeurIPS), 2021. 3, 1", + "[46] Jinbo Xing, Menghan Xia, Yuechen Zhang, Xiaodong Cun, Jue Wang, and Tien-Tsin Wong. Codetalker: Speech-driven 3d facial animation with discrete motion prior, 2023. 8, 7", + "[47] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction, 2020. 2, 5, 6, 4", + "[48] Hongwei Yi, Hualin Liang, Yifei Liu, Qiong Cao, Yandong Wen, Timo Bolkart, Dacheng Tao, and Michael J. Black. Generating holistic 3d human motion from speech, 2023. 2", + "[49] Changqian Yu, Changxin Gao, FlowFace-INSTA to the baseline MPT-INSTA Jingbo Wang, Gang Yu, Chunhua Shen, and Nong Sang. Bisenet V2: bilateral network with guided aggregation for real-time semantic segmentation. CoRR, abs/2004.02147, 2020. 8, 5", + "[50] Jianhui Yu, Hao Zhu, Liming Jiang, Chen Change Loy, Weidong Cai, and Wayne Wu. CelebV-Text: A large-scale facial text-video dataset. In CVPR, 2023. 8", + "[51] Shifeng Zhang, Xiangyu Zhu, Zhen Lei, Hailin Shi, Xiaobo Wang, and Stan Z. Li. S $^3$ fd: Single shot scale-invariant face detector, 2017. 3", + "[52] Yufeng Zheng, Victoria Fernández Abrevaya, Xu Chen, Marcel C. Bühler, Michael J. Black, and Otmar Hilliges. I M avatar: Implicit morphable head avatars from videos. CoRR, abs/2112.07471, 2021. 2", + "[53] Yufeng Zheng, Wang Yifan, Gordon Wetzstein, Michael J. Black, and Otmar Hilliges. Pointavatar: Deformable point-based head avatars from videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 8", + "[54] Zhenglin Zhou, Huaxia Li, Hong Liu, Nanyang Wang, Gang Yu, and Rongrong Ji. Star loss: Reducing semantic ambiguity in facial landmark detection, 2023. 2", + "[55] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z. Li. Face alignment across large poses: A 3d solution. CoRR, abs/1511.07212, 2015. 2", + "[56] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Instant volumetric head avatars. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4574-4584, 2022. 
8, 4, 6", + "[57] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces, 2022. 1, 2, 4, 5, 6, 7, 8", + "[58] Michael Zollhöfer, Justus Thies, Darek Bradley, Pablo Garrido, Thabo Beeler, Patrick Pérez, Marc Stamminger," + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "1236", + "bbox": [ + 483, + 926, + 514, + 936 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Matthias Nießner, and Christian Theobalt. State of the art on monocular 3d face reconstruction, tracking, and applications. 2018. 1, 2", + "bbox": [ + 109, + 90, + 470, + 133 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "1237", + "bbox": [ + 483, + 926, + 514, + 938 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_model.json b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_model.json new file mode 100644 index 0000000000000000000000000000000000000000..d3a98545868e4411be8f7c81c5adf8e769e1e0dd --- /dev/null +++ b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_model.json @@ -0,0 +1,2400 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.101, + 0.131, + 0.872, + 0.153 + ], + "angle": 0, + "content": "3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.181, + 0.203, + 0.198 + ], + "angle": 0, + "content": "Felix Taubner" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.182, + 0.354, + 0.197 + ], + "angle": 0, + "content": "Prashant Raina" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.182, + 0.488, + 0.197 + ], + "angle": 0, + "content": "Mathieu Tuli" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.182, + 0.623, + 0.197 + ], + "angle": 0, + "content": "Eu Wern Teh" + }, + { + "type": "text", + "bbox": [ + 0.653, + 0.182, + 0.728, + 0.197 + ], + "angle": 0, + "content": "Chul Lee" + }, + { + "type": "text", + "bbox": [ + 0.757, + 0.182, + 0.88, + 0.199 + ], + "angle": 0, + "content": "Jinmiao Huang" + }, + { + "type": "text", + "bbox": [ + 0.424, + 0.2, + 0.547, + 0.216 + ], + "angle": 0, + "content": "LG Electronics" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.22, + 0.748, + 0.234 + ], + "angle": 0, + "content": "{prashant.raina, mathieu.tuli, euwern.teh, clee.lee}@lge.com" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.302, + 0.474, + 0.665 + ], + "angle": 0, + "content": "When working with 3D facial data, improving fidelity and avoiding the uncanny valley effect is critically dependent on accurate 3D facial performance capture. 
Because such methods are expensive and due to the widespread availability of 2D videos, recent methods have focused on how to perform monocular 3D face tracking. However, these methods often fall short in capturing precise facial movements due to limitations in their network architecture, training, and evaluation processes. Addressing these challenges, we propose a novel face tracker, FlowFace, that introduces an innovative 2D alignment network for dense pervertex alignment. Unlike prior work, FlowFace is trained on high-quality 3D scan annotations rather than weak supervision or synthetic data. Our 3D model fitting module jointly fits a 3D face model from one or many observations, integrating existing neutral shape priors for enhanced identity and expression disentanglement and per-vertex deformations for detailed facial feature reconstruction. Additionally, we propose a novel metric and benchmark for assessing tracking accuracy. Our method exhibits superior performance on both custom and publicly available benchmarks. We further validate the effectiveness of our tracker by generating high-quality 3D data from 2D videos, which leads to performance gains on downstream tasks." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.679, + 0.21, + 0.695 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.704, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Access to 3D face tracking data lays the foundation for many computer graphics tasks such as 3D facial animation, 3D human avatar reconstruction, and expression transfer. Obtaining high visual fidelity, portraying subtle emotional cues, and preventing the uncanny valley effect in these downstream tasks is reliant on high motion capture accuracy. As a result, a common approach to generating 3D face tracking data is to use 3D scans and visual markers however, this process is cost-intensive. To alleviate this burden, building computational models to obtain 3D faces from monocular 2D videos and images has cemented its importance in recent years and seen great progress [10, 14, 19, 24, 37, 42, 57]. Nevertheless, three" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.27, + 0.895, + 0.408 + ], + "angle": 0, + "content": "issues persist: First, current methods rely heavily on sparse landmarks and photometric similarity, which is computationally expensive and ineffective in ensuring accurate face motion. Second, the monocular face tracking problem is both ill-posed and contains a large solution space dependent on camera intrinsics, pose, head shape, and expression [58]. Third, current benchmarks for this task neglect the temporal aspect of face tracking and do not adequately evaluate facial motion capture accuracy." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.412, + 0.896, + 0.761 + ], + "angle": 0, + "content": "To address the aforementioned issues, we introduce a novel 3D face tracking model called FlowFace, consisting of a versatile two-stage pipeline: A 2D alignment network that predicts the screen-space positions of each vertex of a 3D morphable model [2] (3DMM) and an optimization module that jointly fits this model across multiple views by minimizing an alignment energy function. Unlike traditional methods that rely on sparse landmarks and photometric consistency, FlowFace uses only 2D alignment as input signal, similar to recent work [42]. This alleviates the computational burden of inverse rendering and allows joint reconstruction using a very large number of observations. 
We enhance previous work in four ways: (1) The 2D alignment network features a novel architecture with a vision-transformer backbone and an iterative, recurrent refinement block. (2) In contrast to previous methods that use weak supervision or synthetic data, the alignment network is trained using high-quality annotations from 3D scans. (3) The alignment network predicts dense, per-vertex alignment instead of key-points, which enables the reconstruction of finer details. (4) We integrate an off-the-shelf neutral shape prediction model to improve identity and expression disentanglement." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.897, + 0.903 + ], + "angle": 0, + "content": "In addition, we present the screen-space motion error (SSME) as a novel face tracking metric. Based on optical flow, SSME computes and contrasts screen-space motion, aiming to resolve the limitation observed in existing evaluation methods. These often rely on sparse key points, synthetic annotations, or RGB/3D reconstruction errors, and lack a thorough and comprehensive measurement of temporal consistency. Using the Multiface [44] dataset, we develop a 3D face tracking benchmark around this metric." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.925, + 0.516, + 0.937 + ], + "angle": 0, + "content": "1227" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.244 + ], + "angle": 0, + "content": "Finally, through extensive experiments on available benchmarks, we show that our method significantly outperforms the state-of-the-art on various tasks. To round off our work, we demonstrate how our face tracker can positively affect the performance of downstream tasks, including speech-driven 3D facial animation and 3D head avatar synthesis. Specifically, we demonstrate how our method can be used to generate high-quality data — comparable to studio-captured data — for both these tasks by using it to augment existing models to achieve state-of-the-art results." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.258, + 0.22, + 0.274 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.284, + 0.47, + 0.329 + ], + "angle": 0, + "content": "Uncalibrated 3D Face Reconstruction. Previous work reconstructing 3D face shapes from uncalibrated 2D images or video fall into two broad categories:" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.33, + 0.473, + 0.648 + ], + "angle": 0, + "content": "Optimization-based methods recover face shape and motion by jointly optimizing 3D model parameters to fit the 2D observations. They traditionally treat this optimization as an inverse rendering problem [15, 16, 37, 43, 48, 52, 57], using sparse key-points as guidance. Typically, they employ geometric priors such as 3DMMs [2, 6, 22, 26, 47], texture models, simplified illumination models, and temporal priors. Some methods use additional constraints such as depth [37] or optical flow [5]. [58] and [28] present detailed surveys of such methods. Most methods use 3DMMs to disentangle shape and expression components. MPT [57] is the first method to integrate metrical head shape priors predicted by a deep neural network (DNN). However, photometric and sparse landmark supervision is not sufficient to obtain consistent and accurate face alignment, especially in areas not covered by landmarks and or of low visual saliency. 
More recently, [42] proposes to use only 2D face alignment (dense landmarks) as supervision, avoiding the computationally expensive inverse rendering process. Our method extends this idea with an improved 2D alignment module, better shape priors, and per-vertex deformation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.648, + 0.473, + 0.86 + ], + "angle": 0, + "content": "Regression-based methods train DNNs to directly predict face reconstructions from single images [7, 10, 12, 19, 24, 31, 32, 34, 35]. This reconstruction includes information such as pose, 3DMM components, and sometimes texture. Typically, convolutional networks like image classification networks [21, 33] or encoder-decoder networks [41] are used. Due to the lack of large-scale 2D to 3D annotations, these methods typically rely on photometric supervision for their training. Some methods propose complex multi-step network architectures [24, 32] to improve reconstruction. [24] use additional handcrafted losses to improve alignment, whereas [7] use synthetic data and numerous of landmarks. More recently, [38] proposes to use vision-transformers to improve face reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.472, + 0.903 + ], + "angle": 0, + "content": "2D Face Alignment. Traditional 2D face alignment methods predict a sparse set of manually defined landmarks." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.289 + ], + "angle": 0, + "content": "These methods typically involve convolutional DNNs to predict heat maps for each landmark [4, 30, 54]. Sparse key-points are not sufficient to describe full face motion, and heat maps make it computationally infeasible to predict a larger number of key-points. [42] and [18] achieve pseudo-dense alignment by using classifier networks to directly predict a very large number of landmarks. [20] predict the UV coordinates in image space and then map the vertices onto the image. Just like [41] and [32], our method predicts a per-pixel dense mapping between the UV space of a face model and the image space. However, we set our method apart by using better network architectures with vision-transformers and real instead of synthetic data." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.3, + 0.895, + 0.619 + ], + "angle": 0, + "content": "Evaluation of Face Trackers. Prior work evaluates face tracking and reconstruction using key-point accuracy [19, 32, 41, 42, 55], depth [37, 57], photometric [37, 57] or 3D reconstruction [5, 6, 47] errors. Sparse key-points are usually manually-annotated, difficult to define without ambiguities [54], and insufficient to describe the full motion of the face. Dense key-points [55] are difficult to compare between models using different mesh topologies. Photometric errors [37, 38, 57] are unsuitable since a perfect solution already exists within the input data, and areas with low visual saliency are neglected. A fair comparison of depth errors [37, 57] is only possible for methods using a pre-calibrated, perspective camera model. Methods that evaluate 3D reconstruction errors have to rigidly align the target and predicted mesh to fairly evaluate results [6, 34, 47], which causes valuable tracking information such as pose and intrinsics to be lost. Most importantly, depth and 3D reconstruction metrics neglect motion tangential to the surface normal. In contrast, our proposed metric measures the dense face motion in screen space, which is topology-independent and eliminates the need for rigid alignment." 
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.633, + 0.593, + 0.649 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.894, + 0.72 + ], + "angle": 0, + "content": "Our 3D face tracking pipeline consists of two stages: The first stage is predicting a dense 2D alignment of the face model, and the second stage is fitting a parametric 3D model to this alignment." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.731, + 0.807, + 0.747 + ], + "angle": 0, + "content": "3.1. Dense 2D Face Alignment Network" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.755, + 0.709, + 0.77 + ], + "angle": 0, + "content": "3.1.1 Network Architecture" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.903 + ], + "angle": 0, + "content": "The 2D alignment module is responsible for predicting the probabilistic location — in image space — of each vertex of our face model. As in [42], the 2D alignment of each vertex is represented as a random variable \\( A_{i} = \\{\\mu_{i},\\sigma_{i}\\} \\). \\( \\mu_{i} = [x_{i},y_{i}]\\in \\mathcal{I} \\) is the expected vertex position in image space \\( \\mathcal{I}\\in [0,D_{img}]^2 \\), and \\( \\sigma_{i}\\in \\mathbb{R}_{>0} \\) is its uncertainty, modeled as the standard deviation of a circular 2D Gaussian density function. As an intermediate step, for each iteration" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.925, + 0.516, + 0.937 + ], + "angle": 0, + "content": "1228" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.092, + 0.878, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.254, + 0.895, + 0.283 + ], + "angle": 0, + "content": "Figure 1. An overview of the proposed 2D alignment network architecture. A feature encoder transforms the image into a latent feature map that is then iteratively aligned with a learned UV positional embedding map by the recurrent update block." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.294, + 0.47, + 0.37 + ], + "angle": 0, + "content": "\\(k\\), the alignment network predicts a dense UV to image correspondence map \\(\\mathbf{F}_k: \\mathcal{U} \\to \\mathcal{I}\\) and uncertainty map \\(\\mathbf{S}_k\\). \\(\\mathbf{F}_k\\) maps any point in UV space \\(\\mathcal{U} \\in [0, D_{uv}]^2\\) to a position in image space through a pixel-wise offset, which we call UV-image flow. This network consists of three parts (Fig. 1):" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.37, + 0.47, + 0.4 + ], + "angle": 0, + "content": "1. An image feature encoder producing a latent feature map of the target image." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.4, + 0.469, + 0.43 + ], + "angle": 0, + "content": "2. A positional encoding module that produces learned positional embeddings in UV space." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.43, + 0.469, + 0.46 + ], + "angle": 0, + "content": "3. An iterative, recurrent optical flow module that predicts the probabilistic UV-image flow." 
+ }, + { + "type": "list", + "bbox": [ + 0.078, + 0.37, + 0.47, + 0.46 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.461, + 0.47, + 0.506 + ], + "angle": 0, + "content": "The image space position and uncertainty of each vertex is then bi-linearly sampled from the intermediate correspondence and uncertainty map for each iteration:" + }, + { + "type": "equation", + "bbox": [ + 0.131, + 0.519, + 0.47, + 0.535 + ], + "angle": 0, + "content": "\\[\n\\mu_ {i, k} = \\nu_ {i} + \\mathbf {F} _ {k} (\\nu_ {i}) \\quad \\text {a n d} \\quad \\sigma_ {i, k} = \\mathbf {S} _ {k} (\\nu_ {i}) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.547, + 0.47, + 0.577 + ], + "angle": 0, + "content": "where \\(\\nu_{i}\\in \\mathcal{U}\\) denotes the pre-defined UV coordinate of each vertex. These are manually defined by a 3D artist." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.587, + 0.471, + 0.737 + ], + "angle": 0, + "content": "Image feature encoder. To obtain the input to the image encoder \\(\\mathcal{F}\\), we use SFD [51] to detect a square face bounding box from the target image and enlarge it by \\(20\\%\\). We then crop the image to the bounding box and resize it to \\(D_{img}\\). We use Segformer [45] as the backbone, and replace the final classification layer with a linear layer to produce a 128-dimensional feature encoding. We further down-sample it to attain a final image feature map \\(Z_{img} \\in \\mathbb{R}^{D_{uv} \\times D_{uv} \\times 128}\\) through average pooling. With image \\(\\mathbf{I}\\) and network parameters \\(\\theta_{\\mathcal{F}}\\), this is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.75, + 0.469, + 0.767 + ], + "angle": 0, + "content": "\\[\nZ _ {i m g} = \\mathcal {F} (\\mathbf {I}, \\theta_ {\\mathcal {F}}) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.779, + 0.47, + 0.903 + ], + "angle": 0, + "content": "UV positional encoding module. We use a set of modules \\(\\mathcal{G}\\) with identical architecture to generate learned positional embeddings in UV-space. Each module is comprised of a multi-scale texture pyramid and a pixel-wise linear layer. This pyramid consists of four trainable textures with 32 channels and squared resolutions of \\(D_{uv}\\), \\(\\frac{D_{uv}}{2}\\), \\(\\frac{D_{uv}}{4}\\), and \\(\\frac{D_{uv}}{8}\\) respectively. Each texture is upsampled to \\(D_{uv}\\) through bi-linear interpolation before concatenating them" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.294, + 0.895, + 0.433 + ], + "angle": 0, + "content": "along the channel dimension. The concatenated textures are then passed through a pixel-wise linear layer to produce the UV positional embeddings. The multi-scale setup ensures structural consistency in UV space (closer pixels in UV should have similar features). We use 3 of these modules: \\(\\mathcal{G}_{Z_{uv}}\\) to generate a UV feature map \\(Z_{uv}\\), \\(\\mathcal{G}_c\\) to generator a context map \\(c\\), and \\(\\mathcal{G}_{h_0}\\) to generate an initial hidden state \\(h_0\\). 
With corresponding network parameters \\(\\theta_{\\mathcal{G}_{Z_{uv}}}, \\theta_{\\mathcal{G}_c}\\) and \\(\\theta_{\\mathcal{G}_{h_0}}\\), this is described as:" + }, + { + "type": "equation", + "bbox": [ + 0.544, + 0.44, + 0.892, + 0.459 + ], + "angle": 0, + "content": "\\[\nZ _ {u v} = \\mathcal {G} \\left(\\theta_ {\\mathcal {G} _ {Z _ {u v}}}\\right); c = \\mathcal {G} \\left(\\theta_ {\\mathcal {G} _ {c}}\\right); h _ {0} = \\mathcal {G} \\left(\\theta_ {\\mathcal {G} _ {h _ {0}}}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.467, + 0.892, + 0.587 + ], + "angle": 0, + "content": "UV-image flow. The RAFT [36] network is designed to predict the optical flow between two images. It consists of a correlation block that maps the latent features encoded from each image into a 4D correlation volume. A context encoder initializes the hidden state of a recurrent update block and provides it with additional context information. The update block then iteratively refines a flow estimate while sampling the correlation volume." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.588, + 0.893, + 0.83 + ], + "angle": 0, + "content": "We adapt this network to predict the UV-image flow \\(\\mathbf{F} \\in \\mathbb{R}^{D_{uv} \\times D_{uv} \\times 2}\\). We directly pass \\(Z_{uv}\\) and \\(Z_{img}\\) to the correlation block \\(\\mathbf{C}\\). We use the context map \\(c\\) and initial hidden state \\(h_0\\) from the positional encoding modules for the update module \\(\\mathbf{U}\\). We modify the update module to also predict a per-iteration uncertainty in addition to the flow estimate, by duplicating the flow prediction head to predict a 1-channel uncertainty map \\(\\mathbf{S} \\in \\mathbb{R}_{>0}^{D_{uv} \\times D_{uv}}\\). An exponential operation is applied to ensure positive values. The motion encoder head is adjusted to accept the uncertainty as an input. The modified RAFT network then works as follows: For each iteration \\(k\\), the recurrent update module performs a look-up in the correlation volume, context map \\(c\\), previous hidden state \\(h_{k-1}\\), previous flow \\(\\mathbf{F}_{k-1}\\) and previous uncertainty \\(\\mathbf{S}_{k-1}\\). It outputs the refined flow estimate \\(\\mathbf{F}_k\\) and uncertainty \\(\\mathbf{S}_k\\) and the subsequent hidden state \\(h_k\\). Formally," + }, + { + "type": "equation", + "bbox": [ + 0.506, + 0.84, + 0.892, + 0.871 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {k}, \\mathbf {S} _ {k}, h _ {k} = \\mathbf {U} \\left(\\mathbf {C} \\left(Z _ {u v}, Z _ {i m g}\\right), c, \\mathbf {F} _ {k - 1}, \\mathbf {S} _ {k - 1}, h _ {k - 1}, \\theta_ {\\mathbf {U}}\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "with update module weights \\(\\theta_{\\mathbf{U}}\\). For a detailed explanation of our modified RAFT, we defer to [36] and Appendix B." + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.927, + 0.516, + 0.938 + ], + "angle": 0, + "content": "1229" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.092, + 0.238, + 0.106 + ], + "angle": 0, + "content": "3.1.2 Loss Functions" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.115, + 0.47, + 0.176 + ], + "angle": 0, + "content": "We supervise our network with Gaussian negative log-likelihood (GNLL) both on the probabilistic per-vertex positions and the dense UV-image flow. 
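As a brief aside before the loss terms: the per-vertex quantities that are supervised here are obtained exactly as in Eq. (1), by bilinearly sampling the predicted flow and uncertainty maps at each vertex's fixed UV coordinate. A minimal PyTorch sketch is given below; the tensor names, shapes, and the grid-normalization convention are our assumptions, not the authors' code.

```python
import torch
import torch.nn.functional as F

def sample_vertex_alignment(flow, uncertainty, vertex_uv, d_uv=64):
    """Eq. (1): bilinearly sample the UV-image flow F_k and uncertainty S_k
    at the predefined UV coordinate nu_i of every vertex.

    flow:        (1, 2, d_uv, d_uv)  per-pixel offset from UV space to image space
    uncertainty: (1, 1, d_uv, d_uv)  positive standard deviations
    vertex_uv:   (N_v, 2)            vertex UV coordinates in [0, d_uv], (u, v) order
    """
    # grid_sample expects sampling locations in [-1, 1]; the exact pixel-centre
    # convention depends on how nu_i is defined, so this is only one valid choice.
    grid = (vertex_uv / d_uv) * 2.0 - 1.0          # (N_v, 2)
    grid = grid.view(1, 1, -1, 2)                  # (1, 1, N_v, 2)

    flow_v = F.grid_sample(flow, grid, align_corners=False)          # (1, 2, 1, N_v)
    sigma_v = F.grid_sample(uncertainty, grid, align_corners=False)  # (1, 1, 1, N_v)

    mu = vertex_uv + flow_v[0, :, 0, :].T          # mu_i = nu_i + F_k(nu_i), (N_v, 2)
    sigma = sigma_v[0, 0, 0, :]                    # sigma_i = S_k(nu_i), (N_v,)
    return mu, sigma
```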
For each iteration \\( k \\) of the update module, we apply the per-vertex loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.124, + 0.18, + 0.47, + 0.22 + ], + "angle": 0, + "content": "\\[\nL _ {k} ^ {\\text {v e r t e x}} = \\sum_ {i = 1} ^ {N _ {v}} \\lambda_ {i} \\left(\\log \\left(\\sigma_ {i, k} ^ {2}\\right) + \\frac {\\left\\| \\mu_ {i , k} - \\mu_ {i} ^ {\\prime} \\right\\| ^ {2}}{2 \\sigma_ {i , k} ^ {2}}\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.224, + 0.47, + 0.285 + ], + "angle": 0, + "content": "where \\(\\lambda_{i}\\) is a pre-defined vertex weight and \\(\\mu_i^\\prime\\) is the ground truth vertex position. We encourage our network to predict coherent flow and uncertainty maps in areas with no vertices by applying the GNLL loss for each pixel \\(p\\) in UV space:" + }, + { + "type": "equation", + "bbox": [ + 0.104, + 0.287, + 0.469, + 0.328 + ], + "angle": 0, + "content": "\\[\nL _ {k} ^ {\\text {d e n s e}} = \\sum_ {p \\in | \\mathcal {U} |} \\lambda_ {p} \\left(\\log \\left(\\mathbf {S} _ {k, p} ^ {2}\\right) + \\frac {\\| \\mathbf {F} _ {k , p} - \\mathbf {F} _ {p} ^ {\\prime} \\| ^ {2}}{2 \\mathbf {S} _ {k , p} ^ {2}}\\right) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.333, + 0.47, + 0.395 + ], + "angle": 0, + "content": "where \\(\\lambda_{p}\\) is a pre-defined per-pixel weight and \\(\\mathbf{F}'\\) is the ground truth UV-image flow. The final loss is a weighted sum of these losses, with a decay factor for each iteration of \\(\\alpha = 0.8\\) and a dense weight of \\(\\lambda_{dense} = 0.01\\):" + }, + { + "type": "equation", + "bbox": [ + 0.133, + 0.398, + 0.469, + 0.44 + ], + "angle": 0, + "content": "\\[\n\\operatorname {L o s s} = \\sum_ {k = 1} ^ {N _ {\\text {i t e r}}} \\alpha^ {N _ {\\text {i t e r}} - k} \\left(L _ {k} ^ {\\text {v e r t e x}} + \\lambda_ {\\text {d e n s e}} L _ {k} ^ {\\text {d e n s e}}\\right) \\tag {7}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.45, + 0.247, + 0.466 + ], + "angle": 0, + "content": "3.2. 3D Model Fitting" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.473, + 0.47, + 0.565 + ], + "angle": 0, + "content": "As in [42], the 3D reconstruction is obtained by jointly fitting a 3D head model and camera parameters to the predicted 2D alignment observations for the entire sequence. This is done by optimizing the energy function \\( E(\\Phi; A) \\) w.r.t to the model parameters \\( \\Phi \\) and alignment \\( A \\) (see Fig. 2). These parameters and the energy terms are defined below." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.579, + 0.479, + 0.687 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.105, + 0.687, + 0.442, + 0.702 + ], + "angle": 0, + "content": "Figure 2. An illustration of the 3D model fitting process." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.718, + 0.358, + 0.733 + ], + "angle": 0, + "content": "3.2.1 Tracking Model and Parameters" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.741, + 0.47, + 0.788 + ], + "angle": 0, + "content": "The tracking model consists of a 3D head model and a camera model. A tracking sequence contains \\( C \\) cameras, \\( F \\) frames with a total of \\( C \\times F \\) images." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.47, + 0.901 + ], + "angle": 0, + "content": "3D head model. We use FLAME [26] as our 3D head model M. 
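Stepping back to the supervision of Sec. 3.1.2 for a moment, the objective of Eqs. (5) and (7) can be written in a few lines of PyTorch; the dense term of Eq. (6) is analogous, applied per UV pixel. Variable names and shapes below are our own assumptions rather than the released implementation.

```python
import torch

def vertex_gnll(mu_pred, sigma_pred, mu_gt, vertex_weight):
    """Eq. (5): Gaussian negative log-likelihood over per-vertex 2D positions.
    mu_pred, mu_gt: (N_v, 2); sigma_pred, vertex_weight: (N_v,)"""
    sq_err = (mu_pred - mu_gt).pow(2).sum(dim=-1)               # ||mu_ik - mu_i'||^2
    nll = torch.log(sigma_pred ** 2) + sq_err / (2.0 * sigma_pred ** 2)
    return (vertex_weight * nll).sum()

def total_loss(vertex_losses, dense_losses, alpha=0.8, lambda_dense=0.01):
    """Eq. (7): sum over refinement iterations with exponential decay alpha."""
    n_iter = len(vertex_losses)
    loss = torch.zeros(())
    for k, (l_v, l_d) in enumerate(zip(vertex_losses, dense_losses), start=1):
        loss = loss + alpha ** (n_iter - k) * (l_v + lambda_dense * l_d)
    return loss
```

We now return to the head model.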
This model consists of \\(N_{\\nu} = 5023\\) vertices, which are controlled by identity shape parameters \\(\\beta \\in \\mathbb{R}^{300}\\), expression shape parameters \\(\\phi \\in \\mathbb{R}^{100}\\) and \\(K = 5\\) skeletal joint poses \\(\\theta \\in \\mathbb{R}^{3K + 3}\\) (including the root translation) through linear blend skinning [25]. We ignore root, neck and jaw pose and use the FLAME2023 model, which" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.168 + ], + "angle": 0, + "content": "includes deformations due to jaw rotation within the expression blend-shapes. We also introduce additional static pervertex deformations \\(\\delta_d\\in \\mathbb{R}^{N_v\\times 3}\\) to enhance identity shape detail. The local head model vertices can be expressed using its parameters as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.558, + 0.176, + 0.892, + 0.193 + ], + "angle": 0, + "content": "\\[\n\\mathbf {M} (\\boldsymbol {\\beta}, \\boldsymbol {\\delta} _ {d}, \\boldsymbol {\\phi}, \\boldsymbol {\\theta}) = F L A M E (\\boldsymbol {\\beta}, \\boldsymbol {\\phi}, \\boldsymbol {\\theta}) + \\boldsymbol {\\delta} _ {d} \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.201, + 0.892, + 0.246 + ], + "angle": 0, + "content": "The rigid transform \\(\\mathbf{T}^{\\mathbf{M}}\\in \\mathbb{R}^{3\\times 4}\\) represents the head pose, which transforms head model vertices \\(i\\) into world space for each frame \\(t\\):" + }, + { + "type": "equation", + "bbox": [ + 0.643, + 0.246, + 0.892, + 0.265 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {i, t} ^ {\\mathrm {3 D}} = \\mathbf {T} _ {t} ^ {\\mathbf {M}} \\mathbf {M} _ {i} \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.271, + 0.892, + 0.348 + ], + "angle": 0, + "content": "Camera model. The cameras are described by the world-to-camera rigid transform \\(\\mathbf{T}_{cam} \\in \\mathbb{R}^{3 \\times 4}\\) and the pinhole camera projection matrix \\(\\mathbf{K} \\in \\mathbb{R}^{3 \\times 3}\\) defined by a single focal length \\(f \\in \\mathbb{R}\\) parameter. The camera model defines the image-space projection of the 3D vertices in camera \\(j\\):" + }, + { + "type": "equation", + "bbox": [ + 0.625, + 0.355, + 0.892, + 0.375 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {i, j, t} ^ {\\mathrm {2 D}} = \\mathbf {K} _ {j} \\mathbf {T} _ {j} ^ {c a m} \\mathbf {x} _ {i, t} ^ {\\mathrm {3 D}} \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.382, + 0.892, + 0.473 + ], + "angle": 0, + "content": "Parameters. The parameters \\(\\Psi\\) consist of the head model and camera parameters, which are optimized to minimize \\(E(\\Phi; A)\\). The camera parameters can be fixed to known values, if the calibration is available. Expression and poses vary for each frame \\(t\\), whereas camera, identity shape, and deformation parameters are shared over the sequence." 
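As a concrete reading of Eqs. (8) to (10), posing and projecting the model vertices amounts to two rigid transforms followed by a pinhole projection. The sketch below uses NumPy; the homogeneous-coordinate handling, the perspective divide, and a principal point at the image centre are our assumptions (the paper only states that K is defined by a single focal length).

```python
import numpy as np

def project_vertices(verts_local, T_model, T_cam, f, img_size):
    """Eqs. (9)-(10): local model vertices -> world space -> image space.

    verts_local: (N_v, 3)  output of Eq. (8), FLAME(beta, phi, theta) + delta_d
    T_model:     (3, 4)    head pose [R | t] for frame t
    T_cam:       (3, 4)    world-to-camera transform for camera j
    f:           float     focal length in pixels
    img_size:    (W, H)    used here to place the principal point at the centre
    """
    ones = np.ones((verts_local.shape[0], 1))
    x_world = np.concatenate([verts_local, ones], axis=1) @ T_model.T   # Eq. (9)
    x_cam = np.concatenate([x_world, ones], axis=1) @ T_cam.T
    K = np.array([[f, 0.0, img_size[0] / 2.0],
                  [0.0, f, img_size[1] / 2.0],
                  [0.0, 0.0, 1.0]])
    x_img_h = x_cam @ K.T                                               # Eq. (10)
    return x_img_h[:, :2] / x_img_h[:, 2:3]                             # pixel coordinates
```

The full set of optimized parameters Ψ is listed next.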
+ }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.481, + 0.892, + 0.501 + ], + "angle": 0, + "content": "\\[\n\\Psi = \\left\\{\\beta , \\Phi_ {F \\times | \\phi |}, \\Theta_ {F \\times | \\theta |}, \\delta_ {\\mathrm {d}}; \\mathbf {T} _ {F \\times 3 \\times 4} ^ {\\mathbf {M}}; \\mathbf {T} _ {C \\times 3 \\times 4} ^ {c a m}, f _ {C} \\right\\} \\tag {11}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.516, + 0.654, + 0.532 + ], + "angle": 0, + "content": "3.2.2 Energy Terms" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.54, + 0.726, + 0.555 + ], + "angle": 0, + "content": "The energy function is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.565, + 0.892, + 0.582 + ], + "angle": 0, + "content": "\\[\nE (\\Phi ; A) = E _ {A} + E _ {F L A M E} + E _ {\\text {t e m p}} + E _ {M I C A} + E _ {\\text {d e f o r m}} \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.591, + 0.722, + 0.606 + ], + "angle": 0, + "content": "\\(E_{A}\\) encourages 2D alignment:" + }, + { + "type": "equation", + "bbox": [ + 0.582, + 0.607, + 0.892, + 0.651 + ], + "angle": 0, + "content": "\\[\nE _ {A} = \\sum_ {i, j, t} ^ {N _ {\\nu}, C, F} \\lambda_ {i} \\frac {\\left\\| \\mathbf {x} _ {i , j , t} ^ {\\mathrm {2 D}} - \\mu_ {i , j , t} \\right\\| ^ {2}}{2 \\sigma_ {i , j , t} ^ {2}} \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.656, + 0.892, + 0.716 + ], + "angle": 0, + "content": "where for vertex \\(i\\) seen by camera \\(j\\) in frame \\(t\\). \\(\\mu_{i,j,t}\\) and \\(\\sigma_{i,j,t}\\) is the 2D location and uncertainty predicted by the final iteration of our 2D alignment network, and \\(\\mathbf{x}_{i,j,t}^{2D}\\) (Eq. (10)) is the 2D camera projection of that vertex." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.716, + 0.892, + 0.805 + ], + "angle": 0, + "content": "\\(E_{FLAME} = \\lambda_{FLAME}(\\| \\beta \\|^{2} + \\| \\Phi \\|^{2})\\) encourages the optimizer to explain the data with smaller identity and expression parameters. This leads to face shapes that are statistically more likely [10, 14, 26, 57] and a more accurate 3D reconstruction. We do not penalize joint rotation, face translation or rotation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.807, + 0.892, + 0.851 + ], + "angle": 0, + "content": "\\(E_{\\text {temp }}\\) applies a loss on the acceleration of the 3D position \\(\\mathbf{x}_{i,t}^{3\\mathrm{D}}\\) of every vertex of the 3D model to prevent jitter and encourage a smoother, more natural face motion:" + }, + { + "type": "equation", + "bbox": [ + 0.512, + 0.86, + 0.892, + 0.904 + ], + "angle": 0, + "content": "\\[\nE _ {\\text {t e m p}} = \\lambda_ {\\text {t e m p}} \\sum_ {i, j, t = 2} ^ {N _ {v}, C, F - 1} \\| \\mathbf {x} _ {j, t - 1} ^ {\\mathrm {3 D}} - 2 \\mathbf {x} _ {j, t} ^ {\\mathrm {3 D}} + \\mathbf {x} _ {j, t + 1} ^ {\\mathrm {3 D}} \\| ^ {2} \\tag {14}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.925, + 0.516, + 0.937 + ], + "angle": 0, + "content": "1230" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.091, + 0.473, + 0.272 + ], + "angle": 0, + "content": "\\( E_{MICA} = \\lambda_{MICA} \\| \\mathbf{M}_{\\Phi=0,\\theta=0} - \\mathbf{M}_{MICA} \\|^2 \\) provides a 3D neutral geometry prior for the optimizer to enable a better disentanglement between identity and expression components. It consists of the L2 distance of the neutral head model vertices to the MICA [57] template \\( \\mathbf{M}_{MICA} \\). 
This template is computed by predicting the average neutral head vertices using the MICA model [57] for all frames of the sequence. The term also enables a more accurate 3D reconstruction since the model can rely on MICA predictions where the alignment is uncertain, such as in the depth direction or for occluded vertices. In areas of confident alignment, the MICA prediction can be refined." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.273, + 0.472, + 0.303 + ], + "angle": 0, + "content": "\\(E_{\\text{deform}} = \\lambda_{\\text{deform}} \\| \\delta_{\\mathrm{d}} \\|^2\\) encourages per-vertex deformations to be small w.r.t. the FLAME model." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.313, + 0.396, + 0.329 + ], + "angle": 0, + "content": "3.3. Multiface Face Tracking Benchmark" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.336, + 0.47, + 0.398 + ], + "angle": 0, + "content": "Our monocular 3D face tracking benchmark focuses on 3D reconstruction and motion capture accuracy. To evaluate these, we use our proposed screen space motion error (SSME) and the scan-to-mesh chamfer distance (CD)." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.405, + 0.473, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.081, + 0.645, + 0.466, + 0.659 + ], + "angle": 0, + "content": "Figure 3. An illustration of the EPE computation for each frame." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.677, + 0.47, + 0.873 + ], + "angle": 0, + "content": "Screen Space Motion Error. To define the Screen Space Motion Error (SSME), we reformulate face tracking as an optical flow prediction problem over a set of time windows. First, we project the ground truth mesh and predicted mesh into screen space using the respective camera model. Then, we use the screen space coordinates to compute the ground truth optical flow \\(\\mathbf{f}_{t:t + h}^{\\prime}\\) and predicted optical flow \\(\\mathbf{f}_{t:t + h}\\) from frame \\(t\\) to frame \\(t + h\\) for each frame \\(t\\in [1,\\dots ,F]\\) and a sequence of frame windows \\(h = [1,\\dots ,N_H]\\). For each frame and frame window, the average end-point-error \\(EPE_{t:t + h}\\) is computed by averaging the L2-distance between ground truth and predicted optical flow for each pixel (see Fig. 3)." + }, + { + "type": "equation", + "bbox": [ + 0.144, + 0.884, + 0.469, + 0.903 + ], + "angle": 0, + "content": "\\[\nE P E _ {t: t + h} = \\left\\| V \\odot \\left(\\mathbf {f} _ {t: t + h} - \\mathbf {f} _ {t: t + h} ^ {\\prime}\\right) \\right\\| ^ {2} \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.121 + ], + "angle": 0, + "content": "where \\(V\\) is a mask to separate different face regions and \\(\\odot\\) is the Hadamard product. See Fig. 3 for a visual reference." 
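In code, the per-frame, per-region quantity of Eq. (15) is simply a masked mean end-point error between the predicted and ground-truth screen-space flows. A small NumPy sketch follows; the array names are our own.

```python
import numpy as np

def masked_epe(flow_pred, flow_gt, mask):
    """Eq. (15): average end-point error from frame t to frame t+h,
    restricted to one face-region mask V (face, eyes, nose, mouth, or ears).

    flow_pred, flow_gt: (H, W, 2) screen-space flow fields
    mask:               (H, W)    boolean region mask
    """
    epe = np.linalg.norm(flow_pred - flow_gt, axis=-1)   # per-pixel L2 distance
    return float(epe[mask].mean())
```

Averaging this quantity over all valid frames and frame windows then yields the screen space motion error defined below.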
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.122, + 0.892, + 0.166 + ], + "angle": 0, + "content": "The screen space motion error \\( SSME_{h} \\) for frame window \\( h \\) is then defined as the mean of all EPEs over all frames \\( t \\) where frame \\( t + h \\) exists:" + }, + { + "type": "equation", + "bbox": [ + 0.583, + 0.167, + 0.892, + 0.208 + ], + "angle": 0, + "content": "\\[\nS S M E _ {h} = \\frac {1}{F - h} \\sum_ {t = 1} ^ {t + h \\leq F} E P E _ {t: t + h} \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.212, + 0.892, + 0.256 + ], + "angle": 0, + "content": "Finally, to summarize tracking performance in one value, we compute the average screen space motion error \\( \\overline{SSME} \\) over all frame windows as" + }, + { + "type": "equation", + "bbox": [ + 0.624, + 0.257, + 0.892, + 0.298 + ], + "angle": 0, + "content": "\\[\n\\overline {{S S M E}} = \\sum_ {h = 1} ^ {N _ {H}} S S M E _ {h} \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.304, + 0.893, + 0.547 + ], + "angle": 0, + "content": "In other words, \\( \\overline{SSME} \\) measures the average trajectory accuracy of each pixel over a time horizon of \\( N_{H} \\) frames. We choose a maximum frame window of \\( N_{H} = 30 \\) (1 second) since most human expressions are performed within this time frame. Because the screen space motion is directly affected by most face-tracking parameters such as intrinsics, pose, and face shape, it also measures their precision in a holistic manner. In contrast to prior works and benchmarks that use sparse key-points, SSME covers the motion of all visible face regions and is invariant to mesh topology. As it operates in screen space, it does not require additional alignment and works with all camera models, unlike 3D reconstruction or depth errors. In our benchmark, we evaluate SSME over a set of masks for semantically meaningful face regions (face, eyes, nose, mouth, and ears) (Fig. 3), permitting a more nuanced analysis of the tracking performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.553, + 0.893, + 0.705 + ], + "angle": 0, + "content": "3D Reconstruction. To complete our benchmark, we additionally measure the chamfer distance (CD) to account for the depth dimension. Similar to [34], the tracked mesh is rigidly aligned to the ground truth mesh using 7 key-points and ICP. Then, the distance of each ground truth vertex with respect to the predicted mesh is computed and averaged. For a detailed explanation, we defer to the NoW benchmark [34]. Just like the SSME, we evaluate the CD for the same set of face regions to provide a more detailed analysis of reconstruction accuracy, similar to [6]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.712, + 0.892, + 0.834 + ], + "angle": 0, + "content": "Multiface Dataset. We build our benchmark around the Multiface dataset [44]. Multiface consists of multi-view videos with high quality topologically consistent 3D registrations. High-resolution videos are captured at 30 FPS from a large variety of calibrated views. We limit the evaluation data to a manageable size by carefully selecting a subset of 86 sequences with a diverse set of view directions and facial performances (see Appendix C)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.845, + 0.634, + 0.862 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Training data. 
To train the 2D alignment network, we use a combined dataset made up of FaceScape [47], Stirling [1]," + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.927, + 0.514, + 0.938 + ], + "angle": 0, + "content": "1231" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.242 + ], + "angle": 0, + "content": "and FaMoS [3]. Where a FLAME [26] registration is not available, we fit the FLAME template mesh to the 3D scan through semi-automatic key-point annotation and commercial topology fitting software. For an accurate capture of face motion, we auto-announce expression scans with additional key-points propagated with optical flow (more information in Appendix D). The ground truth image space vertex positions \\(\\mu^{\\prime}\\) are obtained by projecting the vertices of the fitted FLAME mesh into screen space using the available camera calibrations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.254, + 0.472, + 0.406 + ], + "angle": 0, + "content": "Training strategy for 2D alignment network. We use Segformer-b5 (pre-trained on ImageNet [11]) as our backbone, with \\( D_{img} = 512 \\), \\( D_{uv} = 64 \\) and \\( N_{iter} = 3 \\). We use the RAFT-L configuration for the update module and keep its hyperparameters when possible [36]. We optimize the model for 6 epochs using the AdamW optimizer [27], an initial learning rate of \\( 1 \\times 10^{-4} \\) and a decay of 0.1 every 2 epochs. We use image augmentation such as random scaling, rotation, and color corruption [42], synthetic occlusions [39] and synthetic backgrounds (see Appendix D)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.417, + 0.471, + 0.508 + ], + "angle": 0, + "content": "3D model fitting. To minimize the energy function and obtain tracking parameters, we use the AdamW optimizer with an initial learning rate of \\(1 \\times 10^{-2}\\) and a automatic learning rate scheduler with a decay factor of 0.5 and patience of 30 steps, until convergence. We enable \\(\\delta_{d}\\) only for multi-view reconstruction, and only for the nose region." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.519, + 0.47, + 0.671 + ], + "angle": 0, + "content": "Baselines. We implement and test against the most recent publicly available methods for single image regression-based approaches 3DDFAv2 [19], SADRNet [32], PRNet [41], DECA (coarse) [14], EMOCA (coarse) [10], and HRN [24]. We extend the ability of these methods to use temporal priors by applying a simple temporal Gaussian filter to the screen-space vertices. We also include the popular photometric optimization-based approach MPT [57]. Lastly, we compare against the key-point-only optimization-based method Dense proposed by [42] on public benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.681, + 0.283, + 0.696 + ], + "angle": 0, + "content": "4.1. Multiface Benchmark" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.704, + 0.471, + 0.903 + ], + "angle": 0, + "content": "We divide our Multiface benchmark into two categories: Without temporal information sharing, where each method is restricted to operate on single images, and with (both forward and backward) temporal information sharing, where each method is allowed to use the entire sequence as observations. Our method significantly outperforms the best publicly available method by \\(54\\%\\) w.r.t. face-region SSME on both on single-image and by \\(46\\%\\) on sequence prediction. This confirms the superior 2D alignment accuracy of our method. 
Despite using only 2D alignment as supervision, our method performs \\(8\\%\\) better in terms of 3D reconstruction (CD) than the photometric optimization approach MPT [57] (see Tab. 2. To our surprise, MPT performs in" + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.098, + 0.695, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.702, + 0.099, + 0.895, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.261, + 0.895, + 0.358 + ], + "angle": 0, + "content": "Figure 4. \\(SSME_h\\) plotted over all frame horizons for each evaluated tracker for single-image and full sequence tracking (right). Lower \\(SSME_h\\) in smaller frame horizons \\(h\\) (left in the graph) means short-term temporal stability while lower \\(SSME_h\\) in larger frame horizons (right in the graph) means better long-term tracking consistency. Our tracker performs significantly better over every time horizon." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.374, + 0.895, + 0.617 + ], + "angle": 0, + "content": "ferior w.r.t. motion error than some regression-based models — this is likely due to uniform lighting and texture in the Multiface dataset. Qualitative results Fig. 5 confirm that methods using photometric errors (DECA, HRN, MPT) perform inferior w.r.t. screen space motion in areas without key-point supervision such as cheeks and forehead. Plotting the \\(SSME_h\\) over different time windows \\(h\\) (see Fig. 4) gives a previously unseen overview of temporal stability. Regression-based methods suffer from high short-term error \\((SSME_1)\\) which is due to temporal instability and jitter. As expected, introducing temporal smoothing improves this issue and the overall \\(SSME\\) for these methods. Our method achieves very low short-term SSME even with single image prediction, which indicates the high robustness and accuracy of the alignment network. As expected, introducing temporal priors reduces \\(SSME\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.625, + 0.712, + 0.641 + ], + "angle": 0, + "content": "4.2. FaceScape Benchmark" + }, + { + "type": "table", + "bbox": [ + 0.575, + 0.653, + 0.821, + 0.762 + ], + "angle": 0, + "content": "
Method | CD ↓ (mm) | NME ↓ (rad)
MGCNet [35] | 4.00 | 0.093
PRNet [41] | 3.56 | 0.126
SADRNet [32] | 6.75 | 0.133
DECA [14] | 4.69 | 0.108
3DDFAv2 [19] | 3.60 | 0.096
HRN [24] | 3.67 | 0.087
Ours | 2.21 | 0.083
" + }, + { + "type": "table_caption", + "bbox": [ + 0.542, + 0.772, + 0.848, + 0.787 + ], + "angle": 0, + "content": "Table 1. Results on the FaceScape benchmark [47]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We also compare our method on the FaceScape benchmark [47], which measures 3D reconstruction accuracy from 2D images under large view (up to \\(90^{\\circ}\\)) and expression variations. On this benchmark, we outperform the best previous regression-based methods by \\(38\\%\\) in terms of CD and \\(4.6\\%\\) in terms of mean normal error (NME) Tab. 1. This shows that our method can accurately reconstruct faces even" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.926, + 0.516, + 0.938 + ], + "angle": 0, + "content": "1232" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.134, + 0.086, + 0.835, + 0.335 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.34, + 0.896, + 0.396 + ], + "angle": 0, + "content": "Figure 5. Qualitative results on two sequences (top and bottom 3 rows) of our Multiface benchmark. Warmer colors represent high error, while colder colors represent low error. DECA [14], HRN [24], and MPT [57] struggle with motion in the cheek and forehead region, which is visible in the SSME error plot (right columns). Despite using only 2D alignment as supervision, our method achieves a better 3D reconstruction (CD) (center columns)." + }, + { + "type": "image", + "bbox": [ + 0.079, + 0.4, + 0.911, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.554, + 0.895, + 0.584 + ], + "angle": 0, + "content": "Table 2. Results on our Multiface tracking benchmark with and without temporal information sharing. Our method consistently outperforms previous methods on every single category, metric and face region." + }, + { + "type": "table", + "bbox": [ + 0.107, + 0.587, + 0.443, + 0.73 + ], + "angle": 0, + "content": "
| Method | Single-view Median (mm) ↓ | Single-view Mean (mm) ↓ | Single-view Std (mm) ↓ | Multi-view Median (mm) ↓ | Multi-view Mean (mm) ↓ | Multi-view Std (mm) ↓ |
| --- | --- | --- | --- | --- | --- | --- |
| MGCNet [35] | 1.31 | 1.87 | 2.63 | - | - | - |
| PRNet [41] | 1.50 | 1.98 | 1.88 | - | - | - |
| DECA [14] | 1.09 | 1.38 | 1.18 | - | - | - |
| Deep3D [12] | 1.11 | 1.41 | 1.21 | 1.08 | 1.35 | 1.15 |
| Dense [42] | 1.02 | 1.28 | 1.08 | 0.81 | 1.01 | 0.84 |
| MICA [57] | 0.90 | 1.11 | 0.92 | - | - | - |
| TokenFace [38] | 0.76 | 0.95 | 0.82 | - | - | - |
| Ours | 0.87 | 1.07 | 0.88 | 0.71 | 0.88 | 0.73 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.739, + 0.47, + 0.782 + ], + "angle": 0, + "content": "Table 3. Results on the NoW Challenge [34]. Multi-view evaluation is done as in [42]. Multi-view results for [12] and [42] are reported by [42]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.791, + 0.265, + 0.807 + ], + "angle": 0, + "content": "under large view deviations." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.817, + 0.231, + 0.833 + ], + "angle": 0, + "content": "4.3. Now Challenge" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.903 + ], + "angle": 0, + "content": "The NoW benchmark is a public benchmark for evaluating neutral head reconstruction from 2D images captured indoors and outdoors, with different expressions, and under variations in lighting conditions and occlusions. We" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.591, + 0.895, + 0.833 + ], + "angle": 0, + "content": "evaluate our method on the non-metrical challenge (Tab. 3). For single-view reconstruction, our model outperforms our neutral shape predictor MICA [57] by \\(4\\%\\) on mean scan-to-mesh distance. For the multi-view case, we outperform the baseline Dense [42] by \\(13\\%\\), likely due to our method's high 2D alignment accuracy, better neutral shape priors, and per-vertex deformations. TokenFace [38] performs better for the single-view case, however, their predictions could be integrated into our pipeline since they use the FLAME topology. Importantly, our network is able to generalize to these in-the-wild images despite being trained only on in-the-lab data captured under controlled lighting conditions. An important sub-task for 3D face trackers is to disentangle the identity and expression components of the face shape. The outstanding results on the NoW benchmark indicate the ability of our tracker to accomplish this." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.846, + 0.683, + 0.861 + ], + "angle": 0, + "content": "4.4. Downstream Tasks" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "In the following, we show how we enhance downstream models using our face tracker." + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.926, + 0.516, + 0.938 + ], + "angle": 0, + "content": "1233" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.288 + ], + "angle": 0, + "content": "3D Head Avatar Synthesis. Recent head avatar synthesis methods heavily rely on photometric head trackers to generate face alignment priors [17, 53, 56]. INSTA [56], a top-performing model, uses MPT [57]. We modify INSTA by replacing their tracker with ours. We compare our enhanced FlowFace-INSTA to the baseline MPT-INSTA. On their publicly available dataset, we outperform MPT-INSTA by \\(10.5\\%\\) on perceptual visual fidelity (LPIPS). On our Multiface benchmark videos, we outperform MPT-INSTA by \\(20.3\\%\\) on LPIPS. Detailed results can be viewed in Appendix G. These results demonstrate how better face trackers can directly improve performance on down-stream tasks which highlights the importance of our research." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.301, + 0.472, + 0.452 + ], + "angle": 0, + "content": "Speech-driven 3D facial animation. The field of speech-driven facial animation often suffers from data sparsity [9, 13, 46]. To alleviate this issue, we generate 3D face meshes using the multi-view video dataset MEAD [40]. 
In using this generated dataset to augment the training of the state-of-the-art model CodeTalker [46] (see Appendix H), we are able to improve from a lip vertex error of \\(3.13 \\times 10^{-5}\\) to \\(2.85 \\times 10^{-5}\\) on the VOCASET benchmark [9], an \\(8.8\\%\\) improvement. This underlines the benefit of high-accuracy video face trackers for large-scale data generation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.464, + 0.221, + 0.479 + ], + "angle": 0, + "content": "4.5.2D Alignment" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.488, + 0.47, + 0.699 + ], + "angle": 0, + "content": "To show the benefit of our 2D alignment model architecture, we conduct an evaluation on our validation set, which consists of 84 subjects of our dataset. We implement the dense landmark model of [42] (ResNet-101 backbone) and adapt it to output FLAME vertex alignment and uncertainty. We also implement PRNet [41] and modify it in the same way. We retrain each method on our training set. In evaluate the 2D alignment accuracy with respect to normalized mean error (NME) of every vertex in the face area (Fig. 14, green vertices). With an NME of 1.30, our method performs significantly better than the ResNet architecture of Dense [42] (NME = 1.63), and PRNet (NME = 2.52). We note that the accuracy of uncertainty cannot be evaluated with NME. A qualitative comparison can be viewed in Fig. 17." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.711, + 0.241, + 0.725 + ], + "angle": 0, + "content": "4.6. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.901 + ], + "angle": 0, + "content": "2D alignment network. To analyze the effect of different feature encoder backbones, we replace our backbone with different variations of the Segformer model and also test the CNN-based backbone BiSeNet-v2 [49] (see Tab. 4). As expected, vision-transformer-based networks show better performance. Experimenting with the number of iterations \\( N_{iter} \\) for the update module, we find that multiple iterations instead of one improves the performance. Finally, we confirm the superior performance of our 2D alignment network compared to the ResNet-101-based network of [42] mentioned in Sec. 4.5." + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.091, + 0.895, + 0.205 + ], + "angle": 0, + "content": "
| Backbone | N_iter | #Param | latency (ms) | CD ↓ | SSME ↓ |
| --- | --- | --- | --- | --- | --- |
| ResNet-101 | - | 73.4M | 9 | 1.54 | 3.90 |
| BiSeNet-v2 | 3 | 17.6M | 23 | 1.21 | 3.52 |
| MiT-b1 | 3 | 17.3M | 29 | 1.22 | 3.21 |
| MiT-b2 | 3 | 31.0M | 46 | 1.20 | 2.78 |
| MiT-b5 | 1 | 88.2M | 66 | 1.25 | 2.70 |
| MiT-b5 | 2 | 88.2M | 71 | 1.21 | 2.61 |
| MiT-b5 | 3 | 88.2M | 75 | 1.18 | 2.58 |
| MiT-b5 | 4 | 88.2M | 80 | 1.23 | 2.62 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.216, + 0.892, + 0.258 + ], + "angle": 0, + "content": "Table 4. Ablations for backbone architectures and hyperparameters of the 2D alignment network on our Multiface benchmark. Latency is evaluated on a Quadro RTX 5000 GPU." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.277, + 0.892, + 0.459 + ], + "angle": 0, + "content": "3D model fitting. We show in Tab. 5 the benefit of integrating the MICA neutral shape prediction on the NoW Challenge validation set. The significant performance gain on single-image predictions shows that our 3D tracking pipeline can integrate MICA predictions very well, even improving them. We also show the benefit of predicting a dense face alignment in conjunction with per-vertex deformations in multi-view settings. This shows that our 2D alignment is precise enough to predict face shapes that lie outside of the FLAME blend-shape space, which previous optimization-based methods [42, 57] cannot achieve. For a qualitative analysis, see Appendix E." + }, + { + "type": "table", + "bbox": [ + 0.516, + 0.473, + 0.878, + 0.567 + ], + "angle": 0, + "content": "
| Method | Single-view Median (mm) | Single-view Mean (mm) | Single-view Std (mm) | Multi-view Median (mm) | Multi-view Mean (mm) | Multi-view Std (mm) |
| --- | --- | --- | --- | --- | --- | --- |
| Ours w/o MICA | 0.99 | 1.23 | 1.03 | 0.71 | 0.88 | 0.76 |
| MICA only | 0.91 | 1.13 | 0.94 | - | - | - |
| Ours w/o δd | - | - | - | 0.68 | 0.84 | 0.72 |
| Ours | 0.82 | 1.02 | 0.85 | 0.67 | 0.83 | 0.71 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.576, + 0.892, + 0.604 + ], + "angle": 0, + "content": "Table 5. Ablations for the 3D model fitting module on single and multi-view reconstruction on the NoW validation set." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.631, + 0.77, + 0.646 + ], + "angle": 0, + "content": "5. Conclusion and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.658, + 0.892, + 0.809 + ], + "angle": 0, + "content": "This paper presents a state-of-the-art face tracking pipeline with a highly robust and accurate 2D alignment module. Its performance is thoroughly validated on a variety of benchmarks and downstream tasks. However, the proposed two-stage pipeline is not fully differentiable, which prevents end-to-end learning. Furthermore, our training data is limited to data captured in-the-lab. In future work, we intend to extend the alignment network to directly predict depth as well, obviating the need for the 3D model fitting step. Synthetic datasets [42] could alleviate the data issue." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.9 + ], + "angle": 0, + "content": "We're confident that our tracker will accelerate research in downstream tasks by generating large-scale face capture data using readily available video datasets [8, 29, 50]. We also believe that our novel motion capture evaluation benchmark will focus and align future research efforts to create even more accurate methods." + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.927, + 0.516, + 0.938 + ], + "angle": 0, + "content": "1234" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.466, + 0.142 + ], + "angle": 0, + "content": "[1] Stirling/esrc 3d face database. https://pics.stir.ac.uk/ESRC/. Accessed: 2023-10-25. 5, 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.145, + 0.469, + 0.213 + ], + "angle": 0, + "content": "[2] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques, page 187-194, USA, 1999. ACM Press/Addison-Wesley Publishing Co. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.216, + 0.469, + 0.271 + ], + "angle": 0, + "content": "[3] Timo Bolkart, Tianye Li, and Michael J. Black. Instant multi-view head capture through learnable registration. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 768-779, 2023. 6, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.273, + 0.469, + 0.327 + ], + "angle": 0, + "content": "[4] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In International Conference on Computer Vision, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.33, + 0.469, + 0.371 + ], + "angle": 0, + "content": "[5] Chen Cao, Mengei Chai, Oliver Woodford, and Linjie Luo. Stabilized real-time face tracking via a learned dynamic rigidity prior. ACM Trans. Graph., 37(6), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.374, + 0.469, + 0.427 + ], + "angle": 0, + "content": "[6] Zenghao Chai, Haoxian Zhang, Jing Ren, Di Kang, Zhengzhuo Xu, Xuefei Zhe, Chun Yuan, and Linchao Bao. Really: Rethinking the evaluation of 3d face reconstruction, 2022. 
2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.43, + 0.469, + 0.486 + ], + "angle": 0, + "content": "[7] Zenghao Chai, Tianke Zhang, Tianyu He, Xu Tan, Tadas Baltrusaitis, HsiangTao Wu, Runnan Li, Sheng Zhao, Chun Yuan, and Jiang Bian. Hiface: High-fidelity 3d face reconstruction by learning static and dynamic details, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.488, + 0.469, + 0.514 + ], + "angle": 0, + "content": "[8] J. S. Chung, A. Nagrani, and A. Zisserman. Voxceleb2: Deep speaker recognition. In INTERSPEECH, 2018. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.517, + 0.469, + 0.584 + ], + "angle": 0, + "content": "[9] Daniel Cudeiro, Timo Bolkart, Cassidy Laidlaw, Anurag Ranjan, and Michael Black. Capture, learning, and synthesis of 3D speaking styles. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pages 10101-10111, 2019. 8, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.588, + 0.469, + 0.628 + ], + "angle": 0, + "content": "[10] Radek Danecek, Michael J. Black, and Timo Bolkart. Emoca: Emotion driven monocular face capture and animation, 2022. 1, 2, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.631, + 0.469, + 0.686 + ], + "angle": 0, + "content": "[11] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.688, + 0.469, + 0.756 + ], + "angle": 0, + "content": "[12] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In IEEE Computer Vision and Pattern Recognition Workshops, 2019. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.759, + 0.469, + 0.813 + ], + "angle": 0, + "content": "[13] Yingruo Fan, Zhaojiang Lin, Jun Saito, Wenping Wang, and Taku Komura. Faceformer: Speech-driven 3d facial animation with transformers. arXiv preprint arXiv:2112.05329, 2021.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.469, + 0.87 + ], + "angle": 0, + "content": "[14] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3d face model from in-the-wild images. CoRR, abs/2012.04012, 2020. 1, 4, 6, 7, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[15] Pablo Garrido, Michael Zollhöfer, Dan Casas, Levi Valgaerts, Kiran Varanasi, Patrick Pérez, and Christian" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.469, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "Theobalt. Reconstruction of personalized 3d face rigs from monocular video. ACM Trans. Graph., 35(3), 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.177 + ], + "angle": 0, + "content": "[16] Pablo Garrido, Michael Zollhöfer, Chenglei Wu, Derek Bradley, Patrick Pérez, Thabo Beeler, and Christian Theobalt. Corrective 3d reconstruction of lips from monococular video. ACM Trans. Graph., 35(6), 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.179, + 0.892, + 0.233 + ], + "angle": 0, + "content": "[17] Philip-William Grassal, Malte Prinzler, Titus Leistner, Carsten Rother, Matthias Nießner, and Justus Thies. 
Neural head avatars from monocular rgb videos. arXiv preprint arXiv:2112.01554, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.235, + 0.892, + 0.289 + ], + "angle": 0, + "content": "[18] Ivan Grishchenko, Artsiom Ablavatski, Yury Kartynnik, Karthik Raveendran, and Matthias Grundmann. Attention mesh: High-fidelity face mesh prediction in real-time. CoRR, abs/2006.10962, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.291, + 0.892, + 0.333 + ], + "angle": 0, + "content": "[19] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Yang Fan, Zhen Lei, and Stan Li. Towards Fast, Accurate and Stable 3D Dense Face Alignment, pages 152-168. 2020. 1, 2, 6, 7, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.335, + 0.892, + 0.389 + ], + "angle": 0, + "content": "[20] Riza Alp Güler, George Trigeorgis, Epameinondas Antonakos, Patrick Snape, Stefanos Zafeiriou, and Iasonas Kokkinos. Densereg: Fully convolutional dense shape regression in-the-wild, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.391, + 0.892, + 0.418 + ], + "angle": 0, + "content": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.42, + 0.892, + 0.447 + ], + "angle": 0, + "content": "[22] A 3D Face Model for Pose and Illumination Invariant Face Recognition, Genova, Italy, 2009. IEEE. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.449, + 0.892, + 0.504 + ], + "angle": 0, + "content": "[23] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.506, + 0.892, + 0.56 + ], + "angle": 0, + "content": "[24] Biwen Lei, Jianqiang Ren, Mengyang Feng, Miaomiao Cui, and Xuansong Xie. A hierarchical representation network for accurate and detailed face reconstruction from in-the-wild images, 2023. 1, 2, 6, 7, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.563, + 0.892, + 0.645 + ], + "angle": 0, + "content": "[25] J. P. Lewis, Matt Cordner, and Nickson Fong. Pose space deformation: A unified approach to shape interpolation and skeleton-driven deformation. In Proceedings of the 27th Annual Conference on Computer Graphics and Interactive Techniques, page 165-172, USA, 2000. ACM Press/Addison-Wesley Publishing Co. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.647, + 0.892, + 0.714 + ], + "angle": 0, + "content": "[26] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 2, 4, 6, 3, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.717, + 0.892, + 0.744 + ], + "angle": 0, + "content": "[27] Ilya Loshchilov and Frank Hutter. Fixing weight decay regularization in adam. CoRR, abs/1711.05101, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.746, + 0.892, + 0.786 + ], + "angle": 0, + "content": "[28] Araceli Morales, Gemma Piella, and Federico M. Sukno. Survey on 3d face reconstruction from uncalibrated images. CoRR, abs/2011.05740, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.789, + 0.892, + 0.83 + ], + "angle": 0, + "content": "[29] Arsha Nagrani, Joon Son Chung, Weidi Xie, and Andrew Zisserman. 
Voxceleb: Large-scale speaker verification in the wild. Computer Science and Language, 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.899 + ], + "angle": 0, + "content": "[30] Andrés Prados-Torreblanca, José M Buenaposada, and Luis Baumela. Shape preserving facial landmarks with graph attention networks. In 33rd British Machine Vision Conference 2022, BMVC 2022, London, UK, November 21-24, 2022. BMVA Press, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.927, + 0.515, + 0.938 + ], + "angle": 0, + "content": "1235" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[31] Aashish Rai, Hiresh Gupta, Ayush Pandey, Francisco Vicente Carrasco, Shingo Jason Takagi, Amaury Aubel, Daeil Kim, Aayush Prakash, and Fernando de la Torre. Towards realistic generative 3d face models, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.47, + 0.218 + ], + "angle": 0, + "content": "[32] Zeyu Ruan, Changqing Zou, Longhai Wu, Gangshan Wu, and Limin Wang. SADRNet: Self-aligned dual face regression networks for robust 3d dense face alignment and reconstruction. IEEE Transactions on Image Processing, 30: 5793-5806, 2021. 2, 6, 7, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.22, + 0.469, + 0.261 + ], + "angle": 0, + "content": "[33] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zh-moginov, and Liang-Chieh Chen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.263, + 0.469, + 0.331 + ], + "angle": 0, + "content": "[34] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael Black. Learning to regress 3d face shape and expression from an image without 3d supervision. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2019, 2, 5, 7, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.333, + 0.469, + 0.402 + ], + "angle": 0, + "content": "[35] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Mingmin Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3d face reconstruction by occlusion-aware multi-view geometry consistency. arXiv preprint arXiv:2007.12494, 2020. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.404, + 0.469, + 0.445 + ], + "angle": 0, + "content": "[36] Zachary Teed and Jia Deng. RAFT: recurrent all-pairs field transforms for optical flow. CoRR, abs/2003.12039, 2020. 3, 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.447, + 0.469, + 0.489 + ], + "angle": 0, + "content": "[37] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2face: Real-time face capture and reenactment of rgb videos, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.49, + 0.47, + 0.572 + ], + "angle": 0, + "content": "[38] Zhang Tianke, Chu Xuangeng, Liu Yunfei, Lin Lijian, Yang Zhendong, Xu Zhengzhuo, Cao Chengkun, Yu Fei, Zhou Changyin, Yuan Chun, and Yu Li. Accurate 3d face reconstruction with facial component tokens. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2023. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.575, + 0.469, + 0.644 + ], + "angle": 0, + "content": "[39] Kenny T. R. Voo, Liming Jiang, and Chen Change Loy. Delving into high-quality synthetic face occlusion segmentation datasets. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2022. 6, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.646, + 0.469, + 0.701 + ], + "angle": 0, + "content": "[40] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 8, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.702, + 0.469, + 0.731 + ], + "angle": 0, + "content": "[41] Yue Wang and Justin M. Solomon. Prnet: Self-supervised learning for partial-to-partial registration, 2019. 2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.732, + 0.469, + 0.813 + ], + "angle": 0, + "content": "[42] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Matthew Johnson, Jingjing Shen, Nikola Milosavljevic, Daniel Wilde, Stephan Garbin, Chirag Raman, Jamie Shotton, Toby Sharp, Ivan Stojiljkovic, Tom Cashman, and Julien Valentin. 3d face reconstruction with dense landmarks, 2022. 1, 2, 4, 6, 7, 8, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[43] Chenglei Wu, Derek Bradley, Markus Gross, and Thabo Beeler. An anatomically-constrained local deformation model for monocular face capture. ACM Trans. Graph., 35 (4), 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[44] Cheng-hsin Wu, Ningyuan Zheng, Scott Ardisson, Rohan Bali, Danielle Belko, Eric Brockmeyer, Lucas Evans, Tim" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.203 + ], + "angle": 0, + "content": "Othy Godisart, Hyowon Ha, Xuhua Huang, Alexander Hypes, Taylor Koska, Steven Krenn, Stephen Lombardi, Xiaomin Luo, Kevyn McPhail, Laura Millerschoen, Michal Perdoch, Mark Pitts, Alexander Richard, Jason Saragih, Junko Saragih, Takaaki Shiratori, Tomas Simon, Matt Stewart, Autumn Trimble, Xinshuo Weng, David Whitewolf, Chenglei Wu, Shouou-I Yu, and Yaser Sheikh. Multiface: A dataset for neural face rendering. In arXiv, 2022. 1, 5, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.204, + 0.892, + 0.272 + ], + "angle": 0, + "content": "[45] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. In Neural Information Processing Systems (NeurIPS), 2021. 3, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.274, + 0.892, + 0.314 + ], + "angle": 0, + "content": "[46] Jinbo Xing, Menghan Xia, Yuechen Zhang, Xiaodong Cun, Jue Wang, and Tien-Tsin Wong. Codetalker: Speech-driven 3d facial animation with discrete motion prior, 2023. 8, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.315, + 0.892, + 0.37 + ], + "angle": 0, + "content": "[47] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction, 2020. 2, 5, 6, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.371, + 0.892, + 0.412 + ], + "angle": 0, + "content": "[48] Hongwei Yi, Hualin Liang, Yifei Liu, Qiong Cao, Yandong Wen, Timo Bolkart, Dacheng Tao, and Michael J. Black. Generating holistic 3d human motion from speech, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.413, + 0.892, + 0.481 + ], + "angle": 0, + "content": "[49] Changqian Yu, Changxin Gao, FlowFace-INSTA to the baseline MPT-INSTA Jingbo Wang, Gang Yu, Chunhua Shen, and Nong Sang. Bisenet V2: bilateral network with guided aggregation for real-time semantic segmentation. CoRR, abs/2004.02147, 2020. 8, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.482, + 0.892, + 0.523 + ], + "angle": 0, + "content": "[50] Jianhui Yu, Hao Zhu, Liming Jiang, Chen Change Loy, Weidong Cai, and Wayne Wu. CelebV-Text: A large-scale facial text-video dataset. In CVPR, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.524, + 0.892, + 0.565 + ], + "angle": 0, + "content": "[51] Shifeng Zhang, Xiangyu Zhu, Zhen Lei, Hailin Shi, Xiaobo Wang, and Stan Z. Li. S\\(^3\\)fd: Single shot scale-invariant face detector, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.567, + 0.892, + 0.62 + ], + "angle": 0, + "content": "[52] Yufeng Zheng, Victoria Fernández Abrevaya, Xu Chen, Marcel C. Bühler, Michael J. Black, and Otmar Hilliges. I M avatar: Implicit morphable head avatars from videos. CoRR, abs/2112.07471, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.622, + 0.892, + 0.691 + ], + "angle": 0, + "content": "[53] Yufeng Zheng, Wang Yifan, Gordon Wetzstein, Michael J. Black, and Otmar Hilliges. Pointavatar: Deformable point-based head avatars from videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.692, + 0.892, + 0.733 + ], + "angle": 0, + "content": "[54] Zhenglin Zhou, Huaxia Li, Hong Liu, Nanyang Wang, Gang Yu, and Rongrong Ji. Star loss: Reducing semantic ambiguity in facial landmark detection, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.734, + 0.892, + 0.774 + ], + "angle": 0, + "content": "[55] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z. Li. Face alignment across large poses: A 3d solution. CoRR, abs/1511.07212, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.775, + 0.892, + 0.83 + ], + "angle": 0, + "content": "[56] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Instant volumetric head avatars. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4574-4584, 2022. 8, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.831, + 0.892, + 0.871 + ], + "angle": 0, + "content": "[57] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces, 2022. 1, 2, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[58] Michael Zollhöfer, Justus Thies, Darek Bradley, Pablo Garrido, Thabo Beeler, Patrick Pérez, Marc Stamminger," + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.927, + 0.516, + 0.938 + ], + "angle": 0, + "content": "1236" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.092, + 0.471, + 0.134 + ], + "angle": 0, + "content": "Matthias Nießner, and Christian Theobalt. State of the art on monocular 3d face reconstruction, tracking, and applications. 2018. 
1, 2" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.927, + 0.516, + 0.939 + ], + "angle": 0, + "content": "1237" + } + ] +] \ No newline at end of file diff --git a/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_origin.pdf b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cd31187fa0f5d576130adaca75bea8c137222640 --- /dev/null +++ b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b3590d1d0d8a3f7dcec8e06f0629d3afe00102dd80ef0899134a29781613292 +size 1470559 diff --git a/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/full.md b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5ab796f09133626d7a3d936dd531a48bcc97f4e7 --- /dev/null +++ b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/full.md @@ -0,0 +1,366 @@ +# 3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow + +Felix Taubner + +Prashant Raina + +Mathieu Tuli + +Eu Wern Teh + +Chul Lee + +Jinmiao Huang + +LG Electronics + +{prashant.raina, mathieu.tuli, euwern.teh, clee.lee}@lge.com + +# Abstract + +When working with 3D facial data, improving fidelity and avoiding the uncanny valley effect is critically dependent on accurate 3D facial performance capture. Because such methods are expensive and due to the widespread availability of 2D videos, recent methods have focused on how to perform monocular 3D face tracking. However, these methods often fall short in capturing precise facial movements due to limitations in their network architecture, training, and evaluation processes. Addressing these challenges, we propose a novel face tracker, FlowFace, that introduces an innovative 2D alignment network for dense pervertex alignment. Unlike prior work, FlowFace is trained on high-quality 3D scan annotations rather than weak supervision or synthetic data. Our 3D model fitting module jointly fits a 3D face model from one or many observations, integrating existing neutral shape priors for enhanced identity and expression disentanglement and per-vertex deformations for detailed facial feature reconstruction. Additionally, we propose a novel metric and benchmark for assessing tracking accuracy. Our method exhibits superior performance on both custom and publicly available benchmarks. We further validate the effectiveness of our tracker by generating high-quality 3D data from 2D videos, which leads to performance gains on downstream tasks. + +# 1. Introduction + +Access to 3D face tracking data lays the foundation for many computer graphics tasks such as 3D facial animation, 3D human avatar reconstruction, and expression transfer. Obtaining high visual fidelity, portraying subtle emotional cues, and preventing the uncanny valley effect in these downstream tasks is reliant on high motion capture accuracy. As a result, a common approach to generating 3D face tracking data is to use 3D scans and visual markers however, this process is cost-intensive. 
To alleviate this burden, building computational models to obtain 3D faces from monocular 2D videos and images has cemented its importance in recent years and seen great progress [10, 14, 19, 24, 37, 42, 57]. Nevertheless, three + +issues persist: First, current methods rely heavily on sparse landmarks and photometric similarity, which is computationally expensive and ineffective in ensuring accurate face motion. Second, the monocular face tracking problem is both ill-posed and contains a large solution space dependent on camera intrinsics, pose, head shape, and expression [58]. Third, current benchmarks for this task neglect the temporal aspect of face tracking and do not adequately evaluate facial motion capture accuracy. + +To address the aforementioned issues, we introduce a novel 3D face tracking model called FlowFace, consisting of a versatile two-stage pipeline: A 2D alignment network that predicts the screen-space positions of each vertex of a 3D morphable model [2] (3DMM) and an optimization module that jointly fits this model across multiple views by minimizing an alignment energy function. Unlike traditional methods that rely on sparse landmarks and photometric consistency, FlowFace uses only 2D alignment as input signal, similar to recent work [42]. This alleviates the computational burden of inverse rendering and allows joint reconstruction using a very large number of observations. We enhance previous work in four ways: (1) The 2D alignment network features a novel architecture with a vision-transformer backbone and an iterative, recurrent refinement block. (2) In contrast to previous methods that use weak supervision or synthetic data, the alignment network is trained using high-quality annotations from 3D scans. (3) The alignment network predicts dense, per-vertex alignment instead of key-points, which enables the reconstruction of finer details. (4) We integrate an off-the-shelf neutral shape prediction model to improve identity and expression disentanglement. + +In addition, we present the screen-space motion error (SSME) as a novel face tracking metric. Based on optical flow, SSME computes and contrasts screen-space motion, aiming to resolve the limitation observed in existing evaluation methods. These often rely on sparse key points, synthetic annotations, or RGB/3D reconstruction errors, and lack a thorough and comprehensive measurement of temporal consistency. Using the Multiface [44] dataset, we develop a 3D face tracking benchmark around this metric. + +Finally, through extensive experiments on available benchmarks, we show that our method significantly outperforms the state-of-the-art on various tasks. To round off our work, we demonstrate how our face tracker can positively affect the performance of downstream tasks, including speech-driven 3D facial animation and 3D head avatar synthesis. Specifically, we demonstrate how our method can be used to generate high-quality data — comparable to studio-captured data — for both these tasks by using it to augment existing models to achieve state-of-the-art results. + +# 2. Related Work + +Uncalibrated 3D Face Reconstruction. Previous work reconstructing 3D face shapes from uncalibrated 2D images or video fall into two broad categories: + +Optimization-based methods recover face shape and motion by jointly optimizing 3D model parameters to fit the 2D observations. They traditionally treat this optimization as an inverse rendering problem [15, 16, 37, 43, 48, 52, 57], using sparse key-points as guidance. 
Typically, they employ geometric priors such as 3DMMs [2, 6, 22, 26, 47], texture models, simplified illumination models, and temporal priors. Some methods use additional constraints such as depth [37] or optical flow [5]. [58] and [28] present detailed surveys of such methods. Most methods use 3DMMs to disentangle shape and expression components. MPT [57] is the first method to integrate metrical head shape priors predicted by a deep neural network (DNN). However, photometric and sparse landmark supervision is not sufficient to obtain consistent and accurate face alignment, especially in areas not covered by landmarks and or of low visual saliency. More recently, [42] proposes to use only 2D face alignment (dense landmarks) as supervision, avoiding the computationally expensive inverse rendering process. Our method extends this idea with an improved 2D alignment module, better shape priors, and per-vertex deformation. + +Regression-based methods train DNNs to directly predict face reconstructions from single images [7, 10, 12, 19, 24, 31, 32, 34, 35]. This reconstruction includes information such as pose, 3DMM components, and sometimes texture. Typically, convolutional networks like image classification networks [21, 33] or encoder-decoder networks [41] are used. Due to the lack of large-scale 2D to 3D annotations, these methods typically rely on photometric supervision for their training. Some methods propose complex multi-step network architectures [24, 32] to improve reconstruction. [24] use additional handcrafted losses to improve alignment, whereas [7] use synthetic data and numerous of landmarks. More recently, [38] proposes to use vision-transformers to improve face reconstruction. + +2D Face Alignment. Traditional 2D face alignment methods predict a sparse set of manually defined landmarks. + +These methods typically involve convolutional DNNs to predict heat maps for each landmark [4, 30, 54]. Sparse key-points are not sufficient to describe full face motion, and heat maps make it computationally infeasible to predict a larger number of key-points. [42] and [18] achieve pseudo-dense alignment by using classifier networks to directly predict a very large number of landmarks. [20] predict the UV coordinates in image space and then map the vertices onto the image. Just like [41] and [32], our method predicts a per-pixel dense mapping between the UV space of a face model and the image space. However, we set our method apart by using better network architectures with vision-transformers and real instead of synthetic data. + +Evaluation of Face Trackers. Prior work evaluates face tracking and reconstruction using key-point accuracy [19, 32, 41, 42, 55], depth [37, 57], photometric [37, 57] or 3D reconstruction [5, 6, 47] errors. Sparse key-points are usually manually-annotated, difficult to define without ambiguities [54], and insufficient to describe the full motion of the face. Dense key-points [55] are difficult to compare between models using different mesh topologies. Photometric errors [37, 38, 57] are unsuitable since a perfect solution already exists within the input data, and areas with low visual saliency are neglected. A fair comparison of depth errors [37, 57] is only possible for methods using a pre-calibrated, perspective camera model. Methods that evaluate 3D reconstruction errors have to rigidly align the target and predicted mesh to fairly evaluate results [6, 34, 47], which causes valuable tracking information such as pose and intrinsics to be lost. 
Most importantly, depth and 3D reconstruction metrics neglect motion tangential to the surface normal. In contrast, our proposed metric measures the dense face motion in screen space, which is topology-independent and eliminates the need for rigid alignment. + +# 3. Method + +Our 3D face tracking pipeline consists of two stages: The first stage is predicting a dense 2D alignment of the face model, and the second stage is fitting a parametric 3D model to this alignment. + +# 3.1. Dense 2D Face Alignment Network + +# 3.1.1 Network Architecture + +The 2D alignment module is responsible for predicting the probabilistic location — in image space — of each vertex of our face model. As in [42], the 2D alignment of each vertex is represented as a random variable $A_{i} = \{\mu_{i},\sigma_{i}\}$ . $\mu_{i} = [x_{i},y_{i}]\in \mathcal{I}$ is the expected vertex position in image space $\mathcal{I}\in [0,D_{img}]^2$ , and $\sigma_{i}\in \mathbb{R}_{>0}$ is its uncertainty, modeled as the standard deviation of a circular 2D Gaussian density function. As an intermediate step, for each iteration + +![](images/069fad4bb0552718c5ca1181c746f8a2602905b4515f8fce047934d32644ac6a.jpg) +Figure 1. An overview of the proposed 2D alignment network architecture. A feature encoder transforms the image into a latent feature map that is then iteratively aligned with a learned UV positional embedding map by the recurrent update block. + +$k$ , the alignment network predicts a dense UV to image correspondence map $\mathbf{F}_k: \mathcal{U} \to \mathcal{I}$ and uncertainty map $\mathbf{S}_k$ . $\mathbf{F}_k$ maps any point in UV space $\mathcal{U} \in [0, D_{uv}]^2$ to a position in image space through a pixel-wise offset, which we call UV-image flow. This network consists of three parts (Fig. 1): + +1. An image feature encoder producing a latent feature map of the target image. +2. A positional encoding module that produces learned positional embeddings in UV space. +3. An iterative, recurrent optical flow module that predicts the probabilistic UV-image flow. + +The image space position and uncertainty of each vertex is then bi-linearly sampled from the intermediate correspondence and uncertainty map for each iteration: + +$$ +\mu_ {i, k} = \nu_ {i} + \mathbf {F} _ {k} (\nu_ {i}) \quad \text {a n d} \quad \sigma_ {i, k} = \mathbf {S} _ {k} (\nu_ {i}) \tag {1} +$$ + +where $\nu_{i}\in \mathcal{U}$ denotes the pre-defined UV coordinate of each vertex. These are manually defined by a 3D artist. + +Image feature encoder. To obtain the input to the image encoder $\mathcal{F}$ , we use SFD [51] to detect a square face bounding box from the target image and enlarge it by $20\%$ . We then crop the image to the bounding box and resize it to $D_{img}$ . We use Segformer [45] as the backbone, and replace the final classification layer with a linear layer to produce a 128-dimensional feature encoding. We further down-sample it to attain a final image feature map $Z_{img} \in \mathbb{R}^{D_{uv} \times D_{uv} \times 128}$ through average pooling. With image $\mathbf{I}$ and network parameters $\theta_{\mathcal{F}}$ , this is defined as: + +$$ +Z _ {i m g} = \mathcal {F} (\mathbf {I}, \theta_ {\mathcal {F}}) \tag {2} +$$ + +UV positional encoding module. We use a set of modules $\mathcal{G}$ with identical architecture to generate learned positional embeddings in UV-space. Each module is comprised of a multi-scale texture pyramid and a pixel-wise linear layer. 
This pyramid consists of four trainable textures with 32 channels and squared resolutions of $D_{uv}$ , $\frac{D_{uv}}{2}$ , $\frac{D_{uv}}{4}$ , and $\frac{D_{uv}}{8}$ respectively. Each texture is upsampled to $D_{uv}$ through bi-linear interpolation before concatenating them + +along the channel dimension. The concatenated textures are then passed through a pixel-wise linear layer to produce the UV positional embeddings. The multi-scale setup ensures structural consistency in UV space (closer pixels in UV should have similar features). We use 3 of these modules: $\mathcal{G}_{Z_{uv}}$ to generate a UV feature map $Z_{uv}$ , $\mathcal{G}_c$ to generator a context map $c$ , and $\mathcal{G}_{h_0}$ to generate an initial hidden state $h_0$ . With corresponding network parameters $\theta_{\mathcal{G}_{Z_{uv}}}, \theta_{\mathcal{G}_c}$ and $\theta_{\mathcal{G}_{h_0}}$ , this is described as: + +$$ +Z _ {u v} = \mathcal {G} \left(\theta_ {\mathcal {G} _ {Z _ {u v}}}\right); c = \mathcal {G} \left(\theta_ {\mathcal {G} _ {c}}\right); h _ {0} = \mathcal {G} \left(\theta_ {\mathcal {G} _ {h _ {0}}}\right) \tag {3} +$$ + +UV-image flow. The RAFT [36] network is designed to predict the optical flow between two images. It consists of a correlation block that maps the latent features encoded from each image into a 4D correlation volume. A context encoder initializes the hidden state of a recurrent update block and provides it with additional context information. The update block then iteratively refines a flow estimate while sampling the correlation volume. + +We adapt this network to predict the UV-image flow $\mathbf{F} \in \mathbb{R}^{D_{uv} \times D_{uv} \times 2}$ . We directly pass $Z_{uv}$ and $Z_{img}$ to the correlation block $\mathbf{C}$ . We use the context map $c$ and initial hidden state $h_0$ from the positional encoding modules for the update module $\mathbf{U}$ . We modify the update module to also predict a per-iteration uncertainty in addition to the flow estimate, by duplicating the flow prediction head to predict a 1-channel uncertainty map $\mathbf{S} \in \mathbb{R}_{>0}^{D_{uv} \times D_{uv}}$ . An exponential operation is applied to ensure positive values. The motion encoder head is adjusted to accept the uncertainty as an input. The modified RAFT network then works as follows: For each iteration $k$ , the recurrent update module performs a look-up in the correlation volume, context map $c$ , previous hidden state $h_{k-1}$ , previous flow $\mathbf{F}_{k-1}$ and previous uncertainty $\mathbf{S}_{k-1}$ . It outputs the refined flow estimate $\mathbf{F}_k$ and uncertainty $\mathbf{S}_k$ and the subsequent hidden state $h_k$ . Formally, + +$$ +\mathbf {F} _ {k}, \mathbf {S} _ {k}, h _ {k} = \mathbf {U} \left(\mathbf {C} \left(Z _ {u v}, Z _ {i m g}\right), c, \mathbf {F} _ {k - 1}, \mathbf {S} _ {k - 1}, h _ {k - 1}, \theta_ {\mathbf {U}}\right) \tag {4} +$$ + +with update module weights $\theta_{\mathbf{U}}$ . For a detailed explanation of our modified RAFT, we defer to [36] and Appendix B. + +# 3.1.2 Loss Functions + +We supervise our network with Gaussian negative log-likelihood (GNLL) both on the probabilistic per-vertex positions and the dense UV-image flow. 
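Concretely, both supervision terms are standard Gaussian negative log-likelihood objectives. The following PyTorch-style sketch illustrates the per-vertex term (Eq. (5) below) and the iteration-weighted total (Eq. (7) below); the function and variable names are illustrative and do not correspond to a released implementation.

```python
import torch

def vertex_gnll(mu_pred, sigma_pred, mu_gt, vertex_weights):
    """Per-vertex Gaussian NLL, cf. Eq. (5).
    mu_pred, mu_gt: (N_v, 2) image-space vertex positions,
    sigma_pred:     (N_v,) positive standard deviations,
    vertex_weights: (N_v,) pre-defined weights lambda_i."""
    sq_err = ((mu_pred - mu_gt) ** 2).sum(dim=-1)   # squared distance per vertex
    nll = torch.log(sigma_pred ** 2) + sq_err / (2 * sigma_pred ** 2)
    return (vertex_weights * nll).sum()

def total_loss(vertex_terms, dense_terms, alpha=0.8, lambda_dense=0.01):
    """Decayed sum of per-iteration losses over the N_iter update steps, cf. Eq. (7)."""
    n_iter = len(vertex_terms)
    return sum(alpha ** (n_iter - k) * (l_v + lambda_dense * l_d)
               for k, (l_v, l_d) in enumerate(zip(vertex_terms, dense_terms), start=1))
```

The dense term of Eq. (6) has the same form, applied to every pixel of the UV-image flow and uncertainty maps.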
For each iteration $k$ of the update module, we apply the per-vertex loss function: + +$$ +L _ {k} ^ {\text {v e r t e x}} = \sum_ {i = 1} ^ {N _ {v}} \lambda_ {i} \left(\log \left(\sigma_ {i, k} ^ {2}\right) + \frac {\left\| \mu_ {i , k} - \mu_ {i} ^ {\prime} \right\| ^ {2}}{2 \sigma_ {i , k} ^ {2}}\right) \tag {5} +$$ + +where $\lambda_{i}$ is a pre-defined vertex weight and $\mu_i^\prime$ is the ground truth vertex position. We encourage our network to predict coherent flow and uncertainty maps in areas with no vertices by applying the GNLL loss for each pixel $p$ in UV space: + +$$ +L _ {k} ^ {\text {d e n s e}} = \sum_ {p \in | \mathcal {U} |} \lambda_ {p} \left(\log \left(\mathbf {S} _ {k, p} ^ {2}\right) + \frac {\| \mathbf {F} _ {k , p} - \mathbf {F} _ {p} ^ {\prime} \| ^ {2}}{2 \mathbf {S} _ {k , p} ^ {2}}\right) \tag {6} +$$ + +where $\lambda_{p}$ is a pre-defined per-pixel weight and $\mathbf{F}'$ is the ground truth UV-image flow. The final loss is a weighted sum of these losses, with a decay factor for each iteration of $\alpha = 0.8$ and a dense weight of $\lambda_{dense} = 0.01$ : + +$$ +\operatorname {L o s s} = \sum_ {k = 1} ^ {N _ {\text {i t e r}}} \alpha^ {N _ {\text {i t e r}} - k} \left(L _ {k} ^ {\text {v e r t e x}} + \lambda_ {\text {d e n s e}} L _ {k} ^ {\text {d e n s e}}\right) \tag {7} +$$ + +# 3.2. 3D Model Fitting + +As in [42], the 3D reconstruction is obtained by jointly fitting a 3D head model and camera parameters to the predicted 2D alignment observations for the entire sequence. This is done by optimizing the energy function $E(\Phi; A)$ w.r.t to the model parameters $\Phi$ and alignment $A$ (see Fig. 2). These parameters and the energy terms are defined below. + +![](images/e6e44bfea6c668c34742af0b14986f3a32f71da681fe64c2eb8f77dee6bf2d22.jpg) +Figure 2. An illustration of the 3D model fitting process. + +# 3.2.1 Tracking Model and Parameters + +The tracking model consists of a 3D head model and a camera model. A tracking sequence contains $C$ cameras, $F$ frames with a total of $C \times F$ images. + +3D head model. We use FLAME [26] as our 3D head model M. This model consists of $N_{\nu} = 5023$ vertices, which are controlled by identity shape parameters $\beta \in \mathbb{R}^{300}$ , expression shape parameters $\phi \in \mathbb{R}^{100}$ and $K = 5$ skeletal joint poses $\theta \in \mathbb{R}^{3K + 3}$ (including the root translation) through linear blend skinning [25]. We ignore root, neck and jaw pose and use the FLAME2023 model, which + +includes deformations due to jaw rotation within the expression blend-shapes. We also introduce additional static pervertex deformations $\delta_d\in \mathbb{R}^{N_v\times 3}$ to enhance identity shape detail. The local head model vertices can be expressed using its parameters as follows: + +$$ +\mathbf {M} (\boldsymbol {\beta}, \boldsymbol {\delta} _ {d}, \boldsymbol {\phi}, \boldsymbol {\theta}) = F L A M E (\boldsymbol {\beta}, \boldsymbol {\phi}, \boldsymbol {\theta}) + \boldsymbol {\delta} _ {d} \tag {8} +$$ + +The rigid transform $\mathbf{T}^{\mathbf{M}}\in \mathbb{R}^{3\times 4}$ represents the head pose, which transforms head model vertices $i$ into world space for each frame $t$ : + +$$ +\mathbf {x} _ {i, t} ^ {\mathrm {3 D}} = \mathbf {T} _ {t} ^ {\mathbf {M}} \mathbf {M} _ {i} \tag {9} +$$ + +Camera model. 
The cameras are described by the world-to-camera rigid transform $\mathbf{T}_{cam} \in \mathbb{R}^{3 \times 4}$ and the pinhole camera projection matrix $\mathbf{K} \in \mathbb{R}^{3 \times 3}$ defined by a single focal length $f \in \mathbb{R}$ parameter. The camera model defines the image-space projection of the 3D vertices in camera $j$ : + +$$ +\mathbf {x} _ {i, j, t} ^ {\mathrm {2 D}} = \mathbf {K} _ {j} \mathbf {T} _ {j} ^ {c a m} \mathbf {x} _ {i, t} ^ {\mathrm {3 D}} \tag {10} +$$ + +Parameters. The parameters $\Psi$ consist of the head model and camera parameters, which are optimized to minimize $E(\Phi; A)$ . The camera parameters can be fixed to known values, if the calibration is available. Expression and poses vary for each frame $t$ , whereas camera, identity shape, and deformation parameters are shared over the sequence. + +$$ +\Psi = \left\{\beta , \Phi_ {F \times | \phi |}, \Theta_ {F \times | \theta |}, \delta_ {\mathrm {d}}; \mathbf {T} _ {F \times 3 \times 4} ^ {\mathbf {M}}; \mathbf {T} _ {C \times 3 \times 4} ^ {c a m}, f _ {C} \right\} \tag {11} +$$ + +# 3.2.2 Energy Terms + +The energy function is defined as: + +$$ +E (\Phi ; A) = E _ {A} + E _ {F L A M E} + E _ {\text {t e m p}} + E _ {M I C A} + E _ {\text {d e f o r m}} \tag {12} +$$ + +$E_{A}$ encourages 2D alignment: + +$$ +E _ {A} = \sum_ {i, j, t} ^ {N _ {\nu}, C, F} \lambda_ {i} \frac {\left\| \mathbf {x} _ {i , j , t} ^ {\mathrm {2 D}} - \mu_ {i , j , t} \right\| ^ {2}}{2 \sigma_ {i , j , t} ^ {2}} \tag {13} +$$ + +where for vertex $i$ seen by camera $j$ in frame $t$ . $\mu_{i,j,t}$ and $\sigma_{i,j,t}$ is the 2D location and uncertainty predicted by the final iteration of our 2D alignment network, and $\mathbf{x}_{i,j,t}^{2D}$ (Eq. (10)) is the 2D camera projection of that vertex. + +$E_{FLAME} = \lambda_{FLAME}(\| \beta \|^{2} + \| \Phi \|^{2})$ encourages the optimizer to explain the data with smaller identity and expression parameters. This leads to face shapes that are statistically more likely [10, 14, 26, 57] and a more accurate 3D reconstruction. We do not penalize joint rotation, face translation or rotation. + +$E_{\text {temp }}$ applies a loss on the acceleration of the 3D position $\mathbf{x}_{i,t}^{3\mathrm{D}}$ of every vertex of the 3D model to prevent jitter and encourage a smoother, more natural face motion: + +$$ +E _ {\text {t e m p}} = \lambda_ {\text {t e m p}} \sum_ {i, j, t = 2} ^ {N _ {v}, C, F - 1} \| \mathbf {x} _ {j, t - 1} ^ {\mathrm {3 D}} - 2 \mathbf {x} _ {j, t} ^ {\mathrm {3 D}} + \mathbf {x} _ {j, t + 1} ^ {\mathrm {3 D}} \| ^ {2} \tag {14} +$$ + +$E_{MICA} = \lambda_{MICA} \| \mathbf{M}_{\Phi=0,\theta=0} - \mathbf{M}_{MICA} \|^2$ provides a 3D neutral geometry prior for the optimizer to enable a better disentanglement between identity and expression components. It consists of the L2 distance of the neutral head model vertices to the MICA [57] template $\mathbf{M}_{MICA}$ . This template is computed by predicting the average neutral head vertices using the MICA model [57] for all frames of the sequence. The term also enables a more accurate 3D reconstruction since the model can rely on MICA predictions where the alignment is uncertain, such as in the depth direction or for occluded vertices. In areas of confident alignment, the MICA prediction can be refined. + +$E_{\text{deform}} = \lambda_{\text{deform}} \| \delta_{\mathrm{d}} \|^2$ encourages per-vertex deformations to be small w.r.t. the FLAME model. + +# 3.3. 
Multiface Face Tracking Benchmark + +Our monocular 3D face tracking benchmark focuses on 3D reconstruction and motion capture accuracy. To evaluate these, we use our proposed screen space motion error (SSME) and the scan-to-mesh chamfer distance (CD). + +![](images/63eba31d72bebffaeb9cd79123be37fd3f1e90c4e64be42185a730efb8d91c24.jpg) +Figure 3. An illustration of the EPE computation for each frame. + +Screen Space Motion Error. To define the Screen Space Motion Error (SSME), we reformulate face tracking as an optical flow prediction problem over a set of time windows. First, we project the ground truth mesh and predicted mesh into screen space using the respective camera model. Then, we use the screen space coordinates to compute the ground truth optical flow $\mathbf{f}_{t:t + h}^{\prime}$ and predicted optical flow $\mathbf{f}_{t:t + h}$ from frame $t$ to frame $t + h$ for each frame $t\in [1,\dots ,F]$ and a sequence of frame windows $h = [1,\dots ,N_H]$ . For each frame and frame window, the average end-point-error $EPE_{t:t + h}$ is computed by averaging the L2-distance between ground truth and predicted optical flow for each pixel (see Fig. 3). + +$$ +E P E _ {t: t + h} = \left\| V \odot \left(\mathbf {f} _ {t: t + h} - \mathbf {f} _ {t: t + h} ^ {\prime}\right) \right\| ^ {2} \tag {15} +$$ + +where $V$ is a mask to separate different face regions and $\odot$ is the Hadamard product. See Fig. 3 for a visual reference. + +The screen space motion error $SSME_{h}$ for frame window $h$ is then defined as the mean of all EPEs over all frames $t$ where frame $t + h$ exists: + +$$ +S S M E _ {h} = \frac {1}{F - h} \sum_ {t = 1} ^ {t + h \leq F} E P E _ {t: t + h} \tag {16} +$$ + +Finally, to summarize tracking performance in one value, we compute the average screen space motion error $\overline{SSME}$ over all frame windows as + +$$ +\overline {{S S M E}} = \sum_ {h = 1} ^ {N _ {H}} S S M E _ {h} \tag {17} +$$ + +In other words, $\overline{SSME}$ measures the average trajectory accuracy of each pixel over a time horizon of $N_{H}$ frames. We choose a maximum frame window of $N_{H} = 30$ (1 second) since most human expressions are performed within this time frame. Because the screen space motion is directly affected by most face-tracking parameters such as intrinsics, pose, and face shape, it also measures their precision in a holistic manner. In contrast to prior works and benchmarks that use sparse key-points, SSME covers the motion of all visible face regions and is invariant to mesh topology. As it operates in screen space, it does not require additional alignment and works with all camera models, unlike 3D reconstruction or depth errors. In our benchmark, we evaluate SSME over a set of masks for semantically meaningful face regions (face, eyes, nose, mouth, and ears) (Fig. 3), permitting a more nuanced analysis of the tracking performance. + +3D Reconstruction. To complete our benchmark, we additionally measure the chamfer distance (CD) to account for the depth dimension. Similar to [34], the tracked mesh is rigidly aligned to the ground truth mesh using 7 key-points and ICP. Then, the distance of each ground truth vertex with respect to the predicted mesh is computed and averaged. For a detailed explanation, we defer to the NoW benchmark [34]. Just like the SSME, we evaluate the CD for the same set of face regions to provide a more detailed analysis of reconstruction accuracy, similar to [6]. + +Multiface Dataset. We build our benchmark around the Multiface dataset [44]. 
Multiface consists of multi-view videos with high-quality, topologically consistent 3D registrations. High-resolution videos are captured at 30 FPS from a large variety of calibrated views. We limit the evaluation data to a manageable size by carefully selecting a subset of 86 sequences with a diverse set of view directions and facial performances (see Appendix C).

# 4. Experiments

Training data. To train the 2D alignment network, we use a combined dataset made up of FaceScape [47], Stirling [1], and FaMoS [3]. Where a FLAME [26] registration is not available, we fit the FLAME template mesh to the 3D scan through semi-automatic key-point annotation and commercial topology fitting software. For an accurate capture of face motion, we auto-annotate expression scans with additional key-points propagated with optical flow (more information in Appendix D). The ground truth image-space vertex positions $\mu^{\prime}$ are obtained by projecting the vertices of the fitted FLAME mesh into screen space using the available camera calibrations.

Training strategy for 2D alignment network. We use Segformer-b5 (pre-trained on ImageNet [11]) as our backbone, with $D_{img} = 512$, $D_{uv} = 64$ and $N_{iter} = 3$. We use the RAFT-L configuration for the update module and keep its hyperparameters where possible [36]. We optimize the model for 6 epochs using the AdamW optimizer [27], an initial learning rate of $1 \times 10^{-4}$ and a decay of 0.1 every 2 epochs. We use image augmentations such as random scaling, rotation, and color corruption [42], synthetic occlusions [39] and synthetic backgrounds (see Appendix D).

3D model fitting. To minimize the energy function and obtain the tracking parameters, we use the AdamW optimizer with an initial learning rate of $1 \times 10^{-2}$ and an automatic learning rate scheduler with a decay factor of 0.5 and a patience of 30 steps, until convergence. We enable $\delta_{d}$ only for multi-view reconstruction, and only for the nose region.

Baselines. We implement and test against the most recent publicly available single-image regression-based approaches: 3DDFAv2 [19], SADRNet [32], PRNet [41], DECA (coarse) [14], EMOCA (coarse) [10], and HRN [24]. We extend these methods to use temporal priors by applying a simple temporal Gaussian filter to their screen-space vertices. We also include the popular photometric optimization-based approach MPT [57]. Lastly, we compare against the key-point-only optimization-based method Dense proposed by [42] on public benchmarks.

# 4.1. Multiface Benchmark

We divide our Multiface benchmark into two categories: without temporal information sharing, where each method is restricted to operate on single images, and with (both forward and backward) temporal information sharing, where each method is allowed to use the entire sequence as observations. Our method significantly outperforms the best publicly available method w.r.t. face-region SSME, by $54\%$ on single-image prediction and by $46\%$ on sequence prediction. This confirms the superior 2D alignment accuracy of our method. Despite using only 2D alignment as supervision, our method performs $8\%$ better in terms of 3D reconstruction (CD) than the photometric optimization approach MPT [57] (see Tab. 2).

![](images/56e140f3608727c83214744d118f0fe14a861d5fb215c184fc93d6b0ed9865fc.jpg)
Figure 4. $SSME_h$ plotted over all frame horizons for each evaluated tracker for single-image and full sequence tracking (right).
# 4.1. Multiface Benchmark

We divide our Multiface benchmark into two categories: without temporal information sharing, where each method is restricted to operating on single images, and with (both forward and backward) temporal information sharing, where each method is allowed to use the entire sequence as observations. Our method significantly outperforms the best publicly available method w.r.t. face-region SSME, by $54\%$ on single-image prediction and by $46\%$ on sequence prediction. This confirms the superior 2D alignment accuracy of our method. Despite using only 2D alignment as supervision, our method performs $8\%$ better in terms of 3D reconstruction (CD) than the photometric optimization approach MPT [57] (see Tab. 2).

![](images/56e140f3608727c83214744d118f0fe14a861d5fb215c184fc93d6b0ed9865fc.jpg)
Figure 4. $SSME_h$ plotted over all frame horizons for each evaluated tracker, for single-image and full sequence tracking (right). Lower $SSME_h$ at smaller frame horizons $h$ (left in the graph) indicates better short-term temporal stability, while lower $SSME_h$ at larger frame horizons (right in the graph) indicates better long-term tracking consistency. Our tracker performs significantly better over every time horizon.

![](images/02092b215babc05f1b379f4a21721f8f37cdf01bde2c8c8349351a72b42e2fce.jpg)

To our surprise, MPT performs worse w.r.t. motion error than some regression-based models; this is likely due to the uniform lighting and texture in the Multiface dataset. Qualitative results (Fig. 5) confirm that methods using photometric errors (DECA, HRN, MPT) perform worse w.r.t. screen space motion in areas without key-point supervision, such as the cheeks and forehead. Plotting $SSME_h$ over different time windows $h$ (see Fig. 4) gives an overview of temporal stability that previous metrics do not provide. Regression-based methods suffer from a high short-term error ($SSME_1$), which is due to temporal instability and jitter. As expected, introducing temporal smoothing alleviates this issue and improves the overall $SSME$ for these methods. Our method achieves a very low short-term SSME even with single-image prediction, which indicates the high robustness and accuracy of the alignment network. As expected, introducing temporal priors further reduces the $SSME$.

# 4.2. FaceScape Benchmark
| Method | CD ↓ (mm) | NME ↓ (rad) |
| --- | --- | --- |
| MGCNet [35] | 4.00 | 0.093 |
| PRNet [41] | 3.56 | 0.126 |
| SADRNet [32] | 6.75 | 0.133 |
| DECA [14] | 4.69 | 0.108 |
| 3DDFAv2 [19] | 3.60 | 0.096 |
| HRN [24] | 3.67 | 0.087 |
| Ours | 2.21 | 0.083 |

Table 1. Results on the FaceScape benchmark [47].

We also compare our method on the FaceScape benchmark [47], which measures 3D reconstruction accuracy from 2D images under large view (up to $90^{\circ}$) and expression variations. On this benchmark, we outperform the best previous regression-based methods by $38\%$ in terms of CD and by $4.6\%$ in terms of mean normal error (NME) (Tab. 1). This shows that our method can accurately reconstruct faces even under large view deviations.

![](images/771b770c20257cbf42592d1d6888a33d7794b9e4137f3b996e0cf10857f23ad1.jpg)
Figure 5. Qualitative results on two sequences (top and bottom 3 rows) of our Multiface benchmark. Warmer colors represent high error, while colder colors represent low error. DECA [14], HRN [24], and MPT [57] struggle with motion in the cheek and forehead region, which is visible in the SSME error plot (right columns). Despite using only 2D alignment as supervision, our method achieves a better 3D reconstruction (CD) (center columns).

![](images/91d3e82e28e9b8e96467d726c70e25caf99982bf905198fd0516bc9da58cce3a.jpg)

Table 2. Results on our Multiface tracking benchmark with and without temporal information sharing. Our method consistently outperforms previous methods in every category, metric, and face region.
| Method | Single-view Median (mm) ↓ | Single-view Mean (mm) ↓ | Single-view Std (mm) ↓ | Multi-view Median (mm) ↓ | Multi-view Mean (mm) ↓ | Multi-view Std (mm) ↓ |
| --- | --- | --- | --- | --- | --- | --- |
| MGCNet [35] | 1.31 | 1.87 | 2.63 | - | - | - |
| PRNet [41] | 1.50 | 1.98 | 1.88 | - | - | - |
| DECA [14] | 1.09 | 1.38 | 1.18 | - | - | - |
| Deep3D [12] | 1.11 | 1.41 | 1.21 | 1.08 | 1.35 | 1.15 |
| Dense [42] | 1.02 | 1.28 | 1.08 | 0.81 | 1.01 | 0.84 |
| MICA [57] | 0.90 | 1.11 | 0.92 | - | - | - |
| TokenFace [38] | 0.76 | 0.95 | 0.82 | - | - | - |
| Ours | 0.87 | 1.07 | 0.88 | 0.71 | 0.88 | 0.73 |

Table 3. Results on the NoW Challenge [34]. Multi-view evaluation is done as in [42]. Multi-view results for [12] and [42] are reported by [42].

# 4.3. NoW Challenge

The NoW benchmark is a public benchmark for evaluating neutral head reconstruction from 2D images captured indoors and outdoors, with different expressions, and under variations in lighting conditions and occlusions. We evaluate our method on the non-metrical challenge (Tab. 3). For single-view reconstruction, our model outperforms our neutral shape predictor MICA [57] by $4\%$ on mean scan-to-mesh distance. For the multi-view case, we outperform the baseline Dense [42] by $13\%$, likely due to our method's high 2D alignment accuracy, better neutral shape priors, and per-vertex deformations. TokenFace [38] performs better in the single-view case; however, its predictions could be integrated into our pipeline since it uses the FLAME topology. Importantly, our network is able to generalize to these in-the-wild images despite being trained only on in-the-lab data captured under controlled lighting conditions. An important sub-task for 3D face trackers is to disentangle the identity and expression components of the face shape. The outstanding results on the NoW benchmark indicate the ability of our tracker to accomplish this.

# 4.4. Downstream Tasks

In the following, we show how we enhance downstream models using our face tracker.

3D Head Avatar Synthesis. Recent head avatar synthesis methods heavily rely on photometric head trackers to generate face alignment priors [17, 53, 56]. INSTA [56], a top-performing model, uses MPT [57]. We modify INSTA by replacing its tracker with ours and compare the enhanced FlowFace-INSTA to the baseline MPT-INSTA. On their publicly available dataset, we outperform MPT-INSTA by $10.5\%$ on perceptual visual fidelity (LPIPS). On our Multiface benchmark videos, we outperform MPT-INSTA by $20.3\%$ on LPIPS. Detailed results can be viewed in Appendix G. These results demonstrate how better face trackers can directly improve performance on downstream tasks, which highlights the importance of our research.

Speech-driven 3D facial animation. The field of speech-driven facial animation often suffers from data sparsity [9, 13, 46]. To alleviate this issue, we generate 3D face meshes from the multi-view video dataset MEAD [40]. By using this generated dataset to augment the training of the state-of-the-art model CodeTalker [46] (see Appendix H), we improve the lip vertex error from $3.13 \times 10^{-5}$ to $2.85 \times 10^{-5}$ on the VOCASET benchmark [9], an $8.8\%$ improvement. This underlines the benefit of high-accuracy video face trackers for large-scale data generation.

# 4.5. 2D Alignment

To show the benefit of our 2D alignment model architecture, we conduct an evaluation on our validation set, which consists of 84 subjects of our dataset. We implement the dense landmark model of [42] (ResNet-101 backbone) and adapt it to output FLAME vertex alignment and uncertainty. We also implement PRNet [41] and modify it in the same way. We retrain each method on our training set. We evaluate the 2D alignment accuracy with respect to the normalized mean error (NME) of every vertex in the face area (Fig. 14, green vertices). With an NME of 1.30, our method performs significantly better than the ResNet architecture of Dense [42] (NME = 1.63) and PRNet (NME = 2.52). We note that the accuracy of the uncertainty estimates cannot be evaluated with NME.
A qualitative comparison can be viewed in Fig. 17.

# 4.6. Ablation Studies

2D alignment network. To analyze the effect of different feature encoder backbones, we replace our backbone with different variants of the Segformer model and also test the CNN-based backbone BiSeNet-v2 [49] (see Tab. 4). As expected, vision-transformer-based networks show better performance. Experimenting with the number of iterations $N_{iter}$ of the update module, we find that using multiple iterations instead of one improves performance. Finally, we confirm the superior performance of our 2D alignment network compared to the ResNet-101-based network of [42] mentioned in Sec. 4.5.
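To make the role of $N_{iter}$ explicit, the following is a minimal sketch of the recurrent refinement loop around the update module (cf. Eq. 4); the callable interfaces are placeholders for illustration and do not reflect the actual implementation.

```python
def iterative_alignment(update_module, correlation, context, hidden,
                        flow, uncertainty, n_iter=3):
    """Run the recurrent update module for n_iter refinement iterations (cf. Eq. 4).

    update_module: placeholder callable for the modified RAFT update block; it maps
                   (correlation volume, context, previous flow, previous uncertainty,
                   previous hidden state) to refined estimates and a new hidden state.
    correlation:   correlation volume built from the UV and image feature maps.
    """
    for _ in range(n_iter):
        flow, uncertainty, hidden = update_module(
            correlation, context, flow, uncertainty, hidden)
    return flow, uncertainty
```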
| Backbone | $N_{iter}$ | #Param | Latency (ms) | CD ↓ | SSME ↓ |
| --- | --- | --- | --- | --- | --- |
| ResNet-101 | - | 73.4M | 9 | 1.54 | 3.90 |
| BiSeNet-v2 | 3 | 17.6M | 23 | 1.21 | 3.52 |
| MiT-b1 | 3 | 17.3M | 29 | 1.22 | 3.21 |
| MiT-b2 | 3 | 31.0M | 46 | 1.20 | 2.78 |
| MiT-b5 | 1 | 88.2M | 66 | 1.25 | 2.70 |
| MiT-b5 | 2 | 88.2M | 71 | 1.21 | 2.61 |
| MiT-b5 | 3 | 88.2M | 75 | 1.18 | 2.58 |
| MiT-b5 | 4 | 88.2M | 80 | 1.23 | 2.62 |
+ +3D model fitting. We show in Tab. 5 the benefit of integrating the MICA neutral shape prediction on the NoW Challenge validation set. The significant performance gain on single-image predictions shows that our 3D tracking pipeline can integrate MICA predictions very well, even improving them. We also show the benefit of predicting a dense face alignment in conjunction with per-vertex deformations in multi-view settings. This shows that our 2D alignment is precise enough to predict face shapes that lie outside of the FLAME blend-shape space, which previous optimization-based methods [42, 57] cannot achieve. For a qualitative analysis, see Appendix E. + +Table 4. Ablations for backbone architectures and hyperparameters of the 2D alignment network on our Multiface benchmark. Latency is evaluated on a Quadro RTX 5000 GPU. + +
| Method | Single-view Median (mm) | Single-view Mean (mm) | Single-view Std (mm) | Multi-view Median (mm) | Multi-view Mean (mm) | Multi-view Std (mm) |
| --- | --- | --- | --- | --- | --- | --- |
| Ours w/o MICA | 0.99 | 1.23 | 1.03 | 0.71 | 0.88 | 0.76 |
| MICA only | 0.91 | 1.13 | 0.94 | - | - | - |
| Ours w/o $\delta_d$ | - | - | - | 0.68 | 0.84 | 0.72 |
| Ours | 0.82 | 1.02 | 0.85 | 0.67 | 0.83 | 0.71 |

Table 5. Ablations for the 3D model fitting module on single- and multi-view reconstruction on the NoW validation set.

# 5. Conclusion and Future Work

This paper presents a state-of-the-art face tracking pipeline with a highly robust and accurate 2D alignment module. Its performance is thoroughly validated on a variety of benchmarks and downstream tasks. However, the proposed two-stage pipeline is not fully differentiable, which prevents end-to-end learning. Furthermore, our training data is limited to data captured in the lab. In future work, we intend to extend the alignment network to directly predict depth as well, obviating the need for the 3D model fitting step. Synthetic datasets [42] could alleviate the data issue.

We are confident that our tracker will accelerate research in downstream tasks by generating large-scale face capture data from readily available video datasets [8, 29, 50]. We also believe that our novel motion capture evaluation benchmark will focus and align future research efforts to create even more accurate methods.

# References

[1] Stirling/ESRC 3D face database. https://pics.stir.ac.uk/ESRC/. Accessed: 2023-10-25. 5, 2, 4
[2] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques, pages 187-194, USA, 1999. ACM Press/Addison-Wesley Publishing Co. 1, 2
[3] Timo Bolkart, Tianye Li, and Michael J. Black. Instant multi-view head capture through learnable registration. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 768-779, 2023. 6, 2
[4] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In International Conference on Computer Vision, 2017. 2
[5] Chen Cao, Menglei Chai, Oliver Woodford, and Linjie Luo. Stabilized real-time face tracking via a learned dynamic rigidity prior. ACM Trans. Graph., 37(6), 2018. 2
[6] Zenghao Chai, Haoxian Zhang, Jing Ren, Di Kang, Zhengzhuo Xu, Xuefei Zhe, Chun Yuan, and Linchao Bao. REALY: Rethinking the evaluation of 3d face reconstruction, 2022. 2, 5
[7] Zenghao Chai, Tianke Zhang, Tianyu He, Xu Tan, Tadas Baltrusaitis, HsiangTao Wu, Runnan Li, Sheng Zhao, Chun Yuan, and Jiang Bian. Hiface: High-fidelity 3d face reconstruction by learning static and dynamic details, 2023. 2
[8] J. S. Chung, A. Nagrani, and A. Zisserman. Voxceleb2: Deep speaker recognition. In INTERSPEECH, 2018. 8
[9] Daniel Cudeiro, Timo Bolkart, Cassidy Laidlaw, Anurag Ranjan, and Michael Black. Capture, learning, and synthesis of 3D speaking styles. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pages 10101-10111, 2019. 8, 7
[10] Radek Danecek, Michael J. Black, and Timo Bolkart. Emoca: Emotion driven monocular face capture and animation, 2022. 1, 2, 4, 6, 7
[11] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 6, 1
[12] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In IEEE Computer Vision and Pattern Recognition Workshops, 2019. 2, 7
[13] Yingruo Fan, Zhaojiang Lin, Jun Saito, Wenping Wang, and Taku Komura. Faceformer: Speech-driven 3d facial animation with transformers. arXiv preprint arXiv:2112.05329, 2021. 8
[14] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3d face model from in-the-wild images. CoRR, abs/2012.04012, 2020. 1, 4, 6, 7, 10, 11
[15] Pablo Garrido, Michael Zollhöfer, Dan Casas, Levi Valgaerts, Kiran Varanasi, Patrick Pérez, and Christian Theobalt. Reconstruction of personalized 3d face rigs from monocular video. ACM Trans. Graph., 35(3), 2016. 2
[16] Pablo Garrido, Michael Zollhöfer, Chenglei Wu, Derek Bradley, Patrick Pérez, Thabo Beeler, and Christian Theobalt. Corrective 3d reconstruction of lips from monocular video. ACM Trans. Graph., 35(6), 2016. 2
[17] Philip-William Grassal, Malte Prinzler, Titus Leistner, Carsten Rother, Matthias Nießner, and Justus Thies. Neural head avatars from monocular rgb videos. arXiv preprint arXiv:2112.01554, 2021. 8
[18] Ivan Grishchenko, Artsiom Ablavatski, Yury Kartynnik, Karthik Raveendran, and Matthias Grundmann. Attention mesh: High-fidelity face mesh prediction in real-time. CoRR, abs/2006.10962, 2020. 2
[19] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Yang Fan, Zhen Lei, and Stan Li. Towards Fast, Accurate and Stable 3D Dense Face Alignment, pages 152-168. 2020. 1, 2, 6, 7, 10, 11
[20] Riza Alp Güler, George Trigeorgis, Epameinondas Antonakos, Patrick Snape, Stefanos Zafeiriou, and Iasonas Kokkinos. Densereg: Fully convolutional dense shape regression in-the-wild, 2017. 2
[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015. 2
[22] A 3D Face Model for Pose and Illumination Invariant Face Recognition, Genova, Italy, 2009. IEEE. 2
[23] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. 7
[24] Biwen Lei, Jianqiang Ren, Mengyang Feng, Miaomiao Cui, and Xuansong Xie. A hierarchical representation network for accurate and detailed face reconstruction from in-the-wild images, 2023. 1, 2, 6, 7, 10, 11
[25] J. P. Lewis, Matt Cordner, and Nickson Fong. Pose space deformation: A unified approach to shape interpolation and skeleton-driven deformation. In Proceedings of the 27th Annual Conference on Computer Graphics and Interactive Techniques, pages 165-172, USA, 2000. ACM Press/Addison-Wesley Publishing Co. 4
[26] Tianye Li, Timo Bolkart, Michael J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 2, 4, 6, 3, 5, 7
[27] Ilya Loshchilov and Frank Hutter. Fixing weight decay regularization in adam. CoRR, abs/1711.05101, 2017. 6
[28] Araceli Morales, Gemma Piella, and Federico M. Sukno. Survey on 3d face reconstruction from uncalibrated images. CoRR, abs/2011.05740, 2020. 2
[29] Arsha Nagrani, Joon Son Chung, Weidi Xie, and Andrew Zisserman. Voxceleb: Large-scale speaker verification in the wild. Computer Speech and Language, 2019. 8
[30] Andrés Prados-Torreblanca, José M Buenaposada, and Luis Baumela. Shape preserving facial landmarks with graph attention networks. In 33rd British Machine Vision Conference 2022, BMVC 2022, London, UK, November 21-24, 2022. BMVA Press, 2022. 2
[31] Aashish Rai, Hiresh Gupta, Ayush Pandey, Francisco Vicente Carrasco, Shingo Jason Takagi, Amaury Aubel, Daeil Kim, Aayush Prakash, and Fernando de la Torre. Towards realistic generative 3d face models, 2023. 2
[32] Zeyu Ruan, Changqing Zou, Longhai Wu, Gangshan Wu, and Limin Wang. SADRNet: Self-aligned dual face regression networks for robust 3d dense face alignment and reconstruction. IEEE Transactions on Image Processing, 30:5793-5806, 2021. 2, 6, 7, 10, 11
[33] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh Chen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019. 2
[34] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael Black. Learning to regress 3d face shape and expression from an image without 3d supervision. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 5, 7, 3
[35] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Mingmin Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3d face reconstruction by occlusion-aware multi-view geometry consistency. arXiv preprint arXiv:2007.12494, 2020. 2, 6, 7
[36] Zachary Teed and Jia Deng. RAFT: recurrent all-pairs field transforms for optical flow. CoRR, abs/2003.12039, 2020. 3, 6, 1
[37] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2face: Real-time face capture and reenactment of rgb videos, 2020. 1, 2
[38] Zhang Tianke, Chu Xuangeng, Liu Yunfei, Lin Lijian, Yang Zhendong, Xu Zhengzhuo, Cao Chengkun, Yu Fei, Zhou Changyin, Yuan Chun, and Yu Li. Accurate 3d face reconstruction with facial component tokens. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2023. 2, 7
[39] Kenny T. R. Voo, Liming Jiang, and Chen Change Loy. Delving into high-quality synthetic face occlusion segmentation datasets. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2022. 6, 3
[40] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 8, 6
[41] Yue Wang and Justin M. Solomon. Prnet: Self-supervised learning for partial-to-partial registration, 2019. 2, 6, 7, 8
[42] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Matthew Johnson, Jingjing Shen, Nikola Milosavljevic, Daniel Wilde, Stephan Garbin, Chirag Raman, Jamie Shotton, Toby Sharp, Ivan Stojiljkovic, Tom Cashman, and Julien Valentin. 3d face reconstruction with dense landmarks, 2022. 1, 2, 4, 6, 7, 8, 3, 5
[43] Chenglei Wu, Derek Bradley, Markus Gross, and Thabo Beeler. An anatomically-constrained local deformation model for monocular face capture. ACM Trans. Graph., 35(4), 2016. 2
[44] Cheng-hsin Wuu, Ningyuan Zheng, Scott Ardisson, Rohan Bali, Danielle Belko, Eric Brockmeyer, Lucas Evans, Timothy Godisart, Hyowon Ha, Xuhua Huang, Alexander Hypes, Taylor Koska, Steven Krenn, Stephen Lombardi, Xiaomin Luo, Kevyn McPhail, Laura Millerschoen, Michal Perdoch, Mark Pitts, Alexander Richard, Jason Saragih, Junko Saragih, Takaaki Shiratori, Tomas Simon, Matt Stewart, Autumn Trimble, Xinshuo Weng, David Whitewolf, Chenglei Wu, Shoou-I Yu, and Yaser Sheikh. Multiface: A dataset for neural face rendering. In arXiv, 2022. 1, 5, 2, 6
[45] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. In Neural Information Processing Systems (NeurIPS), 2021. 3, 1
[46] Jinbo Xing, Menghan Xia, Yuechen Zhang, Xiaodong Cun, Jue Wang, and Tien-Tsin Wong. Codetalker: Speech-driven 3d facial animation with discrete motion prior, 2023. 8, 7
[47] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction, 2020. 2, 5, 6, 4
[48] Hongwei Yi, Hualin Liang, Yifei Liu, Qiong Cao, Yandong Wen, Timo Bolkart, Dacheng Tao, and Michael J. Black. Generating holistic 3d human motion from speech, 2023. 2
[49] Changqian Yu, Changxin Gao, Jingbo Wang, Gang Yu, Chunhua Shen, and Nong Sang. Bisenet V2: bilateral network with guided aggregation for real-time semantic segmentation. CoRR, abs/2004.02147, 2020. 8, 5
[50] Jianhui Yu, Hao Zhu, Liming Jiang, Chen Change Loy, Weidong Cai, and Wayne Wu. CelebV-Text: A large-scale facial text-video dataset. In CVPR, 2023. 8
[51] Shifeng Zhang, Xiangyu Zhu, Zhen Lei, Hailin Shi, Xiaobo Wang, and Stan Z. Li. S$^3$FD: Single shot scale-invariant face detector, 2017. 3
[52] Yufeng Zheng, Victoria Fernández Abrevaya, Xu Chen, Marcel C. Bühler, Michael J. Black, and Otmar Hilliges. I M avatar: Implicit morphable head avatars from videos. CoRR, abs/2112.07471, 2021. 2
[53] Yufeng Zheng, Wang Yifan, Gordon Wetzstein, Michael J. Black, and Otmar Hilliges. Pointavatar: Deformable point-based head avatars from videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 8
[54] Zhenglin Zhou, Huaxia Li, Hong Liu, Nanyang Wang, Gang Yu, and Rongrong Ji. Star loss: Reducing semantic ambiguity in facial landmark detection, 2023. 2
[55] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z. Li. Face alignment across large poses: A 3d solution. CoRR, abs/1511.07212, 2015. 2
[56] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Instant volumetric head avatars. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4574-4584, 2022. 8, 4, 6
[57] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces, 2022. 1, 2, 4, 5, 6, 7, 8
[58] Michael Zollhöfer, Justus Thies, Derek Bradley, Pablo Garrido, Thabo Beeler, Patrick Pérez, Marc Stamminger, Matthias Nießner, and Christian Theobalt. State of the art on monocular 3d face reconstruction, tracking, and applications. 2018.
1, 2 \ No newline at end of file diff --git a/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/images.zip b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..efa2166849e3a0e2929725c751e3dcc13481d351 --- /dev/null +++ b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fe1943236ff099603874d2e979c7aa6ec5e35fbe627e197bf9536d3dc74e68d +size 596411 diff --git a/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/layout.json b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3188ba1af183077531105592fef259a7587a7464 --- /dev/null +++ b/2024/3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow/layout.json @@ -0,0 +1,10013 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 61, + 103, + 533, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 103, + 533, + 121 + ], + "spans": [ + { + "bbox": [ + 61, + 103, + 533, + 121 + ], + "type": "text", + "content": "3D Face Tracking from 2D Video through Iterative Dense UV to Image Flow" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 143, + 124, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 143, + 124, + 156 + ], + "spans": [ + { + "bbox": [ + 54, + 143, + 124, + 156 + ], + "type": "text", + "content": "Felix Taubner" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 141, + 144, + 216, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 144, + 216, + 156 + ], + "spans": [ + { + "bbox": [ + 141, + 144, + 216, + 156 + ], + "type": "text", + "content": "Prashant Raina" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 233, + 144, + 298, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 144, + 298, + 156 + ], + "spans": [ + { + "bbox": [ + 233, + 144, + 298, + 156 + ], + "type": "text", + "content": "Mathieu Tuli" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 316, + 144, + 381, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 144, + 381, + 156 + ], + "spans": [ + { + "bbox": [ + 316, + 144, + 381, + 156 + ], + "type": "text", + "content": "Eu Wern Teh" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 399, + 144, + 445, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 144, + 445, + 156 + ], + "spans": [ + { + "bbox": [ + 399, + 144, + 445, + 156 + ], + "type": "text", + "content": "Chul Lee" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 463, + 144, + 538, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 463, + 144, + 538, + 157 + ], + "spans": [ + { + "bbox": [ + 463, + 144, + 538, + 157 + ], + "type": "text", + "content": "Jinmiao Huang" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 259, + 158, + 334, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 259, + 158, + 334, + 171 + ], + "spans": [ + { + "bbox": [ + 259, + 158, + 334, + 171 + ], + "type": "text", + "content": "LG Electronics" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 133, + 174, + 457, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 174, + 457, + 185 + ], + "spans": [ + { + "bbox": [ 
+ 133, + 174, + 457, + 185 + ], + "type": "text", + "content": "{prashant.raina, mathieu.tuli, euwern.teh, clee.lee}@lge.com" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 239, + 290, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 239, + 290, + 526 + ], + "spans": [ + { + "bbox": [ + 46, + 239, + 290, + 526 + ], + "type": "text", + "content": "When working with 3D facial data, improving fidelity and avoiding the uncanny valley effect is critically dependent on accurate 3D facial performance capture. Because such methods are expensive and due to the widespread availability of 2D videos, recent methods have focused on how to perform monocular 3D face tracking. However, these methods often fall short in capturing precise facial movements due to limitations in their network architecture, training, and evaluation processes. Addressing these challenges, we propose a novel face tracker, FlowFace, that introduces an innovative 2D alignment network for dense pervertex alignment. Unlike prior work, FlowFace is trained on high-quality 3D scan annotations rather than weak supervision or synthetic data. Our 3D model fitting module jointly fits a 3D face model from one or many observations, integrating existing neutral shape priors for enhanced identity and expression disentanglement and per-vertex deformations for detailed facial feature reconstruction. Additionally, we propose a novel metric and benchmark for assessing tracking accuracy. Our method exhibits superior performance on both custom and publicly available benchmarks. We further validate the effectiveness of our tracker by generating high-quality 3D data from 2D videos, which leads to performance gains on downstream tasks." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 537, + 128, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 537, + 128, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 537, + 128, + 550 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 557, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 557, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 557, + 288, + 714 + ], + "type": "text", + "content": "Access to 3D face tracking data lays the foundation for many computer graphics tasks such as 3D facial animation, 3D human avatar reconstruction, and expression transfer. Obtaining high visual fidelity, portraying subtle emotional cues, and preventing the uncanny valley effect in these downstream tasks is reliant on high motion capture accuracy. As a result, a common approach to generating 3D face tracking data is to use 3D scans and visual markers however, this process is cost-intensive. To alleviate this burden, building computational models to obtain 3D faces from monocular 2D videos and images has cemented its importance in recent years and seen great progress [10, 14, 19, 24, 37, 42, 57]. 
Nevertheless, three" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 213, + 547, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 213, + 547, + 323 + ], + "spans": [ + { + "bbox": [ + 304, + 213, + 547, + 323 + ], + "type": "text", + "content": "issues persist: First, current methods rely heavily on sparse landmarks and photometric similarity, which is computationally expensive and ineffective in ensuring accurate face motion. Second, the monocular face tracking problem is both ill-posed and contains a large solution space dependent on camera intrinsics, pose, head shape, and expression [58]. Third, current benchmarks for this task neglect the temporal aspect of face tracking and do not adequately evaluate facial motion capture accuracy." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 326, + 548, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 326, + 548, + 602 + ], + "spans": [ + { + "bbox": [ + 304, + 326, + 548, + 602 + ], + "type": "text", + "content": "To address the aforementioned issues, we introduce a novel 3D face tracking model called FlowFace, consisting of a versatile two-stage pipeline: A 2D alignment network that predicts the screen-space positions of each vertex of a 3D morphable model [2] (3DMM) and an optimization module that jointly fits this model across multiple views by minimizing an alignment energy function. Unlike traditional methods that rely on sparse landmarks and photometric consistency, FlowFace uses only 2D alignment as input signal, similar to recent work [42]. This alleviates the computational burden of inverse rendering and allows joint reconstruction using a very large number of observations. We enhance previous work in four ways: (1) The 2D alignment network features a novel architecture with a vision-transformer backbone and an iterative, recurrent refinement block. (2) In contrast to previous methods that use weak supervision or synthetic data, the alignment network is trained using high-quality annotations from 3D scans. (3) The alignment network predicts dense, per-vertex alignment instead of key-points, which enables the reconstruction of finer details. (4) We integrate an off-the-shelf neutral shape prediction model to improve identity and expression disentanglement." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 605, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 548, + 715 + ], + "type": "text", + "content": "In addition, we present the screen-space motion error (SSME) as a novel face tracking metric. Based on optical flow, SSME computes and contrasts screen-space motion, aiming to resolve the limitation observed in existing evaluation methods. These often rely on sparse key points, synthetic annotations, or RGB/3D reconstruction errors, and lack a thorough and comprehensive measurement of temporal consistency. Using the Multiface [44] dataset, we develop a 3D face tracking benchmark around this metric." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 732, + 315, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 732, + 315, + 742 + ], + "spans": [ + { + "bbox": [ + 295, + 732, + 315, + 742 + ], + "type": "text", + "content": "1227" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "type": "text", + "content": "Finally, through extensive experiments on available benchmarks, we show that our method significantly outperforms the state-of-the-art on various tasks. To round off our work, we demonstrate how our face tracker can positively affect the performance of downstream tasks, including speech-driven 3D facial animation and 3D head avatar synthesis. Specifically, we demonstrate how our method can be used to generate high-quality data — comparable to studio-captured data — for both these tasks by using it to augment existing models to achieve state-of-the-art results." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 204, + 134, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 204, + 134, + 217 + ], + "spans": [ + { + "bbox": [ + 47, + 204, + 134, + 217 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 224, + 287, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 224, + 287, + 260 + ], + "spans": [ + { + "bbox": [ + 46, + 224, + 287, + 260 + ], + "type": "text", + "content": "Uncalibrated 3D Face Reconstruction. Previous work reconstructing 3D face shapes from uncalibrated 2D images or video fall into two broad categories:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 261, + 289, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 261, + 289, + 513 + ], + "spans": [ + { + "bbox": [ + 46, + 261, + 289, + 513 + ], + "type": "text", + "content": "Optimization-based methods recover face shape and motion by jointly optimizing 3D model parameters to fit the 2D observations. They traditionally treat this optimization as an inverse rendering problem [15, 16, 37, 43, 48, 52, 57], using sparse key-points as guidance. Typically, they employ geometric priors such as 3DMMs [2, 6, 22, 26, 47], texture models, simplified illumination models, and temporal priors. Some methods use additional constraints such as depth [37] or optical flow [5]. [58] and [28] present detailed surveys of such methods. 
Most methods use 3DMMs to disentangle shape and expression components. MPT [57] is the first method to integrate metrical head shape priors predicted by a deep neural network (DNN). However, photometric and sparse landmark supervision is not sufficient to obtain consistent and accurate face alignment, especially in areas not covered by landmarks and or of low visual saliency. More recently, [42] proposes to use only 2D face alignment (dense landmarks) as supervision, avoiding the computationally expensive inverse rendering process. Our method extends this idea with an improved 2D alignment module, better shape priors, and per-vertex deformation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 513, + 289, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 513, + 289, + 681 + ], + "spans": [ + { + "bbox": [ + 46, + 513, + 289, + 681 + ], + "type": "text", + "content": "Regression-based methods train DNNs to directly predict face reconstructions from single images [7, 10, 12, 19, 24, 31, 32, 34, 35]. This reconstruction includes information such as pose, 3DMM components, and sometimes texture. Typically, convolutional networks like image classification networks [21, 33] or encoder-decoder networks [41] are used. Due to the lack of large-scale 2D to 3D annotations, these methods typically rely on photometric supervision for their training. Some methods propose complex multi-step network architectures [24, 32] to improve reconstruction. [24] use additional handcrafted losses to improve alignment, whereas [7] use synthetic data and numerous of landmarks. More recently, [38] proposes to use vision-transformers to improve face reconstruction." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "content": "2D Face Alignment. Traditional 2D face alignment methods predict a sparse set of manually defined landmarks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "text", + "content": "These methods typically involve convolutional DNNs to predict heat maps for each landmark [4, 30, 54]. Sparse key-points are not sufficient to describe full face motion, and heat maps make it computationally infeasible to predict a larger number of key-points. [42] and [18] achieve pseudo-dense alignment by using classifier networks to directly predict a very large number of landmarks. [20] predict the UV coordinates in image space and then map the vertices onto the image. Just like [41] and [32], our method predicts a per-pixel dense mapping between the UV space of a face model and the image space. However, we set our method apart by using better network architectures with vision-transformers and real instead of synthetic data." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 237, + 547, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 237, + 547, + 490 + ], + "spans": [ + { + "bbox": [ + 304, + 237, + 547, + 490 + ], + "type": "text", + "content": "Evaluation of Face Trackers. 
Prior work evaluates face tracking and reconstruction using key-point accuracy [19, 32, 41, 42, 55], depth [37, 57], photometric [37, 57] or 3D reconstruction [5, 6, 47] errors. Sparse key-points are usually manually-annotated, difficult to define without ambiguities [54], and insufficient to describe the full motion of the face. Dense key-points [55] are difficult to compare between models using different mesh topologies. Photometric errors [37, 38, 57] are unsuitable since a perfect solution already exists within the input data, and areas with low visual saliency are neglected. A fair comparison of depth errors [37, 57] is only possible for methods using a pre-calibrated, perspective camera model. Methods that evaluate 3D reconstruction errors have to rigidly align the target and predicted mesh to fairly evaluate results [6, 34, 47], which causes valuable tracking information such as pose and intrinsics to be lost. Most importantly, depth and 3D reconstruction metrics neglect motion tangential to the surface normal. In contrast, our proposed metric measures the dense face motion in screen space, which is topology-independent and eliminates the need for rigid alignment." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 501, + 362, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 501, + 362, + 514 + ], + "spans": [ + { + "bbox": [ + 306, + 501, + 362, + 514 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 521, + 547, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 547, + 570 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 547, + 570 + ], + "type": "text", + "content": "Our 3D face tracking pipeline consists of two stages: The first stage is predicting a dense 2D alignment of the face model, and the second stage is fitting a parametric 3D model to this alignment." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 578, + 493, + 591 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 578, + 493, + 591 + ], + "spans": [ + { + "bbox": [ + 305, + 578, + 493, + 591 + ], + "type": "text", + "content": "3.1. Dense 2D Face Alignment Network" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 597, + 433, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 597, + 433, + 609 + ], + "spans": [ + { + "bbox": [ + 306, + 597, + 433, + 609 + ], + "type": "text", + "content": "3.1.1 Network Architecture" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": "The 2D alignment module is responsible for predicting the probabilistic location — in image space — of each vertex of our face model. As in [42], the 2D alignment of each vertex is represented as a random variable " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "A_{i} = \\{\\mu_{i},\\sigma_{i}\\}" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mu_{i} = [x_{i},y_{i}]\\in \\mathcal{I}" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": " is the expected vertex position in image space " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{I}\\in [0,D_{img}]^2" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\sigma_{i}\\in \\mathbb{R}_{>0}" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": " is its uncertainty, modeled as the standard deviation of a circular 2D Gaussian density function. As an intermediate step, for each iteration" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 732, + 315, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 732, + 315, + 742 + ], + "spans": [ + { + "bbox": [ + 296, + 732, + 315, + 742 + ], + "type": "text", + "content": "1228" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 72, + 537, + 198 + ], + "blocks": [ + { + "bbox": [ + 60, + 72, + 537, + 198 + ], + "lines": [ + { + "bbox": [ + 60, + 72, + 537, + 198 + ], + "spans": [ + { + "bbox": [ + 60, + 72, + 537, + 198 + ], + "type": "image", + "image_path": "069fad4bb0552718c5ca1181c746f8a2602905b4515f8fce047934d32644ac6a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 201, + 547, + 224 + ], + "lines": [ + { + "bbox": [ + 46, + 201, + 547, + 224 + ], + "spans": [ + { + "bbox": [ + 46, + 201, + 547, + 224 + ], + "type": "text", + "content": "Figure 1. An overview of the proposed 2D alignment network architecture. A feature encoder transforms the image into a latent feature map that is then iteratively aligned with a learned UV positional embedding map by the recurrent update block." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "type": "text", + "content": ", the alignment network predicts a dense UV to image correspondence map " + }, + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_k: \\mathcal{U} \\to \\mathcal{I}" + }, + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "type": "text", + "content": " and uncertainty map " + }, + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_k" + }, + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_k" + }, + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "type": "text", + "content": " maps any point in UV space " + }, + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "type": "inline_equation", + "content": "\\mathcal{U} \\in [0, D_{uv}]^2" + }, + { + "bbox": [ + 47, + 232, + 287, + 293 + ], + "type": "text", + "content": " to a position in image space through a pixel-wise offset, which we call UV-image flow. This network consists of three parts (Fig. 1):" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 293, + 287, + 364 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 47, + 293, + 287, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 293, + 287, + 316 + ], + "spans": [ + { + "bbox": [ + 47, + 293, + 287, + 316 + ], + "type": "text", + "content": "1. An image feature encoder producing a latent feature map of the target image." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 316, + 287, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 316, + 287, + 340 + ], + "spans": [ + { + "bbox": [ + 47, + 316, + 287, + 340 + ], + "type": "text", + "content": "2. A positional encoding module that produces learned positional embeddings in UV space." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 340, + 287, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 340, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 47, + 340, + 287, + 364 + ], + "type": "text", + "content": "3. An iterative, recurrent optical flow module that predicts the probabilistic UV-image flow." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 365, + 287, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 287, + 400 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 287, + 400 + ], + "type": "text", + "content": "The image space position and uncertainty of each vertex is then bi-linearly sampled from the intermediate correspondence and uncertainty map for each iteration:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 80, + 411, + 287, + 423 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 411, + 287, + 423 + ], + "spans": [ + { + "bbox": [ + 80, + 411, + 287, + 423 + ], + "type": "interline_equation", + "content": "\\mu_ {i, k} = \\nu_ {i} + \\mathbf {F} _ {k} (\\nu_ {i}) \\quad \\text {a n d} \\quad \\sigma_ {i, k} = \\mathbf {S} _ {k} (\\nu_ {i}) \\tag {1}", + "image_path": "bb99c23ed31a3f8309dc0e60565ddc5e8094e7d97e2140308833018b67a2138f.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 433, + 287, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 433, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 46, + 433, + 287, + 456 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 433, + 287, + 456 + ], + "type": "inline_equation", + "content": "\\nu_{i}\\in \\mathcal{U}" + }, + { + "bbox": [ + 46, + 433, + 287, + 456 + ], + "type": "text", + "content": " denotes the pre-defined UV coordinate of each vertex. These are manually defined by a 3D artist." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "spans": [ + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "text", + "content": "Image feature encoder. To obtain the input to the image encoder " + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "text", + "content": ", we use SFD [51] to detect a square face bounding box from the target image and enlarge it by " + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "text", + "content": ". We then crop the image to the bounding box and resize it to " + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "inline_equation", + "content": "D_{img}" + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "text", + "content": ". We use Segformer [45] as the backbone, and replace the final classification layer with a linear layer to produce a 128-dimensional feature encoding. We further down-sample it to attain a final image feature map " + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "inline_equation", + "content": "Z_{img} \\in \\mathbb{R}^{D_{uv} \\times D_{uv} \\times 128}" + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "text", + "content": " through average pooling. With image " + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "inline_equation", + "content": "\\mathbf{I}" + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "text", + "content": " and network parameters " + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathcal{F}}" + }, + { + "bbox": [ + 46, + 464, + 288, + 583 + ], + "type": "text", + "content": ", this is defined as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 594, + 287, + 607 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 594, + 287, + 607 + ], + "spans": [ + { + "bbox": [ + 132, + 594, + 287, + 607 + ], + "type": "interline_equation", + "content": "Z _ {i m g} = \\mathcal {F} (\\mathbf {I}, \\theta_ {\\mathcal {F}}) \\tag {2}", + "image_path": "9e5c29b936710ee0d3dfe7a1306f2957e60c2a4375cf0c4e01ad03990d3bae03.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "text", + "content": "UV positional encoding module. We use a set of modules " + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "text", + "content": " with identical architecture to generate learned positional embeddings in UV-space. Each module is comprised of a multi-scale texture pyramid and a pixel-wise linear layer. 
This pyramid consists of four trainable textures with 32 channels and squared resolutions of " + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "inline_equation", + "content": "D_{uv}" + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\frac{D_{uv}}{2}" + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\frac{D_{uv}}{4}" + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\frac{D_{uv}}{8}" + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "text", + "content": " respectively. Each texture is upsampled to " + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "inline_equation", + "content": "D_{uv}" + }, + { + "bbox": [ + 46, + 616, + 287, + 715 + ], + "type": "text", + "content": " through bi-linear interpolation before concatenating them" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "spans": [ + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "text", + "content": "along the channel dimension. The concatenated textures are then passed through a pixel-wise linear layer to produce the UV positional embeddings. The multi-scale setup ensures structural consistency in UV space (closer pixels in UV should have similar features). We use 3 of these modules: " + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{Z_{uv}}" + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "text", + "content": " to generate a UV feature map " + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "inline_equation", + "content": "Z_{uv}" + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_c" + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "text", + "content": " to generator a context map " + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{h_0}" + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "text", + "content": " to generate an initial hidden state " + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "inline_equation", + "content": "h_0" + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "text", + "content": ". 
With corresponding network parameters " + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathcal{G}_{Z_{uv}}}, \\theta_{\\mathcal{G}_c}" + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathcal{G}_{h_0}}" + }, + { + "bbox": [ + 304, + 232, + 547, + 342 + ], + "type": "text", + "content": ", this is described as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 332, + 348, + 545, + 363 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 348, + 545, + 363 + ], + "spans": [ + { + "bbox": [ + 332, + 348, + 545, + 363 + ], + "type": "interline_equation", + "content": "Z _ {u v} = \\mathcal {G} \\left(\\theta_ {\\mathcal {G} _ {Z _ {u v}}}\\right); c = \\mathcal {G} \\left(\\theta_ {\\mathcal {G} _ {c}}\\right); h _ {0} = \\mathcal {G} \\left(\\theta_ {\\mathcal {G} _ {h _ {0}}}\\right) \\tag {3}", + "image_path": "ff81a323d1e9526cc71678e5b3d8f3e6ea93c0787dfc22bf1ea9beb0d8d55786.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 369, + 545, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 369, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 369, + 545, + 464 + ], + "type": "text", + "content": "UV-image flow. The RAFT [36] network is designed to predict the optical flow between two images. It consists of a correlation block that maps the latent features encoded from each image into a 4D correlation volume. A context encoder initializes the hidden state of a recurrent update block and provides it with additional context information. The update block then iteratively refines a flow estimate while sampling the correlation volume." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 465, + 546, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 465, + 546, + 657 + ], + "spans": [ + { + "bbox": [ + 304, + 465, + 546, + 657 + ], + "type": "text", + "content": "We adapt this network to predict the UV-image flow " + }, + { + "bbox": [ + 304, + 465, + 546, + 657 + ], + "type": "inline_equation", + "content": "\\mathbf{F} \\in \\mathbb{R}^{D_{uv} \\times D_{uv} \\times 2}" + }, + { + "bbox": [ + 304, + 465, + 546, + 657 + ], + "type": "text", + "content": ". We directly pass " + }, + { + "bbox": [ + 304, + 465, + 546, + 657 + ], + "type": "inline_equation", + "content": "Z_{uv}" + }, + { + "bbox": [ + 304, + 465, + 546, + 657 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 465, + 546, + 657 + ], + "type": "inline_equation", + "content": "Z_{img}" + }, + { + "bbox": [ + 304, + 465, + 546, + 657 + ], + "type": "text", + "content": " to the correlation block " + }, + { + "bbox": [ + 304, + 465, + 546, + 657 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 304, + 465, + 546, + 657 + ], + "type": "text", + "content": ". 
We use the context map $c$ and initial hidden state $h_0$ from the positional encoding modules for the update module $\mathbf{U}$. We modify the update module to also predict a per-iteration uncertainty in addition to the flow estimate, by duplicating the flow prediction head to predict a 1-channel uncertainty map $\mathbf{S} \in \mathbb{R}_{>0}^{D_{uv} \times D_{uv}}$. An exponential operation is applied to ensure positive values. The motion encoder head is adjusted to accept the uncertainty as an input. The modified RAFT network then works as follows: for each iteration $k$, the recurrent update module performs a look-up in the correlation volume and takes as input the context map $c$, the previous hidden state $h_{k-1}$, the previous flow $\mathbf{F}_{k-1}$, and the previous uncertainty $\mathbf{S}_{k-1}$. It outputs the refined flow estimate $\mathbf{F}_k$, the uncertainty $\mathbf{S}_k$, and the subsequent hidden state $h_k$.
Formally,

$$\mathbf{F}_{k}, \mathbf{S}_{k}, h_{k} = \mathbf{U}\left(\mathbf{C}\left(Z_{uv}, Z_{img}\right), c, \mathbf{F}_{k-1}, \mathbf{S}_{k-1}, h_{k-1}, \theta_{\mathbf{U}}\right) \tag{4}$$

with update module weights $\theta_{\mathbf{U}}$. For a detailed explanation of our modified RAFT, we defer to [36] and Appendix B.
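The following sketch illustrates the shape of the iteration in Eq. (4); `correlation` and `update_block` are hypothetical stand-ins for the modified RAFT components, and predicting the uncertainty in log-space before exponentiation is our own assumption, consistent with the exponential operation described above.

```python
import torch

def iterative_uv_image_flow(z_uv, z_img, c, h0, correlation, update_block, n_iter=3):
    """Hypothetical sketch of the modified RAFT loop: each iteration refines the
    UV-image flow F_k and a positive per-pixel uncertainty S_k (Eq. 4)."""
    b, _, d_uv, _ = z_uv.shape
    corr_volume = correlation(z_uv, z_img)                       # 4D correlation volume C(Z_uv, Z_img)
    flow = torch.zeros(b, 2, d_uv, d_uv, device=z_uv.device)     # F_0
    log_unc = torch.zeros(b, 1, d_uv, d_uv, device=z_uv.device)  # log S_0
    h = h0
    outputs = []
    for _ in range(n_iter):
        # The update block looks up the correlation volume at the current flow and
        # consumes the context map, hidden state, previous flow and previous uncertainty.
        h, d_flow, d_log_unc = update_block(corr_volume, c, h, flow, log_unc.exp())
        flow = flow + d_flow
        log_unc = log_unc + d_log_unc
        outputs.append((flow, log_unc.exp()))                    # exp() keeps S_k > 0
    return outputs  # [(F_1, S_1), ..., (F_K, S_K)]
```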
3.1.2 Loss Functions

We supervise our network with a Gaussian negative log-likelihood (GNLL) loss, both on the probabilistic per-vertex positions and on the dense UV-image flow. For each iteration $k$ of the update module, we apply the per-vertex loss function:

$$L_{k}^{\text{vertex}} = \sum_{i=1}^{N_{v}} \lambda_{i} \left(\log\left(\sigma_{i,k}^{2}\right) + \frac{\left\|\mu_{i,k} - \mu_{i}^{\prime}\right\|^{2}}{2\sigma_{i,k}^{2}}\right) \tag{5}$$

where $\lambda_{i}$ is a pre-defined vertex weight and $\mu_{i}^{\prime}$ is the ground-truth vertex position. We encourage our network to predict coherent flow and uncertainty maps in areas with no vertices by applying the GNLL loss for each pixel $p$ in UV space:

$$L_{k}^{\text{dense}} = \sum_{p \in |\mathcal{U}|} \lambda_{p} \left(\log\left(\mathbf{S}_{k,p}^{2}\right) + \frac{\|\mathbf{F}_{k,p} - \mathbf{F}_{p}^{\prime}\|^{2}}{2\mathbf{S}_{k,p}^{2}}\right) \tag{6}$$

where $\lambda_{p}$ is a pre-defined per-pixel weight and $\mathbf{F}^{\prime}$ is the ground-truth UV-image flow.
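A compact sketch of the two GNLL terms in Eqs. (5) and (6); the tensor layouts, the reduction over the batch, and the small variance floor are illustrative assumptions rather than the paper's exact implementation.

```python
import torch

def vertex_gnll(mu_pred, sigma_pred, mu_gt, vertex_weights, eps=1e-6):
    """Per-vertex Gaussian NLL (Eq. 5): mu_pred, mu_gt are (B, N_v, 2);
    sigma_pred is (B, N_v); vertex_weights broadcastable to (B, N_v)."""
    var = sigma_pred.pow(2).clamp_min(eps)
    sq_err = (mu_pred - mu_gt).pow(2).sum(dim=-1)
    return (vertex_weights * (var.log() + sq_err / (2.0 * var))).sum(dim=-1).mean()

def dense_gnll(flow_pred, unc_pred, flow_gt, pixel_weights, eps=1e-6):
    """Dense UV-space Gaussian NLL (Eq. 6): flow_* are (B, 2, D_uv, D_uv);
    unc_pred and pixel_weights are (B, 1, D_uv, D_uv)."""
    var = unc_pred.pow(2).clamp_min(eps)
    sq_err = (flow_pred - flow_gt).pow(2).sum(dim=1, keepdim=True)
    return (pixel_weights * (var.log() + sq_err / (2.0 * var))).sum(dim=(1, 2, 3)).mean()
```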
The final loss is a weighted sum of these losses, with a per-iteration decay factor $\alpha = 0.8$ and a dense weight $\lambda_{dense} = 0.01$:

$$\operatorname{Loss} = \sum_{k=1}^{N_{\text{iter}}} \alpha^{N_{\text{iter}} - k} \left(L_{k}^{\text{vertex}} + \lambda_{\text{dense}} L_{k}^{\text{dense}}\right) \tag{7}$$

3.2. 3D Model Fitting

As in [42], the 3D reconstruction is obtained by jointly fitting a 3D head model and camera parameters to the predicted 2D alignment observations for the entire sequence. This is done by optimizing the energy function $E(\Phi; A)$ w.r.t. the model parameters $\Phi$ and the alignment $A$ (see Fig. 2). These parameters and the energy terms are defined below.

Figure 2. An illustration of the 3D model fitting process.
3.2.1 Tracking Model and Parameters

The tracking model consists of a 3D head model and a camera model. A tracking sequence contains $C$ cameras and $F$ frames, with a total of $C \times F$ images.

3D head model. We use FLAME [26] as our 3D head model $\mathbf{M}$. This model consists of $N_{v} = 5023$ vertices, which are controlled by identity shape parameters $\beta \in \mathbb{R}^{300}$, expression shape parameters $\phi \in \mathbb{R}^{100}$, and $K = 5$ skeletal joint poses $\theta \in \mathbb{R}^{3K+3}$ (including the root translation) through linear blend skinning [25]. We ignore the root, neck, and jaw pose and use the FLAME2023 model, which includes deformations due to jaw rotation within the expression blend-shapes.
We also introduce additional static per-vertex deformations $\delta_{d} \in \mathbb{R}^{N_v \times 3}$ to enhance identity shape detail. The local head model vertices can be expressed using its parameters as follows:

$$\mathbf{M}(\boldsymbol{\beta}, \boldsymbol{\delta}_{d}, \boldsymbol{\phi}, \boldsymbol{\theta}) = FLAME(\boldsymbol{\beta}, \boldsymbol{\phi}, \boldsymbol{\theta}) + \boldsymbol{\delta}_{d} \tag{8}$$

The rigid transform $\mathbf{T}^{\mathbf{M}} \in \mathbb{R}^{3 \times 4}$ represents the head pose, which transforms head model vertices $i$ into world space for each frame $t$:

$$\mathbf{x}_{i,t}^{\mathrm{3D}} = \mathbf{T}_{t}^{\mathbf{M}} \mathbf{M}_{i} \tag{9}$$
Camera model. The cameras are described by the world-to-camera rigid transform $\mathbf{T}_{cam} \in \mathbb{R}^{3 \times 4}$ and the pinhole camera projection matrix $\mathbf{K} \in \mathbb{R}^{3 \times 3}$, which is defined by a single focal length parameter $f \in \mathbb{R}$. The camera model defines the image-space projection of the 3D vertices in camera $j$:

$$\mathbf{x}_{i,j,t}^{\mathrm{2D}} = \mathbf{K}_{j} \mathbf{T}_{j}^{cam} \mathbf{x}_{i,t}^{\mathrm{3D}} \tag{10}$$
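The following sketch strings Eqs. (8)–(10) together for a single frame and camera; `flame_model` is a placeholder for a FLAME implementation, and the homogeneous-coordinate handling with a final perspective division is our own illustrative convention.

```python
import torch

def project_head_model(flame_model, beta, phi, theta, delta_d, T_M, K, T_cam):
    """Hypothetical sketch: local FLAME vertices + static offsets (Eq. 8),
    head pose to world space (Eq. 9), pinhole projection to the image (Eq. 10)."""
    # Eq. 8: M(beta, delta_d, phi, theta) = FLAME(beta, phi, theta) + delta_d
    verts_local = flame_model(beta, phi, theta) + delta_d            # (N_v, 3)

    # Eq. 9: x_3d = T_M @ [M; 1]  (T_M is a 3x4 rigid transform for this frame)
    ones = torch.ones(verts_local.shape[0], 1, device=verts_local.device)
    verts_h = torch.cat([verts_local, ones], dim=-1)                 # (N_v, 4)
    x_3d = verts_h @ T_M.T                                           # (N_v, 3), world space

    # Eq. 10: x_2d ~ K @ T_cam @ [x_3d; 1], followed by perspective division
    x_cam = torch.cat([x_3d, ones], dim=-1) @ T_cam.T                # (N_v, 3), camera space
    x_proj = x_cam @ K.T                                             # (N_v, 3), homogeneous image coords
    return x_proj[:, :2] / x_proj[:, 2:3]                            # (N_v, 2) pixel coordinates
```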
Parameters. The parameters $\Psi$ consist of the head model and camera parameters, which are optimized to minimize $E(\Phi; A)$. The camera parameters can be fixed to known values, if the calibration is available. Expression and poses vary for each frame $t$, whereas camera, identity shape, and deformation parameters are shared over the sequence.

$$\Psi = \left\{\beta, \Phi_{F \times |\phi|}, \Theta_{F \times |\theta|}, \delta_{\mathrm{d}}; \mathbf{T}_{F \times 3 \times 4}^{\mathbf{M}}; \mathbf{T}_{C \times 3 \times 4}^{cam}, f_{C}\right\} \tag{11}$$

3.2.2 Energy Terms

The energy function is defined as:

$$E(\Phi; A) = E_{A} + E_{FLAME} + E_{\text{temp}} + E_{MICA} + E_{\text{deform}} \tag{12}$$

$E_{A}$ encourages 2D alignment:

$$E_{A} = \sum_{i,j,t}^{N_{v}, C, F} \lambda_{i} \frac{\left\|\mathbf{x}_{i,j,t}^{\mathrm{2D}} - \mu_{i,j,t}\right\|^{2}}{2\sigma_{i,j,t}^{2}} \tag{13}$$
where vertex $i$ is seen by camera $j$ in frame $t$; $\mu_{i,j,t}$ and $\sigma_{i,j,t}$ are the 2D location and uncertainty predicted by the final iteration of our 2D alignment network, and $\mathbf{x}_{i,j,t}^{\mathrm{2D}}$ (Eq. (10)) is the 2D camera projection of that vertex.

$E_{FLAME} = \lambda_{FLAME}(\|\beta\|^{2} + \|\Phi\|^{2})$ encourages the optimizer to explain the data with smaller identity and expression parameters. This leads to face shapes that are statistically more likely [10, 14, 26, 57] and a more accurate 3D reconstruction. We do not penalize joint rotation, face translation, or rotation.

$E_{\text{temp}}$ applies a loss on the acceleration of the 3D position $\mathbf{x}_{i,t}^{\mathrm{3D}}$ of every vertex of the 3D model to prevent jitter and encourage a smoother, more natural face motion:

$$E_{\text{temp}} = \lambda_{\text{temp}} \sum_{i,j,t=2}^{N_{v}, C, F-1} \|\mathbf{x}_{j,t-1}^{\mathrm{3D}} - 2\mathbf{x}_{j,t}^{\mathrm{3D}} + \mathbf{x}_{j,t+1}^{\mathrm{3D}}\|^{2} \tag{14}$$
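A minimal sketch of the alignment and temporal energy terms in Eqs. (13) and (14); the tensor shapes and the omission of per-camera visibility handling are simplifying assumptions for illustration.

```python
import torch

def alignment_energy(x2d, mu, sigma, vertex_weights):
    """Eq. 13: uncertainty-weighted 2D alignment energy.
    x2d, mu: (F, C, N_v, 2); sigma and vertex_weights broadcastable to (F, C, N_v)."""
    sq_err = (x2d - mu).pow(2).sum(dim=-1)
    return (vertex_weights * sq_err / (2.0 * sigma.pow(2))).sum()

def temporal_energy(x3d, lam_temp=1.0):
    """Eq. 14: penalize the acceleration of every 3D vertex over the sequence.
    x3d: (F, N_v, 3) world-space vertex trajectories."""
    accel = x3d[:-2] - 2.0 * x3d[1:-1] + x3d[2:]
    return lam_temp * accel.pow(2).sum()
```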
$E_{MICA} = \lambda_{MICA} \|\mathbf{M}_{\Phi=0,\theta=0} - \mathbf{M}_{MICA}\|^2$ provides a 3D neutral geometry prior for the optimizer to enable a better disentanglement between identity and expression components. It consists of the L2 distance of the neutral head model vertices to the MICA [57] template $\mathbf{M}_{MICA}$. This template is computed by predicting the average neutral head vertices using the MICA model [57] for all frames of the sequence. The term also enables a more accurate 3D reconstruction, since the model can rely on MICA predictions where the alignment is uncertain, such as in the depth direction or for occluded vertices. In areas of confident alignment, the MICA prediction can be refined.

$E_{\text{deform}} = \lambda_{\text{deform}} \|\delta_{\mathrm{d}}\|^2$ encourages per-vertex deformations to be small w.r.t. the FLAME model.

3.3. Multiface Face Tracking Benchmark

Our monocular 3D face tracking benchmark focuses on 3D reconstruction and motion capture accuracy. To evaluate these, we use our proposed screen space motion error (SSME) and the scan-to-mesh chamfer distance (CD).

Figure 3. An illustration of the EPE computation for each frame.
Screen Space Motion Error. To define the Screen Space Motion Error (SSME), we reformulate face tracking as an optical flow prediction problem over a set of time windows. First, we project the ground-truth mesh and the predicted mesh into screen space using the respective camera model. Then, we use the screen-space coordinates to compute the ground-truth optical flow $\mathbf{f}_{t:t+h}^{\prime}$ and the predicted optical flow $\mathbf{f}_{t:t+h}$ from frame $t$ to frame $t+h$, for each frame $t \in [1,\dots,F]$ and a sequence of frame windows $h = [1,\dots,N_H]$. For each frame and frame window, the average end-point error $EPE_{t:t+h}$ is computed by averaging the L2 distance between ground-truth and predicted optical flow over all pixels (see Fig. 3):

$$EPE_{t:t+h} = \left\|V \odot \left(\mathbf{f}_{t:t+h} - \mathbf{f}_{t:t+h}^{\prime}\right)\right\|^{2} \tag{15}$$

where $V$ is a mask that separates different face regions and $\odot$ is the Hadamard product.
The screen space motion error $SSME_{h}$ for frame window $h$ is then defined as the mean of all EPEs over all frames $t$ for which frame $t+h$ exists:

$$SSME_{h} = \frac{1}{F-h} \sum_{t=1}^{t+h \leq F} EPE_{t:t+h} \tag{16}$$

Finally, to summarize tracking performance in one value, we compute the average screen space motion error $\overline{SSME}$ over all frame windows as

$$\overline{SSME} = \sum_{h=1}^{N_{H}} SSME_{h} \tag{17}$$

In other words, $\overline{SSME}$ measures the average trajectory accuracy of each pixel over a time horizon of $N_{H}$ frames.
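A sketch of how the EPE and SSME aggregation of Eqs. (15)–(17) could be computed once the per-frame screen-space flows and region masks are available; the nested-list layout of the flows and the per-pixel averaging inside the mask follow the textual description rather than any released evaluation code.

```python
import torch

def epe(flow_pred, flow_gt, mask):
    """Eq. 15: masked end-point error between predicted and ground-truth flow.
    flow_*: (H, W, 2); mask: (H, W) with 1 inside the evaluated face region."""
    err = ((flow_pred - flow_gt) * mask[..., None]).pow(2).sum(-1).sqrt()
    return err.sum() / mask.sum().clamp_min(1)

def ssme(flows_pred, flows_gt, masks, n_h=30):
    """Eqs. 16-17: average EPE per frame window h, then sum over all windows.
    flows_*[t][h-1] holds the flow from frame t to frame t+h; masks[t] is the region mask."""
    total = 0.0
    num_frames = len(flows_gt)
    for h in range(1, n_h + 1):
        epes = [epe(flows_pred[t][h - 1], flows_gt[t][h - 1], masks[t])
                for t in range(num_frames - h)]
        total += sum(epes) / max(len(epes), 1)   # SSME_h (Eq. 16)
    return total                                 # summed over all windows h (Eq. 17)
```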
We choose a maximum frame window of $N_{H} = 30$ (1 second), since most human expressions are performed within this time frame. Because the screen space motion is directly affected by most face-tracking parameters, such as intrinsics, pose, and face shape, it also measures their precision in a holistic manner. In contrast to prior works and benchmarks that use sparse key-points, SSME covers the motion of all visible face regions and is invariant to mesh topology. As it operates in screen space, it does not require additional alignment and works with all camera models, unlike 3D reconstruction or depth errors. In our benchmark, we evaluate SSME over a set of masks for semantically meaningful face regions (face, eyes, nose, mouth, and ears) (Fig. 3), permitting a more nuanced analysis of the tracking performance.

3D Reconstruction. To complete our benchmark, we additionally measure the chamfer distance (CD) to account for the depth dimension. Similar to [34], the tracked mesh is rigidly aligned to the ground-truth mesh using 7 key-points and ICP. Then, the distance of each ground-truth vertex with respect to the predicted mesh is computed and averaged. For a detailed explanation, we defer to the NoW benchmark [34]. Just like the SSME, we evaluate the CD for the same set of face regions to provide a more detailed analysis of reconstruction accuracy, similar to [6].
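As a simplified illustration of the depth-error half of the benchmark, the sketch below approximates the scan-to-mesh distance with a nearest-vertex lookup and omits the 7-key-point + ICP rigid alignment step, so it is not the benchmark's exact protocol.

```python
import torch

def scan_to_mesh_distance(gt_points, pred_vertices):
    """Simplified chamfer-style error: for each ground-truth point, the distance to the
    nearest predicted vertex; the benchmark instead uses a point-to-mesh distance after
    rigid alignment with 7 key-points and ICP.
    gt_points: (N, 3); pred_vertices: (M, 3)."""
    diffs = gt_points[:, None, :] - pred_vertices[None, :, :]   # (N, M, 3) pairwise differences
    dists = diffs.norm(dim=-1)                                  # (N, M) pairwise distances
    return dists.min(dim=1).values.mean()                       # average nearest-vertex distance
```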
Multiface Dataset. We build our benchmark around the Multiface dataset [44]. Multiface consists of multi-view videos with high-quality, topologically consistent 3D registrations. High-resolution videos are captured at 30 FPS from a large variety of calibrated views. We limit the evaluation data to a manageable size by carefully selecting a subset of 86 sequences with a diverse set of view directions and facial performances (see Appendix C).

4. Experiments

Training data. To train the 2D alignment network, we use a combined dataset made up of FaceScape [47], Stirling [1], and FaMoS [3]. Where a FLAME [26] registration is not available, we fit the FLAME template mesh to the 3D scan through semi-automatic key-point annotation and commercial topology-fitting software. For an accurate capture of face motion, we auto-annotate expression scans with additional key-points propagated with optical flow (more information in Appendix D). The ground-truth image-space vertex positions $\mu^{\prime}$ are obtained by projecting the vertices of the fitted FLAME mesh into screen space using the available camera calibrations.

Training strategy for 2D alignment network. We use Segformer-b5 (pre-trained on ImageNet [11]) as our backbone, with $D_{img} = 512$, $D_{uv} = 64$, and $N_{iter} = 3$. We use the RAFT-L configuration for the update module and keep its hyperparameters where possible [36]. We optimize the model for 6 epochs using the AdamW optimizer [27], an initial learning rate of $1 \times 10^{-4}$, and a decay of 0.1 every 2 epochs. We use image augmentations such as random scaling, rotation, and color corruption [42], synthetic occlusions [39], and synthetic backgrounds (see Appendix D).
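For concreteness, the quoted training schedule could be set up roughly as follows; `model` is a placeholder, and only the hyperparameters stated above (AdamW, initial learning rate 1e-4, decay 0.1 every 2 epochs, 6 epochs) are taken from the text.

```python
import torch

def make_alignment_optimizer(model):
    """Sketch of the 2D alignment network schedule quoted in the text:
    AdamW, initial LR 1e-4, multiplied by 0.1 every 2 epochs, trained for 6 epochs."""
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.1)
    return optimizer, scheduler

# Typical use: call scheduler.step() after each training epoch; run for 6 epochs in total.
```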
3D model fitting. To minimize the energy function and obtain the tracking parameters, we use the AdamW optimizer with an initial learning rate of $1 \times 10^{-2}$ and an automatic learning-rate scheduler with a decay factor of 0.5 and a patience of 30 steps, until convergence. We enable $\delta_{d}$ only for multi-view reconstruction, and only for the nose region.

Baselines. We implement and test against the most recent publicly available single-image regression-based approaches: 3DDFAv2 [19], SADRNet [32], PRNet [41], DECA (coarse) [14], EMOCA (coarse) [10], and HRN [24]. We extend the ability of these methods to use temporal priors by applying a simple temporal Gaussian filter to the screen-space vertices. We also include the popular photometric optimization-based approach MPT [57]. Lastly, we compare against the key-point-only optimization-based method Dense proposed by [42] on public benchmarks.

4.1. Multiface Benchmark

We divide our Multiface benchmark into two categories: without temporal information sharing, where each method is restricted to operate on single images, and with (both forward and backward) temporal information sharing, where each method is allowed to use the entire sequence as observations. Our method significantly outperforms the best publicly available method w.r.t. face-region SSME, by $54\%$ for single-image prediction and by $46\%$ for sequence prediction. This confirms the superior 2D alignment accuracy of our method. Despite using only 2D alignment as supervision, our method performs $8\%$ better in terms of 3D reconstruction (CD) than the photometric optimization approach MPT [57] (see Tab. 2).
Figure 4. $SSME_h$ plotted over all frame horizons for each evaluated tracker, for single-image and full-sequence tracking (right). Lower $SSME_h$ at smaller frame horizons $h$ (left in the graph) indicates short-term temporal stability, while lower $SSME_h$ at larger frame horizons (right in the graph) indicates better long-term tracking consistency. Our tracker performs significantly better over every time horizon.

To our surprise, MPT performs worse w.r.t. motion error than some regression-based models; this is likely due to the uniform lighting and texture in the Multiface dataset. Qualitative results (Fig. 5) confirm that methods using photometric errors (DECA, HRN, MPT) perform worse w.r.t. screen space motion in areas without key-point supervision, such as the cheeks and forehead. Plotting $SSME_h$ over different time windows $h$ (see Fig. 4) gives a previously unseen overview of temporal stability.
Regression-based methods suffer from high short-term error ($SSME_1$), which is due to temporal instability and jitter. As expected, introducing temporal smoothing improves this issue and the overall $SSME$ for these methods. Our method achieves very low short-term SSME even with single-image prediction, which indicates the high robustness and accuracy of the alignment network. As expected, introducing temporal priors reduces $SSME$.

4.2. FaceScape Benchmark
| Method | CD ↓ (mm) | NME ↓ (rad) |
|---|---|---|
| MGCNet [35] | 4.00 | 0.093 |
| PRNet [41] | 3.56 | 0.126 |
| SADRNet [32] | 6.75 | 0.133 |
| DECA [14] | 4.69 | 0.108 |
| 3DDFAv2 [19] | 3.60 | 0.096 |
| HRN [24] | 3.67 | 0.087 |
| Ours | 2.21 | 0.083 |

Table 1. Results on the FaceScape benchmark [47].

We also compare our method on the FaceScape benchmark [47], which measures 3D reconstruction accuracy from 2D images under large view (up to $90^{\circ}$) and expression variations. On this benchmark, we outperform the best previous regression-based methods by $38\%$ in terms of CD and by $4.6\%$ in terms of mean normal error (NME) (Tab. 1). This shows that our method can accurately reconstruct faces even under large view deviations.

Figure 5. Qualitative results on two sequences (top and bottom 3 rows) of our Multiface benchmark. Warmer colors represent high error, while colder colors represent low error. DECA [14], HRN [24], and MPT [57] struggle with motion in the cheek and forehead region, which is visible in the SSME error plot (right columns). Despite using only 2D alignment as supervision, our method achieves a better 3D reconstruction (CD) (center columns).
Table 2. Results on our Multiface tracking benchmark with and without temporal information sharing. Our method consistently outperforms previous methods on every single category, metric, and face region.
<table>
<tr><td></td><td colspan="3">Single-view</td><td colspan="3">Multi-view</td></tr>
<tr><td>Method</td><td colspan="3">Error (mm) ↓</td><td colspan="3">Error (mm) ↓</td></tr>
<tr><td></td><td>Median</td><td>Mean</td><td>Std</td><td>Median</td><td>Mean</td><td>Std</td></tr>
<tr><td>MGCNet [35]</td><td>1.31</td><td>1.87</td><td>2.63</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>PRNet [41]</td><td>1.50</td><td>1.98</td><td>1.88</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>DECA [14]</td><td>1.09</td><td>1.38</td><td>1.18</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Deep3D [12]</td><td>1.11</td><td>1.41</td><td>1.21</td><td>1.08</td><td>1.35</td><td>1.15</td></tr>
<tr><td>Dense [42]</td><td>1.02</td><td>1.28</td><td>1.08</td><td>0.81</td><td>1.01</td><td>0.84</td></tr>
<tr><td>MICA [57]</td><td>0.90</td><td>1.11</td><td>0.92</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>TokenFace [38]</td><td>0.76</td><td>0.95</td><td>0.82</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Ours</td><td>0.87</td><td>1.07</td><td>0.88</td><td>0.71</td><td>0.88</td><td>0.73</td></tr>
</table>
", + "image_path": "79249166a7cd09452e670256781ede41d1f959a61379f989c54cd1dd57ec8960.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 585, + 287, + 619 + ], + "lines": [ + { + "bbox": [ + 46, + 585, + 287, + 619 + ], + "spans": [ + { + "bbox": [ + 46, + 585, + 287, + 619 + ], + "type": "text", + "content": "Table 3. Results on the NoW Challenge [34]. Multi-view evaluation is done as in [42]. Multi-view results for [12] and [42] are reported by [42]." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 626, + 162, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 626, + 162, + 639 + ], + "spans": [ + { + "bbox": [ + 47, + 626, + 162, + 639 + ], + "type": "text", + "content": "under large view deviations." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 647, + 141, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 647, + 141, + 659 + ], + "spans": [ + { + "bbox": [ + 47, + 647, + 141, + 659 + ], + "type": "text", + "content": "4.3. Now Challenge" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": "The NoW benchmark is a public benchmark for evaluating neutral head reconstruction from 2D images captured indoors and outdoors, with different expressions, and under variations in lighting conditions and occlusions. We" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 468, + 547, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 468, + 547, + 659 + ], + "spans": [ + { + "bbox": [ + 304, + 468, + 547, + 659 + ], + "type": "text", + "content": "evaluate our method on the non-metrical challenge (Tab. 3). For single-view reconstruction, our model outperforms our neutral shape predictor MICA [57] by " + }, + { + "bbox": [ + 304, + 468, + 547, + 659 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 304, + 468, + 547, + 659 + ], + "type": "text", + "content": " on mean scan-to-mesh distance. For the multi-view case, we outperform the baseline Dense [42] by " + }, + { + "bbox": [ + 304, + 468, + 547, + 659 + ], + "type": "inline_equation", + "content": "13\\%" + }, + { + "bbox": [ + 304, + 468, + 547, + 659 + ], + "type": "text", + "content": ", likely due to our method's high 2D alignment accuracy, better neutral shape priors, and per-vertex deformations. TokenFace [38] performs better for the single-view case, however, their predictions could be integrated into our pipeline since they use the FLAME topology. Importantly, our network is able to generalize to these in-the-wild images despite being trained only on in-the-lab data captured under controlled lighting conditions. An important sub-task for 3D face trackers is to disentangle the identity and expression components of the face shape. The outstanding results on the NoW benchmark indicate the ability of our tracker to accomplish this." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 670, + 417, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 417, + 681 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 417, + 681 + ], + "type": "text", + "content": "4.4. 
Downstream Tasks" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "content": "In the following, we show how we enhance downstream models using our face tracker." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 733, + 315, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 733, + 315, + 742 + ], + "spans": [ + { + "bbox": [ + 296, + 733, + 315, + 742 + ], + "type": "text", + "content": "1233" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": "3D Head Avatar Synthesis. Recent head avatar synthesis methods heavily rely on photometric head trackers to generate face alignment priors [17, 53, 56]. INSTA [56], a top-performing model, uses MPT [57]. We modify INSTA by replacing their tracker with ours. We compare our enhanced FlowFace-INSTA to the baseline MPT-INSTA. On their publicly available dataset, we outperform MPT-INSTA by " + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "inline_equation", + "content": "10.5\\%" + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": " on perceptual visual fidelity (LPIPS). On our Multiface benchmark videos, we outperform MPT-INSTA by " + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "inline_equation", + "content": "20.3\\%" + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": " on LPIPS. Detailed results can be viewed in Appendix G. These results demonstrate how better face trackers can directly improve performance on down-stream tasks which highlights the importance of our research." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 238, + 288, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 238, + 288, + 357 + ], + "spans": [ + { + "bbox": [ + 46, + 238, + 288, + 357 + ], + "type": "text", + "content": "Speech-driven 3D facial animation. The field of speech-driven facial animation often suffers from data sparsity [9, 13, 46]. To alleviate this issue, we generate 3D face meshes using the multi-view video dataset MEAD [40]. In using this generated dataset to augment the training of the state-of-the-art model CodeTalker [46] (see Appendix H), we are able to improve from a lip vertex error of " + }, + { + "bbox": [ + 46, + 238, + 288, + 357 + ], + "type": "inline_equation", + "content": "3.13 \\times 10^{-5}" + }, + { + "bbox": [ + 46, + 238, + 288, + 357 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 238, + 288, + 357 + ], + "type": "inline_equation", + "content": "2.85 \\times 10^{-5}" + }, + { + "bbox": [ + 46, + 238, + 288, + 357 + ], + "type": "text", + "content": " on the VOCASET benchmark [9], an " + }, + { + "bbox": [ + 46, + 238, + 288, + 357 + ], + "type": "inline_equation", + "content": "8.8\\%" + }, + { + "bbox": [ + 46, + 238, + 288, + 357 + ], + "type": "text", + "content": " improvement. This underlines the benefit of high-accuracy video face trackers for large-scale data generation." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 367, + 135, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 367, + 135, + 379 + ], + "spans": [ + { + "bbox": [ + 47, + 367, + 135, + 379 + ], + "type": "text", + "content": "4.5.2D Alignment" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 386, + 287, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 386, + 287, + 553 + ], + "spans": [ + { + "bbox": [ + 46, + 386, + 287, + 553 + ], + "type": "text", + "content": "To show the benefit of our 2D alignment model architecture, we conduct an evaluation on our validation set, which consists of 84 subjects of our dataset. We implement the dense landmark model of [42] (ResNet-101 backbone) and adapt it to output FLAME vertex alignment and uncertainty. We also implement PRNet [41] and modify it in the same way. We retrain each method on our training set. In evaluate the 2D alignment accuracy with respect to normalized mean error (NME) of every vertex in the face area (Fig. 14, green vertices). With an NME of 1.30, our method performs significantly better than the ResNet architecture of Dense [42] (NME = 1.63), and PRNet (NME = 2.52). We note that the accuracy of uncertainty cannot be evaluated with NME. A qualitative comparison can be viewed in Fig. 17." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 563, + 147, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 563, + 147, + 574 + ], + "spans": [ + { + "bbox": [ + 47, + 563, + 147, + 574 + ], + "type": "text", + "content": "4.6. Ablation Studies" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 582, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 713 + ], + "type": "text", + "content": "2D alignment network. To analyze the effect of different feature encoder backbones, we replace our backbone with different variations of the Segformer model and also test the CNN-based backbone BiSeNet-v2 [49] (see Tab. 4). As expected, vision-transformer-based networks show better performance. Experimenting with the number of iterations " + }, + { + "bbox": [ + 46, + 582, + 287, + 713 + ], + "type": "inline_equation", + "content": "N_{iter}" + }, + { + "bbox": [ + 46, + 582, + 287, + 713 + ], + "type": "text", + "content": " for the update module, we find that multiple iterations instead of one improves the performance. Finally, we confirm the superior performance of our 2D alignment network compared to the ResNet-101-based network of [42] mentioned in Sec. 4.5." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 307, + 72, + 547, + 162 + ], + "blocks": [ + { + "bbox": [ + 307, + 72, + 547, + 162 + ], + "lines": [ + { + "bbox": [ + 307, + 72, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 547, + 162 + ], + "type": "table", + "html": "
<table>
<tr><td>Backbone</td><td>Niter</td><td>#Param</td><td>latency (ms)</td><td>CD↓</td><td>SSME↓</td></tr>
<tr><td>ResNet-101</td><td></td><td>73.4M</td><td>9</td><td>1.54</td><td>3.90</td></tr>
<tr><td>BiSeNet-v2</td><td>3</td><td>17.6M</td><td>23</td><td>1.21</td><td>3.52</td></tr>
<tr><td>MiT-b1</td><td>3</td><td>17.3M</td><td>29</td><td>1.22</td><td>3.21</td></tr>
<tr><td>MiT-b2</td><td>3</td><td>31.0M</td><td>46</td><td>1.20</td><td>2.78</td></tr>
<tr><td>MiT-b5</td><td>1</td><td>88.2M</td><td>66</td><td>1.25</td><td>2.70</td></tr>
<tr><td>MiT-b5</td><td>2</td><td>88.2M</td><td>71</td><td>1.21</td><td>2.61</td></tr>
<tr><td>MiT-b5</td><td>3</td><td>88.2M</td><td>75</td><td>1.18</td><td>2.58</td></tr>
<tr><td>MiT-b5</td><td>4</td><td>88.2M</td><td>80</td><td>1.23</td><td>2.62</td></tr>
</table>
", + "image_path": "a1ffb61ed11762e1f606889e9cef44b24a2d5f37ee8cf3aaed3b788572941ffa.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 219, + 545, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 219, + 545, + 363 + ], + "spans": [ + { + "bbox": [ + 304, + 219, + 545, + 363 + ], + "type": "text", + "content": "3D model fitting. We show in Tab. 5 the benefit of integrating the MICA neutral shape prediction on the NoW Challenge validation set. The significant performance gain on single-image predictions shows that our 3D tracking pipeline can integrate MICA predictions very well, even improving them. We also show the benefit of predicting a dense face alignment in conjunction with per-vertex deformations in multi-view settings. This shows that our 2D alignment is precise enough to predict face shapes that lie outside of the FLAME blend-shape space, which previous optimization-based methods [42, 57] cannot achieve. For a qualitative analysis, see Appendix E." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 315, + 374, + 537, + 449 + ], + "blocks": [ + { + "bbox": [ + 305, + 171, + 545, + 204 + ], + "lines": [ + { + "bbox": [ + 305, + 171, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 305, + 171, + 545, + 204 + ], + "type": "text", + "content": "Table 4. Ablations for backbone architectures and hyperparameters of the 2D alignment network on our Multiface benchmark. Latency is evaluated on a Quadro RTX 5000 GPU." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 315, + 374, + 537, + 449 + ], + "lines": [ + { + "bbox": [ + 315, + 374, + 537, + 449 + ], + "spans": [ + { + "bbox": [ + 315, + 374, + 537, + 449 + ], + "type": "table", + "html": "
<table>
<tr><td></td><td colspan="3">Single-view</td><td colspan="3">Multi-view</td></tr>
<tr><td>Method</td><td colspan="3">Error (mm)</td><td colspan="3">Error (mm)</td></tr>
<tr><td></td><td>Median</td><td>Mean</td><td>Std</td><td>Median</td><td>Mean</td><td>Std</td></tr>
<tr><td>Ours w/o MICA</td><td>0.99</td><td>1.23</td><td>1.03</td><td>0.71</td><td>0.88</td><td>0.76</td></tr>
<tr><td>MICA only</td><td>0.91</td><td>1.13</td><td>0.94</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Ours w/o δd</td><td>-</td><td>-</td><td>-</td><td>0.68</td><td>0.84</td><td>0.72</td></tr>
<tr><td>Ours</td><td>0.82</td><td>1.02</td><td>0.85</td><td>0.67</td><td>0.83</td><td>0.71</td></tr>
</table>
", + "image_path": "9781b5ce269c03d9518a5cde998fc7341c467a44016f5a6661ea04d506a483ce.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 456, + 545, + 478 + ], + "lines": [ + { + "bbox": [ + 305, + 456, + 545, + 478 + ], + "spans": [ + { + "bbox": [ + 305, + 456, + 545, + 478 + ], + "type": "text", + "content": "Table 5. Ablations for the 3D model fitting module on single and multi-view reconstruction on the NoW validation set." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 499, + 471, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 499, + 471, + 511 + ], + "spans": [ + { + "bbox": [ + 306, + 499, + 471, + 511 + ], + "type": "text", + "content": "5. Conclusion and Future Work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 521, + 545, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 640 + ], + "type": "text", + "content": "This paper presents a state-of-the-art face tracking pipeline with a highly robust and accurate 2D alignment module. Its performance is thoroughly validated on a variety of benchmarks and downstream tasks. However, the proposed two-stage pipeline is not fully differentiable, which prevents end-to-end learning. Furthermore, our training data is limited to data captured in-the-lab. In future work, we intend to extend the alignment network to directly predict depth as well, obviating the need for the 3D model fitting step. Synthetic datasets [42] could alleviate the data issue." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "text", + "content": "We're confident that our tracker will accelerate research in downstream tasks by generating large-scale face capture data using readily available video datasets [8, 29, 50]. We also believe that our novel motion capture evaluation benchmark will focus and align future research efforts to create even more accurate methods." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 734, + 315, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 734, + 315, + 742 + ], + "spans": [ + { + "bbox": [ + 296, + 734, + 315, + 742 + ], + "type": "text", + "content": "1234" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 285, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 285, + 112 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 285, + 112 + ], + "type": "text", + "content": "[1] Stirling/esrc 3d face database. https://pics.stir.ac.uk/ESRC/. Accessed: 2023-10-25. 
5, 2, 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 114, + 287, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 114, + 287, + 168 + ], + "spans": [ + { + "bbox": [ + 53, + 114, + 287, + 168 + ], + "type": "text", + "content": "[2] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques, page 187-194, USA, 1999. ACM Press/Addison-Wesley Publishing Co. 1, 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 171, + 287, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 171, + 287, + 214 + ], + "spans": [ + { + "bbox": [ + 53, + 171, + 287, + 214 + ], + "type": "text", + "content": "[3] Timo Bolkart, Tianye Li, and Michael J. Black. Instant multi-view head capture through learnable registration. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 768-779, 2023. 6, 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 216, + 287, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 216, + 287, + 258 + ], + "spans": [ + { + "bbox": [ + 53, + 216, + 287, + 258 + ], + "type": "text", + "content": "[4] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In International Conference on Computer Vision, 2017. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 261, + 287, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 261, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 53, + 261, + 287, + 293 + ], + "type": "text", + "content": "[5] Chen Cao, Mengei Chai, Oliver Woodford, and Linjie Luo. Stabilized real-time face tracking via a learned dynamic rigidity prior. ACM Trans. Graph., 37(6), 2018. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 296, + 287, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 296, + 287, + 338 + ], + "spans": [ + { + "bbox": [ + 53, + 296, + 287, + 338 + ], + "type": "text", + "content": "[6] Zenghao Chai, Haoxian Zhang, Jing Ren, Di Kang, Zhengzhuo Xu, Xuefei Zhe, Chun Yuan, and Linchao Bao. Really: Rethinking the evaluation of 3d face reconstruction, 2022. 2, 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 340, + 287, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 340, + 287, + 384 + ], + "spans": [ + { + "bbox": [ + 53, + 340, + 287, + 384 + ], + "type": "text", + "content": "[7] Zenghao Chai, Tianke Zhang, Tianyu He, Xu Tan, Tadas Baltrusaitis, HsiangTao Wu, Runnan Li, Sheng Zhao, Chun Yuan, and Jiang Bian. Hiface: High-fidelity 3d face reconstruction by learning static and dynamic details, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 386, + 287, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 386, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 53, + 386, + 287, + 407 + ], + "type": "text", + "content": "[8] J. S. Chung, A. Nagrani, and A. Zisserman. Voxceleb2: Deep speaker recognition. In INTERSPEECH, 2018. 
8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 409, + 287, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 409, + 287, + 462 + ], + "spans": [ + { + "bbox": [ + 53, + 409, + 287, + 462 + ], + "type": "text", + "content": "[9] Daniel Cudeiro, Timo Bolkart, Cassidy Laidlaw, Anurag Ranjan, and Michael Black. Capture, learning, and synthesis of 3D speaking styles. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pages 10101-10111, 2019. 8, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 465, + 287, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 465, + 287, + 497 + ], + "spans": [ + { + "bbox": [ + 48, + 465, + 287, + 497 + ], + "type": "text", + "content": "[10] Radek Danecek, Michael J. Black, and Timo Bolkart. Emoca: Emotion driven monocular face capture and animation, 2022. 1, 2, 4, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 499, + 287, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 499, + 287, + 543 + ], + "spans": [ + { + "bbox": [ + 48, + 499, + 287, + 543 + ], + "type": "text", + "content": "[11] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 6, 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 544, + 287, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 287, + 598 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 287, + 598 + ], + "type": "text", + "content": "[12] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In IEEE Computer Vision and Pattern Recognition Workshops, 2019. 2, 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 601, + 287, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 287, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 287, + 643 + ], + "type": "text", + "content": "[13] Yingruo Fan, Zhaojiang Lin, Jun Saito, Wenping Wang, and Taku Komura. Faceformer: Speech-driven 3d facial animation with transformers. arXiv preprint arXiv:2112.05329, 2021.8" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "type": "text", + "content": "[14] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3d face model from in-the-wild images. CoRR, abs/2012.04012, 2020. 
1, 4, 6, 7, 10, 11" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 691, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 712 + ], + "type": "text", + "content": "[15] Pablo Garrido, Michael Zollhöfer, Dan Casas, Levi Valgaerts, Kiran Varanasi, Patrick Pérez, and Christian" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "Theobalt. Reconstruction of personalized 3d face rigs from monocular video. ACM Trans. Graph., 35(3), 2016. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 96, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 140 + ], + "type": "text", + "content": "[16] Pablo Garrido, Michael Zollhöfer, Chenglei Wu, Derek Bradley, Patrick Pérez, Thabo Beeler, and Christian Theobalt. Corrective 3d reconstruction of lips from monococular video. ACM Trans. Graph., 35(6), 2016. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 141, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 141, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 308, + 141, + 545, + 184 + ], + "type": "text", + "content": "[17] Philip-William Grassal, Malte Prinzler, Titus Leistner, Carsten Rother, Matthias Nießner, and Justus Thies. Neural head avatars from monocular rgb videos. arXiv preprint arXiv:2112.01554, 2021. 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 186, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 186, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 308, + 186, + 545, + 228 + ], + "type": "text", + "content": "[18] Ivan Grishchenko, Artsiom Ablavatski, Yury Kartynnik, Karthik Raveendran, and Matthias Grundmann. Attention mesh: High-fidelity face mesh prediction in real-time. CoRR, abs/2006.10962, 2020. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 230, + 545, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 230, + 545, + 263 + ], + "spans": [ + { + "bbox": [ + 308, + 230, + 545, + 263 + ], + "type": "text", + "content": "[19] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Yang Fan, Zhen Lei, and Stan Li. Towards Fast, Accurate and Stable 3D Dense Face Alignment, pages 152-168. 2020. 1, 2, 6, 7, 10, 11" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 265, + 545, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 265, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 308, + 265, + 545, + 308 + ], + "type": "text", + "content": "[20] Riza Alp Güler, George Trigeorgis, Epameinondas Antonakos, Patrick Snape, Stefanos Zafeiriou, and Iasonas Kokkinos. Densereg: Fully convolutional dense shape regression in-the-wild, 2017. 
2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 309, + 545, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 309, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 308, + 309, + 545, + 331 + ], + "type": "text", + "content": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 332, + 545, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 332, + 545, + 354 + ], + "spans": [ + { + "bbox": [ + 308, + 332, + 545, + 354 + ], + "type": "text", + "content": "[22] A 3D Face Model for Pose and Illumination Invariant Face Recognition, Genova, Italy, 2009. IEEE. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 355, + 545, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 355, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 308, + 355, + 545, + 399 + ], + "type": "text", + "content": "[23] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 400, + 545, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 400, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 308, + 400, + 545, + 443 + ], + "type": "text", + "content": "[24] Biwen Lei, Jianqiang Ren, Mengyang Feng, Miaomiao Cui, and Xuansong Xie. A hierarchical representation network for accurate and detailed face reconstruction from in-the-wild images, 2023. 1, 2, 6, 7, 10, 11" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 445, + 545, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 445, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 308, + 445, + 545, + 510 + ], + "type": "text", + "content": "[25] J. P. Lewis, Matt Cordner, and Nickson Fong. Pose space deformation: A unified approach to shape interpolation and skeleton-driven deformation. In Proceedings of the 27th Annual Conference on Computer Graphics and Interactive Techniques, page 165-172, USA, 2000. ACM Press/Addison-Wesley Publishing Co. 4" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 512, + 545, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 512, + 545, + 565 + ], + "spans": [ + { + "bbox": [ + 308, + 512, + 545, + 565 + ], + "type": "text", + "content": "[26] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 2, 4, 6, 3, 5, 7" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 567, + 545, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 567, + 545, + 589 + ], + "spans": [ + { + "bbox": [ + 308, + 567, + 545, + 589 + ], + "type": "text", + "content": "[27] Ilya Loshchilov and Frank Hutter. Fixing weight decay regularization in adam. CoRR, abs/1711.05101, 2017. 
6" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 590, + 545, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 590, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 308, + 590, + 545, + 622 + ], + "type": "text", + "content": "[28] Araceli Morales, Gemma Piella, and Federico M. Sukno. Survey on 3d face reconstruction from uncalibrated images. CoRR, abs/2011.05740, 2020. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 624, + 545, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 545, + 657 + ], + "type": "text", + "content": "[29] Arsha Nagrani, Joon Son Chung, Weidi Xie, and Andrew Zisserman. Voxceleb: Large-scale speaker verification in the wild. Computer Science and Language, 2019. 8" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "text", + "content": "[30] Andrés Prados-Torreblanca, José M Buenaposada, and Luis Baumela. Shape preserving facial landmarks with graph attention networks. In 33rd British Machine Vision Conference 2022, BMVC 2022, London, UK, November 21-24, 2022. BMVA Press, 2022. 2" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 734, + 315, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 734, + 315, + 742 + ], + "spans": [ + { + "bbox": [ + 296, + 734, + 315, + 742 + ], + "type": "text", + "content": "1235" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[31] Aashish Rai, Hiresh Gupta, Ayush Pandey, Francisco Vicente Carrasco, Shingo Jason Takagi, Amaury Aubel, Daeil Kim, Aayush Prakash, and Fernando de la Torre. Towards realistic generative 3d face models, 2023. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 172 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 172 + ], + "type": "text", + "content": "[32] Zeyu Ruan, Changqing Zou, Longhai Wu, Gangshan Wu, and Limin Wang. SADRNet: Self-aligned dual face regression networks for robust 3d dense face alignment and reconstruction. IEEE Transactions on Image Processing, 30: 5793-5806, 2021. 2, 6, 7, 10, 11" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 174, + 287, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 174, + 287, + 206 + ], + "spans": [ + { + "bbox": [ + 48, + 174, + 287, + 206 + ], + "type": "text", + "content": "[33] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zh-moginov, and Liang-Chieh Chen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 208, + 287, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 208, + 287, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 208, + 287, + 262 + ], + "type": "text", + "content": "[34] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael Black. Learning to regress 3d face shape and expression from an image without 3d supervision. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2019, 2, 5, 7, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 263, + 287, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 263, + 287, + 318 + ], + "spans": [ + { + "bbox": [ + 48, + 263, + 287, + 318 + ], + "type": "text", + "content": "[35] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Mingmin Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3d face reconstruction by occlusion-aware multi-view geometry consistency. arXiv preprint arXiv:2007.12494, 2020. 2, 6, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 319, + 287, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 319, + 287, + 352 + ], + "spans": [ + { + "bbox": [ + 48, + 319, + 287, + 352 + ], + "type": "text", + "content": "[36] Zachary Teed and Jia Deng. RAFT: recurrent all-pairs field transforms for optical flow. CoRR, abs/2003.12039, 2020. 3, 6, 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 354, + 287, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 354, + 287, + 387 + ], + "spans": [ + { + "bbox": [ + 48, + 354, + 287, + 387 + ], + "type": "text", + "content": "[37] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2face: Real-time face capture and reenactment of rgb videos, 2020. 1, 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 388, + 287, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 287, + 453 + ], + "type": "text", + "content": "[38] Zhang Tianke, Chu Xuangeng, Liu Yunfei, Lin Lijian, Yang Zhendong, Xu Zhengzhuo, Cao Chengkun, Yu Fei, Zhou Changyin, Yuan Chun, and Yu Li. Accurate 3d face reconstruction with facial component tokens. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2023. 2, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 455, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 455, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 455, + 287, + 510 + ], + "type": "text", + "content": "[39] Kenny T. R. Voo, Liming Jiang, and Chen Change Loy. Delving into high-quality synthetic face occlusion segmentation datasets. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2022. 6, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 511, + 287, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 511, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 48, + 511, + 287, + 555 + ], + "type": "text", + "content": "[40] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 
8, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 555, + 287, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 287, + 578 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 287, + 578 + ], + "type": "text", + "content": "[41] Yue Wang and Justin M. Solomon. Prnet: Self-supervised learning for partial-to-partial registration, 2019. 2, 6, 7, 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 579, + 287, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 579, + 287, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 579, + 287, + 643 + ], + "type": "text", + "content": "[42] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Matthew Johnson, Jingjing Shen, Nikola Milosavljevic, Daniel Wilde, Stephan Garbin, Chirag Raman, Jamie Shotton, Toby Sharp, Ivan Stojiljkovic, Tom Cashman, and Julien Valentin. 3d face reconstruction with dense landmarks, 2022. 1, 2, 4, 6, 7, 8, 3, 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "type": "text", + "content": "[43] Chenglei Wu, Derek Bradley, Markus Gross, and Thabo Beeler. An anatomically-constrained local deformation model for monocular face capture. ACM Trans. Graph., 35 (4), 2016. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[44] Cheng-hsin Wu, Ningyuan Zheng, Scott Ardisson, Rohan Bali, Danielle Belko, Eric Brockmeyer, Lucas Evans, Tim" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 160 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 160 + ], + "type": "text", + "content": "Othy Godisart, Hyowon Ha, Xuhua Huang, Alexander Hypes, Taylor Koska, Steven Krenn, Stephen Lombardi, Xiaomin Luo, Kevyn McPhail, Laura Millerschoen, Michal Perdoch, Mark Pitts, Alexander Richard, Jason Saragih, Junko Saragih, Takaaki Shiratori, Tomas Simon, Matt Stewart, Autumn Trimble, Xinshuo Weng, David Whitewolf, Chenglei Wu, Shouou-I Yu, and Yaser Sheikh. Multiface: A dataset for neural face rendering. In arXiv, 2022. 1, 5, 2, 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 161, + 545, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 161, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 308, + 161, + 545, + 215 + ], + "type": "text", + "content": "[45] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. In Neural Information Processing Systems (NeurIPS), 2021. 3, 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 217, + 545, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 217, + 545, + 248 + ], + "spans": [ + { + "bbox": [ + 308, + 217, + 545, + 248 + ], + "type": "text", + "content": "[46] Jinbo Xing, Menghan Xia, Yuechen Zhang, Xiaodong Cun, Jue Wang, and Tien-Tsin Wong. 
Codetalker: Speech-driven 3d facial animation with discrete motion prior, 2023. 8, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 249, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 249, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 308, + 249, + 545, + 293 + ], + "type": "text", + "content": "[47] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction, 2020. 2, 5, 6, 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 293, + 545, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 293, + 545, + 326 + ], + "spans": [ + { + "bbox": [ + 308, + 293, + 545, + 326 + ], + "type": "text", + "content": "[48] Hongwei Yi, Hualin Liang, Yifei Liu, Qiong Cao, Yandong Wen, Timo Bolkart, Dacheng Tao, and Michael J. Black. Generating holistic 3d human motion from speech, 2023. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 327, + 545, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 327, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 308, + 327, + 545, + 380 + ], + "type": "text", + "content": "[49] Changqian Yu, Changxin Gao, FlowFace-INSTA to the baseline MPT-INSTA Jingbo Wang, Gang Yu, Chunhua Shen, and Nong Sang. Bisenet V2: bilateral network with guided aggregation for real-time semantic segmentation. CoRR, abs/2004.02147, 2020. 8, 5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 381, + 545, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 381, + 545, + 414 + ], + "spans": [ + { + "bbox": [ + 308, + 381, + 545, + 414 + ], + "type": "text", + "content": "[50] Jianhui Yu, Hao Zhu, Liming Jiang, Chen Change Loy, Weidong Cai, and Wayne Wu. CelebV-Text: A large-scale facial text-video dataset. In CVPR, 2023. 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 415, + 545, + 447 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 415, + 545, + 447 + ], + "spans": [ + { + "bbox": [ + 308, + 415, + 545, + 447 + ], + "type": "text", + "content": "[51] Shifeng Zhang, Xiangyu Zhu, Zhen Lei, Hailin Shi, Xiaobo Wang, and Stan Z. Li. S" + }, + { + "bbox": [ + 308, + 415, + 545, + 447 + ], + "type": "inline_equation", + "content": "^3" + }, + { + "bbox": [ + 308, + 415, + 545, + 447 + ], + "type": "text", + "content": "fd: Single shot scale-invariant face detector, 2017. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 449, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 449, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 308, + 449, + 545, + 491 + ], + "type": "text", + "content": "[52] Yufeng Zheng, Victoria Fernández Abrevaya, Xu Chen, Marcel C. Bühler, Michael J. Black, and Otmar Hilliges. I M avatar: Implicit morphable head avatars from videos. CoRR, abs/2112.07471, 2021. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 492, + 545, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 492, + 545, + 547 + ], + "spans": [ + { + "bbox": [ + 308, + 492, + 545, + 547 + ], + "type": "text", + "content": "[53] Yufeng Zheng, Wang Yifan, Gordon Wetzstein, Michael J. Black, and Otmar Hilliges. Pointavatar: Deformable point-based head avatars from videos. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 548, + 545, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 548, + 545, + 580 + ], + "spans": [ + { + "bbox": [ + 308, + 548, + 545, + 580 + ], + "type": "text", + "content": "[54] Zhenglin Zhou, Huaxia Li, Hong Liu, Nanyang Wang, Gang Yu, and Rongrong Ji. Star loss: Reducing semantic ambiguity in facial landmark detection, 2023. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 581, + 545, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 581, + 545, + 613 + ], + "spans": [ + { + "bbox": [ + 308, + 581, + 545, + 613 + ], + "type": "text", + "content": "[55] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z. Li. Face alignment across large poses: A 3d solution. CoRR, abs/1511.07212, 2015. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 613, + 545, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 613, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 308, + 613, + 545, + 657 + ], + "type": "text", + "content": "[56] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Instant volumetric head avatars. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4574-4584, 2022. 8, 4, 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 658, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 689 + ], + "type": "text", + "content": "[57] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces, 2022. 1, 2, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "text", + "content": "[58] Michael Zollhöfer, Justus Thies, Darek Bradley, Pablo Garrido, Thabo Beeler, Patrick Pérez, Marc Stamminger," + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 734, + 315, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 734, + 315, + 742 + ], + "spans": [ + { + "bbox": [ + 296, + 734, + 315, + 742 + ], + "type": "text", + "content": "1236" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 288, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 288, + 106 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 288, + 106 + ], + "type": "text", + "content": "Matthias Nießner, and Christian Theobalt. State of the art on monocular 3d face reconstruction, tracking, and applications. 2018. 
1, 2" + } + ] + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 734, + 315, + 743 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 734, + 315, + 743 + ], + "spans": [ + { + "bbox": [ + 296, + 734, + 315, + 743 + ], + "type": "text", + "content": "1237" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/444ecb6f-5ab4-45bb-9d08-b2b359c08da3_content_list.json b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/444ecb6f-5ab4-45bb-9d08-b2b359c08da3_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..5a1874e25921357984e1f3a5c2bdabe1061aef2c --- /dev/null +++ b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/444ecb6f-5ab4-45bb-9d08-b2b359c08da3_content_list.json @@ -0,0 +1,1588 @@ +[ + { + "type": "text", + "text": "3D Facial Expressions through Analysis-by-Neural-Synthesis", + "text_level": 1, + "bbox": [ + 178, + 130, + 792, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "George Retsinas1† Panagiotis P. Filntisis1† Radek Daněček3 Victoria F. Abrevaya3 Anastasios Roussos4 Timo Bolkart3* Petros Maragos1,2", + "bbox": [ + 189, + 172, + 777, + 209 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Institute of Robotics, Athena Research Center, 15125 Maroussi, Greece", + "bbox": [ + 292, + 218, + 674, + 232 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ School of Electrical & Computer Engineering, National Technical University of Athens, Greece", + "bbox": [ + 230, + 233, + 738, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ MPI for Intelligent Systems, Tübingen, Germany", + "bbox": [ + 352, + 247, + 617, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{4}$ Institute of Computer Science (ICS), Foundation for Research & Technology - Hellas (FORTH), Greece", + "bbox": [ + 205, + 261, + 759, + 273 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/da1a4cae32a6366d3ca8377814176725aa5cd0467fc824055cc8ccc1bf9dca84.jpg", + "image_caption": [ + "Figure 1. SMIRK reconstructs 3D faces from monocular images with facial geometry that faithfully recover extreme, asymmetric, and subtle expressions. Top: images of people with challenging expressions. Bottom: SMIRK reconstructions." + ], + "image_footnote": [], + "bbox": [ + 78, + 287, + 890, + 449 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 501, + 313, + 517 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While existing methods for 3D face reconstruction from in-the-wild images excel at recovering the overall face shape, they commonly miss subtle, extreme, asymmetric, or rarely observed expressions. We improve upon these methods with SMIRK (Spatial Modeling for Image-based Reconstruction of Kinesics), which faithfully reconstructs expressive 3D faces from images. We identify two key limitations in existing methods: shortcomings in their self-supervised training formulation, and a lack of expression diversity in the training images. For training, most methods employ differentiable rendering to compare a predicted face mesh with the input image, along with a plethora of additional loss functions. 
This differentiable rendering loss not only has to provide supervision to optimize for 3D face geometry, camera, albedo, and lighting, which is an ill-posed optimization problem, but the domain gap between rendering and input image further hinders the learning process. Instead, SMIRK replaces the differentiable rendering with a neural rendering module that, given the rendered predicted mesh geometry, and sparsely sampled pixels of the input image, generates a face image. As the neural rendering gets color information from sampled image pixels, supervising with neural rendering-based reconstruction loss can focus solely on the geometry. Further,", + "bbox": [ + 75, + 537, + 473, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "it enables us to generate images of the input identity with varying expressions while training. These are then utilized as input to the reconstruction model and used as supervision with ground truth geometry. This effectively augments the training data and enhances the generalization for diverse expressions. Our qualitative, quantitative and particularly our perceptual evaluations demonstrate that SMIRK achieves the new state-of-the-art performance on accurate expression reconstruction. For our method's source code, demo video and more, please visit our project webpage: https://georgenetsi.github.io/smirk/.", + "bbox": [ + 496, + 503, + 893, + 670 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 696, + 632, + 712 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Reconstructing 3D faces from single images in-the-wild has been a central goal of computer vision for the last three decades [98] with practical implications in various fields including virtual and augmented reality, entertainment, and telecommunication. Commonly, these methods estimate the parameters of a 3D Morphable Model (3DMM) [12, 26], either through optimization [3, 6-8, 34, 67, 80] or regression with deep learning [16, 18, 20, 28, 29, 33, 46, 65, 66, 70, 75, 82]. Due to the lack of large-scale paired 2D-3D data, most learning-based methods follow a self-supervised train", + "bbox": [ + 496, + 722, + 893, + 875 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Equal contributions. * Now at Google.", + "bbox": [ + 522, + 887, + 769, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "2490", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ing scheme using an analysis-by-synthesis approach [7, 75].", + "bbox": [ + 76, + 90, + 467, + 107 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although there has been a persistent improvement in the accuracy of identity shape reconstruction, as indicated by established benchmarks [28, 70], the majority of works fail to capture the full range of facial expressions, including extreme, asymmetric, or subtle movements which are perceptually significant to humans -see e.g. Fig. 1. 
Recent works addressed this by augmenting the photometric error with image-based perceptual losses based on expert networks for emotion [18], lip reading [29], or face recognition [32], or with a GAN-inspired discriminator [61]. However, this requires a careful balancing of the different loss terms, and can often produce over-exaggerated facial expressions.", + "bbox": [ + 75, + 109, + 467, + 290 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We argue here that the main problem is the shortcomings of the differentiable rendering loss. Jointly optimizing for geometry, camera, appearance, and lighting is an ill-posed optimization problem due to shape-camera [73] and albedo-lighting [25] ambiguities. Further the loss is negatively impacted by the large domain gap between natural input image and the rendering. The commonly employed Lambertian reflectance model is an over-simplistic approximation of the light-face interaction [26], and it is insufficient to account for hard self-shadows, unusual illumination environments, highly reflective skin, and differences in camera color patterns. This, in turn, can result in sub-optimal reconstructions by providing incorrect guidance during training.", + "bbox": [ + 75, + 292, + 467, + 489 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we introduce a simple but effective analysis-by-neural-synthesis supervision to improve the perceived quality of the reconstructed expressions. For this, we replace the differentiable rendering step of self-supervised approaches with an image-to-image translator based on U-Net [68]. Given a monochromatic rendering of the geometry together with sparsely sampled pixels of the input image, this U-Net generates an image which is then compared to the input image. Our key observation is that this neural rendering provides more accurate gradients for the task of expressive 3D face reconstruction. This approach has two advantages. First, by providing the rendered predicted mesh without appearance to the generator, the system is forced to rely on the geometry of the rendered mesh for recreating the input, leading to more faithful reconstructions. Second, the generator can create novel images, that modify the expression of the input. We leverage this while training with an expression consistency/augmentation loss. This renders a mesh of the input identity under a novel expression, renders an image with the generator, project the rendering through the encoder, and penalizes the difference between the augmented and the reconstructed expression parameters. By employing parameters from complex and extreme expressions captured under controlled laboratory settings, the network learns to handle non-typical expressions that are underrepresented in the data, promoting generalization. Our extensive experiments demonstrate that", + "bbox": [ + 75, + 493, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "SMIRK faithfully captures a wide range of facial expressions (Fig. 1), including challenging cases such as asymmetric and subtle expressions (e.g., smirking). 
This result is highlighted by the conducted user study, where SMIRK significantly outperformed all competing methods.", + "bbox": [ + 498, + 90, + 890, + 167 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our contributions are: 1) A method to faithfully recover expressive 3D faces from an input image.2) A novel analysis-by-neural-synthesis supervision that improves the quality of the reconstructed expressions. 3) A cycle-based expression consistency loss that augments expressions during training.", + "bbox": [ + 498, + 167, + 890, + 257 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 271, + 640, + 286 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Over the past two decades, the field of monocular 3D face reconstruction has witnessed extensive research and development [26, 98]. Model-free approaches directly regress 3D meshes [4, 19, 22, 27, 43, 69, 71, 74, 87, 89, 92] or voxels [41], or adapt a Signed Distance Function [17, 63, 91] for image fitting. These techniques commonly depend on extensive 3D training data, often generated using a 3D face model. However, this dependency can constrain their expressiveness due to limitations inherent to data creation [4, 19, 27, 41, 43, 69, 87] and disparities between synthetic and real images [22, 71, 92].", + "bbox": [ + 498, + 296, + 890, + 462 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Many works estimate parameters of established 3D Morphable Models (3DMMs), like BFM [64], FaceWarehouse [14], or FLAME [53]. This can be achieved using direct optimization procedure in an analysis-by-synthesis framework [3, 6-8, 15, 30, 34, 47, 52, 65, 67, 78-80], but this needs to be applied on novel images every time, which is computationally expensive. Recent deep learning approaches offer fast and robust estimation of 3DMM parameters, using either supervised [16, 36, 46, 66, 82, 83, 94, 96, 97] or self-supervised training, for which different types of supervision have been proposed and used in combination, with the most important being the following: a) 2D landmarks supervision [20, 28, 55, 70, 72, 75-77, 90] is critical for coarse facial geometry and alignment, but is limited by the sparsity and potential inaccuracy of the predicted landmarks, particularly for complex expressions and poses. Methods that rely on dense landmarks [4, 88] overcome the sparsity problem but their accuracy is limited by the inherent ambiguity of dense correspondences across different faces. b) Photometric constraints [20, 28, 33, 72, 75-77, 90] are particularly effective for facial data, but are susceptible to alignment errors and depend on the quality of the rendered image. c) Perceptual losses have been proven beneficial in aligning the output with human perception [93]. 
Several methods make use of this by applying perceptual features losses of expert networks for identity recognition [20, 28, 32, 33, 72], emotion [18] or lip articulation [29, 37], but are hard to balance with other terms and can sometimes produce exaggerated results, particularly in terms of expressions.", + "bbox": [ + 498, + 463, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2491", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We explore an alternative approach, where an image-to-image translation model is coupled with a simple photometric error, encouraging more nuanced details to be explained by the geometry.", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Closer to our work are methods that simultaneously train a regressor network and an appearance model to improve the photometric error signal. Booth et al. [10, 11] employ a 3DMM for shape estimation coupled with a PCA appearance model learned from images in-the-wild. Grecer et al. [32] extend this idea by using a GAN to model the facial appearance more effectively. [58, 76, 77, 84, 85] learn nonlinear models of shape and expression while training a regressor in a self-supervised manner. Lin et al. [54] refine an initial 3DMM texture while training the regressor. Several other works learn neural appearance models for faces from large datasets [5, 32, 48-50, 57]. In this work, we do not learn a new appearance model, but directly use a generator for better geometry supervision, achieving significantly improved expression estimation. Also related to this work are approaches that train a conditional generative model that transforms a rendering of a mesh model into a realistic image, e.g. [21, 23, 24, 35, 45, 62]. While their focus is on controllable image generation, we investigate here how a generator of average capacity can improve supervision for the task of 3D face reconstruction.", + "bbox": [ + 75, + 152, + 472, + 468 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method: Analysis-by-Neural-Synthesis", + "text_level": 1, + "bbox": [ + 76, + 481, + 426, + 498 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "SMIRK is inspired by recent self-supervised face reconstruction methods [18, 28, 29, 94] that combine an analysis-by-synthesis approach with deep learning. While the majority of these works produce renderings based on linear statistical models and Lambertian reflectance, SMIRK contributes with a novel neural rendering module that bridges the domain gap between the input and the synthesized output. By minimizing this discrepancy, SMIRK enables a stronger supervision signal within an analysis-by-synthesis framework. Notably, this means that neural-network based losses such as perceptual [42], identity [20, 28], or emotion [18] can be used to compare the reconstructed and input images without the typical domain-gap problem that is present in most works.", + "bbox": [ + 75, + 506, + 468, + 717 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Architecture", + "text_level": 1, + "bbox": [ + 76, + 726, + 210, + 739 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Face Model: SMIRK employs FLAME [53] to model the 3D geometry of a face, which generates a mesh of $n_v = 5023$ vertices based on identity $\\beta$ and expression $\\psi_{expr}$ parameters, extended with two blendshapes $\\psi_{eye}$ to account for eye closure [97], as well as jaw rotation $\\theta_{jaw}$ parameters. Additionally, we consider the rigid pose $\\theta_{pose}$ and the orthographic camera parameters $\\mathbf{c}$ . For brevity, we refer to all expression parameters (i.e. $\\psi_{expr}$ , $\\psi_{eye}$ and $\\theta_{jaw}$ ) as $\\psi$ , and all global transformation parameters (i.e. $\\mathbf{c}$ and $\\theta_{pose}$ ) as $\\theta$ .", + "bbox": [ + 75, + 750, + 468, + 898 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Encoder: The encoder $E(.)$ is a deep neural network that takes an image $I$ as input and regresses FLAME parameters. We separate $E$ into three different branches, each consisting of a MobilenetV3 [39] backbone: 1) $E_{\\psi}$ , which predicts the expression parameters $\\psi$ , 2) $E_{\\beta}$ that predicts the shape parameters $\\beta$ , and 3) $E_{\\theta}$ that predicts the global transformation coefficients $\\theta$ . Formally,", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\theta} = E _ {\\boldsymbol {\\theta}} (I), \\quad \\beta = E _ {\\boldsymbol {\\beta}} (I), \\quad \\psi = E _ {\\psi} (I). \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 550, + 210, + 890, + 227 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Since the main focus of this work is on improving facial expression reconstruction, we assume at train time that $E_{\\theta}$ and $E_{\\beta}$ were pre-trained and remain frozen. Note that unlike previous methods [18, 28, 29], $E$ does not predict albedo parameters since the neural rendering module does not require such explicit information.", + "bbox": [ + 496, + 241, + 890, + 330 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Neural Renderer: The neural renderer is designed to replace traditional graphics-based rendering with an imaged-to-image convolutional network $T$ . The key idea here is to provide $T$ with an input image where the face is masked out and only a small number of randomly sampled pixels within the mask remain, along with the predicted facial geometry from the encoder $E$ . By limiting the available relevant information from the input image, $T$ is forced to rely on the predicted geometry from $E$ to accurately reconstruct it.", + "bbox": [ + 496, + 332, + 890, + 468 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Formally, let $S = R(\\theta, \\beta, \\psi)$ denote the output of the differentiable rasterization step, where $S$ is the monochrome rendering of the reconstructed face mesh. The masking function $M(\\cdot)$ is applied to the input image $I$ , masking out the face and retaining only a small amount of random pixels within the mask. $M(I)$ is then concatenated with $S$ , and the resulting tensor is passed through the neural renderer $T$ to produce a reconstruction of the original image $I' = T(S \\oplus M(I))$ , where $\\oplus$ denotes concatenation. A crucial property of this module is to assist the gradient flow towards the encoder. 
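To make Eq. 1 concrete, the following is a minimal PyTorch-style sketch of the three-branch encoder; it is an illustration rather than the released SMIRK code, `TinyBackbone` stands in for the MobileNetV3 backbones, and all layer sizes and module names are assumptions.

```python
import torch
import torch.nn as nn

class TinyBackbone(nn.Module):
    """Stand-in feature extractor for the MobileNetV3 backbone of each branch."""
    def __init__(self, out_dim=128):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(3, 16, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(16, out_dim), nn.ReLU())

    def forward(self, x):
        return self.net(x)

class SmirkEncoder(nn.Module):
    """Eq. 1: theta = E_theta(I), beta = E_beta(I), psi = E_psi(I), one backbone per branch."""
    def __init__(self, n_expr=53, n_shape=300, n_pose=9):  # dimensionalities are illustrative assumptions
        super().__init__()
        self.E_psi = nn.Sequential(TinyBackbone(), nn.Linear(128, n_expr))      # expression, eyelids, jaw
        self.E_beta = nn.Sequential(TinyBackbone(), nn.Linear(128, n_shape))    # identity shape (frozen at train time)
        self.E_theta = nn.Sequential(TinyBackbone(), nn.Linear(128, n_pose))    # rigid pose + orthographic camera (frozen)

    def forward(self, I):
        return self.E_psi(I), self.E_beta(I), self.E_theta(I)

# example: psi, beta, theta = SmirkEncoder()(torch.randn(1, 3, 224, 224))
```

Keeping the three branches as independent backbones mirrors the description above, so the expression branch can be trained while the shape and pose branches stay frozen.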
Therefore, we adopt a U-Net architecture [40, 68, 95] for $T$ , since the shortcuts will allow the gradient to flow uninterrupted towards $E$ (an ablation study on this can be found in the Suppl. Mat.).", + "bbox": [ + 496, + 469, + 890, + 680 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Optimization of the SMIRK Components", + "text_level": 1, + "bbox": [ + 500, + 691, + 851, + 709 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "SMIRK is supervised with two separate training passes: a reconstruction path and an augmented expression cycle path. We alternate between these passes on each training iteration, optimizing their respective losses. We describe each in the following subsections.", + "bbox": [ + 496, + 715, + 890, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2.1 Reconstruction Path", + "text_level": 1, + "bbox": [ + 500, + 815, + 696, + 829 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the reconstruction path (Fig. 2), the encoder $E$ regresses FLAME parameters from the input image $I$ and the resulting 3D face is rendered to obtain $S$ . Next, $I$ is masked out using the masking function $M(\\cdot)$ , is concatenated with $S$ ,", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "2492", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/ae57b5a2b0a98fb6d122a38935d91b4c8f7f943dda471eb9bd2d14d944b75f9b.jpg", + "image_caption": [ + "Figure 2. Reconstruction pass. An input image is passed to the encoder which regresses FLAME and camera parameters. A 3D shape is reconstructed, rendered with a differentiable rasterizer and finally translated into the output domain with the image translation network. Then, standard self-supervised landmark, photometric and perceptual losses are computed." + ], + "image_footnote": [], + "bbox": [ + 78, + 87, + 472, + 204 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/68f7f94d2ee9d10a002d3ded380cbd5b1af7065263f18387c8b4cbd12c3a28b6.jpg", + "image_caption": [ + "Figure 3. Masking Process. An input image is masked to obscure the face (upper path), then we sample random pixels to be unmasked (lower path)" + ], + "image_footnote": [], + "bbox": [ + 96, + 310, + 454, + 431 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and fed into $T$ to obtain a reconstruction of the input image $I'$ .", + "bbox": [ + 76, + 492, + 468, + 521 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Masking: To promote the reliance of $T$ on the 3D rendered face for reconstructing $I$ , we need to mask out the face in the input image $I$ . We do that by using the convex hull of detected 2D landmarks [13], dilated so that it fully covers the face. However, without any information of the face interior, training the translator becomes challenging since texture information, such as skin color, facial hair or even accessories (e.g., glasses) are \"distractors\" that complicate training. To address this we randomly sample and retain a small amount of pixels (1%) that are used as guidance for the image reconstruction. Note that sampling too many pixels makes the reconstruction overly guided and the 3D rendered face does not control the reconstruction output. We observed a similar behavior when we tried to randomly mask out blocks of the image, as in [38]. The masking process is depicted in Fig. 
3.", + "bbox": [ + 75, + 522, + 468, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Loss functions: The reconstruction path is supervised with the following loss functions:", + "bbox": [ + 75, + 763, + 468, + 794 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Photometric loss. This is the L1 error between the input and the output images: $\\mathcal{L}_{photo} = \\| I' - I\\| _1$", + "bbox": [ + 76, + 794, + 468, + 825 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "VGG loss. The VGG loss [42] has a similar effect to the photometric one, but helps to converge faster in the initial phases of training: $\\mathcal{L}_{vgg} = \\| \\Gamma(I') - \\Gamma(I)\\|_1$ , where $\\Gamma(.)$ represents the VGG perceptual encoder.", + "bbox": [ + 75, + 825, + 468, + 885 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Landmark loss. The landmark loss, denoted as $L_{lmk} =$", + "bbox": [ + 96, + 885, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/dbbdb408efd0df96af2959913adf0fc4a9b71d8abe0fc12ce120a70981504ed3.jpg", + "image_caption": [ + "Figure 4. Augmented cycle pass. The FLAME expression parameters of an existing reconstruction are modified. The resulting modified face is then rendered using our neural renderer. The rendering is then passed to the face reconstruction encoder to regress the FLAME parameters and a consistency loss between the modified input and reconstructed FLAME parameters is computed." + ], + "image_footnote": [], + "bbox": [ + 500, + 87, + 893, + 209 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\sum_{i=1}^{K}\\left\\|\\mathbf{k}-\\mathbf{k}'\\right\\|_{2}^{2}$ , measures the $L_{2}$ norm between the ground-truth 2D facial landmarks detected in the input image $(\\mathbf{k})$ and the 2D landmarks projected from the predicted 3D mesh $(\\mathbf{k}')$ , summed over $K$ landmarks.", + "bbox": [ + 496, + 319, + 890, + 382 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Expression Regularization. We employ an $L_{2}$ regularization over the expression parameters $L_{reg} = \\|\\psi\\|_2^2$ , penalizing extreme, unrealistic expressions.", + "bbox": [ + 496, + 383, + 890, + 429 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Emotion Loss. Finally, to obtain reconstructions that faithfully capture the emotional content, we employ an emotion loss $\\mathcal{L}_{emo}$ based on features extracted from a pretrained emotion recognition network $P_{e}$ , as in EMOCA [18]: $\\mathcal{L}_{emo} = \\| P_e(I') - P_e(I)\\| _2^2$ . To prevent the image translator from adversarially optimizing the emotion loss by perturbing a few pixels, for this loss we keep the image translator $T$ \"frozen\", optimizing only the expression encoder $E_{\\psi}$ . Note that unlike EMOCA, our framework ensures that the emotion loss does not suffer from domain gap problems, as the compared images reside in the same space.", + "bbox": [ + 496, + 430, + 892, + 597 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 Augmented Expression Cycle Path", + "text_level": 1, + "bbox": [ + 500, + 618, + 794, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "While the reconstruction path improves 3D reconstruction thanks to the better supervision signal provided by the neural module, it is still affected by a lack of expression diversity in the training datasets - a problem shared by all previous methods. 
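The reconstruction pass just described can be summarised in a short PyTorch-style sketch; this is an illustrative outline only, where `unet`, `vgg_features`, `emotion_net`, the landmark inputs, and the loss weights `w` are stand-ins, and the freezing of the translator for the emotion term is omitted for brevity.

```python
import torch
import torch.nn.functional as F

def mask_face(I, face_mask, keep_ratio=0.01):
    """M(I): zero out the face region but keep ~1% of random pixels inside it as guidance."""
    keep = (torch.rand_like(face_mask) < keep_ratio) & (face_mask > 0.5)
    visible = (face_mask < 0.5) | keep                   # background plus the sparse sampled pixels
    return I * visible.float()

def reconstruction_losses(I, S, face_mask, unet, vgg_features, emotion_net, k_gt, k_pred, psi, w):
    """Photometric, VGG, landmark, expression-regularisation and emotion terms of the reconstruction path."""
    I_rec = unet(torch.cat([S, mask_face(I, face_mask)], dim=1))    # I' = T(S ⊕ M(I))
    L_photo = (I_rec - I).abs().mean()                              # L1 photometric loss
    L_vgg = (vgg_features(I_rec) - vgg_features(I)).abs().mean()    # perceptual (VGG) loss
    L_lmk = ((k_pred - k_gt) ** 2).sum(-1).mean()                   # projected vs. detected 2D landmarks
    L_reg = (psi ** 2).mean()                                       # penalise extreme expressions
    L_emo = F.mse_loss(emotion_net(I_rec), emotion_net(I))          # emotion features; T is kept frozen for this term in the paper
    return (w["photo"] * L_photo + w["vgg"] * L_vgg + w["lmk"] * L_lmk
            + w["reg"] * L_reg + w["emo"] * L_emo)
```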
This means for example that if a more complex lip structure, scarcely seen in the training data, cannot be reproduced fast enough by the encoder, the translator $T$ could learn to correlate miss-aligned lip 3D structures and images and thus multiple similar, but distinct, facial expressions will be collapsed to a single reconstructed representation. Further, this may lead to the translator compensating for the encoder's failures during the joint optimization.", + "bbox": [ + 496, + 642, + 890, + 824 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "These issues are addressed with the augmented expression cycle consistency path. In this path, we start from the predicted set $\\beta, \\psi, \\theta$ , and replace the original predicted expression $\\psi$ with a new one $\\psi_{aug}$ . We then use the translator $T$ to generate a photorealistic image $I_{aug}^{\\prime}$ which adheres to", + "bbox": [ + 496, + 825, + 890, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "2493", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "it. This process effectively synthesizes an augmented training pair of $\\psi_{aug}$ and the corresponding output image $I_{aug}^{\\prime}$ . Then, the image is fed into $E$ which should perfectly recover $\\psi_{aug}$ . A cycle consistency loss can now be directly applied in the expression parameter space of the 3D model, enforcing the predicted expression to be as close as possible to the initial one. This concept is illustrated in Fig. 4.", + "bbox": [ + 75, + 90, + 468, + 196 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The benefit of this cycle path is two-fold: 1) it reduces over-compensation errors via the consistency loss and 2) it promotes diverse expressions. The latter further helps consistency by avoiding the collapse of neighboring expressions into a single parameter representation. Concerning the consistency property, we can distinguish two overcompensating factors. First, during the joint optimization of the encoder and the translator, the latter can compensate when the encoder provides erroneous predictions, leading to an overall sub-par reconstruction. Second, if we discard the consistency loss, the expression will try to over-compensate erroneous shape/pose, since we assume the shape/pose parameters are predicted from an already trained system and they are not optimized in our framework. As an example, if the shape parameters do not fully capture an elongated nose, which is an identity characteristic of the person, the expression parameters may compensate this error. Such behavior is problematic because it entangles expression, shape and pose and adds undesired biases during training.", + "bbox": [ + 75, + 198, + 470, + 484 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Pixel Transfer: The masking process retains a small amount of pixels within the face area. However, when a new expression is introduced, the previously selected pixels need to be updated and transferred such that they correspond with the vertices of the new expression. This operation is referred to as pixel transfer, where we sample pixels from the initial image according to a selected set of vertices, we then find the new position of the same vertices for the updated expression, and we assign their position as the new pixel, with the initial pixel value. 
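As a rough illustration of the pixel-transfer step described above, the sketch below moves the sparse guidance pixels from the vertex projections of the original expression to those of the augmented one; function and argument names are assumptions, and bounds checking of the projected coordinates is omitted.

```python
import torch

def transfer_guidance_pixels(I, verts2d_src, verts2d_aug, sampled_idx):
    """Re-place guidance pixels so they follow the face vertices of the new expression.

    I            : (3, H, W) input image
    verts2d_src  : (V, 2) projected vertex coordinates for the original expression
    verts2d_aug  : (V, 2) projected vertex coordinates for the augmented expression
    sampled_idx  : indices of the vertices whose pixels serve as guidance
    """
    guidance = torch.zeros_like(I)
    src = verts2d_src[sampled_idx].round().long()   # where the pixel values are read from
    dst = verts2d_aug[sampled_idx].round().long()   # where they are written for the new expression
    guidance[:, dst[:, 1], dst[:, 0]] = I[:, src[:, 1], src[:, 0]]
    return guidance
```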
This avoids inconsistencies between the underlying structure of the pixels (initial expression) and the new expression, which would hinder realistic reconstructions in the cycle path.", + "bbox": [ + 75, + 488, + 468, + 684 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Promoting Diverse Expressions: Ideally, in this path we also want to promote high variations in the expression parameter space, generating shapes (and their corresponding images) with complex, rare and asymmetric expressions that are still plausible. To effectively augment the cycle path with interesting variations we consider the following augmentations:", + "bbox": [ + 75, + 686, + 468, + 792 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Permutation: permute the expressions in a batch.", + "- Perturbation: add non-trivial noise to the reconstructed expression parameters.", + "- Template Injection: use expression templates of extreme expressions. To obtain such parameters for FLAME we perform direct iterative parameter fitting on the FaMoS [9] dataset which depicts multiple subjects per" + ], + "bbox": [ + 76, + 795, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/796c7fd999bd73be834afc7b28dd0712b4c9c2b30db24e8ece381c66515d6acf.jpg", + "image_caption": [ + "Figure 5. Neural expression augmentation. Our neural renderer enables us to modify the expression, generating a new image-3D training pair. We can edit the expression with random noise, permutation from other reconstructions, template injection, or zeroing." + ], + "image_footnote": [], + "bbox": [ + 501, + 88, + 890, + 209 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "form extreme and asymmetric expressions.", + "bbox": [ + 511, + 318, + 797, + 332 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- Zero Expression: neutral expressions help avoid biasing the system towards complex cases.", + "bbox": [ + 500, + 333, + 890, + 363 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For all expression augmentations, we simultaneously simulate jaw and eyelid openings/closings, with more aggressive augmentations in the zero-expression case to avoid incompatible blending with intense expressions. Fig. 5 presents visual examples of all augmentations and the corresponding generated images from $T$ , showcasing its ability to generate realistic images with notable expression manipulation.", + "bbox": [ + 496, + 368, + 890, + 473 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Loss functions:", + "text_level": 1, + "bbox": [ + 500, + 479, + 609, + 492 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Expression Consistency. The expression consistency loss, or cycle loss for brevity, is the mean-squared error between the given augmented expression parameters $\\psi_{aug}$ and the predicted expressions at the end of the cycle path:", + "bbox": [ + 496, + 498, + 890, + 559 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {e x p} = \\left\\| E _ {\\psi} \\left(T \\left(R (\\boldsymbol {\\theta}, \\boldsymbol {\\beta}, \\psi_ {a u g}) \\oplus M (I)\\right)\\right) - \\psi_ {a u g} \\right\\| _ {2} ^ {2} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 580, + 890, + 599 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The pose/cam and shape parameters are kept as predicted by the initial image, namely $\\theta = E_{\\theta}(I)$ and $\\beta = E_{\\beta}(I)$ . 
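To illustrate Eq. 2 and the augmentation choices listed above, here is a schematic PyTorch-style sketch; it is not the authors' implementation, the augmentation magnitudes and probabilities are arbitrary assumptions, and the freezing of individual modules during this pass is not shown.

```python
import torch
import torch.nn.functional as F

def augment_expression(psi, templates=None):
    """Sample one augmentation: batch permutation, perturbation, template injection, or zeroing."""
    mode = torch.randint(0, 4, (1,)).item()
    if mode == 0:                                   # permute expressions within the batch
        return psi[torch.randperm(psi.shape[0])]
    if mode == 1:                                   # add non-trivial noise (scale is an assumption)
        return psi + 0.5 * torch.randn_like(psi)
    if mode == 2 and templates is not None:         # inject extreme expression templates (e.g. FaMoS fits)
        return templates[torch.randint(0, templates.shape[0], (psi.shape[0],))]
    return torch.zeros_like(psi)                    # zero (neutral) expression

def expression_cycle_loss(E_psi, T, R, M_I, theta, beta, psi_aug):
    """Eq. 2: render the augmented expression, translate it to an image, re-encode, and compare expressions."""
    S_aug = R(theta, beta, psi_aug)                 # monochrome rendering of the augmented geometry
    I_aug = T(torch.cat([S_aug, M_I], dim=1))       # neural rendering of the new expression
    return F.mse_loss(E_psi(I_aug), psi_aug)        # consistency in expression-parameter space
```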
The internal $E_{\\psi}(I)$ operation, inside the renderer $R(\\cdot)$ , does not allow gradients to flow through and is used as an off-the-self frozen module.", + "bbox": [ + 496, + 619, + 890, + 694 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Identity Consistency. To aid the translator in faithfully reconstructing the identity of the person, we introduce an additional consistency loss similar to Eq. 2, applied to the shape parameters $\\beta$ . Note that since the shape encoder $E_{\\beta}$ is frozen, the consistency loss only affects the optimization of the translator $T$ .", + "bbox": [ + 496, + 699, + 890, + 789 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Alternating Optimization: Overall, we alternate between the two passes, aiming to further reduce the effect of the translator compensating for the encoder. In more detail, during the augmented cycle pass, we freeze alternatively the encoder and the translator. Thus, this pass avoids the joint optimization of the two networks in a single step, acting as a regularizer to the other pass and enforcing consistency.", + "bbox": [ + 496, + 795, + 890, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "2494", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Results", + "text_level": 1, + "bbox": [ + 76, + 89, + 163, + 104 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We now present objective and subjective evaluations of our method, along with comparisons with recent state of the art. Additional experimental evaluations and visualizations can be found in our Suppl. Mat. and demo video.", + "bbox": [ + 75, + 114, + 468, + 175 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 76, + 181, + 267, + 199 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training Datasets: We use the following datasets for training: FFHQ [44], CelebA [56], LRS3 [1], and MEAD [86]. LRS3 and MEAD are video datasets, and we randomly sample images from each video during training.", + "bbox": [ + 75, + 205, + 468, + 265 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "SOTA Methods: We compare with the following recent state-of-the-art methods that have publicly available implementations: DECA [28] and EMOCA v2 [18, 29], which use the FLAME [53] model, and Deep3DFace [20] and FOCUS [51], which use the BFM [64] model.", + "bbox": [ + 75, + 266, + 468, + 340 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Pretraining: Before the core training stage, all three encoders are pretrained, supervised by two losses - the landmark loss of the reconstruction for pose and expression and the shape predictions of MICA [97]. After that, $E_{\\beta}$ and $E_{\\theta}$ remain frozen.", + "bbox": [ + 75, + 342, + 468, + 416 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Quantitative Evaluations", + "text_level": 1, + "bbox": [ + 76, + 424, + 305, + 439 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "It has been consistently reported [2, 18, 29, 31, 60] that evaluating facial expression reconstruction in terms of geometric metrics is ill-posed. The geometric errors tend to be dominated by the identity face shape and do not correlate well with human perception of facial expressions. 
Accordingly, we compare our method in a quantitative manner with three experiments: 1) emotion recognition accuracy [18], 2) ability of a model to guide a UNet to faithfully reconstruct an input image, and 3) a perceptual user study.", + "bbox": [ + 75, + 446, + 468, + 583 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Emotion Recognition: Following the protocol of [18], we train an MLP to classify eight basic expressions and regress valence and arousal values using AffectNet [59]. We report Concordance Correlation Coefficient (CCC), root mean square error (RMSE), for both valence (V-) and arousal (A-), and expression classification accuracy (E-ACC). Results are found in Tab. 1. As it can be seen, SMIRK achieves a higher emotion recognition score compared to most other methods, although falling behind EMOCAv1/2 and Deep3DFace. It is worth noting that, although EMOCA v1 achieves the highest emotion accuracy, it often overexaggerates expressions which helps with emotion recognition. EMOCA v2, arguably a more accurate reconstruction model, performs slightly worse. Our main model is comparable with Deep3DFace and outperforms DECA and FOCUS. We can also train a model that scores better on emotion recognition, by increasing the emotion loss weight. However, similarly to what was reported by Daněček et al. [18], this leads to undesirable artifacts. We discuss the trade-off between higher emotion recognition scores and reconstruction accuracy in more detail in Sup.Mat. Notably,", + "bbox": [ + 75, + 584, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/191709f04840f34c2946294402a6156bc80ddc6459615733496fc359787deb55.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelV-CCC ↑V-RMSE ↓A-CCC ↑A-RMSE ↓E-ACC ↑
MGCNet0.690.350.580.340.60
3DDFA-v20.620.390.500.340.52
Deep3DFace0.730.330.650.310.65
DECA0.690.360.580.330.59
FOCUS-CelebA0.690.350.540.330.58
EMOCA v10.770.310.680.300.68
EMOCA v20.760.330.660.300.66
SMIRK0.720.350.610.310.64
SMIRK w/o emo0.710.350.600.320.62
", + "bbox": [ + 506, + 89, + 890, + 224 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "even without the emotion loss, the proposed model achieves a decent emotion recognition score, indicating that our reconstruction scheme can adequately capture emotions without the need for explicit perceptual supervision.", + "bbox": [ + 496, + 281, + 890, + 340 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Reconstruction Loss: In order to evaluate the faithfulness of a 3D face reconstruction technique, we have devised a protocol based on our analysis-by-neural-synthesis method. Under this protocol, we train a UNet image-to-image translator, but freeze the weights of the encoder so that only the translator is trained. The motivation is simple: if the 3D mesh is accurate enough, the reconstruction will be more faithful, due to a one-to-one appearance correspondence. For each method (including ours for fairness), we train a UNet for 5 epochs, using the masked image and the rendered 3D geometry as input. Finally, we report the $L_{1}$ reconstruction loss and the VGG loss between the reconstructed image and the input image on the test set of AffectNet [59] which features subjects under multiple expressions. The results can be seen in Table 2. We observe here that using the information for the rendered shape geometry of SMIRK, the trained UNet achieves a more faithful reconstruction of the input image when compared to DECA and EMOCAv2. Particularly for EMOCAv2, we observe that although it can capture expressions, the results in many cases do not faithfully represent the input image, leading to an overall worse image reconstruction error. In terms of $L_{1}$ loss, SMIRK is on par with Deep3DFace and FOCUS and has a small improvement in terms of VGG loss.", + "bbox": [ + 496, + 342, + 892, + 704 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/f59f4aa97855dea7b092bf1cf6858a0c9e296992b561c5002425f945f1b4c731.jpg", + "table_caption": [ + "Table 1. Emotion recognition performance on the AffectNet test set [59]. We follow the same metrics as in [18]." + ], + "table_footnote": [], + "table_body": "
DECAEMOCAv2FOCUSDeep3DFaceSMIRK
L1 Loss ↓0.100.110.090.090.09
VGG Loss ↓0.800.840.780.780.76
", + "bbox": [ + 504, + 715, + 887, + 758 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Image reconstruction performance on the AffectNet test set [59]. SMIRK achieves better reconstruction and perceptual scores compared to other methods.", + "bbox": [ + 498, + 768, + 890, + 811 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "User Study: Arguably, the perception of the reconstructed facial expressions is the most important aspect in 3D face reconstruction, as it directly influences how well the reconstructed model captures the emotions and nuances of the original face. Considering this, we also designed a", + "bbox": [ + 496, + 824, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "2495", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/32cbe24dde0545ecccec380f483f1a5ecdd9eb46aa1a9f16fcf5d1f4837995ad.jpg", + "image_caption": [ + "Figure 6. Visual comparison of 3D face reconstruction. From left to right: Input, Deep3DFaceRecon[20], FOCUS[51], DECA[28], EMOCAv2[18], and SMIRK. Many more examples can also be found in the Suppl. Mat. and the demo video in our webpage." + ], + "image_footnote": [], + "bbox": [ + 84, + 89, + 880, + 496 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "user study to assess the perception of the reconstructed facial expressions from human participants. We randomly selected 80 images from the AffectNet [59] test set (using the split from [81]) and 80 images from our MEAD test set (unseen subjects) and performed 3D face reconstruction with both SMIRK and its competitors. To mitigate bias w.r.t. the identity component for the FLAME-based methods, for DECA and EMOCAv2 we used the same identity parameters as our method (which itself was distilled from MICA). In the user study, participants were shown an image of a human face alongside two 3D face reconstructions, either from our method or the others, and were asked to choose the one with the most faithful facial expression representation. The order was randomized for each question, and each user answered a total of 32 questions, equally distributed among the different methods.", + "bbox": [ + 75, + 556, + 472, + 797 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A total of 85 users completed the study, and the results in Table 3 show that our method was significantly preferred over all competitors, confirming the performance of SMIRK in terms of faithful expressive 3D reconstruction. The results were statistically significant (for all pairs, $p < 0.01$ with binomial test, adjusted using the Bonfer", + "bbox": [ + 75, + 809, + 468, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "roni method). EMOCAv2, which also uses an emotion loss for expressive 3D reconstruction, was the closest competitor to our method, followed by FOCUS and Deep3D, while DECA was the least selected.", + "bbox": [ + 498, + 556, + 890, + 617 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/039270fcb09646fdb5b653d678fbcf126c3bd8ed0123095bc39e6c0ec5897ea1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DECAEMOCAv2Deep3DFOCUS
SMIRK603/77461/219510/170534/146
", + "bbox": [ + 531, + 631, + 857, + 664 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. User study results: \"a/b\" indicates Ours (left) was preferred $a$ times,while the competing method was chosen $b$ times. SMIRK is overwhelmingly preferred over all other methods.", + "bbox": [ + 498, + 671, + 890, + 713 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Visual Examples", + "text_level": 1, + "bbox": [ + 500, + 753, + 665, + 768 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In Fig. 6 we present multiple visual comparisons with the four other methods. As it can be visually assessed, our method can more accurately capture the facial expressions across multiple diverse subjects and conditions. Furthermore, the presented methodology can also capture expressions that other methods fail to capture, such as nonsymmetric mouth movements, eye closures, and exaggerated expressions.", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "2496", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Ablation Studies", + "text_level": 1, + "bbox": [ + 76, + 90, + 240, + 104 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation on the effect of landmarks: We first assess the effect of the landmark loss. To do that, we calculate for different versions of our model the L1 loss, VGG Loss, and Cycle loss after manipulation of expressions using the same protocol we performed in Sec. 4.2. Note that this time, we also evaluate performance by considering the cycle loss. That is, we also manipulate the predicted expressions, regenerate a new image, and expect that the method can successfully predict the same parameters. We consider three different versions of our model: 1) Protocol 1 - no landmarks loss, 2) Protocol 2 - training some epochs with landmarks loss and then removing it, 3) Protocol 3 - full training with landmarks loss. We present these results in Table 4.", + "bbox": [ + 75, + 114, + 467, + 311 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As we can see, completely omitting landmarks leads to degraded results. However, if we first train for a few epochs with landmarks and then set the loss weight to 0, the model achieves very similar performance with the original model which uses the loss throughout the full training. These results suggest that, in contrast with previous works [18, 28], the landmarks loss in SMIRK acts more as a regularizer during training, helping to guide the model towards good solutions, but in the later stages it may somewhat constrain its flexibility. We plan to explore this balance in more depth in future work.", + "bbox": [ + 75, + 314, + 467, + 479 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/b7c03a1ac4390aa0bf227b877af582acf5538570f69aa4395124b3256bb53e12.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
L1 Loss ↓VGG Loss ↓Cycle Loss ↓
P10.1110.7570.588
P20.0930.7130.487
P30.0930.7140.544
", + "bbox": [ + 117, + 489, + 424, + 554 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4. Ablation study on the effect of landmark loss. P1: no landmark loss, P2: landmark loss removed after a few epochs, P3: landmark loss throughout whole training.", + "bbox": [ + 75, + 560, + 467, + 602 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Impact of Cycle Path: Here we also present examples on how the cycle path affects the reconstruction performance. First, we show an example result in Fig. 7, where we see that using the proposed augmentations provides more detailed expressions. For example, template injection augmentation considerably helps the reconstruction of the mouth structure. Secondly, we have also observed that the cycle path makes the model more robust, especially w.r.t. mouth closures (e.g. zero jaw opening). We show such indicative cases in Figure 8. Such artifacts can be seen when using the no-cycle variant, acting as a visual confirmation of the aforementioned numerical results. Here, the mouth is not properly closed in the 3D reconstructed face, since it was miss-corresponded to a properly closed mouth in the image reconstruction space. The cycle path can solve such instances by providing tweaked expressions that are enforced to be recognized correctly, avoiding \"misalignments\" between expected expressions and reconstructed images.", + "bbox": [ + 75, + 613, + 467, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/50a9bbe87b32c8191eea95da6743dfa4f2869306879395f9805a487db95f85c1.jpg", + "image_caption": [ + "Figure 7. Impact of cycle augmentations. From left to right: input image, no cycle loss, cycle loss with all augmentations." + ], + "image_footnote": [], + "bbox": [ + 578, + 89, + 813, + 161 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6e72d6a2ac6dc79aaf57a9f8e8d0b44d5c966bd50a40055a45bae7dd16fa43ba.jpg", + "image_caption": [ + "Figure 8. Impact of the Cycle Path. Artifacts can appear when not training with the cycle path. From left to right: input image, 3D reconstruction and image reconstruction without cycle path, 3D reconstruction and image reconstruction with cycle path." + ], + "image_footnote": [], + "bbox": [ + 501, + 205, + 890, + 311 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Limitations", + "text_level": 1, + "bbox": [ + 500, + 393, + 624, + 407 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Despite the effectiveness of SMIRK, there are limitations to be addressed. It is sensitive to occlusions, as the training datasets do not include them, and assumes more intense expressions when parts are missing instead of extrapolating from available information. In addition, SMIRK has been trained on single images, and the temporal aspect is not yet explored. Also note that while SMIRK does not need to predict albedo and lighting, this can be limiting for specific applications in 3D facial animation and video editing. Please refer to the Suppl. Mat. for a more detailed discussion.", + "bbox": [ + 496, + 415, + 890, + 566 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 580, + 617, + 594 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We have presented SMIRK, a new paradigm for accurate expressive 3D face reconstruction from images. 
Instead of the traditional graphics-based approach for self-supervision which is commonly used for monocular 3D face reconstruction in-the-wild, SMIRK employs a neural image-to-image translator model, which learns to reconstruct the input face image given the rendered predicted facial geometry. Our extensive experimental results show that SMIRK outperforms previous methods and can faithfully reconstruct expressive 3D faces, including challenging complex expressions such as asymmetries, and subtle expressions such as smirking.", + "bbox": [ + 496, + 604, + 890, + 771 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 500, + 784, + 658, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This research work was supported by the project \"Applied Research for Autonomous Robotic Systems\" (MIS 5200632) which is implemented within the framework of the National Recovery and Resilience Plan \"Greece 2.0\" (Measure: 16618- Basic and Applied Research) and is funded by the European Union- NextGenerationEU.", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "2497", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. Lrs3-ted: a large-scale dataset for visual speech recognition. arXiv preprint arXiv:1809.00496, 2018. 6", + "[2] Zakaria Aldeneh, Masha Fedzechkina, Skyler Seto, Katherine Metcalf, Miguel Sarabia, Nicholas Apostoloff, and Barry-John Theobald. Towards a Perceptual Model for Estimating the Quality of Visual Speech, 2022. arXiv:2203.10117 [cs, eess]. 6", + "[3] Oswald Aldrian and William AP Smith. Inverse rendering of faces with a 3d morphable model. IEEE transactions on pattern analysis and machine intelligence, 35(5):1080-1093, 2012. 1, 2", + "[4] Riza Alp Guler, George Trigeorgis, Epameinondas Antonakos, Patrick Snape, Stefanos Zafeiriou, and Iasonas Kokkinos. Densereg: Fully convolutional dense shape regression in-the-wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6799-6808, 2017. 2", + "[5] Haoran Bai, Di Kang, Haoxian Zhang, Jinshan Pan, and Linchao Bao. FFHQ-UV: Normalized facial uv-texture dataset for 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 362–371, 2023. 3", + "[6] Anil Bas, William A. P. Smith, Timo Bolkart, and Stefanie Wuhrer. Fitting a 3D morphable model to edges: A comparison between hard and soft correspondences. In Asian Conference on Computer Vision Workshops, pages 377-391, 2017. 1, 2", + "[7] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3D faces. In Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1999. 2", + "[8] Volker Blanz, Sami Romdhani, and Thomas Vetter. Face identification across different poses and illuminations with a 3D morphable model. In International Conference on Automatic Face & Gesture Recognition (FG), pages 202-207, 2002. 1, 2", + "[9] Timo Bolkart, Tianye Li, and Michael J Black. Instant multiview head capture through learnable registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 768-779, 2023. 
5", + "[10] James Booth, Epameinondas Antonakos, Stylianos Ploumpis, George Trigeorgis, Yannis Panagakis, and Stefanos Zafeiriou. 3d face morphable models\" in-the-wild\". In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 48-57, 2017. 3", + "[11] James Booth, Anastasios Roussos, Evangelos Ververas, Epameinondas Antonakos, Stylianos Ploumpis, Yannis Panagakis, and Stefanos Zafeiriou. 3D reconstruction of \"inthe-wild\" faces in images and videos. IEEE transactions on pattern analysis and machine intelligence, 40(11):2638-2652, 2018. 3", + "[12] Alan Brunton, Augusto Salazar, Timo Bolkart, and Stefanie Wuhrer. Review of statistical shape spaces for 3D data with comparative analysis for human faces. Computer Vision and Image Understanding (CVIU), 128:1-17, 2014. 1" + ], + "bbox": [ + 78, + 114, + 468, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In Proceedings of the IEEE International Conference on Computer Vision, pages 1021-1030, 2017. 4", + "[14] Chen Cao, Yanlin Weng, Shun Zhou, Yiying Tong, and Kun Zhou. Facewarehouse: A 3d facial expression database for visual computing. IEEE Transactions on Visualization and Computer Graphics, 20(3):413-425, 2013. 2", + "[15] Chen Cao, Qiming Hou, and Kun Zhou. Displaced dynamic expression regression for real-time facial tracking and animation. Transactions on Graphics (TOG), 33(4):1-10, 2014. 2", + "[16] Feng-Ju Chang, Anh Tuan Tran, Tal Hassner, Iacopo Masi, Ram Nevatia, and Gerard Medioni. ExpNet: Landmark-free, deep, 3D facial expressions. In International Conference on Automatic Face & Gesture Recognition (FG), pages 122-129, 2018. 1, 2", + "[17] Aggelina Chatziagapi, ShahRukh Athar, Francesc Moreno-Noguer, and Dimitris Samaras. Sider: Single-image neural optimization for facial geometric detail recovery. In 2021 International Conference on 3D Vision (3DV), pages 815-824. IEEE, 2021. 2", + "[18] Radek Daneček, Michael J Black, and Timo Bolkart. EMOCA: Emotion driven monocular face capture and animation. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 20311-20322, 2022. 1, 2, 3, 4, 6, 7, 8", + "[19] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multilevel face localisation in the wild. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5203-5212, 2020. 2", + "[20] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3D face reconstruction with weakly-supervised learning: From single image to image set. In Conference on Computer Vision and Pattern Recognition Workshops (CVPR-W), pages 285-295, 2019. 1, 2, 3, 6, 7", + "[21] Zheng Ding, Xuaner Zhang, Zhihao Xia, Lars Jebe, Zhuowen Tu, and Xiuming Zhang. Diffusionrig: Learning personalized priors for facial appearance editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12736-12746, 2023. 3", + "[22] Pengfei Dou, Shishir K. Shah, and Ioannis A. Kakadiaris. End-to-end 3D face reconstruction with deep neural networks. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 5908-5917, 2017. 2", + "[23] Michail Christos Doukas, Mohammad Rami Koujan, Viktoria Sharmanska, Anastasios Roussos, and Stefanos Zafeiriou. 
Head2head++: Deep facial attributes re-targeting. IEEE Transactions on Biometrics, Behavior, and Identity Science, 3(1):31-43, 2021. 3", + "[24] Michail Christos Doukas, Stefanos Zafeiriou, and Viktoriia Sharmanska. Headgan: One-shot neural head synthesis and editing. In Proceedings of the IEEE/CVF International conference on Computer Vision, pages 14398-14407, 2021. 3", + "[25] Bernhard Egger. Semantic Morphable Models. PhD thesis, University of Basel, 2018. 2" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "2498", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[26] Bernhard Egger, William A. P. Smith, Ayush Tewari, Stefanie Wuhrer, Michael Zollhoefer, Thabo Beeler, Florian Bernard, Timo Bolkart, Adam Kortylewski, Sami Romdhani, Christian Theobalt, Volker Blanz, and Thomas Vetter. 3D morphable face models—past, present, and future. Transactions on Graphics (TOG), 39(5), 2020. 1, 2", + "[27] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In European Conference on Computer Vision (ECCV), 2018. 2", + "[28] Yao Feng, Haiwen Feng, Michael J Black, and Timo Bolkart. Learning an animatable detailed 3D face model from in-the-wild images. Transactions on Graphics, (Proc. SIGGRAPH), 40(4):1-13, 2021. 1, 2, 3, 6, 7, 8", + "[29] Panagiotis P. Filntisis, George Retsinas, Foivos Paraperas-Papantoniou, Athanasios Katsamanis, Anastasios Roussos, and Petros Maragos. SPECTRE: Visual speech-informed perceptual 3D facial expression reconstruction from videos. In Conference on Computer Vision and Pattern Recognition Workshops (CVPR-W), pages 5745-5755, 2023. 1, 2, 3, 6", + "[30] Pablo Garrido, Michael Zollhöfer, Dan Casas, Levi Valgaerts, Kiran Varanasi, Patrick Pérez, and Christian Theobalt. Reconstruction of personalized 3d face rigs from monocular video. ACM Transactions on Graphics (TOG), 35 (3):1-15, 2016. 2", + "[31] Pablo Garrido, Michael Zollhöfer, Chenglei Wu, Derek Bradley, Patrick Pérez, Thabo Beeler, and Christian Theobalt. Corrective 3d reconstruction of lips from monocular video. ACM Trans. Graph., 35(6):219-1, 2016. 6", + "[32] Baris Gecer, Stylianos Ploumpis, Irene Kotsia, and Stefanos Zafeiriou. GANFIT: Generative adversarial network fitting for high fidelity 3D face reconstruction. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1155-1164, 2019. 2, 3", + "[33] Kyle Genova, Forrester Cole, Aaron Maschinot, Aaron Sarna, Daniel Vlasic, and William T. Freeman. Unsupervised training for 3D morphable model regression. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 8377-8386, 2018. 1, 2", + "[34] Thomas Gerg, Andreas Morel-Forster, Clemens Blumer, Bernhard Egger, Marcel Luthi, Sandro Schoenborn, and Thomas Vetter. Morphable face models - an open framework. In International Conference on Automatic Face & Gesture Recognition (FG), pages 75–82, 2018. 1, 2", + "[35] Partha Ghosh, Pravir Singh Gupta, Roy Uziel, Anurag Ranjan, Michael J Black, and Timo Bolkart. GIF: Generative interpretable faces. In 2020 International Conference on 3D Vision (3DV), pages 868-878. IEEE, 2020. 3", + "[36] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 
2", + "[37] Shan He, Haonan He, Shuo Yang, Xiaoyan Wu, Pengcheng Xia, Bing Yin, Cong Liu, Lirong Dai, and Chang Xu. Speech4mesh: Speech-assisted monocular 3d facial reconstruction for speech-driven 3d facial animation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14192-14202, 2023. 2" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[38] Xingzhe He, Bastian Wandt, and Helge Rhodin. Autolink: Self-supervised learning of human skeletons and object outlines by linking keypoints. Advances in Neural Information Processing Systems, 35:36123-36141, 2022. 4", + "[39] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, et al. Searching for mobilenetv3. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1314-1324, 2019. 3", + "[40] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A. Efros. Image-to-image translation with conditional adversarial networks. CoRR, abs/1611.07004, 2016. 3", + "[41] Aaron S Jackson, Adrian Bulat, Vasileios Argyriou, and Georgios Tzimiropoulos. Large pose 3D face reconstruction from a single image via direct volumetric CNN regression. In International Conference on Computer Vision (ICCV), pages 1031-1039, 2017. 2", + "[42] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 694-711. Springer, 2016. 3, 4", + "[43] Harim Jung, Myeong-Seok Oh, and Seong-Whan Lee. Learning free-form deformation for 3D face reconstruction from in-the-wild images. In International Conference on Systems, Man, and Cybernetics (SMC), pages 2737–2742, 2021. 2", + "[44] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 6", + "[45] Hyeongwoo Kim, Pablo Garrido, Ayush Tewari, Weipeng Xu, Justus Thies, Matthias Nießner, Patrick Pérez, Christian Richardt, Michael Zolloffer, and Christian Theobalt. Deep video portraits. ACM Transactions on Graphics (TOG), 37 (4):163, 2018. 3", + "[46] Hyeongwoo Kim, Michael Zollhöfer, Ayush Tewari, Justus Thies, Christian Richardt, and Christian Theobalt. Inverse-FaceNet: deep monocular inverse face rendering. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 4625-4634, 2018. 1, 2", + "[47] Tatsuro Koizumi and William A. P. Smith. \"look ma, no landmarks!\" - unsupervised, model-based dense face alignment. In European Conference on Computer Vision (ECCV), pages 690-706, 2020. 2", + "[48] Alexandros Lattas, Stylianos Moschoglou, Baris Gecer, Stylianos Ploumpis, Vasileios Triantafyllou, Abhijeet Ghosh, and Stefanos Zafeiriou. AvatarMe: Realistically renderable 3d facial reconstruction\" in-the-wild\". In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 760-769, 2020. 3", + "[49] Alexandros Lattas, Stylianos Moschoglou, Stylianos Ploumpis, Baris Gecer, Jiankang Deng, and Stefanos Zafeiriou. Fitme: Deep photorealistic 3d morphable model avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8629-8640, 2023." 
+ ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "2499", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[50] Gun-Hee Lee and Seong-Whan Lee. Uncertainty-aware mesh decoder for high fidelity 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6100–6109, 2020. 3", + "[51] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. To fit or not to fit: Model-based face reconstruction and occlusion segmentation from weak supervision. CoRR, abs/2106.09614, 2021. 6, 7", + "[52] Hao Li, Jihun Yu, Yuting Ye, and Chris Bregler. Realtime facial animation with on-the-fly correctives. Transactions on Graphics (TOG), 32(4):42-1, 2013. 2", + "[53] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 2, 3, 6", + "[54] Jiangke Lin, Yi Yuan, Tianjia Shao, and Kun Zhou. Towards high-fidelity 3d face reconstruction from in-the-wild images using graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5891-5900, 2020. 3", + "[55] Yaojie Liu, Amin Jourabloo, William Ren, and Xiaoming Liu. Dense face alignment. In International Conference on Computer Vision Workshops (ICCV-W), pages 1619-1628, 2017. 2", + "[56] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 6", + "[57] Huiwen Luo, Koki Nagano, Han-Wei Kung, Qingguo Xu, Zejian Wang, Lingyu Wei, Liwen Hu, and Hao Li. Normalized avatar synthesis using stylegan and perceptual refinement. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11662–11672, 2021. 3", + "[58] B.R. Mallikarjun, Ayush Tewari, Hans-Peter Seidel, Mohamed Elgharib, Christian Theobalt, et al. Learning complete 3d morphable face models from images and videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3361-3371, 2021. 3", + "[59] Ali Mollahosseini, Behzad Hasani, and Mohammad H Ma-hoor. Affectnet: A database for facial expression, valence, and arousal computing in the wild. IEEE Transactions on Affective Computing, 10(1):18-31, 2017. 6, 7", + "[60] Masahiro Mori, Karl F MacDorman, and Norri Kageki. The uncanny valley [from the field]. IEEE Robotics & Automation magazine, 19(2):98-100, 2012. 6", + "[61] Christopher Otto, Prashanth Chandran, Gaspard Zoss, Markus H. Gross, Paulo F. U. Gotardo, and Derek Bradley. A perceptual shape loss for monocular 3D face reconstruction. Computer Graphics Forum (Proc. Pacific Graphics), 2023. 2", + "[62] Foivos Paraperas Papantoniou, Panagiotis P Filntisis, Petros Maragos, and Anastasios Roussos. Neural emotion director: Speech-preserving semantic control of facial expressions in \"in-the-wild\" videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18781-18790, 2022. 3", + "[63] Jeong Joon Park, Peter Florence, Julian Straub, Richard A. Newcombe, and Steven Lovegrove. 
DeepSDF: Learning" + ], + "bbox": [ + 78, + 90, + 468, + 901 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "continuous signed distance functions for shape representation. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2", + "[64] Pascal Paysan, Reinhard Knothe, Brian Amberg, Sami Romdhani, and Thomas Vetter. A 3d face model for pose and illumination invariant face recognition. In 2009 sixth IEEE international conference on advanced video and signal based surveillance, pages 296-301. IEEE, 2009. 2, 6", + "[65] Stylianos Ploumpis, Evangelos Ververas, Eimear O' Sullivan, Stylianos Moschoglou, Haoyang Wang, Nick E. Pears, William A. P. Smith, Baris Gecer, and Stefanos Zafeiriou. Towards a complete 3D morphable model of the human head. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 43(11):4142-4160, 2021. 1, 2", + "[66] E. Richardson, M. Sela, and R. Kimmel. 3D face reconstruction by learning from synthetic data. In International Conference on 3D Vision (3DV), pages 460-469, 2016. 1, 2", + "[67] Sami Romdhani and Thomas Vetter. Estimating 3D shape and texture using pixel intensity, edges, specular highlights, texture constraints and aprior. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 986-993, 2005. 1, 2", + "[68] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention - MICCAI 2015 - 18th International Conference Munich, Germany, October 5 - 9, 2015, Proceedings, Part III, pages 234-241. Springer, 2015. 2, 3", + "[69] Zeyu Ruan, Changqing Zou, Longhai Wu, Gangshan Wu, and Limin Wang. SADRNet: Self-aligned dual face regression networks for robust 3d dense face alignment and reconstruction. IEEE Transactions on Image Processing, 30: 5793-5806, 2021. 2", + "[70] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael Black. Learning to regress 3D face shape and expression from an image without 3d supervision. In Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2", + "[71] Matan Sela, Elad Richardson, and Ron Kimmel. Unrestricted facial geometry reconstruction using image-to-image translation. In International Conference on Computer Vision (ICCV), pages 1576-1585, 2017. 2", + "[72] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Ming-min Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3D face reconstruction by occlusion-aware multiview geometry consistency. In European Conference on Computer Vision (ECCV), pages 53-70. Springer, 2020. 2", + "[73] William AP Smith. The perspective face shape ambiguity. In Perspectives in Shape Analysis, pages 299-319. Springer, 2016. 2", + "[74] Attila Szabó, Givi Meishvili, and Paolo Favaro. Unsupervised generative 3D shape learning from natural images. CoRR, abs/1910.00287, 2019. 2", + "[75] Ayush Tewari, Michael Zolloffer, Hyeongwoo Kim, Pablo Garrido, Florian Bernard, Patrick Perez, and Christian Theobalt. MoFA: Model-based deep convolutional face autoencoder for unsupervised monocular reconstruction. In In" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "2500", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ternational Conference on Computer Vision (ICCV), pages 1274-1283, 2017. 
1, 2", + "[76] Ayush Tewari, Michael Zollhöfer, Pablo Garrido, Florian Bernard, Hyeongwoo Kim, Patrick Pérez, and Christian Theobalt. Self-supervised multi-level face model learning for monocular reconstruction at over $250\\mathrm{~hz}$ . In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2549-2559, 2018. 3", + "[77] Ayush Tewari, Florian Bernard, Pablo Garrido, Gaurav Bharaj, Mohamed Elgharib, Hans-Peter Seidel, Patrick Pérez, Michael Zollhöfer, and Christian Theobalt. FML: face model learning from videos. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 10812-10822, 2019. 2, 3", + "[78] Justus Thies, Michael Zollhöfer, Matthias Nießner, Levi Valgaerts, Marc Stamminger, and Christian Theobalt. Real-time expression transfer for facial reenactment. ACM Trans. Graph., 34(6), 2015. 2", + "[79] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Facevr: Real-time facial reenactment and eye gaze control in virtual reality. arXiv preprint arXiv:1610.03151, 2016.", + "[80] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2Face: Real-time face capture and reenactment of RGB videos. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 2387-2395, 2016. 1, 2", + "[81] Antoine Toisoul, Jean Kossaifi, Adrian Bulat, Georgios Tzimiropoulos, and Maja Pantic. Estimation of continuous valence and arousal levels from faces in naturalistic conditions. Nature Machine Intelligence, 3(1):42-50, 2021. 7", + "[82] Anh Tuan Tran, Tal Hassner, Iacopo Masi, and Gerard Medioni. Regressing robust and discriminative 3D morphable models with a very deep neural network. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1599-1608, 2017. 1, 2", + "[83] Anh Tuan Tran, Tal Hassner, Iacopo Masi, Eran Paz, Yuval Nirkin, and Gérard Medioni. Extreme 3d face reconstruction: Seeing through occlusions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3935-3944, 2018. 2", + "[84] Luan Tran and Xiaoming Liu. Nonlinear 3d face morphable model. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7346-7355, 2018. 3", + "[85] Luan Tran, Feng Liu, and Xiaoming Liu. Towards high-fidelity nonlinear 3d face morphable model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1126-1135, 2019. 3", + "[86] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 6", + "[87] Huawei Wei, Shuang Liang, and Yichen Wei. 3D dense face alignment via graph convolution networks. arXiv preprint arXiv:1904.05562, 2019. 2", + "[88] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Matthew Johnson, Jingjing Shen, Nikola Milosavljevic, Daniel Wilde," + ], + "bbox": [ + 78, + 92, + 468, + 901 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Stephan J. Garbin, Toby Sharp, Ivan Stojiljkovic, Tom Cashman, and Julien P. C. Valentin. 3D face reconstruction with dense landmarks. In European Conference on Computer Vision (ECCV), pages 160-177. Springer, 2022. 2", + "[89] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3D objects from images in the wild. 
In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1-10, 2020. 2", + "[90] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction. In Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[91] Tarun Yenamandra, Ayush Tewari, Florian Bernard, Hans-Peter Seidel, Mohamed Elgharib, Daniel Cremers, and Christian Theobalt. i3dmm: Deep implicit 3d morphable model of human heads. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12803-12813, 2021. 2", + "[92] Xiaoxing Zeng, Xiaojiang Peng, and Yu Qiao. DF2Net: A dense-fine-finer network for detailed 3D face reconstruction. In International Conference on Computer Vision (ICCV), 2019. 2", + "[93] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 2", + "[94] Tianke Zhang, Xuangeng Chu, Yunfei Liu, Lijian Lin, Zhendong Yang, Zhengzhuo Xu, Chengkun Cao, Fei Yu, Changyin Zhou, Chun Yuan, et al. Accurate 3d face reconstruction with facial component tokens. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9033-9042, 2023. 2, 3", + "[95] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A. Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In IEEE International Conference on Computer Vision, ICCV 2017, Venice, Italy, October 22-29, 2017, pages 2242-2251. IEEE Computer Society, 2017. 3", + "[96] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z. Li. Face alignment across large poses: A 3D solution. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 146-155, 2016. 2", + "[97] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces. In European Conference on Computer Vision, pages 250–269, 2022. 2, 3, 6", + "[98] Michael Zollhöfer, Justus Thies, Darek Bradley, Pablo Garrido, Thabo Beeler, Patrick Pérez, Marc Stamminger, Matthias Nießner, and Christian Theobalt. State of the art on monocular 3D face reconstruction, tracking, and applications. Computer Graphics Forum, 2018. 1, 2" + ], + "bbox": [ + 501, + 92, + 890, + 838 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "2501", + "bbox": [ + 482, + 945, + 513, + 955 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/444ecb6f-5ab4-45bb-9d08-b2b359c08da3_model.json b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/444ecb6f-5ab4-45bb-9d08-b2b359c08da3_model.json new file mode 100644 index 0000000000000000000000000000000000000000..9be7f98c23b0979dc78e386ed604cadb78d3feb7 --- /dev/null +++ b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/444ecb6f-5ab4-45bb-9d08-b2b359c08da3_model.json @@ -0,0 +1,2666 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.179, + 0.131, + 0.793, + 0.154 + ], + "angle": 0, + "content": "3D Facial Expressions through Analysis-by-Neural-Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.174, + 0.778, + 0.21 + ], + "angle": 0, + "content": "George Retsinas1† Panagiotis P. Filntisis1† Radek Daněček3 Victoria F. Abrevaya3 Anastasios Roussos4 Timo Bolkart3* Petros Maragos1,2" + }, + { + "type": "text", + "bbox": [ + 0.293, + 0.219, + 0.676, + 0.233 + ], + "angle": 0, + "content": "\\(^{1}\\)Institute of Robotics, Athena Research Center, 15125 Maroussi, Greece" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.234, + 0.74, + 0.248 + ], + "angle": 0, + "content": "\\(^{2}\\)School of Electrical & Computer Engineering, National Technical University of Athens, Greece" + }, + { + "type": "text", + "bbox": [ + 0.353, + 0.248, + 0.618, + 0.262 + ], + "angle": 0, + "content": "\\(^{3}\\)MPI for Intelligent Systems, Tübingen, Germany" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.262, + 0.76, + 0.275 + ], + "angle": 0, + "content": "\\(^{4}\\)Institute of Computer Science (ICS), Foundation for Research & Technology - Hellas (FORTH), Greece" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.289, + 0.891, + 0.45 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.461, + 0.893, + 0.49 + ], + "angle": 0, + "content": "Figure 1. SMIRK reconstructs 3D faces from monocular images with facial geometry that faithfully recover extreme, asymmetric, and subtle expressions. Top: images of people with challenging expressions. Bottom: SMIRK reconstructions." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.502, + 0.314, + 0.518 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.539, + 0.474, + 0.903 + ], + "angle": 0, + "content": "While existing methods for 3D face reconstruction from in-the-wild images excel at recovering the overall face shape, they commonly miss subtle, extreme, asymmetric, or rarely observed expressions. We improve upon these methods with SMIRK (Spatial Modeling for Image-based Reconstruction of Kinesics), which faithfully reconstructs expressive 3D faces from images. We identify two key limitations in existing methods: shortcomings in their self-supervised training formulation, and a lack of expression diversity in the training images. For training, most methods employ differentiable rendering to compare a predicted face mesh with the input image, along with a plethora of additional loss functions. This differentiable rendering loss not only has to provide supervision to optimize for 3D face geometry, camera, albedo, and lighting, which is an ill-posed optimization problem, but the domain gap between rendering and input image further hinders the learning process. Instead, SMIRK replaces the differentiable rendering with a neural rendering module that, given the rendered predicted mesh geometry, and sparsely sampled pixels of the input image, generates a face image. As the neural rendering gets color information from sampled image pixels, supervising with neural rendering-based reconstruction loss can focus solely on the geometry. 
Further," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.504, + 0.895, + 0.671 + ], + "angle": 0, + "content": "it enables us to generate images of the input identity with varying expressions while training. These are then utilized as input to the reconstruction model and used as supervision with ground truth geometry. This effectively augments the training data and enhances the generalization for diverse expressions. Our qualitative, quantitative and particularly our perceptual evaluations demonstrate that SMIRK achieves the new state-of-the-art performance on accurate expression reconstruction. For our method's source code, demo video and more, please visit our project webpage: https://georgenetsi.github.io/smirk/." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.697, + 0.633, + 0.713 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.723, + 0.894, + 0.875 + ], + "angle": 0, + "content": "Reconstructing 3D faces from single images in-the-wild has been a central goal of computer vision for the last three decades [98] with practical implications in various fields including virtual and augmented reality, entertainment, and telecommunication. Commonly, these methods estimate the parameters of a 3D Morphable Model (3DMM) [12, 26], either through optimization [3, 6-8, 34, 67, 80] or regression with deep learning [16, 18, 20, 28, 29, 33, 46, 65, 66, 70, 75, 82]. Due to the lack of large-scale paired 2D-3D data, most learning-based methods follow a self-supervised train" + }, + { + "type": "page_footnote", + "bbox": [ + 0.524, + 0.888, + 0.771, + 0.901 + ], + "angle": 0, + "content": "† Equal contributions. * Now at Google." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2490" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.468, + 0.108 + ], + "angle": 0, + "content": "ing scheme using an analysis-by-synthesis approach [7, 75]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.11, + 0.468, + 0.291 + ], + "angle": 0, + "content": "Although there has been a persistent improvement in the accuracy of identity shape reconstruction, as indicated by established benchmarks [28, 70], the majority of works fail to capture the full range of facial expressions, including extreme, asymmetric, or subtle movements which are perceptually significant to humans -see e.g. Fig. 1. Recent works addressed this by augmenting the photometric error with image-based perceptual losses based on expert networks for emotion [18], lip reading [29], or face recognition [32], or with a GAN-inspired discriminator [61]. However, this requires a careful balancing of the different loss terms, and can often produce over-exaggerated facial expressions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.294, + 0.468, + 0.491 + ], + "angle": 0, + "content": "We argue here that the main problem is the shortcomings of the differentiable rendering loss. Jointly optimizing for geometry, camera, appearance, and lighting is an ill-posed optimization problem due to shape-camera [73] and albedo-lighting [25] ambiguities. Further the loss is negatively impacted by the large domain gap between natural input image and the rendering. 
The commonly employed Lambertian reflectance model is an over-simplistic approximation of the light-face interaction [26], and it is insufficient to account for hard self-shadows, unusual illumination environments, highly reflective skin, and differences in camera color patterns. This, in turn, can result in sub-optimal reconstructions by providing incorrect guidance during training." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.494, + 0.468, + 0.901 + ], + "angle": 0, + "content": "In this work, we introduce a simple but effective analysis-by-neural-synthesis supervision to improve the perceived quality of the reconstructed expressions. For this, we replace the differentiable rendering step of self-supervised approaches with an image-to-image translator based on U-Net [68]. Given a monochromatic rendering of the geometry together with sparsely sampled pixels of the input image, this U-Net generates an image which is then compared to the input image. Our key observation is that this neural rendering provides more accurate gradients for the task of expressive 3D face reconstruction. This approach has two advantages. First, by providing the rendered predicted mesh without appearance to the generator, the system is forced to rely on the geometry of the rendered mesh for recreating the input, leading to more faithful reconstructions. Second, the generator can create novel images, that modify the expression of the input. We leverage this while training with an expression consistency/augmentation loss. This renders a mesh of the input identity under a novel expression, renders an image with the generator, project the rendering through the encoder, and penalizes the difference between the augmented and the reconstructed expression parameters. By employing parameters from complex and extreme expressions captured under controlled laboratory settings, the network learns to handle non-typical expressions that are underrepresented in the data, promoting generalization. Our extensive experiments demonstrate that" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.168 + ], + "angle": 0, + "content": "SMIRK faithfully captures a wide range of facial expressions (Fig. 1), including challenging cases such as asymmetric and subtle expressions (e.g., smirking). This result is highlighted by the conducted user study, where SMIRK significantly outperformed all competing methods." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.168, + 0.892, + 0.258 + ], + "angle": 0, + "content": "In summary, our contributions are: 1) A method to faithfully recover expressive 3D faces from an input image.2) A novel analysis-by-neural-synthesis supervision that improves the quality of the reconstructed expressions. 3) A cycle-based expression consistency loss that augments expressions during training." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.272, + 0.642, + 0.287 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.297, + 0.892, + 0.463 + ], + "angle": 0, + "content": "Over the past two decades, the field of monocular 3D face reconstruction has witnessed extensive research and development [26, 98]. Model-free approaches directly regress 3D meshes [4, 19, 22, 27, 43, 69, 71, 74, 87, 89, 92] or voxels [41], or adapt a Signed Distance Function [17, 63, 91] for image fitting. These techniques commonly depend on extensive 3D training data, often generated using a 3D face model. 
However, this dependency can constrain their expressiveness due to limitations inherent to data creation [4, 19, 27, 41, 43, 69, 87] and disparities between synthetic and real images [22, 71, 92]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.464, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Many works estimate parameters of established 3D Morphable Models (3DMMs), like BFM [64], FaceWarehouse [14], or FLAME [53]. This can be achieved using direct optimization procedure in an analysis-by-synthesis framework [3, 6-8, 15, 30, 34, 47, 52, 65, 67, 78-80], but this needs to be applied on novel images every time, which is computationally expensive. Recent deep learning approaches offer fast and robust estimation of 3DMM parameters, using either supervised [16, 36, 46, 66, 82, 83, 94, 96, 97] or self-supervised training, for which different types of supervision have been proposed and used in combination, with the most important being the following: a) 2D landmarks supervision [20, 28, 55, 70, 72, 75-77, 90] is critical for coarse facial geometry and alignment, but is limited by the sparsity and potential inaccuracy of the predicted landmarks, particularly for complex expressions and poses. Methods that rely on dense landmarks [4, 88] overcome the sparsity problem but their accuracy is limited by the inherent ambiguity of dense correspondences across different faces. b) Photometric constraints [20, 28, 33, 72, 75-77, 90] are particularly effective for facial data, but are susceptible to alignment errors and depend on the quality of the rendered image. c) Perceptual losses have been proven beneficial in aligning the output with human perception [93]. Several methods make use of this by applying perceptual features losses of expert networks for identity recognition [20, 28, 32, 33, 72], emotion [18] or lip articulation [29, 37], but are hard to balance with other terms and can sometimes produce exaggerated results, particularly in terms of expressions." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "2491" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "We explore an alternative approach, where an image-to-image translation model is coupled with a simple photometric error, encouraging more nuanced details to be explained by the geometry." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.153, + 0.473, + 0.469 + ], + "angle": 0, + "content": "Closer to our work are methods that simultaneously train a regressor network and an appearance model to improve the photometric error signal. Booth et al. [10, 11] employ a 3DMM for shape estimation coupled with a PCA appearance model learned from images in-the-wild. Grecer et al. [32] extend this idea by using a GAN to model the facial appearance more effectively. [58, 76, 77, 84, 85] learn nonlinear models of shape and expression while training a regressor in a self-supervised manner. Lin et al. [54] refine an initial 3DMM texture while training the regressor. Several other works learn neural appearance models for faces from large datasets [5, 32, 48-50, 57]. In this work, we do not learn a new appearance model, but directly use a generator for better geometry supervision, achieving significantly improved expression estimation. Also related to this work are approaches that train a conditional generative model that transforms a rendering of a mesh model into a realistic image, e.g. [21, 23, 24, 35, 45, 62]. 
While their focus is on controllable image generation, we investigate here how a generator of average capacity can improve supervision for the task of 3D face reconstruction." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.482, + 0.427, + 0.499 + ], + "angle": 0, + "content": "3. Method: Analysis-by-Neural-Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.507, + 0.47, + 0.718 + ], + "angle": 0, + "content": "SMIRK is inspired by recent self-supervised face reconstruction methods [18, 28, 29, 94] that combine an analysis-by-synthesis approach with deep learning. While the majority of these works produce renderings based on linear statistical models and Lambertian reflectance, SMIRK contributes with a novel neural rendering module that bridges the domain gap between the input and the synthesized output. By minimizing this discrepancy, SMIRK enables a stronger supervision signal within an analysis-by-synthesis framework. Notably, this means that neural-network based losses such as perceptual [42], identity [20, 28], or emotion [18] can be used to compare the reconstructed and input images without the typical domain-gap problem that is present in most works." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.727, + 0.212, + 0.741 + ], + "angle": 0, + "content": "3.1. Architecture" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.47, + 0.9 + ], + "angle": 0, + "content": "Face Model: SMIRK employs FLAME [53] to model the 3D geometry of a face, which generates a mesh of \\( n_v = 5023 \\) vertices based on identity \\( \\beta \\) and expression \\( \\psi_{expr} \\) parameters, extended with two blendshapes \\( \\psi_{eye} \\) to account for eye closure [97], as well as jaw rotation \\( \\theta_{jaw} \\) parameters. Additionally, we consider the rigid pose \\( \\theta_{pose} \\) and the orthographic camera parameters \\( \\mathbf{c} \\). For brevity, we refer to all expression parameters (i.e. \\( \\psi_{expr} \\), \\( \\psi_{eye} \\) and \\( \\theta_{jaw} \\)) as \\( \\psi \\), and all global transformation parameters (i.e. \\( \\mathbf{c} \\) and \\( \\theta_{pose} \\)) as \\( \\theta \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.198 + ], + "angle": 0, + "content": "Encoder: The encoder \\( E(.) \\) is a deep neural network that takes an image \\( I \\) as input and regresses FLAME parameters. We separate \\( E \\) into three different branches, each consisting of a MobilenetV3 [39] backbone: 1) \\( E_{\\psi} \\), which predicts the expression parameters \\( \\psi \\), 2) \\( E_{\\beta} \\) that predicts the shape parameters \\( \\beta \\), and 3) \\( E_{\\theta} \\) that predicts the global transformation coefficients \\( \\theta \\). Formally," + }, + { + "type": "equation", + "bbox": [ + 0.552, + 0.211, + 0.892, + 0.228 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\theta} = E _ {\\boldsymbol {\\theta}} (I), \\quad \\beta = E _ {\\boldsymbol {\\beta}} (I), \\quad \\psi = E _ {\\psi} (I). \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.242, + 0.892, + 0.332 + ], + "angle": 0, + "content": "Since the main focus of this work is on improving facial expression reconstruction, we assume at train time that \\( E_{\\theta} \\) and \\( E_{\\beta} \\) were pre-trained and remain frozen. Note that unlike previous methods [18, 28, 29], \\( E \\) does not predict albedo parameters since the neural rendering module does not require such explicit information." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.333, + 0.892, + 0.469 + ], + "angle": 0, + "content": "Neural Renderer: The neural renderer is designed to replace traditional graphics-based rendering with an imaged-to-image convolutional network \\( T \\). The key idea here is to provide \\( T \\) with an input image where the face is masked out and only a small number of randomly sampled pixels within the mask remain, along with the predicted facial geometry from the encoder \\( E \\). By limiting the available relevant information from the input image, \\( T \\) is forced to rely on the predicted geometry from \\( E \\) to accurately reconstruct it." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.47, + 0.892, + 0.681 + ], + "angle": 0, + "content": "Formally, let \\( S = R(\\theta, \\beta, \\psi) \\) denote the output of the differentiable rasterization step, where \\( S \\) is the monochrome rendering of the reconstructed face mesh. The masking function \\( M(\\cdot) \\) is applied to the input image \\( I \\), masking out the face and retaining only a small amount of random pixels within the mask. \\( M(I) \\) is then concatenated with \\( S \\), and the resulting tensor is passed through the neural renderer \\( T \\) to produce a reconstruction of the original image \\( I' = T(S \\oplus M(I)) \\), where \\( \\oplus \\) denotes concatenation. A crucial property of this module is to assist the gradient flow towards the encoder. Therefore, we adopt a U-Net architecture [40, 68, 95] for \\( T \\), since the shortcuts will allow the gradient to flow uninterrupted towards \\( E \\) (an ablation study on this can be found in the Suppl. Mat.)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.693, + 0.852, + 0.71 + ], + "angle": 0, + "content": "3.2. Optimization of the SMIRK Components" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.717, + 0.892, + 0.793 + ], + "angle": 0, + "content": "SMIRK is supervised with two separate training passes: a reconstruction path and an augmented expression cycle path. We alternate between these passes on each training iteration, optimizing their respective losses. We describe each in the following subsections." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.816, + 0.697, + 0.83 + ], + "angle": 0, + "content": "3.2.1 Reconstruction Path" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In the reconstruction path (Fig. 2), the encoder \\(E\\) regresses FLAME parameters from the input image \\(I\\) and the resulting 3D face is rendered to obtain \\(S\\). Next, \\(I\\) is masked out using the masking function \\(M(\\cdot)\\), is concatenated with \\(S\\)," + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2492" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.079, + 0.088, + 0.473, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.214, + 0.471, + 0.299 + ], + "angle": 0, + "content": "Figure 2. Reconstruction pass. An input image is passed to the encoder which regresses FLAME and camera parameters. A 3D shape is reconstructed, rendered with a differentiable rasterizer and finally translated into the output domain with the image translation network. Then, standard self-supervised landmark, photometric and perceptual losses are computed." 
+ }, + { + "type": "image", + "bbox": [ + 0.097, + 0.311, + 0.455, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.438, + 0.471, + 0.482 + ], + "angle": 0, + "content": "Figure 3. Masking Process. An input image is masked to obscure the face (upper path), then we sample random pixels to be unmasked (lower path)" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.493, + 0.47, + 0.522 + ], + "angle": 0, + "content": "and fed into \\( T \\) to obtain a reconstruction of the input image \\( I' \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.523, + 0.47, + 0.765 + ], + "angle": 0, + "content": "Masking: To promote the reliance of \\( T \\) on the 3D rendered face for reconstructing \\( I \\), we need to mask out the face in the input image \\( I \\). We do that by using the convex hull of detected 2D landmarks [13], dilated so that it fully covers the face. However, without any information of the face interior, training the translator becomes challenging since texture information, such as skin color, facial hair or even accessories (e.g., glasses) are \"distractors\" that complicate training. To address this we randomly sample and retain a small amount of pixels (1%) that are used as guidance for the image reconstruction. Note that sampling too many pixels makes the reconstruction overly guided and the 3D rendered face does not control the reconstruction output. We observed a similar behavior when we tried to randomly mask out blocks of the image, as in [38]. The masking process is depicted in Fig. 3." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.469, + 0.795 + ], + "angle": 0, + "content": "Loss functions: The reconstruction path is supervised with the following loss functions:" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.795, + 0.469, + 0.826 + ], + "angle": 0, + "content": "Photometric loss. This is the L1 error between the input and the output images: \\(\\mathcal{L}_{photo} = \\| I' - I\\| _1\\)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.469, + 0.886 + ], + "angle": 0, + "content": "VGG loss. The VGG loss [42] has a similar effect to the photometric one, but helps to converge faster in the initial phases of training: \\(\\mathcal{L}_{vgg} = \\| \\Gamma(I') - \\Gamma(I)\\|_1\\), where \\(\\Gamma(.)\\) represents the VGG perceptual encoder." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Landmark loss. The landmark loss, denoted as \\( L_{lmk} = \\)" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.088, + 0.895, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.218, + 0.892, + 0.303 + ], + "angle": 0, + "content": "Figure 4. Augmented cycle pass. The FLAME expression parameters of an existing reconstruction are modified. The resulting modified face is then rendered using our neural renderer. The rendering is then passed to the face reconstruction encoder to regress the FLAME parameters and a consistency loss between the modified input and reconstructed FLAME parameters is computed." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.32, + 0.892, + 0.383 + ], + "angle": 0, + "content": "\\(\\sum_{i=1}^{K}\\left\\|\\mathbf{k}-\\mathbf{k}'\\right\\|_{2}^{2}\\), measures the \\(L_{2}\\) norm between the ground-truth 2D facial landmarks detected in the input image \\((\\mathbf{k})\\) and the 2D landmarks projected from the predicted 3D mesh \\((\\mathbf{k}')\\), summed over \\(K\\) landmarks." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.384, + 0.892, + 0.43 + ], + "angle": 0, + "content": "Expression Regularization. We employ an \\(L_{2}\\) regularization over the expression parameters \\(L_{reg} = \\|\\psi\\|_2^2\\), penalizing extreme, unrealistic expressions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.431, + 0.893, + 0.598 + ], + "angle": 0, + "content": "Emotion Loss. Finally, to obtain reconstructions that faithfully capture the emotional content, we employ an emotion loss \\(\\mathcal{L}_{emo}\\) based on features extracted from a pretrained emotion recognition network \\(P_{e}\\), as in EMOCA [18]: \\(\\mathcal{L}_{emo} = \\| P_e(I') - P_e(I)\\| _2^2\\). To prevent the image translator from adversarially optimizing the emotion loss by perturbing a few pixels, for this loss we keep the image translator \\(T\\) \"frozen\", optimizing only the expression encoder \\(E_{\\psi}\\). Note that unlike EMOCA, our framework ensures that the emotion loss does not suffer from domain gap problems, as the compared images reside in the same space." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.619, + 0.795, + 0.634 + ], + "angle": 0, + "content": "3.2.2 Augmented Expression Cycle Path" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.643, + 0.892, + 0.825 + ], + "angle": 0, + "content": "While the reconstruction path improves 3D reconstruction thanks to the better supervision signal provided by the neural module, it is still affected by a lack of expression diversity in the training datasets - a problem shared by all previous methods. This means for example that if a more complex lip structure, scarcely seen in the training data, cannot be reproduced fast enough by the encoder, the translator \\( T \\) could learn to correlate miss-aligned lip 3D structures and images and thus multiple similar, but distinct, facial expressions will be collapsed to a single reconstructed representation. Further, this may lead to the translator compensating for the encoder's failures during the joint optimization." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.903 + ], + "angle": 0, + "content": "These issues are addressed with the augmented expression cycle consistency path. In this path, we start from the predicted set \\(\\beta, \\psi, \\theta\\), and replace the original predicted expression \\(\\psi\\) with a new one \\(\\psi_{aug}\\). We then use the translator \\(T\\) to generate a photorealistic image \\(I_{aug}^{\\prime}\\) which adheres to" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2493" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.198 + ], + "angle": 0, + "content": "it. This process effectively synthesizes an augmented training pair of \\(\\psi_{aug}\\) and the corresponding output image \\(I_{aug}^{\\prime}\\). Then, the image is fed into \\(E\\) which should perfectly recover \\(\\psi_{aug}\\). 
A cycle consistency loss can now be directly applied in the expression parameter space of the 3D model, enforcing the predicted expression to be as close as possible to the initial one. This concept is illustrated in Fig. 4." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.199, + 0.471, + 0.486 + ], + "angle": 0, + "content": "The benefit of this cycle path is two-fold: 1) it reduces over-compensation errors via the consistency loss and 2) it promotes diverse expressions. The latter further helps consistency by avoiding the collapse of neighboring expressions into a single parameter representation. Concerning the consistency property, we can distinguish two overcompensating factors. First, during the joint optimization of the encoder and the translator, the latter can compensate when the encoder provides erroneous predictions, leading to an overall sub-par reconstruction. Second, if we discard the consistency loss, the expression will try to over-compensate erroneous shape/pose, since we assume the shape/pose parameters are predicted from an already trained system and they are not optimized in our framework. As an example, if the shape parameters do not fully capture an elongated nose, which is an identity characteristic of the person, the expression parameters may compensate this error. Such behavior is problematic because it entangles expression, shape and pose and adds undesired biases during training." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.489, + 0.47, + 0.685 + ], + "angle": 0, + "content": "Pixel Transfer: The masking process retains a small amount of pixels within the face area. However, when a new expression is introduced, the previously selected pixels need to be updated and transferred such that they correspond with the vertices of the new expression. This operation is referred to as pixel transfer, where we sample pixels from the initial image according to a selected set of vertices, we then find the new position of the same vertices for the updated expression, and we assign their position as the new pixel, with the initial pixel value. This avoids inconsistencies between the underlying structure of the pixels (initial expression) and the new expression, which would hinder realistic reconstructions in the cycle path." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.687, + 0.47, + 0.793 + ], + "angle": 0, + "content": "Promoting Diverse Expressions: Ideally, in this path we also want to promote high variations in the expression parameter space, generating shapes (and their corresponding images) with complex, rare and asymmetric expressions that are still plausible. To effectively augment the cycle path with interesting variations we consider the following augmentations:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.796, + 0.422, + 0.81 + ], + "angle": 0, + "content": "- Permutation: permute the expressions in a batch." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.811, + 0.47, + 0.841 + ], + "angle": 0, + "content": "- Perturbation: add non-trivial noise to the reconstructed expression parameters." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.841, + 0.469, + 0.902 + ], + "angle": 0, + "content": "- Template Injection: use expression templates of extreme expressions. 
To obtain such parameters for FLAME we perform direct iterative parameter fitting on the FaMoS [9] dataset which depicts multiple subjects per" + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.796, + 0.47, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.089, + 0.892, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.219, + 0.892, + 0.29 + ], + "angle": 0, + "content": "Figure 5. Neural expression augmentation. Our neural renderer enables us to modify the expression, generating a new image-3D training pair. We can edit the expression with random noise, permutation from other reconstructions, template injection, or zeroing." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.319, + 0.798, + 0.333 + ], + "angle": 0, + "content": "form extreme and asymmetric expressions." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.334, + 0.892, + 0.364 + ], + "angle": 0, + "content": "- Zero Expression: neutral expressions help avoid biasing the system towards complex cases." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.369, + 0.892, + 0.474 + ], + "angle": 0, + "content": "For all expression augmentations, we simultaneously simulate jaw and eyelid openings/closings, with more aggressive augmentations in the zero-expression case to avoid incompatible blending with intense expressions. Fig. 5 presents visual examples of all augmentations and the corresponding generated images from \\( T \\), showcasing its ability to generate realistic images with notable expression manipulation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.48, + 0.61, + 0.493 + ], + "angle": 0, + "content": "Loss functions:" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.499, + 0.892, + 0.56 + ], + "angle": 0, + "content": "Expression Consistency. The expression consistency loss, or cycle loss for brevity, is the mean-squared error between the given augmented expression parameters \\(\\psi_{aug}\\) and the predicted expressions at the end of the cycle path:" + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.581, + 0.892, + 0.6 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {e x p} = \\left\\| E _ {\\psi} \\left(T \\left(R (\\boldsymbol {\\theta}, \\boldsymbol {\\beta}, \\psi_ {a u g}) \\oplus M (I)\\right)\\right) - \\psi_ {a u g} \\right\\| _ {2} ^ {2} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.62, + 0.892, + 0.695 + ], + "angle": 0, + "content": "The pose/cam and shape parameters are kept as predicted by the initial image, namely \\(\\theta = E_{\\theta}(I)\\) and \\(\\beta = E_{\\beta}(I)\\). The internal \\(E_{\\psi}(I)\\) operation, inside the renderer \\(R(\\cdot)\\), does not allow gradients to flow through and is used as an off-the-self frozen module." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.7, + 0.892, + 0.79 + ], + "angle": 0, + "content": "Identity Consistency. To aid the translator in faithfully reconstructing the identity of the person, we introduce an additional consistency loss similar to Eq. 2, applied to the shape parameters \\(\\beta\\). Note that since the shape encoder \\(E_{\\beta}\\) is frozen, the consistency loss only affects the optimization of the translator \\(T\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Alternating Optimization: Overall, we alternate between the two passes, aiming to further reduce the effect of the translator compensating for the encoder. 
In more detail, during the augmented cycle pass, we freeze alternatively the encoder and the translator. Thus, this pass avoids the joint optimization of the two networks in a single step, acting as a regularizer to the other pass and enforcing consistency." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2494" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.09, + 0.165, + 0.106 + ], + "angle": 0, + "content": "4. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.115, + 0.47, + 0.176 + ], + "angle": 0, + "content": "We now present objective and subjective evaluations of our method, along with comparisons with recent state of the art. Additional experimental evaluations and visualizations can be found in our Suppl. Mat. and demo video." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.183, + 0.268, + 0.2 + ], + "angle": 0, + "content": "4.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.206, + 0.469, + 0.266 + ], + "angle": 0, + "content": "Training Datasets: We use the following datasets for training: FFHQ [44], CelebA [56], LRS3 [1], and MEAD [86]. LRS3 and MEAD are video datasets, and we randomly sample images from each video during training." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.267, + 0.469, + 0.342 + ], + "angle": 0, + "content": "SOTA Methods: We compare with the following recent state-of-the-art methods that have publicly available implementations: DECA [28] and EMOCA v2 [18, 29], which use the FLAME [53] model, and Deep3DFace [20] and FOCUS [51], which use the BFM [64] model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.343, + 0.469, + 0.417 + ], + "angle": 0, + "content": "Pretraining: Before the core training stage, all three encoders are pretrained, supervised by two losses - the landmark loss of the reconstruction for pose and expression and the shape predictions of MICA [97]. After that, \\( E_{\\beta} \\) and \\( E_{\\theta} \\) remain frozen." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.425, + 0.307, + 0.44 + ], + "angle": 0, + "content": "4.2. Quantitative Evaluations" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.448, + 0.469, + 0.584 + ], + "angle": 0, + "content": "It has been consistently reported [2, 18, 29, 31, 60] that evaluating facial expression reconstruction in terms of geometric metrics is ill-posed. The geometric errors tend to be dominated by the identity face shape and do not correlate well with human perception of facial expressions. Accordingly, we compare our method in a quantitative manner with three experiments: 1) emotion recognition accuracy [18], 2) ability of a model to guide a UNet to faithfully reconstruct an input image, and 3) a perceptual user study." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.585, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Emotion Recognition: Following the protocol of [18], we train an MLP to classify eight basic expressions and regress valence and arousal values using AffectNet [59]. We report Concordance Correlation Coefficient (CCC), root mean square error (RMSE), for both valence (V-) and arousal (A-), and expression classification accuracy (E-ACC). Results are found in Tab. 1. As it can be seen, SMIRK achieves a higher emotion recognition score compared to most other methods, although falling behind EMOCAv1/2 and Deep3DFace. 
It is worth noting that, although EMOCA v1 achieves the highest emotion accuracy, it often over-exaggerates expressions, which helps with emotion recognition. EMOCA v2, arguably a more accurate reconstruction model, performs slightly worse. Our main model is comparable with Deep3DFace and outperforms DECA and FOCUS. We can also train a model that scores better on emotion recognition by increasing the emotion loss weight. However, similarly to what was reported by Daněček et al. [18], this leads to undesirable artifacts. We discuss the trade-off between higher emotion recognition scores and reconstruction accuracy in more detail in the Suppl. Mat. Notably," + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.09, + 0.892, + 0.226 + ], + "angle": 0, + "content": "
ModelV-CCC ↑V-RMSE ↓A-CCC ↑A-RMSE ↓E-ACC ↑
MGCNet0.690.350.580.340.60
3DDFA-v20.620.390.500.340.52
Deep3DFace0.730.330.650.310.65
DECA0.690.360.580.330.59
FOCUS-CelebA0.690.350.540.330.58
EMOCA v10.770.310.680.300.68
EMOCA v20.760.330.660.300.66
SMIRK0.720.350.610.310.64
SMIRK w/o emo0.710.350.600.320.62
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.235, + 0.892, + 0.264 + ], + "angle": 0, + "content": "Table 1. Emotion recognition performance on the AffectNet test set [59]. We follow the same metrics as in [18]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.282, + 0.892, + 0.342 + ], + "angle": 0, + "content": "even without the emotion loss, the proposed model achieves a decent emotion recognition score, indicating that our reconstruction scheme can adequately capture emotions without the need for explicit perceptual supervision." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.343, + 0.893, + 0.705 + ], + "angle": 0, + "content": "Reconstruction Loss: In order to evaluate the faithfulness of a 3D face reconstruction technique, we have devised a protocol based on our analysis-by-neural-synthesis method. Under this protocol, we train a UNet image-to-image translator, but freeze the weights of the encoder so that only the translator is trained. The motivation is simple: if the 3D mesh is accurate enough, the reconstruction will be more faithful, due to a one-to-one appearance correspondence. For each method (including ours for fairness), we train a UNet for 5 epochs, using the masked image and the rendered 3D geometry as input. Finally, we report the \\( L_{1} \\) reconstruction loss and the VGG loss between the reconstructed image and the input image on the test set of AffectNet [59] which features subjects under multiple expressions. The results can be seen in Table 2. We observe here that using the information for the rendered shape geometry of SMIRK, the trained UNet achieves a more faithful reconstruction of the input image when compared to DECA and EMOCAv2. Particularly for EMOCAv2, we observe that although it can capture expressions, the results in many cases do not faithfully represent the input image, leading to an overall worse image reconstruction error. In terms of \\( L_{1} \\) loss, SMIRK is on par with Deep3DFace and FOCUS and has a small improvement in terms of VGG loss." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.717, + 0.888, + 0.759 + ], + "angle": 0, + "content": "
DECAEMOCAv2FOCUSDeep3DFaceSMIRK
L1 Loss ↓0.100.110.090.090.09
VGG Loss ↓0.800.840.780.780.76
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.77, + 0.892, + 0.812 + ], + "angle": 0, + "content": "Table 2. Image reconstruction performance on the AffectNet test set [59]. SMIRK achieves better reconstruction and perceptual scores compared to other methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.892, + 0.901 + ], + "angle": 0, + "content": "User Study: Arguably, the perception of the reconstructed facial expressions is the most important aspect in 3D face reconstruction, as it directly influences how well the reconstructed model captures the emotions and nuances of the original face. Considering this, we also designed a" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2495" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.09, + 0.881, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.504, + 0.893, + 0.534 + ], + "angle": 0, + "content": "Figure 6. Visual comparison of 3D face reconstruction. From left to right: Input, Deep3DFaceRecon[20], FOCUS[51], DECA[28], EMOCAv2[18], and SMIRK. Many more examples can also be found in the Suppl. Mat. and the demo video in our webpage." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.557, + 0.473, + 0.798 + ], + "angle": 0, + "content": "user study to assess the perception of the reconstructed facial expressions from human participants. We randomly selected 80 images from the AffectNet [59] test set (using the split from [81]) and 80 images from our MEAD test set (unseen subjects) and performed 3D face reconstruction with both SMIRK and its competitors. To mitigate bias w.r.t. the identity component for the FLAME-based methods, for DECA and EMOCAv2 we used the same identity parameters as our method (which itself was distilled from MICA). In the user study, participants were shown an image of a human face alongside two 3D face reconstructions, either from our method or the others, and were asked to choose the one with the most faithful facial expression representation. The order was randomized for each question, and each user answered a total of 32 questions, equally distributed among the different methods." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.47, + 0.903 + ], + "angle": 0, + "content": "A total of 85 users completed the study, and the results in Table 3 show that our method was significantly preferred over all competitors, confirming the performance of SMIRK in terms of faithful expressive 3D reconstruction. The results were statistically significant (for all pairs, \\( p < 0.01 \\) with binomial test, adjusted using the Bonfer" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.557, + 0.892, + 0.618 + ], + "angle": 0, + "content": "roni method). EMOCAv2, which also uses an emotion loss for expressive 3D reconstruction, was the closest competitor to our method, followed by FOCUS and Deep3D, while DECA was the least selected." + }, + { + "type": "table", + "bbox": [ + 0.532, + 0.632, + 0.858, + 0.665 + ], + "angle": 0, + "content": "
DECAEMOCAv2Deep3DFOCUS
SMIRK603/77461/219510/170534/146
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.672, + 0.892, + 0.714 + ], + "angle": 0, + "content": "Table 3. User study results: \"a/b\" indicates Ours (left) was preferred \\( a \\) times,while the competing method was chosen \\( b \\) times. SMIRK is overwhelmingly preferred over all other methods." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.754, + 0.666, + 0.77 + ], + "angle": 0, + "content": "4.3. Visual Examples" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In Fig. 6 we present multiple visual comparisons with the four other methods. As it can be visually assessed, our method can more accurately capture the facial expressions across multiple diverse subjects and conditions. Furthermore, the presented methodology can also capture expressions that other methods fail to capture, such as nonsymmetric mouth movements, eye closures, and exaggerated expressions." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2496" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.091, + 0.241, + 0.105 + ], + "angle": 0, + "content": "4.4. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.116, + 0.468, + 0.312 + ], + "angle": 0, + "content": "Ablation on the effect of landmarks: We first assess the effect of the landmark loss. To do that, we calculate for different versions of our model the L1 loss, VGG Loss, and Cycle loss after manipulation of expressions using the same protocol we performed in Sec. 4.2. Note that this time, we also evaluate performance by considering the cycle loss. That is, we also manipulate the predicted expressions, regenerate a new image, and expect that the method can successfully predict the same parameters. We consider three different versions of our model: 1) Protocol 1 - no landmarks loss, 2) Protocol 2 - training some epochs with landmarks loss and then removing it, 3) Protocol 3 - full training with landmarks loss. We present these results in Table 4." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.315, + 0.468, + 0.48 + ], + "angle": 0, + "content": "As we can see, completely omitting landmarks leads to degraded results. However, if we first train for a few epochs with landmarks and then set the loss weight to 0, the model achieves very similar performance with the original model which uses the loss throughout the full training. These results suggest that, in contrast with previous works [18, 28], the landmarks loss in SMIRK acts more as a regularizer during training, helping to guide the model towards good solutions, but in the later stages it may somewhat constrain its flexibility. We plan to explore this balance in more depth in future work." + }, + { + "type": "table", + "bbox": [ + 0.118, + 0.491, + 0.425, + 0.555 + ], + "angle": 0, + "content": "
L1 Loss ↓VGG Loss ↓Cycle Loss ↓
P10.1110.7570.588
P20.0930.7130.487
P30.0930.7140.544
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.561, + 0.468, + 0.603 + ], + "angle": 0, + "content": "Table 4. Ablation study on the effect of landmark loss. P1: no landmark loss, P2: landmark loss removed after a few epochs, P3: landmark loss throughout whole training." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Impact of Cycle Path: Here we also present examples on how the cycle path affects the reconstruction performance. First, we show an example result in Fig. 7, where we see that using the proposed augmentations provides more detailed expressions. For example, template injection augmentation considerably helps the reconstruction of the mouth structure. Secondly, we have also observed that the cycle path makes the model more robust, especially w.r.t. mouth closures (e.g. zero jaw opening). We show such indicative cases in Figure 8. Such artifacts can be seen when using the no-cycle variant, acting as a visual confirmation of the aforementioned numerical results. Here, the mouth is not properly closed in the 3D reconstructed face, since it was miss-corresponded to a properly closed mouth in the image reconstruction space. The cycle path can solve such instances by providing tweaked expressions that are enforced to be recognized correctly, avoiding \"misalignments\" between expected expressions and reconstructed images." + }, + { + "type": "image", + "bbox": [ + 0.58, + 0.09, + 0.814, + 0.162 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.172, + 0.892, + 0.201 + ], + "angle": 0, + "content": "Figure 7. Impact of cycle augmentations. From left to right: input image, no cycle loss, cycle loss with all augmentations." + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.207, + 0.892, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.322, + 0.892, + 0.378 + ], + "angle": 0, + "content": "Figure 8. Impact of the Cycle Path. Artifacts can appear when not training with the cycle path. From left to right: input image, 3D reconstruction and image reconstruction without cycle path, 3D reconstruction and image reconstruction with cycle path." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.394, + 0.625, + 0.408 + ], + "angle": 0, + "content": "4.5. Limitations" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.416, + 0.892, + 0.567 + ], + "angle": 0, + "content": "Despite the effectiveness of SMIRK, there are limitations to be addressed. It is sensitive to occlusions, as the training datasets do not include them, and assumes more intense expressions when parts are missing instead of extrapolating from available information. In addition, SMIRK has been trained on single images, and the temporal aspect is not yet explored. Also note that while SMIRK does not need to predict albedo and lighting, this can be limiting for specific applications in 3D facial animation and video editing. Please refer to the Suppl. Mat. for a more detailed discussion." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.581, + 0.618, + 0.595 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.606, + 0.892, + 0.772 + ], + "angle": 0, + "content": "We have presented SMIRK, a new paradigm for accurate expressive 3D face reconstruction from images. 
Instead of the traditional graphics-based approach for self-supervision which is commonly used for monocular 3D face reconstruction in-the-wild, SMIRK employs a neural image-to-image translator model, which learns to reconstruct the input face image given the rendered predicted facial geometry. Our extensive experimental results show that SMIRK outperforms previous methods and can faithfully reconstruct expressive 3D faces, including challenging complex expressions such as asymmetries, and subtle expressions such as smirking." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.785, + 0.659, + 0.802 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This research work was supported by the project \"Applied Research for Autonomous Robotic Systems\" (MIS 5200632) which is implemented within the framework of the National Recovery and Resilience Plan \"Greece 2.0\" (Measure: 16618- Basic and Applied Research) and is funded by the European Union- NextGenerationEU." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2497" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.469, + 0.157 + ], + "angle": 0, + "content": "[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. Lrs3-ted: a large-scale dataset for visual speech recognition. arXiv preprint arXiv:1809.00496, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.469, + 0.227 + ], + "angle": 0, + "content": "[2] Zakaria Aldeneh, Masha Fedzechkina, Skyler Seto, Katherine Metcalf, Miguel Sarabia, Nicholas Apostoloff, and Barry-John Theobald. Towards a Perceptual Model for Estimating the Quality of Visual Speech, 2022. arXiv:2203.10117 [cs, eess]. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.228, + 0.469, + 0.282 + ], + "angle": 0, + "content": "[3] Oswald Aldrian and William AP Smith. Inverse rendering of faces with a 3d morphable model. IEEE transactions on pattern analysis and machine intelligence, 35(5):1080-1093, 2012. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.285, + 0.469, + 0.366 + ], + "angle": 0, + "content": "[4] Riza Alp Guler, George Trigeorgis, Epameinondas Antonakos, Patrick Snape, Stefanos Zafeiriou, and Iasonas Kokkinos. Densereg: Fully convolutional dense shape regression in-the-wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6799-6808, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.369, + 0.469, + 0.437 + ], + "angle": 0, + "content": "[5] Haoran Bai, Di Kang, Haoxian Zhang, Jinshan Pan, and Linchao Bao. FFHQ-UV: Normalized facial uv-texture dataset for 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 362–371, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.439, + 0.469, + 0.506 + ], + "angle": 0, + "content": "[6] Anil Bas, William A. P. Smith, Timo Bolkart, and Stefanie Wuhrer. Fitting a 3D morphable model to edges: A comparison between hard and soft correspondences. In Asian Conference on Computer Vision Workshops, pages 377-391, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.509, + 0.469, + 0.564 + ], + "angle": 0, + "content": "[7] Volker Blanz and Thomas Vetter. 
A morphable model for the synthesis of 3D faces. In Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1999. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.565, + 0.469, + 0.633 + ], + "angle": 0, + "content": "[8] Volker Blanz, Sami Romdhani, and Thomas Vetter. Face identification across different poses and illuminations with a 3D morphable model. In International Conference on Automatic Face & Gesture Recognition (FG), pages 202-207, 2002. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.636, + 0.469, + 0.69 + ], + "angle": 0, + "content": "[9] Timo Bolkart, Tianye Li, and Michael J Black. Instant multiview head capture through learnable registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 768-779, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.692, + 0.469, + 0.761 + ], + "angle": 0, + "content": "[10] James Booth, Epameinondas Antonakos, Stylianos Ploumpis, George Trigeorgis, Yannis Panagakis, and Stefanos Zafeiriou. 3d face morphable models\" in-the-wild\". In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 48-57, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.762, + 0.469, + 0.843 + ], + "angle": 0, + "content": "[11] James Booth, Anastasios Roussos, Evangelos Ververas, Epameinondas Antonakos, Stylianos Ploumpis, Yannis Panagakis, and Stefanos Zafeiriou. 3D reconstruction of \"inthe-wild\" faces in images and videos. IEEE transactions on pattern analysis and machine intelligence, 40(11):2638-2652, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[12] Alan Brunton, Augusto Salazar, Timo Bolkart, and Stefanie Wuhrer. Review of statistical shape spaces for 3D data with comparative analysis for human faces. Computer Vision and Image Understanding (CVIU), 128:1-17, 2014. 1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.469, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.161 + ], + "angle": 0, + "content": "[13] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In Proceedings of the IEEE International Conference on Computer Vision, pages 1021-1030, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.163, + 0.892, + 0.218 + ], + "angle": 0, + "content": "[14] Chen Cao, Yanlin Weng, Shun Zhou, Yiying Tong, and Kun Zhou. Facewarehouse: A 3d facial expression database for visual computing. IEEE Transactions on Visualization and Computer Graphics, 20(3):413-425, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.219, + 0.892, + 0.272 + ], + "angle": 0, + "content": "[15] Chen Cao, Qiming Hou, and Kun Zhou. Displaced dynamic expression regression for real-time facial tracking and animation. Transactions on Graphics (TOG), 33(4):1-10, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.274, + 0.892, + 0.342 + ], + "angle": 0, + "content": "[16] Feng-Ju Chang, Anh Tuan Tran, Tal Hassner, Iacopo Masi, Ram Nevatia, and Gerard Medioni. ExpNet: Landmark-free, deep, 3D facial expressions. In International Conference on Automatic Face & Gesture Recognition (FG), pages 122-129, 2018. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.344, + 0.892, + 0.412 + ], + "angle": 0, + "content": "[17] Aggelina Chatziagapi, ShahRukh Athar, Francesc Moreno-Noguer, and Dimitris Samaras. Sider: Single-image neural optimization for facial geometric detail recovery. In 2021 International Conference on 3D Vision (3DV), pages 815-824. IEEE, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.414, + 0.892, + 0.481 + ], + "angle": 0, + "content": "[18] Radek Daneček, Michael J Black, and Timo Bolkart. EMOCA: Emotion driven monocular face capture and animation. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 20311-20322, 2022. 1, 2, 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.483, + 0.892, + 0.552 + ], + "angle": 0, + "content": "[19] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multilevel face localisation in the wild. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5203-5212, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.553, + 0.892, + 0.621 + ], + "angle": 0, + "content": "[20] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3D face reconstruction with weakly-supervised learning: From single image to image set. In Conference on Computer Vision and Pattern Recognition Workshops (CVPR-W), pages 285-295, 2019. 1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.623, + 0.892, + 0.691 + ], + "angle": 0, + "content": "[21] Zheng Ding, Xuaner Zhang, Zhihao Xia, Lars Jebe, Zhuowen Tu, and Xiuming Zhang. Diffusionrig: Learning personalized priors for facial appearance editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12736-12746, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.692, + 0.892, + 0.748 + ], + "angle": 0, + "content": "[22] Pengfei Dou, Shishir K. Shah, and Ioannis A. Kakadiaris. End-to-end 3D face reconstruction with deep neural networks. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 5908-5917, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.749, + 0.892, + 0.816 + ], + "angle": 0, + "content": "[23] Michail Christos Doukas, Mohammad Rami Koujan, Viktoria Sharmanska, Anastasios Roussos, and Stefanos Zafeiriou. Head2head++: Deep facial attributes re-targeting. IEEE Transactions on Biometrics, Behavior, and Identity Science, 3(1):31-43, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.818, + 0.892, + 0.872 + ], + "angle": 0, + "content": "[24] Michail Christos Doukas, Stefanos Zafeiriou, and Viktoriia Sharmanska. Headgan: One-shot neural head synthesis and editing. In Proceedings of the IEEE/CVF International conference on Computer Vision, pages 14398-14407, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[25] Bernhard Egger. Semantic Morphable Models. PhD thesis, University of Basel, 2018. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "2498" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.175 + ], + "angle": 0, + "content": "[26] Bernhard Egger, William A. P. 
Smith, Ayush Tewari, Stefanie Wuhrer, Michael Zollhoefer, Thabo Beeler, Florian Bernard, Timo Bolkart, Adam Kortylewski, Sami Romdhani, Christian Theobalt, Volker Blanz, and Thomas Vetter. 3D morphable face models—past, present, and future. Transactions on Graphics (TOG), 39(5), 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.177, + 0.47, + 0.232 + ], + "angle": 0, + "content": "[27] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In European Conference on Computer Vision (ECCV), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.233, + 0.469, + 0.287 + ], + "angle": 0, + "content": "[28] Yao Feng, Haiwen Feng, Michael J Black, and Timo Bolkart. Learning an animatable detailed 3D face model from in-the-wild images. Transactions on Graphics, (Proc. SIGGRAPH), 40(4):1-13, 2021. 1, 2, 3, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.289, + 0.469, + 0.37 + ], + "angle": 0, + "content": "[29] Panagiotis P. Filntisis, George Retsinas, Foivos Paraperas-Papantoniou, Athanasios Katsamanis, Anastasios Roussos, and Petros Maragos. SPECTRE: Visual speech-informed perceptual 3D facial expression reconstruction from videos. In Conference on Computer Vision and Pattern Recognition Workshops (CVPR-W), pages 5745-5755, 2023. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.372, + 0.469, + 0.44 + ], + "angle": 0, + "content": "[30] Pablo Garrido, Michael Zollhöfer, Dan Casas, Levi Valgaerts, Kiran Varanasi, Patrick Pérez, and Christian Theobalt. Reconstruction of personalized 3d face rigs from monocular video. ACM Transactions on Graphics (TOG), 35 (3):1-15, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.442, + 0.469, + 0.496 + ], + "angle": 0, + "content": "[31] Pablo Garrido, Michael Zollhöfer, Chenglei Wu, Derek Bradley, Patrick Pérez, Thabo Beeler, and Christian Theobalt. Corrective 3d reconstruction of lips from monocular video. ACM Trans. Graph., 35(6):219-1, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.498, + 0.469, + 0.566 + ], + "angle": 0, + "content": "[32] Baris Gecer, Stylianos Ploumpis, Irene Kotsia, and Stefanos Zafeiriou. GANFIT: Generative adversarial network fitting for high fidelity 3D face reconstruction. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1155-1164, 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.567, + 0.469, + 0.635 + ], + "angle": 0, + "content": "[33] Kyle Genova, Forrester Cole, Aaron Maschinot, Aaron Sarna, Daniel Vlasic, and William T. Freeman. Unsupervised training for 3D morphable model regression. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 8377-8386, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.637, + 0.469, + 0.705 + ], + "angle": 0, + "content": "[34] Thomas Gerg, Andreas Morel-Forster, Clemens Blumer, Bernhard Egger, Marcel Luthi, Sandro Schoenborn, and Thomas Vetter. Morphable face models - an open framework. In International Conference on Automatic Face & Gesture Recognition (FG), pages 75–82, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.707, + 0.469, + 0.761 + ], + "angle": 0, + "content": "[35] Partha Ghosh, Pravir Singh Gupta, Roy Uziel, Anurag Ranjan, Michael J Black, and Timo Bolkart. GIF: Generative interpretable faces. In 2020 International Conference on 3D Vision (3DV), pages 868-878. IEEE, 2020. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.763, + 0.469, + 0.817 + ], + "angle": 0, + "content": "[36] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[37] Shan He, Haonan He, Shuo Yang, Xiaoyan Wu, Pengcheng Xia, Bing Yin, Cong Liu, Lirong Dai, and Chang Xu. Speech4mesh: Speech-assisted monocular 3d facial reconstruction for speech-driven 3d facial animation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14192-14202, 2023. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[38] Xingzhe He, Bastian Wandt, and Helge Rhodin. Autolink: Self-supervised learning of human skeletons and object outlines by linking keypoints. Advances in Neural Information Processing Systems, 35:36123-36141, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.217 + ], + "angle": 0, + "content": "[39] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, et al. Searching for mobilenetv3. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1314-1324, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.219, + 0.892, + 0.259 + ], + "angle": 0, + "content": "[40] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A. Efros. Image-to-image translation with conditional adversarial networks. CoRR, abs/1611.07004, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.261, + 0.892, + 0.328 + ], + "angle": 0, + "content": "[41] Aaron S Jackson, Adrian Bulat, Vasileios Argyriou, and Georgios Tzimiropoulos. Large pose 3D face reconstruction from a single image via direct volumetric CNN regression. In International Conference on Computer Vision (ICCV), pages 1031-1039, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.33, + 0.892, + 0.41 + ], + "angle": 0, + "content": "[42] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 694-711. Springer, 2016. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.413, + 0.892, + 0.481 + ], + "angle": 0, + "content": "[43] Harim Jung, Myeong-Seok Oh, and Seong-Whan Lee. Learning free-form deformation for 3D face reconstruction from in-the-wild images. In International Conference on Systems, Man, and Cybernetics (SMC), pages 2737–2742, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.484, + 0.892, + 0.538 + ], + "angle": 0, + "content": "[44] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.54, + 0.892, + 0.607 + ], + "angle": 0, + "content": "[45] Hyeongwoo Kim, Pablo Garrido, Ayush Tewari, Weipeng Xu, Justus Thies, Matthias Nießner, Patrick Pérez, Christian Richardt, Michael Zolloffer, and Christian Theobalt. Deep video portraits. ACM Transactions on Graphics (TOG), 37 (4):163, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.609, + 0.892, + 0.678 + ], + "angle": 0, + "content": "[46] Hyeongwoo Kim, Michael Zollhöfer, Ayush Tewari, Justus Thies, Christian Richardt, and Christian Theobalt. Inverse-FaceNet: deep monocular inverse face rendering. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 4625-4634, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.679, + 0.892, + 0.734 + ], + "angle": 0, + "content": "[47] Tatsuro Koizumi and William A. P. Smith. \"look ma, no landmarks!\" - unsupervised, model-based dense face alignment. In European Conference on Computer Vision (ECCV), pages 690-706, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.735, + 0.892, + 0.817 + ], + "angle": 0, + "content": "[48] Alexandros Lattas, Stylianos Moschoglou, Baris Gecer, Stylianos Ploumpis, Vasileios Triantafyllou, Abhijeet Ghosh, and Stefanos Zafeiriou. AvatarMe: Realistically renderable 3d facial reconstruction\" in-the-wild\". In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 760-769, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.818, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[49] Alexandros Lattas, Stylianos Moschoglou, Stylianos Ploumpis, Baris Gecer, Jiankang Deng, and Stefanos Zafeiriou. Fitme: Deep photorealistic 3d morphable model avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8629-8640, 2023." + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "2499" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[50] Gun-Hee Lee and Seong-Whan Lee. Uncertainty-aware mesh decoder for high fidelity 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6100–6109, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.47, + 0.204 + ], + "angle": 0, + "content": "[51] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. To fit or not to fit: Model-based face reconstruction and occlusion segmentation from weak supervision. CoRR, abs/2106.09614, 2021. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.205, + 0.469, + 0.245 + ], + "angle": 0, + "content": "[52] Hao Li, Jihun Yu, Yuting Ye, and Chris Bregler. Realtime facial animation with on-the-fly correctives. Transactions on Graphics (TOG), 32(4):42-1, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.247, + 0.469, + 0.302 + ], + "angle": 0, + "content": "[53] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 
2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.303, + 0.469, + 0.37 + ], + "angle": 0, + "content": "[54] Jiangke Lin, Yi Yuan, Tianjia Shao, and Kun Zhou. Towards high-fidelity 3d face reconstruction from in-the-wild images using graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5891-5900, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.372, + 0.469, + 0.426 + ], + "angle": 0, + "content": "[55] Yaojie Liu, Amin Jourabloo, William Ren, and Xiaoming Liu. Dense face alignment. In International Conference on Computer Vision Workshops (ICCV-W), pages 1619-1628, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.428, + 0.47, + 0.481 + ], + "angle": 0, + "content": "[56] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.484, + 0.469, + 0.564 + ], + "angle": 0, + "content": "[57] Huiwen Luo, Koki Nagano, Han-Wei Kung, Qingguo Xu, Zejian Wang, Lingyu Wei, Liwen Hu, and Hao Li. Normalized avatar synthesis using stylegan and perceptual refinement. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11662–11672, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.567, + 0.469, + 0.635 + ], + "angle": 0, + "content": "[58] B.R. Mallikarjun, Ayush Tewari, Hans-Peter Seidel, Mohamed Elgharib, Christian Theobalt, et al. Learning complete 3d morphable face models from images and videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3361-3371, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.637, + 0.469, + 0.692 + ], + "angle": 0, + "content": "[59] Ali Mollahosseini, Behzad Hasani, and Mohammad H Ma-hoor. Affectnet: A database for facial expression, valence, and arousal computing in the wild. IEEE Transactions on Affective Computing, 10(1):18-31, 2017. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.693, + 0.469, + 0.734 + ], + "angle": 0, + "content": "[60] Masahiro Mori, Karl F MacDorman, and Norri Kageki. The uncanny valley [from the field]. IEEE Robotics & Automation magazine, 19(2):98-100, 2012. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.735, + 0.469, + 0.789 + ], + "angle": 0, + "content": "[61] Christopher Otto, Prashanth Chandran, Gaspard Zoss, Markus H. Gross, Paulo F. U. Gotardo, and Derek Bradley. A perceptual shape loss for monocular 3D face reconstruction. Computer Graphics Forum (Proc. Pacific Graphics), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.79, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[62] Foivos Paraperas Papantoniou, Panagiotis P Filntisis, Petros Maragos, and Anastasios Roussos. Neural emotion director: Speech-preserving semantic control of facial expressions in \"in-the-wild\" videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18781-18790, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.873, + 0.469, + 0.902 + ], + "angle": 0, + "content": "[63] Jeong Joon Park, Peter Florence, Julian Straub, Richard A. Newcombe, and Steven Lovegrove. 
DeepSDF: Learning" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "continuous signed distance functions for shape representation. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.137, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[64] Pascal Paysan, Reinhard Knothe, Brian Amberg, Sami Romdhani, and Thomas Vetter. A 3d face model for pose and illumination invariant face recognition. In 2009 sixth IEEE international conference on advanced video and signal based surveillance, pages 296-301. IEEE, 2009. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.207, + 0.892, + 0.29 + ], + "angle": 0, + "content": "[65] Stylianos Ploumpis, Evangelos Ververas, Eimear O' Sullivan, Stylianos Moschoglou, Haoyang Wang, Nick E. Pears, William A. P. Smith, Baris Gecer, and Stefanos Zafeiriou. Towards a complete 3D morphable model of the human head. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 43(11):4142-4160, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.292, + 0.892, + 0.333 + ], + "angle": 0, + "content": "[66] E. Richardson, M. Sela, and R. Kimmel. 3D face reconstruction by learning from synthetic data. In International Conference on 3D Vision (3DV), pages 460-469, 2016. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.335, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[67] Sami Romdhani and Thomas Vetter. Estimating 3D shape and texture using pixel intensity, edges, specular highlights, texture constraints and aprior. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 986-993, 2005. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.406, + 0.892, + 0.489 + ], + "angle": 0, + "content": "[68] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention - MICCAI 2015 - 18th International Conference Munich, Germany, October 5 - 9, 2015, Proceedings, Part III, pages 234-241. Springer, 2015. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.49, + 0.892, + 0.558 + ], + "angle": 0, + "content": "[69] Zeyu Ruan, Changqing Zou, Longhai Wu, Gangshan Wu, and Limin Wang. SADRNet: Self-aligned dual face regression networks for robust 3d dense face alignment and reconstruction. IEEE Transactions on Image Processing, 30: 5793-5806, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.561, + 0.892, + 0.628 + ], + "angle": 0, + "content": "[70] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael Black. Learning to regress 3D face shape and expression from an image without 3d supervision. In Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.632, + 0.892, + 0.687 + ], + "angle": 0, + "content": "[71] Matan Sela, Elad Richardson, and Ron Kimmel. Unrestricted facial geometry reconstruction using image-to-image translation. In International Conference on Computer Vision (ICCV), pages 1576-1585, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.689, + 0.892, + 0.758 + ], + "angle": 0, + "content": "[72] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Ming-min Zhen, Tian Fang, and Long Quan. 
Self-supervised monocular 3D face reconstruction by occlusion-aware multiview geometry consistency. In European Conference on Computer Vision (ECCV), pages 53-70. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.76, + 0.892, + 0.8 + ], + "angle": 0, + "content": "[73] William AP Smith. The perspective face shape ambiguity. In Perspectives in Shape Analysis, pages 299-319. Springer, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.803, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[74] Attila Szabó, Givi Meishvili, and Paolo Favaro. Unsupervised generative 3D shape learning from natural images. CoRR, abs/1910.00287, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[75] Ayush Tewari, Michael Zolloffer, Hyeongwoo Kim, Pablo Garrido, Florian Bernard, Patrick Perez, and Christian Theobalt. MoFA: Model-based deep convolutional face autoencoder for unsupervised monocular reconstruction. In In" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "2500" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.093, + 0.468, + 0.12 + ], + "angle": 0, + "content": "ternational Conference on Computer Vision (ICCV), pages 1274-1283, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.205 + ], + "angle": 0, + "content": "[76] Ayush Tewari, Michael Zollhöfer, Pablo Garrido, Florian Bernard, Hyeongwoo Kim, Patrick Pérez, and Christian Theobalt. Self-supervised multi-level face model learning for monocular reconstruction at over \\(250\\mathrm{~hz}\\). In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2549-2559, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.207, + 0.469, + 0.288 + ], + "angle": 0, + "content": "[77] Ayush Tewari, Florian Bernard, Pablo Garrido, Gaurav Bharaj, Mohamed Elgharib, Hans-Peter Seidel, Patrick Pérez, Michael Zollhöfer, and Christian Theobalt. FML: face model learning from videos. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 10812-10822, 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.291, + 0.469, + 0.346 + ], + "angle": 0, + "content": "[78] Justus Thies, Michael Zollhöfer, Matthias Nießner, Levi Valgaerts, Marc Stamminger, and Christian Theobalt. Real-time expression transfer for facial reenactment. ACM Trans. Graph., 34(6), 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.348, + 0.469, + 0.403 + ], + "angle": 0, + "content": "[79] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Facevr: Real-time facial reenactment and eye gaze control in virtual reality. arXiv preprint arXiv:1610.03151, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.405, + 0.469, + 0.473 + ], + "angle": 0, + "content": "[80] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2Face: Real-time face capture and reenactment of RGB videos. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 2387-2395, 2016. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.476, + 0.469, + 0.531 + ], + "angle": 0, + "content": "[81] Antoine Toisoul, Jean Kossaifi, Adrian Bulat, Georgios Tzimiropoulos, and Maja Pantic. 
Estimation of continuous valence and arousal levels from faces in naturalistic conditions. Nature Machine Intelligence, 3(1):42-50, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.532, + 0.469, + 0.601 + ], + "angle": 0, + "content": "[82] Anh Tuan Tran, Tal Hassner, Iacopo Masi, and Gerard Medioni. Regressing robust and discriminative 3D morphable models with a very deep neural network. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1599-1608, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.603, + 0.469, + 0.672 + ], + "angle": 0, + "content": "[83] Anh Tuan Tran, Tal Hassner, Iacopo Masi, Eran Paz, Yuval Nirkin, and Gérard Medioni. Extreme 3d face reconstruction: Seeing through occlusions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3935-3944, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.673, + 0.469, + 0.715 + ], + "angle": 0, + "content": "[84] Luan Tran and Xiaoming Liu. Nonlinear 3d face morphable model. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7346-7355, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.717, + 0.469, + 0.771 + ], + "angle": 0, + "content": "[85] Luan Tran, Feng Liu, and Xiaoming Liu. Towards high-fidelity nonlinear 3d face morphable model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1126-1135, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.773, + 0.469, + 0.829 + ], + "angle": 0, + "content": "[86] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.83, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[87] Huawei Wei, Shuang Liang, and Yichen Wei. 3D dense face alignment via graph convolution networks. arXiv preprint arXiv:1904.05562, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.902 + ], + "angle": 0, + "content": "[88] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Matthew Johnson, Jingjing Shen, Nikola Milosavljevic, Daniel Wilde," + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.469, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.148 + ], + "angle": 0, + "content": "Stephan J. Garbin, Toby Sharp, Ivan Stojiljkovic, Tom Cashman, and Julien P. C. Valentin. 3D face reconstruction with dense landmarks. In European Conference on Computer Vision (ECCV), pages 160-177. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.217 + ], + "angle": 0, + "content": "[89] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3D objects from images in the wild. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1-10, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.219, + 0.892, + 0.289 + ], + "angle": 0, + "content": "[90] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction. In Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.29, + 0.892, + 0.373 + ], + "angle": 0, + "content": "[91] Tarun Yenamandra, Ayush Tewari, Florian Bernard, Hans-Peter Seidel, Mohamed Elgharib, Daniel Cremers, and Christian Theobalt. i3dmm: Deep implicit 3d morphable model of human heads. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12803-12813, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.375, + 0.892, + 0.428 + ], + "angle": 0, + "content": "[92] Xiaoxing Zeng, Xiaojiang Peng, and Yu Qiao. DF2Net: A dense-fine-finer network for detailed 3D face reconstruction. In International Conference on Computer Vision (ICCV), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.431, + 0.892, + 0.5 + ], + "angle": 0, + "content": "[93] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.501, + 0.892, + 0.585 + ], + "angle": 0, + "content": "[94] Tianke Zhang, Xuangeng Chu, Yunfei Liu, Lijian Lin, Zhendong Yang, Zhengzhuo Xu, Chengkun Cao, Fei Yu, Changyin Zhou, Chun Yuan, et al. Accurate 3d face reconstruction with facial component tokens. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9033-9042, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.585, + 0.892, + 0.667 + ], + "angle": 0, + "content": "[95] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A. Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In IEEE International Conference on Computer Vision, ICCV 2017, Venice, Italy, October 22-29, 2017, pages 2242-2251. IEEE Computer Society, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.669, + 0.892, + 0.726 + ], + "angle": 0, + "content": "[96] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z. Li. Face alignment across large poses: A 3D solution. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 146-155, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.727, + 0.892, + 0.768 + ], + "angle": 0, + "content": "[97] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces. In European Conference on Computer Vision, pages 250–269, 2022. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.769, + 0.892, + 0.839 + ], + "angle": 0, + "content": "[98] Michael Zollhöfer, Justus Thies, Darek Bradley, Pablo Garrido, Thabo Beeler, Patrick Pérez, Marc Stamminger, Matthias Nießner, and Christian Theobalt. State of the art on monocular 3D face reconstruction, tracking, and applications. Computer Graphics Forum, 2018. 
1, 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.839 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.514, + 0.957 + ], + "angle": 0, + "content": "2501" + } + ] +] \ No newline at end of file diff --git a/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/444ecb6f-5ab4-45bb-9d08-b2b359c08da3_origin.pdf b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/444ecb6f-5ab4-45bb-9d08-b2b359c08da3_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f6dabb539505d2b5ea93b98cdc597fa97f3076d7 --- /dev/null +++ b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/444ecb6f-5ab4-45bb-9d08-b2b359c08da3_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4511e611f59bcf4b8b140f18a2c8f9f38efbb7652c363e298fb7f65de09001fa +size 8969949 diff --git a/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/full.md b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d5a3a18ba8418cdbbfa88832efbebecf0047feca --- /dev/null +++ b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/full.md @@ -0,0 +1,332 @@ +# 3D Facial Expressions through Analysis-by-Neural-Synthesis + +George Retsinas1† Panagiotis P. Filntisis1† Radek Daněček3 Victoria F. Abrevaya3 Anastasios Roussos4 Timo Bolkart3* Petros Maragos1,2 + +$^{1}$ Institute of Robotics, Athena Research Center, 15125 Maroussi, Greece + +$^{2}$ School of Electrical & Computer Engineering, National Technical University of Athens, Greece + +$^{3}$ MPI for Intelligent Systems, Tübingen, Germany + +$^{4}$ Institute of Computer Science (ICS), Foundation for Research & Technology - Hellas (FORTH), Greece + +![](images/da1a4cae32a6366d3ca8377814176725aa5cd0467fc824055cc8ccc1bf9dca84.jpg) +Figure 1. SMIRK reconstructs 3D faces from monocular images with facial geometry that faithfully recover extreme, asymmetric, and subtle expressions. Top: images of people with challenging expressions. Bottom: SMIRK reconstructions. + +# Abstract + +While existing methods for 3D face reconstruction from in-the-wild images excel at recovering the overall face shape, they commonly miss subtle, extreme, asymmetric, or rarely observed expressions. We improve upon these methods with SMIRK (Spatial Modeling for Image-based Reconstruction of Kinesics), which faithfully reconstructs expressive 3D faces from images. We identify two key limitations in existing methods: shortcomings in their self-supervised training formulation, and a lack of expression diversity in the training images. For training, most methods employ differentiable rendering to compare a predicted face mesh with the input image, along with a plethora of additional loss functions. This differentiable rendering loss not only has to provide supervision to optimize for 3D face geometry, camera, albedo, and lighting, which is an ill-posed optimization problem, but the domain gap between rendering and input image further hinders the learning process. Instead, SMIRK replaces the differentiable rendering with a neural rendering module that, given the rendered predicted mesh geometry, and sparsely sampled pixels of the input image, generates a face image. As the neural rendering gets color information from sampled image pixels, supervising with neural rendering-based reconstruction loss can focus solely on the geometry. 
Further, + +it enables us to generate images of the input identity with varying expressions while training. These are then utilized as input to the reconstruction model and used as supervision with ground truth geometry. This effectively augments the training data and enhances the generalization for diverse expressions. Our qualitative, quantitative and particularly our perceptual evaluations demonstrate that SMIRK achieves the new state-of-the-art performance on accurate expression reconstruction. For our method's source code, demo video and more, please visit our project webpage: https://georgenetsi.github.io/smirk/. + +# 1. Introduction + +Reconstructing 3D faces from single images in-the-wild has been a central goal of computer vision for the last three decades [98] with practical implications in various fields including virtual and augmented reality, entertainment, and telecommunication. Commonly, these methods estimate the parameters of a 3D Morphable Model (3DMM) [12, 26], either through optimization [3, 6-8, 34, 67, 80] or regression with deep learning [16, 18, 20, 28, 29, 33, 46, 65, 66, 70, 75, 82]. Due to the lack of large-scale paired 2D-3D data, most learning-based methods follow a self-supervised train + +ing scheme using an analysis-by-synthesis approach [7, 75]. + +Although there has been a persistent improvement in the accuracy of identity shape reconstruction, as indicated by established benchmarks [28, 70], the majority of works fail to capture the full range of facial expressions, including extreme, asymmetric, or subtle movements which are perceptually significant to humans -see e.g. Fig. 1. Recent works addressed this by augmenting the photometric error with image-based perceptual losses based on expert networks for emotion [18], lip reading [29], or face recognition [32], or with a GAN-inspired discriminator [61]. However, this requires a careful balancing of the different loss terms, and can often produce over-exaggerated facial expressions. + +We argue here that the main problem is the shortcomings of the differentiable rendering loss. Jointly optimizing for geometry, camera, appearance, and lighting is an ill-posed optimization problem due to shape-camera [73] and albedo-lighting [25] ambiguities. Further the loss is negatively impacted by the large domain gap between natural input image and the rendering. The commonly employed Lambertian reflectance model is an over-simplistic approximation of the light-face interaction [26], and it is insufficient to account for hard self-shadows, unusual illumination environments, highly reflective skin, and differences in camera color patterns. This, in turn, can result in sub-optimal reconstructions by providing incorrect guidance during training. + +In this work, we introduce a simple but effective analysis-by-neural-synthesis supervision to improve the perceived quality of the reconstructed expressions. For this, we replace the differentiable rendering step of self-supervised approaches with an image-to-image translator based on U-Net [68]. Given a monochromatic rendering of the geometry together with sparsely sampled pixels of the input image, this U-Net generates an image which is then compared to the input image. Our key observation is that this neural rendering provides more accurate gradients for the task of expressive 3D face reconstruction. This approach has two advantages. 
First, by providing the rendered predicted mesh without appearance to the generator, the system is forced to rely on the geometry of the rendered mesh for recreating the input, leading to more faithful reconstructions. Second, the generator can create novel images, that modify the expression of the input. We leverage this while training with an expression consistency/augmentation loss. This renders a mesh of the input identity under a novel expression, renders an image with the generator, project the rendering through the encoder, and penalizes the difference between the augmented and the reconstructed expression parameters. By employing parameters from complex and extreme expressions captured under controlled laboratory settings, the network learns to handle non-typical expressions that are underrepresented in the data, promoting generalization. Our extensive experiments demonstrate that + +SMIRK faithfully captures a wide range of facial expressions (Fig. 1), including challenging cases such as asymmetric and subtle expressions (e.g., smirking). This result is highlighted by the conducted user study, where SMIRK significantly outperformed all competing methods. + +In summary, our contributions are: 1) A method to faithfully recover expressive 3D faces from an input image.2) A novel analysis-by-neural-synthesis supervision that improves the quality of the reconstructed expressions. 3) A cycle-based expression consistency loss that augments expressions during training. + +# 2. Related Work + +Over the past two decades, the field of monocular 3D face reconstruction has witnessed extensive research and development [26, 98]. Model-free approaches directly regress 3D meshes [4, 19, 22, 27, 43, 69, 71, 74, 87, 89, 92] or voxels [41], or adapt a Signed Distance Function [17, 63, 91] for image fitting. These techniques commonly depend on extensive 3D training data, often generated using a 3D face model. However, this dependency can constrain their expressiveness due to limitations inherent to data creation [4, 19, 27, 41, 43, 69, 87] and disparities between synthetic and real images [22, 71, 92]. + +Many works estimate parameters of established 3D Morphable Models (3DMMs), like BFM [64], FaceWarehouse [14], or FLAME [53]. This can be achieved using direct optimization procedure in an analysis-by-synthesis framework [3, 6-8, 15, 30, 34, 47, 52, 65, 67, 78-80], but this needs to be applied on novel images every time, which is computationally expensive. Recent deep learning approaches offer fast and robust estimation of 3DMM parameters, using either supervised [16, 36, 46, 66, 82, 83, 94, 96, 97] or self-supervised training, for which different types of supervision have been proposed and used in combination, with the most important being the following: a) 2D landmarks supervision [20, 28, 55, 70, 72, 75-77, 90] is critical for coarse facial geometry and alignment, but is limited by the sparsity and potential inaccuracy of the predicted landmarks, particularly for complex expressions and poses. Methods that rely on dense landmarks [4, 88] overcome the sparsity problem but their accuracy is limited by the inherent ambiguity of dense correspondences across different faces. b) Photometric constraints [20, 28, 33, 72, 75-77, 90] are particularly effective for facial data, but are susceptible to alignment errors and depend on the quality of the rendered image. c) Perceptual losses have been proven beneficial in aligning the output with human perception [93]. 
Several methods make use of this by applying perceptual features losses of expert networks for identity recognition [20, 28, 32, 33, 72], emotion [18] or lip articulation [29, 37], but are hard to balance with other terms and can sometimes produce exaggerated results, particularly in terms of expressions. + +We explore an alternative approach, where an image-to-image translation model is coupled with a simple photometric error, encouraging more nuanced details to be explained by the geometry. + +Closer to our work are methods that simultaneously train a regressor network and an appearance model to improve the photometric error signal. Booth et al. [10, 11] employ a 3DMM for shape estimation coupled with a PCA appearance model learned from images in-the-wild. Grecer et al. [32] extend this idea by using a GAN to model the facial appearance more effectively. [58, 76, 77, 84, 85] learn nonlinear models of shape and expression while training a regressor in a self-supervised manner. Lin et al. [54] refine an initial 3DMM texture while training the regressor. Several other works learn neural appearance models for faces from large datasets [5, 32, 48-50, 57]. In this work, we do not learn a new appearance model, but directly use a generator for better geometry supervision, achieving significantly improved expression estimation. Also related to this work are approaches that train a conditional generative model that transforms a rendering of a mesh model into a realistic image, e.g. [21, 23, 24, 35, 45, 62]. While their focus is on controllable image generation, we investigate here how a generator of average capacity can improve supervision for the task of 3D face reconstruction. + +# 3. Method: Analysis-by-Neural-Synthesis + +SMIRK is inspired by recent self-supervised face reconstruction methods [18, 28, 29, 94] that combine an analysis-by-synthesis approach with deep learning. While the majority of these works produce renderings based on linear statistical models and Lambertian reflectance, SMIRK contributes with a novel neural rendering module that bridges the domain gap between the input and the synthesized output. By minimizing this discrepancy, SMIRK enables a stronger supervision signal within an analysis-by-synthesis framework. Notably, this means that neural-network based losses such as perceptual [42], identity [20, 28], or emotion [18] can be used to compare the reconstructed and input images without the typical domain-gap problem that is present in most works. + +# 3.1. Architecture + +Face Model: SMIRK employs FLAME [53] to model the 3D geometry of a face, which generates a mesh of $n_v = 5023$ vertices based on identity $\beta$ and expression $\psi_{expr}$ parameters, extended with two blendshapes $\psi_{eye}$ to account for eye closure [97], as well as jaw rotation $\theta_{jaw}$ parameters. Additionally, we consider the rigid pose $\theta_{pose}$ and the orthographic camera parameters $\mathbf{c}$ . For brevity, we refer to all expression parameters (i.e. $\psi_{expr}$ , $\psi_{eye}$ and $\theta_{jaw}$ ) as $\psi$ , and all global transformation parameters (i.e. $\mathbf{c}$ and $\theta_{pose}$ ) as $\theta$ . + +Encoder: The encoder $E(.)$ is a deep neural network that takes an image $I$ as input and regresses FLAME parameters. 
We separate $E$ into three different branches, each consisting of a MobilenetV3 [39] backbone: 1) $E_{\psi}$ , which predicts the expression parameters $\psi$ , 2) $E_{\beta}$ that predicts the shape parameters $\beta$ , and 3) $E_{\theta}$ that predicts the global transformation coefficients $\theta$ . Formally, + +$$ +\boldsymbol {\theta} = E _ {\boldsymbol {\theta}} (I), \quad \beta = E _ {\boldsymbol {\beta}} (I), \quad \psi = E _ {\psi} (I). \tag {1} +$$ + +Since the main focus of this work is on improving facial expression reconstruction, we assume at train time that $E_{\theta}$ and $E_{\beta}$ were pre-trained and remain frozen. Note that unlike previous methods [18, 28, 29], $E$ does not predict albedo parameters since the neural rendering module does not require such explicit information. + +Neural Renderer: The neural renderer is designed to replace traditional graphics-based rendering with an imaged-to-image convolutional network $T$ . The key idea here is to provide $T$ with an input image where the face is masked out and only a small number of randomly sampled pixels within the mask remain, along with the predicted facial geometry from the encoder $E$ . By limiting the available relevant information from the input image, $T$ is forced to rely on the predicted geometry from $E$ to accurately reconstruct it. + +Formally, let $S = R(\theta, \beta, \psi)$ denote the output of the differentiable rasterization step, where $S$ is the monochrome rendering of the reconstructed face mesh. The masking function $M(\cdot)$ is applied to the input image $I$ , masking out the face and retaining only a small amount of random pixels within the mask. $M(I)$ is then concatenated with $S$ , and the resulting tensor is passed through the neural renderer $T$ to produce a reconstruction of the original image $I' = T(S \oplus M(I))$ , where $\oplus$ denotes concatenation. A crucial property of this module is to assist the gradient flow towards the encoder. Therefore, we adopt a U-Net architecture [40, 68, 95] for $T$ , since the shortcuts will allow the gradient to flow uninterrupted towards $E$ (an ablation study on this can be found in the Suppl. Mat.). + +# 3.2. Optimization of the SMIRK Components + +SMIRK is supervised with two separate training passes: a reconstruction path and an augmented expression cycle path. We alternate between these passes on each training iteration, optimizing their respective losses. We describe each in the following subsections. + +# 3.2.1 Reconstruction Path + +In the reconstruction path (Fig. 2), the encoder $E$ regresses FLAME parameters from the input image $I$ and the resulting 3D face is rendered to obtain $S$ . Next, $I$ is masked out using the masking function $M(\cdot)$ , is concatenated with $S$ , + +![](images/ae57b5a2b0a98fb6d122a38935d91b4c8f7f943dda471eb9bd2d14d944b75f9b.jpg) +Figure 2. Reconstruction pass. An input image is passed to the encoder which regresses FLAME and camera parameters. A 3D shape is reconstructed, rendered with a differentiable rasterizer and finally translated into the output domain with the image translation network. Then, standard self-supervised landmark, photometric and perceptual losses are computed. + +![](images/68f7f94d2ee9d10a002d3ded380cbd5b1af7065263f18387c8b4cbd12c3a28b6.jpg) +Figure 3. Masking Process. An input image is masked to obscure the face (upper path), then we sample random pixels to be unmasked (lower path) + +and fed into $T$ to obtain a reconstruction of the input image $I'$ . 
+ +Masking: To promote the reliance of $T$ on the 3D rendered face for reconstructing $I$ , we need to mask out the face in the input image $I$ . We do that by using the convex hull of detected 2D landmarks [13], dilated so that it fully covers the face. However, without any information of the face interior, training the translator becomes challenging since texture information, such as skin color, facial hair or even accessories (e.g., glasses) are "distractors" that complicate training. To address this we randomly sample and retain a small amount of pixels (1%) that are used as guidance for the image reconstruction. Note that sampling too many pixels makes the reconstruction overly guided and the 3D rendered face does not control the reconstruction output. We observed a similar behavior when we tried to randomly mask out blocks of the image, as in [38]. The masking process is depicted in Fig. 3. + +Loss functions: The reconstruction path is supervised with the following loss functions: + +Photometric loss. This is the L1 error between the input and the output images: $\mathcal{L}_{photo} = \| I' - I\| _1$ + +VGG loss. The VGG loss [42] has a similar effect to the photometric one, but helps to converge faster in the initial phases of training: $\mathcal{L}_{vgg} = \| \Gamma(I') - \Gamma(I)\|_1$ , where $\Gamma(.)$ represents the VGG perceptual encoder. + +Landmark loss. The landmark loss, denoted as $L_{lmk} =$ + +![](images/dbbdb408efd0df96af2959913adf0fc4a9b71d8abe0fc12ce120a70981504ed3.jpg) +Figure 4. Augmented cycle pass. The FLAME expression parameters of an existing reconstruction are modified. The resulting modified face is then rendered using our neural renderer. The rendering is then passed to the face reconstruction encoder to regress the FLAME parameters and a consistency loss between the modified input and reconstructed FLAME parameters is computed. + +$\sum_{i=1}^{K}\left\|\mathbf{k}-\mathbf{k}'\right\|_{2}^{2}$ , measures the $L_{2}$ norm between the ground-truth 2D facial landmarks detected in the input image $(\mathbf{k})$ and the 2D landmarks projected from the predicted 3D mesh $(\mathbf{k}')$ , summed over $K$ landmarks. + +Expression Regularization. We employ an $L_{2}$ regularization over the expression parameters $L_{reg} = \|\psi\|_2^2$ , penalizing extreme, unrealistic expressions. + +Emotion Loss. Finally, to obtain reconstructions that faithfully capture the emotional content, we employ an emotion loss $\mathcal{L}_{emo}$ based on features extracted from a pretrained emotion recognition network $P_{e}$ , as in EMOCA [18]: $\mathcal{L}_{emo} = \| P_e(I') - P_e(I)\| _2^2$ . To prevent the image translator from adversarially optimizing the emotion loss by perturbing a few pixels, for this loss we keep the image translator $T$ "frozen", optimizing only the expression encoder $E_{\psi}$ . Note that unlike EMOCA, our framework ensures that the emotion loss does not suffer from domain gap problems, as the compared images reside in the same space. + +# 3.2.2 Augmented Expression Cycle Path + +While the reconstruction path improves 3D reconstruction thanks to the better supervision signal provided by the neural module, it is still affected by a lack of expression diversity in the training datasets - a problem shared by all previous methods. 
This means, for example, that if a complex lip structure that is scarcely seen in the training data cannot be reproduced quickly enough by the encoder, the translator $T$ could learn to correlate misaligned 3D lip structures with images, and thus multiple similar, but distinct, facial expressions would be collapsed to a single reconstructed representation. Further, this may lead to the translator compensating for the encoder's failures during the joint optimization. + +These issues are addressed with the augmented expression cycle consistency path. In this path, we start from the predicted set $\beta, \psi, \theta$, and replace the original predicted expression $\psi$ with a new one $\psi_{aug}$. We then use the translator $T$ to generate a photorealistic image $I_{aug}^{\prime}$ which adheres to it. This process effectively synthesizes an augmented training pair of $\psi_{aug}$ and the corresponding output image $I_{aug}^{\prime}$. Then, the image is fed into $E$, which should perfectly recover $\psi_{aug}$. A cycle consistency loss can now be directly applied in the expression parameter space of the 3D model, enforcing the predicted expression to be as close as possible to the initial one. This concept is illustrated in Fig. 4. + +The benefit of this cycle path is two-fold: 1) it reduces over-compensation errors via the consistency loss and 2) it promotes diverse expressions. The latter further helps consistency by avoiding the collapse of neighboring expressions into a single parameter representation. Concerning the consistency property, we can distinguish two sources of over-compensation. First, during the joint optimization of the encoder and the translator, the latter can compensate when the encoder provides erroneous predictions, leading to an overall sub-par reconstruction. Second, if we discard the consistency loss, the expression parameters will try to compensate for erroneous shape/pose, since we assume the shape/pose parameters are predicted from an already trained system and are not optimized in our framework. As an example, if the shape parameters do not fully capture an elongated nose, which is an identity characteristic of the person, the expression parameters may compensate for this error. Such behavior is problematic because it entangles expression, shape and pose and adds undesired biases during training. + +Pixel Transfer: The masking process retains a small number of pixels within the face area. However, when a new expression is introduced, the previously selected pixels need to be updated and transferred such that they correspond to the vertices of the new expression. We refer to this operation as pixel transfer: we sample pixels from the initial image at a selected set of vertices, find the new positions of the same vertices under the updated expression, and assign the initial pixel values to these new positions. This avoids inconsistencies between the underlying structure of the pixels (initial expression) and the new expression, which would hinder realistic reconstructions in the cycle path. + +Promoting Diverse Expressions: Ideally, in this path we also want to promote high variations in the expression parameter space, generating shapes (and their corresponding images) with complex, rare and asymmetric expressions that are still plausible. To effectively augment the cycle path with interesting variations we consider the following augmentations: + +- Permutation: permute the expressions within a batch.
+- Perturbation: add non-trivial noise to the reconstructed expression parameters. +- Template Injection: use templates of extreme expressions. To obtain such parameters for FLAME, we perform direct iterative parameter fitting on the FaMoS [9] dataset, which depicts multiple subjects performing extreme and asymmetric expressions. +- Zero Expression: neutral expressions help avoid biasing the system towards complex cases. + +For all expression augmentations, we simultaneously simulate jaw and eyelid openings/closings, with more aggressive augmentations in the zero-expression case to avoid incompatible blending with intense expressions. Fig. 5 presents visual examples of all augmentations and the corresponding generated images from $T$, showcasing its ability to generate realistic images with notable expression manipulation. + +![](images/796c7fd999bd73be834afc7b28dd0712b4c9c2b30db24e8ece381c66515d6acf.jpg) +Figure 5. Neural expression augmentation. Our neural renderer enables us to modify the expression, generating a new image-3D training pair. We can edit the expression with random noise, permutation from other reconstructions, template injection, or zeroing. + +Loss functions: + +Expression Consistency. The expression consistency loss, or cycle loss for brevity, is the mean-squared error between the given augmented expression parameters $\psi_{aug}$ and the predicted expressions at the end of the cycle path: + +$$ +\mathcal{L}_{exp} = \left\| E_{\psi}\left(T\left(R(\boldsymbol{\theta}, \boldsymbol{\beta}, \psi_{aug}) \oplus M(I)\right)\right) - \psi_{aug} \right\|_{2}^{2} \tag{2} +$$ + +The pose/cam and shape parameters are kept as predicted from the initial image, namely $\theta = E_{\theta}(I)$ and $\beta = E_{\beta}(I)$. The internal $E_{\psi}(I)$ operation, inside the renderer $R(\cdot)$, does not allow gradients to flow through and is used as an off-the-shelf frozen module. + +Identity Consistency. To aid the translator in faithfully reconstructing the identity of the person, we introduce an additional consistency loss similar to Eq. 2, applied to the shape parameters $\beta$. Note that since the shape encoder $E_{\beta}$ is frozen, this consistency loss only affects the optimization of the translator $T$. + +Alternating Optimization: Overall, we alternate between the two passes, aiming to further reduce the effect of the translator compensating for the encoder. In more detail, during the augmented cycle pass, we alternately freeze the encoder and the translator. Thus, this pass avoids the joint optimization of the two networks in a single step, acting as a regularizer for the other pass and enforcing consistency. + +# 4. Results + +We now present objective and subjective evaluations of our method, along with comparisons with the recent state of the art. Additional experimental evaluations and visualizations can be found in our Suppl. Mat. and demo video. + +# 4.1. Experimental Setup + +Training Datasets: We use the following datasets for training: FFHQ [44], CelebA [56], LRS3 [1], and MEAD [86]. LRS3 and MEAD are video datasets, and we randomly sample images from each video during training. + +SOTA Methods: We compare with the following recent state-of-the-art methods that have publicly available implementations: DECA [28] and EMOCA v2 [18, 29], which use the FLAME [53] model, and Deep3DFace [20] and FOCUS [51], which use the BFM [64] model.
+ +Pretraining: Before the core training stage, all three encoders are pretrained, supervised by two losses: the landmark loss of the reconstruction for the pose and expression encoders, and the shape predictions of MICA [97] for the shape encoder. After that, $E_{\beta}$ and $E_{\theta}$ remain frozen. + +# 4.2. Quantitative Evaluations + +It has been consistently reported [2, 18, 29, 31, 60] that evaluating facial expression reconstruction in terms of geometric metrics is ill-posed. The geometric errors tend to be dominated by the identity face shape and do not correlate well with human perception of facial expressions. Accordingly, we compare our method quantitatively with three experiments: 1) emotion recognition accuracy [18], 2) the ability of a model to guide a U-Net to faithfully reconstruct an input image, and 3) a perceptual user study. + +Emotion Recognition: Following the protocol of [18], we train an MLP to classify eight basic expressions and regress valence and arousal values using AffectNet [59]. We report the Concordance Correlation Coefficient (CCC) and root mean square error (RMSE) for both valence (V-) and arousal (A-), as well as the expression classification accuracy (E-ACC). Results are found in Tab. 1. As can be seen, SMIRK achieves a higher emotion recognition score compared to most other methods, although it falls behind EMOCA v1/2 and Deep3DFace. It is worth noting that, although EMOCA v1 achieves the highest emotion accuracy, it often over-exaggerates expressions, which helps with emotion recognition. EMOCA v2, arguably a more accurate reconstruction model, performs slightly worse. Our main model is comparable with Deep3DFace and outperforms DECA and FOCUS. We can also train a model that scores better on emotion recognition by increasing the emotion loss weight. However, similarly to what was reported by Daněček et al. [18], this leads to undesirable artifacts. We discuss the trade-off between higher emotion recognition scores and reconstruction accuracy in more detail in the Suppl. Mat. Notably,
| Model | V-CCC ↑ | V-RMSE ↓ | A-CCC ↑ | A-RMSE ↓ | E-ACC ↑ |
| --- | --- | --- | --- | --- | --- |
| MGCNet | 0.69 | 0.35 | 0.58 | 0.34 | 0.60 |
| 3DDFA-v2 | 0.62 | 0.39 | 0.50 | 0.34 | 0.52 |
| Deep3DFace | 0.73 | 0.33 | 0.65 | 0.31 | 0.65 |
| DECA | 0.69 | 0.36 | 0.58 | 0.33 | 0.59 |
| FOCUS-CelebA | 0.69 | 0.35 | 0.54 | 0.33 | 0.58 |
| EMOCA v1 | 0.77 | 0.31 | 0.68 | 0.30 | 0.68 |
| EMOCA v2 | 0.76 | 0.33 | 0.66 | 0.30 | 0.66 |
| SMIRK | 0.72 | 0.35 | 0.61 | 0.31 | 0.64 |
| SMIRK w/o emo | 0.71 | 0.35 | 0.60 | 0.32 | 0.62 |
+ +even without the emotion loss, the proposed model achieves a decent emotion recognition score, indicating that our reconstruction scheme can adequately capture emotions without the need for explicit perceptual supervision. + +Reconstruction Loss: In order to evaluate the faithfulness of a 3D face reconstruction technique, we have devised a protocol based on our analysis-by-neural-synthesis method. Under this protocol, we train a U-Net image-to-image translator, but freeze the weights of the encoder so that only the translator is trained. The motivation is simple: if the 3D mesh is accurate enough, the reconstruction will be more faithful, due to a one-to-one appearance correspondence. For each method (including ours, for fairness), we train a U-Net for 5 epochs, using the masked image and the rendered 3D geometry as input. Finally, we report the $L_{1}$ reconstruction loss and the VGG loss between the reconstructed image and the input image on the test set of AffectNet [59], which features subjects under multiple expressions. The results can be seen in Table 2. We observe that, using the rendered shape geometry of SMIRK, the trained U-Net achieves a more faithful reconstruction of the input image compared to DECA and EMOCAv2. Particularly for EMOCAv2, we observe that although it can capture expressions, the results in many cases do not faithfully represent the input image, leading to an overall worse image reconstruction error. In terms of $L_{1}$ loss, SMIRK is on par with Deep3DFace and FOCUS and shows a small improvement in terms of VGG loss. + +Table 1. Emotion recognition performance on the AffectNet test set [59]. We follow the same metrics as in [18].
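To make the reconstruction-loss protocol described above more concrete, the sketch below freezes a face encoder and trains only a U-Net translator with an L1 objective. It is an illustrative sketch only: the `encoder`, `rasterizer` and `mask_fn` callables, the optimizer and the learning rate are assumptions, not the authors' exact training code.

```python
import torch
import torch.nn.functional as F


def reconstruction_protocol(encoder, unet, rasterizer, mask_fn, train_loader, epochs=5, lr=1e-4):
    """Sketch of the protocol above: freeze the face encoder, train only the U-Net translator.
    `encoder`, `rasterizer` and `mask_fn` are assumed callables, not a specific library API."""
    for p in encoder.parameters():
        p.requires_grad_(False)                  # encoder stays fixed; only the translator learns
    opt = torch.optim.Adam(unet.parameters(), lr=lr)
    for _ in range(epochs):
        for image in train_loader:
            with torch.no_grad():
                S = rasterizer(*encoder(image))  # monochrome rendering of the predicted geometry
            recon = unet(torch.cat([S, mask_fn(image)], dim=1))
            loss = F.l1_loss(recon, image)       # L1 reconstruction error drives the translator
            opt.zero_grad()
            loss.backward()
            opt.step()
    # The L1 and VGG-feature errors between the reconstruction and the input
    # are then reported on the held-out test set (not shown here).
    return unet
```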
| Metric | DECA | EMOCAv2 | FOCUS | Deep3DFace | SMIRK |
| --- | --- | --- | --- | --- | --- |
| L1 Loss ↓ | 0.10 | 0.11 | 0.09 | 0.09 | 0.09 |
| VGG Loss ↓ | 0.80 | 0.84 | 0.78 | 0.78 | 0.76 |
+ +Table 2. Image reconstruction performance on the AffectNet test set [59]. SMIRK achieves better reconstruction and perceptual scores compared to other methods. + +User Study: Arguably, the perception of the reconstructed facial expressions is the most important aspect in 3D face reconstruction, as it directly influences how well the reconstructed model captures the emotions and nuances of the original face. Considering this, we also designed a user study to assess the perception of the reconstructed facial expressions by human participants. We randomly selected 80 images from the AffectNet [59] test set (using the split from [81]) and 80 images from our MEAD test set (unseen subjects) and performed 3D face reconstruction with both SMIRK and its competitors. To mitigate bias w.r.t. the identity component for the FLAME-based methods, for DECA and EMOCAv2 we used the same identity parameters as our method (which itself was distilled from MICA). In the user study, participants were shown an image of a human face alongside two 3D face reconstructions, either from our method or the others, and were asked to choose the one with the most faithful facial expression representation. The order was randomized for each question, and each user answered a total of 32 questions, equally distributed among the different methods. + +![](images/32cbe24dde0545ecccec380f483f1a5ecdd9eb46aa1a9f16fcf5d1f4837995ad.jpg) +Figure 6. Visual comparison of 3D face reconstruction. From left to right: Input, Deep3DFaceRecon [20], FOCUS [51], DECA [28], EMOCAv2 [18], and SMIRK. Many more examples can also be found in the Suppl. Mat. and the demo video on our webpage. + +A total of 85 users completed the study, and the results in Table 3 show that our method was significantly preferred over all competitors, confirming the performance of SMIRK in terms of faithful expressive 3D reconstruction. The results were statistically significant (for all pairs, $p < 0.01$ with a binomial test, adjusted using the Bonferroni method). EMOCAv2, which also uses an emotion loss for expressive 3D reconstruction, was the closest competitor to our method, followed by FOCUS and Deep3D, while DECA was the least selected.
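For readers who want to reproduce this kind of significance test, the following sketch (not from the paper) applies SciPy's binomial test to the preference counts of Table 3 with a simple Bonferroni correction. The one-sided alternative and the correction over four comparisons are assumptions, since the exact test configuration is not spelled out in the text.

```python
from scipy.stats import binomtest

# Preference counts from Table 3 ("ours / competitor" choices in the user study).
pairs = {"DECA": (603, 77), "EMOCAv2": (461, 219), "Deep3D": (510, 170), "FOCUS": (534, 146)}
m = len(pairs)  # number of pairwise comparisons used for the Bonferroni correction

for name, (ours, theirs) in pairs.items():
    n = ours + theirs
    # Null hypothesis: no preference (p = 0.5); the one-sided alternative is an assumption here.
    p = binomtest(ours, n, p=0.5, alternative="greater").pvalue
    print(f"SMIRK vs {name}: Bonferroni-adjusted p = {min(1.0, m * p):.3g}")
```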
| | DECA | EMOCAv2 | Deep3D | FOCUS |
| --- | --- | --- | --- | --- |
| SMIRK | 603/77 | 461/219 | 510/170 | 534/146 |
+ +Table 3. User study results: "a/b" indicates that Ours (left) was preferred $a$ times, while the competing method was chosen $b$ times. SMIRK is overwhelmingly preferred over all other methods. + +# 4.3. Visual Examples + +In Fig. 6 we present multiple visual comparisons with the four other methods. As can be visually assessed, our method can more accurately capture the facial expressions across multiple diverse subjects and conditions. Furthermore, the presented methodology can also capture expressions that other methods fail to capture, such as non-symmetric mouth movements, eye closures, and exaggerated expressions. + +# 4.4. Ablation Studies + +Ablation on the effect of landmarks: We first assess the effect of the landmark loss. To do that, we calculate, for different versions of our model, the L1 loss, the VGG loss, and the cycle loss after expression manipulation, using the same protocol as in Sec. 4.2. Note that this time we also evaluate performance by considering the cycle loss. That is, we also manipulate the predicted expressions, regenerate a new image, and expect that the method can successfully predict the same parameters. We consider three different versions of our model: 1) Protocol 1 - no landmark loss, 2) Protocol 2 - training some epochs with the landmark loss and then removing it, 3) Protocol 3 - full training with the landmark loss. We present these results in Table 4. + +As we can see, completely omitting landmarks leads to degraded results. However, if we first train for a few epochs with landmarks and then set the loss weight to 0, the model achieves very similar performance to the original model, which uses the loss throughout the full training. These results suggest that, in contrast with previous works [18, 28], the landmark loss in SMIRK acts more as a regularizer during training, helping to guide the model towards good solutions, but in the later stages it may somewhat constrain its flexibility. We plan to explore this balance in more depth in future work.
| Protocol | L1 Loss ↓ | VGG Loss ↓ | Cycle Loss ↓ |
| --- | --- | --- | --- |
| P1 | 0.111 | 0.757 | 0.588 |
| P2 | 0.093 | 0.713 | 0.487 |
| P3 | 0.093 | 0.714 | 0.544 |
+ +Table 4. Ablation study on the effect of the landmark loss. P1: no landmark loss, P2: landmark loss removed after a few epochs, P3: landmark loss throughout the whole training. + +Impact of Cycle Path: Here we also present examples of how the cycle path affects the reconstruction performance. First, we show an example result in Fig. 7, where we see that using the proposed augmentations provides more detailed expressions. For example, the template injection augmentation considerably helps the reconstruction of the mouth structure. Second, we have also observed that the cycle path makes the model more robust, especially w.r.t. mouth closures (e.g., zero jaw opening). We show such indicative cases in Figure 8. Such artifacts can be seen when using the no-cycle variant, acting as a visual confirmation of the aforementioned numerical results. Here, the mouth is not properly closed in the 3D reconstructed face, since it was mis-corresponded to a properly closed mouth in the image reconstruction space. The cycle path can solve such instances by providing tweaked expressions that are enforced to be recognized correctly, avoiding "misalignments" between expected expressions and reconstructed images. + +![](images/50a9bbe87b32c8191eea95da6743dfa4f2869306879395f9805a487db95f85c1.jpg) +Figure 7. Impact of cycle augmentations. From left to right: input image, no cycle loss, cycle loss with all augmentations. + +![](images/6e72d6a2ac6dc79aaf57a9f8e8d0b44d5c966bd50a40055a45bae7dd16fa43ba.jpg) +Figure 8. Impact of the Cycle Path. Artifacts can appear when not training with the cycle path. From left to right: input image, 3D reconstruction and image reconstruction without the cycle path, 3D reconstruction and image reconstruction with the cycle path. + +# 4.5. Limitations + +Despite the effectiveness of SMIRK, there are limitations to be addressed. It is sensitive to occlusions, as the training datasets do not include them, and it tends to assume more intense expressions when parts of the face are missing, instead of extrapolating from the available information. In addition, SMIRK has been trained on single images, and the temporal aspect is not yet explored. Also note that while SMIRK does not need to predict albedo and lighting, this can be limiting for specific applications in 3D facial animation and video editing. Please refer to the Suppl. Mat. for a more detailed discussion. + +# 5. Conclusion + +We have presented SMIRK, a new paradigm for accurate expressive 3D face reconstruction from images. Instead of the traditional graphics-based approach for self-supervision, which is commonly used for monocular 3D face reconstruction in-the-wild, SMIRK employs a neural image-to-image translator model, which learns to reconstruct the input face image given the rendered predicted facial geometry. Our extensive experimental results show that SMIRK outperforms previous methods and can faithfully reconstruct expressive 3D faces, including challenging complex expressions such as asymmetries, and subtle expressions such as smirking. + +# Acknowledgments + +This research work was supported by the project "Applied Research for Autonomous Robotic Systems" (MIS 5200632), which is implemented within the framework of the National Recovery and Resilience Plan "Greece 2.0" (Measure: 16618 - Basic and Applied Research) and is funded by the European Union - NextGenerationEU. + +# References + +[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. Lrs3-ted: a large-scale dataset for visual speech recognition. arXiv preprint arXiv:1809.00496, 2018.
6 +[2] Zakaria Aldeneh, Masha Fedzechkina, Skyler Seto, Katherine Metcalf, Miguel Sarabia, Nicholas Apostoloff, and Barry-John Theobald. Towards a Perceptual Model for Estimating the Quality of Visual Speech, 2022. arXiv:2203.10117 [cs, eess]. 6 +[3] Oswald Aldrian and William AP Smith. Inverse rendering of faces with a 3d morphable model. IEEE transactions on pattern analysis and machine intelligence, 35(5):1080-1093, 2012. 1, 2 +[4] Riza Alp Guler, George Trigeorgis, Epameinondas Antonakos, Patrick Snape, Stefanos Zafeiriou, and Iasonas Kokkinos. Densereg: Fully convolutional dense shape regression in-the-wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6799-6808, 2017. 2 +[5] Haoran Bai, Di Kang, Haoxian Zhang, Jinshan Pan, and Linchao Bao. FFHQ-UV: Normalized facial uv-texture dataset for 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 362–371, 2023. 3 +[6] Anil Bas, William A. P. Smith, Timo Bolkart, and Stefanie Wuhrer. Fitting a 3D morphable model to edges: A comparison between hard and soft correspondences. In Asian Conference on Computer Vision Workshops, pages 377-391, 2017. 1, 2 +[7] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3D faces. In Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1999. 2 +[8] Volker Blanz, Sami Romdhani, and Thomas Vetter. Face identification across different poses and illuminations with a 3D morphable model. In International Conference on Automatic Face & Gesture Recognition (FG), pages 202-207, 2002. 1, 2 +[9] Timo Bolkart, Tianye Li, and Michael J Black. Instant multiview head capture through learnable registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 768-779, 2023. 5 +[10] James Booth, Epameinondas Antonakos, Stylianos Ploumpis, George Trigeorgis, Yannis Panagakis, and Stefanos Zafeiriou. 3d face morphable models" in-the-wild". In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 48-57, 2017. 3 +[11] James Booth, Anastasios Roussos, Evangelos Ververas, Epameinondas Antonakos, Stylianos Ploumpis, Yannis Panagakis, and Stefanos Zafeiriou. 3D reconstruction of "inthe-wild" faces in images and videos. IEEE transactions on pattern analysis and machine intelligence, 40(11):2638-2652, 2018. 3 +[12] Alan Brunton, Augusto Salazar, Timo Bolkart, and Stefanie Wuhrer. Review of statistical shape spaces for 3D data with comparative analysis for human faces. Computer Vision and Image Understanding (CVIU), 128:1-17, 2014. 1 + +[13] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In Proceedings of the IEEE International Conference on Computer Vision, pages 1021-1030, 2017. 4 +[14] Chen Cao, Yanlin Weng, Shun Zhou, Yiying Tong, and Kun Zhou. Facewarehouse: A 3d facial expression database for visual computing. IEEE Transactions on Visualization and Computer Graphics, 20(3):413-425, 2013. 2 +[15] Chen Cao, Qiming Hou, and Kun Zhou. Displaced dynamic expression regression for real-time facial tracking and animation. Transactions on Graphics (TOG), 33(4):1-10, 2014. 2 +[16] Feng-Ju Chang, Anh Tuan Tran, Tal Hassner, Iacopo Masi, Ram Nevatia, and Gerard Medioni. ExpNet: Landmark-free, deep, 3D facial expressions. 
In International Conference on Automatic Face & Gesture Recognition (FG), pages 122-129, 2018. 1, 2 +[17] Aggelina Chatziagapi, ShahRukh Athar, Francesc Moreno-Noguer, and Dimitris Samaras. Sider: Single-image neural optimization for facial geometric detail recovery. In 2021 International Conference on 3D Vision (3DV), pages 815-824. IEEE, 2021. 2 +[18] Radek Daneček, Michael J Black, and Timo Bolkart. EMOCA: Emotion driven monocular face capture and animation. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 20311-20322, 2022. 1, 2, 3, 4, 6, 7, 8 +[19] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multilevel face localisation in the wild. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5203-5212, 2020. 2 +[20] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3D face reconstruction with weakly-supervised learning: From single image to image set. In Conference on Computer Vision and Pattern Recognition Workshops (CVPR-W), pages 285-295, 2019. 1, 2, 3, 6, 7 +[21] Zheng Ding, Xuaner Zhang, Zhihao Xia, Lars Jebe, Zhuowen Tu, and Xiuming Zhang. Diffusionrig: Learning personalized priors for facial appearance editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12736-12746, 2023. 3 +[22] Pengfei Dou, Shishir K. Shah, and Ioannis A. Kakadiaris. End-to-end 3D face reconstruction with deep neural networks. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 5908-5917, 2017. 2 +[23] Michail Christos Doukas, Mohammad Rami Koujan, Viktoria Sharmanska, Anastasios Roussos, and Stefanos Zafeiriou. Head2head++: Deep facial attributes re-targeting. IEEE Transactions on Biometrics, Behavior, and Identity Science, 3(1):31-43, 2021. 3 +[24] Michail Christos Doukas, Stefanos Zafeiriou, and Viktoriia Sharmanska. Headgan: One-shot neural head synthesis and editing. In Proceedings of the IEEE/CVF International conference on Computer Vision, pages 14398-14407, 2021. 3 +[25] Bernhard Egger. Semantic Morphable Models. PhD thesis, University of Basel, 2018. 2 + +[26] Bernhard Egger, William A. P. Smith, Ayush Tewari, Stefanie Wuhrer, Michael Zollhoefer, Thabo Beeler, Florian Bernard, Timo Bolkart, Adam Kortylewski, Sami Romdhani, Christian Theobalt, Volker Blanz, and Thomas Vetter. 3D morphable face models—past, present, and future. Transactions on Graphics (TOG), 39(5), 2020. 1, 2 +[27] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In European Conference on Computer Vision (ECCV), 2018. 2 +[28] Yao Feng, Haiwen Feng, Michael J Black, and Timo Bolkart. Learning an animatable detailed 3D face model from in-the-wild images. Transactions on Graphics, (Proc. SIGGRAPH), 40(4):1-13, 2021. 1, 2, 3, 6, 7, 8 +[29] Panagiotis P. Filntisis, George Retsinas, Foivos Paraperas-Papantoniou, Athanasios Katsamanis, Anastasios Roussos, and Petros Maragos. SPECTRE: Visual speech-informed perceptual 3D facial expression reconstruction from videos. In Conference on Computer Vision and Pattern Recognition Workshops (CVPR-W), pages 5745-5755, 2023. 1, 2, 3, 6 +[30] Pablo Garrido, Michael Zollhöfer, Dan Casas, Levi Valgaerts, Kiran Varanasi, Patrick Pérez, and Christian Theobalt. Reconstruction of personalized 3d face rigs from monocular video. ACM Transactions on Graphics (TOG), 35 (3):1-15, 2016. 
2 +[31] Pablo Garrido, Michael Zollhöfer, Chenglei Wu, Derek Bradley, Patrick Pérez, Thabo Beeler, and Christian Theobalt. Corrective 3d reconstruction of lips from monocular video. ACM Trans. Graph., 35(6):219-1, 2016. 6 +[32] Baris Gecer, Stylianos Ploumpis, Irene Kotsia, and Stefanos Zafeiriou. GANFIT: Generative adversarial network fitting for high fidelity 3D face reconstruction. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1155-1164, 2019. 2, 3 +[33] Kyle Genova, Forrester Cole, Aaron Maschinot, Aaron Sarna, Daniel Vlasic, and William T. Freeman. Unsupervised training for 3D morphable model regression. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 8377-8386, 2018. 1, 2 +[34] Thomas Gerg, Andreas Morel-Forster, Clemens Blumer, Bernhard Egger, Marcel Luthi, Sandro Schoenborn, and Thomas Vetter. Morphable face models - an open framework. In International Conference on Automatic Face & Gesture Recognition (FG), pages 75–82, 2018. 1, 2 +[35] Partha Ghosh, Pravir Singh Gupta, Roy Uziel, Anurag Ranjan, Michael J Black, and Timo Bolkart. GIF: Generative interpretable faces. In 2020 International Conference on 3D Vision (3DV), pages 868-878. IEEE, 2020. 3 +[36] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2 +[37] Shan He, Haonan He, Shuo Yang, Xiaoyan Wu, Pengcheng Xia, Bing Yin, Cong Liu, Lirong Dai, and Chang Xu. Speech4mesh: Speech-assisted monocular 3d facial reconstruction for speech-driven 3d facial animation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14192-14202, 2023. 2 + +[38] Xingzhe He, Bastian Wandt, and Helge Rhodin. Autolink: Self-supervised learning of human skeletons and object outlines by linking keypoints. Advances in Neural Information Processing Systems, 35:36123-36141, 2022. 4 +[39] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, et al. Searching for mobilenetv3. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1314-1324, 2019. 3 +[40] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A. Efros. Image-to-image translation with conditional adversarial networks. CoRR, abs/1611.07004, 2016. 3 +[41] Aaron S Jackson, Adrian Bulat, Vasileios Argyriou, and Georgios Tzimiropoulos. Large pose 3D face reconstruction from a single image via direct volumetric CNN regression. In International Conference on Computer Vision (ICCV), pages 1031-1039, 2017. 2 +[42] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 694-711. Springer, 2016. 3, 4 +[43] Harim Jung, Myeong-Seok Oh, and Seong-Whan Lee. Learning free-form deformation for 3D face reconstruction from in-the-wild images. In International Conference on Systems, Man, and Cybernetics (SMC), pages 2737–2742, 2021. 2 +[44] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 
6 +[45] Hyeongwoo Kim, Pablo Garrido, Ayush Tewari, Weipeng Xu, Justus Thies, Matthias Nießner, Patrick Pérez, Christian Richardt, Michael Zolloffer, and Christian Theobalt. Deep video portraits. ACM Transactions on Graphics (TOG), 37 (4):163, 2018. 3 +[46] Hyeongwoo Kim, Michael Zollhöfer, Ayush Tewari, Justus Thies, Christian Richardt, and Christian Theobalt. Inverse-FaceNet: deep monocular inverse face rendering. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 4625-4634, 2018. 1, 2 +[47] Tatsuro Koizumi and William A. P. Smith. "look ma, no landmarks!" - unsupervised, model-based dense face alignment. In European Conference on Computer Vision (ECCV), pages 690-706, 2020. 2 +[48] Alexandros Lattas, Stylianos Moschoglou, Baris Gecer, Stylianos Ploumpis, Vasileios Triantafyllou, Abhijeet Ghosh, and Stefanos Zafeiriou. AvatarMe: Realistically renderable 3d facial reconstruction" in-the-wild". In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 760-769, 2020. 3 +[49] Alexandros Lattas, Stylianos Moschoglou, Stylianos Ploumpis, Baris Gecer, Jiankang Deng, and Stefanos Zafeiriou. Fitme: Deep photorealistic 3d morphable model avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8629-8640, 2023. + +[50] Gun-Hee Lee and Seong-Whan Lee. Uncertainty-aware mesh decoder for high fidelity 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6100–6109, 2020. 3 +[51] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. To fit or not to fit: Model-based face reconstruction and occlusion segmentation from weak supervision. CoRR, abs/2106.09614, 2021. 6, 7 +[52] Hao Li, Jihun Yu, Yuting Ye, and Chris Bregler. Realtime facial animation with on-the-fly correctives. Transactions on Graphics (TOG), 32(4):42-1, 2013. 2 +[53] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 2, 3, 6 +[54] Jiangke Lin, Yi Yuan, Tianjia Shao, and Kun Zhou. Towards high-fidelity 3d face reconstruction from in-the-wild images using graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5891-5900, 2020. 3 +[55] Yaojie Liu, Amin Jourabloo, William Ren, and Xiaoming Liu. Dense face alignment. In International Conference on Computer Vision Workshops (ICCV-W), pages 1619-1628, 2017. 2 +[56] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 6 +[57] Huiwen Luo, Koki Nagano, Han-Wei Kung, Qingguo Xu, Zejian Wang, Lingyu Wei, Liwen Hu, and Hao Li. Normalized avatar synthesis using stylegan and perceptual refinement. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11662–11672, 2021. 3 +[58] B.R. Mallikarjun, Ayush Tewari, Hans-Peter Seidel, Mohamed Elgharib, Christian Theobalt, et al. Learning complete 3d morphable face models from images and videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3361-3371, 2021. 3 +[59] Ali Mollahosseini, Behzad Hasani, and Mohammad H Ma-hoor. Affectnet: A database for facial expression, valence, and arousal computing in the wild. 
IEEE Transactions on Affective Computing, 10(1):18-31, 2017. 6, 7 +[60] Masahiro Mori, Karl F MacDorman, and Norri Kageki. The uncanny valley [from the field]. IEEE Robotics & Automation magazine, 19(2):98-100, 2012. 6 +[61] Christopher Otto, Prashanth Chandran, Gaspard Zoss, Markus H. Gross, Paulo F. U. Gotardo, and Derek Bradley. A perceptual shape loss for monocular 3D face reconstruction. Computer Graphics Forum (Proc. Pacific Graphics), 2023. 2 +[62] Foivos Paraperas Papantoniou, Panagiotis P Filntisis, Petros Maragos, and Anastasios Roussos. Neural emotion director: Speech-preserving semantic control of facial expressions in "in-the-wild" videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18781-18790, 2022. 3 +[63] Jeong Joon Park, Peter Florence, Julian Straub, Richard A. Newcombe, and Steven Lovegrove. DeepSDF: Learning + +continuous signed distance functions for shape representation. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2 +[64] Pascal Paysan, Reinhard Knothe, Brian Amberg, Sami Romdhani, and Thomas Vetter. A 3d face model for pose and illumination invariant face recognition. In 2009 sixth IEEE international conference on advanced video and signal based surveillance, pages 296-301. IEEE, 2009. 2, 6 +[65] Stylianos Ploumpis, Evangelos Ververas, Eimear O' Sullivan, Stylianos Moschoglou, Haoyang Wang, Nick E. Pears, William A. P. Smith, Baris Gecer, and Stefanos Zafeiriou. Towards a complete 3D morphable model of the human head. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 43(11):4142-4160, 2021. 1, 2 +[66] E. Richardson, M. Sela, and R. Kimmel. 3D face reconstruction by learning from synthetic data. In International Conference on 3D Vision (3DV), pages 460-469, 2016. 1, 2 +[67] Sami Romdhani and Thomas Vetter. Estimating 3D shape and texture using pixel intensity, edges, specular highlights, texture constraints and aprior. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 986-993, 2005. 1, 2 +[68] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention - MICCAI 2015 - 18th International Conference Munich, Germany, October 5 - 9, 2015, Proceedings, Part III, pages 234-241. Springer, 2015. 2, 3 +[69] Zeyu Ruan, Changqing Zou, Longhai Wu, Gangshan Wu, and Limin Wang. SADRNet: Self-aligned dual face regression networks for robust 3d dense face alignment and reconstruction. IEEE Transactions on Image Processing, 30: 5793-5806, 2021. 2 +[70] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael Black. Learning to regress 3D face shape and expression from an image without 3d supervision. In Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2 +[71] Matan Sela, Elad Richardson, and Ron Kimmel. Unrestricted facial geometry reconstruction using image-to-image translation. In International Conference on Computer Vision (ICCV), pages 1576-1585, 2017. 2 +[72] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Ming-min Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3D face reconstruction by occlusion-aware multiview geometry consistency. In European Conference on Computer Vision (ECCV), pages 53-70. Springer, 2020. 2 +[73] William AP Smith. The perspective face shape ambiguity. In Perspectives in Shape Analysis, pages 299-319. Springer, 2016. 2 +[74] Attila Szabó, Givi Meishvili, and Paolo Favaro. 
Unsupervised generative 3D shape learning from natural images. CoRR, abs/1910.00287, 2019. 2 +[75] Ayush Tewari, Michael Zolloffer, Hyeongwoo Kim, Pablo Garrido, Florian Bernard, Patrick Perez, and Christian Theobalt. MoFA: Model-based deep convolutional face autoencoder for unsupervised monocular reconstruction. In In + +ternational Conference on Computer Vision (ICCV), pages 1274-1283, 2017. 1, 2 +[76] Ayush Tewari, Michael Zollhöfer, Pablo Garrido, Florian Bernard, Hyeongwoo Kim, Patrick Pérez, and Christian Theobalt. Self-supervised multi-level face model learning for monocular reconstruction at over $250\mathrm{~hz}$ . In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2549-2559, 2018. 3 +[77] Ayush Tewari, Florian Bernard, Pablo Garrido, Gaurav Bharaj, Mohamed Elgharib, Hans-Peter Seidel, Patrick Pérez, Michael Zollhöfer, and Christian Theobalt. FML: face model learning from videos. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 10812-10822, 2019. 2, 3 +[78] Justus Thies, Michael Zollhöfer, Matthias Nießner, Levi Valgaerts, Marc Stamminger, and Christian Theobalt. Real-time expression transfer for facial reenactment. ACM Trans. Graph., 34(6), 2015. 2 +[79] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Facevr: Real-time facial reenactment and eye gaze control in virtual reality. arXiv preprint arXiv:1610.03151, 2016. +[80] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2Face: Real-time face capture and reenactment of RGB videos. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 2387-2395, 2016. 1, 2 +[81] Antoine Toisoul, Jean Kossaifi, Adrian Bulat, Georgios Tzimiropoulos, and Maja Pantic. Estimation of continuous valence and arousal levels from faces in naturalistic conditions. Nature Machine Intelligence, 3(1):42-50, 2021. 7 +[82] Anh Tuan Tran, Tal Hassner, Iacopo Masi, and Gerard Medioni. Regressing robust and discriminative 3D morphable models with a very deep neural network. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1599-1608, 2017. 1, 2 +[83] Anh Tuan Tran, Tal Hassner, Iacopo Masi, Eran Paz, Yuval Nirkin, and Gérard Medioni. Extreme 3d face reconstruction: Seeing through occlusions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3935-3944, 2018. 2 +[84] Luan Tran and Xiaoming Liu. Nonlinear 3d face morphable model. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7346-7355, 2018. 3 +[85] Luan Tran, Feng Liu, and Xiaoming Liu. Towards high-fidelity nonlinear 3d face morphable model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1126-1135, 2019. 3 +[86] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 6 +[87] Huawei Wei, Shuang Liang, and Yichen Wei. 3D dense face alignment via graph convolution networks. arXiv preprint arXiv:1904.05562, 2019. 2 +[88] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Matthew Johnson, Jingjing Shen, Nikola Milosavljevic, Daniel Wilde, + +Stephan J. Garbin, Toby Sharp, Ivan Stojiljkovic, Tom Cashman, and Julien P. C. Valentin. 3D face reconstruction with dense landmarks. In European Conference on Computer Vision (ECCV), pages 160-177. Springer, 2022. 
2 +[89] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3D objects from images in the wild. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1-10, 2020. 2 +[90] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction. In Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[91] Tarun Yenamandra, Ayush Tewari, Florian Bernard, Hans-Peter Seidel, Mohamed Elgharib, Daniel Cremers, and Christian Theobalt. i3dmm: Deep implicit 3d morphable model of human heads. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12803-12813, 2021. 2 +[92] Xiaoxing Zeng, Xiaojiang Peng, and Yu Qiao. DF2Net: A dense-fine-finer network for detailed 3D face reconstruction. In International Conference on Computer Vision (ICCV), 2019. 2 +[93] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 2 +[94] Tianke Zhang, Xuangeng Chu, Yunfei Liu, Lijian Lin, Zhendong Yang, Zhengzhuo Xu, Chengkun Cao, Fei Yu, Changyin Zhou, Chun Yuan, et al. Accurate 3d face reconstruction with facial component tokens. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9033-9042, 2023. 2, 3 +[95] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A. Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In IEEE International Conference on Computer Vision, ICCV 2017, Venice, Italy, October 22-29, 2017, pages 2242-2251. IEEE Computer Society, 2017. 3 +[96] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z. Li. Face alignment across large poses: A 3D solution. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 146-155, 2016. 2 +[97] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces. In European Conference on Computer Vision, pages 250–269, 2022. 2, 3, 6 +[98] Michael Zollhöfer, Justus Thies, Darek Bradley, Pablo Garrido, Thabo Beeler, Patrick Pérez, Marc Stamminger, Matthias Nießner, and Christian Theobalt. State of the art on monocular 3D face reconstruction, tracking, and applications. Computer Graphics Forum, 2018. 
1, 2 \ No newline at end of file diff --git a/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/images.zip b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..80e32941ca86604fe29fb5742b01c660ea0c54a9 --- /dev/null +++ b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1924ea72d42b779d5e17be7a154d08505dd5152d4484b4764e66625e8554580b +size 422666 diff --git a/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/layout.json b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..24558d1a9b61b887c8a99765a8be220ec8616d68 --- /dev/null +++ b/2024/3D Facial Expressions through Analysis-by-Neural-Synthesis/layout.json @@ -0,0 +1,9967 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 109, + 103, + 485, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 103, + 485, + 121 + ], + "spans": [ + { + "bbox": [ + 109, + 103, + 485, + 121 + ], + "type": "text", + "content": "3D Facial Expressions through Analysis-by-Neural-Synthesis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 137, + 476, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 137, + 476, + 166 + ], + "spans": [ + { + "bbox": [ + 116, + 137, + 476, + 166 + ], + "type": "text", + "content": "George Retsinas1† Panagiotis P. Filntisis1† Radek Daněček3 Victoria F. Abrevaya3 Anastasios Roussos4 Timo Bolkart3* Petros Maragos1,2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 179, + 173, + 413, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 173, + 413, + 184 + ], + "spans": [ + { + "bbox": [ + 179, + 173, + 413, + 184 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 179, + 173, + 413, + 184 + ], + "type": "text", + "content": "Institute of Robotics, Athena Research Center, 15125 Maroussi, Greece" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 141, + 185, + 452, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 185, + 452, + 196 + ], + "spans": [ + { + "bbox": [ + 141, + 185, + 452, + 196 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 141, + 185, + 452, + 196 + ], + "type": "text", + "content": "School of Electrical & Computer Engineering, National Technical University of Athens, Greece" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 216, + 196, + 378, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 196, + 378, + 207 + ], + "spans": [ + { + "bbox": [ + 216, + 196, + 378, + 207 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 216, + 196, + 378, + 207 + ], + "type": "text", + "content": "MPI for Intelligent Systems, Tübingen, Germany" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 126, + 207, + 465, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 207, + 465, + 217 + ], + "spans": [ + { + "bbox": [ + 126, + 207, + 465, + 217 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 126, + 207, + 465, + 217 + ], + "type": "text", + "content": "Institute of Computer Science (ICS), Foundation for Research & Technology - Hellas (FORTH), Greece" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 
48, + 228, + 545, + 356 + ], + "blocks": [ + { + "bbox": [ + 48, + 228, + 545, + 356 + ], + "lines": [ + { + "bbox": [ + 48, + 228, + 545, + 356 + ], + "spans": [ + { + "bbox": [ + 48, + 228, + 545, + 356 + ], + "type": "image", + "image_path": "da1a4cae32a6366d3ca8377814176725aa5cd0467fc824055cc8ccc1bf9dca84.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 365, + 546, + 388 + ], + "lines": [ + { + "bbox": [ + 46, + 365, + 546, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 546, + 388 + ], + "type": "text", + "content": "Figure 1. SMIRK reconstructs 3D faces from monocular images with facial geometry that faithfully recover extreme, asymmetric, and subtle expressions. Top: images of people with challenging expressions. Bottom: SMIRK reconstructions." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 143, + 397, + 192, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 397, + 192, + 410 + ], + "spans": [ + { + "bbox": [ + 143, + 397, + 192, + 410 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 426, + 290, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 290, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 290, + 715 + ], + "type": "text", + "content": "While existing methods for 3D face reconstruction from in-the-wild images excel at recovering the overall face shape, they commonly miss subtle, extreme, asymmetric, or rarely observed expressions. We improve upon these methods with SMIRK (Spatial Modeling for Image-based Reconstruction of Kinesics), which faithfully reconstructs expressive 3D faces from images. We identify two key limitations in existing methods: shortcomings in their self-supervised training formulation, and a lack of expression diversity in the training images. For training, most methods employ differentiable rendering to compare a predicted face mesh with the input image, along with a plethora of additional loss functions. This differentiable rendering loss not only has to provide supervision to optimize for 3D face geometry, camera, albedo, and lighting, which is an ill-posed optimization problem, but the domain gap between rendering and input image further hinders the learning process. Instead, SMIRK replaces the differentiable rendering with a neural rendering module that, given the rendered predicted mesh geometry, and sparsely sampled pixels of the input image, generates a face image. As the neural rendering gets color information from sampled image pixels, supervising with neural rendering-based reconstruction loss can focus solely on the geometry. Further," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 399, + 547, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 399, + 547, + 531 + ], + "spans": [ + { + "bbox": [ + 304, + 399, + 547, + 531 + ], + "type": "text", + "content": "it enables us to generate images of the input identity with varying expressions while training. These are then utilized as input to the reconstruction model and used as supervision with ground truth geometry. This effectively augments the training data and enhances the generalization for diverse expressions. Our qualitative, quantitative and particularly our perceptual evaluations demonstrate that SMIRK achieves the new state-of-the-art performance on accurate expression reconstruction. 
For our method's source code, demo video and more, please visit our project webpage: https://georgenetsi.github.io/smirk/." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 552, + 387, + 564 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 552, + 387, + 564 + ], + "spans": [ + { + "bbox": [ + 306, + 552, + 387, + 564 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 572, + 547, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 572, + 547, + 693 + ], + "spans": [ + { + "bbox": [ + 304, + 572, + 547, + 693 + ], + "type": "text", + "content": "Reconstructing 3D faces from single images in-the-wild has been a central goal of computer vision for the last three decades [98] with practical implications in various fields including virtual and augmented reality, entertainment, and telecommunication. Commonly, these methods estimate the parameters of a 3D Morphable Model (3DMM) [12, 26], either through optimization [3, 6-8, 34, 67, 80] or regression with deep learning [16, 18, 20, 28, 29, 33, 46, 65, 66, 70, 75, 82]. Due to the lack of large-scale paired 2D-3D data, most learning-based methods follow a self-supervised train" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 320, + 703, + 471, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 703, + 471, + 713 + ], + "spans": [ + { + "bbox": [ + 320, + 703, + 471, + 713 + ], + "type": "text", + "content": "† Equal contributions. * Now at Google." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2490" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 286, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 286, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 286, + 85 + ], + "type": "text", + "content": "ing scheme using an analysis-by-synthesis approach [7, 75]." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 87, + 286, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 87, + 286, + 230 + ], + "spans": [ + { + "bbox": [ + 46, + 87, + 286, + 230 + ], + "type": "text", + "content": "Although there has been a persistent improvement in the accuracy of identity shape reconstruction, as indicated by established benchmarks [28, 70], the majority of works fail to capture the full range of facial expressions, including extreme, asymmetric, or subtle movements which are perceptually significant to humans -see e.g. Fig. 1. Recent works addressed this by augmenting the photometric error with image-based perceptual losses based on expert networks for emotion [18], lip reading [29], or face recognition [32], or with a GAN-inspired discriminator [61]. However, this requires a careful balancing of the different loss terms, and can often produce over-exaggerated facial expressions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 232, + 286, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 232, + 286, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 232, + 286, + 388 + ], + "type": "text", + "content": "We argue here that the main problem is the shortcomings of the differentiable rendering loss. Jointly optimizing for geometry, camera, appearance, and lighting is an ill-posed optimization problem due to shape-camera [73] and albedo-lighting [25] ambiguities. Further the loss is negatively impacted by the large domain gap between natural input image and the rendering. The commonly employed Lambertian reflectance model is an over-simplistic approximation of the light-face interaction [26], and it is insufficient to account for hard self-shadows, unusual illumination environments, highly reflective skin, and differences in camera color patterns. This, in turn, can result in sub-optimal reconstructions by providing incorrect guidance during training." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 391, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 391, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 391, + 286, + 713 + ], + "type": "text", + "content": "In this work, we introduce a simple but effective analysis-by-neural-synthesis supervision to improve the perceived quality of the reconstructed expressions. For this, we replace the differentiable rendering step of self-supervised approaches with an image-to-image translator based on U-Net [68]. Given a monochromatic rendering of the geometry together with sparsely sampled pixels of the input image, this U-Net generates an image which is then compared to the input image. Our key observation is that this neural rendering provides more accurate gradients for the task of expressive 3D face reconstruction. This approach has two advantages. First, by providing the rendered predicted mesh without appearance to the generator, the system is forced to rely on the geometry of the rendered mesh for recreating the input, leading to more faithful reconstructions. Second, the generator can create novel images, that modify the expression of the input. We leverage this while training with an expression consistency/augmentation loss. This renders a mesh of the input identity under a novel expression, renders an image with the generator, project the rendering through the encoder, and penalizes the difference between the augmented and the reconstructed expression parameters. 
By employing parameters from complex and extreme expressions captured under controlled laboratory settings, the network learns to handle non-typical expressions that are underrepresented in the data, promoting generalization. Our extensive experiments demonstrate that" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 305, + 72, + 545, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 133 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 133 + ], + "type": "text", + "content": "SMIRK faithfully captures a wide range of facial expressions (Fig. 1), including challenging cases such as asymmetric and subtle expressions (e.g., smirking). This result is highlighted by the conducted user study, where SMIRK significantly outperformed all competing methods." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 133, + 545, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 133, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 305, + 133, + 545, + 204 + ], + "type": "text", + "content": "In summary, our contributions are: 1) A method to faithfully recover expressive 3D faces from an input image.2) A novel analysis-by-neural-synthesis supervision that improves the quality of the reconstructed expressions. 3) A cycle-based expression consistency loss that augments expressions during training." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 215, + 392, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 215, + 392, + 227 + ], + "spans": [ + { + "bbox": [ + 306, + 215, + 392, + 227 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 235, + 545, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 235, + 545, + 366 + ], + "spans": [ + { + "bbox": [ + 305, + 235, + 545, + 366 + ], + "type": "text", + "content": "Over the past two decades, the field of monocular 3D face reconstruction has witnessed extensive research and development [26, 98]. Model-free approaches directly regress 3D meshes [4, 19, 22, 27, 43, 69, 71, 74, 87, 89, 92] or voxels [41], or adapt a Signed Distance Function [17, 63, 91] for image fitting. These techniques commonly depend on extensive 3D training data, often generated using a 3D face model. However, this dependency can constrain their expressiveness due to limitations inherent to data creation [4, 19, 27, 41, 43, 69, 87] and disparities between synthetic and real images [22, 71, 92]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 367, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 367, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 367, + 545, + 713 + ], + "type": "text", + "content": "Many works estimate parameters of established 3D Morphable Models (3DMMs), like BFM [64], FaceWarehouse [14], or FLAME [53]. This can be achieved using direct optimization procedure in an analysis-by-synthesis framework [3, 6-8, 15, 30, 34, 47, 52, 65, 67, 78-80], but this needs to be applied on novel images every time, which is computationally expensive. 
Recent deep learning approaches offer fast and robust estimation of 3DMM parameters, using either supervised [16, 36, 46, 66, 82, 83, 94, 96, 97] or self-supervised training, for which different types of supervision have been proposed and used in combination, with the most important being the following: a) 2D landmark supervision [20, 28, 55, 70, 72, 75-77, 90] is critical for coarse facial geometry and alignment, but is limited by the sparsity and potential inaccuracy of the predicted landmarks, particularly for complex expressions and poses. Methods that rely on dense landmarks [4, 88] overcome the sparsity problem but their accuracy is limited by the inherent ambiguity of dense correspondences across different faces. b) Photometric constraints [20, 28, 33, 72, 75-77, 90] are particularly effective for facial data, but are susceptible to alignment errors and depend on the quality of the rendered image. c) Perceptual losses have been proven beneficial in aligning the output with human perception [93]. Several methods make use of this by applying perceptual feature losses of expert networks for identity recognition [20, 28, 32, 33, 72], emotion [18] or lip articulation [29, 37], but such losses are hard to balance with other terms and can sometimes produce exaggerated results, particularly in terms of expressions." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "2491" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "We explore an alternative approach, where an image-to-image translation model is coupled with a simple photometric error, encouraging more nuanced details to be explained by the geometry." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 121, + 289, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 121, + 289, + 371 + ], + "spans": [ + { + "bbox": [ + 46, + 121, + 289, + 371 + ], + "type": "text", + "content": "Closer to our work are methods that simultaneously train a regressor network and an appearance model to improve the photometric error signal. Booth et al. [10, 11] employ a 3DMM for shape estimation coupled with a PCA appearance model learned from images in-the-wild. Gecer et al. [32] extend this idea by using a GAN to model the facial appearance more effectively. [58, 76, 77, 84, 85] learn nonlinear models of shape and expression while training a regressor in a self-supervised manner. Lin et al. [54] refine an initial 3DMM texture while training the regressor. Several other works learn neural appearance models for faces from large datasets [5, 32, 48-50, 57]. In this work, we do not learn a new appearance model, but directly use a generator for better geometry supervision, achieving significantly improved expression estimation. Also related to this work are approaches that train a conditional generative model that transforms a rendering of a mesh model into a realistic image, e.g. [21, 23, 24, 35, 45, 62]. 
While their focus is on controllable image generation, we investigate here how a generator of average capacity can improve supervision for the task of 3D face reconstruction." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 381, + 261, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 381, + 261, + 395 + ], + "spans": [ + { + "bbox": [ + 47, + 381, + 261, + 395 + ], + "type": "text", + "content": "3. Method: Analysis-by-Neural-Synthesis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 401, + 287, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 401, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 401, + 287, + 568 + ], + "type": "text", + "content": "SMIRK is inspired by recent self-supervised face reconstruction methods [18, 28, 29, 94] that combine an analysis-by-synthesis approach with deep learning. While the majority of these works produce renderings based on linear statistical models and Lambertian reflectance, SMIRK contributes with a novel neural rendering module that bridges the domain gap between the input and the synthesized output. By minimizing this discrepancy, SMIRK enables a stronger supervision signal within an analysis-by-synthesis framework. Notably, this means that neural-network based losses such as perceptual [42], identity [20, 28], or emotion [18] can be used to compare the reconstructed and input images without the typical domain-gap problem that is present in most works." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 575, + 129, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 575, + 129, + 586 + ], + "spans": [ + { + "bbox": [ + 47, + 575, + 129, + 586 + ], + "type": "text", + "content": "3.1. Architecture" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": "Face Model: SMIRK employs FLAME [53] to model the 3D geometry of a face, which generates a mesh of " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "n_v = 5023" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": " vertices based on identity " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": " and expression " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\psi_{expr}" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": " parameters, extended with two blendshapes " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\psi_{eye}" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": " to account for eye closure [97], as well as jaw rotation " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\theta_{jaw}" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": " parameters. 
Additionally, we consider the rigid pose " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\theta_{pose}" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": " and the orthographic camera parameters " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": ". For brevity, we refer to all expression parameters (i.e. " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\psi_{expr}" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\psi_{eye}" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\theta_{jaw}" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": ") as " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": ", and all global transformation parameters (i.e. " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\theta_{pose}" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": ") as " + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 594, + 287, + 712 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "Encoder: The encoder " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "E(.)" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " is a deep neural network that takes an image " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " as input and regresses FLAME parameters. 
We separate " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " into three different branches, each consisting of a MobilenetV3 [39] backbone: 1) " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "E_{\\psi}" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": ", which predicts the expression parameters " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": ", 2) " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "E_{\\beta}" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " that predicts the shape parameters " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": ", and 3) " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "E_{\\theta}" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " that predicts the global transformation coefficients " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": ". Formally," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 337, + 167, + 545, + 180 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 167, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 337, + 167, + 545, + 180 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\theta} = E _ {\\boldsymbol {\\theta}} (I), \\quad \\beta = E _ {\\boldsymbol {\\beta}} (I), \\quad \\psi = E _ {\\psi} (I). \\tag {1}", + "image_path": "1bf96a58afb18f895e39d6f34e0b28c0fdf658e529fb738e4c491a7aa1937f3a.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 191, + 545, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 191, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 304, + 191, + 545, + 262 + ], + "type": "text", + "content": "Since the main focus of this work is on improving facial expression reconstruction, we assume at train time that " + }, + { + "bbox": [ + 304, + 191, + 545, + 262 + ], + "type": "inline_equation", + "content": "E_{\\theta}" + }, + { + "bbox": [ + 304, + 191, + 545, + 262 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 191, + 545, + 262 + ], + "type": "inline_equation", + "content": "E_{\\beta}" + }, + { + "bbox": [ + 304, + 191, + 545, + 262 + ], + "type": "text", + "content": " were pre-trained and remain frozen. Note that unlike previous methods [18, 28, 29], " + }, + { + "bbox": [ + 304, + 191, + 545, + 262 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 191, + 545, + 262 + ], + "type": "text", + "content": " does not predict albedo parameters since the neural rendering module does not require such explicit information." 
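As a concrete illustration of Eq. (1), the following is a minimal PyTorch sketch of a three-branch encoder of this kind. The class name SmirkEncoder and the parameter dimensionalities are assumptions made only for this example; they are not taken from the paper or its released code, and the sketch does not reproduce details such as the split of jaw and eyelid parameters.

```python
# Minimal sketch (not the authors' implementation) of a three-branch encoder E:
# separate MobileNetV3 backbones regress expression psi, shape beta, and global
# transformation theta from the same face crop, mirroring Eq. (1).
import torch
import torch.nn as nn
from torchvision.models import mobilenet_v3_small


class SmirkEncoder(nn.Module):
    def __init__(self, n_expr=50, n_shape=300, n_pose_cam=9):  # dimensions are illustrative assumptions
        super().__init__()
        # one backbone per branch, so each branch can be trained or frozen independently
        self.E_psi = mobilenet_v3_small(num_classes=n_expr)        # expression parameters psi
        self.E_beta = mobilenet_v3_small(num_classes=n_shape)      # identity shape parameters beta
        self.E_theta = mobilenet_v3_small(num_classes=n_pose_cam)  # rigid pose + orthographic camera theta

    def forward(self, image):
        # image: (B, 3, H, W) cropped face image
        return {
            "psi": self.E_psi(image),
            "beta": self.E_beta(image),
            "theta": self.E_theta(image),
        }


# usage: enc = SmirkEncoder(); params = enc(torch.rand(1, 3, 224, 224))
```

Keeping the branches as separate backbones matches the setup described next, where the shape and pose branches stay frozen while only the expression branch is optimized.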
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "text", + "content": "Neural Renderer: The neural renderer is designed to replace traditional graphics-based rendering with an image-to-image convolutional network " + }, + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "text", + "content": ". The key idea here is to provide " + }, + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "text", + "content": " with an input image where the face is masked out and only a small number of randomly sampled pixels within the mask remain, along with the predicted facial geometry from the encoder " + }, + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "text", + "content": ". By limiting the available relevant information from the input image, " + }, + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "text", + "content": " is forced to rely on the predicted geometry from " + }, + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 263, + 545, + 371 + ], + "type": "text", + "content": " to accurately reconstruct it." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "spans": [ + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": "Formally, let " + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "inline_equation", + "content": "S = R(\\theta, \\beta, \\psi)" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": " denote the output of the differentiable rasterization step, where " + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": " is the monochrome rendering of the reconstructed face mesh. The masking function " + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "inline_equation", + "content": "M(\\cdot)" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": " is applied to the input image " + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": ", masking out the face and retaining only a small number of random pixels within the mask. 
" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "inline_equation", + "content": "M(I)" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": " is then concatenated with " + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": ", and the resulting tensor is passed through the neural renderer " + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": " to produce a reconstruction of the original image " + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "inline_equation", + "content": "I' = T(S \\oplus M(I))" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": " denotes concatenation. A crucial property of this module is to assist the gradient flow towards the encoder. Therefore, we adopt a U-Net architecture [40, 68, 95] for " + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": ", since the shortcuts will allow the gradient to flow uninterrupted towards " + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 372, + 545, + 539 + ], + "type": "text", + "content": " (an ablation study on this can be found in the Suppl. Mat.)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 548, + 521, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 548, + 521, + 562 + ], + "spans": [ + { + "bbox": [ + 306, + 548, + 521, + 562 + ], + "type": "text", + "content": "3.2. Optimization of the SMIRK Components" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 567, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 567, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 567, + 545, + 628 + ], + "type": "text", + "content": "SMIRK is supervised with two separate training passes: a reconstruction path and an augmented expression cycle path. We alternate between these passes on each training iteration, optimizing their respective losses. We describe each in the following subsections." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 646, + 426, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 646, + 426, + 657 + ], + "spans": [ + { + "bbox": [ + 306, + 646, + 426, + 657 + ], + "type": "text", + "content": "3.2.1 Reconstruction Path" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "In the reconstruction path (Fig. 
2), the encoder " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": " regresses FLAME parameters from the input image " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": " and the resulting 3D face is rendered to obtain " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": ". Next, " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": " is masked out using the masking function " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "M(\\cdot)" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": ", is concatenated with " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2492" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 69, + 289, + 162 + ], + "blocks": [ + { + "bbox": [ + 48, + 69, + 289, + 162 + ], + "lines": [ + { + "bbox": [ + 48, + 69, + 289, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 69, + 289, + 162 + ], + "type": "image", + "image_path": "ae57b5a2b0a98fb6d122a38935d91b4c8f7f943dda471eb9bd2d14d944b75f9b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 169, + 288, + 236 + ], + "lines": [ + { + "bbox": [ + 46, + 169, + 288, + 236 + ], + "spans": [ + { + "bbox": [ + 46, + 169, + 288, + 236 + ], + "type": "text", + "content": "Figure 2. Reconstruction pass. An input image is passed to the encoder which regresses FLAME and camera parameters. A 3D shape is reconstructed, rendered with a differentiable rasterizer and finally translated into the output domain with the image translation network. Then, standard self-supervised landmark, photometric and perceptual losses are computed." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 246, + 278, + 342 + ], + "blocks": [ + { + "bbox": [ + 59, + 246, + 278, + 342 + ], + "lines": [ + { + "bbox": [ + 59, + 246, + 278, + 342 + ], + "spans": [ + { + "bbox": [ + 59, + 246, + 278, + 342 + ], + "type": "image", + "image_path": "68f7f94d2ee9d10a002d3ded380cbd5b1af7065263f18387c8b4cbd12c3a28b6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 346, + 288, + 381 + ], + "lines": [ + { + "bbox": [ + 46, + 346, + 288, + 381 + ], + "spans": [ + { + "bbox": [ + 46, + 346, + 288, + 381 + ], + "type": "text", + "content": "Figure 3. Masking Process. 
An input image is masked to obscure the face (upper path), then we sample random pixels to be unmasked (lower path)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 390, + 287, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 390, + 287, + 413 + ], + "spans": [ + { + "bbox": [ + 47, + 390, + 287, + 413 + ], + "type": "text", + "content": "and fed into " + }, + { + "bbox": [ + 47, + 390, + 287, + 413 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 47, + 390, + 287, + 413 + ], + "type": "text", + "content": " to obtain a reconstruction of the input image " + }, + { + "bbox": [ + 47, + 390, + 287, + 413 + ], + "type": "inline_equation", + "content": "I'" + }, + { + "bbox": [ + 47, + 390, + 287, + 413 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 414, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 414, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 414, + 287, + 605 + ], + "type": "text", + "content": "Masking: To promote the reliance of " + }, + { + "bbox": [ + 46, + 414, + 287, + 605 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 414, + 287, + 605 + ], + "type": "text", + "content": " on the 3D rendered face for reconstructing " + }, + { + "bbox": [ + 46, + 414, + 287, + 605 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 46, + 414, + 287, + 605 + ], + "type": "text", + "content": ", we need to mask out the face in the input image " + }, + { + "bbox": [ + 46, + 414, + 287, + 605 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 46, + 414, + 287, + 605 + ], + "type": "text", + "content": ". We do that by using the convex hull of detected 2D landmarks [13], dilated so that it fully covers the face. However, without any information of the face interior, training the translator becomes challenging since texture information, such as skin color, facial hair or even accessories (e.g., glasses) are \"distractors\" that complicate training. To address this we randomly sample and retain a small amount of pixels (1%) that are used as guidance for the image reconstruction. Note that sampling too many pixels makes the reconstruction overly guided and the 3D rendered face does not control the reconstruction output. We observed a similar behavior when we tried to randomly mask out blocks of the image, as in [38]. The masking process is depicted in Fig. 3." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 605, + 287, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 629 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 629 + ], + "type": "text", + "content": "Loss functions: The reconstruction path is supervised with the following loss functions:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 629, + 287, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 629, + 287, + 654 + ], + "spans": [ + { + "bbox": [ + 47, + 629, + 287, + 654 + ], + "type": "text", + "content": "Photometric loss. 
This is the L1 error between the input and the output images: " + }, + { + "bbox": [ + 47, + 629, + 287, + 654 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{photo} = \\| I' - I\\| _1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 654, + 287, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 287, + 701 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 287, + 701 + ], + "type": "text", + "content": "VGG loss. The VGG loss [42] has a similar effect to the photometric one, but helps to converge faster in the initial phases of training: " + }, + { + "bbox": [ + 46, + 654, + 287, + 701 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{vgg} = \\| \\Gamma(I') - \\Gamma(I)\\|_1" + }, + { + "bbox": [ + 46, + 654, + 287, + 701 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 654, + 287, + 701 + ], + "type": "inline_equation", + "content": "\\Gamma(.)" + }, + { + "bbox": [ + 46, + 654, + 287, + 701 + ], + "type": "text", + "content": " represents the VGG perceptual encoder." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 59, + 701, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 287, + 714 + ], + "type": "text", + "content": "Landmark loss. The landmark loss, denoted as " + }, + { + "bbox": [ + 59, + 701, + 287, + 714 + ], + "type": "inline_equation", + "content": "L_{lmk} =" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 306, + 69, + 547, + 166 + ], + "blocks": [ + { + "bbox": [ + 306, + 69, + 547, + 166 + ], + "lines": [ + { + "bbox": [ + 306, + 69, + 547, + 166 + ], + "spans": [ + { + "bbox": [ + 306, + 69, + 547, + 166 + ], + "type": "image", + "image_path": "dbbdb408efd0df96af2959913adf0fc4a9b71d8abe0fc12ce120a70981504ed3.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 172, + 545, + 239 + ], + "lines": [ + { + "bbox": [ + 304, + 172, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 304, + 172, + 545, + 239 + ], + "type": "text", + "content": "Figure 4. Augmented cycle pass. The FLAME expression parameters of an existing reconstruction are modified. The resulting modified face is then rendered using our neural renderer. The rendering is then passed to the face reconstruction encoder to regress the FLAME parameters and a consistency loss between the modified input and reconstructed FLAME parameters is computed." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "spans": [ + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{K}\\left\\|\\mathbf{k}-\\mathbf{k}'\\right\\|_{2}^{2}" + }, + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "type": "text", + "content": ", measures the " + }, + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "type": "text", + "content": " norm between the ground-truth 2D facial landmarks detected in the input image " + }, + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "type": "inline_equation", + "content": "(\\mathbf{k})" + }, + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "type": "text", + "content": " and the 2D landmarks projected from the predicted 3D mesh " + }, + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "type": "inline_equation", + "content": "(\\mathbf{k}')" + }, + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "type": "text", + "content": ", summed over " + }, + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 253, + 545, + 303 + ], + "type": "text", + "content": " landmarks." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 304, + 545, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 304, + 545, + 340 + ], + "spans": [ + { + "bbox": [ + 304, + 304, + 545, + 340 + ], + "type": "text", + "content": "Expression Regularization. We employ an " + }, + { + "bbox": [ + 304, + 304, + 545, + 340 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 304, + 304, + 545, + 340 + ], + "type": "text", + "content": " regularization over the expression parameters " + }, + { + "bbox": [ + 304, + 304, + 545, + 340 + ], + "type": "inline_equation", + "content": "L_{reg} = \\|\\psi\\|_2^2" + }, + { + "bbox": [ + 304, + 304, + 545, + 340 + ], + "type": "text", + "content": ", penalizing extreme, unrealistic expressions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "spans": [ + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "text", + "content": "Emotion Loss. Finally, to obtain reconstructions that faithfully capture the emotional content, we employ an emotion loss " + }, + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{emo}" + }, + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "text", + "content": " based on features extracted from a pretrained emotion recognition network " + }, + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "inline_equation", + "content": "P_{e}" + }, + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "text", + "content": ", as in EMOCA [18]: " + }, + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{emo} = \\| P_e(I') - P_e(I)\\| _2^2" + }, + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "text", + "content": ". 
To prevent the image translator from adversarially optimizing the emotion loss by perturbing a few pixels, for this loss we keep the image translator " + }, + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "text", + "content": " \"frozen\", optimizing only the expression encoder " + }, + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "inline_equation", + "content": "E_{\\psi}" + }, + { + "bbox": [ + 304, + 341, + 546, + 473 + ], + "type": "text", + "content": ". Note that unlike EMOCA, our framework ensures that the emotion loss does not suffer from domain gap problems, as the compared images reside in the same space." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 490, + 486, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 490, + 486, + 502 + ], + "spans": [ + { + "bbox": [ + 306, + 490, + 486, + 502 + ], + "type": "text", + "content": "3.2.2 Augmented Expression Cycle Path" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 509, + 545, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 509, + 545, + 653 + ], + "spans": [ + { + "bbox": [ + 304, + 509, + 545, + 653 + ], + "type": "text", + "content": "While the reconstruction path improves 3D reconstruction thanks to the better supervision signal provided by the neural module, it is still affected by a lack of expression diversity in the training datasets, a problem shared by all previous methods. This means, for example, that if a complex lip structure that is scarcely seen in the training data cannot be reproduced quickly enough by the encoder, the translator " + }, + { + "bbox": [ + 304, + 509, + 545, + 653 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 509, + 545, + 653 + ], + "type": "text", + "content": " could learn to correlate misaligned 3D lip structures with images, and thus multiple similar, but distinct, facial expressions would be collapsed to a single reconstructed representation. Further, this may lead to the translator compensating for the encoder's failures during the joint optimization." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "content": "These issues are addressed with the augmented expression cycle consistency path. In this path, we start from the predicted set " + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "inline_equation", + "content": "\\beta, \\psi, \\theta" + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "content": ", and replace the original predicted expression " + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "content": " with a new one " + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "inline_equation", + "content": "\\psi_{aug}" + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "content": ". 
We then use the translator " + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "content": " to generate a photorealistic image " + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "inline_equation", + "content": "I_{aug}^{\\prime}" + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "content": " which adheres to" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2493" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 156 + ], + "type": "text", + "content": "it. This process effectively synthesizes an augmented training pair of " + }, + { + "bbox": [ + 46, + 72, + 287, + 156 + ], + "type": "inline_equation", + "content": "\\psi_{aug}" + }, + { + "bbox": [ + 46, + 72, + 287, + 156 + ], + "type": "text", + "content": " and the corresponding output image " + }, + { + "bbox": [ + 46, + 72, + 287, + 156 + ], + "type": "inline_equation", + "content": "I_{aug}^{\\prime}" + }, + { + "bbox": [ + 46, + 72, + 287, + 156 + ], + "type": "text", + "content": ". Then, the image is fed into " + }, + { + "bbox": [ + 46, + 72, + 287, + 156 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 72, + 287, + 156 + ], + "type": "text", + "content": " which should perfectly recover " + }, + { + "bbox": [ + 46, + 72, + 287, + 156 + ], + "type": "inline_equation", + "content": "\\psi_{aug}" + }, + { + "bbox": [ + 46, + 72, + 287, + 156 + ], + "type": "text", + "content": ". A cycle consistency loss can now be directly applied in the expression parameter space of the 3D model, enforcing the predicted expression to be as close as possible to the initial one. This concept is illustrated in Fig. 4." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 157, + 288, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 157, + 288, + 384 + ], + "spans": [ + { + "bbox": [ + 46, + 157, + 288, + 384 + ], + "type": "text", + "content": "The benefit of this cycle path is two-fold: 1) it reduces over-compensation errors via the consistency loss and 2) it promotes diverse expressions. The latter further helps consistency by avoiding the collapse of neighboring expressions into a single parameter representation. Concerning the consistency property, we can distinguish two overcompensating factors. First, during the joint optimization of the encoder and the translator, the latter can compensate when the encoder provides erroneous predictions, leading to an overall sub-par reconstruction. Second, if we discard the consistency loss, the expression will try to over-compensate erroneous shape/pose, since we assume the shape/pose parameters are predicted from an already trained system and they are not optimized in our framework. As an example, if the shape parameters do not fully capture an elongated nose, which is an identity characteristic of the person, the expression parameters may compensate this error. 
Such behavior is problematic because it entangles expression, shape and pose and adds undesired biases during training." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 387, + 287, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 387, + 287, + 542 + ], + "spans": [ + { + "bbox": [ + 46, + 387, + 287, + 542 + ], + "type": "text", + "content": "Pixel Transfer: The masking process retains a small amount of pixels within the face area. However, when a new expression is introduced, the previously selected pixels need to be updated and transferred such that they correspond with the vertices of the new expression. This operation is referred to as pixel transfer, where we sample pixels from the initial image according to a selected set of vertices, we then find the new position of the same vertices for the updated expression, and we assign their position as the new pixel, with the initial pixel value. This avoids inconsistencies between the underlying structure of the pixels (initial expression) and the new expression, which would hinder realistic reconstructions in the cycle path." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 544, + 287, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 628 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 628 + ], + "type": "text", + "content": "Promoting Diverse Expressions: Ideally, in this path we also want to promote high variations in the expression parameter space, generating shapes (and their corresponding images) with complex, rare and asymmetric expressions that are still plausible. To effectively augment the cycle path with interesting variations we consider the following augmentations:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 630, + 287, + 714 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 47, + 630, + 258, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 630, + 258, + 641 + ], + "spans": [ + { + "bbox": [ + 47, + 630, + 258, + 641 + ], + "type": "text", + "content": "- Permutation: permute the expressions in a batch." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 642, + 287, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 642, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 47, + 642, + 287, + 666 + ], + "type": "text", + "content": "- Perturbation: add non-trivial noise to the reconstructed expression parameters." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 666, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 287, + 714 + ], + "type": "text", + "content": "- Template Injection: use expression templates of extreme expressions. 
To obtain such parameters for FLAME we perform direct iterative parameter fitting on the FaMoS [9] dataset which depicts multiple subjects per" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 307, + 70, + 545, + 166 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 545, + 166 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 545, + 166 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 545, + 166 + ], + "type": "image", + "image_path": "796c7fd999bd73be834afc7b28dd0712b4c9c2b30db24e8ece381c66515d6acf.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 173, + 545, + 229 + ], + "lines": [ + { + "bbox": [ + 304, + 173, + 545, + 229 + ], + "spans": [ + { + "bbox": [ + 304, + 173, + 545, + 229 + ], + "type": "text", + "content": "Figure 5. Neural expression augmentation. Our neural renderer enables us to modify the expression, generating a new image-3D training pair. We can edit the expression with random noise, permutation from other reconstructions, template injection, or zeroing." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 252, + 488, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 252, + 488, + 263 + ], + "spans": [ + { + "bbox": [ + 313, + 252, + 488, + 263 + ], + "type": "text", + "content": "form extreme and asymmetric expressions." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 264, + 545, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 264, + 545, + 288 + ], + "spans": [ + { + "bbox": [ + 306, + 264, + 545, + 288 + ], + "type": "text", + "content": "- Zero Expression: neutral expressions help avoid biasing the system towards complex cases." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 292, + 545, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 292, + 545, + 375 + ], + "spans": [ + { + "bbox": [ + 304, + 292, + 545, + 375 + ], + "type": "text", + "content": "For all expression augmentations, we simultaneously simulate jaw and eyelid openings/closings, with more aggressive augmentations in the zero-expression case to avoid incompatible blending with intense expressions. Fig. 5 presents visual examples of all augmentations and the corresponding generated images from " + }, + { + "bbox": [ + 304, + 292, + 545, + 375 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 292, + 545, + 375 + ], + "type": "text", + "content": ", showcasing its ability to generate realistic images with notable expression manipulation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 380, + 373, + 390 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 380, + 373, + 390 + ], + "spans": [ + { + "bbox": [ + 306, + 380, + 373, + 390 + ], + "type": "text", + "content": "Loss functions:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 395, + 545, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 395, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 304, + 395, + 545, + 443 + ], + "type": "text", + "content": "Expression Consistency. 
The expression consistency loss, or cycle loss for brevity, is the mean-squared error between the given augmented expression parameters " + }, + { + "bbox": [ + 304, + 395, + 545, + 443 + ], + "type": "inline_equation", + "content": "\\psi_{aug}" + }, + { + "bbox": [ + 304, + 395, + 545, + 443 + ], + "type": "text", + "content": " and the predicted expressions at the end of the cycle path:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 312, + 460, + 545, + 475 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 460, + 545, + 475 + ], + "spans": [ + { + "bbox": [ + 312, + 460, + 545, + 475 + ], + "type": "interline_equation", + "content": "\\mathcal{L}_{exp} = \\left\\| E_{\\psi}\\left(T\\left(R(\\boldsymbol{\\theta}, \\boldsymbol{\\beta}, \\psi_{aug}) \\oplus M(I)\\right)\\right) - \\psi_{aug} \\right\\|_{2}^{2} \\tag{2}", + "image_path": "fa5bd8aefb1fd17ad047a7e97c1be32aba9ba215537595ba326c3fd05ae39ae9.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 491, + 545, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 491, + 545, + 550 + ], + "spans": [ + { + "bbox": [ + 304, + 491, + 545, + 550 + ], + "type": "text", + "content": "The pose/cam and shape parameters are kept as predicted by the initial image, namely " + }, + { + "bbox": [ + 304, + 491, + 545, + 550 + ], + "type": "inline_equation", + "content": "\\theta = E_{\\theta}(I)" + }, + { + "bbox": [ + 304, + 491, + 545, + 550 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 491, + 545, + 550 + ], + "type": "inline_equation", + "content": "\\beta = E_{\\beta}(I)" + }, + { + "bbox": [ + 304, + 491, + 545, + 550 + ], + "type": "text", + "content": ". The internal " + }, + { + "bbox": [ + 304, + 491, + 545, + 550 + ], + "type": "inline_equation", + "content": "E_{\\psi}(I)" + }, + { + "bbox": [ + 304, + 491, + 545, + 550 + ], + "type": "text", + "content": " operation, inside the renderer " + }, + { + "bbox": [ + 304, + 491, + 545, + 550 + ], + "type": "inline_equation", + "content": "R(\\cdot)" + }, + { + "bbox": [ + 304, + 491, + 545, + 550 + ], + "type": "text", + "content": ", does not allow gradients to flow through and is used as an off-the-shelf frozen module." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 554, + 545, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 554, + 545, + 625 + ], + "spans": [ + { + "bbox": [ + 304, + 554, + 545, + 625 + ], + "type": "text", + "content": "Identity Consistency. To aid the translator in faithfully reconstructing the identity of the person, we introduce an additional consistency loss similar to Eq. 2, applied to the shape parameters " + }, + { + "bbox": [ + 304, + 554, + 545, + 625 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 554, + 545, + 625 + ], + "type": "text", + "content": ". Note that since the shape encoder " + }, + { + "bbox": [ + 304, + 554, + 545, + 625 + ], + "type": "inline_equation", + "content": "E_{\\beta}" + }, + { + "bbox": [ + 304, + 554, + 545, + 625 + ], + "type": "text", + "content": " is frozen, the consistency loss only affects the optimization of the translator " + }, + { + "bbox": [ + 304, + 554, + 545, + 625 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 554, + 545, + 625 + ], + "type": "text", + "content": "." 
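To make Eq. (2) concrete, here is a minimal PyTorch sketch of how such a cycle loss could be computed. The callables rasterize, mask_face, translator, and expression_encoder are hypothetical stand-ins for R, M, T, and E_psi; the placement of the gradient stop follows the frozen-rasterizer reading of the paragraph above and is an assumption, not the authors' exact implementation.

```python
# Sketch of the expression cycle-consistency loss of Eq. (2), under assumed interfaces.
import torch
import torch.nn.functional as F


def expression_cycle_loss(image, theta, beta, psi_aug,
                          rasterize, mask_face, translator, expression_encoder):
    # R(theta, beta, psi_aug): monochrome rendering of the mesh carrying the augmented
    # expression; treated here as a frozen, non-differentiable step.
    with torch.no_grad():
        rendering = rasterize(theta, beta, psi_aug)

    # M(I): the face is masked out except for a few retained pixels (after pixel transfer).
    masked = mask_face(image)

    # T(R(...) concatenated with M(I)): the neural renderer synthesizes the augmented image I'_aug.
    augmented_image = translator(torch.cat([rendering, masked], dim=1))

    # E_psi should recover the expression that was injected; penalize the squared error.
    psi_pred = expression_encoder(augmented_image)
    return F.mse_loss(psi_pred, psi_aug)
```

Depending on which pass of the alternating optimization is running, either translator or expression_encoder would be frozen before calling this function.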
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 630, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 714 + ], + "type": "text", + "content": "Alternating Optimization: Overall, we alternate between the two passes, aiming to further reduce the effect of the translator compensating for the encoder. In more detail, during the augmented cycle pass, we freeze alternatively the encoder and the translator. Thus, this pass avoids the joint optimization of the two networks in a single step, acting as a regularizer to the other pass and enforcing consistency." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2494" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 100, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 100, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 100, + 83 + ], + "type": "text", + "content": "4. Results" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 91, + 287, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 91, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 46, + 91, + 287, + 139 + ], + "type": "text", + "content": "We now present objective and subjective evaluations of our method, along with comparisons with recent state of the art. Additional experimental evaluations and visualizations can be found in our Suppl. Mat. and demo video." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 144, + 164, + 158 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 144, + 164, + 158 + ], + "spans": [ + { + "bbox": [ + 47, + 144, + 164, + 158 + ], + "type": "text", + "content": "4.1. Experimental Setup" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 163, + 287, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 163, + 287, + 210 + ], + "spans": [ + { + "bbox": [ + 46, + 163, + 287, + 210 + ], + "type": "text", + "content": "Training Datasets: We use the following datasets for training: FFHQ [44], CelebA [56], LRS3 [1], and MEAD [86]. LRS3 and MEAD are video datasets, and we randomly sample images from each video during training." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 211, + 287, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 211, + 287, + 270 + ], + "spans": [ + { + "bbox": [ + 46, + 211, + 287, + 270 + ], + "type": "text", + "content": "SOTA Methods: We compare with the following recent state-of-the-art methods that have publicly available implementations: DECA [28] and EMOCA v2 [18, 29], which use the FLAME [53] model, and Deep3DFace [20] and FOCUS [51], which use the BFM [64] model." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 271, + 287, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 271, + 287, + 330 + ], + "spans": [ + { + "bbox": [ + 46, + 271, + 287, + 330 + ], + "type": "text", + "content": "Pretraining: Before the core training stage, all three encoders are pretrained, supervised by two losses - the landmark loss of the reconstruction for pose and expression and the shape predictions of MICA [97]. After that, " + }, + { + "bbox": [ + 46, + 271, + 287, + 330 + ], + "type": "inline_equation", + "content": "E_{\\beta}" + }, + { + "bbox": [ + 46, + 271, + 287, + 330 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 271, + 287, + 330 + ], + "type": "inline_equation", + "content": "E_{\\theta}" + }, + { + "bbox": [ + 46, + 271, + 287, + 330 + ], + "type": "text", + "content": " remain frozen." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 336, + 187, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 336, + 187, + 348 + ], + "spans": [ + { + "bbox": [ + 47, + 336, + 187, + 348 + ], + "type": "text", + "content": "4.2. Quantitative Evaluations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 354, + 287, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 354, + 287, + 462 + ], + "spans": [ + { + "bbox": [ + 46, + 354, + 287, + 462 + ], + "type": "text", + "content": "It has been consistently reported [2, 18, 29, 31, 60] that evaluating facial expression reconstruction in terms of geometric metrics is ill-posed. The geometric errors tend to be dominated by the identity face shape and do not correlate well with human perception of facial expressions. Accordingly, we compare our method in a quantitative manner with three experiments: 1) emotion recognition accuracy [18], 2) ability of a model to guide a UNet to faithfully reconstruct an input image, and 3) a perceptual user study." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 463, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 463, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 463, + 287, + 713 + ], + "type": "text", + "content": "Emotion Recognition: Following the protocol of [18], we train an MLP to classify eight basic expressions and regress valence and arousal values using AffectNet [59]. We report Concordance Correlation Coefficient (CCC), root mean square error (RMSE), for both valence (V-) and arousal (A-), and expression classification accuracy (E-ACC). Results are found in Tab. 1. As it can be seen, SMIRK achieves a higher emotion recognition score compared to most other methods, although falling behind EMOCAv1/2 and Deep3DFace. It is worth noting that, although EMOCA v1 achieves the highest emotion accuracy, it often overexaggerates expressions which helps with emotion recognition. EMOCA v2, arguably a more accurate reconstruction model, performs slightly worse. Our main model is comparable with Deep3DFace and outperforms DECA and FOCUS. We can also train a model that scores better on emotion recognition, by increasing the emotion loss weight. However, similarly to what was reported by Daněček et al. [18], this leads to undesirable artifacts. We discuss the trade-off between higher emotion recognition scores and reconstruction accuracy in more detail in Sup.Mat. 
Notably," + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 310, + 71, + 545, + 178 + ], + "blocks": [ + { + "bbox": [ + 310, + 71, + 545, + 178 + ], + "lines": [ + { + "bbox": [ + 310, + 71, + 545, + 178 + ], + "spans": [ + { + "bbox": [ + 310, + 71, + 545, + 178 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>V-CCC ↑</td><td>V-RMSE ↓</td><td>A-CCC ↑</td><td>A-RMSE ↓</td><td>E-ACC ↑</td></tr>
<tr><td>MGCNet</td><td>0.69</td><td>0.35</td><td>0.58</td><td>0.34</td><td>0.60</td></tr>
<tr><td>3DDFA-v2</td><td>0.62</td><td>0.39</td><td>0.50</td><td>0.34</td><td>0.52</td></tr>
<tr><td>Deep3DFace</td><td>0.73</td><td>0.33</td><td>0.65</td><td>0.31</td><td>0.65</td></tr>
<tr><td>DECA</td><td>0.69</td><td>0.36</td><td>0.58</td><td>0.33</td><td>0.59</td></tr>
<tr><td>FOCUS-CelebA</td><td>0.69</td><td>0.35</td><td>0.54</td><td>0.33</td><td>0.58</td></tr>
<tr><td>EMOCA v1</td><td>0.77</td><td>0.31</td><td>0.68</td><td>0.30</td><td>0.68</td></tr>
<tr><td>EMOCA v2</td><td>0.76</td><td>0.33</td><td>0.66</td><td>0.30</td><td>0.66</td></tr>
<tr><td>SMIRK</td><td>0.72</td><td>0.35</td><td>0.61</td><td>0.31</td><td>0.64</td></tr>
<tr><td>SMIRK w/o emo</td><td>0.71</td><td>0.35</td><td>0.60</td><td>0.32</td><td>0.62</td></tr></table>
", + "image_path": "191709f04840f34c2946294402a6156bc80ddc6459615733496fc359787deb55.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 223, + 545, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 223, + 545, + 270 + ], + "spans": [ + { + "bbox": [ + 304, + 223, + 545, + 270 + ], + "type": "text", + "content": "even without the emotion loss, the proposed model achieves a decent emotion recognition score, indicating that our reconstruction scheme can adequately capture emotions without the need for explicit perceptual supervision." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 271, + 546, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 271, + 546, + 558 + ], + "spans": [ + { + "bbox": [ + 304, + 271, + 546, + 558 + ], + "type": "text", + "content": "Reconstruction Loss: In order to evaluate the faithfulness of a 3D face reconstruction technique, we have devised a protocol based on our analysis-by-neural-synthesis method. Under this protocol, we train a UNet image-to-image translator, but freeze the weights of the encoder so that only the translator is trained. The motivation is simple: if the 3D mesh is accurate enough, the reconstruction will be more faithful, due to a one-to-one appearance correspondence. For each method (including ours for fairness), we train a UNet for 5 epochs, using the masked image and the rendered 3D geometry as input. Finally, we report the " + }, + { + "bbox": [ + 304, + 271, + 546, + 558 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 304, + 271, + 546, + 558 + ], + "type": "text", + "content": " reconstruction loss and the VGG loss between the reconstructed image and the input image on the test set of AffectNet [59] which features subjects under multiple expressions. The results can be seen in Table 2. We observe here that using the information for the rendered shape geometry of SMIRK, the trained UNet achieves a more faithful reconstruction of the input image when compared to DECA and EMOCAv2. Particularly for EMOCAv2, we observe that although it can capture expressions, the results in many cases do not faithfully represent the input image, leading to an overall worse image reconstruction error. In terms of " + }, + { + "bbox": [ + 304, + 271, + 546, + 558 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 304, + 271, + 546, + 558 + ], + "type": "text", + "content": " loss, SMIRK is on par with Deep3DFace and FOCUS and has a small improvement in terms of VGG loss." + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 309, + 567, + 543, + 601 + ], + "blocks": [ + { + "bbox": [ + 305, + 186, + 545, + 209 + ], + "lines": [ + { + "bbox": [ + 305, + 186, + 545, + 209 + ], + "spans": [ + { + "bbox": [ + 305, + 186, + 545, + 209 + ], + "type": "text", + "content": "Table 1. Emotion recognition performance on the AffectNet test set [59]. We follow the same metrics as in [18]." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 567, + 543, + 601 + ], + "lines": [ + { + "bbox": [ + 309, + 567, + 543, + 601 + ], + "spans": [ + { + "bbox": [ + 309, + 567, + 543, + 601 + ], + "type": "table", + "html": "
<tr><td></td><td>DECA</td><td>EMOCAv2</td><td>FOCUS</td><td>Deep3DFace</td><td>SMIRK</td></tr>
<tr><td>L1 Loss ↓</td><td>0.10</td><td>0.11</td><td>0.09</td><td>0.09</td><td>0.09</td></tr>
<tr><td>VGG Loss ↓</td><td>0.80</td><td>0.84</td><td>0.78</td><td>0.78</td><td>0.76</td></tr>
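The L1 and VGG numbers above are image-space reconstruction errors between the UNet output and the input image, following the Reconstruction Loss protocol described in the surrounding text. Below is a minimal sketch of how such metrics are commonly computed with PyTorch/torchvision; the VGG16 layer cut and the omitted ImageNet normalization are illustrative assumptions, not the authors' exact evaluation code.

```python
import torch
import torch.nn.functional as F
from torchvision.models import vgg16, VGG16_Weights

# Frozen VGG16 feature extractor used for the perceptual (VGG) distance.
_vgg = vgg16(weights=VGG16_Weights.IMAGENET1K_V1).features[:16].eval()
for p in _vgg.parameters():
    p.requires_grad_(False)

@torch.no_grad()
def reconstruction_metrics(recon: torch.Tensor, target: torch.Tensor):
    """recon, target: (B, 3, H, W) images in [0, 1]."""
    l1 = F.l1_loss(recon, target)                 # pixel-space L1 error
    vgg = F.l1_loss(_vgg(recon), _vgg(target))    # feature-space (VGG) error
    return l1.item(), vgg.item()
```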
", + "image_path": "f59f4aa97855dea7b092bf1cf6858a0c9e296992b561c5002425f945f1b4c731.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 609, + 545, + 643 + ], + "lines": [ + { + "bbox": [ + 305, + 609, + 545, + 643 + ], + "spans": [ + { + "bbox": [ + 305, + 609, + 545, + 643 + ], + "type": "text", + "content": "Table 2. Image reconstruction performance on the AffectNet test set [59]. SMIRK achieves better reconstruction and perceptual scores compared to other methods." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "type": "text", + "content": "User Study: Arguably, the perception of the reconstructed facial expressions is the most important aspect in 3D face reconstruction, as it directly influences how well the reconstructed model captures the emotions and nuances of the original face. Considering this, we also designed a" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2495" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 71, + 539, + 393 + ], + "blocks": [ + { + "bbox": [ + 52, + 71, + 539, + 393 + ], + "lines": [ + { + "bbox": [ + 52, + 71, + 539, + 393 + ], + "spans": [ + { + "bbox": [ + 52, + 71, + 539, + 393 + ], + "type": "image", + "image_path": "32cbe24dde0545ecccec380f483f1a5ecdd9eb46aa1a9f16fcf5d1f4837995ad.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 399, + 546, + 422 + ], + "lines": [ + { + "bbox": [ + 46, + 399, + 546, + 422 + ], + "spans": [ + { + "bbox": [ + 46, + 399, + 546, + 422 + ], + "type": "text", + "content": "Figure 6. Visual comparison of 3D face reconstruction. From left to right: Input, Deep3DFaceRecon[20], FOCUS[51], DECA[28], EMOCAv2[18], and SMIRK. Many more examples can also be found in the Suppl. Mat. and the demo video in our webpage." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 441, + 289, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 441, + 289, + 632 + ], + "spans": [ + { + "bbox": [ + 46, + 441, + 289, + 632 + ], + "type": "text", + "content": "user study to assess the perception of the reconstructed facial expressions from human participants. We randomly selected 80 images from the AffectNet [59] test set (using the split from [81]) and 80 images from our MEAD test set (unseen subjects) and performed 3D face reconstruction with both SMIRK and its competitors. To mitigate bias w.r.t. the identity component for the FLAME-based methods, for DECA and EMOCAv2 we used the same identity parameters as our method (which itself was distilled from MICA). In the user study, participants were shown an image of a human face alongside two 3D face reconstructions, either from our method or the others, and were asked to choose the one with the most faithful facial expression representation. 
The order was randomized for each question, and each user answered a total of 32 questions, equally distributed among the different methods." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 641, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 287, + 715 + ], + "type": "text", + "content": "A total of 85 users completed the study, and the results in Table 3 show that our method was significantly preferred over all competitors, confirming the performance of SMIRK in terms of faithful expressive 3D reconstruction. The results were statistically significant (for all pairs, " + }, + { + "bbox": [ + 46, + 641, + 287, + 715 + ], + "type": "inline_equation", + "content": "p < 0.01" + }, + { + "bbox": [ + 46, + 641, + 287, + 715 + ], + "type": "text", + "content": " with binomial test, adjusted using the Bonfer" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 305, + 441, + 545, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 441, + 545, + 489 + ], + "spans": [ + { + "bbox": [ + 305, + 441, + 545, + 489 + ], + "type": "text", + "content": "roni method). EMOCAv2, which also uses an emotion loss for expressive 3D reconstruction, was the closest competitor to our method, followed by FOCUS and Deep3D, while DECA was the least selected." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 325, + 500, + 525, + 526 + ], + "blocks": [ + { + "bbox": [ + 325, + 500, + 525, + 526 + ], + "lines": [ + { + "bbox": [ + 325, + 500, + 525, + 526 + ], + "spans": [ + { + "bbox": [ + 325, + 500, + 525, + 526 + ], + "type": "table", + "html": "
<tr><td></td><td>DECA</td><td>EMOCAv2</td><td>Deep3D</td><td>FOCUS</td></tr>
<tr><td>SMIRK</td><td>603/77</td><td>461/219</td><td>510/170</td><td>534/146</td></tr>
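Each preference pair above sums to 680 judgments, consistent with 85 participants answering 32 questions split evenly over the four method pairings, and the surrounding text reports significance via a binomial test with Bonferroni adjustment. The sketch below reproduces that check under stated assumptions (SciPy's binomtest with a one-sided alternative against the p = 0.5 "no preference" null); it is an illustration, not the authors' analysis script.

```python
from scipy.stats import binomtest

# Table 3 preference counts: (SMIRK preferred, competitor preferred)
pairs = {
    "DECA":    (603, 77),
    "EMOCAv2": (461, 219),
    "Deep3D":  (510, 170),
    "FOCUS":   (534, 146),
}

for method, (ours, theirs) in pairs.items():
    n = ours + theirs                         # 680 judgments per pairing
    res = binomtest(ours, n, p=0.5, alternative="greater")
    # Bonferroni correction over the four pairwise comparisons
    p_adj = min(res.pvalue * len(pairs), 1.0)
    print(f"SMIRK vs {method}: {ours}/{n} preferred, adjusted p = {p_adj:.2e}")
```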
", + "image_path": "039270fcb09646fdb5b653d678fbcf126c3bd8ed0123095bc39e6c0ec5897ea1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 532, + 545, + 565 + ], + "lines": [ + { + "bbox": [ + 305, + 532, + 545, + 565 + ], + "spans": [ + { + "bbox": [ + 305, + 532, + 545, + 565 + ], + "type": "text", + "content": "Table 3. User study results: \"a/b\" indicates Ours (left) was preferred " + }, + { + "bbox": [ + 305, + 532, + 545, + 565 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 305, + 532, + 545, + 565 + ], + "type": "text", + "content": " times,while the competing method was chosen " + }, + { + "bbox": [ + 305, + 532, + 545, + 565 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 305, + 532, + 545, + 565 + ], + "type": "text", + "content": " times. SMIRK is overwhelmingly preferred over all other methods." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 597, + 407, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 597, + 407, + 609 + ], + "spans": [ + { + "bbox": [ + 306, + 597, + 407, + 609 + ], + "type": "text", + "content": "4.3. Visual Examples" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "In Fig. 6 we present multiple visual comparisons with the four other methods. As it can be visually assessed, our method can more accurately capture the facial expressions across multiple diverse subjects and conditions. Furthermore, the presented methodology can also capture expressions that other methods fail to capture, such as nonsymmetric mouth movements, eye closures, and exaggerated expressions." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2496" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 147, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 147, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 147, + 83 + ], + "type": "text", + "content": "4.4. Ablation Studies" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 91, + 286, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 91, + 286, + 247 + ], + "spans": [ + { + "bbox": [ + 46, + 91, + 286, + 247 + ], + "type": "text", + "content": "Ablation on the effect of landmarks: We first assess the effect of the landmark loss. To do that, we calculate for different versions of our model the L1 loss, VGG Loss, and Cycle loss after manipulation of expressions using the same protocol we performed in Sec. 4.2. Note that this time, we also evaluate performance by considering the cycle loss. That is, we also manipulate the predicted expressions, regenerate a new image, and expect that the method can successfully predict the same parameters. 
We consider three different versions of our model: 1) Protocol 1 - no landmarks loss, 2) Protocol 2 - training some epochs with landmarks loss and then removing it, 3) Protocol 3 - full training with landmarks loss. We present these results in Table 4." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 249, + 286, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 249, + 286, + 380 + ], + "spans": [ + { + "bbox": [ + 46, + 249, + 286, + 380 + ], + "type": "text", + "content": "As we can see, completely omitting landmarks leads to degraded results. However, if we first train for a few epochs with landmarks and then set the loss weight to 0, the model achieves very similar performance with the original model which uses the loss throughout the full training. These results suggest that, in contrast with previous works [18, 28], the landmarks loss in SMIRK acts more as a regularizer during training, helping to guide the model towards good solutions, but in the later stages it may somewhat constrain its flexibility. We plan to explore this balance in more depth in future work." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 72, + 388, + 260, + 439 + ], + "blocks": [ + { + "bbox": [ + 72, + 388, + 260, + 439 + ], + "lines": [ + { + "bbox": [ + 72, + 388, + 260, + 439 + ], + "spans": [ + { + "bbox": [ + 72, + 388, + 260, + 439 + ], + "type": "table", + "html": "
<tr><td></td><td>L1 Loss ↓</td><td>VGG Loss ↓</td><td>Cycle Loss ↓</td></tr>
<tr><td>P1</td><td>0.111</td><td>0.757</td><td>0.588</td></tr>
<tr><td>P2</td><td>0.093</td><td>0.713</td><td>0.487</td></tr>
<tr><td>P3</td><td>0.093</td><td>0.714</td><td>0.544</td></tr>
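The Cycle Loss column above follows the protocol described in the ablation text: predicted expression parameters are manipulated, a new image is regenerated from them, and the encoder is expected to recover the same parameters. The sketch below only illustrates that cycle check; encoder, renderer, translator, and perturb are hypothetical placeholders for the corresponding SMIRK components, and the MSE distance is an assumption.

```python
import torch
import torch.nn.functional as F

def cycle_consistency_loss(image, encoder, renderer, translator, perturb):
    """Schematic cycle check: perturb expressions, regenerate, re-encode.

    encoder(image)          -> dict of FLAME-style parameters (incl. 'expression')
    renderer(params)        -> rendered geometry image for those parameters
    translator(geom, image) -> reconstructed face image
    perturb(expr)           -> altered expression parameters (augmentation)
    """
    params = encoder(image)
    new_expr = perturb(params["expression"])        # e.g. template injection
    new_params = {**params, "expression": new_expr}

    geom = renderer(new_params)                     # render the perturbed mesh
    regenerated = translator(geom, image)           # synthesize a new image

    recovered = encoder(regenerated)                # re-encode the synthesis
    # The encoder should recover the expressions it was asked to render.
    return F.mse_loss(recovered["expression"], new_expr)
```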
", + "image_path": "b7c03a1ac4390aa0bf227b877af582acf5538570f69aa4395124b3256bb53e12.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 444, + 286, + 477 + ], + "lines": [ + { + "bbox": [ + 46, + 444, + 286, + 477 + ], + "spans": [ + { + "bbox": [ + 46, + 444, + 286, + 477 + ], + "type": "text", + "content": "Table 4. Ablation study on the effect of landmark loss. P1: no landmark loss, P2: landmark loss removed after a few epochs, P3: landmark loss throughout whole training." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "type": "text", + "content": "Impact of Cycle Path: Here we also present examples on how the cycle path affects the reconstruction performance. First, we show an example result in Fig. 7, where we see that using the proposed augmentations provides more detailed expressions. For example, template injection augmentation considerably helps the reconstruction of the mouth structure. Secondly, we have also observed that the cycle path makes the model more robust, especially w.r.t. mouth closures (e.g. zero jaw opening). We show such indicative cases in Figure 8. Such artifacts can be seen when using the no-cycle variant, acting as a visual confirmation of the aforementioned numerical results. Here, the mouth is not properly closed in the 3D reconstructed face, since it was miss-corresponded to a properly closed mouth in the image reconstruction space. The cycle path can solve such instances by providing tweaked expressions that are enforced to be recognized correctly, avoiding \"misalignments\" between expected expressions and reconstructed images." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 354, + 71, + 498, + 128 + ], + "blocks": [ + { + "bbox": [ + 354, + 71, + 498, + 128 + ], + "lines": [ + { + "bbox": [ + 354, + 71, + 498, + 128 + ], + "spans": [ + { + "bbox": [ + 354, + 71, + 498, + 128 + ], + "type": "image", + "image_path": "50a9bbe87b32c8191eea95da6743dfa4f2869306879395f9805a487db95f85c1.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 136, + 545, + 159 + ], + "lines": [ + { + "bbox": [ + 305, + 136, + 545, + 159 + ], + "spans": [ + { + "bbox": [ + 305, + 136, + 545, + 159 + ], + "type": "text", + "content": "Figure 7. Impact of cycle augmentations. From left to right: input image, no cycle loss, cycle loss with all augmentations." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 163, + 545, + 247 + ], + "blocks": [ + { + "bbox": [ + 307, + 163, + 545, + 247 + ], + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 247 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 247 + ], + "type": "image", + "image_path": "6e72d6a2ac6dc79aaf57a9f8e8d0b44d5c966bd50a40055a45bae7dd16fa43ba.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 255, + 545, + 299 + ], + "lines": [ + { + "bbox": [ + 305, + 255, + 545, + 299 + ], + "spans": [ + { + "bbox": [ + 305, + 255, + 545, + 299 + ], + "type": "text", + "content": "Figure 8. Impact of the Cycle Path. Artifacts can appear when not training with the cycle path. 
From left to right: input image, 3D reconstruction and image reconstruction without cycle path, 3D reconstruction and image reconstruction with cycle path." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 312, + 382, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 312, + 382, + 323 + ], + "spans": [ + { + "bbox": [ + 306, + 312, + 382, + 323 + ], + "type": "text", + "content": "4.5. Limitations" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 329, + 545, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 329, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 304, + 329, + 545, + 449 + ], + "type": "text", + "content": "Despite the effectiveness of SMIRK, there are limitations to be addressed. It is sensitive to occlusions, as the training datasets do not include them, and assumes more intense expressions when parts are missing instead of extrapolating from available information. In addition, SMIRK has been trained on single images, and the temporal aspect is not yet explored. Also note that while SMIRK does not need to predict albedo and lighting, this can be limiting for specific applications in 3D facial animation and video editing. Please refer to the Suppl. Mat. for a more detailed discussion." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 460, + 378, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 460, + 378, + 471 + ], + "spans": [ + { + "bbox": [ + 306, + 460, + 378, + 471 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 479, + 545, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 479, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 304, + 479, + 545, + 611 + ], + "type": "text", + "content": "We have presented SMIRK, a new paradigm for accurate expressive 3D face reconstruction from images. Instead of the traditional graphics-based approach for self-supervision which is commonly used for monocular 3D face reconstruction in-the-wild, SMIRK employs a neural image-to-image translator model, which learns to reconstruct the input face image given the rendered predicted facial geometry. Our extensive experimental results show that SMIRK outperforms previous methods and can faithfully reconstruct expressive 3D faces, including challenging complex expressions such as asymmetries, and subtle expressions such as smirking." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 621, + 403, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 621, + 403, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 621, + 403, + 635 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "This research work was supported by the project \"Applied Research for Autonomous Robotic Systems\" (MIS 5200632) which is implemented within the framework of the National Recovery and Resilience Plan \"Greece 2.0\" (Measure: 16618- Basic and Applied Research) and is funded by the European Union- NextGenerationEU." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2497" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "text", + "content": "[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. Lrs3-ted: a large-scale dataset for visual speech recognition. arXiv preprint arXiv:1809.00496, 2018. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 287, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 287, + 179 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 287, + 179 + ], + "type": "text", + "content": "[2] Zakaria Aldeneh, Masha Fedzechkina, Skyler Seto, Katherine Metcalf, Miguel Sarabia, Nicholas Apostoloff, and Barry-John Theobald. Towards a Perceptual Model for Estimating the Quality of Visual Speech, 2022. arXiv:2203.10117 [cs, eess]. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 180, + 287, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 180, + 287, + 223 + ], + "spans": [ + { + "bbox": [ + 53, + 180, + 287, + 223 + ], + "type": "text", + "content": "[3] Oswald Aldrian and William AP Smith. Inverse rendering of faces with a 3d morphable model. IEEE transactions on pattern analysis and machine intelligence, 35(5):1080-1093, 2012. 1, 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 225, + 287, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 225, + 287, + 289 + ], + "spans": [ + { + "bbox": [ + 53, + 225, + 287, + 289 + ], + "type": "text", + "content": "[4] Riza Alp Guler, George Trigeorgis, Epameinondas Antonakos, Patrick Snape, Stefanos Zafeiriou, and Iasonas Kokkinos. Densereg: Fully convolutional dense shape regression in-the-wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6799-6808, 2017. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 292, + 287, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 292, + 287, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 292, + 287, + 346 + ], + "type": "text", + "content": "[5] Haoran Bai, Di Kang, Haoxian Zhang, Jinshan Pan, and Linchao Bao. FFHQ-UV: Normalized facial uv-texture dataset for 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 362–371, 2023. 
3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 347, + 287, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 347, + 287, + 400 + ], + "spans": [ + { + "bbox": [ + 53, + 347, + 287, + 400 + ], + "type": "text", + "content": "[6] Anil Bas, William A. P. Smith, Timo Bolkart, and Stefanie Wuhrer. Fitting a 3D morphable model to edges: A comparison between hard and soft correspondences. In Asian Conference on Computer Vision Workshops, pages 377-391, 2017. 1, 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 403, + 287, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 403, + 287, + 446 + ], + "spans": [ + { + "bbox": [ + 53, + 403, + 287, + 446 + ], + "type": "text", + "content": "[7] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3D faces. In Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1999. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 447, + 287, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 447, + 287, + 501 + ], + "spans": [ + { + "bbox": [ + 53, + 447, + 287, + 501 + ], + "type": "text", + "content": "[8] Volker Blanz, Sami Romdhani, and Thomas Vetter. Face identification across different poses and illuminations with a 3D morphable model. In International Conference on Automatic Face & Gesture Recognition (FG), pages 202-207, 2002. 1, 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 503, + 287, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 503, + 287, + 546 + ], + "spans": [ + { + "bbox": [ + 53, + 503, + 287, + 546 + ], + "type": "text", + "content": "[9] Timo Bolkart, Tianye Li, and Michael J Black. Instant multiview head capture through learnable registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 768-779, 2023. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 548, + 287, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 548, + 287, + 602 + ], + "spans": [ + { + "bbox": [ + 48, + 548, + 287, + 602 + ], + "type": "text", + "content": "[10] James Booth, Epameinondas Antonakos, Stylianos Ploumpis, George Trigeorgis, Yannis Panagakis, and Stefanos Zafeiriou. 3d face morphable models\" in-the-wild\". In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 48-57, 2017. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 603, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 603, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 603, + 287, + 667 + ], + "type": "text", + "content": "[11] James Booth, Anastasios Roussos, Evangelos Ververas, Epameinondas Antonakos, Stylianos Ploumpis, Yannis Panagakis, and Stefanos Zafeiriou. 3D reconstruction of \"inthe-wild\" faces in images and videos. IEEE transactions on pattern analysis and machine intelligence, 40(11):2638-2652, 2018. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "text", + "content": "[12] Alan Brunton, Augusto Salazar, Timo Bolkart, and Stefanie Wuhrer. Review of statistical shape spaces for 3D data with comparative analysis for human faces. 
Computer Vision and Image Understanding (CVIU), 128:1-17, 2014. 1" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "text", + "content": "[13] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In Proceedings of the IEEE International Conference on Computer Vision, pages 1021-1030, 2017. 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 129, + 545, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 172 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 172 + ], + "type": "text", + "content": "[14] Chen Cao, Yanlin Weng, Shun Zhou, Yiying Tong, and Kun Zhou. Facewarehouse: A 3d facial expression database for visual computing. IEEE Transactions on Visualization and Computer Graphics, 20(3):413-425, 2013. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 173, + 545, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 173, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 307, + 173, + 545, + 215 + ], + "type": "text", + "content": "[15] Chen Cao, Qiming Hou, and Kun Zhou. Displaced dynamic expression regression for real-time facial tracking and animation. Transactions on Graphics (TOG), 33(4):1-10, 2014. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 217, + 545, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 217, + 545, + 270 + ], + "spans": [ + { + "bbox": [ + 307, + 217, + 545, + 270 + ], + "type": "text", + "content": "[16] Feng-Ju Chang, Anh Tuan Tran, Tal Hassner, Iacopo Masi, Ram Nevatia, and Gerard Medioni. ExpNet: Landmark-free, deep, 3D facial expressions. In International Conference on Automatic Face & Gesture Recognition (FG), pages 122-129, 2018. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 272, + 545, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 272, + 545, + 326 + ], + "spans": [ + { + "bbox": [ + 307, + 272, + 545, + 326 + ], + "type": "text", + "content": "[17] Aggelina Chatziagapi, ShahRukh Athar, Francesc Moreno-Noguer, and Dimitris Samaras. Sider: Single-image neural optimization for facial geometric detail recovery. In 2021 International Conference on 3D Vision (3DV), pages 815-824. IEEE, 2021. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 327, + 545, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 327, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 307, + 327, + 545, + 380 + ], + "type": "text", + "content": "[18] Radek Daneček, Michael J Black, and Timo Bolkart. EMOCA: Emotion driven monocular face capture and animation. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 20311-20322, 2022. 
1, 2, 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 382, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 382, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 307, + 382, + 545, + 437 + ], + "type": "text", + "content": "[19] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multilevel face localisation in the wild. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5203-5212, 2020. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 437, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 437, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 307, + 437, + 545, + 491 + ], + "type": "text", + "content": "[20] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3D face reconstruction with weakly-supervised learning: From single image to image set. In Conference on Computer Vision and Pattern Recognition Workshops (CVPR-W), pages 285-295, 2019. 1, 2, 3, 6, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 493, + 545, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 493, + 545, + 547 + ], + "spans": [ + { + "bbox": [ + 307, + 493, + 545, + 547 + ], + "type": "text", + "content": "[21] Zheng Ding, Xuaner Zhang, Zhihao Xia, Lars Jebe, Zhuowen Tu, and Xiuming Zhang. Diffusionrig: Learning personalized priors for facial appearance editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12736-12746, 2023. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 548, + 545, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 548, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 307, + 548, + 545, + 592 + ], + "type": "text", + "content": "[22] Pengfei Dou, Shishir K. Shah, and Ioannis A. Kakadiaris. End-to-end 3D face reconstruction with deep neural networks. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 5908-5917, 2017. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 593, + 545, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 593, + 545, + 646 + ], + "spans": [ + { + "bbox": [ + 307, + 593, + 545, + 646 + ], + "type": "text", + "content": "[23] Michail Christos Doukas, Mohammad Rami Koujan, Viktoria Sharmanska, Anastasios Roussos, and Stefanos Zafeiriou. Head2head++: Deep facial attributes re-targeting. IEEE Transactions on Biometrics, Behavior, and Identity Science, 3(1):31-43, 2021. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 647, + 545, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 690 + ], + "type": "text", + "content": "[24] Michail Christos Doukas, Stefanos Zafeiriou, and Viktoriia Sharmanska. Headgan: One-shot neural head synthesis and editing. In Proceedings of the IEEE/CVF International conference on Computer Vision, pages 14398-14407, 2021. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "type": "text", + "content": "[25] Bernhard Egger. Semantic Morphable Models. PhD thesis, University of Basel, 2018. 
2" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2498" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 138 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 138 + ], + "type": "text", + "content": "[26] Bernhard Egger, William A. P. Smith, Ayush Tewari, Stefanie Wuhrer, Michael Zollhoefer, Thabo Beeler, Florian Bernard, Timo Bolkart, Adam Kortylewski, Sami Romdhani, Christian Theobalt, Volker Blanz, and Thomas Vetter. 3D morphable face models—past, present, and future. Transactions on Graphics (TOG), 39(5), 2020. 1, 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 140, + 287, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 287, + 183 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 287, + 183 + ], + "type": "text", + "content": "[27] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In European Conference on Computer Vision (ECCV), 2018. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 184, + 287, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 184, + 287, + 227 + ], + "spans": [ + { + "bbox": [ + 48, + 184, + 287, + 227 + ], + "type": "text", + "content": "[28] Yao Feng, Haiwen Feng, Michael J Black, and Timo Bolkart. Learning an animatable detailed 3D face model from in-the-wild images. Transactions on Graphics, (Proc. SIGGRAPH), 40(4):1-13, 2021. 1, 2, 3, 6, 7, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 228, + 287, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 228, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 48, + 228, + 287, + 293 + ], + "type": "text", + "content": "[29] Panagiotis P. Filntisis, George Retsinas, Foivos Paraperas-Papantoniou, Athanasios Katsamanis, Anastasios Roussos, and Petros Maragos. SPECTRE: Visual speech-informed perceptual 3D facial expression reconstruction from videos. In Conference on Computer Vision and Pattern Recognition Workshops (CVPR-W), pages 5745-5755, 2023. 1, 2, 3, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 294, + 287, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 294, + 287, + 348 + ], + "spans": [ + { + "bbox": [ + 48, + 294, + 287, + 348 + ], + "type": "text", + "content": "[30] Pablo Garrido, Michael Zollhöfer, Dan Casas, Levi Valgaerts, Kiran Varanasi, Patrick Pérez, and Christian Theobalt. Reconstruction of personalized 3d face rigs from monocular video. ACM Transactions on Graphics (TOG), 35 (3):1-15, 2016. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 350, + 287, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 350, + 287, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 350, + 287, + 392 + ], + "type": "text", + "content": "[31] Pablo Garrido, Michael Zollhöfer, Chenglei Wu, Derek Bradley, Patrick Pérez, Thabo Beeler, and Christian Theobalt. Corrective 3d reconstruction of lips from monocular video. ACM Trans. Graph., 35(6):219-1, 2016. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 394, + 287, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 394, + 287, + 448 + ], + "spans": [ + { + "bbox": [ + 48, + 394, + 287, + 448 + ], + "type": "text", + "content": "[32] Baris Gecer, Stylianos Ploumpis, Irene Kotsia, and Stefanos Zafeiriou. GANFIT: Generative adversarial network fitting for high fidelity 3D face reconstruction. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1155-1164, 2019. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 449, + 287, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 287, + 502 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 287, + 502 + ], + "type": "text", + "content": "[33] Kyle Genova, Forrester Cole, Aaron Maschinot, Aaron Sarna, Daniel Vlasic, and William T. Freeman. Unsupervised training for 3D morphable model regression. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 8377-8386, 2018. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 504, + 287, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 504, + 287, + 558 + ], + "spans": [ + { + "bbox": [ + 48, + 504, + 287, + 558 + ], + "type": "text", + "content": "[34] Thomas Gerg, Andreas Morel-Forster, Clemens Blumer, Bernhard Egger, Marcel Luthi, Sandro Schoenborn, and Thomas Vetter. Morphable face models - an open framework. In International Conference on Automatic Face & Gesture Recognition (FG), pages 75–82, 2018. 1, 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 559, + 287, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 287, + 602 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 287, + 602 + ], + "type": "text", + "content": "[35] Partha Ghosh, Pravir Singh Gupta, Roy Uziel, Anurag Ranjan, Michael J Black, and Timo Bolkart. GIF: Generative interpretable faces. In 2020 International Conference on 3D Vision (3DV), pages 868-878. IEEE, 2020. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 604, + 287, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 604, + 287, + 647 + ], + "spans": [ + { + "bbox": [ + 48, + 604, + 287, + 647 + ], + "type": "text", + "content": "[36] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "text", + "content": "[37] Shan He, Haonan He, Shuo Yang, Xiaoyan Wu, Pengcheng Xia, Bing Yin, Cong Liu, Lirong Dai, and Chang Xu. Speech4mesh: Speech-assisted monocular 3d facial reconstruction for speech-driven 3d facial animation. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14192-14202, 2023. 2" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[38] Xingzhe He, Bastian Wandt, and Helge Rhodin. Autolink: Self-supervised learning of human skeletons and object outlines by linking keypoints. Advances in Neural Information Processing Systems, 35:36123-36141, 2022. 4" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "type": "text", + "content": "[39] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, et al. Searching for mobilenetv3. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1314-1324, 2019. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 173, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 173, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 307, + 173, + 545, + 205 + ], + "type": "text", + "content": "[40] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A. Efros. Image-to-image translation with conditional adversarial networks. CoRR, abs/1611.07004, 2016. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 206, + 545, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 206, + 545, + 259 + ], + "spans": [ + { + "bbox": [ + 307, + 206, + 545, + 259 + ], + "type": "text", + "content": "[41] Aaron S Jackson, Adrian Bulat, Vasileios Argyriou, and Georgios Tzimiropoulos. Large pose 3D face reconstruction from a single image via direct volumetric CNN regression. In International Conference on Computer Vision (ICCV), pages 1031-1039, 2017. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 261, + 545, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 261, + 545, + 324 + ], + "spans": [ + { + "bbox": [ + 307, + 261, + 545, + 324 + ], + "type": "text", + "content": "[42] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 694-711. Springer, 2016. 3, 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 327, + 545, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 327, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 307, + 327, + 545, + 380 + ], + "type": "text", + "content": "[43] Harim Jung, Myeong-Seok Oh, and Seong-Whan Lee. Learning free-form deformation for 3D face reconstruction from in-the-wild images. In International Conference on Systems, Man, and Cybernetics (SMC), pages 2737–2742, 2021. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 383, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 383, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 383, + 545, + 426 + ], + "type": "text", + "content": "[44] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 427, + 545, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 427, + 545, + 480 + ], + "spans": [ + { + "bbox": [ + 307, + 427, + 545, + 480 + ], + "type": "text", + "content": "[45] Hyeongwoo Kim, Pablo Garrido, Ayush Tewari, Weipeng Xu, Justus Thies, Matthias Nießner, Patrick Pérez, Christian Richardt, Michael Zolloffer, and Christian Theobalt. Deep video portraits. ACM Transactions on Graphics (TOG), 37 (4):163, 2018. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 482, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 482, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 307, + 482, + 545, + 536 + ], + "type": "text", + "content": "[46] Hyeongwoo Kim, Michael Zollhöfer, Ayush Tewari, Justus Thies, Christian Richardt, and Christian Theobalt. Inverse-FaceNet: deep monocular inverse face rendering. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 4625-4634, 2018. 1, 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 537, + 545, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 537, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 307, + 537, + 545, + 581 + ], + "type": "text", + "content": "[47] Tatsuro Koizumi and William A. P. Smith. \"look ma, no landmarks!\" - unsupervised, model-based dense face alignment. In European Conference on Computer Vision (ECCV), pages 690-706, 2020. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 582, + 545, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 582, + 545, + 647 + ], + "spans": [ + { + "bbox": [ + 307, + 582, + 545, + 647 + ], + "type": "text", + "content": "[48] Alexandros Lattas, Stylianos Moschoglou, Baris Gecer, Stylianos Ploumpis, Vasileios Triantafyllou, Abhijeet Ghosh, and Stefanos Zafeiriou. AvatarMe: Realistically renderable 3d facial reconstruction\" in-the-wild\". In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 760-769, 2020. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 647, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 712 + ], + "type": "text", + "content": "[49] Alexandros Lattas, Stylianos Moschoglou, Stylianos Ploumpis, Baris Gecer, Jiankang Deng, and Stefanos Zafeiriou. Fitme: Deep photorealistic 3d morphable model avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8629-8640, 2023." 
+ } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2499" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 714 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[50] Gun-Hee Lee and Seong-Whan Lee. Uncertainty-aware mesh decoder for high fidelity 3d face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6100–6109, 2020. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 287, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 287, + 161 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 287, + 161 + ], + "type": "text", + "content": "[51] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. To fit or not to fit: Model-based face reconstruction and occlusion segmentation from weak supervision. CoRR, abs/2106.09614, 2021. 6, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 162, + 287, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 162, + 287, + 194 + ], + "spans": [ + { + "bbox": [ + 49, + 162, + 287, + 194 + ], + "type": "text", + "content": "[52] Hao Li, Jihun Yu, Yuting Ye, and Chris Bregler. Realtime facial animation with on-the-fly correctives. Transactions on Graphics (TOG), 32(4):42-1, 2013. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 195, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 195, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 49, + 195, + 287, + 239 + ], + "type": "text", + "content": "[53] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017. 2, 3, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 239, + 287, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 239, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 49, + 239, + 287, + 293 + ], + "type": "text", + "content": "[54] Jiangke Lin, Yi Yuan, Tianjia Shao, and Kun Zhou. Towards high-fidelity 3d face reconstruction from in-the-wild images using graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5891-5900, 2020. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 294, + 287, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 294, + 287, + 337 + ], + "spans": [ + { + "bbox": [ + 49, + 294, + 287, + 337 + ], + "type": "text", + "content": "[55] Yaojie Liu, Amin Jourabloo, William Ren, and Xiaoming Liu. Dense face alignment. In International Conference on Computer Vision Workshops (ICCV-W), pages 1619-1628, 2017. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 338, + 287, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 338, + 287, + 380 + ], + "spans": [ + { + "bbox": [ + 49, + 338, + 287, + 380 + ], + "type": "text", + "content": "[56] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 383, + 287, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 383, + 287, + 446 + ], + "spans": [ + { + "bbox": [ + 49, + 383, + 287, + 446 + ], + "type": "text", + "content": "[57] Huiwen Luo, Koki Nagano, Han-Wei Kung, Qingguo Xu, Zejian Wang, Lingyu Wei, Liwen Hu, and Hao Li. Normalized avatar synthesis using stylegan and perceptual refinement. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11662–11672, 2021. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 449, + 287, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 449, + 287, + 502 + ], + "spans": [ + { + "bbox": [ + 49, + 449, + 287, + 502 + ], + "type": "text", + "content": "[58] B.R. Mallikarjun, Ayush Tewari, Hans-Peter Seidel, Mohamed Elgharib, Christian Theobalt, et al. Learning complete 3d morphable face models from images and videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3361-3371, 2021. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 504, + 287, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 504, + 287, + 548 + ], + "spans": [ + { + "bbox": [ + 49, + 504, + 287, + 548 + ], + "type": "text", + "content": "[59] Ali Mollahosseini, Behzad Hasani, and Mohammad H Ma-hoor. Affectnet: A database for facial expression, valence, and arousal computing in the wild. IEEE Transactions on Affective Computing, 10(1):18-31, 2017. 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 548, + 287, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 548, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 49, + 548, + 287, + 581 + ], + "type": "text", + "content": "[60] Masahiro Mori, Karl F MacDorman, and Norri Kageki. The uncanny valley [from the field]. IEEE Robotics & Automation magazine, 19(2):98-100, 2012. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 582, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 582, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 49, + 582, + 287, + 624 + ], + "type": "text", + "content": "[61] Christopher Otto, Prashanth Chandran, Gaspard Zoss, Markus H. Gross, Paulo F. U. Gotardo, and Derek Bradley. A perceptual shape loss for monocular 3D face reconstruction. Computer Graphics Forum (Proc. Pacific Graphics), 2023. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 625, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 625, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 49, + 625, + 287, + 689 + ], + "type": "text", + "content": "[62] Foivos Paraperas Papantoniou, Panagiotis P Filntisis, Petros Maragos, and Anastasios Roussos. Neural emotion director: Speech-preserving semantic control of facial expressions in \"in-the-wild\" videos. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18781-18790, 2022. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 691, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 691, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 49, + 691, + 287, + 714 + ], + "type": "text", + "content": "[63] Jeong Joon Park, Peter Florence, Julian Straub, Richard A. Newcombe, and Steven Lovegrove. DeepSDF: Learning" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "text", + "content": "continuous signed distance functions for shape representation. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 108, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 108, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 308, + 108, + 545, + 162 + ], + "type": "text", + "content": "[64] Pascal Paysan, Reinhard Knothe, Brian Amberg, Sami Romdhani, and Thomas Vetter. A 3d face model for pose and illumination invariant face recognition. In 2009 sixth IEEE international conference on advanced video and signal based surveillance, pages 296-301. IEEE, 2009. 2, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 163, + 545, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 163, + 545, + 229 + ], + "spans": [ + { + "bbox": [ + 308, + 163, + 545, + 229 + ], + "type": "text", + "content": "[65] Stylianos Ploumpis, Evangelos Ververas, Eimear O' Sullivan, Stylianos Moschoglou, Haoyang Wang, Nick E. Pears, William A. P. Smith, Baris Gecer, and Stefanos Zafeiriou. Towards a complete 3D morphable model of the human head. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 43(11):4142-4160, 2021. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 231, + 545, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 231, + 545, + 263 + ], + "spans": [ + { + "bbox": [ + 308, + 231, + 545, + 263 + ], + "type": "text", + "content": "[66] E. Richardson, M. Sela, and R. Kimmel. 3D face reconstruction by learning from synthetic data. In International Conference on 3D Vision (3DV), pages 460-469, 2016. 1, 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 265, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 265, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 308, + 265, + 545, + 319 + ], + "type": "text", + "content": "[67] Sami Romdhani and Thomas Vetter. Estimating 3D shape and texture using pixel intensity, edges, specular highlights, texture constraints and aprior. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 986-993, 2005. 1, 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 321, + 545, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 321, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 308, + 321, + 545, + 387 + ], + "type": "text", + "content": "[68] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. 
U-net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention - MICCAI 2015 - 18th International Conference Munich, Germany, October 5 - 9, 2015, Proceedings, Part III, pages 234-241. Springer, 2015. 2, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 388, + 545, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 388, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 308, + 388, + 545, + 441 + ], + "type": "text", + "content": "[69] Zeyu Ruan, Changqing Zou, Longhai Wu, Gangshan Wu, and Limin Wang. SADRNet: Self-aligned dual face regression networks for robust 3d dense face alignment and reconstruction. IEEE Transactions on Image Processing, 30: 5793-5806, 2021. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 444, + 545, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 444, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 308, + 444, + 545, + 497 + ], + "type": "text", + "content": "[70] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael Black. Learning to regress 3D face shape and expression from an image without 3d supervision. In Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 500, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 500, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 308, + 500, + 545, + 544 + ], + "type": "text", + "content": "[71] Matan Sela, Elad Richardson, and Ron Kimmel. Unrestricted facial geometry reconstruction using image-to-image translation. In International Conference on Computer Vision (ICCV), pages 1576-1585, 2017. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 545, + 545, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 545, + 545, + 600 + ], + "spans": [ + { + "bbox": [ + 308, + 545, + 545, + 600 + ], + "type": "text", + "content": "[72] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Ming-min Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3D face reconstruction by occlusion-aware multiview geometry consistency. In European Conference on Computer Vision (ECCV), pages 53-70. Springer, 2020. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 601, + 545, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 601, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 308, + 601, + 545, + 633 + ], + "type": "text", + "content": "[73] William AP Smith. The perspective face shape ambiguity. In Perspectives in Shape Analysis, pages 299-319. Springer, 2016. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 635, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 545, + 667 + ], + "type": "text", + "content": "[74] Attila Szabó, Givi Meishvili, and Paolo Favaro. Unsupervised generative 3D shape learning from natural images. CoRR, abs/1910.00287, 2019. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "text", + "content": "[75] Ayush Tewari, Michael Zolloffer, Hyeongwoo Kim, Pablo Garrido, Florian Bernard, Patrick Perez, and Christian Theobalt. 
MoFA: Model-based deep convolutional face autoencoder for unsupervised monocular reconstruction. In In" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2500" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 714 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "type": "text", + "content": "ternational Conference on Computer Vision (ICCV), pages 1274-1283, 2017. 1, 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 162 + ], + "type": "text", + "content": "[76] Ayush Tewari, Michael Zollhöfer, Pablo Garrido, Florian Bernard, Hyeongwoo Kim, Patrick Pérez, and Christian Theobalt. Self-supervised multi-level face model learning for monocular reconstruction at over " + }, + { + "bbox": [ + 48, + 96, + 287, + 162 + ], + "type": "inline_equation", + "content": "250\\mathrm{~hz}" + }, + { + "bbox": [ + 48, + 96, + 287, + 162 + ], + "type": "text", + "content": ". In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2549-2559, 2018. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 287, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 287, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 287, + 228 + ], + "type": "text", + "content": "[77] Ayush Tewari, Florian Bernard, Pablo Garrido, Gaurav Bharaj, Mohamed Elgharib, Hans-Peter Seidel, Patrick Pérez, Michael Zollhöfer, and Christian Theobalt. FML: face model learning from videos. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 10812-10822, 2019. 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 230, + 287, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 230, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 230, + 287, + 274 + ], + "type": "text", + "content": "[78] Justus Thies, Michael Zollhöfer, Matthias Nießner, Levi Valgaerts, Marc Stamminger, and Christian Theobalt. Real-time expression transfer for facial reenactment. ACM Trans. Graph., 34(6), 2015. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 275, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 275, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 275, + 287, + 319 + ], + "type": "text", + "content": "[79] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Facevr: Real-time facial reenactment and eye gaze control in virtual reality. arXiv preprint arXiv:1610.03151, 2016." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 320, + 287, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 320, + 287, + 374 + ], + "spans": [ + { + "bbox": [ + 48, + 320, + 287, + 374 + ], + "type": "text", + "content": "[80] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2Face: Real-time face capture and reenactment of RGB videos. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 2387-2395, 2016. 1, 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 376, + 287, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 376, + 287, + 420 + ], + "spans": [ + { + "bbox": [ + 48, + 376, + 287, + 420 + ], + "type": "text", + "content": "[81] Antoine Toisoul, Jean Kossaifi, Adrian Bulat, Georgios Tzimiropoulos, and Maja Pantic. Estimation of continuous valence and arousal levels from faces in naturalistic conditions. Nature Machine Intelligence, 3(1):42-50, 2021. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 421, + 287, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 421, + 287, + 475 + ], + "spans": [ + { + "bbox": [ + 48, + 421, + 287, + 475 + ], + "type": "text", + "content": "[82] Anh Tuan Tran, Tal Hassner, Iacopo Masi, and Gerard Medioni. Regressing robust and discriminative 3D morphable models with a very deep neural network. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1599-1608, 2017. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 477, + 287, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 477, + 287, + 532 + ], + "spans": [ + { + "bbox": [ + 48, + 477, + 287, + 532 + ], + "type": "text", + "content": "[83] Anh Tuan Tran, Tal Hassner, Iacopo Masi, Eran Paz, Yuval Nirkin, and Gérard Medioni. Extreme 3d face reconstruction: Seeing through occlusions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3935-3944, 2018. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 533, + 287, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 533, + 287, + 566 + ], + "spans": [ + { + "bbox": [ + 48, + 533, + 287, + 566 + ], + "type": "text", + "content": "[84] Luan Tran and Xiaoming Liu. Nonlinear 3d face morphable model. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7346-7355, 2018. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 567, + 287, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 287, + 610 + ], + "type": "text", + "content": "[85] Luan Tran, Feng Liu, and Xiaoming Liu. Towards high-fidelity nonlinear 3d face morphable model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1126-1135, 2019. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 612, + 287, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 612, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 612, + 287, + 656 + ], + "type": "text", + "content": "[86] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 
6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 657, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 657, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 657, + 287, + 689 + ], + "type": "text", + "content": "[87] Huawei Wei, Shuang Liang, and Yichen Wei. 3D dense face alignment via graph convolution networks. arXiv preprint arXiv:1904.05562, 2019. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "text", + "content": "[88] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Matthew Johnson, Jingjing Shen, Nikola Milosavljevic, Daniel Wilde," + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 664 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 117 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 117 + ], + "type": "text", + "content": "Stephan J. Garbin, Toby Sharp, Ivan Stojiljkovic, Tom Cashman, and Julien P. C. Valentin. 3D face reconstruction with dense landmarks. In European Conference on Computer Vision (ECCV), pages 160-177. Springer, 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "type": "text", + "content": "[89] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3D objects from images in the wild. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1-10, 2020. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 173, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 173, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 307, + 173, + 545, + 228 + ], + "type": "text", + "content": "[90] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction. In Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 229, + 545, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 229, + 545, + 295 + ], + "spans": [ + { + "bbox": [ + 307, + 229, + 545, + 295 + ], + "type": "text", + "content": "[91] Tarun Yenamandra, Ayush Tewari, Florian Bernard, Hans-Peter Seidel, Mohamed Elgharib, Daniel Cremers, and Christian Theobalt. i3dmm: Deep implicit 3d morphable model of human heads. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12803-12813, 2021. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 297, + 545, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 297, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 307, + 297, + 545, + 338 + ], + "type": "text", + "content": "[92] Xiaoxing Zeng, Xiaojiang Peng, and Yu Qiao. DF2Net: A dense-fine-finer network for detailed 3D face reconstruction. In International Conference on Computer Vision (ICCV), 2019. 
2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 341, + 545, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 341, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 307, + 341, + 545, + 396 + ], + "type": "text", + "content": "[93] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 396, + 545, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 396, + 545, + 463 + ], + "spans": [ + { + "bbox": [ + 307, + 396, + 545, + 463 + ], + "type": "text", + "content": "[94] Tianke Zhang, Xuangeng Chu, Yunfei Liu, Lijian Lin, Zhendong Yang, Zhengzhuo Xu, Chengkun Cao, Fei Yu, Changyin Zhou, Chun Yuan, et al. Accurate 3d face reconstruction with facial component tokens. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9033-9042, 2023. 2, 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 463, + 545, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 463, + 545, + 528 + ], + "spans": [ + { + "bbox": [ + 307, + 463, + 545, + 528 + ], + "type": "text", + "content": "[95] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A. Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In IEEE International Conference on Computer Vision, ICCV 2017, Venice, Italy, October 22-29, 2017, pages 2242-2251. IEEE Computer Society, 2017. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 529, + 545, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 529, + 545, + 574 + ], + "spans": [ + { + "bbox": [ + 307, + 529, + 545, + 574 + ], + "type": "text", + "content": "[96] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z. Li. Face alignment across large poses: A 3D solution. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 146-155, 2016. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 575, + 545, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 575, + 545, + 608 + ], + "spans": [ + { + "bbox": [ + 307, + 575, + 545, + 608 + ], + "type": "text", + "content": "[97] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces. In European Conference on Computer Vision, pages 250–269, 2022. 2, 3, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 609, + 545, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 609, + 545, + 664 + ], + "spans": [ + { + "bbox": [ + 307, + 609, + 545, + 664 + ], + "type": "text", + "content": "[98] Michael Zollhöfer, Justus Thies, Darek Bradley, Pablo Garrido, Thabo Beeler, Patrick Pérez, Marc Stamminger, Matthias Nießner, and Christian Theobalt. State of the art on monocular 3D face reconstruction, tracking, and applications. Computer Graphics Forum, 2018. 
1, 2" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "2501" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Feature Tracking via Event Camera/a6809d22-03e7-4639-b845-8393b79ecc8d_content_list.json b/2024/3D Feature Tracking via Event Camera/a6809d22-03e7-4639-b845-8393b79ecc8d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7bfc459328892b1e535ee29d42de5d6826906ac0 --- /dev/null +++ b/2024/3D Feature Tracking via Event Camera/a6809d22-03e7-4639-b845-8393b79ecc8d_content_list.json @@ -0,0 +1,1718 @@ +[ + { + "type": "text", + "text": "3D Feature Tracking via Event Camera", + "text_level": 1, + "bbox": [ + 284, + 130, + 687, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Siqi Li $^{1}$ Zhikuan Zhou $^{1}$ Zhou Xue $^{2}$ Yipeng Li $^{3}$ Shaoyi Du $^{4}$ Yue Gao $^{1*}$", + "bbox": [ + 171, + 179, + 802, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ {BNRist, THUIBCS, School of Software}, Tsinghua University $^{2}$ Li Auto $^{3}$ Department of Automation, Tsinghua University", + "bbox": [ + 104, + 200, + 864, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{4}$ National Key Laboratory of Human-Machine Hybrid Augmented Intelligence, National Engineering Research Center for Visual Information and Applications, and Institute of Artificial Intelligence and Robotics, Xi'an Jiaotong University", + "bbox": [ + 125, + 218, + 844, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{lsq19, zzk22}@mails.tsinghua.edu.cn, xuezhou08@gmail.com, dushaoyi@xjtu.edu.cn, {liep, gaoyue}@tsinghua.edu.cn", + "bbox": [ + 101, + 255, + 864, + 268 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3f4a0f5c4a276caa64fde7232058fb978b99663fb703a31baad1d97f4a1c1148.jpg", + "image_caption": [ + "Figure 1. We present the first high-speed 3D feature tracking method via stereo event cameras and the corresponding high-speed 3D feature tracking dataset. Our proposed method takes high temporal resolution event streams captured from stereo event cameras as input, and could predict the long-term feature motion trajectories of multiple high-speed moving objects within the scene at a rate of 250 FPS." + ], + "image_footnote": [], + "bbox": [ + 81, + 287, + 883, + 505 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 555, + 313, + 570 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper presents the first 3D feature tracking method with the corresponding dataset. Our proposed method takes event streams from stereo event cameras as input to predict 3D trajectories of the target features with high-speed motion. To achieve this, our method leverages a joint framework to predict the 2D feature motion offsets and the 3D feature spatial position simultaneously. A motion compensation module is leveraged to overcome the feature deformation. A patch matching module based on bipolarity hypergraph modeling is proposed to robustly estimate the feature spatial position. 
Meanwhile, we collect the first 3D feature tracking dataset with high-speed moving objects and ground truth 3D feature trajectories at 250 FPS, named E-3DTrack, which can be used as the first high-speed 3D feature tracking benchmark. Our code and dataset could be found at: https://github.com/lisiqi19971013/E-3DTrack.", + "bbox": [ + 73, + 577, + 473, + 834 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 844, + 209, + 859 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Feature tracking aims to predict the long-term trajectories of target features, which is fundamental in many computer", + "bbox": [ + 75, + 869, + 470, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "vision tasks, e.g., object tracking [36, 37], 3D reconstruction [8, 17], and SLAM [20, 41]. Frame-based feature tracking methods [5, 24, 25, 33, 35] have been extensively investigated in the past decades. However, all existing methods focus on tracking 2D feature trajectories in the image plane.", + "bbox": [ + 496, + 556, + 890, + 633 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In real-world scenarios, objects are moving in 3D space, e.g., cars are racing on the road from near to far. The tracking of features with high-speed 3D motion becomes essential. Consequently, there is an imperative need to investigate 3D feature tracking methods capable of predicting feature trajectories for objects undergoing high-speed 3D motion. Such methods hold significant promise for various downstream applications, e.g., VR, AR, and autonomous driving. To the best of our knowledge, existing literature lacks established high-speed 3D feature tracking methodologies.", + "bbox": [ + 496, + 637, + 892, + 789 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "For the 3D feature tracking of high-speed moving objects, the main challenges lie in three folds. (1) With the limited frame rate of traditional frame-based cameras, the motion of high-speed moving objects may not be consistently captured due to the blind time between consecutive frames. Therefore, how to continually record valid motion information of high-speed moving objects is the first chal", + "bbox": [ + 496, + 795, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "18974", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "lenge. (2) The second challenge lies in establishing the correlation between the 3D position of the feature and the 2D visual data acquired by cameras to generate a continuous and smooth 3D feature trajectory. (3) To the best of our knowledge, there are currently no existing high-speed 3D feature tracking datasets. This is primarily due to the difficulty in capturing ground truth 3D feature trajectories of high-speed moving objects, which is constrained by the insufficient capture frequency of existing 3D vision sensors. 
Thus, the lack of high-speed 3D feature tracking dataset is the third challenge, which is also a principal impediment to the advancement of research within this domain.", + "bbox": [ + 75, + 90, + 472, + 271 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To overcome the motion capture challenge, we use event cameras to record motion dynamics of high-speed moving objects. Event cameras [7, 32] are bio-inspired vision sensors that asynchronously respond to pixel-wise brightness changes. Specifically, when the logarithmic change of the brightness at a pixel exceeds a certain threshold, i.e., $|\\Delta_t \\log I(x,y,t)| > C$ , where $I(x,y,t)$ is the brightness at pixel $(x,y)$ and timestamp $t$ , an event will be triggered, denoted as $e = (t,x,y,p)$ , where $p \\in \\{1,-1\\}$ is the polarity. The output event stream of event cameras, formed by events triggered by all pixels, showcases their remarkably high temporal resolution (in the order of microseconds) and broad dynamic range (up to 140 dB) [13]. These unique features of event cameras render them promising tools for achieving 3D feature tracking in the context of high-speed moving objects.", + "bbox": [ + 75, + 275, + 472, + 517 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the aforementioned technical challenge, we propose a high-speed 3D feature tracking method based on stereo event cameras, predicting the long-term 3D trajectories of target features from stereo event streams and template patches. To achieve 3D feature tracking, our proposed method leverages a joint framework to predict the 2D feature motion offsets and the feature spatial position at each timestamp simultaneously. A motion compensation module is leveraged to adapt to the feature deformation, and a patch matching module based on bi-polarity hypergraph modeling is proposed to accurately estimate the feature spatial position. In addition, we introduce a stereo motion consistency mechanism that establishes the constraint between the feature motion offsets and the spatial position to achieve smooth 3D trajectory estimation.", + "bbox": [ + 75, + 520, + 472, + 747 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the data challenge, we establish a hybrid vision system and curate the first real-world event-based 3D feature tracking dataset, named E-3DTrack. Our dataset includes multiple objects demonstrating high-speed motion in the scene, with stereo event cameras capturing high temporal resolution event streams, as shown in Fig. 1. To obtain the ground truth of the 3D feature trajectories, we utilize the Optitrack motion capture system to record the motion trajectory of each moving object. This information is then integrated with the high-precision object point cloud", + "bbox": [ + 75, + 750, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "scanned by FARO Quantum ScanArm, resulting in the generation of the ground truth 3D trajectories of each feature at a rate of 250 FPS. 
To the best of our knowledge, our dataset is the first event-based feature tracking dataset containing high-speed moving objects and providing 3D ground truth feature trajectories.", + "bbox": [ + 496, + 90, + 893, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions could be summarized as follows:", + "bbox": [ + 516, + 181, + 857, + 196 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose the first high-speed 3D feature tracking method based on stereo event cameras, which could track the 3D trajectories of features with high-speed motion.", + "- We achieve satisfactory 3D feature tracking performance through a motion compensation module for addressing feature deformation, a patch matching module based on bi-polarity hypergraph modeling for accurate estimation of 3D feature positions, and a stereo motion consistency mechanism to establish constraints between feature motion offsets and 3D position.", + "- We collect the first real-world 3D feature tracking dataset containing multiple high-speed moving objects, named E-3DTrack. Our dataset contains stereo event streams and 250 FPS ground truth 3D feature trajectories, which could be used as the 3D feature tracking benchmark." + ], + "bbox": [ + 500, + 196, + 890, + 422 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 436, + 640, + 452 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Trajectory Prediction via Event Camera. Event-based feature tracking methods have been developed rapidly within the last decade. Earlier works [18, 38] treat the events as a point set and used ICP [6] to estimate feature motion trajectories. Then, EKLT [10] is proposed to obtain feature patch from the reference frame as template, and use the event stream to track the template and predict the trajectory. Meanwhile, some event-by-event trackers [2, 3] are proposed to exploit the asynchronicity of event camera, e.g., eCDT [16] employs a clustering method to cluster adjacent events, and uses cluster descriptors to find continual feature tracks. Recently, DeepEvT [26] is proposed as the first data-driven event-based feature tracking method, which achieves state-of-the-art 2D feature tracking performance.", + "bbox": [ + 496, + 462, + 890, + 674 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "An alternative approach for trajectory prediction is optical flow estimation, wherein the pixel-level motion field is predicted using the input event stream. Compared with feature tracking, these methods [1, 4, 12, 29, 30] focus more on estimating the motion field between adjacent moments and lack modeling of long-term trajectory consistency.", + "bbox": [ + 496, + 674, + 890, + 763 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, all these existing trajectory prediction methods could only predict 2D feature trajectories in the image plane while the real objects are moving in 3D space, i.e., the predicted feature motion trajectories are information-deficient.", + "bbox": [ + 496, + 763, + 890, + 824 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Event-based 3D Position Estimation. As the 2D feature trajectories can be predicted, a simple and straightforward solution is to use a monocular or stereo depth estimation method to predict the depth of the feature and calculate the 3D position. 
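As a concrete illustration of that two-stage route, the short sketch below back-projects a tracked 2D feature and an estimated disparity into a 3D position under a standard rectified pinhole stereo model. The intrinsics fx, fy, cx, cy and the baseline b are illustrative placeholders, not values taken from the paper or the dataset.

```python
# Minimal sketch (not the paper's code): recover a 3D feature position from a
# tracked 2D feature location and a stereo disparity, assuming a calibrated,
# rectified stereo pair with focal lengths fx, fy (pixels), principal point
# (cx, cy), and baseline b (metres). All parameter values are illustrative.
import numpy as np

def backproject(u, v, disparity, fx, fy, cx, cy, b):
    """Return the 3D point (x, y, z) in the left-camera frame."""
    z = fx * b / disparity      # depth from disparity: z = f * b / d
    x = (u - cx) * z / fx       # pinhole back-projection in x
    y = (v - cy) * z / fy       # pinhole back-projection in y
    return np.array([x, y, z])

# Example: a feature at pixel (412.0, 237.5) with an 18.4 px disparity.
X = backproject(412.0, 237.5, 18.4, fx=640.0, fy=640.0, cx=320.0, cy=240.0, b=0.12)
```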
In recent years, several event-based", + "bbox": [ + 496, + 825, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "18975", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/7a23ae9b22e080f0e58ca26cbca2c176a2e44801272b3a01263a7167241dfc85.jpg", + "image_caption": [ + "Figure 2. Our proposed method takes stereo event streams as input to predict the 3D trajectory of the target feature provided in the initial template patch $I_{t_0}$ . For a subsequent timestamp $t_i$ , the deformed template patch $I_{t_i}$ is predicted using the motion compensation module. Then, $I_{t_i}$ , $I_{t_0}$ , and the events $P_{t_i}$ triggered within the spatiotemporal neighboring patch of the predicted feature position $\\mathbf{u}_{t_i}$ are forwarded into the offset estimation module to estimate the feature motion offsets $\\Delta \\mathbf{u}_{t_i}$ . Meanwhile, a patch matching module based on bi-polarity hypergraph modeling is leveraged to predict the disparity. Finally, a projection operation is performed to update the 3D trajectory." + ], + "image_footnote": [], + "bbox": [ + 86, + 85, + 890, + 343 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "monocular [11, 14, 40] or stereo [28, 39] depth estimation methods are proposed, which could estimate the depth map from the input single-view or multi-view event streams.", + "bbox": [ + 75, + 433, + 468, + 479 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "However, we will show that the simple combination of these two types of methods could not achieve satisfying long-term 3D feature trajectories prediction performance in Sec. 5.2. Therefore, high-speed 3D feature tracking is still a challenging open problem.", + "bbox": [ + 75, + 479, + 470, + 556 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 571, + 168, + 588 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we commence with an overview of the pipeline in Sec. 3.1, subsequently delving into the detailed architecture in Sec. 3.2, and conclude by outlining the supervision of our method in Sec. 3.3.", + "bbox": [ + 75, + 599, + 468, + 659 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Overview", + "text_level": 1, + "bbox": [ + 76, + 672, + 187, + 686 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As shown in Fig. 2, our proposed method takes stereo event streams as input to predict 3D feature trajectories in camera 1 coordinate system. The target features are contained in gray-scale template patches at the initial moment. This is the common setting for event-based feature tracking, e.g., EKLT [10] and DeepEvT [26]. Our method leverages a joint framework to predict features' 2D motion offsets and the 3D spatial positions simultaneously at each timestamp, and further obtain the 3D trajectories through projection.", + "bbox": [ + 75, + 696, + 468, + 833 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Specifically, let $\\mathcal{E}^j = \\left\\{e_k^j = (t_k^j,u_k^j,v_k^j,p_k^j)\\right\\}$ denote the event stream captured by an event camera, where $j = 1,2$ denotes camera 1 and camera 2, respectively, $e_k^j$ is the $k$ -th event captured by camera $j$ . The feature to be tracked", + "bbox": [ + 76, + 833, + 470, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "is provided in a $d \\times d$ template patch $I_0$ captured by camera 1 at initial moment $t_0$ . 
Then, the feature trajectory is predicted step-by-step. For a subsequent timestamp $t_i$ , we calculate the 2D feature coordinates $\\mathbf{u}_{t_i}^1 = (u_{t_i}, v_{t_i})$ projected in camera 1 based on the predicted 3D feature position $\\mathbf{X}_{t_i} = (x_{t_i}, y_{t_i}, z_{t_i})$ at the previous step. To calculate the feature trajectory, the events $\\mathcal{E}_i^1$ triggered in the $d \\times d$ patch around $\\mathbf{u}_{t_i}^1$ and within the time bin $[t_i, t_{i+1}]$ are leveraged to provide feature motion information. Then, $\\mathcal{E}_i^1$ is converted into grid-based event patch $P_{t_i}^1$ using the event representation method proposed in [26].", + "bbox": [ + 496, + 433, + 892, + 601 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As shown in Fig. 2, the 3D movement of the object may cause deformation of the feature template patch. To tackle this challenge, we leverage a motion compensation module to predict the deformed template patch $\\tilde{I}_{t_i}$ at timestamp $t_i$ . Then, $\\tilde{I}_{t_i}$ and $I_{t_0}$ are concatenated and forwarded into the offset estimation module together with $P_{t_i}^1$ to predict the 2D feature motion offset $\\Delta \\mathbf{u}_{t_i}^1$ . To further estimate the 3D position of the target feature, we use the events triggered within the same $d$ rows as $P_{t_i}^1$ from $\\mathcal{E}^2$ , i.e., $\\mathcal{E}_i^2 = \\{e_k^2 | u_{t_i} - \\frac{d-1}{2} \\leq u_k^2 \\leq u_{t_i} + \\frac{d-1}{2}, t_i \\leq t_k^2 \\leq t_{i+1}\\}$ , to generate event row patch $R_{t_i}^2$ using the same event representation method. Then, the disparity $d_{t_{i+1}}$ could be predicted from $P_{t_i}^1$ and $R_{t_i}^2$ using our proposed patch matching module based on bipolarity hypergraph modeling. In addition, inspired by the fact that the 2D motion offsets in both camera planes have a constraint with the disparity change, we use the event patch $P_{t_i}^2$ of camera 2 to compute the offset $\\Delta \\mathbf{u}_{t_i}^2$ in the training stage, and propose a stereo motion consistency mechanism to enhance the trajectory prediction. Finally, the 3D feature", + "bbox": [ + 496, + 612, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "18976", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "position $\\mathbf{X}_{t_{i + 1}}$ at $t_{i + 1}$ is obtained by projection according to $\\Delta \\mathbf{u}_{t_i}^1$ and $d_{t_{i + 1}}$ . In practice, the patch size is $d = 31$ , and the length of the time bin is set to $4\\mathrm{ms}$ , i.e., $t_{i + 1} - t_i = 4\\mathrm{ms}$ . Thus, our proposed method could track the long-term 3D feature trajectories at 250 FPS.", + "bbox": [ + 76, + 90, + 468, + 165 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Model Architecture", + "text_level": 1, + "bbox": [ + 76, + 179, + 264, + 194 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Offset Estimation Module. As mentioned above, at times- tamp $t_i$ , we use an offset estimation module to predict the feature motion offsets projected in the camera plane, which takes $\\tilde{I}_{t_i}$ , $I_{t_0}$ , and $P_{t_i}$ as input. Inspired by the great success of DeepEvT [26], we use a similar two-branch Feature Pyramid Network (FPN) [19] to extract multi-modal features from event patch $P_{t_i}$ and the template patches $I_{t_i}$ and $I_{t_0}$ , respectively. The FPN contains 4 down-sample layers and 4 up-sample layers. 
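The spatiotemporal event patch construction described above can be sketched as follows. This is a simplified stand-in for the grid representation of [26]: it assumes events arrive as an (N, 4) array of (t, x, y, p) and simply accumulates signed polarities into a few temporal bins of a d x d window around the current feature estimate.

```python
# Minimal sketch, assuming events are an (N, 4) float array of (t, x, y, p);
# this is a plain polarity-accumulation grid, not the exact representation of [26].
import numpy as np

def event_patch(events, u, v, t0, t1, d=31, n_bins=5):
    t, x, y, p = events[:, 0], events[:, 1], events[:, 2], events[:, 3]
    r = (d - 1) // 2
    # Keep events inside the d x d window around (u, v) and the time bin [t0, t1).
    keep = (np.abs(x - u) <= r) & (np.abs(y - v) <= r) & (t >= t0) & (t < t1)
    t, x, y, p = t[keep], x[keep], y[keep], p[keep]
    patch = np.zeros((n_bins, d, d), dtype=np.float32)
    if t.size == 0:
        return patch
    bins = np.clip(((t - t0) / (t1 - t0) * n_bins).astype(int), 0, n_bins - 1)
    xi = (x - u + r).astype(int)
    yi = (y - v + r).astype(int)
    np.add.at(patch, (bins, yi, xi), p)   # signed accumulation of event polarities
    return patch
```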
Then, the bottleneck feature of FPN is leveraged to calculate the correlation map between the event patch and the feature template patch. The correlation map is further concatenated with the multi-modal feature and forwarded into a joint encoder with 4 down-sample layers and a ConvLSTM [34] layer to obtain fused feature $F_{t_i}$ . Then, we use a linear layer to compute the weights of $F_{t_{i - 1}}$ and $F_{t_i}$ and explicitly fuse the temporal information. Finally, a linear layer is leveraged to generate predicted feature motion offsets. Detailed network architecture is provided in the supplementary material. Using the offset estimation module, the feature motion offset $\\Delta \\mathbf{u}_{t_i}$ projected in the camera plane could be estimated.", + "bbox": [ + 76, + 203, + 470, + 520 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Motion Compensation Module. As shown in Fig. 2, high-speed 3D moving objects may have depth change and rotation, which may cause feature shape deformation. Therefore, tracking with the initial template patch may lead to fatal errors or even incorrectly tracking other features. To tackle this problem, we leverage a motion compensation module to correct the template patch at each moment. Specifically, the feature template patch may have scaling, rotation, and shear changes. It should be noted that translation is not considered since the feature motion offset is already predicted. At the timestamp $t_i$ , the fused temporal feature $F_{t_{i-1}}$ is leveraged as input to predict the scale factors $s_x, s_y$ , rotation angle $\\theta$ , and shear factors $t_x, t_y$ using 2 linear layers. Then, the affine transform is performed according to the predicted transform factors:", + "bbox": [ + 75, + 522, + 468, + 750 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {I} _ {t _ {i}} (u, v) = \\left[ \\begin{array}{c} \\beta s _ {x}, \\alpha s _ {y} \\\\ - \\alpha s _ {x}, \\beta s _ {y} \\end{array} \\right] \\left[ \\begin{array}{c} 1, a \\\\ - b, 1 + a b \\end{array} \\right] I _ {t _ {0}} (u, v), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 753, + 468, + 787 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\alpha = \\sin \\theta$ , $\\beta = \\cos \\theta$ , $a = \\tan t_x$ , and $b = \\tan t_y$ . Using the motion compensation module, the corrected template patch $\\tilde{I}_{t_i}$ at each timestamp could be obtained.", + "bbox": [ + 76, + 792, + 468, + 838 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Patch Matching Module. To further estimate the 3D position of the target feature, we propose a patch matching module based on bi-polarity hypergraph modeling to obtain the spatial position of the feature by predicting the disparity.", + "bbox": [ + 76, + 839, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Different from traditional stereo matching, for the 3D feature tracking task, the target feature is contained in the local event patch $P_{t_i}^1$ . Therefore, the disparity could only be predicted from the local patch instead of global information. Under such condition, mismatching will occur since the target scene may contain multiple similar features distributed in space and $P_{t_i}^1$ only contains local information. 
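For the motion compensation step, a minimal sketch of the template correction in Eq. (1) is given below, assuming the five predicted factors (s_x, s_y, theta, t_x, t_y) are available. OpenCV's warpAffine is used purely for illustration; the resampling details are not necessarily those of the authors' implementation.

```python
# Minimal sketch of the affine template correction in Eq. (1): compose the scaled
# rotation and shear matrices, then warp the template about its centre.
import numpy as np
import cv2

def compensate_template(template, s_x, s_y, theta, t_x, t_y):
    a, b = np.tan(t_x), np.tan(t_y)
    al, be = np.sin(theta), np.cos(theta)
    scale_rot = np.array([[be * s_x, al * s_y],
                          [-al * s_x, be * s_y]])      # scaled rotation term
    shear = np.array([[1.0, a],
                      [-b, 1.0 + a * b]])              # shear term
    A = scale_rot @ shear                              # combined 2x2 transform of Eq. (1)
    c = np.array([(template.shape[1] - 1) / 2.0,
                  (template.shape[0] - 1) / 2.0])      # warp about the patch centre
    M = np.hstack([A, (c - A @ c)[:, None]]).astype(np.float32)
    return cv2.warpAffine(template, M, (template.shape[1], template.shape[0]))
```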
Therefore, we propose a bi-polarity hypergraph-based high-order correlation modeling mechanism to eliminate mismatching.", + "bbox": [ + 496, + 90, + 890, + 226 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As mentioned in Sec. 3.1, for each timestamp $t_i$ , we use the event patch $P_{t_i}^1$ around $\\mathbf{u}_{t_i}^1$ and the corresponding event row patch $R_{t_i}^2$ from camera 2 to achieve patch matching. Specifically, we use 4 convolutional layers to extract features $\\mathbf{M}_{t_i}^1 \\in \\mathbb{R}^{d \\times d \\times c}$ and $\\mathbf{M}_{t_i}^2 \\in \\mathbb{R}^{d \\times W \\times c}$ from $P_{t_i}^1$ and $R_{t_i}^2$ , respectively, where $c$ is the feature channel and $W$ is the image width, i.e., the number of candidate matching positions. We further calculate the cost volume $\\mathbf{C}_{t_i} \\in \\mathbb{R}^{W \\times c}$ composed of the feature similarity between $\\mathbf{M}_{t_i}^1$ and $\\mathbf{M}_{t_i}^2$ at each matching position, which represents the pair-wise similarity between $P_{t_i}^1$ and the sub-patch of $R_{t_i}^2$ at each matching position. Then, the $W$ matching positions are used as vertices to construct bi-polarity hypergraphs. Compared to the pair-wise correlation contained in the cost volume, each hyperedge of a hypergraph could connect multiple vertices, i.e., high-order correlations among multiple vertices could be constructed. In practice, we use the Euclidean distance of the vertex feature as metric and calculate the $k$ nearest neighbors of each vertex. For each vertex, we use a hyperedge to connect the vertices in its $k$ neighbor vertices with spatial distance smaller than a certain threshold $\\delta$ . Therefore, a positive hypergraph $G^+$ with the adjacency matrix $H^+$ could be constructed. Besides, for each vertex, vertices with spatial distance larger than $\\delta$ in its $k$ neighbor vertices are connected by another hyperedge. Thus, a negative hypergraph $G^-$ with the adjacency matrix $H^-$ could be constructed. Each hyperedge of $G^+$ connects matching patches that are semantic similar and spatially close to $P_{t_i}^1$ . These connections are expected to be enhanced. In contrast, each hyperedge of $G^-$ connects matching patches that are semantic similar but spatially distant from $P_{t_i}^1$ , which are interference and needs to be suppressed. Then, inspired by [9], we propose a feature aggregation method based on bi-polarity hypergraphs:", + "bbox": [ + 496, + 226, + 892, + 739 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\mathbf {C}} _ {t _ {i}} = \\mathbf {C} _ {t _ {i}} + \\sigma \\left(\\left(\\mathbf {D} _ {v} ^ {+}\\right) ^ {- 1} \\mathbf {H} ^ {+} \\left(\\mathbf {D} _ {e} ^ {+}\\right) ^ {- 1} \\left(\\mathbf {H} ^ {+}\\right) ^ {\\top} \\mathbf {C} _ {t _ {i}} \\boldsymbol {\\Theta} ^ {+}\\right), \\tag {2} \\\\ - \\sigma \\left(\\left(\\mathbf {D} _ {v} ^ {-}\\right) ^ {- 1} \\mathbf {H} ^ {-} \\left(\\mathbf {D} _ {e} ^ {-}\\right) ^ {- 1} \\left(\\mathbf {H} ^ {-}\\right) ^ {\\top} \\mathbf {C} _ {t _ {i}} \\boldsymbol {\\Theta} ^ {-}\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 742, + 890, + 792 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{D}_e^*$ and $\\mathbf{D}_v^*$ are the diagonal matrices of hyperedge degree and vertex degree, respectively. $\\Theta^{*}$ is the learnable parameter, and $\\sigma (\\cdot)$ is the non-linear activation function. Using Eq. 
(2), features are aggregated to enhance vertices with similar features and spatial close and suppress vertices with similar features but spatially distant. Finally, $\\hat{\\mathbf{C}}_{t_i}$ is forwarded into a 1D convolutional layer with the kernel size of", + "bbox": [ + 496, + 794, + 892, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "18977", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 to regress the matching result. Using the patch matching module, the disparity $d_{t_i}$ of the feature is predicted.", + "bbox": [ + 75, + 90, + 468, + 119 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Projection. After the feature motion offsets $\\Delta \\mathbf{u}_{t_i}$ and the disparity $d_{t_i}$ are predicted, the 3D feature coordinates $\\mathbf{X}_{t_{i + 1}}$ at $t_{i + 1}$ could be computed using projection.", + "bbox": [ + 75, + 121, + 468, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Supervision and Loss Functions", + "text_level": 1, + "bbox": [ + 76, + 176, + 357, + 191 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stereo Motion Consistency. For objects moving in 3D space captured by stereo cameras, the 2D motion offsets are strongly constrained with the disparity. Meanwhile, our offset estimation module is also deeply coupled with the patch matching module. Therefore, inspired by [21], we leverage a stereo motion consistency constraint to reinforce this correlation. Consider a point $\\mathbf{X} = (x,y,z)$ , it's 2D coordinates in the camera plane could be calculated by $\\mathbf{u} = (u,v) = \\frac{f}{s}\\frac{(x,y)}{z}$ , where $f$ is the camera focal length and $s$ the coordinate convert factor. For calibrated stereo cameras with the baseline distance of $b$ , the disparity of $\\mathbf{X}$ is $d = \\frac{f}{s}\\frac{b}{z}$ . By taking the time derivative, we could obtain that $\\frac{\\Delta d}{\\Delta t} = -\\frac{f}{s}\\frac{b}{z^2}\\frac{\\Delta z}{\\Delta t}$ . Therefore, we have:", + "bbox": [ + 75, + 199, + 470, + 402 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nd _ {t _ {i}} - d _ {t _ {i - 1}} = \\Delta d = - \\frac {f}{s} \\frac {b}{z _ {t _ {i}} ^ {2}} \\left(z _ {t _ {i}} - z _ {t _ {i - 1}}\\right). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 130, + 404, + 468, + 436 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For the 2D motion offsets, we could similarly obtain that $\\frac{\\Delta\\mathbf{u}}{\\Delta t} = \\frac{f}{zs} (\\frac{\\Delta x}{\\Delta t},\\frac{\\Delta y}{\\Delta t}) - \\frac{f}{z^2s}\\frac{\\Delta z}{\\Delta t} (x,y)$ , i.e., we have $\\Delta \\mathbf{u} = (\\Delta u,\\Delta v) = \\frac{f}{zs} (\\Delta x,\\Delta y) - \\frac{f\\Delta z}{z^2s} (x,y)$ . In practice, suppose the coordinates of a feature in camera 1 at timestamp $t_i$ is $\\mathbf{X}_{t_i}^1 = (x_{t_i},y_{t_i},z_{t_i})$ , then the coordinates in camera 2 is $\\mathbf{X}_{t_i}^2 = (x_{t_i} - b,y_{t_i},z_{t_i})$ . Therefore, we have:", + "bbox": [ + 76, + 440, + 468, + 535 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\Delta u _ {t _ {i}} ^ {1} - \\Delta u _ {t _ {i}} ^ {2} = \\frac {f}{s} \\frac {b}{z _ {t _ {i}} ^ {2}} \\Delta z _ {t _ {i}} = - \\frac {f}{s} \\frac {b}{z _ {t _ {i}} ^ {2}} \\left(z _ {t _ {i}} - z _ {t _ {i - 1}}\\right). 
\\tag {4} \\\\ \\Delta v _ {t _ {i}} ^ {1} - \\Delta v _ {t _ {i}} ^ {2} = 0 \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 537, + 468, + 590 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Therefore, we could obtain the stereo motion constraint $\\Delta u_{t_i}^1 -\\Delta u_{t_i}^2 = d_{t_i} - d_{t_{i - 1}}$ from Eq. (3) and Eq. (4).", + "bbox": [ + 76, + 595, + 468, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "According to the stereo motion constraint, we introduce the stereo motion consistency loss:", + "bbox": [ + 76, + 626, + 468, + 654 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {i} ^ {\\mathrm {s m c}} = \\mathcal {L} _ {1} \\left(\\Delta u _ {t _ {i}} ^ {1} - \\Delta u _ {t _ {i}} ^ {2}, d _ {t _ {i}} - d _ {t _ {i - 1}}\\right) + \\mathcal {L} _ {1} \\left(\\Delta v _ {t _ {i}} ^ {1}, \\Delta v _ {t _ {i}} ^ {2}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 83, + 656, + 468, + 672 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_1(\\cdot ,\\cdot)$ is the Manhattan Distance.", + "bbox": [ + 76, + 676, + 349, + 690 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Loss Functions. Since our proposed method could predict the 3D feature coordinate $\\mathbf{X}_{t_i}$ at each timestamp, we use the Manhattan Distance between the predicted trajectories and ground truth trajectories as supervision:", + "bbox": [ + 75, + 691, + 468, + 751 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {i} ^ {\\text {t r a j}} = \\mathcal {L} _ {1} \\left(\\mathbf {X} _ {t _ {i}}, \\mathbf {X} _ {t _ {i}} ^ {\\mathrm {g t}}\\right). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 753, + 468, + 773 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Since both the offset estimation module and the patch matching module severely affect the 3D trajectory prediction accuracy, we compute the ground truth 2D feature offsets $\\mathbf{u}_{t_i}^{1^{\\mathrm{gt}}}$ and disparity $d_{t_i}^{\\mathrm{gt}}$ at each timestamp based on ground truth 3D trajectory through projection and use them as supervision. In practice, the offset estimation is supervised with the loss function:", + "bbox": [ + 75, + 777, + 468, + 881 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {i} ^ {\\text {o f f}} = \\mathcal {L} _ {1} \\left(\\Delta \\mathbf {u} _ {t _ {i}} ^ {1}, \\Delta \\mathbf {u} _ {t _ {i}} ^ {1 ^ {\\mathrm {g t}}}\\right). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 885, + 468, + 905 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/4a799901f3d6ab383c1cc04cbcff7ba38e695001d2763159712b95eca725b4a5.jpg", + "table_caption": [ + "Table 1. Comparison of our E-3DTrack dataset with other existing event-based feature tracking datasets." + ], + "table_footnote": [], + "table_body": "
DatasetDim.MotionScenarioGT Freq.
EC [27]2DHomo.Static200
EDS [15]2DHomo.Static150
E-3DTrack3DNon-homo.Dynamic250
", + "bbox": [ + 501, + 121, + 890, + 200 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The disparity prediction is supervised with:", + "bbox": [ + 500, + 214, + 787, + 229 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {i} ^ {\\text {d i s p}} = \\mathcal {L} _ {1} \\left(d _ {t _ {i}}, d _ {t _ {i}} ^ {\\mathrm {g t}}\\right). \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 624, + 233, + 890, + 253 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, our model is trained end-to-end with the supervision of the following total loss function:", + "bbox": [ + 498, + 257, + 890, + 287 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\sum_ {i = 1} ^ {N} \\left(\\mathcal {L} _ {i} ^ {\\text {t r a j}} + \\mathcal {L} _ {i} ^ {\\text {o f f}} + \\mathcal {L} _ {i} ^ {\\text {d i s p}} + \\alpha \\mathcal {L} _ {i} ^ {\\text {s m c}}\\right), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 289, + 890, + 329 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\alpha$ is a hyper-parameter and $N$ is the sequence length.", + "bbox": [ + 500, + 330, + 890, + 345 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. 3D Feature Tracking Dataset: E-3DTrack", + "text_level": 1, + "bbox": [ + 498, + 359, + 870, + 378 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In addressing the deficiency of high-speed 3D feature tracking datasets, we establish a hybrid vision system containing stereo event cameras and Optitrack, as shown in Fig. 3 (a), and curate the first event-based 3D feature tracking dataset, named E-3DTrack. Compared to existing event-based feature tracking datasets that contain only static scenes and 2D trajectories, our dataset is the first to contain high-speed moving objects and ground truth 3D feature trajectories.", + "bbox": [ + 496, + 386, + 890, + 507 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Limited by the capturing frequency of 3D vision sensors (e.g., $< 30$ FPS for LiDAR), it is difficult to accurately record the 3D feature trajectories of high-speed moving objects at a high frame rate. To tackle this problem, we use the motion capture system, i.e., Optitrack, to record the trajectory of each object attached with fixed markers. To explicitly obtain feature-level 3D trajectories, we use a scanner, i.e., FARO Quantum ScanArm, to capture the high precision point cloud of each object. Then, the 3D affine transform, incorporating a homogeneous scale, is calculated from the object coordinate system to the Optitrack coordinate system based on the markers' coordinates. This leads to the acquisition of the time-series point cloud sequence of the moving objects under the Optitrack coordinate system. Finally, the feature trajectories can be derived from the time-series point cloud sequence based on the feature point index. Hence, our dataset comprises ground truth 3D feature trajectories of high-speed moving objects at 250 FPS, surpassing the capturing frequency of most existing 3D vision sensors.", + "bbox": [ + 496, + 508, + 890, + 794 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Using our hybrid vision system, we captured 40 high-speed motion scenarios containing a total of 1300 sequences. We randomly select 10 scenarios as the test set, and the remaining 30 scenarios are selected as the training set. 
Note that due to the cross-scene division, the scene in the test set are unseen in the training set. More details of our dataset are provided in the supplementary material.", + "bbox": [ + 496, + 795, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "18978", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/66e0935b298ff5c1093ad283321fea5a22ff79f906e5d979e3c7a4799654f72a.jpg", + "image_caption": [ + "(a) Our hybrid vision system." + ], + "image_footnote": [], + "bbox": [ + 80, + 90, + 279, + 306 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/08521545b4d309877e6bcae82d07b84fc92d958dcadd6120848439c649462540.jpg", + "image_caption": [ + "(b) Samples of our dataset. From left to right: reference frame, feature patch, stereo event streams, and ground truth 3D feature trajectory." + ], + "image_footnote": [], + "bbox": [ + 287, + 90, + 410, + 306 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/408a890f2805c6dd15b7b6310e34938141138fd7f8bcb91ea9ba3844f9ad53c4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 413, + 90, + 506, + 306 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4543f26c0d61c9449759f4dbbbdf96a80a8d6f59dac960d350127f0e41a0cc16.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 90, + 630, + 306 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5cd31bd7ee6863a32c9932f002dfe8ff5452a00ba92be1da30d5d9430bfde28b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 633, + 90, + 751, + 306 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/debfb1089fde99fdbeee925cb6fc76c9b491857fc71619cd0e0f66e4740da341.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 756, + 90, + 885, + 306 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1 shows the comparison of our E-3DTrack dataset with other existing event-based feature tracking datasets, including Event Camera dataset (EC) [27] and Event-aided Direct Sparse Odometry (EDS) dataset [15]. The main advantages of our dataset are in the following three aspects.", + "bbox": [ + 76, + 376, + 467, + 450 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- 3D trajectory. Our dataset is the first feature tracking dataset containing ground truth 3D trajectories, enabling the feature motion trajectory estimation in 3D space.", + "- Non-homogeneous motion. Our dataset is the first event-based feature tracking dataset containing high-speed moving objects. Existing EC and EDS datasets mainly contain stationary scenarios. Thus, feature motions are caused by the camera movement. Since there are no moving objects in the scene, the motions of all features are almost homogeneous, as shown in Fig. 4. In contrast, the feature motions in our dataset are non-homogeneous, which is more conducive to applications.", + "- Accurate ground truth. Our dataset contains ground truth 3D feature trajectories captured from Optitrack. In contrast, since the DAVIS346 event camera could record event streams and 25 FPS video simultaneously, the ground truth 2D trajectories in EC and EDS datasets are obtained using frame-based feature tracking method KLT [25], or further triangulating KLT tracks using camera poses and reprojecting them to the frames. 
Thus, our dataset contains more accurate ground truth trajectories.", + "Figure 3 (b) shows some samples of our E-3DTrack dataset. We visualize the reference frames, feature template patches, stereo event streams, and the ground truth 3D feature trajectories of each sample." + ], + "bbox": [ + 76, + 452, + 467, + 829 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 844, + 207, + 859 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we first introduce the experimental settings. Then, we analyze the quantitative and qualitative compar", + "bbox": [ + 76, + 869, + 467, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/06316860464a31f3aedc0892a1232443b9493735a91a6e0a4f62883e76f9b448.jpg", + "image_caption": [ + "Figure 3. (a) Our hybrid vision system. (b) Samples of our E-3DTrack dataset. The first column is the reference frame at the initial moment, and the features to be tracked are marked in each frame. Some feature template patches are zoomed in for display in the second column. The stereo event streams and the ground truth 3D feature trajectories are shown in the last three columns, respectively.", + "(a) Sample from EC Dataset", + "Figure 4. Examples from existing EC [27] and EDS [15] dataset." + ], + "image_footnote": [], + "bbox": [ + 519, + 375, + 694, + 476 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/76a18df92186aa05f15cd2f015496559e22427e5a97ff952f9409d40c0a4f014.jpg", + "image_caption": [ + "(b) Sample from EDS Dataset" + ], + "image_footnote": [], + "bbox": [ + 697, + 375, + 867, + 476 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "isons, respectively. Finally, we conduct ablation studies to demonstrate the effectiveness of each proposed module.", + "bbox": [ + 500, + 518, + 890, + 550 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 500, + 559, + 705, + 575 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison Methods. Since there are no existing high-speed 3D feature tracking methods, we use existing event-based trajectory prediction methods to obtain 2D feature trajectories, and use stereo depth estimation methods to further obtain the 3D feature trajectory. Specifically, we combine the event-based optical flow estimation method E-RAFT [12], event-based feature tracking methods EKLT [10] and DeepEvT [26] with event-based stereo depth estimation methods TSES [39] and SDE [28], respectively, as our baseline comparison methods.", + "bbox": [ + 496, + 583, + 890, + 733 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics. To evaluate our proposed method and other comparison methods, we use the Tracked Feature Ratio (TFR, higher is better), Feature Age [26] (FR, higher is better), and the Root Mean Squared Error (RMSE, lower is better) as the metrics. TFR is calculated as the ratio of the time that the spatial distance between the predicted 3D trajectory and the ground truth 3D trajectory is less than a certain threshold $c$ to the total sequence time. See detailed definition in the supplementary material.", + "bbox": [ + 496, + 734, + 890, + 869 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation Details. Our method is implemented based on PyTorch [31]. 
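A minimal sketch of the evaluation metrics described above, computed from predicted and ground truth trajectories sampled at matching timestamps; the threshold c follows the tracked feature ratio definition given here, while the exact error conventions may differ in detail from the authors' evaluation code.

```python
# Minimal sketch, assuming pred and gt are (T, 3) arrays of 3D positions
# sampled at the same 250 FPS timestamps.
import numpy as np

def tracked_feature_ratio(pred, gt, c=0.1):
    """Fraction of timestamps whose 3D error stays below the threshold c (metres)."""
    err = np.linalg.norm(pred - gt, axis=1)   # per-timestep Euclidean error
    return float(np.mean(err < c))

def rmse(pred, gt):
    """Root mean squared 3D position error over the whole trajectory."""
    return float(np.sqrt(np.mean(np.sum((pred - gt) ** 2, axis=1))))
```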
Our model is trained end-to-end", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "18979", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/9b9a10b086ab67a0b88f4a0013e2b35807314c93a72db9909dd9aa1f79c1d329.jpg", + "table_caption": [ + "Table 2. Quantitative results on our E-3DTrack dataset. Feature age (FA), tracked feature ratio (TFR), and root mean square error (RMSE) are selected as the metrics. Bold numbers represent the best scores, and underlined numbers represent the second-best scores." + ], + "table_footnote": [], + "table_body": "
MethodFA(0.1m) ↑FA(0.15m) ↑FA(0.2m) ↑TFR(0.1m) ↑TFR(0.15m) ↑TFR(0.2m) ↑RMSE ↓
E-RAFT [12] + TSES [39]0.04090.06640.0920.17010.26670.34390.4726
E-RAFT [12] + SDE [28]0.13850.23990.32040.31210.47260.58060.3368
EKLIT [10] + TSES [39]0.02320.04290.06280.11800.19610.26850.4806
EKLIT [10] + SDE [28]0.10260.18560.25840.24210.37380.47000.4034
DeepEvT [26] + TSES [39]0.07130.11170.14520.37860.49910.58180.3549
DeepEvT [26] + SDE [28]0.23140.34620.43390.57820.70600.77650.1889
E-3DTrack (Ours)0.26010.41790.54280.69280.81640.87720.1181
", + "bbox": [ + 78, + 121, + 890, + 244 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/67becf0074b6712dc8070a378f68217081920948145beea1a1084d4262f4473f.jpg", + "table_caption": [ + "Table 3. Comparison of inference time on E-3DTrack dataset." + ], + "table_footnote": [], + "table_body": "
MethodE-RAFT + SDEDeepEvT + SDEOurs
Time (ms/step)154.8393.2240.30
", + "bbox": [ + 78, + 275, + 468, + 314 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "for 100 epochs with a batch size of 16. The optimization method is AdamW [23], and the cosine annealing schedule [22] is leveraged. The learning rate decays from $2 \\times 10^{-4}$ to $1 \\times 10^{-6}$ within 100 epochs. The hyperparameters are selected as $\\alpha = 0.25$ in Eq. (9), $k = 3$ and $\\delta = 16$ for bi-polarity hypergraph construction.", + "bbox": [ + 75, + 332, + 468, + 424 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Quantitative Comparison", + "text_level": 1, + "bbox": [ + 76, + 436, + 308, + 452 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table. 2 shows the quantitative comparison of our proposed method with other comparison methods. From the table, we could observe that our proposed method significantly outperforms all comparison methods and achieve state-of-the-art performance. Specifically, compared with the second-best method, i.e., the combination of the state-of-the-art 2D event-based feature tracking method DeepEvT [26] and stereo depth estimation method SDE [28], our proposed method reduces the RMSE by $37.5\\%$ and improves the FA by $12.4\\%$ , $20.7\\%$ , and $25.1\\%$ in terms of $c = 0.1 \\mathrm{~m}$ , $0.15 \\mathrm{~m}$ , and $0.2 \\mathrm{~m}$ , respectively.", + "bbox": [ + 75, + 460, + 468, + 626 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Compared to comparison methods that achieve trajectory prediction and depth estimation separately, our proposed method leverages a joint framework to track the 3D feature trajectories of high-speed moving objects. This indicates that for 3D moving objects, the feature trajectory in the camera plane is highly correlated with the 3D position. The simple combination of 2D trajectory prediction and 3D position estimation will lead to fatal errors. Instead, our proposed method tracks 3D trajectories accurately using the stereo motion consistency constraint. Meanwhile, compared to traditional stereo depth estimation methods, our proposed patch matching module uses a high-order correlation modeling mechanism based on bi-polarity hypergraph to eliminate mismatching of similar features, further enhancing the 3D feature tracking robustness.", + "bbox": [ + 75, + 628, + 468, + 853 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table. 3 shows the inference time comparison of our proposed method with other comparison methods. 
Specifically, we test the inference time of each tracking update", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/60071049676d8abe68b498277633c3877768eef4b8b64f2772d487d0fcabc3b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 256, + 622, + 325 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fb85a277362ca3fa2517db46065df845ab1d8a065784e68d3507291ae04b453e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 325, + 622, + 393 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/55a918718219c6b8e379082b265ed1d021ed8f9caca51f27e34e88aa294b46f1.jpg", + "image_caption": [ + "(a) Reference Feature Patch" + ], + "image_footnote": [], + "bbox": [ + 504, + 395, + 622, + 534 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6001b6668a4a34f5eed73689e5b8998d4c8648e61642cad5e036f7056d80fef1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 622, + 256, + 753, + 325 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9891ea5799361c2ab61fa2ea01ea44fccb490be75df0d0f00c89c75aec2fe094.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 624, + 325, + 753, + 395 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bc7bed56ccc987baad30604d08a63a4145adfb5b64d5fc88c704a329abd62968.jpg", + "image_caption": [ + "(b) DeepEvT + SDE" + ], + "image_footnote": [], + "bbox": [ + 624, + 395, + 753, + 534 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/de7d353215f22019a59b572a331ffa0b6bb10f6c4efbc8ea70d7400801106583.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 754, + 256, + 888, + 325 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c42af8aacb337deff3757c50aa8ab7d84123173aa59aad64a1e1900a482fa359.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 754, + 325, + 888, + 395 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/50415d6b9e9d0efeef18b30c59e5f65140153402a9358570a3d3d717cd9b1caf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 754, + 395, + 888, + 465 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a53b273207a25cdf6317f69b492bc024999198f6967e414dc4baafbbbe4a26ba.jpg", + "image_caption": [ + "(c) Ours", + "Figure 5. Qualitative comparison on our E-3DTrack dataset. From left to right: the reference feature patch, the ground truth feature trajectories (red), the feature trajectories (blue) predicted by DeepEvT [26] + SDE [28] and our proposed method, respectively." + ], + "image_footnote": [], + "bbox": [ + 754, + 465, + 888, + 534 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "step. From the table, we could observe that compared to the second-best method, i.e., DeepEvT + SDE, our proposed method reduces the inference time by $56.8\\%$ while achieving better tracking performance. This demonstrates the computational efficiency of our proposed method.", + "bbox": [ + 496, + 611, + 890, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3. Qualitative Comparison", + "text_level": 1, + "bbox": [ + 500, + 696, + 720, + 713 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 5 shows the qualitative 3D feature tracking results of our proposed method and the second-best comparison method, i.e., DeepEvT [26] + SDE [28]. 
The predicted 3D trajectories and the ground truth trajectories are shown in blue and red, respectively. From the figure, we could observe that our proposed method achieves more robust 3D feature tracking. As shown in the first row, the comparison method achieves adequate feature tracking performance when facing simple scenarios where the object motions do not contain significant depth changes. Such scenarios are similar to 2D feature tracking. Similar observations could be found in the second row. For the white geometric model", + "bbox": [ + 496, + 719, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "18980", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f516e9abefed5e9c3bf0b240442b88451917c06c6ca9ffbef1a20d4c655b5706.jpg", + "image_caption": [ + "Tracking Error" + ], + "image_footnote": [], + "bbox": [ + 76, + 104, + 271, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5a308180ff0d17a86bb0b60d51646c627c9dac3f90771f609e6964abd6baf8b4.jpg", + "image_caption": [ + "Tracked Feature Ratio", + "Figure 6. Results of the mean tracking error (left) and the feature tracked ratio (right) over tracking time." + ], + "image_footnote": [], + "bbox": [ + 276, + 104, + 468, + 224 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "with slight depth variation, the comparison method achieves 3D feature tracking with slight oscillations. However, for the red star with large depth variation and rotation, it could not be tracked accurately by the comparison method. The last two rows show two extreme scenarios, i.e., the 3D motions of the objects are with large depth variation and rotation, which will cause significant feature shape deformation. Under such scenarios, our comparison method tracks the features with fatal errors. In contrast, our proposed method tracks the 3D trajectories of the high-speed moving features robustly and continuously due to our motion compensation module and patch matching module.", + "bbox": [ + 75, + 277, + 468, + 459 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 6 further shows the tracking error (RMSE) and the tracked feature ratio (TFR) over time on our E-3DTrack dataset. The threshold is selected as $c = 0.1 \\, \\text{m}$ to calculate TFR. From the figure, we could observe that our proposed method can continuously track 3D trajectories of target features, i.e., our method maintains a high TFR consistently. From the figure, we could also observe that the TFR of E-RAFT + SDE is comparable with DeepEvT + SDE in initial stage, but gradually decreases over time. This is because the optical flow estimation method is lack of long-term consistent modeling. In contrast, our proposed method maintains a high TFR and a low tracking error over all time.", + "bbox": [ + 75, + 459, + 470, + 641 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4. Ablation Experiments", + "text_level": 1, + "bbox": [ + 76, + 650, + 284, + 667 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To demonstrate the effectiveness of each proposed module, we validate the performance of our model with and without the motion compensation module (denoted as MC), stereo motion consistency mechanism (denoted as $\\mathcal{L}^{\\mathrm{smc}}$ ), and the bi-polarity hypergraph-based high-order correlation modeling mechanism (denoted as BiHCM), respectively. The ablation experimental results are shown in Tab. 4. 
See supplementary material for detailed settings.", + "bbox": [ + 75, + 672, + 468, + 794 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Bi-Polarity Hypergraph Modeling. From Tab. 4 we could observe that compared with our base model (row (1)), the addition of BiHCM will increase TFR from 0.5586 to 0.6082. Compared with our full model, the removal of the BiHCM will lead to an RMSE increase of $20.8\\%$ . This is due to the fact that our proposed BiHCM could enhance the connection between patches with similar features that", + "bbox": [ + 75, + 795, + 470, + 901 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/2e6b97c77530af9c68b9cc924111819fa94be5f4ddff70f1437f517bbbf6c6bb.jpg", + "table_caption": [ + "Table 4. Ablation experiments on our E-3DTrack dataset." + ], + "table_footnote": [], + "table_body": "
BiHCMLsmcMCTFR0.1 m ↑RMSE↓
(1)XXX0.55860.1807
(2)XX0.60820.1505
(3)XX0.59420.1512
(4)XX0.56600.1624
(5)X0.67050.1268
(6)X0.65990.1312
(7)X0.64410.1427
(8)0.69280.1181
", + "bbox": [ + 501, + 107, + 885, + 244 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "are spatially close, and suppress patches with similar features but spatially distant, which could eliminate mismatching and further improve 3D feature tracking performance.", + "bbox": [ + 496, + 258, + 890, + 303 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Stereo Motion Consistency. As shown in Tab. 4, compared with the base model, the addition of stereo motion consistency constraint will reduce RMSE from 0.1807 to 0.1512. Compared with the full model, the removal of the stereo motion consistency constraint will increase RMSE by $11.1\\%$ . This is due to the fact that the stereo motion consistency could effectively constrain the correlation between the 2D trajectory and 3D spatial position of the objects, making our method predict more accurate and smooth 3D trajectory.", + "bbox": [ + 496, + 304, + 892, + 440 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Motion Compensation. As shown in Tab. 4, compared with the base model and the full model, the addition and removal of the motion compensation module resulted in $10.1\\%$ and $7.4\\%$ decrease and increase in RMSE, respectively. With the addition of the motion compensation module, our proposed method could better deal with feature deformation caused by depth changes and rotations of moving objects, and achieve more robust 3D feature tracking.", + "bbox": [ + 496, + 440, + 893, + 561 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "These ablation experiments demonstrate the effectiveness of each proposed module.", + "bbox": [ + 498, + 561, + 890, + 592 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 607, + 619, + 623 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose the first high-speed 3D feature tracking method that takes stereo event streams as input to estimate 3D feature trajectories. Our proposed method leverages a joint framework to obtain 3D feature trajectories by estimating the feature motion offsets and spatial position simultaneously. A motion compensation module and a patch matching module based on bi-polarity hypergraphs are proposed to achieve robust feature tracking. Meanwhile, the first 3D feature tracking dataset containing high-speed moving objects and ground truth 3D feature trajectories at 250 FPS is constructed, named E-3DTrack, which can be used as the first 3D feature tracking benchmark.", + "bbox": [ + 496, + 633, + 893, + 815 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Acknowledgment", + "text_level": 1, + "bbox": [ + 500, + 829, + 669, + 847 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported by National Natural Science Funds of China (No. 62021002 and No. 62088102), Beijing Natural Science Foundation (No. 4222025).", + "bbox": [ + 498, + 854, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "18981", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Himanshu Akolkar, Sio-Hoi Ieng, and Ryad Benosman. Real-Time High Speed Motion Prediction Using Fast Aperture-Robust Event-Driven Visual Flow. IEEE Trans. Pattern Anal. Mach. Intell., 44(1):361-372, 2020. 
2", + "[2] Ignacio Alzugaray and Margarita Chli. ACE: An Efficient Asynchronous Corner Tracker for Event Cameras. In Int. Conf. on 3D Vis., pages 653-661. IEEE, 2018. 2", + "[3] Ignacio Alzugaray and Margarita Chli. HASTE: MultiHypothesis Asynchronous Speeded-up Tracking of Events. In The British Machine Vision Conference, page 744, 2020. 2", + "[4] Patrick Bardow, Andrew J Davison, and Stefan Leutenegger. Simultaneous optical flow and intensity estimation from an event camera. In IEEE Conf. Comput. Vis. Pattern Recog., pages 884-892, 2016. 2", + "[5] Herbert Bay, Andreas Ess, Tinne Tuytelaars, and Luc Van Gool. Speeded-up Robust Features (SURF). Comput. Vis. and Image Underst., 110(3):346-359, 2008. 1", + "[6] Paul J Besl and Neil D McKay. Method for Registration of 3-D Shapes. In Sensor Fusion IV: Control Paradigms and Data Structures, pages 586-606, 1992. 2", + "[7] Christian Brandli, Raphael Berner, Minhao Yang, Shih-Chii Liu, and Tobi Delbruck. A $240 \\times 180$ 130 dB $3\\mu s$ Latency Global Shutter Spatiotemporal Vision Sensor. IEEE J. of Solid-State Circuits, 49(10):2333-2341, 2014. 2", + "[8] Guillermo Gallego, Henri Rebecq, and Davide Scaramuzzi. A Unifying Contrast Maximization Framework for Event Cameras, with Applications to Motion, Depth, and Optical Flow Estimation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 3867-3876, 2018. 1", + "[9] Yue Gao, Yifan Feng, Shuyi Ji, and Rongrong Ji. HGNN+: General Hypergraph Neural Networks. IEEE Trans. Pattern Anal. Mach. Intell., 45(3):3181-3199, 2023. 4", + "[10] Daniel Gehrig, Henri Rebecq, Guillermo Gallego, and Davide Scaramuzza. EKLT: Asynchronous Photometric Feature Tracking using Events and Frames. Int. J. Comput. Vis., 128(3):601-618, 2020. 2, 3, 6, 7", + "[11] Daniel Gehrig, Michelle Ruegg, Mathias Gehrig, Javier Hidalgo-Carrió, and Davide Scaramuzza. Combining Events and Frames Using Recurrent Asynchronous Multimodal Networks for Monocular Depth Prediction. IEEE Robot. and Autom. Lett., 6(2):2822-2829, 2021. 3", + "[12] Mathias Gehrig, Mario Millhäsler, Daniel Gehrig, and Davide Scaramuzza. E-RAFT: Dense Optical Flow from Event Cameras. In Int. Conf. 3D Vis., pages 197–206. IEEE, 2021. 2, 6, 7", + "[13] Gallego Guillermo, Delbruck Tobi, Michael Orchard Garrick, Bartolozzi Chiara, Taba Brian, Censi Andrea, Leutenegger Stefan, Davison Andrew, Conradt Jorg, Daniilidis Kostas, and Scaramuzza Davide. Event-Based Vision: A Survey. IEEE Trans. Pattern Anal. Mach. Intell., 2020. 2", + "[14] Javier Hidalgo-Carrió, Daniel Gehrig, and Davide Scaramuzza. Learning monocular dense depth from events. In Int. Conf. on 3D Vis., pages 534-542. IEEE, 2020. 3", + "[15] Javier Hidalgo-Carrio, Guillermo Gallego, and Davide Scaramuzza. Event-aided direct sparse odometry. In IEEE" + ], + "bbox": [ + 76, + 114, + 470, + 902 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Conf. Comput. Vis. Pattern Recog., pages 5781-5790, 2022. 5, 6", + "[16] Sumin Hu, Yeeun Kim, Hyungtae Lim, Alex Junho Lee, and Hyun Myung. eCDT: Event Clustering for Simultaneous Feature Detection and Tracking. In Int. Conf. Intel. Robot. Syst., pages 3808-3815. IEEE, 2022. 2", + "[17] Hanme Kim, Stefan Leutenegger, and Andrew J. Davison. Real-Time 3D Reconstruction and 6-DoF Tracking with an Event Camera. In *Eur. Conf. Comput. Vis.*, pages 349–364, 2016. 1", + "[18] Beat Kueng, Elias Mueggler, Guillermo Gallego, and Davide Scaramuzza. Low-latency visual odometry using event-based feature tracks. In Int. Conf. Intell. Robot. 
Syst., pages 16-23. IEEE, 2016. 2", + "[19] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature Pyramid Networks for Object Detection. In IEEE Conf. Comput. Vis. Pattern Recog., pages 2117-2125, 2017. 4", + "[20] Daqi Liu, Alvaro Parra, and Tat-Jun Chin. Globally Optimal Contrast Maximisation for Event-based Motion Estimation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6349-6358, 2020. 1", + "[21] Pengpeng Liu, Irwin King, Michael R Lyu, and Jia Xu. Flow2stereo: Effective Self-Supervised Learning of Optical Flow and Stereo Matching. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6648-6657, 2020. 5", + "[22] Ilya Loshchilov and Frank Hutter. SGDR: Stochastic Gradient Descent with Warm Restarts. In Int. Conf. Learn. Represent., 2017. 7", + "[23] Ilya Loshchilov and Frank Hutter. Decoupled Weight Decay Regularization. Int. Conf. Learn. Represent., 2019. 7", + "[24] David G Lowe. Distinctive Image Features from Scale-Invariant Keypoints. Int. J. Comput. Vis., 60:91-110, 2004. 1", + "[25] Bruce D Lucas and Takeo Kanade. An Iterative Image Registration Technique with an Application to Stereo Vision. In IJCAI, pages 674-679, 1981. 1, 6", + "[26] Nico Messikommer, Carter Fang, Mathias Gehrig, and Davide Scaramuzza. Data-Driven Feature Tracking for Event Cameras. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5642–5651, 2023. 2, 3, 4, 6, 7", + "[27] Elias Mueggler, Henri Rebecq, Guillermo Gallego, Tobi Delbruck, and Davide Scaramuzza. The Event-Camera Dataset and Simulator: Event-Based Data for Pose Estimation, Visual Odometry, and SLAM. Int. J. of Robot. Researc., 36(2): 142-149, 2017. 5, 6", + "[28] Yeongwoo Nam, Mohammad Mostafavi, Kuk-Jin Yoon, and Jonghyun Choi. Stereo Depth From Events Cameras: Concentrate and Focus on the Future. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6114-6123, 2022. 3, 6, 7", + "[29] Liyuan Pan, Miaomiao Liu, and Richard Hartley. Single Image Optical Flow Estimation with an Event Camera. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1669-1678. IEEE, 2020. 2", + "[30] Federico Paredes-Vallés, Kirk YW Scheper, and Guido CHE De Croon. Unsupervised Learning of a Hierarchical Spiking Neural Network for Optical Flow Estimation: From Events" + ], + "bbox": [ + 501, + 92, + 890, + 902 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "18982", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "to Global Motion Perception. IEEE Trans. Pattern Anal. Mach. Intell., 42(8):2051-2064, 2019. 2", + "[31] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An Imperative Style, High-Performance Deep Learning Library. Adv. Neural Inform. Process. Syst., 32, 2019. 6", + "[32] Lichtsteiner Patrick, Posch Christoph, and Delbruck Tobi. A $128 \\times 128$ dB 15 μs Latency Asynchronous Temporal Contrast Vision Sensor. IEEE J. of Solid-State Circuits, 43 (2):566-576, 2008. 2", + "[33] Jianbo Shi and Carlo Tomasi. Good Features to Track. In IEEE Conf. Comput. Vis. Pattern Recog., pages 593-600. IEEE, 1994. 1", + "[34] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting. Adv. Neural Inform. Process. Syst., 28, 2015. 4", + "[35] Carlo Tomasi and Takeo Kanade. Detection and Tracking of Point. Int. J. Comput. 
Vis., 9(137-154):3, 1991. 1", + "[36] Jiqing Zhang, Xin Yang, Yingkai Fu, Xiaopeng Wei, Baocai Yin, and Bo Dong. Object Tracking by Jointly Exploiting Frame and Event Domain. In Int. Conf. Comput. Vis., pages 13043-13052, 2021. 1", + "[37] Jiqing Zhang, Bo Dong, Haiwei Zhang, Jianchuan Ding, Felix Heide, Baocai Yin, and Xin Yang. Spiking Transformers for Event-Based Single Object Tracking. In IEEE Conf. Comput. Vis. Pattern Recog., pages 8801-8810, 2022. 1", + "[38] Alex Zihao Zhu, Nikolay Atanasov, and Kostas Daniilidis. Event-Based Feature Tracking with Probabilistic Data Association. In IEEE Int. Conf. Robot. Autom., pages 4465-4470. IEEE, 2017. 2", + "[39] Alex Zihao Zhu, Yibo Chen, and Kostas Daniilidis. Realtime Time Synchronized Event-Based Stereo. In Eur. Conf. Comput. Vis., pages 433-447, 2018. 3, 6, 7", + "[40] Alex Zihao Zhu, Liangzhe Yuan, Kenneth Chaney, and Kostas Daniilidis. Unsupervised Event-Based Learning of Optical Flow, Depth, and Egomotion. In IEEE Conf. Comput. Vis. Pattern Recog., pages 989-997, 2019. 3", + "[41] Alex Zihao Zhu, Nikolay Atanasov, and Kostas Daniilidis. Event-Based Visual Inertial Odometry. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5391-5399, 2017. 1" + ], + "bbox": [ + 78, + 90, + 468, + 686 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "18983", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/3D Feature Tracking via Event Camera/a6809d22-03e7-4639-b845-8393b79ecc8d_model.json b/2024/3D Feature Tracking via Event Camera/a6809d22-03e7-4639-b845-8393b79ecc8d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..3eb3b6a30642a61240e74b2e0a43f980111cd903 --- /dev/null +++ b/2024/3D Feature Tracking via Event Camera/a6809d22-03e7-4639-b845-8393b79ecc8d_model.json @@ -0,0 +1,2321 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.044 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.285, + 0.131, + 0.688, + 0.154 + ], + "angle": 0, + "content": "3D Feature Tracking via Event Camera" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.18, + 0.803, + 0.2 + ], + "angle": 0, + "content": "Siqi Li\\(^{1}\\) Zhikuan Zhou\\(^{1}\\) Zhou Xue\\(^{2}\\) Yipeng Li\\(^{3}\\) Shaoyi Du\\(^{4}\\) Yue Gao\\(^{1*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.201, + 0.865, + 0.217 + ], + "angle": 0, + "content": "\\(^{1}\\) {BNRist, THUIBCS, School of Software}, Tsinghua University \\(^{2}\\)Li Auto \\(^{3}\\)Department of Automation, Tsinghua University" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.219, + 0.846, + 0.252 + ], + "angle": 0, + "content": "\\(^{4}\\)National Key Laboratory of Human-Machine Hybrid Augmented Intelligence, National Engineering Research Center for Visual Information and Applications, and Institute of Artificial Intelligence and Robotics, Xi'an Jiaotong University" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.256, + 0.865, + 0.27 + ], + "angle": 0, + "content": "{lsq19, zzk22}@mails.tsinghua.edu.cn, xuezhou08@gmail.com, dushaoyi@xjtu.edu.cn, {liep, gaoyue}@tsinghua.edu.cn" + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.289, + 0.885, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.508, + 0.895, + 0.551 + ], + "angle": 0, + "content": "Figure 1. We present the first high-speed 3D feature tracking method via stereo event cameras and the corresponding high-speed 3D feature tracking dataset. Our proposed method takes high temporal resolution event streams captured from stereo event cameras as input, and could predict the long-term feature motion trajectories of multiple high-speed moving objects within the scene at a rate of 250 FPS." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.556, + 0.314, + 0.571 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.578, + 0.474, + 0.835 + ], + "angle": 0, + "content": "This paper presents the first 3D feature tracking method with the corresponding dataset. Our proposed method takes event streams from stereo event cameras as input to predict 3D trajectories of the target features with high-speed motion. To achieve this, our method leverages a joint framework to predict the 2D feature motion offsets and the 3D feature spatial position simultaneously. A motion compensation module is leveraged to overcome the feature deformation. A patch matching module based on bipolarity hypergraph modeling is proposed to robustly estimate the feature spatial position. Meanwhile, we collect the first 3D feature tracking dataset with high-speed moving objects and ground truth 3D feature trajectories at 250 FPS, named E-3DTrack, which can be used as the first high-speed 3D feature tracking benchmark. Our code and dataset could be found at: https://github.com/lisiqi19971013/E-3DTrack." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.845, + 0.21, + 0.86 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Feature tracking aims to predict the long-term trajectories of target features, which is fundamental in many computer" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.557, + 0.892, + 0.634 + ], + "angle": 0, + "content": "vision tasks, e.g., object tracking [36, 37], 3D reconstruction [8, 17], and SLAM [20, 41]. 
Frame-based feature tracking methods [5, 24, 25, 33, 35] have been extensively investigated in the past decades. However, all existing methods focus on tracking 2D feature trajectories in the image plane." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.638, + 0.893, + 0.79 + ], + "angle": 0, + "content": "In real-world scenarios, objects are moving in 3D space, e.g., cars are racing on the road from near to far. The tracking of features with high-speed 3D motion becomes essential. Consequently, there is an imperative need to investigate 3D feature tracking methods capable of predicting feature trajectories for objects undergoing high-speed 3D motion. Such methods hold significant promise for various downstream applications, e.g., VR, AR, and autonomous driving. To the best of our knowledge, existing literature lacks established high-speed 3D feature tracking methodologies." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.895, + 0.903 + ], + "angle": 0, + "content": "For the 3D feature tracking of high-speed moving objects, the main challenges lie in three folds. (1) With the limited frame rate of traditional frame-based cameras, the motion of high-speed moving objects may not be consistently captured due to the blind time between consecutive frames. Therefore, how to continually record valid motion information of high-speed moving objects is the first chal" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "18974" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.272 + ], + "angle": 0, + "content": "lenge. (2) The second challenge lies in establishing the correlation between the 3D position of the feature and the 2D visual data acquired by cameras to generate a continuous and smooth 3D feature trajectory. (3) To the best of our knowledge, there are currently no existing high-speed 3D feature tracking datasets. This is primarily due to the difficulty in capturing ground truth 3D feature trajectories of high-speed moving objects, which is constrained by the insufficient capture frequency of existing 3D vision sensors. Thus, the lack of high-speed 3D feature tracking dataset is the third challenge, which is also a principal impediment to the advancement of research within this domain." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.276, + 0.473, + 0.518 + ], + "angle": 0, + "content": "To overcome the motion capture challenge, we use event cameras to record motion dynamics of high-speed moving objects. Event cameras [7, 32] are bio-inspired vision sensors that asynchronously respond to pixel-wise brightness changes. Specifically, when the logarithmic change of the brightness at a pixel exceeds a certain threshold, i.e., \\( |\\Delta_t \\log I(x,y,t)| > C \\), where \\( I(x,y,t) \\) is the brightness at pixel \\( (x,y) \\) and timestamp \\( t \\), an event will be triggered, denoted as \\( e = (t,x,y,p) \\), where \\( p \\in \\{1,-1\\} \\) is the polarity. The output event stream of event cameras, formed by events triggered by all pixels, showcases their remarkably high temporal resolution (in the order of microseconds) and broad dynamic range (up to 140 dB) [13]. These unique features of event cameras render them promising tools for achieving 3D feature tracking in the context of high-speed moving objects." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.521, + 0.473, + 0.748 + ], + "angle": 0, + "content": "To address the aforementioned technical challenge, we propose a high-speed 3D feature tracking method based on stereo event cameras, predicting the long-term 3D trajectories of target features from stereo event streams and template patches. To achieve 3D feature tracking, our proposed method leverages a joint framework to predict the 2D feature motion offsets and the feature spatial position at each timestamp simultaneously. A motion compensation module is leveraged to adapt to the feature deformation, and a patch matching module based on bi-polarity hypergraph modeling is proposed to accurately estimate the feature spatial position. In addition, we introduce a stereo motion consistency mechanism that establishes the constraint between the feature motion offsets and the spatial position to achieve smooth 3D trajectory estimation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.473, + 0.903 + ], + "angle": 0, + "content": "To address the data challenge, we establish a hybrid vision system and curate the first real-world event-based 3D feature tracking dataset, named E-3DTrack. Our dataset includes multiple objects demonstrating high-speed motion in the scene, with stereo event cameras capturing high temporal resolution event streams, as shown in Fig. 1. To obtain the ground truth of the 3D feature trajectories, we utilize the Optitrack motion capture system to record the motion trajectory of each moving object. This information is then integrated with the high-precision object point cloud" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.182 + ], + "angle": 0, + "content": "scanned by FARO Quantum ScanArm, resulting in the generation of the ground truth 3D trajectories of each feature at a rate of 250 FPS. To the best of our knowledge, our dataset is the first event-based feature tracking dataset containing high-speed moving objects and providing 3D ground truth feature trajectories." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.183, + 0.858, + 0.197 + ], + "angle": 0, + "content": "Our contributions could be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.198, + 0.892, + 0.242 + ], + "angle": 0, + "content": "- We propose the first high-speed 3D feature tracking method based on stereo event cameras, which could track the 3D trajectories of features with high-speed motion." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.243, + 0.892, + 0.348 + ], + "angle": 0, + "content": "- We achieve satisfactory 3D feature tracking performance through a motion compensation module for addressing feature deformation, a patch matching module based on bi-polarity hypergraph modeling for accurate estimation of 3D feature positions, and a stereo motion consistency mechanism to establish constraints between feature motion offsets and 3D position." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.348, + 0.892, + 0.424 + ], + "angle": 0, + "content": "- We collect the first real-world 3D feature tracking dataset containing multiple high-speed moving objects, named E-3DTrack. Our dataset contains stereo event streams and 250 FPS ground truth 3D feature trajectories, which could be used as the 3D feature tracking benchmark." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.198, + 0.892, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.438, + 0.642, + 0.453 + ], + "angle": 0, + "content": "2. 
Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.463, + 0.892, + 0.675 + ], + "angle": 0, + "content": "Trajectory Prediction via Event Camera. Event-based feature tracking methods have been developed rapidly within the last decade. Earlier works [18, 38] treat the events as a point set and used ICP [6] to estimate feature motion trajectories. Then, EKLT [10] is proposed to obtain feature patch from the reference frame as template, and use the event stream to track the template and predict the trajectory. Meanwhile, some event-by-event trackers [2, 3] are proposed to exploit the asynchronicity of event camera, e.g., eCDT [16] employs a clustering method to cluster adjacent events, and uses cluster descriptors to find continual feature tracks. Recently, DeepEvT [26] is proposed as the first data-driven event-based feature tracking method, which achieves state-of-the-art 2D feature tracking performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.765 + ], + "angle": 0, + "content": "An alternative approach for trajectory prediction is optical flow estimation, wherein the pixel-level motion field is predicted using the input event stream. Compared with feature tracking, these methods [1, 4, 12, 29, 30] focus more on estimating the motion field between adjacent moments and lack modeling of long-term trajectory consistency." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.825 + ], + "angle": 0, + "content": "However, all these existing trajectory prediction methods could only predict 2D feature trajectories in the image plane while the real objects are moving in 3D space, i.e., the predicted feature motion trajectories are information-deficient." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Event-based 3D Position Estimation. As the 2D feature trajectories can be predicted, a simple and straightforward solution is to use a monocular or stereo depth estimation method to predict the depth of the feature and calculate the 3D position. In recent years, several event-based" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18975" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.087, + 0.087, + 0.891, + 0.344 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.349, + 0.893, + 0.42 + ], + "angle": 0, + "content": "Figure 2. Our proposed method takes stereo event streams as input to predict the 3D trajectory of the target feature provided in the initial template patch \\( I_{t_0} \\). For a subsequent timestamp \\( t_i \\), the deformed template patch \\( I_{t_i} \\) is predicted using the motion compensation module. Then, \\( I_{t_i} \\), \\( I_{t_0} \\), and the events \\( P_{t_i} \\) triggered within the spatiotemporal neighboring patch of the predicted feature position \\( \\mathbf{u}_{t_i} \\) are forwarded into the offset estimation module to estimate the feature motion offsets \\( \\Delta \\mathbf{u}_{t_i} \\). Meanwhile, a patch matching module based on bi-polarity hypergraph modeling is leveraged to predict the disparity. Finally, a projection operation is performed to update the 3D trajectory." 
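For orientation, a toy Python skeleton of the per-timestep loop this caption describes is given below. The three stand-in steps (motion compensation, offset estimation, patch matching), the constant offsets and disparities, and the camera parameters are all placeholders, not the paper's networks, and the principal-point offset is omitted for brevity.

import numpy as np

def project_to_3d(u, disparity, f=500.0, b=0.1):
    # Pinhole back-projection: depth z = f*b/disparity, then X = (z*u_x/f, z*u_y/f, z).
    z = f * b / disparity
    return np.array([z * u[0] / f, z * u[1] / f, z])

def track(template, u, steps=5, f=500.0, b=0.1):
    trajectory = []
    disparity = 25.0
    for _ in range(steps):
        deformed = template            # stand-in for the motion compensation module
        du = np.array([1.0, 0.5])      # stand-in for the offset estimation module (2D offset)
        disparity = disparity - 0.5    # stand-in for the patch matching module (disparity)
        u = u + du                     # updated feature position in the camera-1 plane
        trajectory.append(project_to_3d(u, disparity, f, b))
    return np.stack(trajectory)

print(track(np.zeros((31, 31)), np.array([320.0, 240.0])))

The point of the sketch is only the control flow: every 4 ms time bin, a 2D offset and a disparity are estimated jointly and then projected to extend the 3D trajectory.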
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.434, + 0.47, + 0.48 + ], + "angle": 0, + "content": "monocular [11, 14, 40] or stereo [28, 39] depth estimation methods are proposed, which could estimate the depth map from the input single-view or multi-view event streams." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.481, + 0.471, + 0.557 + ], + "angle": 0, + "content": "However, we will show that the simple combination of these two types of methods could not achieve satisfying long-term 3D feature trajectories prediction performance in Sec. 5.2. Therefore, high-speed 3D feature tracking is still a challenging open problem." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.573, + 0.169, + 0.589 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.6, + 0.47, + 0.66 + ], + "angle": 0, + "content": "In this section, we commence with an overview of the pipeline in Sec. 3.1, subsequently delving into the detailed architecture in Sec. 3.2, and conclude by outlining the supervision of our method in Sec. 3.3." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.673, + 0.188, + 0.687 + ], + "angle": 0, + "content": "3.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.697, + 0.47, + 0.834 + ], + "angle": 0, + "content": "As shown in Fig. 2, our proposed method takes stereo event streams as input to predict 3D feature trajectories in camera 1 coordinate system. The target features are contained in gray-scale template patches at the initial moment. This is the common setting for event-based feature tracking, e.g., EKLT [10] and DeepEvT [26]. Our method leverages a joint framework to predict features' 2D motion offsets and the 3D spatial positions simultaneously at each timestamp, and further obtain the 3D trajectories through projection." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.834, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Specifically, let \\(\\mathcal{E}^j = \\left\\{e_k^j = (t_k^j,u_k^j,v_k^j,p_k^j)\\right\\}\\) denote the event stream captured by an event camera, where \\(j = 1,2\\) denotes camera 1 and camera 2, respectively, \\(e_k^j\\) is the \\(k\\)-th event captured by camera \\(j\\). The feature to be tracked" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.434, + 0.893, + 0.602 + ], + "angle": 0, + "content": "is provided in a \\(d \\times d\\) template patch \\(I_0\\) captured by camera 1 at initial moment \\(t_0\\). Then, the feature trajectory is predicted step-by-step. For a subsequent timestamp \\(t_i\\), we calculate the 2D feature coordinates \\(\\mathbf{u}_{t_i}^1 = (u_{t_i}, v_{t_i})\\) projected in camera 1 based on the predicted 3D feature position \\(\\mathbf{X}_{t_i} = (x_{t_i}, y_{t_i}, z_{t_i})\\) at the previous step. To calculate the feature trajectory, the events \\(\\mathcal{E}_i^1\\) triggered in the \\(d \\times d\\) patch around \\(\\mathbf{u}_{t_i}^1\\) and within the time bin \\([t_i, t_{i+1}]\\) are leveraged to provide feature motion information. Then, \\(\\mathcal{E}_i^1\\) is converted into grid-based event patch \\(P_{t_i}^1\\) using the event representation method proposed in [26]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.613, + 0.895, + 0.903 + ], + "angle": 0, + "content": "As shown in Fig. 2, the 3D movement of the object may cause deformation of the feature template patch. To tackle this challenge, we leverage a motion compensation module to predict the deformed template patch \\(\\tilde{I}_{t_i}\\) at timestamp \\(t_i\\). 
Then, \\(\\tilde{I}_{t_i}\\) and \\(I_{t_0}\\) are concatenated and forwarded into the offset estimation module together with \\(P_{t_i}^1\\) to predict the 2D feature motion offset \\(\\Delta \\mathbf{u}_{t_i}^1\\). To further estimate the 3D position of the target feature, we use the events triggered within the same \\(d\\) rows as \\(P_{t_i}^1\\) from \\(\\mathcal{E}^2\\), i.e., \\(\\mathcal{E}_i^2 = \\{e_k^2 | u_{t_i} - \\frac{d-1}{2} \\leq u_k^2 \\leq u_{t_i} + \\frac{d-1}{2}, t_i \\leq t_k^2 \\leq t_{i+1}\\}\\), to generate event row patch \\(R_{t_i}^2\\) using the same event representation method. Then, the disparity \\(d_{t_{i+1}}\\) could be predicted from \\(P_{t_i}^1\\) and \\(R_{t_i}^2\\) using our proposed patch matching module based on bipolarity hypergraph modeling. In addition, inspired by the fact that the 2D motion offsets in both camera planes have a constraint with the disparity change, we use the event patch \\(P_{t_i}^2\\) of camera 2 to compute the offset \\(\\Delta \\mathbf{u}_{t_i}^2\\) in the training stage, and propose a stereo motion consistency mechanism to enhance the trajectory prediction. Finally, the 3D feature" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "18976" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.166 + ], + "angle": 0, + "content": "position \\(\\mathbf{X}_{t_{i + 1}}\\) at \\(t_{i + 1}\\) is obtained by projection according to \\(\\Delta \\mathbf{u}_{t_i}^1\\) and \\(d_{t_{i + 1}}\\). In practice, the patch size is \\(d = 31\\), and the length of the time bin is set to \\(4\\mathrm{ms}\\), i.e., \\(t_{i + 1} - t_i = 4\\mathrm{ms}\\). Thus, our proposed method could track the long-term 3D feature trajectories at 250 FPS." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.18, + 0.265, + 0.195 + ], + "angle": 0, + "content": "3.2. Model Architecture" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.204, + 0.471, + 0.521 + ], + "angle": 0, + "content": "Offset Estimation Module. As mentioned above, at times- tamp \\( t_i \\), we use an offset estimation module to predict the feature motion offsets projected in the camera plane, which takes \\( \\tilde{I}_{t_i} \\), \\( I_{t_0} \\), and \\( P_{t_i} \\) as input. Inspired by the great success of DeepEvT [26], we use a similar two-branch Feature Pyramid Network (FPN) [19] to extract multi-modal features from event patch \\( P_{t_i} \\) and the template patches \\( I_{t_i} \\) and \\( I_{t_0} \\), respectively. The FPN contains 4 down-sample layers and 4 up-sample layers. Then, the bottleneck feature of FPN is leveraged to calculate the correlation map between the event patch and the feature template patch. The correlation map is further concatenated with the multi-modal feature and forwarded into a joint encoder with 4 down-sample layers and a ConvLSTM [34] layer to obtain fused feature \\( F_{t_i} \\). Then, we use a linear layer to compute the weights of \\( F_{t_{i - 1}} \\) and \\( F_{t_i} \\) and explicitly fuse the temporal information. Finally, a linear layer is leveraged to generate predicted feature motion offsets. Detailed network architecture is provided in the supplementary material. Using the offset estimation module, the feature motion offset \\( \\Delta \\mathbf{u}_{t_i} \\) projected in the camera plane could be estimated." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.523, + 0.47, + 0.75 + ], + "angle": 0, + "content": "Motion Compensation Module. As shown in Fig. 
2, high-speed 3D moving objects may have depth change and rotation, which may cause feature shape deformation. Therefore, tracking with the initial template patch may lead to fatal errors or even incorrectly tracking other features. To tackle this problem, we leverage a motion compensation module to correct the template patch at each moment. Specifically, the feature template patch may have scaling, rotation, and shear changes. It should be noted that translation is not considered since the feature motion offset is already predicted. At the timestamp \\( t_i \\), the fused temporal feature \\( F_{t_{i-1}} \\) is leveraged as input to predict the scale factors \\( s_x, s_y \\), rotation angle \\( \\theta \\), and shear factors \\( t_x, t_y \\) using 2 linear layers. Then, the affine transform is performed according to the predicted transform factors:" + }, + { + "type": "equation", + "bbox": [ + 0.1, + 0.755, + 0.469, + 0.789 + ], + "angle": 0, + "content": "\\[\n\\tilde {I} _ {t _ {i}} (u, v) = \\left[ \\begin{array}{c} \\beta s _ {x}, \\alpha s _ {y} \\\\ - \\alpha s _ {x}, \\beta s _ {y} \\end{array} \\right] \\left[ \\begin{array}{c} 1, a \\\\ - b, 1 + a b \\end{array} \\right] I _ {t _ {0}} (u, v), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.794, + 0.469, + 0.839 + ], + "angle": 0, + "content": "where \\(\\alpha = \\sin \\theta\\), \\(\\beta = \\cos \\theta\\), \\(a = \\tan t_x\\), and \\(b = \\tan t_y\\). Using the motion compensation module, the corrected template patch \\(\\tilde{I}_{t_i}\\) at each timestamp could be obtained." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.84, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Patch Matching Module. To further estimate the 3D position of the target feature, we propose a patch matching module based on bi-polarity hypergraph modeling to obtain the spatial position of the feature by predicting the disparity." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.227 + ], + "angle": 0, + "content": "Different from traditional stereo matching, for the 3D feature tracking task, the target feature is contained in the local event patch \\( P_{t_i}^1 \\). Therefore, the disparity could only be predicted from the local patch instead of global information. Under such condition, mismatching will occur since the target scene may contain multiple similar features distributed in space and \\( P_{t_i}^1 \\) only contains local information. Therefore, we propose a bi-polarity hypergraph-based high-order correlation modeling mechanism to eliminate mismatching." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.227, + 0.893, + 0.741 + ], + "angle": 0, + "content": "As mentioned in Sec. 3.1, for each timestamp \\( t_i \\), we use the event patch \\( P_{t_i}^1 \\) around \\( \\mathbf{u}_{t_i}^1 \\) and the corresponding event row patch \\( R_{t_i}^2 \\) from camera 2 to achieve patch matching. Specifically, we use 4 convolutional layers to extract features \\( \\mathbf{M}_{t_i}^1 \\in \\mathbb{R}^{d \\times d \\times c} \\) and \\( \\mathbf{M}_{t_i}^2 \\in \\mathbb{R}^{d \\times W \\times c} \\) from \\( P_{t_i}^1 \\) and \\( R_{t_i}^2 \\), respectively, where \\( c \\) is the feature channel and \\( W \\) is the image width, i.e., the number of candidate matching positions. 
We further calculate the cost volume \\( \\mathbf{C}_{t_i} \\in \\mathbb{R}^{W \\times c} \\) composed of the feature similarity between \\( \\mathbf{M}_{t_i}^1 \\) and \\( \\mathbf{M}_{t_i}^2 \\) at each matching position, which represents the pair-wise similarity between \\( P_{t_i}^1 \\) and the sub-patch of \\( R_{t_i}^2 \\) at each matching position. Then, the \\( W \\) matching positions are used as vertices to construct bi-polarity hypergraphs. Compared to the pair-wise correlation contained in the cost volume, each hyperedge of a hypergraph could connect multiple vertices, i.e., high-order correlations among multiple vertices could be constructed. In practice, we use the Euclidean distance of the vertex feature as metric and calculate the \\( k \\) nearest neighbors of each vertex. For each vertex, we use a hyperedge to connect the vertices in its \\( k \\) neighbor vertices with spatial distance smaller than a certain threshold \\( \\delta \\). Therefore, a positive hypergraph \\( G^+ \\) with the adjacency matrix \\( H^+ \\) could be constructed. Besides, for each vertex, vertices with spatial distance larger than \\( \\delta \\) in its \\( k \\) neighbor vertices are connected by another hyperedge. Thus, a negative hypergraph \\( G^- \\) with the adjacency matrix \\( H^- \\) could be constructed. Each hyperedge of \\( G^+ \\) connects matching patches that are semantic similar and spatially close to \\( P_{t_i}^1 \\). These connections are expected to be enhanced. In contrast, each hyperedge of \\( G^- \\) connects matching patches that are semantic similar but spatially distant from \\( P_{t_i}^1 \\), which are interference and needs to be suppressed. Then, inspired by [9], we propose a feature aggregation method based on bi-polarity hypergraphs:" + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.743, + 0.892, + 0.793 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\mathbf {C}} _ {t _ {i}} = \\mathbf {C} _ {t _ {i}} + \\sigma \\left(\\left(\\mathbf {D} _ {v} ^ {+}\\right) ^ {- 1} \\mathbf {H} ^ {+} \\left(\\mathbf {D} _ {e} ^ {+}\\right) ^ {- 1} \\left(\\mathbf {H} ^ {+}\\right) ^ {\\top} \\mathbf {C} _ {t _ {i}} \\boldsymbol {\\Theta} ^ {+}\\right), \\tag {2} \\\\ - \\sigma \\left(\\left(\\mathbf {D} _ {v} ^ {-}\\right) ^ {- 1} \\mathbf {H} ^ {-} \\left(\\mathbf {D} _ {e} ^ {-}\\right) ^ {- 1} \\left(\\mathbf {H} ^ {-}\\right) ^ {\\top} \\mathbf {C} _ {t _ {i}} \\boldsymbol {\\Theta} ^ {-}\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.893, + 0.901 + ], + "angle": 0, + "content": "where \\(\\mathbf{D}_e^*\\) and \\(\\mathbf{D}_v^*\\) are the diagonal matrices of hyperedge degree and vertex degree, respectively. \\(\\Theta^{*}\\) is the learnable parameter, and \\(\\sigma (\\cdot)\\) is the non-linear activation function. Using Eq. (2), features are aggregated to enhance vertices with similar features and spatial close and suppress vertices with similar features but spatially distant. Finally, \\(\\hat{\\mathbf{C}}_{t_i}\\) is forwarded into a 1D convolutional layer with the kernel size of" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "18977" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "3 to regress the matching result. Using the patch matching module, the disparity \\( d_{t_i} \\) of the feature is predicted." 
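A compact numpy sketch of one reading of Eq. (2) follows. It is illustrative only: the Theta matrices are random stand-ins for the learned parameters, ReLU stands in for the nonlinearity sigma, and the hyperedge construction follows the k-nearest-neighbour/delta rule described above (k = 3, delta = 16, matching the implementation details reported later in the paper).

import numpy as np

def build_hypergraphs(C, positions, k=3, delta=16):
    # One hyperedge per vertex: H+ connects its k nearest feature-space neighbours
    # that are spatially close (|pos diff| <= delta); H- connects the distant ones.
    W = C.shape[0]
    Hp = np.zeros((W, W)); Hn = np.zeros((W, W))
    for v in range(W):
        d_feat = np.linalg.norm(C - C[v], axis=1)
        nbrs = np.argsort(d_feat)[:k + 1]                  # includes v itself
        for u in nbrs:
            close = abs(positions[u] - positions[v]) <= delta
            (Hp if close else Hn)[u, v] = 1.0              # vertex u belongs to hyperedge v
    return Hp, Hn

def aggregate(C, H, Theta):
    # sigma(Dv^-1 H De^-1 H^T C Theta), with ReLU as the nonlinearity.
    Dv = np.diag(1.0 / np.maximum(H.sum(1), 1e-6))         # inverse vertex degrees
    De = np.diag(1.0 / np.maximum(H.sum(0), 1e-6))         # inverse hyperedge degrees
    return np.maximum(Dv @ H @ De @ H.T @ C @ Theta, 0.0)

W, c = 64, 8                                               # 64 candidate positions, 8-dim features
rng = np.random.default_rng(0)
C = rng.normal(size=(W, c))                                # toy cost-volume features
Hp, Hn = build_hypergraphs(C, positions=np.arange(W))
C_hat = C + aggregate(C, Hp, rng.normal(size=(c, c))) - aggregate(C, Hn, rng.normal(size=(c, c)))
print(C_hat.shape)                                         # (64, 8)

The positive-hypergraph term reinforces candidate positions that look similar and lie near the current match, while the negative-hypergraph term is subtracted to suppress look-alike but spatially distant candidates.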
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.122, + 0.47, + 0.168 + ], + "angle": 0, + "content": "Projection. After the feature motion offsets \\(\\Delta \\mathbf{u}_{t_i}\\) and the disparity \\(d_{t_i}\\) are predicted, the 3D feature coordinates \\(\\mathbf{X}_{t_{i + 1}}\\) at \\(t_{i + 1}\\) could be computed using projection." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.177, + 0.358, + 0.192 + ], + "angle": 0, + "content": "3.3. Supervision and Loss Functions" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.2, + 0.471, + 0.403 + ], + "angle": 0, + "content": "Stereo Motion Consistency. For objects moving in 3D space captured by stereo cameras, the 2D motion offsets are strongly constrained with the disparity. Meanwhile, our offset estimation module is also deeply coupled with the patch matching module. Therefore, inspired by [21], we leverage a stereo motion consistency constraint to reinforce this correlation. Consider a point \\(\\mathbf{X} = (x,y,z)\\), it's 2D coordinates in the camera plane could be calculated by \\(\\mathbf{u} = (u,v) = \\frac{f}{s}\\frac{(x,y)}{z}\\), where \\(f\\) is the camera focal length and \\(s\\) the coordinate convert factor. For calibrated stereo cameras with the baseline distance of \\(b\\), the disparity of \\(\\mathbf{X}\\) is \\(d = \\frac{f}{s}\\frac{b}{z}\\). By taking the time derivative, we could obtain that \\(\\frac{\\Delta d}{\\Delta t} = -\\frac{f}{s}\\frac{b}{z^2}\\frac{\\Delta z}{\\Delta t}\\). Therefore, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.132, + 0.405, + 0.469, + 0.438 + ], + "angle": 0, + "content": "\\[\nd _ {t _ {i}} - d _ {t _ {i - 1}} = \\Delta d = - \\frac {f}{s} \\frac {b}{z _ {t _ {i}} ^ {2}} \\left(z _ {t _ {i}} - z _ {t _ {i - 1}}\\right). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.441, + 0.47, + 0.536 + ], + "angle": 0, + "content": "For the 2D motion offsets, we could similarly obtain that \\(\\frac{\\Delta\\mathbf{u}}{\\Delta t} = \\frac{f}{zs} (\\frac{\\Delta x}{\\Delta t},\\frac{\\Delta y}{\\Delta t}) - \\frac{f}{z^2s}\\frac{\\Delta z}{\\Delta t} (x,y)\\), i.e., we have \\(\\Delta \\mathbf{u} = (\\Delta u,\\Delta v) = \\frac{f}{zs} (\\Delta x,\\Delta y) - \\frac{f\\Delta z}{z^2s} (x,y)\\). In practice, suppose the coordinates of a feature in camera 1 at timestamp \\(t_i\\) is \\(\\mathbf{X}_{t_i}^1 = (x_{t_i},y_{t_i},z_{t_i})\\), then the coordinates in camera 2 is \\(\\mathbf{X}_{t_i}^2 = (x_{t_i} - b,y_{t_i},z_{t_i})\\). Therefore, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.538, + 0.469, + 0.592 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\Delta u _ {t _ {i}} ^ {1} - \\Delta u _ {t _ {i}} ^ {2} = \\frac {f}{s} \\frac {b}{z _ {t _ {i}} ^ {2}} \\Delta z _ {t _ {i}} = - \\frac {f}{s} \\frac {b}{z _ {t _ {i}} ^ {2}} \\left(z _ {t _ {i}} - z _ {t _ {i - 1}}\\right). \\tag {4} \\\\ \\Delta v _ {t _ {i}} ^ {1} - \\Delta v _ {t _ {i}} ^ {2} = 0 \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.596, + 0.469, + 0.626 + ], + "angle": 0, + "content": "Therefore, we could obtain the stereo motion constraint \\(\\Delta u_{t_i}^1 -\\Delta u_{t_i}^2 = d_{t_i} - d_{t_{i - 1}}\\) from Eq. (3) and Eq. (4)." 
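As a quick sanity check of the constraint above (illustrative only; the focal length, pixel-scale factor, and baseline below are assumed values), note that u1 - u2 = f*b/(s*z) = d holds exactly under the pinhole model, so the offset difference between the two cameras must equal the disparity change.

f, s, b = 500.0, 1.0, 0.12      # focal length, pixel-size factor, baseline: assumed values

def project(X):
    x, y, z = X
    u1 = f * x / (s * z)        # horizontal image coordinate in camera 1
    u2 = f * (x - b) / (s * z)  # horizontal image coordinate in camera 2
    d = f * b / (s * z)         # disparity
    return u1, u2, d

X_prev = (0.30, 0.10, 2.0)      # feature position at t_{i-1} (metres)
X_curr = (0.32, 0.12, 1.9)      # feature position at t_i, moving towards the camera
u1p, u2p, dp = project(X_prev)
u1c, u2c, dc = project(X_curr)
print((u1c - u1p) - (u2c - u2p))   # Delta u1 - Delta u2, ~1.579 here
print(dc - dp)                     # Delta d, the same value, ~1.579

Because the depth decreases in this example, both quantities are positive, which is consistent with the sign of Eq. (3).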
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.627, + 0.469, + 0.655 + ], + "angle": 0, + "content": "According to the stereo motion constraint, we introduce the stereo motion consistency loss:" + }, + { + "type": "equation", + "bbox": [ + 0.084, + 0.657, + 0.469, + 0.674 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {i} ^ {\\mathrm {s m c}} = \\mathcal {L} _ {1} \\left(\\Delta u _ {t _ {i}} ^ {1} - \\Delta u _ {t _ {i}} ^ {2}, d _ {t _ {i}} - d _ {t _ {i - 1}}\\right) + \\mathcal {L} _ {1} \\left(\\Delta v _ {t _ {i}} ^ {1}, \\Delta v _ {t _ {i}} ^ {2}\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.677, + 0.351, + 0.691 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_1(\\cdot ,\\cdot)\\) is the Manhattan Distance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.692, + 0.469, + 0.752 + ], + "angle": 0, + "content": "Loss Functions. Since our proposed method could predict the 3D feature coordinate \\(\\mathbf{X}_{t_i}\\) at each timestamp, we use the Manhattan Distance between the predicted trajectories and ground truth trajectories as supervision:" + }, + { + "type": "equation", + "bbox": [ + 0.199, + 0.755, + 0.469, + 0.775 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {i} ^ {\\text {t r a j}} = \\mathcal {L} _ {1} \\left(\\mathbf {X} _ {t _ {i}}, \\mathbf {X} _ {t _ {i}} ^ {\\mathrm {g t}}\\right). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.778, + 0.469, + 0.882 + ], + "angle": 0, + "content": "Since both the offset estimation module and the patch matching module severely affect the 3D trajectory prediction accuracy, we compute the ground truth 2D feature offsets \\(\\mathbf{u}_{t_i}^{1^{\\mathrm{gt}}}\\) and disparity \\(d_{t_i}^{\\mathrm{gt}}\\) at each timestamp based on ground truth 3D trajectory through projection and use them as supervision. In practice, the offset estimation is supervised with the loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.189, + 0.886, + 0.469, + 0.906 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {i} ^ {\\text {o f f}} = \\mathcal {L} _ {1} \\left(\\Delta \\mathbf {u} _ {t _ {i}} ^ {1}, \\Delta \\mathbf {u} _ {t _ {i}} ^ {1 ^ {\\mathrm {g t}}}\\right). \\tag {7}\n\\]" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.09, + 0.892, + 0.118 + ], + "angle": 0, + "content": "Table 1. Comparison of our E-3DTrack dataset with other existing event-based feature tracking datasets." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.122, + 0.892, + 0.201 + ], + "angle": 0, + "content": "
Dataset | Dim. | Motion | Scenario | GT Freq.
EC [27] | 2D | Homo. | Static | 200
EDS [15] | 2D | Homo. | Static | 150
E-3DTrack | 3D | Non-homo. | Dynamic | 250
" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.215, + 0.788, + 0.23 + ], + "angle": 0, + "content": "The disparity prediction is supervised with:" + }, + { + "type": "equation", + "bbox": [ + 0.625, + 0.234, + 0.892, + 0.254 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {i} ^ {\\text {d i s p}} = \\mathcal {L} _ {1} \\left(d _ {t _ {i}}, d _ {t _ {i}} ^ {\\mathrm {g t}}\\right). \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.258, + 0.891, + 0.288 + ], + "angle": 0, + "content": "Finally, our model is trained end-to-end with the supervision of the following total loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.564, + 0.29, + 0.891, + 0.33 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\sum_ {i = 1} ^ {N} \\left(\\mathcal {L} _ {i} ^ {\\text {t r a j}} + \\mathcal {L} _ {i} ^ {\\text {o f f}} + \\mathcal {L} _ {i} ^ {\\text {d i s p}} + \\alpha \\mathcal {L} _ {i} ^ {\\text {s m c}}\\right), \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.332, + 0.891, + 0.347 + ], + "angle": 0, + "content": "where \\(\\alpha\\) is a hyper-parameter and \\(N\\) is the sequence length." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.361, + 0.872, + 0.379 + ], + "angle": 0, + "content": "4. 3D Feature Tracking Dataset: E-3DTrack" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.387, + 0.892, + 0.508 + ], + "angle": 0, + "content": "In addressing the deficiency of high-speed 3D feature tracking datasets, we establish a hybrid vision system containing stereo event cameras and Optitrack, as shown in Fig. 3 (a), and curate the first event-based 3D feature tracking dataset, named E-3DTrack. Compared to existing event-based feature tracking datasets that contain only static scenes and 2D trajectories, our dataset is the first to contain high-speed moving objects and ground truth 3D feature trajectories." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.509, + 0.892, + 0.795 + ], + "angle": 0, + "content": "Limited by the capturing frequency of 3D vision sensors (e.g., \\(< 30\\) FPS for LiDAR), it is difficult to accurately record the 3D feature trajectories of high-speed moving objects at a high frame rate. To tackle this problem, we use the motion capture system, i.e., Optitrack, to record the trajectory of each object attached with fixed markers. To explicitly obtain feature-level 3D trajectories, we use a scanner, i.e., FARO Quantum ScanArm, to capture the high precision point cloud of each object. Then, the 3D affine transform, incorporating a homogeneous scale, is calculated from the object coordinate system to the Optitrack coordinate system based on the markers' coordinates. This leads to the acquisition of the time-series point cloud sequence of the moving objects under the Optitrack coordinate system. Finally, the feature trajectories can be derived from the time-series point cloud sequence based on the feature point index. Hence, our dataset comprises ground truth 3D feature trajectories of high-speed moving objects at 250 FPS, surpassing the capturing frequency of most existing 3D vision sensors." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Using our hybrid vision system, we captured 40 high-speed motion scenarios containing a total of 1300 sequences. We randomly select 10 scenarios as the test set, and the remaining 30 scenarios are selected as the training set. 
Note that due to the cross-scene division, the scene in the test set are unseen in the training set. More details of our dataset are provided in the supplementary material." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18978" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.091, + 0.28, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.309, + 0.244, + 0.317 + ], + "angle": 0, + "content": "(a) Our hybrid vision system." + }, + { + "type": "image", + "bbox": [ + 0.288, + 0.091, + 0.411, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.091, + 0.508, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.091, + 0.631, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.635, + 0.091, + 0.753, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.757, + 0.091, + 0.886, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.29, + 0.308, + 0.888, + 0.318 + ], + "angle": 0, + "content": "(b) Samples of our dataset. From left to right: reference frame, feature patch, stereo event streams, and ground truth 3D feature trajectory." + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.323, + 0.892, + 0.365 + ], + "angle": 0, + "content": "Figure 3. (a) Our hybrid vision system. (b) Samples of our E-3DTrack dataset. The first column is the reference frame at the initial moment, and the features to be tracked are marked in each frame. Some feature template patches are zoomed in for display in the second column. The stereo event streams and the ground truth 3D feature trajectories are shown in the last three columns, respectively." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.377, + 0.468, + 0.452 + ], + "angle": 0, + "content": "Table 1 shows the comparison of our E-3DTrack dataset with other existing event-based feature tracking datasets, including Event Camera dataset (EC) [27] and Event-aided Direct Sparse Odometry (EDS) dataset [15]. The main advantages of our dataset are in the following three aspects." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.453, + 0.468, + 0.497 + ], + "angle": 0, + "content": "- 3D trajectory. Our dataset is the first feature tracking dataset containing ground truth 3D trajectories, enabling the feature motion trajectory estimation in 3D space." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.499, + 0.468, + 0.633 + ], + "angle": 0, + "content": "- Non-homogeneous motion. Our dataset is the first event-based feature tracking dataset containing high-speed moving objects. Existing EC and EDS datasets mainly contain stationary scenarios. Thus, feature motions are caused by the camera movement. Since there are no moving objects in the scene, the motions of all features are almost homogeneous, as shown in Fig. 4. In contrast, the feature motions in our dataset are non-homogeneous, which is more conducive to applications." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.635, + 0.468, + 0.77 + ], + "angle": 0, + "content": "- Accurate ground truth. Our dataset contains ground truth 3D feature trajectories captured from Optitrack. 
In contrast, since the DAVIS346 event camera could record event streams and 25 FPS video simultaneously, the ground truth 2D trajectories in EC and EDS datasets are obtained using frame-based feature tracking method KLT [25], or further triangulating KLT tracks using camera poses and reprojecting them to the frames. Thus, our dataset contains more accurate ground truth trajectories." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.77, + 0.468, + 0.83 + ], + "angle": 0, + "content": "Figure 3 (b) shows some samples of our E-3DTrack dataset. We visualize the reference frames, feature template patches, stereo event streams, and the ground truth 3D feature trajectories of each sample." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.453, + 0.468, + 0.83 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.845, + 0.208, + 0.861 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.468, + 0.901 + ], + "angle": 0, + "content": "In this section, we first introduce the experimental settings. Then, we analyze the quantitative and qualitative compar" + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.376, + 0.696, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.528, + 0.478, + 0.687, + 0.488 + ], + "angle": 0, + "content": "(a) Sample from EC Dataset" + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.376, + 0.869, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.701, + 0.478, + 0.867, + 0.489 + ], + "angle": 0, + "content": "(b) Sample from EDS Dataset" + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.493, + 0.888, + 0.507 + ], + "angle": 0, + "content": "Figure 4. Examples from existing EC [27] and EDS [15] dataset." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.52, + 0.891, + 0.551 + ], + "angle": 0, + "content": "isons, respectively. Finally, we conduct ablation studies to demonstrate the effectiveness of each proposed module." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.56, + 0.706, + 0.576 + ], + "angle": 0, + "content": "5.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.584, + 0.892, + 0.734 + ], + "angle": 0, + "content": "Comparison Methods. Since there are no existing high-speed 3D feature tracking methods, we use existing event-based trajectory prediction methods to obtain 2D feature trajectories, and use stereo depth estimation methods to further obtain the 3D feature trajectory. Specifically, we combine the event-based optical flow estimation method E-RAFT [12], event-based feature tracking methods EKLT [10] and DeepEvT [26] with event-based stereo depth estimation methods TSES [39] and SDE [28], respectively, as our baseline comparison methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.87 + ], + "angle": 0, + "content": "Metrics. To evaluate our proposed method and other comparison methods, we use the Tracked Feature Ratio (TFR, higher is better), Feature Age [26] (FR, higher is better), and the Root Mean Squared Error (RMSE, lower is better) as the metrics. TFR is calculated as the ratio of the time that the spatial distance between the predicted 3D trajectory and the ground truth 3D trajectory is less than a certain threshold \\( c \\) to the total sequence time. See detailed definition in the supplementary material." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Implementation Details. Our method is implemented based on PyTorch [31]. Our model is trained end-to-end" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18979" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.892, + 0.118 + ], + "angle": 0, + "content": "Table 2. Quantitative results on our E-3DTrack dataset. Feature age (FA), tracked feature ratio (TFR), and root mean square error (RMSE) are selected as the metrics. Bold numbers represent the best scores, and underlined numbers represent the second-best scores." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.122, + 0.892, + 0.246 + ], + "angle": 0, + "content": "
Method | FA(0.1m) ↑ | FA(0.15m) ↑ | FA(0.2m) ↑ | TFR(0.1m) ↑ | TFR(0.15m) ↑ | TFR(0.2m) ↑ | RMSE ↓
E-RAFT [12] + TSES [39] | 0.0409 | 0.0664 | 0.092 | 0.1701 | 0.2667 | 0.3439 | 0.4726
E-RAFT [12] + SDE [28] | 0.1385 | 0.2399 | 0.3204 | 0.3121 | 0.4726 | 0.5806 | 0.3368
EKLT [10] + TSES [39] | 0.0232 | 0.0429 | 0.0628 | 0.1180 | 0.1961 | 0.2685 | 0.4806
EKLT [10] + SDE [28] | 0.1026 | 0.1856 | 0.2584 | 0.2421 | 0.3738 | 0.4700 | 0.4034
DeepEvT [26] + TSES [39] | 0.0713 | 0.1117 | 0.1452 | 0.3786 | 0.4991 | 0.5818 | 0.3549
DeepEvT [26] + SDE [28] | 0.2314 | 0.3462 | 0.4339 | 0.5782 | 0.7060 | 0.7765 | 0.1889
E-3DTrack (Ours) | 0.2601 | 0.4179 | 0.5428 | 0.6928 | 0.8164 | 0.8772 | 0.1181
" + }, + { + "type": "table_caption", + "bbox": [ + 0.089, + 0.257, + 0.456, + 0.271 + ], + "angle": 0, + "content": "Table 3. Comparison of inference time on E-3DTrack dataset." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.276, + 0.47, + 0.315 + ], + "angle": 0, + "content": "
Method | E-RAFT + SDE | DeepEvT + SDE | Ours
Time (ms/step) | 154.83 | 93.22 | 40.30
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.333, + 0.47, + 0.425 + ], + "angle": 0, + "content": "for 100 epochs with a batch size of 16. The optimization method is AdamW [23], and the cosine annealing schedule [22] is leveraged. The learning rate decays from \\(2 \\times 10^{-4}\\) to \\(1 \\times 10^{-6}\\) within 100 epochs. The hyperparameters are selected as \\(\\alpha = 0.25\\) in Eq. (9), \\(k = 3\\) and \\(\\delta = 16\\) for bi-polarity hypergraph construction." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.437, + 0.31, + 0.453 + ], + "angle": 0, + "content": "5.2. Quantitative Comparison" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.461, + 0.47, + 0.627 + ], + "angle": 0, + "content": "Table. 2 shows the quantitative comparison of our proposed method with other comparison methods. From the table, we could observe that our proposed method significantly outperforms all comparison methods and achieve state-of-the-art performance. Specifically, compared with the second-best method, i.e., the combination of the state-of-the-art 2D event-based feature tracking method DeepEvT [26] and stereo depth estimation method SDE [28], our proposed method reduces the RMSE by \\(37.5\\%\\) and improves the FA by \\(12.4\\%\\), \\(20.7\\%\\), and \\(25.1\\%\\) in terms of \\(c = 0.1 \\mathrm{~m}\\), \\(0.15 \\mathrm{~m}\\), and \\(0.2 \\mathrm{~m}\\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.629, + 0.469, + 0.854 + ], + "angle": 0, + "content": "Compared to comparison methods that achieve trajectory prediction and depth estimation separately, our proposed method leverages a joint framework to track the 3D feature trajectories of high-speed moving objects. This indicates that for 3D moving objects, the feature trajectory in the camera plane is highly correlated with the 3D position. The simple combination of 2D trajectory prediction and 3D position estimation will lead to fatal errors. Instead, our proposed method tracks 3D trajectories accurately using the stereo motion consistency constraint. Meanwhile, compared to traditional stereo depth estimation methods, our proposed patch matching module uses a high-order correlation modeling mechanism based on bi-polarity hypergraph to eliminate mismatching of similar features, further enhancing the 3D feature tracking robustness." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Table. 3 shows the inference time comparison of our proposed method with other comparison methods. 
Specifically, we test the inference time of each tracking update" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.257, + 0.623, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.327, + 0.623, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.396, + 0.623, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.536, + 0.624, + 0.545 + ], + "angle": 0, + "content": "(a) Reference Feature Patch" + }, + { + "type": "image", + "bbox": [ + 0.624, + 0.257, + 0.754, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.625, + 0.327, + 0.754, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.625, + 0.396, + 0.754, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.647, + 0.536, + 0.733, + 0.545 + ], + "angle": 0, + "content": "(b) DeepEvT + SDE" + }, + { + "type": "image", + "bbox": [ + 0.755, + 0.257, + 0.89, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.755, + 0.327, + 0.89, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.755, + 0.396, + 0.89, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.755, + 0.466, + 0.89, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.805, + 0.536, + 0.842, + 0.545 + ], + "angle": 0, + "content": "(c) Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.553, + 0.892, + 0.609 + ], + "angle": 0, + "content": "Figure 5. Qualitative comparison on our E-3DTrack dataset. From left to right: the reference feature patch, the ground truth feature trajectories (red), the feature trajectories (blue) predicted by DeepEvT [26] + SDE [28] and our proposed method, respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.612, + 0.892, + 0.688 + ], + "angle": 0, + "content": "step. From the table, we could observe that compared to the second-best method, i.e., DeepEvT + SDE, our proposed method reduces the inference time by \\(56.8\\%\\) while achieving better tracking performance. This demonstrates the computational efficiency of our proposed method." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.697, + 0.722, + 0.714 + ], + "angle": 0, + "content": "5.3. Qualitative Comparison" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Figure 5 shows the qualitative 3D feature tracking results of our proposed method and the second-best comparison method, i.e., DeepEvT [26] + SDE [28]. The predicted 3D trajectories and the ground truth trajectories are shown in blue and red, respectively. From the figure, we could observe that our proposed method achieves more robust 3D feature tracking. As shown in the first row, the comparison method achieves adequate feature tracking performance when facing simple scenarios where the object motions do not contain significant depth changes. Such scenarios are similar to 2D feature tracking. Similar observations could be found in the second row. 
For the white geometric model" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18980" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.136, + 0.091, + 0.24, + 0.104 + ], + "angle": 0, + "content": "Tracking Error" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.105, + 0.272, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.31, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Tracked Feature Ratio" + }, + { + "type": "image", + "bbox": [ + 0.277, + 0.105, + 0.47, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.231, + 0.47, + 0.26 + ], + "angle": 0, + "content": "Figure 6. Results of the mean tracking error (left) and the feature tracked ratio (right) over tracking time." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.279, + 0.47, + 0.46 + ], + "angle": 0, + "content": "with slight depth variation, the comparison method achieves 3D feature tracking with slight oscillations. However, for the red star with large depth variation and rotation, it could not be tracked accurately by the comparison method. The last two rows show two extreme scenarios, i.e., the 3D motions of the objects are with large depth variation and rotation, which will cause significant feature shape deformation. Under such scenarios, our comparison method tracks the features with fatal errors. In contrast, our proposed method tracks the 3D trajectories of the high-speed moving features robustly and continuously due to our motion compensation module and patch matching module." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.46, + 0.471, + 0.642 + ], + "angle": 0, + "content": "Figure 6 further shows the tracking error (RMSE) and the tracked feature ratio (TFR) over time on our E-3DTrack dataset. The threshold is selected as \\( c = 0.1 \\, \\text{m} \\) to calculate TFR. From the figure, we could observe that our proposed method can continuously track 3D trajectories of target features, i.e., our method maintains a high TFR consistently. From the figure, we could also observe that the TFR of E-RAFT + SDE is comparable with DeepEvT + SDE in initial stage, but gradually decreases over time. This is because the optical flow estimation method is lack of long-term consistent modeling. In contrast, our proposed method maintains a high TFR and a low tracking error over all time." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.651, + 0.285, + 0.668 + ], + "angle": 0, + "content": "5.4. Ablation Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.674, + 0.47, + 0.795 + ], + "angle": 0, + "content": "To demonstrate the effectiveness of each proposed module, we validate the performance of our model with and without the motion compensation module (denoted as MC), stereo motion consistency mechanism (denoted as \\(\\mathcal{L}^{\\mathrm{smc}}\\)), and the bi-polarity hypergraph-based high-order correlation modeling mechanism (denoted as BiHCM), respectively. The ablation experimental results are shown in Tab. 4. See supplementary material for detailed settings." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Bi-Polarity Hypergraph Modeling. From Tab. 4 we could observe that compared with our base model (row (1)), the addition of BiHCM will increase TFR from 0.5586 to 0.6082. 
Compared with our full model, the removal of the BiHCM will lead to an RMSE increase of \\(20.8\\%\\). This is due to the fact that our proposed BiHCM could enhance the connection between patches with similar features that" + }, + { + "type": "table_caption", + "bbox": [ + 0.525, + 0.09, + 0.868, + 0.104 + ], + "angle": 0, + "content": "Table 4. Ablation experiments on our E-3DTrack dataset." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.108, + 0.887, + 0.246 + ], + "angle": 0, + "content": "
  | BiHCM | Lsmc | MC | TFR (0.1 m) ↑ | RMSE ↓
(1) | X | X | X | 0.5586 | 0.1807
(2) | ✓ | X | X | 0.6082 | 0.1505
(3) | X | ✓ | X | 0.5942 | 0.1512
(4) | X | X | ✓ | 0.5660 | 0.1624
(5) | ✓ | ✓ | X | 0.6705 | 0.1268
(6) | ✓ | X | ✓ | 0.6599 | 0.1312
(7) | X | ✓ | ✓ | 0.6441 | 0.1427
(8) | ✓ | ✓ | ✓ | 0.6928 | 0.1181
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.259, + 0.892, + 0.304 + ], + "angle": 0, + "content": "are spatially close, and suppress patches with similar features but spatially distant, which could eliminate mismatching and further improve 3D feature tracking performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.305, + 0.893, + 0.441 + ], + "angle": 0, + "content": "Stereo Motion Consistency. As shown in Tab. 4, compared with the base model, the addition of stereo motion consistency constraint will reduce RMSE from 0.1807 to 0.1512. Compared with the full model, the removal of the stereo motion consistency constraint will increase RMSE by \\(11.1\\%\\). This is due to the fact that the stereo motion consistency could effectively constrain the correlation between the 2D trajectory and 3D spatial position of the objects, making our method predict more accurate and smooth 3D trajectory." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.441, + 0.894, + 0.562 + ], + "angle": 0, + "content": "Motion Compensation. As shown in Tab. 4, compared with the base model and the full model, the addition and removal of the motion compensation module resulted in \\(10.1\\%\\) and \\(7.4\\%\\) decrease and increase in RMSE, respectively. With the addition of the motion compensation module, our proposed method could better deal with feature deformation caused by depth changes and rotations of moving objects, and achieve more robust 3D feature tracking." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.563, + 0.892, + 0.593 + ], + "angle": 0, + "content": "These ablation experiments demonstrate the effectiveness of each proposed module." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.608, + 0.62, + 0.624 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.634, + 0.894, + 0.816 + ], + "angle": 0, + "content": "In this paper, we propose the first high-speed 3D feature tracking method that takes stereo event streams as input to estimate 3D feature trajectories. Our proposed method leverages a joint framework to obtain 3D feature trajectories by estimating the feature motion offsets and spatial position simultaneously. A motion compensation module and a patch matching module based on bi-polarity hypergraphs are proposed to achieve robust feature tracking. Meanwhile, the first 3D feature tracking dataset containing high-speed moving objects and ground truth 3D feature trajectories at 250 FPS is constructed, named E-3DTrack, which can be used as the first 3D feature tracking benchmark." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.83, + 0.671, + 0.848 + ], + "angle": 0, + "content": "7. Acknowledgment" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This work was supported by National Natural Science Funds of China (No. 62021002 and No. 62088102), Beijing Natural Science Foundation (No. 4222025)." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "18981" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.17 + ], + "angle": 0, + "content": "[1] Himanshu Akolkar, Sio-Hoi Ieng, and Ryad Benosman. Real-Time High Speed Motion Prediction Using Fast Aperture-Robust Event-Driven Visual Flow. IEEE Trans. Pattern Anal. Mach. Intell., 44(1):361-372, 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.47, + 0.213 + ], + "angle": 0, + "content": "[2] Ignacio Alzugaray and Margarita Chli. ACE: An Efficient Asynchronous Corner Tracker for Event Cameras. In Int. Conf. on 3D Vis., pages 653-661. IEEE, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.213, + 0.47, + 0.268 + ], + "angle": 0, + "content": "[3] Ignacio Alzugaray and Margarita Chli. HASTE: MultiHypothesis Asynchronous Speeded-up Tracking of Events. In The British Machine Vision Conference, page 744, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.269, + 0.47, + 0.325 + ], + "angle": 0, + "content": "[4] Patrick Bardow, Andrew J Davison, and Stefan Leutenegger. Simultaneous optical flow and intensity estimation from an event camera. In IEEE Conf. Comput. Vis. Pattern Recog., pages 884-892, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.326, + 0.47, + 0.368 + ], + "angle": 0, + "content": "[5] Herbert Bay, Andreas Ess, Tinne Tuytelaars, and Luc Van Gool. Speeded-up Robust Features (SURF). Comput. Vis. and Image Underst., 110(3):346-359, 2008. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.368, + 0.471, + 0.41 + ], + "angle": 0, + "content": "[6] Paul J Besl and Neil D McKay. Method for Registration of 3-D Shapes. In Sensor Fusion IV: Control Paradigms and Data Structures, pages 586-606, 1992. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.411, + 0.471, + 0.465 + ], + "angle": 0, + "content": "[7] Christian Brandli, Raphael Berner, Minhao Yang, Shih-Chii Liu, and Tobi Delbruck. A \\(240 \\times 180\\) 130 dB \\(3\\mu s\\) Latency Global Shutter Spatiotemporal Vision Sensor. IEEE J. of Solid-State Circuits, 49(10):2333-2341, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.466, + 0.47, + 0.536 + ], + "angle": 0, + "content": "[8] Guillermo Gallego, Henri Rebecq, and Davide Scaramuzzi. A Unifying Contrast Maximization Framework for Event Cameras, with Applications to Motion, Depth, and Optical Flow Estimation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 3867-3876, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.536, + 0.47, + 0.578 + ], + "angle": 0, + "content": "[9] Yue Gao, Yifan Feng, Shuyi Ji, and Rongrong Ji. HGNN+: General Hypergraph Neural Networks. IEEE Trans. Pattern Anal. Mach. Intell., 45(3):3181-3199, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.579, + 0.47, + 0.634 + ], + "angle": 0, + "content": "[10] Daniel Gehrig, Henri Rebecq, Guillermo Gallego, and Davide Scaramuzza. EKLT: Asynchronous Photometric Feature Tracking using Events and Frames. Int. J. Comput. Vis., 128(3):601-618, 2020. 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.634, + 0.471, + 0.704 + ], + "angle": 0, + "content": "[11] Daniel Gehrig, Michelle Ruegg, Mathias Gehrig, Javier Hidalgo-Carrió, and Davide Scaramuzza. Combining Events and Frames Using Recurrent Asynchronous Multimodal Networks for Monocular Depth Prediction. IEEE Robot. and Autom. Lett., 6(2):2822-2829, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.705, + 0.471, + 0.76 + ], + "angle": 0, + "content": "[12] Mathias Gehrig, Mario Millhäsler, Daniel Gehrig, and Davide Scaramuzza. E-RAFT: Dense Optical Flow from Event Cameras. In Int. Conf. 3D Vis., pages 197–206. IEEE, 2021. 
2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.761, + 0.471, + 0.829 + ], + "angle": 0, + "content": "[13] Gallego Guillermo, Delbruck Tobi, Michael Orchard Garrick, Bartolozzi Chiara, Taba Brian, Censi Andrea, Leutenegger Stefan, Davison Andrew, Conradt Jorg, Daniilidis Kostas, and Scaramuzza Davide. Event-Based Vision: A Survey. IEEE Trans. Pattern Anal. Mach. Intell., 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.83, + 0.471, + 0.873 + ], + "angle": 0, + "content": "[14] Javier Hidalgo-Carrió, Daniel Gehrig, and Davide Scaramuzza. Learning monocular dense depth from events. In Int. Conf. on 3D Vis., pages 534-542. IEEE, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.873, + 0.471, + 0.903 + ], + "angle": 0, + "content": "[15] Javier Hidalgo-Carrio, Guillermo Gallego, and Davide Scaramuzza. Event-aided direct sparse odometry. In IEEE" + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.115, + 0.471, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "Conf. Comput. Vis. Pattern Recog., pages 5781-5790, 2022. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.121, + 0.892, + 0.178 + ], + "angle": 0, + "content": "[16] Sumin Hu, Yeeun Kim, Hyungtae Lim, Alex Junho Lee, and Hyun Myung. eCDT: Event Clustering for Simultaneous Feature Detection and Tracking. In Int. Conf. Intel. Robot. Syst., pages 3808-3815. IEEE, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.233 + ], + "angle": 0, + "content": "[17] Hanme Kim, Stefan Leutenegger, and Andrew J. Davison. Real-Time 3D Reconstruction and 6-DoF Tracking with an Event Camera. In *Eur. Conf. Comput. Vis.*, pages 349–364, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.234, + 0.892, + 0.29 + ], + "angle": 0, + "content": "[18] Beat Kueng, Elias Mueggler, Guillermo Gallego, and Davide Scaramuzza. Low-latency visual odometry using event-based feature tracks. In Int. Conf. Intell. Robot. Syst., pages 16-23. IEEE, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.291, + 0.892, + 0.348 + ], + "angle": 0, + "content": "[19] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature Pyramid Networks for Object Detection. In IEEE Conf. Comput. Vis. Pattern Recog., pages 2117-2125, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.348, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[20] Daqi Liu, Alvaro Parra, and Tat-Jun Chin. Globally Optimal Contrast Maximisation for Event-based Motion Estimation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6349-6358, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.404, + 0.892, + 0.461 + ], + "angle": 0, + "content": "[21] Pengpeng Liu, Irwin King, Michael R Lyu, and Jia Xu. Flow2stereo: Effective Self-Supervised Learning of Optical Flow and Stereo Matching. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6648-6657, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.461, + 0.892, + 0.503 + ], + "angle": 0, + "content": "[22] Ilya Loshchilov and Frank Hutter. SGDR: Stochastic Gradient Descent with Warm Restarts. In Int. Conf. Learn. Represent., 2017. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.504, + 0.892, + 0.533 + ], + "angle": 0, + "content": "[23] Ilya Loshchilov and Frank Hutter. Decoupled Weight Decay Regularization. Int. Conf. Learn. Represent., 2019. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.533, + 0.892, + 0.573 + ], + "angle": 0, + "content": "[24] David G Lowe. Distinctive Image Features from Scale-Invariant Keypoints. Int. J. Comput. Vis., 60:91-110, 2004. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.575, + 0.892, + 0.618 + ], + "angle": 0, + "content": "[25] Bruce D Lucas and Takeo Kanade. An Iterative Image Registration Technique with an Application to Stereo Vision. In IJCAI, pages 674-679, 1981. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.619, + 0.892, + 0.674 + ], + "angle": 0, + "content": "[26] Nico Messikommer, Carter Fang, Mathias Gehrig, and Davide Scaramuzza. Data-Driven Feature Tracking for Event Cameras. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5642–5651, 2023. 2, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.675, + 0.892, + 0.745 + ], + "angle": 0, + "content": "[27] Elias Mueggler, Henri Rebecq, Guillermo Gallego, Tobi Delbruck, and Davide Scaramuzza. The Event-Camera Dataset and Simulator: Event-Based Data for Pose Estimation, Visual Odometry, and SLAM. Int. J. of Robot. Researc., 36(2): 142-149, 2017. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.745, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[28] Yeongwoo Nam, Mohammad Mostafavi, Kuk-Jin Yoon, and Jonghyun Choi. Stereo Depth From Events Cameras: Concentrate and Focus on the Future. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6114-6123, 2022. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.802, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[29] Liyuan Pan, Miaomiao Liu, and Richard Hartley. Single Image Optical Flow Estimation with an Event Camera. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1669-1678. IEEE, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.858, + 0.892, + 0.903 + ], + "angle": 0, + "content": "[30] Federico Paredes-Vallés, Kirk YW Scheper, and Guido CHE De Croon. Unsupervised Learning of a Hierarchical Spiking Neural Network for Optical Flow Estimation: From Events" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "18982" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.468, + 0.12 + ], + "angle": 0, + "content": "to Global Motion Perception. IEEE Trans. Pattern Anal. Mach. Intell., 42(8):2051-2064, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.19 + ], + "angle": 0, + "content": "[31] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An Imperative Style, High-Performance Deep Learning Library. Adv. Neural Inform. Process. Syst., 32, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.469, + 0.247 + ], + "angle": 0, + "content": "[32] Lichtsteiner Patrick, Posch Christoph, and Delbruck Tobi. A \\(128 \\times 128\\) dB 15 μs Latency Asynchronous Temporal Contrast Vision Sensor. IEEE J. of Solid-State Circuits, 43 (2):566-576, 2008. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.249, + 0.469, + 0.289 + ], + "angle": 0, + "content": "[33] Jianbo Shi and Carlo Tomasi. Good Features to Track. In IEEE Conf. Comput. Vis. Pattern Recog., pages 593-600. IEEE, 1994. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.291, + 0.469, + 0.347 + ], + "angle": 0, + "content": "[34] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting. Adv. Neural Inform. Process. Syst., 28, 2015. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.349, + 0.469, + 0.376 + ], + "angle": 0, + "content": "[35] Carlo Tomasi and Takeo Kanade. Detection and Tracking of Point. Int. J. Comput. Vis., 9(137-154):3, 1991. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.378, + 0.469, + 0.432 + ], + "angle": 0, + "content": "[36] Jiqing Zhang, Xin Yang, Yingkai Fu, Xiaopeng Wei, Baocai Yin, and Bo Dong. Object Tracking by Jointly Exploiting Frame and Event Domain. In Int. Conf. Comput. Vis., pages 13043-13052, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.434, + 0.469, + 0.489 + ], + "angle": 0, + "content": "[37] Jiqing Zhang, Bo Dong, Haiwei Zhang, Jianchuan Ding, Felix Heide, Baocai Yin, and Xin Yang. Spiking Transformers for Event-Based Single Object Tracking. In IEEE Conf. Comput. Vis. Pattern Recog., pages 8801-8810, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.491, + 0.469, + 0.544 + ], + "angle": 0, + "content": "[38] Alex Zihao Zhu, Nikolay Atanasov, and Kostas Daniilidis. Event-Based Feature Tracking with Probabilistic Data Association. In IEEE Int. Conf. Robot. Autom., pages 4465-4470. IEEE, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.547, + 0.469, + 0.589 + ], + "angle": 0, + "content": "[39] Alex Zihao Zhu, Yibo Chen, and Kostas Daniilidis. Realtime Time Synchronized Event-Based Stereo. In Eur. Conf. Comput. Vis., pages 433-447, 2018. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.59, + 0.469, + 0.645 + ], + "angle": 0, + "content": "[40] Alex Zihao Zhu, Liangzhe Yuan, Kenneth Chaney, and Kostas Daniilidis. Unsupervised Event-Based Learning of Optical Flow, Depth, and Egomotion. In IEEE Conf. Comput. Vis. Pattern Recog., pages 989-997, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.469, + 0.688 + ], + "angle": 0, + "content": "[41] Alex Zihao Zhu, Nikolay Atanasov, and Kostas Daniilidis. Event-Based Visual Inertial Odometry. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5391-5399, 2017. 
1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.688 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.518, + 0.956 + ], + "angle": 0, + "content": "18983" + } + ] +] \ No newline at end of file diff --git a/2024/3D Feature Tracking via Event Camera/a6809d22-03e7-4639-b845-8393b79ecc8d_origin.pdf b/2024/3D Feature Tracking via Event Camera/a6809d22-03e7-4639-b845-8393b79ecc8d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e4d841634ab9dd794eb08586e859df2335bf3c4e --- /dev/null +++ b/2024/3D Feature Tracking via Event Camera/a6809d22-03e7-4639-b845-8393b79ecc8d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f960d8fa78106b58adb952432624b8b5fff17b584414bfb3853a33002739d461 +size 2692901 diff --git a/2024/3D Feature Tracking via Event Camera/full.md b/2024/3D Feature Tracking via Event Camera/full.md new file mode 100644 index 0000000000000000000000000000000000000000..37e081c2e157feefbbbd175441fd292cf282678a --- /dev/null +++ b/2024/3D Feature Tracking via Event Camera/full.md @@ -0,0 +1,341 @@ +# 3D Feature Tracking via Event Camera + +Siqi Li $^{1}$ Zhikuan Zhou $^{1}$ Zhou Xue $^{2}$ Yipeng Li $^{3}$ Shaoyi Du $^{4}$ Yue Gao $^{1*}$ + +$^{1}$ {BNRist, THUIBCS, School of Software}, Tsinghua University $^{2}$ Li Auto $^{3}$ Department of Automation, Tsinghua University + +$^{4}$ National Key Laboratory of Human-Machine Hybrid Augmented Intelligence, National Engineering Research Center for Visual Information and Applications, and Institute of Artificial Intelligence and Robotics, Xi'an Jiaotong University + +{lsq19, zzk22}@mails.tsinghua.edu.cn, xuezhou08@gmail.com, dushaoyi@xjtu.edu.cn, {liep, gaoyue}@tsinghua.edu.cn + +![](images/3f4a0f5c4a276caa64fde7232058fb978b99663fb703a31baad1d97f4a1c1148.jpg) +Figure 1. We present the first high-speed 3D feature tracking method via stereo event cameras and the corresponding high-speed 3D feature tracking dataset. Our proposed method takes high temporal resolution event streams captured from stereo event cameras as input, and could predict the long-term feature motion trajectories of multiple high-speed moving objects within the scene at a rate of 250 FPS. + +# Abstract + +This paper presents the first 3D feature tracking method with the corresponding dataset. Our proposed method takes event streams from stereo event cameras as input to predict 3D trajectories of the target features with high-speed motion. To achieve this, our method leverages a joint framework to predict the 2D feature motion offsets and the 3D feature spatial position simultaneously. A motion compensation module is leveraged to overcome the feature deformation. A patch matching module based on bipolarity hypergraph modeling is proposed to robustly estimate the feature spatial position. Meanwhile, we collect the first 3D feature tracking dataset with high-speed moving objects and ground truth 3D feature trajectories at 250 FPS, named E-3DTrack, which can be used as the first high-speed 3D feature tracking benchmark. Our code and dataset could be found at: https://github.com/lisiqi19971013/E-3DTrack. + +# 1. Introduction + +Feature tracking aims to predict the long-term trajectories of target features, which is fundamental in many computer + +vision tasks, e.g., object tracking [36, 37], 3D reconstruction [8, 17], and SLAM [20, 41]. 
Frame-based feature tracking methods [5, 24, 25, 33, 35] have been extensively investigated in the past decades. However, all existing methods focus on tracking 2D feature trajectories in the image plane. + +In real-world scenarios, objects are moving in 3D space, e.g., cars are racing on the road from near to far. The tracking of features with high-speed 3D motion becomes essential. Consequently, there is an imperative need to investigate 3D feature tracking methods capable of predicting feature trajectories for objects undergoing high-speed 3D motion. Such methods hold significant promise for various downstream applications, e.g., VR, AR, and autonomous driving. To the best of our knowledge, existing literature lacks established high-speed 3D feature tracking methodologies. + +For the 3D feature tracking of high-speed moving objects, the main challenges lie in three folds. (1) With the limited frame rate of traditional frame-based cameras, the motion of high-speed moving objects may not be consistently captured due to the blind time between consecutive frames. Therefore, how to continually record valid motion information of high-speed moving objects is the first chal + +lenge. (2) The second challenge lies in establishing the correlation between the 3D position of the feature and the 2D visual data acquired by cameras to generate a continuous and smooth 3D feature trajectory. (3) To the best of our knowledge, there are currently no existing high-speed 3D feature tracking datasets. This is primarily due to the difficulty in capturing ground truth 3D feature trajectories of high-speed moving objects, which is constrained by the insufficient capture frequency of existing 3D vision sensors. Thus, the lack of high-speed 3D feature tracking dataset is the third challenge, which is also a principal impediment to the advancement of research within this domain. + +To overcome the motion capture challenge, we use event cameras to record motion dynamics of high-speed moving objects. Event cameras [7, 32] are bio-inspired vision sensors that asynchronously respond to pixel-wise brightness changes. Specifically, when the logarithmic change of the brightness at a pixel exceeds a certain threshold, i.e., $|\Delta_t \log I(x,y,t)| > C$ , where $I(x,y,t)$ is the brightness at pixel $(x,y)$ and timestamp $t$ , an event will be triggered, denoted as $e = (t,x,y,p)$ , where $p \in \{1,-1\}$ is the polarity. The output event stream of event cameras, formed by events triggered by all pixels, showcases their remarkably high temporal resolution (in the order of microseconds) and broad dynamic range (up to 140 dB) [13]. These unique features of event cameras render them promising tools for achieving 3D feature tracking in the context of high-speed moving objects. + +To address the aforementioned technical challenge, we propose a high-speed 3D feature tracking method based on stereo event cameras, predicting the long-term 3D trajectories of target features from stereo event streams and template patches. To achieve 3D feature tracking, our proposed method leverages a joint framework to predict the 2D feature motion offsets and the feature spatial position at each timestamp simultaneously. A motion compensation module is leveraged to adapt to the feature deformation, and a patch matching module based on bi-polarity hypergraph modeling is proposed to accurately estimate the feature spatial position. 
In addition, we introduce a stereo motion consistency mechanism that establishes the constraint between the feature motion offsets and the spatial position to achieve smooth 3D trajectory estimation. + +To address the data challenge, we establish a hybrid vision system and curate the first real-world event-based 3D feature tracking dataset, named E-3DTrack. Our dataset includes multiple objects demonstrating high-speed motion in the scene, with stereo event cameras capturing high temporal resolution event streams, as shown in Fig. 1. To obtain the ground truth of the 3D feature trajectories, we utilize the Optitrack motion capture system to record the motion trajectory of each moving object. This information is then integrated with the high-precision object point cloud + +scanned by FARO Quantum ScanArm, resulting in the generation of the ground truth 3D trajectories of each feature at a rate of 250 FPS. To the best of our knowledge, our dataset is the first event-based feature tracking dataset containing high-speed moving objects and providing 3D ground truth feature trajectories. + +Our contributions could be summarized as follows: + +- We propose the first high-speed 3D feature tracking method based on stereo event cameras, which could track the 3D trajectories of features with high-speed motion. +- We achieve satisfactory 3D feature tracking performance through a motion compensation module for addressing feature deformation, a patch matching module based on bi-polarity hypergraph modeling for accurate estimation of 3D feature positions, and a stereo motion consistency mechanism to establish constraints between feature motion offsets and 3D position. +- We collect the first real-world 3D feature tracking dataset containing multiple high-speed moving objects, named E-3DTrack. Our dataset contains stereo event streams and 250 FPS ground truth 3D feature trajectories, which could be used as the 3D feature tracking benchmark. + +# 2. Related Work + +Trajectory Prediction via Event Camera. Event-based feature tracking methods have been developed rapidly within the last decade. Earlier works [18, 38] treat the events as a point set and used ICP [6] to estimate feature motion trajectories. Then, EKLT [10] is proposed to obtain feature patch from the reference frame as template, and use the event stream to track the template and predict the trajectory. Meanwhile, some event-by-event trackers [2, 3] are proposed to exploit the asynchronicity of event camera, e.g., eCDT [16] employs a clustering method to cluster adjacent events, and uses cluster descriptors to find continual feature tracks. Recently, DeepEvT [26] is proposed as the first data-driven event-based feature tracking method, which achieves state-of-the-art 2D feature tracking performance. + +An alternative approach for trajectory prediction is optical flow estimation, wherein the pixel-level motion field is predicted using the input event stream. Compared with feature tracking, these methods [1, 4, 12, 29, 30] focus more on estimating the motion field between adjacent moments and lack modeling of long-term trajectory consistency. + +However, all these existing trajectory prediction methods could only predict 2D feature trajectories in the image plane while the real objects are moving in 3D space, i.e., the predicted feature motion trajectories are information-deficient. + +Event-based 3D Position Estimation. 
As the 2D feature trajectories can be predicted, a simple and straightforward solution is to use a monocular or stereo depth estimation method to predict the depth of the feature and calculate the 3D position. In recent years, several event-based + +![](images/7a23ae9b22e080f0e58ca26cbca2c176a2e44801272b3a01263a7167241dfc85.jpg) +Figure 2. Our proposed method takes stereo event streams as input to predict the 3D trajectory of the target feature provided in the initial template patch $I_{t_0}$ . For a subsequent timestamp $t_i$ , the deformed template patch $I_{t_i}$ is predicted using the motion compensation module. Then, $I_{t_i}$ , $I_{t_0}$ , and the events $P_{t_i}$ triggered within the spatiotemporal neighboring patch of the predicted feature position $\mathbf{u}_{t_i}$ are forwarded into the offset estimation module to estimate the feature motion offsets $\Delta \mathbf{u}_{t_i}$ . Meanwhile, a patch matching module based on bi-polarity hypergraph modeling is leveraged to predict the disparity. Finally, a projection operation is performed to update the 3D trajectory. + +monocular [11, 14, 40] or stereo [28, 39] depth estimation methods are proposed, which could estimate the depth map from the input single-view or multi-view event streams. + +However, we will show that the simple combination of these two types of methods could not achieve satisfying long-term 3D feature trajectories prediction performance in Sec. 5.2. Therefore, high-speed 3D feature tracking is still a challenging open problem. + +# 3. Method + +In this section, we commence with an overview of the pipeline in Sec. 3.1, subsequently delving into the detailed architecture in Sec. 3.2, and conclude by outlining the supervision of our method in Sec. 3.3. + +# 3.1. Overview + +As shown in Fig. 2, our proposed method takes stereo event streams as input to predict 3D feature trajectories in camera 1 coordinate system. The target features are contained in gray-scale template patches at the initial moment. This is the common setting for event-based feature tracking, e.g., EKLT [10] and DeepEvT [26]. Our method leverages a joint framework to predict features' 2D motion offsets and the 3D spatial positions simultaneously at each timestamp, and further obtain the 3D trajectories through projection. + +Specifically, let $\mathcal{E}^j = \left\{e_k^j = (t_k^j,u_k^j,v_k^j,p_k^j)\right\}$ denote the event stream captured by an event camera, where $j = 1,2$ denotes camera 1 and camera 2, respectively, $e_k^j$ is the $k$ -th event captured by camera $j$ . The feature to be tracked + +is provided in a $d \times d$ template patch $I_0$ captured by camera 1 at initial moment $t_0$ . Then, the feature trajectory is predicted step-by-step. For a subsequent timestamp $t_i$ , we calculate the 2D feature coordinates $\mathbf{u}_{t_i}^1 = (u_{t_i}, v_{t_i})$ projected in camera 1 based on the predicted 3D feature position $\mathbf{X}_{t_i} = (x_{t_i}, y_{t_i}, z_{t_i})$ at the previous step. To calculate the feature trajectory, the events $\mathcal{E}_i^1$ triggered in the $d \times d$ patch around $\mathbf{u}_{t_i}^1$ and within the time bin $[t_i, t_{i+1}]$ are leveraged to provide feature motion information. Then, $\mathcal{E}_i^1$ is converted into grid-based event patch $P_{t_i}^1$ using the event representation method proposed in [26]. + +As shown in Fig. 2, the 3D movement of the object may cause deformation of the feature template patch. 
To tackle this challenge, we leverage a motion compensation module to predict the deformed template patch $\tilde{I}_{t_i}$ at timestamp $t_i$ . Then, $\tilde{I}_{t_i}$ and $I_{t_0}$ are concatenated and forwarded into the offset estimation module together with $P_{t_i}^1$ to predict the 2D feature motion offset $\Delta \mathbf{u}_{t_i}^1$ . To further estimate the 3D position of the target feature, we use the events triggered within the same $d$ rows as $P_{t_i}^1$ from $\mathcal{E}^2$ , i.e., $\mathcal{E}_i^2 = \{e_k^2 | u_{t_i} - \frac{d-1}{2} \leq u_k^2 \leq u_{t_i} + \frac{d-1}{2}, t_i \leq t_k^2 \leq t_{i+1}\}$ , to generate event row patch $R_{t_i}^2$ using the same event representation method. Then, the disparity $d_{t_{i+1}}$ could be predicted from $P_{t_i}^1$ and $R_{t_i}^2$ using our proposed patch matching module based on bipolarity hypergraph modeling. In addition, inspired by the fact that the 2D motion offsets in both camera planes have a constraint with the disparity change, we use the event patch $P_{t_i}^2$ of camera 2 to compute the offset $\Delta \mathbf{u}_{t_i}^2$ in the training stage, and propose a stereo motion consistency mechanism to enhance the trajectory prediction. Finally, the 3D feature + +position $\mathbf{X}_{t_{i + 1}}$ at $t_{i + 1}$ is obtained by projection according to $\Delta \mathbf{u}_{t_i}^1$ and $d_{t_{i + 1}}$ . In practice, the patch size is $d = 31$ , and the length of the time bin is set to $4\mathrm{ms}$ , i.e., $t_{i + 1} - t_i = 4\mathrm{ms}$ . Thus, our proposed method could track the long-term 3D feature trajectories at 250 FPS. + +# 3.2. Model Architecture + +Offset Estimation Module. As mentioned above, at times- tamp $t_i$ , we use an offset estimation module to predict the feature motion offsets projected in the camera plane, which takes $\tilde{I}_{t_i}$ , $I_{t_0}$ , and $P_{t_i}$ as input. Inspired by the great success of DeepEvT [26], we use a similar two-branch Feature Pyramid Network (FPN) [19] to extract multi-modal features from event patch $P_{t_i}$ and the template patches $I_{t_i}$ and $I_{t_0}$ , respectively. The FPN contains 4 down-sample layers and 4 up-sample layers. Then, the bottleneck feature of FPN is leveraged to calculate the correlation map between the event patch and the feature template patch. The correlation map is further concatenated with the multi-modal feature and forwarded into a joint encoder with 4 down-sample layers and a ConvLSTM [34] layer to obtain fused feature $F_{t_i}$ . Then, we use a linear layer to compute the weights of $F_{t_{i - 1}}$ and $F_{t_i}$ and explicitly fuse the temporal information. Finally, a linear layer is leveraged to generate predicted feature motion offsets. Detailed network architecture is provided in the supplementary material. Using the offset estimation module, the feature motion offset $\Delta \mathbf{u}_{t_i}$ projected in the camera plane could be estimated. + +Motion Compensation Module. As shown in Fig. 2, high-speed 3D moving objects may have depth change and rotation, which may cause feature shape deformation. Therefore, tracking with the initial template patch may lead to fatal errors or even incorrectly tracking other features. To tackle this problem, we leverage a motion compensation module to correct the template patch at each moment. Specifically, the feature template patch may have scaling, rotation, and shear changes. It should be noted that translation is not considered since the feature motion offset is already predicted. 
At the timestamp $t_i$ , the fused temporal feature $F_{t_{i-1}}$ is leveraged as input to predict the scale factors $s_x, s_y$ , rotation angle $\theta$ , and shear factors $t_x, t_y$ using 2 linear layers. Then, the affine transform is performed according to the predicted transform factors: + +$$ +\tilde {I} _ {t _ {i}} (u, v) = \left[ \begin{array}{c} \beta s _ {x}, \alpha s _ {y} \\ - \alpha s _ {x}, \beta s _ {y} \end{array} \right] \left[ \begin{array}{c} 1, a \\ - b, 1 + a b \end{array} \right] I _ {t _ {0}} (u, v), \tag {1} +$$ + +where $\alpha = \sin \theta$ , $\beta = \cos \theta$ , $a = \tan t_x$ , and $b = \tan t_y$ . Using the motion compensation module, the corrected template patch $\tilde{I}_{t_i}$ at each timestamp could be obtained. + +Patch Matching Module. To further estimate the 3D position of the target feature, we propose a patch matching module based on bi-polarity hypergraph modeling to obtain the spatial position of the feature by predicting the disparity. + +Different from traditional stereo matching, for the 3D feature tracking task, the target feature is contained in the local event patch $P_{t_i}^1$ . Therefore, the disparity could only be predicted from the local patch instead of global information. Under such condition, mismatching will occur since the target scene may contain multiple similar features distributed in space and $P_{t_i}^1$ only contains local information. Therefore, we propose a bi-polarity hypergraph-based high-order correlation modeling mechanism to eliminate mismatching. + +As mentioned in Sec. 3.1, for each timestamp $t_i$ , we use the event patch $P_{t_i}^1$ around $\mathbf{u}_{t_i}^1$ and the corresponding event row patch $R_{t_i}^2$ from camera 2 to achieve patch matching. Specifically, we use 4 convolutional layers to extract features $\mathbf{M}_{t_i}^1 \in \mathbb{R}^{d \times d \times c}$ and $\mathbf{M}_{t_i}^2 \in \mathbb{R}^{d \times W \times c}$ from $P_{t_i}^1$ and $R_{t_i}^2$ , respectively, where $c$ is the feature channel and $W$ is the image width, i.e., the number of candidate matching positions. We further calculate the cost volume $\mathbf{C}_{t_i} \in \mathbb{R}^{W \times c}$ composed of the feature similarity between $\mathbf{M}_{t_i}^1$ and $\mathbf{M}_{t_i}^2$ at each matching position, which represents the pair-wise similarity between $P_{t_i}^1$ and the sub-patch of $R_{t_i}^2$ at each matching position. Then, the $W$ matching positions are used as vertices to construct bi-polarity hypergraphs. Compared to the pair-wise correlation contained in the cost volume, each hyperedge of a hypergraph could connect multiple vertices, i.e., high-order correlations among multiple vertices could be constructed. In practice, we use the Euclidean distance of the vertex feature as metric and calculate the $k$ nearest neighbors of each vertex. For each vertex, we use a hyperedge to connect the vertices in its $k$ neighbor vertices with spatial distance smaller than a certain threshold $\delta$ . Therefore, a positive hypergraph $G^+$ with the adjacency matrix $H^+$ could be constructed. Besides, for each vertex, vertices with spatial distance larger than $\delta$ in its $k$ neighbor vertices are connected by another hyperedge. Thus, a negative hypergraph $G^-$ with the adjacency matrix $H^-$ could be constructed. Each hyperedge of $G^+$ connects matching patches that are semantic similar and spatially close to $P_{t_i}^1$ . These connections are expected to be enhanced. 
Patch Matching Module. To further estimate the 3D position of the target feature, we propose a patch matching module based on bi-polarity hypergraph modeling, which obtains the spatial position of the feature by predicting the disparity. + +Different from traditional stereo matching, in the 3D feature tracking task the target feature is contained in the local event patch $P_{t_i}^1$ . Therefore, the disparity can only be predicted from the local patch rather than from global information. Under such a condition, mismatching will occur, since the target scene may contain multiple similar features distributed in space while $P_{t_i}^1$ only contains local information. Therefore, we propose a bi-polarity hypergraph-based high-order correlation modeling mechanism to eliminate mismatching. + +As mentioned in Sec. 3.1, for each timestamp $t_i$ , we use the event patch $P_{t_i}^1$ around $\mathbf{u}_{t_i}^1$ and the corresponding event row patch $R_{t_i}^2$ from camera 2 to achieve patch matching. Specifically, we use 4 convolutional layers to extract features $\mathbf{M}_{t_i}^1 \in \mathbb{R}^{d \times d \times c}$ and $\mathbf{M}_{t_i}^2 \in \mathbb{R}^{d \times W \times c}$ from $P_{t_i}^1$ and $R_{t_i}^2$ , respectively, where $c$ is the feature channel and $W$ is the image width, i.e., the number of candidate matching positions. We further calculate the cost volume $\mathbf{C}_{t_i} \in \mathbb{R}^{W \times c}$ composed of the feature similarity between $\mathbf{M}_{t_i}^1$ and $\mathbf{M}_{t_i}^2$ at each matching position, which represents the pair-wise similarity between $P_{t_i}^1$ and the sub-patch of $R_{t_i}^2$ at each matching position. Then, the $W$ matching positions are used as vertices to construct bi-polarity hypergraphs. Compared to the pair-wise correlation contained in the cost volume, each hyperedge of a hypergraph can connect multiple vertices, i.e., high-order correlations among multiple vertices can be constructed. In practice, we use the Euclidean distance between vertex features as the metric and calculate the $k$ nearest neighbors of each vertex. For each vertex, we use a hyperedge to connect the vertices among its $k$ nearest neighbors whose spatial distance is smaller than a certain threshold $\delta$ . Therefore, a positive hypergraph $G^+$ with the adjacency matrix $\mathbf{H}^+$ can be constructed. Besides, for each vertex, the vertices among its $k$ nearest neighbors whose spatial distance is larger than $\delta$ are connected by another hyperedge. Thus, a negative hypergraph $G^-$ with the adjacency matrix $\mathbf{H}^-$ can be constructed. Each hyperedge of $G^+$ connects matching patches that are semantically similar and spatially close to $P_{t_i}^1$ ; these connections are expected to be enhanced. In contrast, each hyperedge of $G^-$ connects matching patches that are semantically similar but spatially distant from $P_{t_i}^1$ , which are interference and need to be suppressed. Then, inspired by [9], we propose a feature aggregation method based on bi-polarity hypergraphs: + +$$ \hat{\mathbf{C}}_{t_i} = \mathbf{C}_{t_i} + \sigma\left(\left(\mathbf{D}_v^+\right)^{-1}\mathbf{H}^+\left(\mathbf{D}_e^+\right)^{-1}\left(\mathbf{H}^+\right)^{\top}\mathbf{C}_{t_i}\boldsymbol{\Theta}^+\right) - \sigma\left(\left(\mathbf{D}_v^-\right)^{-1}\mathbf{H}^-\left(\mathbf{D}_e^-\right)^{-1}\left(\mathbf{H}^-\right)^{\top}\mathbf{C}_{t_i}\boldsymbol{\Theta}^-\right), \tag{2} $$ + +where $\mathbf{D}_e^*$ and $\mathbf{D}_v^*$ are the diagonal matrices of hyperedge degrees and vertex degrees, respectively, $\boldsymbol{\Theta}^{*}$ denotes the learnable parameters, and $\sigma (\cdot)$ is a non-linear activation function. Using Eq. (2), features are aggregated to enhance vertices that are semantically similar and spatially close, and to suppress vertices that are semantically similar but spatially distant. Finally, $\hat{\mathbf{C}}_{t_i}$ is forwarded into a 1D convolutional layer with a kernel size of 3 to regress the matching result. Using the patch matching module, the disparity $d_{t_i}$ of the feature is predicted.
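The following is a small, self-contained PyTorch sketch of this bi-polarity hypergraph aggregation for one cost volume. It is only meant to make Eq. (2) concrete: the per-vertex hyperedge construction, the ReLU activation, the random Θ matrices, and the hyper-parameter values are illustrative assumptions rather than the paper's implementation.

```python
import torch


def hg_conv(H, C, Theta):
    """One hypergraph convolution term of Eq. (2): Dv^-1 H De^-1 H^T C Theta."""
    Dv = H.sum(dim=1).clamp(min=1.0)            # vertex degrees
    De = H.sum(dim=0).clamp(min=1.0)            # hyperedge degrees
    out = (H / De) @ (H.t() @ C @ Theta)        # aggregate over hyperedges
    return torch.relu(out / Dv.unsqueeze(1))    # sigma(.) realised here as ReLU


def bipolar_aggregate(C, k=3, delta=16.0):
    """C: (W, c) cost-volume features, one vertex per candidate matching position."""
    W, c = C.shape
    pos = torch.arange(W, dtype=C.dtype)                                 # 1D spatial positions
    knn = torch.cdist(C, C).topk(k + 1, largest=False).indices[:, 1:]    # k feature-space NNs
    H_pos = torch.zeros(W, W)                   # incidence: rows = vertices, cols = hyperedges
    H_neg = torch.zeros(W, W)
    for v in range(W):
        H_pos[v, v] = 1.0                       # each vertex joins its own positive hyperedge
        for n in knn[v].tolist():
            if abs(pos[n] - pos[v]) < delta:
                H_pos[n, v] = 1.0               # similar and spatially close -> enhance
            else:
                H_neg[n, v] = 1.0               # similar but spatially distant -> suppress
    Theta_pos = torch.randn(c, c) * 0.1         # learnable in practice; random here
    Theta_neg = torch.randn(c, c) * 0.1
    return C + hg_conv(H_pos, C, Theta_pos) - hg_conv(H_neg, C, Theta_neg)


# toy usage: 64 candidate positions with 16-dim features
C_hat = bipolar_aggregate(torch.randn(64, 16))
print(C_hat.shape)  # torch.Size([64, 16])
```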
Projection. After the feature motion offsets $\Delta \mathbf{u}_{t_i}$ and the disparity $d_{t_i}$ are predicted, the 3D feature coordinates $\mathbf{X}_{t_{i + 1}}$ at $t_{i + 1}$ can be computed through projection. + +# 3.3. Supervision and Loss Functions + +Stereo Motion Consistency. For objects moving in 3D space and captured by stereo cameras, the 2D motion offsets are strongly constrained by the disparity. Meanwhile, our offset estimation module is also deeply coupled with the patch matching module. Therefore, inspired by [21], we leverage a stereo motion consistency constraint to reinforce this correlation. Consider a point $\mathbf{X} = (x,y,z)$ ; its 2D coordinates in the camera plane can be calculated by $\mathbf{u} = (u,v) = \frac{f}{s}\frac{(x,y)}{z}$ , where $f$ is the camera focal length and $s$ is the coordinate conversion factor. For calibrated stereo cameras with a baseline distance of $b$ , the disparity of $\mathbf{X}$ is $d = \frac{f}{s}\frac{b}{z}$ . By taking the time derivative, we obtain $\frac{\Delta d}{\Delta t} = -\frac{f}{s}\frac{b}{z^2}\frac{\Delta z}{\Delta t}$ . Therefore, we have: + +$$ d_{t_i} - d_{t_{i-1}} = \Delta d = -\frac{f}{s}\frac{b}{z_{t_i}^2}\left(z_{t_i} - z_{t_{i-1}}\right). \tag{3} $$ + +For the 2D motion offsets, we can similarly obtain $\frac{\Delta\mathbf{u}}{\Delta t} = \frac{f}{zs} (\frac{\Delta x}{\Delta t},\frac{\Delta y}{\Delta t}) - \frac{f}{z^2 s}\frac{\Delta z}{\Delta t} (x,y)$ , i.e., $\Delta \mathbf{u} = (\Delta u,\Delta v) = \frac{f}{zs} (\Delta x,\Delta y) - \frac{f\Delta z}{z^2 s} (x,y)$ . In practice, suppose the coordinates of a feature in camera 1 at timestamp $t_i$ are $\mathbf{X}_{t_i}^1 = (x_{t_i},y_{t_i},z_{t_i})$ ; then the coordinates in camera 2 are $\mathbf{X}_{t_i}^2 = (x_{t_i} - b,y_{t_i},z_{t_i})$ . Therefore, we have: + +$$ \begin{aligned} \Delta u_{t_i}^1 - \Delta u_{t_i}^2 &= \frac{f}{s}\frac{b}{z_{t_i}^2}\Delta z_{t_i} = -\frac{f}{s}\frac{b}{z_{t_i}^2}\left(z_{t_i} - z_{t_{i-1}}\right), \\ \Delta v_{t_i}^1 - \Delta v_{t_i}^2 &= 0. \end{aligned} \tag{4} $$ + +Therefore, we can obtain the stereo motion constraint $\Delta u_{t_i}^1 -\Delta u_{t_i}^2 = d_{t_i} - d_{t_{i - 1}}$ from Eq. (3) and Eq. (4). + +According to the stereo motion constraint, we introduce the stereo motion consistency loss: + +$$ \mathcal{L}_i^{\mathrm{smc}} = \mathcal{L}_1\left(\Delta u_{t_i}^1 - \Delta u_{t_i}^2,\, d_{t_i} - d_{t_{i-1}}\right) + \mathcal{L}_1\left(\Delta v_{t_i}^1, \Delta v_{t_i}^2\right), \tag{5} $$ + +where $\mathcal{L}_1(\cdot ,\cdot)$ is the Manhattan distance. + +Loss Functions. Since our proposed method predicts the 3D feature coordinate $\mathbf{X}_{t_i}$ at each timestamp, we use the Manhattan distance between the predicted trajectories and the ground truth trajectories as supervision: + +$$ \mathcal{L}_i^{\mathrm{traj}} = \mathcal{L}_1\left(\mathbf{X}_{t_i}, \mathbf{X}_{t_i}^{\mathrm{gt}}\right). \tag{6} $$ + +Since both the offset estimation module and the patch matching module severely affect the 3D trajectory prediction accuracy, we compute the ground truth 2D feature offsets $\Delta\mathbf{u}_{t_i}^{1^{\mathrm{gt}}}$ and disparity $d_{t_i}^{\mathrm{gt}}$ at each timestamp from the ground truth 3D trajectory through projection and use them as supervision. In practice, the offset estimation is supervised with the loss function: + +$$ \mathcal{L}_i^{\mathrm{off}} = \mathcal{L}_1\left(\Delta \mathbf{u}_{t_i}^1, \Delta \mathbf{u}_{t_i}^{1^{\mathrm{gt}}}\right). \tag{7} $$ + +Table 1. Comparison of our E-3DTrack dataset with other existing event-based feature tracking datasets.
| Dataset | Dim. | Motion | Scenario | GT Freq. (FPS) |
| --- | --- | --- | --- | --- |
| EC [27] | 2D | Homo. | Static | 200 |
| EDS [15] | 2D | Homo. | Static | 150 |
| E-3DTrack | 3D | Non-homo. | Dynamic | 250 |
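Before the remaining loss terms are defined, the short numerical sketch below makes the projection model and the stereo motion constraint of Eqs. (3)-(4) concrete: it projects a moving 3D point into both cameras with the pinhole relations above and checks that the offset difference equals the disparity change. The focal length, pixel-size factor, baseline, and point positions are made-up values.

```python
import numpy as np

f, s, b = 4e-3, 1e-5, 0.12        # focal length [m], pixel size [m/px], baseline [m] (made up)


def project(X, shift=0.0):
    """Pinhole projection u = (f/s) x/z, v = (f/s) y/z and disparity d = (f/s) b/z.
    `shift` moves the point into the second camera's frame (x -> x - b)."""
    x, y, z = X[0] - shift, X[1], X[2]
    return f / s * x / z, f / s * y / z, f / s * b / z


def back_project(u, v, d):
    """Recover the 3D position in the camera-1 frame from (u, v, disparity)."""
    z = f * b / (s * d)
    return np.array([u * z * s / f, v * z * s / f, z])


X_prev = np.array([0.30, -0.10, 2.00])   # feature at t_{i-1}
X_curr = np.array([0.32, -0.08, 1.90])   # feature at t_i (moved and came closer)

u1p, v1p, d_prev = project(X_prev)
u1c, v1c, d_curr = project(X_curr)
u2p, _, _ = project(X_prev, shift=b)     # camera 2
u2c, _, _ = project(X_curr, shift=b)

# stereo motion constraint: the two printed values coincide
print((u1c - u1p) - (u2c - u2p), d_curr - d_prev)
# and (u, v, d) at t_i back-projects to the 3D position used above
print(back_project(u1c, v1c, d_curr), X_curr)
```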
The disparity prediction is supervised with: + +$$ \mathcal{L}_i^{\mathrm{disp}} = \mathcal{L}_1\left(d_{t_i}, d_{t_i}^{\mathrm{gt}}\right). \tag{8} $$ + +Finally, our model is trained end-to-end with the supervision of the following total loss function: + +$$ \mathcal{L} = \sum_{i = 1}^{N} \left(\mathcal{L}_i^{\mathrm{traj}} + \mathcal{L}_i^{\mathrm{off}} + \mathcal{L}_i^{\mathrm{disp}} + \alpha \mathcal{L}_i^{\mathrm{smc}}\right), \tag{9} $$ + +where $\alpha$ is a hyper-parameter and $N$ is the sequence length. + +# 4. 3D Feature Tracking Dataset: E-3DTrack + +To address the deficiency of high-speed 3D feature tracking datasets, we establish a hybrid vision system containing stereo event cameras and Optitrack, as shown in Fig. 3 (a), and curate the first event-based 3D feature tracking dataset, named E-3DTrack. Compared to existing event-based feature tracking datasets that contain only static scenes and 2D trajectories, our dataset is the first to contain high-speed moving objects and ground truth 3D feature trajectories. + +Limited by the capturing frequency of 3D vision sensors (e.g., $< 30$ FPS for LiDAR), it is difficult to accurately record the 3D feature trajectories of high-speed moving objects at a high frame rate. To tackle this problem, we use a motion capture system, i.e., Optitrack, to record the trajectory of each object with fixed markers attached. To explicitly obtain feature-level 3D trajectories, we use a scanner, i.e., the FARO Quantum ScanArm, to capture a high-precision point cloud of each object. Then, the 3D affine transform, incorporating a homogeneous scale, is calculated from the object coordinate system to the Optitrack coordinate system based on the markers' coordinates (a generic sketch of this marker-based alignment is given below, after the dataset figures). This leads to the acquisition of the time-series point cloud sequence of the moving objects under the Optitrack coordinate system. Finally, the feature trajectories can be derived from the time-series point cloud sequence based on the feature point index. Hence, our dataset comprises ground truth 3D feature trajectories of high-speed moving objects at 250 FPS, surpassing the capturing frequency of most existing 3D vision sensors. + +Using our hybrid vision system, we captured 40 high-speed motion scenarios containing a total of 1300 sequences. We randomly select 10 scenarios as the test set, and the remaining 30 scenarios are used as the training set. Note that due to the cross-scene division, the scenes in the test set are unseen in the training set. More details of our dataset are provided in the supplementary material. + +![](images/66e0935b298ff5c1093ad283321fea5a22ff79f906e5d979e3c7a4799654f72a.jpg) +(a) Our hybrid vision system. + +![](images/08521545b4d309877e6bcae82d07b84fc92d958dcadd6120848439c649462540.jpg) +(b) Samples of our dataset. From left to right: reference frame, feature patch, stereo event streams, and ground truth 3D feature trajectory. + +![](images/408a890f2805c6dd15b7b6310e34938141138fd7f8bcb91ea9ba3844f9ad53c4.jpg) + +![](images/4543f26c0d61c9449759f4dbbbdf96a80a8d6f59dac960d350127f0e41a0cc16.jpg) + +![](images/5cd31bd7ee6863a32c9932f002dfe8ff5452a00ba92be1da30d5d9430bfde28b.jpg) + +![](images/debfb1089fde99fdbeee925cb6fc76c9b491857fc71619cd0e0f66e4740da341.jpg)
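As referenced above, the following is a generic NumPy sketch of the marker-based alignment used to bring the scanned object point cloud into the Optitrack coordinate system: a least-squares similarity transform (rotation, homogeneous scale, translation) estimated from corresponding marker coordinates. The function name and the closed-form (Umeyama-style) estimator are assumptions of this sketch, not the authors' exact implementation.

```python
import numpy as np


def similarity_transform(src, dst):
    """Estimate rotation R, scale c, and translation t such that dst ~ c * R @ src + t,
    from corresponding marker coordinates src (object frame, Nx3) and dst (Optitrack
    frame, Nx3), using the closed-form Umeyama solution."""
    mu_s, mu_d = src.mean(0), dst.mean(0)
    xs, xd = src - mu_s, dst - mu_d
    cov = xd.T @ xs / len(src)                    # cross-covariance of the centred points
    U, S, Vt = np.linalg.svd(cov)
    D = np.eye(3)
    if np.linalg.det(U) * np.linalg.det(Vt) < 0:  # keep R a proper rotation
        D[2, 2] = -1.0
    R = U @ D @ Vt
    c = np.trace(np.diag(S) @ D) / xs.var(axis=0).sum()
    t = mu_d - c * R @ mu_s
    return R, c, t


# quick synthetic check: recover a known transform from 5 markers
rng = np.random.default_rng(0)
src = rng.normal(size=(5, 3))
R_true = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
dst = 2.0 * src @ R_true.T + np.array([0.1, -0.2, 0.3])
R, c, t = similarity_transform(src, dst)
print(np.allclose(c * src @ R.T + t, dst))  # True

# with R, c, t the whole scanned point cloud (and hence every indexed feature
# point) can be moved into the Optitrack frame at each timestamp:
# cloud_optitrack = c * cloud_object @ R.T + t
```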
Table 1 shows the comparison of our E-3DTrack dataset with other existing event-based feature tracking datasets, including the Event Camera dataset (EC) [27] and the Event-aided Direct Sparse Odometry (EDS) dataset [15]. The main advantages of our dataset lie in the following three aspects. + +- 3D trajectory. Our dataset is the first feature tracking dataset containing ground truth 3D trajectories, enabling feature motion trajectory estimation in 3D space. +- Non-homogeneous motion. Our dataset is the first event-based feature tracking dataset containing high-speed moving objects. The existing EC and EDS datasets mainly contain stationary scenarios, so feature motions are caused by camera movement. Since there are no moving objects in the scene, the motions of all features are almost homogeneous, as shown in Fig. 4. In contrast, the feature motions in our dataset are non-homogeneous, which is closer to real-world applications. +- Accurate ground truth. Our dataset contains ground truth 3D feature trajectories captured from Optitrack. In contrast, since the DAVIS346 event camera can record event streams and 25 FPS video simultaneously, the ground truth 2D trajectories in the EC and EDS datasets are obtained using the frame-based feature tracking method KLT [25], or by further triangulating KLT tracks using camera poses and reprojecting them to the frames. Thus, our dataset contains more accurate ground truth trajectories. + +Figure 3 (b) shows some samples of our E-3DTrack dataset. We visualize the reference frames, feature template patches, stereo event streams, and the ground truth 3D feature trajectories of each sample. + +# 5. Experiments + +In this section, we first introduce the experimental settings. Then, we analyze the quantitative and qualitative comparisons, respectively. Finally, we conduct ablation studies to demonstrate the effectiveness of each proposed module. + +![](images/06316860464a31f3aedc0892a1232443b9493735a91a6e0a4f62883e76f9b448.jpg) +Figure 3. (a) Our hybrid vision system. (b) Samples of our E-3DTrack dataset. The first column is the reference frame at the initial moment, and the features to be tracked are marked in each frame. Some feature template patches are zoomed in for display in the second column. The stereo event streams and the ground truth 3D feature trajectories are shown in the last three columns, respectively. +(a) Sample from EC Dataset +Figure 4. Examples from the existing EC [27] and EDS [15] datasets. + +![](images/76a18df92186aa05f15cd2f015496559e22427e5a97ff952f9409d40c0a4f014.jpg) +(b) Sample from EDS Dataset + +# 5.1. Experimental Settings + +Comparison Methods. Since there are no existing high-speed 3D feature tracking methods, we use existing event-based trajectory prediction methods to obtain 2D feature trajectories, and use stereo depth estimation methods to further obtain the 3D feature trajectories. Specifically, we combine the event-based optical flow estimation method E-RAFT [12] and the event-based feature tracking methods EKLT [10] and DeepEvT [26] with the event-based stereo depth estimation methods TSES [39] and SDE [28], respectively, as our baseline comparison methods. + +Metrics. To evaluate our proposed method and other comparison methods, we use the Tracked Feature Ratio (TFR, higher is better), Feature Age [26] (FA, higher is better), and the Root Mean Squared Error (RMSE, lower is better) as the metrics. TFR is calculated as the ratio of the time during which the spatial distance between the predicted 3D trajectory and the ground truth 3D trajectory is less than a certain threshold $c$ to the total sequence time. See the detailed definitions in the supplementary material.
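For concreteness, here is a small NumPy sketch of how these metrics can be computed for a single predicted track. It follows the TFR definition given above; the feature-age computation shown here (normalised time before the error first exceeds the threshold) is a simplified reading, since the exact definitions are given in the supplementary material.

```python
import numpy as np


def tracking_metrics(pred, gt, c=0.1):
    """pred, gt: (T, 3) predicted / ground-truth 3D feature positions in metres.
    Returns TFR, a simplified feature age (FA), and RMSE for one track."""
    err = np.linalg.norm(pred - gt, axis=1)            # per-timestep 3D error
    tfr = float((err < c).mean())                      # fraction of time within threshold c
    lost = np.flatnonzero(err >= c)                    # timesteps where the track is lost
    fa = 1.0 if lost.size == 0 else float(lost[0]) / len(err)
    rmse = float(np.sqrt(np.mean(err ** 2)))
    return {"TFR": tfr, "FA": fa, "RMSE": rmse}


# toy usage on a random 250-step (1 s at 250 FPS) trajectory
gt = np.cumsum(np.random.randn(250, 3) * 0.002, axis=0)
pred = gt + np.random.randn(250, 3) * 0.02
print(tracking_metrics(pred, gt, c=0.1))
```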
Table 2. Quantitative results on our E-3DTrack dataset. Feature age (FA), tracked feature ratio (TFR), and root mean square error (RMSE) are selected as the metrics. Bold numbers represent the best scores, and underlined numbers represent the second-best scores.
| Method | FA (0.1 m) ↑ | FA (0.15 m) ↑ | FA (0.2 m) ↑ | TFR (0.1 m) ↑ | TFR (0.15 m) ↑ | TFR (0.2 m) ↑ | RMSE ↓ |
| --- | --- | --- | --- | --- | --- | --- | --- |
| E-RAFT [12] + TSES [39] | 0.0409 | 0.0664 | 0.092 | 0.1701 | 0.2667 | 0.3439 | 0.4726 |
| E-RAFT [12] + SDE [28] | 0.1385 | 0.2399 | 0.3204 | 0.3121 | 0.4726 | 0.5806 | 0.3368 |
| EKLT [10] + TSES [39] | 0.0232 | 0.0429 | 0.0628 | 0.1180 | 0.1961 | 0.2685 | 0.4806 |
| EKLT [10] + SDE [28] | 0.1026 | 0.1856 | 0.2584 | 0.2421 | 0.3738 | 0.4700 | 0.4034 |
| DeepEvT [26] + TSES [39] | 0.0713 | 0.1117 | 0.1452 | 0.3786 | 0.4991 | 0.5818 | 0.3549 |
| DeepEvT [26] + SDE [28] | 0.2314 | 0.3462 | 0.4339 | 0.5782 | 0.7060 | 0.7765 | 0.1889 |
| E-3DTrack (Ours) | 0.2601 | 0.4179 | 0.5428 | 0.6928 | 0.8164 | 0.8772 | 0.1181 |
+ +Table 3. Comparison of inference time on E-3DTrack dataset. + +
| Method | E-RAFT + SDE | DeepEvT + SDE | Ours |
| --- | --- | --- | --- |
| Time (ms/step) | 154.83 | 93.22 | 40.30 |
Implementation Details. Our method is implemented based on PyTorch [31]. Our model is trained end-to-end for 100 epochs with a batch size of 16. The optimization method is AdamW [23], and the cosine annealing schedule [22] is leveraged. The learning rate decays from $2 \times 10^{-4}$ to $1 \times 10^{-6}$ within 100 epochs. The hyper-parameters are selected as $\alpha = 0.25$ in Eq. (9), and $k = 3$ and $\delta = 16$ for the bi-polarity hypergraph construction. + +# 5.2. Quantitative Comparison + +Table 2 shows the quantitative comparison of our proposed method with other comparison methods. From the table, we observe that our proposed method significantly outperforms all comparison methods and achieves state-of-the-art performance. Specifically, compared with the second-best method, i.e., the combination of the state-of-the-art 2D event-based feature tracking method DeepEvT [26] and the stereo depth estimation method SDE [28], our proposed method reduces the RMSE by $37.5\%$ and improves the FA by $12.4\%$ , $20.7\%$ , and $25.1\%$ in terms of $c = 0.1 \mathrm{~m}$ , $0.15 \mathrm{~m}$ , and $0.2 \mathrm{~m}$ , respectively. + +Compared to the comparison methods that perform trajectory prediction and depth estimation separately, our proposed method leverages a joint framework to track the 3D feature trajectories of high-speed moving objects. This indicates that, for 3D moving objects, the feature trajectory in the camera plane is highly correlated with the 3D position, and the simple combination of 2D trajectory prediction and 3D position estimation leads to fatal errors. Instead, our proposed method tracks 3D trajectories accurately using the stereo motion consistency constraint. Meanwhile, compared to traditional stereo depth estimation methods, our proposed patch matching module uses a high-order correlation modeling mechanism based on bi-polarity hypergraphs to eliminate mismatching of similar features, further enhancing the 3D feature tracking robustness. + +Table 3 shows the inference time comparison of our proposed method with other comparison methods. Specifically, we test the inference time of each tracking update step. From the table, we observe that compared to the second-best method, i.e., DeepEvT + SDE, our proposed method reduces the inference time by $56.8\%$ while achieving better tracking performance. This demonstrates the computational efficiency of our proposed method. + +![](images/60071049676d8abe68b498277633c3877768eef4b8b64f2772d487d0fcabc3b6.jpg) + +![](images/fb85a277362ca3fa2517db46065df845ab1d8a065784e68d3507291ae04b453e.jpg) + +![](images/55a918718219c6b8e379082b265ed1d021ed8f9caca51f27e34e88aa294b46f1.jpg) +(a) Reference Feature Patch + +![](images/6001b6668a4a34f5eed73689e5b8998d4c8648e61642cad5e036f7056d80fef1.jpg) + +![](images/9891ea5799361c2ab61fa2ea01ea44fccb490be75df0d0f00c89c75aec2fe094.jpg) + +![](images/bc7bed56ccc987baad30604d08a63a4145adfb5b64d5fc88c704a329abd62968.jpg) +(b) DeepEvT + SDE + +![](images/de7d353215f22019a59b572a331ffa0b6bb10f6c4efbc8ea70d7400801106583.jpg) + +![](images/c42af8aacb337deff3757c50aa8ab7d84123173aa59aad64a1e1900a482fa359.jpg) + +![](images/50415d6b9e9d0efeef18b30c59e5f65140153402a9358570a3d3d717cd9b1caf.jpg) + +![](images/a53b273207a25cdf6317f69b492bc024999198f6967e414dc4baafbbbe4a26ba.jpg) +(c) Ours +Figure 5. Qualitative comparison on our E-3DTrack dataset. From left to right: the reference feature patch, the ground truth feature trajectories (red), and the feature trajectories (blue) predicted by DeepEvT [26] + SDE [28] and our proposed method, respectively.
# 5.3. Qualitative Comparison + +Figure 5 shows the qualitative 3D feature tracking results of our proposed method and the second-best comparison method, i.e., DeepEvT [26] + SDE [28]. The predicted 3D trajectories and the ground truth trajectories are shown in blue and red, respectively. From the figure, we observe that our proposed method achieves more robust 3D feature tracking. As shown in the first row, the comparison method achieves adequate feature tracking performance in simple scenarios where the object motions do not contain significant depth changes; such scenarios are similar to 2D feature tracking. Similar observations can be made in the second row: for the white geometric model with slight depth variation, the comparison method achieves 3D feature tracking with slight oscillations. However, the red star with large depth variation and rotation cannot be tracked accurately by the comparison method. The last two rows show two extreme scenarios, i.e., the 3D motions of the objects involve large depth variation and rotation, which cause significant feature shape deformation. Under such scenarios, the comparison method tracks the features with fatal errors. In contrast, our proposed method tracks the 3D trajectories of the high-speed moving features robustly and continuously, thanks to our motion compensation module and patch matching module. + +![](images/f516e9abefed5e9c3bf0b240442b88451917c06c6ca9ffbef1a20d4c655b5706.jpg) +Tracking Error + +![](images/5a308180ff0d17a86bb0b60d51646c627c9dac3f90771f609e6964abd6baf8b4.jpg) +Tracked Feature Ratio +Figure 6. Results of the mean tracking error (left) and the tracked feature ratio (right) over tracking time. + +Figure 6 further shows the tracking error (RMSE) and the tracked feature ratio (TFR) over time on our E-3DTrack dataset. The threshold is selected as $c = 0.1 \, \text{m}$ to calculate the TFR. From the figure, we observe that our proposed method can continuously track the 3D trajectories of target features, i.e., it maintains a high TFR consistently. We also observe that the TFR of E-RAFT + SDE is comparable with that of DeepEvT + SDE in the initial stage, but gradually decreases over time. This is because the optical flow estimation method lacks long-term consistency modeling. In contrast, our proposed method maintains a high TFR and a low tracking error over the whole tracking period. + +# 5.4. Ablation Experiments + +To demonstrate the effectiveness of each proposed module, we validate the performance of our model with and without the motion compensation module (denoted as MC), the stereo motion consistency mechanism (denoted as $\mathcal{L}^{\mathrm{smc}}$ ), and the bi-polarity hypergraph-based high-order correlation modeling mechanism (denoted as BiHCM), respectively. The ablation results are shown in Tab. 4. See the supplementary material for detailed settings. + +Bi-Polarity Hypergraph Modeling. From Tab. 4 we observe that, compared with our base model (row (1)), the addition of BiHCM increases the TFR from 0.5586 to 0.6082. Compared with our full model, the removal of BiHCM leads to an RMSE increase of $20.8\%$ . This is because our proposed BiHCM enhances the connections between patches with similar features that are spatially close, and suppresses patches with similar features but spatially distant, which eliminates mismatching and further improves the 3D feature tracking performance. + +Table 4. Ablation experiments on our E-3DTrack dataset.
| | BiHCM | $\mathcal{L}^{\mathrm{smc}}$ | MC | TFR (0.1 m) ↑ | RMSE ↓ |
| --- | --- | --- | --- | --- | --- |
| (1) | ✗ | ✗ | ✗ | 0.5586 | 0.1807 |
| (2) | ✓ | ✗ | ✗ | 0.6082 | 0.1505 |
| (3) | ✗ | ✓ | ✗ | 0.5942 | 0.1512 |
| (4) | ✗ | ✗ | ✓ | 0.5660 | 0.1624 |
| (5) | ✓ | ✓ | ✗ | 0.6705 | 0.1268 |
| (6) | ✓ | ✗ | ✓ | 0.6599 | 0.1312 |
| (7) | ✗ | ✓ | ✓ | 0.6441 | 0.1427 |
| (8) | ✓ | ✓ | ✓ | 0.6928 | 0.1181 |
Stereo Motion Consistency. As shown in Tab. 4, compared with the base model, the addition of the stereo motion consistency constraint reduces the RMSE from 0.1807 to 0.1512. Compared with the full model, the removal of the stereo motion consistency constraint increases the RMSE by $11.1\%$ . This is because the stereo motion consistency effectively constrains the correlation between the 2D trajectory and the 3D spatial position of the objects, making our method predict more accurate and smoother 3D trajectories. + +Motion Compensation. As shown in Tab. 4, compared with the base model, the addition of the motion compensation module reduces the RMSE by $10.1\%$ , and compared with the full model, its removal increases the RMSE by $7.4\%$ . With the motion compensation module, our proposed method can better deal with the feature deformation caused by depth changes and rotations of moving objects, and achieves more robust 3D feature tracking. + +These ablation experiments demonstrate the effectiveness of each proposed module. + +# 6. Conclusion + +In this paper, we propose the first high-speed 3D feature tracking method that takes stereo event streams as input to estimate 3D feature trajectories. Our proposed method leverages a joint framework to obtain 3D feature trajectories by estimating the feature motion offsets and the spatial position simultaneously. A motion compensation module and a patch matching module based on bi-polarity hypergraphs are proposed to achieve robust feature tracking. Meanwhile, the first 3D feature tracking dataset containing high-speed moving objects and ground truth 3D feature trajectories at 250 FPS is constructed, named E-3DTrack, which can be used as the first 3D feature tracking benchmark. + +# 7. Acknowledgment + +This work was supported by the National Natural Science Funds of China (No. 62021002 and No. 62088102) and the Beijing Natural Science Foundation (No. 4222025). + +# References + +[1] Himanshu Akolkar, Sio-Hoi Ieng, and Ryad Benosman. Real-Time High Speed Motion Prediction Using Fast Aperture-Robust Event-Driven Visual Flow. IEEE Trans. Pattern Anal. Mach. Intell., 44(1):361-372, 2020. 2 +[2] Ignacio Alzugaray and Margarita Chli. ACE: An Efficient Asynchronous Corner Tracker for Event Cameras. In Int. Conf. on 3D Vis., pages 653-661. IEEE, 2018. 2 +[3] Ignacio Alzugaray and Margarita Chli. HASTE: Multi-Hypothesis Asynchronous Speeded-up Tracking of Events. In The British Machine Vision Conference, page 744, 2020. 2 +[4] Patrick Bardow, Andrew J Davison, and Stefan Leutenegger. Simultaneous optical flow and intensity estimation from an event camera. In IEEE Conf. Comput. Vis. Pattern Recog., pages 884-892, 2016. 2 +[5] Herbert Bay, Andreas Ess, Tinne Tuytelaars, and Luc Van Gool. Speeded-up Robust Features (SURF). Comput. Vis. and Image Underst., 110(3):346-359, 2008. 1 +[6] Paul J Besl and Neil D McKay. Method for Registration of 3-D Shapes. In Sensor Fusion IV: Control Paradigms and Data Structures, pages 586-606, 1992. 2 +[7] Christian Brandli, Raphael Berner, Minhao Yang, Shih-Chii Liu, and Tobi Delbruck. A $240 \times 180$ 130 dB $3\mu s$ Latency Global Shutter Spatiotemporal Vision Sensor. IEEE J. of Solid-State Circuits, 49(10):2333-2341, 2014. 2 +[8] Guillermo Gallego, Henri Rebecq, and Davide Scaramuzza.
A Unifying Contrast Maximization Framework for Event Cameras, with Applications to Motion, Depth, and Optical Flow Estimation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 3867-3876, 2018. 1 +[9] Yue Gao, Yifan Feng, Shuyi Ji, and Rongrong Ji. HGNN+: General Hypergraph Neural Networks. IEEE Trans. Pattern Anal. Mach. Intell., 45(3):3181-3199, 2023. 4 +[10] Daniel Gehrig, Henri Rebecq, Guillermo Gallego, and Davide Scaramuzza. EKLT: Asynchronous Photometric Feature Tracking using Events and Frames. Int. J. Comput. Vis., 128(3):601-618, 2020. 2, 3, 6, 7 +[11] Daniel Gehrig, Michelle Ruegg, Mathias Gehrig, Javier Hidalgo-Carrió, and Davide Scaramuzza. Combining Events and Frames Using Recurrent Asynchronous Multimodal Networks for Monocular Depth Prediction. IEEE Robot. and Autom. Lett., 6(2):2822-2829, 2021. 3 +[12] Mathias Gehrig, Mario Millhäsler, Daniel Gehrig, and Davide Scaramuzza. E-RAFT: Dense Optical Flow from Event Cameras. In Int. Conf. 3D Vis., pages 197–206. IEEE, 2021. 2, 6, 7 +[13] Gallego Guillermo, Delbruck Tobi, Michael Orchard Garrick, Bartolozzi Chiara, Taba Brian, Censi Andrea, Leutenegger Stefan, Davison Andrew, Conradt Jorg, Daniilidis Kostas, and Scaramuzza Davide. Event-Based Vision: A Survey. IEEE Trans. Pattern Anal. Mach. Intell., 2020. 2 +[14] Javier Hidalgo-Carrió, Daniel Gehrig, and Davide Scaramuzza. Learning monocular dense depth from events. In Int. Conf. on 3D Vis., pages 534-542. IEEE, 2020. 3 +[15] Javier Hidalgo-Carrio, Guillermo Gallego, and Davide Scaramuzza. Event-aided direct sparse odometry. In IEEE + +Conf. Comput. Vis. Pattern Recog., pages 5781-5790, 2022. 5, 6 +[16] Sumin Hu, Yeeun Kim, Hyungtae Lim, Alex Junho Lee, and Hyun Myung. eCDT: Event Clustering for Simultaneous Feature Detection and Tracking. In Int. Conf. Intel. Robot. Syst., pages 3808-3815. IEEE, 2022. 2 +[17] Hanme Kim, Stefan Leutenegger, and Andrew J. Davison. Real-Time 3D Reconstruction and 6-DoF Tracking with an Event Camera. In *Eur. Conf. Comput. Vis.*, pages 349–364, 2016. 1 +[18] Beat Kueng, Elias Mueggler, Guillermo Gallego, and Davide Scaramuzza. Low-latency visual odometry using event-based feature tracks. In Int. Conf. Intell. Robot. Syst., pages 16-23. IEEE, 2016. 2 +[19] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature Pyramid Networks for Object Detection. In IEEE Conf. Comput. Vis. Pattern Recog., pages 2117-2125, 2017. 4 +[20] Daqi Liu, Alvaro Parra, and Tat-Jun Chin. Globally Optimal Contrast Maximisation for Event-based Motion Estimation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6349-6358, 2020. 1 +[21] Pengpeng Liu, Irwin King, Michael R Lyu, and Jia Xu. Flow2stereo: Effective Self-Supervised Learning of Optical Flow and Stereo Matching. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6648-6657, 2020. 5 +[22] Ilya Loshchilov and Frank Hutter. SGDR: Stochastic Gradient Descent with Warm Restarts. In Int. Conf. Learn. Represent., 2017. 7 +[23] Ilya Loshchilov and Frank Hutter. Decoupled Weight Decay Regularization. Int. Conf. Learn. Represent., 2019. 7 +[24] David G Lowe. Distinctive Image Features from Scale-Invariant Keypoints. Int. J. Comput. Vis., 60:91-110, 2004. 1 +[25] Bruce D Lucas and Takeo Kanade. An Iterative Image Registration Technique with an Application to Stereo Vision. In IJCAI, pages 674-679, 1981. 1, 6 +[26] Nico Messikommer, Carter Fang, Mathias Gehrig, and Davide Scaramuzza. Data-Driven Feature Tracking for Event Cameras. In IEEE Conf. Comput. Vis. 
Pattern Recog., pages 5642–5651, 2023. 2, 3, 4, 6, 7 +[27] Elias Mueggler, Henri Rebecq, Guillermo Gallego, Tobi Delbruck, and Davide Scaramuzza. The Event-Camera Dataset and Simulator: Event-Based Data for Pose Estimation, Visual Odometry, and SLAM. Int. J. of Robot. Researc., 36(2): 142-149, 2017. 5, 6 +[28] Yeongwoo Nam, Mohammad Mostafavi, Kuk-Jin Yoon, and Jonghyun Choi. Stereo Depth From Events Cameras: Concentrate and Focus on the Future. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6114-6123, 2022. 3, 6, 7 +[29] Liyuan Pan, Miaomiao Liu, and Richard Hartley. Single Image Optical Flow Estimation with an Event Camera. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1669-1678. IEEE, 2020. 2 +[30] Federico Paredes-Vallés, Kirk YW Scheper, and Guido CHE De Croon. Unsupervised Learning of a Hierarchical Spiking Neural Network for Optical Flow Estimation: From Events + +to Global Motion Perception. IEEE Trans. Pattern Anal. Mach. Intell., 42(8):2051-2064, 2019. 2 +[31] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An Imperative Style, High-Performance Deep Learning Library. Adv. Neural Inform. Process. Syst., 32, 2019. 6 +[32] Lichtsteiner Patrick, Posch Christoph, and Delbruck Tobi. A $128 \times 128$ dB 15 μs Latency Asynchronous Temporal Contrast Vision Sensor. IEEE J. of Solid-State Circuits, 43 (2):566-576, 2008. 2 +[33] Jianbo Shi and Carlo Tomasi. Good Features to Track. In IEEE Conf. Comput. Vis. Pattern Recog., pages 593-600. IEEE, 1994. 1 +[34] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting. Adv. Neural Inform. Process. Syst., 28, 2015. 4 +[35] Carlo Tomasi and Takeo Kanade. Detection and Tracking of Point. Int. J. Comput. Vis., 9(137-154):3, 1991. 1 +[36] Jiqing Zhang, Xin Yang, Yingkai Fu, Xiaopeng Wei, Baocai Yin, and Bo Dong. Object Tracking by Jointly Exploiting Frame and Event Domain. In Int. Conf. Comput. Vis., pages 13043-13052, 2021. 1 +[37] Jiqing Zhang, Bo Dong, Haiwei Zhang, Jianchuan Ding, Felix Heide, Baocai Yin, and Xin Yang. Spiking Transformers for Event-Based Single Object Tracking. In IEEE Conf. Comput. Vis. Pattern Recog., pages 8801-8810, 2022. 1 +[38] Alex Zihao Zhu, Nikolay Atanasov, and Kostas Daniilidis. Event-Based Feature Tracking with Probabilistic Data Association. In IEEE Int. Conf. Robot. Autom., pages 4465-4470. IEEE, 2017. 2 +[39] Alex Zihao Zhu, Yibo Chen, and Kostas Daniilidis. Realtime Time Synchronized Event-Based Stereo. In Eur. Conf. Comput. Vis., pages 433-447, 2018. 3, 6, 7 +[40] Alex Zihao Zhu, Liangzhe Yuan, Kenneth Chaney, and Kostas Daniilidis. Unsupervised Event-Based Learning of Optical Flow, Depth, and Egomotion. In IEEE Conf. Comput. Vis. Pattern Recog., pages 989-997, 2019. 3 +[41] Alex Zihao Zhu, Nikolay Atanasov, and Kostas Daniilidis. Event-Based Visual Inertial Odometry. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5391-5399, 2017. 
1 \ No newline at end of file diff --git a/2024/3D Feature Tracking via Event Camera/images.zip b/2024/3D Feature Tracking via Event Camera/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..00a776eea4895a848df7f91b2e78040e82f08d48 --- /dev/null +++ b/2024/3D Feature Tracking via Event Camera/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57a2bb355a3a00fe944a823354bd5f4db79516eef695bee586f4841dbace6e0f +size 610424 diff --git a/2024/3D Feature Tracking via Event Camera/layout.json b/2024/3D Feature Tracking via Event Camera/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..8d526d80e0ca7a9ebafcb016da4ab5d67df8debe --- /dev/null +++ b/2024/3D Feature Tracking via Event Camera/layout.json @@ -0,0 +1,10613 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 174, + 103, + 421, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 103, + 421, + 121 + ], + "spans": [ + { + "bbox": [ + 174, + 103, + 421, + 121 + ], + "type": "text", + "content": "3D Feature Tracking via Event Camera" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "text", + "content": "Siqi Li" + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "text", + "content": " Zhikuan Zhou" + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "text", + "content": " Zhou Xue" + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "text", + "content": " Yipeng Li" + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "text", + "content": " Shaoyi Du" + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "text", + "content": " Yue Gao" + }, + { + "bbox": [ + 105, + 142, + 491, + 158 + ], + "type": "inline_equation", + "content": "^{1*}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 64, + 159, + 529, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 159, + 529, + 171 + ], + "spans": [ + { + "bbox": [ + 64, + 159, + 529, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 64, + 159, + 529, + 171 + ], + "type": "text", + "content": " {BNRist, THUIBCS, School of Software}, Tsinghua University " + }, + { + "bbox": [ + 64, + 159, + 529, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 64, + 159, + 529, + 171 + ], + "type": "text", + "content": "Li Auto " + }, + { + "bbox": [ + 64, + 159, + 529, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 64, + 159, + 529, + 171 + ], + "type": "text", + "content": "Department of Automation, Tsinghua University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 173, + 517, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 173, + 517, + 199 + ], + "spans": [ + { + 
"bbox": [ + 77, + 173, + 517, + 199 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 77, + 173, + 517, + 199 + ], + "type": "text", + "content": "National Key Laboratory of Human-Machine Hybrid Augmented Intelligence, National Engineering Research Center for Visual Information and Applications, and Institute of Artificial Intelligence and Robotics, Xi'an Jiaotong University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 202, + 529, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 202, + 529, + 213 + ], + "spans": [ + { + "bbox": [ + 62, + 202, + 529, + 213 + ], + "type": "text", + "content": "{lsq19, zzk22}@mails.tsinghua.edu.cn, xuezhou08@gmail.com, dushaoyi@xjtu.edu.cn, {liep, gaoyue}@tsinghua.edu.cn" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 50, + 228, + 541, + 400 + ], + "blocks": [ + { + "bbox": [ + 50, + 228, + 541, + 400 + ], + "lines": [ + { + "bbox": [ + 50, + 228, + 541, + 400 + ], + "spans": [ + { + "bbox": [ + 50, + 228, + 541, + 400 + ], + "type": "image", + "image_path": "3f4a0f5c4a276caa64fde7232058fb978b99663fb703a31baad1d97f4a1c1148.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 402, + 547, + 436 + ], + "lines": [ + { + "bbox": [ + 46, + 402, + 547, + 436 + ], + "spans": [ + { + "bbox": [ + 46, + 402, + 547, + 436 + ], + "type": "text", + "content": "Figure 1. We present the first high-speed 3D feature tracking method via stereo event cameras and the corresponding high-speed 3D feature tracking dataset. Our proposed method takes high temporal resolution event streams captured from stereo event cameras as input, and could predict the long-term feature motion trajectories of multiple high-speed moving objects within the scene at a rate of 250 FPS." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 440, + 192, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 440, + 192, + 452 + ], + "spans": [ + { + "bbox": [ + 143, + 440, + 192, + 452 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 457, + 290, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 457, + 290, + 661 + ], + "spans": [ + { + "bbox": [ + 45, + 457, + 290, + 661 + ], + "type": "text", + "content": "This paper presents the first 3D feature tracking method with the corresponding dataset. Our proposed method takes event streams from stereo event cameras as input to predict 3D trajectories of the target features with high-speed motion. To achieve this, our method leverages a joint framework to predict the 2D feature motion offsets and the 3D feature spatial position simultaneously. A motion compensation module is leveraged to overcome the feature deformation. A patch matching module based on bipolarity hypergraph modeling is proposed to robustly estimate the feature spatial position. Meanwhile, we collect the first 3D feature tracking dataset with high-speed moving objects and ground truth 3D feature trajectories at 250 FPS, named E-3DTrack, which can be used as the first high-speed 3D feature tracking benchmark. Our code and dataset could be found at: https://github.com/lisiqi19971013/E-3DTrack." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 669, + 128, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 128, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 128, + 681 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "content": "Feature tracking aims to predict the long-term trajectories of target features, which is fundamental in many computer" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 441, + 545, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 441, + 545, + 502 + ], + "spans": [ + { + "bbox": [ + 304, + 441, + 545, + 502 + ], + "type": "text", + "content": "vision tasks, e.g., object tracking [36, 37], 3D reconstruction [8, 17], and SLAM [20, 41]. Frame-based feature tracking methods [5, 24, 25, 33, 35] have been extensively investigated in the past decades. However, all existing methods focus on tracking 2D feature trajectories in the image plane." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 505, + 546, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 505, + 546, + 625 + ], + "spans": [ + { + "bbox": [ + 304, + 505, + 546, + 625 + ], + "type": "text", + "content": "In real-world scenarios, objects are moving in 3D space, e.g., cars are racing on the road from near to far. The tracking of features with high-speed 3D motion becomes essential. Consequently, there is an imperative need to investigate 3D feature tracking methods capable of predicting feature trajectories for objects undergoing high-speed 3D motion. Such methods hold significant promise for various downstream applications, e.g., VR, AR, and autonomous driving. To the best of our knowledge, existing literature lacks established high-speed 3D feature tracking methodologies." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "content": "For the 3D feature tracking of high-speed moving objects, the main challenges lie in three folds. (1) With the limited frame rate of traditional frame-based cameras, the motion of high-speed moving objects may not be consistently captured due to the blind time between consecutive frames. Therefore, how to continually record valid motion information of high-speed moving objects is the first chal" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "18974" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "text", + "content": "lenge. (2) The second challenge lies in establishing the correlation between the 3D position of the feature and the 2D visual data acquired by cameras to generate a continuous and smooth 3D feature trajectory. (3) To the best of our knowledge, there are currently no existing high-speed 3D feature tracking datasets. This is primarily due to the difficulty in capturing ground truth 3D feature trajectories of high-speed moving objects, which is constrained by the insufficient capture frequency of existing 3D vision sensors. Thus, the lack of high-speed 3D feature tracking dataset is the third challenge, which is also a principal impediment to the advancement of research within this domain." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "spans": [ + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "text", + "content": "To overcome the motion capture challenge, we use event cameras to record motion dynamics of high-speed moving objects. Event cameras [7, 32] are bio-inspired vision sensors that asynchronously respond to pixel-wise brightness changes. Specifically, when the logarithmic change of the brightness at a pixel exceeds a certain threshold, i.e., " + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "inline_equation", + "content": "|\\Delta_t \\log I(x,y,t)| > C" + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "inline_equation", + "content": "I(x,y,t)" + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "text", + "content": " is the brightness at pixel " + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "inline_equation", + "content": "(x,y)" + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "text", + "content": " and timestamp " + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "text", + "content": ", an event will be triggered, denoted as " + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "inline_equation", + "content": "e = (t,x,y,p)" + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "inline_equation", + "content": "p \\in \\{1,-1\\}" + }, + { + "bbox": [ + 46, + 218, + 289, + 410 + ], + "type": "text", + "content": " is the polarity. The output event stream of event cameras, formed by events triggered by all pixels, showcases their remarkably high temporal resolution (in the order of microseconds) and broad dynamic range (up to 140 dB) [13]. 
These unique features of event cameras render them promising tools for achieving 3D feature tracking in the context of high-speed moving objects." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 412, + 289, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 412, + 289, + 592 + ], + "spans": [ + { + "bbox": [ + 46, + 412, + 289, + 592 + ], + "type": "text", + "content": "To address the aforementioned technical challenge, we propose a high-speed 3D feature tracking method based on stereo event cameras, predicting the long-term 3D trajectories of target features from stereo event streams and template patches. To achieve 3D feature tracking, our proposed method leverages a joint framework to predict the 2D feature motion offsets and the feature spatial position at each timestamp simultaneously. A motion compensation module is leveraged to adapt to the feature deformation, and a patch matching module based on bi-polarity hypergraph modeling is proposed to accurately estimate the feature spatial position. In addition, we introduce a stereo motion consistency mechanism that establishes the constraint between the feature motion offsets and the spatial position to achieve smooth 3D trajectory estimation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "content": "To address the data challenge, we establish a hybrid vision system and curate the first real-world event-based 3D feature tracking dataset, named E-3DTrack. Our dataset includes multiple objects demonstrating high-speed motion in the scene, with stereo event cameras capturing high temporal resolution event streams, as shown in Fig. 1. To obtain the ground truth of the 3D feature trajectories, we utilize the Optitrack motion capture system to record the motion trajectory of each moving object. This information is then integrated with the high-precision object point cloud" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 547, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 144 + ], + "type": "text", + "content": "scanned by FARO Quantum ScanArm, resulting in the generation of the ground truth 3D trajectories of each feature at a rate of 250 FPS. To the best of our knowledge, our dataset is the first event-based feature tracking dataset containing high-speed moving objects and providing 3D ground truth feature trajectories." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 316, + 144, + 525, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 144, + 525, + 156 + ], + "spans": [ + { + "bbox": [ + 316, + 144, + 525, + 156 + ], + "type": "text", + "content": "Our contributions could be summarized as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 156, + 545, + 335 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 306, + 156, + 545, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 156, + 545, + 191 + ], + "spans": [ + { + "bbox": [ + 306, + 156, + 545, + 191 + ], + "type": "text", + "content": "- We propose the first high-speed 3D feature tracking method based on stereo event cameras, which could track the 3D trajectories of features with high-speed motion." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 192, + 545, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 192, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 306, + 192, + 545, + 275 + ], + "type": "text", + "content": "- We achieve satisfactory 3D feature tracking performance through a motion compensation module for addressing feature deformation, a patch matching module based on bi-polarity hypergraph modeling for accurate estimation of 3D feature positions, and a stereo motion consistency mechanism to establish constraints between feature motion offsets and 3D position." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 275, + 545, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 275, + 545, + 335 + ], + "spans": [ + { + "bbox": [ + 306, + 275, + 545, + 335 + ], + "type": "text", + "content": "- We collect the first real-world 3D feature tracking dataset containing multiple high-speed moving objects, named E-3DTrack. Our dataset contains stereo event streams and 250 FPS ground truth 3D feature trajectories, which could be used as the 3D feature tracking benchmark." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 346, + 392, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 346, + 392, + 358 + ], + "spans": [ + { + "bbox": [ + 306, + 346, + 392, + 358 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 366, + 545, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 545, + 534 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 545, + 534 + ], + "type": "text", + "content": "Trajectory Prediction via Event Camera. Event-based feature tracking methods have been developed rapidly within the last decade. Earlier works [18, 38] treat the events as a point set and used ICP [6] to estimate feature motion trajectories. Then, EKLT [10] is proposed to obtain feature patch from the reference frame as template, and use the event stream to track the template and predict the trajectory. Meanwhile, some event-by-event trackers [2, 3] are proposed to exploit the asynchronicity of event camera, e.g., eCDT [16] employs a clustering method to cluster adjacent events, and uses cluster descriptors to find continual feature tracks. Recently, DeepEvT [26] is proposed as the first data-driven event-based feature tracking method, which achieves state-of-the-art 2D feature tracking performance." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 534, + 545, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 605 + ], + "type": "text", + "content": "An alternative approach for trajectory prediction is optical flow estimation, wherein the pixel-level motion field is predicted using the input event stream. Compared with feature tracking, these methods [1, 4, 12, 29, 30] focus more on estimating the motion field between adjacent moments and lack modeling of long-term trajectory consistency." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 605, + 545, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 653 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 653 + ], + "type": "text", + "content": "However, all these existing trajectory prediction methods could only predict 2D feature trajectories in the image plane while the real objects are moving in 3D space, i.e., the predicted feature motion trajectories are information-deficient." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": "Event-based 3D Position Estimation. As the 2D feature trajectories can be predicted, a simple and straightforward solution is to use a monocular or stereo depth estimation method to predict the depth of the feature and calculate the 3D position. In recent years, several event-based" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18975" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 68, + 545, + 272 + ], + "blocks": [ + { + "bbox": [ + 53, + 68, + 545, + 272 + ], + "lines": [ + { + "bbox": [ + 53, + 68, + 545, + 272 + ], + "spans": [ + { + "bbox": [ + 53, + 68, + 545, + 272 + ], + "type": "image", + "image_path": "7a23ae9b22e080f0e58ca26cbca2c176a2e44801272b3a01263a7167241dfc85.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "lines": [ + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "spans": [ + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "text", + "content": "Figure 2. Our proposed method takes stereo event streams as input to predict the 3D trajectory of the target feature provided in the initial template patch " + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "inline_equation", + "content": "I_{t_0}" + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "text", + "content": ". For a subsequent timestamp " + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "text", + "content": ", the deformed template patch " + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "inline_equation", + "content": "I_{t_i}" + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "text", + "content": " is predicted using the motion compensation module. 
Then, " + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "inline_equation", + "content": "I_{t_i}" + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "inline_equation", + "content": "I_{t_0}" + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "text", + "content": ", and the events " + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "inline_equation", + "content": "P_{t_i}" + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "text", + "content": " triggered within the spatiotemporal neighboring patch of the predicted feature position " + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{t_i}" + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "text", + "content": " are forwarded into the offset estimation module to estimate the feature motion offsets " + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{u}_{t_i}" + }, + { + "bbox": [ + 46, + 276, + 546, + 332 + ], + "type": "text", + "content": ". Meanwhile, a patch matching module based on bi-polarity hypergraph modeling is leveraged to predict the disparity. Finally, a projection operation is performed to update the 3D trajectory." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 343, + 287, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 343, + 287, + 380 + ], + "spans": [ + { + "bbox": [ + 46, + 343, + 287, + 380 + ], + "type": "text", + "content": "monocular [11, 14, 40] or stereo [28, 39] depth estimation methods are proposed, which could estimate the depth map from the input single-view or multi-view event streams." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 380, + 288, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 380, + 288, + 441 + ], + "spans": [ + { + "bbox": [ + 46, + 380, + 288, + 441 + ], + "type": "text", + "content": "However, we will show that the simple combination of these two types of methods could not achieve satisfying long-term 3D feature trajectories prediction performance in Sec. 5.2. Therefore, high-speed 3D feature tracking is still a challenging open problem." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 453, + 103, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 453, + 103, + 466 + ], + "spans": [ + { + "bbox": [ + 47, + 453, + 103, + 466 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 475, + 287, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 475, + 287, + 522 + ], + "spans": [ + { + "bbox": [ + 46, + 475, + 287, + 522 + ], + "type": "text", + "content": "In this section, we commence with an overview of the pipeline in Sec. 3.1, subsequently delving into the detailed architecture in Sec. 3.2, and conclude by outlining the supervision of our method in Sec. 3.3." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 533, + 115, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 533, + 115, + 544 + ], + "spans": [ + { + "bbox": [ + 47, + 533, + 115, + 544 + ], + "type": "text", + "content": "3.1. 
Overview" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 552, + 287, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 552, + 287, + 660 + ], + "spans": [ + { + "bbox": [ + 46, + 552, + 287, + 660 + ], + "type": "text", + "content": "As shown in Fig. 2, our proposed method takes stereo event streams as input to predict 3D feature trajectories in camera 1 coordinate system. The target features are contained in gray-scale template patches at the initial moment. This is the common setting for event-based feature tracking, e.g., EKLT [10] and DeepEvT [26]. Our method leverages a joint framework to predict features' 2D motion offsets and the 3D spatial positions simultaneously at each timestamp, and further obtain the 3D trajectories through projection." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "text", + "content": "Specifically, let " + }, + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{E}^j = \\left\\{e_k^j = (t_k^j,u_k^j,v_k^j,p_k^j)\\right\\}" + }, + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "text", + "content": " denote the event stream captured by an event camera, where " + }, + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "inline_equation", + "content": "j = 1,2" + }, + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "text", + "content": " denotes camera 1 and camera 2, respectively, " + }, + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "inline_equation", + "content": "e_k^j" + }, + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "text", + "content": "-th event captured by camera " + }, + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 47, + 660, + 288, + 714 + ], + "type": "text", + "content": ". The feature to be tracked" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "spans": [ + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": "is provided in a " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "d \\times d" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": " template patch " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "I_0" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": " captured by camera 1 at initial moment " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": ". Then, the feature trajectory is predicted step-by-step. 
For a subsequent timestamp " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": ", we calculate the 2D feature coordinates " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{t_i}^1 = (u_{t_i}, v_{t_i})" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": " projected in camera 1 based on the predicted 3D feature position " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{t_i} = (x_{t_i}, y_{t_i}, z_{t_i})" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": " at the previous step. To calculate the feature trajectory, the events " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_i^1" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": " triggered in the " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "d \\times d" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": " patch around " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{t_i}^1" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": " and within the time bin " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "[t_i, t_{i+1}]" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": " are leveraged to provide feature motion information. Then, " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_i^1" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": " is converted into grid-based event patch " + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "inline_equation", + "content": "P_{t_i}^1" + }, + { + "bbox": [ + 304, + 343, + 546, + 476 + ], + "type": "text", + "content": " using the event representation method proposed in [26]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": "As shown in Fig. 2, the 3D movement of the object may cause deformation of the feature template patch. To tackle this challenge, we leverage a motion compensation module to predict the deformed template patch " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{t_i}" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " at timestamp " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": ". 
Then, " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{t_i}" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "I_{t_0}" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " are concatenated and forwarded into the offset estimation module together with " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "P_{t_i}^1" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " to predict the 2D feature motion offset " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{u}_{t_i}^1" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": ". To further estimate the 3D position of the target feature, we use the events triggered within the same " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " rows as " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "P_{t_i}^1" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{E}^2" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_i^2 = \\{e_k^2 | u_{t_i} - \\frac{d-1}{2} \\leq u_k^2 \\leq u_{t_i} + \\frac{d-1}{2}, t_i \\leq t_k^2 \\leq t_{i+1}\\}" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": ", to generate event row patch " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "R_{t_i}^2" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " using the same event representation method. Then, the disparity " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "d_{t_{i+1}}" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " could be predicted from " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "P_{t_i}^1" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "R_{t_i}^2" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " using our proposed patch matching module based on bipolarity hypergraph modeling. 
In addition, inspired by the fact that the 2D motion offsets in both camera planes have a constraint with the disparity change, we use the event patch " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "P_{t_i}^2" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " of camera 2 to compute the offset " + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{u}_{t_i}^2" + }, + { + "bbox": [ + 304, + 485, + 547, + 715 + ], + "type": "text", + "content": " in the training stage, and propose a stereo motion consistency mechanism to enhance the trajectory prediction. Finally, the 3D feature" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "18976" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "text", + "content": "position " + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{t_{i + 1}}" + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "inline_equation", + "content": "t_{i + 1}" + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "text", + "content": " is obtained by projection according to " + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{u}_{t_i}^1" + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "inline_equation", + "content": "d_{t_{i + 1}}" + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "text", + "content": ". In practice, the patch size is " + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "inline_equation", + "content": "d = 31" + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "text", + "content": ", and the length of the time bin is set to " + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "inline_equation", + "content": "4\\mathrm{ms}" + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "inline_equation", + "content": "t_{i + 1} - t_i = 4\\mathrm{ms}" + }, + { + "bbox": [ + 47, + 72, + 287, + 131 + ], + "type": "text", + "content": ". Thus, our proposed method could track the long-term 3D feature trajectories at 250 FPS." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 142, + 162, + 154 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 142, + 162, + 154 + ], + "spans": [ + { + "bbox": [ + 47, + 142, + 162, + 154 + ], + "type": "text", + "content": "3.2. 
Model Architecture" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "spans": [ + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": "Offset Estimation Module. As mentioned above, at times- tamp " + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": ", we use an offset estimation module to predict the feature motion offsets projected in the camera plane, which takes " + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{t_i}" + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "inline_equation", + "content": "I_{t_0}" + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "inline_equation", + "content": "P_{t_i}" + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": " as input. Inspired by the great success of DeepEvT [26], we use a similar two-branch Feature Pyramid Network (FPN) [19] to extract multi-modal features from event patch " + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "inline_equation", + "content": "P_{t_i}" + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": " and the template patches " + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "inline_equation", + "content": "I_{t_i}" + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "inline_equation", + "content": "I_{t_0}" + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": ", respectively. The FPN contains 4 down-sample layers and 4 up-sample layers. Then, the bottleneck feature of FPN is leveraged to calculate the correlation map between the event patch and the feature template patch. The correlation map is further concatenated with the multi-modal feature and forwarded into a joint encoder with 4 down-sample layers and a ConvLSTM [34] layer to obtain fused feature " + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "inline_equation", + "content": "F_{t_i}" + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": ". Then, we use a linear layer to compute the weights of " + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "inline_equation", + "content": "F_{t_{i - 1}}" + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "inline_equation", + "content": "F_{t_i}" + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": " and explicitly fuse the temporal information. Finally, a linear layer is leveraged to generate predicted feature motion offsets. Detailed network architecture is provided in the supplementary material. 
Using the offset estimation module, the feature motion offset " + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{u}_{t_i}" + }, + { + "bbox": [ + 47, + 161, + 288, + 412 + ], + "type": "text", + "content": " projected in the camera plane could be estimated." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "spans": [ + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "text", + "content": "Motion Compensation Module. As shown in Fig. 2, high-speed 3D moving objects may have depth change and rotation, which may cause feature shape deformation. Therefore, tracking with the initial template patch may lead to fatal errors or even incorrectly tracking other features. To tackle this problem, we leverage a motion compensation module to correct the template patch at each moment. Specifically, the feature template patch may have scaling, rotation, and shear changes. It should be noted that translation is not considered since the feature motion offset is already predicted. At the timestamp " + }, + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "text", + "content": ", the fused temporal feature " + }, + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "inline_equation", + "content": "F_{t_{i-1}}" + }, + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "text", + "content": " is leveraged as input to predict the scale factors " + }, + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "inline_equation", + "content": "s_x, s_y" + }, + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "text", + "content": ", rotation angle " + }, + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "text", + "content": ", and shear factors " + }, + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "inline_equation", + "content": "t_x, t_y" + }, + { + "bbox": [ + 46, + 414, + 287, + 594 + ], + "type": "text", + "content": " using 2 linear layers. 
Then, the affine transform is performed according to the predicted transform factors:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 597, + 287, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 597, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 61, + 597, + 287, + 624 + ], + "type": "interline_equation", + "content": "\\tilde {I} _ {t _ {i}} (u, v) = \\left[ \\begin{array}{c} \\beta s _ {x}, \\alpha s _ {y} \\\\ - \\alpha s _ {x}, \\beta s _ {y} \\end{array} \\right] \\left[ \\begin{array}{c} 1, a \\\\ - b, 1 + a b \\end{array} \\right] I _ {t _ {0}} (u, v), \\tag {1}", + "image_path": "3ef0dadab8b338d99130550a5821b2ace03b3d9cf2ca71bb569e80879f2f41b0.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "spans": [ + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "inline_equation", + "content": "\\alpha = \\sin \\theta" + }, + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "inline_equation", + "content": "\\beta = \\cos \\theta" + }, + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "inline_equation", + "content": "a = \\tan t_x" + }, + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "inline_equation", + "content": "b = \\tan t_y" + }, + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "text", + "content": ". Using the motion compensation module, the corrected template patch " + }, + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{t_i}" + }, + { + "bbox": [ + 47, + 628, + 287, + 664 + ], + "type": "text", + "content": " at each timestamp could be obtained." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 665, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 665, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 665, + 287, + 714 + ], + "type": "text", + "content": "Patch Matching Module. To further estimate the 3D position of the target feature, we propose a patch matching module based on bi-polarity hypergraph modeling to obtain the spatial position of the feature by predicting the disparity." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": "Different from traditional stereo matching, for the 3D feature tracking task, the target feature is contained in the local event patch " + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "inline_equation", + "content": "P_{t_i}^1" + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": ". Therefore, the disparity could only be predicted from the local patch instead of global information. 
Under such condition, mismatching will occur since the target scene may contain multiple similar features distributed in space and " + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "inline_equation", + "content": "P_{t_i}^1" + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": " only contains local information. Therefore, we propose a bi-polarity hypergraph-based high-order correlation modeling mechanism to eliminate mismatching." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "spans": [ + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": "As mentioned in Sec. 3.1, for each timestamp " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": ", we use the event patch " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "P_{t_i}^1" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " around " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{t_i}^1" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " and the corresponding event row patch " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "R_{t_i}^2" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " from camera 2 to achieve patch matching. Specifically, we use 4 convolutional layers to extract features " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_{t_i}^1 \\in \\mathbb{R}^{d \\times d \\times c}" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_{t_i}^2 \\in \\mathbb{R}^{d \\times W \\times c}" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "P_{t_i}^1" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "R_{t_i}^2" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": ", respectively, where " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " is the feature channel and " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " is the image width, i.e., the number of candidate matching positions. 
We further calculate the cost volume " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_{t_i} \\in \\mathbb{R}^{W \\times c}" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " composed of the feature similarity between " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_{t_i}^1" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_{t_i}^2" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " at each matching position, which represents the pair-wise similarity between " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "P_{t_i}^1" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " and the sub-patch of " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "R_{t_i}^2" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " at each matching position. Then, the " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " matching positions are used as vertices to construct bi-polarity hypergraphs. Compared to the pair-wise correlation contained in the cost volume, each hyperedge of a hypergraph could connect multiple vertices, i.e., high-order correlations among multiple vertices could be constructed. In practice, we use the Euclidean distance of the vertex feature as metric and calculate the " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " nearest neighbors of each vertex. For each vertex, we use a hyperedge to connect the vertices in its " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " neighbor vertices with spatial distance smaller than a certain threshold " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": ". Therefore, a positive hypergraph " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "G^+" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " with the adjacency matrix " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "H^+" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " could be constructed. Besides, for each vertex, vertices with spatial distance larger than " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " in its " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " neighbor vertices are connected by another hyperedge. 
Thus, a negative hypergraph " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "G^-" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " with the adjacency matrix " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "H^-" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " could be constructed. Each hyperedge of " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "G^+" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " connects matching patches that are semantic similar and spatially close to " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "P_{t_i}^1" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": ". These connections are expected to be enhanced. In contrast, each hyperedge of " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "G^-" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": " connects matching patches that are semantic similar but spatially distant from " + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "inline_equation", + "content": "P_{t_i}^1" + }, + { + "bbox": [ + 304, + 179, + 546, + 586 + ], + "type": "text", + "content": ", which are interference and needs to be suppressed. Then, inspired by [9], we propose a feature aggregation method based on bi-polarity hypergraphs:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 588, + 545, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 588, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 315, + 588, + 545, + 628 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\mathbf {C}} _ {t _ {i}} = \\mathbf {C} _ {t _ {i}} + \\sigma \\left(\\left(\\mathbf {D} _ {v} ^ {+}\\right) ^ {- 1} \\mathbf {H} ^ {+} \\left(\\mathbf {D} _ {e} ^ {+}\\right) ^ {- 1} \\left(\\mathbf {H} ^ {+}\\right) ^ {\\top} \\mathbf {C} _ {t _ {i}} \\boldsymbol {\\Theta} ^ {+}\\right), \\tag {2} \\\\ - \\sigma \\left(\\left(\\mathbf {D} _ {v} ^ {-}\\right) ^ {- 1} \\mathbf {H} ^ {-} \\left(\\mathbf {D} _ {e} ^ {-}\\right) ^ {- 1} \\left(\\mathbf {H} ^ {-}\\right) ^ {\\top} \\mathbf {C} _ {t _ {i}} \\boldsymbol {\\Theta} ^ {-}\\right) \\\\ \\end{array}", + "image_path": "599db89b2f8ab820a8763a97c303fa8d610caada33142d54d0481af432681b46.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_e^*" + }, + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_v^*" + }, + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "text", + "content": " are the diagonal matrices of hyperedge degree and vertex degree, respectively. 
" + }, + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\Theta^{*}" + }, + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "text", + "content": " is the learnable parameter, and " + }, + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\sigma (\\cdot)" + }, + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "text", + "content": " is the non-linear activation function. Using Eq. (2), features are aggregated to enhance vertices with similar features and spatial close and suppress vertices with similar features but spatially distant. Finally, " + }, + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{C}}_{t_i}" + }, + { + "bbox": [ + 304, + 629, + 546, + 713 + ], + "type": "text", + "content": " is forwarded into a 1D convolutional layer with the kernel size of" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18977" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "content": "3 to regress the matching result. Using the patch matching module, the disparity " + }, + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "inline_equation", + "content": "d_{t_i}" + }, + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "content": " of the feature is predicted." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 96, + 287, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 96, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 96, + 287, + 133 + ], + "type": "text", + "content": "Projection. After the feature motion offsets " + }, + { + "bbox": [ + 46, + 96, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{u}_{t_i}" + }, + { + "bbox": [ + 46, + 96, + 287, + 133 + ], + "type": "text", + "content": " and the disparity " + }, + { + "bbox": [ + 46, + 96, + 287, + 133 + ], + "type": "inline_equation", + "content": "d_{t_i}" + }, + { + "bbox": [ + 46, + 96, + 287, + 133 + ], + "type": "text", + "content": " are predicted, the 3D feature coordinates " + }, + { + "bbox": [ + 46, + 96, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{t_{i + 1}}" + }, + { + "bbox": [ + 46, + 96, + 287, + 133 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 46, + 96, + 287, + 133 + ], + "type": "inline_equation", + "content": "t_{i + 1}" + }, + { + "bbox": [ + 46, + 96, + 287, + 133 + ], + "type": "text", + "content": " could be computed using projection." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 140, + 219, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 140, + 219, + 152 + ], + "spans": [ + { + "bbox": [ + 47, + 140, + 219, + 152 + ], + "type": "text", + "content": "3.3. 
Supervision and Loss Functions" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "spans": [ + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "text", + "content": "Stereo Motion Consistency. For objects moving in 3D space captured by stereo cameras, the 2D motion offsets are strongly constrained with the disparity. Meanwhile, our offset estimation module is also deeply coupled with the patch matching module. Therefore, inspired by [21], we leverage a stereo motion consistency constraint to reinforce this correlation. Consider a point " + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "inline_equation", + "content": "\\mathbf{X} = (x,y,z)" + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "text", + "content": ", it's 2D coordinates in the camera plane could be calculated by " + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "inline_equation", + "content": "\\mathbf{u} = (u,v) = \\frac{f}{s}\\frac{(x,y)}{z}" + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "text", + "content": " is the camera focal length and " + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "text", + "content": " the coordinate convert factor. For calibrated stereo cameras with the baseline distance of " + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "text", + "content": ", the disparity of " + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "inline_equation", + "content": "d = \\frac{f}{s}\\frac{b}{z}" + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "text", + "content": ". By taking the time derivative, we could obtain that " + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "inline_equation", + "content": "\\frac{\\Delta d}{\\Delta t} = -\\frac{f}{s}\\frac{b}{z^2}\\frac{\\Delta z}{\\Delta t}" + }, + { + "bbox": [ + 46, + 158, + 288, + 319 + ], + "type": "text", + "content": ". Therefore, we have:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 80, + 320, + 287, + 346 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 320, + 287, + 346 + ], + "spans": [ + { + "bbox": [ + 80, + 320, + 287, + 346 + ], + "type": "interline_equation", + "content": "d _ {t _ {i}} - d _ {t _ {i - 1}} = \\Delta d = - \\frac {f}{s} \\frac {b}{z _ {t _ {i}} ^ {2}} \\left(z _ {t _ {i}} - z _ {t _ {i - 1}}\\right). 
\\tag {3}", + "image_path": "9fc1c5435cca306daaa4d58a9c93e1fa1ca088989a05c229210ebdceef55470f.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "spans": [ + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "text", + "content": "For the 2D motion offsets, we could similarly obtain that " + }, + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "inline_equation", + "content": "\\frac{\\Delta\\mathbf{u}}{\\Delta t} = \\frac{f}{zs} (\\frac{\\Delta x}{\\Delta t},\\frac{\\Delta y}{\\Delta t}) - \\frac{f}{z^2s}\\frac{\\Delta z}{\\Delta t} (x,y)" + }, + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "text", + "content": ", i.e., we have " + }, + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{u} = (\\Delta u,\\Delta v) = \\frac{f}{zs} (\\Delta x,\\Delta y) - \\frac{f\\Delta z}{z^2s} (x,y)" + }, + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "text", + "content": ". In practice, suppose the coordinates of a feature in camera 1 at timestamp " + }, + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{t_i}^1 = (x_{t_i},y_{t_i},z_{t_i})" + }, + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "text", + "content": ", then the coordinates in camera 2 is " + }, + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{t_i}^2 = (x_{t_i} - b,y_{t_i},z_{t_i})" + }, + { + "bbox": [ + 47, + 349, + 287, + 424 + ], + "type": "text", + "content": ". Therefore, we have:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 426, + 287, + 468 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 426, + 287, + 468 + ], + "spans": [ + { + "bbox": [ + 56, + 426, + 287, + 468 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\Delta u _ {t _ {i}} ^ {1} - \\Delta u _ {t _ {i}} ^ {2} = \\frac {f}{s} \\frac {b}{z _ {t _ {i}} ^ {2}} \\Delta z _ {t _ {i}} = - \\frac {f}{s} \\frac {b}{z _ {t _ {i}} ^ {2}} \\left(z _ {t _ {i}} - z _ {t _ {i - 1}}\\right). \\tag {4} \\\\ \\Delta v _ {t _ {i}} ^ {1} - \\Delta v _ {t _ {i}} ^ {2} = 0 \\\\ \\end{array}", + "image_path": "56e3a01e91f3c2dc9d8de76e794ee1e5ce5b51bffebecb0c7801190fe5fc2174.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 472, + 287, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 472, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 47, + 472, + 287, + 495 + ], + "type": "text", + "content": "Therefore, we could obtain the stereo motion constraint " + }, + { + "bbox": [ + 47, + 472, + 287, + 495 + ], + "type": "inline_equation", + "content": "\\Delta u_{t_i}^1 -\\Delta u_{t_i}^2 = d_{t_i} - d_{t_{i - 1}}" + }, + { + "bbox": [ + 47, + 472, + 287, + 495 + ], + "type": "text", + "content": " from Eq. (3) and Eq. (4)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 496, + 287, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 496, + 287, + 518 + ], + "spans": [ + { + "bbox": [ + 47, + 496, + 287, + 518 + ], + "type": "text", + "content": "According to the stereo motion constraint, we introduce the stereo motion consistency loss:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 520, + 287, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 520, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 51, + 520, + 287, + 533 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {i} ^ {\\mathrm {s m c}} = \\mathcal {L} _ {1} \\left(\\Delta u _ {t _ {i}} ^ {1} - \\Delta u _ {t _ {i}} ^ {2}, d _ {t _ {i}} - d _ {t _ {i - 1}}\\right) + \\mathcal {L} _ {1} \\left(\\Delta v _ {t _ {i}} ^ {1}, \\Delta v _ {t _ {i}} ^ {2}\\right), \\tag {5}", + "image_path": "468bbe49a92ee22a6ee91518a6c6b2ada5f8befb565c10691aeea1822726225e.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 536, + 214, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 536, + 214, + 547 + ], + "spans": [ + { + "bbox": [ + 47, + 536, + 214, + 547 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 536, + 214, + 547 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_1(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 47, + 536, + 214, + 547 + ], + "type": "text", + "content": " is the Manhattan Distance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 548, + 287, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 548, + 287, + 595 + ], + "spans": [ + { + "bbox": [ + 46, + 548, + 287, + 595 + ], + "type": "text", + "content": "Loss Functions. Since our proposed method could predict the 3D feature coordinate " + }, + { + "bbox": [ + 46, + 548, + 287, + 595 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{t_i}" + }, + { + "bbox": [ + 46, + 548, + 287, + 595 + ], + "type": "text", + "content": " at each timestamp, we use the Manhattan Distance between the predicted trajectories and ground truth trajectories as supervision:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 597, + 287, + 613 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 597, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 121, + 597, + 287, + 613 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {i} ^ {\\text {t r a j}} = \\mathcal {L} _ {1} \\left(\\mathbf {X} _ {t _ {i}}, \\mathbf {X} _ {t _ {i}} ^ {\\mathrm {g t}}\\right). 
\\tag {6}", + "image_path": "a4002b4c723d98534d9d9952b00a0988370111c08f78505b839b4c6933a1c0c3.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 616, + 287, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 616, + 287, + 698 + ], + "spans": [ + { + "bbox": [ + 46, + 616, + 287, + 698 + ], + "type": "text", + "content": "Since both the offset estimation module and the patch matching module severely affect the 3D trajectory prediction accuracy, we compute the ground truth 2D feature offsets " + }, + { + "bbox": [ + 46, + 616, + 287, + 698 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{t_i}^{1^{\\mathrm{gt}}}" + }, + { + "bbox": [ + 46, + 616, + 287, + 698 + ], + "type": "text", + "content": " and disparity " + }, + { + "bbox": [ + 46, + 616, + 287, + 698 + ], + "type": "inline_equation", + "content": "d_{t_i}^{\\mathrm{gt}}" + }, + { + "bbox": [ + 46, + 616, + 287, + 698 + ], + "type": "text", + "content": " at each timestamp based on ground truth 3D trajectory through projection and use them as supervision. In practice, the offset estimation is supervised with the loss function:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 701, + 287, + 717 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 701, + 287, + 717 + ], + "spans": [ + { + "bbox": [ + 115, + 701, + 287, + 717 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {i} ^ {\\text {o f f}} = \\mathcal {L} _ {1} \\left(\\Delta \\mathbf {u} _ {t _ {i}} ^ {1}, \\Delta \\mathbf {u} _ {t _ {i}} ^ {1 ^ {\\mathrm {g t}}}\\right). \\tag {7}", + "image_path": "f41747fd1b23c1c8d12a05774ab1b51162e69649770219a26631013c5084a73f.jpg" + } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 307, + 96, + 545, + 159 + ], + "blocks": [ + { + "bbox": [ + 305, + 71, + 545, + 93 + ], + "lines": [ + { + "bbox": [ + 305, + 71, + 545, + 93 + ], + "spans": [ + { + "bbox": [ + 305, + 71, + 545, + 93 + ], + "type": "text", + "content": "Table 1. Comparison of our E-3DTrack dataset with other existing event-based feature tracking datasets." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 96, + 545, + 159 + ], + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 159 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 159 + ], + "type": "table", + "html": "
<table><tr><td>Dataset</td><td>Dim.</td><td>Motion</td><td>Scenario</td><td>GT Freq.</td></tr>
<tr><td>EC [27]</td><td>2D</td><td>Homo.</td><td>Static</td><td>200</td></tr>
<tr><td>EDS [15]</td><td>2D</td><td>Homo.</td><td>Static</td><td>150</td></tr>
<tr><td>E-3DTrack</td><td>3D</td><td>Non-homo.</td><td>Dynamic</td><td>250</td></tr></table>
", + "image_path": "4a799901f3d6ab383c1cc04cbcff7ba38e695001d2763159712b95eca725b4a5.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 170, + 482, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 170, + 482, + 182 + ], + "spans": [ + { + "bbox": [ + 306, + 170, + 482, + 182 + ], + "type": "text", + "content": "The disparity prediction is supervised with:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 382, + 185, + 545, + 201 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 382, + 185, + 545, + 201 + ], + "spans": [ + { + "bbox": [ + 382, + 185, + 545, + 201 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {i} ^ {\\text {d i s p}} = \\mathcal {L} _ {1} \\left(d _ {t _ {i}}, d _ {t _ {i}} ^ {\\mathrm {g t}}\\right). \\tag {8}", + "image_path": "1174149845ec475d8c80bb47a5cf43a50904785113a14cecd5445b05fa09d984.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 204, + 545, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 204, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 305, + 204, + 545, + 228 + ], + "type": "text", + "content": "Finally, our model is trained end-to-end with the supervision of the following total loss function:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 345, + 229, + 545, + 261 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 229, + 545, + 261 + ], + "spans": [ + { + "bbox": [ + 345, + 229, + 545, + 261 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\sum_ {i = 1} ^ {N} \\left(\\mathcal {L} _ {i} ^ {\\text {t r a j}} + \\mathcal {L} _ {i} ^ {\\text {o f f}} + \\mathcal {L} _ {i} ^ {\\text {d i s p}} + \\alpha \\mathcal {L} _ {i} ^ {\\text {s m c}}\\right), \\tag {9}", + "image_path": "fd655d2c34e095f807e48d7bdaccf1ad5c891ab7dd2a0107fbdc42d72b0e52f3.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 262, + 545, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 262, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 306, + 262, + 545, + 274 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 262, + 545, + 274 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 306, + 262, + 545, + 274 + ], + "type": "text", + "content": " is a hyper-parameter and " + }, + { + "bbox": [ + 306, + 262, + 545, + 274 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 306, + 262, + 545, + 274 + ], + "type": "text", + "content": " is the sequence length." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 285, + 533, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 285, + 533, + 300 + ], + "spans": [ + { + "bbox": [ + 305, + 285, + 533, + 300 + ], + "type": "text", + "content": "4. 3D Feature Tracking Dataset: E-3DTrack" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 306, + 545, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 306, + 545, + 402 + ], + "spans": [ + { + "bbox": [ + 304, + 306, + 545, + 402 + ], + "type": "text", + "content": "In addressing the deficiency of high-speed 3D feature tracking datasets, we establish a hybrid vision system containing stereo event cameras and Optitrack, as shown in Fig. 3 (a), and curate the first event-based 3D feature tracking dataset, named E-3DTrack. 
Compared to existing event-based feature tracking datasets that contain only static scenes and 2D trajectories, our dataset is the first to contain high-speed moving objects and ground truth 3D feature trajectories." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 403, + 545, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 403, + 545, + 629 + ], + "spans": [ + { + "bbox": [ + 304, + 403, + 545, + 629 + ], + "type": "text", + "content": "Limited by the capturing frequency of 3D vision sensors (e.g., " + }, + { + "bbox": [ + 304, + 403, + 545, + 629 + ], + "type": "inline_equation", + "content": "< 30" + }, + { + "bbox": [ + 304, + 403, + 545, + 629 + ], + "type": "text", + "content": " FPS for LiDAR), it is difficult to accurately record the 3D feature trajectories of high-speed moving objects at a high frame rate. To tackle this problem, we use the motion capture system, i.e., Optitrack, to record the trajectory of each object attached with fixed markers. To explicitly obtain feature-level 3D trajectories, we use a scanner, i.e., FARO Quantum ScanArm, to capture the high precision point cloud of each object. Then, the 3D affine transform, incorporating a homogeneous scale, is calculated from the object coordinate system to the Optitrack coordinate system based on the markers' coordinates. This leads to the acquisition of the time-series point cloud sequence of the moving objects under the Optitrack coordinate system. Finally, the feature trajectories can be derived from the time-series point cloud sequence based on the feature point index. Hence, our dataset comprises ground truth 3D feature trajectories of high-speed moving objects at 250 FPS, surpassing the capturing frequency of most existing 3D vision sensors." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": "Using our hybrid vision system, we captured 40 high-speed motion scenarios containing a total of 1300 sequences. We randomly select 10 scenarios as the test set, and the remaining 30 scenarios are selected as the training set. Note that due to the cross-scene division, the scene in the test set are unseen in the training set. More details of our dataset are provided in the supplementary material." + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18978" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 72, + 171, + 243 + ], + "blocks": [ + { + "bbox": [ + 49, + 72, + 171, + 243 + ], + "lines": [ + { + "bbox": [ + 49, + 72, + 171, + 243 + ], + "spans": [ + { + "bbox": [ + 49, + 72, + 171, + 243 + ], + "type": "image", + "image_path": "66e0935b298ff5c1093ad283321fea5a22ff79f906e5d979e3c7a4799654f72a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 70, + 244, + 149, + 251 + ], + "lines": [ + { + "bbox": [ + 70, + 244, + 149, + 251 + ], + "spans": [ + { + "bbox": [ + 70, + 244, + 149, + 251 + ], + "type": "text", + "content": "(a) Our hybrid vision system." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 176, + 72, + 251, + 243 + ], + "blocks": [ + { + "bbox": [ + 176, + 72, + 251, + 243 + ], + "lines": [ + { + "bbox": [ + 176, + 72, + 251, + 243 + ], + "spans": [ + { + "bbox": [ + 176, + 72, + 251, + 243 + ], + "type": "image", + "image_path": "08521545b4d309877e6bcae82d07b84fc92d958dcadd6120848439c649462540.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 177, + 243, + 543, + 251 + ], + "lines": [ + { + "bbox": [ + 177, + 243, + 543, + 251 + ], + "spans": [ + { + "bbox": [ + 177, + 243, + 543, + 251 + ], + "type": "text", + "content": "(b) Samples of our dataset. From left to right: reference frame, feature patch, stereo event streams, and ground truth 3D feature trajectory." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 253, + 72, + 310, + 243 + ], + "blocks": [ + { + "bbox": [ + 253, + 72, + 310, + 243 + ], + "lines": [ + { + "bbox": [ + 253, + 72, + 310, + 243 + ], + "spans": [ + { + "bbox": [ + 253, + 72, + 310, + 243 + ], + "type": "image", + "image_path": "408a890f2805c6dd15b7b6310e34938141138fd7f8bcb91ea9ba3844f9ad53c4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 313, + 72, + 386, + 243 + ], + "blocks": [ + { + "bbox": [ + 313, + 72, + 386, + 243 + ], + "lines": [ + { + "bbox": [ + 313, + 72, + 386, + 243 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 386, + 243 + ], + "type": "image", + "image_path": "4543f26c0d61c9449759f4dbbbdf96a80a8d6f59dac960d350127f0e41a0cc16.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 388, + 72, + 460, + 243 + ], + "blocks": [ + { + "bbox": [ + 388, + 72, + 460, + 243 + ], + "lines": [ + { + "bbox": [ + 388, + 72, + 460, + 243 + ], + "spans": [ + { + "bbox": [ + 388, + 72, + 460, + 243 + ], + "type": "image", + "image_path": "5cd31bd7ee6863a32c9932f002dfe8ff5452a00ba92be1da30d5d9430bfde28b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 463, + 72, + 542, + 243 + ], + "blocks": [ + { + "bbox": [ + 463, + 72, + 542, + 243 + ], + "lines": [ + { + "bbox": [ + 463, + 72, + 542, + 243 + ], + "spans": [ + { + "bbox": [ + 463, + 72, + 542, + 243 + ], + "type": "image", + "image_path": "debfb1089fde99fdbeee925cb6fc76c9b491857fc71619cd0e0f66e4740da341.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 298, + 286, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 298, + 286, + 357 + ], + "spans": [ + { + "bbox": [ + 47, + 298, + 286, + 357 + ], + "type": "text", + "content": "Table 1 shows the comparison of our E-3DTrack dataset with other existing event-based feature tracking datasets, including Event Camera dataset (EC) [27] and Event-aided Direct Sparse Odometry (EDS) dataset [15]. The main advantages of our dataset are in the following three aspects." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 358, + 286, + 657 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 47, + 358, + 286, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 358, + 286, + 393 + ], + "spans": [ + { + "bbox": [ + 47, + 358, + 286, + 393 + ], + "type": "text", + "content": "- 3D trajectory. Our dataset is the first feature tracking dataset containing ground truth 3D trajectories, enabling the feature motion trajectory estimation in 3D space." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 395, + 286, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 395, + 286, + 501 + ], + "spans": [ + { + "bbox": [ + 47, + 395, + 286, + 501 + ], + "type": "text", + "content": "- Non-homogeneous motion. Our dataset is the first event-based feature tracking dataset containing high-speed moving objects. Existing EC and EDS datasets mainly contain stationary scenarios. Thus, feature motions are caused by the camera movement. Since there are no moving objects in the scene, the motions of all features are almost homogeneous, as shown in Fig. 4. In contrast, the feature motions in our dataset are non-homogeneous, which is more conducive to applications." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 502, + 286, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 502, + 286, + 609 + ], + "spans": [ + { + "bbox": [ + 47, + 502, + 286, + 609 + ], + "type": "text", + "content": "- Accurate ground truth. Our dataset contains ground truth 3D feature trajectories captured from Optitrack. In contrast, since the DAVIS346 event camera could record event streams and 25 FPS video simultaneously, the ground truth 2D trajectories in EC and EDS datasets are obtained using frame-based feature tracking method KLT [25], or further triangulating KLT tracks using camera poses and reprojecting them to the frames. Thus, our dataset contains more accurate ground truth trajectories." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 609, + 286, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 609, + 286, + 657 + ], + "spans": [ + { + "bbox": [ + 47, + 609, + 286, + 657 + ], + "type": "text", + "content": "Figure 3 (b) shows some samples of our E-3DTrack dataset. We visualize the reference frames, feature template patches, stereo event streams, and the ground truth 3D feature trajectories of each sample." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 669, + 127, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 127, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 127, + 681 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "content": "In this section, we first introduce the experimental settings. Then, we analyze the quantitative and qualitative compar" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 318, + 297, + 425, + 377 + ], + "blocks": [ + { + "bbox": [ + 46, + 255, + 545, + 289 + ], + "lines": [ + { + "bbox": [ + 46, + 255, + 545, + 289 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 545, + 289 + ], + "type": "text", + "content": "Figure 3. 
(a) Our hybrid vision system. (b) Samples of our E-3DTrack dataset. The first column is the reference frame at the initial moment, and the features to be tracked are marked in each frame. Some feature template patches are zoomed in for display in the second column. The stereo event streams and the ground truth 3D feature trajectories are shown in the last three columns, respectively." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 318, + 297, + 425, + 377 + ], + "lines": [ + { + "bbox": [ + 318, + 297, + 425, + 377 + ], + "spans": [ + { + "bbox": [ + 318, + 297, + 425, + 377 + ], + "type": "image", + "image_path": "06316860464a31f3aedc0892a1232443b9493735a91a6e0a4f62883e76f9b448.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 323, + 378, + 420, + 386 + ], + "lines": [ + { + "bbox": [ + 323, + 378, + 420, + 386 + ], + "spans": [ + { + "bbox": [ + 323, + 378, + 420, + 386 + ], + "type": "text", + "content": "(a) Sample from EC Dataset" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 390, + 543, + 401 + ], + "lines": [ + { + "bbox": [ + 307, + 390, + 543, + 401 + ], + "spans": [ + { + "bbox": [ + 307, + 390, + 543, + 401 + ], + "type": "text", + "content": "Figure 4. Examples from existing EC [27] and EDS [15] dataset." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 427, + 297, + 531, + 377 + ], + "blocks": [ + { + "bbox": [ + 427, + 297, + 531, + 377 + ], + "lines": [ + { + "bbox": [ + 427, + 297, + 531, + 377 + ], + "spans": [ + { + "bbox": [ + 427, + 297, + 531, + 377 + ], + "type": "image", + "image_path": "76a18df92186aa05f15cd2f015496559e22427e5a97ff952f9409d40c0a4f014.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 429, + 378, + 530, + 387 + ], + "lines": [ + { + "bbox": [ + 429, + 378, + 530, + 387 + ], + "spans": [ + { + "bbox": [ + 429, + 378, + 530, + 387 + ], + "type": "text", + "content": "(b) Sample from EDS Dataset" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 411, + 545, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 411, + 545, + 436 + ], + "spans": [ + { + "bbox": [ + 306, + 411, + 545, + 436 + ], + "type": "text", + "content": "isons, respectively. Finally, we conduct ablation studies to demonstrate the effectiveness of each proposed module." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 443, + 432, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 443, + 432, + 456 + ], + "spans": [ + { + "bbox": [ + 306, + 443, + 432, + 456 + ], + "type": "text", + "content": "5.1. Experimental Settings" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 462, + 545, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 462, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 462, + 545, + 581 + ], + "type": "text", + "content": "Comparison Methods. Since there are no existing high-speed 3D feature tracking methods, we use existing event-based trajectory prediction methods to obtain 2D feature trajectories, and use stereo depth estimation methods to further obtain the 3D feature trajectory. 
Specifically, we combine the event-based optical flow estimation method E-RAFT [12], event-based feature tracking methods EKLT [10] and DeepEvT [26] with event-based stereo depth estimation methods TSES [39] and SDE [28], respectively, as our baseline comparison methods." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 582, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 689 + ], + "type": "text", + "content": "Metrics. To evaluate our proposed method and other comparison methods, we use the Tracked Feature Ratio (TFR, higher is better), Feature Age [26] (FR, higher is better), and the Root Mean Squared Error (RMSE, lower is better) as the metrics. TFR is calculated as the ratio of the time that the spatial distance between the predicted 3D trajectory and the ground truth 3D trajectory is less than a certain threshold " + }, + { + "bbox": [ + 304, + 582, + 545, + 689 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 582, + 545, + 689 + ], + "type": "text", + "content": " to the total sequence time. See detailed definition in the supplementary material." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "Implementation Details. Our method is implemented based on PyTorch [31]. Our model is trained end-to-end" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18979" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 96, + 545, + 194 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 545, + 93 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 545, + 93 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 545, + 93 + ], + "type": "text", + "content": "Table 2. Quantitative results on our E-3DTrack dataset. Feature age (FA), tracked feature ratio (TFR), and root mean square error (RMSE) are selected as the metrics. Bold numbers represent the best scores, and underlined numbers represent the second-best scores." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 96, + 545, + 194 + ], + "lines": [ + { + "bbox": [ + 48, + 96, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 545, + 194 + ], + "type": "table", + "html": "
MethodFA(0.1m) ↑FA(0.15m) ↑FA(0.2m) ↑TFR(0.1m) ↑TFR(0.15m) ↑TFR(0.2m) ↑RMSE ↓
E-RAFT [12] + TSES [39]0.04090.06640.0920.17010.26670.34390.4726
E-RAFT [12] + SDE [28]0.13850.23990.32040.31210.47260.58060.3368
EKLT [10] + TSES [39]0.02320.04290.06280.11800.19610.26850.4806
EKLT [10] + SDE [28]0.10260.18560.25840.24210.37380.47000.4034
DeepEvT [26] + TSES [39]0.07130.11170.14520.37860.49910.58180.3549
DeepEvT [26] + SDE [28]0.23140.34620.43390.57820.70600.77650.1889
E-3DTrack (Ours)0.26010.41790.54280.69280.81640.87720.1181
", + "image_path": "9b9a10b086ab67a0b88f4a0013e2b35807314c93a72db9909dd9aa1f79c1d329.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 48, + 218, + 287, + 249 + ], + "blocks": [ + { + "bbox": [ + 54, + 203, + 279, + 214 + ], + "lines": [ + { + "bbox": [ + 54, + 203, + 279, + 214 + ], + "spans": [ + { + "bbox": [ + 54, + 203, + 279, + 214 + ], + "type": "text", + "content": "Table 3. Comparison of inference time on E-3DTrack dataset." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 218, + 287, + 249 + ], + "lines": [ + { + "bbox": [ + 48, + 218, + 287, + 249 + ], + "spans": [ + { + "bbox": [ + 48, + 218, + 287, + 249 + ], + "type": "table", + "html": "
MethodE-RAFT + SDEDeepEvT + SDEOurs
Time (ms/step)154.8393.2240.30
", + "image_path": "67becf0074b6712dc8070a378f68217081920948145beea1a1084d4262f4473f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "spans": [ + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "text", + "content": "for 100 epochs with a batch size of 16. The optimization method is AdamW [23], and the cosine annealing schedule [22] is leveraged. The learning rate decays from " + }, + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-4}" + }, + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "text", + "content": " within 100 epochs. The hyperparameters are selected as " + }, + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "inline_equation", + "content": "\\alpha = 0.25" + }, + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "text", + "content": " in Eq. (9), " + }, + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "inline_equation", + "content": "k = 3" + }, + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "inline_equation", + "content": "\\delta = 16" + }, + { + "bbox": [ + 46, + 263, + 287, + 336 + ], + "type": "text", + "content": " for bi-polarity hypergraph construction." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 346, + 189, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 346, + 189, + 358 + ], + "spans": [ + { + "bbox": [ + 47, + 346, + 189, + 358 + ], + "type": "text", + "content": "5.2. Quantitative Comparison" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "text", + "content": "Table. 2 shows the quantitative comparison of our proposed method with other comparison methods. From the table, we could observe that our proposed method significantly outperforms all comparison methods and achieve state-of-the-art performance. 
Specifically, compared with the second-best method, i.e., the combination of the state-of-the-art 2D event-based feature tracking method DeepEvT [26] and stereo depth estimation method SDE [28], our proposed method reduces the RMSE by " + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "inline_equation", + "content": "37.5\\%" + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "text", + "content": " and improves the FA by " + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "inline_equation", + "content": "12.4\\%" + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "inline_equation", + "content": "20.7\\%" + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "inline_equation", + "content": "25.1\\%" + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "text", + "content": " in terms of " + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "inline_equation", + "content": "c = 0.1 \\mathrm{~m}" + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "inline_equation", + "content": "0.15 \\mathrm{~m}" + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "inline_equation", + "content": "0.2 \\mathrm{~m}" + }, + { + "bbox": [ + 46, + 365, + 287, + 496 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 498, + 287, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 287, + 676 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 287, + 676 + ], + "type": "text", + "content": "Compared to comparison methods that achieve trajectory prediction and depth estimation separately, our proposed method leverages a joint framework to track the 3D feature trajectories of high-speed moving objects. This indicates that for 3D moving objects, the feature trajectory in the camera plane is highly correlated with the 3D position. The simple combination of 2D trajectory prediction and 3D position estimation will lead to fatal errors. Instead, our proposed method tracks 3D trajectories accurately using the stereo motion consistency constraint. Meanwhile, compared to traditional stereo depth estimation methods, our proposed patch matching module uses a high-order correlation modeling mechanism based on bi-polarity hypergraph to eliminate mismatching of similar features, further enhancing the 3D feature tracking robustness." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Table. 3 shows the inference time comparison of our proposed method with other comparison methods. 
Specifically, we test the inference time of each tracking update" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 309, + 203, + 381, + 258 + ], + "blocks": [ + { + "bbox": [ + 309, + 203, + 381, + 258 + ], + "lines": [ + { + "bbox": [ + 309, + 203, + 381, + 258 + ], + "spans": [ + { + "bbox": [ + 309, + 203, + 381, + 258 + ], + "type": "image", + "image_path": "60071049676d8abe68b498277633c3877768eef4b8b64f2772d487d0fcabc3b6.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 258, + 381, + 312 + ], + "blocks": [ + { + "bbox": [ + 309, + 258, + 381, + 312 + ], + "lines": [ + { + "bbox": [ + 309, + 258, + 381, + 312 + ], + "spans": [ + { + "bbox": [ + 309, + 258, + 381, + 312 + ], + "type": "image", + "image_path": "fb85a277362ca3fa2517db46065df845ab1d8a065784e68d3507291ae04b453e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 309, + 313, + 381, + 423 + ], + "blocks": [ + { + "bbox": [ + 309, + 313, + 381, + 423 + ], + "lines": [ + { + "bbox": [ + 309, + 313, + 381, + 423 + ], + "spans": [ + { + "bbox": [ + 309, + 313, + 381, + 423 + ], + "type": "image", + "image_path": "55a918718219c6b8e379082b265ed1d021ed8f9caca51f27e34e88aa294b46f1.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 424, + 381, + 431 + ], + "lines": [ + { + "bbox": [ + 307, + 424, + 381, + 431 + ], + "spans": [ + { + "bbox": [ + 307, + 424, + 381, + 431 + ], + "type": "text", + "content": "(a) Reference Feature Patch" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 381, + 203, + 461, + 258 + ], + "blocks": [ + { + "bbox": [ + 381, + 203, + 461, + 258 + ], + "lines": [ + { + "bbox": [ + 381, + 203, + 461, + 258 + ], + "spans": [ + { + "bbox": [ + 381, + 203, + 461, + 258 + ], + "type": "image", + "image_path": "6001b6668a4a34f5eed73689e5b8998d4c8648e61642cad5e036f7056d80fef1.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 382, + 258, + 461, + 313 + ], + "blocks": [ + { + "bbox": [ + 382, + 258, + 461, + 313 + ], + "lines": [ + { + "bbox": [ + 382, + 258, + 461, + 313 + ], + "spans": [ + { + "bbox": [ + 382, + 258, + 461, + 313 + ], + "type": "image", + "image_path": "9891ea5799361c2ab61fa2ea01ea44fccb490be75df0d0f00c89c75aec2fe094.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 382, + 313, + 461, + 423 + ], + "blocks": [ + { + "bbox": [ + 382, + 313, + 461, + 423 + ], + "lines": [ + { + "bbox": [ + 382, + 313, + 461, + 423 + ], + "spans": [ + { + "bbox": [ + 382, + 313, + 461, + 423 + ], + "type": "image", + "image_path": "bc7bed56ccc987baad30604d08a63a4145adfb5b64d5fc88c704a329abd62968.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 395, + 424, + 448, + 431 + ], + "lines": [ + { + "bbox": [ + 395, + 424, + 448, + 431 + ], + "spans": [ + { + "bbox": [ + 395, + 424, + 448, + 431 + ], + "type": "text", + "content": "(b) DeepEvT + SDE" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 462, + 203, + 544, + 258 + ], + "blocks": [ + { + "bbox": [ + 462, + 203, + 544, + 258 + 
], + "lines": [ + { + "bbox": [ + 462, + 203, + 544, + 258 + ], + "spans": [ + { + "bbox": [ + 462, + 203, + 544, + 258 + ], + "type": "image", + "image_path": "de7d353215f22019a59b572a331ffa0b6bb10f6c4efbc8ea70d7400801106583.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 462, + 258, + 544, + 313 + ], + "blocks": [ + { + "bbox": [ + 462, + 258, + 544, + 313 + ], + "lines": [ + { + "bbox": [ + 462, + 258, + 544, + 313 + ], + "spans": [ + { + "bbox": [ + 462, + 258, + 544, + 313 + ], + "type": "image", + "image_path": "c42af8aacb337deff3757c50aa8ab7d84123173aa59aad64a1e1900a482fa359.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 462, + 313, + 544, + 369 + ], + "blocks": [ + { + "bbox": [ + 462, + 313, + 544, + 369 + ], + "lines": [ + { + "bbox": [ + 462, + 313, + 544, + 369 + ], + "spans": [ + { + "bbox": [ + 462, + 313, + 544, + 369 + ], + "type": "image", + "image_path": "50415d6b9e9d0efeef18b30c59e5f65140153402a9358570a3d3d717cd9b1caf.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 462, + 369, + 544, + 423 + ], + "blocks": [ + { + "bbox": [ + 462, + 369, + 544, + 423 + ], + "lines": [ + { + "bbox": [ + 462, + 369, + 544, + 423 + ], + "spans": [ + { + "bbox": [ + 462, + 369, + 544, + 423 + ], + "type": "image", + "image_path": "a53b273207a25cdf6317f69b492bc024999198f6967e414dc4baafbbbe4a26ba.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 492, + 424, + 515, + 431 + ], + "lines": [ + { + "bbox": [ + 492, + 424, + 515, + 431 + ], + "spans": [ + { + "bbox": [ + 492, + 424, + 515, + 431 + ], + "type": "text", + "content": "(c) Ours" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 437, + 545, + 482 + ], + "lines": [ + { + "bbox": [ + 304, + 437, + 545, + 482 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 545, + 482 + ], + "type": "text", + "content": "Figure 5. Qualitative comparison on our E-3DTrack dataset. From left to right: the reference feature patch, the ground truth feature trajectories (red), the feature trajectories (blue) predicted by DeepEvT [26] + SDE [28] and our proposed method, respectively." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 484, + 545, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 484, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 304, + 484, + 545, + 544 + ], + "type": "text", + "content": "step. From the table, we could observe that compared to the second-best method, i.e., DeepEvT + SDE, our proposed method reduces the inference time by " + }, + { + "bbox": [ + 304, + 484, + 545, + 544 + ], + "type": "inline_equation", + "content": "56.8\\%" + }, + { + "bbox": [ + 304, + 484, + 545, + 544 + ], + "type": "text", + "content": " while achieving better tracking performance. This demonstrates the computational efficiency of our proposed method." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 552, + 441, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 552, + 441, + 565 + ], + "spans": [ + { + "bbox": [ + 306, + 552, + 441, + 565 + ], + "type": "text", + "content": "5.3. 
Qualitative Comparison" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": "Figure 5 shows the qualitative 3D feature tracking results of our proposed method and the second-best comparison method, i.e., DeepEvT [26] + SDE [28]. The predicted 3D trajectories and the ground truth trajectories are shown in blue and red, respectively. From the figure, we could observe that our proposed method achieves more robust 3D feature tracking. As shown in the first row, the comparison method achieves adequate feature tracking performance when facing simple scenarios where the object motions do not contain significant depth changes. Such scenarios are similar to 2D feature tracking. Similar observations could be found in the second row. For the white geometric model" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18980" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 83, + 166, + 178 + ], + "blocks": [ + { + "bbox": [ + 83, + 72, + 146, + 82 + ], + "lines": [ + { + "bbox": [ + 83, + 72, + 146, + 82 + ], + "spans": [ + { + "bbox": [ + 83, + 72, + 146, + 82 + ], + "type": "text", + "content": "Tracking Error" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 83, + 166, + 178 + ], + "lines": [ + { + "bbox": [ + 47, + 83, + 166, + 178 + ], + "spans": [ + { + "bbox": [ + 47, + 83, + 166, + 178 + ], + "type": "image", + "image_path": "f516e9abefed5e9c3bf0b240442b88451917c06c6ca9ffbef1a20d4c655b5706.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 169, + 83, + 287, + 178 + ], + "blocks": [ + { + "bbox": [ + 189, + 72, + 281, + 81 + ], + "lines": [ + { + "bbox": [ + 189, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 189, + 72, + 281, + 81 + ], + "type": "text", + "content": "Tracked Feature Ratio" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 169, + 83, + 287, + 178 + ], + "lines": [ + { + "bbox": [ + 169, + 83, + 287, + 178 + ], + "spans": [ + { + "bbox": [ + 169, + 83, + 287, + 178 + ], + "type": "image", + "image_path": "5a308180ff0d17a86bb0b60d51646c627c9dac3f90771f609e6964abd6baf8b4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 182, + 287, + 205 + ], + "lines": [ + { + "bbox": [ + 46, + 182, + 287, + 205 + ], + "spans": [ + { + "bbox": [ + 46, + 182, + 287, + 205 + ], + "type": "text", + "content": "Figure 6. Results of the mean tracking error (left) and the feature tracked ratio (right) over tracking time." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 220, + 287, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 220, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 46, + 220, + 287, + 364 + ], + "type": "text", + "content": "with slight depth variation, the comparison method achieves 3D feature tracking with slight oscillations. However, for the red star with large depth variation and rotation, it could not be tracked accurately by the comparison method. The last two rows show two extreme scenarios, i.e., the 3D motions of the objects are with large depth variation and rotation, which will cause significant feature shape deformation. Under such scenarios, our comparison method tracks the features with fatal errors. In contrast, our proposed method tracks the 3D trajectories of the high-speed moving features robustly and continuously due to our motion compensation module and patch matching module." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 364, + 288, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 364, + 288, + 508 + ], + "spans": [ + { + "bbox": [ + 46, + 364, + 288, + 508 + ], + "type": "text", + "content": "Figure 6 further shows the tracking error (RMSE) and the tracked feature ratio (TFR) over time on our E-3DTrack dataset. The threshold is selected as " + }, + { + "bbox": [ + 46, + 364, + 288, + 508 + ], + "type": "inline_equation", + "content": "c = 0.1 \\, \\text{m}" + }, + { + "bbox": [ + 46, + 364, + 288, + 508 + ], + "type": "text", + "content": " to calculate TFR. From the figure, we could observe that our proposed method can continuously track 3D trajectories of target features, i.e., our method maintains a high TFR consistently. From the figure, we could also observe that the TFR of E-RAFT + SDE is comparable with DeepEvT + SDE in initial stage, but gradually decreases over time. This is because the optical flow estimation method is lack of long-term consistent modeling. In contrast, our proposed method maintains a high TFR and a low tracking error over all time." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 515, + 174, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 515, + 174, + 529 + ], + "spans": [ + { + "bbox": [ + 47, + 515, + 174, + 529 + ], + "type": "text", + "content": "5.4. Ablation Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 533, + 287, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 287, + 629 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 287, + 629 + ], + "type": "text", + "content": "To demonstrate the effectiveness of each proposed module, we validate the performance of our model with and without the motion compensation module (denoted as MC), stereo motion consistency mechanism (denoted as " + }, + { + "bbox": [ + 46, + 533, + 287, + 629 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{\\mathrm{smc}}" + }, + { + "bbox": [ + 46, + 533, + 287, + 629 + ], + "type": "text", + "content": "), and the bi-polarity hypergraph-based high-order correlation modeling mechanism (denoted as BiHCM), respectively. The ablation experimental results are shown in Tab. 4. See supplementary material for detailed settings." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 630, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 288, + 714 + ], + "type": "text", + "content": "Bi-Polarity Hypergraph Modeling. From Tab. 4 we could observe that compared with our base model (row (1)), the addition of BiHCM will increase TFR from 0.5586 to 0.6082. Compared with our full model, the removal of the BiHCM will lead to an RMSE increase of " + }, + { + "bbox": [ + 46, + 630, + 288, + 714 + ], + "type": "inline_equation", + "content": "20.8\\%" + }, + { + "bbox": [ + 46, + 630, + 288, + 714 + ], + "type": "text", + "content": ". This is due to the fact that our proposed BiHCM could enhance the connection between patches with similar features that" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 307, + 85, + 542, + 194 + ], + "blocks": [ + { + "bbox": [ + 321, + 71, + 531, + 82 + ], + "lines": [ + { + "bbox": [ + 321, + 71, + 531, + 82 + ], + "spans": [ + { + "bbox": [ + 321, + 71, + 531, + 82 + ], + "type": "text", + "content": "Table 4. Ablation experiments on our E-3DTrack dataset." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 85, + 542, + 194 + ], + "lines": [ + { + "bbox": [ + 307, + 85, + 542, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 85, + 542, + 194 + ], + "type": "table", + "html": "
BiHCMLsmcMCTFR0.1 m ↑RMSE↓
(1)XXX0.55860.1807
(2)XX0.60820.1505
(3)XX0.59420.1512
(4)XX0.56600.1624
(5)X0.67050.1268
(6)X0.65990.1312
(7)X0.64410.1427
(8)0.69280.1181
", + "image_path": "2e6b97c77530af9c68b9cc924111819fa94be5f4ddff70f1437f517bbbf6c6bb.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 205, + 545, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 205, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 304, + 205, + 545, + 240 + ], + "type": "text", + "content": "are spatially close, and suppress patches with similar features but spatially distant, which could eliminate mismatching and further improve 3D feature tracking performance." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 241, + 546, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 241, + 546, + 349 + ], + "spans": [ + { + "bbox": [ + 304, + 241, + 546, + 349 + ], + "type": "text", + "content": "Stereo Motion Consistency. As shown in Tab. 4, compared with the base model, the addition of stereo motion consistency constraint will reduce RMSE from 0.1807 to 0.1512. Compared with the full model, the removal of the stereo motion consistency constraint will increase RMSE by " + }, + { + "bbox": [ + 304, + 241, + 546, + 349 + ], + "type": "inline_equation", + "content": "11.1\\%" + }, + { + "bbox": [ + 304, + 241, + 546, + 349 + ], + "type": "text", + "content": ". This is due to the fact that the stereo motion consistency could effectively constrain the correlation between the 2D trajectory and 3D spatial position of the objects, making our method predict more accurate and smooth 3D trajectory." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 349, + 547, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 349, + 547, + 445 + ], + "spans": [ + { + "bbox": [ + 304, + 349, + 547, + 445 + ], + "type": "text", + "content": "Motion Compensation. As shown in Tab. 4, compared with the base model and the full model, the addition and removal of the motion compensation module resulted in " + }, + { + "bbox": [ + 304, + 349, + 547, + 445 + ], + "type": "inline_equation", + "content": "10.1\\%" + }, + { + "bbox": [ + 304, + 349, + 547, + 445 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 349, + 547, + 445 + ], + "type": "inline_equation", + "content": "7.4\\%" + }, + { + "bbox": [ + 304, + 349, + 547, + 445 + ], + "type": "text", + "content": " decrease and increase in RMSE, respectively. With the addition of the motion compensation module, our proposed method could better deal with feature deformation caused by depth changes and rotations of moving objects, and achieve more robust 3D feature tracking." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 445, + 545, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 445, + 545, + 469 + ], + "spans": [ + { + "bbox": [ + 305, + 445, + 545, + 469 + ], + "type": "text", + "content": "These ablation experiments demonstrate the effectiveness of each proposed module." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 481, + 379, + 494 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 481, + 379, + 494 + ], + "spans": [ + { + "bbox": [ + 306, + 481, + 379, + 494 + ], + "type": "text", + "content": "6. 
Conclusion" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 502, + 547, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 502, + 547, + 646 + ], + "spans": [ + { + "bbox": [ + 304, + 502, + 547, + 646 + ], + "type": "text", + "content": "In this paper, we propose the first high-speed 3D feature tracking method that takes stereo event streams as input to estimate 3D feature trajectories. Our proposed method leverages a joint framework to obtain 3D feature trajectories by estimating the feature motion offsets and spatial position simultaneously. A motion compensation module and a patch matching module based on bi-polarity hypergraphs are proposed to achieve robust feature tracking. Meanwhile, the first 3D feature tracking dataset containing high-speed moving objects and ground truth 3D feature trajectories at 250 FPS is constructed, named E-3DTrack, which can be used as the first 3D feature tracking benchmark." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 657, + 410, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 657, + 410, + 671 + ], + "spans": [ + { + "bbox": [ + 306, + 657, + 410, + 671 + ], + "type": "text", + "content": "7. Acknowledgment" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "content": "This work was supported by National Natural Science Funds of China (No. 62021002 and No. 62088102), Beijing Natural Science Foundation (No. 4222025)." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18981" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 288, + 715 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "text", + "content": "[1] Himanshu Akolkar, Sio-Hoi Ieng, and Ryad Benosman. Real-Time High Speed Motion Prediction Using Fast Aperture-Robust Event-Driven Visual Flow. IEEE Trans. Pattern Anal. Mach. Intell., 44(1):361-372, 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 135, + 287, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 287, + 168 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 287, + 168 + ], + "type": "text", + "content": "[2] Ignacio Alzugaray and Margarita Chli. ACE: An Efficient Asynchronous Corner Tracker for Event Cameras. In Int. Conf. on 3D Vis., pages 653-661. IEEE, 2018. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 168, + 287, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 168, + 287, + 212 + ], + "spans": [ + { + "bbox": [ + 53, + 168, + 287, + 212 + ], + "type": "text", + "content": "[3] Ignacio Alzugaray and Margarita Chli. HASTE: MultiHypothesis Asynchronous Speeded-up Tracking of Events. In The British Machine Vision Conference, page 744, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 213, + 287, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 213, + 287, + 257 + ], + "spans": [ + { + "bbox": [ + 53, + 213, + 287, + 257 + ], + "type": "text", + "content": "[4] Patrick Bardow, Andrew J Davison, and Stefan Leutenegger. Simultaneous optical flow and intensity estimation from an event camera. In IEEE Conf. Comput. Vis. Pattern Recog., pages 884-892, 2016. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 258, + 287, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 287, + 291 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 287, + 291 + ], + "type": "text", + "content": "[5] Herbert Bay, Andreas Ess, Tinne Tuytelaars, and Luc Van Gool. Speeded-up Robust Features (SURF). Comput. Vis. and Image Underst., 110(3):346-359, 2008. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 291, + 288, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 291, + 288, + 324 + ], + "spans": [ + { + "bbox": [ + 53, + 291, + 288, + 324 + ], + "type": "text", + "content": "[6] Paul J Besl and Neil D McKay. Method for Registration of 3-D Shapes. In Sensor Fusion IV: Control Paradigms and Data Structures, pages 586-606, 1992. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 325, + 288, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 325, + 288, + 368 + ], + "spans": [ + { + "bbox": [ + 53, + 325, + 288, + 368 + ], + "type": "text", + "content": "[7] Christian Brandli, Raphael Berner, Minhao Yang, Shih-Chii Liu, and Tobi Delbruck. A " + }, + { + "bbox": [ + 53, + 325, + 288, + 368 + ], + "type": "inline_equation", + "content": "240 \\times 180" + }, + { + "bbox": [ + 53, + 325, + 288, + 368 + ], + "type": "text", + "content": " 130 dB " + }, + { + "bbox": [ + 53, + 325, + 288, + 368 + ], + "type": "inline_equation", + "content": "3\\mu s" + }, + { + "bbox": [ + 53, + 325, + 288, + 368 + ], + "type": "text", + "content": " Latency Global Shutter Spatiotemporal Vision Sensor. IEEE J. of Solid-State Circuits, 49(10):2333-2341, 2014. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 369, + 287, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 369, + 287, + 424 + ], + "spans": [ + { + "bbox": [ + 53, + 369, + 287, + 424 + ], + "type": "text", + "content": "[8] Guillermo Gallego, Henri Rebecq, and Davide Scaramuzzi. A Unifying Contrast Maximization Framework for Event Cameras, with Applications to Motion, Depth, and Optical Flow Estimation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 3867-3876, 2018. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 424, + 287, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 424, + 287, + 457 + ], + "spans": [ + { + "bbox": [ + 53, + 424, + 287, + 457 + ], + "type": "text", + "content": "[9] Yue Gao, Yifan Feng, Shuyi Ji, and Rongrong Ji. HGNN+: General Hypergraph Neural Networks. IEEE Trans. 
Pattern Anal. Mach. Intell., 45(3):3181-3199, 2023. 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 458, + 287, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 458, + 287, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 458, + 287, + 502 + ], + "type": "text", + "content": "[10] Daniel Gehrig, Henri Rebecq, Guillermo Gallego, and Davide Scaramuzza. EKLT: Asynchronous Photometric Feature Tracking using Events and Frames. Int. J. Comput. Vis., 128(3):601-618, 2020. 2, 3, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 502, + 288, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 502, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 47, + 502, + 288, + 557 + ], + "type": "text", + "content": "[11] Daniel Gehrig, Michelle Ruegg, Mathias Gehrig, Javier Hidalgo-Carrió, and Davide Scaramuzza. Combining Events and Frames Using Recurrent Asynchronous Multimodal Networks for Monocular Depth Prediction. IEEE Robot. and Autom. Lett., 6(2):2822-2829, 2021. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 558, + 288, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 558, + 288, + 601 + ], + "spans": [ + { + "bbox": [ + 47, + 558, + 288, + 601 + ], + "type": "text", + "content": "[12] Mathias Gehrig, Mario Millhäsler, Daniel Gehrig, and Davide Scaramuzza. E-RAFT: Dense Optical Flow from Event Cameras. In Int. Conf. 3D Vis., pages 197–206. IEEE, 2021. 2, 6, 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 602, + 288, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 602, + 288, + 656 + ], + "spans": [ + { + "bbox": [ + 47, + 602, + 288, + 656 + ], + "type": "text", + "content": "[13] Gallego Guillermo, Delbruck Tobi, Michael Orchard Garrick, Bartolozzi Chiara, Taba Brian, Censi Andrea, Leutenegger Stefan, Davison Andrew, Conradt Jorg, Daniilidis Kostas, and Scaramuzza Davide. Event-Based Vision: A Survey. IEEE Trans. Pattern Anal. Mach. Intell., 2020. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 657, + 288, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 657, + 288, + 691 + ], + "spans": [ + { + "bbox": [ + 47, + 657, + 288, + 691 + ], + "type": "text", + "content": "[14] Javier Hidalgo-Carrió, Daniel Gehrig, and Davide Scaramuzza. Learning monocular dense depth from events. In Int. Conf. on 3D Vis., pages 534-542. IEEE, 2020. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 691, + 288, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 691, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 691, + 288, + 715 + ], + "type": "text", + "content": "[15] Javier Hidalgo-Carrio, Guillermo Gallego, and Davide Scaramuzza. Event-aided direct sparse odometry. In IEEE" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 715 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "Conf. Comput. Vis. Pattern Recog., pages 5781-5790, 2022. 
5, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 95, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 95, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 95, + 545, + 140 + ], + "type": "text", + "content": "[16] Sumin Hu, Yeeun Kim, Hyungtae Lim, Alex Junho Lee, and Hyun Myung. eCDT: Event Clustering for Simultaneous Feature Detection and Tracking. In Int. Conf. Intel. Robot. Syst., pages 3808-3815. IEEE, 2022. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 141, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 184 + ], + "type": "text", + "content": "[17] Hanme Kim, Stefan Leutenegger, and Andrew J. Davison. Real-Time 3D Reconstruction and 6-DoF Tracking with an Event Camera. In *Eur. Conf. Comput. Vis.*, pages 349–364, 2016. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 185, + 545, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 185, + 545, + 229 + ], + "spans": [ + { + "bbox": [ + 307, + 185, + 545, + 229 + ], + "type": "text", + "content": "[18] Beat Kueng, Elias Mueggler, Guillermo Gallego, and Davide Scaramuzza. Low-latency visual odometry using event-based feature tracks. In Int. Conf. Intell. Robot. Syst., pages 16-23. IEEE, 2016. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 230, + 545, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 230, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 545, + 275 + ], + "type": "text", + "content": "[19] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature Pyramid Networks for Object Detection. In IEEE Conf. Comput. Vis. Pattern Recog., pages 2117-2125, 2017. 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 275, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 275, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 275, + 545, + 319 + ], + "type": "text", + "content": "[20] Daqi Liu, Alvaro Parra, and Tat-Jun Chin. Globally Optimal Contrast Maximisation for Event-based Motion Estimation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6349-6358, 2020. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 319, + 545, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 319, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 307, + 319, + 545, + 365 + ], + "type": "text", + "content": "[21] Pengpeng Liu, Irwin King, Michael R Lyu, and Jia Xu. Flow2stereo: Effective Self-Supervised Learning of Optical Flow and Stereo Matching. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6648-6657, 2020. 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 365, + 545, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 365, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 365, + 545, + 398 + ], + "type": "text", + "content": "[22] Ilya Loshchilov and Frank Hutter. SGDR: Stochastic Gradient Descent with Warm Restarts. In Int. Conf. Learn. Represent., 2017. 
7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 399, + 545, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 545, + 422 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 545, + 422 + ], + "type": "text", + "content": "[23] Ilya Loshchilov and Frank Hutter. Decoupled Weight Decay Regularization. Int. Conf. Learn. Represent., 2019. 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 422, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 453 + ], + "type": "text", + "content": "[24] David G Lowe. Distinctive Image Features from Scale-Invariant Keypoints. Int. J. Comput. Vis., 60:91-110, 2004. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 455, + 545, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 455, + 545, + 489 + ], + "spans": [ + { + "bbox": [ + 307, + 455, + 545, + 489 + ], + "type": "text", + "content": "[25] Bruce D Lucas and Takeo Kanade. An Iterative Image Registration Technique with an Application to Stereo Vision. In IJCAI, pages 674-679, 1981. 1, 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 490, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 490, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 490, + 545, + 533 + ], + "type": "text", + "content": "[26] Nico Messikommer, Carter Fang, Mathias Gehrig, and Davide Scaramuzza. Data-Driven Feature Tracking for Event Cameras. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5642–5651, 2023. 2, 3, 4, 6, 7" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 534, + 545, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 534, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 307, + 534, + 545, + 590 + ], + "type": "text", + "content": "[27] Elias Mueggler, Henri Rebecq, Guillermo Gallego, Tobi Delbruck, and Davide Scaramuzza. The Event-Camera Dataset and Simulator: Event-Based Data for Pose Estimation, Visual Odometry, and SLAM. Int. J. of Robot. Researc., 36(2): 142-149, 2017. 5, 6" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 590, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 545, + 635 + ], + "type": "text", + "content": "[28] Yeongwoo Nam, Mohammad Mostafavi, Kuk-Jin Yoon, and Jonghyun Choi. Stereo Depth From Events Cameras: Concentrate and Focus on the Future. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6114-6123, 2022. 3, 6, 7" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 635, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 678 + ], + "type": "text", + "content": "[29] Liyuan Pan, Miaomiao Liu, and Richard Hartley. Single Image Optical Flow Estimation with an Event Camera. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1669-1678. IEEE, 2020. 2" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 679, + 545, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 679, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 307, + 679, + 545, + 715 + ], + "type": "text", + "content": "[30] Federico Paredes-Vallés, Kirk YW Scheper, and Guido CHE De Croon. 
Unsupervised Learning of a Hierarchical Spiking Neural Network for Optical Flow Estimation: From Events" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "18982" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 544 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 67, + 72, + 286, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 286, + 95 + ], + "type": "text", + "content": "to Global Motion Perception. IEEE Trans. Pattern Anal. Mach. Intell., 42(8):2051-2064, 2019. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 150 + ], + "type": "text", + "content": "[31] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An Imperative Style, High-Performance Deep Learning Library. Adv. Neural Inform. Process. Syst., 32, 2019. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 287, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 287, + 195 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 287, + 195 + ], + "type": "text", + "content": "[32] Lichtsteiner Patrick, Posch Christoph, and Delbruck Tobi. A " + }, + { + "bbox": [ + 48, + 152, + 287, + 195 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 48, + 152, + 287, + 195 + ], + "type": "text", + "content": " dB 15 μs Latency Asynchronous Temporal Contrast Vision Sensor. IEEE J. of Solid-State Circuits, 43 (2):566-576, 2008. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 197, + 287, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 197, + 287, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 197, + 287, + 228 + ], + "type": "text", + "content": "[33] Jianbo Shi and Carlo Tomasi. Good Features to Track. In IEEE Conf. Comput. Vis. Pattern Recog., pages 593-600. IEEE, 1994. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 230, + 287, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 230, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 230, + 287, + 274 + ], + "type": "text", + "content": "[34] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting. Adv. Neural Inform. Process. Syst., 28, 2015. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 276, + 287, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 276, + 287, + 297 + ], + "spans": [ + { + "bbox": [ + 48, + 276, + 287, + 297 + ], + "type": "text", + "content": "[35] Carlo Tomasi and Takeo Kanade. Detection and Tracking of Point. Int. J. Comput. Vis., 9(137-154):3, 1991. 
1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 299, + 287, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 299, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 48, + 299, + 287, + 342 + ], + "type": "text", + "content": "[36] Jiqing Zhang, Xin Yang, Yingkai Fu, Xiaopeng Wei, Baocai Yin, and Bo Dong. Object Tracking by Jointly Exploiting Frame and Event Domain. In Int. Conf. Comput. Vis., pages 13043-13052, 2021. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "spans": [ + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "type": "text", + "content": "[37] Jiqing Zhang, Bo Dong, Haiwei Zhang, Jianchuan Ding, Felix Heide, Baocai Yin, and Xin Yang. Spiking Transformers for Event-Based Single Object Tracking. In IEEE Conf. Comput. Vis. Pattern Recog., pages 8801-8810, 2022. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 388, + 287, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 287, + 430 + ], + "type": "text", + "content": "[38] Alex Zihao Zhu, Nikolay Atanasov, and Kostas Daniilidis. Event-Based Feature Tracking with Probabilistic Data Association. In IEEE Int. Conf. Robot. Autom., pages 4465-4470. IEEE, 2017. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 433, + 287, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 433, + 287, + 466 + ], + "spans": [ + { + "bbox": [ + 48, + 433, + 287, + 466 + ], + "type": "text", + "content": "[39] Alex Zihao Zhu, Yibo Chen, and Kostas Daniilidis. Realtime Time Synchronized Event-Based Stereo. In Eur. Conf. Comput. Vis., pages 433-447, 2018. 3, 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 467, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 467, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 467, + 287, + 510 + ], + "type": "text", + "content": "[40] Alex Zihao Zhu, Liangzhe Yuan, Kenneth Chaney, and Kostas Daniilidis. Unsupervised Event-Based Learning of Optical Flow, Depth, and Egomotion. In IEEE Conf. Comput. Vis. Pattern Recog., pages 989-997, 2019. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 512, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 287, + 544 + ], + "type": "text", + "content": "[41] Alex Zihao Zhu, Nikolay Atanasov, and Kostas Daniilidis. Event-Based Visual Inertial Odometry. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5391-5399, 2017. 
1" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "18983" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/f455d128-070e-4b9d-a550-262379c7f3f3_content_list.json b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/f455d128-070e-4b9d-a550-262379c7f3f3_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..51a4edf398dceb5d7236cc189e20fe9a29b3a998 --- /dev/null +++ b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/f455d128-070e-4b9d-a550-262379c7f3f3_content_list.json @@ -0,0 +1,1794 @@ +[ + { + "type": "text", + "text": "3D Geometry-aware Deformable Gaussian Splitting for Dynamic View Synthesis", + "text_level": 1, + "bbox": [ + 76, + 130, + 890, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhicheng Lu $^{1*}$ , Xiang Guo $^{1*}$ , Le Hui $^{1\\dagger}$ , Tianrui Chen $^{1,2}$ , Min Yang $^{2}$ , Xiao Tang $^{2}$ , Feng Zhu $^{2}$ , Yuchao Dai $^{1\\dagger}$", + "bbox": [ + 240, + 179, + 725, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1Northwestern Polytechnical University 2Samsung R&D Institute", + "bbox": [ + 228, + 215, + 741, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{zhichenglu, guoxiang, cherryxchen}@mail.nwpu.edu.cn", + "bbox": [ + 254, + 236, + 712, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{daiyuchao, huile}@nwpu.edu.cn {min16.yang, xiao1.tang, f15.zhu}@samsung.com", + "bbox": [ + 142, + 253, + 820, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 304, + 312, + 319 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we propose a 3D geometry-aware deformable Gaussian Splatting method for dynamic view synthesis. Existing neural radiance fields (NeRF) based solutions learn the deformation in an implicit manner, which cannot incorporate 3D scene geometry. Therefore, the learned deformation is not necessarily geometrically coherent, which results in unsatisfactory dynamic view synthesis and 3D dynamic reconstruction. Recently, 3D Gaussian Splatting provides a new representation of the 3D scene, building upon which the 3D geometry could be exploited in learning the complex 3D deformation. Specifically, the scenes are represented as a collection of 3D Gaussian, where each 3D Gaussian is optimized to move and rotate over time to model the deformation. To enforce the 3D scene geometry constraint during deformation, we explicitly extract 3D geometry features and integrate them in learning the 3D deformation. In this way, our solution achieves 3D geometry-aware deformation modeling, which enables improved dynamic view synthesis and 3D dynamic reconstruction. Extensive experimental results on both synthetic and real datasets prove the superiority of our solution, which achieves new state-of-the-art performance. 
The project is available at https://npucvr.github.io/GaGS/.", + "bbox": [ + 75, + 335, + 473, + 684 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 715, + 207, + 729 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dynamic View Synthesis (DVS) aims at rendering novel photorealistic views at arbitrary viewpoints and any input time step given a monocular video of a dynamic scene, which has broad applications in virtual reality and augmented reality. Recently, empowered with effective representations such as neural radiance fields (NeRF) [30] and Gaussian Splitting [21], novel view synthesis for static scenes has been greatly advanced. However, this success", + "bbox": [ + 75, + 739, + 468, + 862 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/905bce9a44d2ccb907b545a37b4c00b1f27925f14301d96bd815439e515ac8cd.jpg", + "image_caption": [ + "Figure 1. Geometric information exploited by different methods. a) Early dynamic NeRF methods such as DNeRF[37] directly encode the coordinate $\\mathbf{p}$ of the sample point as input feature for deformation network. b) Interpolation is used to fuse features from neighbouring grids and multiscale interpolation enhances the local geometry information [11, 16, 27, 53]. c) We propose to voxelize a set of Gaussian distributions and use a sparse convolution network to extract geometry-aware features for deformation learning." + ], + "image_footnote": [], + "bbox": [ + 506, + 305, + 888, + 489 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "cannot be extended to its dynamic counterpart directly. This is mainly due to the difficulty in modeling and representing the scene deformation. Due to the inherent motion/shape ambiguity in monocular dynamic 3D representation, dynamic scene modeling and synthesis are more challenging, especially for monocular video with limited observations.", + "bbox": [ + 496, + 612, + 890, + 702 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In addressing the above challenges, one common strategy is to represent the dynamic scenes as a combination of a static canonical field and a deformation model [11, 16, 17, 27, 33, 34, 37, 51, 53], whereas the bottleneck lies in representing the diverse and complex real-world 3D deformation. To represent geometrically consistent 3D deformation, the local geometric/structural information is critical, since the deformations of the objects in the real world are highly correlated to their 3D structures. Furthermore, the motions of the object points are deeply coupled with the motions of their neighboring points. Thus, how to incorporate the local geometric information to learn locally smooth and consistent 3D deformations becomes the research focus in DVS.", + "bbox": [ + 496, + 704, + 892, + 898 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contributions.", + "bbox": [ + 94, + 875, + 210, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding authors.", + "bbox": [ + 96, + 887, + 225, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "8900", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, different deformation models have utilized the local geometric information, but they all have their limitations. As shown in Fig. 1 a), originally in D-NeRF [37], the feature (positional encoding) of each sampled point is extracted independently with each other. Following works notice that this method could not handle the complex dynamic scene since the extracted features contain little information from neighboring points. In Fig. 1 b), interpolation is introduced to fuse features of neighboring grids. NDVG [16] and RoDynRF [27] gradually increase the voxel resolution so that the large voxel size could cover a larger area, introducing the local smoothness at the early stage of the training. However, this strategy has a limited cover range of local areas and cannot work at a later training stage. TiNeuVox [11] and SUDS [53] interpolate with multi-scales. Nevertheless, the interpolation operation is rather simple in extracting local geometric information and introduces un-smoothness and artifacts [3, 19].", + "bbox": [ + 76, + 90, + 472, + 363 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In modling the nonrigid deformation, it is crucial to account for the consistency in the motion of local neighborhood. Since point-level MLP has a limited receptive field, which cannot capture the local geometric features of point clouds. To utilize the local geometric information effectively, we propose to use 3D sparse convolution. As shown in Fig. 1 c), building upon the recent explicit point cloud based Gaussian Splatting representation, we introduce a sparse convolution network to extract 3D geometry-aware features. Compared with simple feature interpolation, the convolutional neural network is superior in extracting local information and has a much larger reception field. Also, we treat the 3D Gaussian distributions as point clouds, which enable sparse 3D convolution for time and memory efficiency. Note that FDNeRF [17] uses a 3D U-Net to inpaint the missing area in the voxel grid. But this inpaint network is not used for deformation modeling, while the rendering speed and voxel resolution are also limited.", + "bbox": [ + 76, + 364, + 472, + 637 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Originally in Gaussian Splitting [21], the rotation parameter of each Gaussian is represented by quaternion. However, quaternion representation for rotation is discontinuous in parameter space for neural network learning [68]. We introduce the continuous 6D rotation [68] to ensure that the network learns a continuous function in the parameter space, which accurately represents the rotational states of each Gaussian at different time.", + "bbox": [ + 76, + 641, + 472, + 761 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Overall, our method mainly has two components: a Gaussian canonical field and a deformation field. 
The Gaussian canonical field consists of 3D Gaussian distributions and a geometry-aware feature learning network. The explicit 3D Gaussian distribution represents the geometry of the canonical scene, and the sparse 3D CNN network extracts local structural/geometric information for each Gaussian. The deformation field estimates a transformation for each Gaussian in the canonical field, which transfers the", + "bbox": [ + 76, + 763, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Gaussian from the canonical field to the given timestamp. Finally, we use 3D Gaussian splatting to render images for the given timestamp.", + "bbox": [ + 496, + 90, + 890, + 136 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our main contributions are summarized as:", + "bbox": [ + 517, + 137, + 805, + 150 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a geometry-aware feature extraction network based on 3D Gaussian distribution to better utilize local geometric information.", + "- We propose to use continuous 6D rotation representation and modified density control strategy to adapt Gaussian splatting to dynamic scenes.", + "- Extensive experiments on both synthetic and real datasets show that our method surpasses competing methods by a wide margin." + ], + "bbox": [ + 500, + 152, + 890, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 303, + 640, + 319 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Novel View Synthesis", + "text_level": 1, + "bbox": [ + 500, + 328, + 699, + 344 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Novel View Synthesis (NVS) is a well-known task in both computer vision and graphics [5, 8, 15, 22]. Surveys such as [43, 48, 49] provide comprehensive discussions. Explicit NVS methods generally reconstruct an explicit 3D model of a scene in the form of point clouds [1], voxels, or meshes [18, 41, 42, 50]. Once the geometry of the scene is represented, novel view images can be rendered from arbitrary viewpoints via manipulating the camera pose parameters. Other methods [9, 12, 20, 35, 41, 42, 59] tackle NVS by estimating depth maps using multi-view geometry, whereas the features are aggregated from co-visible frames.", + "bbox": [ + 496, + 351, + 890, + 518 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural Radiance Fields (NeRF) [30] is a groundbreaking approach that utilizes Multi-Layer Perceptrons (MLPs) to represent scenes implicitly. This methodology enables the modeling of a 5D radiance field, resulting in the impressive synthesis of views for static scenes. Numerous subsequent works expand the capabilities of NeRF by adapting it to various scenarios, such as handling larger and unbounded scenes [29, 40, 47, 58, 65], scene editing and relighting, [4, 45, 60, 67], [2, 3, 19], and improving the generalization ability [7, 52, 55, 64]. Meanwhile, researchers focus on achieving more efficient rendering and optimization in a NeRF-like framework. [6, 13, 24, 26, 28, 32, 36, 63] investigate efficient sampling methods along each ray for color accumulation, while [38, 39] partition the scene into multiple sub-regions as an efficient pre-processing, and [6, 13, 31, 46, 62] exploit voxel-grid representation to speed up the optimization. Very recently, [21] proposes to use 3D Guussian distribution to represent the scene, obtaining promising results. 
However, these methods are mainly applicable to static scenes, and fail in scenes with dynamic objects.", + "bbox": [ + 496, + 518, + 890, + 821 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Dynamic View Synthesis", + "text_level": 1, + "bbox": [ + 500, + 830, + 723, + 847 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A recent trend in NVs is to extend the success in static NVs to dynamic NVs. One viable strategy is to construct a 4D spatial-temporal representation. Yoon et al. [61] combine", + "bbox": [ + 496, + 854, + 890, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "8901", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/49cd04cb0a0fd62e67e59d4951cca6cdb33605a04e9fe69fcb8e1d3203f5c73d.jpg", + "image_caption": [ + "Figure 2. The pipeline of our proposed 3D geometry-aware deformable Gaussian splitting. In the Gaussian canonical field, we reconstruct a static scene in canonical space using 3D Gaussian distributions. We extract positional features using an MLP, as well as local geometric features using a 3D U-Net, fused by another MLP to form the geometry-aware features. In the deformation field, taking the geometry-aware features and timestamp $t$ , an MLP estimates the 3D Gaussian deformation, which transfers the canonical 3D Gaussian distributions to timestamp $t$ . Finally, a rasterizer renders the transformed 3D Gaussian to images." + ], + "image_footnote": [], + "bbox": [ + 98, + 88, + 867, + 303 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "single-view and multi-view depth to achieve NVS by 3D warping. Gao et al. [14] use a time-invariant model and a time-varying model to represent the static part and dynamic part of a scene, respectively, and use scene flow for motion modeling. NeRFlow [10] proposes a 4D spatial-temporal representation of a dynamic scene. Xian et al. [57] map a spatial-temporal location to the color and volume density by a 4D spatial-temporal radiance field. NSFF [23] represents a dynamic scene as a continuously changing function, encompassing various aspects of the scene, including appearance, geometry, and 3D scene motion. DCT-NeRF [54] uses the Discrete Cosine Transform (DCT) to replace the scene flow in NSFF [23] to enable smoother motion trajectories. HexPlane [6] and K-Plane [13] project 4D spatial-temporal space to multiple 2D planes.", + "bbox": [ + 75, + 378, + 472, + 606 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "On the other hand, works such as [11, 16, 17, 27, 33, 34, 37, 44, 51, 53] decode the dynamic scene with a canonical field and a deformation field. Along this pipeline, D-NeRF [37] first proposes the canonical-based framework. However, the deformation network utilizes positional features with little geometry information, which cannot handle complex dynamic scenarios well. Nerfies [33] proposes a coarse-to-fine optimization method for coordinate-based models that allows for more robust optimization. HyperNeRF [34] lifts the canonical field into a higher dimensional space to handle topological changes. NDVG [16] and RoDynRF [27] gradually increase the voxel resolution, which has two benefits. TiNeuVox [11] and SUDS [53] interpolate the features with multi-scales for deformation learning. 
The multi-scales interpolation covers a larger reception field, which benefits modeling varying motions.", + "bbox": [ + 75, + 609, + 468, + 851 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Very recently, with the stunning debut of 3D Gaussian [21], some works introduce this point-based representation into their pipelines to synthesize high-fidelity images", + "bbox": [ + 75, + 854, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "of a dynamic scene. Wu et al. [56] introduce a 4D Gaussian Splitting representation and utilize a deformation field to model both Gaussian motions and shape changes. However, the multi-scale HexPlane interpolation has limited ability in extracting the geometry information, which is still insufficient for modeling complex motions. The projection-based representation compresses the 3D space to 2D space, losing 3D geometric information for deformation learning. In contrast, our canonical-based method can fully leverage the 3D information in 3D space.", + "bbox": [ + 496, + 378, + 893, + 531 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 554, + 591, + 570 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we present our 3D geometry-aware deformable Gaussian Splating solution for dynamic view synthesis, where an overview of our pipeline is illustrated in Fig. 2. Given a set of images or monocular video of a dynamic scene with frames with corresponding time labels and known camera intrinsic and extrinsic parameters, our goal is to synthesize a novel view at any desired view at any desired time. Our method mainly consists of two core components: the Gaussian canonical field is used to learn the reconstruction of static scenes, while the deformation field is used to learn object deformation. First, we review the static 3D Gaussian splatting in Sec. 3.1. Then, we introduce the proposed Gaussian canonical field in Sec. 3.2, which consists of 3D Gaussian distributions and a geometry feature learning network. Next, in Sec. 3.3, we propose a 3D geometry-aware deformation field to learn transformations for given time steps, which transform our canonical 3D Gaussian distributions to corresponding times. In Sec. 3.4, we explain the process of rendering images from transformed 3D Gaussian distributions. Finally, we present our losses and density control modifications in Sec. 3.5.", + "bbox": [ + 496, + 583, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "8902", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Preliminary", + "text_level": 1, + "bbox": [ + 76, + 90, + 207, + 107 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D-GS [21] represents the scene with sparse 3D Gaussians distributions. Each Gaussian has an anisotropic covariance $\\Sigma \\in \\mathbb{R}^{3\\times 3}$ and a mean value $\\mu \\in \\mathbb{R}^3$ :", + "bbox": [ + 76, + 113, + 468, + 159 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {G} (\\mathbf {x}) = e ^ {- \\frac {1}{2} (\\mathbf {x} - \\mu) ^ {\\top} \\boldsymbol {\\Sigma} ^ {- 1} (\\mathbf {x} - \\mu)}. 
\\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 167, + 468, + 186 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The covariance matrix $\\pmb{\\Sigma}$ can be decomposed into a scaling matrix $\\mathbf{S} \\in \\mathbb{R}^{3 \\times 3}$ and a rotation matrix $\\mathbf{R} \\in \\mathrm{SO}(3)$ . This ensures that the covariance matrix is positive semi-definite, while reducing the learning difficulty of 3D Gaussians:", + "bbox": [ + 76, + 196, + 468, + 257 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\Sigma} = \\mathbf {R} \\mathbf {S} \\mathbf {S} ^ {\\top} \\mathbf {R} ^ {\\top}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 214, + 265, + 468, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To render an image from a designated viewpoint, the covariance matrix $\\pmb{\\Sigma}^{\\prime}$ in camera coordinates can be calculated by giving a viewing transformation $\\mathbf{W}$ , followed by [69]:", + "bbox": [ + 76, + 292, + 468, + 339 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\Sigma} ^ {\\prime} = \\mathbf {J} \\mathbf {W} \\boldsymbol {\\Sigma} \\mathbf {W} ^ {\\top} \\mathbf {J} ^ {\\top}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 345, + 468, + 364 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{J}$ is the Jacobian of the affine approximation of the projective transformation, and $\\mathbf{W}$ is the world to camera transformation matrix.", + "bbox": [ + 76, + 375, + 468, + 417 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Each Gaussian is parameterized into the following attributes: position $\\mathbf{x} \\in \\mathbb{R}^3$ , color defined by spherical harmonics coefficients $\\mathbf{c} \\in \\mathbb{R}^k$ , rotation $\\mathbf{r} \\in \\mathbb{R}^4$ , scale $\\mathbf{s} \\in \\mathbb{R}^3$ , and opacity $o \\in \\mathbb{R}$ . Point-based $\\alpha$ -blending and volumetric rendering like NeRF [30] essentially share the same image formation model for the splattering process. Specifically, the color $\\mathbf{C}$ of each pixel is influenced by the related Gaussians:", + "bbox": [ + 76, + 419, + 468, + 525 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {C} = \\sum_ {i = 1} ^ {N} \\mathbf {T} _ {i} \\alpha_ {i} \\mathbf {c} _ {i}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 536, + 468, + 577 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\alpha_{i}$ represents the density of the Gaussian point computed by a Gaussian with covariance $\\pmb{\\Sigma}$ multiplied by its opacity.", + "bbox": [ + 76, + 582, + 468, + 628 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Gaussian Canonical Field", + "text_level": 1, + "bbox": [ + 76, + 635, + 310, + 648 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we first reconstruct a static scene in canonical space. Then, we propose a geometric branch, which enables geometry feature learning of the 3D Gaussian distributions for the subsequent deformation field.", + "bbox": [ + 76, + 659, + 468, + 718 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Gaussian parameters. Similar to 3D-GS [21], each Gaussian in the canonical space is characterized by position, color, scale, and opacity. Note that for rotation, we are inspired by [68] to use a continuous 6D rotation representation. 
Compared with the quaternion representation used in 3D-GS, the 6D rotation representation can benefit our method in estimating the deformation of each Gaussian from canonical space to time-space, especially in helping the neural networks to learn smooth rotation variation from time to time. Specifically, we set learnable parameter $[a_1,a_2]$ for each Gaussian to denote its rotation in canonical space, where $a_1$ and $a_2$ are the column vectors of three", + "bbox": [ + 76, + 719, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "rows, respectively. They are initialized to $[100]^{\\top}$ and $[010]^{\\top}$ , corresponding precisely to the identity rotation matrix. The mapping from this 6D representation vector to SO(3) matrix is defined as [68]:", + "bbox": [ + 498, + 90, + 890, + 151 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nf _ {\\mathrm {V} 2 \\mathrm {M}} \\left(\\left[ \\begin{array}{c c} | & | \\\\ a _ {1} & a _ {2} \\\\ | & | \\end{array} \\right]\\right) = \\left[ \\begin{array}{c c c} | & | & | \\\\ b _ {1} & b _ {2} & b _ {3} \\\\ | & | & | \\end{array} \\right], \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 545, + 161, + 890, + 212 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nb _ {i} = \\left[ \\left\\{ \\begin{array}{c c} \\mathcal {N} \\left(a _ {1}\\right) & \\text {i f} i = 1 \\\\ \\mathcal {N} \\left(a _ {2} - \\left(b _ {1} \\cdot a _ {2}\\right) b _ {1}\\right) & \\text {i f} i = 2 \\\\ b _ {1} \\times b _ {2} & \\text {i f} i = 3 \\end{array} \\right] ^ {\\top}, \\right. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 539, + 234, + 890, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{N}(\\cdot)$ denotes a normalization function. “.” represents the inner product of a vector and “ $\\times$ ” represents vector cross product. V2M in $f_{\\mathrm{V2M}}$ means the transform from 6D vector to rotation matrix.", + "bbox": [ + 498, + 296, + 890, + 356 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Geometry feature learning. To capture the local geometric structure of the canonical scene, we regard the 3D Gaussian as the 3D point cloud, i.e., we only use the 3D coordinates of the 3D Gaussian. In order to handle a large number of point clouds, we leverage a simple two-branch structure: the geometric branch learns local features of point clouds across different receptive fields, while the identity branch preserves the independent point-level features at high resolution. By integrating the geometric branch and identity branch, we can efficiently obtain point-level features at high resolution while embedding the local geometric information of the point cloud.", + "bbox": [ + 498, + 357, + 890, + 537 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The geometric branch leverages the sparse convolution [25] on the sparse voxels to extract local geometric features at different receptive fields. 
Given the point cloud $\\mathbf{P} \\in \\mathbb{R}^{N \\times 3}$ , we first transform the high-resolution point clouds into low-resolution voxels by dividing the space through fixed grid size $s$ :", + "bbox": [ + 498, + 539, + 890, + 628 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {V} = \\operatorname {f l o o r} (\\mathbf {P} / s), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 638, + 641, + 890, + 657 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the size of $\\mathbf{V}$ is $M\\times 3$ and $M$ is the number of voxels. Then, we construct a sparse 3D U-Net by stacking a set of sparse convolutions with a skip connection. Taking $\\mathbf{V}$ as input, we perform sparse 3D U-Net to aggregate local features (dubbed as $\\mathbf{F}_v\\in \\mathbb{R}^{M\\times C}$ ) of the point clouds.", + "bbox": [ + 498, + 669, + 890, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The identity branch uses a multi-layer perception (MLP) to map the 3D coordinate of the point cloud into the embedding space (dubbed as $\\mathbf{F}_p\\in \\mathbb{R}^{N\\times C}$ ) to maintain the independence of point features. To accurately characterize the local geometric structure of the canonical scene, we fuse the voxel features with local information onto point features. Specifically, we transform the voxel feature $\\mathbf{F}_v$ back to the corresponding points to obtain point-level features $\\mathbf{F}_p^{\\prime}\\in \\mathbb{R}^{N\\times C}$ by assigning the voxel features to the corresponding points within it. Finally, we concatenate $\\mathbf{F}_p^{\\prime}$", + "bbox": [ + 498, + 744, + 890, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "8903", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and $\\mathbf{F}_p$ to obtain the fused point-level feature followed by an MLP layer as:", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {\\text {f u s e}} = \\operatorname {M L P} \\left(\\operatorname {C o n c a t} \\left(\\mathbf {F} _ {p} ^ {\\prime}, \\mathbf {F} _ {p}\\right)\\right). \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 136, + 468, + 157 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Deformation Field", + "text_level": 1, + "bbox": [ + 76, + 170, + 253, + 186 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we propose a deformation field that estimates the deformation of each 3D Gaussian in the canonical space based on a given time $t$ .", + "bbox": [ + 76, + 196, + 468, + 241 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Deformation estimation. 
We adopt an MLP as the decoder $\\mathcal{G}_{\\Phi}$ , which takes the geometry feature learned from the geometry branch in the Gaussian canonical field, the position of each Gaussian, and timestamp as input, outputs the deformation of each Gaussian from canonical space to time $t$ , including position deformation $\\Delta \\mathbf{x}_{t} \\in \\mathbb{R}^{3}$ , rotation deformation $\\Delta \\mathbf{r}_{t} \\in \\mathbb{R}^{6}$ and scale deformation $\\Delta \\mathbf{s}_{t} \\in \\mathbb{R}^{3}$ :", + "bbox": [ + 76, + 243, + 468, + 348 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta \\mathbf {x} _ {\\mathbf {t}}, \\Delta \\mathbf {r} _ {\\mathbf {t}}, \\Delta \\mathbf {s} _ {\\mathbf {t}} = \\mathcal {G} _ {\\Phi} (\\mathbf {F} _ {\\text {f u s e}}, \\gamma (\\mathbf {x}), \\gamma (t)), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 364, + 468, + 381 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\gamma (\\cdot)$ denotes the positional encoding in NeRF [30], which maps a one dimension signal from $\\mathbb{R}$ into a higher dimensional space $\\mathbb{R}^{2L}$ :", + "bbox": [ + 76, + 396, + 468, + 441 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\gamma (p) = (\\sin \\left(2 ^ {0} \\pi p\\right), \\cos \\left(2 ^ {0} \\pi p\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 140, + 455, + 361, + 474 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\dots , \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 477, + 468, + 491 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\sin \\left(2 ^ {L - 1} \\pi p\\right), \\cos \\left(2 ^ {L - 1} \\pi p)\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 494, + 401, + 513 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that we set the color parameters $\\mathbf{c}$ and opacity $o$ of canonical 3D Gaussian distributions constant over time. These two factors are highly related to the physical properties of the Gaussian distributions, and we want each distribution to represent the same object area over the timeline.", + "bbox": [ + 76, + 529, + 468, + 604 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Transformation. Using the estimated deformation for time $t$ above, we could transform the 3D Gaussian distributions to current time by", + "bbox": [ + 76, + 606, + 468, + 652 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {t} = \\mathbf {x} + \\Delta \\mathbf {x} _ {\\mathbf {t}},\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 667, + 285, + 683 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {s} _ {t} = \\mathbf {s} + \\Delta \\mathbf {s} _ {\\mathbf {t}}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 686, + 468, + 702 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {r} _ {t} = f _ {\\mathrm {V 2 M}} \\left(\\Delta \\mathbf {r} _ {\\mathbf {t}}\\right) \\times f _ {\\mathrm {V 2 M}} (\\mathbf {r}).\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 705, + 370, + 720 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. 
Rasterization", + "text_level": 1, + "bbox": [ + 76, + 737, + 215, + 751 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Once we have completed preparing the attributes of each Gaussian $(\\mathbf{x}_t,\\mathbf{c},\\mathbf{r}_t,\\mathbf{s}_t,o)$ , we use the differentiable tile rasterizer [21] to render the image at any desired viewpoint at this timestamp:", + "bbox": [ + 76, + 762, + 468, + 823 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {C}} _ {t} = \\text {R a s t e r i z e r} (\\mathbf {x} _ {t}, \\mathbf {c}, \\mathbf {r} _ {t}, \\mathbf {s} _ {t}, o, \\mathbf {K}, [ \\mathbf {R} | \\mathbf {T} ]), \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 109, + 837, + 468, + 854 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{K}$ and $[\\mathbf{R}|\\mathbf{T}]$ represent the camera's intrinsic and extrinsic parameters, respectively.", + "bbox": [ + 76, + 869, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1cba156c55a86ae2a58373e8d3960276ff0137c542d3109f10ea1c71d597f12d.jpg", + "image_caption": [ + "Figure 3. Our density control is designed for dynamic scenes. We control the densification of Gaussian distributions according to their transformed parameters at timestamp $t$ rather than parameters at canonical space." + ], + "image_footnote": [], + "bbox": [ + 504, + 90, + 890, + 280 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Optimization", + "text_level": 1, + "bbox": [ + 500, + 340, + 638, + 356 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To optimize the model, we use the photometric loss, and a motion loss, and also adapt the density control from 3D-GS [21] with our modifications.", + "bbox": [ + 498, + 364, + 890, + 407 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Photometric loss. The photometric loss consists of the $L_{1}$ loss and structural similarity loss $L_{D - SSIM}$ between the rendered image $\\hat{\\mathbf{C}}_t$ and ground truth image $\\mathbf{C}_t$ .", + "bbox": [ + 498, + 409, + 890, + 455 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {p h o t o}} = (1 - \\lambda) L _ {1} + \\lambda L _ {D - S S I M}. \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 573, + 465, + 890, + 482 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Regularization. We accept the fact that in a scene, the proportion of dynamic points is much smaller than that of static points, and the motion amplitude at dynamic points is not too large. In other words, the point in a scene should be as static as possible,", + "bbox": [ + 498, + 492, + 890, + 568 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {m o t i o n}} = \\left\\| \\Delta \\mathbf {x} _ {\\mathbf {t}} \\right\\| _ {1}. \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 624, + 579, + 890, + 595 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Total loss. The total loss we used is defined as follows,", + "bbox": [ + 500, + 604, + 864, + 619 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL = L _ {\\text {p h o t o}} + \\omega L _ {\\text {m o t i o n}}, \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 609, + 632, + 890, + 648 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\omega$ is a trade-off parameter to balance the components. Density control. 
3D-GS has shown that adaptive density control is essential in achieving high rendering performance. On the one hand, the Gaussians need to populate empty areas without geometric features. Thus, it simply creates a copy of the Gaussian for under-reconstructed regions. On the other hand, large Gaussians in regions with high variance need to be split into smaller Gaussians. We implement our method like 3D-GS but replace such Gaussians with two new ones, divide their scale by a factor of $\\phi = 1.6$ , and initialize their position by using the original 3D Gaussian as a PDF for sampling.", + "bbox": [ + 498, + 659, + 890, + 840 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our method differs from 3D-GS in the following aspects. For 3D-GS, there only exists sets of Gaussians. However, in our case, we initialize the Gaussians in the canonical space, then estimate the deformations of these Gaussians,", + "bbox": [ + 498, + 840, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "8904", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/842d208980618c33c0f97b5975baa4ed9ee68efb69e4df758d145d9dd4990227.jpg", + "table_caption": [ + "Table 1. Quantitative comparison between our method and competing methods on the D-NeRF dataset. The best results are highlighted in bold." + ], + "table_footnote": [], + "table_body": "
Method | Hell Warrior | Mutant | Hook | Bouncing Balls
PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓
3D-GS [21] | 15.3924 | 0.8776 | 0.1300 | 21.7554 | 0.9359 | 0.0575 | 18.6933 | 0.8733 | 0.1144 | 22.5575 | 0.9485 | 0.0647
D-NeRF [37] | 25.0293 | 0.9506 | 0.0691 | 31.2900 | 0.9739 | 0.0268 | 29.2567 | 0.9650 | 0.1174 | 38.9300 | 0.9900 | 0.1031
TiNeuVox-B [11] | 28.2058 | 0.9661 | 0.0631 | 33.9029 | 0.9771 | 0.0301 | 31.7929 | 0.9718 | 0.0436 | 40.8536 | 0.9913 | 0.0401
NDVG [16] | 26.4933 | 0.9600 | 0.0670 | 34.4131 | 0.9801 | 0.0270 | 30.0009 | 0.9626 | 0.0463 | 37.5157 | 0.9874 | 0.0751
FDNeRF [17] | 27.7120 | 0.9665 | 0.0508 | 34.9727 | 0.9810 | 0.0312 | 32.2867 | 0.9756 | 0.0388 | 40.0191 | 0.9912 | 0.0395
4D-GS [56] | 28.1196 | 0.9730 | 0.0276 | 38.3411 | 0.9936 | 0.0062 | 33.1560 | 0.9810 | 0.0168 | 40.7418 | 0.9941 | 0.0105
Ours | 32.2712 | 0.9835 | 0.0164 | 41.4284 | 0.9969 | 0.0029 | 36.9647 | 0.9916 | 0.0076 | 43.5929 | 0.9960 | 0.0061
Method | Lego | T-Rex | Stand Up | Jumping Jacks
PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓
3D-GS [21] | 23.0991 | 0.9329 | 0.0567 | 25.7496 | 0.9567 | 0.0474 | 19.3779 | 0.9200 | 0.0909 | 20.7163 | 0.9227 | 0.0980
D-NeRF [37] | 21.6427 | 0.8394 | 0.1654 | 31.7568 | 0.9767 | 0.0396 | 32.7992 | 0.9818 | 0.0215 | 32.8031 | 0.9810 | 0.0373
TiNeuVox-B [11] | 25.1748 | 0.9217 | 0.0689 | 32.7750 | 0.9783 | 0.0307 | 36.2031 | 0.9859 | 0.0199 | 34.7390 | 0.9823 | 0.0328
NDVG [16] | 25.0416 | 0.9395 | 0.0534 | 32.6229 | 0.9781 | 0.0330 | 33.2158 | 0.9793 | 0.0302 | 31.2530 | 0.9737 | 0.0398
FDNeRF [17] | 25.2700 | 0.9390 | 0.0460 | 30.7068 | 0.9731 | 0.0368 | 36.9107 | 0.9878 | 0.0188 | 33.5521 | 0.9812 | 0.0329
4D-GS [56] | 25.4024 | 0.9434 | 0.0377 | 33.3912 | 0.9869 | 0.0130 | 38.2610 | 0.9923 | 0.0071 | 35.6656 | 0.9882 | 0.0159
Ours | 25.4411 | 0.9474 | 0.0329 | 39.0285 | 0.9952 | 0.0052 | 42.2101 | 0.9966 | 0.0028 | 37.9604 | 0.9928 | 0.0088
", + "bbox": [ + 99, + 117, + 867, + 362 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/6338813f16de55fe2f099c26c8e47ea6f48a7cd4380a378c8d222ab06e5f87bb.jpg", + "table_caption": [ + "Table 2. Quantitative comparison between our method and competing methods on the HyperNeRF dataset. The best results are highlighted in bold." + ], + "table_footnote": [], + "table_body": "
Method | Chicken | 3D Printer | Broom | Peel Banana
PSNR↑ | MS-SSIM↑ | PSNR↑ | MS-SSIM↑ | PSNR↑ | MS-SSIM↑ | PSNR↑ | MS-SSIM↑
TiNeuVox [11] | 28.2861 | 0.9474 | 22.7514 | 0.8392 | 21.2682 | 0.6832 | 24.5136 | 0.8743
NDVG [16] | 27.0536 | 0.9390 | 22.4196 | 0.8389 | 21.4658 | 0.7028 | 22.8204 | 0.8279
FDNeRF [17] | 27.9627 | 0.9438 | 22.8027 | 0.8453 | 21.9091 | 0.7154 | 24.2515 | 0.8645
3D-GS [21] | 20.8915 | 0.7426 | 18.3991 | 0.6114 | 20.3953 | 0.6598 | 20.5654 | 0.8094
Ours | 28.5342 | 0.9331 | 22.0403 | 0.8098 | 20.8994 | 0.5241 | 25.5785 | 0.9067
", + "bbox": [ + 200, + 393, + 764, + 486 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/e5f27307c87a566184c1010d5178936df212644979934d678b0be83b46cfac70.jpg", + "table_caption": [ + "Table 3. Quantitative comparison on HyperNeRF dataset: Average on Cut Lemon, Chicken, 3D Printer, and Split Cookie. The best results are highlighted in bold." + ], + "table_footnote": [], + "table_body": "
Method | PSNR↑ | SSIM↑ | LPIPS↓
TiNeuVox-B [11] | 27.16 | 0.76 | 0.40
3D-GS [21] | 21.26 | 0.69 | 0.40
4D-GS [56] | 26.98 | 0.78 | 0.31
Ours | 27.52 | 0.80 | 0.25
", + "bbox": [ + 143, + 537, + 395, + 608 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "and transform their attributes into a timestamp space. As shown in Fig. 3, we use the Gaussians at the current moment to render the image. Therefore, we determine whether the Gaussians need to conduct density control by the current attributes (like scale) at the current timestamp rather than the canonical attributes. Afterward, we inverse the transformation of the split/cloned Gaussian back to the canonical space.", + "bbox": [ + 75, + 621, + 468, + 742 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 758, + 207, + 776 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Dataset", + "text_level": 1, + "bbox": [ + 76, + 785, + 171, + 799 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the paper, we use both synthetic and real datasets for evaluating our method. The synthetic dataset D-NeRF [37] contains 8 dynamic scenes, including Hell Warrior, Mutant, Hook, Bouncing Balls, Lego, T-Rex, Stand Up, and Jumping Jacks. The real dataset proposed by HyperNeRF [34], including interp-cut-lemon, interp-cut-lemon1, vrig", + "bbox": [ + 75, + 809, + 468, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "chicken, vrig-3dprinter, misc-split-codon, and misc-split-codon. Following previous works [21], we report three evaluation metrics, including Peak Signal-to-Noise Ratio (PSNR), Structural Similarity (SSIM), and Learned Perceptual Image Patch Similarity (LPIPS) [66].", + "bbox": [ + 496, + 492, + 890, + 569 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 575, + 717, + 590 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our implementation is based on 3D-GS [21]. We trained a total of 40000 iterations, with the first 3000 iterations only optimizing static scenes, and then adding deformation fields to optimize dynamic scenes. The learning rate of our network takes an exponential decay from 8e-4 to 1.6e-6 with the Adam optimizer. Moreover, we use a 2-layer MLP with a width of 64 for the front point feature extraction, and a 3-layer MLP with a width of 64 for the back point feature fusion. Then 5 layers MLP with width 256 and skip connection is used for a decoder. For the positional encoding process, we use $L = 10$ for position $x$ and $L = 6$ for timestep $t$ . For the D-NeRF dataset, which does not provide point clouds, we randomly initialize 150000 points. Meanwhile, for the HyperNeRF dataset, we use the point cloud provided in its dataset as the initial point cloud. All the experiments are tested on a single RTX 4090 GPU.", + "bbox": [ + 496, + 598, + 890, + 840 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.Quantitative Results", + "text_level": 1, + "bbox": [ + 500, + 847, + 692, + 863 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Synthetic scenes. We compare our method with recent state-of-the-art methods in the field, including 3D-GS, D-", + "bbox": [ + 496, + 869, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "8905", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f5efbbc16829caa1ec54d7b4789f1e37fcdb9ec9b26faaf1c358057615b89fd5.jpg", + "image_caption": [ + "Figure 4. Qualitative comparisons between baselines and our method on the synthetic dataset." 
+ ], + "image_footnote": [], + "bbox": [ + 116, + 85, + 851, + 349 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4950b47e09ddea53fa8179bb3b9514c33d4e5b5c3a3fcef84eac255dbab450b5.jpg", + "image_caption": [ + "Figure 5. Qualitative comparisons between baselines and our method on the HyperNeRF real dataset[34]." + ], + "image_footnote": [], + "bbox": [ + 76, + 380, + 472, + 648 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "NeRF, TiNeuVox, NDVG, FDNeRF, and 4D-GS on the D-NeRF Dataset. As shown in Table 1, we list the results of each scene. It can be observed that our method is significantly better than other methods in terms of all three metrics for physical canonical-based methods. On average, our method significantly improves PSNR compared with static Gaussian, 3D-GS. The computational costs are: training time around 2h (avg. on D-NeRF dataset), render FPS 12 (fixed viewpoint), model size (34MB points cloud + 14MB network). Since it inherently cannot model the deformation of the dynamic scene, 3D-GS performs poorly in dynamic view synthesis.", + "bbox": [ + 75, + 719, + 472, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Real scenes. We further compare our method with some highly related works on the real scene dataset proposed by [34]. We have shown the detailed results on chicken, 3D printer, broom, and peel banana in Table 2, and the average result on cut lemon, chicken, 3d printer, split cookie in Table 3. It can be observed that our method achieves good performance compared with other state-of-the-art methods. Compared with synthetic datasets, real datasets are more challenging due to the narrow camera viewing range and pose ambiguity. The quantitative results can demonstrate the effectiveness of the proposed method in real scenes.", + "bbox": [ + 496, + 378, + 893, + 547 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Visualization Results", + "text_level": 1, + "bbox": [ + 498, + 558, + 697, + 571 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Visual comparison. In addition to quantitative results, we also provide visualization results of different methods to demonstrate the superiority of our method. For better comparison, we show the rendered images of each synthetic scene from the same viewpoint in Fig. 4. By comparing the visualization results of different methods, it is shown that the rendered images by our method are closer to the ground truth images, indicating that our method can recover accurate and detailed images. In addition, we provide visualization results of the real scenes in Fig. 5. Compared with TiNueVox [11], our method can recover the detailed structure of dynamic objects, like chicken and banana.", + "bbox": [ + 496, + 582, + 890, + 763 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Gaussian visualization. To verify the effectiveness of our method, we show the 3D point cloud of the 3D Gaussian. Specifically, we only use the 3D coordinates of the 3D Gaussian. As shown in Fig. 7, we provide the point clouds of different methods on the synthetic dataset, including 3D-GS [21], 4D-GS [56], and ours. Note that the color of the point cloud is generated by 3D coordinates. Since 3D-DS cannot model dynamic scenes, the quality of the point cloud is poor. 
Comparing 4D-GS with ours, it can be observed", + "bbox": [ + 496, + 763, + 893, + 901 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "8906", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4ee70ea5442e870eb0d598aa233800289e86d78c0eec50272768f1e9612b22a6.jpg", + "image_caption": [ + "Figure 6. Visualization of learned geometry-aware features." + ], + "image_footnote": [], + "bbox": [ + 80, + 89, + 205, + 215 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/40331151a924f8e95ea8f134904708f6dd605e56b16dbaf478bfaf1fa4766a3d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 210, + 88, + 334, + 215 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/08e57af452a8fb5489e8c7008a2d47bb5ead4a5dcd47c06b063fbfd78edcc54d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 338, + 89, + 464, + 215 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c36374877d414d5c0a500370084b52c0365a1cd37b2217e3775ac3f98196002a.jpg", + "image_caption": [ + "Figure 7. Visualization of learned Gaussian. Colored with position coordinates" + ], + "image_footnote": [], + "bbox": [ + 81, + 247, + 205, + 398 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d1595a23c127bab518436e79dcb2d0262f38899d2fe05de461989ccedff79551.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 222, + 250, + 336, + 398 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c381318dca20e7e09894c7b90be8438e9b0fd7baf87da7f2b03908b238ce2353.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 351, + 250, + 464, + 398 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "that the point cloud of our method has a clear local geometric structure.", + "bbox": [ + 76, + 452, + 468, + 481 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 494, + 228, + 510 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conduct ablation studies on the synthetic dataset $(800 \\times 800)$ to verify the effectiveness of our proposed components. In Table 4, vanilla model is a simple MLP model without our components.", + "bbox": [ + 76, + 520, + 468, + 579 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effect of geometric-aware features. To learn the geometric information of the object in our Gaussian canonical field, we voxelize the 3D Gaussian distributions and extract geometric aware features using our 3D U-Net. To demonstrate the effectiveness of this design, we test our method with geometric branch blocks and leave others unchanged. In Table 4, ours full has a clear advantage over w/o geo. feat., and our geometry branch plays the most important role among the components studied in the ablations.", + "bbox": [ + 75, + 580, + 468, + 715 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Fig. 6, we visualize the learned geometric-aware features. We color the point clouds with the learned features, and it shows meaningful geometric information. Interestingly, we can see an obvious difference in the learned features between the moving objects (bucket of the lego and the t-rex body) and the static objects (body of the lego and the ground in t-rex). Also, our geometric-aware features reflect the local geometric structure. 
For example, the spines of the bones on the t-rex tail have similar features, and the smooth part of the tail bones have other patterns.", + "bbox": [ + 75, + 718, + 468, + 868 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Different geometric features. We use the PointNet-like architecture and plane projection (2D CNN) to conduct ex", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c8f90c2733fbdbf036dfe9a0b473b763c1af4b3a402015c75da8a541c1098201.jpg", + "table_caption": [ + "Table 4. Ablation Study. Ablation studys in terms of average PSNR, SSIM, and LPIPS. The best results are highlighted in bold." + ], + "table_footnote": [], + "table_body": "
Method | PSNR↑ | SSIM↑ | LPIPS↓
w/o geo. feat. | 37.5757 | 0.9841 | 0.0173
w/o 6D rotation | 37.8750 | 0.9851 | 0.0154
canonical DC | 37.8026 | 0.9847 | 0.0166
vanilla | 35.2307 | 0.9793 | 0.0242
PointNet feat. | 36.7353 | 0.9826 | 0.0184
Plane feat. | 35.9054 | 0.9811 | 0.0212
ours full | 38.0134 | 0.9853 | 0.0153
", + "bbox": [ + 540, + 121, + 846, + 252 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "periments. Compared with the results (dubbed as \"PointNet feat.\" and \"Plane feat.\") in Table 4, it can be observed that our method achieves significant performance gains.", + "bbox": [ + 498, + 253, + 890, + 300 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6D representation. To study the effect of 6D representation of the rotation parameters of the 3D Gaussian, we conduct an experiment that replaces the 6D vector with quaternion $\\mathbf{q}$ which is used in the original 3D-GS. To deform the 3D Gaussian in canonical space, our deformation field estimates a $\\Delta \\mathbf{q}_{\\mathrm{t}}$ and gets $\\mathbf{q}_{\\mathrm{t}} = \\mathbf{q} + \\Delta \\mathbf{q}_{\\mathrm{t}}$ , using the quaternion add operation. In Table 4, quaternion demonstrates an obvious performance drop, which proves the effectiveness of the 6D representation.", + "bbox": [ + 498, + 301, + 890, + 435 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Density control. In terms of density control, we test the setting that only uses the 3D Gaussian in canonical space without considering the transform 3D Gaussian at other timestamps. In Table 4, canonical DC shows a performance drop, as the canonical 3D Gaussian alone cannot reflect the over/under reconstruction information at all timestamps for dynamic scenes.", + "bbox": [ + 498, + 436, + 890, + 542 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 559, + 617, + 574 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we have proposed a 3D geometry aware Gaussian Splatting solution for dynamic view synthesis. We addressed the limitations of existing approaches from two perspectives: 1) we introduced 3D sparse convolution to extract local structural information effectively and efficiently for deformation learning, and 2) we represented the dynamic scenes as a collection of deforming 3D Gaussian distributions, which are optimized to deform (move, rotate, scaling) over time. Experimental results across synthetic and real datasets demonstrate the superiority of our solution in dynamic view synthesis and 3D reconstruction. We plan to further investigate explicit motion modeling by exploiting the foreground and background motion segmentation cues.", + "bbox": [ + 496, + 585, + 890, + 782 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 500, + 799, + 658, + 815 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We thank the area chairs and the reviewers for their insightful and positive feedback. This work was supported in part by the National Science Fund of China (Grant Nos. 62271410, 62306238) and the Fundamental Research Funds for the Central Universities.", + "bbox": [ + 498, + 825, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8907", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Kara-Ali Aliev, Artem Sevastopolsky, Maria Kolos, Dmitry Ulyanov, and Victor Lempitsky. Neural point-based graphics. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 
2", + "[2] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-NeRF: A multiscale representation for antiailiasing neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[3] Jonathan T. Barron, Ben Mildenhall, Dor Verbin, Pratul P. Srinivasan, and Peter Hedman. Zip-NeRF: Anti-aliased grid-based neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2", + "[4] Mark Boss, Raphael Braun, Varun Jampani, Jonathan T Barron, Ce Liu, and Hendrik Lensch. NeRD: Neural reflectance decomposition from image collections. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques, 2001. 2", + "[6] Ang Cao and Justin Johnson. HexPlane: A fast representation for dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3", + "[7] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. MVSNeRF: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[8] Shenchang Eric Chen and Lance Williams. View interpolation for image synthesis. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1993. 2", + "[9] Inchang Choi, Orazio Gallo, Alejandro Troccoli, Min H Kim, and Jan Kautz. Extreme view synthesis. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019. 2", + "[10] Yilun Du, Yinan Zhang, Hong-Xing Yu, Joshua B Tenenbaum, and Jiajun Wu. Neural radiance flow for 4d view synthesis and video processing. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 3", + "[11] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In Proceedings of the Conference on Computer Graphics and Interactive Techniques in Asia (SIGGRAPH ASIA), 2022. 1, 2, 3, 6, 7", + "[12] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. DeepStereo: Learning to predict new views from the world's imagery. In Proceedings of the IEEE Conference" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "on Computer Vision and Pattern Recognition (CVPR), 2016. 2", + "[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-Planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3", + "[14] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 3", + "[15] Ned Greene. Environment mapping and other applications of world projections. IEEE Computer Graphics and Applications, 1986. 2", + "[16] Xiang Guo, Guanying Chen, Yuchao Dai, Xiaqing Ye, Jiadai Sun, Xiao Tan, and Errui Ding. Neural deformable voxel grid for fast optimization of dynamic view synthesis. 
In Proceedings of the Asian Conference on Computer Vision (ACCV), 2022. 1, 2, 3, 6", + "[17] Xiang Guo, Jiadai Sun, Yuchao Dai, Guanying Chen, Xiaoqing Ye, Xiao Tan, Errui Ding, Yumeng Zhang, and Jingdong Wang. Forward flow for novel view synthesis of dynamic scenes. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 1, 2, 3, 6", + "[18] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (TOG), 2018. 2", + "[19] Wenbo Hu, Yuling Wang, Lin Ma, Bangbang Yang, Lin Gao, Xiao Liu, and Yuewen Ma. Tri-MipRF: Tri-mip representation for efficient anti-aliasing neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2", + "[20] Nima Khademi Kalantari, Ting-Chun Wang, and Ravi Ramamoorthi. Learning-based view synthesis for light field cameras. ACM Transactions on Graphics (TOG), 2016. 2", + "[21] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3D Gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (TOG), 2023. 1, 2, 3, 4, 5, 6, 7", + "[22] Marc Levoy and Pat Hanrahan. Light field rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1996. 2", + "[23] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3", + "[24] David B Lindell, Julien NP Martel, and Gordon Wetzstein. AutoInt: Automatic integration for fast neural volume rendering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[25] Baoyuan Liu, Min Wang, Hassan Foroosh, Marshall Tappen, and Marianna Pensky. Sparse convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. 4", + "[26] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. In Proceed-" + ], + "bbox": [ + 503, + 92, + 893, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "8908", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ings of the Advances in Neural Information Processing Systems (NeurIPS), 2020. 2", + "[27] Yu-Lun Liu, Chen Gao, Andreas Meuleman, Hung-Yu Tseng, Ayush Saraf, Changil Kim, Yung-Yu Chuang, Johannes Kopf, and Jia-Bin Huang. Robust dynamic radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 3", + "[28] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. ACM Transactions on Graphics (TOG), 2021. 2", + "[29] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[30] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. 
In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 1, 2, 4, 5", + "[31] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (TOG), 2022. 2", + "[32] Thomas Neff, Pascal Stadlbauer, Mathias Parger, Andreas Kurz, Joerg H. Mueller, Chakravarty R. Alla Chaitanya, Anton S. Kaplanyan, and Markus Steinberger. DONeRF: Towards real-time rendering of compact neural radiance fields using depth oracle networks. Computer Graphics Forum (CGF), 2021. 2", + "[33] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 1, 3", + "[34] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. HyperNeRF: A higher-dimensional representation for topologically varying neural radiance fields. ACM Transactions on Graphics (TOG), 2021. 1, 3, 6, 7", + "[35] Eric Penner and Li Zhang. Soft 3D reconstruction for view synthesis. ACM Transactions on Graphics (TOG), 2017. 2", + "[36] Martin Piala and Ronald Clark. TermiNeRF: Ray termination prediction for efficient neural rendering. In Proceedings of the International Conference on 3D Vision (3DV), 2021. 2", + "[37] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1, 2, 3, 6", + "[38] Daniel Rebain, Wei Jiang, Soroosh Yazdani, Ke Li, Kwang Moo Yi, and Andrea Tagliasacchi. DeRF: Decomposed radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + ], + "bbox": [ + 78, + 92, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[39] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. KiloNeRF: Speeding up neural radiance fields with thousands of tiny MLPs. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[40] Konstantinos Rematas, Andrew Liu, Pratul P. Srinivasan, Jonathan T. Barron, Andrea Tagliasacchi, Thomas Funkhouser, and Vittorio Ferrari. Urban radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[41] Gernot Riegler and Vladlen Koltun. Free view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2", + "[42] Gernot Riegler and Vladlen Koltun. Stable view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[43] Harry Shum and Sing Bing Kang. Review of image-based rendering techniques. In Visual Communications and Image Processing (VCIP), 2000. 2", + "[44] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. NeRFPlayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics (TVCG), 2023. 3", + "[45] Pratul P Srinivasan, Boyang Deng, Xiuming Zhang, Matthew Tancik, Ben Mildenhall, and Jonathan T Barron. NeRV: Neural reflectance and visibility fields for relighting and view synthesis. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[46] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct Voxel Grid Optimization: Super-fast convergence for radiance fields reconstruction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[47] Matthew Tancik, Vincent Casser, Xinchen Yan, Sabeek Pradhan, Ben Mildenhall, Pratul P Srinivasan, Jonathan T Barron, and Henrik Kretzschmar. Block-NeRF: Scalable large scene neural view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[48] Ayush Tewari, Ohad Fried, Justus Thies, Vincent Sitzmann, Stephen Lombardi, Kalyan Sunkavalli, Ricardo MartinBrualla, Tomas Simon, Jason Saragih, Matthias Nießner, et al. State of the art on neural rendering. In Computer Graphics Forum (CGF), 2020. 2", + "[49] Ayush Tewari, O Fried, J Thies, V Sitzmann, S Lombardi, Z Xu, T Simon, M Nießner, E Tretschk, L Liu, et al. Advances in neural rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 2021. 2", + "[50] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 2019. 2", + "[51] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Pro" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "8909", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 1, 3", + "[52] Alex Trevithick and Bo Yang. GRF: Learning a general radiance field for 3D representation and rendering. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[53] Haithem Turki, Jason Y Zhang, Francesco Ferroni, and Deva Ramanan. SUDS: Scalable urban dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 3", + "[54] Chaoyang Wang, Ben Eckart, Simon Lucey, and Orazio Gallo. Neural trajectory fields for dynamic novel view synthesis. arXiv preprint arXiv:2105.05994, 2021. 3", + "[55] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. IBRNet: Learning multi-view image-based rendering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[56] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Xinggang Wang. 4D Gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3, 6, 7", + "[57] Wenqi Xian, Jia-Bin Huang, Johannes Kopf, and Changil Kim. Space-time neural irradiance fields for free-viewpoint video. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3", + "[58] Yuanbo Xiangli, Linning Xu, Xingang Pan, Nanxuan Zhao, Anyi Rao, Christian Theobalt, Bo Dai, and Dahua Lin. BungeeNeRF: Progressive neural radiance field for extreme multi-scale scene rendering. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[59] Zexiang Xu, Sai Bi, Kalyan Sunkavalli, Sunil Hadap, Hao Su, and Ravi Ramamoorthi. Deep view synthesis from sparse photometric images. ACM Transactions on Graphics (TOG), 2019. 2", + "[60] Wenqi Yang, Guanying Chen, Chaofeng Chen, Zhenfang Chen, and Kwan-Yee K Wong. S $^3$ -NeRF: Neural reflectance field from shading and shadow under a single viewpoint. In Proceedings of the Advances in Neural Information Processing Systems (NeurIPS), 2022. 2", + "[61] Jae Shin Yoon, Kihwan Kim, Orazio Gallo, Hyun Soo Park, and Jan Kautz. Novel view synthesis of dynamic scenes with globally coherent depths from a monocular camera. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[62] Alex Yu, Sara Fridovich-Keil, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[63] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[64] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images." + ], + "bbox": [ + 78, + 92, + 470, + 902 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[65] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. NeRF++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2", + "[66] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 6", + "[67] Xiuming Zhang, Pratul P Srinivasan, Boyang Deng, Paul Debevec, William T Freeman, and Jonathan T Barron. NeR-Factor: Neural factorization of shape and reflectance under an unknown illumination. ACM Transactions on Graphics (TOG), 2021. 2", + "[68] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 4", + "[69] M. Zwicker, H. Pfister, J. van Baar, and M. Gross. Ewa volume splatting. In Proceedings of IEEE Visualization (VIS), 2001. 
4" + ], + "bbox": [ + 501, + 92, + 890, + 402 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "8910", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/f455d128-070e-4b9d-a550-262379c7f3f3_model.json b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/f455d128-070e-4b9d-a550-262379c7f3f3_model.json new file mode 100644 index 0000000000000000000000000000000000000000..1006880d47d7ce6580e2572cc424c01e6cb97322 --- /dev/null +++ b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/f455d128-070e-4b9d-a550-262379c7f3f3_model.json @@ -0,0 +1,2598 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.131, + 0.892, + 0.152 + ], + "angle": 0, + "content": "3D Geometry-aware Deformable Gaussian Splitting for Dynamic View Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.18, + 0.727, + 0.217 + ], + "angle": 0, + "content": "Zhicheng Lu\\(^{1*}\\), Xiang Guo\\(^{1*}\\), Le Hui\\(^{1\\dagger}\\), Tianrui Chen\\(^{1,2}\\), Min Yang\\(^{2}\\), Xiao Tang\\(^{2}\\), Feng Zhu\\(^{2}\\), Yuchao Dai\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.217, + 0.743, + 0.234 + ], + "angle": 0, + "content": "1Northwestern Polytechnical University 2Samsung R&D Institute" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.237, + 0.714, + 0.251 + ], + "angle": 0, + "content": "{zhichenglu, guoxiang, cherryxchen}@mail.nwpu.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.255, + 0.821, + 0.269 + ], + "angle": 0, + "content": "{daiyuchao, huile}@nwpu.edu.cn {min16.yang, xiao1.tang, f15.zhu}@samsung.com" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.305, + 0.313, + 0.32 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.337, + 0.474, + 0.685 + ], + "angle": 0, + "content": "In this paper, we propose a 3D geometry-aware deformable Gaussian Splatting method for dynamic view synthesis. Existing neural radiance fields (NeRF) based solutions learn the deformation in an implicit manner, which cannot incorporate 3D scene geometry. Therefore, the learned deformation is not necessarily geometrically coherent, which results in unsatisfactory dynamic view synthesis and 3D dynamic reconstruction. Recently, 3D Gaussian Splatting provides a new representation of the 3D scene, building upon which the 3D geometry could be exploited in learning the complex 3D deformation. Specifically, the scenes are represented as a collection of 3D Gaussian, where each 3D Gaussian is optimized to move and rotate over time to model the deformation. To enforce the 3D scene geometry constraint during deformation, we explicitly extract 3D geometry features and integrate them in learning the 3D deformation. In this way, our solution achieves 3D geometry-aware deformation modeling, which enables improved dynamic view synthesis and 3D dynamic reconstruction. 
Extensive experimental results on both synthetic and real datasets prove the superiority of our solution, which achieves new state-of-the-art performance. The project is available at https://npucvr.github.io/GaGS/." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.716, + 0.208, + 0.731 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.741, + 0.47, + 0.863 + ], + "angle": 0, + "content": "Dynamic View Synthesis (DVS) aims at rendering novel photorealistic views at arbitrary viewpoints and any input time step given a monocular video of a dynamic scene, which has broad applications in virtual reality and augmented reality. Recently, empowered with effective representations such as neural radiance fields (NeRF) [30] and Gaussian Splitting [21], novel view synthesis for static scenes has been greatly advanced. However, this success" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.306, + 0.889, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.495, + 0.892, + 0.607 + ], + "angle": 0, + "content": "Figure 1. Geometric information exploited by different methods. a) Early dynamic NeRF methods such as DNeRF[37] directly encode the coordinate \\(\\mathbf{p}\\) of the sample point as input feature for deformation network. b) Interpolation is used to fuse features from neighbouring grids and multiscale interpolation enhances the local geometry information [11, 16, 27, 53]. c) We propose to voxelize a set of Gaussian distributions and use a sparse convolution network to extract geometry-aware features for deformation learning." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.613, + 0.892, + 0.703 + ], + "angle": 0, + "content": "cannot be extended to its dynamic counterpart directly. This is mainly due to the difficulty in modeling and representing the scene deformation. Due to the inherent motion/shape ambiguity in monocular dynamic 3D representation, dynamic scene modeling and synthesis are more challenging, especially for monocular video with limited observations." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.893, + 0.9 + ], + "angle": 0, + "content": "In addressing the above challenges, one common strategy is to represent the dynamic scenes as a combination of a static canonical field and a deformation model [11, 16, 17, 27, 33, 34, 37, 51, 53], whereas the bottleneck lies in representing the diverse and complex real-world 3D deformation. To represent geometrically consistent 3D deformation, the local geometric/structural information is critical, since the deformations of the objects in the real world are highly correlated to their 3D structures. Furthermore, the motions of the object points are deeply coupled with the motions of their neighboring points. Thus, how to incorporate the local geometric information to learn locally smooth and consistent 3D deformations becomes the research focus in DVS." + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.875, + 0.212, + 0.888 + ], + "angle": 0, + "content": "*Equal contributions." + }, + { + "type": "page_footnote", + "bbox": [ + 0.098, + 0.888, + 0.227, + 0.9 + ], + "angle": 0, + "content": "† Corresponding authors." 
+ }, + { + "type": "list", + "bbox": [ + 0.096, + 0.875, + 0.227, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8900" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.364 + ], + "angle": 0, + "content": "Recently, different deformation models have utilized the local geometric information, but they all have their limitations. As shown in Fig. 1 a), originally in D-NeRF [37], the feature (positional encoding) of each sampled point is extracted independently with each other. Following works notice that this method could not handle the complex dynamic scene since the extracted features contain little information from neighboring points. In Fig. 1 b), interpolation is introduced to fuse features of neighboring grids. NDVG [16] and RoDynRF [27] gradually increase the voxel resolution so that the large voxel size could cover a larger area, introducing the local smoothness at the early stage of the training. However, this strategy has a limited cover range of local areas and cannot work at a later training stage. TiNeuVox [11] and SUDS [53] interpolate with multi-scales. Nevertheless, the interpolation operation is rather simple in extracting local geometric information and introduces un-smoothness and artifacts [3, 19]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.366, + 0.473, + 0.638 + ], + "angle": 0, + "content": "In modling the nonrigid deformation, it is crucial to account for the consistency in the motion of local neighborhood. Since point-level MLP has a limited receptive field, which cannot capture the local geometric features of point clouds. To utilize the local geometric information effectively, we propose to use 3D sparse convolution. As shown in Fig. 1 c), building upon the recent explicit point cloud based Gaussian Splatting representation, we introduce a sparse convolution network to extract 3D geometry-aware features. Compared with simple feature interpolation, the convolutional neural network is superior in extracting local information and has a much larger reception field. Also, we treat the 3D Gaussian distributions as point clouds, which enable sparse 3D convolution for time and memory efficiency. Note that FDNeRF [17] uses a 3D U-Net to inpaint the missing area in the voxel grid. But this inpaint network is not used for deformation modeling, while the rendering speed and voxel resolution are also limited." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.642, + 0.473, + 0.762 + ], + "angle": 0, + "content": "Originally in Gaussian Splitting [21], the rotation parameter of each Gaussian is represented by quaternion. However, quaternion representation for rotation is discontinuous in parameter space for neural network learning [68]. We introduce the continuous 6D rotation [68] to ensure that the network learns a continuous function in the parameter space, which accurately represents the rotational states of each Gaussian at different time." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.765, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Overall, our method mainly has two components: a Gaussian canonical field and a deformation field. The Gaussian canonical field consists of 3D Gaussian distributions and a geometry-aware feature learning network. 
The explicit 3D Gaussian distribution represents the geometry of the canonical scene, and the sparse 3D CNN network extracts local structural/geometric information for each Gaussian. The deformation field estimates a transformation for each Gaussian in the canonical field, which transfers the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.137 + ], + "angle": 0, + "content": "Gaussian from the canonical field to the given timestamp. Finally, we use 3D Gaussian splatting to render images for the given timestamp." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.138, + 0.807, + 0.151 + ], + "angle": 0, + "content": "Our main contributions are summarized as:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.154, + 0.892, + 0.198 + ], + "angle": 0, + "content": "- We propose a geometry-aware feature extraction network based on 3D Gaussian distribution to better utilize local geometric information." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.199, + 0.892, + 0.243 + ], + "angle": 0, + "content": "- We propose to use continuous 6D rotation representation and modified density control strategy to adapt Gaussian splatting to dynamic scenes." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.244, + 0.892, + 0.289 + ], + "angle": 0, + "content": "- Extensive experiments on both synthetic and real datasets show that our method surpasses competing methods by a wide margin." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.154, + 0.892, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.304, + 0.642, + 0.32 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.329, + 0.7, + 0.345 + ], + "angle": 0, + "content": "2.1. Novel View Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.352, + 0.892, + 0.519 + ], + "angle": 0, + "content": "Novel View Synthesis (NVS) is a well-known task in both computer vision and graphics [5, 8, 15, 22]. Surveys such as [43, 48, 49] provide comprehensive discussions. Explicit NVS methods generally reconstruct an explicit 3D model of a scene in the form of point clouds [1], voxels, or meshes [18, 41, 42, 50]. Once the geometry of the scene is represented, novel view images can be rendered from arbitrary viewpoints via manipulating the camera pose parameters. Other methods [9, 12, 20, 35, 41, 42, 59] tackle NVS by estimating depth maps using multi-view geometry, whereas the features are aggregated from co-visible frames." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.519, + 0.892, + 0.822 + ], + "angle": 0, + "content": "Neural Radiance Fields (NeRF) [30] is a groundbreaking approach that utilizes Multi-Layer Perceptrons (MLPs) to represent scenes implicitly. This methodology enables the modeling of a 5D radiance field, resulting in the impressive synthesis of views for static scenes. Numerous subsequent works expand the capabilities of NeRF by adapting it to various scenarios, such as handling larger and unbounded scenes [29, 40, 47, 58, 65], scene editing and relighting, [4, 45, 60, 67], [2, 3, 19], and improving the generalization ability [7, 52, 55, 64]. Meanwhile, researchers focus on achieving more efficient rendering and optimization in a NeRF-like framework. 
[6, 13, 24, 26, 28, 32, 36, 63] investigate efficient sampling methods along each ray for color accumulation, while [38, 39] partition the scene into multiple sub-regions as an efficient pre-processing, and [6, 13, 31, 46, 62] exploit voxel-grid representation to speed up the optimization. Very recently, [21] proposes to use 3D Guussian distribution to represent the scene, obtaining promising results. However, these methods are mainly applicable to static scenes, and fail in scenes with dynamic objects." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.725, + 0.848 + ], + "angle": 0, + "content": "2.2. Dynamic View Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.903 + ], + "angle": 0, + "content": "A recent trend in NVs is to extend the success in static NVs to dynamic NVs. One viable strategy is to construct a 4D spatial-temporal representation. Yoon et al. [61] combine" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "8901" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.089, + 0.868, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.306, + 0.894, + 0.377 + ], + "angle": 0, + "content": "Figure 2. The pipeline of our proposed 3D geometry-aware deformable Gaussian splitting. In the Gaussian canonical field, we reconstruct a static scene in canonical space using 3D Gaussian distributions. We extract positional features using an MLP, as well as local geometric features using a 3D U-Net, fused by another MLP to form the geometry-aware features. In the deformation field, taking the geometry-aware features and timestamp \\( t \\), an MLP estimates the 3D Gaussian deformation, which transfers the canonical 3D Gaussian distributions to timestamp \\( t \\). Finally, a rasterizer renders the transformed 3D Gaussian to images." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.38, + 0.473, + 0.607 + ], + "angle": 0, + "content": "single-view and multi-view depth to achieve NVS by 3D warping. Gao et al. [14] use a time-invariant model and a time-varying model to represent the static part and dynamic part of a scene, respectively, and use scene flow for motion modeling. NeRFlow [10] proposes a 4D spatial-temporal representation of a dynamic scene. Xian et al. [57] map a spatial-temporal location to the color and volume density by a 4D spatial-temporal radiance field. NSFF [23] represents a dynamic scene as a continuously changing function, encompassing various aspects of the scene, including appearance, geometry, and 3D scene motion. DCT-NeRF [54] uses the Discrete Cosine Transform (DCT) to replace the scene flow in NSFF [23] to enable smoother motion trajectories. HexPlane [6] and K-Plane [13] project 4D spatial-temporal space to multiple 2D planes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.611, + 0.47, + 0.852 + ], + "angle": 0, + "content": "On the other hand, works such as [11, 16, 17, 27, 33, 34, 37, 44, 51, 53] decode the dynamic scene with a canonical field and a deformation field. Along this pipeline, D-NeRF [37] first proposes the canonical-based framework. However, the deformation network utilizes positional features with little geometry information, which cannot handle complex dynamic scenarios well. Nerfies [33] proposes a coarse-to-fine optimization method for coordinate-based models that allows for more robust optimization. 
HyperNeRF [34] lifts the canonical field into a higher dimensional space to handle topological changes. NDVG [16] and RoDynRF [27] gradually increase the voxel resolution, which has two benefits. TiNeuVox [11] and SUDS [53] interpolate the features with multi-scales for deformation learning. The multi-scales interpolation covers a larger reception field, which benefits modeling varying motions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Very recently, with the stunning debut of 3D Gaussian [21], some works introduce this point-based representation into their pipelines to synthesize high-fidelity images" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.38, + 0.895, + 0.532 + ], + "angle": 0, + "content": "of a dynamic scene. Wu et al. [56] introduce a 4D Gaussian Splitting representation and utilize a deformation field to model both Gaussian motions and shape changes. However, the multi-scale HexPlane interpolation has limited ability in extracting the geometry information, which is still insufficient for modeling complex motions. The projection-based representation compresses the 3D space to 2D space, losing 3D geometric information for deformation learning. In contrast, our canonical-based method can fully leverage the 3D information in 3D space." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.555, + 0.593, + 0.571 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.584, + 0.895, + 0.901 + ], + "angle": 0, + "content": "In this section, we present our 3D geometry-aware deformable Gaussian Splating solution for dynamic view synthesis, where an overview of our pipeline is illustrated in Fig. 2. Given a set of images or monocular video of a dynamic scene with frames with corresponding time labels and known camera intrinsic and extrinsic parameters, our goal is to synthesize a novel view at any desired view at any desired time. Our method mainly consists of two core components: the Gaussian canonical field is used to learn the reconstruction of static scenes, while the deformation field is used to learn object deformation. First, we review the static 3D Gaussian splatting in Sec. 3.1. Then, we introduce the proposed Gaussian canonical field in Sec. 3.2, which consists of 3D Gaussian distributions and a geometry feature learning network. Next, in Sec. 3.3, we propose a 3D geometry-aware deformation field to learn transformations for given time steps, which transform our canonical 3D Gaussian distributions to corresponding times. In Sec. 3.4, we explain the process of rendering images from transformed 3D Gaussian distributions. Finally, we present our losses and density control modifications in Sec. 3.5." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8902" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.091, + 0.208, + 0.108 + ], + "angle": 0, + "content": "3.1. Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.114, + 0.47, + 0.16 + ], + "angle": 0, + "content": "3D-GS [21] represents the scene with sparse 3D Gaussians distributions. 
Each Gaussian has an anisotropic covariance \\(\\Sigma \\in \\mathbb{R}^{3\\times 3}\\) and a mean value \\(\\mu \\in \\mathbb{R}^3\\):" + }, + { + "type": "equation", + "bbox": [ + 0.17, + 0.168, + 0.469, + 0.188 + ], + "angle": 0, + "content": "\\[\n\\mathbf {G} (\\mathbf {x}) = e ^ {- \\frac {1}{2} (\\mathbf {x} - \\mu) ^ {\\top} \\boldsymbol {\\Sigma} ^ {- 1} (\\mathbf {x} - \\mu)}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.197, + 0.469, + 0.258 + ], + "angle": 0, + "content": "The covariance matrix \\(\\pmb{\\Sigma}\\) can be decomposed into a scaling matrix \\(\\mathbf{S} \\in \\mathbb{R}^{3 \\times 3}\\) and a rotation matrix \\(\\mathbf{R} \\in \\mathrm{SO}(3)\\). This ensures that the covariance matrix is positive semi-definite, while reducing the learning difficulty of 3D Gaussians:" + }, + { + "type": "equation", + "bbox": [ + 0.215, + 0.266, + 0.469, + 0.284 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\Sigma} = \\mathbf {R} \\mathbf {S} \\mathbf {S} ^ {\\top} \\mathbf {R} ^ {\\top}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.294, + 0.469, + 0.34 + ], + "angle": 0, + "content": "To render an image from a designated viewpoint, the covariance matrix \\(\\pmb{\\Sigma}^{\\prime}\\) in camera coordinates can be calculated by giving a viewing transformation \\(\\mathbf{W}\\), followed by [69]:" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.347, + 0.469, + 0.365 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\Sigma} ^ {\\prime} = \\mathbf {J} \\mathbf {W} \\boldsymbol {\\Sigma} \\mathbf {W} ^ {\\top} \\mathbf {J} ^ {\\top}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.375, + 0.469, + 0.419 + ], + "angle": 0, + "content": "where \\(\\mathbf{J}\\) is the Jacobian of the affine approximation of the projective transformation, and \\(\\mathbf{W}\\) is the world to camera transformation matrix." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.42, + 0.469, + 0.526 + ], + "angle": 0, + "content": "Each Gaussian is parameterized into the following attributes: position \\(\\mathbf{x} \\in \\mathbb{R}^3\\), color defined by spherical harmonics coefficients \\(\\mathbf{c} \\in \\mathbb{R}^k\\), rotation \\(\\mathbf{r} \\in \\mathbb{R}^4\\), scale \\(\\mathbf{s} \\in \\mathbb{R}^3\\), and opacity \\(o \\in \\mathbb{R}\\). Point-based \\(\\alpha\\)-blending and volumetric rendering like NeRF [30] essentially share the same image formation model for the splattering process. Specifically, the color \\(\\mathbf{C}\\) of each pixel is influenced by the related Gaussians:" + }, + { + "type": "equation", + "bbox": [ + 0.213, + 0.537, + 0.469, + 0.578 + ], + "angle": 0, + "content": "\\[\n\\mathbf {C} = \\sum_ {i = 1} ^ {N} \\mathbf {T} _ {i} \\alpha_ {i} \\mathbf {c} _ {i}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.583, + 0.469, + 0.629 + ], + "angle": 0, + "content": "where \\(\\alpha_{i}\\) represents the density of the Gaussian point computed by a Gaussian with covariance \\(\\pmb{\\Sigma}\\) multiplied by its opacity." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.636, + 0.312, + 0.65 + ], + "angle": 0, + "content": "3.2. Gaussian Canonical Field" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.66, + 0.469, + 0.719 + ], + "angle": 0, + "content": "In this section, we first reconstruct a static scene in canonical space. 
Then, we propose a geometric branch, which enables geometry feature learning of the 3D Gaussian distributions for the subsequent deformation field." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.72, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Gaussian parameters. Similar to 3D-GS [21], each Gaussian in the canonical space is characterized by position, color, scale, and opacity. Note that for rotation, we are inspired by [68] to use a continuous 6D rotation representation. Compared with the quaternion representation used in 3D-GS, the 6D rotation representation can benefit our method in estimating the deformation of each Gaussian from canonical space to time-space, especially in helping the neural networks to learn smooth rotation variation from time to time. Specifically, we set learnable parameter \\([a_1,a_2]\\) for each Gaussian to denote its rotation in canonical space, where \\(a_1\\) and \\(a_2\\) are the column vectors of three" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.091, + 0.892, + 0.152 + ], + "angle": 0, + "content": "rows, respectively. They are initialized to \\([100]^{\\top}\\) and \\([010]^{\\top}\\), corresponding precisely to the identity rotation matrix. The mapping from this 6D representation vector to SO(3) matrix is defined as [68]:" + }, + { + "type": "equation", + "bbox": [ + 0.547, + 0.162, + 0.892, + 0.213 + ], + "angle": 0, + "content": "\\[\nf _ {\\mathrm {V} 2 \\mathrm {M}} \\left(\\left[ \\begin{array}{c c} | & | \\\\ a _ {1} & a _ {2} \\\\ | & | \\end{array} \\right]\\right) = \\left[ \\begin{array}{c c c} | & | & | \\\\ b _ {1} & b _ {2} & b _ {3} \\\\ | & | & | \\end{array} \\right], \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.54, + 0.235, + 0.892, + 0.288 + ], + "angle": 0, + "content": "\\[\nb _ {i} = \\left[ \\left\\{ \\begin{array}{c c} \\mathcal {N} \\left(a _ {1}\\right) & \\text {i f} i = 1 \\\\ \\mathcal {N} \\left(a _ {2} - \\left(b _ {1} \\cdot a _ {2}\\right) b _ {1}\\right) & \\text {i f} i = 2 \\\\ b _ {1} \\times b _ {2} & \\text {i f} i = 3 \\end{array} \\right] ^ {\\top}, \\right. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.297, + 0.892, + 0.357 + ], + "angle": 0, + "content": "where \\(\\mathcal{N}(\\cdot)\\) denotes a normalization function. “.” represents the inner product of a vector and “\\(\\times\\)” represents vector cross product. V2M in \\(f_{\\mathrm{V2M}}\\) means the transform from 6D vector to rotation matrix." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.358, + 0.892, + 0.538 + ], + "angle": 0, + "content": "Geometry feature learning. To capture the local geometric structure of the canonical scene, we regard the 3D Gaussian as the 3D point cloud, i.e., we only use the 3D coordinates of the 3D Gaussian. In order to handle a large number of point clouds, we leverage a simple two-branch structure: the geometric branch learns local features of point clouds across different receptive fields, while the identity branch preserves the independent point-level features at high resolution. By integrating the geometric branch and identity branch, we can efficiently obtain point-level features at high resolution while embedding the local geometric information of the point cloud." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.54, + 0.892, + 0.63 + ], + "angle": 0, + "content": "The geometric branch leverages the sparse convolution [25] on the sparse voxels to extract local geometric features at different receptive fields. 
Given the point cloud \\(\\mathbf{P} \\in \\mathbb{R}^{N \\times 3}\\), we first transform the high-resolution point clouds into low-resolution voxels by dividing the space through fixed grid size \\(s\\):" + }, + { + "type": "equation", + "bbox": [ + 0.639, + 0.642, + 0.891, + 0.658 + ], + "angle": 0, + "content": "\\[\n\\mathbf {V} = \\operatorname {f l o o r} (\\mathbf {P} / s), \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.67, + 0.892, + 0.745 + ], + "angle": 0, + "content": "where the size of \\(\\mathbf{V}\\) is \\(M\\times 3\\) and \\(M\\) is the number of voxels. Then, we construct a sparse 3D U-Net by stacking a set of sparse convolutions with a skip connection. Taking \\(\\mathbf{V}\\) as input, we perform sparse 3D U-Net to aggregate local features (dubbed as \\(\\mathbf{F}_v\\in \\mathbb{R}^{M\\times C}\\)) of the point clouds." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.746, + 0.892, + 0.903 + ], + "angle": 0, + "content": "The identity branch uses a multi-layer perception (MLP) to map the 3D coordinate of the point cloud into the embedding space (dubbed as \\(\\mathbf{F}_p\\in \\mathbb{R}^{N\\times C}\\)) to maintain the independence of point features. To accurately characterize the local geometric structure of the canonical scene, we fuse the voxel features with local information onto point features. Specifically, we transform the voxel feature \\(\\mathbf{F}_v\\) back to the corresponding points to obtain point-level features \\(\\mathbf{F}_p^{\\prime}\\in \\mathbb{R}^{N\\times C}\\) by assigning the voxel features to the corresponding points within it. Finally, we concatenate \\(\\mathbf{F}_p^{\\prime}\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8903" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "and \\(\\mathbf{F}_p\\) to obtain the fused point-level feature followed by an MLP layer as:" + }, + { + "type": "equation", + "bbox": [ + 0.165, + 0.137, + 0.47, + 0.158 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {\\text {f u s e}} = \\operatorname {M L P} \\left(\\operatorname {C o n c a t} \\left(\\mathbf {F} _ {p} ^ {\\prime}, \\mathbf {F} _ {p}\\right)\\right). \\tag {8}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.171, + 0.254, + 0.187 + ], + "angle": 0, + "content": "3.3. Deformation Field" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.197, + 0.469, + 0.242 + ], + "angle": 0, + "content": "In this section, we propose a deformation field that estimates the deformation of each 3D Gaussian in the canonical space based on a given time \\( t \\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.244, + 0.469, + 0.349 + ], + "angle": 0, + "content": "Deformation estimation. 
We adopt an MLP as the decoder \\(\\mathcal{G}_{\\Phi}\\), which takes the geometry feature learned from the geometry branch in the Gaussian canonical field, the position of each Gaussian, and timestamp as input, outputs the deformation of each Gaussian from canonical space to time \\(t\\), including position deformation \\(\\Delta \\mathbf{x}_{t} \\in \\mathbb{R}^{3}\\), rotation deformation \\(\\Delta \\mathbf{r}_{t} \\in \\mathbb{R}^{6}\\) and scale deformation \\(\\Delta \\mathbf{s}_{t} \\in \\mathbb{R}^{3}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.14, + 0.365, + 0.469, + 0.382 + ], + "angle": 0, + "content": "\\[\n\\Delta \\mathbf {x} _ {\\mathbf {t}}, \\Delta \\mathbf {r} _ {\\mathbf {t}}, \\Delta \\mathbf {s} _ {\\mathbf {t}} = \\mathcal {G} _ {\\Phi} (\\mathbf {F} _ {\\text {f u s e}}, \\gamma (\\mathbf {x}), \\gamma (t)), \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.397, + 0.469, + 0.443 + ], + "angle": 0, + "content": "where \\(\\gamma (\\cdot)\\) denotes the positional encoding in NeRF [30], which maps a one dimension signal from \\(\\mathbb{R}\\) into a higher dimensional space \\(\\mathbb{R}^{2L}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.141, + 0.456, + 0.362, + 0.475 + ], + "angle": 0, + "content": "\\[\n\\gamma (p) = (\\sin \\left(2 ^ {0} \\pi p\\right), \\cos \\left(2 ^ {0} \\pi p\\right),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.478, + 0.469, + 0.492 + ], + "angle": 0, + "content": "\\[\n\\dots , \\tag {10}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.202, + 0.495, + 0.403, + 0.514 + ], + "angle": 0, + "content": "\\[\n\\sin \\left(2 ^ {L - 1} \\pi p\\right), \\cos \\left(2 ^ {L - 1} \\pi p)\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.53, + 0.469, + 0.606 + ], + "angle": 0, + "content": "Note that we set the color parameters \\(\\mathbf{c}\\) and opacity \\(o\\) of canonical 3D Gaussian distributions constant over time. These two factors are highly related to the physical properties of the Gaussian distributions, and we want each distribution to represent the same object area over the timeline." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.607, + 0.469, + 0.653 + ], + "angle": 0, + "content": "Transformation. Using the estimated deformation for time \\( t \\) above, we could transform the 3D Gaussian distributions to current time by" + }, + { + "type": "equation", + "bbox": [ + 0.172, + 0.668, + 0.286, + 0.684 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {t} = \\mathbf {x} + \\Delta \\mathbf {x} _ {\\mathbf {t}},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.178, + 0.687, + 0.469, + 0.703 + ], + "angle": 0, + "content": "\\[\n\\mathbf {s} _ {t} = \\mathbf {s} + \\Delta \\mathbf {s} _ {\\mathbf {t}}, \\tag {11}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.178, + 0.706, + 0.371, + 0.722 + ], + "angle": 0, + "content": "\\[\n\\mathbf {r} _ {t} = f _ {\\mathrm {V 2 M}} \\left(\\Delta \\mathbf {r} _ {\\mathbf {t}}\\right) \\times f _ {\\mathrm {V 2 M}} (\\mathbf {r}).\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.738, + 0.217, + 0.752 + ], + "angle": 0, + "content": "3.4. 
Rasterization" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.763, + 0.469, + 0.824 + ], + "angle": 0, + "content": "Once we have completed preparing the attributes of each Gaussian \\((\\mathbf{x}_t,\\mathbf{c},\\mathbf{r}_t,\\mathbf{s}_t,o)\\), we use the differentiable tile rasterizer [21] to render the image at any desired viewpoint at this timestamp:" + }, + { + "type": "equation", + "bbox": [ + 0.11, + 0.838, + 0.469, + 0.856 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {C}} _ {t} = \\text {R a s t e r i z e r} (\\mathbf {x} _ {t}, \\mathbf {c}, \\mathbf {r} _ {t}, \\mathbf {s} _ {t}, o, \\mathbf {K}, [ \\mathbf {R} | \\mathbf {T} ]), \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.902 + ], + "angle": 0, + "content": "where \\(\\mathbf{K}\\) and \\([\\mathbf{R}|\\mathbf{T}]\\) represent the camera's intrinsic and extrinsic parameters, respectively." + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.091, + 0.891, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.283, + 0.892, + 0.338 + ], + "angle": 0, + "content": "Figure 3. Our density control is designed for dynamic scenes. We control the densification of Gaussian distributions according to their transformed parameters at timestamp \\( t \\) rather than parameters at canonical space." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.341, + 0.64, + 0.357 + ], + "angle": 0, + "content": "3.5. Optimization" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.365, + 0.892, + 0.409 + ], + "angle": 0, + "content": "To optimize the model, we use the photometric loss, and a motion loss, and also adapt the density control from 3D-GS [21] with our modifications." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.41, + 0.892, + 0.456 + ], + "angle": 0, + "content": "Photometric loss. The photometric loss consists of the \\( L_{1} \\) loss and structural similarity loss \\( L_{D - SSIM} \\) between the rendered image \\( \\hat{\\mathbf{C}}_t \\) and ground truth image \\( \\mathbf{C}_t \\)." + }, + { + "type": "equation", + "bbox": [ + 0.574, + 0.466, + 0.891, + 0.483 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {p h o t o}} = (1 - \\lambda) L _ {1} + \\lambda L _ {D - S S I M}. \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.493, + 0.892, + 0.569 + ], + "angle": 0, + "content": "Regularization. We accept the fact that in a scene, the proportion of dynamic points is much smaller than that of static points, and the motion amplitude at dynamic points is not too large. In other words, the point in a scene should be as static as possible," + }, + { + "type": "equation", + "bbox": [ + 0.625, + 0.58, + 0.891, + 0.596 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {m o t i o n}} = \\left\\| \\Delta \\mathbf {x} _ {\\mathbf {t}} \\right\\| _ {1}. \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.606, + 0.866, + 0.621 + ], + "angle": 0, + "content": "Total loss. The total loss we used is defined as follows," + }, + { + "type": "equation", + "bbox": [ + 0.61, + 0.633, + 0.891, + 0.649 + ], + "angle": 0, + "content": "\\[\nL = L _ {\\text {p h o t o}} + \\omega L _ {\\text {m o t i o n}}, \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.66, + 0.892, + 0.841 + ], + "angle": 0, + "content": "where \\(\\omega\\) is a trade-off parameter to balance the components. Density control. 3D-GS has shown that adaptive density control is essential in achieving high rendering performance. 
On the one hand, the Gaussians need to populate empty areas without geometric features. Thus, it simply creates a copy of the Gaussian for under-reconstructed regions. On the other hand, large Gaussians in regions with high variance need to be split into smaller Gaussians. We implement our method like 3D-GS but replace such Gaussians with two new ones, divide their scale by a factor of \\(\\phi = 1.6\\), and initialize their position by using the original 3D Gaussian as a PDF for sampling." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.841, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Our method differs from 3D-GS in the following aspects. For 3D-GS, there only exists sets of Gaussians. However, in our case, we initialize the Gaussians in the canonical space, then estimate the deformations of these Gaussians," + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8904" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.892, + 0.117 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison between our method and competing methods on the D-NeRF dataset. The best results are highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.1, + 0.118, + 0.868, + 0.363 + ], + "angle": 0, + "content": "
Method | Hell Warrior | Mutant | Hook | Bouncing Balls
PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓
3D-GS [21] | 15.3924 | 0.8776 | 0.1300 | 21.7554 | 0.9359 | 0.0575 | 18.6933 | 0.8733 | 0.1144 | 22.5575 | 0.9485 | 0.0647
D-NeRF [37] | 25.0293 | 0.9506 | 0.0691 | 31.2900 | 0.9739 | 0.0268 | 29.2567 | 0.9650 | 0.1174 | 38.9300 | 0.9900 | 0.1031
TiNeuVox-B [11] | 28.2058 | 0.9661 | 0.0631 | 33.9029 | 0.9771 | 0.0301 | 31.7929 | 0.9718 | 0.0436 | 40.8536 | 0.9913 | 0.0401
NDVG [16] | 26.4933 | 0.9600 | 0.0670 | 34.4131 | 0.9801 | 0.0270 | 30.0009 | 0.9626 | 0.0463 | 37.5157 | 0.9874 | 0.0751
FDNeRF [17] | 27.7120 | 0.9665 | 0.0508 | 34.9727 | 0.9810 | 0.0312 | 32.2867 | 0.9756 | 0.0388 | 40.0191 | 0.9912 | 0.0395
4D-GS [56] | 28.1196 | 0.9730 | 0.0276 | 38.3411 | 0.9936 | 0.0062 | 33.1560 | 0.9810 | 0.0168 | 40.7418 | 0.9941 | 0.0105
Ours | 32.2712 | 0.9835 | 0.0164 | 41.4284 | 0.9969 | 0.0029 | 36.9647 | 0.9916 | 0.0076 | 43.5929 | 0.9960 | 0.0061
Method | Lego | T-Rex | Stand Up | Jumping Jacks
PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓ | PSNR↑ | SSIM↑ | LPIPS↓
3D-GS [21] | 23.0991 | 0.9329 | 0.0567 | 25.7496 | 0.9567 | 0.0474 | 19.3779 | 0.9200 | 0.0909 | 20.7163 | 0.9227 | 0.0980
D-NeRF [37] | 21.6427 | 0.8394 | 0.1654 | 31.7568 | 0.9767 | 0.0396 | 32.7992 | 0.9818 | 0.0215 | 32.8031 | 0.9810 | 0.0373
TiNeuVox-B [11] | 25.1748 | 0.9217 | 0.0689 | 32.7750 | 0.9783 | 0.0307 | 36.2031 | 0.9859 | 0.0199 | 34.7390 | 0.9823 | 0.0328
NDVG [16] | 25.0416 | 0.9395 | 0.0534 | 32.6229 | 0.9781 | 0.0330 | 33.2158 | 0.9793 | 0.0302 | 31.2530 | 0.9737 | 0.0398
FDNeRF [17] | 25.2700 | 0.9390 | 0.0460 | 30.7068 | 0.9731 | 0.0368 | 36.9107 | 0.9878 | 0.0188 | 33.5521 | 0.9812 | 0.0329
4D-GS [56] | 25.4024 | 0.9434 | 0.0377 | 33.3912 | 0.9869 | 0.0130 | 38.2610 | 0.9923 | 0.0071 | 35.6656 | 0.9882 | 0.0159
Ours | 25.4411 | 0.9474 | 0.0329 | 39.0285 | 0.9952 | 0.0052 | 42.2101 | 0.9966 | 0.0028 | 37.9604 | 0.9928 | 0.0088
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.365, + 0.89, + 0.392 + ], + "angle": 0, + "content": "Table 2. Quantitative comparison between our method and competing methods on the HyperNeRF dataset. The best results are highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.202, + 0.395, + 0.766, + 0.487 + ], + "angle": 0, + "content": "
Method | Chicken | 3D Printer | Broom | Peel Banana
PSNR↑ | MS-SSIM↑ | PSNR↑ | MS-SSIM↑ | PSNR↑ | MS-SSIM↑ | PSNR↑ | MS-SSIM↑
TiNeuVox [11] | 28.2861 | 0.9474 | 22.7514 | 0.8392 | 21.2682 | 0.6832 | 24.5136 | 0.8743
NDVG [16] | 27.0536 | 0.9390 | 22.4196 | 0.8389 | 21.4658 | 0.7028 | 22.8204 | 0.8279
FDNeRF [17] | 27.9627 | 0.9438 | 22.8027 | 0.8453 | 21.9091 | 0.7154 | 24.2515 | 0.8645
3D-GS [21] | 20.8915 | 0.7426 | 18.3991 | 0.6114 | 20.3953 | 0.6598 | 20.5654 | 0.8094
Ours | 28.5342 | 0.9331 | 22.0403 | 0.8098 | 20.8994 | 0.5241 | 25.5785 | 0.9067
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.491, + 0.47, + 0.533 + ], + "angle": 0, + "content": "Table 3. Quantitative comparison on HyperNeRF dataset: Average on Cut Lemon, Chicken, 3D Printer, and Split Cookie. The best results are highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.145, + 0.538, + 0.396, + 0.609 + ], + "angle": 0, + "content": "
Method | PSNR↑ | SSIM↑ | LPIPS↓
TiNeuVox-B [11] | 27.16 | 0.76 | 0.40
3D-GS [21] | 21.26 | 0.69 | 0.40
4D-GS [56] | 26.98 | 0.78 | 0.31
Ours | 27.52 | 0.80 | 0.25
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.622, + 0.47, + 0.743 + ], + "angle": 0, + "content": "and transform their attributes into a timestamp space. As shown in Fig. 3, we use the Gaussians at the current moment to render the image. Therefore, we determine whether the Gaussians need to conduct density control by the current attributes (like scale) at the current timestamp rather than the canonical attributes. Afterward, we inverse the transformation of the split/cloned Gaussian back to the canonical space." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.76, + 0.208, + 0.777 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.786, + 0.173, + 0.8 + ], + "angle": 0, + "content": "4.1. Dataset" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.47, + 0.903 + ], + "angle": 0, + "content": "In the paper, we use both synthetic and real datasets for evaluating our method. The synthetic dataset D-NeRF [37] contains 8 dynamic scenes, including Hell Warrior, Mutant, Hook, Bouncing Balls, Lego, T-Rex, Stand Up, and Jumping Jacks. The real dataset proposed by HyperNeRF [34], including interp-cut-lemon, interp-cut-lemon1, vrig" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.493, + 0.892, + 0.57 + ], + "angle": 0, + "content": "chicken, vrig-3dprinter, misc-split-codon, and misc-split-codon. Following previous works [21], we report three evaluation metrics, including Peak Signal-to-Noise Ratio (PSNR), Structural Similarity (SSIM), and Learned Perceptual Image Patch Similarity (LPIPS) [66]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.576, + 0.718, + 0.592 + ], + "angle": 0, + "content": "4.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.599, + 0.892, + 0.841 + ], + "angle": 0, + "content": "Our implementation is based on 3D-GS [21]. We trained a total of 40000 iterations, with the first 3000 iterations only optimizing static scenes, and then adding deformation fields to optimize dynamic scenes. The learning rate of our network takes an exponential decay from 8e-4 to 1.6e-6 with the Adam optimizer. Moreover, we use a 2-layer MLP with a width of 64 for the front point feature extraction, and a 3-layer MLP with a width of 64 for the back point feature fusion. Then 5 layers MLP with width 256 and skip connection is used for a decoder. For the positional encoding process, we use \\( L = 10 \\) for position \\( x \\) and \\( L = 6 \\) for timestep \\( t \\). For the D-NeRF dataset, which does not provide point clouds, we randomly initialize 150000 points. Meanwhile, for the HyperNeRF dataset, we use the point cloud provided in its dataset as the initial point cloud. All the experiments are tested on a single RTX 4090 GPU." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.848, + 0.694, + 0.864 + ], + "angle": 0, + "content": "4.3.Quantitative Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Synthetic scenes. We compare our method with recent state-of-the-art methods in the field, including 3D-GS, D-" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8905" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.117, + 0.086, + 0.852, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.206, + 0.358, + 0.761, + 0.373 + ], + "angle": 0, + "content": "Figure 4. 
Qualitative comparisons between baselines and our method on the synthetic dataset." + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.381, + 0.473, + 0.65 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.673, + 0.47, + 0.701 + ], + "angle": 0, + "content": "Figure 5. Qualitative comparisons between baselines and our method on the HyperNeRF real dataset[34]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.473, + 0.903 + ], + "angle": 0, + "content": "NeRF, TiNeuVox, NDVG, FDNeRF, and 4D-GS on the D-NeRF Dataset. As shown in Table 1, we list the results of each scene. It can be observed that our method is significantly better than other methods in terms of all three metrics for physical canonical-based methods. On average, our method significantly improves PSNR compared with static Gaussian, 3D-GS. The computational costs are: training time around 2h (avg. on D-NeRF dataset), render FPS 12 (fixed viewpoint), model size (34MB points cloud + 14MB network). Since it inherently cannot model the deformation of the dynamic scene, 3D-GS performs poorly in dynamic view synthesis." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.38, + 0.895, + 0.548 + ], + "angle": 0, + "content": "Real scenes. We further compare our method with some highly related works on the real scene dataset proposed by [34]. We have shown the detailed results on chicken, 3D printer, broom, and peel banana in Table 2, and the average result on cut lemon, chicken, 3d printer, split cookie in Table 3. It can be observed that our method achieves good performance compared with other state-of-the-art methods. Compared with synthetic datasets, real datasets are more challenging due to the narrow camera viewing range and pose ambiguity. The quantitative results can demonstrate the effectiveness of the proposed method in real scenes." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.559, + 0.698, + 0.573 + ], + "angle": 0, + "content": "4.4. Visualization Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.583, + 0.892, + 0.764 + ], + "angle": 0, + "content": "Visual comparison. In addition to quantitative results, we also provide visualization results of different methods to demonstrate the superiority of our method. For better comparison, we show the rendered images of each synthetic scene from the same viewpoint in Fig. 4. By comparing the visualization results of different methods, it is shown that the rendered images by our method are closer to the ground truth images, indicating that our method can recover accurate and detailed images. In addition, we provide visualization results of the real scenes in Fig. 5. Compared with TiNueVox [11], our method can recover the detailed structure of dynamic objects, like chicken and banana." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.902 + ], + "angle": 0, + "content": "Gaussian visualization. To verify the effectiveness of our method, we show the 3D point cloud of the 3D Gaussian. Specifically, we only use the 3D coordinates of the 3D Gaussian. As shown in Fig. 7, we provide the point clouds of different methods on the synthetic dataset, including 3D-GS [21], 4D-GS [56], and ours. Note that the color of the point cloud is generated by 3D coordinates. Since 3D-DS cannot model dynamic scenes, the quality of the point cloud is poor. 
Comparing 4D-GS with ours, it can be observed" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8906" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.09, + 0.207, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.089, + 0.335, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.339, + 0.09, + 0.465, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.1, + 0.224, + 0.447, + 0.239 + ], + "angle": 0, + "content": "Figure 6. Visualization of learned geometry-aware features." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.248, + 0.206, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.25, + 0.338, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.352, + 0.251, + 0.465, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.412, + 0.47, + 0.439 + ], + "angle": 0, + "content": "Figure 7. Visualization of learned Gaussian. Colored with position coordinates" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.453, + 0.469, + 0.482 + ], + "angle": 0, + "content": "that the point cloud of our method has a clear local geometric structure." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.496, + 0.23, + 0.511 + ], + "angle": 0, + "content": "4.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.521, + 0.469, + 0.58 + ], + "angle": 0, + "content": "We conduct ablation studies on the synthetic dataset \\((800 \\times 800)\\) to verify the effectiveness of our proposed components. In Table 4, vanilla model is a simple MLP model without our components." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.582, + 0.469, + 0.717 + ], + "angle": 0, + "content": "Effect of geometric-aware features. To learn the geometric information of the object in our Gaussian canonical field, we voxelize the 3D Gaussian distributions and extract geometric aware features using our 3D U-Net. To demonstrate the effectiveness of this design, we test our method with geometric branch blocks and leave others unchanged. In Table 4, ours full has a clear advantage over w/o geo. feat., and our geometry branch plays the most important role among the components studied in the ablations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.719, + 0.469, + 0.869 + ], + "angle": 0, + "content": "In Fig. 6, we visualize the learned geometric-aware features. We color the point clouds with the learned features, and it shows meaningful geometric information. Interestingly, we can see an obvious difference in the learned features between the moving objects (bucket of the lego and the t-rex body) and the static objects (body of the lego and the ground in t-rex). Also, our geometric-aware features reflect the local geometric structure. For example, the spines of the bones on the t-rex tail have similar features, and the smooth part of the tail bones have other patterns." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Different geometric features. We use the PointNet-like architecture and plane projection (2D CNN) to conduct ex" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.09, + 0.892, + 0.118 + ], + "angle": 0, + "content": "Table 4. Ablation Study. Ablation studys in terms of average PSNR, SSIM, and LPIPS. 
The best results are highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.542, + 0.122, + 0.847, + 0.253 + ], + "angle": 0, + "content": "
Method | PSNR↑ | SSIM↑ | LPIPS↓
w/o geo. feat. | 37.5757 | 0.9841 | 0.0173
w/o 6D rotation | 37.8750 | 0.9851 | 0.0154
canonical DC | 37.8026 | 0.9847 | 0.0166
vanilla | 35.2307 | 0.9793 | 0.0242
PointNet feat. | 36.7353 | 0.9826 | 0.0184
Plane feat. | 35.9054 | 0.9811 | 0.0212
ours full | 38.0134 | 0.9853 | 0.0153
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.255, + 0.892, + 0.301 + ], + "angle": 0, + "content": "periments. Compared with the results (dubbed as \"PointNet feat.\" and \"Plane feat.\") in Table 4, it can be observed that our method achieves significant performance gains." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.302, + 0.892, + 0.436 + ], + "angle": 0, + "content": "6D representation. To study the effect of 6D representation of the rotation parameters of the 3D Gaussian, we conduct an experiment that replaces the 6D vector with quaternion \\(\\mathbf{q}\\) which is used in the original 3D-GS. To deform the 3D Gaussian in canonical space, our deformation field estimates a \\(\\Delta \\mathbf{q}_{\\mathrm{t}}\\) and gets \\(\\mathbf{q}_{\\mathrm{t}} = \\mathbf{q} + \\Delta \\mathbf{q}_{\\mathrm{t}}\\), using the quaternion add operation. In Table 4, quaternion demonstrates an obvious performance drop, which proves the effectiveness of the 6D representation." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.438, + 0.892, + 0.543 + ], + "angle": 0, + "content": "Density control. In terms of density control, we test the setting that only uses the 3D Gaussian in canonical space without considering the transform 3D Gaussian at other timestamps. In Table 4, canonical DC shows a performance drop, as the canonical 3D Gaussian alone cannot reflect the over/under reconstruction information at all timestamps for dynamic scenes." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.56, + 0.619, + 0.575 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.587, + 0.892, + 0.783 + ], + "angle": 0, + "content": "In this paper, we have proposed a 3D geometry aware Gaussian Splatting solution for dynamic view synthesis. We addressed the limitations of existing approaches from two perspectives: 1) we introduced 3D sparse convolution to extract local structural information effectively and efficiently for deformation learning, and 2) we represented the dynamic scenes as a collection of deforming 3D Gaussian distributions, which are optimized to deform (move, rotate, scaling) over time. Experimental results across synthetic and real datasets demonstrate the superiority of our solution in dynamic view synthesis and 3D reconstruction. We plan to further investigate explicit motion modeling by exploiting the foreground and background motion segmentation cues." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.8, + 0.659, + 0.816 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.826, + 0.892, + 0.9 + ], + "angle": 0, + "content": "We thank the area chairs and the reviewers for their insightful and positive feedback. This work was supported in part by the National Science Fund of China (Grant Nos. 62271410, 62306238) and the Fundamental Research Funds for the Central Universities." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8907" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Kara-Ali Aliev, Artem Sevastopolsky, Maria Kolos, Dmitry Ulyanov, and Victor Lempitsky. Neural point-based graphics. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.255 + ], + "angle": 0, + "content": "[2] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-NeRF: A multiscale representation for antiailiasing neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.258, + 0.472, + 0.327 + ], + "angle": 0, + "content": "[3] Jonathan T. Barron, Ben Mildenhall, Dor Verbin, Pratul P. Srinivasan, and Peter Hedman. Zip-NeRF: Anti-aliased grid-based neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.329, + 0.472, + 0.398 + ], + "angle": 0, + "content": "[4] Mark Boss, Raphael Braun, Varun Jampani, Jonathan T Barron, Ce Liu, and Hendrik Lensch. NeRD: Neural reflectance decomposition from image collections. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.401, + 0.472, + 0.457 + ], + "angle": 0, + "content": "[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques, 2001. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.459, + 0.472, + 0.514 + ], + "angle": 0, + "content": "[6] Ang Cao and Justin Johnson. HexPlane: A fast representation for dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.517, + 0.472, + 0.585 + ], + "angle": 0, + "content": "[7] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. MVSNeRF: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.588, + 0.472, + 0.643 + ], + "angle": 0, + "content": "[8] Shenchang Eric Chen and Lance Williams. View interpolation for image synthesis. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1993. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.645, + 0.472, + 0.7 + ], + "angle": 0, + "content": "[9] Inchang Choi, Orazio Gallo, Alejandro Troccoli, Min H Kim, and Jan Kautz. Extreme view synthesis. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.703, + 0.47, + 0.771 + ], + "angle": 0, + "content": "[10] Yilun Du, Yinan Zhang, Hong-Xing Yu, Joshua B Tenenbaum, and Jiajun Wu. Neural radiance flow for 4d view synthesis and video processing. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.774, + 0.47, + 0.857 + ], + "angle": 0, + "content": "[11] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In Proceedings of the Conference on Computer Graphics and Interactive Techniques in Asia (SIGGRAPH ASIA), 2022. 
1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.47, + 0.902 + ], + "angle": 0, + "content": "[12] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. DeepStereo: Learning to predict new views from the world's imagery. In Proceedings of the IEEE Conference" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "on Computer Vision and Pattern Recognition (CVPR), 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.894, + 0.19 + ], + "angle": 0, + "content": "[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-Planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.193, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[14] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.249, + 0.892, + 0.29 + ], + "angle": 0, + "content": "[15] Ned Greene. Environment mapping and other applications of world projections. IEEE Computer Graphics and Applications, 1986. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.292, + 0.892, + 0.361 + ], + "angle": 0, + "content": "[16] Xiang Guo, Guanying Chen, Yuchao Dai, Xiaqing Ye, Jiadai Sun, Xiao Tan, and Errui Ding. Neural deformable voxel grid for fast optimization of dynamic view synthesis. In Proceedings of the Asian Conference on Computer Vision (ACCV), 2022. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.362, + 0.892, + 0.432 + ], + "angle": 0, + "content": "[17] Xiang Guo, Jiadai Sun, Yuchao Dai, Guanying Chen, Xiaoqing Ye, Xiao Tan, Errui Ding, Yumeng Zhang, and Jingdong Wang. Forward flow for novel view synthesis of dynamic scenes. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.434, + 0.892, + 0.489 + ], + "angle": 0, + "content": "[18] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (TOG), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.49, + 0.892, + 0.559 + ], + "angle": 0, + "content": "[19] Wenbo Hu, Yuling Wang, Lin Ma, Bangbang Yang, Lin Gao, Xiao Liu, and Yuewen Ma. Tri-MipRF: Tri-mip representation for efficient anti-aliasing neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.561, + 0.892, + 0.602 + ], + "angle": 0, + "content": "[20] Nima Khademi Kalantari, Ting-Chun Wang, and Ravi Ramamoorthi. Learning-based view synthesis for light field cameras. ACM Transactions on Graphics (TOG), 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.604, + 0.892, + 0.658 + ], + "angle": 0, + "content": "[21] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3D Gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (TOG), 2023. 
1, 2, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.66, + 0.892, + 0.702 + ], + "angle": 0, + "content": "[22] Marc Levoy and Pat Hanrahan. Light field rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1996. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.703, + 0.892, + 0.758 + ], + "angle": 0, + "content": "[23] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.76, + 0.892, + 0.815 + ], + "angle": 0, + "content": "[24] David B Lindell, Julien NP Martel, and Gordon Wetzstein. AutoInt: Automatic integration for fast neural volume rendering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.817, + 0.892, + 0.871 + ], + "angle": 0, + "content": "[25] Baoyuan Liu, Min Wang, Hassan Foroosh, Marshall Tappen, and Marianna Pensky. Sparse convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[26] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. In Proceed-" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8908" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.093, + 0.469, + 0.12 + ], + "angle": 0, + "content": "ings of the Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.189 + ], + "angle": 0, + "content": "[27] Yu-Lun Liu, Chen Gao, Andreas Meuleman, Hung-Yu Tseng, Ayush Saraf, Changil Kim, Yung-Yu Chuang, Johannes Kopf, and Jia-Bin Huang. Robust dynamic radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.191, + 0.469, + 0.245 + ], + "angle": 0, + "content": "[28] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. ACM Transactions on Graphics (TOG), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.247, + 0.469, + 0.328 + ], + "angle": 0, + "content": "[29] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.33, + 0.469, + 0.399 + ], + "angle": 0, + "content": "[30] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 
1, 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.401, + 0.469, + 0.453 + ], + "angle": 0, + "content": "[31] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (TOG), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.456, + 0.469, + 0.537 + ], + "angle": 0, + "content": "[32] Thomas Neff, Pascal Stadlbauer, Mathias Parger, Andreas Kurz, Joerg H. Mueller, Chakravarty R. Alla Chaitanya, Anton S. Kaplanyan, and Markus Steinberger. DONeRF: Towards real-time rendering of compact neural radiance fields using depth oracle networks. Computer Graphics Forum (CGF), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.539, + 0.469, + 0.607 + ], + "angle": 0, + "content": "[33] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.609, + 0.469, + 0.69 + ], + "angle": 0, + "content": "[34] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. HyperNeRF: A higher-dimensional representation for topologically varying neural radiance fields. ACM Transactions on Graphics (TOG), 2021. 1, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.692, + 0.469, + 0.719 + ], + "angle": 0, + "content": "[35] Eric Penner and Li Zhang. Soft 3D reconstruction for view synthesis. ACM Transactions on Graphics (TOG), 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.721, + 0.469, + 0.761 + ], + "angle": 0, + "content": "[36] Martin Piala and Ronald Clark. TermiNeRF: Ray termination prediction for efficient neural rendering. In Proceedings of the International Conference on 3D Vision (3DV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.763, + 0.469, + 0.829 + ], + "angle": 0, + "content": "[37] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[38] Daniel Rebain, Wei Jiang, Soroosh Yazdani, Ke Li, Kwang Moo Yi, and Andrea Tagliasacchi. DeRF: Decomposed radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[39] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. KiloNeRF: Speeding up neural radiance fields with thousands of tiny MLPs. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.15, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[40] Konstantinos Rematas, Andrew Liu, Pratul P. Srinivasan, Jonathan T. Barron, Andrea Tagliasacchi, Thomas Funkhouser, and Vittorio Ferrari. Urban radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.261 + ], + "angle": 0, + "content": "[41] Gernot Riegler and Vladlen Koltun. Free view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.264, + 0.892, + 0.305 + ], + "angle": 0, + "content": "[42] Gernot Riegler and Vladlen Koltun. Stable view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.307, + 0.892, + 0.347 + ], + "angle": 0, + "content": "[43] Harry Shum and Sing Bing Kang. Review of image-based rendering techniques. In Visual Communications and Image Processing (VCIP), 2000. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.35, + 0.892, + 0.418 + ], + "angle": 0, + "content": "[44] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. NeRFPlayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics (TVCG), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.421, + 0.892, + 0.501 + ], + "angle": 0, + "content": "[45] Pratul P Srinivasan, Boyang Deng, Xiuming Zhang, Matthew Tancik, Ben Mildenhall, and Jonathan T Barron. NeRV: Neural reflectance and visibility fields for relighting and view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.505, + 0.892, + 0.572 + ], + "angle": 0, + "content": "[46] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct Voxel Grid Optimization: Super-fast convergence for radiance fields reconstruction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.576, + 0.892, + 0.657 + ], + "angle": 0, + "content": "[47] Matthew Tancik, Vincent Casser, Xinchen Yan, Sabeek Pradhan, Ben Mildenhall, Pratul P Srinivasan, Jonathan T Barron, and Henrik Kretzschmar. Block-NeRF: Scalable large scene neural view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.661, + 0.892, + 0.729 + ], + "angle": 0, + "content": "[48] Ayush Tewari, Ohad Fried, Justus Thies, Vincent Sitzmann, Stephen Lombardi, Kalyan Sunkavalli, Ricardo MartinBrualla, Tomas Simon, Jason Saragih, Matthias Nießner, et al. State of the art on neural rendering. In Computer Graphics Forum (CGF), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.732, + 0.892, + 0.8 + ], + "angle": 0, + "content": "[49] Ayush Tewari, O Fried, J Thies, V Sitzmann, S Lombardi, Z Xu, T Simon, M Nießner, E Tretschk, L Liu, et al. Advances in neural rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.803, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[50] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[51] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. 
Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Pro" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.515, + 0.956 + ], + "angle": 0, + "content": "8909" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.093, + 0.47, + 0.12 + ], + "angle": 0, + "content": "ceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.121, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[52] Alex Trevithick and Bo Yang. GRF: Learning a general radiance field for 3D representation and rendering. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.177, + 0.471, + 0.232 + ], + "angle": 0, + "content": "[53] Haithem Turki, Jason Y Zhang, Francesco Ferroni, and Deva Ramanan. SUDS: Scalable urban dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.233, + 0.471, + 0.274 + ], + "angle": 0, + "content": "[54] Chaoyang Wang, Ben Eckart, Simon Lucey, and Orazio Gallo. Neural trajectory fields for dynamic novel view synthesis. arXiv preprint arXiv:2105.05994, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.275, + 0.471, + 0.357 + ], + "angle": 0, + "content": "[55] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. IBRNet: Learning multi-view image-based rendering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.359, + 0.47, + 0.413 + ], + "angle": 0, + "content": "[56] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Xinggang Wang. 4D Gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.414, + 0.471, + 0.469 + ], + "angle": 0, + "content": "[57] Wenqi Xian, Jia-Bin Huang, Johannes Kopf, and Changil Kim. Space-time neural irradiance fields for free-viewpoint video. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.47, + 0.471, + 0.552 + ], + "angle": 0, + "content": "[58] Yuanbo Xiangli, Linning Xu, Xingang Pan, Nanxuan Zhao, Anyi Rao, Christian Theobalt, Bo Dai, and Dahua Lin. BungeeNeRF: Progressive neural radiance field for extreme multi-scale scene rendering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.553, + 0.471, + 0.607 + ], + "angle": 0, + "content": "[59] Zexiang Xu, Sai Bi, Kalyan Sunkavalli, Sunil Hadap, Hao Su, and Ravi Ramamoorthi. Deep view synthesis from sparse photometric images. ACM Transactions on Graphics (TOG), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.609, + 0.471, + 0.678 + ], + "angle": 0, + "content": "[60] Wenqi Yang, Guanying Chen, Chaofeng Chen, Zhenfang Chen, and Kwan-Yee K Wong. S\\(^3\\)-NeRF: Neural reflectance field from shading and shadow under a single viewpoint. 
In Proceedings of the Advances in Neural Information Processing Systems (NeurIPS), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.679, + 0.471, + 0.747 + ], + "angle": 0, + "content": "[61] Jae Shin Yoon, Kihwan Kim, Orazio Gallo, Hyun Soo Park, and Jan Kautz. Novel view synthesis of dynamic scenes with globally coherent depths from a monocular camera. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.748, + 0.471, + 0.816 + ], + "angle": 0, + "content": "[62] Alex Yu, Sara Fridovich-Keil, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.471, + 0.873 + ], + "angle": 0, + "content": "[63] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.874, + 0.471, + 0.903 + ], + "angle": 0, + "content": "[64] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.471, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.163 + ], + "angle": 0, + "content": "[65] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. NeRF++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.165, + 0.892, + 0.233 + ], + "angle": 0, + "content": "[66] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.892, + 0.304 + ], + "angle": 0, + "content": "[67] Xiuming Zhang, Pratul P Srinivasan, Boyang Deng, Paul Debevec, William T Freeman, and Jonathan T Barron. NeR-Factor: Neural factorization of shape and reflectance under an unknown illumination. ACM Transactions on Graphics (TOG), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.892, + 0.361 + ], + "angle": 0, + "content": "[68] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.362, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[69] M. Zwicker, H. Pfister, J. van Baar, and M. Gross. Ewa volume splatting. In Proceedings of IEEE Visualization (VIS), 2001. 
4" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "8910" + } + ] +] \ No newline at end of file diff --git a/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/f455d128-070e-4b9d-a550-262379c7f3f3_origin.pdf b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/f455d128-070e-4b9d-a550-262379c7f3f3_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bfb2b93d85d572051832a353cbf2dfc67f72ca51 --- /dev/null +++ b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/f455d128-070e-4b9d-a550-262379c7f3f3_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1c7f4b6b28e6abbc22809214dda7077cd595a2caa0c7fd6a00a6eae30508957 +size 4904836 diff --git a/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/full.md b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/full.md new file mode 100644 index 0000000000000000000000000000000000000000..f80aaf72557217c0d5dc2c127eff2ca2386e0c07 --- /dev/null +++ b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/full.md @@ -0,0 +1,383 @@ +# 3D Geometry-aware Deformable Gaussian Splitting for Dynamic View Synthesis + +Zhicheng Lu $^{1*}$ , Xiang Guo $^{1*}$ , Le Hui $^{1\dagger}$ , Tianrui Chen $^{1,2}$ , Min Yang $^{2}$ , Xiao Tang $^{2}$ , Feng Zhu $^{2}$ , Yuchao Dai $^{1\dagger}$ + +1Northwestern Polytechnical University 2Samsung R&D Institute + +{zhichenglu, guoxiang, cherryxchen}@mail.nwpu.edu.cn + +{daiyuchao, huile}@nwpu.edu.cn {min16.yang, xiao1.tang, f15.zhu}@samsung.com + +# Abstract + +In this paper, we propose a 3D geometry-aware deformable Gaussian Splatting method for dynamic view synthesis. Existing neural radiance fields (NeRF) based solutions learn the deformation in an implicit manner, which cannot incorporate 3D scene geometry. Therefore, the learned deformation is not necessarily geometrically coherent, which results in unsatisfactory dynamic view synthesis and 3D dynamic reconstruction. Recently, 3D Gaussian Splatting provides a new representation of the 3D scene, building upon which the 3D geometry could be exploited in learning the complex 3D deformation. Specifically, the scenes are represented as a collection of 3D Gaussian, where each 3D Gaussian is optimized to move and rotate over time to model the deformation. To enforce the 3D scene geometry constraint during deformation, we explicitly extract 3D geometry features and integrate them in learning the 3D deformation. In this way, our solution achieves 3D geometry-aware deformation modeling, which enables improved dynamic view synthesis and 3D dynamic reconstruction. Extensive experimental results on both synthetic and real datasets prove the superiority of our solution, which achieves new state-of-the-art performance. The project is available at https://npucvr.github.io/GaGS/. + +# 1. Introduction + +Dynamic View Synthesis (DVS) aims at rendering novel photorealistic views at arbitrary viewpoints and any input time step given a monocular video of a dynamic scene, which has broad applications in virtual reality and augmented reality. 
Recently, empowered with effective representations such as neural radiance fields (NeRF) [30] and Gaussian Splatting [21], novel view synthesis for static scenes has been greatly advanced. However, this success cannot be extended to its dynamic counterpart directly. This is mainly due to the difficulty in modeling and representing the scene deformation. Due to the inherent motion/shape ambiguity in monocular dynamic 3D representation, dynamic scene modeling and synthesis are more challenging, especially for monocular video with limited observations.

![](images/905bce9a44d2ccb907b545a37b4c00b1f27925f14301d96bd815439e515ac8cd.jpg)
Figure 1. Geometric information exploited by different methods. a) Early dynamic NeRF methods such as D-NeRF [37] directly encode the coordinate $\mathbf{p}$ of the sample point as the input feature for the deformation network. b) Interpolation is used to fuse features from neighbouring grids, and multiscale interpolation enhances the local geometry information [11, 16, 27, 53]. c) We propose to voxelize a set of Gaussian distributions and use a sparse convolution network to extract geometry-aware features for deformation learning.

In addressing the above challenges, one common strategy is to represent the dynamic scenes as a combination of a static canonical field and a deformation model [11, 16, 17, 27, 33, 34, 37, 51, 53], whereas the bottleneck lies in representing the diverse and complex real-world 3D deformation. To represent geometrically consistent 3D deformation, the local geometric/structural information is critical, since the deformations of objects in the real world are highly correlated with their 3D structures. Furthermore, the motions of object points are deeply coupled with the motions of their neighboring points. Thus, how to incorporate the local geometric information to learn locally smooth and consistent 3D deformations becomes the research focus in DVS.

Recently, different deformation models have utilized the local geometric information, but they all have their limitations. As shown in Fig. 1 a), originally in D-NeRF [37], the feature (positional encoding) of each sampled point is extracted independently of the others. Subsequent works notice that this method cannot handle complex dynamic scenes, since the extracted features contain little information from neighboring points. In Fig. 1 b), interpolation is introduced to fuse features of neighboring grids. NDVG [16] and RoDynRF [27] gradually increase the voxel resolution so that the large voxel size can cover a larger area, introducing local smoothness at the early stage of training. However, this strategy covers only a limited local range and does not help at later training stages. TiNeuVox [11] and SUDS [53] interpolate with multiple scales. Nevertheless, the interpolation operation is rather simple in extracting local geometric information and introduces non-smoothness and artifacts [3, 19].

In modeling the non-rigid deformation, it is crucial to account for motion consistency within a local neighborhood. A point-level MLP has a limited receptive field and cannot capture the local geometric features of point clouds. To utilize the local geometric information effectively, we propose to use 3D sparse convolution. As shown in Fig. 1 c), building upon the recent explicit point-cloud-based Gaussian Splatting representation, we introduce a sparse convolution network to extract 3D geometry-aware features.
Compared with simple feature interpolation, the convolutional neural network is superior in extracting local information and has a much larger receptive field. Also, we treat the 3D Gaussian distributions as point clouds, which enables sparse 3D convolution for time and memory efficiency. Note that FDNeRF [17] uses a 3D U-Net to inpaint the missing area in the voxel grid. However, this inpainting network is not used for deformation modeling, and the rendering speed and voxel resolution are also limited.

In the original Gaussian Splatting [21], the rotation parameter of each Gaussian is represented by a quaternion. However, the quaternion representation of rotation is discontinuous in the parameter space, which hinders neural network learning [68]. We introduce the continuous 6D rotation representation [68] to ensure that the network learns a continuous function in the parameter space, which accurately represents the rotational state of each Gaussian at different times.

Overall, our method mainly has two components: a Gaussian canonical field and a deformation field. The Gaussian canonical field consists of 3D Gaussian distributions and a geometry-aware feature learning network. The explicit 3D Gaussian distribution represents the geometry of the canonical scene, and the sparse 3D CNN extracts local structural/geometric information for each Gaussian. The deformation field estimates a transformation for each Gaussian in the canonical field, which transfers the Gaussian from the canonical field to the given timestamp. Finally, we use 3D Gaussian splatting to render images for the given timestamp.

Our main contributions are summarized as:

- We propose a geometry-aware feature extraction network based on 3D Gaussian distributions to better utilize local geometric information.
- We propose to use a continuous 6D rotation representation and a modified density control strategy to adapt Gaussian splatting to dynamic scenes.
- Extensive experiments on both synthetic and real datasets show that our method surpasses competing methods by a wide margin.

# 2. Related Work

# 2.1. Novel View Synthesis

Novel View Synthesis (NVS) is a well-known task in both computer vision and graphics [5, 8, 15, 22]. Surveys such as [43, 48, 49] provide comprehensive discussions. Explicit NVS methods generally reconstruct an explicit 3D model of a scene in the form of point clouds [1], voxels, or meshes [18, 41, 42, 50]. Once the geometry of the scene is represented, novel view images can be rendered from arbitrary viewpoints by manipulating the camera pose parameters. Other methods [9, 12, 20, 35, 41, 42, 59] tackle NVS by estimating depth maps using multi-view geometry, whereas the features are aggregated from co-visible frames.

Neural Radiance Fields (NeRF) [30] is a groundbreaking approach that utilizes Multi-Layer Perceptrons (MLPs) to represent scenes implicitly. This methodology enables the modeling of a 5D radiance field, resulting in the impressive synthesis of views for static scenes. Numerous subsequent works expand the capabilities of NeRF by adapting it to various scenarios, such as handling larger and unbounded scenes [29, 40, 47, 58, 65], scene editing and relighting [4, 45, 60, 67], anti-aliasing [2, 3, 19], and improving the generalization ability [7, 52, 55, 64]. Meanwhile, researchers focus on achieving more efficient rendering and optimization in a NeRF-like framework.
[6, 13, 24, 26, 28, 32, 36, 63] investigate efficient sampling methods along each ray for color accumulation, while [38, 39] partition the scene into multiple sub-regions as an efficient pre-processing, and [6, 13, 31, 46, 62] exploit voxel-grid representations to speed up the optimization. Very recently, [21] proposes to use 3D Gaussian distributions to represent the scene, obtaining promising results. However, these methods are mainly applicable to static scenes, and fail in scenes with dynamic objects.

# 2.2. Dynamic View Synthesis

![](images/49cd04cb0a0fd62e67e59d4951cca6cdb33605a04e9fe69fcb8e1d3203f5c73d.jpg)
Figure 2. The pipeline of our proposed 3D geometry-aware deformable Gaussian splatting. In the Gaussian canonical field, we reconstruct a static scene in canonical space using 3D Gaussian distributions. We extract positional features using an MLP, as well as local geometric features using a 3D U-Net, fused by another MLP to form the geometry-aware features. In the deformation field, taking the geometry-aware features and timestamp $t$, an MLP estimates the 3D Gaussian deformation, which transfers the canonical 3D Gaussian distributions to timestamp $t$. Finally, a rasterizer renders the transformed 3D Gaussians to images.

A recent trend in NVS is to extend the success in static NVS to dynamic NVS. One viable strategy is to construct a 4D spatial-temporal representation. Yoon et al. [61] combine single-view and multi-view depth to achieve NVS by 3D warping. Gao et al. [14] use a time-invariant model and a time-varying model to represent the static part and dynamic part of a scene, respectively, and use scene flow for motion modeling. NeRFlow [10] proposes a 4D spatial-temporal representation of a dynamic scene. Xian et al. [57] map a spatial-temporal location to the color and volume density by a 4D spatial-temporal radiance field. NSFF [23] represents a dynamic scene as a continuously changing function, encompassing various aspects of the scene, including appearance, geometry, and 3D scene motion. DCT-NeRF [54] uses the Discrete Cosine Transform (DCT) to replace the scene flow in NSFF [23] to enable smoother motion trajectories. HexPlane [6] and K-Plane [13] project the 4D spatial-temporal space onto multiple 2D planes.

On the other hand, works such as [11, 16, 17, 27, 33, 34, 37, 44, 51, 53] decompose the dynamic scene into a canonical field and a deformation field. Along this pipeline, D-NeRF [37] first proposes the canonical-based framework. However, its deformation network utilizes positional features with little geometry information, which cannot handle complex dynamic scenarios well. Nerfies [33] proposes a coarse-to-fine optimization method for coordinate-based models that allows for more robust optimization. HyperNeRF [34] lifts the canonical field into a higher-dimensional space to handle topological changes. NDVG [16] and RoDynRF [27] gradually increase the voxel resolution during training, which introduces local smoothness at the early training stage. TiNeuVox [11] and SUDS [53] interpolate the features with multiple scales for deformation learning. The multi-scale interpolation covers a larger receptive field, which benefits modeling varying motions.

Very recently, with the stunning debut of 3D Gaussian Splatting [21], some works introduce this point-based representation into their pipelines to synthesize high-fidelity images of a dynamic scene. Wu et al. [56] introduce a 4D Gaussian Splatting representation and utilize a deformation field to model both Gaussian motions and shape changes.
However, the multi-scale HexPlane interpolation has limited ability in extracting the geometry information, which is still insufficient for modeling complex motions. The projection-based representation compresses the 3D space into 2D space, losing 3D geometric information for deformation learning. In contrast, our canonical-based method can fully leverage the 3D information in 3D space.

# 3. Method

In this section, we present our 3D geometry-aware deformable Gaussian Splatting solution for dynamic view synthesis, where an overview of our pipeline is illustrated in Fig. 2. Given a set of images or a monocular video of a dynamic scene, with frames annotated with corresponding time labels and known camera intrinsic and extrinsic parameters, our goal is to synthesize a novel view at any desired viewpoint and any desired time. Our method mainly consists of two core components: the Gaussian canonical field is used to learn the reconstruction of the static scene, while the deformation field is used to learn object deformation. First, we review static 3D Gaussian splatting in Sec. 3.1. Then, we introduce the proposed Gaussian canonical field in Sec. 3.2, which consists of 3D Gaussian distributions and a geometry feature learning network. Next, in Sec. 3.3, we propose a 3D geometry-aware deformation field to learn transformations for given time steps, which transform our canonical 3D Gaussian distributions to the corresponding times. In Sec. 3.4, we explain the process of rendering images from the transformed 3D Gaussian distributions. Finally, we present our losses and density control modifications in Sec. 3.5.

# 3.1. Preliminary

3D-GS [21] represents the scene with sparse 3D Gaussian distributions. Each Gaussian has an anisotropic covariance $\Sigma \in \mathbb{R}^{3\times 3}$ and a mean value $\mu \in \mathbb{R}^3$:

$$
\mathbf{G}(\mathbf{x}) = e^{-\frac{1}{2}(\mathbf{x} - \mu)^{\top} \boldsymbol{\Sigma}^{-1} (\mathbf{x} - \mu)}. \tag{1}
$$

The covariance matrix $\boldsymbol{\Sigma}$ can be decomposed into a scaling matrix $\mathbf{S} \in \mathbb{R}^{3 \times 3}$ and a rotation matrix $\mathbf{R} \in \mathrm{SO}(3)$. This ensures that the covariance matrix is positive semi-definite, while reducing the learning difficulty of 3D Gaussians:

$$
\boldsymbol{\Sigma} = \mathbf{R}\mathbf{S}\mathbf{S}^{\top}\mathbf{R}^{\top}. \tag{2}
$$

To render an image from a designated viewpoint, the covariance matrix $\boldsymbol{\Sigma}^{\prime}$ in camera coordinates can be calculated given a viewing transformation $\mathbf{W}$, following [69]:

$$
\boldsymbol{\Sigma}^{\prime} = \mathbf{J}\mathbf{W}\boldsymbol{\Sigma}\mathbf{W}^{\top}\mathbf{J}^{\top}, \tag{3}
$$

where $\mathbf{J}$ is the Jacobian of the affine approximation of the projective transformation, and $\mathbf{W}$ is the world-to-camera transformation matrix.

Each Gaussian is parameterized by the following attributes: position $\mathbf{x} \in \mathbb{R}^3$, color defined by spherical harmonics coefficients $\mathbf{c} \in \mathbb{R}^k$, rotation $\mathbf{r} \in \mathbb{R}^4$, scale $\mathbf{s} \in \mathbb{R}^3$, and opacity $o \in \mathbb{R}$. Point-based $\alpha$-blending and volumetric rendering like NeRF [30] essentially share the same image formation model for the splatting process.
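To make Eqs. (2) and (3) concrete, the following is a minimal PyTorch sketch of assembling a per-Gaussian covariance from scale and rotation and projecting it into a 2D screen-space covariance. The function names, tensor layout, and the explicit pinhole Jacobian are illustrative assumptions, not the reference 3D-GS implementation.

```python
import torch

def build_covariance(R: torch.Tensor, s: torch.Tensor) -> torch.Tensor:
    """Eq. (2): Sigma = R S S^T R^T with S = diag(s). R: (N, 3, 3), s: (N, 3)."""
    M = R @ torch.diag_embed(s)          # R S
    return M @ M.transpose(1, 2)         # (R S)(R S)^T = R S S^T R^T

def project_covariance(Sigma: torch.Tensor, mu: torch.Tensor,
                       W: torch.Tensor, t: torch.Tensor,
                       fx: float, fy: float) -> torch.Tensor:
    """Eq. (3): Sigma' = J W Sigma W^T J^T. W: (3, 3) world-to-camera rotation,
    t: (3,) translation, mu: (N, 3) Gaussian centers in world coordinates."""
    mu_cam = mu @ W.T + t                             # centers in camera coordinates
    x, y, z = mu_cam[:, 0], mu_cam[:, 1], mu_cam[:, 2]
    # Jacobian of the affine approximation of the perspective projection.
    J = torch.zeros(mu.shape[0], 2, 3, device=mu.device)
    J[:, 0, 0] = fx / z
    J[:, 0, 2] = -fx * x / z ** 2
    J[:, 1, 1] = fy / z
    J[:, 1, 2] = -fy * y / z ** 2
    Sigma_cam = W @ Sigma @ W.T                       # W Sigma W^T
    return J @ Sigma_cam @ J.transpose(1, 2)          # (N, 2, 2) screen-space covariance
```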
Specifically, the color $\mathbf{C}$ of each pixel is influenced by the related Gaussians:

$$
\mathbf{C} = \sum_{i=1}^{N} \mathbf{T}_{i} \alpha_{i} \mathbf{c}_{i}, \tag{4}
$$

where $\alpha_{i}$ represents the density of the Gaussian point, computed from a Gaussian with covariance $\boldsymbol{\Sigma}$ multiplied by its opacity, and $\mathbf{T}_{i}$ is the accumulated transmittance.

# 3.2. Gaussian Canonical Field

In this section, we first reconstruct a static scene in canonical space. Then, we propose a geometric branch, which enables geometry feature learning of the 3D Gaussian distributions for the subsequent deformation field.

Gaussian parameters. Similar to 3D-GS [21], each Gaussian in the canonical space is characterized by position, color, scale, and opacity. Note that for rotation, we are inspired by [68] to use a continuous 6D rotation representation. Compared with the quaternion representation used in 3D-GS, the 6D rotation representation benefits our method in estimating the deformation of each Gaussian from canonical space to time-space, especially in helping the neural networks learn smooth rotation variation from time to time. Specifically, we set a learnable parameter $[a_1, a_2]$ for each Gaussian to denote its rotation in canonical space, where $a_1$ and $a_2$ are two three-dimensional column vectors. They are initialized to $[1\ 0\ 0]^{\top}$ and $[0\ 1\ 0]^{\top}$, corresponding precisely to the identity rotation matrix. The mapping from this 6D representation vector to an SO(3) matrix is defined as [68]:

$$
f_{\mathrm{V2M}}\left(\begin{bmatrix} | & | \\ a_{1} & a_{2} \\ | & | \end{bmatrix}\right) = \begin{bmatrix} | & | & | \\ b_{1} & b_{2} & b_{3} \\ | & | & | \end{bmatrix}, \tag{5}
$$

$$
b_{i} = \begin{cases} \mathcal{N}\left(a_{1}\right) & \text{if } i = 1 \\ \mathcal{N}\left(a_{2} - \left(b_{1} \cdot a_{2}\right) b_{1}\right) & \text{if } i = 2 \\ b_{1} \times b_{2} & \text{if } i = 3 \end{cases} \tag{6}
$$

where $\mathcal{N}(\cdot)$ denotes a normalization function, "$\cdot$" denotes the vector inner product, and "$\times$" denotes the vector cross product. The subscript V2M in $f_{\mathrm{V2M}}$ denotes the transform from the 6D vector to the rotation matrix.

Geometry feature learning. To capture the local geometric structure of the canonical scene, we regard the 3D Gaussians as a 3D point cloud, i.e., we only use the 3D coordinates of the 3D Gaussians. To handle a large number of points, we leverage a simple two-branch structure: the geometric branch learns local features of the point cloud across different receptive fields, while the identity branch preserves the independent point-level features at high resolution. By integrating the geometric branch and the identity branch, we can efficiently obtain point-level features at high resolution while embedding the local geometric information of the point cloud.

The geometric branch leverages sparse convolution [25] on sparse voxels to extract local geometric features at different receptive fields. Given the point cloud $\mathbf{P} \in \mathbb{R}^{N \times 3}$, we first transform the high-resolution point cloud into low-resolution voxels by dividing the space with a fixed grid size $s$:

$$
\mathbf{V} = \operatorname{floor}(\mathbf{P} / s), \tag{7}
$$

where the size of $\mathbf{V}$ is $M\times 3$ and $M$ is the number of voxels.
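For reference, here is a small PyTorch sketch of the 6D-to-rotation-matrix mapping of Eqs. (5)-(6) (a Gram-Schmidt step, following [68]) and of the voxel-index computation of Eq. (7) used to group Gaussian centers into sparse voxels. The function names and the way voxel indices are returned are illustrative assumptions rather than the exact implementation.

```python
import torch
import torch.nn.functional as F

def rotation_6d_to_matrix(a: torch.Tensor) -> torch.Tensor:
    """Eqs. (5)-(6): map an (N, 6) vector [a1, a2] to an SO(3) matrix via Gram-Schmidt [68]."""
    a1, a2 = a[:, 0:3], a[:, 3:6]
    b1 = F.normalize(a1, dim=-1)                                         # N(a1)
    b2 = F.normalize(a2 - (b1 * a2).sum(-1, keepdim=True) * b1, dim=-1)  # N(a2 - (b1 . a2) b1)
    b3 = torch.cross(b1, b2, dim=-1)                                     # b1 x b2
    return torch.stack((b1, b2, b3), dim=-1)                             # columns b1, b2, b3

def voxelize_centers(P: torch.Tensor, s: float):
    """Eq. (7): V = floor(P / s). Returns the occupied voxel coordinates and, for every
    point, the index of the voxel it falls into (used to scatter voxel features back)."""
    V = torch.floor(P / s).long()                                        # (N, 3) voxel coords
    voxels, point_to_voxel = torch.unique(V, dim=0, return_inverse=True)
    return voxels, point_to_voxel

# The identity initialization [1 0 0]^T, [0 1 0]^T described above maps to the identity rotation:
a_init = torch.tensor([[1., 0., 0., 0., 1., 0.]])
assert torch.allclose(rotation_6d_to_matrix(a_init)[0], torch.eye(3))
```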
Then, we construct a sparse 3D U-Net by stacking a set of sparse convolutions with skip connections. Taking $\mathbf{V}$ as input, the sparse 3D U-Net aggregates local features (dubbed $\mathbf{F}_v\in \mathbb{R}^{M\times C}$) of the point cloud.

The identity branch uses a multi-layer perceptron (MLP) to map the 3D coordinates of the point cloud into the embedding space (dubbed $\mathbf{F}_p\in \mathbb{R}^{N\times C}$) to maintain the independence of point features. To accurately characterize the local geometric structure of the canonical scene, we fuse the voxel features carrying local information onto the point features. Specifically, we transform the voxel feature $\mathbf{F}_v$ back to the corresponding points to obtain point-level features $\mathbf{F}_p^{\prime}\in \mathbb{R}^{N\times C}$ by assigning each voxel feature to the points within that voxel. Finally, we concatenate $\mathbf{F}_p^{\prime}$ and $\mathbf{F}_p$ to obtain the fused point-level feature, followed by an MLP layer:

$$
\mathbf{F}_{\text{fuse}} = \operatorname{MLP}\left(\operatorname{Concat}\left(\mathbf{F}_{p}^{\prime}, \mathbf{F}_{p}\right)\right). \tag{8}
$$

# 3.3. Deformation Field

In this section, we propose a deformation field that estimates the deformation of each 3D Gaussian in the canonical space for a given time $t$.

Deformation estimation. We adopt an MLP as the decoder $\mathcal{G}_{\Phi}$, which takes the geometry feature learned from the geometry branch in the Gaussian canonical field, the position of each Gaussian, and the timestamp as input, and outputs the deformation of each Gaussian from canonical space to time $t$, including the position deformation $\Delta \mathbf{x}_{t} \in \mathbb{R}^{3}$, rotation deformation $\Delta \mathbf{r}_{t} \in \mathbb{R}^{6}$, and scale deformation $\Delta \mathbf{s}_{t} \in \mathbb{R}^{3}$:

$$
\Delta \mathbf{x}_{t}, \Delta \mathbf{r}_{t}, \Delta \mathbf{s}_{t} = \mathcal{G}_{\Phi}(\mathbf{F}_{\text{fuse}}, \gamma(\mathbf{x}), \gamma(t)), \tag{9}
$$

where $\gamma(\cdot)$ denotes the positional encoding in NeRF [30], which maps a one-dimensional signal from $\mathbb{R}$ into a higher-dimensional space $\mathbb{R}^{2L}$:

$$
\gamma(p) = \left(\sin\left(2^{0}\pi p\right), \cos\left(2^{0}\pi p\right), \dots, \sin\left(2^{L-1}\pi p\right), \cos\left(2^{L-1}\pi p\right)\right). \tag{10}
$$

Note that we keep the color parameters $\mathbf{c}$ and opacity $o$ of the canonical 3D Gaussian distributions constant over time. These two factors are highly related to the physical properties of the Gaussian distributions, and we want each distribution to represent the same object area over the timeline.

Transformation. Using the deformation estimated for time $t$ above, we can transform the 3D Gaussian distributions to the current time by

$$
\begin{aligned}
\mathbf{x}_{t} &= \mathbf{x} + \Delta \mathbf{x}_{t}, \\
\mathbf{s}_{t} &= \mathbf{s} + \Delta \mathbf{s}_{t}, \\
\mathbf{r}_{t} &= f_{\mathrm{V2M}}\left(\Delta \mathbf{r}_{t}\right) \times f_{\mathrm{V2M}}(\mathbf{r}).
\end{aligned} \tag{11}
$$

A minimal code sketch of this per-timestamp deformation step is given after Table 1 below.

# 3.4. Rasterization

Once the attributes of each Gaussian $(\mathbf{x}_t,\mathbf{c},\mathbf{r}_t,\mathbf{s}_t,o)$ are prepared, we use the differentiable tile rasterizer [21] to render the image at any desired viewpoint at this timestamp:

$$
\hat{\mathbf{C}}_{t} = \operatorname{Rasterizer}(\mathbf{x}_{t}, \mathbf{c}, \mathbf{r}_{t}, \mathbf{s}_{t}, o, \mathbf{K}, [\mathbf{R}|\mathbf{T}]), \tag{12}
$$

where $\mathbf{K}$ and $[\mathbf{R}|\mathbf{T}]$ represent the camera's intrinsic and extrinsic parameters, respectively.

![](images/1cba156c55a86ae2a58373e8d3960276ff0137c542d3109f10ea1c71d597f12d.jpg)
Figure 3. Our density control is designed for dynamic scenes. We control the densification of Gaussian distributions according to their transformed parameters at timestamp $t$ rather than their parameters in canonical space.

# 3.5. Optimization

To optimize the model, we use a photometric loss and a motion loss, and also adapt the density control from 3D-GS [21] with our modifications.

Photometric loss. The photometric loss consists of the $L_{1}$ loss and the structural similarity loss $L_{D-SSIM}$ between the rendered image $\hat{\mathbf{C}}_t$ and the ground truth image $\mathbf{C}_t$:

$$
L_{\text{photo}} = (1 - \lambda) L_{1} + \lambda L_{D-SSIM}. \tag{13}
$$

Regularization. We exploit the observation that, in a scene, the proportion of dynamic points is much smaller than that of static points, and the motion amplitude at dynamic points is limited. In other words, each point in the scene should stay as static as possible:

$$
L_{\text{motion}} = \left\| \Delta \mathbf{x}_{t} \right\|_{1}. \tag{14}
$$

Total loss. The total loss is defined as

$$
L = L_{\text{photo}} + \omega L_{\text{motion}}, \tag{15}
$$

where $\omega$ is a trade-off parameter to balance the components (a short sketch of these losses is given after Table 2 below).

Density control. 3D-GS has shown that adaptive density control is essential for achieving high rendering performance. On the one hand, the Gaussians need to populate empty areas without geometric features. Thus, it simply creates a copy of the Gaussian for under-reconstructed regions. On the other hand, large Gaussians in regions with high variance need to be split into smaller Gaussians. We follow 3D-GS and replace such Gaussians with two new ones, divide their scale by a factor of $\phi = 1.6$, and initialize their positions by using the original 3D Gaussian as a PDF for sampling.

Our method differs from 3D-GS in the following aspects. For 3D-GS, there exists only a single set of Gaussians. In our case, however, we initialize the Gaussians in the canonical space, estimate the deformations of these Gaussians, and transform their attributes into the timestamp space.

Table 1. Quantitative comparison between our method and competing methods on the D-NeRF dataset. The best results are highlighted in bold.
Table 1. Quantitative comparison between our method and competing methods on the D-NeRF dataset. The best results are highlighted in bold.

| Method | Hell Warrior (PSNR↑ / SSIM↑ / LPIPS↓) | Mutant (PSNR↑ / SSIM↑ / LPIPS↓) | Hook (PSNR↑ / SSIM↑ / LPIPS↓) | Bouncing Balls (PSNR↑ / SSIM↑ / LPIPS↓) |
| --- | --- | --- | --- | --- |
| 3D-GS [21] | 15.3924 / 0.8776 / 0.1300 | 21.7554 / 0.9359 / 0.0575 | 18.6933 / 0.8733 / 0.1144 | 22.5575 / 0.9485 / 0.0647 |
| D-NeRF [37] | 25.0293 / 0.9506 / 0.0691 | 31.2900 / 0.9739 / 0.0268 | 29.2567 / 0.9650 / 0.1174 | 38.9300 / 0.9900 / 0.1031 |
| TiNeuVox-B [11] | 28.2058 / 0.9661 / 0.0631 | 33.9029 / 0.9771 / 0.0301 | 31.7929 / 0.9718 / 0.0436 | 40.8536 / 0.9913 / 0.0401 |
| NDVG [16] | 26.4933 / 0.9600 / 0.0670 | 34.4131 / 0.9801 / 0.0270 | 30.0009 / 0.9626 / 0.0463 | 37.5157 / 0.9874 / 0.0751 |
| FDNeRF [17] | 27.7120 / 0.9665 / 0.0508 | 34.9727 / 0.9810 / 0.0312 | 32.2867 / 0.9756 / 0.0388 | 40.0191 / 0.9912 / 0.0395 |
| 4D-GS [56] | 28.1196 / 0.9730 / 0.0276 | 38.3411 / 0.9936 / 0.0062 | 33.1560 / 0.9810 / 0.0168 | 40.7418 / 0.9941 / 0.0105 |
| Ours | **32.2712 / 0.9835 / 0.0164** | **41.4284 / 0.9969 / 0.0029** | **36.9647 / 0.9916 / 0.0076** | **43.5929 / 0.9960 / 0.0061** |

| Method | Lego (PSNR↑ / SSIM↑ / LPIPS↓) | T-Rex (PSNR↑ / SSIM↑ / LPIPS↓) | Stand Up (PSNR↑ / SSIM↑ / LPIPS↓) | Jumping Jacks (PSNR↑ / SSIM↑ / LPIPS↓) |
| --- | --- | --- | --- | --- |
| 3D-GS [21] | 23.0991 / 0.9329 / 0.0567 | 25.7496 / 0.9567 / 0.0474 | 19.3779 / 0.9200 / 0.0909 | 20.7163 / 0.9227 / 0.0980 |
| D-NeRF [37] | 21.6427 / 0.8394 / 0.1654 | 31.7568 / 0.9767 / 0.0396 | 32.7992 / 0.9818 / 0.0215 | 32.8031 / 0.9810 / 0.0373 |
| TiNeuVox-B [11] | 25.1748 / 0.9217 / 0.0689 | 32.7750 / 0.9783 / 0.0307 | 36.2031 / 0.9859 / 0.0199 | 34.7390 / 0.9823 / 0.0328 |
| NDVG [16] | 25.0416 / 0.9395 / 0.0534 | 32.6229 / 0.9781 / 0.0330 | 33.2158 / 0.9793 / 0.0302 | 31.2530 / 0.9737 / 0.0398 |
| FDNeRF [17] | 25.2700 / 0.9390 / 0.0460 | 30.7068 / 0.9731 / 0.0368 | 36.9107 / 0.9878 / 0.0188 | 33.5521 / 0.9812 / 0.0329 |
| 4D-GS [56] | 25.4024 / 0.9434 / 0.0377 | 33.3912 / 0.9869 / 0.0130 | 38.2610 / 0.9923 / 0.0071 | 35.6656 / 0.9882 / 0.0159 |
| Ours | **25.4411 / 0.9474 / 0.0329** | **39.0285 / 0.9952 / 0.0052** | **42.2101 / 0.9966 / 0.0028** | **37.9604 / 0.9928 / 0.0088** |
Table 2. Quantitative comparison between our method and competing methods on the HyperNeRF dataset. The best results are highlighted in bold.
| Method | Chicken (PSNR↑ / MS-SSIM↑) | 3D Printer (PSNR↑ / MS-SSIM↑) | Broom (PSNR↑ / MS-SSIM↑) | Peel Banana (PSNR↑ / MS-SSIM↑) |
| --- | --- | --- | --- | --- |
| TiNeuVox [11] | 28.2861 / **0.9474** | 22.7514 / 0.8392 | 21.2682 / 0.6832 | 24.5136 / 0.8743 |
| NDVG [16] | 27.0536 / 0.9390 | 22.4196 / 0.8389 | 21.4658 / 0.7028 | 22.8204 / 0.8279 |
| FDNeRF [17] | 27.9627 / 0.9438 | **22.8027** / **0.8453** | **21.9091** / **0.7154** | 24.2515 / 0.8645 |
| 3D-GS [21] | 20.8915 / 0.7426 | 18.3991 / 0.6114 | 20.3953 / 0.6598 | 20.5654 / 0.8094 |
| Ours | **28.5342** / 0.9331 | 22.0403 / 0.8098 | 20.8994 / 0.5241 | **25.5785** / **0.9067** |
Table 3. Quantitative comparison on the HyperNeRF dataset: average over Cut Lemon, Chicken, 3D Printer, and Split Cookie. The best results are highlighted in bold.
| Method | PSNR↑ | SSIM↑ | LPIPS↓ |
| --- | --- | --- | --- |
| TiNeuVox-B [11] | 27.16 | 0.76 | 0.40 |
| 3D-GS [21] | 21.26 | 0.69 | 0.40 |
| 4D-GS [56] | 26.98 | 0.78 | 0.31 |
| Ours | **27.52** | **0.80** | **0.25** |
# 4. Experiments

# 4.1. Dataset

In this paper, we use both synthetic and real datasets to evaluate our method. The synthetic D-NeRF dataset [37] contains 8 dynamic scenes: Hell Warrior, Mutant, Hook, Bouncing Balls, Lego, T-Rex, Stand Up, and Jumping Jacks. The real dataset is proposed by HyperNeRF [34], including the interp-cut-lemon, interp-cut-lemon1, vrig-chicken, vrig-3dprinter, and misc-split-cookie sequences. Following previous works [21], we report three evaluation metrics: Peak Signal-to-Noise Ratio (PSNR), Structural Similarity (SSIM), and Learned Perceptual Image Patch Similarity (LPIPS) [66].

# 4.2. Implementation Details

Our implementation is based on 3D-GS [21]. We train for a total of 40,000 iterations; the first 3,000 iterations optimize only the static canonical scene, after which the deformation field is added to optimize the dynamic scene. The learning rate of our network decays exponentially from 8e-4 to 1.6e-6 with the Adam optimizer. We use a 2-layer MLP with a width of 64 for point feature extraction and a 3-layer MLP with a width of 64 for point feature fusion. A 5-layer MLP with a width of 256 and a skip connection is used as the decoder. For positional encoding, we use $L = 10$ for the position $\mathbf{x}$ and $L = 6$ for the timestep $t$. For the D-NeRF dataset, which does not provide point clouds, we randomly initialize 150,000 points, while for the HyperNeRF dataset we use the point cloud provided with the dataset for initialization. All experiments are run on a single RTX 4090 GPU.
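For orientation, the training setup described above can be collected into a single configuration object. This is a hypothetical sketch: the field names are ours, and the two loss weights at the bottom are placeholders that the paper does not report.

```python
from dataclasses import dataclass

@dataclass
class TrainConfig:
    # Schedule (Sec. 4.2)
    total_iters: int = 40_000
    static_warmup_iters: int = 3_000     # optimize the canonical (static) scene only
    lr_init: float = 8e-4                # exponential decay with the Adam optimizer
    lr_final: float = 1.6e-6
    # Network sizes (Sec. 4.2)
    point_mlp_layers: int = 2            # identity branch, width 64
    fusion_mlp_layers: int = 3           # feature fusion, width 64
    mlp_width: int = 64
    decoder_layers: int = 5              # deformation decoder, width 256, skip connection
    decoder_width: int = 256
    pe_L_position: int = 10              # positional-encoding frequencies for x
    pe_L_time: int = 6                   # positional-encoding frequencies for t
    # Initialization
    num_init_points: int = 150_000       # random init for D-NeRF (no point cloud provided)
    # Loss weights -- placeholders, not reported values
    lambda_dssim: float = 0.2
    omega_motion: float = 1e-3
```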
# 4.3. Quantitative Results

Synthetic scenes. We compare our method with recent state-of-the-art methods, including 3D-GS, D-NeRF, TiNeuVox, NDVG, FDNeRF, and 4D-GS, on the D-NeRF dataset. As shown in Table 1, we list the results for each scene. It can be observed that our method is significantly better than the other methods in terms of all three metrics. On average, our method significantly improves PSNR compared with the static Gaussian baseline, 3D-GS. The computational costs are a training time of around 2 h (averaged over the D-NeRF dataset), a rendering speed of 12 FPS (fixed viewpoint), and a model size of 34 MB for the point cloud plus 14 MB for the network. Since 3D-GS inherently cannot model the deformation of a dynamic scene, it performs poorly in dynamic view synthesis.

![](images/f5efbbc16829caa1ec54d7b4789f1e37fcdb9ec9b26faaf1c358057615b89fd5.jpg)
Figure 4. Qualitative comparisons between baselines and our method on the synthetic dataset.

![](images/4950b47e09ddea53fa8179bb3b9514c33d4e5b5c3a3fcef84eac255dbab450b5.jpg)
Figure 5. Qualitative comparisons between baselines and our method on the HyperNeRF real dataset [34].

Real scenes. We further compare our method with closely related works on the real-scene dataset proposed by [34]. We show the detailed results on Chicken, 3D Printer, Broom, and Peel Banana in Table 2, and the average result over Cut Lemon, Chicken, 3D Printer, and Split Cookie in Table 3. It can be observed that our method achieves good performance compared with other state-of-the-art methods. Compared with synthetic datasets, real datasets are more challenging due to the narrow camera viewing range and pose ambiguity. The quantitative results demonstrate the effectiveness of the proposed method on real scenes.

# 4.4. Visualization Results

Visual comparison. In addition to the quantitative results, we also provide visualizations of the different methods to demonstrate the superiority of our method. For better comparison, we show the rendered images of each synthetic scene from the same viewpoint in Fig. 4. Comparing the visualizations of the different methods, the images rendered by our method are closer to the ground-truth images, indicating that our method can recover accurate and detailed images. In addition, we provide visualizations of the real scenes in Fig. 5. Compared with TiNeuVox [11], our method recovers the detailed structure of dynamic objects, such as the chicken and the banana.

Gaussian visualization. To further verify the effectiveness of our method, we visualize the 3D Gaussians as point clouds, using only their 3D coordinates. As shown in Fig. 7, we provide the point clouds of different methods on the synthetic dataset, including 3D-GS [21], 4D-GS [56], and ours. Note that the color of each point is generated from its 3D coordinates. Since 3D-GS cannot model dynamic scenes, the quality of its point cloud is poor. Comparing 4D-GS with ours, it can be observed that the point cloud of our method has a clear local geometric structure.

![](images/4ee70ea5442e870eb0d598aa233800289e86d78c0eec50272768f1e9612b22a6.jpg)
Figure 6. Visualization of learned geometry-aware features.

![](images/40331151a924f8e95ea8f134904708f6dd605e56b16dbaf478bfaf1fa4766a3d.jpg)

![](images/08e57af452a8fb5489e8c7008a2d47bb5ead4a5dcd47c06b063fbfd78edcc54d.jpg)

![](images/c36374877d414d5c0a500370084b52c0365a1cd37b2217e3775ac3f98196002a.jpg)
Figure 7. Visualization of the learned Gaussians, colored by position coordinates.

![](images/d1595a23c127bab518436e79dcb2d0262f38899d2fe05de461989ccedff79551.jpg)

![](images/c381318dca20e7e09894c7b90be8438e9b0fd7baf87da7f2b03908b238ce2353.jpg)

# 4.5. Ablation Study

We conduct ablation studies on the synthetic dataset $(800 \times 800)$ to verify the effectiveness of our proposed components. In Table 4, the vanilla model is a simple MLP-based model without our proposed components.

Effect of geometry-aware features. To learn the geometric information of the objects in our Gaussian canonical field, we voxelize the 3D Gaussian distributions and extract geometry-aware features with our 3D U-Net. To demonstrate the effectiveness of this design, we test a variant with the geometry branch removed and leave the other components unchanged. In Table 4, ours full has a clear advantage over w/o geo. feat., and the geometry branch plays the most important role among the components studied in the ablations.

In Fig. 6, we visualize the learned geometry-aware features by coloring the point clouds with them, which reveals meaningful geometric information. Interestingly, we can see an obvious difference in the learned features between the moving parts (the bucket of the Lego and the T-Rex body) and the static parts (the body of the Lego and the ground in the T-Rex scene). Our geometry-aware features also reflect the local geometric structure.
For example, the spines of the bones on the T-Rex tail have similar features, while the smooth parts of the tail bones show different patterns.

Different geometric features. We also conduct experiments that extract the geometric features with a PointNet-like architecture and with a plane projection (2D CNN). Compared with these results (dubbed "PointNet feat." and "Plane feat." in Table 4), our method achieves significant performance gains.

Table 4. Ablation study in terms of average PSNR, SSIM, and LPIPS. The best results are highlighted in bold.
| Method | PSNR↑ | SSIM↑ | LPIPS↓ |
| --- | --- | --- | --- |
| w/o geo. feat. | 37.5757 | 0.9841 | 0.0173 |
| w/o 6D rotation | 37.8750 | 0.9851 | 0.0154 |
| canonical DC | 37.8026 | 0.9847 | 0.0166 |
| vanilla | 35.2307 | 0.9793 | 0.0242 |
| PointNet feat. | 36.7353 | 0.9826 | 0.0184 |
| Plane feat. | 35.9054 | 0.9811 | 0.0212 |
| ours full | **38.0134** | **0.9853** | **0.0153** |
6D representation. To study the effect of the 6D representation of the rotation parameters of the 3D Gaussians, we conduct an experiment that replaces the 6D vector with the quaternion $\mathbf{q}$ used in the original 3D-GS. To deform the 3D Gaussians in canonical space, the deformation field then estimates a $\Delta \mathbf{q}_{t}$ and obtains $\mathbf{q}_{t} = \mathbf{q} + \Delta \mathbf{q}_{t}$ using quaternion addition. In Table 4, this quaternion variant (w/o 6D rotation) shows an obvious performance drop, which demonstrates the effectiveness of the 6D representation.

Density control. For density control, we test a setting that only uses the 3D Gaussians in canonical space, without considering the transformed 3D Gaussians at other timestamps. In Table 4, canonical DC shows a performance drop, as the canonical 3D Gaussians alone cannot reflect the over-/under-reconstruction at all timestamps of a dynamic scene.

# 5. Conclusion

In this paper, we have proposed a 3D geometry-aware Gaussian Splatting solution for dynamic view synthesis. We addressed the limitations of existing approaches from two perspectives: 1) we introduced 3D sparse convolution to extract local structural information effectively and efficiently for deformation learning, and 2) we represented dynamic scenes as a collection of deforming 3D Gaussian distributions, which are optimized to translate, rotate, and scale over time. Experimental results on both synthetic and real datasets demonstrate the superiority of our solution in dynamic view synthesis and 3D reconstruction. We plan to further investigate explicit motion modeling by exploiting foreground and background motion segmentation cues.

# Acknowledgments

We thank the area chairs and the reviewers for their insightful and positive feedback. This work was supported in part by the National Science Fund of China (Grant Nos. 62271410, 62306238) and the Fundamental Research Funds for the Central Universities.

# References

[1] Kara-Ali Aliev, Artem Sevastopolsky, Maria Kolos, Dmitry Ulyanov, and Victor Lempitsky. Neural point-based graphics. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2
[2] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-NeRF: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2
[3] Jonathan T. Barron, Ben Mildenhall, Dor Verbin, Pratul P. Srinivasan, and Peter Hedman. Zip-NeRF: Anti-aliased grid-based neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2
[4] Mark Boss, Raphael Braun, Varun Jampani, Jonathan T Barron, Ce Liu, and Hendrik Lensch. NeRD: Neural reflectance decomposition from image collections. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2
[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques, 2001. 2
[6] Ang Cao and Justin Johnson. HexPlane: A fast representation for dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023.
2, 3 +[7] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. MVSNeRF: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[8] Shenchang Eric Chen and Lance Williams. View interpolation for image synthesis. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1993. 2 +[9] Inchang Choi, Orazio Gallo, Alejandro Troccoli, Min H Kim, and Jan Kautz. Extreme view synthesis. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019. 2 +[10] Yilun Du, Yinan Zhang, Hong-Xing Yu, Joshua B Tenenbaum, and Jiajun Wu. Neural radiance flow for 4d view synthesis and video processing. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 3 +[11] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In Proceedings of the Conference on Computer Graphics and Interactive Techniques in Asia (SIGGRAPH ASIA), 2022. 1, 2, 3, 6, 7 +[12] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. DeepStereo: Learning to predict new views from the world's imagery. In Proceedings of the IEEE Conference + +on Computer Vision and Pattern Recognition (CVPR), 2016. 2 +[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-Planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3 +[14] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 3 +[15] Ned Greene. Environment mapping and other applications of world projections. IEEE Computer Graphics and Applications, 1986. 2 +[16] Xiang Guo, Guanying Chen, Yuchao Dai, Xiaqing Ye, Jiadai Sun, Xiao Tan, and Errui Ding. Neural deformable voxel grid for fast optimization of dynamic view synthesis. In Proceedings of the Asian Conference on Computer Vision (ACCV), 2022. 1, 2, 3, 6 +[17] Xiang Guo, Jiadai Sun, Yuchao Dai, Guanying Chen, Xiaoqing Ye, Xiao Tan, Errui Ding, Yumeng Zhang, and Jingdong Wang. Forward flow for novel view synthesis of dynamic scenes. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 1, 2, 3, 6 +[18] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (TOG), 2018. 2 +[19] Wenbo Hu, Yuling Wang, Lin Ma, Bangbang Yang, Lin Gao, Xiao Liu, and Yuewen Ma. Tri-MipRF: Tri-mip representation for efficient anti-aliasing neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2 +[20] Nima Khademi Kalantari, Ting-Chun Wang, and Ravi Ramamoorthi. Learning-based view synthesis for light field cameras. ACM Transactions on Graphics (TOG), 2016. 2 +[21] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3D Gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (TOG), 2023. 1, 2, 3, 4, 5, 6, 7 +[22] Marc Levoy and Pat Hanrahan. Light field rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1996. 
2 +[23] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3 +[24] David B Lindell, Julien NP Martel, and Gordon Wetzstein. AutoInt: Automatic integration for fast neural volume rendering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[25] Baoyuan Liu, Min Wang, Hassan Foroosh, Marshall Tappen, and Marianna Pensky. Sparse convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. 4 +[26] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. In Proceed- + +ings of the Advances in Neural Information Processing Systems (NeurIPS), 2020. 2 +[27] Yu-Lun Liu, Chen Gao, Andreas Meuleman, Hung-Yu Tseng, Ayush Saraf, Changil Kim, Yung-Yu Chuang, Johannes Kopf, and Jia-Bin Huang. Robust dynamic radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 3 +[28] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. ACM Transactions on Graphics (TOG), 2021. 2 +[29] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[30] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 1, 2, 4, 5 +[31] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (TOG), 2022. 2 +[32] Thomas Neff, Pascal Stadlbauer, Mathias Parger, Andreas Kurz, Joerg H. Mueller, Chakravarty R. Alla Chaitanya, Anton S. Kaplanyan, and Markus Steinberger. DONeRF: Towards real-time rendering of compact neural radiance fields using depth oracle networks. Computer Graphics Forum (CGF), 2021. 2 +[33] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 1, 3 +[34] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. HyperNeRF: A higher-dimensional representation for topologically varying neural radiance fields. ACM Transactions on Graphics (TOG), 2021. 1, 3, 6, 7 +[35] Eric Penner and Li Zhang. Soft 3D reconstruction for view synthesis. ACM Transactions on Graphics (TOG), 2017. 2 +[36] Martin Piala and Ronald Clark. TermiNeRF: Ray termination prediction for efficient neural rendering. In Proceedings of the International Conference on 3D Vision (3DV), 2021. 2 +[37] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
1, 2, 3, 6 +[38] Daniel Rebain, Wei Jiang, Soroosh Yazdani, Ke Li, Kwang Moo Yi, and Andrea Tagliasacchi. DeRF: Decomposed radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 + +[39] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. KiloNeRF: Speeding up neural radiance fields with thousands of tiny MLPs. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[40] Konstantinos Rematas, Andrew Liu, Pratul P. Srinivasan, Jonathan T. Barron, Andrea Tagliasacchi, Thomas Funkhouser, and Vittorio Ferrari. Urban radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[41] Gernot Riegler and Vladlen Koltun. Free view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2 +[42] Gernot Riegler and Vladlen Koltun. Stable view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[43] Harry Shum and Sing Bing Kang. Review of image-based rendering techniques. In Visual Communications and Image Processing (VCIP), 2000. 2 +[44] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. NeRFPlayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics (TVCG), 2023. 3 +[45] Pratul P Srinivasan, Boyang Deng, Xiuming Zhang, Matthew Tancik, Ben Mildenhall, and Jonathan T Barron. NeRV: Neural reflectance and visibility fields for relighting and view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[46] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct Voxel Grid Optimization: Super-fast convergence for radiance fields reconstruction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[47] Matthew Tancik, Vincent Casser, Xinchen Yan, Sabeek Pradhan, Ben Mildenhall, Pratul P Srinivasan, Jonathan T Barron, and Henrik Kretzschmar. Block-NeRF: Scalable large scene neural view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[48] Ayush Tewari, Ohad Fried, Justus Thies, Vincent Sitzmann, Stephen Lombardi, Kalyan Sunkavalli, Ricardo MartinBrualla, Tomas Simon, Jason Saragih, Matthias Nießner, et al. State of the art on neural rendering. In Computer Graphics Forum (CGF), 2020. 2 +[49] Ayush Tewari, O Fried, J Thies, V Sitzmann, S Lombardi, Z Xu, T Simon, M Nießner, E Tretschk, L Liu, et al. Advances in neural rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 2021. 2 +[50] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 2019. 2 +[51] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Pro + +ceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 1, 3 +[52] Alex Trevithick and Bo Yang. GRF: Learning a general radiance field for 3D representation and rendering. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[53] Haithem Turki, Jason Y Zhang, Francesco Ferroni, and Deva Ramanan. 
SUDS: Scalable urban dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 3 +[54] Chaoyang Wang, Ben Eckart, Simon Lucey, and Orazio Gallo. Neural trajectory fields for dynamic novel view synthesis. arXiv preprint arXiv:2105.05994, 2021. 3 +[55] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. IBRNet: Learning multi-view image-based rendering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[56] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Xinggang Wang. 4D Gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3, 6, 7 +[57] Wenqi Xian, Jia-Bin Huang, Johannes Kopf, and Changil Kim. Space-time neural irradiance fields for free-viewpoint video. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3 +[58] Yuanbo Xiangli, Linning Xu, Xingang Pan, Nanxuan Zhao, Anyi Rao, Christian Theobalt, Bo Dai, and Dahua Lin. BungeeNeRF: Progressive neural radiance field for extreme multi-scale scene rendering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[59] Zexiang Xu, Sai Bi, Kalyan Sunkavalli, Sunil Hadap, Hao Su, and Ravi Ramamoorthi. Deep view synthesis from sparse photometric images. ACM Transactions on Graphics (TOG), 2019. 2 +[60] Wenqi Yang, Guanying Chen, Chaofeng Chen, Zhenfang Chen, and Kwan-Yee K Wong. S $^3$ -NeRF: Neural reflectance field from shading and shadow under a single viewpoint. In Proceedings of the Advances in Neural Information Processing Systems (NeurIPS), 2022. 2 +[61] Jae Shin Yoon, Kihwan Kim, Orazio Gallo, Hyun Soo Park, and Jan Kautz. Novel view synthesis of dynamic scenes with globally coherent depths from a monocular camera. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[62] Alex Yu, Sara Fridovich-Keil, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[63] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[64] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. + +In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[65] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. NeRF++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2 +[66] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 6 +[67] Xiuming Zhang, Pratul P Srinivasan, Boyang Deng, Paul Debevec, William T Freeman, and Jonathan T Barron. NeR-Factor: Neural factorization of shape and reflectance under an unknown illumination. ACM Transactions on Graphics (TOG), 2021. 2 +[68] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. 
On the continuity of rotation representations in neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 4 +[69] M. Zwicker, H. Pfister, J. van Baar, and M. Gross. Ewa volume splatting. In Proceedings of IEEE Visualization (VIS), 2001. 4 \ No newline at end of file diff --git a/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/images.zip b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..9ee5bfad9c0211d216646cebcbf569563f5e57a9 --- /dev/null +++ b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f407287707d20fc0f1151aeaf37e32f5aae29e3b5d4c8ac09716247d5a413937 +size 705487 diff --git a/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/layout.json b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..335be0efe23ccfaba2a056a543ac62872e92446b --- /dev/null +++ b/2024/3D Geometry-Aware Deformable Gaussian Splatting for Dynamic View Synthesis/layout.json @@ -0,0 +1,9466 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 47, + 103, + 545, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 103, + 545, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 103, + 545, + 120 + ], + "type": "text", + "content": "3D Geometry-aware Deformable Gaussian Splitting for Dynamic View Synthesis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "spans": [ + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "text", + "content": "Zhicheng Lu" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "text", + "content": ", Xiang Guo" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "text", + "content": ", Le Hui" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "text", + "content": ", Tianrui Chen" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "text", + "content": ", Min Yang" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "text", + "content": ", Xiao Tang" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "text", + "content": ", Feng Zhu" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "text", + "content": ", Yuchao Dai" + }, + { + "bbox": [ + 147, + 142, + 444, + 171 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 171, + 
454, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 171, + 454, + 185 + ], + "spans": [ + { + "bbox": [ + 140, + 171, + 454, + 185 + ], + "type": "text", + "content": "1Northwestern Polytechnical University 2Samsung R&D Institute" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 156, + 187, + 436, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 187, + 436, + 198 + ], + "spans": [ + { + "bbox": [ + 156, + 187, + 436, + 198 + ], + "type": "text", + "content": "{zhichenglu, guoxiang, cherryxchen}@mail.nwpu.edu.cn" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 87, + 201, + 502, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 201, + 502, + 213 + ], + "spans": [ + { + "bbox": [ + 87, + 201, + 502, + 213 + ], + "type": "text", + "content": "{daiyuchao, huile}@nwpu.edu.cn {min16.yang, xiao1.tang, f15.zhu}@samsung.com" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 241, + 191, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 241, + 191, + 253 + ], + "spans": [ + { + "bbox": [ + 143, + 241, + 191, + 253 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 266, + 290, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 290, + 542 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 290, + 542 + ], + "type": "text", + "content": "In this paper, we propose a 3D geometry-aware deformable Gaussian Splatting method for dynamic view synthesis. Existing neural radiance fields (NeRF) based solutions learn the deformation in an implicit manner, which cannot incorporate 3D scene geometry. Therefore, the learned deformation is not necessarily geometrically coherent, which results in unsatisfactory dynamic view synthesis and 3D dynamic reconstruction. Recently, 3D Gaussian Splatting provides a new representation of the 3D scene, building upon which the 3D geometry could be exploited in learning the complex 3D deformation. Specifically, the scenes are represented as a collection of 3D Gaussian, where each 3D Gaussian is optimized to move and rotate over time to model the deformation. To enforce the 3D scene geometry constraint during deformation, we explicitly extract 3D geometry features and integrate them in learning the 3D deformation. In this way, our solution achieves 3D geometry-aware deformation modeling, which enables improved dynamic view synthesis and 3D dynamic reconstruction. Extensive experimental results on both synthetic and real datasets prove the superiority of our solution, which achieves new state-of-the-art performance. The project is available at https://npucvr.github.io/GaGS/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 567, + 127, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 567, + 127, + 578 + ], + "spans": [ + { + "bbox": [ + 47, + 567, + 127, + 578 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 586, + 287, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 586, + 287, + 683 + ], + "spans": [ + { + "bbox": [ + 46, + 586, + 287, + 683 + ], + "type": "text", + "content": "Dynamic View Synthesis (DVS) aims at rendering novel photorealistic views at arbitrary viewpoints and any input time step given a monocular video of a dynamic scene, which has broad applications in virtual reality and augmented reality. 
Recently, empowered with effective representations such as neural radiance fields (NeRF) [30] and Gaussian Splitting [21], novel view synthesis for static scenes has been greatly advanced. However, this success" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 310, + 242, + 544, + 388 + ], + "blocks": [ + { + "bbox": [ + 310, + 242, + 544, + 388 + ], + "lines": [ + { + "bbox": [ + 310, + 242, + 544, + 388 + ], + "spans": [ + { + "bbox": [ + 310, + 242, + 544, + 388 + ], + "type": "image", + "image_path": "905bce9a44d2ccb907b545a37b4c00b1f27925f14301d96bd815439e515ac8cd.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 392, + 545, + 480 + ], + "lines": [ + { + "bbox": [ + 305, + 392, + 545, + 480 + ], + "spans": [ + { + "bbox": [ + 305, + 392, + 545, + 480 + ], + "type": "text", + "content": "Figure 1. Geometric information exploited by different methods. a) Early dynamic NeRF methods such as DNeRF[37] directly encode the coordinate " + }, + { + "bbox": [ + 305, + 392, + 545, + 480 + ], + "type": "inline_equation", + "content": "\\mathbf{p}" + }, + { + "bbox": [ + 305, + 392, + 545, + 480 + ], + "type": "text", + "content": " of the sample point as input feature for deformation network. b) Interpolation is used to fuse features from neighbouring grids and multiscale interpolation enhances the local geometry information [11, 16, 27, 53]. c) We propose to voxelize a set of Gaussian distributions and use a sparse convolution network to extract geometry-aware features for deformation learning." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 485, + 545, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 485, + 545, + 556 + ], + "spans": [ + { + "bbox": [ + 304, + 485, + 545, + 556 + ], + "type": "text", + "content": "cannot be extended to its dynamic counterpart directly. This is mainly due to the difficulty in modeling and representing the scene deformation. Due to the inherent motion/shape ambiguity in monocular dynamic 3D representation, dynamic scene modeling and synthesis are more challenging, especially for monocular video with limited observations." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 558, + 546, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 546, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 546, + 712 + ], + "type": "text", + "content": "In addressing the above challenges, one common strategy is to represent the dynamic scenes as a combination of a static canonical field and a deformation model [11, 16, 17, 27, 33, 34, 37, 51, 53], whereas the bottleneck lies in representing the diverse and complex real-world 3D deformation. To represent geometrically consistent 3D deformation, the local geometric/structural information is critical, since the deformations of the objects in the real world are highly correlated to their 3D structures. Furthermore, the motions of the object points are deeply coupled with the motions of their neighboring points. Thus, how to incorporate the local geometric information to learn locally smooth and consistent 3D deformations becomes the research focus in DVS." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 693, + 129, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 693, + 129, + 703 + ], + "spans": [ + { + "bbox": [ + 58, + 693, + 129, + 703 + ], + "type": "text", + "content": "*Equal contributions." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 59, + 703, + 138, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 138, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 138, + 712 + ], + "type": "text", + "content": "† Corresponding authors." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8900" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 288 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 288 + ], + "type": "text", + "content": "Recently, different deformation models have utilized the local geometric information, but they all have their limitations. As shown in Fig. 1 a), originally in D-NeRF [37], the feature (positional encoding) of each sampled point is extracted independently with each other. Following works notice that this method could not handle the complex dynamic scene since the extracted features contain little information from neighboring points. In Fig. 1 b), interpolation is introduced to fuse features of neighboring grids. NDVG [16] and RoDynRF [27] gradually increase the voxel resolution so that the large voxel size could cover a larger area, introducing the local smoothness at the early stage of the training. However, this strategy has a limited cover range of local areas and cannot work at a later training stage. TiNeuVox [11] and SUDS [53] interpolate with multi-scales. Nevertheless, the interpolation operation is rather simple in extracting local geometric information and introduces un-smoothness and artifacts [3, 19]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 289, + 289, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 289, + 289, + 505 + ], + "spans": [ + { + "bbox": [ + 47, + 289, + 289, + 505 + ], + "type": "text", + "content": "In modling the nonrigid deformation, it is crucial to account for the consistency in the motion of local neighborhood. Since point-level MLP has a limited receptive field, which cannot capture the local geometric features of point clouds. 
To utilize the local geometric information effectively, we propose to use 3D sparse convolution. As shown in Fig. 1 c), building upon the recent explicit point cloud based Gaussian Splatting representation, we introduce a sparse convolution network to extract 3D geometry-aware features. Compared with simple feature interpolation, the convolutional neural network is superior in extracting local information and has a much larger reception field. Also, we treat the 3D Gaussian distributions as point clouds, which enable sparse 3D convolution for time and memory efficiency. Note that FDNeRF [17] uses a 3D U-Net to inpaint the missing area in the voxel grid. But this inpaint network is not used for deformation modeling, while the rendering speed and voxel resolution are also limited." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 508, + 289, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 508, + 289, + 603 + ], + "spans": [ + { + "bbox": [ + 47, + 508, + 289, + 603 + ], + "type": "text", + "content": "Originally in Gaussian Splitting [21], the rotation parameter of each Gaussian is represented by quaternion. However, quaternion representation for rotation is discontinuous in parameter space for neural network learning [68]. We introduce the continuous 6D rotation [68] to ensure that the network learns a continuous function in the parameter space, which accurately represents the rotational states of each Gaussian at different time." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 605, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 605, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 605, + 289, + 715 + ], + "type": "text", + "content": "Overall, our method mainly has two components: a Gaussian canonical field and a deformation field. The Gaussian canonical field consists of 3D Gaussian distributions and a geometry-aware feature learning network. The explicit 3D Gaussian distribution represents the geometry of the canonical scene, and the sparse 3D CNN network extracts local structural/geometric information for each Gaussian. The deformation field estimates a transformation for each Gaussian in the canonical field, which transfers the" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "content": "Gaussian from the canonical field to the given timestamp. Finally, we use 3D Gaussian splatting to render images for the given timestamp." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 317, + 109, + 493, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 109, + 493, + 119 + ], + "spans": [ + { + "bbox": [ + 317, + 109, + 493, + 119 + ], + "type": "text", + "content": "Our main contributions are summarized as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 121, + 545, + 228 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 306, + 121, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 121, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 306, + 121, + 545, + 156 + ], + "type": "text", + "content": "- We propose a geometry-aware feature extraction network based on 3D Gaussian distribution to better utilize local geometric information." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 157, + 545, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 157, + 545, + 192 + ], + "spans": [ + { + "bbox": [ + 306, + 157, + 545, + 192 + ], + "type": "text", + "content": "- We propose to use continuous 6D rotation representation and modified density control strategy to adapt Gaussian splatting to dynamic scenes." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 193, + 545, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 193, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 306, + 193, + 545, + 228 + ], + "type": "text", + "content": "- Extensive experiments on both synthetic and real datasets show that our method surpasses competing methods by a wide margin." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 240, + 392, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 240, + 392, + 253 + ], + "spans": [ + { + "bbox": [ + 306, + 240, + 392, + 253 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 260, + 428, + 273 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 260, + 428, + 273 + ], + "spans": [ + { + "bbox": [ + 306, + 260, + 428, + 273 + ], + "type": "text", + "content": "2.1. Novel View Synthesis" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 278, + 545, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 278, + 545, + 411 + ], + "spans": [ + { + "bbox": [ + 304, + 278, + 545, + 411 + ], + "type": "text", + "content": "Novel View Synthesis (NVS) is a well-known task in both computer vision and graphics [5, 8, 15, 22]. Surveys such as [43, 48, 49] provide comprehensive discussions. Explicit NVS methods generally reconstruct an explicit 3D model of a scene in the form of point clouds [1], voxels, or meshes [18, 41, 42, 50]. Once the geometry of the scene is represented, novel view images can be rendered from arbitrary viewpoints via manipulating the camera pose parameters. Other methods [9, 12, 20, 35, 41, 42, 59] tackle NVS by estimating depth maps using multi-view geometry, whereas the features are aggregated from co-visible frames." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 411, + 545, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 411, + 545, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 411, + 545, + 651 + ], + "type": "text", + "content": "Neural Radiance Fields (NeRF) [30] is a groundbreaking approach that utilizes Multi-Layer Perceptrons (MLPs) to represent scenes implicitly. This methodology enables the modeling of a 5D radiance field, resulting in the impressive synthesis of views for static scenes. Numerous subsequent works expand the capabilities of NeRF by adapting it to various scenarios, such as handling larger and unbounded scenes [29, 40, 47, 58, 65], scene editing and relighting, [4, 45, 60, 67], [2, 3, 19], and improving the generalization ability [7, 52, 55, 64]. Meanwhile, researchers focus on achieving more efficient rendering and optimization in a NeRF-like framework. [6, 13, 24, 26, 28, 32, 36, 63] investigate efficient sampling methods along each ray for color accumulation, while [38, 39] partition the scene into multiple sub-regions as an efficient pre-processing, and [6, 13, 31, 46, 62] exploit voxel-grid representation to speed up the optimization. 
Very recently, [21] proposes to use 3D Guussian distribution to represent the scene, obtaining promising results. However, these methods are mainly applicable to static scenes, and fail in scenes with dynamic objects." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 658, + 443, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 443, + 671 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 443, + 671 + ], + "type": "text", + "content": "2.2. Dynamic View Synthesis" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "text", + "content": "A recent trend in NVs is to extend the success in static NVs to dynamic NVs. One viable strategy is to construct a 4D spatial-temporal representation. Yoon et al. [61] combine" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "8901" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 70, + 531, + 240 + ], + "blocks": [ + { + "bbox": [ + 60, + 70, + 531, + 240 + ], + "lines": [ + { + "bbox": [ + 60, + 70, + 531, + 240 + ], + "spans": [ + { + "bbox": [ + 60, + 70, + 531, + 240 + ], + "type": "image", + "image_path": "49cd04cb0a0fd62e67e59d4951cca6cdb33605a04e9fe69fcb8e1d3203f5c73d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 242, + 547, + 298 + ], + "lines": [ + { + "bbox": [ + 46, + 242, + 547, + 298 + ], + "spans": [ + { + "bbox": [ + 46, + 242, + 547, + 298 + ], + "type": "text", + "content": "Figure 2. The pipeline of our proposed 3D geometry-aware deformable Gaussian splitting. In the Gaussian canonical field, we reconstruct a static scene in canonical space using 3D Gaussian distributions. We extract positional features using an MLP, as well as local geometric features using a 3D U-Net, fused by another MLP to form the geometry-aware features. In the deformation field, taking the geometry-aware features and timestamp " + }, + { + "bbox": [ + 46, + 242, + 547, + 298 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 242, + 547, + 298 + ], + "type": "text", + "content": ", an MLP estimates the 3D Gaussian deformation, which transfers the canonical 3D Gaussian distributions to timestamp " + }, + { + "bbox": [ + 46, + 242, + 547, + 298 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 242, + 547, + 298 + ], + "type": "text", + "content": ". Finally, a rasterizer renders the transformed 3D Gaussian to images." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 300, + 289, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 300, + 289, + 480 + ], + "spans": [ + { + "bbox": [ + 46, + 300, + 289, + 480 + ], + "type": "text", + "content": "single-view and multi-view depth to achieve NVS by 3D warping. Gao et al. 
[14] use a time-invariant model and a time-varying model to represent the static part and dynamic part of a scene, respectively, and use scene flow for motion modeling. NeRFlow [10] proposes a 4D spatial-temporal representation of a dynamic scene. Xian et al. [57] map a spatial-temporal location to the color and volume density by a 4D spatial-temporal radiance field. NSFF [23] represents a dynamic scene as a continuously changing function, encompassing various aspects of the scene, including appearance, geometry, and 3D scene motion. DCT-NeRF [54] uses the Discrete Cosine Transform (DCT) to replace the scene flow in NSFF [23] to enable smoother motion trajectories. HexPlane [6] and K-Plane [13] project 4D spatial-temporal space to multiple 2D planes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 483, + 287, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 483, + 287, + 674 + ], + "spans": [ + { + "bbox": [ + 46, + 483, + 287, + 674 + ], + "type": "text", + "content": "On the other hand, works such as [11, 16, 17, 27, 33, 34, 37, 44, 51, 53] decode the dynamic scene with a canonical field and a deformation field. Along this pipeline, D-NeRF [37] first proposes the canonical-based framework. However, the deformation network utilizes positional features with little geometry information, which cannot handle complex dynamic scenarios well. Nerfies [33] proposes a coarse-to-fine optimization method for coordinate-based models that allows for more robust optimization. HyperNeRF [34] lifts the canonical field into a higher dimensional space to handle topological changes. NDVG [16] and RoDynRF [27] gradually increase the voxel resolution, which has two benefits. TiNeuVox [11] and SUDS [53] interpolate the features with multi-scales for deformation learning. The multi-scales interpolation covers a larger reception field, which benefits modeling varying motions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": "Very recently, with the stunning debut of 3D Gaussian [21], some works introduce this point-based representation into their pipelines to synthesize high-fidelity images" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 300, + 547, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 300, + 547, + 421 + ], + "spans": [ + { + "bbox": [ + 304, + 300, + 547, + 421 + ], + "type": "text", + "content": "of a dynamic scene. Wu et al. [56] introduce a 4D Gaussian Splitting representation and utilize a deformation field to model both Gaussian motions and shape changes. However, the multi-scale HexPlane interpolation has limited ability in extracting the geometry information, which is still insufficient for modeling complex motions. The projection-based representation compresses the 3D space to 2D space, losing 3D geometric information for deformation learning. In contrast, our canonical-based method can fully leverage the 3D information in 3D space." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 439, + 362, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 439, + 362, + 452 + ], + "spans": [ + { + "bbox": [ + 306, + 439, + 362, + 452 + ], + "type": "text", + "content": "3. 
Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 462, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 462, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 462, + 547, + 713 + ], + "type": "text", + "content": "In this section, we present our 3D geometry-aware deformable Gaussian Splating solution for dynamic view synthesis, where an overview of our pipeline is illustrated in Fig. 2. Given a set of images or monocular video of a dynamic scene with frames with corresponding time labels and known camera intrinsic and extrinsic parameters, our goal is to synthesize a novel view at any desired view at any desired time. Our method mainly consists of two core components: the Gaussian canonical field is used to learn the reconstruction of static scenes, while the deformation field is used to learn object deformation. First, we review the static 3D Gaussian splatting in Sec. 3.1. Then, we introduce the proposed Gaussian canonical field in Sec. 3.2, which consists of 3D Gaussian distributions and a geometry feature learning network. Next, in Sec. 3.3, we propose a 3D geometry-aware deformation field to learn transformations for given time steps, which transform our canonical 3D Gaussian distributions to corresponding times. In Sec. 3.4, we explain the process of rendering images from transformed 3D Gaussian distributions. Finally, we present our losses and density control modifications in Sec. 3.5." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8902" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 127, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 127, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 127, + 85 + ], + "type": "text", + "content": "3.1. Preliminary" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "spans": [ + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "text", + "content": "3D-GS [21] represents the scene with sparse 3D Gaussians distributions. Each Gaussian has an anisotropic covariance " + }, + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "inline_equation", + "content": "\\Sigma \\in \\mathbb{R}^{3\\times 3}" + }, + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "text", + "content": " and a mean value " + }, + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "inline_equation", + "content": "\\mu \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 133, + 287, + 148 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 287, + 148 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 287, + 148 + ], + "type": "interline_equation", + "content": "\\mathbf {G} (\\mathbf {x}) = e ^ {- \\frac {1}{2} (\\mathbf {x} - \\mu) ^ {\\top} \\boldsymbol {\\Sigma} ^ {- 1} (\\mathbf {x} - \\mu)}. 
$$\mathbf{G}(\mathbf{x}) = e^{-\frac{1}{2}(\mathbf{x}-\mu)^{\top}\boldsymbol{\Sigma}^{-1}(\mathbf{x}-\mu)}. \tag{1}$$

The covariance matrix $\boldsymbol{\Sigma}$ can be decomposed into a scaling matrix $\mathbf{S} \in \mathbb{R}^{3\times 3}$ and a rotation matrix $\mathbf{R} \in \mathrm{SO}(3)$. This ensures that the covariance matrix is positive semi-definite while reducing the learning difficulty of the 3D Gaussians:

$$\boldsymbol{\Sigma} = \mathbf{R}\mathbf{S}\mathbf{S}^{\top}\mathbf{R}^{\top}. \tag{2}$$

To render an image from a designated viewpoint, the covariance matrix $\boldsymbol{\Sigma}^{\prime}$ in camera coordinates is computed from a given viewing transformation $\mathbf{W}$, following [69]:

$$\boldsymbol{\Sigma}^{\prime} = \mathbf{J}\mathbf{W}\boldsymbol{\Sigma}\mathbf{W}^{\top}\mathbf{J}^{\top}, \tag{3}$$

where $\mathbf{J}$ is the Jacobian of the affine approximation of the projective transformation and $\mathbf{W}$ is the world-to-camera transformation matrix.

Each Gaussian is parameterized by the following attributes: position $\mathbf{x} \in \mathbb{R}^3$, color defined by spherical harmonics coefficients $\mathbf{c} \in \mathbb{R}^k$, rotation $\mathbf{r} \in \mathbb{R}^4$, scale $\mathbf{s} \in \mathbb{R}^3$, and opacity $o \in \mathbb{R}$. Point-based $\alpha$-blending and volumetric rendering as in NeRF [30] essentially share the same image formation model for the splatting process. Specifically, the color $\mathbf{C}$ of each pixel is determined by the Gaussians that cover it:

$$\mathbf{C} = \sum_{i=1}^{N} \mathbf{T}_{i}\alpha_{i}\mathbf{c}_{i}, \tag{4}$$

where $\alpha_{i}$ is the density of the $i$-th Gaussian point, computed by evaluating the Gaussian with covariance $\boldsymbol{\Sigma}$ and multiplying by its opacity, and $\mathbf{T}_{i}$ is the transmittance accumulated over the Gaussians in front of it.
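As a concrete illustration of Eqs. (1)–(4), the following minimal NumPy sketch is our own, not the authors' implementation: it builds a covariance from a scale vector and a rotation matrix and alpha-blends per-Gaussian colors for one pixel; the 2D projection of Eq. (3) and the tile rasterizer are omitted, and all function names are ours.

```python
import numpy as np

def covariance_from_scale_rotation(R, s):
    """Eq. (2): Sigma = R S S^T R^T with S = diag(s)."""
    S = np.diag(s)
    return R @ S @ S.T @ R.T

def gaussian_density(x, mu, Sigma):
    """Eq. (1): unnormalized Gaussian density at a 3D point x."""
    d = x - mu
    return float(np.exp(-0.5 * d @ np.linalg.solve(Sigma, d)))

def alpha_blend(alphas, colors):
    """Eq. (4): front-to-back compositing C = sum_i T_i * alpha_i * c_i,
    where T_i is the transmittance accumulated over the Gaussians in front."""
    C, T = np.zeros(3), 1.0
    for a, c in zip(alphas, colors):
        C += T * a * c
        T *= 1.0 - a
    return C

# toy usage: one pixel influenced by two depth-sorted Gaussians
R = np.eye(3)
Sigma = covariance_from_scale_rotation(R, np.array([0.1, 0.2, 0.1]))
a1 = 0.7 * gaussian_density(np.zeros(3), np.zeros(3), Sigma)   # opacity * density
print(alpha_blend([a1, 0.5], [np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])]))
```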
3.2. Gaussian Canonical Field

In this section, we first reconstruct a static scene in canonical space. Then, we propose a geometric branch that enables geometry feature learning on the 3D Gaussian distributions for the subsequent deformation field.

Gaussian parameters. Similar to 3D-GS [21], each Gaussian in the canonical space is characterized by position, color, scale, and opacity. For rotation, however, we are inspired by [68] to use a continuous 6D rotation representation. Compared with the quaternion representation used in 3D-GS, the 6D rotation representation benefits our method in estimating the deformation of each Gaussian from canonical space to time space, especially in helping the neural network learn smooth rotation variations over time. Specifically, we set learnable parameters $[a_1, a_2]$ for each Gaussian to denote its rotation in canonical space, where $a_1$ and $a_2$ are three-dimensional column vectors. They are initialized to $[1\ 0\ 0]^{\top}$ and $[0\ 1\ 0]^{\top}$, which correspond precisely to the identity rotation matrix. The mapping from this 6D representation to an $\mathrm{SO}(3)$ matrix is defined as [68]:

$$f_{\mathrm{V2M}}\left(\begin{bmatrix} | & | \\ a_{1} & a_{2} \\ | & | \end{bmatrix}\right) = \begin{bmatrix} | & | & | \\ b_{1} & b_{2} & b_{3} \\ | & | & | \end{bmatrix}, \tag{5}$$

$$b_{i} = \begin{cases} \mathcal{N}(a_{1}) & \text{if } i = 1 \\ \mathcal{N}\left(a_{2} - (b_{1}\cdot a_{2})\,b_{1}\right) & \text{if } i = 2 \\ b_{1} \times b_{2} & \text{if } i = 3 \end{cases}, \tag{6}$$

where $\mathcal{N}(\cdot)$ denotes a normalization function, "$\cdot$" denotes the vector inner product, and "$\times$" denotes the vector cross product. The subscript V2M in $f_{\mathrm{V2M}}$ indicates the transform from the 6D vector to a rotation matrix.
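A minimal sketch of the 6D-to-rotation mapping of Eqs. (5)–(6), our own illustration following [68] (the function name is ours):

```python
import numpy as np

def rotation_6d_to_matrix(a1, a2):
    """f_V2M of Eqs. (5)-(6): Gram-Schmidt two 3D vectors into an SO(3) matrix."""
    b1 = a1 / np.linalg.norm(a1)              # b1 = N(a1)
    b2 = a2 - np.dot(b1, a2) * b1             # remove the component of a2 along b1
    b2 = b2 / np.linalg.norm(b2)              # b2 = N(a2 - (b1 . a2) b1)
    b3 = np.cross(b1, b2)                     # b3 = b1 x b2
    return np.stack([b1, b2, b3], axis=1)     # columns [b1, b2, b3]

# the initialization [1 0 0]^T, [0 1 0]^T maps to the identity rotation
print(rotation_6d_to_matrix(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])))
```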
Geometry feature learning. To capture the local geometric structure of the canonical scene, we regard the 3D Gaussians as a 3D point cloud, i.e., we only use the 3D coordinates of the Gaussians. To handle a large number of points, we leverage a simple two-branch structure: the geometric branch learns local features of the point cloud across different receptive fields, while the identity branch preserves independent point-level features at high resolution. By integrating the geometric branch and the identity branch, we efficiently obtain point-level features at high resolution while embedding the local geometric information of the point cloud.

The geometric branch applies sparse convolution [25] on sparse voxels to extract local geometric features at different receptive fields. Given the point cloud $\mathbf{P} \in \mathbb{R}^{N\times 3}$, we first transform the high-resolution point cloud into low-resolution voxels by dividing the space with a fixed grid size $s$:

$$\mathbf{V} = \operatorname{floor}(\mathbf{P}/s), \tag{7}$$

where $\mathbf{V}$ has size $M\times 3$ and $M$ is the number of voxels. Then, we construct a sparse 3D U-Net by stacking a set of sparse convolutions with skip connections. Taking $\mathbf{V}$ as input, the sparse 3D U-Net aggregates local features (denoted $\mathbf{F}_v \in \mathbb{R}^{M\times C}$) of the point cloud.

The identity branch uses a multi-layer perceptron (MLP) to map the 3D coordinates of the point cloud into an embedding space (denoted $\mathbf{F}_p \in \mathbb{R}^{N\times C}$) so as to maintain the independence of point features. To accurately characterize the local geometric structure of the canonical scene, we fuse the voxel features, which carry local information, onto the point features. Specifically, we transform the voxel features $\mathbf{F}_v$ back to the corresponding points to obtain point-level features $\mathbf{F}_p^{\prime} \in \mathbb{R}^{N\times C}$ by assigning each voxel feature to the points that fall within that voxel. Finally, we concatenate $\mathbf{F}_p^{\prime}$ and $\mathbf{F}_p$ and pass the result through an MLP layer to obtain the fused point-level feature:

$$\mathbf{F}_{\mathrm{fuse}} = \operatorname{MLP}\left(\operatorname{Concat}\left(\mathbf{F}_{p}^{\prime}, \mathbf{F}_{p}\right)\right). \tag{8}$$
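The two-branch feature extraction of Eqs. (7)–(8) can be sketched as follows; this is our own NumPy illustration, in which the sparse 3D U-Net and the MLPs are replaced by random stand-ins, since the paper does not specify them beyond their role.

```python
import numpy as np

def voxelize(P, s):
    """Eq. (7): V = floor(P / s); returns unique voxels and a point-to-voxel index."""
    V = np.floor(P / s).astype(np.int64)
    uniq, point2voxel = np.unique(V, axis=0, return_inverse=True)
    return uniq, point2voxel.reshape(-1)

def fuse_features(F_v, F_p, point2voxel):
    """Eq. (8), up to the final MLP: scatter voxel features back to their points
    (F_p') and concatenate with the identity-branch point features F_p."""
    F_p_prime = F_v[point2voxel]                      # (N, C), copied from the parent voxel
    return np.concatenate([F_p_prime, F_p], axis=1)   # (N, 2C), input to the fusion MLP

P = np.random.rand(1000, 3)                  # Gaussian centers treated as a point cloud
uniq, point2voxel = voxelize(P, s=0.1)
F_v = np.random.rand(len(uniq), 32)          # stand-in for sparse-3D-U-Net voxel features
F_p = np.random.rand(len(P), 32)             # stand-in for identity-branch point features
print(fuse_features(F_v, F_p, point2voxel).shape)    # (1000, 64)
```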
3.3. Deformation Field

In this section, we propose a deformation field that estimates the deformation of each 3D Gaussian in the canonical space for a given time $t$.

Deformation estimation. We adopt an MLP as the decoder $\mathcal{G}_{\Phi}$, which takes the geometry feature learned by the geometry branch of the Gaussian canonical field, the position of each Gaussian, and the timestamp as input, and outputs the deformation of each Gaussian from canonical space to time $t$, including the position deformation $\Delta\mathbf{x}_{t} \in \mathbb{R}^{3}$, rotation deformation $\Delta\mathbf{r}_{t} \in \mathbb{R}^{6}$, and scale deformation $\Delta\mathbf{s}_{t} \in \mathbb{R}^{3}$:

$$\Delta\mathbf{x}_{t}, \Delta\mathbf{r}_{t}, \Delta\mathbf{s}_{t} = \mathcal{G}_{\Phi}(\mathbf{F}_{\mathrm{fuse}}, \gamma(\mathbf{x}), \gamma(t)), \tag{9}$$

where $\gamma(\cdot)$ denotes the positional encoding of NeRF [30], which maps a one-dimensional signal from $\mathbb{R}$ into a higher-dimensional space $\mathbb{R}^{2L}$:

$$\gamma(p) = \left(\sin(2^{0}\pi p), \cos(2^{0}\pi p), \dots, \sin(2^{L-1}\pi p), \cos(2^{L-1}\pi p)\right). \tag{10}$$

Note that we keep the color parameters $\mathbf{c}$ and opacity $o$ of the canonical 3D Gaussian distributions constant over time. These two factors are highly related to the physical properties of the Gaussian distributions, and we want each distribution to represent the same object area over the timeline.

Transformation. Using the deformation estimated for time $t$ above, we can transform the 3D Gaussian distributions to the current time by

$$\mathbf{x}_{t} = \mathbf{x} + \Delta\mathbf{x}_{t}, \qquad \mathbf{s}_{t} = \mathbf{s} + \Delta\mathbf{s}_{t}, \qquad \mathbf{r}_{t} = f_{\mathrm{V2M}}(\Delta\mathbf{r}_{t}) \times f_{\mathrm{V2M}}(\mathbf{r}). \tag{11}$$
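A short sketch of the frequency encoding of Eq. (10) and of how the predicted deformations are applied in Eq. (11); this is our own illustration, with zero offsets standing in for the outputs of the decoder $\mathcal{G}_{\Phi}$.

```python
import numpy as np

def positional_encoding(p, L):
    """Eq. (10): gamma(p); each scalar entry of p is mapped to 2L sinusoidal features."""
    freqs = (2.0 ** np.arange(L)) * np.pi
    angles = np.atleast_1d(p)[..., None] * freqs            # (..., L)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

# Eq. (9), schematically: the decoder G_Phi (an MLP) takes the fused geometry feature,
# gamma(x) and gamma(t), and predicts (dx, dr6, ds) for every Gaussian.
x = np.array([0.2, -0.1, 0.5])                  # canonical position of one Gaussian
t = 0.3                                         # normalized timestamp
enc = np.concatenate([positional_encoding(x, L=10).ravel(),
                      positional_encoding(t, L=6).ravel()])
dx, ds = np.zeros(3), np.zeros(3)               # stand-ins for the MLP outputs
x_t, s_t = x + dx, np.array([0.1, 0.1, 0.1]) + ds        # Eq. (11) for position and scale
# the rotation update of Eq. (11) composes matrices: r_t = f_V2M(dr_t) @ f_V2M(r)
```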
3.4. Rasterization

Once the attributes of each Gaussian $(\mathbf{x}_t, \mathbf{c}, \mathbf{r}_t, \mathbf{s}_t, o)$ have been prepared, we use the differentiable tile rasterizer [21] to render the image at any desired viewpoint at this timestamp:

$$\hat{\mathbf{C}}_{t} = \operatorname{Rasterizer}(\mathbf{x}_{t}, \mathbf{c}, \mathbf{r}_{t}, \mathbf{s}_{t}, o, \mathbf{K}, [\mathbf{R}|\mathbf{T}]), \tag{12}$$

where $\mathbf{K}$ and $[\mathbf{R}|\mathbf{T}]$ represent the camera's intrinsic and extrinsic parameters, respectively.

Figure 3. Our density control is designed for dynamic scenes. We control the densification of Gaussian distributions according to their transformed parameters at timestamp $t$ rather than their parameters in canonical space.

3.5. Optimization

To optimize the model, we use a photometric loss and a motion loss, and we also adapt the density control of 3D-GS [21] with our modifications.

Photometric loss. The photometric loss consists of an $L_{1}$ loss and a structural similarity loss $L_{D-SSIM}$ between the rendered image $\hat{\mathbf{C}}_t$ and the ground-truth image $\mathbf{C}_t$:

$$L_{\mathrm{photo}} = (1-\lambda)L_{1} + \lambda L_{D-SSIM}. \tag{13}$$

Regularization. We exploit the prior that, in a scene, the proportion of dynamic points is much smaller than that of static points, and that the motion amplitude of the dynamic points is limited. In other words, the points in a scene should stay as static as possible:

$$L_{\mathrm{motion}} = \left\|\Delta\mathbf{x}_{t}\right\|_{1}. \tag{14}$$

Total loss. The total loss is defined as follows:

$$L = L_{\mathrm{photo}} + \omega L_{\mathrm{motion}}, \tag{15}$$

where $\omega$ is a trade-off parameter that balances the two components.
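A sketch of the training objective of Eqs. (13)–(15); this is our own illustration, `d_ssim` is a placeholder for the D-SSIM term, and the default weights are placeholders rather than the paper's settings.

```python
import numpy as np

def total_loss(rendered, gt, delta_x, d_ssim=None, lam=0.2, omega=0.01):
    """Eqs. (13)-(15); lam and omega are placeholder weights, not the paper's values."""
    l1 = np.abs(rendered - gt).mean()
    l_dssim = d_ssim(rendered, gt) if d_ssim is not None else 0.0   # D-SSIM term
    l_photo = (1.0 - lam) * l1 + lam * l_dssim                      # Eq. (13)
    l_motion = np.abs(delta_x).mean()                               # Eq. (14), L1 on offsets
    return l_photo + omega * l_motion                               # Eq. (15)

# toy usage with random images and per-Gaussian position offsets
loss = total_loss(np.random.rand(64, 64, 3), np.random.rand(64, 64, 3),
                  delta_x=np.zeros((1000, 3)))
print(loss)
```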
Density control. 3D-GS has shown that adaptive density control is essential for achieving high rendering performance. On the one hand, Gaussians need to populate empty areas that lack geometric features; hence, a copy of the Gaussian is created for under-reconstructed regions. On the other hand, large Gaussians in regions of high variance need to be split into smaller Gaussians. Like 3D-GS, we replace such Gaussians with two new ones, divide their scale by a factor of $\phi = 1.6$, and initialize their positions by using the original 3D Gaussian as a PDF for sampling.

Our method differs from 3D-GS in the following aspect. In 3D-GS, there exists only a single set of Gaussians. In our case, however, we initialize the Gaussians in the canonical space, estimate their deformations, and transform their attributes into the timestamp space. As shown in Fig. 3, we use the Gaussians at the current moment to render the image. Therefore, we determine whether a Gaussian needs density control from its current attributes (such as scale) at the current timestamp rather than from its canonical attributes. Afterward, we invert the transformation of the split/cloned Gaussians back to the canonical space.
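A pseudocode-level sketch of the modified split criterion illustrated in Fig. 3; this is our own simplification, the thresholds and densification schedule follow 3D-GS and are not reproduced here, and the children are created directly in canonical space for brevity, whereas the paper splits at time $t$ and then inverts the transformation.

```python
import numpy as np

def split_at_time(x_canon, s_canon, s_t, grad, grad_thr, scale_thr, phi=1.6):
    """Select Gaussians to split using their *time-t* scale (Fig. 3), then create two
    children per selected Gaussian back in canonical space with scales divided by phi."""
    mask = (grad > grad_thr) & (s_t.max(axis=1) > scale_thr)
    parents_x = np.repeat(x_canon[mask], 2, axis=0)
    parents_s = np.repeat(s_canon[mask], 2, axis=0)
    # positions sampled from the parent Gaussian (isotropic approximation here)
    children_x = parents_x + np.random.randn(*parents_x.shape) * parents_s
    children_s = parents_s / phi
    return children_x, children_s, mask
```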
Table 1. Quantitative comparison between our method and competing methods on the D-NeRF dataset. Each cell reports PSNR↑ / SSIM↑ / LPIPS↓. The best results are highlighted in bold.

| Method | Hell Warrior | Mutant | Hook | Bouncing Balls |
|---|---|---|---|---|
| 3D-GS [21] | 15.3924 / 0.8776 / 0.1300 | 21.7554 / 0.9359 / 0.0575 | 18.6933 / 0.8733 / 0.1144 | 22.5575 / 0.9485 / 0.0647 |
| D-NeRF [37] | 25.0293 / 0.9506 / 0.0691 | 31.2900 / 0.9739 / 0.0268 | 29.2567 / 0.9650 / 0.1174 | 38.9300 / 0.9900 / 0.1031 |
| TiNeuVox-B [11] | 28.2058 / 0.9661 / 0.0631 | 33.9029 / 0.9771 / 0.0301 | 31.7929 / 0.9718 / 0.0436 | 40.8536 / 0.9913 / 0.0401 |
| NDVG [16] | 26.4933 / 0.9600 / 0.0670 | 34.4131 / 0.9801 / 0.0270 | 30.0009 / 0.9626 / 0.0463 | 37.5157 / 0.9874 / 0.0751 |
| FDNeRF [17] | 27.7120 / 0.9665 / 0.0508 | 34.9727 / 0.9810 / 0.0312 | 32.2867 / 0.9756 / 0.0388 | 40.0191 / 0.9912 / 0.0395 |
| 4D-GS [56] | 28.1196 / 0.9730 / 0.0276 | 38.3411 / 0.9936 / 0.0062 | 33.1560 / 0.9810 / 0.0168 | 40.7418 / 0.9941 / 0.0105 |
| Ours | 32.2712 / 0.9835 / 0.0164 | 41.4284 / 0.9969 / 0.0029 | 36.9647 / 0.9916 / 0.0076 | 43.5929 / 0.9960 / 0.0061 |

| Method | Lego | T-Rex | Stand Up | Jumping Jacks |
|---|---|---|---|---|
| 3D-GS [21] | 23.0991 / 0.9329 / 0.0567 | 25.7496 / 0.9567 / 0.0474 | 19.3779 / 0.9200 / 0.0909 | 20.7163 / 0.9227 / 0.0980 |
| D-NeRF [37] | 21.6427 / 0.8394 / 0.1654 | 31.7568 / 0.9767 / 0.0396 | 32.7992 / 0.9818 / 0.0215 | 32.8031 / 0.9810 / 0.0373 |
| TiNeuVox-B [11] | 25.1748 / 0.9217 / 0.0689 | 32.7750 / 0.9783 / 0.0307 | 36.2031 / 0.9859 / 0.0199 | 34.7390 / 0.9823 / 0.0328 |
| NDVG [16] | 25.0416 / 0.9395 / 0.0534 | 32.6229 / 0.9781 / 0.0330 | 33.2158 / 0.9793 / 0.0302 | 31.2530 / 0.9737 / 0.0398 |
| FDNeRF [17] | 25.2700 / 0.9390 / 0.0460 | 30.7068 / 0.9731 / 0.0368 | 36.9107 / 0.9878 / 0.0188 | 33.5521 / 0.9812 / 0.0329 |
| 4D-GS [56] | 25.4024 / 0.9434 / 0.0377 | 33.3912 / 0.9869 / 0.0130 | 38.2610 / 0.9923 / 0.0071 | 35.6656 / 0.9882 / 0.0159 |
| Ours | 25.4411 / 0.9474 / 0.0329 | 39.0285 / 0.9952 / 0.0052 | 42.2101 / 0.9966 / 0.0028 | 37.9604 / 0.9928 / 0.0088 |
Table 2. Quantitative comparison between our method and competing methods on the HyperNeRF dataset. Each cell reports PSNR↑ / MS-SSIM↑. The best results are highlighted in bold.

| Method | Chicken | 3D Printer | Broom | Peel Banana |
|---|---|---|---|---|
| TiNeuVox [11] | 28.2861 / 0.9474 | 22.7514 / 0.8392 | 21.2682 / 0.6832 | 24.5136 / 0.8743 |
| NDVG [16] | 27.0536 / 0.9390 | 22.4196 / 0.8389 | 21.4658 / 0.7028 | 22.8204 / 0.8279 |
| FDNeRF [17] | 27.9627 / 0.9438 | 22.8027 / 0.8453 | 21.9091 / 0.7154 | 24.2515 / 0.8645 |
| 3D-GS [21] | 20.8915 / 0.7426 | 18.3991 / 0.6114 | 20.3953 / 0.6598 | 20.5654 / 0.8094 |
| Ours | 28.5342 / 0.9331 | 22.0403 / 0.8098 | 20.8994 / 0.5241 | 25.5785 / 0.9067 |
Table 3. Quantitative comparison on the HyperNeRF dataset: average over Cut Lemon, Chicken, 3D Printer, and Split Cookie. The best results are highlighted in bold.

| Method | PSNR↑ | SSIM↑ | LPIPS↓ |
|---|---|---|---|
| TiNeuVox-B [11] | 27.16 | 0.76 | 0.40 |
| 3D-GS [21] | 21.26 | 0.69 | 0.40 |
| 4D-GS [56] | 26.98 | 0.78 | 0.31 |
| Ours | 27.52 | 0.80 | 0.25 |

4. Experiments

4.1. Dataset

In this paper, we use both synthetic and real datasets to evaluate our method. The synthetic dataset D-NeRF [37] contains 8 dynamic scenes: Hell Warrior, Mutant, Hook, Bouncing Balls, Lego, T-Rex, Stand Up, and Jumping Jacks. The real dataset is proposed by HyperNeRF [34] and includes interp-cut-lemon, interp-cut-lemon1, vrig-chicken, vrig-3dprinter, and misc-split-cookie. Following previous works [21], we report three evaluation metrics: Peak Signal-to-Noise Ratio (PSNR), Structural Similarity (SSIM), and Learned Perceptual Image Patch Similarity (LPIPS) [66].
4.2. Implementation Details

Our implementation is based on 3D-GS [21]. We train for a total of 40,000 iterations; the first 3,000 iterations optimize only the static scene, after which the deformation field is added to optimize the dynamic scene. The learning rate of our network decays exponentially from 8e-4 to 1.6e-6, and we use the Adam optimizer. Moreover, we use a 2-layer MLP with a width of 64 for the front point-feature extraction and a 3-layer MLP with a width of 64 for the back point-feature fusion. A 5-layer MLP with a width of 256 and a skip connection is used as the decoder. For the positional encoding, we use $L = 10$ for the position $\mathbf{x}$ and $L = 6$ for the timestep $t$. For the D-NeRF dataset, which does not provide point clouds, we randomly initialize 150,000 points; for the HyperNeRF dataset, we use the point cloud provided with the dataset for initialization. All experiments are run on a single RTX 4090 GPU.
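The hyper-parameters above can be summarized in a small configuration sketch; this is our own illustrative summary, and the key names are ours, not the authors' code.

```python
# illustrative summary of the training configuration described above (names are ours)
config = {
    "iterations": 40_000,
    "static_warmup_iters": 3_000,        # deformation field is added after this point
    "lr": {"start": 8e-4, "end": 1.6e-6, "schedule": "exponential", "optimizer": "Adam"},
    "front_point_feature_mlp": {"layers": 2, "width": 64},
    "back_point_fusion_mlp":   {"layers": 3, "width": 64},
    "deformation_decoder":     {"layers": 5, "width": 256, "skip_connection": True},
    "positional_encoding":     {"L_position": 10, "L_time": 6},
    "init_points_dnerf": 150_000,        # random init; HyperNeRF uses the provided point cloud
}
```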
4.3. Quantitative Results

Figure 4. Qualitative comparisons between baselines and our method on the synthetic dataset.

Figure 5. Qualitative comparisons between baselines and our method on the HyperNeRF real dataset [34].

Synthetic scenes. We compare our method with recent state-of-the-art methods, including 3D-GS, D-NeRF, TiNeuVox, NDVG, FDNeRF, and 4D-GS, on the D-NeRF dataset. As shown in Table 1, we list the results for each scene. It can be observed that our method is significantly better than the other canonical-based methods in terms of all three metrics. On average, our method significantly improves PSNR compared with the static Gaussian baseline, 3D-GS. The computational costs are a training time of around 2 h (averaged over the D-NeRF dataset), a rendering speed of 12 FPS (fixed viewpoint), and a model size of 34 MB for the point cloud plus 14 MB for the network. Since it inherently cannot model the deformation of a dynamic scene, 3D-GS performs poorly on dynamic view synthesis.

Real scenes. We further compare our method with several highly related works on the real-scene dataset proposed by [34]. We show detailed results on Chicken, 3D Printer, Broom, and Peel Banana in Table 2, and the average results on Cut Lemon, Chicken, 3D Printer, and Split Cookie in Table 3. It can be observed that our method achieves good performance compared with other state-of-the-art methods. Compared with synthetic datasets, real datasets are more challenging due to the narrow camera viewing range and pose ambiguity. The quantitative results demonstrate the effectiveness of the proposed method on real scenes.

4.4. Visualization Results

Visual comparison. In addition to the quantitative results, we also provide visualization results of different methods to demonstrate the superiority of our method. For better comparison, we show the rendered images of each synthetic scene from the same viewpoint in Fig. 4. Comparing the visualization results of different methods shows that the images rendered by our method are closer to the ground-truth images, indicating that our method can recover accurate and detailed content. In addition, we provide visualization results on the real scenes in Fig. 5. Compared with TiNeuVox [11], our method can recover the detailed structure of dynamic objects, such as the chicken and the banana.

Gaussian visualization. To verify the effectiveness of our method, we show the 3D point clouds of the 3D Gaussians; specifically, we only use the 3D coordinates of the Gaussians. As shown in Fig. 7, we provide the point clouds of different methods on the synthetic dataset, including 3D-GS [21], 4D-GS [56], and ours. Note that the color of each point is generated from its 3D coordinates. Since 3D-GS cannot model dynamic scenes, the quality of its point cloud is poor. Comparing 4D-GS with ours, it can be observed that the point cloud of our method has a clear local geometric structure.
Figure 6. Visualization of learned geometry-aware features.

Figure 7. Visualization of the learned Gaussians, colored by position coordinates.

4.5. Ablation Study

We conduct ablation studies on the synthetic dataset ($800 \times 800$) to verify the effectiveness of our proposed components. In Table 4, the vanilla model is a simple MLP model without our components.

Effect of geometry-aware features. To learn the geometric information of the object in our Gaussian canonical field, we voxelize the 3D Gaussian distributions and extract geometry-aware features using our 3D U-Net. To demonstrate the effectiveness of this design, we test our method without the geometric branch and leave the other components unchanged. In Table 4, ours full has a clear advantage over w/o geo. feat., and the geometry branch plays the most important role among the components studied in the ablations.

In Fig. 6, we visualize the learned geometry-aware features. We color the point clouds with the learned features, and the result shows meaningful geometric information. Interestingly, we can see an obvious difference in the learned features between the moving objects (the bucket of the Lego and the T-Rex body) and the static objects (the body of the Lego and the ground in T-Rex). Our geometry-aware features also reflect the local geometric structure: for example, the spines of the bones on the T-Rex tail have similar features, while the smooth parts of the tail bones show different patterns.

Different geometric features. We use a PointNet-like architecture and plane projection (2D CNN) to conduct ex

Table 4. Ablation study in terms of average PSNR, SSIM, and LPIPS. The best results are highlighted in bold.
Method | PSNR↑ | SSIM↑ | LPIPS↓
w/o geo. feat. | 37.5757 | 0.9841 | 0.0173
w/o 6D rotation | 37.8750 | 0.9851 | 0.0154
canonical DC | 37.8026 | 0.9847 | 0.0166
vanilla | 35.2307 | 0.9793 | 0.0242
PointNet feat. | 36.7353 | 0.9826 | 0.0184
Plane feat. | 35.9054 | 0.9811 | 0.0212
ours full | 38.0134 | 0.9853 | 0.0153
", + "image_path": "c8f90c2733fbdbf036dfe9a0b473b763c1af4b3a402015c75da8a541c1098201.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 201, + 545, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 201, + 545, + 238 + ], + "spans": [ + { + "bbox": [ + 305, + 201, + 545, + 238 + ], + "type": "text", + "content": "periments. Compared with the results (dubbed as \"PointNet feat.\" and \"Plane feat.\") in Table 4, it can be observed that our method achieves significant performance gains." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 239, + 545, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 239, + 545, + 345 + ], + "spans": [ + { + "bbox": [ + 305, + 239, + 545, + 345 + ], + "type": "text", + "content": "6D representation. To study the effect of 6D representation of the rotation parameters of the 3D Gaussian, we conduct an experiment that replaces the 6D vector with quaternion " + }, + { + "bbox": [ + 305, + 239, + 545, + 345 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 305, + 239, + 545, + 345 + ], + "type": "text", + "content": " which is used in the original 3D-GS. To deform the 3D Gaussian in canonical space, our deformation field estimates a " + }, + { + "bbox": [ + 305, + 239, + 545, + 345 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{q}_{\\mathrm{t}}" + }, + { + "bbox": [ + 305, + 239, + 545, + 345 + ], + "type": "text", + "content": " and gets " + }, + { + "bbox": [ + 305, + 239, + 545, + 345 + ], + "type": "inline_equation", + "content": "\\mathbf{q}_{\\mathrm{t}} = \\mathbf{q} + \\Delta \\mathbf{q}_{\\mathrm{t}}" + }, + { + "bbox": [ + 305, + 239, + 545, + 345 + ], + "type": "text", + "content": ", using the quaternion add operation. In Table 4, quaternion demonstrates an obvious performance drop, which proves the effectiveness of the 6D representation." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 346, + 545, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 346, + 545, + 430 + ], + "spans": [ + { + "bbox": [ + 305, + 346, + 545, + 430 + ], + "type": "text", + "content": "Density control. In terms of density control, we test the setting that only uses the 3D Gaussian in canonical space without considering the transform 3D Gaussian at other timestamps. In Table 4, canonical DC shows a performance drop, as the canonical 3D Gaussian alone cannot reflect the over/under reconstruction information at all timestamps for dynamic scenes." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 443, + 378, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 443, + 378, + 455 + ], + "spans": [ + { + "bbox": [ + 306, + 443, + 378, + 455 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 464, + 545, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 464, + 545, + 620 + ], + "spans": [ + { + "bbox": [ + 304, + 464, + 545, + 620 + ], + "type": "text", + "content": "In this paper, we have proposed a 3D geometry aware Gaussian Splatting solution for dynamic view synthesis. 
We addressed the limitations of existing approaches from two perspectives: 1) we introduced 3D sparse convolution to extract local structural information effectively and efficiently for deformation learning, and 2) we represented the dynamic scenes as a collection of deforming 3D Gaussian distributions, which are optimized to deform (move, rotate, scaling) over time. Experimental results across synthetic and real datasets demonstrate the superiority of our solution in dynamic view synthesis and 3D reconstruction. We plan to further investigate explicit motion modeling by exploiting the foreground and background motion segmentation cues." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 633, + 403, + 646 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 633, + 403, + 646 + ], + "spans": [ + { + "bbox": [ + 306, + 633, + 403, + 646 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 654, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 654, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 654, + 545, + 712 + ], + "type": "text", + "content": "We thank the area chairs and the reviewers for their insightful and positive feedback. This work was supported in part by the National Science Fund of China (Grant Nos. 62271410, 62306238) and the Fundamental Research Funds for the Central Universities." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8907" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Kara-Ali Aliev, Artem Sevastopolsky, Maria Kolos, Dmitry Ulyanov, and Victor Lempitsky. Neural point-based graphics. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 201 + ], + "type": "text", + "content": "[2] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-NeRF: A multiscale representation for antiailiasing neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 204, + 288, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 204, + 288, + 258 + ], + "spans": [ + { + "bbox": [ + 53, + 204, + 288, + 258 + ], + "type": "text", + "content": "[3] Jonathan T. 
Barron, Ben Mildenhall, Dor Verbin, Pratul P. Srinivasan, and Peter Hedman. Zip-NeRF: Anti-aliased grid-based neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 260, + 288, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 260, + 288, + 315 + ], + "spans": [ + { + "bbox": [ + 53, + 260, + 288, + 315 + ], + "type": "text", + "content": "[4] Mark Boss, Raphael Braun, Varun Jampani, Jonathan T Barron, Ce Liu, and Hendrik Lensch. NeRD: Neural reflectance decomposition from image collections. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 317, + 288, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 317, + 288, + 361 + ], + "spans": [ + { + "bbox": [ + 53, + 317, + 288, + 361 + ], + "type": "text", + "content": "[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques, 2001. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 363, + 288, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 363, + 288, + 407 + ], + "spans": [ + { + "bbox": [ + 53, + 363, + 288, + 407 + ], + "type": "text", + "content": "[6] Ang Cao and Justin Johnson. HexPlane: A fast representation for dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 409, + 288, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 409, + 288, + 463 + ], + "spans": [ + { + "bbox": [ + 53, + 409, + 288, + 463 + ], + "type": "text", + "content": "[7] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. MVSNeRF: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 465, + 288, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 465, + 288, + 509 + ], + "spans": [ + { + "bbox": [ + 53, + 465, + 288, + 509 + ], + "type": "text", + "content": "[8] Shenchang Eric Chen and Lance Williams. View interpolation for image synthesis. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1993. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 510, + 288, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 510, + 288, + 554 + ], + "spans": [ + { + "bbox": [ + 53, + 510, + 288, + 554 + ], + "type": "text", + "content": "[9] Inchang Choi, Orazio Gallo, Alejandro Troccoli, Min H Kim, and Jan Kautz. Extreme view synthesis. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 556, + 287, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 556, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 48, + 556, + 287, + 610 + ], + "type": "text", + "content": "[10] Yilun Du, Yinan Zhang, Hong-Xing Yu, Joshua B Tenenbaum, and Jiajun Wu. Neural radiance flow for 4d view synthesis and video processing. 
In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 613, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 678 + ], + "type": "text", + "content": "[11] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In Proceedings of the Conference on Computer Graphics and Interactive Techniques in Asia (SIGGRAPH ASIA), 2022. 1, 2, 3, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 680, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 714 + ], + "type": "text", + "content": "[12] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. DeepStereo: Learning to predict new views from the world's imagery. In Proceedings of the IEEE Conference" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 714 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "on Computer Vision and Pattern Recognition (CVPR), 2016. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 96, + 547, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 547, + 150 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 547, + 150 + ], + "type": "text", + "content": "[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-Planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 152, + 545, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 152, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 545, + 195 + ], + "type": "text", + "content": "[14] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 197, + 545, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 197, + 545, + 229 + ], + "spans": [ + { + "bbox": [ + 308, + 197, + 545, + 229 + ], + "type": "text", + "content": "[15] Ned Greene. Environment mapping and other applications of world projections. IEEE Computer Graphics and Applications, 1986. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 231, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 231, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 308, + 231, + 545, + 285 + ], + "type": "text", + "content": "[16] Xiang Guo, Guanying Chen, Yuchao Dai, Xiaqing Ye, Jiadai Sun, Xiao Tan, and Errui Ding. Neural deformable voxel grid for fast optimization of dynamic view synthesis. In Proceedings of the Asian Conference on Computer Vision (ACCV), 2022. 
1, 2, 3, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 286, + 545, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 286, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 308, + 286, + 545, + 342 + ], + "type": "text", + "content": "[17] Xiang Guo, Jiadai Sun, Yuchao Dai, Guanying Chen, Xiaoqing Ye, Xiao Tan, Errui Ding, Yumeng Zhang, and Jingdong Wang. Forward flow for novel view synthesis of dynamic scenes. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 1, 2, 3, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 343, + 545, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 343, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 308, + 343, + 545, + 387 + ], + "type": "text", + "content": "[18] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (TOG), 2018. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 388, + 545, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 388, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 308, + 388, + 545, + 442 + ], + "type": "text", + "content": "[19] Wenbo Hu, Yuling Wang, Lin Ma, Bangbang Yang, Lin Gao, Xiao Liu, and Yuewen Ma. Tri-MipRF: Tri-mip representation for efficient anti-aliasing neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 444, + 545, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 444, + 545, + 476 + ], + "spans": [ + { + "bbox": [ + 308, + 444, + 545, + 476 + ], + "type": "text", + "content": "[20] Nima Khademi Kalantari, Ting-Chun Wang, and Ravi Ramamoorthi. Learning-based view synthesis for light field cameras. ACM Transactions on Graphics (TOG), 2016. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 478, + 545, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 478, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 308, + 478, + 545, + 521 + ], + "type": "text", + "content": "[21] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3D Gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (TOG), 2023. 1, 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 522, + 545, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 522, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 308, + 522, + 545, + 555 + ], + "type": "text", + "content": "[22] Marc Levoy and Pat Hanrahan. Light field rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 1996. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 556, + 545, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 556, + 545, + 600 + ], + "spans": [ + { + "bbox": [ + 308, + 556, + 545, + 600 + ], + "type": "text", + "content": "[23] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 601, + 545, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 601, + 545, + 645 + ], + "spans": [ + { + "bbox": [ + 308, + 601, + 545, + 645 + ], + "type": "text", + "content": "[24] David B Lindell, Julien NP Martel, and Gordon Wetzstein. AutoInt: Automatic integration for fast neural volume rendering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "type": "text", + "content": "[25] Baoyuan Liu, Min Wang, Hassan Foroosh, Marshall Tappen, and Marianna Pensky. Sparse convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. 4" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 691, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 714 + ], + "type": "text", + "content": "[26] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. In Proceed-" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8908" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "text", + "content": "ings of the Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "text", + "content": "[27] Yu-Lun Liu, Chen Gao, Andreas Meuleman, Hung-Yu Tseng, Ayush Saraf, Changil Kim, Yung-Yu Chuang, Johannes Kopf, and Jia-Bin Huang. Robust dynamic radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 151, + 287, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 151, + 287, + 194 + ], + "spans": [ + { + "bbox": [ + 48, + 151, + 287, + 194 + ], + "type": "text", + "content": "[28] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. ACM Transactions on Graphics (TOG), 2021. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 195, + 287, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 195, + 287, + 259 + ], + "spans": [ + { + "bbox": [ + 48, + 195, + 287, + 259 + ], + "type": "text", + "content": "[29] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 261, + 287, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 261, + 287, + 316 + ], + "spans": [ + { + "bbox": [ + 48, + 261, + 287, + 316 + ], + "type": "text", + "content": "[30] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 1, 2, 4, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 317, + 287, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 317, + 287, + 358 + ], + "spans": [ + { + "bbox": [ + 48, + 317, + 287, + 358 + ], + "type": "text", + "content": "[31] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (TOG), 2022. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 361, + 287, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 361, + 287, + 425 + ], + "spans": [ + { + "bbox": [ + 48, + 361, + 287, + 425 + ], + "type": "text", + "content": "[32] Thomas Neff, Pascal Stadlbauer, Mathias Parger, Andreas Kurz, Joerg H. Mueller, Chakravarty R. Alla Chaitanya, Anton S. Kaplanyan, and Markus Steinberger. DONeRF: Towards real-time rendering of compact neural radiance fields using depth oracle networks. Computer Graphics Forum (CGF), 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 426, + 287, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 426, + 287, + 480 + ], + "spans": [ + { + "bbox": [ + 48, + 426, + 287, + 480 + ], + "type": "text", + "content": "[33] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 1, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 482, + 287, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 482, + 287, + 546 + ], + "spans": [ + { + "bbox": [ + 48, + 482, + 287, + 546 + ], + "type": "text", + "content": "[34] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. HyperNeRF: A higher-dimensional representation for topologically varying neural radiance fields. ACM Transactions on Graphics (TOG), 2021. 1, 3, 6, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 548, + 287, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 548, + 287, + 569 + ], + "spans": [ + { + "bbox": [ + 48, + 548, + 287, + 569 + ], + "type": "text", + "content": "[35] Eric Penner and Li Zhang. 
Soft 3D reconstruction for view synthesis. ACM Transactions on Graphics (TOG), 2017. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 571, + 287, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 571, + 287, + 602 + ], + "spans": [ + { + "bbox": [ + 48, + 571, + 287, + 602 + ], + "type": "text", + "content": "[36] Martin Piala and Ronald Clark. TermiNeRF: Ray termination prediction for efficient neural rendering. In Proceedings of the International Conference on 3D Vision (3DV), 2021. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 604, + 287, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 604, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 604, + 287, + 656 + ], + "type": "text", + "content": "[37] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1, 2, 3, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[38] Daniel Rebain, Wei Jiang, Soroosh Yazdani, Ke Li, Kwang Moo Yi, and Andrea Tagliasacchi. DeRF: Decomposed radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[39] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. KiloNeRF: Speeding up neural radiance fields with thousands of tiny MLPs. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "type": "text", + "content": "[40] Konstantinos Rematas, Andrew Liu, Pratul P. Srinivasan, Jonathan T. Barron, Andrea Tagliasacchi, Thomas Funkhouser, and Vittorio Ferrari. Urban radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 175, + 545, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 206 + ], + "type": "text", + "content": "[41] Gernot Riegler and Vladlen Koltun. Free view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 209, + 545, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 209, + 545, + 241 + ], + "spans": [ + { + "bbox": [ + 307, + 209, + 545, + 241 + ], + "type": "text", + "content": "[42] Gernot Riegler and Vladlen Koltun. Stable view synthesis. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 243, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 243, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 243, + 545, + 274 + ], + "type": "text", + "content": "[43] Harry Shum and Sing Bing Kang. Review of image-based rendering techniques. In Visual Communications and Image Processing (VCIP), 2000. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 277, + 545, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 277, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 307, + 277, + 545, + 331 + ], + "type": "text", + "content": "[44] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. NeRFPlayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics (TVCG), 2023. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 333, + 545, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 333, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 307, + 333, + 545, + 396 + ], + "type": "text", + "content": "[45] Pratul P Srinivasan, Boyang Deng, Xiuming Zhang, Matthew Tancik, Ben Mildenhall, and Jonathan T Barron. NeRV: Neural reflectance and visibility fields for relighting and view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 399, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 545, + 453 + ], + "type": "text", + "content": "[46] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct Voxel Grid Optimization: Super-fast convergence for radiance fields reconstruction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 456, + 545, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 545, + 520 + ], + "type": "text", + "content": "[47] Matthew Tancik, Vincent Casser, Xinchen Yan, Sabeek Pradhan, Ben Mildenhall, Pratul P Srinivasan, Jonathan T Barron, and Henrik Kretzschmar. Block-NeRF: Scalable large scene neural view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 523, + 545, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 523, + 545, + 577 + ], + "spans": [ + { + "bbox": [ + 307, + 523, + 545, + 577 + ], + "type": "text", + "content": "[48] Ayush Tewari, Ohad Fried, Justus Thies, Vincent Sitzmann, Stephen Lombardi, Kalyan Sunkavalli, Ricardo MartinBrualla, Tomas Simon, Jason Saragih, Matthias Nießner, et al. State of the art on neural rendering. In Computer Graphics Forum (CGF), 2020. 
2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 579, + 545, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 579, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 307, + 579, + 545, + 633 + ], + "type": "text", + "content": "[49] Ayush Tewari, O Fried, J Thies, V Sitzmann, S Lombardi, Z Xu, T Simon, M Nießner, E Tretschk, L Liu, et al. Advances in neural rendering. In Proceedings of the Conference on Computer Graphics and Interactive Techniques (SIGGRAPH), 2021. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 635, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 667 + ], + "type": "text", + "content": "[50] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 2019. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "text", + "content": "[51] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Pro" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "8909" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 288, + 715 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "text", + "content": "ceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 1, 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 95, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 95, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 95, + 287, + 139 + ], + "type": "text", + "content": "[52] Alex Trevithick and Bo Yang. GRF: Learning a general radiance field for 3D representation and rendering. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 140, + 288, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 288, + 183 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 288, + 183 + ], + "type": "text", + "content": "[53] Haithem Turki, Jason Y Zhang, Francesco Ferroni, and Deva Ramanan. SUDS: Scalable urban dynamic scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 
1, 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 184, + 288, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 184, + 288, + 217 + ], + "spans": [ + { + "bbox": [ + 48, + 184, + 288, + 217 + ], + "type": "text", + "content": "[54] Chaoyang Wang, Ben Eckart, Simon Lucey, and Orazio Gallo. Neural trajectory fields for dynamic novel view synthesis. arXiv preprint arXiv:2105.05994, 2021. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 217, + 288, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 217, + 288, + 282 + ], + "spans": [ + { + "bbox": [ + 48, + 217, + 288, + 282 + ], + "type": "text", + "content": "[55] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. IBRNet: Learning multi-view image-based rendering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 284, + 287, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 284, + 287, + 327 + ], + "spans": [ + { + "bbox": [ + 48, + 284, + 287, + 327 + ], + "type": "text", + "content": "[56] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Xinggang Wang. 4D Gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 327, + 288, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 288, + 371 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 288, + 371 + ], + "type": "text", + "content": "[57] Wenqi Xian, Jia-Bin Huang, Johannes Kopf, and Changil Kim. Space-time neural irradiance fields for free-viewpoint video. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 372, + 288, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 372, + 288, + 437 + ], + "spans": [ + { + "bbox": [ + 48, + 372, + 288, + 437 + ], + "type": "text", + "content": "[58] Yuanbo Xiangli, Linning Xu, Xingang Pan, Nanxuan Zhao, Anyi Rao, Christian Theobalt, Bo Dai, and Dahua Lin. BungeeNeRF: Progressive neural radiance field for extreme multi-scale scene rendering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 437, + 288, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 437, + 288, + 480 + ], + "spans": [ + { + "bbox": [ + 48, + 437, + 288, + 480 + ], + "type": "text", + "content": "[59] Zexiang Xu, Sai Bi, Kalyan Sunkavalli, Sunil Hadap, Hao Su, and Ravi Ramamoorthi. Deep view synthesis from sparse photometric images. ACM Transactions on Graphics (TOG), 2019. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 482, + 288, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 482, + 288, + 536 + ], + "spans": [ + { + "bbox": [ + 48, + 482, + 288, + 536 + ], + "type": "text", + "content": "[60] Wenqi Yang, Guanying Chen, Chaofeng Chen, Zhenfang Chen, and Kwan-Yee K Wong. 
S" + }, + { + "bbox": [ + 48, + 482, + 288, + 536 + ], + "type": "inline_equation", + "content": "^3" + }, + { + "bbox": [ + 48, + 482, + 288, + 536 + ], + "type": "text", + "content": "-NeRF: Neural reflectance field from shading and shadow under a single viewpoint. In Proceedings of the Advances in Neural Information Processing Systems (NeurIPS), 2022. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 537, + 288, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 537, + 288, + 591 + ], + "spans": [ + { + "bbox": [ + 48, + 537, + 288, + 591 + ], + "type": "text", + "content": "[61] Jae Shin Yoon, Kihwan Kim, Orazio Gallo, Hyun Soo Park, and Jan Kautz. Novel view synthesis of dynamic scenes with globally coherent depths from a monocular camera. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 592, + 288, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 592, + 288, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 592, + 288, + 646 + ], + "type": "text", + "content": "[62] Alex Yu, Sara Fridovich-Keil, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 288, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 691 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 691 + ], + "type": "text", + "content": "[63] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 692, + 288, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 692, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 48, + 692, + 288, + 715 + ], + "type": "text", + "content": "[64] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 319 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 96, + 545, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 129 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 129 + ], + "type": "text", + "content": "[65] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. NeRF++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 130, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 130, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 307, + 130, + 545, + 184 + ], + "type": "text", + "content": "[66] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 186, + 545, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 240 + ], + "type": "text", + "content": "[67] Xiuming Zhang, Pratul P Srinivasan, Boyang Deng, Paul Debevec, William T Freeman, and Jonathan T Barron. NeR-Factor: Neural factorization of shape and reflectance under an unknown illumination. ACM Transactions on Graphics (TOG), 2021. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "type": "text", + "content": "[68] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 286, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 286, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 286, + 545, + 319 + ], + "type": "text", + "content": "[69] M. Zwicker, H. Pfister, J. van Baar, and M. Gross. Ewa volume splatting. In Proceedings of IEEE Visualization (VIS), 2001. 
4" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "8910" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Human Pose Perception from Egocentric Stereo Videos/95770e99-65fc-4fd7-9de3-96977a97b4b8_content_list.json b/2024/3D Human Pose Perception from Egocentric Stereo Videos/95770e99-65fc-4fd7-9de3-96977a97b4b8_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..2359495a2b95b971e00ef0c4b58bd8fd72835842 --- /dev/null +++ b/2024/3D Human Pose Perception from Egocentric Stereo Videos/95770e99-65fc-4fd7-9de3-96977a97b4b8_content_list.json @@ -0,0 +1,2149 @@ +[ + { + "type": "text", + "text": "3D Human Pose Perception from Egocentric Stereo Videos", + "text_level": 1, + "bbox": [ + 189, + 130, + 782, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hiroyasu Akada", + "bbox": [ + 153, + 181, + 285, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jian Wang Vladislav Golyanik Max Planck Institute for Informatics, SIC", + "bbox": [ + 315, + 181, + 648, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Christian Theobalt", + "bbox": [ + 665, + 181, + 816, + 196 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/32798e62043c3ac39b403815deb7be4d445d827994a554ed1cb13ca22411de73.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 78, + 250, + 202, + 392 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/78bc11864321b84e7ecfc867bc9854e84042332aaa51fd250b6f5a43d5ea1a6d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 205, + 251, + 292, + 319 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/34b3570884f1e618c6019c77328ba4cec270911dade00e158155f0f4ced7d98b.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 205, + 324, + 292, + 392 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/70cc8313ce36826cf52ae5a7de27ef4b51d1fc310842a0f92a56c4c10bbd7489.jpg", + "image_caption": [ + "(c)", + "Figure 1. 3D human pose estimation results of our proposed method from egocentric stereo fisheye videos. Left: results on synthetic images; (a) reference RGB view of the scene; (b) 3D-to-2D pose re-projections, and (c) a 3D pose in a scene mesh reconstructed by our framework. Right: results on real-world images; (d) reference view; (e) 3D-to-2D pose re-projections; (f) a 3D pose in the reconstructed scene, and (g) 3D virtual character animation (possible future application of our method)." 
+ ], + "image_footnote": [], + "bbox": [ + 295, + 251, + 419, + 392 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/bf98069469d4197ec25f8ec9086a0483a755b084a34cfd118da947e8f97282a4.jpg", + "image_caption": [ + "(d)" + ], + "image_footnote": [], + "bbox": [ + 428, + 251, + 552, + 392 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f8aa611c225244d33dd9f3267562aee4d13f8a2a27de2f7a2cdc49e77c6a65e7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 553, + 251, + 640, + 319 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3adfca99ca8364a67f1d54e3950637089d9f3de1f073bd8e1ef40654fb260744.jpg", + "image_caption": [ + "(e)" + ], + "image_footnote": [], + "bbox": [ + 555, + 324, + 640, + 392 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/457e76fe84cd7c48b924fdabe7a6ac6b25f715964f12a39f10f3dfda678f6b09.jpg", + "image_caption": [ + "(f)" + ], + "image_footnote": [], + "bbox": [ + 642, + 251, + 764, + 392 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a3f374da09d58bd9853fde02a00bc166f7cb39516b785a35e949be4f26668c23.jpg", + "image_caption": [ + "(g)" + ], + "image_footnote": [], + "bbox": [ + 767, + 251, + 890, + 392 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 478, + 313, + 493 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While head-mounted devices are becoming more compact, they provide egocentric views with significant self-occlusions of the device user. Hence, existing methods often fail to accurately estimate complex 3D poses from egocentric views. In this work, we propose a new transformer-based framework to improve egocentric stereo 3D human pose estimation, which leverages the scene information and temporal context of egocentric stereo videos. Specifically, we utilize 1) depth features from our 3D scene reconstruction module with uniformly sampled windows of egocentric stereo frames, and 2) human joint queries enhanced by temporal features of the video inputs. Our method is able to accurately estimate human poses even in challenging scenarios, such as crouching and sitting. Furthermore, we introduce two new benchmark datasets, i.e., UnrealEgo2 and UnrealEgo-RW (RealWorld). The proposed datasets offer a much larger number of egocentric stereo views with a wider variety of human motions than the existing datasets, allowing comprehensive evaluation of existing and upcoming methods. Our extensive experiments show that the proposed approach significantly outperforms previous methods. UnrealEgo2, UnrealEgo-RW, and trained models are available on our project page1 and Benchmark Challenge2.", + "bbox": [ + 75, + 511, + 472, + 859 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 501, + 478, + 630, + 493 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Egocentric 3D human motion capture using wearable devices has received increased attention recently [1, 11, 22, 31, 37, 38, 40-42, 45, 48, 52, 53]. Different from traditional vision-based motion capture setups that require a fixed recording space, egocentric systems allow flexible motion capture in less constrained situations. Therefore, the egocentric setups offer various applications, such as motion analysis and XR technologies (Fig. 
1-(g)).", + "bbox": [ + 496, + 503, + 890, + 626 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Previous works proposed various egocentric methods to capture device users. On the one hand, the vast majority of existing methods—which use a monocular camera—would fail for complex human poses due to depth ambiguity and self-occlusion. On the other hand, the methods designed for stereo devices do not yet realize the full potential of their stereo settings, especially with the most recent compact eyeglasses-based setups [1, 53]. Specifically, they do not deliver high 3D reconstruction accuracy across different scenarios. Moreover, these approaches do not consider scene information, which further limits their accuracy.", + "bbox": [ + 496, + 627, + 892, + 792 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address the challenges outlined above, we propose a new transformer-based framework for egocentric 3D human motion capture from compact eyeglasses-based devices; see Fig. 1. The first step of our framework is to estimate 2D joint heatmaps from egocentric stereo fisheye RGB videos (Sec. 4.1). These 2D joint heatmaps are then processed with human joint queries in our transformer-based 3D mod", + "bbox": [ + 496, + 795, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1https://4dqv.mpi-inf.mpq.de/UnrealEgo2/", + "bbox": [ + 93, + 875, + 410, + 886 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "2https://unrealego.mpi-inf.mpg.de/", + "bbox": [ + 96, + 887, + 364, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "767", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ule to estimate 3D poses. Here, we leverage the scene information and temporal context of the input videos in the 3D module to improve estimation accuracy. Firstly, we use uniformly sampled windows of egocentric stereo frames to reconstruct a 3D background scene using Structure from Motion (SfM) [33], obtaining scene depth as additional information for the 3D module (Sec. 4.2 and 4.3). In our challenging eyeglasses-based setup, however, the 3D scene and camera poses can not always be estimated due to severe self-occlusion in the egocentric images. This results in depth maps with zero (invalid) values and undesired computation of network gradients during training. To mitigate this issue, we propose to use depth padding masks that prevent processing such invalid depth values in the 3D module. Additionally, we propose video-dependent query augmentation that enhances the joint queries with the temporal context of stereo video inputs to effectively capture the temporal relation of human motions at a joint level (Sec. 4.4).", + "bbox": [ + 75, + 90, + 472, + 363 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We also introduce two new benchmark datasets: UnrealEgo2 and UnrealEgo-RW. 
UnrealEgo2 is an extended version of UnrealEgo [1] and the largest eyeglasses-based synthetic data with various new motions, offering $2.8 \\times$ larger data (2.5M images) than the existing dataset [1]. UnrealEgo-RW is a real-world dataset recorded with our newly developed device that resembles the virtual eyeglasses-based setup [1], offering 260k images with various motions and 3D poses. The proposed datasets make it possible to evaluate existing and upcoming methods on a variety of motions, not only in synthetic scenes but also in real-world cases.", + "bbox": [ + 75, + 363, + 472, + 542 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In short, the contributions of this paper are as follows:", + "bbox": [ + 96, + 544, + 452, + 559 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The transformer-based framework for egocentric stereo 3D human pose estimation that accounts for temporal context in egocentric stereo views.", + "- 3D pose estimation is enhanced via the utilization of scene information from our video-based 3D scene reconstruction module as well as joint queries obtained from our video-dependent query augmentation policy.", + "- A new portable device for egocentric stereo view capture with its specification and two new benchmark datasets: UnrealEgo2 and UnrealEgo-RW recorded with our device. The proposed datasets allow for a comprehensive evaluation of methods for egocentric 3D human pose estimation from stereo views." + ], + "bbox": [ + 76, + 560, + 468, + 758 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our experiments demonstrate that the proposed method outperforms the previous state-of-the-art approaches by a substantial margin, i.e., $>15\\%$ on UnrealEgo [1], $\\geq 40\\%$ on UnrealEgo2, and $\\geq 10\\%$ on UnrealEgo-RW (on MPJPE). We release UnrealEgo2, UnrealEgo-RW, and our trained models on our project page3 and Benchmark Challenge4 to foster the area of egocentric 3D vision.", + "bbox": [ + 75, + 758, + 468, + 864 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 89, + 640, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Egocentric 3D Human Motion Capture. Recent years witnessed significant innovations in egocentric 3D human pose estimation. To capture device users, many existing works use downward-facing cameras and the existing methods can be categorized into two groups. The first group are monocular approaches [11, 21, 22, 27, 37, 38, 40, 41, 43, 45, 48, 52]. For example, Wang et al. [43] uses a diffusion-based [10] motion prior to tackle self-conclusions. Due to the depth ambiguity, monocular methods often fail to estimate accurate 3D poses. Wang et al. [42] tackled this issue by projecting depth and 2D pose features into a pre-defined voxel space. This method requires additional training with ground-truth depths and human body segmentation; it cannot easily be extended for multi-view or temporal inputs. Zhang et al. [51] utilized a diffusion model [10] conditioned on a 3D scene to generate poses. They require pre-scanned scene mesh as an input and cannot capture a device user.", + "bbox": [ + 496, + 114, + 893, + 372 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The second group, including our work, focuses on the multi-view (often stereo) setting. Rhodin et al. [31] proposed an optimization approach whereas Cha et al. 
[3] used eight cameras to estimate a 3D body and reconstruct a 3D scene separately. Other works [1, 53] used the multi-branch autoencoder [37] to the stereo setup. Kang et al. [12] (arXiv pre-print at the time of submission) leveraged a stereomatching mechanism and perspective embedding heatmaps. In contrast to the existing methods, we propose a new transformer-based method that effectively utilizes egocentric stereo videos via our video-based 3D scene reconstruction module and video-dependent query augmentation policy. Our method considers the scene information without the supervision of the scene data.", + "bbox": [ + 496, + 372, + 893, + 583 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Transformers in 3D Human Pose Estimation from External Cameras. 3D pose estimation from external cameras has shown significant progress due to the advances in transformer architectures [39]. Some works [20, 47] predict 3D human pose and mesh from monocular views. Other works [5, 18, 19, 28, 29, 36, 46, 49, 54-58] present a 2D-to-3D lifting module that estimates 3D poses from monocular 2D joints obtained with off-the-shelf 2D joint detectors. Although their lifting modules show impressive results, those monocular methods cannot be easily applied to our stereo setting. On the other hand, some works utilize transformers in multi-view settings. He et al. [9] and Ma et al. [23] aggregate stereo information on epipolar lines of stereo images, which are difficult to obtain from fisheye images. Recent work [44] regresses multi-person 3D poses from multi-view inputs, powered by projective attention and query adaptation. However, no existing works explored the potential of transformers along with 2D joint heatmaps or explicit scene information in stereo 3D pose estimation. In this paper, we propose a transformer-based framework that accounts for the temporal relation of human motion at a joint level via", + "bbox": [ + 496, + 583, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "3https://4dqv.mpi-inf.mpg.de/UnrealEgo2/", + "bbox": [ + 91, + 873, + 410, + 886 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "4https://unrealego.mpi-inf.mpg.de/", + "bbox": [ + 91, + 886, + 364, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "768", + "bbox": [ + 486, + 945, + 509, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "intermediate 2D joint heatmap and depth maps even with inaccurate depth values mixed in the framework.", + "bbox": [ + 75, + 90, + 467, + 119 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Datasets for Egocentric 3D Human Pose Estimation. Several works proposed unique setups to create datasets, using a monocular camera [11, 17, 22, 37, 40, 41, 45, 48] and forward-facing cameras [11, 14, 17, 22, 26, 48, 50, 51]. There also exist datasets captured with stereo devices [3, 7, 14, 26, 31, 53]. However, they are small [31] with limited motion types [31, 53], not publicly available [3, 53], or do not provide ground truth 3D poses of device users [7, 14, 26]. Recently, Akada et al. [1] introduced UnrealEgo, a synthetic dataset based on virtual eyeglasses with two fisheye cameras. However, they provide only synthetic images. Meanwhile, more glasses-based stereo datasets that offer a wider variety of motions or real-world footage are required nowadays for an extensive evaluation of existing and upcoming methods. 
Hence, we introduce two new benchmark datasets that in their characteristics go beyond the existing data: UnrealEgo2 and UnrealEgo-RW. We describe the proposed datasets in the following section.", + "bbox": [ + 75, + 121, + 467, + 393 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Mobile Device and Datasets", + "text_level": 1, + "bbox": [ + 76, + 407, + 331, + 422 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We present two new datasets for egocentric stereo 3D motion capture: UnrealEgo2 and UnrealEgo-RW; see Fig. 1. Please watch our supplementary video for visualizations. UnrealEgo2 Dataset. To create UnrealEgo2 (an extension of UnrealEgo [1]), we adapt the publicly available setup with a virtual eyeglasses device [1]. This setup comes with two downward-facing fisheye cameras attached $12\\mathrm{cm}$ apart from each other on the glasses frames. The camera's field of view is $170^{\\circ}$ . With this device, we capture 17 realistic 3D human models [30] animated by the Mixamo [25] dataset in various 3D environments. We record simple to highly complex motions such as crouching and crawling, for 14 hours.", + "bbox": [ + 75, + 431, + 467, + 612 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Overall, UnrealEgo2 offers 15,207 motions and $>1.25\\mathrm{M}$ stereo views (2.5M images) as well as depth maps with a resolution $1024\\times 1024$ pixel rendered at 25 frames per second. Each frame is annotated with 32 body and 40 hand joints. Note that UnrealEgo2 is the largest glasses-based dataset and $2.8\\times$ larger than UnrealEgo. Also, it does not share the same motions with UnrealEgo, providing a larger motion variety for a comprehensive evaluation.", + "bbox": [ + 75, + 613, + 467, + 733 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Design of Our Mobile Device. Evaluation with real-world datasets plays a pivotal role in computer vision research. Therefore, we build a new portable device; see Fig. 2. Our device is based on a helmet with two RIBCAGE RX0 II cameras [32] and two FUJINON FE185C057HA-1 fisheye lenses [6]. We placed the cameras $12\\mathrm{cm}$ away from each other and $2\\mathrm{cm}$ away from user's face. We cropped the margins of the egocentric images to resemble the field of view of $170^{\\circ}$ of the UnrealEgo and UnrealEgo2 setups. Note that our setup is more compact than EgoCap [31] that placed cameras $25\\mathrm{cm}$ away from user's face.", + "bbox": [ + 75, + 734, + 467, + 900 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/81ffc82403fe2599dd97479c7fa1a3a4102d1fdd1f5fc02aa55f787c93d1f63f.jpg", + "image_caption": [ + "Figure 2. Our portable setup to acquire UnrealEgo-RW." + ], + "image_footnote": [], + "bbox": [ + 501, + 88, + 761, + 172 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/7bbcdf0ba48c212d7974b9b7c8cf872e21909a2a3cf5e92c7192753b13909bc9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 774, + 88, + 892, + 172 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "UnrealEgo-RW (Real-World) Dataset. With our device, we record various motions of 16 identities in a multi-view motion capture studio (Fig. 1-(d)). We capture simple and challenging activities, e.g., crawling and dancing, for 1.5 hours. 
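(At the 25 fps recording rate reported in the next paragraph, 1.5 hours amounts to roughly $1.5 \times 3600 \times 25 \approx 135\mathrm{k}$ stereo frames, in line with the more than $130\mathrm{k}$ stereo views listed below.) 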
This is in strong contrast to the existing real-world stereo dataset [53] (not publicly available) that records only three simple actions, i.e., sitting, standing, and walking.", + "bbox": [ + 496, + 223, + 890, + 330 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In total, we obtained 591 motion segments from 16 identities with various textured clothing. This results in more than $130\\mathrm{k}$ stereo views (260k images) of a resolution $872\\times 872$ pixel rendered at 25 frames per second with ground-truth 3D poses of 16 joints. Note that UnrealEgoRW offers $4.3\\times$ larger data with a wider variety of motions than the publicly available real-world stereo data [31].", + "bbox": [ + 496, + 330, + 890, + 436 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4. Method", + "text_level": 1, + "bbox": [ + 500, + 458, + 589, + 473 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We propose a new framework for egocentric stereo 3D human pose estimation as shown in Fig. 3. Our framework first estimates the 2D joint heatmaps from egocentric stereo fisheye videos in our 2D module (Sec. 4.1). The heatmaps and input videos are then processed in our segmentation module to obtain 2D human body masks (Sec. 4.2). Next, we use uniformly sampled windows of input frames and human body masks to reconstruct 3D scenes (Sec. 4.3). Here, we render depth maps and depth region masks from the reconstructed mesh. Finally, our transformer-based 3D module processes the joint heatmaps, depth information, and joint queries to estimate 3D poses (Sec. 4.4). Here, the 3D module leverages depth padding masks based on the availability of the depth maps as well as joint queries enhanced by the stereo video features from the 2D module.", + "bbox": [ + 496, + 486, + 890, + 712 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1.2D Pose Estimation", + "text_level": 1, + "bbox": [ + 500, + 729, + 684, + 743 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given egocentric stereo videos with $T$ frames $\\{\\mathbf{I}_{\\mathrm{Left}}^t,\\mathbf{I}_{\\mathrm{Right}}^t\\in$ $\\mathbb{R}^{H\\times W\\times 3}|t = 1,2,\\dots,T\\}$ , we use the existing stereo 2D joint heatmap estimator [1] to obtain a sequence of corresponding 2D heatmaps of 15 joints $\\{\\mathbf{H}_{\\mathrm{Left}}^t,\\mathbf{H}_{\\mathrm{Right}}^t\\in$ $\\mathbb{R}^{\\frac{H}{4}\\times \\frac{W}{4}\\times 15}\\}$ , including the neck, upper arms, lower arms, hands, thighs, calves, feet, and balls of the feet. We also extract intermediate feature maps $\\{\\mathbf{F}_{\\mathrm{Left}}^t,\\mathbf{F}_{\\mathrm{Right}}^t\\in$ $\\mathbb{R}^{\\frac{H}{32}\\times \\frac{W}{32}\\times C}\\}$ where $C = 512$ , which are used later in the 3D module.", + "bbox": [ + 496, + 753, + 890, + 898 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "769", + "bbox": [ + 486, + 945, + 509, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/d876a4516d86f54cf6a9f65342065f9bd9e19401042946519eae08d09e623b82.jpg", + "image_caption": [ + "Figure 3. Overview of our framework. Our method takes egocentric stereo videos $\\{\\mathbf{I}_{\\mathrm{Left}}^t,\\mathbf{I}_{\\mathrm{Right}}^t\\}$ as inputs. We first apply the 2D module to obtain 2D joint heatmaps $\\{\\mathbf{H}_{\\mathrm{Left}}^t,\\mathbf{H}_{\\mathrm{Right}}^t\\}$ and video features $\\{\\mathbf{F}_{\\mathrm{Left}}^t,\\mathbf{F}_{\\mathrm{Right}}^t\\}$ (Sec. 4.1). 
The heatmaps are used with input videos to create human body masks $\\{\\mathbf{M}_{\\mathrm{Left}}^t,\\mathbf{M}_{\\mathrm{Right}}^t\\}$ (Sec. 4.2). Next, we use uniformly sampled windows of input frames and human body masks to reconstruct a 3D scene mesh (Sec. 4.3). From the mesh, we generate depth maps $\\{\\mathbf{D}_{\\mathrm{Left}}^t,\\mathbf{D}_{\\mathrm{Right}}^t\\}$ and depth region masks $\\{\\mathbf{R}_{\\mathrm{Left}}^t,\\mathbf{R}_{\\mathrm{Right}}^t\\}$ . Note that this diagram shows an example case of missing depth values for the second input frame. Lastly, the depth data, 2D joint heatmaps, video features, joint queries $q^{t}$ and the padding masks $V_{\\mathrm{Depth}}^{t}$ are processed in the 3D module to estimate 3D poses $\\mathbf{P}^t$ (Sec. 4.4)." + ], + "image_footnote": [], + "bbox": [ + 86, + 92, + 885, + 401 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Human Body Segmentation", + "text_level": 1, + "bbox": [ + 76, + 513, + 323, + 530 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To reconstruct 3D scenes from egocentric videos, it is necessary to identify the pixels corresponding to the background environment. Therefore, we integrate an existing segmentation method, i.e., ViT-H SAM model [16], as our segmentation network $\\mathcal{F}_{\\mathrm{SAM}}$ . In this module, we firstly obtain 2D joint locations from the 2D joint heatmap $\\{\\widehat{\\mathbf{H}}_{\\mathrm{Left}}^t,\\widehat{\\mathbf{H}}_{\\mathrm{Right}}^t\\}$ . Then, we use the input video frames $\\{\\mathbf{I}_{\\mathrm{Left}}^t,\\mathbf{I}_{\\mathrm{Right}}^t\\}$ and its corresponding 2D joints to extract a human body mask $\\{\\mathbf{M}_{\\mathrm{Left}}^t,\\mathbf{M}_{\\mathrm{Right}}^t\\in \\mathbb{R}^{H\\times W\\times 1}\\}$ :", + "bbox": [ + 75, + 537, + 468, + 679 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {M} _ {\\text {L e f t}} ^ {t} = \\mathcal {F} _ {\\mathrm {S A M}} \\left(\\mathbf {I} _ {\\text {L e f t}} ^ {t}, \\widehat {\\mathbf {H}} _ {\\text {L e f t}} ^ {t}\\right). \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 178, + 689, + 468, + 708 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The same process can be applied to obtain $\\mathbf{M}_{\\mathrm{Right}}^t$ . Note that we use the SAM model without re-training on ground-truth human body masks. Instead, we guide the predictions of SAM using joint positions extracted from the 2D heatmaps.", + "bbox": [ + 75, + 715, + 468, + 777 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.3. 3D Scene Reconstruction", + "text_level": 1, + "bbox": [ + 76, + 785, + 303, + 800 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We aim to reconstruct 3D environments from uniformly sampled windows of input frames $\\{\\mathbf{I}_{\\mathrm{Left}}^t,\\mathbf{I}_{\\mathrm{Right}}^t\\}$ and human body masks $\\{\\mathbf{M}_{\\mathrm{Left}}^t,\\mathbf{M}_{\\mathrm{Right}}^t\\}$ with a fixed length. The length is set to 4 seconds (some motion data contains shorter sequences). Given these data, we use Metashape [24] to perform SfM to obtain camera poses and a 3D scene", + "bbox": [ + 75, + 808, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "mesh. Here, as the baseline length between stereo cameras is known, i.e., $12\\mathrm{cm}$ , we can obtain the mesh in the real-world scale. 
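To make this scale recovery concrete, the following is a minimal illustrative sketch (not part of our released pipeline; variable names are assumed) of rescaling an up-to-scale SfM result with the known 12 cm baseline:

import numpy as np

def rescale_to_metric(vertices, left_cam_centers, right_cam_centers, baseline_m=0.12):
    # mean distance between paired left/right camera centres in (arbitrary) SfM units
    sfm_baseline = np.mean(np.linalg.norm(left_cam_centers - right_cam_centers, axis=-1))
    scale = baseline_m / sfm_baseline  # e.g. 0.12 / 0.48 = 0.25
    # the same factor is applied to the camera translations before rendering depth
    return vertices * scale
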
Next, we render down-sampled depth maps $\{\mathbf{D}_{\mathrm{Left}}^t,\mathbf{D}_{\mathrm{Right}}^t\in \mathbb{R}^{\frac{H}{4}\times \frac{W}{4}\times 1}\}$ and depth region masks $\{\mathbf{R}_{\mathrm{Left}}^t,\mathbf{R}_{\mathrm{Right}}^t\in \mathbb{R}^{\frac{H}{4}\times \frac{W}{4}\times 1}\}$ from the reconstructed 3D scene mesh. The depth region masks show the regions where the depth values are obtained from the 3D scene. This depth information will be used later in the 3D module as additional cues for pose estimation. However, there are some cases where the egocentric RGB videos are largely occupied by a human body. In such scenarios, the 3D scene cannot be reconstructed or camera poses cannot be estimated. This results in missing (invalid) depth values and undesired computation of network gradients during training. Therefore, we tackle this issue in our 3D module.", + "bbox": [ + 496, + 515, + 890, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.4. 3D Pose Estimation", + "text_level": 1, + "bbox": [ + 498, + 756, + 684, + 770 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the 3D module, we aim to estimate a sequence of 3D poses by considering scene information and the temporal context of the egocentric stereo videos. Specifically, given the 2D joint heatmaps, depth maps, depth region masks, and $T$ sets of joint queries $q^{t} \in \mathbb{R}^{16 \times \frac{C}{2}}$ , we use a transformer decoder to estimate a sequence of 3D poses $\{\mathbf{P}^t \in \mathbb{R}^{16 \times 3} | t = 1, 2, \dots, T\}$ . Our pose output is the 3D pose at the last time step $\mathbf{P}^T$ . We follow the existing", + "bbox": [ + 496, + 779, + 890, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "770", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/32454c867814dadd9ab5c39a6939ca15cedac044357fc3a2176892b65ee76f2d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | Task | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑)
Zhao et al. [53] |  | 86.45 | 63.71 | 85.97 | 50.50
Akada et al. [1] |  | 78.98 | 59.30 | 88.81 | 54.31
Kang et al. [12] | Pelvis relative | 60.82 | 48.47 | - | -
Baseline |  | 59.85 | 49.14 | 92.07 | 63.88
Ours |  | 50.55 | 40.50 | 93.83 | 70.61
Zhao et al. [53] |  | 88.12 | 65.36 | 85.10 | 50.37
Akada et al. [1] | Device relative | 84.53 | 63.92 | 87.05 | 52.76
Baseline |  | 63.44 | 50.97 | 92.30 | 64.54
Ours |  | 46.20 | 40.19 | 94.02 | 73.53
", + "bbox": [ + 81, + 90, + 467, + 212 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/9a30d66fc619d9c378bdf11517cbfbd4433d6f280fd1fda2e16c5c3809ec4358.jpg", + "table_caption": [ + "Table 1. Quantitative results on UnrealEgo [1] with mm-scale." + ], + "table_footnote": [], + "table_body": "
Method | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑)
Zhao et al. [53] | 79.64 | 58.22 | 88.50 | 53.82
Akada et al. [1] | 72.80 | 52.88 | 91.32 | 55.81
Baseline | 52.23 | 39.78 | 95.72 | 68.13
Ours | 30.53 | 26.72 | 97.22 | 80.75
", + "bbox": [ + 81, + 244, + 467, + 319 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "works [1, 37, 38] to estimate 16 joints including the head.", + "bbox": [ + 76, + 375, + 457, + 388 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Depth and Heatmap Features. We use the sequence of the depth maps, depth region masks, and the 2D joint heatmaps as the memory of a cross-attention operation in the transformer decoder. For this purpose, we extract depth features $\\{\\mathbf{U}_{\\mathrm{Left}}^t,\\mathbf{U}_{\\mathrm{Right}}^t\\in \\mathbb{R}^{\\frac{H}{32}\\times \\frac{W}{32}\\times \\frac{C}{2}}\\}$ from the depth data:", + "bbox": [ + 76, + 388, + 467, + 467 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {U} _ {\\text {L e f t}} ^ {t} = \\mathcal {F} _ {\\text {D e p t h}} \\left(\\mathbf {D} _ {\\text {L e f t}} ^ {t} \\oplus \\widehat {\\mathbf {R}} _ {\\text {L e f t}} ^ {t}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 474, + 468, + 494 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where “ $\\oplus$ ” is a concatenation operation along the channel axis and $\\mathcal{F}_{\\mathrm{Depth}}$ represents a feature extractor. The same process can be applied to obtain $\\mathbf{U}_{\\mathrm{Right}}^t$ .", + "bbox": [ + 76, + 500, + 468, + 547 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Similarly, we extract heatmap features $\\{\\mathbf{G}_{\\mathrm{Left}}^t,\\mathbf{G}_{\\mathrm{Right}}^t\\in$ $\\mathbb{R}^{\\frac{H}{16}\\times \\frac{W}{16}\\times C}\\}$ from the 2D heatmaps:", + "bbox": [ + 76, + 547, + 468, + 580 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {G} _ {\\text {L e f t}} ^ {t} = \\mathcal {F} _ {\\mathrm {H M}} \\left(\\widehat {\\mathbf {H}} _ {\\text {L e f t}} ^ {t}\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 589, + 468, + 608 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{F}_{\\mathrm{HM}}$ represents another feature extractor. The same process can be applied to obtain $\\mathbf{G}_{\\mathrm{Right}}^t$ .", + "bbox": [ + 76, + 614, + 468, + 646 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "These features are forwarded with positional embeddings into the transformer. However, as mentioned in Sec. 4.3, depth values can be missing in some frames. To prevent processing features of such depth data and let the network focus only on valid frames, we propose to add padding masks $V_{\\mathrm{Depth}}^{t} \\in \\mathcal{R}$ to all the elements of $\\{\\mathbf{U}_{\\mathrm{Left}}^{t}, \\mathbf{U}_{\\mathrm{Right}}^{t}\\}$ :", + "bbox": [ + 76, + 646, + 468, + 753 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nV _ {\\text {D e p t h}} ^ {t} = \\left\\{ \\begin{array}{l l} - \\inf , & \\text {i f d e p t h v a l u e s a r e m i s s i n g} \\\\ 0, & \\text {o t h e r w i s e} \\end{array} . \\right. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 119, + 762, + 468, + 803 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When $V_{\\mathrm{Depth}}^{t} = -\\inf$ , the depth features $\\{\\mathbf{U}_{\\mathrm{Left}}^{t}, \\mathbf{U}_{\\mathrm{Right}}^{t}\\}$ after the softmax function in self-attention layers of the transformer will have zero effect on the network training.", + "bbox": [ + 76, + 809, + 468, + 854 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stereo-Video-Dependent Joint Query Adaptation. 
The existing work [44] represents human joints as learnable positional embeddings called joint queries that encode prior", + "bbox": [ + 76, + 854, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/116327d221ce28b77937ae22d78df26d7baff746a5162813e295dabbc9c3b096.jpg", + "table_caption": [ + "Table 2. Quantitative results of device-relative pose estimation on UnrealEgo2 with mm-scale." + ], + "table_footnote": [], + "table_body": "
Method | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑)
Zhao et al. [53] | 117.57 | 88.01 | 73.12 | 38.94
Akada et al. [1] | 122.64 | 86.55 | 72.51 | 38.67
Baseline | 115.95 | 85.00 | 74.13 | 40.11
Ours | 104.14 | 82.18 | 80.20 | 46.22
", + "bbox": [ + 504, + 90, + 887, + 162 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 3. Quantitative results of device-relative pose estimation on UnrealEgo-RW with mm-scale.", + "bbox": [ + 500, + 169, + 890, + 196 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "knowledge about the skeleton joints. In our problem setting, the simplest way to design such joint queries is to set queries for each pose in a motion sequence. However, this can not capture the temporal context in video inputs, e.g., human motions and background changes. Therefore, we extend the multi-view joint query augmentation technique [44] for our stereo video setting to account for sequential information. Specifically, we enhance the joint queries with the temporal intermediate features of stereo RGB frames $\\{\\mathbf{F}_{\\mathrm{Left}}^t,\\mathbf{F}_{\\mathrm{Right}}^t\\}$ . Firstly, from the sequence of the intermediate features, we create a sequence of combined features $\\mathbf{F}^{t}\\in \\mathbb{R}^{\\frac{H}{32}\\times \\frac{W}{32}\\times \\frac{C}{2}}$", + "bbox": [ + 498, + 215, + 890, + 381 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} ^ {t} = \\operatorname {c o n v} \\left(\\mathbf {F} _ {\\text {L e f t}} ^ {t} \\oplus \\mathbf {F} _ {\\text {r i g h t}}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 391, + 890, + 409 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where \"conv(\\cdot)\" is a convolution operation with a kernel size of $1 \\times 1$ .", + "bbox": [ + 498, + 417, + 890, + 448 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Next, we fuse the sequence of the combined features $\\mathbf{F}^t$ to obtain a fused stereo features $\\mathbf{F}_{\\mathrm{Stereo}} \\in \\mathbb{R}^{\\frac{C}{2}}$ :", + "bbox": [ + 498, + 449, + 890, + 479 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {\\text {S t e r e o}} = \\mathbf {F} _ {\\mathrm {P}} ^ {1} \\oplus \\dots \\oplus \\mathbf {F} _ {\\mathrm {P}} ^ {T}, \\text {w h e r e} \\mathbf {F} _ {\\mathrm {P}} ^ {i} = p (\\mathbf {F} ^ {i}), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 540, + 488, + 890, + 507 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $p(\\cdot)$ is an operation of adaptive average pooling. Now, the feature $\\mathbf{F}_{\\mathrm{Stereo}}$ contains stereo video information.", + "bbox": [ + 498, + 516, + 888, + 546 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Lastly, with $\\mathbf{F}_{\\mathrm{Stereo}}$ and a fully connected layer \" $\\operatorname {fc}(\\cdot)$ \", we augment each query $q^{t}$ to obtain $q_{\\mathrm{Aug}}^t\\in \\mathbb{R}^{16\\times \\frac{C}{2}}$ ..", + "bbox": [ + 498, + 547, + 888, + 580 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {q} _ {\\text {A u g}} ^ {t} = \\operatorname {f c} \\left(\\mathbf {F} _ {\\text {S t e r e o}}\\right) + q ^ {t}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 612, + 590, + 890, + 609 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Transformer Decoder. We adopt a DETR [2]-based transformer decoder and a pose regression head. In decoder layers, all of the augmented joint queries $q_{\\mathrm{Aug}}^{t}$ first interact with each other on a self-attention layer. 
Then, the queries extract all of the temporal stereo features from the memory $\\{\\mathbf{U}_{\\mathrm{Left}}^{t}, \\mathbf{U}_{\\mathrm{Right}}^{t}, \\mathbf{G}_{\\mathrm{Left}}^{t}, \\mathbf{G}_{\\mathrm{Right}}^{t}\\}$ with the padding masks $V_{\\mathrm{Depth}}^{t}$ on a cross-attention layer. Lastly, the pose regression head estimates a sequence of 3D poses $\\{\\hat{\\mathbf{P}}^t \\in \\mathbb{R}^{16 \\times 3} | t = 1, 2, \\dots, T\\}$ , yielding the final pose output $\\mathbf{P}^T$ .", + "bbox": [ + 498, + 617, + 890, + 758 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Similar to the previous works [5, 49], we train the 3D module with the pose supervision of the current and past frames:", + "bbox": [ + 498, + 758, + 890, + 803 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {3 \\mathrm {D}} = L _ {\\text {p o s e}} \\left(\\mathbf {P} ^ {T}, \\hat {\\mathbf {P}} ^ {T}\\right) + \\frac {\\lambda_ {\\text {p a s t}}}{(T - 1)} \\sum_ {t = 1} ^ {T - 1} L _ {\\text {p o s e}} \\left(\\mathbf {P} ^ {t}, \\hat {\\mathbf {P}} ^ {t}\\right), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 811, + 890, + 852 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {p o s e}} (\\mathbf {P}, \\hat {\\mathbf {P}}) = \\lambda_ {\\text {p o s e}} (\\operatorname {m p j p e} (\\mathbf {P}, \\hat {\\mathbf {P}}) + \\quad \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 544, + 864, + 890, + 888 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda_ {\\cos} \\cos (\\operatorname {b o n e} (\\mathbf {P}), \\operatorname {b o n e} (\\hat {\\mathbf {P}}))),\n$$\n", + "text_format": "latex", + "bbox": [ + 640, + 886, + 844, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "771", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5fdfd2791925374276a711ccaf7b6c0a60c88242a9cfdb84e484781ea919d57d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 80, + 88, + 158, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/bb85da3c970f066a55c60be4a27389729801720ae1cd0cc79419dcb3a945ae53.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 160, + 89, + 238, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2c9a7a5f483c191cdf1a4d51e72928dea5ee499a6e0e87bb1b66dd9f8992bb1a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 245, + 89, + 316, + 210 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e2489535ff6e9edeab3aa400f279bf3c2aff2a8f17f045bbaa2041d5db319651.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 89, + 398, + 210 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/95f66cc1d2818b11a995170078690d4fc1aca3869ce3c9a4edcecb5f75dfe5da.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 403, + 89, + 480, + 210 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2030a68b736941d5ef4c06b68e8f8aeae02c92b6578628b4a6034c45eb16765e.jpg", + "image_caption": [ + "Stereo inputs" + ], + "image_footnote": [], + "bbox": [ + 80, + 213, + 158, + 275 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d5aba829f6cda66deb697e588045bdbd9281bb2a601fbba876fd86e8ab93defb.jpg", + "image_caption": [ + "Akada et al. 
[1]" + ], + "image_footnote": [], + "bbox": [ + 160, + 213, + 238, + 275 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ba584dcfcf272c76440124a9fec23001a2f5357c1bdc5fc5e4eadd8e27280260.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 245, + 213, + 316, + 275 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/462476cb03179b4c0534fa71939fbcd889bd00f6d732d0ab86b7852589f72e9e.jpg", + "image_caption": [ + "Baseline", + "Figure 4. Qualitative results of device-relative pose estimation. Left: UnrealEgo2. Right: UnrealEgo-RW. 3D pose prediction and ground truth are displayed in red and green, respectively. For UnrealEgo-RW, we show ground-truth scene meshes for visualization." + ], + "image_footnote": [], + "bbox": [ + 321, + 213, + 398, + 275 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1a96b72d97ae0aac10c5db8320c95d44577d7030eb4f232c6e66eacddd86ec88.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 403, + 213, + 480, + 275 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7fd62126b1f5a6a63c2bdfc237130d3f0dbd477ec9c0eb1d679c1a7494f2f172.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 486, + 88, + 566, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c81008e9a0e6bef2a258b064fb1f380d26c86e7023180f0363376d5a65952eea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 568, + 89, + 647, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/66192191b5b69d3a75a122ddc53af3bed3be8d1d61cc260b901becee9b28ed40.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 650, + 89, + 728, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/df562450fb7278eedaa5c8daab4b6db37d1090af2728dbd94eb6efe54d52dbce.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 730, + 89, + 808, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4b98a5b5ceeed27899cd52fbcbea7b2bca9e9917c9a34db177e878334bdd06fc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 812, + 89, + 890, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/40abad9cc79a8e0749af20d18c0eaa7a990bea2de937d5524793ec364d754f13.jpg", + "image_caption": [ + "Stereo inputs" + ], + "image_footnote": [], + "bbox": [ + 486, + 213, + 566, + 275 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2c9a47229bbe152715ceaabb68787da855f418043c6e687acc65ff28ef0987d4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 570, + 213, + 647, + 275 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8710bd0764b2cf931cb6489035157b562c8f5f614885cafb109ee55635230cf7.jpg", + "image_caption": [ + "Akada et al. 
[1]" + ], + "image_footnote": [], + "bbox": [ + 650, + 213, + 728, + 275 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2f1f1aa7225ba1552707b49a9c82fe28fc6ba778d760d8c035761e084dd8163d.jpg", + "image_caption": [ + "Baseline" + ], + "image_footnote": [], + "bbox": [ + 730, + 213, + 808, + 275 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a147f9ecea883b099f6753eaa58d149ca3423b26736030c2e01029e80d3355df.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 812, + 213, + 890, + 275 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/8d4514795546479ae3d78ebb0e0c5f0bb57b9297026e676b69e859464b3bbc38.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | MPJPE(↓) | PA-MPJPE(↓)
(a) Baseline with depth information | 120.39 | 86.23
Baseline | 115.36 | 84.80
(b) Ours w/o query adaptation | 108.33 | 86.69
(c) Ours w/o depth information | 112.56 | 84.37
(d) Ours w/o depth padding mask | 108.70 | 84.26
(e) Ours with latest pose supervision only | 105.67 | 83.46
(f) Ours with a single set of queries | 105.58 | 85.68
Ours | 104.14 | 82.18
", + "bbox": [ + 83, + 349, + 464, + 486 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathbf{P}$ is a ground-truth 3D pose, $\\mathrm{mpjpe}(\\cdot)$ is the mean per joint position error, $\\cos (\\cdot)$ is a negative cosine similarity, and $\\mathrm{bone}(\\cdot)$ is an operation of obtaining bones of the 3D poses as used in the previous work [1]:", + "bbox": [ + 75, + 545, + 468, + 606 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {m p j p e} (\\mathbf {P}, \\hat {\\mathbf {P}}) = \\frac {1}{N J} \\sum_ {n = 1} ^ {N} \\sum_ {j = 1} ^ {J} | | \\mathbf {P} _ {n, j} - \\hat {\\mathbf {P}} _ {n, j} | | _ {2}, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 617, + 468, + 659 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\cos (\\mathbf {B}, \\hat {\\mathbf {B}}) = - \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sum_ {m = 1} ^ {M} \\frac {\\mathbf {B} _ {n , m} \\cdot \\hat {\\mathbf {B}} _ {n , m}}{| | \\mathbf {B} _ {n , m} | | | | \\hat {\\mathbf {B}} _ {n , m} | |}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 102, + 671, + 468, + 712 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $N$ is batch size, $J$ is the number of joints, $M$ is the number of bones, and $\\mathbf{B}_{n,m} \\in \\mathbb{R}^3$ is a vector of $m$ -th bone.", + "bbox": [ + 76, + 717, + 468, + 750 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 762, + 207, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Datasets for Evaluation", + "text_level": 1, + "bbox": [ + 76, + 786, + 292, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We use three datasets for our experiments: UnrealEgo [1], UnrealEgo2, and UnrealEgo-RW. For UnrealEgo, we use their proposed data splits. Also, we divide UnrealEgo2 into 12,139 motions (1,002,656 stereo views) for training, 1,545 motions (127,968 stereo views) for validation, and 1523 motions (123,488 stereo views) for testing. Similarly, we", + "bbox": [ + 76, + 809, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/16d04a19efec5d6a07574c1343fca07cff37f6de91a7444a8f9f05ab2f0ffbf9.jpg", + "table_caption": [ + "Table 4. Ablation study of our model for device-relative pose estimation on UnrealEgo-RW with mm-scale." + ], + "table_footnote": [], + "table_body": "
Method | Upper body MPJPE(↓) | Lower body MPJPE(↓) | Foot MPJPE(↓) | Foot MPE(↓)
Ours w/o depth information | 80.82 | 144.31 | 174.45 | 6.39
Ours w/o depth padding masks | 77.29 | 140.10 | 169.95 | 5.02
Ours | 77.85 | 130.97 | 155.86 | 4.83
", + "bbox": [ + 503, + 349, + 888, + 412 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 5. The effect of scene information (depth) per body part on UnrealEgo-RW. The numbers are in mm.", + "bbox": [ + 500, + 417, + 890, + 445 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "split UnrealEgo-RW into 547 motions (51,936 stereo views) for training, 77 motions (7,616 stereo views) for validation, and 86 motions (7,936 stereo views) for testing. We follow the existing works [1, 11, 37, 38, 40-42, 45, 52, 53] to report the results of device-relative 3D pose estimation. For UnrealEgo, we also follow the existing works [1, 12] to include the results of pelvis-relative 3D pose estimation.", + "bbox": [ + 496, + 470, + 890, + 578 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Training Details", + "text_level": 1, + "bbox": [ + 500, + 587, + 660, + 603 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We resize the input RGB images and ground-truth 2D keypoint heatmaps to $256 \\times 256$ and $64 \\times 64$ pixels, respectively. For the training of the 2D module, we follow the previous work [1] to use the ResNet18 [8] pre-trained on ImageNet [4] as an encoder and train the module with a batch size of 16 and an initial learning rate of $10^{-3}$ . Then, we train the 3D module with a batch size of 32 and an initial learning rate of $2 \\cdot 10^{-4}$ . The modules are trained with Adam optimizer [15] for ten epochs, starting with the initial learning rate for the first half epochs and applying a linearly decaying rate for the next half. Also, we set the hyperparameters as $\\lambda_{\\mathrm{pose}} = 0.1$ , $\\lambda_{\\mathrm{cos}} = 0.01$ , and $\\lambda_{\\mathrm{past}} = 0.1$ . We use five sequential stereo views as inputs to our model, i.e., $T = 5$ , with a skip size of 3. See our supplement for more details on the network architecture.", + "bbox": [ + 496, + 609, + 890, + 835 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. Evaluation", + "text_level": 1, + "bbox": [ + 500, + 847, + 619, + 861 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We compare our method with existing stereo-based egocentric pose estimation methods [1, 53]. 
We use the of", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "772", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b8ff47f7aac3cbaab619fd868be5cec580eb53d5f42d9395aa1cf7256978ff8d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 81, + 89, + 514, + 180 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1601914d99990671ee990999c3a69dde366aeefad8f651f761acf684b0c2935c.jpg", + "image_caption": [ + "3D-to-2D pose reprojection" + ], + "image_footnote": [], + "bbox": [ + 537, + 87, + 643, + 167 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c0b25a4525d502891b94574637c2198768944e17df3903684575ac3c8e1eef1a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 660, + 88, + 764, + 167 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "reprojection", + "bbox": [ + 658, + 167, + 730, + 180 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7194d77dc8227a51473a041ee594b94294628daf57d75184299be30e5bb3f38f.jpg", + "image_caption": [ + "3D pose estimation" + ], + "image_footnote": [], + "bbox": [ + 779, + 88, + 883, + 166 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3c50cd63f1945538be9e269ad317db4e40468d31ba05666ae253de74b957d232.jpg", + "image_caption": [ + "Figure 5. Results of our framework and comparison methods on example sequences from UnrealEgo2 (above) and UnrealEgo-RW (below). Left: MPJPE curves. Right: Outputs of our method at frame 87 and 329 of the sequences, respectively. 3D pose estimation and ground truth are colored in red and green, respectively." + ], + "image_footnote": [], + "bbox": [ + 80, + 181, + 514, + 281 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0e71dce06ba4d72dc03b7bd045dee2555f3be3ea270bbca4ad2f91c95e98116f.jpg", + "image_caption": [ + "3D-to-2D pose reprojection" + ], + "image_footnote": [], + "bbox": [ + 539, + 189, + 643, + 270 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/78136c63fc3ada05f13f891c651fa17fc5e4c0d8ac7bb6f4fd10ab38996d2d8f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 660, + 189, + 764, + 270 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "reprojection", + "bbox": [ + 658, + 271, + 730, + 284 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e96a9b4a852ee2ff08ed4cf1b103d800aa7a293edb8b7cdba51d74cdf58bf412.jpg", + "image_caption": [ + "3D pose estimation" + ], + "image_footnote": [], + "bbox": [ + 779, + 189, + 883, + 270 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ficial source code of Akada et al. [1] and re-implement the framework of Zhao et al. [53] as its source code is not available. Note that the comparison methods are trained on the same datasets as our model. Kang et al. [12] (arXiv preprint at the time of submission) only shows results of the pelvis-relative estimation on UnrealEgo. Therefore, we include them for reference. Furthermore, we are interested in the performance of the publicly available state-of-the-art method [1] with temporal inputs. Thus, we modify their 3D module such that it can take as an input a sequence of stereo 2D keypoint heatmaps with the same time step as ours, i.e., $T = 5$ . 
Here, we replace the first and the last fully connected layers in the encoder, the pose decoder, and the heatmap reconstruction decoder of their autoencoder-based 3D module [1] by those with $T$ times the size of the original hidden dimension. We denote this model as Baseline and train it with the same training procedure as Akada et al. [1]. Note that Akada et al. [1], Baseline, and our model use the same 2D module.", + "bbox": [ + 75, + 345, + 472, + 632 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We follow the existing works [1, 11, 37, 38, 40-42, 45, 52, 53] to report Mean Per Joint Position Error (MPJPE) and Mean Per Joint Position Error with Procrustes Alignment [13] (PA-MPJPE). We additionally report 3D Percentage of Correct Keypoints (3D PCK) and Area Under the Curve (AUC) for UnrealEgo2 and UnrealEgo-RW.", + "bbox": [ + 75, + 638, + 468, + 729 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results on Synthetic Datasets. Tables 1 and 2 report the results with UnrealEgo [1] and UnrealEgo2. Our method outperforms the existing methods [1, 12, 53] and Baseline across all metrics by a significant margin, e.g., $>15\\%$ on UnrealEgo [1] and $\\geq 40\\%$ on UnrealEgo2 (on MPJPE). The qualitative results on UnrealEgo2 in Fig. 4-(left part) show that existing methods and Baseline fail to estimate lower bodies of complex poses with severe self-occlusions, such as crouching. Even under such challenging scenarios, however, our approach yields accurate 3D poses. See Fig. 5-(above part) for a MPJPE curve and visual outputs of our", + "bbox": [ + 75, + 734, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/90a8d937c779434d551a4076208a07f943e006cc70e252bf4818bd2e5c47a061.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑)
T = 1 | 108.63 | 84.69 | 77.98 | 44.15
T = 3 | 108.23 | 85.28 | 78.35 | 44.54
T = 5 | 104.14 | 82.18 | 80.20 | 46.22
T = 7 | 104.01 | 82.43 | 80.52 | 46.10
", + "bbox": [ + 524, + 345, + 867, + 419 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 6. Ablation study of our model with different sequence lengths on UnrealEgo-RW. The numbers are in mm.", + "bbox": [ + 498, + 424, + 890, + 452 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "framework on UnrealEgo2. Our method is able to constantly estimate accurate 3D poses compared to the existing methods. As evidenced by these results, our method demonstrates superiority and becomes a strong benchmark method in the egocentric stereo 3D pose estimation tasks. See our supplementary material and video for more results. Results on the Real-World Dataset. Table 3 shows quantitative results on UnrealEgo-RW. Again, our method outperforms the existing methods [1, 53] and Baseline across all metrics, e.g., by more than $10\\%$ on MPJPE. See Fig. 4-(right) for qualitative results. The current state-of-the-art methods [1, 53] or Baseline show floating feet, inaccurate pelvis position, and penetration to the floor ground. However, our method is able to estimate accurate 3D poses. See Fig. 5-(below part) for a MPJPE curve and visual outputs on an example motion of UnrealEgo-RW. The curve indicates that our method constantly shows lower 3D errors than the comparison methods. All of the results indicate the effectiveness of our proposed framework compared to the existing methods. We also visualize 2D heatmaps, 3D-to-2D pose reprojection, and 3D pose prediction from our method in Fig. 6. Even when the joint locations of the lower body are estimated closely in the 2D heatmaps, our approach predicts accurate lower body poses. These results suggest that the proposed method with our portable device can open up the possibility of many future applications, including animating virtual humans (Fig. 1-(g)). For the virtual human animation, we applied inverse kinematics with estimated 3D joint locations and ground-truth camera poses to drive the", + "bbox": [ + 496, + 462, + 890, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "773", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2e77b11a607c3d0a6cdc2dda1f39c426ba05d8a2660ab7857d4ce589147de834.jpg", + "image_caption": [ + "Stereo inputs" + ], + "image_footnote": [], + "bbox": [ + 78, + 88, + 191, + 174 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e3146f908f0672d9e0886816bb289d0455391a9e3cdaa223e0210afcb8c29dae.jpg", + "image_caption": [ + "Figure 6. Visualization of outputs from our model on UnrealEgo-RW. 3D-to-2D pose reprojection is visualized in the same colors as in Fig. 1-(e). 3D pose estimation and ground truth are displayed in red and green, respectively." 
+ ], + "image_footnote": [], + "bbox": [ + 194, + 88, + 307, + 174 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bb449581a65ca399763951e5a742673f7e960769f766d4cbc7d77982c4ef0061.jpg", + "image_caption": [ + "2D joint heatmap estimation" + ], + "image_footnote": [], + "bbox": [ + 312, + 88, + 424, + 174 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/58fccb4ac4bb0af3ec4c15c1abe169eaa8e12a6d628f3abb227ac4fe0fea2df4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 428, + 88, + 542, + 174 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4f5d3e8fc901e75542a7996091992392e5269a756c31ff1a41cd9f01d82c81fc.jpg", + "image_caption": [ + "3D-to-2D pose reprojection" + ], + "image_footnote": [], + "bbox": [ + 545, + 88, + 656, + 174 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/21fb5b2e2f0cfec6ba1d5395392dc8d0c9e181ea9b4f08a7722884907956a5ff.jpg", + "image_caption": [ + "3D pose estimation" + ], + "image_footnote": [], + "bbox": [ + 661, + 88, + 774, + 174 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2c8793e1c98b587952e6df0b244e66eee51befb37da427a3eaba19a67aff0ffb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 779, + 88, + 890, + 174 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "character in a world coordinate system.", + "bbox": [ + 75, + 242, + 336, + 257 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation Study. In Table 4, we first ablate (a) the CNN-based 3D module (Baseline) with depth data concatenated to the heatmap inputs. However, naively adding this extra scene information to this 3D module does not help probably because the CNN layers can be affected by invalid depth values even with the depth region masks.", + "bbox": [ + 75, + 258, + 467, + 349 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Next, we test our transformer-based 3D module (b) without query augmentation (c) without depth data. They perform worse than our full framework. We also ablate our method (d) without the padding mask. The result indicates that adding depth padding masks helps because the padding mask can filter out the invalid values in depth maps in the attention module. These results validate that our video-based 3D scene reconstruction module and video-dependent query augmentation policy boost 3D joint localization accuracy. Next, we ablate our model (e) with 3D pose supervision of the latest frame only. Note that this ablation uses the same sets of input data and joint queries as the original model, i.e., $T = 5$ . This model estimates less accurate poses due to the loss of supervision from past 3D poses. We also test (f) a single set of joint queries, i.e., $q^1$ , instead of $T$ sets to predict the latest 3D pose. Similar to (e), this model cannot benefit from the supervision of past 3D poses.", + "bbox": [ + 75, + 352, + 467, + 609 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We further investigate the effect of the scene information. Table 5 shows the MPJPE per body part and Mean Penetration Error (MPE) [34, 35] between feet and floor ground. The results reveal that depth features with the padding masks reduce the errors in the lower body while maintaining the performance in the upper body.", + "bbox": [ + 75, + 611, + 467, + 702 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Table 6, we ablate the effect of the sequence length of input frames for our method. 
It is worth noting that our model with $T = 1$ yields better results than the best existing method [1] and Baseline that utilizes temporal information (see Table 3). Since our model uses the same 2D module as Akada et al. [1] and Baseline, the difference comes only from the 3D module. This suggests that their autoencoder-based 3D modules with the heatmap reconstruction component are, very likely, not the most suitable solution for estimating 3D poses from 2D joint heatmaps, highlighting the potential of our transformer-based framework. The result also indicates that although the longer sequence can bring performance improvement to some extent, the se", + "bbox": [ + 75, + 703, + 467, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/869defdf736714e5f81be48fd6cd05ec557116221724957e7dfdf0e5f0789fbb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | Initial training data | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑)
Zhao et al. [53] |  | 99.09 | 72.47 | 79.82 | 43.55
Akada et al. [1] | UnrealEgo [1] | 94.87 | 69.79 | 82.78 | 46.80
Baseline |  | 83.89 | 64.30 | 86.20 | 51.63
Ours |  | 75.34 | 57.29 | 89.43 | 55.77
Zhao et al. [53] |  | 97.86 | 69.92 | 81.53 | 46.32
Akada et al. [1] | UnrealEgo2 | 92.48 | 67.15 | 84.25 | 48.04
Baseline |  | 82.16 | 61.60 | 87.07 | 52.72
Ours |  | 72.89 | 56.19 | 90.29 | 57.19
", + "bbox": [ + 501, + 241, + 888, + 354 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 7. Fine-tuning results of device-relative 3D pose estimation on UnrealEgo-RW with mm-scale.", + "bbox": [ + 500, + 358, + 890, + 386 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "quence lengths of five and seven show comparable results. Synthetic Data for Pre-training. No existing works explored the efficacy of synthetic data for pre-training in egocentric 3D pose estimation. Thus, we further conduct experiments with models pre-trained on the synthetic datasets and fine-tuned on the real-world data. Tables 3 and 7 show that all methods benefit from the training with the large-scale synthetic data even with the differences in the synthetic and real-world setups, e.g., fisheye distortion and syn-to-real domain gaps. Note that the gain of our method from UnrealEgo to UnrealEgo2 is significant, i.e., $3.3\\%$ on MPJPE (75.34mm to 72.89mm). This suggests that it is helpful to develop not only new models but also large-scale synthetic datasets even with different distortion and domain gaps.", + "bbox": [ + 496, + 407, + 890, + 618 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 633, + 617, + 648 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we proposed a new transformer-based framework that significantly boosts the accuracy of egocentric stereo 3D human pose estimation. The proposed framework leverages the scene information and temporal context of egocentric stereo video inputs via our video-based 3D scene reconstruction module and video-based joint query augmentation policy. Our extensive experiments on the new synthetic and real-world datasets with challenging human motions validate the effectiveness of our approach compared to the existing methods. We hope that our proposed benchmark datasets and trained models will foster the further development of methods for egocentric 3D vision.", + "bbox": [ + 496, + 657, + 890, + 839 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgment. The work was supported by the ERC Consolidator Grant 4DReply (770784) and the Nakajima Foundation. We thank Silicon Studio Corp. for providing the fisheye plug-in for Unreal Engine.", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "774", + "bbox": [ + 485, + 944, + 509, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Hiroyasu Akada, Jian Wang, Soshi Shimada, Masaki Takahashi, Christian Theobalt, and Vladislav Golyanik. Unrealego: A new dataset for robust egocentric 3d human motion capture. In European Conference on Computer Vision (ECCV), 2022. 1, 2, 3, 5, 6, 7, 8", + "[2] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision (ECCV), 2020. 5", + "[3] Young-Woon Cha, True Price, Zhen Wei, Xinran Lu, Nicholas Rewkowski, Rohan Chabra, Zihe Qin, Hyounghun Kim, Zhaoqi Su, Yebin Liu, Adrian Ilie, Andrei State, Zhenlin Xu, Jan-Michael Frahm, and Henry Fuchs. Towards fully mobile 3d face, body, and environment capture using only head-worn cameras. 
IEEE Transactions on Visualization and Computer Graphics, 24(11):2993-3004, 2018. 2, 3", + "[4] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei. ImageNet: A Large-Scale Hierarchical Image Database. In Computer Vision and Pattern Recognition (CVPR), 2009. 6", + "[5] Moritz Einfalt, Katja Ludwig, and Rainer Lienhart. Uplift and upsample: Efficient 3d human pose estimation with up-lifting transformers. In Winter Conference on Applications of Computer Vision (WACV), 2023. 2, 5", + "[6] FUJINON FE185C057HA-1 fisheye lens, 2023. https:// www.fujifilm.com/de/de/business/opticaldevices/mvlems/fe185.3", + "[7] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Computer Vision and Pattern Recognition (CVPR), 2022. 3", + "[8] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Computer Vision and Pattern Recognition (CVPR), 2016. 6", + "[9] Yihui He, Rui Yan, Katerina Fragkiadaki, and Shoou-I Yu. Epipolar transformers. In Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[10] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. 2020. 2", + "[11] Hao Jiang and Vamsi Krishna Ithapu. Egocentric pose estimation from human vision span. In International Conference on Computer Vision (ICCV), 2021. 1, 2, 3, 6, 7", + "[12] Taeho Kang, Kyungjin Lee, Jinrui Zhang, and Youngki Lee. Ego3dpose: Capturing 3d cues from binocular egocentric views. In SIGGRAPH Asia Conference, 2023. 2, 5, 6, 7", + "[13] David G. Kendall. A Survey of the Statistical Theory of Shape. Statistical Science, 4(2):87-99, 1989. 7", + "[14] Rawal Khirodkar, Aayush Bansal, Lingni Ma, Richard Newcombe, Minh Vo, and Kris Kitani. Ego-humans: An egocentric 3d multi-human benchmark. In International Conference on Computer Vision (ICCV), 2023. 3", + "[15] Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations (ICLR), 2015. 6", + "[16] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer White-" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "head, Alexander C. Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything. arXiv:2304.02643, 2023. 4", + "[17] Jiaman Li, Karen Liu, and Jiajun Wu. Ego-body pose estimation via ego-head pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2023. 3", + "[18] Wenhao Li, Hong Liu, Runwei Ding, Mengyuan Liu, Pichao Wang, and Wenming Yang. Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia (TMM), 2022. 2", + "[19] Wenhao Li, Hong Liu, Hao Tang, Pichao Wang, and Luc Van Gool. Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[20] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[21] Yuxuan Liu, Jianxin Yang, Xiao Gu, Yijun Chen, Yao Guo, and Guang-Zhong Yang. Egofish3d: Egocentric 3d pose estimation from a fisheye camera via self-supervised learning. IEEE Transactions on Multimedia (TMM), pages 1-12, 2023. 2", + "[22] Zhengyi Luo, Ryo Hachiuma, Ye Yuan, and Kris Kitani. 
Dynamics-regulated kinematic policy for egocentric pose estimation. 2021. 1, 2, 3", + "[23] Haoyu Ma, Liangjian Chen, Deying Kong, Zhe Wang, Xingwei Liu, Hao Tang, Xiangyi Yan, Yusheng Xie, Shih-Yao Lin, and Xiaohui Xie. Transfusion: Cross-view fusion with transformer for 3d human pose estimation. In British Machine Vision Conference (BMVC), 2021. 2", + "[24] Metashape, 2023. https://www.agisoft.com/.4", + "[25] Mixamo, 2022. https://www MIXamo.com.3", + "[26] Xiaqing Pan, Nicholas Charron, Yongqian Yang, Scott Peters, Thomas Whelan, Chen Kong, Omkar Parkhi, Richard Newcombe, and Yuheng (Carl) Ren. Aria digital twin: A new benchmark dataset for egocentric 3d machine perception. In International Conference on Computer Vision (ICCV), 2023. 3", + "[27] Jinman Park, Kimathi Kaai, Saad Hossain, Norikatsu Sumi, Sirisha Rambhatla, and Paul Fieguth. Domain-guided spatiotemporal self-attention for egocentric 3d pose estimation. In Conference on Knowledge Discovery and Data Mining (KDD), 2023. 2", + "[28] Sungchan Park, Eunyi You, Inhoe Lee, and Joonseok Lee. Towards robust and smooth 3d multi-person pose estimation from monocular videos in the wild. In International Conference on Computer Vision (ICCV), 2023. 2", + "[29] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 3d human pose estimation in video with temporal convolutions and semi-supervised training. In Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[30] RenderPeople, 2022. https://renderpeople.com.3", + "[31] Helge Rhodin, Christian Richardt, Dan Casas, Eldar Insafutdinov, Mohammad Shafiei, Hans-Peter Seidel, Bernt Schiele, and Christian Theobalt. Egocap: egocentric marker-less motion capture with two fisheye cameras. ACM Transactions on Graphics (TOG), 35(6):1-11, 2016. 1, 2, 3" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "775", + "bbox": [ + 486, + 945, + 509, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[32] RIBCAGE RX0 II camera, 2023. https://www.backbone.ca/product/ribcage-rx0-2/.3", + "[33] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Computer Vision and Pattern Recognition (CVPR), 2016. 2", + "[34] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, and Christian Theobalt. Physcap: Physically plausible monocular 3d motion capture in real time. ACM Transactions on Graphics (TOG), 39(6), 2020. 8", + "[35] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, Patrick Pérez, and Christian Theobalt. Neural monocular 3d human motion capture with physical awareness. ACM Transactions on Graphics (TOG), 40(4), 2021. 8", + "[36] Zhenhua Tang, Zhaofan Qiu, Yanbin Hao, Richang Hong, and Ting Yao. 3d human pose estimation with spatiotemporal criss-cross attention. In Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2", + "[37] Denis Tome, Patrick Peluse, Lourdes Agapito, and Hernan Badino. xr-egopose: Egocentric 3d human pose from an hmd camera. In International Conference on Computer Vision (ICCV), 2019. 1, 2, 3, 5, 6, 7", + "[38] Denis Tome, Thiemo Alldieck, Patrick Peluse, Gerard Pons-Moll, Lourdes Agapito, Hernan Badino, and Fernando de la Torre. Selfpose: 3d egocentric pose estimation from a headset mounted camera. IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 45(6):6794-6806, 2023. 
1, 2, 5, 6, 7", + "[39] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems (NeurIPS), 2017. 2", + "[40] Jian Wang, Lingjie Liu, Weipeng Xu, Kripasindhu Sarkar, and Christian Theobalt. Estimating egocentric 3d human pose in global space. In International Conference on Computer Vision (ICCV), 2021. 1, 2, 3, 6, 7", + "[41] Jian Wang, Lingjie Liu, Weipeng Xu, Kripasindhu Sarkar, Diogo Luvizon, and Christian Theobalt. Estimating egocentric 3d human pose in the wild with external weak supervision. In Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3", + "[42] Jian Wang, Diogo Luvizon, Weipeng Xu, Lingjie Liu, Kripasindhu Sarkar, and Christian Theobalt. Scene-aware egocentric 3d human pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 6, 7", + "[43] Jian Wang, Zhe Cao, Diogo Luvizon, Lingjie Liu, Kripasindhu Sarkar, Danhang Tang, Thabo Beeler, and Christian Theobalt. Egocentric whole-body motion capture with fisheyevit and diffusion-based motion refinement. In Computer Vision and Pattern Recognition (CVPR), 2024. 2", + "[44] Tao Wang, Jianfeng Zhang, Yujun Cai, Shuicheng Yan, and Jiashi Feng. Direct multi-view multi-person 3d human pose estimation. Advances in Neural Information Processing Systems (NeurIPS), 2021. 2, 5", + "[45] Weipeng Xu, Avishek Chatterjee, Michael Zollhoefer, Helge Rhodin, Pascal Fua, Hans-Peter Seidel, and Christian Theobalt. $\\mathrm{Mo}^2\\mathrm{Cap}^2$ : Real-time mobile 3d motion capture with a cap-mounted fisheye camera. IEEE Transactions on Visualization and Computer Graphics, 2019. 1, 2, 3, 6, 7" + ], + "bbox": [ + 78, + 92, + 467, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[46] Honghong Yang, Longfei Guo, Yumei Zhang, and Xiaojun Wu. U-shaped spatial-temporal transformer network for 3d human pose estimation. Machine Vision and Applications, 33(6):82, 2022. 2", + "[47] Yingxuan You, Hong Liu, Ti Wang, Wenhao Li, Runwei Ding, and Xia Li. Co-evolution of pose and mesh for 3d human body estimation from video. In International Conference on Computer Vision (ICCV), 2023. 2", + "[48] Ye Yuan and Kris Kitani. Ego-pose estimation and forecasting as real-time pd control. In International Conference on Computer Vision (ICCV), 2019. 1, 2, 3", + "[49] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Jun-song Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. In Computer Vision and Pattern Recognition (CVPR), 2022. 2, 5", + "[50] Siwei Zhang, Qianli Ma, Yan Zhang, Zhiyin Qian, Taein Kwon, Marc Pollefeys, Federica Bogo, and Siyu Tang. Ego-body: Human body shape and motion of interacting people from head-mounted devices. In European conference on computer vision (ECCV), 2022. 3", + "[51] Siwei Zhang, Qianli Ma, Yan Zhang, Sadegh Aliakbarian, Darren Cosker, and Siyu Tang. Probabilistic human mesh recovery in 3d scenes from egocentric views. In International Conference on Computer Vision (ICCV), 2023. 2, 3", + "[52] Yahui Zhang, Shaodi You, and Theo Gevers. Automatic calibration of the fisheye camera for egocentric 3d human pose estimation from a single image. In Winter Conference on Applications of Computer Vision (WACV), 2021. 1, 2, 6, 7", + "[53] Dongxu Zhao, Zhen Wei, Jisan Mahmud, and Jan-Michael Frahm. Egoglass: Egocentric-view human pose estimation from an eyeglass frame. 
In International Conference on 3D Vision (3DV), 2021. 1, 2, 3, 5, 6, 7, 8", + "[54] Qitao Zhao, Ce Zheng, Mengyuan Liu, Pichao Wang, and Chen Chen. Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2", + "[55] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In International Conference on Computer Vision (ICCV), 2021.", + "[56] Jieming Zhou, Tong Zhang, Zeeshan Hayden, Lars Petersson, and Mehrtash Harandi. Diff3dhpe: A diffusion model for 3d human pose estimation. In International Conference on Computer Vision (ICCV) Workshops, 2023.", + "[57] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: A unified perspective on learning human motion representations. In International Conference on Computer Vision (ICCV), 2023.", + "[58] Yiran Zhu, Xing Xu, Fumin Shen, Yanli Ji, Lianli Gao, and Heng Tao Shen. PosegTac: Graph transformer encoder-decoder with atrous convolution for 3d human pose estimation. In International Joint Conference on Artificial Intelligence (IJCAI), 2021. 2" + ], + "bbox": [ + 501, + 92, + 890, + 854 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "776", + "bbox": [ + 486, + 945, + 511, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/3D Human Pose Perception from Egocentric Stereo Videos/95770e99-65fc-4fd7-9de3-96977a97b4b8_model.json b/2024/3D Human Pose Perception from Egocentric Stereo Videos/95770e99-65fc-4fd7-9de3-96977a97b4b8_model.json new file mode 100644 index 0000000000000000000000000000000000000000..3ce47b04123384ebe91d2f96c0be90e1ffe8c48d --- /dev/null +++ b/2024/3D Human Pose Perception from Egocentric Stereo Videos/95770e99-65fc-4fd7-9de3-96977a97b4b8_model.json @@ -0,0 +1,2948 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.19, + 0.131, + 0.783, + 0.154 + ], + "angle": 0, + "content": "3D Human Pose Perception from Egocentric Stereo Videos" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.182, + 0.287, + 0.2 + ], + "angle": 0, + "content": "Hiroyasu Akada" + }, + { + "type": "text", + "bbox": [ + 0.316, + 0.182, + 0.649, + 0.216 + ], + "angle": 0, + "content": "Jian Wang Vladislav Golyanik Max Planck Institute for Informatics, SIC" + }, + { + "type": "text", + "bbox": [ + 0.666, + 0.182, + 0.818, + 0.198 + ], + "angle": 0, + "content": "Christian Theobalt" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.251, + 0.203, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.135, + 0.396, + 0.154, + 0.407 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.206, + 0.252, + 0.294, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.207, + 0.325, + 0.293, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.396, + 0.262, + 0.408 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.297, + 0.252, + 0.421, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.396, + 0.369, + 0.408 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.429, + 0.252, + 0.553, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.485, + 0.396, + 0.504, + 0.408 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image", + "bbox": [ + 0.555, + 0.252, + 0.642, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.556, + 0.325, + 0.642, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.595, + 0.396, + 0.613, + 0.408 + ], + "angle": 0, + "content": "(e)" + }, + { + "type": "image", + "bbox": [ + 0.643, + 0.252, + 0.766, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.701, + 0.396, + 0.718, + 0.408 + ], + "angle": 0, + "content": "(f)" + }, + { + "type": "image", + "bbox": [ + 0.768, + 0.252, + 0.892, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.823, + 0.396, + 0.843, + 0.408 + ], + "angle": 0, + "content": "(g)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.41, + 0.895, + 0.467 + ], + "angle": 0, + "content": "Figure 1. 3D human pose estimation results of our proposed method from egocentric stereo fisheye videos. Left: results on synthetic images; (a) reference RGB view of the scene; (b) 3D-to-2D pose re-projections, and (c) a 3D pose in a scene mesh reconstructed by our framework. Right: results on real-world images; (d) reference view; (e) 3D-to-2D pose re-projections; (f) a 3D pose in the reconstructed scene, and (g) 3D virtual character animation (possible future application of our method)." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.479, + 0.314, + 0.494 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.512, + 0.473, + 0.86 + ], + "angle": 0, + "content": "While head-mounted devices are becoming more compact, they provide egocentric views with significant self-occlusions of the device user. Hence, existing methods often fail to accurately estimate complex 3D poses from egocentric views. 
In this work, we propose a new transformer-based framework to improve egocentric stereo 3D human pose estimation, which leverages the scene information and temporal context of egocentric stereo videos. Specifically, we utilize 1) depth features from our 3D scene reconstruction module with uniformly sampled windows of egocentric stereo frames, and 2) human joint queries enhanced by temporal features of the video inputs. Our method is able to accurately estimate human poses even in challenging scenarios, such as crouching and sitting. Furthermore, we introduce two new benchmark datasets, i.e., UnrealEgo2 and UnrealEgo-RW (RealWorld). The proposed datasets offer a much larger number of egocentric stereo views with a wider variety of human motions than the existing datasets, allowing comprehensive evaluation of existing and upcoming methods. Our extensive experiments show that the proposed approach significantly outperforms previous methods. UnrealEgo2, UnrealEgo-RW, and trained models are available on our project page1 and Benchmark Challenge2." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.479, + 0.632, + 0.494 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.505, + 0.892, + 0.627 + ], + "angle": 0, + "content": "Egocentric 3D human motion capture using wearable devices has received increased attention recently [1, 11, 22, 31, 37, 38, 40-42, 45, 48, 52, 53]. Different from traditional vision-based motion capture setups that require a fixed recording space, egocentric systems allow flexible motion capture in less constrained situations. Therefore, the egocentric setups offer various applications, such as motion analysis and XR technologies (Fig. 1-(g))." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.628, + 0.893, + 0.794 + ], + "angle": 0, + "content": "Previous works proposed various egocentric methods to capture device users. On the one hand, the vast majority of existing methods—which use a monocular camera—would fail for complex human poses due to depth ambiguity and self-occlusion. On the other hand, the methods designed for stereo devices do not yet realize the full potential of their stereo settings, especially with the most recent compact eyeglasses-based setups [1, 53]. Specifically, they do not deliver high 3D reconstruction accuracy across different scenarios. Moreover, these approaches do not consider scene information, which further limits their accuracy." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.895, + 0.902 + ], + "angle": 0, + "content": "To address the challenges outlined above, we propose a new transformer-based framework for egocentric 3D human motion capture from compact eyeglasses-based devices; see Fig. 1. The first step of our framework is to estimate 2D joint heatmaps from egocentric stereo fisheye RGB videos (Sec. 4.1). 
These 2D joint heatmaps are then processed with human joint queries in our transformer-based 3D mod" + }, + { + "type": "page_footnote", + "bbox": [ + 0.094, + 0.875, + 0.411, + 0.887 + ], + "angle": 0, + "content": "1https://4dqv.mpi-inf.mpq.de/UnrealEgo2/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.888, + 0.365, + 0.9 + ], + "angle": 0, + "content": "2https://unrealego.mpi-inf.mpg.de/" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.875, + 0.411, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "767" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.473, + 0.364 + ], + "angle": 0, + "content": "ule to estimate 3D poses. Here, we leverage the scene information and temporal context of the input videos in the 3D module to improve estimation accuracy. Firstly, we use uniformly sampled windows of egocentric stereo frames to reconstruct a 3D background scene using Structure from Motion (SfM) [33], obtaining scene depth as additional information for the 3D module (Sec. 4.2 and 4.3). In our challenging eyeglasses-based setup, however, the 3D scene and camera poses can not always be estimated due to severe self-occlusion in the egocentric images. This results in depth maps with zero (invalid) values and undesired computation of network gradients during training. To mitigate this issue, we propose to use depth padding masks that prevent processing such invalid depth values in the 3D module. Additionally, we propose video-dependent query augmentation that enhances the joint queries with the temporal context of stereo video inputs to effectively capture the temporal relation of human motions at a joint level (Sec. 4.4)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.364, + 0.473, + 0.544 + ], + "angle": 0, + "content": "We also introduce two new benchmark datasets: UnrealEgo2 and UnrealEgo-RW. UnrealEgo2 is an extended version of UnrealEgo [1] and the largest eyeglasses-based synthetic data with various new motions, offering \\(2.8 \\times\\) larger data (2.5M images) than the existing dataset [1]. UnrealEgo-RW is a real-world dataset recorded with our newly developed device that resembles the virtual eyeglasses-based setup [1], offering 260k images with various motions and 3D poses. The proposed datasets make it possible to evaluate existing and upcoming methods on a variety of motions, not only in synthetic scenes but also in real-world cases." + }, + { + "type": "text", + "bbox": [ + 0.098, + 0.545, + 0.454, + 0.56 + ], + "angle": 0, + "content": "In short, the contributions of this paper are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.561, + 0.47, + 0.605 + ], + "angle": 0, + "content": "- The transformer-based framework for egocentric stereo 3D human pose estimation that accounts for temporal context in egocentric stereo views." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.607, + 0.47, + 0.669 + ], + "angle": 0, + "content": "- 3D pose estimation is enhanced via the utilization of scene information from our video-based 3D scene reconstruction module as well as joint queries obtained from our video-dependent query augmentation policy." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.669, + 0.47, + 0.759 + ], + "angle": 0, + "content": "- A new portable device for egocentric stereo view capture with its specification and two new benchmark datasets: UnrealEgo2 and UnrealEgo-RW recorded with our device. 
The proposed datasets allow for a comprehensive evaluation of methods for egocentric 3D human pose estimation from stereo views." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.561, + 0.47, + 0.759 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.76, + 0.47, + 0.866 + ], + "angle": 0, + "content": "Our experiments demonstrate that the proposed method outperforms the previous state-of-the-art approaches by a substantial margin, i.e., \\(>15\\%\\) on UnrealEgo [1], \\(\\geq 40\\%\\) on UnrealEgo2, and \\(\\geq 10\\%\\) on UnrealEgo-RW (on MPJPE). We release UnrealEgo2, UnrealEgo-RW, and our trained models on our project page3 and Benchmark Challenge4 to foster the area of egocentric 3D vision." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.09, + 0.642, + 0.107 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.115, + 0.895, + 0.373 + ], + "angle": 0, + "content": "Egocentric 3D Human Motion Capture. Recent years witnessed significant innovations in egocentric 3D human pose estimation. To capture device users, many existing works use downward-facing cameras and the existing methods can be categorized into two groups. The first group are monocular approaches [11, 21, 22, 27, 37, 38, 40, 41, 43, 45, 48, 52]. For example, Wang et al. [43] uses a diffusion-based [10] motion prior to tackle self-conclusions. Due to the depth ambiguity, monocular methods often fail to estimate accurate 3D poses. Wang et al. [42] tackled this issue by projecting depth and 2D pose features into a pre-defined voxel space. This method requires additional training with ground-truth depths and human body segmentation; it cannot easily be extended for multi-view or temporal inputs. Zhang et al. [51] utilized a diffusion model [10] conditioned on a 3D scene to generate poses. They require pre-scanned scene mesh as an input and cannot capture a device user." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.373, + 0.895, + 0.584 + ], + "angle": 0, + "content": "The second group, including our work, focuses on the multi-view (often stereo) setting. Rhodin et al. [31] proposed an optimization approach whereas Cha et al. [3] used eight cameras to estimate a 3D body and reconstruct a 3D scene separately. Other works [1, 53] used the multi-branch autoencoder [37] to the stereo setup. Kang et al. [12] (arXiv pre-print at the time of submission) leveraged a stereomatching mechanism and perspective embedding heatmaps. In contrast to the existing methods, we propose a new transformer-based method that effectively utilizes egocentric stereo videos via our video-based 3D scene reconstruction module and video-dependent query augmentation policy. Our method considers the scene information without the supervision of the scene data." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.584, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Transformers in 3D Human Pose Estimation from External Cameras. 3D pose estimation from external cameras has shown significant progress due to the advances in transformer architectures [39]. Some works [20, 47] predict 3D human pose and mesh from monocular views. Other works [5, 18, 19, 28, 29, 36, 46, 49, 54-58] present a 2D-to-3D lifting module that estimates 3D poses from monocular 2D joints obtained with off-the-shelf 2D joint detectors. Although their lifting modules show impressive results, those monocular methods cannot be easily applied to our stereo setting. 
On the other hand, some works utilize transformers in multi-view settings. He et al. [9] and Ma et al. [23] aggregate stereo information on epipolar lines of stereo images, which are difficult to obtain from fisheye images. Recent work [44] regresses multi-person 3D poses from multi-view inputs, powered by projective attention and query adaptation. However, no existing works explored the potential of transformers along with 2D joint heatmaps or explicit scene information in stereo 3D pose estimation. In this paper, we propose a transformer-based framework that accounts for the temporal relation of human motion at a joint level via" + }, + { + "type": "page_footnote", + "bbox": [ + 0.093, + 0.874, + 0.411, + 0.887 + ], + "angle": 0, + "content": "3https://4dqv.mpi-inf.mpg.de/UnrealEgo2/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.093, + 0.887, + 0.365, + 0.901 + ], + "angle": 0, + "content": "4https://unrealego.mpi-inf.mpg.de/" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.874, + 0.411, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "768" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.468, + 0.12 + ], + "angle": 0, + "content": "intermediate 2D joint heatmap and depth maps even with inaccurate depth values mixed in the framework." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.122, + 0.468, + 0.394 + ], + "angle": 0, + "content": "Datasets for Egocentric 3D Human Pose Estimation. Several works proposed unique setups to create datasets, using a monocular camera [11, 17, 22, 37, 40, 41, 45, 48] and forward-facing cameras [11, 14, 17, 22, 26, 48, 50, 51]. There also exist datasets captured with stereo devices [3, 7, 14, 26, 31, 53]. However, they are small [31] with limited motion types [31, 53], not publicly available [3, 53], or do not provide ground truth 3D poses of device users [7, 14, 26]. Recently, Akada et al. [1] introduced UnrealEgo, a synthetic dataset based on virtual eyeglasses with two fisheye cameras. However, they provide only synthetic images. Meanwhile, more glasses-based stereo datasets that offer a wider variety of motions or real-world footage are required nowadays for an extensive evaluation of existing and upcoming methods. Hence, we introduce two new benchmark datasets that in their characteristics go beyond the existing data: UnrealEgo2 and UnrealEgo-RW. We describe the proposed datasets in the following section." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.408, + 0.333, + 0.423 + ], + "angle": 0, + "content": "3. Mobile Device and Datasets" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.433, + 0.468, + 0.613 + ], + "angle": 0, + "content": "We present two new datasets for egocentric stereo 3D motion capture: UnrealEgo2 and UnrealEgo-RW; see Fig. 1. Please watch our supplementary video for visualizations. UnrealEgo2 Dataset. To create UnrealEgo2 (an extension of UnrealEgo [1]), we adapt the publicly available setup with a virtual eyeglasses device [1]. This setup comes with two downward-facing fisheye cameras attached \\(12\\mathrm{cm}\\) apart from each other on the glasses frames. The camera's field of view is \\(170^{\\circ}\\). With this device, we capture 17 realistic 3D human models [30] animated by the Mixamo [25] dataset in various 3D environments. We record simple to highly complex motions such as crouching and crawling, for 14 hours." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.468, + 0.734 + ], + "angle": 0, + "content": "Overall, UnrealEgo2 offers 15,207 motions and \\(>1.25\\mathrm{M}\\) stereo views (2.5M images) as well as depth maps with a resolution \\(1024\\times 1024\\) pixel rendered at 25 frames per second. Each frame is annotated with 32 body and 40 hand joints. Note that UnrealEgo2 is the largest glasses-based dataset and \\(2.8\\times\\) larger than UnrealEgo. Also, it does not share the same motions with UnrealEgo, providing a larger motion variety for a comprehensive evaluation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Design of Our Mobile Device. Evaluation with real-world datasets plays a pivotal role in computer vision research. Therefore, we build a new portable device; see Fig. 2. Our device is based on a helmet with two RIBCAGE RX0 II cameras [32] and two FUJINON FE185C057HA-1 fisheye lenses [6]. We placed the cameras \\(12\\mathrm{cm}\\) away from each other and \\(2\\mathrm{cm}\\) away from user's face. We cropped the margins of the egocentric images to resemble the field of view of \\(170^{\\circ}\\) of the UnrealEgo and UnrealEgo2 setups. Note that our setup is more compact than EgoCap [31] that placed cameras \\(25\\mathrm{cm}\\) away from user's face." + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.089, + 0.763, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.775, + 0.089, + 0.893, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.529, + 0.179, + 0.861, + 0.193 + ], + "angle": 0, + "content": "Figure 2. Our portable setup to acquire UnrealEgo-RW." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.224, + 0.892, + 0.331 + ], + "angle": 0, + "content": "UnrealEgo-RW (Real-World) Dataset. With our device, we record various motions of 16 identities in a multi-view motion capture studio (Fig. 1-(d)). We capture simple and challenging activities, e.g., crawling and dancing, for 1.5 hours. This is in strong contrast to the existing real-world stereo dataset [53] (not publicly available) that records only three simple actions, i.e., sitting, standing, and walking." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.332, + 0.892, + 0.438 + ], + "angle": 0, + "content": "In total, we obtained 591 motion segments from 16 identities with various textured clothing. This results in more than \\(130\\mathrm{k}\\) stereo views (260k images) of a resolution \\(872\\times 872\\) pixel rendered at 25 frames per second with ground-truth 3D poses of 16 joints. Note that UnrealEgoRW offers \\(4.3\\times\\) larger data with a wider variety of motions than the publicly available real-world stereo data [31]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.459, + 0.59, + 0.474 + ], + "angle": 0, + "content": "4. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.487, + 0.892, + 0.713 + ], + "angle": 0, + "content": "We propose a new framework for egocentric stereo 3D human pose estimation as shown in Fig. 3. Our framework first estimates the 2D joint heatmaps from egocentric stereo fisheye videos in our 2D module (Sec. 4.1). The heatmaps and input videos are then processed in our segmentation module to obtain 2D human body masks (Sec. 4.2). Next, we use uniformly sampled windows of input frames and human body masks to reconstruct 3D scenes (Sec. 4.3). Here, we render depth maps and depth region masks from the reconstructed mesh. 
Finally, our transformer-based 3D module processes the joint heatmaps, depth information, and joint queries to estimate 3D poses (Sec. 4.4). Here, the 3D module leverages depth padding masks based on the availability of the depth maps as well as joint queries enhanced by the stereo video features from the 2D module." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.73, + 0.686, + 0.744 + ], + "angle": 0, + "content": "4.1.2D Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.755, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Given egocentric stereo videos with \\(T\\) frames \\(\\{\\mathbf{I}_{\\mathrm{Left}}^t,\\mathbf{I}_{\\mathrm{Right}}^t\\in\\) \\(\\mathbb{R}^{H\\times W\\times 3}|t = 1,2,\\dots,T\\}\\) , we use the existing stereo 2D joint heatmap estimator [1] to obtain a sequence of corresponding 2D heatmaps of 15 joints \\(\\{\\mathbf{H}_{\\mathrm{Left}}^t,\\mathbf{H}_{\\mathrm{Right}}^t\\in\\) \\(\\mathbb{R}^{\\frac{H}{4}\\times \\frac{W}{4}\\times 15}\\}\\) , including the neck, upper arms, lower arms, hands, thighs, calves, feet, and balls of the feet. We also extract intermediate feature maps \\(\\{\\mathbf{F}_{\\mathrm{Left}}^t,\\mathbf{F}_{\\mathrm{Right}}^t\\in\\) \\(\\mathbb{R}^{\\frac{H}{32}\\times \\frac{W}{32}\\times C}\\}\\) where \\(C = 512\\) , which are used later in the 3D module." + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "769" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.087, + 0.093, + 0.887, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.406, + 0.892, + 0.492 + ], + "angle": 0, + "content": "Figure 3. Overview of our framework. Our method takes egocentric stereo videos \\(\\{\\mathbf{I}_{\\mathrm{Left}}^t,\\mathbf{I}_{\\mathrm{Right}}^t\\}\\) as inputs. We first apply the 2D module to obtain 2D joint heatmaps \\(\\{\\mathbf{H}_{\\mathrm{Left}}^t,\\mathbf{H}_{\\mathrm{Right}}^t\\}\\) and video features \\(\\{\\mathbf{F}_{\\mathrm{Left}}^t,\\mathbf{F}_{\\mathrm{Right}}^t\\}\\) (Sec. 4.1). The heatmaps are used with input videos to create human body masks \\(\\{\\mathbf{M}_{\\mathrm{Left}}^t,\\mathbf{M}_{\\mathrm{Right}}^t\\}\\) (Sec. 4.2). Next, we use uniformly sampled windows of input frames and human body masks to reconstruct a 3D scene mesh (Sec. 4.3). From the mesh, we generate depth maps \\(\\{\\mathbf{D}_{\\mathrm{Left}}^t,\\mathbf{D}_{\\mathrm{Right}}^t\\}\\) and depth region masks \\(\\{\\mathbf{R}_{\\mathrm{Left}}^t,\\mathbf{R}_{\\mathrm{Right}}^t\\}\\). Note that this diagram shows an example case of missing depth values for the second input frame. Lastly, the depth data, 2D joint heatmaps, video features, joint queries \\(q^{t}\\) and the padding masks \\(V_{\\mathrm{Depth}}^{t}\\) are processed in the 3D module to estimate 3D poses \\(\\mathbf{P}^t\\) (Sec. 4.4)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.515, + 0.325, + 0.531 + ], + "angle": 0, + "content": "4.2. Human Body Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.539, + 0.47, + 0.68 + ], + "angle": 0, + "content": "To reconstruct 3D scenes from egocentric videos, it is necessary to identify the pixels corresponding to the background environment. Therefore, we integrate an existing segmentation method, i.e., ViT-H SAM model [16], as our segmentation network \\(\\mathcal{F}_{\\mathrm{SAM}}\\). 
In this module, we firstly obtain 2D joint locations from the 2D joint heatmap \\(\\{\\widehat{\\mathbf{H}}_{\\mathrm{Left}}^t,\\widehat{\\mathbf{H}}_{\\mathrm{Right}}^t\\}\\). Then, we use the input video frames \\(\\{\\mathbf{I}_{\\mathrm{Left}}^t,\\mathbf{I}_{\\mathrm{Right}}^t\\}\\) and its corresponding 2D joints to extract a human body mask \\(\\{\\mathbf{M}_{\\mathrm{Left}}^t,\\mathbf{M}_{\\mathrm{Right}}^t\\in \\mathbb{R}^{H\\times W\\times 1}\\}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.179, + 0.69, + 0.469, + 0.709 + ], + "angle": 0, + "content": "\\[\n\\mathbf {M} _ {\\text {L e f t}} ^ {t} = \\mathcal {F} _ {\\mathrm {S A M}} \\left(\\mathbf {I} _ {\\text {L e f t}} ^ {t}, \\widehat {\\mathbf {H}} _ {\\text {L e f t}} ^ {t}\\right). \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.717, + 0.47, + 0.779 + ], + "angle": 0, + "content": "The same process can be applied to obtain \\(\\mathbf{M}_{\\mathrm{Right}}^t\\). Note that we use the SAM model without re-training on ground-truth human body masks. Instead, we guide the predictions of SAM using joint positions extracted from the 2D heatmaps." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.786, + 0.305, + 0.801 + ], + "angle": 0, + "content": "4.3. 3D Scene Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.809, + 0.469, + 0.901 + ], + "angle": 0, + "content": "We aim to reconstruct 3D environments from uniformly sampled windows of input frames \\(\\{\\mathbf{I}_{\\mathrm{Left}}^t,\\mathbf{I}_{\\mathrm{Right}}^t\\}\\) and human body masks \\(\\{\\mathbf{M}_{\\mathrm{Left}}^t,\\mathbf{M}_{\\mathrm{Right}}^t\\}\\) with a fixed length. The length is set to 4 seconds (some motion data contains shorter sequences). Given these data, we use Metashape [24] to perform SfM to obtain camera poses and a 3D scene" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.516, + 0.892, + 0.746 + ], + "angle": 0, + "content": "mesh. Here, as the baseline length between stereo cameras is known, i.e., \\(12\\mathrm{cm}\\), we can obtain the mesh in the real-world scale. Next, we render down-sampled depth maps \\(\\{\\mathbf{D}_{\\mathrm{Left}}^t,\\mathbf{D}_{\\mathrm{Right}}^t\\in \\mathbb{R}^{\\frac{H}{4}\\times \\frac{W}{4}\\times 1}\\}\\) and depth region masks \\(\\{\\mathbf{R}_{\\mathrm{Left}}^t,\\mathbf{R}_{\\mathrm{Right}}^t\\in \\mathbb{R}^{\\frac{H}{4}\\times \\frac{W}{4}\\times 1}\\}\\) from the reconstructed 3D scene mesh. The depth region masks show the regions where the depth values are obtained from the 3D scene. This depth information will be used later in the 3D module as additional cues for pose estimation. However, there are some cases where the egocentric RGB videos are largely occupied by a human body. In such scenarios, the 3D scene can not be reconstructed or camera poses can not be estimated. This results in missing (invalid) depth values and undesired computation of network gradients during training. Therefore, we tackle this issue in our 3D module." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.757, + 0.686, + 0.771 + ], + "angle": 0, + "content": "4.4.3D Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.903 + ], + "angle": 0, + "content": "In the 3D module, we aim to estimate a sequence of 3D poses by considering scene information and the temporal context of the egocentric stereo videos. 
Specifically, given the 2D joint heatmaps, depth maps, depth region masks, and \\(T\\) sets of joint queries \\(q^{t} \\in \\mathbb{R}^{16 \\times \\frac{C}{2}}\\), we use a transformer decoder to estimate a sequence of 3D poses \\(\\{\\mathbf{P}^t \\in \\mathbb{R}^{16 \\times 3} | t = 1, 2, \\dots, T\\}\\). Our pose output is the 3D pose at the last time step \\(\\mathbf{P}^T\\). We follow the existing" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "770" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.091, + 0.468, + 0.213 + ], + "angle": 0, + "content": "
Method | Task | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑)
Zhao et al. [53] | Pelvis relative | 86.45 | 63.71 | 85.97 | 50.50
Akada et al. [1] | Pelvis relative | 78.98 | 59.30 | 88.81 | 54.31
Kang et al. [12] | Pelvis relative | 60.82 | 48.47 | - | -
Baseline | Pelvis relative | 59.85 | 49.14 | 92.07 | 63.88
Ours | Pelvis relative | 50.55 | 40.50 | 93.83 | 70.61
Zhao et al. [53] | Device relative | 88.12 | 65.36 | 85.10 | 50.37
Akada et al. [1] | Device relative | 84.53 | 63.92 | 87.05 | 52.76
Baseline | Device relative | 63.44 | 50.97 | 92.30 | 64.54
Ours | Device relative | 46.20 | 40.19 | 94.02 | 73.53
" + }, + { + "type": "table_caption", + "bbox": [ + 0.088, + 0.217, + 0.458, + 0.232 + ], + "angle": 0, + "content": "Table 1. Quantitative results on UnrealEgo [1] with mm-scale." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.246, + 0.468, + 0.32 + ], + "angle": 0, + "content": "
Method | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑)
Zhao et al. [53] | 79.64 | 58.22 | 88.50 | 53.82
Akada et al. [1] | 72.80 | 52.88 | 91.32 | 55.81
Baseline | 52.23 | 39.78 | 95.72 | 68.13
Ours | 30.53 | 26.72 | 97.22 | 80.75
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.323, + 0.468, + 0.352 + ], + "angle": 0, + "content": "Table 2. Quantitative results of device-relative pose estimation on UnrealEgo2 with mm-scale." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.375, + 0.459, + 0.39 + ], + "angle": 0, + "content": "works [1, 37, 38] to estimate 16 joints including the head." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.39, + 0.468, + 0.468 + ], + "angle": 0, + "content": "Depth and Heatmap Features. We use the sequence of the depth maps, depth region masks, and the 2D joint heatmaps as the memory of a cross-attention operation in the transformer decoder. For this purpose, we extract depth features \\(\\{\\mathbf{U}_{\\mathrm{Left}}^t,\\mathbf{U}_{\\mathrm{Right}}^t\\in \\mathbb{R}^{\\frac{H}{32}\\times \\frac{W}{32}\\times \\frac{C}{2}}\\}\\) from the depth data:" + }, + { + "type": "equation", + "bbox": [ + 0.17, + 0.476, + 0.469, + 0.495 + ], + "angle": 0, + "content": "\\[\n\\mathbf {U} _ {\\text {L e f t}} ^ {t} = \\mathcal {F} _ {\\text {D e p t h}} \\left(\\mathbf {D} _ {\\text {L e f t}} ^ {t} \\oplus \\widehat {\\mathbf {R}} _ {\\text {L e f t}} ^ {t}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.501, + 0.469, + 0.548 + ], + "angle": 0, + "content": "where “\\(\\oplus\\)” is a concatenation operation along the channel axis and \\(\\mathcal{F}_{\\mathrm{Depth}}\\) represents a feature extractor. The same process can be applied to obtain \\(\\mathbf{U}_{\\mathrm{Right}}^t\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.548, + 0.469, + 0.582 + ], + "angle": 0, + "content": "Similarly, we extract heatmap features \\(\\{\\mathbf{G}_{\\mathrm{Left}}^t,\\mathbf{G}_{\\mathrm{Right}}^t\\in\\) \\(\\mathbb{R}^{\\frac{H}{16}\\times \\frac{W}{16}\\times C}\\}\\) from the 2D heatmaps:" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.59, + 0.469, + 0.609 + ], + "angle": 0, + "content": "\\[\n\\mathbf {G} _ {\\text {L e f t}} ^ {t} = \\mathcal {F} _ {\\mathrm {H M}} \\left(\\widehat {\\mathbf {H}} _ {\\text {L e f t}} ^ {t}\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.616, + 0.469, + 0.647 + ], + "angle": 0, + "content": "where \\(\\mathcal{F}_{\\mathrm{HM}}\\) represents another feature extractor. The same process can be applied to obtain \\(\\mathbf{G}_{\\mathrm{Right}}^t\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.647, + 0.469, + 0.755 + ], + "angle": 0, + "content": "These features are forwarded with positional embeddings into the transformer. However, as mentioned in Sec. 4.3, depth values can be missing in some frames. To prevent processing features of such depth data and let the network focus only on valid frames, we propose to add padding masks \\( V_{\\mathrm{Depth}}^{t} \\in \\mathcal{R} \\) to all the elements of \\( \\{\\mathbf{U}_{\\mathrm{Left}}^{t}, \\mathbf{U}_{\\mathrm{Right}}^{t}\\} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.12, + 0.763, + 0.469, + 0.804 + ], + "angle": 0, + "content": "\\[\nV _ {\\text {D e p t h}} ^ {t} = \\left\\{ \\begin{array}{l l} - \\inf , & \\text {i f d e p t h v a l u e s a r e m i s s i n g} \\\\ 0, & \\text {o t h e r w i s e} \\end{array} . \\right. 
\\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.81, + 0.469, + 0.856 + ], + "angle": 0, + "content": "When \\( V_{\\mathrm{Depth}}^{t} = -\\inf \\), the depth features \\( \\{\\mathbf{U}_{\\mathrm{Left}}^{t}, \\mathbf{U}_{\\mathrm{Right}}^{t}\\} \\) after the softmax function in self-attention layers of the transformer will have zero effect on the network training." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Stereo-Video-Dependent Joint Query Adaptation. The existing work [44] represents human joints as learnable positional embeddings called joint queries that encode prior" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.092, + 0.888, + 0.164 + ], + "angle": 0, + "content": "
Method | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑)
Zhao et al. [53] | 117.57 | 88.01 | 73.12 | 38.94
Akada et al. [1] | 122.64 | 86.55 | 72.51 | 38.67
Baseline | 115.95 | 85.00 | 74.13 | 40.11
Ours | 104.14 | 82.18 | 80.20 | 46.22
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.17, + 0.892, + 0.198 + ], + "angle": 0, + "content": "Table 3. Quantitative results of device-relative pose estimation on UnrealEgo-RW with mm-scale." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.216, + 0.892, + 0.382 + ], + "angle": 0, + "content": "knowledge about the skeleton joints. In our problem setting, the simplest way to design such joint queries is to set queries for each pose in a motion sequence. However, this can not capture the temporal context in video inputs, e.g., human motions and background changes. Therefore, we extend the multi-view joint query augmentation technique [44] for our stereo video setting to account for sequential information. Specifically, we enhance the joint queries with the temporal intermediate features of stereo RGB frames \\(\\{\\mathbf{F}_{\\mathrm{Left}}^t,\\mathbf{F}_{\\mathrm{Right}}^t\\}\\). Firstly, from the sequence of the intermediate features, we create a sequence of combined features \\(\\mathbf{F}^{t}\\in \\mathbb{R}^{\\frac{H}{32}\\times \\frac{W}{32}\\times \\frac{C}{2}}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.606, + 0.392, + 0.891, + 0.41 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} ^ {t} = \\operatorname {c o n v} \\left(\\mathbf {F} _ {\\text {L e f t}} ^ {t} \\oplus \\mathbf {F} _ {\\text {r i g h t}}\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.419, + 0.891, + 0.449 + ], + "angle": 0, + "content": "where \"conv(\\cdot)\" is a convolution operation with a kernel size of \\(1 \\times 1\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.45, + 0.891, + 0.48 + ], + "angle": 0, + "content": "Next, we fuse the sequence of the combined features \\(\\mathbf{F}^t\\) to obtain a fused stereo features \\(\\mathbf{F}_{\\mathrm{Stereo}} \\in \\mathbb{R}^{\\frac{C}{2}}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.542, + 0.489, + 0.891, + 0.508 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {\\text {S t e r e o}} = \\mathbf {F} _ {\\mathrm {P}} ^ {1} \\oplus \\dots \\oplus \\mathbf {F} _ {\\mathrm {P}} ^ {T}, \\text {w h e r e} \\mathbf {F} _ {\\mathrm {P}} ^ {i} = p (\\mathbf {F} ^ {i}), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.517, + 0.89, + 0.547 + ], + "angle": 0, + "content": "where \\( p(\\cdot) \\) is an operation of adaptive average pooling. Now, the feature \\( \\mathbf{F}_{\\mathrm{Stereo}} \\) contains stereo video information." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.548, + 0.89, + 0.582 + ], + "angle": 0, + "content": "Lastly, with \\(\\mathbf{F}_{\\mathrm{Stereo}}\\) and a fully connected layer \" \\(\\operatorname {fc}(\\cdot)\\) \", we augment each query \\(q^{t}\\) to obtain \\(q_{\\mathrm{Aug}}^t\\in \\mathbb{R}^{16\\times \\frac{C}{2}}\\) .." + }, + { + "type": "equation", + "bbox": [ + 0.614, + 0.591, + 0.891, + 0.61 + ], + "angle": 0, + "content": "\\[\n\\mathbf {q} _ {\\text {A u g}} ^ {t} = \\operatorname {f c} \\left(\\mathbf {F} _ {\\text {S t e r e o}}\\right) + q ^ {t}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.618, + 0.892, + 0.76 + ], + "angle": 0, + "content": "Transformer Decoder. We adopt a DETR [2]-based transformer decoder and a pose regression head. In decoder layers, all of the augmented joint queries \\( q_{\\mathrm{Aug}}^{t} \\) first interact with each other on a self-attention layer. 
Then, the queries extract all of the temporal stereo features from the memory \\( \\{\\mathbf{U}_{\\mathrm{Left}}^{t}, \\mathbf{U}_{\\mathrm{Right}}^{t}, \\mathbf{G}_{\\mathrm{Left}}^{t}, \\mathbf{G}_{\\mathrm{Right}}^{t}\\} \\) with the padding masks \\( V_{\\mathrm{Depth}}^{t} \\) on a cross-attention layer. Lastly, the pose regression head estimates a sequence of 3D poses \\( \\{\\hat{\\mathbf{P}}^t \\in \\mathbb{R}^{16 \\times 3} | t = 1, 2, \\dots, T\\} \\), yielding the final pose output \\( \\mathbf{P}^T \\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.76, + 0.892, + 0.804 + ], + "angle": 0, + "content": "Similar to the previous works [5, 49], we train the 3D module with the pose supervision of the current and past frames:" + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.813, + 0.891, + 0.853 + ], + "angle": 0, + "content": "\\[\nL _ {3 \\mathrm {D}} = L _ {\\text {p o s e}} \\left(\\mathbf {P} ^ {T}, \\hat {\\mathbf {P}} ^ {T}\\right) + \\frac {\\lambda_ {\\text {p a s t}}}{(T - 1)} \\sum_ {t = 1} ^ {T - 1} L _ {\\text {p o s e}} \\left(\\mathbf {P} ^ {t}, \\hat {\\mathbf {P}} ^ {t}\\right), \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.545, + 0.865, + 0.891, + 0.89 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {p o s e}} (\\mathbf {P}, \\hat {\\mathbf {P}}) = \\lambda_ {\\text {p o s e}} (\\operatorname {m p j p e} (\\mathbf {P}, \\hat {\\mathbf {P}}) + \\quad \\tag {9}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.642, + 0.887, + 0.846, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\lambda_ {\\cos} \\cos (\\operatorname {b o n e} (\\mathbf {P}), \\operatorname {b o n e} (\\hat {\\mathbf {P}}))),\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.51, + 0.957 + ], + "angle": 0, + "content": "771" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.159, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.161, + 0.09, + 0.24, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.246, + 0.09, + 0.318, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.09, + 0.4, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.405, + 0.09, + 0.481, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.214, + 0.159, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.161, + 0.214, + 0.24, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.246, + 0.214, + 0.318, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.214, + 0.4, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.405, + 0.214, + 0.481, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.279, + 0.196, + 0.291 + ], + "angle": 0, + "content": "Stereo inputs" + }, + { + "type": "image_caption", + "bbox": [ + 0.24, + 0.279, + 0.323, + 0.291 + ], + "angle": 0, + "content": "Akada et al. 
[1]" + }, + { + "type": "image_caption", + "bbox": [ + 0.34, + 0.279, + 0.388, + 0.29 + ], + "angle": 0, + "content": "Baseline" + }, + { + "type": "image_caption", + "bbox": [ + 0.431, + 0.279, + 0.459, + 0.29 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.488, + 0.089, + 0.568, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.57, + 0.09, + 0.648, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.651, + 0.09, + 0.729, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.732, + 0.09, + 0.81, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.813, + 0.09, + 0.892, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.488, + 0.214, + 0.568, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.571, + 0.214, + 0.648, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.651, + 0.214, + 0.729, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.731, + 0.214, + 0.81, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.813, + 0.214, + 0.892, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.536, + 0.279, + 0.607, + 0.291 + ], + "angle": 0, + "content": "Stereo inputs" + }, + { + "type": "image_caption", + "bbox": [ + 0.651, + 0.279, + 0.735, + 0.291 + ], + "angle": 0, + "content": "Akada et al. [1]" + }, + { + "type": "image_caption", + "bbox": [ + 0.748, + 0.279, + 0.794, + 0.29 + ], + "angle": 0, + "content": "Baseline" + }, + { + "type": "image_caption", + "bbox": [ + 0.836, + 0.279, + 0.863, + 0.29 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.296, + 0.892, + 0.324 + ], + "angle": 0, + "content": "Figure 4. Qualitative results of device-relative pose estimation. Left: UnrealEgo2. Right: UnrealEgo-RW. 3D pose prediction and ground truth are displayed in red and green, respectively. For UnrealEgo-RW, we show ground-truth scene meshes for visualization." + }, + { + "type": "table", + "bbox": [ + 0.084, + 0.35, + 0.465, + 0.487 + ], + "angle": 0, + "content": "
Method | MPJPE(↓) | PA-MPJPE(↓)
(a) Baseline with depth information | 120.39 | 86.23
Baseline | 115.36 | 84.80
(b) Ours w/o query adaptation | 108.33 | 86.69
(c) Ours w/o depth information | 112.56 | 84.37
(d) Ours w/o depth padding mask | 108.70 | 84.26
(e) Ours with latest pose supervision only | 105.67 | 83.46
(f) Ours with a single set of queries | 105.58 | 85.68
Ours | 104.14 | 82.18
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.491, + 0.469, + 0.52 + ], + "angle": 0, + "content": "Table 4. Ablation study of our model for device-relative pose estimation on UnrealEgo-RW with mm-scale." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.546, + 0.47, + 0.607 + ], + "angle": 0, + "content": "where \\(\\mathbf{P}\\) is a ground-truth 3D pose, \\(\\mathrm{mpjpe}(\\cdot)\\) is the mean per joint position error, \\(\\cos (\\cdot)\\) is a negative cosine similarity, and \\(\\mathrm{bone}(\\cdot)\\) is an operation of obtaining bones of the 3D poses as used in the previous work [1]:" + }, + { + "type": "equation", + "bbox": [ + 0.101, + 0.618, + 0.469, + 0.66 + ], + "angle": 0, + "content": "\\[\n\\operatorname {m p j p e} (\\mathbf {P}, \\hat {\\mathbf {P}}) = \\frac {1}{N J} \\sum_ {n = 1} ^ {N} \\sum_ {j = 1} ^ {J} | | \\mathbf {P} _ {n, j} - \\hat {\\mathbf {P}} _ {n, j} | | _ {2}, \\tag {10}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.104, + 0.672, + 0.469, + 0.713 + ], + "angle": 0, + "content": "\\[\n\\cos (\\mathbf {B}, \\hat {\\mathbf {B}}) = - \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sum_ {m = 1} ^ {M} \\frac {\\mathbf {B} _ {n , m} \\cdot \\hat {\\mathbf {B}} _ {n , m}}{| | \\mathbf {B} _ {n , m} | | | | \\hat {\\mathbf {B}} _ {n , m} | |}, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.718, + 0.47, + 0.75 + ], + "angle": 0, + "content": "where \\(N\\) is batch size, \\(J\\) is the number of joints, \\(M\\) is the number of bones, and \\(\\mathbf{B}_{n,m} \\in \\mathbb{R}^3\\) is a vector of \\(m\\)-th bone." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.763, + 0.208, + 0.78 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.787, + 0.294, + 0.802 + ], + "angle": 0, + "content": "5.1. Datasets for Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.81, + 0.47, + 0.901 + ], + "angle": 0, + "content": "We use three datasets for our experiments: UnrealEgo [1], UnrealEgo2, and UnrealEgo-RW. For UnrealEgo, we use their proposed data splits. Also, we divide UnrealEgo2 into 12,139 motions (1,002,656 stereo views) for training, 1,545 motions (127,968 stereo views) for validation, and 1523 motions (123,488 stereo views) for testing. Similarly, we" + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.35, + 0.89, + 0.414 + ], + "angle": 0, + "content": "
Method | Upper body MPJPE(↓) | Lower body MPJPE(↓) | Foot MPJPE(↓) | Foot MPE(↓)
Ours w/o depth information | 80.82 | 144.31 | 174.45 | 6.39
Ours w/o depth padding masks | 77.29 | 140.10 | 169.95 | 5.02
Ours | 77.85 | 130.97 | 155.86 | 4.83
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.418, + 0.892, + 0.446 + ], + "angle": 0, + "content": "Table 5. The effect of scene information (depth) per body part on UnrealEgo-RW. The numbers are in mm." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.472, + 0.892, + 0.579 + ], + "angle": 0, + "content": "split UnrealEgo-RW into 547 motions (51,936 stereo views) for training, 77 motions (7,616 stereo views) for validation, and 86 motions (7,936 stereo views) for testing. We follow the existing works [1, 11, 37, 38, 40-42, 45, 52, 53] to report the results of device-relative 3D pose estimation. For UnrealEgo, we also follow the existing works [1, 12] to include the results of pelvis-relative 3D pose estimation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.588, + 0.661, + 0.604 + ], + "angle": 0, + "content": "5.2. Training Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.611, + 0.892, + 0.837 + ], + "angle": 0, + "content": "We resize the input RGB images and ground-truth 2D keypoint heatmaps to \\(256 \\times 256\\) and \\(64 \\times 64\\) pixels, respectively. For the training of the 2D module, we follow the previous work [1] to use the ResNet18 [8] pre-trained on ImageNet [4] as an encoder and train the module with a batch size of 16 and an initial learning rate of \\(10^{-3}\\). Then, we train the 3D module with a batch size of 32 and an initial learning rate of \\(2 \\cdot 10^{-4}\\). The modules are trained with Adam optimizer [15] for ten epochs, starting with the initial learning rate for the first half epochs and applying a linearly decaying rate for the next half. Also, we set the hyperparameters as \\(\\lambda_{\\mathrm{pose}} = 0.1\\), \\(\\lambda_{\\mathrm{cos}} = 0.01\\), and \\(\\lambda_{\\mathrm{past}} = 0.1\\). We use five sequential stereo views as inputs to our model, i.e., \\(T = 5\\), with a skip size of 3. See our supplement for more details on the network architecture." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.848, + 0.62, + 0.862 + ], + "angle": 0, + "content": "5.3. Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We compare our method with existing stereo-based egocentric pose estimation methods [1, 53]. 
We use the of" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "772" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.09, + 0.516, + 0.181 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.539, + 0.088, + 0.645, + 0.169 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.573, + 0.169, + 0.732, + 0.182 + ], + "angle": 0, + "content": "3D-to-2D pose reprojection" + }, + { + "type": "image", + "bbox": [ + 0.661, + 0.089, + 0.765, + 0.168 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.66, + 0.169, + 0.732, + 0.181 + ], + "angle": 0, + "content": "reprojection" + }, + { + "type": "image", + "bbox": [ + 0.78, + 0.089, + 0.885, + 0.167 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.772, + 0.169, + 0.883, + 0.181 + ], + "angle": 0, + "content": "3D pose estimation" + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.183, + 0.516, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.54, + 0.19, + 0.645, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.573, + 0.272, + 0.731, + 0.285 + ], + "angle": 0, + "content": "3D-to-2D pose reprojection" + }, + { + "type": "image", + "bbox": [ + 0.661, + 0.19, + 0.765, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.66, + 0.272, + 0.731, + 0.285 + ], + "angle": 0, + "content": "reprojection" + }, + { + "type": "image", + "bbox": [ + 0.78, + 0.19, + 0.885, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.772, + 0.272, + 0.883, + 0.284 + ], + "angle": 0, + "content": "3D pose estimation" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.289, + 0.893, + 0.331 + ], + "angle": 0, + "content": "Figure 5. Results of our framework and comparison methods on example sequences from UnrealEgo2 (above) and UnrealEgo-RW (below). Left: MPJPE curves. Right: Outputs of our method at frame 87 and 329 of the sequences, respectively. 3D pose estimation and ground truth are colored in red and green, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.347, + 0.473, + 0.633 + ], + "angle": 0, + "content": "ficial source code of Akada et al. [1] and re-implement the framework of Zhao et al. [53] as its source code is not available. Note that the comparison methods are trained on the same datasets as our model. Kang et al. [12] (arXiv preprint at the time of submission) only shows results of the pelvis-relative estimation on UnrealEgo. Therefore, we include them for reference. Furthermore, we are interested in the performance of the publicly available state-of-the-art method [1] with temporal inputs. Thus, we modify their 3D module such that it can take as an input a sequence of stereo 2D keypoint heatmaps with the same time step as ours, i.e., \\( T = 5 \\). Here, we replace the first and the last fully connected layers in the encoder, the pose decoder, and the heatmap reconstruction decoder of their autoencoder-based 3D module [1] by those with \\( T \\) times the size of the original hidden dimension. We denote this model as Baseline and train it with the same training procedure as Akada et al. [1]. Note that Akada et al. [1], Baseline, and our model use the same 2D module." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.639, + 0.47, + 0.731 + ], + "angle": 0, + "content": "We follow the existing works [1, 11, 37, 38, 40-42, 45, 52, 53] to report Mean Per Joint Position Error (MPJPE) and Mean Per Joint Position Error with Procrustes Alignment [13] (PA-MPJPE). We additionally report 3D Percentage of Correct Keypoints (3D PCK) and Area Under the Curve (AUC) for UnrealEgo2 and UnrealEgo-RW." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Results on Synthetic Datasets. Tables 1 and 2 report the results with UnrealEgo [1] and UnrealEgo2. Our method outperforms the existing methods [1, 12, 53] and Baseline across all metrics by a significant margin, e.g., \\(>15\\%\\) on UnrealEgo [1] and \\(\\geq 40\\%\\) on UnrealEgo2 (on MPJPE). The qualitative results on UnrealEgo2 in Fig. 4-(left part) show that existing methods and Baseline fail to estimate lower bodies of complex poses with severe self-occlusions, such as crouching. Even under such challenging scenarios, however, our approach yields accurate 3D poses. See Fig. 5-(above part) for a MPJPE curve and visual outputs of our" + }, + { + "type": "table", + "bbox": [ + 0.526, + 0.347, + 0.869, + 0.42 + ], + "angle": 0, + "content": "
| Method | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑) |
| --- | --- | --- | --- | --- |
| T = 1 | 108.63 | 84.69 | 77.98 | 44.15 |
| T = 3 | 108.23 | 85.28 | 78.35 | 44.54 |
| T = 5 | 104.14 | 82.18 | 80.20 | 46.22 |
| T = 7 | 104.01 | 82.43 | 80.52 | 46.10 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.425, + 0.892, + 0.453 + ], + "angle": 0, + "content": "Table 6. Ablation study of our model with different sequence lengths on UnrealEgo-RW. The numbers are in mm." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.463, + 0.892, + 0.903 + ], + "angle": 0, + "content": "framework on UnrealEgo2. Our method is able to constantly estimate accurate 3D poses compared to the existing methods. As evidenced by these results, our method demonstrates superiority and becomes a strong benchmark method in the egocentric stereo 3D pose estimation tasks. See our supplementary material and video for more results. Results on the Real-World Dataset. Table 3 shows quantitative results on UnrealEgo-RW. Again, our method outperforms the existing methods [1, 53] and Baseline across all metrics, e.g., by more than \\(10\\%\\) on MPJPE. See Fig. 4-(right) for qualitative results. The current state-of-the-art methods [1, 53] or Baseline show floating feet, inaccurate pelvis position, and penetration to the floor ground. However, our method is able to estimate accurate 3D poses. See Fig. 5-(below part) for a MPJPE curve and visual outputs on an example motion of UnrealEgo-RW. The curve indicates that our method constantly shows lower 3D errors than the comparison methods. All of the results indicate the effectiveness of our proposed framework compared to the existing methods. We also visualize 2D heatmaps, 3D-to-2D pose reprojection, and 3D pose prediction from our method in Fig. 6. Even when the joint locations of the lower body are estimated closely in the 2D heatmaps, our approach predicts accurate lower body poses. These results suggest that the proposed method with our portable device can open up the possibility of many future applications, including animating virtual humans (Fig. 1-(g)). 
For the virtual human animation, we applied inverse kinematics with estimated 3D joint locations and ground-truth camera poses to drive the" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "773" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.08, + 0.089, + 0.192, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.159, + 0.178, + 0.23, + 0.19 + ], + "angle": 0, + "content": "Stereo inputs" + }, + { + "type": "image", + "bbox": [ + 0.196, + 0.089, + 0.308, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.313, + 0.089, + 0.425, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.429, + 0.089, + 0.543, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.354, + 0.178, + 0.504, + 0.19 + ], + "angle": 0, + "content": "2D joint heatmap estimation" + }, + { + "type": "image", + "bbox": [ + 0.546, + 0.089, + 0.658, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.59, + 0.178, + 0.735, + 0.19 + ], + "angle": 0, + "content": "3D-to-2D pose reprojection" + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.089, + 0.775, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.781, + 0.089, + 0.892, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.783, + 0.178, + 0.885, + 0.19 + ], + "angle": 0, + "content": "3D pose estimation" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.195, + 0.893, + 0.224 + ], + "angle": 0, + "content": "Figure 6. Visualization of outputs from our model on UnrealEgo-RW. 3D-to-2D pose reprojection is visualized in the same colors as in Fig. 1-(e). 3D pose estimation and ground truth are displayed in red and green, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.243, + 0.338, + 0.258 + ], + "angle": 0, + "content": "character in a world coordinate system." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.26, + 0.468, + 0.351 + ], + "angle": 0, + "content": "Ablation Study. In Table 4, we first ablate (a) the CNN-based 3D module (Baseline) with depth data concatenated to the heatmap inputs. However, naively adding this extra scene information to this 3D module does not help probably because the CNN layers can be affected by invalid depth values even with the depth region masks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.353, + 0.468, + 0.61 + ], + "angle": 0, + "content": "Next, we test our transformer-based 3D module (b) without query augmentation (c) without depth data. They perform worse than our full framework. We also ablate our method (d) without the padding mask. The result indicates that adding depth padding masks helps because the padding mask can filter out the invalid values in depth maps in the attention module. These results validate that our video-based 3D scene reconstruction module and video-dependent query augmentation policy boost 3D joint localization accuracy. Next, we ablate our model (e) with 3D pose supervision of the latest frame only. Note that this ablation uses the same sets of input data and joint queries as the original model, i.e., \\( T = 5 \\). This model estimates less accurate poses due to the loss of supervision from past 3D poses. 
We also test (f) a single set of joint queries, i.e., \\( q^1 \\), instead of \\( T \\) sets to predict the latest 3D pose. Similar to (e), this model cannot benefit from the supervision of past 3D poses." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.612, + 0.468, + 0.703 + ], + "angle": 0, + "content": "We further investigate the effect of the scene information. Table 5 shows the MPJPE per body part and Mean Penetration Error (MPE) [34, 35] between feet and floor ground. The results reveal that depth features with the padding masks reduce the errors in the lower body while maintaining the performance in the upper body." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.704, + 0.468, + 0.901 + ], + "angle": 0, + "content": "In Table 6, we ablate the effect of the sequence length of input frames for our method. It is worth noting that our model with \\( T = 1 \\) yields better results than the best existing method [1] and Baseline that utilizes temporal information (see Table 3). Since our model uses the same 2D module as Akada et al. [1] and Baseline, the difference comes only from the 3D module. This suggests that their autoencoder-based 3D modules with the heatmap reconstruction component are, very likely, not the most suitable solution for estimating 3D poses from 2D joint heatmaps, highlighting the potential of our transformer-based framework. The result also indicates that although the longer sequence can bring performance improvement to some extent, the se" + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.242, + 0.89, + 0.355 + ], + "angle": 0, + "content": "
| Method | Initial training data | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑) |
| --- | --- | --- | --- | --- | --- |
| Zhao et al. [53] | UnrealEgo [1] | 99.09 | 72.47 | 79.82 | 43.55 |
| Akada et al. [1] | UnrealEgo [1] | 94.87 | 69.79 | 82.78 | 46.80 |
| Baseline | UnrealEgo [1] | 83.89 | 64.30 | 86.20 | 51.63 |
| Ours | UnrealEgo [1] | 75.34 | 57.29 | 89.43 | 55.77 |
| Zhao et al. [53] | UnrealEgo2 | 97.86 | 69.92 | 81.53 | 46.32 |
| Akada et al. [1] | UnrealEgo2 | 92.48 | 67.15 | 84.25 | 48.04 |
| Baseline | UnrealEgo2 | 82.16 | 61.60 | 87.07 | 52.72 |
| Ours | UnrealEgo2 | 72.89 | 56.19 | 90.29 | 57.19 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.359, + 0.892, + 0.387 + ], + "angle": 0, + "content": "Table 7. Fine-tuning results of device-relative 3D pose estimation on UnrealEgo-RW with mm-scale." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.408, + 0.892, + 0.619 + ], + "angle": 0, + "content": "quence lengths of five and seven show comparable results. Synthetic Data for Pre-training. No existing works explored the efficacy of synthetic data for pre-training in egocentric 3D pose estimation. Thus, we further conduct experiments with models pre-trained on the synthetic datasets and fine-tuned on the real-world data. Tables 3 and 7 show that all methods benefit from the training with the large-scale synthetic data even with the differences in the synthetic and real-world setups, e.g., fisheye distortion and syn-to-real domain gaps. Note that the gain of our method from UnrealEgo to UnrealEgo2 is significant, i.e., \\(3.3\\%\\) on MPJPE (75.34mm to 72.89mm). This suggests that it is helpful to develop not only new models but also large-scale synthetic datasets even with different distortion and domain gaps." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.634, + 0.619, + 0.649 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.84 + ], + "angle": 0, + "content": "In this paper, we proposed a new transformer-based framework that significantly boosts the accuracy of egocentric stereo 3D human pose estimation. The proposed framework leverages the scene information and temporal context of egocentric stereo video inputs via our video-based 3D scene reconstruction module and video-based joint query augmentation policy. Our extensive experiments on the new synthetic and real-world datasets with challenging human motions validate the effectiveness of our approach compared to the existing methods. We hope that our proposed benchmark datasets and trained models will foster the further development of methods for egocentric 3D vision." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Acknowledgment. The work was supported by the ERC Consolidator Grant 4DReply (770784) and the Nakajima Foundation. We thank Silicon Studio Corp. for providing the fisheye plug-in for Unreal Engine." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.945, + 0.511, + 0.956 + ], + "angle": 0, + "content": "774" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Hiroyasu Akada, Jian Wang, Soshi Shimada, Masaki Takahashi, Christian Theobalt, and Vladislav Golyanik. Unrealego: A new dataset for robust egocentric 3d human motion capture. In European Conference on Computer Vision (ECCV), 2022. 1, 2, 3, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.185, + 0.472, + 0.24 + ], + "angle": 0, + "content": "[2] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision (ECCV), 2020. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.242, + 0.471, + 0.338 + ], + "angle": 0, + "content": "[3] Young-Woon Cha, True Price, Zhen Wei, Xinran Lu, Nicholas Rewkowski, Rohan Chabra, Zihe Qin, Hyounghun Kim, Zhaoqi Su, Yebin Liu, Adrian Ilie, Andrei State, Zhenlin Xu, Jan-Michael Frahm, and Henry Fuchs. Towards fully mobile 3d face, body, and environment capture using only head-worn cameras. IEEE Transactions on Visualization and Computer Graphics, 24(11):2993-3004, 2018. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.339, + 0.47, + 0.381 + ], + "angle": 0, + "content": "[4] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei. ImageNet: A Large-Scale Hierarchical Image Database. In Computer Vision and Pattern Recognition (CVPR), 2009. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.382, + 0.469, + 0.436 + ], + "angle": 0, + "content": "[5] Moritz Einfalt, Katja Ludwig, and Rainer Lienhart. Uplift and upsample: Efficient 3d human pose estimation with up-lifting transformers. In Winter Conference on Applications of Computer Vision (WACV), 2023. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.437, + 0.469, + 0.478 + ], + "angle": 0, + "content": "[6] FUJINON FE185C057HA-1 fisheye lens, 2023. https:// www.fujifilm.com/de/de/business/opticaldevices/mvlems/fe185.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.48, + 0.469, + 0.548 + ], + "angle": 0, + "content": "[7] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Computer Vision and Pattern Recognition (CVPR), 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.549, + 0.469, + 0.591 + ], + "angle": 0, + "content": "[8] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Computer Vision and Pattern Recognition (CVPR), 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.592, + 0.469, + 0.633 + ], + "angle": 0, + "content": "[9] Yihui He, Rui Yan, Katerina Fragkiadaki, and Shoou-I Yu. Epipolar transformers. In Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.634, + 0.469, + 0.661 + ], + "angle": 0, + "content": "[10] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.662, + 0.469, + 0.703 + ], + "angle": 0, + "content": "[11] Hao Jiang and Vamsi Krishna Ithapu. Egocentric pose estimation from human vision span. In International Conference on Computer Vision (ICCV), 2021. 1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.704, + 0.469, + 0.746 + ], + "angle": 0, + "content": "[12] Taeho Kang, Kyungjin Lee, Jinrui Zhang, and Youngki Lee. Ego3dpose: Capturing 3d cues from binocular egocentric views. In SIGGRAPH Asia Conference, 2023. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.747, + 0.469, + 0.773 + ], + "angle": 0, + "content": "[13] David G. Kendall. A Survey of the Statistical Theory of Shape. Statistical Science, 4(2):87-99, 1989. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.469, + 0.83 + ], + "angle": 0, + "content": "[14] Rawal Khirodkar, Aayush Bansal, Lingni Ma, Richard Newcombe, Minh Vo, and Kris Kitani. Ego-humans: An egocentric 3d multi-human benchmark. In International Conference on Computer Vision (ICCV), 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.831, + 0.469, + 0.872 + ], + "angle": 0, + "content": "[15] Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations (ICLR), 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[16] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer White-" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.133 + ], + "angle": 0, + "content": "head, Alexander C. Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything. arXiv:2304.02643, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.892, + 0.177 + ], + "angle": 0, + "content": "[17] Jiaman Li, Karen Liu, and Jiajun Wu. Ego-body pose estimation via ego-head pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.233 + ], + "angle": 0, + "content": "[18] Wenhao Li, Hong Liu, Runwei Ding, Mengyuan Liu, Pichao Wang, and Wenming Yang. Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia (TMM), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.235, + 0.892, + 0.29 + ], + "angle": 0, + "content": "[19] Wenhao Li, Hong Liu, Hao Tang, Pichao Wang, and Luc Van Gool. Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.291, + 0.892, + 0.333 + ], + "angle": 0, + "content": "[20] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.334, + 0.892, + 0.402 + ], + "angle": 0, + "content": "[21] Yuxuan Liu, Jianxin Yang, Xiao Gu, Yijun Chen, Yao Guo, and Guang-Zhong Yang. Egofish3d: Egocentric 3d pose estimation from a fisheye camera via self-supervised learning. IEEE Transactions on Multimedia (TMM), pages 1-12, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.404, + 0.892, + 0.445 + ], + "angle": 0, + "content": "[22] Zhengyi Luo, Ryo Hachiuma, Ye Yuan, and Kris Kitani. Dynamics-regulated kinematic policy for egocentric pose estimation. 2021. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.448, + 0.892, + 0.517 + ], + "angle": 0, + "content": "[23] Haoyu Ma, Liangjian Chen, Deying Kong, Zhe Wang, Xingwei Liu, Hao Tang, Xiangyi Yan, Yusheng Xie, Shih-Yao Lin, and Xiaohui Xie. Transfusion: Cross-view fusion with transformer for 3d human pose estimation. In British Machine Vision Conference (BMVC), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.518, + 0.874, + 0.533 + ], + "angle": 0, + "content": "[24] Metashape, 2023. https://www.agisoft.com/.4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.534, + 0.842, + 0.547 + ], + "angle": 0, + "content": "[25] Mixamo, 2022. 
https://www MIXamo.com.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.548, + 0.892, + 0.631 + ], + "angle": 0, + "content": "[26] Xiaqing Pan, Nicholas Charron, Yongqian Yang, Scott Peters, Thomas Whelan, Chen Kong, Omkar Parkhi, Richard Newcombe, and Yuheng (Carl) Ren. Aria digital twin: A new benchmark dataset for egocentric 3d machine perception. In International Conference on Computer Vision (ICCV), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.633, + 0.892, + 0.701 + ], + "angle": 0, + "content": "[27] Jinman Park, Kimathi Kaai, Saad Hossain, Norikatsu Sumi, Sirisha Rambhatla, and Paul Fieguth. Domain-guided spatiotemporal self-attention for egocentric 3d pose estimation. In Conference on Knowledge Discovery and Data Mining (KDD), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.703, + 0.892, + 0.758 + ], + "angle": 0, + "content": "[28] Sungchan Park, Eunyi You, Inhoe Lee, and Joonseok Lee. Towards robust and smooth 3d multi-person pose estimation from monocular videos in the wild. In International Conference on Computer Vision (ICCV), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.759, + 0.892, + 0.815 + ], + "angle": 0, + "content": "[29] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 3d human pose estimation in video with temporal convolutions and semi-supervised training. In Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.817, + 0.892, + 0.831 + ], + "angle": 0, + "content": "[30] RenderPeople, 2022. https://renderpeople.com.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[31] Helge Rhodin, Christian Richardt, Dan Casas, Eldar Insafutdinov, Mohammad Shafiei, Hans-Peter Seidel, Bernt Schiele, and Christian Theobalt. Egocap: egocentric marker-less motion capture with two fisheye cameras. ACM Transactions on Graphics (TOG), 35(6):1-11, 2016. 1, 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "775" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.093, + 0.468, + 0.12 + ], + "angle": 0, + "content": "[32] RIBCAGE RX0 II camera, 2023. https://www.backbone.ca/product/ribcage-rx0-2/.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.121, + 0.468, + 0.162 + ], + "angle": 0, + "content": "[33] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Computer Vision and Pattern Recognition (CVPR), 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.468, + 0.218 + ], + "angle": 0, + "content": "[34] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, and Christian Theobalt. Physcap: Physically plausible monocular 3d motion capture in real time. ACM Transactions on Graphics (TOG), 39(6), 2020. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.219, + 0.468, + 0.273 + ], + "angle": 0, + "content": "[35] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, Patrick Pérez, and Christian Theobalt. Neural monocular 3d human motion capture with physical awareness. ACM Transactions on Graphics (TOG), 40(4), 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.274, + 0.468, + 0.328 + ], + "angle": 0, + "content": "[36] Zhenhua Tang, Zhaofan Qiu, Yanbin Hao, Richang Hong, and Ting Yao. 
3d human pose estimation with spatiotemporal criss-cross attention. In Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.33, + 0.468, + 0.385 + ], + "angle": 0, + "content": "[37] Denis Tome, Patrick Peluse, Lourdes Agapito, and Hernan Badino. xr-egopose: Egocentric 3d human pose from an hmd camera. In International Conference on Computer Vision (ICCV), 2019. 1, 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.386, + 0.468, + 0.468 + ], + "angle": 0, + "content": "[38] Denis Tome, Thiemo Alldieck, Patrick Peluse, Gerard Pons-Moll, Lourdes Agapito, Hernan Badino, and Fernando de la Torre. Selfpose: 3d egocentric pose estimation from a headset mounted camera. IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 45(6):6794-6806, 2023. 1, 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.469, + 0.468, + 0.524 + ], + "angle": 0, + "content": "[39] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems (NeurIPS), 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.525, + 0.468, + 0.58 + ], + "angle": 0, + "content": "[40] Jian Wang, Lingjie Liu, Weipeng Xu, Kripasindhu Sarkar, and Christian Theobalt. Estimating egocentric 3d human pose in global space. In International Conference on Computer Vision (ICCV), 2021. 1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.581, + 0.468, + 0.649 + ], + "angle": 0, + "content": "[41] Jian Wang, Lingjie Liu, Weipeng Xu, Kripasindhu Sarkar, Diogo Luvizon, and Christian Theobalt. Estimating egocentric 3d human pose in the wild with external weak supervision. In Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.65, + 0.468, + 0.705 + ], + "angle": 0, + "content": "[42] Jian Wang, Diogo Luvizon, Weipeng Xu, Lingjie Liu, Kripasindhu Sarkar, and Christian Theobalt. Scene-aware egocentric 3d human pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.706, + 0.468, + 0.775 + ], + "angle": 0, + "content": "[43] Jian Wang, Zhe Cao, Diogo Luvizon, Lingjie Liu, Kripasindhu Sarkar, Danhang Tang, Thabo Beeler, and Christian Theobalt. Egocentric whole-body motion capture with fisheyevit and diffusion-based motion refinement. In Computer Vision and Pattern Recognition (CVPR), 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.468, + 0.831 + ], + "angle": 0, + "content": "[44] Tao Wang, Jianfeng Zhang, Yujun Cai, Shuicheng Yan, and Jiashi Feng. Direct multi-view multi-person 3d human pose estimation. Advances in Neural Information Processing Systems (NeurIPS), 2021. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.468, + 0.901 + ], + "angle": 0, + "content": "[45] Weipeng Xu, Avishek Chatterjee, Michael Zollhoefer, Helge Rhodin, Pascal Fua, Hans-Peter Seidel, and Christian Theobalt. \\(\\mathrm{Mo}^2\\mathrm{Cap}^2\\) : Real-time mobile 3d motion capture with a cap-mounted fisheye camera. IEEE Transactions on Visualization and Computer Graphics, 2019. 
1, 2, 3, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.468, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[46] Honghong Yang, Longfei Guo, Yumei Zhang, and Xiaojun Wu. U-shaped spatial-temporal transformer network for 3d human pose estimation. Machine Vision and Applications, 33(6):82, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[47] Yingxuan You, Hong Liu, Ti Wang, Wenhao Li, Runwei Ding, and Xia Li. Co-evolution of pose and mesh for 3d human body estimation from video. In International Conference on Computer Vision (ICCV), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.206, + 0.892, + 0.248 + ], + "angle": 0, + "content": "[48] Ye Yuan and Kris Kitani. Ego-pose estimation and forecasting as real-time pd control. In International Conference on Computer Vision (ICCV), 2019. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.892, + 0.304 + ], + "angle": 0, + "content": "[49] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Jun-song Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. In Computer Vision and Pattern Recognition (CVPR), 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.892, + 0.374 + ], + "angle": 0, + "content": "[50] Siwei Zhang, Qianli Ma, Yan Zhang, Zhiyin Qian, Taein Kwon, Marc Pollefeys, Federica Bogo, and Siyu Tang. Ego-body: Human body shape and motion of interacting people from head-mounted devices. In European conference on computer vision (ECCV), 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.375, + 0.892, + 0.432 + ], + "angle": 0, + "content": "[51] Siwei Zhang, Qianli Ma, Yan Zhang, Sadegh Aliakbarian, Darren Cosker, and Siyu Tang. Probabilistic human mesh recovery in 3d scenes from egocentric views. In International Conference on Computer Vision (ICCV), 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.433, + 0.892, + 0.489 + ], + "angle": 0, + "content": "[52] Yahui Zhang, Shaodi You, and Theo Gevers. Automatic calibration of the fisheye camera for egocentric 3d human pose estimation from a single image. In Winter Conference on Applications of Computer Vision (WACV), 2021. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.489, + 0.892, + 0.544 + ], + "angle": 0, + "content": "[53] Dongxu Zhao, Zhen Wei, Jisan Mahmud, and Jan-Michael Frahm. Egoglass: Egocentric-view human pose estimation from an eyeglass frame. In International Conference on 3D Vision (3DV), 2021. 1, 2, 3, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.545, + 0.892, + 0.613 + ], + "angle": 0, + "content": "[54] Qitao Zhao, Ce Zheng, Mengyuan Liu, Pichao Wang, and Chen Chen. Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.615, + 0.892, + 0.671 + ], + "angle": 0, + "content": "[55] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In International Conference on Computer Vision (ICCV), 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.672, + 0.892, + 0.728 + ], + "angle": 0, + "content": "[56] Jieming Zhou, Tong Zhang, Zeeshan Hayden, Lars Petersson, and Mehrtash Harandi. Diff3dhpe: A diffusion model for 3d human pose estimation. In International Conference on Computer Vision (ICCV) Workshops, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.729, + 0.892, + 0.784 + ], + "angle": 0, + "content": "[57] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: A unified perspective on learning human motion representations. In International Conference on Computer Vision (ICCV), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.786, + 0.892, + 0.855 + ], + "angle": 0, + "content": "[58] Yiran Zhu, Xing Xu, Fumin Shen, Yanli Ji, Lianli Gao, and Heng Tao Shen. PosegTac: Graph transformer encoder-decoder with atrous convolution for 3d human pose estimation. In International Joint Conference on Artificial Intelligence (IJCAI), 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.855 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "776" + } + ] +] \ No newline at end of file diff --git a/2024/3D Human Pose Perception from Egocentric Stereo Videos/95770e99-65fc-4fd7-9de3-96977a97b4b8_origin.pdf b/2024/3D Human Pose Perception from Egocentric Stereo Videos/95770e99-65fc-4fd7-9de3-96977a97b4b8_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..5c5bbee0c1258d29a468001faf9479d79ace2599 --- /dev/null +++ b/2024/3D Human Pose Perception from Egocentric Stereo Videos/95770e99-65fc-4fd7-9de3-96977a97b4b8_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22bed19c71d7d28426beeaadde62ad1faa1fbf6d7cedce01b406826ee072fc39 +size 5927770 diff --git a/2024/3D Human Pose Perception from Egocentric Stereo Videos/full.md b/2024/3D Human Pose Perception from Egocentric Stereo Videos/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7e6a24bf2fc749822d75b5ed03eba07eb4ef02f6 --- /dev/null +++ b/2024/3D Human Pose Perception from Egocentric Stereo Videos/full.md @@ -0,0 +1,423 @@ +# 3D Human Pose Perception from Egocentric Stereo Videos + +Hiroyasu Akada + +Jian Wang Vladislav Golyanik Max Planck Institute for Informatics, SIC + +Christian Theobalt + +![](images/32798e62043c3ac39b403815deb7be4d445d827994a554ed1cb13ca22411de73.jpg) +(a) + +![](images/78bc11864321b84e7ecfc867bc9854e84042332aaa51fd250b6f5a43d5ea1a6d.jpg) + +![](images/34b3570884f1e618c6019c77328ba4cec270911dade00e158155f0f4ced7d98b.jpg) +(b) + +![](images/70cc8313ce36826cf52ae5a7de27ef4b51d1fc310842a0f92a56c4c10bbd7489.jpg) +(c) +Figure 1. 3D human pose estimation results of our proposed method from egocentric stereo fisheye videos. Left: results on synthetic images; (a) reference RGB view of the scene; (b) 3D-to-2D pose re-projections, and (c) a 3D pose in a scene mesh reconstructed by our framework. Right: results on real-world images; (d) reference view; (e) 3D-to-2D pose re-projections; (f) a 3D pose in the reconstructed scene, and (g) 3D virtual character animation (possible future application of our method). 
+ +![](images/bf98069469d4197ec25f8ec9086a0483a755b084a34cfd118da947e8f97282a4.jpg) +(d) + +![](images/f8aa611c225244d33dd9f3267562aee4d13f8a2a27de2f7a2cdc49e77c6a65e7.jpg) + +![](images/3adfca99ca8364a67f1d54e3950637089d9f3de1f073bd8e1ef40654fb260744.jpg) +(e) + +![](images/457e76fe84cd7c48b924fdabe7a6ac6b25f715964f12a39f10f3dfda678f6b09.jpg) +(f) + +![](images/a3f374da09d58bd9853fde02a00bc166f7cb39516b785a35e949be4f26668c23.jpg) +(g) + +# Abstract + +While head-mounted devices are becoming more compact, they provide egocentric views with significant self-occlusions of the device user. Hence, existing methods often fail to accurately estimate complex 3D poses from egocentric views. In this work, we propose a new transformer-based framework to improve egocentric stereo 3D human pose estimation, which leverages the scene information and temporal context of egocentric stereo videos. Specifically, we utilize 1) depth features from our 3D scene reconstruction module with uniformly sampled windows of egocentric stereo frames, and 2) human joint queries enhanced by temporal features of the video inputs. Our method is able to accurately estimate human poses even in challenging scenarios, such as crouching and sitting. Furthermore, we introduce two new benchmark datasets, i.e., UnrealEgo2 and UnrealEgo-RW (RealWorld). The proposed datasets offer a much larger number of egocentric stereo views with a wider variety of human motions than the existing datasets, allowing comprehensive evaluation of existing and upcoming methods. Our extensive experiments show that the proposed approach significantly outperforms previous methods. UnrealEgo2, UnrealEgo-RW, and trained models are available on our project page1 and Benchmark Challenge2. + +# 1. Introduction + +Egocentric 3D human motion capture using wearable devices has received increased attention recently [1, 11, 22, 31, 37, 38, 40-42, 45, 48, 52, 53]. Different from traditional vision-based motion capture setups that require a fixed recording space, egocentric systems allow flexible motion capture in less constrained situations. Therefore, the egocentric setups offer various applications, such as motion analysis and XR technologies (Fig. 1-(g)). + +Previous works proposed various egocentric methods to capture device users. On the one hand, the vast majority of existing methods—which use a monocular camera—would fail for complex human poses due to depth ambiguity and self-occlusion. On the other hand, the methods designed for stereo devices do not yet realize the full potential of their stereo settings, especially with the most recent compact eyeglasses-based setups [1, 53]. Specifically, they do not deliver high 3D reconstruction accuracy across different scenarios. Moreover, these approaches do not consider scene information, which further limits their accuracy. + +To address the challenges outlined above, we propose a new transformer-based framework for egocentric 3D human motion capture from compact eyeglasses-based devices; see Fig. 1. The first step of our framework is to estimate 2D joint heatmaps from egocentric stereo fisheye RGB videos (Sec. 4.1). These 2D joint heatmaps are then processed with human joint queries in our transformer-based 3D mod + +ule to estimate 3D poses. Here, we leverage the scene information and temporal context of the input videos in the 3D module to improve estimation accuracy. 
Firstly, we use uniformly sampled windows of egocentric stereo frames to reconstruct a 3D background scene using Structure from Motion (SfM) [33], obtaining scene depth as additional information for the 3D module (Sec. 4.2 and 4.3). In our challenging eyeglasses-based setup, however, the 3D scene and camera poses can not always be estimated due to severe self-occlusion in the egocentric images. This results in depth maps with zero (invalid) values and undesired computation of network gradients during training. To mitigate this issue, we propose to use depth padding masks that prevent processing such invalid depth values in the 3D module. Additionally, we propose video-dependent query augmentation that enhances the joint queries with the temporal context of stereo video inputs to effectively capture the temporal relation of human motions at a joint level (Sec. 4.4). + +We also introduce two new benchmark datasets: UnrealEgo2 and UnrealEgo-RW. UnrealEgo2 is an extended version of UnrealEgo [1] and the largest eyeglasses-based synthetic data with various new motions, offering $2.8 \times$ larger data (2.5M images) than the existing dataset [1]. UnrealEgo-RW is a real-world dataset recorded with our newly developed device that resembles the virtual eyeglasses-based setup [1], offering 260k images with various motions and 3D poses. The proposed datasets make it possible to evaluate existing and upcoming methods on a variety of motions, not only in synthetic scenes but also in real-world cases. + +In short, the contributions of this paper are as follows: + +- The transformer-based framework for egocentric stereo 3D human pose estimation that accounts for temporal context in egocentric stereo views. +- 3D pose estimation is enhanced via the utilization of scene information from our video-based 3D scene reconstruction module as well as joint queries obtained from our video-dependent query augmentation policy. +- A new portable device for egocentric stereo view capture with its specification and two new benchmark datasets: UnrealEgo2 and UnrealEgo-RW recorded with our device. The proposed datasets allow for a comprehensive evaluation of methods for egocentric 3D human pose estimation from stereo views. + +Our experiments demonstrate that the proposed method outperforms the previous state-of-the-art approaches by a substantial margin, i.e., $>15\%$ on UnrealEgo [1], $\geq 40\%$ on UnrealEgo2, and $\geq 10\%$ on UnrealEgo-RW (on MPJPE). We release UnrealEgo2, UnrealEgo-RW, and our trained models on our project page3 and Benchmark Challenge4 to foster the area of egocentric 3D vision. + +# 2. Related Work + +Egocentric 3D Human Motion Capture. Recent years witnessed significant innovations in egocentric 3D human pose estimation. To capture device users, many existing works use downward-facing cameras and the existing methods can be categorized into two groups. The first group are monocular approaches [11, 21, 22, 27, 37, 38, 40, 41, 43, 45, 48, 52]. For example, Wang et al. [43] uses a diffusion-based [10] motion prior to tackle self-conclusions. Due to the depth ambiguity, monocular methods often fail to estimate accurate 3D poses. Wang et al. [42] tackled this issue by projecting depth and 2D pose features into a pre-defined voxel space. This method requires additional training with ground-truth depths and human body segmentation; it cannot easily be extended for multi-view or temporal inputs. Zhang et al. [51] utilized a diffusion model [10] conditioned on a 3D scene to generate poses. 
They require pre-scanned scene mesh as an input and cannot capture a device user. + +The second group, including our work, focuses on the multi-view (often stereo) setting. Rhodin et al. [31] proposed an optimization approach whereas Cha et al. [3] used eight cameras to estimate a 3D body and reconstruct a 3D scene separately. Other works [1, 53] used the multi-branch autoencoder [37] to the stereo setup. Kang et al. [12] (arXiv pre-print at the time of submission) leveraged a stereomatching mechanism and perspective embedding heatmaps. In contrast to the existing methods, we propose a new transformer-based method that effectively utilizes egocentric stereo videos via our video-based 3D scene reconstruction module and video-dependent query augmentation policy. Our method considers the scene information without the supervision of the scene data. + +Transformers in 3D Human Pose Estimation from External Cameras. 3D pose estimation from external cameras has shown significant progress due to the advances in transformer architectures [39]. Some works [20, 47] predict 3D human pose and mesh from monocular views. Other works [5, 18, 19, 28, 29, 36, 46, 49, 54-58] present a 2D-to-3D lifting module that estimates 3D poses from monocular 2D joints obtained with off-the-shelf 2D joint detectors. Although their lifting modules show impressive results, those monocular methods cannot be easily applied to our stereo setting. On the other hand, some works utilize transformers in multi-view settings. He et al. [9] and Ma et al. [23] aggregate stereo information on epipolar lines of stereo images, which are difficult to obtain from fisheye images. Recent work [44] regresses multi-person 3D poses from multi-view inputs, powered by projective attention and query adaptation. However, no existing works explored the potential of transformers along with 2D joint heatmaps or explicit scene information in stereo 3D pose estimation. In this paper, we propose a transformer-based framework that accounts for the temporal relation of human motion at a joint level via + +intermediate 2D joint heatmap and depth maps even with inaccurate depth values mixed in the framework. + +Datasets for Egocentric 3D Human Pose Estimation. Several works proposed unique setups to create datasets, using a monocular camera [11, 17, 22, 37, 40, 41, 45, 48] and forward-facing cameras [11, 14, 17, 22, 26, 48, 50, 51]. There also exist datasets captured with stereo devices [3, 7, 14, 26, 31, 53]. However, they are small [31] with limited motion types [31, 53], not publicly available [3, 53], or do not provide ground truth 3D poses of device users [7, 14, 26]. Recently, Akada et al. [1] introduced UnrealEgo, a synthetic dataset based on virtual eyeglasses with two fisheye cameras. However, they provide only synthetic images. Meanwhile, more glasses-based stereo datasets that offer a wider variety of motions or real-world footage are required nowadays for an extensive evaluation of existing and upcoming methods. Hence, we introduce two new benchmark datasets that in their characteristics go beyond the existing data: UnrealEgo2 and UnrealEgo-RW. We describe the proposed datasets in the following section. + +# 3. Mobile Device and Datasets + +We present two new datasets for egocentric stereo 3D motion capture: UnrealEgo2 and UnrealEgo-RW; see Fig. 1. Please watch our supplementary video for visualizations. UnrealEgo2 Dataset. 
To create UnrealEgo2 (an extension of UnrealEgo [1]), we adapt the publicly available setup with a virtual eyeglasses device [1]. This setup comes with two downward-facing fisheye cameras attached $12\mathrm{cm}$ apart from each other on the glasses frames. The camera's field of view is $170^{\circ}$ . With this device, we capture 17 realistic 3D human models [30] animated by the Mixamo [25] dataset in various 3D environments. We record simple to highly complex motions such as crouching and crawling, for 14 hours. + +Overall, UnrealEgo2 offers 15,207 motions and $>1.25\mathrm{M}$ stereo views (2.5M images) as well as depth maps with a resolution $1024\times 1024$ pixel rendered at 25 frames per second. Each frame is annotated with 32 body and 40 hand joints. Note that UnrealEgo2 is the largest glasses-based dataset and $2.8\times$ larger than UnrealEgo. Also, it does not share the same motions with UnrealEgo, providing a larger motion variety for a comprehensive evaluation. + +Design of Our Mobile Device. Evaluation with real-world datasets plays a pivotal role in computer vision research. Therefore, we build a new portable device; see Fig. 2. Our device is based on a helmet with two RIBCAGE RX0 II cameras [32] and two FUJINON FE185C057HA-1 fisheye lenses [6]. We placed the cameras $12\mathrm{cm}$ away from each other and $2\mathrm{cm}$ away from user's face. We cropped the margins of the egocentric images to resemble the field of view of $170^{\circ}$ of the UnrealEgo and UnrealEgo2 setups. Note that our setup is more compact than EgoCap [31] that placed cameras $25\mathrm{cm}$ away from user's face. + +![](images/81ffc82403fe2599dd97479c7fa1a3a4102d1fdd1f5fc02aa55f787c93d1f63f.jpg) +Figure 2. Our portable setup to acquire UnrealEgo-RW. + +![](images/7bbcdf0ba48c212d7974b9b7c8cf872e21909a2a3cf5e92c7192753b13909bc9.jpg) + +UnrealEgo-RW (Real-World) Dataset. With our device, we record various motions of 16 identities in a multi-view motion capture studio (Fig. 1-(d)). We capture simple and challenging activities, e.g., crawling and dancing, for 1.5 hours. This is in strong contrast to the existing real-world stereo dataset [53] (not publicly available) that records only three simple actions, i.e., sitting, standing, and walking. + +In total, we obtained 591 motion segments from 16 identities with various textured clothing. This results in more than $130\mathrm{k}$ stereo views (260k images) of a resolution $872\times 872$ pixel rendered at 25 frames per second with ground-truth 3D poses of 16 joints. Note that UnrealEgoRW offers $4.3\times$ larger data with a wider variety of motions than the publicly available real-world stereo data [31]. + +# 4. Method + +We propose a new framework for egocentric stereo 3D human pose estimation as shown in Fig. 3. Our framework first estimates the 2D joint heatmaps from egocentric stereo fisheye videos in our 2D module (Sec. 4.1). The heatmaps and input videos are then processed in our segmentation module to obtain 2D human body masks (Sec. 4.2). Next, we use uniformly sampled windows of input frames and human body masks to reconstruct 3D scenes (Sec. 4.3). Here, we render depth maps and depth region masks from the reconstructed mesh. Finally, our transformer-based 3D module processes the joint heatmaps, depth information, and joint queries to estimate 3D poses (Sec. 4.4). Here, the 3D module leverages depth padding masks based on the availability of the depth maps as well as joint queries enhanced by the stereo video features from the 2D module. 
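To make the data flow of this overview concrete, the sketch below wires the four stages together on dummy tensors. It is our own illustration rather than the authors' released code: the function names (`estimate_heatmaps`, `segment_body`, `reconstruct_scene`, `estimate_poses`) and the exact tensor shapes are hypothetical placeholders chosen to mirror the quantities introduced in Secs. 4.1-4.4.

```python
import torch

T, H, W, C, J = 5, 256, 256, 512, 16   # frames, input resolution, feature channels, joints

def estimate_heatmaps(frames):
    # stand-in for the stereo 2D module: 15-joint heatmaps plus intermediate features
    heatmaps = torch.rand(T, 2, 15, H // 4, W // 4)
    features = torch.rand(T, 2, C, H // 32, W // 32)
    return heatmaps, features

def segment_body(frames, heatmaps):
    # stand-in for the SAM-based segmentation module guided by 2D joint locations
    return torch.rand(T, 2, 1, H, W) > 0.5

def reconstruct_scene(frames, body_masks):
    # stand-in for SfM on a uniformly sampled window of frames with the body masked out
    depth = torch.rand(T, 2, 1, H // 4, W // 4)            # rendered depth maps
    region = torch.ones_like(depth)                        # depth region masks
    valid = torch.tensor([True, False, True, True, True])  # False where reconstruction failed
    return depth, region, valid

def estimate_poses(heatmaps, features, depth, region, valid):
    # stand-in for the transformer-based 3D module with joint queries and padding masks
    return torch.rand(T, J, 3)

frames = torch.rand(T, 2, 3, H, W)                         # egocentric stereo fisheye video
heatmaps, features = estimate_heatmaps(frames)
body_masks = segment_body(frames, heatmaps)
depth, region, valid = reconstruct_scene(frames, body_masks)
poses = estimate_poses(heatmaps, features, depth, region, valid)
print(poses[-1].shape)                                     # final pose P^T: torch.Size([16, 3])
```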
+ +# 4.1.2D Pose Estimation + +Given egocentric stereo videos with $T$ frames $\{\mathbf{I}_{\mathrm{Left}}^t,\mathbf{I}_{\mathrm{Right}}^t\in$ $\mathbb{R}^{H\times W\times 3}|t = 1,2,\dots,T\}$ , we use the existing stereo 2D joint heatmap estimator [1] to obtain a sequence of corresponding 2D heatmaps of 15 joints $\{\mathbf{H}_{\mathrm{Left}}^t,\mathbf{H}_{\mathrm{Right}}^t\in$ $\mathbb{R}^{\frac{H}{4}\times \frac{W}{4}\times 15}\}$ , including the neck, upper arms, lower arms, hands, thighs, calves, feet, and balls of the feet. We also extract intermediate feature maps $\{\mathbf{F}_{\mathrm{Left}}^t,\mathbf{F}_{\mathrm{Right}}^t\in$ $\mathbb{R}^{\frac{H}{32}\times \frac{W}{32}\times C}\}$ where $C = 512$ , which are used later in the 3D module. + +![](images/d876a4516d86f54cf6a9f65342065f9bd9e19401042946519eae08d09e623b82.jpg) +Figure 3. Overview of our framework. Our method takes egocentric stereo videos $\{\mathbf{I}_{\mathrm{Left}}^t,\mathbf{I}_{\mathrm{Right}}^t\}$ as inputs. We first apply the 2D module to obtain 2D joint heatmaps $\{\mathbf{H}_{\mathrm{Left}}^t,\mathbf{H}_{\mathrm{Right}}^t\}$ and video features $\{\mathbf{F}_{\mathrm{Left}}^t,\mathbf{F}_{\mathrm{Right}}^t\}$ (Sec. 4.1). The heatmaps are used with input videos to create human body masks $\{\mathbf{M}_{\mathrm{Left}}^t,\mathbf{M}_{\mathrm{Right}}^t\}$ (Sec. 4.2). Next, we use uniformly sampled windows of input frames and human body masks to reconstruct a 3D scene mesh (Sec. 4.3). From the mesh, we generate depth maps $\{\mathbf{D}_{\mathrm{Left}}^t,\mathbf{D}_{\mathrm{Right}}^t\}$ and depth region masks $\{\mathbf{R}_{\mathrm{Left}}^t,\mathbf{R}_{\mathrm{Right}}^t\}$ . Note that this diagram shows an example case of missing depth values for the second input frame. Lastly, the depth data, 2D joint heatmaps, video features, joint queries $q^{t}$ and the padding masks $V_{\mathrm{Depth}}^{t}$ are processed in the 3D module to estimate 3D poses $\mathbf{P}^t$ (Sec. 4.4). + +# 4.2. Human Body Segmentation + +To reconstruct 3D scenes from egocentric videos, it is necessary to identify the pixels corresponding to the background environment. Therefore, we integrate an existing segmentation method, i.e., ViT-H SAM model [16], as our segmentation network $\mathcal{F}_{\mathrm{SAM}}$ . In this module, we firstly obtain 2D joint locations from the 2D joint heatmap $\{\widehat{\mathbf{H}}_{\mathrm{Left}}^t,\widehat{\mathbf{H}}_{\mathrm{Right}}^t\}$ . Then, we use the input video frames $\{\mathbf{I}_{\mathrm{Left}}^t,\mathbf{I}_{\mathrm{Right}}^t\}$ and its corresponding 2D joints to extract a human body mask $\{\mathbf{M}_{\mathrm{Left}}^t,\mathbf{M}_{\mathrm{Right}}^t\in \mathbb{R}^{H\times W\times 1}\}$ : + +$$ +\mathbf {M} _ {\text {L e f t}} ^ {t} = \mathcal {F} _ {\mathrm {S A M}} \left(\mathbf {I} _ {\text {L e f t}} ^ {t}, \widehat {\mathbf {H}} _ {\text {L e f t}} ^ {t}\right). \tag {1} +$$ + +The same process can be applied to obtain $\mathbf{M}_{\mathrm{Right}}^t$ . Note that we use the SAM model without re-training on ground-truth human body masks. Instead, we guide the predictions of SAM using joint positions extracted from the 2D heatmaps. + +# 4.3. 3D Scene Reconstruction + +We aim to reconstruct 3D environments from uniformly sampled windows of input frames $\{\mathbf{I}_{\mathrm{Left}}^t,\mathbf{I}_{\mathrm{Right}}^t\}$ and human body masks $\{\mathbf{M}_{\mathrm{Left}}^t,\mathbf{M}_{\mathrm{Right}}^t\}$ with a fixed length. The length is set to 4 seconds (some motion data contains shorter sequences). 
Given these data, we use Metashape [24] to perform SfM to obtain camera poses and a 3D scene + +mesh. Here, as the baseline length between stereo cameras is known, i.e., $12\mathrm{cm}$ , we can obtain the mesh in the real-world scale. Next, we render down-sampled depth maps $\{\mathbf{D}_{\mathrm{Left}}^t,\mathbf{D}_{\mathrm{Right}}^t\in \mathbb{R}^{\frac{H}{4}\times \frac{W}{4}\times 1}\}$ and depth region masks $\{\mathbf{R}_{\mathrm{Left}}^t,\mathbf{R}_{\mathrm{Right}}^t\in \mathbb{R}^{\frac{H}{4}\times \frac{W}{4}\times 1}\}$ from the reconstructed 3D scene mesh. The depth region masks show the regions where the depth values are obtained from the 3D scene. This depth information will be used later in the 3D module as additional cues for pose estimation. However, there are some cases where the egocentric RGB videos are largely occupied by a human body. In such scenarios, the 3D scene can not be reconstructed or camera poses can not be estimated. This results in missing (invalid) depth values and undesired computation of network gradients during training. Therefore, we tackle this issue in our 3D module. + +# 4.4.3D Pose Estimation + +In the 3D module, we aim to estimate a sequence of 3D poses by considering scene information and the temporal context of the egocentric stereo videos. Specifically, given the 2D joint heatmaps, depth maps, depth region masks, and $T$ sets of joint queries $q^{t} \in \mathbb{R}^{16 \times \frac{C}{2}}$ , we use a transformer decoder to estimate a sequence of 3D poses $\{\mathbf{P}^t \in \mathbb{R}^{16 \times 3} | t = 1, 2, \dots, T\}$ . Our pose output is the 3D pose at the last time step $\mathbf{P}^T$ . We follow the existing + +
| Method | Task | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑) |
| --- | --- | --- | --- | --- | --- |
| Zhao et al. [53] | Pelvis relative | 86.45 | 63.71 | 85.97 | 50.50 |
| Akada et al. [1] | Pelvis relative | 78.98 | 59.30 | 88.81 | 54.31 |
| Kang et al. [12] | Pelvis relative | 60.82 | 48.47 | - | - |
| Baseline | Pelvis relative | 59.85 | 49.14 | 92.07 | 63.88 |
| Ours | Pelvis relative | 50.55 | 40.50 | 93.83 | 70.61 |
| Zhao et al. [53] | Device relative | 88.12 | 65.36 | 85.10 | 50.37 |
| Akada et al. [1] | Device relative | 84.53 | 63.92 | 87.05 | 52.76 |
| Baseline | Device relative | 63.44 | 50.97 | 92.30 | 64.54 |
| Ours | Device relative | 46.20 | 40.19 | 94.02 | 73.53 |
+ +Table 1. Quantitative results on UnrealEgo [1] with mm-scale. + +
| Method | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑) |
| --- | --- | --- | --- | --- |
| Zhao et al. [53] | 79.64 | 58.22 | 88.50 | 53.82 |
| Akada et al. [1] | 72.80 | 52.88 | 91.32 | 55.81 |
| Baseline | 52.23 | 39.78 | 95.72 | 68.13 |
| Ours | 30.53 | 26.72 | 97.22 | 80.75 |
+ +works [1, 37, 38] to estimate 16 joints including the head. + +Depth and Heatmap Features. We use the sequence of the depth maps, depth region masks, and the 2D joint heatmaps as the memory of a cross-attention operation in the transformer decoder. For this purpose, we extract depth features $\{\mathbf{U}_{\mathrm{Left}}^t,\mathbf{U}_{\mathrm{Right}}^t\in \mathbb{R}^{\frac{H}{32}\times \frac{W}{32}\times \frac{C}{2}}\}$ from the depth data: + +$$ +\mathbf {U} _ {\text {L e f t}} ^ {t} = \mathcal {F} _ {\text {D e p t h}} \left(\mathbf {D} _ {\text {L e f t}} ^ {t} \oplus \widehat {\mathbf {R}} _ {\text {L e f t}} ^ {t}\right), \tag {2} +$$ + +where “ $\oplus$ ” is a concatenation operation along the channel axis and $\mathcal{F}_{\mathrm{Depth}}$ represents a feature extractor. The same process can be applied to obtain $\mathbf{U}_{\mathrm{Right}}^t$ . + +Similarly, we extract heatmap features $\{\mathbf{G}_{\mathrm{Left}}^t,\mathbf{G}_{\mathrm{Right}}^t\in$ $\mathbb{R}^{\frac{H}{16}\times \frac{W}{16}\times C}\}$ from the 2D heatmaps: + +$$ +\mathbf {G} _ {\text {L e f t}} ^ {t} = \mathcal {F} _ {\mathrm {H M}} \left(\widehat {\mathbf {H}} _ {\text {L e f t}} ^ {t}\right), \tag {3} +$$ + +where $\mathcal{F}_{\mathrm{HM}}$ represents another feature extractor. The same process can be applied to obtain $\mathbf{G}_{\mathrm{Right}}^t$ . + +These features are forwarded with positional embeddings into the transformer. However, as mentioned in Sec. 4.3, depth values can be missing in some frames. To prevent processing features of such depth data and let the network focus only on valid frames, we propose to add padding masks $V_{\mathrm{Depth}}^{t} \in \mathcal{R}$ to all the elements of $\{\mathbf{U}_{\mathrm{Left}}^{t}, \mathbf{U}_{\mathrm{Right}}^{t}\}$ : + +$$ +V _ {\text {D e p t h}} ^ {t} = \left\{ \begin{array}{l l} - \inf , & \text {i f d e p t h v a l u e s a r e m i s s i n g} \\ 0, & \text {o t h e r w i s e} \end{array} . \right. \tag {4} +$$ + +When $V_{\mathrm{Depth}}^{t} = -\inf$ , the depth features $\{\mathbf{U}_{\mathrm{Left}}^{t}, \mathbf{U}_{\mathrm{Right}}^{t}\}$ after the softmax function in self-attention layers of the transformer will have zero effect on the network training. + +Stereo-Video-Dependent Joint Query Adaptation. The existing work [44] represents human joints as learnable positional embeddings called joint queries that encode prior + +Table 2. Quantitative results of device-relative pose estimation on UnrealEgo2 with mm-scale. + +
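As a minimal sketch of how the padding mask of Eq. (4) can be realized with a standard attention layer, the snippet below marks the depth tokens of frames for which scene reconstruction failed and passes them as a key padding mask, which adds $-\infty$ to the corresponding attention logits before the softmax. The token layout, the $8\times 8$ feature-map size, and all variable names are our assumptions, not the authors' implementation.

```python
import torch
import torch.nn as nn

T, C, J = 5, 512, 16
n_tokens = 8 * 8                          # assumed number of spatial tokens per depth feature map

# stacked depth-feature tokens of both views for T frames: (batch, 2 * T * n_tokens, C / 2)
depth_tokens = torch.rand(1, 2 * T * n_tokens, C // 2)

# per-frame validity from the SfM step; False where no depth map could be rendered
frame_valid = torch.tensor([True, False, True, True, True])

# token-level mask: True marks tokens to be ignored, mirroring the -inf padding of Eq. (4)
key_padding_mask = (~frame_valid).repeat_interleave(n_tokens).repeat(2).unsqueeze(0)

attn = nn.MultiheadAttention(embed_dim=C // 2, num_heads=8, batch_first=True)
queries = torch.rand(1, T * J, C // 2)     # T sets of 16 joint queries
out, _ = attn(queries, depth_tokens, depth_tokens, key_padding_mask=key_padding_mask)
print(out.shape)                           # torch.Size([1, 80, 256])
```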
| Method | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑) |
| --- | --- | --- | --- | --- |
| Zhao et al. [53] | 117.57 | 88.01 | 73.12 | 38.94 |
| Akada et al. [1] | 122.64 | 86.55 | 72.51 | 38.67 |
| Baseline | 115.95 | 85.00 | 74.13 | 40.11 |
| Ours | 104.14 | 82.18 | 80.20 | 46.22 |
+ +Table 3. Quantitative results of device-relative pose estimation on UnrealEgo-RW with mm-scale. + +knowledge about the skeleton joints. In our problem setting, the simplest way to design such joint queries is to set queries for each pose in a motion sequence. However, this can not capture the temporal context in video inputs, e.g., human motions and background changes. Therefore, we extend the multi-view joint query augmentation technique [44] for our stereo video setting to account for sequential information. Specifically, we enhance the joint queries with the temporal intermediate features of stereo RGB frames $\{\mathbf{F}_{\mathrm{Left}}^t,\mathbf{F}_{\mathrm{Right}}^t\}$ . Firstly, from the sequence of the intermediate features, we create a sequence of combined features $\mathbf{F}^{t}\in \mathbb{R}^{\frac{H}{32}\times \frac{W}{32}\times \frac{C}{2}}$ + +$$ +\mathbf {F} ^ {t} = \operatorname {c o n v} \left(\mathbf {F} _ {\text {L e f t}} ^ {t} \oplus \mathbf {F} _ {\text {r i g h t}}\right), \tag {5} +$$ + +where "conv(\cdot)" is a convolution operation with a kernel size of $1 \times 1$ . + +Next, we fuse the sequence of the combined features $\mathbf{F}^t$ to obtain a fused stereo features $\mathbf{F}_{\mathrm{Stereo}} \in \mathbb{R}^{\frac{C}{2}}$ : + +$$ +\mathbf {F} _ {\text {S t e r e o}} = \mathbf {F} _ {\mathrm {P}} ^ {1} \oplus \dots \oplus \mathbf {F} _ {\mathrm {P}} ^ {T}, \text {w h e r e} \mathbf {F} _ {\mathrm {P}} ^ {i} = p (\mathbf {F} ^ {i}), \tag {6} +$$ + +where $p(\cdot)$ is an operation of adaptive average pooling. Now, the feature $\mathbf{F}_{\mathrm{Stereo}}$ contains stereo video information. + +Lastly, with $\mathbf{F}_{\mathrm{Stereo}}$ and a fully connected layer " $\operatorname {fc}(\cdot)$ ", we augment each query $q^{t}$ to obtain $q_{\mathrm{Aug}}^t\in \mathbb{R}^{16\times \frac{C}{2}}$ .. + +$$ +\mathbf {q} _ {\text {A u g}} ^ {t} = \operatorname {f c} \left(\mathbf {F} _ {\text {S t e r e o}}\right) + q ^ {t}. \tag {7} +$$ + +Transformer Decoder. We adopt a DETR [2]-based transformer decoder and a pose regression head. In decoder layers, all of the augmented joint queries $q_{\mathrm{Aug}}^{t}$ first interact with each other on a self-attention layer. Then, the queries extract all of the temporal stereo features from the memory $\{\mathbf{U}_{\mathrm{Left}}^{t}, \mathbf{U}_{\mathrm{Right}}^{t}, \mathbf{G}_{\mathrm{Left}}^{t}, \mathbf{G}_{\mathrm{Right}}^{t}\}$ with the padding masks $V_{\mathrm{Depth}}^{t}$ on a cross-attention layer. Lastly, the pose regression head estimates a sequence of 3D poses $\{\hat{\mathbf{P}}^t \in \mathbb{R}^{16 \times 3} | t = 1, 2, \dots, T\}$ , yielding the final pose output $\mathbf{P}^T$ . 
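A minimal sketch of the stereo-video-dependent query augmentation of Eqs. (5)-(7), assuming $8\times 8$ intermediate feature maps, $C = 512$, and $T = 5$; the class and layer names are ours and the sizes are illustrative rather than the authors' exact architecture.

```python
import torch
import torch.nn as nn

T, C, J = 5, 512, 16                    # frames, feature channels, joints

class QueryAugmentation(nn.Module):
    """Sketch of Eqs. (5)-(7): fuse stereo video features into the learnable joint queries."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(2 * C, C // 2, kernel_size=1)     # Eq. (5): 1x1 conv on concatenated stereo features
        self.pool = nn.AdaptiveAvgPool2d(1)                     # p(.) in Eq. (6)
        self.fc = nn.Linear(T * (C // 2), C // 2)               # fc(.) in Eq. (7)
        self.queries = nn.Parameter(torch.randn(T, J, C // 2))  # joint queries q^t

    def forward(self, feat_left, feat_right):                   # each: (B, T, C, h, w)
        B = feat_left.shape[0]
        x = torch.cat([feat_left, feat_right], dim=2)           # (B, T, 2C, h, w)
        x = self.conv(x.flatten(0, 1))                          # (B*T, C/2, h, w)  -> F^t
        x = self.pool(x).flatten(1)                             # (B*T, C/2)        -> F_P^t
        f_stereo = x.reshape(B, -1)                             # (B, T*C/2)        -> F_Stereo
        return self.fc(f_stereo)[:, None, None, :] + self.queries  # (B, T, J, C/2)  -> q_Aug^t

aug = QueryAugmentation()
q_aug = aug(torch.rand(2, T, C, 8, 8), torch.rand(2, T, C, 8, 8))
print(q_aug.shape)                                              # torch.Size([2, 5, 16, 256])
```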
+ +Similar to the previous works [5, 49], we train the 3D module with the pose supervision of the current and past frames: + +$$ +L _ {3 \mathrm {D}} = L _ {\text {p o s e}} \left(\mathbf {P} ^ {T}, \hat {\mathbf {P}} ^ {T}\right) + \frac {\lambda_ {\text {p a s t}}}{(T - 1)} \sum_ {t = 1} ^ {T - 1} L _ {\text {p o s e}} \left(\mathbf {P} ^ {t}, \hat {\mathbf {P}} ^ {t}\right), \tag {8} +$$ + +$$ +L _ {\text {p o s e}} (\mathbf {P}, \hat {\mathbf {P}}) = \lambda_ {\text {p o s e}} (\operatorname {m p j p e} (\mathbf {P}, \hat {\mathbf {P}}) + \quad \tag {9} +$$ + +$$ +\lambda_ {\cos} \cos (\operatorname {b o n e} (\mathbf {P}), \operatorname {b o n e} (\hat {\mathbf {P}}))), +$$ + +![](images/5fdfd2791925374276a711ccaf7b6c0a60c88242a9cfdb84e484781ea919d57d.jpg) + +![](images/bb85da3c970f066a55c60be4a27389729801720ae1cd0cc79419dcb3a945ae53.jpg) + +![](images/2c9a7a5f483c191cdf1a4d51e72928dea5ee499a6e0e87bb1b66dd9f8992bb1a.jpg) + +![](images/e2489535ff6e9edeab3aa400f279bf3c2aff2a8f17f045bbaa2041d5db319651.jpg) + +![](images/95f66cc1d2818b11a995170078690d4fc1aca3869ce3c9a4edcecb5f75dfe5da.jpg) + +![](images/2030a68b736941d5ef4c06b68e8f8aeae02c92b6578628b4a6034c45eb16765e.jpg) +Stereo inputs + +![](images/d5aba829f6cda66deb697e588045bdbd9281bb2a601fbba876fd86e8ab93defb.jpg) +Akada et al. [1] + +![](images/ba584dcfcf272c76440124a9fec23001a2f5357c1bdc5fc5e4eadd8e27280260.jpg) + +![](images/462476cb03179b4c0534fa71939fbcd889bd00f6d732d0ab86b7852589f72e9e.jpg) +Baseline +Figure 4. Qualitative results of device-relative pose estimation. Left: UnrealEgo2. Right: UnrealEgo-RW. 3D pose prediction and ground truth are displayed in red and green, respectively. For UnrealEgo-RW, we show ground-truth scene meshes for visualization. + +![](images/1a96b72d97ae0aac10c5db8320c95d44577d7030eb4f232c6e66eacddd86ec88.jpg) +Ours + +![](images/7fd62126b1f5a6a63c2bdfc237130d3f0dbd477ec9c0eb1d679c1a7494f2f172.jpg) + +![](images/c81008e9a0e6bef2a258b064fb1f380d26c86e7023180f0363376d5a65952eea.jpg) + +![](images/66192191b5b69d3a75a122ddc53af3bed3be8d1d61cc260b901becee9b28ed40.jpg) + +![](images/df562450fb7278eedaa5c8daab4b6db37d1090af2728dbd94eb6efe54d52dbce.jpg) + +![](images/4b98a5b5ceeed27899cd52fbcbea7b2bca9e9917c9a34db177e878334bdd06fc.jpg) + +![](images/40abad9cc79a8e0749af20d18c0eaa7a990bea2de937d5524793ec364d754f13.jpg) +Stereo inputs + +![](images/2c9a47229bbe152715ceaabb68787da855f418043c6e687acc65ff28ef0987d4.jpg) + +![](images/8710bd0764b2cf931cb6489035157b562c8f5f614885cafb109ee55635230cf7.jpg) +Akada et al. [1] + +![](images/2f1f1aa7225ba1552707b49a9c82fe28fc6ba778d760d8c035761e084dd8163d.jpg) +Baseline + +![](images/a147f9ecea883b099f6753eaa58d149ca3423b26736030c2e01029e80d3355df.jpg) +Ours + +
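For concreteness, the following is a minimal PyTorch sketch of the training objective in Eqs. (8)-(11), using the loss weights reported in Sec. 5.2. The skeleton hierarchy behind bone(·) is not spelled out in this section, so the chain-shaped `parents` array below is a hypothetical placeholder.

```python
import torch

def mpjpe(pred, gt):                                 # Eq. (10): mean per-joint position error
    return (pred - gt).norm(dim=-1).mean()

def bones(pose, parents):                            # bone(.): parent-to-child joint vectors
    return pose[:, 1:] - pose[:, parents[1:]]

def bone_cos(pred, gt, parents):                     # Eq. (11): negative cosine similarity of bones
    return -torch.nn.functional.cosine_similarity(
        bones(pred, parents), bones(gt, parents), dim=-1).mean()

def pose_loss(pred, gt, parents, l_pose=0.1, l_cos=0.01):       # Eq. (9)
    return l_pose * (mpjpe(pred, gt) + l_cos * bone_cos(pred, gt, parents))

def sequence_loss(pred_seq, gt_seq, parents, l_past=0.1):       # Eq. (8)
    T = pred_seq.shape[1]
    latest = pose_loss(pred_seq[:, -1], gt_seq[:, -1], parents)
    past = sum(pose_loss(pred_seq[:, t], gt_seq[:, t], parents) for t in range(T - 1))
    return latest + l_past / (T - 1) * past

parents = torch.arange(-1, 15)          # placeholder chain skeleton: joint i has parent i - 1
pred = torch.rand(2, 5, 16, 3)          # batch of 2, T = 5 frames, 16 joints
gt = torch.rand(2, 5, 16, 3)
print(sequence_loss(pred, gt, parents))
```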
| Method | MPJPE (↓) | PA-MPJPE (↓) |
| --- | --- | --- |
| (a) Baseline with depth information | 120.39 | 86.23 |
| Baseline | 115.36 | 84.80 |
| (b) Ours w/o query adaptation | 108.33 | 86.69 |
| (c) Ours w/o depth information | 112.56 | 84.37 |
| (d) Ours w/o depth padding mask | 108.70 | 84.26 |
| (e) Ours with latest pose supervision only | 105.67 | 83.46 |
| (f) Ours with a single set of queries | 105.58 | 85.68 |
| Ours | 104.14 | 82.18 |
where $\mathbf{P}$ is a ground-truth 3D pose, $\operatorname{mpjpe}(\cdot)$ is the mean per joint position error, $\cos(\cdot)$ is the negative cosine similarity, and $\operatorname{bone}(\cdot)$ is the operation that extracts the bone vectors of a 3D pose, as used in the previous work [1]:

$$
\operatorname{mpjpe}(\mathbf{P}, \hat{\mathbf{P}}) = \frac{1}{NJ} \sum_{n = 1}^{N} \sum_{j = 1}^{J} \left\| \mathbf{P}_{n,j} - \hat{\mathbf{P}}_{n,j} \right\|_{2}, \tag{10}
$$

$$
\cos(\mathbf{B}, \hat{\mathbf{B}}) = -\frac{1}{N} \sum_{n = 1}^{N} \sum_{m = 1}^{M} \frac{\mathbf{B}_{n,m} \cdot \hat{\mathbf{B}}_{n,m}}{\|\mathbf{B}_{n,m}\| \, \|\hat{\mathbf{B}}_{n,m}\|}, \tag{11}
$$

where $N$ is the batch size, $J$ is the number of joints, $M$ is the number of bones, and $\mathbf{B}_{n,m} \in \mathbb{R}^3$ is the vector of the $m$-th bone.

# 5. Experiments

# 5.1. Datasets for Evaluation

We use three datasets for our experiments: UnrealEgo [1], UnrealEgo2, and UnrealEgo-RW. For UnrealEgo, we use their proposed data splits. Also, we divide UnrealEgo2 into 12,139 motions (1,002,656 stereo views) for training, 1,545 motions (127,968 stereo views) for validation, and 1,523 motions (123,488 stereo views) for testing.

Table 4. Ablation study of our model for device-relative pose estimation on UnrealEgo-RW. The numbers are in mm.
| Method | Upper body MPJPE (↓) | Lower body MPJPE (↓) | Foot MPJPE (↓) | Foot MPE (↓) |
| --- | --- | --- | --- | --- |
| Ours w/o depth information | 80.82 | 144.31 | 174.45 | 6.39 |
| Ours w/o depth padding masks | 77.29 | 140.10 | 169.95 | 5.02 |
| Ours | 77.85 | 130.97 | 155.86 | 4.83 |
Table 5. The effect of scene information (depth) per body part on UnrealEgo-RW. The numbers are in mm.

Similarly, we split UnrealEgo-RW into 547 motions (51,936 stereo views) for training, 77 motions (7,616 stereo views) for validation, and 86 motions (7,936 stereo views) for testing. We follow the existing works [1, 11, 37, 38, 40-42, 45, 52, 53] and report the results of device-relative 3D pose estimation. For UnrealEgo, we also follow the existing works [1, 12] and include the results of pelvis-relative 3D pose estimation.

# 5.2. Training Details

We resize the input RGB images and ground-truth 2D keypoint heatmaps to $256 \times 256$ and $64 \times 64$ pixels, respectively. For the 2D module, we follow the previous work [1]: we use a ResNet18 [8] pre-trained on ImageNet [4] as the encoder and train the module with a batch size of 16 and an initial learning rate of $10^{-3}$. We then train the 3D module with a batch size of 32 and an initial learning rate of $2 \cdot 10^{-4}$. Both modules are trained with the Adam optimizer [15] for ten epochs, keeping the initial learning rate for the first half of the epochs and decaying it linearly over the second half. We set the hyperparameters to $\lambda_{\mathrm{pose}} = 0.1$, $\lambda_{\mathrm{cos}} = 0.01$, and $\lambda_{\mathrm{past}} = 0.1$. We use five sequential stereo views as inputs to our model, i.e., $T = 5$, with a skip size of 3. See our supplement for more details on the network architecture.

![](images/b8ff47f7aac3cbaab619fd868be5cec580eb53d5f42d9395aa1cf7256978ff8d.jpg)

![](images/1601914d99990671ee990999c3a69dde366aeefad8f651f761acf684b0c2935c.jpg)
3D-to-2D pose reprojection

![](images/c0b25a4525d502891b94574637c2198768944e17df3903684575ac3c8e1eef1a.jpg)
reprojection

![](images/7194d77dc8227a51473a041ee594b94294628daf57d75184299be30e5bb3f38f.jpg)
3D pose estimation

![](images/3c50cd63f1945538be9e269ad317db4e40468d31ba05666ae253de74b957d232.jpg)
Figure 5. Results of our framework and comparison methods on example sequences from UnrealEgo2 (above) and UnrealEgo-RW (below). Left: MPJPE curves. Right: Outputs of our method at frames 87 and 329 of the sequences, respectively. 3D pose estimation and ground truth are colored in red and green, respectively.

![](images/0e71dce06ba4d72dc03b7bd045dee2555f3be3ea270bbca4ad2f91c95e98116f.jpg)
3D-to-2D pose reprojection

![](images/78136c63fc3ada05f13f891c651fa17fc5e4c0d8ac7bb6f4fd10ab38996d2d8f.jpg)
reprojection

![](images/e96a9b4a852ee2ff08ed4cf1b103d800aa7a293edb8b7cdba51d74cdf58bf412.jpg)
3D pose estimation

# 5.3. Evaluation

We compare our method with the existing stereo-based egocentric pose estimation methods [1, 53]. We use the official source code of Akada et al. [1] and re-implement the framework of Zhao et al. [53], as its source code is not available. Note that the comparison methods are trained on the same datasets as our model. Kang et al. [12] (an arXiv preprint at the time of submission) reports only pelvis-relative results on UnrealEgo; we therefore include them for reference. Furthermore, we are interested in the performance of the publicly available state-of-the-art method [1] with temporal inputs. Thus, we modify their 3D module such that it takes as input a sequence of stereo 2D keypoint heatmaps with the same time step as ours, i.e., $T = 5$.
Here, we replace the first and the last fully connected layers in the encoder, the pose decoder, and the heatmap reconstruction decoder of their autoencoder-based 3D module [1] with layers whose hidden dimension is $T$ times the original size. We denote this model as Baseline and train it with the same training procedure as Akada et al. [1]. Note that Akada et al. [1], Baseline, and our model use the same 2D module.

We follow the existing works [1, 11, 37, 38, 40-42, 45, 52, 53] and report the Mean Per Joint Position Error (MPJPE) and the Mean Per Joint Position Error after Procrustes Alignment [13] (PA-MPJPE). We additionally report the 3D Percentage of Correct Keypoints (3D PCK) and the Area Under the Curve (AUC) for UnrealEgo2 and UnrealEgo-RW.

Results on Synthetic Datasets. Tables 1 and 2 report the results on UnrealEgo [1] and UnrealEgo2. Our method outperforms the existing methods [1, 12, 53] and Baseline across all metrics by a significant margin, e.g., $>15\%$ on UnrealEgo [1] and $\geq 40\%$ on UnrealEgo2 in MPJPE. The qualitative results on UnrealEgo2 in Fig. 4-(left part) show that the existing methods and Baseline fail to estimate the lower body for complex poses with severe self-occlusions, such as crouching. Even in such challenging scenarios, however, our approach yields accurate 3D poses. See Fig. 5-(above part) for an MPJPE curve and visual outputs of our
| Method | MPJPE (↓) | PA-MPJPE (↓) | 3D PCK (↑) | AUC (↑) |
| --- | --- | --- | --- | --- |
| T = 1 | 108.63 | 84.69 | 77.98 | 44.15 |
| T = 3 | 108.23 | 85.28 | 78.35 | 44.54 |
| T = 5 | 104.14 | 82.18 | 80.20 | 46.22 |
| T = 7 | 104.01 | 82.43 | 80.52 | 46.10 |
Table 6. Ablation study of our model with different sequence lengths on UnrealEgo-RW. The numbers are in mm.

framework on UnrealEgo2. Our method consistently estimates accurate 3D poses compared to the existing methods. As evidenced by these results, our method sets a strong benchmark for egocentric stereo 3D pose estimation. See our supplementary material and video for more results.

Results on the Real-World Dataset. Table 3 shows quantitative results on UnrealEgo-RW. Again, our method outperforms the existing methods [1, 53] and Baseline across all metrics, e.g., by more than $10\%$ on MPJPE. See Fig. 4-(right) for qualitative results. The current state-of-the-art methods [1, 53] and Baseline show floating feet, inaccurate pelvis positions, and penetration into the floor, whereas our method estimates accurate 3D poses. See Fig. 5-(below part) for an MPJPE curve and visual outputs on an example motion of UnrealEgo-RW. The curve indicates that our method consistently shows lower 3D errors than the comparison methods. All of these results indicate the effectiveness of our proposed framework compared to the existing methods. We also visualize 2D heatmaps, 3D-to-2D pose reprojections, and 3D pose predictions from our method in Fig. 6. Even when the joint locations of the lower body lie close together in the 2D heatmaps, our approach predicts accurate lower-body poses. These results suggest that the proposed method with our portable device can open up many future applications, including animating virtual humans (Fig. 1-(g)). For the virtual human animation, we applied inverse kinematics with the estimated 3D joint locations and ground-truth camera poses to drive the character in a world coordinate system.

![](images/2e77b11a607c3d0a6cdc2dda1f39c426ba05d8a2660ab7857d4ce589147de834.jpg)
Stereo inputs

![](images/e3146908f0672d9e0886816bb289d0455391a9e3cdaa223e0210afcb8c29dae.jpg)
Figure 6. Visualization of outputs from our model on UnrealEgo-RW. 3D-to-2D pose reprojection is visualized in the same colors as in Fig. 1-(e). 3D pose estimation and ground truth are displayed in red and green, respectively.

![](images/bb449581a65ca399763951e5a742673f7e960769f766d4cbc7d77982c4ef0061.jpg)
2D joint heatmap estimation

![](images/58fccb4ac4bb0af3ec4c15c1abe169eaa8e12a6d628f3abb227ac4fe0fea2df4.jpg)

![](images/4f5d3e8fc901e75542a7996091992392e5269a756c31ff1a41cd9f01d82c81fc.jpg)
3D-to-2D pose reprojection

![](images/21fb5f2c0fec6ba1d5395392dc8d0c9e181ea9b4f08a7722884907956a5ff.jpg)
3D pose estimation

![](images/2c8793e1c98b587952e6df0b244e66eee51befb37da427a3eaba19a67aff0ffb.jpg)

Ablation Study. In Table 4, we first ablate (a) the CNN-based 3D module (Baseline) with depth data concatenated to the heatmap inputs. Naively adding this extra scene information to the CNN-based module does not help, likely because the CNN layers are affected by the invalid depth values even when the depth region masks are provided.

Next, we test our transformer-based 3D module (b) without query augmentation and (c) without depth data. Both perform worse than our full framework. We also ablate our method (d) without the depth padding masks. The result indicates that the padding masks help because they filter out the invalid values in the depth maps within the attention module.
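The role of the depth padding mask in ablation (d) can be pictured as a standard key padding mask in cross-attention: tokens originating from missing (zero) depth values simply receive no attention weight. Below is a minimal illustration with PyTorch's `nn.MultiheadAttention`, using hypothetical tensor names; it mirrors our reading of this mechanism and is not the released implementation.

```python
# Minimal sketch: invalid depth tokens are excluded via a key padding mask.
import torch
import torch.nn as nn

embed_dim, num_heads = 256, 8
cross_attn = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)

queries = torch.randn(2, 16, embed_dim)        # (B, 16 joint queries, C/2)
depth_tokens = torch.randn(2, 64, embed_dim)   # flattened depth feature tokens
# True marks tokens rendered from invalid (zero) depth; they receive no
# attention weight, so missing scene geometry cannot corrupt the joint queries.
invalid_depth = torch.zeros(2, 64, dtype=torch.bool)
invalid_depth[:, 32:] = True                   # e.g., half of the depth map is invalid

fused, _ = cross_attn(queries, depth_tokens, depth_tokens,
                      key_padding_mask=invalid_depth)
```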
These results validate that our video-based 3D scene reconstruction module and video-dependent query augmentation policy boost 3D joint localization accuracy. Next, we ablate our model (e) with 3D pose supervision on the latest frame only. Note that this ablation uses the same input data and joint queries as the original model, i.e., $T = 5$. This model estimates less accurate poses due to the missing supervision from past 3D poses. We also test (f) a single set of joint queries, i.e., $q^1$, instead of $T$ sets to predict the latest 3D pose. Similar to (e), this model cannot benefit from the supervision of past 3D poses.

We further investigate the effect of the scene information. Table 5 shows the MPJPE per body part and the Mean Penetration Error (MPE) [34, 35] between the feet and the floor. The results reveal that the depth features with the padding masks reduce the errors in the lower body while maintaining the performance on the upper body.

In Table 6, we ablate the effect of the input sequence length for our method. It is worth noting that our model with $T = 1$ already yields better results than the best existing method [1] and than Baseline, which utilizes temporal information (see Table 3). Since our model uses the same 2D module as Akada et al. [1] and Baseline, the difference comes only from the 3D module. This suggests that their autoencoder-based 3D modules with the heatmap reconstruction component are very likely not the most suitable solution for estimating 3D poses from 2D joint heatmaps, highlighting the potential of our transformer-based framework. The result also indicates that although a longer sequence brings some improvement, the se
| Method | Initial training data | MPJPE (↓) | PA-MPJPE (↓) | 3D PCK (↑) | AUC (↑) |
| --- | --- | --- | --- | --- | --- |
| Zhao et al. [53] | UnrealEgo [1] | 99.09 | 72.47 | 79.82 | 43.55 |
| Akada et al. [1] | UnrealEgo [1] | 94.87 | 69.79 | 82.78 | 46.80 |
| Baseline | UnrealEgo [1] | 83.89 | 64.30 | 86.20 | 51.63 |
| Ours | UnrealEgo [1] | 75.34 | 57.29 | 89.43 | 55.77 |
| Zhao et al. [53] | UnrealEgo2 | 97.86 | 69.92 | 81.53 | 46.32 |
| Akada et al. [1] | UnrealEgo2 | 92.48 | 67.15 | 84.25 | 48.04 |
| Baseline | UnrealEgo2 | 82.16 | 61.60 | 87.07 | 52.72 |
| Ours | UnrealEgo2 | 72.89 | 56.19 | 90.29 | 57.19 |
Table 7. Fine-tuning results of device-relative 3D pose estimation on UnrealEgo-RW. The numbers are in mm.

quence lengths of five and seven show comparable results.

Synthetic Data for Pre-training. No existing work has explored the efficacy of synthetic data for pre-training in egocentric 3D pose estimation. Thus, we further conduct experiments with models pre-trained on the synthetic datasets and fine-tuned on the real-world data. Tables 3 and 7 show that all methods benefit from training on the large-scale synthetic data despite the differences between the synthetic and real-world setups, e.g., fisheye distortion and syn-to-real domain gaps. Note that the gain of our method from UnrealEgo to UnrealEgo2 pre-training is significant, i.e., $3.3\%$ on MPJPE (75.34 mm to 72.89 mm). This suggests that it is helpful to develop not only new models but also large-scale synthetic datasets, even in the presence of different distortions and domain gaps.

# 6. Conclusion

In this paper, we proposed a new transformer-based framework that significantly boosts the accuracy of egocentric stereo 3D human pose estimation. The proposed framework leverages the scene information and temporal context of egocentric stereo video inputs via our video-based 3D scene reconstruction module and video-dependent joint query augmentation policy. Our extensive experiments on the new synthetic and real-world datasets with challenging human motions validate the effectiveness of our approach compared to the existing methods. We hope that our proposed benchmark datasets and trained models will foster the further development of methods for egocentric 3D vision.

Acknowledgment. The work was supported by the ERC Consolidator Grant 4DReply (770784) and the Nakajima Foundation. We thank Silicon Studio Corp. for providing the fisheye plug-in for Unreal Engine.

# References

[1] Hiroyasu Akada, Jian Wang, Soshi Shimada, Masaki Takahashi, Christian Theobalt, and Vladislav Golyanik. Unrealego: A new dataset for robust egocentric 3d human motion capture. In European Conference on Computer Vision (ECCV), 2022. 1, 2, 3, 5, 6, 7, 8
[2] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision (ECCV), 2020. 5
[3] Young-Woon Cha, True Price, Zhen Wei, Xinran Lu, Nicholas Rewkowski, Rohan Chabra, Zihe Qin, Hyounghun Kim, Zhaoqi Su, Yebin Liu, Adrian Ilie, Andrei State, Zhenlin Xu, Jan-Michael Frahm, and Henry Fuchs. Towards fully mobile 3d face, body, and environment capture using only head-worn cameras. IEEE Transactions on Visualization and Computer Graphics, 24(11):2993-3004, 2018. 2, 3
[4] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei. ImageNet: A Large-Scale Hierarchical Image Database. In Computer Vision and Pattern Recognition (CVPR), 2009. 6
[5] Moritz Einfalt, Katja Ludwig, and Rainer Lienhart. Uplift and upsample: Efficient 3d human pose estimation with uplifting transformers. In Winter Conference on Applications of Computer Vision (WACV), 2023. 2, 5
[6] FUJINON FE185C057HA-1 fisheye lens, 2023. https://www.fujifilm.com/de/de/business/opticaldevices/mvlems/fe185. 3
[7] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Computer Vision and Pattern Recognition (CVPR), 2022. 3
[8] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.
Deep residual learning for image recognition. In Computer Vision and Pattern Recognition (CVPR), 2016. 6 +[9] Yihui He, Rui Yan, Katerina Fragkiadaki, and Shoou-I Yu. Epipolar transformers. In Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[10] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. 2020. 2 +[11] Hao Jiang and Vamsi Krishna Ithapu. Egocentric pose estimation from human vision span. In International Conference on Computer Vision (ICCV), 2021. 1, 2, 3, 6, 7 +[12] Taeho Kang, Kyungjin Lee, Jinrui Zhang, and Youngki Lee. Ego3dpose: Capturing 3d cues from binocular egocentric views. In SIGGRAPH Asia Conference, 2023. 2, 5, 6, 7 +[13] David G. Kendall. A Survey of the Statistical Theory of Shape. Statistical Science, 4(2):87-99, 1989. 7 +[14] Rawal Khirodkar, Aayush Bansal, Lingni Ma, Richard Newcombe, Minh Vo, and Kris Kitani. Ego-humans: An egocentric 3d multi-human benchmark. In International Conference on Computer Vision (ICCV), 2023. 3 +[15] Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations (ICLR), 2015. 6 +[16] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer White- + +head, Alexander C. Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything. arXiv:2304.02643, 2023. 4 +[17] Jiaman Li, Karen Liu, and Jiajun Wu. Ego-body pose estimation via ego-head pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2023. 3 +[18] Wenhao Li, Hong Liu, Runwei Ding, Mengyuan Liu, Pichao Wang, and Wenming Yang. Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia (TMM), 2022. 2 +[19] Wenhao Li, Hong Liu, Hao Tang, Pichao Wang, and Luc Van Gool. Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[20] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[21] Yuxuan Liu, Jianxin Yang, Xiao Gu, Yijun Chen, Yao Guo, and Guang-Zhong Yang. Egofish3d: Egocentric 3d pose estimation from a fisheye camera via self-supervised learning. IEEE Transactions on Multimedia (TMM), pages 1-12, 2023. 2 +[22] Zhengyi Luo, Ryo Hachiuma, Ye Yuan, and Kris Kitani. Dynamics-regulated kinematic policy for egocentric pose estimation. 2021. 1, 2, 3 +[23] Haoyu Ma, Liangjian Chen, Deying Kong, Zhe Wang, Xingwei Liu, Hao Tang, Xiangyi Yan, Yusheng Xie, Shih-Yao Lin, and Xiaohui Xie. Transfusion: Cross-view fusion with transformer for 3d human pose estimation. In British Machine Vision Conference (BMVC), 2021. 2 +[24] Metashape, 2023. https://www.agisoft.com/.4 +[25] Mixamo, 2022. https://www MIXamo.com.3 +[26] Xiaqing Pan, Nicholas Charron, Yongqian Yang, Scott Peters, Thomas Whelan, Chen Kong, Omkar Parkhi, Richard Newcombe, and Yuheng (Carl) Ren. Aria digital twin: A new benchmark dataset for egocentric 3d machine perception. In International Conference on Computer Vision (ICCV), 2023. 3 +[27] Jinman Park, Kimathi Kaai, Saad Hossain, Norikatsu Sumi, Sirisha Rambhatla, and Paul Fieguth. Domain-guided spatiotemporal self-attention for egocentric 3d pose estimation. In Conference on Knowledge Discovery and Data Mining (KDD), 2023. 2 +[28] Sungchan Park, Eunyi You, Inhoe Lee, and Joonseok Lee. 
Towards robust and smooth 3d multi-person pose estimation from monocular videos in the wild. In International Conference on Computer Vision (ICCV), 2023. 2 +[29] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 3d human pose estimation in video with temporal convolutions and semi-supervised training. In Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[30] RenderPeople, 2022. https://renderpeople.com.3 +[31] Helge Rhodin, Christian Richardt, Dan Casas, Eldar Insafutdinov, Mohammad Shafiei, Hans-Peter Seidel, Bernt Schiele, and Christian Theobalt. Egocap: egocentric marker-less motion capture with two fisheye cameras. ACM Transactions on Graphics (TOG), 35(6):1-11, 2016. 1, 2, 3 + +[32] RIBCAGE RX0 II camera, 2023. https://www.backbone.ca/product/ribcage-rx0-2/.3 +[33] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Computer Vision and Pattern Recognition (CVPR), 2016. 2 +[34] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, and Christian Theobalt. Physcap: Physically plausible monocular 3d motion capture in real time. ACM Transactions on Graphics (TOG), 39(6), 2020. 8 +[35] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, Patrick Pérez, and Christian Theobalt. Neural monocular 3d human motion capture with physical awareness. ACM Transactions on Graphics (TOG), 40(4), 2021. 8 +[36] Zhenhua Tang, Zhaofan Qiu, Yanbin Hao, Richang Hong, and Ting Yao. 3d human pose estimation with spatiotemporal criss-cross attention. In Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2 +[37] Denis Tome, Patrick Peluse, Lourdes Agapito, and Hernan Badino. xr-egopose: Egocentric 3d human pose from an hmd camera. In International Conference on Computer Vision (ICCV), 2019. 1, 2, 3, 5, 6, 7 +[38] Denis Tome, Thiemo Alldieck, Patrick Peluse, Gerard Pons-Moll, Lourdes Agapito, Hernan Badino, and Fernando de la Torre. Selfpose: 3d egocentric pose estimation from a headset mounted camera. IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 45(6):6794-6806, 2023. 1, 2, 5, 6, 7 +[39] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems (NeurIPS), 2017. 2 +[40] Jian Wang, Lingjie Liu, Weipeng Xu, Kripasindhu Sarkar, and Christian Theobalt. Estimating egocentric 3d human pose in global space. In International Conference on Computer Vision (ICCV), 2021. 1, 2, 3, 6, 7 +[41] Jian Wang, Lingjie Liu, Weipeng Xu, Kripasindhu Sarkar, Diogo Luvizon, and Christian Theobalt. Estimating egocentric 3d human pose in the wild with external weak supervision. In Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3 +[42] Jian Wang, Diogo Luvizon, Weipeng Xu, Lingjie Liu, Kripasindhu Sarkar, and Christian Theobalt. Scene-aware egocentric 3d human pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 6, 7 +[43] Jian Wang, Zhe Cao, Diogo Luvizon, Lingjie Liu, Kripasindhu Sarkar, Danhang Tang, Thabo Beeler, and Christian Theobalt. Egocentric whole-body motion capture with fisheyevit and diffusion-based motion refinement. In Computer Vision and Pattern Recognition (CVPR), 2024. 2 +[44] Tao Wang, Jianfeng Zhang, Yujun Cai, Shuicheng Yan, and Jiashi Feng. Direct multi-view multi-person 3d human pose estimation. Advances in Neural Information Processing Systems (NeurIPS), 2021. 
2, 5 +[45] Weipeng Xu, Avishek Chatterjee, Michael Zollhoefer, Helge Rhodin, Pascal Fua, Hans-Peter Seidel, and Christian Theobalt. $\mathrm{Mo}^2\mathrm{Cap}^2$ : Real-time mobile 3d motion capture with a cap-mounted fisheye camera. IEEE Transactions on Visualization and Computer Graphics, 2019. 1, 2, 3, 6, 7 + +[46] Honghong Yang, Longfei Guo, Yumei Zhang, and Xiaojun Wu. U-shaped spatial-temporal transformer network for 3d human pose estimation. Machine Vision and Applications, 33(6):82, 2022. 2 +[47] Yingxuan You, Hong Liu, Ti Wang, Wenhao Li, Runwei Ding, and Xia Li. Co-evolution of pose and mesh for 3d human body estimation from video. In International Conference on Computer Vision (ICCV), 2023. 2 +[48] Ye Yuan and Kris Kitani. Ego-pose estimation and forecasting as real-time pd control. In International Conference on Computer Vision (ICCV), 2019. 1, 2, 3 +[49] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Jun-song Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. In Computer Vision and Pattern Recognition (CVPR), 2022. 2, 5 +[50] Siwei Zhang, Qianli Ma, Yan Zhang, Zhiyin Qian, Taein Kwon, Marc Pollefeys, Federica Bogo, and Siyu Tang. Ego-body: Human body shape and motion of interacting people from head-mounted devices. In European conference on computer vision (ECCV), 2022. 3 +[51] Siwei Zhang, Qianli Ma, Yan Zhang, Sadegh Aliakbarian, Darren Cosker, and Siyu Tang. Probabilistic human mesh recovery in 3d scenes from egocentric views. In International Conference on Computer Vision (ICCV), 2023. 2, 3 +[52] Yahui Zhang, Shaodi You, and Theo Gevers. Automatic calibration of the fisheye camera for egocentric 3d human pose estimation from a single image. In Winter Conference on Applications of Computer Vision (WACV), 2021. 1, 2, 6, 7 +[53] Dongxu Zhao, Zhen Wei, Jisan Mahmud, and Jan-Michael Frahm. Egoglass: Egocentric-view human pose estimation from an eyeglass frame. In International Conference on 3D Vision (3DV), 2021. 1, 2, 3, 5, 6, 7, 8 +[54] Qitao Zhao, Ce Zheng, Mengyuan Liu, Pichao Wang, and Chen Chen. Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2 +[55] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In International Conference on Computer Vision (ICCV), 2021. +[56] Jieming Zhou, Tong Zhang, Zeeshan Hayden, Lars Petersson, and Mehrtash Harandi. Diff3dhpe: A diffusion model for 3d human pose estimation. In International Conference on Computer Vision (ICCV) Workshops, 2023. +[57] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: A unified perspective on learning human motion representations. In International Conference on Computer Vision (ICCV), 2023. +[58] Yiran Zhu, Xing Xu, Fumin Shen, Yanli Ji, Lianli Gao, and Heng Tao Shen. PosegTac: Graph transformer encoder-decoder with atrous convolution for 3d human pose estimation. In International Joint Conference on Artificial Intelligence (IJCAI), 2021. 
2 \ No newline at end of file diff --git a/2024/3D Human Pose Perception from Egocentric Stereo Videos/images.zip b/2024/3D Human Pose Perception from Egocentric Stereo Videos/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..e46a60dbd4f0c7f20d0f41f4e0924e0f417544b1 --- /dev/null +++ b/2024/3D Human Pose Perception from Egocentric Stereo Videos/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c4f287c29201a115828acee1165c32812879f29c2dae7f8d7ba72ea8fa6c677 +size 685384 diff --git a/2024/3D Human Pose Perception from Egocentric Stereo Videos/layout.json b/2024/3D Human Pose Perception from Egocentric Stereo Videos/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..cfaade0d61532836a4415f6d095a10fe0d978f6c --- /dev/null +++ b/2024/3D Human Pose Perception from Egocentric Stereo Videos/layout.json @@ -0,0 +1,11410 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 116, + 103, + 479, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 103, + 479, + 121 + ], + "spans": [ + { + "bbox": [ + 116, + 103, + 479, + 121 + ], + "type": "text", + "content": "3D Human Pose Perception from Egocentric Stereo Videos" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 94, + 144, + 175, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 144, + 175, + 158 + ], + "spans": [ + { + "bbox": [ + 94, + 144, + 175, + 158 + ], + "type": "text", + "content": "Hiroyasu Akada" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 193, + 144, + 397, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 144, + 397, + 171 + ], + "spans": [ + { + "bbox": [ + 193, + 144, + 397, + 171 + ], + "type": "text", + "content": "Jian Wang Vladislav Golyanik Max Planck Institute for Informatics, SIC" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 407, + 144, + 500, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 407, + 144, + 500, + 156 + ], + "spans": [ + { + "bbox": [ + 407, + 144, + 500, + 156 + ], + "type": "text", + "content": "Christian Theobalt" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 48, + 198, + 124, + 311 + ], + "blocks": [ + { + "bbox": [ + 48, + 198, + 124, + 311 + ], + "lines": [ + { + "bbox": [ + 48, + 198, + 124, + 311 + ], + "spans": [ + { + "bbox": [ + 48, + 198, + 124, + 311 + ], + "type": "image", + "image_path": "32798e62043c3ac39b403815deb7be4d445d827994a554ed1cb13ca22411de73.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 82, + 313, + 94, + 322 + ], + "lines": [ + { + "bbox": [ + 82, + 313, + 94, + 322 + ], + "spans": [ + { + "bbox": [ + 82, + 313, + 94, + 322 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 126, + 199, + 179, + 253 + ], + "blocks": [ + { + "bbox": [ + 126, + 199, + 179, + 253 + ], + "lines": [ + { + "bbox": [ + 126, + 199, + 179, + 253 + ], + "spans": [ + { + "bbox": [ + 126, + 199, + 179, + 253 + ], + "type": "image", + "image_path": "78bc11864321b84e7ecfc867bc9854e84042332aaa51fd250b6f5a43d5ea1a6d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 126, + 257, + 179, + 311 + ], + "blocks": [ + { + "bbox": [ + 126, + 257, + 179, + 311 + ], + "lines": [ + { + "bbox": [ + 126, + 257, + 179, 
+ 311 + ], + "spans": [ + { + "bbox": [ + 126, + 257, + 179, + 311 + ], + "type": "image", + "image_path": "34b3570884f1e618c6019c77328ba4cec270911dade00e158155f0f4ced7d98b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 313, + 160, + 323 + ], + "lines": [ + { + "bbox": [ + 149, + 313, + 160, + 323 + ], + "spans": [ + { + "bbox": [ + 149, + 313, + 160, + 323 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 181, + 199, + 257, + 311 + ], + "blocks": [ + { + "bbox": [ + 181, + 199, + 257, + 311 + ], + "lines": [ + { + "bbox": [ + 181, + 199, + 257, + 311 + ], + "spans": [ + { + "bbox": [ + 181, + 199, + 257, + 311 + ], + "type": "image", + "image_path": "70cc8313ce36826cf52ae5a7de27ef4b51d1fc310842a0f92a56c4c10bbd7489.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 313, + 225, + 323 + ], + "lines": [ + { + "bbox": [ + 214, + 313, + 225, + 323 + ], + "spans": [ + { + "bbox": [ + 214, + 313, + 225, + 323 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 324, + 547, + 369 + ], + "lines": [ + { + "bbox": [ + 46, + 324, + 547, + 369 + ], + "spans": [ + { + "bbox": [ + 46, + 324, + 547, + 369 + ], + "type": "text", + "content": "Figure 1. 3D human pose estimation results of our proposed method from egocentric stereo fisheye videos. Left: results on synthetic images; (a) reference RGB view of the scene; (b) 3D-to-2D pose re-projections, and (c) a 3D pose in a scene mesh reconstructed by our framework. Right: results on real-world images; (d) reference view; (e) 3D-to-2D pose re-projections; (f) a 3D pose in the reconstructed scene, and (g) 3D virtual character animation (possible future application of our method)." 
+ } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 262, + 199, + 338, + 311 + ], + "blocks": [ + { + "bbox": [ + 262, + 199, + 338, + 311 + ], + "lines": [ + { + "bbox": [ + 262, + 199, + 338, + 311 + ], + "spans": [ + { + "bbox": [ + 262, + 199, + 338, + 311 + ], + "type": "image", + "image_path": "bf98069469d4197ec25f8ec9086a0483a755b084a34cfd118da947e8f97282a4.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 296, + 313, + 308, + 323 + ], + "lines": [ + { + "bbox": [ + 296, + 313, + 308, + 323 + ], + "spans": [ + { + "bbox": [ + 296, + 313, + 308, + 323 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 339, + 199, + 392, + 253 + ], + "blocks": [ + { + "bbox": [ + 339, + 199, + 392, + 253 + ], + "lines": [ + { + "bbox": [ + 339, + 199, + 392, + 253 + ], + "spans": [ + { + "bbox": [ + 339, + 199, + 392, + 253 + ], + "type": "image", + "image_path": "f8aa611c225244d33dd9f3267562aee4d13f8a2a27de2f7a2cdc49e77c6a65e7.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 340, + 257, + 392, + 311 + ], + "blocks": [ + { + "bbox": [ + 340, + 257, + 392, + 311 + ], + "lines": [ + { + "bbox": [ + 340, + 257, + 392, + 311 + ], + "spans": [ + { + "bbox": [ + 340, + 257, + 392, + 311 + ], + "type": "image", + "image_path": "3adfca99ca8364a67f1d54e3950637089d9f3de1f073bd8e1ef40654fb260744.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 364, + 313, + 375, + 323 + ], + "lines": [ + { + "bbox": [ + 364, + 313, + 375, + 323 + ], + "spans": [ + { + "bbox": [ + 364, + 313, + 375, + 323 + ], + "type": "text", + "content": "(e)" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 393, + 199, + 468, + 311 + ], + "blocks": [ + { + "bbox": [ + 393, + 199, + 468, + 311 + ], + "lines": [ + { + "bbox": [ + 393, + 199, + 468, + 311 + ], + "spans": [ + { + "bbox": [ + 393, + 199, + 468, + 311 + ], + "type": "image", + "image_path": "457e76fe84cd7c48b924fdabe7a6ac6b25f715964f12a39f10f3dfda678f6b09.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 429, + 313, + 439, + 323 + ], + "lines": [ + { + "bbox": [ + 429, + 313, + 439, + 323 + ], + "spans": [ + { + "bbox": [ + 429, + 313, + 439, + 323 + ], + "type": "text", + "content": "(f)" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 470, + 199, + 545, + 311 + ], + "blocks": [ + { + "bbox": [ + 470, + 199, + 545, + 311 + ], + "lines": [ + { + "bbox": [ + 470, + 199, + 545, + 311 + ], + "spans": [ + { + "bbox": [ + 470, + 199, + 545, + 311 + ], + "type": "image", + "image_path": "a3f374da09d58bd9853fde02a00bc166f7cb39516b785a35e949be4f26668c23.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 503, + 313, + 515, + 323 + ], + "lines": [ + { + "bbox": [ + 503, + 313, + 515, + 323 + ], + "spans": [ + { + "bbox": [ + 503, + 313, + 515, + 323 + ], + "type": "text", + "content": "(g)" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "bbox": [ + 143, + 379, + 192, + 391 + ], 
+ "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 379, + 192, + 391 + ], + "spans": [ + { + "bbox": [ + 143, + 379, + 192, + 391 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 46, + 405, + 289, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 405, + 289, + 681 + ], + "spans": [ + { + "bbox": [ + 46, + 405, + 289, + 681 + ], + "type": "text", + "content": "While head-mounted devices are becoming more compact, they provide egocentric views with significant self-occlusions of the device user. Hence, existing methods often fail to accurately estimate complex 3D poses from egocentric views. In this work, we propose a new transformer-based framework to improve egocentric stereo 3D human pose estimation, which leverages the scene information and temporal context of egocentric stereo videos. Specifically, we utilize 1) depth features from our 3D scene reconstruction module with uniformly sampled windows of egocentric stereo frames, and 2) human joint queries enhanced by temporal features of the video inputs. Our method is able to accurately estimate human poses even in challenging scenarios, such as crouching and sitting. Furthermore, we introduce two new benchmark datasets, i.e., UnrealEgo2 and UnrealEgo-RW (RealWorld). The proposed datasets offer a much larger number of egocentric stereo views with a wider variety of human motions than the existing datasets, allowing comprehensive evaluation of existing and upcoming methods. Our extensive experiments show that the proposed approach significantly outperforms previous methods. UnrealEgo2, UnrealEgo-RW, and trained models are available on our project page1 and Benchmark Challenge2." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 379, + 386, + 391 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 379, + 386, + 391 + ], + "spans": [ + { + "bbox": [ + 307, + 379, + 386, + 391 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 399, + 545, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 399, + 545, + 496 + ], + "spans": [ + { + "bbox": [ + 304, + 399, + 545, + 496 + ], + "type": "text", + "content": "Egocentric 3D human motion capture using wearable devices has received increased attention recently [1, 11, 22, 31, 37, 38, 40-42, 45, 48, 52, 53]. Different from traditional vision-based motion capture setups that require a fixed recording space, egocentric systems allow flexible motion capture in less constrained situations. Therefore, the egocentric setups offer various applications, such as motion analysis and XR technologies (Fig. 1-(g))." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 304, + 497, + 546, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 497, + 546, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 497, + 546, + 628 + ], + "type": "text", + "content": "Previous works proposed various egocentric methods to capture device users. On the one hand, the vast majority of existing methods—which use a monocular camera—would fail for complex human poses due to depth ambiguity and self-occlusion. On the other hand, the methods designed for stereo devices do not yet realize the full potential of their stereo settings, especially with the most recent compact eyeglasses-based setups [1, 53]. 
Specifically, they do not deliver high 3D reconstruction accuracy across different scenarios. Moreover, these approaches do not consider scene information, which further limits their accuracy." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 304, + 630, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 547, + 714 + ], + "type": "text", + "content": "To address the challenges outlined above, we propose a new transformer-based framework for egocentric 3D human motion capture from compact eyeglasses-based devices; see Fig. 1. The first step of our framework is to estimate 2D joint heatmaps from egocentric stereo fisheye RGB videos (Sec. 4.1). These 2D joint heatmaps are then processed with human joint queries in our transformer-based 3D mod" + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 693, + 251, + 702 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 693, + 251, + 702 + ], + "spans": [ + { + "bbox": [ + 57, + 693, + 251, + 702 + ], + "type": "text", + "content": "1https://4dqv.mpi-inf.mpq.de/UnrealEgo2/" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 59, + 703, + 223, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 223, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 223, + 712 + ], + "type": "text", + "content": "2https://unrealego.mpi-inf.mpg.de/" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "767" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 288 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 288 + ], + "type": "text", + "content": "ule to estimate 3D poses. Here, we leverage the scene information and temporal context of the input videos in the 3D module to improve estimation accuracy. Firstly, we use uniformly sampled windows of egocentric stereo frames to reconstruct a 3D background scene using Structure from Motion (SfM) [33], obtaining scene depth as additional information for the 3D module (Sec. 4.2 and 4.3). In our challenging eyeglasses-based setup, however, the 3D scene and camera poses can not always be estimated due to severe self-occlusion in the egocentric images. 
This results in depth maps with zero (invalid) values and undesired computation of network gradients during training. To mitigate this issue, we propose to use depth padding masks that prevent processing such invalid depth values in the 3D module. Additionally, we propose video-dependent query augmentation that enhances the joint queries with the temporal context of stereo video inputs to effectively capture the temporal relation of human motions at a joint level (Sec. 4.4)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 288, + 289, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 288, + 289, + 430 + ], + "spans": [ + { + "bbox": [ + 46, + 288, + 289, + 430 + ], + "type": "text", + "content": "We also introduce two new benchmark datasets: UnrealEgo2 and UnrealEgo-RW. UnrealEgo2 is an extended version of UnrealEgo [1] and the largest eyeglasses-based synthetic data with various new motions, offering " + }, + { + "bbox": [ + 46, + 288, + 289, + 430 + ], + "type": "inline_equation", + "content": "2.8 \\times" + }, + { + "bbox": [ + 46, + 288, + 289, + 430 + ], + "type": "text", + "content": " larger data (2.5M images) than the existing dataset [1]. UnrealEgo-RW is a real-world dataset recorded with our newly developed device that resembles the virtual eyeglasses-based setup [1], offering 260k images with various motions and 3D poses. The proposed datasets make it possible to evaluate existing and upcoming methods on a variety of motions, not only in synthetic scenes but also in real-world cases." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 59, + 431, + 277, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 431, + 277, + 443 + ], + "spans": [ + { + "bbox": [ + 59, + 431, + 277, + 443 + ], + "type": "text", + "content": "In short, the contributions of this paper are as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 444, + 287, + 601 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 47, + 444, + 287, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 444, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 47, + 444, + 287, + 479 + ], + "type": "text", + "content": "- The transformer-based framework for egocentric stereo 3D human pose estimation that accounts for temporal context in egocentric stereo views." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 480, + 287, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 480, + 287, + 529 + ], + "spans": [ + { + "bbox": [ + 47, + 480, + 287, + 529 + ], + "type": "text", + "content": "- 3D pose estimation is enhanced via the utilization of scene information from our video-based 3D scene reconstruction module as well as joint queries obtained from our video-dependent query augmentation policy." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 529, + 287, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 529, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 47, + 529, + 287, + 601 + ], + "type": "text", + "content": "- A new portable device for egocentric stereo view capture with its specification and two new benchmark datasets: UnrealEgo2 and UnrealEgo-RW recorded with our device. The proposed datasets allow for a comprehensive evaluation of methods for egocentric 3D human pose estimation from stereo views." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 601, + 287, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 601, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 601, + 287, + 685 + ], + "type": "text", + "content": "Our experiments demonstrate that the proposed method outperforms the previous state-of-the-art approaches by a substantial margin, i.e., " + }, + { + "bbox": [ + 46, + 601, + 287, + 685 + ], + "type": "inline_equation", + "content": ">15\\%" + }, + { + "bbox": [ + 46, + 601, + 287, + 685 + ], + "type": "text", + "content": " on UnrealEgo [1], " + }, + { + "bbox": [ + 46, + 601, + 287, + 685 + ], + "type": "inline_equation", + "content": "\\geq 40\\%" + }, + { + "bbox": [ + 46, + 601, + 287, + 685 + ], + "type": "text", + "content": " on UnrealEgo2, and " + }, + { + "bbox": [ + 46, + 601, + 287, + 685 + ], + "type": "inline_equation", + "content": "\\geq 10\\%" + }, + { + "bbox": [ + 46, + 601, + 287, + 685 + ], + "type": "text", + "content": " on UnrealEgo-RW (on MPJPE). We release UnrealEgo2, UnrealEgo-RW, and our trained models on our project page3 and Benchmark Challenge4 to foster the area of egocentric 3D vision." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 91, + 547, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 547, + 295 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 547, + 295 + ], + "type": "text", + "content": "Egocentric 3D Human Motion Capture. Recent years witnessed significant innovations in egocentric 3D human pose estimation. To capture device users, many existing works use downward-facing cameras and the existing methods can be categorized into two groups. The first group are monocular approaches [11, 21, 22, 27, 37, 38, 40, 41, 43, 45, 48, 52]. For example, Wang et al. [43] uses a diffusion-based [10] motion prior to tackle self-conclusions. Due to the depth ambiguity, monocular methods often fail to estimate accurate 3D poses. Wang et al. [42] tackled this issue by projecting depth and 2D pose features into a pre-defined voxel space. This method requires additional training with ground-truth depths and human body segmentation; it cannot easily be extended for multi-view or temporal inputs. Zhang et al. [51] utilized a diffusion model [10] conditioned on a 3D scene to generate poses. They require pre-scanned scene mesh as an input and cannot capture a device user." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 295, + 547, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 295, + 547, + 462 + ], + "spans": [ + { + "bbox": [ + 304, + 295, + 547, + 462 + ], + "type": "text", + "content": "The second group, including our work, focuses on the multi-view (often stereo) setting. Rhodin et al. [31] proposed an optimization approach whereas Cha et al. [3] used eight cameras to estimate a 3D body and reconstruct a 3D scene separately. Other works [1, 53] used the multi-branch autoencoder [37] to the stereo setup. Kang et al. [12] (arXiv pre-print at the time of submission) leveraged a stereomatching mechanism and perspective embedding heatmaps. 
In contrast to the existing methods, we propose a new transformer-based method that effectively utilizes egocentric stereo videos via our video-based 3D scene reconstruction module and video-dependent query augmentation policy. Our method considers the scene information without the supervision of the scene data." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 462, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 462, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 462, + 547, + 715 + ], + "type": "text", + "content": "Transformers in 3D Human Pose Estimation from External Cameras. 3D pose estimation from external cameras has shown significant progress due to the advances in transformer architectures [39]. Some works [20, 47] predict 3D human pose and mesh from monocular views. Other works [5, 18, 19, 28, 29, 36, 46, 49, 54-58] present a 2D-to-3D lifting module that estimates 3D poses from monocular 2D joints obtained with off-the-shelf 2D joint detectors. Although their lifting modules show impressive results, those monocular methods cannot be easily applied to our stereo setting. On the other hand, some works utilize transformers in multi-view settings. He et al. [9] and Ma et al. [23] aggregate stereo information on epipolar lines of stereo images, which are difficult to obtain from fisheye images. Recent work [44] regresses multi-person 3D poses from multi-view inputs, powered by projective attention and query adaptation. However, no existing works explored the potential of transformers along with 2D joint heatmaps or explicit scene information in stereo 3D pose estimation. In this paper, we propose a transformer-based framework that accounts for the temporal relation of human motion at a joint level via" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 56, + 692, + 251, + 702 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 692, + 251, + 702 + ], + "spans": [ + { + "bbox": [ + 56, + 692, + 251, + 702 + ], + "type": "text", + "content": "3https://4dqv.mpi-inf.mpg.de/UnrealEgo2/" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 702, + 223, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 702, + 223, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 702, + 223, + 713 + ], + "type": "text", + "content": "4https://unrealego.mpi-inf.mpg.de/" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "text", + "content": "768" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 286, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 286, + 95 + ], + "type": "text", + "content": "intermediate 2D joint heatmap and depth maps even with inaccurate depth values mixed in the framework." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 96, + 286, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 96, + 286, + 312 + ], + "spans": [ + { + "bbox": [ + 46, + 96, + 286, + 312 + ], + "type": "text", + "content": "Datasets for Egocentric 3D Human Pose Estimation. 
Several works proposed unique setups to create datasets, using a monocular camera [11, 17, 22, 37, 40, 41, 45, 48] and forward-facing cameras [11, 14, 17, 22, 26, 48, 50, 51]. There also exist datasets captured with stereo devices [3, 7, 14, 26, 31, 53]. However, they are small [31] with limited motion types [31, 53], not publicly available [3, 53], or do not provide ground truth 3D poses of device users [7, 14, 26]. Recently, Akada et al. [1] introduced UnrealEgo, a synthetic dataset based on virtual eyeglasses with two fisheye cameras. However, they provide only synthetic images. Meanwhile, more glasses-based stereo datasets that offer a wider variety of motions or real-world footage are required nowadays for an extensive evaluation of existing and upcoming methods. Hence, we introduce two new benchmark datasets that in their characteristics go beyond the existing data: UnrealEgo2 and UnrealEgo-RW. We describe the proposed datasets in the following section." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 323, + 203, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 323, + 203, + 335 + ], + "spans": [ + { + "bbox": [ + 47, + 323, + 203, + 335 + ], + "type": "text", + "content": "3. Mobile Device and Datasets" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 342, + 286, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 286, + 485 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 286, + 485 + ], + "type": "text", + "content": "We present two new datasets for egocentric stereo 3D motion capture: UnrealEgo2 and UnrealEgo-RW; see Fig. 1. Please watch our supplementary video for visualizations. UnrealEgo2 Dataset. To create UnrealEgo2 (an extension of UnrealEgo [1]), we adapt the publicly available setup with a virtual eyeglasses device [1]. This setup comes with two downward-facing fisheye cameras attached " + }, + { + "bbox": [ + 46, + 342, + 286, + 485 + ], + "type": "inline_equation", + "content": "12\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 342, + 286, + 485 + ], + "type": "text", + "content": " apart from each other on the glasses frames. The camera's field of view is " + }, + { + "bbox": [ + 46, + 342, + 286, + 485 + ], + "type": "inline_equation", + "content": "170^{\\circ}" + }, + { + "bbox": [ + 46, + 342, + 286, + 485 + ], + "type": "text", + "content": ". With this device, we capture 17 realistic 3D human models [30] animated by the Mixamo [25] dataset in various 3D environments. We record simple to highly complex motions such as crouching and crawling, for 14 hours." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 486, + 286, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 286, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 286, + 581 + ], + "type": "text", + "content": "Overall, UnrealEgo2 offers 15,207 motions and " + }, + { + "bbox": [ + 46, + 486, + 286, + 581 + ], + "type": "inline_equation", + "content": ">1.25\\mathrm{M}" + }, + { + "bbox": [ + 46, + 486, + 286, + 581 + ], + "type": "text", + "content": " stereo views (2.5M images) as well as depth maps with a resolution " + }, + { + "bbox": [ + 46, + 486, + 286, + 581 + ], + "type": "inline_equation", + "content": "1024\\times 1024" + }, + { + "bbox": [ + 46, + 486, + 286, + 581 + ], + "type": "text", + "content": " pixel rendered at 25 frames per second. Each frame is annotated with 32 body and 40 hand joints. 
Note that UnrealEgo2 is the largest glasses-based dataset and " + }, + { + "bbox": [ + 46, + 486, + 286, + 581 + ], + "type": "inline_equation", + "content": "2.8\\times" + }, + { + "bbox": [ + 46, + 486, + 286, + 581 + ], + "type": "text", + "content": " larger than UnrealEgo. Also, it does not share the same motions with UnrealEgo, providing a larger motion variety for a comprehensive evaluation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "content": "Design of Our Mobile Device. Evaluation with real-world datasets plays a pivotal role in computer vision research. Therefore, we build a new portable device; see Fig. 2. Our device is based on a helmet with two RIBCAGE RX0 II cameras [32] and two FUJINON FE185C057HA-1 fisheye lenses [6]. We placed the cameras " + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "inline_equation", + "content": "12\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "content": " away from each other and " + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "inline_equation", + "content": "2\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "content": " away from user's face. We cropped the margins of the egocentric images to resemble the field of view of " + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "inline_equation", + "content": "170^{\\circ}" + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "content": " of the UnrealEgo and UnrealEgo2 setups. Note that our setup is more compact than EgoCap [31] that placed cameras " + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "inline_equation", + "content": "25\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "content": " away from user's face." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 70, + 466, + 137 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 466, + 137 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 466, + 137 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 466, + 137 + ], + "type": "image", + "image_path": "81ffc82403fe2599dd97479c7fa1a3a4102d1fdd1f5fc02aa55f787c93d1f63f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 323, + 141, + 526, + 152 + ], + "lines": [ + { + "bbox": [ + 323, + 141, + 526, + 152 + ], + "spans": [ + { + "bbox": [ + 323, + 141, + 526, + 152 + ], + "type": "text", + "content": "Figure 2. Our portable setup to acquire UnrealEgo-RW." 
UnrealEgo-RW (Real-World) Dataset. With our device, we record various motions of 16 identities in a multi-view motion capture studio (Fig. 1-(d)). We capture simple and challenging activities, e.g., crawling and dancing, for 1.5 hours. This is in strong contrast to the existing real-world stereo dataset [53] (not publicly available) that records only three simple actions, i.e., sitting, standing, and walking.

In total, we obtained 591 motion segments from 16 identities with variously textured clothing. This results in more than 130k stereo views (260k images) at a resolution of $872\times 872$ pixels recorded at 25 frames per second, with ground-truth 3D poses of 16 joints. Note that UnrealEgo-RW offers $4.3\times$ more data with a wider variety of motions than the publicly available real-world stereo data [31].

4. Method

We propose a new framework for egocentric stereo 3D human pose estimation as shown in Fig. 3. Our framework first estimates 2D joint heatmaps from egocentric stereo fisheye videos in our 2D module (Sec. 4.1). The heatmaps and input videos are then processed in our segmentation module to obtain 2D human body masks (Sec. 4.2). Next, we use uniformly sampled windows of input frames and human body masks to reconstruct 3D scenes (Sec. 4.3). Here, we render depth maps and depth region masks from the reconstructed mesh.
Finally, our transformer-based 3D module processes the joint heatmaps, depth information, and joint queries to estimate 3D poses (Sec. 4.4). Here, the 3D module leverages depth padding masks based on the availability of the depth maps as well as joint queries enhanced by the stereo video features from the 2D module.

4.1. 2D Pose Estimation

Given egocentric stereo videos with $T$ frames $\{\mathbf{I}_{\mathrm{Left}}^t,\mathbf{I}_{\mathrm{Right}}^t\in \mathbb{R}^{H\times W\times 3} \mid t = 1,2,\dots,T\}$, we use the existing stereo 2D joint heatmap estimator [1] to obtain a sequence of corresponding 2D heatmaps of 15 joints $\{\mathbf{H}_{\mathrm{Left}}^t,\mathbf{H}_{\mathrm{Right}}^t\in \mathbb{R}^{\frac{H}{4}\times \frac{W}{4}\times 15}\}$, covering the neck, upper arms, lower arms, hands, thighs, calves, feet, and balls of the feet. We also extract intermediate feature maps $\{\mathbf{F}_{\mathrm{Left}}^t,\mathbf{F}_{\mathrm{Right}}^t\in \mathbb{R}^{\frac{H}{32}\times \frac{W}{32}\times C}\}$, where $C = 512$, which are used later in the 3D module.
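As a rough illustration of the interface this subsection implies, the PyTorch sketch below maps one fisheye view to 15-joint heatmaps at $H/4 \times W/4$ and $C=512$ features at $H/32 \times W/32$. The backbone and head layers are placeholders of our own, not the stereo estimator of [1]; only the tensor shapes follow the text.

```python
import torch
import torch.nn as nn

class Stereo2DModuleSketch(nn.Module):
    """Illustrative stand-in for the per-view 2D heatmap estimator (Sec. 4.1)."""

    def __init__(self, num_joints: int = 15, feat_dim: int = 512):
        super().__init__()
        self.backbone = nn.Sequential(            # placeholder encoder
            nn.Conv2d(3, 64, 7, stride=4, padding=3), nn.ReLU(),
            nn.Conv2d(64, feat_dim, 3, stride=8, padding=1), nn.ReLU(),
        )
        self.head = nn.Sequential(                # placeholder heatmap head
            nn.Upsample(scale_factor=8, mode="bilinear", align_corners=False),
            nn.Conv2d(feat_dim, num_joints, 1),
        )

    def forward(self, img):                       # img: (B, 3, H, W), one view
        feat = self.backbone(img)                 # (B, 512, H/32, W/32)
        heatmap = self.head(feat)                 # (B, 15, H/4, W/4)
        return heatmap, feat

module = Stereo2DModuleSketch()
hm_left, f_left = module(torch.randn(2, 3, 256, 256))   # I_Left^t
print(hm_left.shape, f_left.shape)                       # (2, 15, 64, 64), (2, 512, 8, 8)
```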
Figure 3. Overview of our framework. Our method takes egocentric stereo videos $\{\mathbf{I}_{\mathrm{Left}}^t,\mathbf{I}_{\mathrm{Right}}^t\}$ as inputs. We first apply the 2D module to obtain 2D joint heatmaps $\{\mathbf{H}_{\mathrm{Left}}^t,\mathbf{H}_{\mathrm{Right}}^t\}$ and video features $\{\mathbf{F}_{\mathrm{Left}}^t,\mathbf{F}_{\mathrm{Right}}^t\}$ (Sec. 4.1). The heatmaps are used with the input videos to create human body masks $\{\mathbf{M}_{\mathrm{Left}}^t,\mathbf{M}_{\mathrm{Right}}^t\}$ (Sec. 4.2). Next, we use uniformly sampled windows of input frames and human body masks to reconstruct a 3D scene mesh (Sec. 4.3). From the mesh, we generate depth maps $\{\mathbf{D}_{\mathrm{Left}}^t,\mathbf{D}_{\mathrm{Right}}^t\}$ and depth region masks $\{\mathbf{R}_{\mathrm{Left}}^t,\mathbf{R}_{\mathrm{Right}}^t\}$. Note that this diagram shows an example case of missing depth values for the second input frame.
Lastly, the depth data, 2D joint heatmaps, video features, joint queries $q^{t}$, and the padding masks $V_{\mathrm{Depth}}^{t}$ are processed in the 3D module to estimate 3D poses $\mathbf{P}^t$ (Sec. 4.4).

4.2. Human Body Segmentation

To reconstruct 3D scenes from egocentric videos, it is necessary to identify the pixels corresponding to the background environment. Therefore, we integrate an existing segmentation method, i.e., the ViT-H SAM model [16], as our segmentation network $\mathcal{F}_{\mathrm{SAM}}$. In this module, we first obtain 2D joint locations from the 2D joint heatmaps $\{\widehat{\mathbf{H}}_{\mathrm{Left}}^t,\widehat{\mathbf{H}}_{\mathrm{Right}}^t\}$. Then, we use the input video frames $\{\mathbf{I}_{\mathrm{Left}}^t,\mathbf{I}_{\mathrm{Right}}^t\}$ and their corresponding 2D joints to extract human body masks $\{\mathbf{M}_{\mathrm{Left}}^t,\mathbf{M}_{\mathrm{Right}}^t\in \mathbb{R}^{H\times W\times 1}\}$:

$$\mathbf{M}_{\mathrm{Left}}^{t} = \mathcal{F}_{\mathrm{SAM}}\left(\mathbf{I}_{\mathrm{Left}}^{t}, \widehat{\mathbf{H}}_{\mathrm{Left}}^{t}\right). \tag{1}$$

The same process is applied to obtain $\mathbf{M}_{\mathrm{Right}}^t$. Note that we use the SAM model without re-training on ground-truth human body masks. Instead, we guide the predictions of SAM with joint positions extracted from the 2D heatmaps.
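The sketch below shows one way to realize Eq. (1) with the public segment-anything package: the predicted 2D joints become foreground point prompts for SAM. The checkpoint path and the heatmap-to-joint conversion are our own illustration; the paper only states that SAM is guided by joint positions from the 2D heatmaps.

```python
import numpy as np
import torch
from segment_anything import SamPredictor, sam_model_registry

def joints_from_heatmaps(heatmaps: torch.Tensor, stride: int = 4) -> np.ndarray:
    """Argmax of each H/4 x W/4 joint heatmap, rescaled to image pixels."""
    j, _, w = heatmaps.shape
    flat = heatmaps.reshape(j, -1).argmax(dim=1)
    ys = torch.div(flat, w, rounding_mode="floor")
    xs = flat % w
    return torch.stack([xs, ys], dim=1).float().mul(stride).cpu().numpy()  # (J, 2), (x, y)

# "sam_vit_h.pth" is a placeholder path to the ViT-H SAM checkpoint used in Sec. 4.2.
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h.pth")
predictor = SamPredictor(sam)

def body_mask(image_rgb: np.ndarray, heatmaps: torch.Tensor) -> np.ndarray:
    """M^t = F_SAM(I^t, H^t): prompt SAM with the 2D joints as foreground points."""
    predictor.set_image(image_rgb)                      # (H, W, 3) uint8 RGB frame
    points = joints_from_heatmaps(heatmaps)             # (15, 2) prompt coordinates
    labels = np.ones(len(points), dtype=np.int64)       # all points mark the body
    masks, scores, _ = predictor.predict(
        point_coords=points, point_labels=labels, multimask_output=True)
    return masks[scores.argmax()]                       # best (H, W) boolean mask
```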
4.3. 3D Scene Reconstruction

We aim to reconstruct 3D environments from uniformly sampled windows of input frames $\{\mathbf{I}_{\mathrm{Left}}^t,\mathbf{I}_{\mathrm{Right}}^t\}$ and human body masks $\{\mathbf{M}_{\mathrm{Left}}^t,\mathbf{M}_{\mathrm{Right}}^t\}$ with a fixed length. The length is set to 4 seconds (some motion data contain shorter sequences). Given these data, we use Metashape [24] to perform SfM to obtain camera poses and a 3D scene mesh. Here, as the baseline length between the stereo cameras is known, i.e., $12\,\mathrm{cm}$, we can obtain the mesh in real-world scale. Next, we render down-sampled depth maps $\{\mathbf{D}_{\mathrm{Left}}^t,\mathbf{D}_{\mathrm{Right}}^t\in \mathbb{R}^{\frac{H}{4}\times \frac{W}{4}\times 1}\}$ and depth region masks $\{\mathbf{R}_{\mathrm{Left}}^t,\mathbf{R}_{\mathrm{Right}}^t\in \mathbb{R}^{\frac{H}{4}\times \frac{W}{4}\times 1}\}$ from the reconstructed 3D scene mesh.
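A minimal sketch of the per-window preparation described above, assuming a caller-provided sfm_fn that stands in for the external SfM tool (Metashape in the paper) and either returns rendered depth maps and depth region masks or raises when reconstruction fails:

```python
FPS = 25
WINDOW_FRAMES = 4 * FPS   # 4-second windows of stereo frames (Sec. 4.3)

def reconstruct_window(frames, masks, sfm_fn):
    """Reconstruct the static background of one window.

    frames: list of (H, W, 3) uint8 frames; masks: list of (H, W) boolean body
    masks from Sec. 4.2; sfm_fn: hypothetical stand-in for the SfM pipeline.
    """
    background = [img * (~m)[..., None] for img, m in zip(frames, masks)]
    try:
        depth_maps, region_masks = sfm_fn(background)
        return depth_maps, region_masks, True
    except RuntimeError:
        # Body-dominated views: no reconstruction, so depth is missing for this
        # window and is masked out later via the padding masks of Eq. (4).
        return None, None, False
```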
The depth region masks indicate the regions where depth values are available from the 3D scene. This depth information will later be used in the 3D module as additional cues for pose estimation. However, there are cases where the egocentric RGB videos are largely occupied by the human body. In such scenarios, the 3D scene cannot be reconstructed or the camera poses cannot be estimated. This results in missing (invalid) depth values and undesired computation of network gradients during training. Therefore, we tackle this issue in our 3D module.

4.4. 3D Pose Estimation

In the 3D module, we aim to estimate a sequence of 3D poses by considering scene information and the temporal context of the egocentric stereo videos. Specifically, given the 2D joint heatmaps, depth maps, depth region masks, and $T$ sets of joint queries $q^{t} \in \mathbb{R}^{16 \times \frac{C}{2}}$, we use a transformer decoder to estimate a sequence of 3D poses $\{\mathbf{P}^t \in \mathbb{R}^{16 \times 3} \mid t = 1, 2, \dots, T\}$. Our pose output is the 3D pose at the last time step $\mathbf{P}^T$. We follow the existing works [1, 37, 38] to estimate 16 joints including the head.
Table 1. Quantitative results on UnrealEgo [1] with mm-scale.

| Method | Task | MPJPE (↓) | PA-MPJPE (↓) | 3D PCA (↑) | AUC (↑) |
|---|---|---|---|---|---|
| Zhao et al. [53] | Pelvis relative | 86.45 | 63.71 | 85.97 | 50.50 |
| Akada et al. [1] | Pelvis relative | 78.98 | 59.30 | 88.81 | 54.31 |
| Kang et al. [12] | Pelvis relative | 60.82 | 48.47 | - | - |
| Baseline | Pelvis relative | 59.85 | 49.14 | 92.07 | 63.88 |
| Ours | Pelvis relative | 50.55 | 40.50 | 93.83 | 70.61 |
| Zhao et al. [53] | Device relative | 88.12 | 65.36 | 85.10 | 50.37 |
| Akada et al. [1] | Device relative | 84.53 | 63.92 | 87.05 | 52.76 |
| Baseline | Device relative | 63.44 | 50.97 | 92.30 | 64.54 |
| Ours | Device relative | 46.20 | 40.19 | 94.02 | 73.53 |
Table 2. Quantitative results of device-relative pose estimation on UnrealEgo2 with mm-scale.

| Method | MPJPE (↓) | PA-MPJPE (↓) | 3D PCA (↑) | AUC (↑) |
|---|---|---|---|---|
| Zhao et al. [53] | 79.64 | 58.22 | 88.50 | 53.82 |
| Akada et al. [1] | 72.80 | 52.88 | 91.32 | 55.81 |
| Baseline | 52.23 | 39.78 | 95.72 | 68.13 |
| Ours | 30.53 | 26.72 | 97.22 | 80.75 |

Depth and Heatmap Features. We use the sequence of depth maps, depth region masks, and 2D joint heatmaps as the memory of a cross-attention operation in the transformer decoder. For this purpose, we extract depth features $\{\mathbf{U}_{\mathrm{Left}}^t,\mathbf{U}_{\mathrm{Right}}^t\in \mathbb{R}^{\frac{H}{32}\times \frac{W}{32}\times \frac{C}{2}}\}$ from the depth data:

$$\mathbf{U}_{\mathrm{Left}}^{t} = \mathcal{F}_{\mathrm{Depth}}\left(\mathbf{D}_{\mathrm{Left}}^{t} \oplus \widehat{\mathbf{R}}_{\mathrm{Left}}^{t}\right), \tag{2}$$

where "$\oplus$" is a concatenation operation along the channel axis and $\mathcal{F}_{\mathrm{Depth}}$ is a feature extractor. The same process is applied to obtain $\mathbf{U}_{\mathrm{Right}}^t$.
Similarly, we extract heatmap features $\{\mathbf{G}_{\mathrm{Left}}^t,\mathbf{G}_{\mathrm{Right}}^t\in \mathbb{R}^{\frac{H}{16}\times \frac{W}{16}\times C}\}$ from the 2D heatmaps:

$$\mathbf{G}_{\mathrm{Left}}^{t} = \mathcal{F}_{\mathrm{HM}}\left(\widehat{\mathbf{H}}_{\mathrm{Left}}^{t}\right), \tag{3}$$

where $\mathcal{F}_{\mathrm{HM}}$ is another feature extractor. The same process is applied to obtain $\mathbf{G}_{\mathrm{Right}}^t$.
These features are forwarded with positional embeddings into the transformer. However, as mentioned in Sec. 4.3, depth values can be missing in some frames. To prevent processing features of such depth data and to let the network focus only on valid frames, we propose to add padding masks $V_{\mathrm{Depth}}^{t} \in \mathbb{R}$ to all the elements of $\{\mathbf{U}_{\mathrm{Left}}^{t}, \mathbf{U}_{\mathrm{Right}}^{t}\}$:

$$V_{\mathrm{Depth}}^{t} = \begin{cases} -\infty, & \text{if depth values are missing,} \\ 0, & \text{otherwise.} \end{cases} \tag{4}$$

When $V_{\mathrm{Depth}}^{t} = -\infty$, the depth features $\{\mathbf{U}_{\mathrm{Left}}^{t}, \mathbf{U}_{\mathrm{Right}}^{t}\}$ have zero effect on the network training after the softmax function in the self-attention layers of the transformer.
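A small sketch of Eq. (4): a per-frame additive mask that is $-\infty$ for frames without reconstructed depth and $0$ otherwise. How the mask is broadcast onto the depth-feature tokens inside the attention layers is an implementation detail we assume; adding $-\infty$ to the corresponding attention logits drives their softmax weights to zero, matching the statement above.

```python
import torch

def depth_padding_mask(depth_valid: torch.Tensor) -> torch.Tensor:
    """Eq. (4): V_Depth^t = -inf where depth is missing, 0 otherwise.

    depth_valid: (B, T) boolean flags from the scene reconstruction step.
    Returns an additive mask (B, T) to be broadcast over the depth-feature
    tokens of each frame before the softmax in the attention layers.
    """
    mask = torch.zeros(depth_valid.shape, dtype=torch.float32)
    mask[~depth_valid] = float("-inf")
    return mask

valid = torch.tensor([[True, False, True, True]])   # depth missing for frame 2
print(depth_padding_mask(valid))                     # tensor([[0., -inf, 0., 0.]])
```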
Table 3. Quantitative results of device-relative pose estimation on UnrealEgo-RW with mm-scale.

| Method | MPJPE (↓) | PA-MPJPE (↓) | 3D PCA (↑) | AUC (↑) |
|---|---|---|---|---|
| Zhao et al. [53] | 117.57 | 88.01 | 73.12 | 38.94 |
| Akada et al. [1] | 122.64 | 86.55 | 72.51 | 38.67 |
| Baseline | 115.95 | 85.00 | 74.13 | 40.11 |
| Ours | 104.14 | 82.18 | 80.20 | 46.22 |

Stereo-Video-Dependent Joint Query Adaptation. The existing work [44] represents human joints as learnable positional embeddings, called joint queries, that encode prior knowledge about the skeleton joints. In our problem setting, the simplest way to design such joint queries is to set queries for each pose in a motion sequence. However, this cannot capture the temporal context of the video inputs, e.g., human motions and background changes. Therefore, we extend the multi-view joint query augmentation technique [44] to our stereo video setting to account for sequential information. Specifically, we enhance the joint queries with the temporal intermediate features of the stereo RGB frames $\{\mathbf{F}_{\mathrm{Left}}^t,\mathbf{F}_{\mathrm{Right}}^t\}$. First, from the sequence of intermediate features, we create a sequence of combined features $\mathbf{F}^{t}\in \mathbb{R}^{\frac{H}{32}\times \frac{W}{32}\times \frac{C}{2}}$:

$$\mathbf{F}^{t} = \operatorname{conv}\left(\mathbf{F}_{\mathrm{Left}}^{t} \oplus \mathbf{F}_{\mathrm{Right}}^{t}\right), \tag{5}$$

where $\operatorname{conv}(\cdot)$ is a convolution operation with a kernel size of $1 \times 1$.
Next, we fuse the sequence of combined features $\mathbf{F}^t$ to obtain fused stereo features $\mathbf{F}_{\mathrm{Stereo}} \in \mathbb{R}^{\frac{C}{2}}$:

$$\mathbf{F}_{\mathrm{Stereo}} = \mathbf{F}_{\mathrm{P}}^{1} \oplus \dots \oplus \mathbf{F}_{\mathrm{P}}^{T}, \quad \text{where} \ \mathbf{F}_{\mathrm{P}}^{i} = p(\mathbf{F}^{i}), \tag{6}$$

where $p(\cdot)$ is an adaptive average pooling operation. Now, the feature $\mathbf{F}_{\mathrm{Stereo}}$ contains stereo video information.

Lastly, with $\mathbf{F}_{\mathrm{Stereo}}$ and a fully connected layer $\operatorname{fc}(\cdot)$, we augment each query $q^{t}$ to obtain $q_{\mathrm{Aug}}^t\in \mathbb{R}^{16\times \frac{C}{2}}$:
$$\mathbf{q}_{\mathrm{Aug}}^{t} = \operatorname{fc}\left(\mathbf{F}_{\mathrm{Stereo}}\right) + q^{t}. \tag{7}$$

Transformer Decoder. We adopt a DETR [2]-based transformer decoder and a pose regression head. In the decoder layers, all of the augmented joint queries $q_{\mathrm{Aug}}^{t}$ first interact with each other in a self-attention layer. Then, the queries extract the temporal stereo features from the memory $\{\mathbf{U}_{\mathrm{Left}}^{t}, \mathbf{U}_{\mathrm{Right}}^{t}, \mathbf{G}_{\mathrm{Left}}^{t}, \mathbf{G}_{\mathrm{Right}}^{t}\}$ with the padding masks $V_{\mathrm{Depth}}^{t}$ in a cross-attention layer. Lastly, the pose regression head estimates a sequence of 3D poses $\{\hat{\mathbf{P}}^t \in \mathbb{R}^{16 \times 3} \mid t = 1, 2, \dots, T\}$, yielding the final pose output $\hat{\mathbf{P}}^T$.
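Eqs. (5)-(7) can be sketched as follows. The module and layer names are ours, and the way $\operatorname{fc}(\cdot)$ consumes the concatenated $\mathbf{F}_{\mathrm{Stereo}}$ (a single linear layer over the $T$ pooled features) is an assumption consistent with the stated dimensions.

```python
import torch
import torch.nn as nn

class JointQueryAdaptation(nn.Module):
    """Stereo-video-dependent joint query adaptation, Eqs. (5)-(7)."""

    def __init__(self, T: int, C: int = 512, num_joints: int = 16):
        super().__init__()
        self.conv = nn.Conv2d(2 * C, C // 2, kernel_size=1)        # Eq. (5)
        self.pool = nn.AdaptiveAvgPool2d(1)                         # p(.) in Eq. (6)
        self.fc = nn.Linear(T * (C // 2), C // 2)                   # fc(.) in Eq. (7)
        self.queries = nn.Parameter(torch.randn(T, num_joints, C // 2))  # q^t

    def forward(self, f_left, f_right):
        # f_left, f_right: (B, T, C, H/32, W/32) intermediate features from Sec. 4.1
        B, T = f_left.shape[:2]
        x = torch.cat([f_left, f_right], dim=2).flatten(0, 1)       # (B*T, 2C, h, w)
        f_t = self.conv(x)                                          # Eq. (5): (B*T, C/2, h, w)
        f_p = self.pool(f_t).flatten(1).view(B, T, -1)              # Eq. (6): (B, T, C/2)
        f_stereo = f_p.flatten(1)                                   # concat over t: (B, T*C/2)
        return self.fc(f_stereo)[:, None, None, :] + self.queries   # Eq. (7): (B, T, 16, C/2)

adapt = JointQueryAdaptation(T=5)
q_aug = adapt(torch.randn(2, 5, 512, 8, 8), torch.randn(2, 5, 512, 8, 8))
print(q_aug.shape)    # torch.Size([2, 5, 16, 256])
```

The augmented queries are then fed to the decoder together with the depth and heatmap features and the padding masks of Eq. (4).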
Figure 4. Qualitative results of device-relative pose estimation. Left: UnrealEgo2. Right: UnrealEgo-RW. 3D pose prediction and ground truth are displayed in red and green, respectively. For UnrealEgo-RW, we show ground-truth scene meshes for visualization. Panels from left to right: stereo inputs, Akada et al. [1], Baseline, Ours.

Table 4. Ablation study of our model for device-relative pose estimation on UnrealEgo-RW with mm-scale.

| Method | MPJPE (↓) | PA-MPJPE (↓) |
|---|---|---|
| (a) Baseline with depth information | 120.39 | 86.23 |
| Baseline | 115.36 | 84.80 |
| (b) Ours w/o query adaptation | 108.33 | 86.69 |
| (c) Ours w/o depth information | 112.56 | 84.37 |
| (d) Ours w/o depth padding mask | 108.70 | 84.26 |
| (e) Ours with latest pose supervision only | 105.67 | 83.46 |
| (f) Ours with a single set of queries | 105.58 | 85.68 |
| Ours | 104.14 | 82.18 |

Similar to the previous works [5, 49], we train the 3D module with pose supervision on the current and past frames:

$$L_{3\mathrm{D}} = L_{\mathrm{pose}}\!\left(\mathbf{P}^{T}, \hat{\mathbf{P}}^{T}\right) + \frac{\lambda_{\mathrm{past}}}{T - 1} \sum_{t = 1}^{T - 1} L_{\mathrm{pose}}\!\left(\mathbf{P}^{t}, \hat{\mathbf{P}}^{t}\right), \tag{8}$$

$$L_{\mathrm{pose}}(\mathbf{P}, \hat{\mathbf{P}}) = \lambda_{\mathrm{pose}}\left(\mathrm{mpjpe}(\mathbf{P}, \hat{\mathbf{P}}) + \lambda_{\cos}\cos\!\left(\mathrm{bone}(\mathbf{P}), \mathrm{bone}(\hat{\mathbf{P}})\right)\right), \tag{9}$$

where $\mathbf{P}$ is a ground-truth 3D pose, $\mathrm{mpjpe}(\cdot)$ is the mean per joint position error, $\cos(\cdot)$ is a negative cosine similarity, and $\mathrm{bone}(\cdot)$ is an operation that obtains the bones of the 3D poses as used in the previous work [1]:

$$\mathrm{mpjpe}(\mathbf{P}, \hat{\mathbf{P}}) = \frac{1}{NJ}\sum_{n = 1}^{N}\sum_{j = 1}^{J}\left\|\mathbf{P}_{n,j} - \hat{\mathbf{P}}_{n,j}\right\|_{2}, \tag{10}$$

$$\cos(\mathbf{B}, \hat{\mathbf{B}}) = -\frac{1}{N}\sum_{n = 1}^{N}\sum_{m = 1}^{M}\frac{\mathbf{B}_{n,m}\cdot\hat{\mathbf{B}}_{n,m}}{\|\mathbf{B}_{n,m}\|\,\|\hat{\mathbf{B}}_{n,m}\|}, \tag{11}$$

where $N$ is the batch size, $J$ is the number of joints, $M$ is the number of bones, and $\mathbf{B}_{n,m} \in \mathbb{R}^3$ is the vector of the $m$-th bone.
"bbox": [ + 47, + 568, + 287, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_{n,m} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 47, + 568, + 287, + 594 + ], + "type": "text", + "content": " is a vector of " + }, + { + "bbox": [ + 47, + 568, + 287, + 594 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 47, + 568, + 287, + 594 + ], + "type": "text", + "content": "-th bone." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 47, + 604, + 127, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 604, + 127, + 617 + ], + "spans": [ + { + "bbox": [ + 47, + 604, + 127, + 617 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 47, + 623, + 179, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 623, + 179, + 635 + ], + "spans": [ + { + "bbox": [ + 47, + 623, + 179, + 635 + ], + "type": "text", + "content": "5.1. Datasets for Evaluation" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 47, + 641, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 641, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 641, + 287, + 713 + ], + "type": "text", + "content": "We use three datasets for our experiments: UnrealEgo [1], UnrealEgo2, and UnrealEgo-RW. For UnrealEgo, we use their proposed data splits. Also, we divide UnrealEgo2 into 12,139 motions (1,002,656 stereo views) for training, 1,545 motions (127,968 stereo views) for validation, and 1523 motions (123,488 stereo views) for testing. Similarly, we" + } + ] + } + ], + "index": 37 + }, + { + "type": "table", + "bbox": [ + 308, + 277, + 544, + 327 + ], + "blocks": [ + { + "bbox": [ + 47, + 388, + 287, + 411 + ], + "lines": [ + { + "bbox": [ + 47, + 388, + 287, + 411 + ], + "spans": [ + { + "bbox": [ + 47, + 388, + 287, + 411 + ], + "type": "text", + "content": "Table 4. Ablation study of our model for device-relative pose estimation on UnrealEgo-RW with mm-scale." + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 277, + 544, + 327 + ], + "lines": [ + { + "bbox": [ + 308, + 277, + 544, + 327 + ], + "spans": [ + { + "bbox": [ + 308, + 277, + 544, + 327 + ], + "type": "table", + "html": "
Method | Upper body MPJPE(↓) | Lower body MPJPE(↓) | Foot MPJPE(↓) | Foot MPE(↓)
Ours w/o depth information | 80.82 | 144.31 | 174.45 | 6.39
Ours w/o depth padding masks | 77.29 | 140.10 | 169.95 | 5.02
Ours | 77.85 | 130.97 | 155.86 | 4.83
", + "image_path": "16d04a19efec5d6a07574c1343fca07cff37f6de91a7444a8f9f05ab2f0ffbf9.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "table_body" + } + ], + "index": 38 + }, + { + "bbox": [ + 306, + 331, + 545, + 353 + ], + "lines": [ + { + "bbox": [ + 306, + 331, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 306, + 331, + 545, + 353 + ], + "type": "text", + "content": "Table 5. The effect of scene information (depth) per body part on UnrealEgo-RW. The numbers are in mm." + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 373, + 545, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 373, + 545, + 458 + ], + "spans": [ + { + "bbox": [ + 304, + 373, + 545, + 458 + ], + "type": "text", + "content": "split UnrealEgo-RW into 547 motions (51,936 stereo views) for training, 77 motions (7,616 stereo views) for validation, and 86 motions (7,936 stereo views) for testing. We follow the existing works [1, 11, 37, 38, 40-42, 45, 52, 53] to report the results of device-relative 3D pose estimation. For UnrealEgo, we also follow the existing works [1, 12] to include the results of pelvis-relative 3D pose estimation." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 306, + 465, + 404, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 465, + 404, + 478 + ], + "spans": [ + { + "bbox": [ + 306, + 465, + 404, + 478 + ], + "type": "text", + "content": "5.2. Training Details" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "text", + "content": "We resize the input RGB images and ground-truth 2D keypoint heatmaps to " + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "text", + "content": " pixels, respectively. For the training of the 2D module, we follow the previous work [1] to use the ResNet18 [8] pre-trained on ImageNet [4] as an encoder and train the module with a batch size of 16 and an initial learning rate of " + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "inline_equation", + "content": "10^{-3}" + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "text", + "content": ". Then, we train the 3D module with a batch size of 32 and an initial learning rate of " + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "inline_equation", + "content": "2 \\cdot 10^{-4}" + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "text", + "content": ". The modules are trained with Adam optimizer [15] for ten epochs, starting with the initial learning rate for the first half epochs and applying a linearly decaying rate for the next half. 
Also, we set the hyperparameters as " + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{pose}} = 0.1" + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{cos}} = 0.01" + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{past}} = 0.1" + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "text", + "content": ". We use five sequential stereo views as inputs to our model, i.e., " + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "inline_equation", + "content": "T = 5" + }, + { + "bbox": [ + 304, + 483, + 545, + 662 + ], + "type": "text", + "content": ", with a skip size of 3. See our supplement for more details on the network architecture." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 306, + 671, + 379, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 671, + 379, + 682 + ], + "spans": [ + { + "bbox": [ + 306, + 671, + 379, + 682 + ], + "type": "text", + "content": "5.3. Evaluation" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "We compare our method with existing stereo-based egocentric pose estimation methods [1, 53]. We use the of" + } + ] + } + ], + "index": 44 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "772" + } + ] + } + ], + "index": 45 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 71, + 315, + 143 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 315, + 143 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 315, + 143 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 315, + 143 + ], + "type": "image", + "image_path": "b8ff47f7aac3cbaab619fd868be5cec580eb53d5f42d9395aa1cf7256978ff8d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 329, + 69, + 394, + 133 + ], + "blocks": [ + { + "bbox": [ + 329, + 69, + 394, + 133 + ], + "lines": [ + { + "bbox": [ + 329, + 69, + 394, + 133 + ], + "spans": [ + { + "bbox": [ + 329, + 69, + 394, + 133 + ], + "type": "image", + "image_path": "1601914d99990671ee990999c3a69dde366aeefad8f651f761acf684b0c2935c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 350, + 133, + 447, + 144 + ], + "lines": [ + { + "bbox": [ + 350, + 133, + 447, + 144 + ], + "spans": [ + { + "bbox": [ + 350, + 133, + 447, + 144 + ], + "type": "text", + "content": "3D-to-2D pose reprojection" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 404, + 70, + 468, + 133 + ], + "blocks": [ + { + "bbox": [ + 404, + 70, + 468, + 133 + ], + "lines": [ + { + "bbox": [ + 404, + 70, + 468, + 133 + ], + "spans": [ + { + "bbox": [ + 
404, + 70, + 468, + 133 + ], + "type": "image", + "image_path": "c0b25a4525d502891b94574637c2198768944e17df3903684575ac3c8e1eef1a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 403, + 133, + 447, + 143 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 403, + 133, + 447, + 143 + ], + "spans": [ + { + "bbox": [ + 403, + 133, + 447, + 143 + ], + "type": "text", + "content": "reprojection" + } + ] + } + ], + "index": 4, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 477, + 70, + 541, + 132 + ], + "blocks": [ + { + "bbox": [ + 477, + 70, + 541, + 132 + ], + "lines": [ + { + "bbox": [ + 477, + 70, + 541, + 132 + ], + "spans": [ + { + "bbox": [ + 477, + 70, + 541, + 132 + ], + "type": "image", + "image_path": "7194d77dc8227a51473a041ee594b94294628daf57d75184299be30e5bb3f38f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 472, + 133, + 540, + 143 + ], + "lines": [ + { + "bbox": [ + 472, + 133, + 540, + 143 + ], + "spans": [ + { + "bbox": [ + 472, + 133, + 540, + 143 + ], + "type": "text", + "content": "3D pose estimation" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 49, + 144, + 315, + 223 + ], + "blocks": [ + { + "bbox": [ + 49, + 144, + 315, + 223 + ], + "lines": [ + { + "bbox": [ + 49, + 144, + 315, + 223 + ], + "spans": [ + { + "bbox": [ + 49, + 144, + 315, + 223 + ], + "type": "image", + "image_path": "3c50cd63f1945538be9e269ad317db4e40468d31ba05666ae253de74b957d232.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 228, + 546, + 262 + ], + "lines": [ + { + "bbox": [ + 46, + 228, + 546, + 262 + ], + "spans": [ + { + "bbox": [ + 46, + 228, + 546, + 262 + ], + "type": "text", + "content": "Figure 5. Results of our framework and comparison methods on example sequences from UnrealEgo2 (above) and UnrealEgo-RW (below). Left: MPJPE curves. Right: Outputs of our method at frame 87 and 329 of the sequences, respectively. 3D pose estimation and ground truth are colored in red and green, respectively." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 330, + 150, + 394, + 214 + ], + "blocks": [ + { + "bbox": [ + 330, + 150, + 394, + 214 + ], + "lines": [ + { + "bbox": [ + 330, + 150, + 394, + 214 + ], + "spans": [ + { + "bbox": [ + 330, + 150, + 394, + 214 + ], + "type": "image", + "image_path": "0e71dce06ba4d72dc03b7bd045dee2555f3be3ea270bbca4ad2f91c95e98116f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 350, + 215, + 447, + 225 + ], + "lines": [ + { + "bbox": [ + 350, + 215, + 447, + 225 + ], + "spans": [ + { + "bbox": [ + 350, + 215, + 447, + 225 + ], + "type": "text", + "content": "3D-to-2D pose reprojection" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 404, + 150, + 468, + 214 + ], + "blocks": [ + { + "bbox": [ + 404, + 150, + 468, + 214 + ], + "lines": [ + { + "bbox": [ + 404, + 150, + 468, + 214 + ], + "spans": [ + { + "bbox": [ + 404, + 150, + 468, + 214 + ], + "type": "image", + "image_path": "78136c63fc3ada05f13f891c651fa17fc5e4c0d8ac7bb6f4fd10ab38996d2d8f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 403, + 215, + 447, + 225 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 403, + 215, + 447, + 225 + ], + "spans": [ + { + "bbox": [ + 403, + 215, + 447, + 225 + ], + "type": "text", + "content": "reprojection" + } + ] + } + ], + "index": 11, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 477, + 150, + 541, + 214 + ], + "blocks": [ + { + "bbox": [ + 477, + 150, + 541, + 214 + ], + "lines": [ + { + "bbox": [ + 477, + 150, + 541, + 214 + ], + "spans": [ + { + "bbox": [ + 477, + 150, + 541, + 214 + ], + "type": "image", + "image_path": "e96a9b4a852ee2ff08ed4cf1b103d800aa7a293edb8b7cdba51d74cdf58bf412.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 472, + 215, + 540, + 224 + ], + "lines": [ + { + "bbox": [ + 472, + 215, + 540, + 224 + ], + "spans": [ + { + "bbox": [ + 472, + 215, + 540, + 224 + ], + "type": "text", + "content": "3D pose estimation" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 274, + 289, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 274, + 289, + 501 + ], + "spans": [ + { + "bbox": [ + 46, + 274, + 289, + 501 + ], + "type": "text", + "content": "ficial source code of Akada et al. [1] and re-implement the framework of Zhao et al. [53] as its source code is not available. Note that the comparison methods are trained on the same datasets as our model. Kang et al. [12] (arXiv preprint at the time of submission) only shows results of the pelvis-relative estimation on UnrealEgo. Therefore, we include them for reference. Furthermore, we are interested in the performance of the publicly available state-of-the-art method [1] with temporal inputs. Thus, we modify their 3D module such that it can take as an input a sequence of stereo 2D keypoint heatmaps with the same time step as ours, i.e., " + }, + { + "bbox": [ + 46, + 274, + 289, + 501 + ], + "type": "inline_equation", + "content": "T = 5" + }, + { + "bbox": [ + 46, + 274, + 289, + 501 + ], + "type": "text", + "content": ". 
Here, we replace the first and the last fully connected layers in the encoder, the pose decoder, and the heatmap reconstruction decoder of their autoencoder-based 3D module [1] by those with " + }, + { + "bbox": [ + 46, + 274, + 289, + 501 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 274, + 289, + 501 + ], + "type": "text", + "content": " times the size of the original hidden dimension. We denote this model as Baseline and train it with the same training procedure as Akada et al. [1]. Note that Akada et al. [1], Baseline, and our model use the same 2D module." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 506, + 287, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 506, + 287, + 578 + ], + "spans": [ + { + "bbox": [ + 46, + 506, + 287, + 578 + ], + "type": "text", + "content": "We follow the existing works [1, 11, 37, 38, 40-42, 45, 52, 53] to report Mean Per Joint Position Error (MPJPE) and Mean Per Joint Position Error with Procrustes Alignment [13] (PA-MPJPE). We additionally report 3D Percentage of Correct Keypoints (3D PCK) and Area Under the Curve (AUC) for UnrealEgo2 and UnrealEgo-RW." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": "Results on Synthetic Datasets. Tables 1 and 2 report the results with UnrealEgo [1] and UnrealEgo2. Our method outperforms the existing methods [1, 12, 53] and Baseline across all metrics by a significant margin, e.g., " + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "inline_equation", + "content": ">15\\%" + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": " on UnrealEgo [1] and " + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\geq 40\\%" + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": " on UnrealEgo2 (on MPJPE). The qualitative results on UnrealEgo2 in Fig. 4-(left part) show that existing methods and Baseline fail to estimate lower bodies of complex poses with severe self-occlusions, such as crouching. Even under such challenging scenarios, however, our approach yields accurate 3D poses. See Fig. 5-(above part) for a MPJPE curve and visual outputs of our" + } + ] + } + ], + "index": 17 + }, + { + "type": "table", + "bbox": [ + 321, + 274, + 531, + 332 + ], + "blocks": [ + { + "bbox": [ + 321, + 274, + 531, + 332 + ], + "lines": [ + { + "bbox": [ + 321, + 274, + 531, + 332 + ], + "spans": [ + { + "bbox": [ + 321, + 274, + 531, + 332 + ], + "type": "table", + "html": "
Method | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑)
T = 1 | 108.63 | 84.69 | 77.98 | 44.15
T = 3 | 108.23 | 85.28 | 78.35 | 44.54
T = 5 | 104.14 | 82.18 | 80.20 | 46.22
T = 7 | 104.01 | 82.43 | 80.52 | 46.10
", + "image_path": "90a8d937c779434d551a4076208a07f943e006cc70e252bf4818bd2e5c47a061.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 336, + 545, + 358 + ], + "lines": [ + { + "bbox": [ + 305, + 336, + 545, + 358 + ], + "spans": [ + { + "bbox": [ + 305, + 336, + 545, + 358 + ], + "type": "text", + "content": "Table 6. Ablation study of our model with different sequence lengths on UnrealEgo-RW. The numbers are in mm." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 366, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 545, + 715 + ], + "type": "text", + "content": "framework on UnrealEgo2. Our method is able to constantly estimate accurate 3D poses compared to the existing methods. As evidenced by these results, our method demonstrates superiority and becomes a strong benchmark method in the egocentric stereo 3D pose estimation tasks. See our supplementary material and video for more results. Results on the Real-World Dataset. Table 3 shows quantitative results on UnrealEgo-RW. Again, our method outperforms the existing methods [1, 53] and Baseline across all metrics, e.g., by more than " + }, + { + "bbox": [ + 304, + 366, + 545, + 715 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 304, + 366, + 545, + 715 + ], + "type": "text", + "content": " on MPJPE. See Fig. 4-(right) for qualitative results. The current state-of-the-art methods [1, 53] or Baseline show floating feet, inaccurate pelvis position, and penetration to the floor ground. However, our method is able to estimate accurate 3D poses. See Fig. 5-(below part) for a MPJPE curve and visual outputs on an example motion of UnrealEgo-RW. The curve indicates that our method constantly shows lower 3D errors than the comparison methods. All of the results indicate the effectiveness of our proposed framework compared to the existing methods. We also visualize 2D heatmaps, 3D-to-2D pose reprojection, and 3D pose prediction from our method in Fig. 6. Even when the joint locations of the lower body are estimated closely in the 2D heatmaps, our approach predicts accurate lower body poses. These results suggest that the proposed method with our portable device can open up the possibility of many future applications, including animating virtual humans (Fig. 1-(g)). 
For the virtual human animation, we applied inverse kinematics with estimated 3D joint locations and ground-truth camera poses to drive the" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "773" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 70, + 117, + 138 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 117, + 138 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 117, + 138 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 117, + 138 + ], + "type": "image", + "image_path": "2e77b11a607c3d0a6cdc2dda1f39c426ba05d8a2660ab7857d4ce589147de834.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 97, + 140, + 140, + 150 + ], + "lines": [ + { + "bbox": [ + 97, + 140, + 140, + 150 + ], + "spans": [ + { + "bbox": [ + 97, + 140, + 140, + 150 + ], + "type": "text", + "content": "Stereo inputs" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 119, + 70, + 188, + 138 + ], + "blocks": [ + { + "bbox": [ + 119, + 70, + 188, + 138 + ], + "lines": [ + { + "bbox": [ + 119, + 70, + 188, + 138 + ], + "spans": [ + { + "bbox": [ + 119, + 70, + 188, + 138 + ], + "type": "image", + "image_path": "e3146f908f0672d9e0886816bb289d0455391a9e3cdaa223e0210afcb8c29dae.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 154, + 546, + 177 + ], + "lines": [ + { + "bbox": [ + 46, + 154, + 546, + 177 + ], + "spans": [ + { + "bbox": [ + 46, + 154, + 546, + 177 + ], + "type": "text", + "content": "Figure 6. Visualization of outputs from our model on UnrealEgo-RW. 3D-to-2D pose reprojection is visualized in the same colors as in Fig. 1-(e). 3D pose estimation and ground truth are displayed in red and green, respectively." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 191, + 70, + 260, + 138 + ], + "blocks": [ + { + "bbox": [ + 191, + 70, + 260, + 138 + ], + "lines": [ + { + "bbox": [ + 191, + 70, + 260, + 138 + ], + "spans": [ + { + "bbox": [ + 191, + 70, + 260, + 138 + ], + "type": "image", + "image_path": "bb449581a65ca399763951e5a742673f7e960769f766d4cbc7d77982c4ef0061.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 140, + 308, + 150 + ], + "lines": [ + { + "bbox": [ + 216, + 140, + 308, + 150 + ], + "spans": [ + { + "bbox": [ + 216, + 140, + 308, + 150 + ], + "type": "text", + "content": "2D joint heatmap estimation" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 262, + 70, + 332, + 138 + ], + "blocks": [ + { + "bbox": [ + 262, + 70, + 332, + 138 + ], + "lines": [ + { + "bbox": [ + 262, + 70, + 332, + 138 + ], + "spans": [ + { + "bbox": [ + 262, + 70, + 332, + 138 + ], + "type": "image", + "image_path": "58fccb4ac4bb0af3ec4c15c1abe169eaa8e12a6d628f3abb227ac4fe0fea2df4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 334, + 70, + 402, + 138 + ], + "blocks": [ + { + "bbox": [ + 334, + 70, + 402, + 138 + ], + "lines": [ + { + "bbox": [ + 334, + 70, + 402, + 138 + ], + "spans": [ + { + "bbox": [ + 334, + 70, + 402, + 138 + ], + "type": "image", + "image_path": "4f5d3e8fc901e75542a7996091992392e5269a756c31ff1a41cd9f01d82c81fc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 361, + 140, + 449, + 150 + ], + "lines": [ + { + "bbox": [ + 361, + 140, + 449, + 150 + ], + "spans": [ + { + "bbox": [ + 361, + 140, + 449, + 150 + ], + "type": "text", + "content": "3D-to-2D pose reprojection" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 405, + 70, + 474, + 138 + ], + "blocks": [ + { + "bbox": [ + 405, + 70, + 474, + 138 + ], + "lines": [ + { + "bbox": [ + 405, + 70, + 474, + 138 + ], + "spans": [ + { + "bbox": [ + 405, + 70, + 474, + 138 + ], + "type": "image", + "image_path": "21fb5b2e2f0cfec6ba1d5395392dc8d0c9e181ea9b4f08a7722884907956a5ff.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 479, + 140, + 541, + 150 + ], + "lines": [ + { + "bbox": [ + 479, + 140, + 541, + 150 + ], + "spans": [ + { + "bbox": [ + 479, + 140, + 541, + 150 + ], + "type": "text", + "content": "3D pose estimation" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 477, + 70, + 545, + 138 + ], + "blocks": [ + { + "bbox": [ + 477, + 70, + 545, + 138 + ], + "lines": [ + { + "bbox": [ + 477, + 70, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 477, + 70, + 545, + 138 + ], + "type": "image", + "image_path": "2c8793e1c98b587952e6df0b244e66eee51befb37da427a3eaba19a67aff0ffb.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 192, + 206, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 192, + 206, + 204 + ], + "spans": [ + { + "bbox": [ + 46, + 192, + 206, + 204 + ], + "type": "text", + "content": "character in a world coordinate system." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 205, + 286, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 205, + 286, + 277 + ], + "spans": [ + { + "bbox": [ + 46, + 205, + 286, + 277 + ], + "type": "text", + "content": "Ablation Study. In Table 4, we first ablate (a) the CNN-based 3D module (Baseline) with depth data concatenated to the heatmap inputs. However, naively adding this extra scene information to this 3D module does not help probably because the CNN layers can be affected by invalid depth values even with the depth region masks." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 279, + 286, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 279, + 286, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 279, + 286, + 483 + ], + "type": "text", + "content": "Next, we test our transformer-based 3D module (b) without query augmentation (c) without depth data. They perform worse than our full framework. We also ablate our method (d) without the padding mask. The result indicates that adding depth padding masks helps because the padding mask can filter out the invalid values in depth maps in the attention module. These results validate that our video-based 3D scene reconstruction module and video-dependent query augmentation policy boost 3D joint localization accuracy. Next, we ablate our model (e) with 3D pose supervision of the latest frame only. Note that this ablation uses the same sets of input data and joint queries as the original model, i.e., " + }, + { + "bbox": [ + 46, + 279, + 286, + 483 + ], + "type": "inline_equation", + "content": "T = 5" + }, + { + "bbox": [ + 46, + 279, + 286, + 483 + ], + "type": "text", + "content": ". This model estimates less accurate poses due to the loss of supervision from past 3D poses. We also test (f) a single set of joint queries, i.e., " + }, + { + "bbox": [ + 46, + 279, + 286, + 483 + ], + "type": "inline_equation", + "content": "q^1" + }, + { + "bbox": [ + 46, + 279, + 286, + 483 + ], + "type": "text", + "content": ", instead of " + }, + { + "bbox": [ + 46, + 279, + 286, + 483 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 279, + 286, + 483 + ], + "type": "text", + "content": " sets to predict the latest 3D pose. Similar to (e), this model cannot benefit from the supervision of past 3D poses." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 484, + 286, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 484, + 286, + 556 + ], + "spans": [ + { + "bbox": [ + 46, + 484, + 286, + 556 + ], + "type": "text", + "content": "We further investigate the effect of the scene information. Table 5 shows the MPJPE per body part and Mean Penetration Error (MPE) [34, 35] between feet and floor ground. The results reveal that depth features with the padding masks reduce the errors in the lower body while maintaining the performance in the upper body." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 557, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 557, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 557, + 286, + 713 + ], + "type": "text", + "content": "In Table 6, we ablate the effect of the sequence length of input frames for our method. 
It is worth noting that our model with " + }, + { + "bbox": [ + 46, + 557, + 286, + 713 + ], + "type": "inline_equation", + "content": "T = 1" + }, + { + "bbox": [ + 46, + 557, + 286, + 713 + ], + "type": "text", + "content": " yields better results than the best existing method [1] and Baseline that utilizes temporal information (see Table 3). Since our model uses the same 2D module as Akada et al. [1] and Baseline, the difference comes only from the 3D module. This suggests that their autoencoder-based 3D modules with the heatmap reconstruction component are, very likely, not the most suitable solution for estimating 3D poses from 2D joint heatmaps, highlighting the potential of our transformer-based framework. The result also indicates that although the longer sequence can bring performance improvement to some extent, the se" + } + ] + } + ], + "index": 16 + }, + { + "type": "table", + "bbox": [ + 307, + 191, + 544, + 281 + ], + "blocks": [ + { + "bbox": [ + 307, + 191, + 544, + 281 + ], + "lines": [ + { + "bbox": [ + 307, + 191, + 544, + 281 + ], + "spans": [ + { + "bbox": [ + 307, + 191, + 544, + 281 + ], + "type": "table", + "html": "
Method | Initial training data | MPJPE(↓) | PA-MPJPE(↓) | 3D PCK(↑) | AUC(↑)
Zhao et al. [53] | UnrealEgo [1] | 99.09 | 72.47 | 79.82 | 43.55
Akada et al. [1] | UnrealEgo [1] | 94.87 | 69.79 | 82.78 | 46.80
Baseline | UnrealEgo [1] | 83.89 | 64.30 | 86.20 | 51.63
Ours | UnrealEgo [1] | 75.34 | 57.29 | 89.43 | 55.77
Zhao et al. [53] | UnrealEgo2 | 97.86 | 69.92 | 81.53 | 46.32
Akada et al. [1] | UnrealEgo2 | 92.48 | 67.15 | 84.25 | 48.04
Baseline | UnrealEgo2 | 82.16 | 61.60 | 87.07 | 52.72
Ours | UnrealEgo2 | 72.89 | 56.19 | 90.29 | 57.19
", + "image_path": "869defdf736714e5f81be48fd6cd05ec557116221724957e7dfdf0e5f0789fbb.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 284, + 545, + 306 + ], + "lines": [ + { + "bbox": [ + 306, + 284, + 545, + 306 + ], + "spans": [ + { + "bbox": [ + 306, + 284, + 545, + 306 + ], + "type": "text", + "content": "Table 7. Fine-tuning results of device-relative 3D pose estimation on UnrealEgo-RW with mm-scale." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 323, + 545, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 323, + 545, + 490 + ], + "spans": [ + { + "bbox": [ + 304, + 323, + 545, + 490 + ], + "type": "text", + "content": "quence lengths of five and seven show comparable results. Synthetic Data for Pre-training. No existing works explored the efficacy of synthetic data for pre-training in egocentric 3D pose estimation. Thus, we further conduct experiments with models pre-trained on the synthetic datasets and fine-tuned on the real-world data. Tables 3 and 7 show that all methods benefit from the training with the large-scale synthetic data even with the differences in the synthetic and real-world setups, e.g., fisheye distortion and syn-to-real domain gaps. Note that the gain of our method from UnrealEgo to UnrealEgo2 is significant, i.e., " + }, + { + "bbox": [ + 304, + 323, + 545, + 490 + ], + "type": "inline_equation", + "content": "3.3\\%" + }, + { + "bbox": [ + 304, + 323, + 545, + 490 + ], + "type": "text", + "content": " on MPJPE (75.34mm to 72.89mm). This suggests that it is helpful to develop not only new models but also large-scale synthetic datasets even with different distortion and domain gaps." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 502, + 378, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 502, + 378, + 514 + ], + "spans": [ + { + "bbox": [ + 306, + 502, + 378, + 514 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 521, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 665 + ], + "type": "text", + "content": "In this paper, we proposed a new transformer-based framework that significantly boosts the accuracy of egocentric stereo 3D human pose estimation. The proposed framework leverages the scene information and temporal context of egocentric stereo video inputs via our video-based 3D scene reconstruction module and video-based joint query augmentation policy. Our extensive experiments on the new synthetic and real-world datasets with challenging human motions validate the effectiveness of our approach compared to the existing methods. We hope that our proposed benchmark datasets and trained models will foster the further development of methods for egocentric 3D vision." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgment. The work was supported by the ERC Consolidator Grant 4DReply (770784) and the Nakajima Foundation. We thank Silicon Studio Corp. for providing the fisheye plug-in for Unreal Engine." 
+ } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "type": "text", + "content": "774" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Hiroyasu Akada, Jian Wang, Soshi Shimada, Masaki Takahashi, Christian Theobalt, and Vladislav Golyanik. Unrealego: A new dataset for robust egocentric 3d human motion capture. In European Conference on Computer Vision (ECCV), 2022. 1, 2, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 146, + 288, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 146, + 288, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 146, + 288, + 190 + ], + "type": "text", + "content": "[2] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision (ECCV), 2020. 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 191, + 288, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 191, + 288, + 267 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 288, + 267 + ], + "type": "text", + "content": "[3] Young-Woon Cha, True Price, Zhen Wei, Xinran Lu, Nicholas Rewkowski, Rohan Chabra, Zihe Qin, Hyounghun Kim, Zhaoqi Su, Yebin Liu, Adrian Ilie, Andrei State, Zhenlin Xu, Jan-Michael Frahm, and Henry Fuchs. Towards fully mobile 3d face, body, and environment capture using only head-worn cameras. IEEE Transactions on Visualization and Computer Graphics, 24(11):2993-3004, 2018. 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 268, + 287, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 268, + 287, + 301 + ], + "spans": [ + { + "bbox": [ + 53, + 268, + 287, + 301 + ], + "type": "text", + "content": "[4] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei. ImageNet: A Large-Scale Hierarchical Image Database. In Computer Vision and Pattern Recognition (CVPR), 2009. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 302, + 287, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 302, + 287, + 345 + ], + "spans": [ + { + "bbox": [ + 53, + 302, + 287, + 345 + ], + "type": "text", + "content": "[5] Moritz Einfalt, Katja Ludwig, and Rainer Lienhart. Uplift and upsample: Efficient 3d human pose estimation with up-lifting transformers. In Winter Conference on Applications of Computer Vision (WACV), 2023. 
2, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 346, + 287, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 346, + 287, + 378 + ], + "spans": [ + { + "bbox": [ + 53, + 346, + 287, + 378 + ], + "type": "text", + "content": "[6] FUJINON FE185C057HA-1 fisheye lens, 2023. https:// www.fujifilm.com/de/de/business/opticaldevices/mvlems/fe185.3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 380, + 287, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 380, + 287, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 380, + 287, + 434 + ], + "type": "text", + "content": "[7] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Computer Vision and Pattern Recognition (CVPR), 2022. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 434, + 287, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 434, + 287, + 468 + ], + "spans": [ + { + "bbox": [ + 53, + 434, + 287, + 468 + ], + "type": "text", + "content": "[8] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Computer Vision and Pattern Recognition (CVPR), 2016. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 468, + 287, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 468, + 287, + 501 + ], + "spans": [ + { + "bbox": [ + 53, + 468, + 287, + 501 + ], + "type": "text", + "content": "[9] Yihui He, Rui Yan, Katerina Fragkiadaki, and Shoou-I Yu. Epipolar transformers. In Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 502, + 287, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 502, + 287, + 523 + ], + "spans": [ + { + "bbox": [ + 48, + 502, + 287, + 523 + ], + "type": "text", + "content": "[10] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. 2020. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 524, + 287, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 524, + 287, + 556 + ], + "spans": [ + { + "bbox": [ + 48, + 524, + 287, + 556 + ], + "type": "text", + "content": "[11] Hao Jiang and Vamsi Krishna Ithapu. Egocentric pose estimation from human vision span. In International Conference on Computer Vision (ICCV), 2021. 1, 2, 3, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 557, + 287, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 557, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 48, + 557, + 287, + 590 + ], + "type": "text", + "content": "[12] Taeho Kang, Kyungjin Lee, Jinrui Zhang, and Youngki Lee. Ego3dpose: Capturing 3d cues from binocular egocentric views. In SIGGRAPH Asia Conference, 2023. 2, 5, 6, 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 591, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 591, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 591, + 287, + 612 + ], + "type": "text", + "content": "[13] David G. Kendall. A Survey of the Statistical Theory of Shape. Statistical Science, 4(2):87-99, 1989. 
7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 613, + 287, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 657 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 657 + ], + "type": "text", + "content": "[14] Rawal Khirodkar, Aayush Bansal, Lingni Ma, Richard Newcombe, Minh Vo, and Kris Kitani. Ego-humans: An egocentric 3d multi-human benchmark. In International Conference on Computer Vision (ICCV), 2023. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 658, + 287, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 690 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 690 + ], + "type": "text", + "content": "[15] Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations (ICLR), 2015. 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 691, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 712 + ], + "type": "text", + "content": "[16] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer White-" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 105 + ], + "type": "text", + "content": "head, Alexander C. Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything. arXiv:2304.02643, 2023. 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 107, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 545, + 140 + ], + "type": "text", + "content": "[17] Jiaman Li, Karen Liu, and Jiajun Wu. Ego-body pose estimation via ego-head pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2023. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 141, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 184 + ], + "type": "text", + "content": "[18] Wenhao Li, Hong Liu, Runwei Ding, Mengyuan Liu, Pichao Wang, and Wenming Yang. Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia (TMM), 2022. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 186, + 545, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 186, + 545, + 229 + ], + "spans": [ + { + "bbox": [ + 308, + 186, + 545, + 229 + ], + "type": "text", + "content": "[19] Wenhao Li, Hong Liu, Hao Tang, Pichao Wang, and Luc Van Gool. Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 230, + 545, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 230, + 545, + 263 + ], + "spans": [ + { + "bbox": [ + 308, + 230, + 545, + 263 + ], + "type": "text", + "content": "[20] Kevin Lin, Lijuan Wang, and Zicheng Liu. 
End-to-end human pose and mesh reconstruction with transformers. In Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 264, + 545, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 264, + 545, + 318 + ], + "spans": [ + { + "bbox": [ + 308, + 264, + 545, + 318 + ], + "type": "text", + "content": "[21] Yuxuan Liu, Jianxin Yang, Xiao Gu, Yijun Chen, Yao Guo, and Guang-Zhong Yang. Egofish3d: Egocentric 3d pose estimation from a fisheye camera via self-supervised learning. IEEE Transactions on Multimedia (TMM), pages 1-12, 2023. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 319, + 545, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 319, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 308, + 319, + 545, + 352 + ], + "type": "text", + "content": "[22] Zhengyi Luo, Ryo Hachiuma, Ye Yuan, and Kris Kitani. Dynamics-regulated kinematic policy for egocentric pose estimation. 2021. 1, 2, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 354, + 545, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 354, + 545, + 409 + ], + "spans": [ + { + "bbox": [ + 308, + 354, + 545, + 409 + ], + "type": "text", + "content": "[23] Haoyu Ma, Liangjian Chen, Deying Kong, Zhe Wang, Xingwei Liu, Hao Tang, Xiangyi Yan, Yusheng Xie, Shih-Yao Lin, and Xiaohui Xie. Transfusion: Cross-view fusion with transformer for 3d human pose estimation. In British Machine Vision Conference (BMVC), 2021. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 410, + 534, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 410, + 534, + 422 + ], + "spans": [ + { + "bbox": [ + 308, + 410, + 534, + 422 + ], + "type": "text", + "content": "[24] Metashape, 2023. https://www.agisoft.com/.4" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 422, + 515, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 422, + 515, + 433 + ], + "spans": [ + { + "bbox": [ + 308, + 422, + 515, + 433 + ], + "type": "text", + "content": "[25] Mixamo, 2022. https://www MIXamo.com.3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 434, + 545, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 434, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 308, + 434, + 545, + 499 + ], + "type": "text", + "content": "[26] Xiaqing Pan, Nicholas Charron, Yongqian Yang, Scott Peters, Thomas Whelan, Chen Kong, Omkar Parkhi, Richard Newcombe, and Yuheng (Carl) Ren. Aria digital twin: A new benchmark dataset for egocentric 3d machine perception. In International Conference on Computer Vision (ICCV), 2023. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 501, + 545, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 501, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 308, + 501, + 545, + 555 + ], + "type": "text", + "content": "[27] Jinman Park, Kimathi Kaai, Saad Hossain, Norikatsu Sumi, Sirisha Rambhatla, and Paul Fieguth. Domain-guided spatiotemporal self-attention for egocentric 3d pose estimation. In Conference on Knowledge Discovery and Data Mining (KDD), 2023. 
2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 556, + 545, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 556, + 545, + 600 + ], + "spans": [ + { + "bbox": [ + 308, + 556, + 545, + 600 + ], + "type": "text", + "content": "[28] Sungchan Park, Eunyi You, Inhoe Lee, and Joonseok Lee. Towards robust and smooth 3d multi-person pose estimation from monocular videos in the wild. In International Conference on Computer Vision (ICCV), 2023. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 601, + 545, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 601, + 545, + 645 + ], + "spans": [ + { + "bbox": [ + 308, + 601, + 545, + 645 + ], + "type": "text", + "content": "[29] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 3d human pose estimation in video with temporal convolutions and semi-supervised training. In Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 647, + 545, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 545, + 658 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 545, + 658 + ], + "type": "text", + "content": "[30] RenderPeople, 2022. https://renderpeople.com.3" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "text", + "content": "[31] Helge Rhodin, Christian Richardt, Dan Casas, Eldar Insafutdinov, Mohammad Shafiei, Hans-Peter Seidel, Bernt Schiele, and Christian Theobalt. Egocap: egocentric marker-less motion capture with two fisheye cameras. ACM Transactions on Graphics (TOG), 35(6):1-11, 2016. 1, 2, 3" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "text", + "content": "775" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 286, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 73, + 286, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 73, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 48, + 73, + 286, + 95 + ], + "type": "text", + "content": "[32] RIBCAGE RX0 II camera, 2023. https://www.backbone.ca/product/ribcage-rx0-2/.3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 95, + 286, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 95, + 286, + 128 + ], + "spans": [ + { + "bbox": [ + 48, + 95, + 286, + 128 + ], + "type": "text", + "content": "[33] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Computer Vision and Pattern Recognition (CVPR), 2016. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 129, + 286, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 286, + 172 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 286, + 172 + ], + "type": "text", + "content": "[34] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, and Christian Theobalt. 
Physcap: Physically plausible monocular 3d motion capture in real time. ACM Transactions on Graphics (TOG), 39(6), 2020. 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 173, + 286, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 173, + 286, + 216 + ], + "spans": [ + { + "bbox": [ + 48, + 173, + 286, + 216 + ], + "type": "text", + "content": "[35] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, Patrick Pérez, and Christian Theobalt. Neural monocular 3d human motion capture with physical awareness. ACM Transactions on Graphics (TOG), 40(4), 2021. 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 217, + 286, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 217, + 286, + 259 + ], + "spans": [ + { + "bbox": [ + 48, + 217, + 286, + 259 + ], + "type": "text", + "content": "[36] Zhenhua Tang, Zhaofan Qiu, Yanbin Hao, Richang Hong, and Ting Yao. 3d human pose estimation with spatiotemporal criss-cross attention. In Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 261, + 286, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 261, + 286, + 304 + ], + "spans": [ + { + "bbox": [ + 48, + 261, + 286, + 304 + ], + "type": "text", + "content": "[37] Denis Tome, Patrick Peluse, Lourdes Agapito, and Hernan Badino. xr-egopose: Egocentric 3d human pose from an hmd camera. In International Conference on Computer Vision (ICCV), 2019. 1, 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 305, + 286, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 305, + 286, + 370 + ], + "spans": [ + { + "bbox": [ + 48, + 305, + 286, + 370 + ], + "type": "text", + "content": "[38] Denis Tome, Thiemo Alldieck, Patrick Peluse, Gerard Pons-Moll, Lourdes Agapito, Hernan Badino, and Fernando de la Torre. Selfpose: 3d egocentric pose estimation from a headset mounted camera. IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 45(6):6794-6806, 2023. 1, 2, 5, 6, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 371, + 286, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 371, + 286, + 415 + ], + "spans": [ + { + "bbox": [ + 48, + 371, + 286, + 415 + ], + "type": "text", + "content": "[39] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems (NeurIPS), 2017. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 415, + 286, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 415, + 286, + 459 + ], + "spans": [ + { + "bbox": [ + 48, + 415, + 286, + 459 + ], + "type": "text", + "content": "[40] Jian Wang, Lingjie Liu, Weipeng Xu, Kripasindhu Sarkar, and Christian Theobalt. Estimating egocentric 3d human pose in global space. In International Conference on Computer Vision (ICCV), 2021. 1, 2, 3, 6, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 460, + 286, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 460, + 286, + 514 + ], + "spans": [ + { + "bbox": [ + 48, + 460, + 286, + 514 + ], + "type": "text", + "content": "[41] Jian Wang, Lingjie Liu, Weipeng Xu, Kripasindhu Sarkar, Diogo Luvizon, and Christian Theobalt. 
Estimating egocentric 3d human pose in the wild with external weak supervision. In Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 514, + 286, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 286, + 558 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 286, + 558 + ], + "type": "text", + "content": "[42] Jian Wang, Diogo Luvizon, Weipeng Xu, Lingjie Liu, Kripasindhu Sarkar, and Christian Theobalt. Scene-aware egocentric 3d human pose estimation. In Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 559, + 286, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 286, + 613 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 286, + 613 + ], + "type": "text", + "content": "[43] Jian Wang, Zhe Cao, Diogo Luvizon, Lingjie Liu, Kripasindhu Sarkar, Danhang Tang, Thabo Beeler, and Christian Theobalt. Egocentric whole-body motion capture with fisheyevit and diffusion-based motion refinement. In Computer Vision and Pattern Recognition (CVPR), 2024. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 614, + 286, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 286, + 658 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 286, + 658 + ], + "type": "text", + "content": "[44] Tao Wang, Jianfeng Zhang, Yujun Cai, Shuicheng Yan, and Jiashi Feng. Direct multi-view multi-person 3d human pose estimation. Advances in Neural Information Processing Systems (NeurIPS), 2021. 2, 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 658, + 286, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 286, + 713 + ], + "type": "text", + "content": "[45] Weipeng Xu, Avishek Chatterjee, Michael Zollhoefer, Helge Rhodin, Pascal Fua, Hans-Peter Seidel, and Christian Theobalt. " + }, + { + "bbox": [ + 48, + 658, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{Mo}^2\\mathrm{Cap}^2" + }, + { + "bbox": [ + 48, + 658, + 286, + 713 + ], + "type": "text", + "content": " : Real-time mobile 3d motion capture with a cap-mounted fisheye camera. IEEE Transactions on Visualization and Computer Graphics, 2019. 1, 2, 3, 6, 7" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 677 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[46] Honghong Yang, Longfei Guo, Yumei Zhang, and Xiaojun Wu. U-shaped spatial-temporal transformer network for 3d human pose estimation. Machine Vision and Applications, 33(6):82, 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 118, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 162 + ], + "type": "text", + "content": "[47] Yingxuan You, Hong Liu, Ti Wang, Wenhao Li, Runwei Ding, and Xia Li. Co-evolution of pose and mesh for 3d human body estimation from video. In International Conference on Computer Vision (ICCV), 2023. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 163, + 545, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 196 + ], + "type": "text", + "content": "[48] Ye Yuan and Kris Kitani. Ego-pose estimation and forecasting as real-time pd control. In International Conference on Computer Vision (ICCV), 2019. 1, 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 197, + 545, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 545, + 240 + ], + "type": "text", + "content": "[49] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Jun-song Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. In Computer Vision and Pattern Recognition (CVPR), 2022. 2, 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "type": "text", + "content": "[50] Siwei Zhang, Qianli Ma, Yan Zhang, Zhiyin Qian, Taein Kwon, Marc Pollefeys, Federica Bogo, and Siyu Tang. Ego-body: Human body shape and motion of interacting people from head-mounted devices. In European conference on computer vision (ECCV), 2022. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 297, + 545, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 297, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 297, + 545, + 342 + ], + "type": "text", + "content": "[51] Siwei Zhang, Qianli Ma, Yan Zhang, Sadegh Aliakbarian, Darren Cosker, and Siyu Tang. Probabilistic human mesh recovery in 3d scenes from egocentric views. In International Conference on Computer Vision (ICCV), 2023. 2, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 342, + 545, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 342, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 307, + 342, + 545, + 387 + ], + "type": "text", + "content": "[52] Yahui Zhang, Shaodi You, and Theo Gevers. Automatic calibration of the fisheye camera for egocentric 3d human pose estimation from a single image. In Winter Conference on Applications of Computer Vision (WACV), 2021. 1, 2, 6, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 387, + 545, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 387, + 545, + 430 + ], + "spans": [ + { + "bbox": [ + 307, + 387, + 545, + 430 + ], + "type": "text", + "content": "[53] Dongxu Zhao, Zhen Wei, Jisan Mahmud, and Jan-Michael Frahm. Egoglass: Egocentric-view human pose estimation from an eyeglass frame. In International Conference on 3D Vision (3DV), 2021. 1, 2, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 431, + 545, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 431, + 545, + 485 + ], + "spans": [ + { + "bbox": [ + 307, + 431, + 545, + 485 + ], + "type": "text", + "content": "[54] Qitao Zhao, Ce Zheng, Mengyuan Liu, Pichao Wang, and Chen Chen. Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 
2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 487, + 545, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 487, + 545, + 531 + ], + "spans": [ + { + "bbox": [ + 307, + 487, + 545, + 531 + ], + "type": "text", + "content": "[55] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In International Conference on Computer Vision (ICCV), 2021." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 532, + 545, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 532, + 545, + 576 + ], + "spans": [ + { + "bbox": [ + 307, + 532, + 545, + 576 + ], + "type": "text", + "content": "[56] Jieming Zhou, Tong Zhang, Zeeshan Hayden, Lars Petersson, and Mehrtash Harandi. Diff3dhpe: A diffusion model for 3d human pose estimation. In International Conference on Computer Vision (ICCV) Workshops, 2023." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 577, + 545, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 577, + 545, + 620 + ], + "spans": [ + { + "bbox": [ + 307, + 577, + 545, + 620 + ], + "type": "text", + "content": "[57] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: A unified perspective on learning human motion representations. In International Conference on Computer Vision (ICCV), 2023." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 622, + 545, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 622, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 307, + 622, + 545, + 677 + ], + "type": "text", + "content": "[58] Yiran Zhu, Xing Xu, Fumin Shen, Yanli Ji, Lianli Gao, and Heng Tao Shen. PosegTac: Graph transformer encoder-decoder with atrous convolution for 3d human pose estimation. In International Joint Conference on Artificial Intelligence (IJCAI), 2021. 
2" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "type": "text", + "content": "776" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/024f2dc1-2c03-4b1e-a716-0e0aea35b1de_content_list.json b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/024f2dc1-2c03-4b1e-a716-0e0aea35b1de_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9dbd8ed2d1801e9345b797e88fc81ca048e92dd4 --- /dev/null +++ b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/024f2dc1-2c03-4b1e-a716-0e0aea35b1de_content_list.json @@ -0,0 +1,1929 @@ +[ + { + "type": "text", + "text": "3D LiDAR Mapping in Dynamic Environments Using a 4D Implicit Neural Representation", + "text_level": 1, + "bbox": [ + 246, + 130, + 723, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xingguang Zhong", + "bbox": [ + 197, + 202, + 352, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yue Pan", + "bbox": [ + 383, + 203, + 457, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Cyrill Stachniss $^{1,2}$", + "bbox": [ + 488, + 203, + 635, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jens Behley", + "bbox": [ + 663, + 203, + 767, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1Center for Robotics, University of Bonn, 2Lamarr Institute for Machine Learning and Artificial Intelligence {zhong, yue.pan, cyrill.stachniss, Jens.vehley}@igg.uni-bonn.de", + "bbox": [ + 163, + 223, + 805, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 291, + 313, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Building accurate maps is a key building block to enable reliable localization, planning, and navigation of autonomous vehicles. We propose a novel approach for building accurate maps of dynamic environments utilizing a sequence of LiDAR scans. To this end, we propose encoding the 4D scene into a novel spatio-temporal implicit neural map representation by fitting a time-dependent truncated signed distance function to each point. Using our representation, we extract the static map by filtering the dynamic parts. Our neural representation is based on sparse feature grids, a globally shared decoder, and time-dependent basis functions, which we jointly optimize in an unsupervised fashion. To learn this representation from a sequence of LiDAR scans, we design a simple yet efficient loss function to supervise the map optimization in a piecewise way. We evaluate our approach on various scenes containing moving objects in terms of the reconstruction quality of static maps and the segmentation of dynamic point clouds. 
The experimental results demonstrate that our method is capable of removing the dynamic part of the input point clouds while reconstructing accurate and complete 3D maps, outperforming several state-of-the-art methods.", + "bbox": [ + 75, + 323, + 473, + 656 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 672, + 209, + 688 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mapping using range sensors, like LiDAR or RGB-D cameras, is a fundamental task in computer vision and robotics. Often, we want to obtain accurate maps to support downstream tasks such as localization, planning, or navigation. For achieving an accurate reconstruction of an outdoor environment, we have to account for dynamics caused by moving objects, such as vehicles or pedestrians. Furthermore, dynamic object removal plays an important role in autonomous driving and robotics applications for creating digital twins for realistic simulation and high-definition mapping, where a static map is augmented with semantic and task-relevant information.", + "bbox": [ + 75, + 698, + 468, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6473e44b651ef1cd7abce4bfc4dc523c37147e3ccf451c3664ee202a2f382717.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 501, + 301, + 694, + 407 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b272e82d84603a0b12b8b1e058766ea4751e217f4718f55e8cf43fd6f7b550b5.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 699, + 299, + 890, + 407 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9bd3a3fb5914879d7ced7fd13c61a1efb77ea15a2c1f2e458a5d3e2ea0c6d2df.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 501, + 430, + 694, + 539 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/99700c34d4e15036d925f349d18336d6fb4830b54919ad360b7f20a2b21c7e54.jpg", + "image_caption": [ + "(d)", + "Figure 1. Given a sequence of point clouds, as shown in (a), we optimize our 4D neural representation that can be queried at arbitrary positions for a specific time. (b) Based on the estimated time-dependent TSDF values, we can extract a mesh at a specific point in time. Additionally, our 4D neural representation can be also used for static mapping (c) and dynamic object removal (c)." + ], + "image_footnote": [], + "bbox": [ + 699, + 436, + 893, + 539 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mapping and state estimation in dynamic environments is a classical problem in robotics [5, 56, 57]. Approaches for simultaneous localization and mapping (SLAM) can apply different strategies to deal with dynamics. Common ways are: (1) filtering dynamics from the input [1, 30, 47, 48, 51] as a pre-processing step, which requires a semantic interpretation of the scene; (2) modeling the occupancy in the map representation [17, 34, 37, 49, 50, 64], where dynamics can be implicitly removed by retrospectively removing measurements in free space; (3) including it in the state estimation [4, 16, 55, 61, 67] to model which measurements originated from the dynamic and static parts of the environment. 
Our proposed method falls into the last category and allows us to model dynamics directly in the map representation leading to a spatio-temporal map representation.", + "bbox": [ + 496, + 657, + 892, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, implicit neural representations gained increas", + "bbox": [ + 517, + 885, + 890, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{1}$ Code: https://github.com/PRBonn/4dNDF", + "bbox": [ + 94, + 886, + 382, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "15417", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ing interest in computer vision for novel view synthesis [35, 36] and 3D shape reconstruction [33, 40]. Due to their compactness and continuity, several approaches [65, 70, 73] investigate the use of neural representations in large-scale 3D LiDAR mapping leading to accurate maps while significantly reducing memory consumption. However, these approaches often do not address the problem of handling dynamics during mapping. The recent progress on dynamic NeRF [7, 13, 44, 52] and neural deformable object reconstruction [6, 10] indicates that neural representations can be also used to represent dynamic scenes, which inspires us to tackle the problem of mapping in dynamic environments from the perspective of 4D reconstruction.", + "bbox": [ + 75, + 90, + 472, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose a novel method to reconstruct large 4D dynamic scenes by encoding every point's time-dependent truncated signed distance function (TSDF) into an implicit neural scene representation. As illustrated in Fig. 1, we take sequentially recorded LiDAR point clouds collected in dynamic environments as input and generate a TSDF for each time frame, which can be used to extract a mesh using marching cubes [29]. The background TSDF, which is unchanged during the whole sequence, can be extracted from the 4D signal easily. We regard it as a static map that can be used to segment dynamic objects from the original point cloud. Compared to the traditional voxel-based mapping method, the continuous neural representation allows for the removal of dynamic objects while preserving rich map details. In summary, the main contributions of this paper are:", + "bbox": [ + 75, + 287, + 472, + 527 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a novel implicit neural representation to jointly reconstruct a dynamic 3D environment and maintain a static map using sequential LiDAR scans as input.", + "- We employ a piecewise training data sampling strategy and design a simple, yet effective loss function that maintains the consistency of the static point supervision through gradient constraints.", + "- We evaluate the mapping results by the accuracy of the dynamic object segmentation as well as the quality of the reconstructed static map showing superior performance compared to several baselines. 
We provide our code and the data used for experiments." + ], + "bbox": [ + 76, + 529, + 468, + 710 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 723, + 218, + 739 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Mapping and SLAM in dynamic environments is a classical topic in robotics [5, 56, 57] with a large body of work, which tackles the problem by pre-processing the sensor data [1, 30, 47, 48, 51], occupancy estimation to filter dynamics by removing measurements in free space [17, 34, 37, 39, 49, 50, 64], or state estimation techniques [4, 16, 55, 61, 67]. Below, we focus on closely related approaches using neural representations but also static map building approaches for scenes containing dynamics.", + "bbox": [ + 75, + 750, + 468, + 885 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Dynamic NeRF. Dynamic NeRFs aim to solve the prob-", + "bbox": [ + 96, + 885, + 468, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "lem of novel view synthesis in dynamic environments. Some approaches [41-43, 58, 63] address this challenge by modeling the deformation of each point with respect to a canonical frame. However, these methods cannot represent newly appearing objects. This can render them unsuited for complicated real-life scenarios. In contrast, NSFF [24] and DynIBaR [26] get rid of the canonical frame by computing the motion field of the whole scene. While these methods can deliver satisfactory results, the training time is usually in the order of hours or even days.", + "bbox": [ + 496, + 90, + 890, + 242 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Another type of method leverages the compactness of the neural representation to model the 4D spatio-temporal information directly. Several works [7, 13, 52] project the 4D input into multiple voxelized lower-dimensional feature spaces to avoid large memory consumption, which improves the efficiency of the optimization. Song et al. [54] propose a time-dependent sliding window strategy for accumulating the voxel features. Instead of only targeting novel view synthesis, several approaches [26, 68, 71] decompose the scene into dynamic objects and static background in a self-supervised way, which inspired our work. Other approaches [22, 23, 53] accomplish neural representation-based reconstruction for larger scenes by adding additional supervision such as object masks or optical flow.", + "bbox": [ + 496, + 244, + 892, + 455 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural representations for LiDAR scans. Recently, many approaches aim to enhance scene reconstruction using LiDAR data through neural representations. The early work URF [46] leverages LiDAR data as depth supervision to improve the optimization of a neural radiance field. With only LiDAR data as input, Huang et al. [20] achieve novel view synthesis for LiDAR scans with differentiable rendering. Similar to our work, Shine-mapping [73] and EIN-RUL [70] utilize sparse hierarchical feature voxel structures to achieve large-scale 3D mapping. Additionally, the data-driven approach NKSR [18] based on learned kernel regression demonstrates accurate surface reconstruction with noisy LiDAR point cloud as input. 
Although these approaches perform well in improving reconstruction accuracy and reducing memory consumption, none of them consider the problem of dynamic object interference in real-world environments.", + "bbox": [ + 496, + 459, + 892, + 715 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Static map building and motion detection. In addition to removing moving objects from the voxel map with ray tracing, numerous works [8, 19, 31, 32] try to segment dynamic points from raw LiDAR point clouds. However, these methods require a significant amount of labeled data, which makes it challenging to generalize them to various scenarios or sensors with different scan patterns. In contrast, geometry-based, more heuristic approaches have also produced promising results. Kim et al. [21] solve this problem using the visibility of range images, but their results are still highly affected by the resolution. Lim et al. proposed Erasor [27], which leverages ground fitting as prior", + "bbox": [ + 496, + 719, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "15418", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to achieve better segmentation for dynamic points. More recent approaches [9, 28] extend it to instance level to improve results. However, these methods rely on an accurate ground fitting method, which is mainly designed for autonomous driving scenarios, which cannot be guaranteed in complex unstructured real environments.", + "bbox": [ + 75, + 90, + 467, + 180 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In contrast to the approaches discussed above, we follow recent developments in neural reconstruction and propose a novel scene representation that allows us to capture the spatio-temporal progression of a scene. We represent the time-varying SDF of a scene in an unsupervised fashion, which we exploit to remove dynamic objects and reconstruct accurate meshes of the static scene.", + "bbox": [ + 75, + 181, + 467, + 285 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Our Approach", + "text_level": 1, + "bbox": [ + 76, + 300, + 225, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The input of our approach is given by a sequence of point clouds, $S_{1:N} = (S_1,\\dots ,S_N)$ , and their corresponding global poses $\\mathsf{T}_t\\in \\mathbb{R}^{4\\times 4}$ $t\\in [1,N]$ , estimated via scan matching, LiDAR odometry, or SLAM methods [2, 11, 12, 60]. Each scan's point cloud $S_{t} = \\{\\pmb{s}_{t}^{1},\\dots ,\\pmb{s}_{t}^{M_{t}}\\}$ is a set of points, $\\pmb{s}_t^i\\in \\mathbb{R}^3$ , collected at time $t$ . Given such a sequence of scans $S_{1:N}$ , our approach aims to reconstruct a 4D TSDF of the traversed scene and maintain a static 3D map at the same time.", + "bbox": [ + 75, + 325, + 467, + 459 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the next sections, we first introduce our spatiotemporal representation and then explain how to optimize it to represent the dynamic and static parts of a point cloud sequence $S_{1:N}$ .", + "bbox": [ + 75, + 460, + 467, + 521 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Map Representation", + "text_level": 1, + "bbox": [ + 76, + 530, + 269, + 546 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The key component of our approach is an implicit neural scene representation that allows us to represent a 4D TSDF of the scene, as well as facilitates the extraction of a static map representation. Our proposed spatio-temporal scene representation is optimized for the given point cloud sequence $S_{1:N}$ such that we can retrieve for an arbitrary point $\\pmb{p} \\in \\mathbb{R}^3$ and time $t \\in [1,N]$ the corresponding time-varying signed distances value at that location.", + "bbox": [ + 75, + 551, + 467, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Temporal representation. We utilize an TSDF to represent the scene, i.e., a function that provides the signed distance to the nearest surface for any given point $\\pmb{p} \\in \\mathbb{R}^3$ . The sign of the distance is positive when the point is in free space or in front of the measured surface and is negative when the point is inside the occupied space or behind the measured surface.", + "bbox": [ + 75, + 674, + 467, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In a dynamic 3D scene, measuring the signed distance of any coordinate at each moment produces a time-dependent function that captures the signed distance changes over time, see Fig. 2 for an illustration. Additionally, if a coordinate is static throughout the period, the signed distance should remain constant. The key idea of our spatiotemporal scene representation is to fit the time-varying SDF at each point with several basis functions. Inspired by Li", + "bbox": [ + 75, + 779, + 467, + 900 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1eb36f546eaeca0e2636c4e4c378d62bab5463a1c2343ff49a674b673775b880.jpg", + "image_caption": [ + "Figure 2. Principle of our 4D TSDF representation: The left figure shows a moving object and a query point $\\pmb{p}$ . The one on the right depicts the corresponding signed distance at $\\pmb{p}$ over time. At $t_0$ , $\\pmb{p}$ 's signed distance is a positive truncated value. When the moving object reaches $\\pmb{p}$ at time $t_1$ , $\\pmb{p}$ is inside the object and its signed distance is negative accordingly. At $t_2$ , the moving object moved past $\\pmb{p}$ , the signed distance of $\\pmb{p}$ gets positive again." + ], + "image_footnote": [], + "bbox": [ + 506, + 90, + 707, + 191 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/7672585101abf831228b543a099278c54743c320ac8da2480fd27fd0196a141a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 712, + 98, + 888, + 191 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "et al. [26]'s representation of moving point trajectories, we exploit $K$ globally shared basis functions $\\phi_k: \\mathbb{R} \\mapsto \\mathbb{R}$ . Using these basis functions $\\phi_k(t)$ , we model the time-varying TSDF $F(\\pmb{p}, t)$ that maps a location $\\pmb{p} \\in \\mathbb{R}^3$ at time $t$ to a signed distance as follows:", + "bbox": [ + 496, + 325, + 890, + 401 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nF (\\boldsymbol {p}, t) = \\sum_ {k = 1} ^ {K} w _ {\\boldsymbol {p}} ^ {k} \\phi_ {k} (t), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 614, + 414, + 890, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $w_{\\pmb{p}}^{k} \\in \\mathbb{R}$ are estimable location-dependent coefficients. 
In line with previous works [26, 62], we initialize the basis functions with discrete cosine transform (DCT) basis functions:", + "bbox": [ + 496, + 469, + 890, + 529 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\phi_ {k} (t) = \\cos \\left(\\frac {\\pi}{2 N} (2 t + 1) (k - 1)\\right). \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 571, + 539, + 890, + 566 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The first basis function for $k = 1$ is time-independent as $\\phi_1(t) = 1$ . During the training process, we fix $\\phi_1(t)$ and determine the other basis functions by backpropagation. We consider $\\phi_1(t)$ 's corresponding weight $w_{\\pmb{p}}^{1}$ as the static SDF value of the point $\\pmb{p}$ . Hence, $F(\\pmb{p}, t)$ consists of its static background value, i.e., $w_{\\pmb{p}}^{1}\\phi_{1}(t) = w_{\\pmb{p}}^{1}$ , and the weighted sum of dynamic basis functions $\\phi_2(t), \\ldots, \\phi_K(t)$ .", + "bbox": [ + 496, + 580, + 890, + 686 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As the basis functions $\\phi_1(t),\\ldots ,\\phi_K(t)$ are shared between all points in the scene, we need to optimize the location-dependent weights that are implicitly represented in our spatial representation.", + "bbox": [ + 496, + 686, + 890, + 747 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Spatial representation. To achieve accurate scene reconstruction while maintaining memory efficiency, we employ a multi-resolution sparse voxel grid to store spatial geometric information.", + "bbox": [ + 496, + 750, + 890, + 806 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "First, we accumulate the input point clouds, $S_{1}, \\ldots, S_{N}$ based on their poses $T_{1}, \\ldots, T_{N}$ computed from LiDAR odometry and generate a hierarchy of voxel grids around points to ensure complete coverage in 3D. We use a spatial hash table for fast retrieval of the resulting voxels that are only initialized if points fall into a voxel.", + "bbox": [ + 496, + 809, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "15419", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/e0e986dd7c3c1d3401d87bc8d749bc80e7e1d37617c3cc9b641e7ca971ba76a0.jpg", + "image_caption": [ + "Figure 3. Overview of querying a TSDF value in our 4D map representation. For querying a point $\\pmb{p}$ at $t_i$ and $t_{i + 1}$ , we first retrieve each corner's feature in $\\mathcal{F}^l$ of the voxel that $\\pmb{p}$ is located in and obtain the fused feature $\\pmb{f}_{\\pmb{p}}$ by trilinear interpolation. Then, we feed $\\pmb{f}_{\\pmb{p}}$ into the decoder $D_{\\mathrm{mlp}}$ and take the output as the weights of different basis functions $\\phi_1(t),\\ldots ,\\phi_K(t)$ . Finally, we calculate the weighted sum of basis functions' values at $t_i$ and $t_{i + 1}$ to get their respective SDF results. For simplicity, we only illustrate one level of hashed feature grids." + ], + "image_footnote": [], + "bbox": [ + 101, + 90, + 872, + 238 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Similar to Instant-NGP [36], we save a feature vector $\\pmb{f} \\in \\mathbb{R}^{D}$ at each corner vertex of the voxel grid in each resolution level, where we denote as $\\mathcal{F}^l$ the level-wise corner features. 
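To make Eq. (1) and Eq. (2) concrete, the following is a minimal PyTorch-style sketch of the time-dependent TSDF evaluation. It is not the authors' released implementation; the tensor layout, the per-frame storage of the basis values, and the example sizes (K = 32 basis functions as in Tab. 1, N = 140 frames) are illustrative assumptions.

```python
import torch

def dct_basis(t: torch.Tensor, K: int, N: int) -> torch.Tensor:
    # Eq. (2): phi_k(t) = cos(pi / (2N) * (2t + 1) * (k - 1)); k = 1 is the constant (static) basis
    k = torch.arange(1, K + 1, dtype=t.dtype, device=t.device)
    return torch.cos(torch.pi / (2.0 * N) * (2.0 * t.unsqueeze(-1) + 1.0) * (k - 1.0))

K, N = 32, 140                                       # illustrative sizes
phi = torch.nn.Parameter(dct_basis(torch.arange(N, dtype=torch.float32), K, N))  # (N, K)
# during training the first column (phi_1 = 1) would be kept fixed; the rest is optimized

def tsdf(weights: torch.Tensor, frame_ids: torch.Tensor) -> torch.Tensor:
    # Eq. (1): F(p, t) = sum_k w_p^k * phi_k(t); weights (B, K) come from the MLP decoder
    return (weights * phi[frame_ids]).sum(dim=-1)    # frame_ids: (B,) long tensor, output: (B,)
```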
We compute the feature vector $\\pmb{f}_{\\pmb{p}} \\in \\mathbb{R}^{D}$ for given query point $\\pmb{p} \\in \\mathbb{R}^3$ inside the hierarchical grid as follows:", + "bbox": [ + 76, + 332, + 472, + 411 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {f} _ {\\boldsymbol {p}} = \\sum_ {l = 1} ^ {L} \\operatorname {i n t e r p o l a t e} (\\boldsymbol {p}, \\mathcal {F} ^ {l}), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 419, + 468, + 460 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where interpolate is the trilinear interpolation for a given point $\\pmb{p}$ using the corner features $\\mathcal{F}^l$ at level $l$ .", + "bbox": [ + 76, + 469, + 468, + 498 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Then, we decode the interpolated feature vector $\\pmb{f}_{\\pmb{p}}$ into the desired weights $\\pmb{w}_{\\pmb{p}} = (w_{p}^{1},\\dots ,w_{p}^{K})\\in \\mathbb{R}^{K}$ by a globally shared multi-layer perceptron (MLP) $D_{\\mathrm{mlp}}$ :", + "bbox": [ + 76, + 500, + 470, + 547 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {w} _ {\\boldsymbol {p}} = D _ {\\operatorname {m l p}} \\left(\\boldsymbol {f} _ {\\boldsymbol {p}}\\right). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 558, + 468, + 575 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As every step is differentiable, we can optimize the multi-resolution feature grids $\\mathcal{F}^l$ , the MLP decoder $D_{\\mathrm{mlp}}$ and the values of the basis functions jointly by gradient descent once we have training data and corresponding target values. The SDF querying process is illustrated in Fig. 3.", + "bbox": [ + 76, + 585, + 468, + 660 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Objective Function", + "text_level": 1, + "bbox": [ + 76, + 667, + 261, + 684 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We take samples along the rays from the input scans $S_{t}$ to collect training data. Each scan frame $S_{t}$ corresponds to a moment $t$ in time, so we gather four-dimensional data points $(\\pmb {q},t)$ via sampling along the ray from the scan origin $\\pmb{o}_t\\in \\mathbb{R}^3$ to a point $s_t^i\\in S_t$ . We can represent the sampled points $q_{s}^{i}$ along the ray as $q_{s}^{i} = o_{t} + \\lambda (s_{t}^{i} - o_{t})$ . By setting a truncation threshold $\\tau$ , we split the ray into two regions, at the surface and in the free-space:", + "bbox": [ + 76, + 691, + 468, + 811 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {T} _ {\\text {s u r f}} ^ {i} = \\left\\{\\boldsymbol {q} _ {s} ^ {i} \\mid \\lambda \\in (1 - \\bar {\\tau}, 1 + \\bar {\\tau}) \\right\\} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 821, + 468, + 840 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {T} _ {\\text {f r e e}} ^ {i} = \\left\\{\\boldsymbol {q} _ {s} ^ {i} \\mid \\lambda \\in (0, 1 - \\bar {\\tau}) \\right\\}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 161, + 842, + 468, + 861 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\bar{\\tau} = \\tau (\\| \\pmb{s}_t^i -\\pmb {o}_t\\|)^{-1}$ . Thus, $\\mathcal{T}_{\\mathrm{surf}}^i$ represents the region close to the endpoint $s_t^i\\in S_t$ , and $\\mathcal{T}_{\\mathrm{free}}^i$ is the region", + "bbox": [ + 76, + 869, + 468, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "in the free space. 
We uniformly sample $M_{s}$ and $M_{f}$ points from $\\mathcal{T}_{\\mathrm{surf}}^i$ and $\\mathcal{T}_{\\mathrm{free}}^i$ separately. We obtain two sets $\\mathcal{D}_{\\mathrm{surf}}$ and $\\mathcal{D}_{\\mathrm{free}}$ of samples by sampling over all scans. Unlike prior work [20, 46] that use differentiable rendering to calculate the depth by integration along the ray, we design different losses for $\\mathcal{D}_{\\mathrm{surf}}$ and $\\mathcal{D}_{\\mathrm{free}}$ to supervise the 4D TSDF directly.", + "bbox": [ + 496, + 332, + 890, + 422 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Near Surface Loss. Since the output of our 4D map is the signed distance value $\\hat{d} = F(\\pmb {p},t)$ at an arbitrary position $\\pmb {p}\\in \\mathbb{R}^3$ in time $t\\in [1,N]$ , we expect that the predicted value $\\hat{d}$ does not change over time for static points. However, this cannot be guaranteed if we use the projective distance $d_{\\mathrm{surf}}$ to the surface along the ray direction directly as the target value, since the projective distance would change over time due to the change of view direction by the moving sensor, even in a static scene. Thus, for the sampled data in $\\mathcal{D}_{\\mathrm{surf}}$ , i.e., the sampled points near the surface, we can only obtain reliable information about the sign of the TSDF value of these points, which should be positive if the point is before the endpoint and negative if the point is behind. In addition, for a sampled point in front of the endpoint, its projective signed distance $d_{\\mathrm{surf}}$ should be the upper bound of its actual signed distance value. And for sampled points behind the endpoint, $d_{\\mathrm{surf}}$ should be the lower bound.", + "bbox": [ + 496, + 424, + 892, + 681 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We design a piecewise loss $L_{\\mathrm{surf}}$ to supervise the sampled points near the surface:", + "bbox": [ + 496, + 683, + 893, + 713 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {s u r f}} (\\hat {d}, d _ {\\text {s u r f}}) = \\left\\{ \\begin{array}{c l} | \\hat {d} | & \\text {i f} \\hat {d} d _ {\\text {s u r f}} < 0 \\\\ | \\hat {d} - d _ {\\text {s u r f}} | & \\text {i f} \\hat {d} d _ {\\text {s u r f}} > d _ {\\text {s u r f}} ^ {2} \\\\ 0 & \\text {o t h e r w i s e} \\end{array} , \\right. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 727, + 890, + 777 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\hat{d} = F(\\pmb {q},t)$ is the predicted value from our map for a sample point $\\pmb {q}\\in \\mathcal{D}_{\\mathrm{surf}}$ and $d_{\\mathrm{surf}}$ is its corresponding projective signed distance for that sampled point in the corresponding scan $\\mathcal{S}_t$ . This loss punishes only a prediction when the sign is wrong or its absolute value is larger than the absolute value of $d_{\\mathrm{surf}}$ . 
For a query point exactly on the surface, i.e., $d_{\\mathrm{surf}} = 0$ , $L_{\\mathrm{surf}}$ is simply the L1 loss.", + "bbox": [ + 496, + 792, + 890, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "15420", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To calculate an accurate signed distance value and maintain the consistency of constraints for static points from different observations, we use the natural property of signed distance function to constraint the length of the gradient vector for samples inside $\\mathcal{D}_{\\mathrm{surf}}$ , which is called Eikonal regularization [15, 38]:", + "bbox": [ + 76, + 90, + 472, + 183 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {e i k o n a l}} (\\boldsymbol {p}, t) = \\left(\\left\\| \\frac {\\partial F (\\boldsymbol {p} , t)}{\\partial \\boldsymbol {p}} \\right\\| - 1\\right) ^ {2}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 145, + 195, + 468, + 232 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Inspired by Neuralangelo [25], we manually add perturbations to compute more robust gradient vectors instead of using automatic differentiation, which means we compute numerical gradients:", + "bbox": [ + 76, + 247, + 470, + 306 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {x} F (\\boldsymbol {p}, t) = \\frac {F (\\boldsymbol {p} + \\epsilon_ {\\boldsymbol {x}} , t) - F (\\boldsymbol {p} - \\epsilon_ {\\boldsymbol {x}} , t)}{2 \\epsilon}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 124, + 321, + 468, + 353 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\nabla_x F(\\pmb{p}, t)$ is the component of the gradient $\\frac{\\partial F(\\pmb{p}, t)}{\\partial \\pmb{p}}$ on the $x$ axis, and $\\epsilon_x = (\\epsilon, 0, 0)^{\\top}$ is the added perturbation. We apply the same operation on $y$ and $z$ axes to calculate the numerical gradient. Furthermore, in order to get faster convergence at the beginning and ultimately recover the rich geometric details, we first set a large $\\epsilon$ and gradually reduce it during the training process.", + "bbox": [ + 76, + 367, + 468, + 479 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Free Space Loss. As we tackle the problem of mapping in dynamic environments, we cannot simply accumulate point clouds and then calculate accurate supervision of signed distance value via nearest neighbor search. Therefore, we use a L1 loss $L_{\\mathrm{free}}$ to constrain the signed distance prediction $\\hat{d}$ of the free space points, i.e., $\\pmb{p} \\in \\mathcal{D}_{\\mathrm{free}}$ :", + "bbox": [ + 75, + 479, + 470, + 571 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {f r e e}} (\\hat {d}) = | \\hat {d} - \\tau |, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 585, + 468, + 604 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\tau$ is the truncation threshold we used in Sec. 3.2.", + "bbox": [ + 76, + 619, + 444, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Thanks to our spatio-temporal representation, a single query point can get both, static and dynamic TSDF values. 
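A sketch of the numerical-gradient Eikonal term of Eqs. (8) and (9) is given below, assuming F is a callable that maps a batch of 3D points and frame indices to signed distances; the coarse-to-fine scheduling of the perturbation eps is left to the training loop.

```python
import torch

def eikonal_loss(F, p: torch.Tensor, t: torch.Tensor, eps: float) -> torch.Tensor:
    # Eq. (9): central differences along x, y, z instead of autograd gradients
    grads = []
    for axis in range(3):
        offset = torch.zeros_like(p)
        offset[:, axis] = eps
        grads.append((F(p + offset, t) - F(p - offset, t)) / (2.0 * eps))
    grad = torch.stack(grads, dim=-1)               # (B, 3)
    # Eq. (8): the gradient of a valid SDF has unit length
    return ((grad.norm(dim=-1) - 1.0) ** 2).mean()
```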
Thus, for some regions that are determined to be free space, we can directly add constraints to their static TSDF values.", + "bbox": [ + 76, + 636, + 468, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We divide the free space points $\\mathcal{D}_{\\mathrm{free}}$ into dense and sparse subset $\\mathcal{D}_{\\mathrm{dense}}$ and $\\mathcal{D}_{\\mathrm{sparse}}$ based on a threshold $r_{\\mathrm{dense}}$ for the distance from the free space point sampled at time $t$ to the scan origin $\\pmb{o}_t$ . For each point $\\pmb{p} \\in \\mathcal{D}_{\\mathrm{dense}}$ , we find the nearest neighbor $\\pmb{n_p}$ in the corresponding scan $\\mathcal{S}_t$ , i.e., $\\pmb{n_p} = \\arg \\min_{\\pmb{q} \\in \\mathcal{S}_t} \\| \\pmb{p} - \\pmb{q} \\|_2$ . Let $\\mathcal{D}_{\\mathrm{certain}} = \\{\\pmb{p} \\in \\mathcal{D}_{\\mathrm{dense}} | \\| \\pmb{p} - \\pmb{n_p} \\| > \\tau\\}$ be the points that we consider in the certain free space. Then, we supervise $\\pmb{p} \\in \\mathcal{D}_{\\mathrm{certain}}$ by its static signed distance value directly:", + "bbox": [ + 76, + 699, + 470, + 835 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {c e r t a i n}} (\\boldsymbol {p}) = \\left| w _ {\\boldsymbol {p}} ^ {1} - \\tau \\right|, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 849, + 468, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $w_{p}^{1}$ is the first weight of the decoder's output.", + "bbox": [ + 76, + 883, + 423, + 904 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9480712a742e6610fc35ba16a2dd5d0cc3f35865c2d52ab271f969545f97912f.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 524, + 85, + 686, + 176 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/eff7d68930d37d785d5d9a4d83515144ac33853e6dff29b570a5ac20ff81e6fb.jpg", + "image_caption": [ + "(c)", + "Figure 4. Reconstructed TSDF for KITTI dataset [14]: Subfigures (a) and (b) are the input neighboring frames. Correspondingly, (c) and (d) are horizontal TSDF slices queried from our 4D map. Note that we only display the TSDF values that are less than $0.3\\mathrm{m}$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 524, + 186, + 683, + 273 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/337a268be407d9a8491494d3f5d2f09f07ea4122f94e1ab921ed626c3c06785c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 87, + 867, + 175 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/47f30848224463c1004b7eef8b60395cca84232bc99b7c86ca3c3ee443b14aed.jpg", + "image_caption": [ + "(b)", + "(d)" + ], + "image_footnote": [], + "bbox": [ + 707, + 186, + 864, + 273 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In summary, the final loss $L_{\\mathrm{total}}$ is given by:", + "bbox": [ + 517, + 366, + 808, + 382 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} L _ {\\text {t o t a l}} = \\frac {1}{| \\mathcal {D} _ {\\text {s u r f}} |} \\sum_ {(\\boldsymbol {p}, t) \\in \\mathcal {D} _ {\\text {s u r f}}} L _ {\\text {s u r f}} (\\hat {d}, d _ {\\text {s u r f}}) + \\lambda_ {e} L _ {\\text {e i k o n a l}} (\\boldsymbol {p}, t) \\\\ + \\frac {\\lambda_ {f}}{| \\mathcal {D} _ {\\text {f r e e}} |} \\sum_ {(p, t) \\in \\mathcal {D} _ {\\text {f r e e}}} L _ {\\text {f r e e}} (\\hat {d}) \\\\ + \\frac {\\lambda_ {c}}{\\left| \\mathcal {D} _ {\\text {c e r t a i n}} \\right|} \\sum_ {(\\boldsymbol {p}, t) \\in \\mathcal {D} _ {\\text {c e r t a i n}}} L _ {\\text {c e r t a i n}} (\\boldsymbol {p}), \\tag {12} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 393, + 890, + 513 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\hat{d} = F(\\pmb{p}, t)$ is the predicted signed distance at the sample position $\\pmb{p}$ at time $t$ and $d_{\\mathrm{surf}}$ is the projective signed distance of sample $\\pmb{p}$ . With the above loss function and data sampling strategy, we train our map offline until convergence. In Fig. 4, we show TSDF slices obtained using our optimized 4D map at different times.", + "bbox": [ + 496, + 529, + 890, + 619 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "One application of our 4D map representation is dynamic object segmentation. For a point $\\pmb{p}$ in the input scans $S_{1:N}$ , its static signed distance value $w_{\\pmb{p}}^{1}$ can be obtained by a simple query. If $\\pmb{p}$ belongs to the static background, it should have $w_{\\pmb{p}}^{1} = 0$ . Therefore, we simply set a threshold $d_{\\mathrm{static}}$ and regard a point as dynamic if $w_{\\pmb{p}}^{1} > d_{\\mathrm{static}}$ .", + "bbox": [ + 496, + 621, + 890, + 717 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 724, + 718, + 742 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As hyperparameters of our approach, we use the values listed in Tab. 1 in all LiDAR experiments. Additional parameters are determined by the characteristics of the sensor and the dimensions of the scene. For instance, in the reconstruction of autonomous driving scenes, like KITTI, we set the highest resolution for the feature voxels to $0.3\\mathrm{m}$ . The truncation distance is set to $\\tau = 0.5\\mathrm{m}$ , and the dense area split threshold $r_{\\mathrm{dense}} = 15\\mathrm{m}$ . 
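As a usage illustration of the segmentation rule above, the static channel w_p^1 of every input scan point can be thresholded directly. In this sketch, query_features and decoder are placeholders for the feature-grid lookup of Eq. (3) and the MLP of Eq. (4); the default threshold of 0.16 m is the value reported later for the KTH benchmark experiments.

```python
import torch

def segment_dynamic(points: torch.Tensor, query_features, decoder,
                    d_static: float = 0.16) -> torch.Tensor:
    """Flag scan points as dynamic when the static SDF channel w_p^1 exceeds d_static (meters)."""
    w = decoder(query_features(points))   # (B, K) basis-function weights
    return w[:, 0] > d_static             # boolean mask, True = dynamic point
```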
Regarding training time, it takes 12 minutes to train 140 frames from the KITTI dataset using a single Nvidia Quadro RTX 5000.", + "bbox": [ + 496, + 750, + 893, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "15421", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 89, + 209, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we show the effectiveness of our proposed approach with respect to two aspects: (1) Static mapping quality: The static TSDF built by our method allows us to extract a surface mesh using marching cubes [29]. We compare this extracted mesh with the ground truth mesh to evaluate the reconstruction. (2) Dynamic object segmentation: As mentioned above, our method can segment out the dynamic objects in the input scans. We use point-wise dynamic object segmentation accuracy to evaluate the results.", + "bbox": [ + 75, + 114, + 470, + 252 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Static Mapping Quality", + "text_level": 1, + "bbox": [ + 76, + 258, + 294, + 275 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets. We select two datasets collected in dynamic environments for quantitative evaluation. One is the synthetic dataset ToyCar3 from Co-Fusion [47], which provides accurate depth images and accurate masks of dynamic objects rendered using Blender, but also depth images with added noise. For this experiment, we select 150 frames from the whole sequence, mask out all dynamic objects in the accurate depth images, and accumulate background static points as the ground-truth static map. The original noisy depth images are used as the input for all methods.", + "bbox": [ + 75, + 281, + 468, + 431 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Furthermore, we use the Newer College [45] dataset as the real-world dataset, which is collected using a 64-beam LiDAR. Compared with synthetic datasets, it contains more uncertainty from measurements and pose estimates. We select 1,300 frames from the courtyard part for testing and this data includes a few pedestrians as dynamic objects. This dataset offers point clouds obtained by a high-precision terrestrial laser scanner that can be directly utilized as ground truth to evaluate the mapping quality.", + "bbox": [ + 75, + 431, + 468, + 568 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metric and Baselines. We report the reconstruction accuracy, completeness, the Chamfer distance, and the F1-score. Further details on the computation of the metrics can be found in the supplement.", + "bbox": [ + 75, + 568, + 468, + 628 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We compare our method with several different types of state-of-the-art methods: (i) the traditional TSDF-fusion method, VDBfusion [59], which uses space carving to eliminate the effects of dynamic objects, (ii) the data-driven-based method, neural kernel surface reconstruction (NKSR) [18], and (iii) the neural representation based 3D mapping approach, SHINE-mapping [73].", + "bbox": [ + 75, + 628, + 468, + 733 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For NKSR [18], we use the default parameters provided by Huang et al. with their official implementation. 
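For reference, the completion, accuracy, Chamfer-L1 and F-score values reported in Tab. 2 and Tab. 3 can be computed along the following lines. This is a generic sketch based on nearest-neighbour distances between sampled point sets, not the authors' evaluation code, and the exact averaging conventions may differ from the supplement.

```python
import numpy as np
from scipy.spatial import cKDTree

def reconstruction_metrics(pred_pts: np.ndarray, gt_pts: np.ndarray, thresh: float):
    """pred_pts, gt_pts: (N, 3) points sampled from the reconstructed and ground-truth surfaces."""
    acc_d = cKDTree(gt_pts).query(pred_pts)[0]     # reconstruction -> ground truth (accuracy)
    comp_d = cKDTree(pred_pts).query(gt_pts)[0]    # ground truth -> reconstruction (completion)
    accuracy, completion = acc_d.mean(), comp_d.mean()
    chamfer_l1 = 0.5 * (accuracy + completion)
    precision = (acc_d < thresh).mean()
    recall = (comp_d < thresh).mean()
    f_score = 2.0 * precision * recall / (precision + recall + 1e-12)
    return completion, accuracy, chamfer_l1, f_score
```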
To ensure a fair comparison with SHINE-mapping, we adopt an equal number of free space samples (15 samples), aligning with our method for consistency.", + "bbox": [ + 75, + 734, + 468, + 809 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For the ToyCar3 dataset, we set VDB-Fusion's resolution to $1\\mathrm{cm}$ . To have all methods with a similar memory consumption, we set the resolution of SHINE-mapping's leaf feature voxel to $2\\mathrm{cm}$ , and our method's highest resolution accordingly to $2\\mathrm{cm}$ . For the Newer College dataset, we set the resolution to $10\\mathrm{cm}$ , $30\\mathrm{cm}$ , and $30\\mathrm{cm}$ respectively.", + "bbox": [ + 75, + 810, + 468, + 901 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/596ccf01a51729c772c1826d526ca989d41bd6a821f383afd43547c4318d88e8.jpg", + "table_caption": [ + "Table 1. Hyperparameters of our approach." + ], + "table_footnote": [], + "table_body": "
Parameter | Value | Description
L | 2 | number of feature voxel levels
D | 8 | length of the feature vectors
K | 32 | number of basis functions
Dmlp | 2 × 64 | layers and size of the MLP decoder
Ms | 5 | number of surface-area samples
Mf | 15 | number of free-space samples
λe | 0.02 | weight for the Eikonal loss
λf | 0.25 | weight for the free-space loss
λc | 0.2 | weight for the certain free-space loss
", + "bbox": [ + 511, + 114, + 867, + 282 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/23f1dce81bf8edb1e0a54c55a8f8f695944c27ba556e3173fe8b310d9583ff58.jpg", + "table_caption": [ + "Table 2. Quantitative results of the reconstruction quality on ToyCar3. We report the distance error metrics, namely completion, accuracy and Chamfer-L1 in cm. Additionally, we show the F-score in % with a 1 cm error threshold." + ], + "table_footnote": [], + "table_body": "
Method | Comp. ↓ | Acc. ↓ | C-L1 ↓ | F-score ↑
VDB-fusion [59] | 0.574 | 0.481 | 0.528 | 97.95
NKSR [18] | 0.526 | 2.809 | 1.667 | 89.54
SHINE-mapping [73] | 0.583 | 0.626 | 0.605 | 98.01
Ours | 0.438 | 0.468 | 0.452 | 98.35
", + "bbox": [ + 504, + 359, + 877, + 454 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results. The quantitative results for synthetic dataset ToyCar3 and real-world dataset Newer College are presented in Tab. 2 and Tab. 3, respectively. We also show the extracted meshes from all methods in Fig. 5 and Fig. 6.", + "bbox": [ + 498, + 472, + 890, + 532 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our method outperforms the baselines in terms of Completeness and Chamfer distance for both datasets (cf. Fig. 5 and Fig. 6). Regarding the accuracy, SHINE-mapping and VDB-Fusion can filter part of high-frequency noise by fusion of multiple frames, resulting in better performance on noisy Newer College dataset. In comparison, our method considers every scan as accurate to store 4D information, which makes it more sensitive to measurement noise. On the ToyCar3 dataset, both our method and VDB-Fusion successfully eliminate all moving objects. However, on the Newer College dataset, VDB-Fusion incorrectly eliminates the static tree and parts of the ground, resulting in poor completeness shown in Tab. 3. SHINE-mapping eliminates dynamic pedestrians on the Newer College dataset but retains a portion of the dynamic point cloud on the ToyCar3 dataset, which has a larger proportion of dynamic objects, leading to poorer accuracy in Tab. 2. NKSR performs the worst accuracy because it is unable to eliminate dynamic objects, which means it's not suitable to apply NKSR in dynamic real-world scenes directly.", + "bbox": [ + 496, + 534, + 892, + 835 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Dynamic Object Segmentation", + "text_level": 1, + "bbox": [ + 500, + 845, + 769, + 863 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets. For dynamic object segmentation, we use the KTH-Dynamic-Benchmark [72] for evaluation, which in-", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "15422", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3bf71086da1a1b85b773f0e5e204f5d7b91b2eca6c3bf9a953733509291ee25a.jpg", + "image_caption": [ + "(a) Merged input scans" + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 232, + 181 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1c779af19739d9302162ada79cefa43d162815e520617177d0d1635c4de1fc86.jpg", + "image_caption": [ + "(b) Ours" + ], + "image_footnote": [], + "bbox": [ + 240, + 88, + 395, + 181 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0de7f7c019dd294888f49a44810c6e34c566c7561b3cc26592fa09f737d4ddb9.jpg", + "image_caption": [ + "(c) VDB-Fusion [59]" + ], + "image_footnote": [], + "bbox": [ + 401, + 88, + 557, + 181 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b1d5c0cea9788d0c498e0c84bb6d7636ad5325398ea9d49b8206dbd4d65beb08.jpg", + "image_caption": [ + "(d) NKSR [18]" + ], + "image_footnote": [], + "bbox": [ + 563, + 88, + 720, + 181 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c267728fff044577e6303ec989e5b017853a7e9bb385809da6d3fea8c2130214.jpg", + "image_caption": [ + "(e) SHINE-mapping [73]" + ], + "image_footnote": [], + "bbox": [ + 725, + 88, + 880, + 181 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/76977bb0fe97692b3d565c308af89e8a8a2dca663c7ac697a2d7b4b835810906.jpg", + "image_caption": [ + "Figure 5. 
A comparison of the static mapping results of different methods on the ToyCar3 dataset. There are two dynamic toy cars moving through the scene. Our method can reconstruct the static scene with fine details and eliminate the dynamic car.", + "(a) Merged input scans" + ], + "image_footnote": [], + "bbox": [ + 83, + 238, + 235, + 395 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/91f788a922f82e91fbc40e959d3ee2609248ef5a463a558f2224577ead6d5732.jpg", + "image_caption": [ + "(b) Ours", + "Figure 6. A comparison of the static mapping results of different methods on the Newer College dataset. Several pedestrians are moving through the scene during the data collection. Our method can reconstruct the static scene completely and eliminate the moving pedestrians. Although VDB-Fusion manages to eliminate the pedestrians, it incorrectly removes the tree highlighted in the orange box." + ], + "image_footnote": [], + "bbox": [ + 243, + 238, + 398, + 397 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/953a87e2edc51b31a2d84fe969341705b56b354bc4b4ccb211b5a218e66620cd.jpg", + "image_caption": [ + "(c) VDB-Fusion [59]" + ], + "image_footnote": [], + "bbox": [ + 405, + 238, + 558, + 396 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ea17f4283b2069b34a1e533e4b94e9243b58afea03a4b50586f50adfb1dc82eb.jpg", + "image_caption": [ + "(d) NKSR [18]" + ], + "image_footnote": [], + "bbox": [ + 566, + 238, + 722, + 397 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/72e778c05a486bee3df7a6946756520826d3e8ee21cb3054ceecbcc9fa5e8f74.jpg", + "image_caption": [ + "(e) SHINE-mapping [73]" + ], + "image_footnote": [], + "bbox": [ + 730, + 243, + 883, + 396 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/30d7724ff38e86bdca4ad571a8abc17e7eef7171dca75f4a02d26a3ead4e5c7f.jpg", + "table_caption": [ + "Table 3. Quantitative results of the reconstruction quality on Newer College. We report the distance error metrics, namely completion, accuracy and Chamfer-L1 in cm. Additionally, we show the F-score in % with a $20\\mathrm{cm}$ error threshold." + ], + "table_footnote": [], + "table_body": "
Method | Comp. ↓ | Acc. ↓ | C-L1 ↓ | F-score ↑
VDB-fusion [59] | 7.32 | 5.99 | 6.65 | 96.68
NKSR [18] | 6.87 | 9.28 | 8.08 | 95.65
SHINE-mapping [73] | 6.80 | 5.86 | 6.33 | 97.67
Ours | 5.85 | 6.49 | 6.17 | 97.50
", + "bbox": [ + 81, + 542, + 450, + 636 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "cludes four sequences in total: sequence 00 (frame 4,390 - 4,530) and sequence 05 (frame 2,350 - 2,670) from the KITTI dataset [3, 14], which are captured by a 64-beam LiDAR, one sequence from the Argoverse2 dataset [66] consisting of 575 frames captured by two 32-beam LiDARs, and a semi-indoor sequence captured by a sparser 16-beam LiDAR. All sequences come with corresponding pose files and point-wise dynamic or static labels as the ground truth. It is worth noting that the poses for KITTI 00 and 05 were obtained from SuMa [2] and the pose files for the Semi-indoor sequence come from NDT-SLAM [50].", + "bbox": [ + 75, + 655, + 467, + 821 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Metric and Baselines. The KTH-Dynamic-Benchmark evaluates the performance of the method by measuring the classification accuracy of dynamic points (DA%), static points (SA%) and also their associated accuracy (AA%) where $AA = \\sqrt{DA \\cdot SA}$ . The benchmark provides various", + "bbox": [ + 75, + 824, + 467, + 898 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "bases such as the state-of-the-art LiDAR dynamic object removal methods – Erasor [27] and Removert [21], as well as the traditional 3D mapping method, Octomap [17, 69], and its modified versions, Octomap with ground fitting and outlier filtering. As SHINE-mapping demonstrates the ability to remove dynamic objects in our static mapping experiments, we also report its result in this benchmark. Additionally, we report the performance of the state-of-the-art online moving object segmentation methods, 4DMOS [31] and its extension MapMOS [32]. As these two methods utilize KITTI sequences 00 and 05 for training, we only show the results of the remaining two sequences. For the parameter setting, we set our method's leaf resolution to $0.3\\mathrm{m}$ , and the threshold for segmentation as $d_{\\mathrm{static}} = 0.16\\mathrm{m}$ . We set the leaf resolution for Octomap to $0.1\\mathrm{m}$ .", + "bbox": [ + 496, + 478, + 890, + 704 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results. The quantitative results of the dynamic object segmentation are shown in Tab. 4. And we depict the accumulated static points generated by different methods in Fig. 7. We can see that our method achieves the best associated accuracy (AA) in three autonomous driving sequences (KITTI 00, KITTI 05, Argoverse2) and vastly outperforms baselines. The supervised learning-based methods 4DMOS and MapMOS do not obtain good dynamic accuracy (DA) due to limited generalizability. Erasor and Octomap tend to over-segment dynamic objects, resulting in poor static accuracy (SA). Removert and SHINE-mapping are too conservative and cannot detect all dynamic objects. Benefiting", + "bbox": [ + 496, + 719, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "15423", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ae778bfdf95f37252f4f12a572df121cde330e8eaad9de9c6b89eca3a7e7b8f0.jpg", + "table_caption": [ + "Table 4. Quantitative results of the dynamic object removal quality on the KTH-Dynamic-Benchmark. We report the static accuracy SA, dynamic static DA and the associated accuracy AA. Octomap* refers to the modified Octomap implementation by Zhang et al. [72]." + ], + "table_footnote": [], + "table_body": "
Method | KITTI Seq. 00 | KITTI Seq. 05 | Argoverse2 | Semi-Indoor
       | SA | DA | AA | SA | DA | AA | SA | DA | AA | SA | DA | AA
Octomap [17] | 68.05 | 99.69 | 82.37 | 66.28 | 99.24 | 81.10 | 65.91 | 96.70 | 79.84 | 88.97 | 82.18 | 85.51
Octomap* [72] | 93.06 | 98.67 | 95.83 | 93.54 | 92.48 | 93.01 | 82.66 | 82.44 | 82.55 | 96.79 | 73.50 | 84.34
Removert [21] | 99.44 | 41.53 | 64.26 | 99.42 | 22.28 | 47.06 | 98.97 | 31.16 | 55.53 | 99.96 | 12.15 | 34.85
Erasor [27] | 66.70 | 98.54 | 81.07 | 69.40 | 99.06 | 82.92 | 77.51 | 99.18 | 87.68 | 94.90 | 66.26 | 79.30
SHINE [73] | 98.99 | 92.37 | 95.63 | 98.91 | 53.27 | 72.58 | 97.66 | 72.62 | 84.21 | 98.88 | 59.19 | 76.51
4DMOS [31] | - | - | - | - | - | - | 99.94 | 69.33 | 83.24 | 99.99 | 10.60 | 32.55
MapMOS [32] | - | - | - | - | - | - | 99.96 | 85.88 | 92.65 | 99.99 | 4.75 | 21.80
Ours | 99.46 | 98.47 | 98.97 | 99.54 | 98.36 | 98.95 | 99.17 | 95.91 | 97.53 | 94.17 | 72.79 | 82.79
", + "bbox": [ + 76, + 127, + 883, + 296 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0d4071b0915fae6a9433525e2edf75f04011427bea7e98c0651858af1529c7aa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 81, + 305, + 240, + 422 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2f552eab73051f3695c1c0c715dd5528566af4d6d212b402992088910f586f48.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 243, + 305, + 401, + 422 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/38d56d63475f86fc99982597abe589fa53938c1bd40edd2b5bd8216e4cefb41a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 405, + 305, + 563, + 422 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e787023611604dd5b37c4ec3cbb1ef0fb010461fd659e1e768439ddac27eb2a9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 566, + 305, + 725, + 422 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/54f569bf883300115200d5a4baecd3327c09bc8e5d043ed2319c01ec309113e2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 727, + 305, + 887, + 422 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/923ca0b2ac3e5ccc602f8336c0af5359b2e4d3620d7455893ff41f150ccf460f.jpg", + "image_caption": [ + "Figure 7. Comparison of dynamic object removal results produced by our proposed method and three baseline methods on the Argoverse2 data sequence of the KTH-benchmark. We show the bird's eye view on the first row and the zoomed view from the blue frustum shown in (a) on the second row. For the ground truth results in (a), the dynamic objects are shown in red. We only show the static points of ground truth for clearer comparison in zoomed view (f). We highlight the over-segmented parking car and sign by Erasor and the undetected moving vehicle by Removert." + ], + "image_footnote": [], + "bbox": [ + 81, + 438, + 238, + 554 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ce1802da6bd36c4e27dabd16dd9cd3a1ef92f182496dffc1fc742e308e704bea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 241, + 438, + 400, + 554 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fd50e0b220aaf97d5a0ae1d9ab88fef81a6232dd4a69e3a3a6d20b60585a8cd7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 403, + 438, + 560, + 554 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/cda7541e3b937718bfaa2d93ff7156a58547e843f0ccd83fb491cbdec3354be0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 565, + 438, + 722, + 555 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d01e5ba088140fc580080acc67070bd61f654c0500118004a84a45b80490b448.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 725, + 438, + 885, + 555 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "from the continuity and large capacity of the 4D neural representation, we strike a better balance between preserving static background points and removing dynamic objects.", + "bbox": [ + 75, + 645, + 468, + 690 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "It is worth mentioning again that our method does not rely on any pre-processing or post-processing algorithm such as ground fitting, outlier filtering, and clustering, but also does not require labels for training.", + "bbox": [ + 75, + 691, + 468, + 751 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. 
Conclusion", + "text_level": 1, + "bbox": [ + 76, + 768, + 194, + 784 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose a 4D implicit neural map representation for dynamic scenes that allows us to represent the TSDF of static and dynamic parts of a scene. For this purpose, we use a hierarchical voxel-based feature representation that is then decoded into weights for basis functions to represent a time-varying TSDF that can be queried at arbitrary locations. For learning the representation from a se", + "bbox": [ + 75, + 794, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "quence of LiDAR scans, we design an effective data sampling strategy and loss functions. Equipped with our proposed representation, we experimentally show that we are able to tackle the challenging problems of static mapping and dynamic object segmentation. More specifically, our experiments show that our method has the ability to accurately reconstruct 3D maps of the static parts of a scene and can completely remove moving objects at the same time.", + "bbox": [ + 496, + 645, + 890, + 766 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations. While our method achieves compelling results, we have to acknowledge that we currently rely on estimated poses by a separate SLAM approach, but also cannot apply our approach in an online fashion. However, we see this as an avenue for future research into joint incremental mapping and pose estimation.", + "bbox": [ + 496, + 772, + 890, + 864 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. We thank Benedikt Mersch for the fruitful discussion and for providing experiment baselines.", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "15424", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Ioan A. Barsan, Peidong Liu, Marc Pollefeys, and Andreas Geiger. Robust Dense Mapping for Large-Scale Dynamic Environments. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018. 1, 2", + "[2] Jens Behley and Cyril Stachniss. Efficient Surfel-Based SLAM using 3D Laser Range Data in Urban Environments. In Proc. of Robotics: Science and Systems (RSS), 2018. 3, 7", + "[3] Jens Behley, Martin Garbade, Aandres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Juergen Gall. SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2019. 7", + "[4] Peter Biber and Tom Duckett. Dynamic Maps for Long-Term Operation of Mobile Service Robots. In Proc. of Robotics: Science and Systems (RSS), 2005. 1, 2", + "[5] Cesar Cadena, Luca Carlone, Henry Carrillo, Yasir Latif, Davide Scaramuzza, Jose Neira, Ian Reid, and John J. Leonard. Past, Present, and Future of Simultaneous Localization And Mapping: Towards the Robust-Perception Age. IEEE Trans. on Robotics (TRO), 32(6):1309-1332, 2016. 1, 2", + "[6] Hongrui Cai, Wanquan Feng, Xuetao Feng, Yan Wang, and Juyong Zhang. Neural surface reconstruction of dynamic scenes with monocular rgb-d camera. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2022. 2", + "[7] Ang Cao and Justin Johnson. HexPlane: A Fast Representation for Dynamic Scenes. In Proc. of the IEEE/CVF Conf. 
on Computer Vision and Pattern Recognition (CVPR), 2023. 2", + "[8] Xieyuanli Chen, Shijie Li, Benedikt Mersch, Louis Wiesmann, Juergen Gall, Jens Behley, and Cyril Stachniss. Moving Object Segmentation in 3D LiDAR Data: A Learning-based Approach Exploiting Sequential Data. IEEE Robotics and Automation Letters (RA-L), 6(4):6529-6536, 2021. 2", + "[9] Xieyuanli Chen, Benedikt Mersch, Lucas Nunes, Rodrigo Marcuzzi, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Automatic Labeling to Generate Training Data for Online LiDAR-Based Moving Object Segmentation. IEEE Robotics and Automation Letters (RA-L), 7(3):6107-6114, 2022. 3", + "[10] Xu Chen, Tianjian Jiang, Jie Song, Max Rietmann, Andreas Geiger, Michael J. Black, and Otmar Hilliges. Fast-snarf: A fast deformer for articulated neural fields. IEEE Trans. on Pattern Analysis and Machine Intelligence (TPAMI), 45(10): 11796-11809, 2023. 2", + "[11] Pierre Dellenbach, Jean-Emmanuel Deschaud, Bastien Jacquet, and Francois Goulette. CT-ICP Real-Time Elastic LiDAR Odometry with Loop Closure. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2022. 3", + "[12] Jean-Emmanuel Deschaud. IMLS-SLAM: scan-to-model matching based on 3D data. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018. 3", + "[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik R. Warburg, Benjamin Recht, and Angjoo Kanazawa. K-Planes: Explicit Radiance Fields in Space, Time, and Appearance. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Andreas Geiger, Peter Lenz, and Raquel Urtasun. Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite. In Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2012. 5, 7", + "[15] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit Geometric Regularization for Learning Shapes. In Proc. of the Intl. Conf. on Machine Learning (ICML), 2020. 5", + "[16] Dirk Hähnel, Dirk Schulz, and Wolfram Burgard. Mobile robot mapping in populated environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2002. 1, 2", + "[17] Armin Hornung, Kai M. Wurm, Maren Bennewitz, Cyril Stachniss, and Wolfram Burgard. OctoMap: An Efficient Probabilistic 3D Mapping Framework Based on Octrees. Autonomous Robots, 34(3):189-206, 2013. 1, 2, 7, 8", + "[18] Jiahui Huang, Zan Gojcic, Matan Atzmon, Or Litany, Sanja Fidler, and Francis Williams. Neural Kernel Surface Reconstruction. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 6, 7", + "[19] Shengyu Huang, Zan Gojcic, Jiahui Huang, Andreas Wieser, and Konrad Schindler. Dynamic 3D Scene Analysis by Point Cloud Accumulation. In Proc. of the Europ. Conf. on Computer Vision (ECCV), 2022. 2", + "[20] Shengyu Huang, Zan Gojcic, Zian Wang, Francis Williams, Yoni Kasten, Sanja Fidler, Konrad Schindler, and Or Litany. Neural LiDAR Fields for Novel View Synthesis. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2023. 2, 4", + "[21] Giseop Kim and Ayoung Kim. Remove, Then Revert: Static Point Cloud Map Construction Using Multiresolution Range Images. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020. 2, 7, 8", + "[22] Xin Kong, Shikun Liu, Marwan Taher, and Andrew J. Davison. vMAP: Vectorised Object Mapping for Neural Field SLAM. In Proc. 
of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2", + "[23] Abhijit Kundu, Kyle Genova, Xiaoqi Yin, Alireza Fathi, Caroline Pantofaru, Leonidas Guibas, Andrea Tagliasacchi, Frank Dellaert, and Thomas Funkhouser. Panoptic neural fields: A semantic object-aware neural scene representation. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[24] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[25] Zhaoshuo Li, Thomas Müller, Alex Evans, Russell H Taylor, Mathias Unberath, Ming-Yu Liu, and Chen-Hsuan Lin. Neuralangelo: High-fidelity neural surface reconstruction. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 5", + "[26] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. DynIBaR: Neural Dynamic Image-Based Rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "15425", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Hyungtae Lim, Sungwon Hwang, and Hyun Myung. ERASOR: Egocentric Ratio of Pseudo Occupancy-Based Dynamic Object Removal for Static 3D Point Cloud Map Building. IEEE Robotics and Automation Letters (RA-L), 6(2): 2272-2279, 2021. 2, 7, 8", + "[28] Hyungtae Lim, Lucas Nunes, Benedikt Mersch, Xieyuanli Chen, Jens Behley, and Cyril Stachniss. ERASOR2: Instance-Aware Robust 3D Mapping of the Static World in Dynamic Scenes. In Proc. of Robotics: Science and Systems (RSS), 2023. 3", + "[29] William E. Lorensen and Harvey E. Cline. Marching Cubes: a High Resolution 3D Surface Construction Algorithm. In Proc. of the Intl. Conf. on Computer Graphics and Interactive Techniques (SIGGRAPH), 1987. 2, 6", + "[30] John McCormac, Ankur Handa, Aandrew J. Davison, and Stefan Leutenegger. SemanticFusion: Dense 3D Semantic Mapping with Convolutional Neural Networks. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2017. 1, 2", + "[31] Benedikt Mersch, Xieyuanli Chen, Ignacio Vizzo, Lucas Nunes, Jens Behley, and Cyril Stachniss. Receding Moving Object Segmentation in 3D LiDAR Data Using Sparse 4D Convolutions. IEEE Robotics and Automation Letters (RA-L), 7(3):7503-7510, 2022. 2, 7, 8", + "[32] Benedikt Mersch, Tiziano Guadagnino, Xieyuanli Chen, Tiziano, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Building Volumetric Beliefs for Dynamic Environments Exploiting Map-Based Moving Object Segmentation. IEEE Robotics and Automation Letters (RA-L), 8(8):5180-5187, 2023. 2, 7, 8", + "[33] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[34] Daniel Meyer-Delius, Maximilian Beinhofer, and Wolfram Burgard. Occupancy Grid Models for Robot Mapping in Changing Environments. In Proc. of the Conf. on Advancements of Artificial Intelligence (AAAI), 2012. 1, 2", + "[35] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. 
In Proc. of the Europ. Conf. on Computer Vision (ECCV), 2020. 2", + "[36] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. on Graphics, 41(4): 102:1-102:15, 2022. 2, 4", + "[37] Richard A. Newcombe, Shahram Izadi, Otmar Hilliges, David Molyneaux, David Kim, Andrew J. Davison, Pushmeet Kohli, Jamie Shotton, Steve Hodges, and Andrew Fitzgibbon. KinectFusion: Real-Time Dense Surface Mapping and Tracking. In Proc. of the Intl. Symposium on Mixed and Augmented Reality (ISMAR), 2011. 1, 2", + "[38] Joseph Ortiz, Alexander Clegg, Jing Dong, Edgar Sucar, David Novotny, Michael Zollhoefer, and Mustafa Mukadam. isdf: Real-time neural signed distance fields for robot per" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ception. In Proc. of Robotics: Science and Systems (RSS), 2022. 5", + "[39] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguere, and Cyril Stachniss. ReFusion: 3D Reconstruction in Dynamic Environments for RGB-D Cameras Exploiting Residuals. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2019. 2", + "[40] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning Continuous Signed Distance Functions for Shape Representation. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[41] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable Neural Radiance Fields. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2021. 2", + "[42] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M. Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. ACM Trans. on Graphics (TOG), 40(6), 2021.", + "[43] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[44] Sameera Ramasinghe, Violetta Shevchenko, Gil Avraham, and Anton Van Den Hengel. *Blirf: Band limited radiance fields for dynamic scene modeling.* arXiv preprint arXiv:2302.13543, 2023. 2", + "[45] Milad Ramezani, Yiduo Wang, Marco Camurri, David Wisth, Matias Mattamala, and Maurice Fallon. The Newer College Dataset: Handheld LiDAR, Inertial and Vision with Ground Truth. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020. 6", + "[46] Konstantinos Rematas, Andrew Liu, Pratul P. Srinivasan, Jonathan T. Barron, Andrea Tagliasacchi, Thomas Funkhouser, and Vittorio Ferrari. Urban radiance fields. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4", + "[47] Martin Rünz and Lourdes Agapito. Co-Fusion: Real-Time Segmentation, Tracking and Fusion of Multiple Objects. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2017. 1, 2, 6", + "[48] Martin Runz, Maud Buffier, and Lourdes Agapito. MaskFusion: Real-Time Recognition, Tracking and Reconstruction of Multiple Moving Objects. In Proc. of the Intl. Symposium on Mixed and Augmented Reality (ISMAR), 2018. 1, 2", + "[49] Jari Saarinen, Henrik Andreasson, and Achim Lilienthal. 
Independent Markov Chain Occupancy Grid Maps for Representation of Dynamic Environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2012. 1, 2", + "[50] Jari P. Saarinen, Todor Stoyanov, Henrik Andreasson, and Achim J. Lilienthal. Fast 3D Mapping in Highly Dynamic" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "15426", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Environments Using Normal Distributions Transform Occupancy Maps. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2013. 1, 2, 7", + "[51] Renato F. Salas-Moreno, Richard A. Newcombe, Hauke Strasdat, Paul H. Kelly, and Andrew J. Davison. SLAM++: Simultaneous Localisation and Mapping at the Level of Objects. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2013. 1, 2", + "[52] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2", + "[53] Chonghyuk Song, Gengshan Yang, Kangle Deng, Jun-Yan Zhu, and Deva Ramanan. Total-recon: Deformable scene reconstruction for embodied view synthesis. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2023. 2", + "[54] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. NeRF-Player: A Streamable Dynamic Scene Representation with Decomposed Neural Radiance Fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 2", + "[55]Cyrill Stachniss and Wolfram Burgard. Mobile Robot Mapping and Localization in Non-Static Environments. In Proc. of the National Conf. on Artificial Intelligence (AAAI), 2005.1,2", + "[56]Cyrill Stachniss,John J.Leonard,and Sebastian Thrun. Springer Handbook of Robotics,2nd edition, chapter Chapt.46:Simultaneous Localization and Mapping. Springer Verlag,2016.1,2", + "[57] Sebastian Thrun, Wolfram Burgard, and Dieter Fox. Probabilistic Robotics. MIT Press, 2005. 1, 2", + "[58] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2021. 2", + "[59] Ignacio Vizzo, Tiziano Guadagnino, Jens Behley, and Cyril Stachniss. VDBFusion: Flexible and Efficient TSDF Integration of Range Sensor Data. Sensors, 22(3):1296, 2022. 6, 7", + "[60] Ignacio Vizzo, Tiziano Guadagnino, Benedikt Mersch, Louis Wiesmann, Jens Behley, and Cyril Stachniss. KISS-ICP: In Defense of Point-to-Point ICP - Simple, Accurate, and Robust Registration If Done the Right Way. IEEE Robotics and Automation Letters (RA-L), 8(2):1029-1036, 2023. 3", + "[61] Aishan Walcott-Bryant, Michael Kaess, Hordur Johannsson, and John J. Leonard. Dynamic Pose Graph SLAM: Long-Term Mapping in Low Dynamic Environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2012. 1, 2", + "[62] Chaoyang Wang, Ben Eckart, Simon Lucey, and Orazio Gallo. Neural trajectory fields for dynamic novel view synthesis. arXiv preprint arXiv:2105.05994, 2021. 
3" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[63] Chung-Yi Weng, Brian Curless, Pratul P. Srinivasan, Jonathan T. Barron, and Ira Kemelmacher-Shlizerman. HumanNeRF: Free-Viewpoint Rendering of Moving People From Monocular Video. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[64] Thomas Whelan, Stefan Leutenegger, Renato F. Salas-Moreno, Ben Glocker, and Andrew J. Davison. ElasticFusion: Dense SLAM Without A Pose Graph. In Proc. of Robotics: Science and Systems (RSS), 2015. 1, 2", + "[65] Louis Wiesmann, Tiziano Guadagnino, Ignacio Vizzo, Nicky Zimmerman, Yue Pan, Haofei Kuang, Jens Behley, and Cyrill Stachniss. LocNDF: Neural Distance Field Mapping for Robot Localization. IEEE Robotics and Automation Letters (RA-L), 8(8):4999-5006, 2023. 2", + "[66] Benjamin Wilson, William Qi, Tanmay Agarwal, John Lambert, Jagjeet Singh, Siddhesh Khandelwal, Bowen Pan, Ratnesh Kumar, Andrew Hartnett, Jhony Kaesemodel Pontes, Deva Ramanan, Peter Carr, and James Hays. Argoverse 2: Next Generation Datasets for Self-driving Perception and Forecasting. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2021. 7", + "[67] Denis F. Wolf and Guarav S. Sukhatme. Mobile Robot Simultaneous Localization and Mapping in Dynamic Environments. Autonomous Robots, 19, 2005. 1, 2", + "[68] Tianhao Wu, Fangcheng Zhong, Andrea Tagliasacchi, Forrester Cole, and Cengiz Oztireli. D $^2$ NeRF: Self-Supervised Decoupling of Dynamic and Static Objects from a Monocular Video. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2022. 2", + "[69] Kai M. Wurm, Armin Hornung, Maren Bennewitz, Cyril Stachniss, and Wolfram Burgard. OctoMap: A Probabilistic, Flexible, and Compact 3D Map Representation for Robotic Systems. In Workshop on Best Practice in 3D Perception and Modeling for Mobile Manipulation, IEEE Int. Conf. on Robotics & Automation (ICRA), 2010. 7", + "[70] Dongyu Yan, Xiaoyang Lyu, Jieqi Shi, and Yi Lin. Efficient Implicit Neural Reconstruction Using LiDAR. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023. 2", + "[71] Wentao Yuan, Zhaoyang Lv, Tanner Schmidt, and Steven Lovegrove. Star: Self-supervised tracking and reconstruction of rigid objects in motion with neural rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[72] Qingwen Zhang, Daniel Duberg, Ruoyu Geng, Mingkai Jia, Lujia Wang, and Patric Jensfelt. A dynamic points removal benchmark in point cloud maps. In IEEE 26th International Conference on Intelligent Transportation Systems (ITSC), pages 608-614, 2023. 6, 8", + "[73] Xingguang Zhong, Yue Pan, Jens Behley, and Cyril Stachniss. SHINE-Mapping: Large-Scale 3D Mapping Using Sparse Hierarchical Implicit Neural Representations. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023. 
2, 6, 7, 8" + ], + "bbox": [ + 501, + 92, + 890, + 837 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "15427", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/024f2dc1-2c03-4b1e-a716-0e0aea35b1de_model.json b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/024f2dc1-2c03-4b1e-a716-0e0aea35b1de_model.json new file mode 100644 index 0000000000000000000000000000000000000000..298e1991a2ace9e493e55128f7a6e4c7c73b4fe4 --- /dev/null +++ b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/024f2dc1-2c03-4b1e-a716-0e0aea35b1de_model.json @@ -0,0 +1,2884 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.248, + 0.131, + 0.724, + 0.176 + ], + "angle": 0, + "content": "3D LiDAR Mapping in Dynamic Environments Using a 4D Implicit Neural Representation" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.203, + 0.354, + 0.222 + ], + "angle": 0, + "content": "Xingguang Zhong" + }, + { + "type": "text", + "bbox": [ + 0.385, + 0.204, + 0.458, + 0.221 + ], + "angle": 0, + "content": "Yue Pan" + }, + { + "type": "text", + "bbox": [ + 0.489, + 0.204, + 0.636, + 0.222 + ], + "angle": 0, + "content": "Cyrill Stachniss\\(^{1,2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.665, + 0.204, + 0.768, + 0.222 + ], + "angle": 0, + "content": "Jens Behley" + }, + { + "type": "text", + "bbox": [ + 0.164, + 0.224, + 0.807, + 0.257 + ], + "angle": 0, + "content": "1Center for Robotics, University of Bonn, 2Lamarr Institute for Machine Learning and Artificial Intelligence {zhong, yue.pan, cyrill.stachniss, Jens.vehley}@igg.uni-bonn.de" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.292, + 0.314, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.474, + 0.657 + ], + "angle": 0, + "content": "Building accurate maps is a key building block to enable reliable localization, planning, and navigation of autonomous vehicles. We propose a novel approach for building accurate maps of dynamic environments utilizing a sequence of LiDAR scans. To this end, we propose encoding the 4D scene into a novel spatio-temporal implicit neural map representation by fitting a time-dependent truncated signed distance function to each point. Using our representation, we extract the static map by filtering the dynamic parts. Our neural representation is based on sparse feature grids, a globally shared decoder, and time-dependent basis functions, which we jointly optimize in an unsupervised fashion. To learn this representation from a sequence of LiDAR scans, we design a simple yet efficient loss function to supervise the map optimization in a piecewise way. We evaluate our approach on various scenes containing moving objects in terms of the reconstruction quality of static maps and the segmentation of dynamic point clouds. 
The experimental results demonstrate that our method is capable of removing the dynamic part of the input point clouds while reconstructing accurate and complete 3D maps, outperforming several state-of-the-art methods." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.673, + 0.21, + 0.689 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.699, + 0.47, + 0.879 + ], + "angle": 0, + "content": "Mapping using range sensors, like LiDAR or RGB-D cameras, is a fundamental task in computer vision and robotics. Often, we want to obtain accurate maps to support downstream tasks such as localization, planning, or navigation. For achieving an accurate reconstruction of an outdoor environment, we have to account for dynamics caused by moving objects, such as vehicles or pedestrians. Furthermore, dynamic object removal plays an important role in autonomous driving and robotics applications for creating digital twins for realistic simulation and high-definition mapping, where a static map is augmented with semantic and task-relevant information." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.302, + 0.695, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.59, + 0.411, + 0.606, + 0.421 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.3, + 0.892, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.788, + 0.411, + 0.805, + 0.421 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.431, + 0.695, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.59, + 0.542, + 0.606, + 0.552 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.438, + 0.894, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.788, + 0.541, + 0.805, + 0.552 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.564, + 0.894, + 0.648 + ], + "angle": 0, + "content": "Figure 1. Given a sequence of point clouds, as shown in (a), we optimize our 4D neural representation that can be queried at arbitrary positions for a specific time. (b) Based on the estimated time-dependent TSDF values, we can extract a mesh at a specific point in time. Additionally, our 4D neural representation can be also used for static mapping (c) and dynamic object removal (c)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.658, + 0.893, + 0.885 + ], + "angle": 0, + "content": "Mapping and state estimation in dynamic environments is a classical problem in robotics [5, 56, 57]. Approaches for simultaneous localization and mapping (SLAM) can apply different strategies to deal with dynamics. Common ways are: (1) filtering dynamics from the input [1, 30, 47, 48, 51] as a pre-processing step, which requires a semantic interpretation of the scene; (2) modeling the occupancy in the map representation [17, 34, 37, 49, 50, 64], where dynamics can be implicitly removed by retrospectively removing measurements in free space; (3) including it in the state estimation [4, 16, 55, 61, 67] to model which measurements originated from the dynamic and static parts of the environment. Our proposed method falls into the last category and allows us to model dynamics directly in the map representation leading to a spatio-temporal map representation." 
+ }, + { + "type": "text", + "bbox": [ + 0.518, + 0.886, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Recently, implicit neural representations gained increas" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.383, + 0.9 + ], + "angle": 0, + "content": "\\(^{1}\\)Code: https://github.com/PRBonn/4dNDF" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15417" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.288 + ], + "angle": 0, + "content": "ing interest in computer vision for novel view synthesis [35, 36] and 3D shape reconstruction [33, 40]. Due to their compactness and continuity, several approaches [65, 70, 73] investigate the use of neural representations in large-scale 3D LiDAR mapping leading to accurate maps while significantly reducing memory consumption. However, these approaches often do not address the problem of handling dynamics during mapping. The recent progress on dynamic NeRF [7, 13, 44, 52] and neural deformable object reconstruction [6, 10] indicates that neural representations can be also used to represent dynamic scenes, which inspires us to tackle the problem of mapping in dynamic environments from the perspective of 4D reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.288, + 0.473, + 0.529 + ], + "angle": 0, + "content": "In this paper, we propose a novel method to reconstruct large 4D dynamic scenes by encoding every point's time-dependent truncated signed distance function (TSDF) into an implicit neural scene representation. As illustrated in Fig. 1, we take sequentially recorded LiDAR point clouds collected in dynamic environments as input and generate a TSDF for each time frame, which can be used to extract a mesh using marching cubes [29]. The background TSDF, which is unchanged during the whole sequence, can be extracted from the 4D signal easily. We regard it as a static map that can be used to segment dynamic objects from the original point cloud. Compared to the traditional voxel-based mapping method, the continuous neural representation allows for the removal of dynamic objects while preserving rich map details. In summary, the main contributions of this paper are:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.53, + 0.47, + 0.575 + ], + "angle": 0, + "content": "- We propose a novel implicit neural representation to jointly reconstruct a dynamic 3D environment and maintain a static map using sequential LiDAR scans as input." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.576, + 0.47, + 0.635 + ], + "angle": 0, + "content": "- We employ a piecewise training data sampling strategy and design a simple, yet effective loss function that maintains the consistency of the static point supervision through gradient constraints." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.636, + 0.47, + 0.711 + ], + "angle": 0, + "content": "- We evaluate the mapping results by the accuracy of the dynamic object segmentation as well as the quality of the reconstructed static map showing superior performance compared to several baselines. We provide our code and the data used for experiments." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.53, + 0.47, + 0.711 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.724, + 0.22, + 0.74 + ], + "angle": 0, + "content": "2. 
Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.47, + 0.886 + ], + "angle": 0, + "content": "Mapping and SLAM in dynamic environments is a classical topic in robotics [5, 56, 57] with a large body of work, which tackles the problem by pre-processing the sensor data [1, 30, 47, 48, 51], occupancy estimation to filter dynamics by removing measurements in free space [17, 34, 37, 39, 49, 50, 64], or state estimation techniques [4, 16, 55, 61, 67]. Below, we focus on closely related approaches using neural representations but also static map building approaches for scenes containing dynamics." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Dynamic NeRF. Dynamic NeRFs aim to solve the prob-" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.243 + ], + "angle": 0, + "content": "lem of novel view synthesis in dynamic environments. Some approaches [41-43, 58, 63] address this challenge by modeling the deformation of each point with respect to a canonical frame. However, these methods cannot represent newly appearing objects. This can render them unsuited for complicated real-life scenarios. In contrast, NSFF [24] and DynIBaR [26] get rid of the canonical frame by computing the motion field of the whole scene. While these methods can deliver satisfactory results, the training time is usually in the order of hours or even days." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.246, + 0.893, + 0.457 + ], + "angle": 0, + "content": "Another type of method leverages the compactness of the neural representation to model the 4D spatio-temporal information directly. Several works [7, 13, 52] project the 4D input into multiple voxelized lower-dimensional feature spaces to avoid large memory consumption, which improves the efficiency of the optimization. Song et al. [54] propose a time-dependent sliding window strategy for accumulating the voxel features. Instead of only targeting novel view synthesis, several approaches [26, 68, 71] decompose the scene into dynamic objects and static background in a self-supervised way, which inspired our work. Other approaches [22, 23, 53] accomplish neural representation-based reconstruction for larger scenes by adding additional supervision such as object masks or optical flow." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.46, + 0.893, + 0.717 + ], + "angle": 0, + "content": "Neural representations for LiDAR scans. Recently, many approaches aim to enhance scene reconstruction using LiDAR data through neural representations. The early work URF [46] leverages LiDAR data as depth supervision to improve the optimization of a neural radiance field. With only LiDAR data as input, Huang et al. [20] achieve novel view synthesis for LiDAR scans with differentiable rendering. Similar to our work, Shine-mapping [73] and EIN-RUL [70] utilize sparse hierarchical feature voxel structures to achieve large-scale 3D mapping. Additionally, the data-driven approach NKSR [18] based on learned kernel regression demonstrates accurate surface reconstruction with noisy LiDAR point cloud as input. Although these approaches perform well in improving reconstruction accuracy and reducing memory consumption, none of them consider the problem of dynamic object interference in real-world environments." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Static map building and motion detection. 
In addition to removing moving objects from the voxel map with ray tracing, numerous works [8, 19, 31, 32] try to segment dynamic points from raw LiDAR point clouds. However, these methods require a significant amount of labeled data, which makes it challenging to generalize them to various scenarios or sensors with different scan patterns. In contrast, geometry-based, more heuristic approaches have also produced promising results. Kim et al. [21] solve this problem using the visibility of range images, but their results are still highly affected by the resolution. Lim et al. proposed Erasor [27], which leverages ground fitting as prior" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "15418" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.468, + 0.181 + ], + "angle": 0, + "content": "to achieve better segmentation for dynamic points. More recent approaches [9, 28] extend it to instance level to improve results. However, these methods rely on an accurate ground fitting method, which is mainly designed for autonomous driving scenarios, which cannot be guaranteed in complex unstructured real environments." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.183, + 0.468, + 0.286 + ], + "angle": 0, + "content": "In contrast to the approaches discussed above, we follow recent developments in neural reconstruction and propose a novel scene representation that allows us to capture the spatio-temporal progression of a scene. We represent the time-varying SDF of a scene in an unsupervised fashion, which we exploit to remove dynamic objects and reconstruct accurate meshes of the static scene." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.301, + 0.226, + 0.318 + ], + "angle": 0, + "content": "3. Our Approach" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.326, + 0.468, + 0.46 + ], + "angle": 0, + "content": "The input of our approach is given by a sequence of point clouds, \\(S_{1:N} = (S_1,\\dots ,S_N)\\), and their corresponding global poses \\(\\mathsf{T}_t\\in \\mathbb{R}^{4\\times 4}\\) \\(t\\in [1,N]\\), estimated via scan matching, LiDAR odometry, or SLAM methods [2, 11, 12, 60]. Each scan's point cloud \\(S_{t} = \\{\\pmb{s}_{t}^{1},\\dots ,\\pmb{s}_{t}^{M_{t}}\\}\\) is a set of points, \\(\\pmb{s}_t^i\\in \\mathbb{R}^3\\), collected at time \\(t\\). Given such a sequence of scans \\(S_{1:N}\\), our approach aims to reconstruct a 4D TSDF of the traversed scene and maintain a static 3D map at the same time." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.462, + 0.468, + 0.522 + ], + "angle": 0, + "content": "In the next sections, we first introduce our spatiotemporal representation and then explain how to optimize it to represent the dynamic and static parts of a point cloud sequence \\( S_{1:N} \\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.531, + 0.271, + 0.547 + ], + "angle": 0, + "content": "3.1. Map Representation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.553, + 0.468, + 0.674 + ], + "angle": 0, + "content": "The key component of our approach is an implicit neural scene representation that allows us to represent a 4D TSDF of the scene, as well as facilitates the extraction of a static map representation. 
Our proposed spatio-temporal scene representation is optimized for the given point cloud sequence \\( S_{1:N} \\) such that we can retrieve for an arbitrary point \\( \\pmb{p} \\in \\mathbb{R}^3 \\) and time \\( t \\in [1,N] \\) the corresponding time-varying signed distances value at that location." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.468, + 0.779 + ], + "angle": 0, + "content": "Temporal representation. We utilize an TSDF to represent the scene, i.e., a function that provides the signed distance to the nearest surface for any given point \\( \\pmb{p} \\in \\mathbb{R}^3 \\). The sign of the distance is positive when the point is in free space or in front of the measured surface and is negative when the point is inside the occupied space or behind the measured surface." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.468, + 0.901 + ], + "angle": 0, + "content": "In a dynamic 3D scene, measuring the signed distance of any coordinate at each moment produces a time-dependent function that captures the signed distance changes over time, see Fig. 2 for an illustration. Additionally, if a coordinate is static throughout the period, the signed distance should remain constant. The key idea of our spatiotemporal scene representation is to fit the time-varying SDF at each point with several basis functions. Inspired by Li" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.092, + 0.709, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.714, + 0.099, + 0.889, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.209, + 0.892, + 0.307 + ], + "angle": 0, + "content": "Figure 2. Principle of our 4D TSDF representation: The left figure shows a moving object and a query point \\( \\pmb{p} \\). The one on the right depicts the corresponding signed distance at \\( \\pmb{p} \\) over time. At \\( t_0 \\), \\( \\pmb{p} \\)'s signed distance is a positive truncated value. When the moving object reaches \\( \\pmb{p} \\) at time \\( t_1 \\), \\( \\pmb{p} \\) is inside the object and its signed distance is negative accordingly. At \\( t_2 \\), the moving object moved past \\( \\pmb{p} \\), the signed distance of \\( \\pmb{p} \\) gets positive again." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.326, + 0.892, + 0.402 + ], + "angle": 0, + "content": "et al. [26]'s representation of moving point trajectories, we exploit \\(K\\) globally shared basis functions \\(\\phi_k: \\mathbb{R} \\mapsto \\mathbb{R}\\). Using these basis functions \\(\\phi_k(t)\\), we model the time-varying TSDF \\(F(\\pmb{p}, t)\\) that maps a location \\(\\pmb{p} \\in \\mathbb{R}^3\\) at time \\(t\\) to a signed distance as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.615, + 0.415, + 0.892, + 0.456 + ], + "angle": 0, + "content": "\\[\nF (\\boldsymbol {p}, t) = \\sum_ {k = 1} ^ {K} w _ {\\boldsymbol {p}} ^ {k} \\phi_ {k} (t), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.47, + 0.892, + 0.53 + ], + "angle": 0, + "content": "where \\( w_{\\pmb{p}}^{k} \\in \\mathbb{R} \\) are estimable location-dependent coefficients. In line with previous works [26, 62], we initialize the basis functions with discrete cosine transform (DCT) basis functions:" + }, + { + "type": "equation", + "bbox": [ + 0.573, + 0.54, + 0.891, + 0.568 + ], + "angle": 0, + "content": "\\[\n\\phi_ {k} (t) = \\cos \\left(\\frac {\\pi}{2 N} (2 t + 1) (k - 1)\\right). 
\\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.581, + 0.892, + 0.687 + ], + "angle": 0, + "content": "The first basis function for \\( k = 1 \\) is time-independent as \\( \\phi_1(t) = 1 \\). During the training process, we fix \\( \\phi_1(t) \\) and determine the other basis functions by backpropagation. We consider \\( \\phi_1(t) \\)'s corresponding weight \\( w_{\\pmb{p}}^{1} \\) as the static SDF value of the point \\( \\pmb{p} \\). Hence, \\( F(\\pmb{p}, t) \\) consists of its static background value, i.e., \\( w_{\\pmb{p}}^{1}\\phi_{1}(t) = w_{\\pmb{p}}^{1} \\), and the weighted sum of dynamic basis functions \\( \\phi_2(t), \\ldots, \\phi_K(t) \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.687, + 0.892, + 0.748 + ], + "angle": 0, + "content": "As the basis functions \\(\\phi_1(t),\\ldots ,\\phi_K(t)\\) are shared between all points in the scene, we need to optimize the location-dependent weights that are implicitly represented in our spatial representation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.808 + ], + "angle": 0, + "content": "Spatial representation. To achieve accurate scene reconstruction while maintaining memory efficiency, we employ a multi-resolution sparse voxel grid to store spatial geometric information." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.892, + 0.901 + ], + "angle": 0, + "content": "First, we accumulate the input point clouds, \\( S_{1}, \\ldots, S_{N} \\) based on their poses \\( T_{1}, \\ldots, T_{N} \\) computed from LiDAR odometry and generate a hierarchy of voxel grids around points to ensure complete coverage in 3D. We use a spatial hash table for fast retrieval of the resulting voxels that are only initialized if points fall into a voxel." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15419" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.092, + 0.874, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.253, + 0.895, + 0.312 + ], + "angle": 0, + "content": "Figure 3. Overview of querying a TSDF value in our 4D map representation. For querying a point \\(\\pmb{p}\\) at \\(t_i\\) and \\(t_{i + 1}\\), we first retrieve each corner's feature in \\(\\mathcal{F}^l\\) of the voxel that \\(\\pmb{p}\\) is located in and obtain the fused feature \\(\\pmb{f}_{\\pmb{p}}\\) by trilinear interpolation. Then, we feed \\(\\pmb{f}_{\\pmb{p}}\\) into the decoder \\(D_{\\mathrm{mlp}}\\) and take the output as the weights of different basis functions \\(\\phi_1(t),\\ldots ,\\phi_K(t)\\). Finally, we calculate the weighted sum of basis functions' values at \\(t_i\\) and \\(t_{i + 1}\\) to get their respective SDF results. For simplicity, we only illustrate one level of hashed feature grids." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.333, + 0.473, + 0.412 + ], + "angle": 0, + "content": "Similar to Instant-NGP [36], we save a feature vector \\( \\pmb{f} \\in \\mathbb{R}^{D} \\) at each corner vertex of the voxel grid in each resolution level, where we denote as \\( \\mathcal{F}^l \\) the level-wise corner features. 
We compute the feature vector \\( \\pmb{f}_{\\pmb{p}} \\in \\mathbb{R}^{D} \\) for given query point \\( \\pmb{p} \\in \\mathbb{R}^3 \\) inside the hierarchical grid as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.42, + 0.47, + 0.462 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {f} _ {\\boldsymbol {p}} = \\sum_ {l = 1} ^ {L} \\operatorname {i n t e r p o l a t e} (\\boldsymbol {p}, \\mathcal {F} ^ {l}), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.47, + 0.47, + 0.499 + ], + "angle": 0, + "content": "where interpolate is the trilinear interpolation for a given point \\( \\pmb{p} \\) using the corner features \\( \\mathcal{F}^l \\) at level \\( l \\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.5, + 0.471, + 0.549 + ], + "angle": 0, + "content": "Then, we decode the interpolated feature vector \\( \\pmb{f}_{\\pmb{p}} \\) into the desired weights \\( \\pmb{w}_{\\pmb{p}} = (w_{p}^{1},\\dots ,w_{p}^{K})\\in \\mathbb{R}^{K} \\) by a globally shared multi-layer perceptron (MLP) \\( D_{\\mathrm{mlp}} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.214, + 0.559, + 0.47, + 0.577 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {w} _ {\\boldsymbol {p}} = D _ {\\operatorname {m l p}} \\left(\\boldsymbol {f} _ {\\boldsymbol {p}}\\right). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.586, + 0.47, + 0.661 + ], + "angle": 0, + "content": "As every step is differentiable, we can optimize the multi-resolution feature grids \\(\\mathcal{F}^l\\), the MLP decoder \\(D_{\\mathrm{mlp}}\\) and the values of the basis functions jointly by gradient descent once we have training data and corresponding target values. The SDF querying process is illustrated in Fig. 3." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.669, + 0.262, + 0.685 + ], + "angle": 0, + "content": "3.2. Objective Function" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.692, + 0.47, + 0.813 + ], + "angle": 0, + "content": "We take samples along the rays from the input scans \\(S_{t}\\) to collect training data. Each scan frame \\(S_{t}\\) corresponds to a moment \\(t\\) in time, so we gather four-dimensional data points \\((\\pmb {q},t)\\) via sampling along the ray from the scan origin \\(\\pmb{o}_t\\in \\mathbb{R}^3\\) to a point \\(s_t^i\\in S_t\\) . We can represent the sampled points \\(q_{s}^{i}\\) along the ray as \\(q_{s}^{i} = o_{t} + \\lambda (s_{t}^{i} - o_{t})\\) . By setting a truncation threshold \\(\\tau\\) , we split the ray into two regions, at the surface and in the free-space:" + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.823, + 0.469, + 0.841 + ], + "angle": 0, + "content": "\\[\n\\mathcal {T} _ {\\text {s u r f}} ^ {i} = \\left\\{\\boldsymbol {q} _ {s} ^ {i} \\mid \\lambda \\in (1 - \\bar {\\tau}, 1 + \\bar {\\tau}) \\right\\} \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.162, + 0.843, + 0.469, + 0.862 + ], + "angle": 0, + "content": "\\[\n\\mathcal {T} _ {\\text {f r e e}} ^ {i} = \\left\\{\\boldsymbol {q} _ {s} ^ {i} \\mid \\lambda \\in (0, 1 - \\bar {\\tau}) \\right\\}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.87, + 0.47, + 0.903 + ], + "angle": 0, + "content": "where \\(\\bar{\\tau} = \\tau (\\| \\pmb{s}_t^i -\\pmb {o}_t\\|)^{-1}\\). 
Thus, \\(\\mathcal{T}_{\\mathrm{surf}}^i\\) represents the region close to the endpoint \\(s_t^i\\in S_t\\), and \\(\\mathcal{T}_{\\mathrm{free}}^i\\) is the region" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.333, + 0.892, + 0.424 + ], + "angle": 0, + "content": "in the free space. We uniformly sample \\(M_{s}\\) and \\(M_{f}\\) points from \\(\\mathcal{T}_{\\mathrm{surf}}^i\\) and \\(\\mathcal{T}_{\\mathrm{free}}^i\\) separately. We obtain two sets \\(\\mathcal{D}_{\\mathrm{surf}}\\) and \\(\\mathcal{D}_{\\mathrm{free}}\\) of samples by sampling over all scans. Unlike prior work [20, 46] that use differentiable rendering to calculate the depth by integration along the ray, we design different losses for \\(\\mathcal{D}_{\\mathrm{surf}}\\) and \\(\\mathcal{D}_{\\mathrm{free}}\\) to supervise the 4D TSDF directly." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.425, + 0.893, + 0.682 + ], + "angle": 0, + "content": "Near Surface Loss. Since the output of our 4D map is the signed distance value \\(\\hat{d} = F(\\pmb {p},t)\\) at an arbitrary position \\(\\pmb {p}\\in \\mathbb{R}^3\\) in time \\(t\\in [1,N]\\), we expect that the predicted value \\(\\hat{d}\\) does not change over time for static points. However, this cannot be guaranteed if we use the projective distance \\(d_{\\mathrm{surf}}\\) to the surface along the ray direction directly as the target value, since the projective distance would change over time due to the change of view direction by the moving sensor, even in a static scene. Thus, for the sampled data in \\(\\mathcal{D}_{\\mathrm{surf}}\\), i.e., the sampled points near the surface, we can only obtain reliable information about the sign of the TSDF value of these points, which should be positive if the point is before the endpoint and negative if the point is behind. In addition, for a sampled point in front of the endpoint, its projective signed distance \\(d_{\\mathrm{surf}}\\) should be the upper bound of its actual signed distance value. And for sampled points behind the endpoint, \\(d_{\\mathrm{surf}}\\) should be the lower bound." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.684, + 0.894, + 0.714 + ], + "angle": 0, + "content": "We design a piecewise loss \\( L_{\\mathrm{surf}} \\) to supervise the sampled points near the surface:" + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.728, + 0.892, + 0.779 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {s u r f}} (\\hat {d}, d _ {\\text {s u r f}}) = \\left\\{ \\begin{array}{c l} | \\hat {d} | & \\text {i f} \\hat {d} d _ {\\text {s u r f}} < 0 \\\\ | \\hat {d} - d _ {\\text {s u r f}} | & \\text {i f} \\hat {d} d _ {\\text {s u r f}} > d _ {\\text {s u r f}} ^ {2} \\\\ 0 & \\text {o t h e r w i s e} \\end{array} , \\right. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.794, + 0.892, + 0.902 + ], + "angle": 0, + "content": "where \\(\\hat{d} = F(\\pmb {q},t)\\) is the predicted value from our map for a sample point \\(\\pmb {q}\\in \\mathcal{D}_{\\mathrm{surf}}\\) and \\(d_{\\mathrm{surf}}\\) is its corresponding projective signed distance for that sampled point in the corresponding scan \\(\\mathcal{S}_t\\). This loss punishes only a prediction when the sign is wrong or its absolute value is larger than the absolute value of \\(d_{\\mathrm{surf}}\\). For a query point exactly on the surface, i.e., \\(d_{\\mathrm{surf}} = 0\\), \\(L_{\\mathrm{surf}}\\) is simply the L1 loss." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15420" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.184 + ], + "angle": 0, + "content": "To calculate an accurate signed distance value and maintain the consistency of constraints for static points from different observations, we use the natural property of signed distance function to constraint the length of the gradient vector for samples inside \\( \\mathcal{D}_{\\mathrm{surf}} \\), which is called Eikonal regularization [15, 38]:" + }, + { + "type": "equation", + "bbox": [ + 0.146, + 0.196, + 0.47, + 0.233 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {e i k o n a l}} (\\boldsymbol {p}, t) = \\left(\\left\\| \\frac {\\partial F (\\boldsymbol {p} , t)}{\\partial \\boldsymbol {p}} \\right\\| - 1\\right) ^ {2}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.248, + 0.472, + 0.308 + ], + "angle": 0, + "content": "Inspired by Neuralangelo [25], we manually add perturbations to compute more robust gradient vectors instead of using automatic differentiation, which means we compute numerical gradients:" + }, + { + "type": "equation", + "bbox": [ + 0.125, + 0.323, + 0.47, + 0.354 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {x} F (\\boldsymbol {p}, t) = \\frac {F (\\boldsymbol {p} + \\epsilon_ {\\boldsymbol {x}} , t) - F (\\boldsymbol {p} - \\epsilon_ {\\boldsymbol {x}} , t)}{2 \\epsilon}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.368, + 0.47, + 0.48 + ], + "angle": 0, + "content": "where \\(\\nabla_x F(\\pmb{p}, t)\\) is the component of the gradient \\(\\frac{\\partial F(\\pmb{p}, t)}{\\partial \\pmb{p}}\\) on the \\(x\\) axis, and \\(\\epsilon_x = (\\epsilon, 0, 0)^{\\top}\\) is the added perturbation. We apply the same operation on \\(y\\) and \\(z\\) axes to calculate the numerical gradient. Furthermore, in order to get faster convergence at the beginning and ultimately recover the rich geometric details, we first set a large \\(\\epsilon\\) and gradually reduce it during the training process." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.481, + 0.471, + 0.573 + ], + "angle": 0, + "content": "Free Space Loss. As we tackle the problem of mapping in dynamic environments, we cannot simply accumulate point clouds and then calculate accurate supervision of signed distance value via nearest neighbor search. Therefore, we use a L1 loss \\(L_{\\mathrm{free}}\\) to constrain the signed distance prediction \\(\\hat{d}\\) of the free space points, i.e., \\(\\pmb{p} \\in \\mathcal{D}_{\\mathrm{free}}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.587, + 0.47, + 0.605 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {f r e e}} (\\hat {d}) = | \\hat {d} - \\tau |, \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.621, + 0.445, + 0.635 + ], + "angle": 0, + "content": "where \\(\\tau\\) is the truncation threshold we used in Sec. 3.2." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.637, + 0.47, + 0.698 + ], + "angle": 0, + "content": "Thanks to our spatio-temporal representation, a single query point can get both, static and dynamic TSDF values. Thus, for some regions that are determined to be free space, we can directly add constraints to their static TSDF values." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.7, + 0.471, + 0.837 + ], + "angle": 0, + "content": "We divide the free space points \\(\\mathcal{D}_{\\mathrm{free}}\\) into dense and sparse subset \\(\\mathcal{D}_{\\mathrm{dense}}\\) and \\(\\mathcal{D}_{\\mathrm{sparse}}\\) based on a threshold \\(r_{\\mathrm{dense}}\\) for the distance from the free space point sampled at time \\(t\\) to the scan origin \\(\\pmb{o}_t\\). For each point \\(\\pmb{p} \\in \\mathcal{D}_{\\mathrm{dense}}\\), we find the nearest neighbor \\(\\pmb{n_p}\\) in the corresponding scan \\(\\mathcal{S}_t\\), i.e., \\(\\pmb{n_p} = \\arg \\min_{\\pmb{q} \\in \\mathcal{S}_t} \\| \\pmb{p} - \\pmb{q} \\|_2\\). Let \\(\\mathcal{D}_{\\mathrm{certain}} = \\{\\pmb{p} \\in \\mathcal{D}_{\\mathrm{dense}} | \\| \\pmb{p} - \\pmb{n_p} \\| > \\tau\\}\\) be the points that we consider in the certain free space. Then, we supervise \\(\\pmb{p} \\in \\mathcal{D}_{\\mathrm{certain}}\\) by its static signed distance value directly:" + }, + { + "type": "equation", + "bbox": [ + 0.195, + 0.851, + 0.47, + 0.871 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {c e r t a i n}} (\\boldsymbol {p}) = \\left| w _ {\\boldsymbol {p}} ^ {1} - \\tau \\right|, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.885, + 0.424, + 0.905 + ], + "angle": 0, + "content": "where \\( w_{p}^{1} \\) is the first weight of the decoder's output." + }, + { + "type": "image", + "bbox": [ + 0.526, + 0.087, + 0.687, + 0.177 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.598, + 0.178, + 0.612, + 0.187 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.526, + 0.187, + 0.684, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.595, + 0.276, + 0.612, + 0.286 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.709, + 0.088, + 0.868, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.781, + 0.178, + 0.797, + 0.187 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.187, + 0.866, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.779, + 0.276, + 0.795, + 0.286 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.293, + 0.894, + 0.35 + ], + "angle": 0, + "content": "Figure 4. Reconstructed TSDF for KITTI dataset [14]: Subfigures (a) and (b) are the input neighboring frames. Correspondingly, (c) and (d) are horizontal TSDF slices queried from our 4D map. Note that we only display the TSDF values that are less than \\(0.3\\mathrm{m}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.518, + 0.367, + 0.809, + 0.383 + ], + "angle": 0, + "content": "In summary, the final loss \\( L_{\\mathrm{total}} \\) is given by:" + }, + { + "type": "equation", + "bbox": [ + 0.511, + 0.394, + 0.892, + 0.515 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} L _ {\\text {t o t a l}} = \\frac {1}{| \\mathcal {D} _ {\\text {s u r f}} |} \\sum_ {(\\boldsymbol {p}, t) \\in \\mathcal {D} _ {\\text {s u r f}}} L _ {\\text {s u r f}} (\\hat {d}, d _ {\\text {s u r f}}) + \\lambda_ {e} L _ {\\text {e i k o n a l}} (\\boldsymbol {p}, t) \\\\ + \\frac {\\lambda_ {f}}{| \\mathcal {D} _ {\\text {f r e e}} |} \\sum_ {(p, t) \\in \\mathcal {D} _ {\\text {f r e e}}} L _ {\\text {f r e e}} (\\hat {d}) \\\\ + \\frac {\\lambda_ {c}}{\\left| \\mathcal {D} _ {\\text {c e r t a i n}} \\right|} \\sum_ {(\\boldsymbol {p}, t) \\in \\mathcal {D} _ {\\text {c e r t a i n}}} L _ {\\text {c e r t a i n}} (\\boldsymbol {p}), \\tag {12} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.53, + 0.892, + 0.621 + ], + "angle": 0, + "content": "where \\(\\hat{d} = F(\\pmb{p}, t)\\) is the predicted signed distance at the sample position \\(\\pmb{p}\\) at time \\(t\\) and \\(d_{\\mathrm{surf}}\\) is the projective signed distance of sample \\(\\pmb{p}\\). With the above loss function and data sampling strategy, we train our map offline until convergence. In Fig. 4, we show TSDF slices obtained using our optimized 4D map at different times." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.622, + 0.892, + 0.718 + ], + "angle": 0, + "content": "One application of our 4D map representation is dynamic object segmentation. For a point \\( \\pmb{p} \\) in the input scans \\( S_{1:N} \\), its static signed distance value \\( w_{\\pmb{p}}^{1} \\) can be obtained by a simple query. If \\( \\pmb{p} \\) belongs to the static background, it should have \\( w_{\\pmb{p}}^{1} = 0 \\). Therefore, we simply set a threshold \\( d_{\\mathrm{static}} \\) and regard a point as dynamic if \\( w_{\\pmb{p}}^{1} > d_{\\mathrm{static}} \\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.726, + 0.719, + 0.743 + ], + "angle": 0, + "content": "3.3. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.894, + 0.901 + ], + "angle": 0, + "content": "As hyperparameters of our approach, we use the values listed in Tab. 1 in all LiDAR experiments. Additional parameters are determined by the characteristics of the sensor and the dimensions of the scene. For instance, in the reconstruction of autonomous driving scenes, like KITTI, we set the highest resolution for the feature voxels to \\(0.3\\mathrm{m}\\). The truncation distance is set to \\(\\tau = 0.5\\mathrm{m}\\), and the dense area split threshold \\(r_{\\mathrm{dense}} = 15\\mathrm{m}\\). Regarding training time, it takes 12 minutes to train 140 frames from the KITTI dataset using a single Nvidia Quadro RTX 5000." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "15421" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.09, + 0.21, + 0.108 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.115, + 0.471, + 0.253 + ], + "angle": 0, + "content": "In this section, we show the effectiveness of our proposed approach with respect to two aspects: (1) Static mapping quality: The static TSDF built by our method allows us to extract a surface mesh using marching cubes [29]. 
We compare this extracted mesh with the ground truth mesh to evaluate the reconstruction. (2) Dynamic object segmentation: As mentioned above, our method can segment out the dynamic objects in the input scans. We use point-wise dynamic object segmentation accuracy to evaluate the results." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.259, + 0.295, + 0.276 + ], + "angle": 0, + "content": "4.1. Static Mapping Quality" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.282, + 0.47, + 0.433 + ], + "angle": 0, + "content": "Datasets. We select two datasets collected in dynamic environments for quantitative evaluation. One is the synthetic dataset ToyCar3 from Co-Fusion [47], which provides accurate depth images and accurate masks of dynamic objects rendered using Blender, but also depth images with added noise. For this experiment, we select 150 frames from the whole sequence, mask out all dynamic objects in the accurate depth images, and accumulate background static points as the ground-truth static map. The original noisy depth images are used as the input for all methods." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.433, + 0.47, + 0.569 + ], + "angle": 0, + "content": "Furthermore, we use the Newer College [45] dataset as the real-world dataset, which is collected using a 64-beam LiDAR. Compared with synthetic datasets, it contains more uncertainty from measurements and pose estimates. We select 1,300 frames from the courtyard part for testing and this data includes a few pedestrians as dynamic objects. This dataset offers point clouds obtained by a high-precision terrestrial laser scanner that can be directly utilized as ground truth to evaluate the mapping quality." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.569, + 0.47, + 0.629 + ], + "angle": 0, + "content": "Metric and Baselines. We report the reconstruction accuracy, completeness, the Chamfer distance, and the F1-score. Further details on the computation of the metrics can be found in the supplement." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.63, + 0.47, + 0.734 + ], + "angle": 0, + "content": "We compare our method with several different types of state-of-the-art methods: (i) the traditional TSDF-fusion method, VDBfusion [59], which uses space carving to eliminate the effects of dynamic objects, (ii) the data-driven-based method, neural kernel surface reconstruction (NKSR) [18], and (iii) the neural representation based 3D mapping approach, SHINE-mapping [73]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.469, + 0.81 + ], + "angle": 0, + "content": "For NKSR [18], we use the default parameters provided by Huang et al. with their official implementation. To ensure a fair comparison with SHINE-mapping, we adopt an equal number of free space samples (15 samples), aligning with our method for consistency." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.47, + 0.902 + ], + "angle": 0, + "content": "For the ToyCar3 dataset, we set VDB-Fusion's resolution to \\(1\\mathrm{cm}\\). To have all methods with a similar memory consumption, we set the resolution of SHINE-mapping's leaf feature voxel to \\(2\\mathrm{cm}\\), and our method's highest resolution accordingly to \\(2\\mathrm{cm}\\). For the Newer College dataset, we set the resolution to \\(10\\mathrm{cm}\\), \\(30\\mathrm{cm}\\), and \\(30\\mathrm{cm}\\) respectively." + }, + { + "type": "table_caption", + "bbox": [ + 0.568, + 0.09, + 0.825, + 0.104 + ], + "angle": 0, + "content": "Table 1. Hyperparameters of our approach." 
+ }, + { + "type": "table", + "bbox": [ + 0.512, + 0.115, + 0.868, + 0.283 + ], + "angle": 0, + "content": "
Parameter | Value | Description
L | 2 | number of feature voxel levels
D | 8 | length of the feature vectors
K | 32 | number of basis functions
Dmlp | 2 × 64 | layers and size of the MLP decoder
Ms | 5 | number of surface area samples
Mf | 15 | number of free space samples
λe | 0.02 | weight for Eikonal loss
λf | 0.25 | weight for free space loss
λc | 0.2 | weight for certain free space loss
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.294, + 0.892, + 0.351 + ], + "angle": 0, + "content": "Table 2. Quantitative results of the reconstruction quality on ToyCar3. We report the distance error metrics, namely completion, accuracy and Chamfer-L1 in cm. Additionally, we show the F-score in % with a 1 cm error threshold." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.361, + 0.878, + 0.455 + ], + "angle": 0, + "content": "
Method | Comp.↓ | Acc.↓ | C-L1↓ | F-score ↑
VDB-fusion [59] | 0.574 | 0.481 | 0.528 | 97.95
NKSR [18] | 0.526 | 2.809 | 1.667 | 89.54
SHINE-mapping [73] | 0.583 | 0.626 | 0.605 | 98.01
Ours | 0.438 | 0.468 | 0.452 | 98.35
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.473, + 0.892, + 0.534 + ], + "angle": 0, + "content": "Results. The quantitative results for synthetic dataset ToyCar3 and real-world dataset Newer College are presented in Tab. 2 and Tab. 3, respectively. We also show the extracted meshes from all methods in Fig. 5 and Fig. 6." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.535, + 0.893, + 0.837 + ], + "angle": 0, + "content": "Our method outperforms the baselines in terms of Completeness and Chamfer distance for both datasets (cf. Fig. 5 and Fig. 6). Regarding the accuracy, SHINE-mapping and VDB-Fusion can filter part of high-frequency noise by fusion of multiple frames, resulting in better performance on noisy Newer College dataset. In comparison, our method considers every scan as accurate to store 4D information, which makes it more sensitive to measurement noise. On the ToyCar3 dataset, both our method and VDB-Fusion successfully eliminate all moving objects. However, on the Newer College dataset, VDB-Fusion incorrectly eliminates the static tree and parts of the ground, resulting in poor completeness shown in Tab. 3. SHINE-mapping eliminates dynamic pedestrians on the Newer College dataset but retains a portion of the dynamic point cloud on the ToyCar3 dataset, which has a larger proportion of dynamic objects, leading to poorer accuracy in Tab. 2. NKSR performs the worst accuracy because it is unable to eliminate dynamic objects, which means it's not suitable to apply NKSR in dynamic real-world scenes directly." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.847, + 0.771, + 0.864 + ], + "angle": 0, + "content": "4.2. Dynamic Object Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Datasets. For dynamic object segmentation, we use the KTH-Dynamic-Benchmark [72] for evaluation, which in-" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15422" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.233, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.106, + 0.184, + 0.213, + 0.195 + ], + "angle": 0, + "content": "(a) Merged input scans" + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.089, + 0.397, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.301, + 0.184, + 0.342, + 0.195 + ], + "angle": 0, + "content": "(b) Ours" + }, + { + "type": "image", + "bbox": [ + 0.403, + 0.089, + 0.558, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.435, + 0.184, + 0.533, + 0.195 + ], + "angle": 0, + "content": "(c) VDB-Fusion [59]" + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.089, + 0.721, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.61, + 0.184, + 0.681, + 0.195 + ], + "angle": 0, + "content": "(d) NKSR [18]" + }, + { + "type": "image", + "bbox": [ + 0.727, + 0.089, + 0.882, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.749, + 0.184, + 0.866, + 0.196 + ], + "angle": 0, + "content": "(e) SHINE-mapping [73]" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.202, + 0.894, + 0.23 + ], + "angle": 0, + "content": "Figure 5. A comparison of the static mapping results of different methods on the ToyCar3 dataset. There are two dynamic toy cars moving through the scene. 
Our method can reconstruct the static scene with fine details and eliminate the dynamic car." + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.239, + 0.236, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.106, + 0.403, + 0.213, + 0.414 + ], + "angle": 0, + "content": "(a) Merged input scans" + }, + { + "type": "image", + "bbox": [ + 0.244, + 0.239, + 0.399, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.301, + 0.402, + 0.342, + 0.413 + ], + "angle": 0, + "content": "(b) Ours" + }, + { + "type": "image", + "bbox": [ + 0.406, + 0.239, + 0.56, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.434, + 0.402, + 0.533, + 0.413 + ], + "angle": 0, + "content": "(c) VDB-Fusion [59]" + }, + { + "type": "image", + "bbox": [ + 0.568, + 0.239, + 0.723, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.61, + 0.402, + 0.681, + 0.413 + ], + "angle": 0, + "content": "(d) NKSR [18]" + }, + { + "type": "image", + "bbox": [ + 0.731, + 0.244, + 0.884, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.748, + 0.402, + 0.866, + 0.414 + ], + "angle": 0, + "content": "(e) SHINE-mapping [73]" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.42, + 0.893, + 0.462 + ], + "angle": 0, + "content": "Figure 6. A comparison of the static mapping results of different methods on the Newer College dataset. Several pedestrians are moving through the scene during the data collection. Our method can reconstruct the static scene completely and eliminate the moving pedestrians. Although VDB-Fusion manages to eliminate the pedestrians, it incorrectly removes the tree highlighted in the orange box." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.477, + 0.47, + 0.532 + ], + "angle": 0, + "content": "Table 3. Quantitative results of the reconstruction quality on Newer College. We report the distance error metrics, namely completion, accuracy and Chamfer-L1 in cm. Additionally, we show the F-score in % with a \\(20\\mathrm{cm}\\) error threshold." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.544, + 0.452, + 0.637 + ], + "angle": 0, + "content": "
Method | Comp.↓ | Acc.↓ | C-L1↓ | F-score ↑
VDB-fusion [59] | 7.32 | 5.99 | 6.65 | 96.68
NKSR [18] | 6.87 | 9.28 | 8.08 | 95.65
SHINE-mapping [73] | 6.80 | 5.86 | 6.33 | 97.67
Ours | 5.85 | 6.49 | 6.17 | 97.50
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.656, + 0.468, + 0.822 + ], + "angle": 0, + "content": "cludes four sequences in total: sequence 00 (frame 4,390 - 4,530) and sequence 05 (frame 2,350 - 2,670) from the KITTI dataset [3, 14], which are captured by a 64-beam LiDAR, one sequence from the Argoverse2 dataset [66] consisting of 575 frames captured by two 32-beam LiDARs, and a semi-indoor sequence captured by a sparser 16-beam LiDAR. All sequences come with corresponding pose files and point-wise dynamic or static labels as the ground truth. It is worth noting that the poses for KITTI 00 and 05 were obtained from SuMa [2] and the pose files for the Semi-indoor sequence come from NDT-SLAM [50]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.468, + 0.9 + ], + "angle": 0, + "content": "Metric and Baselines. The KTH-Dynamic-Benchmark evaluates the performance of the method by measuring the classification accuracy of dynamic points (DA%), static points (SA%) and also their associated accuracy (AA%) where \\( AA = \\sqrt{DA \\cdot SA} \\). The benchmark provides various" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.479, + 0.892, + 0.705 + ], + "angle": 0, + "content": "bases such as the state-of-the-art LiDAR dynamic object removal methods – Erasor [27] and Removert [21], as well as the traditional 3D mapping method, Octomap [17, 69], and its modified versions, Octomap with ground fitting and outlier filtering. As SHINE-mapping demonstrates the ability to remove dynamic objects in our static mapping experiments, we also report its result in this benchmark. Additionally, we report the performance of the state-of-the-art online moving object segmentation methods, 4DMOS [31] and its extension MapMOS [32]. As these two methods utilize KITTI sequences 00 and 05 for training, we only show the results of the remaining two sequences. For the parameter setting, we set our method's leaf resolution to \\(0.3\\mathrm{m}\\), and the threshold for segmentation as \\(d_{\\mathrm{static}} = 0.16\\mathrm{m}\\). We set the leaf resolution for Octomap to \\(0.1\\mathrm{m}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Results. The quantitative results of the dynamic object segmentation are shown in Tab. 4. And we depict the accumulated static points generated by different methods in Fig. 7. We can see that our method achieves the best associated accuracy (AA) in three autonomous driving sequences (KITTI 00, KITTI 05, Argoverse2) and vastly outperforms baselines. The supervised learning-based methods 4DMOS and MapMOS do not obtain good dynamic accuracy (DA) due to limited generalizability. Erasor and Octomap tend to over-segment dynamic objects, resulting in poor static accuracy (SA). Removert and SHINE-mapping are too conservative and cannot detect all dynamic objects. Benefiting" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "15423" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.892, + 0.119 + ], + "angle": 0, + "content": "Table 4. Quantitative results of the dynamic object removal quality on the KTH-Dynamic-Benchmark. We report the static accuracy SA, dynamic static DA and the associated accuracy AA. Octomap* refers to the modified Octomap implementation by Zhang et al. [72]." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.128, + 0.885, + 0.297 + ], + "angle": 0, + "content": "
Method | KITTI Seq. 00 | KITTI Seq. 05 | Argoverse2 | Semi-Indoor
 | SA | DA | AA | SA | DA | AA | SA | DA | AA | SA | DA | AA
Octomap [17] | 68.05 | 99.69 | 82.37 | 66.28 | 99.24 | 81.10 | 65.91 | 96.70 | 79.84 | 88.97 | 82.18 | 85.51
Octomap* [72] | 93.06 | 98.67 | 95.83 | 93.54 | 92.48 | 93.01 | 82.66 | 82.44 | 82.55 | 96.79 | 73.50 | 84.34
Removert [21] | 99.44 | 41.53 | 64.26 | 99.42 | 22.28 | 47.06 | 98.97 | 31.16 | 55.53 | 99.96 | 12.15 | 34.85
Erasor [27] | 66.70 | 98.54 | 81.07 | 69.40 | 99.06 | 82.92 | 77.51 | 99.18 | 87.68 | 94.90 | 66.26 | 79.30
SHINE [73] | 98.99 | 92.37 | 95.63 | 98.91 | 53.27 | 72.58 | 97.66 | 72.62 | 84.21 | 98.88 | 59.19 | 76.51
4DMOS [31] | - | - | - | - | - | - | 99.94 | 69.33 | 83.24 | 99.99 | 10.60 | 32.55
MapMOS [32] | - | - | - | - | - | - | 99.96 | 85.88 | 92.65 | 99.99 | 4.75 | 21.80
Ours | 99.46 | 98.47 | 98.97 | 99.54 | 98.36 | 98.95 | 99.17 | 95.91 | 97.53 | 94.17 | 72.79 | 82.79
" + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.306, + 0.241, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.245, + 0.306, + 0.403, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.406, + 0.306, + 0.565, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.568, + 0.306, + 0.726, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.728, + 0.306, + 0.888, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.439, + 0.239, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.243, + 0.439, + 0.401, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.405, + 0.439, + 0.562, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.566, + 0.439, + 0.723, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.727, + 0.439, + 0.886, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.559, + 0.892, + 0.629 + ], + "angle": 0, + "content": "Figure 7. Comparison of dynamic object removal results produced by our proposed method and three baseline methods on the Argoverse2 data sequence of the KTH-benchmark. We show the bird's eye view on the first row and the zoomed view from the blue frustum shown in (a) on the second row. For the ground truth results in (a), the dynamic objects are shown in red. We only show the static points of ground truth for clearer comparison in zoomed view (f). We highlight the over-segmented parking car and sign by Erasor and the undetected moving vehicle by Removert." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.646, + 0.469, + 0.691 + ], + "angle": 0, + "content": "from the continuity and large capacity of the 4D neural representation, we strike a better balance between preserving static background points and removing dynamic objects." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.692, + 0.47, + 0.752 + ], + "angle": 0, + "content": "It is worth mentioning again that our method does not rely on any pre-processing or post-processing algorithm such as ground fitting, outlier filtering, and clustering, but also does not require labels for training." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.769, + 0.196, + 0.785 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.47, + 0.901 + ], + "angle": 0, + "content": "In this paper, we propose a 4D implicit neural map representation for dynamic scenes that allows us to represent the TSDF of static and dynamic parts of a scene. For this purpose, we use a hierarchical voxel-based feature representation that is then decoded into weights for basis functions to represent a time-varying TSDF that can be queried at arbitrary locations. For learning the representation from a se" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.646, + 0.892, + 0.767 + ], + "angle": 0, + "content": "quence of LiDAR scans, we design an effective data sampling strategy and loss functions. Equipped with our proposed representation, we experimentally show that we are able to tackle the challenging problems of static mapping and dynamic object segmentation. 
More specifically, our experiments show that our method has the ability to accurately reconstruct 3D maps of the static parts of a scene and can completely remove moving objects at the same time." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.773, + 0.892, + 0.865 + ], + "angle": 0, + "content": "Limitations. While our method achieves compelling results, we have to acknowledge that we currently rely on estimated poses by a separate SLAM approach, but also cannot apply our approach in an online fashion. However, we see this as an avenue for future research into joint incremental mapping and pose estimation." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Acknowledgements. We thank Benedikt Mersch for the fruitful discussion and for providing experiment baselines." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15424" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.17 + ], + "angle": 0, + "content": "[1] Ioan A. Barsan, Peidong Liu, Marc Pollefeys, and Andreas Geiger. Robust Dense Mapping for Large-Scale Dynamic Environments. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.47, + 0.213 + ], + "angle": 0, + "content": "[2] Jens Behley and Cyril Stachniss. Efficient Surfel-Based SLAM using 3D Laser Range Data in Urban Environments. In Proc. of Robotics: Science and Systems (RSS), 2018. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.213, + 0.471, + 0.284 + ], + "angle": 0, + "content": "[3] Jens Behley, Martin Garbade, Aandres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Juergen Gall. SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.284, + 0.471, + 0.325 + ], + "angle": 0, + "content": "[4] Peter Biber and Tom Duckett. Dynamic Maps for Long-Term Operation of Mobile Service Robots. In Proc. of Robotics: Science and Systems (RSS), 2005. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.326, + 0.471, + 0.408 + ], + "angle": 0, + "content": "[5] Cesar Cadena, Luca Carlone, Henry Carrillo, Yasir Latif, Davide Scaramuzza, Jose Neira, Ian Reid, and John J. Leonard. Past, Present, and Future of Simultaneous Localization And Mapping: Towards the Robust-Perception Age. IEEE Trans. on Robotics (TRO), 32(6):1309-1332, 2016. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.41, + 0.471, + 0.478 + ], + "angle": 0, + "content": "[6] Hongrui Cai, Wanquan Feng, Xuetao Feng, Yan Wang, and Juyong Zhang. Neural surface reconstruction of dynamic scenes with monocular rgb-d camera. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.48, + 0.471, + 0.522 + ], + "angle": 0, + "content": "[7] Ang Cao and Justin Johnson. HexPlane: A Fast Representation for Dynamic Scenes. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.523, + 0.471, + 0.592 + ], + "angle": 0, + "content": "[8] Xieyuanli Chen, Shijie Li, Benedikt Mersch, Louis Wiesmann, Juergen Gall, Jens Behley, and Cyril Stachniss. Moving Object Segmentation in 3D LiDAR Data: A Learning-based Approach Exploiting Sequential Data. IEEE Robotics and Automation Letters (RA-L), 6(4):6529-6536, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.593, + 0.471, + 0.662 + ], + "angle": 0, + "content": "[9] Xieyuanli Chen, Benedikt Mersch, Lucas Nunes, Rodrigo Marcuzzi, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Automatic Labeling to Generate Training Data for Online LiDAR-Based Moving Object Segmentation. IEEE Robotics and Automation Letters (RA-L), 7(3):6107-6114, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.663, + 0.47, + 0.732 + ], + "angle": 0, + "content": "[10] Xu Chen, Tianjian Jiang, Jie Song, Max Rietmann, Andreas Geiger, Michael J. Black, and Otmar Hilliges. Fast-snarf: A fast deformer for articulated neural fields. IEEE Trans. on Pattern Analysis and Machine Intelligence (TPAMI), 45(10): 11796-11809, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.733, + 0.47, + 0.789 + ], + "angle": 0, + "content": "[11] Pierre Dellenbach, Jean-Emmanuel Deschaud, Bastien Jacquet, and Francois Goulette. CT-ICP Real-Time Elastic LiDAR Odometry with Loop Closure. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.47, + 0.831 + ], + "angle": 0, + "content": "[12] Jean-Emmanuel Deschaud. IMLS-SLAM: scan-to-model matching based on 3D data. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.471, + 0.9 + ], + "angle": 0, + "content": "[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik R. Warburg, Benjamin Recht, and Angjoo Kanazawa. K-Planes: Explicit Radiance Fields in Space, Time, and Appearance. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.471, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.148 + ], + "angle": 0, + "content": "[14] Andreas Geiger, Peter Lenz, and Raquel Urtasun. Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite. In Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2012. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.151, + 0.892, + 0.207 + ], + "angle": 0, + "content": "[15] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit Geometric Regularization for Learning Shapes. In Proc. of the Intl. Conf. on Machine Learning (ICML), 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.209, + 0.895, + 0.264 + ], + "angle": 0, + "content": "[16] Dirk Hähnel, Dirk Schulz, and Wolfram Burgard. Mobile robot mapping in populated environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2002. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.267, + 0.892, + 0.323 + ], + "angle": 0, + "content": "[17] Armin Hornung, Kai M. Wurm, Maren Bennewitz, Cyril Stachniss, and Wolfram Burgard. OctoMap: An Efficient Probabilistic 3D Mapping Framework Based on Octrees. Autonomous Robots, 34(3):189-206, 2013. 
1, 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.325, + 0.892, + 0.381 + ], + "angle": 0, + "content": "[18] Jiahui Huang, Zan Gojcic, Matan Atzmon, Or Litany, Sanja Fidler, and Francis Williams. Neural Kernel Surface Reconstruction. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.383, + 0.892, + 0.438 + ], + "angle": 0, + "content": "[19] Shengyu Huang, Zan Gojcic, Jiahui Huang, Andreas Wieser, and Konrad Schindler. Dynamic 3D Scene Analysis by Point Cloud Accumulation. In Proc. of the Europ. Conf. on Computer Vision (ECCV), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.441, + 0.895, + 0.511 + ], + "angle": 0, + "content": "[20] Shengyu Huang, Zan Gojcic, Zian Wang, Francis Williams, Yoni Kasten, Sanja Fidler, Konrad Schindler, and Or Litany. Neural LiDAR Fields for Novel View Synthesis. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2023. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.513, + 0.892, + 0.569 + ], + "angle": 0, + "content": "[21] Giseop Kim and Ayoung Kim. Remove, Then Revert: Static Point Cloud Map Construction Using Multiresolution Range Images. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.571, + 0.892, + 0.627 + ], + "angle": 0, + "content": "[22] Xin Kong, Shikun Liu, Marwan Taher, and Andrew J. Davison. vMAP: Vectorised Object Mapping for Neural Field SLAM. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.629, + 0.892, + 0.713 + ], + "angle": 0, + "content": "[23] Abhijit Kundu, Kyle Genova, Xiaoqi Yin, Alireza Fathi, Caroline Pantofaru, Leonidas Guibas, Andrea Tagliasacchi, Frank Dellaert, and Thomas Funkhouser. Panoptic neural fields: A semantic object-aware neural scene representation. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.715, + 0.892, + 0.771 + ], + "angle": 0, + "content": "[24] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.773, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[25] Zhaoshuo Li, Thomas Müller, Alex Evans, Russell H Taylor, Mathias Unberath, Ming-Yu Liu, and Chen-Hsuan Lin. Neuralangelo: High-fidelity neural surface reconstruction. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.845, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[26] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. DynIBaR: Neural Dynamic Image-Based Rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.895, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15425" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[27] Hyungtae Lim, Sungwon Hwang, and Hyun Myung. 
ERASOR: Egocentric Ratio of Pseudo Occupancy-Based Dynamic Object Removal for Static 3D Point Cloud Map Building. IEEE Robotics and Automation Letters (RA-L), 6(2): 2272-2279, 2021. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.164, + 0.47, + 0.233 + ], + "angle": 0, + "content": "[28] Hyungtae Lim, Lucas Nunes, Benedikt Mersch, Xieyuanli Chen, Jens Behley, and Cyril Stachniss. ERASOR2: Instance-Aware Robust 3D Mapping of the Static World in Dynamic Scenes. In Proc. of Robotics: Science and Systems (RSS), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.235, + 0.47, + 0.29 + ], + "angle": 0, + "content": "[29] William E. Lorensen and Harvey E. Cline. Marching Cubes: a High Resolution 3D Surface Construction Algorithm. In Proc. of the Intl. Conf. on Computer Graphics and Interactive Techniques (SIGGRAPH), 1987. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.292, + 0.471, + 0.36 + ], + "angle": 0, + "content": "[30] John McCormac, Ankur Handa, Aandrew J. Davison, and Stefan Leutenegger. SemanticFusion: Dense 3D Semantic Mapping with Convolutional Neural Networks. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.363, + 0.47, + 0.432 + ], + "angle": 0, + "content": "[31] Benedikt Mersch, Xieyuanli Chen, Ignacio Vizzo, Lucas Nunes, Jens Behley, and Cyril Stachniss. Receding Moving Object Segmentation in 3D LiDAR Data Using Sparse 4D Convolutions. IEEE Robotics and Automation Letters (RA-L), 7(3):7503-7510, 2022. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.434, + 0.469, + 0.516 + ], + "angle": 0, + "content": "[32] Benedikt Mersch, Tiziano Guadagnino, Xieyuanli Chen, Tiziano, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Building Volumetric Beliefs for Dynamic Environments Exploiting Map-Based Moving Object Segmentation. IEEE Robotics and Automation Letters (RA-L), 8(8):5180-5187, 2023. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.519, + 0.469, + 0.587 + ], + "angle": 0, + "content": "[33] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.59, + 0.469, + 0.645 + ], + "angle": 0, + "content": "[34] Daniel Meyer-Delius, Maximilian Beinhofer, and Wolfram Burgard. Occupancy Grid Models for Robot Mapping in Changing Environments. In Proc. of the Conf. on Advancements of Artificial Intelligence (AAAI), 2012. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.469, + 0.716 + ], + "angle": 0, + "content": "[35] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In Proc. of the Europ. Conf. on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.718, + 0.469, + 0.771 + ], + "angle": 0, + "content": "[36] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. on Graphics, 41(4): 102:1-102:15, 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.469, + 0.857 + ], + "angle": 0, + "content": "[37] Richard A. Newcombe, Shahram Izadi, Otmar Hilliges, David Molyneaux, David Kim, Andrew J. 
Davison, Pushmeet Kohli, Jamie Shotton, Steve Hodges, and Andrew Fitzgibbon. KinectFusion: Real-Time Dense Surface Mapping and Tracking. In Proc. of the Intl. Symposium on Mixed and Augmented Reality (ISMAR), 2011. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[38] Joseph Ortiz, Alexander Clegg, Jing Dong, Edgar Sucar, David Novotny, Michael Zollhoefer, and Mustafa Mukadam. isdf: Real-time neural signed distance fields for robot per" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.119 + ], + "angle": 0, + "content": "ception. In Proc. of Robotics: Science and Systems (RSS), 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.191 + ], + "angle": 0, + "content": "[39] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguere, and Cyril Stachniss. ReFusion: 3D Reconstruction in Dynamic Environments for RGB-D Cameras Exploiting Residuals. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.193, + 0.892, + 0.262 + ], + "angle": 0, + "content": "[40] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning Continuous Signed Distance Functions for Shape Representation. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.264, + 0.892, + 0.332 + ], + "angle": 0, + "content": "[41] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable Neural Radiance Fields. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.334, + 0.892, + 0.417 + ], + "angle": 0, + "content": "[42] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M. Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. ACM Trans. on Graphics (TOG), 40(6), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.42, + 0.892, + 0.475 + ], + "angle": 0, + "content": "[43] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.477, + 0.892, + 0.53 + ], + "angle": 0, + "content": "[44] Sameera Ramasinghe, Violetta Shevchenko, Gil Avraham, and Anton Van Den Hengel. *Blirf: Band limited radiance fields for dynamic scene modeling.* arXiv preprint arXiv:2302.13543, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.533, + 0.892, + 0.602 + ], + "angle": 0, + "content": "[45] Milad Ramezani, Yiduo Wang, Marco Camurri, David Wisth, Matias Mattamala, and Maurice Fallon. The Newer College Dataset: Handheld LiDAR, Inertial and Vision with Ground Truth. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.604, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[46] Konstantinos Rematas, Andrew Liu, Pratul P. Srinivasan, Jonathan T. 
Barron, Andrea Tagliasacchi, Thomas Funkhouser, and Vittorio Ferrari. Urban radiance fields. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.675, + 0.892, + 0.731 + ], + "angle": 0, + "content": "[47] Martin Rünz and Lourdes Agapito. Co-Fusion: Real-Time Segmentation, Tracking and Fusion of Multiple Objects. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2017. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.732, + 0.892, + 0.8 + ], + "angle": 0, + "content": "[48] Martin Runz, Maud Buffier, and Lourdes Agapito. MaskFusion: Real-Time Recognition, Tracking and Reconstruction of Multiple Moving Objects. In Proc. of the Intl. Symposium on Mixed and Augmented Reality (ISMAR), 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.803, + 0.892, + 0.871 + ], + "angle": 0, + "content": "[49] Jari Saarinen, Henrik Andreasson, and Achim Lilienthal. Independent Markov Chain Occupancy Grid Maps for Representation of Dynamic Environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2012. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[50] Jari P. Saarinen, Todor Stoyanov, Henrik Andreasson, and Achim J. Lilienthal. Fast 3D Mapping in Highly Dynamic" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "15426" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.109, + 0.092, + 0.469, + 0.134 + ], + "angle": 0, + "content": "Environments Using Normal Distributions Transform Occupancy Maps. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2013. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.137, + 0.469, + 0.207 + ], + "angle": 0, + "content": "[51] Renato F. Salas-Moreno, Richard A. Newcombe, Hauke Strasdat, Paul H. Kelly, and Andrew J. Davison. SLAM++: Simultaneous Localisation and Mapping at the Level of Objects. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2013. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.209, + 0.469, + 0.277 + ], + "angle": 0, + "content": "[52] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.281, + 0.469, + 0.336 + ], + "angle": 0, + "content": "[53] Chonghyuk Song, Gengshan Yang, Kangle Deng, Jun-Yan Zhu, and Deva Ramanan. Total-recon: Deformable scene reconstruction for embodied view synthesis. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.339, + 0.469, + 0.42 + ], + "angle": 0, + "content": "[54] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. NeRF-Player: A Streamable Dynamic Scene Representation with Decomposed Neural Radiance Fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.424, + 0.469, + 0.479 + ], + "angle": 0, + "content": "[55]Cyrill Stachniss and Wolfram Burgard. Mobile Robot Mapping and Localization in Non-Static Environments. In Proc. of the National Conf. on Artificial Intelligence (AAAI), 2005.1,2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.483, + 0.469, + 0.538 + ], + "angle": 0, + "content": "[56]Cyrill Stachniss,John J.Leonard,and Sebastian Thrun. Springer Handbook of Robotics,2nd edition, chapter Chapt.46:Simultaneous Localization and Mapping. Springer Verlag,2016.1,2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.541, + 0.469, + 0.568 + ], + "angle": 0, + "content": "[57] Sebastian Thrun, Wolfram Burgard, and Dieter Fox. Probabilistic Robotics. MIT Press, 2005. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.571, + 0.469, + 0.653 + ], + "angle": 0, + "content": "[58] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.657, + 0.469, + 0.71 + ], + "angle": 0, + "content": "[59] Ignacio Vizzo, Tiziano Guadagnino, Jens Behley, and Cyril Stachniss. VDBFusion: Flexible and Efficient TSDF Integration of Range Sensor Data. Sensors, 22(3):1296, 2022. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.716, + 0.469, + 0.784 + ], + "angle": 0, + "content": "[60] Ignacio Vizzo, Tiziano Guadagnino, Benedikt Mersch, Louis Wiesmann, Jens Behley, and Cyril Stachniss. KISS-ICP: In Defense of Point-to-Point ICP - Simple, Accurate, and Robust Registration If Done the Right Way. IEEE Robotics and Automation Letters (RA-L), 8(2):1029-1036, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.787, + 0.469, + 0.856 + ], + "angle": 0, + "content": "[61] Aishan Walcott-Bryant, Michael Kaess, Hordur Johannsson, and John J. Leonard. Dynamic Pose Graph SLAM: Long-Term Mapping in Low Dynamic Environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2012. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[62] Chaoyang Wang, Ben Eckart, Simon Lucey, and Orazio Gallo. Neural trajectory fields for dynamic novel view synthesis. arXiv preprint arXiv:2105.05994, 2021. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.161 + ], + "angle": 0, + "content": "[63] Chung-Yi Weng, Brian Curless, Pratul P. Srinivasan, Jonathan T. Barron, and Ira Kemelmacher-Shlizerman. HumanNeRF: Free-Viewpoint Rendering of Moving People From Monocular Video. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.892, + 0.218 + ], + "angle": 0, + "content": "[64] Thomas Whelan, Stefan Leutenegger, Renato F. Salas-Moreno, Ben Glocker, and Andrew J. Davison. ElasticFusion: Dense SLAM Without A Pose Graph. In Proc. of Robotics: Science and Systems (RSS), 2015. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.288 + ], + "angle": 0, + "content": "[65] Louis Wiesmann, Tiziano Guadagnino, Ignacio Vizzo, Nicky Zimmerman, Yue Pan, Haofei Kuang, Jens Behley, and Cyrill Stachniss. LocNDF: Neural Distance Field Mapping for Robot Localization. IEEE Robotics and Automation Letters (RA-L), 8(8):4999-5006, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.291, + 0.892, + 0.386 + ], + "angle": 0, + "content": "[66] Benjamin Wilson, William Qi, Tanmay Agarwal, John Lambert, Jagjeet Singh, Siddhesh Khandelwal, Bowen Pan, Ratnesh Kumar, Andrew Hartnett, Jhony Kaesemodel Pontes, Deva Ramanan, Peter Carr, and James Hays. Argoverse 2: Next Generation Datasets for Self-driving Perception and Forecasting. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.389, + 0.892, + 0.429 + ], + "angle": 0, + "content": "[67] Denis F. Wolf and Guarav S. Sukhatme. Mobile Robot Simultaneous Localization and Mapping in Dynamic Environments. Autonomous Robots, 19, 2005. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.432, + 0.892, + 0.5 + ], + "angle": 0, + "content": "[68] Tianhao Wu, Fangcheng Zhong, Andrea Tagliasacchi, Forrester Cole, and Cengiz Oztireli. D\\(^2\\)NeRF: Self-Supervised Decoupling of Dynamic and Static Objects from a Monocular Video. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.502, + 0.892, + 0.583 + ], + "angle": 0, + "content": "[69] Kai M. Wurm, Armin Hornung, Maren Bennewitz, Cyril Stachniss, and Wolfram Burgard. OctoMap: A Probabilistic, Flexible, and Compact 3D Map Representation for Robotic Systems. In Workshop on Best Practice in 3D Perception and Modeling for Mobile Manipulation, IEEE Int. Conf. on Robotics & Automation (ICRA), 2010. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.586, + 0.892, + 0.627 + ], + "angle": 0, + "content": "[70] Dongyu Yan, Xiaoyang Lyu, Jieqi Shi, and Yi Lin. Efficient Implicit Neural Reconstruction Using LiDAR. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.629, + 0.892, + 0.697 + ], + "angle": 0, + "content": "[71] Wentao Yuan, Zhaoyang Lv, Tanner Schmidt, and Steven Lovegrove. Star: Self-supervised tracking and reconstruction of rigid objects in motion with neural rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.7, + 0.892, + 0.768 + ], + "angle": 0, + "content": "[72] Qingwen Zhang, Daniel Duberg, Ruoyu Geng, Mingkai Jia, Lujia Wang, and Patric Jensfelt. A dynamic points removal benchmark in point cloud maps. In IEEE 26th International Conference on Intelligent Transportation Systems (ITSC), pages 608-614, 2023. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.77, + 0.892, + 0.838 + ], + "angle": 0, + "content": "[73] Xingguang Zhong, Yue Pan, Jens Behley, and Cyril Stachniss. SHINE-Mapping: Large-Scale 3D Mapping Using Sparse Hierarchical Implicit Neural Representations. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023. 
2, 6, 7, 8" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15427" + } + ] +] \ No newline at end of file diff --git a/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/024f2dc1-2c03-4b1e-a716-0e0aea35b1de_origin.pdf b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/024f2dc1-2c03-4b1e-a716-0e0aea35b1de_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..31738e9499799a9d4b5ccf3f5d40ef7ae7f4eed6 --- /dev/null +++ b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/024f2dc1-2c03-4b1e-a716-0e0aea35b1de_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6980a4ce0a9c1d32a9342a1bce6d761bacdc8e3cc8cd5fe95e64e94dba6c5be7 +size 6139804 diff --git a/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/full.md b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..492aa965204a59a773c189538ecd8bf154af42a2 --- /dev/null +++ b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/full.md @@ -0,0 +1,405 @@ +# 3D LiDAR Mapping in Dynamic Environments Using a 4D Implicit Neural Representation + +Xingguang Zhong + +Yue Pan + +Cyrill Stachniss $^{1,2}$ + +Jens Behley + +1Center for Robotics, University of Bonn, 2Lamarr Institute for Machine Learning and Artificial Intelligence {zhong, yue.pan, cyrill.stachniss, Jens.vehley}@igg.uni-bonn.de + +# Abstract + +Building accurate maps is a key building block to enable reliable localization, planning, and navigation of autonomous vehicles. We propose a novel approach for building accurate maps of dynamic environments utilizing a sequence of LiDAR scans. To this end, we propose encoding the 4D scene into a novel spatio-temporal implicit neural map representation by fitting a time-dependent truncated signed distance function to each point. Using our representation, we extract the static map by filtering the dynamic parts. Our neural representation is based on sparse feature grids, a globally shared decoder, and time-dependent basis functions, which we jointly optimize in an unsupervised fashion. To learn this representation from a sequence of LiDAR scans, we design a simple yet efficient loss function to supervise the map optimization in a piecewise way. We evaluate our approach on various scenes containing moving objects in terms of the reconstruction quality of static maps and the segmentation of dynamic point clouds. The experimental results demonstrate that our method is capable of removing the dynamic part of the input point clouds while reconstructing accurate and complete 3D maps, outperforming several state-of-the-art methods. + +# 1. Introduction + +Mapping using range sensors, like LiDAR or RGB-D cameras, is a fundamental task in computer vision and robotics. Often, we want to obtain accurate maps to support downstream tasks such as localization, planning, or navigation. For achieving an accurate reconstruction of an outdoor environment, we have to account for dynamics caused by moving objects, such as vehicles or pedestrians. 
Furthermore, dynamic object removal plays an important role in autonomous driving and robotics applications for creating digital twins for realistic simulation and high-definition mapping, where a static map is augmented with semantic and task-relevant information.

![](images/6473e44b651ef1cd7abce4bfc4dc523c37147e3ccf451c3664ee202a2f382717.jpg)
(a)

![](images/b272e82d84603a0b12b8b1e058766ea4751e217f4718f55e8cf43fd6f7b550b5.jpg)
(b)

![](images/9bd3a3fb5914879d7ced7fd13c61a1efb77ea15a2c1f2e458a5d3e2ea0c6d2df.jpg)
(c)

![](images/99700c34d4e15036d925f349d18336d6fb4830b54919ad360b7f20a2b21c7e54.jpg)
(d)
Figure 1. Given a sequence of point clouds, as shown in (a), we optimize our 4D neural representation that can be queried at arbitrary positions for a specific time. (b) Based on the estimated time-dependent TSDF values, we can extract a mesh at a specific point in time. Additionally, our 4D neural representation can also be used for static mapping (c) and dynamic object removal (d).

Mapping and state estimation in dynamic environments are classical problems in robotics [5, 56, 57]. Approaches for simultaneous localization and mapping (SLAM) can apply different strategies to deal with dynamics. Common ways are: (1) filtering dynamics from the input [1, 30, 47, 48, 51] as a pre-processing step, which requires a semantic interpretation of the scene; (2) modeling the occupancy in the map representation [17, 34, 37, 49, 50, 64], where dynamics can be implicitly removed by retrospectively removing measurements in free space; (3) including the dynamics in the state estimation [4, 16, 55, 61, 67] to model which measurements originated from the dynamic and static parts of the environment. Our proposed method falls into the last category and allows us to model dynamics directly in the map representation, leading to a spatio-temporal map representation.

Recently, implicit neural representations have gained increasing interest in computer vision for novel view synthesis [35, 36] and 3D shape reconstruction [33, 40]. Due to their compactness and continuity, several approaches [65, 70, 73] investigate the use of neural representations in large-scale 3D LiDAR mapping, leading to accurate maps while significantly reducing memory consumption. However, these approaches often do not address the problem of handling dynamics during mapping. The recent progress on dynamic NeRF [7, 13, 44, 52] and neural deformable object reconstruction [6, 10] indicates that neural representations can also be used to represent dynamic scenes, which inspires us to tackle the problem of mapping in dynamic environments from the perspective of 4D reconstruction.

In this paper, we propose a novel method to reconstruct large 4D dynamic scenes by encoding every point's time-dependent truncated signed distance function (TSDF) into an implicit neural scene representation. As illustrated in Fig. 1, we take sequentially recorded LiDAR point clouds collected in dynamic environments as input and generate a TSDF for each time frame, which can be used to extract a mesh using marching cubes [29]. The background TSDF, which is unchanged during the whole sequence, can be extracted from the 4D signal easily. We regard it as a static map that can be used to segment dynamic objects from the original point cloud. Compared to traditional voxel-based mapping methods, the continuous neural representation allows for the removal of dynamic objects while preserving rich map details.
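As a minimal, illustrative sketch (not our actual implementation), this extraction step can be pictured as querying the optimized 4D map on a regular grid at a fixed time and running marching cubes on the zero level set. The query function `query_tsdf`, the scene bounds, and the voxel size below are placeholders for the learned map and its configuration.

```python
import numpy as np
from skimage import measure  # provides the marching cubes implementation


def extract_mesh_at_time(query_tsdf, t, bounds_min, bounds_max, voxel_size=0.1):
    """Extract a triangle mesh of the scene at a fixed time t.

    query_tsdf(points, t) is assumed to return the predicted signed
    distance for an (N, 3) array of query positions at time t.
    """
    bounds_min = np.asarray(bounds_min, dtype=float)
    bounds_max = np.asarray(bounds_max, dtype=float)

    # Regular grid of query positions covering the scene bounds.
    axes = [np.arange(lo, hi, voxel_size) for lo, hi in zip(bounds_min, bounds_max)]
    grid = np.stack(np.meshgrid(*axes, indexing="ij"), axis=-1)  # (X, Y, Z, 3)

    # Query the implicit 4D map and reshape into a dense TSDF volume.
    sdf = query_tsdf(grid.reshape(-1, 3), t).reshape(grid.shape[:3])

    # Marching cubes on the zero level set yields the surface at time t.
    verts, faces, _, _ = measure.marching_cubes(sdf, level=0.0,
                                                spacing=(voxel_size,) * 3)
    return verts + bounds_min, faces
```

Querying the static part of the representation on the same grid, instead of the time-dependent value, yields the static background mesh in the same way.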
In summary, the main contributions of this paper are: + +- We propose a novel implicit neural representation to jointly reconstruct a dynamic 3D environment and maintain a static map using sequential LiDAR scans as input. +- We employ a piecewise training data sampling strategy and design a simple, yet effective loss function that maintains the consistency of the static point supervision through gradient constraints. +- We evaluate the mapping results by the accuracy of the dynamic object segmentation as well as the quality of the reconstructed static map showing superior performance compared to several baselines. We provide our code and the data used for experiments. + +# 2. Related Work + +Mapping and SLAM in dynamic environments is a classical topic in robotics [5, 56, 57] with a large body of work, which tackles the problem by pre-processing the sensor data [1, 30, 47, 48, 51], occupancy estimation to filter dynamics by removing measurements in free space [17, 34, 37, 39, 49, 50, 64], or state estimation techniques [4, 16, 55, 61, 67]. Below, we focus on closely related approaches using neural representations but also static map building approaches for scenes containing dynamics. + +Dynamic NeRF. Dynamic NeRFs aim to solve the prob- + +lem of novel view synthesis in dynamic environments. Some approaches [41-43, 58, 63] address this challenge by modeling the deformation of each point with respect to a canonical frame. However, these methods cannot represent newly appearing objects. This can render them unsuited for complicated real-life scenarios. In contrast, NSFF [24] and DynIBaR [26] get rid of the canonical frame by computing the motion field of the whole scene. While these methods can deliver satisfactory results, the training time is usually in the order of hours or even days. + +Another type of method leverages the compactness of the neural representation to model the 4D spatio-temporal information directly. Several works [7, 13, 52] project the 4D input into multiple voxelized lower-dimensional feature spaces to avoid large memory consumption, which improves the efficiency of the optimization. Song et al. [54] propose a time-dependent sliding window strategy for accumulating the voxel features. Instead of only targeting novel view synthesis, several approaches [26, 68, 71] decompose the scene into dynamic objects and static background in a self-supervised way, which inspired our work. Other approaches [22, 23, 53] accomplish neural representation-based reconstruction for larger scenes by adding additional supervision such as object masks or optical flow. + +Neural representations for LiDAR scans. Recently, many approaches aim to enhance scene reconstruction using LiDAR data through neural representations. The early work URF [46] leverages LiDAR data as depth supervision to improve the optimization of a neural radiance field. With only LiDAR data as input, Huang et al. [20] achieve novel view synthesis for LiDAR scans with differentiable rendering. Similar to our work, Shine-mapping [73] and EIN-RUL [70] utilize sparse hierarchical feature voxel structures to achieve large-scale 3D mapping. Additionally, the data-driven approach NKSR [18] based on learned kernel regression demonstrates accurate surface reconstruction with noisy LiDAR point cloud as input. Although these approaches perform well in improving reconstruction accuracy and reducing memory consumption, none of them consider the problem of dynamic object interference in real-world environments. 
+ +Static map building and motion detection. In addition to removing moving objects from the voxel map with ray tracing, numerous works [8, 19, 31, 32] try to segment dynamic points from raw LiDAR point clouds. However, these methods require a significant amount of labeled data, which makes it challenging to generalize them to various scenarios or sensors with different scan patterns. In contrast, geometry-based, more heuristic approaches have also produced promising results. Kim et al. [21] solve this problem using the visibility of range images, but their results are still highly affected by the resolution. Lim et al. proposed Erasor [27], which leverages ground fitting as prior + +to achieve better segmentation for dynamic points. More recent approaches [9, 28] extend it to instance level to improve results. However, these methods rely on an accurate ground fitting method, which is mainly designed for autonomous driving scenarios, which cannot be guaranteed in complex unstructured real environments. + +In contrast to the approaches discussed above, we follow recent developments in neural reconstruction and propose a novel scene representation that allows us to capture the spatio-temporal progression of a scene. We represent the time-varying SDF of a scene in an unsupervised fashion, which we exploit to remove dynamic objects and reconstruct accurate meshes of the static scene. + +# 3. Our Approach + +The input of our approach is given by a sequence of point clouds, $S_{1:N} = (S_1,\dots ,S_N)$ , and their corresponding global poses $\mathsf{T}_t\in \mathbb{R}^{4\times 4}$ $t\in [1,N]$ , estimated via scan matching, LiDAR odometry, or SLAM methods [2, 11, 12, 60]. Each scan's point cloud $S_{t} = \{\pmb{s}_{t}^{1},\dots ,\pmb{s}_{t}^{M_{t}}\}$ is a set of points, $\pmb{s}_t^i\in \mathbb{R}^3$ , collected at time $t$ . Given such a sequence of scans $S_{1:N}$ , our approach aims to reconstruct a 4D TSDF of the traversed scene and maintain a static 3D map at the same time. + +In the next sections, we first introduce our spatiotemporal representation and then explain how to optimize it to represent the dynamic and static parts of a point cloud sequence $S_{1:N}$ . + +# 3.1. Map Representation + +The key component of our approach is an implicit neural scene representation that allows us to represent a 4D TSDF of the scene, as well as facilitates the extraction of a static map representation. Our proposed spatio-temporal scene representation is optimized for the given point cloud sequence $S_{1:N}$ such that we can retrieve for an arbitrary point $\pmb{p} \in \mathbb{R}^3$ and time $t \in [1,N]$ the corresponding time-varying signed distances value at that location. + +Temporal representation. We utilize an TSDF to represent the scene, i.e., a function that provides the signed distance to the nearest surface for any given point $\pmb{p} \in \mathbb{R}^3$ . The sign of the distance is positive when the point is in free space or in front of the measured surface and is negative when the point is inside the occupied space or behind the measured surface. + +In a dynamic 3D scene, measuring the signed distance of any coordinate at each moment produces a time-dependent function that captures the signed distance changes over time, see Fig. 2 for an illustration. Additionally, if a coordinate is static throughout the period, the signed distance should remain constant. The key idea of our spatiotemporal scene representation is to fit the time-varying SDF at each point with several basis functions. 
Inspired by Li + +![](images/1eb36f546eaeca0e2636c4e4c378d62bab5463a1c2343ff49a674b673775b880.jpg) +Figure 2. Principle of our 4D TSDF representation: The left figure shows a moving object and a query point $\pmb{p}$ . The one on the right depicts the corresponding signed distance at $\pmb{p}$ over time. At $t_0$ , $\pmb{p}$ 's signed distance is a positive truncated value. When the moving object reaches $\pmb{p}$ at time $t_1$ , $\pmb{p}$ is inside the object and its signed distance is negative accordingly. At $t_2$ , the moving object moved past $\pmb{p}$ , the signed distance of $\pmb{p}$ gets positive again. + +![](images/7672585101abf831228b543a099278c54743c320ac8da2480fd27fd0196a141a.jpg) + +et al. [26]'s representation of moving point trajectories, we exploit $K$ globally shared basis functions $\phi_k: \mathbb{R} \mapsto \mathbb{R}$ . Using these basis functions $\phi_k(t)$ , we model the time-varying TSDF $F(\pmb{p}, t)$ that maps a location $\pmb{p} \in \mathbb{R}^3$ at time $t$ to a signed distance as follows: + +$$ +F (\boldsymbol {p}, t) = \sum_ {k = 1} ^ {K} w _ {\boldsymbol {p}} ^ {k} \phi_ {k} (t), \tag {1} +$$ + +where $w_{\pmb{p}}^{k} \in \mathbb{R}$ are estimable location-dependent coefficients. In line with previous works [26, 62], we initialize the basis functions with discrete cosine transform (DCT) basis functions: + +$$ +\phi_ {k} (t) = \cos \left(\frac {\pi}{2 N} (2 t + 1) (k - 1)\right). \tag {2} +$$ + +The first basis function for $k = 1$ is time-independent as $\phi_1(t) = 1$ . During the training process, we fix $\phi_1(t)$ and determine the other basis functions by backpropagation. We consider $\phi_1(t)$ 's corresponding weight $w_{\pmb{p}}^{1}$ as the static SDF value of the point $\pmb{p}$ . Hence, $F(\pmb{p}, t)$ consists of its static background value, i.e., $w_{\pmb{p}}^{1}\phi_{1}(t) = w_{\pmb{p}}^{1}$ , and the weighted sum of dynamic basis functions $\phi_2(t), \ldots, \phi_K(t)$ . + +As the basis functions $\phi_1(t),\ldots ,\phi_K(t)$ are shared between all points in the scene, we need to optimize the location-dependent weights that are implicitly represented in our spatial representation. + +Spatial representation. To achieve accurate scene reconstruction while maintaining memory efficiency, we employ a multi-resolution sparse voxel grid to store spatial geometric information. + +First, we accumulate the input point clouds, $S_{1}, \ldots, S_{N}$ based on their poses $T_{1}, \ldots, T_{N}$ computed from LiDAR odometry and generate a hierarchy of voxel grids around points to ensure complete coverage in 3D. We use a spatial hash table for fast retrieval of the resulting voxels that are only initialized if points fall into a voxel. + +![](images/e0e986dd7c3c1d3401d87bc8d749bc80e7e1d37617c3cc9b641e7ca971ba76a0.jpg) +Figure 3. Overview of querying a TSDF value in our 4D map representation. For querying a point $\pmb{p}$ at $t_i$ and $t_{i + 1}$ , we first retrieve each corner's feature in $\mathcal{F}^l$ of the voxel that $\pmb{p}$ is located in and obtain the fused feature $\pmb{f}_{\pmb{p}}$ by trilinear interpolation. Then, we feed $\pmb{f}_{\pmb{p}}$ into the decoder $D_{\mathrm{mlp}}$ and take the output as the weights of different basis functions $\phi_1(t),\ldots ,\phi_K(t)$ . Finally, we calculate the weighted sum of basis functions' values at $t_i$ and $t_{i + 1}$ to get their respective SDF results. For simplicity, we only illustrate one level of hashed feature grids. 
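To make the temporal part of this representation concrete, the following is a minimal sketch of the shared basis functions of Eq. (2) and the weighted sum of Eq. (1), assuming PyTorch. The function name `time_varying_tsdf` and the randomly drawn weights are purely illustrative: in the actual method the per-point weights are produced by the feature grids and the shared decoder, and this is not the authors' released code.

```python
import torch

N, K = 140, 32  # number of frames and number of shared basis functions (cf. Tab. 1)

# DCT initialization of the basis functions, Eq. (2): phi_k(t) = cos(pi/(2N)(2t+1)(k-1)).
# A 0-based frame index t is an illustrative choice.
t = torch.arange(N, dtype=torch.float32)
k = torch.arange(K, dtype=torch.float32)                 # corresponds to k - 1 = 0, ..., K-1
phi = torch.cos(torch.pi / (2.0 * N) * (2.0 * t[None, :] + 1.0) * k[:, None])  # (K, N)

static_phi = phi[:1]                                     # phi_1(t) = 1, kept fixed
dynamic_phi = torch.nn.Parameter(phi[1:].clone())        # phi_2, ..., phi_K, optimized

def time_varying_tsdf(weights: torch.Tensor, frame: int) -> torch.Tensor:
    """Eq. (1): F(p, t) = sum_k w_p^k phi_k(t) for a batch of per-point weights (B, K).
    weights[:, 0] multiplies the fixed constant basis and is the static SDF value w_p^1."""
    phi_t = torch.cat([static_phi[:, frame], dynamic_phi[:, frame]])  # (K,)
    return weights @ phi_t                                            # (B,)

# Toy usage: random weights stand in for the decoder output of the spatial representation.
w = 0.1 * torch.randn(4, K)
print(time_varying_tsdf(w, frame=10).shape)              # torch.Size([4])
```

Because $\phi_1$ stays fixed at 1, reading off `weights[:, 0]` directly gives the static SDF value that is later thresholded for dynamic object segmentation.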
+ +Similar to Instant-NGP [36], we save a feature vector $\pmb{f} \in \mathbb{R}^{D}$ at each corner vertex of the voxel grid in each resolution level, where we denote as $\mathcal{F}^l$ the level-wise corner features. We compute the feature vector $\pmb{f}_{\pmb{p}} \in \mathbb{R}^{D}$ for given query point $\pmb{p} \in \mathbb{R}^3$ inside the hierarchical grid as follows: + +$$ +\boldsymbol {f} _ {\boldsymbol {p}} = \sum_ {l = 1} ^ {L} \operatorname {i n t e r p o l a t e} (\boldsymbol {p}, \mathcal {F} ^ {l}), \tag {3} +$$ + +where interpolate is the trilinear interpolation for a given point $\pmb{p}$ using the corner features $\mathcal{F}^l$ at level $l$ . + +Then, we decode the interpolated feature vector $\pmb{f}_{\pmb{p}}$ into the desired weights $\pmb{w}_{\pmb{p}} = (w_{p}^{1},\dots ,w_{p}^{K})\in \mathbb{R}^{K}$ by a globally shared multi-layer perceptron (MLP) $D_{\mathrm{mlp}}$ : + +$$ +\boldsymbol {w} _ {\boldsymbol {p}} = D _ {\operatorname {m l p}} \left(\boldsymbol {f} _ {\boldsymbol {p}}\right). \tag {4} +$$ + +As every step is differentiable, we can optimize the multi-resolution feature grids $\mathcal{F}^l$ , the MLP decoder $D_{\mathrm{mlp}}$ and the values of the basis functions jointly by gradient descent once we have training data and corresponding target values. The SDF querying process is illustrated in Fig. 3. + +# 3.2. Objective Function + +We take samples along the rays from the input scans $S_{t}$ to collect training data. Each scan frame $S_{t}$ corresponds to a moment $t$ in time, so we gather four-dimensional data points $(\pmb {q},t)$ via sampling along the ray from the scan origin $\pmb{o}_t\in \mathbb{R}^3$ to a point $s_t^i\in S_t$ . We can represent the sampled points $q_{s}^{i}$ along the ray as $q_{s}^{i} = o_{t} + \lambda (s_{t}^{i} - o_{t})$ . By setting a truncation threshold $\tau$ , we split the ray into two regions, at the surface and in the free-space: + +$$ +\mathcal {T} _ {\text {s u r f}} ^ {i} = \left\{\boldsymbol {q} _ {s} ^ {i} \mid \lambda \in (1 - \bar {\tau}, 1 + \bar {\tau}) \right\} \tag {5} +$$ + +$$ +\mathcal {T} _ {\text {f r e e}} ^ {i} = \left\{\boldsymbol {q} _ {s} ^ {i} \mid \lambda \in (0, 1 - \bar {\tau}) \right\}, \tag {6} +$$ + +where $\bar{\tau} = \tau (\| \pmb{s}_t^i -\pmb {o}_t\|)^{-1}$ . Thus, $\mathcal{T}_{\mathrm{surf}}^i$ represents the region close to the endpoint $s_t^i\in S_t$ , and $\mathcal{T}_{\mathrm{free}}^i$ is the region + +in the free space. We uniformly sample $M_{s}$ and $M_{f}$ points from $\mathcal{T}_{\mathrm{surf}}^i$ and $\mathcal{T}_{\mathrm{free}}^i$ separately. We obtain two sets $\mathcal{D}_{\mathrm{surf}}$ and $\mathcal{D}_{\mathrm{free}}$ of samples by sampling over all scans. Unlike prior work [20, 46] that use differentiable rendering to calculate the depth by integration along the ray, we design different losses for $\mathcal{D}_{\mathrm{surf}}$ and $\mathcal{D}_{\mathrm{free}}$ to supervise the 4D TSDF directly. + +Near Surface Loss. Since the output of our 4D map is the signed distance value $\hat{d} = F(\pmb {p},t)$ at an arbitrary position $\pmb {p}\in \mathbb{R}^3$ in time $t\in [1,N]$ , we expect that the predicted value $\hat{d}$ does not change over time for static points. However, this cannot be guaranteed if we use the projective distance $d_{\mathrm{surf}}$ to the surface along the ray direction directly as the target value, since the projective distance would change over time due to the change of view direction by the moving sensor, even in a static scene. 
Thus, for the sampled data in $\mathcal{D}_{\mathrm{surf}}$ , i.e., the sampled points near the surface, we can only obtain reliable information about the sign of the TSDF value of these points, which should be positive if the point is before the endpoint and negative if the point is behind. In addition, for a sampled point in front of the endpoint, its projective signed distance $d_{\mathrm{surf}}$ should be the upper bound of its actual signed distance value. And for sampled points behind the endpoint, $d_{\mathrm{surf}}$ should be the lower bound. + +We design a piecewise loss $L_{\mathrm{surf}}$ to supervise the sampled points near the surface: + +$$ +L _ {\text {s u r f}} (\hat {d}, d _ {\text {s u r f}}) = \left\{ \begin{array}{c l} | \hat {d} | & \text {i f} \hat {d} d _ {\text {s u r f}} < 0 \\ | \hat {d} - d _ {\text {s u r f}} | & \text {i f} \hat {d} d _ {\text {s u r f}} > d _ {\text {s u r f}} ^ {2} \\ 0 & \text {o t h e r w i s e} \end{array} , \right. \tag {7} +$$ + +where $\hat{d} = F(\pmb {q},t)$ is the predicted value from our map for a sample point $\pmb {q}\in \mathcal{D}_{\mathrm{surf}}$ and $d_{\mathrm{surf}}$ is its corresponding projective signed distance for that sampled point in the corresponding scan $\mathcal{S}_t$ . This loss punishes only a prediction when the sign is wrong or its absolute value is larger than the absolute value of $d_{\mathrm{surf}}$ . For a query point exactly on the surface, i.e., $d_{\mathrm{surf}} = 0$ , $L_{\mathrm{surf}}$ is simply the L1 loss. + +To calculate an accurate signed distance value and maintain the consistency of constraints for static points from different observations, we use the natural property of signed distance function to constraint the length of the gradient vector for samples inside $\mathcal{D}_{\mathrm{surf}}$ , which is called Eikonal regularization [15, 38]: + +$$ +L _ {\text {e i k o n a l}} (\boldsymbol {p}, t) = \left(\left\| \frac {\partial F (\boldsymbol {p} , t)}{\partial \boldsymbol {p}} \right\| - 1\right) ^ {2}, \tag {8} +$$ + +Inspired by Neuralangelo [25], we manually add perturbations to compute more robust gradient vectors instead of using automatic differentiation, which means we compute numerical gradients: + +$$ +\nabla_ {x} F (\boldsymbol {p}, t) = \frac {F (\boldsymbol {p} + \epsilon_ {\boldsymbol {x}} , t) - F (\boldsymbol {p} - \epsilon_ {\boldsymbol {x}} , t)}{2 \epsilon}, \tag {9} +$$ + +where $\nabla_x F(\pmb{p}, t)$ is the component of the gradient $\frac{\partial F(\pmb{p}, t)}{\partial \pmb{p}}$ on the $x$ axis, and $\epsilon_x = (\epsilon, 0, 0)^{\top}$ is the added perturbation. We apply the same operation on $y$ and $z$ axes to calculate the numerical gradient. Furthermore, in order to get faster convergence at the beginning and ultimately recover the rich geometric details, we first set a large $\epsilon$ and gradually reduce it during the training process. + +Free Space Loss. As we tackle the problem of mapping in dynamic environments, we cannot simply accumulate point clouds and then calculate accurate supervision of signed distance value via nearest neighbor search. Therefore, we use a L1 loss $L_{\mathrm{free}}$ to constrain the signed distance prediction $\hat{d}$ of the free space points, i.e., $\pmb{p} \in \mathcal{D}_{\mathrm{free}}$ : + +$$ +L _ {\text {f r e e}} (\hat {d}) = | \hat {d} - \tau |, \tag {10} +$$ + +where $\tau$ is the truncation threshold we used in Sec. 3.2. 
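For reference, the following is a minimal sketch, assuming PyTorch, of the piecewise near-surface loss of Eq. (7), the Eikonal regularization with central-difference (numerical) gradients of Eqs. (8)-(9), and the L1 loss applied to free-space samples. The function names and toy inputs are illustrative, and this is not the authors' implementation.

```python
import torch

def surf_loss(d_pred: torch.Tensor, d_surf: torch.Tensor) -> torch.Tensor:
    """Eq. (7): penalize a wrong sign, or an absolute value exceeding |d_surf|."""
    loss = torch.zeros_like(d_pred)
    loss = torch.where(d_pred * d_surf < 0, d_pred.abs(), loss)
    loss = torch.where(d_pred * d_surf > d_surf ** 2, (d_pred - d_surf).abs(), loss)
    return loss.mean()

def free_loss(d_pred: torch.Tensor, tau: float) -> torch.Tensor:
    """L1 loss pulling free-space predictions towards the truncation value tau."""
    return (d_pred - tau).abs().mean()

def eikonal_loss(sdf_fn, p: torch.Tensor, t: torch.Tensor, eps: float = 1e-3) -> torch.Tensor:
    """Eqs. (8)-(9): unit-gradient-norm regularization, with the gradient of F(p, t)
    approximated by central differences of step eps along each axis."""
    grads = []
    for axis in range(3):
        offset = torch.zeros_like(p)
        offset[:, axis] = eps
        grads.append((sdf_fn(p + offset, t) - sdf_fn(p - offset, t)) / (2.0 * eps))
    grad_norm = torch.stack(grads, dim=-1).norm(dim=-1)
    return ((grad_norm - 1.0) ** 2).mean()

# Toy check with the SDF of a unit sphere, whose gradient norm is 1 away from the origin.
sphere_sdf = lambda p, t: p.norm(dim=-1) - 1.0
points = torch.rand(8, 3) + 0.5
print(eikonal_loss(sphere_sdf, points, t=torch.zeros(8)))                       # close to 0
print(surf_loss(torch.tensor([0.2, -0.1, 0.4]), torch.tensor([0.3, 0.3, 0.3])))
print(free_loss(torch.tensor([0.45, 0.5]), tau=0.5))
```

Using `torch.where` keeps the two mutually exclusive cases of Eq. (7) in a single differentiable expression; the per-sample terms are then averaged and weighted as in the total loss described in the text.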
+ +Thanks to our spatio-temporal representation, a single query point can get both, static and dynamic TSDF values. Thus, for some regions that are determined to be free space, we can directly add constraints to their static TSDF values. + +We divide the free space points $\mathcal{D}_{\mathrm{free}}$ into dense and sparse subset $\mathcal{D}_{\mathrm{dense}}$ and $\mathcal{D}_{\mathrm{sparse}}$ based on a threshold $r_{\mathrm{dense}}$ for the distance from the free space point sampled at time $t$ to the scan origin $\pmb{o}_t$ . For each point $\pmb{p} \in \mathcal{D}_{\mathrm{dense}}$ , we find the nearest neighbor $\pmb{n_p}$ in the corresponding scan $\mathcal{S}_t$ , i.e., $\pmb{n_p} = \arg \min_{\pmb{q} \in \mathcal{S}_t} \| \pmb{p} - \pmb{q} \|_2$ . Let $\mathcal{D}_{\mathrm{certain}} = \{\pmb{p} \in \mathcal{D}_{\mathrm{dense}} | \| \pmb{p} - \pmb{n_p} \| > \tau\}$ be the points that we consider in the certain free space. Then, we supervise $\pmb{p} \in \mathcal{D}_{\mathrm{certain}}$ by its static signed distance value directly: + +$$ +L _ {\text {c e r t a i n}} (\boldsymbol {p}) = \left| w _ {\boldsymbol {p}} ^ {1} - \tau \right|, \tag {11} +$$ + +where $w_{p}^{1}$ is the first weight of the decoder's output. + +![](images/9480712a742e6610fc35ba16a2dd5d0cc3f35865c2d52ab271f969545f97912f.jpg) +(a) + +![](images/eff7d68930d37d785d5d9a4d83515144ac33853e6dff29b570a5ac20ff81e6fb.jpg) +(c) +Figure 4. Reconstructed TSDF for KITTI dataset [14]: Subfigures (a) and (b) are the input neighboring frames. Correspondingly, (c) and (d) are horizontal TSDF slices queried from our 4D map. Note that we only display the TSDF values that are less than $0.3\mathrm{m}$ . + +![](images/337a268be407d9a8491494d3f5d2f09f07ea4122f94e1ab921ed626c3c06785c.jpg) + +![](images/47f30848224463c1004b7eef8b60395cca84232bc99b7c86ca3c3ee443b14aed.jpg) +(b) +(d) + +In summary, the final loss $L_{\mathrm{total}}$ is given by: + +$$ +\begin{array}{l} L _ {\text {t o t a l}} = \frac {1}{| \mathcal {D} _ {\text {s u r f}} |} \sum_ {(\boldsymbol {p}, t) \in \mathcal {D} _ {\text {s u r f}}} L _ {\text {s u r f}} (\hat {d}, d _ {\text {s u r f}}) + \lambda_ {e} L _ {\text {e i k o n a l}} (\boldsymbol {p}, t) \\ + \frac {\lambda_ {f}}{| \mathcal {D} _ {\text {f r e e}} |} \sum_ {(p, t) \in \mathcal {D} _ {\text {f r e e}}} L _ {\text {f r e e}} (\hat {d}) \\ + \frac {\lambda_ {c}}{\left| \mathcal {D} _ {\text {c e r t a i n}} \right|} \sum_ {(\boldsymbol {p}, t) \in \mathcal {D} _ {\text {c e r t a i n}}} L _ {\text {c e r t a i n}} (\boldsymbol {p}), \tag {12} \\ \end{array} +$$ + +where $\hat{d} = F(\pmb{p}, t)$ is the predicted signed distance at the sample position $\pmb{p}$ at time $t$ and $d_{\mathrm{surf}}$ is the projective signed distance of sample $\pmb{p}$ . With the above loss function and data sampling strategy, we train our map offline until convergence. In Fig. 4, we show TSDF slices obtained using our optimized 4D map at different times. + +One application of our 4D map representation is dynamic object segmentation. For a point $\pmb{p}$ in the input scans $S_{1:N}$ , its static signed distance value $w_{\pmb{p}}^{1}$ can be obtained by a simple query. If $\pmb{p}$ belongs to the static background, it should have $w_{\pmb{p}}^{1} = 0$ . Therefore, we simply set a threshold $d_{\mathrm{static}}$ and regard a point as dynamic if $w_{\pmb{p}}^{1} > d_{\mathrm{static}}$ . + +# 3.3. Implementation Details + +As hyperparameters of our approach, we use the values listed in Tab. 1 in all LiDAR experiments. 
Additional parameters are determined by the characteristics of the sensor and the dimensions of the scene. For instance, in the reconstruction of autonomous driving scenes, like KITTI, we set the highest resolution for the feature voxels to $0.3\mathrm{m}$ . The truncation distance is set to $\tau = 0.5\mathrm{m}$ , and the dense area split threshold $r_{\mathrm{dense}} = 15\mathrm{m}$ . Regarding training time, it takes 12 minutes to train 140 frames from the KITTI dataset using a single Nvidia Quadro RTX 5000. + +# 4. Experiments + +In this section, we show the effectiveness of our proposed approach with respect to two aspects: (1) Static mapping quality: The static TSDF built by our method allows us to extract a surface mesh using marching cubes [29]. We compare this extracted mesh with the ground truth mesh to evaluate the reconstruction. (2) Dynamic object segmentation: As mentioned above, our method can segment out the dynamic objects in the input scans. We use point-wise dynamic object segmentation accuracy to evaluate the results. + +# 4.1. Static Mapping Quality + +Datasets. We select two datasets collected in dynamic environments for quantitative evaluation. One is the synthetic dataset ToyCar3 from Co-Fusion [47], which provides accurate depth images and accurate masks of dynamic objects rendered using Blender, but also depth images with added noise. For this experiment, we select 150 frames from the whole sequence, mask out all dynamic objects in the accurate depth images, and accumulate background static points as the ground-truth static map. The original noisy depth images are used as the input for all methods. + +Furthermore, we use the Newer College [45] dataset as the real-world dataset, which is collected using a 64-beam LiDAR. Compared with synthetic datasets, it contains more uncertainty from measurements and pose estimates. We select 1,300 frames from the courtyard part for testing and this data includes a few pedestrians as dynamic objects. This dataset offers point clouds obtained by a high-precision terrestrial laser scanner that can be directly utilized as ground truth to evaluate the mapping quality. + +Metric and Baselines. We report the reconstruction accuracy, completeness, the Chamfer distance, and the F1-score. Further details on the computation of the metrics can be found in the supplement. + +We compare our method with several different types of state-of-the-art methods: (i) the traditional TSDF-fusion method, VDBfusion [59], which uses space carving to eliminate the effects of dynamic objects, (ii) the data-driven-based method, neural kernel surface reconstruction (NKSR) [18], and (iii) the neural representation based 3D mapping approach, SHINE-mapping [73]. + +For NKSR [18], we use the default parameters provided by Huang et al. with their official implementation. To ensure a fair comparison with SHINE-mapping, we adopt an equal number of free space samples (15 samples), aligning with our method for consistency. + +For the ToyCar3 dataset, we set VDB-Fusion's resolution to $1\mathrm{cm}$ . To have all methods with a similar memory consumption, we set the resolution of SHINE-mapping's leaf feature voxel to $2\mathrm{cm}$ , and our method's highest resolution accordingly to $2\mathrm{cm}$ . For the Newer College dataset, we set the resolution to $10\mathrm{cm}$ , $30\mathrm{cm}$ , and $30\mathrm{cm}$ respectively. + +Table 1. Hyperparameters of our approach. + +
| Parameter | Value | Description |
| --- | --- | --- |
| $L$ | 2 | number of feature voxel levels |
| $D$ | 8 | length of the feature vectors |
| $K$ | 32 | number of basis functions |
| $D_{\mathrm{mlp}}$ | 2 × 64 | layers and width of the MLP decoder |
| $M_s$ | 5 | number of surface area samples |
| $M_f$ | 15 | number of free space samples |
| $\lambda_e$ | 0.02 | weight for the Eikonal loss |
| $\lambda_f$ | 0.25 | weight for the free space loss |
| $\lambda_c$ | 0.2 | weight for the certain free space loss |
+ +Table 2. Quantitative results of the reconstruction quality on ToyCar3. We report the distance error metrics, namely completion, accuracy and Chamfer-L1 in cm. Additionally, we show the F-score in % with a 1 cm error threshold. + +
| Method | Comp. ↓ | Acc. ↓ | C-L1 ↓ | F-score ↑ |
| --- | --- | --- | --- | --- |
| VDB-Fusion [59] | 0.574 | 0.481 | 0.528 | 97.95 |
| NKSR [18] | 0.526 | 2.809 | 1.667 | 89.54 |
| SHINE-mapping [73] | 0.583 | 0.626 | 0.605 | 98.01 |
| Ours | 0.438 | 0.468 | 0.452 | 98.35 |
+ +Results. The quantitative results for synthetic dataset ToyCar3 and real-world dataset Newer College are presented in Tab. 2 and Tab. 3, respectively. We also show the extracted meshes from all methods in Fig. 5 and Fig. 6. + +Our method outperforms the baselines in terms of Completeness and Chamfer distance for both datasets (cf. Fig. 5 and Fig. 6). Regarding the accuracy, SHINE-mapping and VDB-Fusion can filter part of high-frequency noise by fusion of multiple frames, resulting in better performance on noisy Newer College dataset. In comparison, our method considers every scan as accurate to store 4D information, which makes it more sensitive to measurement noise. On the ToyCar3 dataset, both our method and VDB-Fusion successfully eliminate all moving objects. However, on the Newer College dataset, VDB-Fusion incorrectly eliminates the static tree and parts of the ground, resulting in poor completeness shown in Tab. 3. SHINE-mapping eliminates dynamic pedestrians on the Newer College dataset but retains a portion of the dynamic point cloud on the ToyCar3 dataset, which has a larger proportion of dynamic objects, leading to poorer accuracy in Tab. 2. NKSR performs the worst accuracy because it is unable to eliminate dynamic objects, which means it's not suitable to apply NKSR in dynamic real-world scenes directly. + +# 4.2. Dynamic Object Segmentation + +Datasets. For dynamic object segmentation, we use the KTH-Dynamic-Benchmark [72] for evaluation, which in- + +![](images/3bf71086da1a1b85b773f0e5e204f5d7b91b2eca6c3bf9a953733509291ee25a.jpg) +(a) Merged input scans + +![](images/1c779af19739d9302162ada79cefa43d162815e520617177d0d1635c4de1fc86.jpg) +(b) Ours + +![](images/0de7f7c019dd294888f49a44810c6e34c566c7561b3cc26592fa09f737d4ddb9.jpg) +(c) VDB-Fusion [59] + +![](images/b1d5c0cea9788d0c498e0c84bb6d7636ad5325398ea9d49b8206dbd4d65beb08.jpg) +(d) NKSR [18] + +![](images/c267728fff044577e6303ec989e5b017853a7e9bb385809da6d3fea8c2130214.jpg) +(e) SHINE-mapping [73] + +![](images/76977bb0fe97692b3d565c308af89e8a8a2dca663c7ac697a2d7b4b835810906.jpg) +Figure 5. A comparison of the static mapping results of different methods on the ToyCar3 dataset. There are two dynamic toy cars moving through the scene. Our method can reconstruct the static scene with fine details and eliminate the dynamic car. +(a) Merged input scans + +![](images/91f788a922f82e91fbc40e959d3ee2609248ef5a463a558f2224577ead6d5732.jpg) +(b) Ours +Figure 6. A comparison of the static mapping results of different methods on the Newer College dataset. Several pedestrians are moving through the scene during the data collection. Our method can reconstruct the static scene completely and eliminate the moving pedestrians. Although VDB-Fusion manages to eliminate the pedestrians, it incorrectly removes the tree highlighted in the orange box. + +![](images/953a87e2edc51b31a2d84fe969341705b56b354bc4b4ccb211b5a218e66620cd.jpg) +(c) VDB-Fusion [59] + +![](images/ea17f4283b2069b34a1e533e4b94e9243b58afea03a4b50586f50adfb1dc82eb.jpg) +(d) NKSR [18] + +![](images/72e778c05a486bee3df7a6946756520826d3e8ee21cb3054ceecbcc9fa5e8f74.jpg) +(e) SHINE-mapping [73] + +Table 3. Quantitative results of the reconstruction quality on Newer College. We report the distance error metrics, namely completion, accuracy and Chamfer-L1 in cm. Additionally, we show the F-score in % with a $20\mathrm{cm}$ error threshold. + +
| Method | Comp. ↓ | Acc. ↓ | C-L1 ↓ | F-score ↑ |
| --- | --- | --- | --- | --- |
| VDB-Fusion [59] | 7.32 | 5.99 | 6.65 | 96.68 |
| NKSR [18] | 6.87 | 9.28 | 8.08 | 95.65 |
| SHINE-mapping [73] | 6.80 | 5.86 | 6.33 | 97.67 |
| Ours | 5.85 | 6.49 | 6.17 | 97.50 |
+ +cludes four sequences in total: sequence 00 (frame 4,390 - 4,530) and sequence 05 (frame 2,350 - 2,670) from the KITTI dataset [3, 14], which are captured by a 64-beam LiDAR, one sequence from the Argoverse2 dataset [66] consisting of 575 frames captured by two 32-beam LiDARs, and a semi-indoor sequence captured by a sparser 16-beam LiDAR. All sequences come with corresponding pose files and point-wise dynamic or static labels as the ground truth. It is worth noting that the poses for KITTI 00 and 05 were obtained from SuMa [2] and the pose files for the Semi-indoor sequence come from NDT-SLAM [50]. + +Metric and Baselines. The KTH-Dynamic-Benchmark evaluates the performance of the method by measuring the classification accuracy of dynamic points (DA%), static points (SA%) and also their associated accuracy (AA%) where $AA = \sqrt{DA \cdot SA}$ . The benchmark provides various + +bases such as the state-of-the-art LiDAR dynamic object removal methods – Erasor [27] and Removert [21], as well as the traditional 3D mapping method, Octomap [17, 69], and its modified versions, Octomap with ground fitting and outlier filtering. As SHINE-mapping demonstrates the ability to remove dynamic objects in our static mapping experiments, we also report its result in this benchmark. Additionally, we report the performance of the state-of-the-art online moving object segmentation methods, 4DMOS [31] and its extension MapMOS [32]. As these two methods utilize KITTI sequences 00 and 05 for training, we only show the results of the remaining two sequences. For the parameter setting, we set our method's leaf resolution to $0.3\mathrm{m}$ , and the threshold for segmentation as $d_{\mathrm{static}} = 0.16\mathrm{m}$ . We set the leaf resolution for Octomap to $0.1\mathrm{m}$ . + +Results. The quantitative results of the dynamic object segmentation are shown in Tab. 4. And we depict the accumulated static points generated by different methods in Fig. 7. We can see that our method achieves the best associated accuracy (AA) in three autonomous driving sequences (KITTI 00, KITTI 05, Argoverse2) and vastly outperforms baselines. The supervised learning-based methods 4DMOS and MapMOS do not obtain good dynamic accuracy (DA) due to limited generalizability. Erasor and Octomap tend to over-segment dynamic objects, resulting in poor static accuracy (SA). Removert and SHINE-mapping are too conservative and cannot detect all dynamic objects. Benefiting + +Table 4. Quantitative results of the dynamic object removal quality on the KTH-Dynamic-Benchmark. We report the static accuracy SA, dynamic static DA and the associated accuracy AA. Octomap* refers to the modified Octomap implementation by Zhang et al. [72]. + +
| Method | KITTI Seq. 00 (SA / DA / AA) | KITTI Seq. 05 (SA / DA / AA) | Argoverse2 (SA / DA / AA) | Semi-Indoor (SA / DA / AA) |
| --- | --- | --- | --- | --- |
| Octomap [17] | 68.05 / 99.69 / 82.37 | 66.28 / 99.24 / 81.10 | 65.91 / 96.70 / 79.84 | 88.97 / 82.18 / 85.51 |
| Octomap* [72] | 93.06 / 98.67 / 95.83 | 93.54 / 92.48 / 93.01 | 82.66 / 82.44 / 82.55 | 96.79 / 73.50 / 84.34 |
| Removert [21] | 99.44 / 41.53 / 64.26 | 99.42 / 22.28 / 47.06 | 98.97 / 31.16 / 55.53 | 99.96 / 12.15 / 34.85 |
| Erasor [27] | 66.70 / 98.54 / 81.07 | 69.40 / 99.06 / 82.92 | 77.51 / 99.18 / 87.68 | 94.90 / 66.26 / 79.30 |
| SHINE [73] | 98.99 / 92.37 / 95.63 | 98.91 / 53.27 / 72.58 | 97.66 / 72.62 / 84.21 | 98.88 / 59.19 / 76.51 |
| 4DMOS [31] | – / – / – | – / – / – | 99.94 / 69.33 / 83.24 | 99.99 / 10.60 / 32.55 |
| MapMOS [32] | – / – / – | – / – / – | 99.96 / 85.88 / 92.65 | 99.99 / 4.75 / 21.80 |
| Ours | 99.46 / 98.47 / 98.97 | 99.54 / 98.36 / 98.95 | 99.17 / 95.91 / 97.53 | 94.17 / 72.79 / 82.79 |
+ +![](images/0d4071b0915fae6a9433525e2edf75f04011427bea7e98c0651858af1529c7aa.jpg) + +![](images/2f552eab73051f3695c1c0c715dd5528566af4d6d212b402992088910f586f48.jpg) + +![](images/38d56d63475f86fc99982597abe589fa53938c1bd40edd2b5bd8216e4cefb41a.jpg) + +![](images/e787023611604dd5b37c4ec3cbb1ef0fb010461fd659e1e768439ddac27eb2a9.jpg) + +![](images/54f569bf883300115200d5a4baecd3327c09bc8e5d043ed2319c01ec309113e2.jpg) + +![](images/923ca0b2ac3e5ccc602f8336c0af5359b2e4d3620d7455893ff41f150ccf460f.jpg) +Figure 7. Comparison of dynamic object removal results produced by our proposed method and three baseline methods on the Argoverse2 data sequence of the KTH-benchmark. We show the bird's eye view on the first row and the zoomed view from the blue frustum shown in (a) on the second row. For the ground truth results in (a), the dynamic objects are shown in red. We only show the static points of ground truth for clearer comparison in zoomed view (f). We highlight the over-segmented parking car and sign by Erasor and the undetected moving vehicle by Removert. + +![](images/ce1802da6bd36c4e27dabd16dd9cd3a1ef92f182496dffc1fc742e308e704bea.jpg) + +![](images/fd50e0b220aaf97d5a0ae1d9ab88fef81a6232dd4a69e3a3a6d20b60585a8cd7.jpg) + +![](images/cda7541e3b937718bfaa2d93ff7156a58547e843f0ccd83fb491cbdec3354be0.jpg) + +![](images/d01e5ba088140fc580080acc67070bd61f654c0500118004a84a45b80490b448.jpg) + +from the continuity and large capacity of the 4D neural representation, we strike a better balance between preserving static background points and removing dynamic objects. + +It is worth mentioning again that our method does not rely on any pre-processing or post-processing algorithm such as ground fitting, outlier filtering, and clustering, but also does not require labels for training. + +# 5. Conclusion + +In this paper, we propose a 4D implicit neural map representation for dynamic scenes that allows us to represent the TSDF of static and dynamic parts of a scene. For this purpose, we use a hierarchical voxel-based feature representation that is then decoded into weights for basis functions to represent a time-varying TSDF that can be queried at arbitrary locations. For learning the representation from a se + +quence of LiDAR scans, we design an effective data sampling strategy and loss functions. Equipped with our proposed representation, we experimentally show that we are able to tackle the challenging problems of static mapping and dynamic object segmentation. More specifically, our experiments show that our method has the ability to accurately reconstruct 3D maps of the static parts of a scene and can completely remove moving objects at the same time. + +Limitations. While our method achieves compelling results, we have to acknowledge that we currently rely on estimated poses by a separate SLAM approach, but also cannot apply our approach in an online fashion. However, we see this as an avenue for future research into joint incremental mapping and pose estimation. + +Acknowledgements. We thank Benedikt Mersch for the fruitful discussion and for providing experiment baselines. + +# References + +[1] Ioan A. Barsan, Peidong Liu, Marc Pollefeys, and Andreas Geiger. Robust Dense Mapping for Large-Scale Dynamic Environments. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018. 1, 2 +[2] Jens Behley and Cyril Stachniss. Efficient Surfel-Based SLAM using 3D Laser Range Data in Urban Environments. In Proc. of Robotics: Science and Systems (RSS), 2018. 
3, 7 +[3] Jens Behley, Martin Garbade, Aandres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Juergen Gall. SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2019. 7 +[4] Peter Biber and Tom Duckett. Dynamic Maps for Long-Term Operation of Mobile Service Robots. In Proc. of Robotics: Science and Systems (RSS), 2005. 1, 2 +[5] Cesar Cadena, Luca Carlone, Henry Carrillo, Yasir Latif, Davide Scaramuzza, Jose Neira, Ian Reid, and John J. Leonard. Past, Present, and Future of Simultaneous Localization And Mapping: Towards the Robust-Perception Age. IEEE Trans. on Robotics (TRO), 32(6):1309-1332, 2016. 1, 2 +[6] Hongrui Cai, Wanquan Feng, Xuetao Feng, Yan Wang, and Juyong Zhang. Neural surface reconstruction of dynamic scenes with monocular rgb-d camera. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2022. 2 +[7] Ang Cao and Justin Johnson. HexPlane: A Fast Representation for Dynamic Scenes. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2 +[8] Xieyuanli Chen, Shijie Li, Benedikt Mersch, Louis Wiesmann, Juergen Gall, Jens Behley, and Cyril Stachniss. Moving Object Segmentation in 3D LiDAR Data: A Learning-based Approach Exploiting Sequential Data. IEEE Robotics and Automation Letters (RA-L), 6(4):6529-6536, 2021. 2 +[9] Xieyuanli Chen, Benedikt Mersch, Lucas Nunes, Rodrigo Marcuzzi, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Automatic Labeling to Generate Training Data for Online LiDAR-Based Moving Object Segmentation. IEEE Robotics and Automation Letters (RA-L), 7(3):6107-6114, 2022. 3 +[10] Xu Chen, Tianjian Jiang, Jie Song, Max Rietmann, Andreas Geiger, Michael J. Black, and Otmar Hilliges. Fast-snarf: A fast deformer for articulated neural fields. IEEE Trans. on Pattern Analysis and Machine Intelligence (TPAMI), 45(10): 11796-11809, 2023. 2 +[11] Pierre Dellenbach, Jean-Emmanuel Deschaud, Bastien Jacquet, and Francois Goulette. CT-ICP Real-Time Elastic LiDAR Odometry with Loop Closure. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2022. 3 +[12] Jean-Emmanuel Deschaud. IMLS-SLAM: scan-to-model matching based on 3D data. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018. 3 +[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik R. Warburg, Benjamin Recht, and Angjoo Kanazawa. K-Planes: Explicit Radiance Fields in Space, Time, and Appearance. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2 + +[14] Andreas Geiger, Peter Lenz, and Raquel Urtasun. Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite. In Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2012. 5, 7 +[15] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit Geometric Regularization for Learning Shapes. In Proc. of the Intl. Conf. on Machine Learning (ICML), 2020. 5 +[16] Dirk Hähnel, Dirk Schulz, and Wolfram Burgard. Mobile robot mapping in populated environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2002. 1, 2 +[17] Armin Hornung, Kai M. Wurm, Maren Bennewitz, Cyril Stachniss, and Wolfram Burgard. OctoMap: An Efficient Probabilistic 3D Mapping Framework Based on Octrees. Autonomous Robots, 34(3):189-206, 2013. 1, 2, 7, 8 +[18] Jiahui Huang, Zan Gojcic, Matan Atzmon, Or Litany, Sanja Fidler, and Francis Williams. Neural Kernel Surface Reconstruction. In Proc. of the IEEE/CVF Conf. 
on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 6, 7 +[19] Shengyu Huang, Zan Gojcic, Jiahui Huang, Andreas Wieser, and Konrad Schindler. Dynamic 3D Scene Analysis by Point Cloud Accumulation. In Proc. of the Europ. Conf. on Computer Vision (ECCV), 2022. 2 +[20] Shengyu Huang, Zan Gojcic, Zian Wang, Francis Williams, Yoni Kasten, Sanja Fidler, Konrad Schindler, and Or Litany. Neural LiDAR Fields for Novel View Synthesis. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2023. 2, 4 +[21] Giseop Kim and Ayoung Kim. Remove, Then Revert: Static Point Cloud Map Construction Using Multiresolution Range Images. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020. 2, 7, 8 +[22] Xin Kong, Shikun Liu, Marwan Taher, and Andrew J. Davison. vMAP: Vectorised Object Mapping for Neural Field SLAM. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2 +[23] Abhijit Kundu, Kyle Genova, Xiaoqi Yin, Alireza Fathi, Caroline Pantofaru, Leonidas Guibas, Andrea Tagliasacchi, Frank Dellaert, and Thomas Funkhouser. Panoptic neural fields: A semantic object-aware neural scene representation. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[24] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[25] Zhaoshuo Li, Thomas Müller, Alex Evans, Russell H Taylor, Mathias Unberath, Ming-Yu Liu, and Chen-Hsuan Lin. Neuralangelo: High-fidelity neural surface reconstruction. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 5 +[26] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. DynIBaR: Neural Dynamic Image-Based Rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3 + +[27] Hyungtae Lim, Sungwon Hwang, and Hyun Myung. ERASOR: Egocentric Ratio of Pseudo Occupancy-Based Dynamic Object Removal for Static 3D Point Cloud Map Building. IEEE Robotics and Automation Letters (RA-L), 6(2): 2272-2279, 2021. 2, 7, 8 +[28] Hyungtae Lim, Lucas Nunes, Benedikt Mersch, Xieyuanli Chen, Jens Behley, and Cyril Stachniss. ERASOR2: Instance-Aware Robust 3D Mapping of the Static World in Dynamic Scenes. In Proc. of Robotics: Science and Systems (RSS), 2023. 3 +[29] William E. Lorensen and Harvey E. Cline. Marching Cubes: a High Resolution 3D Surface Construction Algorithm. In Proc. of the Intl. Conf. on Computer Graphics and Interactive Techniques (SIGGRAPH), 1987. 2, 6 +[30] John McCormac, Ankur Handa, Aandrew J. Davison, and Stefan Leutenegger. SemanticFusion: Dense 3D Semantic Mapping with Convolutional Neural Networks. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2017. 1, 2 +[31] Benedikt Mersch, Xieyuanli Chen, Ignacio Vizzo, Lucas Nunes, Jens Behley, and Cyril Stachniss. Receding Moving Object Segmentation in 3D LiDAR Data Using Sparse 4D Convolutions. IEEE Robotics and Automation Letters (RA-L), 7(3):7503-7510, 2022. 2, 7, 8 +[32] Benedikt Mersch, Tiziano Guadagnino, Xieyuanli Chen, Tiziano, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Building Volumetric Beliefs for Dynamic Environments Exploiting Map-Based Moving Object Segmentation. IEEE Robotics and Automation Letters (RA-L), 8(8):5180-5187, 2023. 
2, 7, 8 +[33] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[34] Daniel Meyer-Delius, Maximilian Beinhofer, and Wolfram Burgard. Occupancy Grid Models for Robot Mapping in Changing Environments. In Proc. of the Conf. on Advancements of Artificial Intelligence (AAAI), 2012. 1, 2 +[35] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In Proc. of the Europ. Conf. on Computer Vision (ECCV), 2020. 2 +[36] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. on Graphics, 41(4): 102:1-102:15, 2022. 2, 4 +[37] Richard A. Newcombe, Shahram Izadi, Otmar Hilliges, David Molyneaux, David Kim, Andrew J. Davison, Pushmeet Kohli, Jamie Shotton, Steve Hodges, and Andrew Fitzgibbon. KinectFusion: Real-Time Dense Surface Mapping and Tracking. In Proc. of the Intl. Symposium on Mixed and Augmented Reality (ISMAR), 2011. 1, 2 +[38] Joseph Ortiz, Alexander Clegg, Jing Dong, Edgar Sucar, David Novotny, Michael Zollhoefer, and Mustafa Mukadam. isdf: Real-time neural signed distance fields for robot per + +ception. In Proc. of Robotics: Science and Systems (RSS), 2022. 5 +[39] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguere, and Cyril Stachniss. ReFusion: 3D Reconstruction in Dynamic Environments for RGB-D Cameras Exploiting Residuals. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2019. 2 +[40] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning Continuous Signed Distance Functions for Shape Representation. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[41] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable Neural Radiance Fields. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2021. 2 +[42] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M. Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. ACM Trans. on Graphics (TOG), 40(6), 2021. +[43] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[44] Sameera Ramasinghe, Violetta Shevchenko, Gil Avraham, and Anton Van Den Hengel. *Blirf: Band limited radiance fields for dynamic scene modeling.* arXiv preprint arXiv:2302.13543, 2023. 2 +[45] Milad Ramezani, Yiduo Wang, Marco Camurri, David Wisth, Matias Mattamala, and Maurice Fallon. The Newer College Dataset: Handheld LiDAR, Inertial and Vision with Ground Truth. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020. 6 +[46] Konstantinos Rematas, Andrew Liu, Pratul P. Srinivasan, Jonathan T. Barron, Andrea Tagliasacchi, Thomas Funkhouser, and Vittorio Ferrari. Urban radiance fields. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4 +[47] Martin Rünz and Lourdes Agapito. 
Co-Fusion: Real-Time Segmentation, Tracking and Fusion of Multiple Objects. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2017. 1, 2, 6 +[48] Martin Runz, Maud Buffier, and Lourdes Agapito. MaskFusion: Real-Time Recognition, Tracking and Reconstruction of Multiple Moving Objects. In Proc. of the Intl. Symposium on Mixed and Augmented Reality (ISMAR), 2018. 1, 2 +[49] Jari Saarinen, Henrik Andreasson, and Achim Lilienthal. Independent Markov Chain Occupancy Grid Maps for Representation of Dynamic Environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2012. 1, 2 +[50] Jari P. Saarinen, Todor Stoyanov, Henrik Andreasson, and Achim J. Lilienthal. Fast 3D Mapping in Highly Dynamic + +Environments Using Normal Distributions Transform Occupancy Maps. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2013. 1, 2, 7 +[51] Renato F. Salas-Moreno, Richard A. Newcombe, Hauke Strasdat, Paul H. Kelly, and Andrew J. Davison. SLAM++: Simultaneous Localisation and Mapping at the Level of Objects. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2013. 1, 2 +[52] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2 +[53] Chonghyuk Song, Gengshan Yang, Kangle Deng, Jun-Yan Zhu, and Deva Ramanan. Total-recon: Deformable scene reconstruction for embodied view synthesis. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2023. 2 +[54] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. NeRF-Player: A Streamable Dynamic Scene Representation with Decomposed Neural Radiance Fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 2 +[55]Cyrill Stachniss and Wolfram Burgard. Mobile Robot Mapping and Localization in Non-Static Environments. In Proc. of the National Conf. on Artificial Intelligence (AAAI), 2005.1,2 +[56]Cyrill Stachniss,John J.Leonard,and Sebastian Thrun. Springer Handbook of Robotics,2nd edition, chapter Chapt.46:Simultaneous Localization and Mapping. Springer Verlag,2016.1,2 +[57] Sebastian Thrun, Wolfram Burgard, and Dieter Fox. Probabilistic Robotics. MIT Press, 2005. 1, 2 +[58] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2021. 2 +[59] Ignacio Vizzo, Tiziano Guadagnino, Jens Behley, and Cyril Stachniss. VDBFusion: Flexible and Efficient TSDF Integration of Range Sensor Data. Sensors, 22(3):1296, 2022. 6, 7 +[60] Ignacio Vizzo, Tiziano Guadagnino, Benedikt Mersch, Louis Wiesmann, Jens Behley, and Cyril Stachniss. KISS-ICP: In Defense of Point-to-Point ICP - Simple, Accurate, and Robust Registration If Done the Right Way. IEEE Robotics and Automation Letters (RA-L), 8(2):1029-1036, 2023. 3 +[61] Aishan Walcott-Bryant, Michael Kaess, Hordur Johannsson, and John J. Leonard. Dynamic Pose Graph SLAM: Long-Term Mapping in Low Dynamic Environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2012. 1, 2 +[62] Chaoyang Wang, Ben Eckart, Simon Lucey, and Orazio Gallo. 
Neural trajectory fields for dynamic novel view synthesis. arXiv preprint arXiv:2105.05994, 2021. 3 + +[63] Chung-Yi Weng, Brian Curless, Pratul P. Srinivasan, Jonathan T. Barron, and Ira Kemelmacher-Shlizerman. HumanNeRF: Free-Viewpoint Rendering of Moving People From Monocular Video. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[64] Thomas Whelan, Stefan Leutenegger, Renato F. Salas-Moreno, Ben Glocker, and Andrew J. Davison. ElasticFusion: Dense SLAM Without A Pose Graph. In Proc. of Robotics: Science and Systems (RSS), 2015. 1, 2 +[65] Louis Wiesmann, Tiziano Guadagnino, Ignacio Vizzo, Nicky Zimmerman, Yue Pan, Haofei Kuang, Jens Behley, and Cyrill Stachniss. LocNDF: Neural Distance Field Mapping for Robot Localization. IEEE Robotics and Automation Letters (RA-L), 8(8):4999-5006, 2023. 2 +[66] Benjamin Wilson, William Qi, Tanmay Agarwal, John Lambert, Jagjeet Singh, Siddhesh Khandelwal, Bowen Pan, Ratnesh Kumar, Andrew Hartnett, Jhony Kaesemodel Pontes, Deva Ramanan, Peter Carr, and James Hays. Argoverse 2: Next Generation Datasets for Self-driving Perception and Forecasting. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2021. 7 +[67] Denis F. Wolf and Guarav S. Sukhatme. Mobile Robot Simultaneous Localization and Mapping in Dynamic Environments. Autonomous Robots, 19, 2005. 1, 2 +[68] Tianhao Wu, Fangcheng Zhong, Andrea Tagliasacchi, Forrester Cole, and Cengiz Oztireli. D $^2$ NeRF: Self-Supervised Decoupling of Dynamic and Static Objects from a Monocular Video. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2022. 2 +[69] Kai M. Wurm, Armin Hornung, Maren Bennewitz, Cyril Stachniss, and Wolfram Burgard. OctoMap: A Probabilistic, Flexible, and Compact 3D Map Representation for Robotic Systems. In Workshop on Best Practice in 3D Perception and Modeling for Mobile Manipulation, IEEE Int. Conf. on Robotics & Automation (ICRA), 2010. 7 +[70] Dongyu Yan, Xiaoyang Lyu, Jieqi Shi, and Yi Lin. Efficient Implicit Neural Reconstruction Using LiDAR. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023. 2 +[71] Wentao Yuan, Zhaoyang Lv, Tanner Schmidt, and Steven Lovegrove. Star: Self-supervised tracking and reconstruction of rigid objects in motion with neural rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[72] Qingwen Zhang, Daniel Duberg, Ruoyu Geng, Mingkai Jia, Lujia Wang, and Patric Jensfelt. A dynamic points removal benchmark in point cloud maps. In IEEE 26th International Conference on Intelligent Transportation Systems (ITSC), pages 608-614, 2023. 6, 8 +[73] Xingguang Zhong, Yue Pan, Jens Behley, and Cyril Stachniss. SHINE-Mapping: Large-Scale 3D Mapping Using Sparse Hierarchical Implicit Neural Representations. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023. 
2, 6, 7, 8 \ No newline at end of file diff --git a/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/images.zip b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..c98e34e09c72c4496e2746ceb3f0655b5c03db52 --- /dev/null +++ b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6b732dc7c25fe2e3775d937b6939e841799117dec15bb460591be9c7ef3cacd +size 647156 diff --git a/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/layout.json b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..6812c85134f17d9c727427fe99273026b07245e9 --- /dev/null +++ b/2024/3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation/layout.json @@ -0,0 +1,12120 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 151, + 103, + 443, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 103, + 443, + 139 + ], + "spans": [ + { + "bbox": [ + 151, + 103, + 443, + 139 + ], + "type": "text", + "content": "3D LiDAR Mapping in Dynamic Environments Using a 4D Implicit Neural Representation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 160, + 216, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 160, + 216, + 175 + ], + "spans": [ + { + "bbox": [ + 121, + 160, + 216, + 175 + ], + "type": "text", + "content": "Xingguang Zhong" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 235, + 161, + 280, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 161, + 280, + 175 + ], + "spans": [ + { + "bbox": [ + 235, + 161, + 280, + 175 + ], + "type": "text", + "content": "Yue Pan" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 299, + 161, + 389, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 161, + 389, + 175 + ], + "spans": [ + { + "bbox": [ + 299, + 161, + 389, + 175 + ], + "type": "text", + "content": "Cyrill Stachniss" + }, + { + "bbox": [ + 299, + 161, + 389, + 175 + ], + "type": "inline_equation", + "content": "^{1,2}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 406, + 161, + 470, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 161, + 470, + 175 + ], + "spans": [ + { + "bbox": [ + 406, + 161, + 470, + 175 + ], + "type": "text", + "content": "Jens Behley" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 100, + 177, + 493, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 177, + 493, + 203 + ], + "spans": [ + { + "bbox": [ + 100, + 177, + 493, + 203 + ], + "type": "text", + "content": "1Center for Robotics, University of Bonn, 2Lamarr Institute for Machine Learning and Artificial Intelligence {zhong, yue.pan, cyrill.stachniss, Jens.vehley}@igg.uni-bonn.de" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "spans": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 256, + 290, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 46, + 256, + 290, + 520 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 290, + 520 + ], + "type": "text", + "content": "Building accurate maps is a key building block to enable reliable localization, planning, and navigation of autonomous vehicles. We propose a novel approach for building accurate maps of dynamic environments utilizing a sequence of LiDAR scans. To this end, we propose encoding the 4D scene into a novel spatio-temporal implicit neural map representation by fitting a time-dependent truncated signed distance function to each point. Using our representation, we extract the static map by filtering the dynamic parts. Our neural representation is based on sparse feature grids, a globally shared decoder, and time-dependent basis functions, which we jointly optimize in an unsupervised fashion. To learn this representation from a sequence of LiDAR scans, we design a simple yet efficient loss function to supervise the map optimization in a piecewise way. We evaluate our approach on various scenes containing moving objects in terms of the reconstruction quality of static maps and the segmentation of dynamic point clouds. The experimental results demonstrate that our method is capable of removing the dynamic part of the input point clouds while reconstructing accurate and complete 3D maps, outperforming several state-of-the-art methods." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 533, + 128, + 545 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 533, + 128, + 545 + ], + "spans": [ + { + "bbox": [ + 47, + 533, + 128, + 545 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 553, + 287, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 553, + 287, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 553, + 287, + 696 + ], + "type": "text", + "content": "Mapping using range sensors, like LiDAR or RGB-D cameras, is a fundamental task in computer vision and robotics. Often, we want to obtain accurate maps to support downstream tasks such as localization, planning, or navigation. For achieving an accurate reconstruction of an outdoor environment, we have to account for dynamics caused by moving objects, such as vehicles or pedestrians. Furthermore, dynamic object removal plays an important role in autonomous driving and robotics applications for creating digital twins for realistic simulation and high-definition mapping, where a static map is augmented with semantic and task-relevant information." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 307, + 239, + 425, + 323 + ], + "blocks": [ + { + "bbox": [ + 307, + 239, + 425, + 323 + ], + "lines": [ + { + "bbox": [ + 307, + 239, + 425, + 323 + ], + "spans": [ + { + "bbox": [ + 307, + 239, + 425, + 323 + ], + "type": "image", + "image_path": "6473e44b651ef1cd7abce4bfc4dc523c37147e3ccf451c3664ee202a2f382717.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 361, + 325, + 370, + 333 + ], + "lines": [ + { + "bbox": [ + 361, + 325, + 370, + 333 + ], + "spans": [ + { + "bbox": [ + 361, + 325, + 370, + 333 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 428, + 237, + 545, + 323 + ], + "blocks": [ + { + "bbox": [ + 428, + 237, + 545, + 323 + ], + "lines": [ + { + "bbox": [ + 428, + 237, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 428, + 237, + 545, + 323 + ], + "type": "image", + "image_path": "b272e82d84603a0b12b8b1e058766ea4751e217f4718f55e8cf43fd6f7b550b5.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 482, + 325, + 492, + 333 + ], + "lines": [ + { + "bbox": [ + 482, + 325, + 492, + 333 + ], + "spans": [ + { + "bbox": [ + 482, + 325, + 492, + 333 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 307, + 341, + 425, + 427 + ], + "blocks": [ + { + "bbox": [ + 307, + 341, + 425, + 427 + ], + "lines": [ + { + "bbox": [ + 307, + 341, + 425, + 427 + ], + "spans": [ + { + "bbox": [ + 307, + 341, + 425, + 427 + ], + "type": "image", + "image_path": "9bd3a3fb5914879d7ced7fd13c61a1efb77ea15a2c1f2e458a5d3e2ea0c6d2df.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 361, + 429, + 370, + 437 + ], + "lines": [ + { + "bbox": [ + 361, + 429, + 370, + 437 + ], + "spans": [ + { + "bbox": [ + 361, + 429, + 370, + 437 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 428, + 346, + 547, + 427 + ], + "blocks": [ + { + "bbox": [ + 428, + 346, + 547, + 427 + ], + "lines": [ + { + "bbox": [ + 428, + 346, + 547, + 427 + ], + "spans": [ + { + "bbox": [ + 428, + 346, + 547, + 427 + ], + "type": "image", + "image_path": "99700c34d4e15036d925f349d18336d6fb4830b54919ad360b7f20a2b21c7e54.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 482, + 428, + 492, + 437 + ], + "lines": [ + { + "bbox": [ + 482, + 428, + 492, + 437 + ], + "spans": [ + { + "bbox": [ + 482, + 428, + 492, + 437 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 446, + 547, + 513 + ], + "lines": [ + { + "bbox": [ + 304, + 446, + 547, + 513 + ], + "spans": [ + { + "bbox": [ + 304, + 446, + 547, + 513 + ], + "type": "text", + "content": "Figure 1. Given a sequence of point clouds, as shown in (a), we optimize our 4D neural representation that can be queried at arbitrary positions for a specific time. (b) Based on the estimated time-dependent TSDF values, we can extract a mesh at a specific point in time. Additionally, our 4D neural representation can be also used for static mapping (c) and dynamic object removal (c)." 
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 521, + 546, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 546, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 546, + 700 + ], + "type": "text", + "content": "Mapping and state estimation in dynamic environments is a classical problem in robotics [5, 56, 57]. Approaches for simultaneous localization and mapping (SLAM) can apply different strategies to deal with dynamics. Common ways are: (1) filtering dynamics from the input [1, 30, 47, 48, 51] as a pre-processing step, which requires a semantic interpretation of the scene; (2) modeling the occupancy in the map representation [17, 34, 37, 49, 50, 64], where dynamics can be implicitly removed by retrospectively removing measurements in free space; (3) including it in the state estimation [4, 16, 55, 61, 67] to model which measurements originated from the dynamic and static parts of the environment. Our proposed method falls into the last category and allows us to model dynamics directly in the map representation leading to a spatio-temporal map representation." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "type": "text", + "content": "Recently, implicit neural representations gained increas" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 234, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 234, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 234, + 712 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 58, + 702, + 234, + 712 + ], + "type": "text", + "content": "Code: https://github.com/PRBonn/4dNDF" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15417" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": "ing interest in computer vision for novel view synthesis [35, 36] and 3D shape reconstruction [33, 40]. 
Due to their compactness and continuity, several approaches [65, 70, 73] investigate the use of neural representations in large-scale 3D LiDAR mapping leading to accurate maps while significantly reducing memory consumption. However, these approaches often do not address the problem of handling dynamics during mapping. The recent progress on dynamic NeRF [7, 13, 44, 52] and neural deformable object reconstruction [6, 10] indicates that neural representations can be also used to represent dynamic scenes, which inspires us to tackle the problem of mapping in dynamic environments from the perspective of 4D reconstruction." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 228, + 289, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 228, + 289, + 418 + ], + "spans": [ + { + "bbox": [ + 46, + 228, + 289, + 418 + ], + "type": "text", + "content": "In this paper, we propose a novel method to reconstruct large 4D dynamic scenes by encoding every point's time-dependent truncated signed distance function (TSDF) into an implicit neural scene representation. As illustrated in Fig. 1, we take sequentially recorded LiDAR point clouds collected in dynamic environments as input and generate a TSDF for each time frame, which can be used to extract a mesh using marching cubes [29]. The background TSDF, which is unchanged during the whole sequence, can be extracted from the 4D signal easily. We regard it as a static map that can be used to segment dynamic objects from the original point cloud. Compared to the traditional voxel-based mapping method, the continuous neural representation allows for the removal of dynamic objects while preserving rich map details. In summary, the main contributions of this paper are:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 419, + 287, + 563 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 47, + 419, + 287, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 419, + 287, + 455 + ], + "spans": [ + { + "bbox": [ + 47, + 419, + 287, + 455 + ], + "type": "text", + "content": "- We propose a novel implicit neural representation to jointly reconstruct a dynamic 3D environment and maintain a static map using sequential LiDAR scans as input." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 456, + 287, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 456, + 287, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 456, + 287, + 502 + ], + "type": "text", + "content": "- We employ a piecewise training data sampling strategy and design a simple, yet effective loss function that maintains the consistency of the static point supervision through gradient constraints." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 503, + 287, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 503, + 287, + 563 + ], + "spans": [ + { + "bbox": [ + 47, + 503, + 287, + 563 + ], + "type": "text", + "content": "- We evaluate the mapping results by the accuracy of the dynamic object segmentation as well as the quality of the reconstructed static map showing superior performance compared to several baselines. We provide our code and the data used for experiments." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 573, + 134, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 573, + 134, + 586 + ], + "spans": [ + { + "bbox": [ + 47, + 573, + 134, + 586 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 594, + 287, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 287, + 701 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 287, + 701 + ], + "type": "text", + "content": "Mapping and SLAM in dynamic environments is a classical topic in robotics [5, 56, 57] with a large body of work, which tackles the problem by pre-processing the sensor data [1, 30, 47, 48, 51], occupancy estimation to filter dynamics by removing measurements in free space [17, 34, 37, 39, 49, 50, 64], or state estimation techniques [4, 16, 55, 61, 67]. Below, we focus on closely related approaches using neural representations but also static map building approaches for scenes containing dynamics." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 59, + 701, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 287, + 714 + ], + "type": "text", + "content": "Dynamic NeRF. Dynamic NeRFs aim to solve the prob-" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 72, + 545, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 192 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 192 + ], + "type": "text", + "content": "lem of novel view synthesis in dynamic environments. Some approaches [41-43, 58, 63] address this challenge by modeling the deformation of each point with respect to a canonical frame. However, these methods cannot represent newly appearing objects. This can render them unsuited for complicated real-life scenarios. In contrast, NSFF [24] and DynIBaR [26] get rid of the canonical frame by computing the motion field of the whole scene. While these methods can deliver satisfactory results, the training time is usually in the order of hours or even days." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 194, + 546, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 194, + 546, + 361 + ], + "spans": [ + { + "bbox": [ + 304, + 194, + 546, + 361 + ], + "type": "text", + "content": "Another type of method leverages the compactness of the neural representation to model the 4D spatio-temporal information directly. Several works [7, 13, 52] project the 4D input into multiple voxelized lower-dimensional feature spaces to avoid large memory consumption, which improves the efficiency of the optimization. Song et al. [54] propose a time-dependent sliding window strategy for accumulating the voxel features. Instead of only targeting novel view synthesis, several approaches [26, 68, 71] decompose the scene into dynamic objects and static background in a self-supervised way, which inspired our work. Other approaches [22, 23, 53] accomplish neural representation-based reconstruction for larger scenes by adding additional supervision such as object masks or optical flow." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 364, + 546, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 364, + 546, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 364, + 546, + 567 + ], + "type": "text", + "content": "Neural representations for LiDAR scans. Recently, many approaches aim to enhance scene reconstruction using LiDAR data through neural representations. The early work URF [46] leverages LiDAR data as depth supervision to improve the optimization of a neural radiance field. With only LiDAR data as input, Huang et al. [20] achieve novel view synthesis for LiDAR scans with differentiable rendering. Similar to our work, Shine-mapping [73] and EIN-RUL [70] utilize sparse hierarchical feature voxel structures to achieve large-scale 3D mapping. Additionally, the data-driven approach NKSR [18] based on learned kernel regression demonstrates accurate surface reconstruction with noisy LiDAR point cloud as input. Although these approaches perform well in improving reconstruction accuracy and reducing memory consumption, none of them consider the problem of dynamic object interference in real-world environments." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 570, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 715 + ], + "type": "text", + "content": "Static map building and motion detection. In addition to removing moving objects from the voxel map with ray tracing, numerous works [8, 19, 31, 32] try to segment dynamic points from raw LiDAR point clouds. However, these methods require a significant amount of labeled data, which makes it challenging to generalize them to various scenarios or sensors with different scan patterns. In contrast, geometry-based, more heuristic approaches have also produced promising results. Kim et al. [21] solve this problem using the visibility of range images, but their results are still highly affected by the resolution. Lim et al. proposed Erasor [27], which leverages ground fitting as prior" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "15418" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 286, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 286, + 143 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 286, + 143 + ], + "type": "text", + "content": "to achieve better segmentation for dynamic points. More recent approaches [9, 28] extend it to instance level to improve results. However, these methods rely on an accurate ground fitting method, which is mainly designed for autonomous driving scenarios, which cannot be guaranteed in complex unstructured real environments." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 144, + 286, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 144, + 286, + 226 + ], + "spans": [ + { + "bbox": [ + 46, + 144, + 286, + 226 + ], + "type": "text", + "content": "In contrast to the approaches discussed above, we follow recent developments in neural reconstruction and propose a novel scene representation that allows us to capture the spatio-temporal progression of a scene. We represent the time-varying SDF of a scene in an unsupervised fashion, which we exploit to remove dynamic objects and reconstruct accurate meshes of the static scene." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 238, + 138, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 238, + 138, + 251 + ], + "spans": [ + { + "bbox": [ + 47, + 238, + 138, + 251 + ], + "type": "text", + "content": "3. Our Approach" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "spans": [ + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "text", + "content": "The input of our approach is given by a sequence of point clouds, " + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "inline_equation", + "content": "S_{1:N} = (S_1,\\dots ,S_N)" + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "text", + "content": ", and their corresponding global poses " + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_t\\in \\mathbb{R}^{4\\times 4}" + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "inline_equation", + "content": "t\\in [1,N]" + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "text", + "content": ", estimated via scan matching, LiDAR odometry, or SLAM methods [2, 11, 12, 60]. Each scan's point cloud " + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "inline_equation", + "content": "S_{t} = \\{\\pmb{s}_{t}^{1},\\dots ,\\pmb{s}_{t}^{M_{t}}\\}" + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "text", + "content": " is a set of points, " + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "inline_equation", + "content": "\\pmb{s}_t^i\\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "text", + "content": ", collected at time " + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "text", + "content": ". Given such a sequence of scans " + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "inline_equation", + "content": "S_{1:N}" + }, + { + "bbox": [ + 46, + 258, + 286, + 364 + ], + "type": "text", + "content": ", our approach aims to reconstruct a 4D TSDF of the traversed scene and maintain a static 3D map at the same time." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 365, + 286, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 286, + 413 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 286, + 413 + ], + "type": "text", + "content": "In the next sections, we first introduce our spatiotemporal representation and then explain how to optimize it to represent the dynamic and static parts of a point cloud sequence " + }, + { + "bbox": [ + 46, + 365, + 286, + 413 + ], + "type": "inline_equation", + "content": "S_{1:N}" + }, + { + "bbox": [ + 46, + 365, + 286, + 413 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 420, + 165, + 433 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 420, + 165, + 433 + ], + "spans": [ + { + "bbox": [ + 47, + 420, + 165, + 433 + ], + "type": "text", + "content": "3.1. Map Representation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 437, + 286, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 286, + 533 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 286, + 533 + ], + "type": "text", + "content": "The key component of our approach is an implicit neural scene representation that allows us to represent a 4D TSDF of the scene, as well as facilitates the extraction of a static map representation. Our proposed spatio-temporal scene representation is optimized for the given point cloud sequence " + }, + { + "bbox": [ + 46, + 437, + 286, + 533 + ], + "type": "inline_equation", + "content": "S_{1:N}" + }, + { + "bbox": [ + 46, + 437, + 286, + 533 + ], + "type": "text", + "content": " such that we can retrieve for an arbitrary point " + }, + { + "bbox": [ + 46, + 437, + 286, + 533 + ], + "type": "inline_equation", + "content": "\\pmb{p} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 437, + 286, + 533 + ], + "type": "text", + "content": " and time " + }, + { + "bbox": [ + 46, + 437, + 286, + 533 + ], + "type": "inline_equation", + "content": "t \\in [1,N]" + }, + { + "bbox": [ + 46, + 437, + 286, + 533 + ], + "type": "text", + "content": " the corresponding time-varying signed distances value at that location." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 534, + 286, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 286, + 616 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 286, + 616 + ], + "type": "text", + "content": "Temporal representation. We utilize an TSDF to represent the scene, i.e., a function that provides the signed distance to the nearest surface for any given point " + }, + { + "bbox": [ + 46, + 534, + 286, + 616 + ], + "type": "inline_equation", + "content": "\\pmb{p} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 534, + 286, + 616 + ], + "type": "text", + "content": ". The sign of the distance is positive when the point is in free space or in front of the measured surface and is negative when the point is inside the occupied space or behind the measured surface." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "content": "In a dynamic 3D scene, measuring the signed distance of any coordinate at each moment produces a time-dependent function that captures the signed distance changes over time, see Fig. 2 for an illustration. 
Additionally, if a coordinate is static throughout the period, the signed distance should remain constant. The key idea of our spatiotemporal scene representation is to fit the time-varying SDF at each point with several basis functions. Inspired by Li" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 72, + 433, + 152 + ], + "blocks": [ + { + "bbox": [ + 310, + 72, + 433, + 152 + ], + "lines": [ + { + "bbox": [ + 310, + 72, + 433, + 152 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 433, + 152 + ], + "type": "image", + "image_path": "1eb36f546eaeca0e2636c4e4c378d62bab5463a1c2343ff49a674b673775b880.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "lines": [ + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "spans": [ + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "text", + "content": "Figure 2. Principle of our 4D TSDF representation: The left figure shows a moving object and a query point " + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "text", + "content": ". The one on the right depicts the corresponding signed distance at " + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "text", + "content": " over time. At " + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "text", + "content": "'s signed distance is a positive truncated value. When the moving object reaches " + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "text", + "content": " at time " + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "text", + "content": " is inside the object and its signed distance is negative accordingly. At " + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "inline_equation", + "content": "t_2" + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "text", + "content": ", the moving object moved past " + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "text", + "content": ", the signed distance of " + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 165, + 545, + 243 + ], + "type": "text", + "content": " gets positive again." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 436, + 78, + 544, + 152 + ], + "blocks": [ + { + "bbox": [ + 436, + 78, + 544, + 152 + ], + "lines": [ + { + "bbox": [ + 436, + 78, + 544, + 152 + ], + "spans": [ + { + "bbox": [ + 436, + 78, + 544, + 152 + ], + "type": "image", + "image_path": "7672585101abf831228b543a099278c54743c320ac8da2480fd27fd0196a141a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "spans": [ + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "text", + "content": "et al. [26]'s representation of moving point trajectories, we exploit " + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "text", + "content": " globally shared basis functions " + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "inline_equation", + "content": "\\phi_k: \\mathbb{R} \\mapsto \\mathbb{R}" + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "text", + "content": ". Using these basis functions " + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "inline_equation", + "content": "\\phi_k(t)" + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "text", + "content": ", we model the time-varying TSDF " + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "inline_equation", + "content": "F(\\pmb{p}, t)" + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "text", + "content": " that maps a location " + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "inline_equation", + "content": "\\pmb{p} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "text", + "content": " at time " + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 258, + 545, + 318 + ], + "type": "text", + "content": " to a signed distance as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 376, + 328, + 545, + 361 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 328, + 545, + 361 + ], + "spans": [ + { + "bbox": [ + 376, + 328, + 545, + 361 + ], + "type": "interline_equation", + "content": "F (\\boldsymbol {p}, t) = \\sum_ {k = 1} ^ {K} w _ {\\boldsymbol {p}} ^ {k} \\phi_ {k} (t), \\tag {1}", + "image_path": "876f9f7b03a2ae1eba72b0bffd9ba1986ed428a3fc19a3b9d2dfc28910018e1a.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 372, + 545, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 372, + 545, + 419 + ], + "spans": [ + { + "bbox": [ + 304, + 372, + 545, + 419 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 372, + 545, + 419 + ], + "type": "inline_equation", + "content": "w_{\\pmb{p}}^{k} \\in \\mathbb{R}" + }, + { + "bbox": [ + 304, + 372, + 545, + 419 + ], + "type": "text", + "content": " are estimable location-dependent coefficients. 
In line with previous works [26, 62], we initialize the basis functions with discrete cosine transform (DCT) basis functions:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 350, + 427, + 545, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 427, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 350, + 427, + 545, + 449 + ], + "type": "interline_equation", + "content": "\\phi_ {k} (t) = \\cos \\left(\\frac {\\pi}{2 N} (2 t + 1) (k - 1)\\right). \\tag {2}", + "image_path": "9bc5c551ee74494e571cceb78584bdde91172e8a2256861be144b37b34e7df26.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": "The first basis function for " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "k = 1" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": " is time-independent as " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\phi_1(t) = 1" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": ". During the training process, we fix " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\phi_1(t)" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": " and determine the other basis functions by backpropagation. We consider " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\phi_1(t)" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": "'s corresponding weight " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "w_{\\pmb{p}}^{1}" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": " as the static SDF value of the point " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": ". Hence, " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "F(\\pmb{p}, t)" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": " consists of its static background value, i.e., " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "w_{\\pmb{p}}^{1}\\phi_{1}(t) = w_{\\pmb{p}}^{1}" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": ", and the weighted sum of dynamic basis functions " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\phi_2(t), \\ldots, \\phi_K(t)" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 544, + 545, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 544, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 304, + 544, + 545, + 592 + ], + "type": "text", + "content": "As the basis functions " + }, + { + "bbox": [ + 304, + 544, + 545, + 592 + ], + "type": "inline_equation", + "content": "\\phi_1(t),\\ldots ,\\phi_K(t)" + }, + { + "bbox": [ + 304, + 544, + 545, + 592 + ], + "type": "text", + "content": " are shared between all points in the scene, we need to optimize the location-dependent weights that are implicitly represented in our spatial representation." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 594, + 545, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 639 + ], + "type": "text", + "content": "Spatial representation. To achieve accurate scene reconstruction while maintaining memory efficiency, we employ a multi-resolution sparse voxel grid to store spatial geometric information." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "content": "First, we accumulate the input point clouds, " + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "inline_equation", + "content": "S_{1}, \\ldots, S_{N}" + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "content": " based on their poses " + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "inline_equation", + "content": "T_{1}, \\ldots, T_{N}" + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "content": " computed from LiDAR odometry and generate a hierarchy of voxel grids around points to ensure complete coverage in 3D. We use a spatial hash table for fast retrieval of the resulting voxels that are only initialized if points fall into a voxel." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15419" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 72, + 534, + 189 + ], + "blocks": [ + { + "bbox": [ + 62, + 72, + 534, + 189 + ], + "lines": [ + { + "bbox": [ + 62, + 72, + 534, + 189 + ], + "spans": [ + { + "bbox": [ + 62, + 72, + 534, + 189 + ], + "type": "image", + "image_path": "e0e986dd7c3c1d3401d87bc8d749bc80e7e1d37617c3cc9b641e7ca971ba76a0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "lines": [ + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "spans": [ + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": "Figure 3. Overview of querying a TSDF value in our 4D map representation. 
For querying a point " + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "inline_equation", + "content": "t_{i + 1}" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": ", we first retrieve each corner's feature in " + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^l" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": " of the voxel that " + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": " is located in and obtain the fused feature " + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "inline_equation", + "content": "\\pmb{f}_{\\pmb{p}}" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": " by trilinear interpolation. Then, we feed " + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "inline_equation", + "content": "\\pmb{f}_{\\pmb{p}}" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": " into the decoder " + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{mlp}}" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": " and take the output as the weights of different basis functions " + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "inline_equation", + "content": "\\phi_1(t),\\ldots ,\\phi_K(t)" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": ". Finally, we calculate the weighted sum of basis functions' values at " + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "inline_equation", + "content": "t_{i + 1}" + }, + { + "bbox": [ + 46, + 200, + 547, + 247 + ], + "type": "text", + "content": " to get their respective SDF results. For simplicity, we only illustrate one level of hashed feature grids." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 263, + 289, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 263, + 289, + 326 + ], + "spans": [ + { + "bbox": [ + 47, + 263, + 289, + 326 + ], + "type": "text", + "content": "Similar to Instant-NGP [36], we save a feature vector " + }, + { + "bbox": [ + 47, + 263, + 289, + 326 + ], + "type": "inline_equation", + "content": "\\pmb{f} \\in \\mathbb{R}^{D}" + }, + { + "bbox": [ + 47, + 263, + 289, + 326 + ], + "type": "text", + "content": " at each corner vertex of the voxel grid in each resolution level, where we denote as " + }, + { + "bbox": [ + 47, + 263, + 289, + 326 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^l" + }, + { + "bbox": [ + 47, + 263, + 289, + 326 + ], + "type": "text", + "content": " the level-wise corner features. 
We compute the feature vector " + }, + { + "bbox": [ + 47, + 263, + 289, + 326 + ], + "type": "inline_equation", + "content": "\\pmb{f}_{\\pmb{p}} \\in \\mathbb{R}^{D}" + }, + { + "bbox": [ + 47, + 263, + 289, + 326 + ], + "type": "text", + "content": " for given query point " + }, + { + "bbox": [ + 47, + 263, + 289, + 326 + ], + "type": "inline_equation", + "content": "\\pmb{p} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 47, + 263, + 289, + 326 + ], + "type": "text", + "content": " inside the hierarchical grid as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 332, + 287, + 365 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 332, + 287, + 365 + ], + "spans": [ + { + "bbox": [ + 107, + 332, + 287, + 365 + ], + "type": "interline_equation", + "content": "\\boldsymbol {f} _ {\\boldsymbol {p}} = \\sum_ {l = 1} ^ {L} \\operatorname {i n t e r p o l a t e} (\\boldsymbol {p}, \\mathcal {F} ^ {l}), \\tag {3}", + "image_path": "134ac950f9166445da87df551a5dd2677d8ca41441b359af7113e4ea00610095.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 372, + 287, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 372, + 287, + 395 + ], + "spans": [ + { + "bbox": [ + 47, + 372, + 287, + 395 + ], + "type": "text", + "content": "where interpolate is the trilinear interpolation for a given point " + }, + { + "bbox": [ + 47, + 372, + 287, + 395 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 47, + 372, + 287, + 395 + ], + "type": "text", + "content": " using the corner features " + }, + { + "bbox": [ + 47, + 372, + 287, + 395 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^l" + }, + { + "bbox": [ + 47, + 372, + 287, + 395 + ], + "type": "text", + "content": " at level " + }, + { + "bbox": [ + 47, + 372, + 287, + 395 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 47, + 372, + 287, + 395 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 396, + 288, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 396, + 288, + 434 + ], + "spans": [ + { + "bbox": [ + 47, + 396, + 288, + 434 + ], + "type": "text", + "content": "Then, we decode the interpolated feature vector " + }, + { + "bbox": [ + 47, + 396, + 288, + 434 + ], + "type": "inline_equation", + "content": "\\pmb{f}_{\\pmb{p}}" + }, + { + "bbox": [ + 47, + 396, + 288, + 434 + ], + "type": "text", + "content": " into the desired weights " + }, + { + "bbox": [ + 47, + 396, + 288, + 434 + ], + "type": "inline_equation", + "content": "\\pmb{w}_{\\pmb{p}} = (w_{p}^{1},\\dots ,w_{p}^{K})\\in \\mathbb{R}^{K}" + }, + { + "bbox": [ + 47, + 396, + 288, + 434 + ], + "type": "text", + "content": " by a globally shared multi-layer perceptron (MLP) " + }, + { + "bbox": [ + 47, + 396, + 288, + 434 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{mlp}}" + }, + { + "bbox": [ + 47, + 396, + 288, + 434 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 442, + 287, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 442, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 130, + 442, + 287, + 456 + ], + "type": "interline_equation", + "content": "\\boldsymbol {w} _ {\\boldsymbol {p}} = D _ {\\operatorname {m l p}} \\left(\\boldsymbol {f} _ {\\boldsymbol {p}}\\right). 
\\tag {4}", + "image_path": "b7c99ad2377a8e85cd14c482d3ace12cb5f692c9d56bfce9a3628972cc94c01a.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 464, + 287, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 464, + 287, + 523 + ], + "spans": [ + { + "bbox": [ + 47, + 464, + 287, + 523 + ], + "type": "text", + "content": "As every step is differentiable, we can optimize the multi-resolution feature grids " + }, + { + "bbox": [ + 47, + 464, + 287, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^l" + }, + { + "bbox": [ + 47, + 464, + 287, + 523 + ], + "type": "text", + "content": ", the MLP decoder " + }, + { + "bbox": [ + 47, + 464, + 287, + 523 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{mlp}}" + }, + { + "bbox": [ + 47, + 464, + 287, + 523 + ], + "type": "text", + "content": " and the values of the basis functions jointly by gradient descent once we have training data and corresponding target values. The SDF querying process is illustrated in Fig. 3." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 529, + 160, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 529, + 160, + 542 + ], + "spans": [ + { + "bbox": [ + 47, + 529, + 160, + 542 + ], + "type": "text", + "content": "3.2. Objective Function" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "spans": [ + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "text", + "content": "We take samples along the rays from the input scans " + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "inline_equation", + "content": "S_{t}" + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "text", + "content": " to collect training data. Each scan frame " + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "inline_equation", + "content": "S_{t}" + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "text", + "content": " corresponds to a moment " + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "text", + "content": " in time, so we gather four-dimensional data points " + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "inline_equation", + "content": "(\\pmb {q},t)" + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "text", + "content": " via sampling along the ray from the scan origin " + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "inline_equation", + "content": "\\pmb{o}_t\\in \\mathbb{R}^3" + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "text", + "content": " to a point " + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "inline_equation", + "content": "s_t^i\\in S_t" + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "text", + "content": " . We can represent the sampled points " + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "inline_equation", + "content": "q_{s}^{i}" + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "text", + "content": " along the ray as " + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "inline_equation", + "content": "q_{s}^{i} = o_{t} + \\lambda (s_{t}^{i} - o_{t})" + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "text", + "content": " . 
By setting a truncation threshold " + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 47, + 548, + 287, + 643 + ], + "type": "text", + "content": " , we split the ray into two regions, at the surface and in the free-space:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 97, + 651, + 287, + 666 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 651, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 97, + 651, + 287, + 666 + ], + "type": "interline_equation", + "content": "\\mathcal {T} _ {\\text {s u r f}} ^ {i} = \\left\\{\\boldsymbol {q} _ {s} ^ {i} \\mid \\lambda \\in (1 - \\bar {\\tau}, 1 + \\bar {\\tau}) \\right\\} \\tag {5}", + "image_path": "ba737253c00ad7a59c1bc555cfaa2bb632991afd1607d60ab76d05dd1407aa66.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 99, + 667, + 287, + 682 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 667, + 287, + 682 + ], + "spans": [ + { + "bbox": [ + 99, + 667, + 287, + 682 + ], + "type": "interline_equation", + "content": "\\mathcal {T} _ {\\text {f r e e}} ^ {i} = \\left\\{\\boldsymbol {q} _ {s} ^ {i} \\mid \\lambda \\in (0, 1 - \\bar {\\tau}) \\right\\}, \\tag {6}", + "image_path": "12395539546157ef9a4300c924ad2a5d495f5dd22a0e0deff09cadb81c72c62d.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\bar{\\tau} = \\tau (\\| \\pmb{s}_t^i -\\pmb {o}_t\\|)^{-1}" + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "content": ". Thus, " + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{\\mathrm{surf}}^i" + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "content": " represents the region close to the endpoint " + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "inline_equation", + "content": "s_t^i\\in S_t" + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{\\mathrm{free}}^i" + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "content": " is the region" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "spans": [ + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "text", + "content": "in the free space. 
We uniformly sample " + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "M_{s}" + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "M_{f}" + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "text", + "content": " points from " + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{\\mathrm{surf}}^i" + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{\\mathrm{free}}^i" + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "text", + "content": " separately. We obtain two sets " + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{free}}" + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "text", + "content": " of samples by sampling over all scans. Unlike prior work [20, 46] that use differentiable rendering to calculate the depth by integration along the ray, we design different losses for " + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{free}}" + }, + { + "bbox": [ + 304, + 263, + 545, + 335 + ], + "type": "text", + "content": " to supervise the 4D TSDF directly." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "spans": [ + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "text", + "content": "Near Surface Loss. Since the output of our 4D map is the signed distance value " + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "inline_equation", + "content": "\\hat{d} = F(\\pmb {p},t)" + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "text", + "content": " at an arbitrary position " + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "inline_equation", + "content": "\\pmb {p}\\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "text", + "content": " in time " + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "inline_equation", + "content": "t\\in [1,N]" + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "text", + "content": ", we expect that the predicted value " + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "inline_equation", + "content": "\\hat{d}" + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "text", + "content": " does not change over time for static points. 
However, this cannot be guaranteed if we use the projective distance " + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "text", + "content": " to the surface along the ray direction directly as the target value, since the projective distance would change over time due to the change of view direction by the moving sensor, even in a static scene. Thus, for the sampled data in " + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "text", + "content": ", i.e., the sampled points near the surface, we can only obtain reliable information about the sign of the TSDF value of these points, which should be positive if the point is before the endpoint and negative if the point is behind. In addition, for a sampled point in front of the endpoint, its projective signed distance " + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "text", + "content": " should be the upper bound of its actual signed distance value. And for sampled points behind the endpoint, " + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 336, + 546, + 540 + ], + "type": "text", + "content": " should be the lower bound." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 541, + 547, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 541, + 547, + 565 + ], + "spans": [ + { + "bbox": [ + 304, + 541, + 547, + 565 + ], + "type": "text", + "content": "We design a piecewise loss " + }, + { + "bbox": [ + 304, + 541, + 547, + 565 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 541, + 547, + 565 + ], + "type": "text", + "content": " to supervise the sampled points near the surface:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 576, + 545, + 616 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 576, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 315, + 576, + 545, + 616 + ], + "type": "interline_equation", + "content": "L _ {\\text {s u r f}} (\\hat {d}, d _ {\\text {s u r f}}) = \\left\\{ \\begin{array}{c l} | \\hat {d} | & \\text {i f} \\hat {d} d _ {\\text {s u r f}} < 0 \\\\ | \\hat {d} - d _ {\\text {s u r f}} | & \\text {i f} \\hat {d} d _ {\\text {s u r f}} > d _ {\\text {s u r f}} ^ {2} \\\\ 0 & \\text {o t h e r w i s e} \\end{array} , \\right. 
\\tag {7}", + "image_path": "02ea0c6869e9e5a1edf6ece900b49485a24e7203d56103fbcc83090d9494cb88.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\hat{d} = F(\\pmb {q},t)" + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": " is the predicted value from our map for a sample point " + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\pmb {q}\\in \\mathcal{D}_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": " is its corresponding projective signed distance for that sampled point in the corresponding scan " + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{S}_t" + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": ". This loss punishes only a prediction when the sign is wrong or its absolute value is larger than the absolute value of " + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": ". For a query point exactly on the surface, i.e., " + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{surf}} = 0" + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": " is simply the L1 loss." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15420" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 145 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 145 + ], + "type": "text", + "content": "To calculate an accurate signed distance value and maintain the consistency of constraints for static points from different observations, we use the natural property of signed distance function to constraint the length of the gradient vector for samples inside " + }, + { + "bbox": [ + 47, + 72, + 289, + 145 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{surf}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 145 + ], + "type": "text", + "content": ", which is called Eikonal regularization [15, 38]:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 89, + 155, + 287, + 184 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 155, + 287, + 184 + ], + "spans": [ + { + "bbox": [ + 89, + 155, + 287, + 184 + ], + "type": "interline_equation", + "content": "L _ {\\text {e i k o n a l}} (\\boldsymbol {p}, t) = \\left(\\left\\| \\frac {\\partial F (\\boldsymbol {p} , t)}{\\partial \\boldsymbol {p}} \\right\\| - 1\\right) ^ {2}, \\tag {8}", + "image_path": "cca5574169d121287f7700d2354ed7b3255613871e7b4c01e8fb81c64daa0442.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 196, + 288, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 196, + 288, + 243 + ], + "spans": [ + { + "bbox": [ + 47, + 196, + 288, + 243 + ], + "type": "text", + "content": "Inspired by Neuralangelo [25], we manually add perturbations to compute more robust gradient vectors instead of using automatic differentiation, which means we compute numerical gradients:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 255, + 287, + 280 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 255, + 287, + 280 + ], + "spans": [ + { + "bbox": [ + 76, + 255, + 287, + 280 + ], + "type": "interline_equation", + "content": "\\nabla_ {x} F (\\boldsymbol {p}, t) = \\frac {F (\\boldsymbol {p} + \\epsilon_ {\\boldsymbol {x}} , t) - F (\\boldsymbol {p} - \\epsilon_ {\\boldsymbol {x}} , t)}{2 \\epsilon}, \\tag {9}", + "image_path": "2e7b8d87555c3c9358421dc1e38648b7277af2dca066076d7e66af422cae82fc.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "spans": [ + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "inline_equation", + "content": "\\nabla_x F(\\pmb{p}, t)" + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "text", + "content": " is the component of the gradient " + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "inline_equation", + "content": "\\frac{\\partial F(\\pmb{p}, t)}{\\partial \\pmb{p}}" + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "text", + "content": " on the " + }, + { + "bbox": [ + 
47, + 291, + 287, + 380 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "text", + "content": " axis, and " + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "inline_equation", + "content": "\\epsilon_x = (\\epsilon, 0, 0)^{\\top}" + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "text", + "content": " is the added perturbation. We apply the same operation on " + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "text", + "content": " axes to calculate the numerical gradient. Furthermore, in order to get faster convergence at the beginning and ultimately recover the rich geometric details, we first set a large " + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 47, + 291, + 287, + 380 + ], + "type": "text", + "content": " and gradually reduce it during the training process." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 380, + 288, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 380, + 288, + 453 + ], + "spans": [ + { + "bbox": [ + 46, + 380, + 288, + 453 + ], + "type": "text", + "content": "Free Space Loss. As we tackle the problem of mapping in dynamic environments, we cannot simply accumulate point clouds and then calculate accurate supervision of signed distance value via nearest neighbor search. Therefore, we use a L1 loss " + }, + { + "bbox": [ + 46, + 380, + 288, + 453 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{free}}" + }, + { + "bbox": [ + 46, + 380, + 288, + 453 + ], + "type": "text", + "content": " to constrain the signed distance prediction " + }, + { + "bbox": [ + 46, + 380, + 288, + 453 + ], + "type": "inline_equation", + "content": "\\hat{d}" + }, + { + "bbox": [ + 46, + 380, + 288, + 453 + ], + "type": "text", + "content": " of the free space points, i.e., " + }, + { + "bbox": [ + 46, + 380, + 288, + 453 + ], + "type": "inline_equation", + "content": "\\pmb{p} \\in \\mathcal{D}_{\\mathrm{free}}" + }, + { + "bbox": [ + 46, + 380, + 288, + 453 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 127, + 464, + 287, + 479 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 464, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 127, + 464, + 287, + 479 + ], + "type": "interline_equation", + "content": "L _ {\\text {f r e e}} (\\hat {d}) = | \\hat {d} - \\tau |, \\tag {10}", + "image_path": "3cbff3b4bad6ef36a2584288609bb73dbc02dca275f7b19de14f7decd3204ac3.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 491, + 272, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 491, + 272, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 491, + 272, + 502 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 491, + 272, + 502 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 47, + 491, + 272, + 502 + ], + "type": "text", + "content": " is the truncation threshold we used in Sec. 3.2." 
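A minimal sketch of the numerically differentiated Eikonal term (Eqs. 8 and 9) and the free-space loss of Eq. (10), assuming a callable `sdf(p, t)` that maps sampled points and timestamps to predicted signed distances; the names and tensor shapes are illustrative, and `eps` would be annealed from large to small over training as described above.

```python
import torch

def numerical_gradient(sdf, p: torch.Tensor, t: torch.Tensor, eps: float) -> torch.Tensor:
    """Central differences along x, y, z (Eq. 9) instead of autograd.

    `sdf` is assumed to map ((N, 3) points, (N,) timestamps) -> (N,) distances.
    """
    grads = []
    for axis in range(3):
        offset = torch.zeros_like(p)
        offset[:, axis] = eps
        grads.append((sdf(p + offset, t) - sdf(p - offset, t)) / (2.0 * eps))
    return torch.stack(grads, dim=-1)                 # (N, 3)

def eikonal_loss(sdf, p: torch.Tensor, t: torch.Tensor, eps: float) -> torch.Tensor:
    """Eq. (8): the gradient of a signed distance field should have unit norm."""
    grad = numerical_gradient(sdf, p, t, eps)
    return ((grad.norm(dim=-1) - 1.0) ** 2).mean()

def free_space_loss(d_pred: torch.Tensor, tau: float) -> torch.Tensor:
    """Eq. (10): free-space samples should predict the truncation value tau."""
    return (d_pred - tau).abs().mean()
```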
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 504, + 287, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 504, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 47, + 504, + 287, + 552 + ], + "type": "text", + "content": "Thanks to our spatio-temporal representation, a single query point can get both, static and dynamic TSDF values. Thus, for some regions that are determined to be free space, we can directly add constraints to their static TSDF values." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "spans": [ + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": "We divide the free space points " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{free}}" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": " into dense and sparse subset " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{dense}}" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{sparse}}" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": " based on a threshold " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "r_{\\mathrm{dense}}" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": " for the distance from the free space point sampled at time " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": " to the scan origin " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "\\pmb{o}_t" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": ". For each point " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "\\pmb{p} \\in \\mathcal{D}_{\\mathrm{dense}}" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": ", we find the nearest neighbor " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "\\pmb{n_p}" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": " in the corresponding scan " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "\\mathcal{S}_t" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "\\pmb{n_p} = \\arg \\min_{\\pmb{q} \\in \\mathcal{S}_t} \\| \\pmb{p} - \\pmb{q} \\|_2" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{certain}} = \\{\\pmb{p} \\in \\mathcal{D}_{\\mathrm{dense}} | \\| \\pmb{p} - \\pmb{n_p} \\| > \\tau\\}" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": " be the points that we consider in the certain free space. 
Then, we supervise " + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "inline_equation", + "content": "\\pmb{p} \\in \\mathcal{D}_{\\mathrm{certain}}" + }, + { + "bbox": [ + 47, + 554, + 288, + 662 + ], + "type": "text", + "content": " by its static signed distance value directly:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 119, + 673, + 287, + 689 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 673, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 119, + 673, + 287, + 689 + ], + "type": "interline_equation", + "content": "L _ {\\text {c e r t a i n}} (\\boldsymbol {p}) = \\left| w _ {\\boldsymbol {p}} ^ {1} - \\tau \\right|, \\tag {11}", + "image_path": "da138251ee7c4d6faf9bf3e449bac5c6bcc076460a259bfe64e6d5f7256a390a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 700, + 259, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 700, + 259, + 716 + ], + "spans": [ + { + "bbox": [ + 47, + 700, + 259, + 716 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 700, + 259, + 716 + ], + "type": "inline_equation", + "content": "w_{p}^{1}" + }, + { + "bbox": [ + 47, + 700, + 259, + 716 + ], + "type": "text", + "content": " is the first weight of the decoder's output." + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 321, + 68, + 420, + 140 + ], + "blocks": [ + { + "bbox": [ + 321, + 68, + 420, + 140 + ], + "lines": [ + { + "bbox": [ + 321, + 68, + 420, + 140 + ], + "spans": [ + { + "bbox": [ + 321, + 68, + 420, + 140 + ], + "type": "image", + "image_path": "9480712a742e6610fc35ba16a2dd5d0cc3f35865c2d52ab271f969545f97912f.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 365, + 140, + 374, + 148 + ], + "lines": [ + { + "bbox": [ + 365, + 140, + 374, + 148 + ], + "spans": [ + { + "bbox": [ + 365, + 140, + 374, + 148 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 321, + 148, + 418, + 217 + ], + "blocks": [ + { + "bbox": [ + 321, + 148, + 418, + 217 + ], + "lines": [ + { + "bbox": [ + 321, + 148, + 418, + 217 + ], + "spans": [ + { + "bbox": [ + 321, + 148, + 418, + 217 + ], + "type": "image", + "image_path": "eff7d68930d37d785d5d9a4d83515144ac33853e6dff29b570a5ac20ff81e6fb.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 364, + 218, + 374, + 226 + ], + "lines": [ + { + "bbox": [ + 364, + 218, + 374, + 226 + ], + "spans": [ + { + "bbox": [ + 364, + 218, + 374, + 226 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 232, + 547, + 277 + ], + "lines": [ + { + "bbox": [ + 305, + 232, + 547, + 277 + ], + "spans": [ + { + "bbox": [ + 305, + 232, + 547, + 277 + ], + "type": "text", + "content": "Figure 4. Reconstructed TSDF for KITTI dataset [14]: Subfigures (a) and (b) are the input neighboring frames. Correspondingly, (c) and (d) are horizontal TSDF slices queried from our 4D map. Note that we only display the TSDF values that are less than " + }, + { + "bbox": [ + 305, + 232, + 547, + 277 + ], + "type": "inline_equation", + "content": "0.3\\mathrm{m}" + }, + { + "bbox": [ + 305, + 232, + 547, + 277 + ], + "type": "text", + "content": "." 
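One possible realization of the certain free-space supervision (Eq. 11), with a brute-force nearest-neighbour search standing in for whatever spatial index is actually used; `w1` denotes the decoder's first weight, i.e. the static signed distance, and all names are illustrative assumptions.

```python
import torch

def certain_free_mask(p_free: torch.Tensor, origin: torch.Tensor,
                      scan: torch.Tensor, r_dense: float, tau: float) -> torch.Tensor:
    """Select free-space samples in the dense region (close to the sensor
    origin) whose nearest scan point is farther away than tau (D_certain)."""
    in_dense = (p_free - origin).norm(dim=-1) < r_dense        # dense/sparse split
    nn_dist = torch.cdist(p_free, scan).min(dim=-1).values     # distance to nearest endpoint
    return in_dense & (nn_dist > tau)

def certain_loss(w1: torch.Tensor, tau: float) -> torch.Tensor:
    """Eq. (11): the static TSDF (first decoder weight) should equal tau here."""
    return (w1 - tau).abs().mean()
```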
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 433, + 69, + 531, + 139 + ], + "blocks": [ + { + "bbox": [ + 433, + 69, + 531, + 139 + ], + "lines": [ + { + "bbox": [ + 433, + 69, + 531, + 139 + ], + "spans": [ + { + "bbox": [ + 433, + 69, + 531, + 139 + ], + "type": "image", + "image_path": "337a268be407d9a8491494d3f5d2f09f07ea4122f94e1ab921ed626c3c06785c.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 433, + 148, + 529, + 217 + ], + "blocks": [ + { + "bbox": [ + 477, + 140, + 487, + 148 + ], + "lines": [ + { + "bbox": [ + 477, + 140, + 487, + 148 + ], + "spans": [ + { + "bbox": [ + 477, + 140, + 487, + 148 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 433, + 148, + 529, + 217 + ], + "lines": [ + { + "bbox": [ + 433, + 148, + 529, + 217 + ], + "spans": [ + { + "bbox": [ + 433, + 148, + 529, + 217 + ], + "type": "image", + "image_path": "47f30848224463c1004b7eef8b60395cca84232bc99b7c86ca3c3ee443b14aed.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 476, + 218, + 486, + 226 + ], + "lines": [ + { + "bbox": [ + 476, + 218, + 486, + 226 + ], + "spans": [ + { + "bbox": [ + 476, + 218, + 486, + 226 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 290, + 495, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 290, + 495, + 303 + ], + "spans": [ + { + "bbox": [ + 317, + 290, + 495, + 303 + ], + "type": "text", + "content": "In summary, the final loss " + }, + { + "bbox": [ + 317, + 290, + 495, + 303 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{total}}" + }, + { + "bbox": [ + 317, + 290, + 495, + 303 + ], + "type": "text", + "content": " is given by:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 312, + 545, + 407 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 312, + 545, + 407 + ], + "spans": [ + { + "bbox": [ + 312, + 312, + 545, + 407 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} L _ {\\text {t o t a l}} = \\frac {1}{| \\mathcal {D} _ {\\text {s u r f}} |} \\sum_ {(\\boldsymbol {p}, t) \\in \\mathcal {D} _ {\\text {s u r f}}} L _ {\\text {s u r f}} (\\hat {d}, d _ {\\text {s u r f}}) + \\lambda_ {e} L _ {\\text {e i k o n a l}} (\\boldsymbol {p}, t) \\\\ + \\frac {\\lambda_ {f}}{| \\mathcal {D} _ {\\text {f r e e}} |} \\sum_ {(p, t) \\in \\mathcal {D} _ {\\text {f r e e}}} L _ {\\text {f r e e}} (\\hat {d}) \\\\ + \\frac {\\lambda_ {c}}{\\left| \\mathcal {D} _ {\\text {c e r t a i n}} \\right|} \\sum_ {(\\boldsymbol {p}, t) \\in \\mathcal {D} _ {\\text {c e r t a i n}}} L _ {\\text {c e r t a i n}} (\\boldsymbol {p}), \\tag {12} \\\\ \\end{array}", + "image_path": "5207c6f55d411dd394b1c020b1139f5826ff07d8e09952ffdd28fff81368eb15.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "inline_equation", + "content": "\\hat{d} = F(\\pmb{p}, t)" + }, + { + 
"bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "text", + "content": " is the predicted signed distance at the sample position " + }, + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "text", + "content": " at time " + }, + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{surf}}" + }, + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "text", + "content": " is the projective signed distance of sample " + }, + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 419, + 545, + 491 + ], + "type": "text", + "content": ". With the above loss function and data sampling strategy, we train our map offline until convergence. In Fig. 4, we show TSDF slices obtained using our optimized 4D map at different times." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "text", + "content": "One application of our 4D map representation is dynamic object segmentation. For a point " + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "text", + "content": " in the input scans " + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "inline_equation", + "content": "S_{1:N}" + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "text", + "content": ", its static signed distance value " + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "inline_equation", + "content": "w_{\\pmb{p}}^{1}" + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "text", + "content": " can be obtained by a simple query. If " + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "text", + "content": " belongs to the static background, it should have " + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "inline_equation", + "content": "w_{\\pmb{p}}^{1} = 0" + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "text", + "content": ". Therefore, we simply set a threshold " + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{static}}" + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "text", + "content": " and regard a point as dynamic if " + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "inline_equation", + "content": "w_{\\pmb{p}}^{1} > d_{\\mathrm{static}}" + }, + { + "bbox": [ + 304, + 492, + 545, + 568 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 306, + 574, + 440, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 574, + 440, + 588 + ], + "spans": [ + { + "bbox": [ + 306, + 574, + 440, + 588 + ], + "type": "text", + "content": "3.3. 
Implementation Details" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": "As hyperparameters of our approach, we use the values listed in Tab. 1 in all LiDAR experiments. Additional parameters are determined by the characteristics of the sensor and the dimensions of the scene. For instance, in the reconstruction of autonomous driving scenes, like KITTI, we set the highest resolution for the feature voxels to " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "0.3\\mathrm{m}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": ". The truncation distance is set to " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\tau = 0.5\\mathrm{m}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": ", and the dense area split threshold " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "r_{\\mathrm{dense}} = 15\\mathrm{m}" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": ". Regarding training time, it takes 12 minutes to train 140 frames from the KITTI dataset using a single Nvidia Quadro RTX 5000." + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15421" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 128, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 128, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 128, + 85 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 91, + 288, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 91, + 288, + 200 + ], + "spans": [ + { + "bbox": [ + 46, + 91, + 288, + 200 + ], + "type": "text", + "content": "In this section, we show the effectiveness of our proposed approach with respect to two aspects: (1) Static mapping quality: The static TSDF built by our method allows us to extract a surface mesh using marching cubes [29]. We compare this extracted mesh with the ground truth mesh to evaluate the reconstruction. (2) Dynamic object segmentation: As mentioned above, our method can segment out the dynamic objects in the input scans. We use point-wise dynamic object segmentation accuracy to evaluate the results." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 205, + 180, + 218 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 205, + 180, + 218 + ], + "spans": [ + { + "bbox": [ + 47, + 205, + 180, + 218 + ], + "type": "text", + "content": "4.1. Static Mapping Quality" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 223, + 287, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 223, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 46, + 223, + 287, + 342 + ], + "type": "text", + "content": "Datasets. 
We select two datasets collected in dynamic environments for quantitative evaluation. One is the synthetic dataset ToyCar3 from Co-Fusion [47], which provides accurate depth images and accurate masks of dynamic objects rendered using Blender, but also depth images with added noise. For this experiment, we select 150 frames from the whole sequence, mask out all dynamic objects in the accurate depth images, and accumulate background static points as the ground-truth static map. The original noisy depth images are used as the input for all methods." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 342, + 287, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 287, + 450 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 287, + 450 + ], + "type": "text", + "content": "Furthermore, we use the Newer College [45] dataset as the real-world dataset, which is collected using a 64-beam LiDAR. Compared with synthetic datasets, it contains more uncertainty from measurements and pose estimates. We select 1,300 frames from the courtyard part for testing and this data includes a few pedestrians as dynamic objects. This dataset offers point clouds obtained by a high-precision terrestrial laser scanner that can be directly utilized as ground truth to evaluate the mapping quality." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 450, + 287, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 450, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 46, + 450, + 287, + 498 + ], + "type": "text", + "content": "Metric and Baselines. We report the reconstruction accuracy, completeness, the Chamfer distance, and the F1-score. Further details on the computation of the metrics can be found in the supplement." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 498, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 287, + 581 + ], + "type": "text", + "content": "We compare our method with several different types of state-of-the-art methods: (i) the traditional TSDF-fusion method, VDBfusion [59], which uses space carving to eliminate the effects of dynamic objects, (ii) the data-driven-based method, neural kernel surface reconstruction (NKSR) [18], and (iii) the neural representation based 3D mapping approach, SHINE-mapping [73]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 582, + 287, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 641 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 641 + ], + "type": "text", + "content": "For NKSR [18], we use the default parameters provided by Huang et al. with their official implementation. To ensure a fair comparison with SHINE-mapping, we adopt an equal number of free space samples (15 samples), aligning with our method for consistency." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "content": "For the ToyCar3 dataset, we set VDB-Fusion's resolution to " + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "inline_equation", + "content": "1\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "content": ". 
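The exact metric computation is deferred to the supplement; the sketch below uses the common point-cloud definitions of accuracy, completion, Chamfer-L1, and F-score at a distance threshold, which may differ in detail from the authors' evaluation script.

```python
import torch

def reconstruction_metrics(pred: torch.Tensor, gt: torch.Tensor, thresh: float) -> dict:
    """Standard metrics between predicted and ground-truth point clouds.

    accuracy: mean distance pred -> gt; completion: mean distance gt -> pred;
    Chamfer-L1: average of the two; F-score: harmonic mean of precision and
    recall at the given distance threshold.
    """
    d = torch.cdist(pred, gt)                 # (P, G) pairwise distances
    acc = d.min(dim=1).values                 # pred -> gt
    comp = d.min(dim=0).values                # gt -> pred
    precision = (acc < thresh).float().mean()
    recall = (comp < thresh).float().mean()
    fscore = 2 * precision * recall / (precision + recall + 1e-8)
    return {
        "accuracy": acc.mean().item(),
        "completion": comp.mean().item(),
        "chamfer_l1": 0.5 * (acc.mean() + comp.mean()).item(),
        "f_score": fscore.item(),
    }
```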
To have all methods with a similar memory consumption, we set the resolution of SHINE-mapping's leaf feature voxel to " + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "inline_equation", + "content": "2\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "content": ", and our method's highest resolution accordingly to " + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "inline_equation", + "content": "2\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "content": ". For the Newer College dataset, we set the resolution to " + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "inline_equation", + "content": "10\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "inline_equation", + "content": "30\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "inline_equation", + "content": "30\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "content": " respectively." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 313, + 91, + 531, + 224 + ], + "blocks": [ + { + "bbox": [ + 347, + 71, + 504, + 82 + ], + "lines": [ + { + "bbox": [ + 347, + 71, + 504, + 82 + ], + "spans": [ + { + "bbox": [ + 347, + 71, + 504, + 82 + ], + "type": "text", + "content": "Table 1. Hyperparameters of our approach." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 313, + 91, + 531, + 224 + ], + "lines": [ + { + "bbox": [ + 313, + 91, + 531, + 224 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 531, + 224 + ], + "type": "table", + "html": "
<table><tr><td>Parameter</td><td>Value</td><td>Description</td></tr>
<tr><td>L</td><td>2</td><td>number of feature voxel levels</td></tr>
<tr><td>D</td><td>8</td><td>The length of feature vectors</td></tr>
<tr><td>K</td><td>32</td><td>The number of basis functions</td></tr>
<tr><td>Dmlp</td><td>2 × 64</td><td>layer and size of the MLP decoder</td></tr>
<tr><td>Ms</td><td>5</td><td>The number of surface area samples</td></tr>
<tr><td>Mf</td><td>15</td><td>The number of free space samples</td></tr>
<tr><td>λe</td><td>0.02</td><td>weight for Eikonal loss</td></tr>
<tr><td>λf</td><td>0.25</td><td>weight for free space loss</td></tr>
<tr><td>λc</td><td>0.2</td><td>weight for certain free space loss</td></tr></table>
", + "image_path": "596ccf01a51729c772c1826d526ca989d41bd6a821f383afd43547c4318d88e8.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 309, + 285, + 537, + 360 + ], + "blocks": [ + { + "bbox": [ + 305, + 232, + 545, + 277 + ], + "lines": [ + { + "bbox": [ + 305, + 232, + 545, + 277 + ], + "spans": [ + { + "bbox": [ + 305, + 232, + 545, + 277 + ], + "type": "text", + "content": "Table 2. Quantitative results of the reconstruction quality on ToyCar3. We report the distance error metrics, namely completion, accuracy and Chamfer-L1 in cm. Additionally, we show the F-score in % with a 1 cm error threshold." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 285, + 537, + 360 + ], + "lines": [ + { + "bbox": [ + 309, + 285, + 537, + 360 + ], + "spans": [ + { + "bbox": [ + 309, + 285, + 537, + 360 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Comp.↓</td><td>Acc.↓</td><td>C-L1↓</td><td>F-score ↑</td></tr>
<tr><td>VDB-fusion [59]</td><td>0.574</td><td>0.481</td><td>0.528</td><td>97.95</td></tr>
<tr><td>NKSR [18]</td><td>0.526</td><td>2.809</td><td>1.667</td><td>89.54</td></tr>
<tr><td>SHINE-mapping [73]</td><td>0.583</td><td>0.626</td><td>0.605</td><td>98.01</td></tr>
<tr><td>Ours</td><td>0.438</td><td>0.468</td><td>0.452</td><td>98.35</td></tr></table>
", + "image_path": "23f1dce81bf8edb1e0a54c55a8f8f695944c27ba556e3173fe8b310d9583ff58.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 374, + 545, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 374, + 545, + 422 + ], + "spans": [ + { + "bbox": [ + 305, + 374, + 545, + 422 + ], + "type": "text", + "content": "Results. The quantitative results for synthetic dataset ToyCar3 and real-world dataset Newer College are presented in Tab. 2 and Tab. 3, respectively. We also show the extracted meshes from all methods in Fig. 5 and Fig. 6." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 423, + 546, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 423, + 546, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 423, + 546, + 662 + ], + "type": "text", + "content": "Our method outperforms the baselines in terms of Completeness and Chamfer distance for both datasets (cf. Fig. 5 and Fig. 6). Regarding the accuracy, SHINE-mapping and VDB-Fusion can filter part of high-frequency noise by fusion of multiple frames, resulting in better performance on noisy Newer College dataset. In comparison, our method considers every scan as accurate to store 4D information, which makes it more sensitive to measurement noise. On the ToyCar3 dataset, both our method and VDB-Fusion successfully eliminate all moving objects. However, on the Newer College dataset, VDB-Fusion incorrectly eliminates the static tree and parts of the ground, resulting in poor completeness shown in Tab. 3. SHINE-mapping eliminates dynamic pedestrians on the Newer College dataset but retains a portion of the dynamic point cloud on the ToyCar3 dataset, which has a larger proportion of dynamic objects, leading to poorer accuracy in Tab. 2. NKSR performs the worst accuracy because it is unable to eliminate dynamic objects, which means it's not suitable to apply NKSR in dynamic real-world scenes directly." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 670, + 471, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 471, + 684 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 471, + 684 + ], + "type": "text", + "content": "4.2. Dynamic Object Segmentation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "Datasets. 
For dynamic object segmentation, we use the KTH-Dynamic-Benchmark [72] for evaluation, which in-" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15422" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 142, + 144 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 142, + 144 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 142, + 144 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 142, + 144 + ], + "type": "image", + "image_path": "3bf71086da1a1b85b773f0e5e204f5d7b91b2eca6c3bf9a953733509291ee25a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 64, + 145, + 130, + 154 + ], + "lines": [ + { + "bbox": [ + 64, + 145, + 130, + 154 + ], + "spans": [ + { + "bbox": [ + 64, + 145, + 130, + 154 + ], + "type": "text", + "content": "(a) Merged input scans" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 147, + 70, + 242, + 144 + ], + "blocks": [ + { + "bbox": [ + 147, + 70, + 242, + 144 + ], + "lines": [ + { + "bbox": [ + 147, + 70, + 242, + 144 + ], + "spans": [ + { + "bbox": [ + 147, + 70, + 242, + 144 + ], + "type": "image", + "image_path": "1c779af19739d9302162ada79cefa43d162815e520617177d0d1635c4de1fc86.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 184, + 145, + 209, + 154 + ], + "lines": [ + { + "bbox": [ + 184, + 145, + 209, + 154 + ], + "spans": [ + { + "bbox": [ + 184, + 145, + 209, + 154 + ], + "type": "text", + "content": "(b) Ours" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 246, + 70, + 341, + 144 + ], + "blocks": [ + { + "bbox": [ + 246, + 70, + 341, + 144 + ], + "lines": [ + { + "bbox": [ + 246, + 70, + 341, + 144 + ], + "spans": [ + { + "bbox": [ + 246, + 70, + 341, + 144 + ], + "type": "image", + "image_path": "0de7f7c019dd294888f49a44810c6e34c566c7561b3cc26592fa09f737d4ddb9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 266, + 145, + 326, + 154 + ], + "lines": [ + { + "bbox": [ + 266, + 145, + 326, + 154 + ], + "spans": [ + { + "bbox": [ + 266, + 145, + 326, + 154 + ], + "type": "text", + "content": "(c) VDB-Fusion [59]" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 345, + 70, + 441, + 144 + ], + "blocks": [ + { + "bbox": [ + 345, + 70, + 441, + 144 + ], + "lines": [ + { + "bbox": [ + 345, + 70, + 441, + 144 + ], + "spans": [ + { + "bbox": [ + 345, + 70, + 441, + 144 + ], + "type": "image", + "image_path": "b1d5c0cea9788d0c498e0c84bb6d7636ad5325398ea9d49b8206dbd4d65beb08.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 373, + 145, + 416, + 154 + ], + "lines": [ + { + "bbox": [ + 373, + 145, + 416, + 154 + ], + "spans": [ + { + "bbox": [ + 373, + 145, + 416, + 154 + ], + "type": "text", + "content": "(d) NKSR [18]" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 444, + 70, + 539, + 
144 + ], + "blocks": [ + { + "bbox": [ + 444, + 70, + 539, + 144 + ], + "lines": [ + { + "bbox": [ + 444, + 70, + 539, + 144 + ], + "spans": [ + { + "bbox": [ + 444, + 70, + 539, + 144 + ], + "type": "image", + "image_path": "c267728fff044577e6303ec989e5b017853a7e9bb385809da6d3fea8c2130214.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 458, + 145, + 529, + 155 + ], + "lines": [ + { + "bbox": [ + 458, + 145, + 529, + 155 + ], + "spans": [ + { + "bbox": [ + 458, + 145, + 529, + 155 + ], + "type": "text", + "content": "(e) SHINE-mapping [73]" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 51, + 189, + 144, + 313 + ], + "blocks": [ + { + "bbox": [ + 46, + 159, + 547, + 182 + ], + "lines": [ + { + "bbox": [ + 46, + 159, + 547, + 182 + ], + "spans": [ + { + "bbox": [ + 46, + 159, + 547, + 182 + ], + "type": "text", + "content": "Figure 5. A comparison of the static mapping results of different methods on the ToyCar3 dataset. There are two dynamic toy cars moving through the scene. Our method can reconstruct the static scene with fine details and eliminate the dynamic car." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 189, + 144, + 313 + ], + "lines": [ + { + "bbox": [ + 51, + 189, + 144, + 313 + ], + "spans": [ + { + "bbox": [ + 51, + 189, + 144, + 313 + ], + "type": "image", + "image_path": "76977bb0fe97692b3d565c308af89e8a8a2dca663c7ac697a2d7b4b835810906.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 64, + 319, + 130, + 327 + ], + "lines": [ + { + "bbox": [ + 64, + 319, + 130, + 327 + ], + "spans": [ + { + "bbox": [ + 64, + 319, + 130, + 327 + ], + "type": "text", + "content": "(a) Merged input scans" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 149, + 189, + 244, + 315 + ], + "blocks": [ + { + "bbox": [ + 149, + 189, + 244, + 315 + ], + "lines": [ + { + "bbox": [ + 149, + 189, + 244, + 315 + ], + "spans": [ + { + "bbox": [ + 149, + 189, + 244, + 315 + ], + "type": "image", + "image_path": "91f788a922f82e91fbc40e959d3ee2609248ef5a463a558f2224577ead6d5732.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 184, + 318, + 209, + 327 + ], + "lines": [ + { + "bbox": [ + 184, + 318, + 209, + 327 + ], + "spans": [ + { + "bbox": [ + 184, + 318, + 209, + 327 + ], + "type": "text", + "content": "(b) Ours" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 332, + 546, + 365 + ], + "lines": [ + { + "bbox": [ + 46, + 332, + 546, + 365 + ], + "spans": [ + { + "bbox": [ + 46, + 332, + 546, + 365 + ], + "type": "text", + "content": "Figure 6. A comparison of the static mapping results of different methods on the Newer College dataset. Several pedestrians are moving through the scene during the data collection. Our method can reconstruct the static scene completely and eliminate the moving pedestrians. Although VDB-Fusion manages to eliminate the pedestrians, it incorrectly removes the tree highlighted in the orange box." 
+ } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 248, + 189, + 342, + 314 + ], + "blocks": [ + { + "bbox": [ + 248, + 189, + 342, + 314 + ], + "lines": [ + { + "bbox": [ + 248, + 189, + 342, + 314 + ], + "spans": [ + { + "bbox": [ + 248, + 189, + 342, + 314 + ], + "type": "image", + "image_path": "953a87e2edc51b31a2d84fe969341705b56b354bc4b4ccb211b5a218e66620cd.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 265, + 318, + 326, + 327 + ], + "lines": [ + { + "bbox": [ + 265, + 318, + 326, + 327 + ], + "spans": [ + { + "bbox": [ + 265, + 318, + 326, + 327 + ], + "type": "text", + "content": "(c) VDB-Fusion [59]" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 347, + 189, + 442, + 315 + ], + "blocks": [ + { + "bbox": [ + 347, + 189, + 442, + 315 + ], + "lines": [ + { + "bbox": [ + 347, + 189, + 442, + 315 + ], + "spans": [ + { + "bbox": [ + 347, + 189, + 442, + 315 + ], + "type": "image", + "image_path": "ea17f4283b2069b34a1e533e4b94e9243b58afea03a4b50586f50adfb1dc82eb.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 373, + 318, + 416, + 327 + ], + "lines": [ + { + "bbox": [ + 373, + 318, + 416, + 327 + ], + "spans": [ + { + "bbox": [ + 373, + 318, + 416, + 327 + ], + "type": "text", + "content": "(d) NKSR [18]" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 447, + 193, + 541, + 314 + ], + "blocks": [ + { + "bbox": [ + 447, + 193, + 541, + 314 + ], + "lines": [ + { + "bbox": [ + 447, + 193, + 541, + 314 + ], + "spans": [ + { + "bbox": [ + 447, + 193, + 541, + 314 + ], + "type": "image", + "image_path": "72e778c05a486bee3df7a6946756520826d3e8ee21cb3054ceecbcc9fa5e8f74.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 457, + 318, + 529, + 327 + ], + "lines": [ + { + "bbox": [ + 457, + 318, + 529, + 327 + ], + "spans": [ + { + "bbox": [ + 457, + 318, + 529, + 327 + ], + "type": "text", + "content": "(e) SHINE-mapping [73]" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "table", + "bbox": [ + 50, + 430, + 276, + 504 + ], + "blocks": [ + { + "bbox": [ + 46, + 377, + 287, + 421 + ], + "lines": [ + { + "bbox": [ + 46, + 377, + 287, + 421 + ], + "spans": [ + { + "bbox": [ + 46, + 377, + 287, + 421 + ], + "type": "text", + "content": "Table 3. Quantitative results of the reconstruction quality on Newer College. We report the distance error metrics, namely completion, accuracy and Chamfer-L1 in cm. Additionally, we show the F-score in % with a " + }, + { + "bbox": [ + 46, + 377, + 287, + 421 + ], + "type": "inline_equation", + "content": "20\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 377, + 287, + 421 + ], + "type": "text", + "content": " error threshold." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 430, + 276, + 504 + ], + "lines": [ + { + "bbox": [ + 50, + 430, + 276, + 504 + ], + "spans": [ + { + "bbox": [ + 50, + 430, + 276, + 504 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Comp.↓</td><td>Acc.↓</td><td>C-L1↓</td><td>F-score ↑</td></tr>
<tr><td>VDB-fusion [59]</td><td>7.32</td><td>5.99</td><td>6.65</td><td>96.68</td></tr>
<tr><td>NKSR [18]</td><td>6.87</td><td>9.28</td><td>8.08</td><td>95.65</td></tr>
<tr><td>SHINE-mapping [73]</td><td>6.80</td><td>5.86</td><td>6.33</td><td>97.67</td></tr>
<tr><td>Ours</td><td>5.85</td><td>6.49</td><td>6.17</td><td>97.50</td></tr></table>
", + "image_path": "30d7724ff38e86bdca4ad571a8abc17e7eef7171dca75f4a02d26a3ead4e5c7f.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "table_body" + } + ], + "index": 23 + }, + { + "bbox": [ + 46, + 519, + 286, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 519, + 286, + 651 + ], + "spans": [ + { + "bbox": [ + 46, + 519, + 286, + 651 + ], + "type": "text", + "content": "cludes four sequences in total: sequence 00 (frame 4,390 - 4,530) and sequence 05 (frame 2,350 - 2,670) from the KITTI dataset [3, 14], which are captured by a 64-beam LiDAR, one sequence from the Argoverse2 dataset [66] consisting of 575 frames captured by two 32-beam LiDARs, and a semi-indoor sequence captured by a sparser 16-beam LiDAR. All sequences come with corresponding pose files and point-wise dynamic or static labels as the ground truth. It is worth noting that the poses for KITTI 00 and 05 were obtained from SuMa [2] and the pose files for the Semi-indoor sequence come from NDT-SLAM [50]." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 46, + 653, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 286, + 712 + ], + "type": "text", + "content": "Metric and Baselines. The KTH-Dynamic-Benchmark evaluates the performance of the method by measuring the classification accuracy of dynamic points (DA%), static points (SA%) and also their associated accuracy (AA%) where " + }, + { + "bbox": [ + 46, + 653, + 286, + 712 + ], + "type": "inline_equation", + "content": "AA = \\sqrt{DA \\cdot SA}" + }, + { + "bbox": [ + 46, + 653, + 286, + 712 + ], + "type": "text", + "content": ". The benchmark provides various" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 379, + 545, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 379, + 545, + 558 + ], + "spans": [ + { + "bbox": [ + 304, + 379, + 545, + 558 + ], + "type": "text", + "content": "bases such as the state-of-the-art LiDAR dynamic object removal methods – Erasor [27] and Removert [21], as well as the traditional 3D mapping method, Octomap [17, 69], and its modified versions, Octomap with ground fitting and outlier filtering. As SHINE-mapping demonstrates the ability to remove dynamic objects in our static mapping experiments, we also report its result in this benchmark. Additionally, we report the performance of the state-of-the-art online moving object segmentation methods, 4DMOS [31] and its extension MapMOS [32]. As these two methods utilize KITTI sequences 00 and 05 for training, we only show the results of the remaining two sequences. For the parameter setting, we set our method's leaf resolution to " + }, + { + "bbox": [ + 304, + 379, + 545, + 558 + ], + "type": "inline_equation", + "content": "0.3\\mathrm{m}" + }, + { + "bbox": [ + 304, + 379, + 545, + 558 + ], + "type": "text", + "content": ", and the threshold for segmentation as " + }, + { + "bbox": [ + 304, + 379, + 545, + 558 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{static}} = 0.16\\mathrm{m}" + }, + { + "bbox": [ + 304, + 379, + 545, + 558 + ], + "type": "text", + "content": ". We set the leaf resolution for Octomap to " + }, + { + "bbox": [ + 304, + 379, + 545, + 558 + ], + "type": "inline_equation", + "content": "0.1\\mathrm{m}" + }, + { + "bbox": [ + 304, + 379, + 545, + 558 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": "Results. The quantitative results of the dynamic object segmentation are shown in Tab. 4. And we depict the accumulated static points generated by different methods in Fig. 7. We can see that our method achieves the best associated accuracy (AA) in three autonomous driving sequences (KITTI 00, KITTI 05, Argoverse2) and vastly outperforms baselines. The supervised learning-based methods 4DMOS and MapMOS do not obtain good dynamic accuracy (DA) due to limited generalizability. Erasor and Octomap tend to over-segment dynamic objects, resulting in poor static accuracy (SA). Removert and SHINE-mapping are too conservative and cannot detect all dynamic objects. Benefiting" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15423" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 101, + 541, + 235 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 545, + 94 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 545, + 94 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 545, + 94 + ], + "type": "text", + "content": "Table 4. Quantitative results of the dynamic object removal quality on the KTH-Dynamic-Benchmark. We report the static accuracy SA, dynamic static DA and the associated accuracy AA. Octomap* refers to the modified Octomap implementation by Zhang et al. [72]." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 101, + 541, + 235 + ], + "lines": [ + { + "bbox": [ + 47, + 101, + 541, + 235 + ], + "spans": [ + { + "bbox": [ + 47, + 101, + 541, + 235 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Method</td><td colspan="3">KITTI Seq. 00</td><td colspan="3">KITTI Seq. 05</td><td colspan="3">Argoverse2</td><td colspan="3">Semi-Indoor</td></tr>
<tr><td>SA</td><td>DA</td><td>AA</td><td>SA</td><td>DA</td><td>AA</td><td>SA</td><td>DA</td><td>AA</td><td>SA</td><td>DA</td><td>AA</td></tr>
<tr><td>Octomap [17]</td><td>68.05</td><td>99.69</td><td>82.37</td><td>66.28</td><td>99.24</td><td>81.10</td><td>65.91</td><td>96.70</td><td>79.84</td><td>88.97</td><td>82.18</td><td>85.51</td></tr>
<tr><td>Octomap* [72]</td><td>93.06</td><td>98.67</td><td>95.83</td><td>93.54</td><td>92.48</td><td>93.01</td><td>82.66</td><td>82.44</td><td>82.55</td><td>96.79</td><td>73.50</td><td>84.34</td></tr>
<tr><td>Removert [21]</td><td>99.44</td><td>41.53</td><td>64.26</td><td>99.42</td><td>22.28</td><td>47.06</td><td>98.97</td><td>31.16</td><td>55.53</td><td>99.96</td><td>12.15</td><td>34.85</td></tr>
<tr><td>Erasor [27]</td><td>66.70</td><td>98.54</td><td>81.07</td><td>69.40</td><td>99.06</td><td>82.92</td><td>77.51</td><td>99.18</td><td>87.68</td><td>94.90</td><td>66.26</td><td>79.30</td></tr>
<tr><td>SHINE [73]</td><td>98.99</td><td>92.37</td><td>95.63</td><td>98.91</td><td>53.27</td><td>72.58</td><td>97.66</td><td>72.62</td><td>84.21</td><td>98.88</td><td>59.19</td><td>76.51</td></tr>
<tr><td>4DMOS [31]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>99.94</td><td>69.33</td><td>83.24</td><td>99.99</td><td>10.60</td><td>32.55</td></tr>
<tr><td>MapMOS [32]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>99.96</td><td>85.88</td><td>92.65</td><td>99.99</td><td>4.75</td><td>21.80</td></tr>
<tr><td>Ours</td><td>99.46</td><td>98.47</td><td>98.97</td><td>99.54</td><td>98.36</td><td>98.95</td><td>99.17</td><td>95.91</td><td>97.53</td><td>94.17</td><td>72.79</td><td>82.79</td></tr></table>
", + "image_path": "ae778bfdf95f37252f4f12a572df121cde330e8eaad9de9c6b89eca3a7e7b8f0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 50, + 242, + 147, + 335 + ], + "blocks": [ + { + "bbox": [ + 50, + 242, + 147, + 335 + ], + "lines": [ + { + "bbox": [ + 50, + 242, + 147, + 335 + ], + "spans": [ + { + "bbox": [ + 50, + 242, + 147, + 335 + ], + "type": "image", + "image_path": "0d4071b0915fae6a9433525e2edf75f04011427bea7e98c0651858af1529c7aa.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 149, + 242, + 246, + 335 + ], + "blocks": [ + { + "bbox": [ + 149, + 242, + 246, + 335 + ], + "lines": [ + { + "bbox": [ + 149, + 242, + 246, + 335 + ], + "spans": [ + { + "bbox": [ + 149, + 242, + 246, + 335 + ], + "type": "image", + "image_path": "2f552eab73051f3695c1c0c715dd5528566af4d6d212b402992088910f586f48.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 248, + 242, + 345, + 335 + ], + "blocks": [ + { + "bbox": [ + 248, + 242, + 345, + 335 + ], + "lines": [ + { + "bbox": [ + 248, + 242, + 345, + 335 + ], + "spans": [ + { + "bbox": [ + 248, + 242, + 345, + 335 + ], + "type": "image", + "image_path": "38d56d63475f86fc99982597abe589fa53938c1bd40edd2b5bd8216e4cefb41a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 347, + 242, + 444, + 335 + ], + "blocks": [ + { + "bbox": [ + 347, + 242, + 444, + 335 + ], + "lines": [ + { + "bbox": [ + 347, + 242, + 444, + 335 + ], + "spans": [ + { + "bbox": [ + 347, + 242, + 444, + 335 + ], + "type": "image", + "image_path": "e787023611604dd5b37c4ec3cbb1ef0fb010461fd659e1e768439ddac27eb2a9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 445, + 242, + 543, + 335 + ], + "blocks": [ + { + "bbox": [ + 445, + 242, + 543, + 335 + ], + "lines": [ + { + "bbox": [ + 445, + 242, + 543, + 335 + ], + "spans": [ + { + "bbox": [ + 445, + 242, + 543, + 335 + ], + "type": "image", + "image_path": "54f569bf883300115200d5a4baecd3327c09bc8e5d043ed2319c01ec309113e2.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 50, + 347, + 146, + 439 + ], + "blocks": [ + { + "bbox": [ + 50, + 347, + 146, + 439 + ], + "lines": [ + { + "bbox": [ + 50, + 347, + 146, + 439 + ], + "spans": [ + { + "bbox": [ + 50, + 347, + 146, + 439 + ], + "type": "image", + "image_path": "923ca0b2ac3e5ccc602f8336c0af5359b2e4d3620d7455893ff41f150ccf460f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 442, + 545, + 498 + ], + "lines": [ + { + "bbox": [ + 46, + 442, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 46, + 442, + 545, + 498 + ], + "type": "text", + "content": "Figure 7. Comparison of dynamic object removal results produced by our proposed method and three baseline methods on the Argoverse2 data sequence of the KTH-benchmark. We show the bird's eye view on the first row and the zoomed view from the blue frustum shown in (a) on the second row. For the ground truth results in (a), the dynamic objects are shown in red. We only show the static points of ground truth for clearer comparison in zoomed view (f). 
We highlight the over-segmented parking car and sign by Erasor and the undetected moving vehicle by Removert." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 148, + 347, + 245, + 439 + ], + "blocks": [ + { + "bbox": [ + 148, + 347, + 245, + 439 + ], + "lines": [ + { + "bbox": [ + 148, + 347, + 245, + 439 + ], + "spans": [ + { + "bbox": [ + 148, + 347, + 245, + 439 + ], + "type": "image", + "image_path": "ce1802da6bd36c4e27dabd16dd9cd3a1ef92f182496dffc1fc742e308e704bea.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 247, + 347, + 343, + 439 + ], + "blocks": [ + { + "bbox": [ + 247, + 347, + 343, + 439 + ], + "lines": [ + { + "bbox": [ + 247, + 347, + 343, + 439 + ], + "spans": [ + { + "bbox": [ + 247, + 347, + 343, + 439 + ], + "type": "image", + "image_path": "fd50e0b220aaf97d5a0ae1d9ab88fef81a6232dd4a69e3a3a6d20b60585a8cd7.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 346, + 347, + 442, + 440 + ], + "blocks": [ + { + "bbox": [ + 346, + 347, + 442, + 440 + ], + "lines": [ + { + "bbox": [ + 346, + 347, + 442, + 440 + ], + "spans": [ + { + "bbox": [ + 346, + 347, + 442, + 440 + ], + "type": "image", + "image_path": "cda7541e3b937718bfaa2d93ff7156a58547e843f0ccd83fb491cbdec3354be0.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 444, + 347, + 542, + 440 + ], + "blocks": [ + { + "bbox": [ + 444, + 347, + 542, + 440 + ], + "lines": [ + { + "bbox": [ + 444, + 347, + 542, + 440 + ], + "spans": [ + { + "bbox": [ + 444, + 347, + 542, + 440 + ], + "type": "image", + "image_path": "d01e5ba088140fc580080acc67070bd61f654c0500118004a84a45b80490b448.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 511, + 287, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 511, + 287, + 547 + ], + "spans": [ + { + "bbox": [ + 46, + 511, + 287, + 547 + ], + "type": "text", + "content": "from the continuity and large capacity of the 4D neural representation, we strike a better balance between preserving static background points and removing dynamic objects." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 548, + 287, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 548, + 287, + 595 + ], + "spans": [ + { + "bbox": [ + 46, + 548, + 287, + 595 + ], + "type": "text", + "content": "It is worth mentioning again that our method does not rely on any pre-processing or post-processing algorithm such as ground fitting, outlier filtering, and clustering, but also does not require labels for training." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 609, + 119, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 609, + 119, + 621 + ], + "spans": [ + { + "bbox": [ + 47, + 609, + 119, + 621 + ], + "type": "text", + "content": "5. 
Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "content": "In this paper, we propose a 4D implicit neural map representation for dynamic scenes that allows us to represent the TSDF of static and dynamic parts of a scene. For this purpose, we use a hierarchical voxel-based feature representation that is then decoded into weights for basis functions to represent a time-varying TSDF that can be queried at arbitrary locations. For learning the representation from a se" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 511, + 545, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 511, + 545, + 607 + ], + "spans": [ + { + "bbox": [ + 304, + 511, + 545, + 607 + ], + "type": "text", + "content": "quence of LiDAR scans, we design an effective data sampling strategy and loss functions. Equipped with our proposed representation, we experimentally show that we are able to tackle the challenging problems of static mapping and dynamic object segmentation. More specifically, our experiments show that our method has the ability to accurately reconstruct 3D maps of the static parts of a scene and can completely remove moving objects at the same time." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 612, + 545, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 612, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 304, + 612, + 545, + 685 + ], + "type": "text", + "content": "Limitations. While our method achieves compelling results, we have to acknowledge that we currently rely on estimated poses by a separate SLAM approach, but also cannot apply our approach in an online fashion. However, we see this as an avenue for future research into joint incremental mapping and pose estimation." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgements. We thank Benedikt Mersch for the fruitful discussion and for providing experiment baselines." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15424" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "text", + "content": "[1] Ioan A. Barsan, Peidong Liu, Marc Pollefeys, and Andreas Geiger. Robust Dense Mapping for Large-Scale Dynamic Environments. 
In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018. 1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 135, + 287, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 287, + 168 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 287, + 168 + ], + "type": "text", + "content": "[2] Jens Behley and Cyril Stachniss. Efficient Surfel-Based SLAM using 3D Laser Range Data in Urban Environments. In Proc. of Robotics: Science and Systems (RSS), 2018. 3, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 168, + 288, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 168, + 288, + 224 + ], + "spans": [ + { + "bbox": [ + 53, + 168, + 288, + 224 + ], + "type": "text", + "content": "[3] Jens Behley, Martin Garbade, Aandres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Juergen Gall. SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2019. 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 224, + 288, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 288, + 257 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 288, + 257 + ], + "type": "text", + "content": "[4] Peter Biber and Tom Duckett. Dynamic Maps for Long-Term Operation of Mobile Service Robots. In Proc. of Robotics: Science and Systems (RSS), 2005. 1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 258, + 288, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 288, + 323 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 288, + 323 + ], + "type": "text", + "content": "[5] Cesar Cadena, Luca Carlone, Henry Carrillo, Yasir Latif, Davide Scaramuzza, Jose Neira, Ian Reid, and John J. Leonard. Past, Present, and Future of Simultaneous Localization And Mapping: Towards the Robust-Perception Age. IEEE Trans. on Robotics (TRO), 32(6):1309-1332, 2016. 1, 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 324, + 288, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 324, + 288, + 378 + ], + "spans": [ + { + "bbox": [ + 53, + 324, + 288, + 378 + ], + "type": "text", + "content": "[6] Hongrui Cai, Wanquan Feng, Xuetao Feng, Yan Wang, and Juyong Zhang. Neural surface reconstruction of dynamic scenes with monocular rgb-d camera. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2022. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 380, + 288, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 380, + 288, + 413 + ], + "spans": [ + { + "bbox": [ + 53, + 380, + 288, + 413 + ], + "type": "text", + "content": "[7] Ang Cao and Justin Johnson. HexPlane: A Fast Representation for Dynamic Scenes. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 414, + 288, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 414, + 288, + 468 + ], + "spans": [ + { + "bbox": [ + 53, + 414, + 288, + 468 + ], + "type": "text", + "content": "[8] Xieyuanli Chen, Shijie Li, Benedikt Mersch, Louis Wiesmann, Juergen Gall, Jens Behley, and Cyril Stachniss. Moving Object Segmentation in 3D LiDAR Data: A Learning-based Approach Exploiting Sequential Data. IEEE Robotics and Automation Letters (RA-L), 6(4):6529-6536, 2021. 
2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 469, + 288, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 469, + 288, + 524 + ], + "spans": [ + { + "bbox": [ + 53, + 469, + 288, + 524 + ], + "type": "text", + "content": "[9] Xieyuanli Chen, Benedikt Mersch, Lucas Nunes, Rodrigo Marcuzzi, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Automatic Labeling to Generate Training Data for Online LiDAR-Based Moving Object Segmentation. IEEE Robotics and Automation Letters (RA-L), 7(3):6107-6114, 2022. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 525, + 287, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 525, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 48, + 525, + 287, + 579 + ], + "type": "text", + "content": "[10] Xu Chen, Tianjian Jiang, Jie Song, Max Rietmann, Andreas Geiger, Michael J. Black, and Otmar Hilliges. Fast-snarf: A fast deformer for articulated neural fields. IEEE Trans. on Pattern Analysis and Machine Intelligence (TPAMI), 45(10): 11796-11809, 2023. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 580, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 580, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 580, + 287, + 624 + ], + "type": "text", + "content": "[11] Pierre Dellenbach, Jean-Emmanuel Deschaud, Bastien Jacquet, and Francois Goulette. CT-ICP Real-Time Elastic LiDAR Odometry with Loop Closure. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2022. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 287, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 658 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 658 + ], + "type": "text", + "content": "[12] Jean-Emmanuel Deschaud. IMLS-SLAM: scan-to-model matching based on 3D data. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "type": "text", + "content": "[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik R. Warburg, Benjamin Recht, and Angjoo Kanazawa. K-Planes: Explicit Radiance Fields in Space, Time, and Appearance. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "type": "text", + "content": "[14] Andreas Geiger, Peter Lenz, and Raquel Urtasun. Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite. In Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2012. 5, 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 119, + 545, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 119, + 545, + 163 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 545, + 163 + ], + "type": "text", + "content": "[15] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. 
Implicit Geometric Regularization for Learning Shapes. In Proc. of the Intl. Conf. on Machine Learning (ICML), 2020. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 165, + 547, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 165, + 547, + 209 + ], + "spans": [ + { + "bbox": [ + 307, + 165, + 547, + 209 + ], + "type": "text", + "content": "[16] Dirk Hähnel, Dirk Schulz, and Wolfram Burgard. Mobile robot mapping in populated environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2002. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 211, + 545, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 211, + 545, + 255 + ], + "spans": [ + { + "bbox": [ + 307, + 211, + 545, + 255 + ], + "type": "text", + "content": "[17] Armin Hornung, Kai M. Wurm, Maren Bennewitz, Cyril Stachniss, and Wolfram Burgard. OctoMap: An Efficient Probabilistic 3D Mapping Framework Based on Octrees. Autonomous Robots, 34(3):189-206, 2013. 1, 2, 7, 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 257, + 545, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 257, + 545, + 301 + ], + "spans": [ + { + "bbox": [ + 307, + 257, + 545, + 301 + ], + "type": "text", + "content": "[18] Jiahui Huang, Zan Gojcic, Matan Atzmon, Or Litany, Sanja Fidler, and Francis Williams. Neural Kernel Surface Reconstruction. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 6, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 303, + 545, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 303, + 545, + 346 + ], + "spans": [ + { + "bbox": [ + 307, + 303, + 545, + 346 + ], + "type": "text", + "content": "[19] Shengyu Huang, Zan Gojcic, Jiahui Huang, Andreas Wieser, and Konrad Schindler. Dynamic 3D Scene Analysis by Point Cloud Accumulation. In Proc. of the Europ. Conf. on Computer Vision (ECCV), 2022. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 349, + 547, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 349, + 547, + 404 + ], + "spans": [ + { + "bbox": [ + 307, + 349, + 547, + 404 + ], + "type": "text", + "content": "[20] Shengyu Huang, Zan Gojcic, Zian Wang, Francis Williams, Yoni Kasten, Sanja Fidler, Konrad Schindler, and Or Litany. Neural LiDAR Fields for Novel View Synthesis. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2023. 2, 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 406, + 545, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 406, + 545, + 450 + ], + "spans": [ + { + "bbox": [ + 307, + 406, + 545, + 450 + ], + "type": "text", + "content": "[21] Giseop Kim and Ayoung Kim. Remove, Then Revert: Static Point Cloud Map Construction Using Multiresolution Range Images. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020. 2, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 452, + 545, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 452, + 545, + 496 + ], + "spans": [ + { + "bbox": [ + 307, + 452, + 545, + 496 + ], + "type": "text", + "content": "[22] Xin Kong, Shikun Liu, Marwan Taher, and Andrew J. Davison. vMAP: Vectorised Object Mapping for Neural Field SLAM. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 
2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 498, + 545, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 498, + 545, + 564 + ], + "spans": [ + { + "bbox": [ + 307, + 498, + 545, + 564 + ], + "type": "text", + "content": "[23] Abhijit Kundu, Kyle Genova, Xiaoqi Yin, Alireza Fathi, Caroline Pantofaru, Leonidas Guibas, Andrea Tagliasacchi, Frank Dellaert, and Thomas Funkhouser. Panoptic neural fields: A semantic object-aware neural scene representation. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 566, + 545, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 566, + 545, + 610 + ], + "spans": [ + { + "bbox": [ + 307, + 566, + 545, + 610 + ], + "type": "text", + "content": "[24] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 612, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 612, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 307, + 612, + 545, + 667 + ], + "type": "text", + "content": "[25] Zhaoshuo Li, Thomas Müller, Alex Evans, Russell H Taylor, Mathias Unberath, Ming-Yu Liu, and Chen-Hsuan Lin. Neuralangelo: High-fidelity neural surface reconstruction. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 5" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 669, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 669, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 669, + 545, + 713 + ], + "type": "text", + "content": "[26] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. DynIBaR: Neural Dynamic Image-Based Rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15425" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[27] Hyungtae Lim, Sungwon Hwang, and Hyun Myung. ERASOR: Egocentric Ratio of Pseudo Occupancy-Based Dynamic Object Removal for Static 3D Point Cloud Map Building. IEEE Robotics and Automation Letters (RA-L), 6(2): 2272-2279, 2021. 2, 7, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 287, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 287, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 287, + 184 + ], + "type": "text", + "content": "[28] Hyungtae Lim, Lucas Nunes, Benedikt Mersch, Xieyuanli Chen, Jens Behley, and Cyril Stachniss. 
ERASOR2: Instance-Aware Robust 3D Mapping of the Static World in Dynamic Scenes. In Proc. of Robotics: Science and Systems (RSS), 2023. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 186, + 287, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 229 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 229 + ], + "type": "text", + "content": "[29] William E. Lorensen and Harvey E. Cline. Marching Cubes: a High Resolution 3D Surface Construction Algorithm. In Proc. of the Intl. Conf. on Computer Graphics and Interactive Techniques (SIGGRAPH), 1987. 2, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 231, + 288, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 231, + 288, + 285 + ], + "spans": [ + { + "bbox": [ + 48, + 231, + 288, + 285 + ], + "type": "text", + "content": "[30] John McCormac, Ankur Handa, Aandrew J. Davison, and Stefan Leutenegger. SemanticFusion: Dense 3D Semantic Mapping with Convolutional Neural Networks. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2017. 1, 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 287, + 287, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 287, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 48, + 287, + 287, + 342 + ], + "type": "text", + "content": "[31] Benedikt Mersch, Xieyuanli Chen, Ignacio Vizzo, Lucas Nunes, Jens Behley, and Cyril Stachniss. Receding Moving Object Segmentation in 3D LiDAR Data Using Sparse 4D Convolutions. IEEE Robotics and Automation Letters (RA-L), 7(3):7503-7510, 2022. 2, 7, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 343, + 287, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 343, + 287, + 408 + ], + "spans": [ + { + "bbox": [ + 48, + 343, + 287, + 408 + ], + "type": "text", + "content": "[32] Benedikt Mersch, Tiziano Guadagnino, Xieyuanli Chen, Tiziano, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Building Volumetric Beliefs for Dynamic Environments Exploiting Map-Based Moving Object Segmentation. IEEE Robotics and Automation Letters (RA-L), 8(8):5180-5187, 2023. 2, 7, 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 411, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 411, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 48, + 411, + 287, + 464 + ], + "type": "text", + "content": "[33] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 467, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 467, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 467, + 287, + 510 + ], + "type": "text", + "content": "[34] Daniel Meyer-Delius, Maximilian Beinhofer, and Wolfram Burgard. Occupancy Grid Models for Robot Mapping in Changing Environments. In Proc. of the Conf. on Advancements of Artificial Intelligence (AAAI), 2012. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "type": "text", + "content": "[35] Ben Mildenhall, Pratul P. 
Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In Proc. of the Europ. Conf. on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 568, + 287, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 287, + 610 + ], + "type": "text", + "content": "[36] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. on Graphics, 41(4): 102:1-102:15, 2022. 2, 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 613, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 678 + ], + "type": "text", + "content": "[37] Richard A. Newcombe, Shahram Izadi, Otmar Hilliges, David Molyneaux, David Kim, Andrew J. Davison, Pushmeet Kohli, Jamie Shotton, Steve Hodges, and Andrew Fitzgibbon. KinectFusion: Real-Time Dense Surface Mapping and Tracking. In Proc. of the Intl. Symposium on Mixed and Augmented Reality (ISMAR), 2011. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[38] Joseph Ortiz, Alexander Clegg, Jing Dong, Edgar Sucar, David Novotny, Michael Zollhoefer, and Mustafa Mukadam. isdf: Real-time neural signed distance fields for robot per" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 94 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 94 + ], + "type": "text", + "content": "ception. In Proc. of Robotics: Science and Systems (RSS), 2022. 5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 96, + 545, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 151 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 151 + ], + "type": "text", + "content": "[39] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguere, and Cyril Stachniss. ReFusion: 3D Reconstruction in Dynamic Environments for RGB-D Cameras Exploiting Residuals. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2019. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 152, + 545, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 207 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 207 + ], + "type": "text", + "content": "[40] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning Continuous Signed Distance Functions for Shape Representation. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 
2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 209, + 545, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 209, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 209, + 545, + 262 + ], + "type": "text", + "content": "[41] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable Neural Radiance Fields. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 264, + 545, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 264, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 308, + 264, + 545, + 330 + ], + "type": "text", + "content": "[42] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M. Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. ACM Trans. on Graphics (TOG), 40(6), 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 332, + 545, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 332, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 308, + 332, + 545, + 376 + ], + "type": "text", + "content": "[43] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 377, + 545, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 377, + 545, + 419 + ], + "spans": [ + { + "bbox": [ + 308, + 377, + 545, + 419 + ], + "type": "text", + "content": "[44] Sameera Ramasinghe, Violetta Shevchenko, Gil Avraham, and Anton Van Den Hengel. *Blirf: Band limited radiance fields for dynamic scene modeling.* arXiv preprint arXiv:2302.13543, 2023. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 422, + 545, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 422, + 545, + 476 + ], + "spans": [ + { + "bbox": [ + 308, + 422, + 545, + 476 + ], + "type": "text", + "content": "[45] Milad Ramezani, Yiduo Wang, Marco Camurri, David Wisth, Matias Mattamala, and Maurice Fallon. The Newer College Dataset: Handheld LiDAR, Inertial and Vision with Ground Truth. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 478, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 478, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 308, + 478, + 545, + 533 + ], + "type": "text", + "content": "[46] Konstantinos Rematas, Andrew Liu, Pratul P. Srinivasan, Jonathan T. Barron, Andrea Tagliasacchi, Thomas Funkhouser, and Vittorio Ferrari. Urban radiance fields. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 534, + 545, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 534, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 308, + 534, + 545, + 578 + ], + "type": "text", + "content": "[47] Martin Rünz and Lourdes Agapito. Co-Fusion: Real-Time Segmentation, Tracking and Fusion of Multiple Objects. In Proc. of the IEEE Intl. Conf. 
on Robotics & Automation (ICRA), 2017. 1, 2, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 579, + 545, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 579, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 308, + 579, + 545, + 633 + ], + "type": "text", + "content": "[48] Martin Runz, Maud Buffier, and Lourdes Agapito. MaskFusion: Real-Time Recognition, Tracking and Reconstruction of Multiple Moving Objects. In Proc. of the Intl. Symposium on Mixed and Augmented Reality (ISMAR), 2018. 1, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "type": "text", + "content": "[49] Jari Saarinen, Henrik Andreasson, and Achim Lilienthal. Independent Markov Chain Occupancy Grid Maps for Representation of Dynamic Environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2012. 1, 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "text", + "content": "[50] Jari P. Saarinen, Todor Stoyanov, Henrik Andreasson, and Achim J. Lilienthal. Fast 3D Mapping in Highly Dynamic" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "15426" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "text", + "content": "Environments Using Normal Distributions Transform Occupancy Maps. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2013. 1, 2, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 108, + 287, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 108, + 287, + 163 + ], + "spans": [ + { + "bbox": [ + 48, + 108, + 287, + 163 + ], + "type": "text", + "content": "[51] Renato F. Salas-Moreno, Richard A. Newcombe, Hauke Strasdat, Paul H. Kelly, and Andrew J. Davison. SLAM++: Simultaneous Localisation and Mapping at the Level of Objects. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2013. 1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 165, + 287, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 165, + 287, + 219 + ], + "spans": [ + { + "bbox": [ + 48, + 165, + 287, + 219 + ], + "type": "text", + "content": "[52] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 222, + 287, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 222, + 287, + 266 + ], + "spans": [ + { + "bbox": [ + 48, + 222, + 287, + 266 + ], + "type": "text", + "content": "[53] Chonghyuk Song, Gengshan Yang, Kangle Deng, Jun-Yan Zhu, and Deva Ramanan. Total-recon: Deformable scene reconstruction for embodied view synthesis. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2023. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 268, + 287, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 268, + 287, + 332 + ], + "spans": [ + { + "bbox": [ + 48, + 268, + 287, + 332 + ], + "type": "text", + "content": "[54] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. NeRF-Player: A Streamable Dynamic Scene Representation with Decomposed Neural Radiance Fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 335, + 287, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 335, + 287, + 379 + ], + "spans": [ + { + "bbox": [ + 48, + 335, + 287, + 379 + ], + "type": "text", + "content": "[55]Cyrill Stachniss and Wolfram Burgard. Mobile Robot Mapping and Localization in Non-Static Environments. In Proc. of the National Conf. on Artificial Intelligence (AAAI), 2005.1,2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 382, + 287, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 382, + 287, + 426 + ], + "spans": [ + { + "bbox": [ + 48, + 382, + 287, + 426 + ], + "type": "text", + "content": "[56]Cyrill Stachniss,John J.Leonard,and Sebastian Thrun. Springer Handbook of Robotics,2nd edition, chapter Chapt.46:Simultaneous Localization and Mapping. Springer Verlag,2016.1,2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 428, + 287, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 428, + 287, + 449 + ], + "spans": [ + { + "bbox": [ + 48, + 428, + 287, + 449 + ], + "type": "text", + "content": "[57] Sebastian Thrun, Wolfram Burgard, and Dieter Fox. Probabilistic Robotics. MIT Press, 2005. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 452, + 287, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 452, + 287, + 517 + ], + "spans": [ + { + "bbox": [ + 48, + 452, + 287, + 517 + ], + "type": "text", + "content": "[58] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 520, + 287, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 520, + 287, + 562 + ], + "spans": [ + { + "bbox": [ + 48, + 520, + 287, + 562 + ], + "type": "text", + "content": "[59] Ignacio Vizzo, Tiziano Guadagnino, Jens Behley, and Cyril Stachniss. VDBFusion: Flexible and Efficient TSDF Integration of Range Sensor Data. Sensors, 22(3):1296, 2022. 
6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 567, + 287, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 287, + 620 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 287, + 620 + ], + "type": "text", + "content": "[60] Ignacio Vizzo, Tiziano Guadagnino, Benedikt Mersch, Louis Wiesmann, Jens Behley, and Cyril Stachniss. KISS-ICP: In Defense of Point-to-Point ICP - Simple, Accurate, and Robust Registration If Done the Right Way. IEEE Robotics and Automation Letters (RA-L), 8(2):1029-1036, 2023. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 623, + 287, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 623, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 623, + 287, + 677 + ], + "type": "text", + "content": "[61] Aishan Walcott-Bryant, Michael Kaess, Hordur Johannsson, and John J. Leonard. Dynamic Pose Graph SLAM: Long-Term Mapping in Low Dynamic Environments. In Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2012. 1, 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[62] Chaoyang Wang, Ben Eckart, Simon Lucey, and Orazio Gallo. Neural trajectory fields for dynamic novel view synthesis. arXiv preprint arXiv:2105.05994, 2021. 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 663 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "text", + "content": "[63] Chung-Yi Weng, Brian Curless, Pratul P. Srinivasan, Jonathan T. Barron, and Ira Kemelmacher-Shlizerman. HumanNeRF: Free-Viewpoint Rendering of Moving People From Monocular Video. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 129, + 545, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 172 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 172 + ], + "type": "text", + "content": "[64] Thomas Whelan, Stefan Leutenegger, Renato F. Salas-Moreno, Ben Glocker, and Andrew J. Davison. ElasticFusion: Dense SLAM Without A Pose Graph. In Proc. of Robotics: Science and Systems (RSS), 2015. 1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 175, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 228 + ], + "type": "text", + "content": "[65] Louis Wiesmann, Tiziano Guadagnino, Ignacio Vizzo, Nicky Zimmerman, Yue Pan, Haofei Kuang, Jens Behley, and Cyrill Stachniss. LocNDF: Neural Distance Field Mapping for Robot Localization. IEEE Robotics and Automation Letters (RA-L), 8(8):4999-5006, 2023. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 230, + 545, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 230, + 545, + 305 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 545, + 305 + ], + "type": "text", + "content": "[66] Benjamin Wilson, William Qi, Tanmay Agarwal, John Lambert, Jagjeet Singh, Siddhesh Khandelwal, Bowen Pan, Ratnesh Kumar, Andrew Hartnett, Jhony Kaesemodel Pontes, Deva Ramanan, Peter Carr, and James Hays. Argoverse 2: Next Generation Datasets for Self-driving Perception and Forecasting. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2021. 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 308, + 545, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 308, + 545, + 339 + ], + "spans": [ + { + "bbox": [ + 307, + 308, + 545, + 339 + ], + "type": "text", + "content": "[67] Denis F. Wolf and Guarav S. Sukhatme. Mobile Robot Simultaneous Localization and Mapping in Dynamic Environments. Autonomous Robots, 19, 2005. 1, 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 342, + 545, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 342, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 307, + 342, + 545, + 396 + ], + "type": "text", + "content": "[68] Tianhao Wu, Fangcheng Zhong, Andrea Tagliasacchi, Forrester Cole, and Cengiz Oztireli. D" + }, + { + "bbox": [ + 307, + 342, + 545, + 396 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 307, + 342, + 545, + 396 + ], + "type": "text", + "content": "NeRF: Self-Supervised Decoupling of Dynamic and Static Objects from a Monocular Video. In Proc. of the Conf. on Neural Information Processing Systems (NeurIPS), 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 397, + 545, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 397, + 545, + 461 + ], + "spans": [ + { + "bbox": [ + 307, + 397, + 545, + 461 + ], + "type": "text", + "content": "[69] Kai M. Wurm, Armin Hornung, Maren Bennewitz, Cyril Stachniss, and Wolfram Burgard. OctoMap: A Probabilistic, Flexible, and Compact 3D Map Representation for Robotic Systems. In Workshop on Best Practice in 3D Perception and Modeling for Mobile Manipulation, IEEE Int. Conf. on Robotics & Automation (ICRA), 2010. 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 464, + 545, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 464, + 545, + 496 + ], + "spans": [ + { + "bbox": [ + 307, + 464, + 545, + 496 + ], + "type": "text", + "content": "[70] Dongyu Yan, Xiaoyang Lyu, Jieqi Shi, and Yi Lin. Efficient Implicit Neural Reconstruction Using LiDAR. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 498, + 545, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 498, + 545, + 552 + ], + "spans": [ + { + "bbox": [ + 307, + 498, + 545, + 552 + ], + "type": "text", + "content": "[71] Wentao Yuan, Zhaoyang Lv, Tanner Schmidt, and Steven Lovegrove. Star: Self-supervised tracking and reconstruction of rigid objects in motion with neural rendering. In Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 
2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 554, + 545, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 554, + 545, + 608 + ], + "spans": [ + { + "bbox": [ + 307, + 554, + 545, + 608 + ], + "type": "text", + "content": "[72] Qingwen Zhang, Daniel Duberg, Ruoyu Geng, Mingkai Jia, Lujia Wang, and Patric Jensfelt. A dynamic points removal benchmark in point cloud maps. In IEEE 26th International Conference on Intelligent Transportation Systems (ITSC), pages 608-614, 2023. 6, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 609, + 545, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 609, + 545, + 663 + ], + "spans": [ + { + "bbox": [ + 307, + 609, + 545, + 663 + ], + "type": "text", + "content": "[73] Xingguang Zhong, Yue Pan, Jens Behley, and Cyril Stachniss. SHINE-Mapping: Large-Scale 3D Mapping Using Sparse Hierarchical Implicit Neural Representations. In Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023. 2, 6, 7, 8" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15427" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Multi-frame Fusion for Video Stabilization/475676e5-4dd7-4a8c-bd05-cc44ef21267a_content_list.json b/2024/3D Multi-frame Fusion for Video Stabilization/475676e5-4dd7-4a8c-bd05-cc44ef21267a_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..1166631675a364f0ac341ffc7b9bb80c47d3eef1 --- /dev/null +++ b/2024/3D Multi-frame Fusion for Video Stabilization/475676e5-4dd7-4a8c-bd05-cc44ef21267a_content_list.json @@ -0,0 +1,1712 @@ +[ + { + "type": "text", + "text": "3D Multi-frame Fusion for Video Stabilization", + "text_level": 1, + "bbox": [ + 250, + 130, + 718, + 150 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhan Peng Xinyi Ye Weiyue Zhao Tianqi Liu Huiqiang Sun Baopu Li Zhiguo Cao* School of AIA, Huazhong University of Science and Technology", + "bbox": [ + 114, + 180, + 843, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{peng_zhan,xinyiye,zhaoweiyue,tq_1iu,shq1031,zgcao}@hust.edu.cn", + "bbox": [ + 210, + 219, + 759, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "bpli.cuhk@gmail.com", + "bbox": [ + 398, + 237, + 570, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 289, + 313, + 305 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we present RStab, a novel framework for video stabilization that integrates 3D multi-frame fusion through volume rendering. Departing from conventional methods, we introduce a 3D multi-frame perspective to generate stabilized images, addressing the challenge of full-frame generation while preserving structure. The core of our RStab framework lies in Stabilized Rendering (SR), a volume rendering module, fusing multi-frame information in 3D space. Specifically, SR involves warping features and colors from multiple frames by projection, fusing them into descriptors to render the stabilized image. 
However, the precision of warped information depends on the projection accuracy, a factor significantly influenced by dynamic regions. In response, we introduce the Adaptive Ray Range (ARR) module to integrate depth priors, adaptively defining the sampling range for the projection process. Additionally, we propose Color Correction (CC) assisting geometric constraints with optical flow for accurate color aggregation. Thanks to the three modules, our RStab demonstrates superior performance compared with previous stabilizers in the field of view (FOV), image quality, and video stability across various datasets.", + "bbox": [ + 75, + 321, + 473, + 654 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 683, + 209, + 699 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "With the widespread adoption of smartphones, videos have become an important medium for documenting and sharing lives. The videos captured with handheld devices often suffer from annoying shakes. To mitigate this prevalent issue, numerous researchers devote efforts to developing video stabilization algorithms. These methods typically involve three steps: camera trajectory estimation, trajectory smoothing, and stabilized frame generation.", + "bbox": [ + 75, + 709, + 468, + 830 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To obtain a smooth image sequence, known as stabilized frames, early methods employ 2D-plane transformations (homography [20, 23], feature trajectories [9, 10, 21], mo", + "bbox": [ + 76, + 830, + 468, + 877 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1336f4a2b313cdd05a32d4207e5d13be2c05c1f5cfd168e15fa182be2445fa84.jpg", + "image_caption": [ + "Figure 1. Existing dilemmas and our method. (a) and (b) exhibit cropping issues, characteristic of single-frame methods. (a) and (c) encounter difficulties in preserving structure, inherent in 2D-based approaches. Fortunately, our proposed method (d) not only mitigates distortion and artifacts but also maintains no-cropping stabilized frames." + ], + "image_footnote": [], + "bbox": [ + 500, + 287, + 890, + 511 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tion vectors [18]) on single frames. However, these methods suffer from two major problems. First, these single-frame approaches may produce notable missing regions at the boundary of generated stabilized images, requiring aggressive cropping to ensure a rectangular frame for video (cropping in Fig. 1(a)), further resulting in a substantial reduction in the field of view (FOV). Second, 2D transformations could give rise to structure distortion due to the lack of 3D physical information (shear in Fig. 1(a)).", + "bbox": [ + 496, + 638, + 893, + 775 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In pursuit of the stabilized full-frame, recent 2D methods [5, 24, 42] leverage nearby frames to fill in the unseen content within the target frame. However, due to the inherent absence of physical constraints in 2D transformations, 2D-based multiple-frame methods fail to preserve the structure, especially the parallax regions (Fig. 1(c)). 
To obtain the structure-preserved stabilized frame, some methods [11, 17, 19, 31] leverage 3D transformations to simulate", + "bbox": [ + 496, + 779, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author.", + "bbox": [ + 94, + 887, + 220, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "7507", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "real-world settings, employing camera poses and epipolar constraints to ensure the image structure. However, due to limited information from a single frame, they cannot generate a full frame, as shown in Fig. 1(b). In brief, the ongoing challenge of concurrently addressing full-frame generation while preserving structure for video stabilization remains a major concern for most current research works.", + "bbox": [ + 75, + 90, + 470, + 196 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To overcome the above problems, intuitively, employing multi-frame fusion with 3D transformations could offer a promising solution. However, two issues may still hinder 3D transformations from incorporating information from neighboring frames. First, since view changes induce geometric deformation, the incorporated information from nearby frames may be inconsistent, suggesting that image blending, e.g., averaging, may lead to distortion. Second, videos feature dynamic objects across frames, which cannot be adequately modeled by 3D constraints. The direct aggregation of information from nearby frames with 3D projection results in a noticeable blur (refer to the experiments).", + "bbox": [ + 75, + 199, + 468, + 381 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Motivated by the above insights and analysis, we propose a video stabilization framework termed RStab for integrating multi-frame fusion and 3D constraints to achieve full-frame generation and structure preservation. Specifically, we propose Stabilized Rendering (SR), a 3D multiframe fusion module using volume rendering. Instead of simple image blending, SR employs both color and feature space to fuse nearby information into spatial descriptors for the scene geometry, such as volume densities of spatial points. Visible points usually come with high volume densities, exhibiting consistent textures in their projections across frames. The observation suggests that points with higher consistency in aggregating information exhibit higher volume densities, implying a greater contribution to the final rendered color.", + "bbox": [ + 75, + 382, + 470, + 608 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To mitigate the impacts of dynamic regions, we propose Adaptive Ray Range (ARR) and Color Correction(CC) modules. The introduction of multi-frame depth priors in ARR constrains the sampling range for spatial points around the surface of objects. 
A narrow sampling range around the surface decreases the risk of projecting spatial points onto dynamic regions, thereby suppressing the inconsistent information aggregation induced by the dynamic objects. Despite ARR, colors are sensitive to projection inaccuracy, indicating a narrow range is insufficient. Hence, we design CC to refine the projection for color aggregation. The core of CC lies in assisting geometry constraints with optical flow, which matches pixels with similar textures containing the color information.", + "bbox": [ + 75, + 611, + 468, + 823 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By applying the three modules, RStab demonstrates the ability of full-frame generation with structure preservation (Fig. 1(d)) and outperforms all previous video stabilization algorithms in FOV, image quality, and video stability across various datasets. In summary, our key contributions", + "bbox": [ + 75, + 824, + 468, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "are as follows:", + "bbox": [ + 500, + 92, + 599, + 104 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We present a novel 3D multi-frame fusion framework for video stabilization to render full-frame stabilized images with structure preservation.", + "- We propose Stabilized Rendering, which fuses multiple frames in both color and feature space. We augment Stabilized Rendering with the introduction of the Adaptive Ray Range module and Color Correction module, enhancing its capacity to address dynamic regions.", + "- Our video stabilization framework, RStab, demonstrates state-of-the-art (SOTA) performance across various datasets." + ], + "bbox": [ + 513, + 108, + 890, + 272 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 286, + 640, + 301 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2D-based Video Stabilization. 2D video stabilization algorithms model camera trajectory and generate stabilized frames through transformations on a 2D plane, including homography [6, 7, 20, 23, 40], feature trajectories [9, 10, 21, 38], motion vectors [16, 18, 22, 36], and dense flow fields [4, 5, 24, 40, 41]. Early methods [6, 7] estimate global transformations, which proved inadequate for handling complex camera effects such as the parallax effect. Certain approaches estimate multiple local motions [20, 22] or pixel-wise warping field [36, 40, 41] for a single image, offering some relief for the challenges encountered by global transformation methods. However, due to the limited information from a single frame, these methods may result in missing content in the stabilized video. To address this, some methods [5, 24, 42] fuse information from multiple neighboring frames, enabling full-frame generation. Despite achieving a full frame, the 2D transformations lack real-world physical constraints, leading to challenges in preserving image structure.", + "bbox": [ + 496, + 311, + 890, + 598 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D-based Video Stabilization. 3D-based video stabilizers model 3D camera trajectory and stabilize frames with epipolar projection. Some methods [11, 17] rely on the video itself, warping images instructed by projection while preserving content. Others integrate specialized hardware, such as depth cameras [19], light field cameras [31], gyroscopes [30], and IMU sensors [12], to assist with scene geometry. 
Both kinds of stabilizers estimate the physical motion of the real world and introduce 3D constraints in warping, benefiting stability and structure preservation. However, relying on a single frame, 3D-based video stabilizers have a limited field of view. To mitigate the issue, in this paper, we extend single-frame to multi-frame in 3D space for video stabilization.", + "bbox": [ + 496, + 598, + 890, + 808 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural Rendering. As a significant work in view synthesis, NeRF[15] attains photorealistic synthesized images through implicit volumetric representation and volume rendering. It combines multi-view information, leveraging 3D geometric constraints and pixel-wise rendering to generate high-quality images without missing content from novel", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "7508", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ac1113c650ce96de13326eaa486ec6f11b1f3b840e219ef57435d9b28ce4d924.jpg", + "image_caption": [ + "Figure 2. Overview of our framework. (1) Given input frames $\\{\\mathbf{I}_t\\}_{t=1}^N$ with a shaky trajectory $\\{\\mathbf{P}_t\\}_{t=1}^N$ , our purpose lies in rendering stabilized video sequence $\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N$ with smoothed trajectory $\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N$ . Here, the input trajectories $\\{\\mathbf{P}_t\\}_{t=1}^N$ derive from preprocessing, while the smoothed trajectories $\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N$ are generated using a Trajectory Smoothing module. (2) In addition to $\\{\\mathbf{P}_t\\}_{t=1}^N$ , depth maps $\\{\\mathbf{D}_t\\}_{t=1}^N$ and optical flow $\\{\\mathbf{F}_t\\}_{t=1}^N$ can be obtained during preprocessing. We aggregate $\\{\\mathbf{D}_t\\}_{t=1}^N$ into the ray range $\\{\\tilde{\\mathbf{R}}_t\\}_{t=1}^N$ using the Adaptive Ray Range module. The ray range $\\{\\tilde{\\mathbf{R}}_t\\}_{t=1}^N$ , along with $\\{\\mathbf{F}_t\\}_{t=1}^N$ and the smoothed trajectory $\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N$ , serves as inputs to the Stabilized Rendering module. Conducting Stabilized Rendering, enhanced by the Color Correction module, we fuse the input frames $\\{\\mathbf{I}_t\\}_{t=1}^N$ and their features $\\{\\mathcal{F}_t\\}_{t=1}^N$ to render the stabilized video sequence $\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N$ ." + ], + "image_footnote": [], + "bbox": [ + 76, + 87, + 893, + 340 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "viewpoints. While NeRF-based methods [1, 2, 13, 25, 28] produce impressive synthesized image quality, its limitation in per-scene training hampers its direct application in video stabilization. Certain approaches [3, 15, 32, 34, 35, 37] strive to improve the generalization of NeRF, but they are not inherently well-suited for video stabilization tasks. Some recent methods [14, 26] attempt to apply techniques in NeRF to stabilize videos, these approaches inherit the limitations of the vanilla NeRF, necessitating retraining for each specific scene. Inspired by generalized rendering technologies from IBRNet[34] and ENeRF[15], which utilize multi-view images and associated features to predict radiance fields, we further propose the Stabilized Rendering. 
Stabilized Rendering, enhanced by the proposed Adaptive Ray Range module and Color Correction module, extends the volume rendering technique to video stabilization.", + "bbox": [ + 75, + 478, + 472, + 720 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 736, + 168, + 750 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our pipeline is shown in Fig. 2. Given a shaky frame sequence $\\{\\mathbf{I}_t\\}_{t=1}^N$ of length $N$ , our objective is to generate a stabilized sequence $\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N$ . For preprocessing of $\\{\\mathbf{I}_t\\}_{t=1}^N$ , we estimate optical flow $\\{\\mathbf{F}_t\\}_{t=1}^N$ , depth maps $\\{\\mathbf{D}_t\\}_{t=1}^N$ , and camera trajectory $\\{\\mathbf{P}_t\\}_{t=1}^N$ . With $\\{\\mathbf{D}_t\\}_{t=1}^N$ , $\\{\\mathbf{P}_t\\}_{t=1}^N$ and smoothed camera trajectory $\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N$ as input, the Adaptive Ray Range module aggregates multi-view depth maps into the ray ranges $\\{\\tilde{\\mathbf{R}}_t\\}_{t=1}^N$ . Guided by the", + "bbox": [ + 76, + 762, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ranges, Stabilized Rendering enhanced by the Color Correction module generates stabilized video sequence $\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N$ through fusing the input frames $\\{\\mathbf{I}_t\\}_{t=1}^N$ and feature maps $\\{\\mathcal{F}_t\\}_{t=1}^N$ obtained through feature extraction network.", + "bbox": [ + 498, + 477, + 890, + 547 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We start with preprocessing a sequence of input frames $\\{\\mathbf{I}_t\\}_{t=1}^N$ to estimate associated depth maps $\\{\\mathbf{D}_t\\}_{t=1}^N$ and camera trajectory $\\{\\mathbf{P}_t\\}_{t=1}^N$ . These depth maps and camera poses are employed for camera trajectory smoothing. In our pursuit of consistent and smooth camera trajectories, we harness the flexibility of the Gaussian smoothing function: $\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N = \\phi_{sm}(\\{\\mathbf{P}_t\\}_{t=1}^N)$ , where $\\phi_{sm}$ represents the Gaussian smoothing filter, offering adjustable parameters for both the smoothing window and stability. These parameters can be fine-tuned to meet specific requirements and constraints. In Sec. 3.1, we elaborate on rendering a stabilized image with its neighboring frames through Stabilized Rendering. Due to dynamic regions, the conventional 3D-constraint-based rendering fails to adequately represent the geometry. Differing from the conventional rendering, Sec. 3.2 introduces the utilization of depth priors to constrain the sampling range of spatial points around potential geometries, such as the area around the surface of objects. Additionally, in Sec. 3.3, we discuss refining projecting inaccuracy to ensure consistent local color intensities.", + "bbox": [ + 496, + 551, + 892, + 861 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Stabilizing a video involves rendering a image sequence $\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N$ with corresponding stabilized poses $\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N$ . In", + "bbox": [ + 500, + 866, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "7509", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "practice, we adopt a sliding window strategy for frame-by-frame rendering stabilized video. 
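For illustration, a minimal sketch of the trajectory-smoothing step $\{\tilde{\mathbf{P}}_t\}_{t=1}^N = \phi_{sm}(\{\mathbf{P}_t\}_{t=1}^N)$ described above is given below. The function name, the use of SciPy, and the naive Gaussian filtering of rotation vectors are assumptions made here for brevity; they are not taken from the paper's implementation, and rotations would need more careful handling (e.g., interpolation on SO(3)) in practice.

```python
# Hedged sketch of Gaussian camera-trajectory smoothing (phi_sm).
import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy.spatial.transform import Rotation as R

def smooth_trajectory(poses, sigma=3.0):
    """poses: (N, 4, 4) shaky camera poses; returns smoothed poses."""
    t = poses[:, :3, 3]                                  # translations (N, 3)
    rvec = R.from_matrix(poses[:, :3, :3]).as_rotvec()   # rotations   (N, 3)

    # Gaussian low-pass filtering along the time axis; sigma acts as the
    # adjustable smoothing-window / stability parameter mentioned above.
    t_s = gaussian_filter1d(t, sigma=sigma, axis=0, mode="nearest")
    r_s = gaussian_filter1d(rvec, sigma=sigma, axis=0, mode="nearest")

    smoothed = np.repeat(np.eye(4)[None], len(poses), axis=0)
    smoothed[:, :3, :3] = R.from_rotvec(r_s).as_matrix()
    smoothed[:, :3, 3] = t_s
    return smoothed
```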
For clarity, we illustrate the rendering process with a single target camera pose $\\tilde{\\mathbf{P}}$ at the timestamp $T$ and its temporal neighborhood $\\Omega_T$ .", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Stabilized Rendering", + "text_level": 1, + "bbox": [ + 76, + 157, + 274, + 174 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Stabilized Rendering is a multi-frame fusion module founded on epipolar constraints which fuses input images and feature maps to render a stable, uncropped video sequence. Considering a pixel $\\tilde{\\mathbf{x}}$ situated in the stabilized image $\\tilde{\\mathbf{I}}$ under a specific target camera pose $\\tilde{\\mathbf{P}}$ , we sample $L$ spatial points sharing projection situation $\\tilde{\\mathbf{x}}$ . These sampled points span depth $\\{\\tilde{d}_i\\}_{i=1}^L$ distributed along the ray with sampling range, denoted as $\\tilde{\\mathbf{R}}(\\tilde{\\mathbf{x}})$ . We project $\\tilde{\\mathbf{x}}$ at depth $\\tilde{d}_i$ onto the neighboring input frames $\\{\\mathbf{I}_t\\}_{t \\in \\Omega_T}$ at corresponding positions $\\{\\mathbf{x}_t^i\\}_{t \\in \\Omega_T}$ by", + "bbox": [ + 75, + 181, + 468, + 342 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {t} ^ {i} = \\mathbf {K P} _ {t} \\tilde {\\mathbf {P}} ^ {- 1} \\tilde {d} _ {i} \\mathbf {K} ^ {- 1} \\tilde {\\mathbf {x}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 186, + 348, + 468, + 367 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{K}$ represents the camera intrinsic parameters shared by all frames in a video and $i\\in (0,L]$ . With the projected points $\\{\\mathbf{x}_t^i\\}_{t\\in \\Omega_T}$ , we aggregate features $\\{\\mathcal{F}_t(\\mathbf{x}_t^i)\\}_{t\\in \\Omega_T}$ in neighboring frames to predict the volume density $\\sigma_{i}$ for the spatial point by", + "bbox": [ + 75, + 373, + 468, + 449 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma_ {i} = \\phi_ {m l p} \\left(\\left\\{\\mathcal {F} _ {t} \\left(\\mathbf {x} _ {t} ^ {i}\\right) \\right\\} _ {t \\in \\Omega_ {T}}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 455, + 468, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\phi_{mlp}$ is a Multiple Layer Perceptron (refer to Supp. for details). Eq. 2 is contingent upon the consistency among features. Specifically, if a sampled spatial point aligns with the ground geometry, the multi-view features of projected points would be similar. This condition establishes scene-independent geometric constraints. When considering the associated color $\\mathbf{c}_i$ , a conventional method is a linear combination for aggregation:", + "bbox": [ + 75, + 479, + 468, + 599 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {c} _ {i} = \\sum_ {t \\in \\Omega_ {T}} \\omega_ {t - T} \\mathbf {I} _ {t} \\left(\\mathbf {x} _ {t} ^ {i}\\right), \\sum_ {t \\in \\Omega_ {T}} \\omega_ {t - T} = 1, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 606, + 468, + 640 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\omega_{t - T}$ represents adaptable parameters determined by the geometric characteristics, such as the volume density $\\sigma_{i}$ . Since the establishment of $\\mathbf{c}_i$ solely relies on input frames, it is training-free to accommodate unforeseen scenes. 
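To make Eqs. (1)-(3) concrete, the following NumPy sketch projects a stabilized pixel at each sampled depth into the neighbouring frames and aggregates features and colors. It is illustrative only: `phi_mlp` is a placeholder for the learned density MLP, `w` stands in for the blending weights $\omega_{t-T}$, and a world-to-camera $4\times 4$ pose convention with nearest-pixel lookups (no interpolation or bounds checks) is assumed.

```python
import numpy as np

def project(x_tilde, d, K, P_tilde, P_t):
    """Eq. (1): map pixel x_tilde = (u, v) at depth d from the stabilized
    camera (world-to-camera pose P_tilde) into neighbouring frame t."""
    ray = np.linalg.inv(K) @ np.array([x_tilde[0], x_tilde[1], 1.0])
    X_cam = d * ray                                    # 3D point, stabilized camera
    X_world = np.linalg.inv(P_tilde) @ np.append(X_cam, 1.0)
    X_t = (P_t @ X_world)[:3]                          # same point in camera t
    uvw = K @ X_t
    return uvw[:2] / uvw[2]                            # projected pixel x_t^i

def aggregate(x_tilde, depths, K, P_tilde, poses, feats, imgs, phi_mlp, w):
    """Eqs. (2)-(3): per sampled depth, gather multi-view features/colors."""
    w = np.asarray(w, dtype=float)                     # blending weights, sum to 1
    sigmas, colors = [], []
    for d in depths:
        f_views, c_views = [], []
        for P_t, F_t, I_t in zip(poses, feats, imgs):
            u, v = np.round(project(x_tilde, d, K, P_tilde, P_t)).astype(int)
            f_views.append(F_t[v, u])                  # feature at projection
            c_views.append(I_t[v, u])                  # color at projection
        sigmas.append(phi_mlp(np.stack(f_views)))      # Eq. (2): volume density
        colors.append((w[:, None] * np.stack(c_views)).sum(axis=0))  # Eq. (3)
    return np.array(sigmas), np.stack(colors)
```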
In volume rendering, the set $\\{\\mathbf{c}_i,\\sigma_i\\}_{i = 1}^L$ , describing spatial points along the same ray, determine the color intensity of $\\tilde{\\mathbf{x}}$ by", + "bbox": [ + 75, + 645, + 468, + 736 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\tilde {\\mathbf {I}} (\\tilde {\\mathbf {x}}) = \\sum_ {i = 1} ^ {L} A _ {i} (1 - e x p (- \\sigma_ {i})) \\mathbf {c} _ {i}, \\tag {4} \\\\ A _ {i} = \\exp \\left(- \\sum_ {j = 1} ^ {i - 1} \\sigma_ {i}\\right). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 743, + 468, + 835 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In Stabilized Rendering, Eqs. 1 imposes epipolar constraints on features and colors warped from multiple neighboring frames. Eqs. 2 & Eqs. 3 aggregate the multi-frame information into spatial descriptors $\\{\\mathbf{c}_i,\\sigma_i\\}_{i = 1}^L$ , and", + "bbox": [ + 75, + 839, + 468, + 902 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/968f14f4ff99e0b97872e1b9c1e88e86758c608cfef980f25031308059c45cf5.jpg", + "image_caption": [ + "Figure 3. Illustration of depth projection and splatting. Left: The depth projection involves lifting a pixel $\\mathbf{x}_t$ to 3D space using the estimated depth $\\mathbf{D}_t(\\mathbf{x}_t)$ and projecting to the subpixel $\\tilde{\\mathbf{x}}$ . The depth of $\\tilde{\\mathbf{x}}$ can be calculated and denoted as $\\tilde{\\mathbf{D}}_t(\\tilde{\\mathbf{x}})$ . Right: As $\\tilde{\\mathbf{x}}$ is not precisely projected onto a pixel coordinate, we convert its depth to adjacent pixels, e.g. $\\tilde{\\mathbf{x}}_p$ , with a distance-associated weight $\\omega_t$ ." + ], + "image_footnote": [], + "bbox": [ + 504, + 87, + 890, + 220 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Eqs. 4 renders stabilized images utilizing these descriptors for each pixel. Epipolar constraints guarantee the structure preservation and per-pixel rendering guarantees full-frame generation. However, the effectiveness of the aforementioned process highly depends on the ray range $\\tilde{\\mathbf{R}} (\\tilde{\\mathbf{x}})$ guiding the sampling. If $\\tilde{\\mathbf{R}} (\\tilde{\\mathbf{x}})$ is not distributed near the surface of objects, the model may aggregate incorrect features into inferior descriptors and diminish rendering quality. The forthcoming section will introduce how to adaptively define the ray range $\\tilde{\\mathbf{R}} (\\tilde{\\mathbf{x}})$ to avoid the issue above.", + "bbox": [ + 496, + 354, + 890, + 506 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Adaptive Ray Range", + "text_level": 1, + "bbox": [ + 500, + 513, + 694, + 531 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Eq. 4 of Stabilized Rendering highlights the dependence of the final color intensity of $\\tilde{\\mathbf{I}} (\\tilde{\\mathbf{x}})$ on the color $c_{i}$ of the 3D point where the ray hits the object for the first time. It indicates that ray ranges around the ground geometry for the sampling process will benefit scene representation. A direct method to define the ray range entails treating the sequence of frames as a static scene: estimating the coarse geometry of each ray and rendering through spatial points sampled from re-defined fine ranges, such as [15, 34]. We argue that the effectiveness of the coarse-to-fine ray range relies on the geometry estimation grounded in epipolar constraints. 
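For reference, the volume-rendering accumulation of Eq. (4) above can be sketched as follows; as in standard volume rendering, the sketch takes the transmittance $A_i$ over the densities of the samples preceding sample $i$ along the ray.

```python
import numpy as np

def render_pixel(sigmas, colors):
    """sigmas: (L,) densities, colors: (L, 3) aggregated colors along one ray."""
    alpha = 1.0 - np.exp(-sigmas)                          # per-sample opacity
    cum = np.concatenate([[0.0], np.cumsum(sigmas)[:-1]])  # sum of sigma_j, j < i
    A = np.exp(-cum)                                       # transmittance A_i
    weights = A * alpha
    return (weights[:, None] * colors).sum(axis=0)         # stabilized color I~(x~)
```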
However, dynamic regions, violating epipolar constraints, make the defined range unreliable.", + "bbox": [ + 496, + 537, + 890, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To tackle this challenge, we turn to the task of depth estimation. The depth model [11] employs optical flow to impose constraints on dynamic scenes. As optical flow relies on feature matching rather than epipolar constraints, it matches points with features rather than epipolar constraints, showcasing insensitivity to dynamic regions. Consequently, the estimated depth maps derived from this depth model are less susceptible to interference from dynamic objects. We propose to define an adaptive range with preestimated neighboring depth maps $\\{\\mathbf{D}_t\\}_{t\\in \\Omega_T}$ . In particular, we construct the range utilizing the mean and variance", + "bbox": [ + 496, + 734, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "7510", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/faf2cc05008f7e460cf267eaf6f39d79d25dac7a7ca19094acb82964f541415e.jpg", + "image_caption": [ + "Figure 4. The effect of temporal weights. The introduction of temporal weights can mitigate distortion." + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 272, + 238 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b2e9948ceeb3d90a3b8ed45369e03cfab3af6ef17b588bcd5c24b772927b264a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 274, + 88, + 468, + 239 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "of aggregated depth maps from nearby frames.", + "bbox": [ + 76, + 305, + 385, + 319 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As illustrated in the left part of Fig. 3, we project $\\mathbf{x}_t$ in the neighboring frame with pose $\\mathbf{P}_t$ at the depth $\\mathbf{D}_t(\\mathbf{x}_t)$ onto sub-pixel $\\tilde{\\mathbf{x}}$ of the stabilized frame with pose $\\tilde{\\mathbf{P}}$ according to the inverse of Eq. 1. However, as sub-pixel $\\tilde{\\mathbf{x}}$ is not precisely projected onto a specific pixel coordinate, direct utilization of $\\tilde{\\mathbf{D}}_t(\\tilde{\\mathbf{x}})$ to estimate ray ranges for pixels is not feasible. To overcome this limitation, a splatting method [29] is employed, as illustrated in the right part of Fig. 
3, converting $\\tilde{\\mathbf{D}}_t(\\tilde{\\mathbf{x}})$ in the following manner:", + "bbox": [ + 75, + 319, + 468, + 455 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\mathbf {D}} _ {t} (\\tilde {\\mathbf {x}} _ {p}) = \\frac {\\sum_ {i} w _ {d} \\tilde {\\mathbf {D}} _ {t} (\\tilde {\\mathbf {x}} _ {i})}{\\sum_ {i} w _ {d}}, w _ {d} = \\prod (\\mathbf {1} - | \\tilde {\\mathbf {x}} _ {p} - \\tilde {\\mathbf {x}} _ {i} |), (5)\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 467, + 468, + 503 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\tilde{\\mathbf{x}}_p$ is a pixel and $\\tilde{\\mathbf{x}}_i$ is the $i$ -th sub-pixel $\\tilde{\\mathbf{x}}$ around $\\tilde{\\mathbf{x}}_p$ satisfying the condition $|\\tilde{\\mathbf{x}}_p - \\tilde{\\mathbf{x}}_i| \\in (0,1)^2$ , $\\prod(\\cdot)$ suggests an element-wise multiplication in a vector, and $\\omega_d$ is distance-associated weights.", + "bbox": [ + 76, + 513, + 468, + 573 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given $\\{\\mathbf{D}_t\\}_{t\\in \\Omega_T}$ , we obtain corresponding $\\{\\tilde{\\mathbf{D}}_t\\}_{t\\in \\Omega_T}$ on the stabilized frame through the project-splat process above. An intuitive approach involves directly calculating the mean $\\mathbf{M}$ , variance $\\mathbf{S}$ , and determining the sampling ray range as $\\mathbf{R} = [\\mathbf{M} - \\mathbf{S},\\mathbf{M} + \\mathbf{S}]$ . However, in the aforementioned depth project-splat process, depth maps further from the timestamp $\\mathrm{T}$ are less reliable. Treating all depth maps equally can result in an inaccurate sampling ray range $\\mathbf{R}$ , leading to a decrease in the image quality (the left part of Fig. 4). This observation prompts the introduction of a weighted mean and variance as:", + "bbox": [ + 75, + 573, + 468, + 739 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\mathbf {M}} = \\sum_ {t \\in \\Omega_ {T}} \\omega_ {t} \\tilde {\\mathbf {D}} _ {t}, \\tilde {S} = \\sqrt {\\sum_ {t \\in \\Omega_ {T}} \\omega_ {t} (\\tilde {\\mathbf {D}} - \\tilde {\\mathbf {M}}) ^ {2}}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 120, + 752, + 468, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\omega_{t}$ is the temporal weighting coefficient, assigning a higher weight to the frame closer to the stabilized frame temporally and vice versa, as defined by", + "bbox": [ + 76, + 801, + 468, + 848 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\omega_ {t} = \\frac {e ^ {\\lambda (t - T)}}{\\sum_ {t \\in \\Omega_ {T}} e ^ {\\lambda (t - T)}}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 858, + 468, + 902 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/8b74912345a991d5a076ab84ed325a7aede7bb518996d52ab9ff5158c14732f0.jpg", + "image_caption": [ + "Figure 5. Illustration of Color Correction module. Firstly, we project a pixel $\\tilde{\\mathbf{x}}_T$ from the target stabilized frame onto corresponding $\\mathbf{x}_T$ of the input frame at the same timestamp $T$ . Secondly, we obtain feature matching of $\\mathbf{x}_T$ in the input frame at timestamps $t$ using optical flow $\\mathbf{F}_{T\\rightarrow t}(\\mathbf{x}_T)$ . As geometric constraints alone are insufficient for modeling dynamic regions, we aggregate precise color by correcting the geometric projected position $\\mathbf{x}_t$ to the optical-flow refined position $\\mathbf{x}_t'$ ." 
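Assuming the neighbouring depth maps have already been projected and splatted onto the stabilized view as in Eq. (5), the temporally weighted ray range of Eqs. (6)-(7) can be sketched as below. Using the absolute temporal distance $|t-T|$ in the weight follows the stated intent that temporally closer frames receive larger weights; this choice, the function names, and the per-pixel treatment are assumptions of the sketch rather than details of the released implementation.

```python
import numpy as np

def adaptive_ray_range(warped_depths, timestamps, T, lam=0.5):
    """warped_depths: (M, H, W) depths splatted onto the stabilized frame,
    timestamps: (M,) frame indices t in the neighbourhood Omega_T."""
    dist = np.abs(np.asarray(timestamps, dtype=float) - T)
    w = np.exp(-lam * dist)
    w = w / w.sum()                                    # Eq. (7), normalized weights

    mean = np.tensordot(w, warped_depths, axes=1)      # Eq. (6): weighted mean
    var = np.tensordot(w, (warped_depths - mean) ** 2, axes=1)
    std = np.sqrt(var)
    return mean - std, mean + std                      # per-pixel ray range R~
```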
+ ], + "image_footnote": [], + "bbox": [ + 501, + 87, + 893, + 319 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda$ is a hyperparameter. Subsequently, ray ranges for the stabilized frame are denoted as $\\tilde{\\mathbf{R}} = \\left[\\tilde{\\mathbf{M}} -\\tilde{\\mathbf{S}},\\tilde{\\mathbf{M}} +\\tilde{\\mathbf{S}}\\right]$ . and can be employed for sampling L points along each ray during the rendering process. As illustrated in the right part of Fig. 4, the Adaptive Ray Range module with temporal weighted ranges yields more favorable rendering results.", + "bbox": [ + 498, + 467, + 890, + 564 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The Adaptive Ray Range module provides a ray range $\\tilde{\\mathbf{R}}$ around the ground geometry guiding points sampling and benefiting volume density $\\sigma_{i}$ prediction. Although the guidance of $\\tilde{\\mathbf{R}}$ mitigates the interference of dynamic objects, the challenge of dynamic objects goes beyond this. According to Eq. 4, the color intensity $\\mathbf{c}_i$ is another factor influencing rendering quality and affected by dynamic regions as well.", + "bbox": [ + 498, + 564, + 890, + 671 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Color Correction", + "text_level": 1, + "bbox": [ + 500, + 680, + 668, + 695 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Color intensity, denoted as $\\mathbf{c}_i$ , exhibits a strong dependence on geometric constraints, akin to volume density $\\sigma_{i}$ . However, density is predicted from the feature maps with their receptive fields, thereby exhibiting a certain tolerance to projection inaccuracy. In contrast, color intensity is derived from the linear combination of colors warped from multiple views, accentuating the sensitivity of colors to projection inaccuracy. Despite the Adaptive Ray Range module offers a correction for projection with geometric constraints, it is inadequate for accurate color aggregation (refer to the experiments). Rather than solely concentrating on refining geometric constraints, we propose to assist these constraints with optical flow. Optical flow, relying on feature similar-", + "bbox": [ + 498, + 704, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "7511", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/cabbcffe56d7f2f5d899145236c6d9665e002e4f424d54ba77f3eb56d2bbecb2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodTypeNUS datasetSelfie datasetDeepStab dataset
C↑D↑S↑C↑D↑S↑C↑D↑S↑
Grundmann et al. [7]2D0.710.760.820.750.810.830.770.870.84
Bundle [20]2D0.810.780.820.740.820.800.800.900.85
Yu and Ramamoorthi [40]2D0.850.810.860.830.790.860.870.920.82
DIFRINT [5]2D1.000.870.841.000.780.841.000.910.78
FuSta [24]2D1.000.870.861.000.830.871.000.920.82
Zhao et al. [42]2D1.000.900.871.000.870.871.000.940.84
Deep3D [11]3D0.660.900.940.350.700.950.750.980.92
Ours3D1.000.910.941.000.920.951.000.980.92
", + "bbox": [ + 161, + 88, + 810, + 256 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Quantitative results on the NUS [20], the Selfie [38], and the DeepStab [33] datasets. We evaluate our method against baselines using three standard metrics: Cropping Ratio(C), Distortion Value(D), Stability Score(S). The best results are bolded and second-best results are highlighted by underline.", + "bbox": [ + 75, + 266, + 893, + 309 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ities, matches pixels with similar textures containing color information. It implies that utilizing optical flow to refine the projection can enhance color accuracy.", + "bbox": [ + 75, + 335, + 468, + 380 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Specifically, we focus on the input frame at $T$ , which adheres to epipolar constraints with the target stabilized frame at $T$ . As shown in Fig. 5, we employ $\\mathbf{I}_T$ as a reference to correct the projection points on the neighboring frame $\\mathbf{I}_t$ with optical flow. According to Eq. 1, we project a point $\\tilde{\\mathbf{x}}_T$ from the stabilized pose $\\tilde{\\mathbf{P}}_T$ onto the $\\mathbf{x}_T$ of $\\mathbf{P}_T$ , the flow-associated points $\\mathbf{x}_t'$ can be expressed as", + "bbox": [ + 75, + 380, + 468, + 486 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {t} ^ {\\prime} = \\mathbf {x} _ {T} + \\mathbf {F} _ {T \\rightarrow t} (\\mathbf {x} _ {T}), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 493, + 468, + 511 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathbf{F}_{T\\rightarrow t}$ represents the optical flow from $\\mathbf{I}_T$ to $\\mathbf{I}_t$ . By applying the same procedure to frames in the temporal neighborhood $\\Omega_T$ , we substitute the $\\mathbf{x}_t$ in Eq. 3 with $\\mathbf{x}_t'$ .", + "bbox": [ + 75, + 517, + 468, + 564 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4. Implementation Details", + "text_level": 1, + "bbox": [ + 76, + 570, + 294, + 585 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In our implementations, a pre-trained model from Deep3D [11] is employed to generate depth prior for the Adaptive Ray Range module and optical flow for Color Correction. Frames neighboring the timestamp $\\mathrm{T}$ are symmetrically distributed, and the length of the set $\\Omega_T$ is fixed to 13. For the Adaptive Ray Range module, the temporal weighting coefficient $\\omega_{i}$ is calculated with $\\lambda = 0.5$ , and we choose $L = 3$ for uniform spatial points sampling along each ray.", + "bbox": [ + 75, + 594, + 468, + 714 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Loss function. 
During training, we sample rays on all images randomly and minimize the mean squared error between the rendered color and corresponding ground truth:", + "bbox": [ + 75, + 714, + 468, + 761 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\sum_ {\\mathbf {x} \\in \\mathcal {X}} \\left| \\left| \\tilde {\\mathbf {I}} (\\mathbf {x}) - \\mathbf {I} _ {g t} (\\mathbf {x}) \\right| \\right| _ {2} ^ {2}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 768, + 468, + 806 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathbf{I}_{gt}$ is the corresponding ground truth and $\\mathcal{X}$ is the set of pixels sampled from all images in each training batch.", + "bbox": [ + 75, + 809, + 468, + 840 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training details. We follow the training setting of IBRNet [34] to train our model on LLFF [27] and IBRNetCollected [34] including high-quality natural images with accurate camera poses. Our model is trained on an RTX3090", + "bbox": [ + 75, + 840, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "GPU using the Adam optimizer[8]. We set the base learning rates for the feature extraction network and MLP to $1e^{-3}$ and $5e^{-4}$ , respectively, which decay exponentially throughout the optimization process. Typically, the model converges after approximately 200k iterations, and the entire training process takes about a day to complete.", + "bbox": [ + 496, + 335, + 890, + 426 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 498, + 439, + 633, + 455 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.Quantitative Evaluation", + "text_level": 1, + "bbox": [ + 498, + 465, + 720, + 481 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We choose various video stabilization algorithms as the baselines, including Grundmann et al. [7], Liu et al. [20], Wang et al. [33], Yu and Ramamoorthi [39, 40], DIFRINT [5], FuSta [24], Zhao et al. [42], and Deep3D [11]. For comparisons, we use the official provided videos or videos generated by official implementations with default parameters or pre-trained models.", + "bbox": [ + 496, + 488, + 890, + 594 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets. We choose three datasets with different characteristics for evaluations: (1) The NUS [20] dataset comprises 144 videos, categorized into six different scenes: Regular, Running, Crowd, Parallax, QuickRotation, and Running, (2) the Selfie dataset [38] contains 33 video clips featuring frontal faces with large camera motion, (3) and the Deep-Stab dataset [33] includes 61 high-definition videos.", + "bbox": [ + 496, + 599, + 890, + 705 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics. We assess the performance of the stabilizers using three standard metrics widely employed in previous methods [5, 20, 24, 39, 40]: (1) Cropping Ratio: This metric measures the remaining image area after cropping the non-content pixels. (2) Distortion Value: This metric quantifies the anisotropic scaling of the homography matrix between the input and output frames. (3) Stability Score: This metric assesses the stability of the stabilized video by assessing the ratio of low-frequency motion energy to the total energy. 
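As an aside, one common way such a stability score is computed in the stabilization literature is sketched below; the choice of motion signal and the number of low-frequency bins are conventions assumed here, not details specified by this paper.

```python
import numpy as np

def stability_score(motion_1d, num_low_freq=6):
    """motion_1d: per-frame 1D motion signal (e.g. accumulated translation)."""
    spectrum = np.abs(np.fft.fft(np.asarray(motion_1d, dtype=float))) ** 2
    spectrum = spectrum[1:len(spectrum) // 2 + 1]      # drop DC, keep half-spectrum
    low = spectrum[:num_low_freq].sum()
    return float(low / (spectrum.sum() + 1e-12))       # higher means more stable
```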
All three metrics range from 0 to 1, with higher values indicating better performance.", + "bbox": [ + 496, + 712, + 890, + 878 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results on the NUS dataset. Our evaluation on the NUS", + "bbox": [ + 500, + 885, + 890, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "7512", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6445a5ac79746040c195fee948f2a9f61c217c26f446fee225e0707382894918.jpg", + "image_caption": [ + "Input" + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 282, + 176 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/38be7b4662a10d0788472d050488bfa4ad55286ec3b23a4de17a00bf1334bda5.jpg", + "image_caption": [ + "Bundle [20]" + ], + "image_footnote": [], + "bbox": [ + 297, + 95, + 465, + 169 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4a0a3f30268d35df6d5f9852d0c989cb307ed24228b3cc117f79c28387e17b55.jpg", + "image_caption": [ + "Yu and Ramamoorthi [40]" + ], + "image_footnote": [], + "bbox": [ + 496, + 94, + 669, + 170 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/35df973166922dce73a31c575c11e1538d9a479cd9dc6ea2613ca3db735a1631.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 98, + 861, + 165 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d59a93c292a52554cfd94aa7685c2a4c335f93218586f06cdaf4eba176509916.jpg", + "image_caption": [ + "DIFRINT [5]" + ], + "image_footnote": [], + "bbox": [ + 80, + 196, + 279, + 284 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/57964ea87012f15a8802e1b7009bb18942a7f89b4afbc78118cdca81a67ab3e4.jpg", + "image_caption": [ + "FuSta [24]" + ], + "image_footnote": [], + "bbox": [ + 282, + 196, + 480, + 284 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/08a78308b3f798156ca5e004ec39727c0d91da006b58d754cc6d58a4915e7d9a.jpg", + "image_caption": [ + "Zhao et al. [42]" + ], + "image_footnote": [], + "bbox": [ + 483, + 196, + 681, + 284 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/75bb6ba464b0ec171eed2ec71160fe7c3dbc1ea84890164dcce8b66c7db1e018.jpg", + "image_caption": [ + "Deep3D[11]", + "Ours" + ], + "image_footnote": [], + "bbox": [ + 683, + 196, + 883, + 284 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "dataset [20] is detailed on the left side of Table 1, where our stabilization method excels notably in both stability and distortion reduction when compared to 2D-based methods. This success is attributed to our accuracy in constructing camera trajectories and geometry. In contrast to 3D methods, our approach stands out by leveraging information from multiple input frames, achieving an average cropping ratio of 1. This indicates the effectiveness of our method in full-frame generation across the diverse scenes in the NUS dataset, which is widely acknowledged as a robust benchmark for video stabilization algorithms.", + "bbox": [ + 75, + 396, + 468, + 561 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results on the Selfie dataset. We present the results on the Selfie dataset [38] in the middle of Table 1. It's crucial to highlight that this dataset is characterized by large camera motions and extensive dynamic regions, posing challenges for video stabilization algorithms. Observing the results, a decrease is evident for most algorithms compared to their performance on the NUS dataset. 
Traditional 3D methods, in particular, experience a significant decline. In contrast, our method consistently delivers the best performance on the Selfie dataset. The performance shows the effectiveness of our algorithm in handling extreme scenes.", + "bbox": [ + 75, + 565, + 468, + 731 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results on the DeepStab dataset. The right side of Table 1 showcases the average scores on the DeepStab dataset [33]. Notably, the videos in this dataset are of higher resolution than NUS and Selfie, specifically $720\\mathrm{p}$ , aligning with the common resolutions of modern devices. Despite the high distortion values across all stabilizers due to the simplicity of this dataset, our approach consistently demonstrates superior performance. This result suggests that our method is well-suited for handling high-definition videos, further emphasizing its applicability for contemporary video stabilization challenges.", + "bbox": [ + 75, + 734, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bbedd1702aeeeb4331fdea29791b21f14bc84f8b8c1594bb35a677f57ffe6c30.jpg", + "image_caption": [ + "Figure 6. Visual comparison of different methods. Contrasting with the baselines in the first row, our method successfully accomplishes full-frame generation. In the second row, while these baselines achieve full-frame generation, they fall short in preserving structure; for instance, in the bottom-left region, the tree trunks are missing in their stabilized images. Please refer to our supplementary material for video comparisons with baselines.", + "w/o Stabilized Rendering" + ], + "image_footnote": [], + "bbox": [ + 501, + 393, + 692, + 529 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d606b9f6b4daf48d60e7051425c1c0f8ac576733b8cbcfb636363d015c2785f3.jpg", + "image_caption": [ + "Ours", + "Figure 7. Quilitative ablation of Stabilized Fusion. Absence of Stabilized Fusion results in noticeable blurs in both static and dynamic regions." + ], + "image_footnote": [], + "bbox": [ + 697, + 393, + 888, + 530 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Qualitative Analysis", + "text_level": 1, + "bbox": [ + 500, + 625, + 691, + 640 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Visual comparisons of our method and state-of-the-art stabilizers is shown in Fig. 6. Many methods [11, 20, 40] apply aggressive cropping, as evident from the grey checkerboard regions. Comparing the bottom-left region of each image in Fig. 6 below with the top-left input, it's clear that our method suffers from fewer visual artifacts.", + "bbox": [ + 500, + 648, + 890, + 737 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 753, + 650, + 770 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conduct ablation studies to analyze the effectiveness of the proposed modules, including Stabilized Rendering (SR), the Adaptive Ray Range module (ARR), and Color Correction module (CC). Our evaluations focus on the Crowd scene within the NUS dataset [20], chosen for its dynamic objects and diverse scenes. We choose Distortion values and PSNR as evaluation metrics. 
Distortion Value measures the pose-independent structure quality of", + "bbox": [ + 500, + 779, + 892, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7513", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/25f5e98177727532a13733afd1d9adcf104b6a6364a937365be026954b7c8fe6.jpg", + "image_caption": [ + "IBRNet" + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 271, + 172 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7c47836fb1ab540c22e7496bc0f983b2dad28ed4cefdccfc9bb994f3b332686a.jpg", + "image_caption": [ + "ENeRF" + ], + "image_footnote": [], + "bbox": [ + 274, + 88, + 468, + 172 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b935cb2f8b7006df9d0a7994a7657190337d2c93beb48b0d936051ab3aa16668.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 80, + 188, + 271, + 273 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e3ab8833bbe1c9e50c3ada67aae911bd692e8bfa26373460b95f6332ceeb6f49.jpg", + "image_caption": [ + "w/o Adaptive Ray Range" + ], + "image_footnote": [], + "bbox": [ + 80, + 273, + 171, + 323 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9d7e36ceab158649387266c5ddbe6deb544d1868b6711ee82126c173df2a1332.jpg", + "image_caption": [ + "Figure 8. Quilitative ablation of different range strategies. Among the range strategies examined, only our Adaptive Ray Range module can address distortion in image structure." + ], + "image_footnote": [], + "bbox": [ + 176, + 273, + 269, + 323 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1bb923e31cac7faed4c99a41f913c826b26b9e6523ba4b8db6600cb1d2d5d3cf.jpg", + "image_caption": [ + "Figure 9. Quilitative ablation of Color Correction. The Color Correction module refining the projection enhances color accuracy, consequently reducing image artifacts." + ], + "image_footnote": [], + "bbox": [ + 274, + 188, + 467, + 273 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/836225df1614e93b133579b0784abba78e41279b78117932d4d8bd6220dc246b.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 274, + 273, + 370, + 323 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e327c557e851688405372d89788f43584ca0d8c4012f9867e11ceb1da7280a79.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 273, + 467, + 323 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "images with stabilized poses. Additionally, PSNR is employed to evaluate the pixel-level performance of our model in rendering image details. As real images with stabilized poses are unavailable, we render images with the input pose to derive PSNR.", + "bbox": [ + 75, + 425, + 468, + 500 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Why needs Stabilized Rendering. We conduct experiments to demonstrate the necessity of SR, which fuses features and colors in 3D space. One straightforward strategy replacing SR for fusing multiple frames is image blending. It warps nearby frames into the stabilized view and averages these images. However, as illustrated in the left part of Fig. 7, image blending leads to noticeable blur in both static regions (the stairs) and dynamic regions (the handbag and the shoulder). Comparing Row 4 and Row 3 in Table 1, the notable decreases in distortion value and PSNR align with the observation in Fig. 7. 
It demonstrates SR, our 3D multiframe fusion module using volume rendering, can enhance the structural quality of stabilized images.", + "bbox": [ + 75, + 503, + 468, + 700 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Importance of Adaptive Ray Range. We compare various range strategies to affirm the importance of ARR: (1) IBRNet [34] and ENeRF [15] employ coarse-to-fine range strategy, and (2) we adopt even sampling of 128 points following setting of IBRNet as a substitution for ARR. However, as shown in Fig. 8, none of these strategies achieve favorable results. Without the sampling range defined by ARR, the methods above are forced to aggregate points sampled over a large range, increasing the risk of projecting spatial points onto dynamic regions. Due to the violation of epipolar constraints, dynamic regions introduce incorrect features and colors to the aggregation of descriptors and lead to distortion of the structure. As shown in Row 1,2,3,5 of Table 1,", + "bbox": [ + 75, + 704, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2c8dfc7ec3132528612000bb0e10a334493166ec7b1563820334ca1c2f438a19.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 88, + 694, + 172 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/cf84b471ac347006d196b9d1ddd9e5bd7cc6a73e0f8ea48a49e453eeaea2be73.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 172, + 596, + 222 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e45170879f7bd8948694782837c63e9cee54a4f515b022889e97fb02c1efbb3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 172, + 692, + 222 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3c369aea13eb5c2d15c6de431b90a0085a42c5f5c9281851f3cd911fe656c555.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 697, + 88, + 890, + 172 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4ce40c892ab996b60ab2af19413f15607c0b324e2e725ffb26265fdd4f799f07.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 697, + 172, + 792, + 223 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8f5ed14cc5ec74546b2636d6eb8992f7ae2421f8a90f86b7c00d7d6e6b1e53ab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 797, + 172, + 888, + 223 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c2ea4ded0a9a7456dcea2da08cdf4fbb3f9646fd8daa2c0fa83557164b545fe1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodDistortion Value↑PSNR↑
ENeRF-13.45
IBRNet0.8028.31
Full (Ours)0.9040.01
w/o Stabilized Rendering0.8723.56
w/o Adaptive Ray Range0.8137.83
w/o Color Correction0.8635.81
", + "bbox": [ + 524, + 309, + 867, + 429 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 2. Quantitative results of ablation study. We conduct comparative experiments of various range strategies and study the effect of each module. It should be noted that the results of ENeRF are so poor that the Distortion Value is unavailable.", + "bbox": [ + 498, + 439, + 890, + 494 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ARR proves effective in preserving structure.", + "bbox": [ + 500, + 523, + 799, + 537 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Importance of Color Correction. We conduct a comparison between the results obtained by removing CC and using the full model. The presence of noticeable artifacts in the dynamic region in the left part of Fig. 9 leads to the decrease in PSNR comparing Row 6 and Row 3 of Table 1. This suggests that employing optical flow in CC to refine the projection can improve color accuracy.", + "bbox": [ + 496, + 547, + 890, + 652 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 676, + 625, + 691 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose a video stabilization framework termed RStab for integrating multi-frame fusion and 3D constraints to achieve full-frame generation and structure preservation. The core of RStab lies in Stabilized Rendering, a volume rendering module utilizing both colors and features for multi-frame fusion in 3D space. To enhance Stabilized Rendering module, we design an Adaptive Ray Range module for suppressing inconsistent information and a Color Correction module for refining color aggregation. By applying the three modules, RStab achieves full-frame generation with structure preservation and outperforms all previous stabilizers in FOV, image quality, and video stability across various datasets.", + "bbox": [ + 496, + 704, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "7514", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 5855–5864, 2021. 3", + "[2] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5470–5479, 2022. 3", + "[3] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 14124-14133, 2021. 3", + "[4] Yu-Ta Chen, Kuan-Wei Tseng, Yao-Chih Lee, Chun-Yu Chen, and Yi-Ping Hung. Pixstabnet: Fast multi-scale deep online video stabilization with pixel-based warping. In Proceedings of IEEE International Conference on Image Processing (ICIP), pages 1929–1933, 2021. 2", + "[5] Jinsoo Choi and In So Kweon. Deep iterative frame interpolation for full-frame video stabilization. 
ACM Transactions on Graphics (TOG), 39(1):4:1-4:9, 2020. 1, 2, 6, 7", + "[6] Amit Goldstein and Raanan Fattal. Video stabilization using epipolar geometry. ACM Transactions on Graphics (TOG), 31(5):126:1-126:10, 2012. 2", + "[7] Matthias Grundmann, Vivek Kwatra, and Irfan A. Essa. Auto-directed video stabilization with robust L1 optimal camera paths. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 225-232, 2011. 2, 6, 1", + "[8] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Proceedings of International Conference on Learning Representations (ICLR), 2015. 6", + "[9] Yeong Jun Koh, Chulwoo Lee, and Chang-Su Kim. Video stabilization based on feature trajectory augmentation and selection and robust mesh grid warping. IEEE Transactions on Image Processing (TIP), 24(12):5260-5273, 2015. 1, 2", + "[10] Ken-Yi Lee, Yung-Yu Chuang, Bing-Yu Chen, and Ming Ouhyoung. Video stabilization using robust feature trajectories. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 1397-1404, 2009. 1, 2", + "[11] Yao-Chih Lee, Kuan-Wei Tseng, Yu-Ta Chen, Chien-Cheng Chen, Chu-Song Chen, and Yi-Ping Hung. 3d video stabilization with depth estimation by cnn-based optimization. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 10621–10630, 2021. 1, 2, 4, 6, 7", + "[12] Chen Li, Li Song, Shuai Chen, Rong Xie, and Wenjun Zhang. Deep online video stabilization using IMU sensors. IEEE Transactions on Multimedia (TMM), 25:2047-2060, 2023. 2", + "[13] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of IEEE Conference on Com" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "puter Vision Pattern Recognition (CVPR), pages 6498-6508, 2021.3", + "[14] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4273-4284, 2023. 3, 1", + "[15] Haotong Lin, Sida Peng, Zhen Xu, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Efficient neural radiance fields for interactive free-viewpoint video. In ACM SIGGRAPH Asia, pages 39:1-39:9, 2022. 2, 3, 4, 8", + "[16] Kaimo Lin, Nianjuan Jiang, Shuaicheng Liu, Loong-Fah Cheong, Minh N. Do, and Jiangbo Lu. Direct photometric alignment by mesh deformation. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 2701-2709, 2017. 2", + "[17] Feng Liu, Michael Gleicher, Hailin Jin, and Aseem Agarwala. Content-preserving warps for 3d video stabilization. ACM Transactions on Graphics (TOG), 28(3):44, 2009. 1, 2", + "[18] Feng Liu, Michael Gleicher, Jue Wang, Hailin Jin, and Aseem Agarwala. Subspace video stabilization. ACM Transactions on Graphics (TOG), 30(1):4:1-4:10, 2011. 1, 2", + "[19] Shuaicheng Liu, Yinting Wang, Lu Yuan, Jiajun Bu, Ping Tan, and Jian Sun. Video stabilization with a depth camera. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 89-95, 2012. 1, 2", + "[20] Shuaicheng Liu, Lu Yuan, Ping Tan, and Jian Sun. Bundled camera paths for video stabilization. ACM Transactions on Graphics (TOG), 32(4):78:1-78:10, 2013. 1, 2, 6, 7", + "[21] Shuaicheng Liu, Lu Yuan, Ping Tan, and Jian Sun. 
Steadyflow: Spatially smooth optical flow for video stabilization. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4209-4216, 2014. 1, 2", + "[22] Shuaicheng Liu, Ping Tan, Lu Yuan, Jian Sun, and Bing Zeng. Meshflow: Minimum latency online video stabilization. In Proceedings of European Conference on Computer Vision (ECCV), pages 800-815, 2016. 2", + "[23] Shuaicheng Liu, Mingyu Li, Shuyuan Zhu, and Bing Zeng. Codingflow: Enable video coding for video stabilization. IEEE Transactions on Image Processing (TIP), 26(7):3291-3302, 2017. 1, 2", + "[24] Yu-Lun Liu, Wei-Sheng Lai, Ming-Hsuan Yang, Yung-Yu Chuang, and Jia-Bin Huang. Hybrid neural fusion for full-frame video stabilization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 2279-2288, 2021. 1, 2, 6, 7", + "[25] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 7210–7219, 2021. 3", + "[26] Andreas Meuleman, Yu-Lun Liu, Chen Gao, Jia-Bin Huang, Changil Kim, Min H. Kim, and Johannes Kopf. Progressively optimized local radiance fields for robust view synthesis. In Proceedings of IEEE Conference on Computer Vision" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "7515", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Pattern Recognition (CVPR), pages 16539-16548, 2023. 3, 1", + "[27] Ben Mildenhall, Pratul P. Srinivasan, Rodrigo Ortiz Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):29:1-29:14, 2019. 6", + "[28] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (TOG), 41(4):1-15, 2022. 3", + "[29] Simon Niklaus and Feng Liu. Softmax splatting for video frame interpolation. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5436-5445, 2020. 5", + "[30] Zhenmei Shi, Fuhao Shi, Wei-Sheng Lai, Chia-Kai Liang, and Yingyu Liang. Deep online fused video stabilization. In Proceedings of Winter Conference on Applications of Computer Vision (WACV), pages 865-873. IEEE, 2022. 2", + "[31] Brandon M. Smith, Li Zhang, Hailin Jin, and Aseem Agarwala. Light field video stabilization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 341-348, 2009. 1, 2", + "[32] Alex Trevithick and Bo Yang. Grf: Learning a general radiance field for 3d representation and rendering. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 15182-15192, 2021. 3", + "[33] Miao Wang, Guo-Ye Yang, Jin-Kun Lin, Song-Hai Zhang, Ariel Shamir, Shao-Ping Lu, and Shi-Min Hu. Deep online video stabilization with multi-grid warping transformation learning. IEEE Transactions on Image Processing (TIP), 28 (5):2283-2292, 2019. 6, 7, 1", + "[34] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P. Srinivasan, Howard Zhou, Jonathan T. Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas A. Funkhouser. Ibrnet: Learning multi-view image-based rendering. 
In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4690-4699, 2021. 3, 4, 6, 8, 1", + "[35] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5438–5448, 2022. 3", + "[36] Yufei Xu, Jing Zhang, Stephen J. Maybank, and Dacheng Tao. DUT: learning video stabilization by simply watching unstable videos. IEEE Transactions on Image Processing (TIP), 31:4306-4320, 2022. 2", + "[37] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelnerf: Neural radiance fields from one or few images. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4578-4587, 2021. 3", + "[38] Jiyang Yu and Ravi Ramamoorthi. Selfie video stabilization. In Proceedings of European Conference on Computer Vision (ECCV), pages 569-584, 2018. 2, 6, 7, 1", + "[39] Jiyang Yu and Ravi Ramamoorthi. Robust video stabilization by optimization in CNN weight space. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 3800-3808, 2019. 6" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[40] Jiyang Yu and Ravi Ramamoorthi. Learning video stabilization using optical flow. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 8156-8164, 2020. 2, 6, 7, 1", + "[41] Minda Zhao and Qiang Ling. Pwstablenet: Learning pixelwise warping maps for video stabilization. IEEE Transactions on Image Processing (TIP), 29:3582-3595, 2020. 2", + "[42] Weiyue Zhao, Xin Li, Zhan Peng, Xianrui Luo, Xinyi Ye, Hao Lu, and Zhiguo Cao. Fast full-frame video stabilization with iterative optimization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 23534-23544, 2023. 1, 2, 6, 7" + ], + "bbox": [ + 501, + 92, + 890, + 260 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "7516", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/3D Multi-frame Fusion for Video Stabilization/475676e5-4dd7-4a8c-bd05-cc44ef21267a_model.json b/2024/3D Multi-frame Fusion for Video Stabilization/475676e5-4dd7-4a8c-bd05-cc44ef21267a_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e9665de29e2fed164aeea8913fded379e627f6ff --- /dev/null +++ b/2024/3D Multi-frame Fusion for Video Stabilization/475676e5-4dd7-4a8c-bd05-cc44ef21267a_model.json @@ -0,0 +1,2321 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.044 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.25, + 0.131, + 0.72, + 0.151 + ], + "angle": 0, + "content": "3D Multi-frame Fusion for Video Stabilization" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.181, + 0.844, + 0.218 + ], + "angle": 0, + "content": "Zhan Peng Xinyi Ye Weiyue Zhao Tianqi Liu Huiqiang Sun Baopu Li Zhiguo Cao* School of AIA, Huazhong University of Science and Technology" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.22, + 0.76, + 0.235 + ], + "angle": 0, + "content": "{peng_zhan,xinyiye,zhaoweiyue,tq_1iu,shq1031,zgcao}@hust.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.399, + 0.238, + 0.571, + 0.251 + ], + "angle": 0, + "content": "bpli.cuhk@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.29, + 0.314, + 0.306 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.322, + 0.474, + 0.655 + ], + "angle": 0, + "content": "In this paper, we present RStab, a novel framework for video stabilization that integrates 3D multi-frame fusion through volume rendering. Departing from conventional methods, we introduce a 3D multi-frame perspective to generate stabilized images, addressing the challenge of full-frame generation while preserving structure. The core of our RStab framework lies in Stabilized Rendering (SR), a volume rendering module, fusing multi-frame information in 3D space. Specifically, SR involves warping features and colors from multiple frames by projection, fusing them into descriptors to render the stabilized image. However, the precision of warped information depends on the projection accuracy, a factor significantly influenced by dynamic regions. In response, we introduce the Adaptive Ray Range (ARR) module to integrate depth priors, adaptively defining the sampling range for the projection process. Additionally, we propose Color Correction (CC) assisting geometric constraints with optical flow for accurate color aggregation. Thanks to the three modules, our RStab demonstrates superior performance compared with previous stabilizers in the field of view (FOV), image quality, and video stability across various datasets." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.684, + 0.21, + 0.7 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.71, + 0.47, + 0.831 + ], + "angle": 0, + "content": "With the widespread adoption of smartphones, videos have become an important medium for documenting and sharing lives. The videos captured with handheld devices often suffer from annoying shakes. To mitigate this prevalent issue, numerous researchers devote efforts to developing video stabilization algorithms. These methods typically involve three steps: camera trajectory estimation, trajectory smoothing, and stabilized frame generation." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.831, + 0.47, + 0.878 + ], + "angle": 0, + "content": "To obtain a smooth image sequence, known as stabilized frames, early methods employ 2D-plane transformations (homography [20, 23], feature trajectories [9, 10, 21], mo" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.288, + 0.892, + 0.512 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.522, + 0.894, + 0.604 + ], + "angle": 0, + "content": "Figure 1. Existing dilemmas and our method. (a) and (b) exhibit cropping issues, characteristic of single-frame methods. (a) and (c) encounter difficulties in preserving structure, inherent in 2D-based approaches. 
Fortunately, our proposed method (d) not only mitigates distortion and artifacts but also maintains no-cropping stabilized frames." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.64, + 0.895, + 0.776 + ], + "angle": 0, + "content": "tion vectors [18]) on single frames. However, these methods suffer from two major problems. First, these single-frame approaches may produce notable missing regions at the boundary of generated stabilized images, requiring aggressive cropping to ensure a rectangular frame for video (cropping in Fig. 1(a)), further resulting in a substantial reduction in the field of view (FOV). Second, 2D transformations could give rise to structure distortion due to the lack of 3D physical information (shear in Fig. 1(a))." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.902 + ], + "angle": 0, + "content": "In pursuit of the stabilized full-frame, recent 2D methods [5, 24, 42] leverage nearby frames to fill in the unseen content within the target frame. However, due to the inherent absence of physical constraints in 2D transformations, 2D-based multiple-frame methods fail to preserve the structure, especially the parallax regions (Fig. 1(c)). To obtain the structure-preserved stabilized frame, some methods [11, 17, 19, 31] leverage 3D transformations to simulate" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.888, + 0.222, + 0.9 + ], + "angle": 0, + "content": "*Corresponding author." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7507" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.198 + ], + "angle": 0, + "content": "real-world settings, employing camera poses and epipolar constraints to ensure the image structure. However, due to limited information from a single frame, they cannot generate a full frame, as shown in Fig. 1(b). In brief, the ongoing challenge of concurrently addressing full-frame generation while preserving structure for video stabilization remains a major concern for most current research works." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.2, + 0.47, + 0.382 + ], + "angle": 0, + "content": "To overcome the above problems, intuitively, employing multi-frame fusion with 3D transformations could offer a promising solution. However, two issues may still hinder 3D transformations from incorporating information from neighboring frames. First, since view changes induce geometric deformation, the incorporated information from nearby frames may be inconsistent, suggesting that image blending, e.g., averaging, may lead to distortion. Second, videos feature dynamic objects across frames, which cannot be adequately modeled by 3D constraints. The direct aggregation of information from nearby frames with 3D projection results in a noticeable blur (refer to the experiments)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.383, + 0.471, + 0.609 + ], + "angle": 0, + "content": "Motivated by the above insights and analysis, we propose a video stabilization framework termed RStab for integrating multi-frame fusion and 3D constraints to achieve full-frame generation and structure preservation. Specifically, we propose Stabilized Rendering (SR), a 3D multiframe fusion module using volume rendering. Instead of simple image blending, SR employs both color and feature space to fuse nearby information into spatial descriptors for the scene geometry, such as volume densities of spatial points. 
Visible points usually come with high volume densities, exhibiting consistent textures in their projections across frames. The observation suggests that points with higher consistency in aggregating information exhibit higher volume densities, implying a greater contribution to the final rendered color." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.612, + 0.47, + 0.824 + ], + "angle": 0, + "content": "To mitigate the impacts of dynamic regions, we propose Adaptive Ray Range (ARR) and Color Correction(CC) modules. The introduction of multi-frame depth priors in ARR constrains the sampling range for spatial points around the surface of objects. A narrow sampling range around the surface decreases the risk of projecting spatial points onto dynamic regions, thereby suppressing the inconsistent information aggregation induced by the dynamic objects. Despite ARR, colors are sensitive to projection inaccuracy, indicating a narrow range is insufficient. Hence, we design CC to refine the projection for color aggregation. The core of CC lies in assisting geometry constraints with optical flow, which matches pixels with similar textures containing the color information." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.903 + ], + "angle": 0, + "content": "By applying the three modules, RStab demonstrates the ability of full-frame generation with structure preservation (Fig. 1(d)) and outperforms all previous video stabilization algorithms in FOV, image quality, and video stability across various datasets. In summary, our key contributions" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.093, + 0.6, + 0.106 + ], + "angle": 0, + "content": "are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.109, + 0.891, + 0.153 + ], + "angle": 0, + "content": "- We present a novel 3D multi-frame fusion framework for video stabilization to render full-frame stabilized images with structure preservation." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.154, + 0.892, + 0.229 + ], + "angle": 0, + "content": "- We propose Stabilized Rendering, which fuses multiple frames in both color and feature space. We augment Stabilized Rendering with the introduction of the Adaptive Ray Range module and Color Correction module, enhancing its capacity to address dynamic regions." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.23, + 0.892, + 0.273 + ], + "angle": 0, + "content": "- Our video stabilization framework, RStab, demonstrates state-of-the-art (SOTA) performance across various datasets." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.109, + 0.892, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.287, + 0.642, + 0.303 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.312, + 0.892, + 0.599 + ], + "angle": 0, + "content": "2D-based Video Stabilization. 2D video stabilization algorithms model camera trajectory and generate stabilized frames through transformations on a 2D plane, including homography [6, 7, 20, 23, 40], feature trajectories [9, 10, 21, 38], motion vectors [16, 18, 22, 36], and dense flow fields [4, 5, 24, 40, 41]. Early methods [6, 7] estimate global transformations, which proved inadequate for handling complex camera effects such as the parallax effect. Certain approaches estimate multiple local motions [20, 22] or pixel-wise warping field [36, 40, 41] for a single image, offering some relief for the challenges encountered by global transformation methods. 
However, due to the limited information from a single frame, these methods may result in missing content in the stabilized video. To address this, some methods [5, 24, 42] fuse information from multiple neighboring frames, enabling full-frame generation. Despite achieving a full frame, the 2D transformations lack real-world physical constraints, leading to challenges in preserving image structure." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.599, + 0.892, + 0.809 + ], + "angle": 0, + "content": "3D-based Video Stabilization. 3D-based video stabilizers model 3D camera trajectory and stabilize frames with epipolar projection. Some methods [11, 17] rely on the video itself, warping images instructed by projection while preserving content. Others integrate specialized hardware, such as depth cameras [19], light field cameras [31], gyroscopes [30], and IMU sensors [12], to assist with scene geometry. Both kinds of stabilizers estimate the physical motion of the real world and introduce 3D constraints in warping, benefiting stability and structure preservation. However, relying on a single frame, 3D-based video stabilizers have a limited field of view. To mitigate the issue, in this paper, we extend single-frame to multi-frame in 3D space for video stabilization." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Neural Rendering. As a significant work in view synthesis, NeRF[15] attains photorealistic synthesized images through implicit volumetric representation and volume rendering. It combines multi-view information, leveraging 3D geometric constraints and pixel-wise rendering to generate high-quality images without missing content from novel" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7508" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.088, + 0.895, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.349, + 0.895, + 0.466 + ], + "angle": 0, + "content": "Figure 2. Overview of our framework. (1) Given input frames \\(\\{\\mathbf{I}_t\\}_{t=1}^N\\) with a shaky trajectory \\(\\{\\mathbf{P}_t\\}_{t=1}^N\\), our purpose lies in rendering stabilized video sequence \\(\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N\\) with smoothed trajectory \\(\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N\\). Here, the input trajectories \\(\\{\\mathbf{P}_t\\}_{t=1}^N\\) derive from preprocessing, while the smoothed trajectories \\(\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N\\) are generated using a Trajectory Smoothing module. (2) In addition to \\(\\{\\mathbf{P}_t\\}_{t=1}^N\\), depth maps \\(\\{\\mathbf{D}_t\\}_{t=1}^N\\) and optical flow \\(\\{\\mathbf{F}_t\\}_{t=1}^N\\) can be obtained during preprocessing. We aggregate \\(\\{\\mathbf{D}_t\\}_{t=1}^N\\) into the ray range \\(\\{\\tilde{\\mathbf{R}}_t\\}_{t=1}^N\\) using the Adaptive Ray Range module. The ray range \\(\\{\\tilde{\\mathbf{R}}_t\\}_{t=1}^N\\), along with \\(\\{\\mathbf{F}_t\\}_{t=1}^N\\) and the smoothed trajectory \\(\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N\\), serves as inputs to the Stabilized Rendering module. Conducting Stabilized Rendering, enhanced by the Color Correction module, we fuse the input frames \\(\\{\\mathbf{I}_t\\}_{t=1}^N\\) and their features \\(\\{\\mathcal{F}_t\\}_{t=1}^N\\) to render the stabilized video sequence \\(\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N\\)." 
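For readers who want a concrete picture of the trajectory-smoothing step referenced in the caption, the snippet below is a minimal NumPy sketch of Gaussian smoothing applied to a camera path. It operates only on camera positions and uses an arbitrary window and sigma; the actual module described in Sec. 3 smooths full poses with adjustable parameters, so treat everything here as an illustrative assumption rather than the authors' implementation.

```python
import numpy as np

def gaussian_smooth_trajectory(positions, window=15, sigma=4.0):
    """Smooth an (N, 3) array of camera positions with a normalized Gaussian kernel."""
    offsets = np.arange(window) - window // 2
    kernel = np.exp(-0.5 * (offsets / sigma) ** 2)
    kernel /= kernel.sum()
    # Reflect-pad along time so the smoothed trajectory keeps the same length.
    padded = np.pad(positions, ((window // 2, window // 2), (0, 0)), mode="reflect")
    return np.stack(
        [np.convolve(padded[:, d], kernel, mode="valid") for d in range(positions.shape[1])],
        axis=1,
    )

# Toy shaky trajectory: a straight path plus jitter (illustrative values only).
t = np.linspace(0.0, 1.0, 60)
shaky = np.stack([t, 0.05 * np.random.randn(60), np.zeros(60)], axis=1)
print(gaussian_smooth_trajectory(shaky).shape)  # (60, 3)
```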
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.479, + 0.473, + 0.721 + ], + "angle": 0, + "content": "viewpoints. While NeRF-based methods [1, 2, 13, 25, 28] produce impressive synthesized image quality, its limitation in per-scene training hampers its direct application in video stabilization. Certain approaches [3, 15, 32, 34, 35, 37] strive to improve the generalization of NeRF, but they are not inherently well-suited for video stabilization tasks. Some recent methods [14, 26] attempt to apply techniques in NeRF to stabilize videos, these approaches inherit the limitations of the vanilla NeRF, necessitating retraining for each specific scene. Inspired by generalized rendering technologies from IBRNet[34] and ENeRF[15], which utilize multi-view images and associated features to predict radiance fields, we further propose the Stabilized Rendering. Stabilized Rendering, enhanced by the proposed Adaptive Ray Range module and Color Correction module, extends the volume rendering technique to video stabilization." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.737, + 0.169, + 0.751 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.763, + 0.473, + 0.904 + ], + "angle": 0, + "content": "Our pipeline is shown in Fig. 2. Given a shaky frame sequence \\(\\{\\mathbf{I}_t\\}_{t=1}^N\\) of length \\(N\\), our objective is to generate a stabilized sequence \\(\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N\\). For preprocessing of \\(\\{\\mathbf{I}_t\\}_{t=1}^N\\), we estimate optical flow \\(\\{\\mathbf{F}_t\\}_{t=1}^N\\), depth maps \\(\\{\\mathbf{D}_t\\}_{t=1}^N\\), and camera trajectory \\(\\{\\mathbf{P}_t\\}_{t=1}^N\\). With \\(\\{\\mathbf{D}_t\\}_{t=1}^N\\), \\(\\{\\mathbf{P}_t\\}_{t=1}^N\\) and smoothed camera trajectory \\(\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N\\) as input, the Adaptive Ray Range module aggregates multi-view depth maps into the ray ranges \\(\\{\\tilde{\\mathbf{R}}_t\\}_{t=1}^N\\). Guided by the" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.478, + 0.892, + 0.548 + ], + "angle": 0, + "content": "ranges, Stabilized Rendering enhanced by the Color Correction module generates stabilized video sequence \\(\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N\\) through fusing the input frames \\(\\{\\mathbf{I}_t\\}_{t=1}^N\\) and feature maps \\(\\{\\mathcal{F}_t\\}_{t=1}^N\\) obtained through feature extraction network." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.553, + 0.893, + 0.862 + ], + "angle": 0, + "content": "We start with preprocessing a sequence of input frames \\(\\{\\mathbf{I}_t\\}_{t=1}^N\\) to estimate associated depth maps \\(\\{\\mathbf{D}_t\\}_{t=1}^N\\) and camera trajectory \\(\\{\\mathbf{P}_t\\}_{t=1}^N\\). These depth maps and camera poses are employed for camera trajectory smoothing. In our pursuit of consistent and smooth camera trajectories, we harness the flexibility of the Gaussian smoothing function: \\(\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N = \\phi_{sm}(\\{\\mathbf{P}_t\\}_{t=1}^N)\\), where \\(\\phi_{sm}\\) represents the Gaussian smoothing filter, offering adjustable parameters for both the smoothing window and stability. These parameters can be fine-tuned to meet specific requirements and constraints. In Sec. 3.1, we elaborate on rendering a stabilized image with its neighboring frames through Stabilized Rendering. Due to dynamic regions, the conventional 3D-constraint-based rendering fails to adequately represent the geometry. Differing from the conventional rendering, Sec. 
3.2 introduces the utilization of depth priors to constrain the sampling range of spatial points around potential geometries, such as the area around the surface of objects. Additionally, in Sec. 3.3, we discuss refining projecting inaccuracy to ensure consistent local color intensities." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.867, + 0.894, + 0.903 + ], + "angle": 0, + "content": "Stabilizing a video involves rendering a image sequence \\(\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N\\) with corresponding stabilized poses \\(\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N\\). In" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7509" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "practice, we adopt a sliding window strategy for frame-by-frame rendering stabilized video. For clarity, we illustrate the rendering process with a single target camera pose \\(\\tilde{\\mathbf{P}}\\) at the timestamp \\(T\\) and its temporal neighborhood \\(\\Omega_T\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.159, + 0.276, + 0.175 + ], + "angle": 0, + "content": "3.1. Stabilized Rendering" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.182, + 0.469, + 0.343 + ], + "angle": 0, + "content": "Stabilized Rendering is a multi-frame fusion module founded on epipolar constraints which fuses input images and feature maps to render a stable, uncropped video sequence. Considering a pixel \\(\\tilde{\\mathbf{x}}\\) situated in the stabilized image \\(\\tilde{\\mathbf{I}}\\) under a specific target camera pose \\(\\tilde{\\mathbf{P}}\\), we sample \\(L\\) spatial points sharing projection situation \\(\\tilde{\\mathbf{x}}\\). These sampled points span depth \\(\\{\\tilde{d}_i\\}_{i=1}^L\\) distributed along the ray with sampling range, denoted as \\(\\tilde{\\mathbf{R}}(\\tilde{\\mathbf{x}})\\). We project \\(\\tilde{\\mathbf{x}}\\) at depth \\(\\tilde{d}_i\\) onto the neighboring input frames \\(\\{\\mathbf{I}_t\\}_{t \\in \\Omega_T}\\) at corresponding positions \\(\\{\\mathbf{x}_t^i\\}_{t \\in \\Omega_T}\\) by" + }, + { + "type": "equation", + "bbox": [ + 0.187, + 0.349, + 0.469, + 0.368 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {t} ^ {i} = \\mathbf {K P} _ {t} \\tilde {\\mathbf {P}} ^ {- 1} \\tilde {d} _ {i} \\mathbf {K} ^ {- 1} \\tilde {\\mathbf {x}}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.374, + 0.469, + 0.45 + ], + "angle": 0, + "content": "where \\(\\mathbf{K}\\) represents the camera intrinsic parameters shared by all frames in a video and \\(i\\in (0,L]\\). With the projected points \\(\\{\\mathbf{x}_t^i\\}_{t\\in \\Omega_T}\\), we aggregate features \\(\\{\\mathcal{F}_t(\\mathbf{x}_t^i)\\}_{t\\in \\Omega_T}\\) in neighboring frames to predict the volume density \\(\\sigma_{i}\\) for the spatial point by" + }, + { + "type": "equation", + "bbox": [ + 0.176, + 0.456, + 0.469, + 0.475 + ], + "angle": 0, + "content": "\\[\n\\sigma_ {i} = \\phi_ {m l p} \\left(\\left\\{\\mathcal {F} _ {t} \\left(\\mathbf {x} _ {t} ^ {i}\\right) \\right\\} _ {t \\in \\Omega_ {T}}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.48, + 0.469, + 0.601 + ], + "angle": 0, + "content": "where \\(\\phi_{mlp}\\) is a Multiple Layer Perceptron (refer to Supp. for details). Eq. 2 is contingent upon the consistency among features. 
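As a concrete reading of Eq. 1, the following NumPy sketch back-projects a pixel of the stabilized view to a sampled depth and reprojects it into a neighboring input view. The 3x3 intrinsics, the 4x4 world-to-camera pose convention, and the toy numbers are assumptions made for illustration, not details taken from the paper.

```python
import numpy as np

def reproject_pixel(x_tilde, depth, K, P_tilde, P_t):
    """Eq. 1: lift pixel x_tilde of the stabilized view to `depth`, then project into view t."""
    ray = np.linalg.inv(K) @ np.array([x_tilde[0], x_tilde[1], 1.0])
    point_cam = depth * ray                              # 3D point in the stabilized camera frame
    point_world = np.linalg.inv(P_tilde) @ np.append(point_cam, 1.0)
    point_t = (P_t @ point_world)[:3]                    # same point in the neighboring camera frame
    uvw = K @ point_t                                    # perspective projection with the shared intrinsics
    return uvw[:2] / uvw[2]

K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
P_tilde = np.eye(4)          # smoothed (target) pose
P_t = np.eye(4)              # neighboring pose, shifted slightly along x
P_t[0, 3] = 0.1
print(reproject_pixel((320.0, 240.0), 2.0, K, P_tilde, P_t))  # projected position x_t^i
```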
Specifically, if a sampled spatial point aligns with the ground geometry, the multi-view features of projected points would be similar. This condition establishes scene-independent geometric constraints. When considering the associated color \\(\\mathbf{c}_i\\), a conventional method is a linear combination for aggregation:" + }, + { + "type": "equation", + "bbox": [ + 0.137, + 0.607, + 0.469, + 0.641 + ], + "angle": 0, + "content": "\\[\n\\mathbf {c} _ {i} = \\sum_ {t \\in \\Omega_ {T}} \\omega_ {t - T} \\mathbf {I} _ {t} \\left(\\mathbf {x} _ {t} ^ {i}\\right), \\sum_ {t \\in \\Omega_ {T}} \\omega_ {t - T} = 1, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.646, + 0.469, + 0.737 + ], + "angle": 0, + "content": "where \\(\\omega_{t - T}\\) represents adaptable parameters determined by the geometric characteristics, such as the volume density \\(\\sigma_{i}\\). Since the establishment of \\(\\mathbf{c}_i\\) solely relies on input frames, it is training-free and accommodates unforeseen scenes. In volume rendering, the set \\(\\{\\mathbf{c}_i,\\sigma_i\\}_{i = 1}^L\\), describing spatial points along the same ray, determines the color intensity of \\(\\tilde{\\mathbf{x}}\\) by" + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.744, + 0.469, + 0.836 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\tilde {\\mathbf {I}} (\\tilde {\\mathbf {x}}) = \\sum_ {i = 1} ^ {L} A _ {i} \\left(1 - \\exp (- \\sigma_ {i})\\right) \\mathbf {c} _ {i}, \\tag {4} \\\\ A _ {i} = \\exp \\left(- \\sum_ {j = 1} ^ {i - 1} \\sigma_ {j}\\right). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.469, + 0.903 + ], + "angle": 0, + "content": "In Stabilized Rendering, Eq. 1 imposes epipolar constraints on features and colors warped from multiple neighboring frames. Eqs. 2 and 3 aggregate the multi-frame information into spatial descriptors \\(\\{\\mathbf{c}_i,\\sigma_i\\}_{i = 1}^L\\), and" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.088, + 0.892, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.232, + 0.892, + 0.33 + ], + "angle": 0, + "content": "Figure 3. Illustration of depth projection and splatting. Left: The depth projection involves lifting a pixel \\(\\mathbf{x}_t\\) to 3D space using the estimated depth \\(\\mathbf{D}_t(\\mathbf{x}_t)\\) and projecting to the subpixel \\(\\tilde{\\mathbf{x}}\\). The depth of \\(\\tilde{\\mathbf{x}}\\) can be calculated and denoted as \\(\\tilde{\\mathbf{D}}_t(\\tilde{\\mathbf{x}})\\). Right: As \\(\\tilde{\\mathbf{x}}\\) is not precisely projected onto a pixel coordinate, we convert its depth to adjacent pixels, e.g. \\(\\tilde{\\mathbf{x}}_p\\), with a distance-associated weight \\(\\omega_d\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.355, + 0.892, + 0.507 + ], + "angle": 0, + "content": "Eq. 4 renders stabilized images utilizing these descriptors for each pixel. Epipolar constraints guarantee structure preservation, and per-pixel rendering guarantees full-frame generation. However, the effectiveness of the aforementioned process highly depends on the ray range \\(\\tilde{\\mathbf{R}} (\\tilde{\\mathbf{x}})\\) guiding the sampling. If \\(\\tilde{\\mathbf{R}} (\\tilde{\\mathbf{x}})\\) is not distributed near the surface of objects, the model may aggregate incorrect features into inferior descriptors and diminish rendering quality.
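To make the per-ray compositing of Eqs. 3 and 4 concrete, here is a small NumPy sketch that combines per-view colors with normalized weights (Eq. 3) and then accumulates the samples along one ray exactly as written in Eq. 4. Note that Eq. 4 omits inter-sample distances, and in the actual module the densities come from the MLP of Eq. 2 and the weights from the geometry, so the toy densities, colors, and weights below are illustrative assumptions only.

```python
import numpy as np

def aggregate_color(view_colors, view_weights):
    """Eq. 3: weighted combination of colors warped from neighboring views."""
    w = np.asarray(view_weights, dtype=float)
    w /= w.sum()                                   # enforce that the weights sum to 1
    return (w[:, None] * np.asarray(view_colors)).sum(axis=0)

def composite_ray(sigmas, colors):
    """Eq. 4: accumulate (sigma_i, c_i) descriptors along one ray into a pixel color."""
    cum = np.concatenate([[0.0], np.cumsum(sigmas)[:-1]])
    A = np.exp(-cum)                               # transmittance before reaching sample i
    alpha = 1.0 - np.exp(-sigmas)                  # opacity contributed by sample i
    return ((A * alpha)[:, None] * colors).sum(axis=0)

sigmas = np.array([0.1, 2.5, 0.3])                 # middle sample sits on the surface
colors = np.array([[0.2, 0.2, 0.2],
                   [0.9, 0.1, 0.1],
                   [0.0, 0.0, 1.0]])
print(composite_ray(sigmas, colors))               # dominated by the high-density sample
```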
The forthcoming section will introduce how to adaptively define the ray range \\(\\tilde{\\mathbf{R}} (\\tilde{\\mathbf{x}})\\) to avoid the issue above." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.515, + 0.695, + 0.532 + ], + "angle": 0, + "content": "3.2. Adaptive Ray Range" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.539, + 0.892, + 0.734 + ], + "angle": 0, + "content": "Eq. 4 of Stabilized Rendering highlights the dependence of the final color intensity of \\(\\tilde{\\mathbf{I}} (\\tilde{\\mathbf{x}})\\) on the color \\(c_{i}\\) of the 3D point where the ray hits the object for the first time. It indicates that ray ranges around the ground geometry for the sampling process will benefit scene representation. A direct method to define the ray range entails treating the sequence of frames as a static scene: estimating the coarse geometry of each ray and rendering through spatial points sampled from re-defined fine ranges, such as [15, 34]. We argue that the effectiveness of the coarse-to-fine ray range relies on the geometry estimation grounded in epipolar constraints. However, dynamic regions, violating epipolar constraints, make the defined range unreliable." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.901 + ], + "angle": 0, + "content": "To tackle this challenge, we turn to the task of depth estimation. The depth model [11] employs optical flow to impose constraints on dynamic scenes. As optical flow relies on feature matching rather than epipolar constraints, it matches points with features rather than epipolar constraints, showcasing insensitivity to dynamic regions. Consequently, the estimated depth maps derived from this depth model are less susceptible to interference from dynamic objects. We propose to define an adaptive range with preestimated neighboring depth maps \\(\\{\\mathbf{D}_t\\}_{t\\in \\Omega_T}\\). In particular, we construct the range utilizing the mean and variance" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7510" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.273, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.089, + 0.47, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.25, + 0.471, + 0.278 + ], + "angle": 0, + "content": "Figure 4. The effect of temporal weights. The introduction of temporal weights can mitigate distortion." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.306, + 0.386, + 0.32 + ], + "angle": 0, + "content": "of aggregated depth maps from nearby frames." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.32, + 0.47, + 0.457 + ], + "angle": 0, + "content": "As illustrated in the left part of Fig. 3, we project \\(\\mathbf{x}_t\\) in the neighboring frame with pose \\(\\mathbf{P}_t\\) at the depth \\(\\mathbf{D}_t(\\mathbf{x}_t)\\) onto sub-pixel \\(\\tilde{\\mathbf{x}}\\) of the stabilized frame with pose \\(\\tilde{\\mathbf{P}}\\) according to the inverse of Eq. 1. However, as sub-pixel \\(\\tilde{\\mathbf{x}}\\) is not precisely projected onto a specific pixel coordinate, direct utilization of \\(\\tilde{\\mathbf{D}}_t(\\tilde{\\mathbf{x}})\\) to estimate ray ranges for pixels is not feasible. To overcome this limitation, a splatting method [29] is employed, as illustrated in the right part of Fig. 
3, converting \\(\\tilde{\\mathbf{D}}_t(\\tilde{\\mathbf{x}})\\) in the following manner:" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.468, + 0.47, + 0.505 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\mathbf {D}} _ {t} (\\tilde {\\mathbf {x}} _ {p}) = \\frac {\\sum_ {i} w _ {d} \\tilde {\\mathbf {D}} _ {t} (\\tilde {\\mathbf {x}} _ {i})}{\\sum_ {i} w _ {d}}, w _ {d} = \\prod (\\mathbf {1} - | \\tilde {\\mathbf {x}} _ {p} - \\tilde {\\mathbf {x}} _ {i} |), (5)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.514, + 0.47, + 0.574 + ], + "angle": 0, + "content": "where \\(\\tilde{\\mathbf{x}}_p\\) is a pixel and \\(\\tilde{\\mathbf{x}}_i\\) is the \\(i\\)-th sub-pixel \\(\\tilde{\\mathbf{x}}\\) around \\(\\tilde{\\mathbf{x}}_p\\) satisfying the condition \\(|\\tilde{\\mathbf{x}}_p - \\tilde{\\mathbf{x}}_i| \\in (0,1)^2\\), \\(\\prod(\\cdot)\\) suggests an element-wise multiplication in a vector, and \\(\\omega_d\\) is distance-associated weights." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.574, + 0.47, + 0.741 + ], + "angle": 0, + "content": "Given \\(\\{\\mathbf{D}_t\\}_{t\\in \\Omega_T}\\), we obtain corresponding \\(\\{\\tilde{\\mathbf{D}}_t\\}_{t\\in \\Omega_T}\\) on the stabilized frame through the project-splat process above. An intuitive approach involves directly calculating the mean \\(\\mathbf{M}\\), variance \\(\\mathbf{S}\\), and determining the sampling ray range as \\(\\mathbf{R} = [\\mathbf{M} - \\mathbf{S},\\mathbf{M} + \\mathbf{S}]\\). However, in the aforementioned depth project-splat process, depth maps further from the timestamp \\(\\mathrm{T}\\) are less reliable. Treating all depth maps equally can result in an inaccurate sampling ray range \\(\\mathbf{R}\\), leading to a decrease in the image quality (the left part of Fig. 4). This observation prompts the introduction of a weighted mean and variance as:" + }, + { + "type": "equation", + "bbox": [ + 0.122, + 0.753, + 0.47, + 0.793 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\mathbf {M}} = \\sum_ {t \\in \\Omega_ {T}} \\omega_ {t} \\tilde {\\mathbf {D}} _ {t}, \\tilde {S} = \\sqrt {\\sum_ {t \\in \\Omega_ {T}} \\omega_ {t} (\\tilde {\\mathbf {D}} - \\tilde {\\mathbf {M}}) ^ {2}}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.803, + 0.47, + 0.849 + ], + "angle": 0, + "content": "where \\(\\omega_{t}\\) is the temporal weighting coefficient, assigning a higher weight to the frame closer to the stabilized frame temporally and vice versa, as defined by" + }, + { + "type": "equation", + "bbox": [ + 0.202, + 0.859, + 0.47, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\omega_ {t} = \\frac {e ^ {\\lambda (t - T)}}{\\sum_ {t \\in \\Omega_ {T}} e ^ {\\lambda (t - T)}}, \\tag {7}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.088, + 0.895, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.329, + 0.894, + 0.44 + ], + "angle": 0, + "content": "Figure 5. Illustration of Color Correction module. Firstly, we project a pixel \\(\\tilde{\\mathbf{x}}_T\\) from the target stabilized frame onto corresponding \\(\\mathbf{x}_T\\) of the input frame at the same timestamp \\(T\\). Secondly, we obtain feature matching of \\(\\mathbf{x}_T\\) in the input frame at timestamps \\(t\\) using optical flow \\(\\mathbf{F}_{T\\rightarrow t}(\\mathbf{x}_T)\\). 
As geometric constraints alone are insufficient for modeling dynamic regions, we aggregate precise color by correcting the geometric projected position \\(\\mathbf{x}_t\\) to the optical-flow refined position \\(\\mathbf{x}_t'\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.468, + 0.892, + 0.565 + ], + "angle": 0, + "content": "where \\(\\lambda\\) is a hyperparameter. Subsequently, ray ranges for the stabilized frame are denoted as \\(\\tilde{\\mathbf{R}} = \\left[\\tilde{\\mathbf{M}} -\\tilde{\\mathbf{S}},\\tilde{\\mathbf{M}} +\\tilde{\\mathbf{S}}\\right]\\). and can be employed for sampling L points along each ray during the rendering process. As illustrated in the right part of Fig. 4, the Adaptive Ray Range module with temporal weighted ranges yields more favorable rendering results." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.565, + 0.892, + 0.672 + ], + "angle": 0, + "content": "The Adaptive Ray Range module provides a ray range \\(\\tilde{\\mathbf{R}}\\) around the ground geometry guiding points sampling and benefiting volume density \\(\\sigma_{i}\\) prediction. Although the guidance of \\(\\tilde{\\mathbf{R}}\\) mitigates the interference of dynamic objects, the challenge of dynamic objects goes beyond this. According to Eq. 4, the color intensity \\(\\mathbf{c}_i\\) is another factor influencing rendering quality and affected by dynamic regions as well." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.681, + 0.669, + 0.696 + ], + "angle": 0, + "content": "3.3. Color Correction" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.705, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Color intensity, denoted as \\(\\mathbf{c}_i\\), exhibits a strong dependence on geometric constraints, akin to volume density \\(\\sigma_{i}\\). However, density is predicted from the feature maps with their receptive fields, thereby exhibiting a certain tolerance to projection inaccuracy. In contrast, color intensity is derived from the linear combination of colors warped from multiple views, accentuating the sensitivity of colors to projection inaccuracy. Despite the Adaptive Ray Range module offers a correction for projection with geometric constraints, it is inadequate for accurate color aggregation (refer to the experiments). Rather than solely concentrating on refining geometric constraints, we propose to assist these constraints with optical flow. Optical flow, relying on feature similar-" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "7511" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.163, + 0.089, + 0.812, + 0.257 + ], + "angle": 0, + "content": "
MethodNUS datasetSelfie datasetDeepStab dataset
C↑D↑S↑C↑D↑S↑C↑D↑S↑
Grundmann et al. [7]2D0.710.760.820.750.810.830.770.870.84
Bundle [20]2D0.810.780.820.740.820.800.800.900.85
Yu and Ramamoorthi [40]2D0.850.810.860.830.790.860.870.920.82
DIFRINT [5]2D1.000.870.841.000.780.841.000.910.78
FuSta [24]2D1.000.870.861.000.830.871.000.920.82
Zhao et al. [42]2D1.000.900.871.000.870.871.000.940.84
Deep3D [11]3D0.660.900.940.350.700.950.750.980.92
Ours3D1.000.910.941.000.920.951.000.980.92
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.267, + 0.895, + 0.31 + ], + "angle": 0, + "content": "Table 1. Quantitative results on the NUS [20], the Selfie [38], and the DeepStab [33] datasets. We evaluate our method against baselines using three standard metrics: Cropping Ratio(C), Distortion Value(D), Stability Score(S). The best results are bolded and second-best results are highlighted by underline." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.336, + 0.47, + 0.381 + ], + "angle": 0, + "content": "ities, matches pixels with similar textures containing color information. It implies that utilizing optical flow to refine the projection can enhance color accuracy." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.381, + 0.47, + 0.487 + ], + "angle": 0, + "content": "Specifically, we focus on the input frame at \\(T\\), which adheres to epipolar constraints with the target stabilized frame at \\(T\\). As shown in Fig. 5, we employ \\(\\mathbf{I}_T\\) as a reference to correct the projection points on the neighboring frame \\(\\mathbf{I}_t\\) with optical flow. According to Eq. 1, we project a point \\(\\tilde{\\mathbf{x}}_T\\) from the stabilized pose \\(\\tilde{\\mathbf{P}}_T\\) onto the \\(\\mathbf{x}_T\\) of \\(\\mathbf{P}_T\\), the flow-associated points \\(\\mathbf{x}_t'\\) can be expressed as" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.494, + 0.47, + 0.512 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {t} ^ {\\prime} = \\mathbf {x} _ {T} + \\mathbf {F} _ {T \\rightarrow t} (\\mathbf {x} _ {T}), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.518, + 0.47, + 0.565 + ], + "angle": 0, + "content": "where \\(\\mathbf{F}_{T\\rightarrow t}\\) represents the optical flow from \\(\\mathbf{I}_T\\) to \\(\\mathbf{I}_t\\). By applying the same procedure to frames in the temporal neighborhood \\(\\Omega_T\\), we substitute the \\(\\mathbf{x}_t\\) in Eq. 3 with \\(\\mathbf{x}_t'\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.571, + 0.295, + 0.587 + ], + "angle": 0, + "content": "3.4. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.595, + 0.47, + 0.715 + ], + "angle": 0, + "content": "In our implementations, a pre-trained model from Deep3D [11] is employed to generate depth prior for the Adaptive Ray Range module and optical flow for Color Correction. Frames neighboring the timestamp \\( \\mathrm{T} \\) are symmetrically distributed, and the length of the set \\( \\Omega_T \\) is fixed to 13. For the Adaptive Ray Range module, the temporal weighting coefficient \\( \\omega_{i} \\) is calculated with \\( \\lambda = 0.5 \\), and we choose \\( L = 3 \\) for uniform spatial points sampling along each ray." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.715, + 0.47, + 0.762 + ], + "angle": 0, + "content": "Loss function. 
During training, we sample rays on all images randomly and minimize the mean squared error between the rendered color and corresponding ground truth:" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.77, + 0.47, + 0.807 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\sum_ {\\mathbf {x} \\in \\mathcal {X}} \\left| \\left| \\tilde {\\mathbf {I}} (\\mathbf {x}) - \\mathbf {I} _ {g t} (\\mathbf {x}) \\right| \\right| _ {2} ^ {2}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.47, + 0.841 + ], + "angle": 0, + "content": "where \\(\\mathbf{I}_{gt}\\) is the corresponding ground truth and \\(\\mathcal{X}\\) is the set of pixels sampled from all images in each training batch." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.841, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Training details. We follow the training setting of IBRNet [34] to train our model on LLFF [27] and IBRNetCollected [34], including high-quality natural images with accurate camera poses. Our model is trained on an RTX3090" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.336, + 0.892, + 0.428 + ], + "angle": 0, + "content": "GPU using the Adam optimizer [8]. We set the base learning rates for the feature extraction network and MLP to \\(1e^{-3}\\) and \\(5e^{-4}\\), respectively, which decay exponentially throughout the optimization process. Typically, the model converges after approximately 200k iterations, and the entire training process takes about a day to complete." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.44, + 0.634, + 0.457 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.466, + 0.722, + 0.482 + ], + "angle": 0, + "content": "4.1. Quantitative Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.489, + 0.892, + 0.595 + ], + "angle": 0, + "content": "Baselines. We choose various video stabilization algorithms as the baselines, including Grundmann et al. [7], Liu et al. [20], Wang et al. [33], Yu and Ramamoorthi [39, 40], DIFRINT [5], FuSta [24], Zhao et al. [42], and Deep3D [11]. For comparisons, we use the officially provided videos or videos generated by official implementations with default parameters or pre-trained models." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.601, + 0.892, + 0.707 + ], + "angle": 0, + "content": "Datasets. We choose three datasets with different characteristics for evaluations: (1) the NUS dataset [20] comprises 144 videos, categorized into six different scenes: Regular, Running, Crowd, Parallax, QuickRotation, and Zooming; (2) the Selfie dataset [38] contains 33 video clips featuring frontal faces with large camera motion; and (3) the DeepStab dataset [33] includes 61 high-definition videos." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.713, + 0.892, + 0.879 + ], + "angle": 0, + "content": "Metrics. We assess the performance of the stabilizers using three standard metrics widely employed in previous methods [5, 20, 24, 39, 40]: (1) Cropping Ratio: This metric measures the remaining image area after cropping the non-content pixels. (2) Distortion Value: This metric quantifies the anisotropic scaling of the homography matrix between the input and output frames. (3) Stability Score: This metric assesses the stability of the stabilized video by measuring the ratio of low-frequency motion energy to the total energy. All three metrics range from 0 to 1, with higher values indicating better performance."
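For reference, a stability score of this kind is typically computed from the frequency spectrum of a per-frame motion signal extracted from the stabilized video. The sketch below illustrates that idea with NumPy; the choice of motion signal, the frequency band treated as low-frequency, and the toy data are assumptions for illustration, not the exact evaluation protocol used in this paper.

```python
import numpy as np

def stability_score(motion_1d, low_band=(1, 6)):
    """Ratio of low-frequency motion energy to total energy (DC component excluded)."""
    spectrum = np.abs(np.fft.rfft(np.asarray(motion_1d, dtype=float))) ** 2
    total = spectrum[1:].sum()
    low = spectrum[low_band[0]:low_band[1] + 1].sum()
    return float(low / total) if total > 0 else 1.0

t = np.arange(120)
smooth_pan = 0.02 * t                                     # gentle, intended camera motion
shaky = smooth_pan + 0.5 * np.sin(2 * np.pi * 0.25 * t)   # add high-frequency jitter
print(stability_score(shaky), stability_score(smooth_pan))  # the shaky signal scores lower
```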
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.886, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Results on the NUS dataset. Our evaluation on the NUS" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7512" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.283, + 0.177 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.181, + 0.198, + 0.194 + ], + "angle": 0, + "content": "Input" + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.096, + 0.466, + 0.17 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.18, + 0.419, + 0.194 + ], + "angle": 0, + "content": "Bundle [20]" + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.095, + 0.671, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.18, + 0.661, + 0.194 + ], + "angle": 0, + "content": "Yu and Ramamoorthi [40]" + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.099, + 0.862, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.746, + 0.18, + 0.822, + 0.194 + ], + "angle": 0, + "content": "Deep3D[11]" + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.197, + 0.28, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.141, + 0.289, + 0.221, + 0.303 + ], + "angle": 0, + "content": "DIFRINT [5]" + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.197, + 0.481, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.35, + 0.289, + 0.414, + 0.303 + ], + "angle": 0, + "content": "FuSta [24]" + }, + { + "type": "image", + "bbox": [ + 0.484, + 0.197, + 0.682, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.536, + 0.289, + 0.63, + 0.303 + ], + "angle": 0, + "content": "Zhao et al. [42]" + }, + { + "type": "image", + "bbox": [ + 0.684, + 0.197, + 0.885, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.769, + 0.29, + 0.8, + 0.301 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.315, + 0.894, + 0.371 + ], + "angle": 0, + "content": "Figure 6. Visual comparison of different methods. Contrasting with the baselines in the first row, our method successfully accomplishes full-frame generation. In the second row, while these baselines achieve full-frame generation, they fall short in preserving structure; for instance, in the bottom-left region, the tree trunks are missing in their stabilized images. Please refer to our supplementary material for video comparisons with baselines." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.397, + 0.47, + 0.563 + ], + "angle": 0, + "content": "dataset [20] is detailed on the left side of Table 1, where our stabilization method excels notably in both stability and distortion reduction when compared to 2D-based methods. This success is attributed to our accuracy in constructing camera trajectories and geometry. In contrast to 3D methods, our approach stands out by leveraging information from multiple input frames, achieving an average cropping ratio of 1. This indicates the effectiveness of our method in full-frame generation across the diverse scenes in the NUS dataset, which is widely acknowledged as a robust benchmark for video stabilization algorithms." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.566, + 0.47, + 0.732 + ], + "angle": 0, + "content": "Results on the Selfie dataset. We present the results on the Selfie dataset [38] in the middle of Table 1. It is crucial to highlight that this dataset is characterized by large camera motions and extensive dynamic regions, posing challenges for video stabilization algorithms. Observing the results, a decrease is evident for most algorithms compared to their performance on the NUS dataset. Traditional 3D methods, in particular, experience a significant decline. In contrast, our method consistently delivers the best performance on the Selfie dataset. This performance shows the effectiveness of our algorithm in handling extreme scenes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Results on the DeepStab dataset. The right side of Table 1 showcases the average scores on the DeepStab dataset [33]. Notably, the videos in this dataset are of higher resolution than NUS and Selfie, specifically \\(720\\mathrm{p}\\), aligning with the common resolutions of modern devices. Despite the high distortion values across all stabilizers due to the simplicity of this dataset, our approach consistently demonstrates superior performance. This result suggests that our method is well-suited for handling high-definition videos, further emphasizing its applicability for contemporary video stabilization challenges." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.395, + 0.693, + 0.53 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.52, + 0.531, + 0.678, + 0.545 + ], + "angle": 0, + "content": "w/o Stabilized Rendering" + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.395, + 0.89, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.779, + 0.533, + 0.812, + 0.544 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.555, + 0.893, + 0.597 + ], + "angle": 0, + "content": "Figure 7. Qualitative ablation of Stabilized Rendering. Absence of Stabilized Rendering results in noticeable blurs in both static and dynamic regions." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.625, + 0.692, + 0.641 + ], + "angle": 0, + "content": "4.2. Qualitative Analysis" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.649, + 0.892, + 0.738 + ], + "angle": 0, + "content": "Visual comparisons of our method and state-of-the-art stabilizers are shown in Fig. 6. Many methods [11, 20, 40] apply aggressive cropping, as evident from the grey checkerboard regions. Comparing the bottom-left region of each image in Fig. 6 below with the top-left input, it is clear that our method suffers from fewer visual artifacts." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.755, + 0.651, + 0.771 + ], + "angle": 0, + "content": "5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.78, + 0.893, + 0.901 + ], + "angle": 0, + "content": "We conduct ablation studies to analyze the effectiveness of the proposed modules, including Stabilized Rendering (SR), the Adaptive Ray Range module (ARR), and the Color Correction module (CC). Our evaluations focus on the Crowd scene within the NUS dataset [20], chosen for its dynamic objects and diverse scenes. We choose Distortion Value and PSNR as evaluation metrics.
Distortion Value measures the pose-independent structure quality of" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7513" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.272, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.152, + 0.175, + 0.202, + 0.187 + ], + "angle": 0, + "content": "IBRNet" + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.089, + 0.47, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.352, + 0.176, + 0.401, + 0.187 + ], + "angle": 0, + "content": "ENeRF" + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.189, + 0.272, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.274, + 0.172, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.275, + 0.271, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.189, + 0.468, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.274, + 0.371, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.275, + 0.468, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.097, + 0.326, + 0.255, + 0.34 + ], + "angle": 0, + "content": "w/o Adaptive Ray Range" + }, + { + "type": "image_caption", + "bbox": [ + 0.355, + 0.327, + 0.388, + 0.339 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.35, + 0.47, + 0.392 + ], + "angle": 0, + "content": "Figure 8. Qualitative ablation of different range strategies. Among the range strategies examined, only our Adaptive Ray Range module can address distortion in image structure." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.426, + 0.47, + 0.501 + ], + "angle": 0, + "content": "images with stabilized poses. Additionally, PSNR is employed to evaluate the pixel-level performance of our model in rendering image details. As real images with stabilized poses are unavailable, we render images with the input pose to derive PSNR." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.505, + 0.47, + 0.702 + ], + "angle": 0, + "content": "Why Stabilized Rendering is needed. We conduct experiments to demonstrate the necessity of SR, which fuses features and colors in 3D space. One straightforward strategy replacing SR for fusing multiple frames is image blending. It warps nearby frames into the stabilized view and averages these images. However, as illustrated in the left part of Fig. 7, image blending leads to noticeable blur in both static regions (the stairs) and dynamic regions (the handbag and the shoulder). Comparing Row 4 and Row 3 in Table 2, the notable decreases in distortion value and PSNR align with the observation in Fig. 7. This demonstrates that SR, our 3D multi-frame fusion module using volume rendering, can enhance the structural quality of stabilized images." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Importance of Adaptive Ray Range. We compare various range strategies to affirm the importance of ARR: (1) IBRNet [34] and ENeRF [15] employ a coarse-to-fine range strategy, and (2) we adopt even sampling of 128 points following the setting of IBRNet as a substitution for ARR. However, as shown in Fig. 8, none of these strategies achieve favorable results.
Without the sampling range defined by ARR, the methods above are forced to aggregate points sampled over a large range, increasing the risk of projecting spatial points onto dynamic regions. Due to the violation of epipolar constraints, dynamic regions introduce incorrect features and colors to the aggregation of descriptors and lead to distortion of the structure. As shown in Row 1,2,3,5 of Table 1," + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.089, + 0.695, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.174, + 0.598, + 0.223 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.601, + 0.174, + 0.693, + 0.223 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.089, + 0.891, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.699, + 0.174, + 0.794, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.798, + 0.174, + 0.89, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.779, + 0.226, + 0.812, + 0.238 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.248, + 0.892, + 0.29 + ], + "angle": 0, + "content": "Figure 9. Quilitative ablation of Color Correction. The Color Correction module refining the projection enhances color accuracy, consequently reducing image artifacts." + }, + { + "type": "table", + "bbox": [ + 0.525, + 0.31, + 0.868, + 0.43 + ], + "angle": 0, + "content": "
MethodDistortion Value↑PSNR↑
ENeRF-13.45
IBRNet0.8028.31
Full (Ours)0.9040.01
w/o Stabilized Rendering0.8723.56
w/o Adaptive Ray Range0.8137.83
w/o Color Correction0.8635.81
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.44, + 0.892, + 0.496 + ], + "angle": 0, + "content": "Table 2. Quantitative results of ablation study. We conduct comparative experiments of various range strategies and study the effect of each module. It should be noted that the results of ENeRF are so poor that the Distortion Value is unavailable." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.524, + 0.8, + 0.539 + ], + "angle": 0, + "content": "ARR proves effective in preserving structure." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.548, + 0.892, + 0.654 + ], + "angle": 0, + "content": "Importance of Color Correction. We conduct a comparison between the results obtained by removing CC and using the full model. The presence of noticeable artifacts in the dynamic region in the left part of Fig. 9 leads to the decrease in PSNR comparing Row 6 and Row 3 of Table 1. This suggests that employing optical flow in CC to refine the projection can improve color accuracy." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.677, + 0.627, + 0.692 + ], + "angle": 0, + "content": "6. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In this paper, we propose a video stabilization framework termed RStab for integrating multi-frame fusion and 3D constraints to achieve full-frame generation and structure preservation. The core of RStab lies in Stabilized Rendering, a volume rendering module utilizing both colors and features for multi-frame fusion in 3D space. To enhance Stabilized Rendering module, we design an Adaptive Ray Range module for suppressing inconsistent information and a Color Correction module for refining color aggregation. By applying the three modules, RStab achieves full-frame generation with structure preservation and outperforms all previous stabilizers in FOV, image quality, and video stability across various datasets." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7514" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.197 + ], + "angle": 0, + "content": "[1] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 5855–5864, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.2, + 0.472, + 0.268 + ], + "angle": 0, + "content": "[2] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5470–5479, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.27, + 0.469, + 0.338 + ], + "angle": 0, + "content": "[3] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 14124-14133, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.34, + 0.469, + 0.408 + ], + "angle": 0, + "content": "[4] Yu-Ta Chen, Kuan-Wei Tseng, Yao-Chih Lee, Chun-Yu Chen, and Yi-Ping Hung. 
Pixstabnet: Fast multi-scale deep online video stabilization with pixel-based warping. In Proceedings of IEEE International Conference on Image Processing (ICIP), pages 1929–1933, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.41, + 0.469, + 0.45 + ], + "angle": 0, + "content": "[5] Jinsoo Choi and In So Kweon. Deep iterative frame interpolation for full-frame video stabilization. ACM Transactions on Graphics (TOG), 39(1):4:1-4:9, 2020. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.452, + 0.469, + 0.493 + ], + "angle": 0, + "content": "[6] Amit Goldstein and Raanan Fattal. Video stabilization using epipolar geometry. ACM Transactions on Graphics (TOG), 31(5):126:1-126:10, 2012. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.495, + 0.469, + 0.563 + ], + "angle": 0, + "content": "[7] Matthias Grundmann, Vivek Kwatra, and Irfan A. Essa. Auto-directed video stabilization with robust L1 optimal camera paths. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 225-232, 2011. 2, 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.565, + 0.469, + 0.606 + ], + "angle": 0, + "content": "[8] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Proceedings of International Conference on Learning Representations (ICLR), 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.607, + 0.469, + 0.661 + ], + "angle": 0, + "content": "[9] Yeong Jun Koh, Chulwoo Lee, and Chang-Su Kim. Video stabilization based on feature trajectory augmentation and selection and robust mesh grid warping. IEEE Transactions on Image Processing (TIP), 24(12):5260-5273, 2015. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.663, + 0.469, + 0.718 + ], + "angle": 0, + "content": "[10] Ken-Yi Lee, Yung-Yu Chuang, Bing-Yu Chen, and Ming Ouhyoung. Video stabilization using robust feature trajectories. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 1397-1404, 2009. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.72, + 0.469, + 0.8 + ], + "angle": 0, + "content": "[11] Yao-Chih Lee, Kuan-Wei Tseng, Yu-Ta Chen, Chien-Cheng Chen, Chu-Song Chen, and Yi-Ping Hung. 3d video stabilization with depth estimation by cnn-based optimization. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 10621–10630, 2021. 1, 2, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.469, + 0.857 + ], + "angle": 0, + "content": "[12] Chen Li, Li Song, Shuai Chen, Rong Xie, and Wenjun Zhang. Deep online video stabilization using IMU sensors. IEEE Transactions on Multimedia (TMM), 25:2047-2060, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[13] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of IEEE Conference on Com" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.119 + ], + "angle": 0, + "content": "puter Vision Pattern Recognition (CVPR), pages 6498-6508, 2021.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[14] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. 
In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4273-4284, 2023. 3, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.193, + 0.892, + 0.248 + ], + "angle": 0, + "content": "[15] Haotong Lin, Sida Peng, Zhen Xu, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Efficient neural radiance fields for interactive free-viewpoint video. In ACM SIGGRAPH Asia, pages 39:1-39:9, 2022. 2, 3, 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.25, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[16] Kaimo Lin, Nianjuan Jiang, Shuaicheng Liu, Loong-Fah Cheong, Minh N. Do, and Jiangbo Lu. Direct photometric alignment by mesh deformation. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 2701-2709, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.321, + 0.892, + 0.361 + ], + "angle": 0, + "content": "[17] Feng Liu, Michael Gleicher, Hailin Jin, and Aseem Agarwala. Content-preserving warps for 3d video stabilization. ACM Transactions on Graphics (TOG), 28(3):44, 2009. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.364, + 0.892, + 0.404 + ], + "angle": 0, + "content": "[18] Feng Liu, Michael Gleicher, Jue Wang, Hailin Jin, and Aseem Agarwala. Subspace video stabilization. ACM Transactions on Graphics (TOG), 30(1):4:1-4:10, 2011. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.407, + 0.892, + 0.461 + ], + "angle": 0, + "content": "[19] Shuaicheng Liu, Yinting Wang, Lu Yuan, Jiajun Bu, Ping Tan, and Jian Sun. Video stabilization with a depth camera. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 89-95, 2012. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.463, + 0.892, + 0.504 + ], + "angle": 0, + "content": "[20] Shuaicheng Liu, Lu Yuan, Ping Tan, and Jian Sun. Bundled camera paths for video stabilization. ACM Transactions on Graphics (TOG), 32(4):78:1-78:10, 2013. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.507, + 0.892, + 0.573 + ], + "angle": 0, + "content": "[21] Shuaicheng Liu, Lu Yuan, Ping Tan, and Jian Sun. Steadyflow: Spatially smooth optical flow for video stabilization. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4209-4216, 2014. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.577, + 0.892, + 0.632 + ], + "angle": 0, + "content": "[22] Shuaicheng Liu, Ping Tan, Lu Yuan, Jian Sun, and Bing Zeng. Meshflow: Minimum latency online video stabilization. In Proceedings of European Conference on Computer Vision (ECCV), pages 800-815, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.634, + 0.892, + 0.688 + ], + "angle": 0, + "content": "[23] Shuaicheng Liu, Mingyu Li, Shuyuan Zhu, and Bing Zeng. Codingflow: Enable video coding for video stabilization. IEEE Transactions on Image Processing (TIP), 26(7):3291-3302, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.691, + 0.892, + 0.758 + ], + "angle": 0, + "content": "[24] Yu-Lun Liu, Wei-Sheng Lai, Ming-Hsuan Yang, Yung-Yu Chuang, and Jia-Bin Huang. Hybrid neural fusion for full-frame video stabilization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 2279-2288, 2021. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.762, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[25] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. 
Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 7210–7219, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[26] Andreas Meuleman, Yu-Lun Liu, Chen Gao, Jia-Bin Huang, Changil Kim, Min H. Kim, and Johannes Kopf. Progressively optimized local radiance fields for robust view synthesis. In Proceedings of IEEE Conference on Computer Vision" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7515" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.468, + 0.119 + ], + "angle": 0, + "content": "Pattern Recognition (CVPR), pages 16539-16548, 2023. 3, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.121, + 0.469, + 0.189 + ], + "angle": 0, + "content": "[27] Ben Mildenhall, Pratul P. Srinivasan, Rodrigo Ortiz Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):29:1-29:14, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.19, + 0.469, + 0.245 + ], + "angle": 0, + "content": "[28] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (TOG), 41(4):1-15, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.247, + 0.469, + 0.3 + ], + "angle": 0, + "content": "[29] Simon Niklaus and Feng Liu. Softmax splatting for video frame interpolation. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5436-5445, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.302, + 0.469, + 0.357 + ], + "angle": 0, + "content": "[30] Zhenmei Shi, Fuhao Shi, Wei-Sheng Lai, Chia-Kai Liang, and Yingyu Liang. Deep online fused video stabilization. In Proceedings of Winter Conference on Applications of Computer Vision (WACV), pages 865-873. IEEE, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.359, + 0.469, + 0.412 + ], + "angle": 0, + "content": "[31] Brandon M. Smith, Li Zhang, Hailin Jin, and Aseem Agarwala. Light field video stabilization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 341-348, 2009. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.414, + 0.469, + 0.468 + ], + "angle": 0, + "content": "[32] Alex Trevithick and Bo Yang. Grf: Learning a general radiance field for 3d representation and rendering. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 15182-15192, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.47, + 0.469, + 0.538 + ], + "angle": 0, + "content": "[33] Miao Wang, Guo-Ye Yang, Jin-Kun Lin, Song-Hai Zhang, Ariel Shamir, Shao-Ping Lu, and Shi-Min Hu. Deep online video stabilization with multi-grid warping transformation learning. IEEE Transactions on Image Processing (TIP), 28 (5):2283-2292, 2019. 6, 7, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.539, + 0.469, + 0.621 + ], + "angle": 0, + "content": "[34] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P. Srinivasan, Howard Zhou, Jonathan T. Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas A. Funkhouser. 
Ibrnet: Learning multi-view image-based rendering. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4690-4699, 2021. 3, 4, 6, 8, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.623, + 0.469, + 0.69 + ], + "angle": 0, + "content": "[35] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5438–5448, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.692, + 0.469, + 0.746 + ], + "angle": 0, + "content": "[36] Yufei Xu, Jing Zhang, Stephen J. Maybank, and Dacheng Tao. DUT: learning video stabilization by simply watching unstable videos. IEEE Transactions on Image Processing (TIP), 31:4306-4320, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.748, + 0.469, + 0.803 + ], + "angle": 0, + "content": "[37] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelnerf: Neural radiance fields from one or few images. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4578-4587, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.805, + 0.469, + 0.845 + ], + "angle": 0, + "content": "[38] Jiyang Yu and Ravi Ramamoorthi. Selfie video stabilization. In Proceedings of European Conference on Computer Vision (ECCV), pages 569-584, 2018. 2, 6, 7, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.847, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[39] Jiyang Yu and Ravi Ramamoorthi. Robust video stabilization by optimization in CNN weight space. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 3800-3808, 2019. 6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[40] Jiyang Yu and Ravi Ramamoorthi. Learning video stabilization using optical flow. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 8156-8164, 2020. 2, 6, 7, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[41] Minda Zhao and Qiang Ling. Pwstablenet: Learning pixelwise warping maps for video stabilization. IEEE Transactions on Image Processing (TIP), 29:3582-3595, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.892, + 0.261 + ], + "angle": 0, + "content": "[42] Weiyue Zhao, Xin Li, Zhan Peng, Xianrui Luo, Xinyi Ye, Hao Lu, and Zhiguo Cao. Fast full-frame video stabilization with iterative optimization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 23534-23544, 2023. 
1, 2, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "7516" + } + ] +] \ No newline at end of file diff --git a/2024/3D Multi-frame Fusion for Video Stabilization/475676e5-4dd7-4a8c-bd05-cc44ef21267a_origin.pdf b/2024/3D Multi-frame Fusion for Video Stabilization/475676e5-4dd7-4a8c-bd05-cc44ef21267a_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e42a75f83fbcf013b6294aa829c371d9125f541c --- /dev/null +++ b/2024/3D Multi-frame Fusion for Video Stabilization/475676e5-4dd7-4a8c-bd05-cc44ef21267a_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96538a16ab564b2f6be4aa967c0dc7e0164c44dc848e01a799e5fa7dcd04e8ea +size 3789937 diff --git a/2024/3D Multi-frame Fusion for Video Stabilization/full.md b/2024/3D Multi-frame Fusion for Video Stabilization/full.md new file mode 100644 index 0000000000000000000000000000000000000000..131f7c64467aed6f7e562aa2f4786ba35b00c3fd --- /dev/null +++ b/2024/3D Multi-frame Fusion for Video Stabilization/full.md @@ -0,0 +1,333 @@ +# 3D Multi-frame Fusion for Video Stabilization + +Zhan Peng Xinyi Ye Weiyue Zhao Tianqi Liu Huiqiang Sun Baopu Li Zhiguo Cao* School of AIA, Huazhong University of Science and Technology + +{peng_zhan,xinyiye,zhaoweiyue,tq_1iu,shq1031,zgcao}@hust.edu.cn + +bpli.cuhk@gmail.com + +# Abstract + +In this paper, we present RStab, a novel framework for video stabilization that integrates 3D multi-frame fusion through volume rendering. Departing from conventional methods, we introduce a 3D multi-frame perspective to generate stabilized images, addressing the challenge of full-frame generation while preserving structure. The core of our RStab framework lies in Stabilized Rendering (SR), a volume rendering module, fusing multi-frame information in 3D space. Specifically, SR involves warping features and colors from multiple frames by projection, fusing them into descriptors to render the stabilized image. However, the precision of warped information depends on the projection accuracy, a factor significantly influenced by dynamic regions. In response, we introduce the Adaptive Ray Range (ARR) module to integrate depth priors, adaptively defining the sampling range for the projection process. Additionally, we propose Color Correction (CC) assisting geometric constraints with optical flow for accurate color aggregation. Thanks to the three modules, our RStab demonstrates superior performance compared with previous stabilizers in the field of view (FOV), image quality, and video stability across various datasets. + +# 1. Introduction + +With the widespread adoption of smartphones, videos have become an important medium for documenting and sharing lives. The videos captured with handheld devices often suffer from annoying shakes. To mitigate this prevalent issue, numerous researchers devote efforts to developing video stabilization algorithms. These methods typically involve three steps: camera trajectory estimation, trajectory smoothing, and stabilized frame generation. + +To obtain a smooth image sequence, known as stabilized frames, early methods employ 2D-plane transformations (homography [20, 23], feature trajectories [9, 10, 21], mo + +![](images/1336f4a2b313cdd05a32d4207e5d13be2c05c1f5cfd168e15fa182be2445fa84.jpg) +Figure 1. Existing dilemmas and our method. 
(a) and (b) exhibit cropping issues, characteristic of single-frame methods. (a) and (c) encounter difficulties in preserving structure, inherent in 2D-based approaches. Fortunately, our proposed method (d) not only mitigates distortion and artifacts but also maintains no-cropping stabilized frames. + +tion vectors [18]) on single frames. However, these methods suffer from two major problems. First, these single-frame approaches may produce notable missing regions at the boundary of generated stabilized images, requiring aggressive cropping to ensure a rectangular frame for video (cropping in Fig. 1(a)), further resulting in a substantial reduction in the field of view (FOV). Second, 2D transformations could give rise to structure distortion due to the lack of 3D physical information (shear in Fig. 1(a)). + +In pursuit of the stabilized full-frame, recent 2D methods [5, 24, 42] leverage nearby frames to fill in the unseen content within the target frame. However, due to the inherent absence of physical constraints in 2D transformations, 2D-based multiple-frame methods fail to preserve the structure, especially the parallax regions (Fig. 1(c)). To obtain the structure-preserved stabilized frame, some methods [11, 17, 19, 31] leverage 3D transformations to simulate + +real-world settings, employing camera poses and epipolar constraints to ensure the image structure. However, due to limited information from a single frame, they cannot generate a full frame, as shown in Fig. 1(b). In brief, the ongoing challenge of concurrently addressing full-frame generation while preserving structure for video stabilization remains a major concern for most current research works. + +To overcome the above problems, intuitively, employing multi-frame fusion with 3D transformations could offer a promising solution. However, two issues may still hinder 3D transformations from incorporating information from neighboring frames. First, since view changes induce geometric deformation, the incorporated information from nearby frames may be inconsistent, suggesting that image blending, e.g., averaging, may lead to distortion. Second, videos feature dynamic objects across frames, which cannot be adequately modeled by 3D constraints. The direct aggregation of information from nearby frames with 3D projection results in a noticeable blur (refer to the experiments). + +Motivated by the above insights and analysis, we propose a video stabilization framework termed RStab for integrating multi-frame fusion and 3D constraints to achieve full-frame generation and structure preservation. Specifically, we propose Stabilized Rendering (SR), a 3D multiframe fusion module using volume rendering. Instead of simple image blending, SR employs both color and feature space to fuse nearby information into spatial descriptors for the scene geometry, such as volume densities of spatial points. Visible points usually come with high volume densities, exhibiting consistent textures in their projections across frames. The observation suggests that points with higher consistency in aggregating information exhibit higher volume densities, implying a greater contribution to the final rendered color. + +To mitigate the impacts of dynamic regions, we propose Adaptive Ray Range (ARR) and Color Correction(CC) modules. The introduction of multi-frame depth priors in ARR constrains the sampling range for spatial points around the surface of objects. 
A narrow sampling range around the surface decreases the risk of projecting spatial points onto dynamic regions, thereby suppressing the inconsistent information aggregation induced by the dynamic objects. Despite ARR, colors are sensitive to projection inaccuracy, indicating a narrow range is insufficient. Hence, we design CC to refine the projection for color aggregation. The core of CC lies in assisting geometry constraints with optical flow, which matches pixels with similar textures containing the color information. + +By applying the three modules, RStab demonstrates the ability of full-frame generation with structure preservation (Fig. 1(d)) and outperforms all previous video stabilization algorithms in FOV, image quality, and video stability across various datasets. In summary, our key contributions + +are as follows: + +- We present a novel 3D multi-frame fusion framework for video stabilization to render full-frame stabilized images with structure preservation. +- We propose Stabilized Rendering, which fuses multiple frames in both color and feature space. We augment Stabilized Rendering with the introduction of the Adaptive Ray Range module and Color Correction module, enhancing its capacity to address dynamic regions. +- Our video stabilization framework, RStab, demonstrates state-of-the-art (SOTA) performance across various datasets. + +# 2. Related Work + +2D-based Video Stabilization. 2D video stabilization algorithms model camera trajectory and generate stabilized frames through transformations on a 2D plane, including homography [6, 7, 20, 23, 40], feature trajectories [9, 10, 21, 38], motion vectors [16, 18, 22, 36], and dense flow fields [4, 5, 24, 40, 41]. Early methods [6, 7] estimate global transformations, which proved inadequate for handling complex camera effects such as the parallax effect. Certain approaches estimate multiple local motions [20, 22] or pixel-wise warping field [36, 40, 41] for a single image, offering some relief for the challenges encountered by global transformation methods. However, due to the limited information from a single frame, these methods may result in missing content in the stabilized video. To address this, some methods [5, 24, 42] fuse information from multiple neighboring frames, enabling full-frame generation. Despite achieving a full frame, the 2D transformations lack real-world physical constraints, leading to challenges in preserving image structure. + +3D-based Video Stabilization. 3D-based video stabilizers model 3D camera trajectory and stabilize frames with epipolar projection. Some methods [11, 17] rely on the video itself, warping images instructed by projection while preserving content. Others integrate specialized hardware, such as depth cameras [19], light field cameras [31], gyroscopes [30], and IMU sensors [12], to assist with scene geometry. Both kinds of stabilizers estimate the physical motion of the real world and introduce 3D constraints in warping, benefiting stability and structure preservation. However, relying on a single frame, 3D-based video stabilizers have a limited field of view. To mitigate the issue, in this paper, we extend single-frame to multi-frame in 3D space for video stabilization. + +Neural Rendering. As a significant work in view synthesis, NeRF[15] attains photorealistic synthesized images through implicit volumetric representation and volume rendering. 
It combines multi-view information, leveraging 3D geometric constraints and pixel-wise rendering to generate high-quality images without missing content from novel + +![](images/ac1113c650ce96de13326eaa486ec6f11b1f3b840e219ef57435d9b28ce4d924.jpg) +Figure 2. Overview of our framework. (1) Given input frames $\{\mathbf{I}_t\}_{t=1}^N$ with a shaky trajectory $\{\mathbf{P}_t\}_{t=1}^N$ , our purpose lies in rendering stabilized video sequence $\{\tilde{\mathbf{I}}_t\}_{t=1}^N$ with smoothed trajectory $\{\tilde{\mathbf{P}}_t\}_{t=1}^N$ . Here, the input trajectories $\{\mathbf{P}_t\}_{t=1}^N$ derive from preprocessing, while the smoothed trajectories $\{\tilde{\mathbf{P}}_t\}_{t=1}^N$ are generated using a Trajectory Smoothing module. (2) In addition to $\{\mathbf{P}_t\}_{t=1}^N$ , depth maps $\{\mathbf{D}_t\}_{t=1}^N$ and optical flow $\{\mathbf{F}_t\}_{t=1}^N$ can be obtained during preprocessing. We aggregate $\{\mathbf{D}_t\}_{t=1}^N$ into the ray range $\{\tilde{\mathbf{R}}_t\}_{t=1}^N$ using the Adaptive Ray Range module. The ray range $\{\tilde{\mathbf{R}}_t\}_{t=1}^N$ , along with $\{\mathbf{F}_t\}_{t=1}^N$ and the smoothed trajectory $\{\tilde{\mathbf{P}}_t\}_{t=1}^N$ , serves as inputs to the Stabilized Rendering module. Conducting Stabilized Rendering, enhanced by the Color Correction module, we fuse the input frames $\{\mathbf{I}_t\}_{t=1}^N$ and their features $\{\mathcal{F}_t\}_{t=1}^N$ to render the stabilized video sequence $\{\tilde{\mathbf{I}}_t\}_{t=1}^N$ . + +viewpoints. While NeRF-based methods [1, 2, 13, 25, 28] produce impressive synthesized image quality, its limitation in per-scene training hampers its direct application in video stabilization. Certain approaches [3, 15, 32, 34, 35, 37] strive to improve the generalization of NeRF, but they are not inherently well-suited for video stabilization tasks. Some recent methods [14, 26] attempt to apply techniques in NeRF to stabilize videos, these approaches inherit the limitations of the vanilla NeRF, necessitating retraining for each specific scene. Inspired by generalized rendering technologies from IBRNet[34] and ENeRF[15], which utilize multi-view images and associated features to predict radiance fields, we further propose the Stabilized Rendering. Stabilized Rendering, enhanced by the proposed Adaptive Ray Range module and Color Correction module, extends the volume rendering technique to video stabilization. + +# 3. Method + +Our pipeline is shown in Fig. 2. Given a shaky frame sequence $\{\mathbf{I}_t\}_{t=1}^N$ of length $N$ , our objective is to generate a stabilized sequence $\{\tilde{\mathbf{I}}_t\}_{t=1}^N$ . For preprocessing of $\{\mathbf{I}_t\}_{t=1}^N$ , we estimate optical flow $\{\mathbf{F}_t\}_{t=1}^N$ , depth maps $\{\mathbf{D}_t\}_{t=1}^N$ , and camera trajectory $\{\mathbf{P}_t\}_{t=1}^N$ . With $\{\mathbf{D}_t\}_{t=1}^N$ , $\{\mathbf{P}_t\}_{t=1}^N$ and smoothed camera trajectory $\{\tilde{\mathbf{P}}_t\}_{t=1}^N$ as input, the Adaptive Ray Range module aggregates multi-view depth maps into the ray ranges $\{\tilde{\mathbf{R}}_t\}_{t=1}^N$ . Guided by the + +ranges, Stabilized Rendering enhanced by the Color Correction module generates stabilized video sequence $\{\tilde{\mathbf{I}}_t\}_{t=1}^N$ through fusing the input frames $\{\mathbf{I}_t\}_{t=1}^N$ and feature maps $\{\mathcal{F}_t\}_{t=1}^N$ obtained through feature extraction network. 
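For intuition, the Trajectory Smoothing step of this pipeline can be realized as a Gaussian filter over the camera trajectory, as the next paragraph describes. A minimal sketch follows; the axis-angle-plus-translation pose parameterization, the `scipy` filter, and the parameter names are our assumptions for illustration, not details specified by the paper:

```python
# Minimal sketch of the Trajectory Smoothing step (the Gaussian filter phi_sm
# described below). Assumptions for illustration only: each camera pose is
# stored as a 3-vector axis-angle rotation plus a 3-vector translation, and
# `sigma` plays the role of the adjustable smoothing/stability parameter.
import numpy as np
from scipy.ndimage import gaussian_filter1d


def smooth_trajectory(rotations, translations, sigma=4.0):
    """Gaussian-smooth (N, 3) rotation-vector and (N, 3) translation sequences."""
    # Per-component smoothing of rotation vectors is only a small-rotation
    # approximation; a full implementation would smooth on SO(3).
    smooth_r = gaussian_filter1d(rotations, sigma=sigma, axis=0, mode="nearest")
    smooth_t = gaussian_filter1d(translations, sigma=sigma, axis=0, mode="nearest")
    return smooth_r, smooth_t


if __name__ == "__main__":
    n = 120
    # A shaky trajectory: steady forward motion plus high-frequency jitter.
    translations = np.stack(
        [np.zeros(n), np.zeros(n), np.linspace(0.0, 3.0, n)], axis=1
    ) + 0.05 * np.random.randn(n, 3)
    rotations = 0.02 * np.random.randn(n, 3)
    smooth_r, smooth_t = smooth_trajectory(rotations, translations)
    print("frame-to-frame translation jitter before:",
          np.std(np.diff(translations, axis=0)))
    print("frame-to-frame translation jitter after: ",
          np.std(np.diff(smooth_t, axis=0)))
```

The filter width here stands in for the adjustable smoothing-window and stability parameters mentioned below; larger values yield a smoother, more stable trajectory at the cost of deviating further from the input path.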
+ +We start with preprocessing a sequence of input frames $\{\mathbf{I}_t\}_{t=1}^N$ to estimate associated depth maps $\{\mathbf{D}_t\}_{t=1}^N$ and camera trajectory $\{\mathbf{P}_t\}_{t=1}^N$ . These depth maps and camera poses are employed for camera trajectory smoothing. In our pursuit of consistent and smooth camera trajectories, we harness the flexibility of the Gaussian smoothing function: $\{\tilde{\mathbf{P}}_t\}_{t=1}^N = \phi_{sm}(\{\mathbf{P}_t\}_{t=1}^N)$ , where $\phi_{sm}$ represents the Gaussian smoothing filter, offering adjustable parameters for both the smoothing window and stability. These parameters can be fine-tuned to meet specific requirements and constraints. In Sec. 3.1, we elaborate on rendering a stabilized image with its neighboring frames through Stabilized Rendering. Due to dynamic regions, the conventional 3D-constraint-based rendering fails to adequately represent the geometry. Differing from the conventional rendering, Sec. 3.2 introduces the utilization of depth priors to constrain the sampling range of spatial points around potential geometries, such as the area around the surface of objects. Additionally, in Sec. 3.3, we discuss refining projecting inaccuracy to ensure consistent local color intensities. + +Stabilizing a video involves rendering a image sequence $\{\tilde{\mathbf{I}}_t\}_{t=1}^N$ with corresponding stabilized poses $\{\tilde{\mathbf{P}}_t\}_{t=1}^N$ . In + +practice, we adopt a sliding window strategy for frame-by-frame rendering stabilized video. For clarity, we illustrate the rendering process with a single target camera pose $\tilde{\mathbf{P}}$ at the timestamp $T$ and its temporal neighborhood $\Omega_T$ . + +# 3.1. Stabilized Rendering + +Stabilized Rendering is a multi-frame fusion module founded on epipolar constraints which fuses input images and feature maps to render a stable, uncropped video sequence. Considering a pixel $\tilde{\mathbf{x}}$ situated in the stabilized image $\tilde{\mathbf{I}}$ under a specific target camera pose $\tilde{\mathbf{P}}$ , we sample $L$ spatial points sharing projection situation $\tilde{\mathbf{x}}$ . These sampled points span depth $\{\tilde{d}_i\}_{i=1}^L$ distributed along the ray with sampling range, denoted as $\tilde{\mathbf{R}}(\tilde{\mathbf{x}})$ . We project $\tilde{\mathbf{x}}$ at depth $\tilde{d}_i$ onto the neighboring input frames $\{\mathbf{I}_t\}_{t \in \Omega_T}$ at corresponding positions $\{\mathbf{x}_t^i\}_{t \in \Omega_T}$ by + +$$ +\mathbf {x} _ {t} ^ {i} = \mathbf {K P} _ {t} \tilde {\mathbf {P}} ^ {- 1} \tilde {d} _ {i} \mathbf {K} ^ {- 1} \tilde {\mathbf {x}}, \tag {1} +$$ + +where $\mathbf{K}$ represents the camera intrinsic parameters shared by all frames in a video and $i\in (0,L]$ . With the projected points $\{\mathbf{x}_t^i\}_{t\in \Omega_T}$ , we aggregate features $\{\mathcal{F}_t(\mathbf{x}_t^i)\}_{t\in \Omega_T}$ in neighboring frames to predict the volume density $\sigma_{i}$ for the spatial point by + +$$ +\sigma_ {i} = \phi_ {m l p} \left(\left\{\mathcal {F} _ {t} \left(\mathbf {x} _ {t} ^ {i}\right) \right\} _ {t \in \Omega_ {T}}\right), \tag {2} +$$ + +where $\phi_{mlp}$ is a Multiple Layer Perceptron (refer to Supp. for details). Eq. 2 is contingent upon the consistency among features. Specifically, if a sampled spatial point aligns with the ground geometry, the multi-view features of projected points would be similar. This condition establishes scene-independent geometric constraints. 
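To make the projection of Eq. 1 concrete, the sketch below lifts a stabilized-view pixel to each sampled depth and maps it into a neighboring input view; the features $\mathcal{F}_t(\mathbf{x}_t^i)$ sampled at the resulting locations are what $\phi_{mlp}$ in Eq. 2 aggregates into the volume density. The 4x4 world-to-camera pose convention and the toy intrinsics and poses are our assumptions for illustration, not values from the paper:

```python
# Minimal numeric sketch of the per-point projection in Eq. 1: a pixel x~ of
# the stabilized view, sampled at depth d~_i, is mapped into a neighboring
# input view t. Conventions assumed for illustration (not dictated by the
# paper): poses are 4x4 world-to-camera matrices, pixels are homogeneous
# column vectors [u, v, 1], and the toy intrinsics/poses below are made up.
import numpy as np


def project_to_neighbor(x_tilde, depth, K, P_stab, P_t):
    """Project stabilized-view pixel `x_tilde` at `depth` into the view with pose `P_t`."""
    cam_pt = depth * (np.linalg.inv(K) @ x_tilde)               # 3D point in the stabilized camera
    world_pt = np.linalg.inv(P_stab) @ np.append(cam_pt, 1.0)   # homogeneous world point
    cam_t = (P_t @ world_pt)[:3]                                # same point in the neighboring camera
    uvw = K @ cam_t                                             # shared intrinsics K
    return uvw[:2] / uvw[2]                                     # pixel location x_t^i


if __name__ == "__main__":
    K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
    P_stab = np.eye(4)                 # stabilized (target) pose
    P_t = np.eye(4)
    P_t[0, 3] = -0.1                   # neighbor displaced along x
    x_tilde = np.array([320.0, 240.0, 1.0])
    for d in (1.0, 2.0, 5.0):          # a few of the L depths sampled along the ray
        print(f"depth {d}: projects to {project_to_neighbor(x_tilde, d, K, P_stab, P_t)}")
```

Running the toy example shows that nearer sampled depths land at locations shifted further in the neighboring view, which is exactly the parallax cue that the aggregated multi-view features encode.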
When considering the associated color $\mathbf{c}_i$ , a conventional method is a linear combination for aggregation: + +$$ +\mathbf {c} _ {i} = \sum_ {t \in \Omega_ {T}} \omega_ {t - T} \mathbf {I} _ {t} \left(\mathbf {x} _ {t} ^ {i}\right), \sum_ {t \in \Omega_ {T}} \omega_ {t - T} = 1, \tag {3} +$$ + +where $\omega_{t - T}$ represents adaptable parameters determined by the geometric characteristics, such as the volume density $\sigma_{i}$ . Since the establishment of $\mathbf{c}_i$ solely relies on input frames, it is training-free to accommodate unforeseen scenes. In volume rendering, the set $\{\mathbf{c}_i,\sigma_i\}_{i = 1}^L$ , describing spatial points along the same ray, determine the color intensity of $\tilde{\mathbf{x}}$ by + +$$ +\begin{array}{l} \tilde {\mathbf {I}} (\tilde {\mathbf {x}}) = \sum_ {i = 1} ^ {L} A _ {i} (1 - e x p (- \sigma_ {i})) \mathbf {c} _ {i}, \tag {4} \\ A _ {i} = \exp \left(- \sum_ {j = 1} ^ {i - 1} \sigma_ {i}\right). \\ \end{array} +$$ + +In Stabilized Rendering, Eqs. 1 imposes epipolar constraints on features and colors warped from multiple neighboring frames. Eqs. 2 & Eqs. 3 aggregate the multi-frame information into spatial descriptors $\{\mathbf{c}_i,\sigma_i\}_{i = 1}^L$ , and + +![](images/968f14f4ff99e0b97872e1b9c1e88e86758c608cfef980f25031308059c45cf5.jpg) +Figure 3. Illustration of depth projection and splatting. Left: The depth projection involves lifting a pixel $\mathbf{x}_t$ to 3D space using the estimated depth $\mathbf{D}_t(\mathbf{x}_t)$ and projecting to the subpixel $\tilde{\mathbf{x}}$ . The depth of $\tilde{\mathbf{x}}$ can be calculated and denoted as $\tilde{\mathbf{D}}_t(\tilde{\mathbf{x}})$ . Right: As $\tilde{\mathbf{x}}$ is not precisely projected onto a pixel coordinate, we convert its depth to adjacent pixels, e.g. $\tilde{\mathbf{x}}_p$ , with a distance-associated weight $\omega_t$ . + +Eqs. 4 renders stabilized images utilizing these descriptors for each pixel. Epipolar constraints guarantee the structure preservation and per-pixel rendering guarantees full-frame generation. However, the effectiveness of the aforementioned process highly depends on the ray range $\tilde{\mathbf{R}} (\tilde{\mathbf{x}})$ guiding the sampling. If $\tilde{\mathbf{R}} (\tilde{\mathbf{x}})$ is not distributed near the surface of objects, the model may aggregate incorrect features into inferior descriptors and diminish rendering quality. The forthcoming section will introduce how to adaptively define the ray range $\tilde{\mathbf{R}} (\tilde{\mathbf{x}})$ to avoid the issue above. + +# 3.2. Adaptive Ray Range + +Eq. 4 of Stabilized Rendering highlights the dependence of the final color intensity of $\tilde{\mathbf{I}} (\tilde{\mathbf{x}})$ on the color $c_{i}$ of the 3D point where the ray hits the object for the first time. It indicates that ray ranges around the ground geometry for the sampling process will benefit scene representation. A direct method to define the ray range entails treating the sequence of frames as a static scene: estimating the coarse geometry of each ray and rendering through spatial points sampled from re-defined fine ranges, such as [15, 34]. We argue that the effectiveness of the coarse-to-fine ray range relies on the geometry estimation grounded in epipolar constraints. However, dynamic regions, violating epipolar constraints, make the defined range unreliable. + +To tackle this challenge, we turn to the task of depth estimation. The depth model [11] employs optical flow to impose constraints on dynamic scenes. 
As optical flow relies on feature matching rather than epipolar constraints, it matches points with features rather than epipolar constraints, showcasing insensitivity to dynamic regions. Consequently, the estimated depth maps derived from this depth model are less susceptible to interference from dynamic objects. We propose to define an adaptive range with preestimated neighboring depth maps $\{\mathbf{D}_t\}_{t\in \Omega_T}$ . In particular, we construct the range utilizing the mean and variance + +![](images/faf2cc05008f7e460cf267eaf6f39d79d25dac7a7ca19094acb82964f541415e.jpg) +Figure 4. The effect of temporal weights. The introduction of temporal weights can mitigate distortion. + +![](images/b2e9948ceeb3d90a3b8ed45369e03cfab3af6ef17b588bcd5c24b772927b264a.jpg) + +of aggregated depth maps from nearby frames. + +As illustrated in the left part of Fig. 3, we project $\mathbf{x}_t$ in the neighboring frame with pose $\mathbf{P}_t$ at the depth $\mathbf{D}_t(\mathbf{x}_t)$ onto sub-pixel $\tilde{\mathbf{x}}$ of the stabilized frame with pose $\tilde{\mathbf{P}}$ according to the inverse of Eq. 1. However, as sub-pixel $\tilde{\mathbf{x}}$ is not precisely projected onto a specific pixel coordinate, direct utilization of $\tilde{\mathbf{D}}_t(\tilde{\mathbf{x}})$ to estimate ray ranges for pixels is not feasible. To overcome this limitation, a splatting method [29] is employed, as illustrated in the right part of Fig. 3, converting $\tilde{\mathbf{D}}_t(\tilde{\mathbf{x}})$ in the following manner: + +$$ +\tilde {\mathbf {D}} _ {t} (\tilde {\mathbf {x}} _ {p}) = \frac {\sum_ {i} w _ {d} \tilde {\mathbf {D}} _ {t} (\tilde {\mathbf {x}} _ {i})}{\sum_ {i} w _ {d}}, w _ {d} = \prod (\mathbf {1} - | \tilde {\mathbf {x}} _ {p} - \tilde {\mathbf {x}} _ {i} |), (5) +$$ + +where $\tilde{\mathbf{x}}_p$ is a pixel and $\tilde{\mathbf{x}}_i$ is the $i$ -th sub-pixel $\tilde{\mathbf{x}}$ around $\tilde{\mathbf{x}}_p$ satisfying the condition $|\tilde{\mathbf{x}}_p - \tilde{\mathbf{x}}_i| \in (0,1)^2$ , $\prod(\cdot)$ suggests an element-wise multiplication in a vector, and $\omega_d$ is distance-associated weights. + +Given $\{\mathbf{D}_t\}_{t\in \Omega_T}$ , we obtain corresponding $\{\tilde{\mathbf{D}}_t\}_{t\in \Omega_T}$ on the stabilized frame through the project-splat process above. An intuitive approach involves directly calculating the mean $\mathbf{M}$ , variance $\mathbf{S}$ , and determining the sampling ray range as $\mathbf{R} = [\mathbf{M} - \mathbf{S},\mathbf{M} + \mathbf{S}]$ . However, in the aforementioned depth project-splat process, depth maps further from the timestamp $\mathrm{T}$ are less reliable. Treating all depth maps equally can result in an inaccurate sampling ray range $\mathbf{R}$ , leading to a decrease in the image quality (the left part of Fig. 4). This observation prompts the introduction of a weighted mean and variance as: + +$$ +\tilde {\mathbf {M}} = \sum_ {t \in \Omega_ {T}} \omega_ {t} \tilde {\mathbf {D}} _ {t}, \tilde {S} = \sqrt {\sum_ {t \in \Omega_ {T}} \omega_ {t} (\tilde {\mathbf {D}} - \tilde {\mathbf {M}}) ^ {2}}, \tag {6} +$$ + +where $\omega_{t}$ is the temporal weighting coefficient, assigning a higher weight to the frame closer to the stabilized frame temporally and vice versa, as defined by + +$$ +\omega_ {t} = \frac {e ^ {\lambda (t - T)}}{\sum_ {t \in \Omega_ {T}} e ^ {\lambda (t - T)}}, \tag {7} +$$ + +![](images/8b74912345a991d5a076ab84ed325a7aede7bb518996d52ab9ff5158c14732f0.jpg) +Figure 5. Illustration of Color Correction module. 
Firstly, we project a pixel $\tilde{\mathbf{x}}_T$ from the target stabilized frame onto corresponding $\mathbf{x}_T$ of the input frame at the same timestamp $T$ . Secondly, we obtain feature matching of $\mathbf{x}_T$ in the input frame at timestamps $t$ using optical flow $\mathbf{F}_{T\rightarrow t}(\mathbf{x}_T)$ . As geometric constraints alone are insufficient for modeling dynamic regions, we aggregate precise color by correcting the geometric projected position $\mathbf{x}_t$ to the optical-flow refined position $\mathbf{x}_t'$ . + +where $\lambda$ is a hyperparameter. Subsequently, ray ranges for the stabilized frame are denoted as $\tilde{\mathbf{R}} = \left[\tilde{\mathbf{M}} -\tilde{\mathbf{S}},\tilde{\mathbf{M}} +\tilde{\mathbf{S}}\right]$ . and can be employed for sampling L points along each ray during the rendering process. As illustrated in the right part of Fig. 4, the Adaptive Ray Range module with temporal weighted ranges yields more favorable rendering results. + +The Adaptive Ray Range module provides a ray range $\tilde{\mathbf{R}}$ around the ground geometry guiding points sampling and benefiting volume density $\sigma_{i}$ prediction. Although the guidance of $\tilde{\mathbf{R}}$ mitigates the interference of dynamic objects, the challenge of dynamic objects goes beyond this. According to Eq. 4, the color intensity $\mathbf{c}_i$ is another factor influencing rendering quality and affected by dynamic regions as well. + +# 3.3. Color Correction + +Color intensity, denoted as $\mathbf{c}_i$ , exhibits a strong dependence on geometric constraints, akin to volume density $\sigma_{i}$ . However, density is predicted from the feature maps with their receptive fields, thereby exhibiting a certain tolerance to projection inaccuracy. In contrast, color intensity is derived from the linear combination of colors warped from multiple views, accentuating the sensitivity of colors to projection inaccuracy. Despite the Adaptive Ray Range module offers a correction for projection with geometric constraints, it is inadequate for accurate color aggregation (refer to the experiments). Rather than solely concentrating on refining geometric constraints, we propose to assist these constraints with optical flow. Optical flow, relying on feature similar- + +
| Method | Type | NUS C↑ | NUS D↑ | NUS S↑ | Selfie C↑ | Selfie D↑ | Selfie S↑ | DeepStab C↑ | DeepStab D↑ | DeepStab S↑ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Grundmann et al. [7] | 2D | 0.71 | 0.76 | 0.82 | 0.75 | 0.81 | 0.83 | 0.77 | 0.87 | 0.84 |
| Bundle [20] | 2D | 0.81 | 0.78 | 0.82 | 0.74 | 0.82 | 0.80 | 0.80 | 0.90 | 0.85 |
| Yu and Ramamoorthi [40] | 2D | 0.85 | 0.81 | 0.86 | 0.83 | 0.79 | 0.86 | 0.87 | 0.92 | 0.82 |
| DIFRINT [5] | 2D | 1.00 | 0.87 | 0.84 | 1.00 | 0.78 | 0.84 | 1.00 | 0.91 | 0.78 |
| FuSta [24] | 2D | 1.00 | 0.87 | 0.86 | 1.00 | 0.83 | 0.87 | 1.00 | 0.92 | 0.82 |
| Zhao et al. [42] | 2D | 1.00 | 0.90 | 0.87 | 1.00 | 0.87 | 0.87 | 1.00 | 0.94 | 0.84 |
| Deep3D [11] | 3D | 0.66 | 0.90 | 0.94 | 0.35 | 0.70 | 0.95 | 0.75 | 0.98 | 0.92 |
| Ours | 3D | 1.00 | 0.91 | 0.94 | 1.00 | 0.92 | 0.95 | 1.00 | 0.98 | 0.92 |
+ +Table 1. Quantitative results on the NUS [20], the Selfie [38], and the DeepStab [33] datasets. We evaluate our method against baselines using three standard metrics: Cropping Ratio(C), Distortion Value(D), Stability Score(S). The best results are bolded and second-best results are highlighted by underline. + +ities, matches pixels with similar textures containing color information. It implies that utilizing optical flow to refine the projection can enhance color accuracy. + +Specifically, we focus on the input frame at $T$ , which adheres to epipolar constraints with the target stabilized frame at $T$ . As shown in Fig. 5, we employ $\mathbf{I}_T$ as a reference to correct the projection points on the neighboring frame $\mathbf{I}_t$ with optical flow. According to Eq. 1, we project a point $\tilde{\mathbf{x}}_T$ from the stabilized pose $\tilde{\mathbf{P}}_T$ onto the $\mathbf{x}_T$ of $\mathbf{P}_T$ , the flow-associated points $\mathbf{x}_t'$ can be expressed as + +$$ +\mathbf {x} _ {t} ^ {\prime} = \mathbf {x} _ {T} + \mathbf {F} _ {T \rightarrow t} (\mathbf {x} _ {T}), \tag {8} +$$ + +where $\mathbf{F}_{T\rightarrow t}$ represents the optical flow from $\mathbf{I}_T$ to $\mathbf{I}_t$ . By applying the same procedure to frames in the temporal neighborhood $\Omega_T$ , we substitute the $\mathbf{x}_t$ in Eq. 3 with $\mathbf{x}_t'$ . + +# 3.4. Implementation Details + +In our implementations, a pre-trained model from Deep3D [11] is employed to generate depth prior for the Adaptive Ray Range module and optical flow for Color Correction. Frames neighboring the timestamp $\mathrm{T}$ are symmetrically distributed, and the length of the set $\Omega_T$ is fixed to 13. For the Adaptive Ray Range module, the temporal weighting coefficient $\omega_{i}$ is calculated with $\lambda = 0.5$ , and we choose $L = 3$ for uniform spatial points sampling along each ray. + +Loss function. During training, we sample rays on all images randomly and minimize the mean squared error between the rendered color and corresponding ground truth: + +$$ +\mathcal {L} = \sum_ {\mathbf {x} \in \mathcal {X}} \left| \left| \tilde {\mathbf {I}} (\mathbf {x}) - \mathbf {I} _ {g t} (\mathbf {x}) \right| \right| _ {2} ^ {2}, \tag {9} +$$ + +where $\mathbf{I}_{gt}$ is the corresponding ground truth and $\mathcal{X}$ is the set of pixels sampled from all images in each training batch. + +Training details. We follow the training setting of IBRNet [34] to train our model on LLFF [27] and IBRNetCollected [34] including high-quality natural images with accurate camera poses. Our model is trained on an RTX3090 + +GPU using the Adam optimizer[8]. We set the base learning rates for the feature extraction network and MLP to $1e^{-3}$ and $5e^{-4}$ , respectively, which decay exponentially throughout the optimization process. Typically, the model converges after approximately 200k iterations, and the entire training process takes about a day to complete. + +# 4. Experiments + +# 4.1.Quantitative Evaluation + +Baselines. We choose various video stabilization algorithms as the baselines, including Grundmann et al. [7], Liu et al. [20], Wang et al. [33], Yu and Ramamoorthi [39, 40], DIFRINT [5], FuSta [24], Zhao et al. [42], and Deep3D [11]. For comparisons, we use the official provided videos or videos generated by official implementations with default parameters or pre-trained models. + +Datasets. 
We choose three datasets with different characteristics for evaluations: (1) The NUS [20] dataset comprises 144 videos, categorized into six different scenes: Regular, Running, Crowd, Parallax, QuickRotation, and Running, (2) the Selfie dataset [38] contains 33 video clips featuring frontal faces with large camera motion, (3) and the Deep-Stab dataset [33] includes 61 high-definition videos. + +Metrics. We assess the performance of the stabilizers using three standard metrics widely employed in previous methods [5, 20, 24, 39, 40]: (1) Cropping Ratio: This metric measures the remaining image area after cropping the non-content pixels. (2) Distortion Value: This metric quantifies the anisotropic scaling of the homography matrix between the input and output frames. (3) Stability Score: This metric assesses the stability of the stabilized video by assessing the ratio of low-frequency motion energy to the total energy. All three metrics range from 0 to 1, with higher values indicating better performance. + +Results on the NUS dataset. Our evaluation on the NUS + +![](images/6445a5ac79746040c195fee948f2a9f61c217c26f446fee225e0707382894918.jpg) +Input + +![](images/38be7b4662a10d0788472d050488bfa4ad55286ec3b23a4de17a00bf1334bda5.jpg) +Bundle [20] + +![](images/4a0a3f30268d35df6d5f9852d0c989cb307ed24228b3cc117f79c28387e17b55.jpg) +Yu and Ramamoorthi [40] + +![](images/35df973166922dce73a31c575c11e1538d9a479cd9dc6ea2613ca3db735a1631.jpg) + +![](images/d59a93c292a52554cfd94aa7685c2a4c335f93218586f06cdaf4eba176509916.jpg) +DIFRINT [5] + +![](images/57964ea87012f15a8802e1b7009bb18942a7f89b4afbc78118cdca81a67ab3e4.jpg) +FuSta [24] + +![](images/08a78308b3f798156ca5e004ec39727c0d91da006b58d754cc6d58a4915e7d9a.jpg) +Zhao et al. [42] + +![](images/75bb6ba464b0ec171eed2ec71160fe7c3dbc1ea84890164dcce8b66c7db1e018.jpg) +Deep3D[11] +Ours + +dataset [20] is detailed on the left side of Table 1, where our stabilization method excels notably in both stability and distortion reduction when compared to 2D-based methods. This success is attributed to our accuracy in constructing camera trajectories and geometry. In contrast to 3D methods, our approach stands out by leveraging information from multiple input frames, achieving an average cropping ratio of 1. This indicates the effectiveness of our method in full-frame generation across the diverse scenes in the NUS dataset, which is widely acknowledged as a robust benchmark for video stabilization algorithms. + +Results on the Selfie dataset. We present the results on the Selfie dataset [38] in the middle of Table 1. It's crucial to highlight that this dataset is characterized by large camera motions and extensive dynamic regions, posing challenges for video stabilization algorithms. Observing the results, a decrease is evident for most algorithms compared to their performance on the NUS dataset. Traditional 3D methods, in particular, experience a significant decline. In contrast, our method consistently delivers the best performance on the Selfie dataset. The performance shows the effectiveness of our algorithm in handling extreme scenes. + +Results on the DeepStab dataset. The right side of Table 1 showcases the average scores on the DeepStab dataset [33]. Notably, the videos in this dataset are of higher resolution than NUS and Selfie, specifically $720\mathrm{p}$ , aligning with the common resolutions of modern devices. 
Despite the high distortion values across all stabilizers due to the simplicity of this dataset, our approach consistently demonstrates superior performance. This result suggests that our method is well-suited for handling high-definition videos, further emphasizing its applicability for contemporary video stabilization challenges. + +![](images/bbedd1702aeeeb4331fdea29791b21f14bc84f8b8c1594bb35a677f57ffe6c30.jpg) +Figure 6. Visual comparison of different methods. Contrasting with the baselines in the first row, our method successfully accomplishes full-frame generation. In the second row, while these baselines achieve full-frame generation, they fall short in preserving structure; for instance, in the bottom-left region, the tree trunks are missing in their stabilized images. Please refer to our supplementary material for video comparisons with baselines. +w/o Stabilized Rendering + +![](images/d606b9f6b4daf48d60e7051425c1c0f8ac576733b8cbcfb636363d015c2785f3.jpg) +Ours +Figure 7. Quilitative ablation of Stabilized Fusion. Absence of Stabilized Fusion results in noticeable blurs in both static and dynamic regions. + +# 4.2. Qualitative Analysis + +Visual comparisons of our method and state-of-the-art stabilizers is shown in Fig. 6. Many methods [11, 20, 40] apply aggressive cropping, as evident from the grey checkerboard regions. Comparing the bottom-left region of each image in Fig. 6 below with the top-left input, it's clear that our method suffers from fewer visual artifacts. + +# 5. Ablation Study + +We conduct ablation studies to analyze the effectiveness of the proposed modules, including Stabilized Rendering (SR), the Adaptive Ray Range module (ARR), and Color Correction module (CC). Our evaluations focus on the Crowd scene within the NUS dataset [20], chosen for its dynamic objects and diverse scenes. We choose Distortion values and PSNR as evaluation metrics. Distortion Value measures the pose-independent structure quality of + +![](images/25f5e98177727532a13733afd1d9adcf104b6a6364a937365be026954b7c8fe6.jpg) +IBRNet + +![](images/7c47836fb1ab540c22e7496bc0f983b2dad28ed4cefdccfc9bb994f3b332686a.jpg) +ENeRF + +![](images/b935cb2f8b7006df9d0a7994a7657190337d2c93beb48b0d936051ab3aa16668.jpg) + +![](images/e3ab8833bbe1c9e50c3ada67aae911bd692e8bfa26373460b95f6332ceeb6f49.jpg) +w/o Adaptive Ray Range + +![](images/9d7e36ceab158649387266c5ddbe6deb544d1868b6711ee82126c173df2a1332.jpg) +Figure 8. Quilitative ablation of different range strategies. Among the range strategies examined, only our Adaptive Ray Range module can address distortion in image structure. + +![](images/1bb923e31cac7faed4c99a41f913c826b26b9e6523ba4b8db6600cb1d2d5d3cf.jpg) +Figure 9. Quilitative ablation of Color Correction. The Color Correction module refining the projection enhances color accuracy, consequently reducing image artifacts. + +![](images/836225df1614e93b133579b0784abba78e41279b78117932d4d8bd6220dc246b.jpg) +Ours + +![](images/e327c557e851688405372d89788f43584ca0d8c4012f9867e11ceb1da7280a79.jpg) + +images with stabilized poses. Additionally, PSNR is employed to evaluate the pixel-level performance of our model in rendering image details. As real images with stabilized poses are unavailable, we render images with the input pose to derive PSNR. + +Why needs Stabilized Rendering. We conduct experiments to demonstrate the necessity of SR, which fuses features and colors in 3D space. One straightforward strategy replacing SR for fusing multiple frames is image blending. 
It warps nearby frames into the stabilized view and averages these images. However, as illustrated in the left part of Fig. 7, image blending leads to noticeable blur in both static regions (the stairs) and dynamic regions (the handbag and the shoulder). Comparing Row 4 and Row 3 in Table 1, the notable decreases in distortion value and PSNR align with the observation in Fig. 7. It demonstrates SR, our 3D multiframe fusion module using volume rendering, can enhance the structural quality of stabilized images. + +Importance of Adaptive Ray Range. We compare various range strategies to affirm the importance of ARR: (1) IBRNet [34] and ENeRF [15] employ coarse-to-fine range strategy, and (2) we adopt even sampling of 128 points following setting of IBRNet as a substitution for ARR. However, as shown in Fig. 8, none of these strategies achieve favorable results. Without the sampling range defined by ARR, the methods above are forced to aggregate points sampled over a large range, increasing the risk of projecting spatial points onto dynamic regions. Due to the violation of epipolar constraints, dynamic regions introduce incorrect features and colors to the aggregation of descriptors and lead to distortion of the structure. As shown in Row 1,2,3,5 of Table 1, + +![](images/2c8dfc7ec3132528612000bb0e10a334493166ec7b1563820334ca1c2f438a19.jpg) + +![](images/cf84b471ac347006d196b9d1ddd9e5bd7cc6a73e0f8ea48a49e453eeaea2be73.jpg) + +![](images/e45170879f7bd8948694782837c63e9cee54a4f515b022889e97fb02c1efbb3b.jpg) + +![](images/3c369aea13eb5c2d15c6de431b90a0085a42c5f5c9281851f3cd911fe656c555.jpg) + +![](images/4ce40c892ab996b60ab2af19413f15607c0b324e2e725ffb26265fdd4f799f07.jpg) +Ours + +![](images/8f5ed14cc5ec74546b2636d6eb8992f7ae2421f8a90f86b7c00d7d6e6b1e53ab.jpg) + +
| Method | Distortion Value↑ | PSNR↑ |
| --- | --- | --- |
| ENeRF | - | 13.45 |
| IBRNet | 0.80 | 28.31 |
| Full (Ours) | 0.90 | 40.01 |
| w/o Stabilized Rendering | 0.87 | 23.56 |
| w/o Adaptive Ray Range | 0.81 | 37.83 |
| w/o Color Correction | 0.86 | 35.81 |
+ +Table 2. Quantitative results of ablation study. We conduct comparative experiments of various range strategies and study the effect of each module. It should be noted that the results of ENeRF are so poor that the Distortion Value is unavailable. + +ARR proves effective in preserving structure. + +Importance of Color Correction. We conduct a comparison between the results obtained by removing CC and using the full model. The presence of noticeable artifacts in the dynamic region in the left part of Fig. 9 leads to the decrease in PSNR comparing Row 6 and Row 3 of Table 1. This suggests that employing optical flow in CC to refine the projection can improve color accuracy. + +# 6. Conclusions + +In this paper, we propose a video stabilization framework termed RStab for integrating multi-frame fusion and 3D constraints to achieve full-frame generation and structure preservation. The core of RStab lies in Stabilized Rendering, a volume rendering module utilizing both colors and features for multi-frame fusion in 3D space. To enhance Stabilized Rendering module, we design an Adaptive Ray Range module for suppressing inconsistent information and a Color Correction module for refining color aggregation. By applying the three modules, RStab achieves full-frame generation with structure preservation and outperforms all previous stabilizers in FOV, image quality, and video stability across various datasets. + +# References + +[1] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 5855–5864, 2021. 3 +[2] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5470–5479, 2022. 3 +[3] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 14124-14133, 2021. 3 +[4] Yu-Ta Chen, Kuan-Wei Tseng, Yao-Chih Lee, Chun-Yu Chen, and Yi-Ping Hung. Pixstabnet: Fast multi-scale deep online video stabilization with pixel-based warping. In Proceedings of IEEE International Conference on Image Processing (ICIP), pages 1929–1933, 2021. 2 +[5] Jinsoo Choi and In So Kweon. Deep iterative frame interpolation for full-frame video stabilization. ACM Transactions on Graphics (TOG), 39(1):4:1-4:9, 2020. 1, 2, 6, 7 +[6] Amit Goldstein and Raanan Fattal. Video stabilization using epipolar geometry. ACM Transactions on Graphics (TOG), 31(5):126:1-126:10, 2012. 2 +[7] Matthias Grundmann, Vivek Kwatra, and Irfan A. Essa. Auto-directed video stabilization with robust L1 optimal camera paths. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 225-232, 2011. 2, 6, 1 +[8] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Proceedings of International Conference on Learning Representations (ICLR), 2015. 6 +[9] Yeong Jun Koh, Chulwoo Lee, and Chang-Su Kim. Video stabilization based on feature trajectory augmentation and selection and robust mesh grid warping. IEEE Transactions on Image Processing (TIP), 24(12):5260-5273, 2015. 
1, 2 +[10] Ken-Yi Lee, Yung-Yu Chuang, Bing-Yu Chen, and Ming Ouhyoung. Video stabilization using robust feature trajectories. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 1397-1404, 2009. 1, 2 +[11] Yao-Chih Lee, Kuan-Wei Tseng, Yu-Ta Chen, Chien-Cheng Chen, Chu-Song Chen, and Yi-Ping Hung. 3d video stabilization with depth estimation by cnn-based optimization. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 10621–10630, 2021. 1, 2, 4, 6, 7 +[12] Chen Li, Li Song, Shuai Chen, Rong Xie, and Wenjun Zhang. Deep online video stabilization using IMU sensors. IEEE Transactions on Multimedia (TMM), 25:2047-2060, 2023. 2 +[13] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of IEEE Conference on Com + +puter Vision Pattern Recognition (CVPR), pages 6498-6508, 2021.3 +[14] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4273-4284, 2023. 3, 1 +[15] Haotong Lin, Sida Peng, Zhen Xu, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Efficient neural radiance fields for interactive free-viewpoint video. In ACM SIGGRAPH Asia, pages 39:1-39:9, 2022. 2, 3, 4, 8 +[16] Kaimo Lin, Nianjuan Jiang, Shuaicheng Liu, Loong-Fah Cheong, Minh N. Do, and Jiangbo Lu. Direct photometric alignment by mesh deformation. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 2701-2709, 2017. 2 +[17] Feng Liu, Michael Gleicher, Hailin Jin, and Aseem Agarwala. Content-preserving warps for 3d video stabilization. ACM Transactions on Graphics (TOG), 28(3):44, 2009. 1, 2 +[18] Feng Liu, Michael Gleicher, Jue Wang, Hailin Jin, and Aseem Agarwala. Subspace video stabilization. ACM Transactions on Graphics (TOG), 30(1):4:1-4:10, 2011. 1, 2 +[19] Shuaicheng Liu, Yinting Wang, Lu Yuan, Jiajun Bu, Ping Tan, and Jian Sun. Video stabilization with a depth camera. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 89-95, 2012. 1, 2 +[20] Shuaicheng Liu, Lu Yuan, Ping Tan, and Jian Sun. Bundled camera paths for video stabilization. ACM Transactions on Graphics (TOG), 32(4):78:1-78:10, 2013. 1, 2, 6, 7 +[21] Shuaicheng Liu, Lu Yuan, Ping Tan, and Jian Sun. Steadyflow: Spatially smooth optical flow for video stabilization. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4209-4216, 2014. 1, 2 +[22] Shuaicheng Liu, Ping Tan, Lu Yuan, Jian Sun, and Bing Zeng. Meshflow: Minimum latency online video stabilization. In Proceedings of European Conference on Computer Vision (ECCV), pages 800-815, 2016. 2 +[23] Shuaicheng Liu, Mingyu Li, Shuyuan Zhu, and Bing Zeng. Codingflow: Enable video coding for video stabilization. IEEE Transactions on Image Processing (TIP), 26(7):3291-3302, 2017. 1, 2 +[24] Yu-Lun Liu, Wei-Sheng Lai, Ming-Hsuan Yang, Yung-Yu Chuang, and Jia-Bin Huang. Hybrid neural fusion for full-frame video stabilization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 2279-2288, 2021. 1, 2, 6, 7 +[25] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. 
In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 7210–7219, 2021. 3 +[26] Andreas Meuleman, Yu-Lun Liu, Chen Gao, Jia-Bin Huang, Changil Kim, Min H. Kim, and Johannes Kopf. Progressively optimized local radiance fields for robust view synthesis. In Proceedings of IEEE Conference on Computer Vision + +Pattern Recognition (CVPR), pages 16539-16548, 2023. 3, 1 +[27] Ben Mildenhall, Pratul P. Srinivasan, Rodrigo Ortiz Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):29:1-29:14, 2019. 6 +[28] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (TOG), 41(4):1-15, 2022. 3 +[29] Simon Niklaus and Feng Liu. Softmax splatting for video frame interpolation. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5436-5445, 2020. 5 +[30] Zhenmei Shi, Fuhao Shi, Wei-Sheng Lai, Chia-Kai Liang, and Yingyu Liang. Deep online fused video stabilization. In Proceedings of Winter Conference on Applications of Computer Vision (WACV), pages 865-873. IEEE, 2022. 2 +[31] Brandon M. Smith, Li Zhang, Hailin Jin, and Aseem Agarwala. Light field video stabilization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 341-348, 2009. 1, 2 +[32] Alex Trevithick and Bo Yang. Grf: Learning a general radiance field for 3d representation and rendering. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 15182-15192, 2021. 3 +[33] Miao Wang, Guo-Ye Yang, Jin-Kun Lin, Song-Hai Zhang, Ariel Shamir, Shao-Ping Lu, and Shi-Min Hu. Deep online video stabilization with multi-grid warping transformation learning. IEEE Transactions on Image Processing (TIP), 28 (5):2283-2292, 2019. 6, 7, 1 +[34] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P. Srinivasan, Howard Zhou, Jonathan T. Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas A. Funkhouser. Ibrnet: Learning multi-view image-based rendering. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4690-4699, 2021. 3, 4, 6, 8, 1 +[35] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5438–5448, 2022. 3 +[36] Yufei Xu, Jing Zhang, Stephen J. Maybank, and Dacheng Tao. DUT: learning video stabilization by simply watching unstable videos. IEEE Transactions on Image Processing (TIP), 31:4306-4320, 2022. 2 +[37] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelnerf: Neural radiance fields from one or few images. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4578-4587, 2021. 3 +[38] Jiyang Yu and Ravi Ramamoorthi. Selfie video stabilization. In Proceedings of European Conference on Computer Vision (ECCV), pages 569-584, 2018. 2, 6, 7, 1 +[39] Jiyang Yu and Ravi Ramamoorthi. Robust video stabilization by optimization in CNN weight space. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 3800-3808, 2019. 6 + +[40] Jiyang Yu and Ravi Ramamoorthi. Learning video stabilization using optical flow. 
In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 8156-8164, 2020. 2, 6, 7, 1 +[41] Minda Zhao and Qiang Ling. Pwstablenet: Learning pixelwise warping maps for video stabilization. IEEE Transactions on Image Processing (TIP), 29:3582-3595, 2020. 2 +[42] Weiyue Zhao, Xin Li, Zhan Peng, Xianrui Luo, Xinyi Ye, Hao Lu, and Zhiguo Cao. Fast full-frame video stabilization with iterative optimization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 23534-23544, 2023. 1, 2, 6, 7 \ No newline at end of file diff --git a/2024/3D Multi-frame Fusion for Video Stabilization/images.zip b/2024/3D Multi-frame Fusion for Video Stabilization/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..1273d80dac1ddb41b05f5c0669bb23a9b2b490ff --- /dev/null +++ b/2024/3D Multi-frame Fusion for Video Stabilization/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:147dc048a2dc5cb4ab43b84130d345de69e254bf38e2a0e31cfdd152dc1a88cb +size 621727 diff --git a/2024/3D Multi-frame Fusion for Video Stabilization/layout.json b/2024/3D Multi-frame Fusion for Video Stabilization/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..cba8630dd5778a59b9cb3b46aadc985d7011fe61 --- /dev/null +++ b/2024/3D Multi-frame Fusion for Video Stabilization/layout.json @@ -0,0 +1,10193 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 153, + 103, + 440, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 103, + 440, + 119 + ], + "spans": [ + { + "bbox": [ + 153, + 103, + 440, + 119 + ], + "type": "text", + "content": "3D Multi-frame Fusion for Video Stabilization" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 143, + 516, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 143, + 516, + 172 + ], + "spans": [ + { + "bbox": [ + 70, + 143, + 516, + 172 + ], + "type": "text", + "content": "Zhan Peng Xinyi Ye Weiyue Zhao Tianqi Liu Huiqiang Sun Baopu Li Zhiguo Cao* School of AIA, Huazhong University of Science and Technology" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 129, + 174, + 465, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 174, + 465, + 186 + ], + "spans": [ + { + "bbox": [ + 129, + 174, + 465, + 186 + ], + "type": "text", + "content": "{peng_zhan,xinyiye,zhaoweiyue,tq_1iu,shq1031,zgcao}@hust.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 244, + 188, + 349, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 188, + 349, + 198 + ], + "spans": [ + { + "bbox": [ + 244, + 188, + 349, + 198 + ], + "type": "text", + "content": "bpli.cuhk@gmail.com" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 229, + 192, + 242 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 229, + 192, + 242 + ], + "spans": [ + { + "bbox": [ + 143, + 229, + 192, + 242 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 255, + 290, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 255, + 290, + 518 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 290, + 518 + ], + "type": "text", + "content": "In this paper, we present RStab, a novel framework for video stabilization that integrates 3D multi-frame fusion through volume rendering. 
Departing from conventional methods, we introduce a 3D multi-frame perspective to generate stabilized images, addressing the challenge of full-frame generation while preserving structure. The core of our RStab framework lies in Stabilized Rendering (SR), a volume rendering module, fusing multi-frame information in 3D space. Specifically, SR involves warping features and colors from multiple frames by projection, fusing them into descriptors to render the stabilized image. However, the precision of warped information depends on the projection accuracy, a factor significantly influenced by dynamic regions. In response, we introduce the Adaptive Ray Range (ARR) module to integrate depth priors, adaptively defining the sampling range for the projection process. Additionally, we propose Color Correction (CC) assisting geometric constraints with optical flow for accurate color aggregation. Thanks to the three modules, our RStab demonstrates superior performance compared with previous stabilizers in the field of view (FOV), image quality, and video stability across various datasets." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 541, + 128, + 554 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 541, + 128, + 554 + ], + "spans": [ + { + "bbox": [ + 47, + 541, + 128, + 554 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 562, + 287, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 562, + 287, + 658 + ], + "spans": [ + { + "bbox": [ + 46, + 562, + 287, + 658 + ], + "type": "text", + "content": "With the widespread adoption of smartphones, videos have become an important medium for documenting and sharing lives. The videos captured with handheld devices often suffer from annoying shakes. To mitigate this prevalent issue, numerous researchers devote efforts to developing video stabilization algorithms. These methods typically involve three steps: camera trajectory estimation, trajectory smoothing, and stabilized frame generation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 658, + 287, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 658, + 287, + 695 + ], + "spans": [ + { + "bbox": [ + 47, + 658, + 287, + 695 + ], + "type": "text", + "content": "To obtain a smooth image sequence, known as stabilized frames, early methods employ 2D-plane transformations (homography [20, 23], feature trajectories [9, 10, 21], mo" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 228, + 545, + 405 + ], + "blocks": [ + { + "bbox": [ + 306, + 228, + 545, + 405 + ], + "lines": [ + { + "bbox": [ + 306, + 228, + 545, + 405 + ], + "spans": [ + { + "bbox": [ + 306, + 228, + 545, + 405 + ], + "type": "image", + "image_path": "1336f4a2b313cdd05a32d4207e5d13be2c05c1f5cfd168e15fa182be2445fa84.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 413, + 547, + 478 + ], + "lines": [ + { + "bbox": [ + 305, + 413, + 547, + 478 + ], + "spans": [ + { + "bbox": [ + 305, + 413, + 547, + 478 + ], + "type": "text", + "content": "Figure 1. Existing dilemmas and our method. (a) and (b) exhibit cropping issues, characteristic of single-frame methods. (a) and (c) encounter difficulties in preserving structure, inherent in 2D-based approaches. Fortunately, our proposed method (d) not only mitigates distortion and artifacts but also maintains no-cropping stabilized frames." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 506, + 547, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 506, + 547, + 614 + ], + "spans": [ + { + "bbox": [ + 304, + 506, + 547, + 614 + ], + "type": "text", + "content": "tion vectors [18]) on single frames. However, these methods suffer from two major problems. First, these single-frame approaches may produce notable missing regions at the boundary of generated stabilized images, requiring aggressive cropping to ensure a rectangular frame for video (cropping in Fig. 1(a)), further resulting in a substantial reduction in the field of view (FOV). Second, 2D transformations could give rise to structure distortion due to the lack of 3D physical information (shear in Fig. 1(a))." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": "In pursuit of the stabilized full-frame, recent 2D methods [5, 24, 42] leverage nearby frames to fill in the unseen content within the target frame. However, due to the inherent absence of physical constraints in 2D transformations, 2D-based multiple-frame methods fail to preserve the structure, especially the parallax regions (Fig. 1(c)). To obtain the structure-preserved stabilized frame, some methods [11, 17, 19, 31] leverage 3D transformations to simulate" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "type": "text", + "content": "*Corresponding author." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7507" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "content": "real-world settings, employing camera poses and epipolar constraints to ensure the image structure. However, due to limited information from a single frame, they cannot generate a full frame, as shown in Fig. 1(b). 
In brief, the ongoing challenge of concurrently addressing full-frame generation while preserving structure for video stabilization remains a major concern for most current research works." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 158, + 287, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 158, + 287, + 302 + ], + "spans": [ + { + "bbox": [ + 46, + 158, + 287, + 302 + ], + "type": "text", + "content": "To overcome the above problems, intuitively, employing multi-frame fusion with 3D transformations could offer a promising solution. However, two issues may still hinder 3D transformations from incorporating information from neighboring frames. First, since view changes induce geometric deformation, the incorporated information from nearby frames may be inconsistent, suggesting that image blending, e.g., averaging, may lead to distortion. Second, videos feature dynamic objects across frames, which cannot be adequately modeled by 3D constraints. The direct aggregation of information from nearby frames with 3D projection results in a noticeable blur (refer to the experiments)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 303, + 288, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 303, + 288, + 482 + ], + "spans": [ + { + "bbox": [ + 46, + 303, + 288, + 482 + ], + "type": "text", + "content": "Motivated by the above insights and analysis, we propose a video stabilization framework termed RStab for integrating multi-frame fusion and 3D constraints to achieve full-frame generation and structure preservation. Specifically, we propose Stabilized Rendering (SR), a 3D multiframe fusion module using volume rendering. Instead of simple image blending, SR employs both color and feature space to fuse nearby information into spatial descriptors for the scene geometry, such as volume densities of spatial points. Visible points usually come with high volume densities, exhibiting consistent textures in their projections across frames. The observation suggests that points with higher consistency in aggregating information exhibit higher volume densities, implying a greater contribution to the final rendered color." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 484, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 484, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 484, + 287, + 652 + ], + "type": "text", + "content": "To mitigate the impacts of dynamic regions, we propose Adaptive Ray Range (ARR) and Color Correction(CC) modules. The introduction of multi-frame depth priors in ARR constrains the sampling range for spatial points around the surface of objects. A narrow sampling range around the surface decreases the risk of projecting spatial points onto dynamic regions, thereby suppressing the inconsistent information aggregation induced by the dynamic objects. Despite ARR, colors are sensitive to projection inaccuracy, indicating a narrow range is insufficient. Hence, we design CC to refine the projection for color aggregation. The core of CC lies in assisting geometry constraints with optical flow, which matches pixels with similar textures containing the color information." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": "By applying the three modules, RStab demonstrates the ability of full-frame generation with structure preservation (Fig. 1(d)) and outperforms all previous video stabilization algorithms in FOV, image quality, and video stability across various datasets. In summary, our key contributions" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 73, + 367, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 73, + 367, + 83 + ], + "spans": [ + { + "bbox": [ + 306, + 73, + 367, + 83 + ], + "type": "text", + "content": "are as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 314, + 86, + 545, + 216 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 314, + 86, + 545, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 86, + 545, + 121 + ], + "spans": [ + { + "bbox": [ + 314, + 86, + 545, + 121 + ], + "type": "text", + "content": "- We present a novel 3D multi-frame fusion framework for video stabilization to render full-frame stabilized images with structure preservation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 121, + 545, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 121, + 545, + 181 + ], + "spans": [ + { + "bbox": [ + 314, + 121, + 545, + 181 + ], + "type": "text", + "content": "- We propose Stabilized Rendering, which fuses multiple frames in both color and feature space. We augment Stabilized Rendering with the introduction of the Adaptive Ray Range module and Color Correction module, enhancing its capacity to address dynamic regions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 182, + 545, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 182, + 545, + 216 + ], + "spans": [ + { + "bbox": [ + 314, + 182, + 545, + 216 + ], + "type": "text", + "content": "- Our video stabilization framework, RStab, demonstrates state-of-the-art (SOTA) performance across various datasets." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 227, + 392, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 227, + 392, + 239 + ], + "spans": [ + { + "bbox": [ + 306, + 227, + 392, + 239 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 247, + 545, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 247, + 545, + 474 + ], + "spans": [ + { + "bbox": [ + 304, + 247, + 545, + 474 + ], + "type": "text", + "content": "2D-based Video Stabilization. 2D video stabilization algorithms model camera trajectory and generate stabilized frames through transformations on a 2D plane, including homography [6, 7, 20, 23, 40], feature trajectories [9, 10, 21, 38], motion vectors [16, 18, 22, 36], and dense flow fields [4, 5, 24, 40, 41]. Early methods [6, 7] estimate global transformations, which proved inadequate for handling complex camera effects such as the parallax effect. Certain approaches estimate multiple local motions [20, 22] or pixel-wise warping field [36, 40, 41] for a single image, offering some relief for the challenges encountered by global transformation methods. 
However, due to the limited information from a single frame, these methods may result in missing content in the stabilized video. To address this, some methods [5, 24, 42] fuse information from multiple neighboring frames, enabling full-frame generation. Despite achieving a full frame, the 2D transformations lack real-world physical constraints, leading to challenges in preserving image structure." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 474, + 545, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 474, + 545, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 474, + 545, + 640 + ], + "type": "text", + "content": "3D-based Video Stabilization. 3D-based video stabilizers model 3D camera trajectory and stabilize frames with epipolar projection. Some methods [11, 17] rely on the video itself, warping images instructed by projection while preserving content. Others integrate specialized hardware, such as depth cameras [19], light field cameras [31], gyroscopes [30], and IMU sensors [12], to assist with scene geometry. Both kinds of stabilizers estimate the physical motion of the real world and introduce 3D constraints in warping, benefiting stability and structure preservation. However, relying on a single frame, 3D-based video stabilizers have a limited field of view. To mitigate the issue, in this paper, we extend single-frame to multi-frame in 3D space for video stabilization." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "Neural Rendering. As a significant work in view synthesis, NeRF[15] attains photorealistic synthesized images through implicit volumetric representation and volume rendering. It combines multi-view information, leveraging 3D geometric constraints and pixel-wise rendering to generate high-quality images without missing content from novel" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7508" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 69, + 547, + 270 + ], + "blocks": [ + { + "bbox": [ + 47, + 69, + 547, + 270 + ], + "lines": [ + { + "bbox": [ + 47, + 69, + 547, + 270 + ], + "spans": [ + { + "bbox": [ + 47, + 69, + 547, + 270 + ], + "type": "image", + "image_path": "ac1113c650ce96de13326eaa486ec6f11b1f3b840e219ef57435d9b28ce4d924.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "lines": [ + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "spans": [ + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": "Figure 2. Overview of our framework. 
(1) Given input frames " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": " with a shaky trajectory " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{P}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": ", our purpose lies in rendering stabilized video sequence " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": " with smoothed trajectory " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": ". Here, the input trajectories " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{P}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": " derive from preprocessing, while the smoothed trajectories " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": " are generated using a Trajectory Smoothing module. (2) In addition to " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{P}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": ", depth maps " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{D}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": " and optical flow " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{F}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": " can be obtained during preprocessing. We aggregate " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{D}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": " into the ray range " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{R}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": " using the Adaptive Ray Range module. The ray range " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{R}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": ", along with " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{F}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": " and the smoothed trajectory " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": ", serves as inputs to the Stabilized Rendering module. 
Conducting Stabilized Rendering, enhanced by the Color Correction module, we fuse the input frames " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": " and their features " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{F}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": " to render the stabilized video sequence " + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 46, + 276, + 547, + 369 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 379, + 289, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 379, + 289, + 571 + ], + "spans": [ + { + "bbox": [ + 46, + 379, + 289, + 571 + ], + "type": "text", + "content": "viewpoints. While NeRF-based methods [1, 2, 13, 25, 28] produce impressive synthesized image quality, its limitation in per-scene training hampers its direct application in video stabilization. Certain approaches [3, 15, 32, 34, 35, 37] strive to improve the generalization of NeRF, but they are not inherently well-suited for video stabilization tasks. Some recent methods [14, 26] attempt to apply techniques in NeRF to stabilize videos, these approaches inherit the limitations of the vanilla NeRF, necessitating retraining for each specific scene. Inspired by generalized rendering technologies from IBRNet[34] and ENeRF[15], which utilize multi-view images and associated features to predict radiance fields, we further propose the Stabilized Rendering. Stabilized Rendering, enhanced by the proposed Adaptive Ray Range module and Color Correction module, extends the volume rendering technique to video stabilization." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 583, + 103, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 583, + 103, + 594 + ], + "spans": [ + { + "bbox": [ + 47, + 583, + 103, + 594 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": "Our pipeline is shown in Fig. 2. Given a shaky frame sequence " + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": ", our objective is to generate a stabilized sequence " + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": ". 
For preprocessing of " + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": ", we estimate optical flow " + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{F}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": ", depth maps " + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{D}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": ", and camera trajectory " + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{P}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": ". With " + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{D}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{P}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": " and smoothed camera trajectory " + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": " as input, the Adaptive Ray Range module aggregates multi-view depth maps into the ray ranges " + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{R}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 47, + 604, + 289, + 715 + ], + "type": "text", + "content": ". Guided by the" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 378, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 378, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 305, + 378, + 545, + 434 + ], + "type": "text", + "content": "ranges, Stabilized Rendering enhanced by the Color Correction module generates stabilized video sequence " + }, + { + "bbox": [ + 305, + 378, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 305, + 378, + 545, + 434 + ], + "type": "text", + "content": " through fusing the input frames " + }, + { + "bbox": [ + 305, + 378, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 305, + 378, + 545, + 434 + ], + "type": "text", + "content": " and feature maps " + }, + { + "bbox": [ + 305, + 378, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{F}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 305, + 378, + 545, + 434 + ], + "type": "text", + "content": " obtained through feature extraction network." 
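The trajectory-smoothing step referenced here (and detailed just below as a Gaussian filter phi_sm over the estimated poses) can be sketched in a few lines. This is a minimal illustration, not the authors' implementation: it assumes the camera poses are parameterized as per-frame centers plus axis-angle rotations and smooths each parameter independently along time, with sigma standing in for the adjustable smoothing-window/stability parameter.

import numpy as np
from scipy.ndimage import gaussian_filter1d

def smooth_trajectory(centers, rotvecs, sigma=3.0):
    # centers: (N, 3) camera centers, rotvecs: (N, 3) axis-angle rotations,
    # both estimated during preprocessing. Gaussian-filter each pose
    # parameter along the time axis (a simplification of phi_sm; a fuller
    # treatment would smooth on SE(3)/quaternions rather than per channel).
    smoothed_centers = gaussian_filter1d(centers, sigma=sigma, axis=0, mode="nearest")
    smoothed_rotvecs = gaussian_filter1d(rotvecs, sigma=sigma, axis=0, mode="nearest")
    return smoothed_centers, smoothed_rotvecs

A larger sigma gives a smoother but less responsive stabilized trajectory, which is the stability/window trade-off mentioned in the text.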
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "text", + "content": "We start with preprocessing a sequence of input frames " + }, + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "text", + "content": " to estimate associated depth maps " + }, + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{D}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "text", + "content": " and camera trajectory " + }, + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{P}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "text", + "content": ". These depth maps and camera poses are employed for camera trajectory smoothing. In our pursuit of consistent and smooth camera trajectories, we harness the flexibility of the Gaussian smoothing function: " + }, + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N = \\phi_{sm}(\\{\\mathbf{P}_t\\}_{t=1}^N)" + }, + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "inline_equation", + "content": "\\phi_{sm}" + }, + { + "bbox": [ + 304, + 437, + 546, + 682 + ], + "type": "text", + "content": " represents the Gaussian smoothing filter, offering adjustable parameters for both the smoothing window and stability. These parameters can be fine-tuned to meet specific requirements and constraints. In Sec. 3.1, we elaborate on rendering a stabilized image with its neighboring frames through Stabilized Rendering. Due to dynamic regions, the conventional 3D-constraint-based rendering fails to adequately represent the geometry. Differing from the conventional rendering, Sec. 3.2 introduces the utilization of depth priors to constrain the sampling range of spatial points around potential geometries, such as the area around the surface of objects. Additionally, in Sec. 3.3, we discuss refining projecting inaccuracy to ensure consistent local color intensities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 686, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 686, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 306, + 686, + 547, + 715 + ], + "type": "text", + "content": "Stabilizing a video involves rendering a image sequence " + }, + { + "bbox": [ + 306, + 686, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{I}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 306, + 686, + 547, + 715 + ], + "type": "text", + "content": " with corresponding stabilized poses " + }, + { + "bbox": [ + 306, + 686, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{P}}_t\\}_{t=1}^N" + }, + { + "bbox": [ + 306, + 686, + 547, + 715 + ], + "type": "text", + "content": ". 
In" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7509" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "practice, we adopt a sliding window strategy for frame-by-frame rendering stabilized video. For clarity, we illustrate the rendering process with a single target camera pose " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{P}}" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " at the timestamp " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " and its temporal neighborhood " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\Omega_T" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 125, + 168, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 125, + 168, + 138 + ], + "spans": [ + { + "bbox": [ + 47, + 125, + 168, + 138 + ], + "type": "text", + "content": "3.1. Stabilized Rendering" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "spans": [ + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": "Stabilized Rendering is a multi-frame fusion module founded on epipolar constraints which fuses input images and feature maps to render a stable, uncropped video sequence. Considering a pixel " + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": " situated in the stabilized image " + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{I}}" + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": " under a specific target camera pose " + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{P}}" + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": ", we sample " + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": " spatial points sharing projection situation " + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": ". 
These sampled points span depth " + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\{\\tilde{d}_i\\}_{i=1}^L" + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": " distributed along the ray with sampling range, denoted as " + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{R}}(\\tilde{\\mathbf{x}})" + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": ". We project " + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": " at depth " + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\tilde{d}_i" + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": " onto the neighboring input frames " + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}_t\\}_{t \\in \\Omega_T}" + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": " at corresponding positions " + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{x}_t^i\\}_{t \\in \\Omega_T}" + }, + { + "bbox": [ + 46, + 144, + 287, + 271 + ], + "type": "text", + "content": " by" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 114, + 276, + 287, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 276, + 287, + 291 + ], + "spans": [ + { + "bbox": [ + 114, + 276, + 287, + 291 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {t} ^ {i} = \\mathbf {K P} _ {t} \\tilde {\\mathbf {P}} ^ {- 1} \\tilde {d} _ {i} \\mathbf {K} ^ {- 1} \\tilde {\\mathbf {x}}, \\tag {1}", + "image_path": "a42fe176b352109ce9d153376e0d788ea5656f8e23391fc45f3ea9cbf469e373.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "spans": [ + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "inline_equation", + "content": "\\mathbf{K}" + }, + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "text", + "content": " represents the camera intrinsic parameters shared by all frames in a video and " + }, + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "inline_equation", + "content": "i\\in (0,L]" + }, + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "text", + "content": ". 
With the projected points " + }, + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{x}_t^i\\}_{t\\in \\Omega_T}" + }, + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "text", + "content": ", we aggregate features " + }, + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{F}_t(\\mathbf{x}_t^i)\\}_{t\\in \\Omega_T}" + }, + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "text", + "content": " in neighboring frames to predict the volume density " + }, + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "inline_equation", + "content": "\\sigma_{i}" + }, + { + "bbox": [ + 46, + 296, + 287, + 356 + ], + "type": "text", + "content": " for the spatial point by" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 361, + 287, + 376 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 361, + 287, + 376 + ], + "spans": [ + { + "bbox": [ + 107, + 361, + 287, + 376 + ], + "type": "interline_equation", + "content": "\\sigma_ {i} = \\phi_ {m l p} \\left(\\left\\{\\mathcal {F} _ {t} \\left(\\mathbf {x} _ {t} ^ {i}\\right) \\right\\} _ {t \\in \\Omega_ {T}}\\right), \\tag {2}", + "image_path": "531eed52b950de4c95ff6b434d3c767de256ea07c056f420a70bc0bb1f10c1e2.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 380, + 287, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 380, + 287, + 475 + ], + "spans": [ + { + "bbox": [ + 46, + 380, + 287, + 475 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 380, + 287, + 475 + ], + "type": "inline_equation", + "content": "\\phi_{mlp}" + }, + { + "bbox": [ + 46, + 380, + 287, + 475 + ], + "type": "text", + "content": " is a Multiple Layer Perceptron (refer to Supp. for details). Eq. 2 is contingent upon the consistency among features. Specifically, if a sampled spatial point aligns with the ground geometry, the multi-view features of projected points would be similar. This condition establishes scene-independent geometric constraints. 
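A minimal numpy sketch of the epipolar projection in Eq. 1: a stabilized-frame pixel is back-projected at each sampled depth and reprojected into every neighboring input frame, and the features F_t sampled at the returned positions are what the MLP of Eq. 2 aggregates into sigma_i. Poses are assumed to be 4x4 world-to-camera matrices and K the shared 3x3 intrinsics; the function name is illustrative, not part of the released code.

import numpy as np

def project_to_neighbors(x_tilde, depths, K, P_tilde, neighbor_poses):
    # x_tilde: (2,) pixel in the stabilized frame; depths: (L,) sampled depths
    # d_i along its ray; K: (3, 3) shared intrinsics; P_tilde and each
    # neighbor_poses[t]: (4, 4) world-to-camera extrinsics.
    x_h = np.array([x_tilde[0], x_tilde[1], 1.0])
    pts_cam = depths[:, None] * (np.linalg.inv(K) @ x_h)                  # lift to 3D, (L, 3)
    pts_h = np.concatenate([pts_cam, np.ones((len(depths), 1))], axis=1)
    pts_world = (np.linalg.inv(P_tilde) @ pts_h.T).T                      # stabilized cam -> world
    projections = {}
    for t, P_t in neighbor_poses.items():
        cam_t = (P_t @ pts_world.T).T[:, :3]                              # world -> input view t
        uv = (K @ cam_t.T).T
        projections[t] = uv[:, :2] / uv[:, 2:3]                           # x_t^i after perspective divide
    return projections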
When considering the associated color " + }, + { + "bbox": [ + 46, + 380, + 287, + 475 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 46, + 380, + 287, + 475 + ], + "type": "text", + "content": ", a conventional method is a linear combination for aggregation:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 480, + 287, + 507 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 480, + 287, + 507 + ], + "spans": [ + { + "bbox": [ + 83, + 480, + 287, + 507 + ], + "type": "interline_equation", + "content": "\\mathbf {c} _ {i} = \\sum_ {t \\in \\Omega_ {T}} \\omega_ {t - T} \\mathbf {I} _ {t} \\left(\\mathbf {x} _ {t} ^ {i}\\right), \\sum_ {t \\in \\Omega_ {T}} \\omega_ {t - T} = 1, \\tag {3}", + "image_path": "a99cec4f4839de8cf19a4cdd094bc96aab806da0fa11abf96c3f76ca5756fec2.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "spans": [ + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "inline_equation", + "content": "\\omega_{t - T}" + }, + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "text", + "content": " represents adaptable parameters determined by the geometric characteristics, such as the volume density " + }, + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "inline_equation", + "content": "\\sigma_{i}" + }, + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "text", + "content": ". Since the establishment of " + }, + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "text", + "content": " solely relies on input frames, it is training-free to accommodate unforeseen scenes. In volume rendering, the set " + }, + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}_i,\\sigma_i\\}_{i = 1}^L" + }, + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "text", + "content": ", describing spatial points along the same ray, determine the color intensity of " + }, + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 511, + 287, + 583 + ], + "type": "text", + "content": " by" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 97, + 589, + 287, + 662 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 589, + 287, + 662 + ], + "spans": [ + { + "bbox": [ + 97, + 589, + 287, + 662 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\tilde {\\mathbf {I}} (\\tilde {\\mathbf {x}}) = \\sum_ {i = 1} ^ {L} A _ {i} (1 - e x p (- \\sigma_ {i})) \\mathbf {c} _ {i}, \\tag {4} \\\\ A _ {i} = \\exp \\left(- \\sum_ {j = 1} ^ {i - 1} \\sigma_ {i}\\right). \\\\ \\end{array}", + "image_path": "6c10f0be20f769c2d47bd8bbf2c1f73794ead28df15f732e5eb9179be06bd035.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "content": "In Stabilized Rendering, Eqs. 1 imposes epipolar constraints on features and colors warped from multiple neighboring frames. Eqs. 2 & Eqs. 
3 aggregate the multi-frame information into spatial descriptors " + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}_i,\\sigma_i\\}_{i = 1}^L" + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "content": " , and" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 309, + 69, + 545, + 175 + ], + "blocks": [ + { + "bbox": [ + 309, + 69, + 545, + 175 + ], + "lines": [ + { + "bbox": [ + 309, + 69, + 545, + 175 + ], + "spans": [ + { + "bbox": [ + 309, + 69, + 545, + 175 + ], + "type": "image", + "image_path": "968f14f4ff99e0b97872e1b9c1e88e86758c608cfef980f25031308059c45cf5.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "lines": [ + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "spans": [ + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "text", + "content": "Figure 3. Illustration of depth projection and splatting. Left: The depth projection involves lifting a pixel " + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "text", + "content": " to 3D space using the estimated depth " + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_t(\\mathbf{x}_t)" + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "text", + "content": " and projecting to the subpixel " + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}" + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "text", + "content": ". The depth of " + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}" + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "text", + "content": " can be calculated and denoted as " + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{D}}_t(\\tilde{\\mathbf{x}})" + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "text", + "content": ". Right: As " + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}" + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "text", + "content": " is not precisely projected onto a pixel coordinate, we convert its depth to adjacent pixels, e.g. " + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_p" + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "text", + "content": ", with a distance-associated weight " + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "inline_equation", + "content": "\\omega_t" + }, + { + "bbox": [ + 304, + 183, + 545, + 261 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 281, + 545, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 281, + 545, + 401 + ], + "spans": [ + { + "bbox": [ + 304, + 281, + 545, + 401 + ], + "type": "text", + "content": "Eqs. 4 renders stabilized images utilizing these descriptors for each pixel. Epipolar constraints guarantee the structure preservation and per-pixel rendering guarantees full-frame generation. 
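The color blending of Eq. 3 and the per-ray compositing of Eq. 4 amount to standard alpha compositing. Below is a minimal numpy sketch, assuming the densities sigma_i have already been predicted via Eq. 2 and reading the transmittance A_i as the usual sum over the preceding samples j < i; the function names are illustrative, not the authors' code.

import numpy as np

def aggregate_colors(neighbor_colors, omega):
    # Eq. (3): blend colors warped from neighboring frames.
    # neighbor_colors: (K, L, 3) colors I_t(x_t^i); omega: (K,) blend weights.
    w = np.asarray(omega, dtype=float)
    w = w / w.sum()                                            # enforce sum to 1
    return (w[:, None, None] * neighbor_colors).sum(axis=0)    # c_i, (L, 3)

def composite_ray(sigmas, colors):
    # Eq. (4): composite the L samples of one ray into a pixel color.
    # sigmas: (L,) volume densities; colors: (L, 3) per-point colors c_i.
    alphas = 1.0 - np.exp(-sigmas)                                        # per-sample opacity
    trans = np.exp(-np.concatenate([[0.0], np.cumsum(sigmas)[:-1]]))      # A_i over preceding samples
    weights = trans * alphas
    return (weights[:, None] * colors).sum(axis=0)                        # stabilized pixel value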
However, the effectiveness of the aforementioned process highly depends on the ray range " + }, + { + "bbox": [ + 304, + 281, + 545, + 401 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{R}} (\\tilde{\\mathbf{x}})" + }, + { + "bbox": [ + 304, + 281, + 545, + 401 + ], + "type": "text", + "content": " guiding the sampling. If " + }, + { + "bbox": [ + 304, + 281, + 545, + 401 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{R}} (\\tilde{\\mathbf{x}})" + }, + { + "bbox": [ + 304, + 281, + 545, + 401 + ], + "type": "text", + "content": " is not distributed near the surface of objects, the model may aggregate incorrect features into inferior descriptors and diminish rendering quality. The forthcoming section will introduce how to adaptively define the ray range " + }, + { + "bbox": [ + 304, + 281, + 545, + 401 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{R}} (\\tilde{\\mathbf{x}})" + }, + { + "bbox": [ + 304, + 281, + 545, + 401 + ], + "type": "text", + "content": " to avoid the issue above." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 407, + 425, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 407, + 425, + 421 + ], + "spans": [ + { + "bbox": [ + 306, + 407, + 425, + 421 + ], + "type": "text", + "content": "3.2. Adaptive Ray Range" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 426, + 545, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 426, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 426, + 545, + 581 + ], + "type": "text", + "content": "Eq. 4 of Stabilized Rendering highlights the dependence of the final color intensity of " + }, + { + "bbox": [ + 304, + 426, + 545, + 581 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{I}} (\\tilde{\\mathbf{x}})" + }, + { + "bbox": [ + 304, + 426, + 545, + 581 + ], + "type": "text", + "content": " on the color " + }, + { + "bbox": [ + 304, + 426, + 545, + 581 + ], + "type": "inline_equation", + "content": "c_{i}" + }, + { + "bbox": [ + 304, + 426, + 545, + 581 + ], + "type": "text", + "content": " of the 3D point where the ray hits the object for the first time. It indicates that ray ranges around the ground geometry for the sampling process will benefit scene representation. A direct method to define the ray range entails treating the sequence of frames as a static scene: estimating the coarse geometry of each ray and rendering through spatial points sampled from re-defined fine ranges, such as [15, 34]. We argue that the effectiveness of the coarse-to-fine ray range relies on the geometry estimation grounded in epipolar constraints. However, dynamic regions, violating epipolar constraints, make the defined range unreliable." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": "To tackle this challenge, we turn to the task of depth estimation. The depth model [11] employs optical flow to impose constraints on dynamic scenes. As optical flow relies on feature matching rather than epipolar constraints, it matches points with features rather than epipolar constraints, showcasing insensitivity to dynamic regions. Consequently, the estimated depth maps derived from this depth model are less susceptible to interference from dynamic objects. 
We propose to define an adaptive range with preestimated neighboring depth maps " + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{D}_t\\}_{t\\in \\Omega_T}" + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": ". In particular, we construct the range utilizing the mean and variance" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7510" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 167, + 189 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 167, + 189 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 167, + 189 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 167, + 189 + ], + "type": "image", + "image_path": "faf2cc05008f7e460cf267eaf6f39d79d25dac7a7ca19094acb82964f541415e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 198, + 288, + 220 + ], + "lines": [ + { + "bbox": [ + 46, + 198, + 288, + 220 + ], + "spans": [ + { + "bbox": [ + 46, + 198, + 288, + 220 + ], + "type": "text", + "content": "Figure 4. The effect of temporal weights. The introduction of temporal weights can mitigate distortion." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 168, + 70, + 287, + 190 + ], + "blocks": [ + { + "bbox": [ + 168, + 70, + 287, + 190 + ], + "lines": [ + { + "bbox": [ + 168, + 70, + 287, + 190 + ], + "spans": [ + { + "bbox": [ + 168, + 70, + 287, + 190 + ], + "type": "image", + "image_path": "b2e9948ceeb3d90a3b8ed45369e03cfab3af6ef17b588bcd5c24b772927b264a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 242, + 236, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 242, + 236, + 253 + ], + "spans": [ + { + "bbox": [ + 47, + 242, + 236, + 253 + ], + "type": "text", + "content": "of aggregated depth maps from nearby frames." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "spans": [ + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "text", + "content": "As illustrated in the left part of Fig. 
3, we project " + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "text", + "content": " in the neighboring frame with pose " + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_t" + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "text", + "content": " at the depth " + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_t(\\mathbf{x}_t)" + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "text", + "content": " onto sub-pixel " + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "text", + "content": " of the stabilized frame with pose " + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{P}}" + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "text", + "content": " according to the inverse of Eq. 1. However, as sub-pixel " + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "text", + "content": " is not precisely projected onto a specific pixel coordinate, direct utilization of " + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{D}}_t(\\tilde{\\mathbf{x}})" + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "text", + "content": " to estimate ray ranges for pixels is not feasible. To overcome this limitation, a splatting method [29] is employed, as illustrated in the right part of Fig. 
3, converting " + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{D}}_t(\\tilde{\\mathbf{x}})" + }, + { + "bbox": [ + 46, + 253, + 287, + 361 + ], + "type": "text", + "content": " in the following manner:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 370, + 287, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 370, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 52, + 370, + 287, + 399 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {D}} _ {t} (\\tilde {\\mathbf {x}} _ {p}) = \\frac {\\sum_ {i} w _ {d} \\tilde {\\mathbf {D}} _ {t} (\\tilde {\\mathbf {x}} _ {i})}{\\sum_ {i} w _ {d}}, w _ {d} = \\prod (\\mathbf {1} - | \\tilde {\\mathbf {x}} _ {p} - \\tilde {\\mathbf {x}} _ {i} |), (5)", + "image_path": "48477e3bb1b32e4083444d564c5ff35fd6eacc7ca7fa6cd2efe7f41650c01bcb.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "spans": [ + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_p" + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "text", + "content": " is a pixel and " + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_i" + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "text", + "content": "-th sub-pixel " + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}" + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "text", + "content": " around " + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_p" + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "text", + "content": " satisfying the condition " + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "|\\tilde{\\mathbf{x}}_p - \\tilde{\\mathbf{x}}_i| \\in (0,1)^2" + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "\\prod(\\cdot)" + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "text", + "content": " suggests an element-wise multiplication in a vector, and " + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "inline_equation", + "content": "\\omega_d" + }, + { + "bbox": [ + 47, + 407, + 287, + 454 + ], + "type": "text", + "content": " is distance-associated weights." 
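A minimal sketch of this splatting step: each projected sub-pixel depth is distributed over its four surrounding integer pixels with the bilinear weights of Eq. 5, and the contributions are normalized per pixel. It assumes the sub-pixel positions and depths have already been produced by the depth projection of Fig. 3; the referenced splatting method [29] may differ in detail.

import numpy as np

def splat_depths(subpix, depths, height, width):
    # subpix: (M, 2) sub-pixel (x, y) positions in the stabilized view;
    # depths: (M,) corresponding depths. Each point contributes to its four
    # neighboring pixels with weight w_d = (1 - |dx|) * (1 - |dy|), per Eq. (5).
    num = np.zeros((height, width))
    den = np.zeros((height, width))
    for (x, y), d in zip(subpix, depths):
        x0, y0 = int(np.floor(x)), int(np.floor(y))
        for xp in (x0, x0 + 1):
            for yp in (y0, y0 + 1):
                if 0 <= xp < width and 0 <= yp < height:
                    w = (1.0 - abs(xp - x)) * (1.0 - abs(yp - y))
                    num[yp, xp] += w * d
                    den[yp, xp] += w
    splatted = np.divide(num, den, out=np.zeros_like(num), where=den > 0)
    return splatted, den   # den > 0 marks pixels that received any depth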
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "spans": [ + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "text", + "content": "Given " + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{D}_t\\}_{t\\in \\Omega_T}" + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "text", + "content": ", we obtain corresponding " + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{D}}_t\\}_{t\\in \\Omega_T}" + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "text", + "content": " on the stabilized frame through the project-splat process above. An intuitive approach involves directly calculating the mean " + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "text", + "content": ", variance " + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{S}" + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "text", + "content": ", and determining the sampling ray range as " + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{R} = [\\mathbf{M} - \\mathbf{S},\\mathbf{M} + \\mathbf{S}]" + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "text", + "content": ". However, in the aforementioned depth project-splat process, depth maps further from the timestamp " + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "inline_equation", + "content": "\\mathrm{T}" + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "text", + "content": " are less reliable. Treating all depth maps equally can result in an inaccurate sampling ray range " + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 46, + 454, + 287, + 586 + ], + "type": "text", + "content": ", leading to a decrease in the image quality (the left part of Fig. 4). 
This observation prompts the introduction of a weighted mean and variance as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 74, + 596, + 287, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 596, + 287, + 628 + ], + "spans": [ + { + "bbox": [ + 74, + 596, + 287, + 628 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {M}} = \\sum_ {t \\in \\Omega_ {T}} \\omega_ {t} \\tilde {\\mathbf {D}} _ {t}, \\tilde {S} = \\sqrt {\\sum_ {t \\in \\Omega_ {T}} \\omega_ {t} (\\tilde {\\mathbf {D}} - \\tilde {\\mathbf {M}}) ^ {2}}, \\tag {6}", + "image_path": "5c151bc8ee91f085e4179c59cb41a00136672642ef61fc73319b29bdd8e560ce.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 635, + 287, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 635, + 287, + 672 + ], + "spans": [ + { + "bbox": [ + 47, + 635, + 287, + 672 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 635, + 287, + 672 + ], + "type": "inline_equation", + "content": "\\omega_{t}" + }, + { + "bbox": [ + 47, + 635, + 287, + 672 + ], + "type": "text", + "content": " is the temporal weighting coefficient, assigning a higher weight to the frame closer to the stabilized frame temporally and vice versa, as defined by" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 123, + 680, + 287, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 680, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 123, + 680, + 287, + 715 + ], + "type": "interline_equation", + "content": "\\omega_ {t} = \\frac {e ^ {\\lambda (t - T)}}{\\sum_ {t \\in \\Omega_ {T}} e ^ {\\lambda (t - T)}}, \\tag {7}", + "image_path": "d77bbff0b56797e3aef88cb32e42f58c36e1235ff2f149cbf24be6a4cf72856c.jpg" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 307, + 69, + 547, + 253 + ], + "blocks": [ + { + "bbox": [ + 307, + 69, + 547, + 253 + ], + "lines": [ + { + "bbox": [ + 307, + 69, + 547, + 253 + ], + "spans": [ + { + "bbox": [ + 307, + 69, + 547, + 253 + ], + "type": "image", + "image_path": "8b74912345a991d5a076ab84ed325a7aede7bb518996d52ab9ff5158c14732f0.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "lines": [ + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "spans": [ + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "text", + "content": "Figure 5. Illustration of Color Correction module. Firstly, we project a pixel " + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_T" + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "text", + "content": " from the target stabilized frame onto corresponding " + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_T" + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "text", + "content": " of the input frame at the same timestamp " + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "text", + "content": ". 
Secondly, we obtain feature matching of " + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_T" + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "text", + "content": " in the input frame at timestamps " + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "text", + "content": " using optical flow " + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{T\\rightarrow t}(\\mathbf{x}_T)" + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "text", + "content": ". As geometric constraints alone are insufficient for modeling dynamic regions, we aggregate precise color by correcting the geometric projected position " + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "text", + "content": " to the optical-flow refined position " + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t'" + }, + { + "bbox": [ + 305, + 260, + 547, + 348 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 370, + 545, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 370, + 545, + 447 + ], + "spans": [ + { + "bbox": [ + 305, + 370, + 545, + 447 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 370, + 545, + 447 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 305, + 370, + 545, + 447 + ], + "type": "text", + "content": " is a hyperparameter. Subsequently, ray ranges for the stabilized frame are denoted as " + }, + { + "bbox": [ + 305, + 370, + 545, + 447 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{R}} = \\left[\\tilde{\\mathbf{M}} -\\tilde{\\mathbf{S}},\\tilde{\\mathbf{M}} +\\tilde{\\mathbf{S}}\\right]" + }, + { + "bbox": [ + 305, + 370, + 545, + 447 + ], + "type": "text", + "content": ". and can be employed for sampling L points along each ray during the rendering process. As illustrated in the right part of Fig. 4, the Adaptive Ray Range module with temporal weighted ranges yields more favorable rendering results." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 447, + 545, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 447, + 545, + 532 + ], + "spans": [ + { + "bbox": [ + 305, + 447, + 545, + 532 + ], + "type": "text", + "content": "The Adaptive Ray Range module provides a ray range " + }, + { + "bbox": [ + 305, + 447, + 545, + 532 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{R}}" + }, + { + "bbox": [ + 305, + 447, + 545, + 532 + ], + "type": "text", + "content": " around the ground geometry guiding points sampling and benefiting volume density " + }, + { + "bbox": [ + 305, + 447, + 545, + 532 + ], + "type": "inline_equation", + "content": "\\sigma_{i}" + }, + { + "bbox": [ + 305, + 447, + 545, + 532 + ], + "type": "text", + "content": " prediction. 
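To make the Adaptive Ray Range computation above concrete, the following is a minimal NumPy sketch of the temporal weighting (Eq. 7) and the weighted mean and standard deviation (Eq. 6). The function and argument names are illustrative, not taken from the paper, and the exponent is written so that it decays with |t - T|, matching the stated behaviour that frames temporally closer to the stabilized frame receive higher weight.

import numpy as np

def adaptive_ray_range(splatted_depths, timestamps, T, lam=0.5):
    """Hypothetical sketch of the Adaptive Ray Range module (Eqs. 6-7).

    splatted_depths: (N, H, W) depth maps D~_t splatted onto the stabilized view.
    timestamps:      (N,) frame indices t in Omega_T.
    T:               timestamp of the stabilized frame.
    lam:             temporal hyperparameter lambda (0.5 in the paper).
    Returns the per-pixel sampling range R~ = [M~ - S~, M~ + S~].
    """
    t = np.asarray(timestamps, dtype=np.float32)
    # Eq. 7: normalized exponential weights, peaking at the stabilized timestamp.
    logits = -lam * np.abs(t - T)
    w = np.exp(logits - logits.max())
    w /= w.sum()
    w = w[:, None, None]  # broadcast the per-frame weights over H x W
    # Eq. 6: temporally weighted mean and standard deviation of the splatted depths.
    M = (w * splatted_depths).sum(axis=0)
    S = np.sqrt((w * (splatted_depths - M) ** 2).sum(axis=0))
    return M - S, M + S  # near and far bound for each pixel's ray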
Although the guidance of " + }, + { + "bbox": [ + 305, + 447, + 545, + 532 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{R}}" + }, + { + "bbox": [ + 305, + 447, + 545, + 532 + ], + "type": "text", + "content": " mitigates the interference of dynamic objects, the challenge of dynamic objects goes beyond this. According to Eq. 4, the color intensity " + }, + { + "bbox": [ + 305, + 447, + 545, + 532 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 305, + 447, + 545, + 532 + ], + "type": "text", + "content": " is another factor influencing rendering quality and is affected by dynamic regions as well." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 539, + 409, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 539, + 409, + 551 + ], + "spans": [ + { + "bbox": [ + 306, + 539, + 409, + 551 + ], + "type": "text", + "content": "3.3. Color Correction" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 558, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 558, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 558, + 545, + 713 + ], + "type": "text", + "content": "Color intensity, denoted as " + }, + { + "bbox": [ + 305, + 558, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 305, + 558, + 545, + 713 + ], + "type": "text", + "content": ", exhibits a strong dependence on geometric constraints, akin to volume density " + }, + { + "bbox": [ + 305, + 558, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\sigma_{i}" + }, + { + "bbox": [ + 305, + 558, + 545, + 713 + ], + "type": "text", + "content": ". However, density is predicted from the feature maps with their receptive fields, thereby exhibiting a certain tolerance to projection inaccuracy. In contrast, color intensity is derived from the linear combination of colors warped from multiple views, accentuating the sensitivity of colors to projection inaccuracy. Although the Adaptive Ray Range module offers a correction for projection with geometric constraints, it is inadequate for accurate color aggregation (refer to the experiments). Rather than solely concentrating on refining geometric constraints, we propose to assist these constraints with optical flow. Optical flow, relying on feature similar-" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "7511" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 99, + 70, + 496, + 203 + ], + "blocks": [ + { + "bbox": [ + 99, + 70, + 496, + 203 + ], + "lines": [ + { + "bbox": [ + 99, + 70, + 496, + 203 + ], + "spans": [ + { + "bbox": [ + 99, + 70, + 496, + 203 + ], + "type": "table", + "html": "
MethodNUS datasetSelfie datasetDeepStab dataset
C↑D↑S↑C↑D↑S↑C↑D↑S↑
Grundmann et al. [7]2D0.710.760.820.750.810.830.770.870.84
Bundle [20]2D0.810.780.820.740.820.800.800.900.85
Yu and Ramamoorthi [40]2D0.850.810.860.830.790.860.870.920.82
DIFRINT [5]2D1.000.870.841.000.780.841.000.910.78
FuSta [24]2D1.000.870.861.000.830.871.000.920.82
Zhao et al. [42]2D1.000.900.871.000.870.871.000.940.84
Deep3D [11]3D0.660.900.940.350.700.950.750.980.92
Ours3D1.000.910.941.000.920.951.000.980.92
", + "image_path": "cabbcffe56d7f2f5d899145236c6d9665e002e4f424d54ba77f3eb56d2bbecb2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 211, + 547, + 245 + ], + "lines": [ + { + "bbox": [ + 46, + 211, + 547, + 245 + ], + "spans": [ + { + "bbox": [ + 46, + 211, + 547, + 245 + ], + "type": "text", + "content": "Table 1. Quantitative results on the NUS [20], the Selfie [38], and the DeepStab [33] datasets. We evaluate our method against baselines using three standard metrics: Cropping Ratio(C), Distortion Value(D), Stability Score(S). The best results are bolded and second-best results are highlighted by underline." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 266, + 287, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 287, + 301 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 287, + 301 + ], + "type": "text", + "content": "ities, matches pixels with similar textures containing color information. It implies that utilizing optical flow to refine the projection can enhance color accuracy." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "spans": [ + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "text", + "content": "Specifically, we focus on the input frame at " + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "text", + "content": ", which adheres to epipolar constraints with the target stabilized frame at " + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "text", + "content": ". As shown in Fig. 5, we employ " + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_T" + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "text", + "content": " as a reference to correct the projection points on the neighboring frame " + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_t" + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "text", + "content": " with optical flow. According to Eq. 
1, we project a point " + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_T" + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "text", + "content": " from the stabilized pose " + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{P}}_T" + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "text", + "content": " onto the " + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_T" + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_T" + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "text", + "content": ", the flow-associated points " + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t'" + }, + { + "bbox": [ + 46, + 301, + 287, + 385 + ], + "type": "text", + "content": " can be expressed as" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 117, + 391, + 287, + 405 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 391, + 287, + 405 + ], + "spans": [ + { + "bbox": [ + 117, + 391, + 287, + 405 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {t} ^ {\\prime} = \\mathbf {x} _ {T} + \\mathbf {F} _ {T \\rightarrow t} (\\mathbf {x} _ {T}), \\tag {8}", + "image_path": "5e71804d0fb0a4d5beaa863c275902f80669a2d0460c66ae284fe16cb7533177.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "spans": [ + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{T\\rightarrow t}" + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "text", + "content": " represents the optical flow from " + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_T" + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_t" + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "text", + "content": ". By applying the same procedure to frames in the temporal neighborhood " + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "inline_equation", + "content": "\\Omega_T" + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "text", + "content": ", we substitute the " + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "text", + "content": " in Eq. 3 with " + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t'" + }, + { + "bbox": [ + 46, + 410, + 287, + 447 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 452, + 180, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 452, + 180, + 464 + ], + "spans": [ + { + "bbox": [ + 47, + 452, + 180, + 464 + ], + "type": "text", + "content": "3.4. 
Implementation Details" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "spans": [ + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "text", + "content": "In our implementations, a pre-trained model from Deep3D [11] is employed to generate depth prior for the Adaptive Ray Range module and optical flow for Color Correction. Frames neighboring the timestamp " + }, + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "inline_equation", + "content": "\\mathrm{T}" + }, + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "text", + "content": " are symmetrically distributed, and the length of the set " + }, + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "inline_equation", + "content": "\\Omega_T" + }, + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "text", + "content": " is fixed to 13. For the Adaptive Ray Range module, the temporal weighting coefficient " + }, + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "inline_equation", + "content": "\\omega_{i}" + }, + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "text", + "content": " is calculated with " + }, + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "inline_equation", + "content": "\\lambda = 0.5" + }, + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "text", + "content": ", and we choose " + }, + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "inline_equation", + "content": "L = 3" + }, + { + "bbox": [ + 46, + 471, + 287, + 566 + ], + "type": "text", + "content": " for uniform spatial points sampling along each ray." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 566, + 287, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 566, + 287, + 603 + ], + "spans": [ + { + "bbox": [ + 46, + 566, + 287, + 603 + ], + "type": "text", + "content": "Loss function. During training, we sample rays on all images randomly and minimize the mean squared error between the rendered color and corresponding ground truth:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 609, + 287, + 639 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 609, + 287, + 639 + ], + "spans": [ + { + "bbox": [ + 106, + 609, + 287, + 639 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\sum_ {\\mathbf {x} \\in \\mathcal {X}} \\left| \\left| \\tilde {\\mathbf {I}} (\\mathbf {x}) - \\mathbf {I} _ {g t} (\\mathbf {x}) \\right| \\right| _ {2} ^ {2}, \\tag {9}", + "image_path": "e422fc522283dc805d457cd678d8af33cd3f40b2c0669509426a8ef4785689a4.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 641, + 287, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 287, + 666 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 641, + 287, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{gt}" + }, + { + "bbox": [ + 46, + 641, + 287, + 666 + ], + "type": "text", + "content": " is the corresponding ground truth and " + }, + { + "bbox": [ + 46, + 641, + 287, + 666 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 46, + 641, + 287, + 666 + ], + "type": "text", + "content": " is the set of pixels sampled from all images in each training batch." 
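The Color Correction lookup of Eq. 8 and the photometric objective of Eq. 9 can be sketched as below. This is only an illustrative NumPy version: the function names, the (dx, dy) flow convention, and the nearest-neighbour sampling are assumptions made here for brevity, not the authors' implementation.

import numpy as np

def flow_corrected_colors(I_t, F_T_to_t, x_T):
    """Eq. 8: replace the geometric projection x_t with the flow-refined
    position x'_t = x_T + F_{T->t}(x_T) before aggregating colors.

    I_t:      (H, W, 3) neighboring input frame.
    F_T_to_t: (H, W, 2) optical flow from I_T to I_t, assumed (dx, dy).
    x_T:      (N, 2) integer pixel coordinates (x, y) in frame I_T.
    """
    cols, rows = x_T[:, 0], x_T[:, 1]
    x_prime = x_T + F_T_to_t[rows, cols]  # Eq. 8
    # Nearest-neighbour lookup for brevity; bilinear sampling is more common.
    xs = np.clip(np.round(x_prime[:, 0]).astype(int), 0, I_t.shape[1] - 1)
    ys = np.clip(np.round(x_prime[:, 1]).astype(int), 0, I_t.shape[0] - 1)
    return I_t[ys, xs]

def photometric_loss(rendered, ground_truth):
    """Eq. 9: summed squared error between rendered colors and the ground
    truth over the set of pixels sampled in a training batch."""
    return np.sum((rendered - ground_truth) ** 2)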
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 666, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 666, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 666, + 287, + 713 + ], + "type": "text", + "content": "Training details. We follow the training setting of IBRNet [34] to train our model on LLFF [27] and IBRNetCollected [34], which include high-quality natural images with accurate camera poses. Our model is trained on an RTX3090" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 266, + 545, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 266, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 304, + 266, + 545, + 338 + ], + "type": "text", + "content": "GPU using the Adam optimizer [8]. We set the base learning rates for the feature extraction network and MLP to " + }, + { + "bbox": [ + 304, + 266, + 545, + 338 + ], + "type": "inline_equation", + "content": "1e^{-3}" + }, + { + "bbox": [ + 304, + 266, + 545, + 338 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 266, + 545, + 338 + ], + "type": "inline_equation", + "content": "5e^{-4}" + }, + { + "bbox": [ + 304, + 266, + 545, + 338 + ], + "type": "text", + "content": ", respectively, which decay exponentially throughout the optimization process. Typically, the model converges after approximately 200k iterations, and the entire training process takes about a day to complete." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 348, + 388, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 348, + 388, + 361 + ], + "spans": [ + { + "bbox": [ + 305, + 348, + 388, + 361 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 369, + 441, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 369, + 441, + 381 + ], + "spans": [ + { + "bbox": [ + 305, + 369, + 441, + 381 + ], + "type": "text", + "content": "4.1. Quantitative Evaluation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 387, + 545, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 387, + 545, + 471 + ], + "spans": [ + { + "bbox": [ + 304, + 387, + 545, + 471 + ], + "type": "text", + "content": "Baselines. We choose various video stabilization algorithms as the baselines, including Grundmann et al. [7], Liu et al. [20], Wang et al. [33], Yu and Ramamoorthi [39, 40], DIFRINT [5], FuSta [24], Zhao et al. [42], and Deep3D [11]. For comparisons, we use the officially provided videos or videos generated by official implementations with default parameters or pre-trained models." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 475, + 545, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 475, + 545, + 559 + ], + "spans": [ + { + "bbox": [ + 304, + 475, + 545, + 559 + ], + "type": "text", + "content": "Datasets. We choose three datasets with different characteristics for evaluations: (1) The NUS [20] dataset comprises 144 videos, categorized into six different scenes: Regular, Running, Crowd, Parallax, QuickRotation, and Zooming, (2) the Selfie dataset [38] contains 33 video clips featuring frontal faces with large camera motion, and (3) the DeepStab dataset [33] includes 61 high-definition videos."
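The optimizer setup described in the training details above (Adam with base learning rates of 1e-3 for the feature extraction network and 5e-4 for the MLP, both decayed exponentially over roughly 200k iterations) could look like the following PyTorch sketch. The stand-in modules and the decay factor are placeholder assumptions for illustration only.

import torch
import torch.nn as nn

# Placeholder stand-ins; the actual feature network and MLP are not reproduced here.
feature_net = nn.Conv2d(3, 32, kernel_size=3, padding=1)
mlp = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 4))

optimizer = torch.optim.Adam([
    {"params": feature_net.parameters(), "lr": 1e-3},  # feature extraction network
    {"params": mlp.parameters(), "lr": 5e-4},          # MLP
])
# Exponential decay applied throughout optimization; gamma is an assumed value
# giving a gentle decay over ~200k steps.
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99999)
# In the training loop, scheduler.step() is called after each optimizer.step()
# so that both learning rates decay together.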
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 564, + 545, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 564, + 545, + 696 + ], + "spans": [ + { + "bbox": [ + 304, + 564, + 545, + 696 + ], + "type": "text", + "content": "Metrics. We assess the performance of the stabilizers using three standard metrics widely employed in previous methods [5, 20, 24, 39, 40]: (1) Cropping Ratio: This metric measures the remaining image area after cropping the non-content pixels. (2) Distortion Value: This metric quantifies the anisotropic scaling of the homography matrix between the input and output frames. (3) Stability Score: This metric assesses the stability of the stabilized video by assessing the ratio of low-frequency motion energy to the total energy. All three metrics range from 0 to 1, with higher values indicating better performance." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 701, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 701, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 306, + 701, + 545, + 712 + ], + "type": "text", + "content": "Results on the NUS dataset. Our evaluation on the NUS" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7512" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 173, + 140 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 173, + 140 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 173, + 140 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 173, + 140 + ], + "type": "image", + "image_path": "6445a5ac79746040c195fee948f2a9f61c217c26f446fee225e0707382894918.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 143, + 121, + 153 + ], + "lines": [ + { + "bbox": [ + 100, + 143, + 121, + 153 + ], + "spans": [ + { + "bbox": [ + 100, + 143, + 121, + 153 + ], + "type": "text", + "content": "Input" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 182, + 76, + 285, + 134 + ], + "blocks": [ + { + "bbox": [ + 182, + 76, + 285, + 134 + ], + "lines": [ + { + "bbox": [ + 182, + 76, + 285, + 134 + ], + "spans": [ + { + "bbox": [ + 182, + 76, + 285, + 134 + ], + "type": "image", + "image_path": "38be7b4662a10d0788472d050488bfa4ad55286ec3b23a4de17a00bf1334bda5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 142, + 256, + 153 + ], + "lines": [ + { + "bbox": [ + 211, + 142, + 256, + 153 + ], + "spans": [ + { + "bbox": [ + 211, + 142, + 256, + 153 + ], + "type": "text", + "content": "Bundle [20]" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 304, + 75, + 410, + 135 + ], + "blocks": [ + { + "bbox": [ + 304, + 75, + 410, + 135 + ], + "lines": [ + { + "bbox": [ + 304, + 75, + 410, + 135 + ], + "spans": [ + { + "bbox": [ + 304, + 75, + 410, + 135 + ], + "type": "image", + "image_path": "4a0a3f30268d35df6d5f9852d0c989cb307ed24228b3cc117f79c28387e17b55.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ 
+ 309, + 142, + 404, + 153 + ], + "lines": [ + { + "bbox": [ + 309, + 142, + 404, + 153 + ], + "spans": [ + { + "bbox": [ + 309, + 142, + 404, + 153 + ], + "type": "text", + "content": "Yu and Ramamoorthi [40]" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 433, + 78, + 527, + 131 + ], + "blocks": [ + { + "bbox": [ + 433, + 78, + 527, + 131 + ], + "lines": [ + { + "bbox": [ + 433, + 78, + 527, + 131 + ], + "spans": [ + { + "bbox": [ + 433, + 78, + 527, + 131 + ], + "type": "image", + "image_path": "35df973166922dce73a31c575c11e1538d9a479cd9dc6ea2613ca3db735a1631.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 49, + 156, + 171, + 225 + ], + "blocks": [ + { + "bbox": [ + 49, + 156, + 171, + 225 + ], + "lines": [ + { + "bbox": [ + 49, + 156, + 171, + 225 + ], + "spans": [ + { + "bbox": [ + 49, + 156, + 171, + 225 + ], + "type": "image", + "image_path": "d59a93c292a52554cfd94aa7685c2a4c335f93218586f06cdaf4eba176509916.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 86, + 228, + 135, + 239 + ], + "lines": [ + { + "bbox": [ + 86, + 228, + 135, + 239 + ], + "spans": [ + { + "bbox": [ + 86, + 228, + 135, + 239 + ], + "type": "text", + "content": "DIFRINT [5]" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 173, + 156, + 294, + 225 + ], + "blocks": [ + { + "bbox": [ + 173, + 156, + 294, + 225 + ], + "lines": [ + { + "bbox": [ + 173, + 156, + 294, + 225 + ], + "spans": [ + { + "bbox": [ + 173, + 156, + 294, + 225 + ], + "type": "image", + "image_path": "57964ea87012f15a8802e1b7009bb18942a7f89b4afbc78118cdca81a67ab3e4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 228, + 253, + 239 + ], + "lines": [ + { + "bbox": [ + 214, + 228, + 253, + 239 + ], + "spans": [ + { + "bbox": [ + 214, + 228, + 253, + 239 + ], + "type": "text", + "content": "FuSta [24]" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 296, + 156, + 417, + 225 + ], + "blocks": [ + { + "bbox": [ + 296, + 156, + 417, + 225 + ], + "lines": [ + { + "bbox": [ + 296, + 156, + 417, + 225 + ], + "spans": [ + { + "bbox": [ + 296, + 156, + 417, + 225 + ], + "type": "image", + "image_path": "08a78308b3f798156ca5e004ec39727c0d91da006b58d754cc6d58a4915e7d9a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 328, + 228, + 385, + 239 + ], + "lines": [ + { + "bbox": [ + 328, + 228, + 385, + 239 + ], + "spans": [ + { + "bbox": [ + 328, + 228, + 385, + 239 + ], + "type": "text", + "content": "Zhao et al. 
[42]" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 418, + 156, + 541, + 225 + ], + "blocks": [ + { + "bbox": [ + 456, + 142, + 503, + 153 + ], + "lines": [ + { + "bbox": [ + 456, + 142, + 503, + 153 + ], + "spans": [ + { + "bbox": [ + 456, + 142, + 503, + 153 + ], + "type": "text", + "content": "Deep3D[11]" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 418, + 156, + 541, + 225 + ], + "lines": [ + { + "bbox": [ + 418, + 156, + 541, + 225 + ], + "spans": [ + { + "bbox": [ + 418, + 156, + 541, + 225 + ], + "type": "image", + "image_path": "75bb6ba464b0ec171eed2ec71160fe7c3dbc1ea84890164dcce8b66c7db1e018.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 470, + 229, + 489, + 238 + ], + "lines": [ + { + "bbox": [ + 470, + 229, + 489, + 238 + ], + "spans": [ + { + "bbox": [ + 470, + 229, + 489, + 238 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 314, + 287, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 314, + 287, + 445 + ], + "spans": [ + { + "bbox": [ + 46, + 314, + 287, + 445 + ], + "type": "text", + "content": "dataset [20] is detailed on the left side of Table 1, where our stabilization method excels notably in both stability and distortion reduction when compared to 2D-based methods. This success is attributed to our accuracy in constructing camera trajectories and geometry. In contrast to 3D methods, our approach stands out by leveraging information from multiple input frames, achieving an average cropping ratio of 1. This indicates the effectiveness of our method in full-frame generation across the diverse scenes in the NUS dataset, which is widely acknowledged as a robust benchmark for video stabilization algorithms." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 46, + 448, + 287, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 448, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 46, + 448, + 287, + 579 + ], + "type": "text", + "content": "Results on the Selfie dataset. We present the results on the Selfie dataset [38] in the middle of Table 1. It's crucial to highlight that this dataset is characterized by large camera motions and extensive dynamic regions, posing challenges for video stabilization algorithms. Observing the results, a decrease is evident for most algorithms compared to their performance on the NUS dataset. Traditional 3D methods, in particular, experience a significant decline. In contrast, our method consistently delivers the best performance on the Selfie dataset. The performance shows the effectiveness of our algorithm in handling extreme scenes." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "content": "Results on the DeepStab dataset. The right side of Table 1 showcases the average scores on the DeepStab dataset [33]. 
Notably, the videos in this dataset are of higher resolution than NUS and Selfie, specifically " + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "inline_equation", + "content": "720\\mathrm{p}" + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "content": ", aligning with the common resolutions of modern devices. Despite the high distortion values across all stabilizers due to the simplicity of this dataset, our approach consistently demonstrates superior performance. This result suggests that our method is well-suited for handling high-definition videos, further emphasizing its applicability for contemporary video stabilization challenges." + } + ] + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 307, + 312, + 424, + 419 + ], + "blocks": [ + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "lines": [ + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "spans": [ + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "type": "text", + "content": "Figure 6. Visual comparison of different methods. Contrasting with the baselines in the first row, our method successfully accomplishes full-frame generation. In the second row, while these baselines achieve full-frame generation, they fall short in preserving structure; for instance, in the bottom-left region, the tree trunks are missing in their stabilized images. Please refer to our supplementary material for video comparisons with baselines." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 312, + 424, + 419 + ], + "lines": [ + { + "bbox": [ + 307, + 312, + 424, + 419 + ], + "spans": [ + { + "bbox": [ + 307, + 312, + 424, + 419 + ], + "type": "image", + "image_path": "bbedd1702aeeeb4331fdea29791b21f14bc84f8b8c1594bb35a677f57ffe6c30.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 318, + 420, + 414, + 431 + ], + "lines": [ + { + "bbox": [ + 318, + 420, + 414, + 431 + ], + "spans": [ + { + "bbox": [ + 318, + 420, + 414, + 431 + ], + "type": "text", + "content": "w/o Stabilized Rendering" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 427, + 312, + 544, + 420 + ], + "blocks": [ + { + "bbox": [ + 427, + 312, + 544, + 420 + ], + "lines": [ + { + "bbox": [ + 427, + 312, + 544, + 420 + ], + "spans": [ + { + "bbox": [ + 427, + 312, + 544, + 420 + ], + "type": "image", + "image_path": "d606b9f6b4daf48d60e7051425c1c0f8ac576733b8cbcfb636363d015c2785f3.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 476, + 422, + 496, + 430 + ], + "lines": [ + { + "bbox": [ + 476, + 422, + 496, + 430 + ], + "spans": [ + { + "bbox": [ + 476, + 422, + 496, + 430 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 439, + 546, + 472 + ], + "lines": [ + { + "bbox": [ + 306, + 439, + 546, + 472 + ], + "spans": [ + { + "bbox": [ + 306, + 439, + 546, + 472 + ], + "type": "text", + "content": "Figure 7. Quilitative ablation of Stabilized Fusion. Absence of Stabilized Fusion results in noticeable blurs in both static and dynamic regions." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 495, + 423, + 507 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 495, + 423, + 507 + ], + "spans": [ + { + "bbox": [ + 306, + 495, + 423, + 507 + ], + "type": "text", + "content": "4.2. Qualitative Analysis" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 306, + 514, + 545, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 514, + 545, + 584 + ], + "spans": [ + { + "bbox": [ + 306, + 514, + 545, + 584 + ], + "type": "text", + "content": "Visual comparisons of our method and state-of-the-art stabilizers are shown in Fig. 6. Many methods [11, 20, 40] apply aggressive cropping, as evident from the grey checkerboard regions. Comparing the bottom-left region of each image in Fig. 6 below with the top-left input, it's clear that our method suffers from fewer visual artifacts." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 306, + 597, + 398, + 610 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 597, + 398, + 610 + ], + "spans": [ + { + "bbox": [ + 306, + 597, + 398, + 610 + ], + "type": "text", + "content": "5. Ablation Study" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 306, + 617, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 617, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 617, + 546, + 713 + ], + "type": "text", + "content": "We conduct ablation studies to analyze the effectiveness of the proposed modules, including Stabilized Rendering (SR), the Adaptive Ray Range module (ARR), and the Color Correction module (CC). Our evaluations focus on the Crowd scene within the NUS dataset [20], chosen for its dynamic objects and diverse scenes. We choose Distortion Value and PSNR as evaluation metrics. 
Distortion Value measures the pose-independent structure quality of" + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7513" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 166, + 137 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 166, + 137 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 166, + 137 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 166, + 137 + ], + "type": "image", + "image_path": "25f5e98177727532a13733afd1d9adcf104b6a6364a937365be026954b7c8fe6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 93, + 138, + 123, + 148 + ], + "lines": [ + { + "bbox": [ + 93, + 138, + 123, + 148 + ], + "spans": [ + { + "bbox": [ + 93, + 138, + 123, + 148 + ], + "type": "text", + "content": "IBRNet" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 168, + 70, + 287, + 137 + ], + "blocks": [ + { + "bbox": [ + 168, + 70, + 287, + 137 + ], + "lines": [ + { + "bbox": [ + 168, + 70, + 287, + 137 + ], + "spans": [ + { + "bbox": [ + 168, + 70, + 287, + 137 + ], + "type": "image", + "image_path": "7c47836fb1ab540c22e7496bc0f983b2dad28ed4cefdccfc9bb994f3b332686a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 215, + 139, + 245, + 148 + ], + "lines": [ + { + "bbox": [ + 215, + 139, + 245, + 148 + ], + "spans": [ + { + "bbox": [ + 215, + 139, + 245, + 148 + ], + "type": "text", + "content": "ENeRF" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 49, + 149, + 166, + 217 + ], + "blocks": [ + { + "bbox": [ + 49, + 149, + 166, + 217 + ], + "lines": [ + { + "bbox": [ + 49, + 149, + 166, + 217 + ], + "spans": [ + { + "bbox": [ + 49, + 149, + 166, + 217 + ], + "type": "image", + "image_path": "b935cb2f8b7006df9d0a7994a7657190337d2c93beb48b0d936051ab3aa16668.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 49, + 217, + 105, + 256 + ], + "blocks": [ + { + "bbox": [ + 49, + 217, + 105, + 256 + ], + "lines": [ + { + "bbox": [ + 49, + 217, + 105, + 256 + ], + "spans": [ + { + "bbox": [ + 49, + 217, + 105, + 256 + ], + "type": "image", + "image_path": "e3ab8833bbe1c9e50c3ada67aae911bd692e8bfa26373460b95f6332ceeb6f49.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 59, + 258, + 156, + 269 + ], + "lines": [ + { + "bbox": [ + 59, + 258, + 156, + 269 + ], + "spans": [ + { + "bbox": [ + 59, + 258, + 156, + 269 + ], + "type": "text", + "content": "w/o Adaptive Ray Range" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 108, + 217, + 165, + 256 + ], + "blocks": [ + { + "bbox": [ + 108, + 217, + 165, + 256 + ], + "lines": [ + { + "bbox": [ + 108, + 217, + 165, + 256 + ], + "spans": [ + { + "bbox": [ + 108, + 217, + 165, + 256 + ], + "type": "image", + "image_path": "9d7e36ceab158649387266c5ddbe6deb544d1868b6711ee82126c173df2a1332.jpg" + } + ] + } + ], + "index": 6, + "angle": 
0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 277, + 287, + 310 + ], + "lines": [ + { + "bbox": [ + 46, + 277, + 287, + 310 + ], + "spans": [ + { + "bbox": [ + 46, + 277, + 287, + 310 + ], + "type": "text", + "content": "Figure 8. Quilitative ablation of different range strategies. Among the range strategies examined, only our Adaptive Ray Range module can address distortion in image structure." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 168, + 149, + 286, + 217 + ], + "blocks": [ + { + "bbox": [ + 168, + 149, + 286, + 217 + ], + "lines": [ + { + "bbox": [ + 168, + 149, + 286, + 217 + ], + "spans": [ + { + "bbox": [ + 168, + 149, + 286, + 217 + ], + "type": "image", + "image_path": "1bb923e31cac7faed4c99a41f913c826b26b9e6523ba4b8db6600cb1d2d5d3cf.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 196, + 545, + 229 + ], + "lines": [ + { + "bbox": [ + 305, + 196, + 545, + 229 + ], + "spans": [ + { + "bbox": [ + 305, + 196, + 545, + 229 + ], + "type": "text", + "content": "Figure 9. Quilitative ablation of Color Correction. The Color Correction module refining the projection enhances color accuracy, consequently reducing image artifacts." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 168, + 217, + 227, + 256 + ], + "blocks": [ + { + "bbox": [ + 168, + 217, + 227, + 256 + ], + "lines": [ + { + "bbox": [ + 168, + 217, + 227, + 256 + ], + "spans": [ + { + "bbox": [ + 168, + 217, + 227, + 256 + ], + "type": "image", + "image_path": "836225df1614e93b133579b0784abba78e41279b78117932d4d8bd6220dc246b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 217, + 258, + 237, + 268 + ], + "lines": [ + { + "bbox": [ + 217, + 258, + 237, + 268 + ], + "spans": [ + { + "bbox": [ + 217, + 258, + 237, + 268 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 228, + 217, + 286, + 256 + ], + "blocks": [ + { + "bbox": [ + 228, + 217, + 286, + 256 + ], + "lines": [ + { + "bbox": [ + 228, + 217, + 286, + 256 + ], + "spans": [ + { + "bbox": [ + 228, + 217, + 286, + 256 + ], + "type": "image", + "image_path": "e327c557e851688405372d89788f43584ca0d8c4012f9867e11ceb1da7280a79.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 337, + 287, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 337, + 287, + 396 + ], + "spans": [ + { + "bbox": [ + 46, + 337, + 287, + 396 + ], + "type": "text", + "content": "images with stabilized poses. Additionally, PSNR is employed to evaluate the pixel-level performance of our model in rendering image details. As real images with stabilized poses are unavailable, we render images with the input pose to derive PSNR." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 399, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 399, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 399, + 287, + 555 + ], + "type": "text", + "content": "Why needs Stabilized Rendering. We conduct experiments to demonstrate the necessity of SR, which fuses features and colors in 3D space. 
One straightforward strategy replacing SR for fusing multiple frames is image blending. It warps nearby frames into the stabilized view and averages these images. However, as illustrated in the left part of Fig. 7, image blending leads to noticeable blur in both static regions (the stairs) and dynamic regions (the handbag and the shoulder). Comparing Row 4 and Row 3 in Table 2, the notable decreases in distortion value and PSNR align with the observation in Fig. 7. It demonstrates that SR, our 3D multiframe fusion module using volume rendering, can enhance the structural quality of stabilized images." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 558, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 288, + 713 + ], + "type": "text", + "content": "Importance of Adaptive Ray Range. We compare various range strategies to affirm the importance of ARR: (1) IBRNet [34] and ENeRF [15] employ a coarse-to-fine range strategy, and (2) we adopt even sampling of 128 points following the setting of IBRNet as a substitution for ARR. However, as shown in Fig. 8, none of these strategies achieve favorable results. Without the sampling range defined by ARR, the methods above are forced to aggregate points sampled over a large range, increasing the risk of projecting spatial points onto dynamic regions. Due to the violation of epipolar constraints, dynamic regions introduce incorrect features and colors to the aggregation of descriptors and lead to distortion of the structure. As shown in Rows 1, 2, 3, and 5 of Table 2," + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 307, + 70, + 425, + 137 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 425, + 137 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 425, + 137 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 425, + 137 + ], + "type": "image", + "image_path": "2c8dfc7ec3132528612000bb0e10a334493166ec7b1563820334ca1c2f438a19.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 307, + 137, + 365, + 176 + ], + "blocks": [ + { + "bbox": [ + 307, + 137, + 365, + 176 + ], + "lines": [ + { + "bbox": [ + 307, + 137, + 365, + 176 + ], + "spans": [ + { + "bbox": [ + 307, + 137, + 365, + 176 + ], + "type": "image", + "image_path": "cf84b471ac347006d196b9d1ddd9e5bd7cc6a73e0f8ea48a49e453eeaea2be73.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 367, + 137, + 424, + 176 + ], + "blocks": [ + { + "bbox": [ + 367, + 137, + 424, + 176 + ], + "lines": [ + { + "bbox": [ + 367, + 137, + 424, + 176 + ], + "spans": [ + { + "bbox": [ + 367, + 137, + 424, + 176 + ], + "type": "image", + "image_path": "e45170879f7bd8948694782837c63e9cee54a4f515b022889e97fb02c1efbb3b.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 427, + 70, + 545, + 137 + ], + "blocks": [ + { + "bbox": [ + 427, + 70, + 545, + 137 + ], + "lines": [ + { + "bbox": [ + 427, + 70, + 545, + 137 + ], + "spans": [ + { + "bbox": [ + 427, + 70, + 545, + 137 + ], + "type": "image", + "image_path": "3c369aea13eb5c2d15c6de431b90a0085a42c5f5c9281851f3cd911fe656c555.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 427, + 137, + 485, + 177 + ], + "blocks": [ 
+ { + "bbox": [ + 427, + 137, + 485, + 177 + ], + "lines": [ + { + "bbox": [ + 427, + 137, + 485, + 177 + ], + "spans": [ + { + "bbox": [ + 427, + 137, + 485, + 177 + ], + "type": "image", + "image_path": "4ce40c892ab996b60ab2af19413f15607c0b324e2e725ffb26265fdd4f799f07.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 476, + 178, + 496, + 188 + ], + "lines": [ + { + "bbox": [ + 476, + 178, + 496, + 188 + ], + "spans": [ + { + "bbox": [ + 476, + 178, + 496, + 188 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 488, + 137, + 544, + 177 + ], + "blocks": [ + { + "bbox": [ + 488, + 137, + 544, + 177 + ], + "lines": [ + { + "bbox": [ + 488, + 137, + 544, + 177 + ], + "spans": [ + { + "bbox": [ + 488, + 137, + 544, + 177 + ], + "type": "image", + "image_path": "8f5ed14cc5ec74546b2636d6eb8992f7ae2421f8a90f86b7c00d7d6e6b1e53ab.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "table", + "bbox": [ + 321, + 245, + 531, + 340 + ], + "blocks": [ + { + "bbox": [ + 321, + 245, + 531, + 340 + ], + "lines": [ + { + "bbox": [ + 321, + 245, + 531, + 340 + ], + "spans": [ + { + "bbox": [ + 321, + 245, + 531, + 340 + ], + "type": "table", + "html": "
MethodDistortion Value↑PSNR↑
ENeRF-13.45
IBRNet0.8028.31
Full (Ours)0.9040.01
w/o Stabilized Rendering0.8723.56
w/o Adaptive Ray Range0.8137.83
w/o Color Correction0.8635.81
", + "image_path": "c2ea4ded0a9a7456dcea2da08cdf4fbb3f9646fd8daa2c0fa83557164b545fe1.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "table_body" + } + ], + "index": 24 + }, + { + "bbox": [ + 305, + 348, + 545, + 392 + ], + "lines": [ + { + "bbox": [ + 305, + 348, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 305, + 348, + 545, + 392 + ], + "type": "text", + "content": "Table 2. Quantitative results of ablation study. We conduct comparative experiments of various range strategies and study the effect of each module. It should be noted that the results of ENeRF are so poor that the Distortion Value is unavailable." + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 415, + 489, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 415, + 489, + 426 + ], + "spans": [ + { + "bbox": [ + 306, + 415, + 489, + 426 + ], + "type": "text", + "content": "ARR proves effective in preserving structure." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 304, + 434, + 545, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 434, + 545, + 517 + ], + "spans": [ + { + "bbox": [ + 304, + 434, + 545, + 517 + ], + "type": "text", + "content": "Importance of Color Correction. We conduct a comparison between the results obtained by removing CC and using the full model. The presence of noticeable artifacts in the dynamic region in the left part of Fig. 9 leads to the decrease in PSNR comparing Row 6 and Row 3 of Table 1. This suggests that employing optical flow in CC to refine the projection can improve color accuracy." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 306, + 536, + 383, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 536, + 383, + 548 + ], + "spans": [ + { + "bbox": [ + 306, + 536, + 383, + 548 + ], + "type": "text", + "content": "6. Conclusions" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "content": "In this paper, we propose a video stabilization framework termed RStab for integrating multi-frame fusion and 3D constraints to achieve full-frame generation and structure preservation. The core of RStab lies in Stabilized Rendering, a volume rendering module utilizing both colors and features for multi-frame fusion in 3D space. To enhance Stabilized Rendering module, we design an Adaptive Ray Range module for suppressing inconsistent information and a Color Correction module for refining color aggregation. By applying the three modules, RStab achieves full-frame generation with structure preservation and outperforms all previous stabilizers in FOV, image quality, and video stability across various datasets." 
+ } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7514" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "text", + "content": "[1] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 5855–5864, 2021. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 158, + 288, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 158, + 288, + 212 + ], + "spans": [ + { + "bbox": [ + 53, + 158, + 288, + 212 + ], + "type": "text", + "content": "[2] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5470–5479, 2022. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 213, + 287, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 213, + 287, + 267 + ], + "spans": [ + { + "bbox": [ + 53, + 213, + 287, + 267 + ], + "type": "text", + "content": "[3] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 14124-14133, 2021. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 269, + 287, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 269, + 287, + 323 + ], + "spans": [ + { + "bbox": [ + 53, + 269, + 287, + 323 + ], + "type": "text", + "content": "[4] Yu-Ta Chen, Kuan-Wei Tseng, Yao-Chih Lee, Chun-Yu Chen, and Yi-Ping Hung. Pixstabnet: Fast multi-scale deep online video stabilization with pixel-based warping. In Proceedings of IEEE International Conference on Image Processing (ICIP), pages 1929–1933, 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 324, + 287, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 324, + 287, + 356 + ], + "spans": [ + { + "bbox": [ + 53, + 324, + 287, + 356 + ], + "type": "text", + "content": "[5] Jinsoo Choi and In So Kweon. Deep iterative frame interpolation for full-frame video stabilization. ACM Transactions on Graphics (TOG), 39(1):4:1-4:9, 2020. 
1, 2, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 357, + 287, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 357, + 287, + 390 + ], + "spans": [ + { + "bbox": [ + 53, + 357, + 287, + 390 + ], + "type": "text", + "content": "[6] Amit Goldstein and Raanan Fattal. Video stabilization using epipolar geometry. ACM Transactions on Graphics (TOG), 31(5):126:1-126:10, 2012. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 392, + 287, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 392, + 287, + 445 + ], + "spans": [ + { + "bbox": [ + 53, + 392, + 287, + 445 + ], + "type": "text", + "content": "[7] Matthias Grundmann, Vivek Kwatra, and Irfan A. Essa. Auto-directed video stabilization with robust L1 optimal camera paths. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 225-232, 2011. 2, 6, 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 447, + 287, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 447, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 53, + 447, + 287, + 479 + ], + "type": "text", + "content": "[8] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Proceedings of International Conference on Learning Representations (ICLR), 2015. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 480, + 287, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 480, + 287, + 523 + ], + "spans": [ + { + "bbox": [ + 53, + 480, + 287, + 523 + ], + "type": "text", + "content": "[9] Yeong Jun Koh, Chulwoo Lee, and Chang-Su Kim. Video stabilization based on feature trajectory augmentation and selection and robust mesh grid warping. IEEE Transactions on Image Processing (TIP), 24(12):5260-5273, 2015. 1, 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 525, + 287, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 525, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 48, + 525, + 287, + 568 + ], + "type": "text", + "content": "[10] Ken-Yi Lee, Yung-Yu Chuang, Bing-Yu Chen, and Ming Ouhyoung. Video stabilization using robust feature trajectories. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 1397-1404, 2009. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 570, + 287, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 570, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 48, + 570, + 287, + 633 + ], + "type": "text", + "content": "[11] Yao-Chih Lee, Kuan-Wei Tseng, Yu-Ta Chen, Chien-Cheng Chen, Chu-Song Chen, and Yi-Ping Hung. 3d video stabilization with depth estimation by cnn-based optimization. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 10621–10630, 2021. 1, 2, 4, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 635, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 678 + ], + "type": "text", + "content": "[12] Chen Li, Li Song, Shuai Chen, Rong Xie, and Wenjun Zhang. Deep online video stabilization using IMU sensors. IEEE Transactions on Multimedia (TMM), 25:2047-2060, 2023. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[13] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of IEEE Conference on Com" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 94 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 94 + ], + "type": "text", + "content": "puter Vision Pattern Recognition (CVPR), pages 6498-6508, 2021.3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 96, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 150 + ], + "type": "text", + "content": "[14] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4273-4284, 2023. 3, 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 152, + 545, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 152, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 545, + 196 + ], + "type": "text", + "content": "[15] Haotong Lin, Sida Peng, Zhen Xu, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Efficient neural radiance fields for interactive free-viewpoint video. In ACM SIGGRAPH Asia, pages 39:1-39:9, 2022. 2, 3, 4, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 198, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 198, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 308, + 198, + 545, + 251 + ], + "type": "text", + "content": "[16] Kaimo Lin, Nianjuan Jiang, Shuaicheng Liu, Loong-Fah Cheong, Minh N. Do, and Jiangbo Lu. Direct photometric alignment by mesh deformation. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 2701-2709, 2017. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 254, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 254, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 308, + 254, + 545, + 285 + ], + "type": "text", + "content": "[17] Feng Liu, Michael Gleicher, Hailin Jin, and Aseem Agarwala. Content-preserving warps for 3d video stabilization. ACM Transactions on Graphics (TOG), 28(3):44, 2009. 1, 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 288, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 288, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 308, + 288, + 545, + 319 + ], + "type": "text", + "content": "[18] Feng Liu, Michael Gleicher, Jue Wang, Hailin Jin, and Aseem Agarwala. Subspace video stabilization. ACM Transactions on Graphics (TOG), 30(1):4:1-4:10, 2011. 
1, 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 322, + 545, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 322, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 308, + 322, + 545, + 365 + ], + "type": "text", + "content": "[19] Shuaicheng Liu, Yinting Wang, Lu Yuan, Jiajun Bu, Ping Tan, and Jian Sun. Video stabilization with a depth camera. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 89-95, 2012. 1, 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 366, + 545, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 366, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 308, + 366, + 545, + 399 + ], + "type": "text", + "content": "[20] Shuaicheng Liu, Lu Yuan, Ping Tan, and Jian Sun. Bundled camera paths for video stabilization. ACM Transactions on Graphics (TOG), 32(4):78:1-78:10, 2013. 1, 2, 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 401, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 401, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 308, + 401, + 545, + 453 + ], + "type": "text", + "content": "[21] Shuaicheng Liu, Lu Yuan, Ping Tan, and Jian Sun. Steadyflow: Spatially smooth optical flow for video stabilization. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4209-4216, 2014. 1, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 456, + 545, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 456, + 545, + 500 + ], + "spans": [ + { + "bbox": [ + 308, + 456, + 545, + 500 + ], + "type": "text", + "content": "[22] Shuaicheng Liu, Ping Tan, Lu Yuan, Jian Sun, and Bing Zeng. Meshflow: Minimum latency online video stabilization. In Proceedings of European Conference on Computer Vision (ECCV), pages 800-815, 2016. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 502, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 502, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 308, + 502, + 545, + 544 + ], + "type": "text", + "content": "[23] Shuaicheng Liu, Mingyu Li, Shuyuan Zhu, and Bing Zeng. Codingflow: Enable video coding for video stabilization. IEEE Transactions on Image Processing (TIP), 26(7):3291-3302, 2017. 1, 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 547, + 545, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 547, + 545, + 600 + ], + "spans": [ + { + "bbox": [ + 308, + 547, + 545, + 600 + ], + "type": "text", + "content": "[24] Yu-Lun Liu, Wei-Sheng Lai, Ming-Hsuan Yang, Yung-Yu Chuang, and Jia-Bin Huang. Hybrid neural fusion for full-frame video stabilization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 2279-2288, 2021. 1, 2, 6, 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 603, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 603, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 308, + 603, + 545, + 667 + ], + "type": "text", + "content": "[25] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 7210–7219, 2021. 
3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "text", + "content": "[26] Andreas Meuleman, Yu-Lun Liu, Chen Gao, Jia-Bin Huang, Changil Kim, Min H. Kim, and Johannes Kopf. Progressively optimized local radiance fields for robust view synthesis. In Proceedings of IEEE Conference on Computer Vision" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7515" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 67, + 72, + 286, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 286, + 94 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 286, + 94 + ], + "type": "text", + "content": "Pattern Recognition (CVPR), pages 16539-16548, 2023. 3, 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 95, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 95, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 95, + 287, + 149 + ], + "type": "text", + "content": "[27] Ben Mildenhall, Pratul P. Srinivasan, Rodrigo Ortiz Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):29:1-29:14, 2019. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 150, + 287, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 150, + 287, + 194 + ], + "spans": [ + { + "bbox": [ + 48, + 150, + 287, + 194 + ], + "type": "text", + "content": "[28] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (TOG), 41(4):1-15, 2022. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 195, + 287, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 195, + 287, + 237 + ], + "spans": [ + { + "bbox": [ + 48, + 195, + 287, + 237 + ], + "type": "text", + "content": "[29] Simon Niklaus and Feng Liu. Softmax splatting for video frame interpolation. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5436-5445, 2020. 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "spans": [ + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "type": "text", + "content": "[30] Zhenmei Shi, Fuhao Shi, Wei-Sheng Lai, Chia-Kai Liang, and Yingyu Liang. Deep online fused video stabilization. In Proceedings of Winter Conference on Applications of Computer Vision (WACV), pages 865-873. IEEE, 2022. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 284, + 287, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 284, + 287, + 326 + ], + "spans": [ + { + "bbox": [ + 48, + 284, + 287, + 326 + ], + "type": "text", + "content": "[31] Brandon M. Smith, Li Zhang, Hailin Jin, and Aseem Agarwala. Light field video stabilization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 341-348, 2009. 1, 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "type": "text", + "content": "[32] Alex Trevithick and Bo Yang. Grf: Learning a general radiance field for 3d representation and rendering. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 15182-15192, 2021. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 372, + 287, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 372, + 287, + 426 + ], + "spans": [ + { + "bbox": [ + 48, + 372, + 287, + 426 + ], + "type": "text", + "content": "[33] Miao Wang, Guo-Ye Yang, Jin-Kun Lin, Song-Hai Zhang, Ariel Shamir, Shao-Ping Lu, and Shi-Min Hu. Deep online video stabilization with multi-grid warping transformation learning. IEEE Transactions on Image Processing (TIP), 28 (5):2283-2292, 2019. 6, 7, 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 426, + 287, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 426, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 48, + 426, + 287, + 491 + ], + "type": "text", + "content": "[34] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P. Srinivasan, Howard Zhou, Jonathan T. Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas A. Funkhouser. Ibrnet: Learning multi-view image-based rendering. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4690-4699, 2021. 3, 4, 6, 8, 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 493, + 287, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 493, + 287, + 546 + ], + "spans": [ + { + "bbox": [ + 48, + 493, + 287, + 546 + ], + "type": "text", + "content": "[35] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 5438–5448, 2022. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 548, + 287, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 548, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 48, + 548, + 287, + 590 + ], + "type": "text", + "content": "[36] Yufei Xu, Jing Zhang, Stephen J. Maybank, and Dacheng Tao. DUT: learning video stabilization by simply watching unstable videos. IEEE Transactions on Image Processing (TIP), 31:4306-4320, 2022. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 592, + 287, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 592, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 48, + 592, + 287, + 635 + ], + "type": "text", + "content": "[37] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelnerf: Neural radiance fields from one or few images. 
In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 4578-4587, 2021. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 637, + 287, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 637, + 287, + 669 + ], + "spans": [ + { + "bbox": [ + 48, + 637, + 287, + 669 + ], + "type": "text", + "content": "[38] Jiyang Yu and Ravi Ramamoorthi. Selfie video stabilization. In Proceedings of European Conference on Computer Vision (ECCV), pages 569-584, 2018. 2, 6, 7, 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "text", + "content": "[39] Jiyang Yu and Ravi Ramamoorthi. Robust video stabilization by optimization in CNN weight space. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 3800-3808, 2019. 6" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 206 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[40] Jiyang Yu and Ravi Ramamoorthi. Learning video stabilization using optical flow. In Proceedings of IEEE Conference on Computer Vision Pattern Recognition (CVPR), pages 8156-8164, 2020. 2, 6, 7, 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "type": "text", + "content": "[41] Minda Zhao and Qiang Ling. Pwstablenet: Learning pixelwise warping maps for video stabilization. IEEE Transactions on Image Processing (TIP), 29:3582-3595, 2020. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 152, + 545, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 206 + ], + "type": "text", + "content": "[42] Weiyue Zhao, Xin Li, Zhan Peng, Xianrui Luo, Xinyi Ye, Hao Lu, and Zhiguo Cao. Fast full-frame video stabilization with iterative optimization. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pages 23534-23544, 2023. 
1, 2, 6, 7" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "7516" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Neural Edge Reconstruction/12034c9b-4470-4339-9189-38596581605f_content_list.json b/2024/3D Neural Edge Reconstruction/12034c9b-4470-4339-9189-38596581605f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..c319c5b8b03b6025cad5c806b78b41222888fba0 --- /dev/null +++ b/2024/3D Neural Edge Reconstruction/12034c9b-4470-4339-9189-38596581605f_content_list.json @@ -0,0 +1,1801 @@ +[ + { + "type": "text", + "text": "3D Neural Edge Reconstruction", + "text_level": 1, + "bbox": [ + 321, + 130, + 647, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lei Li $^{1}$ Songyou Peng $^{1,2\\dagger}$ Zehao Yu $^{3,4}$ Shaohui Liu $^{1}$ Rémi Pautrat $^{1,6}$ Xiaochuan Yin $^{5}$ Marc Pollefeys $^{1,6}$ $^{1}$ ETH Zurich $^{2}$ MPI for Intelligent Systems, Tübingen $^{3}$ University of Tübingen $^{4}$ Tübingen AI Center $^{5}$ Utopilot $^{6}$ Microsoft neural-edge-map.github.io", + "bbox": [ + 143, + 178, + 803, + 272 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 305, + 313, + 320 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Real-world objects and environments are predominantly composed of edge features, including straight lines and curves. Such edges are crucial elements for various applications, such as CAD modeling, surface meshing, lane mapping, etc. However, existing traditional methods only prioritize lines over curves for simplicity in geometric modeling. To this end, we introduce EMAP, a new method for learning 3D edge representations with a focus on both lines and curves. Our method implicitly encodes 3D edge distance and direction in Unsigned Distance Functions (UDF) from multi-view edge maps. On top of this neural representation, we propose an edge extraction algorithm that robustly abstracts parametric 3D edges from the inferred edge points and their directions. Comprehensive evaluations demonstrate that our method achieves better 3D edge reconstruction on multiple challenging datasets. We further show that our learned UDF field enhances neural surface reconstruction by capturing more details.", + "bbox": [ + 75, + 335, + 473, + 609 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 636, + 209, + 651 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The straight line belongs to men, the curved one to God. — Antonio Gaudi", + "bbox": [ + 84, + 662, + 460, + 691 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This sentiment is evident in the visual composition of our environments. While straight lines are common in manmade scenes such as walls, windows, and doors [25], curves are more general and ubiquitous from cups, bridges, architectures, to Gothic arts. Edges, which are composed of both lines and curves, are the fundamental elements of visual perception. 
Therefore, accurate edge modeling is crucial for understanding the geometry and structure of our 3D world.", + "bbox": [ + 75, + 696, + 468, + 816 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Conventional approaches on 3D reconstruction typically involve inferring dense geometry and abstracting meshes from 2D images [15, 34, 48, 50, 67, 69]. However, the presence of 3D edges offers substantial advantages. First,", + "bbox": [ + 75, + 816, + 468, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f32a4e2e65c6a8de962c388fc298595f11a5a3a318eb562c0f2cf1aa633d4c75.jpg", + "image_caption": [ + "(a) An Indoor Scene" + ], + "image_footnote": [], + "bbox": [ + 504, + 303, + 694, + 386 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e6316e0a68dea4fa60357f5b54aca0a5710246b7a1b48dfbe7cb8ea09541af8b.jpg", + "image_caption": [ + "(b) LIMAP [25]" + ], + "image_footnote": [], + "bbox": [ + 696, + 301, + 885, + 385 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/655a4d113703ef7e8eaee1f49b5e069da98404b01c4a16f403ecc227efa8947a.jpg", + "image_caption": [ + "(c) NEAT [64]" + ], + "image_footnote": [], + "bbox": [ + 501, + 400, + 692, + 481 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e4f4f85cea6280fcd180b2e5fa06d3f1425e04e7805acd5d8f57819546e1c071.jpg", + "image_caption": [ + "(d) EMAP (Ours)", + "Figure 1. Example 3D edge reconstruction on Replica [53]. While prior methods such as LIMAP [25] and NEAT [64] only reconstruct distinctive line segments, our method generates a more complete 3D edge map combining both line and curve features." + ], + "image_footnote": [], + "bbox": [ + 697, + 400, + 888, + 482 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "edges are naturally compact representations that capture the salient features oftentimes around geometric boundaries, which are good indicators for more lightweight and adaptive meshing and 3D modeling with comparably less redundancy. Secondly, in contrast to dense surface modeling from images, 3D edges are unaffected to illumination changes, thus exhibiting better reliability on multi-view reconstruction. Last but not least, 3D edges serve as a universal representation in real-world scenarios, and can be potentially integrated into many applications such as lane mapping [10, 22, 42, 43, 56], motion forecasting [12, 13, 51], medical imaging [45], etc.", + "bbox": [ + 496, + 579, + 892, + 761 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The reconstruction of 3D edges is conventionally approached by matching their 2D observations across views. While Bignoli et al. [5] proposed edge point matching using the sparse map from Structure-from-Motion (SfM), it is inherently ill-posed due to its heavy reliance on cross-view edge correspondences, which are generally sparse and prone to ambiguity. Recent works have also improved the quality of 3D line reconstruction [18, 25, 61, 64], but primarily excel in specific scenes where straight lines dominate.", + "bbox": [ + 496, + 763, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding author", + "bbox": [ + 101, + 886, + 230, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "21219", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "nate. While general real-world environments with curved structures pose more challenges, recent progress on 2D detection and matching is mostly limited to point and line features and thus inapplicable to such scenarios.", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A recent work NEF [70] made a significant step forward in learning 3D curves from multi-view 2D edge observations. Inspired by the recent success of neural radiance field (NeRF) [32], they introduce a neural edge density field and show decent results in reconstructing edges for simple objects. Nevertheless, their proposed edge density field has an inherent bias in edge rendering, leading to less accurate reconstruction. Moreover, its fitting-based edge parameterization process not only requires tedious tuning to specific data, but also struggles with its scalability to larger and more complex scenes. This motivates us to develop a more robust system for 3D edge mapping from 2D observations, which would benefit a wide range of downstream tasks.", + "bbox": [ + 75, + 152, + 470, + 348 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Towards this goal, we introduce EMAP, a novel approach for accurate 3D edge reconstruction from only 2D edge maps. EMAP comprises the following steps. Firstly, we learn the neural unsigned distance function (UDF) to implicitly model 3D edges, utilizing an unbiased rendering equation to mitigate the inaccuracies observed in NEF. Secondly, once learned, we can obtain the unsigned distance and normal for each point in the space, so a set of precise edge points with directions can be extracted. Finally, based on the guidance of every edge point's location and direction, we design a simple yet robust algorithm for parametric line and curve extraction, that can be applied across various challenging scenarios. Our comprehensive evaluations of EMAP, from synthetic CAD models to real-world indoor and outdoor scenes, show its superior performance in 3D edge reconstruction. In addition, we also observe that initializing the optimization process of the recent neural implicit surface reconstruction method with our trained UDF field enables the reconstructing of better details.", + "bbox": [ + 75, + 349, + 468, + 636 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Overall, the contributions of this paper are as follows:", + "bbox": [ + 96, + 638, + 450, + 654 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose EMAP, a 3D neural edge reconstruction pipeline that can learn accurate 3D edge locations and directions implicitly from multi-view edge maps.", + "- We develop a 3D edge extraction algorithm to robustly connect edge points with edge direction guidance.", + "- We show that our model can generate complete 3D edge maps and help optimize dense surfaces." + ], + "bbox": [ + 76, + 655, + 467, + 760 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. 
Related Work", + "text_level": 1, + "bbox": [ + 76, + 779, + 217, + 794 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Geometry-based 3D Line Reconstruction. As a pioneering work, Bartoli and Sturm [4] introduces a full SfM system using line segments, which is later improved under Manhattan assumption [47] and in stereo systems [8]. Recently, with the developments of line detections [36, 37, 65] and matching [2, 36, 38] thanks to the advent to deep learn", + "bbox": [ + 75, + 809, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ing, several works have attempted to revisit the line mapping problem through graph clustering [19], leveraging planar information [61] and incorporating into SLAM systems [17, 23, 29, 52, 77]. In particular, recent work LIMAP [25] introduces a robust 3D line mapping system with structural priors which can adapt to different existing line detectors and matchers. Despite these advances, all the works are limited to straight lines and often produce segmented small lines when it comes to curves. In contrast, edges are generally easier to detect and are redundantly present in most scenes. In this project, rather than relying on lines, we build our 3D mapping system using robust 2D edge maps.", + "bbox": [ + 496, + 90, + 890, + 272 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Learning-based 3D Line/Curve Reconstruction. In contrast to geometry-based methods, some approaches [26, 59, 74] shifted their focus to directly extract parametric curves from given edge point clouds. Typically, they require keypoint detection, clustering, and linkage. Even under the relaxed setting, it is still challenging to generate clean parametric curves due to the complex connectivity of curves and imperfect point clouds [70]. To address this limitation, NEF [70] integrates NeRF [32] for edge mapping from multi-view images, extracting 3D curves from the learned neural edge field through a carefully designed postprocessing. While NEF achieves decent performance on CAD models, it is constrained to simple and low-precision object-level edge mapping. A concurrent work, NEAT [64], utilizes VolSDF [69] to build dense surfaces and incorporates a global junction perceiving module to optimize 3D line junctions with 2D wireframe supervision. Although NEAT can produce 3D wireframes, it is restricted to modeling line segments only. Additionally, their need for textured objects is a limitation. By contrast, we use the unisgned distance function (UDF) to represent edges, enabling the construction of both line segments and curves without the necessity for target textures. We further show that our method can faithfully reconstruct edges for complex scenes.", + "bbox": [ + 496, + 292, + 890, + 654 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural Implicit Representations. Neural implicit representations have emerged as a powerful tool for a spectrum of computer vision tasks, including object geometry representation [9, 24, 30, 34, 35, 40, 46, 57, 62, 66, 68], scene reconstruction [6, 20, 39, 71, 72, 75, 76], novel view synthesis [28, 32, 44, 73] and generative modelling [7, 33, 49]. Recent works [27, 58, 60, 69, 72] show impressive high-fidelity reconstruction by learning the implicit signed distance function (SDF). However, the SDF representation constrains to modeling closed, watertight surfaces. In contrast, NeuralUDF [3] exploits UDF to represent surfaces, offering a higher degree of freedom to represent both closed and open surfaces. 
We find UDF as a suitable representation to model edges implicitly, in comparison to SDF used in NEAT [64] and edge volume density from NEF [70].", + "bbox": [ + 496, + 672, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "21220", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d23a375aefeb8896ed56301abffb5f0a0787d0ecf38319c3daa5894552c8c5dc.jpg", + "image_caption": [ + "Figure 2. UDF learning overview. We utilize a vanilla NeRF [32] MLP that outputs absolute values to model the 3D UDF field. Edge maps are rendered using a density-based edge neural rendering technique, combined with an unbiased UDF rendering approach to eliminate bias. Our primary supervision comes from 2D edge maps predicted by a pre-trained edge detector." + ], + "image_footnote": [], + "bbox": [ + 81, + 88, + 467, + 243 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 337, + 166, + 353 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our goal is to build a 3D edge map from multi-view posed 2D edge maps. To this end, we first introduce our edge representation and edge field learning in Sec. 3.1. Next, we present our 3D parametric edge extraction from the learned edge representations in Sec. 3.2.", + "bbox": [ + 75, + 362, + 468, + 438 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Edge Field with Unsigned Distance Functions", + "text_level": 1, + "bbox": [ + 76, + 446, + 460, + 462 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Multi-view Edge Maps. Since edge maps are generally invariant to illumination changes and are more robustly detected across various scenes than lines, our method utilizes multiple posed 2D edge maps as inputs. We apply pretrained edge detectors to predict an edge map $E$ for each input RGB image. Each pixel of $E$ has a value within [0, 1], indicating its probability of being an edge.", + "bbox": [ + 75, + 473, + 468, + 579 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Density-based Edge Neural Rendering. We use an unsigned distance function (UDF) to represent edges, denoted as $f_{u}$ . This function computes the unsigned distance from a given 3D point to the nearest edge. The UDF is defined as:", + "bbox": [ + 75, + 582, + 468, + 643 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nf _ {u}: \\mathbb {R} ^ {3} \\rightarrow \\mathbb {R} \\quad \\mathrm {x} \\mapsto u = \\operatorname {U D F} (\\mathrm {x}), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 151, + 650, + 468, + 667 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{x}$ is a 3D point and $u$ is the corresponding UDF value.", + "bbox": [ + 76, + 676, + 468, + 691 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To render an edge pixel in a certain view, we trace a camera ray $\\mathbf{r}(t) = \\mathbf{o} + t\\mathbf{d}$ . This ray originates from the camera's center $\\mathbf{o}$ and extends in direction $\\mathbf{d}$ [32]. To apply volume rendering for edge modeling, it is necessary to establish a mapping $\\Omega_{u}$ [27, 58] that transforms the distance function $f_{u}(\\mathbf{r}(t))$ into volume density $\\sigma_{u}(t)$ as", + "bbox": [ + 75, + 691, + 468, + 782 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma_ {u} (t) = \\Omega_ {u} \\left(f _ {u} (\\mathrm {r} (t))\\right). 
\\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 790, + 468, + 806 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the rendering equation, the transmittance $T(t)$ and weight $\\omega (t)$ along the camera ray $\\mathbf{r}$ are accumulated as", + "bbox": [ + 76, + 815, + 468, + 845 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nT (t) = \\exp \\left(- \\int_ {0} ^ {t} \\sigma_ {u} (v) d v\\right), \\quad w (t) = T (t) \\cdot \\sigma_ {u} (t). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 86, + 853, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To effectively handle appearance changes under different viewing angles, most neural field-based surface reconstruction [34, 58, 67, 71] disentangles geometry and appearance. In contrast, edge maps are generally unaffected by lighting, making them view-independent. Therefore, this simplifies the rendering process for edge maps. as it only requires the accumulation of view-independent, density-based weights $w$ along a ray $\\mathbf{r}$ . Now, the rendered edge value $\\hat{E}$ along ray $\\mathbf{r}$ is formulated as:", + "bbox": [ + 496, + 90, + 890, + 226 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {E} (\\mathbf {r}) = \\int_ {0} ^ {+ \\infty} w (t) d t = 1 - T (+ \\infty), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 565, + 234, + 890, + 268 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Eq. (4) establishes the connection between rendered edge values and the transmittance at the end of the camera rays. Intuitively, this means that the rendered edge value is 1 when the camera ray hits an edge in 3D space, and 0 otherwise. Please refer to the supplements for more details.", + "bbox": [ + 496, + 277, + 890, + 353 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Unbiased Density Functions for UDF Rendering. NEF [70] also uses volume rendering for rendering edges. Unlike ours, they utilize edge density to represent edges and an additional network to predict edge values. However, this approach introduces an inherent bias in edge rendering. Similar to the naive solution presented in NeuS [58], the issue comes from the weight function $w$ in Eq. (3), where its local maximum does not coincide with the actual intersection point of the camera ray and the edges.", + "bbox": [ + 496, + 356, + 890, + 492 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To address this issue, we incorporate unbiased UDF rendering [27] into our density-based edge rendering framework. As proved in NeuS, density function $\\sigma_{u}$ should increase monotonically to make the weight function unbiased. However, UDF values are not monotonous along a ray [27]. To adapt the unbiased density function $\\Omega_{s}$ , which is originally induced in NeuS [58], for UDF use, the monotonically increased density function $\\sigma_{u}$ [27] is formulated as", + "bbox": [ + 496, + 492, + 890, + 613 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma_ {u} (t) = \\Psi (t) \\cdot \\Omega_ {s} \\left(f _ {u} (\\mathbf {r} (t))\\right) + (1 - \\Psi (t)) \\cdot \\Omega_ {s} (- f _ {u} (\\mathbf {r} (t))), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 498, + 625, + 890, + 654 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\Psi (t)$ is a differentiable visibility function designed in [27] to capture the monotonicity change in UDF. 
$\\Psi$ is 0 behind the intersection point between the camera ray and the hit edge, and is 1 before the intersection point. Besides, $\\Psi (t)$ is differentiable around the intersection point to make the UDF optimization more stable.", + "bbox": [ + 496, + 654, + 890, + 744 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Ray Sampling Strategy. A key characteristic of 2D edge maps is their significant sparsity, with edges occupying a much smaller area compared to non-edge regions. To enhance training efficiency and stability, we apply an importance sampling strategy for camera rays, with $50\\%$ of rays uniformly sampled from edge areas in the edge maps and the remaining $50\\%$ from non-edge areas. Such a sampling strategy ensures that our UDF field training is concentrated on edge areas, thereby substantially speeding up the training process. Additionally, our sampling strategy offers an", + "bbox": [ + 496, + 750, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "21221", + "bbox": [ + 478, + 989, + 517, + 1000 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/95652d6ceb191b2d8572ce1242428e6fb2c16a0773901aa5e9b8402546f886e7.jpg", + "image_caption": [ + "Figure 3. Illustration of our 3D parametric edge extraction steps. For simplify, our schematic is depicted in the 2D plane. Our 3D edge extraction algorithm comprises five main stages: point initialization (a), point shifting (b to c), edge direction extraction (c to d), point connection (d to e), and edge fitting (e to f)." + ], + "image_footnote": [], + "bbox": [ + 107, + 95, + 433, + 328 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "elegant solution to the issue of occlusion, a challenge noted in [70]. The rendered edge maps might contain edges not present in the input edge images due to occlusion. In contrast to the complicated occlusion handling strategy introduced in [70], our approach inherently alleviates this challenge by focusing the training on points from the visible edges presented in the input edge maps.", + "bbox": [ + 75, + 421, + 468, + 527 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Loss Functions. The total loss function can be written as:", + "bbox": [ + 76, + 531, + 460, + 545 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {e d g e}} + \\lambda \\mathcal {L} _ {\\text {e i k}}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 556, + 468, + 571 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_{\\mathrm{edge}}$ represents the Mean Square Error (MSE) between the rendered and input edge images. $\\mathcal{L}_{\\mathrm{eik}}$ denotes the Eikonal loss [16], which promotes the learned UDF to be physical distance. $\\lambda$ is used to balance these losses.", + "bbox": [ + 75, + 580, + 468, + 642 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. 3D Parametric Edge Extraction", + "text_level": 1, + "bbox": [ + 76, + 650, + 356, + 666 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "With UDF learning, edge locations are implicitly encoded within the UDF field. However, accurately extracting edge points from the UDF field is non-trivial due to the absence of a real zero-level set in the UDF field. Additionally, formulating these edge points into parametric edges poses significant challenges due to the complex connections of edges. 
To extract points from the learned density field, NEF [70] selects points with edge density values greater than a specified threshold, $\\epsilon$ . This approach leads to an approximated edge point set that is $\\epsilon$ -bounded [27]. While this method effectively generates comprehensive point clouds, the $\\epsilon$ -bounded point set does not align accurately with the actual edge locations.", + "bbox": [ + 75, + 672, + 468, + 868 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To eliminate the error in edge point extraction, we leverage the physical property of UDF that reflects real-world", + "bbox": [ + 76, + 869, + 470, + 900 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3427383d6547aba66aa415e6f00bae8ea5a6583ef804b3d91314bd0e7c9d01e2.jpg", + "image_caption": [ + "Figure 4. Illustration of the overview (a) and the cross-section (b) of UDF field. (a) In UDF field, edge points are ideally located at the zero-level set, with UDF values being larger away from these points. A query point $x_{t}$ can be precisely shifted to a more accurate position $x_{t + 1}$ by following the UDF value and the inverse normal vector $n(x)$ . The edge direction $l(x)$ aligns with the tangent direction at the edge point $x_{t + 1}$ . (b) The inverse normal vectors of all surrounding points on the cross section are pointing towards the query point." + ], + "image_footnote": [], + "bbox": [ + 506, + 95, + 769, + 186 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/177a7f484bfed0f95367c92e44a94f711554e8a8baf73f022876b74131079723.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 774, + 98, + 885, + 183 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "distances to the edges. Specifically, we develop a 3D edge extraction algorithm composed of five main stages: point initialization, point shifting, edge direction extraction, point connection, and edge fitting, as illustrated in Fig. 3. This algorithm takes the trained UDF field as input and outputs parametric 3D edges, including line segments and curves.", + "bbox": [ + 496, + 335, + 890, + 426 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Point Initialization. Under eikonal loss supervision, the optimized UDF values represent physical distances to the nearest edges. To initialize potential edge points, we begin with the center points of all voxel grids and obtain their UDF values from the UDF field. Subsequently, we eliminate query points whose UDF values exceed a specified threshold $\\epsilon^{\\prime}$ (red points in Fig. 3 (a)).", + "bbox": [ + 496, + 430, + 892, + 536 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Point Shifting. As illustrated in Fig. 4 (a), the normalized inverse gradient of the UDF field indicates the inverse normal vector pointing towards edges. Drawing inspiration from OccNet [31], we refine the point $x$ iteratively towards the edge using its distance and inverse normal direction:", + "bbox": [ + 496, + 540, + 893, + 614 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nx _ {t + 1} \\Leftarrow x _ {t} - f _ {u} (x _ {t}) \\cdot \\frac {\\nabla f _ {u} (x _ {t})}{\\| \\nabla f _ {u} (x _ {t}) \\|}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 575, + 626, + 890, + 659 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $t$ denotes the $t$ -th iteration. As a result of this iterative process, the initial points converge to the edge center (from Fig. 3 (b) to Fig. 
3 (c)).", + "bbox": [ + 496, + 670, + 890, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Edge Direction. Establishing connections between edge points is a crucial step in constructing parametric edges. While most methods [11, 42, 70] estimate parameters through least-squares fitting of lines/curves on extracted points, this fitting-based approach for edge extraction is not always robust or accurate. In contrast, inspired by [37, 63], we find that combining the edge direction field with the edge distance field can robustly produce edge parameters. Given that inverse normal vectors invariably point towards edges (see Fig. 4(b)), we first devise an edge direction extraction method based on this set of inverse normal vectors. Specifically, for a query point $x$ , we introduce minor shifts", + "bbox": [ + 496, + 718, + 892, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "21222", + "bbox": [ + 478, + 988, + 517, + 1000 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "set $\\{\\delta\\}_{N}$ with size of $N$ to generate an adjoining point set $\\{x'\\}_{N}$ , where $\\{x'\\}_{N} = x + \\{\\delta\\}_{N}$ . The inverse normal vectors of these points, denoted as $\\{n\\}_{N}$ , are obtained from the learned UDF field. The edge direction, denoted as $l$ , is identified as the null space of $\\{n_i'\\}$ , since the edge direction is perpendicular to all inverse normal vectors in $\\{n\\}_{N}$ . Therefore, $l$ can be extracted with singular value decomposition (SVD):", + "bbox": [ + 75, + 90, + 472, + 210 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nA = U \\Sigma V ^ {T}, \\quad l = V [:, \\operatorname {a r g m i n} (\\Sigma) ], \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 145, + 218, + 468, + 237 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $A$ is the matrix representation of $\\{n\\}_{N}$ and $l$ corresponds to the eigenvector associated with the smallest eigenvalue. Note that $N$ should be sufficiently large to ensure the stability of the extracted edge direction. Unlike DeepLSD [37], we can obtain a precise edge direction field without relying on any 2D direction supervision.", + "bbox": [ + 75, + 243, + 468, + 335 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Point Connection. After accurately determining the edge point locations and directions, we proceed to connect these edge points guided by the edge direction to create polylines (Fig. 3 (d) to (e)). Specifically, we begin by selecting candidate points and then compute directional errors for points adjacent to these candidates. Based on these directional errors, candidate points are connected to its best-matched neighboring point that growing direction aligns best with its extracted edge direction, i.e., with minimal directional error. This process is repeated, extending the edge polylines progressively until no further growth is possible. To ensure efficiency and accuracy, a non-maximum suppression step is employed to remove any redundant points that may exist between the current candidate and the best-matched point. Please refer to the supplements for more algorithm details.", + "bbox": [ + 75, + 338, + 472, + 565 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Edge Fitting. To further parameterize edges, we categorize the polylines into line segments and Bézier curves (Fig. 3 (f)). 
Initially, we utilize RANSAC [14] to fit lines from the polylines, and select the line segment that encompasses the highest number of inlier points. Following [25], we apply Principal Component Analysis (PCA) to the inlier points, re-estimate the line segment utilizing the principal eigenvector and the mean 3D point, and project all inlier points onto the principal eigenvector to derive the 3D endpoints. This fitting process is repeated for each polyline until the number of inlier points falls below a minimum threshold. For the remaining sub-polylines, we fit each of them with a Bézier curve that is defined by four control points.", + "bbox": [ + 75, + 568, + 472, + 763 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To minimize edge redundancy, we further merge line segments and Bézier curves based on two criteria: the shortest distance between candidate edges and the similarity of curvature at their closest points. For line segments, the shortest distance is the minimal point-to-line segment distance, and curvature similarity is their direction's cosine similarity. For Bézier curves, they are the minimal point-to-point distance and the cosine similarity of the tangent vectors at the nearest points, respectively. Candidate edges", + "bbox": [ + 75, + 763, + 472, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "are merged only if they meet both criteria. This dual-criterion approach ensures that merging happens only when two edges are both similar and close to each other.", + "bbox": [ + 496, + 90, + 890, + 136 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To connect edges, all endpoints of line segments and Bézier curves located within a specified distance threshold are merged into shared endpoints. Furthermore, we implement an optimization step [5, 64] to refine the 3D parametric edges by leveraging 2D edge maps, thereby enhancing edge precision. Specifically, we project 3D parametric edges into edge map frames using camera projection matrices and filter out 3D edges that are not visible in over $90\\%$ of views.", + "bbox": [ + 496, + 137, + 893, + 258 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 498, + 273, + 633, + 291 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experiment Setting", + "text_level": 1, + "bbox": [ + 500, + 300, + 686, + 316 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. We consider four diverse datasets: CAD models (ABC-NEF [70]), real-world objects (DTU [1]), high-quality indoor scenes (Replica [53]), and real-world outdoor scenes (Tanks & Temples [21]). ABC-NEF dataset comprises 115 CAD models, each accompanied by 50 observed images and ground truth parametric edges. We select 82 CAD models, excluding those containing inconsistent edge observations (e.g., cylinders or balls). DTU dataset provides dense ground-truth point clouds and we select 6 objects that meet the multi-view constraints among scans processed by [72]. Following [5], we derive edge points by projecting ground-truth dense points onto images and then comparing them with the observations on 2D edge maps to filter out non-edge points. Replica and Tanks & Temples datasets contain larger scenes. Due to the lack of ground-truth edges, we conduct qualitative comparisons among baselines.", + "bbox": [ + 496, + 327, + 893, + 584 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines. 
We compare with three state-of-the-art baselines for 3D line/curve mapping, including two learning-based methods, NEF [70] and NEAT [64], and one geometry-based method, LIMAP [25].", + "bbox": [ + 496, + 588, + 893, + 650 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Metrics. Our evaluation involves first sampling points in proportion to the edge's length and subsequently downsampling these points using a voxel grid with a resolution of $256^3$ . Following the metrics used in [25, 70], we consider Accuracy (Acc), Completeness (Comp) in millimeters, and Recall $(R_{\\tau})$ , Precision $(P_{\\tau})$ , F-score $(F_{\\tau})$ in percentage with a threshold $\\tau$ in millimeters. Moreover, we report Edge Direction Consistency (Norm) in percentage to analyze the precision of edge direction extraction.", + "bbox": [ + 496, + 654, + 893, + 790 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation Details. For $f_{u}$ , we utilize 8-layer Multi-layer Perceptrons (MLPs). Each layer in the MLP contains 512 neurons for larger scenes, such as Tanks & Temples, and 256 neurons for other datasets. We sample 1024 rays per batch, among these rays, 512 rays are sampled from edge areas. We train our model for $50k$ iterations on ABC-NEF dataset, and $200k$ iterations on other datasets. We train", + "bbox": [ + 496, + 794, + 893, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "21223", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d8a8f207e25d105eeb2ddbb82ba0e3086cd1272a644335bb120164a0f6fa9412.jpg", + "image_caption": [ + "Figure 5. Qualitative comparisons on ABC-NEF [70]. Lines are shown in black and curves in blue. Thanks to our precise edge extraction capabilities for both lines and curves, we achieve complete and accurate modeling of these elements." + ], + "image_footnote": [], + "bbox": [ + 153, + 85, + 821, + 300 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/63e344e229814eeb377715a57ff3761b96aff690fbd6f67e85da9b17f95aaaa5.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | Detector | Modal | Acc↓ | Comp↓ | Norm↑ | R5↑ | R10↑ | R20↑ | P5↑ | P10↑ | P20↑ | F5↑ | F10↑ | F20↑
LIMAP [25] | LSD | Line | 9.9 | 18.7 | 94.4 | 36.2 | 82.3 | 87.9 | 43.0 | 87.6 | 93.9 | 39.0 | 84.3 | 90.4
SOLD2 | Line | 5.9 | 29.6 | 90.1 | 64.2 | 76.6 | 79.6 | 88.1 | 96.4 | 97.9 | 72.9 | 84.0 | 86.7
NEF [70] | PiDiNet† | Curve | 11.9 | 16.9 | 90.9 | 11.4 | 62.0 | 91.3 | 15.7 | 68.5 | 96.3 | 13.0 | 64.6 | 93.3
PiDiNet | Curve | 15.1 | 16.5 | 89.7 | 11.7 | 53.3 | 89.8 | 13.6 | 52.2 | 89.1 | 12.3 | 51.8 | 88.7
DexiNed | Curve | 21.9 | 15.7 | 85.9 | 11.3 | 48.3 | 87.7 | 11.5 | 39.8 | 71.7 | 10.8 | 42.1 | 76.8
Ours | PiDiNet | Edge | 9.2 | 15.6 | 93.7 | 30.2 | 75.7 | 89.8 | 35.6 | 79.1 | 95.4 | 32.4 | 77.0 | 92.2
DexiNed | Edge | 8.8 | 8.9 | 95.4 | 56.4 | 88.9 | 94.8 | 62.9 | 89.9 | 95.7 | 59.1 | 88.9 | 94.9
", + "bbox": [ + 120, + 349, + 844, + 450 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "our network with the Adam optimizer with a learning rate of $5 \\times 10^{-4}$ , while the UDF model $f_{u}$ is trained with a learning rate of $1 \\times 10^{-4}$ and initialized with sphere initialization [66]. For edge detection for NEF and ours, we consider PiDiNet [54] and DexiNed [41]. PiDiNet [54] is employed for indoor scenes, such as DTU and Replica, due to its superior performance in these settings. Conversely, DexiNed [41] is applied to outdoor scenes, as it is primarily trained on outdoor scenes. On the synthetic ABC-NEF dataset, we show results with both detectors. For LIMAP, we follow their paper and we use SOLD2 [36] for indoor scenes and LSD [55] for outdoor scenes. NEAT is trained with 2D wireframes from HAWPV3 [65].", + "bbox": [ + 75, + 498, + 472, + 695 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Evaluation of 3D Edge Reconstruction", + "text_level": 1, + "bbox": [ + 76, + 705, + 408, + 722 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation on ABC-NEF Dataset. We show the quantitative and qualitative comparisons on Table 1 and Fig. 5. Note that NEAT fails on the ABC-NEF dataset because of its heavy dependence on texture input. NEF demonstrates decent performance at $\\tau = 20$ . However, their performance drops significantly when $\\tau$ is set to 10 and 5. This is attributed to its bias in edge rendering and its fitting-based post-processing. LIMAP shows remarkable precision across various $\\tau$ thresholds. Such consistency stems from its non-linear refinement over multi-view 2D supports. Nonetheless, LIMAP's inability to reconstruct curves leads", + "bbox": [ + 75, + 734, + 472, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/bdf45c058aeb89c164a707154f98571fd140a91fd04a3fc0ef6ae056cef6ba57.jpg", + "table_caption": [ + "Table 1. Edge reconstruction results on ABC-NEF [70]. Results from NEF's released pretrained models are indicated by $\\dagger$ . Our method surpasses all others in terms of completeness and achieves accuracy comparable to LIMAP [25]." + ], + "table_footnote": [], + "table_body": "
Method | Detector | Curve | Line
Acc↓ | Comp↓ | Norm↑ | Acc↓ | Comp↓ | Norm↑
LIMAP [25] | LSD | 272.6 | 50.1 | 84.8 | 34.6 | 11.3 | 95.9
SOLD2 | 295.7 | 82.2 | 76.8 | 20.0 | 18.1 | 92.1
NEF [70] | PiDiNet† | 265.0 | 27.1 | 77.9 | 40.4 | 13.7 | 92.6
PiDiNet | 263.1 | 23.9 | 77.6 | 43.9 | 14.0 | 91.4
DexiNed | 250.5 | 20.3 | 72.6 | 56.2 | 13.8 | 87.3
Ours | PiDiNet | 253.7 | 25.7 | 88.1 | 43.1 | 12.8 | 93.7
DexiNed | 241.0 | 10.9 | 88.7 | 46.7 | 7.7 | 95.4
", + "bbox": [ + 509, + 494, + 879, + 588 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Accuracy, completeness and normal consistency results with curves and lines on ABC-NEF [70]. Our method with DexiNed edge detector yields overall the strongest performance on curves among all baselines.", + "bbox": [ + 496, + 592, + 892, + 647 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "to lower scores in completeness and recall. Our method, when combined with either of the 2D edge detectors, consistently outperforms all baselines. Notably, as shown in Table 1, combined with the DexiNed detector, our method achieves superior results in completeness, edge direction consistency, recall, and F-Score. We also show competitive accuracy and precision when compared to LIMAP.", + "bbox": [ + 496, + 656, + 890, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To further analyze the performance of different edge types, we classify the ground truth edges into curves (including BSplines, ellipses, and circles) and line segments, based on the GT annotations. We provide accuracy, completeness, and edge direction consistency in Table 2 to analyze the separate reconstruction abilities for curves and lines. Note that these results are computed based on all predictions specific to either curves or lines, as other methods do not differentiate between these two types of edges. We", + "bbox": [ + 496, + 763, + 893, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "21224", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/85d6f4966cb9656b73660aa5b0a3763e630fe331ae41e9c52593945514f4242b.jpg", + "image_caption": [ + "Figure 6. Qualitative comparisons on the Replica [53] and Tanks & Temples [21] datasets. The first two scenes are from the Replica dataset, while the last scene is from the Tanks & Temples dataset." + ], + "image_footnote": [], + "bbox": [ + 158, + 97, + 810, + 324 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c76cb10f06e8d4329f2ed8a8004c47e2ae22399821ff56514abfd97ef1c0a767.jpg", + "image_caption": [ + "2D Image" + ], + "image_footnote": [], + "bbox": [ + 96, + 369, + 210, + 455 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/15bdf6ac242f4925c9447e88d99c55d022936580c065ebb02fdca63bcddb0bc0.jpg", + "image_caption": [ + "LIMAP [25]" + ], + "image_footnote": [], + "bbox": [ + 215, + 375, + 328, + 455 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/00797d273957867f59baf7e8d3861bc4761fafe544042f9e09d83bec09342ccc.jpg", + "image_caption": [ + "NEF [70]" + ], + "image_footnote": [], + "bbox": [ + 330, + 375, + 444, + 455 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0bc9a81e7d2d51ad0add2a71c212ba60938538b6deea540583347c6e71802a2b.jpg", + "image_caption": [ + "NEAT [64]" + ], + "image_footnote": [], + "bbox": [ + 101, + 473, + 210, + 553 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f96fadba897230b20816ff77c329140fd909f3ddcda7181254b39ef86a5acc5e.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 218, + 472, + 323, + 549 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b00d3acd08c734a1304a9d9ed71594a644480cc8ea4f8fec61d825ec88663aa1.jpg", + "image_caption": [ + "GT Edge", + "Figure 7. Qualitative comparisons on DTU [1]. 
Our results demonstrate complete edge structure, whereas other methods result in redundant line segments or imprecise curves." + ], + "image_footnote": [], + "bbox": [ + 338, + 474, + 437, + 547 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/b4ff2ddfc68870fa7a7878641ba2b0304745c0bfe198c243b342a0a7662e9675.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Scan | LIMAP [25] | NEF [70] | NEAT [64] | Ours
R5↑ | P5↑ | R5↑ | P5↑ | R5↑ | P5↑ | R5↑ | P5↑
37 | 75.8 | 74.3 | 39.5 | 51.0 | 63.9 | 85.1 | 62.7 | 83.9
83 | 75.7 | 50.7 | 32.0 | 21.8 | 72.3 | 52.4 | 72.3 | 61.5
105 | 79.1 | 64.9 | 30.3 | 32.0 | 68.9 | 73.3 | 78.5 | 78.0
110 | 79.7 | 65.3 | 31.2 | 40.2 | 64.3 | 79.6 | 90.9 | 68.3
118 | 59.4 | 62.0 | 15.3 | 25.2 | 59.0 | 71.1 | 75.3 | 78.1
122 | 79.9 | 79.2 | 15.1 | 29.1 | 70.0 | 82.0 | 85.3 | 82.9
Mean | 74.9 | 66.1 | 27.2 | 33.2 | 66.4 | 73.9 | 77.5 | 75.4
", + "bbox": [ + 102, + 626, + 441, + 731 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. Edge reconstruction results on DTU [1].", + "bbox": [ + 119, + 736, + 424, + 750 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "can see that our method with DexiNed exhibits superior results in reconstructing curves. As for line segments, our performance is marginally lower than the best-performing method LIMAP which is specially optimized for lines.", + "bbox": [ + 75, + 760, + 468, + 821 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation on DTU Dataset. Our assessment of the DTU dataset, as outlined in Table 3 and Fig. 7, shows our proficiency in real-world scenarios. Notably, our approach achieves the highest recall and precision among all baselines. The DTU dataset presents a challenging scenario for", + "bbox": [ + 75, + 824, + 468, + 902 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b354c149f9d99c4094d797a70c507d8be54383f1a5459910be440c1163369e90.jpg", + "image_caption": [ + "(a) w/o point shifting" + ], + "image_footnote": [], + "bbox": [ + 514, + 369, + 629, + 421 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9330ce339bce3241821a4886721d8b8bd4b53979aaa9cf8a3a161d2c4f46f4d6.jpg", + "image_caption": [ + "(b) w/ point shifting", + "Figure 8. Visualization of point shifting and edge direction. Edge points are shown in point clouds and edge directions in color. The point shifting step significantly refines the locations of edge points. The edge extraction step yields accurate results, as seen in parallel lines sharing the same direction and curves exhibiting continuously changing directions." + ], + "image_footnote": [], + "bbox": [ + 638, + 371, + 754, + 421 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3e18c6522e3243165a91cc2996d1327f39a3edf0e789a150c8e6794f93052558.jpg", + "image_caption": [ + "(c) edge direction" + ], + "image_footnote": [], + "bbox": [ + 763, + 371, + 877, + 421 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "edge extraction due to its varying lighting conditions. However, our edge refinement step proves effective in preserving primary edges, a point we elaborate on in Sec. 4.3. Fig. 7 shows LIMAP tends to produce redundant line segments, leading to high recall but reduced precision. NEF's post-processing is sensitive to different scenes, resulting in noisy edge fitting. NEAT, despite producing clean outputs, its inability to handle curves constrains its overall performance.", + "bbox": [ + 496, + 535, + 890, + 656 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Qualitative Evaluation on Indoor & Outdoor Scenes. To really showcase the power of our method in capturing scene-level geometry, we further run our method on indoor and outdoor scenes. Note that since NEF is not able to produce meaningful reconstructions on larger scenes, we only compare with LIMAP and NEAT. As shown in Fig. 1 and Fig. 6, NEAT, due to its reliance on high-quality surface reconstruction, faces limitations in scene reconstruction, while LIMAP and our method both successfully capture good scene geometry.", + "bbox": [ + 496, + 661, + 890, + 813 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Ablations and Analysis", + "text_level": 1, + "bbox": [ + 500, + 825, + 714, + 842 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Parametric Edge Extraction. To better understand our parametric edge extraction process described in Sec. 
3.2, we visualize our point shifting and edge direction in Fig. 8.", + "bbox": [ + 498, + 854, + 892, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "21225", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/37dca7d064aac0cc132d9bf56c37d2bff356cca0784f3ac69b7ed228931bac09.jpg", + "image_caption": [ + "(a) Ours" + ], + "image_footnote": [], + "bbox": [ + 91, + 95, + 205, + 145 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b01c0b9a10c60091e6122fb8977f66258e6dad7d7946bca904b5bc03a66510de.jpg", + "image_caption": [ + "(b) w/o point shifting" + ], + "image_footnote": [], + "bbox": [ + 217, + 95, + 330, + 145 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5ce8b7b83fbb7916206664839d3fe79193c6f49383ddfce6d48c3528c7e0c490.jpg", + "image_caption": [ + "(c) w/o Bezier curve" + ], + "image_footnote": [], + "bbox": [ + 339, + 95, + 454, + 145 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ce553152039508f23cfb3149a04997c6fccfd65ee5ae6a45c5148c973626f296.jpg", + "image_caption": [ + "(d) w/o edge merge." + ], + "image_footnote": [], + "bbox": [ + 86, + 181, + 205, + 229 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1a28bb33faf21a2d5a68b83d045bd5b56dc6f71f9e8b6bc1df74377d810aab98.jpg", + "image_caption": [ + "(e) w/o endpoints merge." + ], + "image_footnote": [], + "bbox": [ + 215, + 181, + 330, + 229 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7c7f3f78f2e6c40239ff6bdb0b140a79f4c6851984e2a16980ad15c09883ab57.jpg", + "image_caption": [ + "(f) GT Edge", + "Figure 9. Qualitative ablation on different component of our parametric edge extraction. The absence of any module in our edge extraction process results in incomplete or noisy qualitative outcomes." + ], + "image_footnote": [], + "bbox": [ + 341, + 181, + 454, + 229 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We can clearly see that the extracted point clouds without point shifting are appeared in redundant and inaccurate edge points (Fig. 8 (a)). In contrast, the point shifting step yields point clouds with sharply defined, precise edges (Fig. 8 (b)). In addition, as shown in Fig. 8 (c), the extracted edge directions along parallel lines are consistent, while those on curves vary continuously. This aligns with our expectations.", + "bbox": [ + 75, + 321, + 468, + 426 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Furthermore, we also conduct ablation studies in Table 4 and Fig. 9 to evaluate the impact of different components in our edge extraction algorithm. These experiments were performed on the ABC-NEF dataset using the DexiNed detector. First, the removal of the query point shifting step leads to a significant drop in both recall and precision. This indicates that our point-shifting step significantly refines the query points locations. Second, excluding Bezier curves results in a decline in completeness (Fig. 9 (c)), showing that curves are necessary for edge reconstruction. Third, omitting the edge merging step leads to redundant small line segments, as evident in Fig. 9 (d). Finally, the removal of endpoint merging impairs connectivity between edges, as shown in Fig. 
9 (e).", + "bbox": [ + 75, + 428, + 468, + 640 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/fc6dd362c71742316d5dedcf23fc6e883f0fd9c12c5f3c1142f20b42d47f2237.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodAcc↓Comp↓R5↑P5↑F5↑
aOurs8.88.956.462.959.1
bw/o point shifting15.39.929.218.722.2
cw/o Bézier curve9.412.154.265.859.0
dw/o edge merging10.38.753.845.348.6
ew/o endpoints merging9.39.051.557.754.0
", + "bbox": [ + 81, + 652, + 468, + 733 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4. Ablation studies on different component of parametric edge extraction on ABC-NEF [70] with DexiNed [41]. Our parametric edge extraction approach with all components achieves the optimal balance between accuracy and completeness.", + "bbox": [ + 75, + 738, + 468, + 794 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Edge Refinement. In Fig. 10, we study the effectiveness of our edge refinement module. When input edge maps contain some noises in dark scenes, our initial 3D edge map, without the edge refinement, exhibits some artifacts. However, the edge refinement module markedly mitigates artifacts, achieving a balance between recall and precision.", + "bbox": [ + 75, + 809, + 470, + 901 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/76476b7e718c308488003bd20018d95bbb86ea3ad15a7919eebed9eaa49ee90a.jpg", + "image_caption": [ + "w/o edge refinement" + ], + "image_footnote": [], + "bbox": [ + 506, + 89, + 694, + 157 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/138283933240e89baba8f9b4feca1fc65438e619ce2077a5f3d602df0693f867.jpg", + "image_caption": [ + "w/ edge refinement" + ], + "image_footnote": [], + "bbox": [ + 700, + 90, + 872, + 156 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/afdaeeed73ad604514de757e9b2270c7023329ae143a5a3832380da533b7596a.jpg", + "image_caption": [ + "Figure 10. Ablation study on edge refinement. Our edge refinement effectively eliminates the majority of noisy edges in background areas.", + "Figure 11. Dense surface reconstruction on Replica [53]. Utilizing our trained UDF MLP for initialization enables MonoSDF to capture more geometric details, such as the vase in the top row, the shelf in the bottom row." + ], + "image_footnote": [], + "bbox": [ + 504, + 229, + 888, + 401 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Application on Dense Surface Reconstruction", + "text_level": 1, + "bbox": [ + 500, + 481, + 883, + 496 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our method has demonstrated its proficiency in reconstructing 3D edges across a diverse range of scenarios. Building on this success, we further explore the potential of our learned representation to benefit other tasks. A particularly relevant area is dense surface reconstruction.", + "bbox": [ + 498, + 506, + 890, + 580 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in Fig. 11, the recent neural-implicit surface reconstruction approach MonoSDF [72] can show decent reconstruction results from only posed multi-view images. However, we notice that they still struggle to capture detailed geometry. To address this, we integrate our method into the MonoSDF pipeline. Specifically, we initialize the geometry MLPs of MonoSDF with our pre-trained UDF MLPs. We can clearly see that such a simple integration can enhance the recovery of geometric details.", + "bbox": [ + 498, + 583, + 892, + 719 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 737, + 625, + 752 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We introduced EMAP, a 3D neural edge reconstruction pipeline that learns accurate 3D edge point locations and directions implicitly from multi-view edge maps through UDF and abstracts 3D parametric edges from the learned UDF field. 
Through extensive evaluations, EMAP demonstrates remarkable capabilities in CAD modeling and in capturing detailed geometry of objects and scenes. Furthermore, we show that our learned UDF field enriches the geometric details for neural surface reconstruction.", + "bbox": [ + 498, + 763, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "21226", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Henrik Aanaes, Rasmus Ramsbøl Jensen, George Vogiatzis, Engin Tola, and Anders Bjorholm Dahl. Large-scale data for multiple-view stereopsis. International Journal of Computer Vision (IJCV), 2016. 5, 7", + "[2] Hichem Abdellali, Robert Frohlich, Viktor Vilagos, and Zoltan Kato. L2d2: Learnable line detector and descriptor. In Proc. of the International Conf. on 3D Vision (3DV), 2021. 2", + "[3] Dejan Azinović, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural rgb-d surface reconstruction. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[4] Adrien Bartoli and Peter Sturm. Structure-from-motion using lines: Representation, triangulation, and bundle adjustment. Computer Vision and Image Understanding (CVIU), 2005. 2", + "[5] Andrea Bignoli, Andrea Romanoni, and Matteo Matteucci. Multi-view stereo 3d edge reconstruction. In Proc. of the IEEE Winter Conference on Applications of Computer Vision (WACV), 2018. 1, 5", + "[6] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3d reconstruction. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2", + "[7] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[8] Manmohan Chandraker, Jongwoo Lim, and David Kriegman. Moving in stereo: Efficient structure and motion using lines. In Proc. of the International Conf. on Computer Vision (ICCV), pages 1741-1748. IEEE, 2009. 2", + "[9] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[10] Wentao Cheng, Sheng Yang, Maomin Zhou, Ziyuan Liu, Yiming Chen, and Mingyang Li. Road mapping and localization using sparse semantic visual features. IEEE Robotics and Automation Letters, 6(4):8118-8125, 2021. 1", + "[11] Kseniya Cherenkova, Elena Dupont, Anis Kacem, Ilya Arzhannikov, Gleb Gusev, and Djamila Aouada. Sepicnet: Sharp edges recovery by parametric inference of curves in 3d shapes. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 4", + "[12] Scott Ettinger, Shuyang Cheng, Benjamin Caine, Chenxi Liu, Hang Zhao, Sabeek Pradhan, Yuning Chai, Ben Sapp, Charles R Qi, Yin Zhou, et al. Large scale interactive motion forecasting for autonomous driving: The waymo open motion dataset. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9710-9719, 2021. 1", + "[13] Liangji Fang, Qinhong Jiang, Jianping Shi, and Bolei Zhou. 
Tpnet: Trajectory proposal network for motion prediction." + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6797-6806, 2020. 1", + "[14] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 1981. 5", + "[15] Michael Goesele, Noah Snavely, Brian Curless, Hugues Hoppe, and Steven M Seitz. Multi-view stereo for community photo collections. In Proc. of the International Conf. on Computer Vision (ICCV), 2007. 1", + "[16] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. In Proc. of the International Conf. on Machine learning (ICML), 2020. 4", + "[17] Yijia He, Ji Zhao, Yue Guo, Wenhao He, and Kui Yuan. Plizio: Tightly-coupled monocular visual-inertial odometry using point and line features. Sensors, 18(4):1159, 2018. 2", + "[18] Manuel Hofer, Michael Maurer, and Horst Bischof. Improving sparse 3d models for man-made environments using line-based 3d reconstruction. In Proc. of the International Conf. on 3D Vision (3DV), 2014. 1", + "[19] Manuel Hofer, Michael Maurer, and Horst Bischof. Efficient 3d scene abstraction using line segments. Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2017.", + "[20] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local implicit grid representations for 3d scenes. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[21] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Trans. on Graphics (ToG), 2017. 5, 7", + "[22] Tianyu Li, Li Chen, Xiangwei Geng, Huijie Wang, Yang Li, Zhenbo Liu, Shengyin Jiang, Yuting Wang, Hang Xu, Chunjing Xu, et al. Topology reasoning for driving scenes. arXiv preprint arXiv:2304.05277, 2023. 1", + "[23] Hyunjun Lim, Jinwoo Jeon, and Hyun Myung. Uv-slam: Unconstrained line-based slam using vanishing points for structural mapping. IEEE Robotics and Automation Letters (RA-L), 7(2):1518-1525, 2022. 2", + "[24] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. Dist: Rendering deep implicit signed distance function with differentiable sphere tracing. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[25] Shaohui Liu, Yifan Yu, Rémi Pautrat, Marc Pollefeys, and Viktor Larsson. 3d line mapping revisited. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 5, 6, 7", + "[26] Yujia Liu, Stefano D'Aronco, Konrad Schindler, and Jan Dirk Wegner. Pc2wf: 3d wireframe reconstruction from raw point clouds. In Proc. of the International Conf. on Learning Representations (ICLR), 2021. 2", + "[27] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Yuan Liu, Peng Wang, Christian Theobalt, Taku Komura, and Wenping Wang. Neuraludf: Learning unsigned distance fields for" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "21227", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "multi-view reconstruction of surfaces with arbitrary topologies. In Proc. of the Conf. 
on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3, 4", + "[28] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[29] Daniele Marzorati, Matteo Matteucci, Davide Migliore, and Domenico G Sorrenti. Integration of 3d lines and points in 6dof visual slam by uncertain projective geometry. In EMCR. CiteSeer, 2007. 2", + "[30] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[31] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 4", + "[32] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2, 3", + "[33] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[34] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proc. of the International Conf. on Computer Vision (ICCV), 2021. 1, 2, 3", + "[35] Jeong Joon Park, Peter Florence, Julian Straub, Richard A. Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[36] Rémi Pautrat, Juan-Ting Lin, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Sold2: Self-supervised occlusion-aware line description and detection. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 6", + "[37] Rémi Pautrat, Daniel Barath, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Deeplsd: Line segment detection and refinement with deep image gradients. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 4, 5", + "[38] Rémi* Pautrat, Iago* Suárez, Yifan Yu, Marc Pollefeys, and Viktor Larsson. GlueStick: Robust image matching by sticking points and lines together. In Proc. of the International Conf. on Computer Vision (ICCV), 2023. 2", + "[39] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[40] Songyou Peng, Chiyu \"Max\" Jiang, Yiyi Liao, Michael Niemeyer, Marc Pollefeys, and Andreas Geiger. Shape as points: A differentiable poisson solver. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2", + "[41] Xavier Soria Poma, Edgar Riba, and Angel Sappa. Dense extreme inception network: Towards a robust cnn model for edge detection. In Proc. of the IEEE Winter Conference on Applications of Computer Vision (WACV), 2020. 
6, 8", + "[42] Zhijian Qiao, Zehuan Yu, Huan Yin, and Shaojie Shen. Online monocular lane mapping using catmull-rom spline. arXiv preprint arXiv:2307.11653, 2023. 1, 4", + "[43] Tong Qin, Yuxin Zheng, Tongqing Chen, Yilun Chen, and Qing Su. A light-weight semantic map for visual localization towards autonomous driving. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 11248-11254. IEEE, 2021. 1", + "[44] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with thousands of tiny mlp's. In Proc. of the International Conf. on Computer Vision (ICCV), 2021. 2", + "[45] Chris Rorden, Roger Newman-Norlund, Chris Drake, Daniel R Glen, Julius Fridriksson, Taylor Hanayik, and Paul A Taylor. Improving 3d edge detection for visual inspection of mri coregistration and alignment. bioRxiv, pages 2022-09, 2022. 1", + "[46] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proc. of the International Conf. on Computer Vision (ICCV), 2019. 2", + "[47] Grant Schindler, Panchapagesan Krishnamurthy, and Frank Dellaert. Line-based structure from motion for urban environments. In International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT), 2006. 2", + "[48] Johannes L Schonberger, Enliang Zheng, Jan-Michael Frahm, and Marc Pollefeys. Pixelwise view selection for unstructured multi-view stereo. In Proc. of the European Conf. on Computer Vision (ECCV), 2016. 1", + "[49] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2", + "[50] Steven M Seitz, Brian Curless, James Diebel, Daniel Scharstein, and Richard Szeliski. A comparison and evaluation of multi-view stereo reconstruction algorithms. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2006. 1", + "[51] Shaoshuai Shi, Li Jiang, Dengxin Dai, and Bernt Schiele. Motion transformer with global intention localization and local movement refinement. Advances in Neural Information Processing Systems, 2022. 1", + "[52] Fangwen Shu, Jiaxuan Wang, Alain Pagani, and Didier Stricker. Structure plp-slam: Efficient sparse mapping and localization using point, line and plane for monocular, rgb-d and stereo cameras. In Proc. IEEE International Conf. on Robotics and Automation (ICRA). 2", + "[53] Julian Straub, Thomas Whelan, Lingni Ma, Yufan Chen, Erik Wijmans, Simon Green, Jakob J Engel, Raul Mur-Artal, Carl" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "21228", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ren, Shobhit Verma, et al. The replica dataset: A digital replica of indoor spaces. arXiv preprint arXiv:1906.05797, 2019. 1, 5, 7, 8", + "[54] Zhuo Su, Wenzhe Liu, Zitong Yu, Dewen Hu, Qing Liao, Qi Tian, Matti Pietikainen, and Li Liu. Pixel difference networks for efficient edge detection. In Proc. of the International Conf. on Computer Vision (ICCV), 2021. 6", + "[55] Rafael Grompone Von Gioi, Jeremie Jakubowicz, Jean-Michel Morel, and Gregory Randall. Lsd: A fast line segment detector with a false detection control. IEEE Trans. on Pattern Analysis and Machine Intelligence (PAMI), 2008. 
6", + "[56] Huijie Wang, Zhenbo Liu, Yang Li, Tianyu Li, Li Chen, Chonghao Sima, Yuting Wang, Shengyin Jiang, Feng Wen, Hang Xu, et al. Road genome: A topology reasoning benchmark for scene understanding in autonomous driving. arXiv preprint arXiv:2304.10440, 2023. 1", + "[57] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2", + "[58] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2, 3", + "[59] Xiaogang Wang, Yuelang Xu, Kai Xu, Andrea Tagliasacchi, Bin Zhou, Ali Mahdavi-Amiri, and Hao Zhang. Pie-net: Parametric inference of point cloud edges. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2", + "[60] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. Hf-neus: Improved surface reconstruction using high-frequency details. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2", + "[61] Dong Wei, Yi Wan, Yongjun Zhang, Xinyi Liu, Bin Zhang, and Xiqi Wang. Elsr: Efficient line segment reconstruction with planes and points guidance. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 2", + "[62] Qiangeng Xu, Weiyue Wang, Duygu Ceylan, Radomir Mech, and Ulrich Neumann. DISN: deep implicit surface network for high-quality single-view 3d reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2", + "[63] Nan Xue, Song Bai, Fudong Wang, Gui-Song Xia, Tianfu Wu, and Liangpei Zhang. Learning attraction field representation for robust line segment detection. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 4", + "[64] Nan Xue, Bin Tan, Yuxi Xiao, Liang Dong, Gui-Song Xia, and Tianfu Wu. Volumetric wireframe parsing from neural attraction fields. arXiv preprint arXiv:2307.10206, 2023. 1, 2, 5, 7", + "[65] Nan Xue, Tianfu Wu, Song Bai, Fu-Dong Wang, Gui-Song Xia, Liangpei Zhang, and Philip HS Torr. Holistically-attracted wireframe parsing: From supervised to self-supervised learning. IEEE Trans. on Pattern Analysis and Machine Intelligence (PAMI), 2023. 2, 6" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[66] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Ronen Basri, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 6", + "[67] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 1, 3", + "[68] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2", + "[69] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 1, 2", + "[70] Yunfan Ye, Renjiao Yi, Zhirui Gao, Chenyang Zhu, Zhiping Cai, and Kai Xu. 
Nef: Neural edge fields for 3d parametric curve reconstruction from multi-view images. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3, 4, 5, 6, 7, 8", + "[71] Zehao Yu, Anpei Chen, Bozidar Antic, Songyou Peng, Apratim Bhattacharyya, Michael Niemeyer, Siyu Tang, Torsten Sattler, and Andreas Geiger. Sdfstudio: A unified framework for surface reconstruction, 2022. 2, 3", + "[72] Zehao Yu, Songyou Peng, Michael Niemeyer, Torsten Sattler, and Andreas Geiger. Monosdf: Exploring monocular geometric cues for neural implicit surface reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2, 5, 8", + "[73] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2", + "[74] Xiangyu Zhu, Dong Du, Weikai Chen, Zhiyou Zhao, Yinyu Nie, and Xiaoguang Han. Nerve: Neural volumetric edges for parametric curve extraction from point cloud. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2", + "[75] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[76] Zihan Zhu, Songyou Peng, Viktor Larsson, Zhaopeng Cui, Martin R Oswald, Andreas Geiger, and Marc Pollefeys. Nicer-slam: Neural implicit scene encoding for rgb slam. In Proc. of the International Conf. on 3D Vision (3DV), 2024. 2", + "[77] Xingxing Zuo, Xiaojia Xie, Yong Liu, and Guoquan Huang. Robust visual slam with point and line features. In Proc. IEEE International Conf. on Intelligent Robots and Systems (IROS), 2017. 2" + ], + "bbox": [ + 501, + 92, + 890, + 825 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "21229", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3D Neural Edge Reconstruction/12034c9b-4470-4339-9189-38596581605f_model.json b/2024/3D Neural Edge Reconstruction/12034c9b-4470-4339-9189-38596581605f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..11260d378080c2e6b6461b168037cb3cabe72ce5 --- /dev/null +++ b/2024/3D Neural Edge Reconstruction/12034c9b-4470-4339-9189-38596581605f_model.json @@ -0,0 +1,2851 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.323, + 0.131, + 0.648, + 0.153 + ], + "angle": 0, + "content": "3D Neural Edge Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.179, + 0.804, + 0.273 + ], + "angle": 0, + "content": "Lei Li\\(^{1}\\) Songyou Peng\\(^{1,2\\dagger}\\) Zehao Yu\\(^{3,4}\\) Shaohui Liu\\(^{1}\\) Rémi Pautrat\\(^{1,6}\\) Xiaochuan Yin\\(^{5}\\) Marc Pollefeys\\(^{1,6}\\) \n\\(^{1}\\)ETH Zurich \\(^{2}\\)MPI for Intelligent Systems, Tübingen \\(^{3}\\)University of Tübingen \\(^{4}\\)Tübingen AI Center \\(^{5}\\)Utopilot \\(^{6}\\)Microsoft neural-edge-map.github.io" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.306, + 0.314, + 0.321 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.337, + 0.474, + 0.61 + ], + "angle": 0, + "content": "Real-world objects and environments are predominantly composed of edge features, including straight lines and curves. Such edges are crucial elements for various applications, such as CAD modeling, surface meshing, lane mapping, etc. However, existing traditional methods only prioritize lines over curves for simplicity in geometric modeling. To this end, we introduce EMAP, a new method for learning 3D edge representations with a focus on both lines and curves. Our method implicitly encodes 3D edge distance and direction in Unsigned Distance Functions (UDF) from multi-view edge maps. On top of this neural representation, we propose an edge extraction algorithm that robustly abstracts parametric 3D edges from the inferred edge points and their directions. Comprehensive evaluations demonstrate that our method achieves better 3D edge reconstruction on multiple challenging datasets. We further show that our learned UDF field enhances neural surface reconstruction by capturing more details." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.637, + 0.21, + 0.652 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.663, + 0.462, + 0.692 + ], + "angle": 0, + "content": "The straight line belongs to men, the curved one to God. — Antonio Gaudi" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.697, + 0.47, + 0.818 + ], + "angle": 0, + "content": "This sentiment is evident in the visual composition of our environments. While straight lines are common in manmade scenes such as walls, windows, and doors [25], curves are more general and ubiquitous from cups, bridges, architectures, to Gothic arts. Edges, which are composed of both lines and curves, are the fundamental elements of visual perception. Therefore, accurate edge modeling is crucial for understanding the geometry and structure of our 3D world." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.818, + 0.47, + 0.879 + ], + "angle": 0, + "content": "Conventional approaches on 3D reconstruction typically involve inferring dense geometry and abstracting meshes from 2D images [15, 34, 48, 50, 67, 69]. However, the presence of 3D edges offers substantial advantages. 
First," + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.304, + 0.695, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.545, + 0.388, + 0.656, + 0.4 + ], + "angle": 0, + "content": "(a) An Indoor Scene" + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.302, + 0.887, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.751, + 0.388, + 0.837, + 0.4 + ], + "angle": 0, + "content": "(b) LIMAP [25]" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.401, + 0.694, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.562, + 0.484, + 0.641, + 0.497 + ], + "angle": 0, + "content": "(c) NEAT [64]" + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.401, + 0.889, + 0.483 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.746, + 0.485, + 0.843, + 0.496 + ], + "angle": 0, + "content": "(d) EMAP (Ours)" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.509, + 0.892, + 0.566 + ], + "angle": 0, + "content": "Figure 1. Example 3D edge reconstruction on Replica [53]. While prior methods such as LIMAP [25] and NEAT [64] only reconstruct distinctive line segments, our method generates a more complete 3D edge map combining both line and curve features." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.58, + 0.893, + 0.762 + ], + "angle": 0, + "content": "edges are naturally compact representations that capture the salient features oftentimes around geometric boundaries, which are good indicators for more lightweight and adaptive meshing and 3D modeling with comparably less redundancy. Secondly, in contrast to dense surface modeling from images, 3D edges are unaffected to illumination changes, thus exhibiting better reliability on multi-view reconstruction. Last but not least, 3D edges serve as a universal representation in real-world scenarios, and can be potentially integrated into many applications such as lane mapping [10, 22, 42, 43, 56], motion forecasting [12, 13, 51], medical imaging [45], etc." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.903 + ], + "angle": 0, + "content": "The reconstruction of 3D edges is conventionally approached by matching their 2D observations across views. While Bignoli et al. [5] proposed edge point matching using the sparse map from Structure-from-Motion (SfM), it is inherently ill-posed due to its heavy reliance on cross-view edge correspondences, which are generally sparse and prone to ambiguity. Recent works have also improved the quality of 3D line reconstruction [18, 25, 61, 64], but primarily excel in specific scenes where straight lines dominate." + }, + { + "type": "page_footnote", + "bbox": [ + 0.102, + 0.887, + 0.231, + 0.901 + ], + "angle": 0, + "content": "† Corresponding author" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "21219" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "nate. While general real-world environments with curved structures pose more challenges, recent progress on 2D detection and matching is mostly limited to point and line features and thus inapplicable to such scenarios." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.154, + 0.471, + 0.349 + ], + "angle": 0, + "content": "A recent work NEF [70] made a significant step forward in learning 3D curves from multi-view 2D edge observations. 
Inspired by the recent success of neural radiance field (NeRF) [32], they introduce a neural edge density field and show decent results in reconstructing edges for simple objects. Nevertheless, their proposed edge density field has an inherent bias in edge rendering, leading to less accurate reconstruction. Moreover, its fitting-based edge parameterization process not only requires tedious tuning to specific data, but also struggles with its scalability to larger and more complex scenes. This motivates us to develop a more robust system for 3D edge mapping from 2D observations, which would benefit a wide range of downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.351, + 0.47, + 0.637 + ], + "angle": 0, + "content": "Towards this goal, we introduce EMAP, a novel approach for accurate 3D edge reconstruction from only 2D edge maps. EMAP comprises the following steps. Firstly, we learn the neural unsigned distance function (UDF) to implicitly model 3D edges, utilizing an unbiased rendering equation to mitigate the inaccuracies observed in NEF. Secondly, once learned, we can obtain the unsigned distance and normal for each point in the space, so a set of precise edge points with directions can be extracted. Finally, based on the guidance of every edge point's location and direction, we design a simple yet robust algorithm for parametric line and curve extraction, that can be applied across various challenging scenarios. Our comprehensive evaluations of EMAP, from synthetic CAD models to real-world indoor and outdoor scenes, show its superior performance in 3D edge reconstruction. In addition, we also observe that initializing the optimization process of the recent neural implicit surface reconstruction method with our trained UDF field enables the reconstructing of better details." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.64, + 0.452, + 0.655 + ], + "angle": 0, + "content": "Overall, the contributions of this paper are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.656, + 0.468, + 0.701 + ], + "angle": 0, + "content": "- We propose EMAP, a 3D neural edge reconstruction pipeline that can learn accurate 3D edge locations and directions implicitly from multi-view edge maps." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.702, + 0.468, + 0.731 + ], + "angle": 0, + "content": "- We develop a 3D edge extraction algorithm to robustly connect edge points with edge direction guidance." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.732, + 0.468, + 0.761 + ], + "angle": 0, + "content": "- We show that our model can generate complete 3D edge maps and help optimize dense surfaces." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.656, + 0.468, + 0.761 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.78, + 0.218, + 0.795 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Geometry-based 3D Line Reconstruction. As a pioneering work, Bartoli and Sturm [4] introduces a full SfM system using line segments, which is later improved under Manhattan assumption [47] and in stereo systems [8]. 
Recently, with the developments of line detections [36, 37, 65] and matching [2, 36, 38] thanks to the advent to deep learn" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.273 + ], + "angle": 0, + "content": "ing, several works have attempted to revisit the line mapping problem through graph clustering [19], leveraging planar information [61] and incorporating into SLAM systems [17, 23, 29, 52, 77]. In particular, recent work LIMAP [25] introduces a robust 3D line mapping system with structural priors which can adapt to different existing line detectors and matchers. Despite these advances, all the works are limited to straight lines and often produce segmented small lines when it comes to curves. In contrast, edges are generally easier to detect and are redundantly present in most scenes. In this project, rather than relying on lines, we build our 3D mapping system using robust 2D edge maps." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.293, + 0.892, + 0.655 + ], + "angle": 0, + "content": "Learning-based 3D Line/Curve Reconstruction. In contrast to geometry-based methods, some approaches [26, 59, 74] shifted their focus to directly extract parametric curves from given edge point clouds. Typically, they require keypoint detection, clustering, and linkage. Even under the relaxed setting, it is still challenging to generate clean parametric curves due to the complex connectivity of curves and imperfect point clouds [70]. To address this limitation, NEF [70] integrates NeRF [32] for edge mapping from multi-view images, extracting 3D curves from the learned neural edge field through a carefully designed postprocessing. While NEF achieves decent performance on CAD models, it is constrained to simple and low-precision object-level edge mapping. A concurrent work, NEAT [64], utilizes VolSDF [69] to build dense surfaces and incorporates a global junction perceiving module to optimize 3D line junctions with 2D wireframe supervision. Although NEAT can produce 3D wireframes, it is restricted to modeling line segments only. Additionally, their need for textured objects is a limitation. By contrast, we use the unisgned distance function (UDF) to represent edges, enabling the construction of both line segments and curves without the necessity for target textures. We further show that our method can faithfully reconstruct edges for complex scenes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.674, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Neural Implicit Representations. Neural implicit representations have emerged as a powerful tool for a spectrum of computer vision tasks, including object geometry representation [9, 24, 30, 34, 35, 40, 46, 57, 62, 66, 68], scene reconstruction [6, 20, 39, 71, 72, 75, 76], novel view synthesis [28, 32, 44, 73] and generative modelling [7, 33, 49]. Recent works [27, 58, 60, 69, 72] show impressive high-fidelity reconstruction by learning the implicit signed distance function (SDF). However, the SDF representation constrains to modeling closed, watertight surfaces. In contrast, NeuralUDF [3] exploits UDF to represent surfaces, offering a higher degree of freedom to represent both closed and open surfaces. We find UDF as a suitable representation to model edges implicitly, in comparison to SDF used in NEAT [64] and edge volume density from NEF [70]." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "21220" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.089, + 0.468, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.245, + 0.468, + 0.328 + ], + "angle": 0, + "content": "Figure 2. UDF learning overview. We utilize a vanilla NeRF [32] MLP that outputs absolute values to model the 3D UDF field. Edge maps are rendered using a density-based edge neural rendering technique, combined with an unbiased UDF rendering approach to eliminate bias. Our primary supervision comes from 2D edge maps predicted by a pre-trained edge detector." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.338, + 0.168, + 0.354 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.363, + 0.47, + 0.439 + ], + "angle": 0, + "content": "Our goal is to build a 3D edge map from multi-view posed 2D edge maps. To this end, we first introduce our edge representation and edge field learning in Sec. 3.1. Next, we present our 3D parametric edge extraction from the learned edge representations in Sec. 3.2." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.447, + 0.462, + 0.463 + ], + "angle": 0, + "content": "3.1. Edge Field with Unsigned Distance Functions" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.474, + 0.469, + 0.58 + ], + "angle": 0, + "content": "Multi-view Edge Maps. Since edge maps are generally invariant to illumination changes and are more robustly detected across various scenes than lines, our method utilizes multiple posed 2D edge maps as inputs. We apply pretrained edge detectors to predict an edge map \\( E \\) for each input RGB image. Each pixel of \\( E \\) has a value within [0, 1], indicating its probability of being an edge." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.583, + 0.47, + 0.645 + ], + "angle": 0, + "content": "Density-based Edge Neural Rendering. We use an unsigned distance function (UDF) to represent edges, denoted as \\( f_{u} \\). This function computes the unsigned distance from a given 3D point to the nearest edge. The UDF is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.153, + 0.651, + 0.469, + 0.669 + ], + "angle": 0, + "content": "\\[\nf _ {u}: \\mathbb {R} ^ {3} \\rightarrow \\mathbb {R} \\quad \\mathrm {x} \\mapsto u = \\operatorname {U D F} (\\mathrm {x}), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.677, + 0.469, + 0.692 + ], + "angle": 0, + "content": "where \\( \\mathbf{x} \\) is a 3D point and \\( u \\) is the corresponding UDF value." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.693, + 0.469, + 0.783 + ], + "angle": 0, + "content": "To render an edge pixel in a certain view, we trace a camera ray \\(\\mathbf{r}(t) = \\mathbf{o} + t\\mathbf{d}\\). This ray originates from the camera's center \\(\\mathbf{o}\\) and extends in direction \\(\\mathbf{d}\\) [32]. To apply volume rendering for edge modeling, it is necessary to establish a mapping \\(\\Omega_{u}\\) [27, 58] that transforms the distance function \\(f_{u}(\\mathbf{r}(t))\\) into volume density \\(\\sigma_{u}(t)\\) as" + }, + { + "type": "equation", + "bbox": [ + 0.193, + 0.791, + 0.469, + 0.808 + ], + "angle": 0, + "content": "\\[\n\\sigma_ {u} (t) = \\Omega_ {u} \\left(f _ {u} (\\mathrm {r} (t))\\right). 
\\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.816, + 0.469, + 0.847 + ], + "angle": 0, + "content": "In the rendering equation, the transmittance \\( T(t) \\) and weight \\( \\omega (t) \\) along the camera ray \\( \\mathbf{r} \\) are accumulated as" + }, + { + "type": "equation", + "bbox": [ + 0.088, + 0.854, + 0.469, + 0.901 + ], + "angle": 0, + "content": "\\[\nT (t) = \\exp \\left(- \\int_ {0} ^ {t} \\sigma_ {u} (v) d v\\right), \\quad w (t) = T (t) \\cdot \\sigma_ {u} (t). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.227 + ], + "angle": 0, + "content": "To effectively handle appearance changes under different viewing angles, most neural field-based surface reconstruction [34, 58, 67, 71] disentangles geometry and appearance. In contrast, edge maps are generally unaffected by lighting, making them view-independent. Therefore, this simplifies the rendering process for edge maps. as it only requires the accumulation of view-independent, density-based weights \\(w\\) along a ray \\(\\mathbf{r}\\) . Now, the rendered edge value \\(\\hat{E}\\) along ray \\(\\mathbf{r}\\) is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.566, + 0.235, + 0.892, + 0.269 + ], + "angle": 0, + "content": "\\[\n\\hat {E} (\\mathbf {r}) = \\int_ {0} ^ {+ \\infty} w (t) d t = 1 - T (+ \\infty), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.278, + 0.892, + 0.354 + ], + "angle": 0, + "content": "Eq. (4) establishes the connection between rendered edge values and the transmittance at the end of the camera rays. Intuitively, this means that the rendered edge value is 1 when the camera ray hits an edge in 3D space, and 0 otherwise. Please refer to the supplements for more details." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.357, + 0.892, + 0.493 + ], + "angle": 0, + "content": "Unbiased Density Functions for UDF Rendering. NEF [70] also uses volume rendering for rendering edges. Unlike ours, they utilize edge density to represent edges and an additional network to predict edge values. However, this approach introduces an inherent bias in edge rendering. Similar to the naive solution presented in NeuS [58], the issue comes from the weight function \\( w \\) in Eq. (3), where its local maximum does not coincide with the actual intersection point of the camera ray and the edges." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.493, + 0.892, + 0.614 + ], + "angle": 0, + "content": "To address this issue, we incorporate unbiased UDF rendering [27] into our density-based edge rendering framework. As proved in NeuS, density function \\(\\sigma_{u}\\) should increase monotonically to make the weight function unbiased. However, UDF values are not monotonous along a ray [27]. To adapt the unbiased density function \\(\\Omega_{s}\\), which is originally induced in NeuS [58], for UDF use, the monotonically increased density function \\(\\sigma_{u}\\) [27] is formulated as" + }, + { + "type": "equation", + "bbox": [ + 0.499, + 0.625, + 0.892, + 0.655 + ], + "angle": 0, + "content": "\\[\n\\sigma_ {u} (t) = \\Psi (t) \\cdot \\Omega_ {s} \\left(f _ {u} (\\mathbf {r} (t))\\right) + (1 - \\Psi (t)) \\cdot \\Omega_ {s} (- f _ {u} (\\mathbf {r} (t))), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.655, + 0.892, + 0.746 + ], + "angle": 0, + "content": "where \\(\\Psi (t)\\) is a differentiable visibility function designed in [27] to capture the monotonicity change in UDF. 
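To make the rendering equations above concrete, here is a minimal discretized sketch of Eqs. (3)-(4): densities are accumulated along each ray and the rendered edge value equals one minus the final transmittance. The density mapping used here (a scaled sigmoid of the UDF value) is only a simple monotone stand-in; the unbiased, visibility-weighted density of Eq. (5) from NeuralUDF [27] is more involved and omitted. The helper name `render_edge_value`, the sampling range, and the scale `s` are assumptions for illustration.

```python
import torch

def render_edge_value(udf, rays_o, rays_d, near=0.0, far=2.0, n_samples=128, s=64.0):
    """Discretized Eqs. (3)-(4): accumulate density-based weights along each ray
    and return E_hat = 1 - T(far), i.e. close to 1 if the ray hits an edge.
    `udf` is any callable mapping (N, 3) points to non-negative distances."""
    t = torch.linspace(near, far, n_samples, device=rays_o.device)            # (S,)
    pts = rays_o[:, None, :] + t[None, :, None] * rays_d[:, None, :]          # (R, S, 3)
    f = udf(pts.reshape(-1, 3)).reshape(pts.shape[:2])                        # (R, S)
    sigma = s * torch.sigmoid(-s * f)          # surrogate density, large near edges
    dt = (far - near) / (n_samples - 1)
    alpha = 1.0 - torch.exp(-sigma * dt)       # per-sample opacity
    trans = torch.cumprod(
        torch.cat([torch.ones_like(alpha[:, :1]), 1.0 - alpha + 1e-10], dim=-1),
        dim=-1)[:, :-1]                        # transmittance T(t), Eq. (3)
    weights = trans * alpha                    # w(t), Eq. (3)
    return weights.sum(dim=-1)                 # = 1 - T(far), Eq. (4)
```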
\\(\\Psi\\) is 0 behind the intersection point between the camera ray and the hit edge, and is 1 before the intersection point. Besides, \\(\\Psi (t)\\) is differentiable around the intersection point to make the UDF optimization more stable." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Ray Sampling Strategy. A key characteristic of 2D edge maps is their significant sparsity, with edges occupying a much smaller area compared to non-edge regions. To enhance training efficiency and stability, we apply an importance sampling strategy for camera rays, with \\(50\\%\\) of rays uniformly sampled from edge areas in the edge maps and the remaining \\(50\\%\\) from non-edge areas. Such a sampling strategy ensures that our UDF field training is concentrated on edge areas, thereby substantially speeding up the training process. Additionally, our sampling strategy offers an" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.99, + 0.518, + 1.0 + ], + "angle": 0, + "content": "21221" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.109, + 0.096, + 0.434, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.34, + 0.473, + 0.412 + ], + "angle": 0, + "content": "Figure 3. Illustration of our 3D parametric edge extraction steps. For simplify, our schematic is depicted in the 2D plane. Our 3D edge extraction algorithm comprises five main stages: point initialization (a), point shifting (b to c), edge direction extraction (c to d), point connection (d to e), and edge fitting (e to f)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.422, + 0.47, + 0.528 + ], + "angle": 0, + "content": "elegant solution to the issue of occlusion, a challenge noted in [70]. The rendered edge maps might contain edges not present in the input edge images due to occlusion. In contrast to the complicated occlusion handling strategy introduced in [70], our approach inherently alleviates this challenge by focusing the training on points from the visible edges presented in the input edge maps." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.532, + 0.462, + 0.546 + ], + "angle": 0, + "content": "Loss Functions. The total loss function can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.195, + 0.557, + 0.47, + 0.573 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {e d g e}} + \\lambda \\mathcal {L} _ {\\text {e i k}}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.582, + 0.47, + 0.643 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_{\\mathrm{edge}}\\) represents the Mean Square Error (MSE) between the rendered and input edge images. \\(\\mathcal{L}_{\\mathrm{eik}}\\) denotes the Eikonal loss [16], which promotes the learned UDF to be physical distance. \\(\\lambda\\) is used to balance these losses." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.651, + 0.357, + 0.667 + ], + "angle": 0, + "content": "3.2. 3D Parametric Edge Extraction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.674, + 0.47, + 0.869 + ], + "angle": 0, + "content": "With UDF learning, edge locations are implicitly encoded within the UDF field. However, accurately extracting edge points from the UDF field is non-trivial due to the absence of a real zero-level set in the UDF field. Additionally, formulating these edge points into parametric edges poses significant challenges due to the complex connections of edges. 
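The 50/50 ray importance sampling and the loss of Eq. (6) described above can be sketched as follows; `edge_map` is assumed to be an (H, W) tensor of edge probabilities in [0, 1], and `edge_thresh`, the ray budget, and `lambda_eik` are illustrative hyper-parameters rather than values reported in the paper.

```python
import torch

def sample_edge_pixels(edge_map, n_rays, edge_thresh=0.5):
    """Draw half of the training rays from edge pixels and half from non-edge
    pixels of a single (H, W) edge map, following the 50/50 strategy."""
    flat = edge_map.flatten()
    edge_idx = torch.nonzero(flat >= edge_thresh).squeeze(-1)
    bg_idx = torch.nonzero(flat < edge_thresh).squeeze(-1)
    half = n_rays // 2
    pick = lambda idx, k: idx[torch.randint(len(idx), (k,))]
    return torch.cat([pick(edge_idx, half), pick(bg_idx, n_rays - half)])

def total_loss(pred_edges, gt_edges, udf_grad_norm, lambda_eik=0.01):
    """Eq. (6): MSE between rendered and observed edge values plus an Eikonal
    term pushing the UDF gradient norm towards 1 (physical distance)."""
    l_edge = torch.mean((pred_edges - gt_edges) ** 2)
    l_eik = torch.mean((udf_grad_norm - 1.0) ** 2)
    return l_edge + lambda_eik * l_eik
```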
To extract points from the learned density field, NEF [70] selects points with edge density values greater than a specified threshold, \\(\\epsilon\\). This approach leads to an approximated edge point set that is \\(\\epsilon\\)-bounded [27]. While this method effectively generates comprehensive point clouds, the \\(\\epsilon\\)-bounded point set does not align accurately with the actual edge locations." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.471, + 0.901 + ], + "angle": 0, + "content": "To eliminate the error in edge point extraction, we leverage the physical property of UDF that reflects real-world" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.096, + 0.77, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.776, + 0.099, + 0.886, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.197, + 0.894, + 0.323 + ], + "angle": 0, + "content": "Figure 4. Illustration of the overview (a) and the cross-section (b) of UDF field. (a) In UDF field, edge points are ideally located at the zero-level set, with UDF values being larger away from these points. A query point \\( x_{t} \\) can be precisely shifted to a more accurate position \\( x_{t + 1} \\) by following the UDF value and the inverse normal vector \\( n(x) \\). The edge direction \\( l(x) \\) aligns with the tangent direction at the edge point \\( x_{t + 1} \\). (b) The inverse normal vectors of all surrounding points on the cross section are pointing towards the query point." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.337, + 0.892, + 0.428 + ], + "angle": 0, + "content": "distances to the edges. Specifically, we develop a 3D edge extraction algorithm composed of five main stages: point initialization, point shifting, edge direction extraction, point connection, and edge fitting, as illustrated in Fig. 3. This algorithm takes the trained UDF field as input and outputs parametric 3D edges, including line segments and curves." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.431, + 0.893, + 0.537 + ], + "angle": 0, + "content": "Point Initialization. Under eikonal loss supervision, the optimized UDF values represent physical distances to the nearest edges. To initialize potential edge points, we begin with the center points of all voxel grids and obtain their UDF values from the UDF field. Subsequently, we eliminate query points whose UDF values exceed a specified threshold \\(\\epsilon^{\\prime}\\) (red points in Fig. 3 (a))." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.541, + 0.894, + 0.616 + ], + "angle": 0, + "content": "Point Shifting. As illustrated in Fig. 4 (a), the normalized inverse gradient of the UDF field indicates the inverse normal vector pointing towards edges. Drawing inspiration from OccNet [31], we refine the point \\( x \\) iteratively towards the edge using its distance and inverse normal direction:" + }, + { + "type": "equation", + "bbox": [ + 0.576, + 0.627, + 0.892, + 0.66 + ], + "angle": 0, + "content": "\\[\nx _ {t + 1} \\Leftarrow x _ {t} - f _ {u} (x _ {t}) \\cdot \\frac {\\nabla f _ {u} (x _ {t})}{\\| \\nabla f _ {u} (x _ {t}) \\|}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.671, + 0.892, + 0.716 + ], + "angle": 0, + "content": "where \\(t\\) denotes the \\(t\\)-th iteration. As a result of this iterative process, the initial points converge to the edge center (from Fig. 3 (b) to Fig. 3 (c))." 
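Below is a minimal sketch of the point-shifting update of Eq. (7), assuming a differentiable UDF module (for instance the `UDFNetwork` sketched earlier) so that the gradient direction can be obtained by automatic differentiation; the iteration count and the keep threshold are illustrative choices, not values from the paper.

```python
import torch

def udf_and_grad_dir(udf, x):
    """Return f_u(x) and the unit gradient of the UDF at x; the negated
    gradient is the inverse normal pointing towards the nearest edge."""
    x = x.clone().requires_grad_(True)
    f = udf(x)
    (grad,) = torch.autograd.grad(f.sum(), x)
    g = grad / (grad.norm(dim=-1, keepdim=True) + 1e-8)
    return f.detach(), g.detach()

def shift_points_to_edges(udf, x, n_iters=5, keep_thresh=0.01):
    """Eq. (7): x_{t+1} = x_t - f_u(x_t) * grad f_u(x_t) / ||grad f_u(x_t)||.
    Points initialized at voxel centers are pulled onto nearby edges; points
    still far from any edge after the iterations are discarded."""
    for _ in range(n_iters):
        f, g = udf_and_grad_dir(udf, x)
        x = x - f[:, None] * g        # step along the inverse normal by the distance f
    f, _ = udf_and_grad_dir(udf, x)
    return x[f < keep_thresh]         # keep only points that converged onto an edge
```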
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.719, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Edge Direction. Establishing connections between edge points is a crucial step in constructing parametric edges. While most methods [11, 42, 70] estimate parameters through least-squares fitting of lines/curves on extracted points, this fitting-based approach for edge extraction is not always robust or accurate. In contrast, inspired by [37, 63], we find that combining the edge direction field with the edge distance field can robustly produce edge parameters. Given that inverse normal vectors invariably point towards edges (see Fig. 4(b)), we first devise an edge direction extraction method based on this set of inverse normal vectors. Specifically, for a query point \\( x \\), we introduce minor shifts" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.989, + 0.518, + 1.0 + ], + "angle": 0, + "content": "21222" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.473, + 0.212 + ], + "angle": 0, + "content": "set \\(\\{\\delta\\}_{N}\\) of size \\(N\\) to generate an adjoining point set \\(\\{x'\\}_{N}\\), where \\(\\{x'\\}_{N} = x + \\{\\delta\\}_{N}\\). The inverse normal vectors of these points, denoted as \\(\\{n\\}_{N}\\), are obtained from the learned UDF field. The edge direction, denoted as \\(l\\), is identified as the null space of \\(\\{n\\}_{N}\\), since the edge direction is perpendicular to all inverse normal vectors in \\(\\{n\\}_{N}\\). Therefore, \\(l\\) can be extracted with singular value decomposition (SVD):" + }, + { + "type": "equation", + "bbox": [ + 0.147, + 0.219, + 0.47, + 0.238 + ], + "angle": 0, + "content": "\\[\nA = U \\Sigma V^{T}, \\quad l = V[:, \\operatorname{argmin}(\\Sigma)], \\tag{8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.244, + 0.47, + 0.336 + ], + "angle": 0, + "content": "where \\(A\\) is the matrix representation of \\(\\{n\\}_{N}\\) and \\(l\\) corresponds to the right singular vector associated with the smallest singular value. Note that \\(N\\) should be sufficiently large to ensure the stability of the extracted edge direction. Unlike DeepLSD [37], we can obtain a precise edge direction field without relying on any 2D direction supervision." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.339, + 0.473, + 0.566 + ], + "angle": 0, + "content": "Point Connection. After accurately determining the edge point locations and directions, we proceed to connect these edge points guided by the edge direction to create polylines (Fig. 3 (d) to (e)). Specifically, we begin by selecting candidate points and then compute directional errors for points adjacent to these candidates. Based on these directional errors, each candidate point is connected to its best-matched neighboring point, i.e., the neighbor whose growing direction aligns best with the candidate's extracted edge direction and thus has the minimal directional error. This process is repeated, extending the edge polylines progressively until no further growth is possible. To ensure efficiency and accuracy, a non-maximum suppression step is employed to remove any redundant points that may exist between the current candidate and the best-matched point. Please refer to the supplements for more algorithm details." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.569, + 0.473, + 0.765 + ], + "angle": 0, + "content": "Edge Fitting. To further parameterize edges, we categorize the polylines into line segments and Bézier curves (Fig. 3 (f)). 
Initially, we utilize RANSAC [14] to fit lines from the polylines, and select the line segment that encompasses the highest number of inlier points. Following [25], we apply Principal Component Analysis (PCA) to the inlier points, re-estimate the line segment utilizing the principal eigenvector and the mean 3D point, and project all inlier points onto the principal eigenvector to derive the 3D endpoints. This fitting process is repeated for each polyline until the number of inlier points falls below a minimum threshold. For the remaining sub-polylines, we fit each of them with a Bézier curve that is defined by four control points." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.473, + 0.903 + ], + "angle": 0, + "content": "To minimize edge redundancy, we further merge line segments and Bézier curves based on two criteria: the shortest distance between candidate edges and the similarity of curvature at their closest points. For line segments, the shortest distance is the minimal point-to-line segment distance, and curvature similarity is their direction's cosine similarity. For Bézier curves, they are the minimal point-to-point distance and the cosine similarity of the tangent vectors at the nearest points, respectively. Candidate edges" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.137 + ], + "angle": 0, + "content": "are merged only if they meet both criteria. This dual-criterion approach ensures that merging happens only when two edges are both similar and close to each other." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.138, + 0.895, + 0.26 + ], + "angle": 0, + "content": "To connect edges, all endpoints of line segments and Bézier curves located within a specified distance threshold are merged into shared endpoints. Furthermore, we implement an optimization step [5, 64] to refine the 3D parametric edges by leveraging 2D edge maps, thereby enhancing edge precision. Specifically, we project 3D parametric edges into edge map frames using camera projection matrices and filter out 3D edges that are not visible in over \\(90\\%\\) of views." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.275, + 0.634, + 0.292 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.301, + 0.687, + 0.317 + ], + "angle": 0, + "content": "4.1. Experiment Setting" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.328, + 0.895, + 0.585 + ], + "angle": 0, + "content": "Datasets. We consider four diverse datasets: CAD models (ABC-NEF [70]), real-world objects (DTU [1]), high-quality indoor scenes (Replica [53]), and real-world outdoor scenes (Tanks & Temples [21]). ABC-NEF dataset comprises 115 CAD models, each accompanied by 50 observed images and ground truth parametric edges. We select 82 CAD models, excluding those containing inconsistent edge observations (e.g., cylinders or balls). DTU dataset provides dense ground-truth point clouds and we select 6 objects that meet the multi-view constraints among scans processed by [72]. Following [5], we derive edge points by projecting ground-truth dense points onto images and then comparing them with the observations on 2D edge maps to filter out non-edge points. Replica and Tanks & Temples datasets contain larger scenes. Due to the lack of ground-truth edges, we conduct qualitative comparisons among baselines." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.589, + 0.895, + 0.651 + ], + "angle": 0, + "content": "Baselines. 
We compare with three state-of-the-art baselines for 3D line/curve mapping, including two learning-based methods, NEF [70] and NEAT [64], and one geometry-based method, LIMAP [25]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.655, + 0.895, + 0.791 + ], + "angle": 0, + "content": "Metrics. Our evaluation involves first sampling points in proportion to the edge's length and subsequently downsampling these points using a voxel grid with a resolution of \\(256^3\\). Following the metrics used in [25, 70], we consider Accuracy (Acc), Completeness (Comp) in millimeters, and Recall \\((R_{\\tau})\\), Precision \\((P_{\\tau})\\), F-score \\((F_{\\tau})\\) in percentage with a threshold \\(\\tau\\) in millimeters. Moreover, we report Edge Direction Consistency (Norm) in percentage to analyze the precision of edge direction extraction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Implementation Details. For \\( f_{u} \\), we utilize 8-layer Multi-layer Perceptrons (MLPs). Each layer in the MLP contains 512 neurons for larger scenes, such as Tanks & Temples, and 256 neurons for other datasets. We sample 1024 rays per batch, among these rays, 512 rays are sampled from edge areas. We train our model for \\( 50k \\) iterations on ABC-NEF dataset, and \\( 200k \\) iterations on other datasets. We train" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "21223" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.154, + 0.087, + 0.822, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.309, + 0.895, + 0.34 + ], + "angle": 0, + "content": "Figure 5. Qualitative comparisons on ABC-NEF [70]. Lines are shown in black and curves in blue. Thanks to our precise edge extraction capabilities for both lines and curves, we achieve complete and accurate modeling of these elements." + }, + { + "type": "table", + "bbox": [ + 0.122, + 0.351, + 0.846, + 0.452 + ], + "angle": 0, + "content": "
MethodDetectorModalAcc↓Comp↓Norm↑R5↑R10↑R20↑P5↑P10↑P20↑F5↑F10↑F20↑
LIMAP [25]LSDLine9.918.794.436.282.387.943.087.693.939.084.390.4
SOLD2Line5.929.690.164.276.679.688.196.497.972.984.086.7
NEF [70]PiDiNet†Curve11.916.990.911.462.091.315.768.596.313.064.693.3
PiDiNetCurve15.116.589.711.753.389.813.652.289.112.351.888.7
DexiNedCurve21.915.785.911.348.387.711.539.871.710.842.176.8
OursPiDiNetEdge9.215.693.730.275.789.835.679.195.432.477.092.2
DexiNedEdge8.88.995.456.488.994.862.989.995.759.188.994.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.456, + 0.893, + 0.486 + ], + "angle": 0, + "content": "Table 1. Edge reconstruction results on ABC-NEF [70]. Results from NEF's released pretrained models are indicated by \\(\\dagger\\). Our method surpasses all others in terms of completeness and achieves accuracy comparable to LIMAP [25]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.499, + 0.473, + 0.696 + ], + "angle": 0, + "content": "our network with the Adam optimizer with a learning rate of \\(5 \\times 10^{-4}\\), while the UDF model \\(f_{u}\\) is trained with a learning rate of \\(1 \\times 10^{-4}\\) and initialized with sphere initialization [66]. For edge detection for NEF and ours, we consider PiDiNet [54] and DexiNed [41]. PiDiNet [54] is employed for indoor scenes, such as DTU and Replica, due to its superior performance in these settings. Conversely, DexiNed [41] is applied to outdoor scenes, as it is primarily trained on outdoor scenes. On the synthetic ABC-NEF dataset, we show results with both detectors. For LIMAP, we follow their paper and we use SOLD2 [36] for indoor scenes and LSD [55] for outdoor scenes. NEAT is trained with 2D wireframes from HAWPV3 [65]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.707, + 0.409, + 0.723 + ], + "angle": 0, + "content": "4.2. Evaluation of 3D Edge Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.473, + 0.901 + ], + "angle": 0, + "content": "Evaluation on ABC-NEF Dataset. We show the quantitative and qualitative comparisons on Table 1 and Fig. 5. Note that NEAT fails on the ABC-NEF dataset because of its heavy dependence on texture input. NEF demonstrates decent performance at \\(\\tau = 20\\). However, their performance drops significantly when \\(\\tau\\) is set to 10 and 5. This is attributed to its bias in edge rendering and its fitting-based post-processing. LIMAP shows remarkable precision across various \\(\\tau\\) thresholds. Such consistency stems from its non-linear refinement over multi-view 2D supports. Nonetheless, LIMAP's inability to reconstruct curves leads" + }, + { + "type": "table", + "bbox": [ + 0.511, + 0.496, + 0.88, + 0.589 + ], + "angle": 0, + "content": "
MethodDetectorCurveLine
Acc↓Comp↓Norm↑Acc↓Comp↓Norm↑
LIMAP [25]LSD272.650.184.834.611.395.9
SOLD2295.782.276.820.018.192.1
NEF [70]PiDiNet†265.027.177.940.413.792.6
PiDiNet263.123.977.643.914.091.4
DexiNed250.520.372.656.213.887.3
OursPiDiNet253.725.788.143.112.893.7
DexiNed241.010.988.746.77.795.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.593, + 0.893, + 0.648 + ], + "angle": 0, + "content": "Table 2. Accuracy, completeness and normal consistency results with curves and lines on ABC-NEF [70]. Our method with DexiNed edge detector yields overall the strongest performance on curves among all baselines." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.657, + 0.892, + 0.763 + ], + "angle": 0, + "content": "to lower scores in completeness and recall. Our method, when combined with either of the 2D edge detectors, consistently outperforms all baselines. Notably, as shown in Table 1, combined with the DexiNed detector, our method achieves superior results in completeness, edge direction consistency, recall, and F-Score. We also show competitive accuracy and precision when compared to LIMAP." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.903 + ], + "angle": 0, + "content": "To further analyze the performance of different edge types, we classify the ground truth edges into curves (including BSplines, ellipses, and circles) and line segments, based on the GT annotations. We provide accuracy, completeness, and edge direction consistency in Table 2 to analyze the separate reconstruction abilities for curves and lines. Note that these results are computed based on all predictions specific to either curves or lines, as other methods do not differentiate between these two types of edges. We" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "21224" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.16, + 0.098, + 0.812, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.331, + 0.895, + 0.361 + ], + "angle": 0, + "content": "Figure 6. Qualitative comparisons on the Replica [53] and Tanks & Temples [21] datasets. The first two scenes are from the Replica dataset, while the last scene is from the Tanks & Temples dataset." 
+ }, + { + "type": "image", + "bbox": [ + 0.097, + 0.371, + 0.212, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.458, + 0.184, + 0.471 + ], + "angle": 0, + "content": "2D Image" + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.375, + 0.33, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.458, + 0.307, + 0.471 + ], + "angle": 0, + "content": "LIMAP [25]" + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.375, + 0.445, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.359, + 0.458, + 0.414, + 0.47 + ], + "angle": 0, + "content": "NEF [70]" + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.474, + 0.212, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.123, + 0.557, + 0.187, + 0.57 + ], + "angle": 0, + "content": "NEAT [64]" + }, + { + "type": "image", + "bbox": [ + 0.22, + 0.473, + 0.325, + 0.55 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.255, + 0.558, + 0.286, + 0.569 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.339, + 0.476, + 0.439, + 0.548 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.359, + 0.557, + 0.413, + 0.57 + ], + "angle": 0, + "content": "GT Edge" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.576, + 0.47, + 0.618 + ], + "angle": 0, + "content": "Figure 7. Qualitative comparisons on DTU [1]. Our results demonstrate complete edge structure, whereas other methods result in redundant line segments or imprecise curves." + }, + { + "type": "table", + "bbox": [ + 0.104, + 0.627, + 0.442, + 0.732 + ], + "angle": 0, + "content": "
ScanLIMAP [25]NEF [70]NEAT [64]Ours
R5↑P5↑R5↑P5↑R5↑P5↑R5↑P5↑
3775.874.339.551.063.985.162.783.9
8375.750.732.021.872.352.472.361.5
10579.164.930.332.068.973.378.578.0
11079.765.331.240.264.379.690.968.3
11859.462.015.325.259.071.175.378.1
12279.979.215.129.170.082.085.382.9
Mean74.966.127.233.266.473.977.575.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.12, + 0.737, + 0.426, + 0.751 + ], + "angle": 0, + "content": "Table 3. Edge reconstruction results on DTU [1]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.761, + 0.47, + 0.822 + ], + "angle": 0, + "content": "can see that our method with DexiNed exhibits superior results in reconstructing curves. As for line segments, our performance is marginally lower than the best-performing method LIMAP which is specially optimized for lines." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Evaluation on DTU Dataset. Our assessment of the DTU dataset, as outlined in Table 3 and Fig. 7, shows our proficiency in real-world scenarios. Notably, our approach achieves the highest recall and precision among all baselines. The DTU dataset presents a challenging scenario for" + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.371, + 0.63, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.516, + 0.424, + 0.63, + 0.436 + ], + "angle": 0, + "content": "(a) w/o point shifting" + }, + { + "type": "image", + "bbox": [ + 0.64, + 0.372, + 0.755, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.645, + 0.424, + 0.751, + 0.436 + ], + "angle": 0, + "content": "(b) w/ point shifting" + }, + { + "type": "image", + "bbox": [ + 0.764, + 0.372, + 0.878, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.774, + 0.424, + 0.868, + 0.436 + ], + "angle": 0, + "content": "(c) edge direction" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.442, + 0.892, + 0.526 + ], + "angle": 0, + "content": "Figure 8. Visualization of point shifting and edge direction. Edge points are shown in point clouds and edge directions in color. The point shifting step significantly refines the locations of edge points. The edge extraction step yields accurate results, as seen in parallel lines sharing the same direction and curves exhibiting continuously changing directions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.536, + 0.892, + 0.657 + ], + "angle": 0, + "content": "edge extraction due to its varying lighting conditions. However, our edge refinement step proves effective in preserving primary edges, a point we elaborate on in Sec. 4.3. Fig. 7 shows LIMAP tends to produce redundant line segments, leading to high recall but reduced precision. NEF's post-processing is sensitive to different scenes, resulting in noisy edge fitting. NEAT, despite producing clean outputs, its inability to handle curves constrains its overall performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.662, + 0.892, + 0.814 + ], + "angle": 0, + "content": "Qualitative Evaluation on Indoor & Outdoor Scenes. To really showcase the power of our method in capturing scene-level geometry, we further run our method on indoor and outdoor scenes. Note that since NEF is not able to produce meaningful reconstructions on larger scenes, we only compare with LIMAP and NEAT. As shown in Fig. 1 and Fig. 6, NEAT, due to its reliance on high-quality surface reconstruction, faces limitations in scene reconstruction, while LIMAP and our method both successfully capture good scene geometry." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.827, + 0.715, + 0.843 + ], + "angle": 0, + "content": "4.3. 
Ablations and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.893, + 0.903 + ], + "angle": 0, + "content": "Parametric Edge Extraction. To better understand our parametric edge extraction process described in Sec. 3.2, we visualize our point shifting and edge direction in Fig. 8." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "21225" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.096, + 0.207, + 0.146 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.128, + 0.155, + 0.174, + 0.166 + ], + "angle": 0, + "content": "(a) Ours" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.096, + 0.331, + 0.146 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.217, + 0.155, + 0.332, + 0.167 + ], + "angle": 0, + "content": "(b) w/o point shifting" + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.096, + 0.455, + 0.146 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.343, + 0.155, + 0.454, + 0.167 + ], + "angle": 0, + "content": "(c) w/o Bezier curve" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.182, + 0.207, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.099, + 0.239, + 0.2, + 0.251 + ], + "angle": 0, + "content": "(d) w/o edge merge." + }, + { + "type": "image", + "bbox": [ + 0.217, + 0.182, + 0.331, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.211, + 0.239, + 0.337, + 0.252 + ], + "angle": 0, + "content": "(e) w/o endpoints merge." + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.182, + 0.455, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.365, + 0.239, + 0.432, + 0.251 + ], + "angle": 0, + "content": "(f) GT Edge" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.257, + 0.47, + 0.311 + ], + "angle": 0, + "content": "Figure 9. Qualitative ablation on different components of our parametric edge extraction. The absence of any module in our edge extraction process results in incomplete or noisy qualitative outcomes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.322, + 0.469, + 0.428 + ], + "angle": 0, + "content": "We can clearly see that the extracted point clouds without point shifting contain redundant and inaccurate edge points (Fig. 8 (a)). In contrast, the point shifting step yields point clouds with sharply defined, precise edges (Fig. 8 (b)). In addition, as shown in Fig. 8 (c), the extracted edge directions along parallel lines are consistent, while those on curves vary continuously. This aligns with our expectations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.429, + 0.47, + 0.641 + ], + "angle": 0, + "content": "Furthermore, we conduct ablation studies in Table 4 and Fig. 9 to evaluate the impact of different components in our edge extraction algorithm. These experiments were performed on the ABC-NEF dataset using the DexiNed detector. First, the removal of the query point shifting step leads to a significant drop in both recall and precision. This indicates that our point-shifting step significantly refines the query point locations. Second, excluding Bézier curves results in a decline in completeness (Fig. 9 (c)), showing that curves are necessary for edge reconstruction. Third, omitting the edge merging step leads to redundant small line segments, as evident in Fig. 
9 (d). Finally, the removal of endpoint merging impairs connectivity between edges, as shown in Fig. 9 (e)." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.654, + 0.47, + 0.734 + ], + "angle": 0, + "content": "
MethodAcc↓Comp↓R5↑P5↑F5↑
aOurs8.88.956.462.959.1
bw/o point shifting15.39.929.218.722.2
cw/o B'ezier curve9.412.154.265.859.0
dw/o edge merging10.38.753.845.348.6
ew/o endpoints merging9.39.051.557.754.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.739, + 0.47, + 0.795 + ], + "angle": 0, + "content": "Table 4. Ablation studies on different component of parametric edge extraction on ABC-NEF [70] with DexiNed [41]. Our parametric edge extraction approach with all components achieves the optimal balance between accuracy and completeness." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Edge Refinement. In Fig. 10, we study the effectiveness of our edge refinement module. When input edge maps contain some noises in dark scenes, our initial 3D edge map, without the edge refinement, exhibits some artifacts. However, the edge refinement module markedly mitigates artifacts, achieving a balance between recall and precision." + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.09, + 0.695, + 0.158 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.547, + 0.159, + 0.658, + 0.171 + ], + "angle": 0, + "content": "w/o edge refinement" + }, + { + "type": "image", + "bbox": [ + 0.702, + 0.091, + 0.874, + 0.157 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.735, + 0.159, + 0.84, + 0.171 + ], + "angle": 0, + "content": "w/ edge refinement" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.177, + 0.892, + 0.219 + ], + "angle": 0, + "content": "Figure 10. Ablation study on edge refinement. Our edge refinement effectively eliminates the majority of noisy edges in background areas." + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.23, + 0.89, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.411, + 0.893, + 0.467 + ], + "angle": 0, + "content": "Figure 11. Dense surface reconstruction on Replica [53]. Utilizing our trained UDF MLP for initialization enables MonoSDF to capture more geometric details, such as the vase in the top row, the shelf in the bottom row." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.482, + 0.884, + 0.497 + ], + "angle": 0, + "content": "4.4. Application on Dense Surface Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.507, + 0.892, + 0.581 + ], + "angle": 0, + "content": "Our method has demonstrated its proficiency in reconstructing 3D edges across a diverse range of scenarios. Building on this success, we further explore the potential of our learned representation to benefit other tasks. A particularly relevant area is dense surface reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.584, + 0.893, + 0.72 + ], + "angle": 0, + "content": "As shown in Fig. 11, the recent neural-implicit surface reconstruction approach MonoSDF [72] can show decent reconstruction results from only posed multi-view images. However, we notice that they still struggle to capture detailed geometry. To address this, we integrate our method into the MonoSDF pipeline. Specifically, we initialize the geometry MLPs of MonoSDF with our pre-trained UDF MLPs. We can clearly see that such a simple integration can enhance the recovery of geometric details." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.738, + 0.627, + 0.753 + ], + "angle": 0, + "content": "5. 
Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.765, + 0.892, + 0.9 + ], + "angle": 0, + "content": "We introduced EMAP, a 3D neural edge reconstruction pipeline that learns accurate 3D edge point locations and directions implicitly from multi-view edge maps through UDF and abstracts 3D parametric edges from the learned UDF field. Through extensive evaluations, EMAP demonstrates remarkable capabilities in CAD modeling and in capturing detailed geometry of objects and scenes. Furthermore, we show that our learned UDF field enriches the geometric details for neural surface reconstruction." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "21226" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.17 + ], + "angle": 0, + "content": "[1] Henrik Aanaes, Rasmus Ramsbøl Jensen, George Vogiatzis, Engin Tola, and Anders Bjorholm Dahl. Large-scale data for multiple-view stereopsis. International Journal of Computer Vision (IJCV), 2016. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.227 + ], + "angle": 0, + "content": "[2] Hichem Abdellali, Robert Frohlich, Viktor Vilagos, and Zoltan Kato. L2d2: Learnable line detector and descriptor. In Proc. of the International Conf. on 3D Vision (3DV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.23, + 0.47, + 0.285 + ], + "angle": 0, + "content": "[3] Dejan Azinović, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural rgb-d surface reconstruction. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.288, + 0.47, + 0.342 + ], + "angle": 0, + "content": "[4] Adrien Bartoli and Peter Sturm. Structure-from-motion using lines: Representation, triangulation, and bundle adjustment. Computer Vision and Image Understanding (CVIU), 2005. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.345, + 0.47, + 0.4 + ], + "angle": 0, + "content": "[5] Andrea Bignoli, Andrea Romanoni, and Matteo Matteucci. Multi-view stereo 3d edge reconstruction. In Proc. of the IEEE Winter Conference on Applications of Computer Vision (WACV), 2018. 1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.402, + 0.47, + 0.471 + ], + "angle": 0, + "content": "[6] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3d reconstruction. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.473, + 0.47, + 0.557 + ], + "angle": 0, + "content": "[7] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.558, + 0.47, + 0.614 + ], + "angle": 0, + "content": "[8] Manmohan Chandraker, Jongwoo Lim, and David Kriegman. Moving in stereo: Efficient structure and motion using lines. In Proc. of the International Conf. on Computer Vision (ICCV), pages 1741-1748. IEEE, 2009. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.616, + 0.47, + 0.657 + ], + "angle": 0, + "content": "[9] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.66, + 0.47, + 0.715 + ], + "angle": 0, + "content": "[10] Wentao Cheng, Sheng Yang, Maomin Zhou, Ziyuan Liu, Yiming Chen, and Mingyang Li. Road mapping and localization using sparse semantic visual features. IEEE Robotics and Automation Letters, 6(4):8118-8125, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.717, + 0.47, + 0.786 + ], + "angle": 0, + "content": "[11] Kseniya Cherenkova, Elena Dupont, Anis Kacem, Ilya Arzhannikov, Gleb Gusev, and Djamila Aouada. Sepicnet: Sharp edges recovery by parametric inference of curves in 3d shapes. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.788, + 0.47, + 0.871 + ], + "angle": 0, + "content": "[12] Scott Ettinger, Shuyang Cheng, Benjamin Caine, Chenxi Liu, Hang Zhao, Sabeek Pradhan, Yuning Chai, Ben Sapp, Charles R Qi, Yin Zhou, et al. Large scale interactive motion forecasting for autonomous driving: The waymo open motion dataset. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9710-9719, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.873, + 0.468, + 0.902 + ], + "angle": 0, + "content": "[13] Liangji Fang, Qinhong Jiang, Jianping Shi, and Bolei Zhou. Tpnet: Trajectory proposal network for motion prediction." + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.121 + ], + "angle": 0, + "content": "In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6797-6806, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.123, + 0.892, + 0.177 + ], + "angle": 0, + "content": "[14] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 1981. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.179, + 0.892, + 0.234 + ], + "angle": 0, + "content": "[15] Michael Goesele, Noah Snavely, Brian Curless, Hugues Hoppe, and Steven M Seitz. Multi-view stereo for community photo collections. In Proc. of the International Conf. on Computer Vision (ICCV), 2007. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.236, + 0.892, + 0.291 + ], + "angle": 0, + "content": "[16] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. In Proc. of the International Conf. on Machine learning (ICML), 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.293, + 0.892, + 0.334 + ], + "angle": 0, + "content": "[17] Yijia He, Ji Zhao, Yue Guo, Wenhao He, and Kui Yuan. Plizio: Tightly-coupled monocular visual-inertial odometry using point and line features. Sensors, 18(4):1159, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.335, + 0.892, + 0.39 + ], + "angle": 0, + "content": "[18] Manuel Hofer, Michael Maurer, and Horst Bischof. Improving sparse 3d models for man-made environments using line-based 3d reconstruction. In Proc. of the International Conf. on 3D Vision (3DV), 2014. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.392, + 0.892, + 0.445 + ], + "angle": 0, + "content": "[19] Manuel Hofer, Michael Maurer, and Horst Bischof. Efficient 3d scene abstraction using line segments. Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.448, + 0.892, + 0.516 + ], + "angle": 0, + "content": "[20] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local implicit grid representations for 3d scenes. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.519, + 0.892, + 0.56 + ], + "angle": 0, + "content": "[21] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Trans. on Graphics (ToG), 2017. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.562, + 0.892, + 0.617 + ], + "angle": 0, + "content": "[22] Tianyu Li, Li Chen, Xiangwei Geng, Huijie Wang, Yang Li, Zhenbo Liu, Shengyin Jiang, Yuting Wang, Hang Xu, Chunjing Xu, et al. Topology reasoning for driving scenes. arXiv preprint arXiv:2304.05277, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.619, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[23] Hyunjun Lim, Jinwoo Jeon, and Hyun Myung. Uv-slam: Unconstrained line-based slam using vanishing points for structural mapping. IEEE Robotics and Automation Letters (RA-L), 7(2):1518-1525, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.676, + 0.892, + 0.744 + ], + "angle": 0, + "content": "[24] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. Dist: Rendering deep implicit signed distance function with differentiable sphere tracing. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.746, + 0.892, + 0.801 + ], + "angle": 0, + "content": "[25] Shaohui Liu, Yifan Yu, Rémi Pautrat, Marc Pollefeys, and Viktor Larsson. 3d line mapping revisited. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.803, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[26] Yujia Liu, Stefano D'Aronco, Konrad Schindler, and Jan Dirk Wegner. Pc2wf: 3d wireframe reconstruction from raw point clouds. In Proc. of the International Conf. on Learning Representations (ICLR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[27] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Yuan Liu, Peng Wang, Christian Theobalt, Taku Komura, and Wenping Wang. Neuraludf: Learning unsigned distance fields for" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "21227" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "multi-view reconstruction of surfaces with arbitrary topologies. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 
2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.135, + 0.47, + 0.204 + ], + "angle": 0, + "content": "[28] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.205, + 0.47, + 0.259 + ], + "angle": 0, + "content": "[29] Daniele Marzorati, Matteo Matteucci, Davide Migliore, and Domenico G Sorrenti. Integration of 3d lines and points in 6dof visual slam by uncertain projective geometry. In EMCR. CiteSeer, 2007. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.261, + 0.469, + 0.328 + ], + "angle": 0, + "content": "[30] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.33, + 0.469, + 0.398 + ], + "angle": 0, + "content": "[31] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.4, + 0.469, + 0.468 + ], + "angle": 0, + "content": "[32] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.47, + 0.469, + 0.524 + ], + "angle": 0, + "content": "[33] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.526, + 0.469, + 0.58 + ], + "angle": 0, + "content": "[34] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proc. of the International Conf. on Computer Vision (ICCV), 2021. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.581, + 0.469, + 0.649 + ], + "angle": 0, + "content": "[35] Jeong Joon Park, Peter Florence, Julian Straub, Richard A. Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.651, + 0.469, + 0.718 + ], + "angle": 0, + "content": "[36] Rémi Pautrat, Juan-Ting Lin, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Sold2: Self-supervised occlusion-aware line description and detection. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.721, + 0.469, + 0.788 + ], + "angle": 0, + "content": "[37] Rémi Pautrat, Daniel Barath, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Deeplsd: Line segment detection and refinement with deep image gradients. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 
2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.469, + 0.844 + ], + "angle": 0, + "content": "[38] Rémi* Pautrat, Iago* Suárez, Yifan Yu, Marc Pollefeys, and Viktor Larsson. GlueStick: Robust image matching by sticking points and lines together. In Proc. of the International Conf. on Computer Vision (ICCV), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[39] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[40] Songyou Peng, Chiyu \"Max\" Jiang, Yiyi Liao, Michael Niemeyer, Marc Pollefeys, and Andreas Geiger. Shape as points: A differentiable poisson solver. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[41] Xavier Soria Poma, Edgar Riba, and Angel Sappa. Dense extreme inception network: Towards a robust cnn model for edge detection. In Proc. of the IEEE Winter Conference on Applications of Computer Vision (WACV), 2020. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.205, + 0.892, + 0.245 + ], + "angle": 0, + "content": "[42] Zhijian Qiao, Zehuan Yu, Huan Yin, and Shaojie Shen. Online monocular lane mapping using catmull-rom spline. arXiv preprint arXiv:2307.11653, 2023. 1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.247, + 0.892, + 0.315 + ], + "angle": 0, + "content": "[43] Tong Qin, Yuxin Zheng, Tongqing Chen, Yilun Chen, and Qing Su. A light-weight semantic map for visual localization towards autonomous driving. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 11248-11254. IEEE, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.316, + 0.892, + 0.371 + ], + "angle": 0, + "content": "[44] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with thousands of tiny mlp's. In Proc. of the International Conf. on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.372, + 0.892, + 0.44 + ], + "angle": 0, + "content": "[45] Chris Rorden, Roger Newman-Norlund, Chris Drake, Daniel R Glen, Julius Fridriksson, Taylor Hanayik, and Paul A Taylor. Improving 3d edge detection for visual inspection of mri coregistration and alignment. bioRxiv, pages 2022-09, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.442, + 0.892, + 0.509 + ], + "angle": 0, + "content": "[46] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proc. of the International Conf. on Computer Vision (ICCV), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.511, + 0.892, + 0.566 + ], + "angle": 0, + "content": "[47] Grant Schindler, Panchapagesan Krishnamurthy, and Frank Dellaert. Line-based structure from motion for urban environments. In International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT), 2006. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.567, + 0.892, + 0.621 + ], + "angle": 0, + "content": "[48] Johannes L Schonberger, Enliang Zheng, Jan-Michael Frahm, and Marc Pollefeys. Pixelwise view selection for unstructured multi-view stereo. In Proc. of the European Conf. on Computer Vision (ECCV), 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.623, + 0.892, + 0.677 + ], + "angle": 0, + "content": "[49] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.679, + 0.892, + 0.747 + ], + "angle": 0, + "content": "[50] Steven M Seitz, Brian Curless, James Diebel, Daniel Scharstein, and Richard Szeliski. A comparison and evaluation of multi-view stereo reconstruction algorithms. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2006. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.748, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[51] Shaoshuai Shi, Li Jiang, Dengxin Dai, and Bernt Schiele. Motion transformer with global intention localization and local movement refinement. Advances in Neural Information Processing Systems, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.804, + 0.892, + 0.872 + ], + "angle": 0, + "content": "[52] Fangwen Shu, Jiaxuan Wang, Alain Pagani, and Didier Stricker. Structure plp-slam: Efficient sparse mapping and localization using point, line and plane for monocular, rgb-d and stereo cameras. In Proc. IEEE International Conf. on Robotics and Automation (ICRA). 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[53] Julian Straub, Thomas Whelan, Lingni Ma, Yufan Chen, Erik Wijmans, Simon Green, Jakob J Engel, Raul Mur-Artal, Carl" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "21228" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.47, + 0.133 + ], + "angle": 0, + "content": "Ren, Shobhit Verma, et al. The replica dataset: A digital replica of indoor spaces. arXiv preprint arXiv:1906.05797, 2019. 1, 5, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.134, + 0.47, + 0.189 + ], + "angle": 0, + "content": "[54] Zhuo Su, Wenzhe Liu, Zitong Yu, Dewen Hu, Qing Liao, Qi Tian, Matti Pietikainen, and Li Liu. Pixel difference networks for efficient edge detection. In Proc. of the International Conf. on Computer Vision (ICCV), 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.19, + 0.469, + 0.245 + ], + "angle": 0, + "content": "[55] Rafael Grompone Von Gioi, Jeremie Jakubowicz, Jean-Michel Morel, and Gregory Randall. Lsd: A fast line segment detector with a false detection control. IEEE Trans. on Pattern Analysis and Machine Intelligence (PAMI), 2008. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.247, + 0.469, + 0.315 + ], + "angle": 0, + "content": "[56] Huijie Wang, Zhenbo Liu, Yang Li, Tianyu Li, Li Chen, Chonghao Sima, Yuting Wang, Shengyin Jiang, Feng Wen, Hang Xu, et al. Road genome: A topology reasoning benchmark for scene understanding in autonomous driving. arXiv preprint arXiv:2304.10440, 2023. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.316, + 0.469, + 0.384 + ], + "angle": 0, + "content": "[57] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.385, + 0.469, + 0.454 + ], + "angle": 0, + "content": "[58] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.455, + 0.469, + 0.51 + ], + "angle": 0, + "content": "[59] Xiaogang Wang, Yuelang Xu, Kai Xu, Andrea Tagliasacchi, Bin Zhou, Ali Mahdavi-Amiri, and Hao Zhang. Pie-net: Parametric inference of point cloud edges. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.511, + 0.469, + 0.566 + ], + "angle": 0, + "content": "[60] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. Hf-neus: Improved surface reconstruction using high-frequency details. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.567, + 0.469, + 0.634 + ], + "angle": 0, + "content": "[61] Dong Wei, Yi Wan, Yongjun Zhang, Xinyi Liu, Bin Zhang, and Xiqi Wang. Elsr: Efficient line segment reconstruction with planes and points guidance. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.636, + 0.469, + 0.704 + ], + "angle": 0, + "content": "[62] Qiangeng Xu, Weiyue Wang, Duygu Ceylan, Radomir Mech, and Ulrich Neumann. DISN: deep implicit surface network for high-quality single-view 3d reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.706, + 0.469, + 0.773 + ], + "angle": 0, + "content": "[63] Nan Xue, Song Bai, Fudong Wang, Gui-Song Xia, Tianfu Wu, and Liangpei Zhang. Learning attraction field representation for robust line segment detection. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.469, + 0.829 + ], + "angle": 0, + "content": "[64] Nan Xue, Bin Tan, Yuxi Xiao, Liang Dong, Gui-Song Xia, and Tianfu Wu. Volumetric wireframe parsing from neural attraction fields. arXiv preprint arXiv:2307.10206, 2023. 1, 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.831, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[65] Nan Xue, Tianfu Wu, Song Bai, Fu-Dong Wang, Gui-Song Xia, Liangpei Zhang, and Philip HS Torr. Holistically-attracted wireframe parsing: From supervised to self-supervised learning. IEEE Trans. on Pattern Analysis and Machine Intelligence (PAMI), 2023. 2, 6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.161 + ], + "angle": 0, + "content": "[66] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Ronen Basri, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. 
In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.163, + 0.892, + 0.232 + ], + "angle": 0, + "content": "[67] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.234, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[68] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.277, + 0.892, + 0.329 + ], + "angle": 0, + "content": "[69] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.332, + 0.892, + 0.401 + ], + "angle": 0, + "content": "[70] Yunfan Ye, Renjiao Yi, Zhirui Gao, Chenyang Zhu, Zhiping Cai, and Kai Xu. Nef: Neural edge fields for 3d parametric curve reconstruction from multi-view images. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.403, + 0.892, + 0.458 + ], + "angle": 0, + "content": "[71] Zehao Yu, Anpei Chen, Bozidar Antic, Songyou Peng, Apratim Bhattacharyya, Michael Niemeyer, Siyu Tang, Torsten Sattler, and Andreas Geiger. Sdfstudio: A unified framework for surface reconstruction, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.46, + 0.892, + 0.529 + ], + "angle": 0, + "content": "[72] Zehao Yu, Songyou Peng, Michael Niemeyer, Torsten Sattler, and Andreas Geiger. Monosdf: Exploring monocular geometric cues for neural implicit surface reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2, 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.531, + 0.892, + 0.572 + ], + "angle": 0, + "content": "[73] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.574, + 0.892, + 0.642 + ], + "angle": 0, + "content": "[74] Xiangyu Zhu, Dong Du, Weikai Chen, Zhiyou Zhao, Yinyu Nie, and Xiaoguang Han. Nerve: Neural volumetric edges for parametric curve extraction from point cloud. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.644, + 0.892, + 0.712 + ], + "angle": 0, + "content": "[75] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.714, + 0.892, + 0.769 + ], + "angle": 0, + "content": "[76] Zihan Zhu, Songyou Peng, Viktor Larsson, Zhaopeng Cui, Martin R Oswald, Andreas Geiger, and Marc Pollefeys. Nicer-slam: Neural implicit scene encoding for rgb slam. In Proc. of the International Conf. on 3D Vision (3DV), 2024. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.77, + 0.892, + 0.826 + ], + "angle": 0, + "content": "[77] Xingxing Zuo, Xiaojia Xie, Yong Liu, and Guoquan Huang. Robust visual slam with point and line features. In Proc. IEEE International Conf. on Intelligent Robots and Systems (IROS), 2017. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.826 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "21229" + } + ] +] \ No newline at end of file diff --git a/2024/3D Neural Edge Reconstruction/12034c9b-4470-4339-9189-38596581605f_origin.pdf b/2024/3D Neural Edge Reconstruction/12034c9b-4470-4339-9189-38596581605f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..77d492b89b9eae3976c1e1cc5cb8434d32393845 --- /dev/null +++ b/2024/3D Neural Edge Reconstruction/12034c9b-4470-4339-9189-38596581605f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5dbe6da0c694451fa55be884d0c623d58e553eca699c1aacc33f23a152f76bb4 +size 8742308 diff --git a/2024/3D Neural Edge Reconstruction/full.md b/2024/3D Neural Edge Reconstruction/full.md new file mode 100644 index 0000000000000000000000000000000000000000..cd9184eb6cea0addeab3afa079cd63cf67466946 --- /dev/null +++ b/2024/3D Neural Edge Reconstruction/full.md @@ -0,0 +1,379 @@ +# 3D Neural Edge Reconstruction + +Lei Li $^{1}$ Songyou Peng $^{1,2\dagger}$ Zehao Yu $^{3,4}$ Shaohui Liu $^{1}$ Rémi Pautrat $^{1,6}$ Xiaochuan Yin $^{5}$ Marc Pollefeys $^{1,6}$ $^{1}$ ETH Zurich $^{2}$ MPI for Intelligent Systems, Tübingen $^{3}$ University of Tübingen $^{4}$ Tübingen AI Center $^{5}$ Utopilot $^{6}$ Microsoft neural-edge-map.github.io + +# Abstract + +Real-world objects and environments are predominantly composed of edge features, including straight lines and curves. Such edges are crucial elements for various applications, such as CAD modeling, surface meshing, lane mapping, etc. However, existing traditional methods only prioritize lines over curves for simplicity in geometric modeling. To this end, we introduce EMAP, a new method for learning 3D edge representations with a focus on both lines and curves. Our method implicitly encodes 3D edge distance and direction in Unsigned Distance Functions (UDF) from multi-view edge maps. On top of this neural representation, we propose an edge extraction algorithm that robustly abstracts parametric 3D edges from the inferred edge points and their directions. Comprehensive evaluations demonstrate that our method achieves better 3D edge reconstruction on multiple challenging datasets. We further show that our learned UDF field enhances neural surface reconstruction by capturing more details. + +# 1. Introduction + +The straight line belongs to men, the curved one to God. — Antonio Gaudi + +This sentiment is evident in the visual composition of our environments. While straight lines are common in manmade scenes such as walls, windows, and doors [25], curves are more general and ubiquitous from cups, bridges, architectures, to Gothic arts. Edges, which are composed of both lines and curves, are the fundamental elements of visual perception. Therefore, accurate edge modeling is crucial for understanding the geometry and structure of our 3D world. + +Conventional approaches on 3D reconstruction typically involve inferring dense geometry and abstracting meshes from 2D images [15, 34, 48, 50, 67, 69]. 
However, the presence of 3D edges offers substantial advantages. First, + +![](images/f32a4e2e65c6a8de962c388fc298595f11a5a3a318eb562c0f2cf1aa633d4c75.jpg) +(a) An Indoor Scene + +![](images/e6316e0a68dea4fa60357f5b54aca0a5710246b7a1b48dfbe7cb8ea09541af8b.jpg) +(b) LIMAP [25] + +![](images/655a4d113703ef7e8eaee1f49b5e069da98404b01c4a16f403ecc227efa8947a.jpg) +(c) NEAT [64] + +![](images/e4f4f85cea6280fcd180b2e5fa06d3f1425e04e7805acd5d8f57819546e1c071.jpg) +(d) EMAP (Ours) +Figure 1. Example 3D edge reconstruction on Replica [53]. While prior methods such as LIMAP [25] and NEAT [64] only reconstruct distinctive line segments, our method generates a more complete 3D edge map combining both line and curve features. + +edges are naturally compact representations that capture the salient features oftentimes around geometric boundaries, which are good indicators for more lightweight and adaptive meshing and 3D modeling with comparably less redundancy. Secondly, in contrast to dense surface modeling from images, 3D edges are unaffected to illumination changes, thus exhibiting better reliability on multi-view reconstruction. Last but not least, 3D edges serve as a universal representation in real-world scenarios, and can be potentially integrated into many applications such as lane mapping [10, 22, 42, 43, 56], motion forecasting [12, 13, 51], medical imaging [45], etc. + +The reconstruction of 3D edges is conventionally approached by matching their 2D observations across views. While Bignoli et al. [5] proposed edge point matching using the sparse map from Structure-from-Motion (SfM), it is inherently ill-posed due to its heavy reliance on cross-view edge correspondences, which are generally sparse and prone to ambiguity. Recent works have also improved the quality of 3D line reconstruction [18, 25, 61, 64], but primarily excel in specific scenes where straight lines dominate. + +nate. While general real-world environments with curved structures pose more challenges, recent progress on 2D detection and matching is mostly limited to point and line features and thus inapplicable to such scenarios. + +A recent work NEF [70] made a significant step forward in learning 3D curves from multi-view 2D edge observations. Inspired by the recent success of neural radiance field (NeRF) [32], they introduce a neural edge density field and show decent results in reconstructing edges for simple objects. Nevertheless, their proposed edge density field has an inherent bias in edge rendering, leading to less accurate reconstruction. Moreover, its fitting-based edge parameterization process not only requires tedious tuning to specific data, but also struggles with its scalability to larger and more complex scenes. This motivates us to develop a more robust system for 3D edge mapping from 2D observations, which would benefit a wide range of downstream tasks. + +Towards this goal, we introduce EMAP, a novel approach for accurate 3D edge reconstruction from only 2D edge maps. EMAP comprises the following steps. Firstly, we learn the neural unsigned distance function (UDF) to implicitly model 3D edges, utilizing an unbiased rendering equation to mitigate the inaccuracies observed in NEF. Secondly, once learned, we can obtain the unsigned distance and normal for each point in the space, so a set of precise edge points with directions can be extracted. 
Finally, based on the guidance of every edge point's location and direction, we design a simple yet robust algorithm for parametric line and curve extraction, that can be applied across various challenging scenarios. Our comprehensive evaluations of EMAP, from synthetic CAD models to real-world indoor and outdoor scenes, show its superior performance in 3D edge reconstruction. In addition, we also observe that initializing the optimization process of the recent neural implicit surface reconstruction method with our trained UDF field enables the reconstructing of better details. + +Overall, the contributions of this paper are as follows: + +- We propose EMAP, a 3D neural edge reconstruction pipeline that can learn accurate 3D edge locations and directions implicitly from multi-view edge maps. +- We develop a 3D edge extraction algorithm to robustly connect edge points with edge direction guidance. +- We show that our model can generate complete 3D edge maps and help optimize dense surfaces. + +# 2. Related Work + +Geometry-based 3D Line Reconstruction. As a pioneering work, Bartoli and Sturm [4] introduces a full SfM system using line segments, which is later improved under Manhattan assumption [47] and in stereo systems [8]. Recently, with the developments of line detections [36, 37, 65] and matching [2, 36, 38] thanks to the advent to deep learn + +ing, several works have attempted to revisit the line mapping problem through graph clustering [19], leveraging planar information [61] and incorporating into SLAM systems [17, 23, 29, 52, 77]. In particular, recent work LIMAP [25] introduces a robust 3D line mapping system with structural priors which can adapt to different existing line detectors and matchers. Despite these advances, all the works are limited to straight lines and often produce segmented small lines when it comes to curves. In contrast, edges are generally easier to detect and are redundantly present in most scenes. In this project, rather than relying on lines, we build our 3D mapping system using robust 2D edge maps. + +Learning-based 3D Line/Curve Reconstruction. In contrast to geometry-based methods, some approaches [26, 59, 74] shifted their focus to directly extract parametric curves from given edge point clouds. Typically, they require keypoint detection, clustering, and linkage. Even under the relaxed setting, it is still challenging to generate clean parametric curves due to the complex connectivity of curves and imperfect point clouds [70]. To address this limitation, NEF [70] integrates NeRF [32] for edge mapping from multi-view images, extracting 3D curves from the learned neural edge field through a carefully designed postprocessing. While NEF achieves decent performance on CAD models, it is constrained to simple and low-precision object-level edge mapping. A concurrent work, NEAT [64], utilizes VolSDF [69] to build dense surfaces and incorporates a global junction perceiving module to optimize 3D line junctions with 2D wireframe supervision. Although NEAT can produce 3D wireframes, it is restricted to modeling line segments only. Additionally, their need for textured objects is a limitation. By contrast, we use the unisgned distance function (UDF) to represent edges, enabling the construction of both line segments and curves without the necessity for target textures. We further show that our method can faithfully reconstruct edges for complex scenes. + +Neural Implicit Representations. 
Neural implicit representations have emerged as a powerful tool for a spectrum of computer vision tasks, including object geometry representation [9, 24, 30, 34, 35, 40, 46, 57, 62, 66, 68], scene reconstruction [6, 20, 39, 71, 72, 75, 76], novel view synthesis [28, 32, 44, 73] and generative modelling [7, 33, 49]. Recent works [27, 58, 60, 69, 72] show impressive high-fidelity reconstruction by learning the implicit signed distance function (SDF). However, the SDF representation constrains to modeling closed, watertight surfaces. In contrast, NeuralUDF [3] exploits UDF to represent surfaces, offering a higher degree of freedom to represent both closed and open surfaces. We find UDF as a suitable representation to model edges implicitly, in comparison to SDF used in NEAT [64] and edge volume density from NEF [70]. + +![](images/d23a375aefeb8896ed56301abffb5f0a0787d0ecf38319c3daa5894552c8c5dc.jpg) +Figure 2. UDF learning overview. We utilize a vanilla NeRF [32] MLP that outputs absolute values to model the 3D UDF field. Edge maps are rendered using a density-based edge neural rendering technique, combined with an unbiased UDF rendering approach to eliminate bias. Our primary supervision comes from 2D edge maps predicted by a pre-trained edge detector. + +# 3. Method + +Our goal is to build a 3D edge map from multi-view posed 2D edge maps. To this end, we first introduce our edge representation and edge field learning in Sec. 3.1. Next, we present our 3D parametric edge extraction from the learned edge representations in Sec. 3.2. + +# 3.1. Edge Field with Unsigned Distance Functions + +Multi-view Edge Maps. Since edge maps are generally invariant to illumination changes and are more robustly detected across various scenes than lines, our method utilizes multiple posed 2D edge maps as inputs. We apply pretrained edge detectors to predict an edge map $E$ for each input RGB image. Each pixel of $E$ has a value within [0, 1], indicating its probability of being an edge. + +Density-based Edge Neural Rendering. We use an unsigned distance function (UDF) to represent edges, denoted as $f_{u}$ . This function computes the unsigned distance from a given 3D point to the nearest edge. The UDF is defined as: + +$$ +f _ {u}: \mathbb {R} ^ {3} \rightarrow \mathbb {R} \quad \mathrm {x} \mapsto u = \operatorname {U D F} (\mathrm {x}), \tag {1} +$$ + +where $\mathbf{x}$ is a 3D point and $u$ is the corresponding UDF value. + +To render an edge pixel in a certain view, we trace a camera ray $\mathbf{r}(t) = \mathbf{o} + t\mathbf{d}$ . This ray originates from the camera's center $\mathbf{o}$ and extends in direction $\mathbf{d}$ [32]. To apply volume rendering for edge modeling, it is necessary to establish a mapping $\Omega_{u}$ [27, 58] that transforms the distance function $f_{u}(\mathbf{r}(t))$ into volume density $\sigma_{u}(t)$ as + +$$ +\sigma_ {u} (t) = \Omega_ {u} \left(f _ {u} (\mathrm {r} (t))\right). \tag {2} +$$ + +In the rendering equation, the transmittance $T(t)$ and weight $\omega (t)$ along the camera ray $\mathbf{r}$ are accumulated as + +$$ +T (t) = \exp \left(- \int_ {0} ^ {t} \sigma_ {u} (v) d v\right), \quad w (t) = T (t) \cdot \sigma_ {u} (t). \tag {3} +$$ + +To effectively handle appearance changes under different viewing angles, most neural field-based surface reconstruction [34, 58, 67, 71] disentangles geometry and appearance. In contrast, edge maps are generally unaffected by lighting, making them view-independent. Therefore, this simplifies the rendering process for edge maps. 
as it only requires the accumulation of view-independent, density-based weights $w$ along a ray $\mathbf{r}$ . Now, the rendered edge value $\hat{E}$ along ray $\mathbf{r}$ is formulated as: + +$$ +\hat {E} (\mathbf {r}) = \int_ {0} ^ {+ \infty} w (t) d t = 1 - T (+ \infty), \tag {4} +$$ + +Eq. (4) establishes the connection between rendered edge values and the transmittance at the end of the camera rays. Intuitively, this means that the rendered edge value is 1 when the camera ray hits an edge in 3D space, and 0 otherwise. Please refer to the supplements for more details. + +Unbiased Density Functions for UDF Rendering. NEF [70] also uses volume rendering for rendering edges. Unlike ours, they utilize edge density to represent edges and an additional network to predict edge values. However, this approach introduces an inherent bias in edge rendering. Similar to the naive solution presented in NeuS [58], the issue comes from the weight function $w$ in Eq. (3), where its local maximum does not coincide with the actual intersection point of the camera ray and the edges. + +To address this issue, we incorporate unbiased UDF rendering [27] into our density-based edge rendering framework. As proved in NeuS, density function $\sigma_{u}$ should increase monotonically to make the weight function unbiased. However, UDF values are not monotonous along a ray [27]. To adapt the unbiased density function $\Omega_{s}$ , which is originally induced in NeuS [58], for UDF use, the monotonically increased density function $\sigma_{u}$ [27] is formulated as + +$$ +\sigma_ {u} (t) = \Psi (t) \cdot \Omega_ {s} \left(f _ {u} (\mathbf {r} (t))\right) + (1 - \Psi (t)) \cdot \Omega_ {s} (- f _ {u} (\mathbf {r} (t))), \tag {5} +$$ + +where $\Psi (t)$ is a differentiable visibility function designed in [27] to capture the monotonicity change in UDF. $\Psi$ is 0 behind the intersection point between the camera ray and the hit edge, and is 1 before the intersection point. Besides, $\Psi (t)$ is differentiable around the intersection point to make the UDF optimization more stable. + +Ray Sampling Strategy. A key characteristic of 2D edge maps is their significant sparsity, with edges occupying a much smaller area compared to non-edge regions. To enhance training efficiency and stability, we apply an importance sampling strategy for camera rays, with $50\%$ of rays uniformly sampled from edge areas in the edge maps and the remaining $50\%$ from non-edge areas. Such a sampling strategy ensures that our UDF field training is concentrated on edge areas, thereby substantially speeding up the training process. Additionally, our sampling strategy offers an + +![](images/95652d6ceb191b2d8572ce1242428e6fb2c16a0773901aa5e9b8402546f886e7.jpg) +Figure 3. Illustration of our 3D parametric edge extraction steps. For simplify, our schematic is depicted in the 2D plane. Our 3D edge extraction algorithm comprises five main stages: point initialization (a), point shifting (b to c), edge direction extraction (c to d), point connection (d to e), and edge fitting (e to f). + +elegant solution to the issue of occlusion, a challenge noted in [70]. The rendered edge maps might contain edges not present in the input edge images due to occlusion. In contrast to the complicated occlusion handling strategy introduced in [70], our approach inherently alleviates this challenge by focusing the training on points from the visible edges presented in the input edge maps. + +Loss Functions. 
The total loss function can be written as: + +$$ +\mathcal {L} _ {\text {t o t a l}} = \mathcal {L} _ {\text {e d g e}} + \lambda \mathcal {L} _ {\text {e i k}}, \tag {6} +$$ + +where $\mathcal{L}_{\mathrm{edge}}$ represents the Mean Square Error (MSE) between the rendered and input edge images. $\mathcal{L}_{\mathrm{eik}}$ denotes the Eikonal loss [16], which promotes the learned UDF to be physical distance. $\lambda$ is used to balance these losses. + +# 3.2. 3D Parametric Edge Extraction + +With UDF learning, edge locations are implicitly encoded within the UDF field. However, accurately extracting edge points from the UDF field is non-trivial due to the absence of a real zero-level set in the UDF field. Additionally, formulating these edge points into parametric edges poses significant challenges due to the complex connections of edges. To extract points from the learned density field, NEF [70] selects points with edge density values greater than a specified threshold, $\epsilon$ . This approach leads to an approximated edge point set that is $\epsilon$ -bounded [27]. While this method effectively generates comprehensive point clouds, the $\epsilon$ -bounded point set does not align accurately with the actual edge locations. + +To eliminate the error in edge point extraction, we leverage the physical property of UDF that reflects real-world + +![](images/3427383d6547aba66aa415e6f00bae8ea5a6583ef804b3d91314bd0e7c9d01e2.jpg) +Figure 4. Illustration of the overview (a) and the cross-section (b) of UDF field. (a) In UDF field, edge points are ideally located at the zero-level set, with UDF values being larger away from these points. A query point $x_{t}$ can be precisely shifted to a more accurate position $x_{t + 1}$ by following the UDF value and the inverse normal vector $n(x)$ . The edge direction $l(x)$ aligns with the tangent direction at the edge point $x_{t + 1}$ . (b) The inverse normal vectors of all surrounding points on the cross section are pointing towards the query point. + +![](images/177a7f484bfed0f95367c92e44a94f711554e8a8baf73f022876b74131079723.jpg) + +distances to the edges. Specifically, we develop a 3D edge extraction algorithm composed of five main stages: point initialization, point shifting, edge direction extraction, point connection, and edge fitting, as illustrated in Fig. 3. This algorithm takes the trained UDF field as input and outputs parametric 3D edges, including line segments and curves. + +Point Initialization. Under eikonal loss supervision, the optimized UDF values represent physical distances to the nearest edges. To initialize potential edge points, we begin with the center points of all voxel grids and obtain their UDF values from the UDF field. Subsequently, we eliminate query points whose UDF values exceed a specified threshold $\epsilon^{\prime}$ (red points in Fig. 3 (a)). + +Point Shifting. As illustrated in Fig. 4 (a), the normalized inverse gradient of the UDF field indicates the inverse normal vector pointing towards edges. Drawing inspiration from OccNet [31], we refine the point $x$ iteratively towards the edge using its distance and inverse normal direction: + +$$ +x _ {t + 1} \Leftarrow x _ {t} - f _ {u} (x _ {t}) \cdot \frac {\nabla f _ {u} (x _ {t})}{\| \nabla f _ {u} (x _ {t}) \|}, \tag {7} +$$ + +where $t$ denotes the $t$ -th iteration. As a result of this iterative process, the initial points converge to the edge center (from Fig. 3 (b) to Fig. 3 (c)). + +Edge Direction. 
Establishing connections between edge points is a crucial step in constructing parametric edges. While most methods [11, 42, 70] estimate parameters through least-squares fitting of lines/curves on extracted points, this fitting-based approach for edge extraction is not always robust or accurate. In contrast, inspired by [37, 63], we find that combining the edge direction field with the edge distance field can robustly produce edge parameters. Given that inverse normal vectors invariably point towards edges (see Fig. 4(b)), we first devise an edge direction extraction method based on this set of inverse normal vectors. Specifically, for a query point $x$ , we introduce minor shifts + +set $\{\delta\}_{N}$ with size of $N$ to generate an adjoining point set $\{x'\}_{N}$ , where $\{x'\}_{N} = x + \{\delta\}_{N}$ . The inverse normal vectors of these points, denoted as $\{n\}_{N}$ , are obtained from the learned UDF field. The edge direction, denoted as $l$ , is identified as the null space of $\{n_i'\}$ , since the edge direction is perpendicular to all inverse normal vectors in $\{n\}_{N}$ . Therefore, $l$ can be extracted with singular value decomposition (SVD): + +$$ +A = U \Sigma V ^ {T}, \quad l = V [:, \operatorname {a r g m i n} (\Sigma) ], \tag {8} +$$ + +where $A$ is the matrix representation of $\{n\}_{N}$ and $l$ corresponds to the eigenvector associated with the smallest eigenvalue. Note that $N$ should be sufficiently large to ensure the stability of the extracted edge direction. Unlike DeepLSD [37], we can obtain a precise edge direction field without relying on any 2D direction supervision. + +Point Connection. After accurately determining the edge point locations and directions, we proceed to connect these edge points guided by the edge direction to create polylines (Fig. 3 (d) to (e)). Specifically, we begin by selecting candidate points and then compute directional errors for points adjacent to these candidates. Based on these directional errors, candidate points are connected to its best-matched neighboring point that growing direction aligns best with its extracted edge direction, i.e., with minimal directional error. This process is repeated, extending the edge polylines progressively until no further growth is possible. To ensure efficiency and accuracy, a non-maximum suppression step is employed to remove any redundant points that may exist between the current candidate and the best-matched point. Please refer to the supplements for more algorithm details. + +Edge Fitting. To further parameterize edges, we categorize the polylines into line segments and Bézier curves (Fig. 3 (f)). Initially, we utilize RANSAC [14] to fit lines from the polylines, and select the line segment that encompasses the highest number of inlier points. Following [25], we apply Principal Component Analysis (PCA) to the inlier points, re-estimate the line segment utilizing the principal eigenvector and the mean 3D point, and project all inlier points onto the principal eigenvector to derive the 3D endpoints. This fitting process is repeated for each polyline until the number of inlier points falls below a minimum threshold. For the remaining sub-polylines, we fit each of them with a Bézier curve that is defined by four control points. + +To minimize edge redundancy, we further merge line segments and Bézier curves based on two criteria: the shortest distance between candidate edges and the similarity of curvature at their closest points. 
For line segments, the shortest distance is the minimal point-to-line segment distance, and curvature similarity is their direction's cosine similarity. For Bézier curves, they are the minimal point-to-point distance and the cosine similarity of the tangent vectors at the nearest points, respectively. Candidate edges + +are merged only if they meet both criteria. This dual-criterion approach ensures that merging happens only when two edges are both similar and close to each other. + +To connect edges, all endpoints of line segments and Bézier curves located within a specified distance threshold are merged into shared endpoints. Furthermore, we implement an optimization step [5, 64] to refine the 3D parametric edges by leveraging 2D edge maps, thereby enhancing edge precision. Specifically, we project 3D parametric edges into edge map frames using camera projection matrices and filter out 3D edges that are not visible in over $90\%$ of views. + +# 4. Experiments + +# 4.1. Experiment Setting + +Datasets. We consider four diverse datasets: CAD models (ABC-NEF [70]), real-world objects (DTU [1]), high-quality indoor scenes (Replica [53]), and real-world outdoor scenes (Tanks & Temples [21]). ABC-NEF dataset comprises 115 CAD models, each accompanied by 50 observed images and ground truth parametric edges. We select 82 CAD models, excluding those containing inconsistent edge observations (e.g., cylinders or balls). DTU dataset provides dense ground-truth point clouds and we select 6 objects that meet the multi-view constraints among scans processed by [72]. Following [5], we derive edge points by projecting ground-truth dense points onto images and then comparing them with the observations on 2D edge maps to filter out non-edge points. Replica and Tanks & Temples datasets contain larger scenes. Due to the lack of ground-truth edges, we conduct qualitative comparisons among baselines. + +Baselines. We compare with three state-of-the-art baselines for 3D line/curve mapping, including two learning-based methods, NEF [70] and NEAT [64], and one geometry-based method, LIMAP [25]. + +Metrics. Our evaluation involves first sampling points in proportion to the edge's length and subsequently downsampling these points using a voxel grid with a resolution of $256^3$ . Following the metrics used in [25, 70], we consider Accuracy (Acc), Completeness (Comp) in millimeters, and Recall $(R_{\tau})$ , Precision $(P_{\tau})$ , F-score $(F_{\tau})$ in percentage with a threshold $\tau$ in millimeters. Moreover, we report Edge Direction Consistency (Norm) in percentage to analyze the precision of edge direction extraction. + +Implementation Details. For $f_{u}$ , we utilize 8-layer Multi-layer Perceptrons (MLPs). Each layer in the MLP contains 512 neurons for larger scenes, such as Tanks & Temples, and 256 neurons for other datasets. We sample 1024 rays per batch, among these rays, 512 rays are sampled from edge areas. We train our model for $50k$ iterations on ABC-NEF dataset, and $200k$ iterations on other datasets. We train + +![](images/d8a8f207e25d105eeb2ddbb82ba0e3086cd1272a644335bb120164a0f6fa9412.jpg) +Figure 5. Qualitative comparisons on ABC-NEF [70]. Lines are shown in black and curves in blue. Thanks to our precise edge extraction capabilities for both lines and curves, we achieve complete and accurate modeling of these elements. + +
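To make the point-based metrics listed under Metrics above concrete, the following is a minimal sketch of how Accuracy, Completeness, Recall, Precision, and F-score are typically computed from sampled edge points. Here `pred_pts` and `gt_pts` are assumed to be NumPy arrays of 3D points sampled along the predicted and ground-truth edges; the function and variable names are illustrative and not part of the released evaluation code.

```python
# Illustrative sketch of the point-based edge metrics (not the official evaluation code).
# pred_pts: (N, 3) points sampled along the predicted parametric edges.
# gt_pts:   (M, 3) points sampled along the ground-truth edges, in the same units as tau.
import numpy as np
from scipy.spatial import cKDTree

def edge_metrics(pred_pts, gt_pts, tau=5.0):
    d_pred2gt = cKDTree(gt_pts).query(pred_pts)[0]   # nearest GT point per prediction
    d_gt2pred = cKDTree(pred_pts).query(gt_pts)[0]   # nearest prediction per GT point

    acc = d_pred2gt.mean()                           # Accuracy: mean prediction-to-GT distance
    comp = d_gt2pred.mean()                          # Completeness: mean GT-to-prediction distance
    precision = 100.0 * (d_pred2gt < tau).mean()     # P_tau
    recall = 100.0 * (d_gt2pred < tau).mean()        # R_tau
    fscore = 2.0 * precision * recall / max(precision + recall, 1e-8)  # F_tau
    return acc, comp, recall, precision, fscore
```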
| Method | Detector | Modal | Acc↓ | Comp↓ | Norm↑ | R5↑ | R10↑ | R20↑ | P5↑ | P10↑ | P20↑ | F5↑ | F10↑ | F20↑ |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| LIMAP [25] | LSD | Line | 9.9 | 18.7 | 94.4 | 36.2 | 82.3 | 87.9 | 43.0 | 87.6 | 93.9 | 39.0 | 84.3 | 90.4 |
| | SOLD2 | Line | 5.9 | 29.6 | 90.1 | 64.2 | 76.6 | 79.6 | 88.1 | 96.4 | 97.9 | 72.9 | 84.0 | 86.7 |
| NEF [70] | PiDiNet† | Curve | 11.9 | 16.9 | 90.9 | 11.4 | 62.0 | 91.3 | 15.7 | 68.5 | 96.3 | 13.0 | 64.6 | 93.3 |
| | PiDiNet | Curve | 15.1 | 16.5 | 89.7 | 11.7 | 53.3 | 89.8 | 13.6 | 52.2 | 89.1 | 12.3 | 51.8 | 88.7 |
| | DexiNed | Curve | 21.9 | 15.7 | 85.9 | 11.3 | 48.3 | 87.7 | 11.5 | 39.8 | 71.7 | 10.8 | 42.1 | 76.8 |
| Ours | PiDiNet | Edge | 9.2 | 15.6 | 93.7 | 30.2 | 75.7 | 89.8 | 35.6 | 79.1 | 95.4 | 32.4 | 77.0 | 92.2 |
| | DexiNed | Edge | 8.8 | 8.9 | 95.4 | 56.4 | 88.9 | 94.8 | 62.9 | 89.9 | 95.7 | 59.1 | 88.9 | 94.9 |
+ +our network with the Adam optimizer with a learning rate of $5 \times 10^{-4}$ , while the UDF model $f_{u}$ is trained with a learning rate of $1 \times 10^{-4}$ and initialized with sphere initialization [66]. For edge detection for NEF and ours, we consider PiDiNet [54] and DexiNed [41]. PiDiNet [54] is employed for indoor scenes, such as DTU and Replica, due to its superior performance in these settings. Conversely, DexiNed [41] is applied to outdoor scenes, as it is primarily trained on outdoor scenes. On the synthetic ABC-NEF dataset, we show results with both detectors. For LIMAP, we follow their paper and we use SOLD2 [36] for indoor scenes and LSD [55] for outdoor scenes. NEAT is trained with 2D wireframes from HAWPV3 [65]. + +# 4.2. Evaluation of 3D Edge Reconstruction + +Evaluation on ABC-NEF Dataset. We show the quantitative and qualitative comparisons on Table 1 and Fig. 5. Note that NEAT fails on the ABC-NEF dataset because of its heavy dependence on texture input. NEF demonstrates decent performance at $\tau = 20$ . However, their performance drops significantly when $\tau$ is set to 10 and 5. This is attributed to its bias in edge rendering and its fitting-based post-processing. LIMAP shows remarkable precision across various $\tau$ thresholds. Such consistency stems from its non-linear refinement over multi-view 2D supports. Nonetheless, LIMAP's inability to reconstruct curves leads + +Table 1. Edge reconstruction results on ABC-NEF [70]. Results from NEF's released pretrained models are indicated by $\dagger$ . Our method surpasses all others in terms of completeness and achieves accuracy comparable to LIMAP [25]. + +
| Method | Detector | Acc↓ (Curve) | Comp↓ (Curve) | Norm↑ (Curve) | Acc↓ (Line) | Comp↓ (Line) | Norm↑ (Line) |
|---|---|---|---|---|---|---|---|
| LIMAP [25] | LSD | 272.6 | 50.1 | 84.8 | 34.6 | 11.3 | 95.9 |
| | SOLD2 | 295.7 | 82.2 | 76.8 | 20.0 | 18.1 | 92.1 |
| NEF [70] | PiDiNet† | 265.0 | 27.1 | 77.9 | 40.4 | 13.7 | 92.6 |
| | PiDiNet | 263.1 | 23.9 | 77.6 | 43.9 | 14.0 | 91.4 |
| | DexiNed | 250.5 | 20.3 | 72.6 | 56.2 | 13.8 | 87.3 |
| Ours | PiDiNet | 253.7 | 25.7 | 88.1 | 43.1 | 12.8 | 93.7 |
| | DexiNed | 241.0 | 10.9 | 88.7 | 46.7 | 7.7 | 95.4 |
+ +Table 2. Accuracy, completeness and normal consistency results with curves and lines on ABC-NEF [70]. Our method with DexiNed edge detector yields overall the strongest performance on curves among all baselines. + +to lower scores in completeness and recall. Our method, when combined with either of the 2D edge detectors, consistently outperforms all baselines. Notably, as shown in Table 1, combined with the DexiNed detector, our method achieves superior results in completeness, edge direction consistency, recall, and F-Score. We also show competitive accuracy and precision when compared to LIMAP. + +To further analyze the performance of different edge types, we classify the ground truth edges into curves (including BSplines, ellipses, and circles) and line segments, based on the GT annotations. We provide accuracy, completeness, and edge direction consistency in Table 2 to analyze the separate reconstruction abilities for curves and lines. Note that these results are computed based on all predictions specific to either curves or lines, as other methods do not differentiate between these two types of edges. We + +![](images/85d6f4966cb9656b73660aa5b0a3763e630fe331ae41e9c52593945514f4242b.jpg) +Figure 6. Qualitative comparisons on the Replica [53] and Tanks & Temples [21] datasets. The first two scenes are from the Replica dataset, while the last scene is from the Tanks & Temples dataset. + +![](images/c76cb10f06e8d4329f2ed8a8004c47e2ae22399821ff56514abfd97ef1c0a767.jpg) +2D Image + +![](images/15bdf6ac242f4925c9447e88d99c55d022936580c065ebb02fdca63bcddb0bc0.jpg) +LIMAP [25] + +![](images/00797d273957867f59baf7e8d3861bc4761fafe544042f9e09d83bec09342ccc.jpg) +NEF [70] + +![](images/0bc9a81e7d2d51ad0add2a71c212ba60938538b6deea540583347c6e71802a2b.jpg) +NEAT [64] + +![](images/f96fadba897230b20816ff77c329140fd909f3ddcda7181254b39ef86a5acc5e.jpg) +Ours + +![](images/b00d3acd08c734a1304a9d9ed71594a644480cc8ea4f8fec61d825ec88663aa1.jpg) +GT Edge +Figure 7. Qualitative comparisons on DTU [1]. Our results demonstrate complete edge structure, whereas other methods result in redundant line segments or imprecise curves. + +
| Scan | LIMAP [25] R5↑ | LIMAP [25] P5↑ | NEF [70] R5↑ | NEF [70] P5↑ | NEAT [64] R5↑ | NEAT [64] P5↑ | Ours R5↑ | Ours P5↑ |
|---|---|---|---|---|---|---|---|---|
| 37 | 75.8 | 74.3 | 39.5 | 51.0 | 63.9 | 85.1 | 62.7 | 83.9 |
| 83 | 75.7 | 50.7 | 32.0 | 21.8 | 72.3 | 52.4 | 72.3 | 61.5 |
| 105 | 79.1 | 64.9 | 30.3 | 32.0 | 68.9 | 73.3 | 78.5 | 78.0 |
| 110 | 79.7 | 65.3 | 31.2 | 40.2 | 64.3 | 79.6 | 90.9 | 68.3 |
| 118 | 59.4 | 62.0 | 15.3 | 25.2 | 59.0 | 71.1 | 75.3 | 78.1 |
| 122 | 79.9 | 79.2 | 15.1 | 29.1 | 70.0 | 82.0 | 85.3 | 82.9 |
| Mean | 74.9 | 66.1 | 27.2 | 33.2 | 66.4 | 73.9 | 77.5 | 75.4 |
+ +Table 3. Edge reconstruction results on DTU [1]. + +can see that our method with DexiNed exhibits superior results in reconstructing curves. As for line segments, our performance is marginally lower than the best-performing method LIMAP which is specially optimized for lines. + +Evaluation on DTU Dataset. Our assessment of the DTU dataset, as outlined in Table 3 and Fig. 7, shows our proficiency in real-world scenarios. Notably, our approach achieves the highest recall and precision among all baselines. The DTU dataset presents a challenging scenario for + +![](images/b354c149f9d99c4094d797a70c507d8be54383f1a5459910be440c1163369e90.jpg) +(a) w/o point shifting + +![](images/9330ce339bce3241821a4886721d8b8bd4b53979aaa9cf8a3a161d2c4f46f4d6.jpg) +(b) w/ point shifting +Figure 8. Visualization of point shifting and edge direction. Edge points are shown in point clouds and edge directions in color. The point shifting step significantly refines the locations of edge points. The edge extraction step yields accurate results, as seen in parallel lines sharing the same direction and curves exhibiting continuously changing directions. + +![](images/3e18c6522e3243165a91cc2996d1327f39a3edf0e789a150c8e6794f93052558.jpg) +(c) edge direction + +edge extraction due to its varying lighting conditions. However, our edge refinement step proves effective in preserving primary edges, a point we elaborate on in Sec. 4.3. Fig. 7 shows LIMAP tends to produce redundant line segments, leading to high recall but reduced precision. NEF's post-processing is sensitive to different scenes, resulting in noisy edge fitting. NEAT, despite producing clean outputs, its inability to handle curves constrains its overall performance. + +Qualitative Evaluation on Indoor & Outdoor Scenes. To really showcase the power of our method in capturing scene-level geometry, we further run our method on indoor and outdoor scenes. Note that since NEF is not able to produce meaningful reconstructions on larger scenes, we only compare with LIMAP and NEAT. As shown in Fig. 1 and Fig. 6, NEAT, due to its reliance on high-quality surface reconstruction, faces limitations in scene reconstruction, while LIMAP and our method both successfully capture good scene geometry. + +# 4.3. Ablations and Analysis + +Parametric Edge Extraction. To better understand our parametric edge extraction process described in Sec. 3.2, we visualize our point shifting and edge direction in Fig. 8. + +![](images/37dca7d064aac0cc132d9bf56c37d2bff356cca0784f3ac69b7ed228931bac09.jpg) +(a) Ours + +![](images/b01c0b9a10c60091e6122fb8977f66258e6dad7d7946bca904b5bc03a66510de.jpg) +(b) w/o point shifting + +![](images/5ce8b7b83fbb7916206664839d3fe79193c6f49383ddfce6d48c3528c7e0c490.jpg) +(c) w/o Bezier curve + +![](images/ce553152039508f23cfb3149a04997c6fccfd65ee5ae6a45c5148c973626f296.jpg) +(d) w/o edge merge. + +![](images/1a28bb33faf21a2d5a68b83d045bd5b56dc6f71f9e8b6bc1df74377d810aab98.jpg) +(e) w/o endpoints merge. + +![](images/7c7f3f78f2e6c40239ff6bdb0b140a79f4c6851984e2a16980ad15c09883ab57.jpg) +(f) GT Edge +Figure 9. Qualitative ablation on different component of our parametric edge extraction. The absence of any module in our edge extraction process results in incomplete or noisy qualitative outcomes. + +We can clearly see that the extracted point clouds without point shifting are appeared in redundant and inaccurate edge points (Fig. 8 (a)). In contrast, the point shifting step yields point clouds with sharply defined, precise edges (Fig. 8 (b)). 
In addition, as shown in Fig. 8 (c), the extracted edge directions along parallel lines are consistent, while those on curves vary continuously. This aligns with our expectations.
+
+Furthermore, we also conduct ablation studies in Table 4 and Fig. 9 to evaluate the impact of the different components of our edge extraction algorithm. These experiments are performed on the ABC-NEF dataset using the DexiNed detector. First, removing the query point shifting step leads to a significant drop in both recall and precision, indicating that point shifting substantially refines the query point locations. Second, excluding Bézier curves results in a decline in completeness (Fig. 9 (c)), showing that curves are necessary for complete edge reconstruction. Third, omitting the edge merging step leads to redundant small line segments, as evident in Fig. 9 (d). Finally, removing endpoint merging impairs connectivity between edges, as shown in Fig. 9 (e).
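To connect these ablations back to Sec. 3.2, the following is a minimal PyTorch-style sketch of the point-shifting update in Eq. (7) and the SVD-based edge direction extraction in Eq. (8). Here `udf` stands for a trained UDF network, and all names, iteration counts, and shift radii are illustrative assumptions rather than the authors' implementation.

```python
import torch

def shift_points(udf, x, n_iters=5):
    """Iteratively move candidate points onto the nearest edge (Eq. 7).
    udf: trained network mapping (N, 3) points to (N,) unsigned distances.
    x:   (N, 3) candidate points whose UDF values fall below the threshold.
    """
    for _ in range(n_iters):
        x = x.detach().requires_grad_(True)
        d = udf(x)                                    # unsigned distance to the nearest edge
        grad = torch.autograd.grad(d.sum(), x)[0]     # gradient of the UDF field at x
        n = grad / (grad.norm(dim=-1, keepdim=True) + 1e-8)
        x = x - d.unsqueeze(-1) * n                   # step towards the edge along the inverse normal
    return x.detach()

def edge_direction(udf, x, n_samples=32, radius=1e-3):
    """Estimate the edge direction at a single edge point x of shape (3,) as the
    null space of nearby inverse normal vectors (Eq. 8)."""
    offsets = torch.randn(n_samples, 3) * radius                # minor shifts around x
    pts = (x.unsqueeze(0) + offsets).detach().requires_grad_(True)
    d = udf(pts)
    grad = torch.autograd.grad(d.sum(), pts)[0]
    normals = grad / (grad.norm(dim=-1, keepdim=True) + 1e-8)   # sign is irrelevant for the SVD
    _, _, vh = torch.linalg.svd(normals)
    return vh[-1]          # right singular vector with the smallest singular value
```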
|   | Method | Acc↓ | Comp↓ | R5↑ | P5↑ | F5↑ |
|---|---|---|---|---|---|---|
| (a) | Ours | 8.8 | 8.9 | 56.4 | 62.9 | 59.1 |
| (b) | w/o point shifting | 15.3 | 9.9 | 29.2 | 18.7 | 22.2 |
| (c) | w/o Bézier curve | 9.4 | 12.1 | 54.2 | 65.8 | 59.0 |
| (d) | w/o edge merging | 10.3 | 8.7 | 53.8 | 45.3 | 48.6 |
| (e) | w/o endpoints merging | 9.3 | 9.0 | 51.5 | 57.7 | 54.0 |
+ +Table 4. Ablation studies on different component of parametric edge extraction on ABC-NEF [70] with DexiNed [41]. Our parametric edge extraction approach with all components achieves the optimal balance between accuracy and completeness. + +Edge Refinement. In Fig. 10, we study the effectiveness of our edge refinement module. When input edge maps contain some noises in dark scenes, our initial 3D edge map, without the edge refinement, exhibits some artifacts. However, the edge refinement module markedly mitigates artifacts, achieving a balance between recall and precision. + +![](images/76476b7e718c308488003bd20018d95bbb86ea3ad15a7919eebed9eaa49ee90a.jpg) +w/o edge refinement + +![](images/138283933240e89baba8f9b4feca1fc65438e619ce2077a5f3d602df0693f867.jpg) +w/ edge refinement + +![](images/afdaeeed73ad604514de757e9b2270c7023329ae143a5a3832380da533b7596a.jpg) +Figure 10. Ablation study on edge refinement. Our edge refinement effectively eliminates the majority of noisy edges in background areas. +Figure 11. Dense surface reconstruction on Replica [53]. Utilizing our trained UDF MLP for initialization enables MonoSDF to capture more geometric details, such as the vase in the top row, the shelf in the bottom row. + +# 4.4. Application on Dense Surface Reconstruction + +Our method has demonstrated its proficiency in reconstructing 3D edges across a diverse range of scenarios. Building on this success, we further explore the potential of our learned representation to benefit other tasks. A particularly relevant area is dense surface reconstruction. + +As shown in Fig. 11, the recent neural-implicit surface reconstruction approach MonoSDF [72] can show decent reconstruction results from only posed multi-view images. However, we notice that they still struggle to capture detailed geometry. To address this, we integrate our method into the MonoSDF pipeline. Specifically, we initialize the geometry MLPs of MonoSDF with our pre-trained UDF MLPs. We can clearly see that such a simple integration can enhance the recovery of geometric details. + +# 5. Conclusions + +We introduced EMAP, a 3D neural edge reconstruction pipeline that learns accurate 3D edge point locations and directions implicitly from multi-view edge maps through UDF and abstracts 3D parametric edges from the learned UDF field. Through extensive evaluations, EMAP demonstrates remarkable capabilities in CAD modeling and in capturing detailed geometry of objects and scenes. Furthermore, we show that our learned UDF field enriches the geometric details for neural surface reconstruction. + +# References + +[1] Henrik Aanaes, Rasmus Ramsbøl Jensen, George Vogiatzis, Engin Tola, and Anders Bjorholm Dahl. Large-scale data for multiple-view stereopsis. International Journal of Computer Vision (IJCV), 2016. 5, 7 +[2] Hichem Abdellali, Robert Frohlich, Viktor Vilagos, and Zoltan Kato. L2d2: Learnable line detector and descriptor. In Proc. of the International Conf. on 3D Vision (3DV), 2021. 2 +[3] Dejan Azinović, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural rgb-d surface reconstruction. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[4] Adrien Bartoli and Peter Sturm. Structure-from-motion using lines: Representation, triangulation, and bundle adjustment. Computer Vision and Image Understanding (CVIU), 2005. 2 +[5] Andrea Bignoli, Andrea Romanoni, and Matteo Matteucci. Multi-view stereo 3d edge reconstruction. In Proc. 
of the IEEE Winter Conference on Applications of Computer Vision (WACV), 2018. 1, 5 +[6] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3d reconstruction. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2 +[7] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[8] Manmohan Chandraker, Jongwoo Lim, and David Kriegman. Moving in stereo: Efficient structure and motion using lines. In Proc. of the International Conf. on Computer Vision (ICCV), pages 1741-1748. IEEE, 2009. 2 +[9] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[10] Wentao Cheng, Sheng Yang, Maomin Zhou, Ziyuan Liu, Yiming Chen, and Mingyang Li. Road mapping and localization using sparse semantic visual features. IEEE Robotics and Automation Letters, 6(4):8118-8125, 2021. 1 +[11] Kseniya Cherenkova, Elena Dupont, Anis Kacem, Ilya Arzhannikov, Gleb Gusev, and Djamila Aouada. Sepicnet: Sharp edges recovery by parametric inference of curves in 3d shapes. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 4 +[12] Scott Ettinger, Shuyang Cheng, Benjamin Caine, Chenxi Liu, Hang Zhao, Sabeek Pradhan, Yuning Chai, Ben Sapp, Charles R Qi, Yin Zhou, et al. Large scale interactive motion forecasting for autonomous driving: The waymo open motion dataset. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9710-9719, 2021. 1 +[13] Liangji Fang, Qinhong Jiang, Jianping Shi, and Bolei Zhou. Tpnet: Trajectory proposal network for motion prediction. + +In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6797-6806, 2020. 1 +[14] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 1981. 5 +[15] Michael Goesele, Noah Snavely, Brian Curless, Hugues Hoppe, and Steven M Seitz. Multi-view stereo for community photo collections. In Proc. of the International Conf. on Computer Vision (ICCV), 2007. 1 +[16] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. In Proc. of the International Conf. on Machine learning (ICML), 2020. 4 +[17] Yijia He, Ji Zhao, Yue Guo, Wenhao He, and Kui Yuan. Plizio: Tightly-coupled monocular visual-inertial odometry using point and line features. Sensors, 18(4):1159, 2018. 2 +[18] Manuel Hofer, Michael Maurer, and Horst Bischof. Improving sparse 3d models for man-made environments using line-based 3d reconstruction. In Proc. of the International Conf. on 3D Vision (3DV), 2014. 1 +[19] Manuel Hofer, Michael Maurer, and Horst Bischof. Efficient 3d scene abstraction using line segments. Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2017. +[20] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local implicit grid representations for 3d scenes. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2020. 
2 +[21] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Trans. on Graphics (ToG), 2017. 5, 7 +[22] Tianyu Li, Li Chen, Xiangwei Geng, Huijie Wang, Yang Li, Zhenbo Liu, Shengyin Jiang, Yuting Wang, Hang Xu, Chunjing Xu, et al. Topology reasoning for driving scenes. arXiv preprint arXiv:2304.05277, 2023. 1 +[23] Hyunjun Lim, Jinwoo Jeon, and Hyun Myung. Uv-slam: Unconstrained line-based slam using vanishing points for structural mapping. IEEE Robotics and Automation Letters (RA-L), 7(2):1518-1525, 2022. 2 +[24] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. Dist: Rendering deep implicit signed distance function with differentiable sphere tracing. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[25] Shaohui Liu, Yifan Yu, Rémi Pautrat, Marc Pollefeys, and Viktor Larsson. 3d line mapping revisited. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 2, 5, 6, 7 +[26] Yujia Liu, Stefano D'Aronco, Konrad Schindler, and Jan Dirk Wegner. Pc2wf: 3d wireframe reconstruction from raw point clouds. In Proc. of the International Conf. on Learning Representations (ICLR), 2021. 2 +[27] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Yuan Liu, Peng Wang, Christian Theobalt, Taku Komura, and Wenping Wang. Neuraludf: Learning unsigned distance fields for + +multi-view reconstruction of surfaces with arbitrary topologies. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3, 4 +[28] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[29] Daniele Marzorati, Matteo Matteucci, Davide Migliore, and Domenico G Sorrenti. Integration of 3d lines and points in 6dof visual slam by uncertain projective geometry. In EMCR. CiteSeer, 2007. 2 +[30] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[31] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 4 +[32] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2, 3 +[33] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[34] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proc. of the International Conf. on Computer Vision (ICCV), 2021. 1, 2, 3 +[35] Jeong Joon Park, Peter Florence, Julian Straub, Richard A. Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 
2 +[36] Rémi Pautrat, Juan-Ting Lin, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Sold2: Self-supervised occlusion-aware line description and detection. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 6 +[37] Rémi Pautrat, Daniel Barath, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Deeplsd: Line segment detection and refinement with deep image gradients. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 4, 5 +[38] Rémi* Pautrat, Iago* Suárez, Yifan Yu, Marc Pollefeys, and Viktor Larsson. GlueStick: Robust image matching by sticking points and lines together. In Proc. of the International Conf. on Computer Vision (ICCV), 2023. 2 +[39] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2 + +[40] Songyou Peng, Chiyu "Max" Jiang, Yiyi Liao, Michael Niemeyer, Marc Pollefeys, and Andreas Geiger. Shape as points: A differentiable poisson solver. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2 +[41] Xavier Soria Poma, Edgar Riba, and Angel Sappa. Dense extreme inception network: Towards a robust cnn model for edge detection. In Proc. of the IEEE Winter Conference on Applications of Computer Vision (WACV), 2020. 6, 8 +[42] Zhijian Qiao, Zehuan Yu, Huan Yin, and Shaojie Shen. Online monocular lane mapping using catmull-rom spline. arXiv preprint arXiv:2307.11653, 2023. 1, 4 +[43] Tong Qin, Yuxin Zheng, Tongqing Chen, Yilun Chen, and Qing Su. A light-weight semantic map for visual localization towards autonomous driving. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 11248-11254. IEEE, 2021. 1 +[44] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with thousands of tiny mlp's. In Proc. of the International Conf. on Computer Vision (ICCV), 2021. 2 +[45] Chris Rorden, Roger Newman-Norlund, Chris Drake, Daniel R Glen, Julius Fridriksson, Taylor Hanayik, and Paul A Taylor. Improving 3d edge detection for visual inspection of mri coregistration and alignment. bioRxiv, pages 2022-09, 2022. 1 +[46] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proc. of the International Conf. on Computer Vision (ICCV), 2019. 2 +[47] Grant Schindler, Panchapagesan Krishnamurthy, and Frank Dellaert. Line-based structure from motion for urban environments. In International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT), 2006. 2 +[48] Johannes L Schonberger, Enliang Zheng, Jan-Michael Frahm, and Marc Pollefeys. Pixelwise view selection for unstructured multi-view stereo. In Proc. of the European Conf. on Computer Vision (ECCV), 2016. 1 +[49] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2 +[50] Steven M Seitz, Brian Curless, James Diebel, Daniel Scharstein, and Richard Szeliski. A comparison and evaluation of multi-view stereo reconstruction algorithms. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2006. 1 +[51] Shaoshuai Shi, Li Jiang, Dengxin Dai, and Bernt Schiele. Motion transformer with global intention localization and local movement refinement. 
Advances in Neural Information Processing Systems, 2022. 1 +[52] Fangwen Shu, Jiaxuan Wang, Alain Pagani, and Didier Stricker. Structure plp-slam: Efficient sparse mapping and localization using point, line and plane for monocular, rgb-d and stereo cameras. In Proc. IEEE International Conf. on Robotics and Automation (ICRA). 2 +[53] Julian Straub, Thomas Whelan, Lingni Ma, Yufan Chen, Erik Wijmans, Simon Green, Jakob J Engel, Raul Mur-Artal, Carl + +Ren, Shobhit Verma, et al. The replica dataset: A digital replica of indoor spaces. arXiv preprint arXiv:1906.05797, 2019. 1, 5, 7, 8 +[54] Zhuo Su, Wenzhe Liu, Zitong Yu, Dewen Hu, Qing Liao, Qi Tian, Matti Pietikainen, and Li Liu. Pixel difference networks for efficient edge detection. In Proc. of the International Conf. on Computer Vision (ICCV), 2021. 6 +[55] Rafael Grompone Von Gioi, Jeremie Jakubowicz, Jean-Michel Morel, and Gregory Randall. Lsd: A fast line segment detector with a false detection control. IEEE Trans. on Pattern Analysis and Machine Intelligence (PAMI), 2008. 6 +[56] Huijie Wang, Zhenbo Liu, Yang Li, Tianyu Li, Li Chen, Chonghao Sima, Yuting Wang, Shengyin Jiang, Feng Wen, Hang Xu, et al. Road genome: A topology reasoning benchmark for scene understanding in autonomous driving. arXiv preprint arXiv:2304.10440, 2023. 1 +[57] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2 +[58] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2, 3 +[59] Xiaogang Wang, Yuelang Xu, Kai Xu, Andrea Tagliasacchi, Bin Zhou, Ali Mahdavi-Amiri, and Hao Zhang. Pie-net: Parametric inference of point cloud edges. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2 +[60] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. Hf-neus: Improved surface reconstruction using high-frequency details. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2 +[61] Dong Wei, Yi Wan, Yongjun Zhang, Xinyi Liu, Bin Zhang, and Xiqi Wang. Elsr: Efficient line segment reconstruction with planes and points guidance. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 2 +[62] Qiangeng Xu, Weiyue Wang, Duygu Ceylan, Radomir Mech, and Ulrich Neumann. DISN: deep implicit surface network for high-quality single-view 3d reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2 +[63] Nan Xue, Song Bai, Fudong Wang, Gui-Song Xia, Tianfu Wu, and Liangpei Zhang. Learning attraction field representation for robust line segment detection. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 4 +[64] Nan Xue, Bin Tan, Yuxi Xiao, Liang Dong, Gui-Song Xia, and Tianfu Wu. Volumetric wireframe parsing from neural attraction fields. arXiv preprint arXiv:2307.10206, 2023. 1, 2, 5, 7 +[65] Nan Xue, Tianfu Wu, Song Bai, Fu-Dong Wang, Gui-Song Xia, Liangpei Zhang, and Philip HS Torr. Holistically-attracted wireframe parsing: From supervised to self-supervised learning. IEEE Trans. on Pattern Analysis and Machine Intelligence (PAMI), 2023. 2, 6 + +[66] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Ronen Basri, and Yaron Lipman. 
Multiview neural surface reconstruction by disentangling geometry and appearance. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 6 +[67] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 1, 3 +[68] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2 +[69] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 1, 2 +[70] Yunfan Ye, Renjiao Yi, Zhirui Gao, Chenyang Zhu, Zhiping Cai, and Kai Xu. Nef: Neural edge fields for 3d parametric curve reconstruction from multi-view images. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3, 4, 5, 6, 7, 8 +[71] Zehao Yu, Anpei Chen, Bozidar Antic, Songyou Peng, Apratim Bhattacharyya, Michael Niemeyer, Siyu Tang, Torsten Sattler, and Andreas Geiger. Sdfstudio: A unified framework for surface reconstruction, 2022. 2, 3 +[72] Zehao Yu, Songyou Peng, Michael Niemeyer, Torsten Sattler, and Andreas Geiger. Monosdf: Exploring monocular geometric cues for neural implicit surface reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2, 5, 8 +[73] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2 +[74] Xiangyu Zhu, Dong Du, Weikai Chen, Zhiyou Zhao, Yinyu Nie, and Xiaoguang Han. Nerve: Neural volumetric edges for parametric curve extraction from point cloud. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2 +[75] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[76] Zihan Zhu, Songyou Peng, Viktor Larsson, Zhaopeng Cui, Martin R Oswald, Andreas Geiger, and Marc Pollefeys. Nicer-slam: Neural implicit scene encoding for rgb slam. In Proc. of the International Conf. on 3D Vision (3DV), 2024. 2 +[77] Xingxing Zuo, Xiaojia Xie, Yong Liu, and Guoquan Huang. Robust visual slam with point and line features. In Proc. IEEE International Conf. on Intelligent Robots and Systems (IROS), 2017. 
2 \ No newline at end of file diff --git a/2024/3D Neural Edge Reconstruction/images.zip b/2024/3D Neural Edge Reconstruction/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..d49015c6b6675ee9a22735065f5c3d8a0f61e205 --- /dev/null +++ b/2024/3D Neural Edge Reconstruction/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acefecf38c0707b633e329c120e910bb604c86bf5da327d2c4a4680e37175a39 +size 634680 diff --git a/2024/3D Neural Edge Reconstruction/layout.json b/2024/3D Neural Edge Reconstruction/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d78901496fc9f5f166a0b59c938f3ab4ce73af31 --- /dev/null +++ b/2024/3D Neural Edge Reconstruction/layout.json @@ -0,0 +1,10561 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 197, + 103, + 396, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 103, + 396, + 121 + ], + "spans": [ + { + "bbox": [ + 197, + 103, + 396, + 121 + ], + "type": "text", + "content": "3D Neural Edge Reconstruction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "spans": [ + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": "Lei Li" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": " Songyou Peng" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{1,2\\dagger}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": " Zehao Yu" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{3,4}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": " Shaohui Liu" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": " Rémi Pautrat" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{1,6}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": " Xiaochuan Yin" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": " Marc Pollefeys" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{1,6}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": "ETH Zurich " + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": "MPI for Intelligent Systems, Tübingen " + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": "University of Tübingen " + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": "Tübingen AI Center " + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + 
], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": "Utopilot " + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 88, + 141, + 492, + 216 + ], + "type": "text", + "content": "Microsoft neural-edge-map.github.io" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 242, + 192, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 242, + 192, + 254 + ], + "spans": [ + { + "bbox": [ + 143, + 242, + 192, + 254 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 266, + 290, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 290, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 290, + 483 + ], + "type": "text", + "content": "Real-world objects and environments are predominantly composed of edge features, including straight lines and curves. Such edges are crucial elements for various applications, such as CAD modeling, surface meshing, lane mapping, etc. However, existing traditional methods only prioritize lines over curves for simplicity in geometric modeling. To this end, we introduce EMAP, a new method for learning 3D edge representations with a focus on both lines and curves. Our method implicitly encodes 3D edge distance and direction in Unsigned Distance Functions (UDF) from multi-view edge maps. On top of this neural representation, we propose an edge extraction algorithm that robustly abstracts parametric 3D edges from the inferred edge points and their directions. Comprehensive evaluations demonstrate that our method achieves better 3D edge reconstruction on multiple challenging datasets. We further show that our learned UDF field enhances neural surface reconstruction by capturing more details." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 504, + 128, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 504, + 128, + 516 + ], + "spans": [ + { + "bbox": [ + 47, + 504, + 128, + 516 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 525, + 282, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 525, + 282, + 548 + ], + "spans": [ + { + "bbox": [ + 52, + 525, + 282, + 548 + ], + "type": "text", + "content": "The straight line belongs to men, the curved one to God. — Antonio Gaudi" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 552, + 287, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 552, + 287, + 647 + ], + "spans": [ + { + "bbox": [ + 46, + 552, + 287, + 647 + ], + "type": "text", + "content": "This sentiment is evident in the visual composition of our environments. While straight lines are common in manmade scenes such as walls, windows, and doors [25], curves are more general and ubiquitous from cups, bridges, architectures, to Gothic arts. Edges, which are composed of both lines and curves, are the fundamental elements of visual perception. Therefore, accurate edge modeling is crucial for understanding the geometry and structure of our 3D world." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 647, + 287, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 647, + 287, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 647, + 287, + 696 + ], + "type": "text", + "content": "Conventional approaches on 3D reconstruction typically involve inferring dense geometry and abstracting meshes from 2D images [15, 34, 48, 50, 67, 69]. However, the presence of 3D edges offers substantial advantages. First," + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 240, + 425, + 306 + ], + "blocks": [ + { + "bbox": [ + 309, + 240, + 425, + 306 + ], + "lines": [ + { + "bbox": [ + 309, + 240, + 425, + 306 + ], + "spans": [ + { + "bbox": [ + 309, + 240, + 425, + 306 + ], + "type": "image", + "image_path": "f32a4e2e65c6a8de962c388fc298595f11a5a3a318eb562c0f2cf1aa633d4c75.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 333, + 307, + 401, + 316 + ], + "lines": [ + { + "bbox": [ + 333, + 307, + 401, + 316 + ], + "spans": [ + { + "bbox": [ + 333, + 307, + 401, + 316 + ], + "type": "text", + "content": "(a) An Indoor Scene" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 426, + 239, + 542, + 305 + ], + "blocks": [ + { + "bbox": [ + 426, + 239, + 542, + 305 + ], + "lines": [ + { + "bbox": [ + 426, + 239, + 542, + 305 + ], + "spans": [ + { + "bbox": [ + 426, + 239, + 542, + 305 + ], + "type": "image", + "image_path": "e6316e0a68dea4fa60357f5b54aca0a5710246b7a1b48dfbe7cb8ea09541af8b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 459, + 307, + 512, + 316 + ], + "lines": [ + { + "bbox": [ + 459, + 307, + 512, + 316 + ], + "spans": [ + { + "bbox": [ + 459, + 307, + 512, + 316 + ], + "type": "text", + "content": "(b) LIMAP [25]" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 307, + 317, + 424, + 381 + ], + "blocks": [ + { + "bbox": [ + 307, + 317, + 424, + 381 + ], + "lines": [ + { + "bbox": [ + 307, + 317, + 424, + 381 + ], + "spans": [ + { + "bbox": [ + 307, + 317, + 424, + 381 + ], + "type": "image", + "image_path": "655a4d113703ef7e8eaee1f49b5e069da98404b01c4a16f403ecc227efa8947a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 343, + 383, + 392, + 393 + ], + "lines": [ + { + "bbox": [ + 343, + 383, + 392, + 393 + ], + "spans": [ + { + "bbox": [ + 343, + 383, + 392, + 393 + ], + "type": "text", + "content": "(c) NEAT [64]" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 427, + 317, + 544, + 382 + ], + "blocks": [ + { + "bbox": [ + 427, + 317, + 544, + 382 + ], + "lines": [ + { + "bbox": [ + 427, + 317, + 544, + 382 + ], + "spans": [ + { + "bbox": [ + 427, + 317, + 544, + 382 + ], + "type": "image", + "image_path": "e4f4f85cea6280fcd180b2e5fa06d3f1425e04e7805acd5d8f57819546e1c071.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 456, + 384, + 515, + 392 + ], + "lines": [ + { + "bbox": [ + 456, + 384, + 515, + 392 + ], + "spans": [ + { + "bbox": [ + 456, + 384, + 515, + 392 + ], + "type": "text", + "content": "(d) EMAP (Ours)" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 403, + 545, + 448 + ], + 
"lines": [ + { + "bbox": [ + 305, + 403, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 305, + 403, + 545, + 448 + ], + "type": "text", + "content": "Figure 1. Example 3D edge reconstruction on Replica [53]. While prior methods such as LIMAP [25] and NEAT [64] only reconstruct distinctive line segments, our method generates a more complete 3D edge map combining both line and curve features." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 459, + 546, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 459, + 546, + 603 + ], + "spans": [ + { + "bbox": [ + 304, + 459, + 546, + 603 + ], + "type": "text", + "content": "edges are naturally compact representations that capture the salient features oftentimes around geometric boundaries, which are good indicators for more lightweight and adaptive meshing and 3D modeling with comparably less redundancy. Secondly, in contrast to dense surface modeling from images, 3D edges are unaffected to illumination changes, thus exhibiting better reliability on multi-view reconstruction. Last but not least, 3D edges serve as a universal representation in real-world scenarios, and can be potentially integrated into many applications such as lane mapping [10, 22, 42, 43, 56], motion forecasting [12, 13, 51], medical imaging [45], etc." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": "The reconstruction of 3D edges is conventionally approached by matching their 2D observations across views. While Bignoli et al. [5] proposed edge point matching using the sparse map from Structure-from-Motion (SfM), it is inherently ill-posed due to its heavy reliance on cross-view edge correspondences, which are generally sparse and prone to ambiguity. Recent works have also improved the quality of 3D line reconstruction [18, 25, 61, 64], but primarily excel in specific scenes where straight lines dominate." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 62, + 702, + 141, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 702, + 141, + 713 + ], + "spans": [ + { + "bbox": [ + 62, + 702, + 141, + 713 + ], + "type": "text", + "content": "† Corresponding author" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "21219" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "nate. While general real-world environments with curved structures pose more challenges, recent progress on 2D detection and matching is mostly limited to point and line features and thus inapplicable to such scenarios." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 121, + 288, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 121, + 288, + 276 + ], + "spans": [ + { + "bbox": [ + 46, + 121, + 288, + 276 + ], + "type": "text", + "content": "A recent work NEF [70] made a significant step forward in learning 3D curves from multi-view 2D edge observations. Inspired by the recent success of neural radiance field (NeRF) [32], they introduce a neural edge density field and show decent results in reconstructing edges for simple objects. Nevertheless, their proposed edge density field has an inherent bias in edge rendering, leading to less accurate reconstruction. Moreover, its fitting-based edge parameterization process not only requires tedious tuning to specific data, but also struggles with its scalability to larger and more complex scenes. This motivates us to develop a more robust system for 3D edge mapping from 2D observations, which would benefit a wide range of downstream tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 277, + 287, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 277, + 287, + 504 + ], + "spans": [ + { + "bbox": [ + 46, + 277, + 287, + 504 + ], + "type": "text", + "content": "Towards this goal, we introduce EMAP, a novel approach for accurate 3D edge reconstruction from only 2D edge maps. EMAP comprises the following steps. Firstly, we learn the neural unsigned distance function (UDF) to implicitly model 3D edges, utilizing an unbiased rendering equation to mitigate the inaccuracies observed in NEF. Secondly, once learned, we can obtain the unsigned distance and normal for each point in the space, so a set of precise edge points with directions can be extracted. Finally, based on the guidance of every edge point's location and direction, we design a simple yet robust algorithm for parametric line and curve extraction, that can be applied across various challenging scenarios. Our comprehensive evaluations of EMAP, from synthetic CAD models to real-world indoor and outdoor scenes, show its superior performance in 3D edge reconstruction. In addition, we also observe that initializing the optimization process of the recent neural implicit surface reconstruction method with our trained UDF field enables the reconstructing of better details." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 506, + 276, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 506, + 276, + 518 + ], + "spans": [ + { + "bbox": [ + 59, + 506, + 276, + 518 + ], + "type": "text", + "content": "Overall, the contributions of this paper are as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 519, + 286, + 602 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 47, + 519, + 286, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 519, + 286, + 555 + ], + "spans": [ + { + "bbox": [ + 47, + 519, + 286, + 555 + ], + "type": "text", + "content": "- We propose EMAP, a 3D neural edge reconstruction pipeline that can learn accurate 3D edge locations and directions implicitly from multi-view edge maps." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 555, + 286, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 555, + 286, + 578 + ], + "spans": [ + { + "bbox": [ + 47, + 555, + 286, + 578 + ], + "type": "text", + "content": "- We develop a 3D edge extraction algorithm to robustly connect edge points with edge direction guidance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 579, + 286, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 579, + 286, + 602 + ], + "spans": [ + { + "bbox": [ + 47, + 579, + 286, + 602 + ], + "type": "text", + "content": "- We show that our model can generate complete 3D edge maps and help optimize dense surfaces." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 617, + 133, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 617, + 133, + 629 + ], + "spans": [ + { + "bbox": [ + 47, + 617, + 133, + 629 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "type": "text", + "content": "Geometry-based 3D Line Reconstruction. As a pioneering work, Bartoli and Sturm [4] introduces a full SfM system using line segments, which is later improved under Manhattan assumption [47] and in stereo systems [8]. Recently, with the developments of line detections [36, 37, 65] and matching [2, 36, 38] thanks to the advent to deep learn" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 216 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 216 + ], + "type": "text", + "content": "ing, several works have attempted to revisit the line mapping problem through graph clustering [19], leveraging planar information [61] and incorporating into SLAM systems [17, 23, 29, 52, 77]. In particular, recent work LIMAP [25] introduces a robust 3D line mapping system with structural priors which can adapt to different existing line detectors and matchers. Despite these advances, all the works are limited to straight lines and often produce segmented small lines when it comes to curves. In contrast, edges are generally easier to detect and are redundantly present in most scenes. In this project, rather than relying on lines, we build our 3D mapping system using robust 2D edge maps." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 232, + 545, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 232, + 545, + 518 + ], + "spans": [ + { + "bbox": [ + 304, + 232, + 545, + 518 + ], + "type": "text", + "content": "Learning-based 3D Line/Curve Reconstruction. In contrast to geometry-based methods, some approaches [26, 59, 74] shifted their focus to directly extract parametric curves from given edge point clouds. Typically, they require keypoint detection, clustering, and linkage. Even under the relaxed setting, it is still challenging to generate clean parametric curves due to the complex connectivity of curves and imperfect point clouds [70]. To address this limitation, NEF [70] integrates NeRF [32] for edge mapping from multi-view images, extracting 3D curves from the learned neural edge field through a carefully designed postprocessing. While NEF achieves decent performance on CAD models, it is constrained to simple and low-precision object-level edge mapping. A concurrent work, NEAT [64], utilizes VolSDF [69] to build dense surfaces and incorporates a global junction perceiving module to optimize 3D line junctions with 2D wireframe supervision. Although NEAT can produce 3D wireframes, it is restricted to modeling line segments only. Additionally, their need for textured objects is a limitation. By contrast, we use the unisgned distance function (UDF) to represent edges, enabling the construction of both line segments and curves without the necessity for target textures. We further show that our method can faithfully reconstruct edges for complex scenes." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 533, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 545, + 713 + ], + "type": "text", + "content": "Neural Implicit Representations. Neural implicit representations have emerged as a powerful tool for a spectrum of computer vision tasks, including object geometry representation [9, 24, 30, 34, 35, 40, 46, 57, 62, 66, 68], scene reconstruction [6, 20, 39, 71, 72, 75, 76], novel view synthesis [28, 32, 44, 73] and generative modelling [7, 33, 49]. Recent works [27, 58, 60, 69, 72] show impressive high-fidelity reconstruction by learning the implicit signed distance function (SDF). However, the SDF representation constrains to modeling closed, watertight surfaces. In contrast, NeuralUDF [3] exploits UDF to represent surfaces, offering a higher degree of freedom to represent both closed and open surfaces. We find UDF as a suitable representation to model edges implicitly, in comparison to SDF used in NEAT [64] and edge volume density from NEF [70]." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21220" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 70, + 286, + 193 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 286, + 193 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 286, + 193 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 286, + 193 + ], + "type": "image", + "image_path": "d23a375aefeb8896ed56301abffb5f0a0787d0ecf38319c3daa5894552c8c5dc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 194, + 286, + 259 + ], + "lines": [ + { + "bbox": [ + 46, + 194, + 286, + 259 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 286, + 259 + ], + "type": "text", + "content": "Figure 2. UDF learning overview. We utilize a vanilla NeRF [32] MLP that outputs absolute values to model the 3D UDF field. Edge maps are rendered using a density-based edge neural rendering technique, combined with an unbiased UDF rendering approach to eliminate bias. Our primary supervision comes from 2D edge maps predicted by a pre-trained edge detector." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 267, + 102, + 280 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 267, + 102, + 280 + ], + "spans": [ + { + "bbox": [ + 47, + 267, + 102, + 280 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 287, + 287, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 287, + 287, + 347 + ], + "spans": [ + { + "bbox": [ + 46, + 287, + 287, + 347 + ], + "type": "text", + "content": "Our goal is to build a 3D edge map from multi-view posed 2D edge maps. To this end, we first introduce our edge representation and edge field learning in Sec. 3.1. Next, we present our 3D parametric edge extraction from the learned edge representations in Sec. 3.2." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 354, + 282, + 366 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 282, + 366 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 282, + 366 + ], + "type": "text", + "content": "3.1. Edge Field with Unsigned Distance Functions" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 375, + 287, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 375, + 287, + 459 + ], + "spans": [ + { + "bbox": [ + 46, + 375, + 287, + 459 + ], + "type": "text", + "content": "Multi-view Edge Maps. Since edge maps are generally invariant to illumination changes and are more robustly detected across various scenes than lines, our method utilizes multiple posed 2D edge maps as inputs. We apply pretrained edge detectors to predict an edge map " + }, + { + "bbox": [ + 46, + 375, + 287, + 459 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 375, + 287, + 459 + ], + "type": "text", + "content": " for each input RGB image. 
Each pixel of " + }, + { + "bbox": [ + 46, + 375, + 287, + 459 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 375, + 287, + 459 + ], + "type": "text", + "content": " has a value within [0, 1], indicating its probability of being an edge." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 461, + 287, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 461, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 46, + 461, + 287, + 510 + ], + "type": "text", + "content": "Density-based Edge Neural Rendering. We use an unsigned distance function (UDF) to represent edges, denoted as " + }, + { + "bbox": [ + 46, + 461, + 287, + 510 + ], + "type": "inline_equation", + "content": "f_{u}" + }, + { + "bbox": [ + 46, + 461, + 287, + 510 + ], + "type": "text", + "content": ". This function computes the unsigned distance from a given 3D point to the nearest edge. The UDF is defined as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 93, + 515, + 287, + 529 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 515, + 287, + 529 + ], + "spans": [ + { + "bbox": [ + 93, + 515, + 287, + 529 + ], + "type": "interline_equation", + "content": "f _ {u}: \\mathbb {R} ^ {3} \\rightarrow \\mathbb {R} \\quad \\mathrm {x} \\mapsto u = \\operatorname {U D F} (\\mathrm {x}), \\tag {1}", + "image_path": "cffda3cfba4ae3e3d080f4ebedb4dd2075c25132e2cd05414e22c4a7d0c4950c.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 536, + 287, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 536, + 287, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 536, + 287, + 548 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 536, + 287, + 548 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 47, + 536, + 287, + 548 + ], + "type": "text", + "content": " is a 3D point and " + }, + { + "bbox": [ + 47, + 536, + 287, + 548 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 47, + 536, + 287, + 548 + ], + "type": "text", + "content": " is the corresponding UDF value." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "spans": [ + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "text", + "content": "To render an edge pixel in a certain view, we trace a camera ray " + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "inline_equation", + "content": "\\mathbf{r}(t) = \\mathbf{o} + t\\mathbf{d}" + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "text", + "content": ". This ray originates from the camera's center " + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "inline_equation", + "content": "\\mathbf{o}" + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "text", + "content": " and extends in direction " + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "inline_equation", + "content": "\\mathbf{d}" + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "text", + "content": " [32]. 
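To make the mapping of Eq. (1) concrete, here is a minimal sketch (illustrative only, not the paper's implementation) of a closed-form unsigned distance to one straight edge segment; in EMAP, f_u is instead an MLP optimized from the posed 2D edge maps, and the segment endpoints below are assumptions.

```python
# Hedged sketch of Eq. (1): a closed-form UDF to a single straight edge segment.
# In EMAP, f_u is an MLP learned from multi-view edge maps rather than this toy.
import numpy as np

def udf_to_segment(x, a, b):
    """Unsigned distance from a 3D point x to the edge segment with endpoints a, b."""
    ab = b - a
    t = np.clip(np.dot(x - a, ab) / np.dot(ab, ab), 0.0, 1.0)  # closest parameter on the segment
    return float(np.linalg.norm(x - (a + t * ab)))             # always >= 0, zero exactly on the edge

a, b = np.array([0.0, 0.0, 0.0]), np.array([1.0, 0.0, 0.0])
print(udf_to_segment(np.array([0.5, 0.2, 0.0]), a, b))  # -> 0.2, the point lies 0.2 off the edge
```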
To apply volume rendering for edge modeling, it is necessary to establish a mapping " + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "inline_equation", + "content": "\\Omega_{u}" + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "text", + "content": " [27, 58] that transforms the distance function " + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "inline_equation", + "content": "f_{u}(\\mathbf{r}(t))" + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "text", + "content": " into volume density " + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "inline_equation", + "content": "\\sigma_{u}(t)" + }, + { + "bbox": [ + 46, + 548, + 287, + 620 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 118, + 626, + 287, + 639 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 626, + 287, + 639 + ], + "spans": [ + { + "bbox": [ + 118, + 626, + 287, + 639 + ], + "type": "interline_equation", + "content": "\\sigma_ {u} (t) = \\Omega_ {u} \\left(f _ {u} (\\mathrm {r} (t))\\right). \\tag {2}", + "image_path": "f3ccc85cb8daa94c1ba365dc416f729ae57cd18c752613683a361addbee9d20e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 646, + 287, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 646, + 287, + 670 + ], + "spans": [ + { + "bbox": [ + 47, + 646, + 287, + 670 + ], + "type": "text", + "content": "In the rendering equation, the transmittance " + }, + { + "bbox": [ + 47, + 646, + 287, + 670 + ], + "type": "inline_equation", + "content": "T(t)" + }, + { + "bbox": [ + 47, + 646, + 287, + 670 + ], + "type": "text", + "content": " and weight " + }, + { + "bbox": [ + 47, + 646, + 287, + 670 + ], + "type": "inline_equation", + "content": "\\omega (t)" + }, + { + "bbox": [ + 47, + 646, + 287, + 670 + ], + "type": "text", + "content": " along the camera ray " + }, + { + "bbox": [ + 47, + 646, + 287, + 670 + ], + "type": "inline_equation", + "content": "\\mathbf{r}" + }, + { + "bbox": [ + 47, + 646, + 287, + 670 + ], + "type": "text", + "content": " are accumulated as" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 676, + 287, + 713 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 676, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 53, + 676, + 287, + 713 + ], + "type": "interline_equation", + "content": "T (t) = \\exp \\left(- \\int_ {0} ^ {t} \\sigma_ {u} (v) d v\\right), \\quad w (t) = T (t) \\cdot \\sigma_ {u} (t). \\tag {3}", + "image_path": "686dd87f27335f59836eb25b7de8cafc581f6931044635f51132f9b2999024ad.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": "To effectively handle appearance changes under different viewing angles, most neural field-based surface reconstruction [34, 58, 67, 71] disentangles geometry and appearance. In contrast, edge maps are generally unaffected by lighting, making them view-independent. Therefore, this simplifies the rendering process for edge maps. 
as it only requires the accumulation of view-independent, density-based weights " + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": " along a ray " + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "inline_equation", + "content": "\\mathbf{r}" + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": " . Now, the rendered edge value " + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "inline_equation", + "content": "\\hat{E}" + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": " along ray " + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "inline_equation", + "content": "\\mathbf{r}" + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": " is formulated as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 346, + 186, + 545, + 213 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 186, + 545, + 213 + ], + "spans": [ + { + "bbox": [ + 346, + 186, + 545, + 213 + ], + "type": "interline_equation", + "content": "\\hat {E} (\\mathbf {r}) = \\int_ {0} ^ {+ \\infty} w (t) d t = 1 - T (+ \\infty), \\tag {4}", + "image_path": "3b7000dd844dac6aec0f90b8e40ff4921ebe8bbd46987ce49034a77a7f312118.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 220, + 545, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 220, + 545, + 280 + ], + "spans": [ + { + "bbox": [ + 304, + 220, + 545, + 280 + ], + "type": "text", + "content": "Eq. (4) establishes the connection between rendered edge values and the transmittance at the end of the camera rays. Intuitively, this means that the rendered edge value is 1 when the camera ray hits an edge in 3D space, and 0 otherwise. Please refer to the supplements for more details." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 282, + 545, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 282, + 545, + 390 + ], + "spans": [ + { + "bbox": [ + 304, + 282, + 545, + 390 + ], + "type": "text", + "content": "Unbiased Density Functions for UDF Rendering. NEF [70] also uses volume rendering for rendering edges. Unlike ours, they utilize edge density to represent edges and an additional network to predict edge values. However, this approach introduces an inherent bias in edge rendering. Similar to the naive solution presented in NeuS [58], the issue comes from the weight function " + }, + { + "bbox": [ + 304, + 282, + 545, + 390 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 282, + 545, + 390 + ], + "type": "text", + "content": " in Eq. (3), where its local maximum does not coincide with the actual intersection point of the camera ray and the edges." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 390, + 545, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 390, + 545, + 486 + ], + "spans": [ + { + "bbox": [ + 304, + 390, + 545, + 486 + ], + "type": "text", + "content": "To address this issue, we incorporate unbiased UDF rendering [27] into our density-based edge rendering framework. 
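To make Eqs. (2)-(4) concrete, a minimal sketch of the density-based edge rendering in the standard discretized alpha-compositing form of volume rendering; the per-sample densities and spacings below are illustrative placeholders, not values from the paper.

```python
# Hedged sketch of Eqs. (2)-(4): densities sigma_u(t_i) = Omega_u(f_u(r(t_i))) are
# accumulated into weights w_i = T_i * alpha_i, and the rendered edge value is
# sum_i w_i = 1 - T(+inf).
import numpy as np

def render_edge_value(sigma_u, deltas):
    """sigma_u: densities at the ray samples; deltas: spacing between samples."""
    alphas = 1.0 - np.exp(-sigma_u * deltas)                        # opacity of each ray segment
    trans = np.cumprod(np.concatenate(([1.0], 1.0 - alphas)))[:-1]  # transmittance T_i before each sample
    weights = trans * alphas                                        # w_i = T_i * alpha_i
    return float(weights.sum())                                     # \hat{E}(r)

# A ray passing close to an edge (one strong density spike) renders near 1;
# a ray through empty space renders near 0.
print(render_edge_value(np.array([0.0, 0.0, 50.0, 0.0]), np.full(4, 0.05)))
print(render_edge_value(np.zeros(4), np.full(4, 0.05)))
```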
As proved in NeuS, density function " + }, + { + "bbox": [ + 304, + 390, + 545, + 486 + ], + "type": "inline_equation", + "content": "\\sigma_{u}" + }, + { + "bbox": [ + 304, + 390, + 545, + 486 + ], + "type": "text", + "content": " should increase monotonically to make the weight function unbiased. However, UDF values are not monotonous along a ray [27]. To adapt the unbiased density function " + }, + { + "bbox": [ + 304, + 390, + 545, + 486 + ], + "type": "inline_equation", + "content": "\\Omega_{s}" + }, + { + "bbox": [ + 304, + 390, + 545, + 486 + ], + "type": "text", + "content": ", which is originally induced in NeuS [58], for UDF use, the monotonically increased density function " + }, + { + "bbox": [ + 304, + 390, + 545, + 486 + ], + "type": "inline_equation", + "content": "\\sigma_{u}" + }, + { + "bbox": [ + 304, + 390, + 545, + 486 + ], + "type": "text", + "content": " [27] is formulated as" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 495, + 545, + 518 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 495, + 545, + 518 + ], + "spans": [ + { + "bbox": [ + 305, + 495, + 545, + 518 + ], + "type": "interline_equation", + "content": "\\sigma_ {u} (t) = \\Psi (t) \\cdot \\Omega_ {s} \\left(f _ {u} (\\mathbf {r} (t))\\right) + (1 - \\Psi (t)) \\cdot \\Omega_ {s} (- f _ {u} (\\mathbf {r} (t))), \\tag {5}", + "image_path": "147fe07fc27778a35f9c43be5ab2be97b75c7a9f9d84ff2db7cbb0626106131e.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 518, + 545, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 518, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 304, + 518, + 545, + 590 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 518, + 545, + 590 + ], + "type": "inline_equation", + "content": "\\Psi (t)" + }, + { + "bbox": [ + 304, + 518, + 545, + 590 + ], + "type": "text", + "content": " is a differentiable visibility function designed in [27] to capture the monotonicity change in UDF. " + }, + { + "bbox": [ + 304, + 518, + 545, + 590 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 304, + 518, + 545, + 590 + ], + "type": "text", + "content": " is 0 behind the intersection point between the camera ray and the hit edge, and is 1 before the intersection point. Besides, " + }, + { + "bbox": [ + 304, + 518, + 545, + 590 + ], + "type": "inline_equation", + "content": "\\Psi (t)" + }, + { + "bbox": [ + 304, + 518, + 545, + 590 + ], + "type": "text", + "content": " is differentiable around the intersection point to make the UDF optimization more stable." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": "Ray Sampling Strategy. A key characteristic of 2D edge maps is their significant sparsity, with edges occupying a much smaller area compared to non-edge regions. 
To enhance training efficiency and stability, we apply an importance sampling strategy for camera rays, with " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " of rays uniformly sampled from edge areas in the edge maps and the remaining " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " from non-edge areas. Such a sampling strategy ensures that our UDF field training is concentrated on edge areas, thereby substantially speeding up the training process. Additionally, our sampling strategy offers an" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 784, + 317, + 792 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 784, + 317, + 792 + ], + "spans": [ + { + "bbox": [ + 293, + 784, + 317, + 792 + ], + "type": "text", + "content": "21221" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 66, + 76, + 265, + 260 + ], + "blocks": [ + { + "bbox": [ + 66, + 76, + 265, + 260 + ], + "lines": [ + { + "bbox": [ + 66, + 76, + 265, + 260 + ], + "spans": [ + { + "bbox": [ + 66, + 76, + 265, + 260 + ], + "type": "image", + "image_path": "95652d6ceb191b2d8572ce1242428e6fb2c16a0773901aa5e9b8402546f886e7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 269, + 289, + 326 + ], + "lines": [ + { + "bbox": [ + 46, + 269, + 289, + 326 + ], + "spans": [ + { + "bbox": [ + 46, + 269, + 289, + 326 + ], + "type": "text", + "content": "Figure 3. Illustration of our 3D parametric edge extraction steps. For simplify, our schematic is depicted in the 2D plane. Our 3D edge extraction algorithm comprises five main stages: point initialization (a), point shifting (b to c), edge direction extraction (c to d), point connection (d to e), and edge fitting (e to f)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 334, + 287, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 334, + 287, + 418 + ], + "spans": [ + { + "bbox": [ + 46, + 334, + 287, + 418 + ], + "type": "text", + "content": "elegant solution to the issue of occlusion, a challenge noted in [70]. The rendered edge maps might contain edges not present in the input edge images due to occlusion. In contrast to the complicated occlusion handling strategy introduced in [70], our approach inherently alleviates this challenge by focusing the training on points from the visible edges presented in the input edge maps." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 421, + 282, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 421, + 282, + 432 + ], + "spans": [ + { + "bbox": [ + 47, + 421, + 282, + 432 + ], + "type": "text", + "content": "Loss Functions. 
The total loss function can be written as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 119, + 441, + 287, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 441, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 119, + 441, + 287, + 453 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {e d g e}} + \\lambda \\mathcal {L} _ {\\text {e i k}}, \\tag {6}", + "image_path": "9c42c18297a32a35abc2d51f2f88cd6f14ae019b980a725728b4b7557396a734.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 460, + 287, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 460, + 287, + 509 + ], + "spans": [ + { + "bbox": [ + 46, + 460, + 287, + 509 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 460, + 287, + 509 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{edge}}" + }, + { + "bbox": [ + 46, + 460, + 287, + 509 + ], + "type": "text", + "content": " represents the Mean Square Error (MSE) between the rendered and input edge images. " + }, + { + "bbox": [ + 46, + 460, + 287, + 509 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{eik}}" + }, + { + "bbox": [ + 46, + 460, + 287, + 509 + ], + "type": "text", + "content": " denotes the Eikonal loss [16], which promotes the learned UDF to be physical distance. " + }, + { + "bbox": [ + 46, + 460, + 287, + 509 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 460, + 287, + 509 + ], + "type": "text", + "content": " is used to balance these losses." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 515, + 218, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 515, + 218, + 528 + ], + "spans": [ + { + "bbox": [ + 47, + 515, + 218, + 528 + ], + "type": "text", + "content": "3.2. 3D Parametric Edge Extraction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 533, + 287, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 287, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 287, + 688 + ], + "type": "text", + "content": "With UDF learning, edge locations are implicitly encoded within the UDF field. However, accurately extracting edge points from the UDF field is non-trivial due to the absence of a real zero-level set in the UDF field. Additionally, formulating these edge points into parametric edges poses significant challenges due to the complex connections of edges. To extract points from the learned density field, NEF [70] selects points with edge density values greater than a specified threshold, " + }, + { + "bbox": [ + 46, + 533, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 46, + 533, + 287, + 688 + ], + "type": "text", + "content": ". This approach leads to an approximated edge point set that is " + }, + { + "bbox": [ + 46, + 533, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 46, + 533, + 287, + 688 + ], + "type": "text", + "content": "-bounded [27]. While this method effectively generates comprehensive point clouds, the " + }, + { + "bbox": [ + 46, + 533, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 46, + 533, + 287, + 688 + ], + "type": "text", + "content": "-bounded point set does not align accurately with the actual edge locations." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 689, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 288, + 713 + ], + "type": "text", + "content": "To eliminate the error in edge point extraction, we leverage the physical property of UDF that reflects real-world" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 76, + 471, + 148 + ], + "blocks": [ + { + "bbox": [ + 310, + 76, + 471, + 148 + ], + "lines": [ + { + "bbox": [ + 310, + 76, + 471, + 148 + ], + "spans": [ + { + "bbox": [ + 310, + 76, + 471, + 148 + ], + "type": "image", + "image_path": "3427383d6547aba66aa415e6f00bae8ea5a6583ef804b3d91314bd0e7c9d01e2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "lines": [ + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "spans": [ + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "type": "text", + "content": "Figure 4. Illustration of the overview (a) and the cross-section (b) of UDF field. (a) In UDF field, edge points are ideally located at the zero-level set, with UDF values being larger away from these points. A query point " + }, + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "type": "text", + "content": " can be precisely shifted to a more accurate position " + }, + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "type": "inline_equation", + "content": "x_{t + 1}" + }, + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "type": "text", + "content": " by following the UDF value and the inverse normal vector " + }, + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "type": "inline_equation", + "content": "n(x)" + }, + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "type": "text", + "content": ". The edge direction " + }, + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "type": "inline_equation", + "content": "l(x)" + }, + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "type": "text", + "content": " aligns with the tangent direction at the edge point " + }, + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "type": "inline_equation", + "content": "x_{t + 1}" + }, + { + "bbox": [ + 304, + 156, + 547, + 255 + ], + "type": "text", + "content": ". (b) The inverse normal vectors of all surrounding points on the cross section are pointing towards the query point." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 474, + 78, + 542, + 145 + ], + "blocks": [ + { + "bbox": [ + 474, + 78, + 542, + 145 + ], + "lines": [ + { + "bbox": [ + 474, + 78, + 542, + 145 + ], + "spans": [ + { + "bbox": [ + 474, + 78, + 542, + 145 + ], + "type": "image", + "image_path": "177a7f484bfed0f95367c92e44a94f711554e8a8baf73f022876b74131079723.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 266, + 545, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 266, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 304, + 266, + 545, + 338 + ], + "type": "text", + "content": "distances to the edges. Specifically, we develop a 3D edge extraction algorithm composed of five main stages: point initialization, point shifting, edge direction extraction, point connection, and edge fitting, as illustrated in Fig. 3. 
This algorithm takes the trained UDF field as input and outputs parametric 3D edges, including line segments and curves." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 341, + 546, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 341, + 546, + 425 + ], + "spans": [ + { + "bbox": [ + 304, + 341, + 546, + 425 + ], + "type": "text", + "content": "Point Initialization. Under eikonal loss supervision, the optimized UDF values represent physical distances to the nearest edges. To initialize potential edge points, we begin with the center points of all voxel grids and obtain their UDF values from the UDF field. Subsequently, we eliminate query points whose UDF values exceed a specified threshold " + }, + { + "bbox": [ + 304, + 341, + 546, + 425 + ], + "type": "inline_equation", + "content": "\\epsilon^{\\prime}" + }, + { + "bbox": [ + 304, + 341, + 546, + 425 + ], + "type": "text", + "content": " (red points in Fig. 3 (a))." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 428, + 547, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 428, + 547, + 487 + ], + "spans": [ + { + "bbox": [ + 304, + 428, + 547, + 487 + ], + "type": "text", + "content": "Point Shifting. As illustrated in Fig. 4 (a), the normalized inverse gradient of the UDF field indicates the inverse normal vector pointing towards edges. Drawing inspiration from OccNet [31], we refine the point " + }, + { + "bbox": [ + 304, + 428, + 547, + 487 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 428, + 547, + 487 + ], + "type": "text", + "content": " iteratively towards the edge using its distance and inverse normal direction:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 352, + 496, + 545, + 522 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 352, + 496, + 545, + 522 + ], + "spans": [ + { + "bbox": [ + 352, + 496, + 545, + 522 + ], + "type": "interline_equation", + "content": "x _ {t + 1} \\Leftarrow x _ {t} - f _ {u} (x _ {t}) \\cdot \\frac {\\nabla f _ {u} (x _ {t})}{\\| \\nabla f _ {u} (x _ {t}) \\|}, \\tag {7}", + "image_path": "3bb305ae160243b3a0d7ecea09a6c5b80674282b5bc2fb21bdbf077203eca2cd.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 531, + 545, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 531, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 531, + 545, + 567 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 531, + 545, + 567 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 531, + 545, + 567 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 304, + 531, + 545, + 567 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 531, + 545, + 567 + ], + "type": "text", + "content": "-th iteration. As a result of this iterative process, the initial points converge to the edge center (from Fig. 3 (b) to Fig. 3 (c))." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 569, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 569, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 569, + 546, + 713 + ], + "type": "text", + "content": "Edge Direction. Establishing connections between edge points is a crucial step in constructing parametric edges. 
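Before continuing with direction extraction, a minimal sketch of the point-shifting update of Eq. (7); the toy closed-form UDF and the finite-difference gradient stand in for the learned MLP and its autograd gradient, and the iteration count is an assumption.

```python
# Hedged sketch of Eq. (7): x_{t+1} <- x_t - f_u(x_t) * grad f_u(x_t) / ||grad f_u(x_t)||.
# Toy UDF: distance to the edge {(0, 0, z)}, i.e. the z-axis.
import numpy as np

def f_u(x):
    """Toy UDF: unsigned distance from x to the z-axis."""
    return float(np.linalg.norm(x[:2]))

def grad_fd(f, x, eps=1e-5):
    """Central finite-difference gradient of f at x."""
    g = np.zeros_like(x)
    for i in range(x.size):
        d = np.zeros_like(x)
        d[i] = eps
        g[i] = (f(x + d) - f(x - d)) / (2.0 * eps)
    return g

x = np.array([0.3, -0.2, 1.0])
for _ in range(5):                       # a few iterations suffice for this toy field
    g = grad_fd(f_u, x)
    n = np.linalg.norm(g)
    if n < 1e-8:                         # already on the edge
        break
    x = x - f_u(x) * g / n               # Eq. (7)
print(x)                                 # -> approximately [0, 0, 1], i.e. shifted onto the edge
```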
While most methods [11, 42, 70] estimate parameters through least-squares fitting of lines/curves on extracted points, this fitting-based approach for edge extraction is not always robust or accurate. In contrast, inspired by [37, 63], we find that combining the edge direction field with the edge distance field can robustly produce edge parameters. Given that inverse normal vectors invariably point towards edges (see Fig. 4(b)), we first devise an edge direction extraction method based on this set of inverse normal vectors. Specifically, for a query point " + }, + { + "bbox": [ + 304, + 569, + 546, + 713 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 569, + 546, + 713 + ], + "type": "text", + "content": ", we introduce minor shifts" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 783, + 317, + 792 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 783, + 317, + 792 + ], + "spans": [ + { + "bbox": [ + 293, + 783, + 317, + 792 + ], + "type": "text", + "content": "21222" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": "set " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\{\\delta\\}_{N}" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": " with size of " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": " to generate an adjoining point set " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\{x'\\}_{N}" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\{x'\\}_{N} = x + \\{\\delta\\}_{N}" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ". The inverse normal vectors of these points, denoted as " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\{n\\}_{N}" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ", are obtained from the learned UDF field. The edge direction, denoted as " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ", is identified as the null space of " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\{n_i'\\}" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ", since the edge direction is perpendicular to all inverse normal vectors in " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\{n\\}_{N}" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ". 
Therefore, " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": " can be extracted with singular value decomposition (SVD):" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 89, + 173, + 287, + 188 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 173, + 287, + 188 + ], + "spans": [ + { + "bbox": [ + 89, + 173, + 287, + 188 + ], + "type": "interline_equation", + "content": "A = U \\Sigma V ^ {T}, \\quad l = V [:, \\operatorname {a r g m i n} (\\Sigma) ], \\tag {8}", + "image_path": "d14fbc39922d09028acdab2aafed38c55d24ca2a06d0e77f2b9d958742bbb6f7.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 193, + 287, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 193, + 287, + 266 + ], + "spans": [ + { + "bbox": [ + 46, + 193, + 287, + 266 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 193, + 287, + 266 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 193, + 287, + 266 + ], + "type": "text", + "content": " is the matrix representation of " + }, + { + "bbox": [ + 46, + 193, + 287, + 266 + ], + "type": "inline_equation", + "content": "\\{n\\}_{N}" + }, + { + "bbox": [ + 46, + 193, + 287, + 266 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 193, + 287, + 266 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 46, + 193, + 287, + 266 + ], + "type": "text", + "content": " corresponds to the eigenvector associated with the smallest eigenvalue. Note that " + }, + { + "bbox": [ + 46, + 193, + 287, + 266 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 193, + 287, + 266 + ], + "type": "text", + "content": " should be sufficiently large to ensure the stability of the extracted edge direction. Unlike DeepLSD [37], we can obtain a precise edge direction field without relying on any 2D direction supervision." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 268, + 289, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 268, + 289, + 448 + ], + "spans": [ + { + "bbox": [ + 46, + 268, + 289, + 448 + ], + "type": "text", + "content": "Point Connection. After accurately determining the edge point locations and directions, we proceed to connect these edge points guided by the edge direction to create polylines (Fig. 3 (d) to (e)). Specifically, we begin by selecting candidate points and then compute directional errors for points adjacent to these candidates. Based on these directional errors, candidate points are connected to its best-matched neighboring point that growing direction aligns best with its extracted edge direction, i.e., with minimal directional error. This process is repeated, extending the edge polylines progressively until no further growth is possible. To ensure efficiency and accuracy, a non-maximum suppression step is employed to remove any redundant points that may exist between the current candidate and the best-matched point. Please refer to the supplements for more algorithm details." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 450, + 289, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 450, + 289, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 450, + 289, + 605 + ], + "type": "text", + "content": "Edge Fitting. 
To further parameterize edges, we categorize the polylines into line segments and Bézier curves (Fig. 3 (f)). Initially, we utilize RANSAC [14] to fit lines from the polylines, and select the line segment that encompasses the highest number of inlier points. Following [25], we apply Principal Component Analysis (PCA) to the inlier points, re-estimate the line segment utilizing the principal eigenvector and the mean 3D point, and project all inlier points onto the principal eigenvector to derive the 3D endpoints. This fitting process is repeated for each polyline until the number of inlier points falls below a minimum threshold. For the remaining sub-polylines, we fit each of them with a Bézier curve that is defined by four control points." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 605, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 289, + 715 + ], + "type": "text", + "content": "To minimize edge redundancy, we further merge line segments and Bézier curves based on two criteria: the shortest distance between candidate edges and the similarity of curvature at their closest points. For line segments, the shortest distance is the minimal point-to-line segment distance, and curvature similarity is their direction's cosine similarity. For Bézier curves, they are the minimal point-to-point distance and the cosine similarity of the tangent vectors at the nearest points, respectively. Candidate edges" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "content": "are merged only if they meet both criteria. This dual-criterion approach ensures that merging happens only when two edges are both similar and close to each other." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "content": "To connect edges, all endpoints of line segments and Bézier curves located within a specified distance threshold are merged into shared endpoints. Furthermore, we implement an optimization step [5, 64] to refine the 3D parametric edges by leveraging 2D edge maps, thereby enhancing edge precision. Specifically, we project 3D parametric edges into edge map frames using camera projection matrices and filter out 3D edges that are not visible in over " + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "content": " of views." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 217, + 388, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 217, + 388, + 231 + ], + "spans": [ + { + "bbox": [ + 305, + 217, + 388, + 231 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 238, + 420, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 238, + 420, + 251 + ], + "spans": [ + { + "bbox": [ + 306, + 238, + 420, + 251 + ], + "type": "text", + "content": "4.1. 
Experiment Setting" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 259, + 547, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 259, + 547, + 463 + ], + "spans": [ + { + "bbox": [ + 304, + 259, + 547, + 463 + ], + "type": "text", + "content": "Datasets. We consider four diverse datasets: CAD models (ABC-NEF [70]), real-world objects (DTU [1]), high-quality indoor scenes (Replica [53]), and real-world outdoor scenes (Tanks & Temples [21]). ABC-NEF dataset comprises 115 CAD models, each accompanied by 50 observed images and ground truth parametric edges. We select 82 CAD models, excluding those containing inconsistent edge observations (e.g., cylinders or balls). DTU dataset provides dense ground-truth point clouds and we select 6 objects that meet the multi-view constraints among scans processed by [72]. Following [5], we derive edge points by projecting ground-truth dense points onto images and then comparing them with the observations on 2D edge maps to filter out non-edge points. Replica and Tanks & Temples datasets contain larger scenes. Due to the lack of ground-truth edges, we conduct qualitative comparisons among baselines." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 466, + 547, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 466, + 547, + 515 + ], + "spans": [ + { + "bbox": [ + 304, + 466, + 547, + 515 + ], + "type": "text", + "content": "Baselines. We compare with three state-of-the-art baselines for 3D line/curve mapping, including two learning-based methods, NEF [70] and NEAT [64], and one geometry-based method, LIMAP [25]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "spans": [ + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "text", + "content": "Metrics. Our evaluation involves first sampling points in proportion to the edge's length and subsequently downsampling these points using a voxel grid with a resolution of " + }, + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "inline_equation", + "content": "256^3" + }, + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "text", + "content": ". Following the metrics used in [25, 70], we consider Accuracy (Acc), Completeness (Comp) in millimeters, and Recall " + }, + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "inline_equation", + "content": "(R_{\\tau})" + }, + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "text", + "content": ", Precision " + }, + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "inline_equation", + "content": "(P_{\\tau})" + }, + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "text", + "content": ", F-score " + }, + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "inline_equation", + "content": "(F_{\\tau})" + }, + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "text", + "content": " in percentage with a threshold " + }, + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 518, + 547, + 626 + ], + "type": "text", + "content": " in millimeters. Moreover, we report Edge Direction Consistency (Norm) in percentage to analyze the precision of edge direction extraction." 
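For concreteness, the following is a minimal numpy sketch of these point-based metrics, assuming `pred` and `gt` are point sets already sampled along the predicted and ground-truth edges and already voxel-downsampled as described above. Function and variable names are illustrative, not the evaluation code released with the paper, and the brute-force nearest-neighbour search would normally be replaced by a KD-tree for large point sets.

```python
import numpy as np

def nearest_dist(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """For each point in a (M, 3), distance to its nearest neighbour in b (K, 3)."""
    d = np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1)   # (M, K)
    return d.min(axis=1)

def edge_metrics(pred: np.ndarray, gt: np.ndarray, tau: float = 5.0) -> dict:
    """Accuracy/Completeness (mm) and Recall/Precision/F-score (%) at threshold tau (mm)."""
    d_pred_to_gt = nearest_dist(pred, gt)    # accuracy / precision side
    d_gt_to_pred = nearest_dist(gt, pred)    # completeness / recall side
    acc = d_pred_to_gt.mean()
    comp = d_gt_to_pred.mean()
    precision = 100.0 * (d_pred_to_gt < tau).mean()
    recall = 100.0 * (d_gt_to_pred < tau).mean()
    f_score = 2 * precision * recall / (precision + recall + 1e-8)
    return {"Acc": acc, "Comp": comp, f"R{tau:g}": recall,
            f"P{tau:g}": precision, f"F{tau:g}": f_score}
```

The edge direction consistency (Norm) metric is omitted here; it compares extracted directions against ground-truth tangents in the same nearest-neighbour fashion.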
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "text", + "content": "Implementation Details. For " + }, + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "inline_equation", + "content": "f_{u}" + }, + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "text", + "content": ", we utilize 8-layer Multi-layer Perceptrons (MLPs). Each layer in the MLP contains 512 neurons for larger scenes, such as Tanks & Temples, and 256 neurons for other datasets. We sample 1024 rays per batch, among these rays, 512 rays are sampled from edge areas. We train our model for " + }, + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "inline_equation", + "content": "50k" + }, + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "text", + "content": " iterations on ABC-NEF dataset, and " + }, + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "inline_equation", + "content": "200k" + }, + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "text", + "content": " iterations on other datasets. We train" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "21223" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 94, + 68, + 503, + 238 + ], + "blocks": [ + { + "bbox": [ + 94, + 68, + 503, + 238 + ], + "lines": [ + { + "bbox": [ + 94, + 68, + 503, + 238 + ], + "spans": [ + { + "bbox": [ + 94, + 68, + 503, + 238 + ], + "type": "image", + "image_path": "d8a8f207e25d105eeb2ddbb82ba0e3086cd1272a644335bb120164a0f6fa9412.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 244, + 547, + 269 + ], + "lines": [ + { + "bbox": [ + 46, + 244, + 547, + 269 + ], + "spans": [ + { + "bbox": [ + 46, + 244, + 547, + 269 + ], + "type": "text", + "content": "Figure 5. Qualitative comparisons on ABC-NEF [70]. Lines are shown in black and curves in blue. Thanks to our precise edge extraction capabilities for both lines and curves, we achieve complete and accurate modeling of these elements." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 74, + 277, + 517, + 357 + ], + "blocks": [ + { + "bbox": [ + 74, + 277, + 517, + 357 + ], + "lines": [ + { + "bbox": [ + 74, + 277, + 517, + 357 + ], + "spans": [ + { + "bbox": [ + 74, + 277, + 517, + 357 + ], + "type": "table", + "html": "
Method | Detector | Modal | Acc↓ | Comp↓ | Norm↑ | R5↑ | R10↑ | R20↑ | P5↑ | P10↑ | P20↑ | F5↑ | F10↑ | F20↑
LIMAP [25] | LSD | Line | 9.9 | 18.7 | 94.4 | 36.2 | 82.3 | 87.9 | 43.0 | 87.6 | 93.9 | 39.0 | 84.3 | 90.4
LIMAP [25] | SOLD2 | Line | 5.9 | 29.6 | 90.1 | 64.2 | 76.6 | 79.6 | 88.1 | 96.4 | 97.9 | 72.9 | 84.0 | 86.7
NEF [70] | PiDiNet† | Curve | 11.9 | 16.9 | 90.9 | 11.4 | 62.0 | 91.3 | 15.7 | 68.5 | 96.3 | 13.0 | 64.6 | 93.3
NEF [70] | PiDiNet | Curve | 15.1 | 16.5 | 89.7 | 11.7 | 53.3 | 89.8 | 13.6 | 52.2 | 89.1 | 12.3 | 51.8 | 88.7
NEF [70] | DexiNed | Curve | 21.9 | 15.7 | 85.9 | 11.3 | 48.3 | 87.7 | 11.5 | 39.8 | 71.7 | 10.8 | 42.1 | 76.8
Ours | PiDiNet | Edge | 9.2 | 15.6 | 93.7 | 30.2 | 75.7 | 89.8 | 35.6 | 79.1 | 95.4 | 32.4 | 77.0 | 92.2
Ours | DexiNed | Edge | 8.8 | 8.9 | 95.4 | 56.4 | 88.9 | 94.8 | 62.9 | 89.9 | 95.7 | 59.1 | 88.9 | 94.9
", + "image_path": "63e344e229814eeb377715a57ff3761b96aff690fbd6f67e85da9b17f95aaaa5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 395, + 289, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 395, + 289, + 551 + ], + "spans": [ + { + "bbox": [ + 46, + 395, + 289, + 551 + ], + "type": "text", + "content": "our network with the Adam optimizer with a learning rate of " + }, + { + "bbox": [ + 46, + 395, + 289, + 551 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 46, + 395, + 289, + 551 + ], + "type": "text", + "content": ", while the UDF model " + }, + { + "bbox": [ + 46, + 395, + 289, + 551 + ], + "type": "inline_equation", + "content": "f_{u}" + }, + { + "bbox": [ + 46, + 395, + 289, + 551 + ], + "type": "text", + "content": " is trained with a learning rate of " + }, + { + "bbox": [ + 46, + 395, + 289, + 551 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 46, + 395, + 289, + 551 + ], + "type": "text", + "content": " and initialized with sphere initialization [66]. For edge detection for NEF and ours, we consider PiDiNet [54] and DexiNed [41]. PiDiNet [54] is employed for indoor scenes, such as DTU and Replica, due to its superior performance in these settings. Conversely, DexiNed [41] is applied to outdoor scenes, as it is primarily trained on outdoor scenes. On the synthetic ABC-NEF dataset, we show results with both detectors. For LIMAP, we follow their paper and we use SOLD2 [36] for indoor scenes and LSD [55] for outdoor scenes. NEAT is trained with 2D wireframes from HAWPV3 [65]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 559, + 250, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 559, + 250, + 572 + ], + "spans": [ + { + "bbox": [ + 47, + 559, + 250, + 572 + ], + "type": "text", + "content": "4.2. Evaluation of 3D Edge Reconstruction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 582, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 289, + 713 + ], + "type": "text", + "content": "Evaluation on ABC-NEF Dataset. We show the quantitative and qualitative comparisons on Table 1 and Fig. 5. Note that NEAT fails on the ABC-NEF dataset because of its heavy dependence on texture input. NEF demonstrates decent performance at " + }, + { + "bbox": [ + 46, + 582, + 289, + 713 + ], + "type": "inline_equation", + "content": "\\tau = 20" + }, + { + "bbox": [ + 46, + 582, + 289, + 713 + ], + "type": "text", + "content": ". However, their performance drops significantly when " + }, + { + "bbox": [ + 46, + 582, + 289, + 713 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 582, + 289, + 713 + ], + "type": "text", + "content": " is set to 10 and 5. This is attributed to its bias in edge rendering and its fitting-based post-processing. LIMAP shows remarkable precision across various " + }, + { + "bbox": [ + 46, + 582, + 289, + 713 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 582, + 289, + 713 + ], + "type": "text", + "content": " thresholds. Such consistency stems from its non-linear refinement over multi-view 2D supports. 
Nonetheless, LIMAP's inability to reconstruct curves leads" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 312, + 392, + 538, + 466 + ], + "blocks": [ + { + "bbox": [ + 46, + 361, + 546, + 384 + ], + "lines": [ + { + "bbox": [ + 46, + 361, + 546, + 384 + ], + "spans": [ + { + "bbox": [ + 46, + 361, + 546, + 384 + ], + "type": "text", + "content": "Table 1. Edge reconstruction results on ABC-NEF [70]. Results from NEF's released pretrained models are indicated by " + }, + { + "bbox": [ + 46, + 361, + 546, + 384 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 46, + 361, + 546, + 384 + ], + "type": "text", + "content": ". Our method surpasses all others in terms of completeness and achieves accuracy comparable to LIMAP [25]." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 312, + 392, + 538, + 466 + ], + "lines": [ + { + "bbox": [ + 312, + 392, + 538, + 466 + ], + "spans": [ + { + "bbox": [ + 312, + 392, + 538, + 466 + ], + "type": "table", + "html": "
Method | Detector | Curve Acc↓ | Curve Comp↓ | Curve Norm↑ | Line Acc↓ | Line Comp↓ | Line Norm↑
LIMAP [25] | LSD | 272.6 | 50.1 | 84.8 | 34.6 | 11.3 | 95.9
LIMAP [25] | SOLD2 | 295.7 | 82.2 | 76.8 | 20.0 | 18.1 | 92.1
NEF [70] | PiDiNet† | 265.0 | 27.1 | 77.9 | 40.4 | 13.7 | 92.6
NEF [70] | PiDiNet | 263.1 | 23.9 | 77.6 | 43.9 | 14.0 | 91.4
NEF [70] | DexiNed | 250.5 | 20.3 | 72.6 | 56.2 | 13.8 | 87.3
Ours | PiDiNet | 253.7 | 25.7 | 88.1 | 43.1 | 12.8 | 93.7
Ours | DexiNed | 241.0 | 10.9 | 88.7 | 46.7 | 7.7 | 95.4
", + "image_path": "bdf45c058aeb89c164a707154f98571fd140a91fd04a3fc0ef6ae056cef6ba57.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 469, + 546, + 513 + ], + "lines": [ + { + "bbox": [ + 304, + 469, + 546, + 513 + ], + "spans": [ + { + "bbox": [ + 304, + 469, + 546, + 513 + ], + "type": "text", + "content": "Table 2. Accuracy, completeness and normal consistency results with curves and lines on ABC-NEF [70]. Our method with DexiNed edge detector yields overall the strongest performance on curves among all baselines." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 520, + 545, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 520, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 520, + 545, + 604 + ], + "type": "text", + "content": "to lower scores in completeness and recall. Our method, when combined with either of the 2D edge detectors, consistently outperforms all baselines. Notably, as shown in Table 1, combined with the DexiNed detector, our method achieves superior results in completeness, edge direction consistency, recall, and F-Score. We also show competitive accuracy and precision when compared to LIMAP." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": "To further analyze the performance of different edge types, we classify the ground truth edges into curves (including BSplines, ellipses, and circles) and line segments, based on the GT annotations. We provide accuracy, completeness, and edge direction consistency in Table 2 to analyze the separate reconstruction abilities for curves and lines. Note that these results are computed based on all predictions specific to either curves or lines, as other methods do not differentiate between these two types of edges. We" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "21224" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 97, + 77, + 496, + 257 + ], + "blocks": [ + { + "bbox": [ + 97, + 77, + 496, + 257 + ], + "lines": [ + { + "bbox": [ + 97, + 77, + 496, + 257 + ], + "spans": [ + { + "bbox": [ + 97, + 77, + 496, + 257 + ], + "type": "image", + "image_path": "85d6f4966cb9656b73660aa5b0a3763e630fe331ae41e9c52593945514f4242b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 262, + 547, + 285 + ], + "lines": [ + { + "bbox": [ + 46, + 262, + 547, + 285 + ], + "spans": [ + { + "bbox": [ + 46, + 262, + 547, + 285 + ], + "type": "text", + "content": "Figure 6. Qualitative comparisons on the Replica [53] and Tanks & Temples [21] datasets. The first two scenes are from the Replica dataset, while the last scene is from the Tanks & Temples dataset." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 293, + 129, + 361 + ], + "blocks": [ + { + "bbox": [ + 59, + 293, + 129, + 361 + ], + "lines": [ + { + "bbox": [ + 59, + 293, + 129, + 361 + ], + "spans": [ + { + "bbox": [ + 59, + 293, + 129, + 361 + ], + "type": "image", + "image_path": "c76cb10f06e8d4329f2ed8a8004c47e2ae22399821ff56514abfd97ef1c0a767.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 362, + 112, + 373 + ], + "lines": [ + { + "bbox": [ + 77, + 362, + 112, + 373 + ], + "spans": [ + { + "bbox": [ + 77, + 362, + 112, + 373 + ], + "type": "text", + "content": "2D Image" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 132, + 297, + 201, + 361 + ], + "blocks": [ + { + "bbox": [ + 132, + 297, + 201, + 361 + ], + "lines": [ + { + "bbox": [ + 132, + 297, + 201, + 361 + ], + "spans": [ + { + "bbox": [ + 132, + 297, + 201, + 361 + ], + "type": "image", + "image_path": "15bdf6ac242f4925c9447e88d99c55d022936580c065ebb02fdca63bcddb0bc0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 143, + 362, + 187, + 373 + ], + "lines": [ + { + "bbox": [ + 143, + 362, + 187, + 373 + ], + "spans": [ + { + "bbox": [ + 143, + 362, + 187, + 373 + ], + "type": "text", + "content": "LIMAP [25]" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 202, + 297, + 272, + 361 + ], + "blocks": [ + { + "bbox": [ + 202, + 297, + 272, + 361 + ], + "lines": [ + { + "bbox": [ + 202, + 297, + 272, + 361 + ], + "spans": [ + { + "bbox": [ + 202, + 297, + 272, + 361 + ], + "type": "image", + "image_path": "00797d273957867f59baf7e8d3861bc4761fafe544042f9e09d83bec09342ccc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 362, + 253, + 372 + ], + "lines": [ + { + "bbox": [ + 219, + 362, + 253, + 372 + ], + "spans": [ + { + "bbox": [ + 219, + 362, + 253, + 372 + ], + "type": "text", + "content": "NEF [70]" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 62, + 375, + 129, + 438 + ], + "blocks": [ + { + "bbox": [ + 62, + 375, + 129, + 438 + ], + "lines": [ + { + "bbox": [ + 62, + 375, + 129, + 438 + ], + "spans": [ + { + "bbox": [ + 62, + 375, + 129, + 438 + ], + "type": "image", + "image_path": "0bc9a81e7d2d51ad0add2a71c212ba60938538b6deea540583347c6e71802a2b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 75, + 441, + 114, + 451 + ], + "lines": [ + { + "bbox": [ + 75, + 441, + 114, + 451 + ], + "spans": [ + { + "bbox": [ + 75, + 441, + 114, + 451 + ], + "type": "text", + "content": "NEAT [64]" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 134, + 374, + 198, + 435 + ], + "blocks": [ + { + "bbox": [ + 134, + 374, + 198, + 435 + ], + "lines": [ + { + "bbox": [ + 134, + 374, + 198, + 435 + ], + "spans": [ + { + "bbox": [ + 134, + 374, + 198, + 435 + ], + "type": "image", + "image_path": "f96fadba897230b20816ff77c329140fd909f3ddcda7181254b39ef86a5acc5e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 156, + 441, + 175, + 450 + ], + "lines": [ + { + "bbox": [ 
+ 156, + 441, + 175, + 450 + ], + "spans": [ + { + "bbox": [ + 156, + 441, + 175, + 450 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 207, + 376, + 268, + 434 + ], + "blocks": [ + { + "bbox": [ + 207, + 376, + 268, + 434 + ], + "lines": [ + { + "bbox": [ + 207, + 376, + 268, + 434 + ], + "spans": [ + { + "bbox": [ + 207, + 376, + 268, + 434 + ], + "type": "image", + "image_path": "b00d3acd08c734a1304a9d9ed71594a644480cc8ea4f8fec61d825ec88663aa1.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 441, + 252, + 451 + ], + "lines": [ + { + "bbox": [ + 219, + 441, + 252, + 451 + ], + "spans": [ + { + "bbox": [ + 219, + 441, + 252, + 451 + ], + "type": "text", + "content": "GT Edge" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 456, + 287, + 489 + ], + "lines": [ + { + "bbox": [ + 46, + 456, + 287, + 489 + ], + "spans": [ + { + "bbox": [ + 46, + 456, + 287, + 489 + ], + "type": "text", + "content": "Figure 7. Qualitative comparisons on DTU [1]. Our results demonstrate complete edge structure, whereas other methods result in redundant line segments or imprecise curves." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 63, + 496, + 270, + 579 + ], + "blocks": [ + { + "bbox": [ + 63, + 496, + 270, + 579 + ], + "lines": [ + { + "bbox": [ + 63, + 496, + 270, + 579 + ], + "spans": [ + { + "bbox": [ + 63, + 496, + 270, + 579 + ], + "type": "table", + "html": "
Scan | LIMAP [25] R5↑ | LIMAP [25] P5↑ | NEF [70] R5↑ | NEF [70] P5↑ | NEAT [64] R5↑ | NEAT [64] P5↑ | Ours R5↑ | Ours P5↑
37 | 75.8 | 74.3 | 39.5 | 51.0 | 63.9 | 85.1 | 62.7 | 83.9
83 | 75.7 | 50.7 | 32.0 | 21.8 | 72.3 | 52.4 | 72.3 | 61.5
105 | 79.1 | 64.9 | 30.3 | 32.0 | 68.9 | 73.3 | 78.5 | 78.0
110 | 79.7 | 65.3 | 31.2 | 40.2 | 64.3 | 79.6 | 90.9 | 68.3
118 | 59.4 | 62.0 | 15.3 | 25.2 | 59.0 | 71.1 | 75.3 | 78.1
122 | 79.9 | 79.2 | 15.1 | 29.1 | 70.0 | 82.0 | 85.3 | 82.9
Mean | 74.9 | 66.1 | 27.2 | 33.2 | 66.4 | 73.9 | 77.5 | 75.4
", + "image_path": "b4ff2ddfc68870fa7a7878641ba2b0304745c0bfe198c243b342a0a7662e9675.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 73, + 583, + 260, + 594 + ], + "lines": [ + { + "bbox": [ + 73, + 583, + 260, + 594 + ], + "spans": [ + { + "bbox": [ + 73, + 583, + 260, + 594 + ], + "type": "text", + "content": "Table 3. Edge reconstruction results on DTU [1]." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 602, + 287, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 602, + 287, + 651 + ], + "spans": [ + { + "bbox": [ + 46, + 602, + 287, + 651 + ], + "type": "text", + "content": "can see that our method with DexiNed exhibits superior results in reconstructing curves. As for line segments, our performance is marginally lower than the best-performing method LIMAP which is specially optimized for lines." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": "Evaluation on DTU Dataset. Our assessment of the DTU dataset, as outlined in Table 3 and Fig. 7, shows our proficiency in real-world scenarios. Notably, our approach achieves the highest recall and precision among all baselines. The DTU dataset presents a challenging scenario for" + } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 315, + 293, + 385, + 334 + ], + "blocks": [ + { + "bbox": [ + 315, + 293, + 385, + 334 + ], + "lines": [ + { + "bbox": [ + 315, + 293, + 385, + 334 + ], + "spans": [ + { + "bbox": [ + 315, + 293, + 385, + 334 + ], + "type": "image", + "image_path": "b354c149f9d99c4094d797a70c507d8be54383f1a5459910be440c1163369e90.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 335, + 385, + 345 + ], + "lines": [ + { + "bbox": [ + 315, + 335, + 385, + 345 + ], + "spans": [ + { + "bbox": [ + 315, + 335, + 385, + 345 + ], + "type": "text", + "content": "(a) w/o point shifting" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 391, + 294, + 462, + 334 + ], + "blocks": [ + { + "bbox": [ + 391, + 294, + 462, + 334 + ], + "lines": [ + { + "bbox": [ + 391, + 294, + 462, + 334 + ], + "spans": [ + { + "bbox": [ + 391, + 294, + 462, + 334 + ], + "type": "image", + "image_path": "9330ce339bce3241821a4886721d8b8bd4b53979aaa9cf8a3a161d2c4f46f4d6.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 394, + 335, + 459, + 345 + ], + "lines": [ + { + "bbox": [ + 394, + 335, + 459, + 345 + ], + "spans": [ + { + "bbox": [ + 394, + 335, + 459, + 345 + ], + "type": "text", + "content": "(b) w/ point shifting" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 350, + 545, + 416 + ], + "lines": [ + { + "bbox": [ + 305, + 350, + 545, + 416 + ], + "spans": [ + { + "bbox": [ + 305, + 350, + 545, + 416 + ], + "type": "text", + "content": "Figure 8. Visualization of point shifting and edge direction. Edge points are shown in point clouds and edge directions in color. The point shifting step significantly refines the locations of edge points. 
The edge extraction step yields accurate results, as seen in parallel lines sharing the same direction and curves exhibiting continuously changing directions." + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 467, + 294, + 537, + 334 + ], + "blocks": [ + { + "bbox": [ + 467, + 294, + 537, + 334 + ], + "lines": [ + { + "bbox": [ + 467, + 294, + 537, + 334 + ], + "spans": [ + { + "bbox": [ + 467, + 294, + 537, + 334 + ], + "type": "image", + "image_path": "3e18c6522e3243165a91cc2996d1327f39a3edf0e789a150c8e6794f93052558.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 473, + 335, + 531, + 345 + ], + "lines": [ + { + "bbox": [ + 473, + 335, + 531, + 345 + ], + "spans": [ + { + "bbox": [ + 473, + 335, + 531, + 345 + ], + "type": "text", + "content": "(c) edge direction" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "text", + "content": "edge extraction due to its varying lighting conditions. However, our edge refinement step proves effective in preserving primary edges, a point we elaborate on in Sec. 4.3. Fig. 7 shows LIMAP tends to produce redundant line segments, leading to high recall but reduced precision. NEF's post-processing is sensitive to different scenes, resulting in noisy edge fitting. NEAT, despite producing clean outputs, its inability to handle curves constrains its overall performance." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 304, + 524, + 545, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 524, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 304, + 524, + 545, + 644 + ], + "type": "text", + "content": "Qualitative Evaluation on Indoor & Outdoor Scenes. To really showcase the power of our method in capturing scene-level geometry, we further run our method on indoor and outdoor scenes. Note that since NEF is not able to produce meaningful reconstructions on larger scenes, we only compare with LIMAP and NEAT. As shown in Fig. 1 and Fig. 6, NEAT, due to its reliance on high-quality surface reconstruction, faces limitations in scene reconstruction, while LIMAP and our method both successfully capture good scene geometry." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 306, + 654, + 437, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 654, + 437, + 667 + ], + "spans": [ + { + "bbox": [ + 306, + 654, + 437, + 667 + ], + "type": "text", + "content": "4.3. Ablations and Analysis" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 305, + 677, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 546, + 715 + ], + "type": "text", + "content": "Parametric Edge Extraction. To better understand our parametric edge extraction process described in Sec. 3.2, we visualize our point shifting and edge direction in Fig. 8." 
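As context for this ablation, point shifting in UDF-based pipelines commonly projects a query point onto the zero level set along the negative UDF gradient; the sketch below shows that generic update and is only our reading of the point shifting step defined in Sec. 3.2. The callable `udf_and_grad`, the iteration count, and the toy example are assumptions for illustration.

```python
import numpy as np

def shift_points(points, udf_and_grad, n_iters: int = 3):
    """Move query points toward the UDF zero level set:
    q <- q - f(q) * grad f(q) / ||grad f(q)||   (a common UDF projection step).

    points: (N, 3); udf_and_grad: callable returning (f of shape (N,), grad of shape (N, 3)).
    """
    q = np.array(points, dtype=np.float64)
    for _ in range(n_iters):
        f, g = udf_and_grad(q)
        g = g / (np.linalg.norm(g, axis=1, keepdims=True) + 1e-12)
        q = q - f[:, None] * g
    return q

# Toy check with the UDF of the z-axis (an "edge" along z): x, y collapse to 0.
def line_udf(q):
    f = np.linalg.norm(q[:, :2], axis=1)
    g = np.zeros_like(q); g[:, :2] = q[:, :2] / (f[:, None] + 1e-12)
    return f, g

print(shift_points(np.array([[0.3, -0.2, 1.0], [0.05, 0.1, -2.0]]), line_udf))
```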
+ } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "21225" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 76, + 126, + 115 + ], + "blocks": [ + { + "bbox": [ + 56, + 76, + 126, + 115 + ], + "lines": [ + { + "bbox": [ + 56, + 76, + 126, + 115 + ], + "spans": [ + { + "bbox": [ + 56, + 76, + 126, + 115 + ], + "type": "image", + "image_path": "37dca7d064aac0cc132d9bf56c37d2bff356cca0784f3ac69b7ed228931bac09.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 78, + 122, + 106, + 131 + ], + "lines": [ + { + "bbox": [ + 78, + 122, + 106, + 131 + ], + "spans": [ + { + "bbox": [ + 78, + 122, + 106, + 131 + ], + "type": "text", + "content": "(a) Ours" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 133, + 76, + 202, + 115 + ], + "blocks": [ + { + "bbox": [ + 133, + 76, + 202, + 115 + ], + "lines": [ + { + "bbox": [ + 133, + 76, + 202, + 115 + ], + "spans": [ + { + "bbox": [ + 133, + 76, + 202, + 115 + ], + "type": "image", + "image_path": "b01c0b9a10c60091e6122fb8977f66258e6dad7d7946bca904b5bc03a66510de.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 122, + 203, + 132 + ], + "lines": [ + { + "bbox": [ + 132, + 122, + 203, + 132 + ], + "spans": [ + { + "bbox": [ + 132, + 122, + 203, + 132 + ], + "type": "text", + "content": "(b) w/o point shifting" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 208, + 76, + 278, + 115 + ], + "blocks": [ + { + "bbox": [ + 208, + 76, + 278, + 115 + ], + "lines": [ + { + "bbox": [ + 208, + 76, + 278, + 115 + ], + "spans": [ + { + "bbox": [ + 208, + 76, + 278, + 115 + ], + "type": "image", + "image_path": "5ce8b7b83fbb7916206664839d3fe79193c6f49383ddfce6d48c3528c7e0c490.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 122, + 277, + 132 + ], + "lines": [ + { + "bbox": [ + 209, + 122, + 277, + 132 + ], + "spans": [ + { + "bbox": [ + 209, + 122, + 277, + 132 + ], + "type": "text", + "content": "(c) w/o Bezier curve" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 53, + 144, + 126, + 182 + ], + "blocks": [ + { + "bbox": [ + 53, + 144, + 126, + 182 + ], + "lines": [ + { + "bbox": [ + 53, + 144, + 126, + 182 + ], + "spans": [ + { + "bbox": [ + 53, + 144, + 126, + 182 + ], + "type": "image", + "image_path": "ce553152039508f23cfb3149a04997c6fccfd65ee5ae6a45c5148c973626f296.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 60, + 189, + 122, + 198 + ], + "lines": [ + { + "bbox": [ + 60, + 189, + 122, + 198 + ], + "spans": [ + { + "bbox": [ + 60, + 189, + 122, + 198 + ], + "type": "text", + "content": "(d) w/o edge merge." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 132, + 144, + 202, + 182 + ], + "blocks": [ + { + "bbox": [ + 132, + 144, + 202, + 182 + ], + "lines": [ + { + "bbox": [ + 132, + 144, + 202, + 182 + ], + "spans": [ + { + "bbox": [ + 132, + 144, + 202, + 182 + ], + "type": "image", + "image_path": "1a28bb33faf21a2d5a68b83d045bd5b56dc6f71f9e8b6bc1df74377d810aab98.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 129, + 189, + 206, + 199 + ], + "lines": [ + { + "bbox": [ + 129, + 189, + 206, + 199 + ], + "spans": [ + { + "bbox": [ + 129, + 189, + 206, + 199 + ], + "type": "text", + "content": "(e) w/o endpoints merge." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 209, + 144, + 278, + 182 + ], + "blocks": [ + { + "bbox": [ + 209, + 144, + 278, + 182 + ], + "lines": [ + { + "bbox": [ + 209, + 144, + 278, + 182 + ], + "spans": [ + { + "bbox": [ + 209, + 144, + 278, + 182 + ], + "type": "image", + "image_path": "7c7f3f78f2e6c40239ff6bdb0b140a79f4c6851984e2a16980ad15c09883ab57.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 223, + 189, + 264, + 198 + ], + "lines": [ + { + "bbox": [ + 223, + 189, + 264, + 198 + ], + "spans": [ + { + "bbox": [ + 223, + 189, + 264, + 198 + ], + "type": "text", + "content": "(f) GT Edge" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 203, + 287, + 246 + ], + "lines": [ + { + "bbox": [ + 46, + 203, + 287, + 246 + ], + "spans": [ + { + "bbox": [ + 46, + 203, + 287, + 246 + ], + "type": "text", + "content": "Figure 9. Qualitative ablation on different component of our parametric edge extraction. The absence of any module in our edge extraction process results in incomplete or noisy qualitative outcomes." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 255, + 287, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 255, + 287, + 338 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 287, + 338 + ], + "type": "text", + "content": "We can clearly see that the extracted point clouds without point shifting are appeared in redundant and inaccurate edge points (Fig. 8 (a)). In contrast, the point shifting step yields point clouds with sharply defined, precise edges (Fig. 8 (b)). In addition, as shown in Fig. 8 (c), the extracted edge directions along parallel lines are consistent, while those on curves vary continuously. This aligns with our expectations." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 339, + 287, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 339, + 287, + 507 + ], + "spans": [ + { + "bbox": [ + 46, + 339, + 287, + 507 + ], + "type": "text", + "content": "Furthermore, we also conduct ablation studies in Table 4 and Fig. 9 to evaluate the impact of different components in our edge extraction algorithm. These experiments were performed on the ABC-NEF dataset using the DexiNed detector. First, the removal of the query point shifting step leads to a significant drop in both recall and precision. This indicates that our point-shifting step significantly refines the query points locations. Second, excluding Bezier curves results in a decline in completeness (Fig. 
9 (c)), showing that curves are necessary for edge reconstruction. Third, omitting the edge merging step leads to redundant small line segments, as evident in Fig. 9 (d). Finally, the removal of endpoint merging impairs connectivity between edges, as shown in Fig. 9 (e)." + } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 50, + 517, + 287, + 581 + ], + "blocks": [ + { + "bbox": [ + 50, + 517, + 287, + 581 + ], + "lines": [ + { + "bbox": [ + 50, + 517, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 50, + 517, + 287, + 581 + ], + "type": "table", + "html": "
Method | Acc↓ | Comp↓ | R5↑ | P5↑ | F5↑
(a) Ours | 8.8 | 8.9 | 56.4 | 62.9 | 59.1
(b) w/o point shifting | 15.3 | 9.9 | 29.2 | 18.7 | 22.2
(c) w/o Bézier curve | 9.4 | 12.1 | 54.2 | 65.8 | 59.0
(d) w/o edge merging | 10.3 | 8.7 | 53.8 | 45.3 | 48.6
(e) w/o endpoints merging | 9.3 | 9.0 | 51.5 | 57.7 | 54.0
", + "image_path": "fc6dd362c71742316d5dedcf23fc6e883f0fd9c12c5f3c1142f20b42d47f2237.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 585, + 287, + 629 + ], + "lines": [ + { + "bbox": [ + 46, + 585, + 287, + 629 + ], + "spans": [ + { + "bbox": [ + 46, + 585, + 287, + 629 + ], + "type": "text", + "content": "Table 4. Ablation studies on different component of parametric edge extraction on ABC-NEF [70] with DexiNed [41]. Our parametric edge extraction approach with all components achieves the optimal balance between accuracy and completeness." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": "Edge Refinement. In Fig. 10, we study the effectiveness of our edge refinement module. When input edge maps contain some noises in dark scenes, our initial 3D edge map, without the edge refinement, exhibits some artifacts. However, the edge refinement module markedly mitigates artifacts, achieving a balance between recall and precision." + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 310, + 71, + 425, + 125 + ], + "blocks": [ + { + "bbox": [ + 310, + 71, + 425, + 125 + ], + "lines": [ + { + "bbox": [ + 310, + 71, + 425, + 125 + ], + "spans": [ + { + "bbox": [ + 310, + 71, + 425, + 125 + ], + "type": "image", + "image_path": "76476b7e718c308488003bd20018d95bbb86ea3ad15a7919eebed9eaa49ee90a.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 334, + 125, + 402, + 135 + ], + "lines": [ + { + "bbox": [ + 334, + 125, + 402, + 135 + ], + "spans": [ + { + "bbox": [ + 334, + 125, + 402, + 135 + ], + "type": "text", + "content": "w/o edge refinement" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 429, + 72, + 534, + 124 + ], + "blocks": [ + { + "bbox": [ + 429, + 72, + 534, + 124 + ], + "lines": [ + { + "bbox": [ + 429, + 72, + 534, + 124 + ], + "spans": [ + { + "bbox": [ + 429, + 72, + 534, + 124 + ], + "type": "image", + "image_path": "138283933240e89baba8f9b4feca1fc65438e619ce2077a5f3d602df0693f867.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 449, + 125, + 514, + 135 + ], + "lines": [ + { + "bbox": [ + 449, + 125, + 514, + 135 + ], + "spans": [ + { + "bbox": [ + 449, + 125, + 514, + 135 + ], + "type": "text", + "content": "w/ edge refinement" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 309, + 182, + 544, + 318 + ], + "blocks": [ + { + "bbox": [ + 305, + 140, + 545, + 173 + ], + "lines": [ + { + "bbox": [ + 305, + 140, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 305, + 140, + 545, + 173 + ], + "type": "text", + "content": "Figure 10. Ablation study on edge refinement. Our edge refinement effectively eliminates the majority of noisy edges in background areas." 
+ } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 309, + 182, + 544, + 318 + ], + "lines": [ + { + "bbox": [ + 309, + 182, + 544, + 318 + ], + "spans": [ + { + "bbox": [ + 309, + 182, + 544, + 318 + ], + "type": "image", + "image_path": "afdaeeed73ad604514de757e9b2270c7023329ae143a5a3832380da533b7596a.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 325, + 546, + 369 + ], + "lines": [ + { + "bbox": [ + 305, + 325, + 546, + 369 + ], + "spans": [ + { + "bbox": [ + 305, + 325, + 546, + 369 + ], + "type": "text", + "content": "Figure 11. Dense surface reconstruction on Replica [53]. Utilizing our trained UDF MLP for initialization enables MonoSDF to capture more geometric details, such as the vase in the top row, the shelf in the bottom row." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 381, + 541, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 381, + 541, + 393 + ], + "spans": [ + { + "bbox": [ + 306, + 381, + 541, + 393 + ], + "type": "text", + "content": "4.4. Application on Dense Surface Reconstruction" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 305, + 401, + 545, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 401, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 305, + 401, + 545, + 460 + ], + "type": "text", + "content": "Our method has demonstrated its proficiency in reconstructing 3D edges across a diverse range of scenarios. Building on this success, we further explore the potential of our learned representation to benefit other tasks. A particularly relevant area is dense surface reconstruction." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 305, + 462, + 546, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 462, + 546, + 570 + ], + "spans": [ + { + "bbox": [ + 305, + 462, + 546, + 570 + ], + "type": "text", + "content": "As shown in Fig. 11, the recent neural-implicit surface reconstruction approach MonoSDF [72] can show decent reconstruction results from only posed multi-view images. However, we notice that they still struggle to capture detailed geometry. To address this, we integrate our method into the MonoSDF pipeline. Specifically, we initialize the geometry MLPs of MonoSDF with our pre-trained UDF MLPs. We can clearly see that such a simple integration can enhance the recovery of geometric details." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 306, + 584, + 383, + 596 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 584, + 383, + 596 + ], + "spans": [ + { + "bbox": [ + 306, + 584, + 383, + 596 + ], + "type": "text", + "content": "5. Conclusions" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 305, + 605, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 605, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 605, + 545, + 712 + ], + "type": "text", + "content": "We introduced EMAP, a 3D neural edge reconstruction pipeline that learns accurate 3D edge point locations and directions implicitly from multi-view edge maps through UDF and abstracts 3D parametric edges from the learned UDF field. Through extensive evaluations, EMAP demonstrates remarkable capabilities in CAD modeling and in capturing detailed geometry of objects and scenes. 
Furthermore, we show that our learned UDF field enriches the geometric details for neural surface reconstruction." + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "21226" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "text", + "content": "[1] Henrik Aanaes, Rasmus Ramsbøl Jensen, George Vogiatzis, Engin Tola, and Anders Bjorholm Dahl. Large-scale data for multiple-view stereopsis. International Journal of Computer Vision (IJCV), 2016. 5, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 179 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 179 + ], + "type": "text", + "content": "[2] Hichem Abdellali, Robert Frohlich, Viktor Vilagos, and Zoltan Kato. L2d2: Learnable line detector and descriptor. In Proc. of the International Conf. on 3D Vision (3DV), 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 182, + 287, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 287, + 225 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 287, + 225 + ], + "type": "text", + "content": "[3] Dejan Azinović, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural rgb-d surface reconstruction. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 228, + 287, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 228, + 287, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 228, + 287, + 270 + ], + "type": "text", + "content": "[4] Adrien Bartoli and Peter Sturm. Structure-from-motion using lines: Representation, triangulation, and bundle adjustment. Computer Vision and Image Understanding (CVIU), 2005. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 273, + 287, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 273, + 287, + 316 + ], + "spans": [ + { + "bbox": [ + 53, + 273, + 287, + 316 + ], + "type": "text", + "content": "[5] Andrea Bignoli, Andrea Romanoni, and Matteo Matteucci. Multi-view stereo 3d edge reconstruction. In Proc. of the IEEE Winter Conference on Applications of Computer Vision (WACV), 2018. 
1, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 318, + 287, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 318, + 287, + 373 + ], + "spans": [ + { + "bbox": [ + 53, + 318, + 287, + 373 + ], + "type": "text", + "content": "[6] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3d reconstruction. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 374, + 287, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 374, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 374, + 287, + 441 + ], + "type": "text", + "content": "[7] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 441, + 287, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 441, + 287, + 486 + ], + "spans": [ + { + "bbox": [ + 53, + 441, + 287, + 486 + ], + "type": "text", + "content": "[8] Manmohan Chandraker, Jongwoo Lim, and David Kriegman. Moving in stereo: Efficient structure and motion using lines. In Proc. of the International Conf. on Computer Vision (ICCV), pages 1741-1748. IEEE, 2009. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 487, + 287, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 487, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 53, + 487, + 287, + 520 + ], + "type": "text", + "content": "[9] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 522, + 287, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 522, + 287, + 566 + ], + "spans": [ + { + "bbox": [ + 48, + 522, + 287, + 566 + ], + "type": "text", + "content": "[10] Wentao Cheng, Sheng Yang, Maomin Zhou, Ziyuan Liu, Yiming Chen, and Mingyang Li. Road mapping and localization using sparse semantic visual features. IEEE Robotics and Automation Letters, 6(4):8118-8125, 2021. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 567, + 287, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 287, + 622 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 287, + 622 + ], + "type": "text", + "content": "[11] Kseniya Cherenkova, Elena Dupont, Anis Kacem, Ilya Arzhannikov, Gleb Gusev, and Djamila Aouada. Sepicnet: Sharp edges recovery by parametric inference of curves in 3d shapes. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "type": "text", + "content": "[12] Scott Ettinger, Shuyang Cheng, Benjamin Caine, Chenxi Liu, Hang Zhao, Sabeek Pradhan, Yuning Chai, Ben Sapp, Charles R Qi, Yin Zhou, et al. 
Large scale interactive motion forecasting for autonomous driving: The waymo open motion dataset. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9710-9719, 2021. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 286, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 286, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 286, + 714 + ], + "type": "text", + "content": "[13] Liangji Fang, Qinhong Jiang, Jianping Shi, and Bolei Zhou. Tpnet: Trajectory proposal network for motion prediction." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6797-6806, 2020. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 97, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 97, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 308, + 97, + 545, + 140 + ], + "type": "text", + "content": "[14] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 1981. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 141, + 545, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 141, + 545, + 185 + ], + "spans": [ + { + "bbox": [ + 308, + 141, + 545, + 185 + ], + "type": "text", + "content": "[15] Michael Goesele, Noah Snavely, Brian Curless, Hugues Hoppe, and Steven M Seitz. Multi-view stereo for community photo collections. In Proc. of the International Conf. on Computer Vision (ICCV), 2007. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 186, + 545, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 186, + 545, + 230 + ], + "spans": [ + { + "bbox": [ + 308, + 186, + 545, + 230 + ], + "type": "text", + "content": "[16] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. In Proc. of the International Conf. on Machine learning (ICML), 2020. 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 232, + 545, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 232, + 545, + 264 + ], + "spans": [ + { + "bbox": [ + 308, + 232, + 545, + 264 + ], + "type": "text", + "content": "[17] Yijia He, Ji Zhao, Yue Guo, Wenhao He, and Kui Yuan. Plizio: Tightly-coupled monocular visual-inertial odometry using point and line features. Sensors, 18(4):1159, 2018. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 265, + 545, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 265, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 308, + 265, + 545, + 308 + ], + "type": "text", + "content": "[18] Manuel Hofer, Michael Maurer, and Horst Bischof. Improving sparse 3d models for man-made environments using line-based 3d reconstruction. In Proc. of the International Conf. on 3D Vision (3DV), 2014. 
1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 310, + 545, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 310, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 308, + 310, + 545, + 352 + ], + "type": "text", + "content": "[19] Manuel Hofer, Michael Maurer, and Horst Bischof. Efficient 3d scene abstraction using line segments. Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2017." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 354, + 545, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 354, + 545, + 408 + ], + "spans": [ + { + "bbox": [ + 308, + 354, + 545, + 408 + ], + "type": "text", + "content": "[20] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local implicit grid representations for 3d scenes. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 411, + 545, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 411, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 308, + 411, + 545, + 443 + ], + "type": "text", + "content": "[21] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Trans. on Graphics (ToG), 2017. 5, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 445, + 545, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 445, + 545, + 488 + ], + "spans": [ + { + "bbox": [ + 308, + 445, + 545, + 488 + ], + "type": "text", + "content": "[22] Tianyu Li, Li Chen, Xiangwei Geng, Huijie Wang, Yang Li, Zhenbo Liu, Shengyin Jiang, Yuting Wang, Hang Xu, Chunjing Xu, et al. Topology reasoning for driving scenes. arXiv preprint arXiv:2304.05277, 2023. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 490, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 490, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 308, + 490, + 545, + 533 + ], + "type": "text", + "content": "[23] Hyunjun Lim, Jinwoo Jeon, and Hyun Myung. Uv-slam: Unconstrained line-based slam using vanishing points for structural mapping. IEEE Robotics and Automation Letters (RA-L), 7(2):1518-1525, 2022. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 535, + 545, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 535, + 545, + 589 + ], + "spans": [ + { + "bbox": [ + 308, + 535, + 545, + 589 + ], + "type": "text", + "content": "[24] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. Dist: Rendering deep implicit signed distance function with differentiable sphere tracing. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 590, + 545, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 590, + 545, + 634 + ], + "spans": [ + { + "bbox": [ + 308, + 590, + 545, + 634 + ], + "type": "text", + "content": "[25] Shaohui Liu, Yifan Yu, Rémi Pautrat, Marc Pollefeys, and Viktor Larsson. 3d line mapping revisited. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 
1, 2, 5, 6, 7" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 635, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 545, + 679 + ], + "type": "text", + "content": "[26] Yujia Liu, Stefano D'Aronco, Konrad Schindler, and Jan Dirk Wegner. Pc2wf: 3d wireframe reconstruction from raw point clouds. In Proc. of the International Conf. on Learning Representations (ICLR), 2021. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "text", + "content": "[27] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Yuan Liu, Peng Wang, Christian Theobalt, Taku Komura, and Wenping Wang. Neuraludf: Learning unsigned distance fields for" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21227" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "text", + "content": "multi-view reconstruction of surfaces with arbitrary topologies. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3, 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 106, + 287, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 106, + 287, + 161 + ], + "spans": [ + { + "bbox": [ + 48, + 106, + 287, + 161 + ], + "type": "text", + "content": "[28] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 162, + 287, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 162, + 287, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 162, + 287, + 205 + ], + "type": "text", + "content": "[29] Daniele Marzorati, Matteo Matteucci, Davide Migliore, and Domenico G Sorrenti. Integration of 3d lines and points in 6dof visual slam by uncertain projective geometry. In EMCR. CiteSeer, 2007. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 206, + 287, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 206, + 287, + 259 + ], + "spans": [ + { + "bbox": [ + 48, + 206, + 287, + 259 + ], + "type": "text", + "content": "[30] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 261, + 287, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 261, + 287, + 315 + ], + "spans": [ + { + "bbox": [ + 48, + 261, + 287, + 315 + ], + "type": "text", + "content": "[31] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 316, + 287, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 316, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 48, + 316, + 287, + 370 + ], + "type": "text", + "content": "[32] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 372, + 287, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 372, + 287, + 415 + ], + "spans": [ + { + "bbox": [ + 48, + 372, + 287, + 415 + ], + "type": "text", + "content": "[33] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 416, + 287, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 416, + 287, + 459 + ], + "spans": [ + { + "bbox": [ + 48, + 416, + 287, + 459 + ], + "type": "text", + "content": "[34] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proc. of the International Conf. on Computer Vision (ICCV), 2021. 1, 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 460, + 287, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 460, + 287, + 514 + ], + "spans": [ + { + "bbox": [ + 48, + 460, + 287, + 514 + ], + "type": "text", + "content": "[35] Jeong Joon Park, Peter Florence, Julian Straub, Richard A. Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 515, + 287, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 515, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 48, + 515, + 287, + 568 + ], + "type": "text", + "content": "[36] Rémi Pautrat, Juan-Ting Lin, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Sold2: Self-supervised occlusion-aware line description and detection. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 571, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 571, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 571, + 287, + 624 + ], + "type": "text", + "content": "[37] Rémi Pautrat, Daniel Barath, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Deeplsd: Line segment detection and refinement with deep image gradients. In Proc. of the Conf. 
on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 4, 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 625, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 287, + 668 + ], + "type": "text", + "content": "[38] Rémi* Pautrat, Iago* Suárez, Yifan Yu, Marc Pollefeys, and Viktor Larsson. GlueStick: Robust image matching by sticking points and lines together. In Proc. of the International Conf. on Computer Vision (ICCV), 2023. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "text", + "content": "[39] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In Proc. of the European Conf. on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[40] Songyou Peng, Chiyu \"Max\" Jiang, Yiyi Liao, Michael Niemeyer, Marc Pollefeys, and Andreas Geiger. Shape as points: A differentiable poisson solver. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "type": "text", + "content": "[41] Xavier Soria Poma, Edgar Riba, and Angel Sappa. Dense extreme inception network: Towards a robust cnn model for edge detection. In Proc. of the IEEE Winter Conference on Applications of Computer Vision (WACV), 2020. 6, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 162, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 162, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 162, + 545, + 194 + ], + "type": "text", + "content": "[42] Zhijian Qiao, Zehuan Yu, Huan Yin, and Shaojie Shen. Online monocular lane mapping using catmull-rom spline. arXiv preprint arXiv:2307.11653, 2023. 1, 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 195, + 545, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 195, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 307, + 195, + 545, + 249 + ], + "type": "text", + "content": "[43] Tong Qin, Yuxin Zheng, Tongqing Chen, Yilun Chen, and Qing Su. A light-weight semantic map for visual localization towards autonomous driving. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 11248-11254. IEEE, 2021. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 250, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 250, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 307, + 250, + 545, + 293 + ], + "type": "text", + "content": "[44] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with thousands of tiny mlp's. In Proc. 
of the International Conf. on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 294, + 545, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 294, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 307, + 294, + 545, + 348 + ], + "type": "text", + "content": "[45] Chris Rorden, Roger Newman-Norlund, Chris Drake, Daniel R Glen, Julius Fridriksson, Taylor Hanayik, and Paul A Taylor. Improving 3d edge detection for visual inspection of mri coregistration and alignment. bioRxiv, pages 2022-09, 2022. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 350, + 545, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 350, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 307, + 350, + 545, + 403 + ], + "type": "text", + "content": "[46] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proc. of the International Conf. on Computer Vision (ICCV), 2019. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 404, + 545, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 404, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 307, + 404, + 545, + 448 + ], + "type": "text", + "content": "[47] Grant Schindler, Panchapagesan Krishnamurthy, and Frank Dellaert. Line-based structure from motion for urban environments. In International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT), 2006. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 449, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 449, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 307, + 449, + 545, + 491 + ], + "type": "text", + "content": "[48] Johannes L Schonberger, Enliang Zheng, Jan-Michael Frahm, and Marc Pollefeys. Pixelwise view selection for unstructured multi-view stereo. In Proc. of the European Conf. on Computer Vision (ECCV), 2016. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 493, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 493, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 307, + 493, + 545, + 536 + ], + "type": "text", + "content": "[49] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 537, + 545, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 537, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 307, + 537, + 545, + 591 + ], + "type": "text", + "content": "[50] Steven M Seitz, Brian Curless, James Diebel, Daniel Scharstein, and Richard Szeliski. A comparison and evaluation of multi-view stereo reconstruction algorithms. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2006. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 592, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 592, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 592, + 545, + 635 + ], + "type": "text", + "content": "[51] Shaoshuai Shi, Li Jiang, Dengxin Dai, and Bernt Schiele. Motion transformer with global intention localization and local movement refinement. 
Advances in Neural Information Processing Systems, 2022. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 636, + 545, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 636, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 307, + 636, + 545, + 690 + ], + "type": "text", + "content": "[52] Fangwen Shu, Jiaxuan Wang, Alain Pagani, and Didier Stricker. Structure plp-slam: Efficient sparse mapping and localization using point, line and plane for monocular, rgb-d and stereo cameras. In Proc. IEEE International Conf. on Robotics and Automation (ICRA). 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "type": "text", + "content": "[53] Julian Straub, Thomas Whelan, Lingni Ma, Yufan Chen, Erik Wijmans, Simon Green, Jakob J Engel, Raul Mur-Artal, Carl" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "21228" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 105 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 105 + ], + "type": "text", + "content": "Ren, Shobhit Verma, et al. The replica dataset: A digital replica of indoor spaces. arXiv preprint arXiv:1906.05797, 2019. 1, 5, 7, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 106, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 106, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 106, + 287, + 149 + ], + "type": "text", + "content": "[54] Zhuo Su, Wenzhe Liu, Zitong Yu, Dewen Hu, Qing Liao, Qi Tian, Matti Pietikainen, and Li Liu. Pixel difference networks for efficient edge detection. In Proc. of the International Conf. on Computer Vision (ICCV), 2021. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 150, + 287, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 150, + 287, + 194 + ], + "spans": [ + { + "bbox": [ + 48, + 150, + 287, + 194 + ], + "type": "text", + "content": "[55] Rafael Grompone Von Gioi, Jeremie Jakubowicz, Jean-Michel Morel, and Gregory Randall. Lsd: A fast line segment detector with a false detection control. IEEE Trans. on Pattern Analysis and Machine Intelligence (PAMI), 2008. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 195, + 287, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 195, + 287, + 249 + ], + "spans": [ + { + "bbox": [ + 48, + 195, + 287, + 249 + ], + "type": "text", + "content": "[56] Huijie Wang, Zhenbo Liu, Yang Li, Tianyu Li, Li Chen, Chonghao Sima, Yuting Wang, Shengyin Jiang, Feng Wen, Hang Xu, et al. Road genome: A topology reasoning benchmark for scene understanding in autonomous driving. arXiv preprint arXiv:2304.10440, 2023. 
1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 250, + 287, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 250, + 287, + 304 + ], + "spans": [ + { + "bbox": [ + 48, + 250, + 287, + 304 + ], + "type": "text", + "content": "[57] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 304, + 287, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 304, + 287, + 359 + ], + "spans": [ + { + "bbox": [ + 48, + 304, + 287, + 359 + ], + "type": "text", + "content": "[58] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 360, + 287, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 360, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 48, + 360, + 287, + 403 + ], + "type": "text", + "content": "[59] Xiaogang Wang, Yuelang Xu, Kai Xu, Andrea Tagliasacchi, Bin Zhou, Ali Mahdavi-Amiri, and Hao Zhang. Pie-net: Parametric inference of point cloud edges. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 404, + 287, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 404, + 287, + 448 + ], + "spans": [ + { + "bbox": [ + 48, + 404, + 287, + 448 + ], + "type": "text", + "content": "[60] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. Hf-neus: Improved surface reconstruction using high-frequency details. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 449, + 287, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 287, + 502 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 287, + 502 + ], + "type": "text", + "content": "[61] Dong Wei, Yi Wan, Yongjun Zhang, Xinyi Liu, Bin Zhang, and Xiqi Wang. Elsr: Efficient line segment reconstruction with planes and points guidance. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 503, + 287, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 503, + 287, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 503, + 287, + 557 + ], + "type": "text", + "content": "[62] Qiangeng Xu, Weiyue Wang, Duygu Ceylan, Radomir Mech, and Ulrich Neumann. DISN: deep implicit surface network for high-quality single-view 3d reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 559, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 287, + 612 + ], + "type": "text", + "content": "[63] Nan Xue, Song Bai, Fudong Wang, Gui-Song Xia, Tianfu Wu, and Liangpei Zhang. Learning attraction field representation for robust line segment detection. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 
4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 613, + 287, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 656 + ], + "type": "text", + "content": "[64] Nan Xue, Bin Tan, Yuxi Xiao, Liang Dong, Gui-Song Xia, and Tianfu Wu. Volumetric wireframe parsing from neural attraction fields. arXiv preprint arXiv:2307.10206, 2023. 1, 2, 5, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[65] Nan Xue, Tianfu Wu, Song Bai, Fu-Dong Wang, Gui-Song Xia, Liangpei Zhang, and Philip HS Torr. Holistically-attracted wireframe parsing: From supervised to self-supervised learning. IEEE Trans. on Pattern Analysis and Machine Intelligence (PAMI), 2023. 2, 6" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 654 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "text", + "content": "[66] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Ronen Basri, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 129, + 545, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 183 + ], + "type": "text", + "content": "[67] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 1, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 185, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 185, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 185, + 545, + 217 + ], + "type": "text", + "content": "[68] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 219, + 545, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 219, + 545, + 260 + ], + "spans": [ + { + "bbox": [ + 307, + 219, + 545, + 260 + ], + "type": "text", + "content": "[69] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 262, + 545, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 262, + 545, + 317 + ], + "spans": [ + { + "bbox": [ + 307, + 262, + 545, + 317 + ], + "type": "text", + "content": "[70] Yunfan Ye, Renjiao Yi, Zhirui Gao, Chenyang Zhu, Zhiping Cai, and Kai Xu. Nef: Neural edge fields for 3d parametric curve reconstruction from multi-view images. In Proc. 
of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 319, + 545, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 319, + 545, + 362 + ], + "spans": [ + { + "bbox": [ + 307, + 319, + 545, + 362 + ], + "type": "text", + "content": "[71] Zehao Yu, Anpei Chen, Bozidar Antic, Songyou Peng, Apratim Bhattacharyya, Michael Niemeyer, Siyu Tang, Torsten Sattler, and Andreas Geiger. Sdfstudio: A unified framework for surface reconstruction, 2022. 2, 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 364, + 545, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 364, + 545, + 418 + ], + "spans": [ + { + "bbox": [ + 307, + 364, + 545, + 418 + ], + "type": "text", + "content": "[72] Zehao Yu, Songyou Peng, Michael Niemeyer, Torsten Sattler, and Andreas Geiger. Monosdf: Exploring monocular geometric cues for neural implicit surface reconstruction. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2, 5, 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 420, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 420, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 420, + 545, + 453 + ], + "type": "text", + "content": "[73] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 454, + 545, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 454, + 545, + 508 + ], + "spans": [ + { + "bbox": [ + 307, + 454, + 545, + 508 + ], + "type": "text", + "content": "[74] Xiangyu Zhu, Dong Du, Weikai Chen, Zhiyou Zhao, Yinyu Nie, and Xiaoguang Han. Nerve: Neural volumetric edges for parametric curve extraction from point cloud. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 510, + 545, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 510, + 545, + 563 + ], + "spans": [ + { + "bbox": [ + 307, + 510, + 545, + 563 + ], + "type": "text", + "content": "[75] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proc. of the Conf. on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 565, + 545, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 565, + 545, + 609 + ], + "spans": [ + { + "bbox": [ + 307, + 565, + 545, + 609 + ], + "type": "text", + "content": "[76] Zihan Zhu, Songyou Peng, Viktor Larsson, Zhaopeng Cui, Martin R Oswald, Andreas Geiger, and Marc Pollefeys. Nicer-slam: Neural implicit scene encoding for rgb slam. In Proc. of the International Conf. on 3D Vision (3DV), 2024. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 609, + 545, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 609, + 545, + 654 + ], + "spans": [ + { + "bbox": [ + 307, + 609, + 545, + 654 + ], + "type": "text", + "content": "[77] Xingxing Zuo, Xiaojia Xie, Yong Liu, and Guoquan Huang. Robust visual slam with point and line features. In Proc. IEEE International Conf. 
on Intelligent Robots and Systems (IROS), 2017. 2" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21229" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/669e6bfe-eb9e-4f5b-a53c-23335eda80fe_content_list.json b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/669e6bfe-eb9e-4f5b-a53c-23335eda80fe_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..283b5ada3c333b139d40cb8f49edc65b8819c889 --- /dev/null +++ b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/669e6bfe-eb9e-4f5b-a53c-23335eda80fe_content_list.json @@ -0,0 +1,1600 @@ +[ + { + "type": "text", + "text": "3D Paintbrush: Local Stylization of 3D Shapes with Cascaded Score Distillation", + "text_level": 1, + "bbox": [ + 83, + 130, + 883, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dale Decatur University of Chicago", + "bbox": [ + 102, + 181, + 279, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Itai Lang \nUniversity of Chicago", + "bbox": [ + 316, + 181, + 493, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kfir Aberman \nSnap Research", + "bbox": [ + 532, + 181, + 651, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Rana Hanocka \nUniversity of Chicago", + "bbox": [ + 689, + 181, + 866, + 215 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8f0213175946d81e045e5257620801c49611ea5251bf90a457a930a8d4f51e8c.jpg", + "image_caption": [ + "Figure 1. Utilizing only a text prompt as guidance, 3D Paintbrush seamlessly generates local stylized textures on bare meshes. Our approach produces a localization map (yellow regions) and a highly detailed texture map which conforms to it." + ], + "image_footnote": [], + "bbox": [ + 98, + 256, + 263, + 416 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f4b6e651a7b5e88adec8504d1c994748f92bc8066781abc73a14ed135292e743.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 256, + 442, + 416 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/88d6d9cc4b82a53a6af7741ab56f74fb0e8003a1f8ad9bc7c1cd3a3d6056e5b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 462, + 256, + 651, + 416 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/eb89358f7aad74877be3c83f45a401455d7ad6d69752f761941a7364042a2dd4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 256, + 870, + 416 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 462, + 310, + 477 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present 3D Paintbrush, a technique for automatically texturing local semantic regions on meshes via text descriptions. Our method is designed to operate directly on meshes, producing texture maps which seamlessly integrate into standard graphics pipelines. 
We opt to simultaneously produce a localization map (to specify the edit region) and a texture map which conforms to it. This approach improves the quality of both the localization and the stylization. To enhance the details and resolution of the textured area, we leverage multiple stages of a cascaded diffusion model to supervise our local editing technique with generative priors learned from images at different resolutions. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion, enabling control over both the granularity and global understanding of the supervision. We demonstrate the effectiveness of 3D Paintbrush to locally texture different semantic regions on a variety of shapes. Project page: https://threedle.github.io/3d-paintbrush", + "bbox": [ + 75, + 493, + 473, + 796 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 814, + 207, + 829 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The ability to edit existing high-quality 3D assets is a fundamental capability in 3D modeling workflows. Recent works have shown exceptional results for text-driven 3D data creation [32, 38, 48, 53, 58, 59], but focus on making global", + "bbox": [ + 75, + 839, + 468, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": " edits. While some progress has been made on local editing using an explicit localization of the edit region [49, 67], these regions are often coarse and lack fine-grained detail. Highly-detailed and accurate localizations are important for constraining the edits to be within a specific region, preventing changes unrelated to the target edit. Furthermore, while meshes with texture maps are the de facto standard in graphics pipelines, existing local editing work does not natively operate on meshes nor produce texture maps for them.", + "bbox": [ + 496, + 463, + 890, + 599 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work we develop 3D Paintbrush, a method for automatically texturing local semantic regions on meshes via text descriptions. Our method is designed to operate directly on meshes, producing texture maps which seamlessly integrate into standard graphics pipelines. 3D Paintbrush is controlled via intuitive, free-form text input, allowing users to describe their edits using open vocabulary on a wide range of meshes. Specifically, given an input mesh and a text prompt, 3D Paintbrush produces the corresponding high-quality texture map and a localization region to confine it. To enhance the details and resolution of the locally textured area, we introduce Cascaded Score Distillation (CSD) which leverages multiple stages of a cascaded diffusion model. Our explicit localization masks can be used to layer our edit texture onto existing textures.", + "bbox": [ + 496, + 606, + 892, + 833 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We opt to represent both our localization map and texture map as neural fields encoded by multi-layer perceptions. Our method synthesizes both a fine-grained localization mask and high-quality texture in tandem. Simultane", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "4473", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/305e0e8d18cf7f67879c52579bcc8947fff72b3ab4c653d928e3390ba7c0684d.jpg", + "image_caption": [ + "Colorful polo shirt", + "Figure 2. Precise composition of multiple local textures. 3D Paintbrush produces highly-detailed textures that effectively adhere to the predicted localizations. This enables seamlessly compositing local textures without unwanted fringes (right)." + ], + "image_footnote": [], + "bbox": [ + 76, + 83, + 240, + 253 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/fad0ab68b5b07bb7cec53701159dbce71c34d054366cf12211c111011d67896a.jpg", + "image_caption": [ + "Superman emblem" + ], + "image_footnote": [], + "bbox": [ + 251, + 85, + 411, + 253 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/a89cb636cef5b54c1bfe8703dceb299d017079a30f5b65309a6366597bf6ca4b.jpg", + "image_caption": [ + "Tie-dye apron" + ], + "image_footnote": [], + "bbox": [ + 421, + 85, + 581, + 253 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/66d48d8aa4a8deec06555212b46a505c654ded15867d3a8cbd1f02b36cebbf31.jpg", + "image_caption": [ + "Muay Thai shorts" + ], + "image_footnote": [], + "bbox": [ + 591, + 85, + 754, + 253 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/48871a295e38ebb027784ab0ff12faf45f6e3a7d9e47efeef0edbe907f40b5b8.jpg", + "image_caption": [ + "Composite" + ], + "image_footnote": [], + "bbox": [ + 776, + 93, + 890, + 253 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ously generating the localization and texture maps improves the quality of each. The texture map drives the localization to become more detailed and intricate. The localization explicitly masks the texture, ensuring a coherent local style which respects the localization boundary.", + "bbox": [ + 75, + 327, + 467, + 402 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our local stylization operates in small regions, necessitating higher resolution supervision compared to global generative techniques. Existing approaches leverage pretrained text-to-image diffusion models with Score Distillation Sampling (SDS) to supervise text-driven optimizations [31, 58]. Text-to-image diffusion models often contain multiple cascaded stages in order to achieve high resolution [21], but standard SDS only utilizes the first low-resolution stage of the cascaded model. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion, enabling control over both the granularity and global understanding of the supervision. Since cascaded stages are trained entirely independently, our insight is to formulate a distillation loss that incorporates all stages in tandem.", + "bbox": [ + 75, + 402, + 467, + 630 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our method enables local text-driven stylization of meshes. By explicitly learning a localization in tandem with the texture, we ensure that our edits are bounded by the localized region. 
Using our CSD, which leverages all stages of the diffusion model, we can control the granularity and global understanding of the supervision achieving higher resolution textures and localizations than standard SDS. We demonstrate that 3D Paintbrush yields diverse local texturing on a variety of shapes and semantic regions and outperforms baselines both qualitatively and quantitatively.", + "bbox": [ + 75, + 631, + 467, + 797 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 814, + 218, + 829 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A large body of work has studied stylization and analysis of 3D content. Existing work uses neural networks and optimization [6, 15, 19, 22, 23, 30, 33, 37-39, 41, 42, 51, 60, 62] for mesh stylization. Other works use a neural radiance field", + "bbox": [ + 75, + 839, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "NeRF [40] for stylization [11, 34, 64]. Yet, these works focus on stylization rather than localization. Large 2D models have been used for analytical tasks in 3D such as localization and segmentation [1, 2, 10, 17, 27, 28, 54, 57, 67], however, none of these works produce textures. Furthermore, only [1, 2, 10, 67] aim to produce a tight localization on meshes and we find that these approaches still produce relatively smooth localization regions that cannot capture the high frequency details needed for sharp local edits.", + "bbox": [ + 496, + 325, + 890, + 464 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Text-driven generation and editing. Existing works have leveraged pre-trained 2D models to generate 3D representations that adhere to a text prompt [4, 12, 16, 25, 29, 39, 41, 63]. Many recent methods [9, 26, 32, 44, 50, 53, 53, 58, 66] use score distillation [44, 58] from 2D models to generate both geometry and styles from scratch, while other works optimize the texture of an existing, fixed geometry [8, 38, 39, 47]. Other work aims to generate 3D representations from images [14, 35, 36, 45].", + "bbox": [ + 496, + 469, + 890, + 604 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Existing text-to-3D generative methods [38, 44, 58, 59] can be used to perform global edits [18, 48, 67]. However, since these approaches do not have explicit edit localizations, they struggle to perform highly specific local edits without changing other components of the 3D representation's appearance. Different from our objective, these works aim to generate or globally manipulate existing 3D representations, while our work focuses on local editing.", + "bbox": [ + 496, + 606, + 890, + 729 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Text-driven local editing. Many approaches can perform global 3D edits and progress has been made on local editing in images and videos [5, 7, 13, 20]. Yet, few works have addressed the task of precise, local editing for 3D representations. Local editing is challenging since, in addition to synthesizing the edit, methods need to localize the edit region. FocalDreamer [31] obtains precise user defined edit regions at the cost of requiring additional, tedious user input compared to strictly text-driven approaches. 
Vox-E [49] (operating on voxel representations) and DreamEditor [67] (operating on NeRFs) both use attention maps to localize an edit", + "bbox": [ + 496, + 734, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "4474", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b877759d8109bb754731500e7eabbf66d9ebd769fd1bf2c81c411b22a81fe222.jpg", + "image_caption": [ + "Figure 3. Overview of 3D Paintbrush. Each point on the surface of the mesh is passed into three different branches to produce a localization probability, texture map, and background map. We texture three different variants of the same mesh with the localization, texture, and background maps and render them from the same viewpoint. Each image along with the corresponding text condition is used to compute the CSD loss." + ], + "image_footnote": [], + "bbox": [ + 76, + 85, + 893, + 265 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "region and thus the localization has no visual meaning in isolation. Our approach imposes a visual loss on our localizations in order to enforce sharp boundaries that are tightly coupled with our texture edits. Additionally, since existing purely text-driven local editing approaches only work on voxels and NeRFs, our approach is the first to enable text-driven local editing on meshes.", + "bbox": [ + 75, + 356, + 468, + 464 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "High resolution text-to-3D. Several works have explored techniques to increase the resolution for text-to-3D. Many recent works apply SDS to latent diffusion models [32, 38, 58, 59, 66]. Recent works backpropagate the gradient through the encoder to get gradients in higher resolution $512 \\times 512$ RGB space [32, 59, 66]. Other works use timestep annealing to give less noisy supervision towards the end of the optimization, thus increasing the detail of the generations [24, 59]. HiFA [66] proposes denoising over multiple successive timesteps each iteration to provide better gradients and achieve high fidelity appearance. While all of these approaches have shown impressive improvements to the resolution of SDS supervision, SDS only utilizes the base stage (not super-resolution stages). Thus, these proposed improvements are orthogonal to ours and can be incorporated at the super-resolution stages using CSD as well.", + "bbox": [ + 75, + 465, + 470, + 709 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 723, + 168, + 739 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We show an overview of our method in Fig. 3. The inputs to our system are a mesh $M$ and a text description $y$ of the desired local edit. Our system produces a local texture on the mesh $M$ that adheres to the text prompt $y$ . To supervise our optimization, we use score distillation with a pretrained text-to-image diffusion model. However, local editing requires higher detail than standard generation due to the small size and granularity of the desired edits. In order to further improve the detail of our localization and texture, we introduce Cascaded Score Distillation (CSD), a", + "bbox": [ + 75, + 750, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "technique that distills scores at multiple resolutions of the 2D cascaded model. 
This approach enables leveraging all stages of a cascaded model and provides control over both the detail and global understanding of the supervision.", + "bbox": [ + 496, + 356, + 890, + 417 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Local Neural Texturing", + "text_level": 1, + "bbox": [ + 498, + 424, + 718, + 441 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D Paintbrush represents local textures as neural texture maps over the surface of a mesh $M$ defined by vertices $V \\in \\mathbb{R}^{n \\times 3}$ and faces $F \\in \\{1, \\dots, n\\}^{m \\times 3}$ . Extracting an explicit texture map from our neural textures is trivial, making our representation compatible with existing graphics pipelines. Furthermore, using texture maps enables producing high resolution textures (i.e., sub-triangle values) without a computationally expensive high resolution mesh. A straight-forward approach of directly optimizing texture values results in texture maps with artifacts and noise (see supplemental material). To mitigate this, we leverage the smoothness of neural networks [46]. However, a straight-forward application of an MLP to a 2D texture map $((u, v) \\to (r, g, b))$ is inherently invalid at the texture seams (e.g., erroneous interpolations at boundaries), which may lead to texture discontinuities on the rendered mesh.", + "bbox": [ + 496, + 446, + 890, + 688 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We instead formulate our MLPs to operate on 3D coordinates leading to predictions in 3D that are inherently smooth and without any seam discontinuities. To do so, we invert the UV mapping $\\psi(x,y,z) = (u,v)$ to get a map $\\psi^{-1}(u,v) = (x,y,z)$ from 2D texels to 3D coordinates on the surface of the mesh. We optimize our MLPs with the 3D coordinates obtained from the 2D texel centers. We employ two primary networks, one for localization and one for texturing. Our neural localization MLP is a function $\\mathcal{F}_{\\theta}$ that maps a 3D coordinate $\\mathbf{x} = (x,y,z)$ to a probability $p$ (which we map back to a 2D localization map). Similarly, our neural texture MLP is a function $\\mathcal{F}_{\\phi}$ that takes in a 3D coordinate and outputs an RGB value (which we map back to a 2D texture image). Our architecture first passes the 3D", + "bbox": [ + 496, + 688, + 892, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "4475", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/caf584c44831cf110e828a96530c71e57e340c15fa5cf625b4abdb87e9fe1b93.jpg", + "image_caption": [ + "Figure 4. 3D Paintbrush produces highly detailed textures and localizations for a diverse range of meshes and prompts. Our method synthesizes meaningful local edits on shapes, demonstrating both global and local part-level understanding." + ], + "image_footnote": [], + "bbox": [ + 117, + 88, + 851, + 296 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "coordinates through positional encoding [52] before going through a 6-layer MLP. This formulation of using MLPs defined on the 3D surface leads to a neural texture which produces smoothly varying outputs in 3D, even though our 2D texture maps have discontinuities at the texture seams. The smoothness provided by the MLPs reduces artifacts, produces less noisy textures, and provides super resolution capabilities. 
Although we optimize our MLPs with 3D coordinates mapped from 2D texel centers, during inference, we may query the MLP for any value (i.e. sub-texels that enable super resolution texture maps even across seams).", + "bbox": [ + 75, + 351, + 472, + 517 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Visual Guidance for Localized Textures", + "text_level": 1, + "bbox": [ + 76, + 523, + 415, + 539 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We guide our optimization using three distinct losses that encourage both the localization and texture towards visually desirable results. Each loss is visualized as a branch in Fig. 3 - top branch: localization loss, middle branch: local texture map loss, bottom branch: background loss.", + "bbox": [ + 75, + 547, + 468, + 625 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Local texture map loss. First, we obtain our localization map $L_{map} \\in [0,1]^{H \\times W}$ from the neural localization MLP $L_{map} = \\psi(\\mathcal{F}_{\\theta}(\\mathbf{x}))$ and the texture map $T_{map} \\in [0,1]^{H \\times W \\times 3}$ from the neural texture MLP $T_{map} = \\psi(\\mathcal{F}_{\\phi}(\\mathbf{x}))$ . We use the localization $L_{map}$ to mask the texture $T_{map}$ to get a local texture map $T_{map}'$ which only contains textures inside the localization region. We apply the masked texture $T_{map}'$ to our mesh $M$ to get a locally-textured mesh $M_t$ and construct a local-texture text prompt $y_t$ from the input text $y$ (middle branch Fig. 3). We then supervise our optimization using a text-conditioned visual loss (cascaded score distillation, see Sec. 3.4) on $M_t$ and $y_t$ . By applying a visual loss to the localization-masked texture, we get informative and meaningful gradients for both our texture MLP and our localization MLP.", + "bbox": [ + 75, + 627, + 468, + 851 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Localization loss. Using only the texture loss allows for trivial solutions where the mask contains a region that includes, but is much larger than, the desired localization re", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "gion. To encourage the localization region to be meaningful, we employ a visual loss on the localization region in isolation (similar to 3D Highlighter [10]). Specifically, we blend a (yellow) color onto the mesh according to the localization map to get a localization-colored mesh $M_{l}$ (top branch Fig. 3). From the text input $y$ , we derive a target localization prompt $y_{l}$ describing the localized region in the format used in 3D Highlighter [10]. We then use $M_{l}$ and $y_{l}$ as input to the text-conditioned visual loss. Using this loss significantly improves the detail and quality of the localization (see supplemental material).", + "bbox": [ + 496, + 351, + 890, + 517 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Background loss. Using only the top two branches in Fig. 3 leads to broader localizations that incorporate superfluous elements characteristic of the input 3D model (i.e. a bill on a duck), in addition to the desired localization region (see supplemental material). To mitigate this, we learn a background texture $B_{map} \\in [0,1]^{H \\times W \\times 3}$ that intentionally contains these characteristic elements of the input 3D shape in the inverse of the localization region $1 - L_{map}$ (the area outside the localization region). 
Specifically, we blend both the background texture $B_{map}$ (using $1 - L_{map}$ ) and a yellow color (using $L_{map}$ ) to get a composited texture $B_{map}' = L_{map}(\\mathrm{YELLOW}) + (1 - L_{map})B_{map}$ (bottom branch in Fig. 3). We apply the composited texture $B_{map}'$ to the mesh to get $M_b$ and then supervise the background MLP using a visual loss conditioned on both $M_b$ and a target text $y_b$ (derived from $y$ ). The target text $y_b$ describes the generic object class (i.e. 'cow' in Fig. 3) with a (yellow) colored localization region. See supplemental material for more details. The third loss directly encourages incorporating the superfluous elements in the background texture which discourages the localization region from incorporating such undesired elements (since $L_{map}$ and $1 - L_{map}$ are inverse masks).", + "bbox": [ + 496, + 522, + 892, + 868 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Key to our method is the simultaneous optimization of the localization map (that specifies the edit region) and the", + "bbox": [ + 500, + 869, + 892, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4476", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d4f6be7c7b59054a75210a356f66f171e829da3af540ba3a770c61aa11a695ad.jpg", + "image_caption": [ + "Simultaneous" + ], + "image_footnote": [], + "bbox": [ + 78, + 85, + 183, + 165 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6c43ffa6ff52e17ea8cc59bb1f2b7fafb519cdf5dbbeec3fd840d64f9c560b8d.jpg", + "image_caption": [ + "In series" + ], + "image_footnote": [], + "bbox": [ + 197, + 85, + 300, + 165 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/25064e463f681c2bd66cd2cf471c738429508d746c077bed6c2ca5f90c4256b1.jpg", + "image_caption": [ + "Independent", + "Figure 5. Impact of simultaneous optimization. Simultaneously optimizing the localization and texture (left) results in higher-detailed textures which effectively conform to the predicted localization. If we first optimize the localization, then optimize the texture within the localization region (middle), both the localization and texture are less detailed. Independent (right): if we optimize the localization independently (independent: left) and the texture independently (independent: middle), the texture does not align with the localization and thus the masked texture contains fringe artifacts (independent: right)." + ], + "image_footnote": [], + "bbox": [ + 313, + 85, + 468, + 165 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "texture map that conforms to it. This approach improves the quality of both the localization and the stylization. The texture map drives the localization to become more detailed and intricate, while the localization explicitly masks the texture, ensuring a coherent local style which respects the localization boundary (see Fig. 5).", + "bbox": [ + 75, + 349, + 468, + 441 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Score Distillation and Cascaded Diffusion", + "text_level": 1, + "bbox": [ + 76, + 452, + 431, + 467 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Score Distillation. To guide our local stylization, we leverage powerful pretrained text-to-image diffusion models. Existing approaches use these models in conjunction with Score Distillation Sampling (SDS) to supervise text-driven optimizations [44, 58]. 
For each iteration of an optimization of an image $x$ that we want to supervise with diffusion model $\\phi$ and text prompt $y$ , SDS [44] proposes the following gradient:", + "bbox": [ + 75, + 474, + 468, + 595 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {x} \\mathcal {L} _ {S D S} (\\phi , x, y) = w (t) \\left(\\epsilon_ {\\phi} \\left(z _ {t}, t, y\\right) - \\epsilon\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 130, + 609, + 468, + 626 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where timestep $t \\sim \\mathcal{U}(\\{1, \\dots, T\\})$ is sampled uniformly and noise $\\epsilon \\sim \\mathcal{N}(\\mathbf{0}, \\mathbf{I})$ is Gaussian. The noisy image $z_{t}$ is obtained by applying a timestep-dependent scaling of $\\epsilon$ to the image $x$ . The weight $w(t)$ is a timestep-dependent weighting function and $\\epsilon_{\\phi}(z_{t}, t, y)$ is the noise predicted by the diffusion model conditioned on $z_{t}$ , $t$ , and $y$ . Note that Eq. (1) omits the U-Net Jacobian term (not needed in practice [44]). This objective is similar to the objective used in diffusion model training, however, instead of optimizing the weights of the model, the gradient is applied to the image $x$ .", + "bbox": [ + 75, + 638, + 468, + 790 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": " Cascaded Diffusion. Text-to-image diffusion models often contain multiple cascaded stages at different resolutions in order to achieve high resolution outputs [21]. These cascaded diffusion models consist of a base stage $\\phi^1$ (stage 1) and some number of super-resolution stages $\\phi^{i > 1}$ (stages $2 - N$ ). The base stage is identical to a standard diffusion model, predicting noise $\\epsilon_{\\phi^1}(z_t^1,t,y)$ conditioned on", + "bbox": [ + 75, + 794, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "noisy image $z_{t}^{1}$ , timestep $t$ , and text prompt $y$ . However, the super-resolution stages are conditioned on two differently-noised images: one at the current resolution ( $z_{t}^{i}$ with timestep $t$ and noise $\\epsilon^{i}$ ) and one at the lower resolution ( $z_{s}^{i-1}$ with timestep $s$ and noise $\\epsilon^{i-1}$ ). The predicted noise for the super-resolution stage is given by $\\epsilon_{\\phi^{i}}(z_{t}^{i}, t, z_{s}^{i-1}, s, y)$ . During inference, the lower resolution input image is obtained by adding noise to the output of the prior stage. However in training, both the high and low resolution images are obtained by sampling a single image from the training dataset and rescaling it to different resolutions.", + "bbox": [ + 496, + 90, + 890, + 257 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Standard SDS [44] only utilizes the first, low-resolution base stage, thus neglecting the full potential of the cascaded model. It is not immediately obvious how to formulate a score distillation technique for all stages of a cascaded diffusion model since super-resolution stages take multiple resolution inputs and, at inference, they require a fully denoised output from the prior stage [21]. We take inspiration from SDS and use the perspective of diffusion training as opposed to inference, and extend it to the training of cascaded diffusion models. To our knowledge, we are the first to consider score distillation using the cascaded super-resolution stages.", + "bbox": [ + 496, + 257, + 890, + 439 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. 
Cascaded Score Distillation", + "text_level": 1, + "bbox": [ + 500, + 449, + 746, + 464 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "CSD overview. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion (illustrated in Fig. 6). Since the stages of a cascaded diffusion model $\\phi$ are trained entirely independently of one another, our insight is to formulate a distillation loss that incorporates gradients from all stages $(\\phi^1,\\dots,\\phi^N)$ simultaneously. We observe that different stages of the cascaded model provide different levels of granularity and global understanding (Fig. 7). Controlling the influence of each stage provides control over the details and the corresponding localization of the supervision (Fig. 8).", + "bbox": [ + 496, + 472, + 890, + 654 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "CSD Formalization. Consider a mesh $M_{\\theta}$ with a neural texture parameterized by an MLP $\\theta$ (This MLP could be either $\\mathcal{F}_{\\theta}$ , $\\mathcal{F}_{\\phi}$ , and $\\mathcal{F}_{\\psi}$ in Sec. 3.2). We first render $M_{\\theta}$ at $N$ different resolutions using a differentiable renderer $g$ to get multiple images $g(M_{\\theta}) = \\mathbf{x} = \\{x^{1}\\dots x^{N}\\}$ such that $x^{i}$ is the same resolution as stage $\\phi^i$ . For the base stage $\\phi^1$ , we perform standard SDS using Eq. (1) on $x^{1}$ and prompt $y$ to get a gradient $\\nabla_{x^1}$ . For all stages $\\phi^i$ for $i > 1$ , we sample two timesteps $t,s\\sim \\mathcal{U}(\\{1,\\ldots ,T\\})$ , noise $\\epsilon^i\\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})$ at the resolution of stage $\\phi^i$ , and noise $\\epsilon^{i - 1}\\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})$ at the resolution of stage $\\phi^{i - 1}$ . Using timestep-dependent schedule coefficients $\\alpha$ and $\\sigma$ , we compute a noisy image $z_{t}^{i} = \\alpha_{t}x^{i} + \\sigma_{t}\\epsilon^{i}$ by applying a timestep-dependent scaling of $\\epsilon^i$ to the image $x^i$ . Similarly, we compute $z_{s}^{i - 1} = \\alpha_{s}x^{i - 1} + \\sigma_{s}\\epsilon^{i - 1}$ by applying a timestep-dependent scaling of $\\epsilon^{i - 1}$ to the image $x^{i - 1}$ . We then use", + "bbox": [ + 496, + 657, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "4477", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/dc0855ebbe75fc13f60c0283528b86c600c64f2abc32f487d20fcae9e5b6718f.jpg", + "image_caption": [ + "Figure 6. Cascaded Score Distillation (CSD). We simultaneously distill scores across multiple stages of a cascaded diffusion model in order to leverage both the global awareness of the first stage and the higher level of detail contained in later stages. The difference between the predicted noise and sampled noise is the image gradient for each stage." + ], + "image_footnote": [], + "bbox": [ + 99, + 85, + 449, + 275 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$\\phi^i$ to predict noise $\\epsilon_{\\phi^i}(z_t^i,t,z_s^{i - 1},s,y)$ conditioned on the noisy images, timesteps, and text prompt. 
Our gradient $\\nabla_{x^i}$ for stage $\\phi^i$ for $i > 1$ is the difference between the predicted noise and the (higher-resolution) sampled noise $\\epsilon^i$ weighted by the timestep-dependent function $w(t)$ :", + "bbox": [ + 75, + 391, + 467, + 467 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\nabla_ {x ^ {i}} \\mathcal {L} _ {C S D ^ {i}} \\left(\\phi^ {i}, x ^ {i}, x ^ {i - 1}, y\\right) = \\\\ w (t) \\left(\\epsilon_ {\\phi^ {i}} \\left(z _ {t} ^ {i}, t, z _ {s} ^ {i - 1}, s, y\\right) - \\epsilon^ {i}\\right). \\tag {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 150, + 474, + 468, + 513 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "With all gradients $\\nabla_{x^1},\\ldots ,\\nabla_{x^N}$ computed, we weight each gradient $\\nabla_{x^i}$ with a user defined $\\lambda^i$ to provide control over the impact of the supervision from each stage of the cascaded model. Thus our full gradient with respect to any given neural texture $\\theta$ can be described by:", + "bbox": [ + 75, + 521, + 468, + 597 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\nabla_ {\\theta} \\mathcal {L} _ {C S D} (\\phi , \\mathbf {x} = g (\\theta), y) = \\\\ \\lambda^ {1} \\nabla_ {x ^ {1}} \\mathcal {L} _ {S D S} (\\phi^ {1}, x ^ {1}, y) \\frac {\\partial x ^ {1}}{\\partial \\theta} \\\\ + \\sum_ {i = 2} ^ {N} \\lambda^ {i} \\nabla_ {x ^ {i}} \\mathcal {L} _ {C S D ^ {i}} \\left(\\phi^ {i}, x ^ {i}, x ^ {i - 1}, y\\right) \\frac {\\partial x ^ {i}}{\\partial \\theta}. \\tag {3} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 604, + 468, + 699 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Note that just as in SDS [44], we can avoid computing the U-Net Jacobian term $\\frac{\\partial\\epsilon_{\\phi}(z_t^i,t,z_s^{i - 1},s,y)}{z_t^i}$ (not shown in Eq. (3)) since each stage is entirely independent and our gradient is only with respect to the high-resolution image $x^i$ . Thus, we directly apply $\\lambda^i\\nabla_{x^i}$ to the image $x^i$ without having to compute the costly backpropagation through the U-Net. Using the gradient $\\nabla_{\\theta}\\mathcal{L}_{CSD}(\\phi ,\\mathbf{x} = g(\\theta),y)$ , we update the weights of our MLP $\\theta$ .", + "bbox": [ + 75, + 705, + 468, + 832 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 845, + 207, + 861 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We demonstrate the capabilities of 3D Paintbrush on a wide variety of meshes (from different sources [55, 56, 61, 65])", + "bbox": [ + 75, + 869, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/33f1240fa7193e069ba0cbfc01ea4c297c50b0a5c68a2a9bbdf303837df82ec6.jpg", + "image_caption": [ + "Only stage 1" + ], + "image_footnote": [], + "bbox": [ + 501, + 87, + 625, + 172 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/296857b7782a8ab5165386cf4150d05f7248ac8b10ace6c3d3d178fe938d4844.jpg", + "image_caption": [ + "Only stage 2", + "Figure 7. Impact of cascaded stages. Different stages of the cascaded model provide different levels of granularity and global understanding. Using only the (low resolution) stage 1 model gives a low-resolution result in the correct location. While the (high resolution) stage 2 model gives a high-resolution result, it is placed in the incorrect location. 
Our CSD simultaneously uses stage 1 and 2, resulting in a highly-detailed texture in the appropriate location." + ], + "image_footnote": [], + "bbox": [ + 635, + 88, + 758, + 171 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b135e81205c6c98e66925fc16e84c504e31b85f2c854ebad06f74405b10ea708.jpg", + "image_caption": [ + "CSD" + ], + "image_footnote": [], + "bbox": [ + 767, + 88, + 823, + 171 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5b8460f91c44d63517f33c80a08b86d1c0277e231a606456da0b8a11efa6dfe1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 823, + 89, + 892, + 140 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "and prompts. We highlight key properties of our method such as localization precision and edit specificity. We then demonstrate the importance and capabilities of our CSD loss including its high resolution supervision and intuitive controls. Finally, we evaluate our system against other localization and editing baselines and ablate the key components of our method. In our experiments, we use DeepFloyd IF [3] for our cascaded model. Our unoptimized PyTorch [43] implementation takes 4 hours on a standard A40 GPU, typically achieving satisfactory results within 2 hours.", + "bbox": [ + 496, + 313, + 890, + 465 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Properties of 3D Paintbrush", + "text_level": 1, + "bbox": [ + 500, + 473, + 750, + 489 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "3D Paintbrush generality. 3D Paintbrush is capable of producing highly detailed localizations and textures on a diverse collection of meshes and prompts (Fig. 4). Our method is not restricted to any category of meshes and we show results on organic and manufactured shapes. Furthermore, our local textures can be specified with open vocabulary text descriptions and are not limited to any predefined categories or constraints. This includes \"out-of-domain\" local textures such as the rainbow shinguards on a giraffe which are not naturally seen in the context of these objects, yet are precisely placed in semantically meaningful locations with highly detailed textures.", + "3D Paintbrush precision and composition. 3D Paintbrush produces precise localizations and highly-detailed textures that effectively adhere to these predicted localizations (see Fig. 2). The tight coupling between the localization and texture (see the gold chain necklace in Fig. 1) enables seamless composition of multiple local textures simultaneously on the same mesh without any layering artifacts. For example, the sharp localization boundary of the \"Tie-dye apron\" (in Fig. 2) allows us to composite this local texture on top of other textures without obstructing these textures in regions outside of the apron's boundary.", + "3D Paintbrush specificity and effectiveness. 3D Paintbrush produces accurate and high resolution local edits that closely adhere to the text-specification (see Fig. 10). Our" + ], + "bbox": [ + 496, + 500, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "4478", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c728d6c82ddb8cb58dd10d0cfef669aa55ac036d8bd866f1efbf1cf7c12a1748.jpg", + "image_caption": [ + "Figure 8. Granular control with CSD. Varying the weight between stage 1 and stage 2 results in control over the details and corresponding localization. 
Only using stage 1 (leftmost) is rather coarse; only using stage 2 (rightmost) is highly detailed with an incorrect localization. Increasing the stage 2 weight (moving left to right) progressively increases the detail and granularity of the supervision, enabling smooth and meaningful interpolation between stage 1 and 2." + ], + "image_footnote": [], + "bbox": [ + 117, + 87, + 176, + 172 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2bfdaa1e92a3b054262c87e40edf7656be061e49122935b113dc0a310c900c99.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 184, + 88, + 243, + 172 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/710419ad47e59698d0a19e559bdd3518608098670ea2964f39f818d032fe4942.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 253, + 88, + 310, + 172 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3814c9de18a8a1812bec28f904d96706c1073a0823d5b79423d07f0ae2513bc2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 320, + 88, + 379, + 172 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/41eb0c5f58d6feb8ce9551a7beafa65e3893ad5870b3c73c95c19a30c276b94a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 388, + 88, + 446, + 172 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2b792d093f6b91dc62a35ea246d0f41f25dd615c0a02226c5548175a2dcc3b09.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 455, + 88, + 514, + 172 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/102658898ed7b397ddcbbc5e58d494ee49e471b9c252e79b8635a0fe4032a853.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 88, + 580, + 172 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1fd7698e39f3f52fbe8ad6c27a8fbb1ce42f938fba5b0ca4540a0ed3b5e4261e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 589, + 88, + 647, + 172 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d0d288a63047d491fee7b9779a723b9a591773b7af8d1d948f06c86f7f11c149.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 88, + 715, + 172 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/db0157c211ca0f61d67e2037a587780ac60fa16e38abd95ffe727ad1b7a7001d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 723, + 88, + 782, + 172 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/89963c25164dfa3d19bd6d48f4b15f548fcd25730b779d9bff8c68979aecc46c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 792, + 88, + 851, + 172 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "method's fine-grained results contain intricate details (i.e. the badge on \"Barcelona jersey\") and reflect the subtle differences in the text prompts (i.e. the \"cape\" on the dog is more tapered than the boxer \"poncho\"). This specificity allows us to produce many diverse and distinct local styles. We show multiple local edits on the same mesh for multiple different meshes, demonstrating the effectiveness of our method on diverse prompts and meshes.", + "bbox": [ + 75, + 267, + 468, + 387 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Importance of Cascaded Score Distillation", + "text_level": 1, + "bbox": [ + 76, + 396, + 436, + 412 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Impact and granular control of CSD. 
Our cascaded score distillation (CSD) simultaneously distills scores at multiple resolutions in a cascaded fashion. We observe that different stages of the cascaded diffusion model give different levels of granularity and global understanding (Fig. 7). Using only the (low resolution) stage 1 model is equivalent to SDS. Though SDS produces an accurate localization and coherent texture, the result is low-resolution (see Fig. 9). Conversely, using only the (high resolution) stage 2 model gives a high-resolution result, but often fails to properly lo", + "bbox": [ + 75, + 422, + 470, + 575 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "calize the texture leading to undesirable results. Our CSD simultaneously combines the supervision from stages 1 and 2, resulting in a highly-detailed texture in the appropriate location. Increasing the stage 2 weight (moving left to right in Fig. 8) progressively increases the detail and granularity of the supervision, demonstrating smooth and intuitive interpolation between stage 1 and 2. In our experiments, we use a fixed weighting scheme, but this result demonstrates that our method works for a broad range of weights. Quantitative evidence supporting the importance of the CSD loss can bee seen in Tab. 1.", + "bbox": [ + 500, + 267, + 890, + 431 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d5758a31b7aff3df47afe38ed6a74c394772540cb9a145724b3d812e5544c52e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
LocalizationSATR3D HighlighterOurs
Average Score ↑1.892.034.80
Local EditsLatent PaintVox-EOurs (SDS)Ours
Average Score ↑2.142.154.064.88
", + "bbox": [ + 506, + 441, + 887, + 507 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1. Quantitative evaluation. We conduct a perceptual study where users evaluate our localizations and local edits compared to baseline methods (3D Highlighter [10], SATR [2], Latent Paint [38], Vox-E [49], and our method with standard SDS loss).", + "bbox": [ + 500, + 517, + 890, + 574 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/25ad4a63821e8327d352136370d9337a779df6f421123155fcb458dda9484cfd.jpg", + "image_caption": [ + "Figure 9. Importance of super-resolution stage in CSD. Using stage 1 only (equivalent to SDS) lacks fine-grained details. Incorporating the second super-resolution cascaded stage from our CSD increases the resolution and detail. Input text prompts (from left to right): Colorful crochet shell, Cactus base, Tiger stripe shirt." + ], + "image_footnote": [], + "bbox": [ + 96, + 592, + 450, + 825 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Evaluation", + "text_level": 1, + "bbox": [ + 500, + 602, + 619, + 616 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Simultaneous localization and texture. We demonstrate the importance of simultaneously optimizing the localization region and texture in tandem in Fig. 5. We observe that simultaneous optimization results in highly detailed textures which effectively conform to the predicted localization regions (Fig. 5, left). Furthermore, the resulting localization region is sharp and intricate. Alternatively, we optimize the localization region first and use the predicted localization to learn a texture which is confined to the (precomputed) localization region (Fig. 5, middle). In this case, the texture is less detailed, and the localization region is less intricate. Finally, we can learn the texture and localization region independently (Fig. 5, independent). This results in a texture (Fig. 5 independent, middle) that is completely decoupled from the localization region (Fig. 5 independent, left). When masking the texture with the localization region, we observe a misaligned texture with fringe artifacts (Fig. 5 independent, right).", + "bbox": [ + 496, + 628, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "4479", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/dd06006c1d943573844d7e606784a0964f9b073f4c881a9b7c1d76511a2b0273.jpg", + "image_caption": [ + "Beautiful roses Colorful crochet base Rainbow headband Camo poncho Superhero cape Tiger stripe hat", + "Figure 10. 3D Paintbrush is capable of producing a variety of local textures on the same mesh. Each result contains an accurate localization map (to specify the edit region) and a texture map that conforms to it." + ], + "image_footnote": [], + "bbox": [ + 114, + 79, + 851, + 308 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Quantitative evaluation. 3D Paintbrush is the only method geared towards local editing that natively operates on meshes. We compare to the closest mesh-based methods which perform localization (3D Highlighter [10], SATR [2]) and texturing (Latent Paint [38]). We also compare to a voxel NeRF approach for local 3D editing (Vox-E [49]).", + "bbox": [ + 75, + 378, + 468, + 470 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To evaluate our method against these baselines, we conduct a perceptual study where 39 users rate the effectiveness of each method for 9 different meshes (see Tab. 1). 
3D Paintbrush consistently scores the highest for both localization and local editing, producing sharper localizations than 3D Highlighter and SATR and higher resolution textures than Latent Paint and Vox-E. Further quantitative evaluation using CLIP R-Precision and qualitative comparisons to these baselines are shown in the supplemental material.", + "bbox": [ + 75, + 472, + 470, + 608 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations. We illustrate a limitation of our method in Fig. 11. In cases where the desired local texture has strong semantic connections to additional components, these auxiliary components can sometimes be included in the localization and local texture. For example, a \"Pharaoh head-dress\" is closely associated with Egyptian necklaces and thus our method also localizes and styles this component as well. Our method also suffers from the Janus effect common to many text-to-3D methods that use 2D supervision.", + "bbox": [ + 75, + 614, + 470, + 750 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 76, + 782, + 194, + 799 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We presented 3D Paintbrush, a technique that produces highly detailed texture maps on meshes which effectively adhere to a predicted localization region. Our system is capable of hallucinating non-obvious local textures on a wide variety of meshes (such as heart-shaped sunglasses on a cow). Our localizations are detailed and accurate, en", + "bbox": [ + 75, + 810, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "abling seamless post-processing (such as compositing textures without unwanted fringe). We proposed cascaded score distillation, a technique capable of extracting supervision signals from multiple stages of a cascaded diffusion model. We observe that each stage controls different amounts of detail and global understanding. Further, varying the weights for each stage provides control over the resulting local textures. We show the effectiveness of CSD to locally texture meshes; yet, CSD is general and can be applied to other domains (such as images, videos, and alternative 3D representations). In the future, we are interested in extending localized editing to capabilities beyond texturing (such as deformations, normal maps, and more).", + "bbox": [ + 496, + 378, + 893, + 577 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Acknowledgments", + "text_level": 1, + "bbox": [ + 500, + 589, + 679, + 606 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We thank the University of Chicago for providing the AI cluster resources, services, and the professional support of the technical staff. This work was also supported in part by gifts from Snap Research, Adobe Research, Google Research, BSF grant 2022363, and NSF grants 2304481 and 2241303. Finally, we would like to thank Brian Kim, Jack Zhang, Haochen Wang, and the members of 3DL and PALS for their thorough and insightful feedback on our work.", + "bbox": [ + 496, + 612, + 893, + 726 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d0c477b64edabd0b892d6eaf9f1e8a87bda7029beca7ec998bc553b2e8659424.jpg", + "image_caption": [ + "Figure 11. In cases where the desired localization carries a strong semantic context, elements from that context can also appear in the localization and style. 
For example, when adding a pharaoh headdress, 3D Paintbrush also adds an Egyptian necklace since they are commonly associated with pharaohs." + ], + "image_footnote": [], + "bbox": [ + 501, + 734, + 890, + 825 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "4480", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Ahmed Abdelreheem, Abdelrahman Eldesokey, Maks Ovsjanikov, and Peter Wonka. Zero-shot 3d shape correspondence. In SIGGRAPH Asia 2023 Conference Papers, pages 1-11, 2023. 2", + "[2] Ahmed Abdelreheem, Ivan Skorokhodov, Maks Ovsjanikov, and Peter Wonka. Satr: Zero-shot semantic segmentation of 3d shapes. In ICCV, 2023. 2, 7, 8", + "[3] Stability AI. Deepfloydif, 2023. 6", + "[4] Sudarshan Babu, Richard Liu, Avery Zhou, Michael Maire, Greg Shakhnarovich, and Rana Hanocka. Hyperfields: Towards zero-shot generation of nerfs from text. arXiv preprint arXiv:2310.17075, 2023. 2", + "[5] Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. In European conference on computer vision, pages 707-723. Springer, 2022. 2", + "[6] Alexey Bokhovkin, Shubham Tulsiani, and Angela Dai. Mesh2tex: Generating mesh textures from image queries. arXiv preprint arXiv:2304.05868, 2023. 2", + "[7] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18392-18402, 2023. 2", + "[8] Dave Zhenyu Chen, Yawar Siddiqui, Hsin-Ying Lee, Sergey Tulyakov, and Matthias Nießner. Text2tex: Text-driven texture synthesis via diffusion models. In ICCV, 2023. 2", + "[9] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. arXiv preprint arXiv:2303.13873, 2023. 2", + "[10] Dale Decatur, Itai Lang, and Rana Hanocka. 3d highlighter: Localizing regions on 3d shapes via text descriptions. In CVPR, 2023. 2, 4, 7, 8", + "[11] Zhiwen Fan, Yifan Jiang, Peihao Wang, Xinyu Gong, Dejia Xu, and Zhangyang Wang. Unified implicit neural stylization. In European Conference on Computer Vision, pages 636-654. Springer, 2022. 2", + "[12] Rao Fu, Xiao Zhan, Yiwen Chen, Daniel Ritchie, and Srinath Sridhar. Shapecrafter: A recursive text-conditioned 3d shape generation model. Advances in Neural Information Processing Systems, 35:8882-8895, 2022. 2", + "[13] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 2", + "[14] Jun Gao, Tianchang Shen, Zian Wang, Wenzheng Chen, Kangxue Yin, Daiqing Li, Or Litany, Zan Gojcic, and Sanja Fidler. Get3d: A generative model of high quality 3d textured shapes learned from images. In Advances In Neural Information Processing Systems, 2022. 2", + "[15] Lin Gao, Tong Wu, Yu-Jie Yuan, Ming-Xian Lin, Yu-Kun Lai, and Hao Zhang. Tm-net: Deep generative networks for textured meshes. ACM Transactions on Graphics (TOG), 40 (6):1-15, 2021. 
2" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] William Gao, Noam Aigerman, Thibault Groueix, Vova Kim, and Rana Hanocka. Textdeformer: Geometry manipulation using text guidance. In ACM SIGGRAPH 2023 Conference Proceedings, pages 1-11, 2023. 2", + "[17] Huy Ha and Shuran Song. Semantic abstraction: Openworld 3D scene understanding from 2D vision-language models. In Proceedings of the 2022 Conference on Robot Learning, 2022. 2", + "[18] Ayaan Haque, Matthew Tancik, Alexei A Efros, Aleksander Holynski, and Angjoo Kanazawa. Instruct-nerf2nerf: Editing 3d scenes with instructions. ICCV, 2023. 2", + "[19] Amir Hertz, Rana Hanocka, Raja Giryes, and Daniel Cohen-Or. Deep geometric texture synthesis. ACM Transactions on Graphics (TOG), 39(4):108-1, 2020. 2", + "[20] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022. 2", + "[21] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. The Journal of Machine Learning Research, 23(1):2249-2281, 2022. 2, 5", + "[22] Lukas Hollein, Justin Johnson, and Matthias Nießner. Stylemesh: Style transfer for indoor 3d scene reconstructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6198-6208, 2022. 2", + "[23] Jingwei Huang, Justus Thies, Angela Dai, Abhijit Kundu, Chiyu Jiang, Leonidas J Guibas, Matthias Nießner, Thomas Funkhouser, et al. Adversarial texture optimization from rgb-d scans. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1559-1568, 2020. 2", + "[24] Yukun Huang, Jianan Wang, Yukai Shi, Xianbiao Qi, Zheng-Jun Zha, and Lei Zhang. Dreamtime: An improved optimization strategy for text-to-3d content creation. arXiv preprint arXiv:2306.12422, 2023. 3", + "[25] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 2", + "[26] Oren Katzir, Or Patashnik, Daniel Cohen-Or, and Dani Lischinski. Noise-free score distillation, 2023. 2", + "[27] Justin Kerr, Chung Min Kim, Ken Goldberg, Angjoo Kanazawa, and Matthew Tancik. Lerf: Language embedded radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19729-19739, 2023. 2", + "[28] Sosuke Kobayashi, Eiichi Matsumoto, and Vincent Sitzmann. Decomposing nerf for editing via feature field distillation. In Advances in Neural Information Processing Systems, 2022. 2", + "[29] Han-Hung Lee and Angel X Chang. Understanding pure clip guidance for voxel grid nerf models. arXiv preprint arXiv:2209.15172, 2022. 2" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "4481", + "bbox": [ + 482, + 945, + 513, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[30] Jiabao Lei, Yabin Zhang, Kui Jia, et al. Tango: Text-driven photorealistic and robust 3d stylization via lighting decomposition. Advances in Neural Information Processing Systems, 35:30923-30936, 2022. 2", + "[31] Yuhan Li, Yishun Dou, Yue Shi, Yu Lei, Xuanhong Chen, Yi Zhang, Peng Zhou, and Bingbing Ni. 
Focaldreamer: Text-driven 3d editing via focal-fusion assembly. arXiv preprint arXiv:2308.10608, 2023. 2", + "[32] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In CVPR, 2023. 1, 2, 3", + "[33] Hsueh-Ti Derek Liu, Vladimir G. Kim, Siddhartha Chaudhari, Noam Aigerman, and Alec Jacobson. Neural subdivision. ACM Trans. Graph., 39(4), 2020. 2", + "[34] Kunhao Liu, Fangneng Zhan, Yiwen Chen, Jiahui Zhang, Yingchen Yu, Abdulmotaleb El Saddik, Shijian Lu, and Eric P Xing. Stylerf: Zero-shot 3d style transfer of neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8338-8348, 2023. 2", + "[35] Minghua Liu, Chao Xu, Haian Jin, Linghao Chen, Zexiang Xu, Hao Su, et al. One-2-3-45: Any single image to 3d mesh in 45 seconds without per-shape optimization. arXiv preprint arXiv:2306.16928, 2023. 2", + "[36] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object, 2023. 2", + "[37] Yiwei Ma, Xiaqing Zhang, Xiaoshuai Sun, Jiayi Ji, Haowei Wang, Guannan Jiang, Weilin Zhuang, and Rongrong Ji. X-mesh: Towards fast and accurate text-driven 3d stylization via dynamic textual guidance. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2749-2760, 2023. 2", + "[38] Gal Metzer, Elad Richardson, Or Patashnik, Raja Giryes, and Daniel Cohen-Or. Latent-nerf for shape-guided generation of 3d shapes and textures. In CVPR, 2023. 1, 2, 3, 7, 8", + "[39] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. Text2mesh: Text-driven neural stylization for meshes. In CVPR, pages 13492-13502, 2022. 2", + "[40] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 2", + "[41] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. Clip-mesh: Generating textured meshes from text using pretrained image-text models. In SIGGRAPH Asia 2022 conference papers, pages 1-8, 2022. 2", + "[42] Michael Oechsle, Lars Mescheder, Michael Niemeyer, Thilo Strauss, and Andreas Geiger. Texture fields: Learning texture representations in function space. In CVPR, pages 4531-4540, 2019. 2", + "[43] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017. 6" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[44] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In ICLR, 2023. 2, 5, 6", + "[45] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3d object generation using both 2d and 3d diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 2", + "[46] Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In International conference on machine learning, 2019. 
3", + "[47] Elad Richardson, Gal Metzer, Yuval Alaluf, Raja Giryes, and Daniel Cohen-Or. Texture: Text-guided texturing of 3d shapes. In ACM TOG, 2023. 2", + "[48] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 1, 2", + "[49] Etai Sella, Gal Fiebelman, Peter Hedman, and Hadar Averbuch-Elor. Vox-e: Text-guided voxel editing of 3d objects. In ICCV, 2023. 1, 2, 7, 8", + "[50] Yichun Shi, Peng Wang, Jianglong Ye, Mai Long, Kejie Li, and Xiao Yang. Mvdream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512, 2023. 2", + "[51] Yawar Siddiqui, Justus Thies, Fangchang Ma, Qi Shan, Matthias Nießner, and Angela Dai. Texturify: Generating textures on 3d shape surfaces. In European Conference on Computer Vision, pages 72-88. Springer, 2022. 2", + "[52] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. 2020. 4", + "[53] Christina Tsalicoglou, Fabian Manhardt, Alessio Tonioni, Michael Niemeyer, and Federico Tombari. Textmesh: Generation of realistic 3d meshes from text prompts. arXiv preprint arXiv:2304.12439, 2023. 1, 2", + "[54] Vadim Tschernezki, Iro Laina, Diane Larlus, and Andrea Vedaldi. Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. In 2022 International Conference on 3D Vision (3DV), pages 443-453. IEEE, 2022. 2", + "[55] TurboSquid. Turbosquid 3d model repository, 2021. https://www.turbosquid.com/. 6", + "[56] Oliver van Kaick, Andrea Tagliasacchi, Oana Sidi, Hao Zhang, Daniel Cohen-Or, Lior Wolf, and Ghassan Hamarneh. Prior knowledge for part correspondence. Computer Graphics Forum, 30(2):553-562, 2011. 6", + "[57] Suhani Vora, Noha Radwan, Klaus Greff, Henning Meyer, Kyle Genova, Mehdi S. M. Sajjadi, Etienne Pot, Andrea Tagliasacchi, and Daniel Duckworth. Nesf: Neural semantic fields for generalizable semantic segmentation of 3d scenes, 2021. 2" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "4482", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[58] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A. Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In CVPR, 2023. 1, 2, 3, 5", + "[59] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. arXiv preprint arXiv:2305.16213, 2023. 1, 2, 3", + "[60] Xingkui Wei, Zhengqing Chen, Yanwei Fu, Zhaopeng Cui, and Yinda Zhang. Deep hybrid self-prior for full 3d mesh generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5805-5814, 2021. 2", + "[61] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 6", + "[62] Kangxue Yin, Jun Gao, Maria Shugrina, Sameh Khamis, and Sanja Fidler. 
3dstylenet: Creating 3d shapes with geometric and texture style variations. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12456-12465, 2021. 2", + "[63] Xiaohui Zeng, Arash Vahdat, Francis Williams, Zan Gojcic, Or Litany, Sanja Fidler, and Karsten Kreis. Lion: Latent point diffusion models for 3d shape generation. arXiv preprint arXiv:2210.06978, 2022. 2", + "[64] Kai Zhang, Nick Kolkin, Sai Bi, Fujun Luan, Zexiang Xu, Eli Shechtman, and Noah Snavely. Arf: Artistic radiance fields. In European Conference on Computer Vision, pages 717-733. Springer, 2022. 2", + "[65] Qingnan Zhou and Alec Jacobson. Thingi10k: A dataset of 10,000 3d-printing models. arXiv preprint arXiv:1605.04797, 2016. 6", + "[66] Joseph Zhu and Peiye Zhuang. Hifa: High-fidelity text-to-3d with advanced diffusion guidance. arXiv preprint arXiv:2305.18766, 2023. 2, 3", + "[67] Jingyu Zhuang, Chen Wang, Lingjie Liu, Liang Lin, and Guanbin Li. Dreameditor: Text-driven 3d scene editing with neural fields. In SIGGRAPH Asia, 2023. 1, 2" + ], + "bbox": [ + 78, + 90, + 470, + 642 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "4483", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/669e6bfe-eb9e-4f5b-a53c-23335eda80fe_model.json b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/669e6bfe-eb9e-4f5b-a53c-23335eda80fe_model.json new file mode 100644 index 0000000000000000000000000000000000000000..846ca5c23977f4ad8ff1a8ebe7a6eac6a9656f6a --- /dev/null +++ b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/669e6bfe-eb9e-4f5b-a53c-23335eda80fe_model.json @@ -0,0 +1,2433 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.131, + 0.885, + 0.152 + ], + "angle": 0, + "content": "3D Paintbrush: Local Stylization of 3D Shapes with Cascaded Score Distillation" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.182, + 0.281, + 0.217 + ], + "angle": 0, + "content": "Dale Decatur University of Chicago" + }, + { + "type": "text", + "bbox": [ + 0.318, + 0.182, + 0.495, + 0.217 + ], + "angle": 0, + "content": "Itai Lang \nUniversity of Chicago" + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.182, + 0.652, + 0.217 + ], + "angle": 0, + "content": "Kfir Aberman \nSnap Research" + }, + { + "type": "text", + "bbox": [ + 0.69, + 0.182, + 0.867, + 0.217 + ], + "angle": 0, + "content": "Rana Hanocka \nUniversity of Chicago" + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.257, + 0.264, + 0.417 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.257, + 0.443, + 0.417 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.464, + 0.257, + 0.653, + 0.417 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.663, + 0.257, + 0.871, + 0.417 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.422, + 0.893, + 0.449 + ], + "angle": 0, + "content": "Figure 1. Utilizing only a text prompt as guidance, 3D Paintbrush seamlessly generates local stylized textures on bare meshes. Our approach produces a localization map (yellow regions) and a highly detailed texture map which conforms to it." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.463, + 0.312, + 0.478 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.494, + 0.474, + 0.797 + ], + "angle": 0, + "content": "We present 3D Paintbrush, a technique for automatically texturing local semantic regions on meshes via text descriptions. Our method is designed to operate directly on meshes, producing texture maps which seamlessly integrate into standard graphics pipelines. We opt to simultaneously produce a localization map (to specify the edit region) and a texture map which conforms to it. This approach improves the quality of both the localization and the stylization. To enhance the details and resolution of the textured area, we leverage multiple stages of a cascaded diffusion model to supervise our local editing technique with generative priors learned from images at different resolutions. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion, enabling control over both the granularity and global understanding of the supervision. We demonstrate the effectiveness of 3D Paintbrush to locally texture different semantic regions on a variety of shapes. Project page: https://threedle.github.io/3d-paintbrush" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.815, + 0.208, + 0.83 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.903 + ], + "angle": 0, + "content": "The ability to edit existing high-quality 3D assets is a fundamental capability in 3D modeling workflows. Recent works have shown exceptional results for text-driven 3D data creation [32, 38, 48, 53, 58, 59], but focus on making global" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.464, + 0.892, + 0.6 + ], + "angle": 0, + "content": " edits. 
While some progress has been made on local editing using an explicit localization of the edit region [49, 67], these regions are often coarse and lack fine-grained detail. Highly-detailed and accurate localizations are important for constraining the edits to be within a specific region, preventing changes unrelated to the target edit. Furthermore, while meshes with texture maps are the de facto standard in graphics pipelines, existing local editing work does not natively operate on meshes nor produce texture maps for them." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.607, + 0.893, + 0.834 + ], + "angle": 0, + "content": "In this work we develop 3D Paintbrush, a method for automatically texturing local semantic regions on meshes via text descriptions. Our method is designed to operate directly on meshes, producing texture maps which seamlessly integrate into standard graphics pipelines. 3D Paintbrush is controlled via intuitive, free-form text input, allowing users to describe their edits using open vocabulary on a wide range of meshes. Specifically, given an input mesh and a text prompt, 3D Paintbrush produces the corresponding high-quality texture map and a localization region to confine it. To enhance the details and resolution of the locally textured area, we introduce Cascaded Score Distillation (CSD) which leverages multiple stages of a cascaded diffusion model. Our explicit localization masks can be used to layer our edit texture onto existing textures." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We opt to represent both our localization map and texture map as neural fields encoded by multi-layer perceptions. Our method synthesizes both a fine-grained localization mask and high-quality texture in tandem. Simultane" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4473" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.084, + 0.241, + 0.254 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.095, + 0.255, + 0.22, + 0.269 + ], + "angle": 0, + "content": "Colorful polo shirt" + }, + { + "type": "image", + "bbox": [ + 0.252, + 0.086, + 0.412, + 0.254 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.266, + 0.255, + 0.392, + 0.269 + ], + "angle": 0, + "content": "Superman emblem" + }, + { + "type": "image", + "bbox": [ + 0.422, + 0.086, + 0.583, + 0.254 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.452, + 0.255, + 0.545, + 0.269 + ], + "angle": 0, + "content": "Tie-dye apron" + }, + { + "type": "image", + "bbox": [ + 0.593, + 0.086, + 0.755, + 0.254 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.614, + 0.255, + 0.731, + 0.269 + ], + "angle": 0, + "content": "Muay Thai shorts" + }, + { + "type": "image", + "bbox": [ + 0.777, + 0.094, + 0.892, + 0.254 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.799, + 0.255, + 0.873, + 0.269 + ], + "angle": 0, + "content": "Composite" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.273, + 0.893, + 0.302 + ], + "angle": 0, + "content": "Figure 2. Precise composition of multiple local textures. 3D Paintbrush produces highly-detailed textures that effectively adhere to the predicted localizations. This enables seamlessly compositing local textures without unwanted fringes (right)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.328, + 0.468, + 0.403 + ], + "angle": 0, + "content": "ously generating the localization and texture maps improves the quality of each. The texture map drives the localization to become more detailed and intricate. The localization explicitly masks the texture, ensuring a coherent local style which respects the localization boundary." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.404, + 0.468, + 0.631 + ], + "angle": 0, + "content": "Our local stylization operates in small regions, necessitating higher resolution supervision compared to global generative techniques. Existing approaches leverage pretrained text-to-image diffusion models with Score Distillation Sampling (SDS) to supervise text-driven optimizations [31, 58]. Text-to-image diffusion models often contain multiple cascaded stages in order to achieve high resolution [21], but standard SDS only utilizes the first low-resolution stage of the cascaded model. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion, enabling control over both the granularity and global understanding of the supervision. Since cascaded stages are trained entirely independently, our insight is to formulate a distillation loss that incorporates all stages in tandem." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.632, + 0.468, + 0.798 + ], + "angle": 0, + "content": "In summary, our method enables local text-driven stylization of meshes. By explicitly learning a localization in tandem with the texture, we ensure that our edits are bounded by the localized region. Using our CSD, which leverages all stages of the diffusion model, we can control the granularity and global understanding of the supervision achieving higher resolution textures and localizations than standard SDS. We demonstrate that 3D Paintbrush yields diverse local texturing on a variety of shapes and semantic regions and outperforms baselines both qualitatively and quantitatively." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.815, + 0.219, + 0.83 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.901 + ], + "angle": 0, + "content": "A large body of work has studied stylization and analysis of 3D content. Existing work uses neural networks and optimization [6, 15, 19, 22, 23, 30, 33, 37-39, 41, 42, 51, 60, 62] for mesh stylization. Other works use a neural radiance field" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.327, + 0.892, + 0.465 + ], + "angle": 0, + "content": "NeRF [40] for stylization [11, 34, 64]. Yet, these works focus on stylization rather than localization. Large 2D models have been used for analytical tasks in 3D such as localization and segmentation [1, 2, 10, 17, 27, 28, 54, 57, 67], however, none of these works produce textures. Furthermore, only [1, 2, 10, 67] aim to produce a tight localization on meshes and we find that these approaches still produce relatively smooth localization regions that cannot capture the high frequency details needed for sharp local edits." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.47, + 0.892, + 0.606 + ], + "angle": 0, + "content": "Text-driven generation and editing. Existing works have leveraged pre-trained 2D models to generate 3D representations that adhere to a text prompt [4, 12, 16, 25, 29, 39, 41, 63]. 
Many recent methods [9, 26, 32, 44, 50, 53, 53, 58, 66] use score distillation [44, 58] from 2D models to generate both geometry and styles from scratch, while other works optimize the texture of an existing, fixed geometry [8, 38, 39, 47]. Other work aims to generate 3D representations from images [14, 35, 36, 45]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.607, + 0.892, + 0.73 + ], + "angle": 0, + "content": "Existing text-to-3D generative methods [38, 44, 58, 59] can be used to perform global edits [18, 48, 67]. However, since these approaches do not have explicit edit localizations, they struggle to perform highly specific local edits without changing other components of the 3D representation's appearance. Different from our objective, these works aim to generate or globally manipulate existing 3D representations, while our work focuses on local editing." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Text-driven local editing. Many approaches can perform global 3D edits and progress has been made on local editing in images and videos [5, 7, 13, 20]. Yet, few works have addressed the task of precise, local editing for 3D representations. Local editing is challenging since, in addition to synthesizing the edit, methods need to localize the edit region. FocalDreamer [31] obtains precise user defined edit regions at the cost of requiring additional, tedious user input compared to strictly text-driven approaches. Vox-E [49] (operating on voxel representations) and DreamEditor [67] (operating on NeRFs) both use attention maps to localize an edit" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4474" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.087, + 0.895, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.274, + 0.895, + 0.332 + ], + "angle": 0, + "content": "Figure 3. Overview of 3D Paintbrush. Each point on the surface of the mesh is passed into three different branches to produce a localization probability, texture map, and background map. We texture three different variants of the same mesh with the localization, texture, and background maps and render them from the same viewpoint. Each image along with the corresponding text condition is used to compute the CSD loss." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.357, + 0.47, + 0.465 + ], + "angle": 0, + "content": "region and thus the localization has no visual meaning in isolation. Our approach imposes a visual loss on our localizations in order to enforce sharp boundaries that are tightly coupled with our texture edits. Additionally, since existing purely text-driven local editing approaches only work on voxels and NeRFs, our approach is the first to enable text-driven local editing on meshes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.467, + 0.471, + 0.71 + ], + "angle": 0, + "content": "High resolution text-to-3D. Several works have explored techniques to increase the resolution for text-to-3D. Many recent works apply SDS to latent diffusion models [32, 38, 58, 59, 66]. Recent works backpropagate the gradient through the encoder to get gradients in higher resolution \\(512 \\times 512\\) RGB space [32, 59, 66]. Other works use timestep annealing to give less noisy supervision towards the end of the optimization, thus increasing the detail of the generations [24, 59]. 
HiFA [66] proposes denoising over multiple successive timesteps each iteration to provide better gradients and achieve high fidelity appearance. While all of these approaches have shown impressive improvements to the resolution of SDS supervision, SDS only utilizes the base stage (not super-resolution stages). Thus, these proposed improvements are orthogonal to ours and can be incorporated at the super-resolution stages using CSD as well." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.724, + 0.169, + 0.74 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.471, + 0.901 + ], + "angle": 0, + "content": "We show an overview of our method in Fig. 3. The inputs to our system are a mesh \\( M \\) and a text description \\( y \\) of the desired local edit. Our system produces a local texture on the mesh \\( M \\) that adheres to the text prompt \\( y \\). To supervise our optimization, we use score distillation with a pretrained text-to-image diffusion model. However, local editing requires higher detail than standard generation due to the small size and granularity of the desired edits. In order to further improve the detail of our localization and texture, we introduce Cascaded Score Distillation (CSD), a" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.357, + 0.892, + 0.419 + ], + "angle": 0, + "content": "technique that distills scores at multiple resolutions of the 2D cascaded model. This approach enables leveraging all stages of a cascaded model and provides control over both the detail and global understanding of the supervision." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.425, + 0.719, + 0.442 + ], + "angle": 0, + "content": "3.1. Local Neural Texturing" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.448, + 0.892, + 0.689 + ], + "angle": 0, + "content": "3D Paintbrush represents local textures as neural texture maps over the surface of a mesh \\(M\\) defined by vertices \\(V \\in \\mathbb{R}^{n \\times 3}\\) and faces \\(F \\in \\{1, \\dots, n\\}^{m \\times 3}\\). Extracting an explicit texture map from our neural textures is trivial, making our representation compatible with existing graphics pipelines. Furthermore, using texture maps enables producing high resolution textures (i.e., sub-triangle values) without a computationally expensive high resolution mesh. A straight-forward approach of directly optimizing texture values results in texture maps with artifacts and noise (see supplemental material). To mitigate this, we leverage the smoothness of neural networks [46]. However, a straight-forward application of an MLP to a 2D texture map \\(((u, v) \\to (r, g, b))\\) is inherently invalid at the texture seams (e.g., erroneous interpolations at boundaries), which may lead to texture discontinuities on the rendered mesh." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.893, + 0.901 + ], + "angle": 0, + "content": "We instead formulate our MLPs to operate on 3D coordinates leading to predictions in 3D that are inherently smooth and without any seam discontinuities. To do so, we invert the UV mapping \\(\\psi(x,y,z) = (u,v)\\) to get a map \\(\\psi^{-1}(u,v) = (x,y,z)\\) from 2D texels to 3D coordinates on the surface of the mesh. We optimize our MLPs with the 3D coordinates obtained from the 2D texel centers. We employ two primary networks, one for localization and one for texturing. 
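As a concrete illustration of the inverse-UV lookup described above (mapping 2D texel centers back to 3D surface points before any MLP is applied), here is a deliberately brute-force sketch using barycentric interpolation over UV-space triangles. The array shapes and names are assumptions for illustration, not the authors' implementation.

```python
import torch

def texel_centers_to_3d(uv_faces, xyz_faces, res):
    """
    uv_faces:  (F, 3, 2) per-face UV corners in [0, 1]^2
    xyz_faces: (F, 3, 3) matching 3D vertex positions
    res:       texture resolution (res x res texels)
    Returns (res, res, 3) surface coordinates and a (res, res) validity mask.
    Brute force over faces for clarity only; a real pipeline would rasterize the UV layout.
    """
    coords = torch.zeros(res, res, 3)
    valid = torch.zeros(res, res, dtype=torch.bool)
    centers = (torch.arange(res) + 0.5) / res               # texel-center grid in UV space
    vv, uu = torch.meshgrid(centers, centers, indexing="ij")
    p = torch.stack([uu, vv], dim=-1).reshape(-1, 2)         # (res*res, 2)
    for f in range(uv_faces.shape[0]):
        a, b, c = uv_faces[f]                                # UV triangle corners
        v0, v1 = b - a, c - a
        v2 = p - a
        d00, d01, d11 = v0 @ v0, v0 @ v1, v1 @ v1
        d20, d21 = v2 @ v0, v2 @ v1
        denom = d00 * d11 - d01 * d01
        if torch.abs(denom) < 1e-12:
            continue                                         # degenerate UV triangle
        w1 = (d11 * d20 - d01 * d21) / denom                 # barycentric weights of each texel center
        w2 = (d00 * d21 - d01 * d20) / denom
        w0 = 1.0 - w1 - w2
        inside = (w0 >= -1e-6) & (w1 >= -1e-6) & (w2 >= -1e-6)
        pts = (w0[:, None] * xyz_faces[f, 0] +
               w1[:, None] * xyz_faces[f, 1] +
               w2[:, None] * xyz_faces[f, 2])                # interpolated 3D surface points
        coords.reshape(-1, 3)[inside] = pts[inside]
        valid.reshape(-1)[inside] = True
    return coords, valid
```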
Our neural localization MLP is a function \\(\\mathcal{F}_{\\theta}\\) that maps a 3D coordinate \\(\\mathbf{x} = (x,y,z)\\) to a probability \\(p\\) (which we map back to a 2D localization map). Similarly, our neural texture MLP is a function \\(\\mathcal{F}_{\\phi}\\) that takes in a 3D coordinate and outputs an RGB value (which we map back to a 2D texture image). Our architecture first passes the 3D" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4475" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.089, + 0.852, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.298, + 0.893, + 0.327 + ], + "angle": 0, + "content": "Figure 4. 3D Paintbrush produces highly detailed textures and localizations for a diverse range of meshes and prompts. Our method synthesizes meaningful local edits on shapes, demonstrating both global and local part-level understanding." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.352, + 0.473, + 0.518 + ], + "angle": 0, + "content": "coordinates through positional encoding [52] before going through a 6-layer MLP. This formulation of using MLPs defined on the 3D surface leads to a neural texture which produces smoothly varying outputs in 3D, even though our 2D texture maps have discontinuities at the texture seams. The smoothness provided by the MLPs reduces artifacts, produces less noisy textures, and provides super resolution capabilities. Although we optimize our MLPs with 3D coordinates mapped from 2D texel centers, during inference, we may query the MLP for any value (i.e. sub-texels that enable super resolution texture maps even across seams)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.525, + 0.416, + 0.54 + ], + "angle": 0, + "content": "3.2. Visual Guidance for Localized Textures" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.549, + 0.47, + 0.625 + ], + "angle": 0, + "content": "We guide our optimization using three distinct losses that encourage both the localization and texture towards visually desirable results. Each loss is visualized as a branch in Fig. 3 - top branch: localization loss, middle branch: local texture map loss, bottom branch: background loss." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.628, + 0.47, + 0.852 + ], + "angle": 0, + "content": "Local texture map loss. First, we obtain our localization map \\( L_{map} \\in [0,1]^{H \\times W} \\) from the neural localization MLP \\( L_{map} = \\psi(\\mathcal{F}_{\\theta}(\\mathbf{x})) \\) and the texture map \\( T_{map} \\in [0,1]^{H \\times W \\times 3} \\) from the neural texture MLP \\( T_{map} = \\psi(\\mathcal{F}_{\\phi}(\\mathbf{x})) \\). We use the localization \\( L_{map} \\) to mask the texture \\( T_{map} \\) to get a local texture map \\( T_{map}' \\) which only contains textures inside the localization region. We apply the masked texture \\( T_{map}' \\) to our mesh \\( M \\) to get a locally-textured mesh \\( M_t \\) and construct a local-texture text prompt \\( y_t \\) from the input text \\( y \\) (middle branch Fig. 3). We then supervise our optimization using a text-conditioned visual loss (cascaded score distillation, see Sec. 3.4) on \\( M_t \\) and \\( y_t \\). By applying a visual loss to the localization-masked texture, we get informative and meaningful gradients for both our texture MLP and our localization MLP." 
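As a rough illustration of how the coordinate-based localization and texture networks and the localization masking could be wired together, the sketch below uses a positional encoding followed by 6-layer MLP heads. The encoding width, hidden size, and the neutral base color used for the highlight blend are assumptions for illustration only, not the released implementation.

```python
import torch
import torch.nn as nn

class FourierEncoding(nn.Module):
    """Simple sinusoidal positional encoding of 3D coordinates (an illustrative stand-in for [52])."""
    def __init__(self, num_freqs=6):
        super().__init__()
        self.register_buffer("freqs", 2.0 ** torch.arange(num_freqs) * torch.pi)

    def forward(self, x):                      # x: (..., 3)
        ang = x[..., None] * self.freqs        # (..., 3, num_freqs)
        return torch.cat([torch.sin(ang), torch.cos(ang)], dim=-1).flatten(-2)

def make_head(in_dim, out_dim, width=256, depth=6):
    """A 6-layer MLP head, as described in the text; the width is an assumption."""
    layers, d = [], in_dim
    for _ in range(depth - 1):
        layers += [nn.Linear(d, width), nn.ReLU()]
        d = width
    return nn.Sequential(*layers, nn.Linear(d, out_dim))

enc = FourierEncoding()
loc_mlp = make_head(in_dim=36, out_dim=1)      # F_theta: 3D point -> localization probability
tex_mlp = make_head(in_dim=36, out_dim=3)      # F_phi:   3D point -> RGB

pts = torch.rand(4096, 3)                      # stand-in for 3D points looked up from texel centers
feat = enc(pts)
loc = torch.sigmoid(loc_mlp(feat))             # L_map values in [0, 1]
tex = torch.sigmoid(tex_mlp(feat))             # T_map values in [0, 1]

yellow = torch.tensor([1.0, 1.0, 0.0])
masked_texture = loc * tex                     # local texture branch: texture kept only inside the mask
highlight = loc * yellow + (1 - loc) * 0.5     # localization branch: blend the highlight onto a neutral base
# (a third head for the background branch can be composited analogously outside the mask)
```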
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Localization loss. Using only the texture loss allows for trivial solutions where the mask contains a region that includes, but is much larger than, the desired localization re" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.352, + 0.892, + 0.518 + ], + "angle": 0, + "content": "gion. To encourage the localization region to be meaningful, we employ a visual loss on the localization region in isolation (similar to 3D Highlighter [10]). Specifically, we blend a (yellow) color onto the mesh according to the localization map to get a localization-colored mesh \\( M_{l} \\) (top branch Fig. 3). From the text input \\( y \\), we derive a target localization prompt \\( y_{l} \\) describing the localized region in the format used in 3D Highlighter [10]. We then use \\( M_{l} \\) and \\( y_{l} \\) as input to the text-conditioned visual loss. Using this loss significantly improves the detail and quality of the localization (see supplemental material)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.523, + 0.893, + 0.869 + ], + "angle": 0, + "content": "Background loss. Using only the top two branches in Fig. 3 leads to broader localizations that incorporate superfluous elements characteristic of the input 3D model (i.e. a bill on a duck), in addition to the desired localization region (see supplemental material). To mitigate this, we learn a background texture \\( B_{map} \\in [0,1]^{H \\times W \\times 3} \\) that intentionally contains these characteristic elements of the input 3D shape in the inverse of the localization region \\( 1 - L_{map} \\) (the area outside the localization region). Specifically, we blend both the background texture \\( B_{map} \\) (using \\( 1 - L_{map} \\)) and a yellow color (using \\( L_{map} \\)) to get a composited texture \\( B_{map}' = L_{map}(\\mathrm{YELLOW}) + (1 - L_{map})B_{map} \\) (bottom branch in Fig. 3). We apply the composited texture \\( B_{map}' \\) to the mesh to get \\( M_b \\) and then supervise the background MLP using a visual loss conditioned on both \\( M_b \\) and a target text \\( y_b \\) (derived from \\( y \\)). The target text \\( y_b \\) describes the generic object class (i.e. 'cow' in Fig. 3) with a (yellow) colored localization region. See supplemental material for more details. The third loss directly encourages incorporating the superfluous elements in the background texture which discourages the localization region from incorporating such undesired elements (since \\( L_{map} \\) and \\( 1 - L_{map} \\) are inverse masks)." 
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Key to our method is the simultaneous optimization of the localization map (that specifies the edit region) and the" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4476" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.08, + 0.086, + 0.184, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.091, + 0.167, + 0.183, + 0.179 + ], + "angle": 0, + "content": "Simultaneous" + }, + { + "type": "image", + "bbox": [ + 0.198, + 0.087, + 0.301, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.167, + 0.275, + 0.179 + ], + "angle": 0, + "content": "In series" + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.087, + 0.47, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.35, + 0.167, + 0.433, + 0.181 + ], + "angle": 0, + "content": "Independent" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.185, + 0.47, + 0.324 + ], + "angle": 0, + "content": "Figure 5. Impact of simultaneous optimization. Simultaneously optimizing the localization and texture (left) results in higher-detailed textures which effectively conform to the predicted localization. If we first optimize the localization, then optimize the texture within the localization region (middle), both the localization and texture are less detailed. Independent (right): if we optimize the localization independently (independent: left) and the texture independently (independent: middle), the texture does not align with the localization and thus the masked texture contains fringe artifacts (independent: right)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.351, + 0.469, + 0.442 + ], + "angle": 0, + "content": "texture map that conforms to it. This approach improves the quality of both the localization and the stylization. The texture map drives the localization to become more detailed and intricate, while the localization explicitly masks the texture, ensuring a coherent local style which respects the localization boundary (see Fig. 5)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.453, + 0.433, + 0.468 + ], + "angle": 0, + "content": "3.3. Score Distillation and Cascaded Diffusion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.476, + 0.469, + 0.597 + ], + "angle": 0, + "content": "Score Distillation. To guide our local stylization, we leverage powerful pretrained text-to-image diffusion models. Existing approaches use these models in conjunction with Score Distillation Sampling (SDS) to supervise text-driven optimizations [44, 58]. For each iteration of an optimization of an image \\( x \\) that we want to supervise with diffusion model \\( \\phi \\) and text prompt \\( y \\), SDS [44] proposes the following gradient:" + }, + { + "type": "equation", + "bbox": [ + 0.131, + 0.611, + 0.469, + 0.627 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {x} \\mathcal {L} _ {S D S} (\\phi , x, y) = w (t) \\left(\\epsilon_ {\\phi} \\left(z _ {t}, t, y\\right) - \\epsilon\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.639, + 0.469, + 0.791 + ], + "angle": 0, + "content": "where timestep \\(t \\sim \\mathcal{U}(\\{1, \\dots, T\\})\\) is sampled uniformly and noise \\(\\epsilon \\sim \\mathcal{N}(\\mathbf{0}, \\mathbf{I})\\) is Gaussian. 
The noisy image \\(z_{t}\\) is obtained by applying a timestep-dependent scaling of \\(\\epsilon\\) to the image \\(x\\). The weight \\(w(t)\\) is a timestep-dependent weighting function and \\(\\epsilon_{\\phi}(z_{t}, t, y)\\) is the noise predicted by the diffusion model conditioned on \\(z_{t}\\), \\(t\\), and \\(y\\). Note that Eq. (1) omits the U-Net Jacobian term (not needed in practice [44]). This objective is similar to the objective used in diffusion model training, however, instead of optimizing the weights of the model, the gradient is applied to the image \\(x\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.469, + 0.902 + ], + "angle": 0, + "content": " Cascaded Diffusion. Text-to-image diffusion models often contain multiple cascaded stages at different resolutions in order to achieve high resolution outputs [21]. These cascaded diffusion models consist of a base stage \\(\\phi^1\\) (stage 1) and some number of super-resolution stages \\(\\phi^{i > 1}\\) (stages \\(2 - N\\)). The base stage is identical to a standard diffusion model, predicting noise \\(\\epsilon_{\\phi^1}(z_t^1,t,y)\\) conditioned on" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.091, + 0.892, + 0.258 + ], + "angle": 0, + "content": "noisy image \\( z_{t}^{1} \\), timestep \\( t \\), and text prompt \\( y \\). However, the super-resolution stages are conditioned on two differently-noised images: one at the current resolution (\\( z_{t}^{i} \\) with timestep \\( t \\) and noise \\( \\epsilon^{i} \\)) and one at the lower resolution (\\( z_{s}^{i-1} \\) with timestep \\( s \\) and noise \\( \\epsilon^{i-1} \\)). The predicted noise for the super-resolution stage is given by \\( \\epsilon_{\\phi^{i}}(z_{t}^{i}, t, z_{s}^{i-1}, s, y) \\). During inference, the lower resolution input image is obtained by adding noise to the output of the prior stage. However in training, both the high and low resolution images are obtained by sampling a single image from the training dataset and rescaling it to different resolutions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.258, + 0.892, + 0.44 + ], + "angle": 0, + "content": "Standard SDS [44] only utilizes the first, low-resolution base stage, thus neglecting the full potential of the cascaded model. It is not immediately obvious how to formulate a score distillation technique for all stages of a cascaded diffusion model since super-resolution stages take multiple resolution inputs and, at inference, they require a fully denoised output from the prior stage [21]. We take inspiration from SDS and use the perspective of diffusion training as opposed to inference, and extend it to the training of cascaded diffusion models. To our knowledge, we are the first to consider score distillation using the cascaded super-resolution stages." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.45, + 0.747, + 0.465 + ], + "angle": 0, + "content": "3.4. Cascaded Score Distillation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.473, + 0.892, + 0.655 + ], + "angle": 0, + "content": "CSD overview. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion (illustrated in Fig. 6). Since the stages of a cascaded diffusion model \\(\\phi\\) are trained entirely independently of one another, our insight is to formulate a distillation loss that incorporates gradients from all stages \\((\\phi^1,\\dots,\\phi^N)\\) simultaneously. 
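As a rough sketch of how the Eq. (1) gradient can be evaluated in practice: `predict_noise` below is a stand-in for the pretrained U-Net, and the schedule tensors `alphas`, `sigmas`, and `w` are assumed inputs rather than values from any particular diffusion model.

```python
import torch

def sds_grad(x, y_emb, predict_noise, alphas, sigmas, w):
    """Sketch of the SDS gradient in Eq. (1), applied directly to the image x.

    predict_noise(z_t, t, y_emb) stands in for the pretrained U-Net;
    alphas, sigmas, w are timestep-indexed schedule tensors (assumed).
    The U-Net Jacobian is skipped, as noted in the text.
    """
    T = alphas.shape[0]
    t = torch.randint(0, T, ())                  # t ~ U({1, ..., T})
    eps = torch.randn_like(x)                    # eps ~ N(0, I)
    z_t = alphas[t] * x + sigmas[t] * eps        # timestep-dependent noising of x
    eps_pred = predict_noise(z_t, t, y_emb)
    return w[t] * (eps_pred - eps)               # Eq. (1)

# Toy check with a random noise predictor standing in for the diffusion model:
T = 1000
alphas, sigmas, w = torch.rand(T), torch.rand(T), torch.ones(T)
grad_x = sds_grad(torch.rand(3, 64, 64), None,
                  lambda z, t, y: torch.randn_like(z), alphas, sigmas, w)
```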
We observe that different stages of the cascaded model provide different levels of granularity and global understanding (Fig. 7). Controlling the influence of each stage provides control over the details and the corresponding localization of the supervision (Fig. 8)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.901 + ], + "angle": 0, + "content": "CSD Formalization. Consider a mesh \\(M_{\\theta}\\) with a neural texture parameterized by an MLP \\(\\theta\\) (This MLP could be either \\(\\mathcal{F}_{\\theta}\\), \\(\\mathcal{F}_{\\phi}\\), and \\(\\mathcal{F}_{\\psi}\\) in Sec. 3.2). We first render \\(M_{\\theta}\\) at \\(N\\) different resolutions using a differentiable renderer \\(g\\) to get multiple images \\(g(M_{\\theta}) = \\mathbf{x} = \\{x^{1}\\dots x^{N}\\}\\) such that \\(x^{i}\\) is the same resolution as stage \\(\\phi^i\\). For the base stage \\(\\phi^1\\), we perform standard SDS using Eq. (1) on \\(x^{1}\\) and prompt \\(y\\) to get a gradient \\(\\nabla_{x^1}\\). For all stages \\(\\phi^i\\) for \\(i > 1\\), we sample two timesteps \\(t,s\\sim \\mathcal{U}(\\{1,\\ldots ,T\\})\\), noise \\(\\epsilon^i\\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})\\) at the resolution of stage \\(\\phi^i\\), and noise \\(\\epsilon^{i - 1}\\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})\\) at the resolution of stage \\(\\phi^{i - 1}\\). Using timestep-dependent schedule coefficients \\(\\alpha\\) and \\(\\sigma\\), we compute a noisy image \\(z_{t}^{i} = \\alpha_{t}x^{i} + \\sigma_{t}\\epsilon^{i}\\) by applying a timestep-dependent scaling of \\(\\epsilon^i\\) to the image \\(x^i\\). Similarly, we compute \\(z_{s}^{i - 1} = \\alpha_{s}x^{i - 1} + \\sigma_{s}\\epsilon^{i - 1}\\) by applying a timestep-dependent scaling of \\(\\epsilon^{i - 1}\\) to the image \\(x^{i - 1}\\). We then use" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4477" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.1, + 0.087, + 0.45, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.285, + 0.47, + 0.368 + ], + "angle": 0, + "content": "Figure 6. Cascaded Score Distillation (CSD). We simultaneously distill scores across multiple stages of a cascaded diffusion model in order to leverage both the global awareness of the first stage and the higher level of detail contained in later stages. The difference between the predicted noise and sampled noise is the image gradient for each stage." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.392, + 0.468, + 0.468 + ], + "angle": 0, + "content": "\\(\\phi^i\\) to predict noise \\(\\epsilon_{\\phi^i}(z_t^i,t,z_s^{i - 1},s,y)\\) conditioned on the noisy images, timesteps, and text prompt. Our gradient \\(\\nabla_{x^i}\\) for stage \\(\\phi^i\\) for \\(i > 1\\) is the difference between the predicted noise and the (higher-resolution) sampled noise \\(\\epsilon^i\\) weighted by the timestep-dependent function \\(w(t)\\):" + }, + { + "type": "equation", + "bbox": [ + 0.151, + 0.476, + 0.469, + 0.514 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\nabla_ {x ^ {i}} \\mathcal {L} _ {C S D ^ {i}} \\left(\\phi^ {i}, x ^ {i}, x ^ {i - 1}, y\\right) = \\\\ w (t) \\left(\\epsilon_ {\\phi^ {i}} \\left(z _ {t} ^ {i}, t, z _ {s} ^ {i - 1}, s, y\\right) - \\epsilon^ {i}\\right). 
\\tag {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.522, + 0.469, + 0.598 + ], + "angle": 0, + "content": "With all gradients \\(\\nabla_{x^1},\\ldots ,\\nabla_{x^N}\\) computed, we weight each gradient \\(\\nabla_{x^i}\\) with a user defined \\(\\lambda^i\\) to provide control over the impact of the supervision from each stage of the cascaded model. Thus our full gradient with respect to any given neural texture \\(\\theta\\) can be described by:" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.606, + 0.469, + 0.7 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\nabla_ {\\theta} \\mathcal {L} _ {C S D} (\\phi , \\mathbf {x} = g (\\theta), y) = \\\\ \\lambda^ {1} \\nabla_ {x ^ {1}} \\mathcal {L} _ {S D S} (\\phi^ {1}, x ^ {1}, y) \\frac {\\partial x ^ {1}}{\\partial \\theta} \\\\ + \\sum_ {i = 2} ^ {N} \\lambda^ {i} \\nabla_ {x ^ {i}} \\mathcal {L} _ {C S D ^ {i}} \\left(\\phi^ {i}, x ^ {i}, x ^ {i - 1}, y\\right) \\frac {\\partial x ^ {i}}{\\partial \\theta}. \\tag {3} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.707, + 0.469, + 0.833 + ], + "angle": 0, + "content": "Note that just as in SDS [44], we can avoid computing the U-Net Jacobian term \\(\\frac{\\partial\\epsilon_{\\phi}(z_t^i,t,z_s^{i - 1},s,y)}{z_t^i}\\) (not shown in Eq. (3)) since each stage is entirely independent and our gradient is only with respect to the high-resolution image \\(x^i\\). Thus, we directly apply \\(\\lambda^i\\nabla_{x^i}\\) to the image \\(x^i\\) without having to compute the costly backpropagation through the U-Net. Using the gradient \\(\\nabla_{\\theta}\\mathcal{L}_{CSD}(\\phi ,\\mathbf{x} = g(\\theta),y)\\), we update the weights of our MLP \\(\\theta\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.846, + 0.208, + 0.862 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "We demonstrate the capabilities of 3D Paintbrush on a wide variety of meshes (from different sources [55, 56, 61, 65])" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.088, + 0.626, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.517, + 0.175, + 0.602, + 0.189 + ], + "angle": 0, + "content": "Only stage 1" + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.089, + 0.759, + 0.172 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.656, + 0.175, + 0.742, + 0.189 + ], + "angle": 0, + "content": "Only stage 2" + }, + { + "type": "image", + "bbox": [ + 0.768, + 0.089, + 0.825, + 0.172 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.825, + 0.09, + 0.893, + 0.141 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.819, + 0.175, + 0.853, + 0.188 + ], + "angle": 0, + "content": "CSD" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.191, + 0.892, + 0.288 + ], + "angle": 0, + "content": "Figure 7. Impact of cascaded stages. Different stages of the cascaded model provide different levels of granularity and global understanding. Using only the (low resolution) stage 1 model gives a low-resolution result in the correct location. While the (high resolution) stage 2 model gives a high-resolution result, it is placed in the incorrect location. Our CSD simultaneously uses stage 1 and 2, resulting in a highly-detailed texture in the appropriate location." 
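Putting Eqs. (2) and (3) together, a minimal sketch of one cascaded distillation step might look as follows. The per-stage noise predictors and schedule tensors are stand-ins (not the actual cascaded model), and the two-stage toy usage at the end is only for shape checking.

```python
import torch

def csd_grads(renders, y_emb, stages, lambdas, alphas, sigmas, w):
    """Sketch of Cascaded Score Distillation (Eqs. 2 and 3).

    renders: [x^1, ..., x^N], one render per stage resolution.
    stages:  stages[0](z_t, t, y) is the base model; stages[i](z_t, t, z_s, s, y)
             for i > 0 are super-resolution stages (stand-ins here).
    Returns one gradient per render; each is applied directly to x^i,
    so no U-Net Jacobian is ever computed.
    """
    T = alphas.shape[0]
    grads = []
    # Base stage: standard SDS on the lowest-resolution render.
    t = torch.randint(0, T, ())
    eps1 = torch.randn_like(renders[0])
    z_t1 = alphas[t] * renders[0] + sigmas[t] * eps1
    grads.append(lambdas[0] * w[t] * (stages[0](z_t1, t, y_emb) - eps1))
    # Super-resolution stages: also condition on a noised lower-resolution render.
    for i in range(1, len(renders)):
        t = torch.randint(0, T, ())
        s = torch.randint(0, T, ())
        eps_hi = torch.randn_like(renders[i])
        eps_lo = torch.randn_like(renders[i - 1])
        z_t = alphas[t] * renders[i] + sigmas[t] * eps_hi
        z_s = alphas[s] * renders[i - 1] + sigmas[s] * eps_lo
        eps_pred = stages[i](z_t, t, z_s, s, y_emb)
        grads.append(lambdas[i] * w[t] * (eps_pred - eps_hi))   # Eq. (2)
    return grads

# Toy two-stage usage (64- and 256-pixel renders, random stand-in predictors):
T = 1000
alphas, sigmas, w = torch.rand(T), torch.rand(T), torch.ones(T)
renders = [torch.rand(3, 64, 64), torch.rand(3, 256, 256)]
stages = [lambda z, t, y: torch.randn_like(z),
          lambda z, t, zs, s, y: torch.randn_like(z)]
grads = csd_grads(renders, None, stages, lambdas=[1.0, 0.5],
                  alphas=alphas, sigmas=sigmas, w=w)
```

Each stage's weighted gradient would then be pulled back through the differentiable renderer to the MLP parameters, exactly as the partial-derivative terms in Eq. (3) indicate.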
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.314, + 0.892, + 0.466 + ], + "angle": 0, + "content": "and prompts. We highlight key properties of our method such as localization precision and edit specificity. We then demonstrate the importance and capabilities of our CSD loss including its high resolution supervision and intuitive controls. Finally, we evaluate our system against other localization and editing baselines and ablate the key components of our method. In our experiments, we use DeepFloyd IF [3] for our cascaded model. Our unoptimized PyTorch [43] implementation takes 4 hours on a standard A40 GPU, typically achieving satisfactory results within 2 hours." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.474, + 0.751, + 0.49 + ], + "angle": 0, + "content": "4.1. Properties of 3D Paintbrush" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.501, + 0.892, + 0.681 + ], + "angle": 0, + "content": "3D Paintbrush generality. 3D Paintbrush is capable of producing highly detailed localizations and textures on a diverse collection of meshes and prompts (Fig. 4). Our method is not restricted to any category of meshes and we show results on organic and manufactured shapes. Furthermore, our local textures can be specified with open vocabulary text descriptions and are not limited to any predefined categories or constraints. This includes \"out-of-domain\" local textures such as the rainbow shinguards on a giraffe which are not naturally seen in the context of these objects, yet are precisely placed in semantically meaningful locations with highly detailed textures." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.686, + 0.892, + 0.852 + ], + "angle": 0, + "content": "3D Paintbrush precision and composition. 3D Paintbrush produces precise localizations and highly-detailed textures that effectively adhere to these predicted localizations (see Fig. 2). The tight coupling between the localization and texture (see the gold chain necklace in Fig. 1) enables seamless composition of multiple local textures simultaneously on the same mesh without any layering artifacts. For example, the sharp localization boundary of the \"Tie-dye apron\" (in Fig. 2) allows us to composite this local texture on top of other textures without obstructing these textures in regions outside of the apron's boundary." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "3D Paintbrush specificity and effectiveness. 3D Paintbrush produces accurate and high resolution local edits that closely adhere to the text-specification (see Fig. 10). 
Our" + }, + { + "type": "list", + "bbox": [ + 0.498, + 0.501, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4478" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.088, + 0.178, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.089, + 0.245, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.254, + 0.089, + 0.312, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.089, + 0.38, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.389, + 0.089, + 0.447, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.456, + 0.089, + 0.515, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.524, + 0.089, + 0.581, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.59, + 0.089, + 0.648, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.659, + 0.089, + 0.717, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.089, + 0.783, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.794, + 0.089, + 0.852, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.185, + 0.894, + 0.242 + ], + "angle": 0, + "content": "Figure 8. Granular control with CSD. Varying the weight between stage 1 and stage 2 results in control over the details and corresponding localization. Only using stage 1 (leftmost) is rather coarse; only using stage 2 (rightmost) is highly detailed with an incorrect localization. Increasing the stage 2 weight (moving left to right) progressively increases the detail and granularity of the supervision, enabling smooth and meaningful interpolation between stage 1 and 2." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.268, + 0.47, + 0.388 + ], + "angle": 0, + "content": "method's fine-grained results contain intricate details (i.e. the badge on \"Barcelona jersey\") and reflect the subtle differences in the text prompts (i.e. the \"cape\" on the dog is more tapered than the boxer \"poncho\"). This specificity allows us to produce many diverse and distinct local styles. We show multiple local edits on the same mesh for multiple different meshes, demonstrating the effectiveness of our method on diverse prompts and meshes." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.397, + 0.437, + 0.414 + ], + "angle": 0, + "content": "4.2. Importance of Cascaded Score Distillation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.424, + 0.471, + 0.576 + ], + "angle": 0, + "content": "Impact and granular control of CSD. Our cascaded score distillation (CSD) simultaneously distills scores at multiple resolutions in a cascaded fashion. We observe that different stages of the cascaded diffusion model give different levels of granularity and global understanding (Fig. 7). Using only the (low resolution) stage 1 model is equivalent to SDS. Though SDS produces an accurate localization and coherent texture, the result is low-resolution (see Fig. 9). 
Conversely, using only the (high resolution) stage 2 model gives a high-resolution result, but often fails to properly lo" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.268, + 0.892, + 0.433 + ], + "angle": 0, + "content": "calize the texture leading to undesirable results. Our CSD simultaneously combines the supervision from stages 1 and 2, resulting in a highly-detailed texture in the appropriate location. Increasing the stage 2 weight (moving left to right in Fig. 8) progressively increases the detail and granularity of the supervision, demonstrating smooth and intuitive interpolation between stage 1 and 2. In our experiments, we use a fixed weighting scheme, but this result demonstrates that our method works for a broad range of weights. Quantitative evidence supporting the importance of the CSD loss can be seen in Tab. 1." + }, + { + "type": "table", + "bbox": [ + 0.507, + 0.443, + 0.888, + 0.508 + ], + "angle": 0, + "content": "
Localization | SATR | 3D Highlighter | Ours
Average Score ↑ | 1.89 | 2.03 | 4.80
Local Edits | Latent Paint | Vox-E | Ours (SDS) | Ours
Average Score ↑ | 2.14 | 2.15 | 4.06 | 4.88
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.518, + 0.892, + 0.575 + ], + "angle": 0, + "content": "Table 1. Quantitative evaluation. We conduct a perceptual study where users evaluate our localizations and local edits compared to baseline methods (3D Highlighter [10], SATR [2], Latent Paint [38], Vox-E [49], and our method with standard SDS loss)." + }, + { + "type": "image", + "bbox": [ + 0.098, + 0.593, + 0.451, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.828, + 0.47, + 0.897 + ], + "angle": 0, + "content": "Figure 9. Importance of super-resolution stage in CSD. Using stage 1 only (equivalent to SDS) lacks fine-grained details. Incorporating the second super-resolution cascaded stage from our CSD increases the resolution and detail. Input text prompts (from left to right): Colorful crochet shell, Cactus base, Tiger stripe shirt." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.603, + 0.62, + 0.617 + ], + "angle": 0, + "content": "4.3. Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.629, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Simultaneous localization and texture. We demonstrate the importance of simultaneously optimizing the localization region and texture in tandem in Fig. 5. We observe that simultaneous optimization results in highly detailed textures which effectively conform to the predicted localization regions (Fig. 5, left). Furthermore, the resulting localization region is sharp and intricate. Alternatively, we optimize the localization region first and use the predicted localization to learn a texture which is confined to the (precomputed) localization region (Fig. 5, middle). In this case, the texture is less detailed, and the localization region is less intricate. Finally, we can learn the texture and localization region independently (Fig. 5, independent). This results in a texture (Fig. 5 independent, middle) that is completely decoupled from the localization region (Fig. 5 independent, left). When masking the texture with the localization region, we observe a misaligned texture with fringe artifacts (Fig. 5 independent, right)." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4479" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.116, + 0.08, + 0.852, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.131, + 0.31, + 0.846, + 0.325 + ], + "angle": 0, + "content": "Beautiful roses Colorful crochet base Rainbow headband Camo poncho Superhero cape Tiger stripe hat" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.325, + 0.894, + 0.355 + ], + "angle": 0, + "content": "Figure 10. 3D Paintbrush is capable of producing a variety of local textures on the same mesh. Each result contains an accurate localization map (to specify the edit region) and a texture map that conforms to it." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.38, + 0.47, + 0.472 + ], + "angle": 0, + "content": "Quantitative evaluation. 3D Paintbrush is the only method geared towards local editing that natively operates on meshes. We compare to the closest mesh-based methods which perform localization (3D Highlighter [10], SATR [2]) and texturing (Latent Paint [38]). We also compare to a voxel NeRF approach for local 3D editing (Vox-E [49])." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.473, + 0.471, + 0.609 + ], + "angle": 0, + "content": "To evaluate our method against these baselines, we conduct a perceptual study where 39 users rate the effectiveness of each method for 9 different meshes (see Tab. 1). 3D Paintbrush consistently scores the highest for both localization and local editing, producing sharper localizations than 3D Highlighter and SATR and higher resolution textures than Latent Paint and Vox-E. Further quantitative evaluation using CLIP R-Precision and qualitative comparisons to these baselines are shown in the supplemental material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.615, + 0.471, + 0.751 + ], + "angle": 0, + "content": "Limitations. We illustrate a limitation of our method in Fig. 11. In cases where the desired local texture has strong semantic connections to additional components, these auxiliary components can sometimes be included in the localization and local texture. For example, a \"Pharaoh head-dress\" is closely associated with Egyptian necklaces and thus our method also localizes and styles this component as well. Our method also suffers from the Janus effect common to many text-to-3D methods that use 2D supervision." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.784, + 0.196, + 0.8 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.901 + ], + "angle": 0, + "content": "We presented 3D Paintbrush, a technique that produces highly detailed texture maps on meshes which effectively adhere to a predicted localization region. Our system is capable of hallucinating non-obvious local textures on a wide variety of meshes (such as heart-shaped sunglasses on a cow). Our localizations are detailed and accurate, en" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.38, + 0.895, + 0.578 + ], + "angle": 0, + "content": "abling seamless post-processing (such as compositing textures without unwanted fringe). We proposed cascaded score distillation, a technique capable of extracting supervision signals from multiple stages of a cascaded diffusion model. We observe that each stage controls different amounts of detail and global understanding. Further, varying the weights for each stage provides control over the resulting local textures. We show the effectiveness of CSD to locally texture meshes; yet, CSD is general and can be applied to other domains (such as images, videos, and alternative 3D representations). In the future, we are interested in extending localized editing to capabilities beyond texturing (such as deformations, normal maps, and more)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.59, + 0.68, + 0.607 + ], + "angle": 0, + "content": "6. Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.613, + 0.894, + 0.727 + ], + "angle": 0, + "content": "We thank the University of Chicago for providing the AI cluster resources, services, and the professional support of the technical staff. This work was also supported in part by gifts from Snap Research, Adobe Research, Google Research, BSF grant 2022363, and NSF grants 2304481 and 2241303. Finally, we would like to thank Brian Kim, Jack Zhang, Haochen Wang, and the members of 3DL and PALS for their thorough and insightful feedback on our work." 
+ }, + { + "type": "image", + "bbox": [ + 0.502, + 0.736, + 0.892, + 0.826 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.827, + 0.895, + 0.898 + ], + "angle": 0, + "content": "Figure 11. In cases where the desired localization carries a strong semantic context, elements from that context can also appear in the localization and style. For example, when adding a pharaoh headdress, 3D Paintbrush also adds an Egyptian necklace since they are commonly associated with pharaohs." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4480" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.169 + ], + "angle": 0, + "content": "[1] Ahmed Abdelreheem, Abdelrahman Eldesokey, Maks Ovsjanikov, and Peter Wonka. Zero-shot 3d shape correspondence. In SIGGRAPH Asia 2023 Conference Papers, pages 1-11, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.172, + 0.472, + 0.213 + ], + "angle": 0, + "content": "[2] Ahmed Abdelreheem, Ivan Skorokhodov, Maks Ovsjanikov, and Peter Wonka. Satr: Zero-shot semantic segmentation of 3d shapes. In ICCV, 2023. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.215, + 0.317, + 0.228 + ], + "angle": 0, + "content": "[3] Stability AI. Deepfloydif, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.229, + 0.47, + 0.282 + ], + "angle": 0, + "content": "[4] Sudarshan Babu, Richard Liu, Avery Zhou, Michael Maire, Greg Shakhnarovich, and Rana Hanocka. Hyperfields: Towards zero-shot generation of nerfs from text. arXiv preprint arXiv:2310.17075, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.285, + 0.469, + 0.34 + ], + "angle": 0, + "content": "[5] Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. In European conference on computer vision, pages 707-723. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.342, + 0.469, + 0.382 + ], + "angle": 0, + "content": "[6] Alexey Bokhovkin, Shubham Tulsiani, and Angela Dai. Mesh2tex: Generating mesh textures from image queries. arXiv preprint arXiv:2304.05868, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.383, + 0.469, + 0.45 + ], + "angle": 0, + "content": "[7] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18392-18402, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.453, + 0.469, + 0.493 + ], + "angle": 0, + "content": "[8] Dave Zhenyu Chen, Yawar Siddiqui, Hsin-Ying Lee, Sergey Tulyakov, and Matthias Nießner. Text2tex: Text-driven texture synthesis via diffusion models. In ICCV, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.496, + 0.469, + 0.549 + ], + "angle": 0, + "content": "[9] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. arXiv preprint arXiv:2303.13873, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.551, + 0.469, + 0.592 + ], + "angle": 0, + "content": "[10] Dale Decatur, Itai Lang, and Rana Hanocka. 3d highlighter: Localizing regions on 3d shapes via text descriptions. In CVPR, 2023. 
2, 4, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.594, + 0.469, + 0.648 + ], + "angle": 0, + "content": "[11] Zhiwen Fan, Yifan Jiang, Peihao Wang, Xinyu Gong, Dejia Xu, and Zhangyang Wang. Unified implicit neural stylization. In European Conference on Computer Vision, pages 636-654. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.65, + 0.469, + 0.704 + ], + "angle": 0, + "content": "[12] Rao Fu, Xiao Zhan, Yiwen Chen, Daniel Ritchie, and Srinath Sridhar. Shapecrafter: A recursive text-conditioned 3d shape generation model. Advances in Neural Information Processing Systems, 35:8882-8895, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.706, + 0.469, + 0.773 + ], + "angle": 0, + "content": "[13] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.469, + 0.844 + ], + "angle": 0, + "content": "[14] Jun Gao, Tianchang Shen, Zian Wang, Wenzheng Chen, Kangxue Yin, Daiqing Li, Or Litany, Zan Gojcic, and Sanja Fidler. Get3d: A generative model of high quality 3d textured shapes learned from images. In Advances In Neural Information Processing Systems, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[15] Lin Gao, Tong Wu, Yu-Jie Yuan, Ming-Xian Lin, Yu-Kun Lai, and Hao Zhang. Tm-net: Deep generative networks for textured meshes. ACM Transactions on Graphics (TOG), 40 (6):1-15, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.148 + ], + "angle": 0, + "content": "[16] William Gao, Noam Aigerman, Thibault Groueix, Vova Kim, and Rana Hanocka. Textdeformer: Geometry manipulation using text guidance. In ACM SIGGRAPH 2023 Conference Proceedings, pages 1-11, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.151, + 0.892, + 0.206 + ], + "angle": 0, + "content": "[17] Huy Ha and Shuran Song. Semantic abstraction: Openworld 3D scene understanding from 2D vision-language models. In Proceedings of the 2022 Conference on Robot Learning, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.208, + 0.892, + 0.249 + ], + "angle": 0, + "content": "[18] Ayaan Haque, Matthew Tancik, Alexei A Efros, Aleksander Holynski, and Angjoo Kanazawa. Instruct-nerf2nerf: Editing 3d scenes with instructions. ICCV, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.252, + 0.892, + 0.293 + ], + "angle": 0, + "content": "[19] Amir Hertz, Rana Hanocka, Raja Giryes, and Daniel Cohen-Or. Deep geometric texture synthesis. ACM Transactions on Graphics (TOG), 39(4):108-1, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.296, + 0.892, + 0.351 + ], + "angle": 0, + "content": "[20] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.354, + 0.894, + 0.41 + ], + "angle": 0, + "content": "[21] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. 
The Journal of Machine Learning Research, 23(1):2249-2281, 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.412, + 0.892, + 0.48 + ], + "angle": 0, + "content": "[22] Lukas Hollein, Justin Johnson, and Matthias Nießner. Stylemesh: Style transfer for indoor 3d scene reconstructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6198-6208, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.484, + 0.892, + 0.566 + ], + "angle": 0, + "content": "[23] Jingwei Huang, Justus Thies, Angela Dai, Abhijit Kundu, Chiyu Jiang, Leonidas J Guibas, Matthias Nießner, Thomas Funkhouser, et al. Adversarial texture optimization from rgb-d scans. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1559-1568, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.569, + 0.892, + 0.624 + ], + "angle": 0, + "content": "[24] Yukun Huang, Jianan Wang, Yukai Shi, Xianbiao Qi, Zheng-Jun Zha, and Lei Zhang. Dreamtime: An improved optimization strategy for text-to-3d content creation. arXiv preprint arXiv:2306.12422, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.628, + 0.892, + 0.695 + ], + "angle": 0, + "content": "[25] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.699, + 0.892, + 0.726 + ], + "angle": 0, + "content": "[26] Oren Katzir, Or Patashnik, Daniel Cohen-Or, and Dani Lischinski. Noise-free score distillation, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.73, + 0.892, + 0.797 + ], + "angle": 0, + "content": "[27] Justin Kerr, Chung Min Kim, Ken Goldberg, Angjoo Kanazawa, and Matthew Tancik. Lerf: Language embedded radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19729-19739, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.802, + 0.892, + 0.855 + ], + "angle": 0, + "content": "[28] Sosuke Kobayashi, Eiichi Matsumoto, and Vincent Sitzmann. Decomposing nerf for editing via feature field distillation. In Advances in Neural Information Processing Systems, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.859, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[29] Han-Hung Lee and Angel X Chang. Understanding pure clip guidance for voxel grid nerf models. arXiv preprint arXiv:2209.15172, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.514, + 0.956 + ], + "angle": 0, + "content": "4481" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[30] Jiabao Lei, Yabin Zhang, Kui Jia, et al. Tango: Text-driven photorealistic and robust 3d stylization via lighting decomposition. Advances in Neural Information Processing Systems, 35:30923-30936, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.15, + 0.47, + 0.205 + ], + "angle": 0, + "content": "[31] Yuhan Li, Yishun Dou, Yue Shi, Yu Lei, Xuanhong Chen, Yi Zhang, Peng Zhou, and Bingbing Ni. Focaldreamer: Text-driven 3d editing via focal-fusion assembly. arXiv preprint arXiv:2308.10608, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.208, + 0.469, + 0.263 + ], + "angle": 0, + "content": "[32] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In CVPR, 2023. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.266, + 0.469, + 0.308 + ], + "angle": 0, + "content": "[33] Hsueh-Ti Derek Liu, Vladimir G. Kim, Siddhartha Chaudhari, Noam Aigerman, and Alec Jacobson. Neural subdivision. ACM Trans. Graph., 39(4), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.31, + 0.469, + 0.393 + ], + "angle": 0, + "content": "[34] Kunhao Liu, Fangneng Zhan, Yiwen Chen, Jiahui Zhang, Yingchen Yu, Abdulmotaleb El Saddik, Shijian Lu, and Eric P Xing. Stylerf: Zero-shot 3d style transfer of neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8338-8348, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.396, + 0.469, + 0.45 + ], + "angle": 0, + "content": "[35] Minghua Liu, Chao Xu, Haian Jin, Linghao Chen, Zexiang Xu, Hao Su, et al. One-2-3-45: Any single image to 3d mesh in 45 seconds without per-shape optimization. arXiv preprint arXiv:2306.16928, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.453, + 0.469, + 0.495 + ], + "angle": 0, + "content": "[36] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.498, + 0.469, + 0.581 + ], + "angle": 0, + "content": "[37] Yiwei Ma, Xiaqing Zhang, Xiaoshuai Sun, Jiayi Ji, Haowei Wang, Guannan Jiang, Weilin Zhuang, and Rongrong Ji. X-mesh: Towards fast and accurate text-driven 3d stylization via dynamic textual guidance. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2749-2760, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.584, + 0.469, + 0.625 + ], + "angle": 0, + "content": "[38] Gal Metzer, Elad Richardson, Or Patashnik, Raja Giryes, and Daniel Cohen-Or. Latent-nerf for shape-guided generation of 3d shapes and textures. In CVPR, 2023. 1, 2, 3, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.628, + 0.469, + 0.669 + ], + "angle": 0, + "content": "[39] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. Text2mesh: Text-driven neural stylization for meshes. In CVPR, pages 13492-13502, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.672, + 0.469, + 0.727 + ], + "angle": 0, + "content": "[40] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.729, + 0.469, + 0.785 + ], + "angle": 0, + "content": "[41] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. Clip-mesh: Generating textured meshes from text using pretrained image-text models. In SIGGRAPH Asia 2022 conference papers, pages 1-8, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.787, + 0.469, + 0.842 + ], + "angle": 0, + "content": "[42] Michael Oechsle, Lars Mescheder, Michael Niemeyer, Thilo Strauss, and Andreas Geiger. Texture fields: Learning texture representations in function space. In CVPR, pages 4531-4540, 2019. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[43] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017. 6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.133 + ], + "angle": 0, + "content": "[44] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In ICLR, 2023. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.217 + ], + "angle": 0, + "content": "[45] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3d object generation using both 2d and 3d diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[46] Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In International conference on machine learning, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[47] Elad Richardson, Gal Metzer, Yuval Alaluf, Raja Giryes, and Daniel Cohen-Or. Texture: Text-guided texturing of 3d shapes. In ACM TOG, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.321, + 0.892, + 0.402 + ], + "angle": 0, + "content": "[48] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.405, + 0.892, + 0.446 + ], + "angle": 0, + "content": "[49] Etai Sella, Gal Fiebelman, Peter Hedman, and Hadar Averbuch-Elor. Vox-e: Text-guided voxel editing of 3d objects. In ICCV, 2023. 1, 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.448, + 0.892, + 0.489 + ], + "angle": 0, + "content": "[50] Yichun Shi, Peng Wang, Jianglong Ye, Mai Long, Kejie Li, and Xiao Yang. Mvdream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.491, + 0.892, + 0.546 + ], + "angle": 0, + "content": "[51] Yawar Siddiqui, Justus Thies, Fangchang Ma, Qi Shan, Matthias Nießner, and Angela Dai. Texturify: Generating textures on 3d shape surfaces. In European Conference on Computer Vision, pages 72-88. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.548, + 0.892, + 0.616 + ], + "angle": 0, + "content": "[52] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.619, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[53] Christina Tsalicoglou, Fabian Manhardt, Alessio Tonioni, Michael Niemeyer, and Federico Tombari. 
Textmesh: Generation of realistic 3d meshes from text prompts. arXiv preprint arXiv:2304.12439, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.676, + 0.892, + 0.743 + ], + "angle": 0, + "content": "[54] Vadim Tschernezki, Iro Laina, Diane Larlus, and Andrea Vedaldi. Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. In 2022 International Conference on 3D Vision (3DV), pages 443-453. IEEE, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.746, + 0.892, + 0.773 + ], + "angle": 0, + "content": "[55] TurboSquid. Turbosquid 3d model repository, 2021. https://www.turbosquid.com/. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.776, + 0.892, + 0.83 + ], + "angle": 0, + "content": "[56] Oliver van Kaick, Andrea Tagliasacchi, Oana Sidi, Hao Zhang, Daniel Cohen-Or, Lior Wolf, and Ghassan Hamarneh. Prior knowledge for part correspondence. Computer Graphics Forum, 30(2):553-562, 2011. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.832, + 0.892, + 0.899 + ], + "angle": 0, + "content": "[57] Suhani Vora, Noha Radwan, Klaus Greff, Henning Meyer, Kyle Genova, Mehdi S. M. Sajjadi, Etienne Pot, Andrea Tagliasacchi, and Daniel Duckworth. Nesf: Neural semantic fields for generalizable semantic segmentation of 3d scenes, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4482" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[58] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A. Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In CVPR, 2023. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.47, + 0.205 + ], + "angle": 0, + "content": "[59] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. arXiv preprint arXiv:2305.16213, 2023. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.207, + 0.471, + 0.262 + ], + "angle": 0, + "content": "[60] Xingkui Wei, Zhengqing Chen, Yanwei Fu, Zhaopeng Cui, and Yinda Zhang. Deep hybrid self-prior for full 3d mesh generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5805-5814, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.263, + 0.471, + 0.332 + ], + "angle": 0, + "content": "[61] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.334, + 0.471, + 0.401 + ], + "angle": 0, + "content": "[62] Kangxue Yin, Jun Gao, Maria Shugrina, Sameh Khamis, and Sanja Fidler. 3dstylenet: Creating 3d shapes with geometric and texture style variations. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12456-12465, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.403, + 0.471, + 0.459 + ], + "angle": 0, + "content": "[63] Xiaohui Zeng, Arash Vahdat, Francis Williams, Zan Gojcic, Or Litany, Sanja Fidler, and Karsten Kreis. 
Lion: Latent point diffusion models for 3d shape generation. arXiv preprint arXiv:2210.06978, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.46, + 0.47, + 0.515 + ], + "angle": 0, + "content": "[64] Kai Zhang, Nick Kolkin, Sai Bi, Fujun Luan, Zexiang Xu, Eli Shechtman, and Noah Snavely. Arf: Artistic radiance fields. In European Conference on Computer Vision, pages 717-733. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.517, + 0.471, + 0.558 + ], + "angle": 0, + "content": "[65] Qingnan Zhou and Alec Jacobson. Thingi10k: A dataset of 10,000 3d-printing models. arXiv preprint arXiv:1605.04797, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.56, + 0.471, + 0.6 + ], + "angle": 0, + "content": "[66] Joseph Zhu and Peiye Zhuang. Hifa: High-fidelity text-to-3d with advanced diffusion guidance. arXiv preprint arXiv:2305.18766, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.602, + 0.471, + 0.643 + ], + "angle": 0, + "content": "[67] Jingyu Zhuang, Chen Wang, Lingjie Liu, Liang Lin, and Guanbin Li. Dreameditor: Text-driven 3d scene editing with neural fields. In SIGGRAPH Asia, 2023. 1, 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4483" + } + ] +] \ No newline at end of file diff --git a/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/669e6bfe-eb9e-4f5b-a53c-23335eda80fe_origin.pdf b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/669e6bfe-eb9e-4f5b-a53c-23335eda80fe_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c31461302b88945155e673427b1c4970abd21e7d --- /dev/null +++ b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/669e6bfe-eb9e-4f5b-a53c-23335eda80fe_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ceac6ff4440dc2d455334ffac90814190e56e4982242af58fdd91f3570bae25 +size 9498236 diff --git a/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/full.md b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1c17b6ca0373eb67855a52d56d49353d1492a3f2 --- /dev/null +++ b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/full.md @@ -0,0 +1,321 @@ +# 3D Paintbrush: Local Stylization of 3D Shapes with Cascaded Score Distillation + +Dale Decatur University of Chicago + +Itai Lang +University of Chicago + +Kfir Aberman +Snap Research + +Rana Hanocka +University of Chicago + +![](images/8f0213175946d81e045e5257620801c49611ea5251bf90a457a930a8d4f51e8c.jpg) +Figure 1. Utilizing only a text prompt as guidance, 3D Paintbrush seamlessly generates local stylized textures on bare meshes. Our approach produces a localization map (yellow regions) and a highly detailed texture map which conforms to it. + +![](images/f4b6e651a7b5e88adec8504d1c994748f92bc8066781abc73a14ed135292e743.jpg) + +![](images/88d6d9cc4b82a53a6af7741ab56f74fb0e8003a1f8ad9bc7c1cd3a3d6056e5b6.jpg) + +![](images/eb89358f7aad74877be3c83f45a401455d7ad6d69752f761941a7364042a2dd4.jpg) + +# Abstract + +We present 3D Paintbrush, a technique for automatically texturing local semantic regions on meshes via text descriptions. 
Our method is designed to operate directly on meshes, producing texture maps which seamlessly integrate into standard graphics pipelines. We opt to simultaneously produce a localization map (to specify the edit region) and a texture map which conforms to it. This approach improves the quality of both the localization and the stylization. To enhance the details and resolution of the textured area, we leverage multiple stages of a cascaded diffusion model to supervise our local editing technique with generative priors learned from images at different resolutions. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion, enabling control over both the granularity and global understanding of the supervision. We demonstrate the effectiveness of 3D Paintbrush to locally texture different semantic regions on a variety of shapes. Project page: https://threedle.github.io/3d-paintbrush + +# 1. Introduction + +The ability to edit existing high-quality 3D assets is a fundamental capability in 3D modeling workflows. Recent works have shown exceptional results for text-driven 3D data creation [32, 38, 48, 53, 58, 59], but focus on making global + +edits. While some progress has been made on local editing using an explicit localization of the edit region [49, 67], these regions are often coarse and lack fine-grained detail. Highly-detailed and accurate localizations are important for constraining the edits to be within a specific region, preventing changes unrelated to the target edit. Furthermore, while meshes with texture maps are the de facto standard in graphics pipelines, existing local editing work does not natively operate on meshes nor produce texture maps for them. + +In this work we develop 3D Paintbrush, a method for automatically texturing local semantic regions on meshes via text descriptions. Our method is designed to operate directly on meshes, producing texture maps which seamlessly integrate into standard graphics pipelines. 3D Paintbrush is controlled via intuitive, free-form text input, allowing users to describe their edits using open vocabulary on a wide range of meshes. Specifically, given an input mesh and a text prompt, 3D Paintbrush produces the corresponding high-quality texture map and a localization region to confine it. To enhance the details and resolution of the locally textured area, we introduce Cascaded Score Distillation (CSD) which leverages multiple stages of a cascaded diffusion model. Our explicit localization masks can be used to layer our edit texture onto existing textures. + +We opt to represent both our localization map and texture map as neural fields encoded by multi-layer perceptions. Our method synthesizes both a fine-grained localization mask and high-quality texture in tandem. Simultane + +![](images/305e0e8d18cf7f67879c52579bcc8947fff72b3ab4c653d928e3390ba7c0684d.jpg) +Colorful polo shirt +Figure 2. Precise composition of multiple local textures. 3D Paintbrush produces highly-detailed textures that effectively adhere to the predicted localizations. This enables seamlessly compositing local textures without unwanted fringes (right). 
+ +![](images/fad0ab68b5b07bb7cec53701159dbce71c34d054366cf12211c111011d67896a.jpg) +Superman emblem + +![](images/a89cb636cef5b54c1bfe8703dceb299d017079a30f5b65309a6366597bf6ca4b.jpg) +Tie-dye apron + +![](images/66d48d8aa4a8deec06555212b46a505c654ded15867d3a8cbd1f02b36cebbf31.jpg) +Muay Thai shorts + +![](images/48871a295e38ebb027784ab0ff12faf45f6e3a7d9e47efeef0edbe907f40b5b8.jpg) +Composite + +ously generating the localization and texture maps improves the quality of each. The texture map drives the localization to become more detailed and intricate. The localization explicitly masks the texture, ensuring a coherent local style which respects the localization boundary. + +Our local stylization operates in small regions, necessitating higher resolution supervision compared to global generative techniques. Existing approaches leverage pretrained text-to-image diffusion models with Score Distillation Sampling (SDS) to supervise text-driven optimizations [31, 58]. Text-to-image diffusion models often contain multiple cascaded stages in order to achieve high resolution [21], but standard SDS only utilizes the first low-resolution stage of the cascaded model. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion, enabling control over both the granularity and global understanding of the supervision. Since cascaded stages are trained entirely independently, our insight is to formulate a distillation loss that incorporates all stages in tandem. + +In summary, our method enables local text-driven stylization of meshes. By explicitly learning a localization in tandem with the texture, we ensure that our edits are bounded by the localized region. Using our CSD, which leverages all stages of the diffusion model, we can control the granularity and global understanding of the supervision achieving higher resolution textures and localizations than standard SDS. We demonstrate that 3D Paintbrush yields diverse local texturing on a variety of shapes and semantic regions and outperforms baselines both qualitatively and quantitatively. + +# 2. Related Work + +A large body of work has studied stylization and analysis of 3D content. Existing work uses neural networks and optimization [6, 15, 19, 22, 23, 30, 33, 37-39, 41, 42, 51, 60, 62] for mesh stylization. Other works use a neural radiance field + +NeRF [40] for stylization [11, 34, 64]. Yet, these works focus on stylization rather than localization. Large 2D models have been used for analytical tasks in 3D such as localization and segmentation [1, 2, 10, 17, 27, 28, 54, 57, 67], however, none of these works produce textures. Furthermore, only [1, 2, 10, 67] aim to produce a tight localization on meshes and we find that these approaches still produce relatively smooth localization regions that cannot capture the high frequency details needed for sharp local edits. + +Text-driven generation and editing. Existing works have leveraged pre-trained 2D models to generate 3D representations that adhere to a text prompt [4, 12, 16, 25, 29, 39, 41, 63]. Many recent methods [9, 26, 32, 44, 50, 53, 53, 58, 66] use score distillation [44, 58] from 2D models to generate both geometry and styles from scratch, while other works optimize the texture of an existing, fixed geometry [8, 38, 39, 47]. Other work aims to generate 3D representations from images [14, 35, 36, 45]. + +Existing text-to-3D generative methods [38, 44, 58, 59] can be used to perform global edits [18, 48, 67]. 
However, since these approaches do not have explicit edit localizations, they struggle to perform highly specific local edits without changing other components of the 3D representation's appearance. Different from our objective, these works aim to generate or globally manipulate existing 3D representations, while our work focuses on local editing. + +Text-driven local editing. Many approaches can perform global 3D edits and progress has been made on local editing in images and videos [5, 7, 13, 20]. Yet, few works have addressed the task of precise, local editing for 3D representations. Local editing is challenging since, in addition to synthesizing the edit, methods need to localize the edit region. FocalDreamer [31] obtains precise user defined edit regions at the cost of requiring additional, tedious user input compared to strictly text-driven approaches. Vox-E [49] (operating on voxel representations) and DreamEditor [67] (operating on NeRFs) both use attention maps to localize an edit + +![](images/b877759d8109bb754731500e7eabbf66d9ebd769fd1bf2c81c411b22a81fe222.jpg) +Figure 3. Overview of 3D Paintbrush. Each point on the surface of the mesh is passed into three different branches to produce a localization probability, texture map, and background map. We texture three different variants of the same mesh with the localization, texture, and background maps and render them from the same viewpoint. Each image along with the corresponding text condition is used to compute the CSD loss. + +region and thus the localization has no visual meaning in isolation. Our approach imposes a visual loss on our localizations in order to enforce sharp boundaries that are tightly coupled with our texture edits. Additionally, since existing purely text-driven local editing approaches only work on voxels and NeRFs, our approach is the first to enable text-driven local editing on meshes. + +High resolution text-to-3D. Several works have explored techniques to increase the resolution for text-to-3D. Many recent works apply SDS to latent diffusion models [32, 38, 58, 59, 66]. Recent works backpropagate the gradient through the encoder to get gradients in higher resolution $512 \times 512$ RGB space [32, 59, 66]. Other works use timestep annealing to give less noisy supervision towards the end of the optimization, thus increasing the detail of the generations [24, 59]. HiFA [66] proposes denoising over multiple successive timesteps each iteration to provide better gradients and achieve high fidelity appearance. While all of these approaches have shown impressive improvements to the resolution of SDS supervision, SDS only utilizes the base stage (not super-resolution stages). Thus, these proposed improvements are orthogonal to ours and can be incorporated at the super-resolution stages using CSD as well. + +# 3. Method + +We show an overview of our method in Fig. 3. The inputs to our system are a mesh $M$ and a text description $y$ of the desired local edit. Our system produces a local texture on the mesh $M$ that adheres to the text prompt $y$ . To supervise our optimization, we use score distillation with a pretrained text-to-image diffusion model. However, local editing requires higher detail than standard generation due to the small size and granularity of the desired edits. In order to further improve the detail of our localization and texture, we introduce Cascaded Score Distillation (CSD), a + +technique that distills scores at multiple resolutions of the 2D cascaded model. 
This approach enables leveraging all stages of a cascaded model and provides control over both the detail and global understanding of the supervision. + +# 3.1. Local Neural Texturing + +3D Paintbrush represents local textures as neural texture maps over the surface of a mesh $M$ defined by vertices $V \in \mathbb{R}^{n \times 3}$ and faces $F \in \{1, \dots, n\}^{m \times 3}$. Extracting an explicit texture map from our neural textures is trivial, making our representation compatible with existing graphics pipelines. Furthermore, using texture maps enables producing high resolution textures (i.e., sub-triangle values) without a computationally expensive high resolution mesh. A straightforward approach of directly optimizing texture values results in texture maps with artifacts and noise (see supplemental material). To mitigate this, we leverage the smoothness of neural networks [46]. However, a straightforward application of an MLP to a 2D texture map $((u, v) \to (r, g, b))$ is inherently invalid at the texture seams (e.g., erroneous interpolations at boundaries), which may lead to texture discontinuities on the rendered mesh. + +We instead formulate our MLPs to operate on 3D coordinates, leading to predictions in 3D that are inherently smooth and without any seam discontinuities. To do so, we invert the UV mapping $\psi(x,y,z) = (u,v)$ to get a map $\psi^{-1}(u,v) = (x,y,z)$ from 2D texels to 3D coordinates on the surface of the mesh. We optimize our MLPs with the 3D coordinates obtained from the 2D texel centers. We employ two primary networks, one for localization and one for texturing. Our neural localization MLP is a function $\mathcal{F}_{\theta}$ that maps a 3D coordinate $\mathbf{x} = (x,y,z)$ to a probability $p$ (which we map back to a 2D localization map). Similarly, our neural texture MLP is a function $\mathcal{F}_{\phi}$ that takes in a 3D coordinate and outputs an RGB value (which we map back to a 2D texture image). Our architecture first passes the 3D coordinates through positional encoding [52] before going through a 6-layer MLP. This formulation of using MLPs defined on the 3D surface leads to a neural texture which produces smoothly varying outputs in 3D, even though our 2D texture maps have discontinuities at the texture seams. The smoothness provided by the MLPs reduces artifacts, produces less noisy textures, and provides super resolution capabilities. Although we optimize our MLPs with 3D coordinates mapped from 2D texel centers, during inference, we may query the MLP for any value (i.e., sub-texels that enable super resolution texture maps even across seams). + +![](images/caf584c44831cf110e828a96530c71e57e340c15fa5cf625b4abdb87e9fe1b93.jpg) +Figure 4. 3D Paintbrush produces highly detailed textures and localizations for a diverse range of meshes and prompts. Our method synthesizes meaningful local edits on shapes, demonstrating both global and local part-level understanding.
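
To make this representation concrete, the following is a minimal PyTorch sketch of such a surface field. It is only an illustration: the hidden width, number of encoding frequencies, and sigmoid output activation are assumptions rather than the exact values of our implementation. The localization MLP $\mathcal{F}_{\theta}$ and texture MLP $\mathcal{F}_{\phi}$ differ only in their output dimension, and both are evaluated at the 3D points $\psi^{-1}(u,v)$ of the texel centers to bake 2D maps.

```python
import torch
import torch.nn as nn

class PositionalEncoding(nn.Module):
    """Fourier-feature encoding of a 3D surface point (x, y, z)."""
    def __init__(self, num_freqs=6):
        super().__init__()
        self.register_buffer("freqs", 2.0 ** torch.arange(num_freqs) * torch.pi)

    def forward(self, x):                          # x: (N, 3)
        angles = x[..., None] * self.freqs         # (N, 3, F)
        enc = torch.cat([angles.sin(), angles.cos()], dim=-1).flatten(-2)
        return torch.cat([x, enc], dim=-1)         # (N, 3 + 6F)

class NeuralSurfaceField(nn.Module):
    """6-layer MLP over encoded 3D points; out_dim=1 gives the localization
    probability field F_theta, out_dim=3 gives the RGB texture field F_phi."""
    def __init__(self, out_dim, hidden=256, num_freqs=6):
        super().__init__()
        self.encode = PositionalEncoding(num_freqs)
        dims = [3 + 6 * num_freqs] + [hidden] * 5
        layers = []
        for d_in, d_out in zip(dims[:-1], dims[1:]):
            layers += [nn.Linear(d_in, d_out), nn.ReLU()]
        layers.append(nn.Linear(hidden, out_dim))
        self.mlp = nn.Sequential(*layers)

    def forward(self, points):                     # points: (N, 3) on the mesh surface
        return torch.sigmoid(self.mlp(self.encode(points)))

def bake_map(field, texel_points, H, W):
    """Evaluate the field at psi^{-1}(u, v) of every texel center (H*W, 3 points)
    and reshape the result into an (H, W, C) texture or localization image."""
    return field(texel_points).reshape(H, W, -1)
```

Because the field lives on the 3D surface rather than in UV space, querying it at sub-texel points (or on a finer UV grid) yields seam-free, higher-resolution maps, which is the super resolution behavior described above.
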
# 3.2. Visual Guidance for Localized Textures + +We guide our optimization using three distinct losses that encourage both the localization and texture towards visually desirable results. Each loss is visualized as a branch in Fig. 3 (top branch: localization loss; middle branch: local texture map loss; bottom branch: background loss). + +Local texture map loss. First, we obtain our localization map $L_{map} \in [0,1]^{H \times W}$ from the neural localization MLP $L_{map} = \psi(\mathcal{F}_{\theta}(\mathbf{x}))$ and the texture map $T_{map} \in [0,1]^{H \times W \times 3}$ from the neural texture MLP $T_{map} = \psi(\mathcal{F}_{\phi}(\mathbf{x}))$. We use the localization $L_{map}$ to mask the texture $T_{map}$ to get a local texture map $T_{map}'$ which only contains textures inside the localization region. We apply the masked texture $T_{map}'$ to our mesh $M$ to get a locally-textured mesh $M_t$ and construct a local-texture text prompt $y_t$ from the input text $y$ (middle branch, Fig. 3). We then supervise our optimization using a text-conditioned visual loss (cascaded score distillation, see Sec. 3.4) on $M_t$ and $y_t$. By applying a visual loss to the localization-masked texture, we get informative and meaningful gradients for both our texture MLP and our localization MLP. + +Localization loss. Using only the texture loss allows for trivial solutions where the mask contains a region that includes, but is much larger than, the desired localization region. To encourage the localization region to be meaningful, we employ a visual loss on the localization region in isolation (similar to 3D Highlighter [10]). Specifically, we blend a (yellow) color onto the mesh according to the localization map to get a localization-colored mesh $M_{l}$ (top branch, Fig. 3). From the text input $y$, we derive a target localization prompt $y_{l}$ describing the localized region in the format used in 3D Highlighter [10]. We then use $M_{l}$ and $y_{l}$ as input to the text-conditioned visual loss. Using this loss significantly improves the detail and quality of the localization (see supplemental material). + +Background loss. Using only the top two branches in Fig. 3 leads to broader localizations that incorporate superfluous elements characteristic of the input 3D model (e.g., a bill on a duck), in addition to the desired localization region (see supplemental material). To mitigate this, we learn a background texture $B_{map} \in [0,1]^{H \times W \times 3}$ that intentionally contains these characteristic elements of the input 3D shape in the inverse of the localization region $1 - L_{map}$ (the area outside the localization region). Specifically, we blend both the background texture $B_{map}$ (using $1 - L_{map}$ ) and a yellow color (using $L_{map}$ ) to get a composited texture $B_{map}' = L_{map}(\mathrm{YELLOW}) + (1 - L_{map})B_{map}$ (bottom branch in Fig. 3). We apply the composited texture $B_{map}'$ to the mesh to get $M_b$ and then supervise the background MLP using a visual loss conditioned on both $M_b$ and a target text $y_b$ (derived from $y$ ). The target text $y_b$ describes the generic object class (e.g., 'cow' in Fig. 3) with a (yellow) colored localization region. See supplemental material for more details. The third loss directly encourages incorporating the superfluous elements in the background texture, which discourages the localization region from incorporating such undesired elements (since $L_{map}$ and $1 - L_{map}$ are inverse masks).
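
The three branches differ only in how the baked maps are composited before rendering and scoring. The sketch below illustrates one way to form the three composited texture maps; it follows our reading of Fig. 3, and the concrete highlight color and the plain base albedo used for the unmasked area are assumptions rather than constants specified in the text.

```python
import torch

YELLOW = torch.tensor([1.0, 0.9, 0.0])  # assumed highlight color
BASE   = torch.tensor([0.5, 0.5, 0.5])  # assumed plain mesh albedo outside the edit region

def branch_textures(L_map, T_map, B_map):
    """L_map: (H, W, 1) localization probabilities; T_map, B_map: (H, W, 3) textures.
    Returns the texture maps applied to the three mesh variants M_t, M_l, M_b of Fig. 3."""
    local_tex = L_map * T_map + (1.0 - L_map) * BASE    # middle branch: texture confined to the mask
    loc_tex   = L_map * YELLOW + (1.0 - L_map) * BASE   # top branch: highlight-only localization mesh
    bg_tex    = L_map * YELLOW + (1.0 - L_map) * B_map  # bottom branch: B'_map of the background loss
    return local_tex, loc_tex, bg_tex
```

Each composited map is applied to the mesh, rendered from the same viewpoint, and scored with the text-conditioned visual loss against its prompt ($y_t$, $y_l$, or $y_b$), so gradients from all three branches reach $L_{map}$ and therefore the localization MLP.
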
+ +Key to our method is the simultaneous optimization of the localization map (that specifies the edit region) and the texture map that conforms to it. This approach improves the quality of both the localization and the stylization. The texture map drives the localization to become more detailed and intricate, while the localization explicitly masks the texture, ensuring a coherent local style which respects the localization boundary (see Fig. 5). + +![](images/d4f6be7c7b59054a75210a356f66f171e829da3af540ba3a770c61aa11a695ad.jpg) +Simultaneous + +![](images/6c43ffa6ff52e17ea8cc59bb1f2b7fafb519cdf5dbbeec3fd840d64f9c560b8d.jpg) +In series + +![](images/25064e463f681c2bd66cd2cf471c738429508d746c077bed6c2ca5f90c4256b1.jpg) +Independent +Figure 5. Impact of simultaneous optimization. Simultaneously optimizing the localization and texture (left) results in higher-detailed textures which effectively conform to the predicted localization. If we first optimize the localization, then optimize the texture within the localization region (middle), both the localization and texture are less detailed. Independent (right): if we optimize the localization independently (independent: left) and the texture independently (independent: middle), the texture does not align with the localization and thus the masked texture contains fringe artifacts (independent: right). + +# 3.3. Score Distillation and Cascaded Diffusion + +Score Distillation. To guide our local stylization, we leverage powerful pretrained text-to-image diffusion models. Existing approaches use these models in conjunction with Score Distillation Sampling (SDS) to supervise text-driven optimizations [44, 58]. For each iteration of an optimization of an image $x$ that we want to supervise with diffusion model $\phi$ and text prompt $y$, SDS [44] proposes the following gradient: + +$$ +\nabla_{x} \mathcal{L}_{SDS}(\phi, x, y) = w(t) \left( \epsilon_{\phi}(z_{t}, t, y) - \epsilon \right) \tag{1} +$$ + +where timestep $t \sim \mathcal{U}(\{1, \dots, T\})$ is sampled uniformly and noise $\epsilon \sim \mathcal{N}(\mathbf{0}, \mathbf{I})$ is Gaussian. The noisy image $z_{t}$ is obtained by applying a timestep-dependent scaling of $\epsilon$ to the image $x$. The weight $w(t)$ is a timestep-dependent weighting function and $\epsilon_{\phi}(z_{t}, t, y)$ is the noise predicted by the diffusion model conditioned on $z_{t}$, $t$, and $y$. Note that Eq. (1) omits the U-Net Jacobian term (not needed in practice [44]). This objective is similar to the objective used in diffusion model training; however, instead of optimizing the weights of the model, the gradient is applied to the image $x$.
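
In practice, omitting the Jacobian means the bracketed residual of Eq. (1) can be injected directly as the gradient of the rendered image. The following is a minimal PyTorch sketch of one SDS step; the `unet` callable, the schedule tensors `alphas`/`sigmas`, and the weighting `w` are generic stand-ins for whatever base-stage diffusion model is used, not an API taken from our implementation.

```python
import torch

def sds_grad(x, y_emb, unet, alphas, sigmas, w, T=1000):
    """Eq. (1): SDS gradient with respect to the rendered image x of shape (B, C, H, W).
    alphas, sigmas: per-timestep schedule coefficients of length T; w: weighting function."""
    t = torch.randint(1, T, (x.shape[0],), device=x.device)      # t ~ U({1..T})
    eps = torch.randn_like(x)                                    # eps ~ N(0, I)
    z_t = alphas[t][:, None, None, None] * x.detach() + sigmas[t][:, None, None, None] * eps
    with torch.no_grad():                                        # never backprop through the U-Net
        eps_pred = unet(z_t, t, y_emb)
    return w(t)[:, None, None, None] * (eps_pred - eps)

# The gradient is applied to the image rather than to the diffusion model:
#   x = render(mesh, texture_mlp)   # differentiable render
#   torch.autograd.backward(x, grad_tensors=sds_grad(x, y_emb, unet, alphas, sigmas, w))
# which backpropagates through the renderer into the texture MLP parameters.
```

This is exactly the base-stage term that CSD reuses below; the cascaded extension adds analogous residuals from the super-resolution stages.
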
Cascaded Diffusion. Text-to-image diffusion models often contain multiple cascaded stages at different resolutions in order to achieve high resolution outputs [21]. These cascaded diffusion models consist of a base stage $\phi^1$ (stage 1) and some number of super-resolution stages $\phi^{i > 1}$ (stages $2 - N$). The base stage is identical to a standard diffusion model, predicting noise $\epsilon_{\phi^1}(z_t^1,t,y)$ conditioned on the noisy image $z_{t}^{1}$, timestep $t$, and text prompt $y$. However, the super-resolution stages are conditioned on two differently-noised images: one at the current resolution ($z_{t}^{i}$ with timestep $t$ and noise $\epsilon^{i}$) and one at the lower resolution ($z_{s}^{i-1}$ with timestep $s$ and noise $\epsilon^{i-1}$). The predicted noise for the super-resolution stage is given by $\epsilon_{\phi^{i}}(z_{t}^{i}, t, z_{s}^{i-1}, s, y)$. During inference, the lower resolution input image is obtained by adding noise to the output of the prior stage. However, in training, both the high and low resolution images are obtained by sampling a single image from the training dataset and rescaling it to different resolutions. + +Standard SDS [44] only utilizes the first, low-resolution base stage, thus neglecting the full potential of the cascaded model. It is not immediately obvious how to formulate a score distillation technique for all stages of a cascaded diffusion model since super-resolution stages take multiple resolution inputs and, at inference, they require a fully denoised output from the prior stage [21]. We take inspiration from SDS and use the perspective of diffusion training as opposed to inference, and extend it to the training of cascaded diffusion models. To our knowledge, we are the first to consider score distillation using the cascaded super-resolution stages. + +# 3.4. Cascaded Score Distillation + +CSD overview. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion (illustrated in Fig. 6). Since the stages of a cascaded diffusion model $\phi$ are trained entirely independently of one another, our insight is to formulate a distillation loss that incorporates gradients from all stages $(\phi^1,\dots,\phi^N)$ simultaneously. We observe that different stages of the cascaded model provide different levels of granularity and global understanding (Fig. 7). Controlling the influence of each stage provides control over the details and the corresponding localization of the supervision (Fig. 8). + +CSD Formalization. Consider a mesh $M_{\theta}$ with a neural texture parameterized by an MLP $\theta$ (this MLP could be any of $\mathcal{F}_{\theta}$, $\mathcal{F}_{\phi}$, or $\mathcal{F}_{\psi}$ from Sec. 3.2). We first render $M_{\theta}$ at $N$ different resolutions using a differentiable renderer $g$ to get multiple images $g(M_{\theta}) = \mathbf{x} = \{x^{1},\dots,x^{N}\}$ such that $x^{i}$ is the same resolution as stage $\phi^i$. For the base stage $\phi^1$, we perform standard SDS using Eq. (1) on $x^{1}$ and prompt $y$ to get a gradient $\nabla_{x^1}$. For each stage $\phi^i$ with $i > 1$, we sample two timesteps $t,s \sim \mathcal{U}(\{1,\ldots,T\})$, noise $\epsilon^i \sim \mathcal{N}(\mathbf{0},\mathbf{I})$ at the resolution of stage $\phi^i$, and noise $\epsilon^{i-1} \sim \mathcal{N}(\mathbf{0},\mathbf{I})$ at the resolution of stage $\phi^{i-1}$. Using timestep-dependent schedule coefficients $\alpha$ and $\sigma$, we compute a noisy image $z_{t}^{i} = \alpha_{t}x^{i} + \sigma_{t}\epsilon^{i}$ by applying a timestep-dependent scaling of $\epsilon^i$ to the image $x^i$. Similarly, we compute $z_{s}^{i-1} = \alpha_{s}x^{i-1} + \sigma_{s}\epsilon^{i-1}$ by applying a timestep-dependent scaling of $\epsilon^{i-1}$ to the image $x^{i-1}$. + +![](images/dc0855ebbe75fc13f60c0283528b86c600c64f2abc32f487d20fcae9e5b6718f.jpg) +Figure 6. Cascaded Score Distillation (CSD). We simultaneously distill scores across multiple stages of a cascaded diffusion model in order to leverage both the global awareness of the first stage and the higher level of detail contained in later stages. The difference between the predicted noise and sampled noise is the image gradient for each stage.
+ +We then use $\phi^i$ to predict noise $\epsilon_{\phi^i}(z_t^i,t,z_s^{i-1},s,y)$ conditioned on the noisy images, timesteps, and text prompt. Our gradient $\nabla_{x^i}$ for each stage $\phi^i$ with $i > 1$ is the difference between the predicted noise and the (higher-resolution) sampled noise $\epsilon^i$, weighted by the timestep-dependent function $w(t)$: + +$$ +\nabla_{x^{i}} \mathcal{L}_{CSD^{i}}\left(\phi^{i}, x^{i}, x^{i-1}, y\right) = w(t) \left( \epsilon_{\phi^{i}}\left(z_{t}^{i}, t, z_{s}^{i-1}, s, y\right) - \epsilon^{i} \right). \tag{2} +$$ + +With all gradients $\nabla_{x^1},\ldots,\nabla_{x^N}$ computed, we weight each gradient $\nabla_{x^i}$ with a user-defined $\lambda^i$ to provide control over the impact of the supervision from each stage of the cascaded model. Thus our full gradient with respect to any given neural texture $\theta$ can be described by: + +$$ +\nabla_{\theta} \mathcal{L}_{CSD}(\phi, \mathbf{x} = g(\theta), y) = \lambda^{1} \nabla_{x^{1}} \mathcal{L}_{SDS}(\phi^{1}, x^{1}, y) \frac{\partial x^{1}}{\partial \theta} + \sum_{i=2}^{N} \lambda^{i} \nabla_{x^{i}} \mathcal{L}_{CSD^{i}}\left(\phi^{i}, x^{i}, x^{i-1}, y\right) \frac{\partial x^{i}}{\partial \theta}. \tag{3} +$$ + +Note that just as in SDS [44], we can avoid computing the U-Net Jacobian term $\frac{\partial \epsilon_{\phi^i}(z_t^i,t,z_s^{i-1},s,y)}{\partial z_t^i}$ (not shown in Eq. (3)) since each stage is entirely independent and our gradient is only with respect to the high-resolution image $x^i$. Thus, we directly apply $\lambda^i \nabla_{x^i}$ to the image $x^i$ without having to compute the costly backpropagation through the U-Net. Using the gradient $\nabla_{\theta}\mathcal{L}_{CSD}(\phi, \mathbf{x} = g(\theta), y)$, we update the weights of our MLP $\theta$.
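
A compact sketch of how Eqs. (1)-(3) translate into one optimization step is given below. It is an illustrative PyTorch fragment under our own assumptions: the `stage` objects (with `alpha`, `sigma`, `w`, and `predict_noise`) are stand-ins for the frozen base and super-resolution denoisers of whatever cascaded model is used, and `renders` are the differentiably rendered images $x^1,\dots,x^N$; none of these names are the API of our actual implementation.

```python
import torch

def csd_step(renders, stages, y_emb, lambdas, T=1000):
    """renders: list of rendered images x^1..x^N, one per stage resolution, on the autograd graph.
    stages: wrappers around each frozen diffusion stage (assumed interface, see lead-in).
    lambdas: per-stage weights lambda^i controlling granularity vs. global understanding."""
    grads = []
    for i, (x_i, stage) in enumerate(zip(renders, stages)):
        b = x_i.shape[0]
        t = torch.randint(1, T, (b,), device=x_i.device)
        eps_i = torch.randn_like(x_i)
        z_t = stage.alpha(t).view(b, 1, 1, 1) * x_i.detach() + stage.sigma(t).view(b, 1, 1, 1) * eps_i
        with torch.no_grad():
            if i == 0:   # base stage: plain SDS residual, Eq. (1)
                eps_pred = stage.predict_noise(z_t, t, y_emb)
            else:        # super-resolution stage: also condition on a noised low-res render, Eq. (2)
                s = torch.randint(1, T, (b,), device=x_i.device)
                x_lo = renders[i - 1].detach()
                z_s = stage.alpha(s).view(b, 1, 1, 1) * x_lo + stage.sigma(s).view(b, 1, 1, 1) * torch.randn_like(x_lo)
                eps_pred = stage.predict_noise(z_t, t, y_emb, z_low=z_s, s=s)
        grads.append(lambdas[i] * stage.w(t).view(b, 1, 1, 1) * (eps_pred - eps_i))
    # Eq. (3): inject lambda^i * grad_i directly at each image; backpropagation through the
    # differentiable renderer supplies the dx^i/dtheta factors without any U-Net Jacobian.
    torch.autograd.backward(renders, grad_tensors=grads)
```

Sweeping the entries of `lambdas` between the base and super-resolution terms reproduces the granularity control discussed in Sec. 4.2 (Fig. 8); in our experiments we use one fixed weighting scheme.
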
# 4. Experiments + +We demonstrate the capabilities of 3D Paintbrush on a wide variety of meshes (from different sources [55, 56, 61, 65]) and prompts. We highlight key properties of our method such as localization precision and edit specificity. We then demonstrate the importance and capabilities of our CSD loss, including its high resolution supervision and intuitive controls. Finally, we evaluate our system against other localization and editing baselines and ablate the key components of our method. In our experiments, we use DeepFloyd IF [3] for our cascaded model. Our unoptimized PyTorch [43] implementation takes 4 hours on a standard A40 GPU, typically achieving satisfactory results within 2 hours. + +![](images/33f1240fa7193e069ba0cbfc01ea4c297c50b0a5c68a2a9bbdf303837df82ec6.jpg) +Only stage 1 + +![](images/296857b7782a8ab5165386cf4150d05f7248ac8b10ace6c3d3d178fe938d4844.jpg) +Only stage 2 +Figure 7. Impact of cascaded stages. Different stages of the cascaded model provide different levels of granularity and global understanding. Using only the (low resolution) stage 1 model gives a low-resolution result in the correct location. While the (high resolution) stage 2 model gives a high-resolution result, it is placed in the incorrect location. Our CSD simultaneously uses stage 1 and 2, resulting in a highly-detailed texture in the appropriate location. + +![](images/b135e81205c6c98e66925fc16e84c504e31b85f2c854ebad06f74405b10ea708.jpg) +CSD + +![](images/5b8460f91c44d63517f33c80a08b86d1c0277e231a606456da0b8a11efa6dfe1.jpg) + +# 4.1. Properties of 3D Paintbrush + +3D Paintbrush generality. 3D Paintbrush is capable of producing highly detailed localizations and textures on a diverse collection of meshes and prompts (Fig. 4). Our method is not restricted to any category of meshes, and we show results on organic and manufactured shapes. Furthermore, our local textures can be specified with open vocabulary text descriptions and are not limited to any predefined categories or constraints. This includes "out-of-domain" local textures such as the rainbow shinguards on a giraffe, which are not naturally seen in the context of these objects, yet are precisely placed in semantically meaningful locations with highly detailed textures. +3D Paintbrush precision and composition. 3D Paintbrush produces precise localizations and highly-detailed textures that effectively adhere to these predicted localizations (see Fig. 2). The tight coupling between the localization and texture (see the gold chain necklace in Fig. 1) enables seamless composition of multiple local textures simultaneously on the same mesh without any layering artifacts. For example, the sharp localization boundary of the "Tie-dye apron" (in Fig. 2) allows us to composite this local texture on top of other textures without obstructing these textures in regions outside of the apron's boundary. +3D Paintbrush specificity and effectiveness. 3D Paintbrush produces accurate and high resolution local edits that closely adhere to the text specification (see Fig. 10). Our method's fine-grained results contain intricate details (e.g., the badge on the "Barcelona jersey") and reflect the subtle differences in the text prompts (e.g., the "cape" on the dog is more tapered than the boxer "poncho"). This specificity allows us to produce many diverse and distinct local styles. We show multiple local edits on the same mesh for multiple different meshes, demonstrating the effectiveness of our method on diverse prompts and meshes. + +![](images/c728d6c82ddb8cb58dd10d0cfef669aa55ac036d8bd866f1efbf1cf7c12a1748.jpg) +Figure 8. Granular control with CSD. Varying the weight between stage 1 and stage 2 results in control over the details and corresponding localization. Only using stage 1 (leftmost) is rather coarse; only using stage 2 (rightmost) is highly detailed with an incorrect localization. Increasing the stage 2 weight (moving left to right) progressively increases the detail and granularity of the supervision, enabling smooth and meaningful interpolation between stage 1 and 2. + +![](images/2bfdaa1e92a3b054262c87e40edf7656be061e49122935b113dc0a310c900c99.jpg) + +![](images/710419ad47e59698d0a19e559bdd3518608098670ea2964f39f818d032fe4942.jpg) + +![](images/3814c9de18a8a1812bec28f904d96706c1073a0823d5b79423d07f0ae2513bc2.jpg) + +![](images/41eb0c5f58d6feb8ce9551a7beafa65e3893ad5870b3c73c95c19a30c276b94a.jpg) + +![](images/2b792d093f6b91dc62a35ea246d0f41f25dd615c0a02226c5548175a2dcc3b09.jpg) + +![](images/102658898ed7b397ddcbbc5e58d494ee49e471b9c252e79b8635a0fe4032a853.jpg) + +![](images/1fd7698e39f3f52fbe8ad6c27a8fbb1ce42f938fba5b0ca4540a0ed3b5e4261e.jpg) + +![](images/d0d288a63047d491fee7b9779a723b9a591773b7af8d1d948f06c86f7f11c149.jpg) + +![](images/db0157c211ca0f61d67e2037a587780ac60fa16e38abd95ffe727ad1b7a7001d.jpg) + +![](images/89963c25164dfa3d19bd6d48f4b15f548fcd25730b779d9bff8c68979aecc46c.jpg) + +# 4.2. Importance of Cascaded Score Distillation + +Impact and granular control of CSD. Our cascaded score distillation (CSD) simultaneously distills scores at multiple resolutions in a cascaded fashion.
We observe that different stages of the cascaded diffusion model give different levels of granularity and global understanding (Fig. 7). Using only the (low resolution) stage 1 model is equivalent to SDS. Though SDS produces an accurate localization and coherent texture, the result is low-resolution (see Fig. 9). Conversely, using only the (high resolution) stage 2 model gives a high-resolution result, but often fails to properly localize the texture, leading to undesirable results. Our CSD simultaneously combines the supervision from stages 1 and 2, resulting in a highly-detailed texture in the appropriate location. Increasing the stage 2 weight (moving left to right in Fig. 8) progressively increases the detail and granularity of the supervision, demonstrating smooth and intuitive interpolation between stage 1 and 2. In our experiments, we use a fixed weighting scheme, but this result demonstrates that our method works for a broad range of weights. Quantitative evidence supporting the importance of the CSD loss can be seen in Tab. 1. + +| Localization | SATR | 3D Highlighter | Ours | +| --- | --- | --- | --- | +| Average Score ↑ | 1.89 | 2.03 | 4.80 | + +| Local Edits | Latent Paint | Vox-E | Ours (SDS) | Ours | +| --- | --- | --- | --- | --- | +| Average Score ↑ | 2.14 | 2.15 | 4.06 | 4.88 |
+ +Table 1. Quantitative evaluation. We conduct a perceptual study where users evaluate our localizations and local edits compared to baseline methods (3D Highlighter [10], SATR [2], Latent Paint [38], Vox-E [49], and our method with standard SDS loss). + +![](images/25ad4a63821e8327d352136370d9337a779df6f421123155fcb458dda9484cfd.jpg) +Figure 9. Importance of super-resolution stage in CSD. Using stage 1 only (equivalent to SDS) lacks fine-grained details. Incorporating the second super-resolution cascaded stage from our CSD increases the resolution and detail. Input text prompts (from left to right): Colorful crochet shell, Cactus base, Tiger stripe shirt. + +# 4.3. Evaluation + +Simultaneous localization and texture. We demonstrate the importance of simultaneously optimizing the localization region and texture in tandem in Fig. 5. We observe that simultaneous optimization results in highly detailed textures which effectively conform to the predicted localization regions (Fig. 5, left). Furthermore, the resulting localization region is sharp and intricate. Alternatively, we optimize the localization region first and use the predicted localization to learn a texture which is confined to the (precomputed) localization region (Fig. 5, middle). In this case, the texture is less detailed, and the localization region is less intricate. Finally, we can learn the texture and localization region independently (Fig. 5, independent). This results in a texture (Fig. 5 independent, middle) that is completely decoupled from the localization region (Fig. 5 independent, left). When masking the texture with the localization region, we observe a misaligned texture with fringe artifacts (Fig. 5 independent, right). + +![](images/dd06006c1d943573844d7e606784a0964f9b073f4c881a9b7c1d76511a2b0273.jpg) +Beautiful roses Colorful crochet base Rainbow headband Camo poncho Superhero cape Tiger stripe hat +Figure 10. 3D Paintbrush is capable of producing a variety of local textures on the same mesh. Each result contains an accurate localization map (to specify the edit region) and a texture map that conforms to it. + +Quantitative evaluation. 3D Paintbrush is the only method geared towards local editing that natively operates on meshes. We compare to the closest mesh-based methods which perform localization (3D Highlighter [10], SATR [2]) and texturing (Latent Paint [38]). We also compare to a voxel NeRF approach for local 3D editing (Vox-E [49]). + +To evaluate our method against these baselines, we conduct a perceptual study where 39 users rate the effectiveness of each method for 9 different meshes (see Tab. 1). 3D Paintbrush consistently scores the highest for both localization and local editing, producing sharper localizations than 3D Highlighter and SATR and higher resolution textures than Latent Paint and Vox-E. Further quantitative evaluation using CLIP R-Precision and qualitative comparisons to these baselines are shown in the supplemental material. + +Limitations. We illustrate a limitation of our method in Fig. 11. In cases where the desired local texture has strong semantic connections to additional components, these auxiliary components can sometimes be included in the localization and local texture. For example, a "Pharaoh head-dress" is closely associated with Egyptian necklaces and thus our method also localizes and styles this component as well. Our method also suffers from the Janus effect common to many text-to-3D methods that use 2D supervision. + +# 5. 
Conclusion + +We presented 3D Paintbrush, a technique that produces highly detailed texture maps on meshes which effectively adhere to a predicted localization region. Our system is capable of hallucinating non-obvious local textures on a wide variety of meshes (such as heart-shaped sunglasses on a cow). Our localizations are detailed and accurate, en + +abling seamless post-processing (such as compositing textures without unwanted fringe). We proposed cascaded score distillation, a technique capable of extracting supervision signals from multiple stages of a cascaded diffusion model. We observe that each stage controls different amounts of detail and global understanding. Further, varying the weights for each stage provides control over the resulting local textures. We show the effectiveness of CSD to locally texture meshes; yet, CSD is general and can be applied to other domains (such as images, videos, and alternative 3D representations). In the future, we are interested in extending localized editing to capabilities beyond texturing (such as deformations, normal maps, and more). + +# 6. Acknowledgments + +We thank the University of Chicago for providing the AI cluster resources, services, and the professional support of the technical staff. This work was also supported in part by gifts from Snap Research, Adobe Research, Google Research, BSF grant 2022363, and NSF grants 2304481 and 2241303. Finally, we would like to thank Brian Kim, Jack Zhang, Haochen Wang, and the members of 3DL and PALS for their thorough and insightful feedback on our work. + +![](images/d0c477b64edabd0b892d6eaf9f1e8a87bda7029beca7ec998bc553b2e8659424.jpg) +Figure 11. In cases where the desired localization carries a strong semantic context, elements from that context can also appear in the localization and style. For example, when adding a pharaoh headdress, 3D Paintbrush also adds an Egyptian necklace since they are commonly associated with pharaohs. + +# References + +[1] Ahmed Abdelreheem, Abdelrahman Eldesokey, Maks Ovsjanikov, and Peter Wonka. Zero-shot 3d shape correspondence. In SIGGRAPH Asia 2023 Conference Papers, pages 1-11, 2023. 2 +[2] Ahmed Abdelreheem, Ivan Skorokhodov, Maks Ovsjanikov, and Peter Wonka. Satr: Zero-shot semantic segmentation of 3d shapes. In ICCV, 2023. 2, 7, 8 +[3] Stability AI. Deepfloydif, 2023. 6 +[4] Sudarshan Babu, Richard Liu, Avery Zhou, Michael Maire, Greg Shakhnarovich, and Rana Hanocka. Hyperfields: Towards zero-shot generation of nerfs from text. arXiv preprint arXiv:2310.17075, 2023. 2 +[5] Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. In European conference on computer vision, pages 707-723. Springer, 2022. 2 +[6] Alexey Bokhovkin, Shubham Tulsiani, and Angela Dai. Mesh2tex: Generating mesh textures from image queries. arXiv preprint arXiv:2304.05868, 2023. 2 +[7] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18392-18402, 2023. 2 +[8] Dave Zhenyu Chen, Yawar Siddiqui, Hsin-Ying Lee, Sergey Tulyakov, and Matthias Nießner. Text2tex: Text-driven texture synthesis via diffusion models. In ICCV, 2023. 2 +[9] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. arXiv preprint arXiv:2303.13873, 2023. 
2 +[10] Dale Decatur, Itai Lang, and Rana Hanocka. 3d highlighter: Localizing regions on 3d shapes via text descriptions. In CVPR, 2023. 2, 4, 7, 8 +[11] Zhiwen Fan, Yifan Jiang, Peihao Wang, Xinyu Gong, Dejia Xu, and Zhangyang Wang. Unified implicit neural stylization. In European Conference on Computer Vision, pages 636-654. Springer, 2022. 2 +[12] Rao Fu, Xiao Zhan, Yiwen Chen, Daniel Ritchie, and Srinath Sridhar. Shapecrafter: A recursive text-conditioned 3d shape generation model. Advances in Neural Information Processing Systems, 35:8882-8895, 2022. 2 +[13] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 2 +[14] Jun Gao, Tianchang Shen, Zian Wang, Wenzheng Chen, Kangxue Yin, Daiqing Li, Or Litany, Zan Gojcic, and Sanja Fidler. Get3d: A generative model of high quality 3d textured shapes learned from images. In Advances In Neural Information Processing Systems, 2022. 2 +[15] Lin Gao, Tong Wu, Yu-Jie Yuan, Ming-Xian Lin, Yu-Kun Lai, and Hao Zhang. Tm-net: Deep generative networks for textured meshes. ACM Transactions on Graphics (TOG), 40 (6):1-15, 2021. 2 + +[16] William Gao, Noam Aigerman, Thibault Groueix, Vova Kim, and Rana Hanocka. Textdeformer: Geometry manipulation using text guidance. In ACM SIGGRAPH 2023 Conference Proceedings, pages 1-11, 2023. 2 +[17] Huy Ha and Shuran Song. Semantic abstraction: Openworld 3D scene understanding from 2D vision-language models. In Proceedings of the 2022 Conference on Robot Learning, 2022. 2 +[18] Ayaan Haque, Matthew Tancik, Alexei A Efros, Aleksander Holynski, and Angjoo Kanazawa. Instruct-nerf2nerf: Editing 3d scenes with instructions. ICCV, 2023. 2 +[19] Amir Hertz, Rana Hanocka, Raja Giryes, and Daniel Cohen-Or. Deep geometric texture synthesis. ACM Transactions on Graphics (TOG), 39(4):108-1, 2020. 2 +[20] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022. 2 +[21] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. The Journal of Machine Learning Research, 23(1):2249-2281, 2022. 2, 5 +[22] Lukas Hollein, Justin Johnson, and Matthias Nießner. Stylemesh: Style transfer for indoor 3d scene reconstructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6198-6208, 2022. 2 +[23] Jingwei Huang, Justus Thies, Angela Dai, Abhijit Kundu, Chiyu Jiang, Leonidas J Guibas, Matthias Nießner, Thomas Funkhouser, et al. Adversarial texture optimization from rgb-d scans. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1559-1568, 2020. 2 +[24] Yukun Huang, Jianan Wang, Yukai Shi, Xianbiao Qi, Zheng-Jun Zha, and Lei Zhang. Dreamtime: An improved optimization strategy for text-to-3d content creation. arXiv preprint arXiv:2306.12422, 2023. 3 +[25] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 2 +[26] Oren Katzir, Or Patashnik, Daniel Cohen-Or, and Dani Lischinski. Noise-free score distillation, 2023. 
2 +[27] Justin Kerr, Chung Min Kim, Ken Goldberg, Angjoo Kanazawa, and Matthew Tancik. Lerf: Language embedded radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19729-19739, 2023. 2 +[28] Sosuke Kobayashi, Eiichi Matsumoto, and Vincent Sitzmann. Decomposing nerf for editing via feature field distillation. In Advances in Neural Information Processing Systems, 2022. 2 +[29] Han-Hung Lee and Angel X Chang. Understanding pure clip guidance for voxel grid nerf models. arXiv preprint arXiv:2209.15172, 2022. 2 + +[30] Jiabao Lei, Yabin Zhang, Kui Jia, et al. Tango: Text-driven photorealistic and robust 3d stylization via lighting decomposition. Advances in Neural Information Processing Systems, 35:30923-30936, 2022. 2 +[31] Yuhan Li, Yishun Dou, Yue Shi, Yu Lei, Xuanhong Chen, Yi Zhang, Peng Zhou, and Bingbing Ni. Focaldreamer: Text-driven 3d editing via focal-fusion assembly. arXiv preprint arXiv:2308.10608, 2023. 2 +[32] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In CVPR, 2023. 1, 2, 3 +[33] Hsueh-Ti Derek Liu, Vladimir G. Kim, Siddhartha Chaudhari, Noam Aigerman, and Alec Jacobson. Neural subdivision. ACM Trans. Graph., 39(4), 2020. 2 +[34] Kunhao Liu, Fangneng Zhan, Yiwen Chen, Jiahui Zhang, Yingchen Yu, Abdulmotaleb El Saddik, Shijian Lu, and Eric P Xing. Stylerf: Zero-shot 3d style transfer of neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8338-8348, 2023. 2 +[35] Minghua Liu, Chao Xu, Haian Jin, Linghao Chen, Zexiang Xu, Hao Su, et al. One-2-3-45: Any single image to 3d mesh in 45 seconds without per-shape optimization. arXiv preprint arXiv:2306.16928, 2023. 2 +[36] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object, 2023. 2 +[37] Yiwei Ma, Xiaqing Zhang, Xiaoshuai Sun, Jiayi Ji, Haowei Wang, Guannan Jiang, Weilin Zhuang, and Rongrong Ji. X-mesh: Towards fast and accurate text-driven 3d stylization via dynamic textual guidance. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2749-2760, 2023. 2 +[38] Gal Metzer, Elad Richardson, Or Patashnik, Raja Giryes, and Daniel Cohen-Or. Latent-nerf for shape-guided generation of 3d shapes and textures. In CVPR, 2023. 1, 2, 3, 7, 8 +[39] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. Text2mesh: Text-driven neural stylization for meshes. In CVPR, pages 13492-13502, 2022. 2 +[40] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 2 +[41] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. Clip-mesh: Generating textured meshes from text using pretrained image-text models. In SIGGRAPH Asia 2022 conference papers, pages 1-8, 2022. 2 +[42] Michael Oechsle, Lars Mescheder, Michael Niemeyer, Thilo Strauss, and Andreas Geiger. Texture fields: Learning texture representations in function space. In CVPR, pages 4531-4540, 2019. 2 +[43] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017. 6 + +[44] Ben Poole, Ajay Jain, Jonathan T. 
Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In ICLR, 2023. 2, 5, 6 +[45] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3d object generation using both 2d and 3d diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 2 +[46] Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In International conference on machine learning, 2019. 3 +[47] Elad Richardson, Gal Metzer, Yuval Alaluf, Raja Giryes, and Daniel Cohen-Or. Texture: Text-guided texturing of 3d shapes. In ACM TOG, 2023. 2 +[48] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 1, 2 +[49] Etai Sella, Gal Fiebelman, Peter Hedman, and Hadar Averbuch-Elor. Vox-e: Text-guided voxel editing of 3d objects. In ICCV, 2023. 1, 2, 7, 8 +[50] Yichun Shi, Peng Wang, Jianglong Ye, Mai Long, Kejie Li, and Xiao Yang. Mvdream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512, 2023. 2 +[51] Yawar Siddiqui, Justus Thies, Fangchang Ma, Qi Shan, Matthias Nießner, and Angela Dai. Texturify: Generating textures on 3d shape surfaces. In European Conference on Computer Vision, pages 72-88. Springer, 2022. 2 +[52] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. 2020. 4 +[53] Christina Tsalicoglou, Fabian Manhardt, Alessio Tonioni, Michael Niemeyer, and Federico Tombari. Textmesh: Generation of realistic 3d meshes from text prompts. arXiv preprint arXiv:2304.12439, 2023. 1, 2 +[54] Vadim Tschernezki, Iro Laina, Diane Larlus, and Andrea Vedaldi. Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. In 2022 International Conference on 3D Vision (3DV), pages 443-453. IEEE, 2022. 2 +[55] TurboSquid. Turbosquid 3d model repository, 2021. https://www.turbosquid.com/. 6 +[56] Oliver van Kaick, Andrea Tagliasacchi, Oana Sidi, Hao Zhang, Daniel Cohen-Or, Lior Wolf, and Ghassan Hamarneh. Prior knowledge for part correspondence. Computer Graphics Forum, 30(2):553-562, 2011. 6 +[57] Suhani Vora, Noha Radwan, Klaus Greff, Henning Meyer, Kyle Genova, Mehdi S. M. Sajjadi, Etienne Pot, Andrea Tagliasacchi, and Daniel Duckworth. Nesf: Neural semantic fields for generalizable semantic segmentation of 3d scenes, 2021. 2 + +[58] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A. Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In CVPR, 2023. 1, 2, 3, 5 +[59] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. arXiv preprint arXiv:2305.16213, 2023. 1, 2, 3 +[60] Xingkui Wei, Zhengqing Chen, Yanwei Fu, Zhaopeng Cui, and Yinda Zhang. Deep hybrid self-prior for full 3d mesh generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5805-5814, 2021. 
2 +[61] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 6 +[62] Kangxue Yin, Jun Gao, Maria Shugrina, Sameh Khamis, and Sanja Fidler. 3dstylenet: Creating 3d shapes with geometric and texture style variations. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12456-12465, 2021. 2 +[63] Xiaohui Zeng, Arash Vahdat, Francis Williams, Zan Gojcic, Or Litany, Sanja Fidler, and Karsten Kreis. Lion: Latent point diffusion models for 3d shape generation. arXiv preprint arXiv:2210.06978, 2022. 2 +[64] Kai Zhang, Nick Kolkin, Sai Bi, Fujun Luan, Zexiang Xu, Eli Shechtman, and Noah Snavely. Arf: Artistic radiance fields. In European Conference on Computer Vision, pages 717-733. Springer, 2022. 2 +[65] Qingnan Zhou and Alec Jacobson. Thingi10k: A dataset of 10,000 3d-printing models. arXiv preprint arXiv:1605.04797, 2016. 6 +[66] Joseph Zhu and Peiye Zhuang. Hifa: High-fidelity text-to-3d with advanced diffusion guidance. arXiv preprint arXiv:2305.18766, 2023. 2, 3 +[67] Jingyu Zhuang, Chen Wang, Lingjie Liu, Liang Lin, and Guanbin Li. Dreameditor: Text-driven 3d scene editing with neural fields. In SIGGRAPH Asia, 2023. 1, 2 \ No newline at end of file diff --git a/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/images.zip b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..17857c7a4f8df7ed13f14f3671142a80181c2fef --- /dev/null +++ b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5e0951239cb523b7298477971e07db1f7b87efb9cf0681352b1d56b46abbe7d +size 496729 diff --git a/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/layout.json b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..76f5fabd02504e0efec2842daac4183a904330cd --- /dev/null +++ b/2024/3D Paintbrush_ Local Stylization of 3D Shapes with Cascaded Score Distillation/layout.json @@ -0,0 +1,10142 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 51, + 103, + 541, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 103, + 541, + 120 + ], + "spans": [ + { + "bbox": [ + 51, + 103, + 541, + 120 + ], + "type": "text", + "content": "3D Paintbrush: Local Stylization of 3D Shapes with Cascaded Score Distillation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 63, + 144, + 171, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 144, + 171, + 171 + ], + "spans": [ + { + "bbox": [ + 63, + 144, + 171, + 171 + ], + "type": "text", + "content": "Dale Decatur University of Chicago" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 194, + 144, + 302, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 144, + 302, + 171 + ], + "spans": [ + { + "bbox": [ + 194, + 144, + 302, + 171 + ], + "type": "text", + "content": "Itai Lang \nUniversity of Chicago" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 326, + 144, + 399, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 
144, + 399, + 171 + ], + "spans": [ + { + "bbox": [ + 326, + 144, + 399, + 171 + ], + "type": "text", + "content": "Kfir Aberman \nSnap Research" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 422, + 144, + 530, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 144, + 530, + 171 + ], + "spans": [ + { + "bbox": [ + 422, + 144, + 530, + 171 + ], + "type": "text", + "content": "Rana Hanocka \nUniversity of Chicago" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 60, + 203, + 161, + 330 + ], + "blocks": [ + { + "bbox": [ + 60, + 203, + 161, + 330 + ], + "lines": [ + { + "bbox": [ + 60, + 203, + 161, + 330 + ], + "spans": [ + { + "bbox": [ + 60, + 203, + 161, + 330 + ], + "type": "image", + "image_path": "8f0213175946d81e045e5257620801c49611ea5251bf90a457a930a8d4f51e8c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 334, + 546, + 355 + ], + "lines": [ + { + "bbox": [ + 46, + 334, + 546, + 355 + ], + "spans": [ + { + "bbox": [ + 46, + 334, + 546, + 355 + ], + "type": "text", + "content": "Figure 1. Utilizing only a text prompt as guidance, 3D Paintbrush seamlessly generates local stylized textures on bare meshes. Our approach produces a localization map (yellow regions) and a highly detailed texture map which conforms to it." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 173, + 203, + 271, + 330 + ], + "blocks": [ + { + "bbox": [ + 173, + 203, + 271, + 330 + ], + "lines": [ + { + "bbox": [ + 173, + 203, + 271, + 330 + ], + "spans": [ + { + "bbox": [ + 173, + 203, + 271, + 330 + ], + "type": "image", + "image_path": "f4b6e651a7b5e88adec8504d1c994748f92bc8066781abc73a14ed135292e743.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 283, + 203, + 399, + 330 + ], + "blocks": [ + { + "bbox": [ + 283, + 203, + 399, + 330 + ], + "lines": [ + { + "bbox": [ + 283, + 203, + 399, + 330 + ], + "spans": [ + { + "bbox": [ + 283, + 203, + 399, + 330 + ], + "type": "image", + "image_path": "88d6d9cc4b82a53a6af7741ab56f74fb0e8003a1f8ad9bc7c1cd3a3d6056e5b6.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 405, + 203, + 533, + 330 + ], + "blocks": [ + { + "bbox": [ + 405, + 203, + 533, + 330 + ], + "lines": [ + { + "bbox": [ + 405, + 203, + 533, + 330 + ], + "spans": [ + { + "bbox": [ + 405, + 203, + 533, + 330 + ], + "type": "image", + "image_path": "eb89358f7aad74877be3c83f45a401455d7ad6d69752f761941a7364042a2dd4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 143, + 366, + 190, + 378 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 366, + 190, + 378 + ], + "spans": [ + { + "bbox": [ + 143, + 366, + 190, + 378 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 391, + 290, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 391, + 290, + 631 + ], + "spans": [ + { + "bbox": [ + 46, + 391, + 290, + 631 + ], + "type": "text", + "content": "We present 3D Paintbrush, a technique for automatically texturing local semantic regions on meshes via text descriptions. 
Our method is designed to operate directly on meshes, producing texture maps which seamlessly integrate into standard graphics pipelines. We opt to simultaneously produce a localization map (to specify the edit region) and a texture map which conforms to it. This approach improves the quality of both the localization and the stylization. To enhance the details and resolution of the textured area, we leverage multiple stages of a cascaded diffusion model to supervise our local editing technique with generative priors learned from images at different resolutions. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion, enabling control over both the granularity and global understanding of the supervision. We demonstrate the effectiveness of 3D Paintbrush to locally texture different semantic regions on a variety of shapes. Project page: https://threedle.github.io/3d-paintbrush" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 645, + 127, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 645, + 127, + 657 + ], + "spans": [ + { + "bbox": [ + 47, + 645, + 127, + 657 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "content": "The ability to edit existing high-quality 3D assets is a fundamental capability in 3D modeling workflows. Recent works have shown exceptional results for text-driven 3D data creation [32, 38, 48, 53, 58, 59], but focus on making global" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 367, + 545, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 367, + 545, + 475 + ], + "spans": [ + { + "bbox": [ + 304, + 367, + 545, + 475 + ], + "type": "text", + "content": " edits. While some progress has been made on local editing using an explicit localization of the edit region [49, 67], these regions are often coarse and lack fine-grained detail. Highly-detailed and accurate localizations are important for constraining the edits to be within a specific region, preventing changes unrelated to the target edit. Furthermore, while meshes with texture maps are the de facto standard in graphics pipelines, existing local editing work does not natively operate on meshes nor produce texture maps for them." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "spans": [ + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "type": "text", + "content": "In this work we develop 3D Paintbrush, a method for automatically texturing local semantic regions on meshes via text descriptions. Our method is designed to operate directly on meshes, producing texture maps which seamlessly integrate into standard graphics pipelines. 3D Paintbrush is controlled via intuitive, free-form text input, allowing users to describe their edits using open vocabulary on a wide range of meshes. Specifically, given an input mesh and a text prompt, 3D Paintbrush produces the corresponding high-quality texture map and a localization region to confine it. 
To enhance the details and resolution of the locally textured area, we introduce Cascaded Score Distillation (CSD) which leverages multiple stages of a cascaded diffusion model. Our explicit localization masks can be used to layer our edit texture onto existing textures." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "We opt to represent both our localization map and texture map as neural fields encoded by multi-layer perceptions. Our method synthesizes both a fine-grained localization mask and high-quality texture in tandem. Simultane" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4473" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 66, + 147, + 201 + ], + "blocks": [ + { + "bbox": [ + 47, + 66, + 147, + 201 + ], + "lines": [ + { + "bbox": [ + 47, + 66, + 147, + 201 + ], + "spans": [ + { + "bbox": [ + 47, + 66, + 147, + 201 + ], + "type": "image", + "image_path": "305e0e8d18cf7f67879c52579bcc8947fff72b3ab4c653d928e3390ba7c0684d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 58, + 201, + 134, + 213 + ], + "lines": [ + { + "bbox": [ + 58, + 201, + 134, + 213 + ], + "spans": [ + { + "bbox": [ + 58, + 201, + 134, + 213 + ], + "type": "text", + "content": "Colorful polo shirt" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 216, + 546, + 239 + ], + "lines": [ + { + "bbox": [ + 46, + 216, + 546, + 239 + ], + "spans": [ + { + "bbox": [ + 46, + 216, + 546, + 239 + ], + "type": "text", + "content": "Figure 2. Precise composition of multiple local textures. 3D Paintbrush produces highly-detailed textures that effectively adhere to the predicted localizations. This enables seamlessly compositing local textures without unwanted fringes (right)." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 154, + 68, + 252, + 201 + ], + "blocks": [ + { + "bbox": [ + 154, + 68, + 252, + 201 + ], + "lines": [ + { + "bbox": [ + 154, + 68, + 252, + 201 + ], + "spans": [ + { + "bbox": [ + 154, + 68, + 252, + 201 + ], + "type": "image", + "image_path": "fad0ab68b5b07bb7cec53701159dbce71c34d054366cf12211c111011d67896a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 162, + 201, + 239, + 213 + ], + "lines": [ + { + "bbox": [ + 162, + 201, + 239, + 213 + ], + "spans": [ + { + "bbox": [ + 162, + 201, + 239, + 213 + ], + "type": "text", + "content": "Superman emblem" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 258, + 68, + 356, + 201 + ], + "blocks": [ + { + "bbox": [ + 258, + 68, + 356, + 201 + ], + "lines": [ + { + "bbox": [ + 258, + 68, + 356, + 201 + ], + "spans": [ + { + "bbox": [ + 258, + 68, + 356, + 201 + ], + "type": "image", + "image_path": "a89cb636cef5b54c1bfe8703dceb299d017079a30f5b65309a6366597bf6ca4b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 276, + 201, + 333, + 213 + ], + "lines": [ + { + "bbox": [ + 276, + 201, + 333, + 213 + ], + "spans": [ + { + "bbox": [ + 276, + 201, + 333, + 213 + ], + "type": "text", + "content": "Tie-dye apron" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 362, + 68, + 462, + 201 + ], + "blocks": [ + { + "bbox": [ + 362, + 68, + 462, + 201 + ], + "lines": [ + { + "bbox": [ + 362, + 68, + 462, + 201 + ], + "spans": [ + { + "bbox": [ + 362, + 68, + 462, + 201 + ], + "type": "image", + "image_path": "66d48d8aa4a8deec06555212b46a505c654ded15867d3a8cbd1f02b36cebbf31.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 375, + 201, + 447, + 213 + ], + "lines": [ + { + "bbox": [ + 375, + 201, + 447, + 213 + ], + "spans": [ + { + "bbox": [ + 375, + 201, + 447, + 213 + ], + "type": "text", + "content": "Muay Thai shorts" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 475, + 74, + 545, + 201 + ], + "blocks": [ + { + "bbox": [ + 475, + 74, + 545, + 201 + ], + "lines": [ + { + "bbox": [ + 475, + 74, + 545, + 201 + ], + "spans": [ + { + "bbox": [ + 475, + 74, + 545, + 201 + ], + "type": "image", + "image_path": "48871a295e38ebb027784ab0ff12faf45f6e3a7d9e47efeef0edbe907f40b5b8.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 488, + 201, + 534, + 213 + ], + "lines": [ + { + "bbox": [ + 488, + 201, + 534, + 213 + ], + "spans": [ + { + "bbox": [ + 488, + 201, + 534, + 213 + ], + "type": "text", + "content": "Composite" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 259, + 286, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 259, + 286, + 319 + ], + "spans": [ + { + "bbox": [ + 46, + 259, + 286, + 319 + ], + "type": "text", + "content": "ously generating the localization and texture maps improves the quality of each. The texture map drives the localization to become more detailed and intricate. 
The localization explicitly masks the texture, ensuring a coherent local style which respects the localization boundary." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 319, + 286, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 319, + 286, + 499 + ], + "spans": [ + { + "bbox": [ + 46, + 319, + 286, + 499 + ], + "type": "text", + "content": "Our local stylization operates in small regions, necessitating higher resolution supervision compared to global generative techniques. Existing approaches leverage pretrained text-to-image diffusion models with Score Distillation Sampling (SDS) to supervise text-driven optimizations [31, 58]. Text-to-image diffusion models often contain multiple cascaded stages in order to achieve high resolution [21], but standard SDS only utilizes the first low-resolution stage of the cascaded model. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion, enabling control over both the granularity and global understanding of the supervision. Since cascaded stages are trained entirely independently, our insight is to formulate a distillation loss that incorporates all stages in tandem." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 500, + 286, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 500, + 286, + 632 + ], + "spans": [ + { + "bbox": [ + 46, + 500, + 286, + 632 + ], + "type": "text", + "content": "In summary, our method enables local text-driven stylization of meshes. By explicitly learning a localization in tandem with the texture, we ensure that our edits are bounded by the localized region. Using our CSD, which leverages all stages of the diffusion model, we can control the granularity and global understanding of the supervision achieving higher resolution textures and localizations than standard SDS. We demonstrate that 3D Paintbrush yields diverse local texturing on a variety of shapes and semantic regions and outperforms baselines both qualitatively and quantitatively." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 645, + 134, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 645, + 134, + 657 + ], + "spans": [ + { + "bbox": [ + 47, + 645, + 134, + 657 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": "A large body of work has studied stylization and analysis of 3D content. Existing work uses neural networks and optimization [6, 15, 19, 22, 23, 30, 33, 37-39, 41, 42, 51, 60, 62] for mesh stylization. Other works use a neural radiance field" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 258, + 545, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 258, + 545, + 368 + ], + "spans": [ + { + "bbox": [ + 304, + 258, + 545, + 368 + ], + "type": "text", + "content": "NeRF [40] for stylization [11, 34, 64]. Yet, these works focus on stylization rather than localization. Large 2D models have been used for analytical tasks in 3D such as localization and segmentation [1, 2, 10, 17, 27, 28, 54, 57, 67], however, none of these works produce textures. 
Furthermore, only [1, 2, 10, 67] aim to produce a tight localization on meshes and we find that these approaches still produce relatively smooth localization regions that cannot capture the high frequency details needed for sharp local edits." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 372, + 545, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 372, + 545, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 372, + 545, + 479 + ], + "type": "text", + "content": "Text-driven generation and editing. Existing works have leveraged pre-trained 2D models to generate 3D representations that adhere to a text prompt [4, 12, 16, 25, 29, 39, 41, 63]. Many recent methods [9, 26, 32, 44, 50, 53, 53, 58, 66] use score distillation [44, 58] from 2D models to generate both geometry and styles from scratch, while other works optimize the texture of an existing, fixed geometry [8, 38, 39, 47]. Other work aims to generate 3D representations from images [14, 35, 36, 45]." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 480, + 545, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 480, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 304, + 480, + 545, + 578 + ], + "type": "text", + "content": "Existing text-to-3D generative methods [38, 44, 58, 59] can be used to perform global edits [18, 48, 67]. However, since these approaches do not have explicit edit localizations, they struggle to perform highly specific local edits without changing other components of the 3D representation's appearance. Different from our objective, these works aim to generate or globally manipulate existing 3D representations, while our work focuses on local editing." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": "Text-driven local editing. Many approaches can perform global 3D edits and progress has been made on local editing in images and videos [5, 7, 13, 20]. Yet, few works have addressed the task of precise, local editing for 3D representations. Local editing is challenging since, in addition to synthesizing the edit, methods need to localize the edit region. FocalDreamer [31] obtains precise user defined edit regions at the cost of requiring additional, tedious user input compared to strictly text-driven approaches. 
Vox-E [49] (operating on voxel representations) and DreamEditor [67] (operating on NeRFs) both use attention maps to localize an edit" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4474" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 68, + 547, + 210 + ], + "blocks": [ + { + "bbox": [ + 47, + 68, + 547, + 210 + ], + "lines": [ + { + "bbox": [ + 47, + 68, + 547, + 210 + ], + "spans": [ + { + "bbox": [ + 47, + 68, + 547, + 210 + ], + "type": "image", + "image_path": "b877759d8109bb754731500e7eabbf66d9ebd769fd1bf2c81c411b22a81fe222.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 217, + 547, + 262 + ], + "lines": [ + { + "bbox": [ + 45, + 217, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 45, + 217, + 547, + 262 + ], + "type": "text", + "content": "Figure 3. Overview of 3D Paintbrush. Each point on the surface of the mesh is passed into three different branches to produce a localization probability, texture map, and background map. We texture three different variants of the same mesh with the localization, texture, and background maps and render them from the same viewpoint. Each image along with the corresponding text condition is used to compute the CSD loss." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 282, + 287, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 282, + 287, + 368 + ], + "spans": [ + { + "bbox": [ + 46, + 282, + 287, + 368 + ], + "type": "text", + "content": "region and thus the localization has no visual meaning in isolation. Our approach imposes a visual loss on our localizations in order to enforce sharp boundaries that are tightly coupled with our texture edits. Additionally, since existing purely text-driven local editing approaches only work on voxels and NeRFs, our approach is the first to enable text-driven local editing on meshes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 369, + 288, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 369, + 288, + 562 + ], + "spans": [ + { + "bbox": [ + 46, + 369, + 288, + 562 + ], + "type": "text", + "content": "High resolution text-to-3D. Several works have explored techniques to increase the resolution for text-to-3D. Many recent works apply SDS to latent diffusion models [32, 38, 58, 59, 66]. Recent works backpropagate the gradient through the encoder to get gradients in higher resolution " + }, + { + "bbox": [ + 46, + 369, + 288, + 562 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 46, + 369, + 288, + 562 + ], + "type": "text", + "content": " RGB space [32, 59, 66]. Other works use timestep annealing to give less noisy supervision towards the end of the optimization, thus increasing the detail of the generations [24, 59]. HiFA [66] proposes denoising over multiple successive timesteps each iteration to provide better gradients and achieve high fidelity appearance. 
While all of these approaches have shown impressive improvements to the resolution of SDS supervision, SDS only utilizes the base stage (not super-resolution stages). Thus, these proposed improvements are orthogonal to ours and can be incorporated at the super-resolution stages using CSD as well." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 573, + 103, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 573, + 103, + 586 + ], + "spans": [ + { + "bbox": [ + 47, + 573, + 103, + 586 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "text", + "content": "We show an overview of our method in Fig. 3. The inputs to our system are a mesh " + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "text", + "content": " and a text description " + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "text", + "content": " of the desired local edit. Our system produces a local texture on the mesh " + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "text", + "content": " that adheres to the text prompt " + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "text", + "content": ". To supervise our optimization, we use score distillation with a pretrained text-to-image diffusion model. However, local editing requires higher detail than standard generation due to the small size and granularity of the desired edits. In order to further improve the detail of our localization and texture, we introduce Cascaded Score Distillation (CSD), a" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 282, + 545, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 282, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 304, + 282, + 545, + 331 + ], + "type": "text", + "content": "technique that distills scores at multiple resolutions of the 2D cascaded model. This approach enables leveraging all stages of a cascaded model and provides control over both the detail and global understanding of the supervision." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 336, + 440, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 336, + 440, + 350 + ], + "spans": [ + { + "bbox": [ + 305, + 336, + 440, + 350 + ], + "type": "text", + "content": "3.1. 
Local Neural Texturing" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 354, + 545, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 354, + 545, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 354, + 545, + 545 + ], + "type": "text", + "content": "3D Paintbrush represents local textures as neural texture maps over the surface of a mesh " + }, + { + "bbox": [ + 304, + 354, + 545, + 545 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 354, + 545, + 545 + ], + "type": "text", + "content": " defined by vertices " + }, + { + "bbox": [ + 304, + 354, + 545, + 545 + ], + "type": "inline_equation", + "content": "V \\in \\mathbb{R}^{n \\times 3}" + }, + { + "bbox": [ + 304, + 354, + 545, + 545 + ], + "type": "text", + "content": " and faces " + }, + { + "bbox": [ + 304, + 354, + 545, + 545 + ], + "type": "inline_equation", + "content": "F \\in \\{1, \\dots, n\\}^{m \\times 3}" + }, + { + "bbox": [ + 304, + 354, + 545, + 545 + ], + "type": "text", + "content": ". Extracting an explicit texture map from our neural textures is trivial, making our representation compatible with existing graphics pipelines. Furthermore, using texture maps enables producing high resolution textures (i.e., sub-triangle values) without a computationally expensive high resolution mesh. A straight-forward approach of directly optimizing texture values results in texture maps with artifacts and noise (see supplemental material). To mitigate this, we leverage the smoothness of neural networks [46]. However, a straight-forward application of an MLP to a 2D texture map " + }, + { + "bbox": [ + 304, + 354, + 545, + 545 + ], + "type": "inline_equation", + "content": "((u, v) \\to (r, g, b))" + }, + { + "bbox": [ + 304, + 354, + 545, + 545 + ], + "type": "text", + "content": " is inherently invalid at the texture seams (e.g., erroneous interpolations at boundaries), which may lead to texture discontinuities on the rendered mesh." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "text", + "content": "We instead formulate our MLPs to operate on 3D coordinates leading to predictions in 3D that are inherently smooth and without any seam discontinuities. To do so, we invert the UV mapping " + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\psi(x,y,z) = (u,v)" + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "text", + "content": " to get a map " + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\psi^{-1}(u,v) = (x,y,z)" + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "text", + "content": " from 2D texels to 3D coordinates on the surface of the mesh. We optimize our MLPs with the 3D coordinates obtained from the 2D texel centers. We employ two primary networks, one for localization and one for texturing. 
Our neural localization MLP is a function " + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta}" + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "text", + "content": " that maps a 3D coordinate " + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{x} = (x,y,z)" + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "text", + "content": " to a probability " + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "text", + "content": " (which we map back to a 2D localization map). Similarly, our neural texture MLP is a function " + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\phi}" + }, + { + "bbox": [ + 304, + 545, + 546, + 713 + ], + "type": "text", + "content": " that takes in a 3D coordinate and outputs an RGB value (which we map back to a 2D texture image). Our architecture first passes the 3D" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4475" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 70, + 521, + 235 + ], + "blocks": [ + { + "bbox": [ + 72, + 70, + 521, + 235 + ], + "lines": [ + { + "bbox": [ + 72, + 70, + 521, + 235 + ], + "spans": [ + { + "bbox": [ + 72, + 70, + 521, + 235 + ], + "type": "image", + "image_path": "caf584c44831cf110e828a96530c71e57e340c15fa5cf625b4abdb87e9fe1b93.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 236, + 546, + 258 + ], + "lines": [ + { + "bbox": [ + 46, + 236, + 546, + 258 + ], + "spans": [ + { + "bbox": [ + 46, + 236, + 546, + 258 + ], + "type": "text", + "content": "Figure 4. 3D Paintbrush produces highly detailed textures and localizations for a diverse range of meshes and prompts. Our method synthesizes meaningful local edits on shapes, demonstrating both global and local part-level understanding." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 278, + 289, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 278, + 289, + 410 + ], + "spans": [ + { + "bbox": [ + 46, + 278, + 289, + 410 + ], + "type": "text", + "content": "coordinates through positional encoding [52] before going through a 6-layer MLP. This formulation of using MLPs defined on the 3D surface leads to a neural texture which produces smoothly varying outputs in 3D, even though our 2D texture maps have discontinuities at the texture seams. The smoothness provided by the MLPs reduces artifacts, produces less noisy textures, and provides super resolution capabilities. Although we optimize our MLPs with 3D coordinates mapped from 2D texel centers, during inference, we may query the MLP for any value (i.e. sub-texels that enable super resolution texture maps even across seams)." 
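As a concrete illustration of the neural fields described above, here is a minimal PyTorch-style sketch (not the authors' implementation): a positional encoding of 3D surface coordinates followed by a small MLP, queried at points recovered from 2D texel centers via the inverted UV map. `NeuralField`, `positional_encoding`, `texel_to_xyz`, the network width, and the map resolution are illustrative placeholders, not identifiers or values from the paper.

```python
# Illustrative sketch of coordinate MLPs in the spirit of Sec. 3.1 (assumptions noted above).
import torch
import torch.nn as nn

def positional_encoding(xyz: torch.Tensor, num_freqs: int = 6) -> torch.Tensor:
    """Map (..., 3) coordinates to sin/cos features at several frequencies."""
    freqs = 2.0 ** torch.arange(num_freqs) * torch.pi
    angles = xyz[..., None] * freqs                      # (..., 3, num_freqs)
    feats = torch.cat([angles.sin(), angles.cos()], dim=-1)
    return feats.flatten(start_dim=-2)                   # (..., 3 * 2 * num_freqs)

class NeuralField(nn.Module):
    """6-layer MLP over encoded 3D coordinates; out_dim=1 gives a localization
    probability, out_dim=3 gives an RGB texture value."""
    def __init__(self, out_dim: int, num_freqs: int = 6, width: int = 256):
        super().__init__()
        in_dim = 3 * 2 * num_freqs
        layers = []
        for _ in range(5):
            layers += [nn.Linear(in_dim, width), nn.ReLU()]
            in_dim = width
        layers += [nn.Linear(width, out_dim)]
        self.net = nn.Sequential(*layers)

    def forward(self, xyz: torch.Tensor) -> torch.Tensor:
        return torch.sigmoid(self.net(positional_encoding(xyz)))

# texel_to_xyz stands in for psi^{-1}(u, v): 3D surface points at each texel center.
H = W = 128                                              # real texture maps would be larger
texel_to_xyz = torch.rand(H * W, 3)
localization_mlp = NeuralField(out_dim=1)                # F_theta: xyz -> probability
texture_mlp = NeuralField(out_dim=3)                     # F_phi:   xyz -> RGB
L_map = localization_mlp(texel_to_xyz).reshape(H, W, 1)
T_map = texture_mlp(texel_to_xyz).reshape(H, W, 3)
```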
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 415, + 254, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 415, + 254, + 427 + ], + "spans": [ + { + "bbox": [ + 47, + 415, + 254, + 427 + ], + "type": "text", + "content": "3.2. Visual Guidance for Localized Textures" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 434, + 287, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 434, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 46, + 434, + 287, + 495 + ], + "type": "text", + "content": "We guide our optimization using three distinct losses that encourage both the localization and texture towards visually desirable results. Each loss is visualized as a branch in Fig. 3 - top branch: localization loss, middle branch: local texture map loss, bottom branch: background loss." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "spans": [ + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": "Local texture map loss. First, we obtain our localization map " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "L_{map} \\in [0,1]^{H \\times W}" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " from the neural localization MLP " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "L_{map} = \\psi(\\mathcal{F}_{\\theta}(\\mathbf{x}))" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " and the texture map " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "T_{map} \\in [0,1]^{H \\times W \\times 3}" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " from the neural texture MLP " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "T_{map} = \\psi(\\mathcal{F}_{\\phi}(\\mathbf{x}))" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": ". We use the localization " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "L_{map}" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " to mask the texture " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "T_{map}" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " to get a local texture map " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "T_{map}'" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " which only contains textures inside the localization region. 
We apply the masked texture " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "T_{map}'" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " to our mesh " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " to get a locally-textured mesh " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "M_t" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " and construct a local-texture text prompt " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "y_t" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " from the input text " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " (middle branch Fig. 3). We then supervise our optimization using a text-conditioned visual loss (cascaded score distillation, see Sec. 3.4) on " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "M_t" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "inline_equation", + "content": "y_t" + }, + { + "bbox": [ + 46, + 497, + 287, + 674 + ], + "type": "text", + "content": ". By applying a visual loss to the localization-masked texture, we get informative and meaningful gradients for both our texture MLP and our localization MLP." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Localization loss. Using only the texture loss allows for trivial solutions where the mask contains a region that includes, but is much larger than, the desired localization re" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "spans": [ + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "text", + "content": "gion. To encourage the localization region to be meaningful, we employ a visual loss on the localization region in isolation (similar to 3D Highlighter [10]). Specifically, we blend a (yellow) color onto the mesh according to the localization map to get a localization-colored mesh " + }, + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "inline_equation", + "content": "M_{l}" + }, + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "text", + "content": " (top branch Fig. 3). From the text input " + }, + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "text", + "content": ", we derive a target localization prompt " + }, + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "inline_equation", + "content": "y_{l}" + }, + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "text", + "content": " describing the localized region in the format used in 3D Highlighter [10]. 
We then use " + }, + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "inline_equation", + "content": "M_{l}" + }, + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "inline_equation", + "content": "y_{l}" + }, + { + "bbox": [ + 304, + 278, + 545, + 410 + ], + "type": "text", + "content": " as input to the text-conditioned visual loss. Using this loss significantly improves the detail and quality of the localization (see supplemental material)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "spans": [ + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": "Background loss. Using only the top two branches in Fig. 3 leads to broader localizations that incorporate superfluous elements characteristic of the input 3D model (i.e. a bill on a duck), in addition to the desired localization region (see supplemental material). To mitigate this, we learn a background texture " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "B_{map} \\in [0,1]^{H \\times W \\times 3}" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": " that intentionally contains these characteristic elements of the input 3D shape in the inverse of the localization region " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "1 - L_{map}" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": " (the area outside the localization region). Specifically, we blend both the background texture " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "B_{map}" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": " (using " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "1 - L_{map}" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": ") and a yellow color (using " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "L_{map}" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": ") to get a composited texture " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "B_{map}' = L_{map}(\\mathrm{YELLOW}) + (1 - L_{map})B_{map}" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": " (bottom branch in Fig. 3). 
We apply the composited texture " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "B_{map}'" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": " to the mesh to get " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "M_b" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": " and then supervise the background MLP using a visual loss conditioned on both " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "M_b" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": " and a target text " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "y_b" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": " (derived from " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": "). The target text " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "y_b" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": " describes the generic object class (i.e. 'cow' in Fig. 3) with a (yellow) colored localization region. See supplemental material for more details. The third loss directly encourages incorporating the superfluous elements in the background texture which discourages the localization region from incorporating such undesired elements (since " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "L_{map}" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "inline_equation", + "content": "1 - L_{map}" + }, + { + "bbox": [ + 304, + 414, + 546, + 688 + ], + "type": "text", + "content": " are inverse masks)." 
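A minimal sketch of how the three composites described above could be assembled from the localization, texture, and background maps before rendering and scoring each branch. The tensors `L_map`, `T_map`, `B_map`, the `YELLOW` constant, and the gray stand-in for the untextured mesh color are illustrative assumptions, not values taken from the paper.

```python
# Illustrative sketch of the three branches in Fig. 3 (masked texture, highlighted
# localization, background composite), assuming H x W maps from the neural fields.
import torch

H, W = 128, 128
L_map = torch.rand(H, W, 1)                  # localization probabilities in [0, 1]
T_map = torch.rand(H, W, 3)                  # texture colors
B_map = torch.rand(H, W, 3)                  # background colors
YELLOW = torch.tensor([1.0, 1.0, 0.0])
BASE = torch.full((H, W, 3), 0.5)            # stand-in for the mesh's plain color

# Middle branch: texture masked to the localization region (local texture map loss).
T_masked = L_map * T_map

# Top branch: yellow blended onto the surface by localization probability
# (localization loss on the highlighted region in isolation).
L_colored = L_map * YELLOW + (1.0 - L_map) * BASE

# Bottom branch: background texture outside the region, yellow inside,
# i.e. B'_map = L_map * YELLOW + (1 - L_map) * B_map (background loss).
B_composited = L_map * YELLOW + (1.0 - L_map) * B_map

# Each composite is applied to the mesh, rendered, and scored with the
# text-conditioned distillation loss against its prompt (y_t, y_l, y_b).
```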
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 689, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 546, + 713 + ], + "type": "text", + "content": "Key to our method is the simultaneous optimization of the localization map (that specifies the edit region) and the" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4476" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 68, + 112, + 131 + ], + "blocks": [ + { + "bbox": [ + 48, + 68, + 112, + 131 + ], + "lines": [ + { + "bbox": [ + 48, + 68, + 112, + 131 + ], + "spans": [ + { + "bbox": [ + 48, + 68, + 112, + 131 + ], + "type": "image", + "image_path": "d4f6be7c7b59054a75210a356f66f171e829da3af540ba3a770c61aa11a695ad.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 132, + 111, + 141 + ], + "lines": [ + { + "bbox": [ + 55, + 132, + 111, + 141 + ], + "spans": [ + { + "bbox": [ + 55, + 132, + 111, + 141 + ], + "type": "text", + "content": "Simultaneous" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 121, + 68, + 184, + 131 + ], + "blocks": [ + { + "bbox": [ + 121, + 68, + 184, + 131 + ], + "lines": [ + { + "bbox": [ + 121, + 68, + 184, + 131 + ], + "spans": [ + { + "bbox": [ + 121, + 68, + 184, + 131 + ], + "type": "image", + "image_path": "6c43ffa6ff52e17ea8cc59bb1f2b7fafb519cdf5dbbeec3fd840d64f9c560b8d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 133, + 132, + 168, + 141 + ], + "lines": [ + { + "bbox": [ + 133, + 132, + 168, + 141 + ], + "spans": [ + { + "bbox": [ + 133, + 132, + 168, + 141 + ], + "type": "text", + "content": "In series" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 192, + 68, + 287, + 131 + ], + "blocks": [ + { + "bbox": [ + 192, + 68, + 287, + 131 + ], + "lines": [ + { + "bbox": [ + 192, + 68, + 287, + 131 + ], + "spans": [ + { + "bbox": [ + 192, + 68, + 287, + 131 + ], + "type": "image", + "image_path": "25064e463f681c2bd66cd2cf471c738429508d746c077bed6c2ca5f90c4256b1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 132, + 264, + 143 + ], + "lines": [ + { + "bbox": [ + 214, + 132, + 264, + 143 + ], + "spans": [ + { + "bbox": [ + 214, + 132, + 264, + 143 + ], + "type": "text", + "content": "Independent" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 146, + 287, + 256 + ], + "lines": [ + { + "bbox": [ + 46, + 146, + 287, + 256 + ], + "spans": [ + { + "bbox": [ + 46, + 146, + 287, + 256 + ], + "type": "text", + "content": "Figure 5. Impact of simultaneous optimization. Simultaneously optimizing the localization and texture (left) results in higher-detailed textures which effectively conform to the predicted localization. 
If we first optimize the localization, then optimize the texture within the localization region (middle), both the localization and texture are less detailed. Independent (right): if we optimize the localization independently (independent: left) and the texture independently (independent: middle), the texture does not align with the localization and thus the masked texture contains fringe artifacts (independent: right)." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 277, + 287, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 277, + 287, + 350 + ], + "spans": [ + { + "bbox": [ + 46, + 277, + 287, + 350 + ], + "type": "text", + "content": "texture map that conforms to it. This approach improves the quality of both the localization and the stylization. The texture map drives the localization to become more detailed and intricate, while the localization explicitly masks the texture, ensuring a coherent local style which respects the localization boundary (see Fig. 5)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 358, + 264, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 358, + 264, + 370 + ], + "spans": [ + { + "bbox": [ + 47, + 358, + 264, + 370 + ], + "type": "text", + "content": "3.3. Score Distillation and Cascaded Diffusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 376, + 287, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 376, + 287, + 472 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 287, + 472 + ], + "type": "text", + "content": "Score Distillation. To guide our local stylization, we leverage powerful pretrained text-to-image diffusion models. Existing approaches use these models in conjunction with Score Distillation Sampling (SDS) to supervise text-driven optimizations [44, 58]. 
For each iteration of an optimization of an image " + }, + { + "bbox": [ + 46, + 376, + 287, + 472 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 376, + 287, + 472 + ], + "type": "text", + "content": " that we want to supervise with diffusion model " + }, + { + "bbox": [ + 46, + 376, + 287, + 472 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 46, + 376, + 287, + 472 + ], + "type": "text", + "content": " and text prompt " + }, + { + "bbox": [ + 46, + 376, + 287, + 472 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 376, + 287, + 472 + ], + "type": "text", + "content": ", SDS [44] proposes the following gradient:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 80, + 483, + 287, + 496 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 483, + 287, + 496 + ], + "spans": [ + { + "bbox": [ + 80, + 483, + 287, + 496 + ], + "type": "interline_equation", + "content": "\\nabla_ {x} \\mathcal {L} _ {S D S} (\\phi , x, y) = w (t) \\left(\\epsilon_ {\\phi} \\left(z _ {t}, t, y\\right) - \\epsilon\\right) \\tag {1}", + "image_path": "ffa8ed127f5d9ae1958c9723e5f7d9f5633136c3e47f656d89ad07131264479d.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "spans": [ + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": "where timestep " + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "inline_equation", + "content": "t \\sim \\mathcal{U}(\\{1, \\dots, T\\})" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": " is sampled uniformly and noise " + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "inline_equation", + "content": "\\epsilon \\sim \\mathcal{N}(\\mathbf{0}, \\mathbf{I})" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": " is Gaussian. The noisy image " + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": " is obtained by applying a timestep-dependent scaling of " + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": " to the image " + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": ". 
The weight " + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "inline_equation", + "content": "w(t)" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": " is a timestep-dependent weighting function and " + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\phi}(z_{t}, t, y)" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": " is the noise predicted by the diffusion model conditioned on " + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": ". Note that Eq. (1) omits the U-Net Jacobian term (not needed in practice [44]). This objective is similar to the objective used in diffusion model training, however, instead of optimizing the weights of the model, the gradient is applied to the image " + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "text", + "content": " Cascaded Diffusion. Text-to-image diffusion models often contain multiple cascaded stages at different resolutions in order to achieve high resolution outputs [21]. These cascaded diffusion models consist of a base stage " + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\phi^1" + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "text", + "content": " (stage 1) and some number of super-resolution stages " + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\phi^{i > 1}" + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "text", + "content": " (stages " + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "inline_equation", + "content": "2 - N" + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "text", + "content": "). 
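A minimal sketch of the SDS gradient in Eq. (1), using a stand-in noise predictor and noise schedule in place of a real pretrained diffusion model; `noise_predictor`, `alphas`, `sigmas`, and the example prompt are placeholders for illustration only.

```python
# Illustrative sketch of Eq. (1):
#   grad_x L_SDS = w(t) * (eps_phi(z_t, t, y) - eps),  z_t = alpha_t * x + sigma_t * eps.
import torch

T = 1000
alphas = torch.linspace(0.999, 0.01, T)          # placeholder noise schedule
sigmas = (1.0 - alphas ** 2).sqrt()

def noise_predictor(z_t, t, y):                  # stand-in for eps_phi(z_t, t, y)
    return torch.randn_like(z_t)

def sds_grad(x: torch.Tensor, y: str, w=lambda t: 1.0) -> torch.Tensor:
    t = torch.randint(0, T, ())                  # t ~ U({1, ..., T})
    eps = torch.randn_like(x)                    # eps ~ N(0, I)
    z_t = alphas[t] * x + sigmas[t] * eps        # timestep-dependent scaling of eps
    return w(t) * (noise_predictor(z_t, t, y) - eps)

# The gradient is applied directly to the rendered image x (skipping the U-Net
# Jacobian) and then backpropagated through the differentiable renderer to the MLP.
x = torch.rand(1, 3, 64, 64)
grad = sds_grad(x, "a cow wearing a colorful polo shirt")
```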
The base stage is identical to a standard diffusion model, predicting noise " + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\phi^1}(z_t^1,t,y)" + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "text", + "content": " conditioned on" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": "noisy image " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "z_{t}^{1}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ", timestep " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ", and text prompt " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ". However, the super-resolution stages are conditioned on two differently-noised images: one at the current resolution (" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "z_{t}^{i}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " with timestep " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " and noise " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "\\epsilon^{i}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ") and one at the lower resolution (" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "z_{s}^{i-1}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " with timestep " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " and noise " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "\\epsilon^{i-1}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": "). The predicted noise for the super-resolution stage is given by " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\phi^{i}}(z_{t}^{i}, t, z_{s}^{i-1}, s, y)" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ". During inference, the lower resolution input image is obtained by adding noise to the output of the prior stage. However in training, both the high and low resolution images are obtained by sampling a single image from the training dataset and rescaling it to different resolutions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 204, + 545, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 204, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 304, + 204, + 545, + 348 + ], + "type": "text", + "content": "Standard SDS [44] only utilizes the first, low-resolution base stage, thus neglecting the full potential of the cascaded model. 
It is not immediately obvious how to formulate a score distillation technique for all stages of a cascaded diffusion model since super-resolution stages take multiple resolution inputs and, at inference, they require a fully denoised output from the prior stage [21]. We take inspiration from SDS and use the perspective of diffusion training as opposed to inference, and extend it to the training of cascaded diffusion models. To our knowledge, we are the first to consider score distillation using the cascaded super-resolution stages." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 356, + 457, + 368 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 356, + 457, + 368 + ], + "spans": [ + { + "bbox": [ + 306, + 356, + 457, + 368 + ], + "type": "text", + "content": "3.4. Cascaded Score Distillation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 374, + 545, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 374, + 545, + 518 + ], + "spans": [ + { + "bbox": [ + 304, + 374, + 545, + 518 + ], + "type": "text", + "content": "CSD overview. Our technique, referred to as Cascaded Score Distillation (CSD), simultaneously distills scores at multiple resolutions in a cascaded fashion (illustrated in Fig. 6). Since the stages of a cascaded diffusion model " + }, + { + "bbox": [ + 304, + 374, + 545, + 518 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 304, + 374, + 545, + 518 + ], + "type": "text", + "content": " are trained entirely independently of one another, our insight is to formulate a distillation loss that incorporates gradients from all stages " + }, + { + "bbox": [ + 304, + 374, + 545, + 518 + ], + "type": "inline_equation", + "content": "(\\phi^1,\\dots,\\phi^N)" + }, + { + "bbox": [ + 304, + 374, + 545, + 518 + ], + "type": "text", + "content": " simultaneously. We observe that different stages of the cascaded model provide different levels of granularity and global understanding (Fig. 7). Controlling the influence of each stage provides control over the details and the corresponding localization of the supervision (Fig. 8)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": "CSD Formalization. 
Consider a mesh " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "M_{\\theta}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " with a neural texture parameterized by an MLP " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " (This MLP could be either " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\phi}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\psi}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " in Sec. 3.2). We first render " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "M_{\\theta}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " different resolutions using a differentiable renderer " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " to get multiple images " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "g(M_{\\theta}) = \\mathbf{x} = \\{x^{1}\\dots x^{N}\\}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "x^{i}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " is the same resolution as stage " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\phi^i" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ". For the base stage " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\phi^1" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ", we perform standard SDS using Eq. (1) on " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "x^{1}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " and prompt " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " to get a gradient " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\nabla_{x^1}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ". 
For all stages " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\phi^i" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "i > 1" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ", we sample two timesteps " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "t,s\\sim \\mathcal{U}(\\{1,\\ldots ,T\\})" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ", noise " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\epsilon^i\\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " at the resolution of stage " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\phi^i" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ", and noise " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\epsilon^{i - 1}\\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " at the resolution of stage " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\phi^{i - 1}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ". Using timestep-dependent schedule coefficients " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ", we compute a noisy image " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "z_{t}^{i} = \\alpha_{t}x^{i} + \\sigma_{t}\\epsilon^{i}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " by applying a timestep-dependent scaling of " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\epsilon^i" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " to the image " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "x^i" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ". Similarly, we compute " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "z_{s}^{i - 1} = \\alpha_{s}x^{i - 1} + \\sigma_{s}\\epsilon^{i - 1}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " by applying a timestep-dependent scaling of " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\epsilon^{i - 1}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " to the image " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "x^{i - 1}" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": ". 
We then use" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4477" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 68, + 275, + 218 + ], + "blocks": [ + { + "bbox": [ + 61, + 68, + 275, + 218 + ], + "lines": [ + { + "bbox": [ + 61, + 68, + 275, + 218 + ], + "spans": [ + { + "bbox": [ + 61, + 68, + 275, + 218 + ], + "type": "image", + "image_path": "dc0855ebbe75fc13f60c0283528b86c600c64f2abc32f487d20fcae9e5b6718f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 225, + 287, + 291 + ], + "lines": [ + { + "bbox": [ + 46, + 225, + 287, + 291 + ], + "spans": [ + { + "bbox": [ + 46, + 225, + 287, + 291 + ], + "type": "text", + "content": "Figure 6. Cascaded Score Distillation (CSD). We simultaneously distill scores across multiple stages of a cascaded diffusion model in order to leverage both the global awareness of the first stage and the higher level of detail contained in later stages. The difference between the predicted noise and sampled noise is the image gradient for each stage." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "spans": [ + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "inline_equation", + "content": "\\phi^i" + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "text", + "content": " to predict noise " + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\phi^i}(z_t^i,t,z_s^{i - 1},s,y)" + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "text", + "content": " conditioned on the noisy images, timesteps, and text prompt. 
Our gradient " + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "inline_equation", + "content": "\\nabla_{x^i}" + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "text", + "content": " for stage " + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "inline_equation", + "content": "\\phi^i" + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "inline_equation", + "content": "i > 1" + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "text", + "content": " is the difference between the predicted noise and the (higher-resolution) sampled noise " + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "inline_equation", + "content": "\\epsilon^i" + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "text", + "content": " weighted by the timestep-dependent function " + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "inline_equation", + "content": "w(t)" + }, + { + "bbox": [ + 46, + 310, + 286, + 370 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 92, + 376, + 287, + 407 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 376, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 92, + 376, + 287, + 407 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\nabla_ {x ^ {i}} \\mathcal {L} _ {C S D ^ {i}} \\left(\\phi^ {i}, x ^ {i}, x ^ {i - 1}, y\\right) = \\\\ w (t) \\left(\\epsilon_ {\\phi^ {i}} \\left(z _ {t} ^ {i}, t, z _ {s} ^ {i - 1}, s, y\\right) - \\epsilon^ {i}\\right). \\tag {2} \\\\ \\end{array}", + "image_path": "7e4ad4f0f176261070b31105cfc88343ffb7d1dc80b4c10e78bf83780543e8cd.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 413, + 287, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 413, + 287, + 473 + ], + "spans": [ + { + "bbox": [ + 46, + 413, + 287, + 473 + ], + "type": "text", + "content": "With all gradients " + }, + { + "bbox": [ + 46, + 413, + 287, + 473 + ], + "type": "inline_equation", + "content": "\\nabla_{x^1},\\ldots ,\\nabla_{x^N}" + }, + { + "bbox": [ + 46, + 413, + 287, + 473 + ], + "type": "text", + "content": " computed, we weight each gradient " + }, + { + "bbox": [ + 46, + 413, + 287, + 473 + ], + "type": "inline_equation", + "content": "\\nabla_{x^i}" + }, + { + "bbox": [ + 46, + 413, + 287, + 473 + ], + "type": "text", + "content": " with a user defined " + }, + { + "bbox": [ + 46, + 413, + 287, + 473 + ], + "type": "inline_equation", + "content": "\\lambda^i" + }, + { + "bbox": [ + 46, + 413, + 287, + 473 + ], + "type": "text", + "content": " to provide control over the impact of the supervision from each stage of the cascaded model. 
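A minimal sketch of the per-stage CSD gradients and their weighted combination (Eqs. (2)-(3)): standard SDS on the base-stage render, plus super-resolution-stage gradients conditioned on a noised lower-resolution render. `base_eps`, `sr_eps`, the schedule, the stage resolutions, and the weights `lambdas` are illustrative assumptions, not the authors' code or settings.

```python
# Illustrative sketch of Cascaded Score Distillation, Eqs. (2)-(3).
import torch

T = 1000
alphas = torch.linspace(0.999, 0.01, T)                    # placeholder schedule
sigmas = (1.0 - alphas ** 2).sqrt()

def base_eps(z_t, t, y):                                   # stand-in for eps_{phi^1}
    return torch.randn_like(z_t)

def sr_eps(z_t, t, z_s_prev, s, y):                        # stand-in for eps_{phi^i}, i > 1
    return torch.randn_like(z_t)

def csd_grads(renders, y, lambdas, w=lambda t: 1.0):
    """renders[i] is the render x^i at the resolution of stage i (x^1 ... x^N)."""
    grads = []
    # Base stage: standard SDS on the low-resolution render x^1.
    t = torch.randint(0, T, ())
    eps1 = torch.randn_like(renders[0])
    z_t1 = alphas[t] * renders[0] + sigmas[t] * eps1
    grads.append(lambdas[0] * w(t) * (base_eps(z_t1, t, y) - eps1))
    # Super-resolution stages: also condition on a noised lower-resolution render.
    for i in range(1, len(renders)):
        t = torch.randint(0, T, ())
        s = torch.randint(0, T, ())
        eps_i = torch.randn_like(renders[i])
        eps_prev = torch.randn_like(renders[i - 1])
        z_t_i = alphas[t] * renders[i] + sigmas[t] * eps_i
        z_s_prev = alphas[s] * renders[i - 1] + sigmas[s] * eps_prev
        grads.append(lambdas[i] * w(t) * (sr_eps(z_t_i, t, z_s_prev, s, y) - eps_i))
    return grads   # each grads[i] is applied to x^i and backpropagated to theta

renders = [torch.rand(1, 3, 64, 64), torch.rand(1, 3, 256, 256)]   # example x^1, x^2
grads = csd_grads(renders, "a cow wearing a Superman emblem", lambdas=[1.0, 0.3])
```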
Thus our full gradient with respect to any given neural texture " + }, + { + "bbox": [ + 46, + 413, + 287, + 473 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 413, + 287, + 473 + ], + "type": "text", + "content": " can be described by:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 479, + 287, + 554 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 479, + 287, + 554 + ], + "spans": [ + { + "bbox": [ + 56, + 479, + 287, + 554 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\nabla_ {\\theta} \\mathcal {L} _ {C S D} (\\phi , \\mathbf {x} = g (\\theta), y) = \\\\ \\lambda^ {1} \\nabla_ {x ^ {1}} \\mathcal {L} _ {S D S} (\\phi^ {1}, x ^ {1}, y) \\frac {\\partial x ^ {1}}{\\partial \\theta} \\\\ + \\sum_ {i = 2} ^ {N} \\lambda^ {i} \\nabla_ {x ^ {i}} \\mathcal {L} _ {C S D ^ {i}} \\left(\\phi^ {i}, x ^ {i}, x ^ {i - 1}, y\\right) \\frac {\\partial x ^ {i}}{\\partial \\theta}. \\tag {3} \\\\ \\end{array}", + "image_path": "ea28369c23a75bbfe31f1a88245c87cb3af954cc944c68ac94b7aaa854d456fc.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "spans": [ + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "text", + "content": "Note that just as in SDS [44], we can avoid computing the U-Net Jacobian term " + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "inline_equation", + "content": "\\frac{\\partial\\epsilon_{\\phi}(z_t^i,t,z_s^{i - 1},s,y)}{z_t^i}" + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "text", + "content": " (not shown in Eq. (3)) since each stage is entirely independent and our gradient is only with respect to the high-resolution image " + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "inline_equation", + "content": "x^i" + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "text", + "content": ". Thus, we directly apply " + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "inline_equation", + "content": "\\lambda^i\\nabla_{x^i}" + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "text", + "content": " to the image " + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "inline_equation", + "content": "x^i" + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "text", + "content": " without having to compute the costly backpropagation through the U-Net. Using the gradient " + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}\\mathcal{L}_{CSD}(\\phi ,\\mathbf{x} = g(\\theta),y)" + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "text", + "content": ", we update the weights of our MLP " + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 559, + 287, + 659 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 670, + 127, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 127, + 682 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 127, + 682 + ], + "type": "text", + "content": "4. 
Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "content": "We demonstrate the capabilities of 3D Paintbrush on a wide variety of meshes (from different sources [55, 56, 61, 65])" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 307, + 69, + 383, + 137 + ], + "blocks": [ + { + "bbox": [ + 307, + 69, + 383, + 137 + ], + "lines": [ + { + "bbox": [ + 307, + 69, + 383, + 137 + ], + "spans": [ + { + "bbox": [ + 307, + 69, + 383, + 137 + ], + "type": "image", + "image_path": "33f1240fa7193e069ba0cbfc01ea4c297c50b0a5c68a2a9bbdf303837df82ec6.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 316, + 138, + 368, + 149 + ], + "lines": [ + { + "bbox": [ + 316, + 138, + 368, + 149 + ], + "spans": [ + { + "bbox": [ + 316, + 138, + 368, + 149 + ], + "type": "text", + "content": "Only stage 1" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 389, + 70, + 464, + 136 + ], + "blocks": [ + { + "bbox": [ + 389, + 70, + 464, + 136 + ], + "lines": [ + { + "bbox": [ + 389, + 70, + 464, + 136 + ], + "spans": [ + { + "bbox": [ + 389, + 70, + 464, + 136 + ], + "type": "image", + "image_path": "296857b7782a8ab5165386cf4150d05f7248ac8b10ace6c3d3d178fe938d4844.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 401, + 138, + 454, + 149 + ], + "lines": [ + { + "bbox": [ + 401, + 138, + 454, + 149 + ], + "spans": [ + { + "bbox": [ + 401, + 138, + 454, + 149 + ], + "type": "text", + "content": "Only stage 2" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 151, + 545, + 228 + ], + "lines": [ + { + "bbox": [ + 305, + 151, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 305, + 151, + 545, + 228 + ], + "type": "text", + "content": "Figure 7. Impact of cascaded stages. Different stages of the cascaded model provide different levels of granularity and global understanding. Using only the (low resolution) stage 1 model gives a low-resolution result in the correct location. While the (high resolution) stage 2 model gives a high-resolution result, it is placed in the incorrect location. Our CSD simultaneously uses stage 1 and 2, resulting in a highly-detailed texture in the appropriate location." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 470, + 70, + 504, + 136 + ], + "blocks": [ + { + "bbox": [ + 470, + 70, + 504, + 136 + ], + "lines": [ + { + "bbox": [ + 470, + 70, + 504, + 136 + ], + "spans": [ + { + "bbox": [ + 470, + 70, + 504, + 136 + ], + "type": "image", + "image_path": "b135e81205c6c98e66925fc16e84c504e31b85f2c854ebad06f74405b10ea708.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 501, + 138, + 522, + 148 + ], + "lines": [ + { + "bbox": [ + 501, + 138, + 522, + 148 + ], + "spans": [ + { + "bbox": [ + 501, + 138, + 522, + 148 + ], + "type": "text", + "content": "CSD" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 504, + 71, + 546, + 111 + ], + "blocks": [ + { + "bbox": [ + 504, + 71, + 546, + 111 + ], + "lines": [ + { + "bbox": [ + 504, + 71, + 546, + 111 + ], + "spans": [ + { + "bbox": [ + 504, + 71, + 546, + 111 + ], + "type": "image", + "image_path": "5b8460f91c44d63517f33c80a08b86d1c0277e231a606456da0b8a11efa6dfe1.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 248, + 545, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 248, + 545, + 369 + ], + "spans": [ + { + "bbox": [ + 304, + 248, + 545, + 369 + ], + "type": "text", + "content": "and prompts. We highlight key properties of our method such as localization precision and edit specificity. We then demonstrate the importance and capabilities of our CSD loss including its high resolution supervision and intuitive controls. Finally, we evaluate our system against other localization and editing baselines and ablate the key components of our method. In our experiments, we use DeepFloyd IF [3] for our cascaded model. Our unoptimized PyTorch [43] implementation takes 4 hours on a standard A40 GPU, typically achieving satisfactory results within 2 hours." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 375, + 459, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 375, + 459, + 388 + ], + "spans": [ + { + "bbox": [ + 306, + 375, + 459, + 388 + ], + "type": "text", + "content": "4.1. Properties of 3D Paintbrush" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 396, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 304, + 396, + 545, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 396, + 545, + 539 + ], + "spans": [ + { + "bbox": [ + 304, + 396, + 545, + 539 + ], + "type": "text", + "content": "3D Paintbrush generality. 3D Paintbrush is capable of producing highly detailed localizations and textures on a diverse collection of meshes and prompts (Fig. 4). Our method is not restricted to any category of meshes and we show results on organic and manufactured shapes. Furthermore, our local textures can be specified with open vocabulary text descriptions and are not limited to any predefined categories or constraints. This includes \"out-of-domain\" local textures such as the rainbow shinguards on a giraffe which are not naturally seen in the context of these objects, yet are precisely placed in semantically meaningful locations with highly detailed textures." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 543, + 545, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 543, + 545, + 674 + ], + "spans": [ + { + "bbox": [ + 304, + 543, + 545, + 674 + ], + "type": "text", + "content": "3D Paintbrush precision and composition. 3D Paintbrush produces precise localizations and highly-detailed textures that effectively adhere to these predicted localizations (see Fig. 2). The tight coupling between the localization and texture (see the gold chain necklace in Fig. 1) enables seamless composition of multiple local textures simultaneously on the same mesh without any layering artifacts. For example, the sharp localization boundary of the \"Tie-dye apron\" (in Fig. 2) allows us to composite this local texture on top of other textures without obstructing these textures in regions outside of the apron's boundary." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "3D Paintbrush specificity and effectiveness. 3D Paintbrush produces accurate and high resolution local edits that closely adhere to the text-specification (see Fig. 10). Our" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4478" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 69, + 108, + 137 + ], + "blocks": [ + { + "bbox": [ + 72, + 69, + 108, + 137 + ], + "lines": [ + { + "bbox": [ + 72, + 69, + 108, + 137 + ], + "spans": [ + { + "bbox": [ + 72, + 69, + 108, + 137 + ], + "type": "image", + "image_path": "c728d6c82ddb8cb58dd10d0cfef669aa55ac036d8bd866f1efbf1cf7c12a1748.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 146, + 547, + 191 + ], + "lines": [ + { + "bbox": [ + 46, + 146, + 547, + 191 + ], + "spans": [ + { + "bbox": [ + 46, + 146, + 547, + 191 + ], + "type": "text", + "content": "Figure 8. Granular control with CSD. Varying the weight between stage 1 and stage 2 results in control over the details and corresponding localization. Only using stage 1 (leftmost) is rather coarse; only using stage 2 (rightmost) is highly detailed with an incorrect localization. Increasing the stage 2 weight (moving left to right) progressively increases the detail and granularity of the supervision, enabling smooth and meaningful interpolation between stage 1 and 2." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 113, + 70, + 149, + 137 + ], + "blocks": [ + { + "bbox": [ + 113, + 70, + 149, + 137 + ], + "lines": [ + { + "bbox": [ + 113, + 70, + 149, + 137 + ], + "spans": [ + { + "bbox": [ + 113, + 70, + 149, + 137 + ], + "type": "image", + "image_path": "2bfdaa1e92a3b054262c87e40edf7656be061e49122935b113dc0a310c900c99.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 155, + 70, + 190, + 137 + ], + "blocks": [ + { + "bbox": [ + 155, + 70, + 190, + 137 + ], + "lines": [ + { + "bbox": [ + 155, + 70, + 190, + 137 + ], + "spans": [ + { + "bbox": [ + 155, + 70, + 190, + 137 + ], + "type": "image", + "image_path": "710419ad47e59698d0a19e559bdd3518608098670ea2964f39f818d032fe4942.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 196, + 70, + 232, + 137 + ], + "blocks": [ + { + "bbox": [ + 196, + 70, + 232, + 137 + ], + "lines": [ + { + "bbox": [ + 196, + 70, + 232, + 137 + ], + "spans": [ + { + "bbox": [ + 196, + 70, + 232, + 137 + ], + "type": "image", + "image_path": "3814c9de18a8a1812bec28f904d96706c1073a0823d5b79423d07f0ae2513bc2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 238, + 70, + 273, + 137 + ], + "blocks": [ + { + "bbox": [ + 238, + 70, + 273, + 137 + ], + "lines": [ + { + "bbox": [ + 238, + 70, + 273, + 137 + ], + "spans": [ + { + "bbox": [ + 238, + 70, + 273, + 137 + ], + "type": "image", + "image_path": "41eb0c5f58d6feb8ce9551a7beafa65e3893ad5870b3c73c95c19a30c276b94a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 279, + 70, + 315, + 137 + ], + "blocks": [ + { + "bbox": [ + 279, + 70, + 315, + 137 + ], + "lines": [ + { + "bbox": [ + 279, + 70, + 315, + 137 + ], + "spans": [ + { + "bbox": [ + 279, + 70, + 315, + 137 + ], + "type": "image", + "image_path": "2b792d093f6b91dc62a35ea246d0f41f25dd615c0a02226c5548175a2dcc3b09.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 320, + 70, + 355, + 137 + ], + "blocks": [ + { + "bbox": [ + 320, + 70, + 355, + 137 + ], + "lines": [ + { + "bbox": [ + 320, + 70, + 355, + 137 + ], + "spans": [ + { + "bbox": [ + 320, + 70, + 355, + 137 + ], + "type": "image", + "image_path": "102658898ed7b397ddcbbc5e58d494ee49e471b9c252e79b8635a0fe4032a853.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 361, + 70, + 396, + 137 + ], + "blocks": [ + { + "bbox": [ + 361, + 70, + 396, + 137 + ], + "lines": [ + { + "bbox": [ + 361, + 70, + 396, + 137 + ], + "spans": [ + { + "bbox": [ + 361, + 70, + 396, + 137 + ], + "type": "image", + "image_path": "1fd7698e39f3f52fbe8ad6c27a8fbb1ce42f938fba5b0ca4540a0ed3b5e4261e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 403, + 70, + 438, + 137 + ], + "blocks": [ + { + "bbox": [ + 403, + 70, + 438, + 137 + ], + "lines": [ + { + "bbox": [ + 403, + 70, + 438, + 137 + ], + "spans": [ + { + "bbox": [ + 403, + 70, + 438, + 137 + ], + "type": "image", + "image_path": 
"d0d288a63047d491fee7b9779a723b9a591773b7af8d1d948f06c86f7f11c149.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 443, + 70, + 479, + 137 + ], + "blocks": [ + { + "bbox": [ + 443, + 70, + 479, + 137 + ], + "lines": [ + { + "bbox": [ + 443, + 70, + 479, + 137 + ], + "spans": [ + { + "bbox": [ + 443, + 70, + 479, + 137 + ], + "type": "image", + "image_path": "db0157c211ca0f61d67e2037a587780ac60fa16e38abd95ffe727ad1b7a7001d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 485, + 70, + 521, + 137 + ], + "blocks": [ + { + "bbox": [ + 485, + 70, + 521, + 137 + ], + "lines": [ + { + "bbox": [ + 485, + 70, + 521, + 137 + ], + "spans": [ + { + "bbox": [ + 485, + 70, + 521, + 137 + ], + "type": "image", + "image_path": "89963c25164dfa3d19bd6d48f4b15f548fcd25730b779d9bff8c68979aecc46c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 212, + 287, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 212, + 287, + 307 + ], + "spans": [ + { + "bbox": [ + 46, + 212, + 287, + 307 + ], + "type": "text", + "content": "method's fine-grained results contain intricate details (i.e. the badge on \"Barcelona jersey\") and reflect the subtle differences in the text prompts (i.e. the \"cape\" on the dog is more tapered than the boxer \"poncho\"). This specificity allows us to produce many diverse and distinct local styles. We show multiple local edits on the same mesh for multiple different meshes, demonstrating the effectiveness of our method on diverse prompts and meshes." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 314, + 267, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 314, + 267, + 327 + ], + "spans": [ + { + "bbox": [ + 47, + 314, + 267, + 327 + ], + "type": "text", + "content": "4.2. Importance of Cascaded Score Distillation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 335, + 288, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 335, + 288, + 456 + ], + "spans": [ + { + "bbox": [ + 46, + 335, + 288, + 456 + ], + "type": "text", + "content": "Impact and granular control of CSD. Our cascaded score distillation (CSD) simultaneously distills scores at multiple resolutions in a cascaded fashion. We observe that different stages of the cascaded diffusion model give different levels of granularity and global understanding (Fig. 7). Using only the (low resolution) stage 1 model is equivalent to SDS. Though SDS produces an accurate localization and coherent texture, the result is low-resolution (see Fig. 9). Conversely, using only the (high resolution) stage 2 model gives a high-resolution result, but often fails to properly lo" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 212, + 545, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 212, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 306, + 212, + 545, + 342 + ], + "type": "text", + "content": "calize the texture leading to undesirable results. Our CSD simultaneously combines the supervision from stages 1 and 2, resulting in a highly-detailed texture in the appropriate location. Increasing the stage 2 weight (moving left to right in Fig. 
8) progressively increases the detail and granularity of the supervision, demonstrating smooth and intuitive interpolation between stage 1 and 2. In our experiments, we use a fixed weighting scheme, but this result demonstrates that our method works for a broad range of weights. Quantitative evidence supporting the importance of the CSD loss can be seen in Tab. 1." + } + ] + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 310, + 350, + 543, + 402 + ], + "blocks": [ + { + "bbox": [ + 310, + 350, + 543, + 402 + ], + "lines": [ + { + "bbox": [ + 310, + 350, + 543, + 402 + ], + "spans": [ + { + "bbox": [ + 310, + 350, + 543, + 402 + ], + "type": "table", + "html": "
<table><tr><td>Localization</td><td>SATR</td><td>3D Highlighter</td><td>Ours</td></tr><tr><td>Average Score ↑</td><td>1.89</td><td>2.03</td><td>4.80</td></tr><tr><td>Local Edits</td><td>Latent Paint</td><td>Vox-E</td><td>Ours (SDS)</td><td>Ours</td></tr><tr><td>Average Score ↑</td><td>2.14</td><td>2.15</td><td>4.06</td><td>4.88</td></tr></table>
", + "image_path": "d5758a31b7aff3df47afe38ed6a74c394772540cb9a145724b3d812e5544c52e.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 410, + 545, + 455 + ], + "lines": [ + { + "bbox": [ + 306, + 410, + 545, + 455 + ], + "spans": [ + { + "bbox": [ + 306, + 410, + 545, + 455 + ], + "type": "text", + "content": "Table 1. Quantitative evaluation. We conduct a perceptual study where users evaluate our localizations and local edits compared to baseline methods (3D Highlighter [10], SATR [2], Latent Paint [38], Vox-E [49], and our method with standard SDS loss)." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 59, + 469, + 276, + 654 + ], + "blocks": [ + { + "bbox": [ + 59, + 469, + 276, + 654 + ], + "lines": [ + { + "bbox": [ + 59, + 469, + 276, + 654 + ], + "spans": [ + { + "bbox": [ + 59, + 469, + 276, + 654 + ], + "type": "image", + "image_path": "25ad4a63821e8327d352136370d9337a779df6f421123155fcb458dda9484cfd.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 655, + 287, + 710 + ], + "lines": [ + { + "bbox": [ + 46, + 655, + 287, + 710 + ], + "spans": [ + { + "bbox": [ + 46, + 655, + 287, + 710 + ], + "type": "text", + "content": "Figure 9. Importance of super-resolution stage in CSD. Using stage 1 only (equivalent to SDS) lacks fine-grained details. Incorporating the second super-resolution cascaded stage from our CSD increases the resolution and detail. Input text prompts (from left to right): Colorful crochet shell, Cactus base, Tiger stripe shirt." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 477, + 379, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 477, + 379, + 488 + ], + "spans": [ + { + "bbox": [ + 306, + 477, + 379, + 488 + ], + "type": "text", + "content": "4.3. Evaluation" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": "Simultaneous localization and texture. We demonstrate the importance of simultaneously optimizing the localization region and texture in tandem in Fig. 5. We observe that simultaneous optimization results in highly detailed textures which effectively conform to the predicted localization regions (Fig. 5, left). Furthermore, the resulting localization region is sharp and intricate. Alternatively, we optimize the localization region first and use the predicted localization to learn a texture which is confined to the (precomputed) localization region (Fig. 5, middle). In this case, the texture is less detailed, and the localization region is less intricate. Finally, we can learn the texture and localization region independently (Fig. 5, independent). This results in a texture (Fig. 5 independent, middle) that is completely decoupled from the localization region (Fig. 5 independent, left). When masking the texture with the localization region, we observe a misaligned texture with fringe artifacts (Fig. 5 independent, right)." 
+ } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4479" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 63, + 521, + 244 + ], + "blocks": [ + { + "bbox": [ + 70, + 63, + 521, + 244 + ], + "lines": [ + { + "bbox": [ + 70, + 63, + 521, + 244 + ], + "spans": [ + { + "bbox": [ + 70, + 63, + 521, + 244 + ], + "type": "image", + "image_path": "dd06006c1d943573844d7e606784a0964f9b073f4c881a9b7c1d76511a2b0273.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 80, + 245, + 517, + 257 + ], + "lines": [ + { + "bbox": [ + 80, + 245, + 517, + 257 + ], + "spans": [ + { + "bbox": [ + 80, + 245, + 517, + 257 + ], + "type": "text", + "content": "Beautiful roses Colorful crochet base Rainbow headband Camo poncho Superhero cape Tiger stripe hat" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 257, + 547, + 281 + ], + "lines": [ + { + "bbox": [ + 46, + 257, + 547, + 281 + ], + "spans": [ + { + "bbox": [ + 46, + 257, + 547, + 281 + ], + "type": "text", + "content": "Figure 10. 3D Paintbrush is capable of producing a variety of local textures on the same mesh. Each result contains an accurate localization map (to specify the edit region) and a texture map that conforms to it." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 300, + 287, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 300, + 287, + 373 + ], + "spans": [ + { + "bbox": [ + 46, + 300, + 287, + 373 + ], + "type": "text", + "content": "Quantitative evaluation. 3D Paintbrush is the only method geared towards local editing that natively operates on meshes. We compare to the closest mesh-based methods which perform localization (3D Highlighter [10], SATR [2]) and texturing (Latent Paint [38]). We also compare to a voxel NeRF approach for local 3D editing (Vox-E [49])." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 374, + 288, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 374, + 288, + 482 + ], + "spans": [ + { + "bbox": [ + 46, + 374, + 288, + 482 + ], + "type": "text", + "content": "To evaluate our method against these baselines, we conduct a perceptual study where 39 users rate the effectiveness of each method for 9 different meshes (see Tab. 1). 3D Paintbrush consistently scores the highest for both localization and local editing, producing sharper localizations than 3D Highlighter and SATR and higher resolution textures than Latent Paint and Vox-E. Further quantitative evaluation using CLIP R-Precision and qualitative comparisons to these baselines are shown in the supplemental material." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 487, + 288, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 487, + 288, + 594 + ], + "spans": [ + { + "bbox": [ + 46, + 487, + 288, + 594 + ], + "type": "text", + "content": "Limitations. We illustrate a limitation of our method in Fig. 11. 
In cases where the desired local texture has strong semantic connections to additional components, these auxiliary components can sometimes be included in the localization and local texture. For example, a \"Pharaoh head-dress\" is closely associated with Egyptian necklaces and thus our method also localizes and styles this component as well. Our method also suffers from the Janus effect common to many text-to-3D methods that use 2D supervision." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 620, + 119, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 620, + 119, + 633 + ], + "spans": [ + { + "bbox": [ + 47, + 620, + 119, + 633 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 642, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 713 + ], + "type": "text", + "content": "We presented 3D Paintbrush, a technique that produces highly detailed texture maps on meshes which effectively adhere to a predicted localization region. Our system is capable of hallucinating non-obvious local textures on a wide variety of meshes (such as heart-shaped sunglasses on a cow). Our localizations are detailed and accurate, en" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 300, + 547, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 300, + 547, + 457 + ], + "spans": [ + { + "bbox": [ + 304, + 300, + 547, + 457 + ], + "type": "text", + "content": "abling seamless post-processing (such as compositing textures without unwanted fringe). We proposed cascaded score distillation, a technique capable of extracting supervision signals from multiple stages of a cascaded diffusion model. We observe that each stage controls different amounts of detail and global understanding. Further, varying the weights for each stage provides control over the resulting local textures. We show the effectiveness of CSD to locally texture meshes; yet, CSD is general and can be applied to other domains (such as images, videos, and alternative 3D representations). In the future, we are interested in extending localized editing to capabilities beyond texturing (such as deformations, normal maps, and more)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 467, + 416, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 467, + 416, + 480 + ], + "spans": [ + { + "bbox": [ + 306, + 467, + 416, + 480 + ], + "type": "text", + "content": "6. Acknowledgments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 485, + 547, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 485, + 547, + 575 + ], + "spans": [ + { + "bbox": [ + 304, + 485, + 547, + 575 + ], + "type": "text", + "content": "We thank the University of Chicago for providing the AI cluster resources, services, and the professional support of the technical staff. This work was also supported in part by gifts from Snap Research, Adobe Research, Google Research, BSF grant 2022363, and NSF grants 2304481 and 2241303. Finally, we would like to thank Brian Kim, Jack Zhang, Haochen Wang, and the members of 3DL and PALS for their thorough and insightful feedback on our work." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 307, + 582, + 545, + 654 + ], + "blocks": [ + { + "bbox": [ + 307, + 582, + 545, + 654 + ], + "lines": [ + { + "bbox": [ + 307, + 582, + 545, + 654 + ], + "spans": [ + { + "bbox": [ + 307, + 582, + 545, + 654 + ], + "type": "image", + "image_path": "d0c477b64edabd0b892d6eaf9f1e8a87bda7029beca7ec998bc553b2e8659424.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 654, + 547, + 711 + ], + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 711 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 711 + ], + "type": "text", + "content": "Figure 11. In cases where the desired localization carries a strong semantic context, elements from that context can also appear in the localization and style. For example, when adding a pharaoh headdress, 3D Paintbrush also adds an Egyptian necklace since they are commonly associated with pharaohs." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4480" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "text", + "content": "[1] Ahmed Abdelreheem, Abdelrahman Eldesokey, Maks Ovsjanikov, and Peter Wonka. Zero-shot 3d shape correspondence. In SIGGRAPH Asia 2023 Conference Papers, pages 1-11, 2023. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 136, + 288, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 136, + 288, + 168 + ], + "spans": [ + { + "bbox": [ + 53, + 136, + 288, + 168 + ], + "type": "text", + "content": "[2] Ahmed Abdelreheem, Ivan Skorokhodov, Maks Ovsjanikov, and Peter Wonka. Satr: Zero-shot semantic segmentation of 3d shapes. In ICCV, 2023. 2, 7, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 170, + 194, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 170, + 194, + 180 + ], + "spans": [ + { + "bbox": [ + 53, + 170, + 194, + 180 + ], + "type": "text", + "content": "[3] Stability AI. Deepfloydif, 2023. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 181, + 287, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 181, + 287, + 223 + ], + "spans": [ + { + "bbox": [ + 53, + 181, + 287, + 223 + ], + "type": "text", + "content": "[4] Sudarshan Babu, Richard Liu, Avery Zhou, Michael Maire, Greg Shakhnarovich, and Rana Hanocka. Hyperfields: Towards zero-shot generation of nerfs from text. arXiv preprint arXiv:2310.17075, 2023. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 225, + 287, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 225, + 287, + 269 + ], + "spans": [ + { + "bbox": [ + 53, + 225, + 287, + 269 + ], + "type": "text", + "content": "[5] Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. In European conference on computer vision, pages 707-723. Springer, 2022. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 270, + 287, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 270, + 287, + 302 + ], + "spans": [ + { + "bbox": [ + 53, + 270, + 287, + 302 + ], + "type": "text", + "content": "[6] Alexey Bokhovkin, Shubham Tulsiani, and Angela Dai. Mesh2tex: Generating mesh textures from image queries. arXiv preprint arXiv:2304.05868, 2023. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 303, + 287, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 303, + 287, + 356 + ], + "spans": [ + { + "bbox": [ + 53, + 303, + 287, + 356 + ], + "type": "text", + "content": "[7] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18392-18402, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 358, + 287, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 358, + 287, + 390 + ], + "spans": [ + { + "bbox": [ + 53, + 358, + 287, + 390 + ], + "type": "text", + "content": "[8] Dave Zhenyu Chen, Yawar Siddiqui, Hsin-Ying Lee, Sergey Tulyakov, and Matthias Nießner. Text2tex: Text-driven texture synthesis via diffusion models. In ICCV, 2023. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 392, + 287, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 392, + 287, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 392, + 287, + 434 + ], + "type": "text", + "content": "[9] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. arXiv preprint arXiv:2303.13873, 2023. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 436, + 287, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 436, + 287, + 468 + ], + "spans": [ + { + "bbox": [ + 48, + 436, + 287, + 468 + ], + "type": "text", + "content": "[10] Dale Decatur, Itai Lang, and Rana Hanocka. 3d highlighter: Localizing regions on 3d shapes via text descriptions. In CVPR, 2023. 2, 4, 7, 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 470, + 287, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 470, + 287, + 513 + ], + "spans": [ + { + "bbox": [ + 48, + 470, + 287, + 513 + ], + "type": "text", + "content": "[11] Zhiwen Fan, Yifan Jiang, Peihao Wang, Xinyu Gong, Dejia Xu, and Zhangyang Wang. Unified implicit neural stylization. In European Conference on Computer Vision, pages 636-654. Springer, 2022. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "type": "text", + "content": "[12] Rao Fu, Xiao Zhan, Yiwen Chen, Daniel Ritchie, and Srinath Sridhar. 
Shapecrafter: A recursive text-conditioned 3d shape generation model. Advances in Neural Information Processing Systems, 35:8882-8895, 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 559, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 287, + 612 + ], + "type": "text", + "content": "[13] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "type": "text", + "content": "[14] Jun Gao, Tianchang Shen, Zian Wang, Wenzheng Chen, Kangxue Yin, Daiqing Li, Or Litany, Zan Gojcic, and Sanja Fidler. Get3d: A generative model of high quality 3d textured shapes learned from images. In Advances In Neural Information Processing Systems, 2022. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "text", + "content": "[15] Lin Gao, Tong Wu, Yu-Jie Yuan, Ming-Xian Lin, Yu-Kun Lai, and Hao Zhang. Tm-net: Deep generative networks for textured meshes. ACM Transactions on Graphics (TOG), 40 (6):1-15, 2021. 2" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "type": "text", + "content": "[16] William Gao, Noam Aigerman, Thibault Groueix, Vova Kim, and Rana Hanocka. Textdeformer: Geometry manipulation using text guidance. In ACM SIGGRAPH 2023 Conference Proceedings, pages 1-11, 2023. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 119, + 545, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 119, + 545, + 163 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 545, + 163 + ], + "type": "text", + "content": "[17] Huy Ha and Shuran Song. Semantic abstraction: Openworld 3D scene understanding from 2D vision-language models. In Proceedings of the 2022 Conference on Robot Learning, 2022. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 164, + 545, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 164, + 545, + 197 + ], + "spans": [ + { + "bbox": [ + 308, + 164, + 545, + 197 + ], + "type": "text", + "content": "[18] Ayaan Haque, Matthew Tancik, Alexei A Efros, Aleksander Holynski, and Angjoo Kanazawa. Instruct-nerf2nerf: Editing 3d scenes with instructions. ICCV, 2023. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 199, + 545, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 199, + 545, + 232 + ], + "spans": [ + { + "bbox": [ + 308, + 199, + 545, + 232 + ], + "type": "text", + "content": "[19] Amir Hertz, Rana Hanocka, Raja Giryes, and Daniel Cohen-Or. Deep geometric texture synthesis. 
ACM Transactions on Graphics (TOG), 39(4):108-1, 2020. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 234, + 545, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 234, + 545, + 277 + ], + "spans": [ + { + "bbox": [ + 308, + 234, + 545, + 277 + ], + "type": "text", + "content": "[20] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 280, + 547, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 280, + 547, + 324 + ], + "spans": [ + { + "bbox": [ + 308, + 280, + 547, + 324 + ], + "type": "text", + "content": "[21] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. The Journal of Machine Learning Research, 23(1):2249-2281, 2022. 2, 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 326, + 545, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 326, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 308, + 326, + 545, + 380 + ], + "type": "text", + "content": "[22] Lukas Hollein, Justin Johnson, and Matthias Nießner. Stylemesh: Style transfer for indoor 3d scene reconstructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6198-6208, 2022. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 383, + 545, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 383, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 308, + 383, + 545, + 448 + ], + "type": "text", + "content": "[23] Jingwei Huang, Justus Thies, Angela Dai, Abhijit Kundu, Chiyu Jiang, Leonidas J Guibas, Matthias Nießner, Thomas Funkhouser, et al. Adversarial texture optimization from rgb-d scans. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1559-1568, 2020. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 450, + 545, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 450, + 545, + 494 + ], + "spans": [ + { + "bbox": [ + 308, + 450, + 545, + 494 + ], + "type": "text", + "content": "[24] Yukun Huang, Jianan Wang, Yukai Shi, Xianbiao Qi, Zheng-Jun Zha, and Lei Zhang. Dreamtime: An improved optimization strategy for text-to-3d content creation. arXiv preprint arXiv:2306.12422, 2023. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 497, + 545, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 497, + 545, + 550 + ], + "spans": [ + { + "bbox": [ + 308, + 497, + 545, + 550 + ], + "type": "text", + "content": "[25] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 553, + 545, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 553, + 545, + 574 + ], + "spans": [ + { + "bbox": [ + 308, + 553, + 545, + 574 + ], + "type": "text", + "content": "[26] Oren Katzir, Or Patashnik, Daniel Cohen-Or, and Dani Lischinski. Noise-free score distillation, 2023. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 578, + 545, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 578, + 545, + 631 + ], + "spans": [ + { + "bbox": [ + 308, + 578, + 545, + 631 + ], + "type": "text", + "content": "[27] Justin Kerr, Chung Min Kim, Ken Goldberg, Angjoo Kanazawa, and Matthew Tancik. Lerf: Language embedded radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19729-19739, 2023. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 635, + 545, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 545, + 677 + ], + "type": "text", + "content": "[28] Sosuke Kobayashi, Eiichi Matsumoto, and Vincent Sitzmann. Decomposing nerf for editing via feature field distillation. In Advances in Neural Information Processing Systems, 2022. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 680, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 680, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 680, + 545, + 712 + ], + "type": "text", + "content": "[29] Han-Hung Lee and Angel X Chang. Understanding pure clip guidance for voxel grid nerf models. arXiv preprint arXiv:2209.15172, 2022. 2" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "4481" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[30] Jiabao Lei, Yabin Zhang, Kui Jia, et al. Tango: Text-driven photorealistic and robust 3d stylization via lighting decomposition. Advances in Neural Information Processing Systems, 35:30923-30936, 2022. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 162 + ], + "type": "text", + "content": "[31] Yuhan Li, Yishun Dou, Yue Shi, Yu Lei, Xuanhong Chen, Yi Zhang, Peng Zhou, and Bingbing Ni. Focaldreamer: Text-driven 3d editing via focal-fusion assembly. arXiv preprint arXiv:2308.10608, 2023. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 164, + 287, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 164, + 287, + 208 + ], + "spans": [ + { + "bbox": [ + 48, + 164, + 287, + 208 + ], + "type": "text", + "content": "[32] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In CVPR, 2023. 
1, 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 210, + 287, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 210, + 287, + 243 + ], + "spans": [ + { + "bbox": [ + 48, + 210, + 287, + 243 + ], + "type": "text", + "content": "[33] Hsueh-Ti Derek Liu, Vladimir G. Kim, Siddhartha Chaudhari, Noam Aigerman, and Alec Jacobson. Neural subdivision. ACM Trans. Graph., 39(4), 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 245, + 287, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 245, + 287, + 311 + ], + "spans": [ + { + "bbox": [ + 48, + 245, + 287, + 311 + ], + "type": "text", + "content": "[34] Kunhao Liu, Fangneng Zhan, Yiwen Chen, Jiahui Zhang, Yingchen Yu, Abdulmotaleb El Saddik, Shijian Lu, and Eric P Xing. Stylerf: Zero-shot 3d style transfer of neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8338-8348, 2023. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 313, + 287, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 313, + 287, + 356 + ], + "spans": [ + { + "bbox": [ + 48, + 313, + 287, + 356 + ], + "type": "text", + "content": "[35] Minghua Liu, Chao Xu, Haian Jin, Linghao Chen, Zexiang Xu, Hao Su, et al. One-2-3-45: Any single image to 3d mesh in 45 seconds without per-shape optimization. arXiv preprint arXiv:2306.16928, 2023. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 358, + 287, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 358, + 287, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 358, + 287, + 392 + ], + "type": "text", + "content": "[36] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object, 2023. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 394, + 287, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 394, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 48, + 394, + 287, + 460 + ], + "type": "text", + "content": "[37] Yiwei Ma, Xiaqing Zhang, Xiaoshuai Sun, Jiayi Ji, Haowei Wang, Guannan Jiang, Weilin Zhuang, and Rongrong Ji. X-mesh: Towards fast and accurate text-driven 3d stylization via dynamic textual guidance. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2749-2760, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 462, + 287, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 462, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 48, + 462, + 287, + 495 + ], + "type": "text", + "content": "[38] Gal Metzer, Elad Richardson, Or Patashnik, Raja Giryes, and Daniel Cohen-Or. Latent-nerf for shape-guided generation of 3d shapes and textures. In CVPR, 2023. 1, 2, 3, 7, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 497, + 287, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 497, + 287, + 529 + ], + "spans": [ + { + "bbox": [ + 48, + 497, + 287, + 529 + ], + "type": "text", + "content": "[39] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. Text2mesh: Text-driven neural stylization for meshes. In CVPR, pages 13492-13502, 2022. 
2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 532, + 287, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 532, + 287, + 575 + ], + "spans": [ + { + "bbox": [ + 48, + 532, + 287, + 575 + ], + "type": "text", + "content": "[40] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 577, + 287, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 577, + 287, + 621 + ], + "spans": [ + { + "bbox": [ + 48, + 577, + 287, + 621 + ], + "type": "text", + "content": "[41] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. Clip-mesh: Generating textured meshes from text using pretrained image-text models. In SIGGRAPH Asia 2022 conference papers, pages 1-8, 2022. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 623, + 287, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 623, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 48, + 623, + 287, + 666 + ], + "type": "text", + "content": "[42] Michael Oechsle, Lars Mescheder, Michael Niemeyer, Thilo Strauss, and Andreas Geiger. Texture fields: Learning texture representations in function space. In CVPR, pages 4531-4540, 2019. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "text", + "content": "[43] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017. 6" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "text", + "content": "[44] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In ICLR, 2023. 2, 5, 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 107, + 545, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 171 + ], + "type": "text", + "content": "[45] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3d object generation using both 2d and 3d diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "text", + "content": "[46] Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In International conference on machine learning, 2019. 
3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 220, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 251 + ], + "type": "text", + "content": "[47] Elad Richardson, Gal Metzer, Yuval Alaluf, Raja Giryes, and Daniel Cohen-Or. Texture: Text-guided texturing of 3d shapes. In ACM TOG, 2023. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 254, + 545, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 254, + 545, + 318 + ], + "spans": [ + { + "bbox": [ + 307, + 254, + 545, + 318 + ], + "type": "text", + "content": "[48] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 1, 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 320, + 545, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 320, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 307, + 320, + 545, + 353 + ], + "type": "text", + "content": "[49] Etai Sella, Gal Fiebelman, Peter Hedman, and Hadar Averbuch-Elor. Vox-e: Text-guided voxel editing of 3d objects. In ICCV, 2023. 1, 2, 7, 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 354, + 545, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 545, + 387 + ], + "type": "text", + "content": "[50] Yichun Shi, Peng Wang, Jianglong Ye, Mai Long, Kejie Li, and Xiao Yang. Mvdream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512, 2023. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 388, + 545, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 432 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 432 + ], + "type": "text", + "content": "[51] Yawar Siddiqui, Justus Thies, Fangchang Ma, Qi Shan, Matthias Nießner, and Angela Dai. Texturify: Generating textures on 3d shape surfaces. In European Conference on Computer Vision, pages 72-88. Springer, 2022. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 434, + 545, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 434, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 307, + 434, + 545, + 487 + ], + "type": "text", + "content": "[52] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. 2020. 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 490, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 490, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 490, + 545, + 533 + ], + "type": "text", + "content": "[53] Christina Tsalicoglou, Fabian Manhardt, Alessio Tonioni, Michael Niemeyer, and Federico Tombari. Textmesh: Generation of realistic 3d meshes from text prompts. arXiv preprint arXiv:2304.12439, 2023. 
1, 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 535, + 545, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 535, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 307, + 535, + 545, + 588 + ], + "type": "text", + "content": "[54] Vadim Tschernezki, Iro Laina, Diane Larlus, and Andrea Vedaldi. Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. In 2022 International Conference on 3D Vision (3DV), pages 443-453. IEEE, 2022. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 590, + 545, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 545, + 612 + ], + "type": "text", + "content": "[55] TurboSquid. Turbosquid 3d model repository, 2021. https://www.turbosquid.com/. 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 614, + 545, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 614, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 307, + 614, + 545, + 657 + ], + "type": "text", + "content": "[56] Oliver van Kaick, Andrea Tagliasacchi, Oana Sidi, Hao Zhang, Daniel Cohen-Or, Lior Wolf, and Ghassan Hamarneh. Prior knowledge for part correspondence. Computer Graphics Forum, 30(2):553-562, 2011. 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "text", + "content": "[57] Suhani Vora, Noha Radwan, Klaus Greff, Henning Meyer, Kyle Genova, Mehdi S. M. Sajjadi, Etienne Pot, Andrea Tagliasacchi, and Daniel Duckworth. Nesf: Neural semantic fields for generalizable semantic segmentation of 3d scenes, 2021. 2" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "4482" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 509 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[58] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A. Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In CVPR, 2023. 1, 2, 3, 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 287, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 287, + 162 + ], + "type": "text", + "content": "[59] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. arXiv preprint arXiv:2305.16213, 2023. 
1, 2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 163, + 288, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 163, + 288, + 207 + ], + "spans": [ + { + "bbox": [ + 49, + 163, + 288, + 207 + ], + "type": "text", + "content": "[60] Xingkui Wei, Zhengqing Chen, Yanwei Fu, Zhaopeng Cui, and Yinda Zhang. Deep hybrid self-prior for full 3d mesh generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5805-5814, 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 208, + 288, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 208, + 288, + 262 + ], + "spans": [ + { + "bbox": [ + 49, + 208, + 288, + 262 + ], + "type": "text", + "content": "[61] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 264, + 288, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 264, + 288, + 317 + ], + "spans": [ + { + "bbox": [ + 49, + 264, + 288, + 317 + ], + "type": "text", + "content": "[62] Kangxue Yin, Jun Gao, Maria Shugrina, Sameh Khamis, and Sanja Fidler. 3dstylenet: Creating 3d shapes with geometric and texture style variations. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12456-12465, 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 319, + 288, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 319, + 288, + 363 + ], + "spans": [ + { + "bbox": [ + 49, + 319, + 288, + 363 + ], + "type": "text", + "content": "[63] Xiaohui Zeng, Arash Vahdat, Francis Williams, Zan Gojcic, Or Litany, Sanja Fidler, and Karsten Kreis. Lion: Latent point diffusion models for 3d shape generation. arXiv preprint arXiv:2210.06978, 2022. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 364, + 287, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 364, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 49, + 364, + 287, + 407 + ], + "type": "text", + "content": "[64] Kai Zhang, Nick Kolkin, Sai Bi, Fujun Luan, Zexiang Xu, Eli Shechtman, and Noah Snavely. Arf: Artistic radiance fields. In European Conference on Computer Vision, pages 717-733. Springer, 2022. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 409, + 288, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 409, + 288, + 441 + ], + "spans": [ + { + "bbox": [ + 49, + 409, + 288, + 441 + ], + "type": "text", + "content": "[65] Qingnan Zhou and Alec Jacobson. Thingi10k: A dataset of 10,000 3d-printing models. arXiv preprint arXiv:1605.04797, 2016. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 443, + 288, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 443, + 288, + 475 + ], + "spans": [ + { + "bbox": [ + 49, + 443, + 288, + 475 + ], + "type": "text", + "content": "[66] Joseph Zhu and Peiye Zhuang. Hifa: High-fidelity text-to-3d with advanced diffusion guidance. arXiv preprint arXiv:2305.18766, 2023. 
2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 476, + 288, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 476, + 288, + 509 + ], + "spans": [ + { + "bbox": [ + 49, + 476, + 288, + 509 + ], + "type": "text", + "content": "[67] Jingyu Zhuang, Chen Wang, Lingjie Liu, Liang Lin, and Guanbin Li. Dreameditor: Text-driven 3d scene editing with neural fields. In SIGGRAPH Asia, 2023. 1, 2" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4483" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/d47f630a-17d8-4298-a368-699d1959d603_content_list.json b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/d47f630a-17d8-4298-a368-699d1959d603_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..bd33f00ef02562773f461c4b2394ffbf43723879 --- /dev/null +++ b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/d47f630a-17d8-4298-a368-699d1959d603_content_list.json @@ -0,0 +1,1507 @@ +[ + { + "type": "text", + "text": "3D-Aware Face Editing via Warping-Guided Latent Direction Learning", + "text_level": 1, + "bbox": [ + 124, + 130, + 844, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yuhao Cheng $^{1}$ Zhuo Chen $^{1}$ Xingyu Ren $^{1}$ Wenhan Zhu $^{1}$ Zhengqin Xu $^{1}$ Di Xu $^{2}$ Changpeng Yang $^{2}$ Yichao Yan $^{1*}$", + "bbox": [ + 178, + 179, + 789, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University $^{2}$ Huawei Cloud Computing Technologies Co., Ltd", + "bbox": [ + 151, + 222, + 816, + 258 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{chengyuhao,ningci5252,rxy_sjtu,zhuwenhan823,fate311,yanyichao}@sjtu.edu.cn, {xudi21,yangchangpeng}@huawei.com", + "bbox": [ + 153, + 261, + 815, + 292 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/49cb9423275b6bee71d88d91ade48eae4998b40fa3498a43e0866c33cc5ac462.jpg", + "image_caption": [ + "Figure 1. An example of our warping-guided 3D-aware face editing method. Our method supports users to edit 3D faces in an intuitive way that drags points from multiple perspectives. Moreover, our method can achieve disentangled editing for shape, expression, and view, while maintaining 3D consistency. Please zoom-in for detailed observation." + ], + "image_footnote": [], + "bbox": [ + 86, + 303, + 883, + 506 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 568, + 313, + 583 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D facial editing, a longstanding task in computer vision with broad applications, is expected to fast and intuitively manipulate any face from arbitrary viewpoints following the user's will. Existing works have limitations in terms of intuitiveness, generalization, and efficiency. 
To overcome these challenges, we propose FaceEdit3D, which allows users to directly manipulate 3D points to edit a 3D face, achieving natural and rapid face editing. After one or several points are manipulated by users, we propose the tri-plane warping to directly deform the view-independent 3D representation. To address the problem of distortion caused by tri-plane warping, we train a warp-aware encoder to project the warped face onto a standardized latent space. In this space, we further propose directional latent editing to mitigate the identity bias caused by the encoder and realize the disentangled editing of various attributes. Extensive experiments show that our method achieves superior results with rich facial details and nice identity preservation. Our approach also supports general applications like", + "bbox": [ + 75, + 592, + 472, + 878 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "multi-attribute continuous editing and cat/car editing. The project website is https://cyh-sj.github.io/FaceEdit3D/.", + "bbox": [ + 500, + 569, + 890, + 599 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 631, + 630, + 647 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "High-quality face editing has long been an important research topic in computer vision with a wide range of applications, including social media and film production. Previous methods [16, 36, 43] based on 2D GANs [22, 23] have demonstrated the capability of editing facial images with high-fidelity. Recently, benefiting from the impressive achievements of 3D-aware generative models, especially in generative digital human [2-4, 11, 15, 32, 33, 41, 45, 51, 53, 55, 56, 64], the field of 3D facial editing has further attracted significant interest due to its promising capacity of manipulating a 3D representation.", + "bbox": [ + 496, + 657, + 890, + 823 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Typically, 3D face editing methods can be generally classified into three categories: prior-guided conditioning, parameter-space fine-tuning, and latent-space optimization, as summarized in Tab. 1. Specifically, prior-guided conditioning methods [18, 46-48] employ an additional well", + "bbox": [ + 496, + 824, + 890, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Corresponding author", + "bbox": [ + 107, + 886, + 230, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "916", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/01a28f54950d8d1fcf089d8e894b30137888275b5dbafb64b5cd9920e06791ef.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
SchemeMethodsIntuitivenessGeneralizationEfficiency
Conditional control[18, 46, 48]
Fine-tuned models[6, 13, 59]
Supervised directions[1, 36, 43]
Unsupervised directions[16, 42, 67]
[34] (2D)
Ours
", + "bbox": [ + 81, + 88, + 470, + 210 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table 1. Summary of 3D-aware face editing methods. $\\triangle$ indicates its instructions are somewhat ambiguous semantically.", + "bbox": [ + 76, + 220, + 468, + 250 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "designed conditioning module to introduce the control information, e.g., semantic maps [18, 46] and 3DMM [48, 49], into the 3D-aware models. Although flexible, these models typically require a large number of face images with their control labels for training. Parameter-space finetuning methods [6, 13, 59] optimize the pre-trained generators given the target input, achieving zero-shot editing with the help of the large language-image model, e.g., CLIP [38] or Stable Diffusion [39]. However, it is required to maintain a particular generator for each specific editing target, severely constraining their generalization.", + "bbox": [ + 75, + 265, + 468, + 431 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Due to the rich distributions learned in the pre-trained generator, discovering the meaningful directions in the latent space allows for a wide range of editing without the need to modify the generator and dependence on a large amount of training data. According to the exploration of editing direction, latent-space optimization can be achieved in supervised and unsupervised ways. Supervised methods [1, 36, 43, 44] search the meaningful directions in the latent space by learning labeled data for each specific editing. However, these methods cannot be generalized beyond the training domain. In contrast, unsupervised methods [16, 42, 50, 65-67] discover out-of-domain directions by analyzing the distribution of the latent space. However, the editing directions in the latent space are typically not semantically intuitive for the users. Accordingly, introducing interactive guidance to bridge the gap between the latent space and the user's intuition becomes the main purpose of the unsupervised methods.", + "bbox": [ + 75, + 431, + 470, + 703 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To achieve this, several works [12, 34] utilize manipulating points on 2D images to optimize latent code in an unsupervised way, achieving image editing intuitively. The most prominent method DragGAN [34] proposes motion supervision and point tracking to optimize the latent code in a self-supervised manner, showcasing its flexible and intuitive editing capabilities. Considering their success on 2D images, it would be highly desirable if we could also manipulate 3D points to edit a 3D facial representation. However, it is non-trivial to directly extend point dragging to 3D-aware facial editing, due to the following challenges. 1) These methods ignore the global 3D facial structure and only focus on the movements of specific points, potentially", + "bbox": [ + 75, + 704, + 470, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "leading to exaggerated distortions. 2) These methods employ an inefficient approach to optimize the latent codes for image editing. Therefore, extending this procedure to 3D-aware generators fails to meet the demands of 3D interactive applications. 
3) The controllability of point dragging is less precise and may cause ambiguous targets, e.g., enlarging the shape of the mouth may lead the mouth to open.", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To overcome these challenges, we propose FaceEdit3D to learn editing directions guided by 3D-consistent facewarping, realizing intuitive and rapid 3D-aware facial editing. (1) First, we propose tri-plane warping on the 3D representation to achieve accurate 3D-consistent facial editing, which allows us to sidestep inaccurate motion supervision. Further, we introduce 3D landmarks rather than arbitrary points as face prior to constrain the change in the normal face distribution. Although tri-plane warping allows for precise editing, it introduces slight facial distortions. (2) Hence, we train a warp-aware encoder instead of latent optimization to straightforwardly project the warped renderings into the standardized space, enabling fast and photorealistic editing. Due to the complex semantic information in the latent space of 3D-aware generators, the obtained encoder suffers from inherent bias, resulting in a loss of details and identity shifting. (3) Therefore, we propose to learn the hierarchical directional editing in latent space, enabling disentangled face editing with identity and details preservation.", + "bbox": [ + 496, + 198, + 892, + 484 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "With all the designs above, we successfully introduce dragging-based edits into 3D face representations. Our work achieves an efficient and straightforward editing process which also enables the decoupling of facial expressions and shapes. Compared to other face editing approaches, our method offers a more intuitive bridge but avoids dependence on the 3D annotations. Extensive experiments have demonstrated the superiority of our method in intuitiveness, generalization, and efficiency for the task of facial editing.", + "bbox": [ + 496, + 484, + 890, + 621 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The main contributions are summarized as follows:", + "bbox": [ + 517, + 622, + 859, + 636 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We design an efficient and straightforward 3D-aware face editing pipeline that is in line with the user's intuition.", + "- We propose to warp the face in the tri-plane feature level, enabling 3D-consistent face manipulation.", + "- We propose a warp-aware encoder to better identify the subtle changes and efficiently solve the problem of distorted face caused by the tri-plane warp.", + "- We propose directional editing in latent space, achieving disentangled facial editing with the preservation of identity and details." + ], + "bbox": [ + 500, + 638, + 890, + 787 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 500, + 805, + 648, + 820 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. 
3D-aware GANs", + "text_level": 1, + "bbox": [ + 500, + 830, + 663, + 845 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Inspired by the superiority of implicit representation [31], several attempts [2-4, 11, 15, 32, 33, 41, 45, 53, 55, 64] deploy radiance fields into generative models and thus en", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "917", + "bbox": [ + 485, + 944, + 509, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "able 3D consistent image synthesis. The capability of learning 3D representations from unposed single-view 2D images only empowers these 3D-aware GAN models to gain wide interests and applications. However, partial 3D-aware GANs [3, 15, 32, 33, 41, 64] adopt full implicit representation that lacks pre-computed 3D features before the point sampling. As a consequence, they need to regenerate the 3D feature when given novel viewpoints, limiting the efficiency of them in interactive applications. To address this challenge, several works [2, 4, 45, 53] adopt hybrid representations that first generate view-independent features, and enable sampling points on these pre-computed features for novel view synthesis. Consequently, these methods can realize rapid generation and maintain the inherent 3D-consistent representation. Specifically, EG3D [4] introduces the light tri-plane representation into the generator to raise efficiency and further enhance the image quality. Considering its efficient representation and mature downstream techniques, we adopt the EG3D [4] as the base 3D-aware model to demonstrate the effectiveness of our methods.", + "bbox": [ + 76, + 90, + 472, + 393 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Implicit Representations Deformation", + "text_level": 1, + "bbox": [ + 76, + 404, + 405, + 420 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The deformation of 3D implicit representation has long attracted wide focus, as it serves as the foundation of broad animation applications. Prior researches predominantly introduce an additional deformation field based on the original representation to modify the 3D points. Specifically, deformation fields can be implemented through proxy-based editing [14, 21, 35, 57], cage-based editing [17, 37, 54], and parametric prior-based editing [40, 52, 63], etc. Proxy-based editing learns a lightweight neural network to compute the translation and rotation of 3D points, enabling the deformation of original 3D coordinates. The cage-based methods establish a surrounding cage to fully cover up the original surface of an implicit representation and then modify the cage to deform the inherent surface. Parametric prior-based methods leverage the parametric models such as SMPL [29] and FLAME [27] as a prior condition of the deformation network to drive the implicit representations. However, all of these approaches need to optimize a controllable module for each specific object, lack of efficiency and generality. In contrast, our work provides a landmark-based way to directly edit the 3D representation without optimization and further compresses the 3D deformation into 2D feature planes to improve efficiency.", + "bbox": [ + 76, + 428, + 472, + 776 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. 
Face Editing in GANs", + "text_level": 1, + "bbox": [ + 76, + 786, + 282, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As the latent space learned by the conditioned GANs contains most of the distribution knowledge, many works [1, 42, 43, 50, 69] explore the latent space of a pre-trained generator for the following facial attribute editing. Specifically, InterFaceGAN [43] studies the semantics encoded in the latent space and disentangles the facial semantics with linear", + "bbox": [ + 76, + 809, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "projection. To explicitly edit the facial attributes, further works explore utilizing the intuitive representation, e.g., semantic maps [5, 46, 47, 68] and text prompts [19, 36] for the optimization or the extension of latent space. Moreover, an idea that directly drags the face for the editing catches the wide attention. DragGAN [34] optimizes the latent space via dragging selected points on the image to the target positions. However, it is hard to preserve the facial identity when setting a far distance between the two points, preventing the DragGAN from large-scale editing. Despite the prominent performance of latent space manipulation, it still faces a challenge in balancing the identity preservation and editing amplitude. To further enhance the editing capability, several works [6, 13, 24] focus on the parameter space of a pre-trained generator. While these methods can achieve out-of-domain editing, they need to maintain a specific generator for each attribute manipulation, lacking efficiency. Compared to the methods mentioned above, our method is an intuitive way of dragging points to deform the 3D representations while improving the efficiency and preserving the identity.", + "bbox": [ + 496, + 90, + 893, + 409 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methods", + "text_level": 1, + "bbox": [ + 500, + 420, + 599, + 436 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our proposed framework, FaceEdit3D, aims at multi-view consistent facial editing in shape, expression, and pose via warping-guided directional editing, as illustrated in Fig. 2. To this end, we first review the 3D-aware GAN that achieves high-resolution face rendering from multiple views (Sec. 3.1). Based on the 3D-aware generator, we propose a point-guided feature-space warping method that manipulates the inherent tri-plane representations while ensuring the 3D consistency (Sec. 3.2). However, directly editing the tri-plane may lead to distortions in the final rendered images. Therefore, we train a specifically designed encoder to project the warped renderings to the standardized latent space for photo-realistic editing results (Sec. 3.3). Finally, we delve into the mechanism of latent space and propose directional editing in latent space that enables the disentangled editing of facial shape, expression, and pose (Sec. 3.4).", + "bbox": [ + 496, + 446, + 893, + 689 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Preliminaries on 3D-aware Face Generator", + "text_level": 1, + "bbox": [ + 500, + 696, + 867, + 710 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our framework is built upon EG3D [4], one of the most powerful 3D-aware generative models that achieve photorealistic 3D face generation. The generator of EG3D introduces a tri-plane representation, which compactly encodes the geometry and appearance of a 3D face. 
Specifically, the tri-plane features can be denoted as $\\mathbf{F} = \\mathcal{G}(\\mathbf{w})\\in$ $\\mathbb{R}^{3\\times 32\\times 256\\times 256}$ , where $\\mathbf{W}$ is a latent code. To render face images from a specific viewpoint, the features of 3D coordinates are sampled from the tri-plane features and a shallow decoder is leveraged to project the tri-plane feature $\\mathbf{F}(x,y,z)\\in \\mathbb{R}^{32\\times 3}$ into volume density $\\sigma \\in \\mathbb{R}^1$ and color feature $c\\in \\mathbb{R}^{32}$ . Subsequently, a low-resolution fea", + "bbox": [ + 496, + 719, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "918", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/24c131db8dd70402acf4b937f3437dcb105b9f281ff2f7069ac4460692c5f0cf.jpg", + "image_caption": [ + "(a) 3D-consistent Tri-plane Warp" + ], + "image_footnote": [], + "bbox": [ + 81, + 89, + 537, + 268 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/7bef67a8c11a6425320cd8bc3b98ec878d42aea9bd1b469b6c0d88678b328f69.jpg", + "image_caption": [ + "(b) The Pipeline of Our FaceEdit3D", + "Figure 2. Overview of our proposed FaceEdit3D. (a) A detailed illustration of our tri-plane warp. We project 2D key points onto the 3D face surface and then map them to each corresponding plane within a tri-plane representation. Afterward, we apply warping operations to each plane to achieve 3D-consistent editing. (b) The full pipeline of our FaceEdit3D. Given a source image $\\mathbf{I}_s$ with its latent code $\\mathbf{w}_s$ , we first perform the tri-plane warping on it and obtain the warped rendering $\\hat{\\mathbf{I}}_t$ . Subsequently, we utilize a warp-aware encoder to extract the latent codes $\\mathbf{w}_s'$ and $\\mathbf{w}_t'$ from the source image $\\mathbf{I}_s$ and the warped renderings $\\hat{\\mathbf{I}}_t$ , respectively. Then, we employ the hierarchical latent direction to update the target latent code $\\mathbf{w}_t$ . Finally, the edited facial image $\\mathbf{I}_t$ can be synthesized via the updated latent code $\\mathbf{w}_t$ ." + ], + "image_footnote": [], + "bbox": [ + 545, + 89, + 885, + 273 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ture map is generated via volume rendering and then upsampled to high-resolution images. The representation ability of tri-plane features has been verified by several recent works [7, 20, 24]. Therefore, to achieve 3D-consistent editing, we choose to operate directly on the tri-plane features.", + "bbox": [ + 75, + 395, + 468, + 472 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Multi-view Consistent Face Warping", + "text_level": 1, + "bbox": [ + 76, + 482, + 393, + 500 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For 3D face editing, it is a flexible way for users to directly drag points on the rendered images. Different from 2D-level editing that limits to one specific viewpoint, 3D-level manipulation should support editing from an arbitrary viewpoint and achieve 3D-consistent editing effects. To achieve this, we propose a framework based on point-guided triplane warping, where users manipulate one or several points from a desirable viewpoint, and the tri-plane features are warped according to the point displacements.", + "bbox": [ + 75, + 506, + 468, + 642 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Point Manipulation by Users. 
Ideally, users can directly modify arbitrary points in a rendered face to achieve editing. Nevertheless, the potential conflicts among excessive control points may lead to undesirable distortions of the facial structure during the joint point manipulation, consequently yielding results that deviate from realistic human appearances. To address this issue, we constrain the users to manipulate a set of meaningful 3D facial landmarks to guarantee a natural face structure.", + "bbox": [ + 75, + 643, + 468, + 777 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, given a latent code $\\mathbf{w}_s$ and a pre-trained EG3D generator $\\mathcal{G}$ , the portrait is first rendered in the front view with camera intrinsic $\\mathbf{K}$ . Then, 2D facial landmarks are detected by a pre-trained detector and projected on the facial surface to obtain 3D landmarks $\\mathbf{P} = \\{\\mathbf{p}_0,\\mathbf{p}_1,\\dots ,\\mathbf{p}_n\\} \\in \\mathbb{R}^{n\\times 3}$ , and $\\mathbf{p}_i = \\{\\mathbf{p}_i^x,\\mathbf{p}_i^y,\\mathbf{p}_i^z\\} \\in \\mathbb{R}^3$ . Consequently, users can render images from an arbitrary viewpoint with extrinsic $\\mathbf{R} \\in \\mathbb{S}\\mathbb{O}(3)$ and select any spe", + "bbox": [ + 75, + 780, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "cific points for editing. Take the selected point $\\mathbf{p}_i$ as an example, we set the movement of the point $\\Delta \\mathbf{p}_i$ is perpendicular to the rendering direction. The updated 3D point $\\mathbf{p}_i^{\\prime}$ is represented as:", + "bbox": [ + 498, + 395, + 890, + 455 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {p} _ {i} ^ {\\prime} = \\mathbf {p} _ {i} + \\mathbf {R} ^ {- 1} \\mathbf {K} ^ {- 1} \\mathbf {Z} \\Delta \\mathbf {p} _ {i}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 465, + 890, + 483 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{Z}$ is the depth of the selected point in the pose $\\mathbf{R}$ . After manipulating specific points within the facial structure, we obtain a set of new 3D landmarks $\\mathbf{P}' = \\{\\mathbf{p}_0', \\mathbf{p}_1', \\dots, \\mathbf{p}_n'\\}$ .", + "bbox": [ + 498, + 492, + 890, + 553 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Tri-plane Warping. After the users have manipulated the key points, we apply 3D warping on the tri-planes to edit the 3D representation. Individually considering each of the tri-plane features [7], we can extend the editing in 3D space onto three 2D planes to enhance efficiency. Therefore, we begin by projecting the 3D landmarks onto the three feature planes, and then individually apply a similar warping transformation on each of these feature planes, as illustrated in Fig. 2 (a). 
Take the $xy$ -plane $\\mathbf{F}_{xy}$ as an example, given $n$ source projected points $\\mathbf{P}^{xy} = \\{\\mathbf{p}_0^{xy}, \\mathbf{p}_1^{xy}, \\dots, \\mathbf{p}_n^{xy}\\} \\in \\mathbb{R}^{n \\times 2}$ , $\\mathbf{p}_i^{xy} = \\{\\mathbf{p}_i^x, \\mathbf{p}_i^y\\}$ and their target points $\\hat{\\mathbf{P}}^{xy} = \\{\\hat{\\mathbf{p}}_0^{xy}, \\hat{\\mathbf{p}}_1^{xy}, \\dots, \\hat{\\mathbf{p}}_n^{xy}\\}$ , we employ thin-plate spline interpolation [9] to compute the grid sampler with:", + "bbox": [ + 498, + 554, + 890, + 750 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ng (\\mathbf {q}) = \\sum_ {i = 1} ^ {n} w _ {i} \\phi \\left(\\left\\| \\mathbf {q} - \\hat {\\mathbf {p}} _ {i} \\right\\|\\right) + \\mathbf {v} ^ {T} \\mathbf {q} + \\mathbf {b}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 557, + 760, + 890, + 799 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\phi (r) = r^2\\log (r)$ is the kernel function and $g(\\mathbf{q})$ provides the inverse mapping of the location $\\mathbf{p}$ to the original plane coordinates $\\mathbf{q}$ . The parameters $\\mathbf{v},\\mathbf{b}$ are the parameters to minimize a certain definition of curvature. Similarly, by applying such inverse mapping to all three planes, we complete the tri-plane warping and achieve the inherently", + "bbox": [ + 498, + 809, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "919", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D-consistent modification. Compared to the manipulation of the sampled 3D coordinate space [60, 62], our method directly manipulates the 3D representation, empowering to simultaneously edit from multiple viewpoints without additional steps.", + "bbox": [ + 75, + 90, + 468, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Warp-Aware Encoding", + "text_level": 1, + "bbox": [ + 76, + 175, + 290, + 191 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After tri-plane warping, the editing results exhibit 3D consistent modification. However, directly applying warping operation on tri-plane features may not conform to the facial distribution in the latent space, leading to a severely distorted appearance. To solve this problem, our solution is to encode the distorted facial image $\\hat{\\mathbf{I}}_t$ into a standardized latent space that learns the natural counterpart $\\mathbf{w}_t^\\prime$ of the distorted face with an encoder $\\mathcal{E}$ :", + "bbox": [ + 75, + 199, + 468, + 318 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {w} _ {t} ^ {\\prime} = \\mathcal {E} (\\hat {\\mathbf {I}} _ {t}). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 329, + 468, + 347 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To train the encoder, we sample images from the pretrained generator to generate image and latent code pairs. Specifically, the portrait $\\mathbf{I}_s$ is generated from the randomly sampled latent code and the camera poses $\\mathbf{c}$ . Subsequently, the portrait $\\mathbf{I}_s$ is projected to latent code $\\mathbf{w}_s^\\prime$ by the encoder $\\mathcal{E}$ , and then the corresponding image $\\mathbf{I}_s^\\prime$ is generated by the same frozen generator $\\mathcal{G}$ and pose $\\mathbf{c}$ . 
The optimization objective of the encoder is the combination of L1 Loss, LPIPS loss [61], and identity loss [10]:", + "bbox": [ + 75, + 358, + 468, + 493 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {o} = \\mathcal {L} _ {1} \\left(\\mathbf {I} _ {s}, \\mathbf {I} _ {s} ^ {\\prime}\\right) + \\mathcal {L} _ {\\mathbf {L P I P S}} \\left(\\mathbf {I} _ {s}, \\mathbf {I} _ {s} ^ {\\prime}\\right) + \\mathcal {L} _ {\\mathbf {I D}} \\left(\\mathbf {I} _ {s}, \\mathbf {I} _ {s} ^ {\\prime}\\right). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 503, + 468, + 520 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Unfortunately, we find that the encoder trained with the aforementioned method poses difficulties in identifying subtle modifications due to the inherent complexity of 3D-aware generators. Hence, we further introduce the triplane warping as the data augmentation to enhance the overall perception of subtle edits. Similar to the above training pipeline, we apply the encoder onto the warped rendering $\\hat{\\mathbf{I}}_t$ to obtain the latent code $\\mathbf{w}_t'$ , thus generating its inverted image $\\mathbf{I}_t'$ . The loss is calculated between $\\mathbf{I}_t'$ and $\\hat{\\mathbf{I}}_t$ :", + "bbox": [ + 75, + 531, + 468, + 667 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {w} = \\mathcal {L} _ {1} \\left(\\hat {\\mathbf {I}} _ {t}, \\mathbf {I} _ {t} ^ {\\prime}\\right) + \\mathcal {L} _ {\\mathbf {L P I P S}} \\left(\\hat {\\mathbf {I}} _ {t}, \\mathbf {I} _ {t} ^ {\\prime}\\right) + \\mathcal {L} _ {\\mathbf {I D}} \\left(\\hat {\\mathbf {I}} _ {t}, \\mathbf {I} _ {t} ^ {\\prime}\\right). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 96, + 676, + 468, + 694 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Besides, following GOAE [58], we utilize a discriminator $\\mathcal{D}$ to ensure the latent codes $\\mathbf{w}_t^\\prime$ and $\\mathbf{w}_s^\\prime$ in the standardized latent space:", + "bbox": [ + 75, + 704, + 468, + 750 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {d} = \\mathbb {E} [ f (\\mathcal {D} \\left(\\mathbf {w} _ {t} ^ {\\prime}\\right)) + f (\\mathcal {D} \\left(\\mathbf {w} _ {s} ^ {\\prime}\\right)) ] (6) \\\\ + \\mathbb {E} [ f (- \\mathcal {D} (\\mathbf {w} _ {c})) ] + \\gamma | | \\nabla \\mathcal {D} (\\mathbf {w} _ {c}) | | ^ {2}, (6) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 758, + 468, + 799 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $f(x) = -\\log (1 + \\exp (-x))$ , and $\\gamma$ is a hyperparameter in R1 regularization. $\\mathbf{w}_{\\mathbf{c}}$ are pre-sampled standardized latent codes by the frozen generator. The final objective linearly combines the aforementioned losses:", + "bbox": [ + 75, + 808, + 468, + 869 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {o} + \\mathcal {L} _ {w} + \\mathcal {L} _ {d}. 
\\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 881, + 468, + 896 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After the training process, the edited rendering is projected into latent space and then passed to the generator to yield a more reasonable editing result in the target view $\\mathbf{c}_t$ :", + "bbox": [ + 496, + 90, + 890, + 136 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {t} = \\mathcal {G} \\left(\\mathbf {w} _ {t} ^ {\\prime}, \\mathbf {c} _ {t}\\right). \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 640, + 145, + 890, + 161 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Directional Editing in Latent Space", + "text_level": 1, + "bbox": [ + 498, + 169, + 807, + 186 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Warp-aware encoder solves the problem of severely distorted appearance caused by the tri-plane warp, however, it additionally introduces identity bias into the latent codes as the encoder cannot faithfully inverse faces. Besides, it is still hard to handle the ambiguity during the point-manipulation. Therefore, we here propose directional editing learning to further overcome these two challenges.", + "bbox": [ + 496, + 191, + 890, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To begin with, we adopt the difference between the latent codes that are extracted from the images before and after warping by the encoder as the direction guidance. In this way, we mitigate the identity bias and bypass the problem caused by the encoder. Furthermore, we follow Style-CLIP [36] to explore the semantics of layers in the $W+$ latent space of EG3D [4], empowering our method with the disentangled editing of the expression and shape. According to the hierarchical mechanism, we can obtain free editing results by applying editing directions in the variant layers to the same warping facial image, successfully avoiding the ambiguity caused by the tri-plane warp.", + "bbox": [ + 496, + 299, + 890, + 479 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The full pipeline is shown in Fig. 2 (b). Given a latent code $\\mathbf{w}_s$ and the frozen EG3D generator $\\mathcal{G}$ , the facial triplane can be generated. Specifically, the warp-aware encoder projects these two images to standardized latent codes $\\mathbf{w}_s'$ and $\\mathbf{w}_t'$ with Eq. (3), respectively. The target edited latent code $\\mathbf{w}_t$ can be calculated with:", + "bbox": [ + 496, + 479, + 890, + 569 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {w} _ {t} = \\mathbf {w} _ {s} + H \\left(\\mathbf {w} _ {t} ^ {\\prime} - \\mathbf {w} _ {s} ^ {\\prime}\\right), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 579, + 890, + 595 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $H(\\cdot)$ is a feature selection module for disentangling latent direction. Finally, the modified portrait $\\mathbf{I}_t$ can be rendered from any perspective $\\mathbf{c}_t$ with $\\mathbf{I}_t = \\mathcal{G}(\\mathbf{w}_t,\\mathbf{c}_t)$ .", + "bbox": [ + 496, + 604, + 890, + 650 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 498, + 662, + 632, + 679 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we evaluate the efficiency and the quality of our 3D-aware face editing model. We first introduce the implementation details of our work (Sec. 4.1). 
Subsequently, we compare our method with the SOTA 3D face editing methods qualitatively (Sec. 4.2) and quantitatively (Sec. 4.3). Then, we conduct ablation studies to analyze the effect of each component (Sec. 4.4). Finally, we introduce the potential applications of our method (Sec. 4.5).", + "bbox": [ + 496, + 686, + 890, + 808 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Implementation Details", + "text_level": 1, + "bbox": [ + 498, + 816, + 715, + 832 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We build our approach on the EG3D [4] pre-trained on the FFHQ dataset [22]. We employ the Mediapipe [30] to detect 2D landmarks and select 29 points for user manipulation. To obtain 3D landmarks, we first detect 2D landmarks", + "bbox": [ + 496, + 839, + 890, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "920", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/749d84eef9c5423fe87b46130098caec69d0a3277e18034f5dc3db3e874a48d7.jpg", + "image_caption": [ + "Figure 3. Qualitative comparisons with current SOTA methods for 3D face shape and expression editing. (a), (b), and (c) are the results of synthetic samples, and (d) showcases the results of a real-world portrait." + ], + "image_footnote": [], + "bbox": [ + 84, + 89, + 890, + 452 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "in the frontal view, and then compute the 3D coordinates by the locations of maximum density value on their corresponding emitted rays. We adopt Swin-transformer [28] as the encoder structure to enhance the detail perception. In the encoder training, the standardized latent codes are sampled to generate the face images under random views, consisting of totally 100000 identities. We adopt the Adam optimizer [25] and set the learning rates as $1e - 4$ for both the encoder and the discriminator. All the implementations are based on the PyTorch and set up on Nvidia A6000 GPUs.", + "bbox": [ + 75, + 506, + 472, + 657 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Qualitative Evaluation", + "text_level": 1, + "bbox": [ + 76, + 665, + 287, + 681 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct a qualitative comparison between our work and several SOTA 3D face editing methods with intuitive manipulation, i.e., StyleGAN-NADA [13] guided by the text prompts and IDE-3D [46] controlled by the semantic maps. Besides, we also introduce the point-based warping approach into the qualitative comparison. We adopt similar editing objectives and use their official codes to ensure fairness. Fig. 3 shows the multi-view results of the shape and expression editing, demonstrating the superiority of our method on fine-grained modification. The warp can accomplish obvious editing, but it suffers from facial distortion. IDE-3D [46] achieve satisfied results in most cases. However, the coupling of different facial attributes in the semantic maps leads to changes beyond the target attributes. For", + "bbox": [ + 75, + 688, + 470, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "instance, the baby in Fig. 3 (c) shows the shift of age and identity when trying to elongate his chin. Besides, IDE-3D only supports single-view editing, limiting its availability. StyleGAN-NADA [13] fails to edit the facial shape based on the EG3D despite its great success in style transfer and texture editing. 
In contrast, our method supports the user to simultaneously manipulate the face from multiple views and enables intuitive editing for facial shapes, expressions, and poses without the sacrifice of identity and detail. In addition to the editing quality, our method has another advantage that it does not require additional training for generative models, demonstrating its generalization.", + "bbox": [ + 496, + 506, + 893, + 686 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Furthermore, we also compare our method with a recent 2D method, DragGAN [34], which employs a similar point-guided operation to ours. Since DragGAN is limited to 2D editing, we compare the results in two aspects, i.e., fixed view editing and novel view synthesis, as shown in Fig. 4. In the aspect of fixed-view editing, the results of DragGAN [34] in Fig. 4 (a) show a tendency to open the mouth and change the identity when shortening the nose, although a mask limiting the editable region is applied. In the aspect of novel view synthesis, DragGAN severely changes the identity due to ambiguous point dragging in Fig. 4 (b). Compared to DragGAN, our method succeeds in achieving the expected editing target while maintaining the identity and irrelevant parts unchanged.", + "bbox": [ + 496, + 689, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "921", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/49c5f8b0ac16b04a7b0f626aadee5c3b8088525efb0a1a0ec7465dec297cefea.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodsSchemeInference Time (s)↓MSEi↑MSEo↓MSEi / MSEo↑ID Consistency↑
DragGAN [34]2D5.2311.9920.2248.8930.579
Ours2D0.3562.0490.18611.0160.716
Our warp3D0.2692.4550.3287.4850.707
IDE-3D [46]3D0.3831.8410.9871.8650.649
Ours3D0.6241.6790.3424.9090.712
", + "bbox": [ + 112, + 88, + 854, + 189 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Quantitative comparison with several face editing methods on efficiency and effectiveness. The best results are labeled in bold except for our direct warp due to its distortion results. The unit of $\\mathrm{MSE}_i$ and $\\mathrm{MSE}_o$ are $10^{-2}$ .", + "bbox": [ + 75, + 194, + 892, + 222 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7847255046e76098286f48adda5d859c9b9bda9d1e4c34c1ffeea00edfadc673.jpg", + "image_caption": [ + "Figure 4. Qualitative comparisons with DragGAN [34] on portrait editing. Red and blue points represent the source and target points in the manipulations, respectively. The semi-transparent region indicates the mask used for DragGAN, while not in our method." + ], + "image_footnote": [], + "bbox": [ + 84, + 233, + 468, + 388 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Quantitative Evaluation", + "text_level": 1, + "bbox": [ + 76, + 468, + 299, + 484 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We also conduct quantitative experiments to verify the efficiency and effectiveness of our method, as shown in Tab. 2. We adopt editing time as the metric to evaluate the efficiency because it severely influences the user experiences. As shown, DragGAN [34] spends a large amount of time on latent optimization, resulting in lower efficiency. IDE-3D [46] and our method exhibit similar efficiency in supporting real-time editing. Despite the fastest method, the method of direct warp causes facial distortion, and thus we exclude it from the comparison.", + "bbox": [ + 75, + 491, + 468, + 642 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Furthermore, to assess the capability of disentangled editing, we measure the pixel-wise mean square error (MSE) inside and outside the target editing regions as the metric. The main objective is to successfully edit the target regions while preventing the outside regions from modification. As shown, our approach achieves better editing disentanglement than IDE3D [46] with minimized ratio of $\\mathrm{MSE}_i$ and $\\mathrm{MSE}_o$ . It is worth noting that the editability of 3D GANs is inferior to that of 2D GANs, and thus our method falls behind the DragGAN [34]. Considering the efficiency and the ability to multi-view editing of our method, the gap between ours and the DragGAN is acceptable. To fairly compare these two methods without the interference of base generators, we further extend our method to the same 2D generator and it performs better than DragGAN [34] in this setting. Additionally, we also compare the identity similarity. The results indicate that our method can better maintain", + "bbox": [ + 75, + 643, + 470, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/edf9ce84d6b0f76032e4437ef5ebb39d187e4f5974b19fa115c1e0b2fe6aa899.jpg", + "image_caption": [ + "Figure 5. The ablation study of our loss functions for training the encoder. The first row aims to widen the double eyelids while keeping the eyes open, and the second is to lengthen the bangs. The numbers in the corners represent the identity similarity measured by ArcFace [10]. Please zoom-in for detailed observation." 
+ ], + "image_footnote": [], + "bbox": [ + 501, + 232, + 890, + 367 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the identity character than other methods.", + "bbox": [ + 500, + 464, + 774, + 479 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 491, + 653, + 508 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effectiveness of Loss Functions. We investigate the effectiveness of each loss function in the encoder training process, as depicted in Fig. 5. The $\\mathcal{L}_w$ introduced by the warp-assisted data augmentation facilitates the accurate identification for user's manipulations, and the $\\mathcal{L}_d$ helps to maintain identity information. The combination of them achieves the best editing results.", + "bbox": [ + 496, + 515, + 892, + 621 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effectiveness of Directional Latent Editing. We conduct an ablation study to verify the effectiveness of our directional latent editing. We begin with applying tri-plane warping on source identities to obtain the warped results. Subsequently, we extract the directions of different layer groups, i.e., shape direction, expression direction, and the combined directions, respectively. Fig. 6 shows that the individual directional latent code has the capacity to disentangle the attributes, while the combination of them can realize integrated editing. However, directly mapping warped rendering to latent space without our directional latent module results in identity shifting and detail deficiency. These results can verify the effectiveness of our directional latent editing.", + "bbox": [ + 496, + 622, + 893, + 819 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Applications", + "text_level": 1, + "bbox": [ + 500, + 830, + 633, + 847 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Generalization of Learned Latent Directions. The editing direction learned for one face can be generalized to other instances, and we can further control the degree along the", + "bbox": [ + 496, + 854, + 892, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "922", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ffabbeefafc3a99ecd629df6aaf614944982e2c1c7a8938096426000624f5b00.jpg", + "image_caption": [ + "Figure 6. The ablation study of our directional editing. \"w/o Dir.\" represents results generated by directly projecting the warped results to latent space." + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 472, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dfae759163fd89ba13343cfbc47329e3062683caf0987f7a0513d46ad2ed6b44.jpg", + "image_caption": [ + "Figure 7. The interpolated editing results along the directions learned in the case of Fig. 3 (a) and (d), i.e., \"wider face\" and \"close mouth\" respectively. It shows that the learned editing direction in one face can be generalized to other instances." + ], + "image_footnote": [], + "bbox": [ + 78, + 287, + 472, + 449 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "direction to linearly interpolate the editing results. Fig. 7 shows the interpolation results guided by the directions learned in the cases of Fig. 3, i.e., wider face and closed mouth. 
With the degree rising from -2.0 to 2.0, both of the two identities show a gradual trend to change along their directions, although the directions are initially learned for other cases, demonstrating the generalization of these learned latent directions.", + "bbox": [ + 75, + 541, + 468, + 662 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Continuous Editing. Continuous editing is important to real-world applications. Therefore, we conduct an experiment to show our capability of overlying modification. Fig. 8 shows the results with multiple editing targets, i.e., smaller eyes, closed mouth, smaller nose, and wider face. The natural and ID-consistent results demonstrate the effectiveness of our method of continuous editing.", + "bbox": [ + 75, + 667, + 468, + 773 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Generalization to Other Generators. To show the generalized application of our method, we extend it to 3D cat editing and 2D car editing. We introduce our method to the pre-trained EG3D [4] on AFHQ Cats [8] dataset and StyleGAN [23] trained on Stanford Cars [26] dataset, respectively. As shown in Fig. 9, our approach can also successfully manipulate the 3D cats and 2D cars according to the user's point-based instructions.", + "bbox": [ + 75, + 780, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/28a66dc27078a94be76e31ffcf7181a2d5708a581806ec669fa9b8ac1fe162b4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 88, + 890, + 247 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/613467223a83b59e64a8193c3fcee84a28f89cb5650c83fd9db0eb96d011c286.jpg", + "image_caption": [ + "Figure 8. We showcase the mixing results with multiple attributes, demonstrating the continuous editing ability of our method.", + "Figure 9. The extension of our method to cat and car editing." + ], + "image_footnote": [], + "bbox": [ + 500, + 287, + 890, + 426 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 463, + 619, + 479 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose FaceEdit3D, an intuitive method to edit the 3D facial shape and expression from any perspective. Our approach involves a tri-plane warping to ensure the inherent 3D-consistent editing. To mitigate facial distortions led by the warping, we train a warp-aware encoder to project the warped face into standardized distribution and further explore the hierarchical mechanism in latent space to achieve disentangled editing. Extensive experiments demonstrate the effectiveness and efficiency of our method. The additional applications also show the generalization and potential of our method across different applications. To sum up, our method provides a brand new way to manipulate the 3D representation, opening up new avenues for rapid and convenient real-image editing.", + "bbox": [ + 496, + 488, + 890, + 699 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations. Since our method is based on warping the 3D representation, it is hard for our work to achieve texture editing and some semantic editing, such as wearing glasses. Broader Impacts. Despite not our intention, our 3D-aware facial editing capability could potentially be abused. 
We are committed to privacy protection, preventing the misuse of facial editing for criminal purposes.", + "bbox": [ + 496, + 699, + 890, + 806 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 819, + 668, + 837 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported in part by NSFC (62201342, 62101325), and Shanghai Municipal Science and Technology Major Project (2021SHZDZX0102).", + "bbox": [ + 496, + 844, + 890, + 890 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "923", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Rameen Abdal, Peihao Zhu, Niloy J Mitra, and Peter Wonka. Styleflow: Attribute-conditioned exploration of stylegan-generated images using conditional continuous normalizing flows. TOG, pages 1-21, 2021. 2, 3", + "[2] Sizhe An, Hongyi Xu, Yichun Shi, Guoxian Song, Umit Y Ogras, and Linjie Luo. Panohed: Geometry-aware 3d fullhead synthesis in 360deg. In CVPR, pages 20950-20959, 2023. 1, 2, 3", + "[3] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In CVPR, pages 5799-5809, 2021. 3", + "[4] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In CVPR, pages 16123-16133, 2022. 1, 2, 3, 5, 8", + "[5] Anpei Chen, Ruiyang Liu, Ling Xie, Zhang Chen, Hao Su, and Jingyi Yu. Sofgan: A portrait image generator with dynamic styling. TOG, pages 1-26, 2022. 3", + "[6] Zhuo Chen, Xudong Xu, Yichao Yan, Ye Pan, Wenhan Zhu, Wayne Wu, Bo Dai, and Xiaokang Yang. Hyperstyle3d: Text-guided 3d portrait stylization via hypernetworks. arXiv preprint arXiv:2304.09463, 2023. 2, 3", + "[7] Yuhao Cheng, Yichao Yan, Wenhan Zhu, Ye Pan, Bowen Pan, and Xiaokang Yang. Head3d: Complete 3d head generation via tri-plane feature distillation. arXiv preprint arXiv:2303.15892, 2023. 4", + "[8] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In CVPR, pages 8188-8197, 2020. 8", + "[9] Forrester Cole, David Belanger, Dilip Krishnan, Aaron Sarna, Inbar Mosseri, and William T Freeman. Synthesizing normalized faces from facial identity features. In CVPR, pages 3703-3712, 2017. 4", + "[10] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, pages 4690-4699, 2019. 5, 7", + "[11] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. Gram: Generative radiance manifolds for 3d-aware image generation. In CVPR, pages 10673-10683, 2022. 1, 2", + "[12] Yuki Endo. User-controllable latent transformer for stylegan image layout editing. In Computer Graphics Forum, pages 395-406, 2022. 2", + "[13] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. TOG, pages 1-13, 2022. 2, 3, 6", + "[14] Stephan J Garbin, Marek Kowalski, Virginia Estellers, Stanislaw Szymanowicz, Shideh RezaEIFar, Jingjing Shen, Matthew Johnson, and Julien Valentin. 
Voltemorph: Realtime, controllable and generalisable animation of volumetric representations. arXiv preprint arXiv:2208.00949, 2022. 3", + "[15] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In ICLR, 2021. 1, 2, 3" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. Ganspace: Discovering interpretable gan controls. NeurIPS, pages 9841–9850, 2020. 1, 2", + "[17] Clément Jambon, Bernhard Kerbl, Georgios Kopanas, Stavros Diolatzis, George Drettakis, and Thomas Leimkuhler. Nerfshop: Interactive editing of neural radiance fields. CGIT, 6(1), 2023. 3", + "[18] Kaiwen Jiang, Shu-Yu Chen, Feng-Lin Liu, Hongbo Fu, and Lin Gao. Nerfaceediting: Disentangled face editing in neural radiance fields. In SIGGRAPH Asia, pages 1-9, 2022. 1, 2", + "[19] Yuming Jiang, Ziqi Huang, Xingang Pan, Chen Change Loy, and Ziwei Liu. Talk-to-edit: Fine-grained facial editing via dialog. In ICCV, pages 13799-13808, 2021. 3", + "[20] Wonjoon Jin, Nuri Ryu, Geonung Kim, Seung-Hwan Baek, and Sunghyun Cho. Dr. 3d: Adapting 3d gans to artistic drawings. In SIGGRAPH Asia, pages 1-8, 2022. 4", + "[21] Kacper Kania, Stephan J Garbin, Andrea Tagliasacchi, Virginia Estellers, Kwang Moo Yi, Julien Valentin, Tomasz Trzciński, and Marek Kowalski. Blendfields: Few-shot example-driven facial modeling. In CVPR, pages 404-415, 2023. 3", + "[22] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019. 1, 5", + "[23] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, pages 8110-8119, 2020. 1, 8", + "[24] Gwanghyun Kim and Se Young Chun. Datid-3d: Diversitypreserved domain adaptation using text-to-image diffusion for 3d generative model. In CVPR, pages 14203–14213, 2023. 3, 4", + "[25] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015. 6", + "[26] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV, pages 554–561, 2013. 8", + "[27] Tianye Li, Timo Bolkart, Michael J Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4d scans. TOG, pages 194-1, 2017. 3", + "[28] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, pages 10012-10022, 2021. 6", + "[29] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. In Seminal Graphics Papers: Pushing the Boundaries, Volume 2, pages 851-866. 2023. 3", + "[30] Camillo Lugaresi, Jiuqiang Tang, Hadon Nash, Chris McClanahan, Esha Uboweja, Michael Hays, Fan Zhang, Chuoling Chang, Ming Guang Yong, Juhyun Lee, et al. Mediapipe: A framework for building perception pipelines. arXiv preprint arXiv:1906.08172, 2019. 5", + "[31] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, pages 99-106, 2020. 
2" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "924", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[32] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In CVPR, pages 11453–11464, 2021. 1, 2, 3", + "[33] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In CVPR, pages 13503-13513, 2022. 1, 2, 3", + "[34] Xingang Pan, Ayush Tewari, Thomas Leimkuhler, Lingjie Liu, Abhinitra Meka, and Christian Theobalt. Drag your gan: Interactive point-based manipulation on the generative image manifold. In ASIGGRAPH, pages 1-11, 2023. 2, 3, 6, 7", + "[35] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In ICCV, pages 5865-5874, 2021. 3", + "[36] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In CVPR, pages 2085–2094, 2021. 1, 2, 3, 5", + "[37] Yicong Peng, Yichao Yan, Shengqi Liu, Yuhao Cheng, Shanyan Guan, Bowen Pan, Guangtao Zhai, and Xiaokang Yang. Cagenerf: Cage-based neural radiance field for generalized 3d deformation and animation. NeurIPS, pages 31402-31415, 2022. 3", + "[38] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763, 2021. 2", + "[39] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, pages 10684-10695, 2022. 2", + "[40] Shunsuke Saito, Jinlong Yang, Qianli Ma, and Michael J Black. Scintimate: Weakly supervised learning of skinned clothed avatar networks. In CVPR, pages 2886-2897, 2021. 3", + "[41] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In NIPS, 2020. 1, 2, 3", + "[42] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in gans. In CVPR, pages 1532-1540, 2021. 2, 3", + "[43] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. Interfacegan: Interpreting the disentangled face representation learned by gans. TPAMI, pages 2004-2018, 2020. 1, 2, 3", + "[44] Enis Simsar, Alessio Tonioni, Evin Pinar Ornek, and Federico Tombari. Latentswap3d: Semantic edits on 3d image gans. In ICCV, pages 2899-2909, 2023. 2", + "[45] Ivan Skorokhodov, Sergey Tulyakov, Yiqun Wang, and Peter Wonka. Epigraf: Rethinking training of 3d gans. NeurIPS, pages 24487-24501, 2022. 1, 2, 3", + "[46] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled edit" + ], + "bbox": [ + 78, + 90, + 467, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ing for high-resolution 3d-aware portrait synthesis. ToG, pages 1-10, 2022. 1, 2, 3, 6, 7", + "[47] Jingxiang Sun, Xuan Wang, Yong Zhang, Xiaoyu Li, Qi Zhang, Yebin Liu, and Jue Wang. Fenerf: Face editing in neural radiance fields. In CVPR, pages 7672-7682, 2022. 
3", + "[48] Jingxiang Sun, Xuan Wang, Lizhen Wang, Xiaoyu Li, Yong Zhang, Hongwen Zhang, and Yebin Liu. Next3d: Generative neural texture rasterization for 3d-aware head avatars. In CVPR, pages 20991-21002, 2023. 1, 2", + "[49] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In CVPR, pages 6142-6151, 2020. 2", + "[50] Andrey Voynov and Artem Babenko. Unsupervised discovery of interpretable directions in the gan latent space. In ICML, pages 9786-9796, 2020. 2, 3", + "[51] Tengfei Wang, Bo Zhang, Ting Zhang, Shuyang Gu, Jianmin Bao, Tadas Baltrusaitis, Jingjing Shen, Dong Chen, Fang Wen, Qifeng Chen, et al. Rodin: A generative model for sculpting 3d digital avatars using diffusion. In CVPR, pages 4563-4573, 2023. 1", + "[52] Sijing Wu, Yichao Yan, Yunhao Li, Yuhao Cheng, Wenhan Zhu, Ke Gao, Xiaobo Li, and Guangtao Zhai. Ganhead: Towards generative animatable neural head avatars. In CVPR, pages 437-447, 2023. 3", + "[53] Jianfeng Xiang, Jiaolong Yang, Yu Deng, and Xin Tong. Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. In ICCV, pages 2195-2205, 2023. 1, 2, 3", + "[54] Tianhan Xu and Tatsuya Harada. Deforming radiance fields with cages. In ECCV, pages 159-175, 2022. 3", + "[55] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In CVPR, 2022. 1, 2", + "[56] Yan Yichao, Cheng Yuhao, Chen Zhuo, Peng Yicong, Wu Sijing, Zhang Weitian, Li Junjie, Li Yixuan, Gao Jingnan, Zhang Weixia, Zhai Guangtao, and Yang Xiaokang. A survey on generative 3d digital humans based on neural networks: representation, rendering, and learning. _SCIENTIA SINICA Informationis_, pages 1858–, 2023. 1", + "[57] Yu-Jie Yuan, Yang-Tian Sun, Yu-Kun Lai, Yuewen Ma, Rongfei Jia, and Lin Gao. Nerf-editing: geometry editing of neural radiance fields. In CVPR, pages 18353-18364, 2022. 3", + "[58] Ziyang Yuan, Yiming Zhu, Yu Li, Hongyu Liu, and Chun Yuan. Make encoder great again in 3d gan inversion through geometry and occlusion-aware encoding. In ICCV, pages 2437-2447, 2023. 5", + "[59] Chi Zhang, Yiwen Chen, Yijun Fu, Zhenglin Zhou, Gang Yu, Billzb Wang, Bin Fu, Tao Chen, Guosheng Lin, and Chunhua Shen. StyleAvatar3d: Leveraging image-text diffusion models for high-fidelity 3d avatar generation. arXiv preprint arXiv:2305.19012, 2023. 2", + "[60] Jianfeng Zhang, Zihang Jiang, Dingdong Yang, Hongyi Xu, Yichun Shi, Guoxian Song, Zhongcong Xu, Xinchao Wang," + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "925", + "bbox": [ + 486, + 945, + 509, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "and Jiashi Feng. Avatargen: a 3d generative model for animatable human avatars. In ECCV, pages 668-685. Springer, 2022. 5", + "[61] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, pages 586-595, 2018. 5", + "[62] Xuanmeng Zhang, Jianfeng Zhang, Rohan Chacko, Hongyi Xu, Guoxian Song, Yi Yang, and Jiashi Feng. Getavatar: Generative textured meshes for animatable human avatars. In ICCV, pages 2273-2282, 2023. 5", + "[63] Yufeng Zheng, Victoria Fernández Abrevaya, Marcel C Bühler, Xu Chen, Michael J Black, and Otmar Hilliges. 
Im avatar: Implicit morphable head avatars from videos. In CVPR, pages 13545-13555, 2022. 3", + "[64] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 1, 2, 3", + "[65] Jiapeng Zhu, Ruili Feng, Yujun Shen, Deli Zhao, Zheng-Jun Zha, Jingren Zhou, and Qifeng Chen. Low-rank subspaces in gans. NeurIPS, pages 16648-16658, 2021. 2", + "[66] Jiapeng Zhu, Yujun Shen, Yinghao Xu, Deli Zhao, and Qifeng Chen. Region-based semantic factorization in gans. In ICML, pages 27612-27632, 2022.", + "[67] Jiapeng Zhu, Ceyuan Yang, Yujun Shen, Zifan Shi, Bo Dai, Deli Zhao, and Qifeng Chen. Linkgan: Linking gan latents to pixels for controllable image synthesis. In ICCV, pages 7656-7666, 2023. 2", + "[68] Peihao Zhu, Rameen Abdal, Yipeng Qin, and Peter Wonka. Sean: Image synthesis with semantic region-adaptive normalization. In CVPR, pages 5104-5113, 2020. 3", + "[69] Peiye Zhuang, Oluwasanmi Koyejo, and Alexander G Schwing. Enjoy your editing: Controllable gans for image editing via latent space navigation. arXiv preprint arXiv:2102.01187, 2021.3" + ], + "bbox": [ + 78, + 90, + 467, + 599 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "926", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/d47f630a-17d8-4298-a368-699d1959d603_model.json b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/d47f630a-17d8-4298-a368-699d1959d603_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ad00339965839193b878816b1d8b00011e0c8385 --- /dev/null +++ b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/d47f630a-17d8-4298-a368-699d1959d603_model.json @@ -0,0 +1,2312 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.125, + 0.131, + 0.846, + 0.154 + ], + "angle": 0, + "content": "3D-Aware Face Editing via Warping-Guided Latent Direction Learning" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.18, + 0.79, + 0.218 + ], + "angle": 0, + "content": "Yuhao Cheng\\(^{1}\\) Zhuo Chen\\(^{1}\\) Xingyu Ren\\(^{1}\\) Wenhan Zhu\\(^{1}\\) Zhengqin Xu\\(^{1}\\) Di Xu\\(^{2}\\) Changpeng Yang\\(^{2}\\) Yichao Yan\\(^{1*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.223, + 0.817, + 0.259 + ], + "angle": 0, + "content": "\\(^{1}\\)MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University \\(^{2}\\)Huawei Cloud Computing Technologies Co., Ltd" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.262, + 0.816, + 0.294 + ], + "angle": 0, + "content": "{chengyuhao,ningci5252,rxy_sjtu,zhuwenhan823,fate311,yanyichao}@sjtu.edu.cn, {xudi21,yangchangpeng}@huawei.com" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.304, + 0.884, + 0.507 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.514, + 0.893, + 0.556 + ], + "angle": 0, + "content": "Figure 1. 
An example of our warping-guided 3D-aware face editing method. Our method supports users to edit 3D faces in an intuitive way that drags points from multiple perspectives. Moreover, our method can achieve disentangled editing for shape, expression, and view, while maintaining 3D consistency. Please zoom-in for detailed observation." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.569, + 0.314, + 0.584 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.593, + 0.473, + 0.88 + ], + "angle": 0, + "content": "3D facial editing, a longstanding task in computer vision with broad applications, is expected to fast and intuitively manipulate any face from arbitrary viewpoints following the user's will. Existing works have limitations in terms of intuitiveness, generalization, and efficiency. To overcome these challenges, we propose FaceEdit3D, which allows users to directly manipulate 3D points to edit a 3D face, achieving natural and rapid face editing. After one or several points are manipulated by users, we propose the tri-plane warping to directly deform the view-independent 3D representation. To address the problem of distortion caused by tri-plane warping, we train a warp-aware encoder to project the warped face onto a standardized latent space. In this space, we further propose directional latent editing to mitigate the identity bias caused by the encoder and realize the disentangled editing of various attributes. Extensive experiments show that our method achieves superior results with rich facial details and nice identity preservation. Our approach also supports general applications like" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.57, + 0.892, + 0.601 + ], + "angle": 0, + "content": "multi-attribute continuous editing and cat/car editing. The project website is https://cyh-sj.github.io/FaceEdit3D/." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.632, + 0.631, + 0.648 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.658, + 0.892, + 0.824 + ], + "angle": 0, + "content": "High-quality face editing has long been an important research topic in computer vision with a wide range of applications, including social media and film production. Previous methods [16, 36, 43] based on 2D GANs [22, 23] have demonstrated the capability of editing facial images with high-fidelity. Recently, benefiting from the impressive achievements of 3D-aware generative models, especially in generative digital human [2-4, 11, 15, 32, 33, 41, 45, 51, 53, 55, 56, 64], the field of 3D facial editing has further attracted significant interest due to its promising capacity of manipulating a 3D representation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Typically, 3D face editing methods can be generally classified into three categories: prior-guided conditioning, parameter-space fine-tuning, and latent-space optimization, as summarized in Tab. 1. Specifically, prior-guided conditioning methods [18, 46-48] employ an additional well" + }, + { + "type": "page_footnote", + "bbox": [ + 0.109, + 0.887, + 0.232, + 0.9 + ], + "angle": 0, + "content": "* Corresponding author" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "916" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.089, + 0.472, + 0.212 + ], + "angle": 0, + "content": "
SchemeMethodsIntuitivenessGeneralizationEfficiency
Conditional control[18, 46, 48]
Fine-tuned models[6, 13, 59]
Supervised directions[1, 36, 43]
Unsupervised directions[16, 42, 67]
[34] (2D)
Ours
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.222, + 0.47, + 0.25 + ], + "angle": 0, + "content": "Table 1. Summary of 3D-aware face editing methods. \\(\\triangle\\) indicates its instructions are somewhat ambiguous semantically." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.266, + 0.47, + 0.432 + ], + "angle": 0, + "content": "designed conditioning module to introduce the control information, e.g., semantic maps [18, 46] and 3DMM [48, 49], into the 3D-aware models. Although flexible, these models typically require a large number of face images with their control labels for training. Parameter-space finetuning methods [6, 13, 59] optimize the pre-trained generators given the target input, achieving zero-shot editing with the help of the large language-image model, e.g., CLIP [38] or Stable Diffusion [39]. However, it is required to maintain a particular generator for each specific editing target, severely constraining their generalization." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.433, + 0.471, + 0.704 + ], + "angle": 0, + "content": "Due to the rich distributions learned in the pre-trained generator, discovering the meaningful directions in the latent space allows for a wide range of editing without the need to modify the generator and dependence on a large amount of training data. According to the exploration of editing direction, latent-space optimization can be achieved in supervised and unsupervised ways. Supervised methods [1, 36, 43, 44] search the meaningful directions in the latent space by learning labeled data for each specific editing. However, these methods cannot be generalized beyond the training domain. In contrast, unsupervised methods [16, 42, 50, 65-67] discover out-of-domain directions by analyzing the distribution of the latent space. However, the editing directions in the latent space are typically not semantically intuitive for the users. Accordingly, introducing interactive guidance to bridge the gap between the latent space and the user's intuition becomes the main purpose of the unsupervised methods." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.471, + 0.903 + ], + "angle": 0, + "content": "To achieve this, several works [12, 34] utilize manipulating points on 2D images to optimize latent code in an unsupervised way, achieving image editing intuitively. The most prominent method DragGAN [34] proposes motion supervision and point tracking to optimize the latent code in a self-supervised manner, showcasing its flexible and intuitive editing capabilities. Considering their success on 2D images, it would be highly desirable if we could also manipulate 3D points to edit a 3D facial representation. However, it is non-trivial to directly extend point dragging to 3D-aware facial editing, due to the following challenges. 1) These methods ignore the global 3D facial structure and only focus on the movements of specific points, potentially" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.198 + ], + "angle": 0, + "content": "leading to exaggerated distortions. 2) These methods employ an inefficient approach to optimize the latent codes for image editing. Therefore, extending this procedure to 3D-aware generators fails to meet the demands of 3D interactive applications. 3) The controllability of point dragging is less precise and may cause ambiguous targets, e.g., enlarging the shape of the mouth may lead the mouth to open." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.199, + 0.893, + 0.485 + ], + "angle": 0, + "content": "To overcome these challenges, we propose FaceEdit3D to learn editing directions guided by 3D-consistent facewarping, realizing intuitive and rapid 3D-aware facial editing. (1) First, we propose tri-plane warping on the 3D representation to achieve accurate 3D-consistent facial editing, which allows us to sidestep inaccurate motion supervision. Further, we introduce 3D landmarks rather than arbitrary points as face prior to constrain the change in the normal face distribution. Although tri-plane warping allows for precise editing, it introduces slight facial distortions. (2) Hence, we train a warp-aware encoder instead of latent optimization to straightforwardly project the warped renderings into the standardized space, enabling fast and photorealistic editing. Due to the complex semantic information in the latent space of 3D-aware generators, the obtained encoder suffers from inherent bias, resulting in a loss of details and identity shifting. (3) Therefore, we propose to learn the hierarchical directional editing in latent space, enabling disentangled face editing with identity and details preservation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.486, + 0.892, + 0.622 + ], + "angle": 0, + "content": "With all the designs above, we successfully introduce dragging-based edits into 3D face representations. Our work achieves an efficient and straightforward editing process which also enables the decoupling of facial expressions and shapes. Compared to other face editing approaches, our method offers a more intuitive bridge but avoids dependence on the 3D annotations. Extensive experiments have demonstrated the superiority of our method in intuitiveness, generalization, and efficiency for the task of facial editing." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.623, + 0.86, + 0.637 + ], + "angle": 0, + "content": "The main contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.639, + 0.891, + 0.667 + ], + "angle": 0, + "content": "- We design an efficient and straightforward 3D-aware face editing pipeline that is in line with the user's intuition." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.669, + 0.89, + 0.699 + ], + "angle": 0, + "content": "- We propose to warp the face in the tri-plane feature level, enabling 3D-consistent face manipulation." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.7, + 0.89, + 0.744 + ], + "angle": 0, + "content": "- We propose a warp-aware encoder to better identify the subtle changes and efficiently solve the problem of distorted face caused by the tri-plane warp." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.745, + 0.89, + 0.789 + ], + "angle": 0, + "content": "- We propose directional editing in latent space, achieving disentangled facial editing with the preservation of identity and details." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.639, + 0.891, + 0.789 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.806, + 0.65, + 0.821 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.664, + 0.846 + ], + "angle": 0, + "content": "2.1. 
3D-aware GANs" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Inspired by the superiority of implicit representation [31], several attempts [2-4, 11, 15, 32, 33, 41, 45, 53, 55, 64] deploy radiance fields into generative models and thus en" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.945, + 0.511, + 0.957 + ], + "angle": 0, + "content": "917" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.091, + 0.473, + 0.394 + ], + "angle": 0, + "content": "able 3D consistent image synthesis. The capability of learning 3D representations from unposed single-view 2D images only empowers these 3D-aware GAN models to gain wide interests and applications. However, partial 3D-aware GANs [3, 15, 32, 33, 41, 64] adopt full implicit representation that lacks pre-computed 3D features before the point sampling. As a consequence, they need to regenerate the 3D feature when given novel viewpoints, limiting the efficiency of them in interactive applications. To address this challenge, several works [2, 4, 45, 53] adopt hybrid representations that first generate view-independent features, and enable sampling points on these pre-computed features for novel view synthesis. Consequently, these methods can realize rapid generation and maintain the inherent 3D-consistent representation. Specifically, EG3D [4] introduces the light tri-plane representation into the generator to raise efficiency and further enhance the image quality. Considering its efficient representation and mature downstream techniques, we adopt the EG3D [4] as the base 3D-aware model to demonstrate the effectiveness of our methods." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.405, + 0.406, + 0.421 + ], + "angle": 0, + "content": "2.2. Implicit Representations Deformation" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.429, + 0.473, + 0.777 + ], + "angle": 0, + "content": "The deformation of 3D implicit representation has long attracted wide focus, as it serves as the foundation of broad animation applications. Prior researches predominantly introduce an additional deformation field based on the original representation to modify the 3D points. Specifically, deformation fields can be implemented through proxy-based editing [14, 21, 35, 57], cage-based editing [17, 37, 54], and parametric prior-based editing [40, 52, 63], etc. Proxy-based editing learns a lightweight neural network to compute the translation and rotation of 3D points, enabling the deformation of original 3D coordinates. The cage-based methods establish a surrounding cage to fully cover up the original surface of an implicit representation and then modify the cage to deform the inherent surface. Parametric prior-based methods leverage the parametric models such as SMPL [29] and FLAME [27] as a prior condition of the deformation network to drive the implicit representations. However, all of these approaches need to optimize a controllable module for each specific object, lack of efficiency and generality. In contrast, our work provides a landmark-based way to directly edit the 3D representation without optimization and further compresses the 3D deformation into 2D feature planes to improve efficiency." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.787, + 0.283, + 0.803 + ], + "angle": 0, + "content": "2.3. 
Face Editing in GANs" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.81, + 0.47, + 0.902 + ], + "angle": 0, + "content": "As the latent space learned by the conditioned GANs contains most of the distribution knowledge, many works [1, 42, 43, 50, 69] explore the latent space of a pre-trained generator for the following facial attribute editing. Specifically, InterFaceGAN [43] studies the semantics encoded in the latent space and disentangles the facial semantics with linear" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.091, + 0.895, + 0.41 + ], + "angle": 0, + "content": "projection. To explicitly edit the facial attributes, further works explore utilizing the intuitive representation, e.g., semantic maps [5, 46, 47, 68] and text prompts [19, 36] for the optimization or the extension of latent space. Moreover, an idea that directly drags the face for the editing catches the wide attention. DragGAN [34] optimizes the latent space via dragging selected points on the image to the target positions. However, it is hard to preserve the facial identity when setting a far distance between the two points, preventing the DragGAN from large-scale editing. Despite the prominent performance of latent space manipulation, it still faces a challenge in balancing the identity preservation and editing amplitude. To further enhance the editing capability, several works [6, 13, 24] focus on the parameter space of a pre-trained generator. While these methods can achieve out-of-domain editing, they need to maintain a specific generator for each attribute manipulation, lacking efficiency. Compared to the methods mentioned above, our method is an intuitive way of dragging points to deform the 3D representations while improving the efficiency and preserving the identity." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.421, + 0.6, + 0.437 + ], + "angle": 0, + "content": "3. Methods" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.447, + 0.895, + 0.69 + ], + "angle": 0, + "content": "Our proposed framework, FaceEdit3D, aims at multi-view consistent facial editing in shape, expression, and pose via warping-guided directional editing, as illustrated in Fig. 2. To this end, we first review the 3D-aware GAN that achieves high-resolution face rendering from multiple views (Sec. 3.1). Based on the 3D-aware generator, we propose a point-guided feature-space warping method that manipulates the inherent tri-plane representations while ensuring the 3D consistency (Sec. 3.2). However, directly editing the tri-plane may lead to distortions in the final rendered images. Therefore, we train a specifically designed encoder to project the warped renderings to the standardized latent space for photo-realistic editing results (Sec. 3.3). Finally, we delve into the mechanism of latent space and propose directional editing in latent space that enables the disentangled editing of facial shape, expression, and pose (Sec. 3.4)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.697, + 0.868, + 0.712 + ], + "angle": 0, + "content": "3.1. Preliminaries on 3D-aware Face Generator" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Our framework is built upon EG3D [4], one of the most powerful 3D-aware generative models that achieve photorealistic 3D face generation. The generator of EG3D introduces a tri-plane representation, which compactly encodes the geometry and appearance of a 3D face. 
Specifically, the tri-plane features can be denoted as \\(\\mathbf{F} = \\mathcal{G}(\\mathbf{w})\\in\\) \\(\\mathbb{R}^{3\\times 32\\times 256\\times 256}\\) , where \\(\\mathbf{W}\\) is a latent code. To render face images from a specific viewpoint, the features of 3D coordinates are sampled from the tri-plane features and a shallow decoder is leveraged to project the tri-plane feature \\(\\mathbf{F}(x,y,z)\\in \\mathbb{R}^{32\\times 3}\\) into volume density \\(\\sigma \\in \\mathbb{R}^1\\) and color feature \\(c\\in \\mathbb{R}^{32}\\) . Subsequently, a low-resolution fea" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "918" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.083, + 0.09, + 0.538, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.217, + 0.276, + 0.405, + 0.29 + ], + "angle": 0, + "content": "(a) 3D-consistent Tri-plane Warp" + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.09, + 0.887, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.616, + 0.276, + 0.822, + 0.289 + ], + "angle": 0, + "content": "(b) The Pipeline of Our FaceEdit3D" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.3, + 0.892, + 0.384 + ], + "angle": 0, + "content": "Figure 2. Overview of our proposed FaceEdit3D. (a) A detailed illustration of our tri-plane warp. We project 2D key points onto the 3D face surface and then map them to each corresponding plane within a tri-plane representation. Afterward, we apply warping operations to each plane to achieve 3D-consistent editing. (b) The full pipeline of our FaceEdit3D. Given a source image \\(\\mathbf{I}_s\\) with its latent code \\(\\mathbf{w}_s\\), we first perform the tri-plane warping on it and obtain the warped rendering \\(\\hat{\\mathbf{I}}_t\\). Subsequently, we utilize a warp-aware encoder to extract the latent codes \\(\\mathbf{w}_s'\\) and \\(\\mathbf{w}_t'\\) from the source image \\(\\mathbf{I}_s\\) and the warped renderings \\(\\hat{\\mathbf{I}}_t\\), respectively. Then, we employ the hierarchical latent direction to update the target latent code \\(\\mathbf{w}_t\\). Finally, the edited facial image \\(\\mathbf{I}_t\\) can be synthesized via the updated latent code \\(\\mathbf{w}_t\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.396, + 0.47, + 0.473 + ], + "angle": 0, + "content": "ture map is generated via volume rendering and then upsampled to high-resolution images. The representation ability of tri-plane features has been verified by several recent works [7, 20, 24]. Therefore, to achieve 3D-consistent editing, we choose to operate directly on the tri-plane features." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.483, + 0.395, + 0.5 + ], + "angle": 0, + "content": "3.2. Multi-view Consistent Face Warping" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.507, + 0.469, + 0.643 + ], + "angle": 0, + "content": "For 3D face editing, it is a flexible way for users to directly drag points on the rendered images. Different from 2D-level editing that limits to one specific viewpoint, 3D-level manipulation should support editing from an arbitrary viewpoint and achieve 3D-consistent editing effects. To achieve this, we propose a framework based on point-guided triplane warping, where users manipulate one or several points from a desirable viewpoint, and the tri-plane features are warped according to the point displacements." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.644, + 0.469, + 0.779 + ], + "angle": 0, + "content": "Point Manipulation by Users. Ideally, users can directly modify arbitrary points in a rendered face to achieve editing. Nevertheless, the potential conflicts among excessive control points may lead to undesirable distortions of the facial structure during the joint point manipulation, consequently yielding results that deviate from realistic human appearances. To address this issue, we constrain the users to manipulate a set of meaningful 3D facial landmarks to guarantee a natural face structure." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Specifically, given a latent code \\(\\mathbf{w}_s\\) and a pre-trained EG3D generator \\(\\mathcal{G}\\), the portrait is first rendered in the front view with camera intrinsic \\(\\mathbf{K}\\). Then, 2D facial landmarks are detected by a pre-trained detector and projected on the facial surface to obtain 3D landmarks \\(\\mathbf{P} = \\{\\mathbf{p}_0,\\mathbf{p}_1,\\dots ,\\mathbf{p}_n\\} \\in \\mathbb{R}^{n\\times 3}\\), and \\(\\mathbf{p}_i = \\{\\mathbf{p}_i^x,\\mathbf{p}_i^y,\\mathbf{p}_i^z\\} \\in \\mathbb{R}^3\\). Consequently, users can render images from an arbitrary viewpoint with extrinsic \\(\\mathbf{R} \\in \\mathbb{S}\\mathbb{O}(3)\\) and select any spe" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.396, + 0.892, + 0.456 + ], + "angle": 0, + "content": "cific points for editing. Take the selected point \\(\\mathbf{p}_i\\) as an example, we set the movement of the point \\(\\Delta \\mathbf{p}_i\\) is perpendicular to the rendering direction. The updated 3D point \\(\\mathbf{p}_i^{\\prime}\\) is represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.466, + 0.891, + 0.484 + ], + "angle": 0, + "content": "\\[\n\\mathbf {p} _ {i} ^ {\\prime} = \\mathbf {p} _ {i} + \\mathbf {R} ^ {- 1} \\mathbf {K} ^ {- 1} \\mathbf {Z} \\Delta \\mathbf {p} _ {i}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.493, + 0.891, + 0.554 + ], + "angle": 0, + "content": "where \\(\\mathbf{Z}\\) is the depth of the selected point in the pose \\(\\mathbf{R}\\). After manipulating specific points within the facial structure, we obtain a set of new 3D landmarks \\(\\mathbf{P}' = \\{\\mathbf{p}_0', \\mathbf{p}_1', \\dots, \\mathbf{p}_n'\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.555, + 0.892, + 0.751 + ], + "angle": 0, + "content": "Tri-plane Warping. After the users have manipulated the key points, we apply 3D warping on the tri-planes to edit the 3D representation. Individually considering each of the tri-plane features [7], we can extend the editing in 3D space onto three 2D planes to enhance efficiency. Therefore, we begin by projecting the 3D landmarks onto the three feature planes, and then individually apply a similar warping transformation on each of these feature planes, as illustrated in Fig. 2 (a). 
Take the \\( xy \\)-plane \\( \\mathbf{F}_{xy} \\) as an example, given \\( n \\) source projected points \\( \\mathbf{P}^{xy} = \\{\\mathbf{p}_0^{xy}, \\mathbf{p}_1^{xy}, \\dots, \\mathbf{p}_n^{xy}\\} \\in \\mathbb{R}^{n \\times 2} \\), \\( \\mathbf{p}_i^{xy} = \\{\\mathbf{p}_i^x, \\mathbf{p}_i^y\\} \\) and their target points \\( \\hat{\\mathbf{P}}^{xy} = \\{\\hat{\\mathbf{p}}_0^{xy}, \\hat{\\mathbf{p}}_1^{xy}, \\dots, \\hat{\\mathbf{p}}_n^{xy}\\} \\), we employ thin-plate spline interpolation [9] to compute the grid sampler with:" + }, + { + "type": "equation", + "bbox": [ + 0.558, + 0.761, + 0.891, + 0.8 + ], + "angle": 0, + "content": "\\[\ng (\\mathbf {q}) = \\sum_ {i = 1} ^ {n} w _ {i} \\phi \\left(\\left\\| \\mathbf {q} - \\hat {\\mathbf {p}} _ {i} \\right\\|\\right) + \\mathbf {v} ^ {T} \\mathbf {q} + \\mathbf {b}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.81, + 0.892, + 0.901 + ], + "angle": 0, + "content": "where \\(\\phi (r) = r^2\\log (r)\\) is the kernel function and \\(g(\\mathbf{q})\\) provides the inverse mapping of the location \\(\\mathbf{p}\\) to the original plane coordinates \\(\\mathbf{q}\\). The parameters \\(\\mathbf{v},\\mathbf{b}\\) are the parameters to minimize a certain definition of curvature. Similarly, by applying such inverse mapping to all three planes, we complete the tri-plane warping and achieve the inherently" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "919" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.168 + ], + "angle": 0, + "content": "3D-consistent modification. Compared to the manipulation of the sampled 3D coordinate space [60, 62], our method directly manipulates the 3D representation, empowering to simultaneously edit from multiple viewpoints without additional steps." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.176, + 0.291, + 0.192 + ], + "angle": 0, + "content": "3.3. Warp-Aware Encoding" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.2, + 0.47, + 0.319 + ], + "angle": 0, + "content": "After tri-plane warping, the editing results exhibit 3D consistent modification. However, directly applying warping operation on tri-plane features may not conform to the facial distribution in the latent space, leading to a severely distorted appearance. To solve this problem, our solution is to encode the distorted facial image \\(\\hat{\\mathbf{I}}_t\\) into a standardized latent space that learns the natural counterpart \\(\\mathbf{w}_t^\\prime\\) of the distorted face with an encoder \\(\\mathcal{E}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.33, + 0.469, + 0.348 + ], + "angle": 0, + "content": "\\[\n\\mathbf {w} _ {t} ^ {\\prime} = \\mathcal {E} (\\hat {\\mathbf {I}} _ {t}). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.359, + 0.469, + 0.494 + ], + "angle": 0, + "content": "To train the encoder, we sample images from the pretrained generator to generate image and latent code pairs. Specifically, the portrait \\(\\mathbf{I}_s\\) is generated from the randomly sampled latent code and the camera poses \\(\\mathbf{c}\\). Subsequently, the portrait \\(\\mathbf{I}_s\\) is projected to latent code \\(\\mathbf{w}_s^\\prime\\) by the encoder \\(\\mathcal{E}\\), and then the corresponding image \\(\\mathbf{I}_s^\\prime\\) is generated by the same frozen generator \\(\\mathcal{G}\\) and pose \\(\\mathbf{c}\\). 
The optimization objective of the encoder is the combination of L1 Loss, LPIPS loss [61], and identity loss [10]:" + }, + { + "type": "equation", + "bbox": [ + 0.096, + 0.504, + 0.469, + 0.521 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {o} = \\mathcal {L} _ {1} \\left(\\mathbf {I} _ {s}, \\mathbf {I} _ {s} ^ {\\prime}\\right) + \\mathcal {L} _ {\\mathbf {L P I P S}} \\left(\\mathbf {I} _ {s}, \\mathbf {I} _ {s} ^ {\\prime}\\right) + \\mathcal {L} _ {\\mathbf {I D}} \\left(\\mathbf {I} _ {s}, \\mathbf {I} _ {s} ^ {\\prime}\\right). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.532, + 0.469, + 0.668 + ], + "angle": 0, + "content": "Unfortunately, we find that the encoder trained with the aforementioned method poses difficulties in identifying subtle modifications due to the inherent complexity of 3D-aware generators. Hence, we further introduce the triplane warping as the data augmentation to enhance the overall perception of subtle edits. Similar to the above training pipeline, we apply the encoder onto the warped rendering \\(\\hat{\\mathbf{I}}_t\\) to obtain the latent code \\(\\mathbf{w}_t'\\), thus generating its inverted image \\(\\mathbf{I}_t'\\). The loss is calculated between \\(\\mathbf{I}_t'\\) and \\(\\hat{\\mathbf{I}}_t\\):" + }, + { + "type": "equation", + "bbox": [ + 0.098, + 0.678, + 0.469, + 0.695 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {w} = \\mathcal {L} _ {1} \\left(\\hat {\\mathbf {I}} _ {t}, \\mathbf {I} _ {t} ^ {\\prime}\\right) + \\mathcal {L} _ {\\mathbf {L P I P S}} \\left(\\hat {\\mathbf {I}} _ {t}, \\mathbf {I} _ {t} ^ {\\prime}\\right) + \\mathcal {L} _ {\\mathbf {I D}} \\left(\\hat {\\mathbf {I}} _ {t}, \\mathbf {I} _ {t} ^ {\\prime}\\right). \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.469, + 0.75 + ], + "angle": 0, + "content": "Besides, following GOAE [58], we utilize a discriminator \\(\\mathcal{D}\\) to ensure the latent codes \\(\\mathbf{w}_t^\\prime\\) and \\(\\mathbf{w}_s^\\prime\\) in the standardized latent space:" + }, + { + "type": "equation", + "bbox": [ + 0.136, + 0.76, + 0.469, + 0.8 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {d} = \\mathbb {E} [ f (\\mathcal {D} \\left(\\mathbf {w} _ {t} ^ {\\prime}\\right)) + f (\\mathcal {D} \\left(\\mathbf {w} _ {s} ^ {\\prime}\\right)) ] (6) \\\\ + \\mathbb {E} [ f (- \\mathcal {D} (\\mathbf {w} _ {c})) ] + \\gamma | | \\nabla \\mathcal {D} (\\mathbf {w} _ {c}) | | ^ {2}, (6) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.809, + 0.469, + 0.87 + ], + "angle": 0, + "content": "where \\( f(x) = -\\log (1 + \\exp (-x)) \\), and \\( \\gamma \\) is a hyperparameter in R1 regularization. \\( \\mathbf{w}_{\\mathbf{c}} \\) are pre-sampled standardized latent codes by the frozen generator. The final objective linearly combines the aforementioned losses:" + }, + { + "type": "equation", + "bbox": [ + 0.202, + 0.882, + 0.469, + 0.897 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {o} + \\mathcal {L} _ {w} + \\mathcal {L} _ {d}. 
\\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.137 + ], + "angle": 0, + "content": "After the training process, the edited rendering is projected into latent space and then passed to the generator to yield a more reasonable editing result in the target view \\(\\mathbf{c}_t\\):" + }, + { + "type": "equation", + "bbox": [ + 0.641, + 0.146, + 0.892, + 0.162 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {t} = \\mathcal {G} \\left(\\mathbf {w} _ {t} ^ {\\prime}, \\mathbf {c} _ {t}\\right). \\tag {8}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.17, + 0.808, + 0.187 + ], + "angle": 0, + "content": "3.4. Directional Editing in Latent Space" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.193, + 0.892, + 0.299 + ], + "angle": 0, + "content": "Warp-aware encoder solves the problem of severely distorted appearance caused by the tri-plane warp, however, it additionally introduces identity bias into the latent codes as the encoder cannot faithfully inverse faces. Besides, it is still hard to handle the ambiguity during the point-manipulation. Therefore, we here propose directional editing learning to further overcome these two challenges." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.3, + 0.892, + 0.48 + ], + "angle": 0, + "content": "To begin with, we adopt the difference between the latent codes that are extracted from the images before and after warping by the encoder as the direction guidance. In this way, we mitigate the identity bias and bypass the problem caused by the encoder. Furthermore, we follow Style-CLIP [36] to explore the semantics of layers in the \\( W+ \\) latent space of EG3D [4], empowering our method with the disentangled editing of the expression and shape. According to the hierarchical mechanism, we can obtain free editing results by applying editing directions in the variant layers to the same warping facial image, successfully avoiding the ambiguity caused by the tri-plane warp." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.481, + 0.892, + 0.57 + ], + "angle": 0, + "content": "The full pipeline is shown in Fig. 2 (b). Given a latent code \\(\\mathbf{w}_s\\) and the frozen EG3D generator \\(\\mathcal{G}\\), the facial triplane can be generated. Specifically, the warp-aware encoder projects these two images to standardized latent codes \\(\\mathbf{w}_s'\\) and \\(\\mathbf{w}_t'\\) with Eq. (3), respectively. The target edited latent code \\(\\mathbf{w}_t\\) can be calculated with:" + }, + { + "type": "equation", + "bbox": [ + 0.606, + 0.58, + 0.891, + 0.597 + ], + "angle": 0, + "content": "\\[\n\\mathbf {w} _ {t} = \\mathbf {w} _ {s} + H \\left(\\mathbf {w} _ {t} ^ {\\prime} - \\mathbf {w} _ {s} ^ {\\prime}\\right), \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.605, + 0.892, + 0.651 + ], + "angle": 0, + "content": "where \\(H(\\cdot)\\) is a feature selection module for disentangling latent direction. Finally, the modified portrait \\(\\mathbf{I}_t\\) can be rendered from any perspective \\(\\mathbf{c}_t\\) with \\(\\mathbf{I}_t = \\mathcal{G}(\\mathbf{w}_t,\\mathbf{c}_t)\\)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.663, + 0.633, + 0.68 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.688, + 0.892, + 0.809 + ], + "angle": 0, + "content": "In this section, we evaluate the efficiency and the quality of our 3D-aware face editing model. We first introduce the implementation details of our work (Sec. 4.1). 
Subsequently, we compare our method with the SOTA 3D face editing methods qualitatively (Sec. 4.2) and quantitatively (Sec. 4.3). Then, we conduct ablation studies to analyze the effect of each component (Sec. 4.4). Finally, we introduce the potential applications of our method (Sec. 4.5)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.818, + 0.716, + 0.833 + ], + "angle": 0, + "content": "4.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.9 + ], + "angle": 0, + "content": "We build our approach on the EG3D [4] pre-trained on the FFHQ dataset [22]. We employ the Mediapipe [30] to detect 2D landmarks and select 29 points for user manipulation. To obtain 3D landmarks, we first detect 2D landmarks" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "920" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.09, + 0.891, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.466, + 0.897, + 0.496 + ], + "angle": 0, + "content": "Figure 3. Qualitative comparisons with current SOTA methods for 3D face shape and expression editing. (a), (b), and (c) are the results of synthetic samples, and (d) showcases the results of a real-world portrait." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.507, + 0.473, + 0.658 + ], + "angle": 0, + "content": "in the frontal view, and then compute the 3D coordinates by the locations of maximum density value on their corresponding emitted rays. We adopt Swin-transformer [28] as the encoder structure to enhance the detail perception. In the encoder training, the standardized latent codes are sampled to generate the face images under random views, consisting of totally 100000 identities. We adopt the Adam optimizer [25] and set the learning rates as \\(1e - 4\\) for both the encoder and the discriminator. All the implementations are based on the PyTorch and set up on Nvidia A6000 GPUs." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.666, + 0.288, + 0.683 + ], + "angle": 0, + "content": "4.2. Qualitative Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.471, + 0.901 + ], + "angle": 0, + "content": "We conduct a qualitative comparison between our work and several SOTA 3D face editing methods with intuitive manipulation, i.e., StyleGAN-NADA [13] guided by the text prompts and IDE-3D [46] controlled by the semantic maps. Besides, we also introduce the point-based warping approach into the qualitative comparison. We adopt similar editing objectives and use their official codes to ensure fairness. Fig. 3 shows the multi-view results of the shape and expression editing, demonstrating the superiority of our method on fine-grained modification. The warp can accomplish obvious editing, but it suffers from facial distortion. IDE-3D [46] achieve satisfied results in most cases. However, the coupling of different facial attributes in the semantic maps leads to changes beyond the target attributes. For" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.507, + 0.895, + 0.688 + ], + "angle": 0, + "content": "instance, the baby in Fig. 3 (c) shows the shift of age and identity when trying to elongate his chin. Besides, IDE-3D only supports single-view editing, limiting its availability. StyleGAN-NADA [13] fails to edit the facial shape based on the EG3D despite its great success in style transfer and texture editing. 
In contrast, our method supports the user to simultaneously manipulate the face from multiple views and enables intuitive editing for facial shapes, expressions, and poses without the sacrifice of identity and detail. In addition to the editing quality, our method has another advantage that it does not require additional training for generative models, demonstrating its generalization." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.69, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Furthermore, we also compare our method with a recent 2D method, DragGAN [34], which employs a similar point-guided operation to ours. Since DragGAN is limited to 2D editing, we compare the results in two aspects, i.e., fixed view editing and novel view synthesis, as shown in Fig. 4. In the aspect of fixed-view editing, the results of DragGAN [34] in Fig. 4 (a) show a tendency to open the mouth and change the identity when shortening the nose, although a mask limiting the editable region is applied. In the aspect of novel view synthesis, DragGAN severely changes the identity due to ambiguous point dragging in Fig. 4 (b). Compared to DragGAN, our method succeeds in achieving the expected editing target while maintaining the identity and irrelevant parts unchanged." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "921" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.113, + 0.089, + 0.855, + 0.19 + ], + "angle": 0, + "content": "
MethodsSchemeInference Time (s)↓MSEi↑MSEo↓MSEi / MSEo↑ID Consistency↑
DragGAN [34]2D5.2311.9920.2248.8930.579
Ours2D0.3562.0490.18611.0160.716
Our warp3D0.2692.4550.3287.4850.707
IDE-3D [46]3D0.3831.8410.9871.8650.649
Ours3D0.6241.6790.3424.9090.712
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.195, + 0.893, + 0.223 + ], + "angle": 0, + "content": "Table 2. Quantitative comparison with several face editing methods on efficiency and effectiveness. The best results are labeled in bold except for our direct warp due to its distortion results. The unit of \\(\\mathrm{MSE}_i\\) and \\(\\mathrm{MSE}_o\\) are \\(10^{-2}\\)." + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.234, + 0.47, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.4, + 0.471, + 0.456 + ], + "angle": 0, + "content": "Figure 4. Qualitative comparisons with DragGAN [34] on portrait editing. Red and blue points represent the source and target points in the manipulations, respectively. The semi-transparent region indicates the mask used for DragGAN, while not in our method." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.469, + 0.3, + 0.485 + ], + "angle": 0, + "content": "4.3. Quantitative Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.492, + 0.47, + 0.643 + ], + "angle": 0, + "content": "We also conduct quantitative experiments to verify the efficiency and effectiveness of our method, as shown in Tab. 2. We adopt editing time as the metric to evaluate the efficiency because it severely influences the user experiences. As shown, DragGAN [34] spends a large amount of time on latent optimization, resulting in lower efficiency. IDE-3D [46] and our method exhibit similar efficiency in supporting real-time editing. Despite the fastest method, the method of direct warp causes facial distortion, and thus we exclude it from the comparison." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.645, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Furthermore, to assess the capability of disentangled editing, we measure the pixel-wise mean square error (MSE) inside and outside the target editing regions as the metric. The main objective is to successfully edit the target regions while preventing the outside regions from modification. As shown, our approach achieves better editing disentanglement than IDE3D [46] with minimized ratio of \\(\\mathrm{MSE}_i\\) and \\(\\mathrm{MSE}_o\\). It is worth noting that the editability of 3D GANs is inferior to that of 2D GANs, and thus our method falls behind the DragGAN [34]. Considering the efficiency and the ability to multi-view editing of our method, the gap between ours and the DragGAN is acceptable. To fairly compare these two methods without the interference of base generators, we further extend our method to the same 2D generator and it performs better than DragGAN [34] in this setting. Additionally, we also compare the identity similarity. The results indicate that our method can better maintain" + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.233, + 0.891, + 0.368 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.381, + 0.893, + 0.451 + ], + "angle": 0, + "content": "Figure 5. The ablation study of our loss functions for training the encoder. The first row aims to widen the double eyelids while keeping the eyes open, and the second is to lengthen the bangs. The numbers in the corners represent the identity similarity measured by ArcFace [10]. Please zoom-in for detailed observation." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.465, + 0.776, + 0.48 + ], + "angle": 0, + "content": "the identity character than other methods." 
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.492, + 0.654, + 0.509 + ], + "angle": 0, + "content": "4.4. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.516, + 0.893, + 0.622 + ], + "angle": 0, + "content": "Effectiveness of Loss Functions. We investigate the effectiveness of each loss function in the encoder training process, as depicted in Fig. 5. The \\(\\mathcal{L}_w\\) introduced by the warp-assisted data augmentation facilitates the accurate identification for user's manipulations, and the \\(\\mathcal{L}_d\\) helps to maintain identity information. The combination of them achieves the best editing results." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.623, + 0.895, + 0.82 + ], + "angle": 0, + "content": "Effectiveness of Directional Latent Editing. We conduct an ablation study to verify the effectiveness of our directional latent editing. We begin with applying tri-plane warping on source identities to obtain the warped results. Subsequently, we extract the directions of different layer groups, i.e., shape direction, expression direction, and the combined directions, respectively. Fig. 6 shows that the individual directional latent code has the capacity to disentangle the attributes, while the combination of them can realize integrated editing. However, directly mapping warped rendering to latent space without our directional latent module results in identity shifting and detail deficiency. These results can verify the effectiveness of our directional latent editing." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.634, + 0.848 + ], + "angle": 0, + "content": "4.5. Applications" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Generalization of Learned Latent Directions. The editing direction learned for one face can be generalized to other instances, and we can further control the degree along the" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "922" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.473, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.237, + 0.47, + 0.279 + ], + "angle": 0, + "content": "Figure 6. The ablation study of our directional editing. \"w/o Dir.\" represents results generated by directly projecting the warped results to latent space." + }, + { + "type": "image", + "bbox": [ + 0.079, + 0.289, + 0.473, + 0.45 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.462, + 0.47, + 0.518 + ], + "angle": 0, + "content": "Figure 7. The interpolated editing results along the directions learned in the case of Fig. 3 (a) and (d), i.e., \"wider face\" and \"close mouth\" respectively. It shows that the learned editing direction in one face can be generalized to other instances." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.542, + 0.47, + 0.663 + ], + "angle": 0, + "content": "direction to linearly interpolate the editing results. Fig. 7 shows the interpolation results guided by the directions learned in the cases of Fig. 3, i.e., wider face and closed mouth. With the degree rising from -2.0 to 2.0, both of the two identities show a gradual trend to change along their directions, although the directions are initially learned for other cases, demonstrating the generalization of these learned latent directions." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.669, + 0.47, + 0.775 + ], + "angle": 0, + "content": "Continuous Editing. Continuous editing is important to real-world applications. Therefore, we conduct an experiment to show our capability of overlying modification. Fig. 8 shows the results with multiple editing targets, i.e., smaller eyes, closed mouth, smaller nose, and wider face. The natural and ID-consistent results demonstrate the effectiveness of our method of continuous editing." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Generalization to Other Generators. To show the generalized application of our method, we extend it to 3D cat editing and 2D car editing. We introduce our method to the pre-trained EG3D [4] on AFHQ Cats [8] dataset and StyleGAN [23] trained on Stanford Cars [26] dataset, respectively. As shown in Fig. 9, our approach can also successfully manipulate the 3D cats and 2D cars according to the user's point-based instructions." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.089, + 0.892, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.26, + 0.892, + 0.287 + ], + "angle": 0, + "content": "Figure 8. We showcase the mixing results with multiple attributes, demonstrating the continuous editing ability of our method." + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.288, + 0.892, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.439, + 0.877, + 0.453 + ], + "angle": 0, + "content": "Figure 9. The extension of our method to cat and car editing." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.464, + 0.62, + 0.48 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.489, + 0.892, + 0.7 + ], + "angle": 0, + "content": "In this paper, we propose FaceEdit3D, an intuitive method to edit the 3D facial shape and expression from any perspective. Our approach involves a tri-plane warping to ensure the inherent 3D-consistent editing. To mitigate facial distortions led by the warping, we train a warp-aware encoder to project the warped face into standardized distribution and further explore the hierarchical mechanism in latent space to achieve disentangled editing. Extensive experiments demonstrate the effectiveness and efficiency of our method. The additional applications also show the generalization and potential of our method across different applications. To sum up, our method provides a brand new way to manipulate the 3D representation, opening up new avenues for rapid and convenient real-image editing." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.7, + 0.892, + 0.807 + ], + "angle": 0, + "content": "Limitations. Since our method is based on warping the 3D representation, it is hard for our work to achieve texture editing and some semantic editing, such as wearing glasses. Broader Impacts. Despite not our intention, our 3D-aware facial editing capability could potentially be abused. We are committed to privacy protection, preventing the misuse of facial editing for criminal purposes." 
+ }, + { + "type": "title", + "bbox": [ + 0.501, + 0.82, + 0.669, + 0.838 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.845, + 0.892, + 0.891 + ], + "angle": 0, + "content": "This work was supported in part by NSFC (62201342, 62101325), and Shanghai Municipal Science and Technology Major Project (2021SHZDZX0102)." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "923" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.17 + ], + "angle": 0, + "content": "[1] Rameen Abdal, Peihao Zhu, Niloy J Mitra, and Peter Wonka. Styleflow: Attribute-conditioned exploration of stylegan-generated images using conditional continuous normalizing flows. TOG, pages 1-21, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.472, + 0.225 + ], + "angle": 0, + "content": "[2] Sizhe An, Hongyi Xu, Yichun Shi, Guoxian Song, Umit Y Ogras, and Linjie Luo. Panohed: Geometry-aware 3d fullhead synthesis in 360deg. In CVPR, pages 20950-20959, 2023. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.227, + 0.47, + 0.282 + ], + "angle": 0, + "content": "[3] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In CVPR, pages 5799-5809, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.284, + 0.47, + 0.352 + ], + "angle": 0, + "content": "[4] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In CVPR, pages 16123-16133, 2022. 1, 2, 3, 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.354, + 0.469, + 0.395 + ], + "angle": 0, + "content": "[5] Anpei Chen, Ruiyang Liu, Ling Xie, Zhang Chen, Hao Su, and Jingyi Yu. Sofgan: A portrait image generator with dynamic styling. TOG, pages 1-26, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.397, + 0.469, + 0.451 + ], + "angle": 0, + "content": "[6] Zhuo Chen, Xudong Xu, Yichao Yan, Ye Pan, Wenhan Zhu, Wayne Wu, Bo Dai, and Xiaokang Yang. Hyperstyle3d: Text-guided 3d portrait stylization via hypernetworks. arXiv preprint arXiv:2304.09463, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.452, + 0.469, + 0.506 + ], + "angle": 0, + "content": "[7] Yuhao Cheng, Yichao Yan, Wenhan Zhu, Ye Pan, Bowen Pan, and Xiaokang Yang. Head3d: Complete 3d head generation via tri-plane feature distillation. arXiv preprint arXiv:2303.15892, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.508, + 0.469, + 0.549 + ], + "angle": 0, + "content": "[8] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In CVPR, pages 8188-8197, 2020. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.551, + 0.469, + 0.606 + ], + "angle": 0, + "content": "[9] Forrester Cole, David Belanger, Dilip Krishnan, Aaron Sarna, Inbar Mosseri, and William T Freeman. Synthesizing normalized faces from facial identity features. In CVPR, pages 3703-3712, 2017. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.607, + 0.469, + 0.648 + ], + "angle": 0, + "content": "[10] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, pages 4690-4699, 2019. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.649, + 0.469, + 0.69 + ], + "angle": 0, + "content": "[11] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. Gram: Generative radiance manifolds for 3d-aware image generation. In CVPR, pages 10673-10683, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.691, + 0.469, + 0.732 + ], + "angle": 0, + "content": "[12] Yuki Endo. User-controllable latent transformer for stylegan image layout editing. In Computer Graphics Forum, pages 395-406, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.734, + 0.469, + 0.787 + ], + "angle": 0, + "content": "[13] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. TOG, pages 1-13, 2022. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.469, + 0.858 + ], + "angle": 0, + "content": "[14] Stephan J Garbin, Marek Kowalski, Virginia Estellers, Stanislaw Szymanowicz, Shideh RezaEIFar, Jingjing Shen, Matthew Johnson, and Julien Valentin. Voltemorph: Realtime, controllable and generalisable animation of volumetric representations. arXiv preprint arXiv:2208.00949, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[15] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In ICLR, 2021. 1, 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[16] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. Ganspace: Discovering interpretable gan controls. NeurIPS, pages 9841–9850, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.892, + 0.189 + ], + "angle": 0, + "content": "[17] Clément Jambon, Bernhard Kerbl, Georgios Kopanas, Stavros Diolatzis, George Drettakis, and Thomas Leimkuhler. Nerfshop: Interactive editing of neural radiance fields. CGIT, 6(1), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.19, + 0.892, + 0.244 + ], + "angle": 0, + "content": "[18] Kaiwen Jiang, Shu-Yu Chen, Feng-Lin Liu, Hongbo Fu, and Lin Gao. Nerfaceediting: Disentangled face editing in neural radiance fields. In SIGGRAPH Asia, pages 1-9, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.246, + 0.892, + 0.287 + ], + "angle": 0, + "content": "[19] Yuming Jiang, Ziqi Huang, Xingang Pan, Chen Change Loy, and Ziwei Liu. Talk-to-edit: Fine-grained facial editing via dialog. In ICCV, pages 13799-13808, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.288, + 0.892, + 0.329 + ], + "angle": 0, + "content": "[20] Wonjoon Jin, Nuri Ryu, Geonung Kim, Seung-Hwan Baek, and Sunghyun Cho. Dr. 3d: Adapting 3d gans to artistic drawings. In SIGGRAPH Asia, pages 1-8, 2022. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.33, + 0.892, + 0.397 + ], + "angle": 0, + "content": "[21] Kacper Kania, Stephan J Garbin, Andrea Tagliasacchi, Virginia Estellers, Kwang Moo Yi, Julien Valentin, Tomasz Trzciński, and Marek Kowalski. Blendfields: Few-shot example-driven facial modeling. In CVPR, pages 404-415, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.399, + 0.892, + 0.44 + ], + "angle": 0, + "content": "[22] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019. 1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.442, + 0.892, + 0.495 + ], + "angle": 0, + "content": "[23] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, pages 8110-8119, 2020. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.497, + 0.892, + 0.551 + ], + "angle": 0, + "content": "[24] Gwanghyun Kim and Se Young Chun. Datid-3d: Diversitypreserved domain adaptation using text-to-image diffusion for 3d generative model. In CVPR, pages 14203–14213, 2023. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.553, + 0.892, + 0.58 + ], + "angle": 0, + "content": "[25] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.581, + 0.892, + 0.622 + ], + "angle": 0, + "content": "[26] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV, pages 554–561, 2013. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.623, + 0.892, + 0.664 + ], + "angle": 0, + "content": "[27] Tianye Li, Timo Bolkart, Michael J Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4d scans. TOG, pages 194-1, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.665, + 0.892, + 0.719 + ], + "angle": 0, + "content": "[28] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, pages 10012-10022, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.72, + 0.892, + 0.775 + ], + "angle": 0, + "content": "[29] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. In Seminal Graphics Papers: Pushing the Boundaries, Volume 2, pages 851-866. 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.776, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[30] Camillo Lugaresi, Jiuqiang Tang, Hadon Nash, Chris McClanahan, Esha Uboweja, Michael Hays, Fan Zhang, Chuoling Chang, Ming Guang Yong, Juhyun Lee, et al. Mediapipe: A framework for building perception pipelines. arXiv preprint arXiv:1906.08172, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[31] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, pages 99-106, 2020. 
2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.956 + ], + "angle": 0, + "content": "924" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.468, + 0.134 + ], + "angle": 0, + "content": "[32] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In CVPR, pages 11453–11464, 2021. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.468, + 0.205 + ], + "angle": 0, + "content": "[33] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In CVPR, pages 13503-13513, 2022. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.207, + 0.468, + 0.274 + ], + "angle": 0, + "content": "[34] Xingang Pan, Ayush Tewari, Thomas Leimkuhler, Lingjie Liu, Abhinitra Meka, and Christian Theobalt. Drag your gan: Interactive point-based manipulation on the generative image manifold. In ASIGGRAPH, pages 1-11, 2023. 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.277, + 0.468, + 0.332 + ], + "angle": 0, + "content": "[35] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In ICCV, pages 5865-5874, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.334, + 0.468, + 0.387 + ], + "angle": 0, + "content": "[36] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In CVPR, pages 2085–2094, 2021. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.39, + 0.468, + 0.458 + ], + "angle": 0, + "content": "[37] Yicong Peng, Yichao Yan, Shengqi Liu, Yuhao Cheng, Shanyan Guan, Bowen Pan, Guangtao Zhai, and Xiaokang Yang. Cagenerf: Cage-based neural radiance field for generalized 3d deformation and animation. NeurIPS, pages 31402-31415, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.461, + 0.468, + 0.53 + ], + "angle": 0, + "content": "[38] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.532, + 0.468, + 0.585 + ], + "angle": 0, + "content": "[39] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, pages 10684-10695, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.588, + 0.468, + 0.642 + ], + "angle": 0, + "content": "[40] Shunsuke Saito, Jinlong Yang, Qianli Ma, and Michael J Black. Scintimate: Weakly supervised learning of skinned clothed avatar networks. In CVPR, pages 2886-2897, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.645, + 0.468, + 0.686 + ], + "angle": 0, + "content": "[41] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In NIPS, 2020. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.688, + 0.468, + 0.728 + ], + "angle": 0, + "content": "[42] Yujun Shen and Bolei Zhou. 
Closed-form factorization of latent semantics in gans. In CVPR, pages 1532-1540, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.731, + 0.468, + 0.784 + ], + "angle": 0, + "content": "[43] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. Interfacegan: Interpreting the disentangled face representation learned by gans. TPAMI, pages 2004-2018, 2020. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.468, + 0.829 + ], + "angle": 0, + "content": "[44] Enis Simsar, Alessio Tonioni, Evin Pinar Ornek, and Federico Tombari. Latentswap3d: Semantic edits on 3d image gans. In ICCV, pages 2899-2909, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.831, + 0.468, + 0.871 + ], + "angle": 0, + "content": "[45] Ivan Skorokhodov, Sergey Tulyakov, Yiqun Wang, and Peter Wonka. Epigraf: Rethinking training of 3d gans. NeurIPS, pages 24487-24501, 2022. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.874, + 0.468, + 0.901 + ], + "angle": 0, + "content": "[46] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled edit" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.468, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.121 + ], + "angle": 0, + "content": "ing for high-resolution 3d-aware portrait synthesis. ToG, pages 1-10, 2022. 1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.124, + 0.892, + 0.165 + ], + "angle": 0, + "content": "[47] Jingxiang Sun, Xuan Wang, Yong Zhang, Xiaoyu Li, Qi Zhang, Yebin Liu, and Jue Wang. Fenerf: Face editing in neural radiance fields. In CVPR, pages 7672-7682, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.167, + 0.892, + 0.222 + ], + "angle": 0, + "content": "[48] Jingxiang Sun, Xuan Wang, Lizhen Wang, Xiaoyu Li, Yong Zhang, Hongwen Zhang, and Yebin Liu. Next3d: Generative neural texture rasterization for 3d-aware head avatars. In CVPR, pages 20991-21002, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.224, + 0.892, + 0.292 + ], + "angle": 0, + "content": "[49] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In CVPR, pages 6142-6151, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.296, + 0.892, + 0.337 + ], + "angle": 0, + "content": "[50] Andrey Voynov and Artem Babenko. Unsupervised discovery of interpretable directions in the gan latent space. In ICML, pages 9786-9796, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.34, + 0.892, + 0.407 + ], + "angle": 0, + "content": "[51] Tengfei Wang, Bo Zhang, Ting Zhang, Shuyang Gu, Jianmin Bao, Tadas Baltrusaitis, Jingjing Shen, Dong Chen, Fang Wen, Qifeng Chen, et al. Rodin: A generative model for sculpting 3d digital avatars using diffusion. In CVPR, pages 4563-4573, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.411, + 0.892, + 0.466 + ], + "angle": 0, + "content": "[52] Sijing Wu, Yichao Yan, Yunhao Li, Yuhao Cheng, Wenhan Zhu, Ke Gao, Xiaobo Li, and Guangtao Zhai. Ganhead: Towards generative animatable neural head avatars. In CVPR, pages 437-447, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.47, + 0.892, + 0.523 + ], + "angle": 0, + "content": "[53] Jianfeng Xiang, Jiaolong Yang, Yu Deng, and Xin Tong. 
Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. In ICCV, pages 2195-2205, 2023. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.527, + 0.892, + 0.554 + ], + "angle": 0, + "content": "[54] Tianhan Xu and Tatsuya Harada. Deforming radiance fields with cages. In ECCV, pages 159-175, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.557, + 0.892, + 0.598 + ], + "angle": 0, + "content": "[55] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In CVPR, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.601, + 0.892, + 0.684 + ], + "angle": 0, + "content": "[56] Yan Yichao, Cheng Yuhao, Chen Zhuo, Peng Yicong, Wu Sijing, Zhang Weitian, Li Junjie, Li Yixuan, Gao Jingnan, Zhang Weixia, Zhai Guangtao, and Yang Xiaokang. A survey on generative 3d digital humans based on neural networks: representation, rendering, and learning. _SCIENTIA SINICA Informationis_, pages 1858–, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.687, + 0.892, + 0.739 + ], + "angle": 0, + "content": "[57] Yu-Jie Yuan, Yang-Tian Sun, Yu-Kun Lai, Yuewen Ma, Rongfei Jia, and Lin Gao. Nerf-editing: geometry editing of neural radiance fields. In CVPR, pages 18353-18364, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.744, + 0.892, + 0.798 + ], + "angle": 0, + "content": "[58] Ziyang Yuan, Yiming Zhu, Yu Li, Hongyu Liu, and Chun Yuan. Make encoder great again in 3d gan inversion through geometry and occlusion-aware encoding. In ICCV, pages 2437-2447, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.802, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[59] Chi Zhang, Yiwen Chen, Yijun Fu, Zhenglin Zhou, Gang Yu, Billzb Wang, Bin Fu, Tao Chen, Guosheng Lin, and Chunhua Shen. StyleAvatar3d: Leveraging image-text diffusion models for high-fidelity 3d avatar generation. arXiv preprint arXiv:2305.19012, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.874, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[60] Jianfeng Zhang, Zihang Jiang, Dingdong Yang, Hongyi Xu, Yichun Shi, Guoxian Song, Zhongcong Xu, Xinchao Wang," + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.511, + 0.956 + ], + "angle": 0, + "content": "925" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.468, + 0.133 + ], + "angle": 0, + "content": "and Jiashi Feng. Avatargen: a 3d generative model for animatable human avatars. In ECCV, pages 668-685. Springer, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.468, + 0.189 + ], + "angle": 0, + "content": "[61] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, pages 586-595, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.192, + 0.468, + 0.247 + ], + "angle": 0, + "content": "[62] Xuanmeng Zhang, Jianfeng Zhang, Rohan Chacko, Hongyi Xu, Guoxian Song, Yi Yang, and Jiashi Feng. Getavatar: Generative textured meshes for animatable human avatars. In ICCV, pages 2273-2282, 2023. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.249, + 0.468, + 0.303 + ], + "angle": 0, + "content": "[63] Yufeng Zheng, Victoria Fernández Abrevaya, Marcel C Bühler, Xu Chen, Michael J Black, and Otmar Hilliges. Im avatar: Implicit morphable head avatars from videos. In CVPR, pages 13545-13555, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.305, + 0.468, + 0.359 + ], + "angle": 0, + "content": "[64] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.362, + 0.468, + 0.403 + ], + "angle": 0, + "content": "[65] Jiapeng Zhu, Ruili Feng, Yujun Shen, Deli Zhao, Zheng-Jun Zha, Jingren Zhou, and Qifeng Chen. Low-rank subspaces in gans. NeurIPS, pages 16648-16658, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.405, + 0.468, + 0.445 + ], + "angle": 0, + "content": "[66] Jiapeng Zhu, Yujun Shen, Yinghao Xu, Deli Zhao, and Qifeng Chen. Region-based semantic factorization in gans. In ICML, pages 27612-27632, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.448, + 0.468, + 0.5 + ], + "angle": 0, + "content": "[67] Jiapeng Zhu, Ceyuan Yang, Yujun Shen, Zifan Shi, Bo Dai, Deli Zhao, and Qifeng Chen. Linkgan: Linking gan latents to pixels for controllable image synthesis. In ICCV, pages 7656-7666, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.504, + 0.468, + 0.544 + ], + "angle": 0, + "content": "[68] Peihao Zhu, Rameen Abdal, Yipeng Qin, and Peter Wonka. Sean: Image synthesis with semantic region-adaptive normalization. In CVPR, pages 5104-5113, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.547, + 0.468, + 0.601 + ], + "angle": 0, + "content": "[69] Peiye Zhuang, Oluwasanmi Koyejo, and Alexander G Schwing. Enjoy your editing: Controllable gans for image editing via latent space navigation. 
arXiv preprint arXiv:2102.01187, 2021.3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.468, + 0.601 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.956 + ], + "angle": 0, + "content": "926" + } + ] +] \ No newline at end of file diff --git a/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/d47f630a-17d8-4298-a368-699d1959d603_origin.pdf b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/d47f630a-17d8-4298-a368-699d1959d603_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2f587fbcef3c36c116f88465461d1ae8a626026c --- /dev/null +++ b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/d47f630a-17d8-4298-a368-699d1959d603_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a0fa72679d22a4272126af847ad1758963b741cd1ce833a938628d1721da3ec +size 7611252 diff --git a/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/full.md b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e50f6d92855342406d25df8c637fb70e17d5a4e4 --- /dev/null +++ b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/full.md @@ -0,0 +1,313 @@ +# 3D-Aware Face Editing via Warping-Guided Latent Direction Learning + +Yuhao Cheng $^{1}$ Zhuo Chen $^{1}$ Xingyu Ren $^{1}$ Wenhan Zhu $^{1}$ Zhengqin Xu $^{1}$ Di Xu $^{2}$ Changpeng Yang $^{2}$ Yichao Yan $^{1*}$ + +$^{1}$ MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University $^{2}$ Huawei Cloud Computing Technologies Co., Ltd + +{chengyuhao,ningci5252,rxy_sjtu,zhuwenhan823,fate311,yanyichao}@sjtu.edu.cn, {xudi21,yangchangpeng}@huawei.com + +![](images/49cb9423275b6bee71d88d91ade48eae4998b40fa3498a43e0866c33cc5ac462.jpg) +Figure 1. An example of our warping-guided 3D-aware face editing method. Our method supports users to edit 3D faces in an intuitive way that drags points from multiple perspectives. Moreover, our method can achieve disentangled editing for shape, expression, and view, while maintaining 3D consistency. Please zoom-in for detailed observation. + +# Abstract + +3D facial editing, a longstanding task in computer vision with broad applications, is expected to fast and intuitively manipulate any face from arbitrary viewpoints following the user's will. Existing works have limitations in terms of intuitiveness, generalization, and efficiency. To overcome these challenges, we propose FaceEdit3D, which allows users to directly manipulate 3D points to edit a 3D face, achieving natural and rapid face editing. After one or several points are manipulated by users, we propose the tri-plane warping to directly deform the view-independent 3D representation. To address the problem of distortion caused by tri-plane warping, we train a warp-aware encoder to project the warped face onto a standardized latent space. In this space, we further propose directional latent editing to mitigate the identity bias caused by the encoder and realize the disentangled editing of various attributes. Extensive experiments show that our method achieves superior results with rich facial details and nice identity preservation. Our approach also supports general applications like + +multi-attribute continuous editing and cat/car editing. The project website is https://cyh-sj.github.io/FaceEdit3D/. + +# 1. 
Introduction

High-quality face editing has long been an important research topic in computer vision with a wide range of applications, including social media and film production. Previous methods [16, 36, 43] based on 2D GANs [22, 23] have demonstrated the capability of editing facial images with high fidelity. Recently, benefiting from the impressive achievements of 3D-aware generative models, especially generative digital humans [2-4, 11, 15, 32, 33, 41, 45, 51, 53, 55, 56, 64], the field of 3D facial editing has attracted significant interest due to its promising capability of directly manipulating a 3D representation.

Typically, 3D face editing methods can be classified into three categories: prior-guided conditioning, parameter-space fine-tuning, and latent-space optimization, as summarized in Tab. 1. Specifically, prior-guided conditioning methods [18, 46-48] employ an additional well
| Scheme | Methods | Intuitiveness | Generalization | Efficiency |
| --- | --- | --- | --- | --- |
| Conditional control | [18, 46, 48] |  |  |  |
| Fine-tuned models | [6, 13, 59] |  |  |  |
| Supervised directions | [1, 36, 43] |  |  |  |
| Unsupervised directions | [16, 42, 67] |  |  |  |
|  | [34] (2D) |  |  |  |
| Ours |  |  |  |  |
+ +Table 1. Summary of 3D-aware face editing methods. $\triangle$ indicates its instructions are somewhat ambiguous semantically. + +designed conditioning module to introduce the control information, e.g., semantic maps [18, 46] and 3DMM [48, 49], into the 3D-aware models. Although flexible, these models typically require a large number of face images with their control labels for training. Parameter-space finetuning methods [6, 13, 59] optimize the pre-trained generators given the target input, achieving zero-shot editing with the help of the large language-image model, e.g., CLIP [38] or Stable Diffusion [39]. However, it is required to maintain a particular generator for each specific editing target, severely constraining their generalization. + +Due to the rich distributions learned in the pre-trained generator, discovering the meaningful directions in the latent space allows for a wide range of editing without the need to modify the generator and dependence on a large amount of training data. According to the exploration of editing direction, latent-space optimization can be achieved in supervised and unsupervised ways. Supervised methods [1, 36, 43, 44] search the meaningful directions in the latent space by learning labeled data for each specific editing. However, these methods cannot be generalized beyond the training domain. In contrast, unsupervised methods [16, 42, 50, 65-67] discover out-of-domain directions by analyzing the distribution of the latent space. However, the editing directions in the latent space are typically not semantically intuitive for the users. Accordingly, introducing interactive guidance to bridge the gap between the latent space and the user's intuition becomes the main purpose of the unsupervised methods. + +To achieve this, several works [12, 34] utilize manipulating points on 2D images to optimize latent code in an unsupervised way, achieving image editing intuitively. The most prominent method DragGAN [34] proposes motion supervision and point tracking to optimize the latent code in a self-supervised manner, showcasing its flexible and intuitive editing capabilities. Considering their success on 2D images, it would be highly desirable if we could also manipulate 3D points to edit a 3D facial representation. However, it is non-trivial to directly extend point dragging to 3D-aware facial editing, due to the following challenges. 1) These methods ignore the global 3D facial structure and only focus on the movements of specific points, potentially + +leading to exaggerated distortions. 2) These methods employ an inefficient approach to optimize the latent codes for image editing. Therefore, extending this procedure to 3D-aware generators fails to meet the demands of 3D interactive applications. 3) The controllability of point dragging is less precise and may cause ambiguous targets, e.g., enlarging the shape of the mouth may lead the mouth to open. + +To overcome these challenges, we propose FaceEdit3D to learn editing directions guided by 3D-consistent facewarping, realizing intuitive and rapid 3D-aware facial editing. (1) First, we propose tri-plane warping on the 3D representation to achieve accurate 3D-consistent facial editing, which allows us to sidestep inaccurate motion supervision. Further, we introduce 3D landmarks rather than arbitrary points as face prior to constrain the change in the normal face distribution. Although tri-plane warping allows for precise editing, it introduces slight facial distortions. 
(2) Hence, we train a warp-aware encoder instead of latent optimization to straightforwardly project the warped renderings into the standardized space, enabling fast and photorealistic editing. Due to the complex semantic information in the latent space of 3D-aware generators, the obtained encoder suffers from inherent bias, resulting in a loss of details and identity shifting. (3) Therefore, we propose to learn the hierarchical directional editing in latent space, enabling disentangled face editing with identity and details preservation. + +With all the designs above, we successfully introduce dragging-based edits into 3D face representations. Our work achieves an efficient and straightforward editing process which also enables the decoupling of facial expressions and shapes. Compared to other face editing approaches, our method offers a more intuitive bridge but avoids dependence on the 3D annotations. Extensive experiments have demonstrated the superiority of our method in intuitiveness, generalization, and efficiency for the task of facial editing. + +The main contributions are summarized as follows: + +- We design an efficient and straightforward 3D-aware face editing pipeline that is in line with the user's intuition. +- We propose to warp the face in the tri-plane feature level, enabling 3D-consistent face manipulation. +- We propose a warp-aware encoder to better identify the subtle changes and efficiently solve the problem of distorted face caused by the tri-plane warp. +- We propose directional editing in latent space, achieving disentangled facial editing with the preservation of identity and details. + +# 2. Related Works + +# 2.1. 3D-aware GANs + +Inspired by the superiority of implicit representation [31], several attempts [2-4, 11, 15, 32, 33, 41, 45, 53, 55, 64] deploy radiance fields into generative models and thus en + +able 3D consistent image synthesis. The capability of learning 3D representations from unposed single-view 2D images only empowers these 3D-aware GAN models to gain wide interests and applications. However, partial 3D-aware GANs [3, 15, 32, 33, 41, 64] adopt full implicit representation that lacks pre-computed 3D features before the point sampling. As a consequence, they need to regenerate the 3D feature when given novel viewpoints, limiting the efficiency of them in interactive applications. To address this challenge, several works [2, 4, 45, 53] adopt hybrid representations that first generate view-independent features, and enable sampling points on these pre-computed features for novel view synthesis. Consequently, these methods can realize rapid generation and maintain the inherent 3D-consistent representation. Specifically, EG3D [4] introduces the light tri-plane representation into the generator to raise efficiency and further enhance the image quality. Considering its efficient representation and mature downstream techniques, we adopt the EG3D [4] as the base 3D-aware model to demonstrate the effectiveness of our methods. + +# 2.2. Implicit Representations Deformation + +The deformation of 3D implicit representation has long attracted wide focus, as it serves as the foundation of broad animation applications. Prior researches predominantly introduce an additional deformation field based on the original representation to modify the 3D points. Specifically, deformation fields can be implemented through proxy-based editing [14, 21, 35, 57], cage-based editing [17, 37, 54], and parametric prior-based editing [40, 52, 63], etc. 
Proxy-based editing learns a lightweight neural network to compute the translation and rotation of 3D points, enabling the deformation of original 3D coordinates. The cage-based methods establish a surrounding cage to fully cover up the original surface of an implicit representation and then modify the cage to deform the inherent surface. Parametric prior-based methods leverage the parametric models such as SMPL [29] and FLAME [27] as a prior condition of the deformation network to drive the implicit representations. However, all of these approaches need to optimize a controllable module for each specific object, lack of efficiency and generality. In contrast, our work provides a landmark-based way to directly edit the 3D representation without optimization and further compresses the 3D deformation into 2D feature planes to improve efficiency. + +# 2.3. Face Editing in GANs + +As the latent space learned by the conditioned GANs contains most of the distribution knowledge, many works [1, 42, 43, 50, 69] explore the latent space of a pre-trained generator for the following facial attribute editing. Specifically, InterFaceGAN [43] studies the semantics encoded in the latent space and disentangles the facial semantics with linear + +projection. To explicitly edit the facial attributes, further works explore utilizing the intuitive representation, e.g., semantic maps [5, 46, 47, 68] and text prompts [19, 36] for the optimization or the extension of latent space. Moreover, an idea that directly drags the face for the editing catches the wide attention. DragGAN [34] optimizes the latent space via dragging selected points on the image to the target positions. However, it is hard to preserve the facial identity when setting a far distance between the two points, preventing the DragGAN from large-scale editing. Despite the prominent performance of latent space manipulation, it still faces a challenge in balancing the identity preservation and editing amplitude. To further enhance the editing capability, several works [6, 13, 24] focus on the parameter space of a pre-trained generator. While these methods can achieve out-of-domain editing, they need to maintain a specific generator for each attribute manipulation, lacking efficiency. Compared to the methods mentioned above, our method is an intuitive way of dragging points to deform the 3D representations while improving the efficiency and preserving the identity. + +# 3. Methods + +Our proposed framework, FaceEdit3D, aims at multi-view consistent facial editing in shape, expression, and pose via warping-guided directional editing, as illustrated in Fig. 2. To this end, we first review the 3D-aware GAN that achieves high-resolution face rendering from multiple views (Sec. 3.1). Based on the 3D-aware generator, we propose a point-guided feature-space warping method that manipulates the inherent tri-plane representations while ensuring the 3D consistency (Sec. 3.2). However, directly editing the tri-plane may lead to distortions in the final rendered images. Therefore, we train a specifically designed encoder to project the warped renderings to the standardized latent space for photo-realistic editing results (Sec. 3.3). Finally, we delve into the mechanism of latent space and propose directional editing in latent space that enables the disentangled editing of facial shape, expression, and pose (Sec. 3.4). + +# 3.1. 
Preliminaries on 3D-aware Face Generator

Our framework is built upon EG3D [4], one of the most powerful 3D-aware generative models for photorealistic 3D face generation. The generator of EG3D introduces a tri-plane representation, which compactly encodes the geometry and appearance of a 3D face. Specifically, the tri-plane features can be denoted as $\mathbf{F} = \mathcal{G}(\mathbf{w}) \in \mathbb{R}^{3\times 32\times 256\times 256}$, where $\mathbf{w}$ is a latent code. To render face images from a specific viewpoint, the features of 3D coordinates are sampled from the tri-plane features, and a shallow decoder projects the tri-plane feature $\mathbf{F}(x,y,z)\in \mathbb{R}^{32\times 3}$ into a volume density $\sigma \in \mathbb{R}^1$ and a color feature $c\in \mathbb{R}^{32}$. Subsequently, a low-resolution feature map is generated via volume rendering and then upsampled to high-resolution images. The representation ability of tri-plane features has been verified by several recent works [7, 20, 24]. Therefore, to achieve 3D-consistent editing, we choose to operate directly on the tri-plane features.

![](images/24c131db8dd70402afd937437db105b9f281ff2f7069ac4460692c5f0cf.jpg)
(a) 3D-consistent Tri-plane Warp

![](images/7bef67a8c11a6425320cd8bc3b98ec878d42aea9bd1b469b6c0d88678b328f69.jpg)
(b) The Pipeline of Our FaceEdit3D
Figure 2. Overview of our proposed FaceEdit3D. (a) A detailed illustration of our tri-plane warp. We project 2D key points onto the 3D face surface and then map them to each corresponding plane within the tri-plane representation. Afterward, we apply warping operations to each plane to achieve 3D-consistent editing. (b) The full pipeline of our FaceEdit3D. Given a source image $\mathbf{I}_s$ with its latent code $\mathbf{w}_s$, we first perform the tri-plane warping on it and obtain the warped rendering $\hat{\mathbf{I}}_t$. Subsequently, we utilize a warp-aware encoder to extract the latent codes $\mathbf{w}_s'$ and $\mathbf{w}_t'$ from the source image $\mathbf{I}_s$ and the warped rendering $\hat{\mathbf{I}}_t$, respectively. Then, we employ the hierarchical latent direction to update the target latent code $\mathbf{w}_t$. Finally, the edited facial image $\mathbf{I}_t$ can be synthesized from the updated latent code $\mathbf{w}_t$.

# 3.2. Multi-view Consistent Face Warping

For 3D face editing, a flexible interaction is to let users directly drag points on the rendered images. Different from 2D-level editing, which is limited to one specific viewpoint, 3D-level manipulation should support editing from an arbitrary viewpoint and achieve 3D-consistent editing effects. To achieve this, we propose a framework based on point-guided tri-plane warping, where users manipulate one or several points from a desired viewpoint, and the tri-plane features are warped according to the point displacements.

Point Manipulation by Users. Ideally, users could directly modify arbitrary points in a rendered face to achieve editing. Nevertheless, potential conflicts among excessive control points may lead to undesirable distortions of the facial structure during joint point manipulation, consequently yielding results that deviate from realistic human appearances. To address this issue, we constrain the users to manipulate a set of meaningful 3D facial landmarks to guarantee a natural face structure.
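Since all of our edits operate on this tri-plane representation, a minimal PyTorch sketch of how tri-plane features are queried (as recapped in Sec. 3.1) is given below for reference. It is not the actual EG3D implementation: the plane projections, the feature aggregation by averaging, and the toy decoder are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def query_triplane(planes: torch.Tensor, pts: torch.Tensor) -> torch.Tensor:
    """Sample per-plane features for 3D points from a tri-plane tensor.
    planes: (3, C, H, W) holding the xy-, xz-, and yz-planes.
    pts:    (N, 3) coordinates, assumed to be normalized to [-1, 1].
    Returns (N, C, 3), matching F(x, y, z) in the text."""
    projections = [pts[:, [0, 1]],   # project onto the xy-plane
                   pts[:, [0, 2]],   # project onto the xz-plane
                   pts[:, [1, 2]]]   # project onto the yz-plane
    feats = []
    for plane, uv in zip(planes, projections):
        grid = uv.view(1, -1, 1, 2)                       # (1, N, 1, 2) sample grid
        f = F.grid_sample(plane.unsqueeze(0), grid,
                          mode="bilinear", align_corners=True)
        feats.append(f[0, :, :, 0].t())                   # (N, C) bilinear samples
    return torch.stack(feats, dim=-1)                     # (N, C, 3)

class TinyDecoder(torch.nn.Module):
    """Toy stand-in for the shallow decoder that maps aggregated tri-plane
    features to a density sigma and a 32-d color feature."""
    def __init__(self, c: int = 32):
        super().__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Linear(c, 64), torch.nn.Softplus(),
            torch.nn.Linear(64, 1 + c))

    def forward(self, feat: torch.Tensor):
        out = self.net(feat.mean(dim=-1))    # average the three plane features
        return out[:, :1], out[:, 1:]        # sigma: (N, 1), color feature: (N, 32)

planes = torch.randn(3, 32, 256, 256)        # F = G(w), shaped as in the text
pts = torch.rand(1024, 3) * 2 - 1            # points sampled along camera rays
sigma, color = TinyDecoder()(query_triplane(planes, pts))
```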

Specifically, given a latent code $\mathbf{w}_s$ and a pre-trained EG3D generator $\mathcal{G}$, the portrait is first rendered in the front view with camera intrinsics $\mathbf{K}$. Then, 2D facial landmarks are detected by a pre-trained detector and projected onto the facial surface to obtain 3D landmarks $\mathbf{P} = \{\mathbf{p}_0,\mathbf{p}_1,\dots ,\mathbf{p}_n\} \in \mathbb{R}^{n\times 3}$, with $\mathbf{p}_i = \{\mathbf{p}_i^x,\mathbf{p}_i^y,\mathbf{p}_i^z\} \in \mathbb{R}^3$. Consequently, users can render images from an arbitrary viewpoint with extrinsics $\mathbf{R} \in \mathrm{SO}(3)$ and select any specific points for editing. Taking the selected point $\mathbf{p}_i$ as an example, we constrain its movement $\Delta \mathbf{p}_i$ to be perpendicular to the rendering direction. The updated 3D point $\mathbf{p}_i^{\prime}$ is represented as:

$$
\mathbf{p}_i^{\prime} = \mathbf{p}_i + \mathbf{R}^{-1} \mathbf{K}^{-1} \mathbf{Z} \Delta \mathbf{p}_i, \tag{1}
$$

where $\mathbf{Z}$ is the depth of the selected point under the pose $\mathbf{R}$. After manipulating specific points within the facial structure, we obtain a set of new 3D landmarks $\mathbf{P}' = \{\mathbf{p}_0', \mathbf{p}_1', \dots, \mathbf{p}_n'\}$.

Tri-plane Warping. After the users have manipulated the key points, we apply 3D warping to the tri-planes to edit the 3D representation. By considering each of the tri-plane features individually [7], we can reduce the editing in 3D space to warps of three 2D planes, which enhances efficiency. Therefore, we begin by projecting the 3D landmarks onto the three feature planes, and then individually apply a similar warping transformation to each of these feature planes, as illustrated in Fig. 2 (a). Taking the $xy$-plane $\mathbf{F}_{xy}$ as an example, given $n$ source projected points $\mathbf{P}^{xy} = \{\mathbf{p}_0^{xy}, \mathbf{p}_1^{xy}, \dots, \mathbf{p}_n^{xy}\} \in \mathbb{R}^{n \times 2}$, $\mathbf{p}_i^{xy} = \{\mathbf{p}_i^x, \mathbf{p}_i^y\}$, and their target points $\hat{\mathbf{P}}^{xy} = \{\hat{\mathbf{p}}_0^{xy}, \hat{\mathbf{p}}_1^{xy}, \dots, \hat{\mathbf{p}}_n^{xy}\}$, we employ thin-plate spline interpolation [9] to compute the grid sampler:

$$
g(\mathbf{q}) = \sum_{i = 1}^{n} w_i \phi\left(\left\| \mathbf{q} - \hat{\mathbf{p}}_i \right\|\right) + \mathbf{v}^{T} \mathbf{q} + \mathbf{b}, \tag{2}
$$

where $\phi(r) = r^2\log(r)$ is the kernel function and $g(\mathbf{q})$ provides the inverse mapping from a location $\mathbf{q}$ in the warped plane back to the original plane coordinates. The weights $w_i$ and the affine parameters $\mathbf{v}, \mathbf{b}$ are solved so that $g$ interpolates the control points while minimizing the thin-plate bending energy. Similarly, by applying such an inverse mapping to all three planes, we complete the tri-plane warping and achieve an inherently 3D-consistent modification. Compared to manipulating the sampled 3D coordinate space [60, 62], our method directly manipulates the 3D representation, enabling simultaneous editing from multiple viewpoints without additional steps.

# 3.3. Warp-Aware Encoding

After tri-plane warping, the editing results exhibit 3D-consistent modification. However, directly applying the warping operation to the tri-plane features may not conform to the facial distribution in the latent space, leading to a severely distorted appearance.
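As a concrete illustration of the per-plane warp defined by Eq. (2), the following minimal PyTorch sketch fits a thin-plate spline to the projected control points and resamples one feature plane with `grid_sample`. It is not the paper's exact implementation: the $[-1, 1]$ coordinate convention, the border padding, and the control-point count are assumptions made for this example.

```python
import torch
import torch.nn.functional as F

def tps_inverse_map(src_pts, dst_pts, query, eps=1e-9):
    """Fit a 2D thin-plate spline that sends the *edited* landmark positions
    (dst_pts) back to their *original* positions (src_pts), then evaluate it
    at `query` locations (Eq. (2)).  All coordinates are assumed to lie in
    the normalized range [-1, 1].  src_pts, dst_pts: (n, 2); query: (m, 2)."""
    n = dst_pts.shape[0]

    def phi(r):                                  # radial kernel r^2 log r
        r = r.clamp(min=eps)
        return r.pow(2) * torch.log(r)

    K = phi(torch.cdist(dst_pts, dst_pts))                  # (n, n)
    P = torch.cat([torch.ones(n, 1), dst_pts], dim=1)       # (n, 3) affine part
    L = torch.zeros(n + 3, n + 3)
    L[:n, :n], L[:n, n:], L[n:, :n] = K, P, P.t()
    Y = torch.cat([src_pts, torch.zeros(3, 2)], dim=0)      # (n + 3, 2)
    params = torch.linalg.solve(L, Y)                        # [w_i; v, b] of Eq. (2)
    W, A = params[:n], params[n:]

    U = phi(torch.cdist(query, dst_pts))                     # (m, n)
    Q = torch.cat([torch.ones(query.shape[0], 1), query], dim=1)
    return U @ W + Q @ A                                      # (m, 2) source coords

def warp_plane(feat, src_pts, dst_pts):
    """Warp one (1, C, H, W) feature plane so that src_pts move to dst_pts."""
    _, _, H, W = feat.shape
    ys, xs = torch.meshgrid(torch.linspace(-1, 1, H),
                            torch.linspace(-1, 1, W), indexing="ij")
    query = torch.stack([xs, ys], dim=-1).reshape(-1, 2)      # regular target grid
    grid = tps_inverse_map(src_pts, dst_pts, query).view(1, H, W, 2)
    return F.grid_sample(feat, grid, mode="bilinear",
                         padding_mode="border", align_corners=True)

# Example: warp a hypothetical 32-channel plane with five control points.
plane = torch.randn(1, 32, 256, 256)
src = torch.rand(5, 2) * 2 - 1
dst = src + 0.05 * torch.randn(5, 2)
warped = warp_plane(plane, src, dst)       # (1, 32, 256, 256)
```

Applying the same procedure to each of the three planes, with the corresponding 2D projections of the moved landmarks, yields the 3D-consistent tri-plane edit described above.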
To mitigate this distortion, we encode the distorted facial image $\hat{\mathbf{I}}_t$ with an encoder $\mathcal{E}$ into a standardized latent space, which yields a natural counterpart $\mathbf{w}_t^{\prime}$ of the distorted face:

$$
\mathbf{w}_t^{\prime} = \mathcal{E}(\hat{\mathbf{I}}_t). \tag{3}
$$

To train the encoder, we sample images from the pre-trained generator to obtain image and latent code pairs. Specifically, the portrait $\mathbf{I}_s$ is generated from a randomly sampled latent code and a camera pose $\mathbf{c}$. Subsequently, the portrait $\mathbf{I}_s$ is projected to the latent code $\mathbf{w}_s^{\prime}$ by the encoder $\mathcal{E}$, and the corresponding image $\mathbf{I}_s^{\prime}$ is generated by the same frozen generator $\mathcal{G}$ under the pose $\mathbf{c}$. The optimization objective of the encoder combines an L1 loss, an LPIPS loss [61], and an identity loss [10]:

$$
\mathcal{L}_o = \mathcal{L}_1(\mathbf{I}_s, \mathbf{I}_s^{\prime}) + \mathcal{L}_{\mathrm{LPIPS}}(\mathbf{I}_s, \mathbf{I}_s^{\prime}) + \mathcal{L}_{\mathrm{ID}}(\mathbf{I}_s, \mathbf{I}_s^{\prime}). \tag{4}
$$

Unfortunately, we find that an encoder trained only with the above objective struggles to identify subtle modifications due to the inherent complexity of 3D-aware generators. Hence, we further introduce tri-plane warping as a data augmentation to enhance the perception of subtle edits. Similar to the above training pipeline, we apply the encoder to the warped rendering $\hat{\mathbf{I}}_t$ to obtain the latent code $\mathbf{w}_t^{\prime}$, from which its inverted image $\mathbf{I}_t^{\prime}$ is generated. The loss is calculated between $\mathbf{I}_t^{\prime}$ and $\hat{\mathbf{I}}_t$:

$$
\mathcal{L}_w = \mathcal{L}_1(\hat{\mathbf{I}}_t, \mathbf{I}_t^{\prime}) + \mathcal{L}_{\mathrm{LPIPS}}(\hat{\mathbf{I}}_t, \mathbf{I}_t^{\prime}) + \mathcal{L}_{\mathrm{ID}}(\hat{\mathbf{I}}_t, \mathbf{I}_t^{\prime}). \tag{5}
$$

Besides, following GOAE [58], we employ a discriminator $\mathcal{D}$ to ensure that the latent codes $\mathbf{w}_t^{\prime}$ and $\mathbf{w}_s^{\prime}$ lie in the standardized latent space:

$$
\mathcal{L}_d = \mathbb{E}\big[ f(\mathcal{D}(\mathbf{w}_t^{\prime})) + f(\mathcal{D}(\mathbf{w}_s^{\prime})) \big] + \mathbb{E}\big[ f(-\mathcal{D}(\mathbf{w}_c)) \big] + \gamma \left\| \nabla \mathcal{D}(\mathbf{w}_c) \right\|^2, \tag{6}
$$

where $f(x) = -\log(1 + \exp(-x))$ and $\gamma$ is the hyperparameter of the R1 regularization. $\mathbf{w}_c$ are standardized latent codes pre-sampled with the frozen generator. The final objective linearly combines the aforementioned losses:

$$
\mathcal{L} = \mathcal{L}_o + \mathcal{L}_w + \mathcal{L}_d. \tag{7}
$$

After training, the edited rendering is projected into the latent space and then passed to the generator to yield a more plausible editing result in the target view $\mathbf{c}_t$:

$$
\mathbf{I}_t = \mathcal{G}(\mathbf{w}_t^{\prime}, \mathbf{c}_t). \tag{8}
$$
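For reference, a compact sketch of the reconstruction terms in Eqs. (4), (5), and (7) is given below; it is not the training code of the paper. The `lpips` package implements the perceptual loss of [61], while `id_embed` is a placeholder for an ArcFace-style identity network [10], the cosine-similarity form of the identity loss is a common choice assumed here, and the adversarial term of Eq. (6) is omitted.

```python
import torch
import lpips  # pip install lpips; implements the perceptual loss of [61]

lpips_fn = lpips.LPIPS(net="alex")  # expects images in [-1, 1]

def id_embed(img: torch.Tensor) -> torch.Tensor:
    """Placeholder for an ArcFace-style identity embedding network [10]."""
    return torch.nn.functional.adaptive_avg_pool2d(img, 8).flatten(1)

def recon_loss(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """L1 + LPIPS + (1 - cosine identity similarity), used for both the
    source reconstruction term L_o (Eq. 4) and the warp-augmented term L_w (Eq. 5)."""
    l1 = torch.nn.functional.l1_loss(pred, target)
    lp = lpips_fn(pred, target).mean()
    idl = 1 - torch.nn.functional.cosine_similarity(
        id_embed(pred), id_embed(target), dim=1).mean()
    return l1 + lp + idl

def encoder_objective(I_s, I_s_rec, I_t_warp, I_t_rec):
    # Total objective of Eq. (7), with the latent adversarial term L_d omitted.
    return recon_loss(I_s_rec, I_s) + recon_loss(I_t_rec, I_t_warp)
```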
# 3.4. Directional Editing in Latent Space

The warp-aware encoder solves the problem of severely distorted appearance caused by the tri-plane warp; however, it additionally introduces identity bias into the latent codes, as the encoder cannot faithfully invert faces. Besides, it is still hard to handle the ambiguity of point manipulation. Therefore, we propose directional editing in latent space to overcome these two challenges.

To begin with, we adopt the difference between the latent codes extracted by the encoder from the images before and after warping as the direction guidance. In this way, we mitigate the identity bias and bypass the problem caused by the encoder. Furthermore, we follow StyleCLIP [36] to explore the semantics of different layers in the $W+$ latent space of EG3D [4], empowering our method with disentangled editing of expression and shape. According to this hierarchical mechanism, we can obtain flexible editing results by applying editing directions in different layer groups to the same warped facial image, successfully avoiding the ambiguity caused by the tri-plane warp.

The full pipeline is shown in Fig. 2 (b). Given a latent code $\mathbf{w}_s$ and the frozen EG3D generator $\mathcal{G}$, the facial tri-plane can be generated. The warp-aware encoder then projects the source image $\mathbf{I}_s$ and the warped rendering $\hat{\mathbf{I}}_t$ to standardized latent codes $\mathbf{w}_s^{\prime}$ and $\mathbf{w}_t^{\prime}$ with Eq. (3), respectively. The target edited latent code $\mathbf{w}_t$ can be calculated as:

$$
\mathbf{w}_t = \mathbf{w}_s + H(\mathbf{w}_t^{\prime} - \mathbf{w}_s^{\prime}), \tag{9}
$$

where $H(\cdot)$ is a feature selection module that disentangles the latent direction. Finally, the modified portrait $\mathbf{I}_t$ can be rendered from any perspective $\mathbf{c}_t$ with $\mathbf{I}_t = \mathcal{G}(\mathbf{w}_t,\mathbf{c}_t)$.

# 4. Experiments

In this section, we evaluate the efficiency and quality of our 3D-aware face editing model. We first introduce the implementation details of our work (Sec. 4.1). Subsequently, we compare our method with SOTA 3D face editing methods qualitatively (Sec. 4.2) and quantitatively (Sec. 4.3). Then, we conduct ablation studies to analyze the effect of each component (Sec. 4.4). Finally, we introduce potential applications of our method (Sec. 4.5).

# 4.1. Implementation Details

We build our approach on EG3D [4] pre-trained on the FFHQ dataset [22]. We employ MediaPipe [30] to detect 2D landmarks and select 29 points for user manipulation. To obtain 3D landmarks, we first detect 2D landmarks in the frontal view, and then compute the 3D coordinates as the locations of maximum density value on their corresponding emitted rays. We adopt a Swin Transformer [28] as the encoder backbone to enhance detail perception. For encoder training, standardized latent codes are sampled to generate face images under random views, covering 100,000 identities in total. We adopt the Adam optimizer [25] and set the learning rate to $1e-4$ for both the encoder and the discriminator. All implementations are based on PyTorch and run on Nvidia A6000 GPUs.

![](images/749d84eef9c5423fe87b46130098caec69d0a3277e18034f5dc3db3e874a48d7.jpg)
Figure 3. Qualitative comparisons with current SOTA methods for 3D face shape and expression editing. (a), (b), and (c) are the results of synthetic samples, and (d) showcases the results of a real-world portrait.
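To make the landmark-lifting step concrete, the sketch below computes each 3D landmark as the maximum-density sample along the camera ray through the corresponding 2D landmark, as described above. It is not the authors' code: the camera convention ($\mathbf{x}_{cam} = \mathbf{R}\mathbf{x} + \mathbf{t}$), the near/far range, and the `density_fn` interface are illustrative assumptions.

```python
import torch

def lift_landmarks_to_3d(density_fn, lms_2d, K, R, t,
                         near=2.25, far=3.3, n_samples=96):
    """Lift 2D landmarks (n, 2) to 3D by taking, along each camera ray, the
    sample with the maximum volume density.  `density_fn(pts)` maps (N, 3)
    world points to (N,) densities, e.g. by querying the tri-plane decoder;
    K is (3, 3) intrinsics, R (3, 3) and t (3,) the extrinsics."""
    n = lms_2d.shape[0]
    pix = torch.cat([lms_2d, torch.ones(n, 1)], dim=1)            # homogeneous pixels
    dirs_cam = (torch.linalg.inv(K) @ pix.t()).t()                 # camera-space rays
    dirs = (R.t() @ dirs_cam.t()).t()                              # world-space rays
    origin = (-R.t() @ t).reshape(1, 3)                            # camera center
    z = torch.linspace(near, far, n_samples)                       # depths along ray
    pts = origin[None] + dirs[:, None, :] * z[None, :, None]       # (n, S, 3)
    sigma = density_fn(pts.reshape(-1, 3)).reshape(n, n_samples)   # (n, S)
    idx = sigma.argmax(dim=1)                                      # max-density sample
    return pts[torch.arange(n), idx]                               # (n, 3) landmarks
```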
# 4.2. Qualitative Evaluation

We conduct a qualitative comparison between our work and several SOTA 3D face editing methods that support intuitive manipulation, i.e., StyleGAN-NADA [13] guided by text prompts and IDE-3D [46] controlled by semantic maps. Besides, we also include the direct point-based warping approach in the comparison. We adopt similar editing objectives and use the official codes to ensure fairness. Fig. 3 shows the multi-view results of shape and expression editing, demonstrating the superiority of our method in fine-grained modification. The direct warp can accomplish obvious editing, but it suffers from facial distortion. IDE-3D [46] achieves satisfactory results in most cases. However, the coupling of different facial attributes in the semantic maps leads to changes beyond the target attributes. For instance, the baby in Fig. 3 (c) shows a shift in age and identity when we try to elongate his chin. Besides, IDE-3D only supports single-view editing, limiting its applicability. StyleGAN-NADA [13] fails to edit facial shape on top of EG3D, despite its great success in style transfer and texture editing. In contrast, our method allows the user to simultaneously manipulate the face from multiple views and enables intuitive editing of facial shape, expression, and pose without sacrificing identity or detail. In addition to editing quality, our method has the further advantage that it does not require additional training of the generative model, demonstrating its generalization ability.

Furthermore, we also compare our method with a recent 2D method, DragGAN [34], which employs a point-guided operation similar to ours. Since DragGAN is limited to 2D editing, we compare the results in two aspects, i.e., fixed-view editing and novel view synthesis, as shown in Fig. 4. For fixed-view editing, the results of DragGAN [34] in Fig. 4 (a) show a tendency to open the mouth and change the identity when shortening the nose, even though a mask limiting the editable region is applied. For novel view synthesis, DragGAN severely changes the identity due to ambiguous point dragging, as shown in Fig. 4 (b). Compared to DragGAN, our method succeeds in achieving the expected editing target while keeping the identity and irrelevant regions unchanged.
| Methods | Scheme | Inference Time (s) ↓ | $\mathrm{MSE}_i$ ↑ | $\mathrm{MSE}_o$ ↓ | $\mathrm{MSE}_i$ / $\mathrm{MSE}_o$ ↑ | ID Consistency ↑ |
| --- | --- | --- | --- | --- | --- | --- |
| DragGAN [34] | 2D | 5.231 | 1.992 | 0.224 | 8.893 | 0.579 |
| Ours | 2D | 0.356 | 2.049 | 0.186 | 11.016 | 0.716 |
| Our warp | 3D | 0.269 | 2.455 | 0.328 | 7.485 | 0.707 |
| IDE-3D [46] | 3D | 0.383 | 1.841 | 0.987 | 1.865 | 0.649 |
| Ours | 3D | 0.624 | 1.679 | 0.342 | 4.909 | 0.712 |
Table 2. Quantitative comparison with several face editing methods on efficiency and effectiveness. The best results are labeled in bold, except for our direct warp due to its distorted results. The unit of $\mathrm{MSE}_i$ and $\mathrm{MSE}_o$ is $10^{-2}$.

![](images/7847255046e76098286f48adda5d859c9b9bda9d1e4c34c1ffeea00edfadc673.jpg)
Figure 4. Qualitative comparisons with DragGAN [34] on portrait editing. Red and blue points represent the source and target points in the manipulations, respectively. The semi-transparent region indicates the mask used for DragGAN, which is not used in our method.

# 4.3. Quantitative Evaluation

We also conduct quantitative experiments to verify the efficiency and effectiveness of our method, as shown in Tab. 2. We adopt editing time as the metric for efficiency because it strongly influences the user experience. As shown, DragGAN [34] spends a large amount of time on latent optimization, resulting in lower efficiency. IDE-3D [46] and our method exhibit similar efficiency and both support real-time editing. Although direct warping is the fastest method, it causes facial distortion, and thus we exclude it from the comparison.

Furthermore, to assess the capability of disentangled editing, we measure the pixel-wise mean square error (MSE) inside and outside the target editing regions. The objective is to successfully edit the target regions while preventing the outside regions from being modified. As shown, our approach achieves better editing disentanglement than IDE-3D [46], with a higher ratio of $\mathrm{MSE}_i$ to $\mathrm{MSE}_o$. It is worth noting that the editability of 3D GANs is inferior to that of 2D GANs, and thus our method falls behind DragGAN [34]. Considering the efficiency and multi-view editing ability of our method, the gap between ours and DragGAN is acceptable. To fairly compare the two methods without the interference of base generators, we further extend our method to the same 2D generator, and it performs better than DragGAN [34] in this setting. Additionally, we also compare the identity similarity. The results indicate that our method maintains the identity characteristics better than the other methods.

![](images/edf9ce84d6b0f76032e4437ef5ebb39d187e4f5974b19fa115c1e0b2fe6aa899.jpg)
Figure 5. The ablation study of our loss functions for training the encoder. The first row aims to widen the double eyelids while keeping the eyes open, and the second is to lengthen the bangs. The numbers in the corners represent the identity similarity measured by ArcFace [10]. Please zoom in for detailed observation.

# 4.4. Ablation Study

Effectiveness of Loss Functions. We investigate the effectiveness of each loss function in the encoder training process, as depicted in Fig. 5. The $\mathcal{L}_w$ introduced by the warp-assisted data augmentation facilitates accurate identification of the user's manipulations, and $\mathcal{L}_d$ helps to maintain identity information. Combining them achieves the best editing results.

Effectiveness of Directional Latent Editing. We conduct an ablation study to verify the effectiveness of our directional latent editing. We begin by applying tri-plane warping to source identities to obtain the warped results. Subsequently, we extract the directions of different layer groups, i.e., the shape direction, the expression direction, and their combination. Fig.
6 shows that each individual directional latent code has the capacity to disentangle the corresponding attribute, while their combination realizes integrated editing. However, directly mapping the warped rendering to the latent space without our directional latent module results in identity shifting and detail loss. These results verify the effectiveness of our directional latent editing.

![](images/ffabbeefafc3a99ecd629df6aaf614944982e2c1c7a8938096426000624f5b00.jpg)
Figure 6. The ablation study of our directional editing. "w/o Dir." represents results generated by directly projecting the warped results to the latent space.

# 4.5. Applications

Generalization of Learned Latent Directions. The editing direction learned for one face can be generalized to other instances, and we can further control the degree along the direction to linearly interpolate the editing results. Fig. 7 shows the interpolation results guided by the directions learned in the cases of Fig. 3, i.e., wider face and closed mouth. With the degree rising from -2.0 to 2.0, both identities show a gradual change along their respective directions, even though the directions were initially learned for other cases, demonstrating the generalization of the learned latent directions.

![](images/dfae759163fd89ba13343cfbc47329e3062683caf0987f7a0513d46ad2ed6b44.jpg)
Figure 7. The interpolated editing results along the directions learned in the cases of Fig. 3 (a) and (d), i.e., "wider face" and "closed mouth", respectively. It shows that the editing direction learned on one face can be generalized to other instances.

Continuous Editing. Continuous editing is important for real-world applications. Therefore, we conduct an experiment to show our capability of stacking multiple modifications. Fig. 8 shows the results with multiple editing targets, i.e., smaller eyes, closed mouth, smaller nose, and wider face. The natural and ID-consistent results demonstrate the effectiveness of our method in continuous editing.

Generalization to Other Generators. To show the general applicability of our method, we extend it to 3D cat editing and 2D car editing. We apply our method to EG3D [4] pre-trained on the AFHQ Cats dataset [8] and to StyleGAN [23] trained on the Stanford Cars dataset [26], respectively. As shown in Fig. 9, our approach can also successfully manipulate the 3D cats and 2D cars according to the user's point-based instructions.

![](images/28a66dc27078a94be76e31ffcf7181a2d5708a581806ec669fa9b8ac1fe162b4.jpg)

![](images/613467223a83b59e64a8193c3fcee84a28f89cb5650c83fd9db0eb96d011c286.jpg)
Figure 8. We showcase the mixing results with multiple attributes, demonstrating the continuous editing ability of our method.
Figure 9. The extension of our method to cat and car editing.

# 5. Conclusion

In this paper, we propose FaceEdit3D, an intuitive method to edit 3D facial shape and expression from any perspective. Our approach involves tri-plane warping to ensure inherently 3D-consistent editing. To mitigate the facial distortions caused by the warping, we train a warp-aware encoder to project the warped face into a standardized latent distribution, and we further explore the hierarchical mechanism of the latent space to achieve disentangled editing. Extensive experiments demonstrate the effectiveness and efficiency of our method. The additional applications also show the generalization and potential of our method across different scenarios.
To sum up, our method provides a brand new way to manipulate the 3D representation, opening up new avenues for rapid and convenient real-image editing. + +Limitations. Since our method is based on warping the 3D representation, it is hard for our work to achieve texture editing and some semantic editing, such as wearing glasses. Broader Impacts. Despite not our intention, our 3D-aware facial editing capability could potentially be abused. We are committed to privacy protection, preventing the misuse of facial editing for criminal purposes. + +# Acknowledgements + +This work was supported in part by NSFC (62201342, 62101325), and Shanghai Municipal Science and Technology Major Project (2021SHZDZX0102). + +# References + +[1] Rameen Abdal, Peihao Zhu, Niloy J Mitra, and Peter Wonka. Styleflow: Attribute-conditioned exploration of stylegan-generated images using conditional continuous normalizing flows. TOG, pages 1-21, 2021. 2, 3 +[2] Sizhe An, Hongyi Xu, Yichun Shi, Guoxian Song, Umit Y Ogras, and Linjie Luo. Panohed: Geometry-aware 3d fullhead synthesis in 360deg. In CVPR, pages 20950-20959, 2023. 1, 2, 3 +[3] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In CVPR, pages 5799-5809, 2021. 3 +[4] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In CVPR, pages 16123-16133, 2022. 1, 2, 3, 5, 8 +[5] Anpei Chen, Ruiyang Liu, Ling Xie, Zhang Chen, Hao Su, and Jingyi Yu. Sofgan: A portrait image generator with dynamic styling. TOG, pages 1-26, 2022. 3 +[6] Zhuo Chen, Xudong Xu, Yichao Yan, Ye Pan, Wenhan Zhu, Wayne Wu, Bo Dai, and Xiaokang Yang. Hyperstyle3d: Text-guided 3d portrait stylization via hypernetworks. arXiv preprint arXiv:2304.09463, 2023. 2, 3 +[7] Yuhao Cheng, Yichao Yan, Wenhan Zhu, Ye Pan, Bowen Pan, and Xiaokang Yang. Head3d: Complete 3d head generation via tri-plane feature distillation. arXiv preprint arXiv:2303.15892, 2023. 4 +[8] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In CVPR, pages 8188-8197, 2020. 8 +[9] Forrester Cole, David Belanger, Dilip Krishnan, Aaron Sarna, Inbar Mosseri, and William T Freeman. Synthesizing normalized faces from facial identity features. In CVPR, pages 3703-3712, 2017. 4 +[10] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, pages 4690-4699, 2019. 5, 7 +[11] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. Gram: Generative radiance manifolds for 3d-aware image generation. In CVPR, pages 10673-10683, 2022. 1, 2 +[12] Yuki Endo. User-controllable latent transformer for stylegan image layout editing. In Computer Graphics Forum, pages 395-406, 2022. 2 +[13] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. TOG, pages 1-13, 2022. 2, 3, 6 +[14] Stephan J Garbin, Marek Kowalski, Virginia Estellers, Stanislaw Szymanowicz, Shideh RezaEIFar, Jingjing Shen, Matthew Johnson, and Julien Valentin. Voltemorph: Realtime, controllable and generalisable animation of volumetric representations. arXiv preprint arXiv:2208.00949, 2022. 3 +[15] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. 
Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In ICLR, 2021. 1, 2, 3 + +[16] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. Ganspace: Discovering interpretable gan controls. NeurIPS, pages 9841–9850, 2020. 1, 2 +[17] Clément Jambon, Bernhard Kerbl, Georgios Kopanas, Stavros Diolatzis, George Drettakis, and Thomas Leimkuhler. Nerfshop: Interactive editing of neural radiance fields. CGIT, 6(1), 2023. 3 +[18] Kaiwen Jiang, Shu-Yu Chen, Feng-Lin Liu, Hongbo Fu, and Lin Gao. Nerfaceediting: Disentangled face editing in neural radiance fields. In SIGGRAPH Asia, pages 1-9, 2022. 1, 2 +[19] Yuming Jiang, Ziqi Huang, Xingang Pan, Chen Change Loy, and Ziwei Liu. Talk-to-edit: Fine-grained facial editing via dialog. In ICCV, pages 13799-13808, 2021. 3 +[20] Wonjoon Jin, Nuri Ryu, Geonung Kim, Seung-Hwan Baek, and Sunghyun Cho. Dr. 3d: Adapting 3d gans to artistic drawings. In SIGGRAPH Asia, pages 1-8, 2022. 4 +[21] Kacper Kania, Stephan J Garbin, Andrea Tagliasacchi, Virginia Estellers, Kwang Moo Yi, Julien Valentin, Tomasz Trzciński, and Marek Kowalski. Blendfields: Few-shot example-driven facial modeling. In CVPR, pages 404-415, 2023. 3 +[22] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019. 1, 5 +[23] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, pages 8110-8119, 2020. 1, 8 +[24] Gwanghyun Kim and Se Young Chun. Datid-3d: Diversitypreserved domain adaptation using text-to-image diffusion for 3d generative model. In CVPR, pages 14203–14213, 2023. 3, 4 +[25] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015. 6 +[26] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV, pages 554–561, 2013. 8 +[27] Tianye Li, Timo Bolkart, Michael J Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4d scans. TOG, pages 194-1, 2017. 3 +[28] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, pages 10012-10022, 2021. 6 +[29] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. In Seminal Graphics Papers: Pushing the Boundaries, Volume 2, pages 851-866. 2023. 3 +[30] Camillo Lugaresi, Jiuqiang Tang, Hadon Nash, Chris McClanahan, Esha Uboweja, Michael Hays, Fan Zhang, Chuoling Chang, Ming Guang Yong, Juhyun Lee, et al. Mediapipe: A framework for building perception pipelines. arXiv preprint arXiv:1906.08172, 2019. 5 +[31] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, pages 99-106, 2020. 2 + +[32] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In CVPR, pages 11453–11464, 2021. 1, 2, 3 +[33] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In CVPR, pages 13503-13513, 2022. 1, 2, 3 +[34] Xingang Pan, Ayush Tewari, Thomas Leimkuhler, Lingjie Liu, Abhinitra Meka, and Christian Theobalt. 
Drag your gan: Interactive point-based manipulation on the generative image manifold. In ASIGGRAPH, pages 1-11, 2023. 2, 3, 6, 7 +[35] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In ICCV, pages 5865-5874, 2021. 3 +[36] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In CVPR, pages 2085–2094, 2021. 1, 2, 3, 5 +[37] Yicong Peng, Yichao Yan, Shengqi Liu, Yuhao Cheng, Shanyan Guan, Bowen Pan, Guangtao Zhai, and Xiaokang Yang. Cagenerf: Cage-based neural radiance field for generalized 3d deformation and animation. NeurIPS, pages 31402-31415, 2022. 3 +[38] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763, 2021. 2 +[39] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, pages 10684-10695, 2022. 2 +[40] Shunsuke Saito, Jinlong Yang, Qianli Ma, and Michael J Black. Scintimate: Weakly supervised learning of skinned clothed avatar networks. In CVPR, pages 2886-2897, 2021. 3 +[41] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In NIPS, 2020. 1, 2, 3 +[42] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in gans. In CVPR, pages 1532-1540, 2021. 2, 3 +[43] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. Interfacegan: Interpreting the disentangled face representation learned by gans. TPAMI, pages 2004-2018, 2020. 1, 2, 3 +[44] Enis Simsar, Alessio Tonioni, Evin Pinar Ornek, and Federico Tombari. Latentswap3d: Semantic edits on 3d image gans. In ICCV, pages 2899-2909, 2023. 2 +[45] Ivan Skorokhodov, Sergey Tulyakov, Yiqun Wang, and Peter Wonka. Epigraf: Rethinking training of 3d gans. NeurIPS, pages 24487-24501, 2022. 1, 2, 3 +[46] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled edit + +ing for high-resolution 3d-aware portrait synthesis. ToG, pages 1-10, 2022. 1, 2, 3, 6, 7 +[47] Jingxiang Sun, Xuan Wang, Yong Zhang, Xiaoyu Li, Qi Zhang, Yebin Liu, and Jue Wang. Fenerf: Face editing in neural radiance fields. In CVPR, pages 7672-7682, 2022. 3 +[48] Jingxiang Sun, Xuan Wang, Lizhen Wang, Xiaoyu Li, Yong Zhang, Hongwen Zhang, and Yebin Liu. Next3d: Generative neural texture rasterization for 3d-aware head avatars. In CVPR, pages 20991-21002, 2023. 1, 2 +[49] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In CVPR, pages 6142-6151, 2020. 2 +[50] Andrey Voynov and Artem Babenko. Unsupervised discovery of interpretable directions in the gan latent space. In ICML, pages 9786-9796, 2020. 2, 3 +[51] Tengfei Wang, Bo Zhang, Ting Zhang, Shuyang Gu, Jianmin Bao, Tadas Baltrusaitis, Jingjing Shen, Dong Chen, Fang Wen, Qifeng Chen, et al. Rodin: A generative model for sculpting 3d digital avatars using diffusion. In CVPR, pages 4563-4573, 2023. 1 +[52] Sijing Wu, Yichao Yan, Yunhao Li, Yuhao Cheng, Wenhan Zhu, Ke Gao, Xiaobo Li, and Guangtao Zhai. 
Ganhead: Towards generative animatable neural head avatars. In CVPR, pages 437-447, 2023. 3 +[53] Jianfeng Xiang, Jiaolong Yang, Yu Deng, and Xin Tong. Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. In ICCV, pages 2195-2205, 2023. 1, 2, 3 +[54] Tianhan Xu and Tatsuya Harada. Deforming radiance fields with cages. In ECCV, pages 159-175, 2022. 3 +[55] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In CVPR, 2022. 1, 2 +[56] Yan Yichao, Cheng Yuhao, Chen Zhuo, Peng Yicong, Wu Sijing, Zhang Weitian, Li Junjie, Li Yixuan, Gao Jingnan, Zhang Weixia, Zhai Guangtao, and Yang Xiaokang. A survey on generative 3d digital humans based on neural networks: representation, rendering, and learning. _SCIENTIA SINICA Informationis_, pages 1858–, 2023. 1 +[57] Yu-Jie Yuan, Yang-Tian Sun, Yu-Kun Lai, Yuewen Ma, Rongfei Jia, and Lin Gao. Nerf-editing: geometry editing of neural radiance fields. In CVPR, pages 18353-18364, 2022. 3 +[58] Ziyang Yuan, Yiming Zhu, Yu Li, Hongyu Liu, and Chun Yuan. Make encoder great again in 3d gan inversion through geometry and occlusion-aware encoding. In ICCV, pages 2437-2447, 2023. 5 +[59] Chi Zhang, Yiwen Chen, Yijun Fu, Zhenglin Zhou, Gang Yu, Billzb Wang, Bin Fu, Tao Chen, Guosheng Lin, and Chunhua Shen. StyleAvatar3d: Leveraging image-text diffusion models for high-fidelity 3d avatar generation. arXiv preprint arXiv:2305.19012, 2023. 2 +[60] Jianfeng Zhang, Zihang Jiang, Dingdong Yang, Hongyi Xu, Yichun Shi, Guoxian Song, Zhongcong Xu, Xinchao Wang, + +and Jiashi Feng. Avatargen: a 3d generative model for animatable human avatars. In ECCV, pages 668-685. Springer, 2022. 5 +[61] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, pages 586-595, 2018. 5 +[62] Xuanmeng Zhang, Jianfeng Zhang, Rohan Chacko, Hongyi Xu, Guoxian Song, Yi Yang, and Jiashi Feng. Getavatar: Generative textured meshes for animatable human avatars. In ICCV, pages 2273-2282, 2023. 5 +[63] Yufeng Zheng, Victoria Fernández Abrevaya, Marcel C Bühler, Xu Chen, Michael J Black, and Otmar Hilliges. Im avatar: Implicit morphable head avatars from videos. In CVPR, pages 13545-13555, 2022. 3 +[64] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 1, 2, 3 +[65] Jiapeng Zhu, Ruili Feng, Yujun Shen, Deli Zhao, Zheng-Jun Zha, Jingren Zhou, and Qifeng Chen. Low-rank subspaces in gans. NeurIPS, pages 16648-16658, 2021. 2 +[66] Jiapeng Zhu, Yujun Shen, Yinghao Xu, Deli Zhao, and Qifeng Chen. Region-based semantic factorization in gans. In ICML, pages 27612-27632, 2022. +[67] Jiapeng Zhu, Ceyuan Yang, Yujun Shen, Zifan Shi, Bo Dai, Deli Zhao, and Qifeng Chen. Linkgan: Linking gan latents to pixels for controllable image synthesis. In ICCV, pages 7656-7666, 2023. 2 +[68] Peihao Zhu, Rameen Abdal, Yipeng Qin, and Peter Wonka. Sean: Image synthesis with semantic region-adaptive normalization. In CVPR, pages 5104-5113, 2020. 3 +[69] Peiye Zhuang, Oluwasanmi Koyejo, and Alexander G Schwing. Enjoy your editing: Controllable gans for image editing via latent space navigation. 
arXiv preprint arXiv:2102.01187, 2021.3 \ No newline at end of file diff --git a/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/images.zip b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..08759239f54811a6c213b708c32eee96acee85a0 --- /dev/null +++ b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c1f302378a32064a9d3776bc3bf183bc09724cabb596d7d846f5a53f061617f +size 677753 diff --git a/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/layout.json b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1815fb866d9baa9e5f90f2cadc12fc70f71e701b --- /dev/null +++ b/2024/3D-Aware Face Editing via Warping-Guided Latent Direction Learning/layout.json @@ -0,0 +1,8822 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 76, + 103, + 517, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 103, + 517, + 121 + ], + "spans": [ + { + "bbox": [ + 76, + 103, + 517, + 121 + ], + "type": "text", + "content": "3D-Aware Face Editing via Warping-Guided Latent Direction Learning" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "spans": [ + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "text", + "content": "Yuhao Cheng" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "text", + "content": " Zhuo Chen" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "text", + "content": " Xingyu Ren" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "text", + "content": " Wenhan Zhu" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "text", + "content": " Zhengqin Xu" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "text", + "content": " Di Xu" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "text", + "content": " Changpeng Yang" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "text", + "content": " Yichao Yan" + }, + { + "bbox": [ + 109, + 142, + 483, + 172 + ], + "type": "inline_equation", + "content": "^{1*}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 93, + 176, + 500, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 176, + 500, + 205 + ], + "spans": [ + { + "bbox": [ + 93, + 176, + 500, + 205 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 93, + 176, + 500, + 205 + ], + "type": "text", + "content": "MoE Key Lab of Artificial 
Intelligence, AI Institute, Shanghai Jiao Tong University " + }, + { + "bbox": [ + 93, + 176, + 500, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 93, + 176, + 500, + 205 + ], + "type": "text", + "content": "Huawei Cloud Computing Technologies Co., Ltd" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 94, + 207, + 499, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 207, + 499, + 232 + ], + "spans": [ + { + "bbox": [ + 94, + 207, + 499, + 232 + ], + "type": "text", + "content": "{chengyuhao,ningci5252,rxy_sjtu,zhuwenhan823,fate311,yanyichao}@sjtu.edu.cn, {xudi21,yangchangpeng}@huawei.com" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 53, + 240, + 541, + 401 + ], + "blocks": [ + { + "bbox": [ + 53, + 240, + 541, + 401 + ], + "lines": [ + { + "bbox": [ + 53, + 240, + 541, + 401 + ], + "spans": [ + { + "bbox": [ + 53, + 240, + 541, + 401 + ], + "type": "image", + "image_path": "49cb9423275b6bee71d88d91ade48eae4998b40fa3498a43e0866c33cc5ac462.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 407, + 546, + 440 + ], + "lines": [ + { + "bbox": [ + 46, + 407, + 546, + 440 + ], + "spans": [ + { + "bbox": [ + 46, + 407, + 546, + 440 + ], + "type": "text", + "content": "Figure 1. An example of our warping-guided 3D-aware face editing method. Our method supports users to edit 3D faces in an intuitive way that drags points from multiple perspectives. Moreover, our method can achieve disentangled editing for shape, expression, and view, while maintaining 3D consistency. Please zoom-in for detailed observation." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 450, + 192, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 450, + 192, + 462 + ], + "spans": [ + { + "bbox": [ + 143, + 450, + 192, + 462 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 469, + 289, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 469, + 289, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 469, + 289, + 696 + ], + "type": "text", + "content": "3D facial editing, a longstanding task in computer vision with broad applications, is expected to fast and intuitively manipulate any face from arbitrary viewpoints following the user's will. Existing works have limitations in terms of intuitiveness, generalization, and efficiency. To overcome these challenges, we propose FaceEdit3D, which allows users to directly manipulate 3D points to edit a 3D face, achieving natural and rapid face editing. After one or several points are manipulated by users, we propose the tri-plane warping to directly deform the view-independent 3D representation. To address the problem of distortion caused by tri-plane warping, we train a warp-aware encoder to project the warped face onto a standardized latent space. In this space, we further propose directional latent editing to mitigate the identity bias caused by the encoder and realize the disentangled editing of various attributes. Extensive experiments show that our method achieves superior results with rich facial details and nice identity preservation. 
Our approach also supports general applications like" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 451, + 545, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 451, + 545, + 475 + ], + "spans": [ + { + "bbox": [ + 306, + 451, + 545, + 475 + ], + "type": "text", + "content": "multi-attribute continuous editing and cat/car editing. The project website is https://cyh-sj.github.io/FaceEdit3D/." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 500, + 386, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 500, + 386, + 513 + ], + "spans": [ + { + "bbox": [ + 306, + 500, + 386, + 513 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 521, + 545, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 652 + ], + "type": "text", + "content": "High-quality face editing has long been an important research topic in computer vision with a wide range of applications, including social media and film production. Previous methods [16, 36, 43] based on 2D GANs [22, 23] have demonstrated the capability of editing facial images with high-fidelity. Recently, benefiting from the impressive achievements of 3D-aware generative models, especially in generative digital human [2-4, 11, 15, 32, 33, 41, 45, 51, 53, 55, 56, 64], the field of 3D facial editing has further attracted significant interest due to its promising capacity of manipulating a 3D representation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "type": "text", + "content": "Typically, 3D face editing methods can be generally classified into three categories: prior-guided conditioning, parameter-space fine-tuning, and latent-space optimization, as summarized in Tab. 1. Specifically, prior-guided conditioning methods [18, 46-48] employ an additional well" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 702, + 141, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 702, + 141, + 712 + ], + "spans": [ + { + "bbox": [ + 66, + 702, + 141, + 712 + ], + "type": "text", + "content": "* Corresponding author" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "916" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 70, + 288, + 167 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 288, + 167 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 288, + 167 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 288, + 167 + ], + "type": "table", + "html": "
SchemeMethodsIntuitivenessGeneralizationEfficiency
Conditional control[18, 46, 48]
Fine-tuned models[6, 13, 59]
Supervised directions[1, 36, 43]
Unsupervised directions[16, 42, 67]
[34] (2D)
Ours
", + "image_path": "01a28f54950d8d1fcf089d8e894b30137888275b5dbafb64b5cd9920e06791ef.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 175, + 287, + 198 + ], + "lines": [ + { + "bbox": [ + 47, + 175, + 287, + 198 + ], + "spans": [ + { + "bbox": [ + 47, + 175, + 287, + 198 + ], + "type": "text", + "content": "Table 1. Summary of 3D-aware face editing methods. " + }, + { + "bbox": [ + 47, + 175, + 287, + 198 + ], + "type": "inline_equation", + "content": "\\triangle" + }, + { + "bbox": [ + 47, + 175, + 287, + 198 + ], + "type": "text", + "content": " indicates its instructions are somewhat ambiguous semantically." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 210, + 287, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 210, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 46, + 210, + 287, + 342 + ], + "type": "text", + "content": "designed conditioning module to introduce the control information, e.g., semantic maps [18, 46] and 3DMM [48, 49], into the 3D-aware models. Although flexible, these models typically require a large number of face images with their control labels for training. Parameter-space finetuning methods [6, 13, 59] optimize the pre-trained generators given the target input, achieving zero-shot editing with the help of the large language-image model, e.g., CLIP [38] or Stable Diffusion [39]. However, it is required to maintain a particular generator for each specific editing target, severely constraining their generalization." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 342, + 288, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 288, + 557 + ], + "type": "text", + "content": "Due to the rich distributions learned in the pre-trained generator, discovering the meaningful directions in the latent space allows for a wide range of editing without the need to modify the generator and dependence on a large amount of training data. According to the exploration of editing direction, latent-space optimization can be achieved in supervised and unsupervised ways. Supervised methods [1, 36, 43, 44] search the meaningful directions in the latent space by learning labeled data for each specific editing. However, these methods cannot be generalized beyond the training domain. In contrast, unsupervised methods [16, 42, 50, 65-67] discover out-of-domain directions by analyzing the distribution of the latent space. However, the editing directions in the latent space are typically not semantically intuitive for the users. Accordingly, introducing interactive guidance to bridge the gap between the latent space and the user's intuition becomes the main purpose of the unsupervised methods." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 558, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 288, + 715 + ], + "type": "text", + "content": "To achieve this, several works [12, 34] utilize manipulating points on 2D images to optimize latent code in an unsupervised way, achieving image editing intuitively. The most prominent method DragGAN [34] proposes motion supervision and point tracking to optimize the latent code in a self-supervised manner, showcasing its flexible and intuitive editing capabilities. 
Considering their success on 2D images, it would be highly desirable if we could also manipulate 3D points to edit a 3D facial representation. However, it is non-trivial to directly extend point dragging to 3D-aware facial editing, due to the following challenges. 1) These methods ignore the global 3D facial structure and only focus on the movements of specific points, potentially" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "leading to exaggerated distortions. 2) These methods employ an inefficient approach to optimize the latent codes for image editing. Therefore, extending this procedure to 3D-aware generators fails to meet the demands of 3D interactive applications. 3) The controllability of point dragging is less precise and may cause ambiguous targets, e.g., enlarging the shape of the mouth may lead the mouth to open." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 157, + 546, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 157, + 546, + 384 + ], + "spans": [ + { + "bbox": [ + 304, + 157, + 546, + 384 + ], + "type": "text", + "content": "To overcome these challenges, we propose FaceEdit3D to learn editing directions guided by 3D-consistent facewarping, realizing intuitive and rapid 3D-aware facial editing. (1) First, we propose tri-plane warping on the 3D representation to achieve accurate 3D-consistent facial editing, which allows us to sidestep inaccurate motion supervision. Further, we introduce 3D landmarks rather than arbitrary points as face prior to constrain the change in the normal face distribution. Although tri-plane warping allows for precise editing, it introduces slight facial distortions. (2) Hence, we train a warp-aware encoder instead of latent optimization to straightforwardly project the warped renderings into the standardized space, enabling fast and photorealistic editing. Due to the complex semantic information in the latent space of 3D-aware generators, the obtained encoder suffers from inherent bias, resulting in a loss of details and identity shifting. (3) Therefore, we propose to learn the hierarchical directional editing in latent space, enabling disentangled face editing with identity and details preservation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 384, + 545, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 384, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 304, + 384, + 545, + 492 + ], + "type": "text", + "content": "With all the designs above, we successfully introduce dragging-based edits into 3D face representations. Our work achieves an efficient and straightforward editing process which also enables the decoupling of facial expressions and shapes. Compared to other face editing approaches, our method offers a more intuitive bridge but avoids dependence on the 3D annotations. Extensive experiments have demonstrated the superiority of our method in intuitiveness, generalization, and efficiency for the task of facial editing." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 493, + 526, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 493, + 526, + 504 + ], + "spans": [ + { + "bbox": [ + 317, + 493, + 526, + 504 + ], + "type": "text", + "content": "The main contributions are summarized as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 506, + 545, + 624 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 306, + 506, + 545, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 506, + 545, + 528 + ], + "spans": [ + { + "bbox": [ + 306, + 506, + 545, + 528 + ], + "type": "text", + "content": "- We design an efficient and straightforward 3D-aware face editing pipeline that is in line with the user's intuition." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 529, + 544, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 529, + 544, + 553 + ], + "spans": [ + { + "bbox": [ + 306, + 529, + 544, + 553 + ], + "type": "text", + "content": "- We propose to warp the face in the tri-plane feature level, enabling 3D-consistent face manipulation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 554, + 544, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 554, + 544, + 589 + ], + "spans": [ + { + "bbox": [ + 306, + 554, + 544, + 589 + ], + "type": "text", + "content": "- We propose a warp-aware encoder to better identify the subtle changes and efficiently solve the problem of distorted face caused by the tri-plane warp." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 590, + 544, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 590, + 544, + 624 + ], + "spans": [ + { + "bbox": [ + 306, + 590, + 544, + 624 + ], + "type": "text", + "content": "- We propose directional editing in latent space, achieving disentangled facial editing with the preservation of identity and details." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 638, + 397, + 650 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 638, + 397, + 650 + ], + "spans": [ + { + "bbox": [ + 306, + 638, + 397, + 650 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 658, + 406, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 406, + 670 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 406, + 670 + ], + "type": "text", + "content": "2.1. 
3D-aware GANs" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "Inspired by the superiority of implicit representation [31], several attempts [2-4, 11, 15, 32, 33, 41, 45, 53, 55, 64] deploy radiance fields into generative models and thus en" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "type": "text", + "content": "917" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 312 + ], + "type": "text", + "content": "able 3D consistent image synthesis. The capability of learning 3D representations from unposed single-view 2D images only empowers these 3D-aware GAN models to gain wide interests and applications. However, partial 3D-aware GANs [3, 15, 32, 33, 41, 64] adopt full implicit representation that lacks pre-computed 3D features before the point sampling. As a consequence, they need to regenerate the 3D feature when given novel viewpoints, limiting the efficiency of them in interactive applications. To address this challenge, several works [2, 4, 45, 53] adopt hybrid representations that first generate view-independent features, and enable sampling points on these pre-computed features for novel view synthesis. Consequently, these methods can realize rapid generation and maintain the inherent 3D-consistent representation. Specifically, EG3D [4] introduces the light tri-plane representation into the generator to raise efficiency and further enhance the image quality. Considering its efficient representation and mature downstream techniques, we adopt the EG3D [4] as the base 3D-aware model to demonstrate the effectiveness of our methods." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 320, + 248, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 320, + 248, + 333 + ], + "spans": [ + { + "bbox": [ + 47, + 320, + 248, + 333 + ], + "type": "text", + "content": "2.2. Implicit Representations Deformation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 339, + 289, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 339, + 289, + 615 + ], + "spans": [ + { + "bbox": [ + 47, + 339, + 289, + 615 + ], + "type": "text", + "content": "The deformation of 3D implicit representation has long attracted wide focus, as it serves as the foundation of broad animation applications. Prior researches predominantly introduce an additional deformation field based on the original representation to modify the 3D points. Specifically, deformation fields can be implemented through proxy-based editing [14, 21, 35, 57], cage-based editing [17, 37, 54], and parametric prior-based editing [40, 52, 63], etc. Proxy-based editing learns a lightweight neural network to compute the translation and rotation of 3D points, enabling the deformation of original 3D coordinates. 
The cage-based methods establish a surrounding cage to fully cover up the original surface of an implicit representation and then modify the cage to deform the inherent surface. Parametric prior-based methods leverage the parametric models such as SMPL [29] and FLAME [27] as a prior condition of the deformation network to drive the implicit representations. However, all of these approaches need to optimize a controllable module for each specific object, lack of efficiency and generality. In contrast, our work provides a landmark-based way to directly edit the 3D representation without optimization and further compresses the 3D deformation into 2D feature planes to improve efficiency." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 623, + 173, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 623, + 173, + 635 + ], + "spans": [ + { + "bbox": [ + 47, + 623, + 173, + 635 + ], + "type": "text", + "content": "2.3. Face Editing in GANs" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 641, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 641, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 641, + 287, + 714 + ], + "type": "text", + "content": "As the latent space learned by the conditioned GANs contains most of the distribution knowledge, many works [1, 42, 43, 50, 69] explore the latent space of a pre-trained generator for the following facial attribute editing. Specifically, InterFaceGAN [43] studies the semantics encoded in the latent space and disentangles the facial semantics with linear" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 547, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 324 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 324 + ], + "type": "text", + "content": "projection. To explicitly edit the facial attributes, further works explore utilizing the intuitive representation, e.g., semantic maps [5, 46, 47, 68] and text prompts [19, 36] for the optimization or the extension of latent space. Moreover, an idea that directly drags the face for the editing catches the wide attention. DragGAN [34] optimizes the latent space via dragging selected points on the image to the target positions. However, it is hard to preserve the facial identity when setting a far distance between the two points, preventing the DragGAN from large-scale editing. Despite the prominent performance of latent space manipulation, it still faces a challenge in balancing the identity preservation and editing amplitude. To further enhance the editing capability, several works [6, 13, 24] focus on the parameter space of a pre-trained generator. While these methods can achieve out-of-domain editing, they need to maintain a specific generator for each attribute manipulation, lacking efficiency. Compared to the methods mentioned above, our method is an intuitive way of dragging points to deform the 3D representations while improving the efficiency and preserving the identity." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 333, + 367, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 333, + 367, + 346 + ], + "spans": [ + { + "bbox": [ + 306, + 333, + 367, + 346 + ], + "type": "text", + "content": "3. 
Methods" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 354, + 547, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 354, + 547, + 546 + ], + "spans": [ + { + "bbox": [ + 304, + 354, + 547, + 546 + ], + "type": "text", + "content": "Our proposed framework, FaceEdit3D, aims at multi-view consistent facial editing in shape, expression, and pose via warping-guided directional editing, as illustrated in Fig. 2. To this end, we first review the 3D-aware GAN that achieves high-resolution face rendering from multiple views (Sec. 3.1). Based on the 3D-aware generator, we propose a point-guided feature-space warping method that manipulates the inherent tri-plane representations while ensuring the 3D consistency (Sec. 3.2). However, directly editing the tri-plane may lead to distortions in the final rendered images. Therefore, we train a specifically designed encoder to project the warped renderings to the standardized latent space for photo-realistic editing results (Sec. 3.3). Finally, we delve into the mechanism of latent space and propose directional editing in latent space that enables the disentangled editing of facial shape, expression, and pose (Sec. 3.4)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 552, + 531, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 552, + 531, + 563 + ], + "spans": [ + { + "bbox": [ + 306, + 552, + 531, + 563 + ], + "type": "text", + "content": "3.1. Preliminaries on 3D-aware Face Generator" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": "Our framework is built upon EG3D [4], one of the most powerful 3D-aware generative models that achieve photorealistic 3D face generation. The generator of EG3D introduces a tri-plane representation, which compactly encodes the geometry and appearance of a 3D face. Specifically, the tri-plane features can be denoted as " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{F} = \\mathcal{G}(\\mathbf{w})\\in" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{3\\times 32\\times 256\\times 256}" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " , where " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " is a latent code. 
To render face images from a specific viewpoint, the features of 3D coordinates are sampled from the tri-plane features and a shallow decoder is leveraged to project the tri-plane feature " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{F}(x,y,z)\\in \\mathbb{R}^{32\\times 3}" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " into volume density " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\sigma \\in \\mathbb{R}^1" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " and color feature " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "c\\in \\mathbb{R}^{32}" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " . Subsequently, a low-resolution fea" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "918" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 71, + 329, + 213 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 329, + 213 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 329, + 213 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 329, + 213 + ], + "type": "image", + "image_path": "24c131db8dd70402acf4b937f3437dcb105b9f281ff2f7069ac4460692c5f0cf.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 218, + 247, + 229 + ], + "lines": [ + { + "bbox": [ + 132, + 218, + 247, + 229 + ], + "spans": [ + { + "bbox": [ + 132, + 218, + 247, + 229 + ], + "type": "text", + "content": "(a) 3D-consistent Tri-plane Warp" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 334, + 71, + 542, + 217 + ], + "blocks": [ + { + "bbox": [ + 334, + 71, + 542, + 217 + ], + "lines": [ + { + "bbox": [ + 334, + 71, + 542, + 217 + ], + "spans": [ + { + "bbox": [ + 334, + 71, + 542, + 217 + ], + "type": "image", + "image_path": "7bef67a8c11a6425320cd8bc3b98ec878d42aea9bd1b469b6c0d88678b328f69.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 376, + 218, + 503, + 228 + ], + "lines": [ + { + "bbox": [ + 376, + 218, + 503, + 228 + ], + "spans": [ + { + "bbox": [ + 376, + 218, + 503, + 228 + ], + "type": "text", + "content": "(b) The Pipeline of Our FaceEdit3D" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "lines": [ + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "text", + "content": "Figure 2. Overview of our proposed FaceEdit3D. (a) A detailed illustration of our tri-plane warp. We project 2D key points onto the 3D face surface and then map them to each corresponding plane within a tri-plane representation. Afterward, we apply warping operations to each plane to achieve 3D-consistent editing. (b) The full pipeline of our FaceEdit3D. 
Given a source image " + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_s" + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "text", + "content": " with its latent code " + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_s" + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "text", + "content": ", we first perform the tri-plane warping on it and obtain the warped rendering " + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_t" + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "text", + "content": ". Subsequently, we utilize a warp-aware encoder to extract the latent codes " + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_s'" + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_t'" + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "text", + "content": " from the source image " + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_s" + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "text", + "content": " and the warped renderings " + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_t" + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "text", + "content": ", respectively. Then, we employ the hierarchical latent direction to update the target latent code " + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_t" + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "text", + "content": ". Finally, the edited facial image " + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_t" + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "text", + "content": " can be synthesized via the updated latent code " + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_t" + }, + { + "bbox": [ + 46, + 237, + 545, + 304 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 313, + 287, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 313, + 287, + 374 + ], + "spans": [ + { + "bbox": [ + 46, + 313, + 287, + 374 + ], + "type": "text", + "content": "ture map is generated via volume rendering and then upsampled to high-resolution images. The representation ability of tri-plane features has been verified by several recent works [7, 20, 24]. Therefore, to achieve 3D-consistent editing, we choose to operate directly on the tri-plane features." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 382, + 241, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 382, + 241, + 396 + ], + "spans": [ + { + "bbox": [ + 47, + 382, + 241, + 396 + ], + "type": "text", + "content": "3.2. 
3.2. Multi-view Consistent Face Warping

For 3D face editing, a flexible interaction is to let users directly drag points on the rendered images. Unlike 2D-level editing, which is limited to one specific viewpoint, 3D-level manipulation should support editing from an arbitrary viewpoint and achieve 3D-consistent editing effects. To achieve this, we propose a framework based on point-guided tri-plane warping, where users manipulate one or several points from a desirable viewpoint and the tri-plane features are warped according to the point displacements.

Point Manipulation by Users. Ideally, users could directly modify arbitrary points in a rendered face to achieve editing. Nevertheless, potential conflicts among excessive control points may lead to undesirable distortions of the facial structure during joint point manipulation, yielding results that deviate from realistic human appearances. To address this issue, we constrain users to manipulate a set of meaningful 3D facial landmarks, which guarantees a natural face structure.

Specifically, given a latent code $\mathbf{w}_s$ and a pre-trained EG3D generator $\mathcal{G}$, the portrait is first rendered in the front view with camera intrinsics $\mathbf{K}$. Then, 2D facial landmarks are detected by a pre-trained detector and projected onto the facial surface to obtain 3D landmarks $\mathbf{P} = \{\mathbf{p}_0,\mathbf{p}_1,\dots ,\mathbf{p}_n\} \in \mathbb{R}^{n\times 3}$, where $\mathbf{p}_i = \{\mathbf{p}_i^x,\mathbf{p}_i^y,\mathbf{p}_i^z\} \in \mathbb{R}^3$.
Consequently, users can render images from an arbitrary viewpoint with extrinsic $\mathbf{R} \in \mathbb{SO}(3)$ and select any specific points for editing. Taking the selected point $\mathbf{p}_i$ as an example, we constrain the movement $\Delta \mathbf{p}_i$ of the point to be perpendicular to the rendering direction. The updated 3D point $\mathbf{p}_i^{\prime}$ is represented as:

$$\mathbf{p}_i^{\prime} = \mathbf{p}_i + \mathbf{R}^{-1}\mathbf{K}^{-1}\mathbf{Z}\,\Delta \mathbf{p}_i, \tag{1}$$

where $\mathbf{Z}$ is the depth of the selected point under the pose $\mathbf{R}$. After manipulating specific points within the facial structure, we obtain a set of new 3D landmarks $\mathbf{P}' = \{\mathbf{p}_0', \mathbf{p}_1', \dots, \mathbf{p}_n'\}$.
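As a concrete reading of Eq. (1), the sketch below lifts a 2D drag on the rendered image back into 3D. The intrinsics, extrinsics, and drag values are toy numbers, and treating $\mathbf{Z}$ as a scalar depth is an assumption consistent with the sentence above; in the actual system these quantities come from the EG3D rendering camera and the user interface.

```python
import torch

def update_landmark(p, delta_p_2d, R, K, z):
    """Eq. (1): p' = p + R^{-1} K^{-1} Z * delta_p.

    p:          (3,) selected 3D landmark in world coordinates.
    delta_p_2d: (2,) user drag on the image plane, in pixels.
    R:          (3, 3) camera rotation (extrinsic) of the rendering view.
    K:          (3, 3) camera intrinsic matrix.
    z:          scalar depth of the landmark under pose R.
    The drag is written as a homogeneous pixel displacement with zero third
    component, so the back-projected motion stays perpendicular to the ray.
    """
    delta_h = torch.tensor([delta_p_2d[0], delta_p_2d[1], 0.0])
    return p + z * torch.linalg.inv(R) @ torch.linalg.inv(K) @ delta_h

# Toy example: drag a landmark 5 pixels to the right in a 512x512 rendering.
K = torch.tensor([[500.0, 0.0, 256.0], [0.0, 500.0, 256.0], [0.0, 0.0, 1.0]])
R = torch.eye(3)                        # frontal view for simplicity
p = torch.tensor([0.02, -0.05, 0.30])   # selected 3D landmark
p_new = update_landmark(p, (5.0, 0.0), R, K, z=0.30)
print(p_new)
```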
Tri-plane Warping. After the users have manipulated the key points, we apply 3D warping on the tri-planes to edit the 3D representation. By considering each of the tri-plane features individually [7], we can reduce editing in 3D space to three 2D planes, which enhances efficiency. Therefore, we begin by projecting the 3D landmarks onto the three feature planes and then apply a similar warping transformation to each of these feature planes, as illustrated in Fig. 2 (a). Taking the $xy$-plane $\mathbf{F}_{xy}$ as an example, given $n$ source projected points $\mathbf{P}^{xy} = \{\mathbf{p}_0^{xy}, \mathbf{p}_1^{xy}, \dots, \mathbf{p}_n^{xy}\} \in \mathbb{R}^{n \times 2}$ with $\mathbf{p}_i^{xy} = \{\mathbf{p}_i^x, \mathbf{p}_i^y\}$, and their target points $\hat{\mathbf{P}}^{xy} = \{\hat{\mathbf{p}}_0^{xy}, \hat{\mathbf{p}}_1^{xy}, \dots, \hat{\mathbf{p}}_n^{xy}\}$, we employ thin-plate spline interpolation [9] to compute the grid sampler:

$$g(\mathbf{q}) = \sum_{i=1}^{n} w_i\,\phi\left(\left\|\mathbf{q} - \hat{\mathbf{p}}_i\right\|\right) + \mathbf{v}^{T}\mathbf{q} + \mathbf{b}, \tag{2}$$

where $\phi(r) = r^2\log(r)$ is the kernel function and $g(\mathbf{q})$ maps each location $\mathbf{q}$ in the warped plane back to the coordinates in the original plane from which it is sampled. The parameters $\mathbf{v}$ and $\mathbf{b}$ are chosen to minimize a measure of curvature. By applying such inverse mapping to all three planes, we complete the tri-plane warping and achieve inherently 3D-consistent modification. Compared to manipulating the sampled 3D coordinate space [60, 62], our method directly manipulates the 3D representation, enabling simultaneous editing from multiple viewpoints without additional steps.
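A minimal sketch of the thin-plate-spline warp in Eq. (2) applied to one feature plane is given below. It fits the TPS coefficients so that each edited landmark maps back to its source location and then resamples the plane with `grid_sample`; the plane size, landmark values, and lack of regularization are illustrative assumptions, not the paper's exact implementation.

```python
import torch
import torch.nn.functional as F

def tps_kernel(d):
    """phi(r) = r^2 log(r); the clamp keeps phi(0) = 0 without NaNs."""
    return d.pow(2) * torch.log(d.clamp_min(1e-9))

def fit_tps(src, dst):
    """Fit g() in Eq. (2) such that g(dst_i) = src_i (inverse mapping).

    src, dst: (n, 2) control points in normalized [-1, 1] plane coordinates.
    Returns the radial weights w (n, 2) and the affine part a (3, 2).
    """
    n = dst.shape[0]
    K = tps_kernel(torch.cdist(dst, dst))                  # (n, n) kernel matrix
    P = torch.cat([torch.ones(n, 1), dst], dim=1)          # (n, 3) = [1, x, y]
    A = torch.zeros(n + 3, n + 3)
    A[:n, :n], A[:n, n:], A[n:, :n] = K, P, P.T
    b = torch.cat([src, torch.zeros(3, 2)], dim=0)         # (n+3, 2) right-hand side
    sol = torch.linalg.solve(A, b)
    return sol[:n], sol[n:]                                # w, a

def warp_plane(plane, src_pts, dst_pts):
    """Warp one (C, H, W) feature plane so that src landmarks move to dst."""
    C, H, W = plane.shape
    w, a = fit_tps(src_pts, dst_pts)
    ys, xs = torch.meshgrid(torch.linspace(-1, 1, H), torch.linspace(-1, 1, W),
                            indexing="ij")
    q = torch.stack([xs, ys], dim=-1).reshape(-1, 2)       # query locations in the warped plane
    U = tps_kernel(torch.cdist(q, dst_pts))                # (H*W, n)
    g = U @ w + torch.cat([torch.ones(len(q), 1), q], 1) @ a   # Eq. (2): source sampling coords
    grid = g.reshape(1, H, W, 2)
    return F.grid_sample(plane[None], grid, mode="bilinear",
                         padding_mode="border", align_corners=True)[0]

# Toy usage: move one landmark slightly to the right on a 64x64, 32-channel plane.
plane = torch.randn(32, 64, 64)
src = torch.tensor([[-0.5, -0.5], [0.5, -0.5], [0.0, 0.0], [-0.5, 0.5], [0.5, 0.5]])
dst = src.clone(); dst[2, 0] += 0.2
print(warp_plane(plane, src, dst).shape)                   # torch.Size([32, 64, 64])
```

Fitting the inverse mapping (target to source) rather than the forward one lets the warped plane be filled by a single `grid_sample` call without holes.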
3.3. Warp-Aware Encoding

After tri-plane warping, the editing results exhibit 3D-consistent modification. However, directly applying the warping operation to tri-plane features may not conform to the facial distribution in the latent space, leading to a severely distorted appearance. To solve this problem, we encode the distorted facial image $\hat{\mathbf{I}}_t$ into a standardized latent space, learning the natural counterpart $\mathbf{w}_t^{\prime}$ of the distorted face with an encoder $\mathcal{E}$:

$$\mathbf{w}_t^{\prime} = \mathcal{E}(\hat{\mathbf{I}}_t). \tag{3}$$
To train the encoder, we sample images from the pre-trained generator to build image and latent code pairs. Specifically, a portrait $\mathbf{I}_s$ is generated from a randomly sampled latent code and a camera pose $\mathbf{c}$. Subsequently, the portrait $\mathbf{I}_s$ is projected to the latent code $\mathbf{w}_s^{\prime}$ by the encoder $\mathcal{E}$, and the corresponding image $\mathbf{I}_s^{\prime}$ is generated by the same frozen generator $\mathcal{G}$ under the pose $\mathbf{c}$. The optimization objective of the encoder is the combination of an L1 loss, the LPIPS loss [61], and an identity loss [10]:

$$\mathcal{L}_o = \mathcal{L}_1\left(\mathbf{I}_s, \mathbf{I}_s^{\prime}\right) + \mathcal{L}_{\mathrm{LPIPS}}\left(\mathbf{I}_s, \mathbf{I}_s^{\prime}\right) + \mathcal{L}_{\mathrm{ID}}\left(\mathbf{I}_s, \mathbf{I}_s^{\prime}\right). \tag{4}$$
Unfortunately, we find that an encoder trained only with the above objective has difficulty identifying subtle modifications due to the inherent complexity of 3D-aware generators. Hence, we further introduce tri-plane warping as a data augmentation to enhance the overall perception of subtle edits. Similar to the above training pipeline, we apply the encoder to the warped rendering $\hat{\mathbf{I}}_t$ to obtain the latent code $\mathbf{w}_t'$, and then generate its inverted image $\mathbf{I}_t'$. The loss is calculated between $\mathbf{I}_t'$ and $\hat{\mathbf{I}}_t$:

$$\mathcal{L}_w = \mathcal{L}_1\left(\hat{\mathbf{I}}_t, \mathbf{I}_t^{\prime}\right) + \mathcal{L}_{\mathrm{LPIPS}}\left(\hat{\mathbf{I}}_t, \mathbf{I}_t^{\prime}\right) + \mathcal{L}_{\mathrm{ID}}\left(\hat{\mathbf{I}}_t, \mathbf{I}_t^{\prime}\right). \tag{5}$$
Besides, following GOAE [58], we utilize a discriminator $\mathcal{D}$ to keep the latent codes $\mathbf{w}_t^{\prime}$ and $\mathbf{w}_s^{\prime}$ within the standardized latent space:

$$\mathcal{L}_d = \mathbb{E}\left[f(\mathcal{D}(\mathbf{w}_t^{\prime})) + f(\mathcal{D}(\mathbf{w}_s^{\prime}))\right] + \mathbb{E}\left[f(-\mathcal{D}(\mathbf{w}_c))\right] + \gamma \left\|\nabla \mathcal{D}(\mathbf{w}_c)\right\|^{2}, \tag{6}$$

where $f(x) = -\log(1 + \exp(-x))$ and $\gamma$ is the hyperparameter of the R1 regularization. $\mathbf{w}_c$ are standardized latent codes pre-sampled from the frozen generator. The final objective linearly combines the aforementioned losses:

$$\mathcal{L} = \mathcal{L}_o + \mathcal{L}_w + \mathcal{L}_d. \tag{7}$$
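A compact sketch of the combined objective in Eqs. (4)-(7) is given below. The `encoder`, `generator`, `disc`, `lpips_loss`, and `id_loss` arguments are placeholders for the warp-aware encoder, the frozen EG3D generator, the GOAE-style latent discriminator [58], LPIPS [61], and ArcFace [10]; in practice the encoder and discriminator would be updated in alternating steps, so this is only a literal reading of the equations, not the paper's exact training loop.

```python
import torch
import torch.nn.functional as F

def f(x):                        # f(x) = -log(1 + exp(-x)) = -softplus(-x), as in Eq. (6)
    return -F.softplus(-x)

def encoder_losses(encoder, generator, disc, lpips_loss, id_loss,
                   I_s, I_warp, w_c, cam, gamma=10.0):
    """One evaluation of the warp-aware encoder objective, Eqs. (4)-(7).

    I_s:    source portraits sampled from the frozen generator.
    I_warp: their tri-plane-warped renderings (the data augmentation).
    w_c:    standardized latent codes pre-sampled from the frozen generator.
    cam:    camera poses used to re-render the inverted codes.
    """
    w_s = encoder(I_s)                           # invert the source image
    w_t = encoder(I_warp)                        # invert the warped rendering
    I_s_rec = generator(w_s, cam)                # frozen generator
    I_t_rec = generator(w_t, cam)

    def rec(a, b):                               # L1 + LPIPS [61] + ID [10]
        return F.l1_loss(a, b) + lpips_loss(a, b) + id_loss(a, b)

    L_o = rec(I_s_rec, I_s)                      # Eq. (4)
    L_w = rec(I_t_rec, I_warp)                   # Eq. (5)

    w_c = w_c.detach().requires_grad_(True)      # "real" samples for the R1 penalty
    d_real = disc(w_c)
    r1 = torch.autograd.grad(d_real.sum(), w_c, create_graph=True)[0].pow(2).sum()
    L_d = (f(disc(w_t)) + f(disc(w_s))).mean() + f(-d_real).mean() + gamma * r1  # Eq. (6)

    return L_o + L_w + L_d                       # Eq. (7)
```

The Adam settings listed in Sec. 4.1 (learning rate 1e-4 for both encoder and discriminator) would drive the alternating updates built on top of these terms.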
After the training process, the edited rendering is projected into the latent space and then passed to the generator to yield a more reasonable editing result in the target view $\mathbf{c}_t$:

$$\mathbf{I}_t = \mathcal{G}\left(\mathbf{w}_t^{\prime}, \mathbf{c}_t\right). \tag{8}$$

3.4. Directional Editing in Latent Space

The warp-aware encoder solves the problem of the severely distorted appearance caused by the tri-plane warp; however, it additionally introduces an identity bias into the latent codes, since the encoder cannot faithfully invert faces. Besides, it remains hard to handle the ambiguity that arises during point manipulation. Therefore, we propose directional editing in the latent space to overcome these two challenges.

To begin with, we adopt the difference between the latent codes extracted by the encoder from the images before and after warping as the direction guidance. In this way, we mitigate the identity bias and bypass the problem caused by the encoder. Furthermore, we follow StyleCLIP [36] to explore the semantics of the layers in the $W+$ latent space of EG3D [4], empowering our method with disentangled editing of expression and shape. Thanks to this hierarchical mechanism, we can obtain different editing results by applying the editing direction to different layer groups of the same warped facial image, successfully avoiding the ambiguity caused by the tri-plane warp.
The full pipeline is shown in Fig. 2 (b). Given a latent code $\mathbf{w}_s$ and the frozen EG3D generator $\mathcal{G}$, the facial tri-plane can be generated and warped as described above. The warp-aware encoder then projects the source image and the warped rendering to the standardized latent codes $\mathbf{w}_s'$ and $\mathbf{w}_t'$ with Eq. (3), respectively. The target edited latent code $\mathbf{w}_t$ is calculated as:

$$\mathbf{w}_t = \mathbf{w}_s + H\left(\mathbf{w}_t^{\prime} - \mathbf{w}_s^{\prime}\right), \tag{9}$$

where $H(\cdot)$ is a feature selection module for disentangling the latent direction. Finally, the modified portrait $\mathbf{I}_t$ can be rendered from any perspective $\mathbf{c}_t$ with $\mathbf{I}_t = \mathcal{G}(\mathbf{w}_t,\mathbf{c}_t)$.
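Eq. (9) amounts to adding the encoder-derived direction only to a chosen group of $W+$ layers. The sketch below treats the feature selection module $H(\cdot)$ as a fixed binary layer mask, which is a simplifying assumption, and the split into "shape" and "expression" layer groups is illustrative rather than the paper's exact grouping.

```python
import torch

# EG3D-style W+ codes: one 512-dim style vector per synthesis layer.
NUM_LAYERS, DIM = 14, 512

def directional_edit(w_s, w_t_enc, w_s_enc, layer_mask):
    """Eq. (9): w_t = w_s + H(w_t' - w_s').

    w_s:             (L, D) latent code of the source identity.
    w_t_enc/w_s_enc: (L, D) codes produced by the warp-aware encoder for the
                     warped rendering and the source image, respectively.
    layer_mask:      (L,) 0/1 mask acting as a hard feature selection H(.),
                     restricting the direction to one semantic layer group.
    """
    direction = w_t_enc - w_s_enc                # identity bias largely cancels in the difference
    return w_s + layer_mask[:, None] * direction

# Illustrative layer groups (assumed, not the paper's exact split).
shape_mask = torch.zeros(NUM_LAYERS); shape_mask[:6] = 1.0        # coarse layers -> shape
expr_mask = torch.zeros(NUM_LAYERS);  expr_mask[4:8] = 1.0        # middle layers -> expression

w_s = torch.randn(NUM_LAYERS, DIM)
w_s_enc, w_t_enc = torch.randn(NUM_LAYERS, DIM), torch.randn(NUM_LAYERS, DIM)
w_t_shape = directional_edit(w_s, w_t_enc, w_s_enc, shape_mask)   # shape-only edit
w_t_full = directional_edit(w_s, w_t_enc, w_s_enc, torch.ones(NUM_LAYERS))
print(w_t_shape.shape, w_t_full.shape)
```

Because the same encoder processes both the source and the warped image, any systematic inversion error largely cancels in the difference, which is the identity-bias argument made above.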
4. Experiments

In this section, we evaluate the efficiency and quality of our 3D-aware face editing model. We first introduce the implementation details of our work (Sec. 4.1). Subsequently, we compare our method with SOTA 3D face editing methods qualitatively (Sec. 4.2) and quantitatively (Sec. 4.3). Then, we conduct ablation studies to analyze the effect of each component (Sec. 4.4). Finally, we introduce potential applications of our method (Sec. 4.5).

4.1. Implementation Details

We build our approach on EG3D [4] pre-trained on the FFHQ dataset [22]. We employ Mediapipe [30] to detect 2D landmarks and select 29 points for user manipulation. To obtain 3D landmarks, we first detect 2D landmarks in the frontal view and then compute their 3D coordinates as the locations of maximum density along the corresponding emitted rays.
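The landmark-lifting step described above (taking the maximum-density point along each landmark's emitted ray) might look roughly like the sketch below. The ray construction, the near/far sampling range, and `density_fn` are assumptions standing in for EG3D's actual camera model and tri-plane density query.

```python
import torch

def lift_landmarks(lm_2d, K, cam_to_world, density_fn,
                   near=2.25, far=3.3, n_samples=128):
    """Lift 2D landmarks to 3D as the max-density point along each emitted ray.

    lm_2d:        (n, 2) pixel coordinates of the detected landmarks (frontal view).
    K:            (3, 3) camera intrinsics; cam_to_world: (4, 4) camera pose.
    density_fn:   callable mapping (N, 3) world points to (N,) densities sigma,
                  e.g. a wrapper around the tri-plane decoder.
    near/far:     assumed depth range sampled along each ray.
    """
    n = lm_2d.shape[0]
    pix_h = torch.cat([lm_2d, torch.ones(n, 1)], dim=1)            # homogeneous pixels
    dirs_cam = (torch.linalg.inv(K) @ pix_h.T).T                   # ray directions, camera frame
    dirs = (cam_to_world[:3, :3] @ dirs_cam.T).T                   # rotate into world frame
    origin = cam_to_world[:3, 3]

    t = torch.linspace(near, far, n_samples)                       # depths along each ray
    pts = origin + dirs[:, None, :] * t[None, :, None]             # (n, n_samples, 3)
    sigma = density_fn(pts.reshape(-1, 3)).reshape(n, n_samples)   # query densities
    best = sigma.argmax(dim=1)                                     # max-density sample per ray
    return pts[torch.arange(n), best]                              # (n, 3) lifted landmarks

# Toy check with a dummy density that peaks on a sphere of radius 0.4.
dummy_density = lambda p: torch.exp(-((p.norm(dim=-1) - 0.4) ** 2) / 0.01)
K = torch.tensor([[500.0, 0, 256], [0, 500.0, 256], [0, 0, 1.0]])
cam = torch.eye(4); cam[2, 3] = 2.7                                # toy camera pose
lm = torch.tensor([[256.0, 256.0], [300.0, 240.0]])
print(lift_landmarks(lm, K, cam, dummy_density).shape)             # torch.Size([2, 3])
```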
We adopt a Swin Transformer [28] as the encoder backbone to enhance detail perception. For encoder training, standardized latent codes are sampled to generate face images under random views, covering 100,000 identities in total. We use the Adam optimizer [25] with a learning rate of $1\mathrm{e}{-4}$ for both the encoder and the discriminator. All implementations are based on PyTorch and run on Nvidia A6000 GPUs.

Figure 3. Qualitative comparisons with current SOTA methods for 3D face shape and expression editing. (a), (b), and (c) show results on synthetic samples, and (d) showcases a real-world portrait.

4.2. Qualitative Evaluation

We conduct a qualitative comparison between our work and several SOTA 3D face editing methods with intuitive manipulation, i.e., StyleGAN-NADA [13] guided by text prompts and IDE-3D [46] controlled by semantic maps. Besides, we also include the direct point-based warping approach in the comparison. We adopt similar editing objectives and use the official code of each method to ensure fairness. Fig. 3 shows multi-view results of shape and expression editing, demonstrating the superiority of our method for fine-grained modification. The direct warp can accomplish obvious edits, but it suffers from facial distortion. IDE-3D [46] achieves satisfactory results in most cases; however, the coupling of different facial attributes in the semantic maps leads to changes beyond the target attributes. For instance, the baby in Fig. 3 (c) shows a shift of age and identity when trying to elongate his chin. Besides, IDE-3D only supports single-view editing, limiting its applicability. StyleGAN-NADA [13] fails to edit the facial shape on top of EG3D despite its great success in style transfer and texture editing. In contrast, our method allows the user to manipulate the face from multiple views simultaneously and enables intuitive editing of facial shapes, expressions, and poses without sacrificing identity and detail. Beyond editing quality, our method has the further advantage of not requiring additional training of the generative model, demonstrating its generalization.

Furthermore, we compare our method with a recent 2D method, DragGAN [34], which employs a point-guided operation similar to ours. Since DragGAN is limited to 2D editing, we compare the results in two aspects, i.e., fixed-view editing and novel view synthesis, as shown in Fig. 4.
In terms of fixed-view editing, the results of DragGAN [34] in Fig. 4 (a) show a tendency to open the mouth and change the identity when shortening the nose, even though a mask limiting the editable region is applied. In terms of novel view synthesis, DragGAN severely changes the identity due to ambiguous point dragging, as shown in Fig. 4 (b). Compared to DragGAN, our method achieves the expected editing target while keeping the identity and irrelevant regions unchanged.

| Methods | Scheme | Inference Time (s) ↓ | MSE_i ↑ | MSE_o ↓ | MSE_i / MSE_o ↑ | ID Consistency ↑ |
| --- | --- | --- | --- | --- | --- | --- |
| DragGAN [34] | 2D | 5.231 | 1.992 | 0.224 | 8.893 | 0.579 |
| Ours | 2D | 0.356 | 2.049 | 0.186 | 11.016 | 0.716 |
| Our warp | 3D | 0.269 | 2.455 | 0.328 | 7.485 | 0.707 |
| IDE-3D [46] | 3D | 0.383 | 1.841 | 0.987 | 1.865 | 0.649 |
| Ours | 3D | 0.624 | 1.679 | 0.342 | 4.909 | 0.712 |

Table 2. Quantitative comparison with several face editing methods on efficiency and effectiveness. Our direct warp is excluded from the best-result comparison because of its distorted outputs. The unit of MSE_i and MSE_o is $10^{-2}$.

Figure 4. Qualitative comparisons with DragGAN [34] on portrait editing. Red and blue points represent the source and target points of the manipulations, respectively. The semi-transparent region indicates the mask used for DragGAN, which is not needed by our method.

4.3. Quantitative Evaluation

We also conduct quantitative experiments to verify the efficiency and effectiveness of our method, as shown in Tab. 2. We adopt editing time as the efficiency metric because it strongly influences the user experience. As shown, DragGAN [34] spends a large amount of time on latent optimization, resulting in lower efficiency, whereas IDE-3D [46] and our method exhibit similar efficiency and support real-time editing. Although the direct warp is the fastest method, it causes facial distortion, and thus we exclude it from the comparison.
Furthermore, to assess the capability of disentangled editing, we measure the pixel-wise mean squared error (MSE) inside and outside the target editing regions. The objective is to successfully edit the target regions while preventing the outside regions from being modified. As shown, our approach achieves better editing disentanglement than IDE-3D [46], with a higher ratio of MSE_i to MSE_o. It is worth noting that the editability of 3D GANs is inferior to that of 2D GANs, and thus our method falls behind DragGAN [34] in this respect. Considering the efficiency and the multi-view editing ability of our method, the gap to DragGAN is acceptable. To compare the two methods fairly without the interference of the base generators, we further apply our method to the same 2D generator, where it performs better than DragGAN [34]. Additionally, we compare identity similarity; the results indicate that our method maintains the identity better than the other methods.
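The disentanglement metric discussed above can be written as a small helper: given an edit-region mask, it reports MSE_i inside the region, MSE_o outside, and their ratio (Tab. 2 reports the MSE values in units of $10^{-2}$). The mask source and image layout are assumptions for illustration.

```python
import torch

def disentanglement_mse(img_before, img_after, edit_mask):
    """Pixel-wise MSE inside / outside the target editing region.

    img_before, img_after: (3, H, W) images in [0, 1] before and after editing.
    edit_mask:             (H, W) binary mask of the intended editing region.
    A good edit has a high MSE_i (the target actually changed) and a low
    MSE_o (everything else stayed put), i.e. a high MSE_i / MSE_o ratio.
    """
    sq_err = (img_after - img_before).pow(2).mean(dim=0)   # (H, W) per-pixel error
    m = edit_mask.bool()
    mse_i = sq_err[m].mean()
    mse_o = sq_err[~m].mean()
    return mse_i, mse_o, mse_i / mse_o

# Toy example: a synthetic edit mostly confined to a square region.
before = torch.rand(3, 128, 128)
after = before + 0.01 * torch.randn_like(before)           # small residual change everywhere
after[:, 40:80, 40:80] += 0.2                              # large change in the target region
mask = torch.zeros(128, 128); mask[40:80, 40:80] = 1
mse_i, mse_o, ratio = disentanglement_mse(before, after, mask)
print(f"MSE_i={mse_i:.4f}  MSE_o={mse_o:.4f}  ratio={ratio:.1f}")
```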
Figure 5. The ablation study of our loss functions for training the encoder. The first row aims to widen the double eyelids while keeping the eyes open, and the second aims to lengthen the bangs. The numbers in the corners report the identity similarity measured by ArcFace [10]. Please zoom in for detailed observation.

4.4. Ablation Study

Effectiveness of Loss Functions. We investigate the effectiveness of each loss function in the encoder training process, as depicted in Fig. 5. The $\mathcal{L}_w$ introduced by the warp-assisted data augmentation facilitates accurate identification of the user's manipulations, and $\mathcal{L}_d$ helps to maintain identity information. Their combination achieves the best editing results.

Effectiveness of Directional Latent Editing. We conduct an ablation study to verify the effectiveness of our directional latent editing. We begin by applying tri-plane warping to source identities to obtain the warped results. Subsequently, we extract the directions of different layer groups, i.e., the shape direction, the expression direction, and their combination. Fig. 6 shows that each individual directional latent code can disentangle the corresponding attribute, while their combination realizes integrated editing. In contrast, directly mapping the warped rendering to the latent space without our directional latent module results in identity shift and loss of detail. These results verify the effectiveness of our directional latent editing.

Figure 6. The ablation study of our directional editing. "w/o Dir." represents results generated by directly projecting the warped results to the latent space.

4.5. Applications
Generalization of Learned Latent Directions. The editing direction learned for one face can be generalized to other instances, and we can further control the degree along the direction to linearly interpolate the editing results. Fig. 7 shows the interpolation results guided by the directions learned in the cases of Fig. 3 (a) and (d), i.e., "wider face" and "closed mouth". As the degree rises from -2.0 to 2.0, both identities change gradually along their respective directions, even though the directions were initially learned for other cases, demonstrating the generalization of the learned latent directions.

Figure 7. The interpolated editing results along the directions learned in the cases of Fig. 3 (a) and (d), i.e., "wider face" and "close mouth", respectively. The learned editing direction for one face generalizes to other instances.
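The interpolation shown in Fig. 7 is just a scaled application of the direction from Eq. (9) to a new identity. The sketch below illustrates that sweep; the `fake_generator` placeholder stands in for the frozen EG3D generator, and the degree range follows the figure.

```python
import torch

def interpolate_edit(generator, w_s, direction, cam,
                     degrees=(-2.0, -1.0, 0.0, 1.0, 2.0)):
    """Render a sweep along a learned latent direction (cf. Fig. 7).

    w_s:       (L, D) latent code of a new identity.
    direction: (L, D) direction H(w_t' - w_s') learned on some other face.
    cam:       camera pose passed to the (frozen) generator.
    Returns one generator output per interpolation degree.
    """
    return [generator(w_s + alpha * direction, cam) for alpha in degrees]

# Toy usage with a stand-in "generator" that just reports the code magnitude.
L, D = 14, 512
fake_generator = lambda w, cam: w.norm()          # placeholder, not EG3D
w_s = torch.randn(L, D)
direction = torch.randn(L, D) * 0.05
outputs = interpolate_edit(fake_generator, w_s, direction, cam=None)
print([float(o) for o in outputs])
```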
Continuous Editing. Continuous editing is important for real-world applications. Therefore, we conduct an experiment to show our capability of accumulating modifications. Fig. 8 shows the results with multiple editing targets, i.e., smaller eyes, closed mouth, smaller nose, and wider face. The natural and ID-consistent results demonstrate the effectiveness of our method for continuous editing.

Generalization to Other Generators. To show the broader applicability of our method, we extend it to 3D cat editing and 2D car editing, applying it to EG3D [4] pre-trained on the AFHQ Cats [8] dataset and to StyleGAN [23] trained on the Stanford Cars [26] dataset, respectively. As shown in Fig. 9, our approach also successfully manipulates 3D cats and 2D cars according to the user's point-based instructions.

Figure 8. Mixing results with multiple attributes, demonstrating the continuous editing ability of our method.

Figure 9. The extension of our method to cat and car editing.

5. Conclusion
In this paper, we propose FaceEdit3D, an intuitive method to edit 3D facial shape and expression from any perspective. Our approach involves a tri-plane warping that ensures inherently 3D-consistent editing. To mitigate the facial distortions caused by the warping, we train a warp-aware encoder to project the warped face into a standardized distribution, and we further explore the hierarchical mechanism in the latent space to achieve disentangled editing. Extensive experiments demonstrate the effectiveness and efficiency of our method, and the additional applications show its generalization and potential across different scenarios. In summary, our method provides a new way to manipulate the 3D representation, opening up new avenues for rapid and convenient real-image editing.

Limitations. Since our method is based on warping the 3D representation, it is hard for our work to achieve texture editing and some semantic editing, such as adding glasses.

Broader Impacts. Although it is not our intention, our 3D-aware facial editing capability could potentially be abused. We are committed to privacy protection and to preventing the misuse of facial editing for criminal purposes.

Acknowledgements

This work was supported in part by NSFC (62201342, 62101325), and Shanghai Municipal Science and Technology Major Project (2021SHZDZX0102).

References

[1] Rameen Abdal, Peihao Zhu, Niloy J Mitra, and Peter Wonka. StyleFlow: Attribute-conditioned exploration of StyleGAN-generated images using conditional continuous normalizing flows. TOG, pages 1-21, 2021. 2, 3
[2] Sizhe An, Hongyi Xu, Yichun Shi, Guoxian Song, Umit Y Ogras, and Linjie Luo. PanoHead: Geometry-aware 3D full-head synthesis in 360°. In CVPR, pages 20950-20959, 2023. 1, 2, 3
Panohed: Geometry-aware 3d fullhead synthesis in 360deg. In CVPR, pages 20950-20959, 2023. 1, 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 179, + 287, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 179, + 287, + 223 + ], + "spans": [ + { + "bbox": [ + 53, + 179, + 287, + 223 + ], + "type": "text", + "content": "[3] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In CVPR, pages 5799-5809, 2021. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 224, + 287, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 287, + 278 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 287, + 278 + ], + "type": "text", + "content": "[4] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In CVPR, pages 16123-16133, 2022. 1, 2, 3, 5, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 280, + 287, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 280, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 53, + 280, + 287, + 312 + ], + "type": "text", + "content": "[5] Anpei Chen, Ruiyang Liu, Ling Xie, Zhang Chen, Hao Su, and Jingyi Yu. Sofgan: A portrait image generator with dynamic styling. TOG, pages 1-26, 2022. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 314, + 287, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 314, + 287, + 357 + ], + "spans": [ + { + "bbox": [ + 53, + 314, + 287, + 357 + ], + "type": "text", + "content": "[6] Zhuo Chen, Xudong Xu, Yichao Yan, Ye Pan, Wenhan Zhu, Wayne Wu, Bo Dai, and Xiaokang Yang. Hyperstyle3d: Text-guided 3d portrait stylization via hypernetworks. arXiv preprint arXiv:2304.09463, 2023. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 357, + 287, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 357, + 287, + 400 + ], + "spans": [ + { + "bbox": [ + 53, + 357, + 287, + 400 + ], + "type": "text", + "content": "[7] Yuhao Cheng, Yichao Yan, Wenhan Zhu, Ye Pan, Bowen Pan, and Xiaokang Yang. Head3d: Complete 3d head generation via tri-plane feature distillation. arXiv preprint arXiv:2303.15892, 2023. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 402, + 287, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 402, + 287, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 402, + 287, + 434 + ], + "type": "text", + "content": "[8] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In CVPR, pages 8188-8197, 2020. 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 436, + 287, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 436, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 53, + 436, + 287, + 479 + ], + "type": "text", + "content": "[9] Forrester Cole, David Belanger, Dilip Krishnan, Aaron Sarna, Inbar Mosseri, and William T Freeman. Synthesizing normalized faces from facial identity features. In CVPR, pages 3703-3712, 2017. 
4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 480, + 287, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 480, + 287, + 513 + ], + "spans": [ + { + "bbox": [ + 48, + 480, + 287, + 513 + ], + "type": "text", + "content": "[10] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, pages 4690-4699, 2019. 5, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 514, + 287, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 287, + 546 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 287, + 546 + ], + "type": "text", + "content": "[11] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. Gram: Generative radiance manifolds for 3d-aware image generation. In CVPR, pages 10673-10683, 2022. 1, 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 547, + 287, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 547, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 48, + 547, + 287, + 579 + ], + "type": "text", + "content": "[12] Yuki Endo. User-controllable latent transformer for stylegan image layout editing. In Computer Graphics Forum, pages 395-406, 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 581, + 287, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 581, + 287, + 623 + ], + "spans": [ + { + "bbox": [ + 48, + 581, + 287, + 623 + ], + "type": "text", + "content": "[13] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. TOG, pages 1-13, 2022. 2, 3, 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 624, + 287, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 679 + ], + "type": "text", + "content": "[14] Stephan J Garbin, Marek Kowalski, Virginia Estellers, Stanislaw Szymanowicz, Shideh RezaEIFar, Jingjing Shen, Matthew Johnson, and Julien Valentin. Voltemorph: Realtime, controllable and generalisable animation of volumetric representations. arXiv preprint arXiv:2208.00949, 2022. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "type": "text", + "content": "[15] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In ICLR, 2021. 1, 2, 3" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[16] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. Ganspace: Discovering interpretable gan controls. NeurIPS, pages 9841–9850, 2020. 
1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 107, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 545, + 149 + ], + "type": "text", + "content": "[17] Clément Jambon, Bernhard Kerbl, Georgios Kopanas, Stavros Diolatzis, George Drettakis, and Thomas Leimkuhler. Nerfshop: Interactive editing of neural radiance fields. CGIT, 6(1), 2023. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 150, + 545, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 150, + 545, + 193 + ], + "spans": [ + { + "bbox": [ + 308, + 150, + 545, + 193 + ], + "type": "text", + "content": "[18] Kaiwen Jiang, Shu-Yu Chen, Feng-Lin Liu, Hongbo Fu, and Lin Gao. Nerfaceediting: Disentangled face editing in neural radiance fields. In SIGGRAPH Asia, pages 1-9, 2022. 1, 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 194, + 545, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 194, + 545, + 227 + ], + "spans": [ + { + "bbox": [ + 308, + 194, + 545, + 227 + ], + "type": "text", + "content": "[19] Yuming Jiang, Ziqi Huang, Xingang Pan, Chen Change Loy, and Ziwei Liu. Talk-to-edit: Fine-grained facial editing via dialog. In ICCV, pages 13799-13808, 2021. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 228, + 545, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 228, + 545, + 260 + ], + "spans": [ + { + "bbox": [ + 308, + 228, + 545, + 260 + ], + "type": "text", + "content": "[20] Wonjoon Jin, Nuri Ryu, Geonung Kim, Seung-Hwan Baek, and Sunghyun Cho. Dr. 3d: Adapting 3d gans to artistic drawings. In SIGGRAPH Asia, pages 1-8, 2022. 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 261, + 545, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 261, + 545, + 314 + ], + "spans": [ + { + "bbox": [ + 308, + 261, + 545, + 314 + ], + "type": "text", + "content": "[21] Kacper Kania, Stephan J Garbin, Andrea Tagliasacchi, Virginia Estellers, Kwang Moo Yi, Julien Valentin, Tomasz Trzciński, and Marek Kowalski. Blendfields: Few-shot example-driven facial modeling. In CVPR, pages 404-415, 2023. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 316, + 545, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 316, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 308, + 316, + 545, + 348 + ], + "type": "text", + "content": "[22] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019. 1, 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "type": "text", + "content": "[23] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, pages 8110-8119, 2020. 1, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 393, + 545, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 393, + 545, + 436 + ], + "spans": [ + { + "bbox": [ + 308, + 393, + 545, + 436 + ], + "type": "text", + "content": "[24] Gwanghyun Kim and Se Young Chun. 
Datid-3d: Diversitypreserved domain adaptation using text-to-image diffusion for 3d generative model. In CVPR, pages 14203–14213, 2023. 3, 4" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 437, + 545, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 437, + 545, + 459 + ], + "spans": [ + { + "bbox": [ + 308, + 437, + 545, + 459 + ], + "type": "text", + "content": "[25] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015. 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 460, + 545, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 460, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 308, + 460, + 545, + 492 + ], + "type": "text", + "content": "[26] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV, pages 554–561, 2013. 8" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 493, + 545, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 493, + 545, + 525 + ], + "spans": [ + { + "bbox": [ + 308, + 493, + 545, + 525 + ], + "type": "text", + "content": "[27] Tianye Li, Timo Bolkart, Michael J Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4d scans. TOG, pages 194-1, 2017. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 526, + 545, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 526, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 308, + 526, + 545, + 569 + ], + "type": "text", + "content": "[28] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, pages 10012-10022, 2021. 6" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 570, + 545, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 570, + 545, + 613 + ], + "spans": [ + { + "bbox": [ + 308, + 570, + 545, + 613 + ], + "type": "text", + "content": "[29] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. In Seminal Graphics Papers: Pushing the Boundaries, Volume 2, pages 851-866. 2023. 3" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 614, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 614, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 614, + 545, + 669 + ], + "type": "text", + "content": "[30] Camillo Lugaresi, Jiuqiang Tang, Hadon Nash, Chris McClanahan, Esha Uboweja, Michael Hays, Fan Zhang, Chuoling Chang, Ming Guang Yong, Juhyun Lee, et al. Mediapipe: A framework for building perception pipelines. arXiv preprint arXiv:1906.08172, 2019. 5" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "text", + "content": "[31] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, pages 99-106, 2020. 
2" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "924" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 286, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 286, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 286, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 286, + 106 + ], + "type": "text", + "content": "[32] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In CVPR, pages 11453–11464, 2021. 1, 2, 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 286, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 286, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 286, + 162 + ], + "type": "text", + "content": "[33] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In CVPR, pages 13503-13513, 2022. 1, 2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 286, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 286, + 217 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 286, + 217 + ], + "type": "text", + "content": "[34] Xingang Pan, Ayush Tewari, Thomas Leimkuhler, Lingjie Liu, Abhinitra Meka, and Christian Theobalt. Drag your gan: Interactive point-based manipulation on the generative image manifold. In ASIGGRAPH, pages 1-11, 2023. 2, 3, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 219, + 286, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 219, + 286, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 219, + 286, + 262 + ], + "type": "text", + "content": "[35] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In ICCV, pages 5865-5874, 2021. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 264, + 286, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 264, + 286, + 306 + ], + "spans": [ + { + "bbox": [ + 48, + 264, + 286, + 306 + ], + "type": "text", + "content": "[36] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In CVPR, pages 2085–2094, 2021. 1, 2, 3, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 308, + 286, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 308, + 286, + 362 + ], + "spans": [ + { + "bbox": [ + 48, + 308, + 286, + 362 + ], + "type": "text", + "content": "[37] Yicong Peng, Yichao Yan, Shengqi Liu, Yuhao Cheng, Shanyan Guan, Bowen Pan, Guangtao Zhai, and Xiaokang Yang. Cagenerf: Cage-based neural radiance field for generalized 3d deformation and animation. NeurIPS, pages 31402-31415, 2022. 
3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 365, + 286, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 286, + 419 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 286, + 419 + ], + "type": "text", + "content": "[38] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763, 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 421, + 286, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 421, + 286, + 463 + ], + "spans": [ + { + "bbox": [ + 48, + 421, + 286, + 463 + ], + "type": "text", + "content": "[39] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, pages 10684-10695, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 465, + 286, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 465, + 286, + 508 + ], + "spans": [ + { + "bbox": [ + 48, + 465, + 286, + 508 + ], + "type": "text", + "content": "[40] Shunsuke Saito, Jinlong Yang, Qianli Ma, and Michael J Black. Scintimate: Weakly supervised learning of skinned clothed avatar networks. In CVPR, pages 2886-2897, 2021. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 510, + 286, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 510, + 286, + 543 + ], + "spans": [ + { + "bbox": [ + 48, + 510, + 286, + 543 + ], + "type": "text", + "content": "[41] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In NIPS, 2020. 1, 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 544, + 286, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 286, + 576 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 286, + 576 + ], + "type": "text", + "content": "[42] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in gans. In CVPR, pages 1532-1540, 2021. 2, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 578, + 286, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 286, + 620 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 286, + 620 + ], + "type": "text", + "content": "[43] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. Interfacegan: Interpreting the disentangled face representation learned by gans. TPAMI, pages 2004-2018, 2020. 1, 2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 286, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 286, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 286, + 656 + ], + "type": "text", + "content": "[44] Enis Simsar, Alessio Tonioni, Evin Pinar Ornek, and Federico Tombari. Latentswap3d: Semantic edits on 3d image gans. In ICCV, pages 2899-2909, 2023. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 658, + 286, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 286, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 286, + 689 + ], + "type": "text", + "content": "[45] Ivan Skorokhodov, Sergey Tulyakov, Yiqun Wang, and Peter Wonka. Epigraf: Rethinking training of 3d gans. NeurIPS, pages 24487-24501, 2022. 
1, 2, 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 692, + 286, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 692, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 692, + 286, + 713 + ], + "type": "text", + "content": "[46] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled edit" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "ing for high-resolution 3d-aware portrait synthesis. ToG, pages 1-10, 2022. 1, 2, 3, 6, 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 98, + 545, + 130 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 98, + 545, + 130 + ], + "spans": [ + { + "bbox": [ + 308, + 98, + 545, + 130 + ], + "type": "text", + "content": "[47] Jingxiang Sun, Xuan Wang, Yong Zhang, Xiaoyu Li, Qi Zhang, Yebin Liu, and Jue Wang. Fenerf: Face editing in neural radiance fields. In CVPR, pages 7672-7682, 2022. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 132, + 545, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 132, + 545, + 175 + ], + "spans": [ + { + "bbox": [ + 308, + 132, + 545, + 175 + ], + "type": "text", + "content": "[48] Jingxiang Sun, Xuan Wang, Lizhen Wang, Xiaoyu Li, Yong Zhang, Hongwen Zhang, and Yebin Liu. Next3d: Generative neural texture rasterization for 3d-aware head avatars. In CVPR, pages 20991-21002, 2023. 1, 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 177, + 545, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 177, + 545, + 231 + ], + "spans": [ + { + "bbox": [ + 308, + 177, + 545, + 231 + ], + "type": "text", + "content": "[49] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In CVPR, pages 6142-6151, 2020. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 234, + 545, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 234, + 545, + 266 + ], + "spans": [ + { + "bbox": [ + 308, + 234, + 545, + 266 + ], + "type": "text", + "content": "[50] Andrey Voynov and Artem Babenko. Unsupervised discovery of interpretable directions in the gan latent space. In ICML, pages 9786-9796, 2020. 2, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 269, + 545, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 269, + 545, + 322 + ], + "spans": [ + { + "bbox": [ + 308, + 269, + 545, + 322 + ], + "type": "text", + "content": "[51] Tengfei Wang, Bo Zhang, Ting Zhang, Shuyang Gu, Jianmin Bao, Tadas Baltrusaitis, Jingjing Shen, Dong Chen, Fang Wen, Qifeng Chen, et al. Rodin: A generative model for sculpting 3d digital avatars using diffusion. In CVPR, pages 4563-4573, 2023. 
1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 325, + 545, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 325, + 545, + 369 + ], + "spans": [ + { + "bbox": [ + 308, + 325, + 545, + 369 + ], + "type": "text", + "content": "[52] Sijing Wu, Yichao Yan, Yunhao Li, Yuhao Cheng, Wenhan Zhu, Ke Gao, Xiaobo Li, and Guangtao Zhai. Ganhead: Towards generative animatable neural head avatars. In CVPR, pages 437-447, 2023. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 372, + 545, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 372, + 545, + 414 + ], + "spans": [ + { + "bbox": [ + 308, + 372, + 545, + 414 + ], + "type": "text", + "content": "[53] Jianfeng Xiang, Jiaolong Yang, Yu Deng, and Xin Tong. Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. In ICCV, pages 2195-2205, 2023. 1, 2, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 417, + 545, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 417, + 545, + 438 + ], + "spans": [ + { + "bbox": [ + 308, + 417, + 545, + 438 + ], + "type": "text", + "content": "[54] Tianhan Xu and Tatsuya Harada. Deforming radiance fields with cages. In ECCV, pages 159-175, 2022. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 441, + 545, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 441, + 545, + 473 + ], + "spans": [ + { + "bbox": [ + 308, + 441, + 545, + 473 + ], + "type": "text", + "content": "[55] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In CVPR, 2022. 1, 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 475, + 545, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 475, + 545, + 541 + ], + "spans": [ + { + "bbox": [ + 308, + 475, + 545, + 541 + ], + "type": "text", + "content": "[56] Yan Yichao, Cheng Yuhao, Chen Zhuo, Peng Yicong, Wu Sijing, Zhang Weitian, Li Junjie, Li Yixuan, Gao Jingnan, Zhang Weixia, Zhai Guangtao, and Yang Xiaokang. A survey on generative 3d digital humans based on neural networks: representation, rendering, and learning. _SCIENTIA SINICA Informationis_, pages 1858–, 2023. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 544, + 545, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 544, + 545, + 585 + ], + "spans": [ + { + "bbox": [ + 308, + 544, + 545, + 585 + ], + "type": "text", + "content": "[57] Yu-Jie Yuan, Yang-Tian Sun, Yu-Kun Lai, Yuewen Ma, Rongfei Jia, and Lin Gao. Nerf-editing: geometry editing of neural radiance fields. In CVPR, pages 18353-18364, 2022. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 589, + 545, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 589, + 545, + 632 + ], + "spans": [ + { + "bbox": [ + 308, + 589, + 545, + 632 + ], + "type": "text", + "content": "[58] Ziyang Yuan, Yiming Zhu, Yu Li, Hongyu Liu, and Chun Yuan. Make encoder great again in 3d gan inversion through geometry and occlusion-aware encoding. In ICCV, pages 2437-2447, 2023. 
5" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "type": "text", + "content": "[59] Chi Zhang, Yiwen Chen, Yijun Fu, Zhenglin Zhou, Gang Yu, Billzb Wang, Bin Fu, Tao Chen, Guosheng Lin, and Chunhua Shen. StyleAvatar3d: Leveraging image-text diffusion models for high-fidelity 3d avatar generation. arXiv preprint arXiv:2305.19012, 2023. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 692, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 692, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 692, + 545, + 713 + ], + "type": "text", + "content": "[60] Jianfeng Zhang, Zihang Jiang, Dingdong Yang, Hongyi Xu, Yichun Shi, Guoxian Song, Zhongcong Xu, Xinchao Wang," + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "text", + "content": "925" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 286, + 475 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 66, + 72, + 286, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 286, + 105 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 286, + 105 + ], + "type": "text", + "content": "and Jiashi Feng. Avatargen: a 3d generative model for animatable human avatars. In ECCV, pages 668-685. Springer, 2022. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 286, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 286, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 286, + 149 + ], + "type": "text", + "content": "[61] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, pages 586-595, 2018. 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 152, + 286, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 152, + 286, + 195 + ], + "spans": [ + { + "bbox": [ + 49, + 152, + 286, + 195 + ], + "type": "text", + "content": "[62] Xuanmeng Zhang, Jianfeng Zhang, Rohan Chacko, Hongyi Xu, Guoxian Song, Yi Yang, and Jiashi Feng. Getavatar: Generative textured meshes for animatable human avatars. In ICCV, pages 2273-2282, 2023. 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 197, + 286, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 197, + 286, + 239 + ], + "spans": [ + { + "bbox": [ + 49, + 197, + 286, + 239 + ], + "type": "text", + "content": "[63] Yufeng Zheng, Victoria Fernández Abrevaya, Marcel C Bühler, Xu Chen, Michael J Black, and Otmar Hilliges. Im avatar: Implicit morphable head avatars from videos. In CVPR, pages 13545-13555, 2022. 
3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 241, + 286, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 241, + 286, + 284 + ], + "spans": [ + { + "bbox": [ + 49, + 241, + 286, + 284 + ], + "type": "text", + "content": "[64] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 1, 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 286, + 286, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 286, + 286, + 319 + ], + "spans": [ + { + "bbox": [ + 49, + 286, + 286, + 319 + ], + "type": "text", + "content": "[65] Jiapeng Zhu, Ruili Feng, Yujun Shen, Deli Zhao, Zheng-Jun Zha, Jingren Zhou, and Qifeng Chen. Low-rank subspaces in gans. NeurIPS, pages 16648-16658, 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 320, + 286, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 320, + 286, + 352 + ], + "spans": [ + { + "bbox": [ + 49, + 320, + 286, + 352 + ], + "type": "text", + "content": "[66] Jiapeng Zhu, Yujun Shen, Yinghao Xu, Deli Zhao, and Qifeng Chen. Region-based semantic factorization in gans. In ICML, pages 27612-27632, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 354, + 286, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 354, + 286, + 396 + ], + "spans": [ + { + "bbox": [ + 49, + 354, + 286, + 396 + ], + "type": "text", + "content": "[67] Jiapeng Zhu, Ceyuan Yang, Yujun Shen, Zifan Shi, Bo Dai, Deli Zhao, and Qifeng Chen. Linkgan: Linking gan latents to pixels for controllable image synthesis. In ICCV, pages 7656-7666, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 399, + 286, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 399, + 286, + 430 + ], + "spans": [ + { + "bbox": [ + 49, + 399, + 286, + 430 + ], + "type": "text", + "content": "[68] Peihao Zhu, Rameen Abdal, Yipeng Qin, and Peter Wonka. Sean: Image synthesis with semantic region-adaptive normalization. In CVPR, pages 5104-5113, 2020. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 433, + 286, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 433, + 286, + 475 + ], + "spans": [ + { + "bbox": [ + 49, + 433, + 286, + 475 + ], + "type": "text", + "content": "[69] Peiye Zhuang, Oluwasanmi Koyejo, and Alexander G Schwing. Enjoy your editing: Controllable gans for image editing via latent space navigation. 
arXiv preprint arXiv:2102.01187, 2021.3" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "926" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D-LFM_ Lifting Foundation Model/5d227142-e6b0-440e-bad4-facab1940a16_content_list.json b/2024/3D-LFM_ Lifting Foundation Model/5d227142-e6b0-440e-bad4-facab1940a16_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..eaf516749958da545c19ff72558b13386c72a2b2 --- /dev/null +++ b/2024/3D-LFM_ Lifting Foundation Model/5d227142-e6b0-440e-bad4-facab1940a16_content_list.json @@ -0,0 +1,1517 @@ +[ + { + "type": "text", + "text": "3D-LFM: Lifting Foundation Model", + "text_level": 1, + "bbox": [ + 300, + 130, + 669, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mosam Dabhi1", + "bbox": [ + 266, + 179, + 390, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "László A. Jeni $^{1*}$", + "bbox": [ + 426, + 180, + 555, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Simon Lucey $^{2*}$", + "bbox": [ + 584, + 180, + 707, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Carnegie Mellon University", + "bbox": [ + 240, + 204, + 470, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2The University of Adelaide", + "bbox": [ + 504, + 204, + 728, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3dlfm.github.io", + "bbox": [ + 413, + 232, + 550, + 246 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a0af2091a3560911c3122fdad21a8a6e74a174a7bfb69e66fec5e5735c5b7729.jpg", + "image_caption": [ + "(a) Unified 2D-3D lifting for $30+$ categories.", + "(b) Dataset diversity visualization." + ], + "image_footnote": [], + "bbox": [ + 81, + 257, + 643, + 502 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a0bb2bcd1603376f67a0063ab026a40954d20b0e3bc36cb2befe1938d3dc99e1.jpg", + "image_caption": [ + "Figure 1. Overview: (a) This figure shows the 3D-LFM's ability in lifting 2D landmarks into 3D structures across an array of over 30 diverse categories, from human body parts, to a plethora of animals and everyday common objects. The lower portion shows the actual 3D reconstructions by our model, with red lines representing the ground truth and blue lines showing the 3D-LFM's predictions. (b) This figure displays the model's training data distribution on a logarithmic scale, highlighting that inspite of 3D-LFM being trained on imbalanced datasets, it preserves the performance across individual categories." + ], + "image_footnote": [], + "bbox": [ + 656, + 297, + 888, + 467 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 614, + 313, + 630 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The lifting of a 3D structure and camera from 2D landmarks is at the cornerstone of the discipline of computer vision. 
Traditional methods have been confined to specific rigid objects, such as those in Perspective-n-Point (PnP) problems, but deep learning has expanded our capability to reconstruct a wide range of object classes (e.g. C3DPO [18] and PAUL [24]) with resilience to noise, occlusions, and perspective distortions. However, all these techniques have been limited by the fundamental need to establish correspondences across the 3D training data, significantly limiting their utility to applications where one has an abundance of \"in-correspondence\" 3D data. Our approach harnesses the inherent permutation equivariance of transformers to manage varying numbers of points per 3D data instance, withstand occlusions, and generalizes", + "bbox": [ + 75, + 646, + 473, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "to unseen categories. We demonstrate state-of-the-art performance across 2D-3D lifting task benchmarks. Since our approach can be trained across such a broad class of structures, we refer to it simply as a 3D Lifting Foundation Model (3D-LFM) – the first of its kind.", + "bbox": [ + 496, + 616, + 893, + 693 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 709, + 632, + 724 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lifting 2D landmarks from a single-view RGB image into 3D has long posed a complex challenge in the field of computer vision because of the ill-posed nature of the problem. This task is important for a range of applications from augmented reality to robotics, and requires an understanding of non-rigid spatial geometry and accurate object descriptions [2, 11, 25]. Historically, efforts in single-frame 2D-3D lifting have encountered significant hurdles: reliance on object-specific models, poor scalability, and limited adaptability to diverse and complex object categories. Traditional methods, while advancing in specific domains like human", + "bbox": [ + 496, + 733, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Both authors advised equally.", + "bbox": [ + 94, + 883, + 259, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "10466", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "body [14, 16, 31] or hand modeling [3, 6], often fail when faced with the complexities of varying object types or object rigs (skeleton placements).", + "bbox": [ + 75, + 90, + 468, + 135 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To facilitate such single-frame 2D-3D lifting, deep learning methods like C3DPO [18] and others [8, 11, 24, 25, 28] have recently been developed. However, these methods are fundamentally limited in that they must have knowledge of the object category and how the 2D landmarks correspond semantically to the 2D/3D data it was trained upon. Further, this represents a drawback, especially when considering their scaling up to dozens or even hundreds of object categories, with varying numbers of landmarks and configurations. 
This paper marks a departure from such correspondence constraints, introducing the 3D Lifting Foundation Model (3D-LFM), an object-agnostic single frame 2D-3D lifting approach. At its core, 3D-LFM addresses the limitation of previous models, which is the inability to efficiently handle a wide array of object categories while maintaining high fidelity in 3D keypoint lifting from 2D data. We propose a solution rooted in the concept of permutation equivariance, a property that allows our model to autonomously establish correspondences among diverse sets of input 2D keypoints.", + "bbox": [ + 75, + 136, + 468, + 436 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D-LFM is capable of performing single frame 2D-3D lifting for $30+$ categories using a single model simultaneously, covering everything from human forms [9, 15, 32], face [29], hands [17], and animal species [1, 10, 27], to a plethora of inanimate objects found in everyday scenarios such as cars, furniture, etc. [26]. Importantly, 3D-LFM is inherently scalable, poised to expand to hundreds of categories and improve performance, especially in out-of-distribution or less-represented areas, showcasing its broad utility in 3D lifting tasks. 3D-LFM is able to achieve 2D-3D lifting performance that matches those of leading methods specifically optimized for individual categories. The generalizability of 3D LFM is further evident in its ability to handle out-of-distribution (OOD) object categories and rigs, which we refer to as OOD 2D-3D lifting, where the task is to lift the 2D landmarks to 3D for a category never seen during training. We show such OOD results: (1) for inanimate objects - by holding out an object category within the PASCAL dataset, (2) for animals - by training on common object categories such as dogs and cats found in [27] and reconstructing 3D for unseen and rare species of Cheetahs found in [10] and in-the-wild zoo captures from [5], and (3) by showing rig transfer, i.e. training 2D to 3D lifting on a Human3.6M dataset rig [7] and showing similar 2D to 3D lifting performance on previously unseen rigs such as those found in Panoptic studio dataasset rig [9] or a COCO dataset rig [13]. 3D-LFM transfers learnings from seen data during training to unseen OOD data during inference. It does so by learning general structural features during the training phase via the proposed permutation equivariance properties", + "bbox": [ + 75, + 438, + 470, + 891 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "and specific design choices that we discuss in the following sections.", + "bbox": [ + 498, + 90, + 890, + 119 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recognizing the important role geometry plays in 3D reconstruction [4, 5, 11, 18, 24, 25], we integrate Procrustean methods such as Orthographic-N-Point (OnP) or Perspective-N-Point (PnP) to direct the model's focus on deformable aspects within a canonical frame. This incorporation significantly reduces the computational burden on the model, freeing it from learning redundant rigid rotations and focusing its capabilities on capturing the true geometric essence of objects. Scalability, a critical aspect of our model, is addressed through the use of tokenized positional encoding (TPE), which, when combined with graph-based transformer architecture, not only enhances the model's adaptability across diverse categories but also strengthens its ability to handle multiple categories with different number of keypoints and configurations. 
Finally, the use of skeleton information (joint connectivity) within the graph-based transformers via adjacency matrices provides strong clues about joint proximity and inherent connectivity, aiding in the handling of correspondences across varied object categories.", + "bbox": [ + 496, + 121, + 890, + 421 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To the best of our knowledge, 3D-LFM is one of the only known work which is a unified model capable of doing 2D-3D lifting for $30+$ (and potentially even more) categories simultaneously. Its ability to perform unified learning across a vast spectrum of object categories without specific object information and its handling of OOD scenarios highlight its potential as one of the first models capable of serving as a 2D-3D lifting foundation model.", + "bbox": [ + 496, + 422, + 890, + 542 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The contributions of this paper are threefold:", + "bbox": [ + 500, + 544, + 795, + 558 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. We propose a Procrustean transformer that is able to focus solely on learning the deformable aspects of objects within a single canonical frame whilst preserving permutation equivariance across 2D landmarks.", + "2. The integration of tokenized positional encoding within the graph-based transformer, to enhance our approach's scalability and its capacity to handle diverse and imbalanced datasets.", + "3. We demonstrate that 3D-LFM surpasses state-of-the-art methods in categories such as humans, hands, and faces (benchmark in [32]). Additionally, it shows robust generalization by handling previously unseen objects and configurations, including animals ([5, 10]), inanimate objects ([26]), and novel object arrangements (rig transfer in [9])" + ], + "bbox": [ + 500, + 559, + 890, + 784 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In subsequent sections, we explore the design and methodology of our proposed 3D-LFM architecture, including detailed ablation experiments and comparative analyses. Throughout this paper, 'keypoints', 'landmarks', and 'joints' are used interchangeably, referring to specific, identifiable points or locations on an object or figure that are crucial for understanding its structure and geometry.", + "bbox": [ + 496, + 786, + 890, + 891 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "10467", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/a58284baea3895dbf1f68554ecc0f1b300ccd78394e37597d5bc76f25601e114.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 93, + 808, + 224 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/fe824da252f65bd6270540aea6dbb12ff9392c0addeca4c929282512092910f2.jpg", + "image_caption": [ + "Figure 2. Overview of the 3D Lifting Foundation Model (3D-LFM) architecture: The process begins with the input 2D keypoints undergoing Token Positional Encoding (TPE) before being processed by a series of graph-based transformer layers. The resulting features are then decoded through an MLP into a canonical 3D shape. This shape is aligned to the ground truth (G.T. 3D) in the reference frame using a Procrustean method, with the Mean Squared Error (MSE) loss computed to guide the learning. The architecture captures both local and global contextual information, focusing on deformable structures while minimizing computational complexity." 
+ ], + "image_footnote": [], + "bbox": [ + 189, + 232, + 779, + 364 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. Related works", + "text_level": 1, + "bbox": [ + 75, + 478, + 220, + 492 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The field of 2D-3D lifting has evolved substantially from classic works such as those based on Perspective-n-Point (PnP) algorithms [12]. In these early works, the algorithm was given a set of 2D landmarks and some 3D supervision, namely the known 3D rigid object. The field has since witnessed a paradigm shift with the introduction of deep learning methodologies, led by methods such as C3DPO [18], PAUL [24], and Deep NRSfM [11], along with recent transformer-based innovations such as NRSfM-Former [8]. In these approaches one does not need knowledge of the specific 3D object, instead it can get away with just the 2D landmarks and correspondences to an ensemble of 2D/3D data from the object category to be lifted. However, despite their recent success, all these methods still require that the 2D/3D data be in semantic correspondence. That is, the index to a specific landmark has the same semantic meaning across all instances (e.g. chair leg). In practice, this is quite limiting at run-time, as one needs intimate knowledge of the object category, and rig in order to apply any of these current methods. Further, this dramatically limits the ability of these methods to leverage cross-object and cross-rig datasets, prohibiting the construction of a truly generalizable 2D to 3D lifting foundation model – a topic of central focus in this paper.", + "bbox": [ + 73, + 502, + 468, + 864 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recent literature in pose estimation, loosely connected to NRSfM but often more specialized towards human and", + "bbox": [ + 75, + 866, + 468, + 895 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "animal body parts, has also seen remarkable progress. Models such as Jointformer [14] and SimpleBaseline [16] have refined the single-frame 2D-3D lifting process, while generative approaches like MotionCLIP [19] and Human Motion Diffusion Model [20] have laid the groundwork for 3D generative motion-based foundation models. These approaches, however, are even more limiting than C3PDO, PAUL, etc. in that they are intimately wedded to the object class and are not easily extendable to an arbitrary object class.", + "bbox": [ + 496, + 479, + 890, + 628 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Approach", + "text_level": 1, + "bbox": [ + 500, + 645, + 607, + 662 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a set of 2D keypoints representing the projection of an object's joints in an image, we denote the keypoints matrix as $\\mathbf{W} \\in \\mathbb{R}^{N \\times 2}$ , where $N$ is the predetermined maximum number of joints considered across all object categories. For objects with joints count less than $N$ , we introduce a masking mechanism that utilizes a binary mask matrix $\\mathbf{M} \\in \\{0,1\\}^N$ , where each element $m_i$ of $\\mathbf{M}$ is defined as:", + "bbox": [ + 496, + 670, + 890, + 790 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nm _ {i} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f j o i n t} i \\text {i s p r e s e n t} \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. 
\\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 591, + 804, + 890, + 845 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The 3D lifting function $f: \\mathbb{R}^{N \\times 2} \\to \\mathbb{R}^{N \\times 3}$ maps the 2D keypoints to their corresponding 3D structure while compensating for the projection:", + "bbox": [ + 496, + 854, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "10468", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {S} = f (\\mathbf {W}) = \\mathbf {W} \\mathbf {R} ^ {\\top} + \\mathbf {b} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 104, + 468, + 122 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{R} \\in \\mathbb{R}^{3 \\times 3}$ is the projection matrix (assumed either weak-perspective or orthographic) and $\\mathbf{b} \\in \\mathbb{R}^{N \\times 3}$ is a bias term that aligns the centroids of 2D and 3D keypoints.", + "bbox": [ + 76, + 128, + 468, + 172 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Permutation Equivariance: To ensure scalability and adaptability across a diverse set of objects, we leverage the property of permutation equivariance inherent in transformer architectures. Permutation equivariance allows the model to process input keypoints $\\mathbf{W}$ regardless of their order, a critical feature for handling objects with varying joint configurations:", + "bbox": [ + 75, + 174, + 470, + 279 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nf (\\mathcal {P} \\mathbf {W}) = \\mathcal {P} f (\\mathbf {W})\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 287, + 341, + 304 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{P}$ is a permutation matrix that reorders the keypoints. Handling Missing Data: To address the challenge of missing data, we refer the Deep NRSfM++ [25] work and use a masking mechanism to accommodate for occlusions or absences of keypoints. Our binary mask matrix $\\mathbf{M} \\in \\{0,1\\}^N$ is applied in such a way that it not only pads the input data to a consistent size but also masks out missing or occluded points: $\\mathbf{W}_m = \\mathbf{W} \\odot \\mathbf{M}$ , where $\\odot$ denotes element-wise multiplication. To remove the effects of translation and ensure that our TPE features are generalizable, we zero-center the data by subtracting the mean of the visible keypoints:", + "bbox": [ + 75, + 310, + 468, + 476 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {W} _ {c} = \\mathbf {W} _ {m} - \\operatorname {m e a n} \\left(\\mathbf {W} _ {m}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 484, + 468, + 501 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We scale the zero-centered data to the range $[-1, 1]$ while preserving the aspect ratio to maintain the geometric integrity of the keypoints. For more details on handling missing data in the presence of perspective effects, we refer the reader to Deep NRSFM++[25].", + "bbox": [ + 75, + 507, + 468, + 583 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Token Positional Encoding: replaces the traditional Correspondence Positional Encoding (CPE) or Joint Embedding which encodes the semantic correspondence information (as used in works such as like [14, 31]) with a mechanism that does not require explicit correspondence or semantic information. 
Owing to the success of per-point positional embedding, particularly random Fourier features [30] in handling OOD data, we compute Token Positional Encoding (TPE) using analytical Random Fourier features (RFF) as follows:", + "bbox": [ + 75, + 583, + 468, + 733 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {T P E} \\left(\\mathbf {W} _ {c}\\right) = \\sqrt {\\frac {2}{D}} \\left[ \\sin \\left(\\mathbf {W} _ {c} \\boldsymbol {\\omega} + b\\right); \\cos \\left(\\mathbf {W} _ {c} \\boldsymbol {\\omega} + b\\right) \\right] \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 93, + 750, + 468, + 799 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $D$ is the dimensionality of the Fourier feature space, $\\pmb{\\omega} \\in \\mathbb{R}^{2 \\times \\frac{D}{2}}$ and $\\mathbf{b} \\in \\mathbb{R}^{\\frac{D}{2}}$ are parameters sampled from a normal distribution, scaled appropriately. These parameters are sampled once and kept fixed, as per the RFF methodology. The output of this transformation $\\mathbf{TPE}(\\mathbf{W}_c)$ is then fed into the graph-based transformer network as $\\mathbf{X}^\\ell$ where", + "bbox": [ + 75, + 809, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\ell$ indicates the layer number (0 in the above case). This set of features is now ready for processing inside the graph-based transformer layers without the need for correspondence among the input keypoints. The TPE retains the property of permutation equivariance while implicitly encoding the relative positions of the keypoints.", + "bbox": [ + 496, + 90, + 893, + 181 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Graph-based Transformer Architecture", + "text_level": 1, + "bbox": [ + 498, + 191, + 841, + 207 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our graph-based transformer architecture utilizes a hybrid approach to feature aggregation by combining graph-based local attention [22](L) with global self-attention mechanisms [21](G) within a single layer (shown as grey block in Fig. 2. This layer is replicated $L$ times, providing a sequential refinement of the feature representation across the network's depth.", + "bbox": [ + 496, + 213, + 890, + 319 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Hybrid Feature Aggregation: For each layer $\\ell$ , ranging from 0 to $L$ , the feature matrix $\\mathbf{X}^{(\\ell)} \\in \\mathbb{R}^{N \\times D}$ is augmented through simultaneous local and global processing. 
The local processing component, $\\mathrm{GA}(\\mathbf{X}^{(\\ell)}, \\mathbf{A})$ , leverages an adjacency matrix $\\mathbf{A}$ , which encodes the connectivity based on the object category, to perform graph-based attention on batches of nodes representing the input 2D data:", + "bbox": [ + 496, + 319, + 890, + 425 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {L} ^ {(\\ell)} = \\mathrm {G A} (\\mathbf {X} ^ {(\\ell)}, \\mathbf {A}), \\\\ \\mathbf {G} ^ {(\\ell)} = \\operatorname {M H S A} (\\mathbf {X} ^ {(\\ell)}) \\tag {5} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 622, + 435, + 890, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Local and global features are concatenated to form a unified representation $\\mathbf{U}^{(\\ell)}$ :", + "bbox": [ + 498, + 484, + 890, + 515 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {U} ^ {(\\ell)} = \\operatorname {c o n c a t} \\left(\\mathbf {L} ^ {(\\ell)}, \\mathbf {G} ^ {(\\ell)}\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 598, + 527, + 890, + 547 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Following the concatenation, each layer applies a normalization(LN) and a multilayer perceptron (MLP). The MLP employs a Gaussian Error Linear Unit (GeLU) as the nonlinearity function to enhance the model's expressive power", + "bbox": [ + 496, + 553, + 890, + 614 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {X} ^ {\\prime (\\ell)} = \\mathrm {L N} \\left(\\mathbf {U} ^ {(\\ell)}\\right) + \\mathbf {U} ^ {(\\ell)}, \\\\ \\mathbf {X} ^ {(\\ell + 1)} = \\operatorname {L N} \\left(\\operatorname {M L P} _ {-} \\operatorname {G e L U} \\left(\\mathbf {X} ^ {\\prime (\\ell)}\\right)\\right) + \\mathbf {X} ^ {\\prime (\\ell)} \\tag {7} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 636, + 890, + 676 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Here, GA represents Graph Attention, MHSA denotes Multi-Head Self-Attention, and MLP_GeLU indicates our MLP with GeLU nonlinearity. This architecture is designed to learn patterns in 2D data by considering both the local neighborhood connectivity of input 2D and the global data context of input 2D, which is important for robust 2D to 3D structure lifting.", + "bbox": [ + 496, + 686, + 890, + 792 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Procrustean Alignment", + "text_level": 1, + "bbox": [ + 500, + 801, + 715, + 816 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The final operation in our pipeline decodes the latent feature representation $\\mathbf{X}^{(L)}$ into the predicted canonical structure $\\mathbf{S}_c$ via a GeLU-activated MLP:", + "bbox": [ + 496, + 825, + 890, + 869 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {S} _ {c} = \\operatorname {M L P} _ {\\text {s h a p e . d e c o d e r}} (\\mathbf {X} ^ {(L)})\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 883, + 789, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "10469", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Subsequently, we align $\\mathbf{S}_c$ with the ground truth $\\mathbf{S}_r$ , via a Procrustean alignment method that optimizes for the rotation matrix $\\mathbf{R}$ . 
The alignment is formalized as a minimization problem:", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {\\mathbf {R}} {\\text {m i n i m i z e}} \\quad | | \\mathbf {M} \\odot (\\mathbf {S} _ {r} - \\mathbf {S} _ {c} \\mathbf {R}) | | _ {F} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 165, + 387, + 188 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{M}$ is a binary mask applied element-wise, and $||\\cdot ||_F$ denotes the Frobenius norm. The optimal $\\mathbf{R}$ is obtained via SVD, which ensures the orthonormality constraint of the rotation matrix:", + "bbox": [ + 75, + 194, + 468, + 253 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {U}, \\boldsymbol {\\Sigma}, \\mathbf {V} ^ {\\top} = \\operatorname {S V D} ((\\mathbf {M} \\odot \\mathbf {S} _ {c}) ^ {\\top} \\mathbf {S} _ {r}), \\quad \\mathbf {R} = \\mathbf {U V} ^ {\\top}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 280, + 441, + 299 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The predicted shape is then scaled relative to the reference shape $\\mathbf{S}_r$ , resulting in a scale factor $\\gamma$ , which yields the final predicted shape $\\mathbf{S}_p$ :", + "bbox": [ + 75, + 310, + 468, + 356 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {S} _ {p} = \\boldsymbol {\\gamma} \\cdot (\\mathbf {S} _ {c} \\mathbf {R})\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 369, + 326, + 387 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This Procrustean alignment step is crucial for directing the model's focus on learning non-rigid shape deformations over rigid body dynamics, thus significantly enhancing the model's ability to capture the true geometric essence of objects by just focusing on core deformable (non-rigid) aspects. The effectiveness of this approach is confirmed by faster convergence and reduced error rates in our experiments, as detailed in Fig. 6. These findings align with the findings presented in PAUL [24].", + "bbox": [ + 75, + 393, + 468, + 529 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Loss Function", + "text_level": 1, + "bbox": [ + 76, + 539, + 220, + 553 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The optimization of our model relies on the Mean Squared Error (MSE) loss, which calculates the difference between predicted 3D points $\\mathbf{S}_p$ and the ground truth $\\mathbf{S}_r$ :", + "bbox": [ + 75, + 561, + 468, + 608 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {M S E}} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\| \\mathbf {S} _ {p} ^ {(i)} - \\mathbf {S} _ {r} ^ {(i)} \\right\\| ^ {2} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 618, + 468, + 659 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Minimizing this loss across $N$ points ensures the model's ability in reconstructing accurate 3D shapes from input 2D landmarks. This minimization effectively calibrates the shape decoder and the Procrustean alignment to focus on the essential non-rigid characteristics of the objects, helping the accuracy of the 2D to 3D lifting process.", + "bbox": [ + 75, + 665, + 468, + 756 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. 
Results and Comparative Analysis", + "text_level": 1, + "bbox": [ + 75, + 768, + 387, + 787 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our evaluation shows the 3D Lifting Foundation Model (3D-LFM)'s capability in single-frame 2D-3D lifting across diverse object categories without object-specific data in Sec. 4.1. Following that, Sec. 4.2 highlights 3D-LFM's performance over specialized methods, especially achieving state-of-the-art performance in whole-body benchmarks[32] (Fig. 4). Additionally, Sec. 4.3 shows", + "bbox": [ + 75, + 794, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3D-LFM's capability in 2D-3D lifting across 30 categories using a single unified model, enhancing category-specific performance and achieving out-of-distribution (OOD) generalization for unseen object configurations during training. In conclusion, the ablation studies in Section 4.4 validate our proposed procrustean approach, token positional encoding, and the local-global hybrid attention mechanism in the transformer model, confirming their role in 3D-LFM's effectiveness in both single- and multiple-object scenarios.", + "bbox": [ + 496, + 90, + 890, + 227 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Multi-Object 3D Reconstruction", + "text_level": 1, + "bbox": [ + 498, + 287, + 784, + 304 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Clarifying naming convention: In 'object-specific' versus 'object-agnostic', our primary focus in this naming is on the distinction in training methods. Here, object-specific training involves supplying semantic details for each object, leading to isolated training. Conversely, object-agnostic training combines various categories without explicit landmark semantics, leading to combined training.", + "bbox": [ + 496, + 311, + 890, + 417 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Experiment Rationale: 3D-LFM leverages permutation equivariance to accurately lift 2D keypoints into 3D structures across diverse categories, outperforming fixed-array methods by adapting flexibly to variable keypoint configurations. It has been evaluated against non-rigid structure-from-motion approaches [11, 18, 24, 25] that require object-specific inputs, showing its ability to handle diverse categories. For a comprehensive benchmark, we utilize the PASCAL3D+ dataset [26], following C3DPO's [18] methodology, to include a variety of object categories.", + "bbox": [ + 496, + 417, + 890, + 568 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Performance: We benchmark 3D-LFM against the notable NRSfM method, C3DPO [18], for multi-object 2D to 3D lifting with 3D supervision. C3DPO, similar to other contemporary methods [11, 24, 25, 28] requiring object-specific details, serves as an apt comparison due to its multi-category approach. Initially replicating conditions with object-specific information, 3D-LFM matches C3DPO's performance, as demonstrated in Fig. 3. This stage uses MPJPE to measure 3D lifting accuracy, with C3DPO's training setup including an $MN$ dimensional array for object details where $M$ represents number of objects with $N$ being maximum number of keypoints, and our model is trained separately on each object to avoid providing object-specific information. The 3D-LFM's strength emerges when object-specific data is withheld. 
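As an aside, a minimal sketch of the MPJPE metric referred to above (mean per-joint position error); averaging only over visible joints is an assumption about how masked keypoints are treated:

```python
import numpy as np

def mpjpe(pred, gt, mask=None):
    """Mean per-joint position error between predicted and ground-truth 3D joints.

    pred, gt : (N, 3) arrays; mask : optional (N,) binary visibility flags.
    """
    err = np.linalg.norm(pred - gt, axis=-1)          # per-joint Euclidean distance
    if mask is not None:
        return float(err[mask.astype(bool)].mean())   # average over visible joints only
    return float(err.mean())
```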
While C3DPO shows a decline without such data, 3D-LFM maintains a lower MPJPE across categories, even when trained collectively across categories using only an $N$ dimensional array. These findings (Fig. 3) highlights 3D-LFM's capabilities, outperforming single-category training and demonstrating its potential as a generalized 2D to 3D lifting solution.", + "bbox": [ + 496, + 569, + 890, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "10470", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1b39639aa55d9a08205ddba20f9f5b93d226a5c41f43b59e0ae37b4613d61f62.jpg", + "image_caption": [ + "Figure 3. 3D-LFM vs. C3DPO Performance: MPJPE comparisons using the PASCAL3D+ dataset, this figure demonstrates our model's adaptability in the absence of object-specific information, contrasting with C3DPO's increased error under the same conditions. The analysis confirms 3D-LFM's superiority across diverse object categories, reinforcing its potential for generalized 2D to 3D lifting." + ], + "image_footnote": [], + "bbox": [ + 81, + 88, + 464, + 272 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/10fe15f317e7bbc850283cacd33d50076523f63c805a439028b13cdea54489fd.jpg", + "table_caption": [ + "Table 1. Quantitative performance on H3WB: Our method demonstrates leading performance across multiple object categories without the need for object-specific designs." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Whole-body</td><td>Body</td><td>Face/Aligned</td><td>Hand/Aligned</td></tr>
<tr><td>SimpleBaseline</td><td>125.4</td><td>125.7</td><td>115.9 / 24.6</td><td>140.7 / 42.5</td></tr>
<tr><td>CanonPose w/3D sv.</td><td>117.7</td><td>117.5</td><td>112.0 / 17.9</td><td>126.9 / 38.3</td></tr>
<tr><td>Large SimpleBaseline</td><td>112.3</td><td>112.6</td><td>110.6 / 14.6</td><td>114.8 / 31.7</td></tr>
<tr><td>Jointformer (extra data)</td><td>81.5</td><td>78</td><td>60.4 / 16.2</td><td>117.6 / 38.8</td></tr>
<tr><td>Jointformer</td><td>88.3</td><td>84.9</td><td>66.5 / 17.8</td><td>125.3 / 43.7</td></tr>
<tr><td>Ours</td><td>64.13</td><td>60.83</td><td>56.55 / 10.44</td><td>78.21 / 28.22</td></tr>
<tr><td>Ours – PA</td><td>33.13</td><td>39.36</td><td>6.02</td><td>13.56</td></tr>
", + "bbox": [ + 76, + 472, + 468, + 575 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Benchmark: Object-Specific Models", + "text_level": 1, + "bbox": [ + 75, + 619, + 393, + 636 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Next, we benchmark 3D-LFM against leading specialized methods for human body, face, and hands categories. Our model outperforms these specialized methods, showing multi-category learning without the need for category (landmark) semantics. For this study, we evaluate on H3WB dataset [32], a recent benchmark for diverse whole-body pose estimation tasks. This dataset is valuable for its inclusion of multiple object categories and for providing a comparative baseline against methods such as Jointformer [14], SimpleBaseline [16], and CanonPose [23]. Following H3WB's recommended 5-fold cross-validation and submitting the evaluations to benchmark's authors, we report results on the hidden test set. The results shown in Fig. 4 and Table 1 include PA-MPJPE and MPJPE, with test set performance numbers provided directly by the H3WB team, ensuring that our results are verified by an independent third-party.", + "bbox": [ + 73, + 643, + 468, + 902 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b43f2c5b144c87bfb124cc4c794454de53e59e397131c8933800fc05d04661ba.jpg", + "image_caption": [ + "Figure 4. Performance Comparison on H3WB Benchmark: This chart contrasts MPJPE errors for whole-body, body, face, aligned face, hand, and aligned hand categories within the H3WB benchmark [32]. Our models, with and without Procrustes Alignment (Ours-PA), outperform current state-of-the-art (SOTA) methods, validating our approach's proficiency in 2D to 3D lifting tasks." + ], + "image_footnote": [], + "bbox": [ + 504, + 92, + 885, + 262 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Towards foundation model", + "text_level": 1, + "bbox": [ + 500, + 436, + 741, + 450 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we highlight 3D-LFM's role as a foundational model for varied 2D-3D lifting, capable in managing multiple object types and data imbalances. In this subsection, we explore 3D-LFM's scalability for collective dataset training (Sec.4.3.1), its generalization to new categories and rig transfer capabilities (Sec.4.3.2). These studies validate the 3D-LFM's role as a foundation model, capable at leveraging diverse data without requiring specific configurations, thus simplifying the 3D lifting process for varied joint setups.", + "bbox": [ + 496, + 458, + 890, + 609 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We start this investigation by showing the capability 3D-LFM in handling 2D-3D lifting for $30+$ object categories within the single model, confirming the model's capability to manage imbalanced datasets representative of real-world scenarios as shown in Fig. 1. With a comprehensive range of human, hand, face, inanimate objects, and animal datasets, the 3D-LFM is proven to be scalable, without requiring category-specific adjustments. 
The subsequent subsections will dissect these attributes further, discussing the 3D-LFM's foundational potential in the 3D lifting domain.", + "bbox": [ + 496, + 609, + 890, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.1 Combined Dataset Training", + "text_level": 1, + "bbox": [ + 500, + 770, + 748, + 786 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This study evaluates the 3D-LFM's performance on isolated datasets against its performance on a combined dataset. Initially, the model was trained separately on animal-based supercategory datasets: specifically OpenMonkey[1] and Animals3D[27]. Subsequently, it was trained on a merged dataset containing a broad spectrum of object categories, including Human Body-Based datasets such as AMASS [15]", + "bbox": [ + 496, + 794, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "10471", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/493d113d247da95fd6c162af320da3f6562d007a8a6162f4775ca6f5317c1938.jpg", + "table_caption": [ + "Table 2. Quantitative evaluation for OOD scenarios." + ], + "table_footnote": [], + "table_body": "
<tr><td>Category</td><td>OOD (mm)</td><td>In-Dist. (mm)</td></tr>
<tr><td>Cheetah</td><td>26.59</td><td>10.16</td></tr>
<tr><td>Train</td><td>6.88</td><td>5.71</td></tr>
<tr><td>Chimpanzee</td><td>52.05</td><td>42.65</td></tr>
", + "bbox": [ + 122, + 114, + 419, + 191 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "and Human 3.6 [7], Hand-Based datasets such as PanOptic Hands [9], Face-Based datasets like BP4D+[29], and various Inanimate Objects from the PASCAL3D+ dataset[26], along with previously mentioned animal datasets. Isolated training resulted in an average MPJPE of $21.22 \\, \\text{mm}$ , while the combined training method significantly reduced MPJPE to $12.5 \\, \\text{mm}$ on the same animal supercategory validation split. This improvement confirms the potential of 3D-LFM as a pre-training framework and underscores its ability to adapt and generalize from diverse and extensive data collections.", + "bbox": [ + 75, + 218, + 468, + 382 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Dataset Selection Rationale: We selected animal-based supercategory datasets to demonstrate combined training's impact on underrepresented categories. We observed greater performance improvements in smaller, unbalanced datasets (as exemplified by PASCAL3D+: from 4.31 mm to 1.1 mm and OpenMonkey: from 19.45 mm to 9.59 mm) compared to larger datasets with sufficient balance among categories. Consequently, we see minimal gains in more balanced, larger datasets like AMASS (from 1.67 mm to 1.66 mm), underscoring the utility of combined training for enhancing performance in underrepresented and long-tail categories.", + "bbox": [ + 75, + 383, + 468, + 565 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.2 OOD generalization and rig-transfer:", + "text_level": 1, + "bbox": [ + 76, + 574, + 390, + 589 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We evaluate 3D-LFM's generalization to unseen object categories and rig configurations. Its accuracy is highlighted by successful 2D-3D lifting reconstructions of the \"Cheetah\" from Acinoset [10], which is not included in the typical Animal3D dataset [27], and the \"Train\" category from PASCAL3D+[26], absent during training. Qualitative reconstructions are shown in Fig. 5, along with the quantitative results in Tab.2 for above categories as well as in-the-wild category like a Chimpanzee from the MBW dataset [5] - which illustrates model's strong OOD generalization and capability to handle in-the-wild data.", + "bbox": [ + 75, + 598, + 468, + 763 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Additionally, we show 3D-LFM's capability in transferring rig configurations between datasets, embodying the concept of generic geometry learning. By training on a 17-joint Human3.6M dataset [7] and testing on a 15-joint Panoptic Studio setup [9], our model gives accurate 3D reconstructions despite variations in joint arrangements. This capability is particularly interesting for its efficiency in utilizing data from multiple rigs of the same object, and underscores the model's adaptability, a cornerstone in pro", + "bbox": [ + 75, + 763, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6a0d2e8953ff1370868413af3bf215fb09ab8155ed5eb35f8361ceb6e70f669a.jpg", + "image_caption": [ + "Figure 5. Generalization to unseen data: Figure showing 3D-LFM's proficiency in OOD 2D-3D lifting, effectively handling new, unseen categories, and rig generalization from Acinoset [10] PASCAL3D+ [26], and Panoptic studio [9] with varying joint arrangements in top row. 
The bottom row presents in-the-wild data from the MBW dataset [5], with red dots indicating input keypoints and blue stick figures showing the model's 3D predictions from different angles." + ], + "image_footnote": [], + "bbox": [ + 506, + 103, + 910, + 258 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "cessing diverse human datasets. It aligns with the broader community's interest in versatile geometry learning, which makes these findings especially compelling. For a more thorough validation, we direct the reader to the ablation section, where qualitative visuals (Fig. 7) and quantitative analysis (Sec. 4.4.3) further highlight 3D-LFM's OOD generalization and rig transfer efficacy.", + "bbox": [ + 496, + 415, + 890, + 521 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Ablation", + "text_level": 1, + "bbox": [ + 500, + 531, + 602, + 544 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In our ablation studies, we evaluate the 3D-LFM's design elements and their individual contributions to its performance. Detailed experiments on the Human3.6M benchmark [7] and a blend of other datasets including Animal3D [27] and facial datasets [9, 29] were carried out to ablate the role of Procrustean transformation, hybrid attention mechanisms, and tokenized positional encoding (TPE) in enabling the model's scalability and out-of-distribution (OOD) generalization.", + "bbox": [ + 496, + 554, + 890, + 689 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4.1 Procrustean Transformation", + "text_level": 1, + "bbox": [ + 500, + 696, + 751, + 709 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3D-LFM's fusion of the procrustean approach, a first in transformer-based lifting frameworks, concentrates on deformable object components, as outlined in Sec.3.2. By focusing on shape within a standard canonical reference frame and avoiding rigid body transformations, we see faster learning and a decreased MPJPE, as evident by the gap between blue and orange lines in Fig. 6 (a) suggests. This fusion is crucial for learning 3D deformations, while utilizing transformers' equivariance. These findings suggest that even for transformers, avoiding rigid transformations' learning aids convergence, most notably with imbalanced datasets.", + "bbox": [ + 496, + 719, + 890, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "10472", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6fac01a11b6aeb1fd8a0508795cfe84e2bc854407acdaefe15610bfbc38912d3.jpg", + "image_caption": [ + "Figure 6. (a) Comparing attention strategies in 3D-LFM. The combined local-global approach with procrustean alignment surpasses other configurations in MPJPE reduction over 100 epochs on the Human3.6M validation split. (b) rapid convergence and efficiency of the TPE approach compared to the learnable MLP" + ], + "image_footnote": [], + "bbox": [ + 80, + 90, + 269, + 199 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ceac275bcf83e9bd7c4ff7657041a1462ccc14eb5cc983d6881f2cf0024272c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 272, + 89, + 464, + 200 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/93481d0c79429a24accb03e08ebaea0986c2ded41ca179c42bbda33734330fa9.jpg", + "table_caption": [ + "Table 3. Impact of TPE on Data Imbalance and Rig Transfer" + ], + "table_footnote": [], + "table_body": "
<tr><td>Study</td><td>Experiment</td><td>Model Size</td><td>Improvement (%)</td></tr>
<tr><td>Data Imbalance</td><td>Underrepr. category (Hippo) [27]</td><td>128</td><td>3.27</td></tr>
<tr><td></td><td></td><td>512</td><td>12.28</td></tr>
<tr><td></td><td></td><td>1024</td><td>22.02</td></tr>
<tr><td>Rig Transfer</td><td>17 [7]- to 15 [9]-joint</td><td>N/A</td><td>12</td></tr>
<tr><td></td><td>15 [9]- to 17 [7]-joint</td><td></td><td>23.29</td></tr>
<tr><td></td><td>52 [9]- to 83 [29]-joint</td><td></td><td>52.3</td></tr>
", + "bbox": [ + 78, + 325, + 468, + 424 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4.2 Local-Global vs. Hybrid Attention", + "text_level": 1, + "bbox": [ + 76, + 450, + 370, + 465 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In evaluating 3D-LFM's attention strategies, our analysis on the same validation split as above demonstrates the superiority of a hybrid approach combining local (GA) and global (MHSA) attention mechanisms. This integration, particularly when complemented by Procrustean (OnP) alignment, significantly enhances performance and accelerates convergence, as evidenced in Fig. 6 (a). The distinct advantage of this hybrid system validates our architectural choices, showcasing its efficiency in reducing MPJPE errors and refining model training dynamics.", + "bbox": [ + 75, + 474, + 468, + 626 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4.3 Tokenized Positional Encoding:", + "text_level": 1, + "bbox": [ + 76, + 633, + 349, + 648 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This ablation study assesses the impact of Tokenized Positional Encoding (TPE), which uses analytical Random Fourier Features for encoding positional information. This study examines TPE's influence on model performance in scenarios of data imbalance and rig transfer generalization. Data imbalance study: When tested on the underrepresented hippo category from the Animal3D dataset [27], TPE based model showed a $3.27\\%$ improvement in MPJPE over the baseline MLP with a 128-dimensional model performance as evident in first row of Tab. 3. This improvement grew with the model size. These results highlight TPE's scalability and its faster convergence, especially relevant in imbalanced, OOD scenarios as detailed in Fig. 6 (b). The observed performance boosts suggest that TPE's analytical nature might be more suited to adapting to novel data distributions. Increasing model size amplifies TPE's benefits,", + "bbox": [ + 75, + 657, + 468, + 901 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c5d67503874aeacdc82b0cf2bc01e81d58acfb7b55c19e3d744ca903ab946d39.jpg", + "image_caption": [ + "Figure 7. The qualitative improvement in rig transfer using analytical TPE versus learnable MLP projection. This visualization reinforces the necessity of TPE in handling OOD data such as different rigs, unseen during training." + ], + "image_footnote": [], + "bbox": [ + 509, + 97, + 870, + 247 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "hinting that its fixed analytical approach more adeptly handles OOD intricacies compared to learnable methods like MLPs, which may falter in such situations.", + "bbox": [ + 496, + 353, + 890, + 398 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Rig transfer study: Our rig transfer analysis, summarized in Table 3, showcases TPE's adaptability and effectiveness over the MLP baseline across different joint configurations and rig scenarios, with improvements up to $52.3\\%$ . These findings, particularly the significant performance boost in complex rig transfers, underscore TPE's robustness in OOD contexts. Figure 7 visually highlights the qualitative differences between TPE and MLP approaches in a rig transfer scenario, where the model trained on a 17-joint [7] configuration is tested on a 15 joint [9] setup.", + "bbox": [ + 496, + 398, + 892, + 551 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. 
Discussion and Conclusion", + "text_level": 1, + "bbox": [ + 500, + 563, + 746, + 578 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The proposed 3D-LFM marks a significant leap in 2D-3D lifting, showcasing scalability and adaptability, addressing data imbalance, and generalizing to new data categories. Its cross-category knowledge transfer requires further investigation and handling of inputs with different perspectives could act as potential limitations. 3D-LFM's efficiency is demonstrated by achieving results comparable to leading methods on [32] benchmark as well as its proficiency in out-of-distribution (OOD) scenarios on limited computational resources. For training duration and computational details, please refer to the supplementary materials. This work establishes a baseline framework for future 3D pose estimation and 3D reconstruction models. In summary, the 3D-LFM creates a universally applicable model for 3D reconstruction from 2D data, paving the way for diverse applications that requires accurate 3D reconstructions from 2D inputs.", + "bbox": [ + 496, + 588, + 890, + 843 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement: We extend our gratitude to Ian R. Fasel, Tim Clifford, Javier Movellan, and Matthias Hernandez of Apple for their insightful discussions.", + "bbox": [ + 500, + 844, + 890, + 887 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "10473", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Praneet C Bala, Benjamin R Eisenreich, Seng Bum Michael Yoo, Benjamin Y Hayden, Hyun Soo Park, and Jan Zimmermann. Openmonkeystudio: Automated markerless pose estimation in freely moving macaques. BioRxiv, pages 2020-01, 2020. 2, 6", + "[2] Christoph Bregler, Aaron Hertzmann, and Henning Biermann. Recovering non-rigid 3d shape from image streams. In Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. No. PR00662), pages 690-696. IEEE, 2000. 1", + "[3] Zheng Chen and Yi Sun. Joint-wise 2d to 3d lifting for hand pose estimation from a single rgb image. Applied Intelligence, 53(6):6421-6431, 2023. 2", + "[4] Mosam Dabhi, Chaoyang Wang, Kunal Saluja, László A Jeni, Ian Fasel, and Simon Lucey. High fidelity 3d reconstructions with limited physical views. In 2021 International Conference on 3D Vision (3DV), pages 1301-1311. IEEE, 2021. 2", + "[5] Mosam Dabhi, Chaoyang Wang, Tim Clifford, László Jeni, Ian Fasel, and Simon Lucey. Mbw: Multi-view bootstrapping in the wild. Advances in Neural Information Processing Systems, 35:3039-3051, 2022. 2, 7", + "[6] Liuhao Ge, Zhou Ren, Yuncheng Li, Zehao Xue, Yingying Wang, Jianfei Cai, and Junsong Yuan. 3d hand shape and pose estimation from a single rgb image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10833-10842, 2019. 2", + "[7] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE transactions on pattern analysis and machine intelligence, 36(7):1325-1339, 2013. 2, 7, 8", + "[8] Haorui Ji, Hui Deng, Yuchao Dai, and Hongdong Li. Unsupervised 3d pose estimation with non-rigid structure-from-motion modeling. arXiv preprint arXiv:2308.10705, 2023. 
2, 3", + "[9] Hanbyul Joo, Hao Liu, Lei Tan, Lin Gui, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social motion capture. In Proceedings of the IEEE International Conference on Computer Vision, pages 3334-3342, 2015. 2, 7, 8", + "[10] Daniel Joska, Liam Clark, Naoya Muramatsu, Ricardo Jericevich, Fred Nicolls, Alexander Mathis, Mackenzie W Mathis, and Amir Patel. Acinoset: a 3d pose estimation dataset and baseline models for cheetahs in the wild. In 2021 IEEE international conference on robotics and automation (ICRA), pages 13901-13908. IEEE, 2021. 2, 7", + "[11] Chen Kong and Simon Lucey. Deep non-rigid structure from motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1558-1567, 2019. 1, 2, 3, 5", + "[12] Vincent Lepetit, Francesc Moreno-Noguer, and Pascal Fua. Ep n p: An accurate o (n) solution to the p np problem. International journal of computer vision, 81:155-166, 2009. 3" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. 2", + "[14] Sebastian Lutz, Richard Blythman, Koustav Ghosal, Matthew Moynihan, Ciaran Simms, and Aljosa Smolic. Jointformer: Single-frame lifting transformer with error prediction and refinement for 3d human pose estimation. In 2022 26th International Conference on Pattern Recognition (ICPR), pages 1156-1163. IEEE, 2022. 2, 3, 4, 6", + "[15] Naureen Mahmood, Nima Ghorbani, Nikolaus F Troje, Gerard Pons-Moll, and Michael J Black. Amass: Archive of motion capture as surface shapes. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5442-5451, 2019. 2, 6", + "[16] Julieta Martinez, Rayat Hossain, Javier Romero, and James J Little. A simple yet effective baseline for 3d human pose estimation. In Proceedings of the IEEE international conference on computer vision, pages 2640-2649, 2017. 2, 3, 6", + "[17] Gyeongsik Moon, Shouu-I Yu, He Wen, Takaaki Shiratori, and Kyoung Mu Lee. Interhand2. 6m: A dataset and baseline for 3d interacting hand pose estimation from a single rgb image. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XX 16, pages 548-564. Springer, 2020. 2", + "[18] David Novotny, Nikhila Ravi, Benjamin Graham, Natalia Neverova, and Andrea Vedaldi. C3dpo: Canonical 3d pose networks for non-rigid structure from motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7688-7697, 2019. 1, 2, 3, 5", + "[19] Guy Tevet, Brian Gordon, Amir Hertz, Amit H Bermano, and Daniel Cohen-Or. Motionclip: Exposing human motion generation to clip space. In European Conference on Computer Vision, pages 358–374. Springer, 2022. 3", + "[20] Guy Tevet, Sigal Raab, Brian Gordon, Yonatan Shafir, Daniel Cohen-Or, and Amit H Bermano. Human motion diffusion model. arXiv preprint arXiv:2209.14916, 2022. 3", + "[21] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 
4", + "[22] Petar Velicković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. Graph attention networks. In International Conference on Learning Representations, 2018. 4", + "[23] Bastian Wandt, Marco Rudolph, Petrissa Zell, Helge Rhodin, and Bodo Rosenhahn. Canonpose: Self-supervised monocular 3d human pose estimation in the wild. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 13294-13304, 2021. 6", + "[24] Chaoyang Wang and Simon Lucey. Paul: Procrustean autoencoder for unsupervised lifting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 434-443, 2021. 1, 2, 3, 5", + "[25] Chaoyang Wang, Chen-Hsuan Lin, and Simon Lucey. Deep nrsfm++: Towards unsupervised 2d-3d lifting in the wild. In" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "10474", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "2020 International Conference on 3D Vision (3DV), pages 12-22. IEEE, 2020. 1, 2, 4, 5", + "[26] Yu Xiang, Roozbeh Mottaghi, and Silvio Savarese. Beyond Pascal: A benchmark for 3d object detection in the wild. In IEEE winter conference on applications of computer vision, pages 75-82. IEEE, 2014. 2, 5, 7", + "[27] Jiacong Xu, Yi Zhang, Jiawei Peng, Wufei Ma, Artur Jesslen, Pengliang Ji, Qixin Hu, Jiehua Zhang, Qihao Liu, Jiahao Wang, et al. Animal3d: A comprehensive dataset of 3d animal pose and shape. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9099-9109, 2023. 2, 6, 7, 8", + "[28] Haitian Zeng, Xin Yu, Jiaxu Miao, and Yi Yang. Mhr-net: Multiple-hypothesis reconstruction of non-rigid shapes from 2d views. In European Conference on Computer Vision, pages 1-17. Springer, 2022. 2, 5", + "[29] Xing Zhang, Lijun Yin, Jeffrey F Cohn, Shaun Canavan, Michael Reale, Andy Horowitz, Peng Liu, and Jeffrey M Girard. Bp4d-spontaneous: a high-resolution spontaneous 3d dynamic facial expression database. Image and Vision Computing, 32(10):692-706, 2014. 2, 7, 8", + "[30] Jianqiao Zheng, Xueqian Li, Sameera Ramasinghe, and Simon Lucey. Robust point cloud processing through positional embedding. arXiv preprint arXiv:2309.00339, 2023. 4", + "[31] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: Unified pretraining for human motion analysis. arXiv preprint arXiv:2210.06551, 2022. 2, 4", + "[32] Yue Zhu, Nermin Samet, and David Picard. H3wb: Human3. 6m 3d wholebody dataset and benchmark. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20166-20177, 2023. 
2, 5, 6, 8" + ], + "bbox": [ + 78, + 90, + 468, + 556 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10475", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/3D-LFM_ Lifting Foundation Model/5d227142-e6b0-440e-bad4-facab1940a16_model.json b/2024/3D-LFM_ Lifting Foundation Model/5d227142-e6b0-440e-bad4-facab1940a16_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6eaa02627a6423ddd7396ec28e885e0dc1afa7fd --- /dev/null +++ b/2024/3D-LFM_ Lifting Foundation Model/5d227142-e6b0-440e-bad4-facab1940a16_model.json @@ -0,0 +1,1936 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.044 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.301, + 0.131, + 0.67, + 0.154 + ], + "angle": 0, + "content": "3D-LFM: Lifting Foundation Model" + }, + { + "type": "text", + "bbox": [ + 0.267, + 0.18, + 0.391, + 0.198 + ], + "angle": 0, + "content": "Mosam Dabhi1" + }, + { + "type": "text", + "bbox": [ + 0.427, + 0.181, + 0.557, + 0.199 + ], + "angle": 0, + "content": "László A. Jeni\\(^{1*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.586, + 0.181, + 0.709, + 0.2 + ], + "angle": 0, + "content": "Simon Lucey\\(^{2*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.205, + 0.471, + 0.224 + ], + "angle": 0, + "content": "\\(^{1}\\)Carnegie Mellon University" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.205, + 0.73, + 0.224 + ], + "angle": 0, + "content": "2The University of Adelaide" + }, + { + "type": "text", + "bbox": [ + 0.415, + 0.233, + 0.551, + 0.247 + ], + "angle": 0, + "content": "3dlfm.github.io" + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.258, + 0.645, + 0.503 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.243, + 0.508, + 0.484, + 0.521 + ], + "angle": 0, + "content": "(a) Unified 2D-3D lifting for \\(30+\\) categories." + }, + { + "type": "image", + "bbox": [ + 0.657, + 0.299, + 0.889, + 0.468 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.677, + 0.508, + 0.859, + 0.521 + ], + "angle": 0, + "content": "(b) Dataset diversity visualization." + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.533, + 0.894, + 0.603 + ], + "angle": 0, + "content": "Figure 1. Overview: (a) This figure shows the 3D-LFM's ability in lifting 2D landmarks into 3D structures across an array of over 30 diverse categories, from human body parts, to a plethora of animals and everyday common objects. The lower portion shows the actual 3D reconstructions by our model, with red lines representing the ground truth and blue lines showing the 3D-LFM's predictions. (b) This figure displays the model's training data distribution on a logarithmic scale, highlighting that inspite of 3D-LFM being trained on imbalanced datasets, it preserves the performance across individual categories." 
+ }, + { + "type": "title", + "bbox": [ + 0.235, + 0.615, + 0.314, + 0.631 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.647, + 0.474, + 0.875 + ], + "angle": 0, + "content": "The lifting of a 3D structure and camera from 2D landmarks is at the cornerstone of the discipline of computer vision. Traditional methods have been confined to specific rigid objects, such as those in Perspective-n-Point (PnP) problems, but deep learning has expanded our capability to reconstruct a wide range of object classes (e.g. C3DPO [18] and PAUL [24]) with resilience to noise, occlusions, and perspective distortions. However, all these techniques have been limited by the fundamental need to establish correspondences across the 3D training data, significantly limiting their utility to applications where one has an abundance of \"in-correspondence\" 3D data. Our approach harnesses the inherent permutation equivariance of transformers to manage varying numbers of points per 3D data instance, withstand occlusions, and generalizes" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.617, + 0.895, + 0.694 + ], + "angle": 0, + "content": "to unseen categories. We demonstrate state-of-the-art performance across 2D-3D lifting task benchmarks. Since our approach can be trained across such a broad class of structures, we refer to it simply as a 3D Lifting Foundation Model (3D-LFM) – the first of its kind." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.71, + 0.633, + 0.725 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.734, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Lifting 2D landmarks from a single-view RGB image into 3D has long posed a complex challenge in the field of computer vision because of the ill-posed nature of the problem. This task is important for a range of applications from augmented reality to robotics, and requires an understanding of non-rigid spatial geometry and accurate object descriptions [2, 11, 25]. Historically, efforts in single-frame 2D-3D lifting have encountered significant hurdles: reliance on object-specific models, poor scalability, and limited adaptability to diverse and complex object categories. Traditional methods, while advancing in specific domains like human" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.885, + 0.26, + 0.901 + ], + "angle": 0, + "content": "* Both authors advised equally." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "10466" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.136 + ], + "angle": 0, + "content": "body [14, 16, 31] or hand modeling [3, 6], often fail when faced with the complexities of varying object types or object rigs (skeleton placements)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.137, + 0.47, + 0.437 + ], + "angle": 0, + "content": "To facilitate such single-frame 2D-3D lifting, deep learning methods like C3DPO [18] and others [8, 11, 24, 25, 28] have recently been developed. However, these methods are fundamentally limited in that they must have knowledge of the object category and how the 2D landmarks correspond semantically to the 2D/3D data it was trained upon. Further, this represents a drawback, especially when considering their scaling up to dozens or even hundreds of object categories, with varying numbers of landmarks and configurations. 
This paper marks a departure from such correspondence constraints, introducing the 3D Lifting Foundation Model (3D-LFM), an object-agnostic single frame 2D-3D lifting approach. At its core, 3D-LFM addresses the limitation of previous models, which is the inability to efficiently handle a wide array of object categories while maintaining high fidelity in 3D keypoint lifting from 2D data. We propose a solution rooted in the concept of permutation equivariance, a property that allows our model to autonomously establish correspondences among diverse sets of input 2D keypoints." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.439, + 0.471, + 0.892 + ], + "angle": 0, + "content": "3D-LFM is capable of performing single frame 2D-3D lifting for \\(30+\\) categories using a single model simultaneously, covering everything from human forms [9, 15, 32], face [29], hands [17], and animal species [1, 10, 27], to a plethora of inanimate objects found in everyday scenarios such as cars, furniture, etc. [26]. Importantly, 3D-LFM is inherently scalable, poised to expand to hundreds of categories and improve performance, especially in out-of-distribution or less-represented areas, showcasing its broad utility in 3D lifting tasks. 3D-LFM is able to achieve 2D-3D lifting performance that matches those of leading methods specifically optimized for individual categories. The generalizability of 3D LFM is further evident in its ability to handle out-of-distribution (OOD) object categories and rigs, which we refer to as OOD 2D-3D lifting, where the task is to lift the 2D landmarks to 3D for a category never seen during training. We show such OOD results: (1) for inanimate objects - by holding out an object category within the PASCAL dataset, (2) for animals - by training on common object categories such as dogs and cats found in [27] and reconstructing 3D for unseen and rare species of Cheetahs found in [10] and in-the-wild zoo captures from [5], and (3) by showing rig transfer, i.e. training 2D to 3D lifting on a Human3.6M dataset rig [7] and showing similar 2D to 3D lifting performance on previously unseen rigs such as those found in Panoptic studio dataasset rig [9] or a COCO dataset rig [13]. 3D-LFM transfers learnings from seen data during training to unseen OOD data during inference. It does so by learning general structural features during the training phase via the proposed permutation equivariance properties" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.12 + ], + "angle": 0, + "content": "and specific design choices that we discuss in the following sections." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.122, + 0.892, + 0.422 + ], + "angle": 0, + "content": "Recognizing the important role geometry plays in 3D reconstruction [4, 5, 11, 18, 24, 25], we integrate Procrustean methods such as Orthographic-N-Point (OnP) or Perspective-N-Point (PnP) to direct the model's focus on deformable aspects within a canonical frame. This incorporation significantly reduces the computational burden on the model, freeing it from learning redundant rigid rotations and focusing its capabilities on capturing the true geometric essence of objects. 
Scalability, a critical aspect of our model, is addressed through the use of tokenized positional encoding (TPE), which, when combined with graph-based transformer architecture, not only enhances the model's adaptability across diverse categories but also strengthens its ability to handle multiple categories with different number of keypoints and configurations. Finally, the use of skeleton information (joint connectivity) within the graph-based transformers via adjacency matrices provides strong clues about joint proximity and inherent connectivity, aiding in the handling of correspondences across varied object categories." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.424, + 0.892, + 0.544 + ], + "angle": 0, + "content": "To the best of our knowledge, 3D-LFM is one of the only known work which is a unified model capable of doing 2D-3D lifting for \\(30+\\) (and potentially even more) categories simultaneously. Its ability to perform unified learning across a vast spectrum of object categories without specific object information and its handling of OOD scenarios highlight its potential as one of the first models capable of serving as a 2D-3D lifting foundation model." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.545, + 0.797, + 0.559 + ], + "angle": 0, + "content": "The contributions of this paper are threefold:" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.56, + 0.892, + 0.619 + ], + "angle": 0, + "content": "1. We propose a Procrustean transformer that is able to focus solely on learning the deformable aspects of objects within a single canonical frame whilst preserving permutation equivariance across 2D landmarks." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.62, + 0.892, + 0.679 + ], + "angle": 0, + "content": "2. The integration of tokenized positional encoding within the graph-based transformer, to enhance our approach's scalability and its capacity to handle diverse and imbalanced datasets." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.681, + 0.892, + 0.785 + ], + "angle": 0, + "content": "3. We demonstrate that 3D-LFM surpasses state-of-the-art methods in categories such as humans, hands, and faces (benchmark in [32]). Additionally, it shows robust generalization by handling previously unseen objects and configurations, including animals ([5, 10]), inanimate objects ([26]), and novel object arrangements (rig transfer in [9])" + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.56, + 0.892, + 0.785 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.787, + 0.892, + 0.892 + ], + "angle": 0, + "content": "In subsequent sections, we explore the design and methodology of our proposed 3D-LFM architecture, including detailed ablation experiments and comparative analyses. Throughout this paper, 'keypoints', 'landmarks', and 'joints' are used interchangeably, referring to specific, identifiable points or locations on an object or figure that are crucial for understanding its structure and geometry." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "10467" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.164, + 0.094, + 0.81, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.233, + 0.78, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.383, + 0.892, + 0.455 + ], + "angle": 0, + "content": "Figure 2. 
Overview of the 3D Lifting Foundation Model (3D-LFM) architecture: The process begins with the input 2D keypoints undergoing Token Positional Encoding (TPE) before being processed by a series of graph-based transformer layers. The resulting features are then decoded through an MLP into a canonical 3D shape. This shape is aligned to the ground truth (G.T. 3D) in the reference frame using a Procrustean method, with the Mean Squared Error (MSE) loss computed to guide the learning. The architecture captures both local and global contextual information, focusing on deformable structures while minimizing computational complexity." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.479, + 0.221, + 0.493 + ], + "angle": 0, + "content": "2. Related works" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.503, + 0.47, + 0.866 + ], + "angle": 0, + "content": "The field of 2D-3D lifting has evolved substantially from classic works such as those based on Perspective-n-Point (PnP) algorithms [12]. In these early works, the algorithm was given a set of 2D landmarks and some 3D supervision, namely the known 3D rigid object. The field has since witnessed a paradigm shift with the introduction of deep learning methodologies, led by methods such as C3DPO [18], PAUL [24], and Deep NRSfM [11], along with recent transformer-based innovations such as NRSfM-Former [8]. In these approaches one does not need knowledge of the specific 3D object, instead it can get away with just the 2D landmarks and correspondences to an ensemble of 2D/3D data from the object category to be lifted. However, despite their recent success, all these methods still require that the 2D/3D data be in semantic correspondence. That is, the index to a specific landmark has the same semantic meaning across all instances (e.g. chair leg). In practice, this is quite limiting at run-time, as one needs intimate knowledge of the object category, and rig in order to apply any of these current methods. Further, this dramatically limits the ability of these methods to leverage cross-object and cross-rig datasets, prohibiting the construction of a truly generalizable 2D to 3D lifting foundation model – a topic of central focus in this paper." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.867, + 0.469, + 0.896 + ], + "angle": 0, + "content": "Recent literature in pose estimation, loosely connected to NRSfM but often more specialized towards human and" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.48, + 0.892, + 0.629 + ], + "angle": 0, + "content": "animal body parts, has also seen remarkable progress. Models such as Jointformer [14] and SimpleBaseline [16] have refined the single-frame 2D-3D lifting process, while generative approaches like MotionCLIP [19] and Human Motion Diffusion Model [20] have laid the groundwork for 3D generative motion-based foundation models. These approaches, however, are even more limiting than C3PDO, PAUL, etc. in that they are intimately wedded to the object class and are not easily extendable to an arbitrary object class." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.646, + 0.608, + 0.663 + ], + "angle": 0, + "content": "3. 
Approach" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.671, + 0.892, + 0.791 + ], + "angle": 0, + "content": "Given a set of 2D keypoints representing the projection of an object's joints in an image, we denote the keypoints matrix as \\(\\mathbf{W} \\in \\mathbb{R}^{N \\times 2}\\), where \\(N\\) is the predetermined maximum number of joints considered across all object categories. For objects with joints count less than \\(N\\), we introduce a masking mechanism that utilizes a binary mask matrix \\(\\mathbf{M} \\in \\{0,1\\}^N\\), where each element \\(m_i\\) of \\(\\mathbf{M}\\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.592, + 0.805, + 0.892, + 0.847 + ], + "angle": 0, + "content": "\\[\nm _ {i} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f j o i n t} i \\text {i s p r e s e n t} \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.855, + 0.892, + 0.902 + ], + "angle": 0, + "content": "The 3D lifting function \\( f: \\mathbb{R}^{N \\times 2} \\to \\mathbb{R}^{N \\times 3} \\) maps the 2D keypoints to their corresponding 3D structure while compensating for the projection:" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "10468" + } + ], + [ + { + "type": "equation", + "bbox": [ + 0.185, + 0.105, + 0.469, + 0.123 + ], + "angle": 0, + "content": "\\[\n\\mathbf {S} = f (\\mathbf {W}) = \\mathbf {W} \\mathbf {R} ^ {\\top} + \\mathbf {b} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.129, + 0.47, + 0.174 + ], + "angle": 0, + "content": "where \\(\\mathbf{R} \\in \\mathbb{R}^{3 \\times 3}\\) is the projection matrix (assumed either weak-perspective or orthographic) and \\(\\mathbf{b} \\in \\mathbb{R}^{N \\times 3}\\) is a bias term that aligns the centroids of 2D and 3D keypoints." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.175, + 0.471, + 0.28 + ], + "angle": 0, + "content": "Permutation Equivariance: To ensure scalability and adaptability across a diverse set of objects, we leverage the property of permutation equivariance inherent in transformer architectures. Permutation equivariance allows the model to process input keypoints \\(\\mathbf{W}\\) regardless of their order, a critical feature for handling objects with varying joint configurations:" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.288, + 0.342, + 0.305 + ], + "angle": 0, + "content": "\\[\nf (\\mathcal {P} \\mathbf {W}) = \\mathcal {P} f (\\mathbf {W})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.311, + 0.47, + 0.477 + ], + "angle": 0, + "content": "where \\(\\mathcal{P}\\) is a permutation matrix that reorders the keypoints. Handling Missing Data: To address the challenge of missing data, we refer the Deep NRSfM++ [25] work and use a masking mechanism to accommodate for occlusions or absences of keypoints. Our binary mask matrix \\(\\mathbf{M} \\in \\{0,1\\}^N\\) is applied in such a way that it not only pads the input data to a consistent size but also masks out missing or occluded points: \\(\\mathbf{W}_m = \\mathbf{W} \\odot \\mathbf{M}\\), where \\(\\odot\\) denotes element-wise multiplication. 
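A small NumPy check of the permutation-equivariance property f(PW) = Pf(W) stated above; the `lift` function here is a toy per-point map standing in for the learned lifting network, purely to illustrate the property:

```python
import numpy as np

def lift(W):
    """Toy permutation-equivariant per-point map standing in for f: R^{Nx2} -> R^{Nx3}."""
    return np.concatenate([W, (W ** 2).sum(-1, keepdims=True)], axis=-1)

N = 6
W = np.random.randn(N, 2)
P = np.eye(N)[np.random.permutation(N)]       # random permutation matrix
assert np.allclose(lift(P @ W), P @ lift(W))  # f(PW) == P f(W)
```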
To remove the effects of translation and ensure that our TPE features are generalizable, we zero-center the data by subtracting the mean of the visible keypoints:" + }, + { + "type": "equation", + "bbox": [ + 0.181, + 0.485, + 0.469, + 0.502 + ], + "angle": 0, + "content": "\\[\n\\mathbf {W} _ {c} = \\mathbf {W} _ {m} - \\operatorname {m e a n} \\left(\\mathbf {W} _ {m}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.508, + 0.47, + 0.584 + ], + "angle": 0, + "content": "We scale the zero-centered data to the range \\([-1, 1]\\) while preserving the aspect ratio to maintain the geometric integrity of the keypoints. For more details on handling missing data in the presence of perspective effects, we refer the reader to Deep NRSFM++[25]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.584, + 0.47, + 0.734 + ], + "angle": 0, + "content": "Token Positional Encoding: replaces the traditional Correspondence Positional Encoding (CPE) or Joint Embedding which encodes the semantic correspondence information (as used in works such as like [14, 31]) with a mechanism that does not require explicit correspondence or semantic information. Owing to the success of per-point positional embedding, particularly random Fourier features [30] in handling OOD data, we compute Token Positional Encoding (TPE) using analytical Random Fourier features (RFF) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.094, + 0.751, + 0.469, + 0.8 + ], + "angle": 0, + "content": "\\[\n\\mathbf {T P E} \\left(\\mathbf {W} _ {c}\\right) = \\sqrt {\\frac {2}{D}} \\left[ \\sin \\left(\\mathbf {W} _ {c} \\boldsymbol {\\omega} + b\\right); \\cos \\left(\\mathbf {W} _ {c} \\boldsymbol {\\omega} + b\\right) \\right] \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.471, + 0.902 + ], + "angle": 0, + "content": "where \\(D\\) is the dimensionality of the Fourier feature space, \\(\\pmb{\\omega} \\in \\mathbb{R}^{2 \\times \\frac{D}{2}}\\) and \\(\\mathbf{b} \\in \\mathbb{R}^{\\frac{D}{2}}\\) are parameters sampled from a normal distribution, scaled appropriately. These parameters are sampled once and kept fixed, as per the RFF methodology. The output of this transformation \\(\\mathbf{TPE}(\\mathbf{W}_c)\\) is then fed into the graph-based transformer network as \\(\\mathbf{X}^\\ell\\) where" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.894, + 0.183 + ], + "angle": 0, + "content": "\\(\\ell\\) indicates the layer number (0 in the above case). This set of features is now ready for processing inside the graph-based transformer layers without the need for correspondence among the input keypoints. The TPE retains the property of permutation equivariance while implicitly encoding the relative positions of the keypoints." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.192, + 0.842, + 0.208 + ], + "angle": 0, + "content": "3.1. Graph-based Transformer Architecture" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.214, + 0.892, + 0.32 + ], + "angle": 0, + "content": "Our graph-based transformer architecture utilizes a hybrid approach to feature aggregation by combining graph-based local attention [22](L) with global self-attention mechanisms [21](G) within a single layer (shown as grey block in Fig. 2. This layer is replicated \\( L \\) times, providing a sequential refinement of the feature representation across the network's depth." 
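Before the first layer, the raw 2D input is prepared as described earlier in this section; a minimal NumPy sketch of that preparation (Eqs. (1)-(3)): padding to N joints with a binary mask, masking out occluded points, zero-centering on the visible keypoints, and an aspect-ratio-preserving rescale to [-1, 1]. The exact padding and normalization details are assumptions:

```python
import numpy as np

def preprocess_keypoints(W, visible, N_max):
    """Mask (Eq. 1), zero-center (Eq. 3) and aspect-preserving rescale of 2D keypoints.

    W       : (n, 2) observed 2D keypoints for one instance (n <= N_max).
    visible : (n,) binary visibility flags.
    Returns (W_c, M): (N_max, 2) processed keypoints and (N_max,) binary mask.
    """
    M = np.zeros(N_max)
    M[: len(W)] = visible
    W_pad = np.zeros((N_max, 2))
    W_pad[: len(W)] = W
    W_m = W_pad * M[:, None]                    # W_m = W ⊙ M
    vis = M.astype(bool)
    W_c = W_m - W_m[vis].mean(axis=0)           # zero-center on visible joints (Eq. 3)
    W_c[~vis] = 0.0
    scale = np.abs(W_c[vis]).max() + 1e-8       # one shared scale preserves the aspect ratio
    return W_c / scale, M

W = np.random.randn(17, 2)                      # 17 observed joints
W_c, M = preprocess_keypoints(W, np.ones(17), N_max=64)
```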
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.32, + 0.892, + 0.426 + ], + "angle": 0, + "content": "Hybrid Feature Aggregation: For each layer \\(\\ell\\), ranging from 0 to \\(L\\), the feature matrix \\(\\mathbf{X}^{(\\ell)} \\in \\mathbb{R}^{N \\times D}\\) is augmented through simultaneous local and global processing. The local processing component, \\(\\mathrm{GA}(\\mathbf{X}^{(\\ell)}, \\mathbf{A})\\), leverages an adjacency matrix \\(\\mathbf{A}\\), which encodes the connectivity based on the object category, to perform graph-based attention on batches of nodes representing the input 2D data:" + }, + { + "type": "equation", + "bbox": [ + 0.624, + 0.436, + 0.891, + 0.476 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {L} ^ {(\\ell)} = \\mathrm {G A} (\\mathbf {X} ^ {(\\ell)}, \\mathbf {A}), \\\\ \\mathbf {G} ^ {(\\ell)} = \\operatorname {M H S A} (\\mathbf {X} ^ {(\\ell)}) \\tag {5} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.486, + 0.892, + 0.516 + ], + "angle": 0, + "content": "Local and global features are concatenated to form a unified representation \\(\\mathbf{U}^{(\\ell)}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.599, + 0.529, + 0.892, + 0.548 + ], + "angle": 0, + "content": "\\[\n\\mathbf {U} ^ {(\\ell)} = \\operatorname {c o n c a t} \\left(\\mathbf {L} ^ {(\\ell)}, \\mathbf {G} ^ {(\\ell)}\\right) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.554, + 0.892, + 0.615 + ], + "angle": 0, + "content": "Following the concatenation, each layer applies a normalization(LN) and a multilayer perceptron (MLP). The MLP employs a Gaussian Error Linear Unit (GeLU) as the nonlinearity function to enhance the model's expressive power" + }, + { + "type": "equation", + "bbox": [ + 0.557, + 0.637, + 0.891, + 0.677 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {X} ^ {\\prime (\\ell)} = \\mathrm {L N} \\left(\\mathbf {U} ^ {(\\ell)}\\right) + \\mathbf {U} ^ {(\\ell)}, \\\\ \\mathbf {X} ^ {(\\ell + 1)} = \\operatorname {L N} \\left(\\operatorname {M L P} _ {-} \\operatorname {G e L U} \\left(\\mathbf {X} ^ {\\prime (\\ell)}\\right)\\right) + \\mathbf {X} ^ {\\prime (\\ell)} \\tag {7} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.687, + 0.892, + 0.793 + ], + "angle": 0, + "content": "Here, GA represents Graph Attention, MHSA denotes Multi-Head Self-Attention, and MLP_GeLU indicates our MLP with GeLU nonlinearity. This architecture is designed to learn patterns in 2D data by considering both the local neighborhood connectivity of input 2D and the global data context of input 2D, which is important for robust 2D to 3D structure lifting." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.802, + 0.716, + 0.818 + ], + "angle": 0, + "content": "3.2. Procrustean Alignment" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.87 + ], + "angle": 0, + "content": "The final operation in our pipeline decodes the latent feature representation \\(\\mathbf{X}^{(L)}\\) into the predicted canonical structure \\(\\mathbf{S}_c\\) via a GeLU-activated MLP:" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.884, + 0.79, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\mathbf {S} _ {c} = \\operatorname {M L P} _ {\\text {s h a p e . 
d e c o d e r}} (\\mathbf {X} ^ {(L)})\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10469" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "Subsequently, we align \\(\\mathbf{S}_c\\) with the ground truth \\(\\mathbf{S}_r\\), via a Procrustean alignment method that optimizes for the rotation matrix \\(\\mathbf{R}\\). The alignment is formalized as a minimization problem:" + }, + { + "type": "equation", + "bbox": [ + 0.157, + 0.166, + 0.388, + 0.189 + ], + "angle": 0, + "content": "\\[\n\\underset {\\mathbf {R}} {\\text {m i n i m i z e}} \\quad | | \\mathbf {M} \\odot (\\mathbf {S} _ {r} - \\mathbf {S} _ {c} \\mathbf {R}) | | _ {F} ^ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.195, + 0.469, + 0.255 + ], + "angle": 0, + "content": "where \\(\\mathbf{M}\\) is a binary mask applied element-wise, and \\(||\\cdot ||_F\\) denotes the Frobenius norm. The optimal \\(\\mathbf{R}\\) is obtained via SVD, which ensures the orthonormality constraint of the rotation matrix:" + }, + { + "type": "equation", + "bbox": [ + 0.105, + 0.281, + 0.442, + 0.3 + ], + "angle": 0, + "content": "\\[\n\\mathbf {U}, \\boldsymbol {\\Sigma}, \\mathbf {V} ^ {\\top} = \\operatorname {S V D} ((\\mathbf {M} \\odot \\mathbf {S} _ {c}) ^ {\\top} \\mathbf {S} _ {r}), \\quad \\mathbf {R} = \\mathbf {U V} ^ {\\top}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.311, + 0.469, + 0.357 + ], + "angle": 0, + "content": "The predicted shape is then scaled relative to the reference shape \\(\\mathbf{S}_r\\), resulting in a scale factor \\(\\gamma\\), which yields the final predicted shape \\(\\mathbf{S}_p\\):" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.371, + 0.328, + 0.388 + ], + "angle": 0, + "content": "\\[\n\\mathbf {S} _ {p} = \\boldsymbol {\\gamma} \\cdot (\\mathbf {S} _ {c} \\mathbf {R})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.394, + 0.469, + 0.53 + ], + "angle": 0, + "content": "This Procrustean alignment step is crucial for directing the model's focus on learning non-rigid shape deformations over rigid body dynamics, thus significantly enhancing the model's ability to capture the true geometric essence of objects by just focusing on core deformable (non-rigid) aspects. The effectiveness of this approach is confirmed by faster convergence and reduced error rates in our experiments, as detailed in Fig. 6. These findings align with the findings presented in PAUL [24]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.54, + 0.222, + 0.554 + ], + "angle": 0, + "content": "3.3. Loss Function" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.563, + 0.469, + 0.609 + ], + "angle": 0, + "content": "The optimization of our model relies on the Mean Squared Error (MSE) loss, which calculates the difference between predicted 3D points \\(\\mathbf{S}_p\\) and the ground truth \\(\\mathbf{S}_r\\):" + }, + { + "type": "equation", + "bbox": [ + 0.17, + 0.619, + 0.469, + 0.66 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {M S E}} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\| \\mathbf {S} _ {p} ^ {(i)} - \\mathbf {S} _ {r} ^ {(i)} \\right\\| ^ {2} \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.666, + 0.469, + 0.757 + ], + "angle": 0, + "content": "Minimizing this loss across \\(N\\) points ensures the model's ability in reconstructing accurate 3D shapes from input 2D landmarks. 
This minimization effectively calibrates the shape decoder and the Procrustean alignment to focus on the essential non-rigid characteristics of the objects, helping the accuracy of the 2D to 3D lifting process." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.77, + 0.388, + 0.788 + ], + "angle": 0, + "content": "4. Results and Comparative Analysis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Our evaluation shows the 3D Lifting Foundation Model (3D-LFM)'s capability in single-frame 2D-3D lifting across diverse object categories without object-specific data in Sec. 4.1. Following that, Sec. 4.2 highlights 3D-LFM's performance over specialized methods, especially achieving state-of-the-art performance in whole-body benchmarks[32] (Fig. 4). Additionally, Sec. 4.3 shows" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.228 + ], + "angle": 0, + "content": "3D-LFM's capability in 2D-3D lifting across 30 categories using a single unified model, enhancing category-specific performance and achieving out-of-distribution (OOD) generalization for unseen object configurations during training. In conclusion, the ablation studies in Section 4.4 validate our proposed procrustean approach, token positional encoding, and the local-global hybrid attention mechanism in the transformer model, confirming their role in 3D-LFM's effectiveness in both single- and multiple-object scenarios." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.289, + 0.785, + 0.305 + ], + "angle": 0, + "content": "4.1. Multi-Object 3D Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.312, + 0.892, + 0.418 + ], + "angle": 0, + "content": "Clarifying naming convention: In 'object-specific' versus 'object-agnostic', our primary focus in this naming is on the distinction in training methods. Here, object-specific training involves supplying semantic details for each object, leading to isolated training. Conversely, object-agnostic training combines various categories without explicit landmark semantics, leading to combined training." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.419, + 0.892, + 0.569 + ], + "angle": 0, + "content": "Experiment Rationale: 3D-LFM leverages permutation equivariance to accurately lift 2D keypoints into 3D structures across diverse categories, outperforming fixed-array methods by adapting flexibly to variable keypoint configurations. It has been evaluated against non-rigid structure-from-motion approaches [11, 18, 24, 25] that require object-specific inputs, showing its ability to handle diverse categories. For a comprehensive benchmark, we utilize the PASCAL3D+ dataset [26], following C3DPO's [18] methodology, to include a variety of object categories." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.57, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Performance: We benchmark 3D-LFM against the notable NRSfM method, C3DPO [18], for multi-object 2D to 3D lifting with 3D supervision. C3DPO, similar to other contemporary methods [11, 24, 25, 28] requiring object-specific details, serves as an apt comparison due to its multi-category approach. Initially replicating conditions with object-specific information, 3D-LFM matches C3DPO's performance, as demonstrated in Fig. 3. 
This stage uses MPJPE to measure 3D lifting accuracy, with C3DPO's training setup including an \\(MN\\) dimensional array for object details where \\(M\\) represents number of objects with \\(N\\) being maximum number of keypoints, and our model is trained separately on each object to avoid providing object-specific information. The 3D-LFM's strength emerges when object-specific data is withheld. While C3DPO shows a decline without such data, 3D-LFM maintains a lower MPJPE across categories, even when trained collectively across categories using only an \\(N\\) dimensional array. These findings (Fig. 3) highlights 3D-LFM's capabilities, outperforming single-category training and demonstrating its potential as a generalized 2D to 3D lifting solution." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10470" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.089, + 0.465, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.287, + 0.47, + 0.384 + ], + "angle": 0, + "content": "Figure 3. 3D-LFM vs. C3DPO Performance: MPJPE comparisons using the PASCAL3D+ dataset, this figure demonstrates our model's adaptability in the absence of object-specific information, contrasting with C3DPO's increased error under the same conditions. The analysis confirms 3D-LFM's superiority across diverse object categories, reinforcing its potential for generalized 2D to 3D lifting." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.419, + 0.47, + 0.463 + ], + "angle": 0, + "content": "Table 1. Quantitative performance on H3WB: Our method demonstrates leading performance across multiple object categories without the need for object-specific designs." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.473, + 0.469, + 0.576 + ], + "angle": 0, + "content": "
MethodWhole-bodyBodyFace/AlignedHand/Aligned
SimpleBaseline125.4125.7115.9 / 24.6140.7 / 42.5
CanonPose w/3D sv.117.7117.5112.0 / 17.9126.9 / 38.3
Large SimpleBaseline112.3112.6110.6 / 14.6114.8 / 31.7
Jointformer (extra data)81.57860.4 / 16.2117.6 / 38.8
Jointformer88.384.966.5 / 17.8125.3 / 43.7
Ours64.1360.8356.55 / 10.4478.21 / 28.22
Ours – PA33.1339.366.0213.56
" + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.621, + 0.394, + 0.637 + ], + "angle": 0, + "content": "4.2. Benchmark: Object-Specific Models" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.644, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Next, we benchmark 3D-LFM against leading specialized methods for human body, face, and hands categories. Our model outperforms these specialized methods, showing multi-category learning without the need for category (landmark) semantics. For this study, we evaluate on H3WB dataset [32], a recent benchmark for diverse whole-body pose estimation tasks. This dataset is valuable for its inclusion of multiple object categories and for providing a comparative baseline against methods such as Jointformer [14], SimpleBaseline [16], and CanonPose [23]. Following H3WB's recommended 5-fold cross-validation and submitting the evaluations to benchmark's authors, we report results on the hidden test set. The results shown in Fig. 4 and Table 1 include PA-MPJPE and MPJPE, with test set performance numbers provided directly by the H3WB team, ensuring that our results are verified by an independent third-party." + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.093, + 0.887, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.276, + 0.892, + 0.373 + ], + "angle": 0, + "content": "Figure 4. Performance Comparison on H3WB Benchmark: This chart contrasts MPJPE errors for whole-body, body, face, aligned face, hand, and aligned hand categories within the H3WB benchmark [32]. Our models, with and without Procrustes Alignment (Ours-PA), outperform current state-of-the-art (SOTA) methods, validating our approach's proficiency in 2D to 3D lifting tasks." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.437, + 0.742, + 0.452 + ], + "angle": 0, + "content": "4.3. Towards foundation model" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.459, + 0.892, + 0.61 + ], + "angle": 0, + "content": "In this section, we highlight 3D-LFM's role as a foundational model for varied 2D-3D lifting, capable in managing multiple object types and data imbalances. In this subsection, we explore 3D-LFM's scalability for collective dataset training (Sec.4.3.1), its generalization to new categories and rig transfer capabilities (Sec.4.3.2). These studies validate the 3D-LFM's role as a foundation model, capable at leveraging diverse data without requiring specific configurations, thus simplifying the 3D lifting process for varied joint setups." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.611, + 0.892, + 0.763 + ], + "angle": 0, + "content": "We start this investigation by showing the capability 3D-LFM in handling 2D-3D lifting for \\(30+\\) object categories within the single model, confirming the model's capability to manage imbalanced datasets representative of real-world scenarios as shown in Fig. 1. With a comprehensive range of human, hand, face, inanimate objects, and animal datasets, the 3D-LFM is proven to be scalable, without requiring category-specific adjustments. The subsequent subsections will dissect these attributes further, discussing the 3D-LFM's foundational potential in the 3D lifting domain." 
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.771, + 0.749, + 0.787 + ], + "angle": 0, + "content": "4.3.1 Combined Dataset Training" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This study evaluates the 3D-LFM's performance on isolated datasets against its performance on a combined dataset. Initially, the model was trained separately on animal-based supercategory datasets: specifically OpenMonkey[1] and Animals3D[27]. Subsequently, it was trained on a merged dataset containing a broad spectrum of object categories, including Human Body-Based datasets such as AMASS [15]" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "10471" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.119, + 0.09, + 0.427, + 0.103 + ], + "angle": 0, + "content": "Table 2. Quantitative evaluation for OOD scenarios." + }, + { + "type": "table", + "bbox": [ + 0.123, + 0.115, + 0.421, + 0.192 + ], + "angle": 0, + "content": "
CategoryOOD (mm)In-Dist. (mm)
Cheetah26.5910.16
Train6.885.71
Chimpanzee52.0542.65
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.219, + 0.47, + 0.383 + ], + "angle": 0, + "content": "and Human 3.6 [7], Hand-Based datasets such as PanOptic Hands [9], Face-Based datasets like BP4D+[29], and various Inanimate Objects from the PASCAL3D+ dataset[26], along with previously mentioned animal datasets. Isolated training resulted in an average MPJPE of \\(21.22 \\, \\text{mm}\\), while the combined training method significantly reduced MPJPE to \\(12.5 \\, \\text{mm}\\) on the same animal supercategory validation split. This improvement confirms the potential of 3D-LFM as a pre-training framework and underscores its ability to adapt and generalize from diverse and extensive data collections." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.385, + 0.47, + 0.566 + ], + "angle": 0, + "content": "Dataset Selection Rationale: We selected animal-based supercategory datasets to demonstrate combined training's impact on underrepresented categories. We observed greater performance improvements in smaller, unbalanced datasets (as exemplified by PASCAL3D+: from 4.31 mm to 1.1 mm and OpenMonkey: from 19.45 mm to 9.59 mm) compared to larger datasets with sufficient balance among categories. Consequently, we see minimal gains in more balanced, larger datasets like AMASS (from 1.67 mm to 1.66 mm), underscoring the utility of combined training for enhancing performance in underrepresented and long-tail categories." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.575, + 0.392, + 0.59 + ], + "angle": 0, + "content": "4.3.2 OOD generalization and rig-transfer:" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.469, + 0.764 + ], + "angle": 0, + "content": "We evaluate 3D-LFM's generalization to unseen object categories and rig configurations. Its accuracy is highlighted by successful 2D-3D lifting reconstructions of the \"Cheetah\" from Acinoset [10], which is not included in the typical Animal3D dataset [27], and the \"Train\" category from PASCAL3D+[26], absent during training. Qualitative reconstructions are shown in Fig. 5, along with the quantitative results in Tab.2 for above categories as well as in-the-wild category like a Chimpanzee from the MBW dataset [5] - which illustrates model's strong OOD generalization and capability to handle in-the-wild data." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Additionally, we show 3D-LFM's capability in transferring rig configurations between datasets, embodying the concept of generic geometry learning. By training on a 17-joint Human3.6M dataset [7] and testing on a 15-joint Panoptic Studio setup [9], our model gives accurate 3D reconstructions despite variations in joint arrangements. This capability is particularly interesting for its efficiency in utilizing data from multiple rigs of the same object, and underscores the model's adaptability, a cornerstone in pro" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.104, + 0.911, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.278, + 0.892, + 0.389 + ], + "angle": 0, + "content": "Figure 5. Generalization to unseen data: Figure showing 3D-LFM's proficiency in OOD 2D-3D lifting, effectively handling new, unseen categories, and rig generalization from Acinoset [10] PASCAL3D+ [26], and Panoptic studio [9] with varying joint arrangements in top row. 
The bottom row presents in-the-wild data from the MBW dataset [5], with red dots indicating input keypoints and blue stick figures showing the model's 3D predictions from different angles." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.416, + 0.892, + 0.522 + ], + "angle": 0, + "content": "cessing diverse human datasets. It aligns with the broader community's interest in versatile geometry learning, which makes these findings especially compelling. For a more thorough validation, we direct the reader to the ablation section, where qualitative visuals (Fig. 7) and quantitative analysis (Sec. 4.4.3) further highlight 3D-LFM's OOD generalization and rig transfer efficacy." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.532, + 0.604, + 0.545 + ], + "angle": 0, + "content": "4.4. Ablation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.555, + 0.892, + 0.69 + ], + "angle": 0, + "content": "In our ablation studies, we evaluate the 3D-LFM's design elements and their individual contributions to its performance. Detailed experiments on the Human3.6M benchmark [7] and a blend of other datasets including Animal3D [27] and facial datasets [9, 29] were carried out to ablate the role of Procrustean transformation, hybrid attention mechanisms, and tokenized positional encoding (TPE) in enabling the model's scalability and out-of-distribution (OOD) generalization." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.697, + 0.753, + 0.71 + ], + "angle": 0, + "content": "4.4.1 Procrustean Transformation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.9 + ], + "angle": 0, + "content": "3D-LFM's fusion of the procrustean approach, a first in transformer-based lifting frameworks, concentrates on deformable object components, as outlined in Sec.3.2. By focusing on shape within a standard canonical reference frame and avoiding rigid body transformations, we see faster learning and a decreased MPJPE, as evident by the gap between blue and orange lines in Fig. 6 (a) suggests. This fusion is crucial for learning 3D deformations, while utilizing transformers' equivariance. These findings suggest that even for transformers, avoiding rigid transformations' learning aids convergence, most notably with imbalanced datasets." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10472" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.092, + 0.27, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.09, + 0.465, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.215, + 0.47, + 0.286 + ], + "angle": 0, + "content": "Figure 6. (a) Comparing attention strategies in 3D-LFM. The combined local-global approach with procrustean alignment surpasses other configurations in MPJPE reduction over 100 epochs on the Human3.6M validation split. (b) rapid convergence and efficiency of the TPE approach compared to the learnable MLP" + }, + { + "type": "table_caption", + "bbox": [ + 0.081, + 0.301, + 0.465, + 0.315 + ], + "angle": 0, + "content": "Table 3. Impact of TPE on Data Imbalance and Rig Transfer" + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.326, + 0.47, + 0.425 + ], + "angle": 0, + "content": "
StudyExperimentModel SizeImprovement (%)
Data ImbalanceUnderrepr. category (Hippo) [27]1283.27
51212.28
102422.02
Rig Transfer17 [7]- to 15 [9]-jointN/A12
15 [9]- to 17 [7]-joint23.29
52 [9]- to 83 [29]-joint52.3
" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.451, + 0.371, + 0.466 + ], + "angle": 0, + "content": "4.4.2 Local-Global vs. Hybrid Attention" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.475, + 0.47, + 0.627 + ], + "angle": 0, + "content": "In evaluating 3D-LFM's attention strategies, our analysis on the same validation split as above demonstrates the superiority of a hybrid approach combining local (GA) and global (MHSA) attention mechanisms. This integration, particularly when complemented by Procrustean (OnP) alignment, significantly enhances performance and accelerates convergence, as evidenced in Fig. 6 (a). The distinct advantage of this hybrid system validates our architectural choices, showcasing its efficiency in reducing MPJPE errors and refining model training dynamics." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.635, + 0.351, + 0.65 + ], + "angle": 0, + "content": "4.4.3 Tokenized Positional Encoding:" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.659, + 0.47, + 0.902 + ], + "angle": 0, + "content": "This ablation study assesses the impact of Tokenized Positional Encoding (TPE), which uses analytical Random Fourier Features for encoding positional information. This study examines TPE's influence on model performance in scenarios of data imbalance and rig transfer generalization. Data imbalance study: When tested on the underrepresented hippo category from the Animal3D dataset [27], TPE based model showed a \\(3.27\\%\\) improvement in MPJPE over the baseline MLP with a 128-dimensional model performance as evident in first row of Tab. 3. This improvement grew with the model size. These results highlight TPE's scalability and its faster convergence, especially relevant in imbalanced, OOD scenarios as detailed in Fig. 6 (b). The observed performance boosts suggest that TPE's analytical nature might be more suited to adapting to novel data distributions. Increasing model size amplifies TPE's benefits," + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.098, + 0.871, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.273, + 0.892, + 0.329 + ], + "angle": 0, + "content": "Figure 7. The qualitative improvement in rig transfer using analytical TPE versus learnable MLP projection. This visualization reinforces the necessity of TPE in handling OOD data such as different rigs, unseen during training." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.354, + 0.892, + 0.399 + ], + "angle": 0, + "content": "hinting that its fixed analytical approach more adeptly handles OOD intricacies compared to learnable methods like MLPs, which may falter in such situations." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.4, + 0.893, + 0.552 + ], + "angle": 0, + "content": "Rig transfer study: Our rig transfer analysis, summarized in Table 3, showcases TPE's adaptability and effectiveness over the MLP baseline across different joint configurations and rig scenarios, with improvements up to \\(52.3\\%\\). These findings, particularly the significant performance boost in complex rig transfers, underscore TPE's robustness in OOD contexts. Figure 7 visually highlights the qualitative differences between TPE and MLP approaches in a rig transfer scenario, where the model trained on a 17-joint [7] configuration is tested on a 15 joint [9] setup." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.564, + 0.747, + 0.579 + ], + "angle": 0, + "content": "5. 
Discussion and Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.589, + 0.892, + 0.844 + ], + "angle": 0, + "content": "The proposed 3D-LFM marks a significant leap in 2D-3D lifting, showcasing scalability and adaptability, addressing data imbalance, and generalizing to new data categories. Its cross-category knowledge transfer requires further investigation and handling of inputs with different perspectives could act as potential limitations. 3D-LFM's efficiency is demonstrated by achieving results comparable to leading methods on [32] benchmark as well as its proficiency in out-of-distribution (OOD) scenarios on limited computational resources. For training duration and computational details, please refer to the supplementary materials. This work establishes a baseline framework for future 3D pose estimation and 3D reconstruction models. In summary, the 3D-LFM creates a universally applicable model for 3D reconstruction from 2D data, paving the way for diverse applications that requires accurate 3D reconstructions from 2D inputs." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.845, + 0.892, + 0.888 + ], + "angle": 0, + "content": "Acknowledgement: We extend our gratitude to Ian R. Fasel, Tim Clifford, Javier Movellan, and Matthias Hernandez of Apple for their insightful discussions." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10473" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Praneet C Bala, Benjamin R Eisenreich, Seng Bum Michael Yoo, Benjamin Y Hayden, Hyun Soo Park, and Jan Zimmermann. Openmonkeystudio: Automated markerless pose estimation in freely moving macaques. BioRxiv, pages 2020-01, 2020. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.186, + 0.472, + 0.254 + ], + "angle": 0, + "content": "[2] Christoph Bregler, Aaron Hertzmann, and Henning Biermann. Recovering non-rigid 3d shape from image streams. In Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. No. PR00662), pages 690-696. IEEE, 2000. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.256, + 0.471, + 0.297 + ], + "angle": 0, + "content": "[3] Zheng Chen and Yi Sun. Joint-wise 2d to 3d lifting for hand pose estimation from a single rgb image. Applied Intelligence, 53(6):6421-6431, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.298, + 0.471, + 0.367 + ], + "angle": 0, + "content": "[4] Mosam Dabhi, Chaoyang Wang, Kunal Saluja, László A Jeni, Ian Fasel, and Simon Lucey. High fidelity 3d reconstructions with limited physical views. In 2021 International Conference on 3D Vision (3DV), pages 1301-1311. IEEE, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.368, + 0.471, + 0.424 + ], + "angle": 0, + "content": "[5] Mosam Dabhi, Chaoyang Wang, Tim Clifford, László Jeni, Ian Fasel, and Simon Lucey. Mbw: Multi-view bootstrapping in the wild. Advances in Neural Information Processing Systems, 35:3039-3051, 2022. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.425, + 0.472, + 0.495 + ], + "angle": 0, + "content": "[6] Liuhao Ge, Zhou Ren, Yuncheng Li, Zehao Xue, Yingying Wang, Jianfei Cai, and Junsong Yuan. 3d hand shape and pose estimation from a single rgb image. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10833-10842, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.495, + 0.471, + 0.564 + ], + "angle": 0, + "content": "[7] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE transactions on pattern analysis and machine intelligence, 36(7):1325-1339, 2013. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.565, + 0.471, + 0.62 + ], + "angle": 0, + "content": "[8] Haorui Ji, Hui Deng, Yuchao Dai, and Hongdong Li. Unsupervised 3d pose estimation with non-rigid structure-from-motion modeling. arXiv preprint arXiv:2308.10705, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.621, + 0.471, + 0.704 + ], + "angle": 0, + "content": "[9] Hanbyul Joo, Hao Liu, Lei Tan, Lin Gui, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social motion capture. In Proceedings of the IEEE International Conference on Computer Vision, pages 3334-3342, 2015. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.47, + 0.789 + ], + "angle": 0, + "content": "[10] Daniel Joska, Liam Clark, Naoya Muramatsu, Ricardo Jericevich, Fred Nicolls, Alexander Mathis, Mackenzie W Mathis, and Amir Patel. Acinoset: a 3d pose estimation dataset and baseline models for cheetahs in the wild. In 2021 IEEE international conference on robotics and automation (ICRA), pages 13901-13908. IEEE, 2021. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.47, + 0.843 + ], + "angle": 0, + "content": "[11] Chen Kong and Simon Lucey. Deep non-rigid structure from motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1558-1567, 2019. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[12] Vincent Lepetit, Francesc Moreno-Noguer, and Pascal Fua. Ep n p: An accurate o (n) solution to the p np problem. International journal of computer vision, 81:155-166, 2009. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[13] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.177, + 0.894, + 0.259 + ], + "angle": 0, + "content": "[14] Sebastian Lutz, Richard Blythman, Koustav Ghosal, Matthew Moynihan, Ciaran Simms, and Aljosa Smolic. Jointformer: Single-frame lifting transformer with error prediction and refinement for 3d human pose estimation. In 2022 26th International Conference on Pattern Recognition (ICPR), pages 1156-1163. IEEE, 2022. 2, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.26, + 0.895, + 0.328 + ], + "angle": 0, + "content": "[15] Naureen Mahmood, Nima Ghorbani, Nikolaus F Troje, Gerard Pons-Moll, and Michael J Black. Amass: Archive of motion capture as surface shapes. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5442-5451, 2019. 
2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.329, + 0.895, + 0.385 + ], + "angle": 0, + "content": "[16] Julieta Martinez, Rayat Hossain, Javier Romero, and James J Little. A simple yet effective baseline for 3d human pose estimation. In Proceedings of the IEEE international conference on computer vision, pages 2640-2649, 2017. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.385, + 0.892, + 0.469 + ], + "angle": 0, + "content": "[17] Gyeongsik Moon, Shouu-I Yu, He Wen, Takaaki Shiratori, and Kyoung Mu Lee. Interhand2. 6m: A dataset and baseline for 3d interacting hand pose estimation from a single rgb image. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XX 16, pages 548-564. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.469, + 0.892, + 0.538 + ], + "angle": 0, + "content": "[18] David Novotny, Nikhila Ravi, Benjamin Graham, Natalia Neverova, and Andrea Vedaldi. C3dpo: Canonical 3d pose networks for non-rigid structure from motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7688-7697, 2019. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.539, + 0.892, + 0.594 + ], + "angle": 0, + "content": "[19] Guy Tevet, Brian Gordon, Amir Hertz, Amit H Bermano, and Daniel Cohen-Or. Motionclip: Exposing human motion generation to clip space. In European Conference on Computer Vision, pages 358–374. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.595, + 0.892, + 0.636 + ], + "angle": 0, + "content": "[20] Guy Tevet, Sigal Raab, Brian Gordon, Yonatan Shafir, Daniel Cohen-Or, and Amit H Bermano. Human motion diffusion model. arXiv preprint arXiv:2209.14916, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.636, + 0.892, + 0.692 + ], + "angle": 0, + "content": "[21] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.692, + 0.892, + 0.748 + ], + "angle": 0, + "content": "[22] Petar Velicković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. Graph attention networks. In International Conference on Learning Representations, 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.748, + 0.892, + 0.817 + ], + "angle": 0, + "content": "[23] Bastian Wandt, Marco Rudolph, Petrissa Zell, Helge Rhodin, and Bodo Rosenhahn. Canonpose: Self-supervised monocular 3d human pose estimation in the wild. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 13294-13304, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.818, + 0.892, + 0.873 + ], + "angle": 0, + "content": "[24] Chaoyang Wang and Simon Lucey. Paul: Procrustean autoencoder for unsupervised lifting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 434-443, 2021. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[25] Chaoyang Wang, Chen-Hsuan Lin, and Simon Lucey. Deep nrsfm++: Towards unsupervised 2d-3d lifting in the wild. 
In" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.895, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "10474" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "2020 International Conference on 3D Vision (3DV), pages 12-22. IEEE, 2020. 1, 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.177 + ], + "angle": 0, + "content": "[26] Yu Xiang, Roozbeh Mottaghi, and Silvio Savarese. Beyond Pascal: A benchmark for 3d object detection in the wild. In IEEE winter conference on applications of computer vision, pages 75-82. IEEE, 2014. 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.261 + ], + "angle": 0, + "content": "[27] Jiacong Xu, Yi Zhang, Jiawei Peng, Wufei Ma, Artur Jesslen, Pengliang Ji, Qixin Hu, Jiehua Zhang, Qihao Liu, Jiahao Wang, et al. Animal3d: A comprehensive dataset of 3d animal pose and shape. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9099-9109, 2023. 2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.263, + 0.469, + 0.319 + ], + "angle": 0, + "content": "[28] Haitian Zeng, Xin Yu, Jiaxu Miao, and Yi Yang. Mhr-net: Multiple-hypothesis reconstruction of non-rigid shapes from 2d views. In European Conference on Computer Vision, pages 1-17. Springer, 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.321, + 0.469, + 0.389 + ], + "angle": 0, + "content": "[29] Xing Zhang, Lijun Yin, Jeffrey F Cohn, Shaun Canavan, Michael Reale, Andy Horowitz, Peng Liu, and Jeffrey M Girard. Bp4d-spontaneous: a high-resolution spontaneous 3d dynamic facial expression database. Image and Vision Computing, 32(10):692-706, 2014. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.391, + 0.469, + 0.442 + ], + "angle": 0, + "content": "[30] Jianqiao Zheng, Xueqian Li, Sameera Ramasinghe, and Simon Lucey. Robust point cloud processing through positional embedding. arXiv preprint arXiv:2309.00339, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.446, + 0.469, + 0.5 + ], + "angle": 0, + "content": "[31] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: Unified pretraining for human motion analysis. arXiv preprint arXiv:2210.06551, 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.503, + 0.469, + 0.558 + ], + "angle": 0, + "content": "[32] Yue Zhu, Nermin Samet, and David Picard. H3wb: Human3. 6m 3d wholebody dataset and benchmark. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20166-20177, 2023. 
2, 5, 6, 8" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.558 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "10475" + } + ] +] \ No newline at end of file diff --git a/2024/3D-LFM_ Lifting Foundation Model/5d227142-e6b0-440e-bad4-facab1940a16_origin.pdf b/2024/3D-LFM_ Lifting Foundation Model/5d227142-e6b0-440e-bad4-facab1940a16_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..73464f95d0290a8cb1aa28dc85ffad6143d1cd50 --- /dev/null +++ b/2024/3D-LFM_ Lifting Foundation Model/5d227142-e6b0-440e-bad4-facab1940a16_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c1356d03a55fd3e113f361f3142a5f3efb957b838723b798e2f81c743ff5e48 +size 2427012 diff --git a/2024/3D-LFM_ Lifting Foundation Model/full.md b/2024/3D-LFM_ Lifting Foundation Model/full.md new file mode 100644 index 0000000000000000000000000000000000000000..11faacf02fe2daf3a9a3e14e15f1d67cc0b6e26b --- /dev/null +++ b/2024/3D-LFM_ Lifting Foundation Model/full.md @@ -0,0 +1,298 @@ +# 3D-LFM: Lifting Foundation Model + +Mosam Dabhi1 + +László A. Jeni $^{1*}$ + +Simon Lucey $^{2*}$ + +$^{1}$ Carnegie Mellon University + +2The University of Adelaide + +3dlfm.github.io + +![](images/a0af2091a3560911c3122fdad21a8a6e74a174a7bfb69e66fec5e5735c5b7729.jpg) +(a) Unified 2D-3D lifting for $30+$ categories. +(b) Dataset diversity visualization. + +![](images/a0bb2bcd1603376f67a0063ab026a40954d20b0e3bc36cb2befe1938d3dc99e1.jpg) +Figure 1. Overview: (a) This figure shows the 3D-LFM's ability in lifting 2D landmarks into 3D structures across an array of over 30 diverse categories, from human body parts, to a plethora of animals and everyday common objects. The lower portion shows the actual 3D reconstructions by our model, with red lines representing the ground truth and blue lines showing the 3D-LFM's predictions. (b) This figure displays the model's training data distribution on a logarithmic scale, highlighting that inspite of 3D-LFM being trained on imbalanced datasets, it preserves the performance across individual categories. + +# Abstract + +The lifting of a 3D structure and camera from 2D landmarks is at the cornerstone of the discipline of computer vision. Traditional methods have been confined to specific rigid objects, such as those in Perspective-n-Point (PnP) problems, but deep learning has expanded our capability to reconstruct a wide range of object classes (e.g. C3DPO [18] and PAUL [24]) with resilience to noise, occlusions, and perspective distortions. However, all these techniques have been limited by the fundamental need to establish correspondences across the 3D training data, significantly limiting their utility to applications where one has an abundance of "in-correspondence" 3D data. Our approach harnesses the inherent permutation equivariance of transformers to manage varying numbers of points per 3D data instance, withstand occlusions, and generalizes + +to unseen categories. We demonstrate state-of-the-art performance across 2D-3D lifting task benchmarks. Since our approach can be trained across such a broad class of structures, we refer to it simply as a 3D Lifting Foundation Model (3D-LFM) – the first of its kind. + +# 1. Introduction + +Lifting 2D landmarks from a single-view RGB image into 3D has long posed a complex challenge in the field of computer vision because of the ill-posed nature of the problem. 
This task is important for a range of applications from augmented reality to robotics, and requires an understanding of non-rigid spatial geometry and accurate object descriptions [2, 11, 25]. Historically, efforts in single-frame 2D-3D lifting have encountered significant hurdles: reliance on object-specific models, poor scalability, and limited adaptability to diverse and complex object categories. Traditional methods, while advancing in specific domains like human body [14, 16, 31] or hand modeling [3, 6], often fail when faced with the complexities of varying object types or object rigs (skeleton placements).

To facilitate such single-frame 2D-3D lifting, deep learning methods like C3DPO [18] and others [8, 11, 24, 25, 28] have recently been developed. However, these methods are fundamentally limited in that they must have knowledge of the object category and of how the 2D landmarks correspond semantically to the 2D/3D data they were trained upon. This becomes a serious drawback when scaling up to dozens or even hundreds of object categories with varying numbers of landmarks and configurations. This paper marks a departure from such correspondence constraints, introducing the 3D Lifting Foundation Model (3D-LFM), an object-agnostic single-frame 2D-3D lifting approach. At its core, 3D-LFM addresses the limitation of previous models, namely the inability to efficiently handle a wide array of object categories while maintaining high fidelity in 3D keypoint lifting from 2D data. We propose a solution rooted in the concept of permutation equivariance, a property that allows our model to autonomously establish correspondences among diverse sets of input 2D keypoints.

3D-LFM is capable of performing single-frame 2D-3D lifting for $30+$ categories using a single model simultaneously, covering everything from human forms [9, 15, 32], faces [29], hands [17], and animal species [1, 10, 27], to a plethora of inanimate objects found in everyday scenarios such as cars, furniture, etc. [26]. Importantly, 3D-LFM is inherently scalable, poised to expand to hundreds of categories and improve performance, especially in out-of-distribution or less-represented areas, showcasing its broad utility in 3D lifting tasks. 3D-LFM achieves 2D-3D lifting performance that matches that of leading methods specifically optimized for individual categories. The generalizability of 3D-LFM is further evident in its ability to handle out-of-distribution (OOD) object categories and rigs, which we refer to as OOD 2D-3D lifting, where the task is to lift the 2D landmarks to 3D for a category never seen during training. We show such OOD results: (1) for inanimate objects - by holding out an object category within the PASCAL dataset, (2) for animals - by training on common object categories such as dogs and cats found in [27] and reconstructing 3D for unseen and rare species such as the cheetahs found in [10] and the in-the-wild zoo captures from [5], and (3) by showing rig transfer, i.e., training 2D to 3D lifting on a Human3.6M dataset rig [7] and showing similar 2D to 3D lifting performance on previously unseen rigs such as the Panoptic Studio dataset rig [9] or a COCO dataset rig [13]. 3D-LFM transfers learnings from seen data during training to unseen OOD data during inference.
It does so by learning general structural features during the training phase via the proposed permutation equivariance properties and specific design choices that we discuss in the following sections.

Recognizing the important role geometry plays in 3D reconstruction [4, 5, 11, 18, 24, 25], we integrate Procrustean methods such as Orthographic-N-Point (OnP) or Perspective-N-Point (PnP) to direct the model's focus on deformable aspects within a canonical frame. This incorporation significantly reduces the computational burden on the model, freeing it from learning redundant rigid rotations and focusing its capabilities on capturing the true geometric essence of objects. Scalability, a critical aspect of our model, is addressed through the use of tokenized positional encoding (TPE), which, when combined with the graph-based transformer architecture, not only enhances the model's adaptability across diverse categories but also strengthens its ability to handle multiple categories with different numbers of keypoints and configurations. Finally, the use of skeleton information (joint connectivity) within the graph-based transformers via adjacency matrices provides strong clues about joint proximity and inherent connectivity, aiding in the handling of correspondences across varied object categories.

To the best of our knowledge, 3D-LFM is one of the only unified models capable of performing 2D-3D lifting for $30+$ (and potentially even more) categories simultaneously. Its ability to perform unified learning across a vast spectrum of object categories without specific object information, together with its handling of OOD scenarios, highlights its potential as one of the first models capable of serving as a 2D-3D lifting foundation model.

The contributions of this paper are threefold:

1. We propose a Procrustean transformer that is able to focus solely on learning the deformable aspects of objects within a single canonical frame whilst preserving permutation equivariance across 2D landmarks.
2. We integrate tokenized positional encoding within the graph-based transformer to enhance our approach's scalability and its capacity to handle diverse and imbalanced datasets.
3. We demonstrate that 3D-LFM surpasses state-of-the-art methods in categories such as humans, hands, and faces (benchmark in [32]). Additionally, it shows robust generalization by handling previously unseen objects and configurations, including animals ([5, 10]), inanimate objects ([26]), and novel object arrangements (rig transfer in [9]).

In subsequent sections, we explore the design and methodology of our proposed 3D-LFM architecture, including detailed ablation experiments and comparative analyses. Throughout this paper, 'keypoints', 'landmarks', and 'joints' are used interchangeably, referring to specific, identifiable points or locations on an object or figure that are crucial for understanding its structure and geometry.

![](images/a58284baea3895dbf1f68554ecc0f1b300ccd78394e37597d5bc76f25601e114.jpg)

![](images/fe824da252f65bd6270540aea6dbb12ff9392c0addeca4c929282512092910f2.jpg)
Figure 2. Overview of the 3D Lifting Foundation Model (3D-LFM) architecture: The process begins with the input 2D keypoints undergoing Token Positional Encoding (TPE) before being processed by a series of graph-based transformer layers. The resulting features are then decoded through an MLP into a canonical 3D shape. This shape is aligned to the ground truth (G.T.
3D) in the reference frame using a Procrustean method, with the Mean Squared Error (MSE) loss computed to guide the learning. The architecture captures both local and global contextual information, focusing on deformable structures while minimizing computational complexity.

# 2. Related works

The field of 2D-3D lifting has evolved substantially from classic works such as those based on Perspective-n-Point (PnP) algorithms [12]. In these early works, the algorithm was given a set of 2D landmarks and some 3D supervision, namely the known 3D rigid object. The field has since witnessed a paradigm shift with the introduction of deep learning methodologies, led by methods such as C3DPO [18], PAUL [24], and Deep NRSfM [11], along with recent transformer-based innovations such as NRSfM-Former [8]. In these approaches one does not need knowledge of the specific 3D object; instead, the method can get away with just the 2D landmarks and correspondences to an ensemble of 2D/3D data from the object category to be lifted. However, despite their recent success, all these methods still require that the 2D/3D data be in semantic correspondence. That is, the index to a specific landmark has the same semantic meaning across all instances (e.g. chair leg). In practice, this is quite limiting at run-time, as one needs intimate knowledge of the object category and rig in order to apply any of these current methods. Further, this dramatically limits the ability of these methods to leverage cross-object and cross-rig datasets, prohibiting the construction of a truly generalizable 2D to 3D lifting foundation model – a topic of central focus in this paper.

Recent literature in pose estimation, loosely connected to NRSfM but often more specialized towards human and animal body parts, has also seen remarkable progress. Models such as Jointformer [14] and SimpleBaseline [16] have refined the single-frame 2D-3D lifting process, while generative approaches like MotionCLIP [19] and the Human Motion Diffusion Model [20] have laid the groundwork for 3D generative motion-based foundation models. These approaches, however, are even more limiting than C3DPO, PAUL, etc. in that they are intimately wedded to the object class and are not easily extendable to an arbitrary object class.

# 3. Approach

Given a set of 2D keypoints representing the projection of an object's joints in an image, we denote the keypoints matrix as $\mathbf{W} \in \mathbb{R}^{N \times 2}$, where $N$ is the predetermined maximum number of joints considered across all object categories. For objects with fewer than $N$ joints, we introduce a masking mechanism that utilizes a binary mask matrix $\mathbf{M} \in \{0,1\}^N$, where each element $m_i$ of $\mathbf{M}$ is defined as:

$$
m_{i} = \begin{cases} 1 & \text{if joint } i \text{ is present} \\ 0 & \text{otherwise} \end{cases} \tag{1}
$$

The 3D lifting function $f: \mathbb{R}^{N \times 2} \to \mathbb{R}^{N \times 3}$ maps the 2D keypoints to their corresponding 3D structure while compensating for the projection:

$$
\mathbf{S} = f(\mathbf{W}) = \mathbf{W}\mathbf{R}^{\top} + \mathbf{b} \tag{2}
$$

where $\mathbf{R} \in \mathbb{R}^{3 \times 3}$ is the projection matrix (assumed either weak-perspective or orthographic) and $\mathbf{b} \in \mathbb{R}^{N \times 3}$ is a bias term that aligns the centroids of 2D and 3D keypoints.
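To make Eq. (1) concrete, the following is a minimal NumPy sketch of how a variable-length 2D keypoint set could be zero-padded to the fixed size $N$ together with its binary mask; the function and variable names are illustrative choices, not those of our implementation.

```python
import numpy as np

def pad_and_mask(keypoints_2d: np.ndarray, max_joints: int):
    """Zero-pad a (num_joints, 2) array of 2D keypoints to (max_joints, 2) and
    build the binary mask of Eq. (1): m_i = 1 if joint i is present, else 0."""
    num_joints = keypoints_2d.shape[0]
    W = np.zeros((max_joints, 2), dtype=np.float32)
    W[:num_joints] = keypoints_2d
    m = np.zeros(max_joints, dtype=np.float32)
    m[:num_joints] = 1.0
    return W, m

# Example: a 21-joint hand rig padded to an assumed maximum of N = 64 joints.
W, m = pad_and_mask(np.random.rand(21, 2).astype(np.float32), max_joints=64)
```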
Permutation Equivariance: To ensure scalability and adaptability across a diverse set of objects, we leverage the property of permutation equivariance inherent in transformer architectures. Permutation equivariance allows the model to process input keypoints $\mathbf{W}$ regardless of their order, a critical feature for handling objects with varying joint configurations:

$$
f(\mathcal{P}\mathbf{W}) = \mathcal{P}f(\mathbf{W})
$$

where $\mathcal{P}$ is a permutation matrix that reorders the keypoints.

Handling Missing Data: To address the challenge of missing data, we refer to the Deep NRSfM++ [25] work and use a masking mechanism to accommodate occlusions or absences of keypoints. Our binary mask matrix $\mathbf{M} \in \{0,1\}^N$ is applied in such a way that it not only pads the input data to a consistent size but also masks out missing or occluded points: $\mathbf{W}_m = \mathbf{W} \odot \mathbf{M}$, where $\odot$ denotes element-wise multiplication. To remove the effects of translation and ensure that our TPE features are generalizable, we zero-center the data by subtracting the mean of the visible keypoints:

$$
\mathbf{W}_{c} = \mathbf{W}_{m} - \operatorname{mean}\left(\mathbf{W}_{m}\right) \tag{3}
$$

We scale the zero-centered data to the range $[-1, 1]$ while preserving the aspect ratio to maintain the geometric integrity of the keypoints. For more details on handling missing data in the presence of perspective effects, we refer the reader to Deep NRSfM++ [25].

Token Positional Encoding: replaces the traditional Correspondence Positional Encoding (CPE) or Joint Embedding, which encodes the semantic correspondence information (as used in works such as [14, 31]), with a mechanism that does not require explicit correspondence or semantic information. Owing to the success of per-point positional embeddings, particularly random Fourier features [30], in handling OOD data, we compute the Token Positional Encoding (TPE) using analytical Random Fourier Features (RFF) as follows:

$$
\mathbf{TPE}\left(\mathbf{W}_{c}\right) = \sqrt{\frac{2}{D}}\left[\sin\left(\mathbf{W}_{c}\boldsymbol{\omega} + \mathbf{b}\right); \cos\left(\mathbf{W}_{c}\boldsymbol{\omega} + \mathbf{b}\right)\right] \tag{4}
$$

where $D$ is the dimensionality of the Fourier feature space, and $\boldsymbol{\omega} \in \mathbb{R}^{2 \times \frac{D}{2}}$ and $\mathbf{b} \in \mathbb{R}^{\frac{D}{2}}$ are parameters sampled from a normal distribution, scaled appropriately. These parameters are sampled once and kept fixed, as per the RFF methodology. The output of this transformation $\mathbf{TPE}(\mathbf{W}_c)$ is then fed into the graph-based transformer network as $\mathbf{X}^\ell$, where $\ell$ indicates the layer number (0 in the above case). This set of features is now ready for processing inside the graph-based transformer layers without the need for correspondence among the input keypoints. The TPE retains the property of permutation equivariance while implicitly encoding the relative positions of the keypoints.

# 3.1. Graph-based Transformer Architecture

Our graph-based transformer architecture utilizes a hybrid approach to feature aggregation by combining graph-based local attention [22] (L) with global self-attention mechanisms [21] (G) within a single layer (shown as the grey block in Fig. 2). This layer is replicated $L$ times, providing a sequential refinement of the feature representation across the network's depth.
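Before detailing the layer internals, here is a concrete reference for the preprocessing of Eq. (3) and the encoding of Eq. (4) as a minimal NumPy sketch. It is an illustration rather than our released code: the feature dimension $D = 256$, the unit-variance sampling of $\boldsymbol{\omega}$ and $\mathbf{b}$, and the max-absolute-value scaling to $[-1, 1]$ are assumptions made only for this example.

```python
import numpy as np

D = 256                                # assumed Fourier feature dimensionality
rng = np.random.default_rng(0)
omega = rng.normal(size=(2, D // 2))   # sampled once and kept fixed, as per RFF
b = rng.normal(size=(D // 2,))

def token_positional_encoding(W: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """W: (N, 2) padded 2D keypoints; mask: (N,) binary visibility from Eq. (1).
    Returns the (N, D) Token Positional Encoding of Eq. (4)."""
    m = mask[:, None]
    W_m = W * m                                        # mask out missing / occluded joints
    mean = W_m.sum(axis=0) / np.maximum(mask.sum(), 1.0)
    W_c = (W_m - mean) * m                             # zero-centering, Eq. (3)
    W_c = W_c / (np.abs(W_c).max() + 1e-8)             # one common scale keeps the aspect ratio
    proj = W_c @ omega + b                             # (N, D/2)
    return np.sqrt(2.0 / D) * np.concatenate([np.sin(proj), np.cos(proj)], axis=-1)

# Example: features X^0 for a 64-joint padded input with all joints visible.
X0 = token_positional_encoding(rng.uniform(-1.0, 1.0, size=(64, 2)), np.ones(64))
```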
Hybrid Feature Aggregation: For each layer $\ell$, ranging from 0 to $L$, the feature matrix $\mathbf{X}^{(\ell)} \in \mathbb{R}^{N \times D}$ is augmented through simultaneous local and global processing. The local processing component, $\mathrm{GA}(\mathbf{X}^{(\ell)}, \mathbf{A})$, leverages an adjacency matrix $\mathbf{A}$, which encodes the connectivity based on the object category, to perform graph-based attention on batches of nodes representing the input 2D data:

$$
\begin{array}{l} \mathbf{L}^{(\ell)} = \mathrm{GA}(\mathbf{X}^{(\ell)}, \mathbf{A}), \\ \mathbf{G}^{(\ell)} = \operatorname{MHSA}(\mathbf{X}^{(\ell)}) \end{array} \tag{5}
$$

Local and global features are concatenated to form a unified representation $\mathbf{U}^{(\ell)}$:

$$
\mathbf{U}^{(\ell)} = \operatorname{concat}\left(\mathbf{L}^{(\ell)}, \mathbf{G}^{(\ell)}\right) \tag{6}
$$

Following the concatenation, each layer applies layer normalization (LN) and a multilayer perceptron (MLP). The MLP employs a Gaussian Error Linear Unit (GeLU) as the nonlinearity function to enhance the model's expressive power:

$$
\begin{array}{l} \mathbf{X}^{\prime(\ell)} = \mathrm{LN}\left(\mathbf{U}^{(\ell)}\right) + \mathbf{U}^{(\ell)}, \\ \mathbf{X}^{(\ell+1)} = \mathrm{LN}\left(\operatorname{MLP\_GeLU}\left(\mathbf{X}^{\prime(\ell)}\right)\right) + \mathbf{X}^{\prime(\ell)} \end{array} \tag{7}
$$

Here, GA represents Graph Attention, MHSA denotes Multi-Head Self-Attention, and MLP_GeLU indicates our MLP with GeLU nonlinearity. This architecture is designed to learn patterns in 2D data by considering both the local neighborhood connectivity and the global context of the input 2D data, which is important for robust 2D to 3D structure lifting.

# 3.2. Procrustean Alignment

The final operation in our pipeline decodes the latent feature representation $\mathbf{X}^{(L)}$ into the predicted canonical structure $\mathbf{S}_c$ via a GeLU-activated MLP:

$$
\mathbf{S}_{c} = \operatorname{MLP}_{\text{shape decoder}}(\mathbf{X}^{(L)})
$$

Subsequently, we align $\mathbf{S}_c$ with the ground truth $\mathbf{S}_r$ via a Procrustean alignment method that optimizes for the rotation matrix $\mathbf{R}$. The alignment is formalized as a minimization problem:

$$
\underset{\mathbf{R}}{\text{minimize}} \quad \left\| \mathbf{M} \odot (\mathbf{S}_{r} - \mathbf{S}_{c}\mathbf{R}) \right\|_{F}^{2}
$$

where $\mathbf{M}$ is a binary mask applied element-wise, and $\|\cdot\|_{F}$ denotes the Frobenius norm.
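For concreteness, the sketch below shows how this masked alignment could be computed with the closed-form solution derived next (rotation via SVD, followed by scaling). It is a minimal NumPy illustration rather than our actual implementation, and the least-squares fit used for the scale factor $\gamma$ is an illustrative assumption.

```python
import numpy as np

def procrustean_align(S_c: np.ndarray, S_r: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """Align the predicted canonical shape S_c (N, 3) to the reference S_r (N, 3)
    under the visibility mask (N,), and return the scaled prediction S_p."""
    M = mask[:, None]
    # Rotation from the SVD of (M ⊙ S_c)^T S_r, as given in the closed form below.
    U, _, Vt = np.linalg.svd((M * S_c).T @ S_r)
    R = U @ Vt
    S_rot = S_c @ R
    # Scale factor gamma relative to S_r; a least-squares fit over the visible
    # joints is assumed here, since the exact choice is not spelled out above.
    gamma = np.sum((M * S_rot) * (M * S_r)) / (np.sum((M * S_rot) ** 2) + 1e-8)
    return gamma * S_rot
```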
The optimal $\mathbf{R}$ is obtained via SVD, which ensures the orthonormality constraint of the rotation matrix: + +$$ +\mathbf{U}, \boldsymbol{\Sigma}, \mathbf{V}^{\top} = \operatorname{SVD}\left((\mathbf{M} \odot \mathbf{S}_{c})^{\top} \mathbf{S}_{r}\right), \quad \mathbf{R} = \mathbf{U} \mathbf{V}^{\top} +$$ + +The predicted shape is then scaled relative to the reference shape $\mathbf{S}_r$, resulting in a scale factor $\gamma$, which yields the final predicted shape $\mathbf{S}_p$: + +$$ +\mathbf{S}_{p} = \gamma \cdot (\mathbf{S}_{c} \mathbf{R}) +$$ + +This Procrustean alignment step is crucial for directing the model's focus onto learning non-rigid shape deformations rather than rigid body dynamics, significantly enhancing its ability to capture the true geometric essence of objects. The effectiveness of this approach is confirmed by faster convergence and reduced error rates in our experiments, as detailed in Fig. 6. These observations align with the findings presented in PAUL [24]. + +# 3.3. Loss Function + +The optimization of our model relies on the Mean Squared Error (MSE) loss, which measures the difference between the predicted 3D points $\mathbf{S}_p$ and the ground truth $\mathbf{S}_r$: + +$$ +\mathcal{L}_{\mathrm{MSE}} = \frac{1}{N} \sum_{i=1}^{N} \left\| \mathbf{S}_{p}^{(i)} - \mathbf{S}_{r}^{(i)} \right\|^{2} \tag{8} +$$ + +Minimizing this loss across the $N$ points drives the model to reconstruct accurate 3D shapes from the input 2D landmarks. This minimization effectively calibrates the shape decoder and the Procrustean alignment to focus on the essential non-rigid characteristics of the objects, improving the accuracy of the 2D-3D lifting process. + +# 4. Results and Comparative Analysis + +Our evaluation shows the 3D Lifting Foundation Model (3D-LFM)'s capability in single-frame 2D-3D lifting across diverse object categories without object-specific data in Sec. 4.1. Following that, Sec. 4.2 highlights 3D-LFM's performance over specialized methods, especially achieving state-of-the-art performance on whole-body benchmarks [32] (Fig. 4). Additionally, Sec. 4.3 shows 3D-LFM's capability in 2D-3D lifting across 30 categories using a single unified model, enhancing category-specific performance and achieving out-of-distribution (OOD) generalization for object configurations unseen during training. Finally, the ablation studies in Sec. 4.4 validate our proposed Procrustean approach, token positional encoding, and the local-global hybrid attention mechanism in the transformer model, confirming their role in 3D-LFM's effectiveness in both single- and multiple-object scenarios. + +# 4.1. Multi-Object 3D Reconstruction + +Clarifying naming convention: The distinction between 'object-specific' and 'object-agnostic' refers to the training method. Object-specific training involves supplying semantic details for each object, leading to isolated training. Conversely, object-agnostic training combines various categories without explicit landmark semantics, leading to combined training. + +Experiment Rationale: 3D-LFM leverages permutation equivariance to accurately lift 2D keypoints into 3D structures across diverse categories, outperforming fixed-array methods by adapting flexibly to variable keypoint configurations.
It has been evaluated against non-rigid structure-from-motion approaches [11, 18, 24, 25] that require object-specific inputs, showing its ability to handle diverse categories. For a comprehensive benchmark, we utilize the PASCAL3D+ dataset [26], following C3DPO's [18] methodology, to include a variety of object categories. + +Performance: We benchmark 3D-LFM against the notable NRSfM method C3DPO [18] for multi-object 2D to 3D lifting with 3D supervision. C3DPO, similar to other contemporary methods [11, 24, 25, 28] requiring object-specific details, serves as an apt comparison due to its multi-category approach. Initially replicating conditions with object-specific information, 3D-LFM matches C3DPO's performance, as demonstrated in Fig. 3. This stage uses MPJPE to measure 3D lifting accuracy. C3DPO's training setup includes an $MN$-dimensional array for object details, where $M$ is the number of objects and $N$ is the maximum number of keypoints; in this setting, our model is trained separately on each object to avoid providing object-specific information. The 3D-LFM's strength emerges when object-specific data is withheld. While C3DPO shows a decline without such data, 3D-LFM maintains a lower MPJPE across categories, even when trained collectively across categories using only an $N$-dimensional array. These findings (Fig. 3) highlight 3D-LFM's capabilities, outperforming single-category training and demonstrating its potential as a generalized 2D to 3D lifting solution. + +![](images/1b39639aa55d9a08205ddba20f9f5b93d226a5c41f43b59e0ae37b4613d61f62.jpg) +Figure 3. 3D-LFM vs. C3DPO Performance: Using MPJPE comparisons on the PASCAL3D+ dataset, this figure demonstrates our model's adaptability in the absence of object-specific information, contrasting with C3DPO's increased error under the same conditions. The analysis confirms 3D-LFM's superiority across diverse object categories, reinforcing its potential for generalized 2D to 3D lifting. + +Table 1. Quantitative performance on H3WB: Our method demonstrates leading performance across multiple object categories without the need for object-specific designs. + +
| Method | Whole-body | Body | Face/Aligned | Hand/Aligned |
| --- | --- | --- | --- | --- |
| SimpleBaseline | 125.4 | 125.7 | 115.9 / 24.6 | 140.7 / 42.5 |
| CanonPose w/3D sv. | 117.7 | 117.5 | 112.0 / 17.9 | 126.9 / 38.3 |
| Large SimpleBaseline | 112.3 | 112.6 | 110.6 / 14.6 | 114.8 / 31.7 |
| Jointformer (extra data) | 81.5 | 78 | 60.4 / 16.2 | 117.6 / 38.8 |
| Jointformer | 88.3 | 84.9 | 66.5 / 17.8 | 125.3 / 43.7 |
| Ours | 64.13 | 60.83 | 56.55 / 10.44 | 78.21 / 28.22 |
| Ours – PA | 33.13 | 39.36 | 6.02 | 13.56 |
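For reference, the two error measures reported in Table 1 are commonly computed as sketched below. This is a generic illustration of MPJPE and Procrustes-aligned MPJPE (PA-MPJPE), not the H3WB evaluation code, and the similarity-alignment details are assumptions of the example.

```python
import numpy as np

def mpjpe(pred, gt):
    """Mean Per-Joint Position Error: mean Euclidean distance (here in mm)
    between predicted and ground-truth 3D joints, both of shape (N, 3)."""
    return np.linalg.norm(pred - gt, axis=-1).mean()

def pa_mpjpe(pred, gt):
    """MPJPE after a similarity (Procrustes) alignment of the prediction to the
    ground truth, removing global rotation, translation, and scale."""
    P, G = pred - pred.mean(0), gt - gt.mean(0)     # center both shapes
    U, _, Vt = np.linalg.svd(P.T @ G)               # optimal rotation via SVD
    R = U @ Vt
    if np.linalg.det(R) < 0:                        # avoid reflections
        U[:, -1] *= -1
        R = U @ Vt
    s = (G * (P @ R)).sum() / ((P @ R) ** 2).sum()  # least-squares scale
    return mpjpe(s * (P @ R) + gt.mean(0), gt)
```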
+ +# 4.2. Benchmark: Object-Specific Models + +Next, we benchmark 3D-LFM against leading specialized methods for the human body, face, and hand categories. Our model outperforms these specialized methods, demonstrating multi-category learning without the need for category (landmark) semantics. For this study, we evaluate on the H3WB dataset [32], a recent benchmark for diverse whole-body pose estimation tasks. This dataset is valuable for its inclusion of multiple object categories and for providing a comparative baseline against methods such as Jointformer [14], SimpleBaseline [16], and CanonPose [23]. Following H3WB's recommended 5-fold cross-validation and submitting the evaluations to the benchmark's authors, we report results on the hidden test set. The results shown in Fig. 4 and Table 1 include PA-MPJPE and MPJPE, with test set performance numbers provided directly by the H3WB team, ensuring that our results are verified by an independent third party. + +![](images/b43f2c5b144c87bfb124cc4c794454de53e59e397131c8933800fc05d04661ba.jpg) +Figure 4. Performance Comparison on H3WB Benchmark: This chart contrasts MPJPE errors for whole-body, body, face, aligned face, hand, and aligned hand categories within the H3WB benchmark [32]. Our models, with and without Procrustes Alignment (Ours-PA), outperform current state-of-the-art (SOTA) methods, validating our approach's proficiency in 2D to 3D lifting tasks. + +# 4.3. Towards foundation model + +In this section, we highlight 3D-LFM's role as a foundational model for varied 2D-3D lifting, capable of managing multiple object types and data imbalances. We explore 3D-LFM's scalability for collective dataset training (Sec. 4.3.1) and its generalization to new categories and rig transfer capabilities (Sec. 4.3.2). These studies validate the 3D-LFM's role as a foundation model, capable of leveraging diverse data without requiring specific configurations, thus simplifying the 3D lifting process for varied joint setups. + +We start this investigation by showing the capability of 3D-LFM in handling 2D-3D lifting for $30+$ object categories within a single model, confirming the model's capability to manage imbalanced datasets representative of real-world scenarios, as shown in Fig. 1. With a comprehensive range of human, hand, face, inanimate-object, and animal datasets, the 3D-LFM is shown to be scalable without requiring category-specific adjustments. The subsequent subsections dissect these attributes further, discussing the 3D-LFM's foundational potential in the 3D lifting domain. + +# 4.3.1 Combined Dataset Training + +This study evaluates the 3D-LFM's performance on isolated datasets against its performance on a combined dataset. Initially, the model was trained separately on animal-based supercategory datasets, specifically OpenMonkey [1] and Animal3D [27]. Subsequently, it was trained on a merged dataset containing a broad spectrum of object categories, including Human Body-Based datasets such as AMASS [15] + +Table 2. Quantitative evaluation for OOD scenarios. + +
| Category | OOD (mm) | In-Dist. (mm) |
| --- | --- | --- |
| Cheetah | 26.59 | 10.16 |
| Train | 6.88 | 5.71 |
| Chimpanzee | 52.05 | 42.65 |
and Human3.6M [7], Hand-Based datasets such as Panoptic Hands [9], Face-Based datasets like BP4D+ [29], and various Inanimate Objects from the PASCAL3D+ dataset [26], along with the previously mentioned animal datasets. Isolated training resulted in an average MPJPE of $21.22 \, \text{mm}$, while the combined training method significantly reduced the MPJPE to $12.5 \, \text{mm}$ on the same animal supercategory validation split. This improvement confirms the potential of 3D-LFM as a pre-training framework and underscores its ability to adapt and generalize from diverse and extensive data collections. + +Dataset Selection Rationale: We selected animal-based supercategory datasets to demonstrate combined training's impact on underrepresented categories. We observed greater performance improvements in smaller, unbalanced datasets (as exemplified by PASCAL3D+: from 4.31 mm to 1.1 mm, and OpenMonkey: from 19.45 mm to 9.59 mm) compared to larger datasets with sufficient balance among categories. Consequently, we see minimal gains in more balanced, larger datasets like AMASS (from 1.67 mm to 1.66 mm), underscoring the utility of combined training for enhancing performance in underrepresented and long-tail categories. + +# 4.3.2 OOD Generalization and Rig Transfer + +We evaluate 3D-LFM's generalization to unseen object categories and rig configurations. Its accuracy is highlighted by successful 2D-3D lifting reconstructions of the "Cheetah" from Acinoset [10], which is not included in the typical Animal3D dataset [27], and the "Train" category from PASCAL3D+ [26], absent during training. Qualitative reconstructions are shown in Fig. 5, along with the quantitative results in Tab. 2 for the above categories, as well as an in-the-wild category, a chimpanzee from the MBW dataset [5], which illustrates the model's strong OOD generalization and its capability to handle in-the-wild data. + +Additionally, we show 3D-LFM's capability in transferring rig configurations between datasets, embodying the concept of generic geometry learning. By training on a 17-joint Human3.6M dataset [7] and testing on a 15-joint Panoptic Studio setup [9], our model gives accurate 3D reconstructions despite variations in joint arrangements. This capability is particularly interesting for its efficiency in utilizing data from multiple rigs of the same object, and underscores the model's adaptability, a cornerstone in processing diverse human datasets. It aligns with the broader community's interest in versatile geometry learning, which makes these findings especially compelling. For a more thorough validation, we direct the reader to the ablation section, where qualitative visuals (Fig. 7) and quantitative analysis (Sec. 4.4.3) further highlight 3D-LFM's OOD generalization and rig transfer efficacy. + +![](images/6a0d2e8953ff1370868413af3bf215fb09ab8155ed5eb35f8361ceb6e70f669a.jpg) +Figure 5. Generalization to unseen data: Figure showing 3D-LFM's proficiency in OOD 2D-3D lifting, effectively handling new, unseen categories and rig generalization from Acinoset [10], PASCAL3D+ [26], and Panoptic Studio [9] with varying joint arrangements in the top row. The bottom row presents in-the-wild data from the MBW dataset [5], with red dots indicating input keypoints and blue stick figures showing the model's 3D predictions from different angles. + +# 4.4. Ablation + +In our ablation studies, we evaluate the 3D-LFM's design elements and their individual contributions to its performance.
Detailed experiments on the Human3.6M benchmark [7] and a blend of other datasets, including Animal3D [27] and facial datasets [9, 29], were carried out to ablate the role of the Procrustean transformation, hybrid attention mechanisms, and tokenized positional encoding (TPE) in enabling the model's scalability and out-of-distribution (OOD) generalization. + +# 4.4.1 Procrustean Transformation + +3D-LFM's fusion of the Procrustean approach, a first in transformer-based lifting frameworks, concentrates on deformable object components, as outlined in Sec. 3.2. By focusing on shape within a standard canonical reference frame and avoiding rigid body transformations, we see faster learning and a decreased MPJPE, as the gap between the blue and orange lines in Fig. 6 (a) suggests. This fusion is crucial for learning 3D deformations while utilizing the transformer's equivariance. These findings suggest that, even for transformers, avoiding the learning of rigid transformations aids convergence, most notably with imbalanced datasets. + +![](images/6fac01a11b6aeb1fd8a0508795cfe84e2bc854407acdaefe15610bfbc38912d3.jpg) +Figure 6. (a) Comparing attention strategies in 3D-LFM. The combined local-global approach with Procrustean alignment surpasses other configurations in MPJPE reduction over 100 epochs on the Human3.6M validation split. (b) Rapid convergence and efficiency of the TPE approach compared to the learnable MLP. + +![](images/ceac275bcf83e9bd7c4ff7657041a1462ccc14eb5cc983d6881f2cf0024272c1.jpg) + +Table 3. Impact of TPE on Data Imbalance and Rig Transfer + +
| Study | Experiment | Model Size | Improvement (%) |
| --- | --- | --- | --- |
| Data Imbalance | Underrepr. category (Hippo) [27] | 128 | 3.27 |
| | | 512 | 12.28 |
| | | 1024 | 22.02 |
| Rig Transfer | 17 [7]- to 15 [9]-joint | N/A | 12 |
| | 15 [9]- to 17 [7]-joint | | 23.29 |
| | 52 [9]- to 83 [29]-joint | | 52.3 |
+ +# 4.4.2 Local-Global vs. Hybrid Attention + +In evaluating 3D-LFM's attention strategies, our analysis on the same validation split as above demonstrates the superiority of a hybrid approach combining local (GA) and global (MHSA) attention mechanisms. This integration, particularly when complemented by Procrustean (OnP) alignment, significantly enhances performance and accelerates convergence, as evidenced in Fig. 6 (a). The distinct advantage of this hybrid system validates our architectural choices, showcasing its efficiency in reducing MPJPE errors and refining model training dynamics. + +# 4.4.3 Tokenized Positional Encoding + +This ablation study assesses the impact of Tokenized Positional Encoding (TPE), which uses analytical Random Fourier Features to encode positional information, examining its influence on model performance under data imbalance and rig-transfer generalization. + +Data imbalance study: When tested on the underrepresented hippo category from the Animal3D dataset [27], the TPE-based model showed a $3.27\%$ improvement in MPJPE over the learnable MLP baseline at a 128-dimensional model size, as evident in the first row of Tab. 3. This improvement grew with the model size. These results highlight TPE's scalability and its faster convergence, especially relevant in imbalanced, OOD scenarios, as detailed in Fig. 6 (b). The observed performance boosts suggest that TPE's analytical nature might be more suited to adapting to novel data distributions. Increasing model size amplifies TPE's benefits, hinting that its fixed analytical approach more adeptly handles OOD intricacies compared to learnable methods like MLPs, which may falter in such situations. + +![](images/c5d67503874aeacdc82b0cf2bc01e81d58acfb7b55c19e3d744ca903ab946d39.jpg) +Figure 7. The qualitative improvement in rig transfer using analytical TPE versus a learnable MLP projection. This visualization reinforces the necessity of TPE in handling OOD data such as different rigs unseen during training. + +Rig transfer study: Our rig transfer analysis, summarized in Table 3, showcases TPE's adaptability and effectiveness over the MLP baseline across different joint configurations and rig scenarios, with improvements of up to $52.3\%$. These findings, particularly the significant performance boost in complex rig transfers, underscore TPE's robustness in OOD contexts. Figure 7 visually highlights the qualitative differences between the TPE and MLP approaches in a rig transfer scenario, where the model trained on a 17-joint [7] configuration is tested on a 15-joint [9] setup. + +# 5. Discussion and Conclusion + +The proposed 3D-LFM marks a significant leap in 2D-3D lifting, showcasing scalability and adaptability, addressing data imbalance, and generalizing to new data categories. Its cross-category knowledge transfer requires further investigation, and the handling of inputs with differing perspective effects remains a potential limitation. 3D-LFM's efficiency is demonstrated by results comparable to leading methods on the H3WB benchmark [32], as well as by its proficiency in out-of-distribution (OOD) scenarios, achieved with limited computational resources. For training duration and computational details, please refer to the supplementary materials. This work establishes a baseline framework for future 3D pose estimation and 3D reconstruction models.
In summary, the 3D-LFM constitutes a universally applicable model for 3D reconstruction from 2D data, paving the way for diverse applications that require accurate 3D reconstructions from 2D inputs. + +Acknowledgement: We extend our gratitude to Ian R. Fasel, Tim Clifford, Javier Movellan, and Matthias Hernandez of Apple for their insightful discussions. + +# References + +[1] Praneet C Bala, Benjamin R Eisenreich, Seng Bum Michael Yoo, Benjamin Y Hayden, Hyun Soo Park, and Jan Zimmermann. Openmonkeystudio: Automated markerless pose estimation in freely moving macaques. BioRxiv, pages 2020-01, 2020. 2, 6 +[2] Christoph Bregler, Aaron Hertzmann, and Henning Biermann. Recovering non-rigid 3d shape from image streams. In Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. No. PR00662), pages 690-696. IEEE, 2000. 1 +[3] Zheng Chen and Yi Sun. Joint-wise 2d to 3d lifting for hand pose estimation from a single rgb image. Applied Intelligence, 53(6):6421-6431, 2023. 2 +[4] Mosam Dabhi, Chaoyang Wang, Kunal Saluja, László A Jeni, Ian Fasel, and Simon Lucey. High fidelity 3d reconstructions with limited physical views. In 2021 International Conference on 3D Vision (3DV), pages 1301-1311. IEEE, 2021. 2 +[5] Mosam Dabhi, Chaoyang Wang, Tim Clifford, László Jeni, Ian Fasel, and Simon Lucey. Mbw: Multi-view bootstrapping in the wild. Advances in Neural Information Processing Systems, 35:3039-3051, 2022. 2, 7 +[6] Liuhao Ge, Zhou Ren, Yuncheng Li, Zehao Xue, Yingying Wang, Jianfei Cai, and Junsong Yuan. 3d hand shape and pose estimation from a single rgb image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10833-10842, 2019. 2 +[7] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE transactions on pattern analysis and machine intelligence, 36(7):1325-1339, 2013. 2, 7, 8 +[8] Haorui Ji, Hui Deng, Yuchao Dai, and Hongdong Li. Unsupervised 3d pose estimation with non-rigid structure-from-motion modeling. arXiv preprint arXiv:2308.10705, 2023. 2, 3 +[9] Hanbyul Joo, Hao Liu, Lei Tan, Lin Gui, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social motion capture. In Proceedings of the IEEE International Conference on Computer Vision, pages 3334-3342, 2015. 2, 7, 8 +[10] Daniel Joska, Liam Clark, Naoya Muramatsu, Ricardo Jericevich, Fred Nicolls, Alexander Mathis, Mackenzie W Mathis, and Amir Patel. Acinoset: a 3d pose estimation dataset and baseline models for cheetahs in the wild. In 2021 IEEE international conference on robotics and automation (ICRA), pages 13901-13908. IEEE, 2021. 2, 7 +[11] Chen Kong and Simon Lucey. Deep non-rigid structure from motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1558-1567, 2019. 1, 2, 3, 5 +[12] Vincent Lepetit, Francesc Moreno-Noguer, and Pascal Fua. EPnP: An accurate O(n) solution to the PnP problem. International journal of computer vision, 81:155-166, 2009. 3 + +[13] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014.
2 +[14] Sebastian Lutz, Richard Blythman, Koustav Ghosal, Matthew Moynihan, Ciaran Simms, and Aljosa Smolic. Jointformer: Single-frame lifting transformer with error prediction and refinement for 3d human pose estimation. In 2022 26th International Conference on Pattern Recognition (ICPR), pages 1156-1163. IEEE, 2022. 2, 3, 4, 6 +[15] Naureen Mahmood, Nima Ghorbani, Nikolaus F Troje, Gerard Pons-Moll, and Michael J Black. Amass: Archive of motion capture as surface shapes. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5442-5451, 2019. 2, 6 +[16] Julieta Martinez, Rayat Hossain, Javier Romero, and James J Little. A simple yet effective baseline for 3d human pose estimation. In Proceedings of the IEEE international conference on computer vision, pages 2640-2649, 2017. 2, 3, 6 +[17] Gyeongsik Moon, Shouu-I Yu, He Wen, Takaaki Shiratori, and Kyoung Mu Lee. Interhand2. 6m: A dataset and baseline for 3d interacting hand pose estimation from a single rgb image. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XX 16, pages 548-564. Springer, 2020. 2 +[18] David Novotny, Nikhila Ravi, Benjamin Graham, Natalia Neverova, and Andrea Vedaldi. C3dpo: Canonical 3d pose networks for non-rigid structure from motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7688-7697, 2019. 1, 2, 3, 5 +[19] Guy Tevet, Brian Gordon, Amir Hertz, Amit H Bermano, and Daniel Cohen-Or. Motionclip: Exposing human motion generation to clip space. In European Conference on Computer Vision, pages 358–374. Springer, 2022. 3 +[20] Guy Tevet, Sigal Raab, Brian Gordon, Yonatan Shafir, Daniel Cohen-Or, and Amit H Bermano. Human motion diffusion model. arXiv preprint arXiv:2209.14916, 2022. 3 +[21] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 4 +[22] Petar Velicković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. Graph attention networks. In International Conference on Learning Representations, 2018. 4 +[23] Bastian Wandt, Marco Rudolph, Petrissa Zell, Helge Rhodin, and Bodo Rosenhahn. Canonpose: Self-supervised monocular 3d human pose estimation in the wild. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 13294-13304, 2021. 6 +[24] Chaoyang Wang and Simon Lucey. Paul: Procrustean autoencoder for unsupervised lifting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 434-443, 2021. 1, 2, 3, 5 +[25] Chaoyang Wang, Chen-Hsuan Lin, and Simon Lucey. Deep nrsfm++: Towards unsupervised 2d-3d lifting in the wild. In + +2020 International Conference on 3D Vision (3DV), pages 12-22. IEEE, 2020. 1, 2, 4, 5 +[26] Yu Xiang, Roozbeh Mottaghi, and Silvio Savarese. Beyond Pascal: A benchmark for 3d object detection in the wild. In IEEE winter conference on applications of computer vision, pages 75-82. IEEE, 2014. 2, 5, 7 +[27] Jiacong Xu, Yi Zhang, Jiawei Peng, Wufei Ma, Artur Jesslen, Pengliang Ji, Qixin Hu, Jiehua Zhang, Qihao Liu, Jiahao Wang, et al. Animal3d: A comprehensive dataset of 3d animal pose and shape. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9099-9109, 2023. 2, 6, 7, 8 +[28] Haitian Zeng, Xin Yu, Jiaxu Miao, and Yi Yang. 
Mhr-net: Multiple-hypothesis reconstruction of non-rigid shapes from 2d views. In European Conference on Computer Vision, pages 1-17. Springer, 2022. 2, 5 +[29] Xing Zhang, Lijun Yin, Jeffrey F Cohn, Shaun Canavan, Michael Reale, Andy Horowitz, Peng Liu, and Jeffrey M Girard. Bp4d-spontaneous: a high-resolution spontaneous 3d dynamic facial expression database. Image and Vision Computing, 32(10):692-706, 2014. 2, 7, 8 +[30] Jianqiao Zheng, Xueqian Li, Sameera Ramasinghe, and Simon Lucey. Robust point cloud processing through positional embedding. arXiv preprint arXiv:2309.00339, 2023. 4 +[31] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: Unified pretraining for human motion analysis. arXiv preprint arXiv:2210.06551, 2022. 2, 4 +[32] Yue Zhu, Nermin Samet, and David Picard. H3wb: Human3. 6m 3d wholebody dataset and benchmark. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20166-20177, 2023. 2, 5, 6, 8 \ No newline at end of file diff --git a/2024/3D-LFM_ Lifting Foundation Model/images.zip b/2024/3D-LFM_ Lifting Foundation Model/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..8da28ca1d4b84024644168de8fe3713df0f6aeba --- /dev/null +++ b/2024/3D-LFM_ Lifting Foundation Model/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:767168d764d7364c39d63a17048e6c62f67779bc2620e7d0fae9db85b67ec7e8 +size 389706 diff --git a/2024/3D-LFM_ Lifting Foundation Model/layout.json b/2024/3D-LFM_ Lifting Foundation Model/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a7bd385a87b3e6a8d2597a8229e0b86f329c6efe --- /dev/null +++ b/2024/3D-LFM_ Lifting Foundation Model/layout.json @@ -0,0 +1,7054 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 184, + 103, + 410, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 103, + 410, + 121 + ], + "spans": [ + { + "bbox": [ + 184, + 103, + 410, + 121 + ], + "type": "text", + "content": "3D-LFM: Lifting Foundation Model" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 163, + 142, + 239, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 142, + 239, + 156 + ], + "spans": [ + { + "bbox": [ + 163, + 142, + 239, + 156 + ], + "type": "text", + "content": "Mosam Dabhi1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 261, + 143, + 340, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 143, + 340, + 157 + ], + "spans": [ + { + "bbox": [ + 261, + 143, + 340, + 157 + ], + "type": "text", + "content": "László A. 
Jeni" + }, + { + "bbox": [ + 261, + 143, + 340, + 157 + ], + "type": "inline_equation", + "content": "^{1*}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 358, + 143, + 433, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 143, + 433, + 158 + ], + "spans": [ + { + "bbox": [ + 358, + 143, + 433, + 158 + ], + "type": "text", + "content": "Simon Lucey" + }, + { + "bbox": [ + 358, + 143, + 433, + 158 + ], + "type": "inline_equation", + "content": "^{2*}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 147, + 162, + 288, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 162, + 288, + 177 + ], + "spans": [ + { + "bbox": [ + 147, + 162, + 288, + 177 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 147, + 162, + 288, + 177 + ], + "type": "text", + "content": "Carnegie Mellon University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 309, + 162, + 446, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 162, + 446, + 177 + ], + "spans": [ + { + "bbox": [ + 309, + 162, + 446, + 177 + ], + "type": "text", + "content": "2The University of Adelaide" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 253, + 184, + 337, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 184, + 337, + 195 + ], + "spans": [ + { + "bbox": [ + 253, + 184, + 337, + 195 + ], + "type": "text", + "content": "3dlfm.github.io" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 50, + 204, + 394, + 398 + ], + "blocks": [ + { + "bbox": [ + 50, + 204, + 394, + 398 + ], + "lines": [ + { + "bbox": [ + 50, + 204, + 394, + 398 + ], + "spans": [ + { + "bbox": [ + 50, + 204, + 394, + 398 + ], + "type": "image", + "image_path": "a0af2091a3560911c3122fdad21a8a6e74a174a7bfb69e66fec5e5735c5b7729.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 148, + 402, + 296, + 412 + ], + "lines": [ + { + "bbox": [ + 148, + 402, + 296, + 412 + ], + "spans": [ + { + "bbox": [ + 148, + 402, + 296, + 412 + ], + "type": "text", + "content": "(a) Unified 2D-3D lifting for " + }, + { + "bbox": [ + 148, + 402, + 296, + 412 + ], + "type": "inline_equation", + "content": "30+" + }, + { + "bbox": [ + 148, + 402, + 296, + 412 + ], + "type": "text", + "content": " categories." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 414, + 402, + 525, + 412 + ], + "lines": [ + { + "bbox": [ + 414, + 402, + 525, + 412 + ], + "spans": [ + { + "bbox": [ + 414, + 402, + 525, + 412 + ], + "type": "text", + "content": "(b) Dataset diversity visualization." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 402, + 236, + 544, + 370 + ], + "blocks": [ + { + "bbox": [ + 402, + 236, + 544, + 370 + ], + "lines": [ + { + "bbox": [ + 402, + 236, + 544, + 370 + ], + "spans": [ + { + "bbox": [ + 402, + 236, + 544, + 370 + ], + "type": "image", + "image_path": "a0bb2bcd1603376f67a0063ab026a40954d20b0e3bc36cb2befe1938d3dc99e1.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 422, + 547, + 477 + ], + "lines": [ + { + "bbox": [ + 45, + 422, + 547, + 477 + ], + "spans": [ + { + "bbox": [ + 45, + 422, + 547, + 477 + ], + "type": "text", + "content": "Figure 1. 
Overview: (a) This figure shows the 3D-LFM's ability in lifting 2D landmarks into 3D structures across an array of over 30 diverse categories, from human body parts, to a plethora of animals and everyday common objects. The lower portion shows the actual 3D reconstructions by our model, with red lines representing the ground truth and blue lines showing the 3D-LFM's predictions. (b) This figure displays the model's training data distribution on a logarithmic scale, highlighting that inspite of 3D-LFM being trained on imbalanced datasets, it preserves the performance across individual categories." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 143, + 487, + 192, + 499 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 487, + 192, + 499 + ], + "spans": [ + { + "bbox": [ + 143, + 487, + 192, + 499 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 512, + 290, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 512, + 290, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 512, + 290, + 693 + ], + "type": "text", + "content": "The lifting of a 3D structure and camera from 2D landmarks is at the cornerstone of the discipline of computer vision. Traditional methods have been confined to specific rigid objects, such as those in Perspective-n-Point (PnP) problems, but deep learning has expanded our capability to reconstruct a wide range of object classes (e.g. C3DPO [18] and PAUL [24]) with resilience to noise, occlusions, and perspective distortions. However, all these techniques have been limited by the fundamental need to establish correspondences across the 3D training data, significantly limiting their utility to applications where one has an abundance of \"in-correspondence\" 3D data. Our approach harnesses the inherent permutation equivariance of transformers to manage varying numbers of points per 3D data instance, withstand occlusions, and generalizes" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 488, + 547, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 488, + 547, + 549 + ], + "spans": [ + { + "bbox": [ + 304, + 488, + 547, + 549 + ], + "type": "text", + "content": "to unseen categories. We demonstrate state-of-the-art performance across 2D-3D lifting task benchmarks. Since our approach can be trained across such a broad class of structures, we refer to it simply as a 3D Lifting Foundation Model (3D-LFM) – the first of its kind." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 562, + 387, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 562, + 387, + 574 + ], + "spans": [ + { + "bbox": [ + 306, + 562, + 387, + 574 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 581, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 547, + 715 + ], + "type": "text", + "content": "Lifting 2D landmarks from a single-view RGB image into 3D has long posed a complex challenge in the field of computer vision because of the ill-posed nature of the problem. This task is important for a range of applications from augmented reality to robotics, and requires an understanding of non-rigid spatial geometry and accurate object descriptions [2, 11, 25]. 
Historically, efforts in single-frame 2D-3D lifting have encountered significant hurdles: reliance on object-specific models, poor scalability, and limited adaptability to diverse and complex object categories. Traditional methods, while advancing in specific domains like human" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 700, + 159, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 700, + 159, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 700, + 159, + 713 + ], + "type": "text", + "content": "* Both authors advised equally." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "10466" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "type": "text", + "content": "body [14, 16, 31] or hand modeling [3, 6], often fail when faced with the complexities of varying object types or object rigs (skeleton placements)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 108, + 287, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 108, + 287, + 346 + ], + "spans": [ + { + "bbox": [ + 46, + 108, + 287, + 346 + ], + "type": "text", + "content": "To facilitate such single-frame 2D-3D lifting, deep learning methods like C3DPO [18] and others [8, 11, 24, 25, 28] have recently been developed. However, these methods are fundamentally limited in that they must have knowledge of the object category and how the 2D landmarks correspond semantically to the 2D/3D data it was trained upon. Further, this represents a drawback, especially when considering their scaling up to dozens or even hundreds of object categories, with varying numbers of landmarks and configurations. This paper marks a departure from such correspondence constraints, introducing the 3D Lifting Foundation Model (3D-LFM), an object-agnostic single frame 2D-3D lifting approach. At its core, 3D-LFM addresses the limitation of previous models, which is the inability to efficiently handle a wide array of object categories while maintaining high fidelity in 3D keypoint lifting from 2D data. We propose a solution rooted in the concept of permutation equivariance, a property that allows our model to autonomously establish correspondences among diverse sets of input 2D keypoints." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 347, + 288, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 347, + 288, + 706 + ], + "spans": [ + { + "bbox": [ + 46, + 347, + 288, + 706 + ], + "type": "text", + "content": "3D-LFM is capable of performing single frame 2D-3D lifting for " + }, + { + "bbox": [ + 46, + 347, + 288, + 706 + ], + "type": "inline_equation", + "content": "30+" + }, + { + "bbox": [ + 46, + 347, + 288, + 706 + ], + "type": "text", + "content": " categories using a single model simultaneously, covering everything from human forms [9, 15, 32], face [29], hands [17], and animal species [1, 10, 27], to a plethora of inanimate objects found in everyday scenarios such as cars, furniture, etc. [26]. Importantly, 3D-LFM is inherently scalable, poised to expand to hundreds of categories and improve performance, especially in out-of-distribution or less-represented areas, showcasing its broad utility in 3D lifting tasks. 3D-LFM is able to achieve 2D-3D lifting performance that matches those of leading methods specifically optimized for individual categories. The generalizability of 3D LFM is further evident in its ability to handle out-of-distribution (OOD) object categories and rigs, which we refer to as OOD 2D-3D lifting, where the task is to lift the 2D landmarks to 3D for a category never seen during training. We show such OOD results: (1) for inanimate objects - by holding out an object category within the PASCAL dataset, (2) for animals - by training on common object categories such as dogs and cats found in [27] and reconstructing 3D for unseen and rare species of Cheetahs found in [10] and in-the-wild zoo captures from [5], and (3) by showing rig transfer, i.e. training 2D to 3D lifting on a Human3.6M dataset rig [7] and showing similar 2D to 3D lifting performance on previously unseen rigs such as those found in Panoptic studio dataasset rig [9] or a COCO dataset rig [13]. 3D-LFM transfers learnings from seen data during training to unseen OOD data during inference. It does so by learning general structural features during the training phase via the proposed permutation equivariance properties" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "content": "and specific design choices that we discuss in the following sections." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 96, + 545, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 96, + 545, + 334 + ], + "spans": [ + { + "bbox": [ + 304, + 96, + 545, + 334 + ], + "type": "text", + "content": "Recognizing the important role geometry plays in 3D reconstruction [4, 5, 11, 18, 24, 25], we integrate Procrustean methods such as Orthographic-N-Point (OnP) or Perspective-N-Point (PnP) to direct the model's focus on deformable aspects within a canonical frame. This incorporation significantly reduces the computational burden on the model, freeing it from learning redundant rigid rotations and focusing its capabilities on capturing the true geometric essence of objects. 
Scalability, a critical aspect of our model, is addressed through the use of tokenized positional encoding (TPE), which, when combined with graph-based transformer architecture, not only enhances the model's adaptability across diverse categories but also strengthens its ability to handle multiple categories with different number of keypoints and configurations. Finally, the use of skeleton information (joint connectivity) within the graph-based transformers via adjacency matrices provides strong clues about joint proximity and inherent connectivity, aiding in the handling of correspondences across varied object categories." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 335, + 545, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 335, + 545, + 430 + ], + "spans": [ + { + "bbox": [ + 304, + 335, + 545, + 430 + ], + "type": "text", + "content": "To the best of our knowledge, 3D-LFM is one of the only known work which is a unified model capable of doing 2D-3D lifting for " + }, + { + "bbox": [ + 304, + 335, + 545, + 430 + ], + "type": "inline_equation", + "content": "30+" + }, + { + "bbox": [ + 304, + 335, + 545, + 430 + ], + "type": "text", + "content": " (and potentially even more) categories simultaneously. Its ability to perform unified learning across a vast spectrum of object categories without specific object information and its handling of OOD scenarios highlight its potential as one of the first models capable of serving as a 2D-3D lifting foundation model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 431, + 487, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 431, + 487, + 442 + ], + "spans": [ + { + "bbox": [ + 306, + 431, + 487, + 442 + ], + "type": "text", + "content": "The contributions of this paper are threefold:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 443, + 545, + 621 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 306, + 443, + 545, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 443, + 545, + 490 + ], + "spans": [ + { + "bbox": [ + 306, + 443, + 545, + 490 + ], + "type": "text", + "content": "1. We propose a Procrustean transformer that is able to focus solely on learning the deformable aspects of objects within a single canonical frame whilst preserving permutation equivariance across 2D landmarks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 491, + 545, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 491, + 545, + 537 + ], + "spans": [ + { + "bbox": [ + 306, + 491, + 545, + 537 + ], + "type": "text", + "content": "2. The integration of tokenized positional encoding within the graph-based transformer, to enhance our approach's scalability and its capacity to handle diverse and imbalanced datasets." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 539, + 545, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 539, + 545, + 621 + ], + "spans": [ + { + "bbox": [ + 306, + 539, + 545, + 621 + ], + "type": "text", + "content": "3. We demonstrate that 3D-LFM surpasses state-of-the-art methods in categories such as humans, hands, and faces (benchmark in [32]). 
Additionally, it shows robust generalization by handling previously unseen objects and configurations, including animals ([5, 10]), inanimate objects ([26]), and novel object arrangements (rig transfer in [9])" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 623, + 545, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 623, + 545, + 706 + ], + "spans": [ + { + "bbox": [ + 304, + 623, + 545, + 706 + ], + "type": "text", + "content": "In subsequent sections, we explore the design and methodology of our proposed 3D-LFM architecture, including detailed ablation experiments and comparative analyses. Throughout this paper, 'keypoints', 'landmarks', and 'joints' are used interchangeably, referring to specific, identifiable points or locations on an object or figure that are crucial for understanding its structure and geometry." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10467" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 100, + 74, + 495, + 178 + ], + "blocks": [ + { + "bbox": [ + 100, + 74, + 495, + 178 + ], + "lines": [ + { + "bbox": [ + 100, + 74, + 495, + 178 + ], + "spans": [ + { + "bbox": [ + 100, + 74, + 495, + 178 + ], + "type": "image", + "image_path": "a58284baea3895dbf1f68554ecc0f1b300ccd78394e37597d5bc76f25601e114.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 116, + 184, + 477, + 289 + ], + "blocks": [ + { + "bbox": [ + 116, + 184, + 477, + 289 + ], + "lines": [ + { + "bbox": [ + 116, + 184, + 477, + 289 + ], + "spans": [ + { + "bbox": [ + 116, + 184, + 477, + 289 + ], + "type": "image", + "image_path": "fe824da252f65bd6270540aea6dbb12ff9392c0addeca4c929282512092910f2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 303, + 545, + 360 + ], + "lines": [ + { + "bbox": [ + 45, + 303, + 545, + 360 + ], + "spans": [ + { + "bbox": [ + 45, + 303, + 545, + 360 + ], + "type": "text", + "content": "Figure 2. Overview of the 3D Lifting Foundation Model (3D-LFM) architecture: The process begins with the input 2D keypoints undergoing Token Positional Encoding (TPE) before being processed by a series of graph-based transformer layers. The resulting features are then decoded through an MLP into a canonical 3D shape. This shape is aligned to the ground truth (G.T. 3D) in the reference frame using a Procrustean method, with the Mean Squared Error (MSE) loss computed to guide the learning. The architecture captures both local and global contextual information, focusing on deformable structures while minimizing computational complexity." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 379, + 135, + 390 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 379, + 135, + 390 + ], + "spans": [ + { + "bbox": [ + 46, + 379, + 135, + 390 + ], + "type": "text", + "content": "2. 
Related works" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 398, + 287, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 398, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 45, + 398, + 287, + 685 + ], + "type": "text", + "content": "The field of 2D-3D lifting has evolved substantially from classic works such as those based on Perspective-n-Point (PnP) algorithms [12]. In these early works, the algorithm was given a set of 2D landmarks and some 3D supervision, namely the known 3D rigid object. The field has since witnessed a paradigm shift with the introduction of deep learning methodologies, led by methods such as C3DPO [18], PAUL [24], and Deep NRSfM [11], along with recent transformer-based innovations such as NRSfM-Former [8]. In these approaches one does not need knowledge of the specific 3D object, instead it can get away with just the 2D landmarks and correspondences to an ensemble of 2D/3D data from the object category to be lifted. However, despite their recent success, all these methods still require that the 2D/3D data be in semantic correspondence. That is, the index to a specific landmark has the same semantic meaning across all instances (e.g. chair leg). In practice, this is quite limiting at run-time, as one needs intimate knowledge of the object category, and rig in order to apply any of these current methods. Further, this dramatically limits the ability of these methods to leverage cross-object and cross-rig datasets, prohibiting the construction of a truly generalizable 2D to 3D lifting foundation model – a topic of central focus in this paper." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 686, + 287, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 686, + 287, + 709 + ], + "spans": [ + { + "bbox": [ + 46, + 686, + 287, + 709 + ], + "type": "text", + "content": "Recent literature in pose estimation, loosely connected to NRSfM but often more specialized towards human and" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 380, + 545, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 380, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 304, + 380, + 545, + 498 + ], + "type": "text", + "content": "animal body parts, has also seen remarkable progress. Models such as Jointformer [14] and SimpleBaseline [16] have refined the single-frame 2D-3D lifting process, while generative approaches like MotionCLIP [19] and Human Motion Diffusion Model [20] have laid the groundwork for 3D generative motion-based foundation models. These approaches, however, are even more limiting than C3PDO, PAUL, etc. in that they are intimately wedded to the object class and are not easily extendable to an arbitrary object class." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 511, + 372, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 511, + 372, + 525 + ], + "spans": [ + { + "bbox": [ + 306, + 511, + 372, + 525 + ], + "type": "text", + "content": "3. 
Approach" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "spans": [ + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "text", + "content": "Given a set of 2D keypoints representing the projection of an object's joints in an image, we denote the keypoints matrix as " + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{W} \\in \\mathbb{R}^{N \\times 2}" + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "text", + "content": " is the predetermined maximum number of joints considered across all object categories. For objects with joints count less than " + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "text", + "content": ", we introduce a masking mechanism that utilizes a binary mask matrix " + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{M} \\in \\{0,1\\}^N" + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "text", + "content": ", where each element " + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "inline_equation", + "content": "m_i" + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 304, + 531, + 545, + 626 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 362, + 637, + 545, + 670 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 362, + 637, + 545, + 670 + ], + "spans": [ + { + "bbox": [ + 362, + 637, + 545, + 670 + ], + "type": "interline_equation", + "content": "m _ {i} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f j o i n t} i \\text {i s p r e s e n t} \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. 
\\tag {1}", + "image_path": "bd0e0b4782f502b88a846e38a700ec9871e544cbca866be6eec9e20601e40bc4.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 677, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 714 + ], + "type": "text", + "content": "The 3D lifting function " + }, + { + "bbox": [ + 304, + 677, + 545, + 714 + ], + "type": "inline_equation", + "content": "f: \\mathbb{R}^{N \\times 2} \\to \\mathbb{R}^{N \\times 3}" + }, + { + "bbox": [ + 304, + 677, + 545, + 714 + ], + "type": "text", + "content": " maps the 2D keypoints to their corresponding 3D structure while compensating for the projection:" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10468" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 113, + 83, + 287, + 97 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 83, + 287, + 97 + ], + "spans": [ + { + "bbox": [ + 113, + 83, + 287, + 97 + ], + "type": "interline_equation", + "content": "\\mathbf {S} = f (\\mathbf {W}) = \\mathbf {W} \\mathbf {R} ^ {\\top} + \\mathbf {b} \\tag {2}", + "image_path": "fc1fab190097390cb7555d06c31cca45cc1a3d32f968ea94ba83b8eccefb4449.jpg" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 102, + 287, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 102, + 287, + 137 + ], + "spans": [ + { + "bbox": [ + 47, + 102, + 287, + 137 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 102, + 287, + 137 + ], + "type": "inline_equation", + "content": "\\mathbf{R} \\in \\mathbb{R}^{3 \\times 3}" + }, + { + "bbox": [ + 47, + 102, + 287, + 137 + ], + "type": "text", + "content": " is the projection matrix (assumed either weak-perspective or orthographic) and " + }, + { + "bbox": [ + 47, + 102, + 287, + 137 + ], + "type": "inline_equation", + "content": "\\mathbf{b} \\in \\mathbb{R}^{N \\times 3}" + }, + { + "bbox": [ + 47, + 102, + 287, + 137 + ], + "type": "text", + "content": " is a bias term that aligns the centroids of 2D and 3D keypoints." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 138, + 288, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 138, + 288, + 221 + ], + "spans": [ + { + "bbox": [ + 46, + 138, + 288, + 221 + ], + "type": "text", + "content": "Permutation Equivariance: To ensure scalability and adaptability across a diverse set of objects, we leverage the property of permutation equivariance inherent in transformer architectures. 
Permutation equivariance allows the model to process input keypoints " + }, + { + "bbox": [ + 46, + 138, + 288, + 221 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 46, + 138, + 288, + 221 + ], + "type": "text", + "content": " regardless of their order, a critical feature for handling objects with varying joint configurations:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 125, + 228, + 209, + 241 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 228, + 209, + 241 + ], + "spans": [ + { + "bbox": [ + 125, + 228, + 209, + 241 + ], + "type": "interline_equation", + "content": "f (\\mathcal {P} \\mathbf {W}) = \\mathcal {P} f (\\mathbf {W})", + "image_path": "60795a3f54d10777396d4a4713ebfe42ab60268e60c55ec26533b8c2288daf75.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 246, + 287, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 246, + 287, + 377 + ], + "spans": [ + { + "bbox": [ + 46, + 246, + 287, + 377 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 246, + 287, + 377 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 46, + 246, + 287, + 377 + ], + "type": "text", + "content": " is a permutation matrix that reorders the keypoints. Handling Missing Data: To address the challenge of missing data, we refer the Deep NRSfM++ [25] work and use a masking mechanism to accommodate for occlusions or absences of keypoints. Our binary mask matrix " + }, + { + "bbox": [ + 46, + 246, + 287, + 377 + ], + "type": "inline_equation", + "content": "\\mathbf{M} \\in \\{0,1\\}^N" + }, + { + "bbox": [ + 46, + 246, + 287, + 377 + ], + "type": "text", + "content": " is applied in such a way that it not only pads the input data to a consistent size but also masks out missing or occluded points: " + }, + { + "bbox": [ + 46, + 246, + 287, + 377 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_m = \\mathbf{W} \\odot \\mathbf{M}" + }, + { + "bbox": [ + 46, + 246, + 287, + 377 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 246, + 287, + 377 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 46, + 246, + 287, + 377 + ], + "type": "text", + "content": " denotes element-wise multiplication. 
To remove the effects of translation and ensure that our TPE features are generalizable, we zero-center the data by subtracting the mean of the visible keypoints:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 384, + 287, + 397 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 384, + 287, + 397 + ], + "spans": [ + { + "bbox": [ + 110, + 384, + 287, + 397 + ], + "type": "interline_equation", + "content": "\\mathbf {W} _ {c} = \\mathbf {W} _ {m} - \\operatorname {m e a n} \\left(\\mathbf {W} _ {m}\\right) \\tag {3}", + "image_path": "8ef58b4a071bbe269b3f46edb38d9609cf3305e697896ee5d12694526314204f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 402, + 287, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 402, + 287, + 462 + ], + "spans": [ + { + "bbox": [ + 46, + 402, + 287, + 462 + ], + "type": "text", + "content": "We scale the zero-centered data to the range " + }, + { + "bbox": [ + 46, + 402, + 287, + 462 + ], + "type": "inline_equation", + "content": "[-1, 1]" + }, + { + "bbox": [ + 46, + 402, + 287, + 462 + ], + "type": "text", + "content": " while preserving the aspect ratio to maintain the geometric integrity of the keypoints. For more details on handling missing data in the presence of perspective effects, we refer the reader to Deep NRSFM++[25]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 462, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 462, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 462, + 287, + 581 + ], + "type": "text", + "content": "Token Positional Encoding: replaces the traditional Correspondence Positional Encoding (CPE) or Joint Embedding which encodes the semantic correspondence information (as used in works such as like [14, 31]) with a mechanism that does not require explicit correspondence or semantic information. 
Owing to the success of per-point positional embedding, particularly random Fourier features [30] in handling OOD data, we compute Token Positional Encoding (TPE) using analytical Random Fourier features (RFF) as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 57, + 594, + 287, + 633 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 594, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 57, + 594, + 287, + 633 + ], + "type": "interline_equation", + "content": "\\mathbf {T P E} \\left(\\mathbf {W} _ {c}\\right) = \\sqrt {\\frac {2}{D}} \\left[ \\sin \\left(\\mathbf {W} _ {c} \\boldsymbol {\\omega} + b\\right); \\cos \\left(\\mathbf {W} _ {c} \\boldsymbol {\\omega} + b\\right) \\right] \\tag {4}", + "image_path": "29d3eca818fa25f48b91d85c4301af99e068ebd39bc3cd608bb8fc183e41eaa5.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": " is the dimensionality of the Fourier feature space, " + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\pmb{\\omega} \\in \\mathbb{R}^{2 \\times \\frac{D}{2}}" + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{b} \\in \\mathbb{R}^{\\frac{D}{2}}" + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": " are parameters sampled from a normal distribution, scaled appropriately. These parameters are sampled once and kept fixed, as per the RFF methodology. The output of this transformation " + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{TPE}(\\mathbf{W}_c)" + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": " is then fed into the graph-based transformer network as " + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^\\ell" + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": " where" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 547, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 144 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 304, + 72, + 547, + 144 + ], + "type": "text", + "content": " indicates the layer number (0 in the above case). This set of features is now ready for processing inside the graph-based transformer layers without the need for correspondence among the input keypoints. The TPE retains the property of permutation equivariance while implicitly encoding the relative positions of the keypoints." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 152, + 515, + 164 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 152, + 515, + 164 + ], + "spans": [ + { + "bbox": [ + 305, + 152, + 515, + 164 + ], + "type": "text", + "content": "3.1. 
Graph-based Transformer Architecture" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 169, + 545, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 169, + 545, + 253 + ], + "spans": [ + { + "bbox": [ + 304, + 169, + 545, + 253 + ], + "type": "text", + "content": "Our graph-based transformer architecture utilizes a hybrid approach to feature aggregation by combining graph-based local attention [22](L) with global self-attention mechanisms [21](G) within a single layer (shown as grey block in Fig. 2. This layer is replicated " + }, + { + "bbox": [ + 304, + 169, + 545, + 253 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 169, + 545, + 253 + ], + "type": "text", + "content": " times, providing a sequential refinement of the feature representation across the network's depth." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "spans": [ + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "text", + "content": "Hybrid Feature Aggregation: For each layer " + }, + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "text", + "content": ", ranging from 0 to " + }, + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "text", + "content": ", the feature matrix " + }, + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{(\\ell)} \\in \\mathbb{R}^{N \\times D}" + }, + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "text", + "content": " is augmented through simultaneous local and global processing. 
The local processing component, " + }, + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "inline_equation", + "content": "\\mathrm{GA}(\\mathbf{X}^{(\\ell)}, \\mathbf{A})" + }, + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "text", + "content": ", leverages an adjacency matrix " + }, + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 304, + 253, + 545, + 337 + ], + "type": "text", + "content": ", which encodes the connectivity based on the object category, to perform graph-based attention on batches of nodes representing the input 2D data:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 381, + 345, + 545, + 376 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 345, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 381, + 345, + 545, + 376 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {L} ^ {(\\ell)} = \\mathrm {G A} (\\mathbf {X} ^ {(\\ell)}, \\mathbf {A}), \\\\ \\mathbf {G} ^ {(\\ell)} = \\operatorname {M H S A} (\\mathbf {X} ^ {(\\ell)}) \\tag {5} \\\\ \\end{array}", + "image_path": "023bf9de8dd8741b0d08a9db60914de155b6e4a34b089573a516ef1b5bfe5d5d.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 384, + 545, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 384, + 545, + 408 + ], + "spans": [ + { + "bbox": [ + 305, + 384, + 545, + 408 + ], + "type": "text", + "content": "Local and global features are concatenated to form a unified representation " + }, + { + "bbox": [ + 305, + 384, + 545, + 408 + ], + "type": "inline_equation", + "content": "\\mathbf{U}^{(\\ell)}" + }, + { + "bbox": [ + 305, + 384, + 545, + 408 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 366, + 418, + 545, + 434 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 366, + 418, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 366, + 418, + 545, + 434 + ], + "type": "interline_equation", + "content": "\\mathbf {U} ^ {(\\ell)} = \\operatorname {c o n c a t} \\left(\\mathbf {L} ^ {(\\ell)}, \\mathbf {G} ^ {(\\ell)}\\right) \\tag {6}", + "image_path": "3a54851f30e7f6425d1803053bc89e8c077f6c66815312a047ad3951f6ac687f.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 438, + 545, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 438, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 304, + 438, + 545, + 487 + ], + "type": "text", + "content": "Following the concatenation, each layer applies a normalization(LN) and a multilayer perceptron (MLP). 
The MLP employs a Gaussian Error Linear Unit (GeLU) as the nonlinearity function to enhance the model's expressive power" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 340, + 504, + 545, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 504, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 340, + 504, + 545, + 536 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {X} ^ {\\prime (\\ell)} = \\mathrm {L N} \\left(\\mathbf {U} ^ {(\\ell)}\\right) + \\mathbf {U} ^ {(\\ell)}, \\\\ \\mathbf {X} ^ {(\\ell + 1)} = \\operatorname {L N} \\left(\\operatorname {M L P} _ {-} \\operatorname {G e L U} \\left(\\mathbf {X} ^ {\\prime (\\ell)}\\right)\\right) + \\mathbf {X} ^ {\\prime (\\ell)} \\tag {7} \\\\ \\end{array}", + "image_path": "2de914ba9b64b6aff80084271f7ba1402b2af66a9f44c88184b2def26f2bcf75.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 544, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 544, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 544, + 545, + 628 + ], + "type": "text", + "content": "Here, GA represents Graph Attention, MHSA denotes Multi-Head Self-Attention, and MLP_GeLU indicates our MLP with GeLU nonlinearity. This architecture is designed to learn patterns in 2D data by considering both the local neighborhood connectivity of input 2D and the global data context of input 2D, which is important for robust 2D to 3D structure lifting." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 635, + 438, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 635, + 438, + 647 + ], + "spans": [ + { + "bbox": [ + 306, + 635, + 438, + 647 + ], + "type": "text", + "content": "3.2. Procrustean Alignment" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 654, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 689 + ], + "type": "text", + "content": "The final operation in our pipeline decodes the latent feature representation " + }, + { + "bbox": [ + 304, + 654, + 545, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{(L)}" + }, + { + "bbox": [ + 304, + 654, + 545, + 689 + ], + "type": "text", + "content": " into the predicted canonical structure " + }, + { + "bbox": [ + 304, + 654, + 545, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_c" + }, + { + "bbox": [ + 304, + 654, + 545, + 689 + ], + "type": "text", + "content": " via a GeLU-activated MLP:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 367, + 700, + 483, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 700, + 483, + 714 + ], + "spans": [ + { + "bbox": [ + 367, + 700, + 483, + 714 + ], + "type": "interline_equation", + "content": "\\mathbf {S} _ {c} = \\operatorname {M L P} _ {\\text {s h a p e . 
d e c o d e r}} (\\mathbf {X} ^ {(L)})", + "image_path": "73b412403fd69121f927e9708c333b1dcc8d5bd687b39f032268b77cc0af55f9.jpg" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10469" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "Subsequently, we align " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_c" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " with the ground truth " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_r" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": ", via a Procrustean alignment method that optimizes for the rotation matrix " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": ". The alignment is formalized as a minimization problem:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 96, + 131, + 237, + 149 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 131, + 237, + 149 + ], + "spans": [ + { + "bbox": [ + 96, + 131, + 237, + 149 + ], + "type": "interline_equation", + "content": "\\underset {\\mathbf {R}} {\\text {m i n i m i z e}} \\quad | | \\mathbf {M} \\odot (\\mathbf {S} _ {r} - \\mathbf {S} _ {c} \\mathbf {R}) | | _ {F} ^ {2}", + "image_path": "45fccde0e4420066735d2b513e0fed996d3cc7c97f3c34a5c2e2ec712d5732d2.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 154, + 287, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 154, + 287, + 201 + ], + "spans": [ + { + "bbox": [ + 46, + 154, + 287, + 201 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 154, + 287, + 201 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 46, + 154, + 287, + 201 + ], + "type": "text", + "content": " is a binary mask applied element-wise, and " + }, + { + "bbox": [ + 46, + 154, + 287, + 201 + ], + "type": "inline_equation", + "content": "||\\cdot ||_F" + }, + { + "bbox": [ + 46, + 154, + 287, + 201 + ], + "type": "text", + "content": " denotes the Frobenius norm. 
The optimal " + }, + { + "bbox": [ + 46, + 154, + 287, + 201 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 46, + 154, + 287, + 201 + ], + "type": "text", + "content": " is obtained via SVD, which ensures the orthonormality constraint of the rotation matrix:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 64, + 222, + 270, + 237 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 222, + 270, + 237 + ], + "spans": [ + { + "bbox": [ + 64, + 222, + 270, + 237 + ], + "type": "interline_equation", + "content": "\\mathbf {U}, \\boldsymbol {\\Sigma}, \\mathbf {V} ^ {\\top} = \\operatorname {S V D} ((\\mathbf {M} \\odot \\mathbf {S} _ {c}) ^ {\\top} \\mathbf {S} _ {r}), \\quad \\mathbf {R} = \\mathbf {U V} ^ {\\top}", + "image_path": "db22c7863bfc91fe41bfba3f1bee3ae68614563bfdedd02f8f5ff5c0dadc5f0f.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 246, + 287, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 246, + 287, + 282 + ], + "spans": [ + { + "bbox": [ + 46, + 246, + 287, + 282 + ], + "type": "text", + "content": "The predicted shape is then scaled relative to the reference shape " + }, + { + "bbox": [ + 46, + 246, + 287, + 282 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_r" + }, + { + "bbox": [ + 46, + 246, + 287, + 282 + ], + "type": "text", + "content": ", resulting in a scale factor " + }, + { + "bbox": [ + 46, + 246, + 287, + 282 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 246, + 287, + 282 + ], + "type": "text", + "content": ", which yields the final predicted shape " + }, + { + "bbox": [ + 46, + 246, + 287, + 282 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_p" + }, + { + "bbox": [ + 46, + 246, + 287, + 282 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 293, + 200, + 307 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 293, + 200, + 307 + ], + "spans": [ + { + "bbox": [ + 132, + 293, + 200, + 307 + ], + "type": "interline_equation", + "content": "\\mathbf {S} _ {p} = \\boldsymbol {\\gamma} \\cdot (\\mathbf {S} _ {c} \\mathbf {R})", + "image_path": "a4c5cff89adc4d1087c2bbf5e880d04ef6c963e47116c0cef7916b1136a2e8a6.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 312, + 287, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 312, + 287, + 419 + ], + "spans": [ + { + "bbox": [ + 46, + 312, + 287, + 419 + ], + "type": "text", + "content": "This Procrustean alignment step is crucial for directing the model's focus on learning non-rigid shape deformations over rigid body dynamics, thus significantly enhancing the model's ability to capture the true geometric essence of objects by just focusing on core deformable (non-rigid) aspects. The effectiveness of this approach is confirmed by faster convergence and reduced error rates in our experiments, as detailed in Fig. 6. These findings align with the findings presented in PAUL [24]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 427, + 135, + 438 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 427, + 135, + 438 + ], + "spans": [ + { + "bbox": [ + 47, + 427, + 135, + 438 + ], + "type": "text", + "content": "3.3. 
Loss Function" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 445, + 287, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 445, + 287, + 482 + ], + "spans": [ + { + "bbox": [ + 46, + 445, + 287, + 482 + ], + "type": "text", + "content": "The optimization of our model relies on the Mean Squared Error (MSE) loss, which calculates the difference between predicted 3D points " + }, + { + "bbox": [ + 46, + 445, + 287, + 482 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_p" + }, + { + "bbox": [ + 46, + 445, + 287, + 482 + ], + "type": "text", + "content": " and the ground truth " + }, + { + "bbox": [ + 46, + 445, + 287, + 482 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_r" + }, + { + "bbox": [ + 46, + 445, + 287, + 482 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 490, + 287, + 522 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 287, + 522 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 287, + 522 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {M S E}} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\| \\mathbf {S} _ {p} ^ {(i)} - \\mathbf {S} _ {r} ^ {(i)} \\right\\| ^ {2} \\tag {8}", + "image_path": "8dc1f322786ace093a0a8d13769777ea91f9700264846838c1cf4d4498a48434.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 527, + 287, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 527, + 287, + 599 + ], + "spans": [ + { + "bbox": [ + 46, + 527, + 287, + 599 + ], + "type": "text", + "content": "Minimizing this loss across " + }, + { + "bbox": [ + 46, + 527, + 287, + 599 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 527, + 287, + 599 + ], + "type": "text", + "content": " points ensures the model's ability in reconstructing accurate 3D shapes from input 2D landmarks. This minimization effectively calibrates the shape decoder and the Procrustean alignment to focus on the essential non-rigid characteristics of the objects, helping the accuracy of the 2D to 3D lifting process." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 609, + 237, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 609, + 237, + 624 + ], + "spans": [ + { + "bbox": [ + 46, + 609, + 237, + 624 + ], + "type": "text", + "content": "4. Results and Comparative Analysis" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "content": "Our evaluation shows the 3D Lifting Foundation Model (3D-LFM)'s capability in single-frame 2D-3D lifting across diverse object categories without object-specific data in Sec. 4.1. Following that, Sec. 4.2 highlights 3D-LFM's performance over specialized methods, especially achieving state-of-the-art performance in whole-body benchmarks[32] (Fig. 4). Additionally, Sec. 
4.3 shows" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": "3D-LFM's capability in 2D-3D lifting across 30 categories using a single unified model, enhancing category-specific performance and achieving out-of-distribution (OOD) generalization for unseen object configurations during training. In conclusion, the ablation studies in Section 4.4 validate our proposed procrustean approach, token positional encoding, and the local-global hybrid attention mechanism in the transformer model, confirming their role in 3D-LFM's effectiveness in both single- and multiple-object scenarios." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 228, + 480, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 228, + 480, + 241 + ], + "spans": [ + { + "bbox": [ + 305, + 228, + 480, + 241 + ], + "type": "text", + "content": "4.1. Multi-Object 3D Reconstruction" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 247, + 545, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 247, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 304, + 247, + 545, + 331 + ], + "type": "text", + "content": "Clarifying naming convention: In 'object-specific' versus 'object-agnostic', our primary focus in this naming is on the distinction in training methods. Here, object-specific training involves supplying semantic details for each object, leading to isolated training. Conversely, object-agnostic training combines various categories without explicit landmark semantics, leading to combined training." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 331, + 545, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 545, + 450 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 545, + 450 + ], + "type": "text", + "content": "Experiment Rationale: 3D-LFM leverages permutation equivariance to accurately lift 2D keypoints into 3D structures across diverse categories, outperforming fixed-array methods by adapting flexibly to variable keypoint configurations. It has been evaluated against non-rigid structure-from-motion approaches [11, 18, 24, 25] that require object-specific inputs, showing its ability to handle diverse categories. For a comprehensive benchmark, we utilize the PASCAL3D+ dataset [26], following C3DPO's [18] methodology, to include a variety of object categories." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 451, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 451, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 451, + 545, + 712 + ], + "type": "text", + "content": "Performance: We benchmark 3D-LFM against the notable NRSfM method, C3DPO [18], for multi-object 2D to 3D lifting with 3D supervision. C3DPO, similar to other contemporary methods [11, 24, 25, 28] requiring object-specific details, serves as an apt comparison due to its multi-category approach. Initially replicating conditions with object-specific information, 3D-LFM matches C3DPO's performance, as demonstrated in Fig. 3. 
This stage uses MPJPE to measure 3D lifting accuracy, with C3DPO's training setup including an " + }, + { + "bbox": [ + 304, + 451, + 545, + 712 + ], + "type": "inline_equation", + "content": "MN" + }, + { + "bbox": [ + 304, + 451, + 545, + 712 + ], + "type": "text", + "content": " dimensional array for object details where " + }, + { + "bbox": [ + 304, + 451, + 545, + 712 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 451, + 545, + 712 + ], + "type": "text", + "content": " represents number of objects with " + }, + { + "bbox": [ + 304, + 451, + 545, + 712 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 451, + 545, + 712 + ], + "type": "text", + "content": " being maximum number of keypoints, and our model is trained separately on each object to avoid providing object-specific information. The 3D-LFM's strength emerges when object-specific data is withheld. While C3DPO shows a decline without such data, 3D-LFM maintains a lower MPJPE across categories, even when trained collectively across categories using only an " + }, + { + "bbox": [ + 304, + 451, + 545, + 712 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 451, + 545, + 712 + ], + "type": "text", + "content": " dimensional array. These findings (Fig. 3) highlights 3D-LFM's capabilities, outperforming single-category training and demonstrating its potential as a generalized 2D to 3D lifting solution." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10470" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 70, + 284, + 216 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 284, + 216 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 284, + 216 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 284, + 216 + ], + "type": "image", + "image_path": "1b39639aa55d9a08205ddba20f9f5b93d226a5c41f43b59e0ae37b4613d61f62.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 227, + 287, + 304 + ], + "lines": [ + { + "bbox": [ + 46, + 227, + 287, + 304 + ], + "spans": [ + { + "bbox": [ + 46, + 227, + 287, + 304 + ], + "type": "text", + "content": "Figure 3. 3D-LFM vs. C3DPO Performance: MPJPE comparisons using the PASCAL3D+ dataset, this figure demonstrates our model's adaptability in the absence of object-specific information, contrasting with C3DPO's increased error under the same conditions. The analysis confirms 3D-LFM's superiority across diverse object categories, reinforcing its potential for generalized 2D to 3D lifting." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 47, + 374, + 287, + 456 + ], + "blocks": [ + { + "bbox": [ + 46, + 331, + 287, + 366 + ], + "lines": [ + { + "bbox": [ + 46, + 331, + 287, + 366 + ], + "spans": [ + { + "bbox": [ + 46, + 331, + 287, + 366 + ], + "type": "text", + "content": "Table 1. Quantitative performance on H3WB: Our method demonstrates leading performance across multiple object categories without the need for object-specific designs." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 374, + 287, + 456 + ], + "lines": [ + { + "bbox": [ + 47, + 374, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 47, + 374, + 287, + 456 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Whole-body</td><td>Body</td><td>Face/Aligned</td><td>Hand/Aligned</td></tr>
<tr><td>SimpleBaseline</td><td>125.4</td><td>125.7</td><td>115.9 / 24.6</td><td>140.7 / 42.5</td></tr>
<tr><td>CanonPose w/3D sv.</td><td>117.7</td><td>117.5</td><td>112.0 / 17.9</td><td>126.9 / 38.3</td></tr>
<tr><td>Large SimpleBaseline</td><td>112.3</td><td>112.6</td><td>110.6 / 14.6</td><td>114.8 / 31.7</td></tr>
<tr><td>Jointformer (extra data)</td><td>81.5</td><td>78</td><td>60.4 / 16.2</td><td>117.6 / 38.8</td></tr>
<tr><td>Jointformer</td><td>88.3</td><td>84.9</td><td>66.5 / 17.8</td><td>125.3 / 43.7</td></tr>
<tr><td>Ours</td><td>64.13</td><td>60.83</td><td>56.55 / 10.44</td><td>78.21 / 28.22</td></tr>
<tr><td>Ours – PA</td><td>33.13</td><td>39.36</td><td>6.02</td><td>13.56</td></tr></table>
", + "image_path": "10fe15f317e7bbc850283cacd33d50076523f63c805a439028b13cdea54489fd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 491, + 241, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 491, + 241, + 504 + ], + "spans": [ + { + "bbox": [ + 46, + 491, + 241, + 504 + ], + "type": "text", + "content": "4.2. Benchmark: Object-Specific Models" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 510, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 510, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 45, + 510, + 287, + 715 + ], + "type": "text", + "content": "Next, we benchmark 3D-LFM against leading specialized methods for human body, face, and hands categories. Our model outperforms these specialized methods, showing multi-category learning without the need for category (landmark) semantics. For this study, we evaluate on H3WB dataset [32], a recent benchmark for diverse whole-body pose estimation tasks. This dataset is valuable for its inclusion of multiple object categories and for providing a comparative baseline against methods such as Jointformer [14], SimpleBaseline [16], and CanonPose [23]. Following H3WB's recommended 5-fold cross-validation and submitting the evaluations to benchmark's authors, we report results on the hidden test set. The results shown in Fig. 4 and Table 1 include PA-MPJPE and MPJPE, with test set performance numbers provided directly by the H3WB team, ensuring that our results are verified by an independent third-party." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 309, + 73, + 542, + 208 + ], + "blocks": [ + { + "bbox": [ + 309, + 73, + 542, + 208 + ], + "lines": [ + { + "bbox": [ + 309, + 73, + 542, + 208 + ], + "spans": [ + { + "bbox": [ + 309, + 73, + 542, + 208 + ], + "type": "image", + "image_path": "b43f2c5b144c87bfb124cc4c794454de53e59e397131c8933800fc05d04661ba.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 218, + 545, + 295 + ], + "lines": [ + { + "bbox": [ + 304, + 218, + 545, + 295 + ], + "spans": [ + { + "bbox": [ + 304, + 218, + 545, + 295 + ], + "type": "text", + "content": "Figure 4. Performance Comparison on H3WB Benchmark: This chart contrasts MPJPE errors for whole-body, body, face, aligned face, hand, and aligned hand categories within the H3WB benchmark [32]. Our models, with and without Procrustes Alignment (Ours-PA), outperform current state-of-the-art (SOTA) methods, validating our approach's proficiency in 2D to 3D lifting tasks." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 346, + 454, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 346, + 454, + 357 + ], + "spans": [ + { + "bbox": [ + 306, + 346, + 454, + 357 + ], + "type": "text", + "content": "4.3. Towards foundation model" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 363, + 545, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 363, + 545, + 483 + ], + "spans": [ + { + "bbox": [ + 304, + 363, + 545, + 483 + ], + "type": "text", + "content": "In this section, we highlight 3D-LFM's role as a foundational model for varied 2D-3D lifting, capable in managing multiple object types and data imbalances. 
In this subsection, we explore 3D-LFM's scalability for collective dataset training (Sec.4.3.1), its generalization to new categories and rig transfer capabilities (Sec.4.3.2). These studies validate the 3D-LFM's role as a foundation model, capable at leveraging diverse data without requiring specific configurations, thus simplifying the 3D lifting process for varied joint setups." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 483, + 545, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 545, + 604 + ], + "type": "text", + "content": "We start this investigation by showing the capability 3D-LFM in handling 2D-3D lifting for " + }, + { + "bbox": [ + 304, + 483, + 545, + 604 + ], + "type": "inline_equation", + "content": "30+" + }, + { + "bbox": [ + 304, + 483, + 545, + 604 + ], + "type": "text", + "content": " object categories within the single model, confirming the model's capability to manage imbalanced datasets representative of real-world scenarios as shown in Fig. 1. With a comprehensive range of human, hand, face, inanimate objects, and animal datasets, the 3D-LFM is proven to be scalable, without requiring category-specific adjustments. The subsequent subsections will dissect these attributes further, discussing the 3D-LFM's foundational potential in the 3D lifting domain." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 610, + 458, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 610, + 458, + 623 + ], + "spans": [ + { + "bbox": [ + 306, + 610, + 458, + 623 + ], + "type": "text", + "content": "4.3.1 Combined Dataset Training" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "content": "This study evaluates the 3D-LFM's performance on isolated datasets against its performance on a combined dataset. Initially, the model was trained separately on animal-based supercategory datasets: specifically OpenMonkey[1] and Animals3D[27]. Subsequently, it was trained on a merged dataset containing a broad spectrum of object categories, including Human Body-Based datasets such as AMASS [15]" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10471" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 75, + 91, + 257, + 152 + ], + "blocks": [ + { + "bbox": [ + 72, + 71, + 261, + 81 + ], + "lines": [ + { + "bbox": [ + 72, + 71, + 261, + 81 + ], + "spans": [ + { + "bbox": [ + 72, + 71, + 261, + 81 + ], + "type": "text", + "content": "Table 2. Quantitative evaluation for OOD scenarios." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 75, + 91, + 257, + 152 + ], + "lines": [ + { + "bbox": [ + 75, + 91, + 257, + 152 + ], + "spans": [ + { + "bbox": [ + 75, + 91, + 257, + 152 + ], + "type": "table", + "html": "
<table><tr><td>Category</td><td>OOD (mm)</td><td>In-Dist. (mm)</td></tr>
<tr><td>Cheetah</td><td>26.59</td><td>10.16</td></tr>
<tr><td>Train</td><td>6.88</td><td>5.71</td></tr>
<tr><td>Chimpanzee</td><td>52.05</td><td>42.65</td></tr></table>
", + "image_path": "493d113d247da95fd6c162af320da3f6562d007a8a6162f4775ca6f5317c1938.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 173, + 287, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 173, + 287, + 303 + ], + "spans": [ + { + "bbox": [ + 46, + 173, + 287, + 303 + ], + "type": "text", + "content": "and Human 3.6 [7], Hand-Based datasets such as PanOptic Hands [9], Face-Based datasets like BP4D+[29], and various Inanimate Objects from the PASCAL3D+ dataset[26], along with previously mentioned animal datasets. Isolated training resulted in an average MPJPE of " + }, + { + "bbox": [ + 46, + 173, + 287, + 303 + ], + "type": "inline_equation", + "content": "21.22 \\, \\text{mm}" + }, + { + "bbox": [ + 46, + 173, + 287, + 303 + ], + "type": "text", + "content": ", while the combined training method significantly reduced MPJPE to " + }, + { + "bbox": [ + 46, + 173, + 287, + 303 + ], + "type": "inline_equation", + "content": "12.5 \\, \\text{mm}" + }, + { + "bbox": [ + 46, + 173, + 287, + 303 + ], + "type": "text", + "content": " on the same animal supercategory validation split. This improvement confirms the potential of 3D-LFM as a pre-training framework and underscores its ability to adapt and generalize from diverse and extensive data collections." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 304, + 287, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 304, + 287, + 448 + ], + "spans": [ + { + "bbox": [ + 46, + 304, + 287, + 448 + ], + "type": "text", + "content": "Dataset Selection Rationale: We selected animal-based supercategory datasets to demonstrate combined training's impact on underrepresented categories. We observed greater performance improvements in smaller, unbalanced datasets (as exemplified by PASCAL3D+: from 4.31 mm to 1.1 mm and OpenMonkey: from 19.45 mm to 9.59 mm) compared to larger datasets with sufficient balance among categories. Consequently, we see minimal gains in more balanced, larger datasets like AMASS (from 1.67 mm to 1.66 mm), underscoring the utility of combined training for enhancing performance in underrepresented and long-tail categories." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 455, + 239, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 455, + 239, + 467 + ], + "spans": [ + { + "bbox": [ + 47, + 455, + 239, + 467 + ], + "type": "text", + "content": "4.3.2 OOD generalization and rig-transfer:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 474, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 605 + ], + "type": "text", + "content": "We evaluate 3D-LFM's generalization to unseen object categories and rig configurations. Its accuracy is highlighted by successful 2D-3D lifting reconstructions of the \"Cheetah\" from Acinoset [10], which is not included in the typical Animal3D dataset [27], and the \"Train\" category from PASCAL3D+[26], absent during training. Qualitative reconstructions are shown in Fig. 5, along with the quantitative results in Tab.2 for above categories as well as in-the-wild category like a Chimpanzee from the MBW dataset [5] - which illustrates model's strong OOD generalization and capability to handle in-the-wild data." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": "Additionally, we show 3D-LFM's capability in transferring rig configurations between datasets, embodying the concept of generic geometry learning. By training on a 17-joint Human3.6M dataset [7] and testing on a 15-joint Panoptic Studio setup [9], our model gives accurate 3D reconstructions despite variations in joint arrangements. This capability is particularly interesting for its efficiency in utilizing data from multiple rigs of the same object, and underscores the model's adaptability, a cornerstone in pro" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 310, + 82, + 557, + 205 + ], + "blocks": [ + { + "bbox": [ + 310, + 82, + 557, + 205 + ], + "lines": [ + { + "bbox": [ + 310, + 82, + 557, + 205 + ], + "spans": [ + { + "bbox": [ + 310, + 82, + 557, + 205 + ], + "type": "image", + "image_path": "6a0d2e8953ff1370868413af3bf215fb09ab8155ed5eb35f8361ceb6e70f669a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 220, + 545, + 308 + ], + "lines": [ + { + "bbox": [ + 304, + 220, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 304, + 220, + 545, + 308 + ], + "type": "text", + "content": "Figure 5. Generalization to unseen data: Figure showing 3D-LFM's proficiency in OOD 2D-3D lifting, effectively handling new, unseen categories, and rig generalization from Acinoset [10] PASCAL3D+ [26], and Panoptic studio [9] with varying joint arrangements in top row. The bottom row presents in-the-wild data from the MBW dataset [5], with red dots indicating input keypoints and blue stick figures showing the model's 3D predictions from different angles." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 329, + 545, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 329, + 545, + 413 + ], + "spans": [ + { + "bbox": [ + 304, + 329, + 545, + 413 + ], + "type": "text", + "content": "cessing diverse human datasets. It aligns with the broader community's interest in versatile geometry learning, which makes these findings especially compelling. For a more thorough validation, we direct the reader to the ablation section, where qualitative visuals (Fig. 7) and quantitative analysis (Sec. 4.4.3) further highlight 3D-LFM's OOD generalization and rig transfer efficacy." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 421, + 369, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 421, + 369, + 431 + ], + "spans": [ + { + "bbox": [ + 306, + 421, + 369, + 431 + ], + "type": "text", + "content": "4.4. Ablation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 439, + 545, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 439, + 545, + 546 + ], + "spans": [ + { + "bbox": [ + 304, + 439, + 545, + 546 + ], + "type": "text", + "content": "In our ablation studies, we evaluate the 3D-LFM's design elements and their individual contributions to its performance. 
Detailed experiments on the Human3.6M benchmark [7] and a blend of other datasets including Animal3D [27] and facial datasets [9, 29] were carried out to ablate the role of Procrustean transformation, hybrid attention mechanisms, and tokenized positional encoding (TPE) in enabling the model's scalability and out-of-distribution (OOD) generalization." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 552, + 460, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 552, + 460, + 562 + ], + "spans": [ + { + "bbox": [ + 306, + 552, + 460, + 562 + ], + "type": "text", + "content": "4.4.1 Procrustean Transformation" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 570, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 712 + ], + "type": "text", + "content": "3D-LFM's fusion of the procrustean approach, a first in transformer-based lifting frameworks, concentrates on deformable object components, as outlined in Sec.3.2. By focusing on shape within a standard canonical reference frame and avoiding rigid body transformations, we see faster learning and a decreased MPJPE, as evident by the gap between blue and orange lines in Fig. 6 (a) suggests. This fusion is crucial for learning 3D deformations, while utilizing transformers' equivariance. These findings suggest that even for transformers, avoiding rigid transformations' learning aids convergence, most notably with imbalanced datasets." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10472" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 72, + 165, + 158 + ], + "blocks": [ + { + "bbox": [ + 49, + 72, + 165, + 158 + ], + "lines": [ + { + "bbox": [ + 49, + 72, + 165, + 158 + ], + "spans": [ + { + "bbox": [ + 49, + 72, + 165, + 158 + ], + "type": "image", + "image_path": "6fac01a11b6aeb1fd8a0508795cfe84e2bc854407acdaefe15610bfbc38912d3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 170, + 287, + 226 + ], + "lines": [ + { + "bbox": [ + 46, + 170, + 287, + 226 + ], + "spans": [ + { + "bbox": [ + 46, + 170, + 287, + 226 + ], + "type": "text", + "content": "Figure 6. (a) Comparing attention strategies in 3D-LFM. The combined local-global approach with procrustean alignment surpasses other configurations in MPJPE reduction over 100 epochs on the Human3.6M validation split. 
(b) rapid convergence and efficiency of the TPE approach compared to the learnable MLP" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 167, + 71, + 284, + 159 + ], + "blocks": [ + { + "bbox": [ + 167, + 71, + 284, + 159 + ], + "lines": [ + { + "bbox": [ + 167, + 71, + 284, + 159 + ], + "spans": [ + { + "bbox": [ + 167, + 71, + 284, + 159 + ], + "type": "image", + "image_path": "ceac275bcf83e9bd7c4ff7657041a1462ccc14eb5cc983d6881f2cf0024272c1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 48, + 258, + 287, + 336 + ], + "blocks": [ + { + "bbox": [ + 49, + 238, + 284, + 249 + ], + "lines": [ + { + "bbox": [ + 49, + 238, + 284, + 249 + ], + "spans": [ + { + "bbox": [ + 49, + 238, + 284, + 249 + ], + "type": "text", + "content": "Table 3. Impact of TPE on Data Imbalance and Rig Transfer" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 258, + 287, + 336 + ], + "lines": [ + { + "bbox": [ + 48, + 258, + 287, + 336 + ], + "spans": [ + { + "bbox": [ + 48, + 258, + 287, + 336 + ], + "type": "table", + "html": "
<table><tr><td>Study</td><td>Experiment</td><td>Model Size</td><td>Improvement (%)</td></tr>
<tr><td>Data Imbalance</td><td>Underrepr. category (Hippo) [27]</td><td>128</td><td>3.27</td></tr>
<tr><td></td><td></td><td>512</td><td>12.28</td></tr>
<tr><td></td><td></td><td>1024</td><td>22.02</td></tr>
<tr><td>Rig Transfer</td><td>17 [7]- to 15 [9]-joint</td><td>N/A</td><td>12</td></tr>
<tr><td></td><td>15 [9]- to 17 [7]-joint</td><td></td><td>23.29</td></tr>
<tr><td></td><td>52 [9]- to 83 [29]-joint</td><td></td><td>52.3</td></tr></table>
", + "image_path": "93481d0c79429a24accb03e08ebaea0986c2ded41ca179c42bbda33734330fa9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 357, + 227, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 357, + 227, + 369 + ], + "spans": [ + { + "bbox": [ + 47, + 357, + 227, + 369 + ], + "type": "text", + "content": "4.4.2 Local-Global vs. Hybrid Attention" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 376, + 287, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 376, + 287, + 496 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 287, + 496 + ], + "type": "text", + "content": "In evaluating 3D-LFM's attention strategies, our analysis on the same validation split as above demonstrates the superiority of a hybrid approach combining local (GA) and global (MHSA) attention mechanisms. This integration, particularly when complemented by Procrustean (OnP) alignment, significantly enhances performance and accelerates convergence, as evidenced in Fig. 6 (a). The distinct advantage of this hybrid system validates our architectural choices, showcasing its efficiency in reducing MPJPE errors and refining model training dynamics." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 502, + 214, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 502, + 214, + 514 + ], + "spans": [ + { + "bbox": [ + 47, + 502, + 214, + 514 + ], + "type": "text", + "content": "4.4.3 Tokenized Positional Encoding:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 521, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 521, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 521, + 287, + 714 + ], + "type": "text", + "content": "This ablation study assesses the impact of Tokenized Positional Encoding (TPE), which uses analytical Random Fourier Features for encoding positional information. This study examines TPE's influence on model performance in scenarios of data imbalance and rig transfer generalization. Data imbalance study: When tested on the underrepresented hippo category from the Animal3D dataset [27], TPE based model showed a " + }, + { + "bbox": [ + 46, + 521, + 287, + 714 + ], + "type": "inline_equation", + "content": "3.27\\%" + }, + { + "bbox": [ + 46, + 521, + 287, + 714 + ], + "type": "text", + "content": " improvement in MPJPE over the baseline MLP with a 128-dimensional model performance as evident in first row of Tab. 3. This improvement grew with the model size. These results highlight TPE's scalability and its faster convergence, especially relevant in imbalanced, OOD scenarios as detailed in Fig. 6 (b). The observed performance boosts suggest that TPE's analytical nature might be more suited to adapting to novel data distributions. 
Increasing model size amplifies TPE's benefits," + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 312, + 77, + 533, + 196 + ], + "blocks": [ + { + "bbox": [ + 312, + 77, + 533, + 196 + ], + "lines": [ + { + "bbox": [ + 312, + 77, + 533, + 196 + ], + "spans": [ + { + "bbox": [ + 312, + 77, + 533, + 196 + ], + "type": "image", + "image_path": "c5d67503874aeacdc82b0cf2bc01e81d58acfb7b55c19e3d744ca903ab946d39.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 216, + 545, + 260 + ], + "lines": [ + { + "bbox": [ + 305, + 216, + 545, + 260 + ], + "spans": [ + { + "bbox": [ + 305, + 216, + 545, + 260 + ], + "type": "text", + "content": "Figure 7. The qualitative improvement in rig transfer using analytical TPE versus learnable MLP projection. This visualization reinforces the necessity of TPE in handling OOD data such as different rigs, unseen during training." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 280, + 545, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 280, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 304, + 280, + 545, + 316 + ], + "type": "text", + "content": "hinting that its fixed analytical approach more adeptly handles OOD intricacies compared to learnable methods like MLPs, which may falter in such situations." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 316, + 546, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 546, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 546, + 437 + ], + "type": "text", + "content": "Rig transfer study: Our rig transfer analysis, summarized in Table 3, showcases TPE's adaptability and effectiveness over the MLP baseline across different joint configurations and rig scenarios, with improvements up to " + }, + { + "bbox": [ + 304, + 316, + 546, + 437 + ], + "type": "inline_equation", + "content": "52.3\\%" + }, + { + "bbox": [ + 304, + 316, + 546, + 437 + ], + "type": "text", + "content": ". These findings, particularly the significant performance boost in complex rig transfers, underscore TPE's robustness in OOD contexts. Figure 7 visually highlights the qualitative differences between TPE and MLP approaches in a rig transfer scenario, where the model trained on a 17-joint [7] configuration is tested on a 15 joint [9] setup." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 446, + 457, + 458 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 446, + 457, + 458 + ], + "spans": [ + { + "bbox": [ + 306, + 446, + 457, + 458 + ], + "type": "text", + "content": "5. Discussion and Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 466, + 545, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 466, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 304, + 466, + 545, + 668 + ], + "type": "text", + "content": "The proposed 3D-LFM marks a significant leap in 2D-3D lifting, showcasing scalability and adaptability, addressing data imbalance, and generalizing to new data categories. Its cross-category knowledge transfer requires further investigation and handling of inputs with different perspectives could act as potential limitations. 
3D-LFM's efficiency is demonstrated by achieving results comparable to leading methods on [32] benchmark as well as its proficiency in out-of-distribution (OOD) scenarios on limited computational resources. For training duration and computational details, please refer to the supplementary materials. This work establishes a baseline framework for future 3D pose estimation and 3D reconstruction models. In summary, the 3D-LFM creates a universally applicable model for 3D reconstruction from 2D data, paving the way for diverse applications that requires accurate 3D reconstructions from 2D inputs." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 669, + 545, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 669, + 545, + 703 + ], + "spans": [ + { + "bbox": [ + 306, + 669, + 545, + 703 + ], + "type": "text", + "content": "Acknowledgement: We extend our gratitude to Ian R. Fasel, Tim Clifford, Javier Movellan, and Matthias Hernandez of Apple for their insightful discussions." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10473" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Praneet C Bala, Benjamin R Eisenreich, Seng Bum Michael Yoo, Benjamin Y Hayden, Hyun Soo Park, and Jan Zimmermann. Openmonkeystudio: Automated markerless pose estimation in freely moving macaques. BioRxiv, pages 2020-01, 2020. 2, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 147, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 147, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 147, + 288, + 201 + ], + "type": "text", + "content": "[2] Christoph Bregler, Aaron Hertzmann, and Henning Biermann. Recovering non-rigid 3d shape from image streams. In Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. No. PR00662), pages 690-696. IEEE, 2000. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "type": "text", + "content": "[3] Zheng Chen and Yi Sun. Joint-wise 2d to 3d lifting for hand pose estimation from a single rgb image. Applied Intelligence, 53(6):6421-6431, 2023. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 236, + 288, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 288, + 290 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 288, + 290 + ], + "type": "text", + "content": "[4] Mosam Dabhi, Chaoyang Wang, Kunal Saluja, László A Jeni, Ian Fasel, and Simon Lucey. High fidelity 3d reconstructions with limited physical views. In 2021 International Conference on 3D Vision (3DV), pages 1301-1311. IEEE, 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 291, + 288, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 291, + 288, + 335 + ], + "spans": [ + { + "bbox": [ + 53, + 291, + 288, + 335 + ], + "type": "text", + "content": "[5] Mosam Dabhi, Chaoyang Wang, Tim Clifford, László Jeni, Ian Fasel, and Simon Lucey. Mbw: Multi-view bootstrapping in the wild. Advances in Neural Information Processing Systems, 35:3039-3051, 2022. 2, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 336, + 288, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 336, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 53, + 336, + 288, + 392 + ], + "type": "text", + "content": "[6] Liuhao Ge, Zhou Ren, Yuncheng Li, Zehao Xue, Yingying Wang, Jianfei Cai, and Junsong Yuan. 3d hand shape and pose estimation from a single rgb image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10833-10842, 2019. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 392, + 288, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 392, + 288, + 446 + ], + "spans": [ + { + "bbox": [ + 53, + 392, + 288, + 446 + ], + "type": "text", + "content": "[7] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE transactions on pattern analysis and machine intelligence, 36(7):1325-1339, 2013. 2, 7, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 447, + 288, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 447, + 288, + 491 + ], + "spans": [ + { + "bbox": [ + 53, + 447, + 288, + 491 + ], + "type": "text", + "content": "[8] Haorui Ji, Hui Deng, Yuchao Dai, and Hongdong Li. Unsupervised 3d pose estimation with non-rigid structure-from-motion modeling. arXiv preprint arXiv:2308.10705, 2023. 2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 491, + 288, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 491, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 53, + 491, + 288, + 557 + ], + "type": "text", + "content": "[9] Hanbyul Joo, Hao Liu, Lei Tan, Lin Gui, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social motion capture. In Proceedings of the IEEE International Conference on Computer Vision, pages 3334-3342, 2015. 2, 7, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 558, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 624 + ], + "type": "text", + "content": "[10] Daniel Joska, Liam Clark, Naoya Muramatsu, Ricardo Jericevich, Fred Nicolls, Alexander Mathis, Mackenzie W Mathis, and Amir Patel. 
Acinoset: a 3d pose estimation dataset and baseline models for cheetahs in the wild. In 2021 IEEE international conference on robotics and automation (ICRA), pages 13901-13908. IEEE, 2021. 2, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 625, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 287, + 667 + ], + "type": "text", + "content": "[11] Chen Kong and Simon Lucey. Deep non-rigid structure from motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1558-1567, 2019. 1, 2, 3, 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "text", + "content": "[12] Vincent Lepetit, Francesc Moreno-Noguer, and Pascal Fua. Ep n p: An accurate o (n) solution to the p np problem. International journal of computer vision, 81:155-166, 2009. 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 139 + ], + "type": "text", + "content": "[13] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 140, + 547, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 140, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 307, + 140, + 547, + 205 + ], + "type": "text", + "content": "[14] Sebastian Lutz, Richard Blythman, Koustav Ghosal, Matthew Moynihan, Ciaran Simms, and Aljosa Smolic. Jointformer: Single-frame lifting transformer with error prediction and refinement for 3d human pose estimation. In 2022 26th International Conference on Pattern Recognition (ICPR), pages 1156-1163. IEEE, 2022. 2, 3, 4, 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 205, + 547, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 205, + 547, + 259 + ], + "spans": [ + { + "bbox": [ + 308, + 205, + 547, + 259 + ], + "type": "text", + "content": "[15] Naureen Mahmood, Nima Ghorbani, Nikolaus F Troje, Gerard Pons-Moll, and Michael J Black. Amass: Archive of motion capture as surface shapes. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5442-5451, 2019. 2, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 260, + 547, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 260, + 547, + 304 + ], + "spans": [ + { + "bbox": [ + 308, + 260, + 547, + 304 + ], + "type": "text", + "content": "[16] Julieta Martinez, Rayat Hossain, Javier Romero, and James J Little. A simple yet effective baseline for 3d human pose estimation. In Proceedings of the IEEE international conference on computer vision, pages 2640-2649, 2017. 
2, 3, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 304, + 545, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 304, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 308, + 304, + 545, + 371 + ], + "type": "text", + "content": "[17] Gyeongsik Moon, Shouu-I Yu, He Wen, Takaaki Shiratori, and Kyoung Mu Lee. Interhand2. 6m: A dataset and baseline for 3d interacting hand pose estimation from a single rgb image. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XX 16, pages 548-564. Springer, 2020. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 371, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 371, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 308, + 371, + 545, + 426 + ], + "type": "text", + "content": "[18] David Novotny, Nikhila Ravi, Benjamin Graham, Natalia Neverova, and Andrea Vedaldi. C3dpo: Canonical 3d pose networks for non-rigid structure from motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7688-7697, 2019. 1, 2, 3, 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 426, + 545, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 426, + 545, + 470 + ], + "spans": [ + { + "bbox": [ + 308, + 426, + 545, + 470 + ], + "type": "text", + "content": "[19] Guy Tevet, Brian Gordon, Amir Hertz, Amit H Bermano, and Daniel Cohen-Or. Motionclip: Exposing human motion generation to clip space. In European Conference on Computer Vision, pages 358–374. Springer, 2022. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 471, + 545, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 471, + 545, + 503 + ], + "spans": [ + { + "bbox": [ + 308, + 471, + 545, + 503 + ], + "type": "text", + "content": "[20] Guy Tevet, Sigal Raab, Brian Gordon, Yonatan Shafir, Daniel Cohen-Or, and Amit H Bermano. Human motion diffusion model. arXiv preprint arXiv:2209.14916, 2022. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 503, + 545, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 503, + 545, + 548 + ], + "spans": [ + { + "bbox": [ + 308, + 503, + 545, + 548 + ], + "type": "text", + "content": "[21] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 548, + 545, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 548, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 308, + 548, + 545, + 592 + ], + "type": "text", + "content": "[22] Petar Velicković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. Graph attention networks. In International Conference on Learning Representations, 2018. 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 592, + 545, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 592, + 545, + 647 + ], + "spans": [ + { + "bbox": [ + 308, + 592, + 545, + 647 + ], + "type": "text", + "content": "[23] Bastian Wandt, Marco Rudolph, Petrissa Zell, Helge Rhodin, and Bodo Rosenhahn. Canonpose: Self-supervised monocular 3d human pose estimation in the wild. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 13294-13304, 2021. 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 647, + 545, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 545, + 691 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 545, + 691 + ], + "type": "text", + "content": "[24] Chaoyang Wang and Simon Lucey. Paul: Procrustean autoencoder for unsupervised lifting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 434-443, 2021. 1, 2, 3, 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "text", + "content": "[25] Chaoyang Wang, Chen-Hsuan Lin, and Simon Lucey. Deep nrsfm++: Towards unsupervised 2d-3d lifting in the wild. In" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "10474" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 441 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "text", + "content": "2020 International Conference on 3D Vision (3DV), pages 12-22. IEEE, 2020. 1, 2, 4, 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "text", + "content": "[26] Yu Xiang, Roozbeh Mottaghi, and Silvio Savarese. Beyond Pascal: A benchmark for 3d object detection in the wild. In IEEE winter conference on applications of computer vision, pages 75-82. IEEE, 2014. 2, 5, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 206 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 206 + ], + "type": "text", + "content": "[27] Jiacong Xu, Yi Zhang, Jiawei Peng, Wufei Ma, Artur Jesslen, Pengliang Ji, Qixin Hu, Jiehua Zhang, Qihao Liu, Jiahao Wang, et al. Animal3d: A comprehensive dataset of 3d animal pose and shape. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9099-9109, 2023. 2, 6, 7, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 208, + 287, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 208, + 287, + 252 + ], + "spans": [ + { + "bbox": [ + 48, + 208, + 287, + 252 + ], + "type": "text", + "content": "[28] Haitian Zeng, Xin Yu, Jiaxu Miao, and Yi Yang. Mhr-net: Multiple-hypothesis reconstruction of non-rigid shapes from 2d views. In European Conference on Computer Vision, pages 1-17. Springer, 2022. 
2, 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 254, + 287, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 254, + 287, + 308 + ], + "spans": [ + { + "bbox": [ + 48, + 254, + 287, + 308 + ], + "type": "text", + "content": "[29] Xing Zhang, Lijun Yin, Jeffrey F Cohn, Shaun Canavan, Michael Reale, Andy Horowitz, Peng Liu, and Jeffrey M Girard. Bp4d-spontaneous: a high-resolution spontaneous 3d dynamic facial expression database. Image and Vision Computing, 32(10):692-706, 2014. 2, 7, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 309, + 287, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 309, + 287, + 350 + ], + "spans": [ + { + "bbox": [ + 48, + 309, + 287, + 350 + ], + "type": "text", + "content": "[30] Jianqiao Zheng, Xueqian Li, Sameera Ramasinghe, and Simon Lucey. Robust point cloud processing through positional embedding. arXiv preprint arXiv:2309.00339, 2023. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 353, + 287, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 353, + 287, + 396 + ], + "spans": [ + { + "bbox": [ + 48, + 353, + 287, + 396 + ], + "type": "text", + "content": "[31] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: Unified pretraining for human motion analysis. arXiv preprint arXiv:2210.06551, 2022. 2, 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 398, + 287, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 398, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 48, + 398, + 287, + 441 + ], + "type": "text", + "content": "[32] Yue Zhu, Nermin Samet, and David Picard. H3wb: Human3. 6m 3d wholebody dataset and benchmark. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20166-20177, 2023. 
2, 5, 6, 8" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10475" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/e514998b-c539-47e4-bf66-6c5fccc605eb_content_list.json b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/e514998b-c539-47e4-bf66-6c5fccc605eb_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..0f9657656963a7a9cb733be323ce344ccffab1e6 --- /dev/null +++ b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/e514998b-c539-47e4-bf66-6c5fccc605eb_content_list.json @@ -0,0 +1,1841 @@ +[ + { + "type": "text", + "text": "3D-SceneDreamer: Text-Driven 3D-Consistent Scene Generation", + "text_level": 1, + "bbox": [ + 156, + 130, + 813, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Songchun Zhang $^{1}$ , Yibo Zhang $^{2}$ , Quan Zheng $^{4}$ , Rui Ma $^{2}$ , Wei Hua $^{3}$ , Hujun Bao $^{1}$ , Weiwei Xu $^{1}$ , Changqing Zou $^{1,3*}$", + "bbox": [ + 217, + 179, + 750, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Zhejiang University 2 Jilin University 3 Zhejiang Lab", + "bbox": [ + 246, + 222, + 720, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{4}$ Institute of Software, Chinese Academy of Sciences", + "bbox": [ + 269, + 241, + 694, + 258 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d1f7a6621a947ff51cc845b08531a5daef0f8e8b76e07990f17c1d9ed44a2816.jpg", + "image_caption": [ + "Figure 1. Text-Driven 3D Scene Generation from text prompts. (a) Given a scene description prompt and an arbitrary 6-degree-of-freedom (6-DOF) camera trajectory, our approach progressively generates the full 3D scene by continuously synthesizing 2D novel views. (b) The limitation of mesh representations [12, 16] and the lack of reasonable rectification mechanisms lead to cumulative errors in outdoor scenes, which are respectively marked with yellow and blue dash line boxes. In contrast, our approach can alleviate the problem by introducing a progressive generation pipeline." 
+ ], + "image_footnote": [], + "bbox": [ + 81, + 268, + 553, + 465 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c2671e5361e19388a00d9ce464ff5fb6ed9721ceae32b8342a57cb8252f86856.jpg", + "image_caption": [], + "image_footnote": [ + "Aerial drone shot of a mountain range in the style of cinematic video, shallow depth of field, subject in focus, dynamic movement" + ], + "bbox": [ + 580, + 271, + 691, + 340 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ab64d1a82f00e61c4b83673cab557366a3a1729acc007f1786ff472b038db194.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 270, + 785, + 340 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ae774f98176f34d7cd4c755c84e4f5ba10f2010ddd93e17d23161bc9c2cb2cbd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 787, + 270, + 880, + 340 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e2353ea3abb63236884dd018a3302097572376aa219cdc0d8890f6864371c6b8.jpg", + "image_caption": [ + "(b) Observation" + ], + "image_footnote": [], + "bbox": [ + 580, + 369, + 691, + 445 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7a70c4f0b5ddc98e4973cd2557d36489ffef18bfd57a3c4d789a4549f3104dbe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 371, + 785, + 443 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0d3c579a780a8e924e302dda7cd864b75facc5bf555d62fca1b294709a7a4285.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 787, + 369, + 880, + 449 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 547, + 313, + 561 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Text-driven 3D scene generation techniques have made rapid progress in recent years. Their success is mainly attributed to using existing generative models to iteratively perform image warping and inpainting to generate 3D scenes. However, these methods heavily rely on the outputs of existing models, leading to error accumulation in geometry and appearance that prevent the models from being used in various scenarios (e.g., outdoor and unreal scenarios). To address this limitation, we generatively refine the newly generated local views by querying and aggregating global 3D information, and then progressively generate the 3D scene. Specifically, we employ a tri-plane features-based NeRF as a unified representation of the 3D scene to constrain global 3D consistency, and propose a generative refinement network to synthesize new contents with higher quality by exploiting the natural image prior from 2D diffusion model as well as the global 3D information of the current scene. Our extensive experiments demonstrate that, in comparison to previous methods, our approach supports wide variety of scene generation and arbitrary camera trajectories with improved visual quality and 3D consistency.", + "bbox": [ + 73, + 569, + 472, + 888 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 501, + 546, + 630, + 561 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, with the growing need for 3D creation tools for metaverse applications, attention to 3D scene generation techniques has increased rapidly. Existing tools [11, 44] usually require professional modeling skills and extensive manual labor, which is time-consuming and inefficient. 
To facilitate the 3D scene creation and reduce the need for professional skills, 3D scene generation tools should be intuitive and versatile while ensuring sufficient controllability.", + "bbox": [ + 496, + 580, + 890, + 702 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper focuses on the specific setting of generating consistent 3D scenes from the input texts that describe the 3D scenes. This problem is highly challenging from several perspectives, including the limitation of available text-3D data pairs and the need for ensuring both semantic and geometric consistency of the generated scenes. To overcome the limited 3D data issue, recent text-to-3D methods [42, 62] have leveraged the powerful pre-trained text-to-image diffusion model [48] as a strong prior to optimize 3D representation. However, their generated scenes often have relatively simpler geometry and lack 3D consistency, because 2D prior diffusion models lack the perception of 3D information.", + "bbox": [ + 496, + 705, + 892, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "10170", + "bbox": [ + 478, + 924, + 519, + 936 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/efbee3c07137c1e213a33737d20e908f74c2964a806245f6a0125a2fc3fbb76c.jpg", + "image_caption": [ + "Figure 2. Comparison with existing designs. (a) The feedforward approaches use depth-based warping and refinement operations to generate novel views of the scene without a unified representation. (b) The warping-inpainting approaches use mesh as a unified representation and generate the scene through iterative inpainting. (c) We replace the mesh with NeRF as the unified representation and alleviate the cumulative error issue by incorporating a generative refinement model. This allows our framework to support the generation of a wider range of scene types." + ], + "image_footnote": [], + "bbox": [ + 78, + 88, + 472, + 251 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Some recent methods [12, 16] introduce the monocular depth estimation model [45, 46] as a strong geometric prior and follow the warping-inpainting pipeline [26, 29] for progressive 3D scene generation, which partially solves the inconsistency problem. Although these methods can generate realistic scenes with multi-view 3D consistency, they mainly focus on indoor scenes and fail to handle large-scale outdoor scene generation as illustrated in Fig. 1 (b). This can be attributed to two main aspects: (1) Due to the adoption of an explicit 3D mesh as the unified 3D representation, the noise of the depth estimation in the outdoor scene can cause a large stretch of the scene geometry; (2) The lack of an efficient rectification mechanism in the pipeline leads to an accumulation of geometric and appearance errors.", + "bbox": [ + 75, + 395, + 468, + 606 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we present a new framework, named 3D-SceneDreamer that provides a unified solution for text-driven 3D consistent indoor and outdoor scene generation. 
Our approach employs a tri-planar feature-based radiance field as a unified 3D representation instead of 3D mesh, which is advantageous for general scene generation (especially in outdoor scenes) and supports navigating with arbitrary 6-DOF camera trajectories. Afterwards, we model the scene generation process as a progressive optimization of the NeRF representation, while a text-guided and scene-adapted generative novel view synthesis is employed to refine the NeRF optimization. Fig. 2 shows a comparison of our design with existing text-to-scene pipelines.", + "bbox": [ + 75, + 609, + 468, + 806 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Specifically, we first perform scene initialization, which consists of two stages, i.e., generating a supporting database and optimizing the initial scene representation. We first use the input text prompt and the pre-trained diffusion model [48] to generate the initial image as an appearance prior. Then, we use an off-the-shelf depth estimation model [2]", + "bbox": [ + 75, + 809, + 468, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to provide the geometric prior for the corresponding scene. Inspired by [66], to prevent NeRF from over-fitting for the single view image, we construct a database via differentiable spatial transformation [18] and use it for optimizing the initial NeRF representation of the generated scene. To generate the extrapolated content, we use volume rendering and trilinear interpolation in the novel viewpoints to obtain the initial rendered images and their corresponding feature maps. These outputs are later fed into our 3D-aware generative refinement model, whose output images are subsequently added as new content to the supporting database. Next, in conjunction with the new data, we progressively generate the whole 3D scene by updating our 3D representation through our incremental training strategy.", + "bbox": [ + 496, + 90, + 890, + 301 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Extensive experiments demonstrate that our approach significantly outperforms the state-of-the-art text-driven 3D scene generation method in both visual quality and 3D consistency. To summarize, our technical contributions are as follows:", + "bbox": [ + 496, + 301, + 890, + 377 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We provide a unified solution for text-driven consistent 3D scene generation that supports both indoor and outdoor scenes as well as allows navigation with arbitrary 6-DOF camera trajectories.", + "- We propose to use a tri-planar feature-based neural radiance field as a global 3D representation of the scene to generate continuous scene views, which preserves the 3D consistency of the scene, empowered by a progressive optimization strategy.", + "- We propose a new generative refinement model, which explicitly injects 3D information to refine the coarse view generated by novel view synthesis and then incorporates the new views to refine the NeRF optimization." + ], + "bbox": [ + 500, + 378, + 890, + 574 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 588, + 640, + 603 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Text-Driven 3D Content Generation. Recently, motivated by the success of text-to-image models, employing pretrained 2D diffusion models to perform text-to-3D generation has gained significant research attention. 
Some pioneering works [42, 61] introduce the Score Distillation Sampling (SDS) and utilize 2D diffusion prior to optimize 3D representation. Subsequent works [8, 28, 34, 62] further enhance texture realism and geometric quality. However, they primarily focus on improving object-level 3D content generation rather than large-scale 3D scenes. Recent works [12, 16, 66] have proposed some feasible solutions for 3D scene generation. By utilizing the pre-trained monocular depth model and the inpainting model, they generate the 3D scene progressively based on the input text and camera trajectory. However, due to the underlying 3D representation or optimization scheme, these methods are limited in several aspects. For example, as [12, 16] utilize explicit mesh as 3D representation, it is difficult for them to generate outdoor scenes. Besides, their mesh outputs", + "bbox": [ + 496, + 613, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "10171", + "bbox": [ + 480, + 924, + 517, + 936 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "also suffer from fragmented geometry and artifacts due to imprecise depth estimation results. Although Text2NeRF achieves to generate high-quality indoor and outdoor scenes by replacing the meshes with neural radiance fields [35], it can only generate camera-centric scenes. In contrast, our approach not only supports more general 3D scene generation but can also handle arbitrary 6DOF camera trajectories.", + "bbox": [ + 75, + 90, + 472, + 196 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Text-Driven Video Generation. Text-Driven Video Generation aims to create realistic video content based on textual conditions. In the early stages, this task was approached using GAN [1, 25, 41] and VAE [33, 38] generative models, but the results were limited to low-resolution short video clips. Following the significant advancements in text-to-image models, recent text-to-video works extend text-to-image models such as transformer [17, 64, 65] and diffusion model [3, 14, 15, 32, 53, 68] for video generation. These approaches enable the generalization of high-quality and open-vocabulary videos, but require a substantial amount of text-image or text-video pairs of data for training. Text2Video-Zero [19] proposes the first zero-shot text-to-video generation pipeline that does not rely on training or optimization, but their generated videos lack smoothness and 3D consistency. Our method is capable of generating smooth and long videos which are consistent to the scenes described by the input text, without the need for large-scale training data. Furthermore, the utilization of NeRF as the 3D representation enhances the 3D consistency of our videos.", + "bbox": [ + 75, + 198, + 472, + 512 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "View Synthesis with Generative Models. Several early stage studies [5, 21, 22, 26, 29, 63] employ GAN to synthesize new viewpoints. However, the training process of GAN is prone to the issue of mode collapse, which limits the diversity of generation results. Diffusion model has been shown its capability to generate diverse and high-quality images and videos. In recent view synthesis works [4, 7, 51, 57], diffusion models have been employed to achieve improved scene generation results over prior works. For example, in Deceptive-NeRF [30], pseudo-observations are synthesized by diffusion models and these observations are further utilized for enhance the NeRF optimization. 
Closely similar to [30], our method proposes a geometry-aware diffusion refinement model to reduce the artifacts of the input coarse view generated by the initial novel view synthesis. With the 3D information from NeRF features injected to the refinement process, we can achieve globally consistent 3D scene generation.", + "bbox": [ + 75, + 513, + 472, + 786 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Neural Radiance Fields Revisited", + "text_level": 1, + "bbox": [ + 76, + 799, + 380, + 815 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Neural Radiance Fields (NeRF) [59] is a novel view synthesis technique that has shown impressive results. It represents the specific 3D scene via an implicit function, denoted as $f_{\\theta}:(\\pmb {x},\\pmb {d})\\mapsto (\\mathbf{c},\\sigma)$ , given a spatial location $\\mathbf{x}$ and a ray direction $\\mathbf{d}$ , where $\\theta$ represents the learnable parameters,", + "bbox": [ + 75, + 824, + 468, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and $\\mathbf{c}$ and $\\sigma$ are the color and density. To render a novel image, NeRF marches a camera ray $\\mathbf{r}(t) = \\mathbf{o} + t\\mathbf{d}$ starting from the origin $\\mathbf{o}$ through each pixel and calculates its color $\\hat{\\boldsymbol{C}}$ and rendered depth $\\hat{\\boldsymbol{D}}$ via the volume rendering quadrature, i.e., $\\hat{\\boldsymbol{C}} (\\mathbf{r}) = \\sum_{i = 1}^{N}T_{i}\\alpha_{i}\\mathbf{c}_{i}$ and $\\hat{\\boldsymbol{D}} (\\mathbf{r}) = \\sum_{i = 1}^{N}T_{i}\\alpha_{i}t_{i}$ , where $T_{i} = \\exp \\left(-\\sum_{j = 1}^{i - 1}\\sigma_{j}\\delta_{j}\\right)$ , $\\alpha_{i} = (1 - \\exp (-\\sigma_{i}\\delta_{i}))$ , and $\\delta_{k} = t_{k + 1} - t_{k}$ indicates the distance between two point samples. Typically, stratified sampling is used to select the point samples $\\{t_i\\}_{i = 1}^N$ between $t_n$ and $t_f$ , which denote the near and far planes of the camera. When multi-view images are available, $\\theta$ can be easily optimized with the MSE loss:", + "bbox": [ + 496, + 90, + 890, + 263 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\theta} = \\sum_ {\\boldsymbol {r} \\in \\mathcal {R}} \\left\\| \\hat {\\boldsymbol {C}} (\\boldsymbol {r}) - \\boldsymbol {C} (\\boldsymbol {r}) \\right\\| _ {2} ^ {2} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 598, + 266, + 890, + 303 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathcal{R}$ is the collection of rays, and $C$ indicates the ground truth color.", + "bbox": [ + 496, + 311, + 890, + 343 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4. Methods", + "text_level": 1, + "bbox": [ + 500, + 359, + 599, + 376 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1. Overview", + "text_level": 1, + "bbox": [ + 500, + 385, + 609, + 400 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a description of the target scene a the input text prompt $\\mathbf{p}$ , and a pre-defined camera trajectory denoted by $\\{\\mathbf{T}_i\\}_{i=1}^N$ , our goal is to generate a 3D scene along the camera trajectory with the multiview 3D consistency.", + "bbox": [ + 496, + 409, + 890, + 469 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The overview of the proposed model is illustrated in Fig. 3. We first introduce the acquisition of appearance and structural priors in Sec. 4.2, which serve as the scene initialization. 
The formulation of Unified Scene Representation and its optimization with the former priors are presented in Sec. 4.3. To synthesize new content while maintaining the multiview consistency, we propose a geometry-aware refinement model in Sec. 4.4. Finally, the full online scene generation process is presented in Sec. 4.5.", + "bbox": [ + 496, + 470, + 890, + 607 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.2. Scene Context Initialization", + "text_level": 1, + "bbox": [ + 498, + 619, + 750, + 635 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the input textual prompt $\\mathbf{p}$ , we first utilize a pretrained stable diffusion model to generate an initial 2D image $\\mathbf{I}_0$ , which serves as an appearance prior for the scene. Then, we feed this image into the off-the-shelf depth estimation model [2], and take the output as a geometric prior for the target scene, denoted as $\\mathbf{D}_0$ . Inspired by [66], we construct a supporting database $S = \\{((\\mathbf{D}_i,\\mathbf{I}_i,\\mathbf{T}_i)\\}_{i = 1}^N$ via differentiable spatial transformation [18] and image inpainting [16] techniques, where $N$ denotes the number of initial viewpoints. This database provides additional views and depth information, which could prevent the model from overfitting to the initial view. With the initial supporting database, we can initialize the global 3D representation. The data generated by our method will be continuously appended to this supporting database for continuous optimization of the global 3D representation. More details are provided in our supplemental materials.", + "bbox": [ + 496, + 643, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "10172", + "bbox": [ + 478, + 925, + 519, + 936 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a184d31afa8507f0e45a91c95f11f4d804eadb76155b7671d4dd8922b33d832f.jpg", + "image_caption": [ + "Figure 3. Overview of our pipeline. (a) Scene Context Initialization contains a supporting database to provide novel viewpoint data for progressive generation. (b) Unified 3D Representation provides a unified representation for the generated scene, which allows our approach to accomplish more general scene generation and to hold the 3D consistency at the same time. (c) 3D-Aware Generative Refinement alleviates the cumulative error issue during long-term extrapolation by exploiting large-scale natural images prior to generatively refine the synthesized novel viewpoint image. The consistency regularization module is used for test-time optimization." + ], + "image_footnote": [], + "bbox": [ + 86, + 89, + 883, + 338 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.3. Unified Scene Representation", + "text_level": 1, + "bbox": [ + 76, + 417, + 339, + 434 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Though previous methods [26, 29] have achieved novel view generations via differentiable rendering-based frame-to-frame warping, there are still drawbacks: (1) the global 3D consistency is not ensured, (2) cumulative errors occur in long-term generation, (3) complex scenes may lead to failure. To tackling above issues, we propose a tri-planar feature-based NeRF as the unified representation. 
Compared with previous methods [12, 16, 26, 29], our approach constrains the global 3D consistency while handling the scene generation with complex appearances and geometries.", + "bbox": [ + 75, + 444, + 470, + 609 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Tri-planar Feature Representation. For constructing the feature tri-planes $\\mathbf{M} = \\{\\mathbf{M}_{xy},\\mathbf{M}_{yz},\\mathbf{M}_{xz}\\} \\in \\mathbb{R}^{3\\times S\\times S\\times D}$ from the input images, where $S$ is the spatial resolution and $D$ is the feature dimension, we first extract 2D image features from supporting views using the pre-trained ViT from DINoV2 [40] because of its strong capability in modeling cross-view correlations. We denote the extracted feature corresponding to image $\\mathbf{I}_i$ as $\\mathbf{F}_i$ , and the feature set obtained from all input views is denoted as $\\{\\mathbf{F}_i\\}_{i = 1}^N$ . To lift the local 2D feature maps into the unified 3D space, similar to the previous work [67], we back-project the extracted local image features $\\mathbf{F}$ into a 3D feature volume $\\mathbf{V}$ along each camera ray. To avoid the cubic computational complexity of volumes, we construct a tri-planar representation by projecting the 3D feature volume $\\mathbf{V}$ into its respective plane via three separate encoders. This representation reduces the complexity from feature dimensionality reduction, but with equivalent information compared to purely 2D feature representations (e.g., BEV representations [10, 27]).", + "bbox": [ + 75, + 613, + 472, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Implicit Radiance Field Decoder. Based on the constructed tri-planar representation $\\mathbf{M}$ , we can reconstruct the images with target poses via our implicit radiance field decoder module $\\Psi = \\{f_{g}, f_{c}\\}$ , where $f_{g}$ and $f_{c}$ indicate the geometric feature decoder and appearance decoder. Given a 3D point $p = [i, j, k]$ and a view direction $d$ , we orthogonally project $p$ to each feature plane in $\\mathbf{M}$ with bilinear sampling to obtain the conditional feature $\\mathbf{M}_p = [\\mathbf{M}_{xy}(i, j), \\mathbf{M}_{yz}(j, k), \\mathbf{M}_{xz}(i, k)]$ . We feed $\\mathbf{M}_p$ into the geometric feature decoder to obtain the predicted density $\\sigma$ and the geometric feature vector $\\mathbf{g}$ , after which we further decode its color $c$ :", + "bbox": [ + 496, + 417, + 893, + 599 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n(\\sigma , \\boldsymbol {g}) = f _ {g} (\\gamma (\\boldsymbol {x}), \\mathbf {M} _ {p})\n$$\n", + "text_format": "latex", + "bbox": [ + 616, + 612, + 774, + 628 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {c} = f _ {c} (\\gamma (\\boldsymbol {x}), \\gamma (\\boldsymbol {d}), \\boldsymbol {g}, \\mathbf {M} _ {p}) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 628, + 890, + 648 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\gamma (\\cdot)$ indicates the positional encoding function. Then we can calculate the pixel color via an approximation of the volume rendering integral mentioned in Sec. 3.", + "bbox": [ + 496, + 655, + 890, + 700 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Training Objective. To optimize our 3D representation, we leverage the ground truth colors from the target image as the supervisory signal. 
Additionally, in the setting with sparse input views, we employ the estimated dense depth map to enhance the model's learning of low-frequency geometric information and prevent overfitting to appearance details. Our optimization objective is as follows:", + "bbox": [ + 496, + 700, + 890, + 806 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\sum_ {\\boldsymbol {r} \\in \\mathcal {R}} \\left(\\mathcal {L} _ {\\text {p h o t o}} (\\boldsymbol {r}) + \\lambda \\mathcal {L} _ {\\text {d e p t h}} (\\boldsymbol {r})\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 570, + 813, + 890, + 845 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_{\\text{photo}}(\\boldsymbol{r}) = \\left\\| \\hat{\\boldsymbol{C}}(\\boldsymbol{r}) - \\boldsymbol{C}(\\boldsymbol{r}) \\right\\|^2$ , $\\mathcal{L}_{\\text{depth}}(\\boldsymbol{r}) = \\left\\| \\hat{\\mathbf{D}}_{\\mathbf{r}}^*(\\boldsymbol{r}) - \\mathbf{D}^*(\\boldsymbol{r}) \\right\\|^2$ , $\\mathcal{R}$ denotes the collection of rays gen-", + "bbox": [ + 498, + 849, + 890, + 904 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "10173", + "bbox": [ + 478, + 924, + 517, + 936 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "erated from the images in the supporting database, $\\lambda$ indicates the balance weight of the depth loss, and $\\mathbf{D}^{*}(\\boldsymbol{r})$ and $\\hat{\\mathbf{D}}_{\\mathbf{r}}^{*}(\\boldsymbol{r})$ denote the rendered depth and the depth obtained from the pre-trained depth estimation model. Since monocular depths are not scale- and shift-invariant, both depths are normalized per frame.", + "bbox": [ + 75, + 90, + 472, + 183 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.4. 3D-Aware Generative Refinement", + "text_level": 1, + "bbox": [ + 76, + 191, + 372, + 207 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given a sequence of poses and an initial viewpoint, previous methods [12, 16, 66] usually generate novel views by the warping-inpainting pipeline. Though these methods have achieved promising results, they suffer from two issues: (1) The lack of rectification mechanisms in these methods can lead to error accumulation. (2) The lack of explicit 3D information during the inpainting process of these methods can lead to insufficient 3D consistency. Therefore, we propose a 3D-Aware Generative Refinement model to alleviate the above issues. On the one hand, we introduce an efficient refinement mechanism to reduce the cumulative error in the novel view generation. On the other hand, we explicitly inject 3D information during the process of generating novel views to enhance 3D consistency. We will describe the model design below.", + "bbox": [ + 75, + 215, + 468, + 441 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Model Design. Given a novel viewpoint with camera pose $\\mathbf{T}_i$ , the tri-planar features $\\mathbf{M}$ , we can obtain the rendered image $\\mathbf{I}_r$ , rendered depth $\\mathbf{D}_r$ and the corresponding 2D feature map $\\mathbf{F}_r$ via the radiance field decoder module $\\Psi$ and volume rendering. For convenience, we model the whole process with a mapping operator $\\mathcal{F}_{ren}:\\{\\mathbf{T}_i,\\mathbf{M}\\} \\mapsto \\{\\mathbf{I}_r,\\mathbf{F}_r,\\mathbf{D}_r\\}$ . 
Note that the feature map is computed similarly to the color and depth, i.e., by numerical quadrature, and can be formulated as", + "bbox": [ + 76, + 441, + 468, + 578 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {r} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i} \\left(1 - \\exp \\left(- \\sigma_ {i} \\delta_ {i}\\right)\\right) \\boldsymbol {g} _ {i} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 147, + 583, + 468, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\pmb{g}_i$ indicates the feature vector decoded by $f_{g}$ , and $N$ denotes the total number of point samples on the ray $\\pmb{r}$ .", + "bbox": [ + 75, + 631, + 467, + 662 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Although the quality of the rendered coarse results may not be very high, they can still provide reasonable guidance for the extrapolated view generation according to the current scene. Based on this assumption, we propose to take the rendered image and the feature map as conditional inputs to a pre-trained 2D stable diffusion model and generate a refined synthetic image $\\hat{\\mathbf{I}}_r$ via fine-tuning the model, which allows to leverage natural image priors derived from internet-scale data. The process can be formulated as:", + "bbox": [ + 75, + 662, + 467, + 797 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {I}} _ {r} = \\mathcal {F} _ {g e n} (\\mathbf {I} _ {r}, \\tau (\\mathbf {p}), \\mathcal {G} (\\mathbf {F} _ {r})) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 810, + 467, + 829 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{F}_{gen}$ denotes our generative refinement model, $\\tau (\\mathbf{p})$ indicates the input text embedding, and $\\mathcal{G}$ denotes the feature adapter for learning the mapping from external control information to the internal knowledge in LDM.", + "bbox": [ + 75, + 839, + 467, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Scene-Adapted Diffusion Model Fine-Tuning. For the scene generation task, we propose to leverage the rich 2D priors in the pre-trained latent diffusion model instead of training a new model from scratch. Thus, we jointly train the feature adapter, the radiance field decoder, and the feature aggregation layer, while keeping the parameters of stable diffusion fixed. The objective of the fine-tuning process is shown below:", + "bbox": [ + 496, + 90, + 890, + 210 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {A D} = \\mathbb {E} _ {t, \\epsilon \\sim \\mathcal {N} (0, I)} \\left[ \\| \\epsilon_ {\\theta} \\left(\\boldsymbol {z} _ {t}, t, \\tau (\\mathbf {p}), \\mathbf {F} _ {r}, \\mathbf {I} _ {r}\\right) - \\epsilon \\| _ {2} ^ {2} \\right] \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 511, + 217, + 890, + 242 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With the rendered feature map $\\mathbf{F}_r$ containing information about the appearance and geometry, we can control the pre-trained text-to-image diffusion model to generate images that are consistent with the content of generated images from previous viewpoints. In addition, our model inherits the high-quality image generation ability of the stable diffusion model, which ensures the plausibility of the generated views. 
The pre-trained prior and our effective conditional adaptation enable our model to have generalization ability in novel scenes.", + "bbox": [ + 496, + 250, + 890, + 400 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Global-Local Consistency Regularization. In the online generation process, though our model can rectify the coarse rendering results, we do not explicitly constrain the 3D consistency across views when synthesizing novel views. Therefore, we design a regularization term $\\mathcal{L}_{\\text{cons}}$ for test-time optimization, which shares the same formula as Eq. (6) to guarantee the plausibility of the generated novel views. Specifically, we expect that 3D consistency exists between novel views obtained from geometric projection using local geometric information (i.e., monocular depth estimation) and novel views generated using global geometric information (i.e., global tri-planar 3D representation). Thus, we simultaneously generate novel views based on the previous warping-and-inpainting pipeline and use them as supervisory signals to further fine-tune the feature adapter.", + "bbox": [ + 496, + 401, + 890, + 627 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.5. Online Scene Generation Process.", + "text_level": 1, + "bbox": [ + 498, + 635, + 794, + 650 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we introduce our online 3D scene generation process, which consists of three parts: scene representation initialization, extrapolation content synthesis, and incremental training strategy.", + "bbox": [ + 496, + 659, + 890, + 718 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Scene Representation Initialization. Given the input textual prompt, we first generate an initial 2D image using a pre-trained stable diffusion model, after which we construct a supporting database $S$ via the method mentioned in Sec. 4.2. Then, by exploiting the data from the database, as well as the photometric loss (Eq. (3)), we can optimize the unified representation. To prevent the model from overfitting to high-frequency details, we allow the model to learn low-frequency geometric information better by utilizing the depth priors. [60].", + "bbox": [ + 496, + 719, + 890, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Extrapolated Content Synthesis. To generate the extrapolated content, we proceed by retrieving the next pose, de", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "10174", + "bbox": [ + 480, + 925, + 517, + 936 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "noted as $\\mathbf{T}_i$ , from the pose sequence $\\{\\mathbf{T}_i\\}_{i=1}^N$ . We then employ volumetric rendering to obtain a coarse view of the current viewpoint and the corresponding feature map. These rendered outputs are used as conditional inputs to our generative refinement model $\\mathcal{F}_{gen}$ for generating a refined view. Due to the presence of a generative refinement mechanism, our extrapolation method mitigates the effects of cumulative errors. The refined view from the model $\\mathcal{F}_{gen}$ is subsequently added to the supporting database $\\mathcal{S}$ as new content. Incremental Training Strategy. After obtaining the new content, we then need to update the unified representation. However, fine-tuning only on the newly generated data can lead to catastrophic forgetting, whereas fine-tuning on the entire dataset requires excessively long training time. 
Inspired by [54], we sample a sparse set of rays $\\mathcal{Q}$ according to the information gain to optimize the representation, thus improving the efficiency of the incremental training.", + "bbox": [ + 75, + 89, + 472, + 349 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 363, + 209, + 380 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Implementation details.", + "text_level": 1, + "bbox": [ + 76, + 388, + 294, + 404 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We implemented our system using PyTorch. For the differentiable rendering part, we utilized [13] for depth estimation. To avoid the occurrence of black holes, we referred to the implementation in [18] to generate surrounding views. For the text-guided image generation, we use the publicly available stable diffusion code from Diffusers [58]. For the multi-view consistency image generation, we refer to the implementation of T2I-Adapter [39] to inject the depth feature conditions. In the progressive NeRF reconstruction part, we refer to the tri-planar implementation in [6]. We conducted all experiments using 4 NVIDIA RTX A100 GPUs for training and inference. More details can be found in our supplementary material.", + "bbox": [ + 75, + 411, + 472, + 609 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Evaluation metrics.", + "text_level": 1, + "bbox": [ + 76, + 619, + 263, + 633 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Image quality. We evaluate the quality of our generated images using CLIP Score (CS), Inception Score (IS), Blind/Referenceless Image Spatial Quality Evaluator (BRISQUE) [36] and Natural Image Quality Evaluator (NQIE) [37]. The Inception Score is based on the diversity and predictability of the generated images. CLIP Score uses a pre-trained CLIP model [43] to measure the similarity between text and images. Note that existing visual quality metrics such as FID cannot be used since the scenes generated by text-to-3D approaches do not exhibit the same underlying data distribution.", + "bbox": [ + 75, + 643, + 468, + 809 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Multiview Consistency. Given a sequence of rendered images, we evaluate the multi-view consistency of our generated scene using Camera Error (CE), Depth Error (DE), and flow-warping error (FE) metrics. Motivated by [10, 12], we use COLMAP [50], a reliable SfM technique, to compute the camera trajectory and the sparse 3D point cloud. CE", + "bbox": [ + 75, + 809, + 470, + 901 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/f6f87e370fc29a3f1345f2eba7d37bd8c1cd2cd8a6adc3168fea476987663269.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table>
<tr><td rowspan="2">Method</td><td rowspan="2">3D Representation</td><td colspan="3">3D Consistency</td><td colspan="4">Visual Quality</td></tr>
<tr><td>DE↓</td><td>CE↓</td><td>SfM rate↑</td><td>CS↑</td><td>BRISQUE↓</td><td>NIQE↓</td><td>IS↑</td></tr>
<tr><td>Inf-Zero [26]</td><td>-</td><td>-</td><td>1.189</td><td>0.38</td><td>-</td><td>21.43</td><td>5.85</td><td>2.34</td></tr>
<tr><td>3DP [52]</td><td>LDI&Mesh</td><td>0.42</td><td>0.965</td><td>0.47</td><td>-</td><td>29.95</td><td>5.84</td><td>1.75</td></tr>
<tr><td>PixelSynth [47]</td><td>Point Cloud</td><td>0.36</td><td>0.732</td><td>0.52</td><td>-</td><td>36.74</td><td>4.98</td><td>1.28</td></tr>
<tr><td>ProlificDreamer [62]</td><td>NeRF</td><td>-</td><td>-</td><td>-</td><td>23.41</td><td>27.97</td><td>6.75</td><td>1.21</td></tr>
<tr><td>Text2Room [16]</td><td>Mesh</td><td>0.24</td><td>0.426</td><td>0.63</td><td>28.15</td><td>28.37</td><td>5.46</td><td>2.19</td></tr>
<tr><td>Scenescape [12]</td><td>Mesh</td><td>0.18</td><td>0.394</td><td>0.76</td><td>28.84</td><td>24.54</td><td>4.78</td><td>2.23</td></tr>
<tr><td>Ours</td><td>NeRF</td><td>0.13</td><td>0.176</td><td>0.89</td><td>29.97</td><td>23.64</td><td>4.66</td><td>2.62</td></tr>
</table>
", + "bbox": [ + 501, + 88, + 890, + 172 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/713867d21cf0bdcf69f8c4a91158d44ffd34d9a7bc5b81c55262c784f000c8d4.jpg", + "table_caption": [ + "Table 1. Comparison with text-to-scene methods. We compare our approach with two categories of approaches, i.e., pure text-driven 3D generation and text-to-image generation followed by 3D scene generation. Metrics on 3D consistency and visual quality are illustrated." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td>Method</td><td>FE↓</td><td>CS↑</td><td>BRISQUE↓</td><td>NIQE↓</td><td>IS↑</td></tr>
<tr><td>VideoFusion [32]</td><td>0.039</td><td>23.54</td><td>27.39</td><td>5.94</td><td>2.21</td></tr>
<tr><td>GEN-2 [49]</td><td>0.032</td><td>27.54</td><td>25.65</td><td>5.24</td><td>2.38</td></tr>
<tr><td>Ours</td><td>0.028</td><td>29.95</td><td>23.53</td><td>4.70</td><td>2.69</td></tr>
</table>
", + "bbox": [ + 501, + 255, + 890, + 321 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/009b812cea4e43480f12c4f6e01d422cdbca0c96abfcfcf9ab39c80082cff278.jpg", + "table_caption": [ + "Table 2. Comparison with text-to-video methods. Metrics on flow warping error (FE) and visual quality are illustrated." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Method</td><td>CS↑</td><td>BRISQUE↓</td><td>NIQE↓</td><td>IS↑</td></tr>
<tr><td>Text2Light [9]</td><td>26.16</td><td>49.26</td><td>6.15</td><td>2.54</td></tr>
<tr><td>MVDiffusion [55]</td><td>27.25</td><td>31.54</td><td>5.47</td><td>2.76</td></tr>
<tr><td>Ours</td><td>28.12</td><td>24.15</td><td>4.96</td><td>2.79</td></tr></table>
", + "bbox": [ + 501, + 359, + 890, + 431 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/d4e07cd0ec7f45988785207baf82d772475547aa7c4562590bf0176ea833233e.jpg", + "table_caption": [ + "Table 3. Comparison with text-to-panorama methods. We compare our method with recent text-driven 3D generation methods [9, 55]. Metrics on visual quality are illustrated." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Method</td><td>DE↓</td><td>CE↓</td><td>SfM rate↑</td><td>CS↑</td><td>BRISQUE↓</td><td>NIQE↓</td></tr>
<tr><td>Full Model</td><td>0.13</td><td>0.176</td><td>0.89</td><td>29.97</td><td>26.18</td><td>6.54</td></tr>
<tr><td>W/o UR</td><td>0.46</td><td>0.764</td><td>0.41</td><td>22.71</td><td>27.95</td><td>5.81</td></tr>
<tr><td>W/o GRM</td><td>0.59</td><td>0.981</td><td>0.46</td><td>22.12</td><td>29.64</td><td>5.75</td></tr>
<tr><td>W/o CR</td><td>0.19</td><td>0.254</td><td>0.78</td><td>28.14</td><td>27.16</td><td>6.12</td></tr></table>
", + "bbox": [ + 501, + 486, + 890, + 556 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 4. Ablations. For brevity, we use UR, GRM, CR to denote Unified Representation, Generative Refinement Model and Consistency Regularization, respectively.", + "bbox": [ + 496, + 560, + 890, + 603 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "is computed by comparing the difference between the predicted trajectory and the given trajectory, and DE is computed by comparing the difference between the sparse depth map obtained by COLMAP and the estimated depth map. In addition, to account for temporal consistency, we follow [23] and use RAFT [56] to compute FE.", + "bbox": [ + 496, + 608, + 890, + 700 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. Comparisons", + "text_level": 1, + "bbox": [ + 500, + 709, + 638, + 726 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. Since there are only a few baselines directly related to our approach, we also take into account some methods with similar capabilities and construct their variants for comparison. Specifically, the following three categories of methods are included:", + "bbox": [ + 496, + 733, + 893, + 808 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "- Text-to-Scene. There exist techniques [12, 16] that generate 3D meshes iteratively by employing warping and inpainting processes, allowing for direct comparisons with our proposed methods. Moreover, image-guided 3D generation methods [24, 26, 47] are also available, wherein initial images can be produced using a T2I model. Subse", + "bbox": [ + 500, + 809, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "10175", + "bbox": [ + 478, + 925, + 519, + 936 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/65ed0448407265cbc362a5b1d0cd09f28ab146347d16340e3909089a5ec239c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 99, + 97, + 200, + 175 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2e9b0df492708e819cefd86a4f8484c48808126114dcb5ca4910e718367bcd8f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 210, + 98, + 313, + 175 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/365299417889dd154f217046dee628d0d66cf75c41ec0f4447407f908f38a486.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 98, + 424, + 175 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d79b069678db8c93d7181e7cb2c145a3f8589fabb2d18f5ab3fc2d06f25ef2b0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 434, + 98, + 535, + 175 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a6bc8fc880685fc0f368de228ad5512f56cb16bfbc550c7bbf90488a79d09577.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 545, + 98, + 647, + 175 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ed90cb84fa4ecc1653c5bcc660d88f963d3ea3564d14615d91a0001a29099d11.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 98, + 759, + 175 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b48aff05088edfbd8a8d513eb3d4b1b6934f69667e0485c159279dfdeb5b8756.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 769, + 98, + 870, + 175 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a0e445f9c89f8e06600d05fe4f47d529595356557807f92ecfec7035da14fdc6.jpg", + "image_caption": [ + "POV, A versatile room with a sofa 
as the centerpiece, a bookshelf, a lamp, a desk TV, masterpiece" + ], + "image_footnote": [], + "bbox": [ + 99, + 189, + 200, + 265 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bf3993ff869f02f6c8f0a1947b7fc58dbedc630c6ead31f6bc2152b785782462.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 210, + 189, + 312, + 265 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/05d1187a6b7b0854d933452ba99d28d7fee44346e266b26ed0f7d104298a1d6d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 189, + 424, + 265 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7b8c450ac0456d6d93dec9fef4b5fd7ffcb62717db7e6cec41b56c9884f5c1e9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 434, + 189, + 534, + 265 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c282522fcfa6e7d542dc9868b8aea235526e12e6b0c12d6bb958ae2eb274d308.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 545, + 189, + 645, + 265 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f6aa10bd642ba5905a67874a4423ddbc52fc6d4560d0d7e75322a68bf5c6400d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 189, + 758, + 265 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1958f17e5af93d8ae0472ae81108fc72a8aad031ead136868c78114901c80134.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 769, + 189, + 870, + 265 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bf30f6742ceff9037e1b46598b0e315e236e4acc294c6f4dc0a20cefdc484093.jpg", + "image_caption": [ + "Figure 4. Quantitative Results. From our results, it can be seen that our approach produces high-fidelity scenes with stable 3D consistency in indoor scenes, outdoor scenes, and unreal-style scenes. More high-resolution results can be found in the supplementary material." 
+ ], + "image_footnote": [], + "bbox": [ + 99, + 276, + 199, + 354 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6f022ac78801c86286c6d5fa2e28a02faf746d8362275ddc961bcedcfea4883c.jpg", + "image_caption": [ + "POV, walking through a palace in fantasy style, master piece, indoor scene" + ], + "image_footnote": [], + "bbox": [ + 210, + 276, + 312, + 354 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/97864f2f12fc7b955f01f560c67066562bc30650692f1944bdb8312c61a318f3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 276, + 423, + 354 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ec12fe1703a1a1b92b32f68b7da4e70f8caa128c26b06db6df9cb968395d050e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 433, + 276, + 534, + 354 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/03053ae95128d28fc9a5d7d30573809ee2383b3141ee16dee17bbf3216dde2e7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 545, + 276, + 647, + 354 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/082e34233df5852860772145312becc712814e5891d4c9da63d67c588ccce236.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 276, + 758, + 354 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3e39023510c67838f8fa7e48c99114f4b4298baeecf82532dc7abefa60e66bd2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 769, + 276, + 870, + 352 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f61e7435d707adccc16cbef184f9c1ad7bf5dd4fee74337e379b6d77459aded0.jpg", + "image_caption": [ + "This kitchen is a charming blend of rustic and modern, featuring a large reclaimed wood island with marble countertop, a sink surrounded by cabinets. The left of the island, a stainless-steel refrigerator stands tall. The of the sink, built-in wooden cabinets painted in a muted.", + "Figure 5. Comparison with text-to-panorama methods. It can be seen that although our method is not trained on panoramic data, it can also generate multiple views with cross-view consistency." + ], + "image_footnote": [], + "bbox": [ + 91, + 446, + 470, + 651 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "sequently, their pipeline can be used to generate 3D scenes, enabling a comparison against our approach. We comprehensively evaluate these methods based on the previously introduced 3D consistency and visual quality metrics.", + "bbox": [ + 89, + 719, + 468, + 779 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Text-to-Video. Some recent text-driven video generation methods [32, 49] can also generate similar 3D scene walkthrough videos. Since it is not supported to explicitly control the camera motion in the video generation methods, we only evaluated them in terms of visual quality and temporal consistency.", + "- Text-to-Panorama. This task generates perspective images covering the panoramic field of view, which is chal" + ], + "bbox": [ + 76, + 780, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e87497fc5613b9ba5f6494f53e7551cd1236821982d1e48aac089fc42fbcd20b.jpg", + "image_caption": [ + "Figure 6. Comparison with text-to-video methods. Blur artifacts and temporally inconsistent frames occur in the text-to-video methods because of the lack of global 3D representation." 
+ ], + "image_footnote": [], + "bbox": [ + 521, + 414, + 885, + 638 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "lenging to ensure consistency in the overlapping regions. We have selected two related methods [9, 55] for comparisons.", + "bbox": [ + 511, + 704, + 890, + 747 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparison to Text-to-Scene Methods. To generate the scenes, we use a set of test-specific prompts covering descriptions of indoor, outdoor and unreal scenes. Each prompt generates an image sequence of 100 frames, and for a fair comparison, we set a fixed random seed. After that, we compute the metrics proposed in Sec. 5.2 on the generated image sequences and evaluate the effectiveness of the method. As shown in Tab. 1, our method outperforms the mesh-based iterative generation methods in several metrics, especially for outdoor scenes. The quality of", + "bbox": [ + 496, + 750, + 892, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "10176", + "bbox": [ + 480, + 926, + 519, + 936 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3ecab3ee9329b10f8d76102279f32ad20c8bd5d50468d06deee087f69225d82f.jpg", + "image_caption": [ + "POV, walkthrough a damp, stone corridor, beautiful photo, masterpiece, indoor scene", + "(a) Extracted Mesh" + ], + "image_footnote": [], + "bbox": [ + 98, + 127, + 254, + 223 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/97f0ddc7e3a35d86b1280d5f1d41a654b413e917d213630e66e2d8fb2ba9f4b7.jpg", + "image_caption": [ + "Walkthrough a road, trees, beautiful photo, best quality, masterpiece, indoor scene", + "(b) Point Cloud", + "Figure 7. Reconstructed 3D Results. (a) The 3D mesh extracted by the marching cubes algorithm, and (b) the point cloud obtained after the reconstruction using COLMAP [50]. Our reconstruction results show that our method can generate scenes with satisfactory 3D consistency." + ], + "image_footnote": [], + "bbox": [ + 267, + 128, + 452, + 224 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "their generation results relies heavily on the generative and geometric prior and degrades over time due to error accumulation. In addition, their use of a mesh to represent the scene makes it difficult to represent intense depth discontinuities, which are common in outdoor scenes. Our method, on the other hand, adopts hybrid NeRF as the scene representation, which can cope with complex scenes, and our rectification mechanism can mitigate the effect of accumulated errors caused by inaccurate prior signals.", + "bbox": [ + 75, + 309, + 467, + 444 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Comparison to Text-to-Video Methods. For comparison with the text-to-video model, we used the same collection of prompts as input to the model and generated 1,200 video clips. We used the same metrics to evaluate the 3D consistency and visual quality of the videos generated by the T2V model and our rendered videos. As shown in Tab. 2, our method significantly outperforms the T2V model on all metrics, proving the effectiveness of our method. The T2V model learns geometry and appearance priors by training on a large video dataset, but it lacks a unified 3D representation, making it difficult to ensure multi-view consistency of the generated content, as can be observed in Fig. 6.", + "bbox": [ + 75, + 445, + 467, + 625 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Comparison to Text-to-Panorama Methods. 
We evaluate the methods [9, 55] on visual quality. Tab. 3 and Fig. 5 present the quantitative and qualitative evaluations, respectively. From the results, it can be seen that previous methods can be inconsistent at the left and right boundaries, while our method, although not specifically designed for panorama generation, produces multiple views with cross-view consistency.", + "bbox": [ + 75, + 626, + 467, + 744 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3D Results. In Fig. 7, we show the 3D results reconstructed by our method. The 3D mesh is extracted by the marching cubes algorithm [31]. Additionally, we can reconstruct high-quality point clouds using COLMAP [50] by inputting the rendered image collection, which further demonstrates the superior 3D consistency of the generated view results.", + "bbox": [ + 75, + 747, + 467, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4. Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 847, + 228, + 862 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To further analyze the proposed methodology, we performed several ablation studies to evaluate the effectiveness", + "bbox": [ + 75, + 869, + 467, + 898 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "of each module. More ablation studies can be found in our supplementary material.", + "bbox": [ + 498, + 90, + 890, + 119 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effectiveness of Unified Representations. To validate the necessity of constructing a unified 3D representation, we remove it from our pipeline. In this case, our approach degenerates to the previous paradigm of warping-inpainting. As shown in Tab. 4, the quality of the generated scenes degrades in DE and CE metrics due to the lack of global 3D consistency constraints.", + "bbox": [ + 498, + 121, + 890, + 226 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effectiveness of Generative Refinement. To validate the effectiveness of our proposed generative refinement, we ablate this module from our approach, so that the novel views obtained through volume rendering are added directly to the supporting database for subsequent incremental training. The results in Tab. 4 show that this can lead to a significant degradation in the quality of the generated scene. We argue that the reason for this is that the quality of novel views generated by NeRF training on sparse views tends to be inferior, with notable blurring and artifacts. Therefore, adding such data for optimizing 3D scenes would lead to continuous degradation of the quality of the generated scenes.", + "bbox": [ + 496, + 227, + 890, + 422 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effectiveness of Consistency Regularization. To verify the validity of our regularization loss, we ablate this loss and generate scenes to compute the relevant metrics. As shown in Tab. 4, adding this loss further improves the 3D consistency of the generated scenes. Though we explicitly inject 3D information into the refining process, its output still shows some inconsistent results in several scenes. Therefore, to further improve the quality of the generated new views, we perform test-time optimization through this regularization term to constrain the consistency between local and global representations.", + "bbox": [ + 496, + 425, + 890, + 590 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. 
Conclusion", + "text_level": 1, + "bbox": [ + 500, + 606, + 617, + 622 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This paper presents a new framework, which employs the tri-planar feature-based neural radiance field as a unified 3D representation and provides a unified solution for text-driven indoor and outdoor scene generation, and its output supports navigation with arbitrary camera trajectories. Our method fine-tunes a scene-adapted diffusion model to correct newly generated content, mitigating the effect of cumulative errors while synthesizing extrapolated content. Experimental results show that our method can produce results with better visual quality and 3D consistency compared to previous methods.", + "bbox": [ + 496, + 632, + 890, + 797 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 814, + 686, + 830 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This research was supported by Zhejiang Provincial Natural Science Foundation of China under Grant No. LD24F020007, National Natural Science Foundation of China (No. 62202199), and NSFC (no. 62302491).", + "bbox": [ + 498, + 839, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "10177", + "bbox": [ + 480, + 925, + 517, + 936 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Yogesh Balaji, Martin Renqiang Min, Bing Bai, Rama Chellappa, and Hans Peter Graf. Conditional GAN with discriminative filter generation for text-to-video synthesis. In Proceedings of the International Joint Conference on Artificial Intelligence, pages 1995-2001, 2019. 3", + "[2] Shariq Farooq Bhat, Reiner Birkl, Diana Wofk, Peter Wonka, and Matthias Müller. Zoedepth: Zero-shot transfer by combining relative and metric depth. arXiv preprint arXiv:2302.12288, 2023. 2, 3", + "[3] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22563-22575, 2023. 3", + "[4] Shengqu Cai, Eric Ryan Chan, Songyou Peng, Mohamad Shahbazi, Anton Obukhov, Luc Van Gool, and Gordon Wetzstein. Diffdreamer: Consistent single-view perpetual view generation with conditional diffusion models. arXiv preprint arXiv:2211.12131, 2022. 3", + "[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5799-5809, 2021. 3", + "[6] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 6", + "[7] Eric R Chan, Koki Nagano, Matthew A Chan, Alexander W Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3d-aware diffusion models. arXiv preprint arXiv:2304.02602, 2023. 
3", + "[8] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 2", + "[9] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG), 41(6):1-16, 2022. 6, 7, 8", + "[10] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Scenedreamer: Unbounded 3d scene generation from 2d image collections. arXiv preprint arXiv:2302.01330, 2023. 4, 6", + "[11] Blender Online Community. Blender - a 3D modelling and rendering package. Blender Foundation, Stichting Blender Foundation, Amsterdam, 2018. 1", + "[12] Rafail Fridman, Amit Abecasis, Yoni Kasten, and Tali Dekel. Scenescape: Text-driven consistent scene generation. arXiv preprint arXiv:2302.01133, 2023. 1, 2, 4, 5, 6", + "[13] Vitor Guizilini, Igor Vasiljevic, Dian Chen, Rares Ambrus, and Adrien Gaidon. Towards zero-shot scale-aware monococular depth estimation. In Proceedings of the IEEE/CVF Inter-" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "national Conference on Computer Vision, pages 9233-9243, 2023.6", + "[14] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsenko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 3", + "[15] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. arXiv preprint arXiv:2204.03458, 2022. 3", + "[16] Lukas Hollein, Ang Cao, Andrew Owens, Justin Johnson, and Matthias Nießner. Text2room: Extracting textured 3d meshes from 2d text-to-image models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7909-7920, 2023. 1, 2, 3, 4, 5, 6", + "[17] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 3", + "[18] Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. Spatial transformer networks. Advances in neural information processing systems, 28, 2015. 2, 3, 6", + "[19] Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439, 2023. 3", + "[20] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. CLIP-mesh: Generating textured meshes from text using pretrained image-text models. In SIGGRAPH Asia 2022 Conference Papers. ACM, 2022. 6", + "[21] Jing Yu Koh, Honglak Lee, Yinfei Yang, Jason Baldridge, and Peter Anderson. Pathdreamer: A world model for indoor navigation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14738-14748, 2021. 3", + "[22] Jing Yu Koh, Harsh Agrawal, Dhruv Batra, Richard Tucker, Austin Waters, Honglak Lee, Yinfei Yang, Jason Baldridge, and Peter Anderson. Simple and effective synthesis of indoor 3d scenes. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1169-1178, 2023. 3", + "[23] Wei-Sheng Lai, Jia-Bin Huang, Oliver Wang, Eli Shechtman, Ersin Yumer, and Ming-Hsuan Yang. Learning blind video temporal consistency. 
In Proceedings of the European conference on computer vision (ECCV), pages 170-185, 2018. 6", + "[24] Xingyi Li, Zhiguo Cao, Huiqiang Sun, Jianming Zhang, Ke Xian, and Guosheng Lin. 3d cinematography from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4595-4605, 2023. 6", + "[25] Yitong Li, Martin Min, Dinghan Shen, David Carlson, and Lawrence Carin. Video generation from text. In Proceedings of the AAAI conference on artificial intelligence, 2018. 3", + "[26] Zhengqi Li, Qianqian Wang, Noah Snavely, and Angjoo Kanazawa. Infinitenature-zero: Learning perpetual view generation of natural scenes from single images. In Proceed-" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "10178", + "bbox": [ + 480, + 926, + 519, + 936 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ings of the European Conference on Computer Vision, pages 515-534. Springer, 2022. 2, 3, 4, 6", + "[27] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Yu Qiao, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In Proceedings of the European Conference on Computer Vision, pages 1-18. Springer, 2022. 4", + "[28] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 300–309, 2023. 2", + "[29] Andrew Liu, Richard Tucker, Varun Jampani, Ameesh Makadia, Noah Snavely, and Angjoo Kanazawa. Infinite nature: Perpetual view generation of natural scenes from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14458-14467, 2021. 2, 3, 4", + "[30] Xinhang Liu, Shiu-hong Kao, Jiaben Chen, Yu-Wing Tai, and Chi-Keung Tang. Deceptive-nerf: Enhancing nerf reconstruction using pseudo-observations from diffusion models. arXiv preprint arXiv:2305.15171, 2023. 3", + "[31] William E Lorensen and Harvey E Cline. Marching cubes: A high resolution 3d surface construction algorithm. In Semin al graphics: pioneering efforts that shaped the field, pages 347-353. 1998. 8", + "[32] Zhengxiong Luo, Dayou Chen, Yingya Zhang, Yan Huang, Liang Wang, Yujun Shen, Deli Zhao, Jingren Zhou, and Tieniu Tan. Videofusion: Decomposed diffusion models for high-quality video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10209-10218, 2023. 3, 6, 7", + "[33] Tanya Marwah, Gaurav Mittal, and Vineeth N Balasubramanian. Attentive semantic video generation using captions. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1426-1434, 2017. 3", + "[34] Gal Metzer, Elad Richardson, Or Patashnik, Raja Giryes, and Daniel Cohen-Or. Latent-nerf for shape-guided generation of 3d shapes and textures. arXiv preprint arXiv:2211.07600, 2022. 2", + "[35] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3", + "[36] Anish Mittal, Anush Krishna Moorthy, and Alan Conrad Bovik. No-reference image quality assessment in the spatial domain. IEEE Transactions on image processing, 21(12): 4695-4708, 2012. 
6", + "[37] Anish Mittal, Rajiv Soundararajan, and Alan C Bovik. Making a “completely blind” image quality analyzer. IEEE Signal processing letters, 20(3):209-212, 2012. 6", + "[38] Gaurav Mittal, Tanya Marwah, and Vineeth N Balasubramanian. Sync-draw: Automatic video generation using deep recurrent attentive architectures. In Proceedings of the 25th" + ], + "bbox": [ + 78, + 92, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ACM international conference on Multimedia, pages 1096-1104, 2017. 3", + "[39] Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhonggang Qi, Ying Shan, and Xiaohu Qie. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453, 2023.6", + "[40] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 4", + "[41] Yingwei Pan, Zhaofan Qiu, Ting Yao, Houqiang Li, and Tao Mei. To create what you tell: Generating videos from captions. In Proceedings of the 25th ACM international conference on Multimedia, pages 1789-1798, 2017. 3", + "[42] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv, 2022. 1, 2, 6", + "[43] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 6", + "[44] Alexander Raistrick, Lahav Lipson, Zeyu Ma, Lingjie Mei, Mingzhe Wang, Yiming Zuo, Karhan Kayan, Hongyu Wen, Beining Han, Yihan Wang, Alejandro Newell, Hei Law, Ankit Goyal, Kaiyu Yang, and Jia Deng. Infinite photorealistic worlds using procedural generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12630-12641. IEEE, 2023. 1", + "[45] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE transactions on pattern analysis and machine intelligence, 44(3):1623-1637, 2020. 2", + "[46] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12179-12188, 2021. 2", + "[47] Chris Rockwell, David F Fouhey, and Justin Johnson. Pixelsynth: Generating a 3d-consistent experience from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14104-14113, 2021. 6", + "[48] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684–10695, 2022. 1, 2", + "[49] RunWay. Gen-2: The next step forward for generative ai, 2023. https://research.runwayml.com/gen2.6,7", + "[50] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2016. 6, 8", + "[51] Liao Shen, Xingyi Li, Huiqiang Sun, Juwen Peng, Ke Xian, Zhiguo Cao, and Guosheng Lin. 
Make-it-4d: Synthesizing" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10179", + "bbox": [ + 480, + 926, + 519, + 936 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "a consistent long-term dynamic scene video from a single image. arXiv preprint arXiv:2308.10257, 2023. 3", + "[52] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8028-8038, 2020. 6", + "[53] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022.3", + "[54] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew J Davison. imap: Implicit mapping and positioning in real-time. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6229-6238, 2021. 6", + "[55] Shitao Tang, Fuyang Zhang, Jiacheng Chen, Peng Wang, and Yasutaka Furukawa. Mvdiffusion: Enabling holistic multiview image generation with correspondence-aware diffusion. arXiv preprint arXiv:2307.01097, 2023. 6, 7, 8", + "[56] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision, pages 402-419. Springer, 2020. 6", + "[57] Hung-Yu Tseng, Qinbo Li, Changil Kim, Suhib Alsisan, Jia-Bin Huang, and Johannes Kopf. Consistent view synthesis with pose-guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16773-16783, 2023. 3", + "[58] Patrick von Platen, Suraj Patil, Anton Lozhkov, Pedro Cuenca, Nathan Lambert, Kashif Rasul, Mishig Davaadorj, and Thomas Wolf. Diffusers: State-of-the-art diffusion models. https://github.com/huggingface/diffusers, 2022.6", + "[59] Can Wang, Ruixiang Jiang, Mengei Chai, Mingming He, Dongdong Chen, and Jing Liao. Nerf-art: Text-driven neural radiance fields stylization. arXiv preprint arXiv:2212.08070, 2022.3", + "[60] Guangcong Wang, Zhaoxi Chen, Chen Change Loy, and Ziwei Liu. Sparsenerf: Distilling depth ranking for few-shot novel view synthesis. arXiv preprint arXiv:2303.16196, 2023.5", + "[61] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A. Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12619-12629, 2023. 2", + "[62] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation, 2023. 1, 2, 6", + "[63] Olivia Wiles, Georgia Gkioxari, Richard Szeliski, and Justin Johnson. Synsin: End-to-end view synthesis from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7467-7477, 2020. 3" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[64] Chenfei Wu, Lun Huang, Qianxi Zhang, Binyang Li, Lei Ji, Fan Yang, Guillermo Sapiro, and Nan Duan. Godiva: Generating open-domain videos from natural descriptions. arXiv preprint arXiv:2104.14806, 2021. 
3", + "[65] Chenfei Wu, Jian Liang, Lei Ji, Fan Yang, Yuejian Fang, Daxin Jiang, and Nan Duan. Niwa: Visual synthesis pretraining for neural visual world creation. In Proceedings of the European Conference on Computer Vision, pages 720-736. Springer, 2022. 3", + "[66] Jingbo Zhang, Xiaoyu Li, Ziyu Wan, Can Wang, and Jing Liao. Text2nerf: Text-driven 3d scene generation with neural radiance fields. arXiv preprint arXiv:2305.11588, 2023. 2, 3, 5", + "[67] Xiaoshuai Zhang, Sai Bi, Kalyan Sunkavalli, Hao Su, and Zexiang Xu. Nerfusion: Fusing radiance fields for large-scale scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5449-5458, 2022. 4", + "[68] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 3" + ], + "bbox": [ + 501, + 92, + 893, + 400 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "10180", + "bbox": [ + 480, + 926, + 517, + 936 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/e514998b-c539-47e4-bf66-6c5fccc605eb_model.json b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/e514998b-c539-47e4-bf66-6c5fccc605eb_model.json new file mode 100644 index 0000000000000000000000000000000000000000..7542b3ae7a5cd5232f67fd06cb3001c0cbdc27ee --- /dev/null +++ b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/e514998b-c539-47e4-bf66-6c5fccc605eb_model.json @@ -0,0 +1,2686 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.044 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.158, + 0.131, + 0.815, + 0.152 + ], + "angle": 0, + "content": "3D-SceneDreamer: Text-Driven 3D-Consistent Scene Generation" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.18, + 0.751, + 0.218 + ], + "angle": 0, + "content": "Songchun Zhang\\(^{1}\\), Yibo Zhang\\(^{2}\\), Quan Zheng\\(^{4}\\), Rui Ma\\(^{2}\\), Wei Hua\\(^{3}\\), Hujun Bao\\(^{1}\\), Weiwei Xu\\(^{1}\\), Changqing Zou\\(^{1,3*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.223, + 0.721, + 0.242 + ], + "angle": 0, + "content": "1 Zhejiang University 2 Jilin University 3 Zhejiang Lab" + }, + { + "type": "text", + "bbox": [ + 0.271, + 0.242, + 0.695, + 0.26 + ], + "angle": 0, + "content": "\\(^{4}\\) Institute of Software, Chinese Academy of Sciences" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.269, + 0.554, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.581, + 0.272, + 0.692, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.696, + 0.271, + 0.786, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.789, + 0.271, + 0.881, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.581, + 0.346, + 0.868, + 0.368 + ], + "angle": 0, + "content": "Aerial drone shot of a mountain range in the style of cinematic video, shallow depth of field, subject in focus, dynamic movement" + }, + { + "type": "image", + "bbox": [ + 0.581, + 0.371, + 0.692, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.696, + 0.372, + 0.786, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.789, + 0.371, + 0.881, + 0.45 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.697, + 0.454, + 0.787, + 0.466 + ], + "angle": 0, + "content": "(b) Observation" + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.47, + 0.894, + 0.54 + ], + "angle": 0, + "content": "Figure 1. Text-Driven 3D Scene Generation from text prompts. (a) Given a scene description prompt and an arbitrary 6-degree-of-freedom (6-DOF) camera trajectory, our approach progressively generates the full 3D scene by continuously synthesizing 2D novel views. (b) The limitation of mesh representations [12, 16] and the lack of reasonable rectification mechanisms lead to cumulative errors in outdoor scenes, which are respectively marked with yellow and blue dash line boxes. In contrast, our approach can alleviate the problem by introducing a progressive generation pipeline." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.548, + 0.314, + 0.563 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.57, + 0.473, + 0.889 + ], + "angle": 0, + "content": "Text-driven 3D scene generation techniques have made rapid progress in recent years. Their success is mainly attributed to using existing generative models to iteratively perform image warping and inpainting to generate 3D scenes. However, these methods heavily rely on the outputs of existing models, leading to error accumulation in geometry and appearance that prevent the models from being used in various scenarios (e.g., outdoor and unreal scenarios). To address this limitation, we generatively refine the newly generated local views by querying and aggregating global 3D information, and then progressively generate the 3D scene. 
Specifically, we employ a tri-plane features-based NeRF as a unified representation of the 3D scene to constrain global 3D consistency, and propose a generative refinement network to synthesize new contents with higher quality by exploiting the natural image prior from 2D diffusion model as well as the global 3D information of the current scene. Our extensive experiments demonstrate that, in comparison to previous methods, our approach supports wide variety of scene generation and arbitrary camera trajectories with improved visual quality and 3D consistency." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.547, + 0.631, + 0.563 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.581, + 0.892, + 0.703 + ], + "angle": 0, + "content": "In recent years, with the growing need for 3D creation tools for metaverse applications, attention to 3D scene generation techniques has increased rapidly. Existing tools [11, 44] usually require professional modeling skills and extensive manual labor, which is time-consuming and inefficient. To facilitate the 3D scene creation and reduce the need for professional skills, 3D scene generation tools should be intuitive and versatile while ensuring sufficient controllability." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.706, + 0.893, + 0.903 + ], + "angle": 0, + "content": "This paper focuses on the specific setting of generating consistent 3D scenes from the input texts that describe the 3D scenes. This problem is highly challenging from several perspectives, including the limitation of available text-3D data pairs and the need for ensuring both semantic and geometric consistency of the generated scenes. To overcome the limited 3D data issue, recent text-to-3D methods [42, 62] have leveraged the powerful pre-trained text-to-image diffusion model [48] as a strong prior to optimize 3D representation. However, their generated scenes often have relatively simpler geometry and lack 3D consistency, because 2D prior diffusion models lack the perception of 3D information." + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.925, + 0.52, + 0.937 + ], + "angle": 0, + "content": "10170" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.079, + 0.089, + 0.473, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.258, + 0.47, + 0.384 + ], + "angle": 0, + "content": "Figure 2. Comparison with existing designs. (a) The feedforward approaches use depth-based warping and refinement operations to generate novel views of the scene without a unified representation. (b) The warping-inpainting approaches use mesh as a unified representation and generate the scene through iterative inpainting. (c) We replace the mesh with NeRF as the unified representation and alleviate the cumulative error issue by incorporating a generative refinement model. This allows our framework to support the generation of a wider range of scene types." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.396, + 0.47, + 0.607 + ], + "angle": 0, + "content": "Some recent methods [12, 16] introduce the monocular depth estimation model [45, 46] as a strong geometric prior and follow the warping-inpainting pipeline [26, 29] for progressive 3D scene generation, which partially solves the inconsistency problem. 
Although these methods can generate realistic scenes with multi-view 3D consistency, they mainly focus on indoor scenes and fail to handle large-scale outdoor scene generation as illustrated in Fig. 1 (b). This can be attributed to two main aspects: (1) Due to the adoption of an explicit 3D mesh as the unified 3D representation, the noise of the depth estimation in the outdoor scene can cause a large stretch of the scene geometry; (2) The lack of an efficient rectification mechanism in the pipeline leads to an accumulation of geometric and appearance errors." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.611, + 0.47, + 0.807 + ], + "angle": 0, + "content": "In this paper, we present a new framework, named 3D-SceneDreamer that provides a unified solution for text-driven 3D consistent indoor and outdoor scene generation. Our approach employs a tri-planar feature-based radiance field as a unified 3D representation instead of 3D mesh, which is advantageous for general scene generation (especially in outdoor scenes) and supports navigating with arbitrary 6-DOF camera trajectories. Afterwards, we model the scene generation process as a progressive optimization of the NeRF representation, while a text-guided and scene-adapted generative novel view synthesis is employed to refine the NeRF optimization. Fig. 2 shows a comparison of our design with existing text-to-scene pipelines." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Specifically, we first perform scene initialization, which consists of two stages, i.e., generating a supporting database and optimizing the initial scene representation. We first use the input text prompt and the pre-trained diffusion model [48] to generate the initial image as an appearance prior. Then, we use an off-the-shelf depth estimation model [2]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.303 + ], + "angle": 0, + "content": "to provide the geometric prior for the corresponding scene. Inspired by [66], to prevent NeRF from over-fitting for the single view image, we construct a database via differentiable spatial transformation [18] and use it for optimizing the initial NeRF representation of the generated scene. To generate the extrapolated content, we use volume rendering and trilinear interpolation in the novel viewpoints to obtain the initial rendered images and their corresponding feature maps. These outputs are later fed into our 3D-aware generative refinement model, whose output images are subsequently added as new content to the supporting database. Next, in conjunction with the new data, we progressively generate the whole 3D scene by updating our 3D representation through our incremental training strategy." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.303, + 0.892, + 0.378 + ], + "angle": 0, + "content": "Extensive experiments demonstrate that our approach significantly outperforms the state-of-the-art text-driven 3D scene generation method in both visual quality and 3D consistency. To summarize, our technical contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.379, + 0.892, + 0.439 + ], + "angle": 0, + "content": "- We provide a unified solution for text-driven consistent 3D scene generation that supports both indoor and outdoor scenes as well as allows navigation with arbitrary 6-DOF camera trajectories." 
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.439, + 0.892, + 0.514 + ], + "angle": 0, + "content": "- We propose to use a tri-planar feature-based neural radiance field as a global 3D representation of the scene to generate continuous scene views, which preserves the 3D consistency of the scene, empowered by a progressive optimization strategy." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.515, + 0.892, + 0.575 + ], + "angle": 0, + "content": "- We propose a new generative refinement model, which explicitly injects 3D information to refine the coarse view generated by novel view synthesis and then incorporates the new views to refine the NeRF optimization." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.379, + 0.892, + 0.575 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.589, + 0.642, + 0.604 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.614, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Text-Driven 3D Content Generation. Recently, motivated by the success of text-to-image models, employing pretrained 2D diffusion models to perform text-to-3D generation has gained significant research attention. Some pioneering works [42, 61] introduce the Score Distillation Sampling (SDS) and utilize 2D diffusion prior to optimize 3D representation. Subsequent works [8, 28, 34, 62] further enhance texture realism and geometric quality. However, they primarily focus on improving object-level 3D content generation rather than large-scale 3D scenes. Recent works [12, 16, 66] have proposed some feasible solutions for 3D scene generation. By utilizing the pre-trained monocular depth model and the inpainting model, they generate the 3D scene progressively based on the input text and camera trajectory. However, due to the underlying 3D representation or optimization scheme, these methods are limited in several aspects. For example, as [12, 16] utilize explicit mesh as 3D representation, it is difficult for them to generate outdoor scenes. Besides, their mesh outputs" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.925, + 0.518, + 0.937 + ], + "angle": 0, + "content": "10171" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.197 + ], + "angle": 0, + "content": "also suffer from fragmented geometry and artifacts due to imprecise depth estimation results. Although Text2NeRF achieves to generate high-quality indoor and outdoor scenes by replacing the meshes with neural radiance fields [35], it can only generate camera-centric scenes. In contrast, our approach not only supports more general 3D scene generation but can also handle arbitrary 6DOF camera trajectories." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.199, + 0.473, + 0.513 + ], + "angle": 0, + "content": "Text-Driven Video Generation. Text-Driven Video Generation aims to create realistic video content based on textual conditions. In the early stages, this task was approached using GAN [1, 25, 41] and VAE [33, 38] generative models, but the results were limited to low-resolution short video clips. Following the significant advancements in text-to-image models, recent text-to-video works extend text-to-image models such as transformer [17, 64, 65] and diffusion model [3, 14, 15, 32, 53, 68] for video generation. These approaches enable the generalization of high-quality and open-vocabulary videos, but require a substantial amount of text-image or text-video pairs of data for training. 
Text2Video-Zero [19] proposes the first zero-shot text-to-video generation pipeline that does not rely on training or optimization, but their generated videos lack smoothness and 3D consistency. Our method is capable of generating smooth and long videos which are consistent to the scenes described by the input text, without the need for large-scale training data. Furthermore, the utilization of NeRF as the 3D representation enhances the 3D consistency of our videos." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.514, + 0.473, + 0.787 + ], + "angle": 0, + "content": "View Synthesis with Generative Models. Several early stage studies [5, 21, 22, 26, 29, 63] employ GAN to synthesize new viewpoints. However, the training process of GAN is prone to the issue of mode collapse, which limits the diversity of generation results. Diffusion model has been shown its capability to generate diverse and high-quality images and videos. In recent view synthesis works [4, 7, 51, 57], diffusion models have been employed to achieve improved scene generation results over prior works. For example, in Deceptive-NeRF [30], pseudo-observations are synthesized by diffusion models and these observations are further utilized for enhance the NeRF optimization. Closely similar to [30], our method proposes a geometry-aware diffusion refinement model to reduce the artifacts of the input coarse view generated by the initial novel view synthesis. With the 3D information from NeRF features injected to the refinement process, we can achieve globally consistent 3D scene generation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.8, + 0.382, + 0.816 + ], + "angle": 0, + "content": "3. Neural Radiance Fields Revisited" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Neural Radiance Fields (NeRF) [59] is a novel view synthesis technique that has shown impressive results. It represents the specific 3D scene via an implicit function, denoted as \\( f_{\\theta}:(\\pmb {x},\\pmb {d})\\mapsto (\\mathbf{c},\\sigma) \\), given a spatial location \\( \\mathbf{x} \\) and a ray direction \\( \\mathbf{d} \\), where \\( \\theta \\) represents the learnable parameters," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.265 + ], + "angle": 0, + "content": "and \\(\\mathbf{c}\\) and \\(\\sigma\\) are the color and density. To render a novel image, NeRF marches a camera ray \\(\\mathbf{r}(t) = \\mathbf{o} + t\\mathbf{d}\\) starting from the origin \\(\\mathbf{o}\\) through each pixel and calculates its color \\(\\hat{\\boldsymbol{C}}\\) and rendered depth \\(\\hat{\\boldsymbol{D}}\\) via the volume rendering quadrature, i.e., \\(\\hat{\\boldsymbol{C}} (\\mathbf{r}) = \\sum_{i = 1}^{N}T_{i}\\alpha_{i}\\mathbf{c}_{i}\\) and \\(\\hat{\\boldsymbol{D}} (\\mathbf{r}) = \\sum_{i = 1}^{N}T_{i}\\alpha_{i}t_{i}\\), where \\(T_{i} = \\exp \\left(-\\sum_{j = 1}^{i - 1}\\sigma_{j}\\delta_{j}\\right)\\), \\(\\alpha_{i} = (1 - \\exp (-\\sigma_{i}\\delta_{i}))\\), and \\(\\delta_{k} = t_{k + 1} - t_{k}\\) indicates the distance between two point samples. Typically, stratified sampling is used to select the point samples \\(\\{t_i\\}_{i = 1}^N\\) between \\(t_n\\) and \\(t_f\\), which denote the near and far planes of the camera. 
When multi-view images are available, \\(\\theta\\) can be easily optimized with the MSE loss:" + }, + { + "type": "equation", + "bbox": [ + 0.599, + 0.267, + 0.892, + 0.304 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\theta} = \\sum_ {\\boldsymbol {r} \\in \\mathcal {R}} \\left\\| \\hat {\\boldsymbol {C}} (\\boldsymbol {r}) - \\boldsymbol {C} (\\boldsymbol {r}) \\right\\| _ {2} ^ {2} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.313, + 0.892, + 0.344 + ], + "angle": 0, + "content": "where \\(\\mathcal{R}\\) is the collection of rays, and \\(C\\) indicates the ground truth color." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.36, + 0.6, + 0.377 + ], + "angle": 0, + "content": "4. Methods" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.386, + 0.611, + 0.401 + ], + "angle": 0, + "content": "4.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.41, + 0.892, + 0.47 + ], + "angle": 0, + "content": "Given a description of the target scene a the input text prompt \\(\\mathbf{p}\\), and a pre-defined camera trajectory denoted by \\(\\{\\mathbf{T}_i\\}_{i=1}^N\\), our goal is to generate a 3D scene along the camera trajectory with the multiview 3D consistency." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.472, + 0.892, + 0.608 + ], + "angle": 0, + "content": "The overview of the proposed model is illustrated in Fig. 3. We first introduce the acquisition of appearance and structural priors in Sec. 4.2, which serve as the scene initialization. The formulation of Unified Scene Representation and its optimization with the former priors are presented in Sec. 4.3. To synthesize new content while maintaining the multiview consistency, we propose a geometry-aware refinement model in Sec. 4.4. Finally, the full online scene generation process is presented in Sec. 4.5." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.62, + 0.75, + 0.636 + ], + "angle": 0, + "content": "4.2. Scene Context Initialization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.645, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Given the input textual prompt \\(\\mathbf{p}\\), we first utilize a pretrained stable diffusion model to generate an initial 2D image \\(\\mathbf{I}_0\\), which serves as an appearance prior for the scene. Then, we feed this image into the off-the-shelf depth estimation model [2], and take the output as a geometric prior for the target scene, denoted as \\(\\mathbf{D}_0\\). Inspired by [66], we construct a supporting database \\(S = \\{((\\mathbf{D}_i,\\mathbf{I}_i,\\mathbf{T}_i)\\}_{i = 1}^N\\) via differentiable spatial transformation [18] and image inpainting [16] techniques, where \\(N\\) denotes the number of initial viewpoints. This database provides additional views and depth information, which could prevent the model from overfitting to the initial view. With the initial supporting database, we can initialize the global 3D representation. The data generated by our method will be continuously appended to this supporting database for continuous optimization of the global 3D representation. More details are provided in our supplemental materials." + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.926, + 0.521, + 0.938 + ], + "angle": 0, + "content": "10172" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.09, + 0.885, + 0.339 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.345, + 0.895, + 0.416 + ], + "angle": 0, + "content": "Figure 3. Overview of our pipeline. 
(a) Scene Context Initialization contains a supporting database to provide novel viewpoint data for progressive generation. (b) Unified 3D Representation provides a unified representation for the generated scene, which allows our approach to accomplish more general scene generation while maintaining 3D consistency. (c) 3D-Aware Generative Refinement alleviates the cumulative error issue during long-term extrapolation by exploiting a natural image prior learned from large-scale data to generatively refine the synthesized novel viewpoint image. The consistency regularization module is used for test-time optimization." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.419, + 0.34, + 0.435 + ], + "angle": 0, + "content": "4.3. Unified Scene Representation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.445, + 0.471, + 0.61 + ], + "angle": 0, + "content": "Though previous methods [26, 29] have achieved novel view generation via differentiable rendering-based frame-to-frame warping, there are still drawbacks: (1) the global 3D consistency is not ensured, (2) cumulative errors occur in long-term generation, (3) complex scenes may lead to failure. To tackle the above issues, we propose a tri-planar feature-based NeRF as the unified representation. Compared with previous methods [12, 16, 26, 29], our approach enforces global 3D consistency while handling scene generation with complex appearances and geometries." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Tri-planar Feature Representation. To construct the feature tri-planes \\(\\mathbf{M} = \\{\\mathbf{M}_{xy},\\mathbf{M}_{yz},\\mathbf{M}_{xz}\\} \\in \\mathbb{R}^{3\\times S\\times S\\times D}\\) from the input images, where \\(S\\) is the spatial resolution and \\(D\\) is the feature dimension, we first extract 2D image features from supporting views using the pre-trained ViT from DINOv2 [40] because of its strong capability in modeling cross-view correlations. We denote the extracted feature corresponding to image \\(\\mathbf{I}_i\\) as \\(\\mathbf{F}_i\\), and the feature set obtained from all input views is denoted as \\(\\{\\mathbf{F}_i\\}_{i = 1}^N\\). To lift the local 2D feature maps into the unified 3D space, similar to the previous work [67], we back-project the extracted local image features \\(\\mathbf{F}\\) into a 3D feature volume \\(\\mathbf{V}\\) along each camera ray. To avoid the cubic computational complexity of volumes, we construct a tri-planar representation by projecting the 3D feature volume \\(\\mathbf{V}\\) onto its three planes via three separate encoders. This representation reduces the complexity through feature dimensionality reduction while retaining information comparable to purely 2D feature representations (e.g., BEV representations [10, 27])." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.419, + 0.895, + 0.6 + ], + "angle": 0, + "content": "Implicit Radiance Field Decoder. Based on the constructed tri-planar representation \\(\\mathbf{M}\\), we can reconstruct the images with target poses via our implicit radiance field decoder module \\(\\Psi = \\{f_{g}, f_{c}\\}\\), where \\(f_{g}\\) and \\(f_{c}\\) indicate the geometric feature decoder and appearance decoder. 
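As a concrete illustration of this tri-plane lookup and the two-head decoder Psi = {f_g, f_c} (formalized in Eq. (2) below), the following is a minimal PyTorch sketch; the module names, feature dimensions, and MLP layout are illustrative assumptions rather than the authors' released implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def positional_encoding(x, num_freqs=6):
    # gamma(.): NeRF-style sin/cos encoding of 3D coordinates or view directions.
    freqs = 2.0 ** torch.arange(num_freqs, dtype=x.dtype, device=x.device) * torch.pi
    angles = x[..., None] * freqs                                  # (P, 3, num_freqs)
    return torch.cat([torch.sin(angles), torch.cos(angles)], dim=-1).flatten(-2)

def sample_plane(plane, uv):
    # Bilinearly sample a (D, S, S) feature plane at normalized coordinates uv in [-1, 1]^2.
    grid = uv.view(1, -1, 1, 2)                                    # (1, P, 1, 2)
    feat = F.grid_sample(plane.unsqueeze(0), grid, mode="bilinear", align_corners=True)
    return feat.squeeze(0).squeeze(-1).t()                         # (P, D)

class TriplaneRadianceDecoder(nn.Module):
    """Hypothetical Psi = {f_g, f_c}: query the tri-planes M and decode density and color."""
    def __init__(self, feat_dim=32, hidden=128, pe_dim=3 * 2 * 6):
        super().__init__()
        self.f_g = nn.Sequential(nn.Linear(3 * feat_dim + pe_dim, hidden), nn.ReLU(),
                                 nn.Linear(hidden, 1 + hidden))    # outputs (sigma, g)
        self.f_c = nn.Sequential(nn.Linear(3 * feat_dim + 2 * pe_dim + hidden, hidden),
                                 nn.ReLU(), nn.Linear(hidden, 3))  # outputs rgb

    def forward(self, planes, x, d):
        # planes: {"xy", "yz", "xz"} -> (D, S, S); x, d: (P, 3) with coordinates in [-1, 1].
        m_p = torch.cat([sample_plane(planes["xy"], x[:, [0, 1]]),
                         sample_plane(planes["yz"], x[:, [1, 2]]),
                         sample_plane(planes["xz"], x[:, [0, 2]])], dim=-1)
        h = self.f_g(torch.cat([positional_encoding(x), m_p], dim=-1))
        sigma, g = F.softplus(h[:, :1]), h[:, 1:]
        rgb = torch.sigmoid(self.f_c(torch.cat(
            [positional_encoding(x), positional_encoding(d), g, m_p], dim=-1)))
        return sigma, g, rgb
```

The decoded densities and colors can then be composited with the standard volume rendering quadrature of Sec. 3 to obtain pixel colors.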
Given a 3D point \\(p = [i, j, k]\\) and a view direction \\(d\\), we orthogonally project \\(p\\) onto each feature plane in \\(\\mathbf{M}\\) and apply bilinear sampling to obtain the conditional feature \\(\\mathbf{M}_p = [\\mathbf{M}_{xy}(i, j), \\mathbf{M}_{yz}(j, k), \\mathbf{M}_{xz}(i, k)]\\). We feed \\(\\mathbf{M}_p\\) into the geometric feature decoder to obtain the predicted density \\(\\sigma\\) and the geometric feature vector \\(\\mathbf{g}\\), after which we further decode its color \\(c\\):" + }, + { + "type": "equation", + "bbox": [ + 0.617, + 0.613, + 0.775, + 0.63 + ], + "angle": 0, + "content": "\\[\n(\\sigma , \\boldsymbol {g}) = f _ {g} (\\gamma (\\boldsymbol {x}), \\mathbf {M} _ {p})\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.605, + 0.629, + 0.891, + 0.65 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {c} = f _ {c} (\\gamma (\\boldsymbol {x}), \\gamma (\\boldsymbol {d}), \\boldsymbol {g}, \\mathbf {M} _ {p}) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.656, + 0.892, + 0.701 + ], + "angle": 0, + "content": "where \\(\\gamma (\\cdot)\\) indicates the positional encoding function. Then we can calculate the pixel color via an approximation of the volume rendering integral mentioned in Sec. 3." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.702, + 0.892, + 0.808 + ], + "angle": 0, + "content": "Training Objective. To optimize our 3D representation, we leverage the ground truth colors from the target image as the supervisory signal. Additionally, in the setting with sparse input views, we employ the estimated dense depth map to enhance the model's learning of low-frequency geometric information and prevent overfitting to appearance details. Our optimization objective is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.571, + 0.814, + 0.892, + 0.846 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\sum_ {\\boldsymbol {r} \\in \\mathcal {R}} \\left(\\mathcal {L} _ {\\text {photo}} (\\boldsymbol {r}) + \\lambda \\mathcal {L} _ {\\text {depth}} (\\boldsymbol {r})\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.85, + 0.892, + 0.905 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_{\\text{photo}}(\\boldsymbol{r}) = \\left\\| \\hat{\\boldsymbol{C}}(\\boldsymbol{r}) - \\boldsymbol{C}(\\boldsymbol{r}) \\right\\|^2\\), \\(\\mathcal{L}_{\\text{depth}}(\\boldsymbol{r}) = \\left\\| \\hat{\\mathbf{D}}^{*}(\\boldsymbol{r}) - \\mathbf{D}^{*}(\\boldsymbol{r}) \\right\\|^2\\), \\(\\mathcal{R}\\) denotes the collection of rays gen-" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.925, + 0.519, + 0.937 + ], + "angle": 0, + "content": "10173" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.184 + ], + "angle": 0, + "content": "erated from the images in the supporting database, \\(\\lambda\\) indicates the balance weight of the depth loss, and \\(\\mathbf{D}^{*}(\\boldsymbol{r})\\) and \\(\\hat{\\mathbf{D}}^{*}(\\boldsymbol{r})\\) denote the rendered depth and the depth obtained from the pre-trained depth estimation model. Since monocular depth estimates are only determined up to an unknown scale and shift, both depths are normalized per frame." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.193, + 0.373, + 0.208 + ], + "angle": 0, + "content": "4.4. 
3D-Aware Generative Refinement" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.216, + 0.47, + 0.442 + ], + "angle": 0, + "content": "Given a sequence of poses and an initial viewpoint, previous methods [12, 16, 66] usually generate novel views via a warping-and-inpainting pipeline. Though these methods have achieved promising results, they suffer from two issues: (1) The lack of rectification mechanisms in these methods can lead to error accumulation. (2) The lack of explicit 3D information during the inpainting process of these methods can lead to insufficient 3D consistency. Therefore, we propose a 3D-Aware Generative Refinement model to alleviate the above issues. On the one hand, we introduce an efficient refinement mechanism to reduce the cumulative error in the novel view generation. On the other hand, we explicitly inject 3D information during the process of generating novel views to enhance 3D consistency. We will describe the model design below." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.443, + 0.47, + 0.579 + ], + "angle": 0, + "content": "Model Design. Given a novel viewpoint with camera pose \\(\\mathbf{T}_i\\) and the tri-planar features \\(\\mathbf{M}\\), we can obtain the rendered image \\(\\mathbf{I}_r\\), rendered depth \\(\\mathbf{D}_r\\), and the corresponding 2D feature map \\(\\mathbf{F}_r\\) via the radiance field decoder module \\(\\Psi\\) and volume rendering. For convenience, we model the whole process with a mapping operator \\(\\mathcal{F}_{ren}:\\{\\mathbf{T}_i,\\mathbf{M}\\} \\mapsto \\{\\mathbf{I}_r,\\mathbf{F}_r,\\mathbf{D}_r\\}\\). Note that the feature map is computed similarly to the color and depth, i.e., by numerical quadrature, and can be formulated as" + }, + { + "type": "equation", + "bbox": [ + 0.148, + 0.584, + 0.47, + 0.625 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {r} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i} \\left(1 - \\exp \\left(- \\sigma_ {i} \\delta_ {i}\\right)\\right) \\boldsymbol {g} _ {i} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.632, + 0.468, + 0.663 + ], + "angle": 0, + "content": "where \\( \\pmb{g}_i \\) indicates the feature vector decoded by \\( f_{g} \\), and \\( N \\) denotes the total number of point samples on the ray \\( \\pmb{r} \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.664, + 0.468, + 0.798 + ], + "angle": 0, + "content": "Although the quality of the rendered coarse results may not be very high, they can still provide reasonable guidance for the extrapolated view generation according to the current scene. Based on this assumption, we propose to take the rendered image and the feature map as conditional inputs to a pre-trained 2D stable diffusion model and generate a refined synthetic image \\(\\hat{\\mathbf{I}}_r\\) via fine-tuning the model, which allows us to leverage natural image priors derived from internet-scale data. 
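To make the rendering operator F_ren and Eq. (4) above concrete, here is a hedged PyTorch sketch of the quadrature that composites per-sample colors and geometric features with the same weights; tensor shapes and names are assumptions for illustration, not the authors' code.

```python
import torch

def composite_along_rays(sigma, rgb, g, deltas):
    """Volume-rendering quadrature for a batch of rays.

    sigma: (R, N) densities, rgb: (R, N, 3) colors, g: (R, N, D) geometric features,
    deltas: (R, N) distances between adjacent samples along each ray.
    """
    alpha = 1.0 - torch.exp(-sigma * deltas)                               # 1 - exp(-sigma_i * delta_i)
    trans = torch.cumprod(1.0 - alpha + 1e-10, dim=-1)
    T = torch.cat([torch.ones_like(trans[:, :1]), trans[:, :-1]], dim=-1)  # accumulated transmittance T_i
    weights = T * alpha
    color = (weights[..., None] * rgb).sum(dim=1)                          # rendered image I_r (per ray)
    feat = (weights[..., None] * g).sum(dim=1)                             # rendered feature map F_r, Eq. (4)
    return color, feat, weights
```

Applying the same weights to the sample distances yields the rendered depth D_r, giving the coarse conditions {I_r, F_r, D_r} used for refinement.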
The process can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.177, + 0.811, + 0.468, + 0.83 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {I}} _ {r} = \\mathcal {F} _ {g e n} (\\mathbf {I} _ {r}, \\tau (\\mathbf {p}), \\mathcal {G} (\\mathbf {F} _ {r})) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.468, + 0.901 + ], + "angle": 0, + "content": "where \\(\\mathcal{F}_{gen}\\) denotes our generative refinement model, \\(\\tau (\\mathbf{p})\\) indicates the input text embedding, and \\(\\mathcal{G}\\) denotes the feature adapter for learning the mapping from external control information to the internal knowledge in LDM." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.212 + ], + "angle": 0, + "content": "Scene-Adapted Diffusion Model Fine-Tuning. For the scene generation task, we propose to leverage the rich 2D priors in the pre-trained latent diffusion model instead of training a new model from scratch. Thus, we jointly train the feature adapter, the radiance field decoder, and the feature aggregation layer, while keeping the parameters of stable diffusion fixed. The objective of the fine-tuning process is shown below:" + }, + { + "type": "equation", + "bbox": [ + 0.512, + 0.218, + 0.892, + 0.243 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {A D} = \\mathbb {E} _ {t, \\epsilon \\sim \\mathcal {N} (0, I)} \\left[ \\| \\epsilon_ {\\theta} \\left(\\boldsymbol {z} _ {t}, t, \\tau (\\mathbf {p}), \\mathbf {F} _ {r}, \\mathbf {I} _ {r}\\right) - \\epsilon \\| _ {2} ^ {2} \\right] \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.25, + 0.892, + 0.401 + ], + "angle": 0, + "content": "With the rendered feature map \\(\\mathbf{F}_r\\) containing information about the appearance and geometry, we can control the pre-trained text-to-image diffusion model to generate images that are consistent with the content of generated images from previous viewpoints. In addition, our model inherits the high-quality image generation ability of the stable diffusion model, which ensures the plausibility of the generated views. The pre-trained prior and our effective conditional adaptation enable our model to have generalization ability in novel scenes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.402, + 0.892, + 0.628 + ], + "angle": 0, + "content": "Global-Local Consistency Regularization. In the online generation process, though our model can rectify the coarse rendering results, we do not explicitly constrain the 3D consistency across views when synthesizing novel views. Therefore, we design a regularization term \\(\\mathcal{L}_{\\text{cons}}\\) for test-time optimization, which shares the same formula as Eq. (6) to guarantee the plausibility of the generated novel views. Specifically, we expect that 3D consistency exists between novel views obtained from geometric projection using local geometric information (i.e., monocular depth estimation) and novel views generated using global geometric information (i.e., global tri-planar 3D representation). Thus, we simultaneously generate novel views based on the previous warping-and-inpainting pipeline and use them as supervisory signals to further fine-tune the feature adapter." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.636, + 0.795, + 0.651 + ], + "angle": 0, + "content": "4.5. Online Scene Generation Process." 
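The subsection that follows walks through the full online loop. As a compact, hypothetical sketch of that loop (all names below are placeholders standing in for the components of Secs. 4.2-4.4, not the authors' implementation):

```python
from typing import Callable, List, Tuple

def online_scene_generation(
    prompt: str,
    poses: List["Pose"],
    init_database: Callable[[str], "Database"],       # Sec. 4.2: initial image, depth, warped views
    fit_representation: Callable[["Database"], "Triplane"],
    render: Callable[["Triplane", "Pose"], Tuple["Image", "Feature", "Depth"]],  # F_ren
    refine: Callable[["Image", "Feature", str], "Image"],                        # F_gen, Eq. (5)
    incremental_update: Callable[["Triplane", "Database"], "Triplane"],          # sparse-ray update
) -> Tuple["Triplane", "Database"]:
    """Hypothetical outer loop: render a coarse novel view, refine it with the
    scene-adapted diffusion model, append it to the supporting database, and
    incrementally update the unified representation."""
    database = init_database(prompt)
    triplane = fit_representation(database)
    for pose in poses:
        coarse_img, feat, depth = render(triplane, pose)   # coarse conditions from the tri-plane NeRF
        refined_img = refine(coarse_img, feat, prompt)     # 3D-aware generative refinement
        database.add(pose, refined_img, depth)             # grow the supporting database
        triplane = incremental_update(triplane, database)  # information-gain-based ray sampling
    return triplane, database
```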
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.892, + 0.719 + ], + "angle": 0, + "content": "In this section, we introduce our online 3D scene generation process, which consists of three parts: scene representation initialization, extrapolated content synthesis, and incremental training strategy." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.87 + ], + "angle": 0, + "content": "Scene Representation Initialization. Given the input textual prompt, we first generate an initial 2D image using a pre-trained stable diffusion model, after which we construct a supporting database \\( S \\) via the method mentioned in Sec. 4.2. Then, by exploiting the data from the database, as well as the photometric loss (Eq. (3)), we can optimize the unified representation. To prevent the model from overfitting to high-frequency details, we allow the model to learn low-frequency geometric information better by utilizing the depth priors [60]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Extrapolated Content Synthesis. To generate the extrapolated content, we proceed by retrieving the next pose, de" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.926, + 0.519, + 0.938 + ], + "angle": 0, + "content": "10174" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.09, + 0.473, + 0.351 + ], + "angle": 0, + "content": "noted as \\(\\mathbf{T}_i\\), from the pose sequence \\(\\{\\mathbf{T}_i\\}_{i=1}^N\\). We then employ volumetric rendering to obtain a coarse view of the current viewpoint and the corresponding feature map. These rendered outputs are used as conditional inputs to our generative refinement model \\(\\mathcal{F}_{gen}\\) for generating a refined view. Due to the presence of a generative refinement mechanism, our extrapolation method mitigates the effects of cumulative errors. The refined view from the model \\(\\mathcal{F}_{gen}\\) is subsequently added to the supporting database \\(\\mathcal{S}\\) as new content. Incremental Training Strategy. After obtaining the new content, we then need to update the unified representation. However, fine-tuning only on the newly generated data can lead to catastrophic forgetting, whereas fine-tuning on the entire dataset requires excessively long training time. Inspired by [54], we sample a sparse set of rays \\(\\mathcal{Q}\\) according to the information gain to optimize the representation, thus improving the efficiency of the incremental training." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.364, + 0.21, + 0.381 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.389, + 0.295, + 0.405 + ], + "angle": 0, + "content": "5.1. Implementation details." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.412, + 0.473, + 0.61 + ], + "angle": 0, + "content": "We implement our system using PyTorch. For monocular depth estimation, we utilize [13]. To avoid the occurrence of black holes, we refer to the implementation in [18] to generate surrounding views. For the text-guided image generation, we use the publicly available stable diffusion code from Diffusers [58]. For multi-view-consistent image generation, we refer to the implementation of T2I-Adapter [39] to inject the depth feature conditions. In the progressive NeRF reconstruction part, we refer to the tri-planar implementation in [6]. 
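Since the implementation builds on Diffusers and a T2I-Adapter-style feature adapter, the fine-tuning objective of Eq. (6) can be sketched roughly as below; the callables (unet, adapter, vae_encode, scheduler) are placeholders with assumed interfaces, not the exact APIs or code used by the authors.

```python
import torch
import torch.nn.functional as F

def adapter_finetune_step(unet, adapter, vae_encode, scheduler, optimizer,
                          target_img, rendered_img, rendered_feat, text_embed,
                          num_train_timesteps=1000):
    """One step of Eq. (6): the pre-trained denoising UNet stays frozen, while the
    feature adapter (and, per the paper, the radiance-field decoder and feature
    aggregation layer) receives gradients."""
    for p in unet.parameters():
        p.requires_grad_(False)

    z0 = vae_encode(target_img)                               # latent of the supervision view
    noise = torch.randn_like(z0)
    t = torch.randint(0, num_train_timesteps, (z0.shape[0],), device=z0.device)
    zt = scheduler.add_noise(z0, noise, t)                    # forward diffusion q(z_t | z_0)

    cond = adapter(rendered_feat, rendered_img)               # G(F_r) together with the coarse image I_r
    eps_pred = unet(zt, t, text_embed, cond)                  # epsilon_theta(z_t, t, tau(p), F_r, I_r)

    loss = F.mse_loss(eps_pred, noise)                        # L_AD in Eq. (6)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.detach()
```

The same objective, with warped-and-inpainted views as targets, would serve for the test-time consistency regularization described in Sec. 4.4.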
We conduct all experiments using 4 NVIDIA RTX A100 GPUs for training and inference. More details can be found in our supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.62, + 0.264, + 0.635 + ], + "angle": 0, + "content": "5.2. Evaluation metrics." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.644, + 0.47, + 0.81 + ], + "angle": 0, + "content": "Image quality. We evaluate the quality of our generated images using CLIP Score (CS), Inception Score (IS), the Blind/Referenceless Image Spatial Quality Evaluator (BRISQUE) [36], and the Natural Image Quality Evaluator (NIQE) [37]. The Inception Score is based on the diversity and predictability of the generated images. CLIP Score uses a pre-trained CLIP model [43] to measure the similarity between text and images. Note that existing visual quality metrics such as FID cannot be used since the scenes generated by text-to-3D approaches do not share a common underlying data distribution with a reference set." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Multiview Consistency. Given a sequence of rendered images, we evaluate the multi-view consistency of our generated scene using Camera Error (CE), Depth Error (DE), and flow-warping error (FE) metrics. Motivated by [10, 12], we use COLMAP [50], a reliable SfM technique, to compute the camera trajectory and the sparse 3D point cloud. CE" + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.089, + 0.892, + 0.174 + ], + "angle": 0, + "content": "
Method | 3D Representation | 3D Consistency (DE↓, CE↓, SfM rate↑) | Visual Quality (CS↑, BRISQUE↓, NIQE↓, IS↑)
Inf-Zero [26] | - | - | 1.189 | 0.38 | - | 21.43 | 5.85 | 2.34
3DP [52] | LDI&Mesh | 0.42 | 0.965 | 0.47 | - | 29.95 | 5.84 | 1.75
PixelSynth [47] | Point Cloud | 0.36 | 0.732 | 0.52 | - | 36.74 | 4.98 | 1.28
ProlificDreamer [62] | NeRF | - | - | - | 23.41 | 27.97 | 6.75 | 1.21
Text2Room [16] | Mesh | 0.24 | 0.426 | 0.63 | 28.15 | 28.37 | 5.46 | 2.19
Scenescape [12] | Mesh | 0.18 | 0.394 | 0.76 | 28.84 | 24.54 | 4.78 | 2.23
Ours | NeRF | 0.13 | 0.176 | 0.89 | 29.97 | 23.64 | 4.66 | 2.62
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.179, + 0.894, + 0.248 + ], + "angle": 0, + "content": "Table 1. Comparison with text-to-scene methods. We compare our approach with two categories of approaches, i.e., pure text-driven 3D generation and text-to-image generation followed by 3D scene generation. Metrics on 3D consistency and visual quality are illustrated." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.256, + 0.892, + 0.322 + ], + "angle": 0, + "content": "
Method | FE↓ | CS↑ | BRISQUE↓ | NIQE↓ | IS↑
VideoFusion [32] | 0.039 | 23.54 | 27.39 | 5.94 | 2.21
GEN-2 [49] | 0.032 | 27.54 | 25.65 | 5.24 | 2.38
Ours | 0.028 | 29.95 | 23.53 | 4.70 | 2.69
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.325, + 0.892, + 0.354 + ], + "angle": 0, + "content": "Table 2. Comparison with text-to-video methods. Metrics on flow warping error (FE) and visual quality are illustrated." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.361, + 0.891, + 0.433 + ], + "angle": 0, + "content": "
Method | CS↑ | BRISQUE↓ | NIQE↓ | IS↑
Text2Light [9] | 26.16 | 49.26 | 6.15 | 2.54
MVDiffusion [55] | 27.25 | 31.54 | 5.47 | 2.76
Ours | 28.12 | 24.15 | 4.96 | 2.79
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.437, + 0.892, + 0.48 + ], + "angle": 0, + "content": "Table 3. Comparison with text-to-panorama methods. We compare our method with recent text-driven 3D generation methods [9, 55]. Metrics on visual quality are illustrated." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.487, + 0.892, + 0.557 + ], + "angle": 0, + "content": "
Method | DE↓ | CE↓ | SfM rate↑ | CS↑ | BRISQUE↓ | NIQE↓
Full Model | 0.13 | 0.176 | 0.89 | 29.97 | 26.18 | 6.54
W/o UR | 0.46 | 0.764 | 0.41 | 22.71 | 27.95 | 5.81
W/o GRM | 0.59 | 0.981 | 0.46 | 22.12 | 29.64 | 5.75
W/o CR | 0.19 | 0.254 | 0.78 | 28.14 | 27.16 | 6.12
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.561, + 0.892, + 0.604 + ], + "angle": 0, + "content": "Table 4. Ablations. For brevity, we use UR, GRM, CR to denote Unified Representation, Generative Refinement Model and Consistency Regularization, respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.609, + 0.892, + 0.701 + ], + "angle": 0, + "content": "is computed by comparing the difference between the predicted trajectory and the given trajectory, and DE is computed by comparing the difference between the sparse depth map obtained by COLMAP and the estimated depth map. In addition, to account for temporal consistency, we follow [23] and use RAFT [56] to compute FE." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.71, + 0.64, + 0.727 + ], + "angle": 0, + "content": "5.3. Comparisons" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.734, + 0.894, + 0.809 + ], + "angle": 0, + "content": "Baselines. Since there are only a few baselines directly related to our approach, we also take into account some methods with similar capabilities and construct their variants for comparison. Specifically, the following three categories of methods are included:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.81, + 0.894, + 0.901 + ], + "angle": 0, + "content": "- Text-to-Scene. There exist techniques [12, 16] that generate 3D meshes iteratively by employing warping and inpainting processes, allowing for direct comparisons with our proposed methods. Moreover, image-guided 3D generation methods [24, 26, 47] are also available, wherein initial images can be produced using a T2I model. Subse" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.926, + 0.52, + 0.938 + ], + "angle": 0, + "content": "10175" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.1, + 0.098, + 0.201, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.099, + 0.314, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.324, + 0.099, + 0.425, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.435, + 0.099, + 0.536, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.099, + 0.648, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.659, + 0.099, + 0.76, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.771, + 0.099, + 0.871, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.198, + 0.177, + 0.777, + 0.19 + ], + "angle": 0, + "content": "POV, A versatile room with a sofa as the centerpiece, a bookshelf, a lamp, a desk TV, masterpiece" + }, + { + "type": "image", + "bbox": [ + 0.101, + 0.19, + 0.201, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.19, + 0.313, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.324, + 0.19, + 0.425, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.435, + 0.19, + 0.535, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.19, + 0.647, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.659, + 0.19, + 0.759, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.771, + 0.19, + 0.871, + 0.266 + ], + "angle": 0, + "content": null + }, + { + 
"type": "image", + "bbox": [ + 0.101, + 0.277, + 0.2, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.277, + 0.313, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.277, + 0.424, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.434, + 0.277, + 0.535, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.277, + 0.648, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.659, + 0.277, + 0.759, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.771, + 0.277, + 0.871, + 0.353 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.265, + 0.355, + 0.71, + 0.367 + ], + "angle": 0, + "content": "POV, walking through a palace in fantasy style, master piece, indoor scene" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.371, + 0.892, + 0.4 + ], + "angle": 0, + "content": "Figure 4. Quantitative Results. From our results, it can be seen that our approach produces high-fidelity scenes with stable 3D consistency in indoor scenes, outdoor scenes, and unreal-style scenes. More high-resolution results can be found in the supplementary material." + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.412, + 0.479, + 0.445 + ], + "angle": 0, + "content": "This kitchen is a charming blend of rustic and modern, featuring a large reclaimed wood island with marble countertop, a sink surrounded by cabinets. The left of the island, a stainless-steel refrigerator stands tall. The of the sink, built-in wooden cabinets painted in a muted." + }, + { + "type": "image", + "bbox": [ + 0.093, + 0.447, + 0.472, + 0.652 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.668, + 0.47, + 0.71 + ], + "angle": 0, + "content": "Figure 5. Comparison with text-to-panorama methods. It can be seen that although our method is not trained on panoramic data, it can also generate multiple views with cross-view consistency." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.469, + 0.78 + ], + "angle": 0, + "content": "sequently, their pipeline can be used to generate 3D scenes, enabling a comparison against our approach. We comprehensively evaluate these methods based on the previously introduced 3D consistency and visual quality metrics." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.781, + 0.469, + 0.87 + ], + "angle": 0, + "content": "- Text-to-Video. Some recent text-driven video generation methods [32, 49] can also generate similar 3D scene walkthrough videos. Since it is not supported to explicitly control the camera motion in the video generation methods, we only evaluated them in terms of visual quality and temporal consistency." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "- Text-to-Panorama. This task generates perspective images covering the panoramic field of view, which is chal" + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.781, + 0.469, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.415, + 0.887, + 0.639 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.656, + 0.892, + 0.698 + ], + "angle": 0, + "content": "Figure 6. Comparison with text-to-video methods. 
Blur artifacts and temporally inconsistent frames occur in the text-to-video methods because of the lack of a global 3D representation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.705, + 0.892, + 0.748 + ], + "angle": 0, + "content": "lenging to ensure consistency in the overlapping regions. We have selected two related methods [9, 55] for comparisons." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Comparison to Text-to-Scene Methods. To generate the scenes, we use a set of test-specific prompts covering descriptions of indoor, outdoor and unreal scenes. Each prompt generates an image sequence of 100 frames, and for a fair comparison, we set a fixed random seed. After that, we compute the metrics proposed in Sec. 5.2 on the generated image sequences and evaluate the effectiveness of the method. As shown in Tab. 1, our method outperforms the mesh-based iterative generation methods in several metrics, especially for outdoor scenes. The quality of" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.927, + 0.52, + 0.938 + ], + "angle": 0, + "content": "10176" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.104, + 0.095, + 0.262, + 0.127 + ], + "angle": 0, + "content": "POV, walkthrough a damp, stone corridor, beautiful photo, masterpiece, indoor scene" + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.128, + 0.256, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.116, + 0.227, + 0.231, + 0.238 + ], + "angle": 0, + "content": "(a) Extracted Mesh" + }, + { + "type": "image_caption", + "bbox": [ + 0.276, + 0.095, + 0.442, + 0.128 + ], + "angle": 0, + "content": "Walkthrough a road, trees, beautiful photo, best quality, masterpiece, indoor scene" + }, + { + "type": "image", + "bbox": [ + 0.268, + 0.13, + 0.454, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.314, + 0.227, + 0.407, + 0.239 + ], + "angle": 0, + "content": "(b) Point Cloud" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.239, + 0.468, + 0.307 + ], + "angle": 0, + "content": "Figure 7. Reconstructed 3D Results. (a) The 3D mesh extracted by the marching cubes algorithm [31], and (b) the point cloud obtained after the reconstruction using COLMAP [50]. Our reconstruction results show that our method can generate scenes with satisfactory 3D consistency." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.31, + 0.468, + 0.445 + ], + "angle": 0, + "content": "their generation results rely heavily on the generative and geometric priors and degrade over time due to error accumulation. In addition, their use of a mesh to represent the scene makes it difficult to represent large depth discontinuities, which are common in outdoor scenes. Our method, on the other hand, adopts a hybrid NeRF as the scene representation, which can cope with complex scenes, and our rectification mechanism can mitigate the effect of accumulated errors caused by inaccurate prior signals." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.446, + 0.468, + 0.626 + ], + "angle": 0, + "content": "Comparison to Text-to-Video Methods. For comparison with the text-to-video models, we used the same collection of prompts as input to the models and generated 1,200 video clips. We used the same metrics to evaluate the 3D consistency and visual quality of the videos generated by the T2V models and our rendered videos. As shown in Tab. 
2, our method significantly outperforms the T2V models on all metrics, demonstrating the effectiveness of our method. The T2V models learn geometry and appearance priors by training on a large video dataset, but they lack a unified 3D representation, making it difficult to ensure multi-view consistency of the generated content, as can be observed in Fig. 6." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.627, + 0.468, + 0.746 + ], + "angle": 0, + "content": "Comparison to Text-to-Panorama Methods. We evaluate the methods [9, 55] on visual quality. Tab. 3 and Fig. 5 present the quantitative and qualitative evaluations, respectively. From the results, it can be seen that previous methods can be inconsistent at the left and right boundaries, while our method, although not specifically designed for panorama generation, produces multiple views with cross-view consistency." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.748, + 0.468, + 0.837 + ], + "angle": 0, + "content": "3D Results. In Fig. 7, we show the 3D results reconstructed by our method. The 3D mesh is extracted by the marching cubes algorithm [31]. Additionally, we can reconstruct high-quality point clouds using COLMAP [50] by inputting the rendered image collection, which further demonstrates the superior 3D consistency of the generated views." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.848, + 0.229, + 0.863 + ], + "angle": 0, + "content": "5.4. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.468, + 0.899 + ], + "angle": 0, + "content": "To further analyze the proposed methodology, we performed several ablation studies to evaluate the effectiveness" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.891, + 0.121 + ], + "angle": 0, + "content": "of each module. More ablation studies can be found in our supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.122, + 0.892, + 0.227 + ], + "angle": 0, + "content": "Effectiveness of Unified Representations. To validate the necessity of constructing a unified 3D representation, we remove it from our pipeline. In this case, our approach degenerates to the previous warping-and-inpainting paradigm. As shown in Tab. 4, the quality of the generated scenes degrades in the DE and CE metrics due to the lack of global 3D consistency constraints." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.228, + 0.892, + 0.424 + ], + "angle": 0, + "content": "Effectiveness of Generative Refinement. To validate the effectiveness of our proposed generative refinement, we ablate this module, so that the novel view obtained through volume rendering is added directly to the supporting database for subsequent incremental training. The results in Tab. 4 show that this leads to a significant degradation in the quality of the generated scene. We argue that this is because novel views rendered by a NeRF trained on sparse views tend to be of inferior quality, with notable blurring and artifacts. Therefore, adding such data when optimizing the 3D scene leads to continuous degradation of the quality of the generated scenes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.426, + 0.892, + 0.591 + ], + "angle": 0, + "content": "Effectiveness of Consistency Regularization. To verify the validity of our regularization loss, we ablate this loss and generate scenes to compute the relevant metrics. As shown in Tab. 4, adding this loss further improves the 3D consistency of the generated scenes. 
Though we explicitly inject 3D information into the refining process, its output still shows some inconsistent results in several scenes. Therefore, to further improve the quality of the generated new views, we perform test-time optimization through this regularization term to constrain the consistency between local and global representations." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.607, + 0.619, + 0.623 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.633, + 0.892, + 0.799 + ], + "angle": 0, + "content": "This paper presents a new framework that employs a tri-planar feature-based neural radiance field as a unified 3D representation and provides a unified solution for text-driven indoor and outdoor scene generation, whose output supports navigation along arbitrary camera trajectories. Our method fine-tunes a scene-adapted diffusion model to correct newly generated content, mitigating the effect of cumulative errors while synthesizing extrapolated content. Experimental results show that our method can produce results with better visual quality and 3D consistency compared to previous methods." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.815, + 0.687, + 0.831 + ], + "angle": 0, + "content": "7. Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.84, + 0.892, + 0.9 + ], + "angle": 0, + "content": "This research was supported by Zhejiang Provincial Natural Science Foundation of China under Grant No. LD24F020007, National Natural Science Foundation of China (No. 62202199), and NSFC (no. 62302491)." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.926, + 0.519, + 0.938 + ], + "angle": 0, + "content": "10177" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Yogesh Balaji, Martin Renqiang Min, Bing Bai, Rama Chellappa, and Hans Peter Graf. Conditional GAN with discriminative filter generation for text-to-video synthesis. In Proceedings of the International Joint Conference on Artificial Intelligence, pages 1995-2001, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.186, + 0.472, + 0.24 + ], + "angle": 0, + "content": "[2] Shariq Farooq Bhat, Reiner Birkl, Diana Wofk, Peter Wonka, and Matthias Müller. Zoedepth: Zero-shot transfer by combining relative and metric depth. arXiv preprint arXiv:2302.12288, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.242, + 0.471, + 0.324 + ], + "angle": 0, + "content": "[3] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22563-22575, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.326, + 0.471, + 0.394 + ], + "angle": 0, + "content": "[4] Shengqu Cai, Eric Ryan Chan, Songyou Peng, Mohamad Shahbazi, Anton Obukhov, Luc Van Gool, and Gordon Wetzstein. Diffdreamer: Consistent single-view perpetual view generation with conditional diffusion models. arXiv preprint arXiv:2211.12131, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.396, + 0.471, + 0.465 + ], + "angle": 0, + "content": "[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. 
pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5799-5809, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.467, + 0.471, + 0.548 + ], + "angle": 0, + "content": "[6] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.55, + 0.471, + 0.619 + ], + "angle": 0, + "content": "[7] Eric R Chan, Koki Nagano, Matthew A Chan, Alexander W Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3d-aware diffusion models. arXiv preprint arXiv:2304.02602, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.621, + 0.471, + 0.687 + ], + "angle": 0, + "content": "[8] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.69, + 0.471, + 0.731 + ], + "angle": 0, + "content": "[9] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG), 41(6):1-16, 2022. 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.732, + 0.47, + 0.773 + ], + "angle": 0, + "content": "[10] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Scenedreamer: Unbounded 3d scene generation from 2d image collections. arXiv preprint arXiv:2302.01330, 2023. 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.471, + 0.815 + ], + "angle": 0, + "content": "[11] Blender Online Community. Blender - a 3D modelling and rendering package. Blender Foundation, Stichting Blender Foundation, Amsterdam, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.471, + 0.859 + ], + "angle": 0, + "content": "[12] Rafail Fridman, Amit Abecasis, Yoni Kasten, and Tali Dekel. Scenescape: Text-driven consistent scene generation. arXiv preprint arXiv:2302.01133, 2023. 1, 2, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.471, + 0.901 + ], + "angle": 0, + "content": "[13] Vitor Guizilini, Igor Vasiljevic, Dian Chen, Rares Ambrus, and Adrien Gaidon. Towards zero-shot scale-aware monococular depth estimation. In Proceedings of the IEEE/CVF Inter-" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.119 + ], + "angle": 0, + "content": "national Conference on Computer Vision, pages 9233-9243, 2023.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.191 + ], + "angle": 0, + "content": "[14] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsenko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.193, + 0.892, + 0.234 + ], + "angle": 0, + "content": "[15] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. arXiv preprint arXiv:2204.03458, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.236, + 0.895, + 0.305 + ], + "angle": 0, + "content": "[16] Lukas Hollein, Ang Cao, Andrew Owens, Justin Johnson, and Matthias Nießner. Text2room: Extracting textured 3d meshes from 2d text-to-image models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7909-7920, 2023. 1, 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.307, + 0.892, + 0.36 + ], + "angle": 0, + "content": "[17] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.363, + 0.892, + 0.404 + ], + "angle": 0, + "content": "[18] Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. Spatial transformer networks. Advances in neural information processing systems, 28, 2015. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.406, + 0.892, + 0.475 + ], + "angle": 0, + "content": "[19] Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.477, + 0.892, + 0.532 + ], + "angle": 0, + "content": "[20] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. CLIP-mesh: Generating textured meshes from text using pretrained image-text models. In SIGGRAPH Asia 2022 Conference Papers. ACM, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.534, + 0.892, + 0.601 + ], + "angle": 0, + "content": "[21] Jing Yu Koh, Honglak Lee, Yinfei Yang, Jason Baldridge, and Peter Anderson. Pathdreamer: A world model for indoor navigation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14738-14748, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.604, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[22] Jing Yu Koh, Harsh Agrawal, Dhruv Batra, Richard Tucker, Austin Waters, Honglak Lee, Yinfei Yang, Jason Baldridge, and Peter Anderson. Simple and effective synthesis of indoor 3d scenes. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1169-1178, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.676, + 0.892, + 0.743 + ], + "angle": 0, + "content": "[23] Wei-Sheng Lai, Jia-Bin Huang, Oliver Wang, Eli Shechtman, Ersin Yumer, and Ming-Hsuan Yang. Learning blind video temporal consistency. In Proceedings of the European conference on computer vision (ECCV), pages 170-185, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.746, + 0.892, + 0.814 + ], + "angle": 0, + "content": "[24] Xingyi Li, Zhiguo Cao, Huiqiang Sun, Jianming Zhang, Ke Xian, and Guosheng Lin. 3d cinematography from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4595-4605, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[25] Yitong Li, Martin Min, Dinghan Shen, David Carlson, and Lawrence Carin. 
Video generation from text. In Proceedings of the AAAI conference on artificial intelligence, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[26] Zhengqi Li, Qianqian Wang, Noah Snavely, and Angjoo Kanazawa. Infinitenature-zero: Learning perpetual view generation of natural scenes from single images. In Proceed-" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.895, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.927, + 0.52, + 0.938 + ], + "angle": 0, + "content": "10178" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.093, + 0.469, + 0.12 + ], + "angle": 0, + "content": "ings of the European Conference on Computer Vision, pages 515-534. Springer, 2022. 2, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.472, + 0.205 + ], + "angle": 0, + "content": "[27] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Yu Qiao, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In Proceedings of the European Conference on Computer Vision, pages 1-18. Springer, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.207, + 0.471, + 0.29 + ], + "angle": 0, + "content": "[28] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 300–309, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.292, + 0.471, + 0.373 + ], + "angle": 0, + "content": "[29] Andrew Liu, Richard Tucker, Varun Jampani, Ameesh Makadia, Noah Snavely, and Angjoo Kanazawa. Infinite nature: Perpetual view generation of natural scenes from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14458-14467, 2021. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.376, + 0.469, + 0.431 + ], + "angle": 0, + "content": "[30] Xinhang Liu, Shiu-hong Kao, Jiaben Chen, Yu-Wing Tai, and Chi-Keung Tang. Deceptive-nerf: Enhancing nerf reconstruction using pseudo-observations from diffusion models. arXiv preprint arXiv:2305.15171, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.433, + 0.469, + 0.487 + ], + "angle": 0, + "content": "[31] William E Lorensen and Harvey E Cline. Marching cubes: A high resolution 3d surface construction algorithm. In Semin al graphics: pioneering efforts that shaped the field, pages 347-353. 1998. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.49, + 0.471, + 0.572 + ], + "angle": 0, + "content": "[32] Zhengxiong Luo, Dayou Chen, Yingya Zhang, Yan Huang, Liang Wang, Yujun Shen, Deli Zhao, Jingren Zhou, and Tieniu Tan. Videofusion: Decomposed diffusion models for high-quality video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10209-10218, 2023. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.575, + 0.469, + 0.63 + ], + "angle": 0, + "content": "[33] Tanya Marwah, Gaurav Mittal, and Vineeth N Balasubramanian. Attentive semantic video generation using captions. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1426-1434, 2017. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.632, + 0.469, + 0.685 + ], + "angle": 0, + "content": "[34] Gal Metzer, Elad Richardson, Or Patashnik, Raja Giryes, and Daniel Cohen-Or. Latent-nerf for shape-guided generation of 3d shapes and textures. arXiv preprint arXiv:2211.07600, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.688, + 0.469, + 0.756 + ], + "angle": 0, + "content": "[35] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.759, + 0.469, + 0.813 + ], + "angle": 0, + "content": "[36] Anish Mittal, Anush Krishna Moorthy, and Alan Conrad Bovik. No-reference image quality assessment in the spatial domain. IEEE Transactions on image processing, 21(12): 4695-4708, 2012. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.469, + 0.857 + ], + "angle": 0, + "content": "[37] Anish Mittal, Rajiv Soundararajan, and Alan C Bovik. Making a “completely blind” image quality analyzer. IEEE Signal processing letters, 20(3):209-212, 2012. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[38] Gaurav Mittal, Tanya Marwah, and Vineeth N Balasubramanian. Sync-draw: Automatic video generation using deep recurrent attentive architectures. In Proceedings of the 25th" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "ACM international conference on Multimedia, pages 1096-1104, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.177 + ], + "angle": 0, + "content": "[39] Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhonggang Qi, Ying Shan, and Xiaohu Qie. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453, 2023.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[40] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.892, + 0.305 + ], + "angle": 0, + "content": "[41] Yingwei Pan, Zhaofan Qiu, Ting Yao, Houqiang Li, and Tao Mei. To create what you tell: Generating videos from captions. In Proceedings of the 25th ACM international conference on Multimedia, pages 1789-1798, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.307, + 0.892, + 0.347 + ], + "angle": 0, + "content": "[42] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv, 2022. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.349, + 0.892, + 0.431 + ], + "angle": 0, + "content": "[43] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.434, + 0.892, + 0.531 + ], + "angle": 0, + "content": "[44] Alexander Raistrick, Lahav Lipson, Zeyu Ma, Lingjie Mei, Mingzhe Wang, Yiming Zuo, Karhan Kayan, Hongyu Wen, Beining Han, Yihan Wang, Alejandro Newell, Hei Law, Ankit Goyal, Kaiyu Yang, and Jia Deng. Infinite photorealistic worlds using procedural generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12630-12641. IEEE, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.532, + 0.892, + 0.601 + ], + "angle": 0, + "content": "[45] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE transactions on pattern analysis and machine intelligence, 44(3):1623-1637, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.603, + 0.892, + 0.657 + ], + "angle": 0, + "content": "[46] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12179-12188, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.66, + 0.892, + 0.715 + ], + "angle": 0, + "content": "[47] Chris Rockwell, David F Fouhey, and Justin Johnson. Pixelsynth: Generating a 3d-consistent experience from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14104-14113, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.717, + 0.892, + 0.785 + ], + "angle": 0, + "content": "[48] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684–10695, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.788, + 0.892, + 0.814 + ], + "angle": 0, + "content": "[49] RunWay. Gen-2: The next step forward for generative ai, 2023. https://research.runwayml.com/gen2.6,7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.817, + 0.892, + 0.871 + ], + "angle": 0, + "content": "[50] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2016. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[51] Liao Shen, Xingyi Li, Huiqiang Sun, Juwen Peng, Ke Xian, Zhiguo Cao, and Guosheng Lin. Make-it-4d: Synthesizing" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.927, + 0.52, + 0.938 + ], + "angle": 0, + "content": "10179" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "a consistent long-term dynamic scene video from a single image. arXiv preprint arXiv:2308.10257, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.123, + 0.47, + 0.191 + ], + "angle": 0, + "content": "[52] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8028-8038, 2020. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.195, + 0.469, + 0.263 + ], + "angle": 0, + "content": "[53] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.267, + 0.469, + 0.322 + ], + "angle": 0, + "content": "[54] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew J Davison. imap: Implicit mapping and positioning in real-time. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6229-6238, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.325, + 0.469, + 0.38 + ], + "angle": 0, + "content": "[55] Shitao Tang, Fuyang Zhang, Jiacheng Chen, Peng Wang, and Yasutaka Furukawa. Mvdiffusion: Enabling holistic multiview image generation with correspondence-aware diffusion. arXiv preprint arXiv:2307.01097, 2023. 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.383, + 0.469, + 0.436 + ], + "angle": 0, + "content": "[56] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision, pages 402-419. Springer, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.442, + 0.47, + 0.511 + ], + "angle": 0, + "content": "[57] Hung-Yu Tseng, Qinbo Li, Changil Kim, Suhib Alsisan, Jia-Bin Huang, and Johannes Kopf. Consistent view synthesis with pose-guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16773-16783, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.513, + 0.469, + 0.581 + ], + "angle": 0, + "content": "[58] Patrick von Platen, Suraj Patil, Anton Lozhkov, Pedro Cuenca, Nathan Lambert, Kashif Rasul, Mishig Davaadorj, and Thomas Wolf. Diffusers: State-of-the-art diffusion models. https://github.com/huggingface/diffusers, 2022.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.585, + 0.469, + 0.64 + ], + "angle": 0, + "content": "[59] Can Wang, Ruixiang Jiang, Mengei Chai, Mingming He, Dongdong Chen, and Jing Liao. Nerf-art: Text-driven neural radiance fields stylization. arXiv preprint arXiv:2212.08070, 2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.644, + 0.469, + 0.697 + ], + "angle": 0, + "content": "[60] Guangcong Wang, Zhaoxi Chen, Chen Change Loy, and Ziwei Liu. Sparsenerf: Distilling depth ranking for few-shot novel view synthesis. arXiv preprint arXiv:2303.16196, 2023.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.702, + 0.469, + 0.77 + ], + "angle": 0, + "content": "[61] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A. Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12619-12629, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.774, + 0.469, + 0.828 + ], + "angle": 0, + "content": "[62] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation, 2023. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[63] Olivia Wiles, Georgia Gkioxari, Richard Szeliski, and Justin Johnson. Synsin: End-to-end view synthesis from a single image. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7467-7477, 2020. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[64] Chenfei Wu, Lun Huang, Qianxi Zhang, Binyang Li, Lei Ji, Fan Yang, Guillermo Sapiro, and Nan Duan. Godiva: Generating open-domain videos from natural descriptions. arXiv preprint arXiv:2104.14806, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.15, + 0.894, + 0.218 + ], + "angle": 0, + "content": "[65] Chenfei Wu, Jian Liang, Lei Ji, Fan Yang, Yuejian Fang, Daxin Jiang, and Nan Duan. Niwa: Visual synthesis pretraining for neural visual world creation. In Proceedings of the European Conference on Computer Vision, pages 720-736. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.274 + ], + "angle": 0, + "content": "[66] Jingbo Zhang, Xiaoyu Li, Ziyu Wan, Can Wang, and Jing Liao. Text2nerf: Text-driven 3d scene generation with neural radiance fields. arXiv preprint arXiv:2305.11588, 2023. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.277, + 0.892, + 0.345 + ], + "angle": 0, + "content": "[67] Xiaoshuai Zhang, Sai Bi, Kalyan Sunkavalli, Hao Su, and Zexiang Xu. Nerfusion: Fusing radiance fields for large-scale scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5449-5458, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.348, + 0.892, + 0.401 + ], + "angle": 0, + "content": "[68] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 
3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.927, + 0.519, + 0.938 + ], + "angle": 0, + "content": "10180" + } + ] +] \ No newline at end of file diff --git a/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/e514998b-c539-47e4-bf66-6c5fccc605eb_origin.pdf b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/e514998b-c539-47e4-bf66-6c5fccc605eb_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..780acecd11724c8df52d28703547455647738bdd --- /dev/null +++ b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/e514998b-c539-47e4-bf66-6c5fccc605eb_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8ae6978dd21f8c66494678e0aa959c446312d3e143086b3b9ecf665c9be55a5 +size 5069848 diff --git a/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/full.md b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3b29e1c8c9b44eb363d51fe654af4000b192d9fb --- /dev/null +++ b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/full.md @@ -0,0 +1,370 @@ +# 3D-SceneDreamer: Text-Driven 3D-Consistent Scene Generation + +Songchun Zhang $^{1}$ , Yibo Zhang $^{2}$ , Quan Zheng $^{4}$ , Rui Ma $^{2}$ , Wei Hua $^{3}$ , Hujun Bao $^{1}$ , Weiwei Xu $^{1}$ , Changqing Zou $^{1,3*}$ + +1 Zhejiang University 2 Jilin University 3 Zhejiang Lab + +$^{4}$ Institute of Software, Chinese Academy of Sciences + +![](images/d1f7a6621a947ff51cc845b08531a5daef0f8e8b76e07990f17c1d9ed44a2816.jpg) +Figure 1. Text-Driven 3D Scene Generation from text prompts. (a) Given a scene description prompt and an arbitrary 6-degree-of-freedom (6-DOF) camera trajectory, our approach progressively generates the full 3D scene by continuously synthesizing 2D novel views. (b) The limitation of mesh representations [12, 16] and the lack of reasonable rectification mechanisms lead to cumulative errors in outdoor scenes, which are respectively marked with yellow and blue dash line boxes. In contrast, our approach can alleviate the problem by introducing a progressive generation pipeline. + +![](images/c2671e5361e19388a00d9ce464ff5fb6ed9721ceae32b8342a57cb8252f86856.jpg) +Aerial drone shot of a mountain range in the style of cinematic video, shallow depth of field, subject in focus, dynamic movement + +![](images/ab64d1a82f00e61c4b83673cab557366a3a1729acc007f1786ff472b038db194.jpg) + +![](images/ae774f98176f34d7cd4c755c84e4f5ba10f2010ddd93e17d23161bc9c2cb2cbd.jpg) + +![](images/e2353ea3abb63236884dd018a3302097572376aa219cdc0d8890f6864371c6b8.jpg) +(b) Observation + +![](images/7a70c4f0b5ddc98e4973cd2557d36489ffef18bfd57a3c4d789a4549f3104dbe.jpg) + +![](images/0d3c579a780a8e924e302dda7cd864b75facc5bf555d62fca1b294709a7a4285.jpg) + +# Abstract + +Text-driven 3D scene generation techniques have made rapid progress in recent years. Their success is mainly attributed to using existing generative models to iteratively perform image warping and inpainting to generate 3D scenes. However, these methods heavily rely on the outputs of existing models, leading to error accumulation in geometry and appearance that prevent the models from being used in various scenarios (e.g., outdoor and unreal scenarios). 
To address this limitation, we generatively refine the newly generated local views by querying and aggregating global 3D information, and then progressively generate the 3D scene. Specifically, we employ a tri-plane features-based NeRF as a unified representation of the 3D scene to constrain global 3D consistency, and propose a generative refinement network to synthesize new contents with higher quality by exploiting the natural image prior from 2D diffusion model as well as the global 3D information of the current scene. Our extensive experiments demonstrate that, in comparison to previous methods, our approach supports wide variety of scene generation and arbitrary camera trajectories with improved visual quality and 3D consistency. + +# 1. Introduction + +In recent years, with the growing need for 3D creation tools for metaverse applications, attention to 3D scene generation techniques has increased rapidly. Existing tools [11, 44] usually require professional modeling skills and extensive manual labor, which is time-consuming and inefficient. To facilitate the 3D scene creation and reduce the need for professional skills, 3D scene generation tools should be intuitive and versatile while ensuring sufficient controllability. + +This paper focuses on the specific setting of generating consistent 3D scenes from the input texts that describe the 3D scenes. This problem is highly challenging from several perspectives, including the limitation of available text-3D data pairs and the need for ensuring both semantic and geometric consistency of the generated scenes. To overcome the limited 3D data issue, recent text-to-3D methods [42, 62] have leveraged the powerful pre-trained text-to-image diffusion model [48] as a strong prior to optimize 3D representation. However, their generated scenes often have relatively simpler geometry and lack 3D consistency, because 2D prior diffusion models lack the perception of 3D information. + +![](images/efbee3c07137c1e213a33737d20e908f74c2964a806245f6a0125a2fc3fbb76c.jpg) +Figure 2. Comparison with existing designs. (a) The feedforward approaches use depth-based warping and refinement operations to generate novel views of the scene without a unified representation. (b) The warping-inpainting approaches use mesh as a unified representation and generate the scene through iterative inpainting. (c) We replace the mesh with NeRF as the unified representation and alleviate the cumulative error issue by incorporating a generative refinement model. This allows our framework to support the generation of a wider range of scene types. + +Some recent methods [12, 16] introduce the monocular depth estimation model [45, 46] as a strong geometric prior and follow the warping-inpainting pipeline [26, 29] for progressive 3D scene generation, which partially solves the inconsistency problem. Although these methods can generate realistic scenes with multi-view 3D consistency, they mainly focus on indoor scenes and fail to handle large-scale outdoor scene generation as illustrated in Fig. 1 (b). This can be attributed to two main aspects: (1) Due to the adoption of an explicit 3D mesh as the unified 3D representation, the noise of the depth estimation in the outdoor scene can cause a large stretch of the scene geometry; (2) The lack of an efficient rectification mechanism in the pipeline leads to an accumulation of geometric and appearance errors. 
+ +In this paper, we present a new framework, named 3D-SceneDreamer that provides a unified solution for text-driven 3D consistent indoor and outdoor scene generation. Our approach employs a tri-planar feature-based radiance field as a unified 3D representation instead of 3D mesh, which is advantageous for general scene generation (especially in outdoor scenes) and supports navigating with arbitrary 6-DOF camera trajectories. Afterwards, we model the scene generation process as a progressive optimization of the NeRF representation, while a text-guided and scene-adapted generative novel view synthesis is employed to refine the NeRF optimization. Fig. 2 shows a comparison of our design with existing text-to-scene pipelines. + +Specifically, we first perform scene initialization, which consists of two stages, i.e., generating a supporting database and optimizing the initial scene representation. We first use the input text prompt and the pre-trained diffusion model [48] to generate the initial image as an appearance prior. Then, we use an off-the-shelf depth estimation model [2] + +to provide the geometric prior for the corresponding scene. Inspired by [66], to prevent NeRF from over-fitting for the single view image, we construct a database via differentiable spatial transformation [18] and use it for optimizing the initial NeRF representation of the generated scene. To generate the extrapolated content, we use volume rendering and trilinear interpolation in the novel viewpoints to obtain the initial rendered images and their corresponding feature maps. These outputs are later fed into our 3D-aware generative refinement model, whose output images are subsequently added as new content to the supporting database. Next, in conjunction with the new data, we progressively generate the whole 3D scene by updating our 3D representation through our incremental training strategy. + +Extensive experiments demonstrate that our approach significantly outperforms the state-of-the-art text-driven 3D scene generation method in both visual quality and 3D consistency. To summarize, our technical contributions are as follows: + +- We provide a unified solution for text-driven consistent 3D scene generation that supports both indoor and outdoor scenes as well as allows navigation with arbitrary 6-DOF camera trajectories. +- We propose to use a tri-planar feature-based neural radiance field as a global 3D representation of the scene to generate continuous scene views, which preserves the 3D consistency of the scene, empowered by a progressive optimization strategy. +- We propose a new generative refinement model, which explicitly injects 3D information to refine the coarse view generated by novel view synthesis and then incorporates the new views to refine the NeRF optimization. + +# 2. Related Work + +Text-Driven 3D Content Generation. Recently, motivated by the success of text-to-image models, employing pretrained 2D diffusion models to perform text-to-3D generation has gained significant research attention. Some pioneering works [42, 61] introduce the Score Distillation Sampling (SDS) and utilize 2D diffusion prior to optimize 3D representation. Subsequent works [8, 28, 34, 62] further enhance texture realism and geometric quality. However, they primarily focus on improving object-level 3D content generation rather than large-scale 3D scenes. Recent works [12, 16, 66] have proposed some feasible solutions for 3D scene generation. 
By utilizing the pre-trained monocular depth model and the inpainting model, they generate the 3D scene progressively based on the input text and camera trajectory. However, due to the underlying 3D representation or optimization scheme, these methods are limited in several aspects. For example, as [12, 16] utilize explicit mesh as 3D representation, it is difficult for them to generate outdoor scenes. Besides, their mesh outputs + +also suffer from fragmented geometry and artifacts due to imprecise depth estimation results. Although Text2NeRF achieves to generate high-quality indoor and outdoor scenes by replacing the meshes with neural radiance fields [35], it can only generate camera-centric scenes. In contrast, our approach not only supports more general 3D scene generation but can also handle arbitrary 6DOF camera trajectories. + +Text-Driven Video Generation. Text-Driven Video Generation aims to create realistic video content based on textual conditions. In the early stages, this task was approached using GAN [1, 25, 41] and VAE [33, 38] generative models, but the results were limited to low-resolution short video clips. Following the significant advancements in text-to-image models, recent text-to-video works extend text-to-image models such as transformer [17, 64, 65] and diffusion model [3, 14, 15, 32, 53, 68] for video generation. These approaches enable the generalization of high-quality and open-vocabulary videos, but require a substantial amount of text-image or text-video pairs of data for training. Text2Video-Zero [19] proposes the first zero-shot text-to-video generation pipeline that does not rely on training or optimization, but their generated videos lack smoothness and 3D consistency. Our method is capable of generating smooth and long videos which are consistent to the scenes described by the input text, without the need for large-scale training data. Furthermore, the utilization of NeRF as the 3D representation enhances the 3D consistency of our videos. + +View Synthesis with Generative Models. Several early stage studies [5, 21, 22, 26, 29, 63] employ GAN to synthesize new viewpoints. However, the training process of GAN is prone to the issue of mode collapse, which limits the diversity of generation results. Diffusion model has been shown its capability to generate diverse and high-quality images and videos. In recent view synthesis works [4, 7, 51, 57], diffusion models have been employed to achieve improved scene generation results over prior works. For example, in Deceptive-NeRF [30], pseudo-observations are synthesized by diffusion models and these observations are further utilized for enhance the NeRF optimization. Closely similar to [30], our method proposes a geometry-aware diffusion refinement model to reduce the artifacts of the input coarse view generated by the initial novel view synthesis. With the 3D information from NeRF features injected to the refinement process, we can achieve globally consistent 3D scene generation. + +# 3. Neural Radiance Fields Revisited + +Neural Radiance Fields (NeRF) [59] is a novel view synthesis technique that has shown impressive results. It represents the specific 3D scene via an implicit function, denoted as $f_{\theta}:(\pmb {x},\pmb {d})\mapsto (\mathbf{c},\sigma)$ , given a spatial location $\mathbf{x}$ and a ray direction $\mathbf{d}$ , where $\theta$ represents the learnable parameters, + +and $\mathbf{c}$ and $\sigma$ are the color and density. 
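For readers who prefer code to notation, the snippet below sketches such an implicit field in PyTorch; the layer widths, activations, and number of positional-encoding frequencies are illustrative assumptions rather than the exact architecture used in this work.

```python
import torch
import torch.nn as nn

def positional_encoding(x, num_freqs=6):
    # gamma(.): encode each coordinate with sin/cos at exponentially growing frequencies.
    freqs = 2.0 ** torch.arange(num_freqs, dtype=torch.float32, device=x.device) * torch.pi
    scaled = x.unsqueeze(-1) * freqs                      # (..., 3, num_freqs)
    return torch.cat([scaled.sin(), scaled.cos()], dim=-1).flatten(-2)

class ToyRadianceField(nn.Module):
    """Illustrative f_theta: (x, d) -> (c, sigma); a toy sketch, not the paper's exact network."""
    def __init__(self, num_freqs=6, hidden=128):
        super().__init__()
        in_dim = 3 * 2 * num_freqs
        self.trunk = nn.Sequential(nn.Linear(in_dim, hidden), nn.ReLU(),
                                   nn.Linear(hidden, hidden), nn.ReLU())
        self.sigma_head = nn.Linear(hidden, 1)
        self.color_head = nn.Sequential(nn.Linear(hidden + in_dim, hidden), nn.ReLU(),
                                        nn.Linear(hidden, 3), nn.Sigmoid())

    def forward(self, x, d):
        h = self.trunk(positional_encoding(x))
        sigma = torch.relu(self.sigma_head(h)).squeeze(-1)   # non-negative volume density
        color = self.color_head(torch.cat([h, positional_encoding(d)], dim=-1))
        return color, sigma
```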
To render a novel image, NeRF marches a camera ray $\mathbf{r}(t) = \mathbf{o} + t\mathbf{d}$ starting from the origin $\mathbf{o}$ through each pixel and calculates its color $\hat{\boldsymbol{C}}$ and rendered depth $\hat{\boldsymbol{D}}$ via the volume rendering quadrature, i.e., $\hat{\boldsymbol{C}} (\mathbf{r}) = \sum_{i = 1}^{N}T_{i}\alpha_{i}\mathbf{c}_{i}$ and $\hat{\boldsymbol{D}} (\mathbf{r}) = \sum_{i = 1}^{N}T_{i}\alpha_{i}t_{i}$ , where $T_{i} = \exp \left(-\sum_{j = 1}^{i - 1}\sigma_{j}\delta_{j}\right)$ , $\alpha_{i} = (1 - \exp (-\sigma_{i}\delta_{i}))$ , and $\delta_{k} = t_{k + 1} - t_{k}$ indicates the distance between two point samples. Typically, stratified sampling is used to select the point samples $\{t_i\}_{i = 1}^N$ between $t_n$ and $t_f$ , which denote the near and far planes of the camera. When multi-view images are available, $\theta$ can be easily optimized with the MSE loss: + +$$ +\mathcal {L} _ {\theta} = \sum_ {\boldsymbol {r} \in \mathcal {R}} \left\| \hat {\boldsymbol {C}} (\boldsymbol {r}) - \boldsymbol {C} (\boldsymbol {r}) \right\| _ {2} ^ {2} \tag {1} +$$ + +where $\mathcal{R}$ is the collection of rays, and $C$ indicates the ground truth color. + +# 4. Methods + +# 4.1. Overview + +Given a description of the target scene a the input text prompt $\mathbf{p}$ , and a pre-defined camera trajectory denoted by $\{\mathbf{T}_i\}_{i=1}^N$ , our goal is to generate a 3D scene along the camera trajectory with the multiview 3D consistency. + +The overview of the proposed model is illustrated in Fig. 3. We first introduce the acquisition of appearance and structural priors in Sec. 4.2, which serve as the scene initialization. The formulation of Unified Scene Representation and its optimization with the former priors are presented in Sec. 4.3. To synthesize new content while maintaining the multiview consistency, we propose a geometry-aware refinement model in Sec. 4.4. Finally, the full online scene generation process is presented in Sec. 4.5. + +# 4.2. Scene Context Initialization + +Given the input textual prompt $\mathbf{p}$ , we first utilize a pretrained stable diffusion model to generate an initial 2D image $\mathbf{I}_0$ , which serves as an appearance prior for the scene. Then, we feed this image into the off-the-shelf depth estimation model [2], and take the output as a geometric prior for the target scene, denoted as $\mathbf{D}_0$ . Inspired by [66], we construct a supporting database $S = \{((\mathbf{D}_i,\mathbf{I}_i,\mathbf{T}_i)\}_{i = 1}^N$ via differentiable spatial transformation [18] and image inpainting [16] techniques, where $N$ denotes the number of initial viewpoints. This database provides additional views and depth information, which could prevent the model from overfitting to the initial view. With the initial supporting database, we can initialize the global 3D representation. The data generated by our method will be continuously appended to this supporting database for continuous optimization of the global 3D representation. More details are provided in our supplemental materials. + +![](images/a184d31afa8507f0e45a91c95f11f4d804eadb76155b7671d4dd8922b33d832f.jpg) +Figure 3. Overview of our pipeline. (a) Scene Context Initialization contains a supporting database to provide novel viewpoint data for progressive generation. (b) Unified 3D Representation provides a unified representation for the generated scene, which allows our approach to accomplish more general scene generation and to hold the 3D consistency at the same time. 
(c) 3D-Aware Generative Refinement alleviates the cumulative error issue during long-term extrapolation by exploiting large-scale natural images prior to generatively refine the synthesized novel viewpoint image. The consistency regularization module is used for test-time optimization. + +# 4.3. Unified Scene Representation + +Though previous methods [26, 29] have achieved novel view generations via differentiable rendering-based frame-to-frame warping, there are still drawbacks: (1) the global 3D consistency is not ensured, (2) cumulative errors occur in long-term generation, (3) complex scenes may lead to failure. To tackling above issues, we propose a tri-planar feature-based NeRF as the unified representation. Compared with previous methods [12, 16, 26, 29], our approach constrains the global 3D consistency while handling the scene generation with complex appearances and geometries. + +Tri-planar Feature Representation. For constructing the feature tri-planes $\mathbf{M} = \{\mathbf{M}_{xy},\mathbf{M}_{yz},\mathbf{M}_{xz}\} \in \mathbb{R}^{3\times S\times S\times D}$ from the input images, where $S$ is the spatial resolution and $D$ is the feature dimension, we first extract 2D image features from supporting views using the pre-trained ViT from DINoV2 [40] because of its strong capability in modeling cross-view correlations. We denote the extracted feature corresponding to image $\mathbf{I}_i$ as $\mathbf{F}_i$ , and the feature set obtained from all input views is denoted as $\{\mathbf{F}_i\}_{i = 1}^N$ . To lift the local 2D feature maps into the unified 3D space, similar to the previous work [67], we back-project the extracted local image features $\mathbf{F}$ into a 3D feature volume $\mathbf{V}$ along each camera ray. To avoid the cubic computational complexity of volumes, we construct a tri-planar representation by projecting the 3D feature volume $\mathbf{V}$ into its respective plane via three separate encoders. This representation reduces the complexity from feature dimensionality reduction, but with equivalent information compared to purely 2D feature representations (e.g., BEV representations [10, 27]). + +Implicit Radiance Field Decoder. Based on the constructed tri-planar representation $\mathbf{M}$ , we can reconstruct the images with target poses via our implicit radiance field decoder module $\Psi = \{f_{g}, f_{c}\}$ , where $f_{g}$ and $f_{c}$ indicate the geometric feature decoder and appearance decoder. Given a 3D point $p = [i, j, k]$ and a view direction $d$ , we orthogonally project $p$ to each feature plane in $\mathbf{M}$ with bilinear sampling to obtain the conditional feature $\mathbf{M}_p = [\mathbf{M}_{xy}(i, j), \mathbf{M}_{yz}(j, k), \mathbf{M}_{xz}(i, k)]$ . We feed $\mathbf{M}_p$ into the geometric feature decoder to obtain the predicted density $\sigma$ and the geometric feature vector $\mathbf{g}$ , after which we further decode its color $c$ : + +$$ +(\sigma , \boldsymbol {g}) = f _ {g} (\gamma (\boldsymbol {x}), \mathbf {M} _ {p}) +$$ + +$$ +\boldsymbol {c} = f _ {c} (\gamma (\boldsymbol {x}), \gamma (\boldsymbol {d}), \boldsymbol {g}, \mathbf {M} _ {p}) \tag {2} +$$ + +where $\gamma (\cdot)$ indicates the positional encoding function. Then we can calculate the pixel color via an approximation of the volume rendering integral mentioned in Sec. 3. + +Training Objective. To optimize our 3D representation, we leverage the ground truth colors from the target image as the supervisory signal. 
Additionally, in the setting with sparse input views, we employ the estimated dense depth map to enhance the model's learning of low-frequency geometric information and prevent overfitting to appearance details. Our optimization objective is as follows: + +$$ +\mathcal {L} = \sum_ {\boldsymbol {r} \in \mathcal {R}} \left(\mathcal {L} _ {\text {p h o t o}} (\boldsymbol {r}) + \lambda \mathcal {L} _ {\text {d e p t h}} (\boldsymbol {r})\right) \tag {3} +$$ + +where $\mathcal{L}_{\text{photo}}(\boldsymbol{r}) = \left\| \hat{\boldsymbol{C}}(\boldsymbol{r}) - \boldsymbol{C}(\boldsymbol{r}) \right\|^2$ , $\mathcal{L}_{\text{depth}}(\boldsymbol{r}) = \left\| \hat{\mathbf{D}}_{\mathbf{r}}^*(\boldsymbol{r}) - \mathbf{D}^*(\boldsymbol{r}) \right\|^2$ , $\mathcal{R}$ denotes the collection of rays gen- + +erated from the images in the supporting database, $\lambda$ indicates the balance weight of the depth loss, and $\mathbf{D}^{*}(\boldsymbol{r})$ and $\hat{\mathbf{D}}_{\mathbf{r}}^{*}(\boldsymbol{r})$ denote the rendered depth and the depth obtained from the pre-trained depth estimation model. Since monocular depths are not scale- and shift-invariant, both depths are normalized per frame. + +# 4.4. 3D-Aware Generative Refinement + +Given a sequence of poses and an initial viewpoint, previous methods [12, 16, 66] usually generate novel views by the warping-inpainting pipeline. Though these methods have achieved promising results, they suffer from two issues: (1) The lack of rectification mechanisms in these methods can lead to error accumulation. (2) The lack of explicit 3D information during the inpainting process of these methods can lead to insufficient 3D consistency. Therefore, we propose a 3D-Aware Generative Refinement model to alleviate the above issues. On the one hand, we introduce an efficient refinement mechanism to reduce the cumulative error in the novel view generation. On the other hand, we explicitly inject 3D information during the process of generating novel views to enhance 3D consistency. We will describe the model design below. + +Model Design. Given a novel viewpoint with camera pose $\mathbf{T}_i$ , the tri-planar features $\mathbf{M}$ , we can obtain the rendered image $\mathbf{I}_r$ , rendered depth $\mathbf{D}_r$ and the corresponding 2D feature map $\mathbf{F}_r$ via the radiance field decoder module $\Psi$ and volume rendering. For convenience, we model the whole process with a mapping operator $\mathcal{F}_{ren}:\{\mathbf{T}_i,\mathbf{M}\} \mapsto \{\mathbf{I}_r,\mathbf{F}_r,\mathbf{D}_r\}$ . Note that the feature map is computed similarly to the color and depth, i.e., by numerical quadrature, and can be formulated as + +$$ +\mathbf {F} _ {r} (\mathbf {r}) = \sum_ {i = 1} ^ {N} T _ {i} \left(1 - \exp \left(- \sigma_ {i} \delta_ {i}\right)\right) \boldsymbol {g} _ {i} \tag {4} +$$ + +where $\pmb{g}_i$ indicates the feature vector decoded by $f_{g}$ , and $N$ denotes the total number of point samples on the ray $\pmb{r}$ . + +Although the quality of the rendered coarse results may not be very high, they can still provide reasonable guidance for the extrapolated view generation according to the current scene. Based on this assumption, we propose to take the rendered image and the feature map as conditional inputs to a pre-trained 2D stable diffusion model and generate a refined synthetic image $\hat{\mathbf{I}}_r$ via fine-tuning the model, which allows to leverage natural image priors derived from internet-scale data. 
The process can be formulated as: + +$$ +\hat {\mathbf {I}} _ {r} = \mathcal {F} _ {g e n} (\mathbf {I} _ {r}, \tau (\mathbf {p}), \mathcal {G} (\mathbf {F} _ {r})) \tag {5} +$$ + +where $\mathcal{F}_{gen}$ denotes our generative refinement model, $\tau (\mathbf{p})$ indicates the input text embedding, and $\mathcal{G}$ denotes the feature adapter for learning the mapping from external control information to the internal knowledge in LDM. + +Scene-Adapted Diffusion Model Fine-Tuning. For the scene generation task, we propose to leverage the rich 2D priors in the pre-trained latent diffusion model instead of training a new model from scratch. Thus, we jointly train the feature adapter, the radiance field decoder, and the feature aggregation layer, while keeping the parameters of stable diffusion fixed. The objective of the fine-tuning process is shown below: + +$$ +\mathcal {L} _ {A D} = \mathbb {E} _ {t, \epsilon \sim \mathcal {N} (0, I)} \left[ \| \epsilon_ {\theta} \left(\boldsymbol {z} _ {t}, t, \tau (\mathbf {p}), \mathbf {F} _ {r}, \mathbf {I} _ {r}\right) - \epsilon \| _ {2} ^ {2} \right] \tag {6} +$$ + +With the rendered feature map $\mathbf{F}_r$ containing information about the appearance and geometry, we can control the pre-trained text-to-image diffusion model to generate images that are consistent with the content of generated images from previous viewpoints. In addition, our model inherits the high-quality image generation ability of the stable diffusion model, which ensures the plausibility of the generated views. The pre-trained prior and our effective conditional adaptation enable our model to have generalization ability in novel scenes. + +Global-Local Consistency Regularization. In the online generation process, though our model can rectify the coarse rendering results, we do not explicitly constrain the 3D consistency across views when synthesizing novel views. Therefore, we design a regularization term $\mathcal{L}_{\text{cons}}$ for test-time optimization, which shares the same formula as Eq. (6) to guarantee the plausibility of the generated novel views. Specifically, we expect that 3D consistency exists between novel views obtained from geometric projection using local geometric information (i.e., monocular depth estimation) and novel views generated using global geometric information (i.e., global tri-planar 3D representation). Thus, we simultaneously generate novel views based on the previous warping-and-inpainting pipeline and use them as supervisory signals to further fine-tune the feature adapter. + +# 4.5. Online Scene Generation Process. + +In this section, we introduce our online 3D scene generation process, which consists of three parts: scene representation initialization, extrapolation content synthesis, and incremental training strategy. + +Scene Representation Initialization. Given the input textual prompt, we first generate an initial 2D image using a pre-trained stable diffusion model, after which we construct a supporting database $S$ via the method mentioned in Sec. 4.2. Then, by exploiting the data from the database, as well as the photometric loss (Eq. (3)), we can optimize the unified representation. To prevent the model from overfitting to high-frequency details, we allow the model to learn low-frequency geometric information better by utilizing the depth priors. [60]. + +Extrapolated Content Synthesis. 
To generate the extrapolated content, we proceed by retrieving the next pose, de + +noted as $\mathbf{T}_i$ , from the pose sequence $\{\mathbf{T}_i\}_{i=1}^N$ . We then employ volumetric rendering to obtain a coarse view of the current viewpoint and the corresponding feature map. These rendered outputs are used as conditional inputs to our generative refinement model $\mathcal{F}_{gen}$ for generating a refined view. Due to the presence of a generative refinement mechanism, our extrapolation method mitigates the effects of cumulative errors. The refined view from the model $\mathcal{F}_{gen}$ is subsequently added to the supporting database $\mathcal{S}$ as new content. Incremental Training Strategy. After obtaining the new content, we then need to update the unified representation. However, fine-tuning only on the newly generated data can lead to catastrophic forgetting, whereas fine-tuning on the entire dataset requires excessively long training time. Inspired by [54], we sample a sparse set of rays $\mathcal{Q}$ according to the information gain to optimize the representation, thus improving the efficiency of the incremental training. + +# 5. Experiments + +# 5.1. Implementation details. + +We implemented our system using PyTorch. For the differentiable rendering part, we utilized [13] for depth estimation. To avoid the occurrence of black holes, we referred to the implementation in [18] to generate surrounding views. For the text-guided image generation, we use the publicly available stable diffusion code from Diffusers [58]. For the multi-view consistency image generation, we refer to the implementation of T2I-Adapter [39] to inject the depth feature conditions. In the progressive NeRF reconstruction part, we refer to the tri-planar implementation in [6]. We conducted all experiments using 4 NVIDIA RTX A100 GPUs for training and inference. More details can be found in our supplementary material. + +# 5.2. Evaluation metrics. + +Image quality. We evaluate the quality of our generated images using CLIP Score (CS), Inception Score (IS), Blind/Referenceless Image Spatial Quality Evaluator (BRISQUE) [36] and Natural Image Quality Evaluator (NQIE) [37]. The Inception Score is based on the diversity and predictability of the generated images. CLIP Score uses a pre-trained CLIP model [43] to measure the similarity between text and images. Note that existing visual quality metrics such as FID cannot be used since the scenes generated by text-to-3D approaches do not exhibit the same underlying data distribution. + +Multiview Consistency. Given a sequence of rendered images, we evaluate the multi-view consistency of our generated scene using Camera Error (CE), Depth Error (DE), and flow-warping error (FE) metrics. Motivated by [10, 12], we use COLMAP [50], a reliable SfM technique, to compute the camera trajectory and the sparse 3D point cloud. CE + +
| Method | 3D Representation | DE↓ | CE↓ | SfM rate↑ | CS↑ | BRISQUE↓ | NIQE↓ | IS↑ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Inf-Zero [26] | - | - | 1.189 | 0.38 | - | 21.43 | 5.85 | 2.34 |
| 3DP [52] | LDI&Mesh | 0.42 | 0.965 | 0.47 | - | 29.95 | 5.84 | 1.75 |
| PixelSynth [47] | Point Cloud | 0.36 | 0.732 | 0.52 | - | 36.74 | 4.98 | 1.28 |
| ProlificDreamer [62] | NeRF | - | - | - | 23.41 | 27.97 | 6.75 | 1.21 |
| Text2Room [16] | Mesh | 0.24 | 0.426 | 0.63 | 28.15 | 28.37 | 5.46 | 2.19 |
| Scenescape [12] | Mesh | 0.18 | 0.394 | 0.76 | 28.84 | 24.54 | 4.78 | 2.23 |
| Ours | NeRF | 0.13 | 0.176 | 0.89 | 29.97 | 23.64 | 4.66 | 2.62 |
+ +Table 1. Comparison with text-to-scene methods. We compare our approach with two categories of approaches, i.e., pure text-driven 3D generation and text-to-image generation followed by 3D scene generation. Metrics on 3D consistency and visual quality are illustrated. + +
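For concreteness, the sketch below shows one plausible way to turn SfM outputs into the DE and CE numbers reported in Tab. 1, assuming the COLMAP camera centers, sparse depths, and estimated depths have already been exported as NumPy arrays; the per-frame mean/std depth normalization and the Procrustes trajectory alignment are illustrative choices, not necessarily the exact evaluation protocol.

```python
import numpy as np

def depth_error(colmap_depth, est_depth, valid):
    """DE for one frame: compare COLMAP sparse depth with the estimated depth at the
    pixels where sparse depth exists, after per-frame normalization (monocular depth
    is only defined up to scale and shift)."""
    a, b = colmap_depth[valid], est_depth[valid]
    a = (a - a.mean()) / (a.std() + 1e-8)
    b = (b - b.mean()) / (b.std() + 1e-8)
    return np.abs(a - b).mean()

def camera_error(pred_centers, given_centers):
    """CE: mean distance between the SfM-recovered camera centers and the given
    trajectory after a similarity (Procrustes) alignment; reflections are ignored
    here for brevity."""
    p = pred_centers - pred_centers.mean(axis=0)
    g = given_centers - given_centers.mean(axis=0)
    scale = np.linalg.norm(g) / (np.linalg.norm(p) + 1e-8)
    u, _, vt = np.linalg.svd(p.T @ g)        # optimal rotation via SVD
    rot = (u @ vt).T
    aligned = scale * (p @ rot.T)
    return np.linalg.norm(aligned - g, axis=1).mean()
```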
| Method | FE↓ | CS↑ | BRISQUE↓ | NIQE↓ | IS↑ |
| --- | --- | --- | --- | --- | --- |
| VideoFusion [32] | 0.039 | 23.54 | 27.39 | 5.94 | 2.21 |
| GEN-2 [49] | 0.032 | 27.54 | 25.65 | 5.24 | 2.38 |
| Ours | 0.028 | 29.95 | 23.53 | 4.70 | 2.69 |
+ +Table 2. Comparison with text-to-video methods. Metrics on flow warping error (FE) and visual quality are illustrated. + +
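As a rough illustration of how FE can be obtained, the snippet below warps one rendered frame onto the next with a precomputed optical-flow field (e.g., from RAFT [56]) and averages the photometric difference; the occlusion masking used in [23] is omitted, so this is a simplified sketch rather than the exact protocol.

```python
import torch
import torch.nn.functional as F

def flow_warping_error(frame_t, frame_t1, flow):
    """Simplified FE: warp frame t+1 back to frame t with the forward flow and
    average the photometric difference (no occlusion mask).
    frame_*: (1, 3, H, W) in [0, 1]; flow: (1, 2, H, W) in pixels (dx, dy)."""
    _, _, h, w = frame_t.shape
    ys, xs = torch.meshgrid(torch.arange(h, device=flow.device),
                            torch.arange(w, device=flow.device), indexing="ij")
    base = torch.stack([xs, ys], dim=0).float().unsqueeze(0)   # pixel grid, (1, 2, H, W)
    target = base + flow                                       # where each pixel of frame t lands in t+1
    grid_x = 2.0 * target[:, 0] / (w - 1) - 1.0                # normalize to [-1, 1] for grid_sample
    grid_y = 2.0 * target[:, 1] / (h - 1) - 1.0
    grid = torch.stack([grid_x, grid_y], dim=-1)               # (1, H, W, 2)
    warped = F.grid_sample(frame_t1, grid, align_corners=True)
    return (warped - frame_t).abs().mean().item()
```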
| Method | CS↑ | BRISQUE↓ | NIQE↓ | IS↑ |
| --- | --- | --- | --- | --- |
| Text2Light [9] | 26.16 | 49.26 | 6.15 | 2.54 |
| MVDiffusion [55] | 27.25 | 31.54 | 5.47 | 2.76 |
| Ours | 28.12 | 24.15 | 4.96 | 2.79 |
+ +Table 3. Comparison with text-to-panorama methods. We compare our method with recent text-driven 3D generation methods [9, 55]. Metrics on visual quality are illustrated. + +
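For reference, one common way to obtain the CLIP Score (CS) columns above is sketched below with the Hugging Face CLIP implementation; the specific checkpoint and the helper name `clip_score` are illustrative assumptions, and reported CS values are typically 100× the cosine similarity computed here.

```python
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# The checkpoint name is an illustrative choice, not necessarily the one used in the paper.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").eval()
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def clip_score(image_path, prompt):
    """Cosine similarity between CLIP embeddings of a rendered view and its text prompt."""
    inputs = processor(text=[prompt], images=Image.open(image_path),
                       return_tensors="pt", padding=True)
    with torch.no_grad():
        out = model(**inputs)
    img = out.image_embeds / out.image_embeds.norm(dim=-1, keepdim=True)
    txt = out.text_embeds / out.text_embeds.norm(dim=-1, keepdim=True)
    return (img * txt).sum(dim=-1).item()   # multiply by 100 for the usual 0-100 scale
```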
| Method | DE↓ | CE↓ | SfM rate↑ | CS↑ | BRISQUE↓ | NIQE↓ |
| --- | --- | --- | --- | --- | --- | --- |
| Full Model | 0.13 | 0.176 | 0.89 | 29.97 | 26.18 | 6.54 |
| W/o UR | 0.46 | 0.764 | 0.41 | 22.71 | 27.95 | 5.81 |
| W/o GRM | 0.59 | 0.981 | 0.46 | 22.12 | 29.64 | 5.75 |
| W/o CR | 0.19 | 0.254 | 0.78 | 28.14 | 27.16 | 6.12 |
+ +Table 4. Ablations. For brevity, we use UR, GRM, CR to denote Unified Representation, Generative Refinement Model and Consistency Regularization, respectively. + +is computed by comparing the difference between the predicted trajectory and the given trajectory, and DE is computed by comparing the difference between the sparse depth map obtained by COLMAP and the estimated depth map. In addition, to account for temporal consistency, we follow [23] and use RAFT [56] to compute FE. + +# 5.3. Comparisons + +Baselines. Since there are only a few baselines directly related to our approach, we also take into account some methods with similar capabilities and construct their variants for comparison. Specifically, the following three categories of methods are included: + +- Text-to-Scene. There exist techniques [12, 16] that generate 3D meshes iteratively by employing warping and inpainting processes, allowing for direct comparisons with our proposed methods. Moreover, image-guided 3D generation methods [24, 26, 47] are also available, wherein initial images can be produced using a T2I model. Subse + +![](images/65ed0448407265cbc362a5b1d0cd09f28ab146347d16340e3909089a5ec239c9.jpg) + +![](images/2e9b0df492708e819cefd86a4f8484c48808126114dcb5ca4910e718367bcd8f.jpg) + +![](images/365299417889dd154f217046dee628d0d66cf75c41ec0f4447407f908f38a486.jpg) + +![](images/d79b069678db8c93d7181e7cb2c145a3f8589fabb2d18f5ab3fc2d06f25ef2b0.jpg) + +![](images/a6bc8fc880685fc0f368de228ad5512f56cb16bfbc550c7bbf90488a79d09577.jpg) + +![](images/ed90cb84fa4ecc1653c5bcc660d88f963d3ea3564d14615d91a0001a29099d11.jpg) + +![](images/b48aff05088edfbd8a8d513eb3d4b1b6934f69667e0485c159279dfdeb5b8756.jpg) + +![](images/a0e445f9c89f8e06600d05fe4f47d529595356557807f92ecfec7035da14fdc6.jpg) +POV, A versatile room with a sofa as the centerpiece, a bookshelf, a lamp, a desk TV, masterpiece + +![](images/bf3993ff869f02f6c8f0a1947b7fc58dbedc630c6ead31f6bc2152b785782462.jpg) + +![](images/05d1187a6b7b0854d933452ba99d28d7fee44346e266b26ed0f7d104298a1d6d.jpg) + +![](images/7b8c450ac0456d6d93dec9fef4b5fd7ffcb62717db7e6cec41b56c9884f5c1e9.jpg) + +![](images/c282522fcfa6e7d542dc9868b8aea235526e12e6b0c12d6bb958ae2eb274d308.jpg) + +![](images/f6aa10bd642ba5905a67874a4423ddbc52fc6d4560d0d7e75322a68bf5c6400d.jpg) + +![](images/1958f17e5af93d8ae0472ae81108fc72a8aad031ead136868c78114901c80134.jpg) + +![](images/bf30f6742ceff9037e1b46598b0e315e236e4acc294c6f4dc0a20cefdc484093.jpg) +Figure 4. Quantitative Results. From our results, it can be seen that our approach produces high-fidelity scenes with stable 3D consistency in indoor scenes, outdoor scenes, and unreal-style scenes. More high-resolution results can be found in the supplementary material. 
+ +![](images/6f022ac78801c86286c6d5fa2e28a02faf746d8362275ddc961bcedcfea4883c.jpg) +POV, walking through a palace in fantasy style, master piece, indoor scene + +![](images/97864f2f12fc7b955f01f560c67066562bc30650692f1944bdb8312c61a318f3.jpg) + +![](images/ec12fe1703a1a1b92b32f68b7da4e70f8caa128c26b06db6df9cb968395d050e.jpg) + +![](images/03053ae95128d28fc9a5d7d30573809ee2383b3141ee16dee17bbf3216dde2e7.jpg) + +![](images/082e34233df5852860772145312becc712814e5891d4c9da63d67c588ccce236.jpg) + +![](images/3e39023510c67838f8fa7e48c99114f4b4298baeecf82532dc7abefa60e66bd2.jpg) + +![](images/f61e7435d707adccc16cbef184f9c1ad7bf5dd4fee74337e379b6d77459aded0.jpg) +This kitchen is a charming blend of rustic and modern, featuring a large reclaimed wood island with marble countertop, a sink surrounded by cabinets. The left of the island, a stainless-steel refrigerator stands tall. The of the sink, built-in wooden cabinets painted in a muted. +Figure 5. Comparison with text-to-panorama methods. It can be seen that although our method is not trained on panoramic data, it can also generate multiple views with cross-view consistency. + +sequently, their pipeline can be used to generate 3D scenes, enabling a comparison against our approach. We comprehensively evaluate these methods based on the previously introduced 3D consistency and visual quality metrics. + +- Text-to-Video. Some recent text-driven video generation methods [32, 49] can also generate similar 3D scene walkthrough videos. Since it is not supported to explicitly control the camera motion in the video generation methods, we only evaluated them in terms of visual quality and temporal consistency. +- Text-to-Panorama. This task generates perspective images covering the panoramic field of view, which is chal + +![](images/e87497fc5613b9ba5f6494f53e7551cd1236821982d1e48aac089fc42fbcd20b.jpg) +Figure 6. Comparison with text-to-video methods. Blur artifacts and temporally inconsistent frames occur in the text-to-video methods because of the lack of global 3D representation. + +lenging to ensure consistency in the overlapping regions. We have selected two related methods [9, 55] for comparisons. + +Comparison to Text-to-Scene Methods. To generate the scenes, we use a set of test-specific prompts covering descriptions of indoor, outdoor and unreal scenes. Each prompt generates an image sequence of 100 frames, and for a fair comparison, we set a fixed random seed. After that, we compute the metrics proposed in Sec. 5.2 on the generated image sequences and evaluate the effectiveness of the method. As shown in Tab. 1, our method outperforms the mesh-based iterative generation methods in several metrics, especially for outdoor scenes. The quality of + +![](images/3ecab3ee9329b10f8d76102279f32ad20c8bd5d50468d06deee087f69225d82f.jpg) +POV, walkthrough a damp, stone corridor, beautiful photo, masterpiece, indoor scene +(a) Extracted Mesh + +![](images/97f0ddc7e3a35d86b1280d5f1d41a654b413e917d213630e66e2d8fb2ba9f4b7.jpg) +Walkthrough a road, trees, beautiful photo, best quality, masterpiece, indoor scene +(b) Point Cloud +Figure 7. Reconstructed 3D Results. (a) The 3D mesh extracted by marching cube algorithm, and (b) the point cloud obtained after the reconstruction using COLMAP [31]. Our reconstruction results show that our methods can generate scenes with satisfactory 3D consistency. + +their generation results relies heavily on the generative and geometric prior and degrades over time due to error accumulation. 
In addition, their use of a mesh to represent the scene makes it difficult to represent intense depth discontinuities, which are common in outdoor scenes. Our method, on the other hand, adopts hybrid NeRF as the scene representation, which can cope with complex scenes, and our rectification mechanism can mitigate the effect of accumulated errors caused by inaccurate prior signals. + +Comparison to Text-to-Video Methods. For comparison with the text-to-video model, we used the same collection of prompts as input to the model and generated 1,200 video clips. We used the same metrics to evaluate the 3D consistency and visual quality of the videos generated by the T2V model and our rendered videos. As shown in Tab. 2, our method significantly outperforms the T2V model on all metrics, proving the effectiveness of our method. The T2V model learns geometry and appearance prior by training on a large video dataset, but it lacks a unified 3D representation, making it difficult to ensure multi-view consistency of the generated content, as can be observed Fig. 6. + +Comparison to Text-to-Panorama Methods. We evaluate the methods [9, 55] on visual quality. Tab. 3 and Fig. 5 present the quantitative and qualitative evaluations, respectively. From the results, it can be seen that the results of previous methods can be inconsistent at the left and right boundaries, while our method, although not specifically designed for panorama generation, produces multiple views with cross-view consistency. + +3D Results. In Fig. 7, we show the 3D results reconstructed by our method. The 3D mesh is extracted by the marching cube algorithm [31]. Additionally, we can reconstruct high-quality point clouds using colmap [50] by inputting the rendered image collection, which further demonstrates the superior 3D consistency of the generated view results. + +# 5.4. Ablation Study + +To further analyze the proposed methodology, we performed several ablation studies to evaluate the effectiveness + +of each module. More ablation studies can be found in our supplementary material. + +Effectiveness of Unified Representations. To validate our necessity to construct a unified 3D representation, we remove it from our pipeline. At this time, our approach degenerates to the previous paradigm of warping-inpainting. As shown in Tab. 4, the quality of the generated scenes degrades in DE and CE metrics due to the lack of global 3D consistency constraints. + +Effectiveness of Generative Refinement. To validate the effectiveness of our proposed generative refinement, we ablate the modules in our approach, whereby the novel view obtained through volume rendering will be updated directly into the supporting database for subsequent incremental training. The results in Tab. 4 show that this can lead to a significant degradation in the quality of the generated scene. We argue that the reason for this is that the quality of novel views generated by NeRF training on sparse views tends to be inferior, with notable blurring and artifacts. Therefore, adding this data for optimizing 3D scenes would lead to continuous degradation of the quality of the generated scenes. + +Effectiveness of Consistency Regularization. To verify the validity of our regularization loss, we ablate this loss and generate scenes to compute the relevant metrics. As shown in Tab. 4, adding this loss further improves the 3D consistency of the generated scenes. 
Though we explicitly inject 3D information into the refining process, its output still shows some inconsistent results in several scenes. Therefore, to further improve the quality of the generated new views, we perform test-time optimization through this regularization term to constrain the consistency between local and global representations. + +# 6. Conclusion + +This paper presents a new framework, which employs the tri-planar feature-based neural radiation field as a unified 3D representation and provides a unified solution for text-driven indoor and outdoor scene generation and the output supports navigation with arbitrary camera trajectories. Our method fine-tunes a scene-adapted diffusion model to correct the generated new content to mitigate the effect of cumulative errors while synthesizing extrapolated content. Experimental results show that our method can produce results with better visual quality and 3D consistency compared to previous methods. + +# 7. Acknowledgements + +This research was supported by Zhejiang Provincial Natural Science Foundation of China under Grant No. LD24F020007, National Natural Science Foundation of China (No. 62202199), and NSFC (no. 62302491). + +# References + +[1] Yogesh Balaji, Martin Renqiang Min, Bing Bai, Rama Chellappa, and Hans Peter Graf. Conditional GAN with discriminative filter generation for text-to-video synthesis. In Proceedings of the International Joint Conference on Artificial Intelligence, pages 1995-2001, 2019. 3 +[2] Shariq Farooq Bhat, Reiner Birkl, Diana Wofk, Peter Wonka, and Matthias Müller. Zoedepth: Zero-shot transfer by combining relative and metric depth. arXiv preprint arXiv:2302.12288, 2023. 2, 3 +[3] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22563-22575, 2023. 3 +[4] Shengqu Cai, Eric Ryan Chan, Songyou Peng, Mohamad Shahbazi, Anton Obukhov, Luc Van Gool, and Gordon Wetzstein. Diffdreamer: Consistent single-view perpetual view generation with conditional diffusion models. arXiv preprint arXiv:2211.12131, 2022. 3 +[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5799-5809, 2021. 3 +[6] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 6 +[7] Eric R Chan, Koki Nagano, Matthew A Chan, Alexander W Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3d-aware diffusion models. arXiv preprint arXiv:2304.02602, 2023. 3 +[8] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 2 +[9] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG), 41(6):1-16, 2022. 
6, 7, 8 +[10] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Scenedreamer: Unbounded 3d scene generation from 2d image collections. arXiv preprint arXiv:2302.01330, 2023. 4, 6 +[11] Blender Online Community. Blender - a 3D modelling and rendering package. Blender Foundation, Stichting Blender Foundation, Amsterdam, 2018. 1 +[12] Rafail Fridman, Amit Abecasis, Yoni Kasten, and Tali Dekel. Scenescape: Text-driven consistent scene generation. arXiv preprint arXiv:2302.01133, 2023. 1, 2, 4, 5, 6 +[13] Vitor Guizilini, Igor Vasiljevic, Dian Chen, Rares Ambrus, and Adrien Gaidon. Towards zero-shot scale-aware monococular depth estimation. In Proceedings of the IEEE/CVF Inter- + +national Conference on Computer Vision, pages 9233-9243, 2023.6 +[14] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsenko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 3 +[15] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. arXiv preprint arXiv:2204.03458, 2022. 3 +[16] Lukas Hollein, Ang Cao, Andrew Owens, Justin Johnson, and Matthias Nießner. Text2room: Extracting textured 3d meshes from 2d text-to-image models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7909-7920, 2023. 1, 2, 3, 4, 5, 6 +[17] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 3 +[18] Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. Spatial transformer networks. Advances in neural information processing systems, 28, 2015. 2, 3, 6 +[19] Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439, 2023. 3 +[20] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. CLIP-mesh: Generating textured meshes from text using pretrained image-text models. In SIGGRAPH Asia 2022 Conference Papers. ACM, 2022. 6 +[21] Jing Yu Koh, Honglak Lee, Yinfei Yang, Jason Baldridge, and Peter Anderson. Pathdreamer: A world model for indoor navigation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14738-14748, 2021. 3 +[22] Jing Yu Koh, Harsh Agrawal, Dhruv Batra, Richard Tucker, Austin Waters, Honglak Lee, Yinfei Yang, Jason Baldridge, and Peter Anderson. Simple and effective synthesis of indoor 3d scenes. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1169-1178, 2023. 3 +[23] Wei-Sheng Lai, Jia-Bin Huang, Oliver Wang, Eli Shechtman, Ersin Yumer, and Ming-Hsuan Yang. Learning blind video temporal consistency. In Proceedings of the European conference on computer vision (ECCV), pages 170-185, 2018. 6 +[24] Xingyi Li, Zhiguo Cao, Huiqiang Sun, Jianming Zhang, Ke Xian, and Guosheng Lin. 3d cinematography from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4595-4605, 2023. 6 +[25] Yitong Li, Martin Min, Dinghan Shen, David Carlson, and Lawrence Carin. Video generation from text. In Proceedings of the AAAI conference on artificial intelligence, 2018. 3 +[26] Zhengqi Li, Qianqian Wang, Noah Snavely, and Angjoo Kanazawa. 
Infinitenature-zero: Learning perpetual view generation of natural scenes from single images. In Proceed- + +ings of the European Conference on Computer Vision, pages 515-534. Springer, 2022. 2, 3, 4, 6 +[27] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Yu Qiao, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In Proceedings of the European Conference on Computer Vision, pages 1-18. Springer, 2022. 4 +[28] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 300–309, 2023. 2 +[29] Andrew Liu, Richard Tucker, Varun Jampani, Ameesh Makadia, Noah Snavely, and Angjoo Kanazawa. Infinite nature: Perpetual view generation of natural scenes from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14458-14467, 2021. 2, 3, 4 +[30] Xinhang Liu, Shiu-hong Kao, Jiaben Chen, Yu-Wing Tai, and Chi-Keung Tang. Deceptive-nerf: Enhancing nerf reconstruction using pseudo-observations from diffusion models. arXiv preprint arXiv:2305.15171, 2023. 3 +[31] William E Lorensen and Harvey E Cline. Marching cubes: A high resolution 3d surface construction algorithm. In Semin al graphics: pioneering efforts that shaped the field, pages 347-353. 1998. 8 +[32] Zhengxiong Luo, Dayou Chen, Yingya Zhang, Yan Huang, Liang Wang, Yujun Shen, Deli Zhao, Jingren Zhou, and Tieniu Tan. Videofusion: Decomposed diffusion models for high-quality video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10209-10218, 2023. 3, 6, 7 +[33] Tanya Marwah, Gaurav Mittal, and Vineeth N Balasubramanian. Attentive semantic video generation using captions. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1426-1434, 2017. 3 +[34] Gal Metzer, Elad Richardson, Or Patashnik, Raja Giryes, and Daniel Cohen-Or. Latent-nerf for shape-guided generation of 3d shapes and textures. arXiv preprint arXiv:2211.07600, 2022. 2 +[35] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3 +[36] Anish Mittal, Anush Krishna Moorthy, and Alan Conrad Bovik. No-reference image quality assessment in the spatial domain. IEEE Transactions on image processing, 21(12): 4695-4708, 2012. 6 +[37] Anish Mittal, Rajiv Soundararajan, and Alan C Bovik. Making a “completely blind” image quality analyzer. IEEE Signal processing letters, 20(3):209-212, 2012. 6 +[38] Gaurav Mittal, Tanya Marwah, and Vineeth N Balasubramanian. Sync-draw: Automatic video generation using deep recurrent attentive architectures. In Proceedings of the 25th + +ACM international conference on Multimedia, pages 1096-1104, 2017. 3 +[39] Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhonggang Qi, Ying Shan, and Xiaohu Qie. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453, 2023.6 +[40] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. 
Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 4 +[41] Yingwei Pan, Zhaofan Qiu, Ting Yao, Houqiang Li, and Tao Mei. To create what you tell: Generating videos from captions. In Proceedings of the 25th ACM international conference on Multimedia, pages 1789-1798, 2017. 3 +[42] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv, 2022. 1, 2, 6 +[43] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 6 +[44] Alexander Raistrick, Lahav Lipson, Zeyu Ma, Lingjie Mei, Mingzhe Wang, Yiming Zuo, Karhan Kayan, Hongyu Wen, Beining Han, Yihan Wang, Alejandro Newell, Hei Law, Ankit Goyal, Kaiyu Yang, and Jia Deng. Infinite photorealistic worlds using procedural generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12630-12641. IEEE, 2023. 1 +[45] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE transactions on pattern analysis and machine intelligence, 44(3):1623-1637, 2020. 2 +[46] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12179-12188, 2021. 2 +[47] Chris Rockwell, David F Fouhey, and Justin Johnson. Pixelsynth: Generating a 3d-consistent experience from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14104-14113, 2021. 6 +[48] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684–10695, 2022. 1, 2 +[49] RunWay. Gen-2: The next step forward for generative ai, 2023. https://research.runwayml.com/gen2.6,7 +[50] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2016. 6, 8 +[51] Liao Shen, Xingyi Li, Huiqiang Sun, Juwen Peng, Ke Xian, Zhiguo Cao, and Guosheng Lin. Make-it-4d: Synthesizing + +a consistent long-term dynamic scene video from a single image. arXiv preprint arXiv:2308.10257, 2023. 3 +[52] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8028-8038, 2020. 6 +[53] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022.3 +[54] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew J Davison. imap: Implicit mapping and positioning in real-time. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6229-6238, 2021. 6 +[55] Shitao Tang, Fuyang Zhang, Jiacheng Chen, Peng Wang, and Yasutaka Furukawa. Mvdiffusion: Enabling holistic multiview image generation with correspondence-aware diffusion. 
arXiv preprint arXiv:2307.01097, 2023. 6, 7, 8 +[56] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision, pages 402-419. Springer, 2020. 6 +[57] Hung-Yu Tseng, Qinbo Li, Changil Kim, Suhib Alsisan, Jia-Bin Huang, and Johannes Kopf. Consistent view synthesis with pose-guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16773-16783, 2023. 3 +[58] Patrick von Platen, Suraj Patil, Anton Lozhkov, Pedro Cuenca, Nathan Lambert, Kashif Rasul, Mishig Davaadorj, and Thomas Wolf. Diffusers: State-of-the-art diffusion models. https://github.com/huggingface/diffusers, 2022. 6 +[59] Can Wang, Ruixiang Jiang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Nerf-art: Text-driven neural radiance fields stylization. arXiv preprint arXiv:2212.08070, 2022. 3 +[60] Guangcong Wang, Zhaoxi Chen, Chen Change Loy, and Ziwei Liu. Sparsenerf: Distilling depth ranking for few-shot novel view synthesis. arXiv preprint arXiv:2303.16196, 2023. 5 +[61] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A. Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12619-12629, 2023. 2 +[62] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation, 2023. 1, 2, 6 +[63] Olivia Wiles, Georgia Gkioxari, Richard Szeliski, and Justin Johnson. Synsin: End-to-end view synthesis from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7467-7477, 2020. 3 + +[64] Chenfei Wu, Lun Huang, Qianxi Zhang, Binyang Li, Lei Ji, Fan Yang, Guillermo Sapiro, and Nan Duan. Godiva: Generating open-domain videos from natural descriptions. arXiv preprint arXiv:2104.14806, 2021. 3 +[65] Chenfei Wu, Jian Liang, Lei Ji, Fan Yang, Yuejian Fang, Daxin Jiang, and Nan Duan. Nüwa: Visual synthesis pretraining for neural visual world creation. In Proceedings of the European Conference on Computer Vision, pages 720-736. Springer, 2022. 3 +[66] Jingbo Zhang, Xiaoyu Li, Ziyu Wan, Can Wang, and Jing Liao. Text2nerf: Text-driven 3d scene generation with neural radiance fields. arXiv preprint arXiv:2305.11588, 2023. 2, 3, 5 +[67] Xiaoshuai Zhang, Sai Bi, Kalyan Sunkavalli, Hao Su, and Zexiang Xu. Nerfusion: Fusing radiance fields for large-scale scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5449-5458, 2022. 4 +[68] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 
3 \ No newline at end of file diff --git a/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/images.zip b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..70a46ff9e052c7ba1151d54e4a09e2aac54f8450 --- /dev/null +++ b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdad038228fd958f6601a32a02aa9b3614b8cf60c3b128fabac1aaf1b88fe99c +size 585896 diff --git a/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/layout.json b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1da24bc9f0721a8e4014eb2dad0562baefbaeacf --- /dev/null +++ b/2024/3D-SceneDreamer_ Text-Driven 3D-Consistent Scene Generation/layout.json @@ -0,0 +1,10208 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 96, + 103, + 498, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 103, + 498, + 120 + ], + "spans": [ + { + "bbox": [ + 96, + 103, + 498, + 120 + ], + "type": "text", + "content": "3D-SceneDreamer: Text-Driven 3D-Consistent Scene Generation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "spans": [ + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "text", + "content": "Songchun Zhang" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "text", + "content": ", Yibo Zhang" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "text", + "content": ", Quan Zheng" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "text", + "content": ", Rui Ma" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "text", + "content": ", Wei Hua" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "text", + "content": ", Hujun Bao" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "text", + "content": ", Weiwei Xu" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "text", + "content": ", Changqing Zou" + }, + { + "bbox": [ + 133, + 142, + 459, + 172 + ], + "type": "inline_equation", + "content": "^{1,3*}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 151, + 176, + 441, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 176, + 441, + 191 + ], + "spans": [ + { + "bbox": [ + 151, + 176, + 441, + 191 + ], + "type": "text", + "content": "1 Zhejiang University 2 Jilin University 3 Zhejiang Lab" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 165, + 191, + 425, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 
191, + 425, + 205 + ], + "spans": [ + { + "bbox": [ + 165, + 191, + 425, + 205 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 165, + 191, + 425, + 205 + ], + "type": "text", + "content": " Institute of Software, Chinese Academy of Sciences" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 50, + 213, + 339, + 369 + ], + "blocks": [ + { + "bbox": [ + 50, + 213, + 339, + 369 + ], + "lines": [ + { + "bbox": [ + 50, + 213, + 339, + 369 + ], + "spans": [ + { + "bbox": [ + 50, + 213, + 339, + 369 + ], + "type": "image", + "image_path": "d1f7a6621a947ff51cc845b08531a5daef0f8e8b76e07990f17c1d9ed44a2816.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 372, + 547, + 427 + ], + "lines": [ + { + "bbox": [ + 45, + 372, + 547, + 427 + ], + "spans": [ + { + "bbox": [ + 45, + 372, + 547, + 427 + ], + "type": "text", + "content": "Figure 1. Text-Driven 3D Scene Generation from text prompts. (a) Given a scene description prompt and an arbitrary 6-degree-of-freedom (6-DOF) camera trajectory, our approach progressively generates the full 3D scene by continuously synthesizing 2D novel views. (b) The limitation of mesh representations [12, 16] and the lack of reasonable rectification mechanisms lead to cumulative errors in outdoor scenes, which are respectively marked with yellow and blue dash line boxes. In contrast, our approach can alleviate the problem by introducing a progressive generation pipeline." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 355, + 215, + 423, + 270 + ], + "blocks": [ + { + "bbox": [ + 355, + 215, + 423, + 270 + ], + "lines": [ + { + "bbox": [ + 355, + 215, + 423, + 270 + ], + "spans": [ + { + "bbox": [ + 355, + 215, + 423, + 270 + ], + "type": "image", + "image_path": "c2671e5361e19388a00d9ce464ff5fb6ed9721ceae32b8342a57cb8252f86856.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 355, + 274, + 531, + 291 + ], + "lines": [ + { + "bbox": [ + 355, + 274, + 531, + 291 + ], + "spans": [ + { + "bbox": [ + 355, + 274, + 531, + 291 + ], + "type": "text", + "content": "Aerial drone shot of a mountain range in the style of cinematic video, shallow depth of field, subject in focus, dynamic movement" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 425, + 214, + 481, + 270 + ], + "blocks": [ + { + "bbox": [ + 425, + 214, + 481, + 270 + ], + "lines": [ + { + "bbox": [ + 425, + 214, + 481, + 270 + ], + "spans": [ + { + "bbox": [ + 425, + 214, + 481, + 270 + ], + "type": "image", + "image_path": "ab64d1a82f00e61c4b83673cab557366a3a1729acc007f1786ff472b038db194.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 482, + 214, + 539, + 270 + ], + "blocks": [ + { + "bbox": [ + 482, + 214, + 539, + 270 + ], + "lines": [ + { + "bbox": [ + 482, + 214, + 539, + 270 + ], + "spans": [ + { + "bbox": [ + 482, + 214, + 539, + 270 + ], + "type": "image", + "image_path": "ae774f98176f34d7cd4c755c84e4f5ba10f2010ddd93e17d23161bc9c2cb2cbd.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 355, + 293, + 423, + 353 + ], + "blocks": [ + { + "bbox": [ + 355, + 293, + 423, + 353 + ], + "lines": [ + { + "bbox": [ + 355, + 293, + 
423, + 353 + ], + "spans": [ + { + "bbox": [ + 355, + 293, + 423, + 353 + ], + "type": "image", + "image_path": "e2353ea3abb63236884dd018a3302097572376aa219cdc0d8890f6864371c6b8.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 426, + 359, + 481, + 369 + ], + "lines": [ + { + "bbox": [ + 426, + 359, + 481, + 369 + ], + "spans": [ + { + "bbox": [ + 426, + 359, + 481, + 369 + ], + "type": "text", + "content": "(b) Observation" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 425, + 294, + 481, + 351 + ], + "blocks": [ + { + "bbox": [ + 425, + 294, + 481, + 351 + ], + "lines": [ + { + "bbox": [ + 425, + 294, + 481, + 351 + ], + "spans": [ + { + "bbox": [ + 425, + 294, + 481, + 351 + ], + "type": "image", + "image_path": "7a70c4f0b5ddc98e4973cd2557d36489ffef18bfd57a3c4d789a4549f3104dbe.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 482, + 293, + 539, + 356 + ], + "blocks": [ + { + "bbox": [ + 482, + 293, + 539, + 356 + ], + "lines": [ + { + "bbox": [ + 482, + 293, + 539, + 356 + ], + "spans": [ + { + "bbox": [ + 482, + 293, + 539, + 356 + ], + "type": "image", + "image_path": "0d3c579a780a8e924e302dda7cd864b75facc5bf555d62fca1b294709a7a4285.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 143, + 434, + 192, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 434, + 192, + 445 + ], + "spans": [ + { + "bbox": [ + 143, + 434, + 192, + 445 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 45, + 451, + 289, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 451, + 289, + 704 + ], + "spans": [ + { + "bbox": [ + 45, + 451, + 289, + 704 + ], + "type": "text", + "content": "Text-driven 3D scene generation techniques have made rapid progress in recent years. Their success is mainly attributed to using existing generative models to iteratively perform image warping and inpainting to generate 3D scenes. However, these methods heavily rely on the outputs of existing models, leading to error accumulation in geometry and appearance that prevent the models from being used in various scenarios (e.g., outdoor and unreal scenarios). To address this limitation, we generatively refine the newly generated local views by querying and aggregating global 3D information, and then progressively generate the 3D scene. Specifically, we employ a tri-plane features-based NeRF as a unified representation of the 3D scene to constrain global 3D consistency, and propose a generative refinement network to synthesize new contents with higher quality by exploiting the natural image prior from 2D diffusion model as well as the global 3D information of the current scene. Our extensive experiments demonstrate that, in comparison to previous methods, our approach supports wide variety of scene generation and arbitrary camera trajectories with improved visual quality and 3D consistency." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 433, + 386, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 433, + 386, + 445 + ], + "spans": [ + { + "bbox": [ + 307, + 433, + 386, + 445 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 460, + 545, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 545, + 556 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 545, + 556 + ], + "type": "text", + "content": "In recent years, with the growing need for 3D creation tools for metaverse applications, attention to 3D scene generation techniques has increased rapidly. Existing tools [11, 44] usually require professional modeling skills and extensive manual labor, which is time-consuming and inefficient. To facilitate the 3D scene creation and reduce the need for professional skills, 3D scene generation tools should be intuitive and versatile while ensuring sufficient controllability." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 559, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 559, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 559, + 546, + 715 + ], + "type": "text", + "content": "This paper focuses on the specific setting of generating consistent 3D scenes from the input texts that describe the 3D scenes. This problem is highly challenging from several perspectives, including the limitation of available text-3D data pairs and the need for ensuring both semantic and geometric consistency of the generated scenes. To overcome the limited 3D data issue, recent text-to-3D methods [42, 62] have leveraged the powerful pre-trained text-to-image diffusion model [48] as a strong prior to optimize 3D representation. However, their generated scenes often have relatively simpler geometry and lack 3D consistency, because 2D prior diffusion models lack the perception of 3D information." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 732, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 732, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 732, + 318, + 742 + ], + "type": "text", + "content": "10170" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 70, + 289, + 199 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 289, + 199 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 289, + 199 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 289, + 199 + ], + "type": "image", + "image_path": "efbee3c07137c1e213a33737d20e908f74c2964a806245f6a0125a2fc3fbb76c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 204, + 287, + 304 + ], + "lines": [ + { + "bbox": [ + 46, + 204, + 287, + 304 + ], + "spans": [ + { + "bbox": [ + 46, + 204, + 287, + 304 + ], + "type": "text", + "content": "Figure 2. Comparison with existing designs. (a) The feedforward approaches use depth-based warping and refinement operations to generate novel views of the scene without a unified representation. (b) The warping-inpainting approaches use mesh as a unified representation and generate the scene through iterative inpainting. (c) We replace the mesh with NeRF as the unified representation and alleviate the cumulative error issue by incorporating a generative refinement model. This allows our framework to support the generation of a wider range of scene types." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 313, + 287, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 313, + 287, + 480 + ], + "spans": [ + { + "bbox": [ + 46, + 313, + 287, + 480 + ], + "type": "text", + "content": "Some recent methods [12, 16] introduce the monocular depth estimation model [45, 46] as a strong geometric prior and follow the warping-inpainting pipeline [26, 29] for progressive 3D scene generation, which partially solves the inconsistency problem. Although these methods can generate realistic scenes with multi-view 3D consistency, they mainly focus on indoor scenes and fail to handle large-scale outdoor scene generation as illustrated in Fig. 1 (b). This can be attributed to two main aspects: (1) Due to the adoption of an explicit 3D mesh as the unified 3D representation, the noise of the depth estimation in the outdoor scene can cause a large stretch of the scene geometry; (2) The lack of an efficient rectification mechanism in the pipeline leads to an accumulation of geometric and appearance errors." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 483, + 287, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 483, + 287, + 639 + ], + "spans": [ + { + "bbox": [ + 46, + 483, + 287, + 639 + ], + "type": "text", + "content": "In this paper, we present a new framework, named 3D-SceneDreamer that provides a unified solution for text-driven 3D consistent indoor and outdoor scene generation. Our approach employs a tri-planar feature-based radiance field as a unified 3D representation instead of 3D mesh, which is advantageous for general scene generation (especially in outdoor scenes) and supports navigating with arbitrary 6-DOF camera trajectories. 
Afterwards, we model the scene generation process as a progressive optimization of the NeRF representation, while a text-guided and scene-adapted generative novel view synthesis is employed to refine the NeRF optimization. Fig. 2 shows a comparison of our design with existing text-to-scene pipelines." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "content": "Specifically, we first perform scene initialization, which consists of two stages, i.e., generating a supporting database and optimizing the initial scene representation. We first use the input text prompt and the pre-trained diffusion model [48] to generate the initial image as an appearance prior. Then, we use an off-the-shelf depth estimation model [2]" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 545, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 239 + ], + "type": "text", + "content": "to provide the geometric prior for the corresponding scene. Inspired by [66], to prevent NeRF from over-fitting for the single view image, we construct a database via differentiable spatial transformation [18] and use it for optimizing the initial NeRF representation of the generated scene. To generate the extrapolated content, we use volume rendering and trilinear interpolation in the novel viewpoints to obtain the initial rendered images and their corresponding feature maps. These outputs are later fed into our 3D-aware generative refinement model, whose output images are subsequently added as new content to the supporting database. Next, in conjunction with the new data, we progressively generate the whole 3D scene by updating our 3D representation through our incremental training strategy." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 239, + 545, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 239, + 545, + 299 + ], + "spans": [ + { + "bbox": [ + 304, + 239, + 545, + 299 + ], + "type": "text", + "content": "Extensive experiments demonstrate that our approach significantly outperforms the state-of-the-art text-driven 3D scene generation method in both visual quality and 3D consistency. To summarize, our technical contributions are as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 300, + 545, + 455 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 306, + 300, + 545, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 300, + 545, + 347 + ], + "spans": [ + { + "bbox": [ + 306, + 300, + 545, + 347 + ], + "type": "text", + "content": "- We provide a unified solution for text-driven consistent 3D scene generation that supports both indoor and outdoor scenes as well as allows navigation with arbitrary 6-DOF camera trajectories." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 347, + 545, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 347, + 545, + 407 + ], + "spans": [ + { + "bbox": [ + 306, + 347, + 545, + 407 + ], + "type": "text", + "content": "- We propose to use a tri-planar feature-based neural radiance field as a global 3D representation of the scene to generate continuous scene views, which preserves the 3D consistency of the scene, empowered by a progressive optimization strategy." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 407, + 545, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 407, + 545, + 455 + ], + "spans": [ + { + "bbox": [ + 306, + 407, + 545, + 455 + ], + "type": "text", + "content": "- We propose a new generative refinement model, which explicitly injects 3D information to refine the coarse view generated by novel view synthesis and then incorporates the new views to refine the NeRF optimization." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 466, + 392, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 466, + 392, + 478 + ], + "spans": [ + { + "bbox": [ + 306, + 466, + 392, + 478 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 486, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 486, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 486, + 545, + 713 + ], + "type": "text", + "content": "Text-Driven 3D Content Generation. Recently, motivated by the success of text-to-image models, employing pretrained 2D diffusion models to perform text-to-3D generation has gained significant research attention. Some pioneering works [42, 61] introduce the Score Distillation Sampling (SDS) and utilize 2D diffusion prior to optimize 3D representation. Subsequent works [8, 28, 34, 62] further enhance texture realism and geometric quality. However, they primarily focus on improving object-level 3D content generation rather than large-scale 3D scenes. Recent works [12, 16, 66] have proposed some feasible solutions for 3D scene generation. By utilizing the pre-trained monocular depth model and the inpainting model, they generate the 3D scene progressively based on the input text and camera trajectory. However, due to the underlying 3D representation or optimization scheme, these methods are limited in several aspects. For example, as [12, 16] utilize explicit mesh as 3D representation, it is difficult for them to generate outdoor scenes. Besides, their mesh outputs" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 732, + 317, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 732, + 317, + 742 + ], + "spans": [ + { + "bbox": [ + 294, + 732, + 317, + 742 + ], + "type": "text", + "content": "10171" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "content": "also suffer from fragmented geometry and artifacts due to imprecise depth estimation results. 
Although Text2NeRF manages to generate high-quality indoor and outdoor scenes by replacing the meshes with neural radiance fields [35], it can only generate camera-centric scenes. In contrast, our approach not only supports more general 3D scene generation but can also handle arbitrary 6-DOF camera trajectories." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 157, + 289, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 157, + 289, + 406 + ], + "spans": [ + { + "bbox": [ + 46, + 157, + 289, + 406 + ], + "type": "text", + "content": "Text-Driven Video Generation. Text-Driven Video Generation aims to create realistic video content based on textual conditions. In the early stages, this task was approached using GAN [1, 25, 41] and VAE [33, 38] generative models, but the results were limited to low-resolution short video clips. Following the significant advancements in text-to-image models, recent text-to-video works extend text-to-image models such as transformers [17, 64, 65] and diffusion models [3, 14, 15, 32, 53, 68] for video generation. These approaches enable the generation of high-quality and open-vocabulary videos, but require a substantial amount of paired text-image or text-video data for training. Text2Video-Zero [19] proposes the first zero-shot text-to-video generation pipeline that does not rely on training or optimization, but their generated videos lack smoothness and 3D consistency. Our method is capable of generating smooth and long videos which are consistent with the scenes described by the input text, without the need for large-scale training data. Furthermore, the utilization of NeRF as the 3D representation enhances the 3D consistency of our videos." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 407, + 289, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 407, + 289, + 623 + ], + "spans": [ + { + "bbox": [ + 46, + 407, + 289, + 623 + ], + "type": "text", + "content": "View Synthesis with Generative Models. Several early-stage studies [5, 21, 22, 26, 29, 63] employ GANs to synthesize new viewpoints. However, the training process of GANs is prone to the issue of mode collapse, which limits the diversity of generation results. Diffusion models have shown their capability to generate diverse and high-quality images and videos. In recent view synthesis works [4, 7, 51, 57], diffusion models have been employed to achieve improved scene generation results over prior works. For example, in Deceptive-NeRF [30], pseudo-observations are synthesized by diffusion models and these observations are further utilized to enhance the NeRF optimization. Similar to [30], our method proposes a geometry-aware diffusion refinement model to reduce the artifacts of the input coarse view generated by the initial novel view synthesis. With the 3D information from NeRF features injected into the refinement process, we can achieve globally consistent 3D scene generation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 633, + 233, + 646 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 633, + 233, + 646 + ], + "spans": [ + { + "bbox": [ + 47, + 633, + 233, + 646 + ], + "type": "text", + "content": "3. 
Neural Radiance Fields Revisited" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": "Neural Radiance Fields (NeRF) [59] is a novel view synthesis technique that has shown impressive results. It represents the specific 3D scene via an implicit function, denoted as " + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "inline_equation", + "content": "f_{\\theta}:(\\pmb {x},\\pmb {d})\\mapsto (\\mathbf{c},\\sigma)" + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": ", given a spatial location " + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": " and a ray direction " + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{d}" + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": " represents the learnable parameters," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": " are the color and density. 
To render a novel image, NeRF marches a camera ray " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\mathbf{r}(t) = \\mathbf{o} + t\\mathbf{d}" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": " starting from the origin " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\mathbf{o}" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": " through each pixel and calculates its color " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\hat{\\boldsymbol{C}}" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": " and rendered depth " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\hat{\\boldsymbol{D}}" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": " via the volume rendering quadrature, i.e., " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\hat{\\boldsymbol{C}} (\\mathbf{r}) = \\sum_{i = 1}^{N}T_{i}\\alpha_{i}\\mathbf{c}_{i}" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\hat{\\boldsymbol{D}} (\\mathbf{r}) = \\sum_{i = 1}^{N}T_{i}\\alpha_{i}t_{i}" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "T_{i} = \\exp \\left(-\\sum_{j = 1}^{i - 1}\\sigma_{j}\\delta_{j}\\right)" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\alpha_{i} = (1 - \\exp (-\\sigma_{i}\\delta_{i}))" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\delta_{k} = t_{k + 1} - t_{k}" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": " indicates the distance between two point samples. Typically, stratified sampling is used to select the point samples " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\{t_i\\}_{i = 1}^N" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": " between " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "t_n" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "t_f" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": ", which denote the near and far planes of the camera. 
When multi-view images are available, " + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 72, + 545, + 209 + ], + "type": "text", + "content": " can be easily optimized with the MSE loss:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 366, + 211, + 545, + 240 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 366, + 211, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 366, + 211, + 545, + 240 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\theta} = \\sum_ {\\boldsymbol {r} \\in \\mathcal {R}} \\left\\| \\hat {\\boldsymbol {C}} (\\boldsymbol {r}) - \\boldsymbol {C} (\\boldsymbol {r}) \\right\\| _ {2} ^ {2} \\tag {1}", + "image_path": "e3d35225f7643e039842788fce27ba1285061215d5e109e6054ad2ff8d2d27b9.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 247, + 545, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 247, + 545, + 272 + ], + "spans": [ + { + "bbox": [ + 304, + 247, + 545, + 272 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 247, + 545, + 272 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 304, + 247, + 545, + 272 + ], + "type": "text", + "content": " is the collection of rays, and " + }, + { + "bbox": [ + 304, + 247, + 545, + 272 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 247, + 545, + 272 + ], + "type": "text", + "content": " indicates the ground truth color." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 285, + 367, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 285, + 367, + 298 + ], + "spans": [ + { + "bbox": [ + 306, + 285, + 367, + 298 + ], + "type": "text", + "content": "4. Methods" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 305, + 373, + 317 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 305, + 373, + 317 + ], + "spans": [ + { + "bbox": [ + 306, + 305, + 373, + 317 + ], + "type": "text", + "content": "4.1. Overview" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 324, + 545, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 324, + 545, + 372 + ], + "spans": [ + { + "bbox": [ + 304, + 324, + 545, + 372 + ], + "type": "text", + "content": "Given a description of the target scene a the input text prompt " + }, + { + "bbox": [ + 304, + 324, + 545, + 372 + ], + "type": "inline_equation", + "content": "\\mathbf{p}" + }, + { + "bbox": [ + 304, + 324, + 545, + 372 + ], + "type": "text", + "content": ", and a pre-defined camera trajectory denoted by " + }, + { + "bbox": [ + 304, + 324, + 545, + 372 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{T}_i\\}_{i=1}^N" + }, + { + "bbox": [ + 304, + 324, + 545, + 372 + ], + "type": "text", + "content": ", our goal is to generate a 3D scene along the camera trajectory with the multiview 3D consistency." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 373, + 545, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 373, + 545, + 481 + ], + "spans": [ + { + "bbox": [ + 304, + 373, + 545, + 481 + ], + "type": "text", + "content": "The overview of the proposed model is illustrated in Fig. 3. We first introduce the acquisition of appearance and structural priors in Sec. 4.2, which serve as the scene initialization. 
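For illustration, the volume rendering quadrature recalled in Sec. 3 of the embedded paper (the rendered color and depth as transmittance-weighted sums along a ray, optimized with the MSE loss of Eq. (1)) can be sketched in a few lines of NumPy. This is an illustrative sketch only, not code from the paper; all names are made up.

```python
import numpy as np

def render_ray(sigmas, colors, t_vals):
    """Quadrature from Sec. 3: sigmas (N,), colors (N, 3), t_vals (N,) sorted
    samples between the near and far planes. Returns (C_hat, D_hat) for one ray."""
    deltas = np.append(np.diff(t_vals), 1e10)          # delta_k = t_{k+1} - t_k
    alphas = 1.0 - np.exp(-sigmas * deltas)            # alpha_i
    # T_i = exp(-sum_{j<i} sigma_j * delta_j), with T_1 = 1
    trans = np.exp(-np.concatenate(([0.0], np.cumsum(sigmas[:-1] * deltas[:-1]))))
    weights = trans * alphas
    c_hat = (weights[:, None] * colors).sum(axis=0)    # rendered color C_hat(r)
    d_hat = (weights * t_vals).sum()                   # rendered depth D_hat(r)
    return c_hat, d_hat

def photometric_loss(c_hat, c_gt):
    """Eq. (1): squared L2 error, summed over a batch of rays."""
    return np.sum((c_hat - c_gt) ** 2)
```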
The formulation of Unified Scene Representation and its optimization with the former priors are presented in Sec. 4.3. To synthesize new content while maintaining the multiview consistency, we propose a geometry-aware refinement model in Sec. 4.4. Finally, the full online scene generation process is presented in Sec. 4.5." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 491, + 459, + 503 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 491, + 459, + 503 + ], + "spans": [ + { + "bbox": [ + 305, + 491, + 459, + 503 + ], + "type": "text", + "content": "4.2. Scene Context Initialization" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": "Given the input textual prompt " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{p}" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": ", we first utilize a pretrained stable diffusion model to generate an initial 2D image " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_0" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": ", which serves as an appearance prior for the scene. Then, we feed this image into the off-the-shelf depth estimation model [2], and take the output as a geometric prior for the target scene, denoted as " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_0" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": ". Inspired by [66], we construct a supporting database " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "S = \\{((\\mathbf{D}_i,\\mathbf{I}_i,\\mathbf{T}_i)\\}_{i = 1}^N" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": " via differentiable spatial transformation [18] and image inpainting [16] techniques, where " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": " denotes the number of initial viewpoints. This database provides additional views and depth information, which could prevent the model from overfitting to the initial view. With the initial supporting database, we can initialize the global 3D representation. The data generated by our method will be continuously appended to this supporting database for continuous optimization of the global 3D representation. More details are provided in our supplemental materials." 
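As a rough illustration of the Sec. 4.2 initialization just described (text prompt to initial image I_0, monocular depth D_0, and a supporting database S), the sketch below uses Stable Diffusion via diffusers and a DPT depth model via transformers as stand-ins for the generators cited as [48] and [2]. The specific model choices and the `support_db` layout are assumptions made for illustration, and the warping/inpainting step that adds further views is only indicated in a comment.

```python
import torch
from diffusers import StableDiffusionPipeline
from transformers import pipeline

prompt = "aerial drone shot of a mountain range"        # the input text prompt p

# Appearance prior I_0 from a pre-trained text-to-image diffusion model.
sd = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
I0 = sd(prompt).images[0]

# Geometric prior D_0 from an off-the-shelf monocular depth estimator.
depth_model = pipeline("depth-estimation", model="Intel/dpt-large")
D0 = depth_model(I0)["predicted_depth"]

# Supporting database S = {(D_i, I_i, T_i)}: start from the initial view at the
# identity pose; further (D_i, I_i, T_i) entries would be produced by warping
# (I_0, D_0) to nearby poses and inpainting disocclusions, as described above.
support_db = [(D0, I0, torch.eye(4))]
```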
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "text", + "content": "10172" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 71, + 541, + 268 + ], + "blocks": [ + { + "bbox": [ + 53, + 71, + 541, + 268 + ], + "lines": [ + { + "bbox": [ + 53, + 71, + 541, + 268 + ], + "spans": [ + { + "bbox": [ + 53, + 71, + 541, + 268 + ], + "type": "image", + "image_path": "a184d31afa8507f0e45a91c95f11f4d804eadb76155b7671d4dd8922b33d832f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 273, + 547, + 329 + ], + "lines": [ + { + "bbox": [ + 46, + 273, + 547, + 329 + ], + "spans": [ + { + "bbox": [ + 46, + 273, + 547, + 329 + ], + "type": "text", + "content": "Figure 3. Overview of our pipeline. (a) Scene Context Initialization contains a supporting database to provide novel viewpoint data for progressive generation. (b) Unified 3D Representation provides a unified representation for the generated scene, which allows our approach to accomplish more general scene generation and to hold the 3D consistency at the same time. (c) 3D-Aware Generative Refinement alleviates the cumulative error issue during long-term extrapolation by exploiting large-scale natural images prior to generatively refine the synthesized novel viewpoint image. The consistency regularization module is used for test-time optimization." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 331, + 208, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 331, + 208, + 344 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 208, + 344 + ], + "type": "text", + "content": "4.3. Unified Scene Representation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 352, + 288, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 352, + 288, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 352, + 288, + 483 + ], + "type": "text", + "content": "Though previous methods [26, 29] have achieved novel view generations via differentiable rendering-based frame-to-frame warping, there are still drawbacks: (1) the global 3D consistency is not ensured, (2) cumulative errors occur in long-term generation, (3) complex scenes may lead to failure. To tackling above issues, we propose a tri-planar feature-based NeRF as the unified representation. Compared with previous methods [12, 16, 26, 29], our approach constrains the global 3D consistency while handling the scene generation with complex appearances and geometries." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "text", + "content": "Tri-planar Feature Representation. 
For constructing the feature tri-planes " + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{M} = \\{\\mathbf{M}_{xy},\\mathbf{M}_{yz},\\mathbf{M}_{xz}\\} \\in \\mathbb{R}^{3\\times S\\times S\\times D}" + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "text", + "content": " from the input images, where " + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "text", + "content": " is the spatial resolution and " + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "text", + "content": " is the feature dimension, we first extract 2D image features from supporting views using the pre-trained ViT from DINoV2 [40] because of its strong capability in modeling cross-view correlations. We denote the extracted feature corresponding to image " + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_i" + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_i" + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "text", + "content": ", and the feature set obtained from all input views is denoted as " + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{F}_i\\}_{i = 1}^N" + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "text", + "content": ". To lift the local 2D feature maps into the unified 3D space, similar to the previous work [67], we back-project the extracted local image features " + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "text", + "content": " into a 3D feature volume " + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "text", + "content": " along each camera ray. To avoid the cubic computational complexity of volumes, we construct a tri-planar representation by projecting the 3D feature volume " + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 46, + 486, + 289, + 715 + ], + "type": "text", + "content": " into its respective plane via three separate encoders. This representation reduces the complexity from feature dimensionality reduction, but with equivalent information compared to purely 2D feature representations (e.g., BEV representations [10, 27])." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": "Implicit Radiance Field Decoder. 
Based on the constructed tri-planar representation " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": ", we can reconstruct the images with target poses via our implicit radiance field decoder module " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "\\Psi = \\{f_{g}, f_{c}\\}" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "f_{g}" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "f_{c}" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": " indicate the geometric feature decoder and appearance decoder. Given a 3D point " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "p = [i, j, k]" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": " and a view direction " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": ", we orthogonally project " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": " to each feature plane in " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": " with bilinear sampling to obtain the conditional feature " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_p = [\\mathbf{M}_{xy}(i, j), \\mathbf{M}_{yz}(j, k), \\mathbf{M}_{xz}(i, k)]" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": ". 
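A minimal PyTorch sketch of the tri-plane lookup just described (project a point onto each feature plane and sample bilinearly) is given below. The [-1, 1] coordinate convention and the concatenation of the three plane features into M_p are assumptions for illustration; the paper's actual aggregation may differ.

```python
import torch
import torch.nn.functional as F

def sample_triplane(M_xy, M_yz, M_xz, pts):
    """M_*: (1, D, S, S) feature planes; pts: (P, 3) points with coordinates
    already normalized to [-1, 1]. Returns M_p as a (P, 3 * D) tensor."""
    x, y, z = pts[:, 0], pts[:, 1], pts[:, 2]

    def bilerp(plane, u, v):
        grid = torch.stack([u, v], dim=-1).view(1, -1, 1, 2)   # (1, P, 1, 2)
        feat = F.grid_sample(plane, grid, mode="bilinear", align_corners=True)
        return feat.view(plane.shape[1], -1).t()               # (P, D)

    f_xy = bilerp(M_xy, x, y)       # M_xy(i, j)
    f_yz = bilerp(M_yz, y, z)       # M_yz(j, k)
    f_xz = bilerp(M_xz, x, z)       # M_xz(i, k)
    return torch.cat([f_xy, f_yz, f_xz], dim=-1)
```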
We feed " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_p" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": " into the geometric feature decoder to obtain the predicted density " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": " and the geometric feature vector " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "\\mathbf{g}" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": ", after which we further decode its color " + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 331, + 547, + 475 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 377, + 485, + 474, + 498 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 377, + 485, + 474, + 498 + ], + "spans": [ + { + "bbox": [ + 377, + 485, + 474, + 498 + ], + "type": "interline_equation", + "content": "(\\sigma , \\boldsymbol {g}) = f _ {g} (\\gamma (\\boldsymbol {x}), \\mathbf {M} _ {p})", + "image_path": "fe849accbe808322f5aa02550db950cf4feefd53cba2fcf0de8e1fa277d5c4e8.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 370, + 498, + 545, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 498, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 370, + 498, + 545, + 514 + ], + "type": "interline_equation", + "content": "\\boldsymbol {c} = f _ {c} (\\gamma (\\boldsymbol {x}), \\gamma (\\boldsymbol {d}), \\boldsymbol {g}, \\mathbf {M} _ {p}) \\tag {2}", + "image_path": "1bfe14c32a9a8d4c3d2486c9ffe043b1f5d6df73063a3decf5e999bf20500f41.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 519, + 545, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 519, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 304, + 519, + 545, + 555 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 519, + 545, + 555 + ], + "type": "inline_equation", + "content": "\\gamma (\\cdot)" + }, + { + "bbox": [ + 304, + 519, + 545, + 555 + ], + "type": "text", + "content": " indicates the positional encoding function. Then we can calculate the pixel color via an approximation of the volume rendering integral mentioned in Sec. 3." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 555, + 545, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 555, + 545, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 555, + 545, + 639 + ], + "type": "text", + "content": "Training Objective. To optimize our 3D representation, we leverage the ground truth colors from the target image as the supervisory signal. Additionally, in the setting with sparse input views, we employ the estimated dense depth map to enhance the model's learning of low-frequency geometric information and prevent overfitting to appearance details. 
Our optimization objective is as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 349, + 644, + 545, + 670 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 349, + 644, + 545, + 670 + ], + "spans": [ + { + "bbox": [ + 349, + 644, + 545, + 670 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\sum_ {\\boldsymbol {r} \\in \\mathcal {R}} \\left(\\mathcal {L} _ {\\text {p h o t o}} (\\boldsymbol {r}) + \\lambda \\mathcal {L} _ {\\text {d e p t h}} (\\boldsymbol {r})\\right) \\tag {3}", + "image_path": "1710c388349faaf16d40c38b51c18651e79ba2270b9c3bd280911e1ba894f121.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 673, + 545, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 673, + 545, + 716 + ], + "spans": [ + { + "bbox": [ + 305, + 673, + 545, + 716 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 673, + 545, + 716 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text{photo}}(\\boldsymbol{r}) = \\left\\| \\hat{\\boldsymbol{C}}(\\boldsymbol{r}) - \\boldsymbol{C}(\\boldsymbol{r}) \\right\\|^2" + }, + { + "bbox": [ + 305, + 673, + 545, + 716 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 673, + 545, + 716 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text{depth}}(\\boldsymbol{r}) = \\left\\| \\hat{\\mathbf{D}}_{\\mathbf{r}}^*(\\boldsymbol{r}) - \\mathbf{D}^*(\\boldsymbol{r}) \\right\\|^2" + }, + { + "bbox": [ + 305, + 673, + 545, + 716 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 673, + 545, + 716 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 305, + 673, + 545, + 716 + ], + "type": "text", + "content": " denotes the collection of rays gen-" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 732, + 317, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 732, + 317, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 732, + 317, + 742 + ], + "type": "text", + "content": "10173" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "content": "erated from the images in the supporting database, " + }, + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "content": " indicates the balance weight of the depth loss, and " + }, + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "inline_equation", + "content": "\\mathbf{D}^{*}(\\boldsymbol{r})" + }, + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{D}}_{\\mathbf{r}}^{*}(\\boldsymbol{r})" + }, + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "content": " denote the rendered depth and the depth obtained from the pre-trained depth estimation model. Since monocular depths are not scale- and shift-invariant, both depths are normalized per frame." 
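A small PyTorch sketch of the training objective in Eq. (3), with the per-frame depth normalization mentioned above, is shown below. The median/mean-absolute-deviation normalization and the default weight `lam` are assumptions for illustration; the paper only states that both depths are normalized per frame.

```python
import torch

def normalize_depth(d, eps=1e-8):
    """Per-frame scale-and-shift normalization of a depth map (one common choice)."""
    d = d.flatten()
    shift = d.median()
    scale = (d - shift).abs().mean()
    return (d - shift) / (scale + eps)

def scene_loss(c_pred, c_gt, d_pred, d_mono, lam=0.1):
    """Eq. (3): photometric term plus weighted depth term over a batch of rays."""
    l_photo = ((c_pred - c_gt) ** 2).sum(dim=-1).mean()
    l_depth = ((normalize_depth(d_pred) - normalize_depth(d_mono)) ** 2).mean()
    return l_photo + lam * l_depth
```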
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 152, + 228, + 164 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 152, + 228, + 164 + ], + "spans": [ + { + "bbox": [ + 47, + 152, + 228, + 164 + ], + "type": "text", + "content": "4.4. 3D-Aware Generative Refinement" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 171, + 287, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 171, + 287, + 350 + ], + "spans": [ + { + "bbox": [ + 46, + 171, + 287, + 350 + ], + "type": "text", + "content": "Given a sequence of poses and an initial viewpoint, previous methods [12, 16, 66] usually generate novel views by the warping-inpainting pipeline. Though these methods have achieved promising results, they suffer from two issues: (1) The lack of rectification mechanisms in these methods can lead to error accumulation. (2) The lack of explicit 3D information during the inpainting process of these methods can lead to insufficient 3D consistency. Therefore, we propose a 3D-Aware Generative Refinement model to alleviate the above issues. On the one hand, we introduce an efficient refinement mechanism to reduce the cumulative error in the novel view generation. On the other hand, we explicitly inject 3D information during the process of generating novel views to enhance 3D consistency. We will describe the model design below." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "spans": [ + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "text", + "content": "Model Design. Given a novel viewpoint with camera pose " + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{T}_i" + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "text", + "content": ", the tri-planar features " + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "text", + "content": ", we can obtain the rendered image " + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_r" + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "text", + "content": ", rendered depth " + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_r" + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "text", + "content": " and the corresponding 2D feature map " + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_r" + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "text", + "content": " via the radiance field decoder module " + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "text", + "content": " and volume rendering. For convenience, we model the whole process with a mapping operator " + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{ren}:\\{\\mathbf{T}_i,\\mathbf{M}\\} \\mapsto \\{\\mathbf{I}_r,\\mathbf{F}_r,\\mathbf{D}_r\\}" + }, + { + "bbox": [ + 47, + 350, + 287, + 458 + ], + "type": "text", + "content": ". 
Note that the feature map is computed similarly to the color and depth, i.e., by numerical quadrature, and can be formulated as" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 90, + 462, + 287, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 462, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 90, + 462, + 287, + 495 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {r} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i} \\left(1 - \\exp \\left(- \\sigma_ {i} \\delta_ {i}\\right)\\right) \\boldsymbol {g} _ {i} \\tag {4}", + "image_path": "300811600447a9e74f6f188413a2694dcfb6756aa77952136dd684769b7e50cc.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 500, + 286, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 500, + 286, + 525 + ], + "spans": [ + { + "bbox": [ + 46, + 500, + 286, + 525 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 500, + 286, + 525 + ], + "type": "inline_equation", + "content": "\\pmb{g}_i" + }, + { + "bbox": [ + 46, + 500, + 286, + 525 + ], + "type": "text", + "content": " indicates the feature vector decoded by " + }, + { + "bbox": [ + 46, + 500, + 286, + 525 + ], + "type": "inline_equation", + "content": "f_{g}" + }, + { + "bbox": [ + 46, + 500, + 286, + 525 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 500, + 286, + 525 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 500, + 286, + 525 + ], + "type": "text", + "content": " denotes the total number of point samples on the ray " + }, + { + "bbox": [ + 46, + 500, + 286, + 525 + ], + "type": "inline_equation", + "content": "\\pmb{r}" + }, + { + "bbox": [ + 46, + 500, + 286, + 525 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 525, + 286, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 525, + 286, + 632 + ], + "spans": [ + { + "bbox": [ + 46, + 525, + 286, + 632 + ], + "type": "text", + "content": "Although the quality of the rendered coarse results may not be very high, they can still provide reasonable guidance for the extrapolated view generation according to the current scene. Based on this assumption, we propose to take the rendered image and the feature map as conditional inputs to a pre-trained 2D stable diffusion model and generate a refined synthetic image " + }, + { + "bbox": [ + 46, + 525, + 286, + 632 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_r" + }, + { + "bbox": [ + 46, + 525, + 286, + 632 + ], + "type": "text", + "content": " via fine-tuning the model, which allows to leverage natural image priors derived from internet-scale data. 
The process can be formulated as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 108, + 642, + 286, + 657 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 642, + 286, + 657 + ], + "spans": [ + { + "bbox": [ + 108, + 642, + 286, + 657 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {I}} _ {r} = \\mathcal {F} _ {g e n} (\\mathbf {I} _ {r}, \\tau (\\mathbf {p}), \\mathcal {G} (\\mathbf {F} _ {r})) \\tag {5}", + "image_path": "c4435b2222b6004895a94912052520ee55c459ed28b9a34d1bfeb2eaff7ebc2f.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{gen}" + }, + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "text", + "content": " denotes our generative refinement model, " + }, + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\tau (\\mathbf{p})" + }, + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "text", + "content": " indicates the input text embedding, and " + }, + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "text", + "content": " denotes the feature adapter for learning the mapping from external control information to the internal knowledge in LDM." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "content": "Scene-Adapted Diffusion Model Fine-Tuning. For the scene generation task, we propose to leverage the rich 2D priors in the pre-trained latent diffusion model instead of training a new model from scratch. Thus, we jointly train the feature adapter, the radiance field decoder, and the feature aggregation layer, while keeping the parameters of stable diffusion fixed. 
The objective of the fine-tuning process is shown below:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 172, + 545, + 192 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 172, + 545, + 192 + ], + "spans": [ + { + "bbox": [ + 313, + 172, + 545, + 192 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {A D} = \\mathbb {E} _ {t, \\epsilon \\sim \\mathcal {N} (0, I)} \\left[ \\| \\epsilon_ {\\theta} \\left(\\boldsymbol {z} _ {t}, t, \\tau (\\mathbf {p}), \\mathbf {F} _ {r}, \\mathbf {I} _ {r}\\right) - \\epsilon \\| _ {2} ^ {2} \\right] \\tag {6}", + "image_path": "f9348f5de37437c67f36861ac9d3070b0239b3114439070741784dc2de8a7a69.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 198, + 545, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 198, + 545, + 317 + ], + "spans": [ + { + "bbox": [ + 304, + 198, + 545, + 317 + ], + "type": "text", + "content": "With the rendered feature map " + }, + { + "bbox": [ + 304, + 198, + 545, + 317 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_r" + }, + { + "bbox": [ + 304, + 198, + 545, + 317 + ], + "type": "text", + "content": " containing information about the appearance and geometry, we can control the pre-trained text-to-image diffusion model to generate images that are consistent with the content of generated images from previous viewpoints. In addition, our model inherits the high-quality image generation ability of the stable diffusion model, which ensures the plausibility of the generated views. The pre-trained prior and our effective conditional adaptation enable our model to have generalization ability in novel scenes." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 318, + 545, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 318, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 304, + 318, + 545, + 497 + ], + "type": "text", + "content": "Global-Local Consistency Regularization. In the online generation process, though our model can rectify the coarse rendering results, we do not explicitly constrain the 3D consistency across views when synthesizing novel views. Therefore, we design a regularization term " + }, + { + "bbox": [ + 304, + 318, + 545, + 497 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text{cons}}" + }, + { + "bbox": [ + 304, + 318, + 545, + 497 + ], + "type": "text", + "content": " for test-time optimization, which shares the same formula as Eq. (6) to guarantee the plausibility of the generated novel views. Specifically, we expect that 3D consistency exists between novel views obtained from geometric projection using local geometric information (i.e., monocular depth estimation) and novel views generated using global geometric information (i.e., global tri-planar 3D representation). Thus, we simultaneously generate novel views based on the previous warping-and-inpainting pipeline and use them as supervisory signals to further fine-tune the feature adapter." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 503, + 486, + 515 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 503, + 486, + 515 + ], + "spans": [ + { + "bbox": [ + 305, + 503, + 486, + 515 + ], + "type": "text", + "content": "4.5. Online Scene Generation Process." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 522, + 545, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 545, + 569 + ], + "type": "text", + "content": "In this section, we introduce our online 3D scene generation process, which consists of three parts: scene representation initialization, extrapolation content synthesis, and incremental training strategy." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 570, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 689 + ], + "type": "text", + "content": "Scene Representation Initialization. Given the input textual prompt, we first generate an initial 2D image using a pre-trained stable diffusion model, after which we construct a supporting database " + }, + { + "bbox": [ + 304, + 570, + 545, + 689 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 570, + 545, + 689 + ], + "type": "text", + "content": " via the method mentioned in Sec. 4.2. Then, by exploiting the data from the database, as well as the photometric loss (Eq. (3)), we can optimize the unified representation. To prevent the model from overfitting to high-frequency details, we allow the model to learn low-frequency geometric information better by utilizing the depth priors. [60]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "Extrapolated Content Synthesis. To generate the extrapolated content, we proceed by retrieving the next pose, de" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 733, + 317, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 733, + 317, + 742 + ], + "spans": [ + { + "bbox": [ + 294, + 733, + 317, + 742 + ], + "type": "text", + "content": "10174" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "spans": [ + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "text", + "content": "noted as " + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathbf{T}_i" + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "text", + "content": ", from the pose sequence " + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{T}_i\\}_{i=1}^N" + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "text", + "content": ". We then employ volumetric rendering to obtain a coarse view of the current viewpoint and the corresponding feature map. These rendered outputs are used as conditional inputs to our generative refinement model " + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{gen}" + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "text", + "content": " for generating a refined view. Due to the presence of a generative refinement mechanism, our extrapolation method mitigates the effects of cumulative errors. 
The refined view from the model " + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{gen}" + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "text", + "content": " is subsequently added to the supporting database " + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "text", + "content": " as new content. Incremental Training Strategy. After obtaining the new content, we then need to update the unified representation. However, fine-tuning only on the newly generated data can lead to catastrophic forgetting, whereas fine-tuning on the entire dataset requires excessively long training time. Inspired by [54], we sample a sparse set of rays " + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}" + }, + { + "bbox": [ + 46, + 71, + 289, + 277 + ], + "type": "text", + "content": " according to the information gain to optimize the representation, thus improving the efficiency of the incremental training." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 288, + 128, + 301 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 288, + 128, + 301 + ], + "spans": [ + { + "bbox": [ + 47, + 288, + 128, + 301 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 308, + 180, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 180, + 320 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 180, + 320 + ], + "type": "text", + "content": "5.1. Implementation details." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 326, + 289, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 326, + 289, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 326, + 289, + 483 + ], + "type": "text", + "content": "We implemented our system using PyTorch. For the differentiable rendering part, we utilized [13] for depth estimation. To avoid the occurrence of black holes, we referred to the implementation in [18] to generate surrounding views. For the text-guided image generation, we use the publicly available stable diffusion code from Diffusers [58]. For the multi-view consistency image generation, we refer to the implementation of T2I-Adapter [39] to inject the depth feature conditions. In the progressive NeRF reconstruction part, we refer to the tri-planar implementation in [6]. We conducted all experiments using 4 NVIDIA RTX A100 GPUs for training and inference. More details can be found in our supplementary material." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 491, + 161, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 491, + 161, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 491, + 161, + 502 + ], + "type": "text", + "content": "5.2. Evaluation metrics." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 510, + 287, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 287, + 641 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 287, + 641 + ], + "type": "text", + "content": "Image quality. We evaluate the quality of our generated images using CLIP Score (CS), Inception Score (IS), Blind/Referenceless Image Spatial Quality Evaluator (BRISQUE) [36] and Natural Image Quality Evaluator (NQIE) [37]. 
The Inception Score is based on the diversity and predictability of the generated images. CLIP Score uses a pre-trained CLIP model [43] to measure the similarity between text and images. Note that existing visual quality metrics such as FID cannot be used since the scenes generated by text-to-3D approaches do not exhibit the same underlying data distribution." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": "Multiview Consistency. Given a sequence of rendered images, we evaluate the multi-view consistency of our generated scene using Camera Error (CE), Depth Error (DE), and flow-warping error (FE) metrics. Motivated by [10, 12], we use COLMAP [50], a reliable SfM technique, to compute the camera trajectory and the sparse 3D point cloud. CE" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 307, + 70, + 545, + 137 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 545, + 137 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 545, + 137 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 545, + 137 + ], + "type": "table", + "html": "
<table><tr><td rowspan=\"2\">Method</td><td rowspan=\"2\">3D Representation</td><td colspan=\"3\">3D Consistency</td><td colspan=\"4\">Visual Quality</td></tr>
<tr><td>DE↓</td><td>CE↓</td><td>SFM rate↑</td><td>CS↑</td><td>BRISQUE↓</td><td>NIQE↓</td><td>IS↑</td></tr>
<tr><td>Inf-Zero [26]</td><td>-</td><td>-</td><td>1.189</td><td>0.38</td><td>-</td><td>21.43</td><td>5.85</td><td>2.34</td></tr>
<tr><td>3DP [52]</td><td>LDI&Mesh</td><td>0.42</td><td>0.965</td><td>0.47</td><td>-</td><td>29.95</td><td>5.84</td><td>1.75</td></tr>
<tr><td>PixelSynth [47]</td><td>Point Cloud</td><td>0.36</td><td>0.732</td><td>0.52</td><td>-</td><td>36.74</td><td>4.98</td><td>1.28</td></tr>
<tr><td>ProlificDreamer [62]</td><td>NeRF</td><td>-</td><td>-</td><td>-</td><td>23.41</td><td>27.97</td><td>6.75</td><td>1.21</td></tr>
<tr><td>Text2Room [16]</td><td>Mesh</td><td>0.24</td><td>0.426</td><td>0.63</td><td>28.15</td><td>28.37</td><td>5.46</td><td>2.19</td></tr>
<tr><td>Scenescape [12]</td><td>Mesh</td><td>0.18</td><td>0.394</td><td>0.76</td><td>28.84</td><td>24.54</td><td>4.78</td><td>2.23</td></tr>
<tr><td>Ours</td><td>NeRF</td><td>0.13</td><td>0.176</td><td>0.89</td><td>29.97</td><td>23.64</td><td>4.66</td><td>2.62</td></tr></table>
", + "image_path": "f6f87e370fc29a3f1345f2eba7d37bd8c1cd2cd8a6adc3168fea476987663269.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 307, + 202, + 545, + 255 + ], + "blocks": [ + { + "bbox": [ + 304, + 141, + 547, + 196 + ], + "lines": [ + { + "bbox": [ + 304, + 141, + 547, + 196 + ], + "spans": [ + { + "bbox": [ + 304, + 141, + 547, + 196 + ], + "type": "text", + "content": "Table 1. Comparison with text-to-scene methods. We compare our approach with two categories of approaches, i.e., pure text-driven 3D generation and text-to-image generation followed by 3D scene generation. Metrics on 3D consistency and visual quality are illustrated." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 202, + 545, + 255 + ], + "lines": [ + { + "bbox": [ + 307, + 202, + 545, + 255 + ], + "spans": [ + { + "bbox": [ + 307, + 202, + 545, + 255 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>FE↓</td><td>CS↑</td><td>BRISQUE↓</td><td>NIQE↓</td><td>IS↑</td></tr>
<tr><td>VideoFusion [32]</td><td>0.039</td><td>23.54</td><td>27.39</td><td>5.94</td><td>2.21</td></tr>
<tr><td>GEN-2 [49]</td><td>0.032</td><td>27.54</td><td>25.65</td><td>5.24</td><td>2.38</td></tr>
<tr><td>Ours</td><td>0.028</td><td>29.95</td><td>23.53</td><td>4.70</td><td>2.69</td></tr></table>
", + "image_path": "713867d21cf0bdcf69f8c4a91158d44ffd34d9a7bc5b81c55262c784f000c8d4.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 307, + 285, + 545, + 342 + ], + "blocks": [ + { + "bbox": [ + 306, + 257, + 545, + 280 + ], + "lines": [ + { + "bbox": [ + 306, + 257, + 545, + 280 + ], + "spans": [ + { + "bbox": [ + 306, + 257, + 545, + 280 + ], + "type": "text", + "content": "Table 2. Comparison with text-to-video methods. Metrics on flow warping error (FE) and visual quality are illustrated." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 285, + 545, + 342 + ], + "lines": [ + { + "bbox": [ + 307, + 285, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 285, + 545, + 342 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>CS↑</td><td>BRISQUE↓</td><td>NIQE↓</td><td>IS↑</td></tr>
<tr><td>Text2Light [20]</td><td>26.16</td><td>49.26</td><td>6.15</td><td>2.54</td></tr>
<tr><td>MVDiffusion [42]</td><td>27.25</td><td>31.54</td><td>5.47</td><td>2.76</td></tr>
<tr><td>Ours</td><td>28.12</td><td>24.15</td><td>4.96</td><td>2.79</td></tr></table>
", + "image_path": "009b812cea4e43480f12c4f6e01d422cdbca0c96abfcfcf9ab39c80082cff278.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 307, + 385, + 545, + 441 + ], + "blocks": [ + { + "bbox": [ + 304, + 346, + 545, + 380 + ], + "lines": [ + { + "bbox": [ + 304, + 346, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 304, + 346, + 545, + 380 + ], + "type": "text", + "content": "Table 3. Comparison with text-to-panorama methods. We compare our method with recent text-driven 3D generation methods [9, 55]. Metrics on visual quality are illustrated." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 385, + 545, + 441 + ], + "lines": [ + { + "bbox": [ + 307, + 385, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 307, + 385, + 545, + 441 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>DE↓</td><td>CE↓</td><td>SfM rate↑</td><td>CS↑</td><td>BRISQUE↓</td><td>NIQE↓</td></tr>
<tr><td>Full Model</td><td>0.13</td><td>0.176</td><td>0.89</td><td>29.97</td><td>26.18</td><td>6.54</td></tr>
<tr><td>W/o UR</td><td>0.46</td><td>0.764</td><td>0.41</td><td>22.71</td><td>27.95</td><td>5.81</td></tr>
<tr><td>W/o GRM</td><td>0.59</td><td>0.981</td><td>0.46</td><td>22.12</td><td>29.64</td><td>5.75</td></tr>
<tr><td>W/o CR</td><td>0.19</td><td>0.254</td><td>0.78</td><td>28.14</td><td>27.16</td><td>6.12</td></tr></table>
", + "image_path": "d4e07cd0ec7f45988785207baf82d772475547aa7c4562590bf0176ea833233e.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 444, + 545, + 478 + ], + "lines": [ + { + "bbox": [ + 304, + 444, + 545, + 478 + ], + "spans": [ + { + "bbox": [ + 304, + 444, + 545, + 478 + ], + "type": "text", + "content": "Table 4. Ablations. For brevity, we use UR, GRM, CR to denote Unified Representation, Generative Refinement Model and Consistency Regularization, respectively." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 482, + 545, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 482, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 304, + 482, + 545, + 555 + ], + "type": "text", + "content": "is computed by comparing the difference between the predicted trajectory and the given trajectory, and DE is computed by comparing the difference between the sparse depth map obtained by COLMAP and the estimated depth map. In addition, to account for temporal consistency, we follow [23] and use RAFT [56] to compute FE." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 562, + 391, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 562, + 391, + 575 + ], + "spans": [ + { + "bbox": [ + 306, + 562, + 391, + 575 + ], + "type": "text", + "content": "5.3. Comparisons" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 581, + 547, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 547, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 547, + 640 + ], + "type": "text", + "content": "Baselines. Since there are only a few baselines directly related to our approach, we also take into account some methods with similar capabilities and construct their variants for comparison. Specifically, the following three categories of methods are included:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 641, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 641, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 641, + 547, + 713 + ], + "type": "text", + "content": "- Text-to-Scene. There exist techniques [12, 16] that generate 3D meshes iteratively by employing warping and inpainting processes, allowing for direct comparisons with our proposed methods. Moreover, image-guided 3D generation methods [24, 26, 47] are also available, wherein initial images can be produced using a T2I model. 
Subse" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "text", + "content": "10175" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 77, + 123, + 139 + ], + "blocks": [ + { + "bbox": [ + 61, + 77, + 123, + 139 + ], + "lines": [ + { + "bbox": [ + 61, + 77, + 123, + 139 + ], + "spans": [ + { + "bbox": [ + 61, + 77, + 123, + 139 + ], + "type": "image", + "image_path": "65ed0448407265cbc362a5b1d0cd09f28ab146347d16340e3909089a5ec239c9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 129, + 78, + 192, + 139 + ], + "blocks": [ + { + "bbox": [ + 129, + 78, + 192, + 139 + ], + "lines": [ + { + "bbox": [ + 129, + 78, + 192, + 139 + ], + "spans": [ + { + "bbox": [ + 129, + 78, + 192, + 139 + ], + "type": "image", + "image_path": "2e9b0df492708e819cefd86a4f8484c48808126114dcb5ca4910e718367bcd8f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 198, + 78, + 260, + 139 + ], + "blocks": [ + { + "bbox": [ + 198, + 78, + 260, + 139 + ], + "lines": [ + { + "bbox": [ + 198, + 78, + 260, + 139 + ], + "spans": [ + { + "bbox": [ + 198, + 78, + 260, + 139 + ], + "type": "image", + "image_path": "365299417889dd154f217046dee628d0d66cf75c41ec0f4447407f908f38a486.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 266, + 78, + 328, + 139 + ], + "blocks": [ + { + "bbox": [ + 266, + 78, + 328, + 139 + ], + "lines": [ + { + "bbox": [ + 266, + 78, + 328, + 139 + ], + "spans": [ + { + "bbox": [ + 266, + 78, + 328, + 139 + ], + "type": "image", + "image_path": "d79b069678db8c93d7181e7cb2c145a3f8589fabb2d18f5ab3fc2d06f25ef2b0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 334, + 78, + 396, + 139 + ], + "blocks": [ + { + "bbox": [ + 334, + 78, + 396, + 139 + ], + "lines": [ + { + "bbox": [ + 334, + 78, + 396, + 139 + ], + "spans": [ + { + "bbox": [ + 334, + 78, + 396, + 139 + ], + "type": "image", + "image_path": "a6bc8fc880685fc0f368de228ad5512f56cb16bfbc550c7bbf90488a79d09577.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 403, + 78, + 465, + 139 + ], + "blocks": [ + { + "bbox": [ + 403, + 78, + 465, + 139 + ], + "lines": [ + { + "bbox": [ + 403, + 78, + 465, + 139 + ], + "spans": [ + { + "bbox": [ + 403, + 78, + 465, + 139 + ], + "type": "image", + "image_path": "ed90cb84fa4ecc1653c5bcc660d88f963d3ea3564d14615d91a0001a29099d11.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 471, + 78, + 533, + 139 + ], + "blocks": [ + { + "bbox": [ + 471, + 78, + 533, + 139 + ], + "lines": [ + { + "bbox": [ + 471, + 78, + 533, + 139 + ], + "spans": [ + { + "bbox": [ + 471, + 78, + 533, + 139 + ], + "type": "image", + "image_path": "b48aff05088edfbd8a8d513eb3d4b1b6934f69667e0485c159279dfdeb5b8756.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 6 + }, + { + "type": "image", + "bbox": [ + 61, + 150, + 123, + 210 + ], + "blocks": [ + { + "bbox": [ + 121, + 140, + 475, + 150 + ], + "lines": [ + { + "bbox": [ + 121, + 140, + 475, + 150 + ], + "spans": [ + { + "bbox": [ + 121, + 140, + 475, + 150 + ], + "type": "text", + "content": "POV, A versatile room with a sofa as the centerpiece, a bookshelf, a lamp, a desk TV, masterpiece" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 61, + 150, + 123, + 210 + ], + "lines": [ + { + "bbox": [ + 61, + 150, + 123, + 210 + ], + "spans": [ + { + "bbox": [ + 61, + 150, + 123, + 210 + ], + "type": "image", + "image_path": "a0e445f9c89f8e06600d05fe4f47d529595356557807f92ecfec7035da14fdc6.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 129, + 150, + 191, + 210 + ], + "blocks": [ + { + "bbox": [ + 129, + 150, + 191, + 210 + ], + "lines": [ + { + "bbox": [ + 129, + 150, + 191, + 210 + ], + "spans": [ + { + "bbox": [ + 129, + 150, + 191, + 210 + ], + "type": "image", + "image_path": "bf3993ff869f02f6c8f0a1947b7fc58dbedc630c6ead31f6bc2152b785782462.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 198, + 150, + 260, + 210 + ], + "blocks": [ + { + "bbox": [ + 198, + 150, + 260, + 210 + ], + "lines": [ + { + "bbox": [ + 198, + 150, + 260, + 210 + ], + "spans": [ + { + "bbox": [ + 198, + 150, + 260, + 210 + ], + "type": "image", + "image_path": "05d1187a6b7b0854d933452ba99d28d7fee44346e266b26ed0f7d104298a1d6d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 266, + 150, + 327, + 210 + ], + "blocks": [ + { + "bbox": [ + 266, + 150, + 327, + 210 + ], + "lines": [ + { + "bbox": [ + 266, + 150, + 327, + 210 + ], + "spans": [ + { + "bbox": [ + 266, + 150, + 327, + 210 + ], + "type": "image", + "image_path": "7b8c450ac0456d6d93dec9fef4b5fd7ffcb62717db7e6cec41b56c9884f5c1e9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 334, + 150, + 395, + 210 + ], + "blocks": [ + { + "bbox": [ + 334, + 150, + 395, + 210 + ], + "lines": [ + { + "bbox": [ + 334, + 150, + 395, + 210 + ], + "spans": [ + { + "bbox": [ + 334, + 150, + 395, + 210 + ], + "type": "image", + "image_path": "c282522fcfa6e7d542dc9868b8aea235526e12e6b0c12d6bb958ae2eb274d308.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 403, + 150, + 464, + 210 + ], + "blocks": [ + { + "bbox": [ + 403, + 150, + 464, + 210 + ], + "lines": [ + { + "bbox": [ + 403, + 150, + 464, + 210 + ], + "spans": [ + { + "bbox": [ + 403, + 150, + 464, + 210 + ], + "type": "image", + "image_path": "f6aa10bd642ba5905a67874a4423ddbc52fc6d4560d0d7e75322a68bf5c6400d.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 471, + 150, + 533, + 210 + ], + "blocks": [ + { + "bbox": [ + 471, + 150, + 533, + 210 + ], + "lines": [ + { + "bbox": [ + 471, + 150, + 533, + 210 + ], + "spans": [ + { + "bbox": [ + 471, + 150, + 533, + 210 + ], + "type": "image", + "image_path": "1958f17e5af93d8ae0472ae81108fc72a8aad031ead136868c78114901c80134.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 
14 + }, + { + "type": "image", + "bbox": [ + 61, + 219, + 122, + 281 + ], + "blocks": [ + { + "bbox": [ + 61, + 219, + 122, + 281 + ], + "lines": [ + { + "bbox": [ + 61, + 219, + 122, + 281 + ], + "spans": [ + { + "bbox": [ + 61, + 219, + 122, + 281 + ], + "type": "image", + "image_path": "bf30f6742ceff9037e1b46598b0e315e236e4acc294c6f4dc0a20cefdc484093.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 293, + 545, + 316 + ], + "lines": [ + { + "bbox": [ + 46, + 293, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 46, + 293, + 545, + 316 + ], + "type": "text", + "content": "Figure 4. Quantitative Results. From our results, it can be seen that our approach produces high-fidelity scenes with stable 3D consistency in indoor scenes, outdoor scenes, and unreal-style scenes. More high-resolution results can be found in the supplementary material." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 129, + 219, + 191, + 281 + ], + "blocks": [ + { + "bbox": [ + 129, + 219, + 191, + 281 + ], + "lines": [ + { + "bbox": [ + 129, + 219, + 191, + 281 + ], + "spans": [ + { + "bbox": [ + 129, + 219, + 191, + 281 + ], + "type": "image", + "image_path": "6f022ac78801c86286c6d5fa2e28a02faf746d8362275ddc961bcedcfea4883c.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 162, + 281, + 434, + 290 + ], + "lines": [ + { + "bbox": [ + 162, + 281, + 434, + 290 + ], + "spans": [ + { + "bbox": [ + 162, + 281, + 434, + 290 + ], + "type": "text", + "content": "POV, walking through a palace in fantasy style, master piece, indoor scene" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 197, + 219, + 259, + 281 + ], + "blocks": [ + { + "bbox": [ + 197, + 219, + 259, + 281 + ], + "lines": [ + { + "bbox": [ + 197, + 219, + 259, + 281 + ], + "spans": [ + { + "bbox": [ + 197, + 219, + 259, + 281 + ], + "type": "image", + "image_path": "97864f2f12fc7b955f01f560c67066562bc30650692f1944bdb8312c61a318f3.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 265, + 219, + 327, + 281 + ], + "blocks": [ + { + "bbox": [ + 265, + 219, + 327, + 281 + ], + "lines": [ + { + "bbox": [ + 265, + 219, + 327, + 281 + ], + "spans": [ + { + "bbox": [ + 265, + 219, + 327, + 281 + ], + "type": "image", + "image_path": "ec12fe1703a1a1b92b32f68b7da4e70f8caa128c26b06db6df9cb968395d050e.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 334, + 219, + 396, + 281 + ], + "blocks": [ + { + "bbox": [ + 334, + 219, + 396, + 281 + ], + "lines": [ + { + "bbox": [ + 334, + 219, + 396, + 281 + ], + "spans": [ + { + "bbox": [ + 334, + 219, + 396, + 281 + ], + "type": "image", + "image_path": "03053ae95128d28fc9a5d7d30573809ee2383b3141ee16dee17bbf3216dde2e7.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 403, + 219, + 464, + 281 + ], + "blocks": [ + { + "bbox": [ + 403, + 219, + 464, + 281 + ], + "lines": [ + { + "bbox": [ + 403, + 219, + 464, + 281 + ], + "spans": [ + { + "bbox": [ + 403, + 219, + 464, + 281 + ], + "type": "image", + "image_path": "082e34233df5852860772145312becc712814e5891d4c9da63d67c588ccce236.jpg" + } + ] + } + ], + 
"index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 471, + 219, + 533, + 279 + ], + "blocks": [ + { + "bbox": [ + 471, + 219, + 533, + 279 + ], + "lines": [ + { + "bbox": [ + 471, + 219, + 533, + 279 + ], + "spans": [ + { + "bbox": [ + 471, + 219, + 533, + 279 + ], + "type": "image", + "image_path": "3e39023510c67838f8fa7e48c99114f4b4298baeecf82532dc7abefa60e66bd2.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 56, + 354, + 288, + 516 + ], + "blocks": [ + { + "bbox": [ + 53, + 326, + 293, + 352 + ], + "lines": [ + { + "bbox": [ + 53, + 326, + 293, + 352 + ], + "spans": [ + { + "bbox": [ + 53, + 326, + 293, + 352 + ], + "type": "text", + "content": "This kitchen is a charming blend of rustic and modern, featuring a large reclaimed wood island with marble countertop, a sink surrounded by cabinets. The left of the island, a stainless-steel refrigerator stands tall. The of the sink, built-in wooden cabinets painted in a muted." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 56, + 354, + 288, + 516 + ], + "lines": [ + { + "bbox": [ + 56, + 354, + 288, + 516 + ], + "spans": [ + { + "bbox": [ + 56, + 354, + 288, + 516 + ], + "type": "image", + "image_path": "f61e7435d707adccc16cbef184f9c1ad7bf5dd4fee74337e379b6d77459aded0.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 529, + 287, + 562 + ], + "lines": [ + { + "bbox": [ + 46, + 529, + 287, + 562 + ], + "spans": [ + { + "bbox": [ + 46, + 529, + 287, + 562 + ], + "type": "text", + "content": "Figure 5. Comparison with text-to-panorama methods. It can be seen that although our method is not trained on panoramic data, it can also generate multiple views with cross-view consistency." + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "bbox": [ + 55, + 570, + 287, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 287, + 617 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 287, + 617 + ], + "type": "text", + "content": "sequently, their pipeline can be used to generate 3D scenes, enabling a comparison against our approach. We comprehensively evaluate these methods based on the previously introduced 3D consistency and visual quality metrics." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 47, + 618, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 47, + 618, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 618, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 618, + 287, + 689 + ], + "type": "text", + "content": "- Text-to-Video. Some recent text-driven video generation methods [32, 49] can also generate similar 3D scene walkthrough videos. Since it is not supported to explicitly control the camera motion in the video generation methods, we only evaluated them in terms of visual quality and temporal consistency." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "- Text-to-Panorama. 
This task generates perspective images covering the panoramic field of view, which is chal" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 319, + 328, + 542, + 506 + ], + "blocks": [ + { + "bbox": [ + 319, + 328, + 542, + 506 + ], + "lines": [ + { + "bbox": [ + 319, + 328, + 542, + 506 + ], + "spans": [ + { + "bbox": [ + 319, + 328, + 542, + 506 + ], + "type": "image", + "image_path": "e87497fc5613b9ba5f6494f53e7551cd1236821982d1e48aac089fc42fbcd20b.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 519, + 545, + 552 + ], + "lines": [ + { + "bbox": [ + 306, + 519, + 545, + 552 + ], + "spans": [ + { + "bbox": [ + 306, + 519, + 545, + 552 + ], + "type": "text", + "content": "Figure 6. Comparison with text-to-video methods. Blur artifacts and temporally inconsistent frames occur in the text-to-video methods because of the lack of global 3D representation." + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "bbox": [ + 313, + 558, + 545, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 558, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 313, + 558, + 545, + 592 + ], + "type": "text", + "content": "lenging to ensure consistency in the overlapping regions. We have selected two related methods [9, 55] for comparisons." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 304, + 594, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 546, + 713 + ], + "type": "text", + "content": "Comparison to Text-to-Scene Methods. To generate the scenes, we use a set of test-specific prompts covering descriptions of indoor, outdoor and unreal scenes. Each prompt generates an image sequence of 100 frames, and for a fair comparison, we set a fixed random seed. After that, we compute the metrics proposed in Sec. 5.2 on the generated image sequences and evaluate the effectiveness of the method. As shown in Tab. 1, our method outperforms the mesh-based iterative generation methods in several metrics, especially for outdoor scenes. 
The quality of" + } + ] + } + ], + "index": 34 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 734, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 734, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 294, + 734, + 318, + 742 + ], + "type": "text", + "content": "10176" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 101, + 156, + 177 + ], + "blocks": [ + { + "bbox": [ + 63, + 75, + 160, + 100 + ], + "lines": [ + { + "bbox": [ + 63, + 75, + 160, + 100 + ], + "spans": [ + { + "bbox": [ + 63, + 75, + 160, + 100 + ], + "type": "text", + "content": "POV, walkthrough a damp, stone corridor, beautiful photo, masterpiece, indoor scene" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 60, + 101, + 156, + 177 + ], + "lines": [ + { + "bbox": [ + 60, + 101, + 156, + 177 + ], + "spans": [ + { + "bbox": [ + 60, + 101, + 156, + 177 + ], + "type": "image", + "image_path": "3ecab3ee9329b10f8d76102279f32ad20c8bd5d50468d06deee087f69225d82f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 70, + 179, + 141, + 188 + ], + "lines": [ + { + "bbox": [ + 70, + 179, + 141, + 188 + ], + "spans": [ + { + "bbox": [ + 70, + 179, + 141, + 188 + ], + "type": "text", + "content": "(a) Extracted Mesh" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 164, + 102, + 277, + 178 + ], + "blocks": [ + { + "bbox": [ + 168, + 75, + 270, + 101 + ], + "lines": [ + { + "bbox": [ + 168, + 75, + 270, + 101 + ], + "spans": [ + { + "bbox": [ + 168, + 75, + 270, + 101 + ], + "type": "text", + "content": "Walkthrough a road, trees, beautiful photo, best quality, masterpiece, indoor scene" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 164, + 102, + 277, + 178 + ], + "lines": [ + { + "bbox": [ + 164, + 102, + 277, + 178 + ], + "spans": [ + { + "bbox": [ + 164, + 102, + 277, + 178 + ], + "type": "image", + "image_path": "97f0ddc7e3a35d86b1280d5f1d41a654b413e917d213630e66e2d8fb2ba9f4b7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 192, + 179, + 249, + 189 + ], + "lines": [ + { + "bbox": [ + 192, + 179, + 249, + 189 + ], + "spans": [ + { + "bbox": [ + 192, + 179, + 249, + 189 + ], + "type": "text", + "content": "(b) Point Cloud" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 189, + 286, + 243 + ], + "lines": [ + { + "bbox": [ + 46, + 189, + 286, + 243 + ], + "spans": [ + { + "bbox": [ + 46, + 189, + 286, + 243 + ], + "type": "text", + "content": "Figure 7. Reconstructed 3D Results. (a) The 3D mesh extracted by marching cube algorithm, and (b) the point cloud obtained after the reconstruction using COLMAP [31]. Our reconstruction results show that our methods can generate scenes with satisfactory 3D consistency." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 245, + 286, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 245, + 286, + 352 + ], + "spans": [ + { + "bbox": [ + 46, + 245, + 286, + 352 + ], + "type": "text", + "content": "their generation results relies heavily on the generative and geometric prior and degrades over time due to error accumulation. In addition, their use of a mesh to represent the scene makes it difficult to represent intense depth discontinuities, which are common in outdoor scenes. Our method, on the other hand, adopts hybrid NeRF as the scene representation, which can cope with complex scenes, and our rectification mechanism can mitigate the effect of accumulated errors caused by inaccurate prior signals." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 353, + 286, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 353, + 286, + 495 + ], + "spans": [ + { + "bbox": [ + 46, + 353, + 286, + 495 + ], + "type": "text", + "content": "Comparison to Text-to-Video Methods. For comparison with the text-to-video model, we used the same collection of prompts as input to the model and generated 1,200 video clips. We used the same metrics to evaluate the 3D consistency and visual quality of the videos generated by the T2V model and our rendered videos. As shown in Tab. 2, our method significantly outperforms the T2V model on all metrics, proving the effectiveness of our method. The T2V model learns geometry and appearance prior by training on a large video dataset, but it lacks a unified 3D representation, making it difficult to ensure multi-view consistency of the generated content, as can be observed Fig. 6." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 496, + 286, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 496, + 286, + 590 + ], + "spans": [ + { + "bbox": [ + 46, + 496, + 286, + 590 + ], + "type": "text", + "content": "Comparison to Text-to-Panorama Methods. We evaluate the methods [9, 55] on visual quality. Tab. 3 and Fig. 5 present the quantitative and qualitative evaluations, respectively. From the results, it can be seen that the results of previous methods can be inconsistent at the left and right boundaries, while our method, although not specifically designed for panorama generation, produces multiple views with cross-view consistency." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 592, + 286, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 592, + 286, + 662 + ], + "spans": [ + { + "bbox": [ + 46, + 592, + 286, + 662 + ], + "type": "text", + "content": "3D Results. In Fig. 7, we show the 3D results reconstructed by our method. The 3D mesh is extracted by the marching cube algorithm [31]. Additionally, we can reconstruct high-quality point clouds using colmap [50] by inputting the rendered image collection, which further demonstrates the superior 3D consistency of the generated view results." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 671, + 140, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 671, + 140, + 683 + ], + "spans": [ + { + "bbox": [ + 47, + 671, + 140, + 683 + ], + "type": "text", + "content": "5.4. 
Ablation Study" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 689, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 286, + 712 + ], + "type": "text", + "content": "To further analyze the proposed methodology, we performed several ablation studies to evaluate the effectiveness" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "content": "of each module. More ablation studies can be found in our supplementary material." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 96, + 545, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 96, + 545, + 179 + ], + "spans": [ + { + "bbox": [ + 305, + 96, + 545, + 179 + ], + "type": "text", + "content": "Effectiveness of Unified Representations. To validate our necessity to construct a unified 3D representation, we remove it from our pipeline. At this time, our approach degenerates to the previous paradigm of warping-inpainting. As shown in Tab. 4, the quality of the generated scenes degrades in DE and CE metrics due to the lack of global 3D consistency constraints." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 180, + 545, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 180, + 545, + 335 + ], + "spans": [ + { + "bbox": [ + 304, + 180, + 545, + 335 + ], + "type": "text", + "content": "Effectiveness of Generative Refinement. To validate the effectiveness of our proposed generative refinement, we ablate the modules in our approach, whereby the novel view obtained through volume rendering will be updated directly into the supporting database for subsequent incremental training. The results in Tab. 4 show that this can lead to a significant degradation in the quality of the generated scene. We argue that the reason for this is that the quality of novel views generated by NeRF training on sparse views tends to be inferior, with notable blurring and artifacts. Therefore, adding this data for optimizing 3D scenes would lead to continuous degradation of the quality of the generated scenes." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 337, + 545, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 337, + 545, + 468 + ], + "spans": [ + { + "bbox": [ + 304, + 337, + 545, + 468 + ], + "type": "text", + "content": "Effectiveness of Consistency Regularization. To verify the validity of our regularization loss, we ablate this loss and generate scenes to compute the relevant metrics. As shown in Tab. 4, adding this loss further improves the 3D consistency of the generated scenes. Though we explicitly inject 3D information into the refining process, its output still shows some inconsistent results in several scenes. Therefore, to further improve the quality of the generated new views, we perform test-time optimization through this regularization term to constrain the consistency between local and global representations." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 480, + 378, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 480, + 378, + 493 + ], + "spans": [ + { + "bbox": [ + 306, + 480, + 378, + 493 + ], + "type": "text", + "content": "6. 
Conclusion" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 501, + 545, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 501, + 545, + 632 + ], + "spans": [ + { + "bbox": [ + 304, + 501, + 545, + 632 + ], + "type": "text", + "content": "This paper presents a new framework, which employs the tri-planar feature-based neural radiation field as a unified 3D representation and provides a unified solution for text-driven indoor and outdoor scene generation and the output supports navigation with arbitrary camera trajectories. Our method fine-tunes a scene-adapted diffusion model to correct the generated new content to mitigate the effect of cumulative errors while synthesizing extrapolated content. Experimental results show that our method can produce results with better visual quality and 3D consistency compared to previous methods." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 645, + 420, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 645, + 420, + 658 + ], + "spans": [ + { + "bbox": [ + 306, + 645, + 420, + 658 + ], + "type": "text", + "content": "7. Acknowledgements" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 665, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 665, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 665, + 545, + 712 + ], + "type": "text", + "content": "This research was supported by Zhejiang Provincial Natural Science Foundation of China under Grant No. LD24F020007, National Natural Science Foundation of China (No. 62202199), and NSFC (no. 62302491)." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 733, + 317, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 733, + 317, + 742 + ], + "spans": [ + { + "bbox": [ + 294, + 733, + 317, + 742 + ], + "type": "text", + "content": "10177" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Yogesh Balaji, Martin Renqiang Min, Bing Bai, Rama Chellappa, and Hans Peter Graf. Conditional GAN with discriminative filter generation for text-to-video synthesis. In Proceedings of the International Joint Conference on Artificial Intelligence, pages 1995-2001, 2019. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 147, + 288, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 147, + 288, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 147, + 288, + 190 + ], + "type": "text", + "content": "[2] Shariq Farooq Bhat, Reiner Birkl, Diana Wofk, Peter Wonka, and Matthias Müller. Zoedepth: Zero-shot transfer by combining relative and metric depth. arXiv preprint arXiv:2302.12288, 2023. 
2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 191, + 288, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 191, + 288, + 256 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 288, + 256 + ], + "type": "text", + "content": "[3] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22563-22575, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 258, + 288, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 288, + 312 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 288, + 312 + ], + "type": "text", + "content": "[4] Shengqu Cai, Eric Ryan Chan, Songyou Peng, Mohamad Shahbazi, Anton Obukhov, Luc Van Gool, and Gordon Wetzstein. Diffdreamer: Consistent single-view perpetual view generation with conditional diffusion models. arXiv preprint arXiv:2211.12131, 2022. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 313, + 288, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 313, + 288, + 368 + ], + "spans": [ + { + "bbox": [ + 53, + 313, + 288, + 368 + ], + "type": "text", + "content": "[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5799-5809, 2021. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 369, + 288, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 369, + 288, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 369, + 288, + 434 + ], + "type": "text", + "content": "[6] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 435, + 288, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 435, + 288, + 490 + ], + "spans": [ + { + "bbox": [ + 53, + 435, + 288, + 490 + ], + "type": "text", + "content": "[7] Eric R Chan, Koki Nagano, Matthew A Chan, Alexander W Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3d-aware diffusion models. arXiv preprint arXiv:2304.02602, 2023. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 491, + 288, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 491, + 288, + 544 + ], + "spans": [ + { + "bbox": [ + 53, + 491, + 288, + 544 + ], + "type": "text", + "content": "[8] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 
2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 546, + 288, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 546, + 288, + 578 + ], + "spans": [ + { + "bbox": [ + 53, + 546, + 288, + 578 + ], + "type": "text", + "content": "[9] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG), 41(6):1-16, 2022. 6, 7, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 579, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 579, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 579, + 287, + 612 + ], + "type": "text", + "content": "[10] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Scenedreamer: Unbounded 3d scene generation from 2d image collections. arXiv preprint arXiv:2302.01330, 2023. 4, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 614, + 288, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 288, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 288, + 645 + ], + "type": "text", + "content": "[11] Blender Online Community. Blender - a 3D modelling and rendering package. Blender Foundation, Stichting Blender Foundation, Amsterdam, 2018. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 288, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 680 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 680 + ], + "type": "text", + "content": "[12] Rafail Fridman, Amit Abecasis, Yoni Kasten, and Tali Dekel. Scenescape: Text-driven consistent scene generation. arXiv preprint arXiv:2302.01133, 2023. 1, 2, 4, 5, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 681, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 288, + 713 + ], + "type": "text", + "content": "[13] Vitor Guizilini, Igor Vasiljevic, Dian Chen, Rares Ambrus, and Adrien Gaidon. Towards zero-shot scale-aware monococular depth estimation. In Proceedings of the IEEE/CVF Inter-" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 94 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 94 + ], + "type": "text", + "content": "national Conference on Computer Vision, pages 9233-9243, 2023.6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 96, + 545, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 151 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 151 + ], + "type": "text", + "content": "[14] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsenko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 
3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 152, + 545, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 185 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 185 + ], + "type": "text", + "content": "[15] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. arXiv preprint arXiv:2204.03458, 2022. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 186, + 547, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 547, + 241 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 547, + 241 + ], + "type": "text", + "content": "[16] Lukas Hollein, Ang Cao, Andrew Owens, Justin Johnson, and Matthias Nießner. Text2room: Extracting textured 3d meshes from 2d text-to-image models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7909-7920, 2023. 1, 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 243, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 243, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 243, + 545, + 285 + ], + "type": "text", + "content": "[17] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 287, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 287, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 287, + 545, + 319 + ], + "type": "text", + "content": "[18] Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. Spatial transformer networks. Advances in neural information processing systems, 28, 2015. 2, 3, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 321, + 545, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 321, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 307, + 321, + 545, + 376 + ], + "type": "text", + "content": "[19] Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439, 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "spans": [ + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "type": "text", + "content": "[20] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. CLIP-mesh: Generating textured meshes from text using pretrained image-text models. In SIGGRAPH Asia 2022 Conference Papers. ACM, 2022. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 422, + 545, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 475 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 475 + ], + "type": "text", + "content": "[21] Jing Yu Koh, Honglak Lee, Yinfei Yang, Jason Baldridge, and Peter Anderson. Pathdreamer: A world model for indoor navigation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14738-14748, 2021. 
3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 478, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 478, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 478, + 545, + 533 + ], + "type": "text", + "content": "[22] Jing Yu Koh, Harsh Agrawal, Dhruv Batra, Richard Tucker, Austin Waters, Honglak Lee, Yinfei Yang, Jason Baldridge, and Peter Anderson. Simple and effective synthesis of indoor 3d scenes. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1169-1178, 2023. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 535, + 545, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 535, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 307, + 535, + 545, + 588 + ], + "type": "text", + "content": "[23] Wei-Sheng Lai, Jia-Bin Huang, Oliver Wang, Eli Shechtman, Ersin Yumer, and Ming-Hsuan Yang. Learning blind video temporal consistency. In Proceedings of the European conference on computer vision (ECCV), pages 170-185, 2018. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "type": "text", + "content": "[24] Xingyi Li, Zhiguo Cao, Huiqiang Sun, Jianming Zhang, Ke Xian, and Guosheng Lin. 3d cinematography from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4595-4605, 2023. 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "type": "text", + "content": "[25] Yitong Li, Martin Min, Dinghan Shen, David Carlson, and Lawrence Carin. Video generation from text. In Proceedings of the AAAI conference on artificial intelligence, 2018. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "text", + "content": "[26] Zhengqi Li, Qianqian Wang, Noah Snavely, and Angjoo Kanazawa. Infinitenature-zero: Learning perpetual view generation of natural scenes from single images. In Proceed-" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 734, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 734, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 294, + 734, + 318, + 742 + ], + "type": "text", + "content": "10178" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "text", + "content": "ings of the European Conference on Computer Vision, pages 515-534. Springer, 2022. 
2, 3, 4, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 288, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 288, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 288, + 162 + ], + "type": "text", + "content": "[27] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Yu Qiao, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In Proceedings of the European Conference on Computer Vision, pages 1-18. Springer, 2022. 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 288, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 288, + 229 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 288, + 229 + ], + "type": "text", + "content": "[28] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 300–309, 2023. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 231, + 288, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 231, + 288, + 295 + ], + "spans": [ + { + "bbox": [ + 48, + 231, + 288, + 295 + ], + "type": "text", + "content": "[29] Andrew Liu, Richard Tucker, Varun Jampani, Ameesh Makadia, Noah Snavely, and Angjoo Kanazawa. Infinite nature: Perpetual view generation of natural scenes from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14458-14467, 2021. 2, 3, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 297, + 287, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 297, + 287, + 341 + ], + "spans": [ + { + "bbox": [ + 48, + 297, + 287, + 341 + ], + "type": "text", + "content": "[30] Xinhang Liu, Shiu-hong Kao, Jiaben Chen, Yu-Wing Tai, and Chi-Keung Tang. Deceptive-nerf: Enhancing nerf reconstruction using pseudo-observations from diffusion models. arXiv preprint arXiv:2305.15171, 2023. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 342, + 287, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 287, + 385 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 287, + 385 + ], + "type": "text", + "content": "[31] William E Lorensen and Harvey E Cline. Marching cubes: A high resolution 3d surface construction algorithm. In Semin al graphics: pioneering efforts that shaped the field, pages 347-353. 1998. 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 388, + 288, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 288, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 288, + 453 + ], + "type": "text", + "content": "[32] Zhengxiong Luo, Dayou Chen, Yingya Zhang, Yan Huang, Liang Wang, Yujun Shen, Deli Zhao, Jingren Zhou, and Tieniu Tan. Videofusion: Decomposed diffusion models for high-quality video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10209-10218, 2023. 
3, 6, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 455, + 287, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 455, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 48, + 455, + 287, + 498 + ], + "type": "text", + "content": "[33] Tanya Marwah, Gaurav Mittal, and Vineeth N Balasubramanian. Attentive semantic video generation using captions. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1426-1434, 2017. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 500, + 287, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 500, + 287, + 542 + ], + "spans": [ + { + "bbox": [ + 48, + 500, + 287, + 542 + ], + "type": "text", + "content": "[34] Gal Metzer, Elad Richardson, Or Patashnik, Raja Giryes, and Daniel Cohen-Or. Latent-nerf for shape-guided generation of 3d shapes and textures. arXiv preprint arXiv:2211.07600, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 544, + 287, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 287, + 598 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 287, + 598 + ], + "type": "text", + "content": "[35] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 601, + 287, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 287, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 287, + 643 + ], + "type": "text", + "content": "[36] Anish Mittal, Anush Krishna Moorthy, and Alan Conrad Bovik. No-reference image quality assessment in the spatial domain. IEEE Transactions on image processing, 21(12): 4695-4708, 2012. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 646, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 287, + 678 + ], + "type": "text", + "content": "[37] Anish Mittal, Rajiv Soundararajan, and Alan C Bovik. Making a “completely blind” image quality analyzer. IEEE Signal processing letters, 20(3):209-212, 2012. 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[38] Gaurav Mittal, Tanya Marwah, and Vineeth N Balasubramanian. Sync-draw: Automatic video generation using deep recurrent attentive architectures. In Proceedings of the 25th" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "ACM international conference on Multimedia, pages 1096-1104, 2017. 
3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 96, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 140 + ], + "type": "text", + "content": "[39] Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhonggang Qi, Ying Shan, and Xiaohu Qie. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453, 2023.6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 141, + 545, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 195 + ], + "type": "text", + "content": "[40] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 197, + 545, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 545, + 241 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 545, + 241 + ], + "type": "text", + "content": "[41] Yingwei Pan, Zhaofan Qiu, Ting Yao, Houqiang Li, and Tao Mei. To create what you tell: Generating videos from captions. In Proceedings of the 25th ACM international conference on Multimedia, pages 1789-1798, 2017. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 243, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 243, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 308, + 243, + 545, + 274 + ], + "type": "text", + "content": "[42] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv, 2022. 1, 2, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 276, + 545, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 276, + 545, + 341 + ], + "spans": [ + { + "bbox": [ + 308, + 276, + 545, + 341 + ], + "type": "text", + "content": "[43] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 343, + 545, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 343, + 545, + 420 + ], + "spans": [ + { + "bbox": [ + 308, + 343, + 545, + 420 + ], + "type": "text", + "content": "[44] Alexander Raistrick, Lahav Lipson, Zeyu Ma, Lingjie Mei, Mingzhe Wang, Yiming Zuo, Karhan Kayan, Hongyu Wen, Beining Han, Yihan Wang, Alejandro Newell, Hei Law, Ankit Goyal, Kaiyu Yang, and Jia Deng. Infinite photorealistic worlds using procedural generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12630-12641. IEEE, 2023. 
1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 421, + 545, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 421, + 545, + 475 + ], + "spans": [ + { + "bbox": [ + 308, + 421, + 545, + 475 + ], + "type": "text", + "content": "[45] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE transactions on pattern analysis and machine intelligence, 44(3):1623-1637, 2020. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 477, + 545, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 477, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 308, + 477, + 545, + 520 + ], + "type": "text", + "content": "[46] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12179-12188, 2021. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 522, + 545, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 522, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 308, + 522, + 545, + 566 + ], + "type": "text", + "content": "[47] Chris Rockwell, David F Fouhey, and Justin Johnson. Pixelsynth: Generating a 3d-consistent experience from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14104-14113, 2021. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 567, + 545, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 567, + 545, + 621 + ], + "spans": [ + { + "bbox": [ + 308, + 567, + 545, + 621 + ], + "type": "text", + "content": "[48] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684–10695, 2022. 1, 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 624, + 545, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 545, + 644 + ], + "type": "text", + "content": "[49] RunWay. Gen-2: The next step forward for generative ai, 2023. https://research.runwayml.com/gen2.6,7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "type": "text", + "content": "[50] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2016. 6, 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "text", + "content": "[51] Liao Shen, Xingyi Li, Huiqiang Sun, Juwen Peng, Ke Xian, Zhiguo Cao, and Guosheng Lin. 
Make-it-4d: Synthesizing" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 734, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 734, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 294, + 734, + 318, + 742 + ], + "type": "text", + "content": "10179" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "text", + "content": "a consistent long-term dynamic scene video from a single image. arXiv preprint arXiv:2308.10257, 2023. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 97, + 287, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 97, + 287, + 151 + ], + "spans": [ + { + "bbox": [ + 48, + 97, + 287, + 151 + ], + "type": "text", + "content": "[52] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8028-8038, 2020. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 154, + 287, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 154, + 287, + 208 + ], + "spans": [ + { + "bbox": [ + 48, + 154, + 287, + 208 + ], + "type": "text", + "content": "[53] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022.3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 211, + 287, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 211, + 287, + 255 + ], + "spans": [ + { + "bbox": [ + 48, + 211, + 287, + 255 + ], + "type": "text", + "content": "[54] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew J Davison. imap: Implicit mapping and positioning in real-time. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6229-6238, 2021. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 257, + 287, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 257, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 48, + 257, + 287, + 300 + ], + "type": "text", + "content": "[55] Shitao Tang, Fuyang Zhang, Jiacheng Chen, Peng Wang, and Yasutaka Furukawa. Mvdiffusion: Enabling holistic multiview image generation with correspondence-aware diffusion. arXiv preprint arXiv:2307.01097, 2023. 6, 7, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 303, + 287, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 303, + 287, + 345 + ], + "spans": [ + { + "bbox": [ + 48, + 303, + 287, + 345 + ], + "type": "text", + "content": "[56] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision, pages 402-419. Springer, 2020. 
6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 350, + 287, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 350, + 287, + 404 + ], + "spans": [ + { + "bbox": [ + 48, + 350, + 287, + 404 + ], + "type": "text", + "content": "[57] Hung-Yu Tseng, Qinbo Li, Changil Kim, Suhib Alsisan, Jia-Bin Huang, and Johannes Kopf. Consistent view synthesis with pose-guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16773-16783, 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 406, + 287, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 406, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 48, + 406, + 287, + 460 + ], + "type": "text", + "content": "[58] Patrick von Platen, Suraj Patil, Anton Lozhkov, Pedro Cuenca, Nathan Lambert, Kashif Rasul, Mishig Davaadorj, and Thomas Wolf. Diffusers: State-of-the-art diffusion models. https://github.com/huggingface/diffusers, 2022.6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 463, + 287, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 463, + 287, + 506 + ], + "spans": [ + { + "bbox": [ + 48, + 463, + 287, + 506 + ], + "type": "text", + "content": "[59] Can Wang, Ruixiang Jiang, Mengei Chai, Mingming He, Dongdong Chen, and Jing Liao. Nerf-art: Text-driven neural radiance fields stylization. arXiv preprint arXiv:2212.08070, 2022.3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 510, + 287, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 510, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 48, + 510, + 287, + 552 + ], + "type": "text", + "content": "[60] Guangcong Wang, Zhaoxi Chen, Chen Change Loy, and Ziwei Liu. Sparsenerf: Distilling depth ranking for few-shot novel view synthesis. arXiv preprint arXiv:2303.16196, 2023.5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 555, + 287, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 287, + 609 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 287, + 609 + ], + "type": "text", + "content": "[61] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A. Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12619-12629, 2023. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 613, + 287, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 655 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 655 + ], + "type": "text", + "content": "[62] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation, 2023. 1, 2, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[63] Olivia Wiles, Georgia Gkioxari, Richard Szeliski, and Justin Johnson. Synsin: End-to-end view synthesis from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7467-7477, 2020. 
3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 317 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[64] Chenfei Wu, Lun Huang, Qianxi Zhang, Binyang Li, Lei Ji, Fan Yang, Guillermo Sapiro, and Nan Duan. Godiva: Generating open-domain videos from natural descriptions. arXiv preprint arXiv:2104.14806, 2021. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 118, + 547, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 547, + 172 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 547, + 172 + ], + "type": "text", + "content": "[65] Chenfei Wu, Jian Liang, Lei Ji, Fan Yang, Yuejian Fang, Daxin Jiang, and Nan Duan. Niwa: Visual synthesis pretraining for neural visual world creation. In Proceedings of the European Conference on Computer Vision, pages 720-736. Springer, 2022. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "text", + "content": "[66] Jingbo Zhang, Xiaoyu Li, Ziyu Wan, Can Wang, and Jing Liao. Text2nerf: Text-driven 3d scene generation with neural radiance fields. arXiv preprint arXiv:2305.11588, 2023. 2, 3, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 219, + 545, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 219, + 545, + 273 + ], + "spans": [ + { + "bbox": [ + 307, + 219, + 545, + 273 + ], + "type": "text", + "content": "[67] Xiaoshuai Zhang, Sai Bi, Kalyan Sunkavalli, Hao Su, and Zexiang Xu. Nerfusion: Fusing radiance fields for large-scale scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5449-5458, 2022. 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 275, + 545, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 275, + 545, + 317 + ], + "spans": [ + { + "bbox": [ + 307, + 275, + 545, + 317 + ], + "type": "text", + "content": "[68] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 
3" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 734, + 317, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 734, + 317, + 742 + ], + "spans": [ + { + "bbox": [ + 294, + 734, + 317, + 742 + ], + "type": "text", + "content": "10180" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/9acb5370-2e99-4481-9b63-bbd93724edf4_content_list.json b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/9acb5370-2e99-4481-9b63-bbd93724edf4_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..2a6cf8dffadfded2c300b1665f9e33a940793023 --- /dev/null +++ b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/9acb5370-2e99-4481-9b63-bbd93724edf4_content_list.json @@ -0,0 +1,1315 @@ +[ + { + "type": "text", + "text": "3DFIREs: Few Image 3D REconstruction for Scenes with Hidden Surfaces", + "text_level": 1, + "bbox": [ + 107, + 130, + 861, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Linyi Jin $^{1}$ , Nilesh Kulkarni $^{1}$ , David F. Fouhey $^{2}$ $^{1}$ University of Michigan $^{1}$ , New York University $^{2}$", + "bbox": [ + 295, + 179, + 673, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{jinlinyi,nileshk}@umich.edu,david.fouhey@nyu.edu", + "bbox": [ + 264, + 219, + 705, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper introduces 3DFIRES, a novel system for scene-level 3D reconstruction from posed images. Designed to work with as few as one view, 3DFIRES reconstructs the complete geometry of unseen scenes, including hidden surfaces. With multiple view inputs, our method produces full reconstruction within all camera frustums. A key feature of our approach is the fusion of multi-view information at the feature level, enabling the production of coherent and comprehensive 3D reconstruction. We train our system on non-watertight scans from large-scale real scene dataset. We show it matches the efficacy of single-view reconstruction methods with only one input and surpasses existing techniques in both quantitative and qualitative measures for sparse-view 3D reconstruction. Project page: https://jinlinyi.github.io/3DFIRES/", + "bbox": [ + 75, + 300, + 473, + 527 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 556, + 209, + 571 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Consider two views of the scene in Fig. 1. Part of the bedroom in View 1 is occluded by the wall, and so you may be uncertain what is behind it, although you might guess the wall continues. Now consider adding in View 2. You can see a bedside table, but little else. However, you can fuse these pieces together to create a consistent 3D sense of the scene viewed by the images, including both the visible and invisible parts. We use this sense when shopping for real estate or looking at a friend's photos. 
We estimate the structure of the scene from parts that are visible to all views; integrate information across images for parts that visible in one view but not others; and take educated guesses for completely occluded regions. Importantly, as the available data increases from one camera to a handful, we can seamlessly integrate the evidence across views.", + "bbox": [ + 75, + 582, + 468, + 808 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This task poses a challenge for current computer vision since it requires making judgments about visible and occluded 3D structures and integrating information across images with large pose change. These abilities are usually independently investigated in two separate strands of research. With single image reconstruction techniques [15,", + "bbox": [ + 75, + 810, + 468, + 901 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a20483cb3db04cf890d22acc12fd091c77b216b9e1dbfeae39b5dfc5f74dfec8.jpg", + "image_caption": [ + "Figure 1. Reconstructing 3D from sparsely posed images. Given a sparse set of posed image views, our method is able to reconstruct the full 3D of the scene. On the top, we show two sparse views of the scene in View 1 and View 2. On the bottom left is the 3D reconstruction from our network in the frustum of View 1. We show that our method can generate the occluded side table (zoom in). On the bottom right is the full reconstruction. We color occluded surfaces with surface normals." + ], + "image_footnote": [], + "bbox": [ + 506, + 273, + 890, + 505 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "20, 26, 41, 43], one can predict both visible and occluded 3D structure from an image, but stacking such outputs from multiple images can produce inconsistent outputs. When handled independently, methods cannot identify the best view to reason about an occluded region. Non-line-of-sight imaging involves transmitting and receiving signals to reveal hidden scenes, incompatible with standard camera images [14]. Sparse view reconstruction methods [1, 17, 39] can create consistent reconstructions from two views; however, these approaches are limited to the visible parts of the scene that can decomposed into planes. Moreover, these methods are usually specialized to a particular number of images that can be accepted.", + "bbox": [ + 496, + 643, + 893, + 839 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, there has been considerable progress in generalized radiance fields, which produce full 3D representations. This occupancy representation and per-scene optimization has shown promising results by optimizing for", + "bbox": [ + 496, + 839, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "9742", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "novel view synthesis on single scenes from posed images sets [7, 23, 36, 40]. Extending this line of work, methods like [32, 46] have shown an ability to predict novel views for unseen scenes from a few images. 
However, since these methods optimize for perceptual quality, the underlying geometry often has artifacts. Like them we also require one or more image views at input, but instead we predict an implicit function [20] that can reliably reconstruct both visible and occluded parts of previously unseen scenes.", + "bbox": [ + 75, + 90, + 472, + 226 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We propose 3DFIREs, Few Image 3D-REconstruction of Scenes, which integrates information from a variable number of images to produce a full reconstruction of the scene. 3DFIREs integrates information in the features space across a varying number of images, enabling it to identify how to best use the available image data to produce an accurate reconstruction at a point. As output, 3DFIREs produces a pixel-aligned implicit field based on a generalization of the Directed Ray Distance Function [20, 21], which enables high quality reconstructions. Thanks to integration in feature space, the results are more consistent than handling images independently: this is what enables reconstructing the bed-side table in Fig. 1, even though it is hidden by the wall in one image. We found and document several design decisions in terms of training and network architecture needed to produce these results.", + "bbox": [ + 75, + 227, + 472, + 468 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We evaluate our method on complex interior scenes from Omnidata [8, 33] dataset collected with a real scanner. We compare 3DFIRES with the point-space fusion of state-of-the-art methods for scene-level full 3D reconstruction methods from a single image [21, 43]. Our experiments show several key results. First, 3DFIRES produces more accurate results compared to existing works. The improvements are larger in hidden regions, and especially substantial when measuring consistency of prediction from multiple views. Second, ablative analysis reveals the key design decisions responsible for 3DFIRES's success. Third, 3DFIRES can generalize to variable views: we train on 1, 2, and 3 views and generalize to 5 views. Finally, 3DFIRES can reconstruct when given LoFTR [37] estimated poses with known translation scale.", + "bbox": [ + 75, + 468, + 472, + 694 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 76, + 709, + 227, + 724 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We aim to produce a coherent 3D scene reconstruction given a single or a few images with wide baselines.", + "bbox": [ + 75, + 734, + 468, + 763 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D from Single Image. Predicting a complete 3D scene from a single image is inherently ambiguous. Recently different 3D representations have been proposed to reconstruct complete 3D scenes (including occluded surfaces) such as layered depth [35], voxels [3, 11, 19, 41], planes [16], point-clouds [9, 43], meshes [10, 12, 25], or implicit representation for objects [22, 26] and scenes [2, 4, 20, 21, 36]. While they have strong performance on single image, they do not necessarily produce coherent results when required to infer", + "bbox": [ + 75, + 763, + 470, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "on multiple images of the same scene [21]. Our method can reconstruct hidden geometry from at least a single image using implicit representation from [20]. 
Instead of naively fusing point clouds from different images, we fuse features when predicting a multi-view consistent point cloud with few input images.", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D from dense views. Traditional multi-view 3D reconstruction methods can produce accurate and coherent point clouds from pixel correspondences [33]. Classical methods in computer vision use approaches like Multi-view stereo (MVS) to construct only visible parts of the scene in all the images. There is a long line of work in trying to reconstruct scenes from video sequences [6, 34] where they reconstruct visible scenes and camera poses. Learning-based methods for MVS estimate geometry for scenes [18, 24, 38, 45] also require an input video to explicitly predict scene geometry. Instead of requiring high overlap inputs such as video frames, our method works on wide-baseline images.", + "bbox": [ + 496, + 181, + 892, + 362 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D from sparse view inputs. Our approach operates in a multi-view setting with a sparse set of views. We have a similar setting as wide-baseline reconstruction [27]. Associative3D [28] reconstructs the whole scene but requires voxelized scenes to train, our method works on non-watertight scene data. Prior work also explores planar representation [1, 17, 39] for coherent 3D surfaces in non-watertight scenes. They use feed-forward networks to predict visible 3D surfaces for each view and merge them using predicted correspondences. Our approach leverages an implicit representation that accommodates non-watertight data, enabling the reconstruction of both visible and occluded surfaces. We fuse deep features from multiple views to predict DRDF representation from Kulkarni et al. [20], producing a coherent reconstruction.", + "bbox": [ + 496, + 363, + 892, + 589 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Novel view synthesis. NeRF [23] and its extensions [42, 46, 48] optimizes per-scene radiance fields for novel-view synthesis, this requires many views and test-time optimization. Due to its occupancy-based representation, extracting geometry often requires thresholding the density function, which leads to cloudy geometry with sparse input views. Our method directly predicts geometry from unseen images without the need for test-time optimization. PixelNerf [46] or SRT [32] can generalize to new scenes but their objectives optimize for photometric losses.", + "bbox": [ + 496, + 589, + 890, + 739 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 753, + 591, + 768 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our goal is to predict an accurate and consistent 3D reconstruction from one or more sparsely spaced camera views and known poses. With one image, the method should predict all surfaces in the camera frustum, including visible and occluded regions. With more images, the method should predict the surfaces in the union of the frustum.", + "bbox": [ + 496, + 779, + 890, + 869 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We tackle this problem with 3DFIREs, a simple and effective approach designed for this setting. 
We first discuss", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "9743", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6922e9ca25b1f1133670f996a2a19b275807d4d562e4c390adbe72a1217f3c0f.jpg", + "image_caption": [ + "Figure 2. (a) Architecture for single view DRDF [20]. Given an image and a query pixel location, it predicts DRDF along the ray from the query pixel. (b) we extend (a) to work on sparse views. Middle: Given N images, a query point $\\mathbf{x}$ , and a query direction $\\vec{\\mathbf{r}}_q$ , we aggregate features from multiple images and output DRDF along the query ray. Right: We show detailed network architecture of 3DFIREs which consists of a Query Encoder and a DRDF Predictor." + ], + "image_footnote": [], + "bbox": [ + 80, + 77, + 305, + 375 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/7684a2539dcb412f20ded22585efde547b7d9c4373bdbbfa8239a389a4a81897.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 77, + 887, + 375 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/b2c5411d94639627566952e36b10c4ac43aaf8bc93e09d0e1f363a6f63494efa.jpg", + "image_caption": [ + "Figure 3. Predictions in the blue camera frustum. Occluded surfaces are colored with surface normals. A single image to 3D method like DRDF [20] is unable to reconstruct the parts of the scene behind the wall with certainty and hence erroneously adds a full wall in front of the hallway (red box). 3DFIREs which fuses features from multiple views (Green and Purple camera in Fig. 2) predicts empty space for the entrance (black box)." + ], + "image_footnote": [], + "bbox": [ + 80, + 455, + 468, + 647 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tackling scene reconstruction in a single image case in §3.1 using the Directed Ray Distance Function (DRDF) [20] and scale this approach to multiple image views in §3.2. In §3.3, we show how we can operationalize our multi-view reconstruction goal with an attention-based model architecture.", + "bbox": [ + 75, + 761, + 468, + 835 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Background Single View Reconstruction", + "text_level": 1, + "bbox": [ + 76, + 845, + 421, + 862 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We begin by revisiting the DRDF formulation for a single image reconstruction. Consider a single image $\\mathcal{I}$ , a single", + "bbox": [ + 75, + 869, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "view implicit reconstruction method aims to produce the full 3D reconstruction for the scene from this image. At inference, when conditioned on image features, the method outputs a distance function for a pre-defined set of 3D points in the camera frustum. It then decodes this predicted distance function to a surface to recover the 3D geometry of the scene. For instance, if the predicted 3D distance function is an unsigned distance function [2], the points on the surface are with distances close to zero.", + "bbox": [ + 496, + 458, + 890, + 592 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Kulkarni et al. [20] solve the single image 3D reconstruction with the DRDF function and show that using the DRDF outperforms the standard unsigned distance function. 
The DRDF is a ray-based distance function measuring the distance of a point $\\mathbf{x}$ to the nearest intersection with a surface along a ray $\\vec{\\mathbf{r}}$ . In [20], the ray on which distances are measured is the ray from the camera center $\\mathbf{c}$ to $\\mathbf{x}$ .", + "bbox": [ + 496, + 595, + 890, + 700 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Fig. 2 (a) shows the DRDF for one such ray. Now, any 3D point $\\mathbf{x}$ can be represented as its distance towards the camera times a unit ray direction, or $z\\vec{\\mathbf{r}}$ , where $z \\in \\mathbb{R}$ and $\\vec{\\mathbf{r}} = \\mathrm{norm}(\\mathbf{x} - \\mathbf{c})$ where $\\mathrm{norm}(\\mathbf{p}) = \\mathbf{p} / ||\\mathbf{p}||$ . The DRDF, $d_{\\mathrm{DR}}(z\\vec{\\mathbf{r}})$ , furthermore includes a sign that determines for the point the direction along the ray towards the nearest intersection (i.e., forwards or backwards). Therefore $(z + d_{\\mathrm{DR}}(z\\vec{\\mathbf{r}}))\\vec{\\mathbf{r}}$ corresponds to a point on the surface.", + "bbox": [ + 496, + 702, + 890, + 823 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The DRDF can be used to create a system that infers single image 3D by pairing the distance function at a point $\\mathbf{x}$ with pixel-aligned features. At inference time, as shown in Fig. 2 (a), given a point $\\mathbf{x}$ in the camera frustum we can extract corresponding pixel-aligned image features using an", + "bbox": [ + 496, + 825, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "9744", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "image backbone $\\mathrm{BB}[\\pi (\\mathbf{x})]$ , and use an MLP to predict the DRDF value corresponding to the point $\\mathbf{x}$ along the $\\vec{\\mathbf{r}}$ . Since DRDF is a ray-based function, its value only depends on the intersections along the ray. For any ray corresponding to a pixel on the image, the prediction of DRDF for the point depends on the image features, and the location of the point on the ray. This parameterization allows DRDF to learn sharp 3D reconstructions of the scene from a single RGB image. At training time, we train a model to predict the DRDF by supervising it with the ground-truth DRDF values computed using the mesh geometry.", + "bbox": [ + 75, + 90, + 472, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Extending DRDFs to Multiple Views", + "text_level": 1, + "bbox": [ + 76, + 267, + 395, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Now, with multiple views we have: N images $\\{\\mathcal{I}_i\\}_{i = 1}^{\\mathrm{N}}$ relative camera transforms $\\{\\pi_i\\}_{i = 1}^{\\mathrm{N}}$ , and corresponding camera centers $\\{\\mathbf{c}_i\\}_{i = 1}^{\\mathrm{N}}$ , our goal is to reconstruct the 3D of the full scene. While the task could perhaps be accomplished by simply predicting individual 3D for each camera, and assembling them together. Our insight is that if the camera frustums have considerable overlap, for overlapping regions we can achieve a better and more consistent reconstruction by allowing the network to reason about which camera provides the best view for each point. This can be achieved by allowing the network to fuse features across cameras for the points in feature space rather than by concatenating in point space. We propose to improve the feature quality of any point $\\mathbf{x}$ by fusing the features from multiple cameras. 
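The sign convention of the DRDF described above is easiest to see in code. Below is a minimal NumPy sketch (ours, not the authors' released code) of how a ground-truth, truncated DRDF could be computed for sample depths along one ray given the depths of that ray's mesh intersections; the paper's log-space truncation is simplified here to plain clipping.

import numpy as np

def drdf_along_ray(sample_depths, hit_depths, trunc=1.0):
    # Signed distance from every sample depth z (point x = z * r on a unit ray r)
    # to the nearest ray/surface intersection.  Positive means the nearest hit lies
    # further along the ray, negative means it lies behind the sample, so that
    # (z + drdf(z)) * r lands on a surface.  Values are truncated for supervision.
    diff = hit_depths[None, :] - sample_depths[:, None]     # (P, K) signed gaps
    nearest = np.abs(diff).argmin(axis=1)                   # closest hit per sample
    drdf = diff[np.arange(len(sample_depths)), nearest]
    return np.clip(drdf, -trunc, trunc)

# toy ray with surfaces at 2.0 m and 4.5 m
z = np.linspace(0.0, 6.0, 7)
print(drdf_along_ray(z, np.array([2.0, 4.5])))   # -> [ 1.  1.  0. -1.  0.5 -0.5 -1.]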
Since we are now dealing with the multi-view settings, a multi-view DRDF formulation is necessary to allow us to predict the DRDF value along each of the query rays, $\\vec{\\mathbf{r}}_q$ originating from the respective camera centers.", + "bbox": [ + 75, + 290, + 472, + 564 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the case of multiple views, the image feature corresponding to a point $\\mathbf{x}$ should be a fusion of features $\\{\\mathbf{f}_{\\theta}[\\pi_i(\\mathbf{x})]\\}_{i = 1}^{\\mathrm{N}}$ . The feature should support predicting the N DRDF values along all the camera directions as $\\{d_{\\mathrm{DR}}(z_i\\vec{\\mathbf{r}}_i)\\}_{i = 1}^{\\mathrm{N}}$ . The intuition of our key idea is that multiple-image views provide more information about the 3D scene and hence potentially better features. We can learn these better features by fusing features to predict a consistent output. This requires a novel architecture that attends to features and rays, $\\{\\vec{\\mathbf{r}}_i\\}_{i = 1}^{\\mathrm{N}}$ , originating from all the available image views. Under this formulation single view DRDF is a special case of our formulation where $\\mathbf{N}$ is 1.", + "bbox": [ + 75, + 564, + 472, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Network Architecture", + "text_level": 1, + "bbox": [ + 76, + 756, + 284, + 771 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Towards the goal of predicting DRDFs along multiple query rays $\\vec{\\mathbf{r}}_q\\in \\{\\vec{\\mathbf{r}}_i\\}_{i = 1}^{\\mathrm{N}}$ , we present a simple and effective network 3DFIREs that accomplishes this task. 3DFIREs consists of three modules: The first module is a Backbone Feature Extractor that obtains pixel-aligned appearance features; by projecting the query point $\\mathbf{x}$ onto the camera, we can obtain a per-point and per-camera appearance feature as in [20, 23, 31, 42, 46]. Since the appearance feature is", + "bbox": [ + 75, + 779, + 470, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "per-image, the model must learn to aggregate information across cameras. This is done with our second component Query Encoder that provides geometric information for aggregating appearance features. Specifically, the query encoder uses the information about the relative positions of query point $\\mathbf{x}$ and query direction $\\vec{\\mathbf{r}}_q$ w.r.t. cameras $\\{\\pi_i\\}_{i=1}^N$ . The final module is the DRDF Predictor that takes appearance and query features to produce a DRDF value along the query direction $\\vec{\\mathbf{r}}_q$ by incorporating the appearance features (evidence for geometry) and query encoder features (evidence that relates different features). Fig. 3 shows an example on how integrating information across multiple views leads to better prediction for occluded parts of the scene.", + "bbox": [ + 496, + 90, + 893, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Backbone Feature Extractor. Our backbone features extractor $\\mathrm{BB}(\\cdot)$ aims to create appearance features from an image. It accepts an image $\\mathcal{I}_i\\in \\mathbb{R}^{H\\times W\\times 3}$ and produces a grid of D-dimensional features $\\mathbf{F}_i\\in \\mathbb{R}^{H'\\times W'\\times D_{\\mathrm{img}}}$ . We use a pre-trained depth estimating vision transformer [29]. Feature extraction for each image proceeds independently using the same network. 
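As a rough illustration of the pixel-aligned lookup the backbone provides, the PyTorch sketch below projects query points into each camera and bilinearly interpolates the per-image feature grid at the projections. The intrinsics/transform handling and the function name are our assumptions for illustration only; they are not taken from the paper's implementation.

import torch
import torch.nn.functional as F

def pixel_aligned_features(feat, K, world2cam, x):
    # feat:      (N, D, H, W) backbone feature grids, one per image
    # K:         (N, 3, 3) pinhole intrinsics at the feature resolution
    # world2cam: (N, 4, 4) world-to-camera transforms pi_i
    # x:         (P, 3) query points in world coordinates
    # returns:   (N, P, D) sampled per-camera features f_i for every point
    N, D, H, W = feat.shape
    homo = torch.cat([x, torch.ones_like(x[:, :1])], dim=1)            # (P, 4)
    cam = torch.einsum('nij,pj->npi', world2cam, homo)[..., :3]        # (N, P, 3)
    uv = torch.einsum('nij,npj->npi', K, cam)
    uv = uv[..., :2] / uv[..., 2:3].clamp(min=1e-6)                    # pixel coords
    grid = torch.stack([uv[..., 0] / (W - 1), uv[..., 1] / (H - 1)], dim=-1) * 2 - 1
    sampled = F.grid_sample(feat, grid.unsqueeze(1), align_corners=True)  # (N, D, 1, P)
    return sampled.squeeze(2).permute(0, 2, 1)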
With extracted per-camera backbone features, $\\mathbf{f}_i$ , for point $\\mathbf{x}$ by interpolating features in $\\{\\mathbf{F}_i\\}_{i = 1}^{\\mathrm{N}}$ at the projection $\\{\\pi_i(\\mathbf{x})\\}_{i = 1}^{\\mathrm{N}}$ correspondingly.", + "bbox": [ + 496, + 287, + 893, + 426 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Query Encoder. Our query encoder $q(\\cdot)$ aims to enable a predictor to decide how to aggregate information across images. As input, the encoder takes a query 3D point $\\mathbf{x}$ and a query direction $\\vec{\\mathbf{r}}_q$ . It additionally considers the backbone features, camera centers $\\{\\mathbf{c}_i\\}_{i=1}^N$ and transforms $\\{\\pi_i\\}_{i=1}^N$ . Our query encoding is the concatenation of: (i) the relative viewing direction in camera $i$ 's space $\\Delta \\vec{\\mathbf{r}}_i(\\vec{\\mathbf{r}}_q) = [\\vec{\\mathbf{r}}_q - \\mathrm{norm}(\\mathbf{x} - \\mathbf{c}_i), \\vec{\\mathbf{r}}_q \\cdot \\mathrm{norm}(\\mathbf{x} - \\mathbf{c}_i)] \\in \\mathbb{R}^4$ ; and (ii) the normalized device coordinates (NDC), coordinates of point $\\mathbf{x}$ in the camera frame $\\mathrm{ndc}_i(\\mathbf{x}) \\in \\mathbb{R}^3$ . Intuitively this query representation, $\\mathbf{q}_i = \\{\\Delta \\vec{\\mathbf{r}}_i, \\mathrm{ndc}_i(\\mathbf{x})\\} \\in \\mathbb{R}^7$ enables reasoning such as: information about surfaces near $\\mathbf{x}$ in direction $\\vec{\\mathbf{r}}_q$ is likely not visible in camera $i$ due to either angle or distance, so this feature ought to be weighted low. The ray query vector is encoded in a positional encoding layer [40] with output dimension $D_{\\mathrm{query}}$ .", + "bbox": [ + 496, + 426, + 895, + 667 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "DRDF Predictor. For a query ray and point tuple, $\\{\\vec{\\mathbf{r}}_q,\\mathbf{x}\\}$ , this model considers the image features $\\{\\mathbf{f}_i\\}_{i = 1}^{\\mathrm{N}}$ , and query features $\\{\\mathbf{q}_i\\}_{i = 1}^N$ yielding a joint camera specific feature, $\\{\\mathbf{f}_i,\\mathbf{q}_i\\}_{i = 1}^{\\mathrm{N}}$ , of dimension $D_{\\mathrm{img}} + D_{\\mathrm{query}}$ . Our self-attention attends over all these features to produce a weight $w_{i}$ per feature. We aggregate the features using this weight to produce a fused feature for the point $\\mathbf{x}$ . We then use the fused feature to predict a DRDF value between $[-1,1]$ with the help of an MLP. This is akin to selecting cameras that are likely to contain the geometry information about the ray point tuple and predicting the geometry information.", + "bbox": [ + 496, + 667, + 895, + 834 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Training 3DFIREs", + "text_level": 1, + "bbox": [ + 500, + 845, + 684, + 861 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The effectiveness of 3DFIREs is improved by getting details right during training. One observation is that sampling", + "bbox": [ + 496, + 869, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "9745", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "points near intersections gives improvements over uniform sampling because the scene-level space is predominantly empty. By increasing the density of sampled points near surface, the network can better learn the scene structure. We sample points along the ray as per a Gaussian distribution centered at the intersection. 
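To make the per-camera query feature and the camera weighting concrete, here is a small PyTorch sketch. It is a simplification of the module described above: the positional encoding is replaced by a linear embedding, the self-attention by a learned softmax score per camera, and all dimensions are hypothetical.

import torch
import torch.nn as nn

class FusionHead(nn.Module):
    # Illustrative fusion of per-camera (appearance, query) features into one DRDF value.
    def __init__(self, d_img=256, d_query=32):
        super().__init__()
        self.query_embed = nn.Linear(7, d_query)      # stand-in for positional encoding
        self.score = nn.Linear(d_img + d_query, 1)    # one attention logit per camera
        self.mlp = nn.Sequential(nn.Linear(d_img + d_query, 128), nn.ReLU(),
                                 nn.Linear(128, 1), nn.Tanh())   # DRDF in [-1, 1]

    def forward(self, f, x, r_q, cam_centers, ndc):
        # f: (N, P, d_img) pixel-aligned features; x: (P, 3) query points;
        # r_q: (P, 3) unit query directions; cam_centers: (N, 3);
        # ndc: (N, P, 3) coordinates of x in each camera's NDC space.
        view_dir = nn.functional.normalize(x[None] - cam_centers[:, None], dim=-1)  # (N, P, 3)
        q = torch.cat([r_q[None] - view_dir,
                       (r_q[None] * view_dir).sum(-1, keepdim=True),
                       ndc], dim=-1)                  # (N, P, 7) geometric query q_i
        joint = torch.cat([f, self.query_embed(q)], dim=-1)
        w = torch.softmax(self.score(joint), dim=0)   # weights over the N cameras
        fused = (w * joint).sum(dim=0)                # (P, d_img + d_query)
        return self.mlp(fused).squeeze(-1)            # (P,) predicted DRDF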
Prior work [42] involves applying ray attention which allows for samples along a ray to attend with each other before the final prediction. This has been shown to be effective. However, combining ray attention with Gaussian sampling during training enables the network to 'cheat'. Ray Attention exploits a train-time shortcut (query point density) to infer intersections. At inference as point density is uniform and this shortcut fails. Empirically we find Gaussian sampling alone to be more effective than ray attention.", + "bbox": [ + 75, + 90, + 472, + 316 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Implementation Details", + "text_level": 1, + "bbox": [ + 76, + 329, + 294, + 345 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training. Our image feature backbone is vision transformer [29] dpt_beit_large_384 pretrained by MiDaS [30]. We use $\\ell_{1}$ loss on log-space truncated DRDF [5, 20, 38]. During training, we randomly sample 1, 2, 3 views with 80 rays per image and 512 points along each ray. Our method is trained for 300K iteration on NVIDIA A100 GPU with batch size of 1. More details in supp.", + "bbox": [ + 75, + 353, + 468, + 459 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Inference. Given N images, we extract backbone features for each image. We generate $n_{\\mathrm{ray}} = 128 \\times 128$ query rays from each camera. Along each ray, we sample $n_{\\mathrm{pt}} = 256$ points that have uniformly spaced depth from 0 to 8m. In total, we get $\\mathbf{N} \\times n_{\\mathrm{ray}} \\times n_{\\mathrm{pt}}$ query pairs $\\{\\mathbf{x}, \\vec{\\mathbf{r}}_q\\}$ , which are fed to 3DFIREs in parallel to get DRDF value. We calculate positive-to-negative zero-crossings along each ray [20] to get a 3D point and aggregate the results.", + "bbox": [ + 75, + 460, + 468, + 580 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 76, + 597, + 202, + 614 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we present the experimental framework for 3DFIREs, our system designed to reconstruct full scene geometry from wide-baseline, sparse images. Considering the novelty of our problem, there is no prior work that does this exact setting. To address this, we curated a dataset and developed testing metrics specifically tailored to the problem's requirements. We conduct comprehensive evaluations of 3DFIREs using real scene images, comparing its performance against alternative methods in the field.", + "bbox": [ + 75, + 623, + 468, + 758 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Dataset", + "text_level": 1, + "bbox": [ + 76, + 770, + 171, + 786 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Following [21], we use the dataset from the Gibson database [44], which contains real images of complex and diverse scenes such as multi-floor villas and expansive warehouses. The scale of the assets in the dataset presents challenging reconstruction problem, which is desirable for evaluating the ability to recover occluded surfaces. We use the images sampled by Omnidata [8] for a diverse set of", + "bbox": [ + 75, + 794, + 468, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "camera poses from the Taskonomy [47] Medium subset, including 98/20/20 training/validation/test buildings. Since our multiview setting is different from the single-view setting of [21], the precise samples are different. 
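The decoding step mentioned under Inference, extracting surface points at positive-to-negative zero-crossings of the predicted DRDF along each query ray, can be sketched as below. Localizing the crossing by linear interpolation between the two bracketing samples is our choice for the illustration.

import numpy as np

def surface_depths_from_drdf(z, d):
    # z: (P,) increasing sample depths along one ray; d: (P,) predicted DRDF values.
    # A positive-to-negative crossing means the nearest surface flips from 'ahead'
    # to 'behind', i.e. the ray has just passed through a surface.
    pos, neg = d[:-1] > 0, d[1:] <= 0
    idx = np.nonzero(pos & neg)[0]
    t = d[idx] / (d[idx] - d[idx + 1])        # fraction of the way to the next sample
    return z[idx] + t * (z[idx + 1] - z[idx])

# toy ray with surfaces near 1.9 m and 4.4 m
z = np.linspace(0.0, 8.0, 256)
hits = np.array([1.9, 4.4])
gap = hits[None, :] - z[:, None]
d = np.clip(gap[np.arange(len(z)), np.abs(gap).argmin(1)], -1, 1)
print(surface_depths_from_drdf(z, d))         # -> approximately [1.9 4.4]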
Our setting is also similar to [17, 39] in that images have wide baselines (median $2.8\\mathrm{m}$ translation, $63.9^{\\circ}$ rotation), unlike methods using video frames [38] where images have high overlap. Our approach diverges from [17, 39] in also reconstructing occluded regions and using real (not rendered) images.", + "bbox": [ + 496, + 90, + 890, + 227 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To curate our image sets, we use a sampling process like [17]. For a set of $k$ images, after picking an image at random, each new image is selected to have at most $70\\%$ overlap with any existing image in the set, and at least $30\\%$ overlap with at least one other image in the set. The process balances diversity and coherence in the viewpoints. We crop images to a fixed field of view. We collect 3781 training sets among $\\geq 10\\mathrm{K}$ images. We also sample 300 sets of 3-view images and 100 sets of 5-view images for evaluation from the held-out test scenes. See the supplementary for dataset generation details. The 3 view and 5 view test set contain considerable occluded 3D geometry (41.9% and 43.7% respectively).", + "bbox": [ + 496, + 228, + 892, + 425 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Baselines", + "text_level": 1, + "bbox": [ + 500, + 436, + 607, + 450 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To the best of our knowledge, no prior work reconstructs occluded regions from sparse-view images at scene scale. We thus create strong baselines from existing methods that handle parts of our setting. Each method is the strongest in its line of work.", + "bbox": [ + 496, + 460, + 890, + 534 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For instance, the visible surface upper-bound includes all methods that reconstruct visible surfaces from sparse views [17, 38, 39]. The DRDF method [20, 21] has been shown to be more effective for scene-level 3D reconstruction compared to many other implicit functions like density [46], occupancy [31], unsigned distance functions on scenes and rays [2]. MCC [43] is likewise SOTA for point cloud completion.", + "bbox": [ + 496, + 536, + 890, + 656 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Depth Only [8, 29] Prior state-of-the-art works on sparse scene reconstruction [38, 39] predict visible surfaces from multiple views, but cannot recover hidden surfaces. To show the near-oracle reconstruction of visible surfaces, we use MiDaS [29] depth model trained on Omnidata [8] with ground-truth scale and shift. This baseline is an upper bound on the performance of methods like [1, 17, 38, 39].", + "bbox": [ + 496, + 657, + 890, + 763 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multiview Compressive Coding (MCC) [43] This method predicts occupancy probability from RGB-D partial point clouds. MCC works on scene-level reconstructions including non-watertight meshes. We train MCC on the same training set as ours. This method requires depth as input and at inference we provide it with ground truth depth. 
Since MCC only works on a single point cloud, to produce predictions from multiple images, we infer each image independently and aggregate the predicted point cloud in point", + "bbox": [ + 496, + 763, + 890, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "9746", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b8fa1bdef0a8b67e46cfe64e1e74cc419b2dcba4659f14e08d063451662d07f4.jpg", + "image_caption": [ + "Figure 4. Comparison between different methods on held-out test scene. Occluded surfaces are colored with the computed surface normals. \"Depth only\" leaves holes with sparse input views, e.g. absent floors and walls. Occupancy-based method MCC [43] produces cloudy results, failing to get the details like pillow, tables. Concatenation of single view DRDF (SV-DRDF) [20] produces inconsistent results, e.g. missing wall in row 2, the double wall in row 3. Our method produces more consistent predictions across different views and also recovers the hidden surface, resulting in a complete mesh. We urge the reader to see results provided in the supplementary videos." + ], + "image_footnote": [], + "bbox": [ + 89, + 85, + 877, + 534 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "cloud space.", + "text_level": 1, + "bbox": [ + 76, + 633, + 161, + 647 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Single-view DRDF (SV-DRDF) [20] This method reconstructs both visible and hidden surfaces from a single input image. We use this baseline to show the benefit of our proposed multi-view feature aggregation. For a fair comparison, we upgrade the original backbone from ResNet34 [13] to the same BEiT [29] and use the same training strategy such as Gaussian sampling of points. Both improve results. Since this baseline only supports single image reconstruction, we produce predictions independently from each input image and aggregate all the point clouds.", + "bbox": [ + 75, + 648, + 472, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Evaluation Metrics", + "text_level": 1, + "bbox": [ + 76, + 814, + 261, + 829 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We use two metrics to evaluate our system.", + "bbox": [ + 76, + 839, + 362, + 853 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Scene F score. Following [20, 43], we compute the scene accuracy (fraction of predicted points within $\\rho$ of a ground truth point), completeness (fraction of ground truth points", + "bbox": [ + 75, + 854, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "within $\\rho$ from a predicted point), and their F-score (F1). This gives an overall summary of scene-level reconstruction. We classify the scene into (1) visible: points that are visible from any one of the input views; and (2) hidden: points that are hidden from all of the input views. Due to the space limit, we only show F-score at $\\rho = 0.2$ . A full table with accuracy, completeness, F-score at different $\\rho$ is in the supp. Trends are the same across values of $\\rho$ and there is no significant accuracy/completeness imbalance for the baselines (MCC, SV-DRDF).", + "bbox": [ + 496, + 632, + 890, + 782 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Multiview consistency. Only measuring the F-score does not measure the consistency of 3D reconstruction when generating results from multiple views. 
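For reference, the Scene F-score defined above (accuracy, completeness, and their harmonic mean at threshold ρ = 0.2) can be written as a short brute-force sketch. A real evaluation would use a KD-tree for nearest-neighbour lookups; the function name and array shapes here are illustrative assumptions.

```python
import numpy as np

def scene_f_score(pred, gt, rho=0.2):
    """Accuracy / completeness / F1 between predicted and GT point clouds.

    pred : (P, 3) predicted points; gt : (G, 3) ground-truth points (meters).
    """
    d = np.linalg.norm(pred[:, None, :] - gt[None, :, :], axis=-1)  # (P, G) distances
    accuracy     = (d.min(axis=1) < rho).mean()   # predicted -> nearest GT
    completeness = (d.min(axis=0) < rho).mean()   # GT -> nearest predicted
    f1 = 2 * accuracy * completeness / max(accuracy + completeness, 1e-8)
    return accuracy, completeness, f1
```

In the paper the same score is additionally reported separately for points visible in at least one input view and for points hidden from all views.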
Doubled predictions of surfaces do not change the Scene F score results if they are within $\\rho$ . Prior work [17] used a detection-based method that penalized double surfaces on planar predictions, but their metric is not applicable since it requires pla", + "bbox": [ + 496, + 794, + 892, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "9747", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9f9f10e95b7c42e73884dfe5ad593ab61c797a2c67aaa682658bd8ecfe41c92f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 80, + 61, + 480, + 301 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fee9a0347d0b3db85e0055b44cf9950c593c14cde05c3630f7c4098d987aa385.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 488, + 61, + 888, + 301 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ae42aa271bbaedd6cef334145f3a6bee8be5040659c7285ca8b27397cea3c7e8.jpg", + "image_caption": [ + "Figure 5. Qualitative results on held-out test scenes. Top row: Reconstruction from 3 images and compared with ground truth. Our method can reconstruct a complete scene structure within all the camera frustums, including the occluded surfaces. Bottom row: Predictions from 5 input images compared with ground truth. For the 2nd and 3rd examples, ceilings are removed to reveal the details of the scene." + ], + "image_footnote": [], + "bbox": [ + 80, + 303, + 888, + 424 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "lar instances. We require a metric that can measure the consistency of 3D reconstruction of points in individual frustumums. Specifically, we would like to ensure that points $\\mathbf{P}_i$ generated from all query rays originating from $\\mathbf{c}_i$ of $\\pi_i$ are consistent with points, $\\mathbf{P}_j$ , generated from by ray queries from $\\mathbf{c}_j$ of $\\pi_j$ at the intersection of frustumums of both the cameras. For every point, $\\mathbf{p} \\in \\mathbf{P}_j$ and within the field of view of camera $i$ , we compute their minimum distance to points in $\\mathbf{P}_i$ . Our metric measures percent of points in the set $\\mathbf{P}_j$ that have minimum distance within the threshold of $\\rho$ . We evaluate this metric bidirectionally to ensure complete results.", + "bbox": [ + 75, + 482, + 472, + 662 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Results", + "text_level": 1, + "bbox": [ + 76, + 676, + 171, + 691 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Qualitative Results. Fig. 3 shows reconstruction from using query rays from the blue camera in Fig. 2. Occluded surfaces are colored with surface normals. DRDF [20] is unable to reconstruct the parts of the scene behind the wall with certainty and erroneously adds a full wall in front of the hallway. 3DFIREs fuses features from multiple images (Green and Purple camera in Fig. 2) accurately predicts the empty space.", + "bbox": [ + 75, + 702, + 468, + 823 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fig. 4 shows results unseen test scenes, and compares reconstruction of baselines. Red box crop show highlighted differences and provide a zoomed-in view for detailed examination. 
Depth only (MiDaS with ground truth scale and shift) reconstructs only visible regions this leaves holes such", + "bbox": [ + 75, + 825, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "as the missing surfaces behind chairs in Row 1; and absent floor sections in Row 4. MCC [43] tends to produce cloudy volumes and misses details like pillows and tables. Single-view DRDF (SV-DRDF) produces occluded regions and sharp surfaces but lacks consistency when aggregating results from multiple views. This is noticeable in its inability to reconstruct the occluded wall in Row 2, the creation of a doubled ceiling in Row 3 due to occlusions. 3DFIREs, effectively merges observations from multiple images, resulting in sharp and accurate reconstructions of both visible and hidden surfaces. By fusing information across views in the feature space, our method overcomes the limitations of other approaches. This ensures comprehensive and consistent scene-level reconstruction from few sparse views.", + "bbox": [ + 496, + 482, + 892, + 693 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In Fig. 5 we show additional alongside the ground truth. 3DFIREs successfully reconstructs large occluded areas, floors hidden by foreground objects (colored in pink), and unseen sides of objects such as the back of chairs in the first example and the kitchen islands in the second example. The reconstruction from multiple views demonstrates consistency and coherent surfaces in overlapping regions.", + "bbox": [ + 496, + 696, + 890, + 803 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "While our method is trained with up to three views, it seamlessly extends to five views. This adaptability stems from our architecture's inherent flexibility to the number of input views. With increasing views it predicts clean and coherent reconstructions within all the camera frustums.", + "bbox": [ + 496, + 806, + 892, + 881 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Quantitative Results. We evaluate our method on sets of 1,", + "bbox": [ + 500, + 885, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "9748", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c520031f90ed025b430e1f72225f1ad10f3bd6d1264dae2f02990d816bf6388d.jpg", + "table_caption": [ + "Table 1. Quantitative results on Scene F-score ( $\\rho = 0.2$ ) for Hidden points, Visible points, All points. For 3 and 5 views, we evaluate Consistency. Depth only: visible surface upperbound is separated to indicate it has oracle information. Despite accurate reconstructions on visible surfaces, these lines of work cannot recover hidden surfaces, causing low overall performance. With 1 view, 3DFIREs is comparable to single view DRDF. With more views, 3DFIREs outperforms all the other baselines in F-score. There is large improvement in consistency metric compared to single view DRDF, showing that aggregating features produces a more coherent reconstruction. Full tables showing accuracy and completeness are in the supplemental." + ], + "table_footnote": [], + "table_body": "
|            | 1 view   |           |       | 3 views  |           |       |               | 5 views  |           |       |               |
|            | Hidden ↑ | Visible ↑ | All ↑ | Hidden ↑ | Visible ↑ | All ↑ | Consistency ↑ | Hidden ↑ | Visible ↑ | All ↑ | Consistency ↑ |
| Depth only | -        | 85.31    | 60.12 | -        | 87.84    | 63.90 | 72.79         | -        | 91.29    | 69.40 | 72.57         |
| MCC        | 40.27    | 56.40    | 50.25 | 42.91    | 62.02    | 54.78 | 70.20         | 38.51    | 64.44    | 55.94 | 66.57         |
| SV-DRDF    | 53.36    | 73.45    | 65.21 | 48.02    | 76.19    | 65.61 | 76.44         | 47.51    | 81.31    | 70.54 | 78.13         |
| 3DFIRES    | 53.34    | 74.29    | 65.71 | 49.99    | 76.74    | 66.56 | 85.48         | 49.52    | 81.74    | 71.41 | 85.92         |
", + "bbox": [ + 78, + 171, + 890, + 265 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/e4d13c798d48d4ff70e81bc0358045f92185b4359f82f51d3654a93ce7d8192f.jpg", + "table_caption": [ + "Table 2. Ablation study on training strategies. GS: Gaussian sampling near intersection along the ray during training. Ray Attn: points along a query ray attend to each other." + ], + "table_footnote": [], + "table_body": "
|                | Hidden | Visible | All   | Consistency |
| -GS            | 43.07  | 77.05   | 64.81 | 83.45       |
| +Ray Attn. -GS | 47.09  | 77.60   | 65.58 | 83.27       |
| +Ray Attn. +GS | 14.85  | 3.36    | 13.29 | 33.56       |
| Ours           | 50.20  | 77.30   | 66.46 | 85.45       |
", + "bbox": [ + 99, + 327, + 444, + 404 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/4e01f99897634c5d16e09f9afe996d534042723e0cf8442032ab646225532222.jpg", + "table_caption": [ + "Table 3. Quantitative results on noisy camera poses generated by LoFTR, evaluated on 3 view cases at $\\rho = 0.2$ . 3DFIREs assumes accurate pixel-aligned features but still produces more consistent reconstructions compared to not aggregating features." + ], + "table_footnote": [], + "table_body": "
| 3-View  | Hidden | Visible | All   | Consistency |
| SV-DRDF | 37.39  | 62.71   | 52.93 | 57.65       |
| Ours    | 38.85  | 62.40   | 53.19 | 65.71       |
", + "bbox": [ + 101, + 484, + 444, + 541 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3, 5 views respectively, as detailed in Tab. 1. Our approach, designed for flexible input views, matches prior works in single-view scene reconstruction and achieves state-of-the-art results with multiple input views. In single-image cases, it is comparable to the single-view DRDF baseline.", + "bbox": [ + 75, + 565, + 468, + 641 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For 3-view sets, our method outperforms MCC [43] or DRDF [21]. Although MiDaS with ground truth scale and shift demonstrates optimal visible surface reconstruction, it falls short in overall scene reconstruction because of no reconstruction on occluded surfaces. When evaluated on scene consistency, 3DFIREs shows a large absolute improvement of $>9\\%$ , over the second-best baseline, showing 3DFIREs's ability to aggregate features across views to produce consistent results.", + "bbox": [ + 75, + 641, + 468, + 777 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The trend persists with 5-view inputs, where our method has the highest F score and consistency. Our method is not trained on 5-views subset but still remains robust to more input views enhancing the reconstruction quality in both visible and hidden surface reconstructions.", + "bbox": [ + 75, + 777, + 468, + 852 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Ablations and Analysis", + "text_level": 1, + "bbox": [ + 76, + 862, + 290, + 878 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation study on training strategy. We conduct an ab", + "bbox": [ + 76, + 885, + 468, + 901 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "lation study (Tab. 2) to investigate the effectiveness of different training strategies for our method. Without Gaussian sampling or ray attention (-GS), the method has degraded performance $(-7\\%$ in hidden F score). With ray attention only (+Ray Attn. -GS), the method is able to better reconstruct the hidden surface but is still worse than ours $(-3\\%)$ . With both ray attention and Gaussian sampling (+Ray Attn. +GS), the network finds shortcut during training and does not work during testing. With Gaussian sampling strategy, our method performs the best.", + "bbox": [ + 496, + 276, + 890, + 426 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Robustness with noisy camera poses. Our method requires accurate camera poses to aggregate pixel-aligned features. This setting is challenging with sparse view data since camera estimation can be noisy. We test if the misalignment of image features caused by noisy camera projection matrices degrades our system. We use LoFTR [37] to estimate the camera rotation and translation angle and evaluate the reconstruction within all the camera frustumums. Since LoFTR does not provide a translation scale, we use ground truth instead. Tab. 3 shows results on 3-view cases. Our method still has significantly higher consistency over single view DRDF baseline. We provide an analysis with synthetic Gaussian camera noise in the supplementary.", + "bbox": [ + 496, + 428, + 892, + 625 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 637, + 625, + 652 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We present 3DFIREs, a scene-level 3D reconstruction method that requires only one or a few posed images of a scene. 
Our method takes in an arbitrary number of input views, fuses multi-view information in the features space and predicts DRDF given a 3D point and query direction. We train our method on a large-scale scene dataset and show its strong ability to reconstruct both visible and hidden surfaces coherently within all the camera frustums on challenging wide-baseline images. Currently, our methods require pose input from off-the-shelf estimation methods, solving for 3D reconstruction and adapting the poses is a challenging next step and left to future work.", + "bbox": [ + 496, + 661, + 890, + 829 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments. Thanks to Mohamed Banani, Richard Higgins, Ziyang Chen for their helpful feedback. Thanks to UM ARC for computing support. Toyota Research Institute provided funds to support this work.", + "bbox": [ + 496, + 834, + 890, + 891 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "9749", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Samir Agarwala, Linyi Jin, Chris Rockwell, and David F Fouhey. Planeformers: From sparse view planes to 3d reconstruction. In ECCV, 2022. 1, 2, 5", + "[2] Julian Chibane, Aymen Mir, and Gerard Pons-Moll. Neural unsigned distance fields for implicit function learning. In NeurIPS, 2020. 2, 3, 5", + "[3] Christopher B Choy, Danfei Xu, JunYoung Gwak, Kevin Chen, and Silvio Savarese. 3d-r2n2: A unified approach for single and multi-view 3d object reconstruction. In ECCV, 2016. 2", + "[4] Manuel Dahnert, Ji Hou, Matthias Nießner, and Angela Dai. Panoptic 3d scene reconstruction from a single rgb image. NeurIPS, 2021. 2", + "[5] Angela Dai, Christian Diller, and Matthias Nießner. Sg-nn: Sparse generative neural networks for self-supervised scene completion of rgb-d scans. In CVPR, 2020. 5", + "[6] Andrew J Davison, Ian D Reid, Nicholas D Molton, and Olivier Stasse. Monoslam: Real-time single camera slam. TPAMI, 2007. 2", + "[7] Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ramanan. Depth-supervised nef: Fewer views and faster training for free. In CVPR, 2022. 2", + "[8] Ainaz Eftekhar, Alexander Sax, Jitendra Malik, and Amir Zamir. Omnidata: A scalable pipeline for making multi-task mid-level vision datasets from 3d scans. In ICCV, 2021. 2, 5", + "[9] Haoqiang Fan, Hao Su, and Leonidas J Guibas. A point set generation network for 3d object reconstruction from a single image. In CVPR, 2017. 2", + "[10] Justin Johnson Georgia Gkioxari, Nikhila Ravi. Learning 3d object shape and layout without 3d supervision. CVPR, 2022. 2", + "[11] Rohit Girdhar, David F Fouhey, Mikel Rodriguez, and Abhinav Gupta. Learning a predictable and generative vector representation for objects. In ECCV, 2016. 2", + "[12] Georgia Gkioxari, Jitendra Malik, and Justin Johnson. Mesh r-cnn. In ICCV, 2019. 2", + "[13] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 6", + "[14] Mariko Isogawa, Dorian Chan, Ye Yuan, Kris M. Kitani, and Matthew O'Toole. Efficient non-line-of-sight imaging from transient sinograms. In ECCV, 2020. 1", + "[15] Hamid Izadinia, Qi Shan, and Steven M. Seitz. Im2cad. In CVPR, 2017. 1", + "[16] Ziyu Jiang, Buyu Liu, Samuel Schulter, Zhangyang Wang, and Manmohan Chandraker. 
Peek-a-boo: Occlusion reasoning in indoor scenes with plane representations. In CVPR, 2020. 2", + "[17] Linyi Jin, Shengyi Qian, Andrew Owens, and David F Fouhey. Planar surface reconstruction from sparse views. In ICCV, 2021. 1, 2, 5, 6", + "[18] Abhishek Kar, Christian Hane, and Jitendra Malik. Learning a multi-view stereo machine. NeurIPS, 2017. 2", + "[19] Nilesh Kulkarni, Ishan Misra, Shubham Tulsiani, and Abhinav Gupta. 3d-relnet: Joint object and relational network for 3d prediction. In ICCV, 2019. 2" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[20] Nilesh Kulkarni, Justin Johnson, and David F Fouhey. Directed ray distance functions for 3d scene reconstruction. In ECCV, 2022. 1, 2, 3, 4, 5, 6, 7", + "[21] Nilesh Kulkarni, Linyi Jin, Justin Johnson, and David F Fouhey. Learning to predict scene-level implicit 3d from posed rgbd data. In CVPR, 2023. 2, 5, 8", + "[22] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In CVPR, 2019. 2", + "[23] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 2, 4", + "[24] Zak Murez, Tarrence Van As, James Bartolozzi, Ayan Sinha, Vijay Badrinarayanan, and Andrew Rabinovich. Atlas: End-to-end 3d scene reconstruction from posed images. In ECCV, 2020. 2", + "[25] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang. Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In CVPR, 2020. 2", + "[26] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In CVPR, 2019. 1, 2", + "[27] Philip Pritchett and Andrew Zisserman. Wide baseline stereo matching. In ICCV, 1998. 2", + "[28] Shengyi Qian, Linyi Jin, and David F Fouhey. Associative3d: Volumetric reconstruction from sparse views. In ECCV, 2020. 2", + "[29] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. ICCV, 2021. 4, 5, 6", + "[30] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. TPAMI, 2022. 5", + "[31] Shunsuke Saito, Tomas Simon, Jason Saragih, and Hanbyul Joo. Pifuhd: Multi-level pixel-aligned implicit function for high-resolution 3d human digitization. In CVPR, 2020. 4, 5", + "[32] Mehdi S. M. Sajjadi, Henning Meyer, Etienne Pot, Urs Bergmann, Klaus Greff, Noha Radwan, Suhani Vora, Mario Lucic, Daniel Duckworth, Alexey Dosovitskiy, Jakob Uszkoreit, Thomas Funkhouser, and Andrea Tagliasacchi. Scene Representation Transformer: Geometry-Free Novel View Synthesis Through Set-Latent Scene Representations. CVPR, 2022. 2", + "[33] Daniel Scharstein and Richard Szeliski. A taxonomy and evaluation of dense two-frame stereo correspondence algorithms. IJCV, 2002. 2", + "[34] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 2", + "[35] Jonathan Shade, Steven Gortler, Li-wei He, and Richard Szeliski. Layered depth images. In Siggraph, 1998. 2", + "[36] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. 
Implicit neural representations with periodic activation functions. NeurIPS, 2020. 2" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9750", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[37] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. LoFTR: Detector-free local feature matching with transformers. CVPR, 2021. 2, 8", + "[38] Jiaming Sun, Yiming Xie, Linghao Chen, Xiaowei Zhou, and Hujun Bao. Neuralrecon: Real-time coherent 3d reconstruction from monocular video. In CVPR, 2021. 2, 5", + "[39] Bin Tan, Nan Xue, Tianfu Wu, and Gui-Song Xia. Nope-sac: Neural one-plane ransac for sparse-view planar 3d reconstruction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2023. 1, 2, 5", + "[40] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020. 2, 4", + "[41] Shubham Tulsiani, Saurabh Gupta, David F Fouhey, Alexei A Efros, and Jitendra Malik. Factoring shape, pose, and layout from the 2d image of a 3d scene. In CVPR, 2018. 1, 2", + "[42] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul Srinivasan, Howard Zhou, Jonathan T. Barron, Ricardo MartinBrualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In CVPR, 2021. 2, 4, 5", + "[43] Chao-Yuan Wu, Justin Johnson, Jitendra Malik, Christoph Feichtenhofer, and Georgia Gkioxari. Multiview compressive coding for 3D reconstruction. CVPR, 2023. 1, 2, 5, 6, 7, 8", + "[44] Fei Xia, Amir R Zamir, Zhiyang He, Alexander Sax, Jitendra Malik, and Silvio Savarese. Gibson env: Real-world perception for embodied agents. In CVPR, 2018. 5", + "[45] Yiming Xie, Matheus Gadelha, Fengting Yang, Xiaowei Zhou, and Huaizu Jiang. Planarrecon: Real-time 3d plane detection and reconstruction from posed monocular videos. In CVPR, 2022. 2", + "[46] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2, 4, 5", + "[47] Amir R. Zamir, Alexander Sax, William Shen, Leonidas J. Guibas, Jitendra Malik, and Silvio Savarese. Taskonomy: Disentangling task transfer learning. In CVPR, 2018. 5", + "[48] Zhizhuo Zhou and Shubham Tulsiani. Sparsefusion: Distilling view-conditioned diffusion for 3d reconstruction. In CVPR, 2023. 
2" + ], + "bbox": [ + 78, + 90, + 468, + 713 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "9751", + "bbox": [ + 482, + 945, + 513, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/9acb5370-2e99-4481-9b63-bbd93724edf4_model.json b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/9acb5370-2e99-4481-9b63-bbd93724edf4_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6eb9a65424a1c6ac9de53a54a6c850435596a32e --- /dev/null +++ b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/9acb5370-2e99-4481-9b63-bbd93724edf4_model.json @@ -0,0 +1,1837 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.109, + 0.131, + 0.862, + 0.152 + ], + "angle": 0, + "content": "3DFIREs: Few Image 3D REconstruction for Scenes with Hidden Surfaces" + }, + { + "type": "text", + "bbox": [ + 0.297, + 0.18, + 0.674, + 0.217 + ], + "angle": 0, + "content": "Linyi Jin\\(^{1}\\), Nilesh Kulkarni\\(^{1}\\), David F. Fouhey\\(^{2}\\) \n\\(^{1}\\)University of Michigan\\(^{1}\\), New York University\\(^{2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.266, + 0.22, + 0.706, + 0.234 + ], + "angle": 0, + "content": "{jinlinyi,nileshk}@umich.edu,david.fouhey@nyu.edu" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.301, + 0.474, + 0.529 + ], + "angle": 0, + "content": "This paper introduces 3DFIRES, a novel system for scene-level 3D reconstruction from posed images. Designed to work with as few as one view, 3DFIRES reconstructs the complete geometry of unseen scenes, including hidden surfaces. With multiple view inputs, our method produces full reconstruction within all camera frustums. A key feature of our approach is the fusion of multi-view information at the feature level, enabling the production of coherent and comprehensive 3D reconstruction. We train our system on non-watertight scans from large-scale real scene dataset. We show it matches the efficacy of single-view reconstruction methods with only one input and surpasses existing techniques in both quantitative and qualitative measures for sparse-view 3D reconstruction. Project page: https://jinlinyi.github.io/3DFIRES/" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.558, + 0.21, + 0.573 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.583, + 0.47, + 0.809 + ], + "angle": 0, + "content": "Consider two views of the scene in Fig. 1. Part of the bedroom in View 1 is occluded by the wall, and so you may be uncertain what is behind it, although you might guess the wall continues. Now consider adding in View 2. You can see a bedside table, but little else. However, you can fuse these pieces together to create a consistent 3D sense of the scene viewed by the images, including both the visible and invisible parts. 
We use this sense when shopping for real estate or looking at a friend's photos. We estimate the structure of the scene from parts that are visible to all views; integrate information across images for parts that visible in one view but not others; and take educated guesses for completely occluded regions. Importantly, as the available data increases from one camera to a handful, we can seamlessly integrate the evidence across views." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.47, + 0.902 + ], + "angle": 0, + "content": "This task poses a challenge for current computer vision since it requires making judgments about visible and occluded 3D structures and integrating information across images with large pose change. These abilities are usually independently investigated in two separate strands of research. With single image reconstruction techniques [15," + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.274, + 0.891, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.518, + 0.895, + 0.63 + ], + "angle": 0, + "content": "Figure 1. Reconstructing 3D from sparsely posed images. Given a sparse set of posed image views, our method is able to reconstruct the full 3D of the scene. On the top, we show two sparse views of the scene in View 1 and View 2. On the bottom left is the 3D reconstruction from our network in the frustum of View 1. We show that our method can generate the occluded side table (zoom in). On the bottom right is the full reconstruction. We color occluded surfaces with surface normals." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.895, + 0.84 + ], + "angle": 0, + "content": "20, 26, 41, 43], one can predict both visible and occluded 3D structure from an image, but stacking such outputs from multiple images can produce inconsistent outputs. When handled independently, methods cannot identify the best view to reason about an occluded region. Non-line-of-sight imaging involves transmitting and receiving signals to reveal hidden scenes, incompatible with standard camera images [14]. Sparse view reconstruction methods [1, 17, 39] can create consistent reconstructions from two views; however, these approaches are limited to the visible parts of the scene that can decomposed into planes. Moreover, these methods are usually specialized to a particular number of images that can be accepted." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Recently, there has been considerable progress in generalized radiance fields, which produce full 3D representations. This occupancy representation and per-scene optimization has shown promising results by optimizing for" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9742" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.227 + ], + "angle": 0, + "content": "novel view synthesis on single scenes from posed images sets [7, 23, 36, 40]. Extending this line of work, methods like [32, 46] have shown an ability to predict novel views for unseen scenes from a few images. However, since these methods optimize for perceptual quality, the underlying geometry often has artifacts. Like them we also require one or more image views at input, but instead we predict an implicit function [20] that can reliably reconstruct both visible and occluded parts of previously unseen scenes." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.228, + 0.473, + 0.469 + ], + "angle": 0, + "content": "We propose 3DFIREs, Few Image 3D-REconstruction of Scenes, which integrates information from a variable number of images to produce a full reconstruction of the scene. 3DFIREs integrates information in the features space across a varying number of images, enabling it to identify how to best use the available image data to produce an accurate reconstruction at a point. As output, 3DFIREs produces a pixel-aligned implicit field based on a generalization of the Directed Ray Distance Function [20, 21], which enables high quality reconstructions. Thanks to integration in feature space, the results are more consistent than handling images independently: this is what enables reconstructing the bed-side table in Fig. 1, even though it is hidden by the wall in one image. We found and document several design decisions in terms of training and network architecture needed to produce these results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.469, + 0.473, + 0.695 + ], + "angle": 0, + "content": "We evaluate our method on complex interior scenes from Omnidata [8, 33] dataset collected with a real scanner. We compare 3DFIRES with the point-space fusion of state-of-the-art methods for scene-level full 3D reconstruction methods from a single image [21, 43]. Our experiments show several key results. First, 3DFIRES produces more accurate results compared to existing works. The improvements are larger in hidden regions, and especially substantial when measuring consistency of prediction from multiple views. Second, ablative analysis reveals the key design decisions responsible for 3DFIRES's success. Third, 3DFIRES can generalize to variable views: we train on 1, 2, and 3 views and generalize to 5 views. Finally, 3DFIRES can reconstruct when given LoFTR [37] estimated poses with known translation scale." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.71, + 0.228, + 0.725 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.765 + ], + "angle": 0, + "content": "We aim to produce a coherent 3D scene reconstruction given a single or a few images with wide baselines." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.471, + 0.902 + ], + "angle": 0, + "content": "3D from Single Image. Predicting a complete 3D scene from a single image is inherently ambiguous. Recently different 3D representations have been proposed to reconstruct complete 3D scenes (including occluded surfaces) such as layered depth [35], voxels [3, 11, 19, 41], planes [16], point-clouds [9, 43], meshes [10, 12, 25], or implicit representation for objects [22, 26] and scenes [2, 4, 20, 21, 36]. While they have strong performance on single image, they do not necessarily produce coherent results when required to infer" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.182 + ], + "angle": 0, + "content": "on multiple images of the same scene [21]. Our method can reconstruct hidden geometry from at least a single image using implicit representation from [20]. Instead of naively fusing point clouds from different images, we fuse features when predicting a multi-view consistent point cloud with few input images." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.183, + 0.893, + 0.363 + ], + "angle": 0, + "content": "3D from dense views. 
Traditional multi-view 3D reconstruction methods can produce accurate and coherent point clouds from pixel correspondences [33]. Classical methods in computer vision use approaches like Multi-view stereo (MVS) to construct only visible parts of the scene in all the images. There is a long line of work in trying to reconstruct scenes from video sequences [6, 34] where they reconstruct visible scenes and camera poses. Learning-based methods for MVS estimate geometry for scenes [18, 24, 38, 45] also require an input video to explicitly predict scene geometry. Instead of requiring high overlap inputs such as video frames, our method works on wide-baseline images." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.364, + 0.893, + 0.59 + ], + "angle": 0, + "content": "3D from sparse view inputs. Our approach operates in a multi-view setting with a sparse set of views. We have a similar setting as wide-baseline reconstruction [27]. Associative3D [28] reconstructs the whole scene but requires voxelized scenes to train, our method works on non-watertight scene data. Prior work also explores planar representation [1, 17, 39] for coherent 3D surfaces in non-watertight scenes. They use feed-forward networks to predict visible 3D surfaces for each view and merge them using predicted correspondences. Our approach leverages an implicit representation that accommodates non-watertight data, enabling the reconstruction of both visible and occluded surfaces. We fuse deep features from multiple views to predict DRDF representation from Kulkarni et al. [20], producing a coherent reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.59, + 0.892, + 0.741 + ], + "angle": 0, + "content": "Novel view synthesis. NeRF [23] and its extensions [42, 46, 48] optimizes per-scene radiance fields for novel-view synthesis, this requires many views and test-time optimization. Due to its occupancy-based representation, extracting geometry often requires thresholding the density function, which leads to cloudy geometry with sparse input views. Our method directly predicts geometry from unseen images without the need for test-time optimization. PixelNerf [46] or SRT [32] can generalize to new scenes but their objectives optimize for photometric losses." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.755, + 0.593, + 0.77 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.87 + ], + "angle": 0, + "content": "Our goal is to predict an accurate and consistent 3D reconstruction from one or more sparsely spaced camera views and known poses. With one image, the method should predict all surfaces in the camera frustum, including visible and occluded regions. With more images, the method should predict the surfaces in the union of the frustum." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We tackle this problem with 3DFIREs, a simple and effective approach designed for this setting. We first discuss" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9743" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.078, + 0.307, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.322, + 0.078, + 0.888, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.388, + 0.893, + 0.445 + ], + "angle": 0, + "content": "Figure 2. 
(a) Architecture for single view DRDF [20]. Given an image and a query pixel location, it predicts DRDF along the ray from the query pixel. (b) we extend (a) to work on sparse views. Middle: Given N images, a query point \\(\\mathbf{x}\\), and a query direction \\(\\vec{\\mathbf{r}}_q\\), we aggregate features from multiple images and output DRDF along the query ray. Right: We show detailed network architecture of 3DFIREs which consists of a Query Encoder and a DRDF Predictor." + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.456, + 0.47, + 0.648 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.662, + 0.47, + 0.759 + ], + "angle": 0, + "content": "Figure 3. Predictions in the blue camera frustum. Occluded surfaces are colored with surface normals. A single image to 3D method like DRDF [20] is unable to reconstruct the parts of the scene behind the wall with certainty and hence erroneously adds a full wall in front of the hallway (red box). 3DFIREs which fuses features from multiple views (Green and Purple camera in Fig. 2) predicts empty space for the entrance (black box)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.762, + 0.469, + 0.837 + ], + "angle": 0, + "content": "tackling scene reconstruction in a single image case in §3.1 using the Directed Ray Distance Function (DRDF) [20] and scale this approach to multiple image views in §3.2. In §3.3, we show how we can operationalize our multi-view reconstruction goal with an attention-based model architecture." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.847, + 0.423, + 0.863 + ], + "angle": 0, + "content": "3.1. Background Single View Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.47, + 0.901 + ], + "angle": 0, + "content": "We begin by revisiting the DRDF formulation for a single image reconstruction. Consider a single image \\(\\mathcal{I}\\), a single" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.459, + 0.892, + 0.593 + ], + "angle": 0, + "content": "view implicit reconstruction method aims to produce the full 3D reconstruction for the scene from this image. At inference, when conditioned on image features, the method outputs a distance function for a pre-defined set of 3D points in the camera frustum. It then decodes this predicted distance function to a surface to recover the 3D geometry of the scene. For instance, if the predicted 3D distance function is an unsigned distance function [2], the points on the surface are with distances close to zero." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.596, + 0.892, + 0.701 + ], + "angle": 0, + "content": "Kulkarni et al. [20] solve the single image 3D reconstruction with the DRDF function and show that using the DRDF outperforms the standard unsigned distance function. The DRDF is a ray-based distance function measuring the distance of a point \\(\\mathbf{x}\\) to the nearest intersection with a surface along a ray \\(\\vec{\\mathbf{r}}\\). In [20], the ray on which distances are measured is the ray from the camera center \\(\\mathbf{c}\\) to \\(\\mathbf{x}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.703, + 0.892, + 0.824 + ], + "angle": 0, + "content": "Fig. 2 (a) shows the DRDF for one such ray. 
Now, any 3D point \\(\\mathbf{x}\\) can be represented as its distance towards the camera times a unit ray direction, or \\(z\\vec{\\mathbf{r}}\\), where \\(z \\in \\mathbb{R}\\) and \\(\\vec{\\mathbf{r}} = \\mathrm{norm}(\\mathbf{x} - \\mathbf{c})\\) where \\(\\mathrm{norm}(\\mathbf{p}) = \\mathbf{p} / ||\\mathbf{p}||\\). The DRDF, \\(d_{\\mathrm{DR}}(z\\vec{\\mathbf{r}})\\), furthermore includes a sign that determines for the point the direction along the ray towards the nearest intersection (i.e., forwards or backwards). Therefore \\((z + d_{\\mathrm{DR}}(z\\vec{\\mathbf{r}}))\\vec{\\mathbf{r}}\\) corresponds to a point on the surface." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.901 + ], + "angle": 0, + "content": "The DRDF can be used to create a system that infers single image 3D by pairing the distance function at a point \\( \\mathbf{x} \\) with pixel-aligned features. At inference time, as shown in Fig. 2 (a), given a point \\( \\mathbf{x} \\) in the camera frustum we can extract corresponding pixel-aligned image features using an" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9744" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.473, + 0.259 + ], + "angle": 0, + "content": "image backbone \\(\\mathrm{BB}[\\pi (\\mathbf{x})]\\), and use an MLP to predict the DRDF value corresponding to the point \\(\\mathbf{x}\\) along the \\(\\vec{\\mathbf{r}}\\). Since DRDF is a ray-based function, its value only depends on the intersections along the ray. For any ray corresponding to a pixel on the image, the prediction of DRDF for the point depends on the image features, and the location of the point on the ray. This parameterization allows DRDF to learn sharp 3D reconstructions of the scene from a single RGB image. At training time, we train a model to predict the DRDF by supervising it with the ground-truth DRDF values computed using the mesh geometry." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.268, + 0.396, + 0.284 + ], + "angle": 0, + "content": "3.2. Extending DRDFs to Multiple Views" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.291, + 0.473, + 0.565 + ], + "angle": 0, + "content": "Now, with multiple views we have: N images \\(\\{\\mathcal{I}_i\\}_{i = 1}^{\\mathrm{N}}\\) relative camera transforms \\(\\{\\pi_i\\}_{i = 1}^{\\mathrm{N}}\\) , and corresponding camera centers \\(\\{\\mathbf{c}_i\\}_{i = 1}^{\\mathrm{N}}\\) , our goal is to reconstruct the 3D of the full scene. While the task could perhaps be accomplished by simply predicting individual 3D for each camera, and assembling them together. Our insight is that if the camera frustums have considerable overlap, for overlapping regions we can achieve a better and more consistent reconstruction by allowing the network to reason about which camera provides the best view for each point. This can be achieved by allowing the network to fuse features across cameras for the points in feature space rather than by concatenating in point space. We propose to improve the feature quality of any point \\(\\mathbf{x}\\) by fusing the features from multiple cameras. Since we are now dealing with the multi-view settings, a multi-view DRDF formulation is necessary to allow us to predict the DRDF value along each of the query rays, \\(\\vec{\\mathbf{r}}_q\\) originating from the respective camera centers." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.565, + 0.473, + 0.746 + ], + "angle": 0, + "content": "In the case of multiple views, the image feature corresponding to a point \\(\\mathbf{x}\\) should be a fusion of features \\(\\{\\mathbf{f}_{\\theta}[\\pi_i(\\mathbf{x})]\\}_{i = 1}^{\\mathrm{N}}\\). The feature should support predicting the N DRDF values along all the camera directions as \\(\\{d_{\\mathrm{DR}}(z_i\\vec{\\mathbf{r}}_i)\\}_{i = 1}^{\\mathrm{N}}\\). The intuition of our key idea is that multiple-image views provide more information about the 3D scene and hence potentially better features. We can learn these better features by fusing features to predict a consistent output. This requires a novel architecture that attends to features and rays, \\(\\{\\vec{\\mathbf{r}}_i\\}_{i = 1}^{\\mathrm{N}}\\), originating from all the available image views. Under this formulation single view DRDF is a special case of our formulation where \\(\\mathbf{N}\\) is 1." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.757, + 0.285, + 0.772 + ], + "angle": 0, + "content": "3.3. Network Architecture" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.472, + 0.903 + ], + "angle": 0, + "content": "Towards the goal of predicting DRDFs along multiple query rays \\(\\vec{\\mathbf{r}}_q\\in \\{\\vec{\\mathbf{r}}_i\\}_{i = 1}^{\\mathrm{N}}\\), we present a simple and effective network 3DFIREs that accomplishes this task. 3DFIREs consists of three modules: The first module is a Backbone Feature Extractor that obtains pixel-aligned appearance features; by projecting the query point \\(\\mathbf{x}\\) onto the camera, we can obtain a per-point and per-camera appearance feature as in [20, 23, 31, 42, 46]. Since the appearance feature is" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.288 + ], + "angle": 0, + "content": "per-image, the model must learn to aggregate information across cameras. This is done with our second component Query Encoder that provides geometric information for aggregating appearance features. Specifically, the query encoder uses the information about the relative positions of query point \\(\\mathbf{x}\\) and query direction \\(\\vec{\\mathbf{r}}_q\\) w.r.t. cameras \\(\\{\\pi_i\\}_{i=1}^N\\). The final module is the DRDF Predictor that takes appearance and query features to produce a DRDF value along the query direction \\(\\vec{\\mathbf{r}}_q\\) by incorporating the appearance features (evidence for geometry) and query encoder features (evidence that relates different features). Fig. 3 shows an example on how integrating information across multiple views leads to better prediction for occluded parts of the scene." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.289, + 0.895, + 0.427 + ], + "angle": 0, + "content": "Backbone Feature Extractor. Our backbone features extractor \\(\\mathrm{BB}(\\cdot)\\) aims to create appearance features from an image. It accepts an image \\(\\mathcal{I}_i\\in \\mathbb{R}^{H\\times W\\times 3}\\) and produces a grid of D-dimensional features \\(\\mathbf{F}_i\\in \\mathbb{R}^{H'\\times W'\\times D_{\\mathrm{img}}}\\). We use a pre-trained depth estimating vision transformer [29]. Feature extraction for each image proceeds independently using the same network. 
With extracted per-camera backbone features, \\(\\mathbf{f}_i\\), for point \\(\\mathbf{x}\\) by interpolating features in \\(\\{\\mathbf{F}_i\\}_{i = 1}^{\\mathrm{N}}\\) at the projection \\(\\{\\pi_i(\\mathbf{x})\\}_{i = 1}^{\\mathrm{N}}\\) correspondingly." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.427, + 0.896, + 0.668 + ], + "angle": 0, + "content": "Query Encoder. Our query encoder \\( q(\\cdot) \\) aims to enable a predictor to decide how to aggregate information across images. As input, the encoder takes a query 3D point \\( \\mathbf{x} \\) and a query direction \\( \\vec{\\mathbf{r}}_q \\). It additionally considers the backbone features, camera centers \\( \\{\\mathbf{c}_i\\}_{i=1}^N \\) and transforms \\( \\{\\pi_i\\}_{i=1}^N \\). Our query encoding is the concatenation of: (i) the relative viewing direction in camera \\( i \\)'s space \\( \\Delta \\vec{\\mathbf{r}}_i(\\vec{\\mathbf{r}}_q) = [\\vec{\\mathbf{r}}_q - \\mathrm{norm}(\\mathbf{x} - \\mathbf{c}_i), \\vec{\\mathbf{r}}_q \\cdot \\mathrm{norm}(\\mathbf{x} - \\mathbf{c}_i)] \\in \\mathbb{R}^4 \\); and (ii) the normalized device coordinates (NDC), coordinates of point \\( \\mathbf{x} \\) in the camera frame \\( \\mathrm{ndc}_i(\\mathbf{x}) \\in \\mathbb{R}^3 \\). Intuitively this query representation, \\( \\mathbf{q}_i = \\{\\Delta \\vec{\\mathbf{r}}_i, \\mathrm{ndc}_i(\\mathbf{x})\\} \\in \\mathbb{R}^7 \\) enables reasoning such as: information about surfaces near \\( \\mathbf{x} \\) in direction \\( \\vec{\\mathbf{r}}_q \\) is likely not visible in camera \\( i \\) due to either angle or distance, so this feature ought to be weighted low. The ray query vector is encoded in a positional encoding layer [40] with output dimension \\( D_{\\mathrm{query}} \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.668, + 0.896, + 0.835 + ], + "angle": 0, + "content": "DRDF Predictor. For a query ray and point tuple, \\(\\{\\vec{\\mathbf{r}}_q,\\mathbf{x}\\}\\), this model considers the image features \\(\\{\\mathbf{f}_i\\}_{i = 1}^{\\mathrm{N}}\\), and query features \\(\\{\\mathbf{q}_i\\}_{i = 1}^N\\) yielding a joint camera specific feature, \\(\\{\\mathbf{f}_i,\\mathbf{q}_i\\}_{i = 1}^{\\mathrm{N}}\\), of dimension \\(D_{\\mathrm{img}} + D_{\\mathrm{query}}\\). Our self-attention attends over all these features to produce a weight \\(w_{i}\\) per feature. We aggregate the features using this weight to produce a fused feature for the point \\(\\mathbf{x}\\). We then use the fused feature to predict a DRDF value between \\([-1,1]\\) with the help of an MLP. This is akin to selecting cameras that are likely to contain the geometry information about the ray point tuple and predicting the geometry information." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.847, + 0.685, + 0.862 + ], + "angle": 0, + "content": "3.4. Training 3DFIREs" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.895, + 0.903 + ], + "angle": 0, + "content": "The effectiveness of 3DFIREs is improved by getting details right during training. One observation is that sampling" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9745" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.318 + ], + "angle": 0, + "content": "points near intersections gives improvements over uniform sampling because the scene-level space is predominantly empty. By increasing the density of sampled points near surface, the network can better learn the scene structure. 
We sample points along the ray as per a Gaussian distribution centered at the intersection. Prior work [42] involves applying ray attention which allows for samples along a ray to attend with each other before the final prediction. This has been shown to be effective. However, combining ray attention with Gaussian sampling during training enables the network to 'cheat'. Ray Attention exploits a train-time shortcut (query point density) to infer intersections. At inference as point density is uniform and this shortcut fails. Empirically we find Gaussian sampling alone to be more effective than ray attention." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.33, + 0.295, + 0.346 + ], + "angle": 0, + "content": "3.5. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.354, + 0.47, + 0.46 + ], + "angle": 0, + "content": "Training. Our image feature backbone is vision transformer [29] dpt_beit_large_384 pretrained by MiDaS [30]. We use \\(\\ell_{1}\\) loss on log-space truncated DRDF [5, 20, 38]. During training, we randomly sample 1, 2, 3 views with 80 rays per image and 512 points along each ray. Our method is trained for 300K iteration on NVIDIA A100 GPU with batch size of 1. More details in supp." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.461, + 0.47, + 0.581 + ], + "angle": 0, + "content": "Inference. Given N images, we extract backbone features for each image. We generate \\( n_{\\mathrm{ray}} = 128 \\times 128 \\) query rays from each camera. Along each ray, we sample \\( n_{\\mathrm{pt}} = 256 \\) points that have uniformly spaced depth from 0 to 8m. In total, we get \\( \\mathbf{N} \\times n_{\\mathrm{ray}} \\times n_{\\mathrm{pt}} \\) query pairs \\( \\{\\mathbf{x}, \\vec{\\mathbf{r}}_q\\} \\), which are fed to 3DFIREs in parallel to get DRDF value. We calculate positive-to-negative zero-crossings along each ray [20] to get a 3D point and aggregate the results." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.598, + 0.203, + 0.615 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.624, + 0.47, + 0.759 + ], + "angle": 0, + "content": "In this section, we present the experimental framework for 3DFIREs, our system designed to reconstruct full scene geometry from wide-baseline, sparse images. Considering the novelty of our problem, there is no prior work that does this exact setting. To address this, we curated a dataset and developed testing metrics specifically tailored to the problem's requirements. We conduct comprehensive evaluations of 3DFIREs using real scene images, comparing its performance against alternative methods in the field." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.771, + 0.173, + 0.787 + ], + "angle": 0, + "content": "4.1. Dataset" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Following [21], we use the dataset from the Gibson database [44], which contains real images of complex and diverse scenes such as multi-floor villas and expansive warehouses. The scale of the assets in the dataset presents challenging reconstruction problem, which is desirable for evaluating the ability to recover occluded surfaces. We use the images sampled by Omnidata [8] for a diverse set of" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.228 + ], + "angle": 0, + "content": "camera poses from the Taskonomy [47] Medium subset, including 98/20/20 training/validation/test buildings. 
Since our multiview setting is different from the single-view setting of [21], the precise samples are different. Our setting is also similar to [17, 39] in that images have wide baselines (median \\(2.8\\mathrm{m}\\) translation, \\(63.9^{\\circ}\\) rotation), unlike methods using video frames [38] where images have high overlap. Our approach diverges from [17, 39] in also reconstructing occluded regions and using real (not rendered) images." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.229, + 0.893, + 0.426 + ], + "angle": 0, + "content": "To curate our image sets, we use a sampling process like [17]. For a set of \\( k \\) images, after picking an image at random, each new image is selected to have at most \\( 70\\% \\) overlap with any existing image in the set, and at least \\( 30\\% \\) overlap with at least one other image in the set. The process balances diversity and coherence in the viewpoints. We crop images to a fixed field of view. We collect 3781 training sets among \\( \\geq 10\\mathrm{K} \\) images. We also sample 300 sets of 3-view images and 100 sets of 5-view images for evaluation from the held-out test scenes. See the supplementary for dataset generation details. The 3 view and 5 view test set contain considerable occluded 3D geometry (41.9% and 43.7% respectively)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.437, + 0.608, + 0.451 + ], + "angle": 0, + "content": "4.2. Baselines" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.461, + 0.892, + 0.535 + ], + "angle": 0, + "content": "To the best of our knowledge, no prior work reconstructs occluded regions from sparse-view images at scene scale. We thus create strong baselines from existing methods that handle parts of our setting. Each method is the strongest in its line of work." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.537, + 0.892, + 0.657 + ], + "angle": 0, + "content": "For instance, the visible surface upper-bound includes all methods that reconstruct visible surfaces from sparse views [17, 38, 39]. The DRDF method [20, 21] has been shown to be more effective for scene-level 3D reconstruction compared to many other implicit functions like density [46], occupancy [31], unsigned distance functions on scenes and rays [2]. MCC [43] is likewise SOTA for point cloud completion." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.764 + ], + "angle": 0, + "content": "Depth Only [8, 29] Prior state-of-the-art works on sparse scene reconstruction [38, 39] predict visible surfaces from multiple views, but cannot recover hidden surfaces. To show the near-oracle reconstruction of visible surfaces, we use MiDaS [29] depth model trained on Omnidata [8] with ground-truth scale and shift. This baseline is an upper bound on the performance of methods like [1, 17, 38, 39]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Multiview Compressive Coding (MCC) [43] This method predicts occupancy probability from RGB-D partial point clouds. MCC works on scene-level reconstructions including non-watertight meshes. We train MCC on the same training set as ours. This method requires depth as input and at inference we provide it with ground truth depth. 
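The overlap rule used above to curate image sets can be sketched as a greedy filter. The `overlap` oracle, the shuffled candidate pool, and the give-up behaviour are assumptions for illustration; only the 30%/70% thresholds come from the text.

```python
import random

def sample_view_set(images, overlap, k, lo=0.30, hi=0.70, seed=0):
    """Greedily pick k views: every new view overlaps each chosen view by at
    most `hi`, and at least one chosen view by at least `lo`."""
    rng = random.Random(seed)
    chosen = [rng.choice(images)]
    pool = [im for im in images if im != chosen[0]]
    rng.shuffle(pool)
    for cand in pool:
        if len(chosen) == k:
            break
        ovs = [overlap(cand, c) for c in chosen]
        if max(ovs) <= hi and any(o >= lo for o in ovs):
            chosen.append(cand)
    return chosen if len(chosen) == k else None      # pool exhausted without a valid set
```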
Since MCC only works on a single point cloud, to produce predictions from multiple images, we infer each image independently and aggregate the predicted point cloud in point" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9746" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.086, + 0.878, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.549, + 0.895, + 0.62 + ], + "angle": 0, + "content": "Figure 4. Comparison between different methods on held-out test scene. Occluded surfaces are colored with the computed surface normals. \"Depth only\" leaves holes with sparse input views, e.g. absent floors and walls. Occupancy-based method MCC [43] produces cloudy results, failing to get the details like pillow, tables. Concatenation of single view DRDF (SV-DRDF) [20] produces inconsistent results, e.g. missing wall in row 2, the double wall in row 3. Our method produces more consistent predictions across different views and also recovers the hidden surface, resulting in a complete mesh. We urge the reader to see results provided in the supplementary videos." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.634, + 0.163, + 0.648 + ], + "angle": 0, + "content": "cloud space." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.649, + 0.473, + 0.803 + ], + "angle": 0, + "content": "Single-view DRDF (SV-DRDF) [20] This method reconstructs both visible and hidden surfaces from a single input image. We use this baseline to show the benefit of our proposed multi-view feature aggregation. For a fair comparison, we upgrade the original backbone from ResNet34 [13] to the same BEiT [29] and use the same training strategy such as Gaussian sampling of points. Both improve results. Since this baseline only supports single image reconstruction, we produce predictions independently from each input image and aggregate all the point clouds." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.815, + 0.262, + 0.83 + ], + "angle": 0, + "content": "4.3. Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.84, + 0.363, + 0.854 + ], + "angle": 0, + "content": "We use two metrics to evaluate our system." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Scene F score. Following [20, 43], we compute the scene accuracy (fraction of predicted points within \\(\\rho\\) of a ground truth point), completeness (fraction of ground truth points" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.633, + 0.892, + 0.784 + ], + "angle": 0, + "content": "within \\(\\rho\\) from a predicted point), and their F-score (F1). This gives an overall summary of scene-level reconstruction. We classify the scene into (1) visible: points that are visible from any one of the input views; and (2) hidden: points that are hidden from all of the input views. Due to the space limit, we only show F-score at \\(\\rho = 0.2\\). A full table with accuracy, completeness, F-score at different \\(\\rho\\) is in the supp. Trends are the same across values of \\(\\rho\\) and there is no significant accuracy/completeness imbalance for the baselines (MCC, SV-DRDF)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.893, + 0.903 + ], + "angle": 0, + "content": "Multiview consistency. Only measuring the F-score does not measure the consistency of 3D reconstruction when generating results from multiple views. 
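The scene F-score above, and the multiview consistency check developed in the rest of this section, both reduce to nearest-neighbour distance tests between point sets. A sketch follows; SciPy's KD-tree is a convenience choice, and the field-of-view filtering applied before the consistency test is omitted here.

```python
import numpy as np
from scipy.spatial import cKDTree

def scene_f_score(pred, gt, rho=0.2):
    """Accuracy, completeness and their F1 at threshold rho for (N,3)/(M,3) point sets."""
    acc = float(np.mean(cKDTree(gt).query(pred)[0] < rho))    # predicted points near GT
    comp = float(np.mean(cKDTree(pred).query(gt)[0] < rho))   # GT points near predictions
    f1 = 0.0 if acc + comp == 0 else 2 * acc * comp / (acc + comp)
    return acc, comp, f1

def consistency(points_i, points_j, rho=0.2):
    """Simplified consistency: fraction of one camera's points within rho of the
    other camera's points, averaged over both directions (FoV filtering omitted)."""
    a = float(np.mean(cKDTree(points_i).query(points_j)[0] < rho))
    b = float(np.mean(cKDTree(points_j).query(points_i)[0] < rho))
    return 0.5 * (a + b)
```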
Doubled predictions of surfaces do not change the Scene F score results if they are within \\(\\rho\\). Prior work [17] used a detection-based method that penalized double surfaces on planar predictions, but their metric is not applicable since it requires pla" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9747" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.062, + 0.482, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.49, + 0.062, + 0.889, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.304, + 0.89, + 0.425 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.427, + 0.893, + 0.469 + ], + "angle": 0, + "content": "Figure 5. Qualitative results on held-out test scenes. Top row: Reconstruction from 3 images and compared with ground truth. Our method can reconstruct a complete scene structure within all the camera frustums, including the occluded surfaces. Bottom row: Predictions from 5 input images compared with ground truth. For the 2nd and 3rd examples, ceilings are removed to reveal the details of the scene." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.483, + 0.473, + 0.664 + ], + "angle": 0, + "content": "lar instances. We require a metric that can measure the consistency of 3D reconstruction of points in individual frustumums. Specifically, we would like to ensure that points \\(\\mathbf{P}_i\\) generated from all query rays originating from \\(\\mathbf{c}_i\\) of \\(\\pi_i\\) are consistent with points, \\(\\mathbf{P}_j\\), generated from by ray queries from \\(\\mathbf{c}_j\\) of \\(\\pi_j\\) at the intersection of frustumums of both the cameras. For every point, \\(\\mathbf{p} \\in \\mathbf{P}_j\\) and within the field of view of camera \\(i\\), we compute their minimum distance to points in \\(\\mathbf{P}_i\\). Our metric measures percent of points in the set \\(\\mathbf{P}_j\\) that have minimum distance within the threshold of \\(\\rho\\). We evaluate this metric bidirectionally to ensure complete results." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.678, + 0.172, + 0.693 + ], + "angle": 0, + "content": "4.4. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.703, + 0.47, + 0.824 + ], + "angle": 0, + "content": "Qualitative Results. Fig. 3 shows reconstruction from using query rays from the blue camera in Fig. 2. Occluded surfaces are colored with surface normals. DRDF [20] is unable to reconstruct the parts of the scene behind the wall with certainty and erroneously adds a full wall in front of the hallway. 3DFIREs fuses features from multiple images (Green and Purple camera in Fig. 2) accurately predicts the empty space." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Fig. 4 shows results unseen test scenes, and compares reconstruction of baselines. Red box crop show highlighted differences and provide a zoomed-in view for detailed examination. Depth only (MiDaS with ground truth scale and shift) reconstructs only visible regions this leaves holes such" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.483, + 0.893, + 0.694 + ], + "angle": 0, + "content": "as the missing surfaces behind chairs in Row 1; and absent floor sections in Row 4. MCC [43] tends to produce cloudy volumes and misses details like pillows and tables. 
Single-view DRDF (SV-DRDF) produces occluded regions and sharp surfaces but lacks consistency when aggregating results from multiple views. This is noticeable in its inability to reconstruct the occluded wall in Row 2, the creation of a doubled ceiling in Row 3 due to occlusions. 3DFIREs, effectively merges observations from multiple images, resulting in sharp and accurate reconstructions of both visible and hidden surfaces. By fusing information across views in the feature space, our method overcomes the limitations of other approaches. This ensures comprehensive and consistent scene-level reconstruction from few sparse views." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.698, + 0.892, + 0.804 + ], + "angle": 0, + "content": "In Fig. 5 we show additional alongside the ground truth. 3DFIREs successfully reconstructs large occluded areas, floors hidden by foreground objects (colored in pink), and unseen sides of objects such as the back of chairs in the first example and the kitchen islands in the second example. The reconstruction from multiple views demonstrates consistency and coherent surfaces in overlapping regions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.807, + 0.893, + 0.882 + ], + "angle": 0, + "content": "While our method is trained with up to three views, it seamlessly extends to five views. This adaptability stems from our architecture's inherent flexibility to the number of input views. With increasing views it predicts clean and coherent reconstructions within all the camera frustums." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.886, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Quantitative Results. We evaluate our method on sets of 1," + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9748" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.077, + 0.895, + 0.162 + ], + "angle": 0, + "content": "Table 1. Quantitative results on Scene F-score (\\(\\rho = 0.2\\)) for Hidden points, Visible points, All points. For 3 and 5 views, we evaluate Consistency. Depth only: visible surface upperbound is separated to indicate it has oracle information. Despite accurate reconstructions on visible surfaces, these lines of work cannot recover hidden surfaces, causing low overall performance. With 1 view, 3DFIREs is comparable to single view DRDF. With more views, 3DFIREs outperforms all the other baselines in F-score. There is large improvement in consistency metric compared to single view DRDF, showing that aggregating features produces a more coherent reconstruction. Full tables showing accuracy and completeness are in the supplemental." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.172, + 0.891, + 0.266 + ], + "angle": 0, + "content": "
1 view | 3 views | 5 views
Hidden ↑ | Visible ↑ | All ↑ | Hidden ↑ | Visible ↑ | All ↑ | Consistency ↑ | Hidden ↑ | Visible ↑ | All ↑ | Consistency ↑
Depth only | - | 85.31 | 60.12 | - | 87.84 | 63.90 | 72.79 | - | 91.29 | 69.40 | 72.57
MCC | 40.27 | 56.40 | 50.25 | 42.91 | 62.02 | 54.78 | 70.20 | 38.51 | 64.44 | 55.94 | 66.57
SV-DRDF | 53.36 | 73.45 | 65.21 | 48.02 | 76.19 | 65.61 | 76.44 | 47.51 | 81.31 | 70.54 | 78.13
3DFIRES | 53.34 | 74.29 | 65.71 | 49.99 | 76.74 | 66.56 | 85.48 | 49.52 | 81.74 | 71.41 | 85.92
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.275, + 0.471, + 0.318 + ], + "angle": 0, + "content": "Table 2. Ablation study on training strategies. GS: Gaussian sampling near intersection along the ray during training. Ray Attn: points along a query ray attend to each other." + }, + { + "type": "table", + "bbox": [ + 0.101, + 0.328, + 0.446, + 0.405 + ], + "angle": 0, + "content": "
Hidden | Visible | All | Consistency
-GS | 43.07 | 77.05 | 64.81 | 83.45
+Ray Attn. -GS | 47.09 | 77.60 | 65.58 | 83.27
+Ray Attn. +GS | 14.85 | 3.36 | 13.29 | 33.56
Ours | 50.20 | 77.30 | 66.46 | 85.45
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.418, + 0.471, + 0.475 + ], + "angle": 0, + "content": "Table 3. Quantitative results on noisy camera poses generated by LoFTR, evaluated on 3 view cases at \\(\\rho = 0.2\\). 3DFIREs assumes accurate pixel-aligned features but still produces more consistent reconstructions compared to not aggregating features." + }, + { + "type": "table", + "bbox": [ + 0.102, + 0.485, + 0.446, + 0.542 + ], + "angle": 0, + "content": "
3-View | Hidden | Visible | All | Consistency
SV-DRDF | 37.39 | 62.71 | 52.93 | 57.65
Ours | 38.85 | 62.40 | 53.19 | 65.71
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.566, + 0.469, + 0.642 + ], + "angle": 0, + "content": "3, 5 views respectively, as detailed in Tab. 1. Our approach, designed for flexible input views, matches prior works in single-view scene reconstruction and achieves state-of-the-art results with multiple input views. In single-image cases, it is comparable to the single-view DRDF baseline." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.642, + 0.469, + 0.778 + ], + "angle": 0, + "content": "For 3-view sets, our method outperforms MCC [43] or DRDF [21]. Although MiDaS with ground truth scale and shift demonstrates optimal visible surface reconstruction, it falls short in overall scene reconstruction because of no reconstruction on occluded surfaces. When evaluated on scene consistency, 3DFIREs shows a large absolute improvement of \\(>9\\%\\), over the second-best baseline, showing 3DFIREs's ability to aggregate features across views to produce consistent results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.778, + 0.469, + 0.853 + ], + "angle": 0, + "content": "The trend persists with 5-view inputs, where our method has the highest F score and consistency. Our method is not trained on 5-views subset but still remains robust to more input views enhancing the reconstruction quality in both visible and hidden surface reconstructions." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.863, + 0.292, + 0.879 + ], + "angle": 0, + "content": "4.5. Ablations and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.886, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Ablation study on training strategy. We conduct an ab" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.277, + 0.892, + 0.428 + ], + "angle": 0, + "content": "lation study (Tab. 2) to investigate the effectiveness of different training strategies for our method. Without Gaussian sampling or ray attention (-GS), the method has degraded performance \\((-7\\%\\) in hidden F score). With ray attention only (+Ray Attn. -GS), the method is able to better reconstruct the hidden surface but is still worse than ours \\((-3\\%)\\). With both ray attention and Gaussian sampling (+Ray Attn. +GS), the network finds shortcut during training and does not work during testing. With Gaussian sampling strategy, our method performs the best." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.429, + 0.893, + 0.625 + ], + "angle": 0, + "content": "Robustness with noisy camera poses. Our method requires accurate camera poses to aggregate pixel-aligned features. This setting is challenging with sparse view data since camera estimation can be noisy. We test if the misalignment of image features caused by noisy camera projection matrices degrades our system. We use LoFTR [37] to estimate the camera rotation and translation angle and evaluate the reconstruction within all the camera frustumums. Since LoFTR does not provide a translation scale, we use ground truth instead. Tab. 3 shows results on 3-view cases. Our method still has significantly higher consistency over single view DRDF baseline. We provide an analysis with synthetic Gaussian camera noise in the supplementary." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.638, + 0.627, + 0.654 + ], + "angle": 0, + "content": "5. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.662, + 0.892, + 0.83 + ], + "angle": 0, + "content": "We present 3DFIREs, a scene-level 3D reconstruction method that requires only one or a few posed images of a scene. 
Our method takes in an arbitrary number of input views, fuses multi-view information in the features space and predicts DRDF given a 3D point and query direction. We train our method on a large-scale scene dataset and show its strong ability to reconstruct both visible and hidden surfaces coherently within all the camera frustums on challenging wide-baseline images. Currently, our methods require pose input from off-the-shelf estimation methods, solving for 3D reconstruction and adapting the poses is a challenging next step and left to future work." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.835, + 0.892, + 0.892 + ], + "angle": 0, + "content": "Acknowledgments. Thanks to Mohamed Banani, Richard Higgins, Ziyang Chen for their helpful feedback. Thanks to UM ARC for computing support. Toyota Research Institute provided funds to support this work." + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9749" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.156 + ], + "angle": 0, + "content": "[1] Samir Agarwala, Linyi Jin, Chris Rockwell, and David F Fouhey. Planeformers: From sparse view planes to 3d reconstruction. In ECCV, 2022. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.157, + 0.47, + 0.198 + ], + "angle": 0, + "content": "[2] Julian Chibane, Aymen Mir, and Gerard Pons-Moll. Neural unsigned distance fields for implicit function learning. In NeurIPS, 2020. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.2, + 0.471, + 0.253 + ], + "angle": 0, + "content": "[3] Christopher B Choy, Danfei Xu, JunYoung Gwak, Kevin Chen, and Silvio Savarese. 3d-r2n2: A unified approach for single and multi-view 3d object reconstruction. In ECCV, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.256, + 0.468, + 0.296 + ], + "angle": 0, + "content": "[4] Manuel Dahnert, Ji Hou, Matthias Nießner, and Angela Dai. Panoptic 3d scene reconstruction from a single rgb image. NeurIPS, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.298, + 0.468, + 0.338 + ], + "angle": 0, + "content": "[5] Angela Dai, Christian Diller, and Matthias Nießner. Sg-nn: Sparse generative neural networks for self-supervised scene completion of rgb-d scans. In CVPR, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.34, + 0.468, + 0.38 + ], + "angle": 0, + "content": "[6] Andrew J Davison, Ian D Reid, Nicholas D Molton, and Olivier Stasse. Monoslam: Real-time single camera slam. TPAMI, 2007. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.382, + 0.468, + 0.422 + ], + "angle": 0, + "content": "[7] Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ramanan. Depth-supervised nef: Fewer views and faster training for free. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.424, + 0.468, + 0.465 + ], + "angle": 0, + "content": "[8] Ainaz Eftekhar, Alexander Sax, Jitendra Malik, and Amir Zamir. Omnidata: A scalable pipeline for making multi-task mid-level vision datasets from 3d scans. In ICCV, 2021. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.467, + 0.468, + 0.507 + ], + "angle": 0, + "content": "[9] Haoqiang Fan, Hao Su, and Leonidas J Guibas. A point set generation network for 3d object reconstruction from a single image. In CVPR, 2017. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.508, + 0.468, + 0.547 + ], + "angle": 0, + "content": "[10] Justin Johnson Georgia Gkioxari, Nikhila Ravi. Learning 3d object shape and layout without 3d supervision. CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.55, + 0.468, + 0.591 + ], + "angle": 0, + "content": "[11] Rohit Girdhar, David F Fouhey, Mikel Rodriguez, and Abhinav Gupta. Learning a predictable and generative vector representation for objects. In ECCV, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.593, + 0.468, + 0.619 + ], + "angle": 0, + "content": "[12] Georgia Gkioxari, Jitendra Malik, and Justin Johnson. Mesh r-cnn. In ICCV, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.621, + 0.468, + 0.66 + ], + "angle": 0, + "content": "[13] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.662, + 0.468, + 0.703 + ], + "angle": 0, + "content": "[14] Mariko Isogawa, Dorian Chan, Ye Yuan, Kris M. Kitani, and Matthew O'Toole. Efficient non-line-of-sight imaging from transient sinograms. In ECCV, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.468, + 0.731 + ], + "angle": 0, + "content": "[15] Hamid Izadinia, Qi Shan, and Steven M. Seitz. Im2cad. In CVPR, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.733, + 0.468, + 0.787 + ], + "angle": 0, + "content": "[16] Ziyu Jiang, Buyu Liu, Samuel Schulter, Zhangyang Wang, and Manmohan Chandraker. Peek-a-boo: Occlusion reasoning in indoor scenes with plane representations. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.468, + 0.829 + ], + "angle": 0, + "content": "[17] Linyi Jin, Shengyi Qian, Andrew Owens, and David F Fouhey. Planar surface reconstruction from sparse views. In ICCV, 2021. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.831, + 0.468, + 0.857 + ], + "angle": 0, + "content": "[18] Abhishek Kar, Christian Hane, and Jitendra Malik. Learning a multi-view stereo machine. NeurIPS, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.468, + 0.9 + ], + "angle": 0, + "content": "[19] Nilesh Kulkarni, Ishan Misra, Shubham Tulsiani, and Abhinav Gupta. 3d-relnet: Joint object and relational network for 3d prediction. In ICCV, 2019. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.471, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.133 + ], + "angle": 0, + "content": "[20] Nilesh Kulkarni, Justin Johnson, and David F Fouhey. Directed ray distance functions for 3d scene reconstruction. In ECCV, 2022. 1, 2, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.135, + 0.892, + 0.175 + ], + "angle": 0, + "content": "[21] Nilesh Kulkarni, Linyi Jin, Justin Johnson, and David F Fouhey. Learning to predict scene-level implicit 3d from posed rgbd data. In CVPR, 2023. 2, 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.178, + 0.892, + 0.23 + ], + "angle": 0, + "content": "[22] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.232, + 0.892, + 0.287 + ], + "angle": 0, + "content": "[23] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. 
Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.288, + 0.892, + 0.34 + ], + "angle": 0, + "content": "[24] Zak Murez, Tarrence Van As, James Bartolozzi, Ayan Sinha, Vijay Badrinarayanan, and Andrew Rabinovich. Atlas: End-to-end 3d scene reconstruction from posed images. In ECCV, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.343, + 0.892, + 0.398 + ], + "angle": 0, + "content": "[25] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang. Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.4, + 0.892, + 0.453 + ], + "angle": 0, + "content": "[26] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In CVPR, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.455, + 0.892, + 0.482 + ], + "angle": 0, + "content": "[27] Philip Pritchett and Andrew Zisserman. Wide baseline stereo matching. In ICCV, 1998. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.484, + 0.892, + 0.523 + ], + "angle": 0, + "content": "[28] Shengyi Qian, Linyi Jin, and David F Fouhey. Associative3d: Volumetric reconstruction from sparse views. In ECCV, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.525, + 0.892, + 0.564 + ], + "angle": 0, + "content": "[29] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. ICCV, 2021. 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.567, + 0.892, + 0.62 + ], + "angle": 0, + "content": "[30] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. TPAMI, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.623, + 0.892, + 0.663 + ], + "angle": 0, + "content": "[31] Shunsuke Saito, Tomas Simon, Jason Saragih, and Hanbyul Joo. Pifuhd: Multi-level pixel-aligned implicit function for high-resolution 3d human digitization. In CVPR, 2020. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.665, + 0.892, + 0.76 + ], + "angle": 0, + "content": "[32] Mehdi S. M. Sajjadi, Henning Meyer, Etienne Pot, Urs Bergmann, Klaus Greff, Noha Radwan, Suhani Vora, Mario Lucic, Daniel Duckworth, Alexey Dosovitskiy, Jakob Uszkoreit, Thomas Funkhouser, and Andrea Tagliasacchi. Scene Representation Transformer: Geometry-Free Novel View Synthesis Through Set-Latent Scene Representations. CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.762, + 0.892, + 0.801 + ], + "angle": 0, + "content": "[33] Daniel Scharstein and Richard Szeliski. A taxonomy and evaluation of dense two-frame stereo correspondence algorithms. IJCV, 2002. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.804, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[34] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.831, + 0.892, + 0.859 + ], + "angle": 0, + "content": "[35] Jonathan Shade, Steven Gortler, Li-wei He, and Richard Szeliski. Layered depth images. In Siggraph, 1998. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[36] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. NeurIPS, 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.956 + ], + "angle": 0, + "content": "9750" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.134 + ], + "angle": 0, + "content": "[37] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. LoFTR: Detector-free local feature matching with transformers. CVPR, 2021. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.136, + 0.469, + 0.176 + ], + "angle": 0, + "content": "[38] Jiaming Sun, Yiming Xie, Linghao Chen, Xiaowei Zhou, and Hujun Bao. Neuralrecon: Real-time coherent 3d reconstruction from monocular video. In CVPR, 2021. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.179, + 0.469, + 0.233 + ], + "angle": 0, + "content": "[39] Bin Tan, Nan Xue, Tianfu Wu, and Gui-Song Xia. Nope-sac: Neural one-plane ransac for sparse-view planar 3d reconstruction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2023. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.235, + 0.469, + 0.303 + ], + "angle": 0, + "content": "[40] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.305, + 0.469, + 0.358 + ], + "angle": 0, + "content": "[41] Shubham Tulsiani, Saurabh Gupta, David F Fouhey, Alexei A Efros, and Jitendra Malik. Factoring shape, pose, and layout from the 2d image of a 3d scene. In CVPR, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.362, + 0.469, + 0.429 + ], + "angle": 0, + "content": "[42] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul Srinivasan, Howard Zhou, Jonathan T. Barron, Ricardo MartinBrualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In CVPR, 2021. 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.433, + 0.469, + 0.485 + ], + "angle": 0, + "content": "[43] Chao-Yuan Wu, Justin Johnson, Jitendra Malik, Christoph Feichtenhofer, and Georgia Gkioxari. Multiview compressive coding for 3D reconstruction. CVPR, 2023. 1, 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.489, + 0.469, + 0.53 + ], + "angle": 0, + "content": "[44] Fei Xia, Amir R Zamir, Zhiyang He, Alexander Sax, Jitendra Malik, and Silvio Savarese. Gibson env: Real-world perception for embodied agents. In CVPR, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.532, + 0.469, + 0.585 + ], + "angle": 0, + "content": "[45] Yiming Xie, Matheus Gadelha, Fengting Yang, Xiaowei Zhou, and Huaizu Jiang. Planarrecon: Real-time 3d plane detection and reconstruction from posed monocular videos. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.589, + 0.469, + 0.629 + ], + "angle": 0, + "content": "[46] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 
2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.632, + 0.469, + 0.672 + ], + "angle": 0, + "content": "[47] Amir R. Zamir, Alexander Sax, William Shen, Leonidas J. Guibas, Jitendra Malik, and Silvio Savarese. Taskonomy: Disentangling task transfer learning. In CVPR, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.674, + 0.469, + 0.714 + ], + "angle": 0, + "content": "[48] Zhizhuo Zhou and Shubham Tulsiani. Sparsefusion: Distilling view-conditioned diffusion for 3d reconstruction. In CVPR, 2023. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.714 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.514, + 0.956 + ], + "angle": 0, + "content": "9751" + } + ] +] \ No newline at end of file diff --git a/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/9acb5370-2e99-4481-9b63-bbd93724edf4_origin.pdf b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/9acb5370-2e99-4481-9b63-bbd93724edf4_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3cf0473e4c9a079666a113b5fca1f77952a70e68 --- /dev/null +++ b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/9acb5370-2e99-4481-9b63-bbd93724edf4_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00d5139bef77e8661e3a9ebbe337e501d317fd033ed73ec949926f8e03d5ef67 +size 5998019 diff --git a/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/full.md b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1330f5213e5e61161426c97a5ef3b1777354cf08 --- /dev/null +++ b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/full.md @@ -0,0 +1,249 @@ +# 3DFIREs: Few Image 3D REconstruction for Scenes with Hidden Surfaces + +Linyi Jin $^{1}$ , Nilesh Kulkarni $^{1}$ , David F. Fouhey $^{2}$ $^{1}$ University of Michigan $^{1}$ , New York University $^{2}$ + +{jinlinyi,nileshk}@umich.edu,david.fouhey@nyu.edu + +# Abstract + +This paper introduces 3DFIRES, a novel system for scene-level 3D reconstruction from posed images. Designed to work with as few as one view, 3DFIRES reconstructs the complete geometry of unseen scenes, including hidden surfaces. With multiple view inputs, our method produces full reconstruction within all camera frustums. A key feature of our approach is the fusion of multi-view information at the feature level, enabling the production of coherent and comprehensive 3D reconstruction. We train our system on non-watertight scans from large-scale real scene dataset. We show it matches the efficacy of single-view reconstruction methods with only one input and surpasses existing techniques in both quantitative and qualitative measures for sparse-view 3D reconstruction. Project page: https://jinlinyi.github.io/3DFIRES/ + +# 1. Introduction + +Consider two views of the scene in Fig. 1. Part of the bedroom in View 1 is occluded by the wall, and so you may be uncertain what is behind it, although you might guess the wall continues. Now consider adding in View 2. You can see a bedside table, but little else. However, you can fuse these pieces together to create a consistent 3D sense of the scene viewed by the images, including both the visible and invisible parts. We use this sense when shopping for real estate or looking at a friend's photos. 
We estimate the structure of the scene from parts that are visible to all views; integrate information across images for parts that visible in one view but not others; and take educated guesses for completely occluded regions. Importantly, as the available data increases from one camera to a handful, we can seamlessly integrate the evidence across views. + +This task poses a challenge for current computer vision since it requires making judgments about visible and occluded 3D structures and integrating information across images with large pose change. These abilities are usually independently investigated in two separate strands of research. With single image reconstruction techniques [15, + +![](images/a20483cb3db04cf890d22acc12fd091c77b216b9e1dbfeae39b5dfc5f74dfec8.jpg) +Figure 1. Reconstructing 3D from sparsely posed images. Given a sparse set of posed image views, our method is able to reconstruct the full 3D of the scene. On the top, we show two sparse views of the scene in View 1 and View 2. On the bottom left is the 3D reconstruction from our network in the frustum of View 1. We show that our method can generate the occluded side table (zoom in). On the bottom right is the full reconstruction. We color occluded surfaces with surface normals. + +20, 26, 41, 43], one can predict both visible and occluded 3D structure from an image, but stacking such outputs from multiple images can produce inconsistent outputs. When handled independently, methods cannot identify the best view to reason about an occluded region. Non-line-of-sight imaging involves transmitting and receiving signals to reveal hidden scenes, incompatible with standard camera images [14]. Sparse view reconstruction methods [1, 17, 39] can create consistent reconstructions from two views; however, these approaches are limited to the visible parts of the scene that can decomposed into planes. Moreover, these methods are usually specialized to a particular number of images that can be accepted. + +Recently, there has been considerable progress in generalized radiance fields, which produce full 3D representations. This occupancy representation and per-scene optimization has shown promising results by optimizing for + +novel view synthesis on single scenes from posed images sets [7, 23, 36, 40]. Extending this line of work, methods like [32, 46] have shown an ability to predict novel views for unseen scenes from a few images. However, since these methods optimize for perceptual quality, the underlying geometry often has artifacts. Like them we also require one or more image views at input, but instead we predict an implicit function [20] that can reliably reconstruct both visible and occluded parts of previously unseen scenes. + +We propose 3DFIREs, Few Image 3D-REconstruction of Scenes, which integrates information from a variable number of images to produce a full reconstruction of the scene. 3DFIREs integrates information in the features space across a varying number of images, enabling it to identify how to best use the available image data to produce an accurate reconstruction at a point. As output, 3DFIREs produces a pixel-aligned implicit field based on a generalization of the Directed Ray Distance Function [20, 21], which enables high quality reconstructions. Thanks to integration in feature space, the results are more consistent than handling images independently: this is what enables reconstructing the bed-side table in Fig. 1, even though it is hidden by the wall in one image. 
We found and document several design decisions in terms of training and network architecture needed to produce these results. + +We evaluate our method on complex interior scenes from Omnidata [8, 33] dataset collected with a real scanner. We compare 3DFIRES with the point-space fusion of state-of-the-art methods for scene-level full 3D reconstruction methods from a single image [21, 43]. Our experiments show several key results. First, 3DFIRES produces more accurate results compared to existing works. The improvements are larger in hidden regions, and especially substantial when measuring consistency of prediction from multiple views. Second, ablative analysis reveals the key design decisions responsible for 3DFIRES's success. Third, 3DFIRES can generalize to variable views: we train on 1, 2, and 3 views and generalize to 5 views. Finally, 3DFIRES can reconstruct when given LoFTR [37] estimated poses with known translation scale. + +# 2. Related Works + +We aim to produce a coherent 3D scene reconstruction given a single or a few images with wide baselines. + +3D from Single Image. Predicting a complete 3D scene from a single image is inherently ambiguous. Recently different 3D representations have been proposed to reconstruct complete 3D scenes (including occluded surfaces) such as layered depth [35], voxels [3, 11, 19, 41], planes [16], point-clouds [9, 43], meshes [10, 12, 25], or implicit representation for objects [22, 26] and scenes [2, 4, 20, 21, 36]. While they have strong performance on single image, they do not necessarily produce coherent results when required to infer + +on multiple images of the same scene [21]. Our method can reconstruct hidden geometry from at least a single image using implicit representation from [20]. Instead of naively fusing point clouds from different images, we fuse features when predicting a multi-view consistent point cloud with few input images. + +3D from dense views. Traditional multi-view 3D reconstruction methods can produce accurate and coherent point clouds from pixel correspondences [33]. Classical methods in computer vision use approaches like Multi-view stereo (MVS) to construct only visible parts of the scene in all the images. There is a long line of work in trying to reconstruct scenes from video sequences [6, 34] where they reconstruct visible scenes and camera poses. Learning-based methods for MVS estimate geometry for scenes [18, 24, 38, 45] also require an input video to explicitly predict scene geometry. Instead of requiring high overlap inputs such as video frames, our method works on wide-baseline images. + +3D from sparse view inputs. Our approach operates in a multi-view setting with a sparse set of views. We have a similar setting as wide-baseline reconstruction [27]. Associative3D [28] reconstructs the whole scene but requires voxelized scenes to train, our method works on non-watertight scene data. Prior work also explores planar representation [1, 17, 39] for coherent 3D surfaces in non-watertight scenes. They use feed-forward networks to predict visible 3D surfaces for each view and merge them using predicted correspondences. Our approach leverages an implicit representation that accommodates non-watertight data, enabling the reconstruction of both visible and occluded surfaces. We fuse deep features from multiple views to predict DRDF representation from Kulkarni et al. [20], producing a coherent reconstruction. + +Novel view synthesis. 
NeRF [23] and its extensions [42, 46, 48] optimizes per-scene radiance fields for novel-view synthesis, this requires many views and test-time optimization. Due to its occupancy-based representation, extracting geometry often requires thresholding the density function, which leads to cloudy geometry with sparse input views. Our method directly predicts geometry from unseen images without the need for test-time optimization. PixelNerf [46] or SRT [32] can generalize to new scenes but their objectives optimize for photometric losses. + +# 3. Method + +Our goal is to predict an accurate and consistent 3D reconstruction from one or more sparsely spaced camera views and known poses. With one image, the method should predict all surfaces in the camera frustum, including visible and occluded regions. With more images, the method should predict the surfaces in the union of the frustum. + +We tackle this problem with 3DFIREs, a simple and effective approach designed for this setting. We first discuss + +![](images/6922e9ca25b1f1133670f996a2a19b275807d4d562e4c390adbe72a1217f3c0f.jpg) +Figure 2. (a) Architecture for single view DRDF [20]. Given an image and a query pixel location, it predicts DRDF along the ray from the query pixel. (b) we extend (a) to work on sparse views. Middle: Given N images, a query point $\mathbf{x}$ , and a query direction $\vec{\mathbf{r}}_q$ , we aggregate features from multiple images and output DRDF along the query ray. Right: We show detailed network architecture of 3DFIREs which consists of a Query Encoder and a DRDF Predictor. + +![](images/7684a2539dcb412f20ded22585efde547b7d9c4373bdbbfa8239a389a4a81897.jpg) + +![](images/b2c5411d94639627566952e36b10c4ac43aaf8bc93e09d0e1f363a6f63494efa.jpg) +Figure 3. Predictions in the blue camera frustum. Occluded surfaces are colored with surface normals. A single image to 3D method like DRDF [20] is unable to reconstruct the parts of the scene behind the wall with certainty and hence erroneously adds a full wall in front of the hallway (red box). 3DFIREs which fuses features from multiple views (Green and Purple camera in Fig. 2) predicts empty space for the entrance (black box). + +tackling scene reconstruction in a single image case in §3.1 using the Directed Ray Distance Function (DRDF) [20] and scale this approach to multiple image views in §3.2. In §3.3, we show how we can operationalize our multi-view reconstruction goal with an attention-based model architecture. + +# 3.1. Background Single View Reconstruction + +We begin by revisiting the DRDF formulation for a single image reconstruction. Consider a single image $\mathcal{I}$ , a single + +view implicit reconstruction method aims to produce the full 3D reconstruction for the scene from this image. At inference, when conditioned on image features, the method outputs a distance function for a pre-defined set of 3D points in the camera frustum. It then decodes this predicted distance function to a surface to recover the 3D geometry of the scene. For instance, if the predicted 3D distance function is an unsigned distance function [2], the points on the surface are with distances close to zero. + +Kulkarni et al. [20] solve the single image 3D reconstruction with the DRDF function and show that using the DRDF outperforms the standard unsigned distance function. The DRDF is a ray-based distance function measuring the distance of a point $\mathbf{x}$ to the nearest intersection with a surface along a ray $\vec{\mathbf{r}}$ . 
In [20], the ray on which distances are measured is the ray from the camera center $\mathbf{c}$ to $\mathbf{x}$ . + +Fig. 2 (a) shows the DRDF for one such ray. Now, any 3D point $\mathbf{x}$ can be represented as its distance towards the camera times a unit ray direction, or $z\vec{\mathbf{r}}$ , where $z \in \mathbb{R}$ and $\vec{\mathbf{r}} = \mathrm{norm}(\mathbf{x} - \mathbf{c})$ where $\mathrm{norm}(\mathbf{p}) = \mathbf{p} / ||\mathbf{p}||$ . The DRDF, $d_{\mathrm{DR}}(z\vec{\mathbf{r}})$ , furthermore includes a sign that determines for the point the direction along the ray towards the nearest intersection (i.e., forwards or backwards). Therefore $(z + d_{\mathrm{DR}}(z\vec{\mathbf{r}}))\vec{\mathbf{r}}$ corresponds to a point on the surface. + +The DRDF can be used to create a system that infers single image 3D by pairing the distance function at a point $\mathbf{x}$ with pixel-aligned features. At inference time, as shown in Fig. 2 (a), given a point $\mathbf{x}$ in the camera frustum we can extract corresponding pixel-aligned image features using an + +image backbone $\mathrm{BB}[\pi (\mathbf{x})]$ , and use an MLP to predict the DRDF value corresponding to the point $\mathbf{x}$ along the $\vec{\mathbf{r}}$ . Since DRDF is a ray-based function, its value only depends on the intersections along the ray. For any ray corresponding to a pixel on the image, the prediction of DRDF for the point depends on the image features, and the location of the point on the ray. This parameterization allows DRDF to learn sharp 3D reconstructions of the scene from a single RGB image. At training time, we train a model to predict the DRDF by supervising it with the ground-truth DRDF values computed using the mesh geometry. + +# 3.2. Extending DRDFs to Multiple Views + +Now, with multiple views we have: N images $\{\mathcal{I}_i\}_{i = 1}^{\mathrm{N}}$ relative camera transforms $\{\pi_i\}_{i = 1}^{\mathrm{N}}$ , and corresponding camera centers $\{\mathbf{c}_i\}_{i = 1}^{\mathrm{N}}$ , our goal is to reconstruct the 3D of the full scene. While the task could perhaps be accomplished by simply predicting individual 3D for each camera, and assembling them together. Our insight is that if the camera frustums have considerable overlap, for overlapping regions we can achieve a better and more consistent reconstruction by allowing the network to reason about which camera provides the best view for each point. This can be achieved by allowing the network to fuse features across cameras for the points in feature space rather than by concatenating in point space. We propose to improve the feature quality of any point $\mathbf{x}$ by fusing the features from multiple cameras. Since we are now dealing with the multi-view settings, a multi-view DRDF formulation is necessary to allow us to predict the DRDF value along each of the query rays, $\vec{\mathbf{r}}_q$ originating from the respective camera centers. + +In the case of multiple views, the image feature corresponding to a point $\mathbf{x}$ should be a fusion of features $\{\mathbf{f}_{\theta}[\pi_i(\mathbf{x})]\}_{i = 1}^{\mathrm{N}}$ . The feature should support predicting the N DRDF values along all the camera directions as $\{d_{\mathrm{DR}}(z_i\vec{\mathbf{r}}_i)\}_{i = 1}^{\mathrm{N}}$ . The intuition of our key idea is that multiple-image views provide more information about the 3D scene and hence potentially better features. We can learn these better features by fusing features to predict a consistent output. 
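Concretely, if a ray's surface intersections are known (e.g. from the mesh used for supervision), the ground-truth DRDF at a sample depth is simply the signed offset to the nearest intersection along the ray. The sketch below shows this, leaving out the truncation and log-space scaling applied before the loss.

```python
import numpy as np

def drdf_along_ray(z, hit_depths):
    """Ground-truth DRDF at depths z for a ray with surface intersections at
    hit_depths: the signed offset to the nearest hit, so (z + drdf) lies on a surface."""
    z = np.asarray(z, dtype=float)[:, None]               # (P, 1) sample depths
    hits = np.asarray(hit_depths, dtype=float)[None, :]   # (1, H) intersection depths
    diff = hits - z                                        # signed offset to every hit
    nearest = np.argmin(np.abs(diff), axis=1)              # nearest hit per sample
    return diff[np.arange(diff.shape[0]), nearest]

z = np.linspace(0.0, 8.0, 9)
print(drdf_along_ray(z, [2.5, 6.0]))   # positive approaching a hit, negative just past it
```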
This requires a novel architecture that attends to features and rays, $\{\vec{\mathbf{r}}_i\}_{i = 1}^{\mathrm{N}}$ , originating from all the available image views. Under this formulation single view DRDF is a special case of our formulation where $\mathbf{N}$ is 1. + +# 3.3. Network Architecture + +Towards the goal of predicting DRDFs along multiple query rays $\vec{\mathbf{r}}_q\in \{\vec{\mathbf{r}}_i\}_{i = 1}^{\mathrm{N}}$ , we present a simple and effective network 3DFIREs that accomplishes this task. 3DFIREs consists of three modules: The first module is a Backbone Feature Extractor that obtains pixel-aligned appearance features; by projecting the query point $\mathbf{x}$ onto the camera, we can obtain a per-point and per-camera appearance feature as in [20, 23, 31, 42, 46]. Since the appearance feature is + +per-image, the model must learn to aggregate information across cameras. This is done with our second component Query Encoder that provides geometric information for aggregating appearance features. Specifically, the query encoder uses the information about the relative positions of query point $\mathbf{x}$ and query direction $\vec{\mathbf{r}}_q$ w.r.t. cameras $\{\pi_i\}_{i=1}^N$ . The final module is the DRDF Predictor that takes appearance and query features to produce a DRDF value along the query direction $\vec{\mathbf{r}}_q$ by incorporating the appearance features (evidence for geometry) and query encoder features (evidence that relates different features). Fig. 3 shows an example on how integrating information across multiple views leads to better prediction for occluded parts of the scene. + +Backbone Feature Extractor. Our backbone features extractor $\mathrm{BB}(\cdot)$ aims to create appearance features from an image. It accepts an image $\mathcal{I}_i\in \mathbb{R}^{H\times W\times 3}$ and produces a grid of D-dimensional features $\mathbf{F}_i\in \mathbb{R}^{H'\times W'\times D_{\mathrm{img}}}$ . We use a pre-trained depth estimating vision transformer [29]. Feature extraction for each image proceeds independently using the same network. With extracted per-camera backbone features, $\mathbf{f}_i$ , for point $\mathbf{x}$ by interpolating features in $\{\mathbf{F}_i\}_{i = 1}^{\mathrm{N}}$ at the projection $\{\pi_i(\mathbf{x})\}_{i = 1}^{\mathrm{N}}$ correspondingly. + +Query Encoder. Our query encoder $q(\cdot)$ aims to enable a predictor to decide how to aggregate information across images. As input, the encoder takes a query 3D point $\mathbf{x}$ and a query direction $\vec{\mathbf{r}}_q$ . It additionally considers the backbone features, camera centers $\{\mathbf{c}_i\}_{i=1}^N$ and transforms $\{\pi_i\}_{i=1}^N$ . Our query encoding is the concatenation of: (i) the relative viewing direction in camera $i$ 's space $\Delta \vec{\mathbf{r}}_i(\vec{\mathbf{r}}_q) = [\vec{\mathbf{r}}_q - \mathrm{norm}(\mathbf{x} - \mathbf{c}_i), \vec{\mathbf{r}}_q \cdot \mathrm{norm}(\mathbf{x} - \mathbf{c}_i)] \in \mathbb{R}^4$ ; and (ii) the normalized device coordinates (NDC), coordinates of point $\mathbf{x}$ in the camera frame $\mathrm{ndc}_i(\mathbf{x}) \in \mathbb{R}^3$ . Intuitively this query representation, $\mathbf{q}_i = \{\Delta \vec{\mathbf{r}}_i, \mathrm{ndc}_i(\mathbf{x})\} \in \mathbb{R}^7$ enables reasoning such as: information about surfaces near $\mathbf{x}$ in direction $\vec{\mathbf{r}}_q$ is likely not visible in camera $i$ due to either angle or distance, so this feature ought to be weighted low. 
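A sketch of the per-camera query encoding just described. The 4x4 `ndc_from_world` matrix (the camera's projection to normalized device coordinates) is an assumed input, and the Fourier positional encoding below uses an assumed number of frequencies; only the $[\Delta \vec{\mathbf{r}}_i, \mathrm{ndc}_i(\mathbf{x})] \in \mathbb{R}^7$ layout follows the text.

```python
import numpy as np

def encode_query(x, r_q, cam_center, ndc_from_world):
    """Per-camera query q_i = [delta_r_i, ndc_i(x)] in R^7."""
    view_dir = (x - cam_center) / np.linalg.norm(x - cam_center)   # norm(x - c_i)
    delta_r = np.concatenate([r_q - view_dir, [r_q @ view_dir]])   # R^3 diff + dot -> R^4
    xh = ndc_from_world @ np.append(x, 1.0)                        # assumed projection to NDC
    return np.concatenate([delta_r, xh[:3] / xh[3]])               # q_i in R^7

def positional_encoding(q, n_freq=6):
    """Standard sin/cos featurization of the query vector (n_freq is assumed)."""
    freqs = 2.0 ** np.arange(n_freq) * np.pi
    return np.concatenate([np.sin(q[None, :] * freqs[:, None]).ravel(),
                           np.cos(q[None, :] * freqs[:, None]).ravel()])

q = encode_query(np.array([0.0, 0.0, 2.0]), np.array([0.0, 0.0, 1.0]),
                 cam_center=np.zeros(3), ndc_from_world=np.eye(4))
print(q.shape, positional_encoding(q).shape)   # (7,) (84,)
```

The encoded query is what gets concatenated with the pixel-aligned feature $\mathbf{f}_i$ before the attention-based fusion.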
The ray query vector is encoded in a positional encoding layer [40] with output dimension $D_{\mathrm{query}}$ . + +DRDF Predictor. For a query ray and point tuple, $\{\vec{\mathbf{r}}_q,\mathbf{x}\}$ , this model considers the image features $\{\mathbf{f}_i\}_{i = 1}^{\mathrm{N}}$ , and query features $\{\mathbf{q}_i\}_{i = 1}^N$ yielding a joint camera specific feature, $\{\mathbf{f}_i,\mathbf{q}_i\}_{i = 1}^{\mathrm{N}}$ , of dimension $D_{\mathrm{img}} + D_{\mathrm{query}}$ . Our self-attention attends over all these features to produce a weight $w_{i}$ per feature. We aggregate the features using this weight to produce a fused feature for the point $\mathbf{x}$ . We then use the fused feature to predict a DRDF value between $[-1,1]$ with the help of an MLP. This is akin to selecting cameras that are likely to contain the geometry information about the ray point tuple and predicting the geometry information. + +# 3.4. Training 3DFIREs + +The effectiveness of 3DFIREs is improved by getting details right during training. One observation is that sampling + +points near intersections gives improvements over uniform sampling because the scene-level space is predominantly empty. By increasing the density of sampled points near surface, the network can better learn the scene structure. We sample points along the ray as per a Gaussian distribution centered at the intersection. Prior work [42] involves applying ray attention which allows for samples along a ray to attend with each other before the final prediction. This has been shown to be effective. However, combining ray attention with Gaussian sampling during training enables the network to 'cheat'. Ray Attention exploits a train-time shortcut (query point density) to infer intersections. At inference as point density is uniform and this shortcut fails. Empirically we find Gaussian sampling alone to be more effective than ray attention. + +# 3.5. Implementation Details + +Training. Our image feature backbone is vision transformer [29] dpt_beit_large_384 pretrained by MiDaS [30]. We use $\ell_{1}$ loss on log-space truncated DRDF [5, 20, 38]. During training, we randomly sample 1, 2, 3 views with 80 rays per image and 512 points along each ray. Our method is trained for 300K iteration on NVIDIA A100 GPU with batch size of 1. More details in supp. + +Inference. Given N images, we extract backbone features for each image. We generate $n_{\mathrm{ray}} = 128 \times 128$ query rays from each camera. Along each ray, we sample $n_{\mathrm{pt}} = 256$ points that have uniformly spaced depth from 0 to 8m. In total, we get $\mathbf{N} \times n_{\mathrm{ray}} \times n_{\mathrm{pt}}$ query pairs $\{\mathbf{x}, \vec{\mathbf{r}}_q\}$ , which are fed to 3DFIREs in parallel to get DRDF value. We calculate positive-to-negative zero-crossings along each ray [20] to get a 3D point and aggregate the results. + +# 4. Experiment + +In this section, we present the experimental framework for 3DFIREs, our system designed to reconstruct full scene geometry from wide-baseline, sparse images. Considering the novelty of our problem, there is no prior work that does this exact setting. To address this, we curated a dataset and developed testing metrics specifically tailored to the problem's requirements. We conduct comprehensive evaluations of 3DFIREs using real scene images, comparing its performance against alternative methods in the field. + +# 4.1. 
Dataset + +Following [21], we use the dataset from the Gibson database [44], which contains real images of complex and diverse scenes such as multi-floor villas and expansive warehouses. The scale of the assets in the dataset presents challenging reconstruction problem, which is desirable for evaluating the ability to recover occluded surfaces. We use the images sampled by Omnidata [8] for a diverse set of + +camera poses from the Taskonomy [47] Medium subset, including 98/20/20 training/validation/test buildings. Since our multiview setting is different from the single-view setting of [21], the precise samples are different. Our setting is also similar to [17, 39] in that images have wide baselines (median $2.8\mathrm{m}$ translation, $63.9^{\circ}$ rotation), unlike methods using video frames [38] where images have high overlap. Our approach diverges from [17, 39] in also reconstructing occluded regions and using real (not rendered) images. + +To curate our image sets, we use a sampling process like [17]. For a set of $k$ images, after picking an image at random, each new image is selected to have at most $70\%$ overlap with any existing image in the set, and at least $30\%$ overlap with at least one other image in the set. The process balances diversity and coherence in the viewpoints. We crop images to a fixed field of view. We collect 3781 training sets among $\geq 10\mathrm{K}$ images. We also sample 300 sets of 3-view images and 100 sets of 5-view images for evaluation from the held-out test scenes. See the supplementary for dataset generation details. The 3 view and 5 view test set contain considerable occluded 3D geometry (41.9% and 43.7% respectively). + +# 4.2. Baselines + +To the best of our knowledge, no prior work reconstructs occluded regions from sparse-view images at scene scale. We thus create strong baselines from existing methods that handle parts of our setting. Each method is the strongest in its line of work. + +For instance, the visible surface upper-bound includes all methods that reconstruct visible surfaces from sparse views [17, 38, 39]. The DRDF method [20, 21] has been shown to be more effective for scene-level 3D reconstruction compared to many other implicit functions like density [46], occupancy [31], unsigned distance functions on scenes and rays [2]. MCC [43] is likewise SOTA for point cloud completion. + +Depth Only [8, 29] Prior state-of-the-art works on sparse scene reconstruction [38, 39] predict visible surfaces from multiple views, but cannot recover hidden surfaces. To show the near-oracle reconstruction of visible surfaces, we use MiDaS [29] depth model trained on Omnidata [8] with ground-truth scale and shift. This baseline is an upper bound on the performance of methods like [1, 17, 38, 39]. + +Multiview Compressive Coding (MCC) [43] This method predicts occupancy probability from RGB-D partial point clouds. MCC works on scene-level reconstructions including non-watertight meshes. We train MCC on the same training set as ours. This method requires depth as input and at inference we provide it with ground truth depth. Since MCC only works on a single point cloud, to produce predictions from multiple images, we infer each image independently and aggregate the predicted point cloud in point + +![](images/b8fa1bdef0a8b67e46cfe64e1e74cc419b2dcba4659f14e08d063451662d07f4.jpg) +Figure 4. Comparison between different methods on held-out test scene. Occluded surfaces are colored with the computed surface normals. 
"Depth only" leaves holes with sparse input views, e.g. absent floors and walls. Occupancy-based method MCC [43] produces cloudy results, failing to get the details like pillow, tables. Concatenation of single view DRDF (SV-DRDF) [20] produces inconsistent results, e.g. missing wall in row 2, the double wall in row 3. Our method produces more consistent predictions across different views and also recovers the hidden surface, resulting in a complete mesh. We urge the reader to see results provided in the supplementary videos. + +# cloud space. + +Single-view DRDF (SV-DRDF) [20] This method reconstructs both visible and hidden surfaces from a single input image. We use this baseline to show the benefit of our proposed multi-view feature aggregation. For a fair comparison, we upgrade the original backbone from ResNet34 [13] to the same BEiT [29] and use the same training strategy such as Gaussian sampling of points. Both improve results. Since this baseline only supports single image reconstruction, we produce predictions independently from each input image and aggregate all the point clouds. + +# 4.3. Evaluation Metrics + +We use two metrics to evaluate our system. + +Scene F score. Following [20, 43], we compute the scene accuracy (fraction of predicted points within $\rho$ of a ground truth point), completeness (fraction of ground truth points + +within $\rho$ from a predicted point), and their F-score (F1). This gives an overall summary of scene-level reconstruction. We classify the scene into (1) visible: points that are visible from any one of the input views; and (2) hidden: points that are hidden from all of the input views. Due to the space limit, we only show F-score at $\rho = 0.2$ . A full table with accuracy, completeness, F-score at different $\rho$ is in the supp. Trends are the same across values of $\rho$ and there is no significant accuracy/completeness imbalance for the baselines (MCC, SV-DRDF). + +Multiview consistency. Only measuring the F-score does not measure the consistency of 3D reconstruction when generating results from multiple views. Doubled predictions of surfaces do not change the Scene F score results if they are within $\rho$ . Prior work [17] used a detection-based method that penalized double surfaces on planar predictions, but their metric is not applicable since it requires pla + +![](images/9f9f10e95b7c42e73884dfe5ad593ab61c797a2c67aaa682658bd8ecfe41c92f.jpg) + +![](images/fee9a0347d0b3db85e0055b44cf9950c593c14cde05c3630f7c4098d987aa385.jpg) + +![](images/ae42aa271bbaedd6cef334145f3a6bee8be5040659c7285ca8b27397cea3c7e8.jpg) +Figure 5. Qualitative results on held-out test scenes. Top row: Reconstruction from 3 images and compared with ground truth. Our method can reconstruct a complete scene structure within all the camera frustums, including the occluded surfaces. Bottom row: Predictions from 5 input images compared with ground truth. For the 2nd and 3rd examples, ceilings are removed to reveal the details of the scene. + +lar instances. We require a metric that can measure the consistency of 3D reconstruction of points in individual frustumums. Specifically, we would like to ensure that points $\mathbf{P}_i$ generated from all query rays originating from $\mathbf{c}_i$ of $\pi_i$ are consistent with points, $\mathbf{P}_j$ , generated from by ray queries from $\mathbf{c}_j$ of $\pi_j$ at the intersection of frustumums of both the cameras. 
# 4.4. Results + +Qualitative Results. Fig. 3 shows the reconstruction obtained using query rays from the blue camera in Fig. 2. Occluded surfaces are colored with surface normals. DRDF [20] is unable to reconstruct the parts of the scene behind the wall with certainty and erroneously adds a full wall in front of the hallway. 3DFIRES, which fuses features from multiple images (green and purple cameras in Fig. 2), accurately predicts the empty space. + +Fig. 4 shows results on unseen test scenes and compares the reconstructions of the baselines. Red box crops highlight differences and provide a zoomed-in view for detailed examination. Depth only (MiDaS with ground-truth scale and shift) reconstructs only visible regions; this leaves holes such as the missing surfaces behind the chairs in Row 1 and the absent floor sections in Row 4. MCC [43] tends to produce cloudy volumes and misses details like pillows and tables. Single-view DRDF (SV-DRDF) produces occluded regions and sharp surfaces but lacks consistency when aggregating results from multiple views; this is noticeable in its inability to reconstruct the occluded wall in Row 2 and in the doubled ceiling in Row 3 caused by occlusions. 3DFIRES effectively merges observations from multiple images, resulting in sharp and accurate reconstructions of both visible and hidden surfaces. By fusing information across views in feature space, our method overcomes the limitations of the other approaches. This ensures comprehensive and consistent scene-level reconstruction from a few sparse views. + +In Fig. 5 we show additional results alongside the ground truth. 3DFIRES successfully reconstructs large occluded areas, floors hidden by foreground objects (colored in pink), and unseen sides of objects such as the backs of the chairs in the first example and the kitchen islands in the second example. The reconstructions from multiple views demonstrate consistency and coherent surfaces in overlapping regions. + +While our method is trained with up to three views, it seamlessly extends to five views. This adaptability stems from our architecture's inherent flexibility with respect to the number of input views. With more views, it predicts clean and coherent reconstructions within all the camera frustums. + +Quantitative Results. We evaluate our method on sets of 1, 3, and 5 views, as detailed in Tab. 1. + +Table 1. Quantitative results on Scene F-score ($\rho = 0.2$) for Hidden points, Visible points, and All points. For 3 and 5 views, we also evaluate Consistency. Depth only, the visible-surface upper bound, is separated to indicate that it has oracle information. Despite accurate reconstructions of visible surfaces, this line of work cannot recover hidden surfaces, causing low overall performance. With 1 view, 3DFIRES is comparable to single-view DRDF. With more views, 3DFIRES outperforms all the other baselines in F-score. There is a large improvement in the consistency metric compared to single-view DRDF, showing that aggregating features produces a more coherent reconstruction. Full tables showing accuracy and completeness are in the supplemental.
|  | 1 view |  |  | 3 views |  |  |  | 5 views |  |  |  |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Method | Hidden ↑ | Visible ↑ | All ↑ | Hidden ↑ | Visible ↑ | All ↑ | Consistency ↑ | Hidden ↑ | Visible ↑ | All ↑ | Consistency ↑ |
| Depth only | - | 85.31 | 60.12 | - | 87.84 | 63.90 | 72.79 | - | 91.29 | 69.40 | 72.57 |
| MCC | 40.27 | 56.40 | 50.25 | 42.91 | 62.02 | 54.78 | 70.20 | 38.51 | 64.44 | 55.94 | 66.57 |
| SV-DRDF | 53.36 | 73.45 | 65.21 | 48.02 | 76.19 | 65.61 | 76.44 | 47.51 | 81.31 | 70.54 | 78.13 |
| 3DFIRES | 53.34 | 74.29 | 65.71 | 49.99 | 76.74 | 66.56 | 85.48 | 49.52 | 81.74 | 71.41 | 85.92 |
+ +Table 2. Ablation study on training strategies. GS: Gaussian sampling near the intersection along the ray during training. Ray Attn: points along a query ray attend to each other. + +
|  | Hidden | Visible | All | Consistency |
| --- | --- | --- | --- | --- |
| -GS | 43.07 | 77.05 | 64.81 | 83.45 |
| +Ray Attn. -GS | 47.09 | 77.60 | 65.58 | 83.27 |
| +Ray Attn. +GS | 14.85 | 3.36 | 13.29 | 33.56 |
| Ours | 50.20 | 77.30 | 66.46 | 85.45 |
+ +Table 3. Quantitative results on noisy camera poses generated by LoFTR, evaluated on 3-view cases at $\rho = 0.2$. 3DFIRES assumes accurate pixel-aligned features but still produces more consistent reconstructions than not aggregating features. + +
| 3-View | Hidden | Visible | All | Consistency |
| --- | --- | --- | --- | --- |
| SV-DRDF | 37.39 | 62.71 | 52.93 | 57.65 |
| Ours | 38.85 | 62.40 | 53.19 | 65.71 |
Our approach, designed for flexible input views, matches prior works in single-view scene reconstruction and achieves state-of-the-art results with multiple input views. In single-image cases, it is comparable to the single-view DRDF baseline. + +For 3-view sets, our method outperforms MCC [43] and DRDF [21]. Although MiDaS with ground-truth scale and shift demonstrates optimal visible-surface reconstruction, it falls short in overall scene reconstruction because it does not reconstruct occluded surfaces. When evaluated on scene consistency, 3DFIRES shows a large absolute improvement of $>9\%$ over the second-best baseline, demonstrating its ability to aggregate features across views to produce consistent results. + +The trend persists with 5-view inputs, where our method has the highest F-score and consistency. Our method is not trained on the 5-view subset but remains robust to more input views, which enhances the reconstruction quality of both visible and hidden surfaces. + +# 4.5. Ablations and Analysis + +Ablation study on training strategy. We conduct an ablation study (Tab. 2) to investigate the effectiveness of different training strategies for our method. Without Gaussian sampling or ray attention (-GS), the method has degraded performance ($-7\%$ in hidden F-score). With ray attention only (+Ray Attn. -GS), the method better reconstructs hidden surfaces but is still worse than ours ($-3\%$). With both ray attention and Gaussian sampling (+Ray Attn. +GS), the network finds a shortcut during training and fails at test time. With the Gaussian sampling strategy alone, our method performs best. + +Robustness with noisy camera poses. Our method requires accurate camera poses to aggregate pixel-aligned features. This setting is challenging with sparse-view data since camera pose estimation can be noisy. We test whether the misalignment of image features caused by noisy camera projection matrices degrades our system. We use LoFTR [37] to estimate the camera rotation and translation and evaluate the reconstruction within all the camera frustums. Since LoFTR does not provide a translation scale, we use ground truth instead. Tab. 3 shows results on 3-view cases. Our method still has significantly higher consistency than the single-view DRDF baseline. We provide an analysis with synthetic Gaussian camera noise in the supplementary. + +# 5. Conclusions + +We present 3DFIRES, a scene-level 3D reconstruction method that requires only one or a few posed images of a scene. Our method takes in an arbitrary number of input views, fuses multi-view information in feature space, and predicts the DRDF given a 3D point and query direction. We train our method on a large-scale scene dataset and show its strong ability to reconstruct both visible and hidden surfaces coherently within all the camera frustums on challenging wide-baseline images. Currently, our method requires pose input from off-the-shelf estimation methods; jointly solving for 3D reconstruction and refining the poses is a challenging next step left to future work. + +Acknowledgments. Thanks to Mohamed Banani, Richard Higgins, and Ziyang Chen for their helpful feedback. Thanks to UM ARC for computing support. Toyota Research Institute provided funds to support this work. + +# References + +[1] Samir Agarwala, Linyi Jin, Chris Rockwell, and David F Fouhey. Planeformers: From sparse view planes to 3d reconstruction. In ECCV, 2022.
1, 2, 5 +[2] Julian Chibane, Aymen Mir, and Gerard Pons-Moll. Neural unsigned distance fields for implicit function learning. In NeurIPS, 2020. 2, 3, 5 +[3] Christopher B Choy, Danfei Xu, JunYoung Gwak, Kevin Chen, and Silvio Savarese. 3d-r2n2: A unified approach for single and multi-view 3d object reconstruction. In ECCV, 2016. 2 +[4] Manuel Dahnert, Ji Hou, Matthias Nießner, and Angela Dai. Panoptic 3d scene reconstruction from a single rgb image. NeurIPS, 2021. 2 +[5] Angela Dai, Christian Diller, and Matthias Nießner. Sg-nn: Sparse generative neural networks for self-supervised scene completion of rgb-d scans. In CVPR, 2020. 5 +[6] Andrew J Davison, Ian D Reid, Nicholas D Molton, and Olivier Stasse. Monoslam: Real-time single camera slam. TPAMI, 2007. 2 +[7] Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ramanan. Depth-supervised nerf: Fewer views and faster training for free. In CVPR, 2022. 2 +[8] Ainaz Eftekhar, Alexander Sax, Jitendra Malik, and Amir Zamir. Omnidata: A scalable pipeline for making multi-task mid-level vision datasets from 3d scans. In ICCV, 2021. 2, 5 +[9] Haoqiang Fan, Hao Su, and Leonidas J Guibas. A point set generation network for 3d object reconstruction from a single image. In CVPR, 2017. 2 +[10] Georgia Gkioxari, Nikhila Ravi, and Justin Johnson. Learning 3d object shape and layout without 3d supervision. CVPR, 2022. 2 +[11] Rohit Girdhar, David F Fouhey, Mikel Rodriguez, and Abhinav Gupta. Learning a predictable and generative vector representation for objects. In ECCV, 2016. 2 +[12] Georgia Gkioxari, Jitendra Malik, and Justin Johnson. Mesh r-cnn. In ICCV, 2019. 2 +[13] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 6 +[14] Mariko Isogawa, Dorian Chan, Ye Yuan, Kris M. Kitani, and Matthew O'Toole. Efficient non-line-of-sight imaging from transient sinograms. In ECCV, 2020. 1 +[15] Hamid Izadinia, Qi Shan, and Steven M. Seitz. Im2cad. In CVPR, 2017. 1 +[16] Ziyu Jiang, Buyu Liu, Samuel Schulter, Zhangyang Wang, and Manmohan Chandraker. Peek-a-boo: Occlusion reasoning in indoor scenes with plane representations. In CVPR, 2020. 2 +[17] Linyi Jin, Shengyi Qian, Andrew Owens, and David F Fouhey. Planar surface reconstruction from sparse views. In ICCV, 2021. 1, 2, 5, 6 +[18] Abhishek Kar, Christian Hane, and Jitendra Malik. Learning a multi-view stereo machine. NeurIPS, 2017. 2 +[19] Nilesh Kulkarni, Ishan Misra, Shubham Tulsiani, and Abhinav Gupta. 3d-relnet: Joint object and relational network for 3d prediction. In ICCV, 2019. 2 + +[20] Nilesh Kulkarni, Justin Johnson, and David F Fouhey. Directed ray distance functions for 3d scene reconstruction. In ECCV, 2022. 1, 2, 3, 4, 5, 6, 7 +[21] Nilesh Kulkarni, Linyi Jin, Justin Johnson, and David F Fouhey. Learning to predict scene-level implicit 3d from posed rgbd data. In CVPR, 2023. 2, 5, 8 +[22] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In CVPR, 2019. 2 +[23] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 2, 4 +[24] Zak Murez, Tarrence Van As, James Bartolozzi, Ayan Sinha, Vijay Badrinarayanan, and Andrew Rabinovich. Atlas: End-to-end 3d scene reconstruction from posed images. In ECCV, 2020. 2 +[25] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang.
Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In CVPR, 2020. 2 +[26] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In CVPR, 2019. 1, 2 +[27] Philip Pritchett and Andrew Zisserman. Wide baseline stereo matching. In ICCV, 1998. 2 +[28] Shengyi Qian, Linyi Jin, and David F Fouhey. Associative3d: Volumetric reconstruction from sparse views. In ECCV, 2020. 2 +[29] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. ICCV, 2021. 4, 5, 6 +[30] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. TPAMI, 2022. 5 +[31] Shunsuke Saito, Tomas Simon, Jason Saragih, and Hanbyul Joo. Pifuhd: Multi-level pixel-aligned implicit function for high-resolution 3d human digitization. In CVPR, 2020. 4, 5 +[32] Mehdi S. M. Sajjadi, Henning Meyer, Etienne Pot, Urs Bergmann, Klaus Greff, Noha Radwan, Suhani Vora, Mario Lucic, Daniel Duckworth, Alexey Dosovitskiy, Jakob Uszkoreit, Thomas Funkhouser, and Andrea Tagliasacchi. Scene Representation Transformer: Geometry-Free Novel View Synthesis Through Set-Latent Scene Representations. CVPR, 2022. 2 +[33] Daniel Scharstein and Richard Szeliski. A taxonomy and evaluation of dense two-frame stereo correspondence algorithms. IJCV, 2002. 2 +[34] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 2 +[35] Jonathan Shade, Steven Gortler, Li-wei He, and Richard Szeliski. Layered depth images. In Siggraph, 1998. 2 +[36] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. NeurIPS, 2020. 2 + +[37] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. LoFTR: Detector-free local feature matching with transformers. CVPR, 2021. 2, 8 +[38] Jiaming Sun, Yiming Xie, Linghao Chen, Xiaowei Zhou, and Hujun Bao. Neuralrecon: Real-time coherent 3d reconstruction from monocular video. In CVPR, 2021. 2, 5 +[39] Bin Tan, Nan Xue, Tianfu Wu, and Gui-Song Xia. Nope-sac: Neural one-plane ransac for sparse-view planar 3d reconstruction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2023. 1, 2, 5 +[40] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020. 2, 4 +[41] Shubham Tulsiani, Saurabh Gupta, David F Fouhey, Alexei A Efros, and Jitendra Malik. Factoring shape, pose, and layout from the 2d image of a 3d scene. In CVPR, 2018. 1, 2 +[42] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul Srinivasan, Howard Zhou, Jonathan T. Barron, Ricardo MartinBrualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In CVPR, 2021. 2, 4, 5 +[43] Chao-Yuan Wu, Justin Johnson, Jitendra Malik, Christoph Feichtenhofer, and Georgia Gkioxari. Multiview compressive coding for 3D reconstruction. CVPR, 2023. 1, 2, 5, 6, 7, 8 +[44] Fei Xia, Amir R Zamir, Zhiyang He, Alexander Sax, Jitendra Malik, and Silvio Savarese. Gibson env: Real-world perception for embodied agents. In CVPR, 2018. 
5 +[45] Yiming Xie, Matheus Gadelha, Fengting Yang, Xiaowei Zhou, and Huaizu Jiang. Planarrecon: Real-time 3d plane detection and reconstruction from posed monocular videos. In CVPR, 2022. 2 +[46] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2, 4, 5 +[47] Amir R. Zamir, Alexander Sax, William Shen, Leonidas J. Guibas, Jitendra Malik, and Silvio Savarese. Taskonomy: Disentangling task transfer learning. In CVPR, 2018. 5 +[48] Zhizhuo Zhou and Shubham Tulsiani. Sparsefusion: Distilling view-conditioned diffusion for 3d reconstruction. In CVPR, 2023. 2 \ No newline at end of file diff --git a/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/images.zip b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..194d140c80941ce8ed889b9b32c9a9ac406aa8a5 --- /dev/null +++ b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c15cc558b492cc5af3030ee04d306366bfc02f4cab5e4a67cf3ac056264e46b9 +size 700777 diff --git a/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/layout.json b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..221ff58cfe27998c4ff32940ab84eac450d8283e --- /dev/null +++ b/2024/3DFIRES_ Few Image 3D REconstruction for Scenes with Hidden Surfaces/layout.json @@ -0,0 +1,7679 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 66, + 103, + 527, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 103, + 527, + 120 + ], + "spans": [ + { + "bbox": [ + 66, + 103, + 527, + 120 + ], + "type": "text", + "content": "3DFIREs: Few Image 3D REconstruction for Scenes with Hidden Surfaces" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "spans": [ + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "text", + "content": "Linyi Jin" + }, + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "text", + "content": ", Nilesh Kulkarni" + }, + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "text", + "content": ", David F. 
Fouhey" + }, + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "text", + "content": "University of Michigan" + }, + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "text", + "content": ", New York University" + }, + { + "bbox": [ + 181, + 142, + 412, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 162, + 174, + 432, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 174, + 432, + 185 + ], + "spans": [ + { + "bbox": [ + 162, + 174, + 432, + 185 + ], + "type": "text", + "content": "{jinlinyi,nileshk}@umich.edu,david.fouhey@nyu.edu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 238, + 290, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 238, + 290, + 418 + ], + "spans": [ + { + "bbox": [ + 46, + 238, + 290, + 418 + ], + "type": "text", + "content": "This paper introduces 3DFIRES, a novel system for scene-level 3D reconstruction from posed images. Designed to work with as few as one view, 3DFIRES reconstructs the complete geometry of unseen scenes, including hidden surfaces. With multiple view inputs, our method produces full reconstruction within all camera frustums. A key feature of our approach is the fusion of multi-view information at the feature level, enabling the production of coherent and comprehensive 3D reconstruction. We train our system on non-watertight scans from large-scale real scene dataset. We show it matches the efficacy of single-view reconstruction methods with only one input and surpasses existing techniques in both quantitative and qualitative measures for sparse-view 3D reconstruction. Project page: https://jinlinyi.github.io/3DFIRES/" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 441, + 128, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 441, + 128, + 453 + ], + "spans": [ + { + "bbox": [ + 47, + 441, + 128, + 453 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 461, + 287, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 461, + 287, + 640 + ], + "spans": [ + { + "bbox": [ + 46, + 461, + 287, + 640 + ], + "type": "text", + "content": "Consider two views of the scene in Fig. 1. Part of the bedroom in View 1 is occluded by the wall, and so you may be uncertain what is behind it, although you might guess the wall continues. Now consider adding in View 2. You can see a bedside table, but little else. However, you can fuse these pieces together to create a consistent 3D sense of the scene viewed by the images, including both the visible and invisible parts. We use this sense when shopping for real estate or looking at a friend's photos. 
We estimate the structure of the scene from parts that are visible to all views; integrate information across images for parts that visible in one view but not others; and take educated guesses for completely occluded regions. Importantly, as the available data increases from one camera to a handful, we can seamlessly integrate the evidence across views." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "content": "This task poses a challenge for current computer vision since it requires making judgments about visible and occluded 3D structures and integrating information across images with large pose change. These abilities are usually independently investigated in two separate strands of research. With single image reconstruction techniques [15," + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 217, + 545, + 400 + ], + "blocks": [ + { + "bbox": [ + 310, + 217, + 545, + 400 + ], + "lines": [ + { + "bbox": [ + 310, + 217, + 545, + 400 + ], + "spans": [ + { + "bbox": [ + 310, + 217, + 545, + 400 + ], + "type": "image", + "image_path": "a20483cb3db04cf890d22acc12fd091c77b216b9e1dbfeae39b5dfc5f74dfec8.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 410, + 547, + 498 + ], + "lines": [ + { + "bbox": [ + 304, + 410, + 547, + 498 + ], + "spans": [ + { + "bbox": [ + 304, + 410, + 547, + 498 + ], + "type": "text", + "content": "Figure 1. Reconstructing 3D from sparsely posed images. Given a sparse set of posed image views, our method is able to reconstruct the full 3D of the scene. On the top, we show two sparse views of the scene in View 1 and View 2. On the bottom left is the 3D reconstruction from our network in the frustum of View 1. We show that our method can generate the occluded side table (zoom in). On the bottom right is the full reconstruction. We color occluded surfaces with surface normals." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 510, + 547, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 547, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 547, + 665 + ], + "type": "text", + "content": "20, 26, 41, 43], one can predict both visible and occluded 3D structure from an image, but stacking such outputs from multiple images can produce inconsistent outputs. When handled independently, methods cannot identify the best view to reason about an occluded region. Non-line-of-sight imaging involves transmitting and receiving signals to reveal hidden scenes, incompatible with standard camera images [14]. Sparse view reconstruction methods [1, 17, 39] can create consistent reconstructions from two views; however, these approaches are limited to the visible parts of the scene that can decomposed into planes. Moreover, these methods are usually specialized to a particular number of images that can be accepted." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": "Recently, there has been considerable progress in generalized radiance fields, which produce full 3D representations. This occupancy representation and per-scene optimization has shown promising results by optimizing for" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9742" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 179 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 179 + ], + "type": "text", + "content": "novel view synthesis on single scenes from posed images sets [7, 23, 36, 40]. Extending this line of work, methods like [32, 46] have shown an ability to predict novel views for unseen scenes from a few images. However, since these methods optimize for perceptual quality, the underlying geometry often has artifacts. Like them we also require one or more image views at input, but instead we predict an implicit function [20] that can reliably reconstruct both visible and occluded parts of previously unseen scenes." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 180, + 289, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 180, + 289, + 371 + ], + "spans": [ + { + "bbox": [ + 46, + 180, + 289, + 371 + ], + "type": "text", + "content": "We propose 3DFIREs, Few Image 3D-REconstruction of Scenes, which integrates information from a variable number of images to produce a full reconstruction of the scene. 3DFIREs integrates information in the features space across a varying number of images, enabling it to identify how to best use the available image data to produce an accurate reconstruction at a point. As output, 3DFIREs produces a pixel-aligned implicit field based on a generalization of the Directed Ray Distance Function [20, 21], which enables high quality reconstructions. Thanks to integration in feature space, the results are more consistent than handling images independently: this is what enables reconstructing the bed-side table in Fig. 1, even though it is hidden by the wall in one image. 
We found and document several design decisions in terms of training and network architecture needed to produce these results." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 371, + 289, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 371, + 289, + 550 + ], + "spans": [ + { + "bbox": [ + 46, + 371, + 289, + 550 + ], + "type": "text", + "content": "We evaluate our method on complex interior scenes from Omnidata [8, 33] dataset collected with a real scanner. We compare 3DFIRES with the point-space fusion of state-of-the-art methods for scene-level full 3D reconstruction methods from a single image [21, 43]. Our experiments show several key results. First, 3DFIRES produces more accurate results compared to existing works. The improvements are larger in hidden regions, and especially substantial when measuring consistency of prediction from multiple views. Second, ablative analysis reveals the key design decisions responsible for 3DFIRES's success. Third, 3DFIRES can generalize to variable views: we train on 1, 2, and 3 views and generalize to 5 views. Finally, 3DFIRES can reconstruct when given LoFTR [37] estimated poses with known translation scale." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 562, + 139, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 562, + 139, + 574 + ], + "spans": [ + { + "bbox": [ + 47, + 562, + 139, + 574 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 582, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 605 + ], + "type": "text", + "content": "We aim to produce a coherent 3D scene reconstruction given a single or a few images with wide baselines." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": "3D from Single Image. Predicting a complete 3D scene from a single image is inherently ambiguous. Recently different 3D representations have been proposed to reconstruct complete 3D scenes (including occluded surfaces) such as layered depth [35], voxels [3, 11, 19, 41], planes [16], point-clouds [9, 43], meshes [10, 12, 25], or implicit representation for objects [22, 26] and scenes [2, 4, 20, 21, 36]. While they have strong performance on single image, they do not necessarily produce coherent results when required to infer" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "on multiple images of the same scene [21]. Our method can reconstruct hidden geometry from at least a single image using implicit representation from [20]. Instead of naively fusing point clouds from different images, we fuse features when predicting a multi-view consistent point cloud with few input images." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 144, + 546, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 144, + 546, + 287 + ], + "spans": [ + { + "bbox": [ + 304, + 144, + 546, + 287 + ], + "type": "text", + "content": "3D from dense views. 
Traditional multi-view 3D reconstruction methods can produce accurate and coherent point clouds from pixel correspondences [33]. Classical methods in computer vision use approaches like Multi-view stereo (MVS) to construct only visible parts of the scene in all the images. There is a long line of work in trying to reconstruct scenes from video sequences [6, 34] where they reconstruct visible scenes and camera poses. Learning-based methods for MVS estimate geometry for scenes [18, 24, 38, 45] also require an input video to explicitly predict scene geometry. Instead of requiring high overlap inputs such as video frames, our method works on wide-baseline images." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 288, + 546, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 288, + 546, + 467 + ], + "spans": [ + { + "bbox": [ + 304, + 288, + 546, + 467 + ], + "type": "text", + "content": "3D from sparse view inputs. Our approach operates in a multi-view setting with a sparse set of views. We have a similar setting as wide-baseline reconstruction [27]. Associative3D [28] reconstructs the whole scene but requires voxelized scenes to train, our method works on non-watertight scene data. Prior work also explores planar representation [1, 17, 39] for coherent 3D surfaces in non-watertight scenes. They use feed-forward networks to predict visible 3D surfaces for each view and merge them using predicted correspondences. Our approach leverages an implicit representation that accommodates non-watertight data, enabling the reconstruction of both visible and occluded surfaces. We fuse deep features from multiple views to predict DRDF representation from Kulkarni et al. [20], producing a coherent reconstruction." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 467, + 545, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 467, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 304, + 467, + 545, + 586 + ], + "type": "text", + "content": "Novel view synthesis. NeRF [23] and its extensions [42, 46, 48] optimizes per-scene radiance fields for novel-view synthesis, this requires many views and test-time optimization. Due to its occupancy-based representation, extracting geometry often requires thresholding the density function, which leads to cloudy geometry with sparse input views. Our method directly predicts geometry from unseen images without the need for test-time optimization. PixelNerf [46] or SRT [32] can generalize to new scenes but their objectives optimize for photometric losses." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 597, + 362, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 597, + 362, + 609 + ], + "spans": [ + { + "bbox": [ + 306, + 597, + 362, + 609 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 617, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 689 + ], + "type": "text", + "content": "Our goal is to predict an accurate and consistent 3D reconstruction from one or more sparsely spaced camera views and known poses. With one image, the method should predict all surfaces in the camera frustum, including visible and occluded regions. With more images, the method should predict the surfaces in the union of the frustum." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "We tackle this problem with 3DFIREs, a simple and effective approach designed for this setting. We first discuss" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9743" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 61, + 187, + 297 + ], + "blocks": [ + { + "bbox": [ + 49, + 61, + 187, + 297 + ], + "lines": [ + { + "bbox": [ + 49, + 61, + 187, + 297 + ], + "spans": [ + { + "bbox": [ + 49, + 61, + 187, + 297 + ], + "type": "image", + "image_path": "6922e9ca25b1f1133670f996a2a19b275807d4d562e4c390adbe72a1217f3c0f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 307, + 546, + 352 + ], + "lines": [ + { + "bbox": [ + 46, + 307, + 546, + 352 + ], + "spans": [ + { + "bbox": [ + 46, + 307, + 546, + 352 + ], + "type": "text", + "content": "Figure 2. (a) Architecture for single view DRDF [20]. Given an image and a query pixel location, it predicts DRDF along the ray from the query pixel. (b) we extend (a) to work on sparse views. Middle: Given N images, a query point " + }, + { + "bbox": [ + 46, + 307, + 546, + 352 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 307, + 546, + 352 + ], + "type": "text", + "content": ", and a query direction " + }, + { + "bbox": [ + 46, + 307, + 546, + 352 + ], + "type": "inline_equation", + "content": "\\vec{\\mathbf{r}}_q" + }, + { + "bbox": [ + 46, + 307, + 546, + 352 + ], + "type": "text", + "content": ", we aggregate features from multiple images and output DRDF along the query ray. Right: We show detailed network architecture of 3DFIREs which consists of a Query Encoder and a DRDF Predictor." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 197, + 61, + 543, + 297 + ], + "blocks": [ + { + "bbox": [ + 197, + 61, + 543, + 297 + ], + "lines": [ + { + "bbox": [ + 197, + 61, + 543, + 297 + ], + "spans": [ + { + "bbox": [ + 197, + 61, + 543, + 297 + ], + "type": "image", + "image_path": "7684a2539dcb412f20ded22585efde547b7d9c4373bdbbfa8239a389a4a81897.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 49, + 361, + 287, + 513 + ], + "blocks": [ + { + "bbox": [ + 49, + 361, + 287, + 513 + ], + "lines": [ + { + "bbox": [ + 49, + 361, + 287, + 513 + ], + "spans": [ + { + "bbox": [ + 49, + 361, + 287, + 513 + ], + "type": "image", + "image_path": "b2c5411d94639627566952e36b10c4ac43aaf8bc93e09d0e1f363a6f63494efa.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 524, + 287, + 601 + ], + "lines": [ + { + "bbox": [ + 46, + 524, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 46, + 524, + 287, + 601 + ], + "type": "text", + "content": "Figure 3. Predictions in the blue camera frustum. Occluded surfaces are colored with surface normals. 
A single image to 3D method like DRDF [20] is unable to reconstruct the parts of the scene behind the wall with certainty and hence erroneously adds a full wall in front of the hallway (red box). 3DFIREs which fuses features from multiple views (Green and Purple camera in Fig. 2) predicts empty space for the entrance (black box)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 603, + 287, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 603, + 287, + 662 + ], + "spans": [ + { + "bbox": [ + 46, + 603, + 287, + 662 + ], + "type": "text", + "content": "tackling scene reconstruction in a single image case in §3.1 using the Directed Ray Distance Function (DRDF) [20] and scale this approach to multiple image views in §3.2. In §3.3, we show how we can operationalize our multi-view reconstruction goal with an attention-based model architecture." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 670, + 258, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 258, + 683 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 258, + 683 + ], + "type": "text", + "content": "3.1. Background Single View Reconstruction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "content": "We begin by revisiting the DRDF formulation for a single image reconstruction. Consider a single image " + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "content": ", a single" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 363, + 545, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 363, + 545, + 469 + ], + "spans": [ + { + "bbox": [ + 304, + 363, + 545, + 469 + ], + "type": "text", + "content": "view implicit reconstruction method aims to produce the full 3D reconstruction for the scene from this image. At inference, when conditioned on image features, the method outputs a distance function for a pre-defined set of 3D points in the camera frustum. It then decodes this predicted distance function to a surface to recover the 3D geometry of the scene. For instance, if the predicted 3D distance function is an unsigned distance function [2], the points on the surface are with distances close to zero." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 472, + 545, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 472, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 304, + 472, + 545, + 555 + ], + "type": "text", + "content": "Kulkarni et al. [20] solve the single image 3D reconstruction with the DRDF function and show that using the DRDF outperforms the standard unsigned distance function. 
The DRDF is a ray-based distance function measuring the distance of a point " + }, + { + "bbox": [ + 304, + 472, + 545, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 472, + 545, + 555 + ], + "type": "text", + "content": " to the nearest intersection with a surface along a ray " + }, + { + "bbox": [ + 304, + 472, + 545, + 555 + ], + "type": "inline_equation", + "content": "\\vec{\\mathbf{r}}" + }, + { + "bbox": [ + 304, + 472, + 545, + 555 + ], + "type": "text", + "content": ". In [20], the ray on which distances are measured is the ray from the camera center " + }, + { + "bbox": [ + 304, + 472, + 545, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 304, + 472, + 545, + 555 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 472, + 545, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 472, + 545, + 555 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "text", + "content": "Fig. 2 (a) shows the DRDF for one such ray. Now, any 3D point " + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "text", + "content": " can be represented as its distance towards the camera times a unit ray direction, or " + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "inline_equation", + "content": "z\\vec{\\mathbf{r}}" + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "inline_equation", + "content": "z \\in \\mathbb{R}" + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "inline_equation", + "content": "\\vec{\\mathbf{r}} = \\mathrm{norm}(\\mathbf{x} - \\mathbf{c})" + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "inline_equation", + "content": "\\mathrm{norm}(\\mathbf{p}) = \\mathbf{p} / ||\\mathbf{p}||" + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "text", + "content": ". The DRDF, " + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{DR}}(z\\vec{\\mathbf{r}})" + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "text", + "content": ", furthermore includes a sign that determines for the point the direction along the ray towards the nearest intersection (i.e., forwards or backwards). Therefore " + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "inline_equation", + "content": "(z + d_{\\mathrm{DR}}(z\\vec{\\mathbf{r}}))\\vec{\\mathbf{r}}" + }, + { + "bbox": [ + 304, + 556, + 545, + 652 + ], + "type": "text", + "content": " corresponds to a point on the surface." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": "The DRDF can be used to create a system that infers single image 3D by pairing the distance function at a point " + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": " with pixel-aligned features. At inference time, as shown in Fig. 2 (a), given a point " + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": " in the camera frustum we can extract corresponding pixel-aligned image features using an" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9744" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 205 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 205 + ], + "type": "text", + "content": "image backbone " + }, + { + "bbox": [ + 46, + 72, + 289, + 205 + ], + "type": "inline_equation", + "content": "\\mathrm{BB}[\\pi (\\mathbf{x})]" + }, + { + "bbox": [ + 46, + 72, + 289, + 205 + ], + "type": "text", + "content": ", and use an MLP to predict the DRDF value corresponding to the point " + }, + { + "bbox": [ + 46, + 72, + 289, + 205 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 72, + 289, + 205 + ], + "type": "text", + "content": " along the " + }, + { + "bbox": [ + 46, + 72, + 289, + 205 + ], + "type": "inline_equation", + "content": "\\vec{\\mathbf{r}}" + }, + { + "bbox": [ + 46, + 72, + 289, + 205 + ], + "type": "text", + "content": ". Since DRDF is a ray-based function, its value only depends on the intersections along the ray. For any ray corresponding to a pixel on the image, the prediction of DRDF for the point depends on the image features, and the location of the point on the ray. This parameterization allows DRDF to learn sharp 3D reconstructions of the scene from a single RGB image. At training time, we train a model to predict the DRDF by supervising it with the ground-truth DRDF values computed using the mesh geometry." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 212, + 242, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 212, + 242, + 224 + ], + "spans": [ + { + "bbox": [ + 47, + 212, + 242, + 224 + ], + "type": "text", + "content": "3.2. 
Extending DRDFs to Multiple Views" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "spans": [ + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "text", + "content": "Now, with multiple views we have: N images " + }, + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{I}_i\\}_{i = 1}^{\\mathrm{N}}" + }, + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "text", + "content": " relative camera transforms " + }, + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "inline_equation", + "content": "\\{\\pi_i\\}_{i = 1}^{\\mathrm{N}}" + }, + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "text", + "content": " , and corresponding camera centers " + }, + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}_i\\}_{i = 1}^{\\mathrm{N}}" + }, + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "text", + "content": " , our goal is to reconstruct the 3D of the full scene. While the task could perhaps be accomplished by simply predicting individual 3D for each camera, and assembling them together. Our insight is that if the camera frustums have considerable overlap, for overlapping regions we can achieve a better and more consistent reconstruction by allowing the network to reason about which camera provides the best view for each point. This can be achieved by allowing the network to fuse features across cameras for the points in feature space rather than by concatenating in point space. We propose to improve the feature quality of any point " + }, + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "text", + "content": " by fusing the features from multiple cameras. Since we are now dealing with the multi-view settings, a multi-view DRDF formulation is necessary to allow us to predict the DRDF value along each of the query rays, " + }, + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "inline_equation", + "content": "\\vec{\\mathbf{r}}_q" + }, + { + "bbox": [ + 46, + 230, + 289, + 447 + ], + "type": "text", + "content": " originating from the respective camera centers." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "spans": [ + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "text", + "content": "In the case of multiple views, the image feature corresponding to a point " + }, + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "text", + "content": " should be a fusion of features " + }, + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{f}_{\\theta}[\\pi_i(\\mathbf{x})]\\}_{i = 1}^{\\mathrm{N}}" + }, + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "text", + "content": ". The feature should support predicting the N DRDF values along all the camera directions as " + }, + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "inline_equation", + "content": "\\{d_{\\mathrm{DR}}(z_i\\vec{\\mathbf{r}}_i)\\}_{i = 1}^{\\mathrm{N}}" + }, + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "text", + "content": ". 
The intuition of our key idea is that multiple-image views provide more information about the 3D scene and hence potentially better features. We can learn these better features by fusing features to predict a consistent output. This requires a novel architecture that attends to features and rays, " + }, + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "inline_equation", + "content": "\\{\\vec{\\mathbf{r}}_i\\}_{i = 1}^{\\mathrm{N}}" + }, + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "text", + "content": ", originating from all the available image views. Under this formulation single view DRDF is a special case of our formulation where " + }, + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "inline_equation", + "content": "\\mathbf{N}" + }, + { + "bbox": [ + 46, + 447, + 289, + 590 + ], + "type": "text", + "content": " is 1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 599, + 174, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 599, + 174, + 611 + ], + "spans": [ + { + "bbox": [ + 47, + 599, + 174, + 611 + ], + "type": "text", + "content": "3.3. Network Architecture" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": "Towards the goal of predicting DRDFs along multiple query rays " + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\vec{\\mathbf{r}}_q\\in \\{\\vec{\\mathbf{r}}_i\\}_{i = 1}^{\\mathrm{N}}" + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": ", we present a simple and effective network 3DFIREs that accomplishes this task. 3DFIREs consists of three modules: The first module is a Backbone Feature Extractor that obtains pixel-aligned appearance features; by projecting the query point " + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": " onto the camera, we can obtain a per-point and per-camera appearance feature as in [20, 23, 31, 42, 46]. Since the appearance feature is" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "text", + "content": "per-image, the model must learn to aggregate information across cameras. This is done with our second component Query Encoder that provides geometric information for aggregating appearance features. Specifically, the query encoder uses the information about the relative positions of query point " + }, + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "text", + "content": " and query direction " + }, + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "inline_equation", + "content": "\\vec{\\mathbf{r}}_q" + }, + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "text", + "content": " w.r.t. cameras " + }, + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "inline_equation", + "content": "\\{\\pi_i\\}_{i=1}^N" + }, + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "text", + "content": ". 
The final module is the DRDF Predictor that takes appearance and query features to produce a DRDF value along the query direction " + }, + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "inline_equation", + "content": "\\vec{\\mathbf{r}}_q" + }, + { + "bbox": [ + 304, + 72, + 547, + 228 + ], + "type": "text", + "content": " by incorporating the appearance features (evidence for geometry) and query encoder features (evidence that relates different features). Fig. 3 shows an example on how integrating information across multiple views leads to better prediction for occluded parts of the scene." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "spans": [ + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "text", + "content": "Backbone Feature Extractor. Our backbone features extractor " + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\mathrm{BB}(\\cdot)" + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "text", + "content": " aims to create appearance features from an image. It accepts an image " + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_i\\in \\mathbb{R}^{H\\times W\\times 3}" + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "text", + "content": " and produces a grid of D-dimensional features " + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_i\\in \\mathbb{R}^{H'\\times W'\\times D_{\\mathrm{img}}}" + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "text", + "content": ". We use a pre-trained depth estimating vision transformer [29]. Feature extraction for each image proceeds independently using the same network. With extracted per-camera backbone features, " + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i" + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "text", + "content": ", for point " + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "text", + "content": " by interpolating features in " + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{F}_i\\}_{i = 1}^{\\mathrm{N}}" + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "text", + "content": " at the projection " + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\{\\pi_i(\\mathbf{x})\\}_{i = 1}^{\\mathrm{N}}" + }, + { + "bbox": [ + 304, + 228, + 547, + 338 + ], + "type": "text", + "content": " correspondingly." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "spans": [ + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": "Query Encoder. Our query encoder " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "q(\\cdot)" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": " aims to enable a predictor to decide how to aggregate information across images. 
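The pixel-aligned feature lookup just described, projecting a point x with each camera transform and interpolating in the corresponding feature grid F_i, can be pictured with a minimal numpy sketch. Here `project_fns` stands in for the per-camera mappings to feature-grid pixel coordinates and bilinear interpolation is an assumed choice; the names are illustrative, not the authors' code.

```python
import numpy as np

def bilinear_sample(feat_grid, uv):
    """Bilinearly interpolate an (H', W', D) feature grid at continuous pixel coords (u, v)."""
    H, W, _ = feat_grid.shape
    u = float(np.clip(uv[0], 0.0, W - 1.001))
    v = float(np.clip(uv[1], 0.0, H - 1.001))
    u0, v0 = int(np.floor(u)), int(np.floor(v))
    du, dv = u - u0, v - v0
    top = (1 - du) * feat_grid[v0, u0] + du * feat_grid[v0, u0 + 1]
    bot = (1 - du) * feat_grid[v0 + 1, u0] + du * feat_grid[v0 + 1, u0 + 1]
    return (1 - dv) * top + dv * bot

def per_camera_features(x, feature_grids, project_fns):
    """Gather one pixel-aligned feature f_i per camera for a 3D point x (returns N x D)."""
    return np.stack([bilinear_sample(F_i, proj(x))
                     for F_i, proj in zip(feature_grids, project_fns)])
```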
As input, the encoder takes a query 3D point " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": " and a query direction " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "\\vec{\\mathbf{r}}_q" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": ". It additionally considers the backbone features, camera centers " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}_i\\}_{i=1}^N" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": " and transforms " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "\\{\\pi_i\\}_{i=1}^N" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": ". Our query encoding is the concatenation of: (i) the relative viewing direction in camera " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": "'s space " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "\\Delta \\vec{\\mathbf{r}}_i(\\vec{\\mathbf{r}}_q) = [\\vec{\\mathbf{r}}_q - \\mathrm{norm}(\\mathbf{x} - \\mathbf{c}_i), \\vec{\\mathbf{r}}_q \\cdot \\mathrm{norm}(\\mathbf{x} - \\mathbf{c}_i)] \\in \\mathbb{R}^4" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": "; and (ii) the normalized device coordinates (NDC), coordinates of point " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": " in the camera frame " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "\\mathrm{ndc}_i(\\mathbf{x}) \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": ". Intuitively this query representation, " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{q}_i = \\{\\Delta \\vec{\\mathbf{r}}_i, \\mathrm{ndc}_i(\\mathbf{x})\\} \\in \\mathbb{R}^7" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": " enables reasoning such as: information about surfaces near " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": " in direction " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "\\vec{\\mathbf{r}}_q" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": " is likely not visible in camera " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": " due to either angle or distance, so this feature ought to be weighted low. 
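A minimal sketch of the 7-D per-camera query code defined above: the 4-D relative viewing direction followed by the 3-D NDC coordinates. `ndc_fn` is a placeholder for camera i's point-to-NDC mapping, which is not spelled out here.

```python
import numpy as np

def normalize(v, eps=1e-8):
    return v / (np.linalg.norm(v) + eps)

def query_encoding(x, r_q, cam_center, ndc_fn):
    """Per-camera query code q_i = [r_q - norm(x - c_i), r_q . norm(x - c_i), ndc_i(x)] in R^7."""
    view_dir = normalize(x - cam_center)                  # direction from camera i to the point
    delta_r = np.concatenate([r_q - view_dir,             # 3-D direction difference
                              [np.dot(r_q, view_dir)]])   # 1-D alignment (dot product)
    return np.concatenate([delta_r, ndc_fn(x)])           # 4 + 3 = 7 dimensions
```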
The ray query vector is encoded in a positional encoding layer [40] with output dimension " + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{query}}" + }, + { + "bbox": [ + 304, + 338, + 548, + 529 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "spans": [ + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "text", + "content": "DRDF Predictor. For a query ray and point tuple, " + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "inline_equation", + "content": "\\{\\vec{\\mathbf{r}}_q,\\mathbf{x}\\}" + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "text", + "content": ", this model considers the image features " + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{f}_i\\}_{i = 1}^{\\mathrm{N}}" + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "text", + "content": ", and query features " + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{q}_i\\}_{i = 1}^N" + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "text", + "content": " yielding a joint camera specific feature, " + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{f}_i,\\mathbf{q}_i\\}_{i = 1}^{\\mathrm{N}}" + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "text", + "content": ", of dimension " + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{img}} + D_{\\mathrm{query}}" + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "text", + "content": ". Our self-attention attends over all these features to produce a weight " + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "inline_equation", + "content": "w_{i}" + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "text", + "content": " per feature. We aggregate the features using this weight to produce a fused feature for the point " + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "text", + "content": ". We then use the fused feature to predict a DRDF value between " + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "inline_equation", + "content": "[-1,1]" + }, + { + "bbox": [ + 304, + 529, + 548, + 661 + ], + "type": "text", + "content": " with the help of an MLP. This is akin to selecting cameras that are likely to contain the geometry information about the ray point tuple and predicting the geometry information." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 670, + 419, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 419, + 682 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 419, + 682 + ], + "type": "text", + "content": "3.4. Training 3DFIREs" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 689, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 689, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 689, + 547, + 715 + ], + "type": "text", + "content": "The effectiveness of 3DFIREs is improved by getting details right during training. 
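The weighted fusion inside the DRDF predictor can be sketched as below. This is a simplified stand-in rather than the paper's module: `score_fn` plays the role of the self-attention that yields one weight w_i per camera, `drdf_mlp` is a placeholder regressor, the sinusoidal encoding and its frequency count are illustrative, and tanh is one way to keep the output in the stated [-1, 1] range.

```python
import numpy as np

def pos_enc(q, n_freqs=6):
    """Sinusoidal encoding of the 7-D query vector (frequency count is illustrative)."""
    freqs = (2.0 ** np.arange(n_freqs)) * np.pi
    return np.concatenate([fn(q * f) for f in freqs for fn in (np.sin, np.cos)])

def softmax(z):
    z = z - z.max()
    e = np.exp(z)
    return e / e.sum()

def fuse_and_predict(per_cam_feats, score_fn, drdf_mlp):
    """Fuse per-camera (f_i, q_i) features with one weight per camera, then regress a DRDF value."""
    joint = np.stack([np.concatenate([f_i, pos_enc(q_i)]) for f_i, q_i in per_cam_feats])
    w = softmax(np.array([score_fn(j) for j in joint]))   # one weight w_i per camera
    fused = (w[:, None] * joint).sum(axis=0)              # aggregate in feature space
    return np.tanh(drdf_mlp(fused))                       # DRDF value in [-1, 1]
```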
One observation is that sampling" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9745" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "type": "text", + "content": "points near intersections gives improvements over uniform sampling because the scene-level space is predominantly empty. By increasing the density of sampled points near surface, the network can better learn the scene structure. We sample points along the ray as per a Gaussian distribution centered at the intersection. Prior work [42] involves applying ray attention which allows for samples along a ray to attend with each other before the final prediction. This has been shown to be effective. However, combining ray attention with Gaussian sampling during training enables the network to 'cheat'. Ray Attention exploits a train-time shortcut (query point density) to infer intersections. At inference as point density is uniform and this shortcut fails. Empirically we find Gaussian sampling alone to be more effective than ray attention." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 261, + 180, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 261, + 180, + 274 + ], + "spans": [ + { + "bbox": [ + 47, + 261, + 180, + 274 + ], + "type": "text", + "content": "3.5. Implementation Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 280, + 287, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 280, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 46, + 280, + 287, + 364 + ], + "type": "text", + "content": "Training. Our image feature backbone is vision transformer [29] dpt_beit_large_384 pretrained by MiDaS [30]. We use " + }, + { + "bbox": [ + 46, + 280, + 287, + 364 + ], + "type": "inline_equation", + "content": "\\ell_{1}" + }, + { + "bbox": [ + 46, + 280, + 287, + 364 + ], + "type": "text", + "content": " loss on log-space truncated DRDF [5, 20, 38]. During training, we randomly sample 1, 2, 3 views with 80 rays per image and 512 points along each ray. Our method is trained for 300K iteration on NVIDIA A100 GPU with batch size of 1. More details in supp." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": "Inference. Given N images, we extract backbone features for each image. We generate " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "n_{\\mathrm{ray}} = 128 \\times 128" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " query rays from each camera. Along each ray, we sample " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "n_{\\mathrm{pt}} = 256" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " points that have uniformly spaced depth from 0 to 8m. 
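The train-time sampling strategy described above can be sketched as drawing query depths from Gaussians centred on the known ground-truth intersections along a ray, instead of uniformly. The standard deviation, the per-sample choice of intersection, and the clipping to the scene depth range are illustrative assumptions, not values taken from the paper.

```python
import numpy as np

def sample_train_depths(intersection_depths, n_pts=512, sigma=0.15, max_depth=8.0, seed=0):
    """Train-time sampling: draw query depths from Gaussians centred on ground-truth
    surface intersections along the ray (sigma and clipping are illustrative choices)."""
    rng = np.random.default_rng(seed)
    centres = rng.choice(np.asarray(intersection_depths), size=n_pts)
    depths = rng.normal(loc=centres, scale=sigma)
    return np.clip(depths, 0.0, max_depth)
```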
In total, we get " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "\\mathbf{N} \\times n_{\\mathrm{ray}} \\times n_{\\mathrm{pt}}" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " query pairs " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{x}, \\vec{\\mathbf{r}}_q\\}" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": ", which are fed to 3DFIREs in parallel to get DRDF value. We calculate positive-to-negative zero-crossings along each ray [20] to get a 3D point and aggregate the results." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 473, + 124, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 473, + 124, + 487 + ], + "spans": [ + { + "bbox": [ + 47, + 473, + 124, + 487 + ], + "type": "text", + "content": "4. Experiment" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 494, + 287, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 494, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 46, + 494, + 287, + 601 + ], + "type": "text", + "content": "In this section, we present the experimental framework for 3DFIREs, our system designed to reconstruct full scene geometry from wide-baseline, sparse images. Considering the novelty of our problem, there is no prior work that does this exact setting. To address this, we curated a dataset and developed testing metrics specifically tailored to the problem's requirements. We conduct comprehensive evaluations of 3DFIREs using real scene images, comparing its performance against alternative methods in the field." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 610, + 105, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 610, + 105, + 623 + ], + "spans": [ + { + "bbox": [ + 47, + 610, + 105, + 623 + ], + "type": "text", + "content": "4.1. Dataset" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "content": "Following [21], we use the dataset from the Gibson database [44], which contains real images of complex and diverse scenes such as multi-floor villas and expansive warehouses. The scale of the assets in the dataset presents challenging reconstruction problem, which is desirable for evaluating the ability to recover occluded surfaces. We use the images sampled by Omnidata [8] for a diverse set of" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": "camera poses from the Taskonomy [47] Medium subset, including 98/20/20 training/validation/test buildings. Since our multiview setting is different from the single-view setting of [21], the precise samples are different. 
Our setting is also similar to [17, 39] in that images have wide baselines (median " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "2.8\\mathrm{m}" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": " translation, " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "63.9^{\\circ}" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": " rotation), unlike methods using video frames [38] where images have high overlap. Our approach diverges from [17, 39] in also reconstructing occluded regions and using real (not rendered) images." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 181, + 546, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 181, + 546, + 337 + ], + "spans": [ + { + "bbox": [ + 304, + 181, + 546, + 337 + ], + "type": "text", + "content": "To curate our image sets, we use a sampling process like [17]. For a set of " + }, + { + "bbox": [ + 304, + 181, + 546, + 337 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 181, + 546, + 337 + ], + "type": "text", + "content": " images, after picking an image at random, each new image is selected to have at most " + }, + { + "bbox": [ + 304, + 181, + 546, + 337 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 304, + 181, + 546, + 337 + ], + "type": "text", + "content": " overlap with any existing image in the set, and at least " + }, + { + "bbox": [ + 304, + 181, + 546, + 337 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 304, + 181, + 546, + 337 + ], + "type": "text", + "content": " overlap with at least one other image in the set. The process balances diversity and coherence in the viewpoints. We crop images to a fixed field of view. We collect 3781 training sets among " + }, + { + "bbox": [ + 304, + 181, + 546, + 337 + ], + "type": "inline_equation", + "content": "\\geq 10\\mathrm{K}" + }, + { + "bbox": [ + 304, + 181, + 546, + 337 + ], + "type": "text", + "content": " images. We also sample 300 sets of 3-view images and 100 sets of 5-view images for evaluation from the held-out test scenes. See the supplementary for dataset generation details. The 3 view and 5 view test set contain considerable occluded 3D geometry (41.9% and 43.7% respectively)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 346, + 372, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 346, + 372, + 357 + ], + "spans": [ + { + "bbox": [ + 306, + 346, + 372, + 357 + ], + "type": "text", + "content": "4.2. Baselines" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 365, + 545, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 365, + 545, + 423 + ], + "spans": [ + { + "bbox": [ + 304, + 365, + 545, + 423 + ], + "type": "text", + "content": "To the best of our knowledge, no prior work reconstructs occluded regions from sparse-view images at scene scale. We thus create strong baselines from existing methods that handle parts of our setting. Each method is the strongest in its line of work." 
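Relatedly, for the inference step in Sec. 3.5, the predicted DRDF values along each uniformly sampled ray are turned into geometry at their positive-to-negative zero crossings. A minimal sketch, assuming linear interpolation between the two bracketing samples (the exact rule follows [20]):

```python
import numpy as np

def surface_points_from_drdf(ray_origin, ray_dir, drdf_vals, max_depth=8.0):
    """Extract 3D surface points from DRDF values sampled uniformly along one ray."""
    z = np.linspace(0.0, max_depth, len(drdf_vals))       # uniformly spaced depths
    pts = []
    for k in range(len(drdf_vals) - 1):
        a, b = drdf_vals[k], drdf_vals[k + 1]
        if a > 0 >= b:                                    # positive-to-negative crossing
            t = a / (a - b)                               # fraction of the interval to the zero
            pts.append(ray_origin + (z[k] + t * (z[k + 1] - z[k])) * ray_dir)
    return np.asarray(pts)
```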
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 425, + 545, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 425, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 304, + 425, + 545, + 520 + ], + "type": "text", + "content": "For instance, the visible surface upper-bound includes all methods that reconstruct visible surfaces from sparse views [17, 38, 39]. The DRDF method [20, 21] has been shown to be more effective for scene-level 3D reconstruction compared to many other implicit functions like density [46], occupancy [31], unsigned distance functions on scenes and rays [2]. MCC [43] is likewise SOTA for point cloud completion." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 521, + 545, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 605 + ], + "type": "text", + "content": "Depth Only [8, 29] Prior state-of-the-art works on sparse scene reconstruction [38, 39] predict visible surfaces from multiple views, but cannot recover hidden surfaces. To show the near-oracle reconstruction of visible surfaces, we use MiDaS [29] depth model trained on Omnidata [8] with ground-truth scale and shift. This baseline is an upper bound on the performance of methods like [1, 17, 38, 39]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "content": "Multiview Compressive Coding (MCC) [43] This method predicts occupancy probability from RGB-D partial point clouds. MCC works on scene-level reconstructions including non-watertight meshes. We train MCC on the same training set as ours. This method requires depth as input and at inference we provide it with ground truth depth. Since MCC only works on a single point cloud, to produce predictions from multiple images, we infer each image independently and aggregate the predicted point cloud in point" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9746" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 68, + 537, + 423 + ], + "blocks": [ + { + "bbox": [ + 55, + 68, + 537, + 423 + ], + "lines": [ + { + "bbox": [ + 55, + 68, + 537, + 423 + ], + "spans": [ + { + "bbox": [ + 55, + 68, + 537, + 423 + ], + "type": "image", + "image_path": "b8fa1bdef0a8b67e46cfe64e1e74cc419b2dcba4659f14e08d063451662d07f4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 434, + 547, + 491 + ], + "lines": [ + { + "bbox": [ + 46, + 434, + 547, + 491 + ], + "spans": [ + { + "bbox": [ + 46, + 434, + 547, + 491 + ], + "type": "text", + "content": "Figure 4. Comparison between different methods on held-out test scene. Occluded surfaces are colored with the computed surface normals. \"Depth only\" leaves holes with sparse input views, e.g. absent floors and walls. Occupancy-based method MCC [43] produces cloudy results, failing to get the details like pillow, tables. 
Concatenation of single view DRDF (SV-DRDF) [20] produces inconsistent results, e.g. missing wall in row 2, the double wall in row 3. Our method produces more consistent predictions across different views and also recovers the hidden surface, resulting in a complete mesh. We urge the reader to see results provided in the supplementary videos." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 502, + 99, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 502, + 99, + 513 + ], + "spans": [ + { + "bbox": [ + 47, + 502, + 99, + 513 + ], + "type": "text", + "content": "cloud space." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 514, + 289, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 514, + 289, + 635 + ], + "spans": [ + { + "bbox": [ + 46, + 514, + 289, + 635 + ], + "type": "text", + "content": "Single-view DRDF (SV-DRDF) [20] This method reconstructs both visible and hidden surfaces from a single input image. We use this baseline to show the benefit of our proposed multi-view feature aggregation. For a fair comparison, we upgrade the original backbone from ResNet34 [13] to the same BEiT [29] and use the same training strategy such as Gaussian sampling of points. Both improve results. Since this baseline only supports single image reconstruction, we produce predictions independently from each input image and aggregate all the point clouds." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 645, + 160, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 645, + 160, + 657 + ], + "spans": [ + { + "bbox": [ + 47, + 645, + 160, + 657 + ], + "type": "text", + "content": "4.3. Evaluation Metrics" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 665, + 222, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 665, + 222, + 676 + ], + "spans": [ + { + "bbox": [ + 47, + 665, + 222, + 676 + ], + "type": "text", + "content": "We use two metrics to evaluate our system." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": "Scene F score. Following [20, 43], we compute the scene accuracy (fraction of predicted points within " + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": " of a ground truth point), completeness (fraction of ground truth points" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 501, + 545, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 501, + 545, + 620 + ], + "spans": [ + { + "bbox": [ + 304, + 501, + 545, + 620 + ], + "type": "text", + "content": "within " + }, + { + "bbox": [ + 304, + 501, + 545, + 620 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 304, + 501, + 545, + 620 + ], + "type": "text", + "content": " from a predicted point), and their F-score (F1). This gives an overall summary of scene-level reconstruction. We classify the scene into (1) visible: points that are visible from any one of the input views; and (2) hidden: points that are hidden from all of the input views. 
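The Scene F-score just described (accuracy, completeness, and their harmonic mean at a distance threshold rho) could be computed as in the sketch below; the KD-tree nearest-neighbour search is an implementation convenience, not something the text prescribes.

```python
import numpy as np
from scipy.spatial import cKDTree

def scene_fscore(pred_pts, gt_pts, rho=0.2):
    """Accuracy, completeness and F1 between predicted and ground-truth point sets."""
    acc = (cKDTree(gt_pts).query(pred_pts)[0] < rho).mean()    # predicted points near some GT point
    comp = (cKDTree(pred_pts).query(gt_pts)[0] < rho).mean()   # GT points near some prediction
    f1 = 2 * acc * comp / (acc + comp + 1e-8)
    return acc, comp, f1
```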
Due to the space limit, we only show F-score at " + }, + { + "bbox": [ + 304, + 501, + 545, + 620 + ], + "type": "inline_equation", + "content": "\\rho = 0.2" + }, + { + "bbox": [ + 304, + 501, + 545, + 620 + ], + "type": "text", + "content": ". A full table with accuracy, completeness, F-score at different " + }, + { + "bbox": [ + 304, + 501, + 545, + 620 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 304, + 501, + 545, + 620 + ], + "type": "text", + "content": " is in the supp. Trends are the same across values of " + }, + { + "bbox": [ + 304, + 501, + 545, + 620 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 304, + 501, + 545, + 620 + ], + "type": "text", + "content": " and there is no significant accuracy/completeness imbalance for the baselines (MCC, SV-DRDF)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 629, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 546, + 715 + ], + "type": "text", + "content": "Multiview consistency. Only measuring the F-score does not measure the consistency of 3D reconstruction when generating results from multiple views. Doubled predictions of surfaces do not change the Scene F score results if they are within " + }, + { + "bbox": [ + 304, + 629, + 546, + 715 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 304, + 629, + 546, + 715 + ], + "type": "text", + "content": ". Prior work [17] used a detection-based method that penalized double surfaces on planar predictions, but their metric is not applicable since it requires pla" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9747" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 49, + 294, + 239 + ], + "blocks": [ + { + "bbox": [ + 49, + 49, + 294, + 239 + ], + "lines": [ + { + "bbox": [ + 49, + 49, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 49, + 49, + 294, + 239 + ], + "type": "image", + "image_path": "9f9f10e95b7c42e73884dfe5ad593ab61c797a2c67aaa682658bd8ecfe41c92f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 299, + 49, + 544, + 239 + ], + "blocks": [ + { + "bbox": [ + 299, + 49, + 544, + 239 + ], + "lines": [ + { + "bbox": [ + 299, + 49, + 544, + 239 + ], + "spans": [ + { + "bbox": [ + 299, + 49, + 544, + 239 + ], + "type": "image", + "image_path": "fee9a0347d0b3db85e0055b44cf9950c593c14cde05c3630f7c4098d987aa385.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 49, + 240, + 544, + 336 + ], + "blocks": [ + { + "bbox": [ + 49, + 240, + 544, + 336 + ], + "lines": [ + { + "bbox": [ + 49, + 240, + 544, + 336 + ], + "spans": [ + { + "bbox": [ + 49, + 240, + 544, + 336 + ], + "type": "image", + "image_path": "ae42aa271bbaedd6cef334145f3a6bee8be5040659c7285ca8b27397cea3c7e8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 338, + 546, + 371 + ], + "lines": [ + { + "bbox": [ + 46, + 338, + 546, + 371 + ], + 
"spans": [ + { + "bbox": [ + 46, + 338, + 546, + 371 + ], + "type": "text", + "content": "Figure 5. Qualitative results on held-out test scenes. Top row: Reconstruction from 3 images and compared with ground truth. Our method can reconstruct a complete scene structure within all the camera frustums, including the occluded surfaces. Bottom row: Predictions from 5 input images compared with ground truth. For the 2nd and 3rd examples, ceilings are removed to reveal the details of the scene." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "spans": [ + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": "lar instances. We require a metric that can measure the consistency of 3D reconstruction of points in individual frustumums. Specifically, we would like to ensure that points " + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_i" + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": " generated from all query rays originating from " + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "inline_equation", + "content": "\\pi_i" + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": " are consistent with points, " + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_j" + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": ", generated from by ray queries from " + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_j" + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "inline_equation", + "content": "\\pi_j" + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": " at the intersection of frustumums of both the cameras. For every point, " + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{p} \\in \\mathbf{P}_j" + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": " and within the field of view of camera " + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": ", we compute their minimum distance to points in " + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_i" + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": ". Our metric measures percent of points in the set " + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_j" + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": " that have minimum distance within the threshold of " + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 46, + 382, + 289, + 525 + ], + "type": "text", + "content": ". 
We evaluate this metric bidirectionally to ensure complete results." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 536, + 105, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 536, + 105, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 536, + 105, + 548 + ], + "type": "text", + "content": "4.4. Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 556, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 556, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 556, + 287, + 652 + ], + "type": "text", + "content": "Qualitative Results. Fig. 3 shows a reconstruction using query rays from the blue camera in Fig. 2. Occluded surfaces are colored with surface normals. DRDF [20] is unable to reconstruct the parts of the scene behind the wall with certainty and erroneously adds a full wall in front of the hallway. 3DFIREs fuses features from multiple images (Green and Purple cameras in Fig. 2) and accurately predicts the empty space." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "content": "Fig. 4 shows results on unseen test scenes and compares reconstructions from the baselines. Red box crops highlight differences and provide a zoomed-in view for detailed examination. Depth only (MiDaS with ground truth scale and shift) reconstructs only visible regions; this leaves holes such" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 382, + 546, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 382, + 546, + 549 + ], + "spans": [ + { + "bbox": [ + 304, + 382, + 546, + 549 + ], + "type": "text", + "content": "as the missing surfaces behind chairs in Row 1 and absent floor sections in Row 4. MCC [43] tends to produce cloudy volumes and misses details like pillows and tables. Single-view DRDF (SV-DRDF) produces occluded regions and sharp surfaces but lacks consistency when aggregating results from multiple views. This is noticeable in its inability to reconstruct the occluded wall in Row 2 and the creation of a doubled ceiling in Row 3 due to occlusions. 3DFIREs effectively merges observations from multiple images, resulting in sharp and accurate reconstructions of both visible and hidden surfaces. By fusing information across views in the feature space, our method overcomes the limitations of other approaches. This ensures comprehensive and consistent scene-level reconstruction from few sparse views." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 552, + 545, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 552, + 545, + 636 + ], + "spans": [ + { + "bbox": [ + 304, + 552, + 545, + 636 + ], + "type": "text", + "content": "In Fig. 5 we show additional results alongside the ground truth. 3DFIREs successfully reconstructs large occluded areas, floors hidden by foreground objects (colored in pink), and unseen sides of objects such as the back of chairs in the first example and the kitchen islands in the second example. The reconstruction from multiple views demonstrates consistency and coherent surfaces in overlapping regions."
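The multiview consistency metric of Sec. 4.3 compares, for an ordered camera pair, the points P_j that fall inside camera i's field of view against the points P_i predicted from camera i, and is evaluated in both directions. A sketch assuming the frustum test is supplied by the caller as `in_fov_of_i`:

```python
import numpy as np
from scipy.spatial import cKDTree

def pair_consistency(P_i, P_j, in_fov_of_i, rho=0.2):
    """Fraction of camera-j points (inside camera i's field of view) within rho of a camera-i point.

    Call this for both orderings (i, j) and (j, i) to evaluate the metric bidirectionally.
    """
    mask = np.array([in_fov_of_i(p) for p in P_j], dtype=bool)
    P_j_vis = P_j[mask]
    if len(P_j_vis) == 0:
        return 1.0                                  # no overlap between the two frustums
    dists = cKDTree(P_i).query(P_j_vis)[0]
    return (dists < rho).mean()
```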
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 639, + 546, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 639, + 546, + 698 + ], + "spans": [ + { + "bbox": [ + 304, + 639, + 546, + 698 + ], + "type": "text", + "content": "While our method is trained with up to three views, it seamlessly extends to five views. This adaptability stems from our architecture's inherent flexibility to the number of input views. With increasing views it predicts clean and coherent reconstructions within all the camera frustums." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 701, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 701, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 701, + 545, + 713 + ], + "type": "text", + "content": "Quantitative Results. We evaluate our method on sets of 1," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9748" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 136, + 545, + 210 + ], + "blocks": [ + { + "bbox": [ + 46, + 60, + 547, + 128 + ], + "lines": [ + { + "bbox": [ + 46, + 60, + 547, + 128 + ], + "spans": [ + { + "bbox": [ + 46, + 60, + 547, + 128 + ], + "type": "text", + "content": "Table 1. Quantitative results on Scene F-score (" + }, + { + "bbox": [ + 46, + 60, + 547, + 128 + ], + "type": "inline_equation", + "content": "\\rho = 0.2" + }, + { + "bbox": [ + 46, + 60, + 547, + 128 + ], + "type": "text", + "content": ") for Hidden points, Visible points, All points. For 3 and 5 views, we evaluate Consistency. Depth only: visible surface upperbound is separated to indicate it has oracle information. Despite accurate reconstructions on visible surfaces, these lines of work cannot recover hidden surfaces, causing low overall performance. With 1 view, 3DFIREs is comparable to single view DRDF. With more views, 3DFIREs outperforms all the other baselines in F-score. There is large improvement in consistency metric compared to single view DRDF, showing that aggregating features produces a more coherent reconstruction. Full tables showing accuracy and completeness are in the supplemental." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 136, + 545, + 210 + ], + "lines": [ + { + "bbox": [ + 48, + 136, + 545, + 210 + ], + "spans": [ + { + "bbox": [ + 48, + 136, + 545, + 210 + ], + "type": "table", + "html": "
<table><tr><td></td><td colspan="3">1 view</td><td colspan="4">3 views</td><td colspan="4">5 views</td></tr>
<tr><td></td><td>Hidden ↑</td><td>Visible ↑</td><td>All ↑</td><td>Hidden ↑</td><td>Visible ↑</td><td>All ↑</td><td>Consistency ↑</td><td>Hidden ↑</td><td>Visible ↑</td><td>All ↑</td><td>Consistency ↑</td></tr>
<tr><td>Depth only</td><td>-</td><td>85.31</td><td>60.12</td><td>-</td><td>87.84</td><td>63.90</td><td>72.79</td><td>-</td><td>91.29</td><td>69.40</td><td>72.57</td></tr>
<tr><td>MCC</td><td>40.27</td><td>56.40</td><td>50.25</td><td>42.91</td><td>62.02</td><td>54.78</td><td>70.20</td><td>38.51</td><td>64.44</td><td>55.94</td><td>66.57</td></tr>
<tr><td>SV-DRDF</td><td>53.36</td><td>73.45</td><td>65.21</td><td>48.02</td><td>76.19</td><td>65.61</td><td>76.44</td><td>47.51</td><td>81.31</td><td>70.54</td><td>78.13</td></tr>
<tr><td>3DFIRES</td><td>53.34</td><td>74.29</td><td>65.71</td><td>49.99</td><td>76.74</td><td>66.56</td><td>85.48</td><td>49.52</td><td>81.74</td><td>71.41</td><td>85.92</td></tr></table>
", + "image_path": "c520031f90ed025b430e1f72225f1ad10f3bd6d1264dae2f02990d816bf6388d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 61, + 259, + 272, + 320 + ], + "blocks": [ + { + "bbox": [ + 46, + 217, + 288, + 251 + ], + "lines": [ + { + "bbox": [ + 46, + 217, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 217, + 288, + 251 + ], + "type": "text", + "content": "Table 2. Ablation study on training strategies. GS: Gaussian sampling near intersection along the ray during training. Ray Attn: points along a query ray attend to each other." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 61, + 259, + 272, + 320 + ], + "lines": [ + { + "bbox": [ + 61, + 259, + 272, + 320 + ], + "spans": [ + { + "bbox": [ + 61, + 259, + 272, + 320 + ], + "type": "table", + "html": "
<table><tr><td></td><td>Hidden</td><td>Visible</td><td>All</td><td>Consistency</td></tr>
<tr><td>-GS</td><td>43.07</td><td>77.05</td><td>64.81</td><td>83.45</td></tr>
<tr><td>+Ray Attn. -GS</td><td>47.09</td><td>77.60</td><td>65.58</td><td>83.27</td></tr>
<tr><td>+Ray Attn. +GS</td><td>14.85</td><td>3.36</td><td>13.29</td><td>33.56</td></tr>
<tr><td>Ours</td><td>50.20</td><td>77.30</td><td>66.46</td><td>85.45</td></tr></table>
", + "image_path": "e4d13c798d48d4ff70e81bc0358045f92185b4359f82f51d3654a93ce7d8192f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 62, + 384, + 272, + 429 + ], + "blocks": [ + { + "bbox": [ + 46, + 331, + 288, + 376 + ], + "lines": [ + { + "bbox": [ + 46, + 331, + 288, + 376 + ], + "spans": [ + { + "bbox": [ + 46, + 331, + 288, + 376 + ], + "type": "text", + "content": "Table 3. Quantitative results on noisy camera poses generated by LoFTR, evaluated on 3 view cases at " + }, + { + "bbox": [ + 46, + 331, + 288, + 376 + ], + "type": "inline_equation", + "content": "\\rho = 0.2" + }, + { + "bbox": [ + 46, + 331, + 288, + 376 + ], + "type": "text", + "content": ". 3DFIREs assumes accurate pixel-aligned features but still produces more consistent reconstructions compared to not aggregating features." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 62, + 384, + 272, + 429 + ], + "lines": [ + { + "bbox": [ + 62, + 384, + 272, + 429 + ], + "spans": [ + { + "bbox": [ + 62, + 384, + 272, + 429 + ], + "type": "table", + "html": "
<table><tr><td>3-View</td><td>Hidden</td><td>Visible</td><td>All</td><td>Consistency</td></tr>
<tr><td>SV-DRDF</td><td>37.39</td><td>62.71</td><td>52.93</td><td>57.65</td></tr>
<tr><td>Ours</td><td>38.85</td><td>62.40</td><td>53.19</td><td>65.71</td></tr></table>
", + "image_path": "4e01f99897634c5d16e09f9afe996d534042723e0cf8442032ab646225532222.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 448, + 287, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 448, + 287, + 508 + ], + "spans": [ + { + "bbox": [ + 46, + 448, + 287, + 508 + ], + "type": "text", + "content": "3, 5 views respectively, as detailed in Tab. 1. Our approach, designed for flexible input views, matches prior works in single-view scene reconstruction and achieves state-of-the-art results with multiple input views. In single-image cases, it is comparable to the single-view DRDF baseline." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 508, + 287, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 508, + 287, + 616 + ], + "spans": [ + { + "bbox": [ + 46, + 508, + 287, + 616 + ], + "type": "text", + "content": "For 3-view sets, our method outperforms MCC [43] or DRDF [21]. Although MiDaS with ground truth scale and shift demonstrates optimal visible surface reconstruction, it falls short in overall scene reconstruction because of no reconstruction on occluded surfaces. When evaluated on scene consistency, 3DFIREs shows a large absolute improvement of " + }, + { + "bbox": [ + 46, + 508, + 287, + 616 + ], + "type": "inline_equation", + "content": ">9\\%" + }, + { + "bbox": [ + 46, + 508, + 287, + 616 + ], + "type": "text", + "content": ", over the second-best baseline, showing 3DFIREs's ability to aggregate features across views to produce consistent results." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 616, + 287, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 616, + 287, + 675 + ], + "spans": [ + { + "bbox": [ + 46, + 616, + 287, + 675 + ], + "type": "text", + "content": "The trend persists with 5-view inputs, where our method has the highest F score and consistency. Our method is not trained on 5-views subset but still remains robust to more input views enhancing the reconstruction quality in both visible and hidden surface reconstructions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 683, + 178, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 683, + 178, + 696 + ], + "spans": [ + { + "bbox": [ + 47, + 683, + 178, + 696 + ], + "type": "text", + "content": "4.5. Ablations and Analysis" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 701, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 287, + 714 + ], + "type": "text", + "content": "Ablation study on training strategy. We conduct an ab" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 219, + 545, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 219, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 304, + 219, + 545, + 338 + ], + "type": "text", + "content": "lation study (Tab. 2) to investigate the effectiveness of different training strategies for our method. Without Gaussian sampling or ray attention (-GS), the method has degraded performance " + }, + { + "bbox": [ + 304, + 219, + 545, + 338 + ], + "type": "inline_equation", + "content": "(-7\\%" + }, + { + "bbox": [ + 304, + 219, + 545, + 338 + ], + "type": "text", + "content": " in hidden F score). With ray attention only (+Ray Attn. 
-GS), the method is able to better reconstruct the hidden surface but is still worse than ours " + }, + { + "bbox": [ + 304, + 219, + 545, + 338 + ], + "type": "inline_equation", + "content": "(-3\\%)" + }, + { + "bbox": [ + 304, + 219, + 545, + 338 + ], + "type": "text", + "content": ". With both ray attention and Gaussian sampling (+Ray Attn. +GS), the network finds shortcut during training and does not work during testing. With Gaussian sampling strategy, our method performs the best." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 339, + 546, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 339, + 546, + 495 + ], + "spans": [ + { + "bbox": [ + 304, + 339, + 546, + 495 + ], + "type": "text", + "content": "Robustness with noisy camera poses. Our method requires accurate camera poses to aggregate pixel-aligned features. This setting is challenging with sparse view data since camera estimation can be noisy. We test if the misalignment of image features caused by noisy camera projection matrices degrades our system. We use LoFTR [37] to estimate the camera rotation and translation angle and evaluate the reconstruction within all the camera frustumums. Since LoFTR does not provide a translation scale, we use ground truth instead. Tab. 3 shows results on 3-view cases. Our method still has significantly higher consistency over single view DRDF baseline. We provide an analysis with synthetic Gaussian camera noise in the supplementary." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 505, + 383, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 505, + 383, + 517 + ], + "spans": [ + { + "bbox": [ + 306, + 505, + 383, + 517 + ], + "type": "text", + "content": "5. Conclusions" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 524, + 545, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 524, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 304, + 524, + 545, + 657 + ], + "type": "text", + "content": "We present 3DFIREs, a scene-level 3D reconstruction method that requires only one or a few posed images of a scene. Our method takes in an arbitrary number of input views, fuses multi-view information in the features space and predicts DRDF given a 3D point and query direction. We train our method on a large-scale scene dataset and show its strong ability to reconstruct both visible and hidden surfaces coherently within all the camera frustums on challenging wide-baseline images. Currently, our methods require pose input from off-the-shelf estimation methods, solving for 3D reconstruction and adapting the poses is a challenging next step and left to future work." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 661, + 545, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 661, + 545, + 706 + ], + "spans": [ + { + "bbox": [ + 304, + 661, + 545, + 706 + ], + "type": "text", + "content": "Acknowledgments. Thanks to Mohamed Banani, Richard Higgins, Ziyang Chen for their helpful feedback. Thanks to UM ARC for computing support. Toyota Research Institute provided funds to support this work." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9749" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "type": "text", + "content": "[1] Samir Agarwala, Linyi Jin, Chris Rockwell, and David F Fouhey. Planeformers: From sparse view planes to 3d reconstruction. In ECCV, 2022. 1, 2, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 124, + 287, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 124, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 53, + 124, + 287, + 156 + ], + "type": "text", + "content": "[2] Julian Chibane, Aymen Mir, and Gerard Pons-Moll. Neural unsigned distance fields for implicit function learning. In NeurIPS, 2020. 2, 3, 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 158, + 288, + 200 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 158, + 288, + 200 + ], + "spans": [ + { + "bbox": [ + 53, + 158, + 288, + 200 + ], + "type": "text", + "content": "[3] Christopher B Choy, Danfei Xu, JunYoung Gwak, Kevin Chen, and Silvio Savarese. 3d-r2n2: A unified approach for single and multi-view 3d object reconstruction. In ECCV, 2016. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 202, + 286, + 234 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 286, + 234 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 286, + 234 + ], + "type": "text", + "content": "[4] Manuel Dahnert, Ji Hou, Matthias Nießner, and Angela Dai. Panoptic 3d scene reconstruction from a single rgb image. NeurIPS, 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 236, + 286, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 286, + 267 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 286, + 267 + ], + "type": "text", + "content": "[5] Angela Dai, Christian Diller, and Matthias Nießner. Sg-nn: Sparse generative neural networks for self-supervised scene completion of rgb-d scans. In CVPR, 2020. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 269, + 286, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 269, + 286, + 300 + ], + "spans": [ + { + "bbox": [ + 53, + 269, + 286, + 300 + ], + "type": "text", + "content": "[6] Andrew J Davison, Ian D Reid, Nicholas D Molton, and Olivier Stasse. Monoslam: Real-time single camera slam. TPAMI, 2007. 
2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 302, + 286, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 302, + 286, + 334 + ], + "spans": [ + { + "bbox": [ + 53, + 302, + 286, + 334 + ], + "type": "text", + "content": "[7] Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ramanan. Depth-supervised nef: Fewer views and faster training for free. In CVPR, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 335, + 286, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 335, + 286, + 368 + ], + "spans": [ + { + "bbox": [ + 53, + 335, + 286, + 368 + ], + "type": "text", + "content": "[8] Ainaz Eftekhar, Alexander Sax, Jitendra Malik, and Amir Zamir. Omnidata: A scalable pipeline for making multi-task mid-level vision datasets from 3d scans. In ICCV, 2021. 2, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 369, + 286, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 369, + 286, + 401 + ], + "spans": [ + { + "bbox": [ + 53, + 369, + 286, + 401 + ], + "type": "text", + "content": "[9] Haoqiang Fan, Hao Su, and Leonidas J Guibas. A point set generation network for 3d object reconstruction from a single image. In CVPR, 2017. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 402, + 286, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 402, + 286, + 433 + ], + "spans": [ + { + "bbox": [ + 48, + 402, + 286, + 433 + ], + "type": "text", + "content": "[10] Justin Johnson Georgia Gkioxari, Nikhila Ravi. Learning 3d object shape and layout without 3d supervision. CVPR, 2022. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 435, + 286, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 435, + 286, + 468 + ], + "spans": [ + { + "bbox": [ + 48, + 435, + 286, + 468 + ], + "type": "text", + "content": "[11] Rohit Girdhar, David F Fouhey, Mikel Rodriguez, and Abhinav Gupta. Learning a predictable and generative vector representation for objects. In ECCV, 2016. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 469, + 286, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 469, + 286, + 490 + ], + "spans": [ + { + "bbox": [ + 48, + 469, + 286, + 490 + ], + "type": "text", + "content": "[12] Georgia Gkioxari, Jitendra Malik, and Justin Johnson. Mesh r-cnn. In ICCV, 2019. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 491, + 286, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 491, + 286, + 522 + ], + "spans": [ + { + "bbox": [ + 48, + 491, + 286, + 522 + ], + "type": "text", + "content": "[13] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 524, + 286, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 524, + 286, + 556 + ], + "spans": [ + { + "bbox": [ + 48, + 524, + 286, + 556 + ], + "type": "text", + "content": "[14] Mariko Isogawa, Dorian Chan, Ye Yuan, Kris M. Kitani, and Matthew O'Toole. Efficient non-line-of-sight imaging from transient sinograms. In ECCV, 2020. 
1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 558, + 286, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 286, + 578 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 286, + 578 + ], + "type": "text", + "content": "[15] Hamid Izadinia, Qi Shan, and Steven M. Seitz. Im2cad. In CVPR, 2017. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 580, + 286, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 580, + 286, + 623 + ], + "spans": [ + { + "bbox": [ + 48, + 580, + 286, + 623 + ], + "type": "text", + "content": "[16] Ziyu Jiang, Buyu Liu, Samuel Schulter, Zhangyang Wang, and Manmohan Chandraker. Peek-a-boo: Occlusion reasoning in indoor scenes with plane representations. In CVPR, 2020. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 48, + 624, + 286, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 286, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 286, + 656 + ], + "type": "text", + "content": "[17] Linyi Jin, Shengyi Qian, Andrew Owens, and David F Fouhey. Planar surface reconstruction from sparse views. In ICCV, 2021. 1, 2, 5, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 48, + 658, + 286, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 286, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 286, + 678 + ], + "type": "text", + "content": "[18] Abhishek Kar, Christian Hane, and Jitendra Malik. Learning a multi-view stereo machine. NeurIPS, 2017. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 48, + 680, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 286, + 712 + ], + "type": "text", + "content": "[19] Nilesh Kulkarni, Ishan Misra, Shubham Tulsiani, and Abhinav Gupta. 3d-relnet: Joint object and relational network for 3d prediction. In ICCV, 2019. 2" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 38, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "text", + "content": "[20] Nilesh Kulkarni, Justin Johnson, and David F Fouhey. Directed ray distance functions for 3d scene reconstruction. In ECCV, 2022. 1, 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 106, + 545, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 106, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 307, + 106, + 545, + 138 + ], + "type": "text", + "content": "[21] Nilesh Kulkarni, Linyi Jin, Justin Johnson, and David F Fouhey. Learning to predict scene-level implicit 3d from posed rgbd data. In CVPR, 2023. 2, 5, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 140, + 545, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 140, + 545, + 182 + ], + "spans": [ + { + "bbox": [ + 307, + 140, + 545, + 182 + ], + "type": "text", + "content": "[22] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In CVPR, 2019. 
2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 183, + 545, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 183, + 545, + 227 + ], + "spans": [ + { + "bbox": [ + 307, + 183, + 545, + 227 + ], + "type": "text", + "content": "[23] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 2, 4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 228, + 545, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 228, + 545, + 269 + ], + "spans": [ + { + "bbox": [ + 307, + 228, + 545, + 269 + ], + "type": "text", + "content": "[24] Zak Murez, Tarrence Van As, James Bartolozzi, Ayan Sinha, Vijay Badrinarayanan, and Andrew Rabinovich. Atlas: End-to-end 3d scene reconstruction from posed images. In ECCV, 2020. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 271, + 545, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 271, + 545, + 315 + ], + "spans": [ + { + "bbox": [ + 307, + 271, + 545, + 315 + ], + "type": "text", + "content": "[25] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang. Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In CVPR, 2020. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 316, + 545, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 316, + 545, + 358 + ], + "spans": [ + { + "bbox": [ + 307, + 316, + 545, + 358 + ], + "type": "text", + "content": "[26] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In CVPR, 2019. 1, 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 360, + 545, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 360, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 307, + 360, + 545, + 381 + ], + "type": "text", + "content": "[27] Philip Pritchett and Andrew Zisserman. Wide baseline stereo matching. In ICCV, 1998. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 383, + 545, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 383, + 545, + 414 + ], + "spans": [ + { + "bbox": [ + 307, + 383, + 545, + 414 + ], + "type": "text", + "content": "[28] Shengyi Qian, Linyi Jin, and David F Fouhey. Associative3d: Volumetric reconstruction from sparse views. In ECCV, 2020. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 415, + 545, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 415, + 545, + 446 + ], + "spans": [ + { + "bbox": [ + 307, + 415, + 545, + 446 + ], + "type": "text", + "content": "[29] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. ICCV, 2021. 4, 5, 6" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 449, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 449, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 307, + 449, + 545, + 491 + ], + "type": "text", + "content": "[30] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. TPAMI, 2022. 
5" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 493, + 545, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 493, + 545, + 525 + ], + "spans": [ + { + "bbox": [ + 307, + 493, + 545, + 525 + ], + "type": "text", + "content": "[31] Shunsuke Saito, Tomas Simon, Jason Saragih, and Hanbyul Joo. Pifuhd: Multi-level pixel-aligned implicit function for high-resolution 3d human digitization. In CVPR, 2020. 4, 5" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 526, + 545, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 526, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 307, + 526, + 545, + 601 + ], + "type": "text", + "content": "[32] Mehdi S. M. Sajjadi, Henning Meyer, Etienne Pot, Urs Bergmann, Klaus Greff, Noha Radwan, Suhani Vora, Mario Lucic, Daniel Duckworth, Alexey Dosovitskiy, Jakob Uszkoreit, Thomas Funkhouser, and Andrea Tagliasacchi. Scene Representation Transformer: Geometry-Free Novel View Synthesis Through Set-Latent Scene Representations. CVPR, 2022. 2" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 603, + 545, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 603, + 545, + 634 + ], + "spans": [ + { + "bbox": [ + 307, + 603, + 545, + 634 + ], + "type": "text", + "content": "[33] Daniel Scharstein and Richard Szeliski. A taxonomy and evaluation of dense two-frame stereo correspondence algorithms. IJCV, 2002. 2" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 307, + 636, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 636, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 307, + 636, + 545, + 656 + ], + "type": "text", + "content": "[34] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 2" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 307, + 658, + 545, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 680 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 680 + ], + "type": "text", + "content": "[35] Jonathan Shade, Steven Gortler, Li-wei He, and Richard Szeliski. Layered depth images. In Siggraph, 1998. 2" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 307, + 681, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 545, + 712 + ], + "type": "text", + "content": "[36] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. NeurIPS, 2020. 
2" + } + ] + } + ], + "index": 37 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "9750" + } + ] + } + ], + "index": 39 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 565 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[37] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. LoFTR: Detector-free local feature matching with transformers. CVPR, 2021. 2, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 107, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 107, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 49, + 107, + 287, + 139 + ], + "type": "text", + "content": "[38] Jiaming Sun, Yiming Xie, Linghao Chen, Xiaowei Zhou, and Hujun Bao. Neuralrecon: Real-time coherent 3d reconstruction from monocular video. In CVPR, 2021. 2, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 141, + 287, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 141, + 287, + 184 + ], + "spans": [ + { + "bbox": [ + 49, + 141, + 287, + 184 + ], + "type": "text", + "content": "[39] Bin Tan, Nan Xue, Tianfu Wu, and Gui-Song Xia. Nope-sac: Neural one-plane ransac for sparse-view planar 3d reconstruction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2023. 1, 2, 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 186, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 186, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 49, + 186, + 287, + 239 + ], + "type": "text", + "content": "[40] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020. 2, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 241, + 287, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 241, + 287, + 283 + ], + "spans": [ + { + "bbox": [ + 49, + 241, + 287, + 283 + ], + "type": "text", + "content": "[41] Shubham Tulsiani, Saurabh Gupta, David F Fouhey, Alexei A Efros, and Jitendra Malik. Factoring shape, pose, and layout from the 2d image of a 3d scene. In CVPR, 2018. 1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 286, + 287, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 286, + 287, + 339 + ], + "spans": [ + { + "bbox": [ + 49, + 286, + 287, + 339 + ], + "type": "text", + "content": "[42] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul Srinivasan, Howard Zhou, Jonathan T. Barron, Ricardo MartinBrualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In CVPR, 2021. 
2, 4, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 342, + 287, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 342, + 287, + 384 + ], + "spans": [ + { + "bbox": [ + 49, + 342, + 287, + 384 + ], + "type": "text", + "content": "[43] Chao-Yuan Wu, Justin Johnson, Jitendra Malik, Christoph Feichtenhofer, and Georgia Gkioxari. Multiview compressive coding for 3D reconstruction. CVPR, 2023. 1, 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 387, + 287, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 387, + 287, + 419 + ], + "spans": [ + { + "bbox": [ + 49, + 387, + 287, + 419 + ], + "type": "text", + "content": "[44] Fei Xia, Amir R Zamir, Zhiyang He, Alexander Sax, Jitendra Malik, and Silvio Savarese. Gibson env: Real-world perception for embodied agents. In CVPR, 2018. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 421, + 287, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 421, + 287, + 463 + ], + "spans": [ + { + "bbox": [ + 49, + 421, + 287, + 463 + ], + "type": "text", + "content": "[45] Yiming Xie, Matheus Gadelha, Fengting Yang, Xiaowei Zhou, and Huaizu Jiang. Planarrecon: Real-time 3d plane detection and reconstruction from posed monocular videos. In CVPR, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 466, + 287, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 466, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 49, + 466, + 287, + 498 + ], + "type": "text", + "content": "[46] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2, 4, 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 500, + 287, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 500, + 287, + 532 + ], + "spans": [ + { + "bbox": [ + 49, + 500, + 287, + 532 + ], + "type": "text", + "content": "[47] Amir R. Zamir, Alexander Sax, William Shen, Leonidas J. Guibas, Jitendra Malik, and Silvio Savarese. Taskonomy: Disentangling task transfer learning. In CVPR, 2018. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 533, + 287, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 533, + 287, + 565 + ], + "spans": [ + { + "bbox": [ + 49, + 533, + 287, + 565 + ], + "type": "text", + "content": "[48] Zhizhuo Zhou and Shubham Tulsiani. Sparsefusion: Distilling view-conditioned diffusion for 3d reconstruction. In CVPR, 2023. 
2" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "9751" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/0f8abe9e-31c6-4dc4-9520-66dabe1eb0cf_content_list.json b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/0f8abe9e-31c6-4dc4-9520-66dabe1eb0cf_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a8c3e77cd1ce33f57c176cc764afae0ea75f2ba7 --- /dev/null +++ b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/0f8abe9e-31c6-4dc4-9520-66dabe1eb0cf_content_list.json @@ -0,0 +1,1494 @@ +[ + { + "type": "text", + "text": "3DGS-Avatar: Animatable Avatars via Deformable 3D Gaussian Splatting", + "text_level": 1, + "bbox": [ + 111, + 130, + 857, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhiyin Qian $^{1}$ Shaofei Wang $^{1,2,3}$ Marko Mihajlovic $^{1}$ Andreas Geiger $^{2,3}$ Siyu Tang $^{1}$ $^{1}$ ETH Zürich $^{2}$ University of Tübingen $^{3}$ Tübingen AI Center", + "bbox": [ + 124, + 178, + 843, + 218 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/20265e84ed1b0eeb165923bfa7b725cf8fab4493209f38f90668c76390df00c6.jpg", + "image_caption": [ + "Figure 1. 3DGS-Avatar. We develop an efficient method for creating animatable avatars from monocular videos, leveraging 3D Gaussian Splatting [14]. Given a short sequence of dynamic human with a tracked skeleton and foreground masks, our method creates an avatar within 30 minutes on a single GPU, supports animation and novel view synthesis at over 50 FPS, and achieves comparable or better rendering quality to the state-of-the-art [57, 58] that requires over 8 GPU days to train, takes several seconds to render a single image, and relies on pre-training on clothed human scans [57]." + ], + "image_footnote": [], + "bbox": [ + 83, + 257, + 883, + 380 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 476, + 313, + 492 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We introduce an approach that creates animatable human avatars from monocular videos using 3D Gaussian Splatting (3DGS). Existing methods based on neural radiance fields (NeRFs) achieve high-quality novel-view/novelpose image synthesis but often require days of training, and are extremely slow at inference time. Recently, the community has explored fast grid structures for efficient training of clothed avatars. Albeit being extremely fast at training, these methods can barely achieve an interactive rendering frame rate with around 15 FPS. In this paper, we use 3D Gaussian Splatting and learn a non-rigid deformation network to reconstruct animatable clothed human avatars that can be trained within 30 minutes and rendered at real-time frame rates $(50 + FPS)$ . Given the explicit nature of our representation, we further introduce as-isometric-as-possible regularizations on both the Gaussian mean vectors and the covariance matrices, enhancing the generalization of our model on highly articulated unseen poses. 
Experimental results show that our method achieves comparable and even better performance compared to state-of-the-art approaches on animatable avatar creation from a monocular input, while being $400x$ and $250x$ faster in training and inference, respectively. Please see our project page at https://neuralbodies.github.io/3DGS-Avatar.", + "bbox": [ + 75, + 521, + 473, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 501, + 476, + 630, + 491 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Reconstructing clothed human avatars from image inputs presents a significant challenge in computer vision, yet holds immense importance due to its applications in virtual reality, gaming, and e-commerce. Traditional methods often rely on dense, synchronized multi-view inputs, which may not be readily available in more practical scenarios. Recent advances in implicit neural fields [27, 30, 32, 36, 47, 50, 51, 53, 55, 65, 66] have enabled high-quality reconstruction of geometry [8, 38, 57, 61] and appearance [13, 20, 22, 31, 35, 37, 42, 58, 70] of clothed human bodies from sparse multi-view or monocular videos. Animation of such reconstructed clothed human bodies is also possible by learning the geometry and appearance representations in a predefined canonical pose [13, 20, 35, 57, 58, 70].", + "bbox": [ + 496, + 503, + 892, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To achieve state-of-the-art rendering quality, existing methods rely on training a neural radiance field (NeRF) [27] combined with either explicit body articulation [8, 12, 13, 20, 35, 38, 57, 58, 70] or conditioning the NeRF on human body related encodings [31, 37, 48, 61]. They often employ large multi-layer perceptrons (MLPs) to model the neural radiance field, which are computationally demanding, leading to prolonged training (days) and inference (seconds) time. This computational expense poses a significant challenge for practical applications of these state-of-the-art methods in real-time applications.", + "bbox": [ + 496, + 717, + 893, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "With recent advances in efficient learning of implicit", + "bbox": [ + 519, + 885, + 892, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "5020", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "neural fields, training time of NeRFs has been reduced to minutes [3, 29, 46, 52?]. There are also works targeting fast inference of pretrained NeRFs [43, 67, 69]. Inspired by these developments, several avatar reconstruction methods have been tailored to fast training [7, 12] or fast inference [6, 17, 39]. However, to the best of our knowledge, there currently exists no method that simultaneously achieves both fast training and real-time inference for animatable avatar reconstruction from just monocular videos.", + "bbox": [ + 75, + 90, + 472, + 227 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Point-based rendering [44, 49, 62, 71, 73? 
, 74] has emerged as an efficient alternative to NeRFs for fast inference. With the recently proposed 3D Gaussian Splatting (3DGS) [14], it is possible to achieve state-of-the-art rendering quality using only a fraction of NeRFs' inference time and comparatively fast training for static scene reconstruction.", + "bbox": [ + 75, + 231, + 468, + 338 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Leveraging the capabilities of 3DGS, we demonstrate its application in modeling animatable clothed avatars using monocular videos. Our approach effectively integrates rigid human articulation with a non-rigid deformation field within the 3DGS framework. We use a small multi-layer perceptron (MLP) to decode color. This MLP is designed to be responsive to local non-rigid deformations and dynamic lighting conditions, ensuring a more realistic and responsive rendering of the avatar's appearance. Furthermore, we apply as-isometric-as-possible regularizations [15, 41] to both the Gaussian mean vectors and the covariance matrices, which helps maintain the geometric consistency and realistic deformation of the avatar, particularly in dynamic and varied poses.", + "bbox": [ + 75, + 343, + 472, + 555 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our experimental results show that our method is comparable to or better than current state-of-the-art [57, 58] in animatable avatar creation from monocular inputs, achieving training speed 400 times faster and inference speed 250 times quicker. Compared to methods that focus on fast training [7, 12], our method, despite being slower in training, can model pose-dependent non-rigid deformation and produce significantly better rendering quality, while being 3 times faster in terms of rendering. We provide an overview of the comparison to major prior works in Tab. 1. In summary, our work makes the following contributions:", + "bbox": [ + 75, + 560, + 468, + 727 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce 3D Gaussian Splatting to animatable human avatars reconstruction from monocular videos.", + "- We develop a simple yet effective deformation network as well as regularization terms that effectively drive 3D Gaussian Splats to handle highly articulated and out-of-distribution poses.", + "- Our method is the first, to our knowledge, to simultaneously deliver high-quality rendering, model pose-dependent non-rigid deformation, generalize effectively to unseen poses, and achieve fast training (less than 30 minutes) and real-time rendering speed $(50+\\mathrm{FPS})$ ." + ], + "bbox": [ + 76, + 731, + 468, + 897 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e9fda64a11f5021ef1f0f1249f67708d0c44f3844d225ab1c07b86eeae577d61.jpg", + "image_caption": [ + "Table 1. Comparison to SoTA. Instant-NVR [7] and InstantA-vatar [12] achieve instant training within 5 minutes. For real-time rendering, we require a frame rate over 30 FPS. Note that while UV-Volumes [6] claims real-time freeview rendering, they only achieve 14 FPS on novel pose synthesis due to the slow generation of their UV Volume." + ], + "image_footnote": [], + "bbox": [ + 532, + 90, + 861, + 305 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 500, + 431, + 650, + 446 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural rendering for clothed human avatars. 
Since the seminal work of Neural Radiance Fields (NeRF) [27], there has been a surge of research on neural rendering for clothed human avatars. The majority of the works focus on either learning a NeRF conditioned on human body related encodings [31, 48, 61], or learning a canonical NeRF representation and warp camera rays from the observation space to the canonical space to query radiance and density values from the canonical NeRF [8, 12, 13, 20, 35, 38, 57, 58, 70]. Most of these works rely on large multi-layer perceptrons (MLPs) to model the underlying neural radiance field, which are computationally expensive, resulting in prolonged training (days) and inference (seconds) time.", + "bbox": [ + 496, + 459, + 890, + 655 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "With recent advances in accelerated data structures for neural fields, there has been several works targeting fast inference and fast training of NeRFs for clothed humans. [12] proposes to use iNGP [29] as the underlying representation for articulated NeRFs, which enables fast training (less than 5 minutes) and interactive rendering speed (15 FPS) but ignores pose-dependent non-rigid deformations. [7] also utilizes iNGP and represents non-rigid deformations in the UV space, which enables fast training and modeling of pose-dependent non-rigid deformations. However, as we will show in our experiments, [7]'s parametrization of non-rigid deformations result in blurry renderings. [6] proposes to generate a pose-dependent UV volume for efficient free-view synthesis. However, their UV-volume generation process is slow (20 FPS), making novel pose synthesis less efficient (only 14 FPS). [17] also employs UV-based rendering", + "bbox": [ + 496, + 657, + 892, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "5021", + "bbox": [ + 482, + 944, + 513, + 957 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to achieve real-time rendering of dynamic clothed humans, but only works on dense multi-view inputs. Extending [69], [54, 72] applied Fourier transform for compressing human performance capture data, albeit with limitations on dense multi-view data (60-80 views) and non-generalizability of the Fourier basis representation to unseen poses beyond the training dataset. In contrast to all these works, our method achieves state-of-the-art rendering quality and speed with less than 30 minutes of training time from a single monocular video input.", + "bbox": [ + 75, + 90, + 472, + 241 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Dynamic 3D gaussians. Point-based rendering [40, 44, 49, 62, 71, 73, 74] has also been shown to be an efficient alternative to NeRFs for fast inference and training. Extending point cloud to 3D Gaussians, 3D Gaussian Splatting (3DGS) [14] models the rendering process as splatting a set of 3D Gaussians onto image plane via alpha blending, achieving state-of-the-art rendering quality with real-time inference speed and fast training given multi-view inputs.", + "bbox": [ + 75, + 244, + 468, + 366 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the great performance on both quality and speed of 3DGS, a rich set of works has further explored the 3D Gaussian representation for dynamic scene reconstruction. [14] proposed to optimize the position and shape of each 3D Gaussian on a frame-by-frame basis and simultaneously performed 6-DOF dense tracking for free. Their model size, however, increases with the temporal dimension. 
[59, 63] maintain a single set of 3D Gaussians in a canonical space and deform them to each frame via learning a time-dependent deformation field, producing state-of-the-art results in terms of both rendering quality and speed. [64] augments 3D Gaussians with temporal dimension into 4D Gaussian primitives to approximate the underlying spatiotemporal 4D volume of the dynamic scene. While such methods show promising results, they are only applicable to either synthetic datasets with fast camera movement and slow object motion or forward-facing real scenes with limited object movements, thus unable to handle the immense displacement of the articulated human body. To address this problem, our approach utilizes a statistical human body model [24] for articulation and applies regularization to reduce the overfitting of the deformation field.", + "bbox": [ + 75, + 369, + 472, + 700 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Concurrent works. Concurrent with our method, many recent works also seek to combine 3DGS with human articulation prior for avatar reconstruction. We provide a comparison of our approach to concurrent works in Tab. 2. D3GA [75] proposed to embed 3D Gaussians in tetrahedral cages and utilize cage deformations for drivable avatar animation. However, they use dense calibrated multi-view videos as input and require an additional 3D scan to generate the tetrahedral mesh template. Li et al. [21] focused on generating avatars with a detailed appearance from multiview videos by post-processing radiance field renderings with 2D CNNs, which limits their rendering speed. Along with [11, 28], these works fail to achieve fast training with", + "bbox": [ + 75, + 704, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/0c5bb30f7863eb054a4a8bf0a42e08f62a298dbc2cbb1f043fea25d713d8c2eb.jpg", + "image_caption": [ + "Table 2. Comparison to Concurrent Works." + ], + "image_footnote": [], + "bbox": [ + 532, + 90, + 861, + 319 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "relatively complex pipelines. Similar to our approach, Ye et al. [68] deforms 3D Gaussians in canonical space via pose-dependent deformation and rigid articulation, but they still require 2 hours for training and do not show results on monocular inputs. HUGS [16] learns a background model along with the animatable human avatar, but they fail to take pose-dependent cloth deformation into account. Several other works [10, 18, 23] also neglect pose-dependent cloth deformation to achieve even faster training (in 5 minutes) and rendering $(150 + \\mathrm{FPS})$ . We argue that our method strikes a good balance between quality and speed compared to concurrent works, as being the only method simultaneously achieving the properties listed in Tab. 2.", + "bbox": [ + 496, + 376, + 893, + 574 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Preliminaries", + "text_level": 1, + "bbox": [ + 500, + 594, + 638, + 611 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Linear Blend Skinning. To model human articulations, a widely adopted paradigm is to represent geometry and appearance in a shared canonical space [8, 12, 13, 20, 35, 38, 57, 58] and use Linear Blend Skinning (LBS) [2, 9, 24, 33, 34, 60] to deform the parametric human body under arbitrary poses. 
Given a point $\\mathbf{x}_c$ in canonical space, the LBS function takes a set of rigid bone transformations $\\{\\mathbf{B}_b\\}_{b=1}^B$ and computes its correspondence $\\mathbf{x}_o$ in the observation space:", + "bbox": [ + 496, + 622, + 893, + 758 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {\\mathbf {o}} = L B S _ {\\sigma_ {w}} \\left(\\mathbf {x} _ {c}; \\left\\{\\mathbf {B} _ {b} \\right\\}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 609, + 776, + 890, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Assuming an underlying SMPL model, we use a total of $B = 24$ bone transformations, each represented by a $4 \\times 4$ rotation-translation matrix, which are then linearly blended via a set of skinning weights $\\mathbf{w} \\in [0,1]^B$ , s.t. $\\sum_{b=1}^{B} \\mathbf{w}_b = 1$ , modeled by a coordinate-based neural skinning field $f_{\\sigma_w}(\\mathbf{x}_c) \\in [4,5,26,45,56]$ . The forward linear blend skin", + "bbox": [ + 496, + 810, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "5022", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ning function can thus be formulated as:", + "bbox": [ + 76, + 90, + 346, + 104 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {o} = L B S _ {\\sigma_ {w}} \\left(\\mathbf {x} _ {c}; \\{\\mathbf {B} _ {b} \\}\\right) = \\sum_ {b = 1} ^ {B} f _ {\\sigma_ {w}} \\left(\\mathbf {x} _ {c}\\right) _ {b} \\mathbf {B} _ {b} \\mathbf {x} _ {c} \\quad (2)\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 116, + 468, + 143 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Compared to prior works that search canonical correspondences of points in observation space [12, 57, 58], our method requires no inverse skimming which is typically difficult to compute and often leads to multiple solutions [4, 5]. A similar technique has been employed in [73] for face avatar modeling.", + "bbox": [ + 75, + 154, + 468, + 243 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D Gaussian Splatting. 3DGS [14] utilizes a set of 3D Gaussian primitives $\\{\\mathcal{G}\\}$ as static scene representation which can be rendered in real-time via differentiable rasterization. Each 3D Gaussian $\\mathcal{G}$ is defined by its mean $\\mathbf{x}$ , covariance $\\boldsymbol{\\Sigma}$ , opacity $\\alpha$ and view-dependent color represented by spherical harmonics coefficients $\\mathbf{f}$ . To ensure positive semi-definiteness, the covariance matrix is represented by a scaling matrix $\\mathbf{S}$ and rotation matrix $\\mathbf{R}$ . In practice, we store the diagonal vector $\\mathbf{s} \\in \\mathbb{R}^3$ of the scaling matrix and a quaternion vector $\\mathbf{q} \\in \\mathbb{R}^4$ to represent rotation, which can be trivially converted to a valid covariance matrix.", + "bbox": [ + 75, + 244, + 468, + 410 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The 3D Gaussians are projected to the 2D image plane during the rendering process and accumulated via alpha blending. Given a viewing transformation $\\mathbf{W}$ and the Jacobian of the affine approximation of the projective transformation $\\mathbf{J}$ , the 2D covariance matrix in camera coordinate [76] is given by $\\boldsymbol{\\Sigma}^{\\prime} = (\\mathbf{J}\\mathbf{W}\\boldsymbol{\\Sigma}\\mathbf{W}^{T}\\mathbf{J}^{T})_{1:2,1:2}$ . 
The pixel color $C$ is thus computed by blending 3D Gaussian splats that overlap at the given pixel, sorted according to their depth:", + "bbox": [ + 75, + 411, + 468, + 547 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nC = \\sum_ {i} \\left(\\alpha_ {i} ^ {\\prime} \\prod_ {j = 1} ^ {i - 1} \\left(1 - \\alpha_ {j} ^ {\\prime}\\right)\\right) c _ {i} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 559, + 468, + 593 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\alpha_{i}^{\\prime}$ denotes the learned opacity $\\alpha_{i}$ weighted by the probability density of $i$ -th projected 2D Gaussian at the target pixel location. $c$ denotes the view-dependent color computed from stored SH coefficients $\\mathbf{f}$ .", + "bbox": [ + 75, + 603, + 468, + 662 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The 3D Gaussians $\\{\\mathcal{G}\\}$ are optimized via a photometric loss. During optimization, 3DGS adaptively controls the number of 3D Gaussians via periodic densification and pruning, achieving self-adaptive convergence to an optimal density distribution of 3D Gaussians that well represents the scene.", + "bbox": [ + 75, + 664, + 468, + 753 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Methods", + "text_level": 1, + "bbox": [ + 76, + 768, + 174, + 784 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We illustrate the pipeline of our method in Fig. 2. The input to our method is a monocular video with a calibrated camera, fitted SMPL parameters, and foreground masks. Our method optimizes a set of 3D Gaussians in canonical space, which is then deformed to the observation space and rendered from the given camera. For a set of 3D Gaussians $\\{\\mathcal{G}^{(i)}\\}_{i = 1}^{N}$ , we store the following properties at each point:", + "bbox": [ + 75, + 794, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "position $\\mathbf{x}$ , scaling factor $\\mathbf{s}$ , rotation quaternion $\\mathbf{q}$ , opacity $\\alpha$ and a color feature vector $\\mathbf{f}$ . We start by randomly sampling $N = 50k$ points on the canonical SMPL [24] mesh surface as initialization of canonical 3D Gaussians $\\{\\mathcal{G}_c\\}$ . Inspired by HumanNeRF [58], we decompose the complex human deformation into a non-rigid part that encodes pose-dependent cloth deformation, and a rigid transformation controlled by the human skeleton.", + "bbox": [ + 498, + 90, + 890, + 210 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Pose-dependent Non-rigid Deformation", + "text_level": 1, + "bbox": [ + 498, + 219, + 836, + 234 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We formulate the non-rigid deformation module as:", + "bbox": [ + 500, + 242, + 841, + 257 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{\\mathcal {G} _ {d} \\right\\} = \\mathcal {F} _ {\\theta_ {n r}} \\left(\\left\\{\\mathcal {G} _ {c} \\right\\}; \\mathcal {Z} _ {p}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 612, + 263, + 890, + 281 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\{\\mathcal{G}_d\\}$ represents the non-rigidly deformed 3D Gaussians. $\\theta_{nr}$ represents the learnable parameters of the nonrigid deformation module. $\\mathcal{Z}_p$ is a latent code which encodes SMPL pose and shape $(\\theta, \\beta)$ using a lightweight hierarchical pose encoder [26]. 
Specifically, the deformation network $f_{\\theta_{nr}}$ takes the canonical position $\\mathbf{x}_c$ , the pose latent code $\\mathcal{Z}_p$ as inputs and outputs the offsets of the Gaussian's position, scale, rotation, along with a feature vector $\\mathbf{z}$ :", + "bbox": [ + 498, + 287, + 890, + 409 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\delta \\mathbf {x}, \\delta \\mathbf {s}, \\delta \\mathbf {q}, \\mathbf {z}\\right) = f _ {\\theta_ {n r}} \\left(\\mathbf {x} _ {c}; \\mathcal {Z} _ {p}\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 591, + 416, + 890, + 433 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We use a multi-level hash grid [29] to encode 3D positions as spatial features, which are then concatenated with the pose latent code $\\mathcal{Z}_p$ and fed into a shallow MLP with 2 hidden layers and a width of 128. The canonical Gaussian is deformed by:", + "bbox": [ + 498, + 439, + 890, + 513 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {d} = \\mathbf {x} _ {c} + \\delta \\mathbf {x} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 522, + 890, + 537 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {s} _ {d} = \\mathbf {s} _ {c} \\cdot \\exp (\\delta \\mathbf {s}) \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 541, + 890, + 556 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {q} _ {d} = \\mathbf {q} _ {c} \\cdot [ 1, \\delta q _ {1}, \\delta q _ {2}, \\delta q _ {3} ] \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 606, + 560, + 890, + 575 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "note that the $\\cdot$ operator on quaternions is equivalent to multiplying the two rotation matrices derived by the two quaternions. Since the quaternion $[1,0,0,0]$ corresponds to the identity rotation matrix, we have $\\mathbf{q}_d = \\mathbf{q}_c$ when $\\delta \\mathbf{q} = \\mathbf{0}$ .", + "bbox": [ + 498, + 583, + 890, + 643 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Rigid Transformation", + "text_level": 1, + "bbox": [ + 500, + 651, + 702, + 666 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We further transform the non-rigidly deformed 3D Gaussians $\\{\\mathcal{G}_d\\}$ to the observation space via a rigid transformation module:", + "bbox": [ + 498, + 674, + 890, + 718 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{\\mathcal {G} _ {o} \\right\\} = \\mathcal {F} _ {\\theta_ {r}} \\left(\\left\\{\\mathcal {G} _ {d} \\right\\}; \\left\\{\\mathbf {B} _ {\\mathbf {b}} \\right\\} _ {b = 1} ^ {B}\\right) \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 724, + 890, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where a skinning MLP $f_{\\theta_r}$ is learned to predict skinning weights at the position $\\mathbf{x}_d$ . 
We transform the position and the rotation matrix of 3D Gaussians via forward LBS:", + "bbox": [ + 498, + 750, + 890, + 794 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {T} = \\sum_ {b = 1} ^ {B} f _ {\\theta_ {r}} \\left(\\mathbf {x} _ {d}\\right) _ {b} \\mathbf {B} _ {b} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 614, + 797, + 890, + 825 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {o} = \\mathbf {T} \\mathbf {x} _ {d} \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 611, + 828, + 890, + 843 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {R} _ {o} = \\mathbf {T} _ {1: 3, 1: 3} \\mathbf {R} _ {d} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 847, + 890, + 863 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{R}_d$ is the rotation matrix derived from the quaternion $\\mathbf{q}_d$ .", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "5023", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/eb1865dac9c072592ad7c2480fdea1e4f45207fd2d69a729340adaaeb3763c4f.jpg", + "image_caption": [ + "Figure 2. Our framework for creating animatable avatars from monocular videos. We first initialize a set of 3D Gaussians in the canonical space via sampling points from a SMPL mesh. Each canonical Gaussian $\\mathcal{G}_c$ goes through a non-rigid deformation module $\\mathcal{F}_{\\theta_{nr}}$ conditioned on an encoded pose vector $\\mathcal{Z}_p$ (Sec. 4.1) to account for pose-dependent non-rigid cloth deformation. This module outputs a non-rigidly deformed 3D Gaussian $\\mathcal{G}_d$ and a pose-dependent latent feature $\\mathbf{z}$ . The non-rigidly deformed 3D Gaussian $\\mathcal{G}_d$ is transformed to the observation space $\\mathcal{G}_o$ (Sec. 4.2) via LBS with learned neural skinning $\\mathcal{F}_{\\theta_r}$ . The Gaussian feature $\\mathbf{f}$ , the pose-dependent feature $\\mathbf{z}$ , a per-frame latent code $\\mathcal{Z}_c$ , and the ray direction $\\mathbf{d}$ are propagated through a small MLP $\\mathcal{F}_{\\theta_c}$ to decode the view-dependent color $c$ for each 3D Gaussian. Finally, the observation space 3D Gaussians $\\{\\mathcal{G}_o\\}$ and their respective color values are accumulated via differentiable Gaussian rasterization (Eq. (3)) to render the image." + ], + "image_footnote": [], + "bbox": [ + 81, + 89, + 890, + 231 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Color MLP", + "text_level": 1, + "bbox": [ + 76, + 378, + 202, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Prior works [59, 63, 64] follow the convention of 3DGS [14], which stores spherical harmonics coefficients per 3D Gaussian to encode the view-dependent color. Treating the stored color feature $\\mathbf{f}$ as spherical harmonics coefficients, the color of a 3D Gaussian can be computed by the dot product of the spherical harmonics basis and the learned coefficients: $c = \\langle \\pmb{\\gamma}(\\mathbf{d}),\\mathbf{f}\\rangle$ , where $\\mathbf{d}$ represents the viewing direction, derived from the relative position of the 3D Gaussian wrt. 
the camera center and $\\pmb{\\gamma}$ denotes the spherical harmonics basis function.", + "bbox": [ + 75, + 401, + 470, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "While conceptually simple, we argue that this approach does not suit our monocular setting. Since only one camera view is provided during training, the viewing direction in the world space is fixed, leading to poor generalization to unseen test views. Similar to [38], we use the inverse rigid transformation from Sec. 4.2 to canonicalize the viewing direction: $\\hat{\\mathbf{d}} = \\mathbf{T}_{1:3,1:3}^{-1}\\mathbf{d}$ , where $\\mathbf{T}$ is the forward transformation matrix defined in Eq. (10). Theoretically, canonicalizing viewing direction also promotes consistency of the specular component of canonical 3D Gaussians under rigid transformations.", + "bbox": [ + 75, + 553, + 468, + 717 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "On the other hand, we observe that the pixel color of the rendered clothed human avatar also largely depends on local deformation. Local fine wrinkles on clothes, for instance, would cause self-occlusion that heavily affects shading. Following [37], we also learn a per-frame latent code $\\mathcal{Z}_c$ to compensate for different environment light effects across frames caused by the global movement of the subject. Hence, instead of learning spherical harmonic coefficients, we enhance color modeling by learning a neural network that takes per-Gaussian color feature vector $\\mathbf{f} \\in \\mathbb{R}^{32}$ , local pose-dependent feature vector $\\mathbf{z} \\in \\mathbb{R}^{16}$ from the non-rigid deformation network, per-frame latent code $\\mathcal{Z}_c \\in \\mathbb{R}^{16}$ , and", + "bbox": [ + 75, + 719, + 470, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "spherical harmonics basis of canonicalized viewing direction $\\gamma (\\dot{\\mathbf{d}})$ with a degree of 3 as input and predicts the color of the 3D Gaussian:", + "bbox": [ + 496, + 378, + 892, + 422 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nc = \\mathcal {F} _ {\\theta_ {c}} (\\mathbf {f}, \\mathbf {z}, \\mathcal {Z} _ {c}, \\boldsymbol {\\gamma} (\\hat {\\mathbf {d}})) \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 614, + 440, + 892, + 459 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In practice, we find a tiny MLP with one 64-dimension hidden layer sufficient to model the appearance. Increasing the size of the MLP leads to overfitting and performance drop.", + "bbox": [ + 496, + 474, + 890, + 521 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.4. Optimization", + "text_level": 1, + "bbox": [ + 500, + 537, + 638, + 553 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We jointly optimize canonical 3D Gaussians $\\{\\mathcal{G}_c\\}$ and the parameters $\\theta_{nr},\\theta_r,\\theta_c$ of the non-rigid deformation network, the skinning network and the color network, respectively.", + "bbox": [ + 496, + 561, + 890, + 623 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Pose correction. SMPL [24] parameter fittings from images can be inaccurate. To address this, we additionally optimize the per-sequence shape parameter as well as per-frame translation, global rotation, and local joint rotations. 
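As an aside to Secs. 4.1-4.3, the per-Gaussian deformation and shading path (Eqs. (5)-(13)) can be summarized in a short sketch. The snippet below is a hypothetical PyTorch-style illustration only, not the released implementation: hash_encode, nonrigid_mlp, skinning_mlp and color_mlp are assumed placeholder modules, the hierarchical pose encoder and the spherical-harmonics basis evaluation are abstracted away, and quaternion normalization is added purely for numerical stability.

```python
# Illustrative sketch (not the authors' code): deform one set of canonical
# 3D Gaussians to the observation space and decode per-Gaussian color,
# loosely following Eqs. (5)-(13). All module names are hypothetical.
import torch
import torch.nn.functional as F

def quat_to_rotmat(q):                       # q: (N, 4), unit quaternions (w, x, y, z)
    w, x, y, z = q.unbind(-1)
    return torch.stack([
        1 - 2*(y*y + z*z), 2*(x*y - w*z),     2*(x*z + w*y),
        2*(x*y + w*z),     1 - 2*(x*x + z*z), 2*(y*z - w*x),
        2*(x*z - w*y),     2*(y*z + w*x),     1 - 2*(x*x + y*y)],
        dim=-1).reshape(-1, 3, 3)

def quat_mul(q, r):                          # Hamilton product of (N, 4) quaternions
    w1, x1, y1, z1 = q.unbind(-1)
    w2, x2, y2, z2 = r.unbind(-1)
    return torch.stack([
        w1*w2 - x1*x2 - y1*y2 - z1*z2,
        w1*x2 + x1*w2 + y1*z2 - z1*y2,
        w1*y2 - x1*z2 + y1*w2 + z1*x2,
        w1*z2 + x1*y2 - y1*x2 + z1*w2], dim=-1)

def deform_gaussians(xc, sc, qc, f, pose_latent, frame_latent, cam_center,
                     nonrigid_mlp, skinning_mlp, color_mlp, hash_encode, bones):
    """xc, sc: (N, 3); qc: (N, 4); f: (N, 32); pose/frame latents: (1, D);
    cam_center: (3,); bones: (24, 4, 4) rigid bone transformations."""
    # Non-rigid, pose-dependent offsets (Eqs. 5-8).
    h = torch.cat([hash_encode(xc), pose_latent.expand(xc.shape[0], -1)], dim=-1)
    dx, ds, dq, z = nonrigid_mlp(h).split([3, 3, 3, 16], dim=-1)
    xd = xc + dx
    sd = sc * torch.exp(ds)
    dq_full = F.normalize(torch.cat([torch.ones_like(dq[:, :1]), dq], dim=-1), dim=-1)
    qd = quat_mul(qc, dq_full)
    # Rigid articulation via forward LBS (Eqs. 10-12).
    w = torch.softmax(skinning_mlp(xd), dim=-1)            # (N, 24) skinning weights
    T = torch.einsum('nb,bij->nij', w, bones)              # per-Gaussian transform
    xo = torch.einsum('nij,nj->ni', T[:, :3, :3], xd) + T[:, :3, 3]
    Ro = T[:, :3, :3] @ quat_to_rotmat(qd)
    # View-dependent color from a small MLP (Eq. 13), viewing direction canonicalized.
    d = F.normalize(xo - cam_center, dim=-1)
    d_can = torch.einsum('nji,nj->ni', T[:, :3, :3], d)    # R^T d
    color = color_mlp(torch.cat([f, z, frame_latent.expand(f.shape[0], -1), d_can], dim=-1))
    return xo, sd, Ro, color
```

In this sketch the canonicalized viewing direction stands in for the SH basis γ(d̂), and the skinning weights are produced with a softmax so that they are positive and sum to one, mirroring the constraint on w in Eq. (2).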
We initialize these parameters $\\theta_{p}$ with the given SMPL parameters and differentiably derive the bone transformations $\\{\\mathbf{B}_b\\}$ as input to the network, enabling direct optimization via backpropagation.", + "bbox": [ + 496, + 626, + 890, + 747 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As-isometric-as-possible regularization. With monocular video as input, only one view of the human is visible in each frame, making it extremely hard to generalize to novel views and novel poses. Considering the sparsity of input, the non-rigid deformation network is highly underconstrained, resulting in noisy deformation from the canonical space to the observation space. Inspired by [41], we leverage the as-isometric-as-possible constraint [15] to restrict neighboring 3D Gaussian centers to preserve a similar distance after deformation. We further augment the con", + "bbox": [ + 496, + 750, + 893, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5024", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "strain to Gaussian covariance matrices:", + "bbox": [ + 76, + 90, + 341, + 104 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {i s o p o s}} = \\sum_ {i = 1} ^ {N} \\sum_ {j \\in \\mathcal {N} _ {k} (i)} \\left| d \\left(\\mathbf {x} _ {c} ^ {(i)}, \\mathbf {x} _ {c} ^ {(j)}\\right) - d \\left(\\mathbf {x} _ {o} ^ {(i)}, \\mathbf {x} _ {o} ^ {(j)}\\right) \\right| \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 107, + 111, + 468, + 170 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {i s o c o v} = \\sum_ {i = 1} ^ {N} \\sum_ {j \\in \\mathcal {N} _ {k} (i)} \\left| d \\left(\\boldsymbol {\\Sigma} _ {c} ^ {(i)}, \\boldsymbol {\\Sigma} _ {c} ^ {(j)}\\right) - d \\left(\\boldsymbol {\\Sigma} _ {o} ^ {(i)}, \\boldsymbol {\\Sigma} _ {o} ^ {(j)}\\right) \\right| \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 175, + 468, + 234 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $N$ denotes the number of 3D Gaussians. $\\mathcal{N}_k$ denotes the k-nearest neighbourhood, and we set $k$ to 5. We use L2-norm as our distance function $d(\\cdot ,\\cdot)$", + "bbox": [ + 76, + 241, + 468, + 286 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Loss function. Our full loss function consists of a RGB loss $\\mathcal{L}_{rgb}$ , a mask loss $\\mathcal{L}_{mask}$ , a skinning weight regularization loss $\\mathcal{L}_{skin}$ and the as-isometric-as-possible regularization loss for both position and covariance $\\mathcal{L}_{ispos},\\mathcal{L}_{isocov}$ . For further details of the loss definition and respective weights, please refer to the Supp.Mat.", + "bbox": [ + 76, + 286, + 468, + 378 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 388, + 209, + 406 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we first compare the proposed approach with recent state-of-the-art methods [7, 12, 37, 57, 58], demonstrating that our proposed approach achieves superior rendering quality in terms of LPIPS, which is more informative under monocular setting, while achieving fast training and real-time rendering speed, respectively $400\\mathrm{x}$ and $250\\mathrm{x}$ faster than the most competitive baseline [58]. 
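The as-isometric-as-possible terms of Eqs. (14)-(15) can likewise be written down compactly. The following is a minimal sketch under the same assumptions (PyTorch, hypothetical helper names): neighbours are the k = 5 nearest canonical Gaussians, the distance between covariance matrices is taken as the Frobenius norm of their difference, and the sums in Eqs. (14)-(15) are replaced by means.

```python
# Illustrative sketch (not the authors' code) of the as-isometric-as-possible
# regularizers in Eqs. (14)-(15): neighbouring Gaussians should keep similar
# pairwise distances between their means (and covariances) after deformation.
import torch

def knn_indices(x, k=5):
    """Indices of the k nearest neighbours of each point, excluding itself.
    Note: the dense (N, N) distance matrix is fine for a sketch, but for ~50k
    Gaussians a chunked or tree-based kNN would be needed in practice."""
    dist = torch.cdist(x, x)
    return dist.topk(k + 1, largest=False).indices[:, 1:]   # drop self (distance 0)

def isometry_losses(xc, xo, cov_c, cov_o, nbr):
    """xc/xo: (N, 3) canonical/observation means; cov_*: (N, 3, 3); nbr: (N, k)."""
    # L_isopos: preserve distances between neighbouring Gaussian centers (Eq. 14).
    d_c = (xc[:, None, :] - xc[nbr]).norm(dim=-1)            # (N, k) canonical
    d_o = (xo[:, None, :] - xo[nbr]).norm(dim=-1)            # (N, k) deformed
    loss_pos = (d_c - d_o).abs().mean()
    # L_isocov: same constraint on covariance matrices (Eq. 15, Frobenius norm).
    c_c = (cov_c[:, None] - cov_c[nbr]).flatten(-2).norm(dim=-1)
    c_o = (cov_o[:, None] - cov_o[nbr]).flatten(-2).norm(dim=-1)
    loss_cov = (c_c - c_o).abs().mean()
    return loss_pos, loss_cov

# Usage sketch: nbr = knn_indices(xc, k=5); both terms are then added, with their
# respective weights, to the RGB, mask and skinning losses described above.
```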
We then systematically ablate each component of the proposed model, showing their effectiveness in better rendering quality.", + "bbox": [ + 75, + 415, + 468, + 551 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Evaluation Dataset", + "text_level": 1, + "bbox": [ + 76, + 559, + 261, + 574 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ZJU-MoCap [37]. This is the major testbed for quantitative evaluation. We pick six sequences (377, 386, 387, 392, 393, 394) from the ZJU-MoCap dataset and follow the training/test split of HumanNeRF [58]. The motion of these sequences is repetitive and does not contain a sufficient number of poses for meaningful novel pose synthesis benchmarks. Thus we focus on evaluating novel view synthesis (PSNR/SSIM/LPIPS) and show qualitative results for animation on out-of-distribution poses. Note that LPIPS in all the tables are scaled up by 1000.", + "bbox": [ + 75, + 582, + 468, + 733 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "PeopleSnapshot [1]. We also conduct experiments on 4 sequences of the PeopleSnapshot dataset, which includes monocular videos of people rotating in front of a camera. We follow the data split of InstantAvatar [12] and compare to [12] on novel pose synthesis. For fair comparison, we use the provided poses optimized by Anim-NeRF [35] and do not further optimize it during our training.", + "bbox": [ + 75, + 733, + 468, + 839 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Comparison with Baselines", + "text_level": 1, + "bbox": [ + 76, + 847, + 321, + 863 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We compare our approach with NeuralBody [37], HumanNeRF [58], MonoHuman [70], ARAH [57] and Instant", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "NVR [7] under monocular setup on ZJU-MoCap. The quantitative results are reported in Tab. 3. NeuralBody is underperforming compared to other approaches. Overall, our proposed approach produces comparable performance to ARAH on PSNR and SSIM, while significantly outperforming all the baselines on LPIPS. We argue that LPIPS is more informative compared to the other two metrics, as it is very difficult to reproduce exactly the ground-truth appearance for novel views due to the monocular setting and the stochastic nature of cloth deformations. Meanwhile, our method is also capable of fast training and renders at a real-time rendering frame rate, being 400 times faster for training (30 GPU minutes vs. 8 GPU days) and $250 - 500$ times faster for inference (50 FPS vs. 0.1 FPS for ARAH and 0.2 FPS for HumanNeRF). We also note that Instant-NVR trains on a refined version of ZJU-MoCap, which provides refined camera parameters, SMPL fittings, and more accurate instance masks with part-level annotation that is essential for running their method. Hence their metrics are not directly comparable to other methods in Tab. 3. We train our model on the refined dataset for a fair quantitative comparison, which clearly shows that our method outperforms Instant-NVR in most scenarios.", + "bbox": [ + 496, + 90, + 890, + 436 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Qualitative comparisons on novel view synthesis can be found in Fig. 3. We observe that our method preserves sharper details compared to ARAH and does not produce fluctuating artifacts as in HumanNeRF caused by noisy deformation fields. Instant-NVR produces an oversmooth appearance and tends to generate noisy limbs. 
Additionally, we animate our learned avatars with pose sequences from AMASS [25] and AIST++ [19], shown in the rightmost column of Fig. 3. This shows that our model could generalize to extreme out-of-distribution poses.", + "bbox": [ + 496, + 439, + 890, + 589 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For PeopleSnapshot, we report the quantitative comparison against InstantAvatar [12] in Tab. 4. Our approach significantly outperforms InstantAvatar on PSNR and LPIPS, while being more than $3\\mathrm{x}$ faster during inference.", + "bbox": [ + 496, + 590, + 890, + 651 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 662, + 653, + 680 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We study the effect of various components of our method on the ZJU-MoCap dataset, including the color MLP, the as-isometric-as-possible regularization and the pose correction module. The average metrics over 6 sequences are reported in Tab. 5. We show that all proposed techniques are required to reach the optimal performance, best reflected by LPIPS which is the most informative metric for novel view synthesis evaluation under a monocular setup.", + "bbox": [ + 496, + 688, + 890, + 808 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We further show qualitative comparison on out-of-distribution poses in Fig. 4, which demonstrates that the as-isometric-as-possible loss helps to constrain the 3D Gaussians to comply with consistent movement during deformation, hence improving generalization on novel poses. Albeit marginally, each individual component contributes to a", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "5025", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/721a7f5d19b0b74ddebf9957d73031a24577f9c604fd435442873d5c25505bdd.jpg", + "image_caption": [ + "Figure 3. Qualitative Comparison on ZJU-MoCap [37]. We show the results for both novel view synthesis and novel pose animation of all sequences on ZJU-MoCap. Our method produces high-quality results that preserve cloth details even on out-of-distribution poses." + ], + "image_footnote": [], + "bbox": [ + 127, + 87, + 844, + 834 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "5026", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/b3ba8e8ac9145a6cd6a725e2eb602a33d824ad2df9e25dd79f8489c2b8eb61ad.jpg", + "table_caption": [ + "Table 3. Quantitative Results on ZJU-MoCap [37]. We outperform both competitive baselines [57, 58] in terms of LPIPS while being two orders of magnitude faster in training and rendering. Cell color indicates best and second best. Instant-NVR [7] is trained and tested on a refined version of ZJU-MoCap, thus is not directly comparable to other baselines quantitatively. We train our model on the refined dataset for fair quantitative comparison to Instant-NVR and the metrics are reported in the last two rows of the table." + ], + "table_footnote": [], + "table_body": "
Subject:377386387392393394
Metric:GPU↓FPS↑PSNR↑SSIM↑LPIPS↓PSNR↑SSIM↑LPIPS↓PSNR↑SSIM↑LPIPS↓PSNR↑SSIM↑LPIPS↓PSNR↑SSIM↑LPIPS↓PSNR↑SSIM↑LPIPS↓
NeuralBody [37]12h229.110.967440.9530.540.967846.4327.000.951859.4730.100.964253.2728.610.959059.0529.100.959354.55
HumanNeRF [58]>8d0.230.410.974324.0633.200.975228.9928.180.963235.5831.040.970532.1228.310.960336.7230.310.964232.89
MonoHuman [70]4d0.129.120.972726.5832.940.969536.0427.930.960141.7629.500.963539.4527.640.956643.1729.150.959538.08
ARAH [57]8d0.130.850.980026.6033.500.978131.4028.490.965640.4332.020.974235.2828.770.964542.3029.460.963240.76
Ours0.5h5030.640.977420.8833.630.977325.7728.330.964234.2431.660.973030.1428.880.963535.2630.540.966131.21
Instant-NVR* [7]0.1h331.280.978925.3733.710.977032.8128.390.964045.9731.850.973039.4729.560.964146.1631.320.968040.63
Ours*0.5h5030.960.977819.8533.940.978424.7028.400.965632.9632.100.973929.2029.300.964534.0330.740.966231.00
", + "bbox": [ + 78, + 155, + 893, + 256 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/8faf096d86e312a36b17928a7b22f24eff183db6c44aab942c246a35f97290dc.jpg", + "table_caption": [ + "Table 4. Quantitative Results on PeopleSnapshot [1]." + ], + "table_footnote": [], + "table_body": "
Subject:female-3-casualfemale-4-casualmale-3-casualmale-4-casual
Metric:GPU↓FPS↑PSNR↑SSIM↑LPIPS↓PSNR↑SSIM↑LPIPS↓PSNR↑SSIM↑LPIPS↓PSNR↑SSIM↑LPIPS↓
InstantAvatar [12]5 min.1527.660.970921.0029.110.968316.7029.530.971615.5027.670.962630.7
Ours45 min.5030.570.958120.8633.160.967815.7434.280.972414.9230.220.965323.05
", + "bbox": [ + 86, + 292, + 883, + 363 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0d9a9c3867c9690ac42a4f0d9f471dcd6a71d1b5a6955c5757ee0752e25168c7.jpg", + "image_caption": [ + "Full model" + ], + "image_footnote": [], + "bbox": [ + 104, + 378, + 274, + 522 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7100c0043f46985ec827f25486fc998778278d7c8015634db068aab801420277.jpg", + "image_caption": [ + "Figure 4. Ablation Study on as-isometric-as-possible regularization, which removes the artifacts on highly articulated poses." + ], + "image_footnote": [], + "bbox": [ + 276, + 380, + 468, + 522 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f8a7ab9505428a03290576c0fda0d8b908c6d20231f0efeee27f050fcc5239cc.jpg", + "image_caption": [ + "w/o $\\mathcal{L}_{isocov}$ , $\\mathcal{L}_{isopos}$" + ], + "image_footnote": [], + "bbox": [ + 501, + 378, + 669, + 521 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/71c45e1a0a787ec7f859bfd0e58f1ad509d5fdfd434a248ffc0d8de1a5b2ac0b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 380, + 862, + 522 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/b079594807012ee5477bd2cd79ffb95d9a735fd0f0d739cff276919818a06168.jpg", + "table_caption": [ + "Table 5. Ablation Study on ZJU-MoCap [37]. The proposed model achieves the lowest LPIPS, demonstrating the effectiveness of all components." + ], + "table_footnote": [], + "table_body": "
Metric:PSNR↑SSIM↑LPIPS↓
Full model30.610.970329.58
w/o color MLP30.550.970031.24
w/o $\mathcal{L}_{isocov}$30.610.970329.84
w/o $\mathcal{L}_{isopos}$, $\mathcal{L}_{isocov}$30.590.969930.25
w/o pose correction30.600.970329.87
", + "bbox": [ + 98, + 660, + 447, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "better novel-view rendering quality and particularly generates more plausible results with respect to novel pose animation.", + "bbox": [ + 75, + 786, + 470, + 832 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 76, + 844, + 194, + 859 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we present 3DGS-Avatar, one of the first methods that utilize the explicit representation of 3DGS", + "bbox": [ + 75, + 869, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "for efficient reconstruction of clothed human avatars from monocular videos. Our method achieves photorealistic rendering, awareness of pose-dependent cloth deformation, generalization to unseen poses, fast training, and real-time rendering all at once.", + "bbox": [ + 496, + 609, + 892, + 686 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Experiments show that our method is comparable to or even better than the state-of-the-art methods in terms of rendering quality while being two orders of magnitude faster in both training and inference. Furthermore, we propose to replace spherical harmonics with a shallow MLP to decode 3D Gaussian color and regularize deformation with geometric constraints, both proved to be effective in enhancing rendering quality. We hope that our new representation could foster further research in fast, high-quality animatable clothed human avatar synthesis from a monocular view.", + "bbox": [ + 496, + 686, + 893, + 838 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. SW and AG were supported by the ERC Starting Grant LEGO-3D (850533) and the DFG EXC number 2064/1 - project number 390727645. SW and ST acknowledge the SNSF grant 200021 204840.", + "bbox": [ + 496, + 839, + 893, + 901 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "5027", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Thiemo Alldieck, Marcus Magnor, Weipeng Xu, Christian Theobalt, and Gerard Pons-Moll. Video based reconstruction of 3d people models. In Proc. of CVPR, 2018. 6, 8", + "[2] Dragomir Anguelov, Praveen Srinivasan, Daphne Koller, Sebastian Thrun, Jim Rodgers, and James Davis. Scape: shape completion and animation of people. ACM Transactions Graphics, 24, 2005. 3", + "[3] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In Proc. of ECCV, 2022. 2", + "[4] Xu Chen, Yufeng Zheng, Michael Black, Otmar Hilliges, and Andreas Geiger. Snarf: Differentiable forward skinning for animating non-rigid neural implicit shapes. In Proc. of ICCV, 2021. 3, 4", + "[5] Xu Chen, Tianjian Jiang, Jie Song, Max Rietmann, Andreas Geiger, Michael J. Black, and Otmar Hilliges. Fast-snarf: A fast deformer for articulated neural fields. Pattern Analysis and Machine Intelligence (PAMI), 2023. 3, 4", + "[6] Yue Chen, Xuan Wang, Xingyu Chen, Qi Zhang, Xiaoyu Li, Yu Guo, Jue Wang, and Fei Wang. Uv volumes for real-time rendering of editable free-view human performance. In Proc. of CVPR, 2023. 2", + "[7] Chen Geng, Sida Peng, Zhen Xu, Hujun Bao, and Xiaowei Zhou. Learning neural volumetric representations of dynamic humans in minutes. In Proc. 
of CVPR, 2023. 2, 6, 8", + "[8] Chen Guo, Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Vid2 avatar: 3d avatar reconstruction from videos in the wild via self-supervised scene decomposition. In Proc. of CVPR, 2023. 1, 2, 3", + "[9] N. Hasler, C. Stoll, M. Sunkel, B. Rosenhahn, and H.-P. Seidel. A Statistical Model of Human Pose and Body Shape. Computer Graphics Forum, 28:337-346, 2009. 3", + "[10] Shoukang Hu and Ziwei Liu. Gauhuman: Articulated gaussian splatting from monocular human videos. In Proc. of CVPR, 2024. 3", + "[11] Rohit Jena, Ganesh Subramanian Iyer, Siddharth Choudhary, Brandon Smith, Pratik Chaudhari, and James Gee. Splatarmor: Articulated gaussian splatting for animatable humans from monocular rgb videos. arXiv preprint arXiv:2311.10812, 2023. 3", + "[12] Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Instantavatar: Learning avatars from monocular video in 60 seconds. In Proc. of CVPR, 2023. 1, 2, 3, 4, 6, 8", + "[13] Wei Jiang, Kwang Moo Yi, Golnoosh Samei, Oncel Tuzel, and Anurag Ranjan. Neuman: Neural human radiance field from a single video. In Proc. of ECCV, 2022. 1, 2, 3", + "[14] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics, 42 (4), 2023. 1, 2, 3, 4, 5", + "[15] Martin Kilian, Niloy J. Mitra, and Helmut Pottmann. Geometric modeling in shape space. ACM Transactions on Graphics (SIGGRAPH), 26(3), 2007. 2, 5" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] Muhammed Kocabas, Jen-Hao Rick Chang, James Gabriel, Oncel Tuzel, and Anurag Ranjan. Hugs: Human gaussian splatting. In Proc. of CVPR, 2024. 3", + "[17] Youngjoong Kwon, Lingjie Liu, Henry Fuchs, Marc Habermann, and Christian Theobalt. Deliffas: Deformable light fields for fast avatar synthesis. Proc. of NeurIPS, 2023. 2", + "[18] Jiahui Lei, Yufu Wang, Georgios Pavlakos, Lingjie Liu, and Kostas Daniilidis. Gart: Gaussian articulated template models. In Proc. of CVPR, 2024. 3", + "[19] Ruilong Li, Shan Yang, David A. Ross, and Angjoo Kanazawa. Ai choreographer: Music conditioned 3d dance generation with aist++. In Proc. of ICCV, 2021. 6", + "[20] Ruilong Li, Julian Tanke, Minh Vo, Michael Zollhoefer, Jürgen Gall, Angjoo Kanazawa, and Christoph Lassner. Tava: Template-free animatable volumetric actors. In Proc. of ECCV, 2022. 1, 2, 3", + "[21] Zhe Li, Zerong Zheng, Lizhen Wang, and Yebin Liu. Animatable gaussians: Learning pose-dependent gaussian maps for high-fidelity human avatar modeling. In Proc. of CVPR, 2024. 3", + "[22] Lingjie Liu, Marc Habermann, Viktor Rudnev, Kripasindhu Sarkar, Jiatao Gu, and Christian Theobalt. Neural actor: Neural free-view synthesis of human actors with pose control. ACM Trans. Graph. (ACM SIGGRAPH Asia), 2021. 1", + "[23] Yang Liu, Xiang Huang, Minghan Qin, Qinwei Lin, and Haoqian Wang. Animatable 3d gaussian: Fast and high-quality reconstruction of multiple human avatars, 2023. 3", + "[24] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multiperson linear model. ACM Transactions Graphics, 34(6), 2015. 3, 4, 5", + "[25] Naureen Mahmood, Nima Ghorbani, Nikolaus F. Troje, Gerard Pons-Moll, and Michael J. Black. AMASS: Archive of motion capture as surface shapes. In Proc. of ICCV, 2019. 6", + "[26] Marko Mihajlovic, Yan Zhang, Michael J. Black, and Siyu Tang. LEAP: Learning articulated occupancy of people. 
In Proc. of CVPR, 2021. 3, 4", + "[27] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. of ECCV, 2020. 1, 2", + "[28] Arthur Moreau, Jifei Song, Helisa Dhamo, Richard Shaw, Yiren Zhou, and Eduardo Pérez-Pellitero. Human gaussian splatting: Real-time rendering of animatable avatars. In Proc. of CVPR, 2024. 3", + "[29] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions Graphics, 41(4), 2022. 2, 4", + "[30] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proc. of CVPR, 2020. 1", + "[31] Atsuhiro Noguchi, Xiao Sun, Stephen Lin, and Tatsuya Harada. Neural articulated radiance field. In Proc. of ICCV, 2021. 1, 2" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "5028", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[32] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proc. of ICCV, 2021. 1", + "[33] Ahmed A. A. Osman, Timo Bolkart, and Michael J. Black. Star: Sparse trained articulated human body regressor. In Proc. of ECCV, 2020. 3", + "[34] Georgios Pavlakos, Luyang Zhu, Xiaowei Zhou, and Kostas Daniilidis. Learning to estimate 3d human pose and shape from a single color image. In Proc. of CVPR, 2018. 3", + "[35] Sida Peng, Junting Dong, Qianqian Wang, Shangzhan Zhang, Qing Shuai, Xiaowei Zhou, and Hujun Bao. Animatable neural radiance fields for modeling dynamic human bodies. In Proc. of ICCV, 2021. 1, 2, 3, 6", + "[36] Songyou Peng, Chiyu \"Max\" Jiang, Yiyi Liao, Michael Niemeyer, Marc Pollefeys, and Andreas Geiger. Shape as points: A differentiable poisson solver. In Proc. of NeurIPS, 2021. 1", + "[37] Sida Peng, Yuanqing Zhang, Yinghao Xu, Qianqian Wang, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Neural body: Implicit neural representations with structured latent codes for novel view synthesis of dynamic humans. In Proc. of CVPR, 2021. 1, 2, 5, 6, 7, 8", + "[38] Sida Peng, Shangzhan Zhang, Zhen Xu, Chen Geng, Boyi Jiang, Hujun Bao, and Xiaowei Zhou. Animatable neural implicit surfaces for creating avatars from videos. ArXiv, abs/2203.08133, 2022. 1, 2, 3, 5", + "[39] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proc. of CVPR, 2023. 2", + "[40] Sergey Prokudin, Michael J. Black, and Javier Romero. SMPLpix: Neural avatars from 3D human models. In Proc. of WACV, 2021. 3", + "[41] Sergey Prokudin, Qianli Ma, Maxime Raafat, Julien Valentin, and Siyu Tang. Dynamic point fields. In Proc. of ICCV, 2023. 2, 5", + "[42] Amit Raj, Julian Tanke, James Hays, Minh Vo, Carsten Stoll, and Christoph Lassner. Anr-articulated neural rendering for virtual avatars. In Proc. of CVPR, 2021. 1", + "[43] Christian Reiser, Richard Szeliski, Dor Verbin, Pratul P. Srinivasan, Ben Mildenhall, Andreas Geiger, Jonathan T. Barron, and Peter Hedman. Merf: Memory-efficient radiance fields for real-time view synthesis in unbounded scenes. ACM TOG, 42(4), 2023. 2", + "[44] Darius Rückert, Linus Franke, and Marc Stamminger. 
Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics, 41(4), 2022. 2, 3", + "[45] Shunsuke Saito, Jinlong Yang, Qianli Ma, and Michael J. Black. SCANimate: Weakly supervised learning of skinned clothed avatar networks. In Proc. of CVPR, 2021. 3", + "[46] Sara Fridovich-Keil and Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proc. of CVPR, 2022. 2", + "[47] Vincent Sitzmann, Semon Rezchikov, William T. Freeman, Joshua B. Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. In Proc. of NeurIPS, 2021. 1" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[48] Shih-Yang Su, Frank Yu, Michael Zollhoefer, and Helge Rhodin. A-neRF: Articulated neural radiance fields for learning human shape, appearance, and pose. In Proc. of NeurIPS, 2021. 1, 2", + "[49] Shih-Yang Su, Timur Bagautdinov, and Helge Rhodin. Npc: Neural point characters from video. In Proc. of ICCV, 2023. 2, 3", + "[50] Mohammed Suhail, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Generalizable patch-based neural rendering. In Proc. of ECCV, 2022. 1", + "[51] Mohammed Suhail1, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Light field neural rendering. In Proc. of CVPR, 2022. 1", + "[52] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proc. of CVPR, 2022. 2", + "[53] Huan Wang, Jian Ren, Zeng Huang, Kyle Olszewski, Mengei Chai, Yun Fu, and Sergey Tulyakov. R21: Distilling neural radiance field to neural light field for efficient novel view synthesis. In Proc. of ECCV, 2022. 1", + "[54] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proc. of CVPR, 2022. 3", + "[55] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Proc. of NeurIPS, 2021. 1", + "[56] Shaofei Wang, Marko Mihajlovic, Qianli Ma, Andreas Geiger, and Siyu Tang. Metaatrix: Learning animatable clothed human models from few depth images. In Proc. of NeurIPS, 2021. 3", + "[57] Shaofei Wang, Katja Schwarz, Andreas Geiger, and Siyu Tang. Arah: Animatable volume rendering of articulated human sdfs. In Proc. of ECCV, 2022. 1, 2, 3, 4, 6, 8", + "[58] Chung-Yi Weng, Brian Curless, Pratul P. Srinivasan, Jonathan T. Barron, and Ira Kemelmacher-Shlizerman. Humaner: Free-viewpoint rendering of moving people from monocular video. In Proc. of CVPR, 2022. 1, 2, 3, 4, 6, 8", + "[59] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3, 5", + "[60] Hongyi Xu, Eduard Gabriel Bazavan, Andrei Zanfir, William T. Freeman, Rahul Sukthankar, and Cristian Sminchisescu. Ghum & ghuml: Generative 3d human shape and articulated pose models. In Proc. of CVPR, 2020. 3", + "[61] Hongyi Xu, Thiemo Alldieck, and Cristian Sminchisescu. H-neRF: Neural radiance fields for rendering and temporal reconstruction of humans in motion. In Proc. of NeurIPS, 2021. 
1, 2", + "[62] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proc. of CVPR, 2022. 2, 3", + "[63] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for" + ], + "bbox": [ + 501, + 92, + 890, + 902 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "5029", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 3, 5", + "[64] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv 2310.10642, 2023. 3, 5", + "[65] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Proc. of NeurIPS, 2020. 1", + "[66] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Proc. of NeurIPS, 2021. 1", + "[67] Lior Yariv, Peter Hedman, Christian Reiser, Dor Verbin, Pratul P. Srinivasan, Richard Szeliski, Jonathan T. Barron, and Ben Mildenhall. Bakedsdf: Meshing neural sdfs for real-time view synthesis. In Proc. of SIGGRAPH, 2023. 2", + "[68] Keyang Ye, Tianjia Shao, and Kun Zhou. Animatable 3d gaussians for high-fidelity synthesis of human motions, 2023. 3", + "[69] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. PlenOctrees for real-time rendering of neural radiance fields. In Proc. of ICCV, 2021. 2, 3", + "[70] Zhengming Yu, Wei Cheng, xian Liu, Wayne Wu, and KwanYee Lin. MonoHuman: Animatable human neural field from monocular video. In Proc. of CVPR, 2023. 1, 2, 6, 8", + "[71] Qiang Zhang, Seung-Hwan Baek, Szymon Rusinkiewicz, and Felix Heide. Differentiable point-based radiance fields for efficient view synthesis. In SIGGRAPH Asia Conference Proceedings, 2022. 2, 3", + "[72] Fuqiang Zhao, Yuheng Jiang, Kaixin Yao, Jiakai Zhang, Liao Wang, Haizhao Dai, Yuhui Zhong, Yingliang Zhang, Minye Wu, Lan Xu, and Jingyi Yu. Human performance modeling and rendering via neural animated mesh. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 41(6), 2022. 3", + "[73] Yufeng Zheng, Wang Yifan, Gordon Wetzstein, Michael J. Black, and Otmar Hilliges. Pointavatar: Deformable point-based head avatars from videos. In Proc. of ECCV, 2023. 2, 3, 4", + "[74] Zerong Zheng, Han Huang, Tao Yu, Hongwen Zhang, Yandong Guo, and Yebin Liu. Structured local radiance fields for human avatar modeling. In Proc. of CVPR, 2022. 2, 3", + "[75] Wojciech Zielonka, Timur Bagautdinov, Shunsuke Saito, Michael Zollhöfer, Justus Thies, and Javier Romero. Drivable 3d gaussian avatars. arXiv preprint arXiv:2311.08581, 2023. 3", + "[76] M. Zwicker, H. Pfister, J. van Baar, and M. Gross. Ewa volume splatting. In Proceedings Visualization, 2001. VIS '01., pages 29-538, 2001. 
4" + ], + "bbox": [ + 78, + 90, + 470, + 786 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "5030", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/0f8abe9e-31c6-4dc4-9520-66dabe1eb0cf_model.json b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/0f8abe9e-31c6-4dc4-9520-66dabe1eb0cf_model.json new file mode 100644 index 0000000000000000000000000000000000000000..888c31f9bb4994dce9339bead74583133e53521b --- /dev/null +++ b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/0f8abe9e-31c6-4dc4-9520-66dabe1eb0cf_model.json @@ -0,0 +1,2345 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.131, + 0.859, + 0.154 + ], + "angle": 0, + "content": "3DGS-Avatar: Animatable Avatars via Deformable 3D Gaussian Splatting" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.179, + 0.844, + 0.219 + ], + "angle": 0, + "content": "Zhiyin Qian\\(^{1}\\) Shaofei Wang\\(^{1,2,3}\\) Marko Mihajlovic\\(^{1}\\) Andreas Geiger\\(^{2,3}\\) Siyu Tang\\(^{1}\\) \n\\(^{1}\\)ETH Zürich \\(^{2}\\)University of Tübingen \\(^{3}\\)Tübingen AI Center" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.258, + 0.885, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.394, + 0.893, + 0.466 + ], + "angle": 0, + "content": "Figure 1. 3DGS-Avatar. We develop an efficient method for creating animatable avatars from monocular videos, leveraging 3D Gaussian Splatting [14]. Given a short sequence of dynamic human with a tracked skeleton and foreground masks, our method creates an avatar within 30 minutes on a single GPU, supports animation and novel view synthesis at over 50 FPS, and achieves comparable or better rendering quality to the state-of-the-art [57, 58] that requires over 8 GPU days to train, takes several seconds to render a single image, and relies on pre-training on clothed human scans [57]." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.477, + 0.314, + 0.493 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.522, + 0.474, + 0.884 + ], + "angle": 0, + "content": "We introduce an approach that creates animatable human avatars from monocular videos using 3D Gaussian Splatting (3DGS). Existing methods based on neural radiance fields (NeRFs) achieve high-quality novel-view/novelpose image synthesis but often require days of training, and are extremely slow at inference time. Recently, the community has explored fast grid structures for efficient training of clothed avatars. Albeit being extremely fast at training, these methods can barely achieve an interactive rendering frame rate with around 15 FPS. In this paper, we use 3D Gaussian Splatting and learn a non-rigid deformation network to reconstruct animatable clothed human avatars that can be trained within 30 minutes and rendered at real-time frame rates \\((50 + FPS)\\). 
Given the explicit nature of our representation, we further introduce as-isometric-as-possible regularizations on both the Gaussian mean vectors and the covariance matrices, enhancing the generalization of our model on highly articulated unseen poses. Experimental results show that our method achieves comparable and even better performance compared to state-of-the-art approaches on animatable avatar creation from a monocular input, while being \\(400x\\) and \\(250x\\) faster in training and inference, respectively. Please see our project page at https://neuralbodies.github.io/3DGS-Avatar." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.477, + 0.631, + 0.492 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.505, + 0.893, + 0.716 + ], + "angle": 0, + "content": "Reconstructing clothed human avatars from image inputs presents a significant challenge in computer vision, yet holds immense importance due to its applications in virtual reality, gaming, and e-commerce. Traditional methods often rely on dense, synchronized multi-view inputs, which may not be readily available in more practical scenarios. Recent advances in implicit neural fields [27, 30, 32, 36, 47, 50, 51, 53, 55, 65, 66] have enabled high-quality reconstruction of geometry [8, 38, 57, 61] and appearance [13, 20, 22, 31, 35, 37, 42, 58, 70] of clothed human bodies from sparse multi-view or monocular videos. Animation of such reconstructed clothed human bodies is also possible by learning the geometry and appearance representations in a predefined canonical pose [13, 20, 35, 57, 58, 70]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.718, + 0.895, + 0.884 + ], + "angle": 0, + "content": "To achieve state-of-the-art rendering quality, existing methods rely on training a neural radiance field (NeRF) [27] combined with either explicit body articulation [8, 12, 13, 20, 35, 38, 57, 58, 70] or conditioning the NeRF on human body related encodings [31, 37, 48, 61]. They often employ large multi-layer perceptrons (MLPs) to model the neural radiance field, which are computationally demanding, leading to prolonged training (days) and inference (seconds) time. This computational expense poses a significant challenge for practical applications of these state-of-the-art methods in real-time applications." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.886, + 0.893, + 0.901 + ], + "angle": 0, + "content": "With recent advances in efficient learning of implicit" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5020" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.228 + ], + "angle": 0, + "content": "neural fields, training time of NeRFs has been reduced to minutes [3, 29, 46, 52?]. There are also works targeting fast inference of pretrained NeRFs [43, 67, 69]. Inspired by these developments, several avatar reconstruction methods have been tailored to fast training [7, 12] or fast inference [6, 17, 39]. However, to the best of our knowledge, there currently exists no method that simultaneously achieves both fast training and real-time inference for animatable avatar reconstruction from just monocular videos." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.232, + 0.47, + 0.339 + ], + "angle": 0, + "content": "Point-based rendering [44, 49, 62, 71, 73? , 74] has emerged as an efficient alternative to NeRFs for fast inference. 
With the recently proposed 3D Gaussian Splatting (3DGS) [14], it is possible to achieve state-of-the-art rendering quality using only a fraction of NeRFs' inference time and comparatively fast training for static scene reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.344, + 0.473, + 0.556 + ], + "angle": 0, + "content": "Leveraging the capabilities of 3DGS, we demonstrate its application in modeling animatable clothed avatars using monocular videos. Our approach effectively integrates rigid human articulation with a non-rigid deformation field within the 3DGS framework. We use a small multi-layer perceptron (MLP) to decode color. This MLP is designed to be responsive to local non-rigid deformations and dynamic lighting conditions, ensuring a more realistic and responsive rendering of the avatar's appearance. Furthermore, we apply as-isometric-as-possible regularizations [15, 41] to both the Gaussian mean vectors and the covariance matrices, which helps maintain the geometric consistency and realistic deformation of the avatar, particularly in dynamic and varied poses." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.561, + 0.47, + 0.728 + ], + "angle": 0, + "content": "Our experimental results show that our method is comparable to or better than current state-of-the-art [57, 58] in animatable avatar creation from monocular inputs, achieving training speed 400 times faster and inference speed 250 times quicker. Compared to methods that focus on fast training [7, 12], our method, despite being slower in training, can model pose-dependent non-rigid deformation and produce significantly better rendering quality, while being 3 times faster in terms of rendering. We provide an overview of the comparison to major prior works in Tab. 1. In summary, our work makes the following contributions:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.732, + 0.47, + 0.76 + ], + "angle": 0, + "content": "- We introduce 3D Gaussian Splatting to animatable human avatars reconstruction from monocular videos." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.762, + 0.47, + 0.821 + ], + "angle": 0, + "content": "- We develop a simple yet effective deformation network as well as regularization terms that effectively drive 3D Gaussian Splats to handle highly articulated and out-of-distribution poses." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.822, + 0.47, + 0.898 + ], + "angle": 0, + "content": "- Our method is the first, to our knowledge, to simultaneously deliver high-quality rendering, model pose-dependent non-rigid deformation, generalize effectively to unseen poses, and achieve fast training (less than 30 minutes) and real-time rendering speed \\((50+\\mathrm{FPS})\\)." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.732, + 0.47, + 0.898 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.091, + 0.862, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.317, + 0.895, + 0.402 + ], + "angle": 0, + "content": "Table 1. Comparison to SoTA. Instant-NVR [7] and InstantA-vatar [12] achieve instant training within 5 minutes. For real-time rendering, we require a frame rate over 30 FPS. Note that while UV-Volumes [6] claims real-time freeview rendering, they only achieve 14 FPS on novel pose synthesis due to the slow generation of their UV Volume." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.432, + 0.651, + 0.447 + ], + "angle": 0, + "content": "2. 
Related Works" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.46, + 0.892, + 0.656 + ], + "angle": 0, + "content": "Neural rendering for clothed human avatars. Since the seminal work of Neural Radiance Fields (NeRF) [27], there has been a surge of research on neural rendering for clothed human avatars. The majority of the works focus on either learning a NeRF conditioned on human body related encodings [31, 48, 61], or learning a canonical NeRF representation and warp camera rays from the observation space to the canonical space to query radiance and density values from the canonical NeRF [8, 12, 13, 20, 35, 38, 57, 58, 70]. Most of these works rely on large multi-layer perceptrons (MLPs) to model the underlying neural radiance field, which are computationally expensive, resulting in prolonged training (days) and inference (seconds) time." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.893, + 0.903 + ], + "angle": 0, + "content": "With recent advances in accelerated data structures for neural fields, there has been several works targeting fast inference and fast training of NeRFs for clothed humans. [12] proposes to use iNGP [29] as the underlying representation for articulated NeRFs, which enables fast training (less than 5 minutes) and interactive rendering speed (15 FPS) but ignores pose-dependent non-rigid deformations. [7] also utilizes iNGP and represents non-rigid deformations in the UV space, which enables fast training and modeling of pose-dependent non-rigid deformations. However, as we will show in our experiments, [7]'s parametrization of non-rigid deformations result in blurry renderings. [6] proposes to generate a pose-dependent UV volume for efficient free-view synthesis. However, their UV-volume generation process is slow (20 FPS), making novel pose synthesis less efficient (only 14 FPS). [17] also employs UV-based rendering" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.958 + ], + "angle": 0, + "content": "5021" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.242 + ], + "angle": 0, + "content": "to achieve real-time rendering of dynamic clothed humans, but only works on dense multi-view inputs. Extending [69], [54, 72] applied Fourier transform for compressing human performance capture data, albeit with limitations on dense multi-view data (60-80 views) and non-generalizability of the Fourier basis representation to unseen poses beyond the training dataset. In contrast to all these works, our method achieves state-of-the-art rendering quality and speed with less than 30 minutes of training time from a single monocular video input." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.246, + 0.47, + 0.367 + ], + "angle": 0, + "content": "Dynamic 3D gaussians. Point-based rendering [40, 44, 49, 62, 71, 73, 74] has also been shown to be an efficient alternative to NeRFs for fast inference and training. Extending point cloud to 3D Gaussians, 3D Gaussian Splatting (3DGS) [14] models the rendering process as splatting a set of 3D Gaussians onto image plane via alpha blending, achieving state-of-the-art rendering quality with real-time inference speed and fast training given multi-view inputs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.37, + 0.473, + 0.702 + ], + "angle": 0, + "content": "Given the great performance on both quality and speed of 3DGS, a rich set of works has further explored the 3D Gaussian representation for dynamic scene reconstruction. 
[14] proposed to optimize the position and shape of each 3D Gaussian on a frame-by-frame basis and simultaneously performed 6-DOF dense tracking for free. Their model size, however, increases with the temporal dimension. [59, 63] maintain a single set of 3D Gaussians in a canonical space and deform them to each frame via learning a time-dependent deformation field, producing state-of-the-art results in terms of both rendering quality and speed. [64] augments 3D Gaussians with temporal dimension into 4D Gaussian primitives to approximate the underlying spatiotemporal 4D volume of the dynamic scene. While such methods show promising results, they are only applicable to either synthetic datasets with fast camera movement and slow object motion or forward-facing real scenes with limited object movements, thus unable to handle the immense displacement of the articulated human body. To address this problem, our approach utilizes a statistical human body model [24] for articulation and applies regularization to reduce the overfitting of the deformation field." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Concurrent works. Concurrent with our method, many recent works also seek to combine 3DGS with human articulation prior for avatar reconstruction. We provide a comparison of our approach to concurrent works in Tab. 2. D3GA [75] proposed to embed 3D Gaussians in tetrahedral cages and utilize cage deformations for drivable avatar animation. However, they use dense calibrated multi-view videos as input and require an additional 3D scan to generate the tetrahedral mesh template. Li et al. [21] focused on generating avatars with a detailed appearance from multiview videos by post-processing radiance field renderings with 2D CNNs, which limits their rendering speed. Along with [11, 28], these works fail to achieve fast training with" + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.091, + 0.862, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.558, + 0.331, + 0.835, + 0.345 + ], + "angle": 0, + "content": "Table 2. Comparison to Concurrent Works." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.377, + 0.895, + 0.575 + ], + "angle": 0, + "content": "relatively complex pipelines. Similar to our approach, Ye et al. [68] deforms 3D Gaussians in canonical space via pose-dependent deformation and rigid articulation, but they still require 2 hours for training and do not show results on monocular inputs. HUGS [16] learns a background model along with the animatable human avatar, but they fail to take pose-dependent cloth deformation into account. Several other works [10, 18, 23] also neglect pose-dependent cloth deformation to achieve even faster training (in 5 minutes) and rendering \\((150 + \\mathrm{FPS})\\). We argue that our method strikes a good balance between quality and speed compared to concurrent works, as being the only method simultaneously achieving the properties listed in Tab. 2." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.595, + 0.64, + 0.612 + ], + "angle": 0, + "content": "3. Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.623, + 0.895, + 0.76 + ], + "angle": 0, + "content": "Linear Blend Skinning. 
To model human articulations, a widely adopted paradigm is to represent geometry and appearance in a shared canonical space [8, 12, 13, 20, 35, 38, 57, 58] and use Linear Blend Skinning (LBS) [2, 9, 24, 33, 34, 60] to deform the parametric human body under arbitrary poses. Given a point \\(\\mathbf{x}_c\\) in canonical space, the LBS function takes a set of rigid bone transformations \\(\\{\\mathbf{B}_b\\}_{b=1}^B\\) and computes its correspondence \\(\\mathbf{x}_o\\) in the observation space:" + }, + { + "type": "equation", + "bbox": [ + 0.61, + 0.777, + 0.892, + 0.794 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {\\mathbf {o}} = L B S _ {\\sigma_ {w}} \\left(\\mathbf {x} _ {c}; \\left\\{\\mathbf {B} _ {b} \\right\\}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Assuming an underlying SMPL model, we use a total of \\(B = 24\\) bone transformations, each represented by a \\(4 \\times 4\\) rotation-translation matrix, which are then linearly blended via a set of skinning weights \\(\\mathbf{w} \\in [0,1]^B\\), s.t. \\(\\sum_{b=1}^{B} \\mathbf{w}_b = 1\\), modeled by a coordinate-based neural skinning field \\(f_{\\sigma_w}(\\mathbf{x}_c) \\in [4,5,26,45,56]\\). The forward linear blend skin" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5022" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.347, + 0.106 + ], + "angle": 0, + "content": "ning function can thus be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.095, + 0.117, + 0.47, + 0.145 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {o} = L B S _ {\\sigma_ {w}} \\left(\\mathbf {x} _ {c}; \\{\\mathbf {B} _ {b} \\}\\right) = \\sum_ {b = 1} ^ {B} f _ {\\sigma_ {w}} \\left(\\mathbf {x} _ {c}\\right) _ {b} \\mathbf {B} _ {b} \\mathbf {x} _ {c} \\quad (2)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.155, + 0.469, + 0.244 + ], + "angle": 0, + "content": "Compared to prior works that search canonical correspondences of points in observation space [12, 57, 58], our method requires no inverse skimming which is typically difficult to compute and often leads to multiple solutions [4, 5]. A similar technique has been employed in [73] for face avatar modeling." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.245, + 0.47, + 0.411 + ], + "angle": 0, + "content": "3D Gaussian Splatting. 3DGS [14] utilizes a set of 3D Gaussian primitives \\(\\{\\mathcal{G}\\}\\) as static scene representation which can be rendered in real-time via differentiable rasterization. Each 3D Gaussian \\(\\mathcal{G}\\) is defined by its mean \\(\\mathbf{x}\\), covariance \\(\\boldsymbol{\\Sigma}\\), opacity \\(\\alpha\\) and view-dependent color represented by spherical harmonics coefficients \\(\\mathbf{f}\\). To ensure positive semi-definiteness, the covariance matrix is represented by a scaling matrix \\(\\mathbf{S}\\) and rotation matrix \\(\\mathbf{R}\\). In practice, we store the diagonal vector \\(\\mathbf{s} \\in \\mathbb{R}^3\\) of the scaling matrix and a quaternion vector \\(\\mathbf{q} \\in \\mathbb{R}^4\\) to represent rotation, which can be trivially converted to a valid covariance matrix." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.412, + 0.469, + 0.549 + ], + "angle": 0, + "content": "The 3D Gaussians are projected to the 2D image plane during the rendering process and accumulated via alpha blending. 
Given a viewing transformation \\(\\mathbf{W}\\) and the Jacobian of the affine approximation of the projective transformation \\(\\mathbf{J}\\), the 2D covariance matrix in camera coordinate [76] is given by \\(\\boldsymbol{\\Sigma}^{\\prime} = (\\mathbf{J}\\mathbf{W}\\boldsymbol{\\Sigma}\\mathbf{W}^{T}\\mathbf{J}^{T})_{1:2,1:2}\\). The pixel color \\(C\\) is thus computed by blending 3D Gaussian splats that overlap at the given pixel, sorted according to their depth:" + }, + { + "type": "equation", + "bbox": [ + 0.157, + 0.56, + 0.469, + 0.594 + ], + "angle": 0, + "content": "\\[\nC = \\sum_ {i} \\left(\\alpha_ {i} ^ {\\prime} \\prod_ {j = 1} ^ {i - 1} \\left(1 - \\alpha_ {j} ^ {\\prime}\\right)\\right) c _ {i} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.604, + 0.469, + 0.664 + ], + "angle": 0, + "content": "where \\(\\alpha_{i}^{\\prime}\\) denotes the learned opacity \\(\\alpha_{i}\\) weighted by the probability density of \\(i\\)-th projected 2D Gaussian at the target pixel location. \\(c\\) denotes the view-dependent color computed from stored SH coefficients \\(\\mathbf{f}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.665, + 0.469, + 0.754 + ], + "angle": 0, + "content": "The 3D Gaussians \\(\\{\\mathcal{G}\\}\\) are optimized via a photometric loss. During optimization, 3DGS adaptively controls the number of 3D Gaussians via periodic densification and pruning, achieving self-adaptive convergence to an optimal density distribution of 3D Gaussians that well represents the scene." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.77, + 0.176, + 0.785 + ], + "angle": 0, + "content": "4. Methods" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.469, + 0.902 + ], + "angle": 0, + "content": "We illustrate the pipeline of our method in Fig. 2. The input to our method is a monocular video with a calibrated camera, fitted SMPL parameters, and foreground masks. Our method optimizes a set of 3D Gaussians in canonical space, which is then deformed to the observation space and rendered from the given camera. For a set of 3D Gaussians \\(\\{\\mathcal{G}^{(i)}\\}_{i = 1}^{N}\\), we store the following properties at each point:" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.212 + ], + "angle": 0, + "content": "position \\(\\mathbf{x}\\), scaling factor \\(\\mathbf{s}\\), rotation quaternion \\(\\mathbf{q}\\), opacity \\(\\alpha\\) and a color feature vector \\(\\mathbf{f}\\). We start by randomly sampling \\(N = 50k\\) points on the canonical SMPL [24] mesh surface as initialization of canonical 3D Gaussians \\(\\{\\mathcal{G}_c\\}\\). Inspired by HumanNeRF [58], we decompose the complex human deformation into a non-rigid part that encodes pose-dependent cloth deformation, and a rigid transformation controlled by the human skeleton." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.22, + 0.838, + 0.236 + ], + "angle": 0, + "content": "4.1. 
Pose-dependent Non-rigid Deformation" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.243, + 0.842, + 0.258 + ], + "angle": 0, + "content": "We formulate the non-rigid deformation module as:" + }, + { + "type": "equation", + "bbox": [ + 0.613, + 0.265, + 0.892, + 0.282 + ], + "angle": 0, + "content": "\\[\n\\left\\{\\mathcal {G} _ {d} \\right\\} = \\mathcal {F} _ {\\theta_ {n r}} \\left(\\left\\{\\mathcal {G} _ {c} \\right\\}; \\mathcal {Z} _ {p}\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.288, + 0.892, + 0.41 + ], + "angle": 0, + "content": "where \\(\\{\\mathcal{G}_d\\}\\) represents the non-rigidly deformed 3D Gaussians. \\(\\theta_{nr}\\) represents the learnable parameters of the nonrigid deformation module. \\(\\mathcal{Z}_p\\) is a latent code which encodes SMPL pose and shape \\((\\theta, \\beta)\\) using a lightweight hierarchical pose encoder [26]. Specifically, the deformation network \\(f_{\\theta_{nr}}\\) takes the canonical position \\(\\mathbf{x}_c\\), the pose latent code \\(\\mathcal{Z}_p\\) as inputs and outputs the offsets of the Gaussian's position, scale, rotation, along with a feature vector \\(\\mathbf{z}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.593, + 0.417, + 0.892, + 0.434 + ], + "angle": 0, + "content": "\\[\n\\left(\\delta \\mathbf {x}, \\delta \\mathbf {s}, \\delta \\mathbf {q}, \\mathbf {z}\\right) = f _ {\\theta_ {n r}} \\left(\\mathbf {x} _ {c}; \\mathcal {Z} _ {p}\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.44, + 0.892, + 0.515 + ], + "angle": 0, + "content": "We use a multi-level hash grid [29] to encode 3D positions as spatial features, which are then concatenated with the pose latent code \\(\\mathcal{Z}_p\\) and fed into a shallow MLP with 2 hidden layers and a width of 128. The canonical Gaussian is deformed by:" + }, + { + "type": "equation", + "bbox": [ + 0.605, + 0.523, + 0.891, + 0.539 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {d} = \\mathbf {x} _ {c} + \\delta \\mathbf {x} \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.609, + 0.542, + 0.891, + 0.557 + ], + "angle": 0, + "content": "\\[\n\\mathbf {s} _ {d} = \\mathbf {s} _ {c} \\cdot \\exp (\\delta \\mathbf {s}) \\tag {7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.607, + 0.561, + 0.891, + 0.577 + ], + "angle": 0, + "content": "\\[\n\\mathbf {q} _ {d} = \\mathbf {q} _ {c} \\cdot [ 1, \\delta q _ {1}, \\delta q _ {2}, \\delta q _ {3} ] \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.584, + 0.892, + 0.645 + ], + "angle": 0, + "content": "note that the \\( \\cdot \\) operator on quaternions is equivalent to multiplying the two rotation matrices derived by the two quaternions. Since the quaternion \\( [1,0,0,0] \\) corresponds to the identity rotation matrix, we have \\( \\mathbf{q}_d = \\mathbf{q}_c \\) when \\( \\delta \\mathbf{q} = \\mathbf{0} \\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.652, + 0.704, + 0.667 + ], + "angle": 0, + "content": "4.2. 
Rigid Transformation" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.675, + 0.892, + 0.719 + ], + "angle": 0, + "content": "We further transform the non-rigidly deformed 3D Gaussians \\(\\{\\mathcal{G}_d\\}\\) to the observation space via a rigid transformation module:" + }, + { + "type": "equation", + "bbox": [ + 0.597, + 0.726, + 0.891, + 0.745 + ], + "angle": 0, + "content": "\\[\n\\left\\{\\mathcal {G} _ {o} \\right\\} = \\mathcal {F} _ {\\theta_ {r}} \\left(\\left\\{\\mathcal {G} _ {d} \\right\\}; \\left\\{\\mathbf {B} _ {\\mathbf {b}} \\right\\} _ {b = 1} ^ {B}\\right) \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.751, + 0.892, + 0.795 + ], + "angle": 0, + "content": "where a skinning MLP \\( f_{\\theta_r} \\) is learned to predict skinning weights at the position \\( \\mathbf{x}_d \\). We transform the position and the rotation matrix of 3D Gaussians via forward LBS:" + }, + { + "type": "equation", + "bbox": [ + 0.616, + 0.799, + 0.891, + 0.826 + ], + "angle": 0, + "content": "\\[\n\\mathbf {T} = \\sum_ {b = 1} ^ {B} f _ {\\theta_ {r}} \\left(\\mathbf {x} _ {d}\\right) _ {b} \\mathbf {B} _ {b} \\tag {10}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.612, + 0.829, + 0.891, + 0.844 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {o} = \\mathbf {T} \\mathbf {x} _ {d} \\tag {11}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.609, + 0.848, + 0.891, + 0.864 + ], + "angle": 0, + "content": "\\[\n\\mathbf {R} _ {o} = \\mathbf {T} _ {1: 3, 1: 3} \\mathbf {R} _ {d} \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "where \\(\\mathbf{R}_d\\) is the rotation matrix derived from the quaternion \\(\\mathbf{q}_d\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5023" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.09, + 0.891, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.242, + 0.896, + 0.354 + ], + "angle": 0, + "content": "Figure 2. Our framework for creating animatable avatars from monocular videos. We first initialize a set of 3D Gaussians in the canonical space via sampling points from a SMPL mesh. Each canonical Gaussian \\(\\mathcal{G}_c\\) goes through a non-rigid deformation module \\(\\mathcal{F}_{\\theta_{nr}}\\) conditioned on an encoded pose vector \\(\\mathcal{Z}_p\\) (Sec. 4.1) to account for pose-dependent non-rigid cloth deformation. This module outputs a non-rigidly deformed 3D Gaussian \\(\\mathcal{G}_d\\) and a pose-dependent latent feature \\(\\mathbf{z}\\). The non-rigidly deformed 3D Gaussian \\(\\mathcal{G}_d\\) is transformed to the observation space \\(\\mathcal{G}_o\\) (Sec. 4.2) via LBS with learned neural skinning \\(\\mathcal{F}_{\\theta_r}\\). The Gaussian feature \\(\\mathbf{f}\\), the pose-dependent feature \\(\\mathbf{z}\\), a per-frame latent code \\(\\mathcal{Z}_c\\), and the ray direction \\(\\mathbf{d}\\) are propagated through a small MLP \\(\\mathcal{F}_{\\theta_c}\\) to decode the view-dependent color \\(c\\) for each 3D Gaussian. Finally, the observation space 3D Gaussians \\(\\{\\mathcal{G}_o\\}\\) and their respective color values are accumulated via differentiable Gaussian rasterization (Eq. (3)) to render the image." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.379, + 0.204, + 0.394 + ], + "angle": 0, + "content": "4.3. 
Color MLP" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.402, + 0.471, + 0.553 + ], + "angle": 0, + "content": "Prior works [59, 63, 64] follow the convention of 3DGS [14], which stores spherical harmonics coefficients per 3D Gaussian to encode the view-dependent color. Treating the stored color feature \\(\\mathbf{f}\\) as spherical harmonics coefficients, the color of a 3D Gaussian can be computed by the dot product of the spherical harmonics basis and the learned coefficients: \\(c = \\langle \\pmb{\\gamma}(\\mathbf{d}),\\mathbf{f}\\rangle\\), where \\(\\mathbf{d}\\) represents the viewing direction, derived from the relative position of the 3D Gaussian wrt. the camera center and \\(\\pmb{\\gamma}\\) denotes the spherical harmonics basis function." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.554, + 0.47, + 0.718 + ], + "angle": 0, + "content": "While conceptually simple, we argue that this approach does not suit our monocular setting. Since only one camera view is provided during training, the viewing direction in the world space is fixed, leading to poor generalization to unseen test views. Similar to [38], we use the inverse rigid transformation from Sec. 4.2 to canonicalize the viewing direction: \\(\\hat{\\mathbf{d}} = \\mathbf{T}_{1:3,1:3}^{-1}\\mathbf{d}\\), where \\(\\mathbf{T}\\) is the forward transformation matrix defined in Eq. (10). Theoretically, canonicalizing viewing direction also promotes consistency of the specular component of canonical 3D Gaussians under rigid transformations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.471, + 0.902 + ], + "angle": 0, + "content": "On the other hand, we observe that the pixel color of the rendered clothed human avatar also largely depends on local deformation. Local fine wrinkles on clothes, for instance, would cause self-occlusion that heavily affects shading. Following [37], we also learn a per-frame latent code \\(\\mathcal{Z}_c\\) to compensate for different environment light effects across frames caused by the global movement of the subject. Hence, instead of learning spherical harmonic coefficients, we enhance color modeling by learning a neural network that takes per-Gaussian color feature vector \\(\\mathbf{f} \\in \\mathbb{R}^{32}\\), local pose-dependent feature vector \\(\\mathbf{z} \\in \\mathbb{R}^{16}\\) from the non-rigid deformation network, per-frame latent code \\(\\mathcal{Z}_c \\in \\mathbb{R}^{16}\\), and" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.38, + 0.893, + 0.424 + ], + "angle": 0, + "content": "spherical harmonics basis of canonicalized viewing direction \\(\\gamma (\\dot{\\mathbf{d}})\\) with a degree of 3 as input and predicts the color of the 3D Gaussian:" + }, + { + "type": "equation", + "bbox": [ + 0.615, + 0.441, + 0.893, + 0.46 + ], + "angle": 0, + "content": "\\[\nc = \\mathcal {F} _ {\\theta_ {c}} (\\mathbf {f}, \\mathbf {z}, \\mathcal {Z} _ {c}, \\boldsymbol {\\gamma} (\\hat {\\mathbf {d}})) \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.475, + 0.892, + 0.522 + ], + "angle": 0, + "content": "In practice, we find a tiny MLP with one 64-dimension hidden layer sufficient to model the appearance. Increasing the size of the MLP leads to overfitting and performance drop." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.538, + 0.64, + 0.554 + ], + "angle": 0, + "content": "4.4. 
Optimization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.563, + 0.892, + 0.624 + ], + "angle": 0, + "content": "We jointly optimize canonical 3D Gaussians \\(\\{\\mathcal{G}_c\\}\\) and the parameters \\(\\theta_{nr},\\theta_r,\\theta_c\\) of the non-rigid deformation network, the skinning network and the color network, respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.627, + 0.892, + 0.748 + ], + "angle": 0, + "content": "Pose correction. SMPL [24] parameter fittings from images can be inaccurate. To address this, we additionally optimize the per-sequence shape parameter as well as per-frame translation, global rotation, and local joint rotations. We initialize these parameters \\(\\theta_{p}\\) with the given SMPL parameters and differentiably derive the bone transformations \\(\\{\\mathbf{B}_b\\}\\) as input to the network, enabling direct optimization via backpropagation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.895, + 0.902 + ], + "angle": 0, + "content": "As-isometric-as-possible regularization. With monocular video as input, only one view of the human is visible in each frame, making it extremely hard to generalize to novel views and novel poses. Considering the sparsity of input, the non-rigid deformation network is highly underconstrained, resulting in noisy deformation from the canonical space to the observation space. Inspired by [41], we leverage the as-isometric-as-possible constraint [15] to restrict neighboring 3D Gaussian centers to preserve a similar distance after deformation. We further augment the con" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5024" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.078, + 0.092, + 0.342, + 0.106 + ], + "angle": 0, + "content": "strain to Gaussian covariance matrices:" + }, + { + "type": "equation", + "bbox": [ + 0.108, + 0.112, + 0.469, + 0.171 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {i s o p o s}} = \\sum_ {i = 1} ^ {N} \\sum_ {j \\in \\mathcal {N} _ {k} (i)} \\left| d \\left(\\mathbf {x} _ {c} ^ {(i)}, \\mathbf {x} _ {c} ^ {(j)}\\right) - d \\left(\\mathbf {x} _ {o} ^ {(i)}, \\mathbf {x} _ {o} ^ {(j)}\\right) \\right| \\tag {14}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.095, + 0.176, + 0.469, + 0.235 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {i s o c o v} = \\sum_ {i = 1} ^ {N} \\sum_ {j \\in \\mathcal {N} _ {k} (i)} \\left| d \\left(\\boldsymbol {\\Sigma} _ {c} ^ {(i)}, \\boldsymbol {\\Sigma} _ {c} ^ {(j)}\\right) - d \\left(\\boldsymbol {\\Sigma} _ {o} ^ {(i)}, \\boldsymbol {\\Sigma} _ {o} ^ {(j)}\\right) \\right| \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.242, + 0.469, + 0.287 + ], + "angle": 0, + "content": "where \\(N\\) denotes the number of 3D Gaussians. \\(\\mathcal{N}_k\\) denotes the k-nearest neighbourhood, and we set \\(k\\) to 5. We use L2-norm as our distance function \\(d(\\cdot ,\\cdot)\\)" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.287, + 0.469, + 0.379 + ], + "angle": 0, + "content": "Loss function. Our full loss function consists of a RGB loss \\(\\mathcal{L}_{rgb}\\), a mask loss \\(\\mathcal{L}_{mask}\\), a skinning weight regularization loss \\(\\mathcal{L}_{skin}\\) and the as-isometric-as-possible regularization loss for both position and covariance \\(\\mathcal{L}_{ispos},\\mathcal{L}_{isocov}\\). For further details of the loss definition and respective weights, please refer to the Supp.Mat." 
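In code, both as-isometric-as-possible terms reduce to comparing pairwise distances over a fixed k-nearest-neighbour graph built once in canonical space, applied to the Gaussian means (Eq. 14) and to their covariances (Eq. 15). The following is a minimal PyTorch-style sketch under stated assumptions: the function names are illustrative, k = 5 as in the text, and the sums in Eqs. (14)-(15) are averaged here; it is not the authors' released implementation.

```python
import torch

def knn_indices(x_c, k=5):
    # k nearest neighbours of each canonical Gaussian centre (excluding itself).
    dist = torch.cdist(x_c, x_c)                      # (N, N) pairwise distances
    dist.fill_diagonal_(float("inf"))
    return dist.topk(k, largest=False).indices        # (N, k)

def aiap_losses(x_c, x_o, cov_c, cov_o, k=5):
    """As-isometric-as-possible regularizers, cf. Eqs. (14)-(15).

    x_c, x_o     : (N, 3)    canonical / observation-space means
    cov_c, cov_o : (N, 3, 3) canonical / observation-space covariances
    """
    idx = knn_indices(x_c, k)                         # neighbourhoods fixed in canonical space

    # L_isopos: neighbouring centres should keep their mutual distances after deformation.
    d_c = (x_c[:, None, :] - x_c[idx]).norm(dim=-1)   # (N, k)
    d_o = (x_o[:, None, :] - x_o[idx]).norm(dim=-1)
    l_isopos = (d_c - d_o).abs().mean()

    # L_isocov: the same constraint on the covariance matrices, using the
    # L2 (Frobenius) norm as the distance function d(.,.).
    c_c, c_o = cov_c.flatten(1), cov_o.flatten(1)     # (N, 9)
    dS_c = (c_c[:, None, :] - c_c[idx]).norm(dim=-1)  # (N, k)
    dS_o = (c_o[:, None, :] - c_o[idx]).norm(dim=-1)
    l_isocov = (dS_c - dS_o).abs().mean()

    return l_isopos, l_isocov
```

In training, such terms would simply be added to the RGB, mask, and skinning losses with their respective weights (see Supp.Mat. for the exact weighting).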
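For completeness, the canonical-to-observation warp of Secs. 4.1-4.2 (Eqs. 5-12) can be sketched in the same style. This is only a schematic reading of the equations: `non_rigid_net` and `skinning_net` are placeholder callables standing in for \(\mathcal{F}_{\theta_{nr}}\) and \(f_{\theta_r}\), `quat_to_rotmat` is a standard helper defined here, and all names are hypothetical rather than the released code.

```python
import torch
import torch.nn.functional as F

def quat_to_rotmat(q):
    # (N, 4) quaternion (w, x, y, z) -> (N, 3, 3) rotation matrix.
    w, x, y, z = F.normalize(q, dim=-1).unbind(-1)
    return torch.stack([
        1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y),
        2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x),
        2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y),
    ], dim=-1).reshape(-1, 3, 3)

def warp_gaussians(x_c, s_c, q_c, z_p, bone_T, non_rigid_net, skinning_net):
    """Canonical -> observation-space deformation of 3D Gaussians (Eqs. 5-12).

    x_c (N,3), s_c (N,3), q_c (N,4) : canonical means / scales / rotations
    z_p : encoded pose latent; bone_T (B,4,4) : rigid bone transformations
    """
    # 4.1 Pose-dependent non-rigid deformation (Eqs. 5-8).
    dx, ds, dq, z = non_rigid_net(x_c, z_p)
    x_d = x_c + dx                                             # Eq. (6)
    s_d = s_c * torch.exp(ds)                                  # Eq. (7)
    dq_full = torch.cat([torch.ones_like(dq[:, :1]), dq], -1)  # [1, dq1, dq2, dq3]
    R_d = quat_to_rotmat(q_c) @ quat_to_rotmat(dq_full)        # Eq. (8), as matrices

    # 4.2 Rigid articulation via forward LBS (Eqs. 9-12).
    w = skinning_net(x_d)                                      # (N, B) skinning weights
    T = torch.einsum('nb,bij->nij', w, bone_T)                 # Eq. (10)
    x_o = torch.einsum('nij,nj->ni', T[:, :3, :3], x_d) + T[:, :3, 3]  # Eq. (11)
    R_o = T[:, :3, :3] @ R_d                                   # Eq. (12)
    return x_o, s_d, R_o, z
```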
+ }, + { + "type": "title", + "bbox": [ + 0.078, + 0.39, + 0.21, + 0.407 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.416, + 0.469, + 0.552 + ], + "angle": 0, + "content": "In this section, we first compare the proposed approach with recent state-of-the-art methods [7, 12, 37, 57, 58], demonstrating that our proposed approach achieves superior rendering quality in terms of LPIPS, which is more informative under monocular setting, while achieving fast training and real-time rendering speed, respectively \\(400\\mathrm{x}\\) and \\(250\\mathrm{x}\\) faster than the most competitive baseline [58]. We then systematically ablate each component of the proposed model, showing their effectiveness in better rendering quality." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.56, + 0.262, + 0.575 + ], + "angle": 0, + "content": "5.1. Evaluation Dataset" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.583, + 0.469, + 0.734 + ], + "angle": 0, + "content": "ZJU-MoCap [37]. This is the major testbed for quantitative evaluation. We pick six sequences (377, 386, 387, 392, 393, 394) from the ZJU-MoCap dataset and follow the training/test split of HumanNeRF [58]. The motion of these sequences is repetitive and does not contain a sufficient number of poses for meaningful novel pose synthesis benchmarks. Thus we focus on evaluating novel view synthesis (PSNR/SSIM/LPIPS) and show qualitative results for animation on out-of-distribution poses. Note that LPIPS in all the tables are scaled up by 1000." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.734, + 0.469, + 0.84 + ], + "angle": 0, + "content": "PeopleSnapshot [1]. We also conduct experiments on 4 sequences of the PeopleSnapshot dataset, which includes monocular videos of people rotating in front of a camera. We follow the data split of InstantAvatar [12] and compare to [12] on novel pose synthesis. For fair comparison, we use the provided poses optimized by Anim-NeRF [35] and do not further optimize it during our training." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.848, + 0.323, + 0.864 + ], + "angle": 0, + "content": "5.2. Comparison with Baselines" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "We compare our approach with NeuralBody [37], HumanNeRF [58], MonoHuman [70], ARAH [57] and Instant" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.438 + ], + "angle": 0, + "content": "NVR [7] under monocular setup on ZJU-MoCap. The quantitative results are reported in Tab. 3. NeuralBody is underperforming compared to other approaches. Overall, our proposed approach produces comparable performance to ARAH on PSNR and SSIM, while significantly outperforming all the baselines on LPIPS. We argue that LPIPS is more informative compared to the other two metrics, as it is very difficult to reproduce exactly the ground-truth appearance for novel views due to the monocular setting and the stochastic nature of cloth deformations. Meanwhile, our method is also capable of fast training and renders at a real-time rendering frame rate, being 400 times faster for training (30 GPU minutes vs. 8 GPU days) and \\(250 - 500\\) times faster for inference (50 FPS vs. 0.1 FPS for ARAH and 0.2 FPS for HumanNeRF). 
We also note that Instant-NVR trains on a refined version of ZJU-MoCap, which provides refined camera parameters, SMPL fittings, and more accurate instance masks with part-level annotation that is essential for running their method. Hence their metrics are not directly comparable to other methods in Tab. 3. We train our model on the refined dataset for a fair quantitative comparison, which clearly shows that our method outperforms Instant-NVR in most scenarios." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.44, + 0.892, + 0.59 + ], + "angle": 0, + "content": "Qualitative comparisons on novel view synthesis can be found in Fig. 3. We observe that our method preserves sharper details compared to ARAH and does not produce fluctuating artifacts as in HumanNeRF caused by noisy deformation fields. Instant-NVR produces an oversmooth appearance and tends to generate noisy limbs. Additionally, we animate our learned avatars with pose sequences from AMASS [25] and AIST++ [19], shown in the rightmost column of Fig. 3. This shows that our model could generalize to extreme out-of-distribution poses." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.592, + 0.892, + 0.652 + ], + "angle": 0, + "content": "For PeopleSnapshot, we report the quantitative comparison against InstantAvatar [12] in Tab. 4. Our approach significantly outperforms InstantAvatar on PSNR and LPIPS, while being more than \\(3\\mathrm{x}\\) faster during inference." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.664, + 0.655, + 0.681 + ], + "angle": 0, + "content": "5.3. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.892, + 0.809 + ], + "angle": 0, + "content": "We study the effect of various components of our method on the ZJU-MoCap dataset, including the color MLP, the as-isometric-as-possible regularization and the pose correction module. The average metrics over 6 sequences are reported in Tab. 5. We show that all proposed techniques are required to reach the optimal performance, best reflected by LPIPS which is the most informative metric for novel view synthesis evaluation under a monocular setup." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We further show qualitative comparison on out-of-distribution poses in Fig. 4, which demonstrates that the as-isometric-as-possible loss helps to constrain the 3D Gaussians to comply with consistent movement during deformation, hence improving generalization on novel poses. Albeit marginally, each individual component contributes to a" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5025" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.129, + 0.088, + 0.845, + 0.835 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.857, + 0.897, + 0.887 + ], + "angle": 0, + "content": "Figure 3. Qualitative Comparison on ZJU-MoCap [37]. We show the results for both novel view synthesis and novel pose animation of all sequences on ZJU-MoCap. Our method produces high-quality results that preserve cloth details even on out-of-distribution poses." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "5026" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.895, + 0.149 + ], + "angle": 0, + "content": "Table 3. Quantitative Results on ZJU-MoCap [37]. 
We outperform both competitive baselines [57, 58] in terms of LPIPS while being two orders of magnitude faster in training and rendering. Cell color indicates best and second best. Instant-NVR [7] is trained and tested on a refined version of ZJU-MoCap, thus is not directly comparable to other baselines quantitatively. We train our model on the refined dataset for fair quantitative comparison to Instant-NVR and the metrics are reported in the last two rows of the table." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.156, + 0.894, + 0.257 + ], + "angle": 0, + "content": "
| Method | GPU↓ | FPS↑ | 377: PSNR↑ / SSIM↑ / LPIPS↓ | 386: PSNR↑ / SSIM↑ / LPIPS↓ | 387: PSNR↑ / SSIM↑ / LPIPS↓ | 392: PSNR↑ / SSIM↑ / LPIPS↓ | 393: PSNR↑ / SSIM↑ / LPIPS↓ | 394: PSNR↑ / SSIM↑ / LPIPS↓ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| NeuralBody [37] | 12h | 2 | 29.11 / 0.9674 / 40.95 | 30.54 / 0.9678 / 46.43 | 27.00 / 0.9518 / 59.47 | 30.10 / 0.9642 / 53.27 | 28.61 / 0.9590 / 59.05 | 29.10 / 0.9593 / 54.55 |
| HumanNeRF [58] | >8d | 0.2 | 30.41 / 0.9743 / 24.06 | 33.20 / 0.9752 / 28.99 | 28.18 / 0.9632 / 35.58 | 31.04 / 0.9705 / 32.12 | 28.31 / 0.9603 / 36.72 | 30.31 / 0.9642 / 32.89 |
| MonoHuman [70] | 4d | 0.1 | 29.12 / 0.9727 / 26.58 | 32.94 / 0.9695 / 36.04 | 27.93 / 0.9601 / 41.76 | 29.50 / 0.9635 / 39.45 | 27.64 / 0.9566 / 43.17 | 29.15 / 0.9595 / 38.08 |
| ARAH [57] | 8d | 0.1 | 30.85 / 0.9800 / 26.60 | 33.50 / 0.9781 / 31.40 | 28.49 / 0.9656 / 40.43 | 32.02 / 0.9742 / 35.28 | 28.77 / 0.9645 / 42.30 | 29.46 / 0.9632 / 40.76 |
| Ours | 0.5h | 50 | 30.64 / 0.9774 / 20.88 | 33.63 / 0.9773 / 25.77 | 28.33 / 0.9642 / 34.24 | 31.66 / 0.9730 / 30.14 | 28.88 / 0.9635 / 35.26 | 30.54 / 0.9661 / 31.21 |
| Instant-NVR* [7] | 0.1h | 3 | 31.28 / 0.9789 / 25.37 | 33.71 / 0.9770 / 32.81 | 28.39 / 0.9640 / 45.97 | 31.85 / 0.9730 / 39.47 | 29.56 / 0.9641 / 46.16 | 31.32 / 0.9680 / 40.63 |
| Ours* | 0.5h | 50 | 30.96 / 0.9778 / 19.85 | 33.94 / 0.9784 / 24.70 | 28.40 / 0.9656 / 32.96 | 32.10 / 0.9739 / 29.20 | 29.30 / 0.9645 / 34.03 | 30.74 / 0.9662 / 31.00 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.319, + 0.268, + 0.651, + 0.284 + ], + "angle": 0, + "content": "Table 4. Quantitative Results on PeopleSnapshot [1]." + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.294, + 0.885, + 0.364 + ], + "angle": 0, + "content": "
| Method | GPU↓ | FPS↑ | female-3-casual: PSNR↑ / SSIM↑ / LPIPS↓ | female-4-casual: PSNR↑ / SSIM↑ / LPIPS↓ | male-3-casual: PSNR↑ / SSIM↑ / LPIPS↓ | male-4-casual: PSNR↑ / SSIM↑ / LPIPS↓ |
| --- | --- | --- | --- | --- | --- |--- |
| InstantAvatar [12] | 5 min. | 15 | 27.66 / 0.9709 / 21.00 | 29.11 / 0.9683 / 16.70 | 29.53 / 0.9716 / 15.50 | 27.67 / 0.9626 / 30.7 |
| Ours | 45 min. | 50 | 30.57 / 0.9581 / 20.86 | 33.16 / 0.9678 / 15.74 | 34.28 / 0.9724 / 14.92 | 30.22 / 0.9653 / 23.05 |
" + }, + { + "type": "image", + "bbox": [ + 0.105, + 0.38, + 0.275, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.277, + 0.381, + 0.47, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.242, + 0.527, + 0.319, + 0.542 + ], + "angle": 0, + "content": "Full model" + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.38, + 0.671, + 0.522 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.675, + 0.381, + 0.864, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.609, + 0.528, + 0.744, + 0.544 + ], + "angle": 0, + "content": "w/o \\(\\mathcal{L}_{isocov}\\), \\(\\mathcal{L}_{isopos}\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.57, + 0.855, + 0.586 + ], + "angle": 0, + "content": "Figure 4. Ablation Study on as-isometric-as-possible regularization, which removes the artifacts on highly articulated poses." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.609, + 0.471, + 0.651 + ], + "angle": 0, + "content": "Table 5. Ablation Study on ZJU-MoCap [37]. The proposed model achieves the lowest LPIPS, demonstrating the effectiveness of all components." + }, + { + "type": "table", + "bbox": [ + 0.099, + 0.661, + 0.449, + 0.764 + ], + "angle": 0, + "content": "
| Model | PSNR↑ | SSIM↑ | LPIPS↓ |
| --- | --- | --- | --- |
| Full model | 30.61 | 0.9703 | 29.58 |
| w/o color MLP | 30.55 | 0.9700 | 31.24 |
| w/o $\mathcal{L}_{isocov}$ | 30.61 | 0.9703 | 29.84 |
| w/o $\mathcal{L}_{isopos}$, $\mathcal{L}_{isocov}$ | 30.59 | 0.9699 | 30.25 |
| w/o pose correction | 30.60 | 0.9703 | 29.87 |
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.787, + 0.471, + 0.833 + ], + "angle": 0, + "content": "better novel-view rendering quality and particularly generates more plausible results with respect to novel pose animation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.845, + 0.196, + 0.861 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.471, + 0.903 + ], + "angle": 0, + "content": "In this paper, we present 3DGS-Avatar, one of the first methods that utilize the explicit representation of 3DGS" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.611, + 0.893, + 0.687 + ], + "angle": 0, + "content": "for efficient reconstruction of clothed human avatars from monocular videos. Our method achieves photorealistic rendering, awareness of pose-dependent cloth deformation, generalization to unseen poses, fast training, and real-time rendering all at once." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.688, + 0.895, + 0.839 + ], + "angle": 0, + "content": "Experiments show that our method is comparable to or even better than the state-of-the-art methods in terms of rendering quality while being two orders of magnitude faster in both training and inference. Furthermore, we propose to replace spherical harmonics with a shallow MLP to decode 3D Gaussian color and regularize deformation with geometric constraints, both proved to be effective in enhancing rendering quality. We hope that our new representation could foster further research in fast, high-quality animatable clothed human avatar synthesis from a monocular view." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.902 + ], + "angle": 0, + "content": "Acknowledgement. SW and AG were supported by the ERC Starting Grant LEGO-3D (850533) and the DFG EXC number 2064/1 - project number 390727645. SW and ST acknowledge the SNSF grant 200021 204840." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5027" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.157 + ], + "angle": 0, + "content": "[1] Thiemo Alldieck, Marcus Magnor, Weipeng Xu, Christian Theobalt, and Gerard Pons-Moll. Video based reconstruction of 3d people models. In Proc. of CVPR, 2018. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.16, + 0.471, + 0.215 + ], + "angle": 0, + "content": "[2] Dragomir Anguelov, Praveen Srinivasan, Daphne Koller, Sebastian Thrun, Jim Rodgers, and James Davis. Scape: shape completion and animation of people. ACM Transactions Graphics, 24, 2005. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.218, + 0.472, + 0.259 + ], + "angle": 0, + "content": "[3] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In Proc. of ECCV, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.262, + 0.47, + 0.317 + ], + "angle": 0, + "content": "[4] Xu Chen, Yufeng Zheng, Michael Black, Otmar Hilliges, and Andreas Geiger. Snarf: Differentiable forward skinning for animating non-rigid neural implicit shapes. In Proc. of ICCV, 2021. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.32, + 0.47, + 0.375 + ], + "angle": 0, + "content": "[5] Xu Chen, Tianjian Jiang, Jie Song, Max Rietmann, Andreas Geiger, Michael J. Black, and Otmar Hilliges. 
Fast-snarf: A fast deformer for articulated neural fields. Pattern Analysis and Machine Intelligence (PAMI), 2023. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.378, + 0.469, + 0.434 + ], + "angle": 0, + "content": "[6] Yue Chen, Xuan Wang, Xingyu Chen, Qi Zhang, Xiaoyu Li, Yu Guo, Jue Wang, and Fei Wang. Uv volumes for real-time rendering of editable free-view human performance. In Proc. of CVPR, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.436, + 0.469, + 0.49 + ], + "angle": 0, + "content": "[7] Chen Geng, Sida Peng, Zhen Xu, Hujun Bao, and Xiaowei Zhou. Learning neural volumetric representations of dynamic humans in minutes. In Proc. of CVPR, 2023. 2, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.494, + 0.469, + 0.549 + ], + "angle": 0, + "content": "[8] Chen Guo, Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Vid2 avatar: 3d avatar reconstruction from videos in the wild via self-supervised scene decomposition. In Proc. of CVPR, 2023. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.552, + 0.469, + 0.594 + ], + "angle": 0, + "content": "[9] N. Hasler, C. Stoll, M. Sunkel, B. Rosenhahn, and H.-P. Seidel. A Statistical Model of Human Pose and Body Shape. Computer Graphics Forum, 28:337-346, 2009. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.597, + 0.471, + 0.637 + ], + "angle": 0, + "content": "[10] Shoukang Hu and Ziwei Liu. Gauhuman: Articulated gaussian splatting from monocular human videos. In Proc. of CVPR, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.641, + 0.47, + 0.709 + ], + "angle": 0, + "content": "[11] Rohit Jena, Ganesh Subramanian Iyer, Siddharth Choudhary, Brandon Smith, Pratik Chaudhari, and James Gee. Splatarmor: Articulated gaussian splatting for animatable humans from monocular rgb videos. arXiv preprint arXiv:2311.10812, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.713, + 0.47, + 0.755 + ], + "angle": 0, + "content": "[12] Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Instantavatar: Learning avatars from monocular video in 60 seconds. In Proc. of CVPR, 2023. 1, 2, 3, 4, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.758, + 0.469, + 0.799 + ], + "angle": 0, + "content": "[13] Wei Jiang, Kwang Moo Yi, Golnoosh Samei, Oncel Tuzel, and Anurag Ranjan. Neuman: Neural human radiance field from a single video. In Proc. of ECCV, 2022. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.802, + 0.469, + 0.856 + ], + "angle": 0, + "content": "[14] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics, 42 (4), 2023. 1, 2, 3, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[15] Martin Kilian, Niloy J. Mitra, and Helmut Pottmann. Geometric modeling in shape space. ACM Transactions on Graphics (SIGGRAPH), 26(3), 2007. 2, 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "[16] Muhammed Kocabas, Jen-Hao Rick Chang, James Gabriel, Oncel Tuzel, and Anurag Ranjan. Hugs: Human gaussian splatting. In Proc. of CVPR, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.137, + 0.892, + 0.178 + ], + "angle": 0, + "content": "[17] Youngjoong Kwon, Lingjie Liu, Henry Fuchs, Marc Habermann, and Christian Theobalt. Deliffas: Deformable light fields for fast avatar synthesis. Proc. of NeurIPS, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.181, + 0.892, + 0.222 + ], + "angle": 0, + "content": "[18] Jiahui Lei, Yufu Wang, Georgios Pavlakos, Lingjie Liu, and Kostas Daniilidis. Gart: Gaussian articulated template models. In Proc. of CVPR, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.224, + 0.892, + 0.266 + ], + "angle": 0, + "content": "[19] Ruilong Li, Shan Yang, David A. Ross, and Angjoo Kanazawa. Ai choreographer: Music conditioned 3d dance generation with aist++. In Proc. of ICCV, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.268, + 0.892, + 0.323 + ], + "angle": 0, + "content": "[20] Ruilong Li, Julian Tanke, Minh Vo, Michael Zollhoefer, Jürgen Gall, Angjoo Kanazawa, and Christoph Lassner. Tava: Template-free animatable volumetric actors. In Proc. of ECCV, 2022. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.326, + 0.892, + 0.379 + ], + "angle": 0, + "content": "[21] Zhe Li, Zerong Zheng, Lizhen Wang, and Yebin Liu. Animatable gaussians: Learning pose-dependent gaussian maps for high-fidelity human avatar modeling. In Proc. of CVPR, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.383, + 0.892, + 0.438 + ], + "angle": 0, + "content": "[22] Lingjie Liu, Marc Habermann, Viktor Rudnev, Kripasindhu Sarkar, Jiatao Gu, and Christian Theobalt. Neural actor: Neural free-view synthesis of human actors with pose control. ACM Trans. Graph. (ACM SIGGRAPH Asia), 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.441, + 0.892, + 0.482 + ], + "angle": 0, + "content": "[23] Yang Liu, Xiang Huang, Minghan Qin, Qinwei Lin, and Haoqian Wang. Animatable 3d gaussian: Fast and high-quality reconstruction of multiple human avatars, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.485, + 0.892, + 0.539 + ], + "angle": 0, + "content": "[24] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multiperson linear model. ACM Transactions Graphics, 34(6), 2015. 3, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.542, + 0.892, + 0.583 + ], + "angle": 0, + "content": "[25] Naureen Mahmood, Nima Ghorbani, Nikolaus F. Troje, Gerard Pons-Moll, and Michael J. Black. AMASS: Archive of motion capture as surface shapes. In Proc. of ICCV, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.586, + 0.892, + 0.626 + ], + "angle": 0, + "content": "[26] Marko Mihajlovic, Yan Zhang, Michael J. Black, and Siyu Tang. LEAP: Learning articulated occupancy of people. In Proc. of CVPR, 2021. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.629, + 0.892, + 0.685 + ], + "angle": 0, + "content": "[27] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. of ECCV, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.687, + 0.892, + 0.742 + ], + "angle": 0, + "content": "[28] Arthur Moreau, Jifei Song, Helisa Dhamo, Richard Shaw, Yiren Zhou, and Eduardo Pérez-Pellitero. Human gaussian splatting: Real-time rendering of animatable avatars. In Proc. of CVPR, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.745, + 0.892, + 0.799 + ], + "angle": 0, + "content": "[29] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions Graphics, 41(4), 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.802, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[30] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proc. of CVPR, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[31] Atsuhiro Noguchi, Xiao Sun, Stephen Lin, and Tatsuya Harada. Neural articulated radiance field. In Proc. of ICCV, 2021. 1, 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5028" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.146 + ], + "angle": 0, + "content": "[32] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proc. of ICCV, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.148, + 0.47, + 0.189 + ], + "angle": 0, + "content": "[33] Ahmed A. A. Osman, Timo Bolkart, and Michael J. Black. Star: Sparse trained articulated human body regressor. In Proc. of ECCV, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.19, + 0.469, + 0.232 + ], + "angle": 0, + "content": "[34] Georgios Pavlakos, Luyang Zhu, Xiaowei Zhou, and Kostas Daniilidis. Learning to estimate 3d human pose and shape from a single color image. In Proc. of CVPR, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.233, + 0.469, + 0.287 + ], + "angle": 0, + "content": "[35] Sida Peng, Junting Dong, Qianqian Wang, Shangzhan Zhang, Qing Shuai, Xiaowei Zhou, and Hujun Bao. Animatable neural radiance fields for modeling dynamic human bodies. In Proc. of ICCV, 2021. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.288, + 0.469, + 0.342 + ], + "angle": 0, + "content": "[36] Songyou Peng, Chiyu \"Max\" Jiang, Yiyi Liao, Michael Niemeyer, Marc Pollefeys, and Andreas Geiger. Shape as points: A differentiable poisson solver. In Proc. of NeurIPS, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.344, + 0.47, + 0.412 + ], + "angle": 0, + "content": "[37] Sida Peng, Yuanqing Zhang, Yinghao Xu, Qianqian Wang, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Neural body: Implicit neural representations with structured latent codes for novel view synthesis of dynamic humans. In Proc. of CVPR, 2021. 1, 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.413, + 0.469, + 0.468 + ], + "angle": 0, + "content": "[38] Sida Peng, Shangzhan Zhang, Zhen Xu, Chen Geng, Boyi Jiang, Hujun Bao, and Xiaowei Zhou. Animatable neural implicit surfaces for creating avatars from videos. ArXiv, abs/2203.08133, 2022. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.469, + 0.469, + 0.51 + ], + "angle": 0, + "content": "[39] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proc. of CVPR, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.511, + 0.47, + 0.551 + ], + "angle": 0, + "content": "[40] Sergey Prokudin, Michael J. Black, and Javier Romero. SMPLpix: Neural avatars from 3D human models. In Proc. of WACV, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.553, + 0.469, + 0.593 + ], + "angle": 0, + "content": "[41] Sergey Prokudin, Qianli Ma, Maxime Raafat, Julien Valentin, and Siyu Tang. Dynamic point fields. In Proc. of ICCV, 2023. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.595, + 0.469, + 0.636 + ], + "angle": 0, + "content": "[42] Amit Raj, Julian Tanke, James Hays, Minh Vo, Carsten Stoll, and Christoph Lassner. Anr-articulated neural rendering for virtual avatars. In Proc. of CVPR, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.637, + 0.469, + 0.705 + ], + "angle": 0, + "content": "[43] Christian Reiser, Richard Szeliski, Dor Verbin, Pratul P. Srinivasan, Ben Mildenhall, Andreas Geiger, Jonathan T. Barron, and Peter Hedman. Merf: Memory-efficient radiance fields for real-time view synthesis in unbounded scenes. ACM TOG, 42(4), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.707, + 0.469, + 0.747 + ], + "angle": 0, + "content": "[44] Darius Rückert, Linus Franke, and Marc Stamminger. Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics, 41(4), 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.748, + 0.469, + 0.789 + ], + "angle": 0, + "content": "[45] Shunsuke Saito, Jinlong Yang, Qianli Ma, and Michael J. Black. SCANimate: Weakly supervised learning of skinned clothed avatar networks. In Proc. of CVPR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.469, + 0.843 + ], + "angle": 0, + "content": "[46] Sara Fridovich-Keil and Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proc. of CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[47] Vincent Sitzmann, Semon Rezchikov, William T. Freeman, Joshua B. Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. In Proc. of NeurIPS, 2021. 1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[48] Shih-Yang Su, Frank Yu, Michael Zollhoefer, and Helge Rhodin. A-neRF: Articulated neural radiance fields for learning human shape, appearance, and pose. In Proc. of NeurIPS, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[49] Shih-Yang Su, Timur Bagautdinov, and Helge Rhodin. Npc: Neural point characters from video. In Proc. of ICCV, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.892, + 0.233 + ], + "angle": 0, + "content": "[50] Mohammed Suhail, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Generalizable patch-based neural rendering. In Proc. of ECCV, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.892, + 0.276 + ], + "angle": 0, + "content": "[51] Mohammed Suhail1, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Light field neural rendering. In Proc. of CVPR, 2022. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.319 + ], + "angle": 0, + "content": "[52] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proc. of CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.321, + 0.892, + 0.375 + ], + "angle": 0, + "content": "[53] Huan Wang, Jian Ren, Zeng Huang, Kyle Olszewski, Mengei Chai, Yun Fu, and Sergey Tulyakov. R21: Distilling neural radiance field to neural light field for efficient novel view synthesis. In Proc. of ECCV, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.377, + 0.892, + 0.433 + ], + "angle": 0, + "content": "[54] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proc. of CVPR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.435, + 0.892, + 0.489 + ], + "angle": 0, + "content": "[55] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Proc. of NeurIPS, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.491, + 0.892, + 0.545 + ], + "angle": 0, + "content": "[56] Shaofei Wang, Marko Mihajlovic, Qianli Ma, Andreas Geiger, and Siyu Tang. Metaatrix: Learning animatable clothed human models from few depth images. In Proc. of NeurIPS, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.547, + 0.892, + 0.588 + ], + "angle": 0, + "content": "[57] Shaofei Wang, Katja Schwarz, Andreas Geiger, and Siyu Tang. Arah: Animatable volume rendering of articulated human sdfs. In Proc. of ECCV, 2022. 1, 2, 3, 4, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.59, + 0.892, + 0.645 + ], + "angle": 0, + "content": "[58] Chung-Yi Weng, Brian Curless, Pratul P. Srinivasan, Jonathan T. Barron, and Ira Kemelmacher-Shlizerman. Humaner: Free-viewpoint rendering of moving people from monocular video. In Proc. of CVPR, 2022. 1, 2, 3, 4, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.647, + 0.892, + 0.702 + ], + "angle": 0, + "content": "[59] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.704, + 0.892, + 0.758 + ], + "angle": 0, + "content": "[60] Hongyi Xu, Eduard Gabriel Bazavan, Andrei Zanfir, William T. Freeman, Rahul Sukthankar, and Cristian Sminchisescu. Ghum & ghuml: Generative 3d human shape and articulated pose models. In Proc. of CVPR, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.76, + 0.892, + 0.814 + ], + "angle": 0, + "content": "[61] Hongyi Xu, Thiemo Alldieck, and Cristian Sminchisescu. H-neRF: Neural radiance fields for rendering and temporal reconstruction of humans in motion. In Proc. of NeurIPS, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[62] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proc. of CVPR, 2022. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.903 + ], + "angle": 0, + "content": "[63] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "5029" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[64] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv 2310.10642, 2023. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.233 + ], + "angle": 0, + "content": "[65] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Proc. of NeurIPS, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.235, + 0.471, + 0.275 + ], + "angle": 0, + "content": "[66] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Proc. of NeurIPS, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.278, + 0.469, + 0.332 + ], + "angle": 0, + "content": "[67] Lior Yariv, Peter Hedman, Christian Reiser, Dor Verbin, Pratul P. Srinivasan, Richard Szeliski, Jonathan T. Barron, and Ben Mildenhall. Bakedsdf: Meshing neural sdfs for real-time view synthesis. In Proc. of SIGGRAPH, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.334, + 0.469, + 0.374 + ], + "angle": 0, + "content": "[68] Keyang Ye, Tianjia Shao, and Kun Zhou. Animatable 3d gaussians for high-fidelity synthesis of human motions, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.377, + 0.469, + 0.418 + ], + "angle": 0, + "content": "[69] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. PlenOctrees for real-time rendering of neural radiance fields. In Proc. of ICCV, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.42, + 0.469, + 0.461 + ], + "angle": 0, + "content": "[70] Zhengming Yu, Wei Cheng, xian Liu, Wayne Wu, and KwanYee Lin. MonoHuman: Animatable human neural field from monocular video. In Proc. of CVPR, 2023. 1, 2, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.463, + 0.469, + 0.517 + ], + "angle": 0, + "content": "[71] Qiang Zhang, Seung-Hwan Baek, Szymon Rusinkiewicz, and Felix Heide. Differentiable point-based radiance fields for efficient view synthesis. In SIGGRAPH Asia Conference Proceedings, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.52, + 0.469, + 0.588 + ], + "angle": 0, + "content": "[72] Fuqiang Zhao, Yuheng Jiang, Kaixin Yao, Jiakai Zhang, Liao Wang, Haizhao Dai, Yuhui Zhong, Yingliang Zhang, Minye Wu, Lan Xu, and Jingyi Yu. Human performance modeling and rendering via neural animated mesh. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 41(6), 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.59, + 0.469, + 0.643 + ], + "angle": 0, + "content": "[73] Yufeng Zheng, Wang Yifan, Gordon Wetzstein, Michael J. Black, and Otmar Hilliges. Pointavatar: Deformable point-based head avatars from videos. In Proc. of ECCV, 2023. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.646, + 0.469, + 0.687 + ], + "angle": 0, + "content": "[74] Zerong Zheng, Han Huang, Tao Yu, Hongwen Zhang, Yandong Guo, and Yebin Liu. Structured local radiance fields for human avatar modeling. In Proc. of CVPR, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.689, + 0.469, + 0.743 + ], + "angle": 0, + "content": "[75] Wojciech Zielonka, Timur Bagautdinov, Shunsuke Saito, Michael Zollhöfer, Justus Thies, and Javier Romero. Drivable 3d gaussian avatars. arXiv preprint arXiv:2311.08581, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.469, + 0.787 + ], + "angle": 0, + "content": "[76] M. Zwicker, H. Pfister, J. van Baar, and M. Gross. Ewa volume splatting. In Proceedings Visualization, 2001. VIS '01., pages 29-538, 2001. 4" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.787 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "5030" + } + ] +] \ No newline at end of file diff --git a/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/0f8abe9e-31c6-4dc4-9520-66dabe1eb0cf_origin.pdf b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/0f8abe9e-31c6-4dc4-9520-66dabe1eb0cf_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f19f36bc8c13a066bca4b19283b2da03fda9236a --- /dev/null +++ b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/0f8abe9e-31c6-4dc4-9520-66dabe1eb0cf_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7394b05345a6f88256c20a43992aed7aaccf15a6f647b79ccc8635245d3e17d +size 1274857 diff --git a/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/full.md b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/full.md new file mode 100644 index 0000000000000000000000000000000000000000..fb5034dd84961bdaa66337ef76865425e73f4a80 --- /dev/null +++ b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/full.md @@ -0,0 +1,331 @@ +# 3DGS-Avatar: Animatable Avatars via Deformable 3D Gaussian Splatting + +Zhiyin Qian $^{1}$ Shaofei Wang $^{1,2,3}$ Marko Mihajlovic $^{1}$ Andreas Geiger $^{2,3}$ Siyu Tang $^{1}$ $^{1}$ ETH Zürich $^{2}$ University of Tübingen $^{3}$ Tübingen AI Center + +![](images/20265e84ed1b0eeb165923bfa7b725cf8fab4493209f38f90668c76390df00c6.jpg) +Figure 1. 3DGS-Avatar. We develop an efficient method for creating animatable avatars from monocular videos, leveraging 3D Gaussian Splatting [14]. Given a short sequence of dynamic human with a tracked skeleton and foreground masks, our method creates an avatar within 30 minutes on a single GPU, supports animation and novel view synthesis at over 50 FPS, and achieves comparable or better rendering quality to the state-of-the-art [57, 58] that requires over 8 GPU days to train, takes several seconds to render a single image, and relies on pre-training on clothed human scans [57]. + +# Abstract + +We introduce an approach that creates animatable human avatars from monocular videos using 3D Gaussian Splatting (3DGS). 
Existing methods based on neural radiance fields (NeRFs) achieve high-quality novel-view/novelpose image synthesis but often require days of training, and are extremely slow at inference time. Recently, the community has explored fast grid structures for efficient training of clothed avatars. Albeit being extremely fast at training, these methods can barely achieve an interactive rendering frame rate with around 15 FPS. In this paper, we use 3D Gaussian Splatting and learn a non-rigid deformation network to reconstruct animatable clothed human avatars that can be trained within 30 minutes and rendered at real-time frame rates $(50 + FPS)$ . Given the explicit nature of our representation, we further introduce as-isometric-as-possible regularizations on both the Gaussian mean vectors and the covariance matrices, enhancing the generalization of our model on highly articulated unseen poses. Experimental results show that our method achieves comparable and even better performance compared to state-of-the-art approaches on animatable avatar creation from a monocular input, while being $400x$ and $250x$ faster in training and inference, respectively. Please see our project page at https://neuralbodies.github.io/3DGS-Avatar. + +# 1. Introduction + +Reconstructing clothed human avatars from image inputs presents a significant challenge in computer vision, yet holds immense importance due to its applications in virtual reality, gaming, and e-commerce. Traditional methods often rely on dense, synchronized multi-view inputs, which may not be readily available in more practical scenarios. Recent advances in implicit neural fields [27, 30, 32, 36, 47, 50, 51, 53, 55, 65, 66] have enabled high-quality reconstruction of geometry [8, 38, 57, 61] and appearance [13, 20, 22, 31, 35, 37, 42, 58, 70] of clothed human bodies from sparse multi-view or monocular videos. Animation of such reconstructed clothed human bodies is also possible by learning the geometry and appearance representations in a predefined canonical pose [13, 20, 35, 57, 58, 70]. + +To achieve state-of-the-art rendering quality, existing methods rely on training a neural radiance field (NeRF) [27] combined with either explicit body articulation [8, 12, 13, 20, 35, 38, 57, 58, 70] or conditioning the NeRF on human body related encodings [31, 37, 48, 61]. They often employ large multi-layer perceptrons (MLPs) to model the neural radiance field, which are computationally demanding, leading to prolonged training (days) and inference (seconds) time. This computational expense poses a significant challenge for practical applications of these state-of-the-art methods in real-time applications. + +With recent advances in efficient learning of implicit + +neural fields, training time of NeRFs has been reduced to minutes [3, 29, 46, 52?]. There are also works targeting fast inference of pretrained NeRFs [43, 67, 69]. Inspired by these developments, several avatar reconstruction methods have been tailored to fast training [7, 12] or fast inference [6, 17, 39]. However, to the best of our knowledge, there currently exists no method that simultaneously achieves both fast training and real-time inference for animatable avatar reconstruction from just monocular videos. + +Point-based rendering [44, 49, 62, 71, 73? , 74] has emerged as an efficient alternative to NeRFs for fast inference. 
With the recently proposed 3D Gaussian Splatting (3DGS) [14], it is possible to achieve state-of-the-art rendering quality using only a fraction of NeRFs' inference time and comparatively fast training for static scene reconstruction. + +Leveraging the capabilities of 3DGS, we demonstrate its application in modeling animatable clothed avatars using monocular videos. Our approach effectively integrates rigid human articulation with a non-rigid deformation field within the 3DGS framework. We use a small multi-layer perceptron (MLP) to decode color. This MLP is designed to be responsive to local non-rigid deformations and dynamic lighting conditions, ensuring a more realistic and responsive rendering of the avatar's appearance. Furthermore, we apply as-isometric-as-possible regularizations [15, 41] to both the Gaussian mean vectors and the covariance matrices, which helps maintain the geometric consistency and realistic deformation of the avatar, particularly in dynamic and varied poses. + +Our experimental results show that our method is comparable to or better than current state-of-the-art [57, 58] in animatable avatar creation from monocular inputs, achieving training speed 400 times faster and inference speed 250 times quicker. Compared to methods that focus on fast training [7, 12], our method, despite being slower in training, can model pose-dependent non-rigid deformation and produce significantly better rendering quality, while being 3 times faster in terms of rendering. We provide an overview of the comparison to major prior works in Tab. 1. In summary, our work makes the following contributions: + +- We introduce 3D Gaussian Splatting to animatable human avatars reconstruction from monocular videos. +- We develop a simple yet effective deformation network as well as regularization terms that effectively drive 3D Gaussian Splats to handle highly articulated and out-of-distribution poses. +- Our method is the first, to our knowledge, to simultaneously deliver high-quality rendering, model pose-dependent non-rigid deformation, generalize effectively to unseen poses, and achieve fast training (less than 30 minutes) and real-time rendering speed $(50+\mathrm{FPS})$ . + +![](images/e9fda64a11f5021ef1f0f1249f67708d0c44f3844d225ab1c07b86eeae577d61.jpg) +Table 1. Comparison to SoTA. Instant-NVR [7] and InstantA-vatar [12] achieve instant training within 5 minutes. For real-time rendering, we require a frame rate over 30 FPS. Note that while UV-Volumes [6] claims real-time freeview rendering, they only achieve 14 FPS on novel pose synthesis due to the slow generation of their UV Volume. + +# 2. Related Works + +Neural rendering for clothed human avatars. Since the seminal work of Neural Radiance Fields (NeRF) [27], there has been a surge of research on neural rendering for clothed human avatars. The majority of the works focus on either learning a NeRF conditioned on human body related encodings [31, 48, 61], or learning a canonical NeRF representation and warp camera rays from the observation space to the canonical space to query radiance and density values from the canonical NeRF [8, 12, 13, 20, 35, 38, 57, 58, 70]. Most of these works rely on large multi-layer perceptrons (MLPs) to model the underlying neural radiance field, which are computationally expensive, resulting in prolonged training (days) and inference (seconds) time. 
+ +With recent advances in accelerated data structures for neural fields, there has been several works targeting fast inference and fast training of NeRFs for clothed humans. [12] proposes to use iNGP [29] as the underlying representation for articulated NeRFs, which enables fast training (less than 5 minutes) and interactive rendering speed (15 FPS) but ignores pose-dependent non-rigid deformations. [7] also utilizes iNGP and represents non-rigid deformations in the UV space, which enables fast training and modeling of pose-dependent non-rigid deformations. However, as we will show in our experiments, [7]'s parametrization of non-rigid deformations result in blurry renderings. [6] proposes to generate a pose-dependent UV volume for efficient free-view synthesis. However, their UV-volume generation process is slow (20 FPS), making novel pose synthesis less efficient (only 14 FPS). [17] also employs UV-based rendering + +to achieve real-time rendering of dynamic clothed humans, but only works on dense multi-view inputs. Extending [69], [54, 72] applied Fourier transform for compressing human performance capture data, albeit with limitations on dense multi-view data (60-80 views) and non-generalizability of the Fourier basis representation to unseen poses beyond the training dataset. In contrast to all these works, our method achieves state-of-the-art rendering quality and speed with less than 30 minutes of training time from a single monocular video input. + +Dynamic 3D gaussians. Point-based rendering [40, 44, 49, 62, 71, 73, 74] has also been shown to be an efficient alternative to NeRFs for fast inference and training. Extending point cloud to 3D Gaussians, 3D Gaussian Splatting (3DGS) [14] models the rendering process as splatting a set of 3D Gaussians onto image plane via alpha blending, achieving state-of-the-art rendering quality with real-time inference speed and fast training given multi-view inputs. + +Given the great performance on both quality and speed of 3DGS, a rich set of works has further explored the 3D Gaussian representation for dynamic scene reconstruction. [14] proposed to optimize the position and shape of each 3D Gaussian on a frame-by-frame basis and simultaneously performed 6-DOF dense tracking for free. Their model size, however, increases with the temporal dimension. [59, 63] maintain a single set of 3D Gaussians in a canonical space and deform them to each frame via learning a time-dependent deformation field, producing state-of-the-art results in terms of both rendering quality and speed. [64] augments 3D Gaussians with temporal dimension into 4D Gaussian primitives to approximate the underlying spatiotemporal 4D volume of the dynamic scene. While such methods show promising results, they are only applicable to either synthetic datasets with fast camera movement and slow object motion or forward-facing real scenes with limited object movements, thus unable to handle the immense displacement of the articulated human body. To address this problem, our approach utilizes a statistical human body model [24] for articulation and applies regularization to reduce the overfitting of the deformation field. + +Concurrent works. Concurrent with our method, many recent works also seek to combine 3DGS with human articulation prior for avatar reconstruction. We provide a comparison of our approach to concurrent works in Tab. 2. D3GA [75] proposed to embed 3D Gaussians in tetrahedral cages and utilize cage deformations for drivable avatar animation. 
However, they use dense calibrated multi-view videos as input and require an additional 3D scan to generate the tetrahedral mesh template. Li et al. [21] focused on generating avatars with a detailed appearance from multiview videos by post-processing radiance field renderings with 2D CNNs, which limits their rendering speed. Along with [11, 28], these works fail to achieve fast training with + +![](images/0c5bb30f7863eb054a4a8bf0a42e08f62a298dbc2cbb1f043fea25d713d8c2eb.jpg) +Table 2. Comparison to Concurrent Works. + +relatively complex pipelines. Similar to our approach, Ye et al. [68] deforms 3D Gaussians in canonical space via pose-dependent deformation and rigid articulation, but they still require 2 hours for training and do not show results on monocular inputs. HUGS [16] learns a background model along with the animatable human avatar, but they fail to take pose-dependent cloth deformation into account. Several other works [10, 18, 23] also neglect pose-dependent cloth deformation to achieve even faster training (in 5 minutes) and rendering $(150 + \mathrm{FPS})$ . We argue that our method strikes a good balance between quality and speed compared to concurrent works, as being the only method simultaneously achieving the properties listed in Tab. 2. + +# 3. Preliminaries + +Linear Blend Skinning. To model human articulations, a widely adopted paradigm is to represent geometry and appearance in a shared canonical space [8, 12, 13, 20, 35, 38, 57, 58] and use Linear Blend Skinning (LBS) [2, 9, 24, 33, 34, 60] to deform the parametric human body under arbitrary poses. Given a point $\mathbf{x}_c$ in canonical space, the LBS function takes a set of rigid bone transformations $\{\mathbf{B}_b\}_{b=1}^B$ and computes its correspondence $\mathbf{x}_o$ in the observation space: + +$$ +\mathbf {x} _ {\mathbf {o}} = L B S _ {\sigma_ {w}} \left(\mathbf {x} _ {c}; \left\{\mathbf {B} _ {b} \right\}\right) \tag {1} +$$ + +Assuming an underlying SMPL model, we use a total of $B = 24$ bone transformations, each represented by a $4 \times 4$ rotation-translation matrix, which are then linearly blended via a set of skinning weights $\mathbf{w} \in [0,1]^B$ , s.t. $\sum_{b=1}^{B} \mathbf{w}_b = 1$ , modeled by a coordinate-based neural skinning field $f_{\sigma_w}(\mathbf{x}_c) \in [4,5,26,45,56]$ . The forward linear blend skin + +ning function can thus be formulated as: + +$$ +\mathbf {x} _ {o} = L B S _ {\sigma_ {w}} \left(\mathbf {x} _ {c}; \{\mathbf {B} _ {b} \}\right) = \sum_ {b = 1} ^ {B} f _ {\sigma_ {w}} \left(\mathbf {x} _ {c}\right) _ {b} \mathbf {B} _ {b} \mathbf {x} _ {c} \quad (2) +$$ + +Compared to prior works that search canonical correspondences of points in observation space [12, 57, 58], our method requires no inverse skimming which is typically difficult to compute and often leads to multiple solutions [4, 5]. A similar technique has been employed in [73] for face avatar modeling. + +3D Gaussian Splatting. 3DGS [14] utilizes a set of 3D Gaussian primitives $\{\mathcal{G}\}$ as static scene representation which can be rendered in real-time via differentiable rasterization. Each 3D Gaussian $\mathcal{G}$ is defined by its mean $\mathbf{x}$ , covariance $\boldsymbol{\Sigma}$ , opacity $\alpha$ and view-dependent color represented by spherical harmonics coefficients $\mathbf{f}$ . To ensure positive semi-definiteness, the covariance matrix is represented by a scaling matrix $\mathbf{S}$ and rotation matrix $\mathbf{R}$ . 
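To make this factorization concrete, the following is a minimal numpy sketch (not the authors' implementation) that assembles $\Sigma = \mathbf{R}\mathbf{S}\mathbf{S}^T\mathbf{R}^T$ from a scale 3-vector and a unit quaternion; the helper names are ours.

```python
import numpy as np

def quat_to_rotmat(q):
    """Unit quaternion (w, x, y, z) -> 3x3 rotation matrix."""
    w, x, y, z = q / np.linalg.norm(q)
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
        [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
    ])

def covariance_from_scaling_rotation(s, q):
    """Sigma = R S S^T R^T, positive semi-definite by construction."""
    R = quat_to_rotmat(q)
    S = np.diag(s)
    return R @ S @ S.T @ R.T

# Toy check: an identity rotation gives a diagonal covariance with the squared scales.
Sigma = covariance_from_scaling_rotation(np.array([0.1, 0.2, 0.05]),
                                         np.array([1.0, 0.0, 0.0, 0.0]))
print(np.allclose(Sigma, np.diag([0.01, 0.04, 0.0025])))  # True
```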
In practice, we store the diagonal vector $\mathbf{s} \in \mathbb{R}^3$ of the scaling matrix and a quaternion vector $\mathbf{q} \in \mathbb{R}^4$ to represent rotation, which can be trivially converted to a valid covariance matrix. + +The 3D Gaussians are projected to the 2D image plane during the rendering process and accumulated via alpha blending. Given a viewing transformation $\mathbf{W}$ and the Jacobian of the affine approximation of the projective transformation $\mathbf{J}$ , the 2D covariance matrix in camera coordinate [76] is given by $\boldsymbol{\Sigma}^{\prime} = (\mathbf{J}\mathbf{W}\boldsymbol{\Sigma}\mathbf{W}^{T}\mathbf{J}^{T})_{1:2,1:2}$ . The pixel color $C$ is thus computed by blending 3D Gaussian splats that overlap at the given pixel, sorted according to their depth: + +$$ +C = \sum_ {i} \left(\alpha_ {i} ^ {\prime} \prod_ {j = 1} ^ {i - 1} \left(1 - \alpha_ {j} ^ {\prime}\right)\right) c _ {i} \tag {3} +$$ + +where $\alpha_{i}^{\prime}$ denotes the learned opacity $\alpha_{i}$ weighted by the probability density of $i$ -th projected 2D Gaussian at the target pixel location. $c$ denotes the view-dependent color computed from stored SH coefficients $\mathbf{f}$ . + +The 3D Gaussians $\{\mathcal{G}\}$ are optimized via a photometric loss. During optimization, 3DGS adaptively controls the number of 3D Gaussians via periodic densification and pruning, achieving self-adaptive convergence to an optimal density distribution of 3D Gaussians that well represents the scene. + +# 4. Methods + +We illustrate the pipeline of our method in Fig. 2. The input to our method is a monocular video with a calibrated camera, fitted SMPL parameters, and foreground masks. Our method optimizes a set of 3D Gaussians in canonical space, which is then deformed to the observation space and rendered from the given camera. For a set of 3D Gaussians $\{\mathcal{G}^{(i)}\}_{i = 1}^{N}$ , we store the following properties at each point: + +position $\mathbf{x}$ , scaling factor $\mathbf{s}$ , rotation quaternion $\mathbf{q}$ , opacity $\alpha$ and a color feature vector $\mathbf{f}$ . We start by randomly sampling $N = 50k$ points on the canonical SMPL [24] mesh surface as initialization of canonical 3D Gaussians $\{\mathcal{G}_c\}$ . Inspired by HumanNeRF [58], we decompose the complex human deformation into a non-rigid part that encodes pose-dependent cloth deformation, and a rigid transformation controlled by the human skeleton. + +# 4.1. Pose-dependent Non-rigid Deformation + +We formulate the non-rigid deformation module as: + +$$ +\left\{\mathcal {G} _ {d} \right\} = \mathcal {F} _ {\theta_ {n r}} \left(\left\{\mathcal {G} _ {c} \right\}; \mathcal {Z} _ {p}\right) \tag {4} +$$ + +where $\{\mathcal{G}_d\}$ represents the non-rigidly deformed 3D Gaussians. $\theta_{nr}$ represents the learnable parameters of the nonrigid deformation module. $\mathcal{Z}_p$ is a latent code which encodes SMPL pose and shape $(\theta, \beta)$ using a lightweight hierarchical pose encoder [26]. 
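Before detailing the deformation network, a brief aside on the rendering side: the sketch below illustrates the front-to-back alpha blending of Eq. (3), with the opacity of each splat weighted by its projected 2D Gaussian density at the pixel. This is a simplified numpy illustration under our own naming, not the differentiable rasterizer used by 3DGS.

```python
import numpy as np

def splat_alpha(alpha, mu2d, cov2d, pixel):
    """Opacity weighted by the 2D Gaussian density at the pixel (the alpha'_i of Eq. 3)."""
    d = pixel - mu2d
    return alpha * np.exp(-0.5 * d @ np.linalg.inv(cov2d) @ d)

def composite(alphas, colors):
    """Front-to-back alpha blending of depth-sorted splats (Eq. 3)."""
    C, transmittance = np.zeros(3), 1.0
    for a, c in zip(alphas, colors):
        C += transmittance * a * c
        transmittance *= 1.0 - a
    return C

# Toy usage: two splats covering one pixel, already sorted front to back.
pixel = np.array([0.0, 0.0])
a1 = splat_alpha(0.8, np.array([0.0, 0.0]), 0.25 * np.eye(2), pixel)  # centered on the pixel
a2 = splat_alpha(0.9, np.array([1.0, 0.0]), 0.25 * np.eye(2), pixel)  # one pixel off-center
print(composite([a1, a2], [np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])]))
```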
Specifically, the deformation network $f_{\theta_{nr}}$ takes the canonical position $\mathbf{x}_c$ , the pose latent code $\mathcal{Z}_p$ as inputs and outputs the offsets of the Gaussian's position, scale, rotation, along with a feature vector $\mathbf{z}$ : + +$$ +\left(\delta \mathbf {x}, \delta \mathbf {s}, \delta \mathbf {q}, \mathbf {z}\right) = f _ {\theta_ {n r}} \left(\mathbf {x} _ {c}; \mathcal {Z} _ {p}\right) \tag {5} +$$ + +We use a multi-level hash grid [29] to encode 3D positions as spatial features, which are then concatenated with the pose latent code $\mathcal{Z}_p$ and fed into a shallow MLP with 2 hidden layers and a width of 128. The canonical Gaussian is deformed by: + +$$ +\mathbf {x} _ {d} = \mathbf {x} _ {c} + \delta \mathbf {x} \tag {6} +$$ + +$$ +\mathbf {s} _ {d} = \mathbf {s} _ {c} \cdot \exp (\delta \mathbf {s}) \tag {7} +$$ + +$$ +\mathbf {q} _ {d} = \mathbf {q} _ {c} \cdot [ 1, \delta q _ {1}, \delta q _ {2}, \delta q _ {3} ] \tag {8} +$$ + +note that the $\cdot$ operator on quaternions is equivalent to multiplying the two rotation matrices derived by the two quaternions. Since the quaternion $[1,0,0,0]$ corresponds to the identity rotation matrix, we have $\mathbf{q}_d = \mathbf{q}_c$ when $\delta \mathbf{q} = \mathbf{0}$ . + +# 4.2. Rigid Transformation + +We further transform the non-rigidly deformed 3D Gaussians $\{\mathcal{G}_d\}$ to the observation space via a rigid transformation module: + +$$ +\left\{\mathcal {G} _ {o} \right\} = \mathcal {F} _ {\theta_ {r}} \left(\left\{\mathcal {G} _ {d} \right\}; \left\{\mathbf {B} _ {\mathbf {b}} \right\} _ {b = 1} ^ {B}\right) \tag {9} +$$ + +where a skinning MLP $f_{\theta_r}$ is learned to predict skinning weights at the position $\mathbf{x}_d$ . We transform the position and the rotation matrix of 3D Gaussians via forward LBS: + +$$ +\mathbf {T} = \sum_ {b = 1} ^ {B} f _ {\theta_ {r}} \left(\mathbf {x} _ {d}\right) _ {b} \mathbf {B} _ {b} \tag {10} +$$ + +$$ +\mathbf {x} _ {o} = \mathbf {T} \mathbf {x} _ {d} \tag {11} +$$ + +$$ +\mathbf {R} _ {o} = \mathbf {T} _ {1: 3, 1: 3} \mathbf {R} _ {d} \tag {12} +$$ + +where $\mathbf{R}_d$ is the rotation matrix derived from the quaternion $\mathbf{q}_d$ . + +![](images/eb1865dac9c072592ad7c2480fdea1e4f45207fd2d69a729340adaaeb3763c4f.jpg) +Figure 2. Our framework for creating animatable avatars from monocular videos. We first initialize a set of 3D Gaussians in the canonical space via sampling points from a SMPL mesh. Each canonical Gaussian $\mathcal{G}_c$ goes through a non-rigid deformation module $\mathcal{F}_{\theta_{nr}}$ conditioned on an encoded pose vector $\mathcal{Z}_p$ (Sec. 4.1) to account for pose-dependent non-rigid cloth deformation. This module outputs a non-rigidly deformed 3D Gaussian $\mathcal{G}_d$ and a pose-dependent latent feature $\mathbf{z}$ . The non-rigidly deformed 3D Gaussian $\mathcal{G}_d$ is transformed to the observation space $\mathcal{G}_o$ (Sec. 4.2) via LBS with learned neural skinning $\mathcal{F}_{\theta_r}$ . The Gaussian feature $\mathbf{f}$ , the pose-dependent feature $\mathbf{z}$ , a per-frame latent code $\mathcal{Z}_c$ , and the ray direction $\mathbf{d}$ are propagated through a small MLP $\mathcal{F}_{\theta_c}$ to decode the view-dependent color $c$ for each 3D Gaussian. Finally, the observation space 3D Gaussians $\{\mathcal{G}_o\}$ and their respective color values are accumulated via differentiable Gaussian rasterization (Eq. (3)) to render the image. + +# 4.3. 
# 4.3. Color MLP + +Prior works [59, 63, 64] follow the convention of 3DGS [14], which stores spherical harmonics coefficients per 3D Gaussian to encode the view-dependent color. Treating the stored color feature $\mathbf{f}$ as spherical harmonics coefficients, the color of a 3D Gaussian can be computed by the dot product of the spherical harmonics basis and the learned coefficients: $c = \langle \pmb{\gamma}(\mathbf{d}),\mathbf{f}\rangle$ , where $\mathbf{d}$ represents the viewing direction, derived from the relative position of the 3D Gaussian w.r.t. the camera center, and $\pmb{\gamma}$ denotes the spherical harmonics basis function. + +While conceptually simple, we argue that this approach does not suit our monocular setting. Since only one camera view is provided during training, the viewing direction in the world space is fixed, leading to poor generalization to unseen test views. Similar to [38], we use the inverse rigid transformation from Sec. 4.2 to canonicalize the viewing direction: $\hat{\mathbf{d}} = \mathbf{T}_{1:3,1:3}^{-1}\mathbf{d}$ , where $\mathbf{T}$ is the forward transformation matrix defined in Eq. (10). Theoretically, canonicalizing the viewing direction also promotes consistency of the specular component of canonical 3D Gaussians under rigid transformations. + +On the other hand, we observe that the pixel color of the rendered clothed human avatar also largely depends on local deformation. Local fine wrinkles on clothes, for instance, would cause self-occlusion that heavily affects shading. Following [37], we also learn a per-frame latent code $\mathcal{Z}_c$ to compensate for different environment light effects across frames caused by the global movement of the subject. Hence, instead of learning spherical harmonics coefficients, we enhance color modeling by learning a neural network that takes the per-Gaussian color feature vector $\mathbf{f} \in \mathbb{R}^{32}$ , the local pose-dependent feature vector $\mathbf{z} \in \mathbb{R}^{16}$ from the non-rigid deformation network, the per-frame latent code $\mathcal{Z}_c \in \mathbb{R}^{16}$ , and the spherical harmonics basis of the canonicalized viewing direction $\boldsymbol{\gamma}(\hat{\mathbf{d}})$ with a degree of 3 as input, and predicts the color of the 3D Gaussian: + +$$ +c = \mathcal {F} _ {\theta_ {c}} (\mathbf {f}, \mathbf {z}, \mathcal {Z} _ {c}, \boldsymbol {\gamma} (\hat {\mathbf {d}})) \tag {13} +$$ + +In practice, we find a tiny MLP with one 64-dimensional hidden layer sufficient to model the appearance. Increasing the size of the MLP leads to overfitting and a performance drop.
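As a rough illustration of the appearance model, the snippet below sketches the view-direction canonicalization and the color decoder of Eq. (13) with the feature dimensions stated above. The degree-3 SH basis $\boldsymbol{\gamma}(\hat{\mathbf{d}})$ (16 values) is assumed to be computed by a separate helper and passed in as `dir_enc`; this is illustrative PyTorch, not the released implementation.

```python
import torch
import torch.nn as nn

class ColorMLP(nn.Module):
    """Eq. (13): decode per-Gaussian RGB from the color feature f (32), the pose-dependent
    feature z (16), a per-frame latent code Z_c (16), and an encoding of the canonicalized
    viewing direction. A single 64-dimensional hidden layer, as described in the text."""
    def __init__(self, dir_dim: int = 16):  # 16 = number of degree-3 SH basis values
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(32 + 16 + 16 + dir_dim, 64), nn.ReLU(),
            nn.Linear(64, 3), nn.Sigmoid(),  # squash outputs to [0, 1] (an assumption)
        )

    def forward(self, f, z, z_frame, dir_enc):
        z_frame = z_frame.expand(f.shape[0], -1)  # broadcast the per-frame code to all Gaussians
        return self.net(torch.cat([f, z, z_frame, dir_enc], dim=-1))

def canonicalize_view_dirs(x_o: torch.Tensor, cam_center: torch.Tensor, T: torch.Tensor) -> torch.Tensor:
    """d_hat = T_{1:3,1:3}^{-1} d, with T the per-Gaussian forward transform of Eq. (10)."""
    d = torch.nn.functional.normalize(x_o - cam_center, dim=-1)  # (N, 3)
    return torch.einsum("nij,nj->ni", torch.linalg.inv(T[:, :3, :3]), d)
```

Keeping the decoder this small is consistent with the observation above that enlarging the MLP overfits the single training view.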
# 4.4. Optimization + +We jointly optimize the canonical 3D Gaussians $\{\mathcal{G}_c\}$ and the parameters $\theta_{nr},\theta_r,\theta_c$ of the non-rigid deformation network, the skinning network and the color network, respectively. + +Pose correction. SMPL [24] parameter fittings from images can be inaccurate. To address this, we additionally optimize the per-sequence shape parameter as well as per-frame translation, global rotation, and local joint rotations. We initialize these parameters $\theta_{p}$ with the given SMPL parameters and differentiably derive the bone transformations $\{\mathbf{B}_b\}$ as input to the network, enabling direct optimization via backpropagation. + +As-isometric-as-possible regularization. With monocular video as input, only one view of the human is visible in each frame, making it extremely hard to generalize to novel views and novel poses. Considering the sparsity of the input, the non-rigid deformation network is highly underconstrained, resulting in noisy deformation from the canonical space to the observation space. Inspired by [41], we leverage the as-isometric-as-possible constraint [15] to encourage neighboring 3D Gaussian centers to preserve a similar distance after deformation. We further extend the constraint to the Gaussian covariance matrices: + +$$ +\mathcal {L} _ {\text {isopos}} = \sum_ {i = 1} ^ {N} \sum_ {j \in \mathcal {N} _ {k} (i)} \left| d \left(\mathbf {x} _ {c} ^ {(i)}, \mathbf {x} _ {c} ^ {(j)}\right) - d \left(\mathbf {x} _ {o} ^ {(i)}, \mathbf {x} _ {o} ^ {(j)}\right) \right| \tag {14} +$$ + +$$ +\mathcal {L} _ {\text {isocov}} = \sum_ {i = 1} ^ {N} \sum_ {j \in \mathcal {N} _ {k} (i)} \left| d \left(\boldsymbol {\Sigma} _ {c} ^ {(i)}, \boldsymbol {\Sigma} _ {c} ^ {(j)}\right) - d \left(\boldsymbol {\Sigma} _ {o} ^ {(i)}, \boldsymbol {\Sigma} _ {o} ^ {(j)}\right) \right| \tag {15} +$$ + +where $N$ denotes the number of 3D Gaussians, $\mathcal{N}_k(i)$ denotes the $k$-nearest neighbourhood of the $i$-th Gaussian, and we set $k$ to 5. We use the L2-norm as the distance function $d(\cdot ,\cdot)$ . + +Loss function. Our full loss function consists of an RGB loss $\mathcal{L}_{rgb}$ , a mask loss $\mathcal{L}_{mask}$ , a skinning weight regularization loss $\mathcal{L}_{skin}$ , and the as-isometric-as-possible regularization losses for position and covariance, $\mathcal{L}_{isopos}$ and $\mathcal{L}_{isocov}$ . For further details of the loss definitions and their respective weights, please refer to the Supp. Mat. + +# 5. Experiments + +In this section, we first compare the proposed approach with recent state-of-the-art methods [7, 12, 37, 57, 58], demonstrating that our approach achieves superior rendering quality in terms of LPIPS, which is more informative under the monocular setting, while achieving fast training and real-time rendering, respectively $400\mathrm{x}$ and $250\mathrm{x}$ faster than the most competitive baseline [58]. We then systematically ablate each component of the proposed model, showing their effectiveness in improving rendering quality. + +# 5.1. Evaluation Dataset + +ZJU-MoCap [37]. This is the major testbed for quantitative evaluation. We pick six sequences (377, 386, 387, 392, 393, 394) from the ZJU-MoCap dataset and follow the training/test split of HumanNeRF [58]. The motion of these sequences is repetitive and does not contain a sufficient number of poses for meaningful novel pose synthesis benchmarks. Thus, we focus on evaluating novel view synthesis (PSNR/SSIM/LPIPS) and show qualitative results for animation on out-of-distribution poses. Note that LPIPS in all tables is scaled up by 1000. + +PeopleSnapshot [1]. We also conduct experiments on 4 sequences of the PeopleSnapshot dataset, which includes monocular videos of people rotating in front of a camera. We follow the data split of InstantAvatar [12] and compare with [12] on novel pose synthesis. For a fair comparison, we use the provided poses optimized by Anim-NeRF [35] and do not further optimize them during our training. + +# 5.2. Comparison with Baselines + +We compare our approach with NeuralBody [37], HumanNeRF [58], MonoHuman [70], ARAH [57] and Instant-NVR [7] under the monocular setup on ZJU-MoCap. The quantitative results are reported in Tab. 3. NeuralBody underperforms compared to the other approaches. Overall, our proposed approach produces comparable performance to ARAH on PSNR and SSIM, while significantly outperforming all the baselines on LPIPS.
We argue that LPIPS is more informative compared to the other two metrics, as it is very difficult to reproduce exactly the ground-truth appearance for novel views due to the monocular setting and the stochastic nature of cloth deformations. Meanwhile, our method is also capable of fast training and renders at a real-time frame rate, being 400 times faster for training (30 GPU minutes vs. 8 GPU days) and $250 - 500$ times faster for inference (50 FPS vs. 0.1 FPS for ARAH and 0.2 FPS for HumanNeRF). We also note that Instant-NVR trains on a refined version of ZJU-MoCap, which provides refined camera parameters, SMPL fittings, and more accurate instance masks with part-level annotation that is essential for running their method. Hence, their metrics are not directly comparable to those of other methods in Tab. 3. We train our model on the refined dataset for a fair quantitative comparison, which clearly shows that our method outperforms Instant-NVR in most scenarios. + +Qualitative comparisons on novel view synthesis can be found in Fig. 3. We observe that our method preserves sharper details compared to ARAH and does not produce the fluctuating artifacts seen in HumanNeRF, which are caused by noisy deformation fields. Instant-NVR produces an over-smoothed appearance and tends to generate noisy limbs. Additionally, we animate our learned avatars with pose sequences from AMASS [25] and AIST++ [19], shown in the rightmost column of Fig. 3. This shows that our model can generalize to extreme out-of-distribution poses. + +For PeopleSnapshot, we report the quantitative comparison against InstantAvatar [12] in Tab. 4. Our approach significantly outperforms InstantAvatar on PSNR and LPIPS, while being more than $3\mathrm{x}$ faster during inference. + +# 5.3. Ablation Study + +We study the effect of various components of our method on the ZJU-MoCap dataset, including the color MLP, the as-isometric-as-possible regularization and the pose correction module. The average metrics over 6 sequences are reported in Tab. 5. We show that all proposed techniques are required to reach the optimal performance, best reflected by LPIPS, which is the most informative metric for novel view synthesis evaluation under a monocular setup. + +We further show qualitative comparisons on out-of-distribution poses in Fig. 4, which demonstrate that the as-isometric-as-possible loss helps to constrain the 3D Gaussians to comply with consistent movement during deformation, hence improving generalization to novel poses. Albeit marginally, each individual component contributes to a + +![](images/721a7f5d19b0b74ddebf9957d73031a24577f9c604fd435442873d5c25505bdd.jpg) +Figure 3. Qualitative Comparison on ZJU-MoCap [37]. We show the results for both novel view synthesis and novel pose animation of all sequences on ZJU-MoCap. Our method produces high-quality results that preserve cloth details even on out-of-distribution poses. + +Table 3. Quantitative Results on ZJU-MoCap [37]. We outperform both competitive baselines [57, 58] in terms of LPIPS while being two orders of magnitude faster in training and rendering. In the original typeset table, cell color indicates the best and second-best results. Instant-NVR [7] is trained and tested on a refined version of ZJU-MoCap, thus is not directly comparable to other baselines quantitatively. We train our model on the refined dataset for fair quantitative comparison to Instant-NVR and the metrics are reported in the last two rows of the table.
| Method | GPU↓ | FPS↑ | 377 PSNR↑ | 377 SSIM↑ | 377 LPIPS↓ | 386 PSNR↑ | 386 SSIM↑ | 386 LPIPS↓ | 387 PSNR↑ | 387 SSIM↑ | 387 LPIPS↓ | 392 PSNR↑ | 392 SSIM↑ | 392 LPIPS↓ | 393 PSNR↑ | 393 SSIM↑ | 393 LPIPS↓ | 394 PSNR↑ | 394 SSIM↑ | 394 LPIPS↓ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| NeuralBody [37] | 12h | 2 | 29.11 | 0.9674 | 40.95 | 30.54 | 0.9678 | 46.43 | 27.00 | 0.9518 | 59.47 | 30.10 | 0.9642 | 53.27 | 28.61 | 0.9590 | 59.05 | 29.10 | 0.9593 | 54.55 |
| HumanNeRF [58] | >8d | 0.2 | 30.41 | 0.9743 | 24.06 | 33.20 | 0.9752 | 28.99 | 28.18 | 0.9632 | 35.58 | 31.04 | 0.9705 | 32.12 | 28.31 | 0.9603 | 36.72 | 30.31 | 0.9642 | 32.89 |
| MonoHuman [70] | 4d | 0.1 | 29.12 | 0.9727 | 26.58 | 32.94 | 0.9695 | 36.04 | 27.93 | 0.9601 | 41.76 | 29.50 | 0.9635 | 39.45 | 27.64 | 0.9566 | 43.17 | 29.15 | 0.9595 | 38.08 |
| ARAH [57] | 8d | 0.1 | 30.85 | 0.9800 | 26.60 | 33.50 | 0.9781 | 31.40 | 28.49 | 0.9656 | 40.43 | 32.02 | 0.9742 | 35.28 | 28.77 | 0.9645 | 42.30 | 29.46 | 0.9632 | 40.76 |
| Ours | 0.5h | 50 | 30.64 | 0.9774 | 20.88 | 33.63 | 0.9773 | 25.77 | 28.33 | 0.9642 | 34.24 | 31.66 | 0.9730 | 30.14 | 28.88 | 0.9635 | 35.26 | 30.54 | 0.9661 | 31.21 |
| Instant-NVR* [7] | 0.1h | 3 | 31.28 | 0.9789 | 25.37 | 33.71 | 0.9770 | 32.81 | 28.39 | 0.9640 | 45.97 | 31.85 | 0.9730 | 39.47 | 29.56 | 0.9641 | 46.16 | 31.32 | 0.9680 | 40.63 |
| Ours* | 0.5h | 50 | 30.96 | 0.9778 | 19.85 | 33.94 | 0.9784 | 24.70 | 28.40 | 0.9656 | 32.96 | 32.10 | 0.9739 | 29.20 | 29.30 | 0.9645 | 34.03 | 30.74 | 0.9662 | 31.00 |
Table 4. Quantitative Results on PeopleSnapshot [1].
| Method | GPU↓ | FPS↑ | female-3-casual PSNR↑ | female-3-casual SSIM↑ | female-3-casual LPIPS↓ | female-4-casual PSNR↑ | female-4-casual SSIM↑ | female-4-casual LPIPS↓ | male-3-casual PSNR↑ | male-3-casual SSIM↑ | male-3-casual LPIPS↓ | male-4-casual PSNR↑ | male-4-casual SSIM↑ | male-4-casual LPIPS↓ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| InstantAvatar [12] | 5 min. | 15 | 27.66 | 0.9709 | 21.00 | 29.11 | 0.9683 | 16.70 | 29.53 | 0.9716 | 15.50 | 27.67 | 0.9626 | 30.7 |
| Ours | 45 min. | 50 | 30.57 | 0.9581 | 20.86 | 33.16 | 0.9678 | 15.74 | 34.28 | 0.9724 | 14.92 | 30.22 | 0.9653 | 23.05 |
![](images/0d9a9c3867c9690ac42a4f0d9f471dcd6a71d1b5a6955c5757ee0752e25168c7.jpg)
![](images/7100c0043f46985ec827f25486fc998778278d7c8015634db068aab801420277.jpg)
![](images/f8a7ab9505428a03290576c0fda0d8b908c6d20231f0efeee27f050fcc5239cc.jpg)
![](images/71c45e1a0a787ec7f859bfd0e58f1ad509d5fdfd434a248ffc0d8de1a5b2ac0b.jpg)
Figure 4. Ablation Study on the as-isometric-as-possible regularization, which removes the artifacts on highly articulated poses. The panels compare the full model against the variant trained w/o $\mathcal{L}_{isopos}$ and $\mathcal{L}_{isocov}$.

Table 5. Ablation Study on ZJU-MoCap [37]. The proposed model achieves the lowest LPIPS, demonstrating the effectiveness of all components.
| Metric | PSNR↑ | SSIM↑ | LPIPS↓ |
| --- | --- | --- | --- |
| Full model | 30.61 | 0.9703 | 29.58 |
| w/o color MLP | 30.55 | 0.9700 | 31.24 |
| w/o $\mathcal{L}_{isocov}$ | 30.61 | 0.9703 | 29.84 |
| w/o $\mathcal{L}_{isopos}$, $\mathcal{L}_{isocov}$ | 30.59 | 0.9699 | 30.25 |
| w/o pose correction | 30.60 | 0.9703 | 29.87 |
+ +better novel-view rendering quality and particularly generates more plausible results with respect to novel pose animation. + +# 6. Conclusion + +In this paper, we present 3DGS-Avatar, one of the first methods that utilize the explicit representation of 3DGS + +for efficient reconstruction of clothed human avatars from monocular videos. Our method achieves photorealistic rendering, awareness of pose-dependent cloth deformation, generalization to unseen poses, fast training, and real-time rendering all at once. + +Experiments show that our method is comparable to or even better than the state-of-the-art methods in terms of rendering quality while being two orders of magnitude faster in both training and inference. Furthermore, we propose to replace spherical harmonics with a shallow MLP to decode 3D Gaussian color and regularize deformation with geometric constraints, both proved to be effective in enhancing rendering quality. We hope that our new representation could foster further research in fast, high-quality animatable clothed human avatar synthesis from a monocular view. + +Acknowledgement. SW and AG were supported by the ERC Starting Grant LEGO-3D (850533) and the DFG EXC number 2064/1 - project number 390727645. SW and ST acknowledge the SNSF grant 200021 204840. + +# References + +[1] Thiemo Alldieck, Marcus Magnor, Weipeng Xu, Christian Theobalt, and Gerard Pons-Moll. Video based reconstruction of 3d people models. In Proc. of CVPR, 2018. 6, 8 +[2] Dragomir Anguelov, Praveen Srinivasan, Daphne Koller, Sebastian Thrun, Jim Rodgers, and James Davis. Scape: shape completion and animation of people. ACM Transactions Graphics, 24, 2005. 3 +[3] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In Proc. of ECCV, 2022. 2 +[4] Xu Chen, Yufeng Zheng, Michael Black, Otmar Hilliges, and Andreas Geiger. Snarf: Differentiable forward skinning for animating non-rigid neural implicit shapes. In Proc. of ICCV, 2021. 3, 4 +[5] Xu Chen, Tianjian Jiang, Jie Song, Max Rietmann, Andreas Geiger, Michael J. Black, and Otmar Hilliges. Fast-snarf: A fast deformer for articulated neural fields. Pattern Analysis and Machine Intelligence (PAMI), 2023. 3, 4 +[6] Yue Chen, Xuan Wang, Xingyu Chen, Qi Zhang, Xiaoyu Li, Yu Guo, Jue Wang, and Fei Wang. Uv volumes for real-time rendering of editable free-view human performance. In Proc. of CVPR, 2023. 2 +[7] Chen Geng, Sida Peng, Zhen Xu, Hujun Bao, and Xiaowei Zhou. Learning neural volumetric representations of dynamic humans in minutes. In Proc. of CVPR, 2023. 2, 6, 8 +[8] Chen Guo, Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Vid2 avatar: 3d avatar reconstruction from videos in the wild via self-supervised scene decomposition. In Proc. of CVPR, 2023. 1, 2, 3 +[9] N. Hasler, C. Stoll, M. Sunkel, B. Rosenhahn, and H.-P. Seidel. A Statistical Model of Human Pose and Body Shape. Computer Graphics Forum, 28:337-346, 2009. 3 +[10] Shoukang Hu and Ziwei Liu. Gauhuman: Articulated gaussian splatting from monocular human videos. In Proc. of CVPR, 2024. 3 +[11] Rohit Jena, Ganesh Subramanian Iyer, Siddharth Choudhary, Brandon Smith, Pratik Chaudhari, and James Gee. Splatarmor: Articulated gaussian splatting for animatable humans from monocular rgb videos. arXiv preprint arXiv:2311.10812, 2023. 3 +[12] Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Instantavatar: Learning avatars from monocular video in 60 seconds. In Proc. of CVPR, 2023. 
1, 2, 3, 4, 6, 8 +[13] Wei Jiang, Kwang Moo Yi, Golnoosh Samei, Oncel Tuzel, and Anurag Ranjan. Neuman: Neural human radiance field from a single video. In Proc. of ECCV, 2022. 1, 2, 3 +[14] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics, 42 (4), 2023. 1, 2, 3, 4, 5 +[15] Martin Kilian, Niloy J. Mitra, and Helmut Pottmann. Geometric modeling in shape space. ACM Transactions on Graphics (SIGGRAPH), 26(3), 2007. 2, 5 + +[16] Muhammed Kocabas, Jen-Hao Rick Chang, James Gabriel, Oncel Tuzel, and Anurag Ranjan. Hugs: Human gaussian splatting. In Proc. of CVPR, 2024. 3 +[17] Youngjoong Kwon, Lingjie Liu, Henry Fuchs, Marc Habermann, and Christian Theobalt. Deliffas: Deformable light fields for fast avatar synthesis. Proc. of NeurIPS, 2023. 2 +[18] Jiahui Lei, Yufu Wang, Georgios Pavlakos, Lingjie Liu, and Kostas Daniilidis. Gart: Gaussian articulated template models. In Proc. of CVPR, 2024. 3 +[19] Ruilong Li, Shan Yang, David A. Ross, and Angjoo Kanazawa. Ai choreographer: Music conditioned 3d dance generation with aist++. In Proc. of ICCV, 2021. 6 +[20] Ruilong Li, Julian Tanke, Minh Vo, Michael Zollhoefer, Jürgen Gall, Angjoo Kanazawa, and Christoph Lassner. Tava: Template-free animatable volumetric actors. In Proc. of ECCV, 2022. 1, 2, 3 +[21] Zhe Li, Zerong Zheng, Lizhen Wang, and Yebin Liu. Animatable gaussians: Learning pose-dependent gaussian maps for high-fidelity human avatar modeling. In Proc. of CVPR, 2024. 3 +[22] Lingjie Liu, Marc Habermann, Viktor Rudnev, Kripasindhu Sarkar, Jiatao Gu, and Christian Theobalt. Neural actor: Neural free-view synthesis of human actors with pose control. ACM Trans. Graph. (ACM SIGGRAPH Asia), 2021. 1 +[23] Yang Liu, Xiang Huang, Minghan Qin, Qinwei Lin, and Haoqian Wang. Animatable 3d gaussian: Fast and high-quality reconstruction of multiple human avatars, 2023. 3 +[24] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multiperson linear model. ACM Transactions Graphics, 34(6), 2015. 3, 4, 5 +[25] Naureen Mahmood, Nima Ghorbani, Nikolaus F. Troje, Gerard Pons-Moll, and Michael J. Black. AMASS: Archive of motion capture as surface shapes. In Proc. of ICCV, 2019. 6 +[26] Marko Mihajlovic, Yan Zhang, Michael J. Black, and Siyu Tang. LEAP: Learning articulated occupancy of people. In Proc. of CVPR, 2021. 3, 4 +[27] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. of ECCV, 2020. 1, 2 +[28] Arthur Moreau, Jifei Song, Helisa Dhamo, Richard Shaw, Yiren Zhou, and Eduardo Pérez-Pellitero. Human gaussian splatting: Real-time rendering of animatable avatars. In Proc. of CVPR, 2024. 3 +[29] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions Graphics, 41(4), 2022. 2, 4 +[30] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proc. of CVPR, 2020. 1 +[31] Atsuhiro Noguchi, Xiao Sun, Stephen Lin, and Tatsuya Harada. Neural articulated radiance field. In Proc. of ICCV, 2021. 1, 2 + +[32] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proc. 
of ICCV, 2021. 1 +[33] Ahmed A. A. Osman, Timo Bolkart, and Michael J. Black. Star: Sparse trained articulated human body regressor. In Proc. of ECCV, 2020. 3 +[34] Georgios Pavlakos, Luyang Zhu, Xiaowei Zhou, and Kostas Daniilidis. Learning to estimate 3d human pose and shape from a single color image. In Proc. of CVPR, 2018. 3 +[35] Sida Peng, Junting Dong, Qianqian Wang, Shangzhan Zhang, Qing Shuai, Xiaowei Zhou, and Hujun Bao. Animatable neural radiance fields for modeling dynamic human bodies. In Proc. of ICCV, 2021. 1, 2, 3, 6 +[36] Songyou Peng, Chiyu "Max" Jiang, Yiyi Liao, Michael Niemeyer, Marc Pollefeys, and Andreas Geiger. Shape as points: A differentiable poisson solver. In Proc. of NeurIPS, 2021. 1 +[37] Sida Peng, Yuanqing Zhang, Yinghao Xu, Qianqian Wang, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Neural body: Implicit neural representations with structured latent codes for novel view synthesis of dynamic humans. In Proc. of CVPR, 2021. 1, 2, 5, 6, 7, 8 +[38] Sida Peng, Shangzhan Zhang, Zhen Xu, Chen Geng, Boyi Jiang, Hujun Bao, and Xiaowei Zhou. Animatable neural implicit surfaces for creating avatars from videos. ArXiv, abs/2203.08133, 2022. 1, 2, 3, 5 +[39] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proc. of CVPR, 2023. 2 +[40] Sergey Prokudin, Michael J. Black, and Javier Romero. SMPLpix: Neural avatars from 3D human models. In Proc. of WACV, 2021. 3 +[41] Sergey Prokudin, Qianli Ma, Maxime Raafat, Julien Valentin, and Siyu Tang. Dynamic point fields. In Proc. of ICCV, 2023. 2, 5 +[42] Amit Raj, Julian Tanke, James Hays, Minh Vo, Carsten Stoll, and Christoph Lassner. Anr-articulated neural rendering for virtual avatars. In Proc. of CVPR, 2021. 1 +[43] Christian Reiser, Richard Szeliski, Dor Verbin, Pratul P. Srinivasan, Ben Mildenhall, Andreas Geiger, Jonathan T. Barron, and Peter Hedman. Merf: Memory-efficient radiance fields for real-time view synthesis in unbounded scenes. ACM TOG, 42(4), 2023. 2 +[44] Darius Rückert, Linus Franke, and Marc Stamminger. Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics, 41(4), 2022. 2, 3 +[45] Shunsuke Saito, Jinlong Yang, Qianli Ma, and Michael J. Black. SCANimate: Weakly supervised learning of skinned clothed avatar networks. In Proc. of CVPR, 2021. 3 +[46] Sara Fridovich-Keil and Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proc. of CVPR, 2022. 2 +[47] Vincent Sitzmann, Semon Rezchikov, William T. Freeman, Joshua B. Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. In Proc. of NeurIPS, 2021. 1 + +[48] Shih-Yang Su, Frank Yu, Michael Zollhoefer, and Helge Rhodin. A-neRF: Articulated neural radiance fields for learning human shape, appearance, and pose. In Proc. of NeurIPS, 2021. 1, 2 +[49] Shih-Yang Su, Timur Bagautdinov, and Helge Rhodin. Npc: Neural point characters from video. In Proc. of ICCV, 2023. 2, 3 +[50] Mohammed Suhail, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Generalizable patch-based neural rendering. In Proc. of ECCV, 2022. 1 +[51] Mohammed Suhail1, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Light field neural rendering. In Proc. of CVPR, 2022. 1 +[52] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proc. of CVPR, 2022. 
2 +[53] Huan Wang, Jian Ren, Zeng Huang, Kyle Olszewski, Mengei Chai, Yun Fu, and Sergey Tulyakov. R21: Distilling neural radiance field to neural light field for efficient novel view synthesis. In Proc. of ECCV, 2022. 1 +[54] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proc. of CVPR, 2022. 3 +[55] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Proc. of NeurIPS, 2021. 1 +[56] Shaofei Wang, Marko Mihajlovic, Qianli Ma, Andreas Geiger, and Siyu Tang. Metaatrix: Learning animatable clothed human models from few depth images. In Proc. of NeurIPS, 2021. 3 +[57] Shaofei Wang, Katja Schwarz, Andreas Geiger, and Siyu Tang. Arah: Animatable volume rendering of articulated human sdfs. In Proc. of ECCV, 2022. 1, 2, 3, 4, 6, 8 +[58] Chung-Yi Weng, Brian Curless, Pratul P. Srinivasan, Jonathan T. Barron, and Ira Kemelmacher-Shlizerman. Humaner: Free-viewpoint rendering of moving people from monocular video. In Proc. of CVPR, 2022. 1, 2, 3, 4, 6, 8 +[59] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3, 5 +[60] Hongyi Xu, Eduard Gabriel Bazavan, Andrei Zanfir, William T. Freeman, Rahul Sukthankar, and Cristian Sminchisescu. Ghum & ghuml: Generative 3d human shape and articulated pose models. In Proc. of CVPR, 2020. 3 +[61] Hongyi Xu, Thiemo Alldieck, and Cristian Sminchisescu. H-neRF: Neural radiance fields for rendering and temporal reconstruction of humans in motion. In Proc. of NeurIPS, 2021. 1, 2 +[62] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proc. of CVPR, 2022. 2, 3 +[63] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for + +high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 3, 5 +[64] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv 2310.10642, 2023. 3, 5 +[65] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Proc. of NeurIPS, 2020. 1 +[66] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Proc. of NeurIPS, 2021. 1 +[67] Lior Yariv, Peter Hedman, Christian Reiser, Dor Verbin, Pratul P. Srinivasan, Richard Szeliski, Jonathan T. Barron, and Ben Mildenhall. Bakedsdf: Meshing neural sdfs for real-time view synthesis. In Proc. of SIGGRAPH, 2023. 2 +[68] Keyang Ye, Tianjia Shao, and Kun Zhou. Animatable 3d gaussians for high-fidelity synthesis of human motions, 2023. 3 +[69] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. PlenOctrees for real-time rendering of neural radiance fields. In Proc. of ICCV, 2021. 2, 3 +[70] Zhengming Yu, Wei Cheng, xian Liu, Wayne Wu, and KwanYee Lin. MonoHuman: Animatable human neural field from monocular video. In Proc. of CVPR, 2023. 
1, 2, 6, 8 +[71] Qiang Zhang, Seung-Hwan Baek, Szymon Rusinkiewicz, and Felix Heide. Differentiable point-based radiance fields for efficient view synthesis. In SIGGRAPH Asia Conference Proceedings, 2022. 2, 3 +[72] Fuqiang Zhao, Yuheng Jiang, Kaixin Yao, Jiakai Zhang, Liao Wang, Haizhao Dai, Yuhui Zhong, Yingliang Zhang, Minye Wu, Lan Xu, and Jingyi Yu. Human performance modeling and rendering via neural animated mesh. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 41(6), 2022. 3 +[73] Yufeng Zheng, Wang Yifan, Gordon Wetzstein, Michael J. Black, and Otmar Hilliges. Pointavatar: Deformable point-based head avatars from videos. In Proc. of ECCV, 2023. 2, 3, 4 +[74] Zerong Zheng, Han Huang, Tao Yu, Hongwen Zhang, Yandong Guo, and Yebin Liu. Structured local radiance fields for human avatar modeling. In Proc. of CVPR, 2022. 2, 3 +[75] Wojciech Zielonka, Timur Bagautdinov, Shunsuke Saito, Michael Zollhöfer, Justus Thies, and Javier Romero. Drivable 3d gaussian avatars. arXiv preprint arXiv:2311.08581, 2023. 3 +[76] M. Zwicker, H. Pfister, J. van Baar, and M. Gross. Ewa volume splatting. In Proceedings Visualization, 2001. VIS '01., pages 29-538, 2001. 4 \ No newline at end of file diff --git a/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/images.zip b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..3c61cdafc29111df9582ced4d9d7c427756b13c4 --- /dev/null +++ b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:810af912ca5fd205544f9b2e9690bf9cb6b1c556b8c4f4f4cd6154cec3827663 +size 588854 diff --git a/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/layout.json b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..fa64238e8667228149048b0797dd807aa555aa95 --- /dev/null +++ b/2024/3DGS-Avatar_ Animatable Avatars via Deformable 3D Gaussian Splatting/layout.json @@ -0,0 +1,9328 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 68, + 103, + 525, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 103, + 525, + 121 + ], + "spans": [ + { + "bbox": [ + 68, + 103, + 525, + 121 + ], + "type": "text", + "content": "3DGS-Avatar: Animatable Avatars via Deformable 3D Gaussian Splatting" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "spans": [ + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "text", + "content": "Zhiyin Qian" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "text", + "content": " Shaofei Wang" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{1,2,3}" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "text", + "content": " Marko Mihajlovic" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "text", + "content": " Andreas Geiger" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{2,3}" + }, + { + "bbox": [ + 76, + 141, + 516, + 
173 + ], + "type": "text", + "content": " Siyu Tang" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "text", + "content": "ETH Zürich " + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "text", + "content": "University of Tübingen " + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 76, + 141, + 516, + 173 + ], + "type": "text", + "content": "Tübingen AI Center" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 51, + 204, + 541, + 301 + ], + "blocks": [ + { + "bbox": [ + 51, + 204, + 541, + 301 + ], + "lines": [ + { + "bbox": [ + 51, + 204, + 541, + 301 + ], + "spans": [ + { + "bbox": [ + 51, + 204, + 541, + 301 + ], + "type": "image", + "image_path": "20265e84ed1b0eeb165923bfa7b725cf8fab4493209f38f90668c76390df00c6.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 312, + 546, + 369 + ], + "lines": [ + { + "bbox": [ + 46, + 312, + 546, + 369 + ], + "spans": [ + { + "bbox": [ + 46, + 312, + 546, + 369 + ], + "type": "text", + "content": "Figure 1. 3DGS-Avatar. We develop an efficient method for creating animatable avatars from monocular videos, leveraging 3D Gaussian Splatting [14]. Given a short sequence of dynamic human with a tracked skeleton and foreground masks, our method creates an avatar within 30 minutes on a single GPU, supports animation and novel view synthesis at over 50 FPS, and achieves comparable or better rendering quality to the state-of-the-art [57, 58] that requires over 8 GPU days to train, takes several seconds to render a single image, and relies on pre-training on clothed human scans [57]." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 377, + 192, + 390 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 377, + 192, + 390 + ], + "spans": [ + { + "bbox": [ + 143, + 377, + 192, + 390 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 413, + 290, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 413, + 290, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 413, + 290, + 700 + ], + "type": "text", + "content": "We introduce an approach that creates animatable human avatars from monocular videos using 3D Gaussian Splatting (3DGS). Existing methods based on neural radiance fields (NeRFs) achieve high-quality novel-view/novelpose image synthesis but often require days of training, and are extremely slow at inference time. Recently, the community has explored fast grid structures for efficient training of clothed avatars. Albeit being extremely fast at training, these methods can barely achieve an interactive rendering frame rate with around 15 FPS. 
In this paper, we use 3D Gaussian Splatting and learn a non-rigid deformation network to reconstruct animatable clothed human avatars that can be trained within 30 minutes and rendered at real-time frame rates " + }, + { + "bbox": [ + 46, + 413, + 290, + 700 + ], + "type": "inline_equation", + "content": "(50 + FPS)" + }, + { + "bbox": [ + 46, + 413, + 290, + 700 + ], + "type": "text", + "content": ". Given the explicit nature of our representation, we further introduce as-isometric-as-possible regularizations on both the Gaussian mean vectors and the covariance matrices, enhancing the generalization of our model on highly articulated unseen poses. Experimental results show that our method achieves comparable and even better performance compared to state-of-the-art approaches on animatable avatar creation from a monocular input, while being " + }, + { + "bbox": [ + 46, + 413, + 290, + 700 + ], + "type": "inline_equation", + "content": "400x" + }, + { + "bbox": [ + 46, + 413, + 290, + 700 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 413, + 290, + 700 + ], + "type": "inline_equation", + "content": "250x" + }, + { + "bbox": [ + 46, + 413, + 290, + 700 + ], + "type": "text", + "content": " faster in training and inference, respectively. Please see our project page at https://neuralbodies.github.io/3DGS-Avatar." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 377, + 386, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 377, + 386, + 389 + ], + "spans": [ + { + "bbox": [ + 307, + 377, + 386, + 389 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 399, + 546, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 399, + 546, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 399, + 546, + 567 + ], + "type": "text", + "content": "Reconstructing clothed human avatars from image inputs presents a significant challenge in computer vision, yet holds immense importance due to its applications in virtual reality, gaming, and e-commerce. Traditional methods often rely on dense, synchronized multi-view inputs, which may not be readily available in more practical scenarios. Recent advances in implicit neural fields [27, 30, 32, 36, 47, 50, 51, 53, 55, 65, 66] have enabled high-quality reconstruction of geometry [8, 38, 57, 61] and appearance [13, 20, 22, 31, 35, 37, 42, 58, 70] of clothed human bodies from sparse multi-view or monocular videos. Animation of such reconstructed clothed human bodies is also possible by learning the geometry and appearance representations in a predefined canonical pose [13, 20, 35, 57, 58, 70]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 568, + 547, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 568, + 547, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 568, + 547, + 700 + ], + "type": "text", + "content": "To achieve state-of-the-art rendering quality, existing methods rely on training a neural radiance field (NeRF) [27] combined with either explicit body articulation [8, 12, 13, 20, 35, 38, 57, 58, 70] or conditioning the NeRF on human body related encodings [31, 37, 48, 61]. They often employ large multi-layer perceptrons (MLPs) to model the neural radiance field, which are computationally demanding, leading to prolonged training (days) and inference (seconds) time. 
This computational expense poses a significant challenge for practical applications of these state-of-the-art methods in real-time applications." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 318, + 701, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 701, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 318, + 701, + 546, + 713 + ], + "type": "text", + "content": "With recent advances in efficient learning of implicit" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5020" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": "neural fields, training time of NeRFs has been reduced to minutes [3, 29, 46, 52?]. There are also works targeting fast inference of pretrained NeRFs [43, 67, 69]. Inspired by these developments, several avatar reconstruction methods have been tailored to fast training [7, 12] or fast inference [6, 17, 39]. However, to the best of our knowledge, there currently exists no method that simultaneously achieves both fast training and real-time inference for animatable avatar reconstruction from just monocular videos." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 183, + 287, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 183, + 287, + 268 + ], + "spans": [ + { + "bbox": [ + 46, + 183, + 287, + 268 + ], + "type": "text", + "content": "Point-based rendering [44, 49, 62, 71, 73? , 74] has emerged as an efficient alternative to NeRFs for fast inference. With the recently proposed 3D Gaussian Splatting (3DGS) [14], it is possible to achieve state-of-the-art rendering quality using only a fraction of NeRFs' inference time and comparatively fast training for static scene reconstruction." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 272, + 289, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 272, + 289, + 440 + ], + "spans": [ + { + "bbox": [ + 46, + 272, + 289, + 440 + ], + "type": "text", + "content": "Leveraging the capabilities of 3DGS, we demonstrate its application in modeling animatable clothed avatars using monocular videos. Our approach effectively integrates rigid human articulation with a non-rigid deformation field within the 3DGS framework. 
We use a small multi-layer perceptron (MLP) to decode color. This MLP is designed to be responsive to local non-rigid deformations and dynamic lighting conditions, ensuring a more realistic and responsive rendering of the avatar's appearance. Furthermore, we apply as-isometric-as-possible regularizations [15, 41] to both the Gaussian mean vectors and the covariance matrices, which helps maintain the geometric consistency and realistic deformation of the avatar, particularly in dynamic and varied poses." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 444, + 287, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 444, + 287, + 576 + ], + "spans": [ + { + "bbox": [ + 46, + 444, + 287, + 576 + ], + "type": "text", + "content": "Our experimental results show that our method is comparable to or better than current state-of-the-art [57, 58] in animatable avatar creation from monocular inputs, achieving training speed 400 times faster and inference speed 250 times quicker. Compared to methods that focus on fast training [7, 12], our method, despite being slower in training, can model pose-dependent non-rigid deformation and produce significantly better rendering quality, while being 3 times faster in terms of rendering. We provide an overview of the comparison to major prior works in Tab. 1. In summary, our work makes the following contributions:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 579, + 287, + 711 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 47, + 579, + 287, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 579, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 47, + 579, + 287, + 601 + ], + "type": "text", + "content": "- We introduce 3D Gaussian Splatting to animatable human avatars reconstruction from monocular videos." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 603, + 287, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 603, + 287, + 650 + ], + "spans": [ + { + "bbox": [ + 47, + 603, + 287, + 650 + ], + "type": "text", + "content": "- We develop a simple yet effective deformation network as well as regularization terms that effectively drive 3D Gaussian Splats to handle highly articulated and out-of-distribution poses." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 651, + 287, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 651, + 287, + 711 + ], + "spans": [ + { + "bbox": [ + 47, + 651, + 287, + 711 + ], + "type": "text", + "content": "- Our method is the first, to our knowledge, to simultaneously deliver high-quality rendering, model pose-dependent non-rigid deformation, generalize effectively to unseen poses, and achieve fast training (less than 30 minutes) and real-time rendering speed " + }, + { + "bbox": [ + 47, + 651, + 287, + 711 + ], + "type": "inline_equation", + "content": "(50+\\mathrm{FPS})" + }, + { + "bbox": [ + 47, + 651, + 287, + 711 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 326, + 72, + 527, + 242 + ], + "blocks": [ + { + "bbox": [ + 326, + 72, + 527, + 242 + ], + "lines": [ + { + "bbox": [ + 326, + 72, + 527, + 242 + ], + "spans": [ + { + "bbox": [ + 326, + 72, + 527, + 242 + ], + "type": "image", + "image_path": "e9fda64a11f5021ef1f0f1249f67708d0c44f3844d225ab1c07b86eeae577d61.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 251, + 547, + 318 + ], + "lines": [ + { + "bbox": [ + 305, + 251, + 547, + 318 + ], + "spans": [ + { + "bbox": [ + 305, + 251, + 547, + 318 + ], + "type": "text", + "content": "Table 1. Comparison to SoTA. Instant-NVR [7] and InstantA-vatar [12] achieve instant training within 5 minutes. For real-time rendering, we require a frame rate over 30 FPS. Note that while UV-Volumes [6] claims real-time freeview rendering, they only achieve 14 FPS on novel pose synthesis due to the slow generation of their UV Volume." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 342, + 398, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 342, + 398, + 354 + ], + "spans": [ + { + "bbox": [ + 306, + 342, + 398, + 354 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 364, + 545, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 364, + 545, + 519 + ], + "spans": [ + { + "bbox": [ + 304, + 364, + 545, + 519 + ], + "type": "text", + "content": "Neural rendering for clothed human avatars. Since the seminal work of Neural Radiance Fields (NeRF) [27], there has been a surge of research on neural rendering for clothed human avatars. The majority of the works focus on either learning a NeRF conditioned on human body related encodings [31, 48, 61], or learning a canonical NeRF representation and warp camera rays from the observation space to the canonical space to query radiance and density values from the canonical NeRF [8, 12, 13, 20, 35, 38, 57, 58, 70]. Most of these works rely on large multi-layer perceptrons (MLPs) to model the underlying neural radiance field, which are computationally expensive, resulting in prolonged training (days) and inference (seconds) time." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 521, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 546, + 715 + ], + "type": "text", + "content": "With recent advances in accelerated data structures for neural fields, there has been several works targeting fast inference and fast training of NeRFs for clothed humans. [12] proposes to use iNGP [29] as the underlying representation for articulated NeRFs, which enables fast training (less than 5 minutes) and interactive rendering speed (15 FPS) but ignores pose-dependent non-rigid deformations. [7] also utilizes iNGP and represents non-rigid deformations in the UV space, which enables fast training and modeling of pose-dependent non-rigid deformations. However, as we will show in our experiments, [7]'s parametrization of non-rigid deformations result in blurry renderings. [6] proposes to generate a pose-dependent UV volume for efficient free-view synthesis. However, their UV-volume generation process is slow (20 FPS), making novel pose synthesis less efficient (only 14 FPS). 
[17] also employs UV-based rendering" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "type": "text", + "content": "5021" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "content": "to achieve real-time rendering of dynamic clothed humans, but only works on dense multi-view inputs. Extending [69], [54, 72] applied Fourier transform for compressing human performance capture data, albeit with limitations on dense multi-view data (60-80 views) and non-generalizability of the Fourier basis representation to unseen poses beyond the training dataset. In contrast to all these works, our method achieves state-of-the-art rendering quality and speed with less than 30 minutes of training time from a single monocular video input." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 194, + 287, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 194, + 287, + 290 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 287, + 290 + ], + "type": "text", + "content": "Dynamic 3D gaussians. Point-based rendering [40, 44, 49, 62, 71, 73, 74] has also been shown to be an efficient alternative to NeRFs for fast inference and training. Extending point cloud to 3D Gaussians, 3D Gaussian Splatting (3DGS) [14] models the rendering process as splatting a set of 3D Gaussians onto image plane via alpha blending, achieving state-of-the-art rendering quality with real-time inference speed and fast training given multi-view inputs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 293, + 289, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 293, + 289, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 293, + 289, + 555 + ], + "type": "text", + "content": "Given the great performance on both quality and speed of 3DGS, a rich set of works has further explored the 3D Gaussian representation for dynamic scene reconstruction. [14] proposed to optimize the position and shape of each 3D Gaussian on a frame-by-frame basis and simultaneously performed 6-DOF dense tracking for free. Their model size, however, increases with the temporal dimension. [59, 63] maintain a single set of 3D Gaussians in a canonical space and deform them to each frame via learning a time-dependent deformation field, producing state-of-the-art results in terms of both rendering quality and speed. [64] augments 3D Gaussians with temporal dimension into 4D Gaussian primitives to approximate the underlying spatiotemporal 4D volume of the dynamic scene. While such methods show promising results, they are only applicable to either synthetic datasets with fast camera movement and slow object motion or forward-facing real scenes with limited object movements, thus unable to handle the immense displacement of the articulated human body. To address this problem, our approach utilizes a statistical human body model [24] for articulation and applies regularization to reduce the overfitting of the deformation field." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 558, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 289, + 715 + ], + "type": "text", + "content": "Concurrent works. Concurrent with our method, many recent works also seek to combine 3DGS with human articulation prior for avatar reconstruction. We provide a comparison of our approach to concurrent works in Tab. 2. D3GA [75] proposed to embed 3D Gaussians in tetrahedral cages and utilize cage deformations for drivable avatar animation. However, they use dense calibrated multi-view videos as input and require an additional 3D scan to generate the tetrahedral mesh template. Li et al. [21] focused on generating avatars with a detailed appearance from multiview videos by post-processing radiance field renderings with 2D CNNs, which limits their rendering speed. Along with [11, 28], these works fail to achieve fast training with" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 326, + 72, + 527, + 253 + ], + "blocks": [ + { + "bbox": [ + 326, + 72, + 527, + 253 + ], + "lines": [ + { + "bbox": [ + 326, + 72, + 527, + 253 + ], + "spans": [ + { + "bbox": [ + 326, + 72, + 527, + 253 + ], + "type": "image", + "image_path": "0c5bb30f7863eb054a4a8bf0a42e08f62a298dbc2cbb1f043fea25d713d8c2eb.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 341, + 262, + 511, + 273 + ], + "lines": [ + { + "bbox": [ + 341, + 262, + 511, + 273 + ], + "spans": [ + { + "bbox": [ + 341, + 262, + 511, + 273 + ], + "type": "text", + "content": "Table 2. Comparison to Concurrent Works." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 298, + 547, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 298, + 547, + 455 + ], + "spans": [ + { + "bbox": [ + 304, + 298, + 547, + 455 + ], + "type": "text", + "content": "relatively complex pipelines. Similar to our approach, Ye et al. [68] deforms 3D Gaussians in canonical space via pose-dependent deformation and rigid articulation, but they still require 2 hours for training and do not show results on monocular inputs. HUGS [16] learns a background model along with the animatable human avatar, but they fail to take pose-dependent cloth deformation into account. Several other works [10, 18, 23] also neglect pose-dependent cloth deformation to achieve even faster training (in 5 minutes) and rendering " + }, + { + "bbox": [ + 304, + 298, + 547, + 455 + ], + "type": "inline_equation", + "content": "(150 + \\mathrm{FPS})" + }, + { + "bbox": [ + 304, + 298, + 547, + 455 + ], + "type": "text", + "content": ". We argue that our method strikes a good balance between quality and speed compared to concurrent works, as being the only method simultaneously achieving the properties listed in Tab. 2." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 471, + 391, + 484 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 471, + 391, + 484 + ], + "spans": [ + { + "bbox": [ + 306, + 471, + 391, + 484 + ], + "type": "text", + "content": "3. 
Preliminaries" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 493, + 547, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 493, + 547, + 601 + ], + "spans": [ + { + "bbox": [ + 304, + 493, + 547, + 601 + ], + "type": "text", + "content": "Linear Blend Skinning. To model human articulations, a widely adopted paradigm is to represent geometry and appearance in a shared canonical space [8, 12, 13, 20, 35, 38, 57, 58] and use Linear Blend Skinning (LBS) [2, 9, 24, 33, 34, 60] to deform the parametric human body under arbitrary poses. Given a point " + }, + { + "bbox": [ + 304, + 493, + 547, + 601 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_c" + }, + { + "bbox": [ + 304, + 493, + 547, + 601 + ], + "type": "text", + "content": " in canonical space, the LBS function takes a set of rigid bone transformations " + }, + { + "bbox": [ + 304, + 493, + 547, + 601 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{B}_b\\}_{b=1}^B" + }, + { + "bbox": [ + 304, + 493, + 547, + 601 + ], + "type": "text", + "content": " and computes its correspondence " + }, + { + "bbox": [ + 304, + 493, + 547, + 601 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_o" + }, + { + "bbox": [ + 304, + 493, + 547, + 601 + ], + "type": "text", + "content": " in the observation space:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 373, + 615, + 545, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 615, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 373, + 615, + 545, + 628 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {\\mathbf {o}} = L B S _ {\\sigma_ {w}} \\left(\\mathbf {x} _ {c}; \\left\\{\\mathbf {B} _ {b} \\right\\}\\right) \\tag {1}", + "image_path": "e683cab6fdc991734f6b81a314150f620e8236fcfe4dee400b56b4b129976979.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "content": "Assuming an underlying SMPL model, we use a total of " + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "inline_equation", + "content": "B = 24" + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "content": " bone transformations, each represented by a " + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "inline_equation", + "content": "4 \\times 4" + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "content": " rotation-translation matrix, which are then linearly blended via a set of skinning weights " + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{w} \\in [0,1]^B" + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "content": ", s.t. " + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\sum_{b=1}^{B} \\mathbf{w}_b = 1" + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "content": ", modeled by a coordinate-based neural skinning field " + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "inline_equation", + "content": "f_{\\sigma_w}(\\mathbf{x}_c) \\in [4,5,26,45,56]" + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "content": ". 
The forward linear blend skin" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5022" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 212, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 212, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 212, + 83 + ], + "type": "text", + "content": "ning function can thus be formulated as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 58, + 92, + 287, + 114 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 92, + 287, + 114 + ], + "spans": [ + { + "bbox": [ + 58, + 92, + 287, + 114 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {o} = L B S _ {\\sigma_ {w}} \\left(\\mathbf {x} _ {c}; \\{\\mathbf {B} _ {b} \\}\\right) = \\sum_ {b = 1} ^ {B} f _ {\\sigma_ {w}} \\left(\\mathbf {x} _ {c}\\right) _ {b} \\mathbf {B} _ {b} \\mathbf {x} _ {c} \\tag {2}", + "image_path": "de8a9e33c0ea8278f3b78f7a1ccc161169557282eef9a10ec7a502c0f47651ae.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 122, + 287, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 122, + 287, + 193 + ], + "spans": [ + { + "bbox": [ + 46, + 122, + 287, + 193 + ], + "type": "text", + "content": "Compared to prior works that search for canonical correspondences of points in observation space [12, 57, 58], our method requires no inverse skinning, which is typically difficult to compute and often leads to multiple solutions [4, 5]. A similar technique has been employed in [73] for face avatar modeling." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "content": "3D Gaussian Splatting. 3DGS [14] utilizes a set of 3D Gaussian primitives " + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{G}\\}" + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "content": " as a static scene representation which can be rendered in real-time via differentiable rasterization. 
Each 3D Gaussian " + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "content": " is defined by its mean " + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "content": ", covariance " + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}" + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "content": ", opacity " + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "content": " and view-dependent color represented by spherical harmonics coefficients " + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "content": ". To ensure positive semi-definiteness, the covariance matrix is represented by a scaling matrix " + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "inline_equation", + "content": "\\mathbf{S}" + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "content": " and rotation matrix " + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "content": ". In practice, we store the diagonal vector " + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "inline_equation", + "content": "\\mathbf{s} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "content": " of the scaling matrix and a quaternion vector " + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "inline_equation", + "content": "\\mathbf{q} \\in \\mathbb{R}^4" + }, + { + "bbox": [ + 46, + 194, + 287, + 325 + ], + "type": "text", + "content": " to represent rotation, which can be trivially converted to a valid covariance matrix." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 326, + 287, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 326, + 287, + 434 + ], + "spans": [ + { + "bbox": [ + 46, + 326, + 287, + 434 + ], + "type": "text", + "content": "The 3D Gaussians are projected to the 2D image plane during the rendering process and accumulated via alpha blending. Given a viewing transformation " + }, + { + "bbox": [ + 46, + 326, + 287, + 434 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 46, + 326, + 287, + 434 + ], + "type": "text", + "content": " and the Jacobian of the affine approximation of the projective transformation " + }, + { + "bbox": [ + 46, + 326, + 287, + 434 + ], + "type": "inline_equation", + "content": "\\mathbf{J}" + }, + { + "bbox": [ + 46, + 326, + 287, + 434 + ], + "type": "text", + "content": ", the 2D covariance matrix in camera coordinate [76] is given by " + }, + { + "bbox": [ + 46, + 326, + 287, + 434 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}^{\\prime} = (\\mathbf{J}\\mathbf{W}\\boldsymbol{\\Sigma}\\mathbf{W}^{T}\\mathbf{J}^{T})_{1:2,1:2}" + }, + { + "bbox": [ + 46, + 326, + 287, + 434 + ], + "type": "text", + "content": ". 
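To make the covariance construction and projection above concrete, here is a minimal sketch (not the authors' implementation): a scaling vector s and a quaternion q yield the covariance via R S S^T R^T, which is then mapped to the 2D image-plane covariance with the viewing rotation W and the projection Jacobian J. The pinhole intrinsics, the EWA-style Jacobian, and all numeric values are illustrative assumptions; writing J as a 2x3 matrix keeps only the upper-left 2x2 block of the projected covariance.

```python
# Illustrative sketch: 3D Gaussian covariance from scale/quaternion and its 2D projection.
import torch

def quat_to_rotmat(q):
    # q = (w, x, y, z); normalized before use
    w, x, y, z = (q / q.norm()).tolist()
    return torch.tensor([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
        [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
    ])

def covariance_3d(s, q):
    # Sigma = R S S^T R^T is positive semi-definite by construction
    R = quat_to_rotmat(q)
    S = torch.diag(s)
    return R @ S @ S.T @ R.T

def covariance_2d(sigma, x_world, W, fx, fy):
    # Sigma' = (J W Sigma W^T J^T) restricted to its first two rows/columns,
    # using the EWA affine approximation of the perspective projection [76].
    tx, ty, tz = (W @ x_world).tolist()  # Gaussian center in camera coordinates
    J = torch.tensor([[fx / tz, 0.0, -fx * tx / tz ** 2],
                      [0.0, fy / tz, -fy * ty / tz ** 2]])
    return J @ W @ sigma @ W.T @ J.T     # 2x2 image-plane covariance

s = torch.tensor([0.02, 0.01, 0.03])     # per-axis scales (illustrative)
q = torch.tensor([1.0, 0.0, 0.0, 0.0])   # identity rotation
W = torch.eye(3)                         # world-to-camera rotation (illustrative)
sigma2d = covariance_2d(covariance_3d(s, q), torch.tensor([0.1, 0.2, 2.0]), W, 500.0, 500.0)
```
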
The pixel color " + }, + { + "bbox": [ + 46, + 326, + 287, + 434 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 46, + 326, + 287, + 434 + ], + "type": "text", + "content": " is thus computed by blending 3D Gaussian splats that overlap at the given pixel, sorted according to their depth:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 443, + 287, + 470 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 443, + 287, + 470 + ], + "spans": [ + { + "bbox": [ + 96, + 443, + 287, + 470 + ], + "type": "interline_equation", + "content": "C = \\sum_ {i} \\left(\\alpha_ {i} ^ {\\prime} \\prod_ {j = 1} ^ {i - 1} \\left(1 - \\alpha_ {j} ^ {\\prime}\\right)\\right) c _ {i} \\tag {3}", + "image_path": "3489192c7095a2a532bc0922d41d3f0a935adf6ca72064dfd808c1ca42b1b7db.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "inline_equation", + "content": "\\alpha_{i}^{\\prime}" + }, + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "text", + "content": " denotes the learned opacity " + }, + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "inline_equation", + "content": "\\alpha_{i}" + }, + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "text", + "content": " weighted by the probability density of " + }, + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "text", + "content": "-th projected 2D Gaussian at the target pixel location. " + }, + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "text", + "content": " denotes the view-dependent color computed from stored SH coefficients " + }, + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 46, + 478, + 287, + 525 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 526, + 287, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 526, + 287, + 597 + ], + "spans": [ + { + "bbox": [ + 46, + 526, + 287, + 597 + ], + "type": "text", + "content": "The 3D Gaussians " + }, + { + "bbox": [ + 46, + 526, + 287, + 597 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{G}\\}" + }, + { + "bbox": [ + 46, + 526, + 287, + 597 + ], + "type": "text", + "content": " are optimized via a photometric loss. During optimization, 3DGS adaptively controls the number of 3D Gaussians via periodic densification and pruning, achieving self-adaptive convergence to an optimal density distribution of 3D Gaussians that well represents the scene." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 609, + 107, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 609, + 107, + 621 + ], + "spans": [ + { + "bbox": [ + 47, + 609, + 107, + 621 + ], + "type": "text", + "content": "4. 
Methods" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "text", + "content": "We illustrate the pipeline of our method in Fig. 2. The input to our method is a monocular video with a calibrated camera, fitted SMPL parameters, and foreground masks. Our method optimizes a set of 3D Gaussians in canonical space, which is then deformed to the observation space and rendered from the given camera. For a set of 3D Gaussians " + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{G}^{(i)}\\}_{i = 1}^{N}" + }, + { + "bbox": [ + 46, + 629, + 287, + 714 + ], + "type": "text", + "content": ", we store the following properties at each point:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "content": "position " + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "content": ", scaling factor " + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "content": ", rotation quaternion " + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "content": ", opacity " + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "content": " and a color feature vector " + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "content": ". We start by randomly sampling " + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "N = 50k" + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "content": " points on the canonical SMPL [24] mesh surface as initialization of canonical 3D Gaussians " + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{G}_c\\}" + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "content": ". Inspired by HumanNeRF [58], we decompose the complex human deformation into a non-rigid part that encodes pose-dependent cloth deformation, and a rigid transformation controlled by the human skeleton." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 174, + 512, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 174, + 512, + 186 + ], + "spans": [ + { + "bbox": [ + 305, + 174, + 512, + 186 + ], + "type": "text", + "content": "4.1. 
Pose-dependent Non-rigid Deformation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 192, + 515, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 192, + 515, + 204 + ], + "spans": [ + { + "bbox": [ + 306, + 192, + 515, + 204 + ], + "type": "text", + "content": "We formulate the non-rigid deformation module as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 375, + 209, + 545, + 223 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 209, + 545, + 223 + ], + "spans": [ + { + "bbox": [ + 375, + 209, + 545, + 223 + ], + "type": "interline_equation", + "content": "\\left\\{\\mathcal {G} _ {d} \\right\\} = \\mathcal {F} _ {\\theta_ {n r}} \\left(\\left\\{\\mathcal {G} _ {c} \\right\\}; \\mathcal {Z} _ {p}\\right) \\tag {4}", + "image_path": "2e6eef9a190e1ce2e0cbee24d45781ddc4a5196803270d3dc2b99c3c2e3215cb.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "spans": [ + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{G}_d\\}" + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "text", + "content": " represents the non-rigidly deformed 3D Gaussians. " + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "inline_equation", + "content": "\\theta_{nr}" + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "text", + "content": " represents the learnable parameters of the nonrigid deformation module. " + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}_p" + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "text", + "content": " is a latent code which encodes SMPL pose and shape " + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "inline_equation", + "content": "(\\theta, \\beta)" + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "text", + "content": " using a lightweight hierarchical pose encoder [26]. 
Specifically, the deformation network " + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "inline_equation", + "content": "f_{\\theta_{nr}}" + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "text", + "content": " takes the canonical position " + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_c" + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "text", + "content": ", the pose latent code " + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}_p" + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "text", + "content": " as inputs and outputs the offsets of the Gaussian's position, scale, rotation, along with a feature vector " + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 305, + 228, + 545, + 324 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 362, + 330, + 545, + 343 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 362, + 330, + 545, + 343 + ], + "spans": [ + { + "bbox": [ + 362, + 330, + 545, + 343 + ], + "type": "interline_equation", + "content": "\\left(\\delta \\mathbf {x}, \\delta \\mathbf {s}, \\delta \\mathbf {q}, \\mathbf {z}\\right) = f _ {\\theta_ {n r}} \\left(\\mathbf {x} _ {c}; \\mathcal {Z} _ {p}\\right) \\tag {5}", + "image_path": "243631e2a808e8ab068f7b8252e45ccfb0a390552013607c3a9f6037ed11a2ff.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 348, + 545, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 348, + 545, + 407 + ], + "spans": [ + { + "bbox": [ + 305, + 348, + 545, + 407 + ], + "type": "text", + "content": "We use a multi-level hash grid [29] to encode 3D positions as spatial features, which are then concatenated with the pose latent code " + }, + { + "bbox": [ + 305, + 348, + 545, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}_p" + }, + { + "bbox": [ + 305, + 348, + 545, + 407 + ], + "type": "text", + "content": " and fed into a shallow MLP with 2 hidden layers and a width of 128. 
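As a rough sketch of this deformation network (Eq. (5); the predicted offsets are applied by Eqs. (6)-(8) that follow), the snippet below replaces the multi-level hash grid [29] with a simple frequency encoding and assumes a 64-dimensional pose latent; the 16-dimensional feature z follows Sec. 4.3, while the encoder and the pose-latent size are stand-ins rather than the paper's exact configuration.

```python
# Sketch of the non-rigid deformation network f_{theta_nr}; the hash-grid encoder [29]
# is replaced by a frequency encoding, and the pose-latent size (64) is an assumption.
import torch
import torch.nn as nn

def freq_encode(x, n_freqs=4):
    # simple stand-in for the multi-level hash-grid positional encoding
    feats = [x]
    for i in range(n_freqs):
        feats += [torch.sin((2 ** i) * x), torch.cos((2 ** i) * x)]
    return torch.cat(feats, dim=-1)

class NonRigidDeformation(nn.Module):
    def __init__(self, pose_dim=64, n_freqs=4, width=128, feat_dim=16):
        super().__init__()
        self.feat_dim = feat_dim
        enc_dim = 3 * (1 + 2 * n_freqs)
        # shallow MLP with 2 hidden layers and a width of 128, as described above
        self.mlp = nn.Sequential(
            nn.Linear(enc_dim + pose_dim, width), nn.ReLU(),
            nn.Linear(width, width), nn.ReLU(),
            nn.Linear(width, 3 + 3 + 3 + feat_dim),
        )

    def forward(self, x_c, z_p):
        # x_c: (N, 3) canonical centers, z_p: (pose_dim,) pose latent code Z_p
        h = self.mlp(torch.cat([freq_encode(x_c), z_p.expand(x_c.shape[0], -1)], dim=-1))
        dx, ds, dq, z = h.split([3, 3, 3, self.feat_dim], dim=-1)
        return dx, ds, dq, z  # the outputs of Eq. (5)

net = NonRigidDeformation()
dx, ds, dq, z = net(torch.rand(50_000, 3), torch.randn(64))
```
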
The canonical Gaussian is deformed by:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 370, + 414, + 545, + 426 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 414, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 370, + 414, + 545, + 426 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {d} = \\mathbf {x} _ {c} + \\delta \\mathbf {x} \\tag {6}", + "image_path": "2d690a98b7a136b08a959c569101f174f42f06db11aa41f2b827ea5649136418.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 372, + 429, + 545, + 441 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 429, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 372, + 429, + 545, + 441 + ], + "type": "interline_equation", + "content": "\\mathbf {s} _ {d} = \\mathbf {s} _ {c} \\cdot \\exp (\\delta \\mathbf {s}) \\tag {7}", + "image_path": "7fcadef54b072eb11859dc61755a05b37f95b4290de32c01aa26f2cf753eb134.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 371, + 444, + 545, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 371, + 444, + 545, + 456 + ], + "spans": [ + { + "bbox": [ + 371, + 444, + 545, + 456 + ], + "type": "interline_equation", + "content": "\\mathbf {q} _ {d} = \\mathbf {q} _ {c} \\cdot [ 1, \\delta q _ {1}, \\delta q _ {2}, \\delta q _ {3} ] \\tag {8}", + "image_path": "5636f0a138b08c89970d52f718c30e97ed6f28491b4d219ad251cb2eb3428af0.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "text", + "content": "note that the " + }, + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "text", + "content": " operator on quaternions is equivalent to multiplying the two rotation matrices derived by the two quaternions. Since the quaternion " + }, + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "inline_equation", + "content": "[1,0,0,0]" + }, + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "text", + "content": " corresponds to the identity rotation matrix, we have " + }, + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "inline_equation", + "content": "\\mathbf{q}_d = \\mathbf{q}_c" + }, + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "inline_equation", + "content": "\\delta \\mathbf{q} = \\mathbf{0}" + }, + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 516, + 430, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 516, + 430, + 528 + ], + "spans": [ + { + "bbox": [ + 306, + 516, + 430, + 528 + ], + "type": "text", + "content": "4.2. 
Rigid Transformation" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 534, + 545, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 534, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 305, + 534, + 545, + 569 + ], + "type": "text", + "content": "We further transform the non-rigidly deformed 3D Gaussians " + }, + { + "bbox": [ + 305, + 534, + 545, + 569 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{G}_d\\}" + }, + { + "bbox": [ + 305, + 534, + 545, + 569 + ], + "type": "text", + "content": " to the observation space via a rigid transformation module:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 365, + 574, + 545, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 574, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 365, + 574, + 545, + 590 + ], + "type": "interline_equation", + "content": "\\left\\{\\mathcal {G} _ {o} \\right\\} = \\mathcal {F} _ {\\theta_ {r}} \\left(\\left\\{\\mathcal {G} _ {d} \\right\\}; \\left\\{\\mathbf {B} _ {\\mathbf {b}} \\right\\} _ {b = 1} ^ {B}\\right) \\tag {9}", + "image_path": "1fe433635dcce2466b5ff362049c45b74df6546461408425710b1a82a86170a7.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 594, + 545, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 594, + 545, + 629 + ], + "spans": [ + { + "bbox": [ + 305, + 594, + 545, + 629 + ], + "type": "text", + "content": "where a skinning MLP " + }, + { + "bbox": [ + 305, + 594, + 545, + 629 + ], + "type": "inline_equation", + "content": "f_{\\theta_r}" + }, + { + "bbox": [ + 305, + 594, + 545, + 629 + ], + "type": "text", + "content": " is learned to predict skinning weights at the position " + }, + { + "bbox": [ + 305, + 594, + 545, + 629 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_d" + }, + { + "bbox": [ + 305, + 594, + 545, + 629 + ], + "type": "text", + "content": ". 
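Concretely, this rigid step can be sketched as below, following the blended-transform formulation in Eqs. (10)-(12) that follow; the skinning weights here are placeholder softmax outputs standing in for the predictions of the skinning MLP, and the bone transforms are identity matrices purely for illustration.

```python
# Sketch of the rigid transformation via forward LBS (Eqs. (10)-(12) below);
# weights and bone transforms are placeholders for f_{theta_r} and {B_b}.
import torch

def forward_lbs(x_d, R_d, weights, bones):
    # weights: (N, B) convex skinning weights, bones: (B, 4, 4) rigid bone transforms
    T = torch.einsum('nb,bij->nij', weights, bones)            # Eq. (10)
    x_h = torch.cat([x_d, torch.ones_like(x_d[:, :1])], dim=-1)
    x_o = torch.einsum('nij,nj->ni', T, x_h)[:, :3]            # Eq. (11)
    R_o = T[:, :3, :3] @ R_d                                   # Eq. (12)
    return x_o, R_o

N, B = 1000, 24                                  # 24 bones for SMPL
x_d = torch.rand(N, 3)                           # non-rigidly deformed centers
R_d = torch.eye(3).expand(N, 3, 3)               # rotations derived from quaternions q_d
weights = torch.softmax(torch.randn(N, B), -1)   # placeholder skinning weights
bones = torch.eye(4).expand(B, 4, 4)             # placeholder bone transforms {B_b}
x_o, R_o = forward_lbs(x_d, R_d, weights, bones)
```
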
We transform the position and the rotation matrix of 3D Gaussians via forward LBS:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 376, + 632, + 545, + 654 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 632, + 545, + 654 + ], + "spans": [ + { + "bbox": [ + 376, + 632, + 545, + 654 + ], + "type": "interline_equation", + "content": "\\mathbf {T} = \\sum_ {b = 1} ^ {B} f _ {\\theta_ {r}} \\left(\\mathbf {x} _ {d}\\right) _ {b} \\mathbf {B} _ {b} \\tag {10}", + "image_path": "af55b15a67ae1246e471cbb5588a3f6bebcf9805133cbdc4e690b1bfe1d99aaf.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 374, + 656, + 545, + 668 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 656, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 374, + 656, + 545, + 668 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {o} = \\mathbf {T} \\mathbf {x} _ {d} \\tag {11}", + "image_path": "b6f0341c3bf78cdef166f63642f1152b2e7eb35fbc5d24e8cd85b28a15b9f3db.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 372, + 671, + 545, + 684 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 671, + 545, + 684 + ], + "spans": [ + { + "bbox": [ + 372, + 671, + 545, + 684 + ], + "type": "interline_equation", + "content": "\\mathbf {R} _ {o} = \\mathbf {T} _ {1: 3, 1: 3} \\mathbf {R} _ {d} \\tag {12}", + "image_path": "f745db47584c384cebdedbd7bda7ed79f89adcc12d152f198dc0f4255f278f9b.jpg" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_d" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " is the rotation matrix derived from the quaternion " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{q}_d" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5023" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 71, + 545, + 183 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 545, + 183 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 545, + 183 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 545, + 183 + ], + "type": "image", + "image_path": "eb1865dac9c072592ad7c2480fdea1e4f45207fd2d69a729340adaaeb3763c4f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "lines": [ + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "spans": [ + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": "Figure 2. Our framework for creating animatable avatars from monocular videos. We first initialize a set of 3D Gaussians in the canonical space via sampling points from a SMPL mesh. 
Each canonical Gaussian " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_c" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": " goes through a non-rigid deformation module " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta_{nr}}" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": " conditioned on an encoded pose vector " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}_p" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": " (Sec. 4.1) to account for pose-dependent non-rigid cloth deformation. This module outputs a non-rigidly deformed 3D Gaussian " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_d" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": " and a pose-dependent latent feature " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": ". The non-rigidly deformed 3D Gaussian " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_d" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": " is transformed to the observation space " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_o" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": " (Sec. 4.2) via LBS with learned neural skinning " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta_r}" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": ". The Gaussian feature " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": ", the pose-dependent feature " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": ", a per-frame latent code " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}_c" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": ", and the ray direction " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathbf{d}" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": " are propagated through a small MLP " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta_c}" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": " to decode the view-dependent color " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": " for each 3D Gaussian. 
Finally, the observation space 3D Gaussians " + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{G}_o\\}" + }, + { + "bbox": [ + 46, + 191, + 548, + 280 + ], + "type": "text", + "content": " and their respective color values are accumulated via differentiable Gaussian rasterization (Eq. (3)) to render the image." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 300, + 124, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 300, + 124, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 300, + 124, + 312 + ], + "type": "text", + "content": "4.3. Color MLP" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 318, + 288, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 318, + 288, + 437 + ], + "spans": [ + { + "bbox": [ + 46, + 318, + 288, + 437 + ], + "type": "text", + "content": "Prior works [59, 63, 64] follow the convention of 3DGS [14], which stores spherical harmonics coefficients per 3D Gaussian to encode the view-dependent color. Treating the stored color feature " + }, + { + "bbox": [ + 46, + 318, + 288, + 437 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 46, + 318, + 288, + 437 + ], + "type": "text", + "content": " as spherical harmonics coefficients, the color of a 3D Gaussian can be computed by the dot product of the spherical harmonics basis and the learned coefficients: " + }, + { + "bbox": [ + 46, + 318, + 288, + 437 + ], + "type": "inline_equation", + "content": "c = \\langle \\pmb{\\gamma}(\\mathbf{d}),\\mathbf{f}\\rangle" + }, + { + "bbox": [ + 46, + 318, + 288, + 437 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 318, + 288, + 437 + ], + "type": "inline_equation", + "content": "\\mathbf{d}" + }, + { + "bbox": [ + 46, + 318, + 288, + 437 + ], + "type": "text", + "content": " represents the viewing direction, derived from the relative position of the 3D Gaussian wrt. the camera center and " + }, + { + "bbox": [ + 46, + 318, + 288, + 437 + ], + "type": "inline_equation", + "content": "\\pmb{\\gamma}" + }, + { + "bbox": [ + 46, + 318, + 288, + 437 + ], + "type": "text", + "content": " denotes the spherical harmonics basis function." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 438, + 287, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 438, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 438, + 287, + 568 + ], + "type": "text", + "content": "While conceptually simple, we argue that this approach does not suit our monocular setting. Since only one camera view is provided during training, the viewing direction in the world space is fixed, leading to poor generalization to unseen test views. Similar to [38], we use the inverse rigid transformation from Sec. 4.2 to canonicalize the viewing direction: " + }, + { + "bbox": [ + 46, + 438, + 287, + 568 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{d}} = \\mathbf{T}_{1:3,1:3}^{-1}\\mathbf{d}" + }, + { + "bbox": [ + 46, + 438, + 287, + 568 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 438, + 287, + 568 + ], + "type": "inline_equation", + "content": "\\mathbf{T}" + }, + { + "bbox": [ + 46, + 438, + 287, + 568 + ], + "type": "text", + "content": " is the forward transformation matrix defined in Eq. (10). 
Theoretically, canonicalizing viewing direction also promotes consistency of the specular component of canonical 3D Gaussians under rigid transformations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": "On the other hand, we observe that the pixel color of the rendered clothed human avatar also largely depends on local deformation. Local fine wrinkles on clothes, for instance, would cause self-occlusion that heavily affects shading. Following [37], we also learn a per-frame latent code " + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}_c" + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": " to compensate for different environment light effects across frames caused by the global movement of the subject. Hence, instead of learning spherical harmonic coefficients, we enhance color modeling by learning a neural network that takes per-Gaussian color feature vector " + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{f} \\in \\mathbb{R}^{32}" + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": ", local pose-dependent feature vector " + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^{16}" + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": " from the non-rigid deformation network, per-frame latent code " + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}_c \\in \\mathbb{R}^{16}" + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": ", and" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 300, + 546, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 300, + 546, + 335 + ], + "spans": [ + { + "bbox": [ + 304, + 300, + 546, + 335 + ], + "type": "text", + "content": "spherical harmonics basis of canonicalized viewing direction " + }, + { + "bbox": [ + 304, + 300, + 546, + 335 + ], + "type": "inline_equation", + "content": "\\gamma (\\dot{\\mathbf{d}})" + }, + { + "bbox": [ + 304, + 300, + 546, + 335 + ], + "type": "text", + "content": " with a degree of 3 as input and predicts the color of the 3D Gaussian:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 376, + 349, + 546, + 364 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 349, + 546, + 364 + ], + "spans": [ + { + "bbox": [ + 376, + 349, + 546, + 364 + ], + "type": "interline_equation", + "content": "c = \\mathcal {F} _ {\\theta_ {c}} (\\mathbf {f}, \\mathbf {z}, \\mathcal {Z} _ {c}, \\boldsymbol {\\gamma} (\\hat {\\mathbf {d}})) \\tag {13}", + "image_path": "b0faa9ec3fc716f6aae6409cc5b2ede736afe3b9cb93cee2df9ad48567870fe4.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 376, + 545, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 376, + 545, + 413 + ], + "spans": [ + { + "bbox": [ + 304, + 376, + 545, + 413 + ], + "type": "text", + "content": "In practice, we find a tiny MLP with one 64-dimension hidden layer sufficient to model the appearance. Increasing the size of the MLP leads to overfitting and performance drop." 
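A minimal sketch of this color branch (Eq. (13)): a single 64-unit hidden layer maps the concatenated Gaussian feature f (32), pose-dependent feature z (16), per-frame latent Z_c (16) and the degree-3 SH basis of the canonicalized direction (16 values) to an RGB color. The SH basis values are assumed to be precomputed, and the sigmoid output activation is an illustrative assumption rather than a documented choice.

```python
# Sketch of the per-Gaussian color MLP in Eq. (13); SH basis values are assumed
# precomputed and the sigmoid output activation is an illustrative assumption.
import torch
import torch.nn as nn

class ColorMLP(nn.Module):
    def __init__(self, f_dim=32, z_dim=16, frame_dim=16, sh_dim=16):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(f_dim + z_dim + frame_dim + sh_dim, 64), nn.ReLU(),
            nn.Linear(64, 3), nn.Sigmoid(),  # per-Gaussian RGB
        )

    def forward(self, f, z, z_c, sh_basis):
        n = f.shape[0]
        return self.net(torch.cat([f, z, z_c.expand(n, -1), sh_basis], dim=-1))

def canonicalize_dir(d, T):
    # d_hat = T_{1:3,1:3}^{-1} d, using the forward transform of Eq. (10)
    return torch.einsum('nij,nj->ni', torch.linalg.inv(T[:, :3, :3]), d)

mlp = ColorMLP()
color = mlp(torch.randn(100, 32), torch.randn(100, 16),
            torch.randn(16), torch.randn(100, 16))  # (100, 3) RGB values
```
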
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 426, + 391, + 438 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 426, + 391, + 438 + ], + "spans": [ + { + "bbox": [ + 306, + 426, + 391, + 438 + ], + "type": "text", + "content": "4.4. Optimization" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 445, + 545, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 445, + 545, + 494 + ], + "spans": [ + { + "bbox": [ + 304, + 445, + 545, + 494 + ], + "type": "text", + "content": "We jointly optimize canonical 3D Gaussians " + }, + { + "bbox": [ + 304, + 445, + 545, + 494 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{G}_c\\}" + }, + { + "bbox": [ + 304, + 445, + 545, + 494 + ], + "type": "text", + "content": " and the parameters " + }, + { + "bbox": [ + 304, + 445, + 545, + 494 + ], + "type": "inline_equation", + "content": "\\theta_{nr},\\theta_r,\\theta_c" + }, + { + "bbox": [ + 304, + 445, + 545, + 494 + ], + "type": "text", + "content": " of the non-rigid deformation network, the skinning network and the color network, respectively." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 496, + 545, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 496, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 304, + 496, + 545, + 592 + ], + "type": "text", + "content": "Pose correction. SMPL [24] parameter fittings from images can be inaccurate. To address this, we additionally optimize the per-sequence shape parameter as well as per-frame translation, global rotation, and local joint rotations. We initialize these parameters " + }, + { + "bbox": [ + 304, + 496, + 545, + 592 + ], + "type": "inline_equation", + "content": "\\theta_{p}" + }, + { + "bbox": [ + 304, + 496, + 545, + 592 + ], + "type": "text", + "content": " with the given SMPL parameters and differentiably derive the bone transformations " + }, + { + "bbox": [ + 304, + 496, + 545, + 592 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{B}_b\\}" + }, + { + "bbox": [ + 304, + 496, + 545, + 592 + ], + "type": "text", + "content": " as input to the network, enabling direct optimization via backpropagation." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 594, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 547, + 714 + ], + "type": "text", + "content": "As-isometric-as-possible regularization. With monocular video as input, only one view of the human is visible in each frame, making it extremely hard to generalize to novel views and novel poses. Considering the sparsity of input, the non-rigid deformation network is highly underconstrained, resulting in noisy deformation from the canonical space to the observation space. Inspired by [41], we leverage the as-isometric-as-possible constraint [15] to restrict neighboring 3D Gaussian centers to preserve a similar distance after deformation. 
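As a sketch of this constraint (the position term, formalized as Eq. (14) just below), the snippet computes k-nearest neighbours in canonical space with k = 5 and penalizes changes of the corresponding pairwise distances after deformation; the brute-force pairwise distance matrix is for illustration only and stands in for a proper kNN search.

```python
# Sketch of the as-isometric-as-possible position term (Eq. (14) below); a
# brute-force distance matrix stands in for an efficient kNN implementation.
import torch

def isometry_loss(x_c, x_o, k=5):
    d_c = torch.cdist(x_c, x_c)                           # canonical pairwise distances
    knn = d_c.topk(k + 1, largest=False).indices[:, 1:]   # k neighbours, excluding the point itself
    d_o = torch.cdist(x_o, x_o)                           # observation-space pairwise distances
    return (torch.gather(d_c, 1, knn) - torch.gather(d_o, 1, knn)).abs().sum()

x_c = torch.rand(2000, 3)                  # canonical Gaussian centers
x_o = x_c + 0.01 * torch.randn_like(x_c)   # stand-in for deformed centers
loss = isometry_loss(x_c, x_o)
```
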
We further augment the con" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5024" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 209, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 209, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 209, + 83 + ], + "type": "text", + "content": "strain to Gaussian covariance matrices:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 88, + 287, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 88, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 66, + 88, + 287, + 135 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {i s o p o s}} = \\sum_ {i = 1} ^ {N} \\sum_ {j \\in \\mathcal {N} _ {k} (i)} \\left| d \\left(\\mathbf {x} _ {c} ^ {(i)}, \\mathbf {x} _ {c} ^ {(j)}\\right) - d \\left(\\mathbf {x} _ {o} ^ {(i)}, \\mathbf {x} _ {o} ^ {(j)}\\right) \\right| \\tag {14}", + "image_path": "30f58d4a8a489b8c9e6a44e33a73260dd28de444744403a096d40edd30b02078.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 139, + 287, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 139, + 287, + 186 + ], + "spans": [ + { + "bbox": [ + 58, + 139, + 287, + 186 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {i s o c o v} = \\sum_ {i = 1} ^ {N} \\sum_ {j \\in \\mathcal {N} _ {k} (i)} \\left| d \\left(\\boldsymbol {\\Sigma} _ {c} ^ {(i)}, \\boldsymbol {\\Sigma} _ {c} ^ {(j)}\\right) - d \\left(\\boldsymbol {\\Sigma} _ {o} ^ {(i)}, \\boldsymbol {\\Sigma} _ {o} ^ {(j)}\\right) \\right| \\tag {15}", + "image_path": "fe9e3c7242ebe67b6a54378c0d741376463143da7e1279b4a49d9c0b7f271a3f.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 191, + 287, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 191, + 287, + 227 + ], + "spans": [ + { + "bbox": [ + 47, + 191, + 287, + 227 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 191, + 287, + 227 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 47, + 191, + 287, + 227 + ], + "type": "text", + "content": " denotes the number of 3D Gaussians. " + }, + { + "bbox": [ + 47, + 191, + 287, + 227 + ], + "type": "inline_equation", + "content": "\\mathcal{N}_k" + }, + { + "bbox": [ + 47, + 191, + 287, + 227 + ], + "type": "text", + "content": " denotes the k-nearest neighbourhood, and we set " + }, + { + "bbox": [ + 47, + 191, + 287, + 227 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 47, + 191, + 287, + 227 + ], + "type": "text", + "content": " to 5. We use L2-norm as our distance function " + }, + { + "bbox": [ + 47, + 191, + 287, + 227 + ], + "type": "inline_equation", + "content": "d(\\cdot ,\\cdot)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 227, + 287, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 227, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 47, + 227, + 287, + 300 + ], + "type": "text", + "content": "Loss function. 
Our full loss function consists of an RGB loss " + }, + { + "bbox": [ + 47, + 227, + 287, + 300 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{rgb}" + }, + { + "bbox": [ + 47, + 227, + 287, + 300 + ], + "type": "text", + "content": ", a mask loss " + }, + { + "bbox": [ + 47, + 227, + 287, + 300 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{mask}" + }, + { + "bbox": [ + 47, + 227, + 287, + 300 + ], + "type": "text", + "content": ", a skinning weight regularization loss " + }, + { + "bbox": [ + 47, + 227, + 287, + 300 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{skin}" + }, + { + "bbox": [ + 47, + 227, + 287, + 300 + ], + "type": "text", + "content": " and the as-isometric-as-possible regularization losses for both position and covariance " + }, + { + "bbox": [ + 47, + 227, + 287, + 300 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{isopos},\\mathcal{L}_{isocov}" + }, + { + "bbox": [ + 47, + 227, + 287, + 300 + ], + "type": "text", + "content": ". For further details of the loss definition and respective weights, please refer to the Supp.Mat." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 308, + 128, + 322 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 128, + 322 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 128, + 322 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 329, + 287, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 329, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 46, + 329, + 287, + 437 + ], + "type": "text", + "content": "In this section, we first compare the proposed approach with recent state-of-the-art methods [7, 12, 37, 57, 58], demonstrating that it achieves superior rendering quality in terms of LPIPS, which is more informative under the monocular setting, while achieving fast training and real-time rendering, respectively " + }, + { + "bbox": [ + 46, + 329, + 287, + 437 + ], + "type": "inline_equation", + "content": "400\\mathrm{x}" + }, + { + "bbox": [ + 46, + 329, + 287, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 329, + 287, + 437 + ], + "type": "inline_equation", + "content": "250\\mathrm{x}" + }, + { + "bbox": [ + 46, + 329, + 287, + 437 + ], + "type": "text", + "content": " faster than the most competitive baseline [58]. We then systematically ablate each component of the proposed model, showing their effectiveness in improving rendering quality." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 443, + 160, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 443, + 160, + 455 + ], + "spans": [ + { + "bbox": [ + 47, + 443, + 160, + 455 + ], + "type": "text", + "content": "5.1. Evaluation Dataset" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 461, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 461, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 461, + 287, + 581 + ], + "type": "text", + "content": "ZJU-MoCap [37]. This is the major testbed for quantitative evaluation. We pick six sequences (377, 386, 387, 392, 393, 394) from the ZJU-MoCap dataset and follow the training/test split of HumanNeRF [58]. The motion of these sequences is repetitive and does not contain a sufficient number of poses for meaningful novel pose synthesis benchmarks. 
Thus we focus on evaluating novel view synthesis (PSNR/SSIM/LPIPS) and show qualitative results for animation on out-of-distribution poses. Note that LPIPS in all the tables are scaled up by 1000." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 581, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 581, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 581, + 287, + 665 + ], + "type": "text", + "content": "PeopleSnapshot [1]. We also conduct experiments on 4 sequences of the PeopleSnapshot dataset, which includes monocular videos of people rotating in front of a camera. We follow the data split of InstantAvatar [12] and compare to [12] on novel pose synthesis. For fair comparison, we use the provided poses optimized by Anim-NeRF [35] and do not further optimize it during our training." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 671, + 197, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 671, + 197, + 684 + ], + "spans": [ + { + "bbox": [ + 47, + 671, + 197, + 684 + ], + "type": "text", + "content": "5.2. Comparison with Baselines" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "We compare our approach with NeuralBody [37], HumanNeRF [58], MonoHuman [70], ARAH [57] and Instant" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 72, + 545, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 346 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 346 + ], + "type": "text", + "content": "NVR [7] under monocular setup on ZJU-MoCap. The quantitative results are reported in Tab. 3. NeuralBody is underperforming compared to other approaches. Overall, our proposed approach produces comparable performance to ARAH on PSNR and SSIM, while significantly outperforming all the baselines on LPIPS. We argue that LPIPS is more informative compared to the other two metrics, as it is very difficult to reproduce exactly the ground-truth appearance for novel views due to the monocular setting and the stochastic nature of cloth deformations. Meanwhile, our method is also capable of fast training and renders at a real-time rendering frame rate, being 400 times faster for training (30 GPU minutes vs. 8 GPU days) and " + }, + { + "bbox": [ + 304, + 72, + 545, + 346 + ], + "type": "inline_equation", + "content": "250 - 500" + }, + { + "bbox": [ + 304, + 72, + 545, + 346 + ], + "type": "text", + "content": " times faster for inference (50 FPS vs. 0.1 FPS for ARAH and 0.2 FPS for HumanNeRF). We also note that Instant-NVR trains on a refined version of ZJU-MoCap, which provides refined camera parameters, SMPL fittings, and more accurate instance masks with part-level annotation that is essential for running their method. Hence their metrics are not directly comparable to other methods in Tab. 3. We train our model on the refined dataset for a fair quantitative comparison, which clearly shows that our method outperforms Instant-NVR in most scenarios." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 348, + 545, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 348, + 545, + 467 + ], + "spans": [ + { + "bbox": [ + 304, + 348, + 545, + 467 + ], + "type": "text", + "content": "Qualitative comparisons on novel view synthesis can be found in Fig. 3. We observe that our method preserves sharper details compared to ARAH and does not produce fluctuating artifacts as in HumanNeRF caused by noisy deformation fields. Instant-NVR produces an oversmooth appearance and tends to generate noisy limbs. Additionally, we animate our learned avatars with pose sequences from AMASS [25] and AIST++ [19], shown in the rightmost column of Fig. 3. This shows that our model could generalize to extreme out-of-distribution poses." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 468, + 545, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 468, + 545, + 516 + ], + "spans": [ + { + "bbox": [ + 304, + 468, + 545, + 516 + ], + "type": "text", + "content": "For PeopleSnapshot, we report the quantitative comparison against InstantAvatar [12] in Tab. 4. Our approach significantly outperforms InstantAvatar on PSNR and LPIPS, while being more than " + }, + { + "bbox": [ + 304, + 468, + 545, + 516 + ], + "type": "inline_equation", + "content": "3\\mathrm{x}" + }, + { + "bbox": [ + 304, + 468, + 545, + 516 + ], + "type": "text", + "content": " faster during inference." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 525, + 400, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 525, + 400, + 539 + ], + "spans": [ + { + "bbox": [ + 306, + 525, + 400, + 539 + ], + "type": "text", + "content": "5.3. Ablation Study" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 545, + 545, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 545, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 545, + 640 + ], + "type": "text", + "content": "We study the effect of various components of our method on the ZJU-MoCap dataset, including the color MLP, the as-isometric-as-possible regularization and the pose correction module. The average metrics over 6 sequences are reported in Tab. 5. We show that all proposed techniques are required to reach the optimal performance, best reflected by LPIPS which is the most informative metric for novel view synthesis evaluation under a monocular setup." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "We further show qualitative comparison on out-of-distribution poses in Fig. 4, which demonstrates that the as-isometric-as-possible loss helps to constrain the 3D Gaussians to comply with consistent movement during deformation, hence improving generalization on novel poses. 
Albeit marginally, each individual component contributes to a" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5025" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 69, + 517, + 661 + ], + "blocks": [ + { + "bbox": [ + 78, + 69, + 517, + 661 + ], + "lines": [ + { + "bbox": [ + 78, + 69, + 517, + 661 + ], + "spans": [ + { + "bbox": [ + 78, + 69, + 517, + 661 + ], + "type": "image", + "image_path": "721a7f5d19b0b74ddebf9957d73031a24577f9c604fd435442873d5c25505bdd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 678, + 548, + 702 + ], + "lines": [ + { + "bbox": [ + 46, + 678, + 548, + 702 + ], + "spans": [ + { + "bbox": [ + 46, + 678, + 548, + 702 + ], + "type": "text", + "content": "Figure 3. Qualitative Comparison on ZJU-MoCap [37]. We show the results for both novel view synthesis and novel pose animation of all sequences on ZJU-MoCap. Our method produces high-quality results that preserve cloth details even on out-of-distribution poses." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "5026" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 123, + 547, + 203 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 547, + 118 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 547, + 118 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 547, + 118 + ], + "type": "text", + "content": "Table 3. Quantitative Results on ZJU-MoCap [37]. We outperform both competitive baselines [57, 58] in terms of LPIPS while being two orders of magnitude faster in training and rendering. Cell color indicates best and second best. Instant-NVR [7] is trained and tested on a refined version of ZJU-MoCap, thus is not directly comparable to other baselines quantitatively. We train our model on the refined dataset for fair quantitative comparison to Instant-NVR and the metrics are reported in the last two rows of the table." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 123, + 547, + 203 + ], + "lines": [ + { + "bbox": [ + 48, + 123, + 547, + 203 + ], + "spans": [ + { + "bbox": [ + 48, + 123, + 547, + 203 + ], + "type": "table", + "html": "
<table>
<tr><th>Method</th><th>GPU↓</th><th>FPS↑</th><th>377 PSNR↑</th><th>377 SSIM↑</th><th>377 LPIPS↓</th><th>386 PSNR↑</th><th>386 SSIM↑</th><th>386 LPIPS↓</th><th>387 PSNR↑</th><th>387 SSIM↑</th><th>387 LPIPS↓</th><th>392 PSNR↑</th><th>392 SSIM↑</th><th>392 LPIPS↓</th><th>393 PSNR↑</th><th>393 SSIM↑</th><th>393 LPIPS↓</th><th>394 PSNR↑</th><th>394 SSIM↑</th><th>394 LPIPS↓</th></tr>
<tr><td>NeuralBody [37]</td><td>12h</td><td>2</td><td>29.11</td><td>0.9674</td><td>40.95</td><td>30.54</td><td>0.9678</td><td>46.43</td><td>27.00</td><td>0.9518</td><td>59.47</td><td>30.10</td><td>0.9642</td><td>53.27</td><td>28.61</td><td>0.9590</td><td>59.05</td><td>29.10</td><td>0.9593</td><td>54.55</td></tr>
<tr><td>HumanNeRF [58]</td><td>>8d</td><td>0.2</td><td>30.41</td><td>0.9743</td><td>24.06</td><td>33.20</td><td>0.9752</td><td>28.99</td><td>28.18</td><td>0.9632</td><td>35.58</td><td>31.04</td><td>0.9705</td><td>32.12</td><td>28.31</td><td>0.9603</td><td>36.72</td><td>30.31</td><td>0.9642</td><td>32.89</td></tr>
<tr><td>MonoHuman [70]</td><td>4d</td><td>0.1</td><td>29.12</td><td>0.9727</td><td>26.58</td><td>32.94</td><td>0.9695</td><td>36.04</td><td>27.93</td><td>0.9601</td><td>41.76</td><td>29.50</td><td>0.9635</td><td>39.45</td><td>27.64</td><td>0.9566</td><td>43.17</td><td>29.15</td><td>0.9595</td><td>38.08</td></tr>
<tr><td>ARAH [57]</td><td>8d</td><td>0.1</td><td>30.85</td><td>0.9800</td><td>26.60</td><td>33.50</td><td>0.9781</td><td>31.40</td><td>28.49</td><td>0.9656</td><td>40.43</td><td>32.02</td><td>0.9742</td><td>35.28</td><td>28.77</td><td>0.9645</td><td>42.30</td><td>29.46</td><td>0.9632</td><td>40.76</td></tr>
<tr><td>Ours</td><td>0.5h</td><td>50</td><td>30.64</td><td>0.9774</td><td>20.88</td><td>33.63</td><td>0.9773</td><td>25.77</td><td>28.33</td><td>0.9642</td><td>34.24</td><td>31.66</td><td>0.9730</td><td>30.14</td><td>28.88</td><td>0.9635</td><td>35.26</td><td>30.54</td><td>0.9661</td><td>31.21</td></tr>
<tr><td>Instant-NVR* [7]</td><td>0.1h</td><td>3</td><td>31.28</td><td>0.9789</td><td>25.37</td><td>33.71</td><td>0.9770</td><td>32.81</td><td>28.39</td><td>0.9640</td><td>45.97</td><td>31.85</td><td>0.9730</td><td>39.47</td><td>29.56</td><td>0.9641</td><td>46.16</td><td>31.32</td><td>0.9680</td><td>40.63</td></tr>
<tr><td>Ours*</td><td>0.5h</td><td>50</td><td>30.96</td><td>0.9778</td><td>19.85</td><td>33.94</td><td>0.9784</td><td>24.70</td><td>28.40</td><td>0.9656</td><td>32.96</td><td>32.10</td><td>0.9739</td><td>29.20</td><td>29.30</td><td>0.9645</td><td>34.03</td><td>30.74</td><td>0.9662</td><td>31.00</td></tr>
</table>
", + "image_path": "b3ba8e8ac9145a6cd6a725e2eb602a33d824ad2df9e25dd79f8489c2b8eb61ad.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 53, + 232, + 541, + 288 + ], + "blocks": [ + { + "bbox": [ + 195, + 212, + 398, + 224 + ], + "lines": [ + { + "bbox": [ + 195, + 212, + 398, + 224 + ], + "spans": [ + { + "bbox": [ + 195, + 212, + 398, + 224 + ], + "type": "text", + "content": "Table 4. Quantitative Results on PeopleSnapshot [1]." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 232, + 541, + 288 + ], + "lines": [ + { + "bbox": [ + 53, + 232, + 541, + 288 + ], + "spans": [ + { + "bbox": [ + 53, + 232, + 541, + 288 + ], + "type": "table", + "html": "
<table>
<tr><th>Method</th><th>GPU↓</th><th>FPS↑</th><th>female-3-casual PSNR↑</th><th>female-3-casual SSIM↑</th><th>female-3-casual LPIPS↓</th><th>female-4-casual PSNR↑</th><th>female-4-casual SSIM↑</th><th>female-4-casual LPIPS↓</th><th>male-3-casual PSNR↑</th><th>male-3-casual SSIM↑</th><th>male-3-casual LPIPS↓</th><th>male-4-casual PSNR↑</th><th>male-4-casual SSIM↑</th><th>male-4-casual LPIPS↓</th></tr>
<tr><td>InstantAvatar [12]</td><td>5 min.</td><td>15</td><td>27.66</td><td>0.9709</td><td>21.00</td><td>29.11</td><td>0.9683</td><td>16.70</td><td>29.53</td><td>0.9716</td><td>15.50</td><td>27.67</td><td>0.9626</td><td>30.7</td></tr>
<tr><td>Ours</td><td>45 min.</td><td>50</td><td>30.57</td><td>0.9581</td><td>20.86</td><td>33.16</td><td>0.9678</td><td>15.74</td><td>34.28</td><td>0.9724</td><td>14.92</td><td>30.22</td><td>0.9653</td><td>23.05</td></tr>
</table>
", + "image_path": "8faf096d86e312a36b17928a7b22f24eff183db6c44aab942c246a35f97290dc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 64, + 300, + 168, + 414 + ], + "blocks": [ + { + "bbox": [ + 64, + 300, + 168, + 414 + ], + "lines": [ + { + "bbox": [ + 64, + 300, + 168, + 414 + ], + "spans": [ + { + "bbox": [ + 64, + 300, + 168, + 414 + ], + "type": "image", + "image_path": "0d9a9c3867c9690ac42a4f0d9f471dcd6a71d1b5a6955c5757ee0752e25168c7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 148, + 417, + 195, + 429 + ], + "lines": [ + { + "bbox": [ + 148, + 417, + 195, + 429 + ], + "spans": [ + { + "bbox": [ + 148, + 417, + 195, + 429 + ], + "type": "text", + "content": "Full model" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 169, + 301, + 287, + 414 + ], + "blocks": [ + { + "bbox": [ + 169, + 301, + 287, + 414 + ], + "lines": [ + { + "bbox": [ + 169, + 301, + 287, + 414 + ], + "spans": [ + { + "bbox": [ + 169, + 301, + 287, + 414 + ], + "type": "image", + "image_path": "7100c0043f46985ec827f25486fc998778278d7c8015634db068aab801420277.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 69, + 451, + 523, + 464 + ], + "lines": [ + { + "bbox": [ + 69, + 451, + 523, + 464 + ], + "spans": [ + { + "bbox": [ + 69, + 451, + 523, + 464 + ], + "type": "text", + "content": "Figure 4. Ablation Study on as-isometric-as-possible regularization, which removes the artifacts on highly articulated poses." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 300, + 410, + 413 + ], + "blocks": [ + { + "bbox": [ + 307, + 300, + 410, + 413 + ], + "lines": [ + { + "bbox": [ + 307, + 300, + 410, + 413 + ], + "spans": [ + { + "bbox": [ + 307, + 300, + 410, + 413 + ], + "type": "image", + "image_path": "f8a7ab9505428a03290576c0fda0d8b908c6d20231f0efeee27f050fcc5239cc.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 372, + 418, + 455, + 430 + ], + "lines": [ + { + "bbox": [ + 372, + 418, + 455, + 430 + ], + "spans": [ + { + "bbox": [ + 372, + 418, + 455, + 430 + ], + "type": "text", + "content": "w/o " + }, + { + "bbox": [ + 372, + 418, + 455, + 430 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{isocov}" + }, + { + "bbox": [ + 372, + 418, + 455, + 430 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 372, + 418, + 455, + 430 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{isopos}" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 413, + 301, + 528, + 414 + ], + "blocks": [ + { + "bbox": [ + 413, + 301, + 528, + 414 + ], + "lines": [ + { + "bbox": [ + 413, + 301, + 528, + 414 + ], + "spans": [ + { + "bbox": [ + 413, + 301, + 528, + 414 + ], + "type": "image", + "image_path": "71c45e1a0a787ec7f859bfd0e58f1ad509d5fdfd434a248ffc0d8de1a5b2ac0b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 60, + 523, + 274, + 605 + ], + "blocks": [ + { + "bbox": [ + 46, + 482, + 288, + 515 + ], + "lines": [ + { + "bbox": [ + 46, + 482, + 288, + 515 + ], + "spans": [ + { + "bbox": [ + 46, + 482, + 288, + 515 + ], + "type": 
"text", + "content": "Table 5. Ablation Study on ZJU-MoCap [37]. The proposed model achieves the lowest LPIPS, demonstrating the effectiveness of all components." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 60, + 523, + 274, + 605 + ], + "lines": [ + { + "bbox": [ + 60, + 523, + 274, + 605 + ], + "spans": [ + { + "bbox": [ + 60, + 523, + 274, + 605 + ], + "type": "table", + "html": "
Metric: | PSNR↑ | SSIM↑ | LPIPS↓
Full model | 30.61 | 0.9703 | 29.58
w/o color MLP | 30.55 | 0.9700 | 31.24
w/o Lisocov | 30.61 | 0.9703 | 29.84
w/o Lisopos, Lisocov | 30.59 | 0.9699 | 30.25
w/o pose correction | 30.60 | 0.9703 | 29.87
", + "image_path": "b079594807012ee5477bd2cd79ffb95d9a735fd0f0d739cff276919818a06168.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 623, + 288, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 623, + 288, + 659 + ], + "spans": [ + { + "bbox": [ + 46, + 623, + 288, + 659 + ], + "type": "text", + "content": "better novel-view rendering quality and particularly generates more plausible results with respect to novel pose animation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 669, + 119, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 119, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 119, + 681 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "content": "In this paper, we present 3DGS-Avatar, one of the first methods that utilize the explicit representation of 3DGS" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 483, + 546, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 546, + 544 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 546, + 544 + ], + "type": "text", + "content": "for efficient reconstruction of clothed human avatars from monocular videos. Our method achieves photorealistic rendering, awareness of pose-dependent cloth deformation, generalization to unseen poses, fast training, and real-time rendering all at once." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 544, + 547, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 544, + 547, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 544, + 547, + 664 + ], + "type": "text", + "content": "Experiments show that our method is comparable to or even better than the state-of-the-art methods in terms of rendering quality while being two orders of magnitude faster in both training and inference. Furthermore, we propose to replace spherical harmonics with a shallow MLP to decode 3D Gaussian color and regularize deformation with geometric constraints, both proved to be effective in enhancing rendering quality. We hope that our new representation could foster further research in fast, high-quality animatable clothed human avatar synthesis from a monocular view." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "content": "Acknowledgement. SW and AG were supported by the ERC Starting Grant LEGO-3D (850533) and the DFG EXC number 2064/1 - project number 390727645. SW and ST acknowledge the SNSF grant 200021 204840." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5027" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "text", + "content": "[1] Thiemo Alldieck, Marcus Magnor, Weipeng Xu, Christian Theobalt, and Gerard Pons-Moll. Video based reconstruction of 3d people models. In Proc. of CVPR, 2018. 6, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 126, + 288, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 126, + 288, + 170 + ], + "spans": [ + { + "bbox": [ + 53, + 126, + 288, + 170 + ], + "type": "text", + "content": "[2] Dragomir Anguelov, Praveen Srinivasan, Daphne Koller, Sebastian Thrun, Jim Rodgers, and James Davis. Scape: shape completion and animation of people. ACM Transactions Graphics, 24, 2005. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 172, + 288, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 172, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 53, + 172, + 288, + 205 + ], + "type": "text", + "content": "[3] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In Proc. of ECCV, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 207, + 287, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 207, + 287, + 251 + ], + "spans": [ + { + "bbox": [ + 53, + 207, + 287, + 251 + ], + "type": "text", + "content": "[4] Xu Chen, Yufeng Zheng, Michael Black, Otmar Hilliges, and Andreas Geiger. Snarf: Differentiable forward skinning for animating non-rigid neural implicit shapes. In Proc. of ICCV, 2021. 3, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 253, + 287, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 253, + 287, + 297 + ], + "spans": [ + { + "bbox": [ + 53, + 253, + 287, + 297 + ], + "type": "text", + "content": "[5] Xu Chen, Tianjian Jiang, Jie Song, Max Rietmann, Andreas Geiger, Michael J. Black, and Otmar Hilliges. Fast-snarf: A fast deformer for articulated neural fields. Pattern Analysis and Machine Intelligence (PAMI), 2023. 3, 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 299, + 287, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 299, + 287, + 343 + ], + "spans": [ + { + "bbox": [ + 53, + 299, + 287, + 343 + ], + "type": "text", + "content": "[6] Yue Chen, Xuan Wang, Xingyu Chen, Qi Zhang, Xiaoyu Li, Yu Guo, Jue Wang, and Fei Wang. Uv volumes for real-time rendering of editable free-view human performance. In Proc. of CVPR, 2023. 
2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 345, + 287, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 345, + 287, + 388 + ], + "spans": [ + { + "bbox": [ + 53, + 345, + 287, + 388 + ], + "type": "text", + "content": "[7] Chen Geng, Sida Peng, Zhen Xu, Hujun Bao, and Xiaowei Zhou. Learning neural volumetric representations of dynamic humans in minutes. In Proc. of CVPR, 2023. 2, 6, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "type": "text", + "content": "[8] Chen Guo, Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Vid2 avatar: 3d avatar reconstruction from videos in the wild via self-supervised scene decomposition. In Proc. of CVPR, 2023. 1, 2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 437, + 287, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 437, + 287, + 470 + ], + "spans": [ + { + "bbox": [ + 53, + 437, + 287, + 470 + ], + "type": "text", + "content": "[9] N. Hasler, C. Stoll, M. Sunkel, B. Rosenhahn, and H.-P. Seidel. A Statistical Model of Human Pose and Body Shape. Computer Graphics Forum, 28:337-346, 2009. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 472, + 288, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 472, + 288, + 504 + ], + "spans": [ + { + "bbox": [ + 48, + 472, + 288, + 504 + ], + "type": "text", + "content": "[10] Shoukang Hu and Ziwei Liu. Gauhuman: Articulated gaussian splatting from monocular human videos. In Proc. of CVPR, 2024. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 507, + 287, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 507, + 287, + 561 + ], + "spans": [ + { + "bbox": [ + 48, + 507, + 287, + 561 + ], + "type": "text", + "content": "[11] Rohit Jena, Ganesh Subramanian Iyer, Siddharth Choudhary, Brandon Smith, Pratik Chaudhari, and James Gee. Splatarmor: Articulated gaussian splatting for animatable humans from monocular rgb videos. arXiv preprint arXiv:2311.10812, 2023. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 564, + 287, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 564, + 287, + 597 + ], + "spans": [ + { + "bbox": [ + 48, + 564, + 287, + 597 + ], + "type": "text", + "content": "[12] Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Instantavatar: Learning avatars from monocular video in 60 seconds. In Proc. of CVPR, 2023. 1, 2, 3, 4, 6, 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 600, + 287, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 600, + 287, + 632 + ], + "spans": [ + { + "bbox": [ + 48, + 600, + 287, + 632 + ], + "type": "text", + "content": "[13] Wei Jiang, Kwang Moo Yi, Golnoosh Samei, Oncel Tuzel, and Anurag Ranjan. Neuman: Neural human radiance field from a single video. In Proc. of ECCV, 2022. 1, 2, 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 635, + 287, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 677 + ], + "type": "text", + "content": "[14] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 
3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics, 42 (4), 2023. 1, 2, 3, 4, 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[15] Martin Kilian, Niloy J. Mitra, and Helmut Pottmann. Geometric modeling in shape space. ACM Transactions on Graphics (SIGGRAPH), 26(3), 2007. 2, 5" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[16] Muhammed Kocabas, Jen-Hao Rick Chang, James Gabriel, Oncel Tuzel, and Anurag Ranjan. Hugs: Human gaussian splatting. In Proc. of CVPR, 2024. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 108, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 108, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 108, + 545, + 140 + ], + "type": "text", + "content": "[17] Youngjoong Kwon, Lingjie Liu, Henry Fuchs, Marc Habermann, and Christian Theobalt. Deliffas: Deformable light fields for fast avatar synthesis. Proc. of NeurIPS, 2023. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 143, + 545, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 143, + 545, + 175 + ], + "spans": [ + { + "bbox": [ + 307, + 143, + 545, + 175 + ], + "type": "text", + "content": "[18] Jiahui Lei, Yufu Wang, Georgios Pavlakos, Lingjie Liu, and Kostas Daniilidis. Gart: Gaussian articulated template models. In Proc. of CVPR, 2024. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 177, + 545, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 177, + 545, + 210 + ], + "spans": [ + { + "bbox": [ + 307, + 177, + 545, + 210 + ], + "type": "text", + "content": "[19] Ruilong Li, Shan Yang, David A. Ross, and Angjoo Kanazawa. Ai choreographer: Music conditioned 3d dance generation with aist++. In Proc. of ICCV, 2021. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 212, + 545, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 212, + 545, + 255 + ], + "spans": [ + { + "bbox": [ + 307, + 212, + 545, + 255 + ], + "type": "text", + "content": "[20] Ruilong Li, Julian Tanke, Minh Vo, Michael Zollhoefer, Jürgen Gall, Angjoo Kanazawa, and Christoph Lassner. Tava: Template-free animatable volumetric actors. In Proc. of ECCV, 2022. 1, 2, 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 258, + 545, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 258, + 545, + 300 + ], + "spans": [ + { + "bbox": [ + 307, + 258, + 545, + 300 + ], + "type": "text", + "content": "[21] Zhe Li, Zerong Zheng, Lizhen Wang, and Yebin Liu. Animatable gaussians: Learning pose-dependent gaussian maps for high-fidelity human avatar modeling. In Proc. of CVPR, 2024. 
3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 303, + 545, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 303, + 545, + 346 + ], + "spans": [ + { + "bbox": [ + 307, + 303, + 545, + 346 + ], + "type": "text", + "content": "[22] Lingjie Liu, Marc Habermann, Viktor Rudnev, Kripasindhu Sarkar, Jiatao Gu, and Christian Theobalt. Neural actor: Neural free-view synthesis of human actors with pose control. ACM Trans. Graph. (ACM SIGGRAPH Asia), 2021. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 349, + 545, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 349, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 307, + 349, + 545, + 381 + ], + "type": "text", + "content": "[23] Yang Liu, Xiang Huang, Minghan Qin, Qinwei Lin, and Haoqian Wang. Animatable 3d gaussian: Fast and high-quality reconstruction of multiple human avatars, 2023. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 384, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 384, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 384, + 545, + 426 + ], + "type": "text", + "content": "[24] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multiperson linear model. ACM Transactions Graphics, 34(6), 2015. 3, 4, 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 429, + 545, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 429, + 545, + 461 + ], + "spans": [ + { + "bbox": [ + 307, + 429, + 545, + 461 + ], + "type": "text", + "content": "[25] Naureen Mahmood, Nima Ghorbani, Nikolaus F. Troje, Gerard Pons-Moll, and Michael J. Black. AMASS: Archive of motion capture as surface shapes. In Proc. of ICCV, 2019. 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 464, + 545, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 464, + 545, + 495 + ], + "spans": [ + { + "bbox": [ + 307, + 464, + 545, + 495 + ], + "type": "text", + "content": "[26] Marko Mihajlovic, Yan Zhang, Michael J. Black, and Siyu Tang. LEAP: Learning articulated occupancy of people. In Proc. of CVPR, 2021. 3, 4" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 498, + 545, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 498, + 545, + 542 + ], + "spans": [ + { + "bbox": [ + 307, + 498, + 545, + 542 + ], + "type": "text", + "content": "[27] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. of ECCV, 2020. 1, 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 544, + 545, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 544, + 545, + 587 + ], + "spans": [ + { + "bbox": [ + 307, + 544, + 545, + 587 + ], + "type": "text", + "content": "[28] Arthur Moreau, Jifei Song, Helisa Dhamo, Richard Shaw, Yiren Zhou, and Eduardo Pérez-Pellitero. Human gaussian splatting: Real-time rendering of animatable avatars. In Proc. of CVPR, 2024. 
3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 590, + 545, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 545, + 632 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 545, + 632 + ], + "type": "text", + "content": "[29] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions Graphics, 41(4), 2022. 2, 4" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 635, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 678 + ], + "type": "text", + "content": "[30] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proc. of CVPR, 2020. 1" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 681, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 545, + 712 + ], + "type": "text", + "content": "[31] Atsuhiro Noguchi, Xiao Sun, Stephen Lin, and Tatsuya Harada. Neural articulated radiance field. In Proc. of ICCV, 2021. 1, 2" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5028" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "text", + "content": "[32] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proc. of ICCV, 2021. 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 117, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 117, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 117, + 287, + 149 + ], + "type": "text", + "content": "[33] Ahmed A. A. Osman, Timo Bolkart, and Michael J. Black. Star: Sparse trained articulated human body regressor. In Proc. of ECCV, 2020. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 150, + 287, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 150, + 287, + 183 + ], + "spans": [ + { + "bbox": [ + 48, + 150, + 287, + 183 + ], + "type": "text", + "content": "[34] Georgios Pavlakos, Luyang Zhu, Xiaowei Zhou, and Kostas Daniilidis. Learning to estimate 3d human pose and shape from a single color image. In Proc. of CVPR, 2018. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 184, + 287, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 184, + 287, + 227 + ], + "spans": [ + { + "bbox": [ + 48, + 184, + 287, + 227 + ], + "type": "text", + "content": "[35] Sida Peng, Junting Dong, Qianqian Wang, Shangzhan Zhang, Qing Shuai, Xiaowei Zhou, and Hujun Bao. 
Animatable neural radiance fields for modeling dynamic human bodies. In Proc. of ICCV, 2021. 1, 2, 3, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 228, + 287, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 228, + 287, + 270 + ], + "spans": [ + { + "bbox": [ + 48, + 228, + 287, + 270 + ], + "type": "text", + "content": "[36] Songyou Peng, Chiyu \"Max\" Jiang, Yiyi Liao, Michael Niemeyer, Marc Pollefeys, and Andreas Geiger. Shape as points: A differentiable poisson solver. In Proc. of NeurIPS, 2021. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 272, + 287, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 272, + 287, + 326 + ], + "spans": [ + { + "bbox": [ + 48, + 272, + 287, + 326 + ], + "type": "text", + "content": "[37] Sida Peng, Yuanqing Zhang, Yinghao Xu, Qianqian Wang, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Neural body: Implicit neural representations with structured latent codes for novel view synthesis of dynamic humans. In Proc. of CVPR, 2021. 1, 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "type": "text", + "content": "[38] Sida Peng, Shangzhan Zhang, Zhen Xu, Chen Geng, Boyi Jiang, Hujun Bao, and Xiaowei Zhou. Animatable neural implicit surfaces for creating avatars from videos. ArXiv, abs/2203.08133, 2022. 1, 2, 3, 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 371, + 287, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 371, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 48, + 371, + 287, + 403 + ], + "type": "text", + "content": "[39] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proc. of CVPR, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 404, + 287, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 404, + 287, + 436 + ], + "spans": [ + { + "bbox": [ + 48, + 404, + 287, + 436 + ], + "type": "text", + "content": "[40] Sergey Prokudin, Michael J. Black, and Javier Romero. SMPLpix: Neural avatars from 3D human models. In Proc. of WACV, 2021. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 437, + 287, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 437, + 287, + 469 + ], + "spans": [ + { + "bbox": [ + 48, + 437, + 287, + 469 + ], + "type": "text", + "content": "[41] Sergey Prokudin, Qianli Ma, Maxime Raafat, Julien Valentin, and Siyu Tang. Dynamic point fields. In Proc. of ICCV, 2023. 2, 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 471, + 287, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 471, + 287, + 503 + ], + "spans": [ + { + "bbox": [ + 48, + 471, + 287, + 503 + ], + "type": "text", + "content": "[42] Amit Raj, Julian Tanke, James Hays, Minh Vo, Carsten Stoll, and Christoph Lassner. Anr-articulated neural rendering for virtual avatars. In Proc. of CVPR, 2021. 
1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 504, + 287, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 504, + 287, + 558 + ], + "spans": [ + { + "bbox": [ + 48, + 504, + 287, + 558 + ], + "type": "text", + "content": "[43] Christian Reiser, Richard Szeliski, Dor Verbin, Pratul P. Srinivasan, Ben Mildenhall, Andreas Geiger, Jonathan T. Barron, and Peter Hedman. Merf: Memory-efficient radiance fields for real-time view synthesis in unbounded scenes. ACM TOG, 42(4), 2023. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 559, + 287, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 287, + 591 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 287, + 591 + ], + "type": "text", + "content": "[44] Darius Rückert, Linus Franke, and Marc Stamminger. Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics, 41(4), 2022. 2, 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 592, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 592, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 592, + 287, + 624 + ], + "type": "text", + "content": "[45] Shunsuke Saito, Jinlong Yang, Qianli Ma, and Michael J. Black. SCANimate: Weakly supervised learning of skinned clothed avatar networks. In Proc. of CVPR, 2021. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 625, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 287, + 667 + ], + "type": "text", + "content": "[46] Sara Fridovich-Keil and Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proc. of CVPR, 2022. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "text", + "content": "[47] Vincent Sitzmann, Semon Rezchikov, William T. Freeman, Joshua B. Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. In Proc. of NeurIPS, 2021. 1" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 715 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[48] Shih-Yang Su, Frank Yu, Michael Zollhoefer, and Helge Rhodin. A-neRF: Articulated neural radiance fields for learning human shape, appearance, and pose. In Proc. of NeurIPS, 2021. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "type": "text", + "content": "[49] Shih-Yang Su, Timur Bagautdinov, and Helge Rhodin. Npc: Neural point characters from video. In Proc. of ICCV, 2023. 
2, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "type": "text", + "content": "[50] Mohammed Suhail, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Generalizable patch-based neural rendering. In Proc. of ECCV, 2022. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 186, + 545, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 218 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 218 + ], + "type": "text", + "content": "[51] Mohammed Suhail1, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Light field neural rendering. In Proc. of CVPR, 2022. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 220, + 545, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 252 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 252 + ], + "type": "text", + "content": "[52] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proc. of CVPR, 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 254, + 545, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 254, + 545, + 297 + ], + "spans": [ + { + "bbox": [ + 307, + 254, + 545, + 297 + ], + "type": "text", + "content": "[53] Huan Wang, Jian Ren, Zeng Huang, Kyle Olszewski, Mengei Chai, Yun Fu, and Sergey Tulyakov. R21: Distilling neural radiance field to neural light field for efficient novel view synthesis. In Proc. of ECCV, 2022. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 298, + 545, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 298, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 298, + 545, + 342 + ], + "type": "text", + "content": "[54] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proc. of CVPR, 2022. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 344, + 545, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 344, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 307, + 344, + 545, + 387 + ], + "type": "text", + "content": "[55] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. In Proc. of NeurIPS, 2021. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 388, + 545, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 431 + ], + "type": "text", + "content": "[56] Shaofei Wang, Marko Mihajlovic, Qianli Ma, Andreas Geiger, and Siyu Tang. Metaatrix: Learning animatable clothed human models from few depth images. In Proc. of NeurIPS, 2021. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 433, + 545, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 433, + 545, + 465 + ], + "spans": [ + { + "bbox": [ + 307, + 433, + 545, + 465 + ], + "type": "text", + "content": "[57] Shaofei Wang, Katja Schwarz, Andreas Geiger, and Siyu Tang. 
Arah: Animatable volume rendering of articulated human sdfs. In Proc. of ECCV, 2022. 1, 2, 3, 4, 6, 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 467, + 545, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 467, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 307, + 467, + 545, + 510 + ], + "type": "text", + "content": "[58] Chung-Yi Weng, Brian Curless, Pratul P. Srinivasan, Jonathan T. Barron, and Ira Kemelmacher-Shlizerman. Humaner: Free-viewpoint rendering of moving people from monocular video. In Proc. of CVPR, 2022. 1, 2, 3, 4, 6, 8" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 512, + 545, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 512, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 307, + 512, + 545, + 555 + ], + "type": "text", + "content": "[59] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3, 5" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 557, + 545, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 557, + 545, + 600 + ], + "spans": [ + { + "bbox": [ + 307, + 557, + 545, + 600 + ], + "type": "text", + "content": "[60] Hongyi Xu, Eduard Gabriel Bazavan, Andrei Zanfir, William T. Freeman, Rahul Sukthankar, and Cristian Sminchisescu. Ghum & ghuml: Generative 3d human shape and articulated pose models. In Proc. of CVPR, 2020. 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 601, + 545, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 601, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 307, + 601, + 545, + 644 + ], + "type": "text", + "content": "[61] Hongyi Xu, Thiemo Alldieck, and Cristian Sminchisescu. H-neRF: Neural radiance fields for rendering and temporal reconstruction of humans in motion. In Proc. of NeurIPS, 2021. 1, 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 647, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 689 + ], + "type": "text", + "content": "[62] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proc. of CVPR, 2022. 2, 3" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 691, + 545, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 715 + ], + "type": "text", + "content": "[63] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. 
Deformable 3d gaussians for" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5029" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 623 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "text", + "content": "high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 3, 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 139 + ], + "type": "text", + "content": "[64] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv 2310.10642, 2023. 3, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 184 + ], + "type": "text", + "content": "[65] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Proc. of NeurIPS, 2020. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 288, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 288, + 217 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 288, + 217 + ], + "type": "text", + "content": "[66] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. In Proc. of NeurIPS, 2021. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 220, + 287, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 220, + 287, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 220, + 287, + 262 + ], + "type": "text", + "content": "[67] Lior Yariv, Peter Hedman, Christian Reiser, Dor Verbin, Pratul P. Srinivasan, Richard Szeliski, Jonathan T. Barron, and Ben Mildenhall. Bakedsdf: Meshing neural sdfs for real-time view synthesis. In Proc. of SIGGRAPH, 2023. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 264, + 287, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 264, + 287, + 296 + ], + "spans": [ + { + "bbox": [ + 48, + 264, + 287, + 296 + ], + "type": "text", + "content": "[68] Keyang Ye, Tianjia Shao, and Kun Zhou. Animatable 3d gaussians for high-fidelity synthesis of human motions, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 298, + 287, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 298, + 287, + 331 + ], + "spans": [ + { + "bbox": [ + 48, + 298, + 287, + 331 + ], + "type": "text", + "content": "[69] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. 
PlenOctrees for real-time rendering of neural radiance fields. In Proc. of ICCV, 2021. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 332, + 287, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 332, + 287, + 365 + ], + "spans": [ + { + "bbox": [ + 48, + 332, + 287, + 365 + ], + "type": "text", + "content": "[70] Zhengming Yu, Wei Cheng, xian Liu, Wayne Wu, and KwanYee Lin. MonoHuman: Animatable human neural field from monocular video. In Proc. of CVPR, 2023. 1, 2, 6, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 366, + 287, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 366, + 287, + 409 + ], + "spans": [ + { + "bbox": [ + 48, + 366, + 287, + 409 + ], + "type": "text", + "content": "[71] Qiang Zhang, Seung-Hwan Baek, Szymon Rusinkiewicz, and Felix Heide. Differentiable point-based radiance fields for efficient view synthesis. In SIGGRAPH Asia Conference Proceedings, 2022. 2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 411, + 287, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 411, + 287, + 465 + ], + "spans": [ + { + "bbox": [ + 48, + 411, + 287, + 465 + ], + "type": "text", + "content": "[72] Fuqiang Zhao, Yuheng Jiang, Kaixin Yao, Jiakai Zhang, Liao Wang, Haizhao Dai, Yuhui Zhong, Yingliang Zhang, Minye Wu, Lan Xu, and Jingyi Yu. Human performance modeling and rendering via neural animated mesh. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 41(6), 2022. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 467, + 287, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 467, + 287, + 509 + ], + "spans": [ + { + "bbox": [ + 48, + 467, + 287, + 509 + ], + "type": "text", + "content": "[73] Yufeng Zheng, Wang Yifan, Gordon Wetzstein, Michael J. Black, and Otmar Hilliges. Pointavatar: Deformable point-based head avatars from videos. In Proc. of ECCV, 2023. 2, 3, 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 511, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 511, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 511, + 287, + 544 + ], + "type": "text", + "content": "[74] Zerong Zheng, Han Huang, Tao Yu, Hongwen Zhang, Yandong Guo, and Yebin Liu. Structured local radiance fields for human avatar modeling. In Proc. of CVPR, 2022. 2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 545, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 545, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 545, + 287, + 588 + ], + "type": "text", + "content": "[75] Wojciech Zielonka, Timur Bagautdinov, Shunsuke Saito, Michael Zollhöfer, Justus Thies, and Javier Romero. Drivable 3d gaussian avatars. arXiv preprint arXiv:2311.08581, 2023. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 590, + 287, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 623 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 623 + ], + "type": "text", + "content": "[76] M. Zwicker, H. Pfister, J. van Baar, and M. Gross. Ewa volume splatting. In Proceedings Visualization, 2001. VIS '01., pages 29-538, 2001. 
4" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5030" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/94b406dc-a25e-4b49-8259-ce68b53e5886_content_list.json b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/94b406dc-a25e-4b49-8259-ce68b53e5886_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..09f9a9499348f556787a2c8c37810dfac51fd1a8 --- /dev/null +++ b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/94b406dc-a25e-4b49-8259-ce68b53e5886_content_list.json @@ -0,0 +1,1833 @@ +[ + { + "type": "text", + "text": "3DGStream: On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos", + "text_level": 1, + "bbox": [ + 99, + 128, + 872, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiakai Sun, Han Jiao, Guangyuan Li, Zhanjie Zhang, Lei Zhao*, Wei Xing* Zhejiang University {csjk, csjh, cslgy, cszzj, cszhl, WXING}@zju.edu.cn https://sjojak.github.io/3dgstream", + "bbox": [ + 145, + 203, + 830, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 308, + 313, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Constructing photo-realistic Free-Viewpoint Videos (FVVs) of dynamic scenes from multi-view videos remains a challenging endeavor. Despite the remarkable advancements achieved by current neural rendering techniques, these methods generally require complete video sequences for offline training and are not capable of real-time rendering. To address these constraints, we introduce 3DGStream, a method designed for efficient FVV streaming of real-world dynamic scenes. Our method achieves fast on-the-fly per-frame reconstruction within 12 seconds and real-time rendering at 200 FPS. Specifically, we utilize 3D Gaussians (3DGs) to represent the scene. Instead of the naive approach of directly optimizing 3DGs per-frame, we employ a compact Neural Transformation Cache (NTC) to model the translations and rotations of 3DGs, markedly reducing the training time and storage required for each FVV frame. Furthermore, we propose an adaptive 3DG addition strategy to handle emerging objects in dynamic scenes. Experiments demonstrate that 3DGStream achieves competitive performance in terms of rendering speed, image quality, training time, and model storage when compared with state-of-the-art methods.", + "bbox": [ + 75, + 340, + 473, + 674 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 684, + 209, + 700 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Constructing Free-Viewpoint Videos (FVVs) from videos captured by a set of known-poses cameras from multiple views remains a frontier challenge within the domains of computer vision and graphics. 
The potential value and application prospects of this task in the VR/AR/XR domains have attracted much research. Traditional approaches predominantly fall into two categories: geometry-based methods that explicitly reconstruct dynamic graphics primitives [15, 17], and image-based methods that obtain new views through interpolation [7, 75]. However, these conventional methods struggle to handle real-world scenes charac", + "bbox": [ + 75, + 710, + 468, + 877 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1b3ede0eaefa4c22f620afdbbb3556bf275be31ca051bf8102762170180f27b2.jpg", + "image_caption": [ + "(a) I-NGP [40]: Per-frame training" + ], + "image_footnote": [], + "bbox": [ + 501, + 308, + 696, + 421 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0918a36a140434114d446465e079766e2a98597dd841d72a4d8d38640297c495.jpg", + "image_caption": [ + "(b) HyperReel [1]: Offline training" + ], + "image_footnote": [], + "bbox": [ + 697, + 308, + 890, + 421 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f51b4a235d5650998f799be73836b78465837a18a055f0a0fdf2bb9a7b1f3df4.jpg", + "image_caption": [ + "(c) StreamRF [29]: Online training" + ], + "image_footnote": [], + "bbox": [ + 501, + 435, + 696, + 547 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/eee2b6190201fb5877944c828dadfa88b4549390a1c435bd585f5a976170d356.jpg", + "image_caption": [ + "(d) Ours: Online training", + "Figure 1. Comparison on the flame steak scene of the N3DV dataset [31]. The training time, requisite storage, and PSNR are computed as averages over the whole video. Our method stands out by the ability of fast online training and real-time rendering, standing competitive in both model storage and image quality." + ], + "image_footnote": [], + "bbox": [ + 697, + 435, + 890, + 547 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "terized by complex geometries and appearance.", + "bbox": [ + 500, + 656, + 813, + 672 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, Neural Radiance Fields (NeRFs) [36] has garnered significant attention due to its potent capabilities in synthesizing novel views as a 3D volumetric representation. A succession of NeRF-like works [19, 29, 31-33, 43-46, 48, 60, 67] further propelled advancements in constructing FVVs on dynamic scenes. Nonetheless, the vast majority of NeRF-like FVV construction methods encountered two primary limitations: (1) they typically necessitate complete video sequences for time-consuming offline training, meaning they can replay dynamic scenes but are unable to stream them, and (2) they generally fail to achieve real-time rendering, thereby hindering practical applications.", + "bbox": [ + 496, + 672, + 892, + 853 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, Kerbl et al. [26] have achieved real-time radiance field rendering using 3D Gaussians (3DGs), thus enabling the instant synthesis of novel views in static scenes", + "bbox": [ + 496, + 854, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding authors.", + "bbox": [ + 94, + 886, + 227, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "20675", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f3f37646edea7c3bfd3112b4a5f4973d55a975aa9c3e9700e92fb8305f9195d4.jpg", + "image_caption": [ + "Figure 2. Comparison of our method with other methods on the N3DV dataset [31]. $\\square$ denotes training from scratch per frame, $\\triangle$ represents offline training on complete video sequences, and $\\bigcirc$ signifies online training on video streams. While achieving online training, our method reaches state-of-the-art performance in both rendering speed and overall training time." + ], + "image_footnote": [], + "bbox": [ + 81, + 90, + 460, + 309 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "with just minutes of training. Inspired by this breakthrough, we propose 3DGStream, a method that utilizes 3DGs to construct Free-Viewpoint Videos (FVVs) of dynamic scenes. Specifically, we first train the initial 3DGs on the multi-view frames at timestep 0. Then, for each timestep $i$ , we use the 3DGs of previous timestep $i - 1$ as initialization and pass it to a two-stage pipeline. (1) In Stage 1, we train a Neural Transformation Cache (NTC) to model the transformations of 3DGs. (2) Then in the Stage 2, we use an adaptive 3DG addition strategy to handle emerging objects by spawning frame-specific additional 3DGs near these objects and optimize them along with periodic splitting and pruning. After the two-stage pipeline concludes, we use both the 3DGs transformed by the NTC and the additional 3DGs for rendering at the current timestep $i$ , with only the former carrying over for initialization of the subsequent timestep. This design significantly reduces the storage requirements for the FVV, as we only need to store the per-frame NTCs and frame-specific additions, rather than all 3DGs for each frame.", + "bbox": [ + 75, + 416, + 468, + 717 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3DGStream is capable of rendering photo-realistic FVVs at megapixel resolution in real-time, boasting exceptionally rapid per-frame training speeds and limited model storage requirements. As illustrated in Figs. 1 and 2, compared with static reconstruction methods that train from scratch per-frame and dynamic reconstruction methods that necessitate offline training across the complete video sequences, our approach excels in both training speed and rendering speed, maintaining a competitive edge in image quality and model storage. 
Furthermore, our method outperforms StreamRF [29], a state-of-the-art technique tackling the exactly same task, in all the relevant aspects.", + "bbox": [ + 75, + 719, + 468, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To summarize, our contributions include:", + "bbox": [ + 517, + 90, + 792, + 104 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose 3DGStream, a method for on-the-fly construction of photo-realistic, real-time renderable FVV on video streams, eliminating the necessity for lengthy offline training on the entire video sequences.", + "- We utilize NTC for modeling the transformations of 3DGs, in conjunction with an adaptive 3DG addition strategy to tackle emerging objects within dynamic scenes. This combination permits meticulous manipulation of 3DGs, accommodating scene alterations with limited performance overhead.", + "- We conduct extensive experiments to demonstrate 3DGStream's competitive edge in rendering quality, training time, and requisite storage, as well as its superior rendering speed, compared to existing state-of-the-art dynamic scene reconstruction methods." + ], + "bbox": [ + 500, + 109, + 890, + 335 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 351, + 640, + 367 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Novel View Synthesis for Static Scenes", + "text_level": 1, + "bbox": [ + 500, + 376, + 828, + 392 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Synthesizing novel views from a set of images of static scenes is a time-honored problem in the domains of computer vision and graphics. Traditional methods such as Lumigraph [8, 22] or Light-Field [10, 16, 28, 50] achieve new view synthesis through interpolation. In recent years, Neural Radiance Fields (NeRF) [36] has achieved photorealistic synthesizing results by representing the radiance field using a multi-layer perceptron (MLP). A series of subsequent works enhance NeRF's performance in various aspects, such as accelerating training speeds [12, 13, 20, 25, 40, 52], achieving real-time rendering [14, 21, 23, 47, 64, 72], and improving synthesis quality on challenging scenes [2-4, 35, 37, 56] or sparse inputs [11, 41, 53, 63, 66, 69, 73]. Since the vanilla NeRF employs costly volume rendering, necessitating neural network queries for rendering, subsequent approaches faced trade-offs in training time, rendering speed, model storage, image quality, and applicability. To address these challenges, Kerbl et al. [26] propose 3D Gaussian Splatting (3DG-S), which integrates of 3DGs with differentiable point-based rendering. 3DG-S enables real-time high-fidelity view synthesis in large-scale unbounded scenes after brief training periods with modest storage requirements. Inspired by this work, we extend its application to the task of constructing FVVs of dynamic scenes. Taking it a step further, we design a on-the-fly training framework to achieve efficient FVV streaming.", + "bbox": [ + 496, + 398, + 890, + 792 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Free-Viewpoint Videos of Dynamic Scenes", + "text_level": 1, + "bbox": [ + 500, + 801, + 857, + 816 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Constructing FVVs from a set of videos of dynamic scenes is a more challenging and applicable task in the domains of computer vision and graphics. 
Earlier attempts to address this task pivoted around the construction of dynamic primitives [15, 17] or resorting to interpolation [7, 75]. With the", + "bbox": [ + 496, + 825, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "20676", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/55d1f9333c7c59cf1303ecb9ba739f032e21eebe7ba75e680000a776bbce7bb8.jpg", + "image_caption": [ + "Figure 3. Overview of 3DGStream. Given a set of multi-view video streams, 3DGStream aims to construct high-quality FVV stream of the captured dynamic scene on-the-fly. Initially, we optimize a set of 3DGs to represent the scene at timestep 0. For each subsequent timestep $i$ , we use the 3DGs from timestep $i - 1$ as an initialization and then engage in a two-stage training process: Stage 1: We train the Neural Transformation Cache (NTC) to model the translations and rotations of 3DGs. After training, the NTC transforms the 3DGs, preparing them for the next timestep and the next stage in the current timestep. Stage 2: We spawn frame-specific additional 3DGs at potential locations and optimize them along with periodic splitting and pruning. After the two-stage process concludes, both transformed and additional 3DGs are used to render at the current timestep $i$ , with only the transformed ones carried into the next timestep." + ], + "image_footnote": [], + "bbox": [ + 78, + 87, + 893, + 289 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "success of NeRF-like methods in novel view synthesis for static scenes, a series of works [1, 9, 19, 29-34, 42, 44-46, 48, 51, 55, 57, 59, 61, 62, 68, 74] attempt to use NeRF for constructing FVVs in dynamic scenes. These works can typically be categorized into five types: prior-driven, flow-based, warp-based, those using spatio-temporal inputs, and per-frame training.", + "bbox": [ + 75, + 405, + 470, + 511 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Prior-driven methods [27, 30, 62, 68, 74] leverage parametric models or incorporate additional priors, such as skeletons, to bolster performance on the reconstruction of specific dynamic objects, e.g., humans. However, their application is limited and not generalizable to broader scenes.", + "bbox": [ + 76, + 513, + 468, + 589 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Flow-based methods [32, 33] primarily focus on constructing FVVs from monocular videos. By estimating the correspondence of 3D points in consecutive frames, they achieve impressive results. Nonetheless, the intrinsic ill-posedness of monocular reconstructions in intricate dynamic scenes frequently calls for supplementary priors like depth, optical flow, and motion segmentation masks.", + "bbox": [ + 75, + 592, + 468, + 698 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Warp-based methods [1, 42, 44, 46, 51, 55, 61] assume that the dynamics of the scene arise from the deformation of static structures. These methods warp the radiance field of each frame onto one or several canonical frames, achieving notable results. However, the strong assumptions they rely on often prevent them from handling topological variations.", + "bbox": [ + 75, + 700, + 470, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Methods that use spatio-temporal inputs [9, 19, 31, 45, 48, 57, 58] enhance radiance fields by adding a temporal dimension, enabling the querying of the radiance field using spatio-temporal coordinates. 
While these techniques showcase a remarkable ability to synthesize new viewpoints in dynamic scenes, the entangled scene parameters can constrain their adaptability for downstream applications.", + "bbox": [ + 75, + 795, + 468, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Per-frame training methods [29, 34, 59] adapt to changes in the scene online by leveraging per-frame training, a paradigm we have also adopted. To be specific, StreamRF [29] employs Plenoxels [20] for scene representation and achieves rapid on-the-fly training with minimal storage requirements through techniques like narrow band tuning and difference-based compression. ReRF [59] uses DVGO [52] for scene representation and optimizes a motion grid and a residual grid frame by frame to model interframe discrepancies, enabling high-quality FVV streaming and rendering. Dynamic3DG [34] optimizes simplified 3DGs and integrates physically-based priors for high-quality novel view synthesis on dynamic scenes.", + "bbox": [ + 496, + 405, + 890, + 601 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Among the aforementioned works, only NeRFPlayer [51], ReRF [59], StreamRF [29], and Dynamic3DG [34] are able to stream FVVs. NeRFPlayer achieves FVV streaming through a decomposition module and a feature streaming module, but it is only able to stream pre-trained models. ReRF and Dynamic3DG are limited to scenes with few objects, require foreground masks, and necessitate minute-level per-frame training times. StreamRF stands out by requiring only a few seconds for each frame's training to construct high-fidelity FVVs on challenging real-world dynamic scenes with compressed model storage. However, it falls short in rendering speed. In contrast, our approach matches or surpasses StreamRF in training speed, model storage, and image quality, all while achieving real-time rendering at 200 FPS.", + "bbox": [ + 496, + 604, + 892, + 830 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Concurrent Works", + "text_level": 1, + "bbox": [ + 500, + 845, + 681, + 859 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Apart from Dynamic3DG, several concurrent works have extended 3DG-S to represent dynamic scenes. De", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "20677", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "formable3DG [70] employs an MLP to model the deformation of 3DGs, while [65] introduces a hexplane-based encoder to enhance the efficiency of deformation queries. Meanwhile, [18, 71] lift 3DGs to 4DG primitives for dynamic scene representation. However, these approaches are limited to offline reconstruction and lack streamable capabilities, whereas our work aims to achieve efficient streaming of FVVs with an online training paradigm.", + "bbox": [ + 75, + 90, + 472, + 212 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Background: 3D Gaussian Splatting", + "text_level": 1, + "bbox": [ + 76, + 217, + 406, + 234 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D Gaussian Splatting (3DG-S) [26] employs anisotropic 3D Gaussians as an explicit scene representation. Paired with a fast differentiable rasterizer, 3DG-S achieves real-time novel view synthesis with only minutes of training.", + "bbox": [ + 75, + 242, + 468, + 303 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. 
3D Gaussians as Scene Representation", + "text_level": 1, + "bbox": [ + 76, + 310, + 406, + 327 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A 3DG is defined by a covariance matrix $\Sigma$ centered at point (i.e., mean) $\mu$ :", + "bbox": [ + 75, + 334, + 468, + 364 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nG (x; \mu , \Sigma) = e ^ {- \frac {1}{2} (x - \mu) ^ {T} \Sigma^ {- 1} (x - \mu)}. \tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 153, + 369, + 468, + 391 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To ensure positive semi-definiteness during optimization, the covariance matrix $\Sigma$ is decomposed into a rotation matrix $R$ and a scaling matrix $S$ :", + "bbox": [ + 75, + 398, + 468, + 444 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\Sigma = R S S ^ {T} R ^ {T}. \tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 450, + 468, + 468 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Rotation is conveniently represented by a unit quaternion, while scaling uses a 3D vector. Additionally, each 3DG contains a set of spherical harmonics (SH) coefficients to represent view-dependent colors, along with an opacity value $\alpha$ , which is used in $\alpha$ -blending (Eq. (4)).", + "bbox": [ + 75, + 477, + 468, + 553 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Splatting for Differentiable Rasterization", + "text_level": 1, + "bbox": [ + 76, + 560, + 428, + 575 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For novel view synthesis, 3DG-S [26] projects 3DGs to 2D Gaussian (2DG) splats [76]:", + "bbox": [ + 75, + 583, + 468, + 614 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\Sigma^ {\prime} = J W \Sigma W ^ {T} J ^ {T}. \tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 619, + 468, + 638 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Here, $\Sigma^{\prime}$ is the covariance matrix in camera coordinates. $J$ is the Jacobian of the affine approximation of the projective transformation, and $W$ is the viewing transformation matrix. By skipping the third row and third column of $\Sigma^{\prime}$ , we can derive a $2\times 2$ matrix denoted as $\Sigma_{2d}$ . Furthermore, projecting the 3DG's mean, $\mu$ , into the image space results in a 2D mean, $\mu_{2d}$ . Consequently, this allows us to define the 2DG in the image space as $G_{2d}(x;\mu_{2d},\Sigma_{2d})$ .", + "bbox": [ + 75, + 646, + 468, + 767 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Using $\Sigma^{\prime}$ , the color $C$ of a pixel can be computed by blending the $N$ ordered points overlapping the pixel:", + "bbox": [ + 75, + 768, + 468, + 799 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nC = \sum_ {i \in N} c _ {i} \alpha_ {i} ^ {\prime} \prod_ {j = 1} ^ {i - 1} \left(1 - \alpha_ {j} ^ {\prime}\right). \tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 806, + 468, + 847 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Here, $c_{i}$ denotes the view-dependent color of the $i$ -th 3DG. 
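In code, Eqs. (2)-(4) reduce to a quaternion-to-rotation conversion, two matrix products, and a front-to-back blend. The NumPy sketch below is only a reference for the math, not the CUDA rasterizer of 3DG-S: it assumes the [w, x, y, z] quaternion convention, takes the Jacobian J and viewing transformation W as given by the camera model, and assumes the splats overlapping a pixel are already depth-sorted with their per-splat alphas precomputed.

```python
import numpy as np

def quat_to_rotmat(q):
    # Unit quaternion [w, x, y, z] -> 3x3 rotation matrix R.
    w, x, y, z = q
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
        [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
    ])

def covariance_3d(q, s):
    # Eq. (2): Sigma = R S S^T R^T, with S = diag(s) built from the scaling vector.
    R = quat_to_rotmat(q)
    S = np.diag(s)
    return R @ S @ S.T @ R.T

def covariance_2d(sigma, J, W):
    # Eq. (3): Sigma' = J W Sigma W^T J^T; the top-left 2x2 block is Sigma_2d.
    sigma_prime = J @ W @ sigma @ W.T @ J.T
    return sigma_prime[:2, :2]

def composite(colors, alphas):
    # Eq. (4): front-to-back alpha blending of the depth-ordered splats of one pixel.
    C = np.zeros(3)
    T = 1.0  # running transmittance, i.e. prod_j (1 - alpha'_j)
    for c, a in zip(colors, alphas):
        C = C + a * T * np.asarray(c, dtype=float)
        T *= 1.0 - a
    return C
```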
$\\alpha_{i}^{\\prime}$ is determined by multiplying the opacity $\\alpha_{i}$ of the $i$ -th 3DG $G$ with the evaluation of the corresponding 2DG $G_{2d}$ .", + "bbox": [ + 76, + 854, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Leveraging a highly-optimized rasterization pipeline coupled with custom CUDA kernels, the training and rendering of 3DG-S are remarkably fast. For instance, for megapixel-scale real-world scenes, just a few minutes of optimization allows 3DGs to achieve photo-realistic visual quality and rendering speeds surpassing 100 FPS.", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Method", + "text_level": 1, + "bbox": [ + 500, + 194, + 589, + 209 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3DGStream constructs photo-realistic FVV streams from multi-view video streams on-the-fly using a per-frame training paradigm. We initiate the process by training 3DGs [26] at timestep 0. For subsequent timesteps, we employ the previous timestep's 3DGs as an initialization and pass them to a two-stage pipeline. Firstly (Sec. 4.1), a Neural Transformation Cache (NTC) is trained to model the transformation for each 3DG. Once the training is finished, we transform the 3DGs and carry the transformed 3DGs to the next timestep. Secondly (Sec. 4.2), we employ an adaptive 3DG addition strategy to handle emerging objects. For each FVV frame, we render views at the current timestep using both the transformed 3DGs and additional 3DGs, while the latter are not passed to the next timestep. Note that we only need to train and store the parameters of the NTC and the additional 3DGs for each subsequent timestep, not all the 3DGs. We depict an overview of our approach in Fig. 3.", + "bbox": [ + 496, + 218, + 890, + 477 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Neural Transformation Cache", + "text_level": 1, + "bbox": [ + 498, + 484, + 766, + 500 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For NTC, we seek a structure that is compact, efficient, and adaptive to model the transformations of 3DGs. Compactness is essential to reduce the model storage. Efficiency enhances training and inference speeds. Adaptivity ensures the model focuses more on dynamic regions. Additionally, it would be beneficial if the structure could consider certain priors of dynamic scenes [5, 24, 54], such as the tendency for neighboring parts of an object to have similar motion.", + "bbox": [ + 496, + 508, + 890, + 628 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Inspired by Neural Radiance Caching [39] and I-NGP [40], we employ multi-resolution hash encoding combined with a shallow fully-fused MLP [38] as the NTC. Specifically, following I-NGP, we use multi-resolution voxel grids to represent the scene. Voxel grids at each resolution are mapped to a hash table storing a $d$ -dimensional learnable feature vector. For a given 3D position $x \\in \\mathbb{R}^3$ , its hash encoding at resolution $l$ , denoted as $h(x;l) \\in \\mathbb{R}^d$ , is the linear interpolation of the feature vectors corresponding to the eight corners of the surrounding grid. Consequently, its multi-resolution hash encoding $h(x) = [h(x;0), h(x;1), \\dots, h(x;L - 1)] \\in \\mathbb{R}^{Ld}$ , where $L$ represents the number of resolution levels. 
The multi-resolution hash encoding addresses all our requirements for the NTC:", + "bbox": [ + 496, + 628, + 890, + 840 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Compactness: Hashing effectively reduces the storage space needed for encoding the whole scene.", + "- Efficiency: Hash table lookup operates in $O(1)$ , and is highly compatible with modern GPUs." + ], + "bbox": [ + 500, + 840, + 888, + 898 + ], + "page_idx": 3 + }, + { + "type": "footer", + "text": "20678", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Adaptivity: Hash collisions occur in hash tables at finer resolutions, allowing regions with larger gradients—representing dynamic regions in our context—to drive the optimization.", + "- Priors: The combination of linear interpolation and the voxel-grid structure ensures the local smoothness of transformations. Additionally, the multi-resolution approach adeptly merges global and local information." + ], + "bbox": [ + 76, + 90, + 467, + 210 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Furthermore, to enhance the NTC's performance with minimal overhead, we utilize a shallow fully-fused MLP [38]. This maps the hash encoding to a 7-dimensional output: the first three dimensions indicate the translation of the 3DG; the remaining dimensions represent the rotation of the 3DG using quaternions. Given the multi-resolution hash encoding coupled with the MLP, our NTC is formalized as:", + "bbox": [ + 75, + 212, + 467, + 316 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nd\mu, dq = \mathrm{MLP}(h(\mu)), \tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 328, + 467, + 345 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\mu$ denotes the mean of the input 3DG. We transform the 3DGs based on $d\mu$ and $dq$ . Specifically, the following parameters of the transformed 3DGs are given as:", + "bbox": [ + 75, + 356, + 467, + 401 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Mean: $\mu' = \mu + d\mu$ , where $\mu'$ is the new mean and $+$ represents vector addition.", + "- Rotation: $q' = \text{norm}(q) \times \text{norm}(dq)$ , where $q'$ is the new rotation, $\times$ indicates quaternion multiplication and norm denotes normalization.", + "- SH Coefficients: Upon rotating the 3DG, the SH coefficients should also be adjusted to align with the rotation of the 3DG. Leveraging the rotation invariance of SH, we directly employ SH Rotation to update SHs. Please refer to the supplementary materials (Suppl.) for details." + ], + "bbox": [ + 76, + 401, + 467, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Stage 1, we transform the 3DGs from the previous frame by the NTC and then render with them. The parameters of the NTC are optimized by the loss between the rendered image and the ground truth. Following 3DG-S [26], the loss function is $L_{1}$ combined with a D-SSIM term:", + "bbox": [ + 75, + 551, + 467, + 627 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL = (1 - \lambda) L_{1} + \lambda L_{\text{D-SSIM}}, \tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 638, + 467, + 654 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\lambda = 0.2$ in all our experiments. 
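Applied to a single Gaussian, Eq. (5) and the update rules above amount to one vector addition and one quaternion product, trained with the loss of Eq. (6). The sketch below assumes an `ntc` callable standing in for MLP(h(mu)) and the [w, x, y, z] quaternion convention; the real implementation batches this on the GPU and additionally rotates the SH coefficients.

```python
import numpy as np

def quat_normalize(q):
    return np.asarray(q, dtype=float) / np.linalg.norm(q)

def quat_multiply(a, b):
    # Hamilton product of two quaternions given as [w, x, y, z].
    w1, x1, y1, z1 = a
    w2, x2, y2, z2 = b
    return np.array([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ])

def transform_gaussian(mu, q, ntc):
    # Eq. (5): the NTC maps the (hash-encoded) mean to a translation d_mu and a
    # rotation dq; `ntc` is a placeholder for MLP(h(mu)).
    d_mu, dq = ntc(mu)
    mu_new = np.asarray(mu, dtype=float) + np.asarray(d_mu, dtype=float)
    q_new = quat_multiply(quat_normalize(q), quat_normalize(dq))
    # The SH coefficients would also be rotated by dq here (omitted in this sketch).
    return mu_new, q_new

def stage1_loss(l1, d_ssim, lam=0.2):
    # Eq. (6): L = (1 - lambda) * L1 + lambda * L_D-SSIM, with lambda = 0.2.
    return (1.0 - lam) * l1 + lam * d_ssim
```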
It should be noted that during the training process, the 3DGs from the previous frame remain frozen and do not undergo any updates. This implies that the input to the NTC remains consistent.", + "bbox": [ + 75, + 665, + 467, + 726 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Additionally, to ensure training stability, we initialize the NTC with warm-up parameters. The loss employed during the warm-up is defined as:", + "bbox": [ + 75, + 726, + 467, + 771 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL_{\text{warm-up}} = \left\| d\mu \right\|_{1} - \cos^{2}(\operatorname{norm}(dq), Q), \tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 124, + 781, + 467, + 799 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $Q$ is the identity quaternion. The first term uses the $L_{1}$ norm to ensure the estimated translation approaches zero, while the second term, leveraging cosine similarity, ensures the estimated rotation approximates no rotation. However, given the double-covering property of the unit quaternions, we use the square of the cosine similarity. For", + "bbox": [ + 75, + 809, + 467, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "each scene, we execute the warm-up solely after the training at timestep 0, using noise-augmented means of the initial 3DGs as input. After 3000 iterations of training (roughly 20 seconds), the parameters are stored and used to initialize the NTCs for all the following timesteps.", + "bbox": [ + 496, + 90, + 890, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Adaptive 3DG Addition", + "text_level": 1, + "bbox": [ + 498, + 179, + 718, + 195 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Relying solely on 3DG transformations adequately covers a significant portion of real-world dynamic scenes, with translations effectively managing occlusions and disappearances in subsequent timesteps. However, this approach falters when faced with objects not present in the initial frame, such as transient objects like flames or smoke, and new persistent objects like the liquid poured out of a bottle. Since 3DGs are an unstructured explicit representation, it is essential to add new 3DGs to model these emerging objects. Considering constraints related to model storage requirements and training complexities, it is not feasible to generate an extensive number of additional 3DGs, nor to allow them to be used in subsequent frames, as this would cause 3DGs to accumulate over time. This necessitates a strategy for swiftly generating a limited number of frame-specific 3DGs to model these emerging objects precisely and thereby enhance the completeness of the scene at the current timestep.", + "bbox": [ + 496, + 203, + 890, + 460 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Firstly, we need to ascertain the locations of the emerging objects. Inspired by 3DG-S [26], we recognized the view-space positional gradients of 3DGs as a key indicator. We observed that for emerging objects, the 3DGs in proximity exhibited large view-space positional gradients. This is attributed to the optimization attempting to 'masquerade' the emerging object by transforming the 3DGs. However, since we prevent the colors of the 3DGs from being updated in Stage 1, this attempt falls short. 
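The selection step implied by this observation, together with the spawning rule detailed in the following paragraphs (the threshold tau_grad and sampling positions from N(mu, 2*Sigma)), can be sketched as follows. The gradient bookkeeping here is a simplified stand-in for the view-space gradient statistics the rasterizer accumulates during backpropagation, and the helper is illustrative rather than the released code.

```python
import numpy as np

def select_and_spawn(means, covariances, grad_sums, grad_counts,
                     tau_grad=0.00015, seed=0):
    # Average magnitude of the view-space positional gradient accumulated per 3DG
    # over the final training epoch of Stage 1 (tracked by the training loop).
    avg_grad = grad_sums / np.maximum(grad_counts, 1)
    selected = np.nonzero(avg_grad > tau_grad)[0]

    # Spawn one additional 3DG near each selected one, drawing its position from
    # N(mu, 2 * Sigma); the remaining attributes are inherited or reset as
    # described in the text (identity rotation, opacity 0.1).
    rng = np.random.default_rng(seed)
    if len(selected) == 0:
        return selected, np.empty((0, 3))
    new_means = np.stack([
        rng.multivariate_normal(means[i], 2.0 * covariances[i]) for i in selected
    ])
    return selected, new_means
```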
Nonetheless, they are still transformed to appropriate positions, with large view-space positional gradients.", + "bbox": [ + 496, + 460, + 890, + 627 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Based on the aforementioned observations, we deem it appropriate to introduce additional 3DGs around these high-gradient regions. Moreover, to exhaustively capture every potential location where new objects might emerge, we adopt an adaptive 3DG spawn strategy. Specifically, we track view-space positional gradient during the final training epoch of Stage 1. Once this stage concludes, we select 3DGs that have an average magnitude of view-space position gradients exceeding a relatively low threshold $\\tau_{\\text{grad}} = 0.00015$ . For each selected 3DG, the position of the additional 3DG is sampled from $X \\sim \\mathcal{N}(\\mu, 2\\Sigma)$ , where $\\mu$ and $\\Sigma$ is the mean and the covariance matrix of the selected 3DG. While we avoid assumptions about the other attributes of the additional 3DGs, improper initializations of SH coefficients and scaling vectors tend to result in an optimization preference for reducing opacity over adjusting these parameters. This causes additional 3DGs to quickly become transparent, thereby failing to capture the emerging objects. To mitigate", + "bbox": [ + 496, + 628, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "20679", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/52d98daa9bb69c47e43f3e12ed485e442b0635e4e088dcea92ccdaaf91986686.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 80, + 88, + 238, + 190 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ae26b7747c376143137a582a93b2850c98b6cff2b532905bd821721976106b8b.jpg", + "image_caption": [ + "(a) I-NGP [40]" + ], + "image_footnote": [], + "bbox": [ + 80, + 191, + 236, + 314 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/bd062e4a5cf408db25ebf02c6f7531404be0b2225855137eb1501564c34fec85.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 241, + 88, + 401, + 190 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fd03f12ece401bbc6c970708387398d4f14ea125baaa835db430a9c1f14401d3.jpg", + "image_caption": [ + "(b) HyperReel [1]" + ], + "image_footnote": [], + "bbox": [ + 241, + 191, + 401, + 314 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a9d751f4253cc380e4540e6800e1d04ba16adc81ba059b71274d8e2e3968c70f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 405, + 88, + 563, + 190 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/55703ea1220f0eac49eb846d6e1a479105004cf1203af013fcb7264ae33a8c60.jpg", + "image_caption": [ + "(c) StreamRF [29]", + "Figure 4. Qualitative comparisons on the discussion scene of the Meet Room dataset and the sear steak scene of the N3DV dataset." 
+ ], + "image_footnote": [], + "bbox": [ + 405, + 191, + 563, + 314 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a81ba47ebcbf2aaa2aa7a9f5e1e45665b6700bfa8ef732ef0b140374e946c99a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 566, + 88, + 728, + 190 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/08e78befab9c69bf2cce80314d4bf13b63e00c8785f420dbebb8eaadccbfa376.jpg", + "image_caption": [ + "(d) 3DGStream" + ], + "image_footnote": [], + "bbox": [ + 566, + 191, + 727, + 314 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2bd76b79bc5ffc64bcc77a83db5e8fa3e0d8304f7009fe26e9ce2c50bde8ab37.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 730, + 88, + 890, + 190 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/89a217cf6df7540d2b61524a00535766caa0b6d2ca7e0c8eb2e34169c6e63d1b.jpg", + "image_caption": [ + "(e) Ground Truth" + ], + "image_footnote": [], + "bbox": [ + 730, + 191, + 890, + 314 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/0108cbb951f65f09206c0d7ddc125f4f5bf4a8c605bda25510074c5c5f4dffa0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CategoryMethodPSNR†(dB)Storage↓(MB)Train↓(mins)Render†(FPS)Streamable
StaticPlenoxels [20]30.774106238.3
I-NGP [40]28.6248.21.32.9
3DG-S [26]32.0847.18.3390
OfflineDyNeRF [31]29.58†0.12600.02×
NeRFPlayer [51]30.6917.11.20.05
HexPlane [9]31.700.82.40.21×
K-Planes [48]31.631.00.80.15×
HyperReel [1]31.101.21.82.00×
MixVoxels [57]30.801.70.2716.7×
OnlineStreamRF [29]30.6817.7/31.4*0.258.3
Ours31.677.6/7.8*0.20215
", + "bbox": [ + 78, + 361, + 467, + 512 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "this issue, the SH coefficients and scaling vectors of these 3DGs are derived from the selected ones, with rotations set to the identity quaternion $\\mathbf{q} = [1,0,0,0]$ and opacity initialized at 0.1. After spawning, the 3DGs undergo optimization utilizing the same loss function (Eq. (6)) as Stage 1. Note that only the parameters of the additional 3DGs are optimized, while those of the transformed 3DGs remain fixed.", + "bbox": [ + 75, + 595, + 468, + 700 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To guard against local minima and manage the number of additional 3DGs, we implement an adaptive 3DG quantity control strategy. Specifically, in Stage 2, we set a relatively high threshold, $\\tau_{\\alpha} = 0.01$ , for the opacity value. At the end of each training epoch, for 3DGs with view-space position gradients exceeding $\\tau_{grad}$ , we spawn additional 3DGs nearby to address under-reconstructed regions. These additional 3DGs inherit their rotations and SH coefficients from the original 3DG, but their scaling is adjusted to $80\\%$ of the original, mirroring the 'split' operation described by Kerbl et al. [26]. Subsequently, we discard any additional 3DGs with opacity values below $\\tau_{\\alpha}$ to suppress the growth in the quantity of 3DGs.", + "bbox": [ + 75, + 704, + 468, + 902 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/c69731b2a882ffd31136f0f1f56c76ed0a191acce903b3c90d595b11d0e14d2f.jpg", + "table_caption": [ + "Table 1. Quantitative comparison on the N3DV dataset. The training time, required storage and PSNR are averaged over the whole 300 frames for each scene. ${}^{ \\dagger }$ DyNeRF [31] only report metrics on the flame salmon scene. *Considering the initial model." + ], + "table_footnote": [], + "table_body": "
MethodPSNR↑ (dB)Storage↓ (MB)Train↓ (mins)Render↑ (FPS)
Plenoxels [20]27.1510151410
I-NGP [40]28.1048.21.14.1
3DG-S [26]31.3121.12.6571
StreamRF [29]26.725.7/9.0*0.1710
Ours30.794.0/4.1*0.10288
", + "bbox": [ + 501, + 361, + 898, + 489 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Quantitative comparison on the Meet Room dataset. Note that the training time, required storage and PSNR are averaged over the whole 300 frames. *Considering the initial model.", + "bbox": [ + 498, + 500, + 890, + 542 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 500, + 547, + 632, + 566 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Datasets", + "text_level": 1, + "bbox": [ + 500, + 573, + 601, + 588 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct experiments on two real-world dynamic scene datasets: N3DV dataset [31] and Meet Room dataset [29].", + "bbox": [ + 498, + 595, + 890, + 626 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "N3DV dataset [31] is captured using a multi-view system of 21 cameras, comprises dynamic scenes recorded at a resolution of $2704 \\times 2028$ and 30 FPS. Following previous works [9, 29, 31, 48, 51, 57], we downsample the videos by a factor of two and follow the training and validation camera split provided by [31].", + "bbox": [ + 496, + 627, + 890, + 717 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Meet Room dataset [29] is captured using a 13-camera multi-view system, comprises dynamic scenes recorded at a resolution of $1280 \\times 720$ and 30 FPS. Following [29], we utilize 13 views for training and reserved 1 for testing.", + "bbox": [ + 496, + 718, + 890, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Implementation", + "text_level": 1, + "bbox": [ + 500, + 786, + 658, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We implement 3DGStream upon the codes of 3D Gaussian Splitting (3DG-S) [26], and implement the Neural Transformation Cache (NTC) using tiny-cuda-nn [38]. For the training of initial 3DGs, we fine-tune the learning rates on the N3DV dataset based on the default settings of 3DG-S, and apply them to the Meet Room dataset. For all scenes,", + "bbox": [ + 496, + 809, + 890, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "20680", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d0d6fadb2b57332d5acb9dfad6d2677720664f253140eb705b21e072bacd049a.jpg", + "image_caption": [ + "Figure 5. Comparison of different approaches for modeling the transformation of 3DGs. Conducted on the second frame of the flame salmon video, utilizing identical initial 3DGs." + ], + "image_footnote": [], + "bbox": [ + 81, + 90, + 465, + 309 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e8290a6e386565d983f49d3106dee95b43297f1a82f85d16bd6caac628c70b0e.jpg", + "image_caption": [ + "Figure 6. Comparison of different approaches on the flame salmon scene." + ], + "image_footnote": [], + "bbox": [ + 81, + 375, + 464, + 593 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/2d227832986811198cc67295c19fdcc46e0f93fc6149dbbb143b71710ea42980.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Variant | PSNR↑ (dB) | #Additional 3DGs↓
Baseline | 28.39 | 0
Rnd. Spawn | 28.39 | 971.9
w/o Quant. Ctrl. | 28.43 | 8710.8
Full Model | 28.42 | 477.7
", + "bbox": [ + 91, + 641, + 464, + 734 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. Ablation study of the Adaptive 3DG Addition strategy on the flame salmon scene. The metrics are averaged over the whole sequence.", + "bbox": [ + 75, + 744, + 468, + 786 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "we train the NTC for 150 iterations in Stage 1. and train the additional 3DGs for 100 iterations in Stage 2. Please refer to Suppl. for more details.", + "bbox": [ + 75, + 787, + 470, + 834 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3. Comparisons", + "text_level": 1, + "bbox": [ + 76, + 845, + 217, + 862 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Quantitative comparisons. Our quantitative analysis involves benchmarking 3DGStream on the N3DV dataset and", + "bbox": [ + 76, + 869, + 470, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/32dae74192b7fd1ee5083dfdcf9c39c6bf2b758c5eef9cf1ce8771508d0a9dfa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 88, + 630, + 162 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/929c859c4667c346e914af0d40aec71632d76f0090bcf438c4b9e516eb4ddbf4.jpg", + "image_caption": [ + "(a) Result of Stage 1" + ], + "image_footnote": [], + "bbox": [ + 501, + 164, + 630, + 238 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d3f16064cb27a3f787bbc3dd059602dc774e5454cc24a07a5c886dc52a45d911.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 632, + 88, + 759, + 162 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/89ead230eb52d15332e8e78864a044f4dedfbbb4f775193ede3b623a760103bd.jpg", + "image_caption": [ + "(b) Result of Stage 2" + ], + "image_footnote": [], + "bbox": [ + 632, + 164, + 759, + 238 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/32a4544da6bed84849eb6edc553687d85f4361b2400e4297c8c6199ad4ae282c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 763, + 88, + 892, + 162 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cbf84c21826185045c50ad4b71ca180440b5be0a7b216b812950e8175b92799c.jpg", + "image_caption": [ + "(c) Ground Truth", + "Figure 7. Quantitative results of the ablation study conducted on the flame steak scene and the coffee martini scene." + ], + "image_footnote": [], + "bbox": [ + 763, + 164, + 890, + 238 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Meet Room dataset, comparing it with a range of representative methods. We take Plenoxels [20], I-NGP [40], and 3DG-S [26] as representatives of fast static scene reconstruction methods, training them from scratch for each frame. StreamRF [29], Dynamic3DG [34], and ReRF [60] are designed for online training in dynamic scenes. Owing to the limitations of Dynamic3DG and ReRF, which necessitate foreground masks and are confined to scenes with fewer objects, and their minute-level per-frame training times, we select StreamRF selected as the representative for online training methods due to its adaptability and training feasibility on the N3DV and MeetRoom datasets. 
To demonstrate 3DGStream's competitive image quality, we drew comparisons with the quantitative results reported for the N3DV dataset in the respective papers of DyNeRF [31], NeRFPlayer [51], HexPlane [9], K-Planes [48], HyperReel [1], and MixVoxels [57], all of which are methods for reconstructing dynamic scenes through offline training on entire video sequences.", + "bbox": [ + 496, + 306, + 892, + 594 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In Tab. 1, we present the averaged rendering speed, training time, required storage, and peak signal-to-noise ratio (PSNR) over all scenes of the N3DV dataset. For each scene, the latter three metrics are computed as averages over the whole 300 frames. Besides, we provide a breakdown of comparisons across all scenes within the N3DV dataset in the Suppl. To demonstrate the generality of our method, we conducted experiments on the MeetRoom dataset, as introduced by StreamRF [29], and performed a quantitative comparison against Plenoxels [20], I-NGP [40], 3DG-S [26], and StreamRF [29]. The results are presented in Tab. 2. As presented in Tabs. 1 and 2, our method demonstrates superiority through fast online training and real-time rendering, concurrently maintaining a competitive edge in terms of model storage and image quality. Furthermore, among the methods capable of streaming FVVs, our model requires the minimal model storage.", + "bbox": [ + 496, + 595, + 893, + 851 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Qualitative comparisons. While our approach primarily aims to enhance the efficiency of online FVV construction, as illustrated in Tabs. 1 and 2, it still achieves competitive", + "bbox": [ + 498, + 854, + 893, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "20681", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/a783aca32dcf45351a39b20276a1258e5801bdcead4cf631c1c05a82130b5f48.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Step | Overhead (ms) | FPS
Render w/o NTC | 2.56 | 390
+ Query NTC | 0.62 |
+ Transformation | 0.02 |
+ SH Rotation | 1.46 |
Total | 4.66 | 215
", + "bbox": [ + 129, + 88, + 416, + 200 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4. Rendering profiling for the flame salmon scene at megapixel resolution. Note that flame salmon is the most time-consuming to render of all scenes in our experiments.", + "bbox": [ + 75, + 212, + 468, + 255 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "image quality. In Fig. 4, we present a qualitative comparison with I-NGP [40], HyperReel [1], and StreamRF [29] across scenes on the N3DV dataset [31] and the Meet Room dataset [29], with a special emphasis on dynamic objects such as faces, hands, and tongs, as well as intricate objects like labels and statues. It is evident that our method faithfully captures the dynamics of the scene without sacrificing the ability to reconstruct intricate objects. Please refer to our project page for more video results.", + "bbox": [ + 75, + 263, + 468, + 398 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4. Evaluations", + "text_level": 1, + "bbox": [ + 76, + 409, + 205, + 422 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Neural Transformation Cache. We utilize distinct approaches to model the transformations of 3DGs from the first to the second frame within the flame salmon video of the N3DV dataset to show the effectiveness of NTC. Fig. 5 shows that, without multi-resolution hash encoding $(w / o$ Hash enc.), the MLP faces challenges in modeling transformations effectively. Additionally, without the warm-up $(w / o$ Warm-up), it takes more iterations for convergence. Besides, even when compared with the direct optimization of the previous frame's 3DGs (Direct Opt.), NTC demonstrate on-par performance. In Fig. 6, We present the results of different approaches applied across the entire flame salmon video, excluding the first frame (i.e., Frame 0). $w / o$ Hash enc. and $w / o$ Warm-up. are not able to converge swiftly, resulting in accumulating errors as the sequence progresses. Direct Opt. yields the best outcomes but at the cost of inflated storage. Utilizing NTC, in contrast, delivers comparable results with substantially lower storage overhead by eliminating the need for saving all the 3DGs.", + "bbox": [ + 75, + 431, + 468, + 718 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Adaptive 3DG Addition. Tab. 3 presents the quantitative results of the ablation study conducted on the flame salmon scene, and more results are presented in Suppl. The base model without Stage 2, and a set of randomly spawned 3DGs (Rnd. Spawn) in equivalent quantities to our spawn strategy, both fail to capture emerging objects. The variant without our quantity control strategy ( $w/o$ Quant. Ctrl.) manages to model emerging objects but requires a significantly larger number of additional 3DGs. In contrast, our full model proficiently reconstructs emerging objects using a minimal addition of 3DGs. The ablation study illustrated in Fig. 7 qualitatively showcases the effect of the Adap", + "bbox": [ + 75, + 719, + 468, + 901 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "tive 3DG Addition strategy, highlighting its ability to reconstruct the objects not present in the initial frame, such as coffee in a pot, a dog's tongue, and flames.", + "bbox": [ + 496, + 90, + 890, + 136 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Real-time Rendering. Following 3DG-S [26], we employ the SIBR framework [6] to measure the rendering speed. 
Once all resources required are loaded onto the GPU, the additional overhead of our approach is primarily the time taken to query the NTC and transform the 3DGs. As detailed in Tab. 4, our method benefits from the efficiency of the multi-resolution hash encoding and the fully-fused MLP [38], which facilitate rapid NTC query. Notably, the most time-consuming step is the SH Rotation. However, our experiments indicate that the SH rotation has a minimal impact on the reconstruction quality, which may be attributed to the 3DGs modeling view-dependent colors through alternative mechanisms (e.g., small 3DGs of varying colors surrounding the object) rather than SH coefficients. Nonetheless, we maintain SH rotation for theoretical soundness.", + "bbox": [ + 496, + 136, + 890, + 362 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Discussion", + "text_level": 1, + "bbox": [ + 500, + 375, + 612, + 390 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The quality of 3DG-S [26] on the initial frame is crucial to 3DGStream. Therefore, we inherit the limitations of 3DG-S, such as high dependence on the initial point cloud. As illustrated in Fig. 7, there are obvious artifacts beyond the windows, attributable to COLMAP's [49] inability to reconstruct distant landscapes. Hence, our method stands to benefit directly from future enhancements to 3DG-S. Moreover, for efficient on-the-fly training, we limit the number of training iterations, which restricts modeling of drastic motion in Stage 1 and complex emerging objects in Stage 2.", + "bbox": [ + 496, + 401, + 890, + 551 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 565, + 617, + 580 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We propose 3DGStream, an novel method for efficient Free-Viewpoint Video streaming. Based on 3DG-S [26], we utilizes an effective Neural Transformation Cache to capture the motion of objects. In addition, we propose an Adaptive 3DG Addition strategy to accurately model emerging objects in dynamic scenes. The two-stage pipeline of 3DGStream enables the online reconstruction of dynamic scenes in video streams. While ensuring photo-realistic image quality, 3DGStream achieves on-the-fly training ( $\\sim$ 10s per-frame) and real-time rendering ( $\\sim$ 200FPS) at megapixel resolution with moderate requisite storage.", + "bbox": [ + 496, + 590, + 890, + 756 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "8. Acknowledgement", + "text_level": 1, + "bbox": [ + 500, + 768, + 679, + 786 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported in part by Zhejiang Province Program (2022C01222, 2023C03199, 2023C03201), the National Program of China (62172365, 2021YFF0900604, 19ZDA197), Ningbo Science and Technology Plan Project (022Z167, 2023Z137), and MOE Frontier Science Center for Brain Science & Brain-Machine Integration (Zhejiang University).", + "bbox": [ + 496, + 794, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "footer", + "text": "20682", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 1, 3, 6, 7, 8", + "[2] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2", + "[3] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5470-5479, 2022.", + "[4] Jonathan T. Barron, Ben Mildenhall, Dor Verbin, Pratul P. Srinivasan, and Peter Hedman. Zip-nerf: Anti-aliased grid-based neural radiance fields. ICCV, 2023. 2", + "[5] Michael J Black and Paul Anandan. The robust estimation of multiple motions: Parametric and piecewise-smooth flow fields. Computer vision and image understanding, 63(1):75-104, 1996. 4", + "[6] Sebastien Bonopera, Jerome Esnault, Siddhant Prakash, Simon Rodriguez, Theo Thonat, Mehdi Benadel, Gaurav Chaurasia, Julien Philip, and George Drettakis. sibr: A system for image based rendering, 2020. 8", + "[7] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 1, 2", + "[8] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In SIGGRAPH, pages 425-432, 2001. 2", + "[9] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. CVPR, 2023. 3, 6, 7", + "[10] Jin-Xiang Chai, Xin Tong, Shing-Chow Chan, and Heung-Yeung Shum. Plenoptic sampling. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 307-318, 2000. 2", + "[11] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14124-14133, 2021. 2", + "[12] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In European Conference on Computer Vision (ECCV), 2022. 2", + "[13] Anpei Chen, Zexiang Xu, Xinyue Wei, Siyu Tang, Hao Su, and Andreas Geiger. Dictionary fields: Learning a neural basis decomposition. ACM Trans. Graph., 2023. 2", + "[14] Zhiqin Chen, Thomas Funkhouser, Peter Hedman, and Andrea Tagliasacchi. Mobilenerf: Exploiting the polygon rasterization pipeline for efficient neural field rendering on mo" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "bile architectures. In The Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2", + "[15] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (TOG), 34(4):69, 2015. 1, 2", + "[16] Abe Davis, Marc Levoy, and Fredo Durand. Unstructured light fields. Comput. Graph. Forum, 31(2pt1):305-314, 2012. 
2", + "[17] Mingsong Dou, Philip Davidson, Sean Ryan Fanello, Sameh Khamis, Adarsh Kowdle, Christoph Rhemann, Vladimir Tankovich, and Shahram Izadi. Motion2fusion: Real-time volumetric performance capture. ACM Trans. Graph., 36(6): 246:1-246:16, 2017. 1, 2", + "[18] Yuanxing Duan, Fangyin Wei, Qiyu Dai, Yuhang He, Wenzheng Chen, and Baoquan Chen. 4d gaussian splatting: Towards efficient novel view synthesis for dynamic scenes. arXiv preprint arXiv:2402.03307, 2024. 4", + "[19] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In SIGGRAPH Asia 2022 Conference Papers, 2022. 1, 3", + "[20] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 2, 3, 6, 7", + "[21] Stephan J. Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. Fastnerf: High-fidelity neural rendering at 200fps. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14346-14355, 2021. 2", + "[22] Steven J. Gortler, Radek Grzesczuk, Richard Szeliski, and Michael F. Cohen. The lumigraph. In Proceedings of the 23rd Annual Conference on Computer Graphics and Interactive Techniques, page 43-54, New York, NY, USA, 1996. Association for Computing Machinery. 2", + "[23] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5855-5864, 2021. 2", + "[24] Berthold KP Horn and Brian G Schunck. Determining optical flow. Artificial intelligence, 17(1-3):185-203, 1981. 4", + "[25] Wenbo Hu, Yuling Wang, Lin Ma, Bangbang Yang, Lin Gao, Xiao Liu, and Yuewen Ma. Tri-miprf: Tri-mip representation for efficient anti-aliasing neural radiance fields. In ICCV, 2023. 2", + "[26] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics, 42 (4), 2023. 1, 2, 4, 5, 6, 7, 8", + "[27] Tobias Kirschstein, Shenhan Qian, Simon Giebenhain, Tim Walter, and Matthias Nießner. Nersemble: Multi-view radiance field reconstruction of human heads. arXiv preprint arXiv:2305.03027, 2023. 3" + ], + "bbox": [ + 503, + 92, + 893, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "20683", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Marc Levoy and Pat Hanrahan. Light field rendering. In SIGGRAPH, pages 31-42, 1996. 2", + "[29] Lingzhi Li, Zhen Shen, Zhongshu Wang, Li Shen, and Ping Tan. Streaming radiance fields for 3d video synthesis. In NeurIPS, 2022. 1, 2, 3, 6, 7, 8", + "[30] Ruilong Li, Julian Tanke, Minh Vo, Michael Zollhöfer, Jürgen Gall, Angjoo Kanazawa, and Christoph Lassner. Tava: Template-free animatable volumetric actors. In European Conference on Computer Vision, pages 419-436. Springer, 2022. 3", + "[31] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 1, 2, 3, 6, 7, 8", + "[32] Zhengqi Li, Simon Niklaus, Noah Snively, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 3", + "[33] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 3", + "[34] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In 3DV, 2024. 3, 7", + "[35] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In CVPR, 2021. 2", + "[36] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 1, 2", + "[37] Ben Mildenhall, Peter Hedman, Ricardo Martin-Brualla, Pratul P Srinivasan, and Jonathan T Barron. Nerf in the dark: High dynamic range view synthesis from noisy raw images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16190-16199, 2022. 2", + "[38] Thomas Müller. tiny-cuda-nn, 2021. 4, 5, 6, 8", + "[39] Thomas Müller, Fabrice Rousselle, Jan Novák, and Alexander Keller. Real-time neural radiance caching for path tracing. ACM Transactions on Graphics (TOG), 40(4):1-16, 2021. 4", + "[40] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, 2022. 1, 2, 4, 6, 7, 8", + "[41] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis" + ], + "bbox": [ + 78, + 90, + 467, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5480-5490, 2022. 2", + "[42] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 3", + "[43] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 1", + "[44] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M. Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. ACM Trans. Graph., 40(6), 2021. 3", + "[45] Suntheon Park, Minjung Son, Seokhwan Jang, Young Chun Ahn, Ji-Yeon Kim, and Nahiyup Kang. Temporal interpolation is all you need for dynamic neural radiance fields. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4212-4221, 2023. 3", + "[46] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10318-10327, 2021. 1, 3", + "[47] Christian Reiser, Rick Szeliski, Dor Verbin, Pratul Srinivasan, Ben Mildenhall, Andreas Geiger, Jon Barron, and Peter Hedman. Merf: Memory-efficient radiance fields for real-time view synthesis in unbounded scenes. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 2", + "[48] Sara Fridovich-Keil and Giacomo Meanti, Frederik Rahbæk Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In CVPR, 2023. 1, 3, 6, 7", + "[49] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 8", + "[50] Heung-Yeung Shum and Li-Wei He. Rendering with concentric mosaics. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 299–306, 1999. 2", + "[51] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 3, 6, 7", + "[52] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5449-5459, 2022. 2, 3", + "[53] Jiakai Sun, Zhanjie Zhang, Jiafu Chen, Guangyuan Li, Boyan Ji, Lei Zhao, and Wei Xing. Vgos: Voxel grid opti" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "20684", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "mization for view synthesis from sparse inputs. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, IJCAI-23, pages 1414-1422. International Joint Conferences on Artificial Intelligence Organization, 2023. Main Track. 2", + "[54] Carlo Tomasi and Takeo Kanade. Shape and motion from image streams under orthography: a factorization method. International journal of computer vision, 9:137-154, 1992. 4", + "[55] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12959-12970, 2021. 3", + "[56] Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T. Barron, and Pratul P. Srinivasan. Ref-NeRF: Structured view-dependent appearance for neural radiance fields. CVPR, 2022. 2", + "[57] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, Yafei Song, and Huaping Liu. Mixed neural voxels for fast multiview video synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19706-19716, 2023. 3, 6, 7", + "[58] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. 
Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 3", + "[59] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 76-87, 2023. 3", + "[60] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 76-87, 2023. 1, 7", + "[61] Qianqian Wang, Yen-Yu Chang, Ruojin Cai, Zhengqi Li, Bharath Hariharan, Aleksander Holynski, and Noah Snavely. Tracking everything everywhere all at once. In International Conference on Computer Vision, 2023. 3", + "[62] Chung-Yi Weng, Brian Curless, Pratul P Srinivasan, Jonathan T Barron, and Ira Kemelmacher-Shlizerman. Humanerf: Free-viewpoint rendering of moving people from monocular video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16210-16220, 2022. 3", + "[63] Felix Wimbauer, Nan Yang, Christian Rupprecht, and Daniel Cremers. Behind the scenes: Density fields for single view reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9076-9086, 2023. 2", + "[64] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time" + ], + "bbox": [ + 78, + 92, + 468, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "view synthesis with neural basis expansion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8534-8543, 2021. 2", + "[65] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 4", + "[66] Jamie Wynn and Daniyar Turmukhambetov. Diffusionerf: Regularizing neural radiance fields with denoising diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4180-4189, 2023. 2", + "[67] Wenqi Xian, Jia-Bin Huang, Johannes Kopf, and Changil Kim. Space-time neural irradiance fields for free-viewpoint video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9421-9431, 2021. 1", + "[68] Gengshan Yang, Minh Vo, Natalia Neverova, Deva Ramanan, Andrea Vedaldi, and Hanbyul Joo. Banmo: Building animatable 3d neural models from many casual videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2863-2873, 2022. 3", + "[69] Jiawei Yang, Marco Pavone, and Yue Wang. Freenerf: Improving few-shot neural rendering with free frequency regularization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8254-8263, 2023. 2", + "[70] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 4", + "[71] Zeyu Yang, Hongye Yang, Zijie Pan, and Li Zhang. Realtime photorealistic dynamic scene representation and rendering with 4d gaussian splatting. 
2024. 4", + "[72] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 2", + "[73] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2", + "[74] Fuqiang Zhao, Wei Yang, Jiakai Zhang, Pei Lin, Yingliang Zhang, Jingyi Yu, and Lan Xu. Humannerf: Efficiently generated human radiance field from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7743-7753, 2022. 3", + "[75] C Lawrence Zitnick, Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM transactions on graphics (TOG), 23(3):600-608, 2004. 1, 2", + "[76] Matthias Zwicker, Hanspeter Pfister, Jeroen Van Baar, and Markus Gross. Ewa volume splatting. In Proceedings Visualization, 2001. VIS'01., pages 29-538. IEEE, 2001. 4" + ], + "bbox": [ + 503, + 92, + 890, + 868 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "20685", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/94b406dc-a25e-4b49-8259-ce68b53e5886_model.json b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/94b406dc-a25e-4b49-8259-ce68b53e5886_model.json new file mode 100644 index 0000000000000000000000000000000000000000..de7ada58e4db13044cd06a22cedf1fa9dfdb8b5d --- /dev/null +++ b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/94b406dc-a25e-4b49-8259-ce68b53e5886_model.json @@ -0,0 +1,2829 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.1, + 0.13, + 0.873, + 0.177 + ], + "angle": 0, + "content": "3DGStream: On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos" + }, + { + "type": "text", + "bbox": [ + 0.147, + 0.204, + 0.831, + 0.277 + ], + "angle": 0, + "content": "Jiakai Sun, Han Jiao, Guangyuan Li, Zhanjie Zhang, Lei Zhao*, Wei Xing* Zhejiang University {csjk, csjh, cslgy, cszzj, cszhl, WXING}@zju.edu.cn https://sjojak.github.io/3dgstream" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.309, + 0.314, + 0.327 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.342, + 0.474, + 0.675 + ], + "angle": 0, + "content": "Constructing photo-realistic Free-Viewpoint Videos (FVVs) of dynamic scenes from multi-view videos remains a challenging endeavor. Despite the remarkable advancements achieved by current neural rendering techniques, these methods generally require complete video sequences for offline training and are not capable of real-time rendering. 
To address these constraints, we introduce 3DGStream, a method designed for efficient FVV streaming of real-world dynamic scenes. Our method achieves fast on-the-fly per-frame reconstruction within 12 seconds and real-time rendering at 200 FPS. Specifically, we utilize 3D Gaussians (3DGs) to represent the scene. Instead of the naive approach of directly optimizing 3DGs per-frame, we employ a compact Neural Transformation Cache (NTC) to model the translations and rotations of 3DGs, markedly reducing the training time and storage required for each FVV frame. Furthermore, we propose an adaptive 3DG addition strategy to handle emerging objects in dynamic scenes. Experiments demonstrate that 3DGStream achieves competitive performance in terms of rendering speed, image quality, training time, and model storage when compared with state-of-the-art methods." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.685, + 0.21, + 0.701 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.711, + 0.47, + 0.878 + ], + "angle": 0, + "content": "Constructing Free-Viewpoint Videos (FVVs) from videos captured by a set of known-poses cameras from multiple views remains a frontier challenge within the domains of computer vision and graphics. The potential value and application prospects of this task in the VR/AR/XR domains have attracted much research. Traditional approaches predominantly fall into two categories: geometry-based methods that explicitly reconstruct dynamic graphics primitives [15, 17], and image-based methods that obtain new views through interpolation [7, 75]. However, these conventional methods struggle to handle real-world scenes charac" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.309, + 0.697, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.423, + 0.692, + 0.435 + ], + "angle": 0, + "content": "(a) I-NGP [40]: Per-frame training" + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.309, + 0.892, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.7, + 0.423, + 0.889, + 0.435 + ], + "angle": 0, + "content": "(b) HyperReel [1]: Offline training" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.436, + 0.697, + 0.548 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.55, + 0.694, + 0.563 + ], + "angle": 0, + "content": "(c) StreamRF [29]: Online training" + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.436, + 0.892, + 0.548 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.726, + 0.55, + 0.863, + 0.562 + ], + "angle": 0, + "content": "(d) Ours: Online training" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.576, + 0.892, + 0.647 + ], + "angle": 0, + "content": "Figure 1. Comparison on the flame steak scene of the N3DV dataset [31]. The training time, requisite storage, and PSNR are computed as averages over the whole video. Our method stands out by the ability of fast online training and real-time rendering, standing competitive in both model storage and image quality." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.657, + 0.814, + 0.673 + ], + "angle": 0, + "content": "terized by complex geometries and appearance." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.673, + 0.893, + 0.854 + ], + "angle": 0, + "content": "In recent years, Neural Radiance Fields (NeRFs) [36] has garnered significant attention due to its potent capabilities in synthesizing novel views as a 3D volumetric representation. A succession of NeRF-like works [19, 29, 31-33, 43-46, 48, 60, 67] further propelled advancements in constructing FVVs on dynamic scenes. Nonetheless, the vast majority of NeRF-like FVV construction methods encountered two primary limitations: (1) they typically necessitate complete video sequences for time-consuming offline training, meaning they can replay dynamic scenes but are unable to stream them, and (2) they generally fail to achieve real-time rendering, thereby hindering practical applications." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Recently, Kerbl et al. [26] have achieved real-time radiance field rendering using 3D Gaussians (3DGs), thus enabling the instant synthesis of novel views in static scenes" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.228, + 0.9 + ], + "angle": 0, + "content": "*Corresponding authors." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20675" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.092, + 0.462, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.324, + 0.47, + 0.407 + ], + "angle": 0, + "content": "Figure 2. Comparison of our method with other methods on the N3DV dataset [31]. \\(\\square\\) denotes training from scratch per frame, \\(\\triangle\\) represents offline training on complete video sequences, and \\(\\bigcirc\\) signifies online training on video streams. While achieving online training, our method reaches state-of-the-art performance in both rendering speed and overall training time." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.417, + 0.47, + 0.718 + ], + "angle": 0, + "content": "with just minutes of training. Inspired by this breakthrough, we propose 3DGStream, a method that utilizes 3DGs to construct Free-Viewpoint Videos (FVVs) of dynamic scenes. Specifically, we first train the initial 3DGs on the multi-view frames at timestep 0. Then, for each timestep \\( i \\), we use the 3DGs of previous timestep \\( i - 1 \\) as initialization and pass it to a two-stage pipeline. (1) In Stage 1, we train a Neural Transformation Cache (NTC) to model the transformations of 3DGs. (2) Then in the Stage 2, we use an adaptive 3DG addition strategy to handle emerging objects by spawning frame-specific additional 3DGs near these objects and optimize them along with periodic splitting and pruning. After the two-stage pipeline concludes, we use both the 3DGs transformed by the NTC and the additional 3DGs for rendering at the current timestep \\( i \\), with only the former carrying over for initialization of the subsequent timestep. This design significantly reduces the storage requirements for the FVV, as we only need to store the per-frame NTCs and frame-specific additions, rather than all 3DGs for each frame." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.47, + 0.902 + ], + "angle": 0, + "content": "3DGStream is capable of rendering photo-realistic FVVs at megapixel resolution in real-time, boasting exceptionally rapid per-frame training speeds and limited model storage requirements. As illustrated in Figs. 
1 and 2, compared with static reconstruction methods that train from scratch per-frame and dynamic reconstruction methods that necessitate offline training across the complete video sequences, our approach excels in both training speed and rendering speed, maintaining a competitive edge in image quality and model storage. Furthermore, our method outperforms StreamRF [29], a state-of-the-art technique tackling the exactly same task, in all the relevant aspects." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.092, + 0.793, + 0.106 + ], + "angle": 0, + "content": "To summarize, our contributions include:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.11, + 0.891, + 0.169 + ], + "angle": 0, + "content": "- We propose 3DGStream, a method for on-the-fly construction of photo-realistic, real-time renderable FVV on video streams, eliminating the necessity for lengthy offline training on the entire video sequences." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.17, + 0.892, + 0.259 + ], + "angle": 0, + "content": "- We utilize NTC for modeling the transformations of 3DGs, in conjunction with an adaptive 3DG addition strategy to tackle emerging objects within dynamic scenes. This combination permits meticulous manipulation of 3DGs, accommodating scene alterations with limited performance overhead." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.261, + 0.892, + 0.336 + ], + "angle": 0, + "content": "- We conduct extensive experiments to demonstrate 3DGStream's competitive edge in rendering quality, training time, and requisite storage, as well as its superior rendering speed, compared to existing state-of-the-art dynamic scene reconstruction methods." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.11, + 0.892, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.352, + 0.642, + 0.368 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.377, + 0.83, + 0.393 + ], + "angle": 0, + "content": "2.1. Novel View Synthesis for Static Scenes" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.4, + 0.892, + 0.793 + ], + "angle": 0, + "content": "Synthesizing novel views from a set of images of static scenes is a time-honored problem in the domains of computer vision and graphics. Traditional methods such as Lumigraph [8, 22] or Light-Field [10, 16, 28, 50] achieve new view synthesis through interpolation. In recent years, Neural Radiance Fields (NeRF) [36] has achieved photorealistic synthesizing results by representing the radiance field using a multi-layer perceptron (MLP). A series of subsequent works enhance NeRF's performance in various aspects, such as accelerating training speeds [12, 13, 20, 25, 40, 52], achieving real-time rendering [14, 21, 23, 47, 64, 72], and improving synthesis quality on challenging scenes [2-4, 35, 37, 56] or sparse inputs [11, 41, 53, 63, 66, 69, 73]. Since the vanilla NeRF employs costly volume rendering, necessitating neural network queries for rendering, subsequent approaches faced trade-offs in training time, rendering speed, model storage, image quality, and applicability. To address these challenges, Kerbl et al. [26] propose 3D Gaussian Splatting (3DG-S), which integrates of 3DGs with differentiable point-based rendering. 3DG-S enables real-time high-fidelity view synthesis in large-scale unbounded scenes after brief training periods with modest storage requirements. 
Inspired by this work, we extend its application to the task of constructing FVVs of dynamic scenes. Taking it a step further, we design a on-the-fly training framework to achieve efficient FVV streaming." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.802, + 0.859, + 0.818 + ], + "angle": 0, + "content": "2.2. Free-Viewpoint Videos of Dynamic Scenes" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Constructing FVVs from a set of videos of dynamic scenes is a more challenging and applicable task in the domains of computer vision and graphics. Earlier attempts to address this task pivoted around the construction of dynamic primitives [15, 17] or resorting to interpolation [7, 75]. With the" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20676" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.08, + 0.088, + 0.895, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.3, + 0.895, + 0.399 + ], + "angle": 0, + "content": "Figure 3. Overview of 3DGStream. Given a set of multi-view video streams, 3DGStream aims to construct high-quality FVV stream of the captured dynamic scene on-the-fly. Initially, we optimize a set of 3DGs to represent the scene at timestep 0. For each subsequent timestep \\( i \\), we use the 3DGs from timestep \\( i - 1 \\) as an initialization and then engage in a two-stage training process: Stage 1: We train the Neural Transformation Cache (NTC) to model the translations and rotations of 3DGs. After training, the NTC transforms the 3DGs, preparing them for the next timestep and the next stage in the current timestep. Stage 2: We spawn frame-specific additional 3DGs at potential locations and optimize them along with periodic splitting and pruning. After the two-stage process concludes, both transformed and additional 3DGs are used to render at the current timestep \\( i \\), with only the transformed ones carried into the next timestep." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.406, + 0.471, + 0.512 + ], + "angle": 0, + "content": "success of NeRF-like methods in novel view synthesis for static scenes, a series of works [1, 9, 19, 29-34, 42, 44-46, 48, 51, 55, 57, 59, 61, 62, 68, 74] attempt to use NeRF for constructing FVVs in dynamic scenes. These works can typically be categorized into five types: prior-driven, flow-based, warp-based, those using spatio-temporal inputs, and per-frame training." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.515, + 0.47, + 0.59 + ], + "angle": 0, + "content": "Prior-driven methods [27, 30, 62, 68, 74] leverage parametric models or incorporate additional priors, such as skeletons, to bolster performance on the reconstruction of specific dynamic objects, e.g., humans. However, their application is limited and not generalizable to broader scenes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.593, + 0.47, + 0.699 + ], + "angle": 0, + "content": "Flow-based methods [32, 33] primarily focus on constructing FVVs from monocular videos. By estimating the correspondence of 3D points in consecutive frames, they achieve impressive results. Nonetheless, the intrinsic ill-posedness of monocular reconstructions in intricate dynamic scenes frequently calls for supplementary priors like depth, optical flow, and motion segmentation masks." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.702, + 0.471, + 0.793 + ], + "angle": 0, + "content": "Warp-based methods [1, 42, 44, 46, 51, 55, 61] assume that the dynamics of the scene arise from the deformation of static structures. These methods warp the radiance field of each frame onto one or several canonical frames, achieving notable results. However, the strong assumptions they rely on often prevent them from handling topological variations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Methods that use spatio-temporal inputs [9, 19, 31, 45, 48, 57, 58] enhance radiance fields by adding a temporal dimension, enabling the querying of the radiance field using spatio-temporal coordinates. While these techniques showcase a remarkable ability to synthesize new viewpoints in dynamic scenes, the entangled scene parameters can constrain their adaptability for downstream applications." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.406, + 0.892, + 0.602 + ], + "angle": 0, + "content": "Per-frame training methods [29, 34, 59] adapt to changes in the scene online by leveraging per-frame training, a paradigm we have also adopted. To be specific, StreamRF [29] employs Plenoxels [20] for scene representation and achieves rapid on-the-fly training with minimal storage requirements through techniques like narrow band tuning and difference-based compression. ReRF [59] uses DVGO [52] for scene representation and optimize motion grid and residual grid frame by frame to model interframe discrepancies, enabling high-quality FVV streaming and rendering. Dynamic3DG [34] optimizes simplified 3DGs and integrates physically-based priors for high-quality novel view synthesis on dynamic scenes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.605, + 0.893, + 0.831 + ], + "angle": 0, + "content": "Among the aforementioned works, only NeRF-Player [51], ReRF [59], StreamRF [29], and Dynamic3DG [34] are able to stream FVVs. NeRFPlayer achieves FVV streaming through a decomposition module and a feature streaming module, but it is only able to stream pre-trained models. ReRF and Dynamic3DG are limited to processing scenes with few objects and foreground mask, necessitating minute-level per-frame training times. StreamRF stands out by requiring only a few seconds for each frame's training to construct high-fidelity FVVs on challenging real-world dynamic scenes with compressed model storage. However, it falls short in rendering speed. Contrarily, our approach matches or surpasses StreamRF in training speed, model storage, and image quality, all while achieving real-time rendering at 200 FPS." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.846, + 0.682, + 0.86 + ], + "angle": 0, + "content": "2.3. Concurrent Works" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Except for Dynamic3DG, several concurrent works have extended 3DG-S to represent dynamic scenes. De" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20677" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.473, + 0.213 + ], + "angle": 0, + "content": "formable3DG [70] employs an MLP to model the deformation of 3DGs, while [65] introduces a hexplane-based encoder to enhance the efficiency of deformation query. Meanwhile, [18, 71] lift 3DG to 4DG primitives for dynamic scene representation. 
However, these approaches are limited to offline reconstruction and lack streamable capabilities, whereas our work aims to achieve efficient streaming of FFVs with an online training paradigm." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.218, + 0.407, + 0.236 + ], + "angle": 0, + "content": "3. Background: 3D Gaussian Splitting" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.243, + 0.47, + 0.304 + ], + "angle": 0, + "content": "3D Gaussian Splitting (3DG-S) [26] employs anisotropic 3D Gaussians as an explicit scene representation. Paired with a fast differentiable rasterizer, 3DGs achieves real-time novel view synthesis with only minutes of training." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.311, + 0.408, + 0.328 + ], + "angle": 0, + "content": "3.1. 3D Gaussians as Scene Representation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.335, + 0.47, + 0.365 + ], + "angle": 0, + "content": "A 3DG is defined by a covariance matrix \\(\\Sigma\\) centered at point (i.e., mean) \\(\\mu\\):" + }, + { + "type": "equation", + "bbox": [ + 0.155, + 0.371, + 0.469, + 0.392 + ], + "angle": 0, + "content": "\\[\nG (x; \\mu , \\Sigma) = e ^ {- \\frac {1}{2} (x - \\mu) ^ {T} \\Sigma^ {- 1} (x - \\mu)}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.4, + 0.469, + 0.445 + ], + "angle": 0, + "content": "To ensure positive semi-definiteness during optimization, the covariance matrix \\(\\Sigma\\) is decomposed into a rotation matrix \\(R\\) and a scaling matrix \\(S\\):" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.452, + 0.469, + 0.469 + ], + "angle": 0, + "content": "\\[\n\\Sigma = R S S ^ {T} R ^ {T}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.478, + 0.47, + 0.554 + ], + "angle": 0, + "content": "Rotation is conveniently represented by a unit quaternion, while scaling uses a 3D vector. Additionally, each 3DG contains a set of spherical harmonics (SH) coefficients of to represent view-dependent colors, along with an opacity value \\(\\alpha\\), which is used in \\(\\alpha\\)-blending (Eq. (4))." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.561, + 0.429, + 0.577 + ], + "angle": 0, + "content": "3.2. Splitting for Differentiable Rasterization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.584, + 0.469, + 0.615 + ], + "angle": 0, + "content": "For novel view synthesis, 3DG-S [26] project 3DGs to 2D Gaussian (2DG) splats [76]:" + }, + { + "type": "equation", + "bbox": [ + 0.204, + 0.621, + 0.469, + 0.639 + ], + "angle": 0, + "content": "\\[\n\\Sigma^ {\\prime} = J W \\Sigma W ^ {T} J ^ {T}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.647, + 0.469, + 0.768 + ], + "angle": 0, + "content": "Here, \\(\\Sigma^{\\prime}\\) is the covariance matrix in camera coordinate. \\(J\\) is the Jacobian of the affine approximation of the projective transformation, and \\(W\\) is the viewing transformation matrix. By skipping the third row and third column of \\(\\Sigma^{\\prime}\\), we can derive a \\(2\\times 2\\) matrix denoted as \\(\\Sigma_{2d}\\). Furthermore, projecting the 3DG's mean, \\(\\mu\\), into the image space results in a 2D mean, \\(\\mu_{2d}\\). Consequently, this allows us to define the 2DG in the image space as \\(G_{2d}(x;\\mu_{2d},\\Sigma_{2d})\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.769, + 0.469, + 0.8 + ], + "angle": 0, + "content": "Using \\(\\Sigma^{\\prime}\\), the color \\(C\\) of a pixel can be computed by blending the \\(N\\) ordered points overlapping the pixel:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.807, + 0.469, + 0.848 + ], + "angle": 0, + "content": "\\[\nC = \\sum_ {i \\in N} c _ {i} \\alpha_ {i} ^ {\\prime} \\prod_ {j = 1} ^ {i - 1} \\left(1 - \\alpha_ {j} ^ {\\prime}\\right). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Here, \\( c_{i} \\) denotes the view-dependent color of the \\( i \\)-th 3DG. \\( \\alpha_{i}^{\\prime} \\) is determined by multiplying the opacity \\( \\alpha_{i} \\) of the \\( i \\)-th 3DG \\( G \\) with the evaluation of the corresponding 2DG \\( G_{2d} \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.183 + ], + "angle": 0, + "content": "Leveraging a highly-optimized rasterization pipeline coupled with custom CUDA kernels, the training and rendering of 3DG-S are remarkably fast. For instance, for megapixel-scale real-world scenes, just a few minutes of optimization allows 3DGs to achieve photo-realistic visual quality and rendering speeds surpassing 100 FPS." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.195, + 0.591, + 0.21 + ], + "angle": 0, + "content": "4. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.219, + 0.892, + 0.478 + ], + "angle": 0, + "content": "3DGStream constructs photo-realistic FVV streams from multi-view video streams on-the-fly using a per-frame training paradigm. We initiate the process by training 3DGs [26] at timestep 0. For subsequent timesteps, we employ the previous timestep's 3DGs as an initialization and pass them to a two-stage pipeline. Firstly (Sec. 4.1), a Neural Transformation Cache (NTC) is trained to model the transformation for each 3DG. Once the training is finished, we transform the 3DGs and carry the transformed 3DGs to the next timestep. Secondly (Sec. 4.2), we employ an adaptive 3DG addition strategy to handle emerging objects. For each FVV frame, we render views at the current timestep using both the transformed 3DGs and additional 3DGs, while the latter are not passed to the next timestep. Note that we only need to train and store the parameters of the NTC and the additional 3DGs for each subsequent timestep, not all the 3DGs. We depict an overview of our approach in Fig. 3." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.485, + 0.767, + 0.5 + ], + "angle": 0, + "content": "4.1. Neural Transformation Cache" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.509, + 0.892, + 0.629 + ], + "angle": 0, + "content": "For NTC, we seek a structure that is compact, efficient, and adaptive to model the transformations of 3DGs. Compactness is essential to reduce the model storage. Efficiency enhances training and inference speeds. Adaptivity ensures the model focuses more on dynamic regions. Additionally, it would be beneficial if the structure could consider certain priors of dynamic scenes [5, 24, 54], such as the tendency for neighboring parts of an object to have similar motion." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.63, + 0.892, + 0.841 + ], + "angle": 0, + "content": "Inspired by Neural Radiance Caching [39] and I-NGP [40], we employ multi-resolution hash encoding combined with a shallow fully-fused MLP [38] as the NTC. 
Specifically, following I-NGP, we use multi-resolution voxel grids to represent the scene. Voxel grids at each resolution are mapped to a hash table storing a \\(d\\)-dimensional learnable feature vector. For a given 3D position \\(x \\in \\mathbb{R}^3\\), its hash encoding at resolution \\(l\\), denoted as \\(h(x;l) \\in \\mathbb{R}^d\\), is the linear interpolation of the feature vectors corresponding to the eight corners of the surrounding grid. Consequently, its multi-resolution hash encoding \\(h(x) = [h(x;0), h(x;1), \\dots, h(x;L - 1)] \\in \\mathbb{R}^{Ld}\\), where \\(L\\) represents the number of resolution levels. The multi-resolution hash encoding addresses all our requirements for the NTC:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.841, + 0.89, + 0.87 + ], + "angle": 0, + "content": "- Compactness: Hashing effectively reduces the storage space needed for encoding the whole scene." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.89, + 0.9 + ], + "angle": 0, + "content": "- Efficiency: Hash table lookup operates in \\(O(1)\\), and is highly compatible with modern GPUs." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.841, + 0.89, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "footer", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20678" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.468, + 0.152 + ], + "angle": 0, + "content": "- Adaptivity: Hash collisions occur in hash tables at finer resolutions, allowing regions with larger gradients—representing dynamic regions in our context—to drive the optimization." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.152, + 0.468, + 0.212 + ], + "angle": 0, + "content": "- Priors: The combination of linear interpolation and the voxel-grid structure ensures the local smoothness of transformations. Additionally, the multi-resolution approach adeptly merges global and local information." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.092, + 0.468, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.213, + 0.468, + 0.318 + ], + "angle": 0, + "content": "Furthermore, to enhance the NTC's performance with minimal overhead, we utilize a shallow fully-fused MLP [38]. This maps the hash encoding to a 7-dimensional output: the first three dimensions indicate the translation of the 3DG; the remaining dimensions represent the rotation of the 3DG using quaternions. Given multi-resolution hash encoding coupled with MLP, our NTC is formalized as:" + }, + { + "type": "equation", + "bbox": [ + 0.193, + 0.329, + 0.468, + 0.346 + ], + "angle": 0, + "content": "\\[\nd \\mu , d q = M L P (h (\\mu)), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.357, + 0.468, + 0.402 + ], + "angle": 0, + "content": "where \\(\\mu\\) denotes the mean of the input 3DG. We transform the 3DGs based on \\(d\\mu\\) and \\(dq\\). Specifically, the following parameters of the transformed 3DGs are given as:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.402, + 0.468, + 0.431 + ], + "angle": 0, + "content": "- Mean: \\(\\mu' = \\mu + d\\mu\\), where \\(\\mu'\\) is the new mean and \\(+\\) represents vector addition." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.432, + 0.468, + 0.476 + ], + "angle": 0, + "content": "- Rotation: \\( q' = \\text{norm}(q) \\times \\text{norm}(dq) \\), where \\( q' \\) is the new rotation, \\( \\times \\) indicates quaternion multiplication and norm denotes normalization." 
+ }, + { + "type": "text", + "bbox": [ + 0.078, + 0.478, + 0.468, + 0.553 + ], + "angle": 0, + "content": "- SH Coefficients: Upon rotating the 3DG, the SH coefficients should also be adjusted to align with the rotation of the 3DG. Leveraging the rotation invariance of SH, we directly employ SH Rotation to update SHs. Please refer to the supplementary materials (Suppl.) for details." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.402, + 0.468, + 0.553 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.553, + 0.468, + 0.628 + ], + "angle": 0, + "content": "In Stage 1, we transform the 3DGs from the previous frame by NTC and then render with them. The parameters of the NTC is optimized by the loss between the rendered image and the ground truth. Following 3DG-S [26], the loss function is \\( L_{1} \\) combined with a D-SSIM term:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.64, + 0.468, + 0.655 + ], + "angle": 0, + "content": "\\[\nL = (1 - \\lambda) L _ {1} + \\lambda L _ {D - S S I M}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.666, + 0.468, + 0.727 + ], + "angle": 0, + "content": "where \\(\\lambda = 0.2\\) in all our experiments. It should be noted that during the training process, the 3DGs from the previous frame remain frozen and do not undergo any updates. This implies that the input to the NTC remains consistent." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.727, + 0.468, + 0.772 + ], + "angle": 0, + "content": "Additionally, to ensure training stability, we initialize the NTC with warm-up parameters. The loss employed during the warm-up is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.125, + 0.782, + 0.468, + 0.8 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {w a r m} - u p} = \\left\\| d \\mu \\right\\| _ {1} - \\cos^ {2} (\\operatorname {n o r m} (d q), Q), \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.468, + 0.901 + ], + "angle": 0, + "content": "where \\(Q\\) is the identity quaternion. The first term uses the \\(L_{1}\\) norm to ensure the estimated translation approaches zero, while the second term, leveraging cosine similarity, ensures the estimated rotation approximates no rotation. However, given the double-covering property of the unit quaternions, we use the square of the cosine similarity. For" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.168 + ], + "angle": 0, + "content": "each scene, we execute the warm-up solely after the training at timestep 0, using noise-augmented means of the initial 3DGs as input. After 3000 iterations of training (roughly 20 seconds), the parameters are stored and used to initialize the NTCs for all the following timesteps." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.18, + 0.719, + 0.196 + ], + "angle": 0, + "content": "4.2. Adaptive 3DG Addition" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.204, + 0.892, + 0.461 + ], + "angle": 0, + "content": "Relying solely on 3DGs transformations adequately cover a significant portion of real-world dynamic scenes, with translations effectively managing occlusions and disappearances in subsequent timesteps. However, this approach falters when faced with objects not present in the initial frame, such as transient objects like flames or smoke, and new persistent objects like the liquid poured out of a bottle. Since 3DG is an unstructured explicit representation, it's essential to add new 3DGs to model these emerging objects. 
Considering constraints related to model storage requirements and training complexities, it's not feasible to generate an extensive number of additional 3DGs nor allow them to be used in subsequent frames, as this would cause 3DGs to accumulate over time. This necessitates a strategy for swiftly generating a limited number of frame-specific 3DGs to model these emerging objects precisely and thereby enhance the completeness of the scene at the current timestep." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.462, + 0.892, + 0.628 + ], + "angle": 0, + "content": "Firstly, we need to ascertain the locations for the emerging objects. Inspired by 3DG-S [26], we recognized the view-space positional gradients of 3DGs as a key indicator. We observed that for emerging objects, the 3DGs in proximity exhibited large view-space positional gradients. This is attributed to the optimization attempting to 'masquerade' the emerging object by transforming the 3DGs. However, since we prevent the colors of the 3DGs from being updated in Stage 1, this attempt falls short. Nonetheless, they are still transformed to appropriate positions, with large view-space positional gradients." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.63, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Based on the aforementioned observations, we deem it appropriate to introduce additional 3DGs around these high-gradient regions. Moreover, to exhaustively capture every potential location where new objects might emerge, we adopt an adaptive 3DG spawn strategy. Specifically, we track view-space positional gradient during the final training epoch of Stage 1. Once this stage concludes, we select 3DGs that have an average magnitude of view-space position gradients exceeding a relatively low threshold \\(\\tau_{\\text{grad}} = 0.00015\\). For each selected 3DG, the position of the additional 3DG is sampled from \\(X \\sim \\mathcal{N}(\\mu, 2\\Sigma)\\), where \\(\\mu\\) and \\(\\Sigma\\) is the mean and the covariance matrix of the selected 3DG. While we avoid assumptions about the other attributes of the additional 3DGs, improper initializations of SH coefficients and scaling vectors tend to result in an optimization preference for reducing opacity over adjusting these parameters. This causes additional 3DGs to quickly become transparent, thereby failing to capture the emerging objects. 
To mitigate" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20679" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.239, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.192, + 0.238, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.12, + 0.317, + 0.2, + 0.33 + ], + "angle": 0, + "content": "(a) I-NGP [40]" + }, + { + "type": "image", + "bbox": [ + 0.242, + 0.089, + 0.403, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.242, + 0.192, + 0.402, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.275, + 0.317, + 0.371, + 0.33 + ], + "angle": 0, + "content": "(b) HyperReel [1]" + }, + { + "type": "image", + "bbox": [ + 0.406, + 0.089, + 0.565, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.406, + 0.192, + 0.565, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.438, + 0.317, + 0.534, + 0.33 + ], + "angle": 0, + "content": "(c) StreamRF [29]" + }, + { + "type": "image", + "bbox": [ + 0.568, + 0.089, + 0.729, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.568, + 0.192, + 0.728, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.607, + 0.317, + 0.691, + 0.329 + ], + "angle": 0, + "content": "(d) 3DGStream" + }, + { + "type": "image", + "bbox": [ + 0.732, + 0.089, + 0.892, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.732, + 0.192, + 0.892, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.766, + 0.317, + 0.857, + 0.329 + ], + "angle": 0, + "content": "(e) Ground Truth" + }, + { + "type": "image_caption", + "bbox": [ + 0.092, + 0.342, + 0.875, + 0.356 + ], + "angle": 0, + "content": "Figure 4. Qualitative comparisons on the discussion scene of the Meet Room dataset and the sear steak scene of the N3DV dataset." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.362, + 0.468, + 0.513 + ], + "angle": 0, + "content": "
Category | Method | PSNR↑ (dB) | Storage↓ (MB) | Train↓ (mins) | Render↑ (FPS) | Streamable
Static | Plenoxels [20] | 30.77 | 4106 | 23 | 8.3 | ✓
 | I-NGP [40] | 28.62 | 48.2 | 1.3 | 2.9 | ✓
 | 3DG-S [26] | 32.08 | 47.1 | 8.3 | 390 | ✓
Offline | DyNeRF [31] | 29.58† | 0.1 | 260 | 0.02 | ×
 | NeRFPlayer [51] | 30.69 | 17.1 | 1.2 | 0.05 | ✓
 | HexPlane [9] | 31.70 | 0.8 | 2.4 | 0.21 | ×
 | K-Planes [48] | 31.63 | 1.0 | 0.8 | 0.15 | ×
 | HyperReel [1] | 31.10 | 1.2 | 1.8 | 2.00 | ×
 | MixVoxels [57] | 30.80 | 1.7 | 0.27 | 16.7 | ×
Online | StreamRF [29] | 30.68 | 17.7/31.4* | 0.25 | 8.3 | ✓
 | Ours | 31.67 | 7.6/7.8* | 0.20 | 215 | ✓
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.523, + 0.47, + 0.58 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison on the N3DV dataset. The training time, required storage and PSNR are averaged over the whole 300 frames for each scene. \\( {}^{ \\dagger } \\) DyNeRF [31] only report metrics on the flame salmon scene. *Considering the initial model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.596, + 0.469, + 0.702 + ], + "angle": 0, + "content": "this issue, the SH coefficients and scaling vectors of these 3DGs are derived from the selected ones, with rotations set to the identity quaternion \\( \\mathbf{q} = [1,0,0,0] \\) and opacity initialized at 0.1. After spawning, the 3DGs undergo optimization utilizing the same loss function (Eq. (6)) as Stage 1. Note that only the parameters of the additional 3DGs are optimized, while those of the transformed 3DGs remain fixed." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.47, + 0.903 + ], + "angle": 0, + "content": "To guard against local minima and manage the number of additional 3DGs, we implement an adaptive 3DG quantity control strategy. Specifically, in Stage 2, we set a relatively high threshold, \\(\\tau_{\\alpha} = 0.01\\), for the opacity value. At the end of each training epoch, for 3DGs with view-space position gradients exceeding \\(\\tau_{grad}\\), we spawn additional 3DGs nearby to address under-reconstructed regions. These additional 3DGs inherit their rotations and SH coefficients from the original 3DG, but their scaling is adjusted to \\(80\\%\\) of the original, mirroring the 'split' operation described by Kerbl et al. [26]. Subsequently, we discard any additional 3DGs with opacity values below \\(\\tau_{\\alpha}\\) to suppress the growth in the quantity of 3DGs." + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.362, + 0.899, + 0.491 + ], + "angle": 0, + "content": "
Method | PSNR↑ (dB) | Storage↓ (MB) | Train↓ (mins) | Render↑ (FPS)
Plenoxels [20] | 27.15 | 1015 | 14 | 10
I-NGP [40] | 28.10 | 48.2 | 1.1 | 4.1
3DG-S [26] | 31.31 | 21.1 | 2.6 | 571
StreamRF [29] | 26.72 | 5.7/9.0* | 0.17 | 10
Ours | 30.79 | 4.0/4.1* | 0.10 | 288
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.501, + 0.892, + 0.543 + ], + "angle": 0, + "content": "Table 2. Quantitative comparison on the Meet Room dataset. Note that the training time, required storage and PSNR are averaged over the whole 300 frames. *Considering the initial model." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.549, + 0.633, + 0.567 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.574, + 0.602, + 0.589 + ], + "angle": 0, + "content": "5.1. Datasets" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.597, + 0.891, + 0.627 + ], + "angle": 0, + "content": "We conduct experiments on two real-world dynamic scene datasets: N3DV dataset [31] and Meet Room dataset [29]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.628, + 0.892, + 0.718 + ], + "angle": 0, + "content": "N3DV dataset [31] is captured using a multi-view system of 21 cameras, comprises dynamic scenes recorded at a resolution of \\(2704 \\times 2028\\) and 30 FPS. Following previous works [9, 29, 31, 48, 51, 57], we downsample the videos by a factor of two and follow the training and validation camera split provided by [31]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.719, + 0.892, + 0.78 + ], + "angle": 0, + "content": "Meet Room dataset [29] is captured using a 13-camera multi-view system, comprises dynamic scenes recorded at a resolution of \\(1280 \\times 720\\) and 30 FPS. Following [29], we utilize 13 views for training and reserved 1 for testing." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.787, + 0.66, + 0.803 + ], + "angle": 0, + "content": "5.2. Implementation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.892, + 0.902 + ], + "angle": 0, + "content": "We implement 3DGStream upon the codes of 3D Gaussian Splitting (3DG-S) [26], and implement the Neural Transformation Cache (NTC) using tiny-cuda-nn [38]. For the training of initial 3DGs, we fine-tune the learning rates on the N3DV dataset based on the default settings of 3DG-S, and apply them to the Meet Room dataset. For all scenes," + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20680" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.092, + 0.466, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.325, + 0.471, + 0.368 + ], + "angle": 0, + "content": "Figure 5. Comparison of different approaches for modeling the transformation of 3DGs. Conducted on the second frame of the flame salmon video, utilizing identical initial 3DGs." + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.376, + 0.465, + 0.594 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.608, + 0.47, + 0.636 + ], + "angle": 0, + "content": "Figure 6. Comparison of different approaches on the flame salmon scene." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.642, + 0.465, + 0.735 + ], + "angle": 0, + "content": "
Variant | PSNR↑ (dB) | #Additional 3DGs↓
Baseline | 28.39 | 0
Rnd. Spawn | 28.39 | 971.9
w/o Quant. Ctrl. | 28.43 | 8710.8
Full Model | 28.42 | 477.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.745, + 0.47, + 0.787 + ], + "angle": 0, + "content": "Table 3. Ablation study of the Adaptive 3DG Addition strategy on the flame salmon scene. The metrics are averaged over the whole sequence." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.789, + 0.471, + 0.835 + ], + "angle": 0, + "content": "we train the NTC for 150 iterations in Stage 1. and train the additional 3DGs for 100 iterations in Stage 2. Please refer to Suppl. for more details." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.847, + 0.218, + 0.863 + ], + "angle": 0, + "content": "5.3. Comparisons" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Quantitative comparisons. Our quantitative analysis involves benchmarking 3DGStream on the N3DV dataset and" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.089, + 0.631, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.165, + 0.631, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.241, + 0.621, + 0.255 + ], + "angle": 0, + "content": "(a) Result of Stage 1" + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.089, + 0.761, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.165, + 0.761, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.642, + 0.241, + 0.753, + 0.254 + ], + "angle": 0, + "content": "(b) Result of Stage 2" + }, + { + "type": "image", + "bbox": [ + 0.764, + 0.089, + 0.893, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.764, + 0.165, + 0.892, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.781, + 0.242, + 0.874, + 0.254 + ], + "angle": 0, + "content": "(c) Ground Truth" + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.267, + 0.892, + 0.295 + ], + "angle": 0, + "content": "Figure 7. Quantitative results of the ablation study conducted on the flame steak scene and the coffee martini scene." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.308, + 0.893, + 0.595 + ], + "angle": 0, + "content": "Meet Room dataset, comparing it with a range of representative methods. We take Plenoxels [20], I-NGP [40], and 3DG-S [26] as representatives of fast static scene reconstruction methods, training them from scratch for each frame. StreamRF [29], Dynamic3DG [34], and ReRF [60] are designed for online training in dynamic scenes. Owing to the limitations of Dynamic3DG and ReRF, which necessitate foreground masks and are confined to scenes with fewer objects, and their minute-level per-frame training times, we select StreamRF selected as the representative for online training methods due to its adaptability and training feasibility on the N3DV and MeetRoom datasets. To demonstrate 3DGStream's competitive image quality, we drew comparisons with the quantitative results reported for the N3DV dataset in the respective papers of DyNeRF [31], NeRFPlayer [51], HexPlane [9], K-Planes [48], HyperReel [1], and MixVoxels [57], all of which are methods for reconstructing dynamic scenes through offline training on entire video sequences." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.597, + 0.895, + 0.852 + ], + "angle": 0, + "content": "In Tab. 
1, we present the averaged rendering speed, training time, required storage, and peak signal-to-noise ratio (PSNR) over all scenes of the N3DV dataset. For each scene, the latter three metrics are computed as averages over the whole 300 frames. Besides, we provide a breakdown of comparisons across all scenes within the N3DV dataset in the Suppl. To demonstrate the generality of our method, we conducted experiments on the MeetRoom dataset, as introduced by StreamRF [29], and performed a quantitative comparison against Plenoxels [20], I-NGP [40], 3DG-S [26], and StreamRF [29]. The results are presented in Tab. 2. As presented in Tabs. 1 and 2, our method demonstrates superiority through fast online training and real-time rendering, concurrently maintaining a competitive edge in terms of model storage and image quality. Furthermore, among the methods capable of streaming FVVs, our model requires the minimal model storage." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Qualitative comparisons. While our approach primarily aims to enhance the efficiency of online FVV construction, as illustrated in Tabs. 1 and 2, it still achieves competitive" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "20681" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.13, + 0.089, + 0.418, + 0.202 + ], + "angle": 0, + "content": "
Step | Overhead (ms) | FPS
Render w/o NTC | 2.56 | 390
+ Query NTC | 0.62 |
+ Transformation | 0.02 |
+ SH Rotation | 1.46 |
Total | 4.66 | 215
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.213, + 0.47, + 0.256 + ], + "angle": 0, + "content": "Table 4. Rendering profiling for the flame salmon scene at megapixel resolution. Note that flame salmon is the most time-consuming to render of all scenes in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.264, + 0.47, + 0.4 + ], + "angle": 0, + "content": "image quality. In Fig. 4, we present a qualitative comparison with I-NGP [40], HyperReel [1], and StreamRF [29] across scenes on the N3DV dataset [31] and the Meet Room dataset [29], with a special emphasis on dynamic objects such as faces, hands, and tongs, as well as intricate objects like labels and statues. It is evident that our method faithfully captures the dynamics of the scene without sacrificing the ability to reconstruct intricate objects. Please refer to our project page for more video results." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.41, + 0.206, + 0.424 + ], + "angle": 0, + "content": "5.4. Evaluations" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.433, + 0.47, + 0.719 + ], + "angle": 0, + "content": "Neural Transformation Cache. We utilize distinct approaches to model the transformations of 3DGs from the first to the second frame within the flame salmon video of the N3DV dataset to show the effectiveness of NTC. Fig. 5 shows that, without multi-resolution hash encoding \\((w / o\\) Hash enc.), the MLP faces challenges in modeling transformations effectively. Additionally, without the warm-up \\((w / o\\) Warm-up), it takes more iterations for convergence. Besides, even when compared with the direct optimization of the previous frame's 3DGs (Direct Opt.), NTC demonstrate on-par performance. In Fig. 6, We present the results of different approaches applied across the entire flame salmon video, excluding the first frame (i.e., Frame 0). \\(w / o\\) Hash enc. and \\(w / o\\) Warm-up. are not able to converge swiftly, resulting in accumulating errors as the sequence progresses. Direct Opt. yields the best outcomes but at the cost of inflated storage. Utilizing NTC, in contrast, delivers comparable results with substantially lower storage overhead by eliminating the need for saving all the 3DGs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Adaptive 3DG Addition. Tab. 3 presents the quantitative results of the ablation study conducted on the flame salmon scene, and more results are presented in Suppl. The base model without Stage 2, and a set of randomly spawned 3DGs (Rnd. Spawn) in equivalent quantities to our spawn strategy, both fail to capture emerging objects. The variant without our quantity control strategy (\\( w/o \\) Quant. Ctrl.) manages to model emerging objects but requires a significantly larger number of additional 3DGs. In contrast, our full model proficiently reconstructs emerging objects using a minimal addition of 3DGs. The ablation study illustrated in Fig. 7 qualitatively showcases the effect of the Adap" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.137 + ], + "angle": 0, + "content": "tive 3DG Addition strategy, highlighting its ability to reconstruct the objects not present in the initial frame, such as coffee in a pot, a dog's tongue, and flames." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.137, + 0.892, + 0.363 + ], + "angle": 0, + "content": "Real-time Rendering. Following 3DG-S [26], we employ the SIBR framework [6] to measure the rendering speed. 
Once all resources required are loaded onto the GPU, the additional overhead of our approach is primarily the time taken to query the NTC and transform the 3DGs. As detailed in Tab. 4, our method benefits from the efficiency of the multi-resolution hash encoding and the fully-fused MLP [38], which facilitate rapid NTC query. Notably, the most time-consuming step is the SH Rotation. However, our experiments indicate that the SH rotation has a minimal impact on the reconstruction quality, which may be attributed to the 3DGs modeling view-dependent colors through alternative mechanisms (e.g., small 3DGs of varying colors surrounding the object) rather than SH coefficients. Nonetheless, we maintain SH rotation for theoretical soundness." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.376, + 0.613, + 0.391 + ], + "angle": 0, + "content": "6. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.402, + 0.892, + 0.553 + ], + "angle": 0, + "content": "The quality of 3DG-S [26] on the initial frame is crucial to 3DGStream. Therefore, we inherit the limitations of 3DG-S, such as high dependence on the initial point cloud. As illustrated in Fig. 7, there are obvious artifacts beyond the windows, attributable to COLMAP's [49] inability to reconstruct distant landscapes. Hence, our method stands to benefit directly from future enhancements to 3DG-S. Moreover, for efficient on-the-fly training, we limit the number of training iterations, which restricts modeling of drastic motion in Stage 1 and complex emerging objects in Stage 2." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.566, + 0.619, + 0.581 + ], + "angle": 0, + "content": "7. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.591, + 0.892, + 0.757 + ], + "angle": 0, + "content": "We propose 3DGStream, an novel method for efficient Free-Viewpoint Video streaming. Based on 3DG-S [26], we utilizes an effective Neural Transformation Cache to capture the motion of objects. In addition, we propose an Adaptive 3DG Addition strategy to accurately model emerging objects in dynamic scenes. The two-stage pipeline of 3DGStream enables the online reconstruction of dynamic scenes in video streams. While ensuring photo-realistic image quality, 3DGStream achieves on-the-fly training (\\(\\sim\\)10s per-frame) and real-time rendering (\\(\\sim\\)200FPS) at megapixel resolution with moderate requisite storage." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.77, + 0.681, + 0.787 + ], + "angle": 0, + "content": "8. Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This work was supported in part by Zhejiang Province Program (2022C01222, 2023C03199, 2023C03201), the National Program of China (62172365, 2021YFF0900604, 19ZDA197), Ningbo Science and Technology Plan Project (022Z167, 2023Z137), and MOE Frontier Science Center for Brain Science & Brain-Machine Integration (Zhejiang University)." + }, + { + "type": "footer", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "20682" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.198 + ], + "angle": 0, + "content": "[1] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 1, 3, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.2, + 0.472, + 0.281 + ], + "angle": 0, + "content": "[2] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.283, + 0.472, + 0.352 + ], + "angle": 0, + "content": "[3] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5470-5479, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.353, + 0.47, + 0.394 + ], + "angle": 0, + "content": "[4] Jonathan T. Barron, Ben Mildenhall, Dor Verbin, Pratul P. Srinivasan, and Peter Hedman. Zip-nerf: Anti-aliased grid-based neural radiance fields. ICCV, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.395, + 0.47, + 0.45 + ], + "angle": 0, + "content": "[5] Michael J Black and Paul Anandan. The robust estimation of multiple motions: Parametric and piecewise-smooth flow fields. Computer vision and image understanding, 63(1):75-104, 1996. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.452, + 0.47, + 0.507 + ], + "angle": 0, + "content": "[6] Sebastien Bonopera, Jerome Esnault, Siddhant Prakash, Simon Rodriguez, Theo Thonat, Mehdi Benadel, Gaurav Chaurasia, Julien Philip, and George Drettakis. sibr: A system for image based rendering, 2020. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.508, + 0.47, + 0.577 + ], + "angle": 0, + "content": "[7] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.578, + 0.47, + 0.62 + ], + "angle": 0, + "content": "[8] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In SIGGRAPH, pages 425-432, 2001. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.621, + 0.47, + 0.647 + ], + "angle": 0, + "content": "[9] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. CVPR, 2023. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.649, + 0.47, + 0.704 + ], + "angle": 0, + "content": "[10] Jin-Xiang Chai, Xin Tong, Shing-Chow Chan, and Heung-Yeung Shum. Plenoptic sampling. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 307-318, 2000. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.47, + 0.773 + ], + "angle": 0, + "content": "[11] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14124-14133, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.47, + 0.816 + ], + "angle": 0, + "content": "[12] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In European Conference on Computer Vision (ECCV), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.47, + 0.859 + ], + "angle": 0, + "content": "[13] Anpei Chen, Zexiang Xu, Xinyue Wei, Siyu Tang, Hao Su, and Andreas Geiger. Dictionary fields: Learning a neural basis decomposition. ACM Trans. Graph., 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.47, + 0.902 + ], + "angle": 0, + "content": "[14] Zhiqin Chen, Thomas Funkhouser, Peter Hedman, and Andrea Tagliasacchi. Mobilenerf: Exploiting the polygon rasterization pipeline for efficient neural field rendering on mo" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "bile architectures. In The Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[15] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (TOG), 34(4):69, 2015. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.192, + 0.892, + 0.233 + ], + "angle": 0, + "content": "[16] Abe Davis, Marc Levoy, and Fredo Durand. Unstructured light fields. Comput. Graph. Forum, 31(2pt1):305-314, 2012. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.235, + 0.892, + 0.304 + ], + "angle": 0, + "content": "[17] Mingsong Dou, Philip Davidson, Sean Ryan Fanello, Sameh Khamis, Adarsh Kowdle, Christoph Rhemann, Vladimir Tankovich, and Shahram Izadi. Motion2fusion: Real-time volumetric performance capture. ACM Trans. Graph., 36(6): 246:1-246:16, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.305, + 0.892, + 0.361 + ], + "angle": 0, + "content": "[18] Yuanxing Duan, Fangyin Wei, Qiyu Dai, Yuhang He, Wenzheng Chen, and Baoquan Chen. 4d gaussian splatting: Towards efficient novel view synthesis for dynamic scenes. arXiv preprint arXiv:2402.03307, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.362, + 0.892, + 0.419 + ], + "angle": 0, + "content": "[19] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In SIGGRAPH Asia 2022 Conference Papers, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.42, + 0.894, + 0.489 + ], + "angle": 0, + "content": "[20] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.49, + 0.892, + 0.559 + ], + "angle": 0, + "content": "[21] Stephan J. Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. Fastnerf: High-fidelity neural rendering at 200fps. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14346-14355, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.561, + 0.892, + 0.631 + ], + "angle": 0, + "content": "[22] Steven J. Gortler, Radek Grzesczuk, Richard Szeliski, and Michael F. Cohen. The lumigraph. In Proceedings of the 23rd Annual Conference on Computer Graphics and Interactive Techniques, page 43-54, New York, NY, USA, 1996. Association for Computing Machinery. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.632, + 0.892, + 0.7 + ], + "angle": 0, + "content": "[23] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5855-5864, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.702, + 0.892, + 0.731 + ], + "angle": 0, + "content": "[24] Berthold KP Horn and Brian G Schunck. Determining optical flow. Artificial intelligence, 17(1-3):185-203, 1981. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.732, + 0.892, + 0.786 + ], + "angle": 0, + "content": "[25] Wenbo Hu, Yuling Wang, Lin Ma, Bangbang Yang, Lin Gao, Xiao Liu, and Yuewen Ma. Tri-miprf: Tri-mip representation for efficient anti-aliasing neural radiance fields. In ICCV, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.788, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[26] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics, 42 (4), 2023. 1, 2, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.845, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[27] Tobias Kirschstein, Shenhan Qian, Simon Giebenhain, Tim Walter, and Matthias Nießner. Nersemble: Multi-view radiance field reconstruction of human heads. arXiv preprint arXiv:2305.03027, 2023. 3" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20683" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.468, + 0.12 + ], + "angle": 0, + "content": "[28] Marc Levoy and Pat Hanrahan. Light field rendering. In SIGGRAPH, pages 31-42, 1996. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.122, + 0.468, + 0.163 + ], + "angle": 0, + "content": "[29] Lingzhi Li, Zhen Shen, Zhongshu Wang, Li Shen, and Ping Tan. Streaming radiance fields for 3d video synthesis. In NeurIPS, 2022. 1, 2, 3, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.165, + 0.468, + 0.234 + ], + "angle": 0, + "content": "[30] Ruilong Li, Julian Tanke, Minh Vo, Michael Zollhöfer, Jürgen Gall, Angjoo Kanazawa, and Christoph Lassner. Tava: Template-free animatable volumetric actors. In European Conference on Computer Vision, pages 419-436. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.236, + 0.468, + 0.331 + ], + "angle": 0, + "content": "[31] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 
1, 2, 3, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.334, + 0.468, + 0.402 + ], + "angle": 0, + "content": "[32] Zhengqi Li, Simon Niklaus, Noah Snively, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.405, + 0.468, + 0.473 + ], + "angle": 0, + "content": "[33] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.476, + 0.468, + 0.517 + ], + "angle": 0, + "content": "[34] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In 3DV, 2024. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.519, + 0.468, + 0.572 + ], + "angle": 0, + "content": "[35] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.576, + 0.468, + 0.644 + ], + "angle": 0, + "content": "[36] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.647, + 0.468, + 0.728 + ], + "angle": 0, + "content": "[37] Ben Mildenhall, Peter Hedman, Ricardo Martin-Brualla, Pratul P Srinivasan, and Jonathan T Barron. Nerf in the dark: High dynamic range view synthesis from noisy raw images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16190-16199, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.731, + 0.385, + 0.744 + ], + "angle": 0, + "content": "[38] Thomas Müller. tiny-cuda-nn, 2021. 4, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.746, + 0.468, + 0.8 + ], + "angle": 0, + "content": "[39] Thomas Müller, Fabrice Rousselle, Jan Novák, and Alexander Keller. Real-time neural radiance caching for path tracing. ACM Transactions on Graphics (TOG), 40(4):1-16, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.803, + 0.468, + 0.857 + ], + "angle": 0, + "content": "[40] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, 2022. 1, 2, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.86, + 0.468, + 0.901 + ], + "angle": 0, + "content": "[41] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.468, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5480-5490, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.137, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[42] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.208, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[43] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.278, + 0.892, + 0.346 + ], + "angle": 0, + "content": "[44] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M. Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. ACM Trans. Graph., 40(6), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.349, + 0.892, + 0.417 + ], + "angle": 0, + "content": "[45] Suntheon Park, Minjung Son, Seokhwan Jang, Young Chun Ahn, Ji-Yeon Kim, and Nahiyup Kang. Temporal interpolation is all you need for dynamic neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4212-4221, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.42, + 0.892, + 0.487 + ], + "angle": 0, + "content": "[46] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10318-10327, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.49, + 0.892, + 0.559 + ], + "angle": 0, + "content": "[47] Christian Reiser, Rick Szeliski, Dor Verbin, Pratul Srinivasan, Ben Mildenhall, Andreas Geiger, Jon Barron, and Peter Hedman. Merf: Memory-efficient radiance fields for real-time view synthesis in unbounded scenes. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.562, + 0.892, + 0.615 + ], + "angle": 0, + "content": "[48] Sara Fridovich-Keil and Giacomo Meanti, Frederik Rahbæk Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In CVPR, 2023. 1, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.618, + 0.892, + 0.659 + ], + "angle": 0, + "content": "[49] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.661, + 0.892, + 0.716 + ], + "angle": 0, + "content": "[50] Heung-Yeung Shum and Li-Wei He. Rendering with concentric mosaics. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 299–306, 1999. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.719, + 0.892, + 0.8 + ], + "angle": 0, + "content": "[51] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. 
IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.803, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[52] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5449-5459, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[53] Jiakai Sun, Zhanjie Zhang, Jiafu Chen, Guangyuan Li, Boyan Ji, Lei Zhao, and Wei Xing. Vgos: Voxel grid opti" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20684" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.093, + 0.47, + 0.161 + ], + "angle": 0, + "content": "mization for view synthesis from sparse inputs. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, IJCAI-23, pages 1414-1422. International Joint Conferences on Artificial Intelligence Organization, 2023. Main Track. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.164, + 0.47, + 0.217 + ], + "angle": 0, + "content": "[54] Carlo Tomasi and Takeo Kanade. Shape and motion from image streams under orthography: a factorization method. International journal of computer vision, 9:137-154, 1992. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.221, + 0.469, + 0.304 + ], + "angle": 0, + "content": "[55] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12959-12970, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.307, + 0.469, + 0.359 + ], + "angle": 0, + "content": "[56] Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T. Barron, and Pratul P. Srinivasan. Ref-NeRF: Structured view-dependent appearance for neural radiance fields. CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.363, + 0.469, + 0.431 + ], + "angle": 0, + "content": "[57] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, Yafei Song, and Huaping Liu. Mixed neural voxels for fast multiview video synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19706-19716, 2023. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.434, + 0.469, + 0.515 + ], + "angle": 0, + "content": "[58] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.519, + 0.469, + 0.587 + ], + "angle": 0, + "content": "[59] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 76-87, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.59, + 0.469, + 0.659 + ], + "angle": 0, + "content": "[60] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 76-87, 2023. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.661, + 0.469, + 0.715 + ], + "angle": 0, + "content": "[61] Qianqian Wang, Yen-Yu Chang, Ruojin Cai, Zhengqi Li, Bharath Hariharan, Aleksander Holynski, and Noah Snavely. Tracking everything everywhere all at once. In International Conference on Computer Vision, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.718, + 0.469, + 0.799 + ], + "angle": 0, + "content": "[62] Chung-Yi Weng, Brian Curless, Pratul P Srinivasan, Jonathan T Barron, and Ira Kemelmacher-Shlizerman. Humanerf: Free-viewpoint rendering of moving people from monocular video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16210-16220, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.803, + 0.469, + 0.87 + ], + "angle": 0, + "content": "[63] Felix Wimbauer, Nan Yang, Christian Rupprecht, and Daniel Cremers. Behind the scenes: Density fields for single view reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9076-9086, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.874, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[64] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "view synthesis with neural basis expansion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8534-8543, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[65] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.193, + 0.892, + 0.26 + ], + "angle": 0, + "content": "[66] Jamie Wynn and Daniyar Turmukhambetov. Diffusionerf: Regularizing neural radiance fields with denoising diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4180-4189, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.263, + 0.892, + 0.331 + ], + "angle": 0, + "content": "[67] Wenqi Xian, Jia-Bin Huang, Johannes Kopf, and Changil Kim. Space-time neural irradiance fields for free-viewpoint video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9421-9431, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.334, + 0.892, + 0.402 + ], + "angle": 0, + "content": "[68] Gengshan Yang, Minh Vo, Natalia Neverova, Deva Ramanan, Andrea Vedaldi, and Hanbyul Joo. Banmo: Building animatable 3d neural models from many casual videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2863-2873, 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.404, + 0.892, + 0.471 + ], + "angle": 0, + "content": "[69] Jiawei Yang, Marco Pavone, and Yue Wang. Freenerf: Improving few-shot neural rendering with free frequency regularization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8254-8263, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.474, + 0.892, + 0.53 + ], + "angle": 0, + "content": "[70] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.531, + 0.892, + 0.572 + ], + "angle": 0, + "content": "[71] Zeyu Yang, Hongye Yang, Zijie Pan, and Li Zhang. Realtime photorealistic dynamic scene representation and rendering with 4d gaussian splatting. 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.574, + 0.892, + 0.641 + ], + "angle": 0, + "content": "[72] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.644, + 0.892, + 0.685 + ], + "angle": 0, + "content": "[73] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.687, + 0.892, + 0.756 + ], + "angle": 0, + "content": "[74] Fuqiang Zhao, Wei Yang, Jiakai Zhang, Pei Lin, Yingliang Zhang, Jingyi Yu, and Lan Xu. Humannerf: Efficiently generated human radiance field from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7743-7753, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.758, + 0.892, + 0.825 + ], + "angle": 0, + "content": "[75] C Lawrence Zitnick, Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM transactions on graphics (TOG), 23(3):600-608, 2004. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.828, + 0.892, + 0.869 + ], + "angle": 0, + "content": "[76] Matthias Zwicker, Hanspeter Pfister, Jeroen Van Baar, and Markus Gross. Ewa volume splatting. In Proceedings Visualization, 2001. VIS'01., pages 29-538. IEEE, 2001. 
4" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.869 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "20685" + } + ] +] \ No newline at end of file diff --git a/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/94b406dc-a25e-4b49-8259-ce68b53e5886_origin.pdf b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/94b406dc-a25e-4b49-8259-ce68b53e5886_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0d41364e5f6858c3d2d95d12f94572963bb6964b --- /dev/null +++ b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/94b406dc-a25e-4b49-8259-ce68b53e5886_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a7ae6e536f2d133f4dcc240e8fa629dc6179b084974e59412605f41c8604ba1 +size 3124234 diff --git a/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/full.md b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/full.md new file mode 100644 index 0000000000000000000000000000000000000000..c43e02ef7fc34f57e651c6c683520923472a8554 --- /dev/null +++ b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/full.md @@ -0,0 +1,376 @@ +# 3DGStream: On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos + +Jiakai Sun, Han Jiao, Guangyuan Li, Zhanjie Zhang, Lei Zhao*, Wei Xing* Zhejiang University {csjk, csjh, cslgy, cszzj, cszhl, WXING}@zju.edu.cn https://sjojak.github.io/3dgstream + +# Abstract + +Constructing photo-realistic Free-Viewpoint Videos (FVVs) of dynamic scenes from multi-view videos remains a challenging endeavor. Despite the remarkable advancements achieved by current neural rendering techniques, these methods generally require complete video sequences for offline training and are not capable of real-time rendering. To address these constraints, we introduce 3DGStream, a method designed for efficient FVV streaming of real-world dynamic scenes. Our method achieves fast on-the-fly per-frame reconstruction within 12 seconds and real-time rendering at 200 FPS. Specifically, we utilize 3D Gaussians (3DGs) to represent the scene. Instead of the naive approach of directly optimizing 3DGs per-frame, we employ a compact Neural Transformation Cache (NTC) to model the translations and rotations of 3DGs, markedly reducing the training time and storage required for each FVV frame. Furthermore, we propose an adaptive 3DG addition strategy to handle emerging objects in dynamic scenes. Experiments demonstrate that 3DGStream achieves competitive performance in terms of rendering speed, image quality, training time, and model storage when compared with state-of-the-art methods. + +# 1. Introduction + +Constructing Free-Viewpoint Videos (FVVs) from videos captured by a set of known-poses cameras from multiple views remains a frontier challenge within the domains of computer vision and graphics. The potential value and application prospects of this task in the VR/AR/XR domains have attracted much research. 
Traditional approaches predominantly fall into two categories: geometry-based methods that explicitly reconstruct dynamic graphics primitives [15, 17], and image-based methods that obtain new views through interpolation [7, 75]. However, these conventional methods struggle to handle real-world scenes charac + +![](images/1b3ede0eaefa4c22f620afdbbb3556bf275be31ca051bf8102762170180f27b2.jpg) +(a) I-NGP [40]: Per-frame training + +![](images/0918a36a140434114d446465e079766e2a98597dd841d72a4d8d38640297c495.jpg) +(b) HyperReel [1]: Offline training + +![](images/f51b4a235d5650998f799be73836b78465837a18a055f0a0fdf2bb9a7b1f3df4.jpg) +(c) StreamRF [29]: Online training + +![](images/eee2b6190201fb5877944c828dadfa88b4549390a1c435bd585f5a976170d356.jpg) +(d) Ours: Online training +Figure 1. Comparison on the flame steak scene of the N3DV dataset [31]. The training time, requisite storage, and PSNR are computed as averages over the whole video. Our method stands out by the ability of fast online training and real-time rendering, standing competitive in both model storage and image quality. + +terized by complex geometries and appearance. + +In recent years, Neural Radiance Fields (NeRFs) [36] has garnered significant attention due to its potent capabilities in synthesizing novel views as a 3D volumetric representation. A succession of NeRF-like works [19, 29, 31-33, 43-46, 48, 60, 67] further propelled advancements in constructing FVVs on dynamic scenes. Nonetheless, the vast majority of NeRF-like FVV construction methods encountered two primary limitations: (1) they typically necessitate complete video sequences for time-consuming offline training, meaning they can replay dynamic scenes but are unable to stream them, and (2) they generally fail to achieve real-time rendering, thereby hindering practical applications. + +Recently, Kerbl et al. [26] have achieved real-time radiance field rendering using 3D Gaussians (3DGs), thus enabling the instant synthesis of novel views in static scenes + +![](images/f3f37646edea7c3bfd3112b4a5f4973d55a975aa9c3e9700e92fb8305f9195d4.jpg) +Figure 2. Comparison of our method with other methods on the N3DV dataset [31]. $\square$ denotes training from scratch per frame, $\triangle$ represents offline training on complete video sequences, and $\bigcirc$ signifies online training on video streams. While achieving online training, our method reaches state-of-the-art performance in both rendering speed and overall training time. + +with just minutes of training. Inspired by this breakthrough, we propose 3DGStream, a method that utilizes 3DGs to construct Free-Viewpoint Videos (FVVs) of dynamic scenes. Specifically, we first train the initial 3DGs on the multi-view frames at timestep 0. Then, for each timestep $i$ , we use the 3DGs of previous timestep $i - 1$ as initialization and pass it to a two-stage pipeline. (1) In Stage 1, we train a Neural Transformation Cache (NTC) to model the transformations of 3DGs. (2) Then in the Stage 2, we use an adaptive 3DG addition strategy to handle emerging objects by spawning frame-specific additional 3DGs near these objects and optimize them along with periodic splitting and pruning. After the two-stage pipeline concludes, we use both the 3DGs transformed by the NTC and the additional 3DGs for rendering at the current timestep $i$ , with only the former carrying over for initialization of the subsequent timestep. 
This design significantly reduces the storage requirements for the FVV, as we only need to store the per-frame NTCs and frame-specific additions, rather than all 3DGs for each frame. + +3DGStream is capable of rendering photo-realistic FVVs at megapixel resolution in real-time, boasting exceptionally rapid per-frame training speeds and limited model storage requirements. As illustrated in Figs. 1 and 2, compared with static reconstruction methods that train from scratch per-frame and dynamic reconstruction methods that necessitate offline training across the complete video sequences, our approach excels in both training speed and rendering speed, maintaining a competitive edge in image quality and model storage. Furthermore, our method outperforms StreamRF [29], a state-of-the-art technique tackling the exactly same task, in all the relevant aspects. + +To summarize, our contributions include: + +- We propose 3DGStream, a method for on-the-fly construction of photo-realistic, real-time renderable FVV on video streams, eliminating the necessity for lengthy offline training on the entire video sequences. +- We utilize NTC for modeling the transformations of 3DGs, in conjunction with an adaptive 3DG addition strategy to tackle emerging objects within dynamic scenes. This combination permits meticulous manipulation of 3DGs, accommodating scene alterations with limited performance overhead. +- We conduct extensive experiments to demonstrate 3DGStream's competitive edge in rendering quality, training time, and requisite storage, as well as its superior rendering speed, compared to existing state-of-the-art dynamic scene reconstruction methods. + +# 2. Related Work + +# 2.1. Novel View Synthesis for Static Scenes + +Synthesizing novel views from a set of images of static scenes is a time-honored problem in the domains of computer vision and graphics. Traditional methods such as Lumigraph [8, 22] or Light-Field [10, 16, 28, 50] achieve new view synthesis through interpolation. In recent years, Neural Radiance Fields (NeRF) [36] has achieved photorealistic synthesizing results by representing the radiance field using a multi-layer perceptron (MLP). A series of subsequent works enhance NeRF's performance in various aspects, such as accelerating training speeds [12, 13, 20, 25, 40, 52], achieving real-time rendering [14, 21, 23, 47, 64, 72], and improving synthesis quality on challenging scenes [2-4, 35, 37, 56] or sparse inputs [11, 41, 53, 63, 66, 69, 73]. Since the vanilla NeRF employs costly volume rendering, necessitating neural network queries for rendering, subsequent approaches faced trade-offs in training time, rendering speed, model storage, image quality, and applicability. To address these challenges, Kerbl et al. [26] propose 3D Gaussian Splatting (3DG-S), which integrates of 3DGs with differentiable point-based rendering. 3DG-S enables real-time high-fidelity view synthesis in large-scale unbounded scenes after brief training periods with modest storage requirements. Inspired by this work, we extend its application to the task of constructing FVVs of dynamic scenes. Taking it a step further, we design a on-the-fly training framework to achieve efficient FVV streaming. + +# 2.2. Free-Viewpoint Videos of Dynamic Scenes + +Constructing FVVs from a set of videos of dynamic scenes is a more challenging and applicable task in the domains of computer vision and graphics. 
Earlier attempts to address this task pivoted around the construction of dynamic primitives [15, 17] or resorting to interpolation [7, 75]. With the + +![](images/55d1f9333c7c59cf1303ecb9ba739f032e21eebe7ba75e680000a776bbce7bb8.jpg) +Figure 3. Overview of 3DGStream. Given a set of multi-view video streams, 3DGStream aims to construct high-quality FVV stream of the captured dynamic scene on-the-fly. Initially, we optimize a set of 3DGs to represent the scene at timestep 0. For each subsequent timestep $i$ , we use the 3DGs from timestep $i - 1$ as an initialization and then engage in a two-stage training process: Stage 1: We train the Neural Transformation Cache (NTC) to model the translations and rotations of 3DGs. After training, the NTC transforms the 3DGs, preparing them for the next timestep and the next stage in the current timestep. Stage 2: We spawn frame-specific additional 3DGs at potential locations and optimize them along with periodic splitting and pruning. After the two-stage process concludes, both transformed and additional 3DGs are used to render at the current timestep $i$ , with only the transformed ones carried into the next timestep. + +success of NeRF-like methods in novel view synthesis for static scenes, a series of works [1, 9, 19, 29-34, 42, 44-46, 48, 51, 55, 57, 59, 61, 62, 68, 74] attempt to use NeRF for constructing FVVs in dynamic scenes. These works can typically be categorized into five types: prior-driven, flow-based, warp-based, those using spatio-temporal inputs, and per-frame training. + +Prior-driven methods [27, 30, 62, 68, 74] leverage parametric models or incorporate additional priors, such as skeletons, to bolster performance on the reconstruction of specific dynamic objects, e.g., humans. However, their application is limited and not generalizable to broader scenes. + +Flow-based methods [32, 33] primarily focus on constructing FVVs from monocular videos. By estimating the correspondence of 3D points in consecutive frames, they achieve impressive results. Nonetheless, the intrinsic ill-posedness of monocular reconstructions in intricate dynamic scenes frequently calls for supplementary priors like depth, optical flow, and motion segmentation masks. + +Warp-based methods [1, 42, 44, 46, 51, 55, 61] assume that the dynamics of the scene arise from the deformation of static structures. These methods warp the radiance field of each frame onto one or several canonical frames, achieving notable results. However, the strong assumptions they rely on often prevent them from handling topological variations. + +Methods that use spatio-temporal inputs [9, 19, 31, 45, 48, 57, 58] enhance radiance fields by adding a temporal dimension, enabling the querying of the radiance field using spatio-temporal coordinates. While these techniques showcase a remarkable ability to synthesize new viewpoints in dynamic scenes, the entangled scene parameters can constrain their adaptability for downstream applications. + +Per-frame training methods [29, 34, 59] adapt to changes in the scene online by leveraging per-frame training, a paradigm we have also adopted. To be specific, StreamRF [29] employs Plenoxels [20] for scene representation and achieves rapid on-the-fly training with minimal storage requirements through techniques like narrow band tuning and difference-based compression. ReRF [59] uses DVGO [52] for scene representation and optimize motion grid and residual grid frame by frame to model interframe discrepancies, enabling high-quality FVV streaming and rendering. 
Dynamic3DG [34] optimizes simplified 3DGs and integrates physically-based priors for high-quality novel view synthesis on dynamic scenes. + +Among the aforementioned works, only NeRF-Player [51], ReRF [59], StreamRF [29], and Dynamic3DG [34] are able to stream FVVs. NeRFPlayer achieves FVV streaming through a decomposition module and a feature streaming module, but it is only able to stream pre-trained models. ReRF and Dynamic3DG are limited to processing scenes with few objects and foreground mask, necessitating minute-level per-frame training times. StreamRF stands out by requiring only a few seconds for each frame's training to construct high-fidelity FVVs on challenging real-world dynamic scenes with compressed model storage. However, it falls short in rendering speed. Contrarily, our approach matches or surpasses StreamRF in training speed, model storage, and image quality, all while achieving real-time rendering at 200 FPS. + +# 2.3. Concurrent Works + +Except for Dynamic3DG, several concurrent works have extended 3DG-S to represent dynamic scenes. De + +formable3DG [70] employs an MLP to model the deformation of 3DGs, while [65] introduces a hexplane-based encoder to enhance the efficiency of deformation query. Meanwhile, [18, 71] lift 3DG to 4DG primitives for dynamic scene representation. However, these approaches are limited to offline reconstruction and lack streamable capabilities, whereas our work aims to achieve efficient streaming of FFVs with an online training paradigm. + +# 3. Background: 3D Gaussian Splitting + +3D Gaussian Splitting (3DG-S) [26] employs anisotropic 3D Gaussians as an explicit scene representation. Paired with a fast differentiable rasterizer, 3DGs achieves real-time novel view synthesis with only minutes of training. + +# 3.1. 3D Gaussians as Scene Representation + +A 3DG is defined by a covariance matrix $\Sigma$ centered at point (i.e., mean) $\mu$ : + +$$ +G (x; \mu , \Sigma) = e ^ {- \frac {1}{2} (x - \mu) ^ {T} \Sigma^ {- 1} (x - \mu)}. \tag {1} +$$ + +To ensure positive semi-definiteness during optimization, the covariance matrix $\Sigma$ is decomposed into a rotation matrix $R$ and a scaling matrix $S$ : + +$$ +\Sigma = R S S ^ {T} R ^ {T}. \tag {2} +$$ + +Rotation is conveniently represented by a unit quaternion, while scaling uses a 3D vector. Additionally, each 3DG contains a set of spherical harmonics (SH) coefficients of to represent view-dependent colors, along with an opacity value $\alpha$ , which is used in $\alpha$ -blending (Eq. (4)). + +# 3.2. Splitting for Differentiable Rasterization + +For novel view synthesis, 3DG-S [26] project 3DGs to 2D Gaussian (2DG) splats [76]: + +$$ +\Sigma^ {\prime} = J W \Sigma W ^ {T} J ^ {T}. \tag {3} +$$ + +Here, $\Sigma^{\prime}$ is the covariance matrix in camera coordinate. $J$ is the Jacobian of the affine approximation of the projective transformation, and $W$ is the viewing transformation matrix. By skipping the third row and third column of $\Sigma^{\prime}$ , we can derive a $2\times 2$ matrix denoted as $\Sigma_{2d}$ . Furthermore, projecting the 3DG's mean, $\mu$ , into the image space results in a 2D mean, $\mu_{2d}$ . Consequently, this allows us to define the 2DG in the image space as $G_{2d}(x;\mu_{2d},\Sigma_{2d})$ . + +Using $\Sigma^{\prime}$ , the color $C$ of a pixel can be computed by blending the $N$ ordered points overlapping the pixel: + +$$ +C = \sum_ {i \in N} c _ {i} \alpha_ {i} ^ {\prime} \prod_ {j = 1} ^ {i - 1} \left(1 - \alpha_ {j} ^ {\prime}\right). 
\tag {4} +$$ + +Here, $c_{i}$ denotes the view-dependent color of the $i$ -th 3DG. $\alpha_{i}^{\prime}$ is determined by multiplying the opacity $\alpha_{i}$ of the $i$ -th 3DG $G$ with the evaluation of the corresponding 2DG $G_{2d}$ . + +Leveraging a highly-optimized rasterization pipeline coupled with custom CUDA kernels, the training and rendering of 3DG-S are remarkably fast. For instance, for megapixel-scale real-world scenes, just a few minutes of optimization allows 3DGs to achieve photo-realistic visual quality and rendering speeds surpassing 100 FPS. + +# 4. Method + +3DGStream constructs photo-realistic FVV streams from multi-view video streams on-the-fly using a per-frame training paradigm. We initiate the process by training 3DGs [26] at timestep 0. For subsequent timesteps, we employ the previous timestep's 3DGs as an initialization and pass them to a two-stage pipeline. Firstly (Sec. 4.1), a Neural Transformation Cache (NTC) is trained to model the transformation for each 3DG. Once the training is finished, we transform the 3DGs and carry the transformed 3DGs to the next timestep. Secondly (Sec. 4.2), we employ an adaptive 3DG addition strategy to handle emerging objects. For each FVV frame, we render views at the current timestep using both the transformed 3DGs and additional 3DGs, while the latter are not passed to the next timestep. Note that we only need to train and store the parameters of the NTC and the additional 3DGs for each subsequent timestep, not all the 3DGs. We depict an overview of our approach in Fig. 3. + +# 4.1. Neural Transformation Cache + +For NTC, we seek a structure that is compact, efficient, and adaptive to model the transformations of 3DGs. Compactness is essential to reduce the model storage. Efficiency enhances training and inference speeds. Adaptivity ensures the model focuses more on dynamic regions. Additionally, it would be beneficial if the structure could consider certain priors of dynamic scenes [5, 24, 54], such as the tendency for neighboring parts of an object to have similar motion. + +Inspired by Neural Radiance Caching [39] and I-NGP [40], we employ multi-resolution hash encoding combined with a shallow fully-fused MLP [38] as the NTC. Specifically, following I-NGP, we use multi-resolution voxel grids to represent the scene. Voxel grids at each resolution are mapped to a hash table storing a $d$ -dimensional learnable feature vector. For a given 3D position $x \in \mathbb{R}^3$ , its hash encoding at resolution $l$ , denoted as $h(x;l) \in \mathbb{R}^d$ , is the linear interpolation of the feature vectors corresponding to the eight corners of the surrounding grid. Consequently, its multi-resolution hash encoding $h(x) = [h(x;0), h(x;1), \dots, h(x;L - 1)] \in \mathbb{R}^{Ld}$ , where $L$ represents the number of resolution levels. The multi-resolution hash encoding addresses all our requirements for the NTC: + +- Compactness: Hashing effectively reduces the storage space needed for encoding the whole scene. +- Efficiency: Hash table lookup operates in $O(1)$ , and is highly compatible with modern GPUs. + +- Adaptivity: Hash collisions occur in hash tables at finer resolutions, allowing regions with larger gradients—representing dynamic regions in our context—to drive the optimization. +- Priors: The combination of linear interpolation and the voxel-grid structure ensures the local smoothness of transformations. Additionally, the multi-resolution approach adeptly merges global and local information. 
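
To make the lookup concrete, the sketch below walks through a multi-resolution hash-grid query in the spirit of I-NGP [40]: each level hashes the eight voxel corners surrounding the query point and trilinearly blends their feature vectors. The level count, table size, and growth factor are illustrative placeholders, not the exact NTC hyper-parameters.

```python
import numpy as np

# Illustrative hyper-parameters; not the exact 3DGStream settings.
L, d, T = 16, 2, 2**15                 # levels, features per level, hash-table size
BASE_RES, GROWTH = 16, 1.5             # coarsest grid resolution and per-level growth
PRIMES = np.array([1, 2654435761, 805459861], dtype=np.uint64)  # spatial-hash primes (I-NGP)

# One learnable feature table per resolution level.
tables = [np.random.randn(T, d).astype(np.float32) * 1e-4 for _ in range(L)]

def hash_index(corner: np.ndarray) -> int:
    """Map an integer voxel corner (3,) to a slot of the hash table."""
    h = np.bitwise_xor.reduce(corner.astype(np.uint64) * PRIMES)
    return int(h) % T

def encode(x: np.ndarray) -> np.ndarray:
    """Multi-resolution hash encoding h(x) for a point x in [0, 1]^3 -> (L * d,)."""
    feats = []
    for level in range(L):
        res = int(BASE_RES * GROWTH**level)
        p = x * res
        lo = np.floor(p).astype(np.int64)           # lower corner of the enclosing voxel
        w = p - lo                                   # trilinear interpolation weights
        f = np.zeros(d, dtype=np.float32)
        for c in range(8):                           # blend the 8 surrounding corners
            offset = np.array([(c >> k) & 1 for k in range(3)])
            weight = np.prod(np.where(offset == 1, w, 1.0 - w))
            f += weight * tables[level][hash_index(lo + offset)]
        feats.append(f)
    return np.concatenate(feats)                     # fed to the shallow MLP described next

print(encode(np.array([0.3, 0.5, 0.7])).shape)       # (32,)
```

In this scheme the per-level feature tables are the learnable parameters, and only the regions whose features are actually queried (i.e., occupied by 3DG means) receive gradients, which is what gives the encoding its adaptivity.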
+ +Furthermore, to enhance the NTC's performance with minimal overhead, we utilize a shallow fully-fused MLP [38]. This maps the hash encoding to a 7-dimensional output: the first three dimensions indicate the translation of the 3DG; the remaining dimensions represent the rotation of the 3DG using quaternions. Given multi-resolution hash encoding coupled with MLP, our NTC is formalized as: + +$$ +d \mu , d q = M L P (h (\mu)), \tag {5} +$$ + +where $\mu$ denotes the mean of the input 3DG. We transform the 3DGs based on $d\mu$ and $dq$ . Specifically, the following parameters of the transformed 3DGs are given as: + +- Mean: $\mu' = \mu + d\mu$ , where $\mu'$ is the new mean and $+$ represents vector addition. +- Rotation: $q' = \text{norm}(q) \times \text{norm}(dq)$ , where $q'$ is the new rotation, $\times$ indicates quaternion multiplication and norm denotes normalization. +- SH Coefficients: Upon rotating the 3DG, the SH coefficients should also be adjusted to align with the rotation of the 3DG. Leveraging the rotation invariance of SH, we directly employ SH Rotation to update SHs. Please refer to the supplementary materials (Suppl.) for details. + +In Stage 1, we transform the 3DGs from the previous frame by NTC and then render with them. The parameters of the NTC is optimized by the loss between the rendered image and the ground truth. Following 3DG-S [26], the loss function is $L_{1}$ combined with a D-SSIM term: + +$$ +L = (1 - \lambda) L _ {1} + \lambda L _ {D - S S I M}, \tag {6} +$$ + +where $\lambda = 0.2$ in all our experiments. It should be noted that during the training process, the 3DGs from the previous frame remain frozen and do not undergo any updates. This implies that the input to the NTC remains consistent. + +Additionally, to ensure training stability, we initialize the NTC with warm-up parameters. The loss employed during the warm-up is defined as: + +$$ +L _ {\text {w a r m} - u p} = \left\| d \mu \right\| _ {1} - \cos^ {2} (\operatorname {n o r m} (d q), Q), \tag {7} +$$ + +where $Q$ is the identity quaternion. The first term uses the $L_{1}$ norm to ensure the estimated translation approaches zero, while the second term, leveraging cosine similarity, ensures the estimated rotation approximates no rotation. However, given the double-covering property of the unit quaternions, we use the square of the cosine similarity. For + +each scene, we execute the warm-up solely after the training at timestep 0, using noise-augmented means of the initial 3DGs as input. After 3000 iterations of training (roughly 20 seconds), the parameters are stored and used to initialize the NTCs for all the following timesteps. + +# 4.2. Adaptive 3DG Addition + +Relying solely on 3DGs transformations adequately cover a significant portion of real-world dynamic scenes, with translations effectively managing occlusions and disappearances in subsequent timesteps. However, this approach falters when faced with objects not present in the initial frame, such as transient objects like flames or smoke, and new persistent objects like the liquid poured out of a bottle. Since 3DG is an unstructured explicit representation, it's essential to add new 3DGs to model these emerging objects. Considering constraints related to model storage requirements and training complexities, it's not feasible to generate an extensive number of additional 3DGs nor allow them to be used in subsequent frames, as this would cause 3DGs to accumulate over time. 
This necessitates a strategy for swiftly generating a limited number of frame-specific 3DGs to model these emerging objects precisely and thereby enhance the completeness of the scene at the current timestep. + +Firstly, we need to ascertain the locations for the emerging objects. Inspired by 3DG-S [26], we recognized the view-space positional gradients of 3DGs as a key indicator. We observed that for emerging objects, the 3DGs in proximity exhibited large view-space positional gradients. This is attributed to the optimization attempting to 'masquerade' the emerging object by transforming the 3DGs. However, since we prevent the colors of the 3DGs from being updated in Stage 1, this attempt falls short. Nonetheless, they are still transformed to appropriate positions, with large view-space positional gradients. + +Based on the aforementioned observations, we deem it appropriate to introduce additional 3DGs around these high-gradient regions. Moreover, to exhaustively capture every potential location where new objects might emerge, we adopt an adaptive 3DG spawn strategy. Specifically, we track view-space positional gradient during the final training epoch of Stage 1. Once this stage concludes, we select 3DGs that have an average magnitude of view-space position gradients exceeding a relatively low threshold $\tau_{\text{grad}} = 0.00015$ . For each selected 3DG, the position of the additional 3DG is sampled from $X \sim \mathcal{N}(\mu, 2\Sigma)$ , where $\mu$ and $\Sigma$ is the mean and the covariance matrix of the selected 3DG. While we avoid assumptions about the other attributes of the additional 3DGs, improper initializations of SH coefficients and scaling vectors tend to result in an optimization preference for reducing opacity over adjusting these parameters. This causes additional 3DGs to quickly become transparent, thereby failing to capture the emerging objects. To mitigate + +![](images/52d98daa9bb69c47e43f3e12ed485e442b0635e4e088dcea92ccdaaf91986686.jpg) + +![](images/ae26b7747c376143137a582a93b2850c98b6cff2b532905bd821721976106b8b.jpg) +(a) I-NGP [40] + +![](images/bd062e4a5cf408db25ebf02c6f7531404be0b2225855137eb1501564c34fec85.jpg) + +![](images/fd03f12ece401bbc6c970708387398d4f14ea125baaa835db430a9c1f14401d3.jpg) +(b) HyperReel [1] + +![](images/a9d751f4253cc380e4540e6800e1d04ba16adc81ba059b71274d8e2e3968c70f.jpg) + +![](images/55703ea1220f0eac49eb846d6e1a479105004cf1203af013fcb7264ae33a8c60.jpg) +(c) StreamRF [29] +Figure 4. Qualitative comparisons on the discussion scene of the Meet Room dataset and the sear steak scene of the N3DV dataset. + +![](images/a81ba47ebcbf2aaa2aa7a9f5e1e45665b6700bfa8ef732ef0b140374e946c99a.jpg) + +![](images/08e78befab9c69bf2cce80314d4bf13b63e00c8785f420dbebb8eaadccbfa376.jpg) +(d) 3DGStream + +![](images/2bd76b79bc5ffc64bcc77a83db5e8fa3e0d8304f7009fe26e9ce2c50bde8ab37.jpg) + +![](images/89a217cf6df7540d2b61524a00535766caa0b6d2ca7e0c8eb2e34169c6e63d1b.jpg) +(e) Ground Truth + +
| Category | Method | PSNR↑ (dB) | Storage↓ (MB) | Train↓ (mins) | Render↑ (FPS) | Streamable |
| --- | --- | --- | --- | --- | --- | --- |
| Static | Plenoxels [20] | 30.77 | 4106 | 23 | 8.3 | ✓ |
| | I-NGP [40] | 28.62 | 48.2 | 1.3 | 2.9 | ✓ |
| | 3DG-S [26] | 32.08 | 47.1 | 8.3 | 390 | ✓ |
| Offline | DyNeRF [31] | 29.58† | 0.1 | 260 | 0.02 | × |
| | NeRFPlayer [51] | 30.69 | 17.1 | 1.2 | 0.05 | ✓ |
| | HexPlane [9] | 31.70 | 0.8 | 2.4 | 0.21 | × |
| | K-Planes [48] | 31.63 | 1.0 | 0.8 | 0.15 | × |
| | HyperReel [1] | 31.10 | 1.2 | 1.8 | 2.00 | × |
| | MixVoxels [57] | 30.80 | 1.7 | 0.27 | 16.7 | × |
| Online | StreamRF [29] | 30.68 | 17.7/31.4* | 0.25 | 8.3 | ✓ |
| | Ours | 31.67 | 7.6/7.8* | 0.20 | 215 | ✓ |
+ +this issue, the SH coefficients and scaling vectors of these 3DGs are derived from the selected ones, with rotations set to the identity quaternion $\mathbf{q} = [1,0,0,0]$ and opacity initialized at 0.1. After spawning, the 3DGs undergo optimization utilizing the same loss function (Eq. (6)) as Stage 1. Note that only the parameters of the additional 3DGs are optimized, while those of the transformed 3DGs remain fixed. + +To guard against local minima and manage the number of additional 3DGs, we implement an adaptive 3DG quantity control strategy. Specifically, in Stage 2, we set a relatively high threshold, $\tau_{\alpha} = 0.01$ , for the opacity value. At the end of each training epoch, for 3DGs with view-space position gradients exceeding $\tau_{grad}$ , we spawn additional 3DGs nearby to address under-reconstructed regions. These additional 3DGs inherit their rotations and SH coefficients from the original 3DG, but their scaling is adjusted to $80\%$ of the original, mirroring the 'split' operation described by Kerbl et al. [26]. Subsequently, we discard any additional 3DGs with opacity values below $\tau_{\alpha}$ to suppress the growth in the quantity of 3DGs. + +Table 1. Quantitative comparison on the N3DV dataset. The training time, required storage and PSNR are averaged over the whole 300 frames for each scene. ${}^{ \dagger }$ DyNeRF [31] only report metrics on the flame salmon scene. *Considering the initial model. + +
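
To make the Adaptive 3DG Addition of Sec. 4.2 concrete, the sketch below mimics its spawning and pruning rules; the dict-of-arrays container and function names are hypothetical stand-ins for the actual 3DG data structures, and the per-epoch split step of the quantity control is omitted.

```python
import numpy as np

TAU_GRAD, TAU_ALPHA = 0.00015, 0.01      # gradient and opacity thresholds from Sec. 4.2

def spawn_additional_3dgs(gaussians, view_grad_mag, rng=None):
    """Spawn frame-specific 3DGs near Gaussians with large view-space positional gradients.

    `gaussians` is a hypothetical dict of arrays: 'mean' (N, 3), 'cov' (N, 3, 3),
    'scale' (N, 3), 'sh' (N, K); `view_grad_mag` (N,) holds the averaged view-space
    positional gradient magnitudes tracked during the last epoch of Stage 1.
    """
    rng = rng or np.random.default_rng()
    selected = np.nonzero(view_grad_mag > TAU_GRAD)[0]
    new = {"mean": [], "scale": [], "sh": [], "rotation": [], "opacity": []}
    for i in selected:
        mu, cov = gaussians["mean"][i], gaussians["cov"][i]
        new["mean"].append(rng.multivariate_normal(mu, 2.0 * cov))  # X ~ N(mu, 2 * Sigma)
        new["scale"].append(gaussians["scale"][i])                  # inherit scaling
        new["sh"].append(gaussians["sh"][i])                        # inherit SH coefficients
        new["rotation"].append([1.0, 0.0, 0.0, 0.0])                # identity quaternion
        new["opacity"].append(0.1)                                  # low initial opacity
    return {k: np.asarray(v) for k, v in new.items()}

def prune_additional_3dgs(additional):
    """Discard additional 3DGs whose optimized opacity fell below tau_alpha."""
    keep = additional["opacity"] >= TAU_ALPHA
    return {k: v[keep] for k, v in additional.items()}
```

In 3DGStream, spawning of this kind runs once after Stage 1 using the gradients tracked over its final training epoch, while the split-and-prune quantity control repeats at the end of each Stage 2 epoch.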
| Method | PSNR↑ (dB) | Storage↓ (MB) | Train↓ (mins) | Render↑ (FPS) |
| --- | --- | --- | --- | --- |
| Plenoxels [20] | 27.15 | 1015 | 14 | 10 |
| I-NGP [40] | 28.10 | 48.2 | 1.1 | 4.1 |
| 3DG-S [26] | 31.31 | 21.1 | 2.6 | 571 |
| StreamRF [29] | 26.72 | 5.7/9.0* | 0.17 | 10 |
| Ours | 30.79 | 4.0/4.1* | 0.10 | 288 |
+ +Table 2. Quantitative comparison on the Meet Room dataset. Note that the training time, required storage and PSNR are averaged over the whole 300 frames. *Considering the initial model. + +# 5. Experiments + +# 5.1. Datasets + +We conduct experiments on two real-world dynamic scene datasets: N3DV dataset [31] and Meet Room dataset [29]. + +N3DV dataset [31] is captured using a multi-view system of 21 cameras, comprises dynamic scenes recorded at a resolution of $2704 \times 2028$ and 30 FPS. Following previous works [9, 29, 31, 48, 51, 57], we downsample the videos by a factor of two and follow the training and validation camera split provided by [31]. + +Meet Room dataset [29] is captured using a 13-camera multi-view system, comprises dynamic scenes recorded at a resolution of $1280 \times 720$ and 30 FPS. Following [29], we utilize 13 views for training and reserved 1 for testing. + +# 5.2. Implementation + +We implement 3DGStream upon the codes of 3D Gaussian Splitting (3DG-S) [26], and implement the Neural Transformation Cache (NTC) using tiny-cuda-nn [38]. For the training of initial 3DGs, we fine-tune the learning rates on the N3DV dataset based on the default settings of 3DG-S, and apply them to the Meet Room dataset. For all scenes, + +![](images/d0d6fadb2b57332d5acb9dfad6d2677720664f253140eb705b21e072bacd049a.jpg) +Figure 5. Comparison of different approaches for modeling the transformation of 3DGs. Conducted on the second frame of the flame salmon video, utilizing identical initial 3DGs. + +![](images/e8290a6e386565d983f49d3106dee95b43297f1a82f85d16bd6caac628c70b0e.jpg) +Figure 6. Comparison of different approaches on the flame salmon scene. + +
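
As a rough illustration of the implementation route described in Sec. 5.2, the snippet below assembles a hash-encoding-plus-fully-fused-MLP module with tiny-cuda-nn's PyTorch bindings [38]; the hyper-parameters and tensor shapes are assumptions for illustration, not the released 3DGStream configuration.

```python
import torch
import tinycudann as tcnn

# Hypothetical NTC: multi-resolution hash encoding feeding a shallow fully-fused MLP
# that maps a normalized 3DG mean to a 7-D output: 3-D translation + 4-D quaternion.
ntc = tcnn.NetworkWithInputEncoding(
    n_input_dims=3,
    n_output_dims=7,
    encoding_config={
        "otype": "HashGrid",
        "n_levels": 16,
        "n_features_per_level": 2,
        "log2_hashmap_size": 19,
        "base_resolution": 16,
        "per_level_scale": 1.5,
    },
    network_config={
        "otype": "FullyFusedMLP",
        "activation": "ReLU",
        "output_activation": "None",
        "n_neurons": 64,
        "n_hidden_layers": 2,
    },
)

means = torch.rand(100_000, 3, device="cuda")   # stand-in for 3DG means scaled to [0, 1]^3
out = ntc(means).float()                        # tcnn returns reduced precision by default
d_mu, d_q = out[:, :3], out[:, 3:]              # translation and rotation offsets as in Eq. (5)
```

A per-frame optimizer over this module's parameters would then minimize the image loss of Eq. (6) during Stage 1.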
| Variant | PSNR↑ (dB) | #Additional 3DGs↓ |
| --- | --- | --- |
| Baseline | 28.39 | 0 |
| Rnd. Spawn | 28.39 | 971.9 |
| w/o Quant. Ctrl. | 28.43 | 8710.8 |
| Full Model | 28.42 | 477.7 |
Table 3. Ablation study of the Adaptive 3DG Addition strategy on the flame salmon scene. The metrics are averaged over the whole sequence.

we train the NTC for 150 iterations in Stage 1 and train the additional 3DGs for 100 iterations in Stage 2. Please refer to the Suppl. for more details.

# 5.3. Comparisons

Quantitative comparisons. Our quantitative analysis involves benchmarking 3DGStream on the N3DV dataset and

![](images/32dae74192b7fd1ee5083dfdcf9c39c6bf2b758c5eef9cf1ce8771508d0a9dfa.jpg)

![](images/929c859c4667c346e914af0d40aec71632d76f0090bcf438c4b9e516eb4ddbf4.jpg)
(a) Result of Stage 1

![](images/d3f16064cb27a3f787bbc3dd059602dc774e5454cc24a07a5c886dc52a45d911.jpg)

![](images/89ead230eb52d15332e8e78864a044f4dedfbbb4f775193ede3b623a760103bd.jpg)
(b) Result of Stage 2

![](images/32a4544da6bed84849eb6edc553687d85f4361b2400e4297c8c6199ad4ae282c.jpg)

![](images/cbf84c21826185045c50ad4b71ca180440b5be0a7b216b812950e8175b92799c.jpg)
(c) Ground Truth
Figure 7. Qualitative results of the ablation study conducted on the flame steak scene and the coffee martini scene.

Meet Room dataset, comparing it with a range of representative methods. We take Plenoxels [20], I-NGP [40], and 3DG-S [26] as representatives of fast static scene reconstruction methods, training them from scratch for each frame. StreamRF [29], Dynamic3DG [34], and ReRF [60] are designed for online training on dynamic scenes. Owing to the limitations of Dynamic3DG and ReRF, which require foreground masks, are confined to scenes with few objects, and need minute-level per-frame training times, we select StreamRF as the representative online training method, given its adaptability and training feasibility on the N3DV and Meet Room datasets. To demonstrate 3DGStream's competitive image quality, we draw comparisons with the quantitative results reported for the N3DV dataset in the respective papers of DyNeRF [31], NeRFPlayer [51], HexPlane [9], K-Planes [48], HyperReel [1], and MixVoxels [57], all of which reconstruct dynamic scenes through offline training on entire video sequences.

In Tab. 1, we present the averaged rendering speed, training time, required storage, and peak signal-to-noise ratio (PSNR) over all scenes of the N3DV dataset. For each scene, the latter three metrics are computed as averages over the whole 300 frames. In addition, we provide a per-scene breakdown of the comparisons on the N3DV dataset in the Suppl. To demonstrate the generality of our method, we conduct experiments on the Meet Room dataset introduced by StreamRF [29] and perform a quantitative comparison against Plenoxels [20], I-NGP [40], 3DG-S [26], and StreamRF [29]. The results are presented in Tab. 2. As shown in Tabs. 1 and 2, our method demonstrates its superiority through fast online training and real-time rendering while maintaining a competitive edge in model storage and image quality. Furthermore, among the methods capable of streaming FVVs, our model requires the least storage.

Qualitative comparisons. While our approach primarily aims to enhance the efficiency of online FVV construction, as illustrated in Tabs. 1 and 2, it still achieves competitive
| Step | Overhead (ms) | FPS |
| --- | --- | --- |
| Render w/o NTC | 2.56 | 390 |
| + Query NTC | 0.62 |  |
| + Transformation | 0.02 |  |
| + SH Rotation | 1.46 |  |
| Total | 4.66 | 215 |
Table 4. Rendering profiling for the flame salmon scene at megapixel resolution. Note that flame salmon is the most time-consuming scene to render in our experiments.

image quality. In Fig. 4, we present a qualitative comparison with I-NGP [40], HyperReel [1], and StreamRF [29] across scenes from the N3DV dataset [31] and the Meet Room dataset [29], with a special emphasis on dynamic objects such as faces, hands, and tongs, as well as intricate objects like labels and statues. It is evident that our method faithfully captures the dynamics of the scene without sacrificing the ability to reconstruct intricate objects. Please refer to our project page for more video results.

# 5.4. Evaluations

Neural Transformation Cache. To show the effectiveness of the NTC, we use distinct approaches to model the transformations of 3DGs from the first to the second frame of the flame salmon video of the N3DV dataset. Fig. 5 shows that, without the multi-resolution hash encoding (w/o Hash enc.), the MLP struggles to model the transformations effectively. Additionally, without the warm-up (w/o Warm-up), it takes more iterations to converge. Moreover, even when compared with direct optimization of the previous frame's 3DGs (Direct Opt.), the NTC demonstrates on-par performance. In Fig. 6, we present the results of the different approaches applied across the entire flame salmon video, excluding the first frame (i.e., Frame 0). w/o Hash enc. and w/o Warm-up are unable to converge swiftly, so errors accumulate as the sequence progresses. Direct Opt. yields the best outcomes, but at the cost of inflated storage. Utilizing the NTC, in contrast, delivers comparable results with substantially lower storage overhead by eliminating the need to save all the 3DGs for every frame.

Adaptive 3DG Addition. Tab. 3 presents the quantitative results of the ablation study conducted on the flame salmon scene; more results are presented in the Suppl. The base model without Stage 2 (Baseline) and a set of randomly spawned 3DGs (Rnd. Spawn) in quantities equivalent to our spawn strategy both fail to capture emerging objects. The variant without our quantity control strategy (w/o Quant. Ctrl.) manages to model emerging objects but requires a significantly larger number of additional 3DGs. In contrast, our full model proficiently reconstructs emerging objects with a minimal number of additional 3DGs. The ablation study illustrated in Fig. 7 qualitatively showcases the effect of the Adaptive 3DG Addition strategy, highlighting its ability to reconstruct objects not present in the initial frame, such as coffee in a pot, a dog's tongue, and flames.

Real-time Rendering. Following 3DG-S [26], we employ the SIBR framework [6] to measure rendering speed. Once all required resources are loaded onto the GPU, the additional overhead of our approach is primarily the time taken to query the NTC and transform the 3DGs. As detailed in Tab. 4, our method benefits from the efficiency of the multi-resolution hash encoding and the fully-fused MLP [38], which enable rapid NTC queries. Notably, the most time-consuming step is the SH rotation. However, our experiments indicate that the SH rotation has minimal impact on reconstruction quality, which may be attributed to the 3DGs modeling view-dependent colors through alternative mechanisms (e.g., small 3DGs of varying colors surrounding the object) rather than SH coefficients. Nonetheless, we retain the SH rotation for theoretical soundness.
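For reference, the FPS entries in Tab. 4 follow directly from the listed per-step overheads:

$$
\frac{1000\ \mathrm{ms}}{2.56\ \mathrm{ms}} \approx 390\ \mathrm{FPS}, \qquad
\frac{1000\ \mathrm{ms}}{(2.56 + 0.62 + 0.02 + 1.46)\ \mathrm{ms}} = \frac{1000}{4.66} \approx 215\ \mathrm{FPS}.
$$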
# 6. Discussion

The quality of 3DG-S [26] on the initial frame is crucial to 3DGStream. We therefore inherit the limitations of 3DG-S, such as a strong dependence on the initial point cloud. As illustrated in Fig. 7, there are obvious artifacts beyond the windows, attributable to COLMAP's [49] inability to reconstruct distant landscapes. Hence, our method stands to benefit directly from future enhancements to 3DG-S. Moreover, for efficient on-the-fly training, we limit the number of training iterations, which restricts the modeling of drastic motion in Stage 1 and of complex emerging objects in Stage 2.

# 7. Conclusion

We propose 3DGStream, a novel method for efficient Free-Viewpoint Video streaming. Based on 3DG-S [26], we utilize an effective Neural Transformation Cache to capture the motion of objects. In addition, we propose an Adaptive 3DG Addition strategy to accurately model emerging objects in dynamic scenes. The two-stage pipeline of 3DGStream enables the online reconstruction of dynamic scenes from video streams. While ensuring photo-realistic image quality, 3DGStream achieves on-the-fly training ($\sim$10 s per frame) and real-time rendering ($\sim$200 FPS) at megapixel resolution with moderate requisite storage.

# 8. Acknowledgement

This work was supported in part by Zhejiang Province Program (2022C01222, 2023C03199, 2023C03201), the National Program of China (62172365, 2021YFF0900604, 19ZDA197), Ningbo Science and Technology Plan Project (022Z167, 2023Z137), and MOE Frontier Science Center for Brain Science & Brain-Machine Integration (Zhejiang University).

# References

[1] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 1, 3, 6, 7, 8
[2] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855-5864, 2021. 2
[3] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5470-5479, 2022.
[4] Jonathan T. Barron, Ben Mildenhall, Dor Verbin, Pratul P. Srinivasan, and Peter Hedman. Zip-nerf: Anti-aliased grid-based neural radiance fields. ICCV, 2023. 2
[5] Michael J Black and Paul Anandan. The robust estimation of multiple motions: Parametric and piecewise-smooth flow fields. Computer vision and image understanding, 63(1):75-104, 1996. 4
[6] Sebastien Bonopera, Jerome Esnault, Siddhant Prakash, Simon Rodriguez, Theo Thonat, Mehdi Benadel, Gaurav Chaurasia, Julien Philip, and George Drettakis. sibr: A system for image based rendering, 2020. 8
[7] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 1, 2
[8] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In SIGGRAPH, pages 425-432, 2001. 2
[9] Ang Cao and Justin Johnson.
Hexplane: A fast representation for dynamic scenes. CVPR, 2023. 3, 6, 7 +[10] Jin-Xiang Chai, Xin Tong, Shing-Chow Chan, and Heung-Yeung Shum. Plenoptic sampling. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 307-318, 2000. 2 +[11] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14124-14133, 2021. 2 +[12] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In European Conference on Computer Vision (ECCV), 2022. 2 +[13] Anpei Chen, Zexiang Xu, Xinyue Wei, Siyu Tang, Hao Su, and Andreas Geiger. Dictionary fields: Learning a neural basis decomposition. ACM Trans. Graph., 2023. 2 +[14] Zhiqin Chen, Thomas Funkhouser, Peter Hedman, and Andrea Tagliasacchi. Mobilenerf: Exploiting the polygon rasterization pipeline for efficient neural field rendering on mo + +bile architectures. In The Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2 +[15] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (TOG), 34(4):69, 2015. 1, 2 +[16] Abe Davis, Marc Levoy, and Fredo Durand. Unstructured light fields. Comput. Graph. Forum, 31(2pt1):305-314, 2012. 2 +[17] Mingsong Dou, Philip Davidson, Sean Ryan Fanello, Sameh Khamis, Adarsh Kowdle, Christoph Rhemann, Vladimir Tankovich, and Shahram Izadi. Motion2fusion: Real-time volumetric performance capture. ACM Trans. Graph., 36(6): 246:1-246:16, 2017. 1, 2 +[18] Yuanxing Duan, Fangyin Wei, Qiyu Dai, Yuhang He, Wenzheng Chen, and Baoquan Chen. 4d gaussian splatting: Towards efficient novel view synthesis for dynamic scenes. arXiv preprint arXiv:2402.03307, 2024. 4 +[19] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In SIGGRAPH Asia 2022 Conference Papers, 2022. 1, 3 +[20] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 2, 3, 6, 7 +[21] Stephan J. Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. Fastnerf: High-fidelity neural rendering at 200fps. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14346-14355, 2021. 2 +[22] Steven J. Gortler, Radek Grzesczuk, Richard Szeliski, and Michael F. Cohen. The lumigraph. In Proceedings of the 23rd Annual Conference on Computer Graphics and Interactive Techniques, page 43-54, New York, NY, USA, 1996. Association for Computing Machinery. 2 +[23] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5855-5864, 2021. 2 +[24] Berthold KP Horn and Brian G Schunck. Determining optical flow. Artificial intelligence, 17(1-3):185-203, 1981. 4 +[25] Wenbo Hu, Yuling Wang, Lin Ma, Bangbang Yang, Lin Gao, Xiao Liu, and Yuewen Ma. 
Tri-miprf: Tri-mip representation for efficient anti-aliasing neural radiance fields. In ICCV, 2023. 2 +[26] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics, 42 (4), 2023. 1, 2, 4, 5, 6, 7, 8 +[27] Tobias Kirschstein, Shenhan Qian, Simon Giebenhain, Tim Walter, and Matthias Nießner. Nersemble: Multi-view radiance field reconstruction of human heads. arXiv preprint arXiv:2305.03027, 2023. 3 + +[28] Marc Levoy and Pat Hanrahan. Light field rendering. In SIGGRAPH, pages 31-42, 1996. 2 +[29] Lingzhi Li, Zhen Shen, Zhongshu Wang, Li Shen, and Ping Tan. Streaming radiance fields for 3d video synthesis. In NeurIPS, 2022. 1, 2, 3, 6, 7, 8 +[30] Ruilong Li, Julian Tanke, Minh Vo, Michael Zollhöfer, Jürgen Gall, Angjoo Kanazawa, and Christoph Lassner. Tava: Template-free animatable volumetric actors. In European Conference on Computer Vision, pages 419-436. Springer, 2022. 3 +[31] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 1, 2, 3, 6, 7, 8 +[32] Zhengqi Li, Simon Niklaus, Noah Snively, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 3 +[33] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 3 +[34] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In 3DV, 2024. 3, 7 +[35] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In CVPR, 2021. 2 +[36] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 1, 2 +[37] Ben Mildenhall, Peter Hedman, Ricardo Martin-Brualla, Pratul P Srinivasan, and Jonathan T Barron. Nerf in the dark: High dynamic range view synthesis from noisy raw images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16190-16199, 2022. 2 +[38] Thomas Müller. tiny-cuda-nn, 2021. 4, 5, 6, 8 +[39] Thomas Müller, Fabrice Rousselle, Jan Novák, and Alexander Keller. Real-time neural radiance caching for path tracing. ACM Transactions on Graphics (TOG), 40(4):1-16, 2021. 4 +[40] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, 2022. 1, 2, 4, 6, 7, 8 +[41] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis + +from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5480-5490, 2022. 
2 +[42] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 3 +[43] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 1 +[44] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M. Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. ACM Trans. Graph., 40(6), 2021. 3 +[45] Suntheon Park, Minjung Son, Seokhwan Jang, Young Chun Ahn, Ji-Yeon Kim, and Nahiyup Kang. Temporal interpolation is all you need for dynamic neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4212-4221, 2023. 3 +[46] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10318-10327, 2021. 1, 3 +[47] Christian Reiser, Rick Szeliski, Dor Verbin, Pratul Srinivasan, Ben Mildenhall, Andreas Geiger, Jon Barron, and Peter Hedman. Merf: Memory-efficient radiance fields for real-time view synthesis in unbounded scenes. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 2 +[48] Sara Fridovich-Keil and Giacomo Meanti, Frederik Rahbæk Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In CVPR, 2023. 1, 3, 6, 7 +[49] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 8 +[50] Heung-Yeung Shum and Li-Wei He. Rendering with concentric mosaics. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 299–306, 1999. 2 +[51] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 3, 6, 7 +[52] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5449-5459, 2022. 2, 3 +[53] Jiakai Sun, Zhanjie Zhang, Jiafu Chen, Guangyuan Li, Boyan Ji, Lei Zhao, and Wei Xing. Vgos: Voxel grid opti + +mization for view synthesis from sparse inputs. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, IJCAI-23, pages 1414-1422. International Joint Conferences on Artificial Intelligence Organization, 2023. Main Track. 2 +[54] Carlo Tomasi and Takeo Kanade. Shape and motion from image streams under orthography: a factorization method. International journal of computer vision, 9:137-154, 1992. 4 +[55] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12959-12970, 2021. 3 +[56] Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T. Barron, and Pratul P. Srinivasan. Ref-NeRF: Structured view-dependent appearance for neural radiance fields. CVPR, 2022. 2 +[57] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, Yafei Song, and Huaping Liu. Mixed neural voxels for fast multiview video synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19706-19716, 2023. 3, 6, 7 +[58] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 3 +[59] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 76-87, 2023. 3 +[60] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 76-87, 2023. 1, 7 +[61] Qianqian Wang, Yen-Yu Chang, Ruojin Cai, Zhengqi Li, Bharath Hariharan, Aleksander Holynski, and Noah Snavely. Tracking everything everywhere all at once. In International Conference on Computer Vision, 2023. 3 +[62] Chung-Yi Weng, Brian Curless, Pratul P Srinivasan, Jonathan T Barron, and Ira Kemelmacher-Shlizerman. Humanerf: Free-viewpoint rendering of moving people from monocular video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16210-16220, 2022. 3 +[63] Felix Wimbauer, Nan Yang, Christian Rupprecht, and Daniel Cremers. Behind the scenes: Density fields for single view reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9076-9086, 2023. 2 +[64] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time + +view synthesis with neural basis expansion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8534-8543, 2021. 2 +[65] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 4 +[66] Jamie Wynn and Daniyar Turmukhambetov. Diffusionerf: Regularizing neural radiance fields with denoising diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4180-4189, 2023. 2 +[67] Wenqi Xian, Jia-Bin Huang, Johannes Kopf, and Changil Kim. Space-time neural irradiance fields for free-viewpoint video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9421-9431, 2021. 1 +[68] Gengshan Yang, Minh Vo, Natalia Neverova, Deva Ramanan, Andrea Vedaldi, and Hanbyul Joo. Banmo: Building animatable 3d neural models from many casual videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2863-2873, 2022. 3 +[69] Jiawei Yang, Marco Pavone, and Yue Wang. 
Freenerf: Improving few-shot neural rendering with free frequency regularization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8254-8263, 2023. 2 +[70] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 4 +[71] Zeyu Yang, Hongye Yang, Zijie Pan, and Li Zhang. Realtime photorealistic dynamic scene representation and rendering with 4d gaussian splatting. 2024. 4 +[72] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 2 +[73] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2 +[74] Fuqiang Zhao, Wei Yang, Jiakai Zhang, Pei Lin, Yingliang Zhang, Jingyi Yu, and Lan Xu. Humannerf: Efficiently generated human radiance field from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7743-7753, 2022. 3 +[75] C Lawrence Zitnick, Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM transactions on graphics (TOG), 23(3):600-608, 2004. 1, 2 +[76] Matthias Zwicker, Hanspeter Pfister, Jeroen Van Baar, and Markus Gross. Ewa volume splatting. In Proceedings Visualization, 2001. VIS'01., pages 29-538. IEEE, 2001. 4 \ No newline at end of file diff --git a/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/images.zip b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..9e9c383eb41d643fac0ed910d1181f7dd232b187 --- /dev/null +++ b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2207be926f40c2e6eaffa80526be9b030bf12d8603670f9ff116878472466e7 +size 560306 diff --git a/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/layout.json b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..7d099399cd2fd8e80a6f19586a4170a73e6af47e --- /dev/null +++ b/2024/3DGStream_ On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos/layout.json @@ -0,0 +1,10179 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 61, + 102, + 534, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 102, + 534, + 140 + ], + "spans": [ + { + "bbox": [ + 61, + 102, + 534, + 140 + ], + "type": "text", + "content": "3DGStream: On-the-Fly Training of 3D Gaussians for Efficient Streaming of Photo-Realistic Free-Viewpoint Videos" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 89, + 161, + 508, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 161, + 508, + 219 + ], + "spans": [ + { + "bbox": [ + 89, + 161, + 508, + 219 + ], + "type": "text", + "content": "Jiakai Sun, Han Jiao, 
Guangyuan Li, Zhanjie Zhang, Lei Zhao*, Wei Xing* Zhejiang University {csjk, csjh, cslgy, cszzj, cszhl, WXING}@zju.edu.cn https://sjojak.github.io/3dgstream" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "spans": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 270, + 290, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 270, + 290, + 534 + ], + "spans": [ + { + "bbox": [ + 46, + 270, + 290, + 534 + ], + "type": "text", + "content": "Constructing photo-realistic Free-Viewpoint Videos (FVVs) of dynamic scenes from multi-view videos remains a challenging endeavor. Despite the remarkable advancements achieved by current neural rendering techniques, these methods generally require complete video sequences for offline training and are not capable of real-time rendering. To address these constraints, we introduce 3DGStream, a method designed for efficient FVV streaming of real-world dynamic scenes. Our method achieves fast on-the-fly per-frame reconstruction within 12 seconds and real-time rendering at 200 FPS. Specifically, we utilize 3D Gaussians (3DGs) to represent the scene. Instead of the naive approach of directly optimizing 3DGs per-frame, we employ a compact Neural Transformation Cache (NTC) to model the translations and rotations of 3DGs, markedly reducing the training time and storage required for each FVV frame. Furthermore, we propose an adaptive 3DG addition strategy to handle emerging objects in dynamic scenes. Experiments demonstrate that 3DGStream achieves competitive performance in terms of rendering speed, image quality, training time, and model storage when compared with state-of-the-art methods." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 542, + 128, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 542, + 128, + 555 + ], + "spans": [ + { + "bbox": [ + 47, + 542, + 128, + 555 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 563, + 287, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 563, + 287, + 695 + ], + "spans": [ + { + "bbox": [ + 46, + 563, + 287, + 695 + ], + "type": "text", + "content": "Constructing Free-Viewpoint Videos (FVVs) from videos captured by a set of known-poses cameras from multiple views remains a frontier challenge within the domains of computer vision and graphics. The potential value and application prospects of this task in the VR/AR/XR domains have attracted much research. Traditional approaches predominantly fall into two categories: geometry-based methods that explicitly reconstruct dynamic graphics primitives [15, 17], and image-based methods that obtain new views through interpolation [7, 75]. 
However, these conventional methods struggle to handle real-world scenes charac" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 244, + 426, + 334 + ], + "blocks": [ + { + "bbox": [ + 307, + 244, + 426, + 334 + ], + "lines": [ + { + "bbox": [ + 307, + 244, + 426, + 334 + ], + "spans": [ + { + "bbox": [ + 307, + 244, + 426, + 334 + ], + "type": "image", + "image_path": "1b3ede0eaefa4c22f620afdbbb3556bf275be31ca051bf8102762170180f27b2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 335, + 423, + 344 + ], + "lines": [ + { + "bbox": [ + 309, + 335, + 423, + 344 + ], + "spans": [ + { + "bbox": [ + 309, + 335, + 423, + 344 + ], + "type": "text", + "content": "(a) I-NGP [40]: Per-frame training" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 427, + 244, + 545, + 334 + ], + "blocks": [ + { + "bbox": [ + 427, + 244, + 545, + 334 + ], + "lines": [ + { + "bbox": [ + 427, + 244, + 545, + 334 + ], + "spans": [ + { + "bbox": [ + 427, + 244, + 545, + 334 + ], + "type": "image", + "image_path": "0918a36a140434114d446465e079766e2a98597dd841d72a4d8d38640297c495.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 428, + 335, + 544, + 344 + ], + "lines": [ + { + "bbox": [ + 428, + 335, + 544, + 344 + ], + "spans": [ + { + "bbox": [ + 428, + 335, + 544, + 344 + ], + "type": "text", + "content": "(b) HyperReel [1]: Offline training" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 307, + 345, + 426, + 434 + ], + "blocks": [ + { + "bbox": [ + 307, + 345, + 426, + 434 + ], + "lines": [ + { + "bbox": [ + 307, + 345, + 426, + 434 + ], + "spans": [ + { + "bbox": [ + 307, + 345, + 426, + 434 + ], + "type": "image", + "image_path": "f51b4a235d5650998f799be73836b78465837a18a055f0a0fdf2bb9a7b1f3df4.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 435, + 424, + 445 + ], + "lines": [ + { + "bbox": [ + 308, + 435, + 424, + 445 + ], + "spans": [ + { + "bbox": [ + 308, + 435, + 424, + 445 + ], + "type": "text", + "content": "(c) StreamRF [29]: Online training" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 427, + 345, + 545, + 434 + ], + "blocks": [ + { + "bbox": [ + 427, + 345, + 545, + 434 + ], + "lines": [ + { + "bbox": [ + 427, + 345, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 427, + 345, + 545, + 434 + ], + "type": "image", + "image_path": "eee2b6190201fb5877944c828dadfa88b4549390a1c435bd585f5a976170d356.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 444, + 435, + 528, + 445 + ], + "lines": [ + { + "bbox": [ + 444, + 435, + 528, + 445 + ], + "spans": [ + { + "bbox": [ + 444, + 435, + 528, + 445 + ], + "type": "text", + "content": "(d) Ours: Online training" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 456, + 545, + 512 + ], + "lines": [ + { + "bbox": [ + 304, + 456, + 545, + 512 + ], + "spans": [ + { + "bbox": [ + 304, + 456, + 545, + 512 + ], + "type": "text", + "content": "Figure 1. Comparison on the flame steak scene of the N3DV dataset [31]. The training time, requisite storage, and PSNR are computed as averages over the whole video. 
Our method stands out by the ability of fast online training and real-time rendering, standing competitive in both model storage and image quality." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 520, + 498, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 520, + 498, + 533 + ], + "spans": [ + { + "bbox": [ + 306, + 520, + 498, + 533 + ], + "type": "text", + "content": "terized by complex geometries and appearance." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 533, + 546, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 546, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 546, + 676 + ], + "type": "text", + "content": "In recent years, Neural Radiance Fields (NeRFs) [36] has garnered significant attention due to its potent capabilities in synthesizing novel views as a 3D volumetric representation. A succession of NeRF-like works [19, 29, 31-33, 43-46, 48, 60, 67] further propelled advancements in constructing FVVs on dynamic scenes. Nonetheless, the vast majority of NeRF-like FVV construction methods encountered two primary limitations: (1) they typically necessitate complete video sequences for time-consuming offline training, meaning they can replay dynamic scenes but are unable to stream them, and (2) they generally fail to achieve real-time rendering, thereby hindering practical applications." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "content": "Recently, Kerbl et al. [26] have achieved real-time radiance field rendering using 3D Gaussians (3DGs), thus enabling the instant synthesis of novel views in static scenes" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 139, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 139, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 139, + 712 + ], + "type": "text", + "content": "*Corresponding authors." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20675" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 72, + 282, + 245 + ], + "blocks": [ + { + "bbox": [ + 50, + 72, + 282, + 245 + ], + "lines": [ + { + "bbox": [ + 50, + 72, + 282, + 245 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 282, + 245 + ], + "type": "image", + "image_path": "f3f37646edea7c3bfd3112b4a5f4973d55a975aa9c3e9700e92fb8305f9195d4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 256, + 287, + 322 + ], + "lines": [ + { + "bbox": [ + 46, + 256, + 287, + 322 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 287, + 322 + ], + "type": "text", + "content": "Figure 2. Comparison of our method with other methods on the N3DV dataset [31]. " + }, + { + "bbox": [ + 46, + 256, + 287, + 322 + ], + "type": "inline_equation", + "content": "\\square" + }, + { + "bbox": [ + 46, + 256, + 287, + 322 + ], + "type": "text", + "content": " denotes training from scratch per frame, " + }, + { + "bbox": [ + 46, + 256, + 287, + 322 + ], + "type": "inline_equation", + "content": "\\triangle" + }, + { + "bbox": [ + 46, + 256, + 287, + 322 + ], + "type": "text", + "content": " represents offline training on complete video sequences, and " + }, + { + "bbox": [ + 46, + 256, + 287, + 322 + ], + "type": "inline_equation", + "content": "\\bigcirc" + }, + { + "bbox": [ + 46, + 256, + 287, + 322 + ], + "type": "text", + "content": " signifies online training on video streams. While achieving online training, our method reaches state-of-the-art performance in both rendering speed and overall training time." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 330, + 287, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 330, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 330, + 287, + 568 + ], + "type": "text", + "content": "with just minutes of training. Inspired by this breakthrough, we propose 3DGStream, a method that utilizes 3DGs to construct Free-Viewpoint Videos (FVVs) of dynamic scenes. Specifically, we first train the initial 3DGs on the multi-view frames at timestep 0. Then, for each timestep " + }, + { + "bbox": [ + 46, + 330, + 287, + 568 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 330, + 287, + 568 + ], + "type": "text", + "content": ", we use the 3DGs of previous timestep " + }, + { + "bbox": [ + 46, + 330, + 287, + 568 + ], + "type": "inline_equation", + "content": "i - 1" + }, + { + "bbox": [ + 46, + 330, + 287, + 568 + ], + "type": "text", + "content": " as initialization and pass it to a two-stage pipeline. (1) In Stage 1, we train a Neural Transformation Cache (NTC) to model the transformations of 3DGs. (2) Then in the Stage 2, we use an adaptive 3DG addition strategy to handle emerging objects by spawning frame-specific additional 3DGs near these objects and optimize them along with periodic splitting and pruning. 
After the two-stage pipeline concludes, we use both the 3DGs transformed by the NTC and the additional 3DGs for rendering at the current timestep " + }, + { + "bbox": [ + 46, + 330, + 287, + 568 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 330, + 287, + 568 + ], + "type": "text", + "content": ", with only the former carrying over for initialization of the subsequent timestep. This design significantly reduces the storage requirements for the FVV, as we only need to store the per-frame NTCs and frame-specific additions, rather than all 3DGs for each frame." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 570, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 287, + 714 + ], + "type": "text", + "content": "3DGStream is capable of rendering photo-realistic FVVs at megapixel resolution in real-time, boasting exceptionally rapid per-frame training speeds and limited model storage requirements. As illustrated in Figs. 1 and 2, compared with static reconstruction methods that train from scratch per-frame and dynamic reconstruction methods that necessitate offline training across the complete video sequences, our approach excels in both training speed and rendering speed, maintaining a competitive edge in image quality and model storage. Furthermore, our method outperforms StreamRF [29], a state-of-the-art technique tackling the exactly same task, in all the relevant aspects." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 317, + 72, + 485, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 485, + 83 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 485, + 83 + ], + "type": "text", + "content": "To summarize, our contributions include:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 87, + 545, + 266 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 306, + 87, + 545, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 87, + 545, + 133 + ], + "spans": [ + { + "bbox": [ + 306, + 87, + 545, + 133 + ], + "type": "text", + "content": "- We propose 3DGStream, a method for on-the-fly construction of photo-realistic, real-time renderable FVV on video streams, eliminating the necessity for lengthy offline training on the entire video sequences." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 134, + 545, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 134, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 306, + 134, + 545, + 205 + ], + "type": "text", + "content": "- We utilize NTC for modeling the transformations of 3DGs, in conjunction with an adaptive 3DG addition strategy to tackle emerging objects within dynamic scenes. This combination permits meticulous manipulation of 3DGs, accommodating scene alterations with limited performance overhead." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 206, + 545, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 206, + 545, + 266 + ], + "spans": [ + { + "bbox": [ + 306, + 206, + 545, + 266 + ], + "type": "text", + "content": "- We conduct extensive experiments to demonstrate 3DGStream's competitive edge in rendering quality, training time, and requisite storage, as well as its superior rendering speed, compared to existing state-of-the-art dynamic scene reconstruction methods." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 278, + 392, + 291 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 278, + 392, + 291 + ], + "spans": [ + { + "bbox": [ + 306, + 278, + 392, + 291 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 298, + 507, + 311 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 298, + 507, + 311 + ], + "spans": [ + { + "bbox": [ + 306, + 298, + 507, + 311 + ], + "type": "text", + "content": "2.1. Novel View Synthesis for Static Scenes" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 316, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 545, + 628 + ], + "type": "text", + "content": "Synthesizing novel views from a set of images of static scenes is a time-honored problem in the domains of computer vision and graphics. Traditional methods such as Lumigraph [8, 22] or Light-Field [10, 16, 28, 50] achieve new view synthesis through interpolation. In recent years, Neural Radiance Fields (NeRF) [36] has achieved photorealistic synthesizing results by representing the radiance field using a multi-layer perceptron (MLP). A series of subsequent works enhance NeRF's performance in various aspects, such as accelerating training speeds [12, 13, 20, 25, 40, 52], achieving real-time rendering [14, 21, 23, 47, 64, 72], and improving synthesis quality on challenging scenes [2-4, 35, 37, 56] or sparse inputs [11, 41, 53, 63, 66, 69, 73]. Since the vanilla NeRF employs costly volume rendering, necessitating neural network queries for rendering, subsequent approaches faced trade-offs in training time, rendering speed, model storage, image quality, and applicability. To address these challenges, Kerbl et al. [26] propose 3D Gaussian Splatting (3DG-S), which integrates of 3DGs with differentiable point-based rendering. 3DG-S enables real-time high-fidelity view synthesis in large-scale unbounded scenes after brief training periods with modest storage requirements. Inspired by this work, we extend its application to the task of constructing FVVs of dynamic scenes. Taking it a step further, we design a on-the-fly training framework to achieve efficient FVV streaming." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 635, + 525, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 635, + 525, + 647 + ], + "spans": [ + { + "bbox": [ + 306, + 635, + 525, + 647 + ], + "type": "text", + "content": "2.2. Free-Viewpoint Videos of Dynamic Scenes" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": "Constructing FVVs from a set of videos of dynamic scenes is a more challenging and applicable task in the domains of computer vision and graphics. Earlier attempts to address this task pivoted around the construction of dynamic primitives [15, 17] or resorting to interpolation [7, 75]. 
With the" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20676" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 69, + 547, + 229 + ], + "blocks": [ + { + "bbox": [ + 48, + 69, + 547, + 229 + ], + "lines": [ + { + "bbox": [ + 48, + 69, + 547, + 229 + ], + "spans": [ + { + "bbox": [ + 48, + 69, + 547, + 229 + ], + "type": "image", + "image_path": "55d1f9333c7c59cf1303ecb9ba739f032e21eebe7ba75e680000a776bbce7bb8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 237, + 547, + 316 + ], + "lines": [ + { + "bbox": [ + 46, + 237, + 547, + 316 + ], + "spans": [ + { + "bbox": [ + 46, + 237, + 547, + 316 + ], + "type": "text", + "content": "Figure 3. Overview of 3DGStream. Given a set of multi-view video streams, 3DGStream aims to construct high-quality FVV stream of the captured dynamic scene on-the-fly. Initially, we optimize a set of 3DGs to represent the scene at timestep 0. For each subsequent timestep " + }, + { + "bbox": [ + 46, + 237, + 547, + 316 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 237, + 547, + 316 + ], + "type": "text", + "content": ", we use the 3DGs from timestep " + }, + { + "bbox": [ + 46, + 237, + 547, + 316 + ], + "type": "inline_equation", + "content": "i - 1" + }, + { + "bbox": [ + 46, + 237, + 547, + 316 + ], + "type": "text", + "content": " as an initialization and then engage in a two-stage training process: Stage 1: We train the Neural Transformation Cache (NTC) to model the translations and rotations of 3DGs. After training, the NTC transforms the 3DGs, preparing them for the next timestep and the next stage in the current timestep. Stage 2: We spawn frame-specific additional 3DGs at potential locations and optimize them along with periodic splitting and pruning. After the two-stage process concludes, both transformed and additional 3DGs are used to render at the current timestep " + }, + { + "bbox": [ + 46, + 237, + 547, + 316 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 237, + 547, + 316 + ], + "type": "text", + "content": ", with only the transformed ones carried into the next timestep." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 321, + 288, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 321, + 288, + 405 + ], + "spans": [ + { + "bbox": [ + 46, + 321, + 288, + 405 + ], + "type": "text", + "content": "success of NeRF-like methods in novel view synthesis for static scenes, a series of works [1, 9, 19, 29-34, 42, 44-46, 48, 51, 55, 57, 59, 61, 62, 68, 74] attempt to use NeRF for constructing FVVs in dynamic scenes. These works can typically be categorized into five types: prior-driven, flow-based, warp-based, those using spatio-temporal inputs, and per-frame training." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 407, + 287, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 407, + 287, + 467 + ], + "spans": [ + { + "bbox": [ + 47, + 407, + 287, + 467 + ], + "type": "text", + "content": "Prior-driven methods [27, 30, 62, 68, 74] leverage parametric models or incorporate additional priors, such as skeletons, to bolster performance on the reconstruction of specific dynamic objects, e.g., humans. However, their application is limited and not generalizable to broader scenes." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 469, + 287, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 469, + 287, + 553 + ], + "spans": [ + { + "bbox": [ + 46, + 469, + 287, + 553 + ], + "type": "text", + "content": "Flow-based methods [32, 33] primarily focus on constructing FVVs from monocular videos. By estimating the correspondence of 3D points in consecutive frames, they achieve impressive results. Nonetheless, the intrinsic ill-posedness of monocular reconstructions in intricate dynamic scenes frequently calls for supplementary priors like depth, optical flow, and motion segmentation masks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 555, + 288, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 555, + 288, + 628 + ], + "spans": [ + { + "bbox": [ + 46, + 555, + 288, + 628 + ], + "type": "text", + "content": "Warp-based methods [1, 42, 44, 46, 51, 55, 61] assume that the dynamics of the scene arise from the deformation of static structures. These methods warp the radiance field of each frame onto one or several canonical frames, achieving notable results. However, the strong assumptions they rely on often prevent them from handling topological variations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 630, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 287, + 715 + ], + "type": "text", + "content": "Methods that use spatio-temporal inputs [9, 19, 31, 45, 48, 57, 58] enhance radiance fields by adding a temporal dimension, enabling the querying of the radiance field using spatio-temporal coordinates. While these techniques showcase a remarkable ability to synthesize new viewpoints in dynamic scenes, the entangled scene parameters can constrain their adaptability for downstream applications." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 321, + 545, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 321, + 545, + 476 + ], + "spans": [ + { + "bbox": [ + 304, + 321, + 545, + 476 + ], + "type": "text", + "content": "Per-frame training methods [29, 34, 59] adapt to changes in the scene online by leveraging per-frame training, a paradigm we have also adopted. To be specific, StreamRF [29] employs Plenoxels [20] for scene representation and achieves rapid on-the-fly training with minimal storage requirements through techniques like narrow band tuning and difference-based compression. ReRF [59] uses DVGO [52] for scene representation and optimize motion grid and residual grid frame by frame to model interframe discrepancies, enabling high-quality FVV streaming and rendering. Dynamic3DG [34] optimizes simplified 3DGs and integrates physically-based priors for high-quality novel view synthesis on dynamic scenes." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 479, + 546, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 479, + 546, + 658 + ], + "spans": [ + { + "bbox": [ + 304, + 479, + 546, + 658 + ], + "type": "text", + "content": "Among the aforementioned works, only NeRF-Player [51], ReRF [59], StreamRF [29], and Dynamic3DG [34] are able to stream FVVs. NeRFPlayer achieves FVV streaming through a decomposition module and a feature streaming module, but it is only able to stream pre-trained models. ReRF and Dynamic3DG are limited to processing scenes with few objects and foreground mask, necessitating minute-level per-frame training times. StreamRF stands out by requiring only a few seconds for each frame's training to construct high-fidelity FVVs on challenging real-world dynamic scenes with compressed model storage. However, it falls short in rendering speed. Contrarily, our approach matches or surpasses StreamRF in training speed, model storage, and image quality, all while achieving real-time rendering at 200 FPS." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 670, + 417, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 417, + 681 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 417, + 681 + ], + "type": "text", + "content": "2.3. Concurrent Works" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "Except for Dynamic3DG, several concurrent works have extended 3DG-S to represent dynamic scenes. De" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20677" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "content": "formable3DG [70] employs an MLP to model the deformation of 3DGs, while [65] introduces a hexplane-based encoder to enhance the efficiency of deformation query. Meanwhile, [18, 71] lift 3DG to 4DG primitives for dynamic scene representation. However, these approaches are limited to offline reconstruction and lack streamable capabilities, whereas our work aims to achieve efficient streaming of FFVs with an online training paradigm." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 172, + 249, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 172, + 249, + 186 + ], + "spans": [ + { + "bbox": [ + 47, + 172, + 249, + 186 + ], + "type": "text", + "content": "3. Background: 3D Gaussian Splitting" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 192, + 287, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 192, + 287, + 240 + ], + "spans": [ + { + "bbox": [ + 46, + 192, + 287, + 240 + ], + "type": "text", + "content": "3D Gaussian Splitting (3DG-S) [26] employs anisotropic 3D Gaussians as an explicit scene representation. 
Paired with a fast differentiable rasterizer, 3DGs achieves real-time novel view synthesis with only minutes of training." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 246, + 249, + 259 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 246, + 249, + 259 + ], + "spans": [ + { + "bbox": [ + 47, + 246, + 249, + 259 + ], + "type": "text", + "content": "3.1. 3D Gaussians as Scene Representation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 265, + 287, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 265, + 287, + 289 + ], + "spans": [ + { + "bbox": [ + 46, + 265, + 287, + 289 + ], + "type": "text", + "content": "A 3DG is defined by a covariance matrix " + }, + { + "bbox": [ + 46, + 265, + 287, + 289 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 46, + 265, + 287, + 289 + ], + "type": "text", + "content": " centered at point (i.e., mean) " + }, + { + "bbox": [ + 46, + 265, + 287, + 289 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 46, + 265, + 287, + 289 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 94, + 293, + 287, + 310 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 293, + 287, + 310 + ], + "spans": [ + { + "bbox": [ + 94, + 293, + 287, + 310 + ], + "type": "interline_equation", + "content": "G (x; \\mu , \\Sigma) = e ^ {- \\frac {1}{2} (x - \\mu) ^ {T} \\Sigma^ {- 1} (x - \\mu)}. \\tag {1}", + "image_path": "ca9894344e066b0ffb7143494c81f22c3f9f4af0cf86c65f080026b280458574.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 316, + 287, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 316, + 287, + 352 + ], + "spans": [ + { + "bbox": [ + 46, + 316, + 287, + 352 + ], + "type": "text", + "content": "To ensure positive semi-definiteness during optimization, the covariance matrix " + }, + { + "bbox": [ + 46, + 316, + 287, + 352 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 46, + 316, + 287, + 352 + ], + "type": "text", + "content": " is decomposed into a rotation matrix " + }, + { + "bbox": [ + 46, + 316, + 287, + 352 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 46, + 316, + 287, + 352 + ], + "type": "text", + "content": " and a scaling matrix " + }, + { + "bbox": [ + 46, + 316, + 287, + 352 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 46, + 316, + 287, + 352 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 357, + 287, + 371 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 357, + 287, + 371 + ], + "spans": [ + { + "bbox": [ + 132, + 357, + 287, + 371 + ], + "type": "interline_equation", + "content": "\\Sigma = R S S ^ {T} R ^ {T}. \\tag {2}", + "image_path": "18fbaec7b13e9370b735bcd82d954a44941edbc90f46452e44c4f361916e7822.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 378, + 287, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 378, + 287, + 438 + ], + "spans": [ + { + "bbox": [ + 46, + 378, + 287, + 438 + ], + "type": "text", + "content": "Rotation is conveniently represented by a unit quaternion, while scaling uses a 3D vector. 
Additionally, each 3DG contains a set of spherical harmonics (SH) coefficients of to represent view-dependent colors, along with an opacity value " + }, + { + "bbox": [ + 46, + 378, + 287, + 438 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 378, + 287, + 438 + ], + "type": "text", + "content": ", which is used in " + }, + { + "bbox": [ + 46, + 378, + 287, + 438 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 378, + 287, + 438 + ], + "type": "text", + "content": "-blending (Eq. (4))." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 444, + 262, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 444, + 262, + 456 + ], + "spans": [ + { + "bbox": [ + 47, + 444, + 262, + 456 + ], + "type": "text", + "content": "3.2. Splitting for Differentiable Rasterization" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 462, + 287, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 462, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 46, + 462, + 287, + 487 + ], + "type": "text", + "content": "For novel view synthesis, 3DG-S [26] project 3DGs to 2D Gaussian (2DG) splats [76]:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 124, + 491, + 287, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 491, + 287, + 506 + ], + "spans": [ + { + "bbox": [ + 124, + 491, + 287, + 506 + ], + "type": "interline_equation", + "content": "\\Sigma^ {\\prime} = J W \\Sigma W ^ {T} J ^ {T}. \\tag {3}", + "image_path": "4f925d9101cd78799e23e28e6bc295cfb20e63a208dfcda6e7b7393c8d209d2c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "spans": [ + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "inline_equation", + "content": "\\Sigma^{\\prime}" + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "text", + "content": " is the covariance matrix in camera coordinate. " + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "text", + "content": " is the Jacobian of the affine approximation of the projective transformation, and " + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "text", + "content": " is the viewing transformation matrix. By skipping the third row and third column of " + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "inline_equation", + "content": "\\Sigma^{\\prime}" + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "text", + "content": ", we can derive a " + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "inline_equation", + "content": "2\\times 2" + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "text", + "content": " matrix denoted as " + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "inline_equation", + "content": "\\Sigma_{2d}" + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "text", + "content": ". 
Furthermore, projecting the 3DG's mean, " + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "text", + "content": ", into the image space results in a 2D mean, " + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "inline_equation", + "content": "\\mu_{2d}" + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "text", + "content": ". Consequently, this allows us to define the 2DG in the image space as " + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "inline_equation", + "content": "G_{2d}(x;\\mu_{2d},\\Sigma_{2d})" + }, + { + "bbox": [ + 46, + 512, + 287, + 608 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 609, + 287, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 609, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 46, + 609, + 287, + 633 + ], + "type": "text", + "content": "Using " + }, + { + "bbox": [ + 46, + 609, + 287, + 633 + ], + "type": "inline_equation", + "content": "\\Sigma^{\\prime}" + }, + { + "bbox": [ + 46, + 609, + 287, + 633 + ], + "type": "text", + "content": ", the color " + }, + { + "bbox": [ + 46, + 609, + 287, + 633 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 46, + 609, + 287, + 633 + ], + "type": "text", + "content": " of a pixel can be computed by blending the " + }, + { + "bbox": [ + 46, + 609, + 287, + 633 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 609, + 287, + 633 + ], + "type": "text", + "content": " ordered points overlapping the pixel:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 110, + 639, + 287, + 671 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 639, + 287, + 671 + ], + "spans": [ + { + "bbox": [ + 110, + 639, + 287, + 671 + ], + "type": "interline_equation", + "content": "C = \\sum_ {i \\in N} c _ {i} \\alpha_ {i} ^ {\\prime} \\prod_ {j = 1} ^ {i - 1} \\left(1 - \\alpha_ {j} ^ {\\prime}\\right). \\tag {4}", + "image_path": "22cd575bc8f5faa4b1aa9cfb349ec725c7d6b13253d89602a223c64b34e7918e.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "c_{i}" + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": " denotes the view-dependent color of the " + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": "-th 3DG. 
" + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\alpha_{i}^{\\prime}" + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": " is determined by multiplying the opacity " + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\alpha_{i}" + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": " of the " + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": "-th 3DG " + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": " with the evaluation of the corresponding 2DG " + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "G_{2d}" + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "Leveraging a highly-optimized rasterization pipeline coupled with custom CUDA kernels, the training and rendering of 3DG-S are remarkably fast. For instance, for megapixel-scale real-world scenes, just a few minutes of optimization allows 3DGs to achieve photo-realistic visual quality and rendering speeds surpassing 100 FPS." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 154, + 361, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 154, + 361, + 166 + ], + "spans": [ + { + "bbox": [ + 306, + 154, + 361, + 166 + ], + "type": "text", + "content": "4. Method" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 173, + 545, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 173, + 545, + 378 + ], + "spans": [ + { + "bbox": [ + 304, + 173, + 545, + 378 + ], + "type": "text", + "content": "3DGStream constructs photo-realistic FVV streams from multi-view video streams on-the-fly using a per-frame training paradigm. We initiate the process by training 3DGs [26] at timestep 0. For subsequent timesteps, we employ the previous timestep's 3DGs as an initialization and pass them to a two-stage pipeline. Firstly (Sec. 4.1), a Neural Transformation Cache (NTC) is trained to model the transformation for each 3DG. Once the training is finished, we transform the 3DGs and carry the transformed 3DGs to the next timestep. Secondly (Sec. 4.2), we employ an adaptive 3DG addition strategy to handle emerging objects. For each FVV frame, we render views at the current timestep using both the transformed 3DGs and additional 3DGs, while the latter are not passed to the next timestep. Note that we only need to train and store the parameters of the NTC and the additional 3DGs for each subsequent timestep, not all the 3DGs. We depict an overview of our approach in Fig. 3." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 384, + 469, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 384, + 469, + 396 + ], + "spans": [ + { + "bbox": [ + 305, + 384, + 469, + 396 + ], + "type": "text", + "content": "4.1. 
Neural Transformation Cache" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 403, + 545, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 403, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 304, + 403, + 545, + 498 + ], + "type": "text", + "content": "For NTC, we seek a structure that is compact, efficient, and adaptive to model the transformations of 3DGs. Compactness is essential to reduce the model storage. Efficiency enhances training and inference speeds. Adaptivity ensures the model focuses more on dynamic regions. Additionally, it would be beneficial if the structure could consider certain priors of dynamic scenes [5, 24, 54], such as the tendency for neighboring parts of an object to have similar motion." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "text", + "content": "Inspired by Neural Radiance Caching [39] and I-NGP [40], we employ multi-resolution hash encoding combined with a shallow fully-fused MLP [38] as the NTC. Specifically, following I-NGP, we use multi-resolution voxel grids to represent the scene. Voxel grids at each resolution are mapped to a hash table storing a " + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "text", + "content": "-dimensional learnable feature vector. For a given 3D position " + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "text", + "content": ", its hash encoding at resolution " + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "text", + "content": ", denoted as " + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "inline_equation", + "content": "h(x;l) \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "text", + "content": ", is the linear interpolation of the feature vectors corresponding to the eight corners of the surrounding grid. Consequently, its multi-resolution hash encoding " + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "inline_equation", + "content": "h(x) = [h(x;0), h(x;1), \\dots, h(x;L - 1)] \\in \\mathbb{R}^{Ld}" + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 498, + 545, + 666 + ], + "type": "text", + "content": " represents the number of resolution levels. The multi-resolution hash encoding addresses all our requirements for the NTC:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 666, + 544, + 712 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 306, + 666, + 544, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 666, + 544, + 689 + ], + "spans": [ + { + "bbox": [ + 306, + 666, + 544, + 689 + ], + "type": "text", + "content": "- Compactness: Hashing effectively reduces the storage space needed for encoding the whole scene." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 689, + 544, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 544, + 712 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 544, + 712 + ], + "type": "text", + "content": "- Efficiency: Hash table lookup operates in " + }, + { + "bbox": [ + 306, + 689, + 544, + 712 + ], + "type": "inline_equation", + "content": "O(1)" + }, + { + "bbox": [ + 306, + 689, + 544, + 712 + ], + "type": "text", + "content": ", and is highly compatible with modern GPUs." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20678" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 286, + 167 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 47, + 72, + 286, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 286, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 286, + 120 + ], + "type": "text", + "content": "- Adaptivity: Hash collisions occur in hash tables at finer resolutions, allowing regions with larger gradients—representing dynamic regions in our context—to drive the optimization." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 120, + 286, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 120, + 286, + 167 + ], + "spans": [ + { + "bbox": [ + 47, + 120, + 286, + 167 + ], + "type": "text", + "content": "- Priors: The combination of linear interpolation and the voxel-grid structure ensures the local smoothness of transformations. Additionally, the multi-resolution approach adeptly merges global and local information." + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 168, + 286, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 168, + 286, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 168, + 286, + 251 + ], + "type": "text", + "content": "Furthermore, to enhance the NTC's performance with minimal overhead, we utilize a shallow fully-fused MLP [38]. This maps the hash encoding to a 7-dimensional output: the first three dimensions indicate the translation of the 3DG; the remaining dimensions represent the rotation of the 3DG using quaternions. 
Given multi-resolution hash encoding coupled with MLP, our NTC is formalized as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 118, + 260, + 286, + 274 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 260, + 286, + 274 + ], + "spans": [ + { + "bbox": [ + 118, + 260, + 286, + 274 + ], + "type": "interline_equation", + "content": "d \\mu , d q = M L P (h (\\mu)), \\tag {5}", + "image_path": "1c947a2fef8fdab5b44cfb5ae28ef02e1c97e9af0c2eff13859e256797ba35dc.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 282, + 286, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 282, + 286, + 318 + ], + "spans": [ + { + "bbox": [ + 46, + 282, + 286, + 318 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 282, + 286, + 318 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 46, + 282, + 286, + 318 + ], + "type": "text", + "content": " denotes the mean of the input 3DG. We transform the 3DGs based on " + }, + { + "bbox": [ + 46, + 282, + 286, + 318 + ], + "type": "inline_equation", + "content": "d\\mu" + }, + { + "bbox": [ + 46, + 282, + 286, + 318 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 282, + 286, + 318 + ], + "type": "inline_equation", + "content": "dq" + }, + { + "bbox": [ + 46, + 282, + 286, + 318 + ], + "type": "text", + "content": ". Specifically, the following parameters of the transformed 3DGs are given as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 318, + 286, + 437 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 47, + 318, + 286, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 318, + 286, + 341 + ], + "spans": [ + { + "bbox": [ + 47, + 318, + 286, + 341 + ], + "type": "text", + "content": "- Mean: " + }, + { + "bbox": [ + 47, + 318, + 286, + 341 + ], + "type": "inline_equation", + "content": "\\mu' = \\mu + d\\mu" + }, + { + "bbox": [ + 47, + 318, + 286, + 341 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 318, + 286, + 341 + ], + "type": "inline_equation", + "content": "\\mu'" + }, + { + "bbox": [ + 47, + 318, + 286, + 341 + ], + "type": "text", + "content": " is the new mean and " + }, + { + "bbox": [ + 47, + 318, + 286, + 341 + ], + "type": "inline_equation", + "content": "+" + }, + { + "bbox": [ + 47, + 318, + 286, + 341 + ], + "type": "text", + "content": " represents vector addition." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 342, + 286, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 342, + 286, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 342, + 286, + 376 + ], + "type": "text", + "content": "- Rotation: " + }, + { + "bbox": [ + 47, + 342, + 286, + 376 + ], + "type": "inline_equation", + "content": "q' = \\text{norm}(q) \\times \\text{norm}(dq)" + }, + { + "bbox": [ + 47, + 342, + 286, + 376 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 342, + 286, + 376 + ], + "type": "inline_equation", + "content": "q'" + }, + { + "bbox": [ + 47, + 342, + 286, + 376 + ], + "type": "text", + "content": " is the new rotation, " + }, + { + "bbox": [ + 47, + 342, + 286, + 376 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 47, + 342, + 286, + 376 + ], + "type": "text", + "content": " indicates quaternion multiplication and norm denotes normalization." 
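As a concrete illustration of the mean and rotation update rules above (the SH-coefficient update described next is omitted), here is a minimal PyTorch sketch, not the authors' released code: the `ntc` argument stands in for the hash-encoding-plus-MLP cache of Eq. (5), and its 7-dimensional output is split into a translation and a quaternion exactly as in the Mean and Rotation rules. Tensor shapes and the `quat_mul` helper are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def quat_mul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Hamilton product of two batches of quaternions stored as (w, x, y, z)."""
    aw, ax, ay, az = a.unbind(-1)
    bw, bx, by, bz = b.unbind(-1)
    return torch.stack((
        aw * bw - ax * bx - ay * by - az * bz,
        aw * bx + ax * bw + ay * bz - az * by,
        aw * by - ax * bz + ay * bw + az * bx,
        aw * bz + ax * by - ay * bx + az * bw,
    ), dim=-1)

def transform_gaussians(ntc, mu: torch.Tensor, q: torch.Tensor):
    """Apply cached per-Gaussian transforms: mu' = mu + d_mu, q' = norm(q) * norm(dq).

    `ntc` is any callable mapping N x 3 positions to an N x 7 output whose first
    three channels are the translation d_mu and whose last four are the rotation dq.
    The SH rotation step is intentionally left out of this sketch.
    """
    out = ntc(mu)                                   # N x 7
    d_mu, dq = out[:, :3], out[:, 3:]
    mu_new = mu + d_mu                              # Mean update
    q_new = quat_mul(F.normalize(q, dim=-1),        # Rotation update
                     F.normalize(dq, dim=-1))
    return mu_new, q_new
```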
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 378, + 286, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 378, + 286, + 437 + ], + "spans": [ + { + "bbox": [ + 47, + 378, + 286, + 437 + ], + "type": "text", + "content": "- SH Coefficients: Upon rotating the 3DG, the SH coefficients should also be adjusted to align with the rotation of the 3DG. Leveraging the rotation invariance of SH, we directly employ SH Rotation to update SHs. Please refer to the supplementary materials (Suppl.) for details." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 437, + 286, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 286, + 497 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 286, + 497 + ], + "type": "text", + "content": "In Stage 1, we transform the 3DGs from the previous frame by NTC and then render with them. The parameters of the NTC is optimized by the loss between the rendered image and the ground truth. Following 3DG-S [26], the loss function is " + }, + { + "bbox": [ + 46, + 437, + 286, + 497 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 46, + 437, + 286, + 497 + ], + "type": "text", + "content": " combined with a D-SSIM term:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 102, + 506, + 286, + 518 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 506, + 286, + 518 + ], + "spans": [ + { + "bbox": [ + 102, + 506, + 286, + 518 + ], + "type": "interline_equation", + "content": "L = (1 - \\lambda) L _ {1} + \\lambda L _ {D - S S I M}, \\tag {6}", + "image_path": "c62a15de499304243a8a75ff185f29e3867226ec6eacab510f19079e141d2057.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 527, + 286, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 527, + 286, + 575 + ], + "spans": [ + { + "bbox": [ + 46, + 527, + 286, + 575 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 527, + 286, + 575 + ], + "type": "inline_equation", + "content": "\\lambda = 0.2" + }, + { + "bbox": [ + 46, + 527, + 286, + 575 + ], + "type": "text", + "content": " in all our experiments. It should be noted that during the training process, the 3DGs from the previous frame remain frozen and do not undergo any updates. This implies that the input to the NTC remains consistent." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 575, + 286, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 575, + 286, + 611 + ], + "spans": [ + { + "bbox": [ + 46, + 575, + 286, + 611 + ], + "type": "text", + "content": "Additionally, to ensure training stability, we initialize the NTC with warm-up parameters. 
The loss employed during the warm-up is defined as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 76, + 619, + 286, + 633 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 619, + 286, + 633 + ], + "spans": [ + { + "bbox": [ + 76, + 619, + 286, + 633 + ], + "type": "interline_equation", + "content": "L _ {\\text {w a r m} - u p} = \\left\\| d \\mu \\right\\| _ {1} - \\cos^ {2} (\\operatorname {n o r m} (d q), Q), \\tag {7}", + "image_path": "838fc8fbae119938c5f31cb10f55120b8315d509715ecf8fc49975bf53988b34.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "type": "text", + "content": " is the identity quaternion. The first term uses the " + }, + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "type": "text", + "content": " norm to ensure the estimated translation approaches zero, while the second term, leveraging cosine similarity, ensures the estimated rotation approximates no rotation. However, given the double-covering property of the unit quaternions, we use the square of the cosine similarity. For" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "content": "each scene, we execute the warm-up solely after the training at timestep 0, using noise-augmented means of the initial 3DGs as input. After 3000 iterations of training (roughly 20 seconds), the parameters are stored and used to initialize the NTCs for all the following timesteps." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 142, + 440, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 142, + 440, + 155 + ], + "spans": [ + { + "bbox": [ + 305, + 142, + 440, + 155 + ], + "type": "text", + "content": "4.2. Adaptive 3DG Addition" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 161, + 545, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 161, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 304, + 161, + 545, + 365 + ], + "type": "text", + "content": "Relying solely on 3DGs transformations adequately cover a significant portion of real-world dynamic scenes, with translations effectively managing occlusions and disappearances in subsequent timesteps. However, this approach falters when faced with objects not present in the initial frame, such as transient objects like flames or smoke, and new persistent objects like the liquid poured out of a bottle. Since 3DG is an unstructured explicit representation, it's essential to add new 3DGs to model these emerging objects. Considering constraints related to model storage requirements and training complexities, it's not feasible to generate an extensive number of additional 3DGs nor allow them to be used in subsequent frames, as this would cause 3DGs to accumulate over time. 
This necessitates a strategy for swiftly generating a limited number of frame-specific 3DGs to model these emerging objects precisely and thereby enhance the completeness of the scene at the current timestep." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 365, + 545, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 365, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 304, + 365, + 545, + 497 + ], + "type": "text", + "content": "Firstly, we need to ascertain the locations for the emerging objects. Inspired by 3DG-S [26], we recognized the view-space positional gradients of 3DGs as a key indicator. We observed that for emerging objects, the 3DGs in proximity exhibited large view-space positional gradients. This is attributed to the optimization attempting to 'masquerade' the emerging object by transforming the 3DGs. However, since we prevent the colors of the 3DGs from being updated in Stage 1, this attempt falls short. Nonetheless, they are still transformed to appropriate positions, with large view-space positional gradients." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": "Based on the aforementioned observations, we deem it appropriate to introduce additional 3DGs around these high-gradient regions. Moreover, to exhaustively capture every potential location where new objects might emerge, we adopt an adaptive 3DG spawn strategy. Specifically, we track view-space positional gradient during the final training epoch of Stage 1. Once this stage concludes, we select 3DGs that have an average magnitude of view-space position gradients exceeding a relatively low threshold " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\tau_{\\text{grad}} = 0.00015" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": ". For each selected 3DG, the position of the additional 3DG is sampled from " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "X \\sim \\mathcal{N}(\\mu, 2\\Sigma)" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " is the mean and the covariance matrix of the selected 3DG. While we avoid assumptions about the other attributes of the additional 3DGs, improper initializations of SH coefficients and scaling vectors tend to result in an optimization preference for reducing opacity over adjusting these parameters. This causes additional 3DGs to quickly become transparent, thereby failing to capture the emerging objects. 
To mitigate" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20679" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 146, + 151 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 146, + 151 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 146, + 151 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 146, + 151 + ], + "type": "image", + "image_path": "52d98daa9bb69c47e43f3e12ed485e442b0635e4e088dcea92ccdaaf91986686.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 49, + 152, + 145, + 249 + ], + "blocks": [ + { + "bbox": [ + 49, + 152, + 145, + 249 + ], + "lines": [ + { + "bbox": [ + 49, + 152, + 145, + 249 + ], + "spans": [ + { + "bbox": [ + 49, + 152, + 145, + 249 + ], + "type": "image", + "image_path": "ae26b7747c376143137a582a93b2850c98b6cff2b532905bd821721976106b8b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 73, + 251, + 122, + 261 + ], + "lines": [ + { + "bbox": [ + 73, + 251, + 122, + 261 + ], + "spans": [ + { + "bbox": [ + 73, + 251, + 122, + 261 + ], + "type": "text", + "content": "(a) I-NGP [40]" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 148, + 70, + 246, + 151 + ], + "blocks": [ + { + "bbox": [ + 148, + 70, + 246, + 151 + ], + "lines": [ + { + "bbox": [ + 148, + 70, + 246, + 151 + ], + "spans": [ + { + "bbox": [ + 148, + 70, + 246, + 151 + ], + "type": "image", + "image_path": "bd062e4a5cf408db25ebf02c6f7531404be0b2225855137eb1501564c34fec85.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 148, + 152, + 246, + 249 + ], + "blocks": [ + { + "bbox": [ + 148, + 152, + 246, + 249 + ], + "lines": [ + { + "bbox": [ + 148, + 152, + 246, + 249 + ], + "spans": [ + { + "bbox": [ + 148, + 152, + 246, + 249 + ], + "type": "image", + "image_path": "fd03f12ece401bbc6c970708387398d4f14ea125baaa835db430a9c1f14401d3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 168, + 251, + 227, + 261 + ], + "lines": [ + { + "bbox": [ + 168, + 251, + 227, + 261 + ], + "spans": [ + { + "bbox": [ + 168, + 251, + 227, + 261 + ], + "type": "text", + "content": "(b) HyperReel [1]" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 248, + 70, + 345, + 151 + ], + "blocks": [ + { + "bbox": [ + 248, + 70, + 345, + 151 + ], + "lines": [ + { + "bbox": [ + 248, + 70, + 345, + 151 + ], + "spans": [ + { + "bbox": [ + 248, + 70, + 345, + 151 + ], + "type": "image", + "image_path": "a9d751f4253cc380e4540e6800e1d04ba16adc81ba059b71274d8e2e3968c70f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 248, + 152, + 345, + 249 + ], + "blocks": [ + { + "bbox": [ + 248, + 152, + 345, + 249 + ], + "lines": [ + { + "bbox": [ + 248, + 152, + 345, + 249 + ], + "spans": [ + { + "bbox": [ + 248, + 152, + 345, + 249 + ], + "type": "image", + 
"image_path": "55703ea1220f0eac49eb846d6e1a479105004cf1203af013fcb7264ae33a8c60.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 268, + 251, + 326, + 261 + ], + "lines": [ + { + "bbox": [ + 268, + 251, + 326, + 261 + ], + "spans": [ + { + "bbox": [ + 268, + 251, + 326, + 261 + ], + "type": "text", + "content": "(c) StreamRF [29]" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 56, + 270, + 535, + 281 + ], + "lines": [ + { + "bbox": [ + 56, + 270, + 535, + 281 + ], + "spans": [ + { + "bbox": [ + 56, + 270, + 535, + 281 + ], + "type": "text", + "content": "Figure 4. Qualitative comparisons on the discussion scene of the Meet Room dataset and the sear steak scene of the N3DV dataset." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 347, + 70, + 446, + 151 + ], + "blocks": [ + { + "bbox": [ + 347, + 70, + 446, + 151 + ], + "lines": [ + { + "bbox": [ + 347, + 70, + 446, + 151 + ], + "spans": [ + { + "bbox": [ + 347, + 70, + 446, + 151 + ], + "type": "image", + "image_path": "a81ba47ebcbf2aaa2aa7a9f5e1e45665b6700bfa8ef732ef0b140374e946c99a.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 347, + 152, + 445, + 249 + ], + "blocks": [ + { + "bbox": [ + 347, + 152, + 445, + 249 + ], + "lines": [ + { + "bbox": [ + 347, + 152, + 445, + 249 + ], + "spans": [ + { + "bbox": [ + 347, + 152, + 445, + 249 + ], + "type": "image", + "image_path": "08e78befab9c69bf2cce80314d4bf13b63e00c8785f420dbebb8eaadccbfa376.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 371, + 251, + 422, + 260 + ], + "lines": [ + { + "bbox": [ + 371, + 251, + 422, + 260 + ], + "spans": [ + { + "bbox": [ + 371, + 251, + 422, + 260 + ], + "type": "text", + "content": "(d) 3DGStream" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 447, + 70, + 545, + 151 + ], + "blocks": [ + { + "bbox": [ + 447, + 70, + 545, + 151 + ], + "lines": [ + { + "bbox": [ + 447, + 70, + 545, + 151 + ], + "spans": [ + { + "bbox": [ + 447, + 70, + 545, + 151 + ], + "type": "image", + "image_path": "2bd76b79bc5ffc64bcc77a83db5e8fa3e0d8304f7009fe26e9ce2c50bde8ab37.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 447, + 152, + 545, + 249 + ], + "blocks": [ + { + "bbox": [ + 447, + 152, + 545, + 249 + ], + "lines": [ + { + "bbox": [ + 447, + 152, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 447, + 152, + 545, + 249 + ], + "type": "image", + "image_path": "89a217cf6df7540d2b61524a00535766caa0b6d2ca7e0c8eb2e34169c6e63d1b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 468, + 251, + 524, + 260 + ], + "lines": [ + { + "bbox": [ + 468, + 251, + 524, + 260 + ], + "spans": [ + { + "bbox": [ + 468, + 251, + 524, + 260 + ], + "type": "text", + "content": "(e) Ground Truth" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 48, + 286, + 286, + 406 + ], + "blocks": [ + { + "bbox": [ + 48, + 286, + 286, + 406 + ], + "lines": [ + { + "bbox": [ + 48, + 286, + 286, + 406 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 286, + 406 + ], + "type": "table", + 
"html": "
<table>
<tr><th>Category</th><th>Method</th><th>PSNR↑ (dB)</th><th>Storage↓ (MB)</th><th>Train↓ (mins)</th><th>Render↑ (FPS)</th><th>Streamable</th></tr>
<tr><td rowspan="3">Static</td><td>Plenoxels [20]</td><td>30.77</td><td>4106</td><td>23</td><td>8.3</td><td></td></tr>
<tr><td>I-NGP [40]</td><td>28.62</td><td>48.2</td><td>1.3</td><td>2.9</td><td></td></tr>
<tr><td>3DG-S [26]</td><td>32.08</td><td>47.1</td><td>8.3</td><td>390</td><td></td></tr>
<tr><td rowspan="6">Offline</td><td>DyNeRF [31]</td><td>29.58†</td><td>0.1</td><td>260</td><td>0.02</td><td>×</td></tr>
<tr><td>NeRFPlayer [51]</td><td>30.69</td><td>17.1</td><td>1.2</td><td>0.05</td><td></td></tr>
<tr><td>HexPlane [9]</td><td>31.70</td><td>0.8</td><td>2.4</td><td>0.21</td><td>×</td></tr>
<tr><td>K-Planes [48]</td><td>31.63</td><td>1.0</td><td>0.8</td><td>0.15</td><td>×</td></tr>
<tr><td>HyperReel [1]</td><td>31.10</td><td>1.2</td><td>1.8</td><td>2.00</td><td>×</td></tr>
<tr><td>MixVoxels [57]</td><td>30.80</td><td>1.7</td><td>0.27</td><td>16.7</td><td>×</td></tr>
<tr><td rowspan="2">Online</td><td>StreamRF [29]</td><td>30.68</td><td>17.7/31.4*</td><td>0.25</td><td>8.3</td><td></td></tr>
<tr><td>Ours</td><td>31.67</td><td>7.6/7.8*</td><td>0.20</td><td>215</td><td></td></tr>
</table>
", + "image_path": "0108cbb951f65f09206c0d7ddc125f4f5bf4a8c605bda25510074c5c5f4dffa0.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 46, + 472, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 472, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 287, + 555 + ], + "type": "text", + "content": "this issue, the SH coefficients and scaling vectors of these 3DGs are derived from the selected ones, with rotations set to the identity quaternion " + }, + { + "bbox": [ + 46, + 472, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{q} = [1,0,0,0]" + }, + { + "bbox": [ + 46, + 472, + 287, + 555 + ], + "type": "text", + "content": " and opacity initialized at 0.1. After spawning, the 3DGs undergo optimization utilizing the same loss function (Eq. (6)) as Stage 1. Note that only the parameters of the additional 3DGs are optimized, while those of the transformed 3DGs remain fixed." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 46, + 558, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 287, + 715 + ], + "type": "text", + "content": "To guard against local minima and manage the number of additional 3DGs, we implement an adaptive 3DG quantity control strategy. Specifically, in Stage 2, we set a relatively high threshold, " + }, + { + "bbox": [ + 46, + 558, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\tau_{\\alpha} = 0.01" + }, + { + "bbox": [ + 46, + 558, + 287, + 715 + ], + "type": "text", + "content": ", for the opacity value. At the end of each training epoch, for 3DGs with view-space position gradients exceeding " + }, + { + "bbox": [ + 46, + 558, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\tau_{grad}" + }, + { + "bbox": [ + 46, + 558, + 287, + 715 + ], + "type": "text", + "content": ", we spawn additional 3DGs nearby to address under-reconstructed regions. These additional 3DGs inherit their rotations and SH coefficients from the original 3DG, but their scaling is adjusted to " + }, + { + "bbox": [ + 46, + 558, + 287, + 715 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 46, + 558, + 287, + 715 + ], + "type": "text", + "content": " of the original, mirroring the 'split' operation described by Kerbl et al. [26]. Subsequently, we discard any additional 3DGs with opacity values below " + }, + { + "bbox": [ + 46, + 558, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\tau_{\\alpha}" + }, + { + "bbox": [ + 46, + 558, + 287, + 715 + ], + "type": "text", + "content": " to suppress the growth in the quantity of 3DGs." + } + ] + } + ], + "index": 19 + }, + { + "type": "table", + "bbox": [ + 307, + 286, + 550, + 388 + ], + "blocks": [ + { + "bbox": [ + 46, + 414, + 287, + 459 + ], + "lines": [ + { + "bbox": [ + 46, + 414, + 287, + 459 + ], + "spans": [ + { + "bbox": [ + 46, + 414, + 287, + 459 + ], + "type": "text", + "content": "Table 1. Quantitative comparison on the N3DV dataset. The training time, required storage and PSNR are averaged over the whole 300 frames for each scene. " + }, + { + "bbox": [ + 46, + 414, + 287, + 459 + ], + "type": "inline_equation", + "content": "{}^{ \\dagger }" + }, + { + "bbox": [ + 46, + 414, + 287, + 459 + ], + "type": "text", + "content": " DyNeRF [31] only report metrics on the flame salmon scene. *Considering the initial model." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 286, + 550, + 388 + ], + "lines": [ + { + "bbox": [ + 307, + 286, + 550, + 388 + ], + "spans": [ + { + "bbox": [ + 307, + 286, + 550, + 388 + ], + "type": "table", + "html": "
<table>
<tr><th>Method</th><th>PSNR↑ (dB)</th><th>Storage↓ (MB)</th><th>Train↓ (mins)</th><th>Render↑ (FPS)</th></tr>
<tr><td>Plenoxels [20]</td><td>27.15</td><td>1015</td><td>14</td><td>10</td></tr>
<tr><td>I-NGP [40]</td><td>28.10</td><td>48.2</td><td>1.1</td><td>4.1</td></tr>
<tr><td>3DG-S [26]</td><td>31.31</td><td>21.1</td><td>2.6</td><td>571</td></tr>
<tr><td>StreamRF [29]</td><td>26.72</td><td>5.7/9.0*</td><td>0.17</td><td>10</td></tr>
<tr><td>Ours</td><td>30.79</td><td>4.0/4.1*</td><td>0.10</td><td>288</td></tr>
</table>
", + "image_path": "c69731b2a882ffd31136f0f1f56c76ed0a191acce903b3c90d595b11d0e14d2f.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 396, + 545, + 430 + ], + "lines": [ + { + "bbox": [ + 305, + 396, + 545, + 430 + ], + "spans": [ + { + "bbox": [ + 305, + 396, + 545, + 430 + ], + "type": "text", + "content": "Table 2. Quantitative comparison on the Meet Room dataset. Note that the training time, required storage and PSNR are averaged over the whole 300 frames. *Considering the initial model." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 434, + 387, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 434, + 387, + 449 + ], + "spans": [ + { + "bbox": [ + 306, + 434, + 387, + 449 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 454, + 368, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 454, + 368, + 466 + ], + "spans": [ + { + "bbox": [ + 306, + 454, + 368, + 466 + ], + "type": "text", + "content": "5.1. Datasets" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 472, + 545, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 472, + 545, + 496 + ], + "spans": [ + { + "bbox": [ + 305, + 472, + 545, + 496 + ], + "type": "text", + "content": "We conduct experiments on two real-world dynamic scene datasets: N3DV dataset [31] and Meet Room dataset [29]." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 497, + 545, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 497, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 304, + 497, + 545, + 568 + ], + "type": "text", + "content": "N3DV dataset [31] is captured using a multi-view system of 21 cameras, comprises dynamic scenes recorded at a resolution of " + }, + { + "bbox": [ + 304, + 497, + 545, + 568 + ], + "type": "inline_equation", + "content": "2704 \\times 2028" + }, + { + "bbox": [ + 304, + 497, + 545, + 568 + ], + "type": "text", + "content": " and 30 FPS. Following previous works [9, 29, 31, 48, 51, 57], we downsample the videos by a factor of two and follow the training and validation camera split provided by [31]." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 569, + 545, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 569, + 545, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 569, + 545, + 617 + ], + "type": "text", + "content": "Meet Room dataset [29] is captured using a 13-camera multi-view system, comprises dynamic scenes recorded at a resolution of " + }, + { + "bbox": [ + 304, + 569, + 545, + 617 + ], + "type": "inline_equation", + "content": "1280 \\times 720" + }, + { + "bbox": [ + 304, + 569, + 545, + 617 + ], + "type": "text", + "content": " and 30 FPS. Following [29], we utilize 13 views for training and reserved 1 for testing." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 306, + 623, + 403, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 623, + 403, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 623, + 403, + 635 + ], + "type": "text", + "content": "5.2. 
Implementation" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 304, + 641, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 545, + 714 + ], + "type": "text", + "content": "We implement 3DGStream upon the codes of 3D Gaussian Splitting (3DG-S) [26], and implement the Neural Transformation Cache (NTC) using tiny-cuda-nn [38]. For the training of initial 3DGs, we fine-tune the learning rates on the N3DV dataset based on the default settings of 3DG-S, and apply them to the Meet Room dataset. For all scenes," + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20680" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 72, + 285, + 245 + ], + "blocks": [ + { + "bbox": [ + 50, + 72, + 285, + 245 + ], + "lines": [ + { + "bbox": [ + 50, + 72, + 285, + 245 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 285, + 245 + ], + "type": "image", + "image_path": "d0d6fadb2b57332d5acb9dfad6d2677720664f253140eb705b21e072bacd049a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 257, + 288, + 291 + ], + "lines": [ + { + "bbox": [ + 46, + 257, + 288, + 291 + ], + "spans": [ + { + "bbox": [ + 46, + 257, + 288, + 291 + ], + "type": "text", + "content": "Figure 5. Comparison of different approaches for modeling the transformation of 3DGs. Conducted on the second frame of the flame salmon video, utilizing identical initial 3DGs." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 50, + 297, + 284, + 470 + ], + "blocks": [ + { + "bbox": [ + 50, + 297, + 284, + 470 + ], + "lines": [ + { + "bbox": [ + 50, + 297, + 284, + 470 + ], + "spans": [ + { + "bbox": [ + 50, + 297, + 284, + 470 + ], + "type": "image", + "image_path": "e8290a6e386565d983f49d3106dee95b43297f1a82f85d16bd6caac628c70b0e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 481, + 287, + 503 + ], + "lines": [ + { + "bbox": [ + 47, + 481, + 287, + 503 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 287, + 503 + ], + "type": "text", + "content": "Figure 6. Comparison of different approaches on the flame salmon scene." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 56, + 508, + 284, + 582 + ], + "blocks": [ + { + "bbox": [ + 56, + 508, + 284, + 582 + ], + "lines": [ + { + "bbox": [ + 56, + 508, + 284, + 582 + ], + "spans": [ + { + "bbox": [ + 56, + 508, + 284, + 582 + ], + "type": "table", + "html": "
<table>
<tr><th>Variant</th><th>PSNR↑ (dB)</th><th>#Additional 3DGs↓</th></tr>
<tr><td>Baseline</td><td>28.39</td><td>0</td></tr>
<tr><td>Rnd. Spawn</td><td>28.39</td><td>971.9</td></tr>
<tr><td>w/o Quant. Ctrl.</td><td>28.43</td><td>8710.8</td></tr>
<tr><td>Full Model</td><td>28.42</td><td>477.7</td></tr>
</table>
", + "image_path": "2d227832986811198cc67295c19fdcc46e0f93fc6149dbbb143b71710ea42980.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 590, + 287, + 623 + ], + "lines": [ + { + "bbox": [ + 46, + 590, + 287, + 623 + ], + "spans": [ + { + "bbox": [ + 46, + 590, + 287, + 623 + ], + "type": "text", + "content": "Table 3. Ablation study of the Adaptive 3DG Addition strategy on the flame salmon scene. The metrics are averaged over the whole sequence." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 624, + 288, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 624, + 288, + 661 + ], + "spans": [ + { + "bbox": [ + 46, + 624, + 288, + 661 + ], + "type": "text", + "content": "we train the NTC for 150 iterations in Stage 1. and train the additional 3DGs for 100 iterations in Stage 2. Please refer to Suppl. for more details." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 670, + 133, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 133, + 683 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 133, + 683 + ], + "type": "text", + "content": "5.3. Comparisons" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 689, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 288, + 713 + ], + "type": "text", + "content": "Quantitative comparisons. Our quantitative analysis involves benchmarking 3DGStream on the N3DV dataset and" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 307, + 70, + 386, + 129 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 386, + 129 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 386, + 129 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 386, + 129 + ], + "type": "image", + "image_path": "32dae74192b7fd1ee5083dfdcf9c39c6bf2b758c5eef9cf1ce8771508d0a9dfa.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 307, + 130, + 386, + 189 + ], + "blocks": [ + { + "bbox": [ + 307, + 130, + 386, + 189 + ], + "lines": [ + { + "bbox": [ + 307, + 130, + 386, + 189 + ], + "spans": [ + { + "bbox": [ + 307, + 130, + 386, + 189 + ], + "type": "image", + "image_path": "929c859c4667c346e914af0d40aec71632d76f0090bcf438c4b9e516eb4ddbf4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 190, + 380, + 201 + ], + "lines": [ + { + "bbox": [ + 313, + 190, + 380, + 201 + ], + "spans": [ + { + "bbox": [ + 313, + 190, + 380, + 201 + ], + "type": "text", + "content": "(a) Result of Stage 1" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 387, + 70, + 465, + 129 + ], + "blocks": [ + { + "bbox": [ + 387, + 70, + 465, + 129 + ], + "lines": [ + { + "bbox": [ + 387, + 70, + 465, + 129 + ], + "spans": [ + { + "bbox": [ + 387, + 70, + 465, + 129 + ], + "type": "image", + "image_path": "d3f16064cb27a3f787bbc3dd059602dc774e5454cc24a07a5c886dc52a45d911.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 387, + 130, + 465, + 189 + ], + "blocks": [ + { + "bbox": [ + 387, + 130, + 465, + 189 + ], + "lines": [ + { + "bbox": [ + 387, + 130, + 465, + 189 + ], + "spans": [ + { + "bbox": [ + 387, + 130, 
+ 465, + 189 + ], + "type": "image", + "image_path": "89ead230eb52d15332e8e78864a044f4dedfbbb4f775193ede3b623a760103bd.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 392, + 190, + 460, + 201 + ], + "lines": [ + { + "bbox": [ + 392, + 190, + 460, + 201 + ], + "spans": [ + { + "bbox": [ + 392, + 190, + 460, + 201 + ], + "type": "text", + "content": "(b) Result of Stage 2" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 467, + 70, + 546, + 129 + ], + "blocks": [ + { + "bbox": [ + 467, + 70, + 546, + 129 + ], + "lines": [ + { + "bbox": [ + 467, + 70, + 546, + 129 + ], + "spans": [ + { + "bbox": [ + 467, + 70, + 546, + 129 + ], + "type": "image", + "image_path": "32a4544da6bed84849eb6edc553687d85f4361b2400e4297c8c6199ad4ae282c.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 467, + 130, + 545, + 189 + ], + "blocks": [ + { + "bbox": [ + 467, + 130, + 545, + 189 + ], + "lines": [ + { + "bbox": [ + 467, + 130, + 545, + 189 + ], + "spans": [ + { + "bbox": [ + 467, + 130, + 545, + 189 + ], + "type": "image", + "image_path": "cbf84c21826185045c50ad4b71ca180440b5be0a7b216b812950e8175b92799c.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 477, + 191, + 534, + 201 + ], + "lines": [ + { + "bbox": [ + 477, + 191, + 534, + 201 + ], + "spans": [ + { + "bbox": [ + 477, + 191, + 534, + 201 + ], + "type": "text", + "content": "(c) Ground Truth" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 211, + 545, + 233 + ], + "lines": [ + { + "bbox": [ + 306, + 211, + 545, + 233 + ], + "spans": [ + { + "bbox": [ + 306, + 211, + 545, + 233 + ], + "type": "text", + "content": "Figure 7. Quantitative results of the ablation study conducted on the flame steak scene and the coffee martini scene." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 243, + 546, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 243, + 546, + 471 + ], + "spans": [ + { + "bbox": [ + 304, + 243, + 546, + 471 + ], + "type": "text", + "content": "Meet Room dataset, comparing it with a range of representative methods. We take Plenoxels [20], I-NGP [40], and 3DG-S [26] as representatives of fast static scene reconstruction methods, training them from scratch for each frame. StreamRF [29], Dynamic3DG [34], and ReRF [60] are designed for online training in dynamic scenes. Owing to the limitations of Dynamic3DG and ReRF, which necessitate foreground masks and are confined to scenes with fewer objects, and their minute-level per-frame training times, we select StreamRF selected as the representative for online training methods due to its adaptability and training feasibility on the N3DV and MeetRoom datasets. To demonstrate 3DGStream's competitive image quality, we drew comparisons with the quantitative results reported for the N3DV dataset in the respective papers of DyNeRF [31], NeRFPlayer [51], HexPlane [9], K-Planes [48], HyperReel [1], and MixVoxels [57], all of which are methods for reconstructing dynamic scenes through offline training on entire video sequences." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 472, + 547, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 472, + 547, + 674 + ], + "spans": [ + { + "bbox": [ + 304, + 472, + 547, + 674 + ], + "type": "text", + "content": "In Tab. 1, we present the averaged rendering speed, training time, required storage, and peak signal-to-noise ratio (PSNR) over all scenes of the N3DV dataset. For each scene, the latter three metrics are computed as averages over the whole 300 frames. Besides, we provide a breakdown of comparisons across all scenes within the N3DV dataset in the Suppl. To demonstrate the generality of our method, we conducted experiments on the MeetRoom dataset, as introduced by StreamRF [29], and performed a quantitative comparison against Plenoxels [20], I-NGP [40], 3DG-S [26], and StreamRF [29]. The results are presented in Tab. 2. As presented in Tabs. 1 and 2, our method demonstrates superiority through fast online training and real-time rendering, concurrently maintaining a competitive edge in terms of model storage and image quality. Furthermore, among the methods capable of streaming FVVs, our model requires the minimal model storage." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "type": "text", + "content": "Qualitative comparisons. While our approach primarily aims to enhance the efficiency of online FVV construction, as illustrated in Tabs. 1 and 2, it still achieves competitive" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20681" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 79, + 70, + 255, + 159 + ], + "blocks": [ + { + "bbox": [ + 79, + 70, + 255, + 159 + ], + "lines": [ + { + "bbox": [ + 79, + 70, + 255, + 159 + ], + "spans": [ + { + "bbox": [ + 79, + 70, + 255, + 159 + ], + "type": "table", + "html": "
StepOverhead (ms)FPS
Render w/o NTC2.56390
+ Query NTC0.62
+ Transformation0.02
+ SH Rotation1.46
Total4.66215
", + "image_path": "a783aca32dcf45351a39b20276a1258e5801bdcead4cf631c1c05a82130b5f48.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 168, + 287, + 202 + ], + "lines": [ + { + "bbox": [ + 46, + 168, + 287, + 202 + ], + "spans": [ + { + "bbox": [ + 46, + 168, + 287, + 202 + ], + "type": "text", + "content": "Table 4. Rendering profiling for the flame salmon scene at megapixel resolution. Note that flame salmon is the most time-consuming to render of all scenes in our experiments." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 209, + 287, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 209, + 287, + 316 + ], + "spans": [ + { + "bbox": [ + 46, + 209, + 287, + 316 + ], + "type": "text", + "content": "image quality. In Fig. 4, we present a qualitative comparison with I-NGP [40], HyperReel [1], and StreamRF [29] across scenes on the N3DV dataset [31] and the Meet Room dataset [29], with a special emphasis on dynamic objects such as faces, hands, and tongs, as well as intricate objects like labels and statues. It is evident that our method faithfully captures the dynamics of the scene without sacrificing the ability to reconstruct intricate objects. Please refer to our project page for more video results." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 324, + 126, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 324, + 126, + 335 + ], + "spans": [ + { + "bbox": [ + 47, + 324, + 126, + 335 + ], + "type": "text", + "content": "5.4. Evaluations" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 342, + 287, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 287, + 569 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 287, + 569 + ], + "type": "text", + "content": "Neural Transformation Cache. We utilize distinct approaches to model the transformations of 3DGs from the first to the second frame within the flame salmon video of the N3DV dataset to show the effectiveness of NTC. Fig. 5 shows that, without multi-resolution hash encoding " + }, + { + "bbox": [ + 46, + 342, + 287, + 569 + ], + "type": "inline_equation", + "content": "(w / o" + }, + { + "bbox": [ + 46, + 342, + 287, + 569 + ], + "type": "text", + "content": " Hash enc.), the MLP faces challenges in modeling transformations effectively. Additionally, without the warm-up " + }, + { + "bbox": [ + 46, + 342, + 287, + 569 + ], + "type": "inline_equation", + "content": "(w / o" + }, + { + "bbox": [ + 46, + 342, + 287, + 569 + ], + "type": "text", + "content": " Warm-up), it takes more iterations for convergence. Besides, even when compared with the direct optimization of the previous frame's 3DGs (Direct Opt.), NTC demonstrate on-par performance. In Fig. 6, We present the results of different approaches applied across the entire flame salmon video, excluding the first frame (i.e., Frame 0). " + }, + { + "bbox": [ + 46, + 342, + 287, + 569 + ], + "type": "inline_equation", + "content": "w / o" + }, + { + "bbox": [ + 46, + 342, + 287, + 569 + ], + "type": "text", + "content": " Hash enc. and " + }, + { + "bbox": [ + 46, + 342, + 287, + 569 + ], + "type": "inline_equation", + "content": "w / o" + }, + { + "bbox": [ + 46, + 342, + 287, + 569 + ], + "type": "text", + "content": " Warm-up. are not able to converge swiftly, resulting in accumulating errors as the sequence progresses. Direct Opt. 
yields the best outcomes but at the cost of inflated storage. Utilizing NTC, in contrast, delivers comparable results with substantially lower storage overhead by eliminating the need for saving all the 3DGs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 570, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 287, + 714 + ], + "type": "text", + "content": "Adaptive 3DG Addition. Tab. 3 presents the quantitative results of the ablation study conducted on the flame salmon scene, and more results are presented in Suppl. The base model without Stage 2, and a set of randomly spawned 3DGs (Rnd. Spawn) in equivalent quantities to our spawn strategy, both fail to capture emerging objects. The variant without our quantity control strategy (" + }, + { + "bbox": [ + 46, + 570, + 287, + 714 + ], + "type": "inline_equation", + "content": "w/o" + }, + { + "bbox": [ + 46, + 570, + 287, + 714 + ], + "type": "text", + "content": " Quant. Ctrl.) manages to model emerging objects but requires a significantly larger number of additional 3DGs. In contrast, our full model proficiently reconstructs emerging objects using a minimal addition of 3DGs. The ablation study illustrated in Fig. 7 qualitatively showcases the effect of the Adap" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "content": "tive 3DG Addition strategy, highlighting its ability to reconstruct the objects not present in the initial frame, such as coffee in a pot, a dog's tongue, and flames." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 108, + 545, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 108, + 545, + 287 + ], + "spans": [ + { + "bbox": [ + 304, + 108, + 545, + 287 + ], + "type": "text", + "content": "Real-time Rendering. Following 3DG-S [26], we employ the SIBR framework [6] to measure the rendering speed. Once all resources required are loaded onto the GPU, the additional overhead of our approach is primarily the time taken to query the NTC and transform the 3DGs. As detailed in Tab. 4, our method benefits from the efficiency of the multi-resolution hash encoding and the fully-fused MLP [38], which facilitate rapid NTC query. Notably, the most time-consuming step is the SH Rotation. However, our experiments indicate that the SH rotation has a minimal impact on the reconstruction quality, which may be attributed to the 3DGs modeling view-dependent colors through alternative mechanisms (e.g., small 3DGs of varying colors surrounding the object) rather than SH coefficients. Nonetheless, we maintain SH rotation for theoretical soundness." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 297, + 375, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 297, + 375, + 309 + ], + "spans": [ + { + "bbox": [ + 306, + 297, + 375, + 309 + ], + "type": "text", + "content": "6. Discussion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 318, + 545, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 318, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 318, + 545, + 437 + ], + "type": "text", + "content": "The quality of 3DG-S [26] on the initial frame is crucial to 3DGStream. 
Therefore, we inherit the limitations of 3DG-S, such as high dependence on the initial point cloud. As illustrated in Fig. 7, there are obvious artifacts beyond the windows, attributable to COLMAP's [49] inability to reconstruct distant landscapes. Hence, our method stands to benefit directly from future enhancements to 3DG-S. Moreover, for efficient on-the-fly training, we limit the number of training iterations, which restricts modeling of drastic motion in Stage 1 and complex emerging objects in Stage 2." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 448, + 378, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 448, + 378, + 460 + ], + "spans": [ + { + "bbox": [ + 306, + 448, + 378, + 460 + ], + "type": "text", + "content": "7. Conclusion" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 468, + 545, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 468, + 545, + 599 + ], + "spans": [ + { + "bbox": [ + 304, + 468, + 545, + 599 + ], + "type": "text", + "content": "We propose 3DGStream, a novel method for efficient Free-Viewpoint Video streaming. Based on 3DG-S [26], we utilize an effective Neural Transformation Cache to capture the motion of objects. In addition, we propose an Adaptive 3DG Addition strategy to accurately model emerging objects in dynamic scenes. The two-stage pipeline of 3DGStream enables the online reconstruction of dynamic scenes in video streams. While ensuring photo-realistic image quality, 3DGStream achieves on-the-fly training (" + }, + { + "bbox": [ + 304, + 468, + 545, + 599 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 304, + 468, + 545, + 599 + ], + "type": "text", + "content": "10s per-frame) and real-time rendering (" + }, + { + "bbox": [ + 304, + 468, + 545, + 599 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 304, + 468, + 545, + 599 + ], + "type": "text", + "content": "200FPS) at megapixel resolution with moderate requisite storage." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 609, + 416, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 609, + 416, + 623 + ], + "spans": [ + { + "bbox": [ + 306, + 609, + 416, + 623 + ], + "type": "text", + "content": "8. Acknowledgement" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "content": "This work was supported in part by Zhejiang Province Program (2022C01222, 2023C03199, 2023C03201), the National Program of China (62172365, 2021YFF0900604, 19ZDA197), Ningbo Science and Technology Plan Project (022Z167, 2023Z137), and MOE Frontier Science Center for Brain Science & Brain-Machine Integration (Zhejiang University)."
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20682" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "text", + "content": "[1] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 1, 3, 6, 7, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 158, + 288, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 158, + 288, + 222 + ], + "spans": [ + { + "bbox": [ + 53, + 158, + 288, + 222 + ], + "type": "text", + "content": "[2] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 224, + 288, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 288, + 278 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 288, + 278 + ], + "type": "text", + "content": "[3] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5470-5479, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 279, + 287, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 279, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 53, + 279, + 287, + 312 + ], + "type": "text", + "content": "[4] Jonathan T. Barron, Ben Mildenhall, Dor Verbin, Pratul P. Srinivasan, and Peter Hedman. Zip-nerf: Anti-aliased grid-based neural radiance fields. ICCV, 2023. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 312, + 287, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 312, + 287, + 356 + ], + "spans": [ + { + "bbox": [ + 53, + 312, + 287, + 356 + ], + "type": "text", + "content": "[5] Michael J Black and Paul Anandan. The robust estimation of multiple motions: Parametric and piecewise-smooth flow fields. Computer vision and image understanding, 63(1):75-104, 1996. 
4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 357, + 287, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 357, + 287, + 401 + ], + "spans": [ + { + "bbox": [ + 53, + 357, + 287, + 401 + ], + "type": "text", + "content": "[6] Sebastien Bonopera, Jerome Esnault, Siddhant Prakash, Simon Rodriguez, Theo Thonat, Mehdi Benadel, Gaurav Chaurasia, Julien Philip, and George Drettakis. sibr: A system for image based rendering, 2020. 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 402, + 287, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 402, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 402, + 287, + 456 + ], + "type": "text", + "content": "[7] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 457, + 287, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 457, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 53, + 457, + 287, + 491 + ], + "type": "text", + "content": "[8] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In SIGGRAPH, pages 425-432, 2001. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 491, + 287, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 491, + 287, + 512 + ], + "spans": [ + { + "bbox": [ + 53, + 491, + 287, + 512 + ], + "type": "text", + "content": "[9] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. CVPR, 2023. 3, 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "type": "text", + "content": "[10] Jin-Xiang Chai, Xin Tong, Shing-Chow Chan, and Heung-Yeung Shum. Plenoptic sampling. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 307-318, 2000. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "type": "text", + "content": "[11] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14124-14133, 2021. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 613, + 287, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 646 + ], + "type": "text", + "content": "[12] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In European Conference on Computer Vision (ECCV), 2022. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 647, + 287, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 680 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 680 + ], + "type": "text", + "content": "[13] Anpei Chen, Zexiang Xu, Xinyue Wei, Siyu Tang, Hao Su, and Andreas Geiger. Dictionary fields: Learning a neural basis decomposition. ACM Trans. Graph., 2023. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 681, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 287, + 714 + ], + "type": "text", + "content": "[14] Zhiqin Chen, Thomas Funkhouser, Peter Hedman, and Andrea Tagliasacchi. Mobilenerf: Exploiting the polygon rasterization pipeline for efficient neural field rendering on mo" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "bile architectures. In The Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 96, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 150 + ], + "type": "text", + "content": "[15] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (TOG), 34(4):69, 2015. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 152, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 152, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 545, + 184 + ], + "type": "text", + "content": "[16] Abe Davis, Marc Levoy, and Fredo Durand. Unstructured light fields. Comput. Graph. Forum, 31(2pt1):305-314, 2012. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 186, + 545, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 186, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 308, + 186, + 545, + 240 + ], + "type": "text", + "content": "[17] Mingsong Dou, Philip Davidson, Sean Ryan Fanello, Sameh Khamis, Adarsh Kowdle, Christoph Rhemann, Vladimir Tankovich, and Shahram Izadi. Motion2fusion: Real-time volumetric performance capture. ACM Trans. Graph., 36(6): 246:1-246:16, 2017. 1, 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 241, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 241, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 308, + 241, + 545, + 285 + ], + "type": "text", + "content": "[18] Yuanxing Duan, Fangyin Wei, Qiyu Dai, Yuhang He, Wenzheng Chen, and Baoquan Chen. 4d gaussian splatting: Towards efficient novel view synthesis for dynamic scenes. arXiv preprint arXiv:2402.03307, 2024. 
4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 286, + 545, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 286, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 308, + 286, + 545, + 331 + ], + "type": "text", + "content": "[19] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In SIGGRAPH Asia 2022 Conference Papers, 2022. 1, 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 332, + 547, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 332, + 547, + 387 + ], + "spans": [ + { + "bbox": [ + 308, + 332, + 547, + 387 + ], + "type": "text", + "content": "[20] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 2, 3, 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 388, + 545, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 388, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 308, + 388, + 545, + 442 + ], + "type": "text", + "content": "[21] Stephan J. Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. Fastnerf: High-fidelity neural rendering at 200fps. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14346-14355, 2021. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 444, + 545, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 444, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 308, + 444, + 545, + 499 + ], + "type": "text", + "content": "[22] Steven J. Gortler, Radek Grzesczuk, Richard Szeliski, and Michael F. Cohen. The lumigraph. In Proceedings of the 23rd Annual Conference on Computer Graphics and Interactive Techniques, page 43-54, New York, NY, USA, 1996. Association for Computing Machinery. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 500, + 545, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 500, + 545, + 554 + ], + "spans": [ + { + "bbox": [ + 308, + 500, + 545, + 554 + ], + "type": "text", + "content": "[23] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5855-5864, 2021. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 555, + 545, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 555, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 308, + 555, + 545, + 578 + ], + "type": "text", + "content": "[24] Berthold KP Horn and Brian G Schunck. Determining optical flow. Artificial intelligence, 17(1-3):185-203, 1981. 4" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 579, + 545, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 579, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 308, + 579, + 545, + 622 + ], + "type": "text", + "content": "[25] Wenbo Hu, Yuling Wang, Lin Ma, Bangbang Yang, Lin Gao, Xiao Liu, and Yuewen Ma. Tri-miprf: Tri-mip representation for efficient anti-aliasing neural radiance fields. In ICCV, 2023. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 624, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 545, + 668 + ], + "type": "text", + "content": "[26] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics, 42 (4), 2023. 1, 2, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 669, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 669, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 669, + 545, + 712 + ], + "type": "text", + "content": "[27] Tobias Kirschstein, Shenhan Qian, Simon Giebenhain, Tim Walter, and Matthias Nießner. Nersemble: Multi-view radiance field reconstruction of human heads. arXiv preprint arXiv:2305.03027, 2023. 3" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20683" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 286, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 286, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 286, + 95 + ], + "type": "text", + "content": "[28] Marc Levoy and Pat Hanrahan. Light field rendering. In SIGGRAPH, pages 31-42, 1996. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 96, + 286, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 96, + 286, + 129 + ], + "spans": [ + { + "bbox": [ + 49, + 96, + 286, + 129 + ], + "type": "text", + "content": "[29] Lingzhi Li, Zhen Shen, Zhongshu Wang, Li Shen, and Ping Tan. Streaming radiance fields for 3d video synthesis. In NeurIPS, 2022. 1, 2, 3, 6, 7, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 130, + 286, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 130, + 286, + 185 + ], + "spans": [ + { + "bbox": [ + 49, + 130, + 286, + 185 + ], + "type": "text", + "content": "[30] Ruilong Li, Julian Tanke, Minh Vo, Michael Zollhöfer, Jürgen Gall, Angjoo Kanazawa, and Christoph Lassner. Tava: Template-free animatable volumetric actors. In European Conference on Computer Vision, pages 419-436. Springer, 2022. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 186, + 286, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 186, + 286, + 262 + ], + "spans": [ + { + "bbox": [ + 49, + 186, + 286, + 262 + ], + "type": "text", + "content": "[31] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 
1, 2, 3, 6, 7, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 264, + 286, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 264, + 286, + 318 + ], + "spans": [ + { + "bbox": [ + 49, + 264, + 286, + 318 + ], + "type": "text", + "content": "[32] Zhengqi Li, Simon Niklaus, Noah Snively, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 320, + 286, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 320, + 286, + 374 + ], + "spans": [ + { + "bbox": [ + 49, + 320, + 286, + 374 + ], + "type": "text", + "content": "[33] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 1, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 376, + 286, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 376, + 286, + 409 + ], + "spans": [ + { + "bbox": [ + 49, + 376, + 286, + 409 + ], + "type": "text", + "content": "[34] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In 3DV, 2024. 3, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 411, + 286, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 411, + 286, + 453 + ], + "spans": [ + { + "bbox": [ + 49, + 411, + 286, + 453 + ], + "type": "text", + "content": "[35] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In CVPR, 2021. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 456, + 286, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 456, + 286, + 510 + ], + "spans": [ + { + "bbox": [ + 49, + 456, + 286, + 510 + ], + "type": "text", + "content": "[36] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 1, 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 512, + 286, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 512, + 286, + 576 + ], + "spans": [ + { + "bbox": [ + 49, + 512, + 286, + 576 + ], + "type": "text", + "content": "[37] Ben Mildenhall, Peter Hedman, Ricardo Martin-Brualla, Pratul P Srinivasan, and Jonathan T Barron. Nerf in the dark: High dynamic range view synthesis from noisy raw images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16190-16199, 2022. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 578, + 235, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 578, + 235, + 589 + ], + "spans": [ + { + "bbox": [ + 49, + 578, + 235, + 589 + ], + "type": "text", + "content": "[38] Thomas Müller. tiny-cuda-nn, 2021. 
4, 5, 6, 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 590, + 286, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 590, + 286, + 633 + ], + "spans": [ + { + "bbox": [ + 49, + 590, + 286, + 633 + ], + "type": "text", + "content": "[39] Thomas Müller, Fabrice Rousselle, Jan Novák, and Alexander Keller. Real-time neural radiance caching for path tracing. ACM Transactions on Graphics (TOG), 40(4):1-16, 2021. 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 635, + 286, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 635, + 286, + 678 + ], + "spans": [ + { + "bbox": [ + 49, + 635, + 286, + 678 + ], + "type": "text", + "content": "[40] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, 2022. 1, 2, 4, 6, 7, 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 681, + 286, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 681, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 49, + 681, + 286, + 713 + ], + "type": "text", + "content": "[41] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5480-5490, 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 108, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 108, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 308, + 108, + 545, + 162 + ], + "type": "text", + "content": "[42] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 164, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 164, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 308, + 164, + 545, + 217 + ], + "type": "text", + "content": "[43] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 220, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 220, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 308, + 220, + 545, + 274 + ], + "type": "text", + "content": "[44] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M. Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. 
ACM Trans. Graph., 40(6), 2021. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 276, + 545, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 276, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 308, + 276, + 545, + 330 + ], + "type": "text", + "content": "[45] Suntheon Park, Minjung Son, Seokhwan Jang, Young Chun Ahn, Ji-Yeon Kim, and Nahiyup Kang. Temporal interpolation is all you need for dynamic neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4212-4221, 2023. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 332, + 545, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 332, + 545, + 385 + ], + "spans": [ + { + "bbox": [ + 308, + 332, + 545, + 385 + ], + "type": "text", + "content": "[46] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10318-10327, 2021. 1, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 388, + 545, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 388, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 308, + 388, + 545, + 442 + ], + "type": "text", + "content": "[47] Christian Reiser, Rick Szeliski, Dor Verbin, Pratul Srinivasan, Ben Mildenhall, Andreas Geiger, Jon Barron, and Peter Hedman. Merf: Memory-efficient radiance fields for real-time view synthesis in unbounded scenes. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 445, + 545, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 445, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 308, + 445, + 545, + 487 + ], + "type": "text", + "content": "[48] Sara Fridovich-Keil and Giacomo Meanti, Frederik Rahbæk Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In CVPR, 2023. 1, 3, 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 489, + 545, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 489, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 308, + 489, + 545, + 521 + ], + "type": "text", + "content": "[49] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 523, + 545, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 523, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 308, + 523, + 545, + 567 + ], + "type": "text", + "content": "[50] Heung-Yeung Shum and Li-Wei He. Rendering with concentric mosaics. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 299–306, 1999. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 569, + 545, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 569, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 308, + 569, + 545, + 633 + ], + "type": "text", + "content": "[51] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. 
IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 3, 6, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "type": "text", + "content": "[52] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5449-5459, 2022. 2, 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "text", + "content": "[53] Jiakai Sun, Zhanjie Zhang, Jiafu Chen, Guangyuan Li, Boyan Ji, Lei Zhao, and Wei Xing. Vgos: Voxel grid opti" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "20684" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 67, + 73, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 287, + 127 + ], + "type": "text", + "content": "mization for view synthesis from sparse inputs. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, IJCAI-23, pages 1414-1422. International Joint Conferences on Artificial Intelligence Organization, 2023. Main Track. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 287, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 287, + 171 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 287, + 171 + ], + "type": "text", + "content": "[54] Carlo Tomasi and Takeo Kanade. Shape and motion from image streams under orthography: a factorization method. International journal of computer vision, 9:137-154, 1992. 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 175, + 287, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 175, + 287, + 240 + ], + "spans": [ + { + "bbox": [ + 49, + 175, + 287, + 240 + ], + "type": "text", + "content": "[55] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12959-12970, 2021. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 243, + 287, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 243, + 287, + 284 + ], + "spans": [ + { + "bbox": [ + 49, + 243, + 287, + 284 + ], + "type": "text", + "content": "[56] Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T. Barron, and Pratul P. Srinivasan. 
Ref-NeRF: Structured view-dependent appearance for neural radiance fields. CVPR, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 287, + 287, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 287, + 287, + 341 + ], + "spans": [ + { + "bbox": [ + 49, + 287, + 287, + 341 + ], + "type": "text", + "content": "[57] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, Yafei Song, and Huaping Liu. Mixed neural voxels for fast multiview video synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19706-19716, 2023. 3, 6, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 343, + 287, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 343, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 49, + 343, + 287, + 407 + ], + "type": "text", + "content": "[58] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 411, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 411, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 49, + 411, + 287, + 464 + ], + "type": "text", + "content": "[59] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 76-87, 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 467, + 287, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 467, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 49, + 467, + 287, + 521 + ], + "type": "text", + "content": "[60] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 76-87, 2023. 1, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 523, + 287, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 523, + 287, + 566 + ], + "spans": [ + { + "bbox": [ + 49, + 523, + 287, + 566 + ], + "type": "text", + "content": "[61] Qianqian Wang, Yen-Yu Chang, Ruojin Cai, Zhengqi Li, Bharath Hariharan, Aleksander Holynski, and Noah Snavely. Tracking everything everywhere all at once. In International Conference on Computer Vision, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 568, + 287, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 568, + 287, + 632 + ], + "spans": [ + { + "bbox": [ + 49, + 568, + 287, + 632 + ], + "type": "text", + "content": "[62] Chung-Yi Weng, Brian Curless, Pratul P Srinivasan, Jonathan T Barron, and Ira Kemelmacher-Shlizerman. Humanerf: Free-viewpoint rendering of moving people from monocular video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16210-16220, 2022. 
3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 635, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 635, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 49, + 635, + 287, + 689 + ], + "type": "text", + "content": "[63] Felix Wimbauer, Nan Yang, Christian Rupprecht, and Daniel Cremers. Behind the scenes: Density fields for single view reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9076-9086, 2023. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 692, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 692, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 49, + 692, + 287, + 713 + ], + "type": "text", + "content": "[64] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 688 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "text", + "content": "view synthesis with neural basis expansion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8534-8543, 2021. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 107, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 545, + 150 + ], + "type": "text", + "content": "[65] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 152, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 152, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 545, + 205 + ], + "type": "text", + "content": "[66] Jamie Wynn and Daniyar Turmukhambetov. Diffusionerf: Regularizing neural radiance fields with denoising diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4180-4189, 2023. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 208, + 545, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 208, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 308, + 208, + 545, + 262 + ], + "type": "text", + "content": "[67] Wenqi Xian, Jia-Bin Huang, Johannes Kopf, and Changil Kim. Space-time neural irradiance fields for free-viewpoint video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9421-9431, 2021. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 264, + 545, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 264, + 545, + 318 + ], + "spans": [ + { + "bbox": [ + 308, + 264, + 545, + 318 + ], + "type": "text", + "content": "[68] Gengshan Yang, Minh Vo, Natalia Neverova, Deva Ramanan, Andrea Vedaldi, and Hanbyul Joo. Banmo: Building animatable 3d neural models from many casual videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2863-2873, 2022. 
3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 319, + 545, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 319, + 545, + 373 + ], + "spans": [ + { + "bbox": [ + 308, + 319, + 545, + 373 + ], + "type": "text", + "content": "[69] Jiawei Yang, Marco Pavone, and Yue Wang. Freenerf: Improving few-shot neural rendering with free frequency regularization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8254-8263, 2023. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 375, + 545, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 375, + 545, + 419 + ], + "spans": [ + { + "bbox": [ + 308, + 375, + 545, + 419 + ], + "type": "text", + "content": "[70] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 420, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 420, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 308, + 420, + 545, + 453 + ], + "type": "text", + "content": "[71] Zeyu Yang, Hongye Yang, Zijie Pan, and Li Zhang. Realtime photorealistic dynamic scene representation and rendering with 4d gaussian splatting. 2024. 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 454, + 545, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 454, + 545, + 507 + ], + "spans": [ + { + "bbox": [ + 308, + 454, + 545, + 507 + ], + "type": "text", + "content": "[72] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 510, + 545, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 510, + 545, + 542 + ], + "spans": [ + { + "bbox": [ + 308, + 510, + 545, + 542 + ], + "type": "text", + "content": "[73] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 544, + 545, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 544, + 545, + 598 + ], + "spans": [ + { + "bbox": [ + 308, + 544, + 545, + 598 + ], + "type": "text", + "content": "[74] Fuqiang Zhao, Wei Yang, Jiakai Zhang, Pei Lin, Yingliang Zhang, Jingyi Yu, and Lan Xu. Humannerf: Efficiently generated human radiance field from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7743-7753, 2022. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 600, + 545, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 600, + 545, + 653 + ], + "spans": [ + { + "bbox": [ + 308, + 600, + 545, + 653 + ], + "type": "text", + "content": "[75] C Lawrence Zitnick, Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM transactions on graphics (TOG), 23(3):600-608, 2004. 
1, 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 655, + 545, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 655, + 545, + 688 + ], + "spans": [ + { + "bbox": [ + 308, + 655, + 545, + 688 + ], + "type": "text", + "content": "[76] Matthias Zwicker, Hanspeter Pfister, Jeroen Van Baar, and Markus Gross. Ewa volume splatting. In Proceedings Visualization, 2001. VIS'01., pages 29-538. IEEE, 2001. 4" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "20685" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/5d416e0e-fbb0-491c-8e72-ffffff1eb68b_content_list.json b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/5d416e0e-fbb0-491c-8e72-ffffff1eb68b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..ecdb3321e158b7fbe4c5202d42d43737460d0116 --- /dev/null +++ b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/5d416e0e-fbb0-491c-8e72-ffffff1eb68b_content_list.json @@ -0,0 +1,1450 @@ +[ + { + "type": "text", + "text": "3DInAction: Understanding Human Actions in 3D Point Clouds", + "text_level": 1, + "bbox": [ + 161, + 130, + 808, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yizhak Ben-Shabat1,2", + "bbox": [ + 238, + 179, + 413, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Oren Shrout2", + "bbox": [ + 454, + 180, + 560, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Stephen Gould", + "bbox": [ + 602, + 180, + 728, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Australian National University", + "bbox": [ + 176, + 215, + 428, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Technion, Israel Institute of Technology", + "bbox": [ + 468, + 215, + 792, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "sitzikbs@technion.ac.il, shout.oren@campus.technion.ac.il, stephen.gould@anu.edu.au", + "bbox": [ + 114, + 236, + 854, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://github.com/sitzikbs/3dincaction", + "bbox": [ + 315, + 253, + 661, + 268 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/77a8b2bdb6e2bc8177795a0ff3f8cf7bea7e437ca5d6127b0d2ac7d0d2729236.jpg", + "image_caption": [ + "Figure 1. t-patches for action recognition. We propose a new representation for dynamic 3D point clouds. Termed $t$ -patches, these are locally evolving point cloud sets aggregated over time. Learning features over t-patches provides an improved temporal point cloud representation for action understanding." 
+ ], + "image_footnote": [], + "bbox": [ + 99, + 294, + 194, + 443 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/39b06bebed98e5301eafc3c562c7c30da441c22f9988cd6337436241515c3513.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 210, + 297, + 352, + 443 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e074a955792586b2acfdeb1a778f6dab4137266a42bfd994d8c83691f8419cfc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 297, + 488, + 443 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/920f0c45077417070175bf99b53baaf9b5f91803dbb5f8a59e7c45135cbb0ffc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 297, + 620, + 443 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d40f2b85b106594b3ffdb7f23c00df6ee6597a7d2750a9612450cdcdde76978f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 627, + 297, + 779, + 441 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/171287348038d40b2751b2c095c8224a3b7acf6789927f3fd03387b051cb719f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 789, + 300, + 870, + 392 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 526, + 313, + 542 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We propose a novel method for 3D point cloud action recognition. Understanding human actions in RGB videos has been widely studied in recent years, however, its 3D point cloud counterpart remains under-explored despite the clear value that 3D information may bring. This is mostly due to the inherent limitation of the point cloud data modality—lack of structure, permutation invariance, and varying number of points—which makes it difficult to learn a spatio-temporal representation. To address this limitation, we propose the 3DinAction pipeline that first estimates patches moving in time (t-patches) as a key building block, alongside a hierarchical architecture that learns an informative spatio-temporal representation. We show that our method achieves improved performance on existing datasets, including DFAUST and IKEA ASM. Code is publicly available at https://github.com/sitzikbs/3dincaction.", + "bbox": [ + 75, + 559, + 472, + 801 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 830, + 209, + 845 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we address the task of action recognition from 3D point cloud sequences. We propose a novel pipeline wherein points are grouped into temporally evolv-", + "bbox": [ + 75, + 854, + 470, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ing patches that capture discriminative action dynamics. Our work is motivated by the massive growth of online media, mobile and surveillance cameras that have enabled the computer vision community to develop many data-driven action-recognition methods [5, 12, 26, 31], most of which rely on RGB video data. Recently, commodity 3D sensors are gaining increased momentum, however, the 3D point cloud modality for action recognition has yet been underexploited due to the scarcity of 3D action-labeled data.", + "bbox": [ + 496, + 527, + 893, + 665 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In many cases, a pure RGB video-based inference may not be enough and incorporating other modalities like geometry is required. 
This is especially necessary for safety critical applications such as autonomous systems, where redundancy is crucial, or in scenarios where the video is heavily degraded (e.g., due to poor lighting). Some approaches incorporate geometrical information implicitly, e.g., through intermediate pose estimation [7]. This often entails extra steps that require more time and resources and is still limited to video input. Therefore a more explicit approach is desirable.", + "bbox": [ + 496, + 669, + 893, + 835 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D sensors provide an alternative modality in the form of point clouds sampled on the environment. Despite the vast research on 3D vision and learning, even static 3D point cloud datasets are significantly smaller than their RGB im", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "19978", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "age counterparts due to difficulties in collecting and labeling. 3D point cloud sequence databases are even smaller,", + "bbox": [ + 75, + 90, + 467, + 119 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "making it more difficult to learn a meaningful 3D action representation. Furthermore, learning a point cloud representation still remains an active research field because point clouds are unstructured, unordered, and may contain a varying number of points. Learning a temporal point cloud representation is even more challenging since, unlike pixels, there is no one-to-one point correspondence through time.", + "bbox": [ + 75, + 121, + 467, + 224 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We address these challenges and propose the 3DinAction pipeline for 3D point cloud action recognition. In our pipeline, we first extract local temporal point patches (t-patches) that reflect a point region's motion in time, see Figure 1. We then learn a t-patch representation using a novel hierarchical architecture that incorporates spatial features in the temporal domain. We finally get an action prediction for each frame in a sequence by aggregating multiple t-patch representations. This pipeline overcomes the need for ground truth point temporal correspondence, grid structure, point order, and a fixed number of points in each frame. 
Intuitively, patches reflect local surface deformation and are more robust to point correspondence errors.", + "bbox": [ + 75, + 227, + 467, + 422 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We conduct extended experiments to evaluate the performance of our approach compared to existing SoTA methods and show that 3DinAction provides significant performance gains of $13\\%$ and $7\\%$ in accuracy on DFAUST and IKEA ASM, respectively.", + "bbox": [ + 76, + 424, + 467, + 498 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The key contributions of our work are as follows:", + "bbox": [ + 94, + 500, + 423, + 513 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- A novel representation for dynamically evolving local point cloud sets termed t-patches.", + "- A hierarchical architecture that produces an informative spatio-temporal representation for sequences of point clouds." + ], + "bbox": [ + 76, + 513, + 467, + 587 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 603, + 217, + 618 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Learning 3D point cloud representations. Point clouds pose a challenge for neural networks due to their unstructured and point-wise unordered nature. To address these challenges, several approaches have been proposed. PointNet [23, 24] uses permutation-invariant operators, such as pointwise MLPs and pooling layers, to aggregate features across a point set. Some approaches construct a graph from the point set. DGCNN [34] applies message passing and performs graph convolutions on kNN graphs, KCNet [29] uses kernel correlation and graph pooling, and Kd-Networks [15] apply multiplicative transformations and share the parameters based on the subdivisions imposed by kd-trees. Alternatively, the structure can be imposed using a grid of voxels [22, 36], or a grid of Gaussians in 3DmFVNet [1]. Another alternative avoids the structure by using Transformer's attention mechanism [17, 37]. For a comprehensive survey of point cloud architectures please see [14].", + "bbox": [ + 75, + 628, + 467, + 883 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, various factors that can impact the training of", + "bbox": [ + 94, + 886, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "different architectures have been investigated [13, 25]. This includes exploring data augmentation strategies and loss functions that are not specific to a particular architecture. The results of this study showed that older PointNet-based architectures [23, 24] can perform comparably to newer architectures with minor changes.", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "All of the above methods deal with static, single-frame, or single-shape point clouds. In this work, the input is a temporal point cloud where a representation for a short sequence is required and point correspondence between frames is unknown. Therefore extending existing approaches is not trivial.", + "bbox": [ + 496, + 184, + 890, + 273 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Learning temporal 3D point cloud representations. Temporal point clouds have not been as extensively studied as their static counterparts, in particular for action recognition. Meteornet [21] processes 4D points using a PointNet++ architecture where they appended a temporal dimension to the spatial coordinates. 
PSTNet [10, 11] proposed spatio-temporal convolutions and utilized some of the temporal consistency for action recognition. Similarly, P4Transformer [8] uses 4D convolutions and a transformer for capturing appearance and motion via self-attention. In a follow-up work PST-Transformer [9] employs a video level of self-attention in search for similar points across entire videos and so encodes spatio-temporal structure. Some works attempt to alleviate the full supervision requirement for 3D action recognition. These include self-supervised features learning [32] by predicting temporal order from a large unlabeled dataset and fine-tuning on a smaller annotated datasets and unsupervised skeleton colorization [35]. Additional supervised approaches include MinkowskiNet [6] that uses a 4D spatio-temporal CNN after converting the point clouds to an occupancy grid, 3DV [33] that encodes 3D motion information from depth videos into a compact voxel set, and Kinet [38] that implicitly encoded feature level dynamics in feature space by unrolling the normal solver of ST-surfaces.", + "bbox": [ + 496, + 277, + 890, + 652 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The above methods, perform a single classification per clip. In this paper, we focus on a related, and more chllang-ing, task that requires a prediction per-frame. We propose to convert the point cloud representation into t-patches and use an MLP based hierarchical architecture to get the spatiotemporal representation.", + "bbox": [ + 496, + 656, + 890, + 746 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D action understanding datasets. One of the major driving forces behind the success of learning-based approaches is the availability of annotated data. For the task of 3D point cloud action recognition, there is currently no designated standard dataset, however, some existing datasets may be extended. The CAD 60 and CAD 120 [16, 30] datasets include 60 and 120 long-term activity videos of 12 and 10 classes respectively (e.g., making cereal, microwave food). These datasets provide raw RGB, skeletons, and depth data however its small scale and long-term focus limit its effec", + "bbox": [ + 496, + 750, + 890, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "19979", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "tiveness. The NTU RGB+D 60 [28] and NTU RGB+D 120 [20] provide $\\sim 56\\mathrm{K}$ and $\\sim 114\\mathrm{K}$ clips containing 60 and 120 actions classes respectively, e.g., taking off a jacket, taking a selfie. They provide three different simultaneous RGB views, IR and depth streams as well as 3D skeletons. While these datasets can be considered large-scale, their contrived nature makes recent skeleton-based methods (e.g., [7]) perform well, making a prior-free approach difficult to justify. The MSR-Action3D dataset [19] includes 20 action classes performed by 10 subjects for a total of 567 depth map sequences, collected using a Kinect v1 device (23K frames). The sequences in this dataset are very short and therefore using it to evaluate learning-based approaches provides a limited indication of generalization. The above datasets provide per clip action annotations.", + "bbox": [ + 75, + 90, + 472, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Some datasets inherently provide per-frame annotations. The IKEA ASM dataset [2] provides 371 videos clipped into 31K clips. 
It contains 33 action classes related to furniture assembly, annotated per frame. This dataset provides several modalities including three RGB views, and Depth. It is an extremely challenging dataset since the human assembler is often occluded and presents very unique assembly poses. It is also very imbalanced since different assembly actions have different duration and may repeat multiple times within the same assembly. Although it was designed for video action recognition, its challenges are the core reasons for choosing to extend it to the point cloud action recognition task. The DFAUST dataset [3] provides high-resolution 4D scans of human subjects in motion. It includes 14 action categories with over 100 dynamic scans of 10 subjects (1:1 male-to-female ratio) with varying body shapes represented as registrations of aligned meshes, therefore an extension to our task is straightforward. One particularly important feature of this dataset is the GT point correspondences throughout the sequence i.e. it is possible to follow each point's movement through time. While this dataset is not as large-scale as others, it provides ground truth information (correspondence) that most other collected datasets do not. Therefore, we extend this dataset to 3D point cloud action recognition and use it as a testbed for many ablation studies (see Section 4.4).", + "bbox": [ + 75, + 321, + 472, + 714 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. 3DinAction pipeline", + "text_level": 1, + "bbox": [ + 76, + 736, + 269, + 753 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our 3DinAction pipeline is illustrated in Figure 2. Given a temporal sequence of 3D point clouds we first extract a set of t-patches (Section 3.1). We then feed the t-patches into a hierarchical neural network (Section 3.2) to produce a per-frame high dimensional feature vector representation. Finally, the feature vectors are fed into a classifier to obtain per-frame predictions. The proposed approach is prior-free (no skeleton extraction required) and therefore general and can be used on different action-understanding datasets.", + "bbox": [ + 75, + 763, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. t-patches", + "text_level": 1, + "bbox": [ + 500, + 90, + 607, + 107 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Let $S = \\{x_{j} \\in \\mathbb{R}^{3} \\mid j = 1, \\dots, N\\}$ denote a 3D point cloud with $N$ points. In the classic (static) setting, a patch $\\Psi_{q}$ is extracted around some query point $x_{q}$ . For example, the patch $\\Psi_{q}$ may be constructed by finding the $k$ -nearest neighbors of $x_{q}$ in $S$ .", + "bbox": [ + 496, + 112, + 890, + 189 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In our temporal setting we are given a sequence of point clouds $S = \\{S^0, \\dots, S^T\\}$ composed of point cloud frames $S^t = \\{x_j^t \\mid j = 1, \\dots, N^t\\}$ . Here the superscript $t$ is used to denote the index of the point cloud in the sequence. Instead of extracting a patch within a single frame, we allow patches to extend temporally, and denote them as $t$ -patches.", + "bbox": [ + 496, + 189, + 890, + 281 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 3.1 A t-patch $P_{q}$ is a sequence of point sets indexed by a query point $x_{q}^{0}$ and jointly moving in time defined by a pointwise mapping function between patches in consecutive frames. 
Mathematically, $P_{q} = \\langle \\Psi_{q}^{t}\\rangle_{t = 0}^{T}$ where $\\Psi_{q}^{0}$ is the initial (static) patch and $\\Psi_{q}^{t} = \\Phi (\\Psi_{q}^{t - 1})$ is the patch at time $t$ where $\\Phi$ is a pointwise mapping function.", + "bbox": [ + 496, + 294, + 890, + 386 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In practice, it is difficult to find a reliable mapping function $\\Phi$ . Therefore we propose a simplified formulation that, for a given query point $x_{q}^{0}$ , first extracts a patch for the first frame $\\Psi_{q}^{0}$ and then iteratively extracts corresponding patches for the next frames (iterating over time), by using the closest point in the next frame as the new query point. More formally, we specify $\\vec{\\Psi}_{q}^{0} \\triangleq \\Psi_{q}^{0}$ , $\\vec{\\Psi}_{q}^{t} = knn(x_{q}^{t-1}, S^{t})$ and $x_{q}^{t} = n n(x_{q}^{t-1}, S^{t})$ for $t = 1, \\ldots, T$ . Here $knn$ is the $k$ nearest neighbor and $n n$ is nearest neighbor. Then, the simplified t-patch formulation is given by", + "bbox": [ + 496, + 398, + 890, + 551 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\vec {P} _ {q} = \\left\\langle \\vec {\\Psi} _ {q} ^ {t} \\mid t = 0, \\dots , T \\right\\rangle \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 602, + 561, + 890, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "See Figure 3 left for an illustration of the t-patch extraction process. Note that if ground truth correspondence is available $knn$ can be swapped back to $\\Phi$ . However, this does not guarantee improved performance.", + "bbox": [ + 496, + 595, + 890, + 655 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Temporal t-patch collapse. The simplified formulation of extracting t-patches inherently suffers from the problem of two or more t-patches collapsing into having the same points after a certain frame. We call this scenario t-patch temporal collapse. Temporal collapse can happen whenever $x_{q}^{t} = x_{p}^{t}$ for $x_{q}^{0} \\neq x_{p}^{0}$ . The main issue with temporal collapse is the reduction in point coverage as time progresses, i.e. the patches covering the last point cloud have significant overlaps and therefore include fewer points than the first frame and so missing vital data. An illustration of the t-patch collapse problem is available in Figure 3 (right). To mitigate this issue, we propose two solutions. First, adding small noise to each iteration's query points, i.e. $\\overrightarrow{\\Psi}_{q}^{t} = knn(x_{q}^{t} + \\epsilon, S^{t+1})$ where $\\epsilon \\sim \\mathcal{N}(\\mu, \\sigma^{2})$ is a small Gaussian noise. Second, we propose to construct t-patches from the first to last frame but also in reverse, initializing", + "bbox": [ + 496, + 656, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "19980", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/ca03231e6ca24b0c3a26aa855970e4a3c0f6682cb3ba7e648684c1c9763fd057.jpg", + "image_caption": [ + "Figure 2. 3DinAction pipeline. Given a sequence of point clouds, a set of t-patches is extracted. The t-patches are fed into a neural network to output an embedding vector. This is done hierarchically until finally the global t-patch vectors are pooled to get a per-frame point cloud embedding which is then fed into a classifier to output an action prediction per frame." 
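To make the simplified formulation in Eq. (1) and the jitter-based mitigation of temporal collapse concrete, here is a minimal NumPy sketch of forward t-patch extraction; the function and variable names are ours, and this is only an illustration under the stated assumptions, not the authors' released implementation (see their repository for that).

```python
import numpy as np

def extract_t_patches(seq, query_idx, k=16, jitter_sigma=0.0, rng=None):
    """Forward t-patch extraction in the spirit of Eq. (1).

    seq       : list of T+1 arrays, each of shape (N_t, 3) -- one point cloud per frame.
    query_idx : (M,) indices of the query points x_q^0 in the first frame.
    Returns an array of shape (M, T+1, k, 3).
    """
    rng = np.random.default_rng() if rng is None else rng
    queries = seq[0][query_idx]                                  # current query points (M, 3)
    patches = []
    for t, frame in enumerate(seq):
        q = queries
        if jitter_sigma > 0 and t > 0:
            # small Gaussian jitter on the propagated queries helps avoid
            # two t-patches collapsing onto the same points
            q = q + rng.normal(scale=jitter_sigma, size=q.shape)
        dists = np.linalg.norm(q[:, None, :] - frame[None, :, :], axis=-1)  # (M, N_t)
        knn_idx = np.argsort(dists, axis=1)[:, :k]               # k nearest neighbours per query
        patches.append(frame[knn_idx])                           # (M, k, 3)
        queries = frame[knn_idx[:, 0]]                           # propagate x_q^t = nn(x_q^{t-1}, S^t)
    return np.stack(patches, axis=1)                             # (M, T+1, k, 3)
```

Bidirectional t-patches would simply add a second pass of the same routine over the reversed frame list, seeded from query points in the last frame.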
+ ], + "image_footnote": [], + "bbox": [ + 98, + 109, + 880, + 409 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/fa2721163831b9461c340b37e484f9bf65404e8c33d5584b94aa6d6c81df9fd3.jpg", + "image_caption": [ + "Figure 3. t-patch construction and collapse. Illustration of t-patch construction (left) and collapse (right). Starting from an origin point $x_{q}^{0}$ we find the nearest neighbours in the next frame iteratively to construct the t-patch subset (non-black points). A collapse happens when two different origin points, $x_{q}^{0}$ and $x_{p}^{0}$ , have the same nearest neighbour at some time step, $\\Psi_{p}^{3} = \\Psi_{q}^{3}$ here." + ], + "image_footnote": [], + "bbox": [ + 86, + 489, + 446, + 609 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "with $\\Psi_q^0$ and $\\Psi_q^T$ , respectively. We name this variation bidirectional t-patches. More formally bidirectional t-patches are given by,", + "bbox": [ + 75, + 739, + 468, + 787 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\stackrel {\\leftrightarrow} {P} = \\left(\\bigcup_ {q} \\vec {P} _ {q}\\right) \\cup \\left(\\bigcup_ {p} \\overleftarrow {P} _ {p}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 809, + 468, + 852 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\overleftarrow{P}_p$ is defined similarly to $\\vec{P}_q$ but in the reverse direction, i.e., $\\overleftarrow{\\Psi}_p^T \\triangleq \\Psi_p^T$ and $\\overleftarrow{\\Psi}_p^t = knn(x_p^{t+1}, S^t)$ for", + "bbox": [ + 76, + 863, + 468, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$t = T - 1,\\ldots ,0$ . Here, the final set of t-patches is composed of an equal number of t-patches from both directions.", + "bbox": [ + 498, + 487, + 890, + 518 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Hierarchical architecture", + "text_level": 1, + "bbox": [ + 500, + 527, + 732, + 542 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proposed architecture is composed of $l$ consecutive t-patch modules. Each module receives a point cloud sequence $S$ as input. The sequence is fed into a t-patch extractor where it undergoes subsampling and t-patch extraction, forming $\\tilde{S}_l$ and $P^l$ respectively. Then, the t-patches are fed into t-patch Net, a network that computes a high-dimensional feature vector $f_{l}$ for each t-patch, parametrized by $\\theta_l$ . The subsampled sequence $\\tilde{S}_l$ and its corresponding t-patch features $f_{l}$ are then fed into the next t-patch module. These modules form a hierarchy in the sense that each module receives as input a sparser point cloud with a higher dimensional feature vector representing each point (encoding both spatial and temporal information). Note that both the t-patch points and their features are fed into t-patch Net. t-patch extractor. We first subsample the first frame in the point cloud sequence $S^0$ using farthest point sampling (FPS) to form a set of $M$ query points $\\tilde{S}^0 = \\{x_j^0\\in FPS(S^0,M)\\}$ . The set $\\tilde{S}^0$ is used to form the t-patches. Subsampling is required since computing a t-patch for each point is inefficient and unnecessary due to overlaps. After subsampling, we extract $M$ t-patches using Equation 2 where $q\\in \\tilde{S}^0$ . 
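The subsampling step just described (FPS on the first frame to obtain the M query points) can be sketched with a generic greedy implementation; this is a textbook FPS, not necessarily the exact variant used in the authors' code.

```python
import numpy as np

def farthest_point_sampling(points, m, seed=0):
    """Greedy farthest point sampling: return m indices of `points` (N, 3)
    that are spread as far apart as possible -- used here only to pick the
    t-patch query points in the first frame."""
    rng = np.random.default_rng(seed)
    n = points.shape[0]
    selected = np.empty(m, dtype=np.int64)
    selected[0] = rng.integers(n)                                # arbitrary first point
    dist = np.linalg.norm(points - points[selected[0]], axis=1)  # distance to the selected set
    for i in range(1, m):
        selected[i] = int(np.argmax(dist))                       # farthest from the set so far
        dist = np.minimum(dist, np.linalg.norm(points - points[selected[i]], axis=1))
    return selected

# e.g. query_idx = farthest_point_sampling(seq[0], m=128) would feed the
# extraction sketch above.
```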
The extractor operates on both 3D points and their corresponding features (for mod", + "bbox": [ + 496, + 551, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "19981", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ules deeper in the hierarchy).", + "bbox": [ + 76, + 90, + 272, + 104 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Model architecture and t-patch net. The t-patch network computes a high dimensional representation for each t-patch. The t-patch Net architecture is composed of several MLP layers operating on the non-temporal dimensions (sharing weights across points) followed by a convolutional layer operating on both the temporal and feature dimensions. Note that the network weights are also shared across t-patches. The output of each t-patch Net is a vector for each frame. The final frame representation is obtained by aggregating all of the t-patch features using a max pooling operation i.e. $\\text{maxpool}_{M_l}(f_3)$ . This representation is then fed into a classifier consisting of three fully connected layers with temporal smoothing and softmax to output the final action prediction. To train the network we use the same losses of RGB based approaches [2, 5] which include a per-frame prediction cross entropy loss and a per-sequence prediction cross entropy loss (summed and weighted evenly) $L_{total} = L_{frame} + L_{seq}$ . For full details see supplemental.", + "bbox": [ + 75, + 106, + 472, + 380 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 391, + 209, + 407 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluate the performance of our approach on three datasets. The results show that the 3DinAction pipeline outperforms all baselines in DFAUST [3] and IKEA ASM [2] and is comparable in MSR-Action 3D [19]. We then conduct an ablation study for selecting parameters and t-patch extraction method showing that adding jitter and bidirectional t-patches is beneficial. Finally, we report time performance and show the tradeoff between performance and inference time. For more results and experiments, see supplemental material.", + "bbox": [ + 75, + 417, + 468, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines and evaluation metrics. For evaluation, we report several standard metrics [4]: the top1 and top3 framework accuracy are the de facto standard for action classification. We compute it by summing the number of correctly classified frames and dividing by the total number of frames in each video and then averaging over all videos in the test set. Additionally, since some of the datasets are imbalanced and may contain different actions for each frame in a clip, we also report the macro-recall by separately computing recall for each category and then averaging (macro). Finally, we report the mean average precision (mAP) since all untrimmed videos contain multiple action labels.", + "bbox": [ + 75, + 568, + 468, + 750 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For DFAUST and IKEA ASM we report static methods PointNet [23], $\\mathrm{PointNet}^{++}$ [24], and Set Transformer [18] by applying them on each point cloud frame individually. 
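As a reference point for the metrics paragraph above, per-video top-1 frame accuracy and macro-recall can be computed roughly as follows; this reflects our reading of the protocol (accuracy per video, then averaged; recall per class, then averaged), not the authors' evaluation scripts.

```python
import numpy as np

def frame_accuracy(per_video_preds, per_video_labels):
    """Top-1 accuracy computed within each video, then averaged over videos."""
    accs = [float(np.mean(p == l)) for p, l in zip(per_video_preds, per_video_labels)]
    return float(np.mean(accs))

def macro_recall(preds, labels, num_classes):
    """Per-class recall averaged over the classes that actually occur."""
    recalls = []
    for c in range(num_classes):
        mask = labels == c
        if mask.any():
            recalls.append(float(np.mean(preds[mask] == c)))
    return float(np.mean(recalls))
```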
Additionally, we report temporal methods like PSTNet [10] and also implemented a temporal smoothing version of each static method (PoinNet+TS, $\\mathrm{Pointnet}^{++} + \\mathrm{TS}$ , and Set Transformer+TS respectively) by learning the weights of a convolutional layer over the temporal dimension. Temporal smoothing aims to provide a naive baseline for utilizing temporal information in addition to spatial information.", + "bbox": [ + 75, + 750, + 470, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that in all experiments, unless otherwise specified, our method uses the simplified formulation with jitter and bidirectional t-patches.", + "bbox": [ + 496, + 90, + 890, + 137 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experiments on DFAUST dataset", + "text_level": 1, + "bbox": [ + 498, + 146, + 790, + 162 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We extend the DFAUST dataset for the task of action recognition and show that the proposed approach outperforms other methods (see Table 1).", + "bbox": [ + 496, + 170, + 890, + 215 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "DFAUST dataset [3]. We extended the DFAUST dataset to our task by subdividing it into clips of 64 frames with train and test human subjects. The split was constructed so no subject will appear in both training and test set as well as guarantee that all actions appear in both. The train and test sets contain 76 full-length sequences (395 clips, and $\\sim 25\\mathrm{K}$ frames) and 53 sequences (313 clips, and $\\sim 20\\mathrm{K}$ frames) respectively. Each point cloud frame contains 6890 points. These points are mesh vertices and therefore the density varies greatly (e.g., very dense on the face, hands, and feet and sparser on the legs). For all baselines, we sampled a set of 1024 points using the farthest point sampling algorithm to provide a more uniform set of points. For this dataset, all frames in a clip have the same label. Note that not all actions are performed by all subjects. For the full action list and dataset statistics, see the supplemental.", + "bbox": [ + 496, + 215, + 892, + 455 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Results. The results, reported in Table 1, show that our proposed approach outperforms all baselines by a large margin. It also shows that temporal smoothing boosts performance significantly for all static baselines. Additionally, to explore the influence of our simplified knn-based temporal point mapping, we used the GT point correspondence to match the consecutive t-patch origin points and report the results as another baseline (Ours + GT corr). The results show that there is a mAP performance gain with GT correspondence, however, it is limited. Note that in most datasets, this GT correspondence is not available. Finally, we also experimented with a Transformer architecture to process the t-patch learned representations and show that it does not provide additional performance boost. This may be attributed to the dataset size.", + "bbox": [ + 496, + 458, + 892, + 683 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Insight. We extended the GradCam [27] approach for our 3DinAction pipeline. Using this approach we get a score per point in each t-patch proportional to its influence on classifying the frame to a given target class. 
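A rough sketch of how such per-point scores could be obtained in a Grad-CAM style, by weighting intermediate t-patch features with the gradient of the target-class score; `point_features` and `classify` are hypothetical hooks into an intermediate feature map and the classifier head, named by us purely for illustration, and the exact adaptation used in the paper may differ.

```python
import torch

def per_point_gradcam(point_features, classify, patches, target_class):
    """patches: (M, T, k, 3) t-patches; point_features / classify: callables
    exposing an intermediate per-point feature map and per-frame logits
    (hypothetical hooks, assumed for this sketch).
    Returns (M, T, k) relevance scores; higher means more influential."""
    patches = patches.clone().requires_grad_(True)
    feats = point_features(patches)                   # assumed shape (M, T, k, D)
    logits = classify(feats)                          # assumed shape (T, num_classes)
    score = logits[:, target_class].sum()             # target-class score over the clip
    grads = torch.autograd.grad(score, feats)[0]      # sensitivity of the score to each feature
    weights = grads.mean(dim=2, keepdim=True)         # Grad-CAM-style weights, averaged over points
    return torch.relu((weights * feats).sum(dim=-1))  # weighted activations, ReLU-ed
```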
The results in Figure 4 show that, as expected, our approach learns meaningful representations since the most prominent regions are the ones with the informative motion. For example, in the Jumping jacks action (top row) the hands are most prominent as they are making a large and distinct motion.", + "bbox": [ + 496, + 685, + 890, + 821 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Experiments on IKEA ASM dataset", + "text_level": 1, + "bbox": [ + 498, + 830, + 812, + 847 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "IKEA ASM dataset [2]. This dataset consists of 371 videos (3M frames) of people assembling IKEA furniture in different indoor environments. It was collected using a", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "19982", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/5e49459696c917a9ed76439f8384bbb834626ef28a354aa2ba68bfec22ae5eca.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | Frame acc.
| top 1 | top 3 | mAP
3DmFVNet [1] | 60.86 | 87.68 | 0.7171
PointNet [23] | 65.67 | 86.44 | 0.7161
PointNet++ [24] | 58.51 | 88.28 | 0.5842
Set Transformer [18] | 52.27 | 81.98 | 0.6209
PointNet [23] + TS | 74.10 | 94.00 | 0.7863
PointNet++ [24] + TS | 67.88 | 86.21 | 0.7563
Set Transformer [18] + TS | 62.95 | 90.33 | 0.7322
PSTNet [10] | 50.70 | 78.28 | 0.6490
Ours + GT corr | 77.67 | 95.38 | 0.8762
Ours + Transformer | 77.09 | 93.77 | 7.49
Ours | 87.26 | 99.26 | 0.8616
", + "bbox": [ + 86, + 88, + 460, + 296 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Action recognition results on DFAUST. Reporting frame-wise accuracy and mean average precision. Ours outperforms all baselines by a large margin.", + "bbox": [ + 75, + 306, + 468, + 349 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/70a0c3e96fa4cf736b34a39ff64e0b044dbff4a15bf7a0395ab03946ea6db655.jpg", + "image_caption": [ + "Figure 4. 3DinAction GradCAM scores. The proposed 3DinAction pipeline learns meaningful representations for prominent regions. The presented actions are jumping jacks (top row), hips (middle row), and knees (bottom row). The columns represent progressing time steps from left to right. Colormap indicates high GradCAM scores in red and low scores in blue." + ], + "image_footnote": [], + "bbox": [ + 101, + 367, + 454, + 720 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Kinect V2 camera and provides camera parameters to reconstruct point clouds in camera coordinates. It provides action annotation for each frame (33 classes). It is a highly challenging dataset for two main reasons: (1) It is highly", + "bbox": [ + 75, + 839, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "imbalanced since some actions have a long duration and occur multiple times in each video (e.g., spin leg) and some are shorter and sparser (flip tabletop). (2) The assembly motion includes a lot of self-occlusion as well as subtle movements. The train/test split consists of 254 and 117 full sequences respectively. The split is environment-based (i.e. in the test set there is no environment that appeared in the training set). The assembly videos have an average of $\\sim$ 2735 frames per video. The point clouds provided in this dataset are aligned to the camera coordinate frame, posing a challenge for methods that are sensitive to rotations since the camera moves between different scans.", + "bbox": [ + 496, + 90, + 890, + 271 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results. The results on the IKEA ASM dataset are reported in Table 2. The results show that the proposed 3DinAction pipeline provides a significant performance boost over static approaches and their temporally smooth variants. Additionally, as expected, PointNet and Set Transformer are heavily affected by the variations in coordinate frames. $\\mathrm{PointNet}^{++}$ on the other hand performs better since it uses local coordinate frames for each local region. All methods show an improved mAP when using the temporally smooth variant with degradation in frame-wise accuracy due to the dataset imbalance. For this dataset, the top1 metric is not always indicative of the quality of performance because a high top1 is directly correlated with many frames classified as the most common class. Additionally, we compare to pose-based methods reported in [2] and show that the proposed approach also outperforms these baselines. See supplementary material for confusion matrices.", + "bbox": [ + 496, + 273, + 892, + 529 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "t-patch intuition and visualization. In Figure 5 we visualize the t-patches for the flip table action in the TV Bench assembly. A set of selected t-patches are highlighted in color demonstrating different types of t-patches and their spatiotemporal changes. The blue is on the moving TV Bench assembly, it moves rigidly with the assembly. 
The maroon is on the moving person's arm, it exhibits nonrigid motion and deformations through time. The teal is on the static table surface containing some of the TV Bench's points in the first frame but remains static when it moves since its origin query point is on the table. The green is on the static carpet, remaining approximately the same through time. Note that the RGB images are for visualization purposes and are not used in our pipeline.", + "bbox": [ + 496, + 531, + 893, + 743 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Experiments on MSR-Action3D dataset", + "text_level": 1, + "bbox": [ + 498, + 755, + 841, + 771 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For this dataset, the task is to predict a single class for a sequence of frames (unlike the other datasets where a per-frame prediction is required). To that end, we replace our classifier with a single fully connected layer and max pooled the results over the temporal domain (similar to [10]). The results, reported in Table 3, show that all SoTA methods, including the proposed approach, exhibit very similar performance. This is mainly attributed to the small scale of the", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "19983", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/507d68e235f3b0de3debfe9bb776f8bdb370ba2e44031561c43649e19c75caf8.jpg", + "image_caption": [ + "Figure 5. IKEA ASM example with t-patches. The flip table action for the TV Bench assembly is visualization including the RGB image (top), and a grayscale 3D point cloud with t-patches (bottom). t-patches are highlighted in color. The blue is on the moving TV Bench assembly, maroon is on the moving persons arm, teal is on the static table surface, and green is on the colorful static carpet." + ], + "image_footnote": [], + "bbox": [ + 86, + 87, + 285, + 297 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ee4b5844cd5c3ca6e3f785a24d468c225c86bf8d1e63ba13aeed01e0d5bbd6d4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 287, + 87, + 483, + 297 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2468e71df666d5da49836430f28668bdeca4592a2d25a2397b3cf1d5dac6db67.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 485, + 87, + 681, + 297 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1fd39becc884c2a336ecfbdc908e4f44c57ebdff495c1fe9be3b979531ab02ad.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 88, + 880, + 297 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/6707409b54fecadcd6b8c7317b4337d89cc6bbfc2ac63db00676d67bfca147d1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | Frame acc.
| top 1 | top 3 | macro | mAP
PointNet [23] | 4.20 | 19.86 | 5.76 | 0.0346
PointNet++ [24] | 45.97 | 70.10 | 29.48 | 0.1187
Set Transformer [18] | 14.96 | 57.12 | 13.16 | 0.0299
PointNet [23] + TS | 6.00 | 19.48 | 5.14 | 0.0804
PointNet++ [24] + TS | 27.84 | 60.64 | 27.72 | 0.2024
Set Transformer [18] + TS | 9.54 | 36.50 | 10.74 | 0.1471
PSTNet [10] | 17.94 | 52.24 | 17.14 | 0.2016
Human Pose HCN [2] | 39.15 | 65.37 | 28.18 | 0.2232
Human Pose ST-GCN [2] | 43.4 | 66.29 | 26.54 | 0.1856
Ours without BD | 45.16 | 72.83 | 35.06 | 0.2932
Ours | 52.91 | 75.03 | 38.84 | 0.2875
", + "bbox": [ + 78, + 378, + 468, + 588 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "dataset and the lack of diversity in the action classes. Furthermore, we witnessed that the main performance gap is for frames and sequences where the action is indistinguishable (e.g., first few frames of a sequence where no distinguishable action commenced).", + "bbox": [ + 75, + 667, + 468, + 744 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Ablation study", + "text_level": 1, + "bbox": [ + 76, + 755, + 227, + 771 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "t-patch extraction. We studied the t-patch extraction method and its effects on action recognition on a noisy version of the DFAUST dataset. The results reported in Table 4, show the significance of the t-patch collapse problem and the effectiveness of adding small jitter and bidirectional t-patches to overcome it. In the DFAUST dataset, finding the nearest neighbor between frames provides a $\\sim 96\\%$ correspondence accuracy (small motion between frames).", + "bbox": [ + 75, + 779, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/139423c391a6afbb65748bc51cef5befa0b036a618e685e50d34a3bcbaaf2bab.jpg", + "table_caption": [ + "Table 2. Action classification on IKEA ASM. The proposed approach provides a significant performance boost over other static and dynamic approaches, including the temporal smoothing (TS)." + ], + "table_footnote": [], + "table_body": "
Method | # frames
| 4 | 8 | 12 | 16 | 24
PSTNet [10] | 81.14 | 83.50 | 87.88 | 89.90 | 91.20
P4Transformer [8] | 80.13 | 83.17 | 87.54 | 89.56 | 90.94
PST-Transformer [9] | 81.14 | 83.97 | 88.15 | 91.98 | 93.73
Kinet [38] | 79.80 | 83.84 | 88.53 | 91.92 | 93.27
Ours | 80.47 | 86.20 | 88.22 | 90.57 | 92.23
", + "bbox": [ + 501, + 378, + 903, + 494 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. MSR-Action3D classification results. Reporting classification accuracy for clips of different lengths. Results show that all methods are comparable since this dataset's scale is limited.", + "bbox": [ + 498, + 505, + 893, + 547 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Therefore, in this experiment, we augment the dataset once by adding small Gaussian noise to each point in the dataset $(\\sigma = 0.01)$ , decreasing the correspondence accuracy to $\\sim 62.4\\%$ and introducing multiple t-patch collapse instances as well as increasing the classification difficulty.", + "bbox": [ + 496, + 580, + 890, + 656 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Several variants of the t-patch extraction were explored. The first variation (GT) incorporates the ground truth correspondence into the t-patch extraction. Using this method, there is no t-patch collapse since there is a one-to-one mapping between frames. We expected this to produce an upper bound on the performance, however, surprisingly the results show that this variation is actually inferior to the proposed t-patch approach. We attribute this to the proposed t-patch extraction inherent augmentation caused by the downsampling and nearest neighbor point jitter. We then continue to explore the proposed approaches for dealing with t-patch collapse which include jitter, i.e. adding small noise to each point before finding its nearest neighbor in the next frame, and the bidirectional t-patches that extract patches both from the first to the last frame and from the last to the first frame. The results show that adding jitter is al", + "bbox": [ + 496, + 657, + 892, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "19984", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/439d33761319830dbd20e1f69c47475ee00906206240291ef90437ed1b97c37f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Frame acc.
Data | GT | Jitter | BD | top 1 | top 3 | mAP
clean | 77.67 | 95.38 | 0.8762
74.73 | 92.14 | 0.8097
80.49 | 96.61 | 0.9023
87.26 | 99.26 | 0.8616
noisy | 76.08 | 95.50 | 0.9013
66.74 | 93.76 | 0.7626
81.83 | 98.97 | 0.9220
80.03 | 97.57 | 0.8975
", + "bbox": [ + 84, + 88, + 460, + 250 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c1dcb349e0d350c0e2fd6ddd7dd6a20489e3deabe911935df3d519aeaca662d9.jpg", + "table_caption": [ + "Table 4. t-patch collapse ablation on DFAUST. Exploring adding (1) GT - ground truth correspondences, (2) jitter - small Gaussian noise in t-patch construction, and (3) BD - bidirectional t-patches." + ], + "table_footnote": [], + "table_body": "
Frame acc.
n | k | top 1 | top 3 | mAP
256 | 16 | 76.96 | 97.54 | 0.8430
512 | 16 | 80.03 | 97.57 | 0.8975
1024 | 16 | 77.30 | 97.88 | 0.8507
512 | 8 | 76.87 | 96.21 | 0.7557
512 | 32 | 77.91 | 96.60 | 0.7453
", + "bbox": [ + 138, + 320, + 406, + 436 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ways beneficial and provides a boost in performance. The bidirectional t-patches improve accuracy performance significantly when the data is clean and are comparable when the data is noisy. Note that in both dataset variations, the degradation due to temporal t-patch collapse is low compared to Kinect-based scan data, therefore the bidirectional benefits are not fully utilized.", + "bbox": [ + 75, + 518, + 468, + 625 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "t-patch parameters. The core parameters for t-patch extraction are the number of neighbors to extract $(k)$ and the number of points to subsample $(n)$ . Here there is a tradeoff between complexity and performance i.e. when $k$ and $n$ are small, the input to the model is small accordingly but the overall coverage is reduced and therefore performance is lower. We explored their influence on the noisy DFAUST dataset and report the results in Table 5. The results show that the method is fairly robust to the selection of these parameters, producing comparable results for all. The best performance was obtained for $n = 512$ , $k = 16$ . Surprisingly, the performance slightly degrades when increasing $k$ and $n$ beyond these values. This is likely due to the increase in model size, which easily overfits on a dataset of this size.", + "bbox": [ + 75, + 627, + 468, + 838 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Time and parameters. We report the time performance and the number of parameters of several baselines in Table 6. The results show the tradeoff between performance and time, i.e. the temporal approaches exhibit longer pro", + "bbox": [ + 75, + 839, + 468, + 901 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/89efa058769a3a1157dd65688b63d692ab9882082145e22cb7e6d14e451ad688.jpg", + "table_caption": [ + "Table 5. t-patch parameters ablation. Results for the number of neighboring points in a patch $k$ and number of downsampled points $n$ show that the method is robust." + ], + "table_footnote": [], + "table_body": "
Method | Time [ms] | # parameters
PointNet [23] | 64.49 | 3.5M
PointNet++ [24] | 23.35 | 1.5M
PSTNet [10] | 185.92 | 8.3M
Ours t-patch extraction | 180.65 | 0
Ours feature computation | 12.50 | 9.8M
Ours classifier | 0.36 | 1.1M
Ours | 193.51 | 10.9M
", + "bbox": [ + 501, + 88, + 893, + 220 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 6. Time and parameters. Temporal methods have more parameters and take longer. 3DinAction time is mostly used to extract t-patches.", + "bbox": [ + 496, + 231, + 893, + 273 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "cessing times and more parameters while performing better. For the proposed approach, we break down the timing of individual components, namely the t-patch extraction, feature computation, and classifier. The results show that the proposed approach is comparable to PSTNet in time while having more parameters. Interestingly, most of the time is used for extracting the t-patches and not for feature extraction or classification. This is attributed to the farthest point sampling and the sequential knn search, both of which could be further optimized for speed. Note that results are average of 50 runs, each with a batch of 4 and 1024 points per frame.", + "bbox": [ + 496, + 297, + 893, + 464 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations. Since the simplified formulation of t-patch construction uses $knn$ , it is sensitive to variations in point densities. A t-patch in a sparse region will occupy a larger volume than a t-patch in a dense region. We use FPS to mitigate this, however, other approaches can be used e.g., using neighbors in a fixed radius. Another limitation is data with a very low frame rate or very fast motion since this breaks the assumption that points in consecutive frames are close to each other, and will cause inconsistent t-patch motion.", + "bbox": [ + 496, + 464, + 893, + 602 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 612, + 627, + 628 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We introduced the 3DinAction pipeline, a novel method for 3D point cloud action recognition. It showed that the creation of temporal patches is beneficial for finding informative spatio-temporal point representations. 3DinAction has demonstrated a performance boost over SoTA methods.", + "bbox": [ + 496, + 637, + 890, + 712 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work opens many interesting future directions of research. These include trying to learn the t-patch construction instead of the knn selection, imposing stronger temporal structure based on preexisting knowledge and bias (e.g., sceneflow or tracking), and exploring using multimodal inputs with this representation (e.g., RGB or text).", + "bbox": [ + 496, + 713, + 890, + 804 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. This project has received funding from the European Union's Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No 893465. We also thank the Microsoft for Azure Credits and NVIDIA Academic Hardware Grant Program for providing high-speed A5000 GPU.", + "bbox": [ + 496, + 810, + 893, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "19985", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Yizhak Ben-Shabat, Michael Lindenbaum, and Anath Fischer. 3DMFV: Three-dimensional point cloud classification in real-time using convolutional neural networks. 
RAL, 3:3145-3152, 2018. 2, 6", + "[2] Yizhak Ben-Shabat, Xin Yu, Fatemeh Saleh, Dylan Campbell, Cristian Rodriguez-Opazo, Hongdong Li, and Stephen Gould. The aka asm dataset: Understanding people assembling furniture through actions, objects and pose. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 847-859, 2021. 3, 5, 6, 7", + "[3] Federica Bogo, Javier Romero, Gerard Pons-Moll, and Michael J. Black. Dynamic FAUST: Registering human bodies in motion. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), July 2017. 3, 5", + "[4] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 961-970, 2015. 5", + "[5] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 1, 5", + "[6] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3075-3084, 2019. 2", + "[7] Haodong Duan, Yue Zhao, Kai Chen, Dahua Lin, and Bo Dai. Revisiting skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2969-2978, 2022. 1, 3", + "[8] Hehe Fan, Yi Yang, and Mohan Kankanhalli. Point 4d transformer networks for spatio-temporal modeling in point cloud videos. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 14204-14213, 2021. 2, 7", + "[9] Hehe Fan, Yi Yang, and Mohan Kankanhalli. Point spatio-temporal transformer networks for point cloud video modeling. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(2):2181-2192, 2022. 2, 7", + "[10] Hehe Fan, Xin Yu, Yuhang Ding, Yi Yang, and Mohan Kankanhalli. Pistnet: Point spatio-temporal convolution on point cloud sequences. In International Conference on Learning Representations, 2021. 2, 5, 6, 7, 8" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[11] Hehe Fan, Xin Yu, Yi Yang, and Mohan Kankanhalli. Deep hierarchical representation of point cloud videos via spatio-temporal decomposition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(12):9918-9930, 2021. 2", + "[12] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6202-6211, 2019. 1", + "[13] Ankit Goyal, Hei Law, Bowei Liu, Alejandro Newell, and Jia Deng. Revisiting point cloud shape classification with a simple and effective baseline. In International Conference on Machine Learning, pages 3809-3820. PMLR, 2021. 2", + "[14] Yulan Guo, Hanyun Wang, Qingyong Hu, Hao Liu, Li Liu, and Mohammed Bennamoun. Deep learning for 3d point clouds: A survey. PAMI, 2020. 2", + "[15] Roman Klokov and Victor Lempitsky. Escape from cells: Deep kd-networks for the recognition of 3d point cloud models. In Proceedings of the IEEE international conference on computer vision, pages 863-872, 2017. 2", + "[16] Hema Swetha Koppula, Rudhir Gupta, and Ashutosh Saxena. 
Learning human activities and object affordances from rgb-d videos. The International Journal of Robotics Research, 32(8):951-970, 2013. 2", + "[17] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In International conference on machine learning, pages 3744-3753. PMLR, 2019. 2", + "[18] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In International conference on machine learning, pages 3744-3753. PMLR, 2019. 5, 6, 7", + "[19] Wanqing Li, Zhengyou Zhang, and Zicheng Liu. Action recognition based on a bag of 3d points. In 2010 IEEE computer society conference on computer vision and pattern recognition-workshops, pages 9-14. IEEE, 2010. 3, 5", + "[20] Jun Liu, Amir Shahroudy, Mauricio Perez, Gang Wang, Ling-Yu Duan, and Alex C Kot. Ntu rgb+ d 120: A large-scale benchmark for 3d human activity understanding. IEEE transactions on pattern analysis and machine intelligence, 42(10):2684-2701, 2019. 3", + "[21] Xingyu Liu, Mengyuan Yan, and Jeannette Bohg. Meteornet: Deep learning on dynamic 3d point cloud sequences. In Proceedings of the IEEE/CVF Interna" + ], + "bbox": [ + 501, + 90, + 893, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "19986", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "tional Conference on Computer Vision, pages 9246- 9255, 2019. 2", + "[22] Daniel Maturana and Sebastian Scherer. Voxnet: A 3d convolutional neural network for real-time object recognition. In 2015 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 922-928. IEEE, 2015. 2", + "[23] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pages 652-660, 2017. 2, 5, 6, 7, 8", + "[24] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In NeurIPS, volume 30, 2017. 2, 5, 6, 7, 8", + "[25] Guocheng Qian, Yuchen Li, Houwen Peng, Jinjie Mai, Hasan Abed Al Kader Hammoud, Mohamed Elhoseiny, and Bernard Ghanem. Pointnext: Revisiting pointnet++ with improved training and scaling strategies. arXiv preprint arXiv:2206.04670, 2022. 2", + "[26] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In The IEEE International Conference on Computer Vision (ICCV), Oct 2017. 1", + "[27] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision, pages 618-626, 2017. 5", + "[28] Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang. Ntu rgb+ d: A large scale dataset for 3d human activity analysis. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1010-1019, 2016. 3", + "[29] Yiru Shen, Chen Feng, Yaoqing Yang, and Dong Tian. Mining point cloud local structures by kernel correlation and graph pooling. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pages 4548-4557, 2018. 
2", + "[30] Jaeyong Sung, Colin Ponce, Bart Selman, and Ashutosh Saxena. Unstructured human activity detection from rgbd images. In 2012 IEEE international conference on robotics and automation, pages 842-849. IEEE, 2012. 2", + "[31] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 4489-4497, 2015. 1", + "[32] Haiyan Wang, Liang Yang, Xuejian Rong, Jinglun Feng, and Yingli Tian. Self-supervised 4d spatio" + ], + "bbox": [ + 78, + 90, + 470, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "temporal feature learning via order prediction of sequential point cloud clips. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3762-3771, 2021. 2", + "[33] Yancheng Wang, Yang Xiao, Fu Xiong, Wenxiang Jiang, Zhiguo Cao, Joey Tianyi Zhou, and Junsong Yuan. 3dv: 3d dynamic voxel for action recognition in depth video. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 511-520, 2020. 2", + "[34] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. Acm Transactions On Graphics (tog), 38:1-12, 2019. 2", + "[35] Siyuan Yang, Jun Liu, Shijian Lu, Meng Hwa Er, and Alex C Kot. Skeleton cloud colorization for unsupervised 3d action representation learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13423-13433, 2021. 2", + "[36] Cheng Zhang, Haocheng Wan, Shengqiang Liu, Xinyi Shen, and Zizhao Wu. Pvt: Point-voxel transformer for 3d deep learning. arXiv preprint arXiv:2108.06076, 2021. 2", + "[37] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip H.S. Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 16259-16268, October 2021. 2", + "[38] Jia-Xing Zhong, Kaichen Zhou, Qingyong Hu, Bing Wang, Niki Trigoni, and Andrew Markham. No pain, big gain: classify dynamic point cloud sequences with static models by fitting feature-level space-time surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8510-8520, 2022. 2, 7" + ], + "bbox": [ + 501, + 90, + 893, + 626 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "19987", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/5d416e0e-fbb0-491c-8e72-ffffff1eb68b_model.json b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/5d416e0e-fbb0-491c-8e72-ffffff1eb68b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..0d8d2098d084733a8b94d5da03d1dc99f54b7e73 --- /dev/null +++ b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/5d416e0e-fbb0-491c-8e72-ffffff1eb68b_model.json @@ -0,0 +1,1892 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.163, + 0.131, + 0.809, + 0.152 + ], + "angle": 0, + "content": "3DInAction: Understanding Human Actions in 3D Point Clouds" + }, + { + "type": "text", + "bbox": [ + 0.24, + 0.18, + 0.414, + 0.198 + ], + "angle": 0, + "content": "Yizhak Ben-Shabat1,2" + }, + { + "type": "text", + "bbox": [ + 0.455, + 0.181, + 0.562, + 0.198 + ], + "angle": 0, + "content": "Oren Shrout2" + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.181, + 0.729, + 0.199 + ], + "angle": 0, + "content": "Stephen Gould" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.216, + 0.429, + 0.235 + ], + "angle": 0, + "content": "\\(^{1}\\)Australian National University" + }, + { + "type": "text", + "bbox": [ + 0.469, + 0.216, + 0.793, + 0.235 + ], + "angle": 0, + "content": "\\(^{2}\\)Technion, Israel Institute of Technology" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.237, + 0.856, + 0.252 + ], + "angle": 0, + "content": "sitzikbs@technion.ac.il, shout.oren@campus.technion.ac.il, stephen.gould@anu.edu.au" + }, + { + "type": "text", + "bbox": [ + 0.316, + 0.255, + 0.663, + 0.269 + ], + "angle": 0, + "content": "https://github.com/sitzikbs/3dincaction" + }, + { + "type": "image", + "bbox": [ + 0.101, + 0.295, + 0.195, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.298, + 0.353, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.354, + 0.298, + 0.49, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.298, + 0.622, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.629, + 0.299, + 0.78, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.79, + 0.301, + 0.872, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.461, + 0.894, + 0.504 + ], + "angle": 0, + "content": "Figure 1. t-patches for action recognition. We propose a new representation for dynamic 3D point clouds. Termed \\(t\\)-patches, these are locally evolving point cloud sets aggregated over time. Learning features over t-patches provides an improved temporal point cloud representation for action understanding." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.527, + 0.314, + 0.544 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.56, + 0.473, + 0.803 + ], + "angle": 0, + "content": "We propose a novel method for 3D point cloud action recognition. Understanding human actions in RGB videos has been widely studied in recent years, however, its 3D point cloud counterpart remains under-explored despite the clear value that 3D information may bring. This is mostly due to the inherent limitation of the point cloud data modality—lack of structure, permutation invariance, and varying number of points—which makes it difficult to learn a spatio-temporal representation. To address this limitation, we propose the 3DinAction pipeline that first estimates patches moving in time (t-patches) as a key building block, alongside a hierarchical architecture that learns an informative spatio-temporal representation. We show that our method achieves improved performance on existing datasets, including DFAUST and IKEA ASM. Code is publicly available at https://github.com/sitzikbs/3dincaction." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.831, + 0.21, + 0.847 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.903 + ], + "angle": 0, + "content": "In this paper, we address the task of action recognition from 3D point cloud sequences. We propose a novel pipeline wherein points are grouped into temporally evolv-" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.529, + 0.895, + 0.666 + ], + "angle": 0, + "content": "ing patches that capture discriminative action dynamics. Our work is motivated by the massive growth of online media, mobile and surveillance cameras that have enabled the computer vision community to develop many data-driven action-recognition methods [5, 12, 26, 31], most of which rely on RGB video data. Recently, commodity 3D sensors are gaining increased momentum, however, the 3D point cloud modality for action recognition has yet been underexploited due to the scarcity of 3D action-labeled data." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.67, + 0.895, + 0.837 + ], + "angle": 0, + "content": "In many cases, a pure RGB video-based inference may not be enough and incorporating other modalities like geometry is required. This is especially necessary for safety critical applications such as autonomous systems, where redundancy is crucial, or in scenarios where the video is heavily degraded (e.g., due to poor lighting). Some approaches incorporate geometrical information implicitly, e.g., through intermediate pose estimation [7]. This often entails extra steps that require more time and resources and is still limited to video input. Therefore a more explicit approach is desirable." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.895, + 0.903 + ], + "angle": 0, + "content": "3D sensors provide an alternative modality in the form of point clouds sampled on the environment. Despite the vast research on 3D vision and learning, even static 3D point cloud datasets are significantly smaller than their RGB im" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "19978" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.468, + 0.121 + ], + "angle": 0, + "content": "age counterparts due to difficulties in collecting and labeling. 3D point cloud sequence databases are even smaller," + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.122, + 0.468, + 0.226 + ], + "angle": 0, + "content": "making it more difficult to learn a meaningful 3D action representation. Furthermore, learning a point cloud representation still remains an active research field because point clouds are unstructured, unordered, and may contain a varying number of points. Learning a temporal point cloud representation is even more challenging since, unlike pixels, there is no one-to-one point correspondence through time." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.228, + 0.468, + 0.424 + ], + "angle": 0, + "content": "We address these challenges and propose the 3DinAction pipeline for 3D point cloud action recognition. In our pipeline, we first extract local temporal point patches (t-patches) that reflect a point region's motion in time, see Figure 1. We then learn a t-patch representation using a novel hierarchical architecture that incorporates spatial features in the temporal domain. We finally get an action prediction for each frame in a sequence by aggregating multiple t-patch representations. 
This pipeline overcomes the need for ground truth point temporal correspondence, grid structure, point order, and a fixed number of points in each frame. Intuitively, patches reflect local surface deformation and are more robust to point correspondence errors." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.425, + 0.468, + 0.499 + ], + "angle": 0, + "content": "We conduct extended experiments to evaluate the performance of our approach compared to existing SoTA methods and show that 3DinAction provides significant performance gains of \\(13\\%\\) and \\(7\\%\\) in accuracy on DFAUST and IKEA ASM, respectively." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.5, + 0.424, + 0.514 + ], + "angle": 0, + "content": "The key contributions of our work are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.515, + 0.468, + 0.543 + ], + "angle": 0, + "content": "- A novel representation for dynamically evolving local point cloud sets termed t-patches." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.545, + 0.468, + 0.588 + ], + "angle": 0, + "content": "- A hierarchical architecture that produces an informative spatio-temporal representation for sequences of point clouds." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.515, + 0.468, + 0.588 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.604, + 0.218, + 0.619 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.629, + 0.468, + 0.885 + ], + "angle": 0, + "content": "Learning 3D point cloud representations. Point clouds pose a challenge for neural networks due to their unstructured and point-wise unordered nature. To address these challenges, several approaches have been proposed. PointNet [23, 24] uses permutation-invariant operators, such as pointwise MLPs and pooling layers, to aggregate features across a point set. Some approaches construct a graph from the point set. DGCNN [34] applies message passing and performs graph convolutions on kNN graphs, KCNet [29] uses kernel correlation and graph pooling, and Kd-Networks [15] apply multiplicative transformations and share the parameters based on the subdivisions imposed by kd-trees. Alternatively, the structure can be imposed using a grid of voxels [22, 36], or a grid of Gaussians in 3DmFVNet [1]. Another alternative avoids the structure by using Transformer's attention mechanism [17, 37]. For a comprehensive survey of point cloud architectures please see [14]." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.887, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Recently, various factors that can impact the training of" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.182 + ], + "angle": 0, + "content": "different architectures have been investigated [13, 25]. This includes exploring data augmentation strategies and loss functions that are not specific to a particular architecture. The results of this study showed that older PointNet-based architectures [23, 24] can perform comparably to newer architectures with minor changes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.185, + 0.892, + 0.274 + ], + "angle": 0, + "content": "All of the above methods deal with static, single-frame, or single-shape point clouds. In this work, the input is a temporal point cloud where a representation for a short sequence is required and point correspondence between frames is unknown. Therefore extending existing approaches is not trivial." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.278, + 0.892, + 0.653 + ], + "angle": 0, + "content": "Learning temporal 3D point cloud representations. Temporal point clouds have not been as extensively studied as their static counterparts, in particular for action recognition. Meteornet [21] processes 4D points using a PointNet++ architecture by appending a temporal dimension to the spatial coordinates. PSTNet [10, 11] proposed spatio-temporal convolutions and utilized some of the temporal consistency for action recognition. Similarly, P4Transformer [8] uses 4D convolutions and a transformer for capturing appearance and motion via self-attention. In a follow-up work, PST-Transformer [9] employs video-level self-attention to search for similar points across entire videos and so encodes spatio-temporal structure. Some works attempt to alleviate the full supervision requirement for 3D action recognition. These include self-supervised feature learning [32], which predicts temporal order from a large unlabeled dataset and fine-tunes on smaller annotated datasets, and unsupervised skeleton colorization [35]. Additional supervised approaches include MinkowskiNet [6], which uses a 4D spatio-temporal CNN after converting the point clouds to an occupancy grid, 3DV [33], which encodes 3D motion information from depth videos into a compact voxel set, and Kinet [38], which implicitly encodes feature-level dynamics in feature space by unrolling the normal solver of ST-surfaces." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.657, + 0.892, + 0.747 + ], + "angle": 0, + "content": "The above methods perform a single classification per clip. In this paper, we focus on a related, and more challenging, task that requires a prediction per frame. We propose to convert the point cloud representation into t-patches and use an MLP-based hierarchical architecture to obtain the spatio-temporal representation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.9 + ], + "angle": 0, + "content": "3D action understanding datasets. One of the major driving forces behind the success of learning-based approaches is the availability of annotated data. For the task of 3D point cloud action recognition, there is currently no designated standard dataset; however, some existing datasets may be extended. The CAD 60 and CAD 120 [16, 30] datasets include 60 and 120 long-term activity videos of 12 and 10 classes respectively (e.g., making cereal, microwaving food). These datasets provide raw RGB, skeletons, and depth data; however, their small scale and long-term focus limit their effec
The sequences in this dataset are very short and therefore using it to evaluate learning-based approaches provides a limited indication of generalization. The above datasets provide per clip action annotations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.322, + 0.473, + 0.715 + ], + "angle": 0, + "content": "Some datasets inherently provide per-frame annotations. The IKEA ASM dataset [2] provides 371 videos clipped into 31K clips. It contains 33 action classes related to furniture assembly, annotated per frame. This dataset provides several modalities including three RGB views, and Depth. It is an extremely challenging dataset since the human assembler is often occluded and presents very unique assembly poses. It is also very imbalanced since different assembly actions have different duration and may repeat multiple times within the same assembly. Although it was designed for video action recognition, its challenges are the core reasons for choosing to extend it to the point cloud action recognition task. The DFAUST dataset [3] provides high-resolution 4D scans of human subjects in motion. It includes 14 action categories with over 100 dynamic scans of 10 subjects (1:1 male-to-female ratio) with varying body shapes represented as registrations of aligned meshes, therefore an extension to our task is straightforward. One particularly important feature of this dataset is the GT point correspondences throughout the sequence i.e. it is possible to follow each point's movement through time. While this dataset is not as large-scale as others, it provides ground truth information (correspondence) that most other collected datasets do not. Therefore, we extend this dataset to 3D point cloud action recognition and use it as a testbed for many ablation studies (see Section 4.4)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.737, + 0.27, + 0.755 + ], + "angle": 0, + "content": "3. 3DinAction pipeline" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Our 3DinAction pipeline is illustrated in Figure 2. Given a temporal sequence of 3D point clouds we first extract a set of t-patches (Section 3.1). We then feed the t-patches into a hierarchical neural network (Section 3.2) to produce a per-frame high dimensional feature vector representation. Finally, the feature vectors are fed into a classifier to obtain per-frame predictions. The proposed approach is prior-free (no skeleton extraction required) and therefore general and can be used on different action-understanding datasets." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.608, + 0.108 + ], + "angle": 0, + "content": "3.1. t-patches" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.113, + 0.892, + 0.19 + ], + "angle": 0, + "content": "Let \\( S = \\{x_{j} \\in \\mathbb{R}^{3} \\mid j = 1, \\dots, N\\} \\) denote a 3D point cloud with \\( N \\) points. In the classic (static) setting, a patch \\( \\Psi_{q} \\) is extracted around some query point \\( x_{q} \\). For example, the patch \\( \\Psi_{q} \\) may be constructed by finding the \\( k \\)-nearest neighbors of \\( x_{q} \\) in \\( S \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.19, + 0.892, + 0.282 + ], + "angle": 0, + "content": "In our temporal setting we are given a sequence of point clouds \\( S = \\{S^0, \\dots, S^T\\} \\) composed of point cloud frames \\( S^t = \\{x_j^t \\mid j = 1, \\dots, N^t\\} \\). 
Here the superscript \\( t \\) is used to denote the index of the point cloud in the sequence. Instead of extracting a patch within a single frame, we allow patches to extend temporally, and denote them as \\( t \\)-patches." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.295, + 0.892, + 0.387 + ], + "angle": 0, + "content": "Definition 3.1 A t-patch \\( P_{q} \\) is a sequence of point sets indexed by a query point \\( x_{q}^{0} \\) and jointly moving in time defined by a pointwise mapping function between patches in consecutive frames. Mathematically, \\( P_{q} = \\langle \\Psi_{q}^{t}\\rangle_{t = 0}^{T} \\) where \\( \\Psi_{q}^{0} \\) is the initial (static) patch and \\( \\Psi_{q}^{t} = \\Phi (\\Psi_{q}^{t - 1}) \\) is the patch at time \\( t \\) where \\( \\Phi \\) is a pointwise mapping function." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.399, + 0.892, + 0.553 + ], + "angle": 0, + "content": "In practice, it is difficult to find a reliable mapping function \\(\\Phi\\). Therefore we propose a simplified formulation that, for a given query point \\(x_{q}^{0}\\), first extracts a patch for the first frame \\(\\Psi_{q}^{0}\\) and then iteratively extracts corresponding patches for the next frames (iterating over time), by using the closest point in the next frame as the new query point. More formally, we specify \\(\\vec{\\Psi}_{q}^{0} \\triangleq \\Psi_{q}^{0}\\), \\(\\vec{\\Psi}_{q}^{t} = knn(x_{q}^{t-1}, S^{t})\\) and \\(x_{q}^{t} = n n(x_{q}^{t-1}, S^{t})\\) for \\(t = 1, \\ldots, T\\). Here \\(knn\\) is the \\(k\\) nearest neighbor and \\(n n\\) is nearest neighbor. Then, the simplified t-patch formulation is given by" + }, + { + "type": "equation", + "bbox": [ + 0.604, + 0.563, + 0.892, + 0.587 + ], + "angle": 0, + "content": "\\[\n\\vec {P} _ {q} = \\left\\langle \\vec {\\Psi} _ {q} ^ {t} \\mid t = 0, \\dots , T \\right\\rangle \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.597, + 0.892, + 0.656 + ], + "angle": 0, + "content": "See Figure 3 left for an illustration of the t-patch extraction process. Note that if ground truth correspondence is available \\( knn \\) can be swapped back to \\( \\Phi \\). However, this does not guarantee improved performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.657, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Temporal t-patch collapse. The simplified formulation of extracting t-patches inherently suffers from the problem of two or more t-patches collapsing into having the same points after a certain frame. We call this scenario t-patch temporal collapse. Temporal collapse can happen whenever \\( x_{q}^{t} = x_{p}^{t} \\) for \\( x_{q}^{0} \\neq x_{p}^{0} \\). The main issue with temporal collapse is the reduction in point coverage as time progresses, i.e. the patches covering the last point cloud have significant overlaps and therefore include fewer points than the first frame and so missing vital data. An illustration of the t-patch collapse problem is available in Figure 3 (right). To mitigate this issue, we propose two solutions. First, adding small noise to each iteration's query points, i.e. \\( \\overrightarrow{\\Psi}_{q}^{t} = knn(x_{q}^{t} + \\epsilon, S^{t+1}) \\) where \\( \\epsilon \\sim \\mathcal{N}(\\mu, \\sigma^{2}) \\) is a small Gaussian noise. 
Second, we propose to construct t-patches from the first to last frame but also in reverse, initializing" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19980" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.111, + 0.882, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.419, + 0.893, + 0.464 + ], + "angle": 0, + "content": "Figure 2. 3DinAction pipeline. Given a sequence of point clouds, a set of t-patches is extracted. The t-patches are fed into a neural network to output an embedding vector. This is done hierarchically until finally the global t-patch vectors are pooled to get a per-frame point cloud embedding which is then fed into a classifier to output an action prediction per frame." + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.49, + 0.447, + 0.611 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.631, + 0.47, + 0.716 + ], + "angle": 0, + "content": "Figure 3. t-patch construction and collapse. Illustration of t-patch construction (left) and collapse (right). Starting from an origin point \\( x_{q}^{0} \\) we find the nearest neighbours in the next frame iteratively to construct the t-patch subset (non-black points). A collapse happens when two different origin points, \\( x_{q}^{0} \\) and \\( x_{p}^{0} \\), have the same nearest neighbour at some time step, \\( \\Psi_{p}^{3} = \\Psi_{q}^{3} \\) here." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.741, + 0.47, + 0.788 + ], + "angle": 0, + "content": "with \\(\\Psi_q^0\\) and \\(\\Psi_q^T\\), respectively. We name this variation bidirectional t-patches. More formally bidirectional t-patches are given by," + }, + { + "type": "equation", + "bbox": [ + 0.17, + 0.81, + 0.469, + 0.853 + ], + "angle": 0, + "content": "\\[\n\\stackrel {\\leftrightarrow} {P} = \\left(\\bigcup_ {q} \\vec {P} _ {q}\\right) \\cup \\left(\\bigcup_ {p} \\overleftarrow {P} _ {p}\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.864, + 0.47, + 0.904 + ], + "angle": 0, + "content": "where \\(\\overleftarrow{P}_p\\) is defined similarly to \\(\\vec{P}_q\\) but in the reverse direction, i.e., \\(\\overleftarrow{\\Psi}_p^T \\triangleq \\Psi_p^T\\) and \\(\\overleftarrow{\\Psi}_p^t = knn(x_p^{t+1}, S^t)\\) for" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.488, + 0.892, + 0.52 + ], + "angle": 0, + "content": "\\(t = T - 1,\\ldots ,0\\) . Here, the final set of t-patches is composed of an equal number of t-patches from both directions." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.529, + 0.733, + 0.544 + ], + "angle": 0, + "content": "3.2. Hierarchical architecture" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.553, + 0.892, + 0.901 + ], + "angle": 0, + "content": "The proposed architecture is composed of \\(l\\) consecutive t-patch modules. Each module receives a point cloud sequence \\(S\\) as input. The sequence is fed into a t-patch extractor where it undergoes subsampling and t-patch extraction, forming \\(\\tilde{S}_l\\) and \\(P^l\\) respectively. Then, the t-patches are fed into t-patch Net, a network that computes a high-dimensional feature vector \\(f_{l}\\) for each t-patch, parametrized by \\(\\theta_l\\). The subsampled sequence \\(\\tilde{S}_l\\) and its corresponding t-patch features \\(f_{l}\\) are then fed into the next t-patch module. 
These modules form a hierarchy in the sense that each module receives as input a sparser point cloud with a higher dimensional feature vector representing each point (encoding both spatial and temporal information). Note that both the t-patch points and their features are fed into t-patch Net. t-patch extractor. We first subsample the first frame in the point cloud sequence \\(S^0\\) using farthest point sampling (FPS) to form a set of \\(M\\) query points \\(\\tilde{S}^0 = \\{x_j^0\\in FPS(S^0,M)\\}\\). The set \\(\\tilde{S}^0\\) is used to form the t-patches. Subsampling is required since computing a t-patch for each point is inefficient and unnecessary due to overlaps. After subsampling, we extract \\(M\\) t-patches using Equation 2 where \\(q\\in \\tilde{S}^0\\). The extractor operates on both 3D points and their corresponding features (for mod" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "19981" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.273, + 0.106 + ], + "angle": 0, + "content": "ules deeper in the hierarchy)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.107, + 0.473, + 0.381 + ], + "angle": 0, + "content": "Model architecture and t-patch net. The t-patch network computes a high dimensional representation for each t-patch. The t-patch Net architecture is composed of several MLP layers operating on the non-temporal dimensions (sharing weights across points) followed by a convolutional layer operating on both the temporal and feature dimensions. Note that the network weights are also shared across t-patches. The output of each t-patch Net is a vector for each frame. The final frame representation is obtained by aggregating all of the t-patch features using a max pooling operation i.e. \\( \\text{maxpool}_{M_l}(f_3) \\). This representation is then fed into a classifier consisting of three fully connected layers with temporal smoothing and softmax to output the final action prediction. To train the network we use the same losses of RGB based approaches [2, 5] which include a per-frame prediction cross entropy loss and a per-sequence prediction cross entropy loss (summed and weighted evenly) \\( L_{total} = L_{frame} + L_{seq} \\). For full details see supplemental." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.392, + 0.21, + 0.409 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.418, + 0.47, + 0.568 + ], + "angle": 0, + "content": "We evaluate the performance of our approach on three datasets. The results show that the 3DinAction pipeline outperforms all baselines in DFAUST [3] and IKEA ASM [2] and is comparable in MSR-Action 3D [19]. We then conduct an ablation study for selecting parameters and t-patch extraction method showing that adding jitter and bidirectional t-patches is beneficial. Finally, we report time performance and show the tradeoff between performance and inference time. For more results and experiments, see supplemental material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.569, + 0.47, + 0.75 + ], + "angle": 0, + "content": "Baselines and evaluation metrics. For evaluation, we report several standard metrics [4]: the top1 and top3 framework accuracy are the de facto standard for action classification. We compute it by summing the number of correctly classified frames and dividing by the total number of frames in each video and then averaging over all videos in the test set. 
Additionally, since some of the datasets are imbalanced and may contain different actions for each frame in a clip, we also report the macro-recall by separately computing recall for each category and then averaging (macro). Finally, we report the mean average precision (mAP) since all untrimmed videos contain multiple action labels." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.471, + 0.902 + ], + "angle": 0, + "content": "For DFAUST and IKEA ASM we report static methods PointNet [23], \\(\\mathrm{PointNet}^{++}\\) [24], and Set Transformer [18] by applying them on each point cloud frame individually. Additionally, we report temporal methods like PSTNet [10] and also implemented a temporal smoothing version of each static method (PoinNet+TS, \\(\\mathrm{Pointnet}^{++} + \\mathrm{TS}\\), and Set Transformer+TS respectively) by learning the weights of a convolutional layer over the temporal dimension. Temporal smoothing aims to provide a naive baseline for utilizing temporal information in addition to spatial information." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.138 + ], + "angle": 0, + "content": "Note that in all experiments, unless otherwise specified, our method uses the simplified formulation with jitter and bidirectional t-patches." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.147, + 0.791, + 0.164 + ], + "angle": 0, + "content": "4.1. Experiments on DFAUST dataset" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.171, + 0.892, + 0.216 + ], + "angle": 0, + "content": "We extend the DFAUST dataset for the task of action recognition and show that the proposed approach outperforms other methods (see Table 1)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.217, + 0.893, + 0.457 + ], + "angle": 0, + "content": "DFAUST dataset [3]. We extended the DFAUST dataset to our task by subdividing it into clips of 64 frames with train and test human subjects. The split was constructed so no subject will appear in both training and test set as well as guarantee that all actions appear in both. The train and test sets contain 76 full-length sequences (395 clips, and \\(\\sim 25\\mathrm{K}\\) frames) and 53 sequences (313 clips, and \\(\\sim 20\\mathrm{K}\\) frames) respectively. Each point cloud frame contains 6890 points. These points are mesh vertices and therefore the density varies greatly (e.g., very dense on the face, hands, and feet and sparser on the legs). For all baselines, we sampled a set of 1024 points using the farthest point sampling algorithm to provide a more uniform set of points. For this dataset, all frames in a clip have the same label. Note that not all actions are performed by all subjects. For the full action list and dataset statistics, see the supplemental." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.459, + 0.893, + 0.684 + ], + "angle": 0, + "content": "Results. The results, reported in Table 1, show that our proposed approach outperforms all baselines by a large margin. It also shows that temporal smoothing boosts performance significantly for all static baselines. Additionally, to explore the influence of our simplified knn-based temporal point mapping, we used the GT point correspondence to match the consecutive t-patch origin points and report the results as another baseline (Ours + GT corr). The results show that there is a mAP performance gain with GT correspondence, however, it is limited. Note that in most datasets, this GT correspondence is not available. 
Finally, we also experimented with a Transformer architecture to process the t-patch learned representations and show that it does not provide additional performance boost. This may be attributed to the dataset size." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.686, + 0.892, + 0.822 + ], + "angle": 0, + "content": "Insight. We extended the GradCam [27] approach for our 3DinAction pipeline. Using this approach we get a score per point in each t-patch proportional to its influence on classifying the frame to a given target class. The results in Figure 4 show that, as expected, our approach learns meaningful representations since the most prominent regions are the ones with the informative motion. For example, in the Jumping jacks action (top row) the hands are most prominent as they are making a large and distinct motion." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.832, + 0.813, + 0.848 + ], + "angle": 0, + "content": "4.2. Experiments on IKEA ASM dataset" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "IKEA ASM dataset [2]. This dataset consists of 371 videos (3M frames) of people assembling IKEA furniture in different indoor environments. It was collected using a" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19982" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.087, + 0.089, + 0.462, + 0.297 + ], + "angle": 0, + "content": "
Method | Frame acc.
top 1 | top 3 | mAP
3DmFVNet [1] | 60.86 | 87.68 | 0.7171
PointNet [23] | 65.67 | 86.44 | 0.7161
PointNet++ [24] | 58.51 | 88.28 | 0.5842
Set Transformer [18] | 52.27 | 81.98 | 0.6209
PointNet [23] + TS | 74.10 | 94.00 | 0.7863
PointNet++ [24] + TS | 67.88 | 86.21 | 0.7563
Set Transformer [18] + TS | 62.95 | 90.33 | 0.7322
PSTNet [10] | 50.70 | 78.28 | 0.6490
Ours + GT corr | 77.67 | 95.38 | 0.8762
Ours + Transformer | 77.09 | 93.77 | 7.49
Ours | 87.26 | 99.26 | 0.8616
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.307, + 0.47, + 0.351 + ], + "angle": 0, + "content": "Table 1. Action recognition results on DFAUST. Reporting frame-wise accuracy and mean average precision. Ours outperforms all baselines by a large margin." + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.368, + 0.455, + 0.721 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.73, + 0.47, + 0.813 + ], + "angle": 0, + "content": "Figure 4. 3DinAction GradCAM scores. The proposed 3DinAction pipeline learns meaningful representations for prominent regions. The presented actions are jumping jacks (top row), hips (middle row), and knees (bottom row). The columns represent progressing time steps from left to right. Colormap indicates high GradCAM scores in red and low scores in blue." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Kinect V2 camera and provides camera parameters to reconstruct point clouds in camera coordinates. It provides action annotation for each frame (33 classes). It is a highly challenging dataset for two main reasons: (1) It is highly" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.272 + ], + "angle": 0, + "content": "imbalanced since some actions have a long duration and occur multiple times in each video (e.g., spin leg) and some are shorter and sparser (flip tabletop). (2) The assembly motion includes a lot of self-occlusion as well as subtle movements. The train/test split consists of 254 and 117 full sequences respectively. The split is environment-based (i.e. in the test set there is no environment that appeared in the training set). The assembly videos have an average of \\(\\sim\\) 2735 frames per video. The point clouds provided in this dataset are aligned to the camera coordinate frame, posing a challenge for methods that are sensitive to rotations since the camera moves between different scans." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.274, + 0.893, + 0.53 + ], + "angle": 0, + "content": "Results. The results on the IKEA ASM dataset are reported in Table 2. The results show that the proposed 3DinAction pipeline provides a significant performance boost over static approaches and their temporally smooth variants. Additionally, as expected, PointNet and Set Transformer are heavily affected by the variations in coordinate frames. \\(\\mathrm{PointNet}^{++}\\) on the other hand performs better since it uses local coordinate frames for each local region. All methods show an improved mAP when using the temporally smooth variant with degradation in frame-wise accuracy due to the dataset imbalance. For this dataset, the top1 metric is not always indicative of the quality of performance because a high top1 is directly correlated with many frames classified as the most common class. Additionally, we compare to pose-based methods reported in [2] and show that the proposed approach also outperforms these baselines. See supplementary material for confusion matrices." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.532, + 0.895, + 0.744 + ], + "angle": 0, + "content": "t-patch intuition and visualization. In Figure 5 we visualize the t-patches for the flip table action in the TV Bench assembly. A set of selected t-patches are highlighted in color demonstrating different types of t-patches and their spatiotemporal changes. The blue is on the moving TV Bench assembly, it moves rigidly with the assembly. 
The maroon is on the moving person's arm, it exhibits nonrigid motion and deformations through time. The teal is on the static table surface containing some of the TV Bench's points in the first frame but remains static when it moves since its origin query point is on the table. The green is on the static carpet, remaining approximately the same through time. Note that the RGB images are for visualization purposes and are not used in our pipeline." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.756, + 0.842, + 0.772 + ], + "angle": 0, + "content": "4.3. Experiments on MSR-Action3D dataset" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "For this dataset, the task is to predict a single class for a sequence of frames (unlike the other datasets where a per-frame prediction is required). To that end, we replace our classifier with a single fully connected layer and max pooled the results over the temporal domain (similar to [10]). The results, reported in Table 3, show that all SoTA methods, including the proposed approach, exhibit very similar performance. This is mainly attributed to the small scale of the" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19983" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.088, + 0.287, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.289, + 0.088, + 0.484, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.486, + 0.088, + 0.683, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.685, + 0.089, + 0.882, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.312, + 0.895, + 0.357 + ], + "angle": 0, + "content": "Figure 5. IKEA ASM example with t-patches. The flip table action for the TV Bench assembly is visualization including the RGB image (top), and a grayscale 3D point cloud with t-patches (bottom). t-patches are highlighted in color. The blue is on the moving TV Bench assembly, maroon is on the moving persons arm, teal is on the static table surface, and green is on the colorful static carpet." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.379, + 0.47, + 0.589 + ], + "angle": 0, + "content": "
Method | Frame acc.
top 1 | top 3 | macro | mAP
PointNet [23] | 4.20 | 19.86 | 5.76 | 0.0346
PointNet++ [24] | 45.97 | 70.10 | 29.48 | 0.1187
Set Transformer [18] | 14.96 | 57.12 | 13.16 | 0.0299
PointNet [23] + TS | 6.00 | 19.48 | 5.14 | 0.0804
PointNet++ [24] + TS | 27.84 | 60.64 | 27.72 | 0.2024
Set Transformer [18] + TS | 9.54 | 36.50 | 10.74 | 0.1471
PSTNet [10] | 17.94 | 52.24 | 17.14 | 0.2016
Human Pose HCN [2] | 39.15 | 65.37 | 28.18 | 0.2232
Human Pose ST-GCN [2] | 43.4 | 66.29 | 26.54 | 0.1856
Ours without BD | 45.16 | 72.83 | 35.06 | 0.2932
Ours | 52.91 | 75.03 | 38.84 | 0.2875
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.597, + 0.471, + 0.642 + ], + "angle": 0, + "content": "Table 2. Action classification on IKEA ASM. The proposed approach provides a significant performance boost over other static and dynamic approaches, including the temporal smoothing (TS)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.668, + 0.47, + 0.745 + ], + "angle": 0, + "content": "dataset and the lack of diversity in the action classes. Furthermore, we witnessed that the main performance gap is for frames and sequences where the action is indistinguishable (e.g., first few frames of a sequence where no distinguishable action commenced)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.756, + 0.228, + 0.772 + ], + "angle": 0, + "content": "4.4. Ablation study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.471, + 0.903 + ], + "angle": 0, + "content": "t-patch extraction. We studied the t-patch extraction method and its effects on action recognition on a noisy version of the DFAUST dataset. The results reported in Table 4, show the significance of the t-patch collapse problem and the effectiveness of adding small jitter and bidirectional t-patches to overcome it. In the DFAUST dataset, finding the nearest neighbor between frames provides a \\(\\sim 96\\%\\) correspondence accuracy (small motion between frames)." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.379, + 0.905, + 0.496 + ], + "angle": 0, + "content": "
Method | #frames
4 | 8 | 12 | 16 | 24
PSTNet [10] | 81.14 | 83.50 | 87.88 | 89.90 | 91.20
P4Transformer [8] | 80.13 | 83.17 | 87.54 | 89.56 | 90.94
PST-Transformer [9] | 81.14 | 83.97 | 88.15 | 91.98 | 93.73
Kinet [38] | 79.80 | 83.84 | 88.53 | 91.92 | 93.27
Ours | 80.47 | 86.20 | 88.22 | 90.57 | 92.23
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.506, + 0.895, + 0.549 + ], + "angle": 0, + "content": "Table 3. MSR-Action3D classification results. Reporting classification accuracy for clips of different lengths. Results show that all methods are comparable since this dataset's scale is limited." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.581, + 0.892, + 0.657 + ], + "angle": 0, + "content": "Therefore, in this experiment, we augment the dataset once by adding small Gaussian noise to each point in the dataset \\((\\sigma = 0.01)\\), decreasing the correspondence accuracy to \\(\\sim 62.4\\%\\) and introducing multiple t-patch collapse instances as well as increasing the classification difficulty." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Several variants of the t-patch extraction were explored. The first variation (GT) incorporates the ground truth correspondence into the t-patch extraction. Using this method, there is no t-patch collapse since there is a one-to-one mapping between frames. We expected this to produce an upper bound on the performance, however, surprisingly the results show that this variation is actually inferior to the proposed t-patch approach. We attribute this to the proposed t-patch extraction inherent augmentation caused by the downsampling and nearest neighbor point jitter. We then continue to explore the proposed approaches for dealing with t-patch collapse which include jitter, i.e. adding small noise to each point before finding its nearest neighbor in the next frame, and the bidirectional t-patches that extract patches both from the first to the last frame and from the last to the first frame. The results show that adding jitter is al" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "19984" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.086, + 0.089, + 0.462, + 0.251 + ], + "angle": 0, + "content": "
Frame acc.
Data | GT | Jitter | BD | top 1 | top 3 | mAP
clean | 77.67 | 95.38 | 0.8762
74.73 | 92.14 | 0.8097
80.49 | 96.61 | 0.9023
87.26 | 99.26 | 0.8616
noisy | 76.08 | 95.50 | 0.9013
66.74 | 93.76 | 0.7626
81.83 | 98.97 | 0.9220
80.03 | 97.57 | 0.8975
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.261, + 0.472, + 0.306 + ], + "angle": 0, + "content": "Table 4. t-patch collapse ablation on DFAUST. Exploring adding (1) GT - ground truth correspondences, (2) jitter - small Gaussian noise in t-patch construction, and (3) BD - bidirectional t-patches." + }, + { + "type": "table", + "bbox": [ + 0.14, + 0.321, + 0.408, + 0.438 + ], + "angle": 0, + "content": "
Frame acc.
n | k | top 1 | top 3 | mAP
256 | 16 | 76.96 | 97.54 | 0.8430
512 | 16 | 80.03 | 97.57 | 0.8975
1024 | 16 | 77.30 | 97.88 | 0.8507
512 | 8 | 76.87 | 96.21 | 0.7557
512 | 32 | 77.91 | 96.60 | 0.7453
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.448, + 0.472, + 0.492 + ], + "angle": 0, + "content": "Table 5. t-patch parameters ablation. Results for the number of neighboring points in a patch \\( k \\) and number of downsampled points \\( n \\) show that the method is robust." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.52, + 0.47, + 0.626 + ], + "angle": 0, + "content": "ways beneficial and provides a boost in performance. The bidirectional t-patches improve accuracy performance significantly when the data is clean and are comparable when the data is noisy. Note that in both dataset variations, the degradation due to temporal t-patch collapse is low compared to Kinect-based scan data, therefore the bidirectional benefits are not fully utilized." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.628, + 0.47, + 0.839 + ], + "angle": 0, + "content": "t-patch parameters. The core parameters for t-patch extraction are the number of neighbors to extract \\((k)\\) and the number of points to subsample \\((n)\\). Here there is a tradeoff between complexity and performance i.e. when \\(k\\) and \\(n\\) are small, the input to the model is small accordingly but the overall coverage is reduced and therefore performance is lower. We explored their influence on the noisy DFAUST dataset and report the results in Table 5. The results show that the method is fairly robust to the selection of these parameters, producing comparable results for all. The best performance was obtained for \\(n = 512\\), \\(k = 16\\). Surprisingly, the performance slightly degrades when increasing \\(k\\) and \\(n\\) beyond these values. This is likely due to the increase in model size, which easily overfits on a dataset of this size." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Time and parameters. We report the time performance and the number of parameters of several baselines in Table 6. The results show the tradeoff between performance and time, i.e. the temporal approaches exhibit longer pro" + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.089, + 0.895, + 0.221 + ], + "angle": 0, + "content": "
Method | Time [ms] | # parameters
PointNet [23] | 64.49 | 3.5M
PointNet++ [24] | 23.35 | 1.5M
PSTNet [10] | 185.92 | 8.3M
Ours t-patch extraction | 180.65 | 0
Ours feature computation | 12.50 | 9.8M
Ours classifier | 0.36 | 1.1M
Ours | 193.51 | 10.9M
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.232, + 0.895, + 0.275 + ], + "angle": 0, + "content": "Table 6. Time and parameters. Temporal methods have more parameters and take longer. 3DinAction time is mostly used to extract t-patches." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.298, + 0.895, + 0.465 + ], + "angle": 0, + "content": "cessing times and more parameters while performing better. For the proposed approach, we break down the timing of individual components, namely the t-patch extraction, feature computation, and classifier. The results show that the proposed approach is comparable to PSTNet in time while having more parameters. Interestingly, most of the time is used for extracting the t-patches and not for feature extraction or classification. This is attributed to the farthest point sampling and the sequential knn search, both of which could be further optimized for speed. Note that results are average of 50 runs, each with a batch of 4 and 1024 points per frame." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.465, + 0.895, + 0.603 + ], + "angle": 0, + "content": "Limitations. Since the simplified formulation of t-patch construction uses \\( knn \\), it is sensitive to variations in point densities. A t-patch in a sparse region will occupy a larger volume than a t-patch in a dense region. We use FPS to mitigate this, however, other approaches can be used e.g., using neighbors in a fixed radius. Another limitation is data with a very low frame rate or very fast motion since this breaks the assumption that points in consecutive frames are close to each other, and will cause inconsistent t-patch motion." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.613, + 0.628, + 0.629 + ], + "angle": 0, + "content": "5. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.638, + 0.892, + 0.713 + ], + "angle": 0, + "content": "We introduced the 3DinAction pipeline, a novel method for 3D point cloud action recognition. It showed that the creation of temporal patches is beneficial for finding informative spatio-temporal point representations. 3DinAction has demonstrated a performance boost over SoTA methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.714, + 0.892, + 0.805 + ], + "angle": 0, + "content": "This work opens many interesting future directions of research. These include trying to learn the t-patch construction instead of the knn selection, imposing stronger temporal structure based on preexisting knowledge and bias (e.g., sceneflow or tracking), and exploring using multimodal inputs with this representation (e.g., RGB or text)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Acknowledgement. This project has received funding from the European Union's Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No 893465. We also thank the Microsoft for Azure Credits and NVIDIA Academic Hardware Grant Program for providing high-speed A5000 GPU." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19985" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.116, + 0.47, + 0.175 + ], + "angle": 0, + "content": "[1] Yizhak Ben-Shabat, Michael Lindenbaum, and Anath Fischer. 
3DMFV: Three-dimensional point cloud classification in real-time using convolutional neural networks. RAL, 3:3145-3152, 2018. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.177, + 0.472, + 0.281 + ], + "angle": 0, + "content": "[2] Yizhak Ben-Shabat, Xin Yu, Fatemeh Saleh, Dylan Campbell, Cristian Rodriguez-Opazo, Hongdong Li, and Stephen Gould. The aka asm dataset: Understanding people assembling furniture through actions, objects and pose. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 847-859, 2021. 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.283, + 0.471, + 0.355 + ], + "angle": 0, + "content": "[3] Federica Bogo, Javier Romero, Gerard Pons-Moll, and Michael J. Black. Dynamic FAUST: Registering human bodies in motion. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), July 2017. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.358, + 0.472, + 0.447 + ], + "angle": 0, + "content": "[4] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 961-970, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.449, + 0.471, + 0.523 + ], + "angle": 0, + "content": "[5] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.525, + 0.471, + 0.599 + ], + "angle": 0, + "content": "[6] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3075-3084, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.601, + 0.471, + 0.674 + ], + "angle": 0, + "content": "[7] Haodong Duan, Yue Zhao, Kai Chen, Dahua Lin, and Bo Dai. Revisiting skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2969-2978, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.676, + 0.471, + 0.749 + ], + "angle": 0, + "content": "[8] Hehe Fan, Yi Yang, and Mohan Kankanhalli. Point 4d transformer networks for spatio-temporal modeling in point cloud videos. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 14204-14213, 2021. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.751, + 0.47, + 0.825 + ], + "angle": 0, + "content": "[9] Hehe Fan, Yi Yang, and Mohan Kankanhalli. Point spatio-temporal transformer networks for point cloud video modeling. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(2):2181-2192, 2022. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.826, + 0.47, + 0.9 + ], + "angle": 0, + "content": "[10] Hehe Fan, Xin Yu, Yuhang Ding, Yi Yang, and Mohan Kankanhalli. Pistnet: Point spatio-temporal convolution on point cloud sequences. In International Conference on Learning Representations, 2021. 
2, 5, 6, 7, 8" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.092, + 0.894, + 0.167 + ], + "angle": 0, + "content": "[11] Hehe Fan, Xin Yu, Yi Yang, and Mohan Kankanhalli. Deep hierarchical representation of point cloud videos via spatio-temporal decomposition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(12):9918-9930, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.17, + 0.895, + 0.245 + ], + "angle": 0, + "content": "[12] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6202-6211, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.248, + 0.895, + 0.323 + ], + "angle": 0, + "content": "[13] Ankit Goyal, Hei Law, Bowei Liu, Alejandro Newell, and Jia Deng. Revisiting point cloud shape classification with a simple and effective baseline. In International Conference on Machine Learning, pages 3809-3820. PMLR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.326, + 0.895, + 0.372 + ], + "angle": 0, + "content": "[14] Yulan Guo, Hanyun Wang, Qingyong Hu, Hao Liu, Li Liu, and Mohammed Bennamoun. Deep learning for 3d point clouds: A survey. PAMI, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.374, + 0.895, + 0.448 + ], + "angle": 0, + "content": "[15] Roman Klokov and Victor Lempitsky. Escape from cells: Deep kd-networks for the recognition of 3d point cloud models. In Proceedings of the IEEE international conference on computer vision, pages 863-872, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.451, + 0.895, + 0.512 + ], + "angle": 0, + "content": "[16] Hema Swetha Koppula, Rudhir Gupta, and Ashutosh Saxena. Learning human activities and object affordances from rgb-d videos. The International Journal of Robotics Research, 32(8):951-970, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.515, + 0.895, + 0.605 + ], + "angle": 0, + "content": "[17] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In International conference on machine learning, pages 3744-3753. PMLR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.607, + 0.895, + 0.697 + ], + "angle": 0, + "content": "[18] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In International conference on machine learning, pages 3744-3753. PMLR, 2019. 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.7, + 0.895, + 0.775 + ], + "angle": 0, + "content": "[19] Wanqing Li, Zhengyou Zhang, and Zicheng Liu. Action recognition based on a bag of 3d points. In 2010 IEEE computer society conference on computer vision and pattern recognition-workshops, pages 9-14. IEEE, 2010. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.778, + 0.895, + 0.854 + ], + "angle": 0, + "content": "[20] Jun Liu, Amir Shahroudy, Mauricio Perez, Gang Wang, Ling-Yu Duan, and Alex C Kot. Ntu rgb+ d 120: A large-scale benchmark for 3d human activity understanding. IEEE transactions on pattern analysis and machine intelligence, 42(10):2684-2701, 2019. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.856, + 0.895, + 0.902 + ], + "angle": 0, + "content": "[21] Xingyu Liu, Mengyuan Yan, and Jeannette Bohg. Meteornet: Deep learning on dynamic 3d point cloud sequences. In Proceedings of the IEEE/CVF Interna" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.092, + 0.895, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "19986" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "tional Conference on Computer Vision, pages 9246- 9255, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.123, + 0.472, + 0.197 + ], + "angle": 0, + "content": "[22] Daniel Maturana and Sebastian Scherer. Voxnet: A 3d convolutional neural network for real-time object recognition. In 2015 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 922-928. IEEE, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.2, + 0.471, + 0.274 + ], + "angle": 0, + "content": "[23] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pages 652-660, 2017. 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.276, + 0.471, + 0.335 + ], + "angle": 0, + "content": "[24] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In NeurIPS, volume 30, 2017. 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.337, + 0.471, + 0.412 + ], + "angle": 0, + "content": "[25] Guocheng Qian, Yuchen Li, Houwen Peng, Jinjie Mai, Hasan Abed Al Kader Hammoud, Mohamed Elhoseiny, and Bernard Ghanem. Pointnext: Revisiting pointnet++ with improved training and scaling strategies. arXiv preprint arXiv:2206.04670, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.414, + 0.471, + 0.473 + ], + "angle": 0, + "content": "[26] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In The IEEE International Conference on Computer Vision (ICCV), Oct 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.475, + 0.471, + 0.564 + ], + "angle": 0, + "content": "[27] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision, pages 618-626, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.567, + 0.471, + 0.641 + ], + "angle": 0, + "content": "[28] Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang. Ntu rgb+ d: A large scale dataset for 3d human activity analysis. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1010-1019, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.643, + 0.471, + 0.716 + ], + "angle": 0, + "content": "[29] Yiru Shen, Chen Feng, Yaoqing Yang, and Dong Tian. Mining point cloud local structures by kernel correlation and graph pooling. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pages 4548-4557, 2018. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.719, + 0.471, + 0.793 + ], + "angle": 0, + "content": "[30] Jaeyong Sung, Colin Ponce, Bart Selman, and Ashutosh Saxena. Unstructured human activity detection from rgbd images. In 2012 IEEE international conference on robotics and automation, pages 842-849. IEEE, 2012. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.796, + 0.471, + 0.87 + ], + "angle": 0, + "content": "[31] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 4489-4497, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.872, + 0.471, + 0.902 + ], + "angle": 0, + "content": "[32] Haiyan Wang, Liang Yang, Xuejian Rong, Jinglun Feng, and Yingli Tian. Self-supervised 4d spatio" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.092, + 0.894, + 0.152 + ], + "angle": 0, + "content": "temporal feature learning via order prediction of sequential point cloud clips. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3762-3771, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.154, + 0.894, + 0.243 + ], + "angle": 0, + "content": "[33] Yancheng Wang, Yang Xiao, Fu Xiong, Wenxiang Jiang, Zhiguo Cao, Joey Tianyi Zhou, and Junsong Yuan. 3dv: 3d dynamic voxel for action recognition in depth video. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 511-520, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.246, + 0.893, + 0.306 + ], + "angle": 0, + "content": "[34] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. Acm Transactions On Graphics (tog), 38:1-12, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.308, + 0.893, + 0.383 + ], + "angle": 0, + "content": "[35] Siyuan Yang, Jun Liu, Shijian Lu, Meng Hwa Er, and Alex C Kot. Skeleton cloud colorization for unsupervised 3d action representation learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13423-13433, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.385, + 0.893, + 0.443 + ], + "angle": 0, + "content": "[36] Cheng Zhang, Haocheng Wan, Shengqiang Liu, Xinyi Shen, and Zizhao Wu. Pvt: Point-voxel transformer for 3d deep learning. arXiv preprint arXiv:2108.06076, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.446, + 0.893, + 0.52 + ], + "angle": 0, + "content": "[37] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip H.S. Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 16259-16268, October 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.523, + 0.894, + 0.627 + ], + "angle": 0, + "content": "[38] Jia-Xing Zhong, Kaichen Zhou, Qingyong Hu, Bing Wang, Niki Trigoni, and Andrew Markham. No pain, big gain: classify dynamic point cloud sequences with static models by fitting feature-level space-time surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8510-8520, 2022. 
2, 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.092, + 0.894, + 0.627 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "19987" + } + ] +] \ No newline at end of file diff --git a/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/5d416e0e-fbb0-491c-8e72-ffffff1eb68b_origin.pdf b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/5d416e0e-fbb0-491c-8e72-ffffff1eb68b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..143f032dd23f7c6dbe42a1b9f27f7f98cfb2d117 --- /dev/null +++ b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/5d416e0e-fbb0-491c-8e72-ffffff1eb68b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e09e3b681fc83dd87d9622ab98b8c9e1092455d3f6e1cc1681e19b0b88b917e +size 2270594 diff --git a/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/full.md b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0941e88db5e0ecaa1492c3846f25549c420538db --- /dev/null +++ b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/full.md @@ -0,0 +1,267 @@ +# 3DInAction: Understanding Human Actions in 3D Point Clouds + +Yizhak Ben-Shabat1,2 + +Oren Shrout2 + +Stephen Gould + +$^{1}$ Australian National University + +$^{2}$ Technion, Israel Institute of Technology + +sitzikbs@technion.ac.il, shout.oren@campus.technion.ac.il, stephen.gould@anu.edu.au + +https://github.com/sitzikbs/3dincaction + +![](images/77a8b2bdb6e2bc8177795a0ff3f8cf7bea7e437ca5d6127b0d2ac7d0d2729236.jpg) +Figure 1. t-patches for action recognition. We propose a new representation for dynamic 3D point clouds. Termed $t$ -patches, these are locally evolving point cloud sets aggregated over time. Learning features over t-patches provides an improved temporal point cloud representation for action understanding. + +![](images/39b06bebed98e5301eafc3c562c7c30da441c22f9988cd6337436241515c3513.jpg) + +![](images/e074a955792586b2acfdeb1a778f6dab4137266a42bfd994d8c83691f8419cfc.jpg) + +![](images/920f0c45077417070175bf99b53baaf9b5f91803dbb5f8a59e7c45135cbb0ffc.jpg) + +![](images/d40f2b85b106594b3ffdb7f23c00df6ee6597a7d2750a9612450cdcdde76978f.jpg) + +![](images/171287348038d40b2751b2c095c8224a3b7acf6789927f3fd03387b051cb719f.jpg) + +# Abstract + +We propose a novel method for 3D point cloud action recognition. Understanding human actions in RGB videos has been widely studied in recent years, however, its 3D point cloud counterpart remains under-explored despite the clear value that 3D information may bring. This is mostly due to the inherent limitation of the point cloud data modality—lack of structure, permutation invariance, and varying number of points—which makes it difficult to learn a spatio-temporal representation. To address this limitation, we propose the 3DinAction pipeline that first estimates patches moving in time (t-patches) as a key building block, alongside a hierarchical architecture that learns an informative spatio-temporal representation. We show that our method achieves improved performance on existing datasets, including DFAUST and IKEA ASM. Code is publicly available at https://github.com/sitzikbs/3dincaction. + +# 1. Introduction + +In this paper, we address the task of action recognition from 3D point cloud sequences. 
We propose a novel pipeline wherein points are grouped into temporally evolv- + +ing patches that capture discriminative action dynamics. Our work is motivated by the massive growth of online media, mobile and surveillance cameras that have enabled the computer vision community to develop many data-driven action-recognition methods [5, 12, 26, 31], most of which rely on RGB video data. Recently, commodity 3D sensors are gaining increased momentum, however, the 3D point cloud modality for action recognition has yet been underexploited due to the scarcity of 3D action-labeled data. + +In many cases, a pure RGB video-based inference may not be enough and incorporating other modalities like geometry is required. This is especially necessary for safety critical applications such as autonomous systems, where redundancy is crucial, or in scenarios where the video is heavily degraded (e.g., due to poor lighting). Some approaches incorporate geometrical information implicitly, e.g., through intermediate pose estimation [7]. This often entails extra steps that require more time and resources and is still limited to video input. Therefore a more explicit approach is desirable. + +3D sensors provide an alternative modality in the form of point clouds sampled on the environment. Despite the vast research on 3D vision and learning, even static 3D point cloud datasets are significantly smaller than their RGB im + +age counterparts due to difficulties in collecting and labeling. 3D point cloud sequence databases are even smaller, + +making it more difficult to learn a meaningful 3D action representation. Furthermore, learning a point cloud representation still remains an active research field because point clouds are unstructured, unordered, and may contain a varying number of points. Learning a temporal point cloud representation is even more challenging since, unlike pixels, there is no one-to-one point correspondence through time. + +We address these challenges and propose the 3DinAction pipeline for 3D point cloud action recognition. In our pipeline, we first extract local temporal point patches (t-patches) that reflect a point region's motion in time, see Figure 1. We then learn a t-patch representation using a novel hierarchical architecture that incorporates spatial features in the temporal domain. We finally get an action prediction for each frame in a sequence by aggregating multiple t-patch representations. This pipeline overcomes the need for ground truth point temporal correspondence, grid structure, point order, and a fixed number of points in each frame. Intuitively, patches reflect local surface deformation and are more robust to point correspondence errors. + +We conduct extended experiments to evaluate the performance of our approach compared to existing SoTA methods and show that 3DinAction provides significant performance gains of $13\%$ and $7\%$ in accuracy on DFAUST and IKEA ASM, respectively. + +The key contributions of our work are as follows: + +- A novel representation for dynamically evolving local point cloud sets termed t-patches. +- A hierarchical architecture that produces an informative spatio-temporal representation for sequences of point clouds. + +# 2. Related Work + +Learning 3D point cloud representations. Point clouds pose a challenge for neural networks due to their unstructured and point-wise unordered nature. To address these challenges, several approaches have been proposed. 
PointNet [23, 24] uses permutation-invariant operators, such as pointwise MLPs and pooling layers, to aggregate features across a point set. Some approaches construct a graph from the point set. DGCNN [34] applies message passing and performs graph convolutions on kNN graphs, KCNet [29] uses kernel correlation and graph pooling, and Kd-Networks [15] apply multiplicative transformations and share the parameters based on the subdivisions imposed by kd-trees. Alternatively, the structure can be imposed using a grid of voxels [22, 36], or a grid of Gaussians in 3DmFVNet [1]. Another alternative avoids the structure by using Transformer's attention mechanism [17, 37]. For a comprehensive survey of point cloud architectures please see [14]. + +Recently, various factors that can impact the training of + +different architectures have been investigated [13, 25]. This includes exploring data augmentation strategies and loss functions that are not specific to a particular architecture. The results of this study showed that older PointNet-based architectures [23, 24] can perform comparably to newer architectures with minor changes. + +All of the above methods deal with static, single-frame, or single-shape point clouds. In this work, the input is a temporal point cloud where a representation for a short sequence is required and point correspondence between frames is unknown. Therefore extending existing approaches is not trivial. + +Learning temporal 3D point cloud representations. Temporal point clouds have not been as extensively studied as their static counterparts, in particular for action recognition. Meteornet [21] processes 4D points using a PointNet++ architecture where they appended a temporal dimension to the spatial coordinates. PSTNet [10, 11] proposed spatio-temporal convolutions and utilized some of the temporal consistency for action recognition. Similarly, P4Transformer [8] uses 4D convolutions and a transformer for capturing appearance and motion via self-attention. In a follow-up work PST-Transformer [9] employs a video level of self-attention in search for similar points across entire videos and so encodes spatio-temporal structure. Some works attempt to alleviate the full supervision requirement for 3D action recognition. These include self-supervised features learning [32] by predicting temporal order from a large unlabeled dataset and fine-tuning on a smaller annotated datasets and unsupervised skeleton colorization [35]. Additional supervised approaches include MinkowskiNet [6] that uses a 4D spatio-temporal CNN after converting the point clouds to an occupancy grid, 3DV [33] that encodes 3D motion information from depth videos into a compact voxel set, and Kinet [38] that implicitly encoded feature level dynamics in feature space by unrolling the normal solver of ST-surfaces. + +The above methods, perform a single classification per clip. In this paper, we focus on a related, and more chllang-ing, task that requires a prediction per-frame. We propose to convert the point cloud representation into t-patches and use an MLP based hierarchical architecture to get the spatiotemporal representation. + +3D action understanding datasets. One of the major driving forces behind the success of learning-based approaches is the availability of annotated data. For the task of 3D point cloud action recognition, there is currently no designated standard dataset, however, some existing datasets may be extended. 
The CAD 60 and CAD 120 [16, 30] datasets include 60 and 120 long-term activity videos of 12 and 10 classes respectively (e.g., making cereal, microwave food). These datasets provide raw RGB, skeletons, and depth data however its small scale and long-term focus limit its effec + +tiveness. The NTU RGB+D 60 [28] and NTU RGB+D 120 [20] provide $\sim 56\mathrm{K}$ and $\sim 114\mathrm{K}$ clips containing 60 and 120 actions classes respectively, e.g., taking off a jacket, taking a selfie. They provide three different simultaneous RGB views, IR and depth streams as well as 3D skeletons. While these datasets can be considered large-scale, their contrived nature makes recent skeleton-based methods (e.g., [7]) perform well, making a prior-free approach difficult to justify. The MSR-Action3D dataset [19] includes 20 action classes performed by 10 subjects for a total of 567 depth map sequences, collected using a Kinect v1 device (23K frames). The sequences in this dataset are very short and therefore using it to evaluate learning-based approaches provides a limited indication of generalization. The above datasets provide per clip action annotations. + +Some datasets inherently provide per-frame annotations. The IKEA ASM dataset [2] provides 371 videos clipped into 31K clips. It contains 33 action classes related to furniture assembly, annotated per frame. This dataset provides several modalities including three RGB views, and Depth. It is an extremely challenging dataset since the human assembler is often occluded and presents very unique assembly poses. It is also very imbalanced since different assembly actions have different duration and may repeat multiple times within the same assembly. Although it was designed for video action recognition, its challenges are the core reasons for choosing to extend it to the point cloud action recognition task. The DFAUST dataset [3] provides high-resolution 4D scans of human subjects in motion. It includes 14 action categories with over 100 dynamic scans of 10 subjects (1:1 male-to-female ratio) with varying body shapes represented as registrations of aligned meshes, therefore an extension to our task is straightforward. One particularly important feature of this dataset is the GT point correspondences throughout the sequence i.e. it is possible to follow each point's movement through time. While this dataset is not as large-scale as others, it provides ground truth information (correspondence) that most other collected datasets do not. Therefore, we extend this dataset to 3D point cloud action recognition and use it as a testbed for many ablation studies (see Section 4.4). + +# 3. 3DinAction pipeline + +Our 3DinAction pipeline is illustrated in Figure 2. Given a temporal sequence of 3D point clouds we first extract a set of t-patches (Section 3.1). We then feed the t-patches into a hierarchical neural network (Section 3.2) to produce a per-frame high dimensional feature vector representation. Finally, the feature vectors are fed into a classifier to obtain per-frame predictions. The proposed approach is prior-free (no skeleton extraction required) and therefore general and can be used on different action-understanding datasets. + +# 3.1. t-patches + +Let $S = \{x_{j} \in \mathbb{R}^{3} \mid j = 1, \dots, N\}$ denote a 3D point cloud with $N$ points. In the classic (static) setting, a patch $\Psi_{q}$ is extracted around some query point $x_{q}$ . For example, the patch $\Psi_{q}$ may be constructed by finding the $k$ -nearest neighbors of $x_{q}$ in $S$ . 
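For concreteness, a minimal NumPy sketch of this static, single-frame patch extraction is shown below; the function name, array shapes, and the default `k` are illustrative assumptions rather than the released implementation.

```python
import numpy as np

def extract_patch(frame, query, k=16):
    """k-nearest-neighbour patch around a query point in a single frame.

    frame: (N, 3) array of 3D points (one point cloud S).
    query: (3,) query point x_q.
    k:     patch size (number of neighbours); 16 is an illustrative default.
    """
    # Squared Euclidean distance from the query to every point in the frame.
    d2 = np.sum((frame - query[None, :]) ** 2, axis=1)
    # Indices of the k closest points form the static patch Psi_q.
    idx = np.argpartition(d2, k - 1)[:k]
    return frame[idx]

# Toy usage: a random frame with 1024 points, patch around its first point.
frame = np.random.rand(1024, 3)
patch = extract_patch(frame, frame[0])  # (16, 3)
```

The temporal construction described next repeats this per-frame step while propagating the query point to its nearest neighbour in the following frame.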
+ +In our temporal setting we are given a sequence of point clouds $S = \{S^0, \dots, S^T\}$ composed of point cloud frames $S^t = \{x_j^t \mid j = 1, \dots, N^t\}$ . Here the superscript $t$ is used to denote the index of the point cloud in the sequence. Instead of extracting a patch within a single frame, we allow patches to extend temporally, and denote them as $t$ -patches. + +Definition 3.1 A t-patch $P_{q}$ is a sequence of point sets indexed by a query point $x_{q}^{0}$ and jointly moving in time defined by a pointwise mapping function between patches in consecutive frames. Mathematically, $P_{q} = \langle \Psi_{q}^{t}\rangle_{t = 0}^{T}$ where $\Psi_{q}^{0}$ is the initial (static) patch and $\Psi_{q}^{t} = \Phi (\Psi_{q}^{t - 1})$ is the patch at time $t$ where $\Phi$ is a pointwise mapping function. + +In practice, it is difficult to find a reliable mapping function $\Phi$ . Therefore we propose a simplified formulation that, for a given query point $x_{q}^{0}$ , first extracts a patch for the first frame $\Psi_{q}^{0}$ and then iteratively extracts corresponding patches for the next frames (iterating over time), by using the closest point in the next frame as the new query point. More formally, we specify $\vec{\Psi}_{q}^{0} \triangleq \Psi_{q}^{0}$ , $\vec{\Psi}_{q}^{t} = knn(x_{q}^{t-1}, S^{t})$ and $x_{q}^{t} = n n(x_{q}^{t-1}, S^{t})$ for $t = 1, \ldots, T$ . Here $knn$ is the $k$ nearest neighbor and $n n$ is nearest neighbor. Then, the simplified t-patch formulation is given by + +$$ +\vec {P} _ {q} = \left\langle \vec {\Psi} _ {q} ^ {t} \mid t = 0, \dots , T \right\rangle \tag {1} +$$ + +See Figure 3 left for an illustration of the t-patch extraction process. Note that if ground truth correspondence is available $knn$ can be swapped back to $\Phi$ . However, this does not guarantee improved performance. + +Temporal t-patch collapse. The simplified formulation of extracting t-patches inherently suffers from the problem of two or more t-patches collapsing into having the same points after a certain frame. We call this scenario t-patch temporal collapse. Temporal collapse can happen whenever $x_{q}^{t} = x_{p}^{t}$ for $x_{q}^{0} \neq x_{p}^{0}$ . The main issue with temporal collapse is the reduction in point coverage as time progresses, i.e. the patches covering the last point cloud have significant overlaps and therefore include fewer points than the first frame and so missing vital data. An illustration of the t-patch collapse problem is available in Figure 3 (right). To mitigate this issue, we propose two solutions. First, adding small noise to each iteration's query points, i.e. $\overrightarrow{\Psi}_{q}^{t} = knn(x_{q}^{t} + \epsilon, S^{t+1})$ where $\epsilon \sim \mathcal{N}(\mu, \sigma^{2})$ is a small Gaussian noise. Second, we propose to construct t-patches from the first to last frame but also in reverse, initializing + +![](images/ca03231e6ca24b0c3a26aa855970e4a3c0f6682cb3ba7e648684c1c9763fd057.jpg) +Figure 2. 3DinAction pipeline. Given a sequence of point clouds, a set of t-patches is extracted. The t-patches are fed into a neural network to output an embedding vector. This is done hierarchically until finally the global t-patch vectors are pooled to get a per-frame point cloud embedding which is then fed into a classifier to output an action prediction per frame. + +![](images/fa2721163831b9461c340b37e484f9bf65404e8c33d5584b94aa6d6c81df9fd3.jpg) +Figure 3. t-patch construction and collapse. Illustration of t-patch construction (left) and collapse (right). 
Starting from an origin point $x_{q}^{0}$ we find the nearest neighbours in the next frame iteratively to construct the t-patch subset (non-black points). A collapse happens when two different origin points, $x_{q}^{0}$ and $x_{p}^{0}$ , have the same nearest neighbour at some time step, $\Psi_{p}^{3} = \Psi_{q}^{3}$ here. + +with $\Psi_q^0$ and $\Psi_q^T$ , respectively. We name this variation bidirectional t-patches. More formally bidirectional t-patches are given by, + +$$ +\stackrel {\leftrightarrow} {P} = \left(\bigcup_ {q} \vec {P} _ {q}\right) \cup \left(\bigcup_ {p} \overleftarrow {P} _ {p}\right) \tag {2} +$$ + +where $\overleftarrow{P}_p$ is defined similarly to $\vec{P}_q$ but in the reverse direction, i.e., $\overleftarrow{\Psi}_p^T \triangleq \Psi_p^T$ and $\overleftarrow{\Psi}_p^t = knn(x_p^{t+1}, S^t)$ for + +$t = T - 1,\ldots ,0$ . Here, the final set of t-patches is composed of an equal number of t-patches from both directions. + +# 3.2. Hierarchical architecture + +The proposed architecture is composed of $l$ consecutive t-patch modules. Each module receives a point cloud sequence $S$ as input. The sequence is fed into a t-patch extractor where it undergoes subsampling and t-patch extraction, forming $\tilde{S}_l$ and $P^l$ respectively. Then, the t-patches are fed into t-patch Net, a network that computes a high-dimensional feature vector $f_{l}$ for each t-patch, parametrized by $\theta_l$ . The subsampled sequence $\tilde{S}_l$ and its corresponding t-patch features $f_{l}$ are then fed into the next t-patch module. These modules form a hierarchy in the sense that each module receives as input a sparser point cloud with a higher dimensional feature vector representing each point (encoding both spatial and temporal information). Note that both the t-patch points and their features are fed into t-patch Net. t-patch extractor. We first subsample the first frame in the point cloud sequence $S^0$ using farthest point sampling (FPS) to form a set of $M$ query points $\tilde{S}^0 = \{x_j^0\in FPS(S^0,M)\}$ . The set $\tilde{S}^0$ is used to form the t-patches. Subsampling is required since computing a t-patch for each point is inefficient and unnecessary due to overlaps. After subsampling, we extract $M$ t-patches using Equation 2 where $q\in \tilde{S}^0$ . The extractor operates on both 3D points and their corresponding features (for mod + +ules deeper in the hierarchy). + +Model architecture and t-patch net. The t-patch network computes a high dimensional representation for each t-patch. The t-patch Net architecture is composed of several MLP layers operating on the non-temporal dimensions (sharing weights across points) followed by a convolutional layer operating on both the temporal and feature dimensions. Note that the network weights are also shared across t-patches. The output of each t-patch Net is a vector for each frame. The final frame representation is obtained by aggregating all of the t-patch features using a max pooling operation i.e. $\text{maxpool}_{M_l}(f_3)$ . This representation is then fed into a classifier consisting of three fully connected layers with temporal smoothing and softmax to output the final action prediction. To train the network we use the same losses of RGB based approaches [2, 5] which include a per-frame prediction cross entropy loss and a per-sequence prediction cross entropy loss (summed and weighted evenly) $L_{total} = L_{frame} + L_{seq}$ . For full details see supplemental. + +# 4. 
Experiments + +We evaluate the performance of our approach on three datasets. The results show that the 3DinAction pipeline outperforms all baselines in DFAUST [3] and IKEA ASM [2] and is comparable in MSR-Action 3D [19]. We then conduct an ablation study for selecting parameters and t-patch extraction method showing that adding jitter and bidirectional t-patches is beneficial. Finally, we report time performance and show the tradeoff between performance and inference time. For more results and experiments, see supplemental material. + +Baselines and evaluation metrics. For evaluation, we report several standard metrics [4]: the top1 and top3 framework accuracy are the de facto standard for action classification. We compute it by summing the number of correctly classified frames and dividing by the total number of frames in each video and then averaging over all videos in the test set. Additionally, since some of the datasets are imbalanced and may contain different actions for each frame in a clip, we also report the macro-recall by separately computing recall for each category and then averaging (macro). Finally, we report the mean average precision (mAP) since all untrimmed videos contain multiple action labels. + +For DFAUST and IKEA ASM we report static methods PointNet [23], $\mathrm{PointNet}^{++}$ [24], and Set Transformer [18] by applying them on each point cloud frame individually. Additionally, we report temporal methods like PSTNet [10] and also implemented a temporal smoothing version of each static method (PoinNet+TS, $\mathrm{Pointnet}^{++} + \mathrm{TS}$ , and Set Transformer+TS respectively) by learning the weights of a convolutional layer over the temporal dimension. Temporal smoothing aims to provide a naive baseline for utilizing temporal information in addition to spatial information. + +Note that in all experiments, unless otherwise specified, our method uses the simplified formulation with jitter and bidirectional t-patches. + +# 4.1. Experiments on DFAUST dataset + +We extend the DFAUST dataset for the task of action recognition and show that the proposed approach outperforms other methods (see Table 1). + +DFAUST dataset [3]. We extended the DFAUST dataset to our task by subdividing it into clips of 64 frames with train and test human subjects. The split was constructed so no subject will appear in both training and test set as well as guarantee that all actions appear in both. The train and test sets contain 76 full-length sequences (395 clips, and $\sim 25\mathrm{K}$ frames) and 53 sequences (313 clips, and $\sim 20\mathrm{K}$ frames) respectively. Each point cloud frame contains 6890 points. These points are mesh vertices and therefore the density varies greatly (e.g., very dense on the face, hands, and feet and sparser on the legs). For all baselines, we sampled a set of 1024 points using the farthest point sampling algorithm to provide a more uniform set of points. For this dataset, all frames in a clip have the same label. Note that not all actions are performed by all subjects. For the full action list and dataset statistics, see the supplemental. + +Results. The results, reported in Table 1, show that our proposed approach outperforms all baselines by a large margin. It also shows that temporal smoothing boosts performance significantly for all static baselines. 
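As a concrete reference for the frame accuracy and macro-recall metrics defined above, the sketch below computes them from per-frame predictions; the function names and the list-of-videos input format are illustrative assumptions, not the evaluation code used for the reported numbers.

```python
import numpy as np

def frame_accuracy(per_video_preds, per_video_labels):
    """Top-1 frame accuracy: fraction of correctly classified frames per video,
    averaged over all videos in the test set."""
    per_video = [np.mean(np.asarray(p) == np.asarray(y))
                 for p, y in zip(per_video_preds, per_video_labels)]
    return float(np.mean(per_video))

def macro_recall(preds, labels, num_classes):
    """Macro-recall: recall computed separately per action class, then averaged."""
    preds, labels = np.asarray(preds), np.asarray(labels)
    recalls = [np.mean(preds[labels == c] == c)
               for c in range(num_classes) if np.any(labels == c)]
    return float(np.mean(recalls))

# Toy usage: two videos with per-frame ground truth and predictions.
gt = [[0, 0, 1], [2, 2, 2, 1]]
pred = [[0, 1, 1], [2, 2, 1, 1]]
print(frame_accuracy(pred, gt))                               # (2/3 + 3/4) / 2
print(macro_recall(sum(pred, []), sum(gt, []), num_classes=3))
```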
Additionally, to explore the influence of our simplified knn-based temporal point mapping, we used the GT point correspondence to match the consecutive t-patch origin points and report the results as another baseline (Ours + GT corr). The results show that there is a mAP performance gain with GT correspondence, however, it is limited. Note that in most datasets, this GT correspondence is not available. Finally, we also experimented with a Transformer architecture to process the t-patch learned representations and show that it does not provide additional performance boost. This may be attributed to the dataset size. + +Insight. We extended the GradCam [27] approach for our 3DinAction pipeline. Using this approach we get a score per point in each t-patch proportional to its influence on classifying the frame to a given target class. The results in Figure 4 show that, as expected, our approach learns meaningful representations since the most prominent regions are the ones with the informative motion. For example, in the Jumping jacks action (top row) the hands are most prominent as they are making a large and distinct motion. + +# 4.2. Experiments on IKEA ASM dataset + +IKEA ASM dataset [2]. This dataset consists of 371 videos (3M frames) of people assembling IKEA furniture in different indoor environments. It was collected using a + +
| Method | Frame acc. top 1 | Frame acc. top 3 | mAP |
| --- | --- | --- | --- |
| 3DmFVNet [1] | 60.86 | 87.68 | 0.7171 |
| PointNet [23] | 65.67 | 86.44 | 0.7161 |
| PointNet++ [24] | 58.51 | 88.28 | 0.5842 |
| Set Transformer [18] | 52.27 | 81.98 | 0.6209 |
| PointNet [23] + TS | 74.10 | 94.00 | 0.7863 |
| PointNet++ [24] + TS | 67.88 | 86.21 | 0.7563 |
| Set Transformer [18] + TS | 62.95 | 90.33 | 0.7322 |
| PSTNet [10] | 50.70 | 78.28 | 0.6490 |
| Ours + GT corr | 77.67 | 95.38 | 0.8762 |
| Ours + Transformer | 77.09 | 93.77 | 7.49 |
| Ours | 87.26 | 99.26 | 0.8616 |
+ +Table 1. Action recognition results on DFAUST. Reporting frame-wise accuracy and mean average precision. Ours outperforms all baselines by a large margin. + +![](images/70a0c3e96fa4cf736b34a39ff64e0b044dbff4a15bf7a0395ab03946ea6db655.jpg) +Figure 4. 3DinAction GradCAM scores. The proposed 3DinAction pipeline learns meaningful representations for prominent regions. The presented actions are jumping jacks (top row), hips (middle row), and knees (bottom row). The columns represent progressing time steps from left to right. Colormap indicates high GradCAM scores in red and low scores in blue. + +Kinect V2 camera and provides camera parameters to reconstruct point clouds in camera coordinates. It provides action annotation for each frame (33 classes). It is a highly challenging dataset for two main reasons: (1) It is highly + +imbalanced since some actions have a long duration and occur multiple times in each video (e.g., spin leg) and some are shorter and sparser (flip tabletop). (2) The assembly motion includes a lot of self-occlusion as well as subtle movements. The train/test split consists of 254 and 117 full sequences respectively. The split is environment-based (i.e. in the test set there is no environment that appeared in the training set). The assembly videos have an average of $\sim$ 2735 frames per video. The point clouds provided in this dataset are aligned to the camera coordinate frame, posing a challenge for methods that are sensitive to rotations since the camera moves between different scans. + +Results. The results on the IKEA ASM dataset are reported in Table 2. The results show that the proposed 3DinAction pipeline provides a significant performance boost over static approaches and their temporally smooth variants. Additionally, as expected, PointNet and Set Transformer are heavily affected by the variations in coordinate frames. $\mathrm{PointNet}^{++}$ on the other hand performs better since it uses local coordinate frames for each local region. All methods show an improved mAP when using the temporally smooth variant with degradation in frame-wise accuracy due to the dataset imbalance. For this dataset, the top1 metric is not always indicative of the quality of performance because a high top1 is directly correlated with many frames classified as the most common class. Additionally, we compare to pose-based methods reported in [2] and show that the proposed approach also outperforms these baselines. See supplementary material for confusion matrices. + +t-patch intuition and visualization. In Figure 5 we visualize the t-patches for the flip table action in the TV Bench assembly. A set of selected t-patches are highlighted in color demonstrating different types of t-patches and their spatiotemporal changes. The blue is on the moving TV Bench assembly, it moves rigidly with the assembly. The maroon is on the moving person's arm, it exhibits nonrigid motion and deformations through time. The teal is on the static table surface containing some of the TV Bench's points in the first frame but remains static when it moves since its origin query point is on the table. The green is on the static carpet, remaining approximately the same through time. Note that the RGB images are for visualization purposes and are not used in our pipeline. + +# 4.3. Experiments on MSR-Action3D dataset + +For this dataset, the task is to predict a single class for a sequence of frames (unlike the other datasets where a per-frame prediction is required). 
To that end, we replace our classifier with a single fully connected layer and max pooled the results over the temporal domain (similar to [10]). The results, reported in Table 3, show that all SoTA methods, including the proposed approach, exhibit very similar performance. This is mainly attributed to the small scale of the + +![](images/507d68e235f3b0de3debfe9bb776f8bdb370ba2e44031561c43649e19c75caf8.jpg) +Figure 5. IKEA ASM example with t-patches. The flip table action for the TV Bench assembly is visualization including the RGB image (top), and a grayscale 3D point cloud with t-patches (bottom). t-patches are highlighted in color. The blue is on the moving TV Bench assembly, maroon is on the moving persons arm, teal is on the static table surface, and green is on the colorful static carpet. + +![](images/ee4b5844cd5c3ca6e3f785a24d468c225c86bf8d1e63ba13aeed01e0d5bbd6d4.jpg) + +![](images/2468e71df666d5da49836430f28668bdeca4592a2d25a2397b3cf1d5dac6db67.jpg) + +![](images/1fd39becc884c2a336ecfbdc908e4f44c57ebdff495c1fe9be3b979531ab02ad.jpg) + +
| Method | Frame acc. top 1 | Frame acc. top 3 | macro | mAP |
| --- | --- | --- | --- | --- |
| PointNet [23] | 4.20 | 19.86 | 5.76 | 0.0346 |
| PointNet++ [24] | 45.97 | 70.10 | 29.48 | 0.1187 |
| Set Transformer [18] | 14.96 | 57.12 | 13.16 | 0.0299 |
| PointNet [23] + TS | 6.00 | 19.48 | 5.14 | 0.0804 |
| PointNet++ [24] + TS | 27.84 | 60.64 | 27.72 | 0.2024 |
| Set Transformer [18] + TS | 9.54 | 36.50 | 10.74 | 0.1471 |
| PSTNet [10] | 17.94 | 52.24 | 17.14 | 0.2016 |
| Human Pose HCN [2] | 39.15 | 65.37 | 28.18 | 0.2232 |
| Human Pose ST-GCN [2] | 43.4 | 66.29 | 26.54 | 0.1856 |
| Ours without BD | 45.16 | 72.83 | 35.06 | 0.2932 |
| Ours | 52.91 | 75.03 | 38.84 | 0.2875 |
+ +dataset and the lack of diversity in the action classes. Furthermore, we witnessed that the main performance gap is for frames and sequences where the action is indistinguishable (e.g., first few frames of a sequence where no distinguishable action commenced). + +# 4.4. Ablation study + +t-patch extraction. We studied the t-patch extraction method and its effects on action recognition on a noisy version of the DFAUST dataset. The results reported in Table 4, show the significance of the t-patch collapse problem and the effectiveness of adding small jitter and bidirectional t-patches to overcome it. In the DFAUST dataset, finding the nearest neighbor between frames provides a $\sim 96\%$ correspondence accuracy (small motion between frames). + +Table 2. Action classification on IKEA ASM. The proposed approach provides a significant performance boost over other static and dynamic approaches, including the temporal smoothing (TS). + +
| Method | 4 frames | 8 frames | 12 frames | 16 frames | 24 frames |
| --- | --- | --- | --- | --- | --- |
| PSTNet [10] | 81.14 | 83.50 | 87.88 | 89.90 | 91.20 |
| P4Transformer [8] | 80.13 | 83.17 | 87.54 | 89.56 | 90.94 |
| PST-Transformer [9] | 81.14 | 83.97 | 88.15 | 91.98 | 93.73 |
| Kinet [38] | 79.80 | 83.84 | 88.53 | 91.92 | 93.27 |
| Ours | 80.47 | 86.20 | 88.22 | 90.57 | 92.23 |
+ +Table 3. MSR-Action3D classification results. Reporting classification accuracy for clips of different lengths. Results show that all methods are comparable since this dataset's scale is limited. + +Therefore, in this experiment, we augment the dataset once by adding small Gaussian noise to each point in the dataset $(\sigma = 0.01)$ , decreasing the correspondence accuracy to $\sim 62.4\%$ and introducing multiple t-patch collapse instances as well as increasing the classification difficulty. + +Several variants of the t-patch extraction were explored. The first variation (GT) incorporates the ground truth correspondence into the t-patch extraction. Using this method, there is no t-patch collapse since there is a one-to-one mapping between frames. We expected this to produce an upper bound on the performance, however, surprisingly the results show that this variation is actually inferior to the proposed t-patch approach. We attribute this to the proposed t-patch extraction inherent augmentation caused by the downsampling and nearest neighbor point jitter. We then continue to explore the proposed approaches for dealing with t-patch collapse which include jitter, i.e. adding small noise to each point before finding its nearest neighbor in the next frame, and the bidirectional t-patches that extract patches both from the first to the last frame and from the last to the first frame. The results show that adding jitter is al + +
| Data | GT | Jitter | BD | Frame acc. top 1 | Frame acc. top 3 | mAP |
| --- | --- | --- | --- | --- | --- | --- |
| clean | ✓ | | | 77.67 | 95.38 | 0.8762 |
| clean | | | | 74.73 | 92.14 | 0.8097 |
| clean | | ✓ | | 80.49 | 96.61 | 0.9023 |
| clean | | ✓ | ✓ | 87.26 | 99.26 | 0.8616 |
| noisy | ✓ | | | 76.08 | 95.50 | 0.9013 |
| noisy | | | | 66.74 | 93.76 | 0.7626 |
| noisy | | ✓ | | 81.83 | 98.97 | 0.9220 |
| noisy | | ✓ | ✓ | 80.03 | 97.57 | 0.8975 |
+ +Table 4. t-patch collapse ablation on DFAUST. Exploring adding (1) GT - ground truth correspondences, (2) jitter - small Gaussian noise in t-patch construction, and (3) BD - bidirectional t-patches. + +
| n | k | Frame acc. top 1 | Frame acc. top 3 | mAP |
| --- | --- | --- | --- | --- |
| 256 | 16 | 76.96 | 97.54 | 0.8430 |
| 512 | 16 | 80.03 | 97.57 | 0.8975 |
| 1024 | 16 | 77.30 | 97.88 | 0.8507 |
| 512 | 8 | 76.87 | 96.21 | 0.7557 |
| 512 | 32 | 77.91 | 96.60 | 0.7453 |
+ +ways beneficial and provides a boost in performance. The bidirectional t-patches improve accuracy performance significantly when the data is clean and are comparable when the data is noisy. Note that in both dataset variations, the degradation due to temporal t-patch collapse is low compared to Kinect-based scan data, therefore the bidirectional benefits are not fully utilized. + +t-patch parameters. The core parameters for t-patch extraction are the number of neighbors to extract $(k)$ and the number of points to subsample $(n)$ . Here there is a tradeoff between complexity and performance i.e. when $k$ and $n$ are small, the input to the model is small accordingly but the overall coverage is reduced and therefore performance is lower. We explored their influence on the noisy DFAUST dataset and report the results in Table 5. The results show that the method is fairly robust to the selection of these parameters, producing comparable results for all. The best performance was obtained for $n = 512$ , $k = 16$ . Surprisingly, the performance slightly degrades when increasing $k$ and $n$ beyond these values. This is likely due to the increase in model size, which easily overfits on a dataset of this size. + +Time and parameters. We report the time performance and the number of parameters of several baselines in Table 6. The results show the tradeoff between performance and time, i.e. the temporal approaches exhibit longer pro + +Table 5. t-patch parameters ablation. Results for the number of neighboring points in a patch $k$ and number of downsampled points $n$ show that the method is robust. + +
| Method | Time [ms] | # parameters |
| --- | --- | --- |
| PointNet [23] | 64.49 | 3.5M |
| PointNet++ [24] | 23.35 | 1.5M |
| PSTNet [10] | 185.92 | 8.3M |
| Ours t-patch extraction | 180.65 | 0 |
| Ours feature computation | 12.50 | 9.8M |
| Ours classifier | 0.36 | 1.1M |
| Ours | 193.51 | 10.9M |
+ +Table 6. Time and parameters. Temporal methods have more parameters and take longer. 3DinAction time is mostly used to extract t-patches. + +cessing times and more parameters while performing better. For the proposed approach, we break down the timing of individual components, namely the t-patch extraction, feature computation, and classifier. The results show that the proposed approach is comparable to PSTNet in time while having more parameters. Interestingly, most of the time is used for extracting the t-patches and not for feature extraction or classification. This is attributed to the farthest point sampling and the sequential knn search, both of which could be further optimized for speed. Note that results are average of 50 runs, each with a batch of 4 and 1024 points per frame. + +Limitations. Since the simplified formulation of t-patch construction uses $knn$ , it is sensitive to variations in point densities. A t-patch in a sparse region will occupy a larger volume than a t-patch in a dense region. We use FPS to mitigate this, however, other approaches can be used e.g., using neighbors in a fixed radius. Another limitation is data with a very low frame rate or very fast motion since this breaks the assumption that points in consecutive frames are close to each other, and will cause inconsistent t-patch motion. + +# 5. Conclusions + +We introduced the 3DinAction pipeline, a novel method for 3D point cloud action recognition. It showed that the creation of temporal patches is beneficial for finding informative spatio-temporal point representations. 3DinAction has demonstrated a performance boost over SoTA methods. + +This work opens many interesting future directions of research. These include trying to learn the t-patch construction instead of the knn selection, imposing stronger temporal structure based on preexisting knowledge and bias (e.g., sceneflow or tracking), and exploring using multimodal inputs with this representation (e.g., RGB or text). + +Acknowledgement. This project has received funding from the European Union's Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No 893465. We also thank the Microsoft for Azure Credits and NVIDIA Academic Hardware Grant Program for providing high-speed A5000 GPU. + +# References + +[1] Yizhak Ben-Shabat, Michael Lindenbaum, and Anath Fischer. 3DMFV: Three-dimensional point cloud classification in real-time using convolutional neural networks. RAL, 3:3145-3152, 2018. 2, 6 +[2] Yizhak Ben-Shabat, Xin Yu, Fatemeh Saleh, Dylan Campbell, Cristian Rodriguez-Opazo, Hongdong Li, and Stephen Gould. The aka asm dataset: Understanding people assembling furniture through actions, objects and pose. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 847-859, 2021. 3, 5, 6, 7 +[3] Federica Bogo, Javier Romero, Gerard Pons-Moll, and Michael J. Black. Dynamic FAUST: Registering human bodies in motion. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), July 2017. 3, 5 +[4] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 961-970, 2015. 5 +[5] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 
1, 5 +[6] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3075-3084, 2019. 2 +[7] Haodong Duan, Yue Zhao, Kai Chen, Dahua Lin, and Bo Dai. Revisiting skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2969-2978, 2022. 1, 3 +[8] Hehe Fan, Yi Yang, and Mohan Kankanhalli. Point 4d transformer networks for spatio-temporal modeling in point cloud videos. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 14204-14213, 2021. 2, 7 +[9] Hehe Fan, Yi Yang, and Mohan Kankanhalli. Point spatio-temporal transformer networks for point cloud video modeling. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(2):2181-2192, 2022. 2, 7 +[10] Hehe Fan, Xin Yu, Yuhang Ding, Yi Yang, and Mohan Kankanhalli. Pistnet: Point spatio-temporal convolution on point cloud sequences. In International Conference on Learning Representations, 2021. 2, 5, 6, 7, 8 + +[11] Hehe Fan, Xin Yu, Yi Yang, and Mohan Kankanhalli. Deep hierarchical representation of point cloud videos via spatio-temporal decomposition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(12):9918-9930, 2021. 2 +[12] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6202-6211, 2019. 1 +[13] Ankit Goyal, Hei Law, Bowei Liu, Alejandro Newell, and Jia Deng. Revisiting point cloud shape classification with a simple and effective baseline. In International Conference on Machine Learning, pages 3809-3820. PMLR, 2021. 2 +[14] Yulan Guo, Hanyun Wang, Qingyong Hu, Hao Liu, Li Liu, and Mohammed Bennamoun. Deep learning for 3d point clouds: A survey. PAMI, 2020. 2 +[15] Roman Klokov and Victor Lempitsky. Escape from cells: Deep kd-networks for the recognition of 3d point cloud models. In Proceedings of the IEEE international conference on computer vision, pages 863-872, 2017. 2 +[16] Hema Swetha Koppula, Rudhir Gupta, and Ashutosh Saxena. Learning human activities and object affordances from rgb-d videos. The International Journal of Robotics Research, 32(8):951-970, 2013. 2 +[17] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In International conference on machine learning, pages 3744-3753. PMLR, 2019. 2 +[18] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In International conference on machine learning, pages 3744-3753. PMLR, 2019. 5, 6, 7 +[19] Wanqing Li, Zhengyou Zhang, and Zicheng Liu. Action recognition based on a bag of 3d points. In 2010 IEEE computer society conference on computer vision and pattern recognition-workshops, pages 9-14. IEEE, 2010. 3, 5 +[20] Jun Liu, Amir Shahroudy, Mauricio Perez, Gang Wang, Ling-Yu Duan, and Alex C Kot. Ntu rgb+ d 120: A large-scale benchmark for 3d human activity understanding. IEEE transactions on pattern analysis and machine intelligence, 42(10):2684-2701, 2019. 3 +[21] Xingyu Liu, Mengyuan Yan, and Jeannette Bohg. Meteornet: Deep learning on dynamic 3d point cloud sequences. 
In Proceedings of the IEEE/CVF Interna + +tional Conference on Computer Vision, pages 9246- 9255, 2019. 2 +[22] Daniel Maturana and Sebastian Scherer. Voxnet: A 3d convolutional neural network for real-time object recognition. In 2015 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 922-928. IEEE, 2015. 2 +[23] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pages 652-660, 2017. 2, 5, 6, 7, 8 +[24] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In NeurIPS, volume 30, 2017. 2, 5, 6, 7, 8 +[25] Guocheng Qian, Yuchen Li, Houwen Peng, Jinjie Mai, Hasan Abed Al Kader Hammoud, Mohamed Elhoseiny, and Bernard Ghanem. Pointnext: Revisiting pointnet++ with improved training and scaling strategies. arXiv preprint arXiv:2206.04670, 2022. 2 +[26] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In The IEEE International Conference on Computer Vision (ICCV), Oct 2017. 1 +[27] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision, pages 618-626, 2017. 5 +[28] Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang. Ntu rgb+ d: A large scale dataset for 3d human activity analysis. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1010-1019, 2016. 3 +[29] Yiru Shen, Chen Feng, Yaoqing Yang, and Dong Tian. Mining point cloud local structures by kernel correlation and graph pooling. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pages 4548-4557, 2018. 2 +[30] Jaeyong Sung, Colin Ponce, Bart Selman, and Ashutosh Saxena. Unstructured human activity detection from rgbd images. In 2012 IEEE international conference on robotics and automation, pages 842-849. IEEE, 2012. 2 +[31] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 4489-4497, 2015. 1 +[32] Haiyan Wang, Liang Yang, Xuejian Rong, Jinglun Feng, and Yingli Tian. Self-supervised 4d spatio + +temporal feature learning via order prediction of sequential point cloud clips. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3762-3771, 2021. 2 +[33] Yancheng Wang, Yang Xiao, Fu Xiong, Wenxiang Jiang, Zhiguo Cao, Joey Tianyi Zhou, and Junsong Yuan. 3dv: 3d dynamic voxel for action recognition in depth video. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 511-520, 2020. 2 +[34] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. Acm Transactions On Graphics (tog), 38:1-12, 2019. 2 +[35] Siyuan Yang, Jun Liu, Shijian Lu, Meng Hwa Er, and Alex C Kot. Skeleton cloud colorization for unsupervised 3d action representation learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13423-13433, 2021. 2 +[36] Cheng Zhang, Haocheng Wan, Shengqiang Liu, Xinyi Shen, and Zizhao Wu. 
Pvt: Point-voxel transformer for 3d deep learning. arXiv preprint arXiv:2108.06076, 2021. 2 +[37] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip H.S. Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 16259-16268, October 2021. 2 +[38] Jia-Xing Zhong, Kaichen Zhou, Qingyong Hu, Bing Wang, Niki Trigoni, and Andrew Markham. No pain, big gain: classify dynamic point cloud sequences with static models by fitting feature-level space-time surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8510-8520, 2022. 2, 7 \ No newline at end of file diff --git a/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/images.zip b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..20886e79bc84f3ee4cb8649267dfba0f9a489f65 --- /dev/null +++ b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35f57dfc34e47e68e7a3dfd4207bff0a85c75367d56b84713d64a04fddca2a5b +size 619331 diff --git a/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/layout.json b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..9767939510734a5eed1c739d722f849283bf5b90 --- /dev/null +++ b/2024/3DInAction_ Understanding Human Actions in 3D Point Clouds/layout.json @@ -0,0 +1,7617 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 99, + 103, + 495, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 103, + 495, + 120 + ], + "spans": [ + { + "bbox": [ + 99, + 103, + 495, + 120 + ], + "type": "text", + "content": "3DInAction: Understanding Human Actions in 3D Point Clouds" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 146, + 142, + 253, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 142, + 253, + 156 + ], + "spans": [ + { + "bbox": [ + 146, + 142, + 253, + 156 + ], + "type": "text", + "content": "Yizhak Ben-Shabat1,2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 278, + 143, + 343, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 278, + 143, + 343, + 156 + ], + "spans": [ + { + "bbox": [ + 278, + 143, + 343, + 156 + ], + "type": "text", + "content": "Oren Shrout2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 369, + 143, + 446, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 143, + 446, + 157 + ], + "spans": [ + { + "bbox": [ + 369, + 143, + 446, + 157 + ], + "type": "text", + "content": "Stephen Gould" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 108, + 171, + 262, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 171, + 262, + 186 + ], + "spans": [ + { + "bbox": [ + 108, + 171, + 262, + 186 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 108, + 171, + 262, + 186 + ], + "type": "text", + "content": "Australian National University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 287, + 171, + 485, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 287, + 171, + 485, + 186 + ], + "spans": [ + { + "bbox": [ + 287, + 171, + 485, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 287, + 171, + 485, + 186 + ], + "type": "text", + "content": 
"Technion, Israel Institute of Technology" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 187, + 523, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 187, + 523, + 199 + ], + "spans": [ + { + "bbox": [ + 70, + 187, + 523, + 199 + ], + "type": "text", + "content": "sitzikbs@technion.ac.il, shout.oren@campus.technion.ac.il, stephen.gould@anu.edu.au" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 193, + 201, + 405, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 201, + 405, + 213 + ], + "spans": [ + { + "bbox": [ + 193, + 201, + 405, + 213 + ], + "type": "text", + "content": "https://github.com/sitzikbs/3dincaction" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 61, + 233, + 119, + 351 + ], + "blocks": [ + { + "bbox": [ + 61, + 233, + 119, + 351 + ], + "lines": [ + { + "bbox": [ + 61, + 233, + 119, + 351 + ], + "spans": [ + { + "bbox": [ + 61, + 233, + 119, + 351 + ], + "type": "image", + "image_path": "77a8b2bdb6e2bc8177795a0ff3f8cf7bea7e437ca5d6127b0d2ac7d0d2729236.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 365, + 547, + 399 + ], + "lines": [ + { + "bbox": [ + 45, + 365, + 547, + 399 + ], + "spans": [ + { + "bbox": [ + 45, + 365, + 547, + 399 + ], + "type": "text", + "content": "Figure 1. t-patches for action recognition. We propose a new representation for dynamic 3D point clouds. Termed " + }, + { + "bbox": [ + 45, + 365, + 547, + 399 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 45, + 365, + 547, + 399 + ], + "type": "text", + "content": "-patches, these are locally evolving point cloud sets aggregated over time. Learning features over t-patches provides an improved temporal point cloud representation for action understanding." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 129, + 236, + 216, + 351 + ], + "blocks": [ + { + "bbox": [ + 129, + 236, + 216, + 351 + ], + "lines": [ + { + "bbox": [ + 129, + 236, + 216, + 351 + ], + "spans": [ + { + "bbox": [ + 129, + 236, + 216, + 351 + ], + "type": "image", + "image_path": "39b06bebed98e5301eafc3c562c7c30da441c22f9988cd6337436241515c3513.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 216, + 236, + 299, + 351 + ], + "blocks": [ + { + "bbox": [ + 216, + 236, + 299, + 351 + ], + "lines": [ + { + "bbox": [ + 216, + 236, + 299, + 351 + ], + "spans": [ + { + "bbox": [ + 216, + 236, + 299, + 351 + ], + "type": "image", + "image_path": "e074a955792586b2acfdeb1a778f6dab4137266a42bfd994d8c83691f8419cfc.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 309, + 236, + 380, + 351 + ], + "blocks": [ + { + "bbox": [ + 309, + 236, + 380, + 351 + ], + "lines": [ + { + "bbox": [ + 309, + 236, + 380, + 351 + ], + "spans": [ + { + "bbox": [ + 309, + 236, + 380, + 351 + ], + "type": "image", + "image_path": "920f0c45077417070175bf99b53baaf9b5f91803dbb5f8a59e7c45135cbb0ffc.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 384, + 236, + 477, + 350 + ], + "blocks": [ + { + "bbox": [ + 384, + 236, + 477, + 350 + ], + "lines": [ + { + "bbox": [ + 384, + 236, + 477, + 350 + ], + "spans": [ + { + "bbox": [ + 384, + 236, + 477, + 350 + ], + "type": "image", + "image_path": "d40f2b85b106594b3ffdb7f23c00df6ee6597a7d2750a9612450cdcdde76978f.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 483, + 238, + 533, + 311 + ], + "blocks": [ + { + "bbox": [ + 483, + 238, + 533, + 311 + ], + "lines": [ + { + "bbox": [ + 483, + 238, + 533, + 311 + ], + "spans": [ + { + "bbox": [ + 483, + 238, + 533, + 311 + ], + "type": "image", + "image_path": "171287348038d40b2751b2c095c8224a3b7acf6789927f3fd03387b051cb719f.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 143, + 417, + 192, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 417, + 192, + 430 + ], + "spans": [ + { + "bbox": [ + 143, + 417, + 192, + 430 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 46, + 443, + 289, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 443, + 289, + 635 + ], + "spans": [ + { + "bbox": [ + 46, + 443, + 289, + 635 + ], + "type": "text", + "content": "We propose a novel method for 3D point cloud action recognition. Understanding human actions in RGB videos has been widely studied in recent years, however, its 3D point cloud counterpart remains under-explored despite the clear value that 3D information may bring. This is mostly due to the inherent limitation of the point cloud data modality—lack of structure, permutation invariance, and varying number of points—which makes it difficult to learn a spatio-temporal representation. 
To address this limitation, we propose the 3DinAction pipeline that first estimates patches moving in time (t-patches) as a key building block, alongside a hierarchical architecture that learns an informative spatio-temporal representation. We show that our method achieves improved performance on existing datasets, including DFAUST and IKEA ASM. Code is publicly available at https://github.com/sitzikbs/3dincaction." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 658, + 128, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 658, + 128, + 670 + ], + "spans": [ + { + "bbox": [ + 47, + 658, + 128, + 670 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": "In this paper, we address the task of action recognition from 3D point cloud sequences. We propose a novel pipeline wherein points are grouped into temporally evolv-" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 418, + 547, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 418, + 547, + 527 + ], + "spans": [ + { + "bbox": [ + 304, + 418, + 547, + 527 + ], + "type": "text", + "content": "ing patches that capture discriminative action dynamics. Our work is motivated by the massive growth of online media, mobile and surveillance cameras that have enabled the computer vision community to develop many data-driven action-recognition methods [5, 12, 26, 31], most of which rely on RGB video data. Recently, commodity 3D sensors are gaining increased momentum, however, the 3D point cloud modality for action recognition has yet been underexploited due to the scarcity of 3D action-labeled data." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 530, + 547, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 530, + 547, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 530, + 547, + 662 + ], + "type": "text", + "content": "In many cases, a pure RGB video-based inference may not be enough and incorporating other modalities like geometry is required. This is especially necessary for safety critical applications such as autonomous systems, where redundancy is crucial, or in scenarios where the video is heavily degraded (e.g., due to poor lighting). Some approaches incorporate geometrical information implicitly, e.g., through intermediate pose estimation [7]. This often entails extra steps that require more time and resources and is still limited to video input. Therefore a more explicit approach is desirable." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "3D sensors provide an alternative modality in the form of point clouds sampled on the environment. 
Despite the vast research on 3D vision and learning, even static 3D point cloud datasets are significantly smaller than their RGB im" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "19978" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 286, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 286, + 95 + ], + "type": "text", + "content": "age counterparts due to difficulties in collecting and labeling. 3D point cloud sequence databases are even smaller," + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 96, + 286, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 96, + 286, + 178 + ], + "spans": [ + { + "bbox": [ + 46, + 96, + 286, + 178 + ], + "type": "text", + "content": "making it more difficult to learn a meaningful 3D action representation. Furthermore, learning a point cloud representation still remains an active research field because point clouds are unstructured, unordered, and may contain a varying number of points. Learning a temporal point cloud representation is even more challenging since, unlike pixels, there is no one-to-one point correspondence through time." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 180, + 286, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 180, + 286, + 335 + ], + "spans": [ + { + "bbox": [ + 46, + 180, + 286, + 335 + ], + "type": "text", + "content": "We address these challenges and propose the 3DinAction pipeline for 3D point cloud action recognition. In our pipeline, we first extract local temporal point patches (t-patches) that reflect a point region's motion in time, see Figure 1. We then learn a t-patch representation using a novel hierarchical architecture that incorporates spatial features in the temporal domain. We finally get an action prediction for each frame in a sequence by aggregating multiple t-patch representations. This pipeline overcomes the need for ground truth point temporal correspondence, grid structure, point order, and a fixed number of points in each frame. Intuitively, patches reflect local surface deformation and are more robust to point correspondence errors." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 336, + 286, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 336, + 286, + 395 + ], + "spans": [ + { + "bbox": [ + 47, + 336, + 286, + 395 + ], + "type": "text", + "content": "We conduct extended experiments to evaluate the performance of our approach compared to existing SoTA methods and show that 3DinAction provides significant performance gains of " + }, + { + "bbox": [ + 47, + 336, + 286, + 395 + ], + "type": "inline_equation", + "content": "13\\%" + }, + { + "bbox": [ + 47, + 336, + 286, + 395 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 336, + 286, + 395 + ], + "type": "inline_equation", + "content": "7\\%" + }, + { + "bbox": [ + 47, + 336, + 286, + 395 + ], + "type": "text", + "content": " in accuracy on DFAUST and IKEA ASM, respectively." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 396, + 259, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 396, + 259, + 407 + ], + "spans": [ + { + "bbox": [ + 58, + 396, + 259, + 407 + ], + "type": "text", + "content": "The key contributions of our work are as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 407, + 286, + 465 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 47, + 407, + 286, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 407, + 286, + 430 + ], + "spans": [ + { + "bbox": [ + 47, + 407, + 286, + 430 + ], + "type": "text", + "content": "- A novel representation for dynamically evolving local point cloud sets termed t-patches." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 431, + 286, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 431, + 286, + 465 + ], + "spans": [ + { + "bbox": [ + 47, + 431, + 286, + 465 + ], + "type": "text", + "content": "- A hierarchical architecture that produces an informative spatio-temporal representation for sequences of point clouds." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 478, + 133, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 478, + 133, + 490 + ], + "spans": [ + { + "bbox": [ + 47, + 478, + 133, + 490 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 498, + 286, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 286, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 286, + 700 + ], + "type": "text", + "content": "Learning 3D point cloud representations. Point clouds pose a challenge for neural networks due to their unstructured and point-wise unordered nature. To address these challenges, several approaches have been proposed. PointNet [23, 24] uses permutation-invariant operators, such as pointwise MLPs and pooling layers, to aggregate features across a point set. Some approaches construct a graph from the point set. DGCNN [34] applies message passing and performs graph convolutions on kNN graphs, KCNet [29] uses kernel correlation and graph pooling, and Kd-Networks [15] apply multiplicative transformations and share the parameters based on the subdivisions imposed by kd-trees. Alternatively, the structure can be imposed using a grid of voxels [22, 36], or a grid of Gaussians in 3DmFVNet [1]. Another alternative avoids the structure by using Transformer's attention mechanism [17, 37]. 
For a comprehensive survey of point cloud architectures please see [14]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 58, + 702, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 286, + 713 + ], + "type": "text", + "content": "Recently, various factors that can impact the training of" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "different architectures have been investigated [13, 25]. This includes exploring data augmentation strategies and loss functions that are not specific to a particular architecture. The results of this study showed that older PointNet-based architectures [23, 24] can perform comparably to newer architectures with minor changes." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 146, + 545, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 146, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 304, + 146, + 545, + 217 + ], + "type": "text", + "content": "All of the above methods deal with static, single-frame, or single-shape point clouds. In this work, the input is a temporal point cloud where a representation for a short sequence is required and point correspondence between frames is unknown. Therefore extending existing approaches is not trivial." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 220, + 545, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 220, + 545, + 517 + ], + "spans": [ + { + "bbox": [ + 304, + 220, + 545, + 517 + ], + "type": "text", + "content": "Learning temporal 3D point cloud representations. Temporal point clouds have not been as extensively studied as their static counterparts, in particular for action recognition. Meteornet [21] processes 4D points using a PointNet++ architecture where they appended a temporal dimension to the spatial coordinates. PSTNet [10, 11] proposed spatio-temporal convolutions and utilized some of the temporal consistency for action recognition. Similarly, P4Transformer [8] uses 4D convolutions and a transformer for capturing appearance and motion via self-attention. In a follow-up work PST-Transformer [9] employs a video level of self-attention in search for similar points across entire videos and so encodes spatio-temporal structure. Some works attempt to alleviate the full supervision requirement for 3D action recognition. These include self-supervised features learning [32] by predicting temporal order from a large unlabeled dataset and fine-tuning on a smaller annotated datasets and unsupervised skeleton colorization [35]. Additional supervised approaches include MinkowskiNet [6] that uses a 4D spatio-temporal CNN after converting the point clouds to an occupancy grid, 3DV [33] that encodes 3D motion information from depth videos into a compact voxel set, and Kinet [38] that implicitly encoded feature level dynamics in feature space by unrolling the normal solver of ST-surfaces." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 520, + 545, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 520, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 304, + 520, + 545, + 591 + ], + "type": "text", + "content": "The above methods, perform a single classification per clip. In this paper, we focus on a related, and more chllang-ing, task that requires a prediction per-frame. We propose to convert the point cloud representation into t-patches and use an MLP based hierarchical architecture to get the spatiotemporal representation." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 594, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 712 + ], + "type": "text", + "content": "3D action understanding datasets. One of the major driving forces behind the success of learning-based approaches is the availability of annotated data. For the task of 3D point cloud action recognition, there is currently no designated standard dataset, however, some existing datasets may be extended. The CAD 60 and CAD 120 [16, 30] datasets include 60 and 120 long-term activity videos of 12 and 10 classes respectively (e.g., making cereal, microwave food). These datasets provide raw RGB, skeletons, and depth data however its small scale and long-term focus limit its effec" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19979" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "type": "text", + "content": "tiveness. The NTU RGB+D 60 [28] and NTU RGB+D 120 [20] provide " + }, + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "type": "inline_equation", + "content": "\\sim 56\\mathrm{K}" + }, + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "type": "inline_equation", + "content": "\\sim 114\\mathrm{K}" + }, + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "type": "text", + "content": " clips containing 60 and 120 actions classes respectively, e.g., taking off a jacket, taking a selfie. They provide three different simultaneous RGB views, IR and depth streams as well as 3D skeletons. While these datasets can be considered large-scale, their contrived nature makes recent skeleton-based methods (e.g., [7]) perform well, making a prior-free approach difficult to justify. The MSR-Action3D dataset [19] includes 20 action classes performed by 10 subjects for a total of 567 depth map sequences, collected using a Kinect v1 device (23K frames). The sequences in this dataset are very short and therefore using it to evaluate learning-based approaches provides a limited indication of generalization. The above datasets provide per clip action annotations." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 255, + 289, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 255, + 289, + 566 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 289, + 566 + ], + "type": "text", + "content": "Some datasets inherently provide per-frame annotations. The IKEA ASM dataset [2] provides 371 videos clipped into 31K clips. It contains 33 action classes related to furniture assembly, annotated per frame. This dataset provides several modalities including three RGB views, and Depth. It is an extremely challenging dataset since the human assembler is often occluded and presents very unique assembly poses. It is also very imbalanced since different assembly actions have different duration and may repeat multiple times within the same assembly. Although it was designed for video action recognition, its challenges are the core reasons for choosing to extend it to the point cloud action recognition task. The DFAUST dataset [3] provides high-resolution 4D scans of human subjects in motion. It includes 14 action categories with over 100 dynamic scans of 10 subjects (1:1 male-to-female ratio) with varying body shapes represented as registrations of aligned meshes, therefore an extension to our task is straightforward. One particularly important feature of this dataset is the GT point correspondences throughout the sequence i.e. it is possible to follow each point's movement through time. While this dataset is not as large-scale as others, it provides ground truth information (correspondence) that most other collected datasets do not. Therefore, we extend this dataset to 3D point cloud action recognition and use it as a testbed for many ablation studies (see Section 4.4)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 583, + 165, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 583, + 165, + 597 + ], + "spans": [ + { + "bbox": [ + 47, + 583, + 165, + 597 + ], + "type": "text", + "content": "3. 3DinAction pipeline" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": "Our 3DinAction pipeline is illustrated in Figure 2. Given a temporal sequence of 3D point clouds we first extract a set of t-patches (Section 3.1). We then feed the t-patches into a hierarchical neural network (Section 3.2) to produce a per-frame high dimensional feature vector representation. Finally, the feature vectors are fed into a classifier to obtain per-frame predictions. The proposed approach is prior-free (no skeleton extraction required) and therefore general and can be used on different action-understanding datasets." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 306, + 72, + 372, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 372, + 85 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 372, + 85 + ], + "type": "text", + "content": "3.1. 
t-patches" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "inline_equation", + "content": "S = \\{x_{j} \\in \\mathbb{R}^{3} \\mid j = 1, \\dots, N\\}" + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "text", + "content": " denote a 3D point cloud with " + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "text", + "content": " points. In the classic (static) setting, a patch " + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "inline_equation", + "content": "\\Psi_{q}" + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "text", + "content": " is extracted around some query point " + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "inline_equation", + "content": "x_{q}" + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "text", + "content": ". For example, the patch " + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "inline_equation", + "content": "\\Psi_{q}" + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "text", + "content": " may be constructed by finding the " + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "text", + "content": "-nearest neighbors of " + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "inline_equation", + "content": "x_{q}" + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 89, + 545, + 150 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 150, + 545, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 150, + 545, + 223 + ], + "spans": [ + { + "bbox": [ + 304, + 150, + 545, + 223 + ], + "type": "text", + "content": "In our temporal setting we are given a sequence of point clouds " + }, + { + "bbox": [ + 304, + 150, + 545, + 223 + ], + "type": "inline_equation", + "content": "S = \\{S^0, \\dots, S^T\\}" + }, + { + "bbox": [ + 304, + 150, + 545, + 223 + ], + "type": "text", + "content": " composed of point cloud frames " + }, + { + "bbox": [ + 304, + 150, + 545, + 223 + ], + "type": "inline_equation", + "content": "S^t = \\{x_j^t \\mid j = 1, \\dots, N^t\\}" + }, + { + "bbox": [ + 304, + 150, + 545, + 223 + ], + "type": "text", + "content": ". Here the superscript " + }, + { + "bbox": [ + 304, + 150, + 545, + 223 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 150, + 545, + 223 + ], + "type": "text", + "content": " is used to denote the index of the point cloud in the sequence. Instead of extracting a patch within a single frame, we allow patches to extend temporally, and denote them as " + }, + { + "bbox": [ + 304, + 150, + 545, + 223 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 150, + 545, + 223 + ], + "type": "text", + "content": "-patches." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "spans": [ + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "text", + "content": "Definition 3.1 A t-patch " + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "inline_equation", + "content": "P_{q}" + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "text", + "content": " is a sequence of point sets indexed by a query point " + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "inline_equation", + "content": "x_{q}^{0}" + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "text", + "content": " and jointly moving in time defined by a pointwise mapping function between patches in consecutive frames. Mathematically, " + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "inline_equation", + "content": "P_{q} = \\langle \\Psi_{q}^{t}\\rangle_{t = 0}^{T}" + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "inline_equation", + "content": "\\Psi_{q}^{0}" + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "text", + "content": " is the initial (static) patch and " + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "inline_equation", + "content": "\\Psi_{q}^{t} = \\Phi (\\Psi_{q}^{t - 1})" + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "text", + "content": " is the patch at time " + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 304, + 233, + 545, + 306 + ], + "type": "text", + "content": " is a pointwise mapping function." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "content": "In practice, it is difficult to find a reliable mapping function " + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "content": ". Therefore we propose a simplified formulation that, for a given query point " + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "inline_equation", + "content": "x_{q}^{0}" + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "content": ", first extracts a patch for the first frame " + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "inline_equation", + "content": "\\Psi_{q}^{0}" + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "content": " and then iteratively extracts corresponding patches for the next frames (iterating over time), by using the closest point in the next frame as the new query point. 
More formally, we specify " + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "inline_equation", + "content": "\\vec{\\Psi}_{q}^{0} \\triangleq \\Psi_{q}^{0}" + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "inline_equation", + "content": "\\vec{\\Psi}_{q}^{t} = knn(x_{q}^{t-1}, S^{t})" + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "inline_equation", + "content": "x_{q}^{t} = n n(x_{q}^{t-1}, S^{t})" + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "inline_equation", + "content": "t = 1, \\ldots, T" + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "content": ". Here " + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "inline_equation", + "content": "knn" + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "content": " nearest neighbor and " + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "inline_equation", + "content": "n n" + }, + { + "bbox": [ + 304, + 316, + 545, + 437 + ], + "type": "text", + "content": " is nearest neighbor. Then, the simplified t-patch formulation is given by" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 369, + 445, + 545, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 445, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 369, + 445, + 545, + 464 + ], + "type": "interline_equation", + "content": "\\vec {P} _ {q} = \\left\\langle \\vec {\\Psi} _ {q} ^ {t} \\mid t = 0, \\dots , T \\right\\rangle \\tag {1}", + "image_path": "7dcafea7eb9800e273b651079e27aef7944a1d98b880ff155da8eda2f598a884.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 472, + 545, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 472, + 545, + 519 + ], + "spans": [ + { + "bbox": [ + 304, + 472, + 545, + 519 + ], + "type": "text", + "content": "See Figure 3 left for an illustration of the t-patch extraction process. Note that if ground truth correspondence is available " + }, + { + "bbox": [ + 304, + 472, + 545, + 519 + ], + "type": "inline_equation", + "content": "knn" + }, + { + "bbox": [ + 304, + 472, + 545, + 519 + ], + "type": "text", + "content": " can be swapped back to " + }, + { + "bbox": [ + 304, + 472, + 545, + 519 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 304, + 472, + 545, + 519 + ], + "type": "text", + "content": ". However, this does not guarantee improved performance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 520, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 520, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 520, + 545, + 713 + ], + "type": "text", + "content": "Temporal t-patch collapse. The simplified formulation of extracting t-patches inherently suffers from the problem of two or more t-patches collapsing into having the same points after a certain frame. We call this scenario t-patch temporal collapse. 
Temporal collapse can happen whenever " + }, + { + "bbox": [ + 304, + 520, + 545, + 713 + ], + "type": "inline_equation", + "content": "x_{q}^{t} = x_{p}^{t}" + }, + { + "bbox": [ + 304, + 520, + 545, + 713 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 304, + 520, + 545, + 713 + ], + "type": "inline_equation", + "content": "x_{q}^{0} \\neq x_{p}^{0}" + }, + { + "bbox": [ + 304, + 520, + 545, + 713 + ], + "type": "text", + "content": ". The main issue with temporal collapse is the reduction in point coverage as time progresses, i.e. the patches covering the last point cloud have significant overlaps and therefore include fewer points than the first frame and so missing vital data. An illustration of the t-patch collapse problem is available in Figure 3 (right). To mitigate this issue, we propose two solutions. First, adding small noise to each iteration's query points, i.e. " + }, + { + "bbox": [ + 304, + 520, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\overrightarrow{\\Psi}_{q}^{t} = knn(x_{q}^{t} + \\epsilon, S^{t+1})" + }, + { + "bbox": [ + 304, + 520, + 545, + 713 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 520, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\epsilon \\sim \\mathcal{N}(\\mu, \\sigma^{2})" + }, + { + "bbox": [ + 304, + 520, + 545, + 713 + ], + "type": "text", + "content": " is a small Gaussian noise. Second, we propose to construct t-patches from the first to last frame but also in reverse, initializing" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19980" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 87, + 539, + 324 + ], + "blocks": [ + { + "bbox": [ + 60, + 87, + 539, + 324 + ], + "lines": [ + { + "bbox": [ + 60, + 87, + 539, + 324 + ], + "spans": [ + { + "bbox": [ + 60, + 87, + 539, + 324 + ], + "type": "image", + "image_path": "ca03231e6ca24b0c3a26aa855970e4a3c0f6682cb3ba7e648684c1c9763fd057.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 331, + 546, + 367 + ], + "lines": [ + { + "bbox": [ + 46, + 331, + 546, + 367 + ], + "spans": [ + { + "bbox": [ + 46, + 331, + 546, + 367 + ], + "type": "text", + "content": "Figure 2. 3DinAction pipeline. Given a sequence of point clouds, a set of t-patches is extracted. The t-patches are fed into a neural network to output an embedding vector. This is done hierarchically until finally the global t-patch vectors are pooled to get a per-frame point cloud embedding which is then fed into a classifier to output an action prediction per frame." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 53, + 388, + 273, + 483 + ], + "blocks": [ + { + "bbox": [ + 53, + 388, + 273, + 483 + ], + "lines": [ + { + "bbox": [ + 53, + 388, + 273, + 483 + ], + "spans": [ + { + "bbox": [ + 53, + 388, + 273, + 483 + ], + "type": "image", + "image_path": "fa2721163831b9461c340b37e484f9bf65404e8c33d5584b94aa6d6c81df9fd3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 499, + 287, + 567 + ], + "lines": [ + { + "bbox": [ + 46, + 499, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 46, + 499, + 287, + 567 + ], + "type": "text", + "content": "Figure 3. t-patch construction and collapse. Illustration of t-patch construction (left) and collapse (right). Starting from an origin point " + }, + { + "bbox": [ + 46, + 499, + 287, + 567 + ], + "type": "inline_equation", + "content": "x_{q}^{0}" + }, + { + "bbox": [ + 46, + 499, + 287, + 567 + ], + "type": "text", + "content": " we find the nearest neighbours in the next frame iteratively to construct the t-patch subset (non-black points). A collapse happens when two different origin points, " + }, + { + "bbox": [ + 46, + 499, + 287, + 567 + ], + "type": "inline_equation", + "content": "x_{q}^{0}" + }, + { + "bbox": [ + 46, + 499, + 287, + 567 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 499, + 287, + 567 + ], + "type": "inline_equation", + "content": "x_{p}^{0}" + }, + { + "bbox": [ + 46, + 499, + 287, + 567 + ], + "type": "text", + "content": ", have the same nearest neighbour at some time step, " + }, + { + "bbox": [ + 46, + 499, + 287, + 567 + ], + "type": "inline_equation", + "content": "\\Psi_{p}^{3} = \\Psi_{q}^{3}" + }, + { + "bbox": [ + 46, + 499, + 287, + 567 + ], + "type": "text", + "content": " here." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 586, + 287, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 586, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 46, + 586, + 287, + 624 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 46, + 586, + 287, + 624 + ], + "type": "inline_equation", + "content": "\\Psi_q^0" + }, + { + "bbox": [ + 46, + 586, + 287, + 624 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 586, + 287, + 624 + ], + "type": "inline_equation", + "content": "\\Psi_q^T" + }, + { + "bbox": [ + 46, + 586, + 287, + 624 + ], + "type": "text", + "content": ", respectively. We name this variation bidirectional t-patches. 
More formally bidirectional t-patches are given by," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 641, + 287, + 675 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 641, + 287, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 641, + 287, + 675 + ], + "type": "interline_equation", + "content": "\\stackrel {\\leftrightarrow} {P} = \\left(\\bigcup_ {q} \\vec {P} _ {q}\\right) \\cup \\left(\\bigcup_ {p} \\overleftarrow {P} _ {p}\\right) \\tag {2}", + "image_path": "8379231c215d23c9081ab301f5ab9442175e3f27990f5945b18b0bd66ad10f7e.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 684, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 684, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 684, + 287, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 684, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\overleftarrow{P}_p" + }, + { + "bbox": [ + 47, + 684, + 287, + 715 + ], + "type": "text", + "content": " is defined similarly to " + }, + { + "bbox": [ + 47, + 684, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\vec{P}_q" + }, + { + "bbox": [ + 47, + 684, + 287, + 715 + ], + "type": "text", + "content": " but in the reverse direction, i.e., " + }, + { + "bbox": [ + 47, + 684, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\overleftarrow{\\Psi}_p^T \\triangleq \\Psi_p^T" + }, + { + "bbox": [ + 47, + 684, + 287, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 684, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\overleftarrow{\\Psi}_p^t = knn(x_p^{t+1}, S^t)" + }, + { + "bbox": [ + 47, + 684, + 287, + 715 + ], + "type": "text", + "content": " for" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 386, + 545, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 386, + 545, + 411 + ], + "spans": [ + { + "bbox": [ + 305, + 386, + 545, + 411 + ], + "type": "inline_equation", + "content": "t = T - 1,\\ldots ,0" + }, + { + "bbox": [ + 305, + 386, + 545, + 411 + ], + "type": "text", + "content": " . Here, the final set of t-patches is composed of an equal number of t-patches from both directions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 418, + 448, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 418, + 448, + 430 + ], + "spans": [ + { + "bbox": [ + 306, + 418, + 448, + 430 + ], + "type": "text", + "content": "3.2. Hierarchical architecture" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": "The proposed architecture is composed of " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": " consecutive t-patch modules. Each module receives a point cloud sequence " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": " as input. 
The sequence is fed into a t-patch extractor where it undergoes subsampling and t-patch extraction, forming " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\tilde{S}_l" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "P^l" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": " respectively. Then, the t-patches are fed into t-patch Net, a network that computes a high-dimensional feature vector " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "f_{l}" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": " for each t-patch, parametrized by " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\theta_l" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": ". The subsampled sequence " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\tilde{S}_l" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": " and its corresponding t-patch features " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "f_{l}" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": " are then fed into the next t-patch module. These modules form a hierarchy in the sense that each module receives as input a sparser point cloud with a higher dimensional feature vector representing each point (encoding both spatial and temporal information). Note that both the t-patch points and their features are fed into t-patch Net. t-patch extractor. We first subsample the first frame in the point cloud sequence " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "S^0" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": " using farthest point sampling (FPS) to form a set of " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": " query points " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\tilde{S}^0 = \\{x_j^0\\in FPS(S^0,M)\\}" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": ". The set " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\tilde{S}^0" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": " is used to form the t-patches. Subsampling is required since computing a t-patch for each point is inefficient and unnecessary due to overlaps. After subsampling, we extract " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": " t-patches using Equation 2 where " + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "inline_equation", + "content": "q\\in \\tilde{S}^0" + }, + { + "bbox": [ + 304, + 437, + 545, + 713 + ], + "type": "text", + "content": ". 
The extractor operates on both 3D points and their corresponding features (for mod" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19981" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 167, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 167, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 167, + 83 + ], + "type": "text", + "content": "ules deeper in the hierarchy)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 84, + 289, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 84, + 289, + 301 + ], + "spans": [ + { + "bbox": [ + 46, + 84, + 289, + 301 + ], + "type": "text", + "content": "Model architecture and t-patch net. The t-patch network computes a high dimensional representation for each t-patch. The t-patch Net architecture is composed of several MLP layers operating on the non-temporal dimensions (sharing weights across points) followed by a convolutional layer operating on both the temporal and feature dimensions. Note that the network weights are also shared across t-patches. The output of each t-patch Net is a vector for each frame. The final frame representation is obtained by aggregating all of the t-patch features using a max pooling operation i.e. " + }, + { + "bbox": [ + 46, + 84, + 289, + 301 + ], + "type": "inline_equation", + "content": "\\text{maxpool}_{M_l}(f_3)" + }, + { + "bbox": [ + 46, + 84, + 289, + 301 + ], + "type": "text", + "content": ". This representation is then fed into a classifier consisting of three fully connected layers with temporal smoothing and softmax to output the final action prediction. To train the network we use the same losses of RGB based approaches [2, 5] which include a per-frame prediction cross entropy loss and a per-sequence prediction cross entropy loss (summed and weighted evenly) " + }, + { + "bbox": [ + 46, + 84, + 289, + 301 + ], + "type": "inline_equation", + "content": "L_{total} = L_{frame} + L_{seq}" + }, + { + "bbox": [ + 46, + 84, + 289, + 301 + ], + "type": "text", + "content": ". For full details see supplemental." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 310, + 128, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 310, + 128, + 323 + ], + "spans": [ + { + "bbox": [ + 47, + 310, + 128, + 323 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 331, + 287, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 331, + 287, + 449 + ], + "spans": [ + { + "bbox": [ + 46, + 331, + 287, + 449 + ], + "type": "text", + "content": "We evaluate the performance of our approach on three datasets. The results show that the 3DinAction pipeline outperforms all baselines in DFAUST [3] and IKEA ASM [2] and is comparable in MSR-Action 3D [19]. We then conduct an ablation study for selecting parameters and t-patch extraction method showing that adding jitter and bidirectional t-patches is beneficial. Finally, we report time performance and show the tradeoff between performance and inference time. For more results and experiments, see supplemental material." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 450, + 287, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 450, + 287, + 594 + ], + "spans": [ + { + "bbox": [ + 46, + 450, + 287, + 594 + ], + "type": "text", + "content": "Baselines and evaluation metrics. For evaluation, we report several standard metrics [4]: the top1 and top3 framework accuracy are the de facto standard for action classification. We compute it by summing the number of correctly classified frames and dividing by the total number of frames in each video and then averaging over all videos in the test set. Additionally, since some of the datasets are imbalanced and may contain different actions for each frame in a clip, we also report the macro-recall by separately computing recall for each category and then averaging (macro). Finally, we report the mean average precision (mAP) since all untrimmed videos contain multiple action labels." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": "For DFAUST and IKEA ASM we report static methods PointNet [23], " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{PointNet}^{++}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " [24], and Set Transformer [18] by applying them on each point cloud frame individually. Additionally, we report temporal methods like PSTNet [10] and also implemented a temporal smoothing version of each static method (PoinNet+TS, " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{Pointnet}^{++} + \\mathrm{TS}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": ", and Set Transformer+TS respectively) by learning the weights of a convolutional layer over the temporal dimension. Temporal smoothing aims to provide a naive baseline for utilizing temporal information in addition to spatial information." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 109 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 109 + ], + "type": "text", + "content": "Note that in all experiments, unless otherwise specified, our method uses the simplified formulation with jitter and bidirectional t-patches." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 116, + 484, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 116, + 484, + 129 + ], + "spans": [ + { + "bbox": [ + 305, + 116, + 484, + 129 + ], + "type": "text", + "content": "4.1. Experiments on DFAUST dataset" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 135, + 545, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 135, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 304, + 135, + 545, + 171 + ], + "type": "text", + "content": "We extend the DFAUST dataset for the task of action recognition and show that the proposed approach outperforms other methods (see Table 1)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 171, + 546, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 171, + 546, + 361 + ], + "spans": [ + { + "bbox": [ + 304, + 171, + 546, + 361 + ], + "type": "text", + "content": "DFAUST dataset [3]. We extended the DFAUST dataset to our task by subdividing it into clips of 64 frames with train and test human subjects. The split was constructed so no subject will appear in both training and test set as well as guarantee that all actions appear in both. The train and test sets contain 76 full-length sequences (395 clips, and " + }, + { + "bbox": [ + 304, + 171, + 546, + 361 + ], + "type": "inline_equation", + "content": "\\sim 25\\mathrm{K}" + }, + { + "bbox": [ + 304, + 171, + 546, + 361 + ], + "type": "text", + "content": " frames) and 53 sequences (313 clips, and " + }, + { + "bbox": [ + 304, + 171, + 546, + 361 + ], + "type": "inline_equation", + "content": "\\sim 20\\mathrm{K}" + }, + { + "bbox": [ + 304, + 171, + 546, + 361 + ], + "type": "text", + "content": " frames) respectively. Each point cloud frame contains 6890 points. These points are mesh vertices and therefore the density varies greatly (e.g., very dense on the face, hands, and feet and sparser on the legs). For all baselines, we sampled a set of 1024 points using the farthest point sampling algorithm to provide a more uniform set of points. For this dataset, all frames in a clip have the same label. Note that not all actions are performed by all subjects. For the full action list and dataset statistics, see the supplemental." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 363, + 546, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 363, + 546, + 541 + ], + "spans": [ + { + "bbox": [ + 304, + 363, + 546, + 541 + ], + "type": "text", + "content": "Results. The results, reported in Table 1, show that our proposed approach outperforms all baselines by a large margin. It also shows that temporal smoothing boosts performance significantly for all static baselines. Additionally, to explore the influence of our simplified knn-based temporal point mapping, we used the GT point correspondence to match the consecutive t-patch origin points and report the results as another baseline (Ours + GT corr). The results show that there is a mAP performance gain with GT correspondence, however, it is limited. Note that in most datasets, this GT correspondence is not available. Finally, we also experimented with a Transformer architecture to process the t-patch learned representations and show that it does not provide additional performance boost. This may be attributed to the dataset size." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 543, + 545, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 543, + 545, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 543, + 545, + 651 + ], + "type": "text", + "content": "Insight. We extended the GradCam [27] approach for our 3DinAction pipeline. Using this approach we get a score per point in each t-patch proportional to its influence on classifying the frame to a given target class. The results in Figure 4 show that, as expected, our approach learns meaningful representations since the most prominent regions are the ones with the informative motion. For example, in the Jumping jacks action (top row) the hands are most prominent as they are making a large and distinct motion." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 658, + 497, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 658, + 497, + 671 + ], + "spans": [ + { + "bbox": [ + 305, + 658, + 497, + 671 + ], + "type": "text", + "content": "4.2. Experiments on IKEA ASM dataset" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "IKEA ASM dataset [2]. This dataset consists of 371 videos (3M frames) of people assembling IKEA furniture in different indoor environments. It was collected using a" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19982" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 70, + 282, + 235 + ], + "blocks": [ + { + "bbox": [ + 53, + 70, + 282, + 235 + ], + "lines": [ + { + "bbox": [ + 53, + 70, + 282, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 70, + 282, + 235 + ], + "type": "table", + "html": "
Method | Frame acc. top 1 | Frame acc. top 3 | mAP
3DmFVNet [1] | 60.86 | 87.68 | 0.7171
PointNet [23] | 65.67 | 86.44 | 0.7161
PointNet++ [24] | 58.51 | 88.28 | 0.5842
Set Transformer [18] | 52.27 | 81.98 | 0.6209
PointNet [23] + TS | 74.10 | 94.00 | 0.7863
PointNet++ [24] + TS | 67.88 | 86.21 | 0.7563
Set Transformer [18] + TS | 62.95 | 90.33 | 0.7322
PSTNet [10] | 50.70 | 78.28 | 0.6490
Ours + GT corr | 77.67 | 95.38 | 0.8762
Ours + Transformer | 77.09 | 93.77 | 7.49
Ours | 87.26 | 99.26 | 0.8616
", + "image_path": "5e49459696c917a9ed76439f8384bbb834626ef28a354aa2ba68bfec22ae5eca.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 243, + 287, + 277 + ], + "lines": [ + { + "bbox": [ + 46, + 243, + 287, + 277 + ], + "spans": [ + { + "bbox": [ + 46, + 243, + 287, + 277 + ], + "type": "text", + "content": "Table 1. Action recognition results on DFAUST. Reporting frame-wise accuracy and mean average precision. Ours outperforms all baselines by a large margin." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 62, + 291, + 278, + 571 + ], + "blocks": [ + { + "bbox": [ + 62, + 291, + 278, + 571 + ], + "lines": [ + { + "bbox": [ + 62, + 291, + 278, + 571 + ], + "spans": [ + { + "bbox": [ + 62, + 291, + 278, + 571 + ], + "type": "image", + "image_path": "70a0c3e96fa4cf736b34a39ff64e0b044dbff4a15bf7a0395ab03946ea6db655.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 578, + 287, + 643 + ], + "lines": [ + { + "bbox": [ + 46, + 578, + 287, + 643 + ], + "spans": [ + { + "bbox": [ + 46, + 578, + 287, + 643 + ], + "type": "text", + "content": "Figure 4. 3DinAction GradCAM scores. The proposed 3DinAction pipeline learns meaningful representations for prominent regions. The presented actions are jumping jacks (top row), hips (middle row), and knees (bottom row). The columns represent progressing time steps from left to right. Colormap indicates high GradCAM scores in red and low scores in blue." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": "Kinect V2 camera and provides camera parameters to reconstruct point clouds in camera coordinates. It provides action annotation for each frame (33 classes). It is a highly challenging dataset for two main reasons: (1) It is highly" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "text", + "content": "imbalanced since some actions have a long duration and occur multiple times in each video (e.g., spin leg) and some are shorter and sparser (flip tabletop). (2) The assembly motion includes a lot of self-occlusion as well as subtle movements. The train/test split consists of 254 and 117 full sequences respectively. The split is environment-based (i.e. in the test set there is no environment that appeared in the training set). The assembly videos have an average of " + }, + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "text", + "content": " 2735 frames per video. The point clouds provided in this dataset are aligned to the camera coordinate frame, posing a challenge for methods that are sensitive to rotations since the camera moves between different scans." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 217, + 546, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 217, + 546, + 419 + ], + "spans": [ + { + "bbox": [ + 304, + 217, + 546, + 419 + ], + "type": "text", + "content": "Results. The results on the IKEA ASM dataset are reported in Table 2. The results show that the proposed 3DinAction pipeline provides a significant performance boost over static approaches and their temporally smooth variants. Additionally, as expected, PointNet and Set Transformer are heavily affected by the variations in coordinate frames. " + }, + { + "bbox": [ + 304, + 217, + 546, + 419 + ], + "type": "inline_equation", + "content": "\\mathrm{PointNet}^{++}" + }, + { + "bbox": [ + 304, + 217, + 546, + 419 + ], + "type": "text", + "content": " on the other hand performs better since it uses local coordinate frames for each local region. All methods show an improved mAP when using the temporally smooth variant with degradation in frame-wise accuracy due to the dataset imbalance. For this dataset, the top1 metric is not always indicative of the quality of performance because a high top1 is directly correlated with many frames classified as the most common class. Additionally, we compare to pose-based methods reported in [2] and show that the proposed approach also outperforms these baselines. See supplementary material for confusion matrices." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 421, + 547, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 421, + 547, + 589 + ], + "spans": [ + { + "bbox": [ + 304, + 421, + 547, + 589 + ], + "type": "text", + "content": "t-patch intuition and visualization. In Figure 5 we visualize the t-patches for the flip table action in the TV Bench assembly. A set of selected t-patches are highlighted in color demonstrating different types of t-patches and their spatiotemporal changes. The blue is on the moving TV Bench assembly, it moves rigidly with the assembly. The maroon is on the moving person's arm, it exhibits nonrigid motion and deformations through time. The teal is on the static table surface containing some of the TV Bench's points in the first frame but remains static when it moves since its origin query point is on the table. The green is on the static carpet, remaining approximately the same through time. Note that the RGB images are for visualization purposes and are not used in our pipeline." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 598, + 515, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 598, + 515, + 611 + ], + "spans": [ + { + "bbox": [ + 305, + 598, + 515, + 611 + ], + "type": "text", + "content": "4.3. Experiments on MSR-Action3D dataset" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "For this dataset, the task is to predict a single class for a sequence of frames (unlike the other datasets where a per-frame prediction is required). To that end, we replace our classifier with a single fully connected layer and max pooled the results over the temporal domain (similar to [10]). The results, reported in Table 3, show that all SoTA methods, including the proposed approach, exhibit very similar performance. 
This is mainly attributed to the small scale of the" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19983" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 69, + 175, + 236 + ], + "blocks": [ + { + "bbox": [ + 53, + 69, + 175, + 236 + ], + "lines": [ + { + "bbox": [ + 53, + 69, + 175, + 236 + ], + "spans": [ + { + "bbox": [ + 53, + 69, + 175, + 236 + ], + "type": "image", + "image_path": "507d68e235f3b0de3debfe9bb776f8bdb370ba2e44031561c43649e19c75caf8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 247, + 547, + 282 + ], + "lines": [ + { + "bbox": [ + 46, + 247, + 547, + 282 + ], + "spans": [ + { + "bbox": [ + 46, + 247, + 547, + 282 + ], + "type": "text", + "content": "Figure 5. IKEA ASM example with t-patches. The flip table action for the TV Bench assembly is visualization including the RGB image (top), and a grayscale 3D point cloud with t-patches (bottom). t-patches are highlighted in color. The blue is on the moving TV Bench assembly, maroon is on the moving persons arm, teal is on the static table surface, and green is on the colorful static carpet." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 176, + 69, + 296, + 236 + ], + "blocks": [ + { + "bbox": [ + 176, + 69, + 296, + 236 + ], + "lines": [ + { + "bbox": [ + 176, + 69, + 296, + 236 + ], + "spans": [ + { + "bbox": [ + 176, + 69, + 296, + 236 + ], + "type": "image", + "image_path": "ee4b5844cd5c3ca6e3f785a24d468c225c86bf8d1e63ba13aeed01e0d5bbd6d4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 297, + 69, + 417, + 236 + ], + "blocks": [ + { + "bbox": [ + 297, + 69, + 417, + 236 + ], + "lines": [ + { + "bbox": [ + 297, + 69, + 417, + 236 + ], + "spans": [ + { + "bbox": [ + 297, + 69, + 417, + 236 + ], + "type": "image", + "image_path": "2468e71df666d5da49836430f28668bdeca4592a2d25a2397b3cf1d5dac6db67.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 419, + 70, + 539, + 236 + ], + "blocks": [ + { + "bbox": [ + 419, + 70, + 539, + 236 + ], + "lines": [ + { + "bbox": [ + 419, + 70, + 539, + 236 + ], + "spans": [ + { + "bbox": [ + 419, + 70, + 539, + 236 + ], + "type": "image", + "image_path": "1fd39becc884c2a336ecfbdc908e4f44c57ebdff495c1fe9be3b979531ab02ad.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 48, + 300, + 287, + 466 + ], + "blocks": [ + { + "bbox": [ + 48, + 300, + 287, + 466 + ], + "lines": [ + { + "bbox": [ + 48, + 300, + 287, + 466 + ], + "spans": [ + { + "bbox": [ + 48, + 300, + 287, + 466 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Frame acc.</td></tr>
<tr><td>top 1</td><td>top 3</td><td>macro</td><td>mAP</td></tr>
<tr><td>PointNet [23]</td><td>4.20</td><td>19.86</td><td>5.76</td><td>0.0346</td></tr>
<tr><td>PointNet++ [24]</td><td>45.97</td><td>70.10</td><td>29.48</td><td>0.1187</td></tr>
<tr><td>Set Transformer [18]</td><td>14.96</td><td>57.12</td><td>13.16</td><td>0.0299</td></tr>
<tr><td>PointNet [23] + TS</td><td>6.00</td><td>19.48</td><td>5.14</td><td>0.0804</td></tr>
<tr><td>PointNet++ [24] + TS</td><td>27.84</td><td>60.64</td><td>27.72</td><td>0.2024</td></tr>
<tr><td>Set Transformer [18] + TS</td><td>9.54</td><td>36.50</td><td>10.74</td><td>0.1471</td></tr>
<tr><td>PSTNet [10]</td><td>17.94</td><td>52.24</td><td>17.14</td><td>0.2016</td></tr>
<tr><td>Human Pose HCN [2]</td><td>39.15</td><td>65.37</td><td>28.18</td><td>0.2232</td></tr>
<tr><td>Human Pose ST-GCN [2]</td><td>43.4</td><td>66.29</td><td>26.54</td><td>0.1856</td></tr>
<tr><td>Ours without BD</td><td>45.16</td><td>72.83</td><td>35.06</td><td>0.2932</td></tr>
<tr><td>Ours</td><td>52.91</td><td>75.03</td><td>38.84</td><td>0.2875</td></tr></table>
", + "image_path": "6707409b54fecadcd6b8c7317b4337d89cc6bbfc2ac63db00676d67bfca147d1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 529, + 287, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 529, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 46, + 529, + 287, + 590 + ], + "type": "text", + "content": "dataset and the lack of diversity in the action classes. Furthermore, we witnessed that the main performance gap is for frames and sequences where the action is indistinguishable (e.g., first few frames of a sequence where no distinguishable action commenced)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 598, + 139, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 598, + 139, + 611 + ], + "spans": [ + { + "bbox": [ + 47, + 598, + 139, + 611 + ], + "type": "text", + "content": "4.4. Ablation study" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": "t-patch extraction. We studied the t-patch extraction method and its effects on action recognition on a noisy version of the DFAUST dataset. The results reported in Table 4, show the significance of the t-patch collapse problem and the effectiveness of adding small jitter and bidirectional t-patches to overcome it. In the DFAUST dataset, finding the nearest neighbor between frames provides a " + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\sim 96\\%" + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": " correspondence accuracy (small motion between frames)." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 307, + 300, + 553, + 392 + ], + "blocks": [ + { + "bbox": [ + 46, + 472, + 288, + 508 + ], + "lines": [ + { + "bbox": [ + 46, + 472, + 288, + 508 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 288, + 508 + ], + "type": "text", + "content": "Table 2. Action classification on IKEA ASM. The proposed approach provides a significant performance boost over other static and dynamic approaches, including the temporal smoothing (TS)." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 300, + 553, + 392 + ], + "lines": [ + { + "bbox": [ + 307, + 300, + 553, + 392 + ], + "spans": [ + { + "bbox": [ + 307, + 300, + 553, + 392 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>#frames</td></tr>
<tr><td>4</td><td>8</td><td>12</td><td>16</td><td>24</td></tr>
<tr><td>PSTNet [10]</td><td>81.14</td><td>83.50</td><td>87.88</td><td>89.90</td><td>91.20</td></tr>
<tr><td>P4Transformer [8]</td><td>80.13</td><td>83.17</td><td>87.54</td><td>89.56</td><td>90.94</td></tr>
<tr><td>PST-Transformer [9]</td><td>81.14</td><td>83.97</td><td>88.15</td><td>91.98</td><td>93.73</td></tr>
<tr><td>Kinet [38]</td><td>79.80</td><td>83.84</td><td>88.53</td><td>91.92</td><td>93.27</td></tr>
<tr><td>Ours</td><td>80.47</td><td>86.20</td><td>88.22</td><td>90.57</td><td>92.23</td></tr></table>
", + "image_path": "139423c391a6afbb65748bc51cef5befa0b036a618e685e50d34a3bcbaaf2bab.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 400, + 547, + 434 + ], + "lines": [ + { + "bbox": [ + 305, + 400, + 547, + 434 + ], + "spans": [ + { + "bbox": [ + 305, + 400, + 547, + 434 + ], + "type": "text", + "content": "Table 3. MSR-Action3D classification results. Reporting classification accuracy for clips of different lengths. Results show that all methods are comparable since this dataset's scale is limited." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 460, + 545, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 545, + 520 + ], + "type": "text", + "content": "Therefore, in this experiment, we augment the dataset once by adding small Gaussian noise to each point in the dataset " + }, + { + "bbox": [ + 304, + 460, + 545, + 520 + ], + "type": "inline_equation", + "content": "(\\sigma = 0.01)" + }, + { + "bbox": [ + 304, + 460, + 545, + 520 + ], + "type": "text", + "content": ", decreasing the correspondence accuracy to " + }, + { + "bbox": [ + 304, + 460, + 545, + 520 + ], + "type": "inline_equation", + "content": "\\sim 62.4\\%" + }, + { + "bbox": [ + 304, + 460, + 545, + 520 + ], + "type": "text", + "content": " and introducing multiple t-patch collapse instances as well as increasing the classification difficulty." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 521, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 546, + 713 + ], + "type": "text", + "content": "Several variants of the t-patch extraction were explored. The first variation (GT) incorporates the ground truth correspondence into the t-patch extraction. Using this method, there is no t-patch collapse since there is a one-to-one mapping between frames. We expected this to produce an upper bound on the performance, however, surprisingly the results show that this variation is actually inferior to the proposed t-patch approach. We attribute this to the proposed t-patch extraction inherent augmentation caused by the downsampling and nearest neighbor point jitter. We then continue to explore the proposed approaches for dealing with t-patch collapse which include jitter, i.e. adding small noise to each point before finding its nearest neighbor in the next frame, and the bidirectional t-patches that extract patches both from the first to the last frame and from the last to the first frame. The results show that adding jitter is al" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "19984" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 70, + 282, + 198 + ], + "blocks": [ + { + "bbox": [ + 52, + 70, + 282, + 198 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 282, + 198 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 282, + 198 + ], + "type": "table", + "html": "
<table><tr><td>Frame acc.</td></tr>
<tr><td>Data</td><td>GT</td><td>Jitter</td><td>BD</td><td>top 1</td><td>top 3</td><td>mAP</td></tr>
<tr><td>clean</td><td>77.67</td><td>95.38</td><td>0.8762</td></tr>
<tr><td>74.73</td><td>92.14</td><td>0.8097</td></tr>
<tr><td>80.49</td><td>96.61</td><td>0.9023</td></tr>
<tr><td>87.26</td><td>99.26</td><td>0.8616</td></tr>
<tr><td>noisy</td><td>76.08</td><td>95.50</td><td>0.9013</td></tr>
<tr><td>66.74</td><td>93.76</td><td>0.7626</td></tr>
<tr><td>81.83</td><td>98.97</td><td>0.9220</td></tr>
<tr><td>80.03</td><td>97.57</td><td>0.8975</td></tr></table>
", + "image_path": "439d33761319830dbd20e1f69c47475ee00906206240291ef90437ed1b97c37f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 85, + 254, + 249, + 346 + ], + "blocks": [ + { + "bbox": [ + 47, + 206, + 288, + 242 + ], + "lines": [ + { + "bbox": [ + 47, + 206, + 288, + 242 + ], + "spans": [ + { + "bbox": [ + 47, + 206, + 288, + 242 + ], + "type": "text", + "content": "Table 4. t-patch collapse ablation on DFAUST. Exploring adding (1) GT - ground truth correspondences, (2) jitter - small Gaussian noise in t-patch construction, and (3) BD - bidirectional t-patches." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 85, + 254, + 249, + 346 + ], + "lines": [ + { + "bbox": [ + 85, + 254, + 249, + 346 + ], + "spans": [ + { + "bbox": [ + 85, + 254, + 249, + 346 + ], + "type": "table", + "html": "
<table><tr><td>Frame acc.</td></tr>
<tr><td>n</td><td>k</td><td>top 1</td><td>top 3</td><td>mAP</td></tr>
<tr><td>256</td><td>16</td><td>76.96</td><td>97.54</td><td>0.8430</td></tr>
<tr><td>512</td><td>16</td><td>80.03</td><td>97.57</td><td>0.8975</td></tr>
<tr><td>1024</td><td>16</td><td>77.30</td><td>97.88</td><td>0.8507</td></tr>
<tr><td>512</td><td>8</td><td>76.87</td><td>96.21</td><td>0.7557</td></tr>
<tr><td>512</td><td>32</td><td>77.91</td><td>96.60</td><td>0.7453</td></tr></table>
", + "image_path": "c1dcb349e0d350c0e2fd6ddd7dd6a20489e3deabe911935df3d519aeaca662d9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 411, + 287, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 411, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 46, + 411, + 287, + 495 + ], + "type": "text", + "content": "ways beneficial and provides a boost in performance. The bidirectional t-patches improve accuracy performance significantly when the data is clean and are comparable when the data is noisy. Note that in both dataset variations, the degradation due to temporal t-patch collapse is low compared to Kinect-based scan data, therefore the bidirectional benefits are not fully utilized." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "text", + "content": "t-patch parameters. The core parameters for t-patch extraction are the number of neighbors to extract " + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "inline_equation", + "content": "(k)" + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "text", + "content": " and the number of points to subsample " + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "inline_equation", + "content": "(n)" + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "text", + "content": ". Here there is a tradeoff between complexity and performance i.e. when " + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "text", + "content": " are small, the input to the model is small accordingly but the overall coverage is reduced and therefore performance is lower. We explored their influence on the noisy DFAUST dataset and report the results in Table 5. The results show that the method is fairly robust to the selection of these parameters, producing comparable results for all. The best performance was obtained for " + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "inline_equation", + "content": "n = 512" + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "inline_equation", + "content": "k = 16" + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "text", + "content": ". Surprisingly, the performance slightly degrades when increasing " + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 497, + 287, + 664 + ], + "type": "text", + "content": " beyond these values. This is likely due to the increase in model size, which easily overfits on a dataset of this size." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 714 + ], + "type": "text", + "content": "Time and parameters. We report the time performance and the number of parameters of several baselines in Table 6. The results show the tradeoff between performance and time, i.e. the temporal approaches exhibit longer pro" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 307, + 70, + 547, + 175 + ], + "blocks": [ + { + "bbox": [ + 47, + 354, + 288, + 389 + ], + "lines": [ + { + "bbox": [ + 47, + 354, + 288, + 389 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 288, + 389 + ], + "type": "text", + "content": "Table 5. t-patch parameters ablation. Results for the number of neighboring points in a patch " + }, + { + "bbox": [ + 47, + 354, + 288, + 389 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 47, + 354, + 288, + 389 + ], + "type": "text", + "content": " and number of downsampled points " + }, + { + "bbox": [ + 47, + 354, + 288, + 389 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 47, + 354, + 288, + 389 + ], + "type": "text", + "content": " show that the method is robust." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 70, + 547, + 175 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 547, + 175 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 547, + 175 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Time [ms]</td><td># parameters</td></tr>
<tr><td>PointNet [23]</td><td>64.49</td><td>3.5M</td></tr>
<tr><td>PointNet++ [24]</td><td>23.35</td><td>1.5M</td></tr>
<tr><td>PSTNet [10]</td><td>185.92</td><td>8.3M</td></tr>
<tr><td>Ours t-patch extraction</td><td>180.65</td><td>0</td></tr>
<tr><td>Ours feature computation</td><td>12.50</td><td>9.8M</td></tr>
<tr><td>Ours classifier</td><td>0.36</td><td>1.1M</td></tr>
<tr><td>Ours</td><td>193.51</td><td>10.9M</td></tr></table>
", + "image_path": "89efa058769a3a1157dd65688b63d692ab9882082145e22cb7e6d14e451ad688.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 183, + 547, + 217 + ], + "lines": [ + { + "bbox": [ + 304, + 183, + 547, + 217 + ], + "spans": [ + { + "bbox": [ + 304, + 183, + 547, + 217 + ], + "type": "text", + "content": "Table 6. Time and parameters. Temporal methods have more parameters and take longer. 3DinAction time is mostly used to extract t-patches." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 236, + 547, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 236, + 547, + 368 + ], + "spans": [ + { + "bbox": [ + 304, + 236, + 547, + 368 + ], + "type": "text", + "content": "cessing times and more parameters while performing better. For the proposed approach, we break down the timing of individual components, namely the t-patch extraction, feature computation, and classifier. The results show that the proposed approach is comparable to PSTNet in time while having more parameters. Interestingly, most of the time is used for extracting the t-patches and not for feature extraction or classification. This is attributed to the farthest point sampling and the sequential knn search, both of which could be further optimized for speed. Note that results are average of 50 runs, each with a batch of 4 and 1024 points per frame." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 368, + 547, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 368, + 547, + 477 + ], + "spans": [ + { + "bbox": [ + 304, + 368, + 547, + 477 + ], + "type": "text", + "content": "Limitations. Since the simplified formulation of t-patch construction uses " + }, + { + "bbox": [ + 304, + 368, + 547, + 477 + ], + "type": "inline_equation", + "content": "knn" + }, + { + "bbox": [ + 304, + 368, + 547, + 477 + ], + "type": "text", + "content": ", it is sensitive to variations in point densities. A t-patch in a sparse region will occupy a larger volume than a t-patch in a dense region. We use FPS to mitigate this, however, other approaches can be used e.g., using neighbors in a fixed radius. Another limitation is data with a very low frame rate or very fast motion since this breaks the assumption that points in consecutive frames are close to each other, and will cause inconsistent t-patch motion." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 485, + 384, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 485, + 384, + 498 + ], + "spans": [ + { + "bbox": [ + 306, + 485, + 384, + 498 + ], + "type": "text", + "content": "5. Conclusions" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 505, + 545, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 505, + 545, + 564 + ], + "spans": [ + { + "bbox": [ + 304, + 505, + 545, + 564 + ], + "type": "text", + "content": "We introduced the 3DinAction pipeline, a novel method for 3D point cloud action recognition. It showed that the creation of temporal patches is beneficial for finding informative spatio-temporal point representations. 3DinAction has demonstrated a performance boost over SoTA methods." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 565, + 545, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 565, + 545, + 637 + ], + "spans": [ + { + "bbox": [ + 304, + 565, + 545, + 637 + ], + "type": "text", + "content": "This work opens many interesting future directions of research. These include trying to learn the t-patch construction instead of the knn selection, imposing stronger temporal structure based on preexisting knowledge and bias (e.g., sceneflow or tracking), and exploring using multimodal inputs with this representation (e.g., RGB or text)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "content": "Acknowledgement. This project has received funding from the European Union's Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No 893465. We also thank the Microsoft for Azure Credits and NVIDIA Academic Hardware Grant Program for providing high-speed A5000 GPU." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19985" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 138 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 138 + ], + "type": "text", + "content": "[1] Yizhak Ben-Shabat, Michael Lindenbaum, and Anath Fischer. 3DMFV: Three-dimensional point cloud classification in real-time using convolutional neural networks. RAL, 3:3145-3152, 2018. 2, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 140, + 288, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 140, + 288, + 222 + ], + "spans": [ + { + "bbox": [ + 53, + 140, + 288, + 222 + ], + "type": "text", + "content": "[2] Yizhak Ben-Shabat, Xin Yu, Fatemeh Saleh, Dylan Campbell, Cristian Rodriguez-Opazo, Hongdong Li, and Stephen Gould. The aka asm dataset: Understanding people assembling furniture through actions, objects and pose. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 847-859, 2021. 3, 5, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 224, + 288, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 224, + 288, + 281 + ], + "spans": [ + { + "bbox": [ + 54, + 224, + 288, + 281 + ], + "type": "text", + "content": "[3] Federica Bogo, Javier Romero, Gerard Pons-Moll, and Michael J. Black. Dynamic FAUST: Registering human bodies in motion. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), July 2017. 
3, 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 283, + 288, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 283, + 288, + 354 + ], + "spans": [ + { + "bbox": [ + 54, + 283, + 288, + 354 + ], + "type": "text", + "content": "[4] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 961-970, 2015. 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 355, + 288, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 355, + 288, + 414 + ], + "spans": [ + { + "bbox": [ + 54, + 355, + 288, + 414 + ], + "type": "text", + "content": "[5] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 1, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 415, + 288, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 415, + 288, + 474 + ], + "spans": [ + { + "bbox": [ + 54, + 415, + 288, + 474 + ], + "type": "text", + "content": "[6] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3075-3084, 2019. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 475, + 288, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 475, + 288, + 533 + ], + "spans": [ + { + "bbox": [ + 54, + 475, + 288, + 533 + ], + "type": "text", + "content": "[7] Haodong Duan, Yue Zhao, Kai Chen, Dahua Lin, and Bo Dai. Revisiting skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2969-2978, 2022. 1, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 54, + 535, + 288, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 535, + 288, + 593 + ], + "spans": [ + { + "bbox": [ + 54, + 535, + 288, + 593 + ], + "type": "text", + "content": "[8] Hehe Fan, Yi Yang, and Mohan Kankanhalli. Point 4d transformer networks for spatio-temporal modeling in point cloud videos. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 14204-14213, 2021. 2, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 594, + 287, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 594, + 287, + 653 + ], + "spans": [ + { + "bbox": [ + 53, + 594, + 287, + 653 + ], + "type": "text", + "content": "[9] Hehe Fan, Yi Yang, and Mohan Kankanhalli. Point spatio-temporal transformer networks for point cloud video modeling. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(2):2181-2192, 2022. 2, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 654, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 654, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 654, + 287, + 712 + ], + "type": "text", + "content": "[10] Hehe Fan, Xin Yu, Yuhang Ding, Yi Yang, and Mohan Kankanhalli. Pistnet: Point spatio-temporal convolution on point cloud sequences. In International Conference on Learning Representations, 2021. 
2, 5, 6, 7, 8" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 72, + 547, + 714 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 307, + 72, + 547, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 72, + 547, + 132 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 547, + 132 + ], + "type": "text", + "content": "[11] Hehe Fan, Xin Yu, Yi Yang, and Mohan Kankanhalli. Deep hierarchical representation of point cloud videos via spatio-temporal decomposition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(12):9918-9930, 2021. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 134, + 547, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 134, + 547, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 134, + 547, + 194 + ], + "type": "text", + "content": "[12] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6202-6211, 2019. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 196, + 547, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 196, + 547, + 255 + ], + "spans": [ + { + "bbox": [ + 307, + 196, + 547, + 255 + ], + "type": "text", + "content": "[13] Ankit Goyal, Hei Law, Bowei Liu, Alejandro Newell, and Jia Deng. Revisiting point cloud shape classification with a simple and effective baseline. In International Conference on Machine Learning, pages 3809-3820. PMLR, 2021. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 258, + 547, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 258, + 547, + 294 + ], + "spans": [ + { + "bbox": [ + 307, + 258, + 547, + 294 + ], + "type": "text", + "content": "[14] Yulan Guo, Hanyun Wang, Qingyong Hu, Hao Liu, Li Liu, and Mohammed Bennamoun. Deep learning for 3d point clouds: A survey. PAMI, 2020. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 296, + 547, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 296, + 547, + 354 + ], + "spans": [ + { + "bbox": [ + 307, + 296, + 547, + 354 + ], + "type": "text", + "content": "[15] Roman Klokov and Victor Lempitsky. Escape from cells: Deep kd-networks for the recognition of 3d point cloud models. In Proceedings of the IEEE international conference on computer vision, pages 863-872, 2017. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 357, + 547, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 357, + 547, + 405 + ], + "spans": [ + { + "bbox": [ + 307, + 357, + 547, + 405 + ], + "type": "text", + "content": "[16] Hema Swetha Koppula, Rudhir Gupta, and Ashutosh Saxena. Learning human activities and object affordances from rgb-d videos. The International Journal of Robotics Research, 32(8):951-970, 2013. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 407, + 547, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 407, + 547, + 479 + ], + "spans": [ + { + "bbox": [ + 307, + 407, + 547, + 479 + ], + "type": "text", + "content": "[17] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. 
In International conference on machine learning, pages 3744-3753. PMLR, 2019. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 480, + 547, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 480, + 547, + 552 + ], + "spans": [ + { + "bbox": [ + 307, + 480, + 547, + 552 + ], + "type": "text", + "content": "[18] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In International conference on machine learning, pages 3744-3753. PMLR, 2019. 5, 6, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 554, + 547, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 554, + 547, + 613 + ], + "spans": [ + { + "bbox": [ + 307, + 554, + 547, + 613 + ], + "type": "text", + "content": "[19] Wanqing Li, Zhengyou Zhang, and Zicheng Liu. Action recognition based on a bag of 3d points. In 2010 IEEE computer society conference on computer vision and pattern recognition-workshops, pages 9-14. IEEE, 2010. 3, 5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 616, + 547, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 616, + 547, + 676 + ], + "spans": [ + { + "bbox": [ + 307, + 616, + 547, + 676 + ], + "type": "text", + "content": "[20] Jun Liu, Amir Shahroudy, Mauricio Perez, Gang Wang, Ling-Yu Duan, and Alex C Kot. Ntu rgb+ d 120: A large-scale benchmark for 3d human activity understanding. IEEE transactions on pattern analysis and machine intelligence, 42(10):2684-2701, 2019. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 677, + 547, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 677, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 677, + 547, + 714 + ], + "type": "text", + "content": "[21] Xingyu Liu, Mengyuan Yan, and Jeannette Bohg. Meteornet: Deep learning on dynamic 3d point cloud sequences. In Proceedings of the IEEE/CVF Interna" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "19986" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 69, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 287, + 95 + ], + "type": "text", + "content": "tional Conference on Computer Vision, pages 9246- 9255, 2019. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 97, + 288, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 97, + 288, + 156 + ], + "spans": [ + { + "bbox": [ + 48, + 97, + 288, + 156 + ], + "type": "text", + "content": "[22] Daniel Maturana and Sebastian Scherer. Voxnet: A 3d convolutional neural network for real-time object recognition. In 2015 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 922-928. IEEE, 2015. 
2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 158, + 288, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 158, + 288, + 217 + ], + "spans": [ + { + "bbox": [ + 48, + 158, + 288, + 217 + ], + "type": "text", + "content": "[23] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pages 652-660, 2017. 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 218, + 288, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 218, + 288, + 265 + ], + "spans": [ + { + "bbox": [ + 48, + 218, + 288, + 265 + ], + "type": "text", + "content": "[24] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In NeurIPS, volume 30, 2017. 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 266, + 288, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 266, + 288, + 326 + ], + "spans": [ + { + "bbox": [ + 48, + 266, + 288, + 326 + ], + "type": "text", + "content": "[25] Guocheng Qian, Yuchen Li, Houwen Peng, Jinjie Mai, Hasan Abed Al Kader Hammoud, Mohamed Elhoseiny, and Bernard Ghanem. Pointnext: Revisiting pointnet++ with improved training and scaling strategies. arXiv preprint arXiv:2206.04670, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 327, + 288, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 288, + 374 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 288, + 374 + ], + "type": "text", + "content": "[26] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In The IEEE International Conference on Computer Vision (ICCV), Oct 2017. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 376, + 288, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 376, + 288, + 446 + ], + "spans": [ + { + "bbox": [ + 48, + 376, + 288, + 446 + ], + "type": "text", + "content": "[27] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision, pages 618-626, 2017. 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 449, + 288, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 288, + 507 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 288, + 507 + ], + "type": "text", + "content": "[28] Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang. Ntu rgb+ d: A large scale dataset for 3d human activity analysis. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1010-1019, 2016. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 509, + 288, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 509, + 288, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 509, + 288, + 567 + ], + "type": "text", + "content": "[29] Yiru Shen, Chen Feng, Yaoqing Yang, and Dong Tian. Mining point cloud local structures by kernel correlation and graph pooling. In IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pages 4548-4557, 2018. 
2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 569, + 288, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 569, + 288, + 628 + ], + "spans": [ + { + "bbox": [ + 48, + 569, + 288, + 628 + ], + "type": "text", + "content": "[30] Jaeyong Sung, Colin Ponce, Bart Selman, and Ashutosh Saxena. Unstructured human activity detection from rgbd images. In 2012 IEEE international conference on robotics and automation, pages 842-849. IEEE, 2012. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 630, + 288, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 630, + 288, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 630, + 288, + 689 + ], + "type": "text", + "content": "[31] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 4489-4497, 2015. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 690, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 690, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 690, + 288, + 714 + ], + "type": "text", + "content": "[32] Haiyan Wang, Liang Yang, Xuejian Rong, Jinglun Feng, and Yingli Tian. Self-supervised 4d spatio" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 72, + 547, + 496 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 327, + 72, + 547, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 72, + 547, + 120 + ], + "spans": [ + { + "bbox": [ + 327, + 72, + 547, + 120 + ], + "type": "text", + "content": "temporal feature learning via order prediction of sequential point cloud clips. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3762-3771, 2021. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 121, + 547, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 121, + 547, + 192 + ], + "spans": [ + { + "bbox": [ + 307, + 121, + 547, + 192 + ], + "type": "text", + "content": "[33] Yancheng Wang, Yang Xiao, Fu Xiong, Wenxiang Jiang, Zhiguo Cao, Joey Tianyi Zhou, and Junsong Yuan. 3dv: 3d dynamic voxel for action recognition in depth video. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 511-520, 2020. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 194, + 546, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 194, + 546, + 242 + ], + "spans": [ + { + "bbox": [ + 307, + 194, + 546, + 242 + ], + "type": "text", + "content": "[34] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. Acm Transactions On Graphics (tog), 38:1-12, 2019. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 243, + 546, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 243, + 546, + 303 + ], + "spans": [ + { + "bbox": [ + 307, + 243, + 546, + 303 + ], + "type": "text", + "content": "[35] Siyuan Yang, Jun Liu, Shijian Lu, Meng Hwa Er, and Alex C Kot. Skeleton cloud colorization for unsupervised 3d action representation learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13423-13433, 2021. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 304, + 546, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 304, + 546, + 350 + ], + "spans": [ + { + "bbox": [ + 307, + 304, + 546, + 350 + ], + "type": "text", + "content": "[36] Cheng Zhang, Haocheng Wan, Shengqiang Liu, Xinyi Shen, and Zizhao Wu. Pvt: Point-voxel transformer for 3d deep learning. arXiv preprint arXiv:2108.06076, 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 353, + 546, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 353, + 546, + 411 + ], + "spans": [ + { + "bbox": [ + 308, + 353, + 546, + 411 + ], + "type": "text", + "content": "[37] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip H.S. Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 16259-16268, October 2021. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 414, + 547, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 414, + 547, + 496 + ], + "spans": [ + { + "bbox": [ + 308, + 414, + 547, + 496 + ], + "type": "text", + "content": "[38] Jia-Xing Zhong, Kaichen Zhou, Qingyong Hu, Bing Wang, Niki Trigoni, and Andrew Markham. No pain, big gain: classify dynamic point cloud sequences with static models by fitting feature-level space-time surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8510-8520, 2022. 2, 7" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "19987" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/128911ab-0f2f-4697-8895-080e5b45c36b_content_list.json b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/128911ab-0f2f-4697-8895-080e5b45c36b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e07e6faefb47f21dbe8152671cf20e413bffb729 --- /dev/null +++ b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/128911ab-0f2f-4697-8895-080e5b45c36b_content_list.json @@ -0,0 +1,1507 @@ +[ + { + "type": "text", + "text": "3DSFLabelling: Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling", + "text_level": 1, + "bbox": [ + 93, + 130, + 875, + 154 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chaokang Jiang $^{1}$ , Guangming Wang $^{2}$ , Jiuming Liu $^{3}$ , Hesheng Wang $^{3}$ , Zhuang Ma $^{1}$ , Zhenqiang Liu $^{1}$ , Zhujin Liang $^{1}$ , Yi Shan $^{1}$ , Dalong Du $^{1\\dagger}$", + "bbox": [ + 153, + 179, + 815, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1PhiGent Robotics, 2University of Cambridge, 3Shanghai Jiaotong University", + "bbox": [ + 179, + 215, + 790, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ts20060079a31@cumt.edu.cn, gw462@cam.ac.uk, {liujiuming, wanghesheng}@sjtu.edu.cn, mazhuang097@outlook.com, {zhenqiang.liu, zhujin.liang, yi.shan, dalong.du}@phigent.ai jiangchaokang.github.io/3DSFLabelling-Page", + 
"bbox": [ + 109, + 236, + 854, + 286 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 321, + 313, + 338 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Learning 3D scene flow from LiDAR point clouds presents significant difficulties, including poor generalization from synthetic datasets to real scenes, scarcity of real-world 3D labels, and poor performance on real sparse LiDAR point clouds. We present a novel approach from the perspective of auto-labelling, aiming to generate a large number of 3D scene flow pseudo labels for real-world LiDAR point clouds. Specifically, we employ the assumption of rigid body motion to simulate potential object-level rigid movements in autonomous driving scenarios. By updating different motion attributes for multiple anchor boxes, the rigid motion decomposition is obtained for the whole scene. Furthermore, we developed a novel 3D scene flow data augmentation method for global and local motion. By perfectly synthesizing target point clouds based on augmented motion parameters, we easily obtain lots of 3D scene flow labels in point clouds highly consistent with real scenarios. On multiple real-world datasets including LiDAR KITTI, nuScenes, and Argoverse, our method outperforms all previous supervised and unsupervised methods without requiring manual labelling. Impressively, our method achieves a tenfold reduction in EPE3D metric on the LiDAR KITTI dataset, reducing it from $0.190m$ to a mere $0.008m$ error.", + "bbox": [ + 75, + 353, + 473, + 700 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 729, + 209, + 744 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D scene flow estimation through deducing per-point motion filed from consecutive frames of point clouds, serves a critical role across various applications, encompassing motion prediction [33, 48], anomaly motion detection [15], 3D object detection [8, 16, 50], and dynamic point cloud accumulation [14]. With the advancing of deep learning on point clouds [37, 38], many works [4, 9, 18, 27, 36, 39, 51] have developed the learning-based", + "bbox": [ + 75, + 755, + 468, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/19952b9b470f368f6e4bc0b9866aded47f2609d58f00c7a451bae351021bd4f4.jpg", + "image_caption": [ + "Figure 1. The proposed 3D scene flow pseudo-auto-labelling framework. Given point clouds and initial bounding boxes, both global and local motion parameters are iteratively optimized. Diverse motion patterns are augmented by randomly adjusting these motion parameters, thereby creating a diverse and realistic set of motion labels for the training of 3D scene flow estimation models." + ], + "image_footnote": [], + "bbox": [ + 503, + 319, + 890, + 498 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "methods to estimate per-point motion from 3D point clouds. Some state-of-the-art methods [4, 39, 51] have reduced the average 3D Endpoint Error (EPE3D) to a few centimetres on the KITTI Scene Flow dataset (stereoKITTI) [30, 31]. However, due to the scarcity of scene flow labels, these methods rely heavily on synthetic datasets such as FlyingThings3D (FT3D) [29] for network training.", + "bbox": [ + 496, + 611, + 890, + 717 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "When evaluated on the stereoKITTI dataset [30, 31], PV-RAFT [47] demonstrates an average EPE3D of just $0.056m$ . 
However, when evaluated on the Argoverse dataset [3], the EPE3D metric astonishingly exceeds $10m$ [24]. Therefore, learning 3D scene flow on synthetic dataset [29] has a large gap with real-world application. Jin et al. [18] recently introduce a new synthetic dataset, GTA-SF, simulating LiDAR scans for autonomous driving. They propose a teacher-student domain adaptation framework to reduce the gap between synthetic and real datasets and improve some performance of 3D scene flow estimation. However, their performance is still poor in", + "bbox": [ + 496, + 719, + 892, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{1\\dagger}$ Corresponding author.", + "bbox": [ + 94, + 886, + 227, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "15173", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a3238b09d16c6cfed576ad678b98173ea1569ef35c043a7d0fc9140d4eb15084.jpg", + "image_caption": [ + "Figure 2. The accuracy improvement after integrating our proposed pseudo-auto-labelling method. Models trained on synthetic data performance poorly in 3D scene flow estimation for LiDAR-based autonomous driving. Our proposed 3D pseudo-auto-labelling method improves accuracy, reaching an EPE3D below $2cm$ across datasets [2, 3, 31]." + ], + "image_footnote": [], + "bbox": [ + 86, + 94, + 450, + 251 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "real-world LiDAR data because of ideal sensor models and lack of scene variety. Ideally, models should learn from real sensor data in the autonomous driving field. However, labelling each point's 3D motion vector for the 3D scene flow task is extremely costly. This has driven many works [6, 22, 28, 32, 39, 44] towards unsupervised or self-supervised learning of 3D scene flow. Although these methods have achieved reasonable accuracy, they still fall behind supervised methods, highlighting the importance of real sensor data and corresponding 3D scene flow labels.", + "bbox": [ + 75, + 368, + 468, + 518 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we address three key challenges in the field of autonomous driving: the reliance on synthetic datasets that still have a poor generalization with real-world scenarios, the scarcity of scene flow labels in actual driving scenes, and the poor performance of existing 3D scene flow estimation networks on real LiDAR data. Inspired by the rigid motion assumptions in RigidFlow [22] and RSF [5], we propose a novel scene flow auto-labelling approach that leverages the characteristics of rigid motion prevalent in autonomous driving scenarios (Fig. 1). Specifically, we utilize 3D anchor boxes to segment 3D objects in point clouds. The attributes of each object-level box are not only position and size but also rotation, translation, motion status, and normal vector attributes. 
By leveraging the constrained loss functions for the box parameters and inter-frame association, we optimize the attributes of the boxes, subsequently combining these parameters with the source point cloud to produce a realistic target point cloud. Importantly, the generated target point cloud maintains a one-to-one correspondence with the source point cloud, enabling the efficient generation of pseudo 3D scene flow labels.", + "bbox": [ + 75, + 520, + 470, + 838 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To capture a more diverse range of motion patterns, we introduce a novel data augmentation strategy for 3D scene flow auto-labelling. Utilizing the attributes of each box, we simulate the rotations, translations, and motion status", + "bbox": [ + 75, + 839, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "of both the ego vehicle and surrounding environment by adding Gaussian noise to these attributes. Consequently, we obtain numerous 3D scene flow labels with diverse motions that closely resemble real-world scenarios, furnishing the neural network with rich real training data and significantly improving the generalization capabilities of learning-based methods. Experimental results validate that our pseudo-label generation strategy consistently achieves state-of-the-art scene flow estimation results across various models [4, 36, 51] and datasets [2, 3, 30] (Fig. 2).", + "bbox": [ + 496, + 90, + 890, + 243 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our contributions are as follows:", + "bbox": [ + 517, + 248, + 818, + 262 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a new framework for the automatic labelling of 3D scene flow pseudo-labels, significantly enhancing the accuracy of current scene flow estimation models, and effectively addressing the scarcity of 3D flow labels in autonomous driving.", + "- We propose a universal 3D box optimization method with multiple motion attributes. Building upon this, we further introduce a plug-and-play 3D scene flow augmentation module with global-local motions and motion status. This allows for flexible motion adjustment of ego-motion and dynamic environments, setting a new benchmark for scene flow data augmentation.", + "- Our method achieves state-of-the-art performance on KITTI, nuScenes, and Argoverse LiDAR datasets. Impressively, our approach surpasses all supervised and unsupervised methods without requiring any synthesising data and manual scene flow labels." + ], + "bbox": [ + 500, + 267, + 890, + 523 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 554, + 640, + 569 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Supervised 3D Scene Flow Learning", + "text_level": 1, + "bbox": [ + 500, + 584, + 813, + 601 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In recent years, the performance of methods [28, 34, 42] for 3D scene flow based on point cloud deep learning has surpassed traditional methods. FlowNet3D [28] pioneers an end-to-end approach to learning 3D scene flow from point clouds. Some works, such as HALFlow [13], 3DFlow [43], PointPWC [49], and WSAFlowNet [45], utilize PWC structures to learn 3D scene flow in a coarse-to-fine manner. 
Other methods address the disorderliness of points by voxelizing point clouds and using sparse convolution or voxel correlation fields to learn 3D scene flow, such as PV-RAFT [47], DPV-RAFT [46], and SCTN [21]. Additional work refines the estimated scene flow through iterative procedures. MSBRN [4] proposes bidirectional gated recurrent units for iteratively estimating scene flow. GMSF [51] and PT-FlowNet [9] introduce point cloud transformers into 3D scene flow estimation networks. These supervised learning methods for 3D scene flow heavily rely on ground truth and are all trained on the FT3D dataset [29] and evaluated on stereoKITTI [30, 31] for network generalization test.", + "bbox": [ + 496, + 613, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "15174", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Unsupervised 3D Scene Flow Learning", + "text_level": 1, + "bbox": [ + 76, + 90, + 410, + 107 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "JGwF [32] and PointPWC [49] initially propose several self-supervised learning losses such as cycle consistency loss and chamfer loss. EgoFlow [40] distinguishes 3D scene flow into ego-motion flow and remaining non-rigid flow, achieving self-supervised learning based on temporal consistency. SFGAN [44] introduces generative adversarial concepts into self-supervised learning for 3D scene flow. Recently, works like R3DSF [12], RigidFlow [22], and LiDARSceneFlow [7] greatly improve the accuracy of 3D scene flow estimation by introducing local or object-level rigidity constraints. RigidFlow [22] explicitly enforces rigid alignment within super-voxel regions by decomposing the source point cloud into multiple supervoxels. R3DSF [12] separately considers background and foreground object-level 3D scene flow, relying on segmentation and odometry tasks [25, 26].", + "bbox": [ + 75, + 113, + 472, + 356 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. 3D Scene Flow Optimization", + "text_level": 1, + "bbox": [ + 76, + 366, + 333, + 383 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D scene flow optimization techniques have demonstrated remarkable generalization capabilities, attracting a significant amount of academic research recently. Graph prior [35] optimizes scene flow to be as smooth as possible by using the Laplacian of point clouds. Some techniques introduce neural networks to optimize 3D scene flow. NSFP [23] introduces a novel implicit regularizer, the Neural Scene Flow Prior, which primarily depends on runtime optimization and robust regularization. RSF [5] combines global ego-motion with object-specific rigid movements to optimize 3D bounding box parameters and compute scene flow. FastNSF [24] also adopts neural scene flow prior, and it shows more advantages in dealing with dense LiDAR points compared to learning methods. SCOOP [20], in the runtime phase, directly optimizes the flow refinement module using self-supervised objectives. Although optimization-based approaches for 3D scene flow estimation have demonstrated impressive accuracy, they typically involve high computational costs.", + "bbox": [ + 75, + 390, + 472, + 679 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. 
3DSFLabelling", + "text_level": 1, + "bbox": [ + 76, + 691, + 230, + 709 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D scene flow estimation infers the 3D flow, $SF_{pred} \\in \\mathbb{R}^{3 \\times N_1}$ from the source point cloud $PC_S \\in \\mathbb{R}^{3 \\times N_1}$ and the target point cloud $PC_T \\in \\mathbb{R}^{3 \\times N_2}$ for each point in the source point. Previous self-supervised learning methods [32, 49] typically use the estimated 3D motion vector $SF_{pred}$ to warp the source point cloud $PC_S$ to the target point cloud $PC_{Sw}$ . By comparing the difference between $PC_{Sw}$ and $PC_T$ , a supervisory signal is generated.", + "bbox": [ + 76, + 718, + 468, + 839 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In contrast with previous self-supervised learning methods, we propose bounding box element optimization to obtain the boxes and the box motion parameters from raw unlabelled point cloud data. Then, we use object-box-level", + "bbox": [ + 75, + 839, + 470, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "motion parameters and global motion parameters to warp each box's points and the whole point cloud to the target point cloud, generating corresponding pseudo 3D scene flow labels. During the warping process of each object box, we propose augmenting the motion attributes of each object and the whole scene. This diversity assists the network in capturing a broader range of motion behaviours.", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Prerequisites", + "text_level": 1, + "bbox": [ + 498, + 205, + 637, + 220 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Apart from the two input point clouds, we do not require any extra labels, such as object-level tracking and semantic information, or vehicle ego-motion labels. To reinforce the geometric constraints in the pseudo label generation module, we employ Open3d [52] to generate coarse per-point normals. Despite these normals not being perfectly accurate, they are readily obtainable and can provide useful geometric constraints. Finally, we establish initial 3D anchor boxes with specific centers $(x,y,z)$ , width $w$ , length $l$ , height $h$ , and rotation angle $\\theta$ , in accordance with the range of input points. As depicted in Fig. 3, the inputs of our model consist of the initial anchor box set, $PC_{S}$ , $PC_{T}$ , and point cloud normals $N_{S}$ .", + "bbox": [ + 496, + 228, + 890, + 425 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Motion Parameter Optimization Module", + "text_level": 1, + "bbox": [ + 498, + 433, + 848, + 450 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As shown in Fig. 3, we present the process of simulating the motion of point clouds in actual autonomous driving by updating four sets of parameters: differentiable bounding boxes $\\Phi = [c,s,\\theta]$ , global motion parameters $\\Theta = [R_{ego},t_{ego}]$ , motion parameters for each box $[R_{perbox},t_{perbox}]$ , and motion probability $P_M$ for each box. The variables $c$ , $s$ , and $\\theta$ represent the center coordinates, size, and orientation of the 3D box, respectively.", + "bbox": [ + 496, + 455, + 890, + 577 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Inspired by RSF [5], we use the motion of object-level bounding boxes to present the point-wise 3D motion and make the step-like boxes differentiable through sigmoid approximation. 
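To make the sigmoid relaxation concrete, the sketch below shows one common way such a differentiable point-in-box test can be written in PyTorch. The function name, the yaw-only rotation, and the slope value are illustrative assumptions for exposition, not details taken from the paper's released code:

```python
import torch

def soft_box_membership(points, center, size, yaw, alpha=5.0):
    """Differentiable point-in-box weight for one 3D box (illustrative sketch).

    points: (N, 3); center: (3,); size: (3,) full extents (w, l, h);
    yaw: scalar tensor; alpha: sigmoid slope, where a larger value pushes the
    weight closer to a hard 0/1 step on the box boundary.
    """
    cos_t, sin_t = torch.cos(yaw), torch.sin(yaw)
    p = points - center
    # Undo the box yaw so the box is axis-aligned in its own frame.
    x = cos_t * p[:, 0] + sin_t * p[:, 1]
    y = -sin_t * p[:, 0] + cos_t * p[:, 1]
    local = torch.stack([x, y, p[:, 2]], dim=-1)
    half = size / 2.0
    # Soft interval indicator per axis: ~1 inside [-half, half], ~0 outside.
    per_axis = torch.sigmoid(alpha * (local + half)) - torch.sigmoid(alpha * (local - half))
    return per_axis.prod(dim=-1)  # (N,) soft membership weights
```

Because the weights are smooth in the center, size, and yaw, gradients from the losses defined below can flow back into the box attributes Φ = [c, s, θ].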
By transforming the individual points to the bounding boxes, we introduce an object-level perception of the scene, enabling a more natural capture of rigid motion. This method proves advantageous in autonomous driving scenarios, where most objects predominantly exhibit rigid behaviour [12]. Additionally, in the context of autonomous driving, most scene motion is typically caused by the ego motion of the vehicle. Hence, setting global motion parameters is necessary to simulate the global consistent rigid motion of the whole scene. To discern whether the motion of each box is caused by ego-motion, we also set up a motion probability for each bounding box.", + "bbox": [ + 496, + 578, + 890, + 803 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "With the initial set of four motion parameters, the source point cloud is warped to the target frame, as follows:", + "bbox": [ + 496, + 804, + 890, + 834 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nP C _ {T} ^ {\\Theta}, P C _ {T} ^ {\\Phi} = \\Omega_ {1} (\\Theta , P C _ {S}), \\Omega_ {2} (\\Upsilon (\\Phi , P C _ {S})), (1)\n$$\n", + "text_format": "latex", + "bbox": [ + 539, + 842, + 890, + 862 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\Theta$ represents global motion parameters. $\\Phi$ represents motion parameters of each bounding box, and $\\Omega_{1}$ and $\\Omega_{2}$", + "bbox": [ + 496, + 869, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "15175", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/47edac90fd1ac4ffa004220a61c20d6756b11dce719163b184c42dbfc5435c53.jpg", + "image_caption": [ + "Figure 3. The proposed learning framework of pseudo 3D scene flow automatic labelling. The input comprises 3D anchor boxes, a pair of point clouds, and their corresponding coarse normal vectors. The optimization of motion parameters primarily updates the bounding box parameters, global motion parameters, local motion parameters, and the motion probability of the boxes. The attribute parameters for boxes are updated through backward optimization from six objective functions. Once optimized, the motion parameters simulate various types of motion using a global-local data augmentation module. A single source frame point cloud, along with the augmented motion parameters, produces diverse 3D scene flow labels. These labels serve to guide the supervised neural network to learn point-wise motion." + ], + "image_footnote": [], + "bbox": [ + 86, + 87, + 883, + 393 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "are background and foreground warping functions, respectively, generating the warped point clouds $PC_T^\\Theta$ and $PC_T^\\Phi$ . $\\Upsilon$ signifies the removal of boxes with too few points.", + "bbox": [ + 76, + 506, + 468, + 551 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Based on the real target frame of point cloud and the generated target point clouds $PC_T^\\Theta$ and $PC_T^\\Phi$ , we define loss functions to update and optimize the box attributes. 
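Read operationally, Eq. (1) amounts to two warping passes over the same source cloud: one global ego-motion warp and one per-box warp after discarding under-populated boxes. A minimal sketch, assuming per-box boolean point masks and SE(3) parameters are already available (the names and the point-count threshold used for Υ are assumptions for illustration):

```python
import torch

def warp_with_motion_params(pc_s, R_ego, t_ego, boxes, min_points=10):
    """Sketch of Eq. (1): warp the source cloud with global and per-box motions.

    pc_s: (N, 3) source points. boxes: list of dicts holding a boolean point
    'mask', a per-box rotation 'R' (3, 3) and translation 't' (3,).
    """
    # Omega_1: globally consistent rigid (ego) motion applied to every point.
    pc_t_global = pc_s @ R_ego.T + t_ego

    # Upsilon: drop boxes with too few points to constrain a rigid motion.
    kept = [b for b in boxes if b["mask"].sum() >= min_points]

    # Omega_2: per-box rigid motion applied to the points inside each kept box;
    # points outside every box are left to the background (global) branch.
    pc_t_boxes = pc_s.clone()
    for b in kept:
        pts = pc_s[b["mask"]]
        pc_t_boxes[b["mask"]] = pts @ b["R"].T + b["t"]
    return pc_t_global, pc_t_boxes
```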
We separately calculate the background and foreground losses:", + "bbox": [ + 75, + 551, + 470, + 612 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {B G} = \\kappa \\left(N _ {T} ^ {\\Theta} \\oplus P C _ {T} ^ {\\Theta}, N _ {T} \\oplus P C _ {T}\\right) + \\delta \\left(P C _ {T} ^ {\\Theta}, P C _ {T}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 617, + 468, + 636 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} L _ {F G} = \\frac {1}{K _ {b o x}} \\sum P _ {M} \\times (\\kappa \\left(N _ {T} ^ {\\Phi} \\oplus P C _ {T} ^ {\\Phi}, N _ {T} \\oplus P C _ {T}\\right) \\\\ + \\delta \\left(P C _ {T} ^ {\\Phi}, P C _ {T}\\right)), \\tag {3} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 638, + 468, + 700 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\kappa$ is a function calculating nearest neighbour matches between the transformed point cloud and the target point cloud. $\\delta$ is a pairwise distance function with location encoding. $K_{box}$ is the number of boxes, $P_M$ is the motion probability of each box, and the term $N_T\\oplus PC_T$ represents the concatenation of the target point cloud's normal and positions. As for the motion probability $P_M$ of each box:", + "bbox": [ + 75, + 702, + 468, + 809 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP _ {M} = \\sigma \\left(\\alpha \\times \\left(\\Omega_ {3} \\left(\\Phi , \\gamma_ {i}\\right) + \\beta_ {i}\\right)\\right) - \\alpha \\times \\left(\\Omega_ {3} \\left(\\Phi , \\gamma_ {i}\\right) - \\beta_ {i}\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 816, + 468, + 834 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\sigma(x)$ represents the sigmoid function, $\\alpha$ is a hyperparameter 'slope' in the sigmoid, $\\beta$ represents the half size of the vector of 3D dimensions $w$ , $l$ , and $h$ of the bounding box. Coordinate values $\\gamma$ in the source point cloud are", + "bbox": [ + 75, + 839, + 470, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "warped to the target point cloud via motion box parameters $\\Phi$ . For each dynamic box, each point's relative position to the box's centre is calculated. Higher motion probability $P_{M}$ is assigned to the points closer to the centre. A fixed hyperparameter $\\alpha$ , controlling motion probability, may not effectively respond to diverse and complex autonomous driving scenarios. Therefore, we adopt an adaptive computation of $\\alpha$ based on the variance of the point nearest-neighbour consistency loss from the previous generation. The variance in the nearest-neighbour consistency loss for different points in the background implies the distribution of dynamic objects in the scene. With fewer moving objects indicated by a lower variance, $\\alpha$ should be adaptively reduced, tending to produce lower motion probability $P_{M}$ for points.", + "bbox": [ + 496, + 506, + 890, + 717 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In addition to $L_{BG}$ and $L_{FG}$ , we introduce box dimension regularization, heading term, and angle term to constrain the dimensions, heading, and rotation angles of the bounding boxes within a reasonable range [5]. 
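As a rough illustration of the κ matching term used in L_BG and L_FG above, the snippet below computes a symmetric Chamfer-style nearest-neighbour cost on features formed by concatenating normals with positions (the ⊕ in Eqs. (2)-(3)). The exact matching function used by the authors may differ, and a brute-force distance matrix is used here purely for clarity; a KD-tree or chunked search would be preferable at LiDAR scale:

```python
import torch

def nn_consistency_loss(feat_pred, feat_target):
    """Sketch of a nearest-neighbour consistency cost on [normal ⊕ xyz] features.

    feat_pred: (N, 6) features of the warped cloud; feat_target: (M, 6)
    features of the real target cloud. The symmetric (two-sided) form is an
    assumption; the paper only states that kappa computes nearest-neighbour matches.
    """
    d = torch.cdist(feat_pred, feat_target)   # (N, M) pairwise distances
    fwd = d.min(dim=1).values.mean()          # warped -> target matches
    bwd = d.min(dim=0).values.mean()          # target -> warped matches
    return fwd + bwd
```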
We also introduce a mass term to ensure that there are as many points as possible within the box, making the estimated motion parameters of the box more robust [5].", + "bbox": [ + 496, + 717, + 890, + 823 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Data Augmentation for 3D Flow Auto-labelling", + "text_level": 1, + "bbox": [ + 500, + 832, + 890, + 849 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Existing data augmentation practices [49] often add consistent random rotations and noise offsets to the input points, which indeed yields certain benefits. However, in", + "bbox": [ + 496, + 854, + 892, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "15176", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a87fff64d105338c18d0dd90d331d3992cd37f111193d4e44fff8b2f4adcd16a.jpg", + "image_caption": [ + "Figure 4. The proposed pseudo label generation module. With the augmented motion probability $P_{M}^{*}$ , bounding boxes are categorized into dynamic and static types. Using global and local motion parameters, the $PC_{S}$ is warped to the target point cloud $PC_{T}^{*}$ . Finally, pseudo 3D scene flow labels $SF$ are derived from the correspondence between $PC_{T}^{*}$ and $PC_{S}$ . $K_{box}$ represents the number of boxes." + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 465, + 239 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "autonomous driving scenarios, there are frequently various complex motion patterns for multiple objects. To make models learn complex scene motion rules, we propose a novel data augmentation method for scene flow labelling in both global and object-level motions. Our method simulates a broad spectrum of 3D scene flow data variations, originating from ego-motion and dynamic object movement, thereby providing a promising solution to the challenge of securing abundant 3D scene flow labels.", + "bbox": [ + 75, + 369, + 468, + 505 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As in Fig. 3, random noise is applied to either global or local motion parameters respectively. We generate a random rotation angle $\\alpha$ and a random unit vector $\\mathbf{u}$ for the rotation direction using random noise. They are used to create the Lie algebra $\\xi$ . Subsequently, the Lie algebra $\\xi$ is converted into a rotation matrix $\\mathbf{M}$ using the Rodrigues' rotation formula and applied to the original rotation matrix $\\mathbf{R}$ to obtain a new rotation matrix $\\mathbf{R}^*$ , as follows:", + "bbox": [ + 75, + 506, + 468, + 626 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {M} = \\mathbf {I} + \\sin (| \\pmb {\\xi} |) \\frac {\\pmb {\\xi}}{| \\pmb {\\xi} |} _ {\\times} + (1 - \\cos (| \\pmb {\\xi} |)) \\left(\\frac {\\pmb {\\xi}}{| \\pmb {\\xi} |} _ {\\times}\\right) ^ {2}, \\quad (5) \\\\ \\boldsymbol {\\xi} = \\alpha \\boldsymbol {u}, \\mathbf {R} ^ {*} = \\mathbf {R M}. \\tag {6} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 627, + 468, + 694 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The Lie algebra element $\\xi$ , the product of scalar $\\alpha$ and unit vector $\\mathbf{u}$ , signifies rotation magnitude and direction, with $\\alpha$ and $\\mathbf{u}$ representing rotation angle and axis, respectively. $\\mathbf{I}$ is identity matrix, and $\\xi \\times \\xi$ is the antisymmetric matrix of $\\xi$ . 
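A compact way to picture Eqs. (5)-(6) is the following NumPy sketch: sample an axis-angle perturbation, map it through Rodrigues' formula to a small rotation, and right-multiply the existing rotation matrix. The noise range is an illustrative assumption:

```python
import numpy as np

def perturb_rotation(R, max_angle=0.05):
    """Sketch of Eqs. (5)-(6): jitter a rotation matrix with axis-angle noise.

    A random angle and a random unit axis define xi = angle * axis; Rodrigues'
    formula maps xi to a small rotation M, and the update is R* = R @ M.
    `max_angle` (radians) is an assumed, illustrative noise bound.
    """
    angle = np.random.uniform(0.0, max_angle)
    axis = np.random.randn(3)
    axis /= np.linalg.norm(axis)
    # Skew-symmetric (hat) matrix of the unit rotation axis.
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    # Rodrigues: M = I + sin(angle) * K + (1 - cos(angle)) * K @ K.
    M = np.eye(3) + np.sin(angle) * K + (1.0 - np.cos(angle)) * (K @ K)
    return R @ M
```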
Lie algebra intuitively and conveniently represents minor $SO(3)$ group variations. Rodrigues' rotation formula, mapping from the Lie algebra to the Lie group, facilitates the transformation of angle-based noise into a form directly applicable to the rotation matrix. This transformation brings mathematical convenience, making the update of the rotation matrix concise and efficient.", + "bbox": [ + 75, + 703, + 468, + 868 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Importantly, our data augmentation targets dynamically moving objects, because persistently adding varied motion", + "bbox": [ + 76, + 869, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "noise to bounding boxes perceived as static objects may disrupt original data distribution. Moreover, the translation and motion probability are also augmented. As depicted in Fig. 3, we generate noise within an appropriate range and directly add it to the translation matrix or motion probability, resulting in augmented translation and motion probability.", + "bbox": [ + 496, + 90, + 890, + 183 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Pseudo Label Generation for 3D Scene Flow", + "text_level": 1, + "bbox": [ + 500, + 190, + 874, + 205 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The motion parameters are fed into the pseudo label generation module to obtain point-wise 3D scene flow labels. The specific process of the label generation module is shown in Fig. 4. We determine the motion state of the 3D bounding box through the motion probability $P_{M}$ :", + "bbox": [ + 496, + 214, + 890, + 290 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nP C _ {T} ^ {*} = \\left\\{ \\begin{array}{l l} P C _ {S} \\times R _ {e g o} ^ {*} + t _ {e g o} ^ {*} & \\text {i f} P _ {M} ^ {*} < \\mathbb {J}, \\\\ P C _ {S} ^ {e g o} \\times R _ {\\text {p e r b o x}} ^ {*} + t _ {\\text {p e r b o x}} ^ {*} & \\text {i f} P _ {M} ^ {*} \\geq \\mathbb {J}. \\end{array} \\right. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 299, + 890, + 339 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$PC_{S}^{ego}$ is the points in the dynamic box from the source point cloud, transformed through global rotation and translation. When $P_{M}$ is less than threshold $\\mathbb{J}$ , the current bounding box is deemed static. Conversely, if $P_{M}$ exceeds a predefined threshold $\\mathbb{J}$ , the current bounding box is considered dynamic. For static boxes, based on the existing global motion, we apply a uniform noise to all static boxes to simulate various ego-motion patterns. By adding minute noise to the motion probability $P_{M}$ for each box, we can construct various motion states and show a greater variety of scene motions. Before transforming the dynamic boxes, a prior global transformation of all points is required. For dynamic bounding boxes, we add various noises to their existing motion, generating new rotations and translations, thereby creating various motion patterns. We warp the source point cloud within each box to the target frame using the box's motion parameters, obtaining the pseudo target point cloud $PC_{T}^{*}$ .", + "bbox": [ + 496, + 348, + 890, + 621 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The generated pseudo target point cloud $PC_T^*$ and the real source frame point cloud $PC_S$ have a perfect correspondence. 
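Putting Eq. (7) into code-like form, a minimal sketch of the label generation step might look as follows. The box dictionary layout and the 0.5 threshold stand in for the paper's threshold 𝕁 and are assumptions for illustration; the final subtraction simply reads the labels off the one-to-one correspondence noted above:

```python
import torch

def generate_pseudo_labels(pc_s, boxes, R_ego, t_ego, motion_thresh=0.5):
    """Sketch of pseudo-label generation with augmented motion parameters.

    boxes: dicts with a boolean point 'mask', an augmented motion probability
    'p_m', and an augmented per-box motion ('R', 't').
    """
    # Every point first receives the (augmented) global ego-motion.
    pc_t_star = pc_s @ R_ego.T + t_ego

    for b in boxes:
        if b["p_m"] >= motion_thresh:
            # Dynamic box: apply its own rigid motion on top of the ego-motion.
            pts = pc_t_star[b["mask"]]
            pc_t_star[b["mask"]] = pts @ b["R"].T + b["t"]
        # Static box: keep the ego-motion-only result.

    flow = pc_t_star - pc_s   # per-point pseudo scene flow label
    return pc_t_star, flow
```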
Therefore, the 3D scene flow labels can be easily obtained by directly subtracting $PC_S$ from $PC_T^*$ :", + "bbox": [ + 496, + 621, + 890, + 681 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS F = P C _ {T} ^ {*} - P C _ {S}. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 622, + 691, + 890, + 709 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The generated scene flow labels capture various motion patterns from real autonomous driving scenes. They help the model understand and adjust to complex driving conditions. This improves the model's ability to generalize in unfamiliar real-world scenarios.", + "bbox": [ + 496, + 718, + 890, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 806, + 633, + 824 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Datasets", + "text_level": 1, + "bbox": [ + 500, + 832, + 601, + 845 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Test Datasets: Graph prior [35] introduces two autonomous driving datasets, Argoverse scene flow [3] and nuScenes scene flow [2] datasets. Scene flow labels in the", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "15177", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/a0197c8a729456b8824436c8ad9d1074e2e786e308f8cc618e8052e4a799a3fc.jpg", + "table_caption": [ + "Table 1. Comparison of our method with the best-performing methods on multiple datasets [2, 3, 10] and metrics. 'None', 'Weak', 'Self', and 'Full' represent non-learning, weakly supervised, self-supervised, and supervised methods, respectively. \"↑\" means higher is better, and \"↓\" means lower is better. Our method uses GMSF [51] as a baseline and combines it with our proposed pseudo-auto-labelling framework, 3DSFlabelling. Despite the use of a supervised learning structure, no ground truth is utilized in training." + ], + "table_footnote": [], + "table_body": "
MethodSup.LiDAR KITTI Scene Flow [10]Argoverse Scene Flow [3]nuScenes Scene Flow [2]
EPE3D↓Acc3DS↑Acc3DR↑Outliers↓EPE3D↓Acc3DS↑Acc3DR↑Outliers↓EPE3D↓Acc3DS↑Acc3DR↑Outliers↓
Graph prior [35]None----0.25700.25240.4760-0.28900.20120.4354-
RSF [5]None0.08500.88300.92900.2390----0.10700.71700.86200.3210
NSFP [23]None0.14200.68800.82600.38500.15900.38430.6308-0.17510.35180.63450.5270
R3DSF [12]Weak0.09400.78400.88500.31400.41600.34520.43100.5580----
FlowNet3D [28]Full0.72200.03000.12200.96500.45500.01340.06120.73600.50500.21200.10810.6200
PointPWC [49]Full0.39000.38700.55000.65300.42880.04620.21640.91990.78830.02870.13330.9410
DCA-SRSFE [18]Full0.59000.15050.33310.84850.79570.07120.14680.97990.70420.05380.11830.9766
FLOT [36]Full0.65320.15540.31300.83710.24910.09460.31260.86570.48580.08210.26690.8547
MSBRN [4]Full0.01390.97520.98470.14330.86910.24320.28540.75970.61370.23540.29240.7638
GMSF [51]Full0.19000.29620.55020.61717.27760.00360.01440.99309.42310.00340.00860.9943
Mittal et al. [32]Self0.97730.00960.05240.99360.65200.03190.11590.96210.84220.02890.10410.9615
Jiang et al. [17]Self0.49080.20520.42380.72860.25170.12360.36660.81140.47090.10340.31750.8191
OursSelf0.00780.99240.99470.13280.00930.97800.98800.13020.01850.95340.97130.1670
", + "bbox": [ + 80, + 148, + 890, + 356 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/cd41548fefd727ed8f693a1c32f9c3362eadb605cfb897aae8130f1adf35331b.jpg", + "image_caption": [ + "Figure 5. Registration visualization results of our method (GMSF [51] + 3DSFlabelling) and baselines on the LiDAR KITTI and Argoverse datasets [3, 10]. The estimated target point cloud $PC_{sw}$ is derived from warping the source point cloud $PC_S$ to the target point cloud via 3D scene flow. The larger the overlap between $PC_{sw}$ (blue) and the target point cloud $PC_T$ (green), the higher the predicted accuracy of the scene flow. Local areas are zoomed in for better visibility. Our 3D scene flow estimation notably improves performance." + ], + "image_footnote": [], + "bbox": [ + 76, + 358, + 890, + 669 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "datasets are derived from LiDAR point clouds, object trajectories, map data, and vehicle pose. The datasets contain 212 and 310 test samples, respectively. R3DSF [12] introduces the lidarKITTI [10], which shares 142 scenes with stereoKITTI, collected via Velodyne's 64-beam LiDAR. Unlike FT3D [29] and stereoKITTI [30, 31], the point clouds from lidarKITTI are sparsely distributed. Note that LiDAR scene flow ground truths contain errors. We mitigate this by fusing the ground truth with the first point cloud to create a corrected second frame for network input, thus", + "bbox": [ + 75, + 741, + 472, + 892 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "avoiding evaluation errors.", + "bbox": [ + 500, + 741, + 679, + 757 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training Datasets used in previous methods: FT3D [29] and stereoKITTI [30, 31] are the frequently used datasets for training previous 3D scene flow models [4, 28, 36, 49, 51]. FT3D consists of 19,640 training pairs, while stereoKITTI [30, 31] contains 142 dense point clouds, with the first 100 frames used for model fine-tuning in some works [23, 32]. Some works [23, 28, 32, 35, 49] train their models on 2,691 pairs of Argoverse [3] data and 1,513 pairs of nuScenes [2] data, with 3D scene flow annotations fol", + "bbox": [ + 496, + 763, + 893, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "15178", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/3e79f987e89572282c4c12a76e3ae5301c641b055b2088084b97f3e5ae5ca3fd.jpg", + "table_caption": [ + "Table 2. The comparative results between our method and baseline. “↑” signifies accuracy enhancement. In real-world LiDAR scenarios, our method markedly improves the 3D flow estimation accuracy across three datasets [2, 3, 30] on the three baselines. This demonstrates that the proposed pseudo-auto-labelling framework can substantially boost the accuracy of existing methods, even without the need for ground truth." + ], + "table_footnote": [], + "table_body": "
DatasetMethodEPE3D↓Acc3DS↑Acc3DR↑
LiDAR KITTIFLOT [36]0.65320.15540.3130
FLOT+3DSFlabelling0.0189 ↑ 97.1%0.96660.9792
MSBRN [4]0.01390.97520.9847
MSBRN+3DSFlabelling0.0123 ↑ 11.5%0.97970.9868
GMSF [51]0.19000.29620.5502
GMSF+3DSFlabelling0.0078 ↑ 95.8%0.99240.9947
ArgoverseFLOT [36]0.24910.09460.3126
FLOT+3DSFlabelling0.0107 ↑ 95.7%0.97110.9862
MSBRN [4]0.86910.24320.2854
MSBRN+3DSFlabelling0.0150 ↑ 98.3%0.94820.9601
GMSF [51]7.27760.00360.0144
GMSF+3DSFlabelling0.0093 ↑ 99.9%0.97800.9880
nuScenesFLOT [36]0.48580.08210.2669
FLOT+3DSFlabelling0.0554 ↑ 88.6%0.76010.8909
MSBRN [4]0.61370.23540.2924
MSBRN+3DSFlabelling0.0235 ↑ 96.2%0.94130.9604
GMSF [51]9.42310.00340.0086
GMSF+3DSFlabelling0.0185 ↑ 99.8%0.95340.9713
", + "bbox": [ + 86, + 191, + 460, + 426 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "lowing the settings of the Graph prior [35]. The R3DSF [12] training set utilizes FT3D and semanticKITTI datasets [1], relying on ego-motion labels and semantic segmentation labels from semanticKITTI.", + "bbox": [ + 75, + 445, + 468, + 505 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Training Datasets used in our methods: Because we do not need any labels for training data, we use raw LiDAR point clouds sampled from raw data. For testing on the lidarKITTI [31], we use LiDAR point clouds from sequences 00 to 09 of the KITTI Odometry dataset [11] for auto-labelling and training. For testing on the nuScenes scene flow dataset [2], we randomly sample 50,000 pairs of LiDAR point clouds from the 350,000 LiDAR point clouds in the nuScenes sweeps dataset [2]. For testing on the Argoverse scene flow Dataset [3], we use the LiDAR point clouds from sequences 01 to 05 of the Argoverse 2 Sensor Dataset [3] for auto-labelling and training. In the selection of training data, we exclude the test scenes.", + "bbox": [ + 75, + 506, + 470, + 702 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Implementation Details", + "text_level": 1, + "bbox": [ + 76, + 710, + 294, + 727 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The effectiveness of the proposed auto-labelling framework is demonstrated using three prominent deep learning models: FLOT [36], MSBRN [4], and GMSF [51]. These models use optimal transport, coarse-to-fine strategies, and transformer architectures respectively. Hyperparameters consistent with the original networks are employed during the training process. The input point clouds, from which ground points have been filtered, are randomly sampled to incorporate 8192 points. The LiDAR point cloud data from KITTI [10] is confined to the front view perspective, maintaining consistency with previous studies [12].", + "bbox": [ + 75, + 734, + 468, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/27e0724d4cdb1e066329df99e52d3f6c9a768e340ac086420c64ebfcdc6a13f7.jpg", + "table_caption": [ + "Table 3. Model comparison on the Argoverse dataset [3]. 'M' represents millions of parameters, and time is in milliseconds." + ], + "table_footnote": [], + "table_body": "
MethodSup.EPE3D↓Acc3DS↑Acc3DR↑Time↓Params.↓
PointPWC [49]Full0.42880.04620.2164147 ms7.7 M
PV-RAFT [47]Full10.7450.02000.0100169 ms-
R3DSF [12]Weak0.41600.34520.4310113 ms8.0 M
FlowStep3D [19]Self0.84500.01000.0800729 ms-
NSFP [23]None0.15900.38430.63082864 ms-
Fast-NSF [24]None0.11800.69930.8355124 ms-
MBNSF [41]None0.05100.79360.92375000+ ms-
MSBRN+3DSFlabellingSelf0.01500.94820.9601341 ms3.5 M
GMSF+3DSFlabellingSelf0.00930.97800.9880251 ms6.0 M
FLOT+3DSFlabellingSelf0.01070.97110.986278 ms0.1 M
", + "bbox": [ + 504, + 119, + 888, + 253 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Furthermore, we utilize four scene flow evaluation metrics [28, 36, 49, 51]: Average Endpoint Error (EPE3D), ACC3DS, ACC3DR, and Outliers.", + "bbox": [ + 498, + 273, + 890, + 319 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.Quantitative Results", + "text_level": 1, + "bbox": [ + 498, + 329, + 692, + 345 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The experimental results are presented in Table 1. We list the best-performing optimized [5, 12, 23, 35], self-supervised [17, 32], and supervised [18, 28, 49] models in the table. Our method achieves excellent performance on all datasets [2, 3, 10] and metrics. Particularly, compared to the baselines [51], there is an order of magnitude reduction in EPE3D on most datasets. The proposed auto-labelling method generates effective scene flow labels, perfectly simulating the rigid motion of various objects in the real world. The designed global-local data augmentation further expands the 3D scene flow labels. As a result, our method significantly outperforms other methods. We have also applied this plug-and-play auto-labelling framework for 3D scene flow (3DSFlabelling) to three existing models, as demonstrated in Table 2. The proposed method significantly enhances the accuracy of 3D scene flow estimation in these models [4, 36, 51].", + "bbox": [ + 496, + 353, + 890, + 609 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Moreover, many existing works utilize a large number of model parameters [12, 47, 49] or adopt optimization methods [23, 24, 41] during testing for a more accurate estimation of 3D scene flow. These methods are highly time-consuming, and cannot ensure accuracy when reducing model parameters. Our proposed 3DSFlabelling effectively addresses this challenge. In Table 3, by using the small-parameter model FLOT (iter=1) [36] combined with our auto-labelling framework, we surpass all current supervised, unsupervised, weakly supervised, and optimized methods. This strongly validates the effectiveness of generating real-world labels in solving the challenges.", + "bbox": [ + 496, + 609, + 890, + 791 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Visualization", + "text_level": 1, + "bbox": [ + 500, + 801, + 635, + 816 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fig. 5 visualizes the precision of our method and others on two datasets [3, 31]. FLOT [36], with its mathematically optimal transport approach to matching point clouds, exhibits superior generalization. 
MSBRN [4], leveraging a multi-scale bidirectional recurrent network, robustly esti", + "bbox": [ + 496, + 825, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "15179", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/169a4c7b0bf781bd620e7d9979149217888a844b7c7250aa8475f9b5c354eb68.jpg", + "image_caption": [ + "Scene 250 in nuScenes" + ], + "image_footnote": [], + "bbox": [ + 83, + 101, + 269, + 181 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/eaccdd806351786224e439bf002e1a3d6bbb47a2e2fe2fe7682b110e55a3e79e.jpg", + "image_caption": [ + "Scene 115 in nuScenes" + ], + "image_footnote": [], + "bbox": [ + 271, + 101, + 455, + 181 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/658bf3882567c680da7874e22ffa406d61c12ea540e8f220eb43d82f3bbd98c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 83, + 181, + 269, + 263 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a4ade8ce38739b9be661b02065e24a52a5f13b03fbd38814c17e0ae916f9c465.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 271, + 181, + 455, + 263 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a1c6d82a63eaac9088c503f523d3ea567c058a5441ff66e354645956101708d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 83, + 263, + 267, + 344 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/126431da375604bd3609427392a8d3ab019499e3cb3a54d10618186df34cf56f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 271, + 263, + 455, + 344 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fbfcb4a3c3a1852573cb190ac3b77585c3d495ec5990fda6b0e0a0f32c4f85b0.jpg", + "image_caption": [ + "Figure 6. Error visualizing of our method (GMSF+3DSFlabelling) and baselines on the nuScenes dataset [2]. Using 3D EndPoint Error (EPE3D) as the metric, we categorize the error into six levels. Combining GMSF [51] with our proposed 3DSFlabelling, we manage to keep the EPE3D for most points within 0.02 meters, clearly outperforming other methods largely." + ], + "image_footnote": [], + "bbox": [ + 83, + 345, + 267, + 443 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5b9c81c31aac9e0aa0bf244ccb08bda00a3e45e08dd8f382166efe07078dcdf6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 271, + 345, + 455, + 443 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/401fffce353f13df6e77680d13782e1f907a6cdc9944addc6538afb65337e2bd.jpg", + "table_caption": [ + "Table 4. Generalization comparison experiment. \"A\", \"N\", and \"K\" represent the Argoverse [3], nuScenes [2], and KITTI [10] datasets. $\\langle \\sim \\rangle$ representing a model trained on the dataset on the left and directly evaluated on another new dataset on the right." + ], + "table_footnote": [], + "table_body": "
MethodSup.A~→NN~→AA~→KN~→K
EPE3DAcc3DSEPE3DAcc3DSEPE3DAcc3DSEPE3DAcc3DS
PointPWC [49]Self0.59110.08440.70430.02810.86320.01190.93070.0027
RigidFlow [12]Self0.11350.34450.39910.01520.36450.21180.50420.0141
MSBRN [4]Full0.53090.00550.37610.00980.60360.00560.49260.0081
GMSF [51]Full0.03340.90370.30780.12780.04420.87640.05740.8135
OursSelf0.01150.96930.02640.91920.04140.90200.02080.9595
", + "bbox": [ + 78, + 604, + 475, + 705 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "mates 3D scene flow on KITTI. GMSF [51] utilizes a transformer architecture for powerful fitting learning, but it lacks cross-domain generalization. The proposed method consistently shows better alignment between predicted and target point clouds across all scenes. Additionally, a visualization of the scene flow error on the nuScenes dataset is presented in Fig. 6. In two randomly selected test scenes, our method keeps the scene flow EPE3D mostly within $0.02m$ , clearly outperforming other baselines. More visual comparisons will be presented in the supplementary material.", + "bbox": [ + 75, + 733, + 467, + 883 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4 provides quantitative results, demonstrating the", + "bbox": [ + 96, + 885, + 467, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/671a56c460220d6f874ba114b3b842b3aa0d9617a1be6e8a7b5ec159424c0537.jpg", + "table_caption": [ + "Table 5. Ablation study of 3D scene flow data augmentation. \"No Aug\" and \"Trad. Aug\" represents no data augmentation and traditional data augmentation [49], respectively. Our data augmentation method has a very positive impact on the model." + ], + "table_footnote": [], + "table_body": "
ModelData Augmentation MethodsKITTIArgoversenuScenes
No Aug Trad. AugOur AugEPE3D ACC3DSEPE3D ACC3DSEPE3D ACC3DSEPE3D ACC3DS
Ours (FLOT)--0.06010.72910.04920.80150.73640.6642
--0.05400.76220.04300.86790.06100.7417
--0.01890.96660.01070.97110.05540.7601
Ours (MSBRN)--0.01310.97810.01800.94110.07970.8510
--0.01290.97900.01770.94270.07930.8547
--0.01230.97970.01500.94820.02350.9413
Ours (GMSF)--0.01030.99010.01390.96370.02130.9468
--0.00810.99180.01370.96630.02120.9473
--0.00780.99240.00930.97800.01850.9534
", + "bbox": [ + 503, + 148, + 888, + 272 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "generalization of our 3DSFlabelling combined with the existing method (GMSF [51]) on new datasets. For instance, we train a model on the Argoverse dataset and directly evaluate it on the nuScenes dataset. These two datasets belong to different domains, posing a domain generalization problem. The results in Table 4 indicate that our framework performs exceptionally well on the new dataset, consistently achieving an EPE3D of less than $5cm$ , and even reaching an average endpoint error of less than $2cm$ .", + "bbox": [ + 496, + 294, + 890, + 430 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Ablation Study", + "text_level": 1, + "bbox": [ + 498, + 436, + 653, + 454 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This section explores the advantages of global-local data augmentation. In Table 5, we compare existing 3D scene flow data augmentation [49] with our proposed global-local data augmentation method. Our augmentation strategy shows significant enhancement in all evaluation metrics. This is attributed to the effective simulation of various motion patterns in autonomous driving by global-local data augmentation. The introduction of various motion transformations excellently utilizes the limited training data to extend a variety of 3D scene flow styles. More ablation studies are referring to the supplement material.", + "bbox": [ + 496, + 460, + 890, + 627 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 638, + 619, + 655 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We package 3D point clouds into boxes with different motion attributes. By optimizing the motion parameters for each box and warping the source point cloud into the target point cloud, we create pseudo 3D scene flow labels. We also design a global-local data augmentation method, introducing various scene motion patterns and significantly increasing the diversity and quantity of 3D scene flow labels. Tests on multiple real-world datasets show that our 3D scene flow auto-labelling significantly enhances the performance of existing models. Importantly, this approach eliminates the need for 3D scene flow estimation models to depend on manually annotated 3D scene flow labels.", + "bbox": [ + 496, + 664, + 890, + 845 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 851, + 687, + 868 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported by PhiGent Robotics.", + "bbox": [ + 517, + 871, + 890, + 887 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "15180", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jens Behley, Martin Garbade, Andres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Jurgen Gall. Semantickitti: A dataset for semantic scene understanding of lidar sequences. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9297-9307, 2019. 7", + "[2] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 2, 5, 6, 7, 8", + "[3] Ming-Fang Chang, John Lambert, Patsorn Sangkloy, Jagjeet Singh, Slawomir Bak, Andrew Hartnett, De Wang, Peter Carr, Simon Lucey, Deva Ramanan, et al. Argoverse: 3d tracking and forecasting with rich maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8748-8757, 2019. 1, 2, 5, 6, 7, 8", + "[4] Wencan Cheng and Jong Hwan Ko. Multi-scale bidirectional recurrent network with hybrid correlation for point cloud based scene flow estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10041-10050, 2023. 1, 2, 6, 7, 8", + "[5] David Deng and Avideh Zakhor. Rsf: Optimizing rigid scene flow from 3d point clouds without labels. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1277-1286, 2023. 2, 3, 4, 6, 7", + "[6] Fangqiang Ding, Zhijun Pan, Yimin Deng, Jianning Deng, and Chris Xiaoxuan Lu. Self-supervised scene flow estimation with 4-d automotive radar. IEEE Robotics and Automation Letters, 7(3):8233-8240, 2022. 2", + "[7] Guanting Dong, Yueyi Zhang, Hanlin Li, Xiaoyan Sun, and Zhiwei Xiong. Exploiting rigidity constraints for lidar scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12776-12785, 2022. 3", + "[8] Emeç Erçelik, Ekim Yurtsever, Mingyu Liu, Zhijie Yang, Hanzhen Zhang, Pinar Topçam, Maximilian Listl, Yilmaz Kaan Cayli, and Alois Knoll. 3d object detection with a self-supervised lidar scene flow backbone. In European Conference on Computer Vision, pages 247-265. Springer, 2022. 1", + "[9] Jingyun Fu, Zhiyu Xiang, Chengyu Qiao, and Tingming Bai. Pt-flownet: Scene flow estimation on point clouds with point transformer. IEEE Robotics and Automation Letters, 8(5): 2566-2573, 2023. 1, 2", + "[10] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In 2012 IEEE conference on computer vision and pattern recognition, pages 3354-3361. IEEE, 2012. 6, 7, 8", + "[11] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. The International Journal of Robotics Research, 32(11):1231-1237, 2013. 7", + "[12] Zan Gojcic, Or Litany, Andreas Wieser, Leonidas J Guibas, and Tolga Birdal. Weakly supervised learning of rigid 3d" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "scene flow. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5692-5703, 2021. 3, 6, 7, 8", + "[13] Xiuye Gu, Yijie Wang, Chongruo Wu, Yong Jae Lee, and Panqu Wang. Hplflownet: Hierarchical permutohedral lattice flownet for scene flow estimation on large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3254-3263, 2019. 2", + "[14] Shengyu Huang, Zan Gojcic, Jiahui Huang, Andreas Wieser, and Konrad Schindler. Dynamic 3d scene analysis by point cloud accumulation. In European Conference on Computer Vision, pages 674-690. Springer, 2022. 1", + "[15] Hafsa Iqbal, Abdulla Al-Kaff, Pablo Marin, Lucio Marcenaro, David Martin Gomez, and Carlo Regazzoni. Detection of abnormal motion by estimating scene flows of point clouds for autonomous driving. 
In 2021 IEEE International Intelligent Transportation Systems Conference (ITSC), pages 2788-2793. IEEE, 2021. 1", + "[16] Chaokang Jiang, Guangming Wang, Jinxing Wu, Yanzi Miao, and Hesheng Wang. Ffpa-net: Efficient feature fusion with projection awareness for 3d object detection. arXiv preprint arXiv:2209.07419, 2022. 1", + "[17] Chaokang Jiang, Guangming Wang, Yanzi Miao, and Hesheng Wang. 3-d scene flow estimation on pseudo-lidar: Bridging the gap on estimating point motion. IEEE Transactions on Industrial Informatics, 19(6):7346-7354, 2023. 6, 7", + "[18] Zhao Jin, Yinjie Lei, Naveed Akhtar, Haifeng Li, and Munawar Hayat. Deformation and correspondence aware unsupervised synthetic-to-real scene flow estimation for point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7233-7243, 2022. 1, 6, 7", + "[19] Yair Kittenplon, Yonina C Eldar, and Dan Raviv. Flowstep3d: Model unrolling for self-supervised scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4114-4123, 2021. 7", + "[20] Itai Lang, Dror Aiger, Forrester Cole, Shai Avidan, and Michael Rubinstein. Scoop: Self-supervised correspondence and optimization-based scene flow. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5281-5290, 2023. 3", + "[21] Bing Li, Cheng Zheng, Silvio Giancola, and Bernard Ghanem. Sctn: Sparse convolution-transformer network for scene flow estimation. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1254–1262, 2022. 2", + "[22] Ruibo Li, Chi Zhang, Guosheng Lin, Zhe Wang, and Chunhua Shen. Rigidflow: Self-supervised scene flow learning on point clouds by local rigidity prior. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16959-16968, 2022. 2, 3", + "[23] Xueqian Li, Jhony Kaesemodel Pontes, and Simon Lucey. Neural scene flow prior. Advances in Neural Information Processing Systems, 34:7838-7851, 2021. 3, 6, 7", + "[24] Xueqian Li, Jianqiao Zheng, Francesco Ferroni, Jhony Kaesemodel Pontes, and Simon Lucey. Fast neural scene flow. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 9878-9890, 2023. 1, 3, 7" + ], + "bbox": [ + 503, + 92, + 890, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "15181", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[25] Jiuming Liu, Guangming Wang, Chaokang Jiang, Zhe Liu, and Hesheng Wang. Translo: A window-based masked point transformer framework for large-scale lidar odometry. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1683-1691, 2023. 3", + "[26] Jiuming Liu, Guangming Wang, Zhe Liu, Chaokang Jiang, Marc Pollefeys, and Hesheng Wang. Regformer: an efficient projection-aware transformer network for large-scale point cloud registration. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8451-8460, 2023. 3", + "[27] Jiuming Liu, Guangming Wang, Weicai Ye, Chaokang Jiang, Jinru Han, Zhe Liu, Guofeng Zhang, Dalong Du, and Hesheng Wang. Difflow3d: Toward robust uncertainty-aware scene flow estimation with diffusion model. arXiv preprint arXiv:2311.17456, 2023. 1", + "[28] Xingyu Liu, Charles R Qi, and Leonidas J Guibas. Flownet3d: Learning scene flow in 3d point clouds. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 529-537, 2019. 2, 6, 7", + "[29] Nikolaus Mayer, Eddy Ilg, Philip Hausser, Philipp Fischer, Daniel Cremers, Alexey Dosovitskiy, and Thomas Brox. A large dataset to train convolutional networks for disparity, optical flow, and scene flow estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4040-4048, 2016. 1, 2, 6", + "[30] Moritz Menze, Christian Heipke, and Andreas Geiger. Joint 3d estimation of vehicles and scene flow. ISPRS annals of the photogrammetry, remote sensing and spatial information sciences, 2:427-434, 2015. 1, 2, 6, 7", + "[31] Moritz Menze, Christian Heipke, and Andreas Geiger. Object scene flow. ISPRS Journal of Photogrammetry and Remote Sensing, 140:60-76, 2018. 1, 2, 6, 7", + "[32] Himangi Mittal, Brian Okorn, and David Held. Just go with the flow: Self-supervised scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11177-11185, 2020. 2, 3, 6, 7", + "[33] Mahyar Najibi, Jingwei Ji, Yin Zhou, Charles R Qi, Xinchen Yan, Scott Ettinger, and Dragomir Anguelov. Motion inspired unsupervised perception and prediction in autonomous driving. In European Conference on Computer Vision, pages 424-443. Springer, 2022. 1", + "[34] Chensheng Peng, Guangming Wang, Xian Wan Lo, Xinrui Wu, Chenfeng Xu, Masayoshi Tomizuka, Wei Zhan, and Hesheng Wang. Delflow: Dense efficient learning of scene flow for large-scale point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16901-16910, 2023. 2", + "[35] Jhony Kaesemodel Pontes, James Hays, and Simon Lucey. Scene flow from point clouds with or without learning. In 2020 International Conference on 3D Vision (3DV), pages 261-270, 2020. 3, 5, 6, 7", + "[36] Gilles Puy, Alexandre Boulch, and Renaud Marlet. Flot: Scene flow on point clouds guided by optimal transport. In ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XXVIII 16, pages 527–544, 2020. 1, 2, 6, 7" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[37] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 1", + "[38] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 1", + "[39] Yaqi Shen, Le Hui, Jin Xie, and Jian Yang. Self-supervised 3d scene flow estimation guided by superpoints. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5271-5280, 2023. 1, 2", + "[40] Ivan Tishchenko, Sandro Lombardi, Martin R Oswald, and Marc Pollefeys. Self-supervised learning of non-rigid residual flow and ego-motion. In 2020 international conference on 3D vision (3DV), pages 150-159. IEEE, 2020. 3", + "[41] Kavisha Vidanapathirana, Shin-Fang Chng, Xueqian Li, and Simon Lucey. Multi-body neural scene flow. arXiv preprint arXiv:2310.10301, 2023. 7", + "[42] Guangming Wang, Xinrui Wu, Zhe Liu, and Hesheng Wang. Hierarchical attention learning of scene flow in 3d point clouds. IEEE Transactions on Image Processing, 30:5168-5181, 2021. 
2", + "[43] Guangming Wang, Yunzhe Hu, Zhe Liu, Yiyang Zhou, Masayoshi Tomizuka, Wei Zhan, and Hesheng Wang. What matters for 3d scene flow network. In European Conference on Computer Vision, pages 38-55. Springer, 2022. 2", + "[44] Guangming Wang, Chaokang Jiang, Zehang Shen, Yanzi Miao, and Hesheng Wang. Sfgan: Unsupervised generative adversarial learning of 3d scene flow from the 3d scene self. Advanced Intelligent Systems, 4(4):2100197, 2022. 2, 3", + "[45] Yun Wang, Cheng Chi, and Xin Yang. Exploiting implicit rigidity constraints via weight-sharing aggregation for scene flow estimation from point clouds. arXiv preprint arXiv:2303.02454, 2023. 2", + "[46] Ziyi Wang, Yi Wei, Yongming Rao, Jie Zhou, and Jiwen Lu. 3d point-voxel correlation fields for scene flow estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2023. 2", + "[47] Yi Wei, Ziyi Wang, Yongming Rao, Jiwen Lu, and Jie Zhou. Pv-raft: Point-voxel correlation fields for scene flow estimation of point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6954–6963, 2021. 1, 2, 7", + "[48] Pengxiang Wu, Siheng Chen, and Dimitris N Metaxas. Motionnet: Joint perception and motion prediction for autonomous driving based on bird's eye view maps. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11385-11395, 2020. 1", + "[49] Wenxuan Wu, Zhi Yuan Wang, Zhuwen Li, Wei Liu, and Li Fuxin. Pointpwc-net: Cost volume on point clouds for (self-) supervised scene flow estimation. In European Conference on Computer Vision, pages 88-107, 2020. 2, 3, 4, 6, 7, 8", + "[50] Yi Zhang, Yuwen Ye, Zhiyu Xiang, and Jiaqi Gu. Sdp-net: Scene flow based real-time object detection and prediction from sequential 3d point clouds. In Proceedings of the Asian Conference on Computer Vision, 2020. 1" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "15182", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[51] Yushan Zhang, Johan Edstedt, Bastian Wandt, Per-Erik Forssén, Maria Magnusson, and Michael Felsberg. Gmsf: Global matching scene flow. arXiv preprint arXiv:2305.17432, 2023. 1, 2, 6, 7, 8", + "[52] Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Open3d: A modern library for 3d data processing. arXiv preprint arXiv:1801.09847, 2018. 3" + ], + "bbox": [ + 78, + 90, + 468, + 189 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "15183", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/128911ab-0f2f-4697-8895-080e5b45c36b_model.json b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/128911ab-0f2f-4697-8895-080e5b45c36b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..cc8a82ddec34a0a87f5b7a0770464f24b24b4bae --- /dev/null +++ b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/128911ab-0f2f-4697-8895-080e5b45c36b_model.json @@ -0,0 +1,2125 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.094, + 0.131, + 0.877, + 0.155 + ], + "angle": 0, + "content": "3DSFLabelling: Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.18, + 0.816, + 0.217 + ], + "angle": 0, + "content": "Chaokang Jiang\\(^{1}\\), Guangming Wang\\(^{2}\\), Jiuming Liu\\(^{3}\\), Hesheng Wang\\(^{3}\\), Zhuang Ma\\(^{1}\\), Zhenqiang Liu\\(^{1}\\), Zhujin Liang\\(^{1}\\), Yi Shan\\(^{1}\\), Dalong Du\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.217, + 0.792, + 0.235 + ], + "angle": 0, + "content": "1PhiGent Robotics, 2University of Cambridge, 3Shanghai Jiaotong University" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.237, + 0.856, + 0.287 + ], + "angle": 0, + "content": "ts20060079a31@cumt.edu.cn, gw462@cam.ac.uk, {liujiuming, wanghesheng}@sjtu.edu.cn, mazhuang097@outlook.com, {zhenqiang.liu, zhujin.liang, yi.shan, dalong.du}@phigent.ai jiangchaokang.github.io/3DSFLabelling-Page" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.322, + 0.314, + 0.339 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.354, + 0.474, + 0.702 + ], + "angle": 0, + "content": "Learning 3D scene flow from LiDAR point clouds presents significant difficulties, including poor generalization from synthetic datasets to real scenes, scarcity of real-world 3D labels, and poor performance on real sparse LiDAR point clouds. We present a novel approach from the perspective of auto-labelling, aiming to generate a large number of 3D scene flow pseudo labels for real-world LiDAR point clouds. Specifically, we employ the assumption of rigid body motion to simulate potential object-level rigid movements in autonomous driving scenarios. By updating different motion attributes for multiple anchor boxes, the rigid motion decomposition is obtained for the whole scene. Furthermore, we developed a novel 3D scene flow data augmentation method for global and local motion. By perfectly synthesizing target point clouds based on augmented motion parameters, we easily obtain lots of 3D scene flow labels in point clouds highly consistent with real scenarios. On multiple real-world datasets including LiDAR KITTI, nuScenes, and Argoverse, our method outperforms all previous supervised and unsupervised methods without requiring manual labelling. Impressively, our method achieves a tenfold reduction in EPE3D metric on the LiDAR KITTI dataset, reducing it from \\(0.190m\\) to a mere \\(0.008m\\) error." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.731, + 0.21, + 0.746 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.756, + 0.47, + 0.879 + ], + "angle": 0, + "content": "3D scene flow estimation through deducing per-point motion filed from consecutive frames of point clouds, serves a critical role across various applications, encompassing motion prediction [33, 48], anomaly motion detection [15], 3D object detection [8, 16, 50], and dynamic point cloud accumulation [14]. 
With the advancing of deep learning on point clouds [37, 38], many works [4, 9, 18, 27, 36, 39, 51] have developed the learning-based" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.32, + 0.891, + 0.499 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.503, + 0.895, + 0.588 + ], + "angle": 0, + "content": "Figure 1. The proposed 3D scene flow pseudo-auto-labelling framework. Given point clouds and initial bounding boxes, both global and local motion parameters are iteratively optimized. Diverse motion patterns are augmented by randomly adjusting these motion parameters, thereby creating a diverse and realistic set of motion labels for the training of 3D scene flow estimation models." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.612, + 0.892, + 0.718 + ], + "angle": 0, + "content": "methods to estimate per-point motion from 3D point clouds. Some state-of-the-art methods [4, 39, 51] have reduced the average 3D Endpoint Error (EPE3D) to a few centimetres on the KITTI Scene Flow dataset (stereoKITTI) [30, 31]. However, due to the scarcity of scene flow labels, these methods rely heavily on synthetic datasets such as FlyingThings3D (FT3D) [29] for network training." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.893, + 0.901 + ], + "angle": 0, + "content": "When evaluated on the stereoKITTI dataset [30, 31], PV-RAFT [47] demonstrates an average EPE3D of just \\(0.056m\\). However, when evaluated on the Argoverse dataset [3], the EPE3D metric astonishingly exceeds \\(10m\\) [24]. Therefore, learning 3D scene flow on synthetic dataset [29] has a large gap with real-world application. Jin et al. [18] recently introduce a new synthetic dataset, GTA-SF, simulating LiDAR scans for autonomous driving. They propose a teacher-student domain adaptation framework to reduce the gap between synthetic and real datasets and improve some performance of 3D scene flow estimation. However, their performance is still poor in" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.228, + 0.901 + ], + "angle": 0, + "content": "\\(^{1\\dagger}\\) Corresponding author." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15173" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.087, + 0.095, + 0.451, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.255, + 0.47, + 0.338 + ], + "angle": 0, + "content": "Figure 2. The accuracy improvement after integrating our proposed pseudo-auto-labelling method. Models trained on synthetic data performance poorly in 3D scene flow estimation for LiDAR-based autonomous driving. Our proposed 3D pseudo-auto-labelling method improves accuracy, reaching an EPE3D below \\(2cm\\) across datasets [2, 3, 31]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.369, + 0.47, + 0.52 + ], + "angle": 0, + "content": "real-world LiDAR data because of ideal sensor models and lack of scene variety. Ideally, models should learn from real sensor data in the autonomous driving field. However, labelling each point's 3D motion vector for the 3D scene flow task is extremely costly. This has driven many works [6, 22, 28, 32, 39, 44] towards unsupervised or self-supervised learning of 3D scene flow. Although these methods have achieved reasonable accuracy, they still fall behind supervised methods, highlighting the importance of real sensor data and corresponding 3D scene flow labels." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.521, + 0.471, + 0.839 + ], + "angle": 0, + "content": "In this work, we address three key challenges in the field of autonomous driving: the reliance on synthetic datasets that still have a poor generalization with real-world scenarios, the scarcity of scene flow labels in actual driving scenes, and the poor performance of existing 3D scene flow estimation networks on real LiDAR data. Inspired by the rigid motion assumptions in RigidFlow [22] and RSF [5], we propose a novel scene flow auto-labelling approach that leverages the characteristics of rigid motion prevalent in autonomous driving scenarios (Fig. 1). Specifically, we utilize 3D anchor boxes to segment 3D objects in point clouds. The attributes of each object-level box are not only position and size but also rotation, translation, motion status, and normal vector attributes. By leveraging the constrained loss functions for the box parameters and inter-frame association, we optimize the attributes of the boxes, subsequently combining these parameters with the source point cloud to produce a realistic target point cloud. Importantly, the generated target point cloud maintains a one-to-one correspondence with the source point cloud, enabling the efficient generation of pseudo 3D scene flow labels." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.901 + ], + "angle": 0, + "content": "To capture a more diverse range of motion patterns, we introduce a novel data augmentation strategy for 3D scene flow auto-labelling. Utilizing the attributes of each box, we simulate the rotations, translations, and motion status" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.244 + ], + "angle": 0, + "content": "of both the ego vehicle and surrounding environment by adding Gaussian noise to these attributes. Consequently, we obtain numerous 3D scene flow labels with diverse motions that closely resemble real-world scenarios, furnishing the neural network with rich real training data and significantly improving the generalization capabilities of learning-based methods. Experimental results validate that our pseudo-label generation strategy consistently achieves state-of-the-art scene flow estimation results across various models [4, 36, 51] and datasets [2, 3, 30] (Fig. 2)." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.249, + 0.82, + 0.263 + ], + "angle": 0, + "content": "In summary, our contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.268, + 0.892, + 0.344 + ], + "angle": 0, + "content": "- We propose a new framework for the automatic labelling of 3D scene flow pseudo-labels, significantly enhancing the accuracy of current scene flow estimation models, and effectively addressing the scarcity of 3D flow labels in autonomous driving." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.344, + 0.892, + 0.45 + ], + "angle": 0, + "content": "- We propose a universal 3D box optimization method with multiple motion attributes. Building upon this, we further introduce a plug-and-play 3D scene flow augmentation module with global-local motions and motion status. This allows for flexible motion adjustment of ego-motion and dynamic environments, setting a new benchmark for scene flow data augmentation." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.45, + 0.892, + 0.525 + ], + "angle": 0, + "content": "- Our method achieves state-of-the-art performance on KITTI, nuScenes, and Argoverse LiDAR datasets. 
Impressively, our approach surpasses all supervised and unsupervised methods without requiring any synthetic data or manual scene flow labels." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.268, + 0.892, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.555, + 0.642, + 0.57 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.585, + 0.814, + 0.602 + ], + "angle": 0, + "content": "2.1. Supervised 3D Scene Flow Learning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.614, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In recent years, the performance of methods [28, 34, 42] for 3D scene flow based on point cloud deep learning has surpassed traditional methods. FlowNet3D [28] pioneers an end-to-end approach to learning 3D scene flow from point clouds. Some works, such as HALFlow [13], 3DFlow [43], PointPWC [49], and WSAFlowNet [45], utilize PWC structures to learn 3D scene flow in a coarse-to-fine manner. Other methods address the unordered nature of points by voxelizing point clouds and using sparse convolution or voxel correlation fields to learn 3D scene flow, such as PV-RAFT [47], DPV-RAFT [46], and SCTN [21]. Additional works refine the estimated scene flow through iterative procedures. MSBRN [4] proposes bidirectional gated recurrent units for iteratively estimating scene flow. GMSF [51] and PT-FlowNet [9] introduce point cloud transformers into 3D scene flow estimation networks. These supervised learning methods for 3D scene flow heavily rely on ground truth and are all trained on the FT3D dataset [29] and evaluated on stereoKITTI [30, 31] to test network generalization." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15174" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.091, + 0.411, + 0.108 + ], + "angle": 0, + "content": "2.2. Unsupervised 3D Scene Flow Learning" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.114, + 0.473, + 0.357 + ], + "angle": 0, + "content": "JGwF [32] and PointPWC [49] initially propose several self-supervised learning losses such as cycle consistency loss and chamfer loss. EgoFlow [40] distinguishes 3D scene flow into ego-motion flow and remaining non-rigid flow, achieving self-supervised learning based on temporal consistency. SFGAN [44] introduces generative adversarial concepts into self-supervised learning for 3D scene flow. Recently, works like R3DSF [12], RigidFlow [22], and LiDARSceneFlow [7] greatly improve the accuracy of 3D scene flow estimation by introducing local or object-level rigidity constraints. RigidFlow [22] explicitly enforces rigid alignment within super-voxel regions by decomposing the source point cloud into multiple supervoxels. R3DSF [12] separately considers background and foreground object-level 3D scene flow, relying on segmentation and odometry tasks [25, 26]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.367, + 0.334, + 0.384 + ], + "angle": 0, + "content": "2.3. 3D Scene Flow Optimization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.391, + 0.473, + 0.68 + ], + "angle": 0, + "content": "3D scene flow optimization techniques have demonstrated remarkable generalization capabilities, attracting a significant amount of academic research recently. Graph prior [35] optimizes scene flow to be as smooth as possible by using the Laplacian of point clouds. Some techniques introduce neural networks to optimize 3D scene flow. 
NSFP [23] introduces a novel implicit regularizer, the Neural Scene Flow Prior, which primarily depends on runtime optimization and robust regularization. RSF [5] combines global ego-motion with object-specific rigid movements to optimize 3D bounding box parameters and compute scene flow. FastNSF [24] also adopts the neural scene flow prior and shows greater advantages than learning-based methods in dealing with dense LiDAR points. SCOOP [20], in the runtime phase, directly optimizes the flow refinement module using self-supervised objectives. Although optimization-based approaches for 3D scene flow estimation have demonstrated impressive accuracy, they typically involve high computational costs." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.693, + 0.231, + 0.71 + ], + "angle": 0, + "content": "3. 3DSFLabelling" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.719, + 0.47, + 0.84 + ], + "angle": 0, + "content": "3D scene flow estimation infers the 3D flow \\( SF_{pred} \\in \\mathbb{R}^{3 \\times N_1} \\), one motion vector for each point in the source point cloud, from the source point cloud \\( PC_S \\in \\mathbb{R}^{3 \\times N_1} \\) and the target point cloud \\( PC_T \\in \\mathbb{R}^{3 \\times N_2} \\). Previous self-supervised learning methods [32, 49] typically use the estimated 3D motion vector \\( SF_{pred} \\) to warp the source point cloud \\( PC_S \\) into the warped point cloud \\( PC_{Sw} \\). By comparing the difference between \\( PC_{Sw} \\) and \\( PC_T \\), a supervisory signal is generated." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.902 + ], + "angle": 0, + "content": "In contrast with previous self-supervised learning methods, we propose bounding box element optimization to obtain the boxes and the box motion parameters from raw unlabelled point cloud data. Then, we use object-box-level" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.198 + ], + "angle": 0, + "content": "motion parameters and global motion parameters to warp each box's points and the whole point cloud to the target point cloud, generating corresponding pseudo 3D scene flow labels. During the warping process of each object box, we propose augmenting the motion attributes of each object and the whole scene. This diversity assists the network in capturing a broader range of motion behaviours." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.206, + 0.638, + 0.222 + ], + "angle": 0, + "content": "3.1. Prerequisites" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.229, + 0.892, + 0.426 + ], + "angle": 0, + "content": "Apart from the two input point clouds, we do not require any extra labels, such as object-level tracking and semantic information, or vehicle ego-motion labels. To reinforce the geometric constraints in the pseudo label generation module, we employ Open3d [52] to generate coarse per-point normals. Although these normals are not perfectly accurate, they are readily obtainable and can provide useful geometric constraints. Finally, we establish initial 3D anchor boxes with specific centers \\((x,y,z)\\), width \\(w\\), length \\(l\\), height \\(h\\), and rotation angle \\(\\theta\\), in accordance with the range of input points. As depicted in Fig. 3, the inputs of our model consist of the initial anchor box set, \\(PC_{S}\\), \\(PC_{T}\\), and point cloud normals \\(N_{S}\\)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.434, + 0.849, + 0.451 + ], + "angle": 0, + "content": "3.2. 
Motion Parameter Optimization Module" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.457, + 0.892, + 0.578 + ], + "angle": 0, + "content": "As shown in Fig. 3, we present the process of simulating the motion of point clouds in actual autonomous driving by updating four sets of parameters: differentiable bounding boxes \\(\\Phi = [c,s,\\theta]\\), global motion parameters \\(\\Theta = [R_{ego},t_{ego}]\\), motion parameters for each box \\([R_{perbox},t_{perbox}]\\), and motion probability \\(P_M\\) for each box. The variables \\(c\\), \\(s\\), and \\(\\theta\\) represent the center coordinates, size, and orientation of the 3D box, respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.579, + 0.892, + 0.804 + ], + "angle": 0, + "content": "Inspired by RSF [5], we use the motion of object-level bounding boxes to present the point-wise 3D motion and make the step-like boxes differentiable through sigmoid approximation. By transforming the individual points to the bounding boxes, we introduce an object-level perception of the scene, enabling a more natural capture of rigid motion. This method proves advantageous in autonomous driving scenarios, where most objects predominantly exhibit rigid behaviour [12]. Additionally, in the context of autonomous driving, most scene motion is typically caused by the ego motion of the vehicle. Hence, setting global motion parameters is necessary to simulate the global consistent rigid motion of the whole scene. To discern whether the motion of each box is caused by ego-motion, we also set up a motion probability for each bounding box." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.805, + 0.892, + 0.835 + ], + "angle": 0, + "content": "With the initial set of four motion parameters, the source point cloud is warped to the target frame, as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.54, + 0.843, + 0.892, + 0.863 + ], + "angle": 0, + "content": "\\[\nP C _ {T} ^ {\\Theta}, P C _ {T} ^ {\\Phi} = \\Omega_ {1} (\\Theta , P C _ {S}), \\Omega_ {2} (\\Upsilon (\\Phi , P C _ {S})), (1)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "where \\(\\Theta\\) represents global motion parameters. \\(\\Phi\\) represents motion parameters of each bounding box, and \\(\\Omega_{1}\\) and \\(\\Omega_{2}\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15175" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.088, + 0.884, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.397, + 0.896, + 0.482 + ], + "angle": 0, + "content": "Figure 3. The proposed learning framework of pseudo 3D scene flow automatic labelling. The input comprises 3D anchor boxes, a pair of point clouds, and their corresponding coarse normal vectors. The optimization of motion parameters primarily updates the bounding box parameters, global motion parameters, local motion parameters, and the motion probability of the boxes. The attribute parameters for boxes are updated through backward optimization from six objective functions. Once optimized, the motion parameters simulate various types of motion using a global-local data augmentation module. A single source frame point cloud, along with the augmented motion parameters, produces diverse 3D scene flow labels. These labels serve to guide the supervised neural network to learn point-wise motion." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.507, + 0.47, + 0.552 + ], + "angle": 0, + "content": "are background and foreground warping functions, respectively, generating the warped point clouds \\(PC_T^\\Theta\\) and \\(PC_T^\\Phi\\). \\(\\Upsilon\\) signifies the removal of boxes with too few points." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.552, + 0.471, + 0.613 + ], + "angle": 0, + "content": "Based on the real target point cloud frame and the generated target point clouds \\(PC_T^\\Theta\\) and \\(PC_T^\\Phi\\), we define loss functions to update and optimize the box attributes. We separately calculate the background and foreground losses:" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.618, + 0.47, + 0.637 + ], + "angle": 0, + "content": "\\[\nL_{BG} = \\kappa \\left(N_{T}^{\\Theta} \\oplus PC_{T}^{\\Theta}, N_{T} \\oplus PC_{T}\\right) + \\delta \\left(PC_{T}^{\\Theta}, PC_{T}\\right), \\tag{2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.64, + 0.469, + 0.702 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} L_{FG} = \\frac{1}{K_{box}} \\sum P_{M} \\times (\\kappa \\left(N_{T}^{\\Phi} \\oplus PC_{T}^{\\Phi}, N_{T} \\oplus PC_{T}\\right) \\\\ + \\delta \\left(PC_{T}^{\\Phi}, PC_{T}\\right)), \\tag{3} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.703, + 0.47, + 0.81 + ], + "angle": 0, + "content": "where \\(\\kappa\\) is a function calculating nearest neighbour matches between the transformed point cloud and the target point cloud. \\(\\delta\\) is a pairwise distance function with location encoding. \\(K_{box}\\) is the number of boxes, \\(P_M\\) is the motion probability of each box, and the term \\(N_T\\oplus PC_T\\) represents the concatenation of the target point cloud's normals and positions. As for the motion probability \\(P_M\\) of each box:" + }, + { + "type": "equation", + "bbox": [ + 0.086, + 0.817, + 0.47, + 0.835 + ], + "angle": 0, + "content": "\\[\nP_{M} = \\sigma \\left(\\alpha \\times \\left(\\Omega_{3} \\left(\\Phi, \\gamma_{i}\\right) + \\beta_{i}\\right)\\right) - \\sigma \\left(\\alpha \\times \\left(\\Omega_{3} \\left(\\Phi, \\gamma_{i}\\right) - \\beta_{i}\\right)\\right), \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.903 + ], + "angle": 0, + "content": "where \\(\\sigma(x)\\) represents the sigmoid function, \\(\\alpha\\) is a 'slope' hyperparameter in the sigmoid, and \\(\\beta\\) represents half the size of the vector of 3D dimensions \\(w\\), \\(l\\), and \\(h\\) of the bounding box. Coordinate values \\(\\gamma\\) in the source point cloud are" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.507, + 0.892, + 0.718 + ], + "angle": 0, + "content": "warped to the target point cloud via the motion box parameters \\(\\Phi\\). For each dynamic box, each point's relative position to the box's centre is calculated. A higher motion probability \\(P_{M}\\) is assigned to the points closer to the centre. A fixed hyperparameter \\(\\alpha\\), controlling motion probability, may not respond effectively to diverse and complex autonomous driving scenarios. Therefore, we adopt an adaptive computation of \\(\\alpha\\) based on the variance of the point nearest-neighbour consistency loss from the previous generation. The variance in the nearest-neighbour consistency loss for different points in the background implies the distribution of dynamic objects in the scene. 
With fewer moving objects indicated by a lower variance, \\(\\alpha\\) should be adaptively reduced, tending to produce lower motion probability \\(P_{M}\\) for points." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.718, + 0.892, + 0.824 + ], + "angle": 0, + "content": "In addition to \\(L_{BG}\\) and \\(L_{FG}\\), we introduce box dimension regularization, heading term, and angle term to constrain the dimensions, heading, and rotation angles of the bounding boxes within a reasonable range [5]. We also introduce a mass term to ensure that there are as many points as possible within the box, making the estimated motion parameters of the box more robust [5]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.833, + 0.892, + 0.85 + ], + "angle": 0, + "content": "3.3. Data Augmentation for 3D Flow Auto-labelling" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.893, + 0.902 + ], + "angle": 0, + "content": "Existing data augmentation practices [49] often add consistent random rotations and noise offsets to the input points, which indeed yields certain benefits. However, in" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "15176" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.466, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.244, + 0.473, + 0.342 + ], + "angle": 0, + "content": "Figure 4. The proposed pseudo label generation module. With the augmented motion probability \\( P_{M}^{*} \\), bounding boxes are categorized into dynamic and static types. Using global and local motion parameters, the \\( PC_{S} \\) is warped to the target point cloud \\( PC_{T}^{*} \\). Finally, pseudo 3D scene flow labels \\( SF \\) are derived from the correspondence between \\( PC_{T}^{*} \\) and \\( PC_{S} \\). \\( K_{box} \\) represents the number of boxes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.37, + 0.47, + 0.506 + ], + "angle": 0, + "content": "autonomous driving scenarios, there are frequently various complex motion patterns for multiple objects. To make models learn complex scene motion rules, we propose a novel data augmentation method for scene flow labelling in both global and object-level motions. Our method simulates a broad spectrum of 3D scene flow data variations, originating from ego-motion and dynamic object movement, thereby providing a promising solution to the challenge of securing abundant 3D scene flow labels." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.507, + 0.47, + 0.627 + ], + "angle": 0, + "content": "As in Fig. 3, random noise is applied to either global or local motion parameters respectively. We generate a random rotation angle \\(\\alpha\\) and a random unit vector \\(\\mathbf{u}\\) for the rotation direction using random noise. They are used to create the Lie algebra \\(\\xi\\). 
Subsequently, the Lie algebra \\(\\xi\\) is converted into a rotation matrix \\(\\mathbf{M}\\) using the Rodrigues' rotation formula and applied to the original rotation matrix \\(\\mathbf{R}\\) to obtain a new rotation matrix \\(\\mathbf{R}^*\\), as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.092, + 0.628, + 0.47, + 0.695 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {M} = \\mathbf {I} + \\sin (| \\pmb {\\xi} |) \\frac {\\pmb {\\xi}}{| \\pmb {\\xi} |} _ {\\times} + (1 - \\cos (| \\pmb {\\xi} |)) \\left(\\frac {\\pmb {\\xi}}{| \\pmb {\\xi} |} _ {\\times}\\right) ^ {2}, \\quad (5) \\\\ \\boldsymbol {\\xi} = \\alpha \\boldsymbol {u}, \\mathbf {R} ^ {*} = \\mathbf {R M}. \\tag {6} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.704, + 0.47, + 0.869 + ], + "angle": 0, + "content": "The Lie algebra element \\(\\xi\\), the product of scalar \\(\\alpha\\) and unit vector \\(\\mathbf{u}\\), signifies rotation magnitude and direction, with \\(\\alpha\\) and \\(\\mathbf{u}\\) representing rotation angle and axis, respectively. \\(\\mathbf{I}\\) is identity matrix, and \\(\\xi \\times \\xi\\) is the antisymmetric matrix of \\(\\xi\\). Lie algebra intuitively and conveniently represents minor \\(SO(3)\\) group variations. Rodrigues' rotation formula, mapping from the Lie algebra to the Lie group, facilitates the transformation of angle-based noise into a form directly applicable to the rotation matrix. This transformation brings mathematical convenience, making the update of the rotation matrix concise and efficient." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Importantly, our data augmentation targets dynamically moving objects, because persistently adding varied motion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.184 + ], + "angle": 0, + "content": "noise to bounding boxes perceived as static objects may disrupt original data distribution. Moreover, the translation and motion probability are also augmented. As depicted in Fig. 3, we generate noise within an appropriate range and directly add it to the translation matrix or motion probability, resulting in augmented translation and motion probability." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.191, + 0.875, + 0.207 + ], + "angle": 0, + "content": "3.4. Pseudo Label Generation for 3D Scene Flow" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.215, + 0.892, + 0.291 + ], + "angle": 0, + "content": "The motion parameters are fed into the pseudo label generation module to obtain point-wise 3D scene flow labels. The specific process of the label generation module is shown in Fig. 4. We determine the motion state of the 3D bounding box through the motion probability \\( P_{M} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.511, + 0.3, + 0.892, + 0.34 + ], + "angle": 0, + "content": "\\[\nP C _ {T} ^ {*} = \\left\\{ \\begin{array}{l l} P C _ {S} \\times R _ {e g o} ^ {*} + t _ {e g o} ^ {*} & \\text {i f} P _ {M} ^ {*} < \\mathbb {J}, \\\\ P C _ {S} ^ {e g o} \\times R _ {\\text {p e r b o x}} ^ {*} + t _ {\\text {p e r b o x}} ^ {*} & \\text {i f} P _ {M} ^ {*} \\geq \\mathbb {J}. \\end{array} \\right. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.349, + 0.892, + 0.622 + ], + "angle": 0, + "content": "\\(PC_{S}^{ego}\\) is the points in the dynamic box from the source point cloud, transformed through global rotation and translation. 
When \\(P_{M}\\) is less than threshold \\(\\mathbb{J}\\), the current bounding box is deemed static. Conversely, if \\(P_{M}\\) exceeds a predefined threshold \\(\\mathbb{J}\\), the current bounding box is considered dynamic. For static boxes, based on the existing global motion, we apply a uniform noise to all static boxes to simulate various ego-motion patterns. By adding minute noise to the motion probability \\(P_{M}\\) for each box, we can construct various motion states and show a greater variety of scene motions. Before transforming the dynamic boxes, a prior global transformation of all points is required. For dynamic bounding boxes, we add various noises to their existing motion, generating new rotations and translations, thereby creating various motion patterns. We warp the source point cloud within each box to the target frame using the box's motion parameters, obtaining the pseudo target point cloud \\(PC_{T}^{*}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.622, + 0.892, + 0.682 + ], + "angle": 0, + "content": "The generated pseudo target point cloud \\(PC_T^*\\) and the real source frame point cloud \\(PC_S\\) have a perfect correspondence. Therefore, the 3D scene flow labels can be easily obtained by directly subtracting \\(PC_S\\) from \\(PC_T^*\\):" + }, + { + "type": "equation", + "bbox": [ + 0.624, + 0.693, + 0.891, + 0.71 + ], + "angle": 0, + "content": "\\[\nS F = P C _ {T} ^ {*} - P C _ {S}. \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.719, + 0.892, + 0.794 + ], + "angle": 0, + "content": "The generated scene flow labels capture various motion patterns from real autonomous driving scenes. They help the model understand and adjust to complex driving conditions. This improves the model's ability to generalize in unfamiliar real-world scenarios." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.808, + 0.634, + 0.825 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.833, + 0.602, + 0.847 + ], + "angle": 0, + "content": "4.1. Datasets" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Test Datasets: Graph prior [35] introduces two autonomous driving datasets, Argoverse scene flow [3] and nuScenes scene flow [2] datasets. Scene flow labels in the" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15177" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.895, + 0.146 + ], + "angle": 0, + "content": "Table 1. Comparison of our method with the best-performing methods on multiple datasets [2, 3, 10] and metrics. 'None', 'Weak', 'Self', and 'Full' represent non-learning, weakly supervised, self-supervised, and supervised methods, respectively. \"↑\" means higher is better, and \"↓\" means lower is better. Our method uses GMSF [51] as a baseline and combines it with our proposed pseudo-auto-labelling framework, 3DSFlabelling. Despite the use of a supervised learning structure, no ground truth is utilized in training." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.15, + 0.891, + 0.357 + ], + "angle": 0, + "content": "
MethodSup.LiDAR KITTI Scene Flow [10]Argoverse Scene Flow [3]nuScenes Scene Flow [2]
EPE3D↓Acc3DS↑Acc3DR↑Outliers↓EPE3D↓Acc3DS↑Acc3DR↑Outliers↓EPE3D↓Acc3DS↑Acc3DR↑Outliers↓
Graph prior [35]None----0.25700.25240.4760-0.28900.20120.4354-
RSF [5]None0.08500.88300.92900.2390----0.10700.71700.86200.3210
NSFP [23]None0.14200.68800.82600.38500.15900.38430.6308-0.17510.35180.63450.5270
R3DSF [12]Weak0.09400.78400.88500.31400.41600.34520.43100.5580----
FlowNet3D [28]Full0.72200.03000.12200.96500.45500.01340.06120.73600.50500.21200.10810.6200
PointPWC [49]Full0.39000.38700.55000.65300.42880.04620.21640.91990.78830.02870.13330.9410
DCA-SRSFE [18]Full0.59000.15050.33310.84850.79570.07120.14680.97990.70420.05380.11830.9766
FLOT [36]Full0.65320.15540.31300.83710.24910.09460.31260.86570.48580.08210.26690.8547
MSBRN [4]Full0.01390.97520.98470.14330.86910.24320.28540.75970.61370.23540.29240.7638
GMSF [51]Full0.19000.29620.55020.61717.27760.00360.01440.99309.42310.00340.00860.9943
Mittal et al. [32]Self0.97730.00960.05240.99360.65200.03190.11590.96210.84220.02890.10410.9615
Jiang et al. [17]Self0.49080.20520.42380.72860.25170.12360.36660.81140.47090.10340.31750.8191
OursSelf0.00780.99240.99470.13280.00930.97800.98800.13020.01850.95340.97130.1670
" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.359, + 0.892, + 0.67 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.673, + 0.895, + 0.732 + ], + "angle": 0, + "content": "Figure 5. Registration visualization results of our method (GMSF [51] + 3DSFlabelling) and baselines on the LiDAR KITTI and Argoverse datasets [3, 10]. The estimated target point cloud \\(PC_{sw}\\) is derived from warping the source point cloud \\(PC_S\\) to the target point cloud via 3D scene flow. The larger the overlap between \\(PC_{sw}\\) (blue) and the target point cloud \\(PC_T\\) (green), the higher the predicted accuracy of the scene flow. Local areas are zoomed in for better visibility. Our 3D scene flow estimation notably improves performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.742, + 0.473, + 0.893 + ], + "angle": 0, + "content": "datasets are derived from LiDAR point clouds, object trajectories, map data, and vehicle pose. The datasets contain 212 and 310 test samples, respectively. R3DSF [12] introduces the lidarKITTI [10], which shares 142 scenes with stereoKITTI, collected via Velodyne's 64-beam LiDAR. Unlike FT3D [29] and stereoKITTI [30, 31], the point clouds from lidarKITTI are sparsely distributed. Note that LiDAR scene flow ground truths contain errors. We mitigate this by fusing the ground truth with the first point cloud to create a corrected second frame for network input, thus" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.742, + 0.68, + 0.758 + ], + "angle": 0, + "content": "avoiding evaluation errors." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Training Datasets used in previous methods: FT3D [29] and stereoKITTI [30, 31] are the frequently used datasets for training previous 3D scene flow models [4, 28, 36, 49, 51]. FT3D consists of 19,640 training pairs, while stereoKITTI [30, 31] contains 142 dense point clouds, with the first 100 frames used for model fine-tuning in some works [23, 32]. Some works [23, 28, 32, 35, 49] train their models on 2,691 pairs of Argoverse [3] data and 1,513 pairs of nuScenes [2] data, with 3D scene flow annotations fol" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "15178" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.472, + 0.187 + ], + "angle": 0, + "content": "Table 2. The comparative results between our method and baseline. “↑” signifies accuracy enhancement. In real-world LiDAR scenarios, our method markedly improves the 3D flow estimation accuracy across three datasets [2, 3, 30] on the three baselines. This demonstrates that the proposed pseudo-auto-labelling framework can substantially boost the accuracy of existing methods, even without the need for ground truth." + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.192, + 0.462, + 0.427 + ], + "angle": 0, + "content": "
DatasetMethodEPE3D↓Acc3DS↑Acc3DR↑
LiDAR KITTIFLOT [36]0.65320.15540.3130
FLOT+3DSFlabelling0.0189 ↑ 97.1%0.96660.9792
MSBRN [4]0.01390.97520.9847
MSBRN+3DSFlabelling0.0123 ↑ 11.5%0.97970.9868
GMSF [51]0.19000.29620.5502
GMSF+3DSFlabelling0.0078 ↑ 95.8%0.99240.9947
ArgoverseFLOT [36]0.24910.09460.3126
FLOT+3DSFlabelling0.0107 ↑ 95.7%0.97110.9862
MSBRN [4]0.86910.24320.2854
MSBRN+3DSFlabelling0.0150 ↑ 98.3%0.94820.9601
GMSF [51]7.27760.00360.0144
GMSF+3DSFlabelling0.0093 ↑ 99.9%0.97800.9880
nuScenesFLOT [36]0.48580.08210.2669
FLOT+3DSFlabelling0.0554 ↑ 88.6%0.76010.8909
MSBRN [4]0.61370.23540.2924
MSBRN+3DSFlabelling0.0235 ↑ 96.2%0.94130.9604
GMSF [51]9.42310.00340.0086
GMSF+3DSFlabelling0.0185 ↑ 99.8%0.95340.9713
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.446, + 0.47, + 0.506 + ], + "angle": 0, + "content": "lowing the settings of the Graph prior [35]. The R3DSF [12] training set utilizes FT3D and semanticKITTI datasets [1], relying on ego-motion labels and semantic segmentation labels from semanticKITTI." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.507, + 0.471, + 0.703 + ], + "angle": 0, + "content": "Training Datasets used in our methods: Because we do not need any labels for training data, we use raw LiDAR point clouds sampled from raw data. For testing on the lidarKITTI [31], we use LiDAR point clouds from sequences 00 to 09 of the KITTI Odometry dataset [11] for auto-labelling and training. For testing on the nuScenes scene flow dataset [2], we randomly sample 50,000 pairs of LiDAR point clouds from the 350,000 LiDAR point clouds in the nuScenes sweeps dataset [2]. For testing on the Argoverse scene flow Dataset [3], we use the LiDAR point clouds from sequences 01 to 05 of the Argoverse 2 Sensor Dataset [3] for auto-labelling and training. In the selection of training data, we exclude the test scenes." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.712, + 0.295, + 0.728 + ], + "angle": 0, + "content": "4.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.903 + ], + "angle": 0, + "content": "The effectiveness of the proposed auto-labelling framework is demonstrated using three prominent deep learning models: FLOT [36], MSBRN [4], and GMSF [51]. These models use optimal transport, coarse-to-fine strategies, and transformer architectures respectively. Hyperparameters consistent with the original networks are employed during the training process. The input point clouds, from which ground points have been filtered, are randomly sampled to incorporate 8192 points. The LiDAR point cloud data from KITTI [10] is confined to the front view perspective, maintaining consistency with previous studies [12]." + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.089, + 0.892, + 0.118 + ], + "angle": 0, + "content": "Table 3. Model comparison on the Argoverse dataset [3]. 'M' represents millions of parameters, and time is in milliseconds." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.121, + 0.89, + 0.254 + ], + "angle": 0, + "content": "
MethodSup.EPE3D↓Acc3DS↑Acc3DR↑Time↓Params.↓
PointPWC [49]Full0.42880.04620.2164147 ms7.7 M
PV-RAFT [47]Full10.7450.02000.0100169 ms-
R3DSF [12]Weak0.41600.34520.4310113 ms8.0 M
FlowStep3D [19]Self0.84500.01000.0800729 ms-
NSFP [23]None0.15900.38430.63082864 ms-
Fast-NSF [24]None0.11800.69930.8355124 ms-
MBNSF [41]None0.05100.79360.92375000+ ms-
MSBRN+3DSFlabellingSelf0.01500.94820.9601341 ms3.5 M
GMSF+3DSFlabellingSelf0.00930.97800.9880251 ms6.0 M
FLOT+3DSFlabellingSelf0.01070.97110.986278 ms0.1 M
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.275, + 0.892, + 0.32 + ], + "angle": 0, + "content": "Furthermore, we utilize four scene flow evaluation metrics [28, 36, 49, 51]: Average Endpoint Error (EPE3D), ACC3DS, ACC3DR, and Outliers." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.33, + 0.694, + 0.346 + ], + "angle": 0, + "content": "4.3.Quantitative Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.354, + 0.892, + 0.61 + ], + "angle": 0, + "content": "The experimental results are presented in Table 1. We list the best-performing optimized [5, 12, 23, 35], self-supervised [17, 32], and supervised [18, 28, 49] models in the table. Our method achieves excellent performance on all datasets [2, 3, 10] and metrics. Particularly, compared to the baselines [51], there is an order of magnitude reduction in EPE3D on most datasets. The proposed auto-labelling method generates effective scene flow labels, perfectly simulating the rigid motion of various objects in the real world. The designed global-local data augmentation further expands the 3D scene flow labels. As a result, our method significantly outperforms other methods. We have also applied this plug-and-play auto-labelling framework for 3D scene flow (3DSFlabelling) to three existing models, as demonstrated in Table 2. The proposed method significantly enhances the accuracy of 3D scene flow estimation in these models [4, 36, 51]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.611, + 0.892, + 0.792 + ], + "angle": 0, + "content": "Moreover, many existing works utilize a large number of model parameters [12, 47, 49] or adopt optimization methods [23, 24, 41] during testing for a more accurate estimation of 3D scene flow. These methods are highly time-consuming, and cannot ensure accuracy when reducing model parameters. Our proposed 3DSFlabelling effectively addresses this challenge. In Table 3, by using the small-parameter model FLOT (iter=1) [36] combined with our auto-labelling framework, we surpass all current supervised, unsupervised, weakly supervised, and optimized methods. This strongly validates the effectiveness of generating real-world labels in solving the challenges." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.802, + 0.637, + 0.817 + ], + "angle": 0, + "content": "4.4. Visualization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Fig. 5 visualizes the precision of our method and others on two datasets [3, 31]. FLOT [36], with its mathematically optimal transport approach to matching point clouds, exhibits superior generalization. 
MSBRN [4], leveraging a multi-scale bidirectional recurrent network, robustly esti" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15179" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.09, + 0.246, + 0.101 + ], + "angle": 0, + "content": "Scene 250 in nuScenes" + }, + { + "type": "image_caption", + "bbox": [ + 0.298, + 0.09, + 0.428, + 0.101 + ], + "angle": 0, + "content": "Scene 115 in nuScenes" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.102, + 0.27, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.272, + 0.102, + 0.457, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.183, + 0.27, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.272, + 0.183, + 0.457, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.265, + 0.269, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.272, + 0.265, + 0.457, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.346, + 0.269, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.272, + 0.346, + 0.457, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.446, + 0.47, + 0.53 + ], + "angle": 0, + "content": "Figure 6. Error visualizing of our method (GMSF+3DSFlabelling) and baselines on the nuScenes dataset [2]. Using 3D EndPoint Error (EPE3D) as the metric, we categorize the error into six levels. Combining GMSF [51] with our proposed 3DSFlabelling, we manage to keep the EPE3D for most points within 0.02 meters, clearly outperforming other methods largely." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.545, + 0.47, + 0.601 + ], + "angle": 0, + "content": "Table 4. Generalization comparison experiment. \"A\", \"N\", and \"K\" represent the Argoverse [3], nuScenes [2], and KITTI [10] datasets. \\(\\langle \\sim \\rangle\\) representing a model trained on the dataset on the left and directly evaluated on another new dataset on the right." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.605, + 0.477, + 0.706 + ], + "angle": 0, + "content": "
MethodSup.A~→NN~→AA~→KN~→K
EPE3DAcc3DSEPE3DAcc3DSEPE3DAcc3DSEPE3DAcc3DS
PointPWC [49]Self0.59110.08440.70430.02810.86320.01190.93070.0027
RigidFlow [12]Self0.11350.34450.39910.01520.36450.21180.50420.0141
MSBRN [4]Full0.53090.00550.37610.00980.60360.00560.49260.0081
GMSF [51]Full0.03340.90370.30780.12780.04420.87640.05740.8135
OursSelf0.01150.96930.02640.91920.04140.90200.02080.9595
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.734, + 0.468, + 0.884 + ], + "angle": 0, + "content": "mates 3D scene flow on KITTI. GMSF [51] utilizes a transformer architecture for powerful fitting learning, but it lacks cross-domain generalization. The proposed method consistently shows better alignment between predicted and target point clouds across all scenes. Additionally, a visualization of the scene flow error on the nuScenes dataset is presented in Fig. 6. In two randomly selected test scenes, our method keeps the scene flow EPE3D mostly within \\(0.02m\\), clearly outperforming other baselines. More visual comparisons will be presented in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Table 4 provides quantitative results, demonstrating the" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.09, + 0.892, + 0.145 + ], + "angle": 0, + "content": "Table 5. Ablation study of 3D scene flow data augmentation. \"No Aug\" and \"Trad. Aug\" represents no data augmentation and traditional data augmentation [49], respectively. Our data augmentation method has a very positive impact on the model." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.15, + 0.89, + 0.273 + ], + "angle": 0, + "content": "
ModelData Augmentation MethodsKITTIArgoversenuScenes
No Aug Trad. AugOur AugEPE3D ACC3DSEPE3D ACC3DSEPE3D ACC3DSEPE3D ACC3DS
Ours (FLOT)--0.06010.72910.04920.80150.73640.6642
--0.05400.76220.04300.86790.06100.7417
--0.01890.96660.01070.97110.05540.7601
Ours (MSBRN)--0.01310.97810.01800.94110.07970.8510
--0.01290.97900.01770.94270.07930.8547
--0.01230.97970.01500.94820.02350.9413
Ours (GMSF)--0.01030.99010.01390.96370.02130.9468
--0.00810.99180.01370.96630.02120.9473
--0.00780.99240.00930.97800.01850.9534
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.295, + 0.892, + 0.431 + ], + "angle": 0, + "content": "generalization of our 3DSFlabelling combined with the existing method (GMSF [51]) on new datasets. For instance, we train a model on the Argoverse dataset and directly evaluate it on the nuScenes dataset. These two datasets belong to different domains, posing a domain generalization problem. The results in Table 4 indicate that our framework performs exceptionally well on the new dataset, consistently achieving an EPE3D of less than \\(5cm\\), and even reaching an average endpoint error of less than \\(2cm\\)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.438, + 0.654, + 0.455 + ], + "angle": 0, + "content": "4.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.462, + 0.892, + 0.628 + ], + "angle": 0, + "content": "This section explores the advantages of global-local data augmentation. In Table 5, we compare existing 3D scene flow data augmentation [49] with our proposed global-local data augmentation method. Our augmentation strategy shows significant enhancement in all evaluation metrics. This is attributed to the effective simulation of various motion patterns in autonomous driving by global-local data augmentation. The introduction of various motion transformations excellently utilizes the limited training data to extend a variety of 3D scene flow styles. More ablation studies are referring to the supplement material." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.64, + 0.62, + 0.656 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.665, + 0.892, + 0.846 + ], + "angle": 0, + "content": "We package 3D point clouds into boxes with different motion attributes. By optimizing the motion parameters for each box and warping the source point cloud into the target point cloud, we create pseudo 3D scene flow labels. We also design a global-local data augmentation method, introducing various scene motion patterns and significantly increasing the diversity and quantity of 3D scene flow labels. Tests on multiple real-world datasets show that our 3D scene flow auto-labelling significantly enhances the performance of existing models. Importantly, this approach eliminates the need for 3D scene flow estimation models to depend on manually annotated 3D scene flow labels." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.852, + 0.688, + 0.869 + ], + "angle": 0, + "content": "6. Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.872, + 0.891, + 0.888 + ], + "angle": 0, + "content": "This work was supported by PhiGent Robotics." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "15180" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.197 + ], + "angle": 0, + "content": "[1] Jens Behley, Martin Garbade, Andres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Jurgen Gall. Semantickitti: A dataset for semantic scene understanding of lidar sequences. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9297-9307, 2019. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.2, + 0.472, + 0.282 + ], + "angle": 0, + "content": "[2] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.284, + 0.472, + 0.367 + ], + "angle": 0, + "content": "[3] Ming-Fang Chang, John Lambert, Patsorn Sangkloy, Jagjeet Singh, Slawomir Bak, Andrew Hartnett, De Wang, Peter Carr, Simon Lucey, Deva Ramanan, et al. Argoverse: 3d tracking and forecasting with rich maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8748-8757, 2019. 1, 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.368, + 0.47, + 0.436 + ], + "angle": 0, + "content": "[4] Wencan Cheng and Jong Hwan Ko. Multi-scale bidirectional recurrent network with hybrid correlation for point cloud based scene flow estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10041-10050, 2023. 1, 2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.438, + 0.471, + 0.492 + ], + "angle": 0, + "content": "[5] David Deng and Avideh Zakhor. Rsf: Optimizing rigid scene flow from 3d point clouds without labels. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1277-1286, 2023. 2, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.494, + 0.469, + 0.548 + ], + "angle": 0, + "content": "[6] Fangqiang Ding, Zhijun Pan, Yimin Deng, Jianning Deng, and Chris Xiaoxuan Lu. Self-supervised scene flow estimation with 4-d automotive radar. IEEE Robotics and Automation Letters, 7(3):8233-8240, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.551, + 0.469, + 0.619 + ], + "angle": 0, + "content": "[7] Guanting Dong, Yueyi Zhang, Hanlin Li, Xiaoyan Sun, and Zhiwei Xiong. Exploiting rigidity constraints for lidar scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12776-12785, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.621, + 0.469, + 0.703 + ], + "angle": 0, + "content": "[8] Emeç Erçelik, Ekim Yurtsever, Mingyu Liu, Zhijie Yang, Hanzhen Zhang, Pinar Topçam, Maximilian Listl, Yilmaz Kaan Cayli, and Alois Knoll. 3d object detection with a self-supervised lidar scene flow backbone. In European Conference on Computer Vision, pages 247-265. Springer, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.705, + 0.469, + 0.759 + ], + "angle": 0, + "content": "[9] Jingyun Fu, Zhiyu Xiang, Chengyu Qiao, and Tingming Bai. Pt-flownet: Scene flow estimation on point clouds with point transformer. IEEE Robotics and Automation Letters, 8(5): 2566-2573, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.469, + 0.815 + ], + "angle": 0, + "content": "[10] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In 2012 IEEE conference on computer vision and pattern recognition, pages 3354-3361. IEEE, 2012. 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[11] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. 
Vision meets robotics: The kitti dataset. The International Journal of Robotics Research, 32(11):1231-1237, 2013. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.902 + ], + "angle": 0, + "content": "[12] Zan Gojcic, Or Litany, Andreas Wieser, Leonidas J Guibas, and Tolga Birdal. Weakly supervised learning of rigid 3d" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "scene flow. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5692-5703, 2021. 3, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[13] Xiuye Gu, Yijie Wang, Chongruo Wu, Yong Jae Lee, and Panqu Wang. Hplflownet: Hierarchical permutohedral lattice flownet for scene flow estimation on large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3254-3263, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.205, + 0.892, + 0.26 + ], + "angle": 0, + "content": "[14] Shengyu Huang, Zan Gojcic, Jiahui Huang, Andreas Wieser, and Konrad Schindler. Dynamic 3d scene analysis by point cloud accumulation. In European Conference on Computer Vision, pages 674-690. Springer, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.261, + 0.892, + 0.342 + ], + "angle": 0, + "content": "[15] Hafsa Iqbal, Abdulla Al-Kaff, Pablo Marin, Lucio Marcenaro, David Martin Gomez, and Carlo Regazzoni. Detection of abnormal motion by estimating scene flows of point clouds for autonomous driving. In 2021 IEEE International Intelligent Transportation Systems Conference (ITSC), pages 2788-2793. IEEE, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.344, + 0.892, + 0.399 + ], + "angle": 0, + "content": "[16] Chaokang Jiang, Guangming Wang, Jinxing Wu, Yanzi Miao, and Hesheng Wang. Ffpa-net: Efficient feature fusion with projection awareness for 3d object detection. arXiv preprint arXiv:2209.07419, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.4, + 0.892, + 0.455 + ], + "angle": 0, + "content": "[17] Chaokang Jiang, Guangming Wang, Yanzi Miao, and Hesheng Wang. 3-d scene flow estimation on pseudo-lidar: Bridging the gap on estimating point motion. IEEE Transactions on Industrial Informatics, 19(6):7346-7354, 2023. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.456, + 0.892, + 0.537 + ], + "angle": 0, + "content": "[18] Zhao Jin, Yinjie Lei, Naveed Akhtar, Haifeng Li, and Munawar Hayat. Deformation and correspondence aware unsupervised synthetic-to-real scene flow estimation for point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7233-7243, 2022. 1, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.539, + 0.892, + 0.607 + ], + "angle": 0, + "content": "[19] Yair Kittenplon, Yonina C Eldar, and Dan Raviv. Flowstep3d: Model unrolling for self-supervised scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4114-4123, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.609, + 0.892, + 0.678 + ], + "angle": 0, + "content": "[20] Itai Lang, Dror Aiger, Forrester Cole, Shai Avidan, and Michael Rubinstein. Scoop: Self-supervised correspondence and optimization-based scene flow. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5281-5290, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.892, + 0.734 + ], + "angle": 0, + "content": "[21] Bing Li, Cheng Zheng, Silvio Giancola, and Bernard Ghanem. Sctn: Sparse convolution-transformer network for scene flow estimation. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1254–1262, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.735, + 0.892, + 0.803 + ], + "angle": 0, + "content": "[22] Ruibo Li, Chi Zhang, Guosheng Lin, Zhe Wang, and Chunhua Shen. Rigidflow: Self-supervised scene flow learning on point clouds by local rigidity prior. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16959-16968, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.804, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[23] Xueqian Li, Jhony Kaesemodel Pontes, and Simon Lucey. Neural scene flow prior. Advances in Neural Information Processing Systems, 34:7838-7851, 2021. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[24] Xueqian Li, Jianqiao Zheng, Francesco Ferroni, Jhony Kaesemodel Pontes, and Simon Lucey. Fast neural scene flow. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 9878-9890, 2023. 1, 3, 7" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "15181" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[25] Jiuming Liu, Guangming Wang, Chaokang Jiang, Zhe Liu, and Hesheng Wang. Translo: A window-based masked point transformer framework for large-scale lidar odometry. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1683-1691, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.164, + 0.472, + 0.245 + ], + "angle": 0, + "content": "[26] Jiuming Liu, Guangming Wang, Zhe Liu, Chaokang Jiang, Marc Pollefeys, and Hesheng Wang. Regformer: an efficient projection-aware transformer network for large-scale point cloud registration. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8451-8460, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.249, + 0.472, + 0.317 + ], + "angle": 0, + "content": "[27] Jiuming Liu, Guangming Wang, Weicai Ye, Chaokang Jiang, Jinru Han, Zhe Liu, Guofeng Zhang, Dalong Du, and Hesheng Wang. Difflow3d: Toward robust uncertainty-aware scene flow estimation with diffusion model. arXiv preprint arXiv:2311.17456, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.32, + 0.47, + 0.375 + ], + "angle": 0, + "content": "[28] Xingyu Liu, Charles R Qi, and Leonidas J Guibas. Flownet3d: Learning scene flow in 3d point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 529-537, 2019. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.377, + 0.47, + 0.459 + ], + "angle": 0, + "content": "[29] Nikolaus Mayer, Eddy Ilg, Philip Hausser, Philipp Fischer, Daniel Cremers, Alexey Dosovitskiy, and Thomas Brox. A large dataset to train convolutional networks for disparity, optical flow, and scene flow estimation. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4040-4048, 2016. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.462, + 0.472, + 0.516 + ], + "angle": 0, + "content": "[30] Moritz Menze, Christian Heipke, and Andreas Geiger. Joint 3d estimation of vehicles and scene flow. ISPRS annals of the photogrammetry, remote sensing and spatial information sciences, 2:427-434, 2015. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.519, + 0.47, + 0.559 + ], + "angle": 0, + "content": "[31] Moritz Menze, Christian Heipke, and Andreas Geiger. Object scene flow. ISPRS Journal of Photogrammetry and Remote Sensing, 140:60-76, 2018. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.562, + 0.47, + 0.617 + ], + "angle": 0, + "content": "[32] Himangi Mittal, Brian Okorn, and David Held. Just go with the flow: Self-supervised scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11177-11185, 2020. 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.619, + 0.47, + 0.687 + ], + "angle": 0, + "content": "[33] Mahyar Najibi, Jingwei Ji, Yin Zhou, Charles R Qi, Xinchen Yan, Scott Ettinger, and Dragomir Anguelov. Motion inspired unsupervised perception and prediction in autonomous driving. In European Conference on Computer Vision, pages 424-443. Springer, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.69, + 0.47, + 0.772 + ], + "angle": 0, + "content": "[34] Chensheng Peng, Guangming Wang, Xian Wan Lo, Xinrui Wu, Chenfeng Xu, Masayoshi Tomizuka, Wei Zhan, and Hesheng Wang. Delflow: Dense efficient learning of scene flow for large-scale point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16901-16910, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.47, + 0.829 + ], + "angle": 0, + "content": "[35] Jhony Kaesemodel Pontes, James Hays, and Simon Lucey. Scene flow from point clouds with or without learning. In 2020 International Conference on 3D Vision (3DV), pages 261-270, 2020. 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.47, + 0.9 + ], + "angle": 0, + "content": "[36] Gilles Puy, Alexandre Boulch, and Renaud Marlet. Flot: Scene flow on point clouds guided by optimal transport. In ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XXVIII 16, pages 527–544, 2020. 1, 2, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.16 + ], + "angle": 0, + "content": "[37] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.163, + 0.892, + 0.217 + ], + "angle": 0, + "content": "[38] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.219, + 0.892, + 0.273 + ], + "angle": 0, + "content": "[39] Yaqi Shen, Le Hui, Jin Xie, and Jian Yang. Self-supervised 3d scene flow estimation guided by superpoints. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5271-5280, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.275, + 0.892, + 0.328 + ], + "angle": 0, + "content": "[40] Ivan Tishchenko, Sandro Lombardi, Martin R Oswald, and Marc Pollefeys. Self-supervised learning of non-rigid residual flow and ego-motion. In 2020 international conference on 3D vision (3DV), pages 150-159. IEEE, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.33, + 0.892, + 0.37 + ], + "angle": 0, + "content": "[41] Kavisha Vidanapathirana, Shin-Fang Chng, Xueqian Li, and Simon Lucey. Multi-body neural scene flow. arXiv preprint arXiv:2310.10301, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.372, + 0.892, + 0.426 + ], + "angle": 0, + "content": "[42] Guangming Wang, Xinrui Wu, Zhe Liu, and Hesheng Wang. Hierarchical attention learning of scene flow in 3d point clouds. IEEE Transactions on Image Processing, 30:5168-5181, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.428, + 0.892, + 0.482 + ], + "angle": 0, + "content": "[43] Guangming Wang, Yunzhe Hu, Zhe Liu, Yiyang Zhou, Masayoshi Tomizuka, Wei Zhan, and Hesheng Wang. What matters for 3d scene flow network. In European Conference on Computer Vision, pages 38-55. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.484, + 0.892, + 0.538 + ], + "angle": 0, + "content": "[44] Guangming Wang, Chaokang Jiang, Zehang Shen, Yanzi Miao, and Hesheng Wang. Sfgan: Unsupervised generative adversarial learning of 3d scene flow from the 3d scene self. Advanced Intelligent Systems, 4(4):2100197, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.54, + 0.892, + 0.593 + ], + "angle": 0, + "content": "[45] Yun Wang, Cheng Chi, and Xin Yang. Exploiting implicit rigidity constraints via weight-sharing aggregation for scene flow estimation from point clouds. arXiv preprint arXiv:2303.02454, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.596, + 0.892, + 0.65 + ], + "angle": 0, + "content": "[46] Ziyi Wang, Yi Wei, Yongming Rao, Jie Zhou, and Jiwen Lu. 3d point-voxel correlation fields for scene flow estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.652, + 0.892, + 0.718 + ], + "angle": 0, + "content": "[47] Yi Wei, Ziyi Wang, Yongming Rao, Jiwen Lu, and Jie Zhou. Pv-raft: Point-voxel correlation fields for scene flow estimation of point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6954–6963, 2021. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.721, + 0.892, + 0.789 + ], + "angle": 0, + "content": "[48] Pengxiang Wu, Siheng Chen, and Dimitris N Metaxas. Motionnet: Joint perception and motion prediction for autonomous driving based on bird's eye view maps. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11385-11395, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.79, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[49] Wenxuan Wu, Zhi Yuan Wang, Zhuwen Li, Wei Liu, and Li Fuxin. Pointpwc-net: Cost volume on point clouds for (self-) supervised scene flow estimation. In European Conference on Computer Vision, pages 88-107, 2020. 
2, 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.847, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[50] Yi Zhang, Yuwen Ye, Zhiyu Xiang, and Jiaqi Gu. Sdp-net: Scene flow based real-time object detection and prediction from sequential 3d point clouds. In Proceedings of the Asian Conference on Computer Vision, 2020. 1" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "15182" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[51] Yushan Zhang, Johan Edstedt, Bastian Wandt, Per-Erik Forssén, Maria Magnusson, and Michael Felsberg. Gmsf: Global matching scene flow. arXiv preprint arXiv:2305.17432, 2023. 1, 2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.469, + 0.19 + ], + "angle": 0, + "content": "[52] Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Open3d: A modern library for 3d data processing. arXiv preprint arXiv:1801.09847, 2018. 3" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.092, + 0.47, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.956 + ], + "angle": 0, + "content": "15183" + } + ] +] \ No newline at end of file diff --git a/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/128911ab-0f2f-4697-8895-080e5b45c36b_origin.pdf b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/128911ab-0f2f-4697-8895-080e5b45c36b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..56e6942307d82e51a791cede14b47e134fc855b8 --- /dev/null +++ b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/128911ab-0f2f-4697-8895-080e5b45c36b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2946cb1b933b38a4ee7c564b330bb88100bdae10b1256a03ae337e44125f26c1 +size 5190756 diff --git a/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/full.md b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/full.md new file mode 100644 index 0000000000000000000000000000000000000000..6d41e8f024f1cbb0dc91dc4adab8f3d5cf41e43c --- /dev/null +++ b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/full.md @@ -0,0 +1,297 @@ +# 3DSFLabelling: Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling + +Chaokang Jiang $^{1}$ , Guangming Wang $^{2}$ , Jiuming Liu $^{3}$ , Hesheng Wang $^{3}$ , Zhuang Ma $^{1}$ , Zhenqiang Liu $^{1}$ , Zhujin Liang $^{1}$ , Yi Shan $^{1}$ , Dalong Du $^{1\dagger}$ + +1PhiGent Robotics, 2University of Cambridge, 3Shanghai Jiaotong University + +ts20060079a31@cumt.edu.cn, gw462@cam.ac.uk, {liujiuming, wanghesheng}@sjtu.edu.cn, mazhuang097@outlook.com, {zhenqiang.liu, zhujin.liang, yi.shan, dalong.du}@phigent.ai jiangchaokang.github.io/3DSFLabelling-Page + +# Abstract + +Learning 3D scene flow from LiDAR point clouds presents significant difficulties, including poor generalization from synthetic datasets to real scenes, scarcity of real-world 3D labels, and poor performance on real sparse LiDAR point clouds. We present a novel approach from the perspective of auto-labelling, aiming to generate a large number of 3D scene flow pseudo labels for real-world LiDAR point clouds. 
Specifically, we employ the assumption of rigid body motion to simulate potential object-level rigid movements in autonomous driving scenarios. By updating different motion attributes for multiple anchor boxes, the rigid motion decomposition is obtained for the whole scene. Furthermore, we develop a novel 3D scene flow data augmentation method for global and local motion. By perfectly synthesizing target point clouds based on augmented motion parameters, we easily obtain abundant 3D scene flow labels for point clouds highly consistent with real scenarios. On multiple real-world datasets including LiDAR KITTI, nuScenes, and Argoverse, our method outperforms all previous supervised and unsupervised methods without requiring manual labelling. Impressively, our method achieves a tenfold reduction in the EPE3D metric on the LiDAR KITTI dataset, reducing it from $0.190m$ to a mere $0.008m$ error. + +# 1. Introduction + +3D scene flow estimation, which deduces a per-point motion field from consecutive frames of point clouds, serves a critical role across various applications, encompassing motion prediction [33, 48], anomaly motion detection [15], 3D object detection [8, 16, 50], and dynamic point cloud accumulation [14]. With the advance of deep learning on point clouds [37, 38], many works [4, 9, 18, 27, 36, 39, 51] have developed learning-based + +![](images/19952b9b470f368f6e4bc0b9866aded47f2609d58f00c7a451bae351021bd4f4.jpg) +Figure 1. The proposed 3D scene flow pseudo-auto-labelling framework. Given point clouds and initial bounding boxes, both global and local motion parameters are iteratively optimized. Diverse motion patterns are augmented by randomly adjusting these motion parameters, thereby creating a diverse and realistic set of motion labels for the training of 3D scene flow estimation models. + +methods to estimate per-point motion from 3D point clouds. Some state-of-the-art methods [4, 39, 51] have reduced the average 3D Endpoint Error (EPE3D) to a few centimetres on the KITTI Scene Flow dataset (stereoKITTI) [30, 31]. However, due to the scarcity of scene flow labels, these methods rely heavily on synthetic datasets such as FlyingThings3D (FT3D) [29] for network training. + +When evaluated on the stereoKITTI dataset [30, 31], PV-RAFT [47] demonstrates an average EPE3D of just $0.056m$. However, when evaluated on the Argoverse dataset [3], the EPE3D metric astonishingly exceeds $10m$ [24]. Therefore, learning 3D scene flow on a synthetic dataset [29] leaves a large gap to real-world applications. Jin et al. [18] recently introduce a new synthetic dataset, GTA-SF, simulating LiDAR scans for autonomous driving. They propose a teacher-student domain adaptation framework to reduce the gap between synthetic and real datasets and improve the performance of 3D scene flow estimation to some extent. However, their performance is still poor on + +![](images/a3238b09d16c6cfed576ad678b98173ea1569ef35c043a7d0fc9140d4eb15084.jpg) +Figure 2. The accuracy improvement after integrating our proposed pseudo-auto-labelling method. Models trained on synthetic data perform poorly at 3D scene flow estimation for LiDAR-based autonomous driving. Our proposed 3D pseudo-auto-labelling method improves accuracy, reaching an EPE3D below $2cm$ across datasets [2, 3, 31]. + +real-world LiDAR data because of idealized sensor models and a lack of scene variety. Ideally, models should learn from real sensor data in the autonomous driving field.
However, labelling each point's 3D motion vector for the 3D scene flow task is extremely costly. This has driven many works [6, 22, 28, 32, 39, 44] towards unsupervised or self-supervised learning of 3D scene flow. Although these methods have achieved reasonable accuracy, they still fall behind supervised methods, highlighting the importance of real sensor data and corresponding 3D scene flow labels. + +In this work, we address three key challenges in the field of autonomous driving: the reliance on synthetic datasets that still have a poor generalization with real-world scenarios, the scarcity of scene flow labels in actual driving scenes, and the poor performance of existing 3D scene flow estimation networks on real LiDAR data. Inspired by the rigid motion assumptions in RigidFlow [22] and RSF [5], we propose a novel scene flow auto-labelling approach that leverages the characteristics of rigid motion prevalent in autonomous driving scenarios (Fig. 1). Specifically, we utilize 3D anchor boxes to segment 3D objects in point clouds. The attributes of each object-level box are not only position and size but also rotation, translation, motion status, and normal vector attributes. By leveraging the constrained loss functions for the box parameters and inter-frame association, we optimize the attributes of the boxes, subsequently combining these parameters with the source point cloud to produce a realistic target point cloud. Importantly, the generated target point cloud maintains a one-to-one correspondence with the source point cloud, enabling the efficient generation of pseudo 3D scene flow labels. + +To capture a more diverse range of motion patterns, we introduce a novel data augmentation strategy for 3D scene flow auto-labelling. Utilizing the attributes of each box, we simulate the rotations, translations, and motion status + +of both the ego vehicle and surrounding environment by adding Gaussian noise to these attributes. Consequently, we obtain numerous 3D scene flow labels with diverse motions that closely resemble real-world scenarios, furnishing the neural network with rich real training data and significantly improving the generalization capabilities of learning-based methods. Experimental results validate that our pseudo-label generation strategy consistently achieves state-of-the-art scene flow estimation results across various models [4, 36, 51] and datasets [2, 3, 30] (Fig. 2). + +In summary, our contributions are as follows: + +- We propose a new framework for the automatic labelling of 3D scene flow pseudo-labels, significantly enhancing the accuracy of current scene flow estimation models, and effectively addressing the scarcity of 3D flow labels in autonomous driving. +- We propose a universal 3D box optimization method with multiple motion attributes. Building upon this, we further introduce a plug-and-play 3D scene flow augmentation module with global-local motions and motion status. This allows for flexible motion adjustment of ego-motion and dynamic environments, setting a new benchmark for scene flow data augmentation. +- Our method achieves state-of-the-art performance on KITTI, nuScenes, and Argoverse LiDAR datasets. Impressively, our approach surpasses all supervised and unsupervised methods without requiring any synthesising data and manual scene flow labels. + +# 2. Related Work + +# 2.1. 
Supervised 3D Scene Flow Learning + +In recent years, the performance of methods [28, 34, 42] for 3D scene flow based on point cloud deep learning has surpassed traditional methods. FlowNet3D [28] pioneers an end-to-end approach to learning 3D scene flow from point clouds. Some works, such as HALFlow [13], 3DFlow [43], PointPWC [49], and WSAFlowNet [45], utilize PWC structures to learn 3D scene flow in a coarse-to-fine manner. Other methods address the disorderliness of points by voxelizing point clouds and using sparse convolution or voxel correlation fields to learn 3D scene flow, such as PV-RAFT [47], DPV-RAFT [46], and SCTN [21]. Additional work refines the estimated scene flow through iterative procedures. MSBRN [4] proposes bidirectional gated recurrent units for iteratively estimating scene flow. GMSF [51] and PT-FlowNet [9] introduce point cloud transformers into 3D scene flow estimation networks. These supervised learning methods for 3D scene flow heavily rely on ground truth and are all trained on the FT3D dataset [29] and evaluated on stereoKITTI [30, 31] for network generalization test. + +# 2.2. Unsupervised 3D Scene Flow Learning + +JGwF [32] and PointPWC [49] initially propose several self-supervised learning losses such as cycle consistency loss and chamfer loss. EgoFlow [40] distinguishes 3D scene flow into ego-motion flow and remaining non-rigid flow, achieving self-supervised learning based on temporal consistency. SFGAN [44] introduces generative adversarial concepts into self-supervised learning for 3D scene flow. Recently, works like R3DSF [12], RigidFlow [22], and LiDARSceneFlow [7] greatly improve the accuracy of 3D scene flow estimation by introducing local or object-level rigidity constraints. RigidFlow [22] explicitly enforces rigid alignment within super-voxel regions by decomposing the source point cloud into multiple supervoxels. R3DSF [12] separately considers background and foreground object-level 3D scene flow, relying on segmentation and odometry tasks [25, 26]. + +# 2.3. 3D Scene Flow Optimization + +3D scene flow optimization techniques have demonstrated remarkable generalization capabilities, attracting a significant amount of academic research recently. Graph prior [35] optimizes scene flow to be as smooth as possible by using the Laplacian of point clouds. Some techniques introduce neural networks to optimize 3D scene flow. NSFP [23] introduces a novel implicit regularizer, the Neural Scene Flow Prior, which primarily depends on runtime optimization and robust regularization. RSF [5] combines global ego-motion with object-specific rigid movements to optimize 3D bounding box parameters and compute scene flow. FastNSF [24] also adopts neural scene flow prior, and it shows more advantages in dealing with dense LiDAR points compared to learning methods. SCOOP [20], in the runtime phase, directly optimizes the flow refinement module using self-supervised objectives. Although optimization-based approaches for 3D scene flow estimation have demonstrated impressive accuracy, they typically involve high computational costs. + +# 3. 3DSFLabelling + +3D scene flow estimation infers the 3D flow, $SF_{pred} \in \mathbb{R}^{3 \times N_1}$ from the source point cloud $PC_S \in \mathbb{R}^{3 \times N_1}$ and the target point cloud $PC_T \in \mathbb{R}^{3 \times N_2}$ for each point in the source point. 
Previous self-supervised learning methods [32, 49] typically use the estimated 3D motion vector $SF_{pred}$ to warp the source point cloud $PC_S$ to the target point cloud $PC_{Sw}$ . By comparing the difference between $PC_{Sw}$ and $PC_T$ , a supervisory signal is generated. + +In contrast with previous self-supervised learning methods, we propose bounding box element optimization to obtain the boxes and the box motion parameters from raw unlabelled point cloud data. Then, we use object-box-level + +motion parameters and global motion parameters to warp each box's points and the whole point cloud to the target point cloud, generating corresponding pseudo 3D scene flow labels. During the warping process of each object box, we propose augmenting the motion attributes of each object and the whole scene. This diversity assists the network in capturing a broader range of motion behaviours. + +# 3.1. Prerequisites + +Apart from the two input point clouds, we do not require any extra labels, such as object-level tracking and semantic information, or vehicle ego-motion labels. To reinforce the geometric constraints in the pseudo label generation module, we employ Open3d [52] to generate coarse per-point normals. Despite these normals not being perfectly accurate, they are readily obtainable and can provide useful geometric constraints. Finally, we establish initial 3D anchor boxes with specific centers $(x,y,z)$ , width $w$ , length $l$ , height $h$ , and rotation angle $\theta$ , in accordance with the range of input points. As depicted in Fig. 3, the inputs of our model consist of the initial anchor box set, $PC_{S}$ , $PC_{T}$ , and point cloud normals $N_{S}$ . + +# 3.2. Motion Parameter Optimization Module + +As shown in Fig. 3, we present the process of simulating the motion of point clouds in actual autonomous driving by updating four sets of parameters: differentiable bounding boxes $\Phi = [c,s,\theta]$ , global motion parameters $\Theta = [R_{ego},t_{ego}]$ , motion parameters for each box $[R_{perbox},t_{perbox}]$ , and motion probability $P_M$ for each box. The variables $c$ , $s$ , and $\theta$ represent the center coordinates, size, and orientation of the 3D box, respectively. + +Inspired by RSF [5], we use the motion of object-level bounding boxes to present the point-wise 3D motion and make the step-like boxes differentiable through sigmoid approximation. By transforming the individual points to the bounding boxes, we introduce an object-level perception of the scene, enabling a more natural capture of rigid motion. This method proves advantageous in autonomous driving scenarios, where most objects predominantly exhibit rigid behaviour [12]. Additionally, in the context of autonomous driving, most scene motion is typically caused by the ego motion of the vehicle. Hence, setting global motion parameters is necessary to simulate the global consistent rigid motion of the whole scene. To discern whether the motion of each box is caused by ego-motion, we also set up a motion probability for each bounding box. + +With the initial set of four motion parameters, the source point cloud is warped to the target frame, as follows: + +$$ +P C _ {T} ^ {\Theta}, P C _ {T} ^ {\Phi} = \Omega_ {1} (\Theta , P C _ {S}), \Omega_ {2} (\Upsilon (\Phi , P C _ {S})), (1) +$$ + +where $\Theta$ represents global motion parameters. 
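For intuition, Eq. (1) can be pictured with the short NumPy sketch below: the whole source cloud is warped by the global ego-motion, while the points falling inside a box are warped by that box's own rigid motion, yielding the two warped clouds $PC_T^\Theta$ and $PC_T^\Phi$. The helper names, the axis-aligned box test, and the row-vector convention are assumptions made for illustration only, not the paper's implementation.

```python
import numpy as np

def warp_global(pc_s, R_ego, t_ego):
    """Omega_1: apply the global (ego-motion) rigid transform to every point.
    pc_s: (N, 3) source points, row-vector convention."""
    return pc_s @ R_ego.T + t_ego

def warp_per_box(pc_s, box_center, box_size, R_box, t_box):
    """Omega_2, heavily simplified: apply one box's rigid transform to the
    points inside that (axis-aligned) box; other points are left unchanged.
    The paper additionally drops boxes with too few points (Upsilon)."""
    inside = np.all(np.abs(pc_s - box_center) <= box_size / 2.0, axis=1)
    warped = pc_s.copy()
    warped[inside] = pc_s[inside] @ R_box.T + t_box
    return warped, inside

# Toy usage: identity ego rotation with a small forward translation,
# plus one box that additionally moves 1 m along x on its own.
pc_s = np.random.rand(1000, 3) * 20.0
pc_t_theta = warp_global(pc_s, np.eye(3), np.array([0.5, 0.0, 0.0]))      # PC_T^Theta
pc_t_phi, in_box = warp_per_box(pc_s, np.array([10.0, 10.0, 1.0]),
                                np.array([4.0, 2.0, 2.0]),
                                np.eye(3), np.array([1.0, 0.2, 0.0]))     # PC_T^Phi
```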
$\Phi$ represents motion parameters of each bounding box, and $\Omega_{1}$ and $\Omega_{2}$ + +![](images/47edac90fd1ac4ffa004220a61c20d6756b11dce719163b184c42dbfc5435c53.jpg) +Figure 3. The proposed learning framework of pseudo 3D scene flow automatic labelling. The input comprises 3D anchor boxes, a pair of point clouds, and their corresponding coarse normal vectors. The optimization of motion parameters primarily updates the bounding box parameters, global motion parameters, local motion parameters, and the motion probability of the boxes. The attribute parameters for boxes are updated through backward optimization from six objective functions. Once optimized, the motion parameters simulate various types of motion using a global-local data augmentation module. A single source frame point cloud, along with the augmented motion parameters, produces diverse 3D scene flow labels. These labels serve to guide the supervised neural network to learn point-wise motion. + +are background and foreground warping functions, respectively, generating the warped point clouds $PC_T^\Theta$ and $PC_T^\Phi$. $\Upsilon$ signifies the removal of boxes with too few points. + +Based on the real target-frame point cloud and the generated target point clouds $PC_T^\Theta$ and $PC_T^\Phi$, we define loss functions to update and optimize the box attributes. We separately calculate the background and foreground losses: + +$$ +L_{BG} = \kappa\left(N_{T}^{\Theta} \oplus PC_{T}^{\Theta},\, N_{T} \oplus PC_{T}\right) + \delta\left(PC_{T}^{\Theta}, PC_{T}\right), \tag{2} +$$ + +$$ +L_{FG} = \frac{1}{K_{box}} \sum P_{M} \times \left(\kappa\left(N_{T}^{\Phi} \oplus PC_{T}^{\Phi},\, N_{T} \oplus PC_{T}\right) + \delta\left(PC_{T}^{\Phi}, PC_{T}\right)\right), \tag{3} +$$ + +where $\kappa$ is a function calculating nearest-neighbour matches between the transformed point cloud and the target point cloud, and $\delta$ is a pairwise distance function with location encoding. $K_{box}$ is the number of boxes, $P_M$ is the motion probability of each box, and the term $N_T\oplus PC_T$ represents the concatenation of the target point cloud's normals and positions. As for the motion probability $P_M$ of each box: + +$$ +P_{M} = \sigma\left(\alpha \times \left(\Omega_{3}\left(\Phi, \gamma_{i}\right) + \beta_{i}\right)\right) - \sigma\left(\alpha \times \left(\Omega_{3}\left(\Phi, \gamma_{i}\right) - \beta_{i}\right)\right), \tag{4} +$$ + +where $\sigma(x)$ is the sigmoid function, $\alpha$ is a 'slope' hyperparameter of the sigmoid, and $\beta$ is half the vector of 3D dimensions $w$, $l$, and $h$ of the bounding box. Coordinate values $\gamma$ in the source point cloud are + +warped to the target point cloud via motion box parameters $\Phi$. For each dynamic box, each point's relative position to the box's centre is calculated. Higher motion probability $P_{M}$ is assigned to the points closer to the centre. A fixed hyperparameter $\alpha$, controlling motion probability, may not effectively respond to diverse and complex autonomous driving scenarios. Therefore, we adopt an adaptive computation of $\alpha$ based on the variance of the point nearest-neighbour consistency loss from the previous generation. The variance in the nearest-neighbour consistency loss for different points in the background implies the distribution of dynamic objects in the scene.
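Read as a difference of sigmoids, Eq. (4) behaves as a soft, differentiable box-membership score, which the rough NumPy sketch below illustrates. Treating $\Omega_3(\Phi, \gamma_i)$ as the point's per-axis offset from the box centre, ignoring the box rotation, and multiplying the three axes together are illustrative assumptions, not details taken from the paper.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def soft_box_membership(points, box_center, box_half_size, alpha=10.0):
    """Difference-of-sigmoids reading of Eq. (4): close to 1 well inside the
    box, close to 0 well outside, and smooth (differentiable) at the faces.
    points: (N, 3); box_half_size: (3,), i.e. (w, l, h) / 2 (the beta term)."""
    offset = points - box_center            # stand-in for Omega_3(Phi, gamma_i)
    per_axis = sigmoid(alpha * (offset + box_half_size)) \
             - sigmoid(alpha * (offset - box_half_size))
    return per_axis.prod(axis=1)            # combine the three axes (assumption)

pts = np.array([[0.0, 0.0, 0.0],   # at the box centre   -> membership near 1
                [5.0, 0.0, 0.0]])  # far outside the box -> membership near 0
print(soft_box_membership(pts, np.zeros(3), np.array([2.0, 1.0, 0.75])))
```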
With fewer moving objects indicated by a lower variance, $\alpha$ should be adaptively reduced, tending to produce lower motion probability $P_{M}$ for points. + +In addition to $L_{BG}$ and $L_{FG}$ , we introduce box dimension regularization, heading term, and angle term to constrain the dimensions, heading, and rotation angles of the bounding boxes within a reasonable range [5]. We also introduce a mass term to ensure that there are as many points as possible within the box, making the estimated motion parameters of the box more robust [5]. + +# 3.3. Data Augmentation for 3D Flow Auto-labelling + +Existing data augmentation practices [49] often add consistent random rotations and noise offsets to the input points, which indeed yields certain benefits. However, in + +![](images/a87fff64d105338c18d0dd90d331d3992cd37f111193d4e44fff8b2f4adcd16a.jpg) +Figure 4. The proposed pseudo label generation module. With the augmented motion probability $P_{M}^{*}$ , bounding boxes are categorized into dynamic and static types. Using global and local motion parameters, the $PC_{S}$ is warped to the target point cloud $PC_{T}^{*}$ . Finally, pseudo 3D scene flow labels $SF$ are derived from the correspondence between $PC_{T}^{*}$ and $PC_{S}$ . $K_{box}$ represents the number of boxes. + +autonomous driving scenarios, there are frequently various complex motion patterns for multiple objects. To make models learn complex scene motion rules, we propose a novel data augmentation method for scene flow labelling in both global and object-level motions. Our method simulates a broad spectrum of 3D scene flow data variations, originating from ego-motion and dynamic object movement, thereby providing a promising solution to the challenge of securing abundant 3D scene flow labels. + +As in Fig. 3, random noise is applied to either global or local motion parameters respectively. We generate a random rotation angle $\alpha$ and a random unit vector $\mathbf{u}$ for the rotation direction using random noise. They are used to create the Lie algebra $\xi$ . Subsequently, the Lie algebra $\xi$ is converted into a rotation matrix $\mathbf{M}$ using the Rodrigues' rotation formula and applied to the original rotation matrix $\mathbf{R}$ to obtain a new rotation matrix $\mathbf{R}^*$ , as follows: + +$$ +\begin{array}{l} \mathbf {M} = \mathbf {I} + \sin (| \pmb {\xi} |) \frac {\pmb {\xi}}{| \pmb {\xi} |} _ {\times} + (1 - \cos (| \pmb {\xi} |)) \left(\frac {\pmb {\xi}}{| \pmb {\xi} |} _ {\times}\right) ^ {2}, \quad (5) \\ \boldsymbol {\xi} = \alpha \boldsymbol {u}, \mathbf {R} ^ {*} = \mathbf {R M}. \tag {6} \\ \end{array} +$$ + +The Lie algebra element $\xi$ , the product of scalar $\alpha$ and unit vector $\mathbf{u}$ , signifies rotation magnitude and direction, with $\alpha$ and $\mathbf{u}$ representing rotation angle and axis, respectively. $\mathbf{I}$ is identity matrix, and $\xi \times \xi$ is the antisymmetric matrix of $\xi$ . Lie algebra intuitively and conveniently represents minor $SO(3)$ group variations. Rodrigues' rotation formula, mapping from the Lie algebra to the Lie group, facilitates the transformation of angle-based noise into a form directly applicable to the rotation matrix. This transformation brings mathematical convenience, making the update of the rotation matrix concise and efficient. + +Importantly, our data augmentation targets dynamically moving objects, because persistently adding varied motion + +noise to bounding boxes perceived as static objects may disrupt original data distribution. 
Moreover, the translation and motion probability are also augmented. As depicted in Fig. 3, we generate noise within an appropriate range and directly add it to the translation matrix or motion probability, resulting in augmented translation and motion probability. + +# 3.4. Pseudo Label Generation for 3D Scene Flow + +The motion parameters are fed into the pseudo label generation module to obtain point-wise 3D scene flow labels. The specific process of the label generation module is shown in Fig. 4. We determine the motion state of the 3D bounding box through the motion probability $P_{M}$ : + +$$ +P C _ {T} ^ {*} = \left\{ \begin{array}{l l} P C _ {S} \times R _ {e g o} ^ {*} + t _ {e g o} ^ {*} & \text {i f} P _ {M} ^ {*} < \mathbb {J}, \\ P C _ {S} ^ {e g o} \times R _ {\text {p e r b o x}} ^ {*} + t _ {\text {p e r b o x}} ^ {*} & \text {i f} P _ {M} ^ {*} \geq \mathbb {J}. \end{array} \right. \tag {7} +$$ + +$PC_{S}^{ego}$ is the points in the dynamic box from the source point cloud, transformed through global rotation and translation. When $P_{M}$ is less than threshold $\mathbb{J}$ , the current bounding box is deemed static. Conversely, if $P_{M}$ exceeds a predefined threshold $\mathbb{J}$ , the current bounding box is considered dynamic. For static boxes, based on the existing global motion, we apply a uniform noise to all static boxes to simulate various ego-motion patterns. By adding minute noise to the motion probability $P_{M}$ for each box, we can construct various motion states and show a greater variety of scene motions. Before transforming the dynamic boxes, a prior global transformation of all points is required. For dynamic bounding boxes, we add various noises to their existing motion, generating new rotations and translations, thereby creating various motion patterns. We warp the source point cloud within each box to the target frame using the box's motion parameters, obtaining the pseudo target point cloud $PC_{T}^{*}$ . + +The generated pseudo target point cloud $PC_T^*$ and the real source frame point cloud $PC_S$ have a perfect correspondence. Therefore, the 3D scene flow labels can be easily obtained by directly subtracting $PC_S$ from $PC_T^*$ : + +$$ +S F = P C _ {T} ^ {*} - P C _ {S}. \tag {8} +$$ + +The generated scene flow labels capture various motion patterns from real autonomous driving scenes. They help the model understand and adjust to complex driving conditions. This improves the model's ability to generalize in unfamiliar real-world scenarios. + +# 4. Experiments + +# 4.1. Datasets + +Test Datasets: Graph prior [35] introduces two autonomous driving datasets, Argoverse scene flow [3] and nuScenes scene flow [2] datasets. Scene flow labels in the + +Table 1. Comparison of our method with the best-performing methods on multiple datasets [2, 3, 10] and metrics. 'None', 'Weak', 'Self', and 'Full' represent non-learning, weakly supervised, self-supervised, and supervised methods, respectively. "↑" means higher is better, and "↓" means lower is better. Our method uses GMSF [51] as a baseline and combines it with our proposed pseudo-auto-labelling framework, 3DSFlabelling. Despite the use of a supervised learning structure, no ground truth is utilized in training. + +

| Method | Sup. | LiDAR KITTI [10] EPE3D↓ | Acc3DS↑ | Acc3DR↑ | Outliers↓ | Argoverse [3] EPE3D↓ | Acc3DS↑ | Acc3DR↑ | Outliers↓ | nuScenes [2] EPE3D↓ | Acc3DS↑ | Acc3DR↑ | Outliers↓ |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Graph prior [35] | None | - | - | - | - | 0.2570 | 0.2524 | 0.4760 | - | 0.2890 | 0.2012 | 0.4354 | - |
| RSF [5] | None | 0.0850 | 0.8830 | 0.9290 | 0.2390 | - | - | - | - | 0.1070 | 0.7170 | 0.8620 | 0.3210 |
| NSFP [23] | None | 0.1420 | 0.6880 | 0.8260 | 0.3850 | 0.1590 | 0.3843 | 0.6308 | - | 0.1751 | 0.3518 | 0.6345 | 0.5270 |
| R3DSF [12] | Weak | 0.0940 | 0.7840 | 0.8850 | 0.3140 | 0.4160 | 0.3452 | 0.4310 | 0.5580 | - | - | - | - |
| FlowNet3D [28] | Full | 0.7220 | 0.0300 | 0.1220 | 0.9650 | 0.4550 | 0.0134 | 0.0612 | 0.7360 | 0.5050 | 0.2120 | 0.1081 | 0.6200 |
| PointPWC [49] | Full | 0.3900 | 0.3870 | 0.5500 | 0.6530 | 0.4288 | 0.0462 | 0.2164 | 0.9199 | 0.7883 | 0.0287 | 0.1333 | 0.9410 |
| DCA-SRSFE [18] | Full | 0.5900 | 0.1505 | 0.3331 | 0.8485 | 0.7957 | 0.0712 | 0.1468 | 0.9799 | 0.7042 | 0.0538 | 0.1183 | 0.9766 |
| FLOT [36] | Full | 0.6532 | 0.1554 | 0.3130 | 0.8371 | 0.2491 | 0.0946 | 0.3126 | 0.8657 | 0.4858 | 0.0821 | 0.2669 | 0.8547 |
| MSBRN [4] | Full | 0.0139 | 0.9752 | 0.9847 | 0.1433 | 0.8691 | 0.2432 | 0.2854 | 0.7597 | 0.6137 | 0.2354 | 0.2924 | 0.7638 |
| GMSF [51] | Full | 0.1900 | 0.2962 | 0.5502 | 0.6171 | 7.2776 | 0.0036 | 0.0144 | 0.9930 | 9.4231 | 0.0034 | 0.0086 | 0.9943 |
| Mittal et al. [32] | Self | 0.9773 | 0.0096 | 0.0524 | 0.9936 | 0.6520 | 0.0319 | 0.1159 | 0.9621 | 0.8422 | 0.0289 | 0.1041 | 0.9615 |
| Jiang et al. [17] | Self | 0.4908 | 0.2052 | 0.4238 | 0.7286 | 0.2517 | 0.1236 | 0.3666 | 0.8114 | 0.4709 | 0.1034 | 0.3175 | 0.8191 |
| Ours | Self | 0.0078 | 0.9924 | 0.9947 | 0.1328 | 0.0093 | 0.9780 | 0.9880 | 0.1302 | 0.0185 | 0.9534 | 0.9713 | 0.1670 |

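For reference, the four metrics reported in Tab. 1 and the following tables can be computed as in the brief sketch below. The 0.05 m / 0.10 m / 0.30 m and 5% / 10% thresholds follow the definitions commonly used in the cited scene flow works [28, 36, 49]; they are stated here as an assumption rather than as this paper's exact evaluation code.

```python
import numpy as np

def scene_flow_metrics(sf_pred, sf_gt, eps=1e-8):
    """EPE3D / Acc3DS / Acc3DR / Outliers, using the thresholds commonly
    adopted in the scene flow literature (an assumption, see above).
    sf_pred, sf_gt: (N, 3) predicted and ground-truth flow vectors."""
    err = np.linalg.norm(sf_pred - sf_gt, axis=1)   # per-point end-point error
    rel = err / (np.linalg.norm(sf_gt, axis=1) + eps)
    return {
        "EPE3D":    float(err.mean()),
        "Acc3DS":   float(np.mean((err < 0.05) | (rel < 0.05))),
        "Acc3DR":   float(np.mean((err < 0.10) | (rel < 0.10))),
        "Outliers": float(np.mean((err > 0.30) | (rel > 0.10))),
    }

# Toy check: a prediction with ~1 cm of noise scores near-perfect accuracy.
gt = np.random.randn(8192, 3)
pred = gt + 0.01 * np.random.randn(8192, 3)
print(scene_flow_metrics(pred, gt))
```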
+ +![](images/cd41548fefd727ed8f693a1c32f9c3362eadb605cfb897aae8130f1adf35331b.jpg) +Figure 5. Registration visualization results of our method (GMSF [51] + 3DSFlabelling) and baselines on the LiDAR KITTI and Argoverse datasets [3, 10]. The estimated target point cloud $PC_{sw}$ is derived from warping the source point cloud $PC_S$ to the target point cloud via 3D scene flow. The larger the overlap between $PC_{sw}$ (blue) and the target point cloud $PC_T$ (green), the higher the predicted accuracy of the scene flow. Local areas are zoomed in for better visibility. Our 3D scene flow estimation notably improves performance. + +datasets are derived from LiDAR point clouds, object trajectories, map data, and vehicle pose. The datasets contain 212 and 310 test samples, respectively. R3DSF [12] introduces the lidarKITTI [10], which shares 142 scenes with stereoKITTI, collected via Velodyne's 64-beam LiDAR. Unlike FT3D [29] and stereoKITTI [30, 31], the point clouds from lidarKITTI are sparsely distributed. Note that LiDAR scene flow ground truths contain errors. We mitigate this by fusing the ground truth with the first point cloud to create a corrected second frame for network input, thus + +avoiding evaluation errors. + +Training Datasets used in previous methods: FT3D [29] and stereoKITTI [30, 31] are the frequently used datasets for training previous 3D scene flow models [4, 28, 36, 49, 51]. FT3D consists of 19,640 training pairs, while stereoKITTI [30, 31] contains 142 dense point clouds, with the first 100 frames used for model fine-tuning in some works [23, 32]. Some works [23, 28, 32, 35, 49] train their models on 2,691 pairs of Argoverse [3] data and 1,513 pairs of nuScenes [2] data, with 3D scene flow annotations fol + +Table 2. The comparative results between our method and baseline. “↑” signifies accuracy enhancement. In real-world LiDAR scenarios, our method markedly improves the 3D flow estimation accuracy across three datasets [2, 3, 30] on the three baselines. This demonstrates that the proposed pseudo-auto-labelling framework can substantially boost the accuracy of existing methods, even without the need for ground truth. + +

| Dataset | Method | EPE3D↓ | Acc3DS↑ | Acc3DR↑ |
|---|---|---|---|---|
| LiDAR KITTI | FLOT [36] | 0.6532 | 0.1554 | 0.3130 |
| | FLOT+3DSFlabelling | 0.0189 ↑ 97.1% | 0.9666 | 0.9792 |
| | MSBRN [4] | 0.0139 | 0.9752 | 0.9847 |
| | MSBRN+3DSFlabelling | 0.0123 ↑ 11.5% | 0.9797 | 0.9868 |
| | GMSF [51] | 0.1900 | 0.2962 | 0.5502 |
| | GMSF+3DSFlabelling | 0.0078 ↑ 95.8% | 0.9924 | 0.9947 |
| Argoverse | FLOT [36] | 0.2491 | 0.0946 | 0.3126 |
| | FLOT+3DSFlabelling | 0.0107 ↑ 95.7% | 0.9711 | 0.9862 |
| | MSBRN [4] | 0.8691 | 0.2432 | 0.2854 |
| | MSBRN+3DSFlabelling | 0.0150 ↑ 98.3% | 0.9482 | 0.9601 |
| | GMSF [51] | 7.2776 | 0.0036 | 0.0144 |
| | GMSF+3DSFlabelling | 0.0093 ↑ 99.9% | 0.9780 | 0.9880 |
| nuScenes | FLOT [36] | 0.4858 | 0.0821 | 0.2669 |
| | FLOT+3DSFlabelling | 0.0554 ↑ 88.6% | 0.7601 | 0.8909 |
| | MSBRN [4] | 0.6137 | 0.2354 | 0.2924 |
| | MSBRN+3DSFlabelling | 0.0235 ↑ 96.2% | 0.9413 | 0.9604 |
| | GMSF [51] | 9.4231 | 0.0034 | 0.0086 |
| | GMSF+3DSFlabelling | 0.0185 ↑ 99.8% | 0.9534 | 0.9713 |

+ +lowing the settings of the Graph prior [35]. The R3DSF [12] training set utilizes FT3D and semanticKITTI datasets [1], relying on ego-motion labels and semantic segmentation labels from semanticKITTI. + +Training Datasets used in our methods: Because we do not need any labels for training data, we use raw LiDAR point clouds sampled from raw data. For testing on the lidarKITTI [31], we use LiDAR point clouds from sequences 00 to 09 of the KITTI Odometry dataset [11] for auto-labelling and training. For testing on the nuScenes scene flow dataset [2], we randomly sample 50,000 pairs of LiDAR point clouds from the 350,000 LiDAR point clouds in the nuScenes sweeps dataset [2]. For testing on the Argoverse scene flow Dataset [3], we use the LiDAR point clouds from sequences 01 to 05 of the Argoverse 2 Sensor Dataset [3] for auto-labelling and training. In the selection of training data, we exclude the test scenes. + +# 4.2. Implementation Details + +The effectiveness of the proposed auto-labelling framework is demonstrated using three prominent deep learning models: FLOT [36], MSBRN [4], and GMSF [51]. These models use optimal transport, coarse-to-fine strategies, and transformer architectures respectively. Hyperparameters consistent with the original networks are employed during the training process. The input point clouds, from which ground points have been filtered, are randomly sampled to incorporate 8192 points. The LiDAR point cloud data from KITTI [10] is confined to the front view perspective, maintaining consistency with previous studies [12]. + +Table 3. Model comparison on the Argoverse dataset [3]. 'M' represents millions of parameters, and time is in milliseconds. + +

| Method | Sup. | EPE3D↓ | Acc3DS↑ | Acc3DR↑ | Time↓ | Params.↓ |
|---|---|---|---|---|---|---|
| PointPWC [49] | Full | 0.4288 | 0.0462 | 0.2164 | 147 ms | 7.7 M |
| PV-RAFT [47] | Full | 10.745 | 0.0200 | 0.0100 | 169 ms | - |
| R3DSF [12] | Weak | 0.4160 | 0.3452 | 0.4310 | 113 ms | 8.0 M |
| FlowStep3D [19] | Self | 0.8450 | 0.0100 | 0.0800 | 729 ms | - |
| NSFP [23] | None | 0.1590 | 0.3843 | 0.6308 | 2864 ms | - |
| Fast-NSF [24] | None | 0.1180 | 0.6993 | 0.8355 | 124 ms | - |
| MBNSF [41] | None | 0.0510 | 0.7936 | 0.9237 | 5000+ ms | - |
| MSBRN+3DSFlabelling | Self | 0.0150 | 0.9482 | 0.9601 | 341 ms | 3.5 M |
| GMSF+3DSFlabelling | Self | 0.0093 | 0.9780 | 0.9880 | 251 ms | 6.0 M |
| FLOT+3DSFlabelling | Self | 0.0107 | 0.9711 | 0.9862 | 78 ms | 0.1 M |

+ +Furthermore, we utilize four scene flow evaluation metrics [28, 36, 49, 51]: Average Endpoint Error (EPE3D), ACC3DS, ACC3DR, and Outliers. + +# 4.3.Quantitative Results + +The experimental results are presented in Table 1. We list the best-performing optimized [5, 12, 23, 35], self-supervised [17, 32], and supervised [18, 28, 49] models in the table. Our method achieves excellent performance on all datasets [2, 3, 10] and metrics. Particularly, compared to the baselines [51], there is an order of magnitude reduction in EPE3D on most datasets. The proposed auto-labelling method generates effective scene flow labels, perfectly simulating the rigid motion of various objects in the real world. The designed global-local data augmentation further expands the 3D scene flow labels. As a result, our method significantly outperforms other methods. We have also applied this plug-and-play auto-labelling framework for 3D scene flow (3DSFlabelling) to three existing models, as demonstrated in Table 2. The proposed method significantly enhances the accuracy of 3D scene flow estimation in these models [4, 36, 51]. + +Moreover, many existing works utilize a large number of model parameters [12, 47, 49] or adopt optimization methods [23, 24, 41] during testing for a more accurate estimation of 3D scene flow. These methods are highly time-consuming, and cannot ensure accuracy when reducing model parameters. Our proposed 3DSFlabelling effectively addresses this challenge. In Table 3, by using the small-parameter model FLOT (iter=1) [36] combined with our auto-labelling framework, we surpass all current supervised, unsupervised, weakly supervised, and optimized methods. This strongly validates the effectiveness of generating real-world labels in solving the challenges. + +# 4.4. Visualization + +Fig. 5 visualizes the precision of our method and others on two datasets [3, 31]. FLOT [36], with its mathematically optimal transport approach to matching point clouds, exhibits superior generalization. MSBRN [4], leveraging a multi-scale bidirectional recurrent network, robustly esti + +![](images/169a4c7b0bf781bd620e7d9979149217888a844b7c7250aa8475f9b5c354eb68.jpg) +Scene 250 in nuScenes + +![](images/eaccdd806351786224e439bf002e1a3d6bbb47a2e2fe2fe7682b110e55a3e79e.jpg) +Scene 115 in nuScenes + +![](images/658bf3882567c680da7874e22ffa406d61c12ea540e8f220eb43d82f3bbd98c9.jpg) + +![](images/a4ade8ce38739b9be661b02065e24a52a5f13b03fbd38814c17e0ae916f9c465.jpg) + +![](images/a1c6d82a63eaac9088c503f523d3ea567c058a5441ff66e354645956101708d8.jpg) + +![](images/126431da375604bd3609427392a8d3ab019499e3cb3a54d10618186df34cf56f.jpg) + +![](images/fbfcb4a3c3a1852573cb190ac3b77585c3d495ec5990fda6b0e0a0f32c4f85b0.jpg) +Figure 6. Error visualizing of our method (GMSF+3DSFlabelling) and baselines on the nuScenes dataset [2]. Using 3D EndPoint Error (EPE3D) as the metric, we categorize the error into six levels. Combining GMSF [51] with our proposed 3DSFlabelling, we manage to keep the EPE3D for most points within 0.02 meters, clearly outperforming other methods largely. + +![](images/5b9c81c31aac9e0aa0bf244ccb08bda00a3e45e08dd8f382166efe07078dcdf6.jpg) + +Table 4. Generalization comparison experiment. "A", "N", and "K" represent the Argoverse [3], nuScenes [2], and KITTI [10] datasets. $\langle \sim \rangle$ representing a model trained on the dataset on the left and directly evaluated on another new dataset on the right. + +

| Method | Sup. | A→N EPE3D | A→N Acc3DS | N→A EPE3D | N→A Acc3DS | A→K EPE3D | A→K Acc3DS | N→K EPE3D | N→K Acc3DS |
|---|---|---|---|---|---|---|---|---|---|
| PointPWC [49] | Self | 0.5911 | 0.0844 | 0.7043 | 0.0281 | 0.8632 | 0.0119 | 0.9307 | 0.0027 |
| RigidFlow [12] | Self | 0.1135 | 0.3445 | 0.3991 | 0.0152 | 0.3645 | 0.2118 | 0.5042 | 0.0141 |
| MSBRN [4] | Full | 0.5309 | 0.0055 | 0.3761 | 0.0098 | 0.6036 | 0.0056 | 0.4926 | 0.0081 |
| GMSF [51] | Full | 0.0334 | 0.9037 | 0.3078 | 0.1278 | 0.0442 | 0.8764 | 0.0574 | 0.8135 |
| Ours | Self | 0.0115 | 0.9693 | 0.0264 | 0.9192 | 0.0414 | 0.9020 | 0.0208 | 0.9595 |

+ +mates 3D scene flow on KITTI. GMSF [51] utilizes a transformer architecture for powerful fitting learning, but it lacks cross-domain generalization. The proposed method consistently shows better alignment between predicted and target point clouds across all scenes. Additionally, a visualization of the scene flow error on the nuScenes dataset is presented in Fig. 6. In two randomly selected test scenes, our method keeps the scene flow EPE3D mostly within $0.02m$ , clearly outperforming other baselines. More visual comparisons will be presented in the supplementary material. + +Table 4 provides quantitative results, demonstrating the + +Table 5. Ablation study of 3D scene flow data augmentation. "No Aug" and "Trad. Aug" represents no data augmentation and traditional data augmentation [49], respectively. Our data augmentation method has a very positive impact on the model. + +

| Model | No Aug | Trad. Aug | Our Aug | KITTI EPE3D | KITTI ACC3DS | Argoverse EPE3D | Argoverse ACC3DS | nuScenes EPE3D | nuScenes ACC3DS |
|---|---|---|---|---|---|---|---|---|---|
| Ours (FLOT) | ✓ | - | - | 0.0601 | 0.7291 | 0.0492 | 0.8015 | 0.7364 | 0.6642 |
| | - | ✓ | - | 0.0540 | 0.7622 | 0.0430 | 0.8679 | 0.0610 | 0.7417 |
| | - | - | ✓ | 0.0189 | 0.9666 | 0.0107 | 0.9711 | 0.0554 | 0.7601 |
| Ours (MSBRN) | ✓ | - | - | 0.0131 | 0.9781 | 0.0180 | 0.9411 | 0.0797 | 0.8510 |
| | - | ✓ | - | 0.0129 | 0.9790 | 0.0177 | 0.9427 | 0.0793 | 0.8547 |
| | - | - | ✓ | 0.0123 | 0.9797 | 0.0150 | 0.9482 | 0.0235 | 0.9413 |
| Ours (GMSF) | ✓ | - | - | 0.0103 | 0.9901 | 0.0139 | 0.9637 | 0.0213 | 0.9468 |
| | - | ✓ | - | 0.0081 | 0.9918 | 0.0137 | 0.9663 | 0.0212 | 0.9473 |
| | - | - | ✓ | 0.0078 | 0.9924 | 0.0093 | 0.9780 | 0.0185 | 0.9534 |

+ +generalization of our 3DSFlabelling combined with the existing method (GMSF [51]) on new datasets. For instance, we train a model on the Argoverse dataset and directly evaluate it on the nuScenes dataset. These two datasets belong to different domains, posing a domain generalization problem. The results in Table 4 indicate that our framework performs exceptionally well on the new dataset, consistently achieving an EPE3D of less than $5cm$ , and even reaching an average endpoint error of less than $2cm$ . + +# 4.5. Ablation Study + +This section explores the advantages of global-local data augmentation. In Table 5, we compare existing 3D scene flow data augmentation [49] with our proposed global-local data augmentation method. Our augmentation strategy shows significant enhancement in all evaluation metrics. This is attributed to the effective simulation of various motion patterns in autonomous driving by global-local data augmentation. The introduction of various motion transformations excellently utilizes the limited training data to extend a variety of 3D scene flow styles. More ablation studies are referring to the supplement material. + +# 5. Conclusion + +We package 3D point clouds into boxes with different motion attributes. By optimizing the motion parameters for each box and warping the source point cloud into the target point cloud, we create pseudo 3D scene flow labels. We also design a global-local data augmentation method, introducing various scene motion patterns and significantly increasing the diversity and quantity of 3D scene flow labels. Tests on multiple real-world datasets show that our 3D scene flow auto-labelling significantly enhances the performance of existing models. Importantly, this approach eliminates the need for 3D scene flow estimation models to depend on manually annotated 3D scene flow labels. + +# 6. Acknowledgements + +This work was supported by PhiGent Robotics. + +# References + +[1] Jens Behley, Martin Garbade, Andres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Jurgen Gall. Semantickitti: A dataset for semantic scene understanding of lidar sequences. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9297-9307, 2019. 7 +[2] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 2, 5, 6, 7, 8 +[3] Ming-Fang Chang, John Lambert, Patsorn Sangkloy, Jagjeet Singh, Slawomir Bak, Andrew Hartnett, De Wang, Peter Carr, Simon Lucey, Deva Ramanan, et al. Argoverse: 3d tracking and forecasting with rich maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8748-8757, 2019. 1, 2, 5, 6, 7, 8 +[4] Wencan Cheng and Jong Hwan Ko. Multi-scale bidirectional recurrent network with hybrid correlation for point cloud based scene flow estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10041-10050, 2023. 1, 2, 6, 7, 8 +[5] David Deng and Avideh Zakhor. Rsf: Optimizing rigid scene flow from 3d point clouds without labels. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1277-1286, 2023. 2, 3, 4, 6, 7 +[6] Fangqiang Ding, Zhijun Pan, Yimin Deng, Jianning Deng, and Chris Xiaoxuan Lu. Self-supervised scene flow estimation with 4-d automotive radar. 
IEEE Robotics and Automation Letters, 7(3):8233-8240, 2022. 2 +[7] Guanting Dong, Yueyi Zhang, Hanlin Li, Xiaoyan Sun, and Zhiwei Xiong. Exploiting rigidity constraints for lidar scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12776-12785, 2022. 3 +[8] Emeç Erçelik, Ekim Yurtsever, Mingyu Liu, Zhijie Yang, Hanzhen Zhang, Pinar Topçam, Maximilian Listl, Yilmaz Kaan Cayli, and Alois Knoll. 3d object detection with a self-supervised lidar scene flow backbone. In European Conference on Computer Vision, pages 247-265. Springer, 2022. 1 +[9] Jingyun Fu, Zhiyu Xiang, Chengyu Qiao, and Tingming Bai. Pt-flownet: Scene flow estimation on point clouds with point transformer. IEEE Robotics and Automation Letters, 8(5): 2566-2573, 2023. 1, 2 +[10] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In 2012 IEEE conference on computer vision and pattern recognition, pages 3354-3361. IEEE, 2012. 6, 7, 8 +[11] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. The International Journal of Robotics Research, 32(11):1231-1237, 2013. 7 +[12] Zan Gojcic, Or Litany, Andreas Wieser, Leonidas J Guibas, and Tolga Birdal. Weakly supervised learning of rigid 3d + +scene flow. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5692-5703, 2021. 3, 6, 7, 8 +[13] Xiuye Gu, Yijie Wang, Chongruo Wu, Yong Jae Lee, and Panqu Wang. Hplflownet: Hierarchical permutohedral lattice flownet for scene flow estimation on large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3254-3263, 2019. 2 +[14] Shengyu Huang, Zan Gojcic, Jiahui Huang, Andreas Wieser, and Konrad Schindler. Dynamic 3d scene analysis by point cloud accumulation. In European Conference on Computer Vision, pages 674-690. Springer, 2022. 1 +[15] Hafsa Iqbal, Abdulla Al-Kaff, Pablo Marin, Lucio Marcenaro, David Martin Gomez, and Carlo Regazzoni. Detection of abnormal motion by estimating scene flows of point clouds for autonomous driving. In 2021 IEEE International Intelligent Transportation Systems Conference (ITSC), pages 2788-2793. IEEE, 2021. 1 +[16] Chaokang Jiang, Guangming Wang, Jinxing Wu, Yanzi Miao, and Hesheng Wang. Ffpa-net: Efficient feature fusion with projection awareness for 3d object detection. arXiv preprint arXiv:2209.07419, 2022. 1 +[17] Chaokang Jiang, Guangming Wang, Yanzi Miao, and Hesheng Wang. 3-d scene flow estimation on pseudo-lidar: Bridging the gap on estimating point motion. IEEE Transactions on Industrial Informatics, 19(6):7346-7354, 2023. 6, 7 +[18] Zhao Jin, Yinjie Lei, Naveed Akhtar, Haifeng Li, and Munawar Hayat. Deformation and correspondence aware unsupervised synthetic-to-real scene flow estimation for point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7233-7243, 2022. 1, 6, 7 +[19] Yair Kittenplon, Yonina C Eldar, and Dan Raviv. Flowstep3d: Model unrolling for self-supervised scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4114-4123, 2021. 7 +[20] Itai Lang, Dror Aiger, Forrester Cole, Shai Avidan, and Michael Rubinstein. Scoop: Self-supervised correspondence and optimization-based scene flow. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5281-5290, 2023. 
3 +[21] Bing Li, Cheng Zheng, Silvio Giancola, and Bernard Ghanem. Sctn: Sparse convolution-transformer network for scene flow estimation. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1254–1262, 2022. 2 +[22] Ruibo Li, Chi Zhang, Guosheng Lin, Zhe Wang, and Chunhua Shen. Rigidflow: Self-supervised scene flow learning on point clouds by local rigidity prior. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16959-16968, 2022. 2, 3 +[23] Xueqian Li, Jhony Kaesemodel Pontes, and Simon Lucey. Neural scene flow prior. Advances in Neural Information Processing Systems, 34:7838-7851, 2021. 3, 6, 7 +[24] Xueqian Li, Jianqiao Zheng, Francesco Ferroni, Jhony Kaesemodel Pontes, and Simon Lucey. Fast neural scene flow. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 9878-9890, 2023. 1, 3, 7 + +[25] Jiuming Liu, Guangming Wang, Chaokang Jiang, Zhe Liu, and Hesheng Wang. Translo: A window-based masked point transformer framework for large-scale lidar odometry. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1683-1691, 2023. 3 +[26] Jiuming Liu, Guangming Wang, Zhe Liu, Chaokang Jiang, Marc Pollefeys, and Hesheng Wang. Regformer: an efficient projection-aware transformer network for large-scale point cloud registration. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8451-8460, 2023. 3 +[27] Jiuming Liu, Guangming Wang, Weicai Ye, Chaokang Jiang, Jinru Han, Zhe Liu, Guofeng Zhang, Dalong Du, and Hesheng Wang. Difflow3d: Toward robust uncertainty-aware scene flow estimation with diffusion model. arXiv preprint arXiv:2311.17456, 2023. 1 +[28] Xingyu Liu, Charles R Qi, and Leonidas J Guibas. Flownet3d: Learning scene flow in 3d point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 529-537, 2019. 2, 6, 7 +[29] Nikolaus Mayer, Eddy Ilg, Philip Hausser, Philipp Fischer, Daniel Cremers, Alexey Dosovitskiy, and Thomas Brox. A large dataset to train convolutional networks for disparity, optical flow, and scene flow estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4040-4048, 2016. 1, 2, 6 +[30] Moritz Menze, Christian Heipke, and Andreas Geiger. Joint 3d estimation of vehicles and scene flow. ISPRS annals of the photogrammetry, remote sensing and spatial information sciences, 2:427-434, 2015. 1, 2, 6, 7 +[31] Moritz Menze, Christian Heipke, and Andreas Geiger. Object scene flow. ISPRS Journal of Photogrammetry and Remote Sensing, 140:60-76, 2018. 1, 2, 6, 7 +[32] Himangi Mittal, Brian Okorn, and David Held. Just go with the flow: Self-supervised scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11177-11185, 2020. 2, 3, 6, 7 +[33] Mahyar Najibi, Jingwei Ji, Yin Zhou, Charles R Qi, Xinchen Yan, Scott Ettinger, and Dragomir Anguelov. Motion inspired unsupervised perception and prediction in autonomous driving. In European Conference on Computer Vision, pages 424-443. Springer, 2022. 1 +[34] Chensheng Peng, Guangming Wang, Xian Wan Lo, Xinrui Wu, Chenfeng Xu, Masayoshi Tomizuka, Wei Zhan, and Hesheng Wang. Delflow: Dense efficient learning of scene flow for large-scale point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16901-16910, 2023. 2 +[35] Jhony Kaesemodel Pontes, James Hays, and Simon Lucey. 
Scene flow from point clouds with or without learning. In 2020 International Conference on 3D Vision (3DV), pages 261-270, 2020. 3, 5, 6, 7 +[36] Gilles Puy, Alexandre Boulch, and Renaud Marlet. Flot: Scene flow on point clouds guided by optimal transport. In ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XXVIII 16, pages 527–544, 2020. 1, 2, 6, 7 + +[37] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 1 +[38] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 1 +[39] Yaqi Shen, Le Hui, Jin Xie, and Jian Yang. Self-supervised 3d scene flow estimation guided by superpoints. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5271-5280, 2023. 1, 2 +[40] Ivan Tishchenko, Sandro Lombardi, Martin R Oswald, and Marc Pollefeys. Self-supervised learning of non-rigid residual flow and ego-motion. In 2020 international conference on 3D vision (3DV), pages 150-159. IEEE, 2020. 3 +[41] Kavisha Vidanapathirana, Shin-Fang Chng, Xueqian Li, and Simon Lucey. Multi-body neural scene flow. arXiv preprint arXiv:2310.10301, 2023. 7 +[42] Guangming Wang, Xinrui Wu, Zhe Liu, and Hesheng Wang. Hierarchical attention learning of scene flow in 3d point clouds. IEEE Transactions on Image Processing, 30:5168-5181, 2021. 2 +[43] Guangming Wang, Yunzhe Hu, Zhe Liu, Yiyang Zhou, Masayoshi Tomizuka, Wei Zhan, and Hesheng Wang. What matters for 3d scene flow network. In European Conference on Computer Vision, pages 38-55. Springer, 2022. 2 +[44] Guangming Wang, Chaokang Jiang, Zehang Shen, Yanzi Miao, and Hesheng Wang. Sfgan: Unsupervised generative adversarial learning of 3d scene flow from the 3d scene self. Advanced Intelligent Systems, 4(4):2100197, 2022. 2, 3 +[45] Yun Wang, Cheng Chi, and Xin Yang. Exploiting implicit rigidity constraints via weight-sharing aggregation for scene flow estimation from point clouds. arXiv preprint arXiv:2303.02454, 2023. 2 +[46] Ziyi Wang, Yi Wei, Yongming Rao, Jie Zhou, and Jiwen Lu. 3d point-voxel correlation fields for scene flow estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2023. 2 +[47] Yi Wei, Ziyi Wang, Yongming Rao, Jiwen Lu, and Jie Zhou. Pv-raft: Point-voxel correlation fields for scene flow estimation of point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6954–6963, 2021. 1, 2, 7 +[48] Pengxiang Wu, Siheng Chen, and Dimitris N Metaxas. Motionnet: Joint perception and motion prediction for autonomous driving based on bird's eye view maps. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11385-11395, 2020. 1 +[49] Wenxuan Wu, Zhi Yuan Wang, Zhuwen Li, Wei Liu, and Li Fuxin. Pointpwc-net: Cost volume on point clouds for (self-) supervised scene flow estimation. In European Conference on Computer Vision, pages 88-107, 2020. 2, 3, 4, 6, 7, 8 +[50] Yi Zhang, Yuwen Ye, Zhiyu Xiang, and Jiaqi Gu. Sdp-net: Scene flow based real-time object detection and prediction from sequential 3d point clouds. In Proceedings of the Asian Conference on Computer Vision, 2020. 
1 + +[51] Yushan Zhang, Johan Edstedt, Bastian Wandt, Per-Erik Forssén, Maria Magnusson, and Michael Felsberg. Gmsf: Global matching scene flow. arXiv preprint arXiv:2305.17432, 2023. 1, 2, 6, 7, 8 +[52] Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Open3d: A modern library for 3d data processing. arXiv preprint arXiv:1801.09847, 2018. 3 \ No newline at end of file diff --git a/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/images.zip b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..39902e8663f1c21c7ea38adc7d06d2ff4353af09 --- /dev/null +++ b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ff593c4099649a3c66ba0f41faa7ad3bb8275e34d0793603561c32ab87b8344 +size 991011 diff --git a/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/layout.json b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..900ed6747dc9e03d7eb4e2f8e18e6ba349df0477 --- /dev/null +++ b/2024/3DSFLabelling_ Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling/layout.json @@ -0,0 +1,8724 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 57, + 103, + 536, + 122 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 103, + 536, + 122 + ], + "spans": [ + { + "bbox": [ + 57, + 103, + 536, + 122 + ], + "type": "text", + "content": "3DSFLabelling: Boosting 3D Scene Flow Estimation by Pseudo Auto-labelling" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "spans": [ + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "text", + "content": "Chaokang Jiang" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "text", + "content": ", Guangming Wang" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "text", + "content": ", Jiuming Liu" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "text", + "content": ", Hesheng Wang" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "text", + "content": ", Zhuang Ma" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "text", + "content": ", Zhenqiang Liu" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "text", + "content": ", Zhujin Liang" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "text", + "content": ", Yi Shan" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + 
], + "type": "text", + "content": ", Dalong Du" + }, + { + "bbox": [ + 94, + 142, + 499, + 171 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 171, + 484, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 171, + 484, + 186 + ], + "spans": [ + { + "bbox": [ + 110, + 171, + 484, + 186 + ], + "type": "text", + "content": "1PhiGent Robotics, 2University of Cambridge, 3Shanghai Jiaotong University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 187, + 523, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 187, + 523, + 227 + ], + "spans": [ + { + "bbox": [ + 67, + 187, + 523, + 227 + ], + "type": "text", + "content": "ts20060079a31@cumt.edu.cn, gw462@cam.ac.uk, {liujiuming, wanghesheng}@sjtu.edu.cn, mazhuang097@outlook.com, {zhenqiang.liu, zhujin.liang, yi.shan, dalong.du}@phigent.ai jiangchaokang.github.io/3DSFLabelling-Page" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "spans": [ + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 280, + 290, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 280, + 290, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 280, + 290, + 555 + ], + "type": "text", + "content": "Learning 3D scene flow from LiDAR point clouds presents significant difficulties, including poor generalization from synthetic datasets to real scenes, scarcity of real-world 3D labels, and poor performance on real sparse LiDAR point clouds. We present a novel approach from the perspective of auto-labelling, aiming to generate a large number of 3D scene flow pseudo labels for real-world LiDAR point clouds. Specifically, we employ the assumption of rigid body motion to simulate potential object-level rigid movements in autonomous driving scenarios. By updating different motion attributes for multiple anchor boxes, the rigid motion decomposition is obtained for the whole scene. Furthermore, we developed a novel 3D scene flow data augmentation method for global and local motion. By perfectly synthesizing target point clouds based on augmented motion parameters, we easily obtain lots of 3D scene flow labels in point clouds highly consistent with real scenarios. On multiple real-world datasets including LiDAR KITTI, nuScenes, and Argoverse, our method outperforms all previous supervised and unsupervised methods without requiring manual labelling. Impressively, our method achieves a tenfold reduction in EPE3D metric on the LiDAR KITTI dataset, reducing it from " + }, + { + "bbox": [ + 46, + 280, + 290, + 555 + ], + "type": "inline_equation", + "content": "0.190m" + }, + { + "bbox": [ + 46, + 280, + 290, + 555 + ], + "type": "text", + "content": " to a mere " + }, + { + "bbox": [ + 46, + 280, + 290, + 555 + ], + "type": "inline_equation", + "content": "0.008m" + }, + { + "bbox": [ + 46, + 280, + 290, + 555 + ], + "type": "text", + "content": " error." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 578, + 128, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 578, + 128, + 590 + ], + "spans": [ + { + "bbox": [ + 47, + 578, + 128, + 590 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 598, + 287, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 598, + 287, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 598, + 287, + 696 + ], + "type": "text", + "content": "3D scene flow estimation through deducing per-point motion filed from consecutive frames of point clouds, serves a critical role across various applications, encompassing motion prediction [33, 48], anomaly motion detection [15], 3D object detection [8, 16, 50], and dynamic point cloud accumulation [14]. With the advancing of deep learning on point clouds [37, 38], many works [4, 9, 18, 27, 36, 39, 51] have developed the learning-based" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 308, + 253, + 545, + 395 + ], + "blocks": [ + { + "bbox": [ + 308, + 253, + 545, + 395 + ], + "lines": [ + { + "bbox": [ + 308, + 253, + 545, + 395 + ], + "spans": [ + { + "bbox": [ + 308, + 253, + 545, + 395 + ], + "type": "image", + "image_path": "19952b9b470f368f6e4bc0b9866aded47f2609d58f00c7a451bae351021bd4f4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 398, + 547, + 465 + ], + "lines": [ + { + "bbox": [ + 305, + 398, + 547, + 465 + ], + "spans": [ + { + "bbox": [ + 305, + 398, + 547, + 465 + ], + "type": "text", + "content": "Figure 1. The proposed 3D scene flow pseudo-auto-labelling framework. Given point clouds and initial bounding boxes, both global and local motion parameters are iteratively optimized. Diverse motion patterns are augmented by randomly adjusting these motion parameters, thereby creating a diverse and realistic set of motion labels for the training of 3D scene flow estimation models." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 484, + 545, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 484, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 304, + 484, + 545, + 568 + ], + "type": "text", + "content": "methods to estimate per-point motion from 3D point clouds. Some state-of-the-art methods [4, 39, 51] have reduced the average 3D Endpoint Error (EPE3D) to a few centimetres on the KITTI Scene Flow dataset (stereoKITTI) [30, 31]. However, due to the scarcity of scene flow labels, these methods rely heavily on synthetic datasets such as FlyingThings3D (FT3D) [29] for network training." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": "When evaluated on the stereoKITTI dataset [30, 31], PV-RAFT [47] demonstrates an average EPE3D of just " + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "inline_equation", + "content": "0.056m" + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": ". However, when evaluated on the Argoverse dataset [3], the EPE3D metric astonishingly exceeds " + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "inline_equation", + "content": "10m" + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": " [24]. Therefore, learning 3D scene flow on synthetic dataset [29] has a large gap with real-world application. Jin et al. 
[18] recently introduce a new synthetic dataset, GTA-SF, simulating LiDAR scans for autonomous driving. They propose a teacher-student domain adaptation framework to reduce the gap between synthetic and real datasets and improve some performance of 3D scene flow estimation. However, their performance is still poor in" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 139, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 139, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 139, + 713 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 58, + 702, + 139, + 713 + ], + "type": "text", + "content": " Corresponding author." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15173" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 75, + 276, + 199 + ], + "blocks": [ + { + "bbox": [ + 53, + 75, + 276, + 199 + ], + "lines": [ + { + "bbox": [ + 53, + 75, + 276, + 199 + ], + "spans": [ + { + "bbox": [ + 53, + 75, + 276, + 199 + ], + "type": "image", + "image_path": "a3238b09d16c6cfed576ad678b98173ea1569ef35c043a7d0fc9140d4eb15084.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 201, + 287, + 267 + ], + "lines": [ + { + "bbox": [ + 46, + 201, + 287, + 267 + ], + "spans": [ + { + "bbox": [ + 46, + 201, + 287, + 267 + ], + "type": "text", + "content": "Figure 2. The accuracy improvement after integrating our proposed pseudo-auto-labelling method. Models trained on synthetic data performance poorly in 3D scene flow estimation for LiDAR-based autonomous driving. Our proposed 3D pseudo-auto-labelling method improves accuracy, reaching an EPE3D below " + }, + { + "bbox": [ + 46, + 201, + 287, + 267 + ], + "type": "inline_equation", + "content": "2cm" + }, + { + "bbox": [ + 46, + 201, + 287, + 267 + ], + "type": "text", + "content": " across datasets [2, 3, 31]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 292, + 287, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 292, + 287, + 411 + ], + "spans": [ + { + "bbox": [ + 46, + 292, + 287, + 411 + ], + "type": "text", + "content": "real-world LiDAR data because of ideal sensor models and lack of scene variety. Ideally, models should learn from real sensor data in the autonomous driving field. 
However, labelling each point's 3D motion vector for the 3D scene flow task is extremely costly. This has driven many works [6, 22, 28, 32, 39, 44] towards unsupervised or self-supervised learning of 3D scene flow. Although these methods have achieved reasonable accuracy, they still fall behind supervised methods, highlighting the importance of real sensor data and corresponding 3D scene flow labels." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 412, + 288, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 412, + 288, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 412, + 288, + 664 + ], + "type": "text", + "content": "In this work, we address three key challenges in the field of autonomous driving: the reliance on synthetic datasets that still have a poor generalization with real-world scenarios, the scarcity of scene flow labels in actual driving scenes, and the poor performance of existing 3D scene flow estimation networks on real LiDAR data. Inspired by the rigid motion assumptions in RigidFlow [22] and RSF [5], we propose a novel scene flow auto-labelling approach that leverages the characteristics of rigid motion prevalent in autonomous driving scenarios (Fig. 1). Specifically, we utilize 3D anchor boxes to segment 3D objects in point clouds. The attributes of each object-level box are not only position and size but also rotation, translation, motion status, and normal vector attributes. By leveraging the constrained loss functions for the box parameters and inter-frame association, we optimize the attributes of the boxes, subsequently combining these parameters with the source point cloud to produce a realistic target point cloud. Importantly, the generated target point cloud maintains a one-to-one correspondence with the source point cloud, enabling the efficient generation of pseudo 3D scene flow labels." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": "To capture a more diverse range of motion patterns, we introduce a novel data augmentation strategy for 3D scene flow auto-labelling. Utilizing the attributes of each box, we simulate the rotations, translations, and motion status" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 545, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 193 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 193 + ], + "type": "text", + "content": "of both the ego vehicle and surrounding environment by adding Gaussian noise to these attributes. Consequently, we obtain numerous 3D scene flow labels with diverse motions that closely resemble real-world scenarios, furnishing the neural network with rich real training data and significantly improving the generalization capabilities of learning-based methods. Experimental results validate that our pseudo-label generation strategy consistently achieves state-of-the-art scene flow estimation results across various models [4, 36, 51] and datasets [2, 3, 30] (Fig. 2)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 197, + 501, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 197, + 501, + 208 + ], + "spans": [ + { + "bbox": [ + 317, + 197, + 501, + 208 + ], + "type": "text", + "content": "In summary, our contributions are as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 212, + 545, + 415 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 306, + 212, + 545, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 212, + 545, + 272 + ], + "spans": [ + { + "bbox": [ + 306, + 212, + 545, + 272 + ], + "type": "text", + "content": "- We propose a new framework for the automatic labelling of 3D scene flow pseudo-labels, significantly enhancing the accuracy of current scene flow estimation models, and effectively addressing the scarcity of 3D flow labels in autonomous driving." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 272, + 545, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 272, + 545, + 356 + ], + "spans": [ + { + "bbox": [ + 306, + 272, + 545, + 356 + ], + "type": "text", + "content": "- We propose a universal 3D box optimization method with multiple motion attributes. Building upon this, we further introduce a plug-and-play 3D scene flow augmentation module with global-local motions and motion status. This allows for flexible motion adjustment of ego-motion and dynamic environments, setting a new benchmark for scene flow data augmentation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 356, + 545, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 356, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 306, + 356, + 545, + 415 + ], + "type": "text", + "content": "- Our method achieves state-of-the-art performance on KITTI, nuScenes, and Argoverse LiDAR datasets. Impressively, our approach surpasses all supervised and unsupervised methods without requiring any synthesising data and manual scene flow labels." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 439, + 392, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 439, + 392, + 451 + ], + "spans": [ + { + "bbox": [ + 306, + 439, + 392, + 451 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 463, + 498, + 476 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 463, + 498, + 476 + ], + "spans": [ + { + "bbox": [ + 306, + 463, + 498, + 476 + ], + "type": "text", + "content": "2.1. Supervised 3D Scene Flow Learning" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 486, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 486, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 486, + 545, + 713 + ], + "type": "text", + "content": "In recent years, the performance of methods [28, 34, 42] for 3D scene flow based on point cloud deep learning has surpassed traditional methods. FlowNet3D [28] pioneers an end-to-end approach to learning 3D scene flow from point clouds. Some works, such as HALFlow [13], 3DFlow [43], PointPWC [49], and WSAFlowNet [45], utilize PWC structures to learn 3D scene flow in a coarse-to-fine manner. 
Other methods address the disorderliness of points by voxelizing point clouds and using sparse convolution or voxel correlation fields to learn 3D scene flow, such as PV-RAFT [47], DPV-RAFT [46], and SCTN [21]. Additional work refines the estimated scene flow through iterative procedures. MSBRN [4] proposes bidirectional gated recurrent units for iteratively estimating scene flow. GMSF [51] and PT-FlowNet [9] introduce point cloud transformers into 3D scene flow estimation networks. These supervised learning methods for 3D scene flow heavily rely on ground truth and are all trained on the FT3D dataset [29] and evaluated on stereoKITTI [30, 31] for network generalization test." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15174" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 251, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 251, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 251, + 85 + ], + "type": "text", + "content": "2.2. Unsupervised 3D Scene Flow Learning" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 90, + 289, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 90, + 289, + 282 + ], + "spans": [ + { + "bbox": [ + 46, + 90, + 289, + 282 + ], + "type": "text", + "content": "JGwF [32] and PointPWC [49] initially propose several self-supervised learning losses such as cycle consistency loss and chamfer loss. EgoFlow [40] distinguishes 3D scene flow into ego-motion flow and remaining non-rigid flow, achieving self-supervised learning based on temporal consistency. SFGAN [44] introduces generative adversarial concepts into self-supervised learning for 3D scene flow. Recently, works like R3DSF [12], RigidFlow [22], and LiDARSceneFlow [7] greatly improve the accuracy of 3D scene flow estimation by introducing local or object-level rigidity constraints. RigidFlow [22] explicitly enforces rigid alignment within super-voxel regions by decomposing the source point cloud into multiple supervoxels. R3DSF [12] separately considers background and foreground object-level 3D scene flow, relying on segmentation and odometry tasks [25, 26]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 290, + 204, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 290, + 204, + 304 + ], + "spans": [ + { + "bbox": [ + 47, + 290, + 204, + 304 + ], + "type": "text", + "content": "2.3. 3D Scene Flow Optimization" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 309, + 289, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 309, + 289, + 538 + ], + "spans": [ + { + "bbox": [ + 46, + 309, + 289, + 538 + ], + "type": "text", + "content": "3D scene flow optimization techniques have demonstrated remarkable generalization capabilities, attracting a significant amount of academic research recently. Graph prior [35] optimizes scene flow to be as smooth as possible by using the Laplacian of point clouds. Some techniques introduce neural networks to optimize 3D scene flow. 
NSFP [23] introduces a novel implicit regularizer, the Neural Scene Flow Prior, which primarily depends on runtime optimization and robust regularization. RSF [5] combines global ego-motion with object-specific rigid movements to optimize 3D bounding box parameters and compute scene flow. FastNSF [24] also adopts neural scene flow prior, and it shows more advantages in dealing with dense LiDAR points compared to learning methods. SCOOP [20], in the runtime phase, directly optimizes the flow refinement module using self-supervised objectives. Although optimization-based approaches for 3D scene flow estimation have demonstrated impressive accuracy, they typically involve high computational costs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 548, + 141, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 548, + 141, + 562 + ], + "spans": [ + { + "bbox": [ + 47, + 548, + 141, + 562 + ], + "type": "text", + "content": "3. 3DSFLabelling" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "text", + "content": "3D scene flow estimation infers the 3D flow, " + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "inline_equation", + "content": "SF_{pred} \\in \\mathbb{R}^{3 \\times N_1}" + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "text", + "content": " from the source point cloud " + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "inline_equation", + "content": "PC_S \\in \\mathbb{R}^{3 \\times N_1}" + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "text", + "content": " and the target point cloud " + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "inline_equation", + "content": "PC_T \\in \\mathbb{R}^{3 \\times N_2}" + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "text", + "content": " for each point in the source point. Previous self-supervised learning methods [32, 49] typically use the estimated 3D motion vector " + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "inline_equation", + "content": "SF_{pred}" + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "text", + "content": " to warp the source point cloud " + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "inline_equation", + "content": "PC_S" + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "text", + "content": " to the target point cloud " + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "inline_equation", + "content": "PC_{Sw}" + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "text", + "content": ". By comparing the difference between " + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "inline_equation", + "content": "PC_{Sw}" + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "inline_equation", + "content": "PC_T" + }, + { + "bbox": [ + 47, + 569, + 287, + 665 + ], + "type": "text", + "content": ", a supervisory signal is generated." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": "In contrast with previous self-supervised learning methods, we propose bounding box element optimization to obtain the boxes and the box motion parameters from raw unlabelled point cloud data. Then, we use object-box-level" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "motion parameters and global motion parameters to warp each box's points and the whole point cloud to the target point cloud, generating corresponding pseudo 3D scene flow labels. During the warping process of each object box, we propose augmenting the motion attributes of each object and the whole scene. This diversity assists the network in capturing a broader range of motion behaviours." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 163, + 390, + 175 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 163, + 390, + 175 + ], + "spans": [ + { + "bbox": [ + 305, + 163, + 390, + 175 + ], + "type": "text", + "content": "3.1. Prerequisites" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "spans": [ + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "text", + "content": "Apart from the two input point clouds, we do not require any extra labels, such as object-level tracking and semantic information, or vehicle ego-motion labels. To reinforce the geometric constraints in the pseudo label generation module, we employ Open3d [52] to generate coarse per-point normals. Despite these normals not being perfectly accurate, they are readily obtainable and can provide useful geometric constraints. Finally, we establish initial 3D anchor boxes with specific centers " + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "inline_equation", + "content": "(x,y,z)" + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "text", + "content": ", width " + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "text", + "content": ", length " + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "text", + "content": ", height " + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "text", + "content": ", and rotation angle " + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "text", + "content": ", in accordance with the range of input points. As depicted in Fig. 
3, the inputs of our model consist of the initial anchor box set, " + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "inline_equation", + "content": "PC_{S}" + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "inline_equation", + "content": "PC_{T}" + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "text", + "content": ", and point cloud normals " + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "inline_equation", + "content": "N_{S}" + }, + { + "bbox": [ + 304, + 181, + 545, + 337 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 343, + 519, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 343, + 519, + 357 + ], + "spans": [ + { + "bbox": [ + 305, + 343, + 519, + 357 + ], + "type": "text", + "content": "3.2. Motion Parameter Optimization Module" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "spans": [ + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "text", + "content": "As shown in Fig. 3, we present the process of simulating the motion of point clouds in actual autonomous driving by updating four sets of parameters: differentiable bounding boxes " + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "inline_equation", + "content": "\\Phi = [c,s,\\theta]" + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "text", + "content": ", global motion parameters " + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "inline_equation", + "content": "\\Theta = [R_{ego},t_{ego}]" + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "text", + "content": ", motion parameters for each box " + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "inline_equation", + "content": "[R_{perbox},t_{perbox}]" + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "text", + "content": ", and motion probability " + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "inline_equation", + "content": "P_M" + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "text", + "content": " for each box. The variables " + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 361, + 545, + 457 + ], + "type": "text", + "content": " represent the center coordinates, size, and orientation of the 3D box, respectively." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 458, + 545, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 458, + 545, + 636 + ], + "spans": [ + { + "bbox": [ + 304, + 458, + 545, + 636 + ], + "type": "text", + "content": "Inspired by RSF [5], we use the motion of object-level bounding boxes to present the point-wise 3D motion and make the step-like boxes differentiable through sigmoid approximation. 
By transforming the individual points to the bounding boxes, we introduce an object-level perception of the scene, enabling a more natural capture of rigid motion. This method proves advantageous in autonomous driving scenarios, where most objects predominantly exhibit rigid behaviour [12]. Additionally, in the context of autonomous driving, most scene motion is typically caused by the ego motion of the vehicle. Hence, setting global motion parameters is necessary to simulate the global consistent rigid motion of the whole scene. To discern whether the motion of each box is caused by ego-motion, we also set up a motion probability for each bounding box." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 637, + 545, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 637, + 545, + 661 + ], + "spans": [ + { + "bbox": [ + 304, + 637, + 545, + 661 + ], + "type": "text", + "content": "With the initial set of four motion parameters, the source point cloud is warped to the target frame, as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 330, + 667, + 545, + 683 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 667, + 545, + 683 + ], + "spans": [ + { + "bbox": [ + 330, + 667, + 545, + 683 + ], + "type": "interline_equation", + "content": "P C _ {T} ^ {\\Theta}, P C _ {T} ^ {\\Phi} = \\Omega_ {1} (\\Theta , P C _ {S}), \\Omega_ {2} (\\Upsilon (\\Phi , P C _ {S})), (1)", + "image_path": "dd4a65d60fd7bef15a4ea005ccb8e27c6f8fd1c135b0a888f8ba5a8c0dad4b45.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": " represents global motion parameters. 
" + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": " represents motion parameters of each bounding box, and " + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\Omega_{1}" + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\Omega_{2}" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15175" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 69, + 541, + 312 + ], + "blocks": [ + { + "bbox": [ + 53, + 69, + 541, + 312 + ], + "lines": [ + { + "bbox": [ + 53, + 69, + 541, + 312 + ], + "spans": [ + { + "bbox": [ + 53, + 69, + 541, + 312 + ], + "type": "image", + "image_path": "47edac90fd1ac4ffa004220a61c20d6756b11dce719163b184c42dbfc5435c53.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 314, + 548, + 381 + ], + "lines": [ + { + "bbox": [ + 46, + 314, + 548, + 381 + ], + "spans": [ + { + "bbox": [ + 46, + 314, + 548, + 381 + ], + "type": "text", + "content": "Figure 3. The proposed learning framework of pseudo 3D scene flow automatic labelling. The input comprises 3D anchor boxes, a pair of point clouds, and their corresponding coarse normal vectors. The optimization of motion parameters primarily updates the bounding box parameters, global motion parameters, local motion parameters, and the motion probability of the boxes. The attribute parameters for boxes are updated through backward optimization from six objective functions. Once optimized, the motion parameters simulate various types of motion using a global-local data augmentation module. A single source frame point cloud, along with the augmented motion parameters, produces diverse 3D scene flow labels. These labels serve to guide the supervised neural network to learn point-wise motion." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 401, + 287, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 401, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 47, + 401, + 287, + 437 + ], + "type": "text", + "content": "are background and foreground warping functions, respectively, generating the warped point clouds " + }, + { + "bbox": [ + 47, + 401, + 287, + 437 + ], + "type": "inline_equation", + "content": "PC_T^\\Theta" + }, + { + "bbox": [ + 47, + 401, + 287, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 401, + 287, + 437 + ], + "type": "inline_equation", + "content": "PC_T^\\Phi" + }, + { + "bbox": [ + 47, + 401, + 287, + 437 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 47, + 401, + 287, + 437 + ], + "type": "inline_equation", + "content": "\\Upsilon" + }, + { + "bbox": [ + 47, + 401, + 287, + 437 + ], + "type": "text", + "content": " signifies the removal of boxes with too few points." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 437, + 288, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 288, + 485 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 288, + 485 + ], + "type": "text", + "content": "Based on the real target frame of point cloud and the generated target point clouds " + }, + { + "bbox": [ + 46, + 437, + 288, + 485 + ], + "type": "inline_equation", + "content": "PC_T^\\Theta" + }, + { + "bbox": [ + 46, + 437, + 288, + 485 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 437, + 288, + 485 + ], + "type": "inline_equation", + "content": "PC_T^\\Phi" + }, + { + "bbox": [ + 46, + 437, + 288, + 485 + ], + "type": "text", + "content": ", we define loss functions to update and optimize the box attributes. We separately calculate the background and foreground losses:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 489, + 287, + 504 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 489, + 287, + 504 + ], + "spans": [ + { + "bbox": [ + 52, + 489, + 287, + 504 + ], + "type": "interline_equation", + "content": "L _ {B G} = \\kappa \\left(N _ {T} ^ {\\Theta} \\oplus P C _ {T} ^ {\\Theta}, N _ {T} \\oplus P C _ {T}\\right) + \\delta \\left(P C _ {T} ^ {\\Theta}, P C _ {T}\\right), \\tag {2}", + "image_path": "8e1873f61535cbd403f8d9af3d4c3bcdb07cfc6e4e6ccd75d246905f07e7392e.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 506, + 287, + 555 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 506, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 55, + 506, + 287, + 555 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} L _ {F G} = \\frac {1}{K _ {b o x}} \\sum P _ {M} \\times (\\kappa \\left(N _ {T} ^ {\\Phi} \\oplus P C _ {T} ^ {\\Phi}, N _ {T} \\oplus P C _ {T}\\right) \\\\ + \\delta \\left(P C _ {T} ^ {\\Phi}, P C _ {T}\\right)), \\tag {3} \\\\ \\end{array}", + "image_path": "d5b07606ce24fe78d13c19171e75d23877094f6b575c64306b83ca53aaf0981d.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "spans": [ + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "text", + "content": " is a function calculating nearest neighbour matches between the transformed point cloud and the target point cloud. " + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "text", + "content": " is a pairwise distance function with location encoding. 
" + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "inline_equation", + "content": "K_{box}" + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "text", + "content": " is the number of boxes, " + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "inline_equation", + "content": "P_M" + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "text", + "content": " is the motion probability of each box, and the term " + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "inline_equation", + "content": "N_T\\oplus PC_T" + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "text", + "content": " represents the concatenation of the target point cloud's normal and positions. As for the motion probability " + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "inline_equation", + "content": "P_M" + }, + { + "bbox": [ + 46, + 556, + 287, + 641 + ], + "type": "text", + "content": " of each box:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 647, + 287, + 661 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 647, + 287, + 661 + ], + "spans": [ + { + "bbox": [ + 52, + 647, + 287, + 661 + ], + "type": "interline_equation", + "content": "P _ {M} = \\sigma \\left(\\alpha \\times \\left(\\Omega_ {3} \\left(\\Phi , \\gamma_ {i}\\right) + \\beta_ {i}\\right)\\right) - \\alpha \\times \\left(\\Omega_ {3} \\left(\\Phi , \\gamma_ {i}\\right) - \\beta_ {i}\\right), \\tag {4}", + "image_path": "f126212538d622c5edbddd3be451505fbd1855743bf226f6c0500345ed190f4e.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\sigma(x)" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": " represents the sigmoid function, " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": " is a hyperparameter 'slope' in the sigmoid, " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": " represents the half size of the vector of 3D dimensions " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": " of the bounding box. 
Coordinate values " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": " in the source point cloud are" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "text", + "content": "warped to the target point cloud via motion box parameters " + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "text", + "content": ". For each dynamic box, each point's relative position to the box's centre is calculated. Higher motion probability " + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "inline_equation", + "content": "P_{M}" + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "text", + "content": " is assigned to the points closer to the centre. A fixed hyperparameter " + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "text", + "content": ", controlling motion probability, may not effectively respond to diverse and complex autonomous driving scenarios. Therefore, we adopt an adaptive computation of " + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "text", + "content": " based on the variance of the point nearest-neighbour consistency loss from the previous generation. The variance in the nearest-neighbour consistency loss for different points in the background implies the distribution of dynamic objects in the scene. With fewer moving objects indicated by a lower variance, " + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "text", + "content": " should be adaptively reduced, tending to produce lower motion probability " + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "inline_equation", + "content": "P_{M}" + }, + { + "bbox": [ + 304, + 401, + 545, + 568 + ], + "type": "text", + "content": " for points." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 568, + 545, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 568, + 545, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 568, + 545, + 652 + ], + "type": "text", + "content": "In addition to " + }, + { + "bbox": [ + 304, + 568, + 545, + 652 + ], + "type": "inline_equation", + "content": "L_{BG}" + }, + { + "bbox": [ + 304, + 568, + 545, + 652 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 568, + 545, + 652 + ], + "type": "inline_equation", + "content": "L_{FG}" + }, + { + "bbox": [ + 304, + 568, + 545, + 652 + ], + "type": "text", + "content": ", we introduce box dimension regularization, heading term, and angle term to constrain the dimensions, heading, and rotation angles of the bounding boxes within a reasonable range [5]. We also introduce a mass term to ensure that there are as many points as possible within the box, making the estimated motion parameters of the box more robust [5]." 
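A small numpy sketch of the per-point motion probability around Eq. (4). The extracted equation appears to have dropped the sigmoid from its second term, so the sketch assumes the usual sigmoid-difference gate σ(α(d+β)) − σ(α(d−β)) applied per axis to box-frame coordinates (i.e. after the Ω3 transform, which is assumed done beforehand); the adaptive choice of α from the previous iteration's loss variance is only indicated and its scaling is a guess.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def motion_probability(gamma_box, half_size, alpha):
    """Soft box membership for box-frame points gamma_box (3, N) with half extents beta = half_size (3,).

    Assumes Eq. (4) is the sigmoid-difference gate per axis, multiplied over the three axes,
    so points near the box centre get values close to 1 and points outside fall toward 0."""
    beta = half_size[:, None]
    per_axis = sigmoid(alpha * (gamma_box + beta)) - sigmoid(alpha * (gamma_box - beta))
    return per_axis.prod(axis=0)              # (N,) values in (0, 1)

def adaptive_alpha(prev_nn_losses, base=10.0):
    """Illustrative guess at the adaptive slope: lower variance of the previous iteration's
    per-point nearest-neighbour losses (fewer moving objects) gives a smaller alpha."""
    return base * float(np.var(prev_nn_losses))
```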
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 659, + 545, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 659, + 545, + 673 + ], + "spans": [ + { + "bbox": [ + 306, + 659, + 545, + 673 + ], + "type": "text", + "content": "3.3. Data Augmentation for 3D Flow Auto-labelling" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "text", + "content": "Existing data augmentation practices [49] often add consistent random rotations and noise offsets to the input points, which indeed yields certain benefits. However, in" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "15176" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 285, + 190 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 285, + 190 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 285, + 190 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 285, + 190 + ], + "type": "image", + "image_path": "a87fff64d105338c18d0dd90d331d3992cd37f111193d4e44fff8b2f4adcd16a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "lines": [ + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "spans": [ + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "text", + "content": "Figure 4. The proposed pseudo label generation module. With the augmented motion probability " + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "inline_equation", + "content": "P_{M}^{*}" + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "text", + "content": ", bounding boxes are categorized into dynamic and static types. Using global and local motion parameters, the " + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "inline_equation", + "content": "PC_{S}" + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "text", + "content": " is warped to the target point cloud " + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "inline_equation", + "content": "PC_{T}^{*}" + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "text", + "content": ". Finally, pseudo 3D scene flow labels " + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "inline_equation", + "content": "SF" + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "text", + "content": " are derived from the correspondence between " + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "inline_equation", + "content": "PC_{T}^{*}" + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "inline_equation", + "content": "PC_{S}" + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "inline_equation", + "content": "K_{box}" + }, + { + "bbox": [ + 46, + 193, + 289, + 270 + ], + "type": "text", + "content": " represents the number of boxes." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 293, + 287, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 293, + 287, + 400 + ], + "spans": [ + { + "bbox": [ + 46, + 293, + 287, + 400 + ], + "type": "text", + "content": "autonomous driving scenarios, there are frequently various complex motion patterns for multiple objects. To make models learn complex scene motion rules, we propose a novel data augmentation method for scene flow labelling in both global and object-level motions. Our method simulates a broad spectrum of 3D scene flow data variations, originating from ego-motion and dynamic object movement, thereby providing a promising solution to the challenge of securing abundant 3D scene flow labels." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "spans": [ + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "text", + "content": "As in Fig. 3, random noise is applied to either global or local motion parameters respectively. We generate a random rotation angle " + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "text", + "content": " and a random unit vector " + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "text", + "content": " for the rotation direction using random noise. They are used to create the Lie algebra " + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\xi" + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "text", + "content": ". Subsequently, the Lie algebra " + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\xi" + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "text", + "content": " is converted into a rotation matrix " + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "text", + "content": " using the Rodrigues' rotation formula and applied to the original rotation matrix " + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "text", + "content": " to obtain a new rotation matrix " + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "inline_equation", + "content": "\\mathbf{R}^*" + }, + { + "bbox": [ + 46, + 401, + 287, + 496 + ], + "type": "text", + "content": ", as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 497, + 287, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 497, + 287, + 550 + ], + "spans": [ + { + "bbox": [ + 56, + 497, + 287, + 550 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {M} = \\mathbf {I} + \\sin (| \\pmb {\\xi} |) \\frac {\\pmb {\\xi}}{| \\pmb {\\xi} |} _ {\\times} + (1 - \\cos (| \\pmb {\\xi} |)) \\left(\\frac {\\pmb {\\xi}}{| \\pmb {\\xi} |} _ {\\times}\\right) ^ {2}, \\quad (5) \\\\ \\boldsymbol {\\xi} = \\alpha \\boldsymbol {u}, \\mathbf {R} ^ {*} = \\mathbf {R M}. 
\\tag {6} \\\\ \\end{array}", + "image_path": "b0e7bbcd567168e67e27f9aa439f57faca3a530c5e9c8e855e82b6035378c536.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "text", + "content": "The Lie algebra element " + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\xi" + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "text", + "content": ", the product of scalar " + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "text", + "content": " and unit vector " + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "text", + "content": ", signifies rotation magnitude and direction, with " + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "text", + "content": " representing rotation angle and axis, respectively. " + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\mathbf{I}" + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "text", + "content": " is identity matrix, and " + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\xi \\times \\xi" + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "text", + "content": " is the antisymmetric matrix of " + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\xi" + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "text", + "content": ". Lie algebra intuitively and conveniently represents minor " + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "inline_equation", + "content": "SO(3)" + }, + { + "bbox": [ + 46, + 557, + 287, + 688 + ], + "type": "text", + "content": " group variations. Rodrigues' rotation formula, mapping from the Lie algebra to the Lie group, facilitates the transformation of angle-based noise into a form directly applicable to the rotation matrix. This transformation brings mathematical convenience, making the update of the rotation matrix concise and efficient." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": "Importantly, our data augmentation targets dynamically moving objects, because persistently adding varied motion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 145 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 145 + ], + "type": "text", + "content": "noise to bounding boxes perceived as static objects may disrupt original data distribution. Moreover, the translation and motion probability are also augmented. As depicted in Fig. 
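A compact numpy sketch of the rotation augmentation in Eqs. (5)-(6): sample an axis-angle perturbation ξ = αu, map it to a rotation M with the Rodrigues formula, and right-multiply the existing rotation to get R* = RM. The sampling distributions and `max_angle` are illustrative choices, not values from the paper.

```python
import numpy as np

def skew(v):
    """[v]_x: the antisymmetric (hat) matrix of a 3-vector."""
    return np.array([[0.0, -v[2], v[1]],
                     [v[2], 0.0, -v[0]],
                     [-v[1], v[0], 0.0]])

def augment_rotation(R, max_angle=0.05, rng=None):
    """Eqs. (5)-(6): xi = alpha * u, M = exp([xi]_x) via Rodrigues, R* = R @ M."""
    rng = np.random.default_rng() if rng is None else rng
    u = rng.normal(size=3)
    u /= np.linalg.norm(u)                  # random unit rotation axis
    alpha = rng.uniform(0.0, max_angle)     # small random rotation angle (radians)
    xi = alpha * u                          # Lie algebra element
    theta = np.linalg.norm(xi)
    if theta < 1e-12:
        return R.copy()
    K = skew(xi / theta)
    M = np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)
    return R @ M
```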
3, we generate noise within an appropriate range and directly add it to the translation matrix or motion probability, resulting in augmented translation and motion probability." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 151, + 535, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 151, + 535, + 163 + ], + "spans": [ + { + "bbox": [ + 306, + 151, + 535, + 163 + ], + "type": "text", + "content": "3.4. Pseudo Label Generation for 3D Scene Flow" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 170, + 545, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 170, + 545, + 230 + ], + "spans": [ + { + "bbox": [ + 304, + 170, + 545, + 230 + ], + "type": "text", + "content": "The motion parameters are fed into the pseudo label generation module to obtain point-wise 3D scene flow labels. The specific process of the label generation module is shown in Fig. 4. We determine the motion state of the 3D bounding box through the motion probability " + }, + { + "bbox": [ + 304, + 170, + 545, + 230 + ], + "type": "inline_equation", + "content": "P_{M}" + }, + { + "bbox": [ + 304, + 170, + 545, + 230 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 312, + 237, + 545, + 269 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 237, + 545, + 269 + ], + "spans": [ + { + "bbox": [ + 312, + 237, + 545, + 269 + ], + "type": "interline_equation", + "content": "P C _ {T} ^ {*} = \\left\\{ \\begin{array}{l l} P C _ {S} \\times R _ {e g o} ^ {*} + t _ {e g o} ^ {*} & \\text {i f} P _ {M} ^ {*} < \\mathbb {J}, \\\\ P C _ {S} ^ {e g o} \\times R _ {\\text {p e r b o x}} ^ {*} + t _ {\\text {p e r b o x}} ^ {*} & \\text {i f} P _ {M} ^ {*} \\geq \\mathbb {J}. \\end{array} \\right. \\tag {7}", + "image_path": "76dde0715d25ffe2e84812acbb6a9ca305972b5ac8481051879933229117e585.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "inline_equation", + "content": "PC_{S}^{ego}" + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "text", + "content": " is the points in the dynamic box from the source point cloud, transformed through global rotation and translation. When " + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "inline_equation", + "content": "P_{M}" + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "text", + "content": " is less than threshold " + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "inline_equation", + "content": "\\mathbb{J}" + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "text", + "content": ", the current bounding box is deemed static. Conversely, if " + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "inline_equation", + "content": "P_{M}" + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "text", + "content": " exceeds a predefined threshold " + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "inline_equation", + "content": "\\mathbb{J}" + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "text", + "content": ", the current bounding box is considered dynamic. For static boxes, based on the existing global motion, we apply a uniform noise to all static boxes to simulate various ego-motion patterns. 
By adding minute noise to the motion probability " + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "inline_equation", + "content": "P_{M}" + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "text", + "content": " for each box, we can construct various motion states and show a greater variety of scene motions. Before transforming the dynamic boxes, a prior global transformation of all points is required. For dynamic bounding boxes, we add various noises to their existing motion, generating new rotations and translations, thereby creating various motion patterns. We warp the source point cloud within each box to the target frame using the box's motion parameters, obtaining the pseudo target point cloud " + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "inline_equation", + "content": "PC_{T}^{*}" + }, + { + "bbox": [ + 304, + 276, + 545, + 492 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 492, + 545, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 492, + 545, + 540 + ], + "spans": [ + { + "bbox": [ + 304, + 492, + 545, + 540 + ], + "type": "text", + "content": "The generated pseudo target point cloud " + }, + { + "bbox": [ + 304, + 492, + 545, + 540 + ], + "type": "inline_equation", + "content": "PC_T^*" + }, + { + "bbox": [ + 304, + 492, + 545, + 540 + ], + "type": "text", + "content": " and the real source frame point cloud " + }, + { + "bbox": [ + 304, + 492, + 545, + 540 + ], + "type": "inline_equation", + "content": "PC_S" + }, + { + "bbox": [ + 304, + 492, + 545, + 540 + ], + "type": "text", + "content": " have a perfect correspondence. Therefore, the 3D scene flow labels can be easily obtained by directly subtracting " + }, + { + "bbox": [ + 304, + 492, + 545, + 540 + ], + "type": "inline_equation", + "content": "PC_S" + }, + { + "bbox": [ + 304, + 492, + 545, + 540 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 304, + 492, + 545, + 540 + ], + "type": "inline_equation", + "content": "PC_T^*" + }, + { + "bbox": [ + 304, + 492, + 545, + 540 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 381, + 548, + 545, + 562 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 548, + 545, + 562 + ], + "spans": [ + { + "bbox": [ + 381, + 548, + 545, + 562 + ], + "type": "interline_equation", + "content": "S F = P C _ {T} ^ {*} - P C _ {S}. \\tag {8}", + "image_path": "1d292c9a08a28d25433d0b0fee5de933aa39ef07eefad715cb509d3cae65df1b.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 569, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 569, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 569, + 545, + 628 + ], + "type": "text", + "content": "The generated scene flow labels capture various motion patterns from real autonomous driving scenes. They help the model understand and adjust to complex driving conditions. This improves the model's ability to generalize in unfamiliar real-world scenarios." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 639, + 388, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 639, + 388, + 653 + ], + "spans": [ + { + "bbox": [ + 306, + 639, + 388, + 653 + ], + "type": "text", + "content": "4. 
Experiments" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 659, + 368, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 659, + 368, + 670 + ], + "spans": [ + { + "bbox": [ + 306, + 659, + 368, + 670 + ], + "type": "text", + "content": "4.1. Datasets" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "Test Datasets: Graph prior [35] introduces two autonomous driving datasets, Argoverse scene flow [3] and nuScenes scene flow [2] datasets. Scene flow labels in the" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15177" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 118, + 545, + 282 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 547, + 115 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 547, + 115 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 547, + 115 + ], + "type": "text", + "content": "Table 1. Comparison of our method with the best-performing methods on multiple datasets [2, 3, 10] and metrics. 'None', 'Weak', 'Self', and 'Full' represent non-learning, weakly supervised, self-supervised, and supervised methods, respectively. \"↑\" means higher is better, and \"↓\" means lower is better. Our method uses GMSF [51] as a baseline and combines it with our proposed pseudo-auto-labelling framework, 3DSFlabelling. Despite the use of a supervised learning structure, no ground truth is utilized in training." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 118, + 545, + 282 + ], + "lines": [ + { + "bbox": [ + 49, + 118, + 545, + 282 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 545, + 282 + ], + "type": "table", + "html": "
<table><tr><td rowspan=\"2\">Method</td><td rowspan=\"2\">Sup.</td><td colspan=\"4\">LiDAR KITTI Scene Flow [10]</td><td colspan=\"4\">Argoverse Scene Flow [3]</td><td colspan=\"4\">nuScenes Scene Flow [2]</td></tr>
<tr><td>EPE3D↓</td><td>Acc3DS↑</td><td>Acc3DR↑</td><td>Outliers↓</td><td>EPE3D↓</td><td>Acc3DS↑</td><td>Acc3DR↑</td><td>Outliers↓</td><td>EPE3D↓</td><td>Acc3DS↑</td><td>Acc3DR↑</td><td>Outliers↓</td></tr>
<tr><td>Graph prior [35]</td><td>None</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.2570</td><td>0.2524</td><td>0.4760</td><td>-</td><td>0.2890</td><td>0.2012</td><td>0.4354</td><td>-</td></tr>
<tr><td>RSF [5]</td><td>None</td><td>0.0850</td><td>0.8830</td><td>0.9290</td><td>0.2390</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.1070</td><td>0.7170</td><td>0.8620</td><td>0.3210</td></tr>
<tr><td>NSFP [23]</td><td>None</td><td>0.1420</td><td>0.6880</td><td>0.8260</td><td>0.3850</td><td>0.1590</td><td>0.3843</td><td>0.6308</td><td>-</td><td>0.1751</td><td>0.3518</td><td>0.6345</td><td>0.5270</td></tr>
<tr><td>R3DSF [12]</td><td>Weak</td><td>0.0940</td><td>0.7840</td><td>0.8850</td><td>0.3140</td><td>0.4160</td><td>0.3452</td><td>0.4310</td><td>0.5580</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>FlowNet3D [28]</td><td>Full</td><td>0.7220</td><td>0.0300</td><td>0.1220</td><td>0.9650</td><td>0.4550</td><td>0.0134</td><td>0.0612</td><td>0.7360</td><td>0.5050</td><td>0.2120</td><td>0.1081</td><td>0.6200</td></tr>
<tr><td>PointPWC [49]</td><td>Full</td><td>0.3900</td><td>0.3870</td><td>0.5500</td><td>0.6530</td><td>0.4288</td><td>0.0462</td><td>0.2164</td><td>0.9199</td><td>0.7883</td><td>0.0287</td><td>0.1333</td><td>0.9410</td></tr>
<tr><td>DCA-SRSFE [18]</td><td>Full</td><td>0.5900</td><td>0.1505</td><td>0.3331</td><td>0.8485</td><td>0.7957</td><td>0.0712</td><td>0.1468</td><td>0.9799</td><td>0.7042</td><td>0.0538</td><td>0.1183</td><td>0.9766</td></tr>
<tr><td>FLOT [36]</td><td>Full</td><td>0.6532</td><td>0.1554</td><td>0.3130</td><td>0.8371</td><td>0.2491</td><td>0.0946</td><td>0.3126</td><td>0.8657</td><td>0.4858</td><td>0.0821</td><td>0.2669</td><td>0.8547</td></tr>
<tr><td>MSBRN [4]</td><td>Full</td><td>0.0139</td><td>0.9752</td><td>0.9847</td><td>0.1433</td><td>0.8691</td><td>0.2432</td><td>0.2854</td><td>0.7597</td><td>0.6137</td><td>0.2354</td><td>0.2924</td><td>0.7638</td></tr>
<tr><td>GMSF [51]</td><td>Full</td><td>0.1900</td><td>0.2962</td><td>0.5502</td><td>0.6171</td><td>7.2776</td><td>0.0036</td><td>0.0144</td><td>0.9930</td><td>9.4231</td><td>0.0034</td><td>0.0086</td><td>0.9943</td></tr>
<tr><td>Mittal et al. [32]</td><td>Self</td><td>0.9773</td><td>0.0096</td><td>0.0524</td><td>0.9936</td><td>0.6520</td><td>0.0319</td><td>0.1159</td><td>0.9621</td><td>0.8422</td><td>0.0289</td><td>0.1041</td><td>0.9615</td></tr>
<tr><td>Jiang et al. [17]</td><td>Self</td><td>0.4908</td><td>0.2052</td><td>0.4238</td><td>0.7286</td><td>0.2517</td><td>0.1236</td><td>0.3666</td><td>0.8114</td><td>0.4709</td><td>0.1034</td><td>0.3175</td><td>0.8191</td></tr>
<tr><td>Ours</td><td>Self</td><td>0.0078</td><td>0.9924</td><td>0.9947</td><td>0.1328</td><td>0.0093</td><td>0.9780</td><td>0.9880</td><td>0.1302</td><td>0.0185</td><td>0.9534</td><td>0.9713</td><td>0.1670</td></tr></table>
", + "image_path": "a0197c8a729456b8824436c8ad9d1074e2e786e308f8cc618e8052e4a799a3fc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 47, + 284, + 545, + 530 + ], + "blocks": [ + { + "bbox": [ + 47, + 284, + 545, + 530 + ], + "lines": [ + { + "bbox": [ + 47, + 284, + 545, + 530 + ], + "spans": [ + { + "bbox": [ + 47, + 284, + 545, + 530 + ], + "type": "image", + "image_path": "cd41548fefd727ed8f693a1c32f9c3362eadb605cfb897aae8130f1adf35331b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 533, + 547, + 579 + ], + "lines": [ + { + "bbox": [ + 45, + 533, + 547, + 579 + ], + "spans": [ + { + "bbox": [ + 45, + 533, + 547, + 579 + ], + "type": "text", + "content": "Figure 5. Registration visualization results of our method (GMSF [51] + 3DSFlabelling) and baselines on the LiDAR KITTI and Argoverse datasets [3, 10]. The estimated target point cloud " + }, + { + "bbox": [ + 45, + 533, + 547, + 579 + ], + "type": "inline_equation", + "content": "PC_{sw}" + }, + { + "bbox": [ + 45, + 533, + 547, + 579 + ], + "type": "text", + "content": " is derived from warping the source point cloud " + }, + { + "bbox": [ + 45, + 533, + 547, + 579 + ], + "type": "inline_equation", + "content": "PC_S" + }, + { + "bbox": [ + 45, + 533, + 547, + 579 + ], + "type": "text", + "content": " to the target point cloud via 3D scene flow. The larger the overlap between " + }, + { + "bbox": [ + 45, + 533, + 547, + 579 + ], + "type": "inline_equation", + "content": "PC_{sw}" + }, + { + "bbox": [ + 45, + 533, + 547, + 579 + ], + "type": "text", + "content": " (blue) and the target point cloud " + }, + { + "bbox": [ + 45, + 533, + 547, + 579 + ], + "type": "inline_equation", + "content": "PC_T" + }, + { + "bbox": [ + 45, + 533, + 547, + 579 + ], + "type": "text", + "content": " (green), the higher the predicted accuracy of the scene flow. Local areas are zoomed in for better visibility. Our 3D scene flow estimation notably improves performance." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 587, + 289, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 587, + 289, + 707 + ], + "spans": [ + { + "bbox": [ + 46, + 587, + 289, + 707 + ], + "type": "text", + "content": "datasets are derived from LiDAR point clouds, object trajectories, map data, and vehicle pose. The datasets contain 212 and 310 test samples, respectively. R3DSF [12] introduces the lidarKITTI [10], which shares 142 scenes with stereoKITTI, collected via Velodyne's 64-beam LiDAR. Unlike FT3D [29] and stereoKITTI [30, 31], the point clouds from lidarKITTI are sparsely distributed. Note that LiDAR scene flow ground truths contain errors. We mitigate this by fusing the ground truth with the first point cloud to create a corrected second frame for network input, thus" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 587, + 416, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 587, + 416, + 600 + ], + "spans": [ + { + "bbox": [ + 306, + 587, + 416, + 600 + ], + "type": "text", + "content": "avoiding evaluation errors." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": "Training Datasets used in previous methods: FT3D [29] and stereoKITTI [30, 31] are the frequently used datasets for training previous 3D scene flow models [4, 28, 36, 49, 51]. FT3D consists of 19,640 training pairs, while stereoKITTI [30, 31] contains 142 dense point clouds, with the first 100 frames used for model fine-tuning in some works [23, 32]. Some works [23, 28, 32, 35, 49] train their models on 2,691 pairs of Argoverse [3] data and 1,513 pairs of nuScenes [2] data, with 3D scene flow annotations fol" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "15178" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 152, + 282, + 338 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 288, + 148 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 288, + 148 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 288, + 148 + ], + "type": "text", + "content": "Table 2. The comparative results between our method and baseline. “↑” signifies accuracy enhancement. In real-world LiDAR scenarios, our method markedly improves the 3D flow estimation accuracy across three datasets [2, 3, 30] on the three baselines. This demonstrates that the proposed pseudo-auto-labelling framework can substantially boost the accuracy of existing methods, even without the need for ground truth." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 152, + 282, + 338 + ], + "lines": [ + { + "bbox": [ + 53, + 152, + 282, + 338 + ], + "spans": [ + { + "bbox": [ + 53, + 152, + 282, + 338 + ], + "type": "table", + "html": "
<table><tr><td>Dataset</td><td>Method</td><td>EPE3D↓</td><td>Acc3DS↑</td><td>Acc3DR↑</td></tr>
<tr><td rowspan=\"6\">LiDAR KITTI</td><td>FLOT [36]</td><td>0.6532</td><td>0.1554</td><td>0.3130</td></tr>
<tr><td>FLOT+3DSFlabelling</td><td>0.0189 ↑ 97.1%</td><td>0.9666</td><td>0.9792</td></tr>
<tr><td>MSBRN [4]</td><td>0.0139</td><td>0.9752</td><td>0.9847</td></tr>
<tr><td>MSBRN+3DSFlabelling</td><td>0.0123 ↑ 11.5%</td><td>0.9797</td><td>0.9868</td></tr>
<tr><td>GMSF [51]</td><td>0.1900</td><td>0.2962</td><td>0.5502</td></tr>
<tr><td>GMSF+3DSFlabelling</td><td>0.0078 ↑ 95.8%</td><td>0.9924</td><td>0.9947</td></tr>
<tr><td rowspan=\"6\">Argoverse</td><td>FLOT [36]</td><td>0.2491</td><td>0.0946</td><td>0.3126</td></tr>
<tr><td>FLOT+3DSFlabelling</td><td>0.0107 ↑ 95.7%</td><td>0.9711</td><td>0.9862</td></tr>
<tr><td>MSBRN [4]</td><td>0.8691</td><td>0.2432</td><td>0.2854</td></tr>
<tr><td>MSBRN+3DSFlabelling</td><td>0.0150 ↑ 98.3%</td><td>0.9482</td><td>0.9601</td></tr>
<tr><td>GMSF [51]</td><td>7.2776</td><td>0.0036</td><td>0.0144</td></tr>
<tr><td>GMSF+3DSFlabelling</td><td>0.0093 ↑ 99.9%</td><td>0.9780</td><td>0.9880</td></tr>
<tr><td rowspan=\"6\">nuScenes</td><td>FLOT [36]</td><td>0.4858</td><td>0.0821</td><td>0.2669</td></tr>
<tr><td>FLOT+3DSFlabelling</td><td>0.0554 ↑ 88.6%</td><td>0.7601</td><td>0.8909</td></tr>
<tr><td>MSBRN [4]</td><td>0.6137</td><td>0.2354</td><td>0.2924</td></tr>
<tr><td>MSBRN+3DSFlabelling</td><td>0.0235 ↑ 96.2%</td><td>0.9413</td><td>0.9604</td></tr>
<tr><td>GMSF [51]</td><td>9.4231</td><td>0.0034</td><td>0.0086</td></tr>
<tr><td>GMSF+3DSFlabelling</td><td>0.0185 ↑ 99.8%</td><td>0.9534</td><td>0.9713</td></tr></table>
", + "image_path": "3e79f987e89572282c4c12a76e3ae5301c641b055b2088084b97f3e5ae5ca3fd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 353, + 287, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 353, + 287, + 400 + ], + "spans": [ + { + "bbox": [ + 46, + 353, + 287, + 400 + ], + "type": "text", + "content": "lowing the settings of the Graph prior [35]. The R3DSF [12] training set utilizes FT3D and semanticKITTI datasets [1], relying on ego-motion labels and semantic segmentation labels from semanticKITTI." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 401, + 288, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 401, + 288, + 556 + ], + "spans": [ + { + "bbox": [ + 46, + 401, + 288, + 556 + ], + "type": "text", + "content": "Training Datasets used in our methods: Because we do not need any labels for training data, we use raw LiDAR point clouds sampled from raw data. For testing on the lidarKITTI [31], we use LiDAR point clouds from sequences 00 to 09 of the KITTI Odometry dataset [11] for auto-labelling and training. For testing on the nuScenes scene flow dataset [2], we randomly sample 50,000 pairs of LiDAR point clouds from the 350,000 LiDAR point clouds in the nuScenes sweeps dataset [2]. For testing on the Argoverse scene flow Dataset [3], we use the LiDAR point clouds from sequences 01 to 05 of the Argoverse 2 Sensor Dataset [3] for auto-labelling and training. In the selection of training data, we exclude the test scenes." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 563, + 180, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 563, + 180, + 576 + ], + "spans": [ + { + "bbox": [ + 47, + 563, + 180, + 576 + ], + "type": "text", + "content": "4.2. Implementation Details" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 582, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 715 + ], + "type": "text", + "content": "The effectiveness of the proposed auto-labelling framework is demonstrated using three prominent deep learning models: FLOT [36], MSBRN [4], and GMSF [51]. These models use optimal transport, coarse-to-fine strategies, and transformer architectures respectively. Hyperparameters consistent with the original networks are employed during the training process. The input point clouds, from which ground points have been filtered, are randomly sampled to incorporate 8192 points. The LiDAR point cloud data from KITTI [10] is confined to the front view perspective, maintaining consistency with previous studies [12]." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 309, + 95, + 544, + 201 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 545, + 93 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 545, + 93 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 545, + 93 + ], + "type": "text", + "content": "Table 3. Model comparison on the Argoverse dataset [3]. 'M' represents millions of parameters, and time is in milliseconds." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 95, + 544, + 201 + ], + "lines": [ + { + "bbox": [ + 309, + 95, + 544, + 201 + ], + "spans": [ + { + "bbox": [ + 309, + 95, + 544, + 201 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Sup.</td><td>EPE3D↓</td><td>Acc3DS↑</td><td>Acc3DR↑</td><td>Time↓</td><td>Params.↓</td></tr>
<tr><td>PointPWC [49]</td><td>Full</td><td>0.4288</td><td>0.0462</td><td>0.2164</td><td>147 ms</td><td>7.7 M</td></tr>
<tr><td>PV-RAFT [47]</td><td>Full</td><td>10.745</td><td>0.0200</td><td>0.0100</td><td>169 ms</td><td>-</td></tr>
<tr><td>R3DSF [12]</td><td>Weak</td><td>0.4160</td><td>0.3452</td><td>0.4310</td><td>113 ms</td><td>8.0 M</td></tr>
<tr><td>FlowStep3D [19]</td><td>Self</td><td>0.8450</td><td>0.0100</td><td>0.0800</td><td>729 ms</td><td>-</td></tr>
<tr><td>NSFP [23]</td><td>None</td><td>0.1590</td><td>0.3843</td><td>0.6308</td><td>2864 ms</td><td>-</td></tr>
<tr><td>Fast-NSF [24]</td><td>None</td><td>0.1180</td><td>0.6993</td><td>0.8355</td><td>124 ms</td><td>-</td></tr>
<tr><td>MBNSF [41]</td><td>None</td><td>0.0510</td><td>0.7936</td><td>0.9237</td><td>5000+ ms</td><td>-</td></tr>
<tr><td>MSBRN+3DSFlabelling</td><td>Self</td><td>0.0150</td><td>0.9482</td><td>0.9601</td><td>341 ms</td><td>3.5 M</td></tr>
<tr><td>GMSF+3DSFlabelling</td><td>Self</td><td>0.0093</td><td>0.9780</td><td>0.9880</td><td>251 ms</td><td>6.0 M</td></tr>
<tr><td>FLOT+3DSFlabelling</td><td>Self</td><td>0.0107</td><td>0.9711</td><td>0.9862</td><td>78 ms</td><td>0.1 M</td></tr></table>
", + "image_path": "27e0724d4cdb1e066329df99e52d3f6c9a768e340ac086420c64ebfcdc6a13f7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 217, + 545, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 217, + 545, + 253 + ], + "spans": [ + { + "bbox": [ + 305, + 217, + 545, + 253 + ], + "type": "text", + "content": "Furthermore, we utilize four scene flow evaluation metrics [28, 36, 49, 51]: Average Endpoint Error (EPE3D), ACC3DS, ACC3DR, and Outliers." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 261, + 424, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 261, + 424, + 274 + ], + "spans": [ + { + "bbox": [ + 305, + 261, + 424, + 274 + ], + "type": "text", + "content": "4.3.Quantitative Results" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 280, + 545, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 280, + 545, + 483 + ], + "spans": [ + { + "bbox": [ + 304, + 280, + 545, + 483 + ], + "type": "text", + "content": "The experimental results are presented in Table 1. We list the best-performing optimized [5, 12, 23, 35], self-supervised [17, 32], and supervised [18, 28, 49] models in the table. Our method achieves excellent performance on all datasets [2, 3, 10] and metrics. Particularly, compared to the baselines [51], there is an order of magnitude reduction in EPE3D on most datasets. The proposed auto-labelling method generates effective scene flow labels, perfectly simulating the rigid motion of various objects in the real world. The designed global-local data augmentation further expands the 3D scene flow labels. As a result, our method significantly outperforms other methods. We have also applied this plug-and-play auto-labelling framework for 3D scene flow (3DSFlabelling) to three existing models, as demonstrated in Table 2. The proposed method significantly enhances the accuracy of 3D scene flow estimation in these models [4, 36, 51]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 483, + 545, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 545, + 627 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 545, + 627 + ], + "type": "text", + "content": "Moreover, many existing works utilize a large number of model parameters [12, 47, 49] or adopt optimization methods [23, 24, 41] during testing for a more accurate estimation of 3D scene flow. These methods are highly time-consuming, and cannot ensure accuracy when reducing model parameters. Our proposed 3DSFlabelling effectively addresses this challenge. In Table 3, by using the small-parameter model FLOT (iter=1) [36] combined with our auto-labelling framework, we surpass all current supervised, unsupervised, weakly supervised, and optimized methods. This strongly validates the effectiveness of generating real-world labels in solving the challenges." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 635, + 389, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 635, + 389, + 647 + ], + "spans": [ + { + "bbox": [ + 306, + 635, + 389, + 647 + ], + "type": "text", + "content": "4.4. 
Visualization" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": "Fig. 5 visualizes the precision of our method and others on two datasets [3, 31]. FLOT [36], with its mathematically optimal transport approach to matching point clouds, exhibits superior generalization. MSBRN [4], leveraging a multi-scale bidirectional recurrent network, robustly esti" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15179" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 80, + 165, + 144 + ], + "blocks": [ + { + "bbox": [ + 70, + 71, + 150, + 79 + ], + "lines": [ + { + "bbox": [ + 70, + 71, + 150, + 79 + ], + "spans": [ + { + "bbox": [ + 70, + 71, + 150, + 79 + ], + "type": "text", + "content": "Scene 250 in nuScenes" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 80, + 165, + 144 + ], + "lines": [ + { + "bbox": [ + 51, + 80, + 165, + 144 + ], + "spans": [ + { + "bbox": [ + 51, + 80, + 165, + 144 + ], + "type": "image", + "image_path": "169a4c7b0bf781bd620e7d9979149217888a844b7c7250aa8475f9b5c354eb68.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 166, + 80, + 279, + 144 + ], + "blocks": [ + { + "bbox": [ + 182, + 71, + 261, + 79 + ], + "lines": [ + { + "bbox": [ + 182, + 71, + 261, + 79 + ], + "spans": [ + { + "bbox": [ + 182, + 71, + 261, + 79 + ], + "type": "text", + "content": "Scene 115 in nuScenes" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 166, + 80, + 279, + 144 + ], + "lines": [ + { + "bbox": [ + 166, + 80, + 279, + 144 + ], + "spans": [ + { + "bbox": [ + 166, + 80, + 279, + 144 + ], + "type": "image", + "image_path": "eaccdd806351786224e439bf002e1a3d6bbb47a2e2fe2fe7682b110e55a3e79e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 51, + 144, + 165, + 209 + ], + "blocks": [ + { + "bbox": [ + 51, + 144, + 165, + 209 + ], + "lines": [ + { + "bbox": [ + 51, + 144, + 165, + 209 + ], + "spans": [ + { + "bbox": [ + 51, + 144, + 165, + 209 + ], + "type": "image", + "image_path": "658bf3882567c680da7874e22ffa406d61c12ea540e8f220eb43d82f3bbd98c9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 166, + 144, + 279, + 209 + ], + "blocks": [ + { + "bbox": [ + 166, + 144, + 279, + 209 + ], + "lines": [ + { + "bbox": [ + 166, + 144, + 279, + 209 + ], + "spans": [ + { + "bbox": [ + 166, + 144, + 279, + 209 + ], + "type": "image", + "image_path": "a4ade8ce38739b9be661b02065e24a52a5f13b03fbd38814c17e0ae916f9c465.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 51, + 209, + 164, + 273 + ], + "blocks": [ + { + "bbox": [ + 51, + 209, + 164, + 273 + ], + "lines": [ + { + "bbox": [ + 51, + 209, + 
164, + 273 + ], + "spans": [ + { + "bbox": [ + 51, + 209, + 164, + 273 + ], + "type": "image", + "image_path": "a1c6d82a63eaac9088c503f523d3ea567c058a5441ff66e354645956101708d8.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 166, + 209, + 279, + 273 + ], + "blocks": [ + { + "bbox": [ + 166, + 209, + 279, + 273 + ], + "lines": [ + { + "bbox": [ + 166, + 209, + 279, + 273 + ], + "spans": [ + { + "bbox": [ + 166, + 209, + 279, + 273 + ], + "type": "image", + "image_path": "126431da375604bd3609427392a8d3ab019499e3cb3a54d10618186df34cf56f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 51, + 274, + 164, + 351 + ], + "blocks": [ + { + "bbox": [ + 51, + 274, + 164, + 351 + ], + "lines": [ + { + "bbox": [ + 51, + 274, + 164, + 351 + ], + "spans": [ + { + "bbox": [ + 51, + 274, + 164, + 351 + ], + "type": "image", + "image_path": "fbfcb4a3c3a1852573cb190ac3b77585c3d495ec5990fda6b0e0a0f32c4f85b0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 353, + 287, + 419 + ], + "lines": [ + { + "bbox": [ + 46, + 353, + 287, + 419 + ], + "spans": [ + { + "bbox": [ + 46, + 353, + 287, + 419 + ], + "type": "text", + "content": "Figure 6. Error visualizing of our method (GMSF+3DSFlabelling) and baselines on the nuScenes dataset [2]. Using 3D EndPoint Error (EPE3D) as the metric, we categorize the error into six levels. Combining GMSF [51] with our proposed 3DSFlabelling, we manage to keep the EPE3D for most points within 0.02 meters, clearly outperforming other methods largely." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 166, + 274, + 279, + 351 + ], + "blocks": [ + { + "bbox": [ + 166, + 274, + 279, + 351 + ], + "lines": [ + { + "bbox": [ + 166, + 274, + 279, + 351 + ], + "spans": [ + { + "bbox": [ + 166, + 274, + 279, + 351 + ], + "type": "image", + "image_path": "5b9c81c31aac9e0aa0bf244ccb08bda00a3e45e08dd8f382166efe07078dcdf6.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 48, + 479, + 291, + 559 + ], + "blocks": [ + { + "bbox": [ + 46, + 431, + 287, + 475 + ], + "lines": [ + { + "bbox": [ + 46, + 431, + 287, + 475 + ], + "spans": [ + { + "bbox": [ + 46, + 431, + 287, + 475 + ], + "type": "text", + "content": "Table 4. Generalization comparison experiment. \"A\", \"N\", and \"K\" represent the Argoverse [3], nuScenes [2], and KITTI [10] datasets. " + }, + { + "bbox": [ + 46, + 431, + 287, + 475 + ], + "type": "inline_equation", + "content": "\\langle \\sim \\rangle" + }, + { + "bbox": [ + 46, + 431, + 287, + 475 + ], + "type": "text", + "content": " representing a model trained on the dataset on the left and directly evaluated on another new dataset on the right." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 479, + 291, + 559 + ], + "lines": [ + { + "bbox": [ + 48, + 479, + 291, + 559 + ], + "spans": [ + { + "bbox": [ + 48, + 479, + 291, + 559 + ], + "type": "table", + "html": "
<table><tr><td rowspan=\"2\">Method</td><td rowspan=\"2\">Sup.</td><td colspan=\"2\">A~→N</td><td colspan=\"2\">N~→A</td><td colspan=\"2\">A~→K</td><td colspan=\"2\">N~→K</td></tr>
<tr><td>EPE3D</td><td>Acc3DS</td><td>EPE3D</td><td>Acc3DS</td><td>EPE3D</td><td>Acc3DS</td><td>EPE3D</td><td>Acc3DS</td></tr>
<tr><td>PointPWC [49]</td><td>Self</td><td>0.5911</td><td>0.0844</td><td>0.7043</td><td>0.0281</td><td>0.8632</td><td>0.0119</td><td>0.9307</td><td>0.0027</td></tr>
<tr><td>RigidFlow [12]</td><td>Self</td><td>0.1135</td><td>0.3445</td><td>0.3991</td><td>0.0152</td><td>0.3645</td><td>0.2118</td><td>0.5042</td><td>0.0141</td></tr>
<tr><td>MSBRN [4]</td><td>Full</td><td>0.5309</td><td>0.0055</td><td>0.3761</td><td>0.0098</td><td>0.6036</td><td>0.0056</td><td>0.4926</td><td>0.0081</td></tr>
<tr><td>GMSF [51]</td><td>Full</td><td>0.0334</td><td>0.9037</td><td>0.3078</td><td>0.1278</td><td>0.0442</td><td>0.8764</td><td>0.0574</td><td>0.8135</td></tr>
<tr><td>Ours</td><td>Self</td><td>0.0115</td><td>0.9693</td><td>0.0264</td><td>0.9192</td><td>0.0414</td><td>0.9020</td><td>0.0208</td><td>0.9595</td></tr></table>
", + "image_path": "401fffce353f13df6e77680d13782e1f907a6cdc9944addc6538afb65337e2bd.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 581, + 286, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 581, + 286, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 581, + 286, + 700 + ], + "type": "text", + "content": "mates 3D scene flow on KITTI. GMSF [51] utilizes a transformer architecture for powerful fitting learning, but it lacks cross-domain generalization. The proposed method consistently shows better alignment between predicted and target point clouds across all scenes. Additionally, a visualization of the scene flow error on the nuScenes dataset is presented in Fig. 6. In two randomly selected test scenes, our method keeps the scene flow EPE3D mostly within " + }, + { + "bbox": [ + 46, + 581, + 286, + 700 + ], + "type": "inline_equation", + "content": "0.02m" + }, + { + "bbox": [ + 46, + 581, + 286, + 700 + ], + "type": "text", + "content": ", clearly outperforming other baselines. More visual comparisons will be presented in the supplementary material." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 59, + 701, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 286, + 713 + ], + "type": "text", + "content": "Table 4 provides quantitative results, demonstrating the" + } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 308, + 118, + 544, + 216 + ], + "blocks": [ + { + "bbox": [ + 305, + 71, + 545, + 114 + ], + "lines": [ + { + "bbox": [ + 305, + 71, + 545, + 114 + ], + "spans": [ + { + "bbox": [ + 305, + 71, + 545, + 114 + ], + "type": "text", + "content": "Table 5. Ablation study of 3D scene flow data augmentation. \"No Aug\" and \"Trad. Aug\" represents no data augmentation and traditional data augmentation [49], respectively. Our data augmentation method has a very positive impact on the model." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 118, + 544, + 216 + ], + "lines": [ + { + "bbox": [ + 308, + 118, + 544, + 216 + ], + "spans": [ + { + "bbox": [ + 308, + 118, + 544, + 216 + ], + "type": "table", + "html": "
<table><tr><td rowspan=\"2\">Model</td><td colspan=\"3\">Data Augmentation Methods</td><td colspan=\"2\">KITTI</td><td colspan=\"2\">Argoverse</td><td colspan=\"2\">nuScenes</td></tr>
<tr><td>No Aug</td><td>Trad. Aug</td><td>Our Aug</td><td>EPE3D</td><td>ACC3DS</td><td>EPE3D</td><td>ACC3DS</td><td>EPE3D</td><td>ACC3DS</td></tr>
<tr><td rowspan=\"3\">Ours (FLOT)</td><td>✓</td><td>-</td><td>-</td><td>0.0601</td><td>0.7291</td><td>0.0492</td><td>0.8015</td><td>0.7364</td><td>0.6642</td></tr>
<tr><td>-</td><td>✓</td><td>-</td><td>0.0540</td><td>0.7622</td><td>0.0430</td><td>0.8679</td><td>0.0610</td><td>0.7417</td></tr>
<tr><td>-</td><td>-</td><td>✓</td><td>0.0189</td><td>0.9666</td><td>0.0107</td><td>0.9711</td><td>0.0554</td><td>0.7601</td></tr>
<tr><td rowspan=\"3\">Ours (MSBRN)</td><td>✓</td><td>-</td><td>-</td><td>0.0131</td><td>0.9781</td><td>0.0180</td><td>0.9411</td><td>0.0797</td><td>0.8510</td></tr>
<tr><td>-</td><td>✓</td><td>-</td><td>0.0129</td><td>0.9790</td><td>0.0177</td><td>0.9427</td><td>0.0793</td><td>0.8547</td></tr>
<tr><td>-</td><td>-</td><td>✓</td><td>0.0123</td><td>0.9797</td><td>0.0150</td><td>0.9482</td><td>0.0235</td><td>0.9413</td></tr>
<tr><td rowspan=\"3\">Ours (GMSF)</td><td>✓</td><td>-</td><td>-</td><td>0.0103</td><td>0.9901</td><td>0.0139</td><td>0.9637</td><td>0.0213</td><td>0.9468</td></tr>
<tr><td>-</td><td>✓</td><td>-</td><td>0.0081</td><td>0.9918</td><td>0.0137</td><td>0.9663</td><td>0.0212</td><td>0.9473</td></tr>
<tr><td>-</td><td>-</td><td>✓</td><td>0.0078</td><td>0.9924</td><td>0.0093</td><td>0.9780</td><td>0.0185</td><td>0.9534</td></tr></table>
", + "image_path": "671a56c460220d6f874ba114b3b842b3aa0d9617a1be6e8a7b5ec159424c0537.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 233, + 545, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 233, + 545, + 341 + ], + "spans": [ + { + "bbox": [ + 304, + 233, + 545, + 341 + ], + "type": "text", + "content": "generalization of our 3DSFlabelling combined with the existing method (GMSF [51]) on new datasets. For instance, we train a model on the Argoverse dataset and directly evaluate it on the nuScenes dataset. These two datasets belong to different domains, posing a domain generalization problem. The results in Table 4 indicate that our framework performs exceptionally well on the new dataset, consistently achieving an EPE3D of less than " + }, + { + "bbox": [ + 304, + 233, + 545, + 341 + ], + "type": "inline_equation", + "content": "5cm" + }, + { + "bbox": [ + 304, + 233, + 545, + 341 + ], + "type": "text", + "content": ", and even reaching an average endpoint error of less than " + }, + { + "bbox": [ + 304, + 233, + 545, + 341 + ], + "type": "inline_equation", + "content": "2cm" + }, + { + "bbox": [ + 304, + 233, + 545, + 341 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 346, + 400, + 360 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 346, + 400, + 360 + ], + "spans": [ + { + "bbox": [ + 305, + 346, + 400, + 360 + ], + "type": "text", + "content": "4.5. Ablation Study" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 365, + 545, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 365, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 304, + 365, + 545, + 497 + ], + "type": "text", + "content": "This section explores the advantages of global-local data augmentation. In Table 5, we compare existing 3D scene flow data augmentation [49] with our proposed global-local data augmentation method. Our augmentation strategy shows significant enhancement in all evaluation metrics. This is attributed to the effective simulation of various motion patterns in autonomous driving by global-local data augmentation. The introduction of various motion transformations excellently utilizes the limited training data to extend a variety of 3D scene flow styles. More ablation studies are referring to the supplement material." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 506, + 379, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 506, + 379, + 519 + ], + "spans": [ + { + "bbox": [ + 306, + 506, + 379, + 519 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 526, + 545, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 526, + 545, + 670 + ], + "spans": [ + { + "bbox": [ + 304, + 526, + 545, + 670 + ], + "type": "text", + "content": "We package 3D point clouds into boxes with different motion attributes. By optimizing the motion parameters for each box and warping the source point cloud into the target point cloud, we create pseudo 3D scene flow labels. We also design a global-local data augmentation method, introducing various scene motion patterns and significantly increasing the diversity and quantity of 3D scene flow labels. 
Tests on multiple real-world datasets show that our 3D scene flow auto-labelling significantly enhances the performance of existing models. Importantly, this approach eliminates the need for 3D scene flow estimation models to depend on manually annotated 3D scene flow labels." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 674, + 421, + 688 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 674, + 421, + 688 + ], + "spans": [ + { + "bbox": [ + 306, + 674, + 421, + 688 + ], + "type": "text", + "content": "6. Acknowledgements" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 690, + 545, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 690, + 545, + 703 + ], + "spans": [ + { + "bbox": [ + 317, + 690, + 545, + 703 + ], + "type": "text", + "content": "This work was supported by PhiGent Robotics." + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "15180" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "text", + "content": "[1] Jens Behley, Martin Garbade, Andres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Jurgen Gall. Semantickitti: A dataset for semantic scene understanding of lidar sequences. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9297-9307, 2019. 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 158, + 288, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 158, + 288, + 223 + ], + "spans": [ + { + "bbox": [ + 53, + 158, + 288, + 223 + ], + "type": "text", + "content": "[2] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 224, + 288, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 288, + 290 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 288, + 290 + ], + "type": "text", + "content": "[3] Ming-Fang Chang, John Lambert, Patsorn Sangkloy, Jagjeet Singh, Slawomir Bak, Andrew Hartnett, De Wang, Peter Carr, Simon Lucey, Deva Ramanan, et al. Argoverse: 3d tracking and forecasting with rich maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8748-8757, 2019. 
1, 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 291, + 287, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 291, + 287, + 345 + ], + "spans": [ + { + "bbox": [ + 53, + 291, + 287, + 345 + ], + "type": "text", + "content": "[4] Wencan Cheng and Jong Hwan Ko. Multi-scale bidirectional recurrent network with hybrid correlation for point cloud based scene flow estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10041-10050, 2023. 1, 2, 6, 7, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 346, + 288, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 346, + 288, + 389 + ], + "spans": [ + { + "bbox": [ + 53, + 346, + 288, + 389 + ], + "type": "text", + "content": "[5] David Deng and Avideh Zakhor. Rsf: Optimizing rigid scene flow from 3d point clouds without labels. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1277-1286, 2023. 2, 3, 4, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "type": "text", + "content": "[6] Fangqiang Ding, Zhijun Pan, Yimin Deng, Jianning Deng, and Chris Xiaoxuan Lu. Self-supervised scene flow estimation with 4-d automotive radar. IEEE Robotics and Automation Letters, 7(3):8233-8240, 2022. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 436, + 287, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 436, + 287, + 490 + ], + "spans": [ + { + "bbox": [ + 53, + 436, + 287, + 490 + ], + "type": "text", + "content": "[7] Guanting Dong, Yueyi Zhang, Hanlin Li, Xiaoyan Sun, and Zhiwei Xiong. Exploiting rigidity constraints for lidar scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12776-12785, 2022. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 491, + 287, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 491, + 287, + 556 + ], + "spans": [ + { + "bbox": [ + 53, + 491, + 287, + 556 + ], + "type": "text", + "content": "[8] Emeç Erçelik, Ekim Yurtsever, Mingyu Liu, Zhijie Yang, Hanzhen Zhang, Pinar Topçam, Maximilian Listl, Yilmaz Kaan Cayli, and Alois Knoll. 3d object detection with a self-supervised lidar scene flow backbone. In European Conference on Computer Vision, pages 247-265. Springer, 2022. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 558, + 287, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 558, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 53, + 558, + 287, + 601 + ], + "type": "text", + "content": "[9] Jingyun Fu, Zhiyu Xiang, Chengyu Qiao, and Tingming Bai. Pt-flownet: Scene flow estimation on point clouds with point transformer. IEEE Robotics and Automation Letters, 8(5): 2566-2573, 2023. 1, 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 602, + 287, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 287, + 645 + ], + "type": "text", + "content": "[10] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. 
In 2012 IEEE conference on computer vision and pattern recognition, pages 3354-3361. IEEE, 2012. 6, 7, 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "text", + "content": "[11] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. The International Journal of Robotics Research, 32(11):1231-1237, 2013. 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "text", + "content": "[12] Zan Gojcic, Or Litany, Andreas Wieser, Leonidas J Guibas, and Tolga Birdal. Weakly supervised learning of rigid 3d" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 714 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "scene flow. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5692-5703, 2021. 3, 6, 7, 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 107, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 545, + 161 + ], + "type": "text", + "content": "[13] Xiuye Gu, Yijie Wang, Chongruo Wu, Yong Jae Lee, and Panqu Wang. Hplflownet: Hierarchical permutohedral lattice flownet for scene flow estimation on large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3254-3263, 2019. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "type": "text", + "content": "[14] Shengyu Huang, Zan Gojcic, Jiahui Huang, Andreas Wieser, and Konrad Schindler. Dynamic 3d scene analysis by point cloud accumulation. In European Conference on Computer Vision, pages 674-690. Springer, 2022. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 206, + 545, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 206, + 545, + 270 + ], + "spans": [ + { + "bbox": [ + 308, + 206, + 545, + 270 + ], + "type": "text", + "content": "[15] Hafsa Iqbal, Abdulla Al-Kaff, Pablo Marin, Lucio Marcenaro, David Martin Gomez, and Carlo Regazzoni. Detection of abnormal motion by estimating scene flows of point clouds for autonomous driving. In 2021 IEEE International Intelligent Transportation Systems Conference (ITSC), pages 2788-2793. IEEE, 2021. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 272, + 545, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 272, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 308, + 272, + 545, + 316 + ], + "type": "text", + "content": "[16] Chaokang Jiang, Guangming Wang, Jinxing Wu, Yanzi Miao, and Hesheng Wang. 
Ffpa-net: Efficient feature fusion with projection awareness for 3d object detection. arXiv preprint arXiv:2209.07419, 2022. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 316, + 545, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 316, + 545, + 360 + ], + "spans": [ + { + "bbox": [ + 308, + 316, + 545, + 360 + ], + "type": "text", + "content": "[17] Chaokang Jiang, Guangming Wang, Yanzi Miao, and Hesheng Wang. 3-d scene flow estimation on pseudo-lidar: Bridging the gap on estimating point motion. IEEE Transactions on Industrial Informatics, 19(6):7346-7354, 2023. 6, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 361, + 545, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 361, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 308, + 361, + 545, + 425 + ], + "type": "text", + "content": "[18] Zhao Jin, Yinjie Lei, Naveed Akhtar, Haifeng Li, and Munawar Hayat. Deformation and correspondence aware unsupervised synthetic-to-real scene flow estimation for point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7233-7243, 2022. 1, 6, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 426, + 545, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 426, + 545, + 480 + ], + "spans": [ + { + "bbox": [ + 308, + 426, + 545, + 480 + ], + "type": "text", + "content": "[19] Yair Kittenplon, Yonina C Eldar, and Dan Raviv. Flowstep3d: Model unrolling for self-supervised scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4114-4123, 2021. 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 482, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 482, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 308, + 482, + 545, + 536 + ], + "type": "text", + "content": "[20] Itai Lang, Dror Aiger, Forrester Cole, Shai Avidan, and Michael Rubinstein. Scoop: Self-supervised correspondence and optimization-based scene flow. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5281-5290, 2023. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 537, + 545, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 545, + 581 + ], + "type": "text", + "content": "[21] Bing Li, Cheng Zheng, Silvio Giancola, and Bernard Ghanem. Sctn: Sparse convolution-transformer network for scene flow estimation. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1254–1262, 2022. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "type": "text", + "content": "[22] Ruibo Li, Chi Zhang, Guosheng Lin, Zhe Wang, and Chunhua Shen. Rigidflow: Self-supervised scene flow learning on point clouds by local rigidity prior. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16959-16968, 2022. 
2, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 636, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 636, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 636, + 545, + 669 + ], + "type": "text", + "content": "[23] Xueqian Li, Jhony Kaesemodel Pontes, and Simon Lucey. Neural scene flow prior. Advances in Neural Information Processing Systems, 34:7838-7851, 2021. 3, 6, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "type": "text", + "content": "[24] Xueqian Li, Jianqiao Zheng, Francesco Ferroni, Jhony Kaesemodel Pontes, and Simon Lucey. Fast neural scene flow. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 9878-9890, 2023. 1, 3, 7" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15181" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[25] Jiuming Liu, Guangming Wang, Chaokang Jiang, Zhe Liu, and Hesheng Wang. Translo: A window-based masked point transformer framework for large-scale lidar odometry. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1683-1691, 2023. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 288, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 288, + 194 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 288, + 194 + ], + "type": "text", + "content": "[26] Jiuming Liu, Guangming Wang, Zhe Liu, Chaokang Jiang, Marc Pollefeys, and Hesheng Wang. Regformer: an efficient projection-aware transformer network for large-scale point cloud registration. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8451-8460, 2023. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 197, + 288, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 197, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 48, + 197, + 288, + 251 + ], + "type": "text", + "content": "[27] Jiuming Liu, Guangming Wang, Weicai Ye, Chaokang Jiang, Jinru Han, Zhe Liu, Guofeng Zhang, Dalong Du, and Hesheng Wang. Difflow3d: Toward robust uncertainty-aware scene flow estimation with diffusion model. arXiv preprint arXiv:2311.17456, 2023. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 253, + 287, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 253, + 287, + 297 + ], + "spans": [ + { + "bbox": [ + 48, + 253, + 287, + 297 + ], + "type": "text", + "content": "[28] Xingyu Liu, Charles R Qi, and Leonidas J Guibas. Flownet3d: Learning scene flow in 3d point clouds. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 529-537, 2019. 2, 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 298, + 287, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 298, + 287, + 363 + ], + "spans": [ + { + "bbox": [ + 48, + 298, + 287, + 363 + ], + "type": "text", + "content": "[29] Nikolaus Mayer, Eddy Ilg, Philip Hausser, Philipp Fischer, Daniel Cremers, Alexey Dosovitskiy, and Thomas Brox. A large dataset to train convolutional networks for disparity, optical flow, and scene flow estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4040-4048, 2016. 1, 2, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 365, + 288, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 288, + 408 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 288, + 408 + ], + "type": "text", + "content": "[30] Moritz Menze, Christian Heipke, and Andreas Geiger. Joint 3d estimation of vehicles and scene flow. ISPRS annals of the photogrammetry, remote sensing and spatial information sciences, 2:427-434, 2015. 1, 2, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 411, + 287, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 411, + 287, + 442 + ], + "spans": [ + { + "bbox": [ + 48, + 411, + 287, + 442 + ], + "type": "text", + "content": "[31] Moritz Menze, Christian Heipke, and Andreas Geiger. Object scene flow. ISPRS Journal of Photogrammetry and Remote Sensing, 140:60-76, 2018. 1, 2, 6, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 445, + 287, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 445, + 287, + 488 + ], + "spans": [ + { + "bbox": [ + 48, + 445, + 287, + 488 + ], + "type": "text", + "content": "[32] Himangi Mittal, Brian Okorn, and David Held. Just go with the flow: Self-supervised scene flow estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11177-11185, 2020. 2, 3, 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "type": "text", + "content": "[33] Mahyar Najibi, Jingwei Ji, Yin Zhou, Charles R Qi, Xinchen Yan, Scott Ettinger, and Dragomir Anguelov. Motion inspired unsupervised perception and prediction in autonomous driving. In European Conference on Computer Vision, pages 424-443. Springer, 2022. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 546, + 287, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 546, + 287, + 611 + ], + "spans": [ + { + "bbox": [ + 48, + 546, + 287, + 611 + ], + "type": "text", + "content": "[34] Chensheng Peng, Guangming Wang, Xian Wan Lo, Xinrui Wu, Chenfeng Xu, Masayoshi Tomizuka, Wei Zhan, and Hesheng Wang. Delflow: Dense efficient learning of scene flow for large-scale point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16901-16910, 2023. 
2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 613, + 287, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 656 + ], + "type": "text", + "content": "[35] Jhony Kaesemodel Pontes, James Hays, and Simon Lucey. Scene flow from point clouds with or without learning. In 2020 International Conference on 3D Vision (3DV), pages 261-270, 2020. 3, 5, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[36] Gilles Puy, Alexandre Boulch, and Renaud Marlet. Flot: Scene flow on point clouds guided by optimal transport. In ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XXVIII 16, pages 527–544, 2020. 1, 2, 6, 7" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "type": "text", + "content": "[37] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 129, + 545, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 171 + ], + "type": "text", + "content": "[38] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 173, + 545, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 173, + 545, + 216 + ], + "spans": [ + { + "bbox": [ + 307, + 173, + 545, + 216 + ], + "type": "text", + "content": "[39] Yaqi Shen, Le Hui, Jin Xie, and Jian Yang. Self-supervised 3d scene flow estimation guided by superpoints. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5271-5280, 2023. 1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 217, + 545, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 217, + 545, + 259 + ], + "spans": [ + { + "bbox": [ + 307, + 217, + 545, + 259 + ], + "type": "text", + "content": "[40] Ivan Tishchenko, Sandro Lombardi, Martin R Oswald, and Marc Pollefeys. Self-supervised learning of non-rigid residual flow and ego-motion. In 2020 international conference on 3D vision (3DV), pages 150-159. IEEE, 2020. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 261, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 261, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 307, + 261, + 545, + 293 + ], + "type": "text", + "content": "[41] Kavisha Vidanapathirana, Shin-Fang Chng, Xueqian Li, and Simon Lucey. Multi-body neural scene flow. 
arXiv preprint arXiv:2310.10301, 2023. 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 294, + 545, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 294, + 545, + 337 + ], + "spans": [ + { + "bbox": [ + 307, + 294, + 545, + 337 + ], + "type": "text", + "content": "[42] Guangming Wang, Xinrui Wu, Zhe Liu, and Hesheng Wang. Hierarchical attention learning of scene flow in 3d point clouds. IEEE Transactions on Image Processing, 30:5168-5181, 2021. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 338, + 545, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 338, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 307, + 338, + 545, + 381 + ], + "type": "text", + "content": "[43] Guangming Wang, Yunzhe Hu, Zhe Liu, Yiyang Zhou, Masayoshi Tomizuka, Wei Zhan, and Hesheng Wang. What matters for 3d scene flow network. In European Conference on Computer Vision, pages 38-55. Springer, 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 383, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 383, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 383, + 545, + 426 + ], + "type": "text", + "content": "[44] Guangming Wang, Chaokang Jiang, Zehang Shen, Yanzi Miao, and Hesheng Wang. Sfgan: Unsupervised generative adversarial learning of 3d scene flow from the 3d scene self. Advanced Intelligent Systems, 4(4):2100197, 2022. 2, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 427, + 545, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 427, + 545, + 469 + ], + "spans": [ + { + "bbox": [ + 307, + 427, + 545, + 469 + ], + "type": "text", + "content": "[45] Yun Wang, Cheng Chi, and Xin Yang. Exploiting implicit rigidity constraints via weight-sharing aggregation for scene flow estimation from point clouds. arXiv preprint arXiv:2303.02454, 2023. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 472, + 545, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 472, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 307, + 472, + 545, + 514 + ], + "type": "text", + "content": "[46] Ziyi Wang, Yi Wei, Yongming Rao, Jie Zhou, and Jiwen Lu. 3d point-voxel correlation fields for scene flow estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2023. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 516, + 545, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 516, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 307, + 516, + 545, + 568 + ], + "type": "text", + "content": "[47] Yi Wei, Ziyi Wang, Yongming Rao, Jiwen Lu, and Jie Zhou. Pv-raft: Point-voxel correlation fields for scene flow estimation of point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6954–6963, 2021. 1, 2, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 571, + 545, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 571, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 307, + 571, + 545, + 624 + ], + "type": "text", + "content": "[48] Pengxiang Wu, Siheng Chen, and Dimitris N Metaxas. Motionnet: Joint perception and motion prediction for autonomous driving based on bird's eye view maps. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11385-11395, 2020. 
1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 625, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 625, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 307, + 625, + 545, + 669 + ], + "type": "text", + "content": "[49] Wenxuan Wu, Zhi Yuan Wang, Zhuwen Li, Wei Liu, and Li Fuxin. Pointpwc-net: Cost volume on point clouds for (self-) supervised scene flow estimation. In European Conference on Computer Vision, pages 88-107, 2020. 2, 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "text", + "content": "[50] Yi Zhang, Yuwen Ye, Zhiyu Xiang, and Jiaqi Gu. Sdp-net: Scene flow based real-time object detection and prediction from sequential 3d point clouds. In Proceedings of the Asian Conference on Computer Vision, 2020. 1" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "15182" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 150 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[51] Yushan Zhang, Johan Edstedt, Bastian Wandt, Per-Erik Forssén, Maria Magnusson, and Michael Felsberg. Gmsf: Global matching scene flow. arXiv preprint arXiv:2305.17432, 2023. 1, 2, 6, 7, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 150 + ], + "type": "text", + "content": "[52] Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Open3d: A modern library for 3d data processing. arXiv preprint arXiv:1801.09847, 2018. 
3" + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "15183" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/2c81e75a-1abf-4d0b-aa9d-80d61a8cb264_content_list.json b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/2c81e75a-1abf-4d0b-aa9d-80d61a8cb264_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..2f91eee80437c4cf065a6aa21ee01915cb9241ab --- /dev/null +++ b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/2c81e75a-1abf-4d0b-aa9d-80d61a8cb264_content_list.json @@ -0,0 +1,1766 @@ +[ + { + "type": "text", + "text": "3DToonify: Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images", + "text_level": 1, + "bbox": [ + 106, + 128, + 864, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yifang Men $^{1*}$ , Hanxi Liu $^{2*}$ , Yuan Yao $^{1}$ , Miaomiao Cui $^{1}$ , Xuansong Xie $^{1}$ , Zhouhui Lian $^{2\\dagger}$ $^{1}$ Institute for Intelligent Computing, Alibaba Group", + "bbox": [ + 143, + 202, + 833, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Wangxuan Institute of Computer Technology, Peking University, China", + "bbox": [ + 199, + 239, + 769, + 257 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/da767aa80c93a5565c7fc9d733d21dcc827b6b033124b6ab68f1e8650a012ff7.jpg", + "image_caption": [ + "(a) Input video" + ], + "image_footnote": [], + "bbox": [ + 81, + 276, + 189, + 523 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/fd88199e8286ebe9ab4bc36991e5c5fd4ce50af1e43504a13b6561fdb0890581.jpg", + "image_caption": [ + "(b) 3D style adaption" + ], + "image_footnote": [], + "bbox": [ + 191, + 276, + 449, + 523 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/923548eb85c654ebbe7c547e03b6e51dfadc1de6a918a579005c6a51189f3ac0.jpg", + "image_caption": [ + "(c) View-consistent renderings in different styles", + "Figure 1. Given a set of RGB portrait images captured by a monocular camera, our method can learn a photorealistic representation in neural implicit fields, and transfer it to artistic ones with underlying 3D structures changed. Multiple stylized results can be rendered from arbitrary novel viewpoints with consistent geometry and texture." + ], + "image_footnote": [], + "bbox": [ + 452, + 276, + 887, + 522 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 602, + 313, + 617 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Visual content creation has aroused a surge of interest given its applications in mobile photography and AR/VR. Portrait style transfer and 3D recovery from monocular images as two representative tasks have so far evolved independently. In this paper, we make a connection between the two, and tackle the challenging task of 3D portrait stylization - modeling high-fidelity 3D stylized avatars from captured 2D portrait images. 
However, naively combining the techniques from the two isolated areas may suffer from either inadequate stylization or absence of 3D assets. To this end, we propose 3DToonify, a new framework that introduces a progressive training scheme to achieve 3D style adaption on spatial neural representation (SNR). SNR is constructed with implicit fields and they are dy", + "bbox": [ + 73, + 628, + 470, + 840 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "namically optimized by the progressive training scheme, which consists of three stages: guided prior learning, deformable geometry adaption and explicit texture adaption. In this way, stylized geometry and texture are learned in SNR in an explicit and structured way with only a single stylized exemplar needed. Moreover, our method obtains style-adaptive underlying structures (i.e., deformable geometry and exaggerated texture) and view-consistent stylized avatar rendering from arbitrary novel viewpoints. Both qualitative and quantitative experiments have been conducted to demonstrate the effectiveness and superiority of our method for automatically generating exemplar-guided 3D stylized avatars.", + "bbox": [ + 500, + 603, + 893, + 799 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 804, + 632, + 819 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Portrait style transfer [30, 53] aims to transform real face images into artistic 2D portraits in desired visual styles while maintaining personal identity. However, given a sequence of portrait images captured from different viewpoints, existing portrait style transfer methods are typically", + "bbox": [ + 496, + 825, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Denotes equal contribution.", + "bbox": [ + 94, + 851, + 251, + 864 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding author. E-mail: lianzhouhui@pku.edu.cn.", + "bbox": [ + 96, + 864, + 401, + 876 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "This work was partially supported by National Natural Science Foundation of China (Grant No.: 62372015).", + "bbox": [ + 78, + 877, + 468, + 901 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "10127", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "only effective for limited forward-facing photos and fails to maintain view consistency in 3D space. Essentially, existing methods only learn a style transfer between 2D features, and have no sense to 3D representations built on real-world objects. What if we can construct and stylize underlying 3D structures from captured 2D portrait images? See Figure 1 for an example. When stylized with 3D structures (i.e., geometry and texture), we can easily render view-free stylized portraits with 3D consistency and robust artistic results. 
This capacity will extremely facilitate the 3D content creation process which often requires large amounts of time and special expertise, and make it accessible to a variety of novice users. As shown in Figure 1, this paper aims to address the challenging task of generating high-fidelity 3D avatar from a portrait video by following the style of a given exemplar image. We refer this task as 3D portrait stylization – a marriage between portrait style transfer and 3D recovery from monocular images.", + "bbox": [ + 76, + 90, + 472, + 363 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The naive solution to the task mentioned above is directly combining existing methods of 2D portrait stylization with 3D reconstruction, i.e., learning 3D representations such as voxels [36], primitives [26] or occupancy fields [31] directly from stylized portrait images. However, it is less effective due to the biased image manifold built by 2D portrait stylization, making the representation learning be ill-posed with highly-biased visible views. Recently, neural radiance field (NeRF) [4, 18, 32, 33, 40, 49] has made great progress due to its advanced ability to achieve photo-realistic novel view synthesis with sparse input views. Some previous attempts [8, 34, 35, 48, 58] also combine NeRF with image-based [11] or text-driven [42] neural style transfer to generate novel views of stylized 3D scenes or avatars. Recently, a series of new works have started to focus on 3D stylized avatar generation. Some methods [7, 15, 23, 25, 27, 41, 50, 56] exploit the great potential of 2D text-to-image diffusion models [44-46] to generate 3D cartoonish avatars according to a given text prompt. Others [2, 51, 55, 57] build on 3D generative models [6, 38] to bridge the gap between the real space and the target domain, and generate avatars with certain styles under a sampled latent vector. However, all these methods either can not achieve high-fidelity personalized 3D portrait stylization with user-specific identities and styles, or fail to generate fine-grained full-head avatars that support view-consistent rendering from arbitrary viewpoints.", + "bbox": [ + 76, + 367, + 472, + 776 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the aforementioned challenges, we draw inspiration from domain adaption on 2D features [10, 30], and introduce a progressive training scheme to achieve 3D style adaption on spatial neural representation (SNR). The key insights of this design are twofold. First, it is hard to directly learn an accurate 3D representation field from stylized portraits with few-shot inconsistent 2D views, but easier to learn a photorealistic field as a prior and adapt it to", + "bbox": [ + 75, + 779, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "target style fields with transfer learning. Second, learning spatial representation with disentangled surface and texture allows for flexible geometry deformation and texture adaption, leading to more diverse and fine-grained style editing. To this end, we construct SNR with neural implicit fields and dynamically optimize its subfields with a progressive training scheme. This scheme includes the following three stages: prior learning to obtain an accurate human reconstruction, geometry adaption to produce inherently exaggerated deformation, and texture adaption to realize artistic albedo decomposition. 
Eventually, the 2D portraits are converted to stylized SNR, and explicit 3D assets can be easily extracted with disentangled 3D structures. In summary, our contributions are threefold:", + "bbox": [ + 496, + 90, + 893, + 301 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We present a new method that adopts neural implicit fields to address the challenging task of generating high-fidelity 3D avatar from a portrait video by following the style of a given exemplar image. Stylized results can be rendered under arbitrary novel viewpoints with consistent geometry and texture.", + "- We introduce an elegant network of spatial neural representation to model common attributes over the 3D space. This design allows for disentangled geometry and texture adaption, achieving more flexible and fine-grained 3D stylization results.", + "- We propose a novel progressive training scheme of 3D style adaption. Cooperated with the delicately-designed spatial neural network, it enables learning realistic 3D cartoon avatars with deformed geometry and stylized texture." + ], + "bbox": [ + 500, + 303, + 890, + 544 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 554, + 640, + 570 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2D Portrait Stylization. In the deep neural network based portrait stylization, there are two types of approaches, i.e., image-to-image translation and StyleGAN based translation. Methods [21, 24] conduct face-to-cartoon translation by adopting the framework of cycleGAN [60]. Nevertheless, training such methods requires extensive data and may still generate unstable results. StyleGAN [16, 17] has become a popular alternative for portrait stylization due to its strong capacity for latent inversion and style control. [30] proposes a calibration framework to adapt the original training distribution for fine-grained translation. [53] leverages the mid- and high-resolution layers of StyleGAN to render high-quality artistic portraits based on the multi-scale content features to better preserve details. Although high-quality results have been shown, these methods cannot handle extreme face angle while maintaining cross-view consistency.", + "bbox": [ + 496, + 580, + 893, + 838 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural Implicit Fields. Recently, neural implicit functions have emerged as an effective representation to model conventional 3D scenes due to its continuous nature. This representation has been successfully adopted to shape model-", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "10128", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/7451c05f14cfdda6536561d10e264702e5a878e43da945522c450760650656a9.jpg", + "image_caption": [ + "Figure 2. An overview of the proposed framework. Our method first learns a photorealistic field built-upon spatial neural representation (SNR) using dense input views, then transfers this prior representation to artistic ones with few-shot stylized views by adapting underlying 3D structures. SNR is constructed by a geometry field for SDF surface, an appearance field for observed color, and a texture field for albedo color, respectively. The progressive training scheme is adopted to enable SNR to learn about stylized geometry and texture in an explicit and structured manner." 
+ ], + "image_footnote": [], + "bbox": [ + 89, + 90, + 359, + 316 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/849c9135f9defd35b372ddc6d31c4785b906add425fb7c11d37fd904953665ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 377, + 93, + 665, + 316 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/d861cb72c80ec9f08a4376eebdcce5f4141cf2707931a0a448a4edb4156894cf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 95, + 872, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ing [12, 39], novel view synthesis [29, 32] and multi-view 3D reconstruction [49, 54]. The method of Neural Radiance Fields (NeRF) [32], in particular, has attracted significant attention for its ability to achieve photo-realistic novel view synthesis results by utilizing neural implicit functions together with volume rendering. A number of variants have been developed thereafter to fit with different scenarios and requirements, including quality improvement [4], fast rendering [33], dynamic scene capture [40] and generative models [5]. However, NeRF's estimated volume density does not admit accurate surface reconstruction, the recovered 3D geometry is far from satisfactory and can hardly be extracted as explicit materials. Recent works tackle the issue by combining implicit surface functions. [37] represents the surface by occupancy values and shrink the sample region of volume rendering during the optimization. [49] introduces signed distance functions (SDF) to represent the scene and can directly extract the surface as the zero-level set of the SDF with better accuracy.", + "bbox": [ + 75, + 412, + 472, + 700 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D Avatar Stylization. 3D avatar stylization aims to generate stylized 3D avatars whose rendered images captured from different viewpoints match the specific style. Early methods are either mesh-driven [13] or rely on explicit parameterization [47]. More recently, [35, 48] exploit the flexibility of neural radiance field and propose a text-guided stylization approach that manipulates the reconstructed scenes with input text prompts. However, due to the limited expressiveness of natural languages, they can not generate highly-detailed results with arbitrary user-specific styles. Another stream of methods [2, 19, 20, 57] using 3D generative models [6, 38] have extended avatar stylization to 3D-aware domain adaption. However, inherited from", + "bbox": [ + 75, + 704, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "their predecessors, these methods can not synthesize full-head avatars in $360^{\\circ}$ , and perform badly with real-world out-of-domain data. In contrast, our method utilizes the implicit representation to model high-fidelity 3D avatars from captured portrait videos, which allows for superior view consistency and stable stylization.", + "bbox": [ + 496, + 412, + 893, + 505 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method Description", + "text_level": 1, + "bbox": [ + 498, + 512, + 692, + 530 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the short portrait video of a person captured with a monocular camera, we aim to generate the high-fidelity 3D stylized avatar of the person. The person stands still when recording the video. We denote the split frames of the video as $\\{I_i | i = 1, \\dots, N\\}$ , where $i$ is the frame index, $N$ is the number of frames. 
For each frame, we use COLMAP to obtain the calibrated camera and the method proposed in [28] to extract the foreground human mask.", + "bbox": [ + 496, + 537, + 890, + 657 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The overview of the proposed framework is illustrated in Figure 2. 3DToonify aims to learn the stylized human neural field by adapting 3D structures in a progressive training scheme. This scheme is built upon a spatial neural representation, which utilizes disentangled implicit fields to capture the underlying 3D structures such as geometry and texture (Section 3.1). We first leverage the geometric guidance from a multi-view stereo to learn a robust photorealistic representation, acting as a source prior (Section 3.2). Then this prior representation is adapted to the style domain with adaptive geometry deformation (Section 3.3.1) and decomposed albedo colors (Section 3.3.2). In this way, the stylized human avatar field can be constructed by SNR with transformed underlying structures, thus allowing for fully stylized results and 3D consistent rendering in arbitrary viewpoints.", + "bbox": [ + 496, + 659, + 892, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "10129", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Spatial neural representation", + "text_level": 1, + "bbox": [ + 76, + 90, + 338, + 107 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proposed spatial neural representation (SNR) is based on neural radiance field (NeRF) [32], which can be seen as a continuous 5D function that maps a 3D position $\\mathbf{x}$ and a viewing direction $\\mathbf{v}$ to an emitted color $\\mathbf{c} = (r, g, b)$ and a volume density $\\sigma$ . NeRF is approximated by a multi-layer perceptron (MLP) $F_{\\theta}: (\\mathbf{x}, \\mathbf{v}) \\rightarrow \\mathbf{c}, \\sigma$ . SNR consists of three MLPs $F_{geo}, F_{app}$ and $F_{tex}$ , representing the decomposed fields of geometry, the observed appearance color and the albedo texture color, respectively.", + "bbox": [ + 75, + 116, + 470, + 253 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Geometry field learns a function $F_{geo} : \\mathbb{R}^3 \\to \\mathbb{R}$ that maps a spatial point $\\mathbf{x} \\in \\mathbb{R}^3$ to its signed distance value $\\mathcal{G}$ to the object surface. It constructs the underlying object surface by encoding a signed distance function (SDF) of only location $\\mathbf{x}$ . In order to be compatible with the rendering procedure of the radiance field, a probability function $\\psi(\\cdot)$ proposed by [49] is used to calculate the point weight $w$ from the signed distance value $\\mathcal{G}$ , where $\\psi(\\cdot)$ denotes an unbiased and occlusion-aware approximation. With this implicit SDF representation, the explicit object surface $S$ can be easily extracted by the zero level-set of the $SDF : S = \\{\\mathbf{x} \\in \\mathbb{R}^3 | \\mathcal{G}(\\mathbf{x}) = 0\\}$ .", + "bbox": [ + 75, + 255, + 472, + 438 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Appearance field learns a function $F_{app} : \\mathbb{R}^3 \\times \\mathbb{S}^2 \\to \\mathbb{R}^3$ to encode the observed colors $\\mathbf{c}_{app}$ associated with the point $\\mathbf{x} \\in \\mathbb{R}^3$ and the view direction $\\mathbf{v} \\in \\mathbb{S}^2$ . The feature vectors $F(\\mathbf{x})$ derived from $F_{geo}$ are also concatenated as the inputs. 
To better approximate the appearance colors of the object captured in read-world scenes, $F_{app}$ is introduced as a function of both location and viewing direction, thus allowing learning view-dependent RGB colors for multi-view images. Notably, the learned representation in $F_{app}$ could be degraded into reflection components $\\mathbf{s}$ , which are caused by illumination and vary with view directions. It will be adaptively changed in the later training stage (see the detailed discussion in Section 3.3.2).", + "bbox": [ + 75, + 439, + 470, + 636 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Texture field learns a function $F_{tex} : \\mathbb{R}^3 \\to \\mathbb{R}$ to encode the albedo color for the texture atlas $\\mathbf{c}_{tex}$ associated with only the spatial location $\\mathbf{x}$ . Similar to $F_{app}$ , feature vectors derived from $F_{geo}$ are concatenated as inputs. We encourage the texture representation to be multi-view consistent by restricting $F_{tex}$ being a function of only $\\mathbf{x}$ , while allowing the final color $\\mathbf{c} = \\mathbf{s} \\circ \\mathbf{c}_{tex}$ to be view-dependent to satisfy different view observations, where $\\circ$ denotes element-wise multiplication. With the nature of view-independent representation of $F_{tex}$ , explicit textures can be obtained by accumulating the volume albedo colors.", + "bbox": [ + 75, + 638, + 468, + 806 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proposed geometry field and texture field are formulated in a view-independent function, once being effectively learned, they can express spatial attributes shared by the entire 3D space. This enables editable 3D structures with only few-shot stylized views needed in the later adaption process.", + "bbox": [ + 75, + 809, + 470, + 898 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ac93a99053c6774a73519ac4ada0afbd4d6bbcb0072cac4b917e309346da142e.jpg", + "image_caption": [ + "(a) Stage I", + "Figure 3. Visualized results in stage I, II, III." + ], + "image_footnote": [], + "bbox": [ + 540, + 89, + 619, + 207 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/42cc2a07b00e0bf76703a28ec4574e850b5dfbc3702c5f880f66eda604ca39ee.jpg", + "image_caption": [ + "(b) Stage II" + ], + "image_footnote": [], + "bbox": [ + 622, + 89, + 707, + 207 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/1dd871b05424d5ce98197c527cd4f4940ef247b220cddcbd23ea978f1eefffeb.jpg", + "image_caption": [ + "(c) Stage III" + ], + "image_footnote": [], + "bbox": [ + 725, + 89, + 854, + 207 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. MVS guided prior learning", + "text_level": 1, + "bbox": [ + 500, + 251, + 746, + 268 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this module, we learn the photorealistic representation as a prior for the later 3D style adaption. Due to the complexity of real-world captures caused by illumination, object materials, etc., the reconstructed results can easily suffer from noisy surfaces and irregular holes. Observing that the geometry directly extracted by multi-view stereo (MVS) methods are generally accurate with only local noises, we propose to integrate the depth information estimated by MVS as a geometric guidance for surface reconstruction.", + "bbox": [ + 496, + 273, + 890, + 410 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Accumulated depth guidance. 
Volume rendering has been proven effective to enable robust supervision using 2D image observations. Following this, we render the depth map with $K$ points along the emitted ray and use the corresponding 2D depth value for supervision. The ray can be parametrized as $r(i) = o + d_i\\mathbf{v}$ , where $o$ is the center of the camera and $\\mathbf{v}$ is the direction of the ray. The depth $\\hat{D}(r)$ from the geometry field can be computed by:", + "bbox": [ + 496, + 410, + 892, + 531 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {D} (r) = \\sum_ {i = 1} ^ {K} \\left(T _ {i} \\alpha_ {i} d _ {i}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 622, + 537, + 890, + 579 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $T_{i}$ is the accumulated transmittance defined by $\\Pi_{j = 1}^{i + 1}(1 - \\alpha_j)$ , and $\\alpha_{j}$ denotes the discrete opacity value computed by $\\alpha_{j} = \\max (\\frac{\\Phi_{s}(s_{i}) - \\Phi_{s}(s_{i} + 1)}{\\Phi_{s}(s_{i})},0)$ , in which $\\Phi$ is the cumulative distribution of logistic distribution. More details about conversion from the SDF distance to the opacity can be found in NeuS [49]. For a batched training ray $r\\in R$ , the accumulated depth loss can be formulated as:", + "bbox": [ + 496, + 585, + 890, + 696 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {d e p t h}} = \\sum_ {r \\in R} | | M (r) (\\hat {D} (r) - D (r)) | | _ {1}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 560, + 704, + 890, + 736 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $M(r) \\in \\{0,1\\}$ is the object mask value and $D(r)$ is the supervised depth value.", + "bbox": [ + 496, + 743, + 890, + 773 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Depth-sampled surface guidance. Except for the depth constraint on spatial accumulated points, we also leverage points sampled from the depth image $I_{D}$ to guide the construction of the SDF surface. The surface loss encourages these sampled 3D points being close to the object surface and $L_{sur}$ can be formulated as:", + "bbox": [ + 496, + 773, + 890, + 864 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {s u r} = \\sum_ {\\mathbf {x} _ {d} \\in I _ {D}} | | F _ {g e o} (\\mathbf {x} _ {d}) | | _ {1}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 598, + 869, + 890, + 904 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "10130", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Training. Given a set of portrait images and their camera parameters, we train the architecture with the geometry field and the appearance field using the following loss function:", + "bbox": [ + 76, + 90, + 468, + 137 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {p r i o r} = L _ {c o l o r} + \\lambda_ {m v s} L _ {m v s} + \\lambda_ {m a s k} L _ {m a s k} + \\lambda_ {r e g} L _ {r e g}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 146, + 468, + 175 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda$ denotes the weight of each corresponding loss. The MVS guided loss is computed as $L_{mvs} = L_{depth} + L_{sur}$ . 
The color reconstruction loss $L_{color}$ is calculated as the distance between the accumulated color $\\hat{C}(r)$ and the observed color $C(r)$ of $I$ :", + "bbox": [ + 76, + 176, + 468, + 252 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {c o l o r} = \\sum_ {r \\in R} | | M (r) (\\hat {C} (r) - C (r)) | | _ {1}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 140, + 261, + 468, + 294 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\hat{C}(r)$ can be computed by $\\sum_{i=1}^{K}(T_i\\alpha_i c_i)$ , and $c_i$ denotes the volumetric color produced by the appearance field $F_{app}$ . To focus on human reconstruction, we also define a mask term with the binary cross entropy loss:", + "bbox": [ + 76, + 304, + 468, + 367 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {m a s k} = B C E (\\hat {M} (r), M (r)), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 375, + 468, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\hat{M}(r) = \\sum_{i=1}^{K} (T_i \\alpha_i)$ is the density accumulation along the ray. The Eikonal loss [12] used to regularize the SDF values is defined as", + "bbox": [ + 76, + 401, + 468, + 448 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {r e g} = \\sum_ {k} | | \\nabla_ {\\mathbf {p} _ {k}} F _ {g e o} (\\mathbf {x} _ {k}) - 1 | | _ {2} ^ {2}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 155, + 455, + 468, + 487 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Visualized results of this stage are shown in Figure 3 (a). Not only the radiance field with accumulated color is learned, but also the inherent geometry can be accurately decomposed. The high-quality reconstruction learned in this stage also paves the way for the next stage of style adaptation with few-shot 2D stylized portraits.", + "bbox": [ + 76, + 494, + 468, + 585 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Spatial representation adaption", + "text_level": 1, + "bbox": [ + 76, + 595, + 354, + 611 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With the constructed photorealistic representation, we then transform it to the style domain by progressively adapting the underlying 3D structures. We first adaptively learn the faithful deformed geometry without the interference of the albedo texture module, and then decompose albedo colors from observed ones with fixed geometric structures. This enables effective 3D structure disentanglement with more accurate surface and clearer texture.", + "bbox": [ + 76, + 618, + 468, + 738 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.1 Geometry adaption", + "text_level": 1, + "bbox": [ + 76, + 746, + 266, + 761 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this stage, we utilize a number of stylized 2D portrait images $I_{t}$ derived from existing 2D portrait stylization methods [30, 53] to fine-tune the geometry field $F_{geo}$ and the appearance field $F_{app}$ . The spatial-shared geometry will be adaptively transformed in $F_{geo}$ and the observed colors varying with views will be modeled in $F_{app}$ , enabling the network focusing on geometry adaption. 
During training, the pixel color of $I_{t}$ is used as the observed color to guide the accumulated volume colors:", + "bbox": [ + 76, + 763, + 468, + 898 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {c o l o r} = \\sum_ {r \\in R} | | M (r) (C (r) - C _ {t} (r)) | | _ {1}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 560, + 103, + 890, + 135 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $C(r)$ is computed by the volumetric color from $F_{app}$ and the converted opacity from $F_{geo}$ . The total training loss is formulated as:", + "bbox": [ + 498, + 140, + 890, + 185 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {g e o} = L _ {c o l o r} + \\lambda_ {m a s k} L _ {m a s k} + \\lambda_ {r e g} L _ {r e g}. \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 549, + 196, + 890, + 214 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Figure 3 (b), the spatial deformed geometry can be extracted from $F_{geo}$ . However, rendering results are 3D-inconsistent with obvious artifacts in side-view renderings, since only few-shot 2D stylizations of the frontal views are provided for style adaption and the view-dependent function $F_{app}$ trivially fits these views.", + "bbox": [ + 498, + 223, + 890, + 314 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.2 Albedo texture adaption and optimization", + "text_level": 1, + "bbox": [ + 498, + 321, + 844, + 337 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this stage, we aim to learn the spatial-shared texture field $F_{tex}$ by decomposing the albedo colors from the appearance ones. Specifically, we insert $F_{tex}$ as a view-independent texture field and jointly optimize $F_{tex}$ and $F_{app}$ . In this way, view-consistent colors can be effectively decomposed from the total appearance and the remaining components in $F_{app}$ are regarded as view-dependent reflections. The final color are computed by $\\tilde{c}_i = s \\circ c_i'$ , where $c_i'$ is the albedo color from $F_{tex}$ and $s$ is the degraded reflection from $F_{app}$ for spatial points. Then we can obtain the final accumulated color by", + "bbox": [ + 498, + 340, + 890, + 505 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {C} (r) = \\sum_ {i = 1} ^ {K} \\left(T _ {i} \\alpha_ {i} \\tilde {c} _ {i}\\right). \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 622, + 506, + 890, + 547 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To further ensure effective albedo color decomposition, a discriminator $D$ is introduced to encourage $\\tilde{C}(r)$ satisfying the approximate distribution of palette colors of $I_t$ . With $\\kappa$ as a posterize filter, the patch color $\\kappa(C_t(p))$ of $I_t$ is fed into $D$ as a real sample, and the reconstructed color $\\tilde{C}(p)$ from $F_{tex}$ is fed into $D$ as a fake sample, where $p$ is the set of rays for image pixels in a patch. We define the discrimination loss $L_{ds}$ to penalize for distance between the distribution of $C(p)$ and $\\tilde{C}(p)$ as:", + "bbox": [ + 498, + 551, + 890, + 686 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} L _ {d s} = \\mathbb {E} _ {p \\sim \\left\\{I _ {t} ^ {i} \\right\\}} [ \\log (D (\\kappa (C _ {t} (p))) ] + \\tag {11} \\\\ \\mathbb {E} _ {p \\sim \\{I _ {t} ^ {i} \\}} [ l o g (1 - D (\\bar {C} (p))) ]. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 576, + 696, + 890, + 738 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To keep the learned geometry stay faithful to the given style, we fix $F_{geo}$ and train $\\{F_{app}, F_{tex}\\}$ with the training loss as follows:", + "bbox": [ + 498, + 746, + 890, + 790 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {t e x} = L _ {\\text {c o l o r}} + \\lambda_ {\\text {m a s k}} L _ {\\text {m a s k}} + \\lambda_ {\\text {r e g}} L _ {\\text {r e g}} + \\lambda_ {\\text {d s}} L _ {\\text {d s}}, \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 508, + 803, + 890, + 820 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $L_{color}$ denotes the distance between the final accumulated color $\\tilde{C} (r)$ and the observed stylized color $C_t(r)$ :", + "bbox": [ + 500, + 829, + 890, + 859 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {c o l o r}} = \\sum_ {r \\in R} | | M (r) (\\tilde {C} (r) - C _ {t} (r)) | | _ {1}. \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 560, + 871, + 890, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "10131", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/97e2a9916a549fbebbfe3d1a0716a84b27836cd1ae0257bcb0d633e8112a68ba.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 145, + 88, + 233, + 157 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/87dd30b9d003ee603f0cc7f8a379059a37ce64da01247fce5b8b16c06dec645f.jpg", + "image_caption": [ + "(c) Exported meshes", + "Figure 4. Stylized results in novel views and corresponding exported meshes." + ], + "image_footnote": [], + "bbox": [ + 250, + 89, + 823, + 383 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0db51f534d021605dd826d0ec8811bfa2188f7cec0e9bafc4bf30548b4f4652d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 76, + 160, + 233, + 229 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/38d6359d7526f8914559ea06e323dbe203d9062ddcb7672f996eb9be9c464733.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 145, + 233, + 235, + 299 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "", + "bbox": [ + 250, + 301, + 563, + 383 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We show rendering results of this stage in Figure 3 (c), demonstrating their 3D consistency in multi-view setting. Thanks to the spatial-shared colors learned in the view-independent $F_{tex}$ , the albedo texture can be seamlessly extracted and further enhanced in an explicit manner.", + "bbox": [ + 76, + 420, + 468, + 496 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experimental Results", + "text_level": 1, + "bbox": [ + 76, + 511, + 282, + 527 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation details. Our network architecture consists of three modules: the signed distance function $F_{geo}$ , the appearance function $F_{app}$ and the texture function $F_{tex}$ , which are modeled by three MLPs with 8, 6, 6 hidden layers, respectively. Positional encoding [32] and sphere initialization [3] are also applied similar to [49]. For the depth priors, we adopt the OpenMVS method [1] to extract estimated depth maps from the input video. 
For the 2D style translator, we adopt DCT-Net [30] and VToonify [53] to produce target stylized images and preserve forward/backward facing results whose absolute yaw angle is less than 0.2 radian for supervision. We use the Adam optimizer [22] with the learning rate of 2.5e-5 to train our models and sample 512 rays for each batch. The loss weights are shared by three stages with $\\lambda_{mask}$ , $\\lambda_{mvs}$ , $\\lambda_{reg}$ , $\\lambda_{ds}$ set to $\\{0.5, 0.5, 0.1, 1\\}$ . Stage I, II and III are trained for 300k, 200k and 50k iterations, respectively, taking around 20 hours in total on a single NVIDIA Teasla-V100 GPU.", + "bbox": [ + 75, + 535, + 470, + 806 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets. We create a $360^{\\circ}$ captured portrait dataset called Portrait360 to evaluate our approach. This dataset contains 14 static portrait videos captured by rotating the camera around the human head. All videos have a length between 20 to 30 seconds and are split to 300 frames as source training data.", + "bbox": [ + 76, + 809, + 468, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.3D portrait stylization", + "text_level": 1, + "bbox": [ + 500, + 420, + 705, + 436 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Performance on view consistent rendering. Given a short portrait video captured by a monocular camera, our model learns a stylized 3D representation from 2D portrait frames. Stylized portrait images can be generated from arbitrary novel viewpoints following exemplar styles, while ensuring facial identity of the person and 3D consistency between different views. Note that the synthesized images in this part are produced directly by volume rendering on implicit functions, without any explicit style enhancement applied for the results. Our stylized avatars rendered in novel viewpoints and their corresponding exported meshes are shown in Figure 4, more results can be found in the supplementary. Comparison with 3D avatar stylization methods. In this section, we compare our method with two 3D avatar stylization methods, DeformToon3D [57] and NeRF-Art [48], which represent the state-of-the-art techniques in 3D-aware generative toonification and text-guided NeRF stylization, respectively.", + "bbox": [ + 496, + 444, + 890, + 718 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Qualitative comparison. Here we adapt VToonify [53] to generate target stylized images with selected exemplars to train our model. For DeformToon3D [57], we use the author-provided code and train the model using data generated with the same exemplars by DualStyleGAN [52], which is also the 2D generator used in VToonify. Here we directly generate its real-space and style-space results under the same sampled instance code, since the additional PTI [43] process will cause accumulated fidelity errors, especially on arbitrary real faces. For NeRF-Art [48], as it does not support using a single exemplar image for style guidance, we use Mini-GPT4 [59] to generate style de", + "bbox": [ + 496, + 719, + 892, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "10132", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7b0389739f52ae38cc292df12b3df9b1f07a94238a2afe532f28e8bb53708ba5.jpg", + "image_caption": [ + "Figure 5. Qualitative comparison with 3D avatar stylization methods. 
We directly compare the generated real-space and style-space results of DeformToon3D to alleviate the fidelity loss in the additional PTI process. The models of NeRF-Art and Ours are trained on our Portrait360 dataset. Four views are selected for comparison." + ], + "image_footnote": [], + "bbox": [ + 76, + 80, + 883, + 388 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/00980e90a2c39b88c259c59edc030d6ce211cc762bc1c4fade063efffab5b2cc.jpg", + "image_caption": [ + "Figure 6. Qualitative comparison with 2D portrait stylization methods on view consistent rendering. For a more prominent video comparison, please refer to the supplementary video." + ], + "image_footnote": [], + "bbox": [ + 76, + 443, + 472, + 555 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "scriptions corresponding to each target image. The input text prompts used in this section are shown in the supplementary. We demonstrate qualitative comparison of the three methods in Figure 5. DeformToon3D only focuses on frontal views and fails to generate plausible renderings under large angles. Besides, it tends to synthesize overly exaggerated results and fail to maintain the facial characteristics (e.g., hairstyles) of the original image. NeRF-Art only generates results with undesired stylized texture and weakly-changed underlying structures. On the contrary, our method can generate fine-grained full-head stylized avatars with view-consistent renderings and exaggerated styles.", + "bbox": [ + 75, + 609, + 468, + 792 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Quantitative comparison. For quantitative comparison, we measure the quality of multi-view stylized renderings of all methods by calculating the Frechet Inception Distance (FID) [14] value for the training cartoon exemplar dataset. A lower FID score indicates that the distribution of the generated images is more similar to that of real 2D cartoon faces. we also evaluate the fidelity of all methods in 3D", + "bbox": [ + 75, + 795, + 470, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/5a89b99d280b6da7543279d0c01aee02ea0997e3424d32ca5d6aa830f0a83136.jpg", + "table_caption": [ + "Table 1. Quantitative comparison with 3D avatar stylization methods on FID and IP. $\\uparrow$ , $\\downarrow$ denote if higher or lower is better." + ], + "table_footnote": [], + "table_body": "
MethodDeformToon3DNeRF-ArtOurs
FID ↓66.578.857.6
IP ↑0.5510.6710.678
", + "bbox": [ + 553, + 474, + 834, + 526 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "style adaption using the identity preservation (IP) metric, which is calculated as the Arcface [9] feature similarity between the input image and the stylized result. As shown in Table 1, our method outperforms the other two methods in both FID and identity preservation, which showcases our ability of generating high-quality stylized results while being faithful to the original human identity.", + "bbox": [ + 496, + 546, + 890, + 654 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparison with 2D portrait stylization methods. In this section, we compare our method with two state-of-the-art 2D portrait stylization methods, VToonify [53] and DCT-Net [30], to further demonstrate our ability of generating 3D-consistent and high-quality stylized results for arbitrary views.", + "bbox": [ + 496, + 656, + 890, + 746 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Qualitative comparison. Due to the incapability of 2D portrait stylization methods to synthesize novel view results, we only make comparison under reconstructed views captured in the input video. For both VToonify and DCT-Net, frames are directly input into the trained/finetuned models released by authors to obtain the corresponding stylized images. Then we select their forward/backward results as sparse view supervision to train our models (denoted as ours-V and ours-D, respectively). As illustrated in Figure 6, VToonify and DCT-Net fail to synthesize exaggerated ge", + "bbox": [ + 496, + 750, + 892, + 901 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "10133", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/1761903e6a60847e1ead7f60e7ed65ae577e8ef1465cd328c2dd311aa7bbac2a.jpg", + "table_caption": [ + "Table 2. Comparison of FID and 3D validity with 2D portrait stylization methods." + ], + "table_footnote": [], + "table_body": "
MethodDCT-NetOurs-DVToonifyOurs-V
FID ↓126.194.786.957.6
3D validity ↑0.541.000.621.00
", + "bbox": [ + 114, + 108, + 426, + 157 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/193c6f1095d6ac4510c065b2431be728c345235a1a2bda64982c7af942cef6d6.jpg", + "table_caption": [ + "Table 3. Ablation of the progressive training scheme. Results verify the effectiveness of the proposed module in each stage." + ], + "table_footnote": [], + "table_body": "
Variantsw/o Priorw/o GAw/o TAw/o PSAfull model
FID ↓98.7105.296.796.294.7
", + "bbox": [ + 81, + 196, + 468, + 236 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ometry effects in challenging viewpoints (e.g., side faces) and are unable to maintain 3D view consistency. Note that these extreme view results are not used as supervision in our style adaption process. On the contrary, our method can easily render style-faithful and robust results in a 3D consistent manner. This showcases the importance of learning underlying 3D structures in maintaining view-consistency of the stylized avatar.", + "bbox": [ + 75, + 250, + 468, + 369 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Quantitative comparison. We also measure the quality of our rendering results against VToonify [53] and DCT-Net [30] using FID [14]. We use data from our Portrait360 dataset as source images and remove failure cases of the 2D methods. As shown in Table 2, both of our models produce better results with lower FID values compared with original 2D methods. To further evaluate the stylization ability of handling views from the entire 3D space, we propose to calculate 3D validity by computing the conversion rate of successfully stylized results to the whole dataset. 2D methods rely on detected facial landmarks and failed conversions can be automatically recognized. Compared to 2D methods, our method could handle more challenging poses in the entire 3D space with higher 3D validity.", + "bbox": [ + 75, + 371, + 468, + 583 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2. Ablation study", + "text_level": 1, + "bbox": [ + 76, + 592, + 227, + 607 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In addition to visualized results in Figure 3, we verify the effectiveness of the proposed module in each stage by evaluating the performance of corresponding variants of our method. The qualitative and quantitative results are shown in Figure 7 and Table 3, respectively.", + "bbox": [ + 75, + 613, + 468, + 688 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "MVS guided prior learning. We train a model without photorealistic prior learning and directly learn the spatial neural representation from stylized portrait images. It is confusing for inverse rendering to produce valid geometry and texture with unreal 3D-inconsistent stylized observations, as shown in Figure 7 (b). This indicates that the reconstruction prior is crucial for generating plausible underlying structures in 3D style adaption. The design of MVS guidance also helps to reconstruct more robust surface without holes brought by illumination noise in complicated real-world scenes (see Figure 7 (g)).", + "bbox": [ + 75, + 688, + 468, + 854 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Progressive structure adaption (PSA). By removing PSA proposed in Section 3.3, we jointly learn the geometry and texture adaption with the full SNR network. 
Results in", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8b9c6889554011bdefb35c533c4ffd26c26ed60aaa6d0dd76efacaf37bdf6f52.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 87, + 578, + 148 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a87768f105b6969d347893f7e9a3d34fc209a24060266d421d08b339b9a35021.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 583, + 88, + 658, + 148 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c4a065f929414ee1e25f23d23a68c4685e3b143df51c3a279fb84a72c24b112b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 88, + 736, + 148 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bcda2f9545e0dcd647052abd390aca4ca3797ae88b3274072c1e97c5bcaf782e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 740, + 89, + 816, + 148 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/83dfa2ac84f715e612a032aa1493abe8f9da9509a6e834406d9f004e61b128f4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 826, + 89, + 885, + 148 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/73c4ed6b6cdad5ab56c56525eba63cca0796fef43b19f8fe5115cf4ba8d9bc16.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 148, + 573, + 208 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1b40db43918fb83bbf66ea047d3c9e6bcc737afd6588aa7f67b982b6d1bd12f5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 588, + 148, + 651, + 208 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9f78499e2ff5d5bccfa113836857c45ac50c70fe12cd007cb314d79c5d73ee68.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 148, + 728, + 208 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c8b7d7c0433bfca7df145d340e3d9eb0218a294f110a0d1794c1f487e89e3154.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 745, + 148, + 810, + 208 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/282cda70d8623c7120ec7daf2803a47b35aa4f9319963b6d45202ae82104e2d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 823, + 148, + 885, + 208 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/50c81616580c8d1b5925a3be3ecfb40c0336cc02eb13af832c60fc52069248f6.jpg", + "image_caption": [ + "(a) Full model" + ], + "image_footnote": [], + "bbox": [ + 506, + 210, + 573, + 268 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5485069b49665032a6bdb670172eb35e6f032625f5db462a43c5ad4b27175683.jpg", + "image_caption": [ + "(b) w/o Prior" + ], + "image_footnote": [], + "bbox": [ + 584, + 210, + 651, + 268 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1a4f5dd617dc920eba73afff47b112c3f9cad4552d0c58375cbf9b03af790072.jpg", + "image_caption": [ + "(c) w/o PSA" + ], + "image_footnote": [], + "bbox": [ + 665, + 209, + 728, + 268 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4b87003ac0bfc3f40a6ac2c42dc28c5092fa07aadac5cb650862785538e218ca.jpg", + "image_caption": [ + "(d) w/o GA", + "Figure 7. Effects of the proposed prior learning (Prior), progressive structure adaption (PSA), geometry adaption (GA), texture adaption (TA), style enhancement (SE) and MVS guidance (MVS)." 
+ ], + "image_footnote": [], + "bbox": [ + 745, + 210, + 810, + 268 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5e367f77429cc6fc4af0ace1dc9d73ee20867897c34325dbf50293fedbefcf44.jpg", + "image_caption": [ + "(e) w/o TA" + ], + "image_footnote": [], + "bbox": [ + 821, + 210, + 883, + 268 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 7 (c) show that simultaneously training $F_{geo}$ and $F_{tex}$ disrupts the disentanglement of each other. Progressive adaption brings more accurate surfaces and seamless textures.", + "bbox": [ + 496, + 354, + 890, + 412 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Geometry and texture adaption. Figure 7 (d, e) verify the necessity of geometry adaption (GA) and texture adaption (TA), respectively. In contrast to explicit texture stylization, GA enables the internal surface to be deformed adaptively, thus making 3D portraits be fully stylized. Without TA, inferred vertex colors from the appearance field suffers noticeable artifacts, due to the inconsistent observed colors from different views. TA introduces an extra texture field that automatically decomposes albedo colors shared in 3D space, thus alleviating the texture seaming issue. Besides, we explore adding additional style enhancement (SE) on the explicit texture map extracted from the texture field, which further brings more vivid stylization effects (Figure 7 (f)). We also show the impact of the number of stylized frames used for adaption stages in the supplemental material.", + "bbox": [ + 496, + 415, + 892, + 642 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 652, + 617, + 667 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we handled the challenging and on-going task of synthesizing the high-fidelity stylized 3D avatar from a portrait video under the guidance of a single style image. We showed that the naive combination of portrait style transfer and 3D reconstruction techniques does not work well in this task, and proposed a novel framework called 3DToonify that learns 3D style adaption based on spatial neural representations (SNR). We introduced a delicately-designed spatial neural network for disentangled geometry and texture adaption. We also came up with a novel progressive training scheme suitable for the SNR to accurately capture the underlying stylized 3D structures. Both qualitative and quantitative experimental results demonstrated that our method enables fine-grained 3D avatar stylization with view consistency and diverse exaggerated results.", + "bbox": [ + 496, + 674, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "10134", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] OpenMvs. [EB/OL]. https://github.com/cdcseacave/openMVS/.6", + "[2] Rameen Abdal, Hsin-Ying Lee, Peihao Zhu, Mengei Chai, Aliaksandr Siarohin, Peter Wonka, and Sergey Tulyakov. 3davatargan: Bridging domains for personalized editable avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4552-4562, 2023. 2, 3", + "[3] Matan Atzmon and Yaron Lipman. Sal: Sign agnostic learning of shapes from raw data. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2565-2574, 2020. 6", + "[4] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2, 3", + "[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 3", + "[6] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 2, 3", + "[7] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. arXiv preprint arXiv:2303.13873, 2023. 2", + "[8] Pei-Ze Chiang, Meng-Shiun Tsai, Hung-Yu Tseng, WeiSheng Lai, and Wei-Chen Chiu. Stylizing 3d scene via implicit representation and hypernetwork. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1475-1484, 2022. 2", + "[9] Jiankang Deng, Jia Guo, Xue Niannan, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, 2019. 7", + "[10] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 2", + "[11] Leon A Gatys, Alexander S Ecker, and Matthias Bethge. Image style transfer using convolutional neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2414-2423, 2016. 2", + "[12] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. arXiv preprint arXiv:2002.10099, 2020. 3, 5", + "[13] Fangzhou Han, Shuquan Ye, Mingming He, Menglei Chai, and Jing Liao. Exemplar-based 3d portrait stylization. IEEE Transactions on Visualization and Computer Graphics, 2021. 3" + ], + "bbox": [ + 78, + 114, + 468, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 7, 8", + "[15] Yukun Huang, Jianan Wang, Ailing Zeng, He Cao, Xianbiao Qi, Yukai Shi, Zheng-Jun Zha, and Lei Zhang. Dreamwaltz: Make a scene with complex 3d animatable avatars. arXiv preprint arXiv:2305.12529, 2023. 2", + "[16] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 2", + "[17] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 2", + "[18] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG), 42(4):1-14, 2023. 2", + "[19] Gwanghyun Kim and Se Young Chun. Datid-3d: Diversitypreserved domain adaptation using text-to-image diffusion for 3d generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14203-14213, 2023. 3", + "[20] Gwanghyun Kim, Ji Ha Jang, and Se Young Chun. Podia-3d: Domain adaptation of 3d generative model across large domain gap using pose-preserved text-to-image diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 22603–22612, 2023. 3", + "[21] Junho Kim, Minjae Kim, Hyeonwoo Kang, and Kwang Hee Lee. U-gat-it: Unsupervised generative attentional networks with adaptive layer-instance normalization for image-to-image translation. In International Conference on Learning Representations, 2020. 2", + "[22] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6", + "[23] Nikos Kolotouros, Thiemo Alldieck, Andrei Zanfir, Eduard Gabriel Bazavan, Mihai Fieraru, and Cristian Sminchisescu. Dreamhuman: Animatable 3d avatars from text. arXiv preprint arXiv:2306.09329, 2023. 2", + "[24] Bing Li, Yuanlue Zhu, Yitong Wang, Chia-Wen Lin, Bernard Ghanem, and Linlin Shen. Anigan: Style-guided generative adversarial networks for unsupervised anime face generation. IEEE Transactions on Multimedia, 2021. 2", + "[25] Tingting Liao, Hongwei Yi, Yuliang Xiu, Jiaxaing Tang, Yangyi Huang, Justus Thies, and Michael J Black. Tada! text to animatable digital avatars. arXiv preprint arXiv:2308.10899, 2023. 2", + "[26] Yiyi Liao, Katja Schwarz, Lars Mescheder, and Andreas Geiger. Towards unsupervised learning of generative models for 3d controllable image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5871-5880, 2020. 2" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "10135", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 300–309, 2023. 2", + "[28] Jinlin Liu, Yuan Yao, Wendi Hou, Miaomiao Cui, Xuansong Xie, Changshui Zhang, and Xian-sheng Hua. Boosting semantic human matting with coarse annotations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8563-8572, 2020. 3", + "[29] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 3", + "[30] Yifang Men, Yuan Yao, Miaomiao Cui, Zhouhui Lian, and Xuansong Xie. Dct-net: domain-calibrated translation for portrait stylization. ACM Transactions on Graphics (TOG), 41(4):1-9, 2022. 1, 2, 5, 6, 7, 8", + "[31] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. 
Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4460-4470, 2019. 2", + "[32] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 2, 3, 4, 6", + "[33] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, 2022. 2, 3", + "[34] Thu Nguyen-Phuoc, Feng Liu, and Lei Xiao. Snerf: stylized neural implicit representations for 3d scenes. arXiv preprint arXiv:2207.02363, 2022. 2", + "[35] Thu Nguyen-Phuoc, Gabriel Schwartz, Yuting Ye, Stephen Lombardi, and Lei Xiao. Alteredavatar: Stylizing dynamic 3d avatars with fast style adaptation. arXiv preprint arXiv:2305.19245, 2023. 2, 3", + "[36] Thu H Nguyen-Phuoc, Christian Richardt, Long Mai, Yongliang Yang, and Niloy Mitra. Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in Neural Information Processing Systems, 33:6767-6778, 2020. 2", + "[37] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5589-5599, 2021. 3", + "[38] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 2, 3", + "[39] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning con" + ], + "bbox": [ + 78, + 90, + 470, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "tinuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 165-174, 2019. 3", + "[40] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 2, 3", + "[41] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988, 2022. 2", + "[42] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2", + "[43] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. ACM Transactions on graphics (TOG), 42(1):1-13, 2022. 6", + "[44] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2", + "[45] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. 
Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023.", + "[46] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 2", + "[47] Shen Sang, Tiancheng Zhi, Guoxian Song, Minghao Liu, Chunpong Lai, Jing Liu, Xiang Wen, James Davis, and Linjie Luo. AgileAvatar: Stylized 3d avatar creation via cascaded domain bridging. 2022. 3", + "[48] Can Wang, Ruixiang Jiang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Nerf-art: Text-driven neural radiance fields stylization. IEEE Transactions on Visualization and Computer Graphics, 2023. 2, 3, 6", + "[49] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021. 2, 3, 4, 6", + "[50] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. arXiv preprint arXiv:2305.16213, 2023. 2", + "[51] Shiyao Xu, Lingzhi Li, Li Shen, Yifang Men, and Zhouhui Lian. Your3demooji: Creating personalized emojis via one-shot 3d-aware cartoon avatar synthesis. In SIGGRAPH Asia 2022 Technical Communications, pages 1-4. 2022. 2" + ], + "bbox": [ + 501, + 92, + 890, + 901 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10136", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[52] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Pastiche master: Exemplar-based high-resolution portrait style transfer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7693-7702, 2022. 6", + "[53] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Vtoonify: Controllable high-resolution portrait video style transfer. arXiv preprint arXiv:2209.11224, 2022. 1, 2, 5, 6, 7, 8", + "[54] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. Advances in Neural Information Processing Systems, 33:2492-2502, 2020. 3", + "[55] Chi Zhang, Yiwen Chen, Yijun Fu, Zhenglin Zhou, Gang Yu, Billzb Wang, Bin Fu, Tao Chen, Guosheng Lin, and Chunhua Shen. StyleAvatar3d: Leveraging image-text diffusion models for high-fidelity 3d avatar generation. arXiv preprint arXiv:2305.19012, 2023. 2", + "[56] Huichao Zhang, Bowen Chen, Hao Yang, Liao Qu, Xu Wang, Li Chen, Chao Long, Feida Zhu, Kang Du, and Min Zheng. Avatarverse: High-quality & stable 3d avatar creation from text and pose. arXiv preprint arXiv:2308.03610, 2023. 2", + "[57] Junzhe Zhang, Yushi Lan, Shuai Yang, Fangzhou Hong, Quan Wang, Chai Kiat Yeo, Ziwei Liu, and Chen Change Loy. Deformtoon3d: Deformable neural radiance fields for 3d toonification. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9144-9154, 2023. 2, 3, 6", + "[58] Kai Zhang, Nick Kolkin, Sai Bi, Fujun Luan, Zexiang Xu, Eli Shechtman, and Noah Snavely. Arf: Artistic radiance fields. 
In European Conference on Computer Vision, pages 717-733. Springer, 2022. 2", + "[59] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 6", + "[60] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In Proceedings of the IEEE international conference on computer vision, pages 2223-2232, 2017. 2" + ], + "bbox": [ + 78, + 90, + 468, + 696 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "10137", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/2c81e75a-1abf-4d0b-aa9d-80d61a8cb264_model.json b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/2c81e75a-1abf-4d0b-aa9d-80d61a8cb264_model.json new file mode 100644 index 0000000000000000000000000000000000000000..1382b3526f09efef67c23826847ce5b72d7cc858 --- /dev/null +++ b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/2c81e75a-1abf-4d0b-aa9d-80d61a8cb264_model.json @@ -0,0 +1,2532 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.107, + 0.13, + 0.865, + 0.176 + ], + "angle": 0, + "content": "3DToonify: Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images" + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.203, + 0.834, + 0.239 + ], + "angle": 0, + "content": "Yifang Men\\(^{1*}\\), Hanxi Liu\\(^{2*}\\), Yuan Yao\\(^{1}\\), Miaomiao Cui\\(^{1}\\), Xuansong Xie\\(^{1}\\), Zhouhui Lian\\(^{2\\dagger}\\) \n\\(^{1}\\)Institute for Intelligent Computing, Alibaba Group" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.24, + 0.771, + 0.258 + ], + "angle": 0, + "content": "\\(^{2}\\)Wangxuan Institute of Computer Technology, Peking University, China" + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.277, + 0.191, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.528, + 0.18, + 0.542 + ], + "angle": 0, + "content": "(a) Input video" + }, + { + "type": "image", + "bbox": [ + 0.192, + 0.277, + 0.45, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.269, + 0.528, + 0.397, + 0.542 + ], + "angle": 0, + "content": "(b) 3D style adaption" + }, + { + "type": "image", + "bbox": [ + 0.453, + 0.277, + 0.888, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.532, + 0.528, + 0.822, + 0.543 + ], + "angle": 0, + "content": "(c) View-consistent renderings in different styles" + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.547, + 0.894, + 0.59 + ], + "angle": 0, + "content": "Figure 1. 
Given a set of RGB portrait images captured by a monocular camera, our method can learn a photorealistic representation in neural implicit fields, and transfer it to artistic ones with underlying 3D structures changed. Multiple stylized results can be rendered from arbitrary novel viewpoints with consistent geometry and texture." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.603, + 0.314, + 0.618 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.63, + 0.471, + 0.842 + ], + "angle": 0, + "content": "Visual content creation has aroused a surge of interest given its applications in mobile photography and AR/VR. Portrait style transfer and 3D recovery from monocular images as two representative tasks have so far evolved independently. In this paper, we make a connection between the two, and tackle the challenging task of 3D portrait stylization - modeling high-fidelity 3D stylized avatars from captured 2D portrait images. However, naively combining the techniques from the two isolated areas may suffer from either inadequate stylization or absence of 3D assets. To this end, we propose 3DToonify, a new framework that introduces a progressive training scheme to achieve 3D style adaption on spatial neural representation (SNR). SNR is constructed with implicit fields and they are dy" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.604, + 0.895, + 0.8 + ], + "angle": 0, + "content": "namically optimized by the progressive training scheme, which consists of three stages: guided prior learning, deformable geometry adaption and explicit texture adaption. In this way, stylized geometry and texture are learned in SNR in an explicit and structured way with only a single stylized exemplar needed. Moreover, our method obtains style-adaptive underlying structures (i.e., deformable geometry and exaggerated texture) and view-consistent stylized avatar rendering from arbitrary novel viewpoints. Both qualitative and quantitative experiments have been conducted to demonstrate the effectiveness and superiority of our method for automatically generating exemplar-guided 3D stylized avatars." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.805, + 0.633, + 0.82 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.827, + 0.894, + 0.904 + ], + "angle": 0, + "content": "Portrait style transfer [30, 53] aims to transform real face images into artistic 2D portraits in desired visual styles while maintaining personal identity. However, given a sequence of portrait images captured from different viewpoints, existing portrait style transfer methods are typically" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.852, + 0.252, + 0.865 + ], + "angle": 0, + "content": "*Denotes equal contribution." + }, + { + "type": "page_footnote", + "bbox": [ + 0.098, + 0.865, + 0.403, + 0.877 + ], + "angle": 0, + "content": "† Corresponding author. E-mail: lianzhouhui@pku.edu.cn." + }, + { + "type": "page_footnote", + "bbox": [ + 0.08, + 0.878, + 0.469, + 0.902 + ], + "angle": 0, + "content": "This work was partially supported by National Natural Science Foundation of China (Grant No.: 62372015)." 
+ }, + { + "type": "list", + "bbox": [ + 0.08, + 0.852, + 0.469, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10127" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.091, + 0.473, + 0.364 + ], + "angle": 0, + "content": "only effective for limited forward-facing photos and fails to maintain view consistency in 3D space. Essentially, existing methods only learn a style transfer between 2D features, and have no sense to 3D representations built on real-world objects. What if we can construct and stylize underlying 3D structures from captured 2D portrait images? See Figure 1 for an example. When stylized with 3D structures (i.e., geometry and texture), we can easily render view-free stylized portraits with 3D consistency and robust artistic results. This capacity will extremely facilitate the 3D content creation process which often requires large amounts of time and special expertise, and make it accessible to a variety of novice users. As shown in Figure 1, this paper aims to address the challenging task of generating high-fidelity 3D avatar from a portrait video by following the style of a given exemplar image. We refer this task as 3D portrait stylization – a marriage between portrait style transfer and 3D recovery from monocular images." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.368, + 0.473, + 0.777 + ], + "angle": 0, + "content": "The naive solution to the task mentioned above is directly combining existing methods of 2D portrait stylization with 3D reconstruction, i.e., learning 3D representations such as voxels [36], primitives [26] or occupancy fields [31] directly from stylized portrait images. However, it is less effective due to the biased image manifold built by 2D portrait stylization, making the representation learning be ill-posed with highly-biased visible views. Recently, neural radiance field (NeRF) [4, 18, 32, 33, 40, 49] has made great progress due to its advanced ability to achieve photo-realistic novel view synthesis with sparse input views. Some previous attempts [8, 34, 35, 48, 58] also combine NeRF with image-based [11] or text-driven [42] neural style transfer to generate novel views of stylized 3D scenes or avatars. Recently, a series of new works have started to focus on 3D stylized avatar generation. Some methods [7, 15, 23, 25, 27, 41, 50, 56] exploit the great potential of 2D text-to-image diffusion models [44-46] to generate 3D cartoonish avatars according to a given text prompt. Others [2, 51, 55, 57] build on 3D generative models [6, 38] to bridge the gap between the real space and the target domain, and generate avatars with certain styles under a sampled latent vector. However, all these methods either can not achieve high-fidelity personalized 3D portrait stylization with user-specific identities and styles, or fail to generate fine-grained full-head avatars that support view-consistent rendering from arbitrary viewpoints." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.473, + 0.903 + ], + "angle": 0, + "content": "To address the aforementioned challenges, we draw inspiration from domain adaption on 2D features [10, 30], and introduce a progressive training scheme to achieve 3D style adaption on spatial neural representation (SNR). The key insights of this design are twofold. 
First, it is hard to directly learn an accurate 3D representation field from stylized portraits with few-shot inconsistent 2D views, but easier to learn a photorealistic field as a prior and adapt it to" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.303 + ], + "angle": 0, + "content": "target style fields with transfer learning. Second, learning spatial representation with disentangled surface and texture allows for flexible geometry deformation and texture adaption, leading to more diverse and fine-grained style editing. To this end, we construct SNR with neural implicit fields and dynamically optimize its subfields with a progressive training scheme. This scheme includes the following three stages: prior learning to obtain an accurate human reconstruction, geometry adaption to produce inherently exaggerated deformation, and texture adaption to realize artistic albedo decomposition. Eventually, the 2D portraits are converted to stylized SNR, and explicit 3D assets can be easily extracted with disentangled 3D structures. In summary, our contributions are threefold:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.304, + 0.892, + 0.395 + ], + "angle": 0, + "content": "- We present a new method that adopts neural implicit fields to address the challenging task of generating high-fidelity 3D avatar from a portrait video by following the style of a given exemplar image. Stylized results can be rendered under arbitrary novel viewpoints with consistent geometry and texture." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.395, + 0.892, + 0.47 + ], + "angle": 0, + "content": "- We introduce an elegant network of spatial neural representation to model common attributes over the 3D space. This design allows for disentangled geometry and texture adaption, achieving more flexible and fine-grained 3D stylization results." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.47, + 0.892, + 0.545 + ], + "angle": 0, + "content": "- We propose a novel progressive training scheme of 3D style adaption. Cooperated with the delicately-designed spatial neural network, it enables learning realistic 3D cartoon avatars with deformed geometry and stylized texture." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.304, + 0.892, + 0.545 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.555, + 0.642, + 0.571 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.582, + 0.895, + 0.839 + ], + "angle": 0, + "content": "2D Portrait Stylization. In the deep neural network based portrait stylization, there are two types of approaches, i.e., image-to-image translation and StyleGAN based translation. Methods [21, 24] conduct face-to-cartoon translation by adopting the framework of cycleGAN [60]. Nevertheless, training such methods requires extensive data and may still generate unstable results. StyleGAN [16, 17] has become a popular alternative for portrait stylization due to its strong capacity for latent inversion and style control. [30] proposes a calibration framework to adapt the original training distribution for fine-grained translation. [53] leverages the mid- and high-resolution layers of StyleGAN to render high-quality artistic portraits based on the multi-scale content features to better preserve details. Although high-quality results have been shown, these methods cannot handle extreme face angle while maintaining cross-view consistency." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Neural Implicit Fields. Recently, neural implicit functions have emerged as an effective representation to model conventional 3D scenes due to its continuous nature. This representation has been successfully adopted to shape model-" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "10128" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.092, + 0.36, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.379, + 0.094, + 0.666, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.096, + 0.874, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.326, + 0.895, + 0.397 + ], + "angle": 0, + "content": "Figure 2. An overview of the proposed framework. Our method first learns a photorealistic field built-upon spatial neural representation (SNR) using dense input views, then transfers this prior representation to artistic ones with few-shot stylized views by adapting underlying 3D structures. SNR is constructed by a geometry field for SDF surface, an appearance field for observed color, and a texture field for albedo color, respectively. The progressive training scheme is adopted to enable SNR to learn about stylized geometry and texture in an explicit and structured manner." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.413, + 0.473, + 0.701 + ], + "angle": 0, + "content": "ing [12, 39], novel view synthesis [29, 32] and multi-view 3D reconstruction [49, 54]. The method of Neural Radiance Fields (NeRF) [32], in particular, has attracted significant attention for its ability to achieve photo-realistic novel view synthesis results by utilizing neural implicit functions together with volume rendering. A number of variants have been developed thereafter to fit with different scenarios and requirements, including quality improvement [4], fast rendering [33], dynamic scene capture [40] and generative models [5]. However, NeRF's estimated volume density does not admit accurate surface reconstruction, the recovered 3D geometry is far from satisfactory and can hardly be extracted as explicit materials. Recent works tackle the issue by combining implicit surface functions. [37] represents the surface by occupancy values and shrink the sample region of volume rendering during the optimization. [49] introduces signed distance functions (SDF) to represent the scene and can directly extract the surface as the zero-level set of the SDF with better accuracy." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.473, + 0.903 + ], + "angle": 0, + "content": "3D Avatar Stylization. 3D avatar stylization aims to generate stylized 3D avatars whose rendered images captured from different viewpoints match the specific style. Early methods are either mesh-driven [13] or rely on explicit parameterization [47]. More recently, [35, 48] exploit the flexibility of neural radiance field and propose a text-guided stylization approach that manipulates the reconstructed scenes with input text prompts. However, due to the limited expressiveness of natural languages, they can not generate highly-detailed results with arbitrary user-specific styles. Another stream of methods [2, 19, 20, 57] using 3D generative models [6, 38] have extended avatar stylization to 3D-aware domain adaption. 
However, inherited from" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.414, + 0.895, + 0.506 + ], + "angle": 0, + "content": "their predecessors, these methods can not synthesize full-head avatars in \\(360^{\\circ}\\), and perform badly with real-world out-of-domain data. In contrast, our method utilizes the implicit representation to model high-fidelity 3D avatars from captured portrait videos, which allows for superior view consistency and stable stylization." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.513, + 0.693, + 0.531 + ], + "angle": 0, + "content": "3. Method Description" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.538, + 0.892, + 0.658 + ], + "angle": 0, + "content": "Given the short portrait video of a person captured with a monocular camera, we aim to generate the high-fidelity 3D stylized avatar of the person. The person stands still when recording the video. We denote the split frames of the video as \\(\\{I_i | i = 1, \\dots, N\\}\\), where \\(i\\) is the frame index, \\(N\\) is the number of frames. For each frame, we use COLMAP to obtain the calibrated camera and the method proposed in [28] to extract the foreground human mask." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.893, + 0.902 + ], + "angle": 0, + "content": "The overview of the proposed framework is illustrated in Figure 2. 3DToonify aims to learn the stylized human neural field by adapting 3D structures in a progressive training scheme. This scheme is built upon a spatial neural representation, which utilizes disentangled implicit fields to capture the underlying 3D structures such as geometry and texture (Section 3.1). We first leverage the geometric guidance from a multi-view stereo to learn a robust photorealistic representation, acting as a source prior (Section 3.2). Then this prior representation is adapted to the style domain with adaptive geometry deformation (Section 3.3.1) and decomposed albedo colors (Section 3.3.2). In this way, the stylized human avatar field can be constructed by SNR with transformed underlying structures, thus allowing for fully stylized results and 3D consistent rendering in arbitrary viewpoints." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "10129" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.091, + 0.339, + 0.108 + ], + "angle": 0, + "content": "3.1. Spatial neural representation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.117, + 0.471, + 0.254 + ], + "angle": 0, + "content": "The proposed spatial neural representation (SNR) is based on neural radiance field (NeRF) [32], which can be seen as a continuous 5D function that maps a 3D position \\(\\mathbf{x}\\) and a viewing direction \\(\\mathbf{v}\\) to an emitted color \\(\\mathbf{c} = (r, g, b)\\) and a volume density \\(\\sigma\\). NeRF is approximated by a multi-layer perceptron (MLP) \\(F_{\\theta}: (\\mathbf{x}, \\mathbf{v}) \\rightarrow \\mathbf{c}, \\sigma\\). SNR consists of three MLPs \\(F_{geo}, F_{app}\\) and \\(F_{tex}\\), representing the decomposed fields of geometry, the observed appearance color and the albedo texture color, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.256, + 0.473, + 0.439 + ], + "angle": 0, + "content": "Geometry field learns a function \\( F_{geo} : \\mathbb{R}^3 \\to \\mathbb{R} \\) that maps a spatial point \\( \\mathbf{x} \\in \\mathbb{R}^3 \\) to its signed distance value \\( \\mathcal{G} \\) to the object surface. 
It constructs the underlying object surface by encoding a signed distance function (SDF) of only location \\( \\mathbf{x} \\). In order to be compatible with the rendering procedure of the radiance field, a probability function \\( \\psi(\\cdot) \\) proposed by [49] is used to calculate the point weight \\( w \\) from the signed distance value \\( \\mathcal{G} \\), where \\( \\psi(\\cdot) \\) denotes an unbiased and occlusion-aware approximation. With this implicit SDF representation, the explicit object surface \\( S \\) can be easily extracted by the zero level-set of the \\( SDF : S = \\{\\mathbf{x} \\in \\mathbb{R}^3 | \\mathcal{G}(\\mathbf{x}) = 0\\} \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.44, + 0.471, + 0.637 + ], + "angle": 0, + "content": "Appearance field learns a function \\( F_{app} : \\mathbb{R}^3 \\times \\mathbb{S}^2 \\to \\mathbb{R}^3 \\) to encode the observed colors \\( \\mathbf{c}_{app} \\) associated with the point \\( \\mathbf{x} \\in \\mathbb{R}^3 \\) and the view direction \\( \\mathbf{v} \\in \\mathbb{S}^2 \\). The feature vectors \\( F(\\mathbf{x}) \\) derived from \\( F_{geo} \\) are also concatenated as the inputs. To better approximate the appearance colors of the object captured in read-world scenes, \\( F_{app} \\) is introduced as a function of both location and viewing direction, thus allowing learning view-dependent RGB colors for multi-view images. Notably, the learned representation in \\( F_{app} \\) could be degraded into reflection components \\( \\mathbf{s} \\), which are caused by illumination and vary with view directions. It will be adaptively changed in the later training stage (see the detailed discussion in Section 3.3.2)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.64, + 0.47, + 0.807 + ], + "angle": 0, + "content": "Texture field learns a function \\( F_{tex} : \\mathbb{R}^3 \\to \\mathbb{R} \\) to encode the albedo color for the texture atlas \\( \\mathbf{c}_{tex} \\) associated with only the spatial location \\( \\mathbf{x} \\). Similar to \\( F_{app} \\), feature vectors derived from \\( F_{geo} \\) are concatenated as inputs. We encourage the texture representation to be multi-view consistent by restricting \\( F_{tex} \\) being a function of only \\( \\mathbf{x} \\), while allowing the final color \\( \\mathbf{c} = \\mathbf{s} \\circ \\mathbf{c}_{tex} \\) to be view-dependent to satisfy different view observations, where \\( \\circ \\) denotes element-wise multiplication. With the nature of view-independent representation of \\( F_{tex} \\), explicit textures can be obtained by accumulating the volume albedo colors." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.471, + 0.9 + ], + "angle": 0, + "content": "The proposed geometry field and texture field are formulated in a view-independent function, once being effectively learned, they can express spatial attributes shared by the entire 3D space. This enables editable 3D structures with only few-shot stylized views needed in the later adaption process." 
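To make the decomposition concrete, here is a compact PyTorch-style sketch of how the three sub-fields could be wired together. The layer widths, depths, feature size and activations are illustrative assumptions (the paper uses positional encoding and 8/6/6-layer MLPs); the essential structure it mirrors is that the geometry and texture fields depend on position only, the appearance field additionally takes the view direction, and the final colour is the element-wise product of the view-dependent term with the albedo.

```python
# Illustrative sketch of the three SNR sub-fields (not the authors' exact architecture).
import torch
from torch import nn
import torch.nn.functional as F

def mlp(d_in, d_out, width=64, depth=4):
    layers, d = [], d_in
    for _ in range(depth - 1):
        layers += [nn.Linear(d, width), nn.Softplus(beta=100)]
        d = width
    layers.append(nn.Linear(d, d_out))
    return nn.Sequential(*layers)

class SNR(nn.Module):
    def __init__(self, feat_dim=16):
        super().__init__()
        self.f_geo = mlp(3, 1 + feat_dim)      # SDF value + geometry feature, position only
        self.f_app = mlp(3 + 3 + feat_dim, 3)  # view-dependent term s(x, v, feature)
        self.f_tex = mlp(3 + feat_dim, 3)      # view-independent albedo c_tex(x, feature)

    def forward(self, x, v):
        g = self.f_geo(x)
        sdf, feat = g[:, :1], g[:, 1:]
        s = torch.sigmoid(self.f_app(torch.cat([x, v, feat], dim=-1)))
        albedo = torch.sigmoid(self.f_tex(torch.cat([x, feat], dim=-1)))
        color = s * albedo                     # final colour c = s ∘ c_tex
        return sdf, color, albedo

# Usage: query 512 points with unit view directions.
snr = SNR()
x = torch.randn(512, 3)
v = F.normalize(torch.randn(512, 3), dim=-1)
sdf, color, albedo = snr(x, v)
```

Keeping the texture head a function of position alone is what later allows the albedo to be accumulated into an explicit, view-consistent texture map.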
+ }, + { + "type": "image", + "bbox": [ + 0.541, + 0.09, + 0.62, + 0.208 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.548, + 0.21, + 0.601, + 0.221 + ], + "angle": 0, + "content": "(a) Stage I" + }, + { + "type": "image", + "bbox": [ + 0.623, + 0.09, + 0.709, + 0.208 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.671, + 0.21, + 0.726, + 0.221 + ], + "angle": 0, + "content": "(b) Stage II" + }, + { + "type": "image", + "bbox": [ + 0.727, + 0.09, + 0.855, + 0.208 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.784, + 0.21, + 0.844, + 0.221 + ], + "angle": 0, + "content": "(c) Stage III" + }, + { + "type": "image_caption", + "bbox": [ + 0.562, + 0.224, + 0.831, + 0.238 + ], + "angle": 0, + "content": "Figure 3. Visualized results in stage I, II, III." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.252, + 0.747, + 0.269 + ], + "angle": 0, + "content": "3.2. MVS guided prior learning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.275, + 0.892, + 0.411 + ], + "angle": 0, + "content": "In this module, we learn the photorealistic representation as a prior for the later 3D style adaption. Due to the complexity of real-world captures caused by illumination, object materials, etc., the reconstructed results can easily suffer from noisy surfaces and irregular holes. Observing that the geometry directly extracted by multi-view stereo (MVS) methods are generally accurate with only local noises, we propose to integrate the depth information estimated by MVS as a geometric guidance for surface reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.411, + 0.893, + 0.532 + ], + "angle": 0, + "content": "Accumulated depth guidance. Volume rendering has been proven effective to enable robust supervision using 2D image observations. Following this, we render the depth map with \\( K \\) points along the emitted ray and use the corresponding 2D depth value for supervision. The ray can be parametrized as \\( r(i) = o + d_i\\mathbf{v} \\), where \\( o \\) is the center of the camera and \\( \\mathbf{v} \\) is the direction of the ray. The depth \\( \\hat{D}(r) \\) from the geometry field can be computed by:" + }, + { + "type": "equation", + "bbox": [ + 0.623, + 0.539, + 0.892, + 0.58 + ], + "angle": 0, + "content": "\\[\n\\hat {D} (r) = \\sum_ {i = 1} ^ {K} \\left(T _ {i} \\alpha_ {i} d _ {i}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.586, + 0.892, + 0.698 + ], + "angle": 0, + "content": "where \\(T_{i}\\) is the accumulated transmittance defined by \\(\\Pi_{j = 1}^{i + 1}(1 - \\alpha_j)\\), and \\(\\alpha_{j}\\) denotes the discrete opacity value computed by \\(\\alpha_{j} = \\max (\\frac{\\Phi_{s}(s_{i}) - \\Phi_{s}(s_{i} + 1)}{\\Phi_{s}(s_{i})},0)\\), in which \\(\\Phi\\) is the cumulative distribution of logistic distribution. More details about conversion from the SDF distance to the opacity can be found in NeuS [49]. 
For a batched training ray \\(r\\in R\\), the accumulated depth loss can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.561, + 0.705, + 0.892, + 0.737 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {d e p t h}} = \\sum_ {r \\in R} | | M (r) (\\hat {D} (r) - D (r)) | | _ {1}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.744, + 0.892, + 0.774 + ], + "angle": 0, + "content": "where \\( M(r) \\in \\{0,1\\} \\) is the object mask value and \\( D(r) \\) is the supervised depth value." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.774, + 0.892, + 0.865 + ], + "angle": 0, + "content": "Depth-sampled surface guidance. Except for the depth constraint on spatial accumulated points, we also leverage points sampled from the depth image \\( I_{D} \\) to guide the construction of the SDF surface. The surface loss encourages these sampled 3D points being close to the object surface and \\( L_{sur} \\) can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.599, + 0.871, + 0.892, + 0.905 + ], + "angle": 0, + "content": "\\[\nL _ {s u r} = \\sum_ {\\mathbf {x} _ {d} \\in I _ {D}} | | F _ {g e o} (\\mathbf {x} _ {d}) | | _ {1}. \\tag {3}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "10130" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.138 + ], + "angle": 0, + "content": "Training. Given a set of portrait images and their camera parameters, we train the architecture with the geometry field and the appearance field using the following loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.147, + 0.47, + 0.176 + ], + "angle": 0, + "content": "\\[\nL _ {p r i o r} = L _ {c o l o r} + \\lambda_ {m v s} L _ {m v s} + \\lambda_ {m a s k} L _ {m a s k} + \\lambda_ {r e g} L _ {r e g}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.178, + 0.469, + 0.253 + ], + "angle": 0, + "content": "where \\(\\lambda\\) denotes the weight of each corresponding loss. The MVS guided loss is computed as \\(L_{mvs} = L_{depth} + L_{sur}\\). The color reconstruction loss \\(L_{color}\\) is calculated as the distance between the accumulated color \\(\\hat{C}(r)\\) and the observed color \\(C(r)\\) of \\(I\\):" + }, + { + "type": "equation", + "bbox": [ + 0.141, + 0.262, + 0.469, + 0.295 + ], + "angle": 0, + "content": "\\[\nL _ {c o l o r} = \\sum_ {r \\in R} | | M (r) (\\hat {C} (r) - C (r)) | | _ {1}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.305, + 0.469, + 0.368 + ], + "angle": 0, + "content": "where \\(\\hat{C}(r)\\) can be computed by \\(\\sum_{i=1}^{K}(T_i\\alpha_i c_i)\\), and \\(c_i\\) denotes the volumetric color produced by the appearance field \\(F_{app}\\). To focus on human reconstruction, we also define a mask term with the binary cross entropy loss:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.376, + 0.469, + 0.395 + ], + "angle": 0, + "content": "\\[\nL _ {m a s k} = B C E (\\hat {M} (r), M (r)), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.402, + 0.469, + 0.449 + ], + "angle": 0, + "content": "where \\(\\hat{M}(r) = \\sum_{i=1}^{K} (T_i \\alpha_i)\\) is the density accumulation along the ray. 
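The rendered quantities above (the depth, colour and mask accumulations) all reuse the same per-sample weights T_i·α_i. The sketch below shows one way to compute those weights and the three accumulations on dummy inputs; the logistic scale and the sample layout are assumptions for illustration, not the authors' renderer.

```python
# Minimal sketch of NeuS-style weights and the accumulations D_hat, C_hat, M_hat.
import torch

def neus_alpha(sdf, inv_s=64.0):
    # alpha_i = max((Phi(sdf_i) - Phi(sdf_{i+1})) / Phi(sdf_i), 0), Phi = logistic CDF.
    cdf = torch.sigmoid(inv_s * sdf)                       # (n_rays, n_samples + 1)
    alpha = (cdf[:, :-1] - cdf[:, 1:]) / (cdf[:, :-1] + 1e-6)
    return alpha.clamp(min=0.0)                            # (n_rays, n_samples)

def render(sdf, depths, colors):
    alpha = neus_alpha(sdf)
    # Accumulated transmittance T_i = prod_{j<i} (1 - alpha_j).
    trans = torch.cumprod(
        torch.cat([torch.ones_like(alpha[:, :1]), 1.0 - alpha + 1e-7], dim=-1), dim=-1)[:, :-1]
    w = trans * alpha                                      # per-sample weights T_i * alpha_i
    depth = (w * depths).sum(dim=-1)                       # D_hat(r)
    rgb = (w.unsqueeze(-1) * colors).sum(dim=-2)           # C_hat(r)
    acc = w.sum(dim=-1)                                    # M_hat(r), density accumulation
    return depth, rgb, acc

# Dummy example: 4 rays, 32 samples per ray.
depths = torch.linspace(0.5, 2.0, 32).expand(4, 32)
sdf = torch.randn(4, 33)
colors = torch.rand(4, 32, 3)
depth, rgb, acc = render(sdf, depths, colors)
```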
The Eikonal loss [12] used to regularize the SDF values is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.156, + 0.457, + 0.469, + 0.488 + ], + "angle": 0, + "content": "\\[\nL _ {r e g} = \\sum_ {k} | | \\nabla_ {\\mathbf {p} _ {k}} F _ {g e o} (\\mathbf {x} _ {k}) - 1 | | _ {2} ^ {2}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.496, + 0.469, + 0.587 + ], + "angle": 0, + "content": "Visualized results of this stage are shown in Figure 3 (a). Not only the radiance field with accumulated color is learned, but also the inherent geometry can be accurately decomposed. The high-quality reconstruction learned in this stage also paves the way for the next stage of style adaptation with few-shot 2D stylized portraits." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.596, + 0.356, + 0.612 + ], + "angle": 0, + "content": "3.3. Spatial representation adaption" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.619, + 0.469, + 0.739 + ], + "angle": 0, + "content": "With the constructed photorealistic representation, we then transform it to the style domain by progressively adapting the underlying 3D structures. We first adaptively learn the faithful deformed geometry without the interference of the albedo texture module, and then decompose albedo colors from observed ones with fixed geometric structures. This enables effective 3D structure disentanglement with more accurate surface and clearer texture." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.747, + 0.267, + 0.762 + ], + "angle": 0, + "content": "3.3.1 Geometry adaption" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.765, + 0.469, + 0.9 + ], + "angle": 0, + "content": "In this stage, we utilize a number of stylized 2D portrait images \\(I_{t}\\) derived from existing 2D portrait stylization methods [30, 53] to fine-tune the geometry field \\(F_{geo}\\) and the appearance field \\(F_{app}\\). The spatial-shared geometry will be adaptively transformed in \\(F_{geo}\\) and the observed colors varying with views will be modeled in \\(F_{app}\\), enabling the network focusing on geometry adaption. During training, the pixel color of \\(I_{t}\\) is used as the observed color to guide the accumulated volume colors:" + }, + { + "type": "equation", + "bbox": [ + 0.562, + 0.104, + 0.892, + 0.136 + ], + "angle": 0, + "content": "\\[\nL _ {c o l o r} = \\sum_ {r \\in R} | | M (r) (C (r) - C _ {t} (r)) | | _ {1}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.141, + 0.892, + 0.186 + ], + "angle": 0, + "content": "where \\( C(r) \\) is computed by the volumetric color from \\( F_{app} \\) and the converted opacity from \\( F_{geo} \\). The total training loss is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.55, + 0.197, + 0.892, + 0.215 + ], + "angle": 0, + "content": "\\[\nL _ {g e o} = L _ {c o l o r} + \\lambda_ {m a s k} L _ {m a s k} + \\lambda_ {r e g} L _ {r e g}. \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.224, + 0.892, + 0.315 + ], + "angle": 0, + "content": "As shown in Figure 3 (b), the spatial deformed geometry can be extracted from \\( F_{geo} \\). However, rendering results are 3D-inconsistent with obvious artifacts in side-view renderings, since only few-shot 2D stylizations of the frontal views are provided for style adaption and the view-dependent function \\( F_{app} \\) trivially fits these views." 
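A minimal sketch of how this adaption objective can be assembled is given below. It mirrors the colour, mask and Eikonal terms of Eq. (9) with the loss weights reported in Section 4, but the tensor names, shapes and dummy inputs are assumptions for illustration rather than the released implementation.

```python
# Sketch of the stage-II objective L_geo = L_color + lambda_mask * L_mask + lambda_reg * L_reg.
import torch
import torch.nn.functional as F

def geometry_adaption_loss(pred_rgb, pred_acc, sdf_grad, stylized_rgb, mask,
                           lambda_mask=0.5, lambda_reg=0.1):
    # pred_rgb:  (n_rays, 3) colour accumulated from F_app / F_geo
    # pred_acc:  (n_rays,)   accumulated density M_hat(r)
    # sdf_grad:  (n_pts, 3)  gradients of F_geo at sampled points
    # stylized_rgb, mask:    supervision from the few-shot stylized frontal views
    l_color = (mask.unsqueeze(-1) * (pred_rgb - stylized_rgb)).abs().mean()
    l_mask = F.binary_cross_entropy(pred_acc.clamp(1e-4, 1 - 1e-4), mask)
    l_reg = ((sdf_grad.norm(dim=-1) - 1.0) ** 2).mean()    # Eikonal regularizer
    return l_color + lambda_mask * l_mask + lambda_reg * l_reg

# Dummy shapes only, to show the call signature.
n_rays, n_pts = 512, 4096
loss = geometry_adaption_loss(
    pred_rgb=torch.rand(n_rays, 3), pred_acc=torch.rand(n_rays),
    sdf_grad=torch.randn(n_pts, 3),
    stylized_rgb=torch.rand(n_rays, 3), mask=torch.randint(0, 2, (n_rays,)).float())
```

Stage III reuses the same assembly with the geometry field frozen, the albedo-times-reflection composition from Section 3.1 producing the colour, and the additional patch-discriminator term L_ds added, as in Eq. (12).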
+ }, + { + "type": "title", + "bbox": [ + 0.499, + 0.323, + 0.846, + 0.338 + ], + "angle": 0, + "content": "3.3.2 Albedo texture adaption and optimization" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.341, + 0.892, + 0.506 + ], + "angle": 0, + "content": "In this stage, we aim to learn the spatial-shared texture field \\( F_{tex} \\) by decomposing the albedo colors from the appearance ones. Specifically, we insert \\( F_{tex} \\) as a view-independent texture field and jointly optimize \\( F_{tex} \\) and \\( F_{app} \\). In this way, view-consistent colors can be effectively decomposed from the total appearance and the remaining components in \\( F_{app} \\) are regarded as view-dependent reflections. The final color are computed by \\( \\tilde{c}_i = s \\circ c_i' \\), where \\( c_i' \\) is the albedo color from \\( F_{tex} \\) and \\( s \\) is the degraded reflection from \\( F_{app} \\) for spatial points. Then we can obtain the final accumulated color by" + }, + { + "type": "equation", + "bbox": [ + 0.623, + 0.507, + 0.891, + 0.548 + ], + "angle": 0, + "content": "\\[\n\\tilde {C} (r) = \\sum_ {i = 1} ^ {K} \\left(T _ {i} \\alpha_ {i} \\tilde {c} _ {i}\\right). \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.552, + 0.892, + 0.688 + ], + "angle": 0, + "content": "To further ensure effective albedo color decomposition, a discriminator \\( D \\) is introduced to encourage \\( \\tilde{C}(r) \\) satisfying the approximate distribution of palette colors of \\( I_t \\). With \\( \\kappa \\) as a posterize filter, the patch color \\( \\kappa(C_t(p)) \\) of \\( I_t \\) is fed into \\( D \\) as a real sample, and the reconstructed color \\( \\tilde{C}(p) \\) from \\( F_{tex} \\) is fed into \\( D \\) as a fake sample, where \\( p \\) is the set of rays for image pixels in a patch. We define the discrimination loss \\( L_{ds} \\) to penalize for distance between the distribution of \\( C(p) \\) and \\( \\tilde{C}(p) \\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.577, + 0.698, + 0.892, + 0.739 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} L _ {d s} = \\mathbb {E} _ {p \\sim \\left\\{I _ {t} ^ {i} \\right\\}} [ \\log (D (\\kappa (C _ {t} (p))) ] + \\tag {11} \\\\ \\mathbb {E} _ {p \\sim \\{I _ {t} ^ {i} \\}} [ l o g (1 - D (\\bar {C} (p))) ]. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.747, + 0.892, + 0.791 + ], + "angle": 0, + "content": "To keep the learned geometry stay faithful to the given style, we fix \\( F_{geo} \\) and train \\( \\{F_{app}, F_{tex}\\} \\) with the training loss as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.509, + 0.804, + 0.891, + 0.821 + ], + "angle": 0, + "content": "\\[\nL _ {t e x} = L _ {\\text {c o l o r}} + \\lambda_ {\\text {m a s k}} L _ {\\text {m a s k}} + \\lambda_ {\\text {r e g}} L _ {\\text {r e g}} + \\lambda_ {\\text {d s}} L _ {\\text {d s}}, \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.83, + 0.892, + 0.861 + ], + "angle": 0, + "content": "where \\(L_{color}\\) denotes the distance between the final accumulated color \\(\\tilde{C} (r)\\) and the observed stylized color \\(C_t(r)\\):" + }, + { + "type": "equation", + "bbox": [ + 0.561, + 0.872, + 0.891, + 0.904 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {c o l o r}} = \\sum_ {r \\in R} | | M (r) (\\tilde {C} (r) - C _ {t} (r)) | | _ {1}. 
\\tag {13}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "10131" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.146, + 0.089, + 0.235, + 0.158 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.251, + 0.09, + 0.825, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.077, + 0.161, + 0.235, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.146, + 0.234, + 0.236, + 0.3 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.251, + 0.303, + 0.565, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.678, + 0.372, + 0.787, + 0.384 + ], + "angle": 0, + "content": "(c) Exported meshes" + }, + { + "type": "image_caption", + "bbox": [ + 0.255, + 0.388, + 0.713, + 0.403 + ], + "angle": 0, + "content": "Figure 4. Stylized results in novel views and corresponding exported meshes." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.421, + 0.47, + 0.497 + ], + "angle": 0, + "content": "We show rendering results of this stage in Figure 3 (c), demonstrating their 3D consistency in multi-view setting. Thanks to the spatial-shared colors learned in the view-independent \\( F_{tex} \\), the albedo texture can be seamlessly extracted and further enhanced in an explicit manner." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.512, + 0.283, + 0.529 + ], + "angle": 0, + "content": "4. Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.536, + 0.471, + 0.807 + ], + "angle": 0, + "content": "Implementation details. Our network architecture consists of three modules: the signed distance function \\( F_{geo} \\), the appearance function \\( F_{app} \\) and the texture function \\( F_{tex} \\), which are modeled by three MLPs with 8, 6, 6 hidden layers, respectively. Positional encoding [32] and sphere initialization [3] are also applied similar to [49]. For the depth priors, we adopt the OpenMVS method [1] to extract estimated depth maps from the input video. For the 2D style translator, we adopt DCT-Net [30] and VToonify [53] to produce target stylized images and preserve forward/backward facing results whose absolute yaw angle is less than 0.2 radian for supervision. We use the Adam optimizer [22] with the learning rate of 2.5e-5 to train our models and sample 512 rays for each batch. The loss weights are shared by three stages with \\( \\lambda_{mask} \\), \\( \\lambda_{mvs} \\), \\( \\lambda_{reg} \\), \\( \\lambda_{ds} \\) set to \\( \\{0.5, 0.5, 0.1, 1\\} \\). Stage I, II and III are trained for 300k, 200k and 50k iterations, respectively, taking around 20 hours in total on a single NVIDIA Teasla-V100 GPU." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.81, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Datasets. We create a \\(360^{\\circ}\\) captured portrait dataset called Portrait360 to evaluate our approach. This dataset contains 14 static portrait videos captured by rotating the camera around the human head. All videos have a length between 20 to 30 seconds and are split to 300 frames as source training data." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.421, + 0.707, + 0.437 + ], + "angle": 0, + "content": "4.1.3D portrait stylization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.445, + 0.892, + 0.719 + ], + "angle": 0, + "content": "Performance on view consistent rendering. 
Given a short portrait video captured by a monocular camera, our model learns a stylized 3D representation from 2D portrait frames. Stylized portrait images can be generated from arbitrary novel viewpoints following exemplar styles, while ensuring facial identity of the person and 3D consistency between different views. Note that the synthesized images in this part are produced directly by volume rendering on implicit functions, without any explicit style enhancement applied for the results. Our stylized avatars rendered in novel viewpoints and their corresponding exported meshes are shown in Figure 4, more results can be found in the supplementary. Comparison with 3D avatar stylization methods. In this section, we compare our method with two 3D avatar stylization methods, DeformToon3D [57] and NeRF-Art [48], which represent the state-of-the-art techniques in 3D-aware generative toonification and text-guided NeRF stylization, respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.893, + 0.902 + ], + "angle": 0, + "content": "Qualitative comparison. Here we adapt VToonify [53] to generate target stylized images with selected exemplars to train our model. For DeformToon3D [57], we use the author-provided code and train the model using data generated with the same exemplars by DualStyleGAN [52], which is also the 2D generator used in VToonify. Here we directly generate its real-space and style-space results under the same sampled instance code, since the additional PTI [43] process will cause accumulated fidelity errors, especially on arbitrary real faces. For NeRF-Art [48], as it does not support using a single exemplar image for style guidance, we use Mini-GPT4 [59] to generate style de" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10132" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.082, + 0.885, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.393, + 0.897, + 0.437 + ], + "angle": 0, + "content": "Figure 5. Qualitative comparison with 3D avatar stylization methods. We directly compare the generated real-space and style-space results of DeformToon3D to alleviate the fidelity loss in the additional PTI process. The models of NeRF-Art and Ours are trained on our Portrait360 dataset. Four views are selected for comparison." + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.444, + 0.473, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.558, + 0.472, + 0.601 + ], + "angle": 0, + "content": "Figure 6. Qualitative comparison with 2D portrait stylization methods on view consistent rendering. For a more prominent video comparison, please refer to the supplementary video." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.611, + 0.47, + 0.793 + ], + "angle": 0, + "content": "scriptions corresponding to each target image. The input text prompts used in this section are shown in the supplementary. We demonstrate qualitative comparison of the three methods in Figure 5. DeformToon3D only focuses on frontal views and fails to generate plausible renderings under large angles. Besides, it tends to synthesize overly exaggerated results and fail to maintain the facial characteristics (e.g., hairstyles) of the original image. NeRF-Art only generates results with undesired stylized texture and weakly-changed underlying structures. 
On the contrary, our method can generate fine-grained full-head stylized avatars with view-consistent renderings and exaggerated styles." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Quantitative comparison. For quantitative comparison, we measure the quality of multi-view stylized renderings of all methods by calculating the Frechet Inception Distance (FID) [14] value for the training cartoon exemplar dataset. A lower FID score indicates that the distribution of the generated images is more similar to that of real 2D cartoon faces. we also evaluate the fidelity of all methods in 3D" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.445, + 0.892, + 0.473 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison with 3D avatar stylization methods on FID and IP. \\(\\uparrow\\), \\(\\downarrow\\) denote if higher or lower is better." + }, + { + "type": "table", + "bbox": [ + 0.554, + 0.475, + 0.836, + 0.527 + ], + "angle": 0, + "content": "
Method | DeformToon3D | NeRF-Art | Ours
FID ↓ | 66.5 | 78.8 | 57.6
IP ↑ | 0.551 | 0.671 | 0.678
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.547, + 0.892, + 0.655 + ], + "angle": 0, + "content": "style adaption using the identity preservation (IP) metric, which is calculated as the Arcface [9] feature similarity between the input image and the stylized result. As shown in Table 1, our method outperforms the other two methods in both FID and identity preservation, which showcases our ability of generating high-quality stylized results while being faithful to the original human identity." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.657, + 0.892, + 0.747 + ], + "angle": 0, + "content": "Comparison with 2D portrait stylization methods. In this section, we compare our method with two state-of-the-art 2D portrait stylization methods, VToonify [53] and DCT-Net [30], to further demonstrate our ability of generating 3D-consistent and high-quality stylized results for arbitrary views." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.893, + 0.902 + ], + "angle": 0, + "content": "Qualitative comparison. Due to the incapability of 2D portrait stylization methods to synthesize novel view results, we only make comparison under reconstructed views captured in the input video. For both VToonify and DCT-Net, frames are directly input into the trained/finetuned models released by authors to obtain the corresponding stylized images. Then we select their forward/backward results as sparse view supervision to train our models (denoted as ours-V and ours-D, respectively). As illustrated in Figure 6, VToonify and DCT-Net fail to synthesize exaggerated ge" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10133" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.079, + 0.47, + 0.105 + ], + "angle": 0, + "content": "Table 2. Comparison of FID and 3D validity with 2D portrait stylization methods." + }, + { + "type": "table", + "bbox": [ + 0.116, + 0.109, + 0.428, + 0.158 + ], + "angle": 0, + "content": "
Method | DCT-Net | Ours-D | VToonify | Ours-V
FID ↓ | 126.1 | 94.7 | 86.9 | 57.6
3D validity ↑ | 0.54 | 1.00 | 0.62 | 1.00
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.167, + 0.469, + 0.195 + ], + "angle": 0, + "content": "Table 3. Ablation of the progressive training scheme. Results verify the effectiveness of the proposed module in each stage." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.197, + 0.47, + 0.237 + ], + "angle": 0, + "content": "
Variants | w/o Prior | w/o GA | w/o TA | w/o PSA | full model
FID ↓ | 98.7 | 105.2 | 96.7 | 96.2 | 94.7
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.251, + 0.469, + 0.371 + ], + "angle": 0, + "content": "ometry effects in challenging viewpoints (e.g., side faces) and are unable to maintain 3D view consistency. Note that these extreme view results are not used as supervision in our style adaption process. On the contrary, our method can easily render style-faithful and robust results in a 3D consistent manner. This showcases the importance of learning underlying 3D structures in maintaining view-consistency of the stylized avatar." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.372, + 0.47, + 0.584 + ], + "angle": 0, + "content": "Quantitative comparison. We also measure the quality of our rendering results against VToonify [53] and DCT-Net [30] using FID [14]. We use data from our Portrait360 dataset as source images and remove failure cases of the 2D methods. As shown in Table 2, both of our models produce better results with lower FID values compared with original 2D methods. To further evaluate the stylization ability of handling views from the entire 3D space, we propose to calculate 3D validity by computing the conversion rate of successfully stylized results to the whole dataset. 2D methods rely on detected facial landmarks and failed conversions can be automatically recognized. Compared to 2D methods, our method could handle more challenging poses in the entire 3D space with higher 3D validity." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.593, + 0.228, + 0.608 + ], + "angle": 0, + "content": "4.2. Ablation study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.469, + 0.689 + ], + "angle": 0, + "content": "In addition to visualized results in Figure 3, we verify the effectiveness of the proposed module in each stage by evaluating the performance of corresponding variants of our method. The qualitative and quantitative results are shown in Figure 7 and Table 3, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.469, + 0.856 + ], + "angle": 0, + "content": "MVS guided prior learning. We train a model without photorealistic prior learning and directly learn the spatial neural representation from stylized portrait images. It is confusing for inverse rendering to produce valid geometry and texture with unreal 3D-inconsistent stylized observations, as shown in Figure 7 (b). This indicates that the reconstruction prior is crucial for generating plausible underlying structures in 3D style adaption. The design of MVS guidance also helps to reconstruct more robust surface without holes brought by illumination noise in complicated real-world scenes (see Figure 7 (g))." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Progressive structure adaption (PSA). By removing PSA proposed in Section 3.3, we jointly learn the geometry and texture adaption with the full SNR network. 
Results in" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.088, + 0.58, + 0.149 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.584, + 0.089, + 0.66, + 0.149 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.663, + 0.089, + 0.738, + 0.149 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.741, + 0.09, + 0.818, + 0.149 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.827, + 0.09, + 0.887, + 0.149 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.15, + 0.574, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.15, + 0.652, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.15, + 0.729, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.746, + 0.15, + 0.811, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.825, + 0.15, + 0.887, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.211, + 0.574, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.586, + 0.211, + 0.652, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.21, + 0.729, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.746, + 0.211, + 0.811, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.822, + 0.211, + 0.885, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.271, + 0.577, + 0.282 + ], + "angle": 0, + "content": "(a) Full model" + }, + { + "type": "image_caption", + "bbox": [ + 0.587, + 0.271, + 0.653, + 0.282 + ], + "angle": 0, + "content": "(b) w/o Prior" + }, + { + "type": "image_caption", + "bbox": [ + 0.668, + 0.271, + 0.73, + 0.282 + ], + "angle": 0, + "content": "(c) w/o PSA" + }, + { + "type": "image_caption", + "bbox": [ + 0.752, + 0.271, + 0.81, + 0.281 + ], + "angle": 0, + "content": "(d) w/o GA" + }, + { + "type": "image_caption", + "bbox": [ + 0.828, + 0.271, + 0.884, + 0.281 + ], + "angle": 0, + "content": "(e) w/o TA" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.287, + 0.892, + 0.343 + ], + "angle": 0, + "content": "Figure 7. Effects of the proposed prior learning (Prior), progressive structure adaption (PSA), geometry adaption (GA), texture adaption (TA), style enhancement (SE) and MVS guidance (MVS)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.355, + 0.892, + 0.414 + ], + "angle": 0, + "content": "Figure 7 (c) show that simultaneously training \\( F_{geo} \\) and \\( F_{tex} \\) disrupts the disentanglement of each other. Progressive adaption brings more accurate surfaces and seamless textures." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.416, + 0.893, + 0.643 + ], + "angle": 0, + "content": "Geometry and texture adaption. Figure 7 (d, e) verify the necessity of geometry adaption (GA) and texture adaption (TA), respectively. In contrast to explicit texture stylization, GA enables the internal surface to be deformed adaptively, thus making 3D portraits be fully stylized. Without TA, inferred vertex colors from the appearance field suffers noticeable artifacts, due to the inconsistent observed colors from different views. 
TA introduces an extra texture field that automatically decomposes albedo colors shared in 3D space, thus alleviating the texture seaming issue. Besides, we explore adding additional style enhancement (SE) on the explicit texture map extracted from the texture field, which further brings more vivid stylization effects (Figure 7 (f)). We also show the impact of the number of stylized frames used for adaption stages in the supplemental material." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.653, + 0.619, + 0.668 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In this paper, we handled the challenging and on-going task of synthesizing the high-fidelity stylized 3D avatar from a portrait video under the guidance of a single style image. We showed that the naive combination of portrait style transfer and 3D reconstruction techniques does not work well in this task, and proposed a novel framework called 3DToonify that learns 3D style adaption based on spatial neural representations (SNR). We introduced a delicately-designed spatial neural network for disentangled geometry and texture adaption. We also came up with a novel progressive training scheme suitable for the SNR to accurately capture the underlying stylized 3D structures. Both qualitative and quantitative experimental results demonstrated that our method enables fine-grained 3D avatar stylization with view consistency and diverse exaggerated results." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10134" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.468, + 0.142 + ], + "angle": 0, + "content": "[1] OpenMvs. [EB/OL]. https://github.com/cdcseacave/openMVS/.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.144, + 0.469, + 0.226 + ], + "angle": 0, + "content": "[2] Rameen Abdal, Hsin-Ying Lee, Peihao Zhu, Mengei Chai, Aliaksandr Siarohin, Peter Wonka, and Sergey Tulyakov. 3davatargan: Bridging domains for personalized editable avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4552-4562, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.228, + 0.47, + 0.282 + ], + "angle": 0, + "content": "[3] Matan Atzmon and Yaron Lipman. Sal: Sign agnostic learning of shapes from raw data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2565-2574, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.284, + 0.469, + 0.366 + ], + "angle": 0, + "content": "[4] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.368, + 0.469, + 0.437 + ], + "angle": 0, + "content": "[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.438, + 0.469, + 0.521 + ], + "angle": 0, + "content": "[6] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.522, + 0.469, + 0.577 + ], + "angle": 0, + "content": "[7] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. arXiv preprint arXiv:2303.13873, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.579, + 0.469, + 0.647 + ], + "angle": 0, + "content": "[8] Pei-Ze Chiang, Meng-Shiun Tsai, Hung-Yu Tseng, WeiSheng Lai, and Wei-Chen Chiu. Stylizing 3d scene via implicit representation and hypernetwork. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1475-1484, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.649, + 0.469, + 0.689 + ], + "angle": 0, + "content": "[9] Jiankang Deng, Jia Guo, Xue Niannan, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.691, + 0.469, + 0.746 + ], + "angle": 0, + "content": "[10] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.747, + 0.469, + 0.802 + ], + "angle": 0, + "content": "[11] Leon A Gatys, Alexander S Ecker, and Matthias Bethge. Image style transfer using convolutional neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2414-2423, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.804, + 0.469, + 0.844 + ], + "angle": 0, + "content": "[12] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. arXiv preprint arXiv:2002.10099, 2020. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[13] Fangzhou Han, Shuquan Ye, Mingming He, Menglei Chai, and Jing Liao. Exemplar-based 3d portrait stylization. IEEE Transactions on Visualization and Computer Graphics, 2021. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.16 + ], + "angle": 0, + "content": "[14] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[15] Yukun Huang, Jianan Wang, Ailing Zeng, He Cao, Xianbiao Qi, Yukai Shi, Zheng-Jun Zha, and Lei Zhang. Dreamwaltz: Make a scene with complex 3d animatable avatars. arXiv preprint arXiv:2305.12529, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.276 + ], + "angle": 0, + "content": "[16] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.346 + ], + "angle": 0, + "content": "[17] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.349, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[18] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG), 42(4):1-14, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.406, + 0.892, + 0.473 + ], + "angle": 0, + "content": "[19] Gwanghyun Kim and Se Young Chun. Datid-3d: Diversitypreserved domain adaptation using text-to-image diffusion for 3d generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14203-14213, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.476, + 0.892, + 0.545 + ], + "angle": 0, + "content": "[20] Gwanghyun Kim, Ji Ha Jang, and Se Young Chun. Podia-3d: Domain adaptation of 3d generative model across large domain gap using pose-preserved text-to-image diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 22603–22612, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.547, + 0.892, + 0.615 + ], + "angle": 0, + "content": "[21] Junho Kim, Minjae Kim, Hyeonwoo Kang, and Kwang Hee Lee. U-gat-it: Unsupervised generative attentional networks with adaptive layer-instance normalization for image-to-image translation. In International Conference on Learning Representations, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.618, + 0.892, + 0.657 + ], + "angle": 0, + "content": "[22] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.661, + 0.892, + 0.716 + ], + "angle": 0, + "content": "[23] Nikos Kolotouros, Thiemo Alldieck, Andrei Zanfir, Eduard Gabriel Bazavan, Mihai Fieraru, and Cristian Sminchisescu. Dreamhuman: Animatable 3d avatars from text. arXiv preprint arXiv:2306.09329, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.718, + 0.892, + 0.772 + ], + "angle": 0, + "content": "[24] Bing Li, Yuanlue Zhu, Yitong Wang, Chia-Wen Lin, Bernard Ghanem, and Linlin Shen. Anigan: Style-guided generative adversarial networks for unsupervised anime face generation. IEEE Transactions on Multimedia, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.775, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[25] Tingting Liao, Hongwei Yi, Yuliang Xiu, Jiaxaing Tang, Yangyi Huang, Justus Thies, and Michael J Black. Tada! text to animatable digital avatars. arXiv preprint arXiv:2308.10899, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.832, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[26] Yiyi Liao, Katja Schwarz, Lars Mescheder, and Andreas Geiger. 
Towards unsupervised learning of generative models for 3d controllable image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5871-5880, 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10135" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[27] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 300–309, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.177, + 0.472, + 0.245 + ], + "angle": 0, + "content": "[28] Jinlin Liu, Yuan Yao, Wendi Hou, Miaomiao Cui, Xuansong Xie, Changshui Zhang, and Xian-sheng Hua. Boosting semantic human matting with coarse annotations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8563-8572, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.247, + 0.47, + 0.302 + ], + "angle": 0, + "content": "[29] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.302, + 0.47, + 0.357 + ], + "angle": 0, + "content": "[30] Yifang Men, Yuan Yao, Miaomiao Cui, Zhouhui Lian, and Xuansong Xie. Dct-net: domain-calibrated translation for portrait stylization. ACM Transactions on Graphics (TOG), 41(4):1-9, 2022. 1, 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.359, + 0.47, + 0.426 + ], + "angle": 0, + "content": "[31] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4460-4470, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.428, + 0.47, + 0.495 + ], + "angle": 0, + "content": "[32] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 2, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.497, + 0.47, + 0.552 + ], + "angle": 0, + "content": "[33] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.553, + 0.47, + 0.594 + ], + "angle": 0, + "content": "[34] Thu Nguyen-Phuoc, Feng Liu, and Lei Xiao. Snerf: stylized neural implicit representations for 3d scenes. arXiv preprint arXiv:2207.02363, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.595, + 0.47, + 0.649 + ], + "angle": 0, + "content": "[35] Thu Nguyen-Phuoc, Gabriel Schwartz, Yuting Ye, Stephen Lombardi, and Lei Xiao. Alteredavatar: Stylizing dynamic 3d avatars with fast style adaptation. arXiv preprint arXiv:2305.19245, 2023. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.65, + 0.47, + 0.718 + ], + "angle": 0, + "content": "[36] Thu H Nguyen-Phuoc, Christian Richardt, Long Mai, Yongliang Yang, and Niloy Mitra. Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in Neural Information Processing Systems, 33:6767-6778, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.72, + 0.47, + 0.789 + ], + "angle": 0, + "content": "[37] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5589-5599, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.47, + 0.871 + ], + "angle": 0, + "content": "[38] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.47, + 0.902 + ], + "angle": 0, + "content": "[39] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning con" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "tinuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 165-174, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[40] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.205, + 0.892, + 0.245 + ], + "angle": 0, + "content": "[41] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.247, + 0.892, + 0.328 + ], + "angle": 0, + "content": "[42] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.33, + 0.892, + 0.384 + ], + "angle": 0, + "content": "[43] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. ACM Transactions on graphics (TOG), 42(1):1-13, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.386, + 0.892, + 0.455 + ], + "angle": 0, + "content": "[44] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.456, + 0.892, + 0.537 + ], + "angle": 0, + "content": "[45] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.539, + 0.892, + 0.621 + ], + "angle": 0, + "content": "[46] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.623, + 0.892, + 0.678 + ], + "angle": 0, + "content": "[47] Shen Sang, Tiancheng Zhi, Guoxian Song, Minghao Liu, Chunpong Lai, Jing Liu, Xiang Wen, James Davis, and Linjie Luo. AgileAvatar: Stylized 3d avatar creation via cascaded domain bridging. 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.679, + 0.892, + 0.734 + ], + "angle": 0, + "content": "[48] Can Wang, Ruixiang Jiang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Nerf-art: Text-driven neural radiance fields stylization. IEEE Transactions on Visualization and Computer Graphics, 2023. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.735, + 0.892, + 0.789 + ], + "angle": 0, + "content": "[49] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021. 2, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.79, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[50] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. arXiv preprint arXiv:2305.16213, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[51] Shiyao Xu, Lingzhi Li, Li Shen, Yifang Men, and Zhouhui Lian. Your3demooji: Creating personalized emojis via one-shot 3d-aware cartoon avatar synthesis. In SIGGRAPH Asia 2022 Technical Communications, pages 1-4. 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "10136" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[52] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Pastiche master: Exemplar-based high-resolution portrait style transfer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7693-7702, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.47, + 0.218 + ], + "angle": 0, + "content": "[53] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Vtoonify: Controllable high-resolution portrait video style transfer. arXiv preprint arXiv:2209.11224, 2022. 
1, 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.22, + 0.47, + 0.288 + ], + "angle": 0, + "content": "[54] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. Advances in Neural Information Processing Systems, 33:2492-2502, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.29, + 0.47, + 0.359 + ], + "angle": 0, + "content": "[55] Chi Zhang, Yiwen Chen, Yijun Fu, Zhenglin Zhou, Gang Yu, Billzb Wang, Bin Fu, Tao Chen, Guosheng Lin, and Chunhua Shen. StyleAvatar3d: Leveraging image-text diffusion models for high-fidelity 3d avatar generation. arXiv preprint arXiv:2305.19012, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.361, + 0.47, + 0.429 + ], + "angle": 0, + "content": "[56] Huichao Zhang, Bowen Chen, Hao Yang, Liao Qu, Xu Wang, Li Chen, Chao Long, Feida Zhu, Kang Du, and Min Zheng. Avatarverse: High-quality & stable 3d avatar creation from text and pose. arXiv preprint arXiv:2308.03610, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.431, + 0.47, + 0.513 + ], + "angle": 0, + "content": "[57] Junzhe Zhang, Yushi Lan, Shuai Yang, Fangzhou Hong, Quan Wang, Chai Kiat Yeo, Ziwei Liu, and Chen Change Loy. Deformtoon3d: Deformable neural radiance fields for 3d toonification. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9144-9154, 2023. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.515, + 0.47, + 0.571 + ], + "angle": 0, + "content": "[58] Kai Zhang, Nick Kolkin, Sai Bi, Fujun Luan, Zexiang Xu, Eli Shechtman, and Noah Snavely. Arf: Artistic radiance fields. In European Conference on Computer Vision, pages 717-733. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.572, + 0.47, + 0.627 + ], + "angle": 0, + "content": "[59] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.629, + 0.47, + 0.697 + ], + "angle": 0, + "content": "[60] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In Proceedings of the IEEE international conference on computer vision, pages 2223-2232, 2017. 
2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.697 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10137" + } + ] +] \ No newline at end of file diff --git a/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/2c81e75a-1abf-4d0b-aa9d-80d61a8cb264_origin.pdf b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/2c81e75a-1abf-4d0b-aa9d-80d61a8cb264_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..236c765104d82757b45baf235b6d62a97fdccbcc --- /dev/null +++ b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/2c81e75a-1abf-4d0b-aa9d-80d61a8cb264_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ffcddb05527d5fe16f6a4394d4329f26e13947438701079fdec439869378fc4 +size 10199227 diff --git a/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/full.md b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/full.md new file mode 100644 index 0000000000000000000000000000000000000000..839120c006e98a86b953b4ccffa109f0cb269a1e --- /dev/null +++ b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/full.md @@ -0,0 +1,356 @@ +# 3DToonify: Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images + +Yifang Men $^{1*}$ , Hanxi Liu $^{2*}$ , Yuan Yao $^{1}$ , Miaomiao Cui $^{1}$ , Xuansong Xie $^{1}$ , Zhouhui Lian $^{2\dagger}$ $^{1}$ Institute for Intelligent Computing, Alibaba Group + +$^{2}$ Wangxuan Institute of Computer Technology, Peking University, China + +![](images/da767aa80c93a5565c7fc9d733d21dcc827b6b033124b6ab68f1e8650a012ff7.jpg) +(a) Input video + +![](images/fd88199e8286ebe9ab4bc36991e5c5fd4ce50af1e43504a13b6561fdb0890581.jpg) +(b) 3D style adaption + +![](images/923548eb85c654ebbe7c547e03b6e51dfadc1de6a918a579005c6a51189f3ac0.jpg) +(c) View-consistent renderings in different styles +Figure 1. Given a set of RGB portrait images captured by a monocular camera, our method can learn a photorealistic representation in neural implicit fields, and transfer it to artistic ones with underlying 3D structures changed. Multiple stylized results can be rendered from arbitrary novel viewpoints with consistent geometry and texture. + +# Abstract + +Visual content creation has aroused a surge of interest given its applications in mobile photography and AR/VR. Portrait style transfer and 3D recovery from monocular images as two representative tasks have so far evolved independently. In this paper, we make a connection between the two, and tackle the challenging task of 3D portrait stylization - modeling high-fidelity 3D stylized avatars from captured 2D portrait images. However, naively combining the techniques from the two isolated areas may suffer from either inadequate stylization or absence of 3D assets. To this end, we propose 3DToonify, a new framework that introduces a progressive training scheme to achieve 3D style adaption on spatial neural representation (SNR). SNR is constructed with implicit fields and they are dy + +namically optimized by the progressive training scheme, which consists of three stages: guided prior learning, deformable geometry adaption and explicit texture adaption. 
In this way, stylized geometry and texture are learned in SNR in an explicit and structured way with only a single stylized exemplar needed. Moreover, our method obtains style-adaptive underlying structures (i.e., deformable geometry and exaggerated texture) and view-consistent stylized avatar rendering from arbitrary novel viewpoints. Both qualitative and quantitative experiments have been conducted to demonstrate the effectiveness and superiority of our method for automatically generating exemplar-guided 3D stylized avatars. + +# 1. Introduction + +Portrait style transfer [30, 53] aims to transform real face images into artistic 2D portraits in desired visual styles while maintaining personal identity. However, given a sequence of portrait images captured from different viewpoints, existing portrait style transfer methods are typically + +only effective for limited forward-facing photos and fails to maintain view consistency in 3D space. Essentially, existing methods only learn a style transfer between 2D features, and have no sense to 3D representations built on real-world objects. What if we can construct and stylize underlying 3D structures from captured 2D portrait images? See Figure 1 for an example. When stylized with 3D structures (i.e., geometry and texture), we can easily render view-free stylized portraits with 3D consistency and robust artistic results. This capacity will extremely facilitate the 3D content creation process which often requires large amounts of time and special expertise, and make it accessible to a variety of novice users. As shown in Figure 1, this paper aims to address the challenging task of generating high-fidelity 3D avatar from a portrait video by following the style of a given exemplar image. We refer this task as 3D portrait stylization – a marriage between portrait style transfer and 3D recovery from monocular images. + +The naive solution to the task mentioned above is directly combining existing methods of 2D portrait stylization with 3D reconstruction, i.e., learning 3D representations such as voxels [36], primitives [26] or occupancy fields [31] directly from stylized portrait images. However, it is less effective due to the biased image manifold built by 2D portrait stylization, making the representation learning be ill-posed with highly-biased visible views. Recently, neural radiance field (NeRF) [4, 18, 32, 33, 40, 49] has made great progress due to its advanced ability to achieve photo-realistic novel view synthesis with sparse input views. Some previous attempts [8, 34, 35, 48, 58] also combine NeRF with image-based [11] or text-driven [42] neural style transfer to generate novel views of stylized 3D scenes or avatars. Recently, a series of new works have started to focus on 3D stylized avatar generation. Some methods [7, 15, 23, 25, 27, 41, 50, 56] exploit the great potential of 2D text-to-image diffusion models [44-46] to generate 3D cartoonish avatars according to a given text prompt. Others [2, 51, 55, 57] build on 3D generative models [6, 38] to bridge the gap between the real space and the target domain, and generate avatars with certain styles under a sampled latent vector. However, all these methods either can not achieve high-fidelity personalized 3D portrait stylization with user-specific identities and styles, or fail to generate fine-grained full-head avatars that support view-consistent rendering from arbitrary viewpoints. 
+ +To address the aforementioned challenges, we draw inspiration from domain adaption on 2D features [10, 30], and introduce a progressive training scheme to achieve 3D style adaption on spatial neural representation (SNR). The key insights of this design are twofold. First, it is hard to directly learn an accurate 3D representation field from stylized portraits with few-shot inconsistent 2D views, but easier to learn a photorealistic field as a prior and adapt it to + +target style fields with transfer learning. Second, learning spatial representation with disentangled surface and texture allows for flexible geometry deformation and texture adaption, leading to more diverse and fine-grained style editing. To this end, we construct SNR with neural implicit fields and dynamically optimize its subfields with a progressive training scheme. This scheme includes the following three stages: prior learning to obtain an accurate human reconstruction, geometry adaption to produce inherently exaggerated deformation, and texture adaption to realize artistic albedo decomposition. Eventually, the 2D portraits are converted to stylized SNR, and explicit 3D assets can be easily extracted with disentangled 3D structures. In summary, our contributions are threefold: + +- We present a new method that adopts neural implicit fields to address the challenging task of generating high-fidelity 3D avatar from a portrait video by following the style of a given exemplar image. Stylized results can be rendered under arbitrary novel viewpoints with consistent geometry and texture. +- We introduce an elegant network of spatial neural representation to model common attributes over the 3D space. This design allows for disentangled geometry and texture adaption, achieving more flexible and fine-grained 3D stylization results. +- We propose a novel progressive training scheme of 3D style adaption. Cooperated with the delicately-designed spatial neural network, it enables learning realistic 3D cartoon avatars with deformed geometry and stylized texture. + +# 2. Related Work + +2D Portrait Stylization. In the deep neural network based portrait stylization, there are two types of approaches, i.e., image-to-image translation and StyleGAN based translation. Methods [21, 24] conduct face-to-cartoon translation by adopting the framework of cycleGAN [60]. Nevertheless, training such methods requires extensive data and may still generate unstable results. StyleGAN [16, 17] has become a popular alternative for portrait stylization due to its strong capacity for latent inversion and style control. [30] proposes a calibration framework to adapt the original training distribution for fine-grained translation. [53] leverages the mid- and high-resolution layers of StyleGAN to render high-quality artistic portraits based on the multi-scale content features to better preserve details. Although high-quality results have been shown, these methods cannot handle extreme face angle while maintaining cross-view consistency. + +Neural Implicit Fields. Recently, neural implicit functions have emerged as an effective representation to model conventional 3D scenes due to its continuous nature. This representation has been successfully adopted to shape model- + +![](images/7451c05f14cfdda6536561d10e264702e5a878e43da945522c450760650656a9.jpg) +Figure 2. An overview of the proposed framework. 
Our method first learns a photorealistic field built-upon spatial neural representation (SNR) using dense input views, then transfers this prior representation to artistic ones with few-shot stylized views by adapting underlying 3D structures. SNR is constructed by a geometry field for SDF surface, an appearance field for observed color, and a texture field for albedo color, respectively. The progressive training scheme is adopted to enable SNR to learn about stylized geometry and texture in an explicit and structured manner. + +![](images/849c9135f9defd35b372ddc6d31c4785b906add425fb7c11d37fd904953665ef.jpg) + +![](images/d861cb72c80ec9f08a4376eebdcce5f4141cf2707931a0a448a4edb4156894cf.jpg) + +ing [12, 39], novel view synthesis [29, 32] and multi-view 3D reconstruction [49, 54]. The method of Neural Radiance Fields (NeRF) [32], in particular, has attracted significant attention for its ability to achieve photo-realistic novel view synthesis results by utilizing neural implicit functions together with volume rendering. A number of variants have been developed thereafter to fit with different scenarios and requirements, including quality improvement [4], fast rendering [33], dynamic scene capture [40] and generative models [5]. However, NeRF's estimated volume density does not admit accurate surface reconstruction, the recovered 3D geometry is far from satisfactory and can hardly be extracted as explicit materials. Recent works tackle the issue by combining implicit surface functions. [37] represents the surface by occupancy values and shrink the sample region of volume rendering during the optimization. [49] introduces signed distance functions (SDF) to represent the scene and can directly extract the surface as the zero-level set of the SDF with better accuracy. + +3D Avatar Stylization. 3D avatar stylization aims to generate stylized 3D avatars whose rendered images captured from different viewpoints match the specific style. Early methods are either mesh-driven [13] or rely on explicit parameterization [47]. More recently, [35, 48] exploit the flexibility of neural radiance field and propose a text-guided stylization approach that manipulates the reconstructed scenes with input text prompts. However, due to the limited expressiveness of natural languages, they can not generate highly-detailed results with arbitrary user-specific styles. Another stream of methods [2, 19, 20, 57] using 3D generative models [6, 38] have extended avatar stylization to 3D-aware domain adaption. However, inherited from + +their predecessors, these methods can not synthesize full-head avatars in $360^{\circ}$ , and perform badly with real-world out-of-domain data. In contrast, our method utilizes the implicit representation to model high-fidelity 3D avatars from captured portrait videos, which allows for superior view consistency and stable stylization. + +# 3. Method Description + +Given the short portrait video of a person captured with a monocular camera, we aim to generate the high-fidelity 3D stylized avatar of the person. The person stands still when recording the video. We denote the split frames of the video as $\{I_i | i = 1, \dots, N\}$ , where $i$ is the frame index, $N$ is the number of frames. For each frame, we use COLMAP to obtain the calibrated camera and the method proposed in [28] to extract the foreground human mask. + +The overview of the proposed framework is illustrated in Figure 2. 
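Since every stage consumes per-frame calibrated cameras and foreground masks, the capture preprocessing described above can be reproduced with off-the-shelf tools. The snippet below is a minimal, assumed sketch rather than the authors' released code: it drives the standard COLMAP command-line programs for calibration and delegates matting to a hypothetical `segment_foreground` callable standing in for the matting model of [28]; the PNG frame layout is also an assumption.

```python
import subprocess
from pathlib import Path

def calibrate_with_colmap(image_dir: str, workspace: str) -> Path:
    """Standard COLMAP SfM pipeline: feature extraction -> exhaustive matching -> sparse mapping.
    Returns the directory holding the estimated intrinsics and per-frame poses."""
    ws = Path(workspace)
    ws.mkdir(parents=True, exist_ok=True)
    db, sparse = ws / "database.db", ws / "sparse"
    sparse.mkdir(exist_ok=True)
    subprocess.run(["colmap", "feature_extractor",
                    "--database_path", str(db), "--image_path", image_dir], check=True)
    subprocess.run(["colmap", "exhaustive_matcher",
                    "--database_path", str(db)], check=True)
    subprocess.run(["colmap", "mapper", "--database_path", str(db),
                    "--image_path", image_dir, "--output_path", str(sparse)], check=True)
    return sparse

def extract_masks(image_dir: str, mask_dir: str, segment_foreground) -> None:
    """Write one binary foreground mask per frame; `segment_foreground` is a
    placeholder for any portrait matting model (e.g., the one of [28])."""
    out = Path(mask_dir)
    out.mkdir(parents=True, exist_ok=True)
    for frame in sorted(Path(image_dir).glob("*.png")):
        mask = segment_foreground(frame)   # assumed to return a PIL.Image-like binary mask
        mask.save(out / frame.name)
```

Only the calibrated cameras, the RGB frames and these masks enter the training stages described next.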
3DToonify aims to learn the stylized human neural field by adapting 3D structures in a progressive training scheme. This scheme is built upon a spatial neural representation, which utilizes disentangled implicit fields to capture the underlying 3D structures such as geometry and texture (Section 3.1). We first leverage the geometric guidance from a multi-view stereo to learn a robust photorealistic representation, acting as a source prior (Section 3.2). Then this prior representation is adapted to the style domain with adaptive geometry deformation (Section 3.3.1) and decomposed albedo colors (Section 3.3.2). In this way, the stylized human avatar field can be constructed by SNR with transformed underlying structures, thus allowing for fully stylized results and 3D consistent rendering in arbitrary viewpoints. + +# 3.1. Spatial neural representation + +The proposed spatial neural representation (SNR) is based on neural radiance field (NeRF) [32], which can be seen as a continuous 5D function that maps a 3D position $\mathbf{x}$ and a viewing direction $\mathbf{v}$ to an emitted color $\mathbf{c} = (r, g, b)$ and a volume density $\sigma$ . NeRF is approximated by a multi-layer perceptron (MLP) $F_{\theta}: (\mathbf{x}, \mathbf{v}) \rightarrow \mathbf{c}, \sigma$ . SNR consists of three MLPs $F_{geo}, F_{app}$ and $F_{tex}$ , representing the decomposed fields of geometry, the observed appearance color and the albedo texture color, respectively. + +Geometry field learns a function $F_{geo} : \mathbb{R}^3 \to \mathbb{R}$ that maps a spatial point $\mathbf{x} \in \mathbb{R}^3$ to its signed distance value $\mathcal{G}$ to the object surface. It constructs the underlying object surface by encoding a signed distance function (SDF) of only location $\mathbf{x}$ . In order to be compatible with the rendering procedure of the radiance field, a probability function $\psi(\cdot)$ proposed by [49] is used to calculate the point weight $w$ from the signed distance value $\mathcal{G}$ , where $\psi(\cdot)$ denotes an unbiased and occlusion-aware approximation. With this implicit SDF representation, the explicit object surface $S$ can be easily extracted by the zero level-set of the $SDF : S = \{\mathbf{x} \in \mathbb{R}^3 | \mathcal{G}(\mathbf{x}) = 0\}$ . + +Appearance field learns a function $F_{app} : \mathbb{R}^3 \times \mathbb{S}^2 \to \mathbb{R}^3$ to encode the observed colors $\mathbf{c}_{app}$ associated with the point $\mathbf{x} \in \mathbb{R}^3$ and the view direction $\mathbf{v} \in \mathbb{S}^2$ . The feature vectors $F(\mathbf{x})$ derived from $F_{geo}$ are also concatenated as the inputs. To better approximate the appearance colors of the object captured in read-world scenes, $F_{app}$ is introduced as a function of both location and viewing direction, thus allowing learning view-dependent RGB colors for multi-view images. Notably, the learned representation in $F_{app}$ could be degraded into reflection components $\mathbf{s}$ , which are caused by illumination and vary with view directions. It will be adaptively changed in the later training stage (see the detailed discussion in Section 3.3.2). + +Texture field learns a function $F_{tex} : \mathbb{R}^3 \to \mathbb{R}$ to encode the albedo color for the texture atlas $\mathbf{c}_{tex}$ associated with only the spatial location $\mathbf{x}$ . Similar to $F_{app}$ , feature vectors derived from $F_{geo}$ are concatenated as inputs. 
We encourage the texture representation to be multi-view consistent by restricting $F_{tex}$ being a function of only $\mathbf{x}$ , while allowing the final color $\mathbf{c} = \mathbf{s} \circ \mathbf{c}_{tex}$ to be view-dependent to satisfy different view observations, where $\circ$ denotes element-wise multiplication. With the nature of view-independent representation of $F_{tex}$ , explicit textures can be obtained by accumulating the volume albedo colors. + +The proposed geometry field and texture field are formulated in a view-independent function, once being effectively learned, they can express spatial attributes shared by the entire 3D space. This enables editable 3D structures with only few-shot stylized views needed in the later adaption process. + +![](images/ac93a99053c6774a73519ac4ada0afbd4d6bbcb0072cac4b917e309346da142e.jpg) +(a) Stage I +Figure 3. Visualized results in stage I, II, III. + +![](images/42cc2a07b00e0bf76703a28ec4574e850b5dfbc3702c5f880f66eda604ca39ee.jpg) +(b) Stage II + +![](images/1dd871b05424d5ce98197c527cd4f4940ef247b220cddcbd23ea978f1eefffeb.jpg) +(c) Stage III + +# 3.2. MVS guided prior learning + +In this module, we learn the photorealistic representation as a prior for the later 3D style adaption. Due to the complexity of real-world captures caused by illumination, object materials, etc., the reconstructed results can easily suffer from noisy surfaces and irregular holes. Observing that the geometry directly extracted by multi-view stereo (MVS) methods are generally accurate with only local noises, we propose to integrate the depth information estimated by MVS as a geometric guidance for surface reconstruction. + +Accumulated depth guidance. Volume rendering has been proven effective to enable robust supervision using 2D image observations. Following this, we render the depth map with $K$ points along the emitted ray and use the corresponding 2D depth value for supervision. The ray can be parametrized as $r(i) = o + d_i\mathbf{v}$ , where $o$ is the center of the camera and $\mathbf{v}$ is the direction of the ray. The depth $\hat{D}(r)$ from the geometry field can be computed by: + +$$ +\hat {D} (r) = \sum_ {i = 1} ^ {K} \left(T _ {i} \alpha_ {i} d _ {i}\right), \tag {1} +$$ + +where $T_{i}$ is the accumulated transmittance defined by $\Pi_{j = 1}^{i + 1}(1 - \alpha_j)$ , and $\alpha_{j}$ denotes the discrete opacity value computed by $\alpha_{j} = \max (\frac{\Phi_{s}(s_{i}) - \Phi_{s}(s_{i} + 1)}{\Phi_{s}(s_{i})},0)$ , in which $\Phi$ is the cumulative distribution of logistic distribution. More details about conversion from the SDF distance to the opacity can be found in NeuS [49]. For a batched training ray $r\in R$ , the accumulated depth loss can be formulated as: + +$$ +L _ {\text {d e p t h}} = \sum_ {r \in R} | | M (r) (\hat {D} (r) - D (r)) | | _ {1}, \tag {2} +$$ + +where $M(r) \in \{0,1\}$ is the object mask value and $D(r)$ is the supervised depth value. + +Depth-sampled surface guidance. Except for the depth constraint on spatial accumulated points, we also leverage points sampled from the depth image $I_{D}$ to guide the construction of the SDF surface. The surface loss encourages these sampled 3D points being close to the object surface and $L_{sur}$ can be formulated as: + +$$ +L _ {s u r} = \sum_ {\mathbf {x} _ {d} \in I _ {D}} | | F _ {g e o} (\mathbf {x} _ {d}) | | _ {1}. \tag {3} +$$ + +Training. 
Training. Given a set of portrait images and their camera parameters, we train the architecture with the geometry field and the appearance field using the following loss function:

$$
L_{prior} = L_{color} + \lambda_{mvs} L_{mvs} + \lambda_{mask} L_{mask} + \lambda_{reg} L_{reg}, \tag{4}
$$

where $\lambda$ denotes the weight of each corresponding loss. The MVS guided loss is computed as $L_{mvs} = L_{depth} + L_{sur}$. The color reconstruction loss $L_{color}$ is calculated as the distance between the accumulated color $\hat{C}(r)$ and the observed color $C(r)$ of $I$:

$$
L_{color} = \sum_{r \in R} \left\| M(r)\left(\hat{C}(r) - C(r)\right) \right\|_1 , \tag{5}
$$

where $\hat{C}(r)$ can be computed by $\sum_{i=1}^{K} T_i \alpha_i c_i$, and $c_i$ denotes the volumetric color produced by the appearance field $F_{app}$. To focus on human reconstruction, we also define a mask term with the binary cross-entropy loss:

$$
L_{mask} = \mathrm{BCE}\left(\hat{M}(r), M(r)\right), \tag{6}
$$

where $\hat{M}(r) = \sum_{i=1}^{K} T_i \alpha_i$ is the accumulated opacity along the ray. The Eikonal loss [12] used to regularize the SDF values is defined as

$$
L_{reg} = \sum_{k} \left( \left\| \nabla_{\mathbf{x}_k} F_{geo}(\mathbf{x}_k) \right\|_2 - 1 \right)^2 . \tag{7}
$$

Visualized results of this stage are shown in Figure 3 (a). Not only is the radiance field with accumulated colors learned, but the inherent geometry is also accurately decomposed. The high-quality reconstruction learned in this stage paves the way for the next stage of style adaption with few-shot 2D stylized portraits.

# 3.3. Spatial representation adaption

With the constructed photorealistic representation, we then transform it to the style domain by progressively adapting the underlying 3D structures. We first adaptively learn the faithfully deformed geometry without interference from the albedo texture module, and then decompose albedo colors from the observed ones with the geometric structures fixed. This enables effective 3D structure disentanglement with more accurate surfaces and cleaner textures.

# 3.3.1 Geometry adaption

In this stage, we utilize a number of stylized 2D portrait images $I_{t}$, derived from existing 2D portrait stylization methods [30, 53], to fine-tune the geometry field $F_{geo}$ and the appearance field $F_{app}$. The spatially-shared geometry is adaptively transformed in $F_{geo}$, and the observed colors that vary with view are modeled in $F_{app}$, enabling the network to focus on geometry adaption. During training, the pixel colors of $I_{t}$ are used as the observed colors to guide the accumulated volume colors:

$$
L_{color} = \sum_{r \in R} \left\| M(r)\left(\hat{C}(r) - C_t(r)\right) \right\|_1 , \tag{8}
$$

where $\hat{C}(r)$ is computed from the volumetric color of $F_{app}$ and the opacity converted from $F_{geo}$. The total training loss is formulated as:

$$
L_{geo} = L_{color} + \lambda_{mask} L_{mask} + \lambda_{reg} L_{reg}. \tag{9}
$$

As shown in Figure 3 (b), the spatially deformed geometry can be extracted from $F_{geo}$. However, the rendering results are 3D-inconsistent, with obvious artifacts in side-view renderings, since only a few 2D stylizations of frontal views are provided for style adaption and the view-dependent function $F_{app}$ trivially fits these views.
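Seen together, the stage-wise objectives in Eqs. (4)-(9) reuse the same masked color, mask and Eikonal terms and differ mainly in whether the MVS guidance is included and which target colors supervise the rendering. Below is a minimal sketch of this assembly under that reading; the batch averaging, the helper signatures and the clamping constants are assumptions of the example rather than the exact training code (the weights follow the values reported in Section 4).

```python
import torch
import torch.nn.functional as F

def masked_l1(c_pred, c_gt, mask):
    # Masked L1 color distance used in Eqs. (5), (8) and (13),
    # averaged over the ray batch instead of summed.
    return (mask[..., None] * (c_pred - c_gt)).abs().sum(dim=-1).mean()

def eikonal_loss(f_geo, pts):
    # Eq. (7): SDF gradients should have unit norm at sampled points.
    pts = pts.requires_grad_(True)
    sdf = f_geo(pts)[..., :1]
    grad = torch.autograd.grad(sdf.sum(), pts, create_graph=True)[0]
    return ((grad.norm(dim=-1) - 1.0) ** 2).mean()

def stage_loss(stage, c_pred, c_gt, m_pred, m_gt, f_geo, pts, l_mvs=None,
               w_mvs=0.5, w_mask=0.5, w_reg=0.1):
    # Stage I, Eq. (4): color + MVS guidance + mask + Eikonal terms.
    # Stage II, Eq. (9): same assembly without the MVS term,
    # with stylized images providing the target colors c_gt.
    loss = masked_l1(c_pred, c_gt, m_gt)
    loss = loss + w_mask * F.binary_cross_entropy(
        m_pred.clamp(1e-4, 1.0 - 1e-4), m_gt)           # Eq. (6)
    loss = loss + w_reg * eikonal_loss(f_geo, pts)       # Eq. (7)
    if stage == "prior":
        loss = loss + w_mvs * l_mvs                      # Eqs. (2) + (3)
    return loss
```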
# 3.3.2 Albedo texture adaption and optimization

In this stage, we aim to learn the spatially-shared texture field $F_{tex}$ by decomposing the albedo colors from the appearance ones. Specifically, we insert $F_{tex}$ as a view-independent texture field and jointly optimize $F_{tex}$ and $F_{app}$. In this way, view-consistent colors can be effectively decomposed from the total appearance, and the remaining components in $F_{app}$ are regarded as view-dependent reflections. The final color is computed as $\tilde{c}_i = \mathbf{s} \circ c_i'$, where $c_i'$ is the albedo color from $F_{tex}$ and $\mathbf{s}$ is the reflection component from $F_{app}$ for spatial points. We can then obtain the final accumulated color by

$$
\tilde{C}(r) = \sum_{i=1}^{K} T_i \alpha_i \tilde{c}_i . \tag{10}
$$

To further ensure effective albedo color decomposition, a discriminator $D$ is introduced to encourage $\tilde{C}(r)$ to match the approximate palette-color distribution of $I_t$. With $\kappa$ as a posterize filter, the patch color $\kappa(C_t(p))$ of $I_t$ is fed into $D$ as a real sample, and the reconstructed color $\tilde{C}(p)$ from $F_{tex}$ is fed into $D$ as a fake sample, where $p$ is the set of rays for the image pixels in a patch. We define the discrimination loss $L_{ds}$ to penalize the distance between the distributions of $\kappa(C_t(p))$ and $\tilde{C}(p)$ as:

$$
L_{ds} = \mathbb{E}_{p \sim \{I_t^i\}}\left[ \log D\left(\kappa(C_t(p))\right) \right] + \mathbb{E}_{p \sim \{I_t^i\}}\left[ \log\left(1 - D(\tilde{C}(p))\right) \right]. \tag{11}
$$

To keep the learned geometry faithful to the given style, we fix $F_{geo}$ and train $\{F_{app}, F_{tex}\}$ with the following training loss:

$$
L_{tex} = L_{color} + \lambda_{mask} L_{mask} + \lambda_{reg} L_{reg} + \lambda_{ds} L_{ds}, \tag{12}
$$

where $L_{color}$ denotes the distance between the final accumulated color $\tilde{C}(r)$ and the observed stylized color $C_t(r)$:

$$
L_{color} = \sum_{r \in R} \left\| M(r)\left(\tilde{C}(r) - C_t(r)\right) \right\|_1 . \tag{13}
$$

![](images/97e2a9916a549fbebbfe3d1a0716a84b27836cd1ae0257bcb0d633e8112a68ba.jpg)

![](images/87dd30b9d003ee603f0cc7f8a379059a37ce64da01247fce5b8b16c06dec645f.jpg)

![](images/0db51f534d021605dd826d0ec8811bfa2188f7cec0e9bafc4bf30548b4f4652d.jpg)

![](images/38d6359d7526f8914559ea06e323dbe203d9062ddcb7672f996eb9be9c464733.jpg)
(c) Exported meshes
Figure 4. Stylized results in novel views and corresponding exported meshes.

We show rendering results of this stage in Figure 3 (c), demonstrating their 3D consistency in a multi-view setting. Thanks to the spatially-shared colors learned in the view-independent $F_{tex}$, the albedo texture can be seamlessly extracted and further enhanced in an explicit manner.
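A rough sketch of this adversarial albedo decomposition is shown below. The patch discriminator `D` (assumed to output logits), the simple quantization standing in for the posterize filter $\kappa$, and the non-saturating generator term are assumptions of this example; Eq. (11) above is stated in the standard saturating GAN form.

```python
import torch
import torch.nn.functional as F

def compose_final_color(albedo, reflection):
    # Final color: element-wise product of the view-dependent reflection from
    # F_app and the view-independent albedo from F_tex (c_tilde = s * c').
    return reflection * albedo

def posterize(patch, levels=8):
    # Stand-in for the posterize filter kappa: quantize colors in [0, 1]
    # to a coarse palette approximating the flat color regions of the style.
    return torch.round(patch * (levels - 1)) / (levels - 1)

def albedo_decomposition_losses(D, c_tilde_patch, c_style_patch):
    # Eq. (11): posterized stylized patches are real, rendered patches are fake.
    bce = F.binary_cross_entropy_with_logits
    real_logits = D(posterize(c_style_patch))
    fake_logits = D(c_tilde_patch.detach())
    d_loss = bce(real_logits, torch.ones_like(real_logits)) + \
             bce(fake_logits, torch.zeros_like(fake_logits))
    # Non-saturating generator term pushing rendered patches toward the palette.
    g_loss = bce(D(c_tilde_patch), torch.ones_like(fake_logits))
    return d_loss, g_loss
```

Here `g_loss` plays the role of $\lambda_{ds} L_{ds}$ inside Eq. (12), while `d_loss` would update the discriminator in a separate step.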
# 4. Experimental Results

Implementation details. Our network architecture consists of three modules: the signed distance function $F_{geo}$, the appearance function $F_{app}$ and the texture function $F_{tex}$, which are modeled by three MLPs with 8, 6 and 6 hidden layers, respectively. Positional encoding [32] and sphere initialization [3] are also applied, similar to [49]. For the depth priors, we adopt OpenMVS [1] to extract estimated depth maps from the input video. For the 2D style translator, we adopt DCT-Net [30] and VToonify [53] to produce target stylized images and keep forward/backward-facing results whose absolute yaw angle is less than 0.2 radians for supervision. We use the Adam optimizer [22] with a learning rate of 2.5e-5 to train our models and sample 512 rays for each batch. The loss weights are shared across the three stages, with $\lambda_{mask}$, $\lambda_{mvs}$, $\lambda_{reg}$ and $\lambda_{ds}$ set to 0.5, 0.5, 0.1 and 1, respectively. Stages I, II and III are trained for 300k, 200k and 50k iterations, respectively, taking around 20 hours in total on a single NVIDIA Tesla V100 GPU.

Datasets. We create a $360^{\circ}$ captured portrait dataset called Portrait360 to evaluate our approach. This dataset contains 14 static portrait videos captured by rotating the camera around the human head. All videos have a length of 20 to 30 seconds and are split into 300 frames as source training data.

# 4.1. 3D portrait stylization

Performance on view-consistent rendering. Given a short portrait video captured by a monocular camera, our model learns a stylized 3D representation from 2D portrait frames. Stylized portrait images can be generated from arbitrary novel viewpoints following exemplar styles, while preserving the facial identity of the person and 3D consistency between different views. Note that the synthesized images in this part are produced directly by volume rendering on implicit functions, without any explicit style enhancement applied to the results. Our stylized avatars rendered from novel viewpoints and their corresponding exported meshes are shown in Figure 4; more results can be found in the supplementary material.

Comparison with 3D avatar stylization methods. In this section, we compare our method with two 3D avatar stylization methods, DeformToon3D [57] and NeRF-Art [48], which represent the state-of-the-art techniques in 3D-aware generative toonification and text-guided NeRF stylization, respectively.

Qualitative comparison. Here we adopt VToonify [53] to generate target stylized images with selected exemplars to train our model. For DeformToon3D [57], we use the author-provided code and train the model using data generated with the same exemplars by DualStyleGAN [52], which is also the 2D generator used in VToonify. We directly generate its real-space and style-space results under the same sampled instance code, since the additional PTI [43] process would cause accumulated fidelity errors, especially on arbitrary real faces. For NeRF-Art [48], as it does not support using a single exemplar image for style guidance, we use MiniGPT-4 [59] to generate style descriptions corresponding to each target image. The input text prompts used in this section are shown in the supplementary material. We demonstrate the qualitative comparison of the three methods in Figure 5.

![](images/7b0389739f52ae38cc292df12b3df9b1f07a94238a2afe532f28e8bb53708ba5.jpg)
Figure 5. Qualitative comparison with 3D avatar stylization methods. We directly compare the generated real-space and style-space results of DeformToon3D to alleviate the fidelity loss in the additional PTI process. The models of NeRF-Art and Ours are trained on our Portrait360 dataset. Four views are selected for comparison.

![](images/00980e90a2c39b88c259c59edc030d6ce211cc762bc1c4fade063efffab5b2cc.jpg)
Figure 6. Qualitative comparison with 2D portrait stylization methods on view-consistent rendering. For a more prominent video comparison, please refer to the supplementary video.
DeformToon3D only focuses on frontal views and fails to generate plausible renderings under large angles. Besides, it tends to synthesize overly exaggerated results and fails to maintain the facial characteristics (e.g., hairstyles) of the original image. NeRF-Art only generates results with undesired stylized textures and weakly-changed underlying structures. On the contrary, our method can generate fine-grained full-head stylized avatars with view-consistent renderings and exaggerated styles.

Quantitative comparison. For quantitative comparison, we measure the quality of multi-view stylized renderings of all methods by calculating the Fréchet Inception Distance (FID) [14] against the training cartoon exemplar dataset. A lower FID score indicates that the distribution of the generated images is closer to that of real 2D cartoon faces. We also evaluate the fidelity of all methods in 3D style adaption using the identity preservation (IP) metric, which is calculated as the ArcFace [9] feature similarity between the input image and the stylized result. As shown in Table 1, our method outperforms the other two methods in both FID and identity preservation, which showcases our ability to generate high-quality stylized results while staying faithful to the original human identity.

Table 1. Quantitative comparison with 3D avatar stylization methods on FID and IP. $\uparrow$ and $\downarrow$ denote whether higher or lower is better.

| Method | DeformToon3D | NeRF-Art | Ours |
| --- | --- | --- | --- |
| FID $\downarrow$ | 66.5 | 78.8 | 57.6 |
| IP $\uparrow$ | 0.551 | 0.671 | 0.678 |

Comparison with 2D portrait stylization methods. In this section, we compare our method with two state-of-the-art 2D portrait stylization methods, VToonify [53] and DCT-Net [30], to further demonstrate our ability to generate 3D-consistent and high-quality stylized results for arbitrary views.

Qualitative comparison. Due to the incapability of 2D portrait stylization methods to synthesize novel-view results, we only make comparisons under reconstructed views captured in the input video. For both VToonify and DCT-Net, frames are directly input into the trained/finetuned models released by the authors to obtain the corresponding stylized images. Then we select their forward/backward results as sparse-view supervision to train our models (denoted as Ours-V and Ours-D, respectively). As illustrated in Figure 6, VToonify and DCT-Net fail to synthesize exaggerated geometry effects in challenging viewpoints (e.g., side faces) and are unable to maintain 3D view consistency. Note that these extreme-view results are not used as supervision in our style adaption process. On the contrary, our method can easily render style-faithful and robust results in a 3D-consistent manner. This showcases the importance of learning underlying 3D structures in maintaining the view consistency of the stylized avatar.

Quantitative comparison. We also measure the quality of our rendering results against VToonify [53] and DCT-Net [30] using FID [14]. We use data from our Portrait360 dataset as source images and remove failure cases of the 2D methods. As shown in Table 2, both of our models produce better results with lower FID values compared with the original 2D methods. To further evaluate the stylization ability of handling views from the entire 3D space, we propose to calculate 3D validity by computing the conversion rate of successfully stylized results over the whole dataset. 2D methods rely on detected facial landmarks, and failed conversions can be automatically recognized. Compared to 2D methods, our method can handle more challenging poses in the entire 3D space with higher 3D validity.

Table 2. Comparison of FID and 3D validity with 2D portrait stylization methods.

| Method | DCT-Net | Ours-D | VToonify | Ours-V |
| --- | --- | --- | --- | --- |
| FID $\downarrow$ | 126.1 | 94.7 | 86.9 | 57.6 |
| 3D validity $\uparrow$ | 0.54 | 1.00 | 0.62 | 1.00 |

# 4.2. Ablation study

In addition to the visualized results in Figure 3, we verify the effectiveness of the proposed module in each stage by evaluating the performance of the corresponding variants of our method. The qualitative and quantitative results are shown in Figure 7 and Table 3, respectively.

Table 3. Ablation of the progressive training scheme. Results verify the effectiveness of the proposed module in each stage.

| Variants | w/o Prior | w/o GA | w/o TA | w/o PSA | full model |
| --- | --- | --- | --- | --- | --- |
| FID $\downarrow$ | 98.7 | 105.2 | 96.7 | 96.2 | 94.7 |

MVS guided prior learning. We train a model without photorealistic prior learning and directly learn the spatial neural representation from stylized portrait images. It is difficult for inverse rendering to produce valid geometry and texture from unreal, 3D-inconsistent stylized observations, as shown in Figure 7 (b). This indicates that the reconstruction prior is crucial for generating plausible underlying structures in 3D style adaption. The design of MVS guidance also helps to reconstruct more robust surfaces without the holes caused by illumination noise in complicated real-world scenes (see Figure 7 (g)).

Progressive structure adaption (PSA). By removing the PSA proposed in Section 3.3, we jointly learn the geometry and texture adaption with the full SNR network.
Results in Figure 7 (c) show that training $F_{geo}$ and $F_{tex}$ simultaneously disrupts the disentanglement between them. Progressive adaption brings more accurate surfaces and seamless textures.

![](images/8b9c6889554011bdefb35c533c4ffd26c26ed60aaa6d0dd76efacaf37bdf6f52.jpg)

![](images/a87768f105b6969d347893f7e9a3d34fc209a24060266d421d08b339b9a35021.jpg)

![](images/c4a065f929414ee1e25f23d23a68c4685e3b143df51c3a279fb84a72c24b112b.jpg)

![](images/bcda2f9545e0dcd647052abd390aca4ca3797ae88b3274072c1e97c5bcaf782e.jpg)

![](images/83dfa2ac84f715e612a032aa1493abe8f9da9509a6e834406d9f004e61b128f4.jpg)

![](images/73c4ed6b6cdad5ab56c56525eba63cca0796fef43b19f8fe5115cf4ba8d9bc16.jpg)

![](images/1b40db43918fb83bbf66ea047d3c9e6bcc737afd6588aa7f67b982b6d1bd12f5.jpg)

![](images/9f78499e2ff5d5bccfa113836857c45ac50c70fe12cd007cb314d79c5d73ee68.jpg)

![](images/c8b7d7c0433bfca7df145d340e3d9eb0218a294f110a0d1794c1f487e89e3154.jpg)

![](images/282cda70d8623c7120ec7daf2803a47b35aa4f9319963b6d45202ae82104e2d7.jpg)

![](images/50c81616580c8d1b5925a3be3ecfb40c0336cc02eb13af832c60fc52069248f6.jpg)
(a) Full model

![](images/5485069b49665032a6bdb670172eb35e6f032625f5db462a43c5ad4b27175683.jpg)
(b) w/o Prior

![](images/1a4f5dd617dc920eba73afff47b112c3f9cad4552d0c58375cbf9b03af790072.jpg)
(c) w/o PSA

![](images/4b87003ac0bfc3f40a6ac2c42dc28c5092fa07aadac5cb650862785538e218ca.jpg)
(d) w/o GA

![](images/5e367f77429cc6fc4af0ace1dc9d73ee20867897c34325dbf50293fedbefcf44.jpg)
(e) w/o TA

Figure 7. Effects of the proposed prior learning (Prior), progressive structure adaption (PSA), geometry adaption (GA), texture adaption (TA), style enhancement (SE) and MVS guidance (MVS).

Geometry and texture adaption. Figure 7 (d, e) verifies the necessity of geometry adaption (GA) and texture adaption (TA), respectively. In contrast to explicit texture stylization, GA enables the internal surface to be deformed adaptively, thus making 3D portraits fully stylized. Without TA, the inferred vertex colors from the appearance field suffer from noticeable artifacts, due to the inconsistent observed colors from different views. TA introduces an extra texture field that automatically decomposes albedo colors shared in 3D space, thus alleviating the texture seaming issue. Besides, we explore adding additional style enhancement (SE) on the explicit texture map extracted from the texture field, which further brings more vivid stylization effects (Figure 7 (f)). We also show the impact of the number of stylized frames used for the adaption stages in the supplemental material.

# 5. Conclusion

In this paper, we handled the challenging and ongoing task of synthesizing a high-fidelity stylized 3D avatar from a portrait video under the guidance of a single style image. We showed that the naive combination of portrait style transfer and 3D reconstruction techniques does not work well for this task, and proposed a novel framework called 3DToonify that learns 3D style adaption based on spatial neural representations (SNR). We introduced a delicately-designed spatial neural network for disentangled geometry and texture adaption. We also proposed a novel progressive training scheme suitable for the SNR to accurately capture the underlying stylized 3D structures. Both qualitative and quantitative experimental results demonstrated that our method enables fine-grained 3D avatar stylization with view consistency and diverse exaggerated results.

# References

[1] OpenMVS. [EB/OL].
https://github.com/cdcseacave/openMVS/.6 +[2] Rameen Abdal, Hsin-Ying Lee, Peihao Zhu, Mengei Chai, Aliaksandr Siarohin, Peter Wonka, and Sergey Tulyakov. 3davatargan: Bridging domains for personalized editable avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4552-4562, 2023. 2, 3 +[3] Matan Atzmon and Yaron Lipman. Sal: Sign agnostic learning of shapes from raw data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2565-2574, 2020. 6 +[4] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2, 3 +[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 3 +[6] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 2, 3 +[7] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. arXiv preprint arXiv:2303.13873, 2023. 2 +[8] Pei-Ze Chiang, Meng-Shiun Tsai, Hung-Yu Tseng, WeiSheng Lai, and Wei-Chen Chiu. Stylizing 3d scene via implicit representation and hypernetwork. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1475-1484, 2022. 2 +[9] Jiankang Deng, Jia Guo, Xue Niannan, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, 2019. 7 +[10] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 2 +[11] Leon A Gatys, Alexander S Ecker, and Matthias Bethge. Image style transfer using convolutional neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2414-2423, 2016. 2 +[12] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. arXiv preprint arXiv:2002.10099, 2020. 3, 5 +[13] Fangzhou Han, Shuquan Ye, Mingming He, Menglei Chai, and Jing Liao. Exemplar-based 3d portrait stylization. IEEE Transactions on Visualization and Computer Graphics, 2021. 3 + +[14] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 7, 8 +[15] Yukun Huang, Jianan Wang, Ailing Zeng, He Cao, Xianbiao Qi, Yukai Shi, Zheng-Jun Zha, and Lei Zhang. Dreamwaltz: Make a scene with complex 3d animatable avatars. arXiv preprint arXiv:2305.12529, 2023. 2 +[16] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 
2 +[17] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 2 +[18] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG), 42(4):1-14, 2023. 2 +[19] Gwanghyun Kim and Se Young Chun. Datid-3d: Diversitypreserved domain adaptation using text-to-image diffusion for 3d generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14203-14213, 2023. 3 +[20] Gwanghyun Kim, Ji Ha Jang, and Se Young Chun. Podia-3d: Domain adaptation of 3d generative model across large domain gap using pose-preserved text-to-image diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 22603–22612, 2023. 3 +[21] Junho Kim, Minjae Kim, Hyeonwoo Kang, and Kwang Hee Lee. U-gat-it: Unsupervised generative attentional networks with adaptive layer-instance normalization for image-to-image translation. In International Conference on Learning Representations, 2020. 2 +[22] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6 +[23] Nikos Kolotouros, Thiemo Alldieck, Andrei Zanfir, Eduard Gabriel Bazavan, Mihai Fieraru, and Cristian Sminchisescu. Dreamhuman: Animatable 3d avatars from text. arXiv preprint arXiv:2306.09329, 2023. 2 +[24] Bing Li, Yuanlue Zhu, Yitong Wang, Chia-Wen Lin, Bernard Ghanem, and Linlin Shen. Anigan: Style-guided generative adversarial networks for unsupervised anime face generation. IEEE Transactions on Multimedia, 2021. 2 +[25] Tingting Liao, Hongwei Yi, Yuliang Xiu, Jiaxaing Tang, Yangyi Huang, Justus Thies, and Michael J Black. Tada! text to animatable digital avatars. arXiv preprint arXiv:2308.10899, 2023. 2 +[26] Yiyi Liao, Katja Schwarz, Lars Mescheder, and Andreas Geiger. Towards unsupervised learning of generative models for 3d controllable image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5871-5880, 2020. 2 + +[27] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 300–309, 2023. 2 +[28] Jinlin Liu, Yuan Yao, Wendi Hou, Miaomiao Cui, Xuansong Xie, Changshui Zhang, and Xian-sheng Hua. Boosting semantic human matting with coarse annotations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8563-8572, 2020. 3 +[29] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 3 +[30] Yifang Men, Yuan Yao, Miaomiao Cui, Zhouhui Lian, and Xuansong Xie. Dct-net: domain-calibrated translation for portrait stylization. ACM Transactions on Graphics (TOG), 41(4):1-9, 2022. 1, 2, 5, 6, 7, 8 +[31] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4460-4470, 2019. 
2 +[32] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 2, 3, 4, 6 +[33] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, 2022. 2, 3 +[34] Thu Nguyen-Phuoc, Feng Liu, and Lei Xiao. Snerf: stylized neural implicit representations for 3d scenes. arXiv preprint arXiv:2207.02363, 2022. 2 +[35] Thu Nguyen-Phuoc, Gabriel Schwartz, Yuting Ye, Stephen Lombardi, and Lei Xiao. Alteredavatar: Stylizing dynamic 3d avatars with fast style adaptation. arXiv preprint arXiv:2305.19245, 2023. 2, 3 +[36] Thu H Nguyen-Phuoc, Christian Richardt, Long Mai, Yongliang Yang, and Niloy Mitra. Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in Neural Information Processing Systems, 33:6767-6778, 2020. 2 +[37] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5589-5599, 2021. 3 +[38] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 2, 3 +[39] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning con + +tinuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 165-174, 2019. 3 +[40] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 2, 3 +[41] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988, 2022. 2 +[42] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2 +[43] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. ACM Transactions on graphics (TOG), 42(1):1-13, 2022. 6 +[44] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2 +[45] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. +[46] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. 
Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 2 +[47] Shen Sang, Tiancheng Zhi, Guoxian Song, Minghao Liu, Chunpong Lai, Jing Liu, Xiang Wen, James Davis, and Linjie Luo. AgileAvatar: Stylized 3d avatar creation via cascaded domain bridging. 2022. 3 +[48] Can Wang, Ruixiang Jiang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Nerf-art: Text-driven neural radiance fields stylization. IEEE Transactions on Visualization and Computer Graphics, 2023. 2, 3, 6 +[49] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021. 2, 3, 4, 6 +[50] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. arXiv preprint arXiv:2305.16213, 2023. 2 +[51] Shiyao Xu, Lingzhi Li, Li Shen, Yifang Men, and Zhouhui Lian. Your3demooji: Creating personalized emojis via one-shot 3d-aware cartoon avatar synthesis. In SIGGRAPH Asia 2022 Technical Communications, pages 1-4. 2022. 2 + +[52] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Pastiche master: Exemplar-based high-resolution portrait style transfer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7693-7702, 2022. 6 +[53] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Vtoonify: Controllable high-resolution portrait video style transfer. arXiv preprint arXiv:2209.11224, 2022. 1, 2, 5, 6, 7, 8 +[54] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. Advances in Neural Information Processing Systems, 33:2492-2502, 2020. 3 +[55] Chi Zhang, Yiwen Chen, Yijun Fu, Zhenglin Zhou, Gang Yu, Billzb Wang, Bin Fu, Tao Chen, Guosheng Lin, and Chunhua Shen. StyleAvatar3d: Leveraging image-text diffusion models for high-fidelity 3d avatar generation. arXiv preprint arXiv:2305.19012, 2023. 2 +[56] Huichao Zhang, Bowen Chen, Hao Yang, Liao Qu, Xu Wang, Li Chen, Chao Long, Feida Zhu, Kang Du, and Min Zheng. Avatarverse: High-quality & stable 3d avatar creation from text and pose. arXiv preprint arXiv:2308.03610, 2023. 2 +[57] Junzhe Zhang, Yushi Lan, Shuai Yang, Fangzhou Hong, Quan Wang, Chai Kiat Yeo, Ziwei Liu, and Chen Change Loy. Deformtoon3d: Deformable neural radiance fields for 3d toonification. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9144-9154, 2023. 2, 3, 6 +[58] Kai Zhang, Nick Kolkin, Sai Bi, Fujun Luan, Zexiang Xu, Eli Shechtman, and Noah Snavely. Arf: Artistic radiance fields. In European Conference on Computer Vision, pages 717-733. Springer, 2022. 2 +[59] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 6 +[60] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In Proceedings of the IEEE international conference on computer vision, pages 2223-2232, 2017. 
2 \ No newline at end of file diff --git a/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/images.zip b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..4d4d2ed2f66691a75d13c3cf2de629797fd42b90 --- /dev/null +++ b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ffceb448ca89fe2d15875fb3498945fc42b35973d3e46d927883ab4829614a5 +size 634084 diff --git a/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/layout.json b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e7b18fc436bb4d0b89609b9285ec4ed6d0b2ff1e --- /dev/null +++ b/2024/3DToonify_ Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images/layout.json @@ -0,0 +1,10456 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 65, + 102, + 529, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 102, + 529, + 139 + ], + "spans": [ + { + "bbox": [ + 65, + 102, + 529, + 139 + ], + "type": "text", + "content": "3DToonify: Creating Your High-Fidelity 3D Stylized Avatar Easily from 2D Portrait Images" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "spans": [ + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "text", + "content": "Yifang Men" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "text", + "content": ", Hanxi Liu" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "text", + "content": ", Yuan Yao" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "text", + "content": ", Miaomiao Cui" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "text", + "content": ", Xuansong Xie" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "text", + "content": ", Zhouhui Lian" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "inline_equation", + "content": "^{2\\dagger}" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 88, + 160, + 510, + 189 + ], + "type": "text", + "content": "Institute for Intelligent Computing, Alibaba Group" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 122, + 190, + 471, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 190, + 471, + 204 + ], + "spans": [ + { + "bbox": [ + 122, + 190, + 471, + 204 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 122, + 190, + 471, + 204 + ], + "type": "text", + "content": "Wangxuan Institute of Computer Technology, Peking University, China" + } 
+ ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 50, + 219, + 116, + 415 + ], + "blocks": [ + { + "bbox": [ + 50, + 219, + 116, + 415 + ], + "lines": [ + { + "bbox": [ + 50, + 219, + 116, + 415 + ], + "spans": [ + { + "bbox": [ + 50, + 219, + 116, + 415 + ], + "type": "image", + "image_path": "da767aa80c93a5565c7fc9d733d21dcc827b6b033124b6ab68f1e8650a012ff7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 53, + 418, + 110, + 429 + ], + "lines": [ + { + "bbox": [ + 53, + 418, + 110, + 429 + ], + "spans": [ + { + "bbox": [ + 53, + 418, + 110, + 429 + ], + "type": "text", + "content": "(a) Input video" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 117, + 219, + 275, + 415 + ], + "blocks": [ + { + "bbox": [ + 117, + 219, + 275, + 415 + ], + "lines": [ + { + "bbox": [ + 117, + 219, + 275, + 415 + ], + "spans": [ + { + "bbox": [ + 117, + 219, + 275, + 415 + ], + "type": "image", + "image_path": "fd88199e8286ebe9ab4bc36991e5c5fd4ce50af1e43504a13b6561fdb0890581.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 164, + 418, + 242, + 429 + ], + "lines": [ + { + "bbox": [ + 164, + 418, + 242, + 429 + ], + "spans": [ + { + "bbox": [ + 164, + 418, + 242, + 429 + ], + "type": "text", + "content": "(b) 3D style adaption" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 277, + 219, + 543, + 414 + ], + "blocks": [ + { + "bbox": [ + 277, + 219, + 543, + 414 + ], + "lines": [ + { + "bbox": [ + 277, + 219, + 543, + 414 + ], + "spans": [ + { + "bbox": [ + 277, + 219, + 543, + 414 + ], + "type": "image", + "image_path": "923548eb85c654ebbe7c547e03b6e51dfadc1de6a918a579005c6a51189f3ac0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 325, + 418, + 503, + 430 + ], + "lines": [ + { + "bbox": [ + 325, + 418, + 503, + 430 + ], + "spans": [ + { + "bbox": [ + 325, + 418, + 503, + 430 + ], + "type": "text", + "content": "(c) View-consistent renderings in different styles" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 45, + 433, + 547, + 467 + ], + "lines": [ + { + "bbox": [ + 45, + 433, + 547, + 467 + ], + "spans": [ + { + "bbox": [ + 45, + 433, + 547, + 467 + ], + "type": "text", + "content": "Figure 1. Given a set of RGB portrait images captured by a monocular camera, our method can learn a photorealistic representation in neural implicit fields, and transfer it to artistic ones with underlying 3D structures changed. Multiple stylized results can be rendered from arbitrary novel viewpoints with consistent geometry and texture." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 143, + 477, + 192, + 489 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 477, + 192, + 489 + ], + "spans": [ + { + "bbox": [ + 143, + 477, + 192, + 489 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 498, + 288, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 498, + 288, + 666 + ], + "spans": [ + { + "bbox": [ + 45, + 498, + 288, + 666 + ], + "type": "text", + "content": "Visual content creation has aroused a surge of interest given its applications in mobile photography and AR/VR. 
Portrait style transfer and 3D recovery from monocular images as two representative tasks have so far evolved independently. In this paper, we make a connection between the two, and tackle the challenging task of 3D portrait stylization - modeling high-fidelity 3D stylized avatars from captured 2D portrait images. However, naively combining the techniques from the two isolated areas may suffer from either inadequate stylization or absence of 3D assets. To this end, we propose 3DToonify, a new framework that introduces a progressive training scheme to achieve 3D style adaption on spatial neural representation (SNR). SNR is constructed with implicit fields and they are dy" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 478, + 547, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 478, + 547, + 633 + ], + "spans": [ + { + "bbox": [ + 306, + 478, + 547, + 633 + ], + "type": "text", + "content": "namically optimized by the progressive training scheme, which consists of three stages: guided prior learning, deformable geometry adaption and explicit texture adaption. In this way, stylized geometry and texture are learned in SNR in an explicit and structured way with only a single stylized exemplar needed. Moreover, our method obtains style-adaptive underlying structures (i.e., deformable geometry and exaggerated texture) and view-consistent stylized avatar rendering from arbitrary novel viewpoints. Both qualitative and quantitative experiments have been conducted to demonstrate the effectiveness and superiority of our method for automatically generating exemplar-guided 3D stylized avatars." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 637, + 387, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 637, + 387, + 649 + ], + "spans": [ + { + "bbox": [ + 306, + 637, + 387, + 649 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 654, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 715 + ], + "type": "text", + "content": "Portrait style transfer [30, 53] aims to transform real face images into artistic 2D portraits in desired visual styles while maintaining personal identity. However, given a sequence of portrait images captured from different viewpoints, existing portrait style transfer methods are typically" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 674, + 154, + 685 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 674, + 154, + 685 + ], + "spans": [ + { + "bbox": [ + 58, + 674, + 154, + 685 + ], + "type": "text", + "content": "*Denotes equal contribution." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 59, + 685, + 246, + 694 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 685, + 246, + 694 + ], + "spans": [ + { + "bbox": [ + 59, + 685, + 246, + 694 + ], + "type": "text", + "content": "† Corresponding author. E-mail: lianzhouhui@pku.edu.cn." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 48, + 695, + 287, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 695, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 695, + 287, + 714 + ], + "type": "text", + "content": "This work was partially supported by National Natural Science Foundation of China (Grant No.: 62372015)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10127" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 288 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 288 + ], + "type": "text", + "content": "only effective for limited forward-facing photos and fails to maintain view consistency in 3D space. Essentially, existing methods only learn a style transfer between 2D features, and have no sense to 3D representations built on real-world objects. What if we can construct and stylize underlying 3D structures from captured 2D portrait images? See Figure 1 for an example. When stylized with 3D structures (i.e., geometry and texture), we can easily render view-free stylized portraits with 3D consistency and robust artistic results. This capacity will extremely facilitate the 3D content creation process which often requires large amounts of time and special expertise, and make it accessible to a variety of novice users. As shown in Figure 1, this paper aims to address the challenging task of generating high-fidelity 3D avatar from a portrait video by following the style of a given exemplar image. We refer this task as 3D portrait stylization – a marriage between portrait style transfer and 3D recovery from monocular images." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 291, + 289, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 291, + 289, + 615 + ], + "spans": [ + { + "bbox": [ + 47, + 291, + 289, + 615 + ], + "type": "text", + "content": "The naive solution to the task mentioned above is directly combining existing methods of 2D portrait stylization with 3D reconstruction, i.e., learning 3D representations such as voxels [36], primitives [26] or occupancy fields [31] directly from stylized portrait images. However, it is less effective due to the biased image manifold built by 2D portrait stylization, making the representation learning be ill-posed with highly-biased visible views. 
Recently, neural radiance field (NeRF) [4, 18, 32, 33, 40, 49] has made great progress due to its advanced ability to achieve photo-realistic novel view synthesis with sparse input views. Some previous attempts [8, 34, 35, 48, 58] also combine NeRF with image-based [11] or text-driven [42] neural style transfer to generate novel views of stylized 3D scenes or avatars. Recently, a series of new works have started to focus on 3D stylized avatar generation. Some methods [7, 15, 23, 25, 27, 41, 50, 56] exploit the great potential of 2D text-to-image diffusion models [44-46] to generate 3D cartoonish avatars according to a given text prompt. Others [2, 51, 55, 57] build on 3D generative models [6, 38] to bridge the gap between the real space and the target domain, and generate avatars with certain styles under a sampled latent vector. However, all these methods either can not achieve high-fidelity personalized 3D portrait stylization with user-specific identities and styles, or fail to generate fine-grained full-head avatars that support view-consistent rendering from arbitrary viewpoints." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 617, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 289, + 715 + ], + "type": "text", + "content": "To address the aforementioned challenges, we draw inspiration from domain adaption on 2D features [10, 30], and introduce a progressive training scheme to achieve 3D style adaption on spatial neural representation (SNR). The key insights of this design are twofold. First, it is hard to directly learn an accurate 3D representation field from stylized portraits with few-shot inconsistent 2D views, but easier to learn a photorealistic field as a prior and adapt it to" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 304, + 72, + 547, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 239 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 239 + ], + "type": "text", + "content": "target style fields with transfer learning. Second, learning spatial representation with disentangled surface and texture allows for flexible geometry deformation and texture adaption, leading to more diverse and fine-grained style editing. To this end, we construct SNR with neural implicit fields and dynamically optimize its subfields with a progressive training scheme. This scheme includes the following three stages: prior learning to obtain an accurate human reconstruction, geometry adaption to produce inherently exaggerated deformation, and texture adaption to realize artistic albedo decomposition. Eventually, the 2D portraits are converted to stylized SNR, and explicit 3D assets can be easily extracted with disentangled 3D structures. In summary, our contributions are threefold:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 306, + 240, + 545, + 431 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 306, + 240, + 545, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 240, + 545, + 312 + ], + "spans": [ + { + "bbox": [ + 306, + 240, + 545, + 312 + ], + "type": "text", + "content": "- We present a new method that adopts neural implicit fields to address the challenging task of generating high-fidelity 3D avatar from a portrait video by following the style of a given exemplar image. 
Stylized results can be rendered under arbitrary novel viewpoints with consistent geometry and texture." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 312, + 545, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 312, + 545, + 372 + ], + "spans": [ + { + "bbox": [ + 306, + 312, + 545, + 372 + ], + "type": "text", + "content": "- We introduce an elegant network of spatial neural representation to model common attributes over the 3D space. This design allows for disentangled geometry and texture adaption, achieving more flexible and fine-grained 3D stylization results." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 372, + 545, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 372, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 306, + 372, + 545, + 431 + ], + "type": "text", + "content": "- We propose a novel progressive training scheme of 3D style adaption. Cooperated with the delicately-designed spatial neural network, it enables learning realistic 3D cartoon avatars with deformed geometry and stylized texture." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 439, + 392, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 439, + 392, + 452 + ], + "spans": [ + { + "bbox": [ + 306, + 439, + 392, + 452 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 460, + 547, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 547, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 547, + 664 + ], + "type": "text", + "content": "2D Portrait Stylization. In the deep neural network based portrait stylization, there are two types of approaches, i.e., image-to-image translation and StyleGAN based translation. Methods [21, 24] conduct face-to-cartoon translation by adopting the framework of cycleGAN [60]. Nevertheless, training such methods requires extensive data and may still generate unstable results. StyleGAN [16, 17] has become a popular alternative for portrait stylization due to its strong capacity for latent inversion and style control. [30] proposes a calibration framework to adapt the original training distribution for fine-grained translation. [53] leverages the mid- and high-resolution layers of StyleGAN to render high-quality artistic portraits based on the multi-scale content features to better preserve details. Although high-quality results have been shown, these methods cannot handle extreme face angle while maintaining cross-view consistency." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "Neural Implicit Fields. Recently, neural implicit functions have emerged as an effective representation to model conventional 3D scenes due to its continuous nature. 
This representation has been successfully adopted to shape model-" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "10128" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 72, + 220, + 251 + ], + "blocks": [ + { + "bbox": [ + 55, + 72, + 220, + 251 + ], + "lines": [ + { + "bbox": [ + 55, + 72, + 220, + 251 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 220, + 251 + ], + "type": "image", + "image_path": "7451c05f14cfdda6536561d10e264702e5a878e43da945522c450760650656a9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 258, + 547, + 314 + ], + "lines": [ + { + "bbox": [ + 46, + 258, + 547, + 314 + ], + "spans": [ + { + "bbox": [ + 46, + 258, + 547, + 314 + ], + "type": "text", + "content": "Figure 2. An overview of the proposed framework. Our method first learns a photorealistic field built-upon spatial neural representation (SNR) using dense input views, then transfers this prior representation to artistic ones with few-shot stylized views by adapting underlying 3D structures. SNR is constructed by a geometry field for SDF surface, an appearance field for observed color, and a texture field for albedo color, respectively. The progressive training scheme is adopted to enable SNR to learn about stylized geometry and texture in an explicit and structured manner." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 231, + 74, + 407, + 251 + ], + "blocks": [ + { + "bbox": [ + 231, + 74, + 407, + 251 + ], + "lines": [ + { + "bbox": [ + 231, + 74, + 407, + 251 + ], + "spans": [ + { + "bbox": [ + 231, + 74, + 407, + 251 + ], + "type": "image", + "image_path": "849c9135f9defd35b372ddc6d31c4785b906add425fb7c11d37fd904953665ef.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 432, + 76, + 534, + 251 + ], + "blocks": [ + { + "bbox": [ + 432, + 76, + 534, + 251 + ], + "lines": [ + { + "bbox": [ + 432, + 76, + 534, + 251 + ], + "spans": [ + { + "bbox": [ + 432, + 76, + 534, + 251 + ], + "type": "image", + "image_path": "d861cb72c80ec9f08a4376eebdcce5f4141cf2707931a0a448a4edb4156894cf.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 327, + 289, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 327, + 289, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 327, + 289, + 555 + ], + "type": "text", + "content": "ing [12, 39], novel view synthesis [29, 32] and multi-view 3D reconstruction [49, 54]. The method of Neural Radiance Fields (NeRF) [32], in particular, has attracted significant attention for its ability to achieve photo-realistic novel view synthesis results by utilizing neural implicit functions together with volume rendering. A number of variants have been developed thereafter to fit with different scenarios and requirements, including quality improvement [4], fast rendering [33], dynamic scene capture [40] and generative models [5]. 
However, NeRF's estimated volume density does not admit accurate surface reconstruction, the recovered 3D geometry is far from satisfactory and can hardly be extracted as explicit materials. Recent works tackle the issue by combining implicit surface functions. [37] represents the surface by occupancy values and shrink the sample region of volume rendering during the optimization. [49] introduces signed distance functions (SDF) to represent the scene and can directly extract the surface as the zero-level set of the SDF with better accuracy." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 558, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 289, + 715 + ], + "type": "text", + "content": "3D Avatar Stylization. 3D avatar stylization aims to generate stylized 3D avatars whose rendered images captured from different viewpoints match the specific style. Early methods are either mesh-driven [13] or rely on explicit parameterization [47]. More recently, [35, 48] exploit the flexibility of neural radiance field and propose a text-guided stylization approach that manipulates the reconstructed scenes with input text prompts. However, due to the limited expressiveness of natural languages, they can not generate highly-detailed results with arbitrary user-specific styles. Another stream of methods [2, 19, 20, 57] using 3D generative models [6, 38] have extended avatar stylization to 3D-aware domain adaption. However, inherited from" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 327, + 547, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 327, + 547, + 400 + ], + "spans": [ + { + "bbox": [ + 304, + 327, + 547, + 400 + ], + "type": "text", + "content": "their predecessors, these methods can not synthesize full-head avatars in " + }, + { + "bbox": [ + 304, + 327, + 547, + 400 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 327, + 547, + 400 + ], + "type": "text", + "content": ", and perform badly with real-world out-of-domain data. In contrast, our method utilizes the implicit representation to model high-fidelity 3D avatars from captured portrait videos, which allows for superior view consistency and stable stylization." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 406, + 424, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 406, + 424, + 420 + ], + "spans": [ + { + "bbox": [ + 305, + 406, + 424, + 420 + ], + "type": "text", + "content": "3. Method Description" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 426, + 545, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 426, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 304, + 426, + 545, + 521 + ], + "type": "text", + "content": "Given the short portrait video of a person captured with a monocular camera, we aim to generate the high-fidelity 3D stylized avatar of the person. The person stands still when recording the video. 
We denote the split frames of the video as " + }, + { + "bbox": [ + 304, + 426, + 545, + 521 + ], + "type": "inline_equation", + "content": "\\{I_i | i = 1, \\dots, N\\}" + }, + { + "bbox": [ + 304, + 426, + 545, + 521 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 426, + 545, + 521 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 426, + 545, + 521 + ], + "type": "text", + "content": " is the frame index, " + }, + { + "bbox": [ + 304, + 426, + 545, + 521 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 426, + 545, + 521 + ], + "type": "text", + "content": " is the number of frames. For each frame, we use COLMAP to obtain the calibrated camera and the method proposed in [28] to extract the foreground human mask." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 522, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 546, + 714 + ], + "type": "text", + "content": "The overview of the proposed framework is illustrated in Figure 2. 3DToonify aims to learn the stylized human neural field by adapting 3D structures in a progressive training scheme. This scheme is built upon a spatial neural representation, which utilizes disentangled implicit fields to capture the underlying 3D structures such as geometry and texture (Section 3.1). We first leverage the geometric guidance from a multi-view stereo to learn a robust photorealistic representation, acting as a source prior (Section 3.2). Then this prior representation is adapted to the style domain with adaptive geometry deformation (Section 3.3.1) and decomposed albedo colors (Section 3.3.2). In this way, the stylized human avatar field can be constructed by SNR with transformed underlying structures, thus allowing for fully stylized results and 3D consistent rendering in arbitrary viewpoints." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "10129" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 207, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 207, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 207, + 85 + ], + "type": "text", + "content": "3.1. 
Spatial neural representation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "text", + "content": "The proposed spatial neural representation (SNR) is based on neural radiance field (NeRF) [32], which can be seen as a continuous 5D function that maps a 3D position " + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "text", + "content": " and a viewing direction " + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "text", + "content": " to an emitted color " + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "inline_equation", + "content": "\\mathbf{c} = (r, g, b)" + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "text", + "content": " and a volume density " + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "text", + "content": ". NeRF is approximated by a multi-layer perceptron (MLP) " + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "inline_equation", + "content": "F_{\\theta}: (\\mathbf{x}, \\mathbf{v}) \\rightarrow \\mathbf{c}, \\sigma" + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "text", + "content": ". SNR consists of three MLPs " + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "inline_equation", + "content": "F_{geo}, F_{app}" + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "inline_equation", + "content": "F_{tex}" + }, + { + "bbox": [ + 46, + 92, + 288, + 201 + ], + "type": "text", + "content": ", representing the decomposed fields of geometry, the observed appearance color and the albedo texture color, respectively." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "spans": [ + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "content": "Geometry field learns a function " + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "inline_equation", + "content": "F_{geo} : \\mathbb{R}^3 \\to \\mathbb{R}" + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "content": " that maps a spatial point " + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "content": " to its signed distance value " + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "content": " to the object surface. It constructs the underlying object surface by encoding a signed distance function (SDF) of only location " + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "content": ". 
In order to be compatible with the rendering procedure of the radiance field, a probability function " + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "inline_equation", + "content": "\\psi(\\cdot)" + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "content": " proposed by [49] is used to calculate the point weight " + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "content": " from the signed distance value " + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "inline_equation", + "content": "\\psi(\\cdot)" + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "content": " denotes an unbiased and occlusion-aware approximation. With this implicit SDF representation, the explicit object surface " + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "content": " can be easily extracted by the zero level-set of the " + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "inline_equation", + "content": "SDF : S = \\{\\mathbf{x} \\in \\mathbb{R}^3 | \\mathcal{G}(\\mathbf{x}) = 0\\}" + }, + { + "bbox": [ + 46, + 202, + 289, + 347 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "spans": [ + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "text", + "content": "Appearance field learns a function " + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "inline_equation", + "content": "F_{app} : \\mathbb{R}^3 \\times \\mathbb{S}^2 \\to \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "text", + "content": " to encode the observed colors " + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{app}" + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "text", + "content": " associated with the point " + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "text", + "content": " and the view direction " + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "inline_equation", + "content": "\\mathbf{v} \\in \\mathbb{S}^2" + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "text", + "content": ". The feature vectors " + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "inline_equation", + "content": "F(\\mathbf{x})" + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "text", + "content": " derived from " + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "inline_equation", + "content": "F_{geo}" + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "text", + "content": " are also concatenated as the inputs. 
To better approximate the appearance colors of the object captured in read-world scenes, " + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "text", + "content": " is introduced as a function of both location and viewing direction, thus allowing learning view-dependent RGB colors for multi-view images. Notably, the learned representation in " + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "text", + "content": " could be degraded into reflection components " + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 46, + 348, + 288, + 504 + ], + "type": "text", + "content": ", which are caused by illumination and vary with view directions. It will be adaptively changed in the later training stage (see the detailed discussion in Section 3.3.2)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "spans": [ + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "content": "Texture field learns a function " + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "inline_equation", + "content": "F_{tex} : \\mathbb{R}^3 \\to \\mathbb{R}" + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "content": " to encode the albedo color for the texture atlas " + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{tex}" + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "content": " associated with only the spatial location " + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "content": ". Similar to " + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "content": ", feature vectors derived from " + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "inline_equation", + "content": "F_{geo}" + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "content": " are concatenated as inputs. 
We encourage the texture representation to be multi-view consistent by restricting " + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "inline_equation", + "content": "F_{tex}" + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "content": " being a function of only " + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "content": ", while allowing the final color " + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "inline_equation", + "content": "\\mathbf{c} = \\mathbf{s} \\circ \\mathbf{c}_{tex}" + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "content": " to be view-dependent to satisfy different view observations, where " + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "inline_equation", + "content": "\\circ" + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "content": " denotes element-wise multiplication. With the nature of view-independent representation of " + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "inline_equation", + "content": "F_{tex}" + }, + { + "bbox": [ + 46, + 506, + 287, + 639 + ], + "type": "text", + "content": ", explicit textures can be obtained by accumulating the volume albedo colors." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 641, + 288, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 288, + 712 + ], + "type": "text", + "content": "The proposed geometry field and texture field are formulated in a view-independent function, once being effectively learned, they can express spatial attributes shared by the entire 3D space. This enables editable 3D structures with only few-shot stylized views needed in the later adaption process." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 331, + 71, + 379, + 164 + ], + "blocks": [ + { + "bbox": [ + 331, + 71, + 379, + 164 + ], + "lines": [ + { + "bbox": [ + 331, + 71, + 379, + 164 + ], + "spans": [ + { + "bbox": [ + 331, + 71, + 379, + 164 + ], + "type": "image", + "image_path": "ac93a99053c6774a73519ac4ada0afbd4d6bbcb0072cac4b917e309346da142e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 335, + 166, + 367, + 175 + ], + "lines": [ + { + "bbox": [ + 335, + 166, + 367, + 175 + ], + "spans": [ + { + "bbox": [ + 335, + 166, + 367, + 175 + ], + "type": "text", + "content": "(a) Stage I" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 343, + 177, + 508, + 188 + ], + "lines": [ + { + "bbox": [ + 343, + 177, + 508, + 188 + ], + "spans": [ + { + "bbox": [ + 343, + 177, + 508, + 188 + ], + "type": "text", + "content": "Figure 3. Visualized results in stage I, II, III." 
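The extracted Section 3.1 above describes three decomposed fields: a geometry MLP producing an SDF value plus a feature vector, a view-dependent appearance MLP, and a view-independent texture MLP whose albedo is combined with a reflection component as c = s ∘ c_tex. Below is a minimal NumPy sketch of one way these fields could be wired together; the layer sizes, feature width, and function names are illustrative assumptions, not the authors' released code.

```python
import numpy as np

rng = np.random.default_rng(0)

def tiny_mlp(in_dim, out_dim, hidden=32):
    """Random-weight 2-layer MLP standing in for the real trained networks."""
    w1, b1 = rng.normal(size=(in_dim, hidden)) * 0.1, np.zeros(hidden)
    w2, b2 = rng.normal(size=(hidden, out_dim)) * 0.1, np.zeros(out_dim)
    def f(x):
        h = np.maximum(x @ w1 + b1, 0.0)  # ReLU hidden layer
        return h @ w2 + b2
    return f

FEAT = 16  # assumed width of the geometry feature passed to the color heads
F_geo = tiny_mlp(3, 1 + FEAT)        # x -> (sdf, geometry feature)
F_app = tiny_mlp(3 + 3 + FEAT, 3)    # (x, v, feat) -> view-dependent component s
F_tex = tiny_mlp(3 + FEAT, 3)        # (x, feat)    -> view-independent albedo c_tex

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def query_snr(x, v):
    """Query the decomposed fields at points x with view directions v."""
    out = F_geo(x)
    sdf, feat = out[..., :1], out[..., 1:]
    s = sigmoid(F_app(np.concatenate([x, v, feat], axis=-1)))       # reflection in [0, 1]
    albedo = sigmoid(F_tex(np.concatenate([x, feat], axis=-1)))     # view-independent albedo
    color = s * albedo  # final color c = s ∘ c_tex (element-wise product)
    return sdf, color, albedo

x = rng.normal(size=(4, 3))
v = rng.normal(size=(4, 3))
v /= np.linalg.norm(v, axis=-1, keepdims=True)
sdf, color, albedo = query_snr(x, v)
print(sdf.shape, color.shape, albedo.shape)  # (4, 1) (4, 3) (4, 3)
```

Because only F_app takes the view direction, the texture and geometry heads stay spatially shared across views, which is what lets the later stages edit them from few-shot stylized images.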
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 381, + 71, + 433, + 164 + ], + "blocks": [ + { + "bbox": [ + 381, + 71, + 433, + 164 + ], + "lines": [ + { + "bbox": [ + 381, + 71, + 433, + 164 + ], + "spans": [ + { + "bbox": [ + 381, + 71, + 433, + 164 + ], + "type": "image", + "image_path": "42cc2a07b00e0bf76703a28ec4574e850b5dfbc3702c5f880f66eda604ca39ee.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 410, + 166, + 444, + 175 + ], + "lines": [ + { + "bbox": [ + 410, + 166, + 444, + 175 + ], + "spans": [ + { + "bbox": [ + 410, + 166, + 444, + 175 + ], + "type": "text", + "content": "(b) Stage II" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 444, + 71, + 523, + 164 + ], + "blocks": [ + { + "bbox": [ + 444, + 71, + 523, + 164 + ], + "lines": [ + { + "bbox": [ + 444, + 71, + 523, + 164 + ], + "spans": [ + { + "bbox": [ + 444, + 71, + 523, + 164 + ], + "type": "image", + "image_path": "1dd871b05424d5ce98197c527cd4f4940ef247b220cddcbd23ea978f1eefffeb.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 479, + 166, + 516, + 175 + ], + "lines": [ + { + "bbox": [ + 479, + 166, + 516, + 175 + ], + "spans": [ + { + "bbox": [ + 479, + 166, + 516, + 175 + ], + "type": "text", + "content": "(c) Stage III" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 199, + 457, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 199, + 457, + 213 + ], + "spans": [ + { + "bbox": [ + 306, + 199, + 457, + 213 + ], + "type": "text", + "content": "3.2. MVS guided prior learning" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 217, + 545, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 217, + 545, + 325 + ], + "spans": [ + { + "bbox": [ + 304, + 217, + 545, + 325 + ], + "type": "text", + "content": "In this module, we learn the photorealistic representation as a prior for the later 3D style adaption. Due to the complexity of real-world captures caused by illumination, object materials, etc., the reconstructed results can easily suffer from noisy surfaces and irregular holes. Observing that the geometry directly extracted by multi-view stereo (MVS) methods are generally accurate with only local noises, we propose to integrate the depth information estimated by MVS as a geometric guidance for surface reconstruction." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "spans": [ + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "text", + "content": "Accumulated depth guidance. Volume rendering has been proven effective to enable robust supervision using 2D image observations. Following this, we render the depth map with " + }, + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "text", + "content": " points along the emitted ray and use the corresponding 2D depth value for supervision. 
The ray can be parametrized as " + }, + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "inline_equation", + "content": "r(i) = o + d_i\\mathbf{v}" + }, + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "inline_equation", + "content": "o" + }, + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "text", + "content": " is the center of the camera and " + }, + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "text", + "content": " is the direction of the ray. The depth " + }, + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "inline_equation", + "content": "\\hat{D}(r)" + }, + { + "bbox": [ + 304, + 325, + 546, + 421 + ], + "type": "text", + "content": " from the geometry field can be computed by:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 381, + 426, + 545, + 459 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 426, + 545, + 459 + ], + "spans": [ + { + "bbox": [ + 381, + 426, + 545, + 459 + ], + "type": "interline_equation", + "content": "\\hat {D} (r) = \\sum_ {i = 1} ^ {K} \\left(T _ {i} \\alpha_ {i} d _ {i}\\right), \\tag {1}", + "image_path": "ff5a2478d7b55793df22a40ec9a9914c1f4bcbc3807773513ac381e244788557.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "spans": [ + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "inline_equation", + "content": "T_{i}" + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "text", + "content": " is the accumulated transmittance defined by " + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "inline_equation", + "content": "\\Pi_{j = 1}^{i + 1}(1 - \\alpha_j)" + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "inline_equation", + "content": "\\alpha_{j}" + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "text", + "content": " denotes the discrete opacity value computed by " + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "inline_equation", + "content": "\\alpha_{j} = \\max (\\frac{\\Phi_{s}(s_{i}) - \\Phi_{s}(s_{i} + 1)}{\\Phi_{s}(s_{i})},0)" + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "text", + "content": ", in which " + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "text", + "content": " is the cumulative distribution of logistic distribution. More details about conversion from the SDF distance to the opacity can be found in NeuS [49]. 
For a batched training ray " + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "inline_equation", + "content": "r\\in R" + }, + { + "bbox": [ + 304, + 464, + 545, + 552 + ], + "type": "text", + "content": ", the accumulated depth loss can be formulated as:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 343, + 558, + 545, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 558, + 545, + 583 + ], + "spans": [ + { + "bbox": [ + 343, + 558, + 545, + 583 + ], + "type": "interline_equation", + "content": "L _ {\\text {d e p t h}} = \\sum_ {r \\in R} | | M (r) (\\hat {D} (r) - D (r)) | | _ {1}, \\tag {2}", + "image_path": "dafb65313d89d12d2f21acc360f74a42dbaa121450a5feb358588f69e0bd50fb.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 589, + 545, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 589, + 545, + 613 + ], + "spans": [ + { + "bbox": [ + 304, + 589, + 545, + 613 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 589, + 545, + 613 + ], + "type": "inline_equation", + "content": "M(r) \\in \\{0,1\\}" + }, + { + "bbox": [ + 304, + 589, + 545, + 613 + ], + "type": "text", + "content": " is the object mask value and " + }, + { + "bbox": [ + 304, + 589, + 545, + 613 + ], + "type": "inline_equation", + "content": "D(r)" + }, + { + "bbox": [ + 304, + 589, + 545, + 613 + ], + "type": "text", + "content": " is the supervised depth value." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 613, + 545, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 613, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 304, + 613, + 545, + 685 + ], + "type": "text", + "content": "Depth-sampled surface guidance. Except for the depth constraint on spatial accumulated points, we also leverage points sampled from the depth image " + }, + { + "bbox": [ + 304, + 613, + 545, + 685 + ], + "type": "inline_equation", + "content": "I_{D}" + }, + { + "bbox": [ + 304, + 613, + 545, + 685 + ], + "type": "text", + "content": " to guide the construction of the SDF surface. The surface loss encourages these sampled 3D points being close to the object surface and " + }, + { + "bbox": [ + 304, + 613, + 545, + 685 + ], + "type": "inline_equation", + "content": "L_{sur}" + }, + { + "bbox": [ + 304, + 613, + 545, + 685 + ], + "type": "text", + "content": " can be formulated as:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 366, + 689, + 545, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 366, + 689, + 545, + 716 + ], + "spans": [ + { + "bbox": [ + 366, + 689, + 545, + 716 + ], + "type": "interline_equation", + "content": "L _ {s u r} = \\sum_ {\\mathbf {x} _ {d} \\in I _ {D}} | | F _ {g e o} (\\mathbf {x} _ {d}) | | _ {1}. 
\\tag {3}", + "image_path": "07bbbaa9351453c2d4de672e900a4522ca539ef8400d1a82dfca2186a5689e4f.jpg" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "10130" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": "Training. Given a set of portrait images and their camera parameters, we train the architecture with the geometry field and the appearance field using the following loss function:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 116, + 287, + 139 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 116, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 47, + 116, + 287, + 139 + ], + "type": "interline_equation", + "content": "L _ {p r i o r} = L _ {c o l o r} + \\lambda_ {m v s} L _ {m v s} + \\lambda_ {m a s k} L _ {m a s k} + \\lambda_ {r e g} L _ {r e g}, \\tag {4}", + "image_path": "73dd1cf41c23fa843eae185295d594960e92a43a119ea68762ea9d89450f2fb8.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "spans": [ + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "text", + "content": " denotes the weight of each corresponding loss. The MVS guided loss is computed as " + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "inline_equation", + "content": "L_{mvs} = L_{depth} + L_{sur}" + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "text", + "content": ". 
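The passage above walks through the NeuS-style conversion from SDF samples to opacities, the accumulated depth of Eq. 1, and the two MVS guidance terms L_depth and L_sur (Eqs. 2-3). The following is a minimal NumPy sketch of those computations under stated assumptions: the logistic scale, helper names, and the toy ray are illustrative, and the transmittance uses the standard NeuS product over earlier samples.

```python
import numpy as np

def logistic_cdf(x, s=10.0):
    """Logistic CDF Φ_s used to turn SDF values into opacities (scale s is an assumption)."""
    return 1.0 / (1.0 + np.exp(-s * x))

def render_depth(sdf_vals, depths):
    """Accumulated depth along one ray (Eq. 1) from K SDF samples at the given depths."""
    phi = logistic_cdf(sdf_vals)
    # α_i = max((Φ_s(s_i) - Φ_s(s_{i+1})) / Φ_s(s_i), 0)
    alpha = np.clip((phi[:-1] - phi[1:]) / np.clip(phi[:-1], 1e-6, None), 0.0, 1.0)
    trans = np.cumprod(np.concatenate([[1.0], 1.0 - alpha[:-1]]))  # T_i = Π_{j<i}(1 - α_j)
    weights = trans * alpha
    return np.sum(weights * depths[:-1]), weights

def depth_loss(pred_depth, mvs_depth, mask):
    """Eq. 2: masked L1 between rendered depth and the MVS depth supervision."""
    return np.abs(mask * (pred_depth - mvs_depth)).sum()

def surface_loss(sdf_at_mvs_points):
    """Eq. 3: points back-projected from the MVS depth map should lie on the SDF zero level set."""
    return np.abs(sdf_at_mvs_points).sum()

# toy example: one ray with K = 8 samples crossing a planar surface at depth 1.0
d = np.linspace(0.5, 1.5, 8)
sdf = 1.0 - d
d_hat, _ = render_depth(sdf, d)
print(depth_loss(d_hat, 1.0, 1.0), surface_loss(np.array([0.02, -0.01])))
```

Summing the two terms gives the L_mvs guidance mentioned above; in practice the same weights T_i α_i are reused to accumulate colors and mask density along the ray.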
The color reconstruction loss " + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "inline_equation", + "content": "L_{color}" + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "text", + "content": " is calculated as the distance between the accumulated color " + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "inline_equation", + "content": "\\hat{C}(r)" + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "text", + "content": " and the observed color " + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "inline_equation", + "content": "C(r)" + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 47, + 140, + 287, + 200 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 86, + 207, + 287, + 233 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 207, + 287, + 233 + ], + "spans": [ + { + "bbox": [ + 86, + 207, + 287, + 233 + ], + "type": "interline_equation", + "content": "L _ {c o l o r} = \\sum_ {r \\in R} | | M (r) (\\hat {C} (r) - C (r)) | | _ {1}, \\tag {5}", + "image_path": "96c9637a7deea6d13843c952fed2efa74af2bacc750b8bef61129bd66866930c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 241, + 287, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 241, + 287, + 291 + ], + "spans": [ + { + "bbox": [ + 47, + 241, + 287, + 291 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 241, + 287, + 291 + ], + "type": "inline_equation", + "content": "\\hat{C}(r)" + }, + { + "bbox": [ + 47, + 241, + 287, + 291 + ], + "type": "text", + "content": " can be computed by " + }, + { + "bbox": [ + 47, + 241, + 287, + 291 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{K}(T_i\\alpha_i c_i)" + }, + { + "bbox": [ + 47, + 241, + 287, + 291 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 241, + 287, + 291 + ], + "type": "inline_equation", + "content": "c_i" + }, + { + "bbox": [ + 47, + 241, + 287, + 291 + ], + "type": "text", + "content": " denotes the volumetric color produced by the appearance field " + }, + { + "bbox": [ + 47, + 241, + 287, + 291 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 47, + 241, + 287, + 291 + ], + "type": "text", + "content": ". 
To focus on human reconstruction, we also define a mask term with the binary cross entropy loss:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 102, + 297, + 287, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 297, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 102, + 297, + 287, + 312 + ], + "type": "interline_equation", + "content": "L _ {m a s k} = B C E (\\hat {M} (r), M (r)), \\tag {6}", + "image_path": "10d21497802ff0670962a290377aa4f7f4d2cbbaaa8c31bed4c3ba693f7d1ee7.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 318, + 287, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 318, + 287, + 355 + ], + "spans": [ + { + "bbox": [ + 47, + 318, + 287, + 355 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 318, + 287, + 355 + ], + "type": "inline_equation", + "content": "\\hat{M}(r) = \\sum_{i=1}^{K} (T_i \\alpha_i)" + }, + { + "bbox": [ + 47, + 318, + 287, + 355 + ], + "type": "text", + "content": " is the density accumulation along the ray. The Eikonal loss [12] used to regularize the SDF values is defined as" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 95, + 361, + 287, + 386 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 361, + 287, + 386 + ], + "spans": [ + { + "bbox": [ + 95, + 361, + 287, + 386 + ], + "type": "interline_equation", + "content": "L _ {r e g} = \\sum_ {k} | | \\nabla_ {\\mathbf {p} _ {k}} F _ {g e o} (\\mathbf {x} _ {k}) - 1 | | _ {2} ^ {2}. \\tag {7}", + "image_path": "a2f56135d04ddc528d91103fac42be999c96029c46fcabf7510b892c85ee129f.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 392, + 287, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 392, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 47, + 392, + 287, + 464 + ], + "type": "text", + "content": "Visualized results of this stage are shown in Figure 3 (a). Not only the radiance field with accumulated color is learned, but also the inherent geometry can be accurately decomposed. The high-quality reconstruction learned in this stage also paves the way for the next stage of style adaptation with few-shot 2D stylized portraits." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 472, + 217, + 484 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 472, + 217, + 484 + ], + "spans": [ + { + "bbox": [ + 47, + 472, + 217, + 484 + ], + "type": "text", + "content": "3.3. Spatial representation adaption" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 490, + 287, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 490, + 287, + 585 + ], + "spans": [ + { + "bbox": [ + 47, + 490, + 287, + 585 + ], + "type": "text", + "content": "With the constructed photorealistic representation, we then transform it to the style domain by progressively adapting the underlying 3D structures. We first adaptively learn the faithful deformed geometry without the interference of the albedo texture module, and then decompose albedo colors from observed ones with fixed geometric structures. This enables effective 3D structure disentanglement with more accurate surface and clearer texture." 
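Before the adaptation stages, it may help to see how the Stage-I prior objective described in Section 3.2 (Eq. 4 with the Eikonal term of Eq. 7) could be assembled. This is a hedged sketch only: the finite-difference gradient stands in for the autograd gradient used in practice, and the default weights simply reuse the values reported later in the implementation details.

```python
import numpy as np

def eikonal_loss(F_geo, pts, eps=1e-3):
    """Eq. 7: encourage |∇F_geo| = 1; central finite differences stand in for autograd."""
    grads = []
    for k in range(3):
        offset = np.zeros(3)
        offset[k] = eps
        grads.append((F_geo(pts + offset) - F_geo(pts - offset)) / (2 * eps))
    grad_norm = np.linalg.norm(np.stack(grads, axis=-1), axis=-1)
    return np.mean((grad_norm - 1.0) ** 2)

def prior_loss(l_color, l_mvs, l_mask, l_reg,
               lam_mvs=0.5, lam_mask=0.5, lam_reg=0.1):
    """Eq. 4: Stage-I objective combining color, MVS guidance, mask and Eikonal terms."""
    return l_color + lam_mvs * l_mvs + lam_mask * l_mask + lam_reg * l_reg

# toy check: for a unit-sphere SDF the gradient norm is 1, so the Eikonal term is ~0
sphere_sdf = lambda p: np.linalg.norm(p, axis=-1) - 1.0
pts = np.random.default_rng(1).normal(size=(128, 3))
l_reg = eikonal_loss(sphere_sdf, pts)
print(l_reg, prior_loss(l_color=0.2, l_mvs=0.1, l_mask=0.05, l_reg=l_reg))
```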
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 591, + 163, + 603 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 591, + 163, + 603 + ], + "spans": [ + { + "bbox": [ + 47, + 591, + 163, + 603 + ], + "type": "text", + "content": "3.3.1 Geometry adaption" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "text", + "content": "In this stage, we utilize a number of stylized 2D portrait images " + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "text", + "content": " derived from existing 2D portrait stylization methods [30, 53] to fine-tune the geometry field " + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "inline_equation", + "content": "F_{geo}" + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "text", + "content": " and the appearance field " + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "text", + "content": ". The spatial-shared geometry will be adaptively transformed in " + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "inline_equation", + "content": "F_{geo}" + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "text", + "content": " and the observed colors varying with views will be modeled in " + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "text", + "content": ", enabling the network focusing on geometry adaption. 
During training, the pixel color of " + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 47, + 605, + 287, + 712 + ], + "type": "text", + "content": " is used as the observed color to guide the accumulated volume colors:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 343, + 82, + 545, + 107 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 82, + 545, + 107 + ], + "spans": [ + { + "bbox": [ + 343, + 82, + 545, + 107 + ], + "type": "interline_equation", + "content": "L _ {c o l o r} = \\sum_ {r \\in R} | | M (r) (C (r) - C _ {t} (r)) | | _ {1}, \\tag {8}", + "image_path": "064f4aa37fa3b8968143912bf0c9803d5fd54294c7e66e3ac47d786407c855e9.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 111, + 545, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 111, + 545, + 147 + ], + "spans": [ + { + "bbox": [ + 305, + 111, + 545, + 147 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 111, + 545, + 147 + ], + "type": "inline_equation", + "content": "C(r)" + }, + { + "bbox": [ + 305, + 111, + 545, + 147 + ], + "type": "text", + "content": " is computed by the volumetric color from " + }, + { + "bbox": [ + 305, + 111, + 545, + 147 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 305, + 111, + 545, + 147 + ], + "type": "text", + "content": " and the converted opacity from " + }, + { + "bbox": [ + 305, + 111, + 545, + 147 + ], + "type": "inline_equation", + "content": "F_{geo}" + }, + { + "bbox": [ + 305, + 111, + 545, + 147 + ], + "type": "text", + "content": ". The total training loss is formulated as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 336, + 156, + 545, + 170 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 336, + 156, + 545, + 170 + ], + "spans": [ + { + "bbox": [ + 336, + 156, + 545, + 170 + ], + "type": "interline_equation", + "content": "L _ {g e o} = L _ {c o l o r} + \\lambda_ {m a s k} L _ {m a s k} + \\lambda_ {r e g} L _ {r e g}. \\tag {9}", + "image_path": "a1da5844706a2e8137f0ca9c0bd55cad4ea092659275cb058a7685ddd1a0c877.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 177, + 545, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 177, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 305, + 177, + 545, + 249 + ], + "type": "text", + "content": "As shown in Figure 3 (b), the spatial deformed geometry can be extracted from " + }, + { + "bbox": [ + 305, + 177, + 545, + 249 + ], + "type": "inline_equation", + "content": "F_{geo}" + }, + { + "bbox": [ + 305, + 177, + 545, + 249 + ], + "type": "text", + "content": ". However, rendering results are 3D-inconsistent with obvious artifacts in side-view renderings, since only few-shot 2D stylizations of the frontal views are provided for style adaption and the view-dependent function " + }, + { + "bbox": [ + 305, + 177, + 545, + 249 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 305, + 177, + 545, + 249 + ], + "type": "text", + "content": " trivially fits these views." 
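The geometry-adaptation stage just described fine-tunes F_geo and F_app against few-shot stylized frontal views using a masked color loss (Eq. 8), the mask BCE term (Eq. 6), and the Eikonal regularizer, combined as in Eq. 9. A minimal NumPy sketch of those terms follows; function names and the toy inputs are illustrative assumptions.

```python
import numpy as np

def masked_l1(pred, target, mask):
    """Eq. 8: L1 between rendered colors and the stylized reference colors inside the mask."""
    return np.abs(mask[..., None] * (pred - target)).sum()

def bce_mask_loss(acc, mask, eps=1e-6):
    """Eq. 6: binary cross entropy between accumulated density Σ T_i α_i and the object mask."""
    acc = np.clip(acc, eps, 1.0 - eps)
    return -np.mean(mask * np.log(acc) + (1.0 - mask) * np.log(1.0 - acc))

def geo_adaption_loss(l_color, l_mask, l_reg, lam_mask=0.5, lam_reg=0.1):
    """Eq. 9: Stage-II objective while fine-tuning F_geo and F_app on stylized views."""
    return l_color + lam_mask * l_mask + lam_reg * l_reg

pred = np.random.default_rng(2).random((4, 3))
target = np.clip(pred + 0.1, 0.0, 1.0)          # stand-in for stylized pixel colors C_t(r)
mask = np.array([1.0, 1.0, 0.0, 1.0])           # object mask M(r)
acc = np.array([0.9, 0.8, 0.1, 0.95])           # accumulated density along each ray
print(geo_adaption_loss(masked_l1(pred, target, mask), bce_mask_loss(acc, mask), 0.01))
```

Because only frontal stylized views supervise this stage, the adapted geometry is trustworthy but the view-dependent F_app overfits those views, which motivates the albedo decomposition of the next stage.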
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 255, + 517, + 267 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 255, + 517, + 267 + ], + "spans": [ + { + "bbox": [ + 305, + 255, + 517, + 267 + ], + "type": "text", + "content": "3.3.2 Albedo texture adaption and optimization" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "spans": [ + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "content": "In this stage, we aim to learn the spatial-shared texture field " + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "inline_equation", + "content": "F_{tex}" + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "content": " by decomposing the albedo colors from the appearance ones. Specifically, we insert " + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "inline_equation", + "content": "F_{tex}" + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "content": " as a view-independent texture field and jointly optimize " + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "inline_equation", + "content": "F_{tex}" + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "content": ". In this way, view-consistent colors can be effectively decomposed from the total appearance and the remaining components in " + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "content": " are regarded as view-dependent reflections. The final color are computed by " + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "inline_equation", + "content": "\\tilde{c}_i = s \\circ c_i'" + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "inline_equation", + "content": "c_i'" + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "content": " is the albedo color from " + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "inline_equation", + "content": "F_{tex}" + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "content": " is the degraded reflection from " + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 305, + 270, + 545, + 400 + ], + "type": "text", + "content": " for spatial points. Then we can obtain the final accumulated color by" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 381, + 401, + 545, + 434 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 401, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 381, + 401, + 545, + 434 + ], + "type": "interline_equation", + "content": "\\tilde {C} (r) = \\sum_ {i = 1} ^ {K} \\left(T _ {i} \\alpha_ {i} \\tilde {c} _ {i}\\right). 
\\tag {10}", + "image_path": "a291c0fa57e62cc9df7c36420655efb426fdf3caa9c65caf3dec5bdeab07e27c.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": "To further ensure effective albedo color decomposition, a discriminator " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " is introduced to encourage " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\tilde{C}(r)" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " satisfying the approximate distribution of palette colors of " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "I_t" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": ". With " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " as a posterize filter, the patch color " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\kappa(C_t(p))" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "I_t" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " is fed into " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " as a real sample, and the reconstructed color " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\tilde{C}(p)" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "F_{tex}" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " is fed into " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " as a fake sample, where " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " is the set of rays for image pixels in a patch. 
We define the discrimination loss " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "L_{ds}" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " to penalize for distance between the distribution of " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "C(p)" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\tilde{C}(p)" + }, + { + "bbox": [ + 305, + 437, + 545, + 544 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 353, + 552, + 545, + 585 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 552, + 545, + 585 + ], + "spans": [ + { + "bbox": [ + 353, + 552, + 545, + 585 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} L _ {d s} = \\mathbb {E} _ {p \\sim \\left\\{I _ {t} ^ {i} \\right\\}} [ \\log (D (\\kappa (C _ {t} (p))) ] + \\tag {11} \\\\ \\mathbb {E} _ {p \\sim \\{I _ {t} ^ {i} \\}} [ l o g (1 - D (\\bar {C} (p))) ]. \\\\ \\end{array}", + "image_path": "b805e670ffd9a8b3001c98980dd4cd202f23eb147f3b00e906440c969b3de432.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 591, + 545, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 591, + 545, + 626 + ], + "spans": [ + { + "bbox": [ + 305, + 591, + 545, + 626 + ], + "type": "text", + "content": "To keep the learned geometry stay faithful to the given style, we fix " + }, + { + "bbox": [ + 305, + 591, + 545, + 626 + ], + "type": "inline_equation", + "content": "F_{geo}" + }, + { + "bbox": [ + 305, + 591, + 545, + 626 + ], + "type": "text", + "content": " and train " + }, + { + "bbox": [ + 305, + 591, + 545, + 626 + ], + "type": "inline_equation", + "content": "\\{F_{app}, F_{tex}\\}" + }, + { + "bbox": [ + 305, + 591, + 545, + 626 + ], + "type": "text", + "content": " with the training loss as follows:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 311, + 636, + 545, + 650 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 636, + 545, + 650 + ], + "spans": [ + { + "bbox": [ + 311, + 636, + 545, + 650 + ], + "type": "interline_equation", + "content": "L _ {t e x} = L _ {\\text {c o l o r}} + \\lambda_ {\\text {m a s k}} L _ {\\text {m a s k}} + \\lambda_ {\\text {r e g}} L _ {\\text {r e g}} + \\lambda_ {\\text {d s}} L _ {\\text {d s}}, \\tag {12}", + "image_path": "a4abe18a8360bedfe22287f1eff46557d473327381b1b4e439707b1d7de4da56.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 657, + 545, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 657, + 545, + 681 + ], + "spans": [ + { + "bbox": [ + 306, + 657, + 545, + 681 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 657, + 545, + 681 + ], + "type": "inline_equation", + "content": "L_{color}" + }, + { + "bbox": [ + 306, + 657, + 545, + 681 + ], + "type": "text", + "content": " denotes the distance between the final accumulated color " + }, + { + "bbox": [ + 306, + 657, + 545, + 681 + ], + "type": "inline_equation", + "content": "\\tilde{C} (r)" + }, + { + "bbox": [ + 306, + 657, + 545, + 681 + ], + "type": "text", + "content": " and the observed stylized color " + }, + { + "bbox": [ + 306, + 657, + 545, + 681 + ], + "type": "inline_equation", + "content": "C_t(r)" + }, + 
{ + "bbox": [ + 306, + 657, + 545, + 681 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 343, + 690, + 545, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 690, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 343, + 690, + 545, + 715 + ], + "type": "interline_equation", + "content": "L _ {\\text {c o l o r}} = \\sum_ {r \\in R} | | M (r) (\\tilde {C} (r) - C _ {t} (r)) | | _ {1}. \\tag {13}", + "image_path": "4255b7ee1e4e637df558a44decdb271cb8207bfcd5512506ab75fe9b9692c89b.jpg" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10131" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 89, + 70, + 143, + 125 + ], + "blocks": [ + { + "bbox": [ + 89, + 70, + 143, + 125 + ], + "lines": [ + { + "bbox": [ + 89, + 70, + 143, + 125 + ], + "spans": [ + { + "bbox": [ + 89, + 70, + 143, + 125 + ], + "type": "image", + "image_path": "97e2a9916a549fbebbfe3d1a0716a84b27836cd1ae0257bcb0d633e8112a68ba.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 153, + 71, + 504, + 304 + ], + "blocks": [ + { + "bbox": [ + 153, + 71, + 504, + 304 + ], + "lines": [ + { + "bbox": [ + 153, + 71, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 153, + 71, + 504, + 304 + ], + "type": "image", + "image_path": "87dd30b9d003ee603f0cc7f8a379059a37ce64da01247fce5b8b16c06dec645f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 414, + 294, + 481, + 304 + ], + "lines": [ + { + "bbox": [ + 414, + 294, + 481, + 304 + ], + "spans": [ + { + "bbox": [ + 414, + 294, + 481, + 304 + ], + "type": "text", + "content": "(c) Exported meshes" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 156, + 307, + 436, + 319 + ], + "lines": [ + { + "bbox": [ + 156, + 307, + 436, + 319 + ], + "spans": [ + { + "bbox": [ + 156, + 307, + 436, + 319 + ], + "type": "text", + "content": "Figure 4. Stylized results in novel views and corresponding exported meshes." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 47, + 127, + 143, + 182 + ], + "blocks": [ + { + "bbox": [ + 47, + 127, + 143, + 182 + ], + "lines": [ + { + "bbox": [ + 47, + 127, + 143, + 182 + ], + "spans": [ + { + "bbox": [ + 47, + 127, + 143, + 182 + ], + "type": "image", + "image_path": "0db51f534d021605dd826d0ec8811bfa2188f7cec0e9bafc4bf30548b4f4652d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 89, + 185, + 144, + 237 + ], + "blocks": [ + { + "bbox": [ + 89, + 185, + 144, + 237 + ], + "lines": [ + { + "bbox": [ + 89, + 185, + 144, + 237 + ], + "spans": [ + { + "bbox": [ + 89, + 185, + 144, + 237 + ], + "type": "image", + "image_path": "38d6359d7526f8914559ea06e323dbe203d9062ddcb7672f996eb9be9c464733.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 153, + 239, + 345, + 304 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 239, + 345, + 304 + ], + "spans": [ + { + "bbox": [ + 153, + 239, + 345, + 304 + ], + "type": "image", + "image_path": "59b07a1667f08a6cd97a7220a34eca197005995dab4c6a392774ed835debd958.jpg" + } + ] + } + ], + "index": 4, + "type": "text" + }, + { + "bbox": [ + 47, + 333, + 287, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 333, + 287, + 393 + ], + "spans": [ + { + "bbox": [ + 47, + 333, + 287, + 393 + ], + "type": "text", + "content": "We show rendering results of this stage in Figure 3 (c), demonstrating their 3D consistency in multi-view setting. Thanks to the spatial-shared colors learned in the view-independent " + }, + { + "bbox": [ + 47, + 333, + 287, + 393 + ], + "type": "inline_equation", + "content": "F_{tex}" + }, + { + "bbox": [ + 47, + 333, + 287, + 393 + ], + "type": "text", + "content": ", the albedo texture can be seamlessly extracted and further enhanced in an explicit manner." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 405, + 173, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 405, + 173, + 418 + ], + "spans": [ + { + "bbox": [ + 47, + 405, + 173, + 418 + ], + "type": "text", + "content": "4. Experimental Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "spans": [ + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "text", + "content": "Implementation details. Our network architecture consists of three modules: the signed distance function " + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "inline_equation", + "content": "F_{geo}" + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "text", + "content": ", the appearance function " + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "inline_equation", + "content": "F_{app}" + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "text", + "content": " and the texture function " + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "inline_equation", + "content": "F_{tex}" + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "text", + "content": ", which are modeled by three MLPs with 8, 6, 6 hidden layers, respectively. Positional encoding [32] and sphere initialization [3] are also applied similar to [49]. 
For the depth priors, we adopt the OpenMVS method [1] to extract estimated depth maps from the input video. For the 2D style translator, we adopt DCT-Net [30] and VToonify [53] to produce target stylized images and preserve forward/backward facing results whose absolute yaw angle is less than 0.2 radian for supervision. We use the Adam optimizer [22] with the learning rate of 2.5e-5 to train our models and sample 512 rays for each batch. The loss weights are shared by three stages with " + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "inline_equation", + "content": "\\lambda_{mask}" + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "inline_equation", + "content": "\\lambda_{mvs}" + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "inline_equation", + "content": "\\lambda_{reg}" + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "inline_equation", + "content": "\\lambda_{ds}" + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "text", + "content": " set to " + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "inline_equation", + "content": "\\{0.5, 0.5, 0.1, 1\\}" + }, + { + "bbox": [ + 46, + 424, + 288, + 639 + ], + "type": "text", + "content": ". Stage I, II and III are trained for 300k, 200k and 50k iterations, respectively, taking around 20 hours in total on a single NVIDIA Teasla-V100 GPU." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 641, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 641, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 641, + 287, + 714 + ], + "type": "text", + "content": "Datasets. We create a " + }, + { + "bbox": [ + 47, + 641, + 287, + 714 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 641, + 287, + 714 + ], + "type": "text", + "content": " captured portrait dataset called Portrait360 to evaluate our approach. This dataset contains 14 static portrait videos captured by rotating the camera around the human head. All videos have a length between 20 to 30 seconds and are split to 300 frames as source training data." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 333, + 432, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 333, + 432, + 346 + ], + "spans": [ + { + "bbox": [ + 306, + 333, + 432, + 346 + ], + "type": "text", + "content": "4.1.3D portrait stylization" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 352, + 545, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 352, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 304, + 352, + 545, + 569 + ], + "type": "text", + "content": "Performance on view consistent rendering. Given a short portrait video captured by a monocular camera, our model learns a stylized 3D representation from 2D portrait frames. Stylized portrait images can be generated from arbitrary novel viewpoints following exemplar styles, while ensuring facial identity of the person and 3D consistency between different views. Note that the synthesized images in this part are produced directly by volume rendering on implicit functions, without any explicit style enhancement applied for the results. 
Our stylized avatars rendered in novel viewpoints and their corresponding exported meshes are shown in Figure 4, more results can be found in the supplementary. Comparison with 3D avatar stylization methods. In this section, we compare our method with two 3D avatar stylization methods, DeformToon3D [57] and NeRF-Art [48], which represent the state-of-the-art techniques in 3D-aware generative toonification and text-guided NeRF stylization, respectively." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 570, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 546, + 714 + ], + "type": "text", + "content": "Qualitative comparison. Here we adapt VToonify [53] to generate target stylized images with selected exemplars to train our model. For DeformToon3D [57], we use the author-provided code and train the model using data generated with the same exemplars by DualStyleGAN [52], which is also the 2D generator used in VToonify. Here we directly generate its real-space and style-space results under the same sampled instance code, since the additional PTI [43] process will cause accumulated fidelity errors, especially on arbitrary real faces. For NeRF-Art [48], as it does not support using a single exemplar image for style guidance, we use Mini-GPT4 [59] to generate style de" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10132" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 64, + 541, + 308 + ], + "blocks": [ + { + "bbox": [ + 47, + 64, + 541, + 308 + ], + "lines": [ + { + "bbox": [ + 47, + 64, + 541, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 64, + 541, + 308 + ], + "type": "image", + "image_path": "7b0389739f52ae38cc292df12b3df9b1f07a94238a2afe532f28e8bb53708ba5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 311, + 548, + 346 + ], + "lines": [ + { + "bbox": [ + 46, + 311, + 548, + 346 + ], + "spans": [ + { + "bbox": [ + 46, + 311, + 548, + 346 + ], + "type": "text", + "content": "Figure 5. Qualitative comparison with 3D avatar stylization methods. We directly compare the generated real-space and style-space results of DeformToon3D to alleviate the fidelity loss in the additional PTI process. The models of NeRF-Art and Ours are trained on our Portrait360 dataset. Four views are selected for comparison." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 47, + 351, + 289, + 440 + ], + "blocks": [ + { + "bbox": [ + 47, + 351, + 289, + 440 + ], + "lines": [ + { + "bbox": [ + 47, + 351, + 289, + 440 + ], + "spans": [ + { + "bbox": [ + 47, + 351, + 289, + 440 + ], + "type": "image", + "image_path": "00980e90a2c39b88c259c59edc030d6ce211cc762bc1c4fade063efffab5b2cc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 441, + 288, + 475 + ], + "lines": [ + { + "bbox": [ + 46, + 441, + 288, + 475 + ], + "spans": [ + { + "bbox": [ + 46, + 441, + 288, + 475 + ], + "type": "text", + "content": "Figure 6. 
Qualitative comparison with 2D portrait stylization methods on view consistent rendering. For a more prominent video comparison, please refer to the supplementary video." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 483, + 287, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 483, + 287, + 628 + ], + "spans": [ + { + "bbox": [ + 46, + 483, + 287, + 628 + ], + "type": "text", + "content": "scriptions corresponding to each target image. The input text prompts used in this section are shown in the supplementary. We demonstrate qualitative comparison of the three methods in Figure 5. DeformToon3D only focuses on frontal views and fails to generate plausible renderings under large angles. Besides, it tends to synthesize overly exaggerated results and fail to maintain the facial characteristics (e.g., hairstyles) of the original image. NeRF-Art only generates results with undesired stylized texture and weakly-changed underlying structures. On the contrary, our method can generate fine-grained full-head stylized avatars with view-consistent renderings and exaggerated styles." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 630, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 288, + 713 + ], + "type": "text", + "content": "Quantitative comparison. For quantitative comparison, we measure the quality of multi-view stylized renderings of all methods by calculating the Frechet Inception Distance (FID) [14] value for the training cartoon exemplar dataset. A lower FID score indicates that the distribution of the generated images is more similar to that of real 2D cartoon faces. we also evaluate the fidelity of all methods in 3D" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 339, + 376, + 511, + 417 + ], + "blocks": [ + { + "bbox": [ + 305, + 352, + 545, + 374 + ], + "lines": [ + { + "bbox": [ + 305, + 352, + 545, + 374 + ], + "spans": [ + { + "bbox": [ + 305, + 352, + 545, + 374 + ], + "type": "text", + "content": "Table 1. Quantitative comparison with 3D avatar stylization methods on FID and IP. " + }, + { + "bbox": [ + 305, + 352, + 545, + 374 + ], + "type": "inline_equation", + "content": "\\uparrow" + }, + { + "bbox": [ + 305, + 352, + 545, + 374 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 352, + 545, + 374 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 305, + 352, + 545, + 374 + ], + "type": "text", + "content": " denote if higher or lower is better." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 339, + 376, + 511, + 417 + ], + "lines": [ + { + "bbox": [ + 339, + 376, + 511, + 417 + ], + "spans": [ + { + "bbox": [ + 339, + 376, + 511, + 417 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>DeformToon3D</td><td>NeRF-Art</td><td>Ours</td></tr>
<tr><td>FID ↓</td><td>66.5</td><td>78.8</td><td>57.6</td></tr>
<tr><td>IP ↑</td><td>0.551</td><td>0.671</td><td>0.678</td></tr>
", + "image_path": "5a89b99d280b6da7543279d0c01aee02ea0997e3424d32ca5d6aa830f0a83136.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 433, + 545, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 433, + 545, + 518 + ], + "spans": [ + { + "bbox": [ + 304, + 433, + 545, + 518 + ], + "type": "text", + "content": "style adaption using the identity preservation (IP) metric, which is calculated as the Arcface [9] feature similarity between the input image and the stylized result. As shown in Table 1, our method outperforms the other two methods in both FID and identity preservation, which showcases our ability of generating high-quality stylized results while being faithful to the original human identity." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 520, + 545, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 520, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 304, + 520, + 545, + 591 + ], + "type": "text", + "content": "Comparison with 2D portrait stylization methods. In this section, we compare our method with two state-of-the-art 2D portrait stylization methods, VToonify [53] and DCT-Net [30], to further demonstrate our ability of generating 3D-consistent and high-quality stylized results for arbitrary views." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 594, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 546, + 714 + ], + "type": "text", + "content": "Qualitative comparison. Due to the incapability of 2D portrait stylization methods to synthesize novel view results, we only make comparison under reconstructed views captured in the input video. For both VToonify and DCT-Net, frames are directly input into the trained/finetuned models released by authors to obtain the corresponding stylized images. Then we select their forward/backward results as sparse view supervision to train our models (denoted as ours-V and ours-D, respectively). As illustrated in Figure 6, VToonify and DCT-Net fail to synthesize exaggerated ge" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10133" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 86, + 261, + 125 + ], + "blocks": [ + { + "bbox": [ + 47, + 62, + 287, + 83 + ], + "lines": [ + { + "bbox": [ + 47, + 62, + 287, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 62, + 287, + 83 + ], + "type": "text", + "content": "Table 2. Comparison of FID and 3D validity with 2D portrait stylization methods." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 86, + 261, + 125 + ], + "lines": [ + { + "bbox": [ + 70, + 86, + 261, + 125 + ], + "spans": [ + { + "bbox": [ + 70, + 86, + 261, + 125 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>DCT-Net</td><td>Ours-D</td><td>VToonify</td><td>Ours-V</td></tr>
<tr><td>FID ↓</td><td>126.1</td><td>94.7</td><td>86.9</td><td>57.6</td></tr>
<tr><td>3D validity ↑</td><td>0.54</td><td>1.00</td><td>0.62</td><td>1.00</td></tr>
", + "image_path": "1761903e6a60847e1ead7f60e7ed65ae577e8ef1465cd328c2dd311aa7bbac2a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 50, + 156, + 287, + 187 + ], + "blocks": [ + { + "bbox": [ + 47, + 132, + 287, + 154 + ], + "lines": [ + { + "bbox": [ + 47, + 132, + 287, + 154 + ], + "spans": [ + { + "bbox": [ + 47, + 132, + 287, + 154 + ], + "type": "text", + "content": "Table 3. Ablation of the progressive training scheme. Results verify the effectiveness of the proposed module in each stage." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 156, + 287, + 187 + ], + "lines": [ + { + "bbox": [ + 50, + 156, + 287, + 187 + ], + "spans": [ + { + "bbox": [ + 50, + 156, + 287, + 187 + ], + "type": "table", + "html": "
<tr><td>Variants</td><td>w/o Prior</td><td>w/o GA</td><td>w/o TA</td><td>w/o PSA</td><td>full model</td></tr>
<tr><td>FID ↓</td><td>98.7</td><td>105.2</td><td>96.7</td><td>96.2</td><td>94.7</td></tr>
", + "image_path": "193c6f1095d6ac4510c065b2431be728c345235a1a2bda64982c7af942cef6d6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 198, + 287, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 198, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 46, + 198, + 287, + 293 + ], + "type": "text", + "content": "ometry effects in challenging viewpoints (e.g., side faces) and are unable to maintain 3D view consistency. Note that these extreme view results are not used as supervision in our style adaption process. On the contrary, our method can easily render style-faithful and robust results in a 3D consistent manner. This showcases the importance of learning underlying 3D structures in maintaining view-consistency of the stylized avatar." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 294, + 287, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 294, + 287, + 462 + ], + "spans": [ + { + "bbox": [ + 46, + 294, + 287, + 462 + ], + "type": "text", + "content": "Quantitative comparison. We also measure the quality of our rendering results against VToonify [53] and DCT-Net [30] using FID [14]. We use data from our Portrait360 dataset as source images and remove failure cases of the 2D methods. As shown in Table 2, both of our models produce better results with lower FID values compared with original 2D methods. To further evaluate the stylization ability of handling views from the entire 3D space, we propose to calculate 3D validity by computing the conversion rate of successfully stylized results to the whole dataset. 2D methods rely on detected facial landmarks and failed conversions can be automatically recognized. Compared to 2D methods, our method could handle more challenging poses in the entire 3D space with higher 3D validity." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 469, + 139, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 469, + 139, + 481 + ], + "spans": [ + { + "bbox": [ + 47, + 469, + 139, + 481 + ], + "type": "text", + "content": "4.2. Ablation study" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 486, + 287, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 287, + 545 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 287, + 545 + ], + "type": "text", + "content": "In addition to visualized results in Figure 3, we verify the effectiveness of the proposed module in each stage by evaluating the performance of corresponding variants of our method. The qualitative and quantitative results are shown in Figure 7 and Table 3, respectively." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 545, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 287, + 677 + ], + "type": "text", + "content": "MVS guided prior learning. We train a model without photorealistic prior learning and directly learn the spatial neural representation from stylized portrait images. It is confusing for inverse rendering to produce valid geometry and texture with unreal 3D-inconsistent stylized observations, as shown in Figure 7 (b). This indicates that the reconstruction prior is crucial for generating plausible underlying structures in 3D style adaption. 
The design of MVS guidance also helps to reconstruct more robust surface without holes brought by illumination noise in complicated real-world scenes (see Figure 7 (g))." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Progressive structure adaption (PSA). By removing PSA proposed in Section 3.3, we jointly learn the geometry and texture adaption with the full SNR network. Results in" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 69, + 354, + 118 + ], + "blocks": [ + { + "bbox": [ + 309, + 69, + 354, + 118 + ], + "lines": [ + { + "bbox": [ + 309, + 69, + 354, + 118 + ], + "spans": [ + { + "bbox": [ + 309, + 69, + 354, + 118 + ], + "type": "image", + "image_path": "8b9c6889554011bdefb35c533c4ffd26c26ed60aaa6d0dd76efacaf37bdf6f52.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 357, + 70, + 403, + 118 + ], + "blocks": [ + { + "bbox": [ + 357, + 70, + 403, + 118 + ], + "lines": [ + { + "bbox": [ + 357, + 70, + 403, + 118 + ], + "spans": [ + { + "bbox": [ + 357, + 70, + 403, + 118 + ], + "type": "image", + "image_path": "a87768f105b6969d347893f7e9a3d34fc209a24060266d421d08b339b9a35021.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 405, + 70, + 451, + 118 + ], + "blocks": [ + { + "bbox": [ + 405, + 70, + 451, + 118 + ], + "lines": [ + { + "bbox": [ + 405, + 70, + 451, + 118 + ], + "spans": [ + { + "bbox": [ + 405, + 70, + 451, + 118 + ], + "type": "image", + "image_path": "c4a065f929414ee1e25f23d23a68c4685e3b143df51c3a279fb84a72c24b112b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 453, + 71, + 500, + 118 + ], + "blocks": [ + { + "bbox": [ + 453, + 71, + 500, + 118 + ], + "lines": [ + { + "bbox": [ + 453, + 71, + 500, + 118 + ], + "spans": [ + { + "bbox": [ + 453, + 71, + 500, + 118 + ], + "type": "image", + "image_path": "bcda2f9545e0dcd647052abd390aca4ca3797ae88b3274072c1e97c5bcaf782e.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 506, + 71, + 542, + 118 + ], + "blocks": [ + { + "bbox": [ + 506, + 71, + 542, + 118 + ], + "lines": [ + { + "bbox": [ + 506, + 71, + 542, + 118 + ], + "spans": [ + { + "bbox": [ + 506, + 71, + 542, + 118 + ], + "type": "image", + "image_path": "83dfa2ac84f715e612a032aa1493abe8f9da9509a6e834406d9f004e61b128f4.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 310, + 118, + 351, + 165 + ], + "blocks": [ + { + "bbox": [ + 310, + 118, + 351, + 165 + ], + "lines": [ + { + "bbox": [ + 310, + 118, + 351, + 165 + ], + "spans": [ + { + "bbox": [ + 310, + 118, + 351, + 165 + ], + "type": "image", + "image_path": "73c4ed6b6cdad5ab56c56525eba63cca0796fef43b19f8fe5115cf4ba8d9bc16.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 360, + 118, + 399, + 165 + ], + "blocks": [ + { + "bbox": [ + 360, + 118, + 399, + 165 + ], + "lines": [ + { + "bbox": [ + 360, + 118, + 399, + 165 + ], + "spans": [ + { + 
"bbox": [ + 360, + 118, + 399, + 165 + ], + "type": "image", + "image_path": "1b40db43918fb83bbf66ea047d3c9e6bcc737afd6588aa7f67b982b6d1bd12f5.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 406, + 118, + 446, + 165 + ], + "blocks": [ + { + "bbox": [ + 406, + 118, + 446, + 165 + ], + "lines": [ + { + "bbox": [ + 406, + 118, + 446, + 165 + ], + "spans": [ + { + "bbox": [ + 406, + 118, + 446, + 165 + ], + "type": "image", + "image_path": "9f78499e2ff5d5bccfa113836857c45ac50c70fe12cd007cb314d79c5d73ee68.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 456, + 118, + 496, + 165 + ], + "blocks": [ + { + "bbox": [ + 456, + 118, + 496, + 165 + ], + "lines": [ + { + "bbox": [ + 456, + 118, + 496, + 165 + ], + "spans": [ + { + "bbox": [ + 456, + 118, + 496, + 165 + ], + "type": "image", + "image_path": "c8b7d7c0433bfca7df145d340e3d9eb0218a294f110a0d1794c1f487e89e3154.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 504, + 118, + 542, + 165 + ], + "blocks": [ + { + "bbox": [ + 504, + 118, + 542, + 165 + ], + "lines": [ + { + "bbox": [ + 504, + 118, + 542, + 165 + ], + "spans": [ + { + "bbox": [ + 504, + 118, + 542, + 165 + ], + "type": "image", + "image_path": "282cda70d8623c7120ec7daf2803a47b35aa4f9319963b6d45202ae82104e2d7.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 310, + 167, + 351, + 213 + ], + "blocks": [ + { + "bbox": [ + 310, + 167, + 351, + 213 + ], + "lines": [ + { + "bbox": [ + 310, + 167, + 351, + 213 + ], + "spans": [ + { + "bbox": [ + 310, + 167, + 351, + 213 + ], + "type": "image", + "image_path": "50c81616580c8d1b5925a3be3ecfb40c0336cc02eb13af832c60fc52069248f6.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 214, + 353, + 223 + ], + "lines": [ + { + "bbox": [ + 307, + 214, + 353, + 223 + ], + "spans": [ + { + "bbox": [ + 307, + 214, + 353, + 223 + ], + "type": "text", + "content": "(a) Full model" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 358, + 167, + 399, + 213 + ], + "blocks": [ + { + "bbox": [ + 358, + 167, + 399, + 213 + ], + "lines": [ + { + "bbox": [ + 358, + 167, + 399, + 213 + ], + "spans": [ + { + "bbox": [ + 358, + 167, + 399, + 213 + ], + "type": "image", + "image_path": "5485069b49665032a6bdb670172eb35e6f032625f5db462a43c5ad4b27175683.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 214, + 399, + 223 + ], + "lines": [ + { + "bbox": [ + 359, + 214, + 399, + 223 + ], + "spans": [ + { + "bbox": [ + 359, + 214, + 399, + 223 + ], + "type": "text", + "content": "(b) w/o Prior" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 407, + 166, + 446, + 213 + ], + "blocks": [ + { + "bbox": [ + 407, + 166, + 446, + 213 + ], + "lines": [ + { + "bbox": [ + 407, + 166, + 446, + 213 + ], + "spans": [ + { + "bbox": [ + 407, + 166, + 446, + 213 + ], + "type": "image", + "image_path": "1a4f5dd617dc920eba73afff47b112c3f9cad4552d0c58375cbf9b03af790072.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 
408, + 214, + 446, + 223 + ], + "lines": [ + { + "bbox": [ + 408, + 214, + 446, + 223 + ], + "spans": [ + { + "bbox": [ + 408, + 214, + 446, + 223 + ], + "type": "text", + "content": "(c) w/o PSA" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 456, + 167, + 496, + 213 + ], + "blocks": [ + { + "bbox": [ + 456, + 167, + 496, + 213 + ], + "lines": [ + { + "bbox": [ + 456, + 167, + 496, + 213 + ], + "spans": [ + { + "bbox": [ + 456, + 167, + 496, + 213 + ], + "type": "image", + "image_path": "4b87003ac0bfc3f40a6ac2c42dc28c5092fa07aadac5cb650862785538e218ca.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 460, + 214, + 495, + 222 + ], + "lines": [ + { + "bbox": [ + 460, + 214, + 495, + 222 + ], + "spans": [ + { + "bbox": [ + 460, + 214, + 495, + 222 + ], + "type": "text", + "content": "(d) w/o GA" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 227, + 545, + 271 + ], + "lines": [ + { + "bbox": [ + 304, + 227, + 545, + 271 + ], + "spans": [ + { + "bbox": [ + 304, + 227, + 545, + 271 + ], + "type": "text", + "content": "Figure 7. Effects of the proposed prior learning (Prior), progressive structure adaption (PSA), geometry adaption (GA), texture adaption (TA), style enhancement (SE) and MVS guidance (MVS)." + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 503, + 167, + 541, + 213 + ], + "blocks": [ + { + "bbox": [ + 503, + 167, + 541, + 213 + ], + "lines": [ + { + "bbox": [ + 503, + 167, + 541, + 213 + ], + "spans": [ + { + "bbox": [ + 503, + 167, + 541, + 213 + ], + "type": "image", + "image_path": "5e367f77429cc6fc4af0ace1dc9d73ee20867897c34325dbf50293fedbefcf44.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 506, + 214, + 541, + 222 + ], + "lines": [ + { + "bbox": [ + 506, + 214, + 541, + 222 + ], + "spans": [ + { + "bbox": [ + 506, + 214, + 541, + 222 + ], + "type": "text", + "content": "(e) w/o TA" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 281, + 545, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 281, + 545, + 327 + ], + "spans": [ + { + "bbox": [ + 304, + 281, + 545, + 327 + ], + "type": "text", + "content": "Figure 7 (c) show that simultaneously training " + }, + { + "bbox": [ + 304, + 281, + 545, + 327 + ], + "type": "inline_equation", + "content": "F_{geo}" + }, + { + "bbox": [ + 304, + 281, + 545, + 327 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 281, + 545, + 327 + ], + "type": "inline_equation", + "content": "F_{tex}" + }, + { + "bbox": [ + 304, + 281, + 545, + 327 + ], + "type": "text", + "content": " disrupts the disentanglement of each other. Progressive adaption brings more accurate surfaces and seamless textures." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 304, + 329, + 546, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 329, + 546, + 509 + ], + "spans": [ + { + "bbox": [ + 304, + 329, + 546, + 509 + ], + "type": "text", + "content": "Geometry and texture adaption. Figure 7 (d, e) verify the necessity of geometry adaption (GA) and texture adaption (TA), respectively. 
In contrast to explicit texture stylization, GA enables the internal surface to be deformed adaptively, thus making 3D portraits be fully stylized. Without TA, inferred vertex colors from the appearance field suffers noticeable artifacts, due to the inconsistent observed colors from different views. TA introduces an extra texture field that automatically decomposes albedo colors shared in 3D space, thus alleviating the texture seaming issue. Besides, we explore adding additional style enhancement (SE) on the explicit texture map extracted from the texture field, which further brings more vivid stylization effects (Figure 7 (f)). We also show the impact of the number of stylized frames used for adaption stages in the supplemental material." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 306, + 517, + 378, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 517, + 378, + 529 + ], + "spans": [ + { + "bbox": [ + 306, + 517, + 378, + 529 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "type": "text", + "content": "In this paper, we handled the challenging and on-going task of synthesizing the high-fidelity stylized 3D avatar from a portrait video under the guidance of a single style image. We showed that the naive combination of portrait style transfer and 3D reconstruction techniques does not work well in this task, and proposed a novel framework called 3DToonify that learns 3D style adaption based on spatial neural representations (SNR). We introduced a delicately-designed spatial neural network for disentangled geometry and texture adaption. We also came up with a novel progressive training scheme suitable for the SNR to accurately capture the underlying stylized 3D structures. Both qualitative and quantitative experimental results demonstrated that our method enables fine-grained 3D avatar stylization with view consistency and diverse exaggerated results." + } + ] + } + ], + "index": 34 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10134" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 286, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 286, + 112 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 286, + 112 + ], + "type": "text", + "content": "[1] OpenMvs. [EB/OL]. 
https://github.com/cdcseacave/openMVS/.6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 114, + 287, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 114, + 287, + 178 + ], + "spans": [ + { + "bbox": [ + 53, + 114, + 287, + 178 + ], + "type": "text", + "content": "[2] Rameen Abdal, Hsin-Ying Lee, Peihao Zhu, Mengei Chai, Aliaksandr Siarohin, Peter Wonka, and Sergey Tulyakov. 3davatargan: Bridging domains for personalized editable avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4552-4562, 2023. 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 180, + 287, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 180, + 287, + 223 + ], + "spans": [ + { + "bbox": [ + 53, + 180, + 287, + 223 + ], + "type": "text", + "content": "[3] Matan Atzmon and Yaron Lipman. Sal: Sign agnostic learning of shapes from raw data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2565-2574, 2020. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 224, + 287, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 287, + 289 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 287, + 289 + ], + "type": "text", + "content": "[4] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 291, + 287, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 291, + 287, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 291, + 287, + 346 + ], + "type": "text", + "content": "[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 346, + 287, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 346, + 287, + 412 + ], + "spans": [ + { + "bbox": [ + 53, + 346, + 287, + 412 + ], + "type": "text", + "content": "[6] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 413, + 287, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 413, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 413, + 287, + 456 + ], + "type": "text", + "content": "[7] Rui Chen, Yongwei Chen, Ningxin Jiao, and Kui Jia. Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. arXiv preprint arXiv:2303.13873, 2023. 
2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 458, + 287, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 458, + 287, + 512 + ], + "spans": [ + { + "bbox": [ + 53, + 458, + 287, + 512 + ], + "type": "text", + "content": "[8] Pei-Ze Chiang, Meng-Shiun Tsai, Hung-Yu Tseng, WeiSheng Lai, and Wei-Chen Chiu. Stylizing 3d scene via implicit representation and hypernetwork. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1475-1484, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 514, + 287, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 514, + 287, + 545 + ], + "spans": [ + { + "bbox": [ + 53, + 514, + 287, + 545 + ], + "type": "text", + "content": "[9] Jiankang Deng, Jia Guo, Xue Niannan, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, 2019. 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 547, + 287, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 547, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 48, + 547, + 287, + 590 + ], + "type": "text", + "content": "[10] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 591, + 287, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 591, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 48, + 591, + 287, + 635 + ], + "type": "text", + "content": "[11] Leon A Gatys, Alexander S Ecker, and Matthias Bethge. Image style transfer using convolutional neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2414-2423, 2016. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 636, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 636, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 636, + 287, + 668 + ], + "type": "text", + "content": "[12] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. arXiv preprint arXiv:2002.10099, 2020. 3, 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "text", + "content": "[13] Fangzhou Han, Shuquan Ye, Mingming He, Menglei Chai, and Jing Liao. Exemplar-based 3d portrait stylization. IEEE Transactions on Visualization and Computer Graphics, 2021. 3" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "type": "text", + "content": "[14] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 
7, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "type": "text", + "content": "[15] Yukun Huang, Jianan Wang, Ailing Zeng, He Cao, Xianbiao Qi, Yukai Shi, Zheng-Jun Zha, and Lei Zhang. Dreamwaltz: Make a scene with complex 3d animatable avatars. arXiv preprint arXiv:2305.12529, 2023. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 175, + 545, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 218 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 218 + ], + "type": "text", + "content": "[16] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 220, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 274 + ], + "type": "text", + "content": "[17] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 276, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 276, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 276, + 545, + 319 + ], + "type": "text", + "content": "[18] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG), 42(4):1-14, 2023. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 321, + 545, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 321, + 545, + 374 + ], + "spans": [ + { + "bbox": [ + 307, + 321, + 545, + 374 + ], + "type": "text", + "content": "[19] Gwanghyun Kim and Se Young Chun. Datid-3d: Diversitypreserved domain adaptation using text-to-image diffusion for 3d generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14203-14213, 2023. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 376, + 545, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 376, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 545, + 431 + ], + "type": "text", + "content": "[20] Gwanghyun Kim, Ji Ha Jang, and Se Young Chun. Podia-3d: Domain adaptation of 3d generative model across large domain gap using pose-preserved text-to-image diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 22603–22612, 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 433, + 545, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 433, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 307, + 433, + 545, + 487 + ], + "type": "text", + "content": "[21] Junho Kim, Minjae Kim, Hyeonwoo Kang, and Kwang Hee Lee. U-gat-it: Unsupervised generative attentional networks with adaptive layer-instance normalization for image-to-image translation. 
In International Conference on Learning Representations, 2020. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 489, + 545, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 489, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 307, + 489, + 545, + 520 + ], + "type": "text", + "content": "[22] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 523, + 545, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 523, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 307, + 523, + 545, + 567 + ], + "type": "text", + "content": "[23] Nikos Kolotouros, Thiemo Alldieck, Andrei Zanfir, Eduard Gabriel Bazavan, Mihai Fieraru, and Cristian Sminchisescu. Dreamhuman: Animatable 3d avatars from text. arXiv preprint arXiv:2306.09329, 2023. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 568, + 545, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 545, + 611 + ], + "type": "text", + "content": "[24] Bing Li, Yuanlue Zhu, Yitong Wang, Chia-Wen Lin, Bernard Ghanem, and Linlin Shen. Anigan: Style-guided generative adversarial networks for unsupervised anime face generation. IEEE Transactions on Multimedia, 2021. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 613, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 656 + ], + "type": "text", + "content": "[25] Tingting Liao, Hongwei Yi, Yuliang Xiu, Jiaxaing Tang, Yangyi Huang, Justus Thies, and Michael J Black. Tada! text to animatable digital avatars. arXiv preprint arXiv:2308.10899, 2023. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "text", + "content": "[26] Yiyi Liao, Katja Schwarz, Lars Mescheder, and Andreas Geiger. Towards unsupervised learning of generative models for 3d controllable image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5871-5880, 2020. 2" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10135" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "text", + "content": "[27] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 300–309, 2023. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 140, + 288, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 288, + 194 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 288, + 194 + ], + "type": "text", + "content": "[28] Jinlin Liu, Yuan Yao, Wendi Hou, Miaomiao Cui, Xuansong Xie, Changshui Zhang, and Xian-sheng Hua. Boosting semantic human matting with coarse annotations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8563-8572, 2020. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 195, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 195, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 195, + 287, + 239 + ], + "type": "text", + "content": "[29] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "spans": [ + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "type": "text", + "content": "[30] Yifang Men, Yuan Yao, Miaomiao Cui, Zhouhui Lian, and Xuansong Xie. Dct-net: domain-calibrated translation for portrait stylization. ACM Transactions on Graphics (TOG), 41(4):1-9, 2022. 1, 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 284, + 287, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 284, + 287, + 337 + ], + "spans": [ + { + "bbox": [ + 48, + 284, + 287, + 337 + ], + "type": "text", + "content": "[31] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4460-4470, 2019. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 338, + 287, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 338, + 287, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 338, + 287, + 392 + ], + "type": "text", + "content": "[32] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 2, 3, 4, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 393, + 287, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 393, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 48, + 393, + 287, + 437 + ], + "type": "text", + "content": "[33] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, 2022. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 437, + 287, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 437, + 287, + 470 + ], + "spans": [ + { + "bbox": [ + 48, + 437, + 287, + 470 + ], + "type": "text", + "content": "[34] Thu Nguyen-Phuoc, Feng Liu, and Lei Xiao. Snerf: stylized neural implicit representations for 3d scenes. arXiv preprint arXiv:2207.02363, 2022. 
2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 471, + 287, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 471, + 287, + 514 + ], + "spans": [ + { + "bbox": [ + 48, + 471, + 287, + 514 + ], + "type": "text", + "content": "[35] Thu Nguyen-Phuoc, Gabriel Schwartz, Yuting Ye, Stephen Lombardi, and Lei Xiao. Alteredavatar: Stylizing dynamic 3d avatars with fast style adaptation. arXiv preprint arXiv:2305.19245, 2023. 2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 514, + 287, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 287, + 568 + ], + "type": "text", + "content": "[36] Thu H Nguyen-Phuoc, Christian Richardt, Long Mai, Yongliang Yang, and Niloy Mitra. Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in Neural Information Processing Systems, 33:6767-6778, 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 570, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 570, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 570, + 287, + 624 + ], + "type": "text", + "content": "[37] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5589-5599, 2021. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 625, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 287, + 689 + ], + "type": "text", + "content": "[38] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "text", + "content": "[39] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning con" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 714 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "text", + "content": "tinuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 165-174, 2019. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "type": "text", + "content": "[40] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 2, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 162, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 162, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 162, + 545, + 194 + ], + "type": "text", + "content": "[41] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988, 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 195, + 545, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 195, + 545, + 259 + ], + "spans": [ + { + "bbox": [ + 307, + 195, + 545, + 259 + ], + "type": "text", + "content": "[42] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 261, + 545, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 261, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 307, + 261, + 545, + 304 + ], + "type": "text", + "content": "[43] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. ACM Transactions on graphics (TOG), 42(1):1-13, 2022. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 305, + 545, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 305, + 545, + 360 + ], + "spans": [ + { + "bbox": [ + 307, + 305, + 545, + 360 + ], + "type": "text", + "content": "[44] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 361, + 545, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 361, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 307, + 361, + 545, + 425 + ], + "type": "text", + "content": "[45] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 426, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 426, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 307, + 426, + 545, + 491 + ], + "type": "text", + "content": "[46] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 
2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 493, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 493, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 307, + 493, + 545, + 536 + ], + "type": "text", + "content": "[47] Shen Sang, Tiancheng Zhi, Guoxian Song, Minghao Liu, Chunpong Lai, Jing Liu, Xiang Wen, James Davis, and Linjie Luo. AgileAvatar: Stylized 3d avatar creation via cascaded domain bridging. 2022. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 537, + 545, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 537, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 307, + 537, + 545, + 581 + ], + "type": "text", + "content": "[48] Can Wang, Ruixiang Jiang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Nerf-art: Text-driven neural radiance fields stylization. IEEE Transactions on Visualization and Computer Graphics, 2023. 2, 3, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 582, + 545, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 582, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 307, + 582, + 545, + 624 + ], + "type": "text", + "content": "[49] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021. 2, 3, 4, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 625, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 625, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 307, + 625, + 545, + 669 + ], + "type": "text", + "content": "[50] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. arXiv preprint arXiv:2305.16213, 2023. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 670, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 714 + ], + "type": "text", + "content": "[51] Shiyao Xu, Lingzhi Li, Li Shen, Yifang Men, and Zhouhui Lian. Your3demooji: Creating personalized emojis via one-shot 3d-aware cartoon avatar synthesis. In SIGGRAPH Asia 2022 Technical Communications, pages 1-4. 2022. 2" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "10136" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 552 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[52] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Pastiche master: Exemplar-based high-resolution portrait style transfer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7693-7702, 2022. 
6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 287, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 287, + 172 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 287, + 172 + ], + "type": "text", + "content": "[53] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Vtoonify: Controllable high-resolution portrait video style transfer. arXiv preprint arXiv:2209.11224, 2022. 1, 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 174, + 287, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 174, + 287, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 174, + 287, + 228 + ], + "type": "text", + "content": "[54] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. Advances in Neural Information Processing Systems, 33:2492-2502, 2020. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 229, + 287, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 229, + 287, + 284 + ], + "spans": [ + { + "bbox": [ + 48, + 229, + 287, + 284 + ], + "type": "text", + "content": "[55] Chi Zhang, Yiwen Chen, Yijun Fu, Zhenglin Zhou, Gang Yu, Billzb Wang, Bin Fu, Tao Chen, Guosheng Lin, and Chunhua Shen. StyleAvatar3d: Leveraging image-text diffusion models for high-fidelity 3d avatar generation. arXiv preprint arXiv:2305.19012, 2023. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 285, + 287, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 285, + 287, + 339 + ], + "spans": [ + { + "bbox": [ + 48, + 285, + 287, + 339 + ], + "type": "text", + "content": "[56] Huichao Zhang, Bowen Chen, Hao Yang, Liao Qu, Xu Wang, Li Chen, Chao Long, Feida Zhu, Kang Du, and Min Zheng. Avatarverse: High-quality & stable 3d avatar creation from text and pose. arXiv preprint arXiv:2308.03610, 2023. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 341, + 287, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 341, + 287, + 406 + ], + "spans": [ + { + "bbox": [ + 48, + 341, + 287, + 406 + ], + "type": "text", + "content": "[57] Junzhe Zhang, Yushi Lan, Shuai Yang, Fangzhou Hong, Quan Wang, Chai Kiat Yeo, Ziwei Liu, and Chen Change Loy. Deformtoon3d: Deformable neural radiance fields for 3d toonification. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9144-9154, 2023. 2, 3, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 407, + 287, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 407, + 287, + 452 + ], + "spans": [ + { + "bbox": [ + 48, + 407, + 287, + 452 + ], + "type": "text", + "content": "[58] Kai Zhang, Nick Kolkin, Sai Bi, Fujun Luan, Zexiang Xu, Eli Shechtman, and Noah Snavely. Arf: Artistic radiance fields. In European Conference on Computer Vision, pages 717-733. Springer, 2022. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 453, + 287, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 453, + 287, + 496 + ], + "spans": [ + { + "bbox": [ + 48, + 453, + 287, + 496 + ], + "type": "text", + "content": "[59] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 
6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 498, + 287, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 498, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 48, + 498, + 287, + 552 + ], + "type": "text", + "content": "[60] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In Proceedings of the IEEE international conference on computer vision, pages 2223-2232, 2017. 2" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10137" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/0c8075a5-3d90-4e50-a4ef-fbf63dd9f1bc_content_list.json b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/0c8075a5-3d90-4e50-a4ef-fbf63dd9f1bc_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..43545762f966fcb628d031ee1985bef162e50e53 --- /dev/null +++ b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/0c8075a5-3d90-4e50-a4ef-fbf63dd9f1bc_content_list.json @@ -0,0 +1,1668 @@ +[ + { + "type": "text", + "text": "3DiffTecn: 3D Object Detection with Geometry-Aware Diffusion Features", + "text_level": 1, + "bbox": [ + 101, + 130, + 867, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chenfeng $\\mathrm{Xu}^{1,2}$", + "bbox": [ + 217, + 179, + 344, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ NVIDIA", + "bbox": [ + 145, + 198, + 227, + 214 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ UC Berkeley", + "bbox": [ + 246, + 198, + 359, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Huan Ling $^{1,3,4}$", + "bbox": [ + 364, + 179, + 483, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ Vector Institute", + "bbox": [ + 380, + 198, + 511, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sanja Fidler $^{1,3,4}$", + "bbox": [ + 504, + 179, + 630, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Or Litany1,5", + "bbox": [ + 651, + 180, + 750, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "4University of Toronto", + "bbox": [ + 532, + 198, + 712, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "5Technion", + "bbox": [ + 735, + 198, + 818, + 215 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3b8baaa198a92612716eb6b6b01593a412c4da7837e38b5b97e855001e7ae770.jpg", + "image_caption": [ + "Figure 1. (1) We enhance pre-trained diffusion features with 3D awareness by training a geometric ControlNet (Sec. 3.2). (2) We employ a semantic ControlNet (Sec. 3.3) to refine generative features for targeted data and downstream tasks, specifically focusing on enhancing features for 3D object detection. (3) During the inference process, we further enhance 3D detection accuracy by assembling the bounding box predictions from virtual views (Sec. 3.4)." 
+ ], + "image_footnote": [], + "bbox": [ + 89, + 243, + 282, + 438 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8cf0024d69b12af854c82a7d5572215d681f5e6617d0afb848bc2e9afca39e76.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 243, + 558, + 438 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/29d0352b60e36b7e83fb0c699e08c9a37f45a4c6a3d5db07c4cb87a67a224880.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 558, + 243, + 687, + 438 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/dd73e635593482f0cf55158f68afd9f3f7fabb6558f3a47f1583060f82e20e9f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 243, + 880, + 436 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 518, + 313, + 534 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3DiffTecnion introduces a novel method for 3D object detection from single images, utilizing a 3D-aware diffusion model for feature extraction. Addressing the resource-intensive nature of annotating large-scale 3D image data, our approach leverages pretrained diffusion models, traditionally used for 2D tasks, and adapts them for 3D detection through geometric and semantic tuning. Geometrically, we enhance the model to perform view synthesis from single images, incorporating an epipolar warp operator. This process utilizes easily accessible posed image data, eliminating the need for manual annotation. Semantically, the model is further refined on target detection data. Both stages utilize ControlNet, ensuring the preservation of original feature capabilities. Through our methodology, we obtain 3D-aware features that excel in identifying cross-view point correspondences. In 3D detection, 3DiffTecnion substantially surpasses previous benchmarks, e.g., Cube-RCNN, by $9.43\\%$ in AP3D on the Omni3D-ARkitscene dataset. Furthermore, 3DiffTecnion demonstrates robust label efficiency and generalizes well to cross-domain data, nearly matching fully-supervised models in zero-shot scenarios. Project page: https://research.nvidia.com/labs/toronto-ai/3difftection/.", + "bbox": [ + 75, + 536, + 473, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 501, + 518, + 630, + 534 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Detecting objects in 3D from a single image presents a significant challenge in computer vision, involving not only object recognition and localization but also depth and orientation prediction. This task, crucial for applications in robotics and augmented reality, demands advanced 3D reasoning from computational models.", + "bbox": [ + 496, + 547, + 890, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Training a 3D detector from scratch is resource-intensive due to the high labeling costs [5]. Recently, large self-supervised models have emerged as compelling learners for image representation [10, 16, 17]. They acquire robust semantic features that can be fine-tuned on smaller, annotated datasets. Image diffusion models, trained on internet-scale data, have proven to be particularly effective in this context [24, 46, 56]. However, these models often lack 3D awareness and exhibit a domain gap in 3D applications. Recent work has aimed to bridge this gap by lifting 2D image features to 3D and refining them for specific 3D tasks. 
NeRF-Det [54] trained a view synthesis model alongside a detection head using pretrained image feature extractors. However, this approach is constrained by the need for dense scene views and fully annotated data. Efforts in novel view synthesis using diffusion models have shown promise [7, 58]. Yet, these models are generally", + "bbox": [ + 496, + 643, + 892, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "10617", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "trained from scratch, thereby foregoing the advantages of using pretrained semantic features.", + "bbox": [ + 76, + 90, + 468, + 119 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To overcome these limitations, our work, 3DiffTecnion, introduces a novel framework that repurposes pretrained 2D diffusion models for 3D object detection (see overview Fig. 1). We enhance these models with 3D awareness through a view synthesis task, employing epipolar geometry to warp features from source images to target views. This process utilizes ControlNet [57] to maintain the integrity of the original features (See Fig. 3). Utilizing image pairs from videos, which are abundant and do not require manual annotation, our approach is scalable and efficient. To demonstrate that our approach successfully imparts 3D awareness to the model, we assess the performance of its features in establishing point correspondences across multiple views. Our results indicate that these features outperform those of the base model, both qualitatively and quantitatively. For 3D detection, 3DiffTecnion trains a standard detection head with 3D box supervision, incorporating a second ControlNet to adapt the features to specific detection tasks and domains, preserving feature quality and view synthesis capabilities. At test time, we capitalize on both geometric and semantic capabilities by generating detection proposals from multiple virtual synthesized views, which are then consolidated through Non-Maximum Suppression (NMS).", + "bbox": [ + 76, + 121, + 468, + 467 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our primary contributions are as follows: (1) We introduce a scalable technique for enhancing pretrained 2D diffusion models with 3D awareness through a novel geometric ControlNet, enhanced with an epipolar warp operator; (2) We adapt these features to a 3D detection task and target domain by introducing a second, semantic ControlNet; and (3) We integrate both view synthesis and 3D detection capabilities to further improve detection performance through ensemble prediction.", + "bbox": [ + 76, + 468, + 468, + 603 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3DiffTraction emerges as a powerful 3D detector, substantially surpassing previous benchmarks, e.g., CubERCNN, by $9.43\\%$ in AP3D on the Omni3D-ARkitscene dataset. Furthermore, 3DiffTraction demonstrates robust label efficiency, achieving a 2.28 AP3D-N improvement over previous methods trained with full supervision while using only $50\\%$ of the labels. 
3DiffTraction also exhibits the ability to generalize to cross-domain data, nearly matching the performance of previously established fully-supervised models without any tuning (zero-shot).", + "bbox": [ + 76, + 604, + 468, + 755 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related works", + "text_level": 1, + "bbox": [ + 76, + 768, + 220, + 784 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D Object Detection from Images. 3D object detection from posed images is widely explored [26, 32, 37, 51, 54]. However, assuming given camera extrinsic is not a common scenario, especially in applications such as AR/VR and mobile devices. The task of 3D detection from single images, relying solely on camera intrinsics, presents a more generalized yet significantly more challenging problem. The", + "bbox": [ + 76, + 794, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "model is required to inherently learn 3D structures and harness semantic knowledge. While representative methods [8, 21, 23, 31, 47, 50] endeavor to enforce 3D detectors to learn 3D cues from diverse geometric constraints, the dearth of semantics stemming from the limited availability of 3D datasets still impede the generalizability of 3D detectors. Brazil et al. [5], in an effort to address this issue, embarked on enhancing the dataset landscape by introducing Omni3D dataset. Rather than focusing on advancing generalizable 3D detection by increasing annotated 3D data, we propose a new paradigm, of enhancing semantic-aware diffusion features with 3D awareness.", + "bbox": [ + 496, + 90, + 890, + 271 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Diffusion Models for 2D Perception. Trained diffusion models [30, 34, 36, 39] have been shown to have internal representations suitable for dense perception tasks, particularly in the realm of image segmentation [6, 14, 45, 56]. These models demonstrate impressive label efficiency [2]. Similarly, we observe strong base performance in both 2D and 3D detection (see Tab. 3); our method also benefits from high label efficiency. Diffusion models have further been trained to perform 2D segmentation tasks [11, 22, 53]. In [1] the model is trained to output a segmentation map using an auxiliary network that outputs residual features. Similarly, we use a ControlNet to refine the diffusion model features to endow them with 3D awareness. We note that several works utilize multiple generations to achieve a more robust prediction [1], we go a step further by using our controllable view generation to ensemble predictions from multiple views. Few works have studied tasks other than segmentation. DreamTeacher [24] proposed to distil the diffusion features to an image backbone and demonstrated excellent performance when tuned on perception tasks[24]. [40] trained a diffusion model for dense depth prediction from a single image. Recently, DiffusionDet [9] proposed an interesting method for using diffusion models for 2D detection by directly denoising the bounding boxes conditioned on the target image. Diffusion features have been analyzed in [49] showing that different UNet layer activations are correlated with different level of image details. We utilize this property when choosing which UNet layer outputs to warp in our geometric conditioning. Remarkably, [46] have shown strong point correspondence ability with good robustness to view change. 
Here we demonstrate that our 3D-aware features can further boost this robustness.", + "bbox": [ + 496, + 275, + 890, + 760 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Novel View Synthesis with Diffusion Models Image synthesis has undergone a significant transformation with the advent of 2D diffusion models, as demonstrated by notable works [12, 18, 19, 28, 29, 33, 36, 38, 43, 44]. These models have extended their capabilities to the Novel View Synthesis (NVS) task, where 3DiM [52] and Zero-123 [25] model NVS of objects as a viewpoint-conditioned image-to-image translation task with diffusion models. The models are trained on a synthetic dataset with camera anno", + "bbox": [ + 496, + 763, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "10618", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/203a697340b1db1b2b4e7c605608a27e5d624f072d90d92bfd9da3671df3a767.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 80, + 88, + 232, + 186 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/2343cf8df149d8c9053de2934a7c307f260e1249d1fe1005e34627f6f10cefcc.jpg", + "image_caption": [ + "Reference Image Reference Image" + ], + "image_footnote": [], + "bbox": [ + 80, + 189, + 232, + 287 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/96db09b4bb962605c5b6c6e52e540dac82feb39a144d29bc430f4e58ee316a92.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 250, + 88, + 401, + 186 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/9e7cb19f6ac30e1bd71215a9bd27a43aab56a995fccb05c3e09f38b18c5151a9.jpg", + "image_caption": [ + "DIFT DIFT" + ], + "image_footnote": [], + "bbox": [ + 250, + 189, + 400, + 287 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/92847051b8525fb05a836ff818a925a7dd7df88d7a82e34cbf18439cbc82f8e8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 401, + 88, + 566, + 186 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6788f6c27568e89bceda9e7d76215012c964e942233bd2a4c50026138ba365df.jpg", + "image_caption": [ + "3DiffTection 3DiffTection", + "Figure 2. Visualization of semantic correspondence prediction using different features Given a Red Source Point in the left most reference image, we predict the corresponding points in the images from different camera views on the right (Blue Dot). The ground truth points are marked by Red Stars. Our method, 3DiffTecnion, is able to identify precise correspondences in challenging scenes with repetitive visual patterns." 
+ ], + "image_footnote": [], + "bbox": [ + 401, + 189, + 566, + 287 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/e136d136315cb67d4ccfb9b004c0a59ec59d4aa8d5c41648d529586361a26dbf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 571, + 88, + 723, + 186 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/eb8a0a60bea6219bc3fd531c371317c9e2993ad04d5b3bdd5256b3f300f8e730.jpg", + "image_caption": [ + "DIFT DIFT" + ], + "image_footnote": [], + "bbox": [ + 571, + 188, + 722, + 287 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/587fe0ee74bbadc427d879de26426e7b080f1eca6a37ff13ef9b3cf35b0082a0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 723, + 89, + 888, + 186 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/5693250229cf0bab211bb3f2d459157050970986ddac7c8685e12e088b78700b.jpg", + "image_caption": [ + "3DiffTcction 3DiffTcction" + ], + "image_footnote": [], + "bbox": [ + 723, + 188, + 888, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tation and demonstrate zero-shot generalization to in-the-wild images. NerfDiff [15] distills the knowledge of a 3D-aware conditional diffusion model into a Nerf. RealFusion [27] uses a diffusion model as a conditional prior with designed prompts. NeuralLift [55] uses language-guided priors to guide the novel view synthesis diffusion model. Most recently, inspired by the idea of video diffusion models [4, 20, 42], MVDream [41] adapts the attention layers to model the cross-view 3D dependency. The most relevant work to our approaches is SparseFusion [58], where authors propose to incorporate geometry priors with epipolar geometries. However, while their model is trained from scratch, in our approach, we use NVS merely as an auxiliary task to enhance the pre-trained diffusion features with 3D awareness and design the architecture for tuning a minimal number of parameters by leveraging a ControlNet.", + "bbox": [ + 75, + 378, + 472, + 619 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. 3DiffTection", + "text_level": 1, + "bbox": [ + 76, + 633, + 205, + 648 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We introduce 3DiffTlection, designed to harness diffusion model features for 3D detection. As depicted in Fig. 1, 3DiffTlection comprises three core components: 1) Instilling 3D awareness into the diffusion features by training a geometric ControlNet for view synthesis. 2) Bridging the domain and task gaps using a semantic ControlNet, which is concurrently trained with a 3D detection head on the target data distribution. 3) Amplifying 3D box predictions through a virtual view ensembling strategy. We further detail each of these steps in the subsequent sections.", + "bbox": [ + 75, + 660, + 468, + 811 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Diffusion Model as a Feature Extractor", + "text_level": 1, + "bbox": [ + 75, + 823, + 413, + 838 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recent works demonstrate that features extracted from text-to-image diffusion models, such as Stable Diffusion [36], capture rich semantics suitable for dense perception tasks, including image segmentation [56] and point correspond", + "bbox": [ + 75, + 840, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "dences [46]. In this work, our interest lies in 3D object detection. 
However, since Stable Diffusion is trained on 2D image-text pairs—a pre-training paradigm proficient in aligning textual semantics with 2D visual features—it might lack 3D awareness. We aim to explore this by examining point correspondences between views. We hypothesize that features with 3D awareness should demonstrate the capability to identify correspondences that point to the same 3D locations when provided with multi-view images.", + "bbox": [ + 496, + 377, + 890, + 513 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Following [46, 56] we employ a single forward step for feature extraction. However, unlike these works, we only input images without textual captions, given that in real-world scenarios, textual input is typically not provided for object detection. Formally, given an image $\\mathbf{x}$ , we sample a noise image $\\mathbf{x}_t$ at time $t$ , and obtain the diffusion features", + "bbox": [ + 496, + 513, + 892, + 604 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {f} = \\mathcal {F} \\left(\\mathbf {x} _ {t}; \\Theta\\right), \\mathbf {x} _ {t} = \\sqrt {\\bar {\\alpha} _ {t}} \\mathbf {x} + \\sqrt {1 - \\bar {\\alpha} _ {t}} \\epsilon_ {t}, \\epsilon_ {t} \\sim \\mathbb {N} (0, 1), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 611, + 890, + 642 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{f}$ represents the multi-scale features from the decoder module of UNet $\\mathcal{F}$ (parameterized by $\\Theta$ ), and $\\alpha_{t}$ represents a pre-defined noise schedule, satisfying $\\bar{\\alpha}_{t} = \\prod_{k=1}^{t} \\alpha_{k}$ .", + "bbox": [ + 496, + 643, + 890, + 689 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Interestingly, as illustrated in Fig. 2, the point localization of Stable Diffusion features depends on 2D appearance matching. This can lead to confusion in the presence of repeated visual patterns, indicating a deficiency in 3D spatial understanding. Given this observation, we aim to integrate 3D awareness into the diffusion features.", + "bbox": [ + 496, + 689, + 890, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Injecting 3D Awareness to Diffusion Features", + "text_level": 1, + "bbox": [ + 500, + 786, + 880, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ControlNet [57] is a powerful tool that allows the addition of conditioning into a pre-trained, static Stable Diffusion (SD) model. It has been demonstrated to support various types of dense input conditioning, such as depth and semantic images. This is achieved through the injection of conditional image features into trainable copies of the", + "bbox": [ + 496, + 809, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "10619", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/2461a98e4bd6698926a2feeb97eae2681dd765aed52ba97cef75be0dfd19cafd.jpg", + "image_caption": [ + "(a) Before" + ], + "image_footnote": [], + "bbox": [ + 138, + 89, + 264, + 333 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3c64bf0bfe55e5513cba759d69e2b2df5bb1c61abe1c205144368f59732522de.jpg", + "image_caption": [ + "(b) After", + "Figure 3. Architecture of Geometric ControlNet. Left: Original Stable Diffusion UNet encoder block. Right: We train novel view image synthesis by adding a geometric ControlNet to the original Stable Diffusion encoder blocks. 
The geometric ControlNet receives the conditional view image as an additional input. Using the camera pose, we introduce an epipolar warp operator, which warps intermediate features into the target view. With the geometric ControlNet, we significantly improve the 3D awareness of pre-trained diffusion features." + ], + "image_footnote": [], + "bbox": [ + 285, + 89, + 834, + 345 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "original SD blocks. A significant attribute of ControlNet is its ability to resist overfitting to the dataset used for tuning while preserving the original model's performance. As a result, ControlNet is well-suited for enhancing diffusion features with 3D awareness without compromising their 2D semantic quality.", + "bbox": [ + 75, + 431, + 468, + 523 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Formally, we denote one block of UNet $\\mathcal{F}$ as $\\mathcal{F}_s(\\cdot;\\Theta_s)$ parameterized by $\\Theta_s$ . In particular, the original ControlNet block copies each pre-trained Stable Diffusion module $\\mathcal{F}_s(\\cdot;\\Theta_s)$ denoted as $\\mathcal{F}_s'(\\cdot;\\Theta_s')$ , and accompanying with two zero convolutions $\\mathcal{Z}_{s1}$ and $\\mathcal{Z}_{s2}$ , parameterized by $\\Theta_{zs1}$ and $\\Theta_{zs2}$ , respectively. We slightly abuse the notation of $\\mathbf{x} \\in \\mathcal{R}^{H \\times W \\times C}$ as the arbitrary middle features of $\\mathbf{x}_t$ in $\\mathcal{F}$ . Then a ControlNet block with the corresponding frozen Stable Diffusion block is given by", + "bbox": [ + 75, + 523, + 473, + 660 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {y} _ {s} = \\mathcal {F} _ {s} (\\mathbf {x}; \\Theta_ {s}) + \\mathcal {Z} _ {s 2} \\left(\\mathcal {F} _ {s} ^ {\\prime} (\\mathbf {x} + \\mathcal {Z} _ {s 1} (\\mathbf {c}; \\Theta_ {z s 1}); \\Theta_ {s} ^ {\\prime}); \\Theta_ {z s 2}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 669, + 468, + 699 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{c} \\in \\mathcal{R}^{H \\times W \\times C}$ is the condition image feature and $\\mathbf{y}_s \\in \\mathcal{R}^{H \\times W \\times C}$ is the output.", + "bbox": [ + 75, + 699, + 468, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Epipolar warp operator. We utilize ControlNet to enhance the 3D awareness of diffusion features by training it to perform view synthesis. Specifically, we select pairs of images with known relative camera poses and train the ControlNet conditioned on the source view to produce the output view. Since the features induced by the condition in ControlNet are additive, it is a common practice to ensure alignment between these features and the noisy input features. However, the input for our view synthesis task is, by definition, not aligned with the noisy input of the target", + "bbox": [ + 75, + 750, + 472, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "view. As a solution, we propose to warp the source view features to align with the target using epipolar geometry. 
We denote the epipolar warp operator as $\\mathcal{G}(\\cdot ,T_n)$ , and our geometric ControlNet is formulated as:", + "bbox": [ + 496, + 433, + 893, + 494 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {y} _ {s} = \\mathcal {F} _ {s} (\\mathbf {x}; \\Theta_ {s}) + \\mathcal {Z} _ {s 2} \\left(\\mathcal {G} \\left(\\mathcal {F} _ {s} ^ {\\prime} (\\mathbf {x} + \\mathcal {Z} _ {s 1} (\\mathbf {c}; \\Theta_ {z s 1}); \\Theta_ {s} ^ {\\prime}), T _ {n}\\right); \\Theta_ {z s 2}\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 496, + 503, + 921, + 535 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Formally, to obtain the target novel-view image at position $(u,v)$ , we assume that relative camera extrinsic from the source view is described by $T_{n} = [[R_{n},0]^{T},[t_{n},1]^{T}]$ and the intrinsic parameters are represented as $K$ . The equation for the epipolar line is:", + "bbox": [ + 496, + 536, + 893, + 611 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nl _ {c} = K ^ {- T} \\left(\\left[ t _ {n} \\right] \\times R _ {n}\\right) K ^ {- 1} [ u, v, 1 ] ^ {T}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 570, + 621, + 890, + 640 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Here, $l_{c}$ denotes the epipolar line associated with the source conditional image. We sample a set of features along the epipolar line, denoted as $\\{\\mathbf{c}(p_i)\\}$ , where the $p_i$ are points on the epipolar line. These features are then aggregated at the target view position $(u,v)$ via a differentiable aggregator function, resulting in the updated features:", + "bbox": [ + 496, + 648, + 893, + 742 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {c} ^ {\\prime} (u, v) = \\text {a g g r e g a t o r} (\\{\\mathbf {c} (p _ {i}) \\}), \\quad p _ {i} \\sim l _ {c}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 550, + 752, + 890, + 770 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The differentiable aggregator can be as straightforward as average/max functions or something more complex like a transformer, as demonstrated in [13, 58], and $\\mathbf{c}^{\\prime}$ is the warped condition image features, i.e., the output of epipolar warp operator $\\mathcal{G}$ . The geometric warping procedure is illustrated in Fig. 3.", + "bbox": [ + 496, + 779, + 893, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Interestingly, we found it beneficial to avoid warping features across all the UNet decoder blocks. As highlighted by", + "bbox": [ + 496, + 869, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "10620", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "[48], middle-layer features in Stable Diffusion emphasize high-level semantics, while top stages capture appearance and geometry. Given the shared semantic content in novel-view synthesis, even amidst pixel deviations, we warp features only in the final two stages of Stable-Diffusion. This maintains semantic consistency while accommodating geometric warping shifts. Our geometric ControlNet notably enhances the 3D awareness of diffusion features, evident in the 3DiffTection examples in Fig. 2.", + "bbox": [ + 75, + 90, + 472, + 227 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. 
Bridging the Task and Domain Gap", + "text_level": 1, + "bbox": [ + 76, + 234, + 387, + 251 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We leverage the 3D-enhanced features for 3D detection by training a standard detection head with 3D box supervision. To further verify the efficacy of our approach in adapting diffusion features for 3D tasks, we train a 3D detection head, keeping our fine-tuned features fixed. Notably, we observe a substantial improvement compared to the baseline SD feature. We report details in Tab. 3.", + "bbox": [ + 75, + 251, + 468, + 356 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Nevertheless, we acknowledge two potential gaps. Firstly, our view synthesis tuning is conceptualized as a universal 3D feature augmentation method. Hence, it is designed to work with a vast collection of posed image pairs, which can be inexpensively gathered (e.g., from videos) without the need for costly labeling. Consequently, there might be a domain discrepancy when comparing to target data, which could originate from a smaller, fully annotated dataset. Secondly, since the features aren't specifically finetuned for detection, there is further potential for optimization towards detection, in tandem with the detection head. As before, we aim to retain the robust feature characteristics already achieved and choose to deploy a second ControlNet.", + "bbox": [ + 75, + 357, + 468, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Specifically, we freeze both the original SD and the geometric ControlNet modules. We then introduce another trainable ControlNet, which we refer to as semantic ControlNet. For our model to execute single-image 3D detection, we utilize the input image $x$ in three distinct ways. First, we extract features from it using the pretrained SD, denoted as $\\mathcal{F}(x)$ , through a single SD denoising forward step. Next, we feed it into our geometric ControlNet, represented as $\\mathcal{F}_{geo}(x,T_n)$ , with an identity pose $(T_{n} = [Id,0])$ to obtain our 3D-aware features. Lastly, we introduce it to the semantic ControlNet, denoted by $\\mathcal{F}_{sem}(x)$ , to produce trainable features fine-tuned for detection within the target data distribution. We aggregate all the features and pass them to a standard 3D detection head, represented as $\\mathcal{D}$ [5]. The semantic ControlNet is trained with 3D detection head.", + "bbox": [ + 75, + 553, + 468, + 779 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ny = \\mathcal {D} (\\mathcal {F} (x) + \\mathcal {F} _ {\\text {g e o}} (x, [ I d, 0 ]) + \\mathcal {F} _ {\\text {s e m}} (x)) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 122, + 789, + 468, + 806 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The figure overview is in the supplementary material.", + "bbox": [ + 76, + 815, + 431, + 830 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Ensemble Prediction", + "text_level": 1, + "bbox": [ + 76, + 839, + 272, + 853 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ControlNet is recognized for its ability to retain the capabilities of the pre-tuned model. As a result, our semantically tuned model still possesses view synthesis capabilities", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ties. We exploit this characteristic to introduce a test-time prediction ensembling that further enhances detection performance. 
Specifically, our box prediction $y$ is dependent on the input view. Although our detection model is trained with this pose set to the identity (i.e., no transformation), at test time, we can incorporate other viewing transformations denoted as $\\xi_{i}$ ,", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ny (\\xi) = \\mathcal {D} \\left(\\mathcal {F} (x) + \\mathcal {F} _ {\\text {g e o}} (x, \\xi) + \\mathcal {F} _ {\\text {s e m}} (x)\\right). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 549, + 207, + 890, + 224 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The final prediction is derived through a non-maximum suppression of individual view predictions:", + "bbox": [ + 496, + 233, + 890, + 263 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ny _ {f i n a l} = N M S \\left(\\left\\{y \\left(\\xi_ {i} \\right\\}\\right). \\right. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 273, + 890, + 290 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We note that our objective isn't to create a novel view at this stage but to enrich the prediction using views that are close to the original pose. The underlying intuition is that the detection and view synthesis capabilities complement each other. Certain objects might be localized more precisely when observed from a slightly altered view.", + "bbox": [ + 496, + 300, + 890, + 390 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 402, + 632, + 420 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we present a comprehensive experimental evaluation of 3DiffTlection and its constituent components. Initially, in Section 4.1, we establish 3DiffTlection as a powerful 3D detection framework, particularly when fine-tuned on a specific target dataset. We then validate its capacity for generalization to new datasets, both with and without tuning of the detection head (Section 4.2). Subsequently, we demonstrate its ability to maintain strong performance with limited labels (Section 4.3). Finally, in Section 4.4, we confirm 3DiffTlection's enhanced 3D awareness by measuring its feature correspondence accuracy. We also validate the importance of each module in our design and conclude with visualizations of our auxiliary view synthesis ability.", + "bbox": [ + 496, + 429, + 890, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets and implementation details For all our experiments, we train the geometric ControlNet on the official ARKitscene datasets [3], which provide around 450K posed low-resolution $(256\\times 256)$ images. We sample around 40K RGB images along with their intrinsics and extrinsic. Note that in the following experiments, the pretrained geometric ControlNet is kept frozen. For training 3D object detection, we use Omni3D-ARkitsscenes as our primary in-domain experiment dataset, and Omni3DSUNRGBD for our cross-dataset experiments. To evaluate the performance, we compute a mean AP3D across all categories in Omni3D-ARkitsscenes and over a range of IoU3D thresholds in [0.05, 0.10,..., 0.50], simply denoted as AP3D. We also report AP3D at IoU 15, 25, and 50 (AP3D@15, AP3D@25 and AP3D@50) as following [5]. We take the publicly available text-to-image LDM [36], Stable Diffusion as the preliminary backbone. 
Unlike previous diffusion models which require multiple images for training", + "bbox": [ + 496, + 628, + 892, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "10621", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/f09a2fb8e5a5a098cdfba533413546cab473fa9799c2a4acfb36ade4bcc7c5f0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methods | Resolution | NVS Train Views | Det. Train Views | AP3D↑ | AP3D@15↑ | AP3D@25↑ | AP3D@50↑
CubeRCNN-DLA | 256×256 | - | 1 | 31.75 | 43.10 | 34.68 | 11.07
DreamTchr-Res50 | 256×256 | - | 1 | 33.20 | 44.54 | 37.10 | 12.35
NeRF-Det-R50 | 256×256 | ≥10 | ≥10 | 33.13 | 46.81 | 36.03 | 13.58
ImVoxelNet | 256×256 | - | ≥10 | 32.09 | 46.71 | 35.62 | 11.94
3DiffTection | 256×256 | 2 | 1 | 39.22 | 50.58 | 43.18 | 16.40
CubeRCNN-DLA | 512×512 | - | 1 | 34.32 | 46.06 | 36.02 | 12.51
DreamTchr-Res50 | 512×512 | - | 1 | 36.14 | 49.82 | 40.51 | 15.48
3DiffTection | 512×512 | 2 | 1 | 43.75 | 57.13 | 47.32 | 20.30
CubeRCNN-DLA-Aug | 512×512 | - | 1 | 41.72 | 53.09 | 45.42 | 19.26
", + "bbox": [ + 81, + 66, + 890, + 223 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. 3D Object Detection Results on Omni3D-ARKitScenes testing set. 3DiffTraction significantly outperforms baselines, including CubeRCNN-DLA-Aug, which is trained with 6x more supervision data.", + "bbox": [ + 75, + 224, + 892, + 253 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "a novel-view synthesis task, we only take two views, one as the source view and another one as the target view. Moreover, we only consider two views with an overlap of less than $30\\%$ . Regarding novel-view synthesis ensemble, we use pseudo camera rotations, i.e., $\\pm 15$ deg and ensemble the predicted bounding boxes via NMS.", + "bbox": [ + 75, + 266, + 468, + 356 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Methods in comparison. CubeRCNN [5] extends Fast-RCNN [35] to 3D object detection by incorporating a cube head. In our work, we aim to provide a stronger 3D-aware image backbone, and compare it with other image backbones using the Cube-RCNN framework. Specifically, we compare with DreamTeacher [24], which distills knowledge from a Pre-trained Stable Diffusion to a lighter network, ResNet-50. We also compare with DIFT [46], which directly employs the frozen Stable Diffusion as the image feature extractor. Additionally, we evaluate methods designed for multi-view 3D detection, such as NeRF-Det [54] and ImVoxelNet [37]. While these methods typically require more images during training, we use them for single-image 3D object detection during testing.", + "bbox": [ + 75, + 361, + 470, + 571 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. 3D Object Detection on Omni3D-ARKitsscenes", + "text_level": 1, + "bbox": [ + 75, + 582, + 467, + 597 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Tab. 1, we analyze the 3D object detection performance of 3DiffTlection compared to several baseline methods. Notably, 3DiffTlection significantly outperforms CubeRCNN-DLA [5], a prior art in single-view 3D detection on the Omni3D-ARKitScenes dataset, achieving a margin of $7.4\\%$ at a resolution of $256 \\times 256$ and $9.43\\%$ at a resolution of $512 \\times 512$ on the AP3D metric. We further compare our approach to NeRF-Det-R50 [54] and ImVoxelNet [37], both of which utilize multi-view images during training (indicated in Tab. 1 as NVS Train Views and Det. Train Views). In contrast, 3DiffTlection which does not rely on multi-view images for training the detection network and uses only view-pairs for geometric network training, surpasses these methods by $6.09\\%$ and $7.13\\%$ on the AP3D metric, respectively. Additionally, we compare our approach to DreamTeacher-Res50 [24], which distills StableDiffusion feature prediction into a ResNet backbone to make it amenable for perception tasks. 3DiffTlection exceeds DreamTeacher by $6.02\\%$ and $7.61\\%$ at resolutions of $256 \\times 256$ and $512 \\times 512$ , respectively. Lastly, we eval", + "bbox": [ + 73, + 598, + 470, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "uate our model against CubeRCNN-DLA-Aug, which denotes the training of CubeRCNN on the complete Omni3D dataset, comprising 234,000 RGB images with a more robust training recipe. 
Remarkably, our model outperforms CubeRCNN-DLA-Aug by $2.03\\%$ on AP3D while using nearly 6x less data, demonstrating its data efficiency.", + "bbox": [ + 496, + 266, + 890, + 354 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We also show visualization results in Fig. 4. Compared to CubeRCNN, our proposed 3DiffTecn predicts 3D bounding boxes with better pose, localization and significantly fewer false defecions. As seen in the middle column, our model can even handle severe occlusion cases, i.e., the sofa in the middle image and the sink in the right image.", + "bbox": [ + 496, + 356, + 890, + 446 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Cross-dataset Generalization", + "text_level": 1, + "bbox": [ + 498, + 454, + 759, + 469 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To assess the capability of 3DiffTraction's geometric ControlNet to carry its 3D awareness to other datasets, we employed a 3DiffTraction model with its geometric ControlNet trained on the OMni3D-ARKitscene dataset, and conduct cross-dataset experiments on the Ommi3D-SUNRGBD dataset. We evaluate it with two settings: (1) finetune the parameters on the Omni3D-SUNRBGD dataset and test the performance on Omni3D-SUNRGBD dataset, and (2) train the parameters on the Omni3D-ARKitsscenes dataset and directly test the performance on Omni3D-SUNRGBD dataset in a zero-shot setting. The performance is shown in Tab. 2.", + "bbox": [ + 496, + 477, + 890, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the first setting (shown in the fourth column), as a baseline, we trained the 3D head using DIFT-SD features. 3DiffTecn w/o Semantic-ControlNet and w/ Semantic-ControlNet outperform DIFT-SD by $1.21\\%$ and $5.99\\%$ , respectively. We further compare our approach with CubeR-CNN. To ensure a fair comparison, we take CubeRCNN-DLA trained on Omni3D-ARKitscene datasets and fine-tuned its entire model on the Omni3D-SUNRGBD. Without any training of the geometric ControlNet on the Omni-SUNRGBD, 3DiffTecn (w/o Semantic-ControlNet) with only tuned a 3D head surpasses the fully fine-tuned CubeRCNN-DLA by $0.39\\%$ . Then, we reintegrate the semantic ControlNet and jointly train it with the 3D head. This yields a performance boost of $5.09\\%$ . These results indicate that even without training the geometric ControlNet in the target domain, the semantic ControlNet adeptly adapts features for perception tasks.", + "bbox": [ + 496, + 643, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "10622", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/13c06a72aa3f828faddcd127c5c83c5b8f25e94f6763ddf57d9a25f9330c20d8.jpg", + "image_caption": [ + "Figure 4. Qualitative results on Omni3D-ARKitScene 3D Detection. In contrast to Cube-RCNN (bottom), our approach (top) accurately predicts both the box class and 3D locations. The bird's-eye-view visualization further demonstrates that our predictions surpass the baseline performance of Cube-RCNN." + ], + "image_footnote": [], + "bbox": [ + 89, + 80, + 883, + 344 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/f37e72565568a189bfca093356410971598e1df2243858b22021a16fe4b91544.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methods | Backbone | Pretrained on ARKit | Tuned on SUNRGBD | Zero-shot (w/o 2D GT) | Zero-shot (w/ 2D GT)
DIFT-SD | StableDiff | ✗ | 21.92 | 16.74 | 25.31
CubeRCNN | DLA34 | ✓ | 22.72 | 16.81 | 25.05
3DiffTection | StableDiff+Geo-Ctr | ✓ | 23.11 | 17.37 | 26.94
3DiffTection | StableDiff+Geo-Ctr+Sem-Ctr | ✓ | 27.81 | 22.64 | 30.14
", + "bbox": [ + 78, + 388, + 890, + 460 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Cross-domain experiment on the Omni3D-SUNRGBD dataset. The \"Pre-trained on ARKit\" denotes we pre-train the backbone on Omni3D-ARkitsscenes. For CubeCNN, we pre-train it with 3D detection supervision. For all zero-shot experiments, the methods are first trained on Omni3D-ARKitscenes for 3D detection and then directly tested on Omni3D-SUNRGBD dataset. \"2D GT\" means we use ground-truth 2D bounding box to crop ROI image features. The results are reported for overlapped 14 classes between Omni3D-SUNRGBD and Omni3D-ARKiSscenes dataset.", + "bbox": [ + 75, + 463, + 893, + 532 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To further demonstrate the transferrability of 3DiffTec-tion, we train the models for 3D detection on Omni3D-ARkitsscenes and directly test it on Omni3D-SUNRGBD dataset without any further tuning. The results are shown in Column 3 and column 4 of Tab. 2. We observe that if we have ground truth 2D bounding boxes, 3DiffTec-tion with semantic-ControlNet can even achieve the best performance. Without ground truth 2D bounding boxes, 3DiffTec-tion is also able to outperform DIFT-SD and CubeR-CNN by $5.90\\%$ and $5.83\\%$ , respectively. These results demonstrate the notable transferrability of our 3DiffTec-tion.", + "bbox": [ + 75, + 544, + 472, + 710 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Label Efficiency", + "text_level": 1, + "bbox": [ + 76, + 724, + 238, + 742 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We hypothesize that our usage of semantic ControlNet for tuning 3DiffTlection towards a target dataset should maintain high label efficiency. We test this by using $50\\%$ and $10\\%$ labels from the Omni3D-ARKitscene datasets. The results are shown in Tab. ?? of supplementary materials. In low-data regime (for both $50\\%$ and $10\\%$ label setting), 3DiffTlection demonstrates significantly better performance, and more modest degradation than baselines. Notably, even with $50\\%$ of the labels, our proposed 3DiffTlection achieves 2.28 AP3D-N improvement over previous methods trained", + "bbox": [ + 75, + 750, + 472, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "on $100\\%$ label. Additionally, when tuning only the 3D head 3DiffTraction performs better than CubeRCNN and DreamTeacher with tuning all parameters.", + "bbox": [ + 496, + 544, + 893, + 592 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Analysis and Ablation", + "text_level": 1, + "bbox": [ + 498, + 603, + 705, + 619 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Feature correspondence fidelity (Fig. 2). As described in 3.1, we conducted a feature correspondence experiment. We hypothesize that if our model is 3D aware, it should be find 3D correspondences. As can be seen, our method yields a more accurate point-matching result, primarily because our geometric ControlNet is trained to infer 3D correspondences through our Epipolar warp operator to successfully generate novel views. To provide further insights, we visualize a heatmap demonstrating the similarity of target image features to the reference key points. Notably, our 3DiffTec-tion features exhibit better concentration around the target point. Furthermore, we quantitatively evaluate the correspondence performance on ScanNet dataset, which is never accessed by both our 3DiffTec-tion and DIFT for fair comparison. 
The experiment results are shown in supplementary material. The results also demonstrate our hypothesis. Novel-view synthesis visualization (Fig. 5). To further validate our geometric ControlNet ability to maintain geo", + "bbox": [ + 496, + 627, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "10623", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/9f83f5b0a79734c7981cf839405b18e42c276bef80e578550f625c07bcf6df81.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Backbone | NVS Train Views | Geo-Ctr | Sem-Ctr | NV-Ensemble | AP2D | AP3D↑ | AP3D@15↑ | AP3D@25↑ | AP3D@50↑
VIT-B (MAE) | - | - | - | - | 26.14 | 25.23 | 36.04 | 28.64 | 8.11
Res50 (DreamTchr) | - | - | - | - | 25.27 | 24.36 | 34.16 | 25.97 | 7.93
StableDiff. (DIFT) | - | - | - | - | 29.35 | 28.86 | 40.18 | 32.07 | 8.86
StableDiff. (Ours) | 1 | ✓ | - | - | 29.51 | 26.05 | 35.81 | 29.86 | 6.95
StableDiff. (Ours) | 2 | ✓ | - | - | 30.16 | 31.20 | 41.87 | 33.53 | 10.14
StableDiff. (Ours) | 2 | ✓ | ✓ | - | 37.12 | 38.72 | 50.38 | 42.88 | 16.18
StableDiff. (Ours) | 2 | ✓ | ✓ | ✓ | 37.19 | 39.22 | 50.58 | 43.18 | 16.40
", + "bbox": [ + 81, + 88, + 890, + 203 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3. Analysis of 3DiffTraction Modules on Omni3D-ARKitScenes testing set. We first compare different backbones by freezing the backbone and only training the 3D detection head. Then, we perform ablative studies on each module of our architecture systematically. Starting with the baseline vanilla stable diffusion model, we incrementally incorporate improvements: Geometry-ControlNet (Geo-Ctr), the number of novel view synthesis training views (NVS Train Views), Semantic-ControlNet (Sem-Ctr), and the novel view synthesis ensemble (NV-Ensemble).", + "bbox": [ + 75, + 205, + 892, + 273 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/417ef0d839acf2219053d702644a9ac19cae7a319ee9975164eccd5cc4b20fcb.jpg", + "image_caption": [ + "Condition Image" + ], + "image_footnote": [], + "bbox": [ + 86, + 282, + 210, + 378 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/81b9c178890e7d0553a954e3296adfc78c7ef4b709f53e323f0abd3689820c72.jpg", + "image_caption": [ + "Generated Image", + "Figure 5. Novel-view synthesis visualization on Omni3D-ARKitScenes testing set. Our model with Geometry-ControlNet synthesizes realistic novel views from a single input image." + ], + "image_footnote": [], + "bbox": [ + 222, + 282, + 346, + 378 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6abb8442114644d6cab971414207c15ea3edd1dff62051d6c35abec7b01ce5e6.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 357, + 282, + 480, + 378 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/001f4ad4fe0b28a9e436ae73b5fc674027538205da10fa7997e49a38690c9fd1.jpg", + "image_caption": [ + "Condition Image" + ], + "image_footnote": [], + "bbox": [ + 490, + 282, + 616, + 378 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/261d30b2ce440669a5438b36dd9bd408060bc87a94fd82d846f51a8a12a5eede.jpg", + "image_caption": [ + "Generated Image" + ], + "image_footnote": [], + "bbox": [ + 625, + 282, + 750, + 378 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9991777c9e65312f6265bae4da6b6e6d134a1c580f45b177dc374710e515af60.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 759, + 282, + 883, + 378 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "metric consistency of the source view content, we visualize novel-view synthesis results. The results demonstrate that our proposed epipolar warp operator is effective in synthesizing the scene with accurate geometry and layout compared to the ground truth images. We note that scene-level NVS from a single image is a challenging task, and we observe that our model may introduce artifacts. While enhancing performance is an interesting future work, here we utilize NVS as an auxiliary task which is demonstrated to effectively enhance our model's 3D awareness.", + "bbox": [ + 75, + 435, + 468, + 585 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3DiffTraction modules. We analyze the unique modules and design choices in 3DiffTraction: the Stable Diffusion backbone, geometric and semantic ControlNets targeting NVs and detection, and the multi-view prediction ensemble. All results are reported using the Omni3D-ARKitscenes in Tab. 3. We first validate our choice of using a Stable Diffusion backbone. While diffusion features excel in 2D segmentation tasks [24, 56], they have not been tested in 3D detection. 
We analyze this choice independently from the other improvements by keeping the backbone frozen and only training the 3D detection head. The vanilla Stable Diffusion features achieve a $28.86\\%$ AP3D, exceeding CubeRCNN-VIT-B (MAE pretrained) by $3.63\\%$ and ResNet-50 DreamTeacher by $4.5\\%$ in AP30. This performance is mirrored in AP2D results, affirming Stable Diffusion's suitability for perception tasks. Our geometric ControlNet, is aimed at instilling 3D awareness via NVS training. A performance boost of $2.34\\%$ on AP3D and $0.81\\%$ on AP2D indicates that the geometric ControlNet imparts 3D awareness knowledge while preserving its 2D knowl", + "bbox": [ + 75, + 598, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "edge. To ensure our improvement is attributed to our view synthesis training, we limited the geometric ControlNet to single-view data by setting the source and target views to be identical (denoted by '1' in the NVs train view column of Tab. 3), which reduces the training to be denoising training [6]. This indicate a $2.81\\%$ decrease in AP3D compared to the standard Stable Diffusion, affirming our hypothesis. Further, the semantic ControlNet, co-trained with the 3D detection head enhances both AP2D and AP3D by around $7\\%$ confirming its efficacy in adapting the feature for optimal use by the detection head. Lastly, using NVS-ensemble results in additional $0.5\\%$ increase in AP3D.", + "bbox": [ + 496, + 435, + 890, + 614 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion and Limitations", + "text_level": 1, + "bbox": [ + 500, + 632, + 756, + 648 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3DiffTraction, utilizing a 3D-aware diffusion model, enables efficient 3D detection from single images, overcoming large-scale data annotation challenges. With its geometric and semantic tuning strategies, it surpasses previous benchmarks, showing high label efficiency and cross-domain adaptability. 3DiffTraction has limitations, including the need for image pairs with accurate camera poses and challenges in handling dynamic objects from in-the-wild videos. Additionally, its use of the Stable Diffusion architecture demands substantial memory and runtime, achieving about 7.5 fps on a 3090Ti GPU. Suitable for offline tasks, it requires further optimization for online detection.", + "bbox": [ + 496, + 657, + 890, + 839 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. Or Litany is a Taub fellow and is supported by the Azrieli Foundation Early Career Faculty Fellowship. We thank Qianqian Wang, David Acuna, and Jonah Philion for the insightful discussion.", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "10624", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Tomer Amit, Tal Shaharbany, Eliya Nachmani, and Lior Wolf. Segdiff: Image segmentation with diffusion probabilistic models. arXiv preprint arXiv:2112.00390, 2021. 2", + "[2] Dmitry Baranchuk, Andrey Voynov, Ivan Rubachev, Valentin Khrulkov, and Artem Babenko. Label-efficient semantic segmentation with diffusion models. In International Conference on Learning Representations, 2022. 
2", + "[3] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Tal Dimry, Yuri Feigin, Peter Fu, Thomas Gebauer, Brandon Joffe, Daniel Kurz, Arik Schwartz, and Elad Shulman. ARK-scenes - a diverse real-world dataset for 3d indoor scene understanding using mobile RGB-d data. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. 5", + "[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3", + "[5] Garrick Brazil, Abhinav Kumar, Julian Straub, Nikhila Ravi, Justin Johnson, and Georgia Gkioxari. Omni3d: A large benchmark and model for 3d object detection in the wild, 2023. 1, 2, 5, 6", + "[6] Emmanuel Asiedu Brempong, Simon Kornblith, Ting Chen, Niki Parmar, Matthias Minderer, and Mohammad Norouzi. Denoising pretraining for semantic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4175-4186, 2022. 2, 8", + "[7] Eric R. Chan, Koki Nagano, Matthew A. Chan, Alexander W. Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3d-aware diffusion models, 2023. 1", + "[8] Hansheng Chen, Yuyao Huang, Wei Tian, Zhong Gao, and Lu Xiong. Monorun: Monocular 3d object detection by reconstruction and uncertainty propagation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10379-10388, 2021. 2", + "[9] Shoufa Chen, Peize Sun, Yibing Song, and Ping Luo. Diffusional: Diffusion model for object detection. ICCV, 2023. 2", + "[10] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International conference on machine learning, pages 1597-1607. PMLR, 2020. 1", + "[11] Ting Chen, Lala Li, Saurabh Saxena, Geoffrey Hinton, and David J Fleet. A generalist framework for panoptic segmentation of images and videos. arXiv preprint arXiv:2210.06366, 2022. 2", + "[12] Prafulla Dhariwal and Alexander Quinn Nichol. Diffusion models beat GANs on image synthesis. In Advances in Neural Information Processing Systems, 2021. 2", + "[13] Yilun Du, Cameron Smith, Ayush Tewari, and Vincent Sitzmann. Learning to render novel views from wide-baseline stereo pairs, 2023. 4" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Alexandros Graikos, Nikolay Malkin, Nebojsa Jojic, and Dimitris Samaras. Diffusion models as plug-and-play priors. Advances in Neural Information Processing Systems, 35:14715-14728, 2022. 2", + "[15] Jiatao Gu, Alex Trevithick, Kai-En Lin, Josh Susskind, Christian Theobalt, Lingjie Liu, and Ravi Ramamoorthi. Nerfdiff: Single-image view synthesis with nef-guided distillation from 3d-aware diffusion. In International Conference on Machine Learning, 2023. 3", + "[16] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 1", + "[17] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners, 2021. 
1", + "[18] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, 2020. 2", + "[19] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. arXiv preprint arXiv:2106.15282, 2021. 2", + "[20] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P. Kingma, Ben Poole, Mohammad Norouzi, David J. Fleet, and Tim Salimans. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 3", + "[21] Siyuan Huang, Siyuan Qi, Yinxue Xiao, Yixin Zhu, Ying Nian Wu, and Song-Chun Zhu. Cooperative holistic scene understanding: Unifying 3d object, layout, and camera pose estimation. In Advances in Neural Information Processing Systems, pages 206-217, 2018. 2", + "[22] Boah Kim, Yujin Oh, and Jong Chul Ye. Diffusion adversarial representation learning for self-supervised vessel segmentation. In The Eleventh International Conference on Learning Representations, 2023. 2", + "[23] Nilesh Kulkarni, Ishan Misra, Shubham Tulsiani, and Abhinav Gupta. 3d-relnet: Joint object and relational network for 3d prediction. 2019. 2", + "[24] Daiqing Li, Huan Ling, Amlan Kar, David Acuna, Seung Wook Kim, Karsten Kreis, Antonio Torralba, and Sanja Fidler. Dreamteacher: Pretraining image backbones with deep generative models, 2023. 1, 2, 6, 8", + "[25] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object, 2023. 2", + "[26] Yingfei Liu, Tiancai Wang, Xiangyu Zhang, and Jian Sun. Petr: Position embedding transformation for multi-view 3d object detection. In European Conference on Computer Vision, pages 531-548. Springer, 2022. 2", + "[27] Luke Melas-Kyriazi, Christian Rupprecht, Iro Laina, and Andrea Vedaldi. Realfusion: 360 reconstruction of any object from a single image. In CVPR, 2023. 3", + "[28] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "10625", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 2", + "[29] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In International Conference on Machine Learning, 2021. 2", + "[30] Alexander Quinn Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. In Proceedings of the 39th International Conference on Machine Learning, pages 16784-16804. PMLR, 2022. 2", + "[31] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang. Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[32] Jinhyung Park, Chenfeng Xu, Shijia Yang, Kurt Keutzer, Kris M. Kitani, Masayoshi Tomizuka, and Wei Zhan. Time will tell: New outlooks and a baseline for temporal multiview 3d object detection. 
In The Eleventh International Conference on Learning Representations, 2023. 2", + "[33] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 2", + "[34] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 2", + "[35] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2015. 6", + "[36] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2, 3, 5", + "[37] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2397-2406, 2022. 2, 6", + "[38] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S. Sara Mahdavi, Rapha Gontijo Lopes, Tim Salimans, Jonathan Ho, David J Fleet, and Mohammad Norouzi. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022. 2", + "[39] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 2" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[40] Saurabh Saxena, Abhishek Kar, Mohammad Norouzi, and David J. Fleet. Monocular depth estimation using diffusion models, 2023. 2", + "[41] Yichun Shi, Peng Wang, Jianglong Ye, Long Mai, Kejie Li, and Xiao Yang. Mvdream: Multi-view diffusion for 3d generation. arXiv:2308.16512, 2023. 3", + "[42] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv:2209.14792, 2022.3", + "[43] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, 2015. 2", + "[44] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In International Conference on Learning Representations, 2021. 2", + "[45] Weimin Tan, Siyuan Chen, and Bo Yan. Diffss: Diffusion model for few-shot semantic segmentation. arXiv preprint arXiv:2307.00773, 2023. 2", + "[46] Luming Tang, Menglin Jia, Qianqian Wang, Cheng Perng Phoo, and Bharath Hariharan. Emergent correspondence from image diffusion, 2023. 1, 2, 3, 6", + "[47] Shubham Tulsiani, Saurabh Gupta, David Fouhey, Alexei A. Efros, and Jitendra Malik. Factoring shape, pose, and layout from the 2d image of a 3d scene. 
In Computer Vision and Pattern Recognition (CVPR), 2018. 2", + "[48] Narek Tumanyan, Michal Geyer, Shai Bagon, and Tali Dekel. Plug-and-play diffusion features for text-driven image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1921-1930, 2023. 5", + "[49] Narek Tumanyan, Michal Geyer, Shai Bagon, and Tali Dekel. Plug-and-play diffusion features for text-driven image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1921-1930, 2023. 2", + "[50] Tai Wang, ZHU Xinge, Jiangmiao Pang, and Dahua Lin. Probabilistic and geometric depth: Detecting objects in perspective. In Conference on Robot Learning, pages 1475-1485. PMLR, 2022. 2", + "[51] Yue Wang, Vitor Campagnolo Guizilini, Tianyuan Zhang, Yilun Wang, Hang Zhao, and Justin Solomon. Detr3d: 3d object detection from multi-view images via 3d-to-2d queries. In Conference on Robot Learning, pages 180–191. PMLR, 2022. 2", + "[52] Daniel Watson, William Chan, Ricardo Martin-Brualla, Jonathan Ho, Andrea Tagliasacchi, and Mohammad Norouzi. Novel view synthesis with diffusion models. arXiv preprint arXiv:2210.04628, 2022. 2", + "[53] Julia Wolleb, Robin Sandkuhler, Florentin Bieder, Philippe Valmaggia, and Philippe C Cattin. Diffusion models for implicit image segmentation ensembles. In International Conference on Medical Imaging with Deep Learning, pages 1336-1348. PMLR, 2022. 2" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10626", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[54] Chenfeng Xu, Bichen Wu, Ji Hou, Sam Tsai, Ruilong Li, Jialiang Wang, Wei Zhan, Zijian He, Peter Vajda, Kurt Keutzer, and Masayoshi Tomizuka. Nerf-det: Learning geometry-aware volumetric representation for multi-view 3d object detection, 2023. 1, 2, 6", + "[55] Dejia Xu, Yifan Jiang, Peihao Wang, Zhiwen Fan, Yi Wang, and Zhangyang Wang. Neurallift-360: Lifting an in-the-wild 2d photo to a 3d object with $360^{\\circ}$ views. 2022. 3", + "[56] Jiarui Xu, Sifei Liu, Arash Vahdat, Wonmin Byeon, Xiaolong Wang, and Shalini De Mello. Open-vocabulary panoptic segmentation with text-to-image diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2955-2966, 2023. 1, 2, 3, 8", + "[57] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models, 2023. 2, 3", + "[58] Zhizhuo Zhou and Shubham Tulsiani. Sparsefusion: Distilling view-conditioned diffusion for 3d reconstruction. In CVPR, 2023. 
1, 3, 4" + ], + "bbox": [ + 78, + 90, + 468, + 373 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "10627", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/0c8075a5-3d90-4e50-a4ef-fbf63dd9f1bc_model.json b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/0c8075a5-3d90-4e50-a4ef-fbf63dd9f1bc_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e951d17488c842514395f1a070850fb8ba454605 --- /dev/null +++ b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/0c8075a5-3d90-4e50-a4ef-fbf63dd9f1bc_model.json @@ -0,0 +1,2356 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.102, + 0.131, + 0.868, + 0.152 + ], + "angle": 0, + "content": "3DiffTecn: 3D Object Detection with Geometry-Aware Diffusion Features" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.18, + 0.346, + 0.199 + ], + "angle": 0, + "content": "Chenfeng \\(\\mathrm{Xu}^{1,2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.147, + 0.199, + 0.228, + 0.215 + ], + "angle": 0, + "content": "\\(^{1}\\)NVIDIA" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.199, + 0.361, + 0.217 + ], + "angle": 0, + "content": "\\(^{2}\\)UC Berkeley" + }, + { + "type": "text", + "bbox": [ + 0.366, + 0.18, + 0.484, + 0.199 + ], + "angle": 0, + "content": "Huan Ling\\(^{1,3,4}\\)" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.199, + 0.513, + 0.217 + ], + "angle": 0, + "content": "\\(^{3}\\)Vector Institute" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.18, + 0.632, + 0.199 + ], + "angle": 0, + "content": "Sanja Fidler\\(^{1,3,4}\\)" + }, + { + "type": "text", + "bbox": [ + 0.653, + 0.181, + 0.751, + 0.199 + ], + "angle": 0, + "content": "Or Litany1,5" + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.199, + 0.714, + 0.217 + ], + "angle": 0, + "content": "4University of Toronto" + }, + { + "type": "text", + "bbox": [ + 0.736, + 0.199, + 0.819, + 0.216 + ], + "angle": 0, + "content": "5Technion" + }, + { + "type": "image", + "bbox": [ + 0.091, + 0.244, + 0.283, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.244, + 0.56, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.56, + 0.244, + 0.688, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.688, + 0.244, + 0.882, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.451, + 0.893, + 0.507 + ], + "angle": 0, + "content": "Figure 1. (1) We enhance pre-trained diffusion features with 3D awareness by training a geometric ControlNet (Sec. 3.2). (2) We employ a semantic ControlNet (Sec. 3.3) to refine generative features for targeted data and downstream tasks, specifically focusing on enhancing features for 3D object detection. 
(3) During the inference process, we further enhance 3D detection accuracy by assembling the bounding box predictions from virtual views (Sec. 3.4)." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.52, + 0.314, + 0.535 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.537, + 0.474, + 0.884 + ], + "angle": 0, + "content": "3DiffTecnion introduces a novel method for 3D object detection from single images, utilizing a 3D-aware diffusion model for feature extraction. Addressing the resource-intensive nature of annotating large-scale 3D image data, our approach leverages pretrained diffusion models, traditionally used for 2D tasks, and adapts them for 3D detection through geometric and semantic tuning. Geometrically, we enhance the model to perform view synthesis from single images, incorporating an epipolar warp operator. This process utilizes easily accessible posed image data, eliminating the need for manual annotation. Semantically, the model is further refined on target detection data. Both stages utilize ControlNet, ensuring the preservation of original feature capabilities. Through our methodology, we obtain 3D-aware features that excel in identifying cross-view point correspondences. In 3D detection, 3DiffTecnion substantially surpasses previous benchmarks, e.g., Cube-RCNN, by \\(9.43\\%\\) in AP3D on the Omni3D-ARkitscene dataset. Furthermore, 3DiffTecnion demonstrates robust label efficiency and generalizes well to cross-domain data, nearly matching fully-supervised models in zero-shot scenarios. Project page: https://research.nvidia.com/labs/toronto-ai/3difftection/." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.52, + 0.631, + 0.535 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.549, + 0.892, + 0.64 + ], + "angle": 0, + "content": "Detecting objects in 3D from a single image presents a significant challenge in computer vision, involving not only object recognition and localization but also depth and orientation prediction. This task, crucial for applications in robotics and augmented reality, demands advanced 3D reasoning from computational models." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.893, + 0.903 + ], + "angle": 0, + "content": "Training a 3D detector from scratch is resource-intensive due to the high labeling costs [5]. Recently, large self-supervised models have emerged as compelling learners for image representation [10, 16, 17]. They acquire robust semantic features that can be fine-tuned on smaller, annotated datasets. Image diffusion models, trained on internet-scale data, have proven to be particularly effective in this context [24, 46, 56]. However, these models often lack 3D awareness and exhibit a domain gap in 3D applications. Recent work has aimed to bridge this gap by lifting 2D image features to 3D and refining them for specific 3D tasks. NeRF-Det [54] trained a view synthesis model alongside a detection head using pretrained image feature extractors. However, this approach is constrained by the need for dense scene views and fully annotated data. Efforts in novel view synthesis using diffusion models have shown promise [7, 58]. 
Yet, these models are generally" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10617" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "trained from scratch, thereby foregoing the advantages of using pretrained semantic features." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.122, + 0.47, + 0.468 + ], + "angle": 0, + "content": "To overcome these limitations, our work, 3DiffTecnion, introduces a novel framework that repurposes pretrained 2D diffusion models for 3D object detection (see overview Fig. 1). We enhance these models with 3D awareness through a view synthesis task, employing epipolar geometry to warp features from source images to target views. This process utilizes ControlNet [57] to maintain the integrity of the original features (See Fig. 3). Utilizing image pairs from videos, which are abundant and do not require manual annotation, our approach is scalable and efficient. To demonstrate that our approach successfully imparts 3D awareness to the model, we assess the performance of its features in establishing point correspondences across multiple views. Our results indicate that these features outperform those of the base model, both qualitatively and quantitatively. For 3D detection, 3DiffTecnion trains a standard detection head with 3D box supervision, incorporating a second ControlNet to adapt the features to specific detection tasks and domains, preserving feature quality and view synthesis capabilities. At test time, we capitalize on both geometric and semantic capabilities by generating detection proposals from multiple virtual synthesized views, which are then consolidated through Non-Maximum Suppression (NMS)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.469, + 0.47, + 0.604 + ], + "angle": 0, + "content": "Our primary contributions are as follows: (1) We introduce a scalable technique for enhancing pretrained 2D diffusion models with 3D awareness through a novel geometric ControlNet, enhanced with an epipolar warp operator; (2) We adapt these features to a 3D detection task and target domain by introducing a second, semantic ControlNet; and (3) We integrate both view synthesis and 3D detection capabilities to further improve detection performance through ensemble prediction." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.605, + 0.47, + 0.756 + ], + "angle": 0, + "content": "3DiffTraction emerges as a powerful 3D detector, substantially surpassing previous benchmarks, e.g., CubERCNN, by \\(9.43\\%\\) in AP3D on the Omni3D-ARkitscene dataset. Furthermore, 3DiffTraction demonstrates robust label efficiency, achieving a 2.28 AP3D-N improvement over previous methods trained with full supervision while using only \\(50\\%\\) of the labels. 3DiffTraction also exhibits the ability to generalize to cross-domain data, nearly matching the performance of previously established fully-supervised models without any tuning (zero-shot)." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.77, + 0.222, + 0.785 + ], + "angle": 0, + "content": "2. Related works" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.795, + 0.468, + 0.901 + ], + "angle": 0, + "content": "3D Object Detection from Images. 3D object detection from posed images is widely explored [26, 32, 37, 51, 54]. However, assuming given camera extrinsic is not a common scenario, especially in applications such as AR/VR and mobile devices. 
The task of 3D detection from single images, relying solely on camera intrinsics, presents a more generalized yet significantly more challenging problem. The" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.272 + ], + "angle": 0, + "content": "model is required to inherently learn 3D structures and harness semantic knowledge. While representative methods [8, 21, 23, 31, 47, 50] endeavor to enforce 3D detectors to learn 3D cues from diverse geometric constraints, the dearth of semantics stemming from the limited availability of 3D datasets still impede the generalizability of 3D detectors. Brazil et al. [5], in an effort to address this issue, embarked on enhancing the dataset landscape by introducing Omni3D dataset. Rather than focusing on advancing generalizable 3D detection by increasing annotated 3D data, we propose a new paradigm, of enhancing semantic-aware diffusion features with 3D awareness." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.276, + 0.892, + 0.761 + ], + "angle": 0, + "content": "Diffusion Models for 2D Perception. Trained diffusion models [30, 34, 36, 39] have been shown to have internal representations suitable for dense perception tasks, particularly in the realm of image segmentation [6, 14, 45, 56]. These models demonstrate impressive label efficiency [2]. Similarly, we observe strong base performance in both 2D and 3D detection (see Tab. 3); our method also benefits from high label efficiency. Diffusion models have further been trained to perform 2D segmentation tasks [11, 22, 53]. In [1] the model is trained to output a segmentation map using an auxiliary network that outputs residual features. Similarly, we use a ControlNet to refine the diffusion model features to endow them with 3D awareness. We note that several works utilize multiple generations to achieve a more robust prediction [1], we go a step further by using our controllable view generation to ensemble predictions from multiple views. Few works have studied tasks other than segmentation. DreamTeacher [24] proposed to distil the diffusion features to an image backbone and demonstrated excellent performance when tuned on perception tasks[24]. [40] trained a diffusion model for dense depth prediction from a single image. Recently, DiffusionDet [9] proposed an interesting method for using diffusion models for 2D detection by directly denoising the bounding boxes conditioned on the target image. Diffusion features have been analyzed in [49] showing that different UNet layer activations are correlated with different level of image details. We utilize this property when choosing which UNet layer outputs to warp in our geometric conditioning. Remarkably, [46] have shown strong point correspondence ability with good robustness to view change. Here we demonstrate that our 3D-aware features can further boost this robustness." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Novel View Synthesis with Diffusion Models Image synthesis has undergone a significant transformation with the advent of 2D diffusion models, as demonstrated by notable works [12, 18, 19, 28, 29, 33, 36, 38, 43, 44]. These models have extended their capabilities to the Novel View Synthesis (NVS) task, where 3DiM [52] and Zero-123 [25] model NVS of objects as a viewpoint-conditioned image-to-image translation task with diffusion models. 
The models are trained on a synthetic dataset with camera anno" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "10618" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.233, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.19, + 0.233, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.098, + 0.29, + 0.198, + 0.308 + ], + "angle": 0, + "content": "Reference Image Reference Image" + }, + { + "type": "image", + "bbox": [ + 0.25, + 0.089, + 0.402, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.25, + 0.19, + 0.401, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.296, + 0.29, + 0.331, + 0.307 + ], + "angle": 0, + "content": "DIFT DIFT" + }, + { + "type": "image", + "bbox": [ + 0.402, + 0.089, + 0.568, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.402, + 0.19, + 0.568, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.436, + 0.29, + 0.514, + 0.307 + ], + "angle": 0, + "content": "3DiffTection 3DiffTection" + }, + { + "type": "image", + "bbox": [ + 0.572, + 0.089, + 0.724, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.572, + 0.189, + 0.723, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.633, + 0.29, + 0.667, + 0.306 + ], + "angle": 0, + "content": "DIFT DIFT" + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.09, + 0.89, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.189, + 0.89, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.777, + 0.289, + 0.855, + 0.306 + ], + "angle": 0, + "content": "3DiffTcction 3DiffTcction" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.31, + 0.894, + 0.368 + ], + "angle": 0, + "content": "Figure 2. Visualization of semantic correspondence prediction using different features Given a Red Source Point in the left most reference image, we predict the corresponding points in the images from different camera views on the right (Blue Dot). The ground truth points are marked by Red Stars. Our method, 3DiffTecnion, is able to identify precise correspondences in challenging scenes with repetitive visual patterns." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.379, + 0.473, + 0.621 + ], + "angle": 0, + "content": "tation and demonstrate zero-shot generalization to in-the-wild images. NerfDiff [15] distills the knowledge of a 3D-aware conditional diffusion model into a Nerf. RealFusion [27] uses a diffusion model as a conditional prior with designed prompts. NeuralLift [55] uses language-guided priors to guide the novel view synthesis diffusion model. Most recently, inspired by the idea of video diffusion models [4, 20, 42], MVDream [41] adapts the attention layers to model the cross-view 3D dependency. The most relevant work to our approaches is SparseFusion [58], where authors propose to incorporate geometry priors with epipolar geometries. However, while their model is trained from scratch, in our approach, we use NVS merely as an auxiliary task to enhance the pre-trained diffusion features with 3D awareness and design the architecture for tuning a minimal number of parameters by leveraging a ControlNet." 
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.635, + 0.206, + 0.65 + ], + "angle": 0, + "content": "3. 3DiffTection" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.661, + 0.47, + 0.813 + ], + "angle": 0, + "content": "We introduce 3DiffTlection, designed to harness diffusion model features for 3D detection. As depicted in Fig. 1, 3DiffTlection comprises three core components: 1) Instilling 3D awareness into the diffusion features by training a geometric ControlNet for view synthesis. 2) Bridging the domain and task gaps using a semantic ControlNet, which is concurrently trained with a 3D detection head on the target data distribution. 3) Amplifying 3D box predictions through a virtual view ensembling strategy. We further detail each of these steps in the subsequent sections." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.824, + 0.415, + 0.839 + ], + "angle": 0, + "content": "3.1. Diffusion Model as a Feature Extractor" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.841, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Recent works demonstrate that features extracted from text-to-image diffusion models, such as Stable Diffusion [36], capture rich semantics suitable for dense perception tasks, including image segmentation [56] and point correspond" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.378, + 0.892, + 0.514 + ], + "angle": 0, + "content": "dences [46]. In this work, our interest lies in 3D object detection. However, since Stable Diffusion is trained on 2D image-text pairs—a pre-training paradigm proficient in aligning textual semantics with 2D visual features—it might lack 3D awareness. We aim to explore this by examining point correspondences between views. We hypothesize that features with 3D awareness should demonstrate the capability to identify correspondences that point to the same 3D locations when provided with multi-view images." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.515, + 0.893, + 0.605 + ], + "angle": 0, + "content": "Following [46, 56] we employ a single forward step for feature extraction. However, unlike these works, we only input images without textual captions, given that in real-world scenarios, textual input is typically not provided for object detection. Formally, given an image \\(\\mathbf{x}\\), we sample a noise image \\(\\mathbf{x}_t\\) at time \\(t\\), and obtain the diffusion features" + }, + { + "type": "equation", + "bbox": [ + 0.511, + 0.612, + 0.892, + 0.643 + ], + "angle": 0, + "content": "\\[\n\\mathbf {f} = \\mathcal {F} \\left(\\mathbf {x} _ {t}; \\Theta\\right), \\mathbf {x} _ {t} = \\sqrt {\\bar {\\alpha} _ {t}} \\mathbf {x} + \\sqrt {1 - \\bar {\\alpha} _ {t}} \\epsilon_ {t}, \\epsilon_ {t} \\sim \\mathbb {N} (0, 1), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.69 + ], + "angle": 0, + "content": "where \\(\\mathbf{f}\\) represents the multi-scale features from the decoder module of UNet \\(\\mathcal{F}\\) (parameterized by \\(\\Theta\\)), and \\(\\alpha_{t}\\) represents a pre-defined noise schedule, satisfying \\(\\bar{\\alpha}_{t} = \\prod_{k=1}^{t} \\alpha_{k}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.69, + 0.892, + 0.779 + ], + "angle": 0, + "content": "Interestingly, as illustrated in Fig. 2, the point localization of Stable Diffusion features depends on 2D appearance matching. This can lead to confusion in the presence of repeated visual patterns, indicating a deficiency in 3D spatial understanding. 
Given this observation, we aim to integrate 3D awareness into the diffusion features." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.787, + 0.882, + 0.803 + ], + "angle": 0, + "content": "3.2. Injecting 3D Awareness to Diffusion Features" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.892, + 0.901 + ], + "angle": 0, + "content": "ControlNet [57] is a powerful tool that allows the addition of conditioning into a pre-trained, static Stable Diffusion (SD) model. It has been demonstrated to support various types of dense input conditioning, such as depth and semantic images. This is achieved through the injection of conditional image features into trainable copies of the" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10619" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.14, + 0.09, + 0.265, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.175, + 0.347, + 0.241, + 0.361 + ], + "angle": 0, + "content": "(a) Before" + }, + { + "type": "image", + "bbox": [ + 0.286, + 0.09, + 0.835, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.559, + 0.347, + 0.614, + 0.361 + ], + "angle": 0, + "content": "(b) After" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.365, + 0.895, + 0.422 + ], + "angle": 0, + "content": "Figure 3. Architecture of Geometric ControlNet. Left: Original Stable Diffusion UNet encoder block. Right: We train novel view image synthesis by adding a geometric ControlNet to the original Stable Diffusion encoder blocks. The geometric ControlNet receives the conditional view image as an additional input. Using the camera pose, we introduce an epipolar warp operator, which warps intermediate features into the target view. With the geometric ControlNet, we significantly improve the 3D awareness of pre-trained diffusion features." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.433, + 0.47, + 0.524 + ], + "angle": 0, + "content": "original SD blocks. A significant attribute of ControlNet is its ability to resist overfitting to the dataset used for tuning while preserving the original model's performance. As a result, ControlNet is well-suited for enhancing diffusion features with 3D awareness without compromising their 2D semantic quality." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.524, + 0.474, + 0.661 + ], + "angle": 0, + "content": "Formally, we denote one block of UNet \\(\\mathcal{F}\\) as \\(\\mathcal{F}_s(\\cdot;\\Theta_s)\\) parameterized by \\(\\Theta_s\\). In particular, the original ControlNet block copies each pre-trained Stable Diffusion module \\(\\mathcal{F}_s(\\cdot;\\Theta_s)\\) denoted as \\(\\mathcal{F}_s'(\\cdot;\\Theta_s')\\), and accompanying with two zero convolutions \\(\\mathcal{Z}_{s1}\\) and \\(\\mathcal{Z}_{s2}\\), parameterized by \\(\\Theta_{zs1}\\) and \\(\\Theta_{zs2}\\), respectively. We slightly abuse the notation of \\(\\mathbf{x} \\in \\mathcal{R}^{H \\times W \\times C}\\) as the arbitrary middle features of \\(\\mathbf{x}_t\\) in \\(\\mathcal{F}\\). 
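Editor's note: before Eq. (2) below composes these pieces, the zero-convolution construction just introduced can be made concrete with a minimal PyTorch sketch. This is not the authors' released code; the module name ZeroConv2d, the helper make_controlnet_branch, and the channel handling are illustrative. The point is that a 1x1 convolution initialized to zero lets the trainable copy contribute nothing at the start of tuning, so the frozen Stable Diffusion block's behavior is preserved exactly.

```python
import copy
import torch.nn as nn

class ZeroConv2d(nn.Conv2d):
    """1x1 convolution whose weight and bias start at zero, so the
    ControlNet branch initially adds nothing to the frozen features."""
    def __init__(self, channels: int):
        super().__init__(channels, channels, kernel_size=1)
        nn.init.zeros_(self.weight)
        nn.init.zeros_(self.bias)

def make_controlnet_branch(frozen_block: nn.Module, channels: int):
    """Trainable copy F'_s of a frozen block F_s, bracketed by the zero
    convolutions Z_s1 (applied to the condition) and Z_s2 (applied to the output)."""
    for p in frozen_block.parameters():
        p.requires_grad_(False)            # the original Stable Diffusion block stays frozen
    trainable_copy = copy.deepcopy(frozen_block)
    for p in trainable_copy.parameters():
        p.requires_grad_(True)             # only the copy and the zero convolutions are tuned
    return trainable_copy, ZeroConv2d(channels), ZeroConv2d(channels)
```

Eq. (2) then simply adds Z_s2(F'_s(x + Z_s1(c))) to the frozen output F_s(x).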
Then a ControlNet block with the corresponding frozen Stable Diffusion block is given by" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.67, + 0.469, + 0.7 + ], + "angle": 0, + "content": "\\[\n\\mathbf {y} _ {s} = \\mathcal {F} _ {s} (\\mathbf {x}; \\Theta_ {s}) + \\mathcal {Z} _ {s 2} \\left(\\mathcal {F} _ {s} ^ {\\prime} (\\mathbf {x} + \\mathcal {Z} _ {s 1} (\\mathbf {c}; \\Theta_ {z s 1}); \\Theta_ {s} ^ {\\prime}); \\Theta_ {z s 2}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.7, + 0.47, + 0.734 + ], + "angle": 0, + "content": "where \\(\\mathbf{c} \\in \\mathcal{R}^{H \\times W \\times C}\\) is the condition image feature and \\(\\mathbf{y}_s \\in \\mathcal{R}^{H \\times W \\times C}\\) is the output." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.473, + 0.904 + ], + "angle": 0, + "content": "Epipolar warp operator. We utilize ControlNet to enhance the 3D awareness of diffusion features by training it to perform view synthesis. Specifically, we select pairs of images with known relative camera poses and train the ControlNet conditioned on the source view to produce the output view. Since the features induced by the condition in ControlNet are additive, it is a common practice to ensure alignment between these features and the noisy input features. However, the input for our view synthesis task is, by definition, not aligned with the noisy input of the target" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.434, + 0.895, + 0.495 + ], + "angle": 0, + "content": "view. As a solution, we propose to warp the source view features to align with the target using epipolar geometry. We denote the epipolar warp operator as \\(\\mathcal{G}(\\cdot ,T_n)\\), and our geometric ControlNet is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.498, + 0.505, + 0.923, + 0.536 + ], + "angle": 0, + "content": "\\[\n\\mathbf {y} _ {s} = \\mathcal {F} _ {s} (\\mathbf {x}; \\Theta_ {s}) + \\mathcal {Z} _ {s 2} \\left(\\mathcal {G} \\left(\\mathcal {F} _ {s} ^ {\\prime} (\\mathbf {x} + \\mathcal {Z} _ {s 1} (\\mathbf {c}; \\Theta_ {z s 1}); \\Theta_ {s} ^ {\\prime}), T _ {n}\\right); \\Theta_ {z s 2}\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.537, + 0.895, + 0.612 + ], + "angle": 0, + "content": "Formally, to obtain the target novel-view image at position \\((u,v)\\), we assume that relative camera extrinsic from the source view is described by \\(T_{n} = [[R_{n},0]^{T},[t_{n},1]^{T}]\\) and the intrinsic parameters are represented as \\(K\\). The equation for the epipolar line is:" + }, + { + "type": "equation", + "bbox": [ + 0.571, + 0.622, + 0.892, + 0.641 + ], + "angle": 0, + "content": "\\[\nl _ {c} = K ^ {- T} \\left(\\left[ t _ {n} \\right] \\times R _ {n}\\right) K ^ {- 1} [ u, v, 1 ] ^ {T}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.65, + 0.895, + 0.743 + ], + "angle": 0, + "content": "Here, \\( l_{c} \\) denotes the epipolar line associated with the source conditional image. We sample a set of features along the epipolar line, denoted as \\( \\{\\mathbf{c}(p_i)\\} \\), where the \\( p_i \\) are points on the epipolar line. 
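Editor's note: as a concrete reading of Eq. (4) and of the sampling step just described, here is a small NumPy sketch written under the stated pinhole and relative-pose assumptions; it is an illustration, not the paper's implementation, and the function names are made up. Given the intrinsics K and the relative pose (R_n, t_n), it returns the epipolar line l_c in the conditional view for a target pixel (u, v) and candidate sample locations p_i on that line inside the conditional feature map.

```python
import numpy as np

def skew(t):
    """Cross-product matrix [t]_x, so that skew(t) @ v == np.cross(t, v)."""
    return np.array([[0.0, -t[2], t[1]],
                     [t[2], 0.0, -t[0]],
                     [-t[1], t[0], 0.0]])

def epipolar_line(K, R_n, t_n, u, v):
    """Eq. (4): l_c = K^{-T} ([t_n]_x R_n) K^{-1} [u, v, 1]^T (line coefficients a, b, c)."""
    K_inv = np.linalg.inv(K)
    return K_inv.T @ (skew(t_n) @ R_n) @ K_inv @ np.array([u, v, 1.0])

def sample_points_on_line(l_c, width, height, num=16):
    """Sample candidate locations p_i along a*x + b*y + c = 0 that fall
    inside a width x height conditional feature map."""
    a, b, c = l_c
    if abs(b) > 1e-8:                      # general case: sweep x, solve for y
        xs = np.linspace(0.0, width - 1.0, num)
        ys = -(a * xs + c) / b
    else:                                  # (near-)vertical line: x is fixed
        xs = np.full(num, -c / a)
        ys = np.linspace(0.0, height - 1.0, num)
    keep = (xs >= 0) & (xs < width) & (ys >= 0) & (ys < height)
    return np.stack([xs[keep], ys[keep]], axis=1)
```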
These features are then aggregated at the target view position \\( (u,v) \\) via a differentiable aggregator function, resulting in the updated features:" + }, + { + "type": "equation", + "bbox": [ + 0.552, + 0.753, + 0.892, + 0.771 + ], + "angle": 0, + "content": "\\[\n\\mathbf {c} ^ {\\prime} (u, v) = \\text {a g g r e g a t o r} (\\{\\mathbf {c} (p _ {i}) \\}), \\quad p _ {i} \\sim l _ {c}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.871 + ], + "angle": 0, + "content": "The differentiable aggregator can be as straightforward as average/max functions or something more complex like a transformer, as demonstrated in [13, 58], and \\(\\mathbf{c}^{\\prime}\\) is the warped condition image features, i.e., the output of epipolar warp operator \\(\\mathcal{G}\\). The geometric warping procedure is illustrated in Fig. 3." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Interestingly, we found it beneficial to avoid warping features across all the UNet decoder blocks. As highlighted by" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "10620" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.228 + ], + "angle": 0, + "content": "[48], middle-layer features in Stable Diffusion emphasize high-level semantics, while top stages capture appearance and geometry. Given the shared semantic content in novel-view synthesis, even amidst pixel deviations, we warp features only in the final two stages of Stable-Diffusion. This maintains semantic consistency while accommodating geometric warping shifts. Our geometric ControlNet notably enhances the 3D awareness of diffusion features, evident in the 3DiffTection examples in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.236, + 0.388, + 0.252 + ], + "angle": 0, + "content": "3.3. Bridging the Task and Domain Gap" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.252, + 0.469, + 0.357 + ], + "angle": 0, + "content": "We leverage the 3D-enhanced features for 3D detection by training a standard detection head with 3D box supervision. To further verify the efficacy of our approach in adapting diffusion features for 3D tasks, we train a 3D detection head, keeping our fine-tuned features fixed. Notably, we observe a substantial improvement compared to the baseline SD feature. We report details in Tab. 3." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.358, + 0.469, + 0.553 + ], + "angle": 0, + "content": "Nevertheless, we acknowledge two potential gaps. Firstly, our view synthesis tuning is conceptualized as a universal 3D feature augmentation method. Hence, it is designed to work with a vast collection of posed image pairs, which can be inexpensively gathered (e.g., from videos) without the need for costly labeling. Consequently, there might be a domain discrepancy when comparing to target data, which could originate from a smaller, fully annotated dataset. Secondly, since the features aren't specifically finetuned for detection, there is further potential for optimization towards detection, in tandem with the detection head. As before, we aim to retain the robust feature characteristics already achieved and choose to deploy a second ControlNet." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.554, + 0.47, + 0.78 + ], + "angle": 0, + "content": "Specifically, we freeze both the original SD and the geometric ControlNet modules. 
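Editor's note: with the original SD and geometric ControlNet modules frozen as just stated, one geometric ControlNet block composes Eqs. (2), (3) and (5) roughly as in the schematic below. It reuses the pieces from the earlier sketch; epipolar_warp stands for the operator G, for example gathering source features along the epipolar lines of Eq. (4) and max-aggregating them as in Eq. (5), and warp_this_stage reflects the choice discussed above of warping only the final two decoder stages. All names are illustrative assumptions, not the released implementation.

```python
import torch

def geometric_controlnet_block(x, cond, frozen_block, trainable_copy,
                               z_in, z_out, epipolar_warp, pose,
                               warp_this_stage=True):
    """Eq. (3): y_s = F_s(x) + Z_s2( G( F'_s(x + Z_s1(c)), T_n ) ).

    x              noisy target-view features entering this block
    cond           source (conditional) view features, aligned in shape with x
    epipolar_warp  callable implementing G(., T_n); e.g. epipolar sampling plus max-pooling
    pose           relative camera transform T_n from source to target view
    """
    with torch.no_grad():
        y_frozen = frozen_block(x)                 # frozen Stable Diffusion path, F_s(x)
    h = trainable_copy(x + z_in(cond))             # trainable copy on the conditioned input
    if warp_this_stage:                            # only the last two decoder stages are warped
        h = epipolar_warp(h, pose)
    return y_frozen + z_out(h)
```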
We then introduce another trainable ControlNet, which we refer to as semantic ControlNet. For our model to execute single-image 3D detection, we utilize the input image \\( x \\) in three distinct ways. First, we extract features from it using the pretrained SD, denoted as \\( \\mathcal{F}(x) \\), through a single SD denoising forward step. Next, we feed it into our geometric ControlNet, represented as \\( \\mathcal{F}_{geo}(x,T_n) \\), with an identity pose \\( (T_{n} = [Id,0]) \\) to obtain our 3D-aware features. Lastly, we introduce it to the semantic ControlNet, denoted by \\( \\mathcal{F}_{sem}(x) \\), to produce trainable features fine-tuned for detection within the target data distribution. We aggregate all the features and pass them to a standard 3D detection head, represented as \\( \\mathcal{D} \\) [5]. The semantic ControlNet is trained with 3D detection head." + }, + { + "type": "equation", + "bbox": [ + 0.124, + 0.79, + 0.469, + 0.807 + ], + "angle": 0, + "content": "\\[\ny = \\mathcal {D} (\\mathcal {F} (x) + \\mathcal {F} _ {\\text {g e o}} (x, [ I d, 0 ]) + \\mathcal {F} _ {\\text {s e m}} (x)) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.816, + 0.432, + 0.831 + ], + "angle": 0, + "content": "The figure overview is in the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.84, + 0.273, + 0.854 + ], + "angle": 0, + "content": "3.4. Ensemble Prediction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "ControlNet is recognized for its ability to retain the capabilities of the pre-tuned model. As a result, our semantically tuned model still possesses view synthesis capabilities" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.198 + ], + "angle": 0, + "content": "ties. We exploit this characteristic to introduce a test-time prediction ensembling that further enhances detection performance. Specifically, our box prediction \\( y \\) is dependent on the input view. Although our detection model is trained with this pose set to the identity (i.e., no transformation), at test time, we can incorporate other viewing transformations denoted as \\( \\xi_{i} \\)," + }, + { + "type": "equation", + "bbox": [ + 0.55, + 0.208, + 0.892, + 0.226 + ], + "angle": 0, + "content": "\\[\ny (\\xi) = \\mathcal {D} \\left(\\mathcal {F} (x) + \\mathcal {F} _ {\\text {g e o}} (x, \\xi) + \\mathcal {F} _ {\\text {s e m}} (x)\\right). \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.234, + 0.892, + 0.264 + ], + "angle": 0, + "content": "The final prediction is derived through a non-maximum suppression of individual view predictions:" + }, + { + "type": "equation", + "bbox": [ + 0.609, + 0.274, + 0.892, + 0.291 + ], + "angle": 0, + "content": "\\[\ny _ {f i n a l} = N M S \\left(\\left\\{y \\left(\\xi_ {i} \\right\\}\\right). \\right. \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.301, + 0.892, + 0.391 + ], + "angle": 0, + "content": "We note that our objective isn't to create a novel view at this stage but to enrich the prediction using views that are close to the original pose. The underlying intuition is that the detection and view synthesis capabilities complement each other. Certain objects might be localized more precisely when observed from a slightly altered view." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.404, + 0.633, + 0.421 + ], + "angle": 0, + "content": "4. 
Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.43, + 0.892, + 0.626 + ], + "angle": 0, + "content": "In this section, we present a comprehensive experimental evaluation of 3DiffTlection and its constituent components. Initially, in Section 4.1, we establish 3DiffTlection as a powerful 3D detection framework, particularly when fine-tuned on a specific target dataset. We then validate its capacity for generalization to new datasets, both with and without tuning of the detection head (Section 4.2). Subsequently, we demonstrate its ability to maintain strong performance with limited labels (Section 4.3). Finally, in Section 4.4, we confirm 3DiffTlection's enhanced 3D awareness by measuring its feature correspondence accuracy. We also validate the importance of each module in our design and conclude with visualizations of our auxiliary view synthesis ability." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.629, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Datasets and implementation details For all our experiments, we train the geometric ControlNet on the official ARKitscene datasets [3], which provide around 450K posed low-resolution \\((256\\times 256)\\) images. We sample around 40K RGB images along with their intrinsics and extrinsic. Note that in the following experiments, the pretrained geometric ControlNet is kept frozen. For training 3D object detection, we use Omni3D-ARkitsscenes as our primary in-domain experiment dataset, and Omni3DSUNRGBD for our cross-dataset experiments. To evaluate the performance, we compute a mean AP3D across all categories in Omni3D-ARkitsscenes and over a range of IoU3D thresholds in [0.05, 0.10,..., 0.50], simply denoted as AP3D. We also report AP3D at IoU 15, 25, and 50 (AP3D@15, AP3D@25 and AP3D@50) as following [5]. We take the publicly available text-to-image LDM [36], Stable Diffusion as the preliminary backbone. Unlike previous diffusion models which require multiple images for training" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "10621" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.068, + 0.891, + 0.224 + ], + "angle": 0, + "content": "
MethodsResolutionNVS Train ViewsDet. Train ViewsAP3D↑AP3D@15↑AP3D@25↑AP3D@50↑
CubeRCNN-DLA256×256-131.7543.1034.6811.07
DreamTchr-Res50256×256-133.2044.5437.1012.35
NeRF-Det-R50256×256≥10≥1033.1346.8136.0313.58
ImVoxelNet256×256-≥1032.0946.7135.6211.94
3DiffTlection256×2562139.2250.5843.1816.40
CubeRCNN-DLA512×512-134.3246.0636.0212.51
DreamTchr-Res50512×512-136.1449.8240.5115.48
3DiffTlection512×5122143.7557.1347.3220.30
CubeRCNN-DLA-Aug512×512-141.7253.0945.4219.26
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.226, + 0.893, + 0.254 + ], + "angle": 0, + "content": "Table 1. 3D Object Detection Results on Omni3D-ARKitScenes testing set. 3DiffTraction significantly outperforms baselines, including CubeRCNN-DLA-Aug, which is trained with 6x more supervision data." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.267, + 0.47, + 0.357 + ], + "angle": 0, + "content": "a novel-view synthesis task, we only take two views, one as the source view and another one as the target view. Moreover, we only consider two views with an overlap of less than \\(30\\%\\). Regarding novel-view synthesis ensemble, we use pseudo camera rotations, i.e., \\(\\pm 15\\) deg and ensemble the predicted bounding boxes via NMS." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.362, + 0.471, + 0.573 + ], + "angle": 0, + "content": "Methods in comparison. CubeRCNN [5] extends Fast-RCNN [35] to 3D object detection by incorporating a cube head. In our work, we aim to provide a stronger 3D-aware image backbone, and compare it with other image backbones using the Cube-RCNN framework. Specifically, we compare with DreamTeacher [24], which distills knowledge from a Pre-trained Stable Diffusion to a lighter network, ResNet-50. We also compare with DIFT [46], which directly employs the frozen Stable Diffusion as the image feature extractor. Additionally, we evaluate methods designed for multi-view 3D detection, such as NeRF-Det [54] and ImVoxelNet [37]. While these methods typically require more images during training, we use them for single-image 3D object detection during testing." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.583, + 0.468, + 0.598 + ], + "angle": 0, + "content": "4.1. 3D Object Detection on Omni3D-ARKitsscenes" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.599, + 0.471, + 0.902 + ], + "angle": 0, + "content": "In Tab. 1, we analyze the 3D object detection performance of 3DiffTlection compared to several baseline methods. Notably, 3DiffTlection significantly outperforms CubeRCNN-DLA [5], a prior art in single-view 3D detection on the Omni3D-ARKitScenes dataset, achieving a margin of \\(7.4\\%\\) at a resolution of \\(256 \\times 256\\) and \\(9.43\\%\\) at a resolution of \\(512 \\times 512\\) on the AP3D metric. We further compare our approach to NeRF-Det-R50 [54] and ImVoxelNet [37], both of which utilize multi-view images during training (indicated in Tab. 1 as NVS Train Views and Det. Train Views). In contrast, 3DiffTlection which does not rely on multi-view images for training the detection network and uses only view-pairs for geometric network training, surpasses these methods by \\(6.09\\%\\) and \\(7.13\\%\\) on the AP3D metric, respectively. Additionally, we compare our approach to DreamTeacher-Res50 [24], which distills StableDiffusion feature prediction into a ResNet backbone to make it amenable for perception tasks. 3DiffTlection exceeds DreamTeacher by \\(6.02\\%\\) and \\(7.61\\%\\) at resolutions of \\(256 \\times 256\\) and \\(512 \\times 512\\), respectively. Lastly, we eval" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.267, + 0.892, + 0.356 + ], + "angle": 0, + "content": "uate our model against CubeRCNN-DLA-Aug, which denotes the training of CubeRCNN on the complete Omni3D dataset, comprising 234,000 RGB images with a more robust training recipe. Remarkably, our model outperforms CubeRCNN-DLA-Aug by \\(2.03\\%\\) on AP3D while using nearly 6x less data, demonstrating its data efficiency." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.357, + 0.892, + 0.448 + ], + "angle": 0, + "content": "We also show visualization results in Fig. 4. Compared to CubeRCNN, our proposed 3DiffTecn predicts 3D bounding boxes with better pose, localization and significantly fewer false defecions. As seen in the middle column, our model can even handle severe occlusion cases, i.e., the sofa in the middle image and the sink in the right image." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.455, + 0.761, + 0.47 + ], + "angle": 0, + "content": "4.2. Cross-dataset Generalization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.478, + 0.892, + 0.644 + ], + "angle": 0, + "content": "To assess the capability of 3DiffTraction's geometric ControlNet to carry its 3D awareness to other datasets, we employed a 3DiffTraction model with its geometric ControlNet trained on the OMni3D-ARKitscene dataset, and conduct cross-dataset experiments on the Ommi3D-SUNRGBD dataset. We evaluate it with two settings: (1) finetune the parameters on the Omni3D-SUNRBGD dataset and test the performance on Omni3D-SUNRGBD dataset, and (2) train the parameters on the Omni3D-ARKitsscenes dataset and directly test the performance on Omni3D-SUNRGBD dataset in a zero-shot setting. The performance is shown in Tab. 2." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.645, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In the first setting (shown in the fourth column), as a baseline, we trained the 3D head using DIFT-SD features. 3DiffTecn w/o Semantic-ControlNet and w/ Semantic-ControlNet outperform DIFT-SD by \\(1.21\\%\\) and \\(5.99\\%\\), respectively. We further compare our approach with CubeR-CNN. To ensure a fair comparison, we take CubeRCNN-DLA trained on Omni3D-ARKitscene datasets and fine-tuned its entire model on the Omni3D-SUNRGBD. Without any training of the geometric ControlNet on the Omni-SUNRGBD, 3DiffTecn (w/o Semantic-ControlNet) with only tuned a 3D head surpasses the fully fine-tuned CubeRCNN-DLA by \\(0.39\\%\\). Then, we reintegrate the semantic ControlNet and jointly train it with the 3D head. This yields a performance boost of \\(5.09\\%\\). These results indicate that even without training the geometric ControlNet in the target domain, the semantic ControlNet adeptly adapts features for perception tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "10622" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.082, + 0.885, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.347, + 0.895, + 0.388 + ], + "angle": 0, + "content": "Figure 4. Qualitative results on Omni3D-ARKitScene 3D Detection. In contrast to Cube-RCNN (bottom), our approach (top) accurately predicts both the box class and 3D locations. The bird's-eye-view visualization further demonstrates that our predictions surpass the baseline performance of Cube-RCNN." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.389, + 0.891, + 0.462 + ], + "angle": 0, + "content": "
MethodsBackbonePretrained on ARKitTuned on SUNRGBDZero-shot(w/o 2D GT)Zero-shot(w/ 2D GT)
DIFT-SDStableDiffX21.9216.7425.31
CubeRCNNDLA3422.7216.8125.05
3DiffTecnStableDiff+Geo-Ctr23.1117.3726.94
3DiffTecnStableDiff+Geo-Ctr+Sem-Ctr27.8122.6430.14
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.464, + 0.895, + 0.534 + ], + "angle": 0, + "content": "Table 2. Cross-domain experiment on the Omni3D-SUNRGBD dataset. The \"Pre-trained on ARKit\" denotes we pre-train the backbone on Omni3D-ARkitsscenes. For CubeCNN, we pre-train it with 3D detection supervision. For all zero-shot experiments, the methods are first trained on Omni3D-ARKitscenes for 3D detection and then directly tested on Omni3D-SUNRGBD dataset. \"2D GT\" means we use ground-truth 2D bounding box to crop ROI image features. The results are reported for overlapped 14 classes between Omni3D-SUNRGBD and Omni3D-ARKiSscenes dataset." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.545, + 0.473, + 0.712 + ], + "angle": 0, + "content": "To further demonstrate the transferrability of 3DiffTec-tion, we train the models for 3D detection on Omni3D-ARkitsscenes and directly test it on Omni3D-SUNRGBD dataset without any further tuning. The results are shown in Column 3 and column 4 of Tab. 2. We observe that if we have ground truth 2D bounding boxes, 3DiffTec-tion with semantic-ControlNet can even achieve the best performance. Without ground truth 2D bounding boxes, 3DiffTec-tion is also able to outperform DIFT-SD and CubeR-CNN by \\(5.90\\%\\) and \\(5.83\\%\\), respectively. These results demonstrate the notable transferrability of our 3DiffTec-tion." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.725, + 0.24, + 0.743 + ], + "angle": 0, + "content": "4.3. Label Efficiency" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.473, + 0.903 + ], + "angle": 0, + "content": "We hypothesize that our usage of semantic ControlNet for tuning 3DiffTlection towards a target dataset should maintain high label efficiency. We test this by using \\(50\\%\\) and \\(10\\%\\) labels from the Omni3D-ARKitscene datasets. The results are shown in Tab. ?? of supplementary materials. In low-data regime (for both \\(50\\%\\) and \\(10\\%\\) label setting), 3DiffTlection demonstrates significantly better performance, and more modest degradation than baselines. Notably, even with \\(50\\%\\) of the labels, our proposed 3DiffTlection achieves 2.28 AP3D-N improvement over previous methods trained" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.545, + 0.895, + 0.593 + ], + "angle": 0, + "content": "on \\(100\\%\\) label. Additionally, when tuning only the 3D head 3DiffTraction performs better than CubeRCNN and DreamTeacher with tuning all parameters." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.604, + 0.707, + 0.62 + ], + "angle": 0, + "content": "4.4. Analysis and Ablation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.628, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Feature correspondence fidelity (Fig. 2). As described in 3.1, we conducted a feature correspondence experiment. We hypothesize that if our model is 3D aware, it should be find 3D correspondences. As can be seen, our method yields a more accurate point-matching result, primarily because our geometric ControlNet is trained to infer 3D correspondences through our Epipolar warp operator to successfully generate novel views. To provide further insights, we visualize a heatmap demonstrating the similarity of target image features to the reference key points. Notably, our 3DiffTec-tion features exhibit better concentration around the target point. 
Furthermore, we quantitatively evaluate the correspondence performance on ScanNet dataset, which is never accessed by both our 3DiffTec-tion and DIFT for fair comparison. The experiment results are shown in supplementary material. The results also demonstrate our hypothesis. Novel-view synthesis visualization (Fig. 5). To further validate our geometric ControlNet ability to maintain geo" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10623" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.089, + 0.891, + 0.204 + ], + "angle": 0, + "content": "
Backbone | NVS Train Views | Geo-Ctr | Sem-Ctr | NV-Ensemble | AP2D | AP3D↑ | AP3D@15↑ | AP3D@25↑ | AP3D@50↑
VIT-B (MAE) | - | - | - | - | 26.14 | 25.23 | 36.04 | 28.64 | 8.11
Res50 (DreamTchr) | - | - | - | - | 25.27 | 24.36 | 34.16 | 25.97 | 7.93
StableDiff. (DIFT) | - | - | - | - | 29.35 | 28.86 | 40.18 | 32.07 | 8.86
StableDiff. (Ours) | 1 | ✓ | - | - | 29.51 | 26.05 | 35.81 | 29.86 | 6.95
StableDiff. (Ours) | 2 | ✓ | - | - | 30.16 | 31.20 | 41.87 | 33.53 | 10.14
StableDiff. (Ours) | 2 | ✓ | ✓ | - | 37.12 | 38.72 | 50.38 | 42.88 | 16.18
StableDiff. (Ours) | 2 | ✓ | ✓ | ✓ | 37.19 | 39.22 | 50.58 | 43.18 | 16.40
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.206, + 0.893, + 0.275 + ], + "angle": 0, + "content": "Table 3. Analysis of 3DiffTraction Modules on Omni3D-ARKitScenes testing set. We first compare different backbones by freezing the backbone and only training the 3D detection head. Then, we perform ablative studies on each module of our architecture systematically. Starting with the baseline vanilla stable diffusion model, we incrementally incorporate improvements: Geometry-ControlNet (Geo-Ctr), the number of novel view synthesis training views (NVS Train Views), Semantic-ControlNet (Sem-Ctr), and the novel view synthesis ensemble (NV-Ensemble)." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.283, + 0.212, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.381, + 0.188, + 0.39 + ], + "angle": 0, + "content": "Condition Image" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.283, + 0.348, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.247, + 0.381, + 0.323, + 0.391 + ], + "angle": 0, + "content": "Generated Image" + }, + { + "type": "image", + "bbox": [ + 0.358, + 0.283, + 0.482, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.411, + 0.381, + 0.428, + 0.39 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.491, + 0.283, + 0.617, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.516, + 0.381, + 0.591, + 0.39 + ], + "angle": 0, + "content": "Condition Image" + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.283, + 0.751, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.651, + 0.381, + 0.726, + 0.39 + ], + "angle": 0, + "content": "Generated Image" + }, + { + "type": "image", + "bbox": [ + 0.76, + 0.283, + 0.885, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.815, + 0.381, + 0.831, + 0.389 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.395, + 0.893, + 0.424 + ], + "angle": 0, + "content": "Figure 5. Novel-view synthesis visualization on Omni3D-ARKitScenes testing set. Our model with Geometry-ControlNet synthesizes realistic novel views from a single input image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.436, + 0.47, + 0.587 + ], + "angle": 0, + "content": "metric consistency of the source view content, we visualize novel-view synthesis results. The results demonstrate that our proposed epipolar warp operator is effective in synthesizing the scene with accurate geometry and layout compared to the ground truth images. We note that scene-level NVS from a single image is a challenging task, and we observe that our model may introduce artifacts. While enhancing performance is an interesting future work, here we utilize NVS as an auxiliary task which is demonstrated to effectively enhance our model's 3D awareness." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.471, + 0.901 + ], + "angle": 0, + "content": "3DiffTraction modules. We analyze the unique modules and design choices in 3DiffTraction: the Stable Diffusion backbone, geometric and semantic ControlNets targeting NVs and detection, and the multi-view prediction ensemble. All results are reported using the Omni3D-ARKitscenes in Tab. 3. We first validate our choice of using a Stable Diffusion backbone. 
While diffusion features excel in 2D segmentation tasks [24, 56], they have not been tested in 3D detection. We analyze this choice independently from the other improvements by keeping the backbone frozen and only training the 3D detection head. The vanilla Stable Diffusion features achieve a \\(28.86\\%\\) AP3D, exceeding CubeRCNN-VIT-B (MAE pretrained) by \\(3.63\\%\\) and ResNet-50 DreamTeacher by \\(4.5\\%\\) in AP30. This performance is mirrored in AP2D results, affirming Stable Diffusion's suitability for perception tasks. Our geometric ControlNet, is aimed at instilling 3D awareness via NVS training. A performance boost of \\(2.34\\%\\) on AP3D and \\(0.81\\%\\) on AP2D indicates that the geometric ControlNet imparts 3D awareness knowledge while preserving its 2D knowl" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.436, + 0.892, + 0.616 + ], + "angle": 0, + "content": "edge. To ensure our improvement is attributed to our view synthesis training, we limited the geometric ControlNet to single-view data by setting the source and target views to be identical (denoted by '1' in the NVs train view column of Tab. 3), which reduces the training to be denoising training [6]. This indicate a \\(2.81\\%\\) decrease in AP3D compared to the standard Stable Diffusion, affirming our hypothesis. Further, the semantic ControlNet, co-trained with the 3D detection head enhances both AP2D and AP3D by around \\(7\\%\\) confirming its efficacy in adapting the feature for optimal use by the detection head. Lastly, using NVS-ensemble results in additional \\(0.5\\%\\) increase in AP3D." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.633, + 0.757, + 0.649 + ], + "angle": 0, + "content": "5. Conclusion and Limitations" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.84 + ], + "angle": 0, + "content": "3DiffTraction, utilizing a 3D-aware diffusion model, enables efficient 3D detection from single images, overcoming large-scale data annotation challenges. With its geometric and semantic tuning strategies, it surpasses previous benchmarks, showing high label efficiency and cross-domain adaptability. 3DiffTraction has limitations, including the need for image pairs with accurate camera poses and challenges in handling dynamic objects from in-the-wild videos. Additionally, its use of the Stable Diffusion architecture demands substantial memory and runtime, achieving about 7.5 fps on a 3090Ti GPU. Suitable for offline tasks, it requires further optimization for online detection." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Acknowledgements. Or Litany is a Taub fellow and is supported by the Azrieli Foundation Early Career Faculty Fellowship. We thank Qianqian Wang, David Acuna, and Jonah Philion for the insightful discussion." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10624" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.157 + ], + "angle": 0, + "content": "[1] Tomer Amit, Tal Shaharbany, Eliya Nachmani, and Lior Wolf. Segdiff: Image segmentation with diffusion probabilistic models. arXiv preprint arXiv:2112.00390, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.472, + 0.214 + ], + "angle": 0, + "content": "[2] Dmitry Baranchuk, Andrey Voynov, Ivan Rubachev, Valentin Khrulkov, and Artem Babenko. Label-efficient semantic segmentation with diffusion models. In International Conference on Learning Representations, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.217, + 0.47, + 0.313 + ], + "angle": 0, + "content": "[3] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Tal Dimry, Yuri Feigin, Peter Fu, Thomas Gebauer, Brandon Joffe, Daniel Kurz, Arik Schwartz, and Elad Shulman. ARK-scenes - a diverse real-world dataset for 3d indoor scene understanding using mobile RGB-d data. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.316, + 0.47, + 0.384 + ], + "angle": 0, + "content": "[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.387, + 0.468, + 0.441 + ], + "angle": 0, + "content": "[5] Garrick Brazil, Abhinav Kumar, Julian Straub, Nikhila Ravi, Justin Johnson, and Georgia Gkioxari. Omni3d: A large benchmark and model for 3d object detection in the wild, 2023. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.444, + 0.468, + 0.514 + ], + "angle": 0, + "content": "[6] Emmanuel Asiedu Brempong, Simon Kornblith, Ting Chen, Niki Parmar, Matthias Minderer, and Mohammad Norouzi. Denoising pretraining for semantic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4175-4186, 2022. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.516, + 0.468, + 0.583 + ], + "angle": 0, + "content": "[7] Eric R. Chan, Koki Nagano, Matthew A. Chan, Alexander W. Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3d-aware diffusion models, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.586, + 0.47, + 0.655 + ], + "angle": 0, + "content": "[8] Hansheng Chen, Yuyao Huang, Wei Tian, Zhong Gao, and Lu Xiong. Monorun: Monocular 3d object detection by reconstruction and uncertainty propagation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10379-10388, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.657, + 0.468, + 0.697 + ], + "angle": 0, + "content": "[9] Shoufa Chen, Peize Sun, Yibing Song, and Ping Luo. Diffusional: Diffusion model for object detection. ICCV, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.701, + 0.468, + 0.757 + ], + "angle": 0, + "content": "[10] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International conference on machine learning, pages 1597-1607. PMLR, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.758, + 0.468, + 0.813 + ], + "angle": 0, + "content": "[11] Ting Chen, Lala Li, Saurabh Saxena, Geoffrey Hinton, and David J Fleet. A generalist framework for panoptic segmentation of images and videos. arXiv preprint arXiv:2210.06366, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.468, + 0.857 + ], + "angle": 0, + "content": "[12] Prafulla Dhariwal and Alexander Quinn Nichol. Diffusion models beat GANs on image synthesis. In Advances in Neural Information Processing Systems, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.468, + 0.901 + ], + "angle": 0, + "content": "[13] Yilun Du, Cameron Smith, Ayush Tewari, and Vincent Sitzmann. Learning to render novel views from wide-baseline stereo pairs, 2023. 4" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[14] Alexandros Graikos, Nikolay Malkin, Nebojsa Jojic, and Dimitris Samaras. Diffusion models as plug-and-play priors. Advances in Neural Information Processing Systems, 35:14715-14728, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[15] Jiatao Gu, Alex Trevithick, Kai-En Lin, Josh Susskind, Christian Theobalt, Lingjie Liu, and Ravi Ramamoorthi. Nerfdiff: Single-image view synthesis with nef-guided distillation from 3d-aware diffusion. In International Conference on Machine Learning, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.288 + ], + "angle": 0, + "content": "[16] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.291, + 0.892, + 0.331 + ], + "angle": 0, + "content": "[17] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.334, + 0.892, + 0.374 + ], + "angle": 0, + "content": "[18] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.377, + 0.892, + 0.431 + ], + "angle": 0, + "content": "[19] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. arXiv preprint arXiv:2106.15282, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.434, + 0.892, + 0.502 + ], + "angle": 0, + "content": "[20] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P. Kingma, Ben Poole, Mohammad Norouzi, David J. Fleet, and Tim Salimans. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.504, + 0.892, + 0.573 + ], + "angle": 0, + "content": "[21] Siyuan Huang, Siyuan Qi, Yinxue Xiao, Yixin Zhu, Ying Nian Wu, and Song-Chun Zhu. Cooperative holistic scene understanding: Unifying 3d object, layout, and camera pose estimation. In Advances in Neural Information Processing Systems, pages 206-217, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.575, + 0.892, + 0.629 + ], + "angle": 0, + "content": "[22] Boah Kim, Yujin Oh, and Jong Chul Ye. Diffusion adversarial representation learning for self-supervised vessel segmentation. 
In The Eleventh International Conference on Learning Representations, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.632, + 0.892, + 0.672 + ], + "angle": 0, + "content": "[23] Nilesh Kulkarni, Ishan Misra, Shubham Tulsiani, and Abhinav Gupta. 3d-relnet: Joint object and relational network for 3d prediction. 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.674, + 0.892, + 0.729 + ], + "angle": 0, + "content": "[24] Daiqing Li, Huan Ling, Amlan Kar, David Acuna, Seung Wook Kim, Karsten Kreis, Antonio Torralba, and Sanja Fidler. Dreamteacher: Pretraining image backbones with deep generative models, 2023. 1, 2, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.731, + 0.892, + 0.772 + ], + "angle": 0, + "content": "[25] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.774, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[26] Yingfei Liu, Tiancai Wang, Xiangyu Zhang, and Jian Sun. Petr: Position embedding transformation for multi-view 3d object detection. In European Conference on Computer Vision, pages 531-548. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.831, + 0.892, + 0.871 + ], + "angle": 0, + "content": "[27] Luke Melas-Kyriazi, Christian Rupprecht, Iro Laina, and Andrea Vedaldi. Realfusion: 360 reconstruction of any object from a single image. In CVPR, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[28] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10625" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.47, + 0.178 + ], + "angle": 0, + "content": "[29] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In International Conference on Machine Learning, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.18, + 0.469, + 0.262 + ], + "angle": 0, + "content": "[30] Alexander Quinn Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. In Proceedings of the 39th International Conference on Machine Learning, pages 16784-16804. PMLR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.264, + 0.469, + 0.333 + ], + "angle": 0, + "content": "[31] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang. Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.335, + 0.469, + 0.404 + ], + "angle": 0, + "content": "[32] Jinhyung Park, Chenfeng Xu, Shijia Yang, Kurt Keutzer, Kris M. 
Kitani, Masayoshi Tomizuka, and Wei Zhan. Time will tell: New outlooks and a baseline for temporal multiview 3d object detection. In The Eleventh International Conference on Learning Representations, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.406, + 0.469, + 0.46 + ], + "angle": 0, + "content": "[33] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.463, + 0.469, + 0.517 + ], + "angle": 0, + "content": "[34] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.52, + 0.469, + 0.576 + ], + "angle": 0, + "content": "[35] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.578, + 0.47, + 0.646 + ], + "angle": 0, + "content": "[36] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.649, + 0.47, + 0.718 + ], + "angle": 0, + "content": "[37] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2397-2406, 2022. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.72, + 0.469, + 0.815 + ], + "angle": 0, + "content": "[38] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S. Sara Mahdavi, Rapha Gontijo Lopes, Tim Salimans, Jonathan Ho, David J Fleet, and Mohammad Norouzi. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[39] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[40] Saurabh Saxena, Abhishek Kar, Mohammad Norouzi, and David J. Fleet. Monocular depth estimation using diffusion models, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[41] Yichun Shi, Peng Wang, Jianglong Ye, Long Mai, Kejie Li, and Xiao Yang. Mvdream: Multi-view diffusion for 3d generation. arXiv:2308.16512, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.233 + ], + "angle": 0, + "content": "[42] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv:2209.14792, 2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.892, + 0.291 + ], + "angle": 0, + "content": "[43] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.293, + 0.892, + 0.36 + ], + "angle": 0, + "content": "[44] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In International Conference on Learning Representations, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.363, + 0.892, + 0.404 + ], + "angle": 0, + "content": "[45] Weimin Tan, Siyuan Chen, and Bo Yan. Diffss: Diffusion model for few-shot semantic segmentation. arXiv preprint arXiv:2307.00773, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.406, + 0.892, + 0.448 + ], + "angle": 0, + "content": "[46] Luming Tang, Menglin Jia, Qianqian Wang, Cheng Perng Phoo, and Bharath Hariharan. Emergent correspondence from image diffusion, 2023. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.449, + 0.892, + 0.504 + ], + "angle": 0, + "content": "[47] Shubham Tulsiani, Saurabh Gupta, David Fouhey, Alexei A. Efros, and Jitendra Malik. Factoring shape, pose, and layout from the 2d image of a 3d scene. In Computer Vision and Pattern Recognition (CVPR), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.506, + 0.892, + 0.575 + ], + "angle": 0, + "content": "[48] Narek Tumanyan, Michal Geyer, Shai Bagon, and Tali Dekel. Plug-and-play diffusion features for text-driven image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1921-1930, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.577, + 0.892, + 0.646 + ], + "angle": 0, + "content": "[49] Narek Tumanyan, Michal Geyer, Shai Bagon, and Tali Dekel. Plug-and-play diffusion features for text-driven image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1921-1930, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.648, + 0.892, + 0.702 + ], + "angle": 0, + "content": "[50] Tai Wang, ZHU Xinge, Jiangmiao Pang, and Dahua Lin. Probabilistic and geometric depth: Detecting objects in perspective. In Conference on Robot Learning, pages 1475-1485. PMLR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.704, + 0.892, + 0.772 + ], + "angle": 0, + "content": "[51] Yue Wang, Vitor Campagnolo Guizilini, Tianyuan Zhang, Yilun Wang, Hang Zhao, and Justin Solomon. Detr3d: 3d object detection from multi-view images via 3d-to-2d queries. In Conference on Robot Learning, pages 180–191. PMLR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.774, + 0.892, + 0.83 + ], + "angle": 0, + "content": "[52] Daniel Watson, William Chan, Ricardo Martin-Brualla, Jonathan Ho, Andrea Tagliasacchi, and Mohammad Norouzi. Novel view synthesis with diffusion models. arXiv preprint arXiv:2210.04628, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[53] Julia Wolleb, Robin Sandkuhler, Florentin Bieder, Philippe Valmaggia, and Philippe C Cattin. Diffusion models for implicit image segmentation ensembles. In International Conference on Medical Imaging with Deep Learning, pages 1336-1348. PMLR, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "10626" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[54] Chenfeng Xu, Bichen Wu, Ji Hou, Sam Tsai, Ruilong Li, Jialiang Wang, Wei Zhan, Zijian He, Peter Vajda, Kurt Keutzer, and Masayoshi Tomizuka. Nerf-det: Learning geometry-aware volumetric representation for multi-view 3d object detection, 2023. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.47, + 0.204 + ], + "angle": 0, + "content": "[55] Dejia Xu, Yifan Jiang, Peihao Wang, Zhiwen Fan, Yi Wang, and Zhangyang Wang. Neurallift-360: Lifting an in-the-wild 2d photo to a 3d object with \\(360^{\\circ}\\) views. 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.206, + 0.47, + 0.288 + ], + "angle": 0, + "content": "[56] Jiarui Xu, Sifei Liu, Arash Vahdat, Wonmin Byeon, Xiaolong Wang, and Shalini De Mello. Open-vocabulary panoptic segmentation with text-to-image diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2955-2966, 2023. 1, 2, 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.29, + 0.47, + 0.33 + ], + "angle": 0, + "content": "[57] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.332, + 0.47, + 0.374 + ], + "angle": 0, + "content": "[58] Zhizhuo Zhou and Shubham Tulsiani. Sparsefusion: Distilling view-conditioned diffusion for 3d reconstruction. In CVPR, 2023. 
1, 3, 4" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "10627" + } + ] +] \ No newline at end of file diff --git a/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/0c8075a5-3d90-4e50-a4ef-fbf63dd9f1bc_origin.pdf b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/0c8075a5-3d90-4e50-a4ef-fbf63dd9f1bc_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0c852d9cbabf194fa59011525fda8757cbdf3ce9 --- /dev/null +++ b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/0c8075a5-3d90-4e50-a4ef-fbf63dd9f1bc_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dbd528cb58f07f39008efe160c0a257f702c6144670e5517fb986fde509505e +size 1685480 diff --git a/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/full.md b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0fbcbc234f48ea1075e5d20bd099a0af25b3afd6 --- /dev/null +++ b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/full.md @@ -0,0 +1,331 @@ +# 3DiffTecn: 3D Object Detection with Geometry-Aware Diffusion Features + +Chenfeng $\mathrm{Xu}^{1,2}$ + +$^{1}$ NVIDIA + +$^{2}$ UC Berkeley + +Huan Ling $^{1,3,4}$ + +$^{3}$ Vector Institute + +Sanja Fidler $^{1,3,4}$ + +Or Litany1,5 + +4University of Toronto + +5Technion + +![](images/3b8baaa198a92612716eb6b6b01593a412c4da7837e38b5b97e855001e7ae770.jpg) +Figure 1. (1) We enhance pre-trained diffusion features with 3D awareness by training a geometric ControlNet (Sec. 3.2). (2) We employ a semantic ControlNet (Sec. 3.3) to refine generative features for targeted data and downstream tasks, specifically focusing on enhancing features for 3D object detection. (3) During the inference process, we further enhance 3D detection accuracy by assembling the bounding box predictions from virtual views (Sec. 3.4). + +![](images/8cf0024d69b12af854c82a7d5572215d681f5e6617d0afb848bc2e9afca39e76.jpg) + +![](images/29d0352b60e36b7e83fb0c699e08c9a37f45a4c6a3d5db07c4cb87a67a224880.jpg) + +![](images/dd73e635593482f0cf55158f68afd9f3f7fabb6558f3a47f1583060f82e20e9f.jpg) + +# Abstract + +3DiffTecnion introduces a novel method for 3D object detection from single images, utilizing a 3D-aware diffusion model for feature extraction. Addressing the resource-intensive nature of annotating large-scale 3D image data, our approach leverages pretrained diffusion models, traditionally used for 2D tasks, and adapts them for 3D detection through geometric and semantic tuning. Geometrically, we enhance the model to perform view synthesis from single images, incorporating an epipolar warp operator. This process utilizes easily accessible posed image data, eliminating the need for manual annotation. Semantically, the model is further refined on target detection data. Both stages utilize ControlNet, ensuring the preservation of original feature capabilities. Through our methodology, we obtain 3D-aware features that excel in identifying cross-view point correspondences. In 3D detection, 3DiffTecnion substantially surpasses previous benchmarks, e.g., Cube-RCNN, by $9.43\%$ in AP3D on the Omni3D-ARkitscene dataset. 
Furthermore, 3DiffTection demonstrates robust label efficiency and generalizes well to cross-domain data, nearly matching fully-supervised models in zero-shot scenarios. Project page: https://research.nvidia.com/labs/toronto-ai/3difftection/.

# 1. Introduction

Detecting objects in 3D from a single image presents a significant challenge in computer vision, involving not only object recognition and localization but also depth and orientation prediction. This task, crucial for applications in robotics and augmented reality, demands advanced 3D reasoning from computational models.

Training a 3D detector from scratch is resource-intensive due to the high labeling costs [5]. Recently, large self-supervised models have emerged as compelling learners for image representation [10, 16, 17]. They acquire robust semantic features that can be fine-tuned on smaller, annotated datasets. Image diffusion models, trained on internet-scale data, have proven to be particularly effective in this context [24, 46, 56]. However, these models often lack 3D awareness and exhibit a domain gap in 3D applications. Recent work has aimed to bridge this gap by lifting 2D image features to 3D and refining them for specific 3D tasks. NeRF-Det [54] trained a view synthesis model alongside a detection head using pretrained image feature extractors. However, this approach is constrained by the need for dense scene views and fully annotated data. Efforts in novel view synthesis using diffusion models have shown promise [7, 58]. Yet, these models are generally trained from scratch, thereby foregoing the advantages of using pretrained semantic features.

To overcome these limitations, our work, 3DiffTection, introduces a novel framework that repurposes pretrained 2D diffusion models for 3D object detection (see overview in Fig. 1). We enhance these models with 3D awareness through a view synthesis task, employing epipolar geometry to warp features from source images to target views. This process utilizes ControlNet [57] to maintain the integrity of the original features (see Fig. 3). Utilizing image pairs from videos, which are abundant and do not require manual annotation, our approach is scalable and efficient. To demonstrate that our approach successfully imparts 3D awareness to the model, we assess the performance of its features in establishing point correspondences across multiple views. Our results indicate that these features outperform those of the base model, both qualitatively and quantitatively. For 3D detection, 3DiffTection trains a standard detection head with 3D box supervision, incorporating a second ControlNet to adapt the features to specific detection tasks and domains, preserving feature quality and view synthesis capabilities. At test time, we capitalize on both geometric and semantic capabilities by generating detection proposals from multiple virtual synthesized views, which are then consolidated through Non-Maximum Suppression (NMS).

Our primary contributions are as follows: (1) We introduce a scalable technique for enhancing pretrained 2D diffusion models with 3D awareness through a novel geometric ControlNet, enhanced with an epipolar warp operator; (2) We adapt these features to a 3D detection task and target domain by introducing a second, semantic ControlNet; and (3) We integrate both view synthesis and 3D detection capabilities to further improve detection performance through ensemble prediction.
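To make the inference flow described above concrete, the following is a minimal PyTorch-style sketch: frozen Stable Diffusion features are summed with the outputs of the geometric ControlNet (queried at the identity pose and at nearby virtual poses) and the semantic ControlNet, passed to a 3D detection head, and the per-view predictions are merged with NMS. All module names (`sd_features`, `geo_controlnet`, `sem_controlnet`, `box_head`) are hypothetical stand-ins, not the released implementation, and the 2D NMS from torchvision is used purely for illustration, whereas the actual method merges 3D box predictions.

```python
import torch
from torchvision.ops import nms


def detect_with_view_ensemble(image, sd_features, geo_controlnet, sem_controlnet,
                              box_head, virtual_poses, iou_thr=0.5):
    """Fuse frozen SD, geometric and semantic ControlNet features, then ensemble over views."""
    all_boxes, all_scores = [], []
    for pose in virtual_poses:                       # identity pose plus small pseudo-rotations
        feats = sd_features(image) + geo_controlnet(image, pose) + sem_controlnet(image)
        boxes, scores = box_head(feats)              # (N, 4) boxes and (N,) confidences
        all_boxes.append(boxes)
        all_scores.append(scores)
    boxes, scores = torch.cat(all_boxes), torch.cat(all_scores)
    keep = nms(boxes, scores, iou_thr)               # consolidate duplicate detections across views
    return boxes[keep], scores[keep]


# Toy stand-ins so the sketch runs end to end; real modules would be the frozen SD UNet,
# the two ControlNets, and a Cube-RCNN-style 3D head.
image = torch.zeros(1, 3, 256, 256)
sd = lambda x: torch.zeros(1, 64)
geo = lambda x, pose: torch.zeros(1, 64)
sem = lambda x: torch.zeros(1, 64)
head = lambda f: (torch.tensor([[0.0, 0.0, 10.0, 10.0]]), torch.tensor([0.9]))
poses = [torch.eye(4), torch.eye(4)]                 # identity plus one "virtual" view
print(detect_with_view_ensemble(image, sd, geo, sem, head, poses))
```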
+ +3DiffTraction emerges as a powerful 3D detector, substantially surpassing previous benchmarks, e.g., CubERCNN, by $9.43\%$ in AP3D on the Omni3D-ARkitscene dataset. Furthermore, 3DiffTraction demonstrates robust label efficiency, achieving a 2.28 AP3D-N improvement over previous methods trained with full supervision while using only $50\%$ of the labels. 3DiffTraction also exhibits the ability to generalize to cross-domain data, nearly matching the performance of previously established fully-supervised models without any tuning (zero-shot). + +# 2. Related works + +3D Object Detection from Images. 3D object detection from posed images is widely explored [26, 32, 37, 51, 54]. However, assuming given camera extrinsic is not a common scenario, especially in applications such as AR/VR and mobile devices. The task of 3D detection from single images, relying solely on camera intrinsics, presents a more generalized yet significantly more challenging problem. The + +model is required to inherently learn 3D structures and harness semantic knowledge. While representative methods [8, 21, 23, 31, 47, 50] endeavor to enforce 3D detectors to learn 3D cues from diverse geometric constraints, the dearth of semantics stemming from the limited availability of 3D datasets still impede the generalizability of 3D detectors. Brazil et al. [5], in an effort to address this issue, embarked on enhancing the dataset landscape by introducing Omni3D dataset. Rather than focusing on advancing generalizable 3D detection by increasing annotated 3D data, we propose a new paradigm, of enhancing semantic-aware diffusion features with 3D awareness. + +Diffusion Models for 2D Perception. Trained diffusion models [30, 34, 36, 39] have been shown to have internal representations suitable for dense perception tasks, particularly in the realm of image segmentation [6, 14, 45, 56]. These models demonstrate impressive label efficiency [2]. Similarly, we observe strong base performance in both 2D and 3D detection (see Tab. 3); our method also benefits from high label efficiency. Diffusion models have further been trained to perform 2D segmentation tasks [11, 22, 53]. In [1] the model is trained to output a segmentation map using an auxiliary network that outputs residual features. Similarly, we use a ControlNet to refine the diffusion model features to endow them with 3D awareness. We note that several works utilize multiple generations to achieve a more robust prediction [1], we go a step further by using our controllable view generation to ensemble predictions from multiple views. Few works have studied tasks other than segmentation. DreamTeacher [24] proposed to distil the diffusion features to an image backbone and demonstrated excellent performance when tuned on perception tasks[24]. [40] trained a diffusion model for dense depth prediction from a single image. Recently, DiffusionDet [9] proposed an interesting method for using diffusion models for 2D detection by directly denoising the bounding boxes conditioned on the target image. Diffusion features have been analyzed in [49] showing that different UNet layer activations are correlated with different level of image details. We utilize this property when choosing which UNet layer outputs to warp in our geometric conditioning. Remarkably, [46] have shown strong point correspondence ability with good robustness to view change. Here we demonstrate that our 3D-aware features can further boost this robustness. 
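The cross-view point-correspondence probe referred to here can be illustrated with a small sketch (our own simplification for exposition, with feature extraction abstracted away): given feature maps of a reference and a target view, the match for a reference pixel is the target location with the highest cosine similarity.

```python
import torch
import torch.nn.functional as F


def match_point(ref_feat, tgt_feat, ref_xy):
    """ref_feat, tgt_feat: (C, H, W) feature maps; ref_xy: (x, y) pixel in the reference view."""
    C, H, W = ref_feat.shape
    x, y = ref_xy
    query = F.normalize(ref_feat[:, y, x], dim=0)          # (C,) descriptor at the query pixel
    keys = F.normalize(tgt_feat.reshape(C, -1), dim=0)     # (C, H*W) descriptors in the target view
    sim = query @ keys                                     # cosine-similarity heatmap, flattened
    idx = sim.argmax().item()
    return idx % W, idx // W                               # matched (x, y) location in the target


# Toy usage with random features; in practice the features would come from the image backbone.
ref = torch.randn(64, 32, 32)
tgt = torch.randn(64, 32, 32)
print(match_point(ref, tgt, (10, 12)))
```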
+ +Novel View Synthesis with Diffusion Models Image synthesis has undergone a significant transformation with the advent of 2D diffusion models, as demonstrated by notable works [12, 18, 19, 28, 29, 33, 36, 38, 43, 44]. These models have extended their capabilities to the Novel View Synthesis (NVS) task, where 3DiM [52] and Zero-123 [25] model NVS of objects as a viewpoint-conditioned image-to-image translation task with diffusion models. The models are trained on a synthetic dataset with camera anno + +![](images/203a697340b1db1b2b4e7c605608a27e5d624f072d90d92bfd9da3671df3a767.jpg) + +![](images/2343cf8df149d8c9053de2934a7c307f260e1249d1fe1005e34627f6f10cefcc.jpg) +Reference Image Reference Image + +![](images/96db09b4bb962605c5b6c6e52e540dac82feb39a144d29bc430f4e58ee316a92.jpg) + +![](images/9e7cb19f6ac30e1bd71215a9bd27a43aab56a995fccb05c3e09f38b18c5151a9.jpg) +DIFT DIFT + +![](images/92847051b8525fb05a836ff818a925a7dd7df88d7a82e34cbf18439cbc82f8e8.jpg) + +![](images/6788f6c27568e89bceda9e7d76215012c964e942233bd2a4c50026138ba365df.jpg) +3DiffTection 3DiffTection +Figure 2. Visualization of semantic correspondence prediction using different features Given a Red Source Point in the left most reference image, we predict the corresponding points in the images from different camera views on the right (Blue Dot). The ground truth points are marked by Red Stars. Our method, 3DiffTecnion, is able to identify precise correspondences in challenging scenes with repetitive visual patterns. + +![](images/e136d136315cb67d4ccfb9b004c0a59ec59d4aa8d5c41648d529586361a26dbf.jpg) + +![](images/eb8a0a60bea6219bc3fd531c371317c9e2993ad04d5b3bdd5256b3f300f8e730.jpg) +DIFT DIFT + +![](images/587fe0ee74bbadc427d879de26426e7b080f1eca6a37ff13ef9b3cf35b0082a0.jpg) + +![](images/5693250229cf0bab211bb3f2d459157050970986ddac7c8685e12e088b78700b.jpg) +3DiffTcction 3DiffTcction + +tation and demonstrate zero-shot generalization to in-the-wild images. NerfDiff [15] distills the knowledge of a 3D-aware conditional diffusion model into a Nerf. RealFusion [27] uses a diffusion model as a conditional prior with designed prompts. NeuralLift [55] uses language-guided priors to guide the novel view synthesis diffusion model. Most recently, inspired by the idea of video diffusion models [4, 20, 42], MVDream [41] adapts the attention layers to model the cross-view 3D dependency. The most relevant work to our approaches is SparseFusion [58], where authors propose to incorporate geometry priors with epipolar geometries. However, while their model is trained from scratch, in our approach, we use NVS merely as an auxiliary task to enhance the pre-trained diffusion features with 3D awareness and design the architecture for tuning a minimal number of parameters by leveraging a ControlNet. + +# 3. 3DiffTection + +We introduce 3DiffTlection, designed to harness diffusion model features for 3D detection. As depicted in Fig. 1, 3DiffTlection comprises three core components: 1) Instilling 3D awareness into the diffusion features by training a geometric ControlNet for view synthesis. 2) Bridging the domain and task gaps using a semantic ControlNet, which is concurrently trained with a 3D detection head on the target data distribution. 3) Amplifying 3D box predictions through a virtual view ensembling strategy. We further detail each of these steps in the subsequent sections. + +# 3.1. 
Diffusion Model as a Feature Extractor + +Recent works demonstrate that features extracted from text-to-image diffusion models, such as Stable Diffusion [36], capture rich semantics suitable for dense perception tasks, including image segmentation [56] and point correspond + +dences [46]. In this work, our interest lies in 3D object detection. However, since Stable Diffusion is trained on 2D image-text pairs—a pre-training paradigm proficient in aligning textual semantics with 2D visual features—it might lack 3D awareness. We aim to explore this by examining point correspondences between views. We hypothesize that features with 3D awareness should demonstrate the capability to identify correspondences that point to the same 3D locations when provided with multi-view images. + +Following [46, 56] we employ a single forward step for feature extraction. However, unlike these works, we only input images without textual captions, given that in real-world scenarios, textual input is typically not provided for object detection. Formally, given an image $\mathbf{x}$ , we sample a noise image $\mathbf{x}_t$ at time $t$ , and obtain the diffusion features + +$$ +\mathbf {f} = \mathcal {F} \left(\mathbf {x} _ {t}; \Theta\right), \mathbf {x} _ {t} = \sqrt {\bar {\alpha} _ {t}} \mathbf {x} + \sqrt {1 - \bar {\alpha} _ {t}} \epsilon_ {t}, \epsilon_ {t} \sim \mathbb {N} (0, 1), \tag {1} +$$ + +where $\mathbf{f}$ represents the multi-scale features from the decoder module of UNet $\mathcal{F}$ (parameterized by $\Theta$ ), and $\alpha_{t}$ represents a pre-defined noise schedule, satisfying $\bar{\alpha}_{t} = \prod_{k=1}^{t} \alpha_{k}$ . + +Interestingly, as illustrated in Fig. 2, the point localization of Stable Diffusion features depends on 2D appearance matching. This can lead to confusion in the presence of repeated visual patterns, indicating a deficiency in 3D spatial understanding. Given this observation, we aim to integrate 3D awareness into the diffusion features. + +# 3.2. Injecting 3D Awareness to Diffusion Features + +ControlNet [57] is a powerful tool that allows the addition of conditioning into a pre-trained, static Stable Diffusion (SD) model. It has been demonstrated to support various types of dense input conditioning, such as depth and semantic images. This is achieved through the injection of conditional image features into trainable copies of the + +![](images/2461a98e4bd6698926a2feeb97eae2681dd765aed52ba97cef75be0dfd19cafd.jpg) +(a) Before + +![](images/3c64bf0bfe55e5513cba759d69e2b2df5bb1c61abe1c205144368f59732522de.jpg) +(b) After +Figure 3. Architecture of Geometric ControlNet. Left: Original Stable Diffusion UNet encoder block. Right: We train novel view image synthesis by adding a geometric ControlNet to the original Stable Diffusion encoder blocks. The geometric ControlNet receives the conditional view image as an additional input. Using the camera pose, we introduce an epipolar warp operator, which warps intermediate features into the target view. With the geometric ControlNet, we significantly improve the 3D awareness of pre-trained diffusion features. + +original SD blocks. A significant attribute of ControlNet is its ability to resist overfitting to the dataset used for tuning while preserving the original model's performance. As a result, ControlNet is well-suited for enhancing diffusion features with 3D awareness without compromising their 2D semantic quality. 
+ +Formally, we denote one block of UNet $\mathcal{F}$ as $\mathcal{F}_s(\cdot;\Theta_s)$ parameterized by $\Theta_s$ . In particular, the original ControlNet block copies each pre-trained Stable Diffusion module $\mathcal{F}_s(\cdot;\Theta_s)$ denoted as $\mathcal{F}_s'(\cdot;\Theta_s')$ , and accompanying with two zero convolutions $\mathcal{Z}_{s1}$ and $\mathcal{Z}_{s2}$ , parameterized by $\Theta_{zs1}$ and $\Theta_{zs2}$ , respectively. We slightly abuse the notation of $\mathbf{x} \in \mathcal{R}^{H \times W \times C}$ as the arbitrary middle features of $\mathbf{x}_t$ in $\mathcal{F}$ . Then a ControlNet block with the corresponding frozen Stable Diffusion block is given by + +$$ +\mathbf {y} _ {s} = \mathcal {F} _ {s} (\mathbf {x}; \Theta_ {s}) + \mathcal {Z} _ {s 2} \left(\mathcal {F} _ {s} ^ {\prime} (\mathbf {x} + \mathcal {Z} _ {s 1} (\mathbf {c}; \Theta_ {z s 1}); \Theta_ {s} ^ {\prime}); \Theta_ {z s 2}\right), \tag {2} +$$ + +where $\mathbf{c} \in \mathcal{R}^{H \times W \times C}$ is the condition image feature and $\mathbf{y}_s \in \mathcal{R}^{H \times W \times C}$ is the output. + +Epipolar warp operator. We utilize ControlNet to enhance the 3D awareness of diffusion features by training it to perform view synthesis. Specifically, we select pairs of images with known relative camera poses and train the ControlNet conditioned on the source view to produce the output view. Since the features induced by the condition in ControlNet are additive, it is a common practice to ensure alignment between these features and the noisy input features. However, the input for our view synthesis task is, by definition, not aligned with the noisy input of the target + +view. As a solution, we propose to warp the source view features to align with the target using epipolar geometry. We denote the epipolar warp operator as $\mathcal{G}(\cdot ,T_n)$ , and our geometric ControlNet is formulated as: + +$$ +\mathbf {y} _ {s} = \mathcal {F} _ {s} (\mathbf {x}; \Theta_ {s}) + \mathcal {Z} _ {s 2} \left(\mathcal {G} \left(\mathcal {F} _ {s} ^ {\prime} (\mathbf {x} + \mathcal {Z} _ {s 1} (\mathbf {c}; \Theta_ {z s 1}); \Theta_ {s} ^ {\prime}), T _ {n}\right); \Theta_ {z s 2}\right), \tag {3} +$$ + +Formally, to obtain the target novel-view image at position $(u,v)$ , we assume that relative camera extrinsic from the source view is described by $T_{n} = [[R_{n},0]^{T},[t_{n},1]^{T}]$ and the intrinsic parameters are represented as $K$ . The equation for the epipolar line is: + +$$ +l _ {c} = K ^ {- T} \left(\left[ t _ {n} \right] \times R _ {n}\right) K ^ {- 1} [ u, v, 1 ] ^ {T}, \tag {4} +$$ + +Here, $l_{c}$ denotes the epipolar line associated with the source conditional image. We sample a set of features along the epipolar line, denoted as $\{\mathbf{c}(p_i)\}$ , where the $p_i$ are points on the epipolar line. These features are then aggregated at the target view position $(u,v)$ via a differentiable aggregator function, resulting in the updated features: + +$$ +\mathbf {c} ^ {\prime} (u, v) = \text {a g g r e g a t o r} (\{\mathbf {c} (p _ {i}) \}), \quad p _ {i} \sim l _ {c}. \tag {5} +$$ + +The differentiable aggregator can be as straightforward as average/max functions or something more complex like a transformer, as demonstrated in [13, 58], and $\mathbf{c}^{\prime}$ is the warped condition image features, i.e., the output of epipolar warp operator $\mathcal{G}$ . The geometric warping procedure is illustrated in Fig. 3. 
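As an illustration of Eqs. (4) and (5), the sketch below (a minimal sketch under our own assumptions, not the paper's exact operator) computes, for every target pixel, the epipolar line induced in the source view by the relative pose and intrinsics, bilinearly samples the source feature map at a few points on that line, and averages them as a simple aggregator. Near-vertical epipolar lines and out-of-image samples are not handled beyond zero padding.

```python
import torch
import torch.nn.functional as F


def skew(t):
    """3x3 cross-product matrix [t]_x for a length-3 translation vector."""
    tx, ty, tz = float(t[0]), float(t[1]), float(t[2])
    return torch.tensor([[0.0, -tz, ty], [tz, 0.0, -tx], [-ty, tx, 0.0]])


def epipolar_warp(src_feat, K, R, t, num_samples=8):
    """src_feat: (1, C, H, W) source-view features, warped onto the target pixel grid."""
    _, C, H, W = src_feat.shape
    F_mat = K.inverse().T @ skew(t) @ R @ K.inverse()       # maps target pixels to source epipolar lines
    ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
    pix = torch.stack([xs, ys, torch.ones_like(xs)], dim=-1).float()    # (H, W, 3) homogeneous pixels
    lines = pix @ F_mat.T                                   # (H, W, 3) lines a*x + b*y + c = 0
    a, b, c = lines.unbind(-1)
    x_s = torch.linspace(0, W - 1, num_samples)             # sample x uniformly across the source width
    y_s = -(a[..., None] * x_s + c[..., None]) / (b[..., None] + 1e-6)  # solve each line for y
    gx = x_s.expand(H, W, num_samples) / (W - 1) * 2 - 1    # normalize to [-1, 1] for grid_sample
    gy = y_s / (H - 1) * 2 - 1
    grid = torch.stack([gx, gy], dim=-1).view(1, H, W * num_samples, 2)
    samp = F.grid_sample(src_feat, grid, align_corners=True, padding_mode="zeros")
    samp = samp.view(1, C, H, W, num_samples)
    return samp.mean(dim=-1)                                # simple average aggregator over line samples


# Toy usage: random features, simple intrinsics, small translation between views.
feats = torch.randn(1, 16, 32, 32)
K = torch.tensor([[32.0, 0.0, 16.0], [0.0, 32.0, 16.0], [0.0, 0.0, 1.0]])
warped = epipolar_warp(feats, K, torch.eye(3), torch.tensor([0.1, 0.0, 0.0]))
print(warped.shape)   # torch.Size([1, 16, 32, 32])
```

A learned aggregator, for example a small transformer over the sampled points as in [13, 58], can replace the final average.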
+ +Interestingly, we found it beneficial to avoid warping features across all the UNet decoder blocks. As highlighted by + +[48], middle-layer features in Stable Diffusion emphasize high-level semantics, while top stages capture appearance and geometry. Given the shared semantic content in novel-view synthesis, even amidst pixel deviations, we warp features only in the final two stages of Stable-Diffusion. This maintains semantic consistency while accommodating geometric warping shifts. Our geometric ControlNet notably enhances the 3D awareness of diffusion features, evident in the 3DiffTection examples in Fig. 2. + +# 3.3. Bridging the Task and Domain Gap + +We leverage the 3D-enhanced features for 3D detection by training a standard detection head with 3D box supervision. To further verify the efficacy of our approach in adapting diffusion features for 3D tasks, we train a 3D detection head, keeping our fine-tuned features fixed. Notably, we observe a substantial improvement compared to the baseline SD feature. We report details in Tab. 3. + +Nevertheless, we acknowledge two potential gaps. Firstly, our view synthesis tuning is conceptualized as a universal 3D feature augmentation method. Hence, it is designed to work with a vast collection of posed image pairs, which can be inexpensively gathered (e.g., from videos) without the need for costly labeling. Consequently, there might be a domain discrepancy when comparing to target data, which could originate from a smaller, fully annotated dataset. Secondly, since the features aren't specifically finetuned for detection, there is further potential for optimization towards detection, in tandem with the detection head. As before, we aim to retain the robust feature characteristics already achieved and choose to deploy a second ControlNet. + +Specifically, we freeze both the original SD and the geometric ControlNet modules. We then introduce another trainable ControlNet, which we refer to as semantic ControlNet. For our model to execute single-image 3D detection, we utilize the input image $x$ in three distinct ways. First, we extract features from it using the pretrained SD, denoted as $\mathcal{F}(x)$ , through a single SD denoising forward step. Next, we feed it into our geometric ControlNet, represented as $\mathcal{F}_{geo}(x,T_n)$ , with an identity pose $(T_{n} = [Id,0])$ to obtain our 3D-aware features. Lastly, we introduce it to the semantic ControlNet, denoted by $\mathcal{F}_{sem}(x)$ , to produce trainable features fine-tuned for detection within the target data distribution. We aggregate all the features and pass them to a standard 3D detection head, represented as $\mathcal{D}$ [5]. The semantic ControlNet is trained with 3D detection head. + +$$ +y = \mathcal {D} (\mathcal {F} (x) + \mathcal {F} _ {\text {g e o}} (x, [ I d, 0 ]) + \mathcal {F} _ {\text {s e m}} (x)) \tag {6} +$$ + +The figure overview is in the supplementary material. + +# 3.4. Ensemble Prediction + +ControlNet is recognized for its ability to retain the capabilities of the pre-tuned model. As a result, our semantically tuned model still possesses view synthesis capabilities + +ties. We exploit this characteristic to introduce a test-time prediction ensembling that further enhances detection performance. Specifically, our box prediction $y$ is dependent on the input view. 
Although our detection model is trained with this pose set to the identity (i.e., no transformation), at test time we can incorporate other viewing transformations, denoted $\xi_{i}$,

$$
y(\xi) = \mathcal{D}\left(\mathcal{F}(x) + \mathcal{F}_{\text{geo}}(x, \xi) + \mathcal{F}_{\text{sem}}(x)\right). \tag{7}
$$

The final prediction is derived through non-maximum suppression of the individual view predictions:

$$
y_{\text{final}} = \mathrm{NMS}\left(\left\{ y(\xi_{i}) \right\}\right). \tag{8}
$$

We note that our objective isn't to create a novel view at this stage but to enrich the prediction using views that are close to the original pose. The underlying intuition is that the detection and view synthesis capabilities complement each other. Certain objects might be localized more precisely when observed from a slightly altered view.

# 4. Experiments

In this section, we present a comprehensive experimental evaluation of 3DiffTection and its constituent components. Initially, in Section 4.1, we establish 3DiffTection as a powerful 3D detection framework, particularly when fine-tuned on a specific target dataset. We then validate its capacity for generalization to new datasets, both with and without tuning of the detection head (Section 4.2). Subsequently, we demonstrate its ability to maintain strong performance with limited labels (Section 4.3). Finally, in Section 4.4, we confirm 3DiffTection's enhanced 3D awareness by measuring its feature correspondence accuracy. We also validate the importance of each module in our design and conclude with visualizations of our auxiliary view synthesis ability.

Datasets and implementation details. For all our experiments, we train the geometric ControlNet on the official ARKitScenes dataset [3], which provides around 450K posed low-resolution $(256\times 256)$ images. We sample around 40K RGB images along with their intrinsics and extrinsics. Note that in the following experiments, the pretrained geometric ControlNet is kept frozen. For training 3D object detection, we use Omni3D-ARKitScenes as our primary in-domain experiment dataset, and Omni3D-SUNRGBD for our cross-dataset experiments. To evaluate the performance, we compute a mean AP3D across all categories in Omni3D-ARKitScenes and over a range of IoU3D thresholds in [0.05, 0.10, ..., 0.50], simply denoted as AP3D. We also report AP3D at IoU 15, 25, and 50 (AP3D@15, AP3D@25 and AP3D@50), following [5]. We take the publicly available text-to-image LDM [36], Stable Diffusion, as the backbone. Unlike previous diffusion models which require multiple images for training

| Methods | Resolution | NVS Train Views | Det. Train Views | AP3D↑ | AP3D@15↑ | AP3D@25↑ | AP3D@50↑ |
| --- | --- | --- | --- | --- | --- | --- | --- |
| CubeRCNN-DLA | 256×256 | - | 1 | 31.75 | 43.10 | 34.68 | 11.07 |
| DreamTchr-Res50 | 256×256 | - | 1 | 33.20 | 44.54 | 37.10 | 12.35 |
| NeRF-Det-R50 | 256×256 | ≥10 | ≥10 | 33.13 | 46.81 | 36.03 | 13.58 |
| ImVoxelNet | 256×256 | - | ≥10 | 32.09 | 46.71 | 35.62 | 11.94 |
| 3DiffTection | 256×256 | 2 | 1 | 39.22 | 50.58 | 43.18 | 16.40 |
| CubeRCNN-DLA | 512×512 | - | 1 | 34.32 | 46.06 | 36.02 | 12.51 |
| DreamTchr-Res50 | 512×512 | - | 1 | 36.14 | 49.82 | 40.51 | 15.48 |
| 3DiffTection | 512×512 | 2 | 1 | 43.75 | 57.13 | 47.32 | 20.30 |
| CubeRCNN-DLA-Aug | 512×512 | - | 1 | 41.72 | 53.09 | 45.42 | 19.26 |
+ +Table 1. 3D Object Detection Results on Omni3D-ARKitScenes testing set. 3DiffTraction significantly outperforms baselines, including CubeRCNN-DLA-Aug, which is trained with 6x more supervision data. + +a novel-view synthesis task, we only take two views, one as the source view and another one as the target view. Moreover, we only consider two views with an overlap of less than $30\%$ . Regarding novel-view synthesis ensemble, we use pseudo camera rotations, i.e., $\pm 15$ deg and ensemble the predicted bounding boxes via NMS. + +Methods in comparison. CubeRCNN [5] extends Fast-RCNN [35] to 3D object detection by incorporating a cube head. In our work, we aim to provide a stronger 3D-aware image backbone, and compare it with other image backbones using the Cube-RCNN framework. Specifically, we compare with DreamTeacher [24], which distills knowledge from a Pre-trained Stable Diffusion to a lighter network, ResNet-50. We also compare with DIFT [46], which directly employs the frozen Stable Diffusion as the image feature extractor. Additionally, we evaluate methods designed for multi-view 3D detection, such as NeRF-Det [54] and ImVoxelNet [37]. While these methods typically require more images during training, we use them for single-image 3D object detection during testing. + +# 4.1. 3D Object Detection on Omni3D-ARKitsscenes + +In Tab. 1, we analyze the 3D object detection performance of 3DiffTlection compared to several baseline methods. Notably, 3DiffTlection significantly outperforms CubeRCNN-DLA [5], a prior art in single-view 3D detection on the Omni3D-ARKitScenes dataset, achieving a margin of $7.4\%$ at a resolution of $256 \times 256$ and $9.43\%$ at a resolution of $512 \times 512$ on the AP3D metric. We further compare our approach to NeRF-Det-R50 [54] and ImVoxelNet [37], both of which utilize multi-view images during training (indicated in Tab. 1 as NVS Train Views and Det. Train Views). In contrast, 3DiffTlection which does not rely on multi-view images for training the detection network and uses only view-pairs for geometric network training, surpasses these methods by $6.09\%$ and $7.13\%$ on the AP3D metric, respectively. Additionally, we compare our approach to DreamTeacher-Res50 [24], which distills StableDiffusion feature prediction into a ResNet backbone to make it amenable for perception tasks. 3DiffTlection exceeds DreamTeacher by $6.02\%$ and $7.61\%$ at resolutions of $256 \times 256$ and $512 \times 512$ , respectively. Lastly, we eval + +uate our model against CubeRCNN-DLA-Aug, which denotes the training of CubeRCNN on the complete Omni3D dataset, comprising 234,000 RGB images with a more robust training recipe. Remarkably, our model outperforms CubeRCNN-DLA-Aug by $2.03\%$ on AP3D while using nearly 6x less data, demonstrating its data efficiency. + +We also show visualization results in Fig. 4. Compared to CubeRCNN, our proposed 3DiffTecn predicts 3D bounding boxes with better pose, localization and significantly fewer false defecions. As seen in the middle column, our model can even handle severe occlusion cases, i.e., the sofa in the middle image and the sink in the right image. + +# 4.2. Cross-dataset Generalization + +To assess the capability of 3DiffTraction's geometric ControlNet to carry its 3D awareness to other datasets, we employed a 3DiffTraction model with its geometric ControlNet trained on the OMni3D-ARKitscene dataset, and conduct cross-dataset experiments on the Ommi3D-SUNRGBD dataset. 
We evaluate it in two settings: (1) fine-tune the parameters on the Omni3D-SUNRGBD dataset and test on the Omni3D-SUNRGBD dataset, and (2) train the parameters on the Omni3D-ARKitScenes dataset and directly test on the Omni3D-SUNRGBD dataset in a zero-shot setting. The performance is shown in Tab. 2. + +In the first setting (shown in the fourth column), as a baseline, we train the 3D head using DIFT-SD features. 3DiffTection w/o Semantic-ControlNet and w/ Semantic-ControlNet outperform DIFT-SD by $1.21\%$ and $5.99\%$ , respectively. We further compare our approach with CubeRCNN. To ensure a fair comparison, we take CubeRCNN-DLA trained on the Omni3D-ARKitScenes dataset and fine-tune its entire model on Omni3D-SUNRGBD. Without any training of the geometric ControlNet on Omni3D-SUNRGBD, 3DiffTection (w/o Semantic-ControlNet) with only a tuned 3D head surpasses the fully fine-tuned CubeRCNN-DLA by $0.39\%$ . Then, we reintegrate the semantic ControlNet and jointly train it with the 3D head. This yields a performance boost of $5.09\%$ . These results indicate that even without training the geometric ControlNet in the target domain, the semantic ControlNet adeptly adapts the features for perception tasks. + +![](images/13c06a72aa3f828faddcd127c5c83c5b8f25e94f6763ddf57d9a25f9330c20d8.jpg) +Figure 4. Qualitative results on Omni3D-ARKitScenes 3D Detection. In contrast to Cube-RCNN (bottom), our approach (top) accurately predicts both the box class and the 3D locations. The bird's-eye-view visualization further demonstrates that our predictions surpass the baseline performance of Cube-RCNN. + +
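To make the virtual-view ensembling step from our setup concrete, the following is a minimal, hedged sketch rather than our exact implementation. It assumes the detector has already been run on the source view and on the two pseudo-rotated ($\pm 15$ degree) synthesized views, and that all predictions have been mapped back to the source camera as 2D box projections; `torchvision.ops.nms` then performs the suppression. All function and variable names below are placeholders.

```python
# Hedged sketch of the novel-view-synthesis prediction ensemble (illustrative only).
import torch
from torchvision.ops import nms


def ensemble_virtual_view_detections(per_view_boxes, per_view_scores, iou_thresh=0.5):
    """Merge box predictions from the source view and the +/-15 degree
    pseudo-rotated virtual views via non-maximum suppression.

    per_view_boxes:  list of (N_i, 4) float tensors; boxes are assumed to be
                     already mapped back to the source camera and expressed as
                     (x1, y1, x2, y2) projections.
    per_view_scores: list of (N_i,) confidence tensors, one per view.
    """
    boxes = torch.cat(per_view_boxes)      # stack all candidate boxes
    scores = torch.cat(per_view_scores)    # stack the matching confidences
    keep = nms(boxes, scores, iou_thresh)  # indices of boxes that survive NMS
    return boxes[keep], scores[keep]


# Usage (placeholder tensors): one detection pass per view, then a single merge.
# boxes, scores = ensemble_virtual_view_detections(
#     [boxes_src, boxes_plus15, boxes_minus15],
#     [scores_src, scores_plus15, scores_minus15])
```

Running the detector on synthesized views and merging with NMS is what the NV-Ensemble column of Tab. 3 ablates; in our experiments this ensemble adds roughly $0.5\%$ AP3D.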
| Methods | Backbone | Pretrained on ARKit | Tuned on SUNRGBD | Zero-shot (w/o 2D GT) | Zero-shot (w/ 2D GT) |
| --- | --- | --- | --- | --- | --- |
| DIFT-SD | StableDiff | ✗ | 21.92 | 16.74 | 25.31 |
| CubeRCNN | DLA34 | ✓ | 22.72 | 16.81 | 25.05 |
| 3DiffTection | StableDiff + Geo-Ctr | ✓ | 23.11 | 17.37 | 26.94 |
| 3DiffTection | StableDiff + Geo-Ctr + Sem-Ctr | ✓ | 27.81 | 22.64 | 30.14 |
+ +Table 2. Cross-domain experiment on the Omni3D-SUNRGBD dataset. "Pre-trained on ARKit" denotes that we pre-train the backbone on Omni3D-ARKitScenes. For CubeRCNN, we pre-train it with 3D detection supervision. For all zero-shot experiments, the methods are first trained on Omni3D-ARKitScenes for 3D detection and then directly tested on the Omni3D-SUNRGBD dataset. "2D GT" means we use ground-truth 2D bounding boxes to crop ROI image features. The results are reported over the 14 classes shared between the Omni3D-SUNRGBD and Omni3D-ARKitScenes datasets. + +To further demonstrate the transferability of 3DiffTection, we train the models for 3D detection on Omni3D-ARKitScenes and directly test them on the Omni3D-SUNRGBD dataset without any further tuning. The results are shown in the two "Zero-shot" columns of Tab. 2. We observe that when ground-truth 2D bounding boxes are available, 3DiffTection with the semantic ControlNet even achieves the best performance. Without ground-truth 2D bounding boxes, 3DiffTection still outperforms DIFT-SD and CubeRCNN by $5.90\%$ and $5.83\%$ , respectively. These results demonstrate the notable transferability of 3DiffTection. + +# 4.3. Label Efficiency + +We hypothesize that our use of the semantic ControlNet for tuning 3DiffTection towards a target dataset should maintain high label efficiency. We test this by using $50\%$ and $10\%$ of the labels from the Omni3D-ARKitScenes dataset. The results are shown in Tab. ?? of the supplementary materials. In the low-data regime (for both the $50\%$ and $10\%$ label settings), 3DiffTection demonstrates significantly better performance and more modest degradation than the baselines. Notably, even with $50\%$ of the labels, our proposed 3DiffTection achieves a 2.28 AP3D-N improvement over previous methods trained on $100\%$ of the labels. Additionally, when tuning only the 3D head, 3DiffTection performs better than CubeRCNN and DreamTeacher with all of their parameters tuned. + +# 4.4. Analysis and Ablation + +Feature correspondence fidelity (Fig. 2). As described in Sec. 3.1, we conduct a feature correspondence experiment. We hypothesize that if our model is 3D-aware, it should be able to find 3D correspondences. As can be seen, our method yields more accurate point-matching results, primarily because our geometric ControlNet is trained to infer 3D correspondences through the epipolar warp operator in order to successfully generate novel views. To provide further insight, we visualize a heatmap of the similarity between target-image features and the reference key points. Notably, our 3DiffTection features exhibit better concentration around the target point. Furthermore, we quantitatively evaluate correspondence performance on the ScanNet dataset, which is accessed by neither our 3DiffTection nor DIFT, ensuring a fair comparison. The experimental results, shown in the supplementary material, also support our hypothesis. + +Novel-view synthesis visualization (Fig. 5). To further validate our geometric ControlNet's ability to maintain geo
| Backbone | NVS Train Views | Geo-Ctr | Sem-Ctr | NV-Ensemble | AP2D | AP3D↑ | AP3D@15↑ | AP3D@25↑ | AP3D@50↑ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| VIT-B (MAE) | - | - | - | - | 26.14 | 25.23 | 36.04 | 28.64 | 8.11 |
| Res50 (DreamTchr) | - | - | - | - | 25.27 | 24.36 | 34.16 | 25.97 | 7.93 |
| StableDiff. (DIFT) | - | - | - | - | 29.35 | 28.86 | 40.18 | 32.07 | 8.86 |
| StableDiff. (Ours) | 1 | ✓ | - | - | 29.51 | 26.05 | 35.81 | 29.86 | 6.95 |
| StableDiff. (Ours) | 2 | ✓ | - | - | 30.16 | 31.20 | 41.87 | 33.53 | 10.14 |
| StableDiff. (Ours) | 2 | ✓ | ✓ | - | 37.12 | 38.72 | 50.38 | 42.88 | 16.18 |
| StableDiff. (Ours) | 2 | ✓ | ✓ | ✓ | 37.19 | 39.22 | 50.58 | 43.18 | 16.40 |
+ +Table 3. Analysis of 3DiffTection modules on the Omni3D-ARKitScenes testing set. We first compare different backbones by freezing the backbone and training only the 3D detection head. Then, we perform ablative studies on each module of our architecture systematically. Starting with the baseline vanilla Stable Diffusion model, we incrementally incorporate improvements: Geometry-ControlNet (Geo-Ctr), the number of novel-view-synthesis training views (NVS Train Views), Semantic-ControlNet (Sem-Ctr), and the novel-view-synthesis ensemble (NV-Ensemble). + +![](images/417ef0d839acf2219053d702644a9ac19cae7a319ee9975164eccd5cc4b20fcb.jpg) +Condition Image + +![](images/81b9c178890e7d0553a954e3296adfc78c7ef4b709f53e323f0abd3689820c72.jpg) +Generated Image +Figure 5. Novel-view synthesis visualization on the Omni3D-ARKitScenes testing set. Our model with Geometry-ControlNet synthesizes realistic novel views from a single input image. + +![](images/6abb8442114644d6cab971414207c15ea3edd1dff62051d6c35abec7b01ce5e6.jpg) +GT + +![](images/001f4ad4fe0b28a9e436ae73b5fc674027538205da10fa7997e49a38690c9fd1.jpg) +Condition Image + +![](images/261d30b2ce440669a5438b36dd9bd408060bc87a94fd82d846f51a8a12a5eede.jpg) +Generated Image + +![](images/9991777c9e65312f6265bae4da6b6e6d134a1c580f45b177dc374710e515af60.jpg) +GT + +metric consistency of the source view content, we visualize novel-view synthesis results. The results demonstrate that our proposed epipolar warp operator is effective in synthesizing scenes with accurate geometry and layout compared to the ground-truth images. We note that scene-level NVS from a single image is a challenging task, and we observe that our model may introduce artifacts. While enhancing this performance is interesting future work, here we utilize NVS as an auxiliary task, which is demonstrated to effectively enhance our model's 3D awareness. + +3DiffTection modules. We analyze the unique modules and design choices in 3DiffTection: the Stable Diffusion backbone, the geometric and semantic ControlNets targeting NVS and detection, and the multi-view prediction ensemble. All results are reported on Omni3D-ARKitScenes in Tab. 3. We first validate our choice of a Stable Diffusion backbone. While diffusion features excel in 2D segmentation tasks [24, 56], they have not been tested in 3D detection. We analyze this choice independently of the other improvements by keeping the backbone frozen and training only the 3D detection head. The vanilla Stable Diffusion features achieve $28.86\%$ AP3D, exceeding CubeRCNN-VIT-B (MAE pretrained) by $3.63\%$ and ResNet-50 DreamTeacher by $4.5\%$ in AP3D. This performance is mirrored in the AP2D results, affirming Stable Diffusion's suitability for perception tasks. Our geometric ControlNet is aimed at instilling 3D awareness via NVS training. A performance boost of $2.34\%$ on AP3D and $0.81\%$ on AP2D indicates that the geometric ControlNet imparts 3D awareness while preserving the model's 2D knowledge. To ensure the improvement is attributable to our view-synthesis training, we limited the geometric ControlNet to single-view data by setting the source and target views to be identical (denoted by '1' in the NVS Train Views column of Tab. 3), which reduces the training to denoising training [6]. This results in a $2.81\%$ decrease in AP3D compared to the standard Stable Diffusion, affirming our hypothesis.
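As a concrete illustration of the correspondence probe used in Fig. 2 and Sec. 4.4, the snippet below sketches the standard nearest-neighbour matching on diffusion features, in the spirit of DIFT-style matching [46]: a query feature is read out at the reference keypoint, and cosine similarity against every location of the target-view feature map yields both the heatmap we visualize and the predicted match. This is a hedged sketch, assuming `feat_ref` and `feat_tgt` have already been extracted with the single-step feature extraction of Eq. (1); the names are placeholders, not our released code.

```python
import torch
import torch.nn.functional as F


def match_keypoint(feat_ref, feat_tgt, uv_ref):
    """Nearest-neighbour correspondence between two diffusion feature maps.

    feat_ref, feat_tgt: (C, H, W) feature maps of the reference / target view.
    uv_ref: (u, v) query-point coordinates in the reference image, given at
            feature-map resolution for simplicity.
    Returns the matched (u, v) in the target view and the (H, W) similarity map.
    """
    c, h, w = feat_tgt.shape
    query = feat_ref[:, uv_ref[1], uv_ref[0]]             # (C,) feature at the query point
    sim = F.cosine_similarity(
        feat_tgt.reshape(c, -1), query[:, None], dim=0)   # (H*W,) similarities
    heatmap = sim.reshape(h, w)                            # heatmap as visualized in Fig. 2
    idx = int(torch.argmax(sim))
    return (idx % w, idx // w), heatmap
```

With 3D-aware features from the geometric ControlNet, this argmax concentrates on the geometrically correct location even under the repetitive visual patterns that confuse purely 2D appearance matching.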
Further, the semantic ControlNet, co-trained with the 3D detection head, enhances both AP2D and AP3D by around $7\%$ , confirming its efficacy in adapting the features for optimal use by the detection head. Lastly, the NVS ensemble yields an additional $0.5\%$ increase in AP3D. + +# 5. Conclusion and Limitations + +3DiffTection, utilizing a 3D-aware diffusion model, enables efficient 3D detection from single images, overcoming large-scale data-annotation challenges. With its geometric and semantic tuning strategies, it surpasses previous benchmarks, showing high label efficiency and cross-domain adaptability. 3DiffTection has limitations, including the need for image pairs with accurate camera poses and challenges in handling dynamic objects in in-the-wild videos. Additionally, its use of the Stable Diffusion architecture demands substantial memory and runtime, achieving about 7.5 fps on a 3090Ti GPU. Suitable for offline tasks, it requires further optimization for online detection. + +Acknowledgements. Or Litany is a Taub fellow and is supported by the Azrieli Foundation Early Career Faculty Fellowship. We thank Qianqian Wang, David Acuna, and Jonah Philion for the insightful discussions. + +# References + +[1] Tomer Amit, Tal Shaharbany, Eliya Nachmani, and Lior Wolf. Segdiff: Image segmentation with diffusion probabilistic models. arXiv preprint arXiv:2112.00390, 2021. 2 +[2] Dmitry Baranchuk, Andrey Voynov, Ivan Rubachev, Valentin Khrulkov, and Artem Babenko. Label-efficient semantic segmentation with diffusion models. In International Conference on Learning Representations, 2022. 2 +[3] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Tal Dimry, Yuri Feigin, Peter Fu, Thomas Gebauer, Brandon Joffe, Daniel Kurz, Arik Schwartz, and Elad Shulman. ARKitScenes - a diverse real-world dataset for 3d indoor scene understanding using mobile RGB-D data. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. 5 +[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3 +[5] Garrick Brazil, Abhinav Kumar, Julian Straub, Nikhila Ravi, Justin Johnson, and Georgia Gkioxari. Omni3d: A large benchmark and model for 3d object detection in the wild, 2023. 1, 2, 5, 6 +[6] Emmanuel Asiedu Brempong, Simon Kornblith, Ting Chen, Niki Parmar, Matthias Minderer, and Mohammad Norouzi. Denoising pretraining for semantic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4175-4186, 2022. 2, 8 +[7] Eric R. Chan, Koki Nagano, Matthew A. Chan, Alexander W. Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3d-aware diffusion models, 2023. 1 +[8] Hansheng Chen, Yuyao Huang, Wei Tian, Zhong Gao, and Lu Xiong. Monorun: Monocular 3d object detection by reconstruction and uncertainty propagation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10379-10388, 2021. 2 +[9] Shoufa Chen, Peize Sun, Yibing Song, and Ping Luo. Diffusiondet: Diffusion model for object detection. ICCV, 2023. 2 +[10] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations.
In International conference on machine learning, pages 1597-1607. PMLR, 2020. 1 +[11] Ting Chen, Lala Li, Saurabh Saxena, Geoffrey Hinton, and David J Fleet. A generalist framework for panoptic segmentation of images and videos. arXiv preprint arXiv:2210.06366, 2022. 2 +[12] Prafulla Dhariwal and Alexander Quinn Nichol. Diffusion models beat GANs on image synthesis. In Advances in Neural Information Processing Systems, 2021. 2 +[13] Yilun Du, Cameron Smith, Ayush Tewari, and Vincent Sitzmann. Learning to render novel views from wide-baseline stereo pairs, 2023. 4 +[14] Alexandros Graikos, Nikolay Malkin, Nebojsa Jojic, and Dimitris Samaras. Diffusion models as plug-and-play priors. Advances in Neural Information Processing Systems, 35:14715-14728, 2022. 2 +[15] Jiatao Gu, Alex Trevithick, Kai-En Lin, Josh Susskind, Christian Theobalt, Lingjie Liu, and Ravi Ramamoorthi. Nerfdiff: Single-image view synthesis with NeRF-guided distillation from 3d-aware diffusion. In International Conference on Machine Learning, 2023. 3 +[16] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 1 +[17] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners, 2021. 1 +[18] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, 2020. 2 +[19] Jonathan Ho, Chitwan Saharia, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. arXiv preprint arXiv:2106.15282, 2021. 2 +[20] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsenko, Diederik P. Kingma, Ben Poole, Mohammad Norouzi, David J. Fleet, and Tim Salimans. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 3 +[21] Siyuan Huang, Siyuan Qi, Yinxue Xiao, Yixin Zhu, Ying Nian Wu, and Song-Chun Zhu. Cooperative holistic scene understanding: Unifying 3d object, layout, and camera pose estimation. In Advances in Neural Information Processing Systems, pages 206-217, 2018. 2 +[22] Boah Kim, Yujin Oh, and Jong Chul Ye. Diffusion adversarial representation learning for self-supervised vessel segmentation. In The Eleventh International Conference on Learning Representations, 2023. 2 +[23] Nilesh Kulkarni, Ishan Misra, Shubham Tulsiani, and Abhinav Gupta. 3d-relnet: Joint object and relational network for 3d prediction. 2019. 2 +[24] Daiqing Li, Huan Ling, Amlan Kar, David Acuna, Seung Wook Kim, Karsten Kreis, Antonio Torralba, and Sanja Fidler. Dreamteacher: Pretraining image backbones with deep generative models, 2023. 1, 2, 6, 8 +[25] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object, 2023. 2 +[26] Yingfei Liu, Tiancai Wang, Xiangyu Zhang, and Jian Sun. Petr: Position embedding transformation for multi-view 3d object detection. In European Conference on Computer Vision, pages 531-548. Springer, 2022. 2 +[27] Luke Melas-Kyriazi, Christian Rupprecht, Iro Laina, and Andrea Vedaldi. Realfusion: 360 reconstruction of any object from a single image. In CVPR, 2023. 3 +[28] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen.
Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 2 +[29] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In International Conference on Machine Learning, 2021. 2 +[30] Alexander Quinn Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. In Proceedings of the 39th International Conference on Machine Learning, pages 16784-16804. PMLR, 2022. 2 +[31] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang. Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[32] Jinhyung Park, Chenfeng Xu, Shijia Yang, Kurt Keutzer, Kris M. Kitani, Masayoshi Tomizuka, and Wei Zhan. Time will tell: New outlooks and a baseline for temporal multiview 3d object detection. In The Eleventh International Conference on Learning Representations, 2023. 2 +[33] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 2 +[34] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 2 +[35] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2015. 6 +[36] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2, 3, 5 +[37] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2397-2406, 2022. 2, 6 +[38] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S. Sara Mahdavi, Raphael Gontijo Lopes, Tim Salimans, Jonathan Ho, David J Fleet, and Mohammad Norouzi. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022. 2 +[39] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 2 +[40] Saurabh Saxena, Abhishek Kar, Mohammad Norouzi, and David J. Fleet. Monocular depth estimation using diffusion models, 2023. 2 +[41] Yichun Shi, Peng Wang, Jianglong Ye, Long Mai, Kejie Li, and Xiao Yang. Mvdream: Multi-view diffusion for 3d generation. arXiv:2308.16512, 2023. 3 +[42] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data.
arXiv:2209.14792, 2022. 3 +[43] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, 2015. 2 +[44] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In International Conference on Learning Representations, 2021. 2 +[45] Weimin Tan, Siyuan Chen, and Bo Yan. Diffss: Diffusion model for few-shot semantic segmentation. arXiv preprint arXiv:2307.00773, 2023. 2 +[46] Luming Tang, Menglin Jia, Qianqian Wang, Cheng Perng Phoo, and Bharath Hariharan. Emergent correspondence from image diffusion, 2023. 1, 2, 3, 6 +[47] Shubham Tulsiani, Saurabh Gupta, David Fouhey, Alexei A. Efros, and Jitendra Malik. Factoring shape, pose, and layout from the 2d image of a 3d scene. In Computer Vision and Pattern Recognition (CVPR), 2018. 2 +[48] Narek Tumanyan, Michal Geyer, Shai Bagon, and Tali Dekel. Plug-and-play diffusion features for text-driven image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1921-1930, 2023. 5 +[49] Narek Tumanyan, Michal Geyer, Shai Bagon, and Tali Dekel. Plug-and-play diffusion features for text-driven image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1921-1930, 2023. 2 +[50] Tai Wang, Xinge Zhu, Jiangmiao Pang, and Dahua Lin. Probabilistic and geometric depth: Detecting objects in perspective. In Conference on Robot Learning, pages 1475-1485. PMLR, 2022. 2 +[51] Yue Wang, Vitor Campagnolo Guizilini, Tianyuan Zhang, Yilun Wang, Hang Zhao, and Justin Solomon. Detr3d: 3d object detection from multi-view images via 3d-to-2d queries. In Conference on Robot Learning, pages 180-191. PMLR, 2022. 2 +[52] Daniel Watson, William Chan, Ricardo Martin-Brualla, Jonathan Ho, Andrea Tagliasacchi, and Mohammad Norouzi. Novel view synthesis with diffusion models. arXiv preprint arXiv:2210.04628, 2022. 2 +[53] Julia Wolleb, Robin Sandkühler, Florentin Bieder, Philippe Valmaggia, and Philippe C Cattin. Diffusion models for implicit image segmentation ensembles. In International Conference on Medical Imaging with Deep Learning, pages 1336-1348. PMLR, 2022. 2 +[54] Chenfeng Xu, Bichen Wu, Ji Hou, Sam Tsai, Ruilong Li, Jialiang Wang, Wei Zhan, Zijian He, Peter Vajda, Kurt Keutzer, and Masayoshi Tomizuka. Nerf-det: Learning geometry-aware volumetric representation for multi-view 3d object detection, 2023. 1, 2, 6 +[55] Dejia Xu, Yifan Jiang, Peihao Wang, Zhiwen Fan, Yi Wang, and Zhangyang Wang. Neurallift-360: Lifting an in-the-wild 2d photo to a 3d object with $360^{\circ}$ views. 2022. 3 +[56] Jiarui Xu, Sifei Liu, Arash Vahdat, Wonmin Byeon, Xiaolong Wang, and Shalini De Mello. Open-vocabulary panoptic segmentation with text-to-image diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2955-2966, 2023. 1, 2, 3, 8 +[57] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models, 2023. 2, 3 +[58] Zhizhuo Zhou and Shubham Tulsiani. Sparsefusion: Distilling view-conditioned diffusion for 3d reconstruction. In CVPR, 2023.
1, 3, 4 \ No newline at end of file diff --git a/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/images.zip b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..67f4ac914c4df62897433377e9f27426a1ffd3b7 --- /dev/null +++ b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e03334a57d130257a9d9da822f04366937d1a90f81c224a9c869741ad206245 +size 587028 diff --git a/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/layout.json b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..bb4e67c5c3318215312b2c21d0590b864c2a2cd4 --- /dev/null +++ b/2024/3DiffTection_ 3D Object Detection with Geometry-Aware Diffusion Features/layout.json @@ -0,0 +1,8971 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 62, + 103, + 531, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 103, + 531, + 120 + ], + "spans": [ + { + "bbox": [ + 62, + 103, + 531, + 120 + ], + "type": "text", + "content": "3DiffTecn: 3D Object Detection with Geometry-Aware Diffusion Features" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 142, + 211, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 142, + 211, + 157 + ], + "spans": [ + { + "bbox": [ + 133, + 142, + 211, + 157 + ], + "type": "text", + "content": "Chenfeng " + }, + { + "bbox": [ + 133, + 142, + 211, + 157 + ], + "type": "inline_equation", + "content": "\\mathrm{Xu}^{1,2}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 89, + 157, + 139, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 157, + 139, + 170 + ], + "spans": [ + { + "bbox": [ + 89, + 157, + 139, + 170 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 89, + 157, + 139, + 170 + ], + "type": "text", + "content": "NVIDIA" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 151, + 157, + 220, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 157, + 220, + 171 + ], + "spans": [ + { + "bbox": [ + 151, + 157, + 220, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 151, + 157, + 220, + 171 + ], + "type": "text", + "content": "UC Berkeley" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 223, + 142, + 296, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 142, + 296, + 157 + ], + "spans": [ + { + "bbox": [ + 223, + 142, + 296, + 157 + ], + "type": "text", + "content": "Huan Ling" + }, + { + "bbox": [ + 223, + 142, + 296, + 157 + ], + "type": "inline_equation", + "content": "^{1,3,4}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 233, + 157, + 313, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 157, + 313, + 171 + ], + "spans": [ + { + "bbox": [ + 233, + 157, + 313, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 233, + 157, + 313, + 171 + ], + "type": "text", + "content": "Vector Institute" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 309, + 142, + 386, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 142, + 386, + 157 + ], + "spans": [ + { + "bbox": [ + 309, + 142, + 386, + 157 + ], + "type": 
"text", + "content": "Sanja Fidler" + }, + { + "bbox": [ + 309, + 142, + 386, + 157 + ], + "type": "inline_equation", + "content": "^{1,3,4}" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 399, + 143, + 459, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 143, + 459, + 157 + ], + "spans": [ + { + "bbox": [ + 399, + 143, + 459, + 157 + ], + "type": "text", + "content": "Or Litany1,5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 326, + 157, + 436, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 157, + 436, + 171 + ], + "spans": [ + { + "bbox": [ + 326, + 157, + 436, + 171 + ], + "type": "text", + "content": "4University of Toronto" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 450, + 157, + 501, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 450, + 157, + 501, + 171 + ], + "spans": [ + { + "bbox": [ + 450, + 157, + 501, + 171 + ], + "type": "text", + "content": "5Technion" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 55, + 193, + 173, + 347 + ], + "blocks": [ + { + "bbox": [ + 55, + 193, + 173, + 347 + ], + "lines": [ + { + "bbox": [ + 55, + 193, + 173, + 347 + ], + "spans": [ + { + "bbox": [ + 55, + 193, + 173, + 347 + ], + "type": "image", + "image_path": "3b8baaa198a92612716eb6b6b01593a412c4da7837e38b5b97e855001e7ae770.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 357, + 546, + 401 + ], + "lines": [ + { + "bbox": [ + 45, + 357, + 546, + 401 + ], + "spans": [ + { + "bbox": [ + 45, + 357, + 546, + 401 + ], + "type": "text", + "content": "Figure 1. (1) We enhance pre-trained diffusion features with 3D awareness by training a geometric ControlNet (Sec. 3.2). (2) We employ a semantic ControlNet (Sec. 3.3) to refine generative features for targeted data and downstream tasks, specifically focusing on enhancing features for 3D object detection. (3) During the inference process, we further enhance 3D detection accuracy by assembling the bounding box predictions from virtual views (Sec. 3.4)." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 173, + 193, + 342, + 347 + ], + "blocks": [ + { + "bbox": [ + 173, + 193, + 342, + 347 + ], + "lines": [ + { + "bbox": [ + 173, + 193, + 342, + 347 + ], + "spans": [ + { + "bbox": [ + 173, + 193, + 342, + 347 + ], + "type": "image", + "image_path": "8cf0024d69b12af854c82a7d5572215d681f5e6617d0afb848bc2e9afca39e76.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 342, + 193, + 421, + 347 + ], + "blocks": [ + { + "bbox": [ + 342, + 193, + 421, + 347 + ], + "lines": [ + { + "bbox": [ + 342, + 193, + 421, + 347 + ], + "spans": [ + { + "bbox": [ + 342, + 193, + 421, + 347 + ], + "type": "image", + "image_path": "29d0352b60e36b7e83fb0c699e08c9a37f45a4c6a3d5db07c4cb87a67a224880.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 421, + 193, + 539, + 346 + ], + "blocks": [ + { + "bbox": [ + 421, + 193, + 539, + 346 + ], + "lines": [ + { + "bbox": [ + 421, + 193, + 539, + 346 + ], + "spans": [ + { + "bbox": [ + 421, + 193, + 539, + 346 + ], + "type": "image", + "image_path": "dd73e635593482f0cf55158f68afd9f3f7fabb6558f3a47f1583060f82e20e9f.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 143, + 411, + 192, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 411, + 192, + 423 + ], + "spans": [ + { + "bbox": [ + 143, + 411, + 192, + 423 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 46, + 425, + 290, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 425, + 290, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 425, + 290, + 700 + ], + "type": "text", + "content": "3DiffTecnion introduces a novel method for 3D object detection from single images, utilizing a 3D-aware diffusion model for feature extraction. Addressing the resource-intensive nature of annotating large-scale 3D image data, our approach leverages pretrained diffusion models, traditionally used for 2D tasks, and adapts them for 3D detection through geometric and semantic tuning. Geometrically, we enhance the model to perform view synthesis from single images, incorporating an epipolar warp operator. This process utilizes easily accessible posed image data, eliminating the need for manual annotation. Semantically, the model is further refined on target detection data. Both stages utilize ControlNet, ensuring the preservation of original feature capabilities. Through our methodology, we obtain 3D-aware features that excel in identifying cross-view point correspondences. In 3D detection, 3DiffTecnion substantially surpasses previous benchmarks, e.g., Cube-RCNN, by " + }, + { + "bbox": [ + 46, + 425, + 290, + 700 + ], + "type": "inline_equation", + "content": "9.43\\%" + }, + { + "bbox": [ + 46, + 425, + 290, + 700 + ], + "type": "text", + "content": " in AP3D on the Omni3D-ARkitscene dataset. Furthermore, 3DiffTecnion demonstrates robust label efficiency and generalizes well to cross-domain data, nearly matching fully-supervised models in zero-shot scenarios. Project page: https://research.nvidia.com/labs/toronto-ai/3difftection/." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 411, + 386, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 411, + 386, + 423 + ], + "spans": [ + { + "bbox": [ + 307, + 411, + 386, + 423 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 434, + 545, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 434, + 545, + 506 + ], + "spans": [ + { + "bbox": [ + 304, + 434, + 545, + 506 + ], + "type": "text", + "content": "Detecting objects in 3D from a single image presents a significant challenge in computer vision, involving not only object recognition and localization but also depth and orientation prediction. This task, crucial for applications in robotics and augmented reality, demands advanced 3D reasoning from computational models." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 510, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 546, + 715 + ], + "type": "text", + "content": "Training a 3D detector from scratch is resource-intensive due to the high labeling costs [5]. Recently, large self-supervised models have emerged as compelling learners for image representation [10, 16, 17]. They acquire robust semantic features that can be fine-tuned on smaller, annotated datasets. Image diffusion models, trained on internet-scale data, have proven to be particularly effective in this context [24, 46, 56]. However, these models often lack 3D awareness and exhibit a domain gap in 3D applications. Recent work has aimed to bridge this gap by lifting 2D image features to 3D and refining them for specific 3D tasks. NeRF-Det [54] trained a view synthesis model alongside a detection head using pretrained image feature extractors. However, this approach is constrained by the need for dense scene views and fully annotated data. Efforts in novel view synthesis using diffusion models have shown promise [7, 58]. Yet, these models are generally" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10617" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "type": "text", + "content": "trained from scratch, thereby foregoing the advantages of using pretrained semantic features." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 96, + 287, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 96, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 47, + 96, + 287, + 370 + ], + "type": "text", + "content": "To overcome these limitations, our work, 3DiffTecnion, introduces a novel framework that repurposes pretrained 2D diffusion models for 3D object detection (see overview Fig. 1). We enhance these models with 3D awareness through a view synthesis task, employing epipolar geometry to warp features from source images to target views. This process utilizes ControlNet [57] to maintain the integrity of the original features (See Fig. 3). Utilizing image pairs from videos, which are abundant and do not require manual annotation, our approach is scalable and efficient. To demonstrate that our approach successfully imparts 3D awareness to the model, we assess the performance of its features in establishing point correspondences across multiple views. Our results indicate that these features outperform those of the base model, both qualitatively and quantitatively. For 3D detection, 3DiffTecnion trains a standard detection head with 3D box supervision, incorporating a second ControlNet to adapt the features to specific detection tasks and domains, preserving feature quality and view synthesis capabilities. At test time, we capitalize on both geometric and semantic capabilities by generating detection proposals from multiple virtual synthesized views, which are then consolidated through Non-Maximum Suppression (NMS)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 371, + 287, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 371, + 287, + 478 + ], + "spans": [ + { + "bbox": [ + 47, + 371, + 287, + 478 + ], + "type": "text", + "content": "Our primary contributions are as follows: (1) We introduce a scalable technique for enhancing pretrained 2D diffusion models with 3D awareness through a novel geometric ControlNet, enhanced with an epipolar warp operator; (2) We adapt these features to a 3D detection task and target domain by introducing a second, semantic ControlNet; and (3) We integrate both view synthesis and 3D detection capabilities to further improve detection performance through ensemble prediction." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 479, + 287, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 479, + 287, + 598 + ], + "spans": [ + { + "bbox": [ + 47, + 479, + 287, + 598 + ], + "type": "text", + "content": "3DiffTraction emerges as a powerful 3D detector, substantially surpassing previous benchmarks, e.g., CubERCNN, by " + }, + { + "bbox": [ + 47, + 479, + 287, + 598 + ], + "type": "inline_equation", + "content": "9.43\\%" + }, + { + "bbox": [ + 47, + 479, + 287, + 598 + ], + "type": "text", + "content": " in AP3D on the Omni3D-ARkitscene dataset. Furthermore, 3DiffTraction demonstrates robust label efficiency, achieving a 2.28 AP3D-N improvement over previous methods trained with full supervision while using only " + }, + { + "bbox": [ + 47, + 479, + 287, + 598 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 47, + 479, + 287, + 598 + ], + "type": "text", + "content": " of the labels. 3DiffTraction also exhibits the ability to generalize to cross-domain data, nearly matching the performance of previously established fully-supervised models without any tuning (zero-shot)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 609, + 135, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 609, + 135, + 621 + ], + "spans": [ + { + "bbox": [ + 47, + 609, + 135, + 621 + ], + "type": "text", + "content": "2. Related works" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 629, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 629, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 629, + 286, + 713 + ], + "type": "text", + "content": "3D Object Detection from Images. 3D object detection from posed images is widely explored [26, 32, 37, 51, 54]. However, assuming given camera extrinsic is not a common scenario, especially in applications such as AR/VR and mobile devices. The task of 3D detection from single images, relying solely on camera intrinsics, presents a more generalized yet significantly more challenging problem. The" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "text", + "content": "model is required to inherently learn 3D structures and harness semantic knowledge. While representative methods [8, 21, 23, 31, 47, 50] endeavor to enforce 3D detectors to learn 3D cues from diverse geometric constraints, the dearth of semantics stemming from the limited availability of 3D datasets still impede the generalizability of 3D detectors. Brazil et al. [5], in an effort to address this issue, embarked on enhancing the dataset landscape by introducing Omni3D dataset. Rather than focusing on advancing generalizable 3D detection by increasing annotated 3D data, we propose a new paradigm, of enhancing semantic-aware diffusion features with 3D awareness." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 218, + 545, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 218, + 545, + 602 + ], + "spans": [ + { + "bbox": [ + 304, + 218, + 545, + 602 + ], + "type": "text", + "content": "Diffusion Models for 2D Perception. 
Trained diffusion models [30, 34, 36, 39] have been shown to have internal representations suitable for dense perception tasks, particularly in the realm of image segmentation [6, 14, 45, 56]. These models demonstrate impressive label efficiency [2]. Similarly, we observe strong base performance in both 2D and 3D detection (see Tab. 3); our method also benefits from high label efficiency. Diffusion models have further been trained to perform 2D segmentation tasks [11, 22, 53]. In [1] the model is trained to output a segmentation map using an auxiliary network that outputs residual features. Similarly, we use a ControlNet to refine the diffusion model features to endow them with 3D awareness. We note that several works utilize multiple generations to achieve a more robust prediction [1], we go a step further by using our controllable view generation to ensemble predictions from multiple views. Few works have studied tasks other than segmentation. DreamTeacher [24] proposed to distil the diffusion features to an image backbone and demonstrated excellent performance when tuned on perception tasks[24]. [40] trained a diffusion model for dense depth prediction from a single image. Recently, DiffusionDet [9] proposed an interesting method for using diffusion models for 2D detection by directly denoising the bounding boxes conditioned on the target image. Diffusion features have been analyzed in [49] showing that different UNet layer activations are correlated with different level of image details. We utilize this property when choosing which UNet layer outputs to warp in our geometric conditioning. Remarkably, [46] have shown strong point correspondence ability with good robustness to view change. Here we demonstrate that our 3D-aware features can further boost this robustness." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "type": "text", + "content": "Novel View Synthesis with Diffusion Models Image synthesis has undergone a significant transformation with the advent of 2D diffusion models, as demonstrated by notable works [12, 18, 19, 28, 29, 33, 36, 38, 43, 44]. These models have extended their capabilities to the Novel View Synthesis (NVS) task, where 3DiM [52] and Zero-123 [25] model NVS of objects as a viewpoint-conditioned image-to-image translation task with diffusion models. 
The models are trained on a synthetic dataset with camera anno" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10618" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 142, + 148 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 142, + 148 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 142, + 148 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 142, + 148 + ], + "type": "image", + "image_path": "203a697340b1db1b2b4e7c605608a27e5d624f072d90d92bfd9da3671df3a767.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 49, + 150, + 142, + 228 + ], + "blocks": [ + { + "bbox": [ + 49, + 150, + 142, + 228 + ], + "lines": [ + { + "bbox": [ + 49, + 150, + 142, + 228 + ], + "spans": [ + { + "bbox": [ + 49, + 150, + 142, + 228 + ], + "type": "image", + "image_path": "2343cf8df149d8c9053de2934a7c307f260e1249d1fe1005e34627f6f10cefcc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 59, + 229, + 121, + 243 + ], + "lines": [ + { + "bbox": [ + 59, + 229, + 121, + 243 + ], + "spans": [ + { + "bbox": [ + 59, + 229, + 121, + 243 + ], + "type": "text", + "content": "Reference Image Reference Image" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 153, + 70, + 246, + 148 + ], + "blocks": [ + { + "bbox": [ + 153, + 70, + 246, + 148 + ], + "lines": [ + { + "bbox": [ + 153, + 70, + 246, + 148 + ], + "spans": [ + { + "bbox": [ + 153, + 70, + 246, + 148 + ], + "type": "image", + "image_path": "96db09b4bb962605c5b6c6e52e540dac82feb39a144d29bc430f4e58ee316a92.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 153, + 150, + 245, + 228 + ], + "blocks": [ + { + "bbox": [ + 153, + 150, + 245, + 228 + ], + "lines": [ + { + "bbox": [ + 153, + 150, + 245, + 228 + ], + "spans": [ + { + "bbox": [ + 153, + 150, + 245, + 228 + ], + "type": "image", + "image_path": "9e7cb19f6ac30e1bd71215a9bd27a43aab56a995fccb05c3e09f38b18c5151a9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 181, + 229, + 202, + 243 + ], + "lines": [ + { + "bbox": [ + 181, + 229, + 202, + 243 + ], + "spans": [ + { + "bbox": [ + 181, + 229, + 202, + 243 + ], + "type": "text", + "content": "DIFT DIFT" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 246, + 70, + 347, + 148 + ], + "blocks": [ + { + "bbox": [ + 246, + 70, + 347, + 148 + ], + "lines": [ + { + "bbox": [ + 246, + 70, + 347, + 148 + ], + "spans": [ + { + "bbox": [ + 246, + 70, + 347, + 148 + ], + "type": "image", + "image_path": "92847051b8525fb05a836ff818a925a7dd7df88d7a82e34cbf18439cbc82f8e8.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 246, + 150, + 347, + 228 + ], + "blocks": [ + { + "bbox": [ + 246, + 150, + 347, + 228 + ], + "lines": [ + { + "bbox": [ + 246, + 150, + 347, + 228 + ], + "spans": [ + { + "bbox": [ + 
246, + 150, + 347, + 228 + ], + "type": "image", + "image_path": "6788f6c27568e89bceda9e7d76215012c964e942233bd2a4c50026138ba365df.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 266, + 229, + 314, + 243 + ], + "lines": [ + { + "bbox": [ + 266, + 229, + 314, + 243 + ], + "spans": [ + { + "bbox": [ + 266, + 229, + 314, + 243 + ], + "type": "text", + "content": "3DiffTection 3DiffTection" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 245, + 547, + 291 + ], + "lines": [ + { + "bbox": [ + 46, + 245, + 547, + 291 + ], + "spans": [ + { + "bbox": [ + 46, + 245, + 547, + 291 + ], + "type": "text", + "content": "Figure 2. Visualization of semantic correspondence prediction using different features Given a Red Source Point in the left most reference image, we predict the corresponding points in the images from different camera views on the right (Blue Dot). The ground truth points are marked by Red Stars. Our method, 3DiffTecnion, is able to identify precise correspondences in challenging scenes with repetitive visual patterns." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 350, + 70, + 443, + 148 + ], + "blocks": [ + { + "bbox": [ + 350, + 70, + 443, + 148 + ], + "lines": [ + { + "bbox": [ + 350, + 70, + 443, + 148 + ], + "spans": [ + { + "bbox": [ + 350, + 70, + 443, + 148 + ], + "type": "image", + "image_path": "e136d136315cb67d4ccfb9b004c0a59ec59d4aa8d5c41648d529586361a26dbf.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 350, + 149, + 442, + 228 + ], + "blocks": [ + { + "bbox": [ + 350, + 149, + 442, + 228 + ], + "lines": [ + { + "bbox": [ + 350, + 149, + 442, + 228 + ], + "spans": [ + { + "bbox": [ + 350, + 149, + 442, + 228 + ], + "type": "image", + "image_path": "eb8a0a60bea6219bc3fd531c371317c9e2993ad04d5b3bdd5256b3f300f8e730.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 387, + 229, + 408, + 242 + ], + "lines": [ + { + "bbox": [ + 387, + 229, + 408, + 242 + ], + "spans": [ + { + "bbox": [ + 387, + 229, + 408, + 242 + ], + "type": "text", + "content": "DIFT DIFT" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 443, + 71, + 544, + 148 + ], + "blocks": [ + { + "bbox": [ + 443, + 71, + 544, + 148 + ], + "lines": [ + { + "bbox": [ + 443, + 71, + 544, + 148 + ], + "spans": [ + { + "bbox": [ + 443, + 71, + 544, + 148 + ], + "type": "image", + "image_path": "587fe0ee74bbadc427d879de26426e7b080f1eca6a37ff13ef9b3cf35b0082a0.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 443, + 149, + 544, + 228 + ], + "blocks": [ + { + "bbox": [ + 443, + 149, + 544, + 228 + ], + "lines": [ + { + "bbox": [ + 443, + 149, + 544, + 228 + ], + "spans": [ + { + "bbox": [ + 443, + 149, + 544, + 228 + ], + "type": "image", + "image_path": "5693250229cf0bab211bb3f2d459157050970986ddac7c8685e12e088b78700b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 475, + 228, + 523, + 242 + ], + "lines": [ + { + "bbox": [ + 475, + 228, + 523, + 242 + ], + "spans": [ + { + "bbox": [ + 475, + 228, + 523, + 242 + ], + "type": "text", + "content": "3DiffTcction 3DiffTcction" + } + ] + } + ], 
+ "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 300, + 289, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 300, + 289, + 491 + ], + "spans": [ + { + "bbox": [ + 46, + 300, + 289, + 491 + ], + "type": "text", + "content": "tation and demonstrate zero-shot generalization to in-the-wild images. NerfDiff [15] distills the knowledge of a 3D-aware conditional diffusion model into a Nerf. RealFusion [27] uses a diffusion model as a conditional prior with designed prompts. NeuralLift [55] uses language-guided priors to guide the novel view synthesis diffusion model. Most recently, inspired by the idea of video diffusion models [4, 20, 42], MVDream [41] adapts the attention layers to model the cross-view 3D dependency. The most relevant work to our approaches is SparseFusion [58], where authors propose to incorporate geometry priors with epipolar geometries. However, while their model is trained from scratch, in our approach, we use NVS merely as an auxiliary task to enhance the pre-trained diffusion features with 3D awareness and design the architecture for tuning a minimal number of parameters by leveraging a ControlNet." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 502, + 126, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 502, + 126, + 514 + ], + "spans": [ + { + "bbox": [ + 47, + 502, + 126, + 514 + ], + "type": "text", + "content": "3. 3DiffTection" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 46, + 523, + 287, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 523, + 287, + 643 + ], + "spans": [ + { + "bbox": [ + 46, + 523, + 287, + 643 + ], + "type": "text", + "content": "We introduce 3DiffTlection, designed to harness diffusion model features for 3D detection. As depicted in Fig. 1, 3DiffTlection comprises three core components: 1) Instilling 3D awareness into the diffusion features by training a geometric ControlNet for view synthesis. 2) Bridging the domain and task gaps using a semantic ControlNet, which is concurrently trained with a 3D detection head on the target data distribution. 3) Amplifying 3D box predictions through a virtual view ensembling strategy. We further detail each of these steps in the subsequent sections." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 46, + 652, + 253, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 652, + 253, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 652, + 253, + 664 + ], + "type": "text", + "content": "3.1. Diffusion Model as a Feature Extractor" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 46, + 666, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 666, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 666, + 287, + 714 + ], + "type": "text", + "content": "Recent works demonstrate that features extracted from text-to-image diffusion models, such as Stable Diffusion [36], capture rich semantics suitable for dense perception tasks, including image segmentation [56] and point correspond" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 299, + 545, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 299, + 545, + 407 + ], + "spans": [ + { + "bbox": [ + 304, + 299, + 545, + 407 + ], + "type": "text", + "content": "dences [46]. In this work, our interest lies in 3D object detection. 
However, since Stable Diffusion is trained on 2D image-text pairs, a pre-training paradigm that is proficient at aligning textual semantics with 2D visual features, it may lack 3D awareness. We explore this by examining point correspondences between views: we hypothesize that 3D-aware features should be able to identify correspondences that point to the same 3D location when provided with multi-view images.

Following [46, 56], we employ a single forward step for feature extraction. Unlike these works, however, we input only images without textual captions, since textual input is typically not available for object detection in real-world scenarios. Formally, given an image $\mathbf{x}$, we sample a noised image $\mathbf{x}_t$ at time $t$ and obtain the diffusion features

$$\mathbf{f} = \mathcal{F}(\mathbf{x}_t; \Theta), \quad \mathbf{x}_t = \sqrt{\bar{\alpha}_t}\,\mathbf{x} + \sqrt{1-\bar{\alpha}_t}\,\epsilon_t, \quad \epsilon_t \sim \mathcal{N}(\mathbf{0}, \mathbf{I}), \tag{1}$$

where $\mathbf{f}$ denotes the multi-scale features from the decoder module of the UNet $\mathcal{F}$ (parameterized by $\Theta$), and $\alpha_t$ is a pre-defined noise schedule satisfying $\bar{\alpha}_t = \prod_{k=1}^{t} \alpha_k$.

Interestingly, as illustrated in Fig. 2, the point localization of Stable Diffusion features relies on 2D appearance matching. This can lead to confusion in the presence of repeated visual patterns, indicating a deficiency in 3D spatial understanding. Given this observation, we aim to integrate 3D awareness into the diffusion features.
"type": "image_caption" + }, + { + "bbox": [ + 46, + 289, + 547, + 334 + ], + "lines": [ + { + "bbox": [ + 46, + 289, + 547, + 334 + ], + "spans": [ + { + "bbox": [ + 46, + 289, + 547, + 334 + ], + "type": "text", + "content": "Figure 3. Architecture of Geometric ControlNet. Left: Original Stable Diffusion UNet encoder block. Right: We train novel view image synthesis by adding a geometric ControlNet to the original Stable Diffusion encoder blocks. The geometric ControlNet receives the conditional view image as an additional input. Using the camera pose, we introduce an epipolar warp operator, which warps intermediate features into the target view. With the geometric ControlNet, we significantly improve the 3D awareness of pre-trained diffusion features." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 342, + 287, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 287, + 415 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 287, + 415 + ], + "type": "text", + "content": "original SD blocks. A significant attribute of ControlNet is its ability to resist overfitting to the dataset used for tuning while preserving the original model's performance. As a result, ControlNet is well-suited for enhancing diffusion features with 3D awareness without compromising their 2D semantic quality." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "spans": [ + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": "Formally, we denote one block of UNet " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_s(\\cdot;\\Theta_s)" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": " parameterized by " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\Theta_s" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": ". 
In particular, the original ControlNet block copies each pre-trained Stable Diffusion module " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_s(\\cdot;\\Theta_s)" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": " denoted as " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_s'(\\cdot;\\Theta_s')" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": ", and accompanying with two zero convolutions " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}_{s1}" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}_{s2}" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": ", parameterized by " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\Theta_{zs1}" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\Theta_{zs2}" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": ", respectively. We slightly abuse the notation of " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in \\mathcal{R}^{H \\times W \\times C}" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": " as the arbitrary middle features of " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 46, + 415, + 290, + 523 + ], + "type": "text", + "content": ". 
Then a ControlNet block with the corresponding frozen Stable Diffusion block is given by" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 530, + 287, + 554 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 530, + 287, + 554 + ], + "spans": [ + { + "bbox": [ + 47, + 530, + 287, + 554 + ], + "type": "interline_equation", + "content": "\\mathbf {y} _ {s} = \\mathcal {F} _ {s} (\\mathbf {x}; \\Theta_ {s}) + \\mathcal {Z} _ {s 2} \\left(\\mathcal {F} _ {s} ^ {\\prime} (\\mathbf {x} + \\mathcal {Z} _ {s 1} (\\mathbf {c}; \\Theta_ {z s 1}); \\Theta_ {s} ^ {\\prime}); \\Theta_ {z s 2}\\right), \\tag {2}", + "image_path": "7fe4cd87321e8f223818ee35fc1623e047326323b4569234f4da827c86f1eecc.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 554, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 554, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 554, + 287, + 581 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 554, + 287, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{c} \\in \\mathcal{R}^{H \\times W \\times C}" + }, + { + "bbox": [ + 46, + 554, + 287, + 581 + ], + "type": "text", + "content": " is the condition image feature and " + }, + { + "bbox": [ + 46, + 554, + 287, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_s \\in \\mathcal{R}^{H \\times W \\times C}" + }, + { + "bbox": [ + 46, + 554, + 287, + 581 + ], + "type": "text", + "content": " is the output." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "content": "Epipolar warp operator. We utilize ControlNet to enhance the 3D awareness of diffusion features by training it to perform view synthesis. Specifically, we select pairs of images with known relative camera poses and train the ControlNet conditioned on the source view to produce the output view. Since the features induced by the condition in ControlNet are additive, it is a common practice to ensure alignment between these features and the noisy input features. However, the input for our view synthesis task is, by definition, not aligned with the noisy input of the target" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 343, + 547, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 343, + 547, + 392 + ], + "spans": [ + { + "bbox": [ + 304, + 343, + 547, + 392 + ], + "type": "text", + "content": "view. As a solution, we propose to warp the source view features to align with the target using epipolar geometry. 
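As a concrete illustration of Eq. (2), here is a minimal PyTorch sketch of a ControlNet-style block built around a frozen SD block. The `sd_block` interface is simplified (real SD blocks also take timestep and text embeddings), and the channel-preserving 1x1 zero convolutions are an illustrative assumption; this is a sketch of the idea, not the ControlNet implementation.

```python
import copy
import torch.nn as nn

class ControlNetBlock(nn.Module):
    """Sketch of Eq. (2): frozen SD block plus a trainable copy gated by zero convolutions."""

    def __init__(self, sd_block: nn.Module, channels: int):
        super().__init__()
        self.sd_block = sd_block                      # frozen F_s(.; Theta_s)
        for p in self.sd_block.parameters():
            p.requires_grad_(False)
        self.sd_block_copy = copy.deepcopy(sd_block)  # trainable F'_s(.; Theta'_s)
        for p in self.sd_block_copy.parameters():
            p.requires_grad_(True)
        # Zero-initialized 1x1 convolutions Z_s1, Z_s2: the control branch starts as a no-op.
        self.zero_in = nn.Conv2d(channels, channels, kernel_size=1)
        self.zero_out = nn.Conv2d(channels, channels, kernel_size=1)
        nn.init.zeros_(self.zero_in.weight)
        nn.init.zeros_(self.zero_in.bias)
        nn.init.zeros_(self.zero_out.weight)
        nn.init.zeros_(self.zero_out.bias)

    def forward(self, x, c):
        # y_s = F_s(x) + Z_s2(F'_s(x + Z_s1(c)))
        return self.sd_block(x) + self.zero_out(self.sd_block_copy(x + self.zero_in(c)))
```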
Epipolar warp operator. We utilize ControlNet to enhance the 3D awareness of diffusion features by training it to perform view synthesis. Specifically, we select pairs of images with known relative camera poses and train the ControlNet, conditioned on the source view, to produce the target view. Since the features induced by the condition in ControlNet are additive, it is common practice to ensure alignment between these features and the noisy input features. However, the input for our view synthesis task is, by definition, not aligned with the noisy input of the target view. As a solution, we propose to warp the source-view features to align with the target view using epipolar geometry. We denote the epipolar warp operator as $\mathcal{G}(\cdot, T_n)$, and our geometric ControlNet is formulated as

$$\mathbf{y}_s = \mathcal{F}_s(\mathbf{x}; \Theta_s) + \mathcal{Z}_{s2}\left(\mathcal{G}\left(\mathcal{F}_s'(\mathbf{x} + \mathcal{Z}_{s1}(\mathbf{c}; \Theta_{zs1}); \Theta_s'), T_n\right); \Theta_{zs2}\right). \tag{3}$$

Formally, to obtain the target novel-view image at position $(u,v)$, we assume that the relative camera extrinsics from the source view are described by $T_n = [[R_n, 0]^T, [t_n, 1]^T]$ and the intrinsic parameters are represented by $K$. The epipolar line is given by

$$l_c = K^{-T}\left([t_n]_{\times} R_n\right) K^{-1} [u, v, 1]^T, \tag{4}$$

where $l_c$ denotes the epipolar line associated with the source conditional image. We sample a set of features along the epipolar line, denoted $\{\mathbf{c}(p_i)\}$, where the $p_i$ are points on the epipolar line. These features are then aggregated at the target-view position $(u,v)$ via a differentiable aggregator function, resulting in the updated features

$$\mathbf{c}'(u, v) = \operatorname{aggregator}(\{\mathbf{c}(p_i)\}), \quad p_i \sim l_c. \tag{5}$$

The differentiable aggregator can be as straightforward as an average/max function or something more complex like a transformer, as demonstrated in [13, 58], and $\mathbf{c}'$ is the warped condition image feature, i.e., the output of the epipolar warp operator $\mathcal{G}$. The geometric warping procedure is illustrated in Fig. 3.
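To make Eqs. (4)-(5) concrete, below is a small NumPy sketch that computes the epipolar line of a target pixel in the source view and aggregates the source-view features sampled along it with a simple mean aggregator. The helper names, the nearest-neighbour feature lookup, and the uniform sampling along the image width are illustrative assumptions; the paper leaves the aggregator choice open (average/max or a transformer).

```python
import numpy as np

def skew(t):
    """Cross-product matrix [t]_x for a 3-vector t."""
    return np.array([[0.0, -t[2], t[1]],
                     [t[2], 0.0, -t[0]],
                     [-t[1], t[0], 0.0]])

def epipolar_warp_feature(cond_feat, K, R_n, t_n, u, v, num_samples=32):
    """Sketch of Eqs. (4)-(5): aggregate source-view features along the epipolar
    line of target pixel (u, v). cond_feat has shape (H, W, C); the aggregator
    here is a plain mean over the sampled points."""
    H, W, _ = cond_feat.shape
    K_inv = np.linalg.inv(K)
    l_c = K_inv.T @ (skew(t_n) @ R_n) @ K_inv @ np.array([u, v, 1.0])  # Eq. (4)
    a, b, c = l_c                                   # line a*x + b*y + c = 0
    samples = []
    for x in np.linspace(0, W - 1, num_samples):    # sample points p_i on the line
        if abs(b) < 1e-8:
            continue
        y = -(a * x + c) / b
        if 0 <= y < H:
            samples.append(cond_feat[int(round(y)), int(round(x))])
    if not samples:                                 # epipolar line misses the image
        return np.zeros(cond_feat.shape[-1])
    return np.mean(samples, axis=0)                 # Eq. (5), mean aggregator
```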
Interestingly, we found it beneficial to avoid warping features across all the UNet decoder blocks. As highlighted by [48], middle-layer features in Stable Diffusion emphasize high-level semantics, while the top stages capture appearance and geometry. Given the shared semantic content in novel-view synthesis, even amidst pixel deviations, we warp features only in the final two stages of Stable Diffusion. This maintains semantic consistency while accommodating geometric warping shifts. Our geometric ControlNet notably enhances the 3D awareness of diffusion features, as evident in the 3DiffTection examples in Fig. 2.

3.3. Bridging the Task and Domain Gap

We leverage the 3D-enhanced features for 3D detection by training a standard detection head with 3D box supervision. To further verify the efficacy of our approach in adapting diffusion features for 3D tasks, we train a 3D detection head while keeping our fine-tuned features fixed. Notably, we observe a substantial improvement compared to the baseline SD features. We report details in Tab. 3.

Nevertheless, we acknowledge two potential gaps. First, our view synthesis tuning is conceptualized as a universal 3D feature augmentation method. It is therefore designed to work with a vast collection of posed image pairs, which can be gathered inexpensively (e.g., from videos) without costly labeling. Consequently, there may be a domain discrepancy with respect to the target data, which could originate from a smaller, fully annotated dataset. Second, since the features are not specifically fine-tuned for detection, there is further potential for optimizing them towards detection, in tandem with the detection head. As before, we aim to retain the robust feature characteristics already achieved and choose to deploy a second ControlNet.

Specifically, we freeze both the original SD and the geometric ControlNet modules. We then introduce another trainable ControlNet, which we refer to as the semantic ControlNet. For our model to perform single-image 3D detection, we utilize the input image $x$ in three distinct ways. First, we extract features from it using the pretrained SD, denoted $\mathcal{F}(x)$, through a single SD denoising forward step. Next, we feed it into our geometric ControlNet, represented as $\mathcal{F}_{geo}(x, T_n)$, with an identity pose $(T_n = [Id, 0])$ to obtain our 3D-aware features. Lastly, we pass it to the semantic ControlNet, denoted $\mathcal{F}_{sem}(x)$, to produce trainable features fine-tuned for detection within the target data distribution. We aggregate all the features and pass them to a standard 3D detection head $\mathcal{D}$ [5]. The semantic ControlNet is trained jointly with the 3D detection head:

$$y = \mathcal{D}\left(\mathcal{F}(x) + \mathcal{F}_{\text{geo}}(x, [Id, 0]) + \mathcal{F}_{\text{sem}}(x)\right). \tag{6}$$

A figure overview is provided in the supplementary material.
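A minimal sketch of Eq. (6) in PyTorch-style pseudocode follows; `sd_features`, `geo_controlnet`, `sem_controlnet`, and `det_head` are hypothetical callables standing in for the frozen SD feature extractor, the two ControlNets, and the 3D detection head.

```python
import torch

def detect_single_image(x, sd_features, geo_controlnet, sem_controlnet, det_head):
    """Sketch of Eq. (6): sum frozen SD features, frozen geometric-ControlNet features
    (queried with the identity pose), and trainable semantic-ControlNet features,
    then run a standard 3D detection head."""
    identity_pose = torch.eye(4)                 # stand-in for T_n = [Id, 0], no view change
    f_sd = sd_features(x)                        # F(x), frozen single-step features
    f_geo = geo_controlnet(x, identity_pose)     # F_geo(x, [Id, 0]), frozen
    f_sem = sem_controlnet(x)                    # F_sem(x), trainable
    return det_head(f_sd + f_geo + f_sem)        # y = D(F + F_geo + F_sem)
```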
3.4. Ensemble Prediction

ControlNet is recognized for its ability to retain the capabilities of the pre-tuned model. As a result, our semantically tuned model still possesses view synthesis capabilities. We exploit this characteristic to introduce a test-time prediction ensembling that further enhances detection performance. Specifically, our box prediction $y$ depends on the input view. Although our detection model is trained with this pose set to the identity (i.e., no transformation), at test time we can incorporate other viewing transformations, denoted $\xi_i$:

$$y(\xi) = \mathcal{D}\left(\mathcal{F}(x) + \mathcal{F}_{\text{geo}}(x, \xi) + \mathcal{F}_{\text{sem}}(x)\right). \tag{7}$$

The final prediction is derived through non-maximum suppression of the individual view predictions:

$$y_{\text{final}} = \operatorname{NMS}\left(\{y(\xi_i)\}\right). \tag{8}$$

We note that our objective is not to create a novel view at this stage but to enrich the prediction using views that are close to the original pose. The underlying intuition is that the detection and view synthesis capabilities complement each other: certain objects might be localized more precisely when observed from a slightly altered view.
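The sketch below illustrates Eqs. (7)-(8) under the same hypothetical interfaces as the previous example. The merging step is left abstract as an `nms_fn` callable, since the paper performs NMS over 3D box predictions and the exact implementation is not specified here.

```python
def ensemble_predictions(x, sd_features, geo_controlnet, sem_controlnet,
                         det_head, poses, nms_fn):
    """Sketch of Eqs. (7)-(8): run detection under several pseudo viewing
    transformations xi_i and merge the per-view boxes with NMS.

    poses  -- iterable of pose tensors, e.g. the identity and small pseudo rotations
    nms_fn -- callable taking the list of per-view detections and returning the
              merged set (a 3D NMS in the paper; left abstract in this sketch)
    """
    f_sd = sd_features(x)                        # F(x) is pose-independent
    f_sem = sem_controlnet(x)                    # F_sem(x) is pose-independent
    per_view = []
    for pose in poses:                           # xi_i
        f_geo = geo_controlnet(x, pose)          # F_geo(x, xi_i)
        per_view.append(det_head(f_sd + f_geo + f_sem))   # y(xi_i), Eq. (7)
    return nms_fn(per_view)                      # y_final = NMS({y(xi_i)}), Eq. (8)
```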
4. Experiments

In this section, we present a comprehensive experimental evaluation of 3DiffTection and its constituent components. Initially, in Section 4.1, we establish 3DiffTection as a powerful 3D detection framework, particularly when fine-tuned on a specific target dataset. We then validate its capacity for generalization to new datasets, both with and without tuning of the detection head (Section 4.2). Subsequently, we demonstrate its ability to maintain strong performance with limited labels (Section 4.3). Finally, in Section 4.4, we confirm 3DiffTection's enhanced 3D awareness by measuring its feature correspondence accuracy. We also validate the importance of each module in our design and conclude with visualizations of our auxiliary view synthesis ability.

Datasets and implementation details. For all our experiments, we train the geometric ControlNet on the official ARKitScenes dataset [3], which provides around 450K posed low-resolution (256×256) images. We sample around 40K RGB images along with their intrinsics and extrinsics. Note that in the following experiments the pretrained geometric ControlNet is kept frozen. For training 3D object detection, we use Omni3D-ARKitScenes as our primary in-domain dataset and Omni3D-SUNRGBD for our cross-dataset experiments. To evaluate performance, we compute a mean AP3D across all categories in Omni3D-ARKitScenes and over a range of IoU3D thresholds in [0.05, 0.10, ..., 0.50], simply denoted AP3D. We also report AP3D at IoU thresholds of 15, 25, and 50 (AP3D@15, AP3D@25, and AP3D@50), following [5]. We take the publicly available text-to-image LDM [36], Stable Diffusion, as the backbone. Unlike previous diffusion models, which require multiple images to train a novel-view synthesis task, we take only two views, one as the source view and one as the target view. Moreover, we only consider view pairs with an overlap of less than 30%. For the novel-view synthesis ensemble, we use pseudo camera rotations of ±15 degrees and ensemble the predicted bounding boxes via NMS.

| Methods | Resolution | NVS Train Views | Det. Train Views | AP3D↑ | AP3D@15↑ | AP3D@25↑ | AP3D@50↑ |
|---|---|---|---|---|---|---|---|
| CubeRCNN-DLA | 256×256 | - | 1 | 31.75 | 43.10 | 34.68 | 11.07 |
| DreamTchr-Res50 | 256×256 | - | 1 | 33.20 | 44.54 | 37.10 | 12.35 |
| NeRF-Det-R50 | 256×256 | ≥10 | ≥10 | 33.13 | 46.81 | 36.03 | 13.58 |
| ImVoxelNet | 256×256 | - | ≥10 | 32.09 | 46.71 | 35.62 | 11.94 |
| 3DiffTection | 256×256 | 2 | 1 | 39.22 | 50.58 | 43.18 | 16.40 |
| CubeRCNN-DLA | 512×512 | - | 1 | 34.32 | 46.06 | 36.02 | 12.51 |
| DreamTchr-Res50 | 512×512 | - | 1 | 36.14 | 49.82 | 40.51 | 15.48 |
| 3DiffTection | 512×512 | 2 | 1 | 43.75 | 57.13 | 47.32 | 20.30 |
| CubeRCNN-DLA-Aug | 512×512 | - | 1 | 41.72 | 53.09 | 45.42 | 19.26 |

Table 1. 3D object detection results on the Omni3D-ARKitScenes test set. 3DiffTection significantly outperforms the baselines, including CubeRCNN-DLA-Aug, which is trained with 6x more supervision data.

Methods in comparison. CubeRCNN [5] extends Faster R-CNN [35] to 3D object detection by incorporating a cube head. In our work, we aim to provide a stronger 3D-aware image backbone and compare it against other image backbones within the Cube-RCNN framework. Specifically, we compare with DreamTeacher [24], which distills knowledge from a pre-trained Stable Diffusion into a lighter ResNet-50 network. We also compare with DIFT [46], which directly employs the frozen Stable Diffusion as the image feature extractor. Additionally, we evaluate methods designed for multi-view 3D detection, such as NeRF-Det [54] and ImVoxelNet [37]. While these methods typically require more images during training, we use them for single-image 3D object detection at test time.

4.1. 3D Object Detection on Omni3D-ARKitScenes

In Tab. 1, we analyze the 3D object detection performance of 3DiffTection compared to several baseline methods.
Notably, 3DiffTection significantly outperforms CubeRCNN-DLA [5], a prior art in single-view 3D detection on the Omni3D-ARKitScenes dataset, by a margin of 7.4% at a resolution of 256×256 and 9.43% at a resolution of 512×512 on the AP3D metric. We further compare our approach to NeRF-Det-R50 [54] and ImVoxelNet [37], both of which utilize multi-view images during training (indicated in Tab. 1 as NVS Train Views and Det. Train Views). In contrast, 3DiffTection, which does not rely on multi-view images for training the detection network and uses only view pairs for geometric network training, surpasses these methods by 6.09% and 7.13% on the AP3D metric, respectively. Additionally, we compare our approach to DreamTeacher-Res50 [24], which distills Stable Diffusion feature prediction into a ResNet backbone to make it amenable to perception tasks. 3DiffTection exceeds DreamTeacher by 6.02% and 7.61% at resolutions of 256×256 and 512×512, respectively. Lastly, we evaluate our model against CubeRCNN-DLA-Aug, which denotes training CubeRCNN on the complete Omni3D dataset, comprising 234,000 RGB images, with a more robust training recipe. Remarkably, our model outperforms CubeRCNN-DLA-Aug by 2.03% on AP3D while using nearly 6x less data, demonstrating its data efficiency.

We also show visualization results in Fig. 4. Compared to CubeRCNN, our proposed 3DiffTection predicts 3D bounding boxes with better pose and localization and significantly fewer false detections. Our model can even handle severe occlusion cases, e.g., the sofa in the middle image and the sink in the right image.

(Figure 4. Qualitative results on Omni3D-ARKitScenes 3D detection. In contrast to Cube-RCNN (bottom), our approach (top) accurately predicts both the box class and the 3D locations. The bird's-eye-view visualization further demonstrates that our predictions surpass the baseline performance of Cube-RCNN.)

4.2. Cross-dataset Generalization

To assess the capability of 3DiffTection's geometric ControlNet to carry its 3D awareness to other datasets, we take a 3DiffTection model whose geometric ControlNet was trained on the Omni3D-ARKitScenes dataset and conduct cross-dataset experiments on the Omni3D-SUNRGBD dataset. We evaluate two settings: (1) fine-tune the parameters on the Omni3D-SUNRGBD dataset and test on Omni3D-SUNRGBD, and (2) train the parameters on the Omni3D-ARKitScenes dataset and directly test on Omni3D-SUNRGBD in a zero-shot setting. The performance is shown in Tab. 2.

In the first setting (shown in the fourth column), as a baseline, we train the 3D head using DIFT-SD features. 3DiffTection without and with the semantic ControlNet outperforms DIFT-SD by 1.21% and 5.99%, respectively. We further compare our approach with CubeRCNN. To ensure a fair comparison, we take CubeRCNN-DLA trained on Omni3D-ARKitScenes and fine-tune its entire model on Omni3D-SUNRGBD. Without any training of the geometric ControlNet on Omni3D-SUNRGBD, 3DiffTection (without the semantic ControlNet), tuning only the 3D head, surpasses the fully fine-tuned CubeRCNN-DLA by 0.39%. Then, we reintegrate the semantic ControlNet and jointly train it with the 3D head. This yields a performance boost of 5.09%. These results indicate that even without training the geometric ControlNet in the target domain, the semantic ControlNet adeptly adapts the features for perception tasks.

| Methods | Backbone | Pretrained on ARKit | Tuned on SUNRGBD | Zero-shot (w/o 2D GT) | Zero-shot (w/ 2D GT) |
|---|---|---|---|---|---|
| DIFT-SD | StableDiff | ✗ | 21.92 | 16.74 | 25.31 |
| CubeRCNN | DLA34 | ✓ | 22.72 | 16.81 | 25.05 |
| 3DiffTection | StableDiff+Geo-Ctr | ✓ | 23.11 | 17.37 | 26.94 |
| 3DiffTection | StableDiff+Geo-Ctr+Sem-Ctr | ✓ | 27.81 | 22.64 | 30.14 |

Table 2. Cross-domain experiment on the Omni3D-SUNRGBD dataset. "Pretrained on ARKit" denotes that we pre-train the backbone on Omni3D-ARKitScenes; for CubeRCNN, we pre-train it with 3D detection supervision. For all zero-shot experiments, the methods are first trained on Omni3D-ARKitScenes for 3D detection and then directly tested on the Omni3D-SUNRGBD dataset. "2D GT" means we use ground-truth 2D bounding boxes to crop ROI image features. Results are reported for the 14 classes shared between Omni3D-SUNRGBD and Omni3D-ARKitScenes.

To further demonstrate the transferability of 3DiffTection, we train the models for 3D detection on Omni3D-ARKitScenes and directly test them on the Omni3D-SUNRGBD dataset without any further tuning. The results are shown in the two zero-shot columns of Tab. 2. We observe that, given ground-truth 2D bounding boxes, 3DiffTection with the semantic ControlNet achieves the best performance. Without ground-truth 2D bounding boxes, 3DiffTection also outperforms DIFT-SD and CubeRCNN by 5.90% and 5.83%, respectively. These results demonstrate the notable transferability of 3DiffTection.

4.3. Label Efficiency

We hypothesize that our use of the semantic ControlNet for tuning 3DiffTection towards a target dataset should maintain high label efficiency. We test this by using 50% and 10% of the labels from the Omni3D-ARKitScenes dataset. The results are shown in Tab. ?? of the supplementary material.
In the low-data regime (both the 50% and 10% label settings), 3DiffTection demonstrates significantly better performance and more modest degradation than the baselines. Notably, even with 50% of the labels, 3DiffTection achieves a 2.28 AP3D-N improvement over previous methods trained on 100% of the labels. Additionally, when tuning only the 3D head, 3DiffTection performs better than CubeRCNN and DreamTeacher with all parameters tuned.

4.4. Analysis and Ablation

Feature correspondence fidelity (Fig. 2). As described in Sec. 3.1, we conduct a feature correspondence experiment. We hypothesize that if our model is 3D-aware, it should find 3D correspondences. As can be seen, our method yields more accurate point matching, primarily because our geometric ControlNet is trained to infer 3D correspondences through the epipolar warp operator in order to successfully generate novel views. To provide further insight, we visualize a heatmap of the similarity between the target image features and the reference key points. Notably, our 3DiffTection features exhibit better concentration around the target point. Furthermore, we quantitatively evaluate correspondence performance on the ScanNet dataset, which is accessed by neither 3DiffTection nor DIFT, ensuring a fair comparison. The experimental results, reported in the supplementary material, also support our hypothesis.
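As an illustration of how such a correspondence probe can be computed, here is a small PyTorch sketch that builds a cosine-similarity heatmap between the feature at a reference keypoint and every location of a target-view feature map. The function name and the argmax readout are illustrative assumptions rather than the paper's exact evaluation code.

```python
import torch
import torch.nn.functional as F

def correspondence_heatmap(ref_feat, tgt_feat, ref_uv):
    """Sketch of the correspondence probe: compare the feature at a reference
    keypoint against every location of the target feature map.

    ref_feat, tgt_feat -- feature maps of shape (C, H, W) from the two views
    ref_uv             -- (u, v) pixel coordinates of the keypoint in the reference view
    """
    u, v = ref_uv
    query = ref_feat[:, v, u]                                          # (C,) keypoint descriptor
    sim = F.cosine_similarity(tgt_feat, query[:, None, None], dim=0)   # (H, W) similarity heatmap
    best = torch.nonzero(sim == sim.max())[0]                          # best match as (row, col)
    return sim, (int(best[1]), int(best[0]))                           # heatmap and (u, v) match
```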
| Backbone | NVS Train Views | Geo-Ctr | Sem-Ctr | NV-Ensemble | AP2D | AP3D↑ | AP3D@15↑ | AP3D@25↑ | AP3D@50↑ |
|---|---|---|---|---|---|---|---|---|---|
| VIT-B (MAE) | - | - | - | - | 26.14 | 25.23 | 36.04 | 28.64 | 8.11 |
| Res50 (DreamTchr) | - | - | - | - | 25.27 | 24.36 | 34.16 | 25.97 | 7.93 |
| StableDiff. (DIFT) | - | - | - | - | 29.35 | 28.86 | 40.18 | 32.07 | 8.86 |
| StableDiff. (Ours) | 1 | ✓ | - | - | 29.51 | 26.05 | 35.81 | 29.86 | 6.95 |
| StableDiff. (Ours) | 2 | ✓ | - | - | 30.16 | 31.20 | 41.87 | 33.53 | 10.14 |
| StableDiff. (Ours) | 2 | ✓ | ✓ | - | 37.12 | 38.72 | 50.38 | 42.88 | 16.18 |
| StableDiff. (Ours) | 2 | ✓ | ✓ | ✓ | 37.19 | 39.22 | 50.58 | 43.18 | 16.40 |

Table 3. Analysis of 3DiffTection modules on the Omni3D-ARKitScenes test set. We first compare different backbones by freezing the backbone and training only the 3D detection head. Then, we perform ablative studies on each module of our architecture systematically. Starting with the baseline vanilla Stable Diffusion model, we incrementally incorporate improvements: the geometric ControlNet (Geo-Ctr), the number of novel-view synthesis training views (NVS Train Views), the semantic ControlNet (Sem-Ctr), and the novel-view synthesis ensemble (NV-Ensemble).

(Figure 5. Novel-view synthesis visualization on the Omni3D-ARKitScenes test set. Our model with the geometric ControlNet synthesizes realistic novel views from a single input image. Panels: Condition Image, Generated Image, GT.)

Novel-view synthesis visualization (Fig. 5). To further validate our geometric ControlNet's ability to maintain geometric consistency with the source-view content, we visualize novel-view synthesis results.
The results demonstrate that our proposed epipolar warp operator is effective in synthesizing the scene with accurate geometry and layout compared to the ground truth images. We note that scene-level NVS from a single image is a challenging task, and we observe that our model may introduce artifacts. While enhancing performance is an interesting future work, here we utilize NVS as an auxiliary task which is demonstrated to effectively enhance our model's 3D awareness." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "text", + "content": "3DiffTraction modules. We analyze the unique modules and design choices in 3DiffTraction: the Stable Diffusion backbone, geometric and semantic ControlNets targeting NVS and detection, and the multi-view prediction ensemble. All results are reported using the Omni3D-ARKitScenes in Tab. 3. We first validate our choice of using a Stable Diffusion backbone. While diffusion features excel in 2D segmentation tasks [24, 56], they have not been tested in 3D detection. We analyze this choice independently from the other improvements by keeping the backbone frozen and only training the 3D detection head. The vanilla Stable Diffusion features achieve a " + }, + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "inline_equation", + "content": "28.86\\%" + }, + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "text", + "content": " AP3D, exceeding CubeRCNN-VIT-B (MAE pretrained) by " + }, + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "inline_equation", + "content": "3.63\\%" + }, + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "text", + "content": " and ResNet-50 DreamTeacher by " + }, + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "inline_equation", + "content": "4.5\\%" + }, + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "text", + "content": " in AP30. This performance is mirrored in AP2D results, affirming Stable Diffusion's suitability for perception tasks. Our geometric ControlNet is aimed at instilling 3D awareness via NVS training. A performance boost of " + }, + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "inline_equation", + "content": "2.34\\%" + }, + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "text", + "content": " on AP3D and " + }, + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "inline_equation", + "content": "0.81\\%" + }, + { + "bbox": [ + 46, + 474, + 288, + 713 + ], + "type": "text", + "content": " on AP2D indicates that the geometric ControlNet imparts 3D awareness knowledge while preserving its 2D knowl" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 345, + 545, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 345, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 304, + 345, + 545, + 487 + ], + "type": "text", + "content": "edge. To ensure our improvement is attributed to our view synthesis training, we limited the geometric ControlNet to single-view data by setting the source and target views to be identical (denoted by '1' in the NVS train view column of Tab. 3), which reduces the training to be denoising training [6]. 
This indicates a " + }, + { + "bbox": [ + 304, + 345, + 545, + 487 + ], + "type": "inline_equation", + "content": "2.81\\%" + }, + { + "bbox": [ + 304, + 345, + 545, + 487 + ], + "type": "text", + "content": " decrease in AP3D compared to the standard Stable Diffusion, affirming our hypothesis. Further, the semantic ControlNet, co-trained with the 3D detection head, enhances both AP2D and AP3D by around " + }, + { + "bbox": [ + 304, + 345, + 545, + 487 + ], + "type": "inline_equation", + "content": "7\\%" + }, + { + "bbox": [ + 304, + 345, + 545, + 487 + ], + "type": "text", + "content": ", confirming its efficacy in adapting the feature for optimal use by the detection head. Lastly, using NVS-ensemble results in an additional " + }, + { + "bbox": [ + 304, + 345, + 545, + 487 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 304, + 345, + 545, + 487 + ], + "type": "text", + "content": " increase in AP3D." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 501, + 463, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 501, + 463, + 514 + ], + "spans": [ + { + "bbox": [ + 306, + 501, + 463, + 514 + ], + "type": "text", + "content": "5. Conclusion and Limitations" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 521, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 665 + ], + "type": "text", + "content": "3DiffTraction, utilizing a 3D-aware diffusion model, enables efficient 3D detection from single images, overcoming large-scale data annotation challenges. With its geometric and semantic tuning strategies, it surpasses previous benchmarks, showing high label efficiency and cross-domain adaptability. 3DiffTraction has limitations, including the need for image pairs with accurate camera poses and challenges in handling dynamic objects from in-the-wild videos. Additionally, its use of the Stable Diffusion architecture demands substantial memory and runtime, achieving about 7.5 fps on a 3090Ti GPU. Suitable for offline tasks, it requires further optimization for online detection." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgements. Or Litany is a Taub fellow and is supported by the Azrieli Foundation Early Career Faculty Fellowship. We thank Qianqian Wang, David Acuna, and Jonah Philion for the insightful discussion."
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10624" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "text", + "content": "[1] Tomer Amit, Tal Shaharbany, Eliya Nachmani, and Lior Wolf. Segdiff: Image segmentation with diffusion probabilistic models. arXiv preprint arXiv:2112.00390, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 288, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 288, + 169 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 288, + 169 + ], + "type": "text", + "content": "[2] Dmitry Baranchuk, Andrey Voynov, Ivan Rubachev, Valentin Khrulkov, and Artem Babenko. Label-efficient semantic segmentation with diffusion models. In International Conference on Learning Representations, 2022. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 171, + 287, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 171, + 287, + 247 + ], + "spans": [ + { + "bbox": [ + 53, + 171, + 287, + 247 + ], + "type": "text", + "content": "[3] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Tal Dimry, Yuri Feigin, Peter Fu, Thomas Gebauer, Brandon Joffe, Daniel Kurz, Arik Schwartz, and Elad Shulman. ARK-scenes - a diverse real-world dataset for 3d indoor scene understanding using mobile RGB-d data. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 250, + 287, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 250, + 287, + 304 + ], + "spans": [ + { + "bbox": [ + 53, + 250, + 287, + 304 + ], + "type": "text", + "content": "[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 306, + 286, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 306, + 286, + 349 + ], + "spans": [ + { + "bbox": [ + 53, + 306, + 286, + 349 + ], + "type": "text", + "content": "[5] Garrick Brazil, Abhinav Kumar, Julian Straub, Nikhila Ravi, Justin Johnson, and Georgia Gkioxari. Omni3d: A large benchmark and model for 3d object detection in the wild, 2023. 
1, 2, 5, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 351, + 286, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 351, + 286, + 407 + ], + "spans": [ + { + "bbox": [ + 53, + 351, + 286, + 407 + ], + "type": "text", + "content": "[6] Emmanuel Asiedu Brempong, Simon Kornblith, Ting Chen, Niki Parmar, Matthias Minderer, and Mohammad Norouzi. Denoising pretraining for semantic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4175-4186, 2022. 2, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 408, + 286, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 408, + 286, + 461 + ], + "spans": [ + { + "bbox": [ + 53, + 408, + 286, + 461 + ], + "type": "text", + "content": "[7] Eric R. Chan, Koki Nagano, Matthew A. Chan, Alexander W. Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3d-aware diffusion models, 2023. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 464, + 287, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 464, + 287, + 518 + ], + "spans": [ + { + "bbox": [ + 53, + 464, + 287, + 518 + ], + "type": "text", + "content": "[8] Hansheng Chen, Yuyao Huang, Wei Tian, Zhong Gao, and Lu Xiong. Monorun: Monocular 3d object detection by reconstruction and uncertainty propagation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10379-10388, 2021. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 520, + 286, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 520, + 286, + 552 + ], + "spans": [ + { + "bbox": [ + 53, + 520, + 286, + 552 + ], + "type": "text", + "content": "[9] Shoufa Chen, Peize Sun, Yibing Song, and Ping Luo. Diffusional: Diffusion model for object detection. ICCV, 2023. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 555, + 286, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 286, + 599 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 286, + 599 + ], + "type": "text", + "content": "[10] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International conference on machine learning, pages 1597-1607. PMLR, 2020. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 600, + 286, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 600, + 286, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 600, + 286, + 643 + ], + "type": "text", + "content": "[11] Ting Chen, Lala Li, Saurabh Saxena, Geoffrey Hinton, and David J Fleet. A generalist framework for panoptic segmentation of images and videos. arXiv preprint arXiv:2210.06366, 2022. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 646, + 286, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 286, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 286, + 678 + ], + "type": "text", + "content": "[12] Prafulla Dhariwal and Alexander Quinn Nichol. Diffusion models beat GANs on image synthesis. In Advances in Neural Information Processing Systems, 2021. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 680, + 286, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 286, + 713 + ], + "type": "text", + "content": "[13] Yilun Du, Cameron Smith, Ayush Tewari, and Vincent Sitzmann. Learning to render novel views from wide-baseline stereo pairs, 2023. 4" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[14] Alexandros Graikos, Nikolay Malkin, Nebojsa Jojic, and Dimitris Samaras. Diffusion models as plug-and-play priors. Advances in Neural Information Processing Systems, 35:14715-14728, 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "type": "text", + "content": "[15] Jiatao Gu, Alex Trevithick, Kai-En Lin, Josh Susskind, Christian Theobalt, Lingjie Liu, and Ravi Ramamoorthi. Nerfdiff: Single-image view synthesis with nef-guided distillation from 3d-aware diffusion. In International Conference on Machine Learning, 2023. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 175, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 228 + ], + "type": "text", + "content": "[16] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 230, + 545, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 230, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 545, + 262 + ], + "type": "text", + "content": "[17] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners, 2021. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 264, + 545, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 264, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 307, + 264, + 545, + 296 + ], + "type": "text", + "content": "[18] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, 2020. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 298, + 545, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 298, + 545, + 341 + ], + "spans": [ + { + "bbox": [ + 307, + 298, + 545, + 341 + ], + "type": "text", + "content": "[19] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. arXiv preprint arXiv:2106.15282, 2021. 
2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 343, + 545, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 343, + 545, + 397 + ], + "spans": [ + { + "bbox": [ + 307, + 343, + 545, + 397 + ], + "type": "text", + "content": "[20] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P. Kingma, Ben Poole, Mohammad Norouzi, David J. Fleet, and Tim Salimans. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 399, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 545, + 453 + ], + "type": "text", + "content": "[21] Siyuan Huang, Siyuan Qi, Yinxue Xiao, Yixin Zhu, Ying Nian Wu, and Song-Chun Zhu. Cooperative holistic scene understanding: Unifying 3d object, layout, and camera pose estimation. In Advances in Neural Information Processing Systems, pages 206-217, 2018. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 455, + 545, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 455, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 307, + 455, + 545, + 498 + ], + "type": "text", + "content": "[22] Boah Kim, Yujin Oh, and Jong Chul Ye. Diffusion adversarial representation learning for self-supervised vessel segmentation. In The Eleventh International Conference on Learning Representations, 2023. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 500, + 545, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 500, + 545, + 532 + ], + "spans": [ + { + "bbox": [ + 307, + 500, + 545, + 532 + ], + "type": "text", + "content": "[23] Nilesh Kulkarni, Ishan Misra, Shubham Tulsiani, and Abhinav Gupta. 3d-relnet: Joint object and relational network for 3d prediction. 2019. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 533, + 545, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 533, + 545, + 577 + ], + "spans": [ + { + "bbox": [ + 307, + 533, + 545, + 577 + ], + "type": "text", + "content": "[24] Daiqing Li, Huan Ling, Amlan Kar, David Acuna, Seung Wook Kim, Karsten Kreis, Antonio Torralba, and Sanja Fidler. Dreamteacher: Pretraining image backbones with deep generative models, 2023. 1, 2, 6, 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 578, + 545, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 578, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 307, + 578, + 545, + 611 + ], + "type": "text", + "content": "[25] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object, 2023. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 613, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 656 + ], + "type": "text", + "content": "[26] Yingfei Liu, Tiancai Wang, Xiangyu Zhang, and Jian Sun. Petr: Position embedding transformation for multi-view 3d object detection. In European Conference on Computer Vision, pages 531-548. Springer, 2022. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 658, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 689 + ], + "type": "text", + "content": "[27] Luke Melas-Kyriazi, Christian Rupprecht, Iro Laina, and Andrea Vedaldi. Realfusion: 360 reconstruction of any object from a single image. In CVPR, 2023. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "text", + "content": "[28] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "10625" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "text", + "content": "Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 287, + 140 + ], + "type": "text", + "content": "[29] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In International Conference on Machine Learning, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 142, + 287, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 142, + 287, + 207 + ], + "spans": [ + { + "bbox": [ + 48, + 142, + 287, + 207 + ], + "type": "text", + "content": "[30] Alexander Quinn Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. In Proceedings of the 39th International Conference on Machine Learning, pages 16784-16804. PMLR, 2022. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 209, + 287, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 209, + 287, + 263 + ], + "spans": [ + { + "bbox": [ + 48, + 209, + 287, + 263 + ], + "type": "text", + "content": "[31] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang. Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 265, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 265, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 265, + 287, + 319 + ], + "type": "text", + "content": "[32] Jinhyung Park, Chenfeng Xu, Shijia Yang, Kurt Keutzer, Kris M. Kitani, Masayoshi Tomizuka, and Wei Zhan. Time will tell: New outlooks and a baseline for temporal multiview 3d object detection. In The Eleventh International Conference on Learning Representations, 2023. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "type": "text", + "content": "[33] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 366, + 287, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 366, + 287, + 409 + ], + "spans": [ + { + "bbox": [ + 48, + 366, + 287, + 409 + ], + "type": "text", + "content": "[34] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 411, + 287, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 411, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 48, + 411, + 287, + 456 + ], + "type": "text", + "content": "[35] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2015. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 457, + 287, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 457, + 287, + 511 + ], + "spans": [ + { + "bbox": [ + 48, + 457, + 287, + 511 + ], + "type": "text", + "content": "[36] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2, 3, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 514, + 287, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 287, + 568 + ], + "type": "text", + "content": "[37] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2397-2406, 2022. 2, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 570, + 287, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 570, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 570, + 287, + 645 + ], + "type": "text", + "content": "[38] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S. 
Sara Mahdavi, Rapha Gontijo Lopes, Tim Salimans, Jonathan Ho, David J Fleet, and Mohammad Norouzi. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "text", + "content": "[39] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 2" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[40] Saurabh Saxena, Abhishek Kar, Mohammad Norouzi, and David J. Fleet. Monocular depth estimation using diffusion models, 2023. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 107, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 139 + ], + "type": "text", + "content": "[41] Yichun Shi, Peng Wang, Jianglong Ye, Long Mai, Kejie Li, and Xiao Yang. Mvdream: Multi-view diffusion for 3d generation. arXiv:2308.16512, 2023. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 141, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 184 + ], + "type": "text", + "content": "[42] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv:2209.14792, 2022.3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 186, + 545, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 230 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 230 + ], + "type": "text", + "content": "[43] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, 2015. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 232, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 232, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 308, + 232, + 545, + 285 + ], + "type": "text", + "content": "[44] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In International Conference on Learning Representations, 2021. 
2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 287, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 287, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 308, + 287, + 545, + 319 + ], + "type": "text", + "content": "[45] Weimin Tan, Siyuan Chen, and Bo Yan. Diffss: Diffusion model for few-shot semantic segmentation. arXiv preprint arXiv:2307.00773, 2023. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 321, + 545, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 321, + 545, + 354 + ], + "spans": [ + { + "bbox": [ + 308, + 321, + 545, + 354 + ], + "type": "text", + "content": "[46] Luming Tang, Menglin Jia, Qianqian Wang, Cheng Perng Phoo, and Bharath Hariharan. Emergent correspondence from image diffusion, 2023. 1, 2, 3, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 355, + 545, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 355, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 308, + 355, + 545, + 399 + ], + "type": "text", + "content": "[47] Shubham Tulsiani, Saurabh Gupta, David Fouhey, Alexei A. Efros, and Jitendra Malik. Factoring shape, pose, and layout from the 2d image of a 3d scene. In Computer Vision and Pattern Recognition (CVPR), 2018. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 400, + 545, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 400, + 545, + 455 + ], + "spans": [ + { + "bbox": [ + 308, + 400, + 545, + 455 + ], + "type": "text", + "content": "[48] Narek Tumanyan, Michal Geyer, Shai Bagon, and Tali Dekel. Plug-and-play diffusion features for text-driven image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1921-1930, 2023. 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 456, + 545, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 456, + 545, + 511 + ], + "spans": [ + { + "bbox": [ + 308, + 456, + 545, + 511 + ], + "type": "text", + "content": "[49] Narek Tumanyan, Michal Geyer, Shai Bagon, and Tali Dekel. Plug-and-play diffusion features for text-driven image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1921-1930, 2023. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 513, + 545, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 513, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 308, + 513, + 545, + 555 + ], + "type": "text", + "content": "[50] Tai Wang, ZHU Xinge, Jiangmiao Pang, and Dahua Lin. Probabilistic and geometric depth: Detecting objects in perspective. In Conference on Robot Learning, pages 1475-1485. PMLR, 2022. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 557, + 545, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 557, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 308, + 557, + 545, + 611 + ], + "type": "text", + "content": "[51] Yue Wang, Vitor Campagnolo Guizilini, Tianyuan Zhang, Yilun Wang, Hang Zhao, and Justin Solomon. Detr3d: 3d object detection from multi-view images via 3d-to-2d queries. In Conference on Robot Learning, pages 180–191. PMLR, 2022. 
2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 613, + 545, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 613, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 308, + 613, + 545, + 657 + ], + "type": "text", + "content": "[52] Daniel Watson, William Chan, Ricardo Martin-Brualla, Jonathan Ho, Andrea Tagliasacchi, and Mohammad Norouzi. Novel view synthesis with diffusion models. arXiv preprint arXiv:2210.04628, 2022. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "text", + "content": "[53] Julia Wolleb, Robin Sandkuhler, Florentin Bieder, Philippe Valmaggia, and Philippe C Cattin. Diffusion models for implicit image segmentation ensembles. In International Conference on Medical Imaging with Deep Learning, pages 1336-1348. PMLR, 2022. 2" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "10626" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 296 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[54] Chenfeng Xu, Bichen Wu, Ji Hou, Sam Tsai, Ruilong Li, Jialiang Wang, Wei Zhan, Zijian He, Peter Vajda, Kurt Keutzer, and Masayoshi Tomizuka. Nerf-det: Learning geometry-aware volumetric representation for multi-view 3d object detection, 2023. 1, 2, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 287, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 287, + 161 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 287, + 161 + ], + "type": "text", + "content": "[55] Dejia Xu, Yifan Jiang, Peihao Wang, Zhiwen Fan, Yi Wang, and Zhangyang Wang. Neurallift-360: Lifting an in-the-wild 2d photo to a 3d object with " + }, + { + "bbox": [ + 48, + 129, + 287, + 161 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 48, + 129, + 287, + 161 + ], + "type": "text", + "content": " views. 2022. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 287, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 287, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 287, + 228 + ], + "type": "text", + "content": "[56] Jiarui Xu, Sifei Liu, Arash Vahdat, Wonmin Byeon, Xiaolong Wang, and Shalini De Mello. Open-vocabulary panoptic segmentation with text-to-image diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2955-2966, 2023. 
1, 2, 3, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 229, + 287, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 229, + 287, + 261 + ], + "spans": [ + { + "bbox": [ + 48, + 229, + 287, + 261 + ], + "type": "text", + "content": "[57] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models, 2023. 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 262, + 287, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 262, + 287, + 296 + ], + "spans": [ + { + "bbox": [ + 48, + 262, + 287, + 296 + ], + "type": "text", + "content": "[58] Zhizhuo Zhou and Shubham Tulsiani. Sparsefusion: Distilling view-conditioned diffusion for 3d reconstruction. In CVPR, 2023. 1, 3, 4" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "10627" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/c4bfa810-f46e-49e4-9e19-ae1a9e3dcad6_content_list.json b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/c4bfa810-f46e-49e4-9e19-ae1a9e3dcad6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..cd7cb26551691dc81d517e4e04bbd11a30077036 --- /dev/null +++ b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/c4bfa810-f46e-49e4-9e19-ae1a9e3dcad6_content_list.json @@ -0,0 +1,1675 @@ +[ + { + "type": "text", + "text": "4D Gaussian Splatting for Real-Time Dynamic Scene Rendering", + "text_level": 1, + "bbox": [ + 161, + 130, + 810, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Guanjun Wu $^{1*}$ , Taoran Yi $^{2*}$ , Jiemin Fang $^{3\\dagger}$ , Lingxi Xie $^{3}$ , Xiaopeng Zhang $^{3}$ , Wei Wei $^{1}$ , Wenyu Liu $^{2}$ , Qi Tian $^{3}$ , Xinggang Wang $^{2\\dagger}$ $^{1}$ School of CS, Huazhong University of Science and Technology \n $^{2}$ School of EIC, Huazhong University of Science and Technology \n $^{3}$ Huawei Inc. {guajuwu, taoranyi, weiw, liuwy, xgwang}@hust.edu.cn {jaminfong, 198808xc, zxphistory}@gmail.com tian.qil@huawei.com", + "bbox": [ + 156, + 179, + 810, + 287 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d73bdb002bf5289b6d8907683ba93efef171e8ac01f88f48bdee539a002af360.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 78, + 309, + 305, + 467 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5a870c6bfe26650dfb77339e293ba392d3e580979a11e9a8ea07d02cdc240ba5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 305, + 309, + 573, + 467 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/73b2ae604c887edc3943e5666529abad127dbbc92887896cfe92677c2de6f41c.jpg", + "image_caption": [ + "Figure 1. Our method achieves real-time rendering $\\ddagger$ for dynamic scenes at high image resolutions while maintaining high rendering quality. The right figure is tested on synthetic datasets, where the radius of the dot corresponds to the training time. \"Res\": resolution. 
$\\ddagger$ The rendering speed not only depends on the image resolution but also the number of 3D Gaussians and the scale of deformation fields which are determined by the complexity of the scene." + ], + "image_footnote": [], + "bbox": [ + 571, + 305, + 875, + 494 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 549, + 313, + 564 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Representing and rendering dynamic scenes has been an important but challenging task. Especially, to accurately model complex motions, high efficiency is usually hard to guarantee. To achieve real-time dynamic scene rendering while also enjoying high training and storage efficiency, we propose 4D Gaussian Splatting (4D-GS) as a holistic representation for dynamic scenes rather than applying 3D-GS for each individual frame. In 4D-GS, a novel explicit representation containing both 3D Gaussians and 4D neural voxels is proposed. A decomposed neural voxel encoding algorithm inspired by HexPlane is proposed to efficiently build Gaussian features from 4D neural voxels and then a lightweight MLP is applied to predict Gaussian deformations at novel timestamps. Our 4D-GS method achieves real-time rendering under high resolutions, 82 FPS at an $800 \\times 800$ resolution on an RTX 3090 GPU while maintaining comparable or better quality than previous state-of-the-art methods. More demos and code are available at", + "bbox": [ + 73, + 580, + 472, + 853 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://guanjunwu.github.io/4dgs/.", + "bbox": [ + 500, + 550, + 830, + 565 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 598, + 632, + 614 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Novel view synthesis (NVS) stands as a critical task in the domain of 3D vision and plays a vital role in many applications, e.g. VR, AR, and movie production. NVS aims at rendering images from any desired viewpoint or timestamp of a scene, usually requiring modeling the scene accurately from several 2D images. Dynamic scenes are quite common in real scenarios, rendering which is important but challenging as complex motions need to be modeled with both spatially and temporally sparse input.", + "bbox": [ + 496, + 625, + 890, + 762 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "NeRF [32] has achieved great success in synthesizing novel view images by representing scenes with implicit functions. The volume rendering techniques [7] are introduced to connect 2D images and 3D scenes. However, the original NeRF method bears big training and rendering costs. Though some NeRF variants [5, 8, 10, 11, 33, 44, 47] reduce the training time from days to minutes, the rendering process still bears a non-negligible latency.", + "bbox": [ + 496, + 762, + 890, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent 3D Gaussian Splatting (3D-GS) [19] signifi", + "bbox": [ + 517, + 883, + 890, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contributions.", + "bbox": [ + 93, + 862, + 210, + 875 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "†Project Lead.", + "bbox": [ + 96, + 875, + 174, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$\\ddagger$ Corresponding author.", + "bbox": [ + 96, + 887, + 220, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "20310", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "cantly boosts the rendering speed to a real-time level by representing the scene as 3D Gaussians. The cumbersome volume rendering in the original NeRF is replaced with efficient differentiable splatting [57], which directly projects 3D Gaussian onto the 2D image plane. 3D-GS not only enjoys real-time rendering speed but also represents the scene more explicitly, making it easier to manipulate the scene representation.", + "bbox": [ + 75, + 90, + 472, + 210 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, 3D-GS focuses on the static scenes. Extending it to dynamic scenes as a 4D representation is a reasonable, important but difficult topic. The key challenge lies in modeling complicated point motions from sparse input. 3D-GS holds a natural geometry prior by representing scenes with point-like Gaussians. One direct and effective extension approach is to construct 3D Gaussians at each timestamp [30] but the storage/memory cost will multiply especially for long input sequences. Our goal is to construct a compact representation while maintaining both training and rendering efficiency, i.e. 4D Gaussian Splatting (4D-GS). To this end, we propose to represent Gaussian motions and shape changes by an efficient Gaussian deformation field network, containing a temporal-spatial structure encoder and an extremely tiny multi-head Gaussian deformation decoder. Only one set of canonical 3D Gaussians is maintained. For each timestamp, the canonical 3D Gaussians will be transformed by the Gaussian deformation field into new positions with new shapes. The transformation process represents both the Gaussian motion and deformation. Note that different from modeling motions of each Gaussian separately [30, 55], the spatial-temporal structure encoder can connect different adjacent 3D Gaussians to predict more accurate motions and shape deformation. Then the deformed 3D Gaussians can be directly splatted for rendering the according-timestamp image. 
Our contributions can be summarized as follows.", + "bbox": [ + 75, + 210, + 472, + 618 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- An efficient 4D Gaussian splating framework with an efficient Gaussian deformation field is proposed by modeling both Gaussian motion and Gaussian shape changes across time.", + "- A multi-resolution encoding method is proposed to connect the nearby 3D Gaussians and build rich 3D Gaussian features by an efficient spatial-temporal structure encoder.", + "- 4D-GS achieves real-time rendering on dynamic scenes, up to 82 FPS at a resolution of $800 \\times 800$ for synthetic datasets and 30 FPS at a resolution of $1352 \\times 1014$ in real datasets, while maintaining comparable or superior performance than previous state-of-the-art (SOTA) methods and shows potential for editing and tracking in 4D scenes." + ], + "bbox": [ + 76, + 621, + 468, + 816 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 76, + 829, + 225, + 845 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we simply review the difference of dynamic NeRFs in Sec. 2.1, then discuss the point clouds-based neural rendering algorithm in Sec. 2.2.", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Original Sampled Points", + "- Canonical Mapped Points", + "The Original Cast Ray", + "The Canonical Mapped Ray" + ], + "bbox": [ + 527, + 89, + 674, + 133 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Original Sampled Points", + "The Original Cast Ray", + "Time Features of the Points" + ], + "bbox": [ + 705, + 90, + 861, + 122 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ef1850bc3b5475d2432be61a873fecf2df6b0c6264f71e71debb0238f3bb80f6.jpg", + "image_caption": [ + "(a) Canonical Mapping Volume Rendering" + ], + "image_footnote": [], + "bbox": [ + 531, + 142, + 671, + 199 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/9d40cb415fcf5860dbe341a088ea28c52200b20dcaed0dabcd4d4d035f716dc6.jpg", + "image_caption": [ + "(b) Time-aware Volume Rendering" + ], + "image_footnote": [], + "bbox": [ + 723, + 132, + 866, + 205 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b15ec9426dcf64169ad93fcee106d21566e9bd5e5efb974d017d601a668cb17e.jpg", + "image_caption": [ + "(c) 4D Gaussian Splitting", + "Figure 2. Illustration of different dynamic scene rendering methods. (a) Points are sampled in the casted ray during volume rendering. The point deformation fields proposed in [8, 39] map the points into a canonical space. (b) Time-aware volume rendering computes the features of each point directly and does not change the rendering path. (c) The Gaussian deformation field converts original 3D Gaussians into another group of 3D Gaussians with a certain timestamp." + ], + "image_footnote": [], + "bbox": [ + 516, + 224, + 671, + 301 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$\\bullet$ Original 3D Gaussians G", + "$\\bullet$ Deformed 3D Gaussians $G^{\\prime}$", + "Gaussian Deformation Field $\\mathrm{F}(G, t_i)$", + "Gaussian Rasterization Paths" + ], + "bbox": [ + 696, + 227, + 887, + 271 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. 
Novel View Synthesis", + "text_level": 1, + "bbox": [ + 500, + 453, + 697, + 468 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Novel view synthesis is a important and challenging task in 3D reconstruction. Much approaches are proposed to represent a 3D object and render novel views. Efficient representations such as light fields [4], mesh [6, 15, 24, 46], voxels [16, 18, 23], multi-planes [9] can render high quality image with enough supervisions. NeRF-based approaches [3, 32, 59] demonstrate that implicit radiance fields can effectively learn scene representations and synthesize high-quality novel views. [35, 36, 39] have challenged the static hypothesis, expanding the boundary of novel view synthesis for dynamic scenes. [8] proposes to use an explicit voxel grid to model temporal information, accelerating the learning time for dynamic scenes to half an hour and applied in [17, 29, 56]. The proposed deformation-based neural rendering methods are shown in Fig. 2 (a). Flow-based [13, 25, 29, 48, 61] methods adopting warping algorithm to synthesis novel views by blending nearby frames. [5, 11, 12, 22, 44, 49] represent further advancements in faster dynamic scene learning by adopting decomposed neural voxels. They treat sampled points in each timestamp individually as shown in Fig. 2 (b). [14, 27, 38, 50, 51, 53] are efficient methods to handle multi-view setups. The aforementioned methods though achieve fast training speed, real-time rendering for dynamic scenes is still challenging, especially for monocular input. Our method aims at constructing a highly efficient training and rendering pipeline in Fig. 2 (c), while maintaining the quality, even for sparse inputs.", + "bbox": [ + 496, + 477, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "20311", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/79abdf0621f574dc702f76de86f319a1f46faf3cc8d3c4e6ffe3deb822ac0349.jpg", + "image_caption": [ + "Figure 3. The overall pipeline of our model. Given a group of 3D Gaussians $\\mathcal{G}$ , we extract the center coordinate of each 3D Gaussian $\\mathcal{X}$ and timestamp $t$ to compute the voxel feature by querying multi-resolution voxel planes. Then a tiny multi-head Gaussian deformation decoder is used to decode the feature and get the deformed 3D Gaussians $\\mathcal{G}'$ at timestamp $t$ . The deformed Gaussians are then splatted to the rendered image." + ], + "image_footnote": [], + "bbox": [ + 76, + 92, + 890, + 300 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Neural Rendering with Point Clouds", + "text_level": 1, + "bbox": [ + 76, + 380, + 393, + 396 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Effectively representing 3D scenes remains a challenging topic. The community has explored various neural representations [32], e.g. meshes, point clouds [54], voxels [10], and hybrid approaches [33, 47]. Point-cloud-based methods [28, 40, 41, 58] initially target at 3D segmentation and classification. A representative approach for rendering presented in [1, 54] combines point cloud representations with volume rendering, achieving rapid convergence speed even for dynamic novel view synthesis [34, 61]. 
[20, 21, 42] adopt differential point rendering technique for scene reconstructions.", + "bbox": [ + 75, + 417, + 470, + 583 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, 3D-GS [19] is notable for its pure explicit representation and differential point-based splatting methods, enabling real-time rendering of novel views. Dynamic3DGS [30] models dynamic scenes by tracking the position and variance of each 3D Gaussian at each timestamp $t_i$ . An explicit table is utilized to store information about each 3D Gaussian at every timestamp, leading to a linear memory consumption increase, denoted as $O(t\\mathcal{N})$ , in which $\\mathcal{N}$ is num of 3D Gaussians. For long-term scene reconstruction, the storage cost will become non-negligible. The memory complexity of our approach only depends on the number of 3D Gaussians and parameters of Gaussians deformation fields network $\\mathcal{F}$ , which is denoted as $O(\\mathcal{N} + \\mathcal{F})$ . [55] adds a marginal temporal Gaussian distribution into the origin 3D Gaussians, which uplift 3D Gaussians into 4DHowever, it may cause each 3D Gaussian to only focus on their local temporal space. [26] track each 3D Gaussians individually. Our approach also models 3D Gaussian motions but with a compact network, resulting in highly efficient training efficiency and real-time rendering.", + "bbox": [ + 75, + 598, + 470, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Preliminary", + "text_level": 1, + "bbox": [ + 500, + 378, + 625, + 396 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we simply review the representation and rendering process of 3D-GS [19] in Sec. 3.1 and the formula of dynamic NeRFs in Sec. 3.2.", + "bbox": [ + 498, + 405, + 892, + 450 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 3D Gaussian Splatting", + "text_level": 1, + "bbox": [ + 498, + 459, + 709, + 476 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D Gaussians [19] is an explicit 3D scene representation in the form of point clouds. Each 3D Gaussian is characterized by a covariance matrix $\\Sigma$ and a center point $\\mathcal{X}$ , which is referred to as the mean value of the Gaussian:", + "bbox": [ + 498, + 482, + 892, + 542 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nG (X) = e ^ {- \\frac {1}{2} \\mathcal {X} ^ {T} \\Sigma^ {- 1} \\mathcal {X}}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 616, + 551, + 890, + 571 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For differentiable optimization, the covariance matrix $\\Sigma$ can be decomposed into a scaling matrix $\\mathbf{S}$ and a rotation matrix $\\mathbf{R}$ :", + "bbox": [ + 498, + 582, + 890, + 625 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\Sigma = \\mathbf {R} \\mathbf {S} \\mathbf {S} ^ {T} \\mathbf {R} ^ {T}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 638, + 626, + 890, + 642 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "When rendering novel views, differential splatting [57] is employed for the 3D Gaussians within the camera planes. 
As introduced by [62], using a viewing transform matrix $W$ and the Jacobian matrix $J$ of the affine approximation of the projective transformation, the covariance matrix $\\Sigma'$ in camera coordinates can be computed as", + "bbox": [ + 498, + 648, + 890, + 739 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\Sigma^ {\\prime} = J W \\Sigma W ^ {T} J ^ {T}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 625, + 750, + 890, + 768 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In summary, each 3D Gaussian is characterized by the following attributes: position $\\mathcal{X} \\in \\mathbb{R}^3$ , color defined by spherical harmonic (SH) coefficients $\\mathcal{C} \\in \\mathbb{R}^k$ (where $k$ represents nums of SH functions), opacity $\\alpha \\in \\mathbb{R}$ , rotation factor $r \\in \\mathbb{R}^4$ , and scaling factor $s \\in \\mathbb{R}^3$ . Specifically, for each pixel, the color and opacity of all the Gaussians are computed using the Gaussian's representation Eq. 1. The blending of $N$ ordered points that overlap the pixel is given by", + "bbox": [ + 496, + 779, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "20312", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the formula:", + "bbox": [ + 76, + 90, + 163, + 104 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nC = \\sum_ {i \\in N} c _ {i} \\alpha_ {i} \\prod_ {j = 1} ^ {i - 1} (1 - \\alpha_ {i}). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 114, + 468, + 157 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Here, $c_{i}$ , $\\alpha_{i}$ represents the density and color of this point computed by a 3D Gaussian $G$ with covariance $\\Sigma$ multiplied by an estimizable per-point opacity and SH color coefficients.", + "bbox": [ + 76, + 167, + 468, + 228 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Dynamic NeRFs with Deformation Fields", + "text_level": 1, + "bbox": [ + 76, + 238, + 429, + 255 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "All the dynamic NeRF algorithms can be formulated as:", + "bbox": [ + 76, + 262, + 447, + 277 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nc, \\sigma = \\mathcal {M} (\\mathbf {x}, d, t, \\lambda), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 290, + 468, + 306 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{M}$ is a mapping that maps 8D space $(\\mathbf{x},d,t,\\lambda)$ to 4D space $(c,\\sigma)$ . Where $x$ reveals to the spatial point, $\\lambda$ is the optional input as used to build topological and appearance changes in [36], and $d$ stands for view-dependency.", + "bbox": [ + 76, + 318, + 468, + 378 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As is shown in Fig. 2 (a), all the deformation NeRF based method which estimate the world-to-canonical mapping by a deformation network $\\phi_t:(\\mathbf{x},t)\\to \\Delta \\mathbf{x}$ . Then a network is introduced to compute volume density and view-dependent RGB color from each ray. 
The formula for rendering can be expressed as:", + "bbox": [ + 76, + 378, + 468, + 469 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nc, \\sigma = \\operatorname {N e R F} (\\mathbf {x} + \\Delta \\mathbf {x}, d, \\lambda), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 482, + 468, + 498 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where 'NeRF' stands for the vanilla NeRF pipeline and $\\lambda$ is a frame-dependent code to model the topological and appearance changes [31, 36].", + "bbox": [ + 76, + 510, + 468, + 555 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "However, our 4D Gaussian splatting framework presents a novel rendering technique. We compute the canonical-to-world mapping directly with a Gaussian deformation field network $\\mathcal{F}$ at time $t$ , followed by differential splatting [19], which also enables computing backward flow and tracking for 3D Gaussians.", + "bbox": [ + 76, + 556, + 468, + 646 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Method", + "text_level": 1, + "bbox": [ + 76, + 660, + 168, + 674 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Sec. 4.1 introduces the overall 4D Gaussian Splatting framework. Then, the Gaussian deformation field is proposed in Sec. 4.2. Finally, we describe the optimization process in Sec. 4.3.", + "bbox": [ + 76, + 685, + 468, + 746 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. 4D Gaussian Splatting Framework", + "text_level": 1, + "bbox": [ + 76, + 756, + 380, + 772 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Fig. 3, given a view matrix $M = [R,T]$ and a timestamp $t$ , our 4D Gaussian splatting framework includes 3D Gaussians $\\mathcal{G}$ and a Gaussian deformation field network $\\mathcal{F}$ . Then a novel-view image $\\hat{I}$ is rendered by differential splatting [57] $\\mathcal{S}$ following $\\hat{I} = \\mathcal{S}(M,\\mathcal{G}^{\\prime})$ , where $\\mathcal{G}^{\\prime} = \\Delta \\mathcal{G} + \\mathcal{G}$ .", + "bbox": [ + 76, + 779, + 468, + 854 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, the deformation of 3D Gaussians $\\Delta \\mathcal{G}$ is introduced by the Gaussian deformation field network $\\Delta \\mathcal{G} = \\mathcal{F}(\\mathcal{G},t)$ , in which the spatial-temporal structure encoder $\\mathcal{H}$", + "bbox": [ + 76, + 854, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3ec68fe898331e76563915399716aa1fd62987de6864ea790d836aecf93278fa.jpg", + "image_caption": [ + "Figure 4. Illustration of the optimization process. With static 3D Gaussian initialization, our model can learn high-quality 3D Gaussians of the motion part." + ], + "image_footnote": [], + "bbox": [ + 501, + 87, + 885, + 271 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "can encode both the temporal and spatial features of 3D Gaussians $f_{d} = \\mathcal{H}(\\mathcal{G},t)$ , and the multi-head Gaussian deformation decoder $\\mathcal{D}$ can decode the features and predict each 3D Gaussian's deformation $\\Delta \\mathcal{G} = \\mathcal{D}(f_{d})$ , then the deformed 3D Gaussians $\\mathcal{G}'$ can be obtained.", + "bbox": [ + 498, + 335, + 890, + 411 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The rendering process of our 4D Gaussian Splatting is depicted in Fig. 2 (c). 
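A hedged PyTorch sketch of the rendering path just described: deltaG = F(G, t), G' = G + deltaG, then I_hat = S(M, G'). The tiny MLP below only stands in for the Gaussian deformation field network F (the actual encoder and decoder are detailed in Sec. 4.2), splatting itself is omitted, and all names are assumptions rather than the released implementation.

```python
import torch
import torch.nn as nn

class ToyDeformationField(nn.Module):
    """Stand-in for F: maps (Gaussian centers, timestamp) to (dX, dr, ds)."""
    def __init__(self, hidden=64):
        super().__init__()
        self.mlp = nn.Sequential(nn.Linear(4, hidden), nn.ReLU(),
                                 nn.Linear(hidden, 3 + 4 + 3))  # dX, dr, ds

    def forward(self, centers, t):
        # centers: (N, 3); t: scalar timestamp broadcast to every Gaussian.
        t_col = torch.full((centers.shape[0], 1), float(t))
        out = self.mlp(torch.cat([centers, t_col], dim=-1))
        return out[:, :3], out[:, 3:7], out[:, 7:]

def deform_gaussians(X, r, s, field, t):
    """Additive deformation in the spirit of Eq. 8: G' = G + deltaG."""
    dX, dr, ds = field(X, t)
    return X + dX, r + dr, s + ds

# One timestep of the pipeline (the splatting step S(M, G') is not shown here).
N = 1024
X, r, s = torch.randn(N, 3), torch.randn(N, 4), torch.rand(N, 3)
X2, r2, s2 = deform_gaussians(X, r, s, ToyDeformationField(), t=0.5)
print(X2.shape, r2.shape, s2.shape)
```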
Our 4D Gaussian splatting converts the original 3D Gaussians $\\mathcal{G}$ into another group of 3D Gaussians $\\mathcal{G}'$ given a timestamp $t$ , maintaining the effectiveness of the differential splatting as described in [57].", + "bbox": [ + 498, + 412, + 890, + 488 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Gaussian Deformation Field Network", + "text_level": 1, + "bbox": [ + 500, + 496, + 823, + 511 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The network to learn the Gaussian deformation field includes an efficient spatial-temporal structure encoder $\\mathcal{H}$ and a Gaussian deformation decoder $\\mathcal{D}$ for predicting the deformation of each 3D Gaussian.", + "bbox": [ + 498, + 518, + 890, + 579 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Spatial-Temporal Structure Encoder. Nearby 3D Gaussians always share similar spatial and temporal information. To model 3D Gaussians' features effectively, we introduce an efficient spatial-temporal structure encoder $\\mathcal{H}$ including a multi-resolution HexPlane $R(i,j)$ and a tiny MLP $\\phi_d$ inspired by [5, 8, 11, 44]. While the vanilla 4D neural voxel is memory-consuming, we adopt a 4D K-Planes [11] module to decompose the 4D neural voxel into 6 planes. All 3D Gaussians in a certain area can be contained in the bounding plane voxels and the Gaussians' deformation can also be encoded in nearby temporal voxels.", + "bbox": [ + 498, + 598, + 890, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, the spatial-temporal structure encoder $\\mathcal{H}$ contains 6 multi-resolution plane modules $R_{l}(i,j)$ and a tiny MLP $\\phi_d$ , i.e. $\\mathcal{H}(\\mathcal{G},t) = \\{R_l(i,j),\\phi_d|(i,j)\\in$ $\\{(x,y),(x,z),(y,z),(x,t),(y,t),(z,t)\\} ,l\\in \\{1,2\\} \\}$ . The position $\\mu = (x,y,z)$ is the mean value of 3D Gaussians $\\mathcal{G}$ . Each voxel module is defined by $R(i,j)\\in \\mathbb{R}^{h\\times lN_i\\times lN_j}$ where $h$ stands for the hidden dimension of the features, $N$ denotes the basic resolution of the voxel grid, and $l$ is the upsampling scale. This entails encoding information of the 3D", + "bbox": [ + 498, + 765, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "20313", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/589f60be5cc17db5ac9d7db98e30f7ded964ca1f15788b98b84e05478d29dd59.jpg", + "image_caption": [ + "Figure 5. Visualization of synthesized datasets compared with other models [5, 8, 11, 17, 19, 49]. The rendering results of [11] are displayed with a default green background. We have adopted their rendering settings." + ], + "image_footnote": [], + "bbox": [ + 76, + 85, + 895, + 294 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Gaussians within the six 2D voxel planes while considering temporal information. 
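A minimal PyTorch sketch of the plane query used by this encoder (formalized in Eq. 7 below): a Gaussian's (x, y, z, t) is projected onto the six planes, each plane is sampled bilinearly, and the six sampled features are fused by multiplication; in the full model, features from multiple resolutions l would additionally be concatenated and fed to the tiny MLP phi_d. Plane sizes, tensor layouts, and names here are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

# Index pairs (x,y), (x,z), (y,z), (x,t), (y,t), (z,t) into an (x, y, z, t) coordinate.
PAIRS = [(0, 1), (0, 2), (1, 2), (0, 3), (1, 3), (2, 3)]

def query_hexplane(planes, coords):
    """planes: six tensors of shape (1, h, H, W); coords: (N, 4), normalized to [-1, 1].
    Returns (N, h): the product of the six bilinearly sampled plane features."""
    feat = None
    for plane, (i, j) in zip(planes, PAIRS):
        grid = coords[:, [i, j]].reshape(1, -1, 1, 2)        # (1, N, 1, 2)
        sampled = F.grid_sample(plane, grid, mode="bilinear",
                                align_corners=True)          # (1, h, N, 1)
        sampled = sampled.squeeze(0).squeeze(-1).t()         # (N, h)
        feat = sampled if feat is None else feat * sampled
    return feat

h, res = 16, 64
planes = [torch.randn(1, h, res, res) for _ in PAIRS]
coords = torch.rand(100, 4) * 2 - 1                          # (x, y, z, t) of 100 Gaussians
print(query_hexplane(planes, coords).shape)                  # torch.Size([100, 16])
```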
The formula for computing separate voxel features is as follows:", + "bbox": [ + 75, + 343, + 470, + 388 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nf _ {h} = \\bigcup_ {l} \\prod \\operatorname {i n t e r p} \\left(R _ {l} (i, j)\\right), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 112, + 398, + 468, + 433 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n(i, j) \\in \\{(x, y), (x, z), (y, z), (x, t), (y, t), (z, t) \\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 98, + 434, + 429, + 453 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$f_{h} \\in \\mathbb{R}^{h * l}$ is the feature of neural voxels. 'interp' denotes the bilinear interpolation for querying the voxel features located at 4 vertices of the grid. The discussion of the production process is similar to [11]. Then a tiny MLP $\\phi_{d}$ merges all the features by $f_{d} = \\phi_{d}(f_{h})$ .", + "bbox": [ + 75, + 464, + 470, + 542 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multi-head Gaussian Deformation Decoder. When all the features of 3D Gaussians are encoded, we can compute any desired variable with a multi-head Gaussian deformation decoder $\\mathcal{D} = \\{\\phi_x,\\phi_r,\\phi_s\\}$ . Separate MLPs are employed to compute the deformation of position $\\Delta \\mathcal{X} = \\phi_{x}(f_{d})$ , rotation $\\Delta r = \\phi_r(f_d)$ , and scaling $\\Delta s = \\phi_s(f_d)$ . Then, the deformed feature $(\\mathcal{X}',r',s')$ can be addressed as:", + "bbox": [ + 75, + 563, + 468, + 670 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\mathcal {X} ^ {\\prime}, r ^ {\\prime}, s ^ {\\prime}\\right) = \\left(\\mathcal {X} + \\Delta \\mathcal {X}, r + \\Delta r, s + \\Delta s\\right). \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 129, + 683, + 468, + 700 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, we obtain the deformed 3D Gaussians $\\mathcal{G}' = \\{\\mathcal{X}', s', r', \\sigma, \\mathcal{C}\\}$ .", + "bbox": [ + 76, + 713, + 468, + 744 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Optimization", + "text_level": 1, + "bbox": [ + 76, + 755, + 217, + 771 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3D Gaussian Initialization. [19] shows that 3D Gaussians can be well-trained with structure from motion (SfM) [43] points initialization. Similarly, 4D Gaussians should also be fine-tuned in proper 3D Gaussian initialization. We optimize 3D Gaussians at initial 3000 iterations for warm-up and then render images with 3D Gaussians $\\hat{I} = S(M, \\mathcal{G})$ instead of 4D Gaussians $\\hat{I} = S(M, \\mathcal{G}')$ . The illustration of the optimization process is shown in Fig. 4.", + "bbox": [ + 75, + 779, + 470, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Loss Function. Similar to other reconstruction methods [8, 19, 39], we use the L1 color loss to supervise the training process. A grid-based total-variational loss [5, 8, 11, 47] $\\mathcal{L}_{tv}$ is also applied.", + "bbox": [ + 498, + 343, + 892, + 405 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = | \\hat {I} - I | + \\mathcal {L} _ {t v}. \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 627, + 415, + 892, + 434 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. 
Experiment", + "text_level": 1, + "bbox": [ + 498, + 439, + 625, + 458 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we mainly introduce the hyperparameters and datasets of our settings in Sec. 5.1 and the results between different datasets will be compared with [2, 5, 8, 11, 19, 27, 45, 49, 50] in Sec. 5.2. Then, ablation studies are proposed to prove the effectiveness of our approaches in Sec. 5.3 and more discussion about 4D-GS in Sec. 5.4. Finally, we discuss the limitation of our proposed 4D-GS in Sec. 5.5.", + "bbox": [ + 496, + 465, + 892, + 585 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 498, + 595, + 707, + 613 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our implementation is primarily based on the PyTorch [37] framework and tested in a single RTX 3090 GPU, and we've fine-tuned our optimization parameters by the configuration outlined in the 3D-GS [19]. More hyperparameters will be shown in the appendix.", + "bbox": [ + 496, + 619, + 892, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Synthetic Dataset. We primarily assess the performance of our model using synthetic datasets, as introduced by D-NeRF [39]. These datasets are designed for monocular settings, although it's worth noting that the camera poses for each timestamp are close to randomly generated. Each scene within these datasets contains dynamic frames, ranging from 50 to 200 in number.", + "bbox": [ + 496, + 714, + 892, + 821 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Real-world Datasets. We utilize datasets provided by HyperNeRF [36] and Neu3D's [22] as benchmark datasets to evaluate the performance of our model in real-world scenarios. The Nerfies dataset is captured using one or two", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "20314", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/2d0635783a41ee2783b754c9eaae16cb23001622ef636a0efded0f08bd6a891f.jpg", + "image_caption": [ + "Figure 6. Visualization of the HyperNeRF [36] datasets compared with other methods [8, 17, 19, 36]. 'GT' stands for ground truth images." + ], + "image_footnote": [], + "bbox": [ + 76, + 85, + 890, + 308 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/6182eeb9a23f23372907884edb109992e044e885d60e986ff087ff139ecd01c2.jpg", + "table_caption": [ + "Table 1. Quantitative results on the synthesis dataset. The best and the second best results are denoted by pink and yellow. The rendering resolution is set to $800 \\times 800$ . \"Time\" in the table stands for training times." + ], + "table_footnote": [], + "table_body": "
ModelPSNR(dB)↑SSIM↑LPIPS↓Time↓FPS ↑Storage (MB)↓
TiNeuVox-B [8]32.670.970.0428 mins1.548
KPlanes [11]31.610.97-52 mins0.97418
HexPlane-Slim [5]31.040.970.0411m 30s2.538
3D-GS [19]23.190.930.0810 mins17010
FFDNeRF [17]32.680.970.04-< 1440
MSTH [49]31.340.980.026 mins--
V4D [12]33.720.980.026.9 hours2.08377
Ours34.050.980.028 mins8218
", + "bbox": [ + 197, + 388, + 774, + 527 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "cameras, following straightforward camera motion, while the Neu3D's dataset is captured using 15 to 20 static cameras, involving extended periods and intricate camera motions. We use the points computed by SfM [43] from the first frame of each video in Neu3D's dataset and 200 frames randomly selected in HyperNeRF's.", + "bbox": [ + 75, + 551, + 468, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Results", + "text_level": 1, + "bbox": [ + 76, + 650, + 171, + 666 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We primarily assess our experimental results using various metrics, encompassing peak-signal-to-noise ratio (PSNR), perceptual quality measure LPIPS [60], structural similarity index (SSIM) [52] and its extensions including structural dissimilarity index measure (DSSIM), multiscale structural similarity index (MS-SSIM), FPS, training times and Storage.", + "bbox": [ + 75, + 674, + 468, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To assess the quality of novel view synthesis, we conducted benchmarking against several state-of-the-art methods in the field, including [5, 8, 11, 12, 17, 19, 27, 49]. The results are summarized in Tab. 1. While current dynamic hybrid representations can produce high-quality results, they often come with the drawback of rendering speed. The lack of modeling dynamic motion part makes [19] fail to reconstruct dynamic scenes. In contrast, our method en", + "bbox": [ + 75, + 780, + 470, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "joys both the highest rendering quality within the synthesis dataset and exceptionally fast rendering speeds while keeping extremely low storage consumption and convergence time.", + "bbox": [ + 496, + 551, + 892, + 611 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Additionally, the results obtained from real-world datasets are presented in Tab. 2 and Tab. 3. It becomes apparent that some NeRFs [2, 5, 45] suffer from slow convergence speed, and the other grid-based NeRF methods [5, 8, 11, 49] encounter difficulties when attempting to capture intricate object details. In stark contrast, our methods research comparable rendering quality, fast convergence, and excel in free-view rendering speed in indoor cases. Though [27] addresses the high quality in comparison to ours, the need for multi-cam setups makes it hard to model monocular scenes and other methods also limit free-view rendering speed and storage.", + "bbox": [ + 496, + 613, + 893, + 795 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 800, + 653, + 816 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Spatial-Temporal Structure Encoder. The explicit Hex-Plane encoder $R_{l}(i,j)$ possesses the capacity to retain 3D Gaussians' spatial and temporal information, which can reduce storage consumption in comparison with purely explicit methods [30]. Discarding this module, we observe", + "bbox": [ + 496, + 824, + 893, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "20315", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/930d62bc2e1de7987c899d965fc13bf3358595be102923045f2b7fc51d2b487f.jpg", + "table_caption": [ + "Table 2. Quantitative results on HyperNeRF's [36] vrig dataset. Rendering resolution is set to ${960} \\times {540}$ ." 
+ ], + "table_footnote": [], + "table_body": "
ModelPSNR(dB)↑MS-SSIM↑Times↓FPS↑Storage(MB)↓
Nerfies [35]22.20.803~ hours< 1-
HyperNeRF [36]22.40.81432 hours< 1-
TiNeuVox-B [8]24.30.83630 mins148
3D-GS [19]19.70.68040 mins5552
FFDNeRF [17]24.20.842-0.05440
V4D [12]24.80.8325.5 hours0.29377
Ours25.20.84530 mins3461
", + "bbox": [ + 215, + 114, + 751, + 242 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ab4efbced946b9a4130d7b209ac4d6acfa3926315a16071e28e78ffb92ec8787.jpg", + "table_caption": [ + "Table 3. Quantitative results on the Neu3D's [22] dataset, rendering resolution is set to $1352 \\times 1014$ ." + ], + "table_footnote": [ + "*: The metrics of the model are tested without \"coffee martini\" and resolution is set to $1024 \\times 768$ .", + ": The FPS is tested with fixed-view rendering." + ], + "table_body": "
ModelPSNR(dB)↑D-SSIM↓LPIPS↓Time ↓FPS↑Storage (MB)↓
NeRFPlayer [45]30.690.0340.1116 hours0.045-
HyperReel [2]31.100.0360.0969 hours2.0360
HexPlane-all* [5]31.700.0140.07512 hours0.2250
KPlanes [11]31.63--1.8 hours0.3309
Im4D [27]32.58-0.20828 mins~593
MSTH [49]32.370.0150.05620 mins2(15‡)135
Ours31.150.0160.04940 mins3090
", + "bbox": [ + 173, + 280, + 802, + 402 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "that using only a shallow MLP $\\phi_d$ falls short in modeling complex deformations across various settings. Tab. 4 demonstrates that, while the model incurs minimal memory costs, it does come at the expense of rendering quality.", + "bbox": [ + 75, + 453, + 468, + 515 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Gaussian Deformation Decoder. Our proposed Gaussian deformation decoder $\\mathcal{D}$ decodes the features from the spatial-temporal structure encoder $\\mathcal{H}$ . All the changes in 3D Gaussians can be explained by separate MLPs $\\{\\phi_x, \\phi_r, \\phi_s\\}$ . As is shown in Tab. 4, 4D Gaussians cannot fit dynamic scenes well without modeling 3D Gaussian motion. Meanwhile, the movement of human body joints is typically manifested as stretching and twisting of surface details in a macroscopic view. If one aims to accurately model these movements, the size and shape of 3D Gaussians should also be adjusted accordingly. Otherwise, there may be underfitting of details during excessive stretching, or an inability to correctly simulate the movement of objects at a microscopic level.", + "bbox": [ + 75, + 526, + 468, + 736 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3D Gaussian Initialization. In some cases without SfM [43] points initialization, training 4D-GS directly may cause difficulty in convergence. Optimizing 3D Gaussians for warm-up enjoys: (a) making some 3D Gaussians stay in the dynamic part, which releases the pressure of large deformation learning by 4D Gaussians as shown in Fig. 4. (b) learning proper 3D Gaussians $\\mathcal{G}$ and suggesting deformation fields paying more attention to the dynamic part. (c) avoiding numeric errors in optimizing the Gaussian deformation network $\\mathcal{F}$ and keeping the training process stable.", + "bbox": [ + 75, + 750, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d5ca3ea5b0c79315f75c77d126f979c7bcaaa1db0e04ac6bdbcae8033e1e4a6d.jpg", + "image_caption": [ + "Figure 7. Visualization of tracking with 3D Gaussians. Each line in the figure of second rows stands for trajectories of 3D Gaussians" + ], + "image_footnote": [], + "bbox": [ + 521, + 452, + 870, + 664 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Tab. 4 also shows that if we train our model without the warm-up coarse stage, the rendering quality will suffer.", + "bbox": [ + 498, + 729, + 890, + 761 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.4. Discussions", + "text_level": 1, + "bbox": [ + 500, + 771, + 624, + 785 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Tracking with 3D Gaussians. Tracking in 3D is also a important task. [17] also shows tracking objects' motion in 3D. Different from dynamic3DGS [30], our methods even can present tracking objects in monocular settings with pretty low storage i.e. 10MB in 3D Gaussians $\\mathcal{G}$ and 8 MB in Gaussian deformation field network $\\mathcal{F}$ . Fig. 7 shows the 3D Gaussian's deformation at certain timestamps.", + "bbox": [ + 496, + 794, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "20316", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c365f2b894e193d77b3ab32acf3bf460c65f591ee3365c751895f3edd1a6255e.jpg", + "table_caption": [ + "Table 4. Ablation studies on synthetic datasets using our proposed methods." 
+ ], + "table_footnote": [], + "table_body": "
ModelPSNR(dB)↑SSIM↑LPIPS↓Time↓FPS↑Storage (MB)↓
Ours w/o HexPlane Rl(i,j)27.050.950.054 mins14012
Ours w/o initialization31.910.970.037.5 mins7918
Ours w/o φx26.670.950.078 mins8217
Ours w/o φr33.080.980.038 mins8317
Ours w/o φs33.020.980.038 mins8217
Ours34.050.980.028 mins8218
", + "bbox": [ + 125, + 114, + 834, + 237 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c6bd6b65e62c5b3d545a492d97594d68dd04549c7125eda500e39c47fc4e9d18.jpg", + "image_caption": [ + "Figure 8. Visualization of composition with 4D Gaussians." + ], + "image_footnote": [], + "bbox": [ + 80, + 247, + 467, + 362 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/32fa9e66fe525318a6f0c790ad66bfed2b98622fa8fd0d2a3b3a3133fb4dc8ba.jpg", + "image_caption": [ + "Figure 9. Visualization of the relationship between rendering speed and numbers of 3D Gaussians in the rendered screens. All the tests are finished in the synthesis dataset." + ], + "image_footnote": [], + "bbox": [ + 76, + 401, + 467, + 635 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Composition with 4D Gaussians. Similar to dynamic3DGS [30], our proposed methods can also propose editing in 4D Gaussians in Fig. 8. Thanks to the explicit representation of 3D Gaussians, all the trained models can predict deformed 3D Gaussians in the same space following $\\mathcal{G}' = \\{\\mathcal{G}_1', \\mathcal{G}_2', \\dots, \\mathcal{G}_n'\\}$ and differential rendering [57] can project all the point clouds into viewpoints by $\\hat{I} = \\mathcal{S}(M, \\mathcal{G}')$ .", + "bbox": [ + 75, + 714, + 468, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Analysis of Rendering Speed. As is shown in Fig. 9, we also test the relationship between points in the rendered screen and rendering speed at the resolution of $800 \\times 800$ .", + "bbox": [ + 75, + 854, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We found that if the rendered points are lower than 30000, the rendering speed can be up to 90. The config of Gaussian deformation fields are discussed in the appendix. To achieve render-time rendering speed, we should strike a balance among all the rendering resolutions, 4D Gaussians representation including numbers of 3D Gaussians, and the capacity of the Gaussian deformation field network and any other hardware constraints.", + "bbox": [ + 496, + 250, + 890, + 369 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.5. Limitations", + "text_level": 1, + "bbox": [ + 500, + 390, + 625, + 404 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Though 4D-GS can indeed attain rapid convergence and yield real-time rendering outcomes in many scenarios, there are a few key challenges to address. First, large motions, the absence of background points, and the unprecise camera pose cause the struggle of optimizing 4D Gaussians. What is more, it is still challenging to 4D-GS also cannot split the joint motion of static and dynamic Gaussiansparts under the monocular settings without any additional supervision. Finally, a more compact algorithm needs to be designed to handle urban-scale reconstruction due to the heavy querying of Gaussian deformation fields by huge numbers of 3D Gaussians.", + "bbox": [ + 496, + 417, + 890, + 597 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 623, + 617, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This paper proposes 4D Gaussian splating to achieve real-time dynamic scene rendering. An efficient deformation field network is constructed to accurately model Gaussian motions and shape deformations, where adjacent Gaussians are connected via a spatial-temporal structure encoder. 
Connections between Gaussians lead to more complete deformed geometry, effectively avoiding avulsion. Our 4D Gaussians can not only model dynamic scenes but also have the potential for 4D objective tracking and editing.", + "bbox": [ + 496, + 651, + 890, + 787 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 500, + 811, + 658, + 828 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported by the National Natural Science Foundation of China (No. 62376102). The authors would like to thank Haotong Lin for providing the quantitative results of Im4D [27].", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "20317", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jad Abou-Chakra, Feras Dayoub, and Niko Sünderhauf. Particlererf: Particle based encoding for online neural radiance fields in dynamic scenes. arXiv preprint arXiv:2211.04041, 2022. 3", + "[2] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 5, 6, 7", + "[3] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2", + "[4] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2", + "[5] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 130-141, 2023. 1, 2, 4, 5, 6, 7", + "[6] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (ToG), 34(4):1-13, 2015. 2", + "[7] Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. ACM Siggraph Computer Graphics, 22(4): 65-74, 1988. 1", + "[8] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In SIGGRAPH Asia 2022 Conference Papers, pages 1-9, 2022. 1, 2, 4, 5, 6, 7", + "[9] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snively, and Richard Tucker. Deepview: View synthesis with learned gradient descent. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 2", + "[10] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 
1, 3", + "[11] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12479–12488, 2023. 1, 2, 4, 5, 6, 7" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[12] Wanshui Gan, Hongbin Xu, Yi Huang, Shifeng Chen, and Naoto Yokoya. V4d: Voxel for 4d novel view synthesis. IEEE Transactions on Visualization and Computer Graphics, 2023. 2, 6, 7", + "[13] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5712-5721, 2021. 2", + "[14] Xiangjun Gao, Jiaolong Yang, Jongyoo Kim, Sida Peng, Zicheng Liu, and Xin Tong. Mps-nerf: Generalizable 3d human rendering from multiview images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2", + "[15] Kaiwen Guo, Feng Xu, Yangang Wang, Yebin Liu, and Qionghai Dai. Robust non-rigid motion tracking and surface reconstruction using 10 regularization. In Proceedings of the IEEE International Conference on Computer Vision, pages 3083-3091, 2015. 2", + "[16] Kaiwen Guo, Peter Lincoln, Philip Davidson, Jay Busch, Xueming Yu, Matt Whalen, Geoff Harvey, Sergio Orts-Escolano, Rohit Pandey, Jason Dourgarian, et al. The relightables: Volumetric performance capture of humans with realistic relighting. ACM Transactions on Graphics (ToG), 38(6):1–19, 2019. 2", + "[17] Xiang Guo, Jiadai Sun, Yuchao Dai, Guanying Chen, Xiaoting Ye, Xiao Tan, Errui Ding, Yumeng Zhang, and Jingdong Wang. Forward flow for novel view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16022-16033, 2023. 2, 5, 6, 7", + "[18] Tao Hu, Tao Yu, Zerong Zheng, He Zhang, Yebin Liu, and Matthias Zwicker. Hvtr: Hybrid volumetric-textural rendering for human avatars. In 2022 International Conference on 3D Vision (3DV), pages 197-208. IEEE, 2022. 2", + "[19] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG), 42(4):1-14, 2023. 1, 3, 4, 5, 6, 7", + "[20] Leonid Keselman and Martial Hebert. Approximate differentiable rendering with algebraic surfaces. In European Conference on Computer Vision, pages 596-614. Springer, 2022. 3", + "[21] Leonid Keselman and Martial Hebert. Flexible techniques for differentiable rendering with 3d gaussians. arXiv preprint arXiv:2308.14737, 2023. 3", + "[22] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 2, 5, 7", + "[23] Zhong Li, Yu Ji, Wei Yang, Jinwei Ye, and Jingyi Yu. Robust 3d human motion reconstruction via dynamic template construction. In 2017 International Conference on 3D Vision (3DV), pages 496-505. IEEE, 2017. 2", + "[24] Zhong Li, Minye Wu, Wangyiteng Zhou, and Jingyi Yu. 4d human body correspondences from panoramic depth maps." 
+ ], + "bbox": [ + 501, + 92, + 890, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "20318", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2877-2886, 2018. 2", + "[25] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6498-6508, 2021. 2", + "[26] Zhan Li, Zhang Chen, Zhong Li, and Yi Xu. Spacetime gaussian feature splatting for real-time dynamic view synthesis. arXiv preprint arXiv:2312.16812, 2023. 3", + "[27] Haotong Lin, Sida Peng, Zhen Xu, Tao Xie, Xingyi He, Hu-jun Bao, and Xiaowei Zhou. High-fidelity and real-time novel view synthesis for dynamic scenes. In SIGGRAPH Asia Conference Proceedings, 2023. 2, 5, 6, 7, 8", + "[28] Xingyu Liu, Mengyuan Yan, and Jeannette Bohg. Meteornet: Deep learning on dynamic 3d point cloud sequences. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9246-9255, 2019. 3", + "[29] Yu-Lun Liu, Chen Gao, Andreas Meuleman, Hung-Yu Tseng, Ayush Saraf, Changil Kim, Yung-Yu Chuang, Johannes Kopf, and Jia-Bin Huang. Robust dynamic radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13-23, 2023. 2", + "[30] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In 3DV, 2024. 2, 3, 6, 7, 8", + "[31] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7210-7219, 2021. 4", + "[32] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 1, 2, 3", + "[33] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 1, 3", + "[34] Byeongjun Park and Changick Kim. Point-dynrf: Point-based dynamic radiance fields from a monocular video. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3171-3181, 2024. 3", + "[35] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 2, 7", + "[36] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 2, 4,5,6,7" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[37] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. 
Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019. 5", + "[38] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4252-4262, 2023. 2", + "[39] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10318–10327, 2021. 2, 5", + "[40] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017, 3", + "[41] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 3", + "[42] Darius Rückert, Linus Franke, and Marc Stamminger. Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics (ToG), 41(4):1-14, 2022. 3", + "[43] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4104-4113, 2016. 5, 6, 7", + "[44] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16632-16642, 2023. 1, 2, 4", + "[45] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 5, 6, 7", + "[46] Zhuo Su, Lan Xu, Zerong Zheng, Tao Yu, Yebin Liu, and Lu Fang. Robustfusion: Human volumetric capture with data-driven visual cues using a rgbd camera. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part IV 16, pages 246-264. Springer, 2020. 2", + "[47] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5459-5469, 2022. 1, 3, 5", + "[48] Fengrui Tian, Shaoyi Du, and Yueqi Duan. Mononerf: Learning a generalizable dynamic radiance field from" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "20319", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "monocular videos. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17903-17913, 2023. 2", + "[49] Feng Wang, Zilong Chen, Guokang Wang, Yafei Song, and Huaping Liu. Masked space-time hash encoding for efficient dynamic scene reconstruction. Advances in neural information processing systems, 2023. 2, 5, 6, 7", + "[50] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, Yafei Song, and Huaping Liu. Mixed neural voxels for fast multiview video synthesis. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19706-19716, 2023. 2, 5", + "[51] Yiming Wang, Qin Han, Marc Habermann, Kostas Dani-ilidis, Christian Theobalt, and Lingjie Liu. Neus2: Fast learning of neural implicit surfaces for multi-view reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3295–3306, 2023. 2", + "[52] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6", + "[53] Qingshan Xu, Weihang Kong, Wenbing Tao, and Marc Pollefeys. Multi-scale geometric consistency guided and planar prior assisted multi-view stereo. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(4):4945-4963, 2022. 2", + "[54] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5438–5448, 2022. 3", + "[55] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv:2310.10642, 2023. 2, 3", + "[56] Taoran Yi, Jiemin Fang, Xinggang Wang, and Wenyu Liu. Generalizable neural voxels for fast human radiance fields. arXiv preprint arXiv:2303.15387, 2023. 2", + "[57] Wang Yifan, Felice Serena, Shihao Wu, Cengiz Öz Tireli, and Olga Sorkine-Hornung. Differentiable surface splatting for point-based geometry processing. ACM Transactions on Graphics (TOG), 38(6):1-14, 2019. 2, 3, 4, 8", + "[58] Lequan Yu, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-net: Point cloud upsampling network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2790–2799, 2018. 3", + "[59] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2", + "[60] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 6", + "[61] Kaichen Zhou, Jia-Xing Zhong, Sangyun Shin, Kai Lu, Yiyuan Yang, Andrew Markham, and Niki Trigoni. Dynpoint: Dynamic neural point for view synthesis. Advances in Neural Information Processing Systems, 36, 2024. 2, 3" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 10 + }, + { + "type": "ref_text", + "text": "[62] Matthias Zwicker, Hanspeter Pfister, Jeroen Van Baar, and Markus Gross. Surface splatting. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 371-378, 2001. 
3", + "bbox": [ + 501, + 90, + 892, + 148 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "20320", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/c4bfa810-f46e-49e4-9e19-ae1a9e3dcad6_model.json b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/c4bfa810-f46e-49e4-9e19-ae1a9e3dcad6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..9d7056daaf91f13fc8326461233b3334b5ff6c60 --- /dev/null +++ b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/c4bfa810-f46e-49e4-9e19-ae1a9e3dcad6_model.json @@ -0,0 +1,2565 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.162, + 0.131, + 0.812, + 0.154 + ], + "angle": 0, + "content": "4D Gaussian Splatting for Real-Time Dynamic Scene Rendering" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.18, + 0.812, + 0.288 + ], + "angle": 0, + "content": "Guanjun Wu\\(^{1*}\\), Taoran Yi\\(^{2*}\\), Jiemin Fang\\(^{3\\dagger}\\), Lingxi Xie\\(^{3}\\), Xiaopeng Zhang\\(^{3}\\), Wei Wei\\(^{1}\\), Wenyu Liu\\(^{2}\\), Qi Tian\\(^{3}\\), Xinggang Wang\\(^{2\\dagger}\\) \n\\(^{1}\\)School of CS, Huazhong University of Science and Technology \n\\(^{2}\\)School of EIC, Huazhong University of Science and Technology \n\\(^{3}\\)Huawei Inc. {guajuwu, taoranyi, weiw, liuwy, xgwang}@hust.edu.cn {jaminfong, 198808xc, zxphistory}@gmail.com tian.qil@huawei.com" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.31, + 0.306, + 0.468 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.31, + 0.574, + 0.468 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.573, + 0.306, + 0.877, + 0.496 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.501, + 0.893, + 0.552 + ], + "angle": 0, + "content": "Figure 1. Our method achieves real-time rendering \\(\\ddagger\\) for dynamic scenes at high image resolutions while maintaining high rendering quality. The right figure is tested on synthetic datasets, where the radius of the dot corresponds to the training time. \"Res\": resolution. \\(\\ddagger\\) The rendering speed not only depends on the image resolution but also the number of 3D Gaussians and the scale of deformation fields which are determined by the complexity of the scene." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.55, + 0.314, + 0.565 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.581, + 0.473, + 0.854 + ], + "angle": 0, + "content": "Representing and rendering dynamic scenes has been an important but challenging task. Especially, to accurately model complex motions, high efficiency is usually hard to guarantee. To achieve real-time dynamic scene rendering while also enjoying high training and storage efficiency, we propose 4D Gaussian Splatting (4D-GS) as a holistic representation for dynamic scenes rather than applying 3D-GS for each individual frame. 
In 4D-GS, a novel explicit representation containing both 3D Gaussians and 4D neural voxels is proposed. A decomposed neural voxel encoding algorithm inspired by HexPlane is proposed to efficiently build Gaussian features from 4D neural voxels and then a lightweight MLP is applied to predict Gaussian deformations at novel timestamps. Our 4D-GS method achieves real-time rendering under high resolutions, 82 FPS at an \\(800 \\times 800\\) resolution on an RTX 3090 GPU while maintaining comparable or better quality than previous state-of-the-art methods. More demos and code are available at" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.551, + 0.831, + 0.566 + ], + "angle": 0, + "content": "https://guanjunwu.github.io/4dgs/." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.599, + 0.633, + 0.615 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.625, + 0.892, + 0.763 + ], + "angle": 0, + "content": "Novel view synthesis (NVS) stands as a critical task in the domain of 3D vision and plays a vital role in many applications, e.g. VR, AR, and movie production. NVS aims at rendering images from any desired viewpoint or timestamp of a scene, usually requiring modeling the scene accurately from several 2D images. Dynamic scenes are quite common in real scenarios, rendering which is important but challenging as complex motions need to be modeled with both spatially and temporally sparse input." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.763, + 0.892, + 0.884 + ], + "angle": 0, + "content": "NeRF [32] has achieved great success in synthesizing novel view images by representing scenes with implicit functions. The volume rendering techniques [7] are introduced to connect 2D images and 3D scenes. However, the original NeRF method bears big training and rendering costs. Though some NeRF variants [5, 8, 10, 11, 33, 44, 47] reduce the training time from days to minutes, the rendering process still bears a non-negligible latency." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.885, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Recent 3D Gaussian Splatting (3D-GS) [19] signifi" + }, + { + "type": "page_footnote", + "bbox": [ + 0.094, + 0.863, + 0.212, + 0.875 + ], + "angle": 0, + "content": "*Equal contributions." + }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.876, + 0.175, + 0.888 + ], + "angle": 0, + "content": "†Project Lead." + }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.888, + 0.222, + 0.9 + ], + "angle": 0, + "content": "\\(\\ddagger\\) Corresponding author." + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.863, + 0.222, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20310" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.211 + ], + "angle": 0, + "content": "cantly boosts the rendering speed to a real-time level by representing the scene as 3D Gaussians. The cumbersome volume rendering in the original NeRF is replaced with efficient differentiable splatting [57], which directly projects 3D Gaussian onto the 2D image plane. 3D-GS not only enjoys real-time rendering speed but also represents the scene more explicitly, making it easier to manipulate the scene representation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.212, + 0.473, + 0.619 + ], + "angle": 0, + "content": "However, 3D-GS focuses on the static scenes. 
Extending it to dynamic scenes as a 4D representation is a reasonable, important but difficult topic. The key challenge lies in modeling complicated point motions from sparse input. 3D-GS holds a natural geometry prior by representing scenes with point-like Gaussians. One direct and effective extension approach is to construct 3D Gaussians at each timestamp [30] but the storage/memory cost will multiply especially for long input sequences. Our goal is to construct a compact representation while maintaining both training and rendering efficiency, i.e. 4D Gaussian Splatting (4D-GS). To this end, we propose to represent Gaussian motions and shape changes by an efficient Gaussian deformation field network, containing a temporal-spatial structure encoder and an extremely tiny multi-head Gaussian deformation decoder. Only one set of canonical 3D Gaussians is maintained. For each timestamp, the canonical 3D Gaussians will be transformed by the Gaussian deformation field into new positions with new shapes. The transformation process represents both the Gaussian motion and deformation. Note that different from modeling motions of each Gaussian separately [30, 55], the spatial-temporal structure encoder can connect different adjacent 3D Gaussians to predict more accurate motions and shape deformation. Then the deformed 3D Gaussians can be directly splatted for rendering the according-timestamp image. Our contributions can be summarized as follows." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.622, + 0.47, + 0.68 + ], + "angle": 0, + "content": "- An efficient 4D Gaussian splating framework with an efficient Gaussian deformation field is proposed by modeling both Gaussian motion and Gaussian shape changes across time." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.682, + 0.47, + 0.726 + ], + "angle": 0, + "content": "- A multi-resolution encoding method is proposed to connect the nearby 3D Gaussians and build rich 3D Gaussian features by an efficient spatial-temporal structure encoder." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.727, + 0.47, + 0.817 + ], + "angle": 0, + "content": "- 4D-GS achieves real-time rendering on dynamic scenes, up to 82 FPS at a resolution of \\(800 \\times 800\\) for synthetic datasets and 30 FPS at a resolution of \\(1352 \\times 1014\\) in real datasets, while maintaining comparable or superior performance than previous state-of-the-art (SOTA) methods and shows potential for editing and tracking in 4D scenes." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.622, + 0.47, + 0.817 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.83, + 0.227, + 0.846 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "In this section, we simply review the difference of dynamic NeRFs in Sec. 2.1, then discuss the point clouds-based neural rendering algorithm in Sec. 2.2." 
+ }, + { + "type": "text", + "bbox": [ + 0.529, + 0.09, + 0.66, + 0.101 + ], + "angle": 0, + "content": "- Original Sampled Points" + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.102, + 0.665, + 0.112 + ], + "angle": 0, + "content": "- Canonical Mapped Points" + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.113, + 0.65, + 0.123 + ], + "angle": 0, + "content": "The Original Cast Ray" + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.124, + 0.676, + 0.135 + ], + "angle": 0, + "content": "The Canonical Mapped Ray" + }, + { + "type": "list", + "bbox": [ + 0.528, + 0.09, + 0.676, + 0.135 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.711, + 0.091, + 0.848, + 0.101 + ], + "angle": 0, + "content": "- Original Sampled Points" + }, + { + "type": "text", + "bbox": [ + 0.73, + 0.102, + 0.839, + 0.112 + ], + "angle": 0, + "content": "The Original Cast Ray" + }, + { + "type": "text", + "bbox": [ + 0.707, + 0.113, + 0.862, + 0.123 + ], + "angle": 0, + "content": "Time Features of the Points" + }, + { + "type": "list", + "bbox": [ + 0.707, + 0.091, + 0.862, + 0.123 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.143, + 0.672, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.501, + 0.205, + 0.703, + 0.217 + ], + "angle": 0, + "content": "(a) Canonical Mapping Volume Rendering" + }, + { + "type": "image", + "bbox": [ + 0.724, + 0.133, + 0.867, + 0.206 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.712, + 0.206, + 0.879, + 0.217 + ], + "angle": 0, + "content": "(b) Time-aware Volume Rendering" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.225, + 0.672, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.698, + 0.228, + 0.832, + 0.238 + ], + "angle": 0, + "content": "\\(\\bullet\\) Original 3D Gaussians G" + }, + { + "type": "text", + "bbox": [ + 0.698, + 0.239, + 0.844, + 0.249 + ], + "angle": 0, + "content": "\\(\\bullet\\) Deformed 3D Gaussians \\(G^{\\prime}\\)" + }, + { + "type": "text", + "bbox": [ + 0.697, + 0.25, + 0.888, + 0.262 + ], + "angle": 0, + "content": "Gaussian Deformation Field \\( \\mathrm{F}(G, t_i) \\)" + }, + { + "type": "text", + "bbox": [ + 0.697, + 0.263, + 0.85, + 0.272 + ], + "angle": 0, + "content": "Gaussian Rasterization Paths" + }, + { + "type": "list", + "bbox": [ + 0.697, + 0.228, + 0.888, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.64, + 0.303, + 0.766, + 0.315 + ], + "angle": 0, + "content": "(c) 4D Gaussian Splitting" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.326, + 0.892, + 0.437 + ], + "angle": 0, + "content": "Figure 2. Illustration of different dynamic scene rendering methods. (a) Points are sampled in the casted ray during volume rendering. The point deformation fields proposed in [8, 39] map the points into a canonical space. (b) Time-aware volume rendering computes the features of each point directly and does not change the rendering path. (c) The Gaussian deformation field converts original 3D Gaussians into another group of 3D Gaussians with a certain timestamp." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.454, + 0.699, + 0.469 + ], + "angle": 0, + "content": "2.1. Novel View Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.478, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Novel view synthesis is a important and challenging task in 3D reconstruction. 
Much approaches are proposed to represent a 3D object and render novel views. Efficient representations such as light fields [4], mesh [6, 15, 24, 46], voxels [16, 18, 23], multi-planes [9] can render high quality image with enough supervisions. NeRF-based approaches [3, 32, 59] demonstrate that implicit radiance fields can effectively learn scene representations and synthesize high-quality novel views. [35, 36, 39] have challenged the static hypothesis, expanding the boundary of novel view synthesis for dynamic scenes. [8] proposes to use an explicit voxel grid to model temporal information, accelerating the learning time for dynamic scenes to half an hour and applied in [17, 29, 56]. The proposed deformation-based neural rendering methods are shown in Fig. 2 (a). Flow-based [13, 25, 29, 48, 61] methods adopting warping algorithm to synthesis novel views by blending nearby frames. [5, 11, 12, 22, 44, 49] represent further advancements in faster dynamic scene learning by adopting decomposed neural voxels. They treat sampled points in each timestamp individually as shown in Fig. 2 (b). [14, 27, 38, 50, 51, 53] are efficient methods to handle multi-view setups. The aforementioned methods though achieve fast training speed, real-time rendering for dynamic scenes is still challenging, especially for monocular input. Our method aims at constructing a highly efficient training and rendering pipeline in Fig. 2 (c), while maintaining the quality, even for sparse inputs." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "20311" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.093, + 0.892, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.312, + 0.894, + 0.368 + ], + "angle": 0, + "content": "Figure 3. The overall pipeline of our model. Given a group of 3D Gaussians \\(\\mathcal{G}\\), we extract the center coordinate of each 3D Gaussian \\(\\mathcal{X}\\) and timestamp \\(t\\) to compute the voxel feature by querying multi-resolution voxel planes. Then a tiny multi-head Gaussian deformation decoder is used to decode the feature and get the deformed 3D Gaussians \\(\\mathcal{G}'\\) at timestamp \\(t\\). The deformed Gaussians are then splatted to the rendered image." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.381, + 0.395, + 0.397 + ], + "angle": 0, + "content": "2.2. Neural Rendering with Point Clouds" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.418, + 0.471, + 0.584 + ], + "angle": 0, + "content": "Effectively representing 3D scenes remains a challenging topic. The community has explored various neural representations [32], e.g. meshes, point clouds [54], voxels [10], and hybrid approaches [33, 47]. Point-cloud-based methods [28, 40, 41, 58] initially target at 3D segmentation and classification. A representative approach for rendering presented in [1, 54] combines point cloud representations with volume rendering, achieving rapid convergence speed even for dynamic novel view synthesis [34, 61]. [20, 21, 42] adopt differential point rendering technique for scene reconstructions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Recently, 3D-GS [19] is notable for its pure explicit representation and differential point-based splatting methods, enabling real-time rendering of novel views. 
Dynamic3DGS [30] models dynamic scenes by tracking the position and variance of each 3D Gaussian at each timestamp \\( t_i \\). An explicit table is utilized to store information about each 3D Gaussian at every timestamp, leading to a linear memory consumption increase, denoted as \\( O(t\\mathcal{N}) \\), in which \\( \\mathcal{N} \\) is num of 3D Gaussians. For long-term scene reconstruction, the storage cost will become non-negligible. The memory complexity of our approach only depends on the number of 3D Gaussians and parameters of Gaussians deformation fields network \\( \\mathcal{F} \\), which is denoted as \\( O(\\mathcal{N} + \\mathcal{F}) \\). [55] adds a marginal temporal Gaussian distribution into the origin 3D Gaussians, which uplift 3D Gaussians into 4DHowever, it may cause each 3D Gaussian to only focus on their local temporal space. [26] track each 3D Gaussians individually. Our approach also models 3D Gaussian motions but with a compact network, resulting in highly efficient training efficiency and real-time rendering." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.38, + 0.627, + 0.397 + ], + "angle": 0, + "content": "3. Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.406, + 0.893, + 0.451 + ], + "angle": 0, + "content": "In this section, we simply review the representation and rendering process of 3D-GS [19] in Sec. 3.1 and the formula of dynamic NeRFs in Sec. 3.2." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.46, + 0.71, + 0.477 + ], + "angle": 0, + "content": "3.1. 3D Gaussian Splatting" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.483, + 0.893, + 0.543 + ], + "angle": 0, + "content": "3D Gaussians [19] is an explicit 3D scene representation in the form of point clouds. Each 3D Gaussian is characterized by a covariance matrix \\(\\Sigma\\) and a center point \\(\\mathcal{X}\\), which is referred to as the mean value of the Gaussian:" + }, + { + "type": "equation", + "bbox": [ + 0.617, + 0.552, + 0.891, + 0.572 + ], + "angle": 0, + "content": "\\[\nG (X) = e ^ {- \\frac {1}{2} \\mathcal {X} ^ {T} \\Sigma^ {- 1} \\mathcal {X}}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.583, + 0.892, + 0.626 + ], + "angle": 0, + "content": "For differentiable optimization, the covariance matrix \\(\\Sigma\\) can be decomposed into a scaling matrix \\(\\mathbf{S}\\) and a rotation matrix \\(\\mathbf{R}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.64, + 0.627, + 0.891, + 0.643 + ], + "angle": 0, + "content": "\\[\n\\Sigma = \\mathbf {R} \\mathbf {S} \\mathbf {S} ^ {T} \\mathbf {R} ^ {T}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.65, + 0.892, + 0.741 + ], + "angle": 0, + "content": "When rendering novel views, differential splatting [57] is employed for the 3D Gaussians within the camera planes. As introduced by [62], using a viewing transform matrix \\( W \\) and the Jacobian matrix \\( J \\) of the affine approximation of the projective transformation, the covariance matrix \\( \\Sigma' \\) in camera coordinates can be computed as" + }, + { + "type": "equation", + "bbox": [ + 0.627, + 0.751, + 0.891, + 0.769 + ], + "angle": 0, + "content": "\\[\n\\Sigma^ {\\prime} = J W \\Sigma W ^ {T} J ^ {T}. 
\\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.902 + ], + "angle": 0, + "content": "In summary, each 3D Gaussian is characterized by the following attributes: position \\(\\mathcal{X} \\in \\mathbb{R}^3\\), color defined by spherical harmonic (SH) coefficients \\(\\mathcal{C} \\in \\mathbb{R}^k\\) (where \\(k\\) represents nums of SH functions), opacity \\(\\alpha \\in \\mathbb{R}\\), rotation factor \\(r \\in \\mathbb{R}^4\\), and scaling factor \\(s \\in \\mathbb{R}^3\\). Specifically, for each pixel, the color and opacity of all the Gaussians are computed using the Gaussian's representation Eq. 1. The blending of \\(N\\) ordered points that overlap the pixel is given by" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "20312" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.078, + 0.092, + 0.164, + 0.106 + ], + "angle": 0, + "content": "the formula:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.116, + 0.47, + 0.159 + ], + "angle": 0, + "content": "\\[\nC = \\sum_ {i \\in N} c _ {i} \\alpha_ {i} \\prod_ {j = 1} ^ {i - 1} (1 - \\alpha_ {i}). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.169, + 0.47, + 0.229 + ], + "angle": 0, + "content": "Here, \\( c_{i} \\), \\( \\alpha_{i} \\) represents the density and color of this point computed by a 3D Gaussian \\( G \\) with covariance \\( \\Sigma \\) multiplied by an estimizable per-point opacity and SH color coefficients." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.239, + 0.43, + 0.256 + ], + "angle": 0, + "content": "3.2. Dynamic NeRFs with Deformation Fields" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.263, + 0.448, + 0.278 + ], + "angle": 0, + "content": "All the dynamic NeRF algorithms can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.291, + 0.469, + 0.307 + ], + "angle": 0, + "content": "\\[\nc, \\sigma = \\mathcal {M} (\\mathbf {x}, d, t, \\lambda), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.319, + 0.469, + 0.379 + ], + "angle": 0, + "content": "where \\(\\mathcal{M}\\) is a mapping that maps 8D space \\((\\mathbf{x},d,t,\\lambda)\\) to 4D space \\((c,\\sigma)\\). Where \\(x\\) reveals to the spatial point, \\(\\lambda\\) is the optional input as used to build topological and appearance changes in [36], and \\(d\\) stands for view-dependency." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.38, + 0.469, + 0.47 + ], + "angle": 0, + "content": "As is shown in Fig. 2 (a), all the deformation NeRF based method which estimate the world-to-canonical mapping by a deformation network \\(\\phi_t:(\\mathbf{x},t)\\to \\Delta \\mathbf{x}\\). Then a network is introduced to compute volume density and view-dependent RGB color from each ray. The formula for rendering can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.176, + 0.483, + 0.469, + 0.499 + ], + "angle": 0, + "content": "\\[\nc, \\sigma = \\operatorname {N e R F} (\\mathbf {x} + \\Delta \\mathbf {x}, d, \\lambda), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.511, + 0.469, + 0.556 + ], + "angle": 0, + "content": "where 'NeRF' stands for vanilla NeRF pipeline, \\(\\lambda\\) is a frame-dependent code to model the topological and appearance changes [31, 36]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.557, + 0.469, + 0.647 + ], + "angle": 0, + "content": "However, our 4D Gaussian splatting framework presents a novel rendering technique. 
We successfully compute the canonical-to-world mapping by a Gaussian deformation field network \\(\\mathcal{F}\\) at the time \\(t\\) directly and differential splating [19] is followed, which enables the skill of computing backward flow and tracking for 3D Gaussians." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.661, + 0.169, + 0.675 + ], + "angle": 0, + "content": "4. Method" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.686, + 0.469, + 0.747 + ], + "angle": 0, + "content": "Sec. 4.1 introduces the overall 4D Gaussian Splitting framework. Then, the Gaussian deformation field is proposed in Sec. 4.2. Finally, we describe the optimization process in Sec. 4.3." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.757, + 0.382, + 0.773 + ], + "angle": 0, + "content": "4.1. 4D Gaussian Splatting Framework" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.78, + 0.469, + 0.855 + ], + "angle": 0, + "content": "As shown in Fig. 3, given a view matrix \\(M = [R,T]\\), times-amp \\(t\\), our 4D Gaussian splatting framework includes 3D Gaussians \\(\\mathcal{G}\\) and Gaussian deformation field network \\(\\mathcal{F}\\). Then a novel-view image \\(\\hat{I}\\) is rendered by differential splatting [57] \\(\\mathcal{S}\\) following \\(\\hat{I} = \\mathcal{S}(M,\\mathcal{G}^{\\prime})\\), where \\(\\mathcal{G}^{\\prime} = \\Delta \\mathcal{G} + \\mathcal{G}\\)." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Specifically, the deformation of 3D Gaussians \\(\\Delta \\mathcal{G}\\) is introduced by the Gaussian deformation field network \\(\\Delta \\mathcal{G} = \\mathcal{F}(\\mathcal{G},t)\\), in which the spatial-temporal structure encoder \\(\\mathcal{H}\\)" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.088, + 0.887, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.282, + 0.892, + 0.324 + ], + "angle": 0, + "content": "Figure 4. Illustration of the optimization process. With static 3D Gaussian initialization, our model can learn high-quality 3D Gaussians of the motion part." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.337, + 0.892, + 0.412 + ], + "angle": 0, + "content": "can encode both the temporal and spatial features of 3D Gaussians \\( f_{d} = \\mathcal{H}(\\mathcal{G},t) \\), and the multi-head Gaussian deformation decoder \\( \\mathcal{D} \\) can decode the features and predict each 3D Gaussian's deformation \\( \\Delta \\mathcal{G} = \\mathcal{D}(f) \\), then the deformed 3D Gaussians \\( \\mathcal{G}' \\) can be introduced." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.413, + 0.892, + 0.489 + ], + "angle": 0, + "content": "The rendering process of our 4D Gaussian Splatting is depicted in Fig. 2 (c). Our 4D Gaussian splatting converts the original 3D Gaussians \\(\\mathcal{G}\\) into another group of 3D Gaussians \\(\\mathcal{G}'\\) given a timestamp \\(t\\), maintaining the effectiveness of the differential splatting as referred in [57]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.497, + 0.824, + 0.512 + ], + "angle": 0, + "content": "4.2. Gaussian Deformation Field Network" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.52, + 0.892, + 0.58 + ], + "angle": 0, + "content": "The network to learn the Gaussian deformation field includes an efficient spatial-temporal structure encoder \\(\\mathcal{H}\\) and a Gaussian deformation decoder \\(\\mathcal{D}\\) for predicting the deformation of each 3D Gaussian." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.599, + 0.892, + 0.765 + ], + "angle": 0, + "content": "Spatial-Temporal Structure Encoder. Nearby 3D Gaussians always share similar spatial and temporal information. To model 3D Gaussians' features effectively, we introduce an efficient spatial-temporal structure encoder \\(\\mathcal{H}\\) including a multi-resolution HexPlane \\(R(i,j)\\) and a tiny MLP \\(\\phi_d\\) inspired by [5, 8, 11, 44]. While the vanilla 4D neural voxel is memory-consuming, we adopt a 4D K-Planes [11] module to decompose the 4D neural voxel into 6 planes. All 3D Gaussians in a certain area can be contained in the bounding plane voxels and Gaussian's deformation can also be encoded in nearby temporal voxels." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.766, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Specifically, the spatial-temporal structure encoder \\(\\mathcal{H}\\) contains 6 multi-resolution plane modules \\(R_{l}(i,j)\\) and a tiny MLP \\(\\phi_d\\), i.e. \\(\\mathcal{H}(\\mathcal{G},t) = \\{R_l(i,j),\\phi_d|(i,j)\\in\\) \\(\\{(x,y),(x,z),(y,z),(x,t),(y,t),(z,t)\\} ,l\\in \\{1,2\\} \\}\\). The position \\(\\mu = (x,y,z)\\) is the mean value of 3D Gaussians \\(\\mathcal{G}\\). Each voxel module is defined by \\(R(i,j)\\in \\mathbb{R}^{h\\times lN_i\\times lN_j}\\) where \\(h\\) stands for the hidden dim of features, \\(N\\) denotes the basic resolution of voxel grid and \\(l\\) equals to the upsampling scale. This entails encoding information of the 3D" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20313" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.086, + 0.896, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.302, + 0.895, + 0.334 + ], + "angle": 0, + "content": "Figure 5. Visualization of synthesized datasets compared with other models [5, 8, 11, 17, 19, 49]. The rendering results of [11] are displayed with a default green background. We have adopted their rendering settings." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.344, + 0.471, + 0.389 + ], + "angle": 0, + "content": "Gaussians within the 62D voxel planes while considering temporal information. The formula for computing separate voxel features is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.113, + 0.4, + 0.47, + 0.434 + ], + "angle": 0, + "content": "\\[\nf _ {h} = \\bigcup_ {l} \\prod \\operatorname {i n t e r p} \\left(R _ {l} (i, j)\\right), \\tag {7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.099, + 0.435, + 0.43, + 0.454 + ], + "angle": 0, + "content": "\\[\n(i, j) \\in \\{(x, y), (x, z), (y, z), (x, t), (y, t), (z, t) \\}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.465, + 0.472, + 0.543 + ], + "angle": 0, + "content": "\\(f_{h} \\in \\mathbb{R}^{h * l}\\) is the feature of neural voxels. 'interp' denotes the bilinear interpolation for querying the voxel features located at 4 vertices of the grid. The discussion of the production process is similar to [11]. Then a tiny MLP \\(\\phi_{d}\\) merges all the features by \\(f_{d} = \\phi_{d}(f_{h})\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.564, + 0.47, + 0.671 + ], + "angle": 0, + "content": "Multi-head Gaussian Deformation Decoder. When all the features of 3D Gaussians are encoded, we can compute any desired variable with a multi-head Gaussian deformation decoder \\(\\mathcal{D} = \\{\\phi_x,\\phi_r,\\phi_s\\}\\). 
Separate MLPs are employed to compute the deformation of position \\(\\Delta \\mathcal{X} = \\phi_{x}(f_{d})\\), rotation \\(\\Delta r = \\phi_r(f_d)\\), and scaling \\(\\Delta s = \\phi_s(f_d)\\). Then, the deformed feature \\((\\mathcal{X}',r',s')\\) can be addressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.13, + 0.684, + 0.47, + 0.701 + ], + "angle": 0, + "content": "\\[\n\\left(\\mathcal {X} ^ {\\prime}, r ^ {\\prime}, s ^ {\\prime}\\right) = \\left(\\mathcal {X} + \\Delta \\mathcal {X}, r + \\Delta r, s + \\Delta s\\right). \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.714, + 0.47, + 0.746 + ], + "angle": 0, + "content": "Finally, we obtain the deformed 3D Gaussians \\(\\mathcal{G}' = \\{\\mathcal{X}', s', r', \\sigma, \\mathcal{C}\\}\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.756, + 0.218, + 0.772 + ], + "angle": 0, + "content": "4.3. Optimization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.471, + 0.903 + ], + "angle": 0, + "content": "3D Gaussian Initialization. [19] shows that 3D Gaussians can be well-trained with structure from motion (SfM) [43] points initialization. Similarly, 4D Gaussians should also be fine-tuned in proper 3D Gaussian initialization. We optimize 3D Gaussians at initial 3000 iterations for warm-up and then render images with 3D Gaussians \\(\\hat{I} = S(M, \\mathcal{G})\\) instead of 4D Gaussians \\(\\hat{I} = S(M, \\mathcal{G}')\\). The illustration of the optimization process is shown in Fig. 4." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.344, + 0.893, + 0.406 + ], + "angle": 0, + "content": "Loss Function. Similar to other reconstruction methods [8, 19, 39], we use the L1 color loss to supervise the training process. A grid-based total-variational loss [5, 8, 11, 47] \\(\\mathcal{L}_{tv}\\) is also applied." + }, + { + "type": "equation", + "bbox": [ + 0.629, + 0.416, + 0.893, + 0.435 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = | \\hat {I} - I | + \\mathcal {L} _ {t v}. \\tag {9}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.44, + 0.627, + 0.459 + ], + "angle": 0, + "content": "5. Experiment" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.466, + 0.893, + 0.587 + ], + "angle": 0, + "content": "In this section, we mainly introduce the hyperparameters and datasets of our settings in Sec. 5.1 and the results between different datasets will be compared with [2, 5, 8, 11, 19, 27, 45, 49, 50] in Sec. 5.2. Then, ablation studies are proposed to prove the effectiveness of our approaches in Sec. 5.3 and more discussion about 4D-GS in Sec. 5.4. Finally, we discuss the limitation of our proposed 4D-GS in Sec. 5.5." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.597, + 0.708, + 0.614 + ], + "angle": 0, + "content": "5.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.62, + 0.893, + 0.698 + ], + "angle": 0, + "content": "Our implementation is primarily based on the PyTorch [37] framework and tested in a single RTX 3090 GPU, and we've fine-tuned our optimization parameters by the configuration outlined in the 3D-GS [19]. More hyperparameters will be shown in the appendix." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.715, + 0.893, + 0.822 + ], + "angle": 0, + "content": "Synthetic Dataset. We primarily assess the performance of our model using synthetic datasets, as introduced by D-NeRF [39]. 
These datasets are designed for monocular settings, although it's worth noting that the camera poses for each timestamp are close to randomly generated. Each scene within these datasets contains dynamic frames, ranging from 50 to 200 in number." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.903 + ], + "angle": 0, + "content": "Real-world Datasets. We utilize datasets provided by HyperNeRF [36] and Neu3D's [22] as benchmark datasets to evaluate the performance of our model in real-world scenarios. The Nerfies dataset is captured using one or two" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20314" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.086, + 0.891, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.32, + 0.893, + 0.335 + ], + "angle": 0, + "content": "Figure 6. Visualization of the HyperNeRF [36] datasets compared with other methods [8, 17, 19, 36]. 'GT' stands for ground truth images." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.349, + 0.893, + 0.379 + ], + "angle": 0, + "content": "Table 1. Quantitative results on the synthesis dataset. The best and the second best results are denoted by pink and yellow. The rendering resolution is set to \\(800 \\times 800\\). \"Time\" in the table stands for training times." + }, + { + "type": "table", + "bbox": [ + 0.198, + 0.39, + 0.776, + 0.528 + ], + "angle": 0, + "content": "
ModelPSNR(dB)↑SSIM↑LPIPS↓Time↓FPS ↑Storage (MB)↓
TiNeuVox-B [8]32.670.970.0428 mins1.548
KPlanes [11]31.610.97-52 mins0.97418
HexPlane-Slim [5]31.040.970.0411m 30s2.538
3D-GS [19]23.190.930.0810 mins17010
FFDNeRF [17]32.680.970.04-< 1440
MSTH [49]31.340.980.026 mins--
V4D [12]33.720.980.026.9 hours2.08377
Ours34.050.980.028 mins8218
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.553, + 0.47, + 0.645 + ], + "angle": 0, + "content": "cameras, following straightforward camera motion, while the Neu3D's dataset is captured using 15 to 20 static cameras, involving extended periods and intricate camera motions. We use the points computed by SfM [43] from the first frame of each video in Neu3D's dataset and 200 frames randomly selected in HyperNeRF's." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.651, + 0.172, + 0.667 + ], + "angle": 0, + "content": "5.2. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.47, + 0.78 + ], + "angle": 0, + "content": "We primarily assess our experimental results using various metrics, encompassing peak-signal-to-noise ratio (PSNR), perceptual quality measure LPIPS [60], structural similarity index (SSIM) [52] and its extensions including structural dissimilarity index measure (DSSIM), multiscale structural similarity index (MS-SSIM), FPS, training times and Storage." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.471, + 0.902 + ], + "angle": 0, + "content": "To assess the quality of novel view synthesis, we conducted benchmarking against several state-of-the-art methods in the field, including [5, 8, 11, 12, 17, 19, 27, 49]. The results are summarized in Tab. 1. While current dynamic hybrid representations can produce high-quality results, they often come with the drawback of rendering speed. The lack of modeling dynamic motion part makes [19] fail to reconstruct dynamic scenes. In contrast, our method en" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.553, + 0.893, + 0.612 + ], + "angle": 0, + "content": "joys both the highest rendering quality within the synthesis dataset and exceptionally fast rendering speeds while keeping extremely low storage consumption and convergence time." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.614, + 0.895, + 0.796 + ], + "angle": 0, + "content": "Additionally, the results obtained from real-world datasets are presented in Tab. 2 and Tab. 3. It becomes apparent that some NeRFs [2, 5, 45] suffer from slow convergence speed, and the other grid-based NeRF methods [5, 8, 11, 49] encounter difficulties when attempting to capture intricate object details. In stark contrast, our methods research comparable rendering quality, fast convergence, and excel in free-view rendering speed in indoor cases. Though [27] addresses the high quality in comparison to ours, the need for multi-cam setups makes it hard to model monocular scenes and other methods also limit free-view rendering speed and storage." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.801, + 0.655, + 0.817 + ], + "angle": 0, + "content": "5.3. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.894, + 0.903 + ], + "angle": 0, + "content": "Spatial-Temporal Structure Encoder. The explicit Hex-Plane encoder \\( R_{l}(i,j) \\) possesses the capacity to retain 3D Gaussians' spatial and temporal information, which can reduce storage consumption in comparison with purely explicit methods [30]. Discarding this module, we observe" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20315" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.177, + 0.089, + 0.794, + 0.105 + ], + "angle": 0, + "content": "Table 2. Quantitative results on HyperNeRF's [36] vrig dataset. Rendering resolution is set to \\( {960} \\times {540} \\) ." 
+ }, + { + "type": "table", + "bbox": [ + 0.217, + 0.115, + 0.753, + 0.243 + ], + "angle": 0, + "content": "
ModelPSNR(dB)↑MS-SSIM↑Times↓FPS↑Storage(MB)↓
Nerfies [35]22.20.803~ hours< 1-
HyperNeRF [36]22.40.81432 hours< 1-
TiNeuVox-B [8]24.30.83630 mins148
3D-GS [19]19.70.68040 mins5552
FFDNeRF [17]24.20.842-0.05440
V4D [12]24.80.8325.5 hours0.29377
Ours25.20.84530 mins3461
" + }, + { + "type": "table_caption", + "bbox": [ + 0.189, + 0.256, + 0.78, + 0.271 + ], + "angle": 0, + "content": "Table 3. Quantitative results on the Neu3D's [22] dataset, rendering resolution is set to \\(1352 \\times 1014\\)." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.281, + 0.803, + 0.404 + ], + "angle": 0, + "content": "
ModelPSNR(dB)↑D-SSIM↓LPIPS↓Time ↓FPS↑Storage (MB)↓
NeRFPlayer [45]30.690.0340.1116 hours0.045-
HyperReel [2]31.100.0360.0969 hours2.0360
HexPlane-all* [5]31.700.0140.07512 hours0.2250
KPlanes [11]31.63--1.8 hours0.3309
Im4D [27]32.58-0.20828 mins~593
MSTH [49]32.370.0150.05620 mins2(15‡)135
Ours31.150.0160.04940 mins3090
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.102, + 0.405, + 0.618, + 0.416 + ], + "angle": 0, + "content": "*: The metrics of the model are tested without \"coffee martini\" and resolution is set to \\(1024 \\times 768\\)." + }, + { + "type": "table_footnote", + "bbox": [ + 0.102, + 0.416, + 0.353, + 0.429 + ], + "angle": 0, + "content": ": The FPS is tested with fixed-view rendering." + }, + { + "type": "list", + "bbox": [ + 0.102, + 0.405, + 0.618, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.454, + 0.47, + 0.516 + ], + "angle": 0, + "content": "that using only a shallow MLP \\(\\phi_d\\) falls short in modeling complex deformations across various settings. Tab. 4 demonstrates that, while the model incurs minimal memory costs, it does come at the expense of rendering quality." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.527, + 0.47, + 0.737 + ], + "angle": 0, + "content": "Gaussian Deformation Decoder. Our proposed Gaussian deformation decoder \\(\\mathcal{D}\\) decodes the features from the spatial-temporal structure encoder \\(\\mathcal{H}\\). All the changes in 3D Gaussians can be explained by separate MLPs \\(\\{\\phi_x, \\phi_r, \\phi_s\\}\\). As is shown in Tab. 4, 4D Gaussians cannot fit dynamic scenes well without modeling 3D Gaussian motion. Meanwhile, the movement of human body joints is typically manifested as stretching and twisting of surface details in a macroscopic view. If one aims to accurately model these movements, the size and shape of 3D Gaussians should also be adjusted accordingly. Otherwise, there may be underfitting of details during excessive stretching, or an inability to correctly simulate the movement of objects at a microscopic level." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.471, + 0.902 + ], + "angle": 0, + "content": "3D Gaussian Initialization. In some cases without SfM [43] points initialization, training 4D-GS directly may cause difficulty in convergence. Optimizing 3D Gaussians for warm-up enjoys: (a) making some 3D Gaussians stay in the dynamic part, which releases the pressure of large deformation learning by 4D Gaussians as shown in Fig. 4. (b) learning proper 3D Gaussians \\(\\mathcal{G}\\) and suggesting deformation fields paying more attention to the dynamic part. (c) avoiding numeric errors in optimizing the Gaussian deformation network \\(\\mathcal{F}\\) and keeping the training process stable." + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.453, + 0.871, + 0.665 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.675, + 0.892, + 0.703 + ], + "angle": 0, + "content": "Figure 7. Visualization of tracking with 3D Gaussians. Each line in the figure of second rows stands for trajectories of 3D Gaussians" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.731, + 0.892, + 0.762 + ], + "angle": 0, + "content": "Tab. 4 also shows that if we train our model without the warm-up coarse stage, the rendering quality will suffer." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.772, + 0.625, + 0.786 + ], + "angle": 0, + "content": "5.4. Discussions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Tracking with 3D Gaussians. Tracking in 3D is also a important task. [17] also shows tracking objects' motion in 3D. Different from dynamic3DGS [30], our methods even can present tracking objects in monocular settings with pretty low storage i.e. 
10MB in 3D Gaussians \\(\\mathcal{G}\\) and 8 MB in Gaussian deformation field network \\(\\mathcal{F}\\). Fig. 7 shows the 3D Gaussian's deformation at certain timestamps." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "20316" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.26, + 0.09, + 0.71, + 0.104 + ], + "angle": 0, + "content": "Table 4. Ablation studies on synthetic datasets using our proposed methods." + }, + { + "type": "table", + "bbox": [ + 0.127, + 0.115, + 0.835, + 0.238 + ], + "angle": 0, + "content": "
ModelPSNR(dB)↑SSIM↑LPIPS↓Time↓FPS↑Storage (MB)↓
Ours w/o HexPlane RL(i,j)27.050.950.054 mins14012
Ours w/o initialization31.910.970.037.5 mins7918
Ours w/o φx26.670.950.078 mins8217
Ours w/o φr33.080.980.038 mins8317
Ours w/o φs33.020.980.038 mins8217
Ours34.050.980.028 mins8218
" + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.248, + 0.468, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.098, + 0.374, + 0.447, + 0.388 + ], + "angle": 0, + "content": "Figure 8. Visualization of composition with 4D Gaussians." + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.402, + 0.468, + 0.636 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.646, + 0.47, + 0.688 + ], + "angle": 0, + "content": "Figure 9. Visualization of the relationship between rendering speed and numbers of 3D Gaussians in the rendered screens. All the tests are finished in the synthesis dataset." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.715, + 0.47, + 0.837 + ], + "angle": 0, + "content": "Composition with 4D Gaussians. Similar to dynamic3DGS [30], our proposed methods can also propose editing in 4D Gaussians in Fig. 8. Thanks to the explicit representation of 3D Gaussians, all the trained models can predict deformed 3D Gaussians in the same space following \\(\\mathcal{G}' = \\{\\mathcal{G}_1', \\mathcal{G}_2', \\dots, \\mathcal{G}_n'\\}\\) and differential rendering [57] can project all the point clouds into viewpoints by \\(\\hat{I} = \\mathcal{S}(M, \\mathcal{G}')\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Analysis of Rendering Speed. As is shown in Fig. 9, we also test the relationship between points in the rendered screen and rendering speed at the resolution of \\(800 \\times 800\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.25, + 0.892, + 0.37 + ], + "angle": 0, + "content": "We found that if the rendered points are lower than 30000, the rendering speed can be up to 90. The config of Gaussian deformation fields are discussed in the appendix. To achieve render-time rendering speed, we should strike a balance among all the rendering resolutions, 4D Gaussians representation including numbers of 3D Gaussians, and the capacity of the Gaussian deformation field network and any other hardware constraints." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.391, + 0.627, + 0.405 + ], + "angle": 0, + "content": "5.5. Limitations" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.418, + 0.892, + 0.598 + ], + "angle": 0, + "content": "Though 4D-GS can indeed attain rapid convergence and yield real-time rendering outcomes in many scenarios, there are a few key challenges to address. First, large motions, the absence of background points, and the unprecise camera pose cause the struggle of optimizing 4D Gaussians. What is more, it is still challenging to 4D-GS also cannot split the joint motion of static and dynamic Gaussiansparts under the monocular settings without any additional supervision. Finally, a more compact algorithm needs to be designed to handle urban-scale reconstruction due to the heavy querying of Gaussian deformation fields by huge numbers of 3D Gaussians." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.624, + 0.619, + 0.639 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.652, + 0.892, + 0.789 + ], + "angle": 0, + "content": "This paper proposes 4D Gaussian splating to achieve real-time dynamic scene rendering. An efficient deformation field network is constructed to accurately model Gaussian motions and shape deformations, where adjacent Gaussians are connected via a spatial-temporal structure encoder. 
Connections between Gaussians lead to more complete deformed geometry, effectively avoiding avulsion. Our 4D Gaussians can not only model dynamic scenes but also have the potential for 4D objective tracking and editing." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.812, + 0.66, + 0.829 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This work was supported by the National Natural Science Foundation of China (No. 62376102). The authors would like to thank Haotong Lin for providing the quantitative results of Im4D [27]." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "20317" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.169 + ], + "angle": 0, + "content": "[1] Jad Abou-Chakra, Feras Dayoub, and Niko Sünderhauf. Particlererf: Particle based encoding for online neural radiance fields in dynamic scenes. arXiv preprint arXiv:2211.04041, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.257 + ], + "angle": 0, + "content": "[2] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.259, + 0.472, + 0.342 + ], + "angle": 0, + "content": "[3] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.344, + 0.472, + 0.413 + ], + "angle": 0, + "content": "[4] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.415, + 0.472, + 0.471 + ], + "angle": 0, + "content": "[5] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 130-141, 2023. 1, 2, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.473, + 0.472, + 0.543 + ], + "angle": 0, + "content": "[6] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (ToG), 34(4):1-13, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.545, + 0.472, + 0.587 + ], + "angle": 0, + "content": "[7] Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. ACM Siggraph Computer Graphics, 22(4): 65-74, 1988. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.589, + 0.472, + 0.657 + ], + "angle": 0, + "content": "[8] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. 
Fast dynamic radiance fields with time-aware neural voxels. In SIGGRAPH Asia 2022 Conference Papers, pages 1-9, 2022. 1, 2, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.66, + 0.472, + 0.743 + ], + "angle": 0, + "content": "[9] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snively, and Richard Tucker. Deepview: View synthesis with learned gradient descent. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.472, + 0.815 + ], + "angle": 0, + "content": "[10] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.472, + 0.9 + ], + "angle": 0, + "content": "[11] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12479–12488, 2023. 1, 2, 4, 5, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[12] Wanshui Gan, Hongbin Xu, Yi Huang, Shifeng Chen, and Naoto Yokoya. V4d: Voxel for 4d novel view synthesis. IEEE Transactions on Visualization and Computer Graphics, 2023. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.15, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[13] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5712-5721, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.207, + 0.892, + 0.262 + ], + "angle": 0, + "content": "[14] Xiangjun Gao, Jiaolong Yang, Jongyoo Kim, Sida Peng, Zicheng Liu, and Xin Tong. Mps-nerf: Generalizable 3d human rendering from multiview images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.264, + 0.892, + 0.332 + ], + "angle": 0, + "content": "[15] Kaiwen Guo, Feng Xu, Yangang Wang, Yebin Liu, and Qionghai Dai. Robust non-rigid motion tracking and surface reconstruction using 10 regularization. In Proceedings of the IEEE International Conference on Computer Vision, pages 3083-3091, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.334, + 0.892, + 0.417 + ], + "angle": 0, + "content": "[16] Kaiwen Guo, Peter Lincoln, Philip Davidson, Jay Busch, Xueming Yu, Matt Whalen, Geoff Harvey, Sergio Orts-Escolano, Rohit Pandey, Jason Dourgarian, et al. The relightables: Volumetric performance capture of humans with realistic relighting. ACM Transactions on Graphics (ToG), 38(6):1–19, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.42, + 0.892, + 0.501 + ], + "angle": 0, + "content": "[17] Xiang Guo, Jiadai Sun, Yuchao Dai, Guanying Chen, Xiaoting Ye, Xiao Tan, Errui Ding, Yumeng Zhang, and Jingdong Wang. Forward flow for novel view synthesis of dynamic scenes. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16022-16033, 2023. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.504, + 0.892, + 0.559 + ], + "angle": 0, + "content": "[18] Tao Hu, Tao Yu, Zerong Zheng, He Zhang, Yebin Liu, and Matthias Zwicker. Hvtr: Hybrid volumetric-textural rendering for human avatars. In 2022 International Conference on 3D Vision (3DV), pages 197-208. IEEE, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.561, + 0.892, + 0.615 + ], + "angle": 0, + "content": "[19] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG), 42(4):1-14, 2023. 1, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.618, + 0.892, + 0.671 + ], + "angle": 0, + "content": "[20] Leonid Keselman and Martial Hebert. Approximate differentiable rendering with algebraic surfaces. In European Conference on Computer Vision, pages 596-614. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.675, + 0.892, + 0.715 + ], + "angle": 0, + "content": "[21] Leonid Keselman and Martial Hebert. Flexible techniques for differentiable rendering with 3d gaussians. arXiv preprint arXiv:2308.14737, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.718, + 0.892, + 0.813 + ], + "angle": 0, + "content": "[22] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.816, + 0.892, + 0.871 + ], + "angle": 0, + "content": "[23] Zhong Li, Yu Ji, Wei Yang, Jinwei Ye, and Jingyi Yu. Robust 3d human motion reconstruction via dynamic template construction. In 2017 International Conference on 3D Vision (3DV), pages 496-505. IEEE, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[24] Zhong Li, Minye Wu, Wangyiteng Zhou, and Jingyi Yu. 4d human body correspondences from panoramic depth maps." + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20318" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2877-2886, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.19 + ], + "angle": 0, + "content": "[25] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6498-6508, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.469, + 0.234 + ], + "angle": 0, + "content": "[26] Zhan Li, Zhang Chen, Zhong Li, and Yi Xu. Spacetime gaussian feature splatting for real-time dynamic view synthesis. arXiv preprint arXiv:2312.16812, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.236, + 0.47, + 0.291 + ], + "angle": 0, + "content": "[27] Haotong Lin, Sida Peng, Zhen Xu, Tao Xie, Xingyi He, Hu-jun Bao, and Xiaowei Zhou. High-fidelity and real-time novel view synthesis for dynamic scenes. In SIGGRAPH Asia Conference Proceedings, 2023. 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.293, + 0.469, + 0.348 + ], + "angle": 0, + "content": "[28] Xingyu Liu, Mengyuan Yan, and Jeannette Bohg. Meteornet: Deep learning on dynamic 3d point cloud sequences. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9246-9255, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.35, + 0.469, + 0.419 + ], + "angle": 0, + "content": "[29] Yu-Lun Liu, Chen Gao, Andreas Meuleman, Hung-Yu Tseng, Ayush Saraf, Changil Kim, Yung-Yu Chuang, Johannes Kopf, and Jia-Bin Huang. Robust dynamic radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13-23, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.421, + 0.469, + 0.475 + ], + "angle": 0, + "content": "[30] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In 3DV, 2024. 2, 3, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.477, + 0.469, + 0.56 + ], + "angle": 0, + "content": "[31] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7210-7219, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.563, + 0.469, + 0.63 + ], + "angle": 0, + "content": "[32] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.633, + 0.469, + 0.688 + ], + "angle": 0, + "content": "[33] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.69, + 0.469, + 0.746 + ], + "angle": 0, + "content": "[34] Byeongjun Park and Changick Kim. Point-dynrf: Point-based dynamic radiance fields from a monocular video. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3171-3181, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.747, + 0.469, + 0.816 + ], + "angle": 0, + "content": "[35] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[36] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 
2, 4,5,6,7" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.174 + ], + "angle": 0, + "content": "[37] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.178, + 0.892, + 0.245 + ], + "angle": 0, + "content": "[38] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4252-4262, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.892, + 0.316 + ], + "angle": 0, + "content": "[39] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10318–10327, 2021. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.319, + 0.892, + 0.386 + ], + "angle": 0, + "content": "[40] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.39, + 0.892, + 0.446 + ], + "angle": 0, + "content": "[41] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.448, + 0.892, + 0.489 + ], + "angle": 0, + "content": "[42] Darius Rückert, Linus Franke, and Marc Stamminger. Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics (ToG), 41(4):1-14, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.491, + 0.892, + 0.545 + ], + "angle": 0, + "content": "[43] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4104-4113, 2016. 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.548, + 0.892, + 0.63 + ], + "angle": 0, + "content": "[44] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16632-16642, 2023. 1, 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.633, + 0.892, + 0.715 + ], + "angle": 0, + "content": "[45] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.718, + 0.892, + 0.8 + ], + "angle": 0, + "content": "[46] Zhuo Su, Lan Xu, Zerong Zheng, Tao Yu, Yebin Liu, and Lu Fang. 
Robustfusion: Human volumetric capture with data-driven visual cues using a rgbd camera. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part IV 16, pages 246-264. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.802, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[47] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5459-5469, 2022. 1, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[48] Fengrui Tian, Shaoyi Du, and Yueqi Duan. Mononerf: Learning a generalizable dynamic radiance field from" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20319" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.47, + 0.133 + ], + "angle": 0, + "content": "monocular videos. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17903-17913, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.134, + 0.47, + 0.189 + ], + "angle": 0, + "content": "[49] Feng Wang, Zilong Chen, Guokang Wang, Yafei Song, and Huaping Liu. Masked space-time hash encoding for efficient dynamic scene reconstruction. Advances in neural information processing systems, 2023. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.19, + 0.47, + 0.258 + ], + "angle": 0, + "content": "[50] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, Yafei Song, and Huaping Liu. Mixed neural voxels for fast multiview video synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19706-19716, 2023. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.26, + 0.47, + 0.329 + ], + "angle": 0, + "content": "[51] Yiming Wang, Qin Han, Marc Habermann, Kostas Dani-ilidis, Christian Theobalt, and Lingjie Liu. Neus2: Fast learning of neural implicit surfaces for multi-view reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3295–3306, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.33, + 0.47, + 0.384 + ], + "angle": 0, + "content": "[52] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.385, + 0.47, + 0.453 + ], + "angle": 0, + "content": "[53] Qingshan Xu, Weihang Kong, Wenbing Tao, and Marc Pollefeys. Multi-scale geometric consistency guided and planar prior assisted multi-view stereo. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(4):4945-4963, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.455, + 0.47, + 0.524 + ], + "angle": 0, + "content": "[54] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5438–5448, 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.525, + 0.47, + 0.579 + ], + "angle": 0, + "content": "[55] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv:2310.10642, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.581, + 0.469, + 0.621 + ], + "angle": 0, + "content": "[56] Taoran Yi, Jiemin Fang, Xinggang Wang, and Wenyu Liu. Generalizable neural voxels for fast human radiance fields. arXiv preprint arXiv:2303.15387, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.622, + 0.469, + 0.677 + ], + "angle": 0, + "content": "[57] Wang Yifan, Felice Serena, Shihao Wu, Cengiz Öz Tireli, and Olga Sorkine-Hornung. Differentiable surface splatting for point-based geometry processing. ACM Transactions on Graphics (TOG), 38(6):1-14, 2019. 2, 3, 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.679, + 0.469, + 0.734 + ], + "angle": 0, + "content": "[58] Lequan Yu, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-net: Point cloud upsampling network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2790–2799, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.735, + 0.469, + 0.775 + ], + "angle": 0, + "content": "[59] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.47, + 0.845 + ], + "angle": 0, + "content": "[60] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[61] Kaichen Zhou, Jia-Xing Zhong, Sangyun Shin, Kai Lu, Yiyuan Yang, Andrew Markham, and Niki Trigoni. Dynpoint: Dynamic neural point for view synthesis. Advances in Neural Information Processing Systems, 36, 2024. 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.092, + 0.893, + 0.149 + ], + "angle": 0, + "content": "[62] Matthias Zwicker, Hanspeter Pfister, Jeroen Van Baar, and Markus Gross. Surface splatting. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 371-378, 2001. 
3" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20320" + } + ] +] \ No newline at end of file diff --git a/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/c4bfa810-f46e-49e4-9e19-ae1a9e3dcad6_origin.pdf b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/c4bfa810-f46e-49e4-9e19-ae1a9e3dcad6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e29fcac4e84aed8d9d39c7f6b8ecb8438c7d7951 --- /dev/null +++ b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/c4bfa810-f46e-49e4-9e19-ae1a9e3dcad6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4409706da37dcb2d99a75b071d9dd49622c7938ce58f0071e2779d8aa460f3d4 +size 2651815 diff --git a/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/full.md b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1cd2c7d499f72068dbadc42f86967b548c6dba66 --- /dev/null +++ b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/full.md @@ -0,0 +1,348 @@ +# 4D Gaussian Splatting for Real-Time Dynamic Scene Rendering + +Guanjun Wu $^{1*}$ , Taoran Yi $^{2*}$ , Jiemin Fang $^{3\dagger}$ , Lingxi Xie $^{3}$ , Xiaopeng Zhang $^{3}$ , Wei Wei $^{1}$ , Wenyu Liu $^{2}$ , Qi Tian $^{3}$ , Xinggang Wang $^{2\dagger}$ $^{1}$ School of CS, Huazhong University of Science and Technology + $^{2}$ School of EIC, Huazhong University of Science and Technology + $^{3}$ Huawei Inc. {guajuwu, taoranyi, weiw, liuwy, xgwang}@hust.edu.cn {jaminfong, 198808xc, zxphistory}@gmail.com tian.qil@huawei.com + +![](images/d73bdb002bf5289b6d8907683ba93efef171e8ac01f88f48bdee539a002af360.jpg) + +![](images/5a870c6bfe26650dfb77339e293ba392d3e580979a11e9a8ea07d02cdc240ba5.jpg) + +![](images/73b2ae604c887edc3943e5666529abad127dbbc92887896cfe92677c2de6f41c.jpg) +Figure 1. Our method achieves real-time rendering $\ddagger$ for dynamic scenes at high image resolutions while maintaining high rendering quality. The right figure is tested on synthetic datasets, where the radius of the dot corresponds to the training time. "Res": resolution. $\ddagger$ The rendering speed not only depends on the image resolution but also the number of 3D Gaussians and the scale of deformation fields which are determined by the complexity of the scene. + +# Abstract + +Representing and rendering dynamic scenes has been an important but challenging task. Especially, to accurately model complex motions, high efficiency is usually hard to guarantee. To achieve real-time dynamic scene rendering while also enjoying high training and storage efficiency, we propose 4D Gaussian Splatting (4D-GS) as a holistic representation for dynamic scenes rather than applying 3D-GS for each individual frame. In 4D-GS, a novel explicit representation containing both 3D Gaussians and 4D neural voxels is proposed. A decomposed neural voxel encoding algorithm inspired by HexPlane is proposed to efficiently build Gaussian features from 4D neural voxels and then a lightweight MLP is applied to predict Gaussian deformations at novel timestamps. Our 4D-GS method achieves real-time rendering under high resolutions, 82 FPS at an $800 \times 800$ resolution on an RTX 3090 GPU while maintaining comparable or better quality than previous state-of-the-art methods. More demos and code are available at + +https://guanjunwu.github.io/4dgs/. + +# 1. 
Introduction + +Novel view synthesis (NVS) stands as a critical task in the domain of 3D vision and plays a vital role in many applications, e.g. VR, AR, and movie production. NVS aims at rendering images from any desired viewpoint or timestamp of a scene, usually requiring modeling the scene accurately from several 2D images. Dynamic scenes are quite common in real scenarios, rendering which is important but challenging as complex motions need to be modeled with both spatially and temporally sparse input. + +NeRF [32] has achieved great success in synthesizing novel view images by representing scenes with implicit functions. The volume rendering techniques [7] are introduced to connect 2D images and 3D scenes. However, the original NeRF method bears big training and rendering costs. Though some NeRF variants [5, 8, 10, 11, 33, 44, 47] reduce the training time from days to minutes, the rendering process still bears a non-negligible latency. + +Recent 3D Gaussian Splatting (3D-GS) [19] signifi + +cantly boosts the rendering speed to a real-time level by representing the scene as 3D Gaussians. The cumbersome volume rendering in the original NeRF is replaced with efficient differentiable splatting [57], which directly projects 3D Gaussian onto the 2D image plane. 3D-GS not only enjoys real-time rendering speed but also represents the scene more explicitly, making it easier to manipulate the scene representation. + +However, 3D-GS focuses on the static scenes. Extending it to dynamic scenes as a 4D representation is a reasonable, important but difficult topic. The key challenge lies in modeling complicated point motions from sparse input. 3D-GS holds a natural geometry prior by representing scenes with point-like Gaussians. One direct and effective extension approach is to construct 3D Gaussians at each timestamp [30] but the storage/memory cost will multiply especially for long input sequences. Our goal is to construct a compact representation while maintaining both training and rendering efficiency, i.e. 4D Gaussian Splatting (4D-GS). To this end, we propose to represent Gaussian motions and shape changes by an efficient Gaussian deformation field network, containing a temporal-spatial structure encoder and an extremely tiny multi-head Gaussian deformation decoder. Only one set of canonical 3D Gaussians is maintained. For each timestamp, the canonical 3D Gaussians will be transformed by the Gaussian deformation field into new positions with new shapes. The transformation process represents both the Gaussian motion and deformation. Note that different from modeling motions of each Gaussian separately [30, 55], the spatial-temporal structure encoder can connect different adjacent 3D Gaussians to predict more accurate motions and shape deformation. Then the deformed 3D Gaussians can be directly splatted for rendering the according-timestamp image. Our contributions can be summarized as follows. + +- An efficient 4D Gaussian splating framework with an efficient Gaussian deformation field is proposed by modeling both Gaussian motion and Gaussian shape changes across time. +- A multi-resolution encoding method is proposed to connect the nearby 3D Gaussians and build rich 3D Gaussian features by an efficient spatial-temporal structure encoder. 
+- 4D-GS achieves real-time rendering on dynamic scenes, up to 82 FPS at a resolution of $800 \times 800$ for synthetic datasets and 30 FPS at a resolution of $1352 \times 1014$ in real datasets, while maintaining comparable or superior performance than previous state-of-the-art (SOTA) methods and shows potential for editing and tracking in 4D scenes. + +# 2. Related Works + +In this section, we simply review the difference of dynamic NeRFs in Sec. 2.1, then discuss the point clouds-based neural rendering algorithm in Sec. 2.2. + +- Original Sampled Points +- Canonical Mapped Points +The Original Cast Ray +The Canonical Mapped Ray + +- Original Sampled Points +The Original Cast Ray +Time Features of the Points + +![](images/ef1850bc3b5475d2432be61a873fecf2df6b0c6264f71e71debb0238f3bb80f6.jpg) +(a) Canonical Mapping Volume Rendering + +![](images/9d40cb415fcf5860dbe341a088ea28c52200b20dcaed0dabcd4d4d035f716dc6.jpg) +(b) Time-aware Volume Rendering + +![](images/b15ec9426dcf64169ad93fcee106d21566e9bd5e5efb974d017d601a668cb17e.jpg) +(c) 4D Gaussian Splitting +Figure 2. Illustration of different dynamic scene rendering methods. (a) Points are sampled in the casted ray during volume rendering. The point deformation fields proposed in [8, 39] map the points into a canonical space. (b) Time-aware volume rendering computes the features of each point directly and does not change the rendering path. (c) The Gaussian deformation field converts original 3D Gaussians into another group of 3D Gaussians with a certain timestamp. + +$\bullet$ Original 3D Gaussians G +$\bullet$ Deformed 3D Gaussians $G^{\prime}$ +Gaussian Deformation Field $\mathrm{F}(G, t_i)$ +Gaussian Rasterization Paths + +# 2.1. Novel View Synthesis + +Novel view synthesis is a important and challenging task in 3D reconstruction. Much approaches are proposed to represent a 3D object and render novel views. Efficient representations such as light fields [4], mesh [6, 15, 24, 46], voxels [16, 18, 23], multi-planes [9] can render high quality image with enough supervisions. NeRF-based approaches [3, 32, 59] demonstrate that implicit radiance fields can effectively learn scene representations and synthesize high-quality novel views. [35, 36, 39] have challenged the static hypothesis, expanding the boundary of novel view synthesis for dynamic scenes. [8] proposes to use an explicit voxel grid to model temporal information, accelerating the learning time for dynamic scenes to half an hour and applied in [17, 29, 56]. The proposed deformation-based neural rendering methods are shown in Fig. 2 (a). Flow-based [13, 25, 29, 48, 61] methods adopting warping algorithm to synthesis novel views by blending nearby frames. [5, 11, 12, 22, 44, 49] represent further advancements in faster dynamic scene learning by adopting decomposed neural voxels. They treat sampled points in each timestamp individually as shown in Fig. 2 (b). [14, 27, 38, 50, 51, 53] are efficient methods to handle multi-view setups. The aforementioned methods though achieve fast training speed, real-time rendering for dynamic scenes is still challenging, especially for monocular input. Our method aims at constructing a highly efficient training and rendering pipeline in Fig. 2 (c), while maintaining the quality, even for sparse inputs. + +![](images/79abdf0621f574dc702f76de86f319a1f46faf3cc8d3c4e6ffe3deb822ac0349.jpg) +Figure 3. The overall pipeline of our model. 
Given a group of 3D Gaussians $\mathcal{G}$ , we extract the center coordinate of each 3D Gaussian $\mathcal{X}$ and timestamp $t$ to compute the voxel feature by querying multi-resolution voxel planes. Then a tiny multi-head Gaussian deformation decoder is used to decode the feature and get the deformed 3D Gaussians $\mathcal{G}'$ at timestamp $t$ . The deformed Gaussians are then splatted to the rendered image. + +# 2.2. Neural Rendering with Point Clouds + +Effectively representing 3D scenes remains a challenging topic. The community has explored various neural representations [32], e.g. meshes, point clouds [54], voxels [10], and hybrid approaches [33, 47]. Point-cloud-based methods [28, 40, 41, 58] initially target at 3D segmentation and classification. A representative approach for rendering presented in [1, 54] combines point cloud representations with volume rendering, achieving rapid convergence speed even for dynamic novel view synthesis [34, 61]. [20, 21, 42] adopt differential point rendering technique for scene reconstructions. + +Recently, 3D-GS [19] is notable for its pure explicit representation and differential point-based splatting methods, enabling real-time rendering of novel views. Dynamic3DGS [30] models dynamic scenes by tracking the position and variance of each 3D Gaussian at each timestamp $t_i$ . An explicit table is utilized to store information about each 3D Gaussian at every timestamp, leading to a linear memory consumption increase, denoted as $O(t\mathcal{N})$ , in which $\mathcal{N}$ is num of 3D Gaussians. For long-term scene reconstruction, the storage cost will become non-negligible. The memory complexity of our approach only depends on the number of 3D Gaussians and parameters of Gaussians deformation fields network $\mathcal{F}$ , which is denoted as $O(\mathcal{N} + \mathcal{F})$ . [55] adds a marginal temporal Gaussian distribution into the origin 3D Gaussians, which uplift 3D Gaussians into 4DHowever, it may cause each 3D Gaussian to only focus on their local temporal space. [26] track each 3D Gaussians individually. Our approach also models 3D Gaussian motions but with a compact network, resulting in highly efficient training efficiency and real-time rendering. + +# 3. Preliminary + +In this section, we simply review the representation and rendering process of 3D-GS [19] in Sec. 3.1 and the formula of dynamic NeRFs in Sec. 3.2. + +# 3.1. 3D Gaussian Splatting + +3D Gaussians [19] is an explicit 3D scene representation in the form of point clouds. Each 3D Gaussian is characterized by a covariance matrix $\Sigma$ and a center point $\mathcal{X}$ , which is referred to as the mean value of the Gaussian: + +$$ +G (X) = e ^ {- \frac {1}{2} \mathcal {X} ^ {T} \Sigma^ {- 1} \mathcal {X}}. \tag {1} +$$ + +For differentiable optimization, the covariance matrix $\Sigma$ can be decomposed into a scaling matrix $\mathbf{S}$ and a rotation matrix $\mathbf{R}$ : + +$$ +\Sigma = \mathbf {R} \mathbf {S} \mathbf {S} ^ {T} \mathbf {R} ^ {T}. \tag {2} +$$ + +When rendering novel views, differential splatting [57] is employed for the 3D Gaussians within the camera planes. As introduced by [62], using a viewing transform matrix $W$ and the Jacobian matrix $J$ of the affine approximation of the projective transformation, the covariance matrix $\Sigma'$ in camera coordinates can be computed as + +$$ +\Sigma^ {\prime} = J W \Sigma W ^ {T} J ^ {T}. 
\tag {3}
$$

In summary, each 3D Gaussian is characterized by the following attributes: position $\mathcal{X} \in \mathbb{R}^3$, color defined by spherical harmonic (SH) coefficients $\mathcal{C} \in \mathbb{R}^k$ (where $k$ is the number of SH coefficients), opacity $\alpha \in \mathbb{R}$, rotation factor $r \in \mathbb{R}^4$, and scaling factor $s \in \mathbb{R}^3$. Specifically, for each pixel, the color and opacity of all the Gaussians are computed using the Gaussian representation in Eq. 1. The blending of $N$ ordered points that overlap the pixel is given by the formula:

$$
C = \sum_{i \in N} c_{i} \alpha_{i} \prod_{j = 1}^{i - 1} (1 - \alpha_{j}). \tag {4}
$$

Here, $c_{i}$ and $\alpha_{i}$ denote the color and density of a point, computed from a 3D Gaussian $G$ with covariance $\Sigma$ multiplied by a learnable per-point opacity and SH color coefficients.

# 3.2. Dynamic NeRFs with Deformation Fields

All dynamic NeRF algorithms can be formulated as:

$$
c, \sigma = \mathcal{M} (\mathbf{x}, d, t, \lambda), \tag {5}
$$

where $\mathcal{M}$ is a mapping from the 8D space $(\mathbf{x}, d, t, \lambda)$ to the 4D space $(c, \sigma)$. Here, $\mathbf{x}$ denotes the spatial point, $\lambda$ is an optional input used to model topological and appearance changes as in [36], and $d$ stands for the view direction.

As shown in Fig. 2 (a), deformation-based NeRF methods estimate a world-to-canonical mapping with a deformation network $\phi_t: (\mathbf{x}, t) \to \Delta \mathbf{x}$. A separate network is then introduced to compute the volume density and view-dependent RGB color along each ray. The rendering formula can be expressed as:

$$
c, \sigma = \operatorname{NeRF} (\mathbf{x} + \Delta \mathbf{x}, d, \lambda), \tag {6}
$$

where 'NeRF' stands for the vanilla NeRF pipeline and $\lambda$ is a frame-dependent code to model topological and appearance changes [31, 36].

However, our 4D Gaussian splatting framework presents a novel rendering technique. We compute the canonical-to-world mapping directly with a Gaussian deformation field network $\mathcal{F}$ at time $t$ and then apply differentiable splatting [19], which also enables computing backward flow and tracking for 3D Gaussians.

# 4. Method

Sec. 4.1 introduces the overall 4D Gaussian Splatting framework. Then, the Gaussian deformation field is proposed in Sec. 4.2. Finally, we describe the optimization process in Sec. 4.3.

# 4.1. 4D Gaussian Splatting Framework

As shown in Fig. 3, given a view matrix $M = [R, T]$ and a timestamp $t$, our 4D Gaussian splatting framework includes 3D Gaussians $\mathcal{G}$ and a Gaussian deformation field network $\mathcal{F}$. Then a novel-view image $\hat{I}$ is rendered by differentiable splatting [57] $\mathcal{S}$ following $\hat{I} = \mathcal{S}(M, \mathcal{G}^{\prime})$, where $\mathcal{G}^{\prime} = \Delta \mathcal{G} + \mathcal{G}$.

Specifically, the deformation of the 3D Gaussians $\Delta \mathcal{G}$ is produced by the Gaussian deformation field network, $\Delta \mathcal{G} = \mathcal{F}(\mathcal{G}, t)$, in which the spatial-temporal structure encoder $\mathcal{H}$

![](images/3ec68fe898331e76563915399716aa1fd62987de6864ea790d836aecf93278fa.jpg)
Figure 4. Illustration of the optimization process. With static 3D Gaussian initialization, our model can learn high-quality 3D Gaussians for the motion part.
can encode both the temporal and spatial features of the 3D Gaussians, $f_{d} = \mathcal{H}(\mathcal{G},t)$, and the multi-head Gaussian deformation decoder $\mathcal{D}$ can decode the features and predict each 3D Gaussian's deformation $\Delta \mathcal{G} = \mathcal{D}(f_d)$, so that the deformed 3D Gaussians $\mathcal{G}'$ can be obtained.

The rendering process of our 4D Gaussian splatting is depicted in Fig. 2 (c). Our 4D Gaussian splatting converts the original 3D Gaussians $\mathcal{G}$ into another group of 3D Gaussians $\mathcal{G}'$ given a timestamp $t$, maintaining the effectiveness of differentiable splatting [57].

# 4.2. Gaussian Deformation Field Network

The network that learns the Gaussian deformation field includes an efficient spatial-temporal structure encoder $\mathcal{H}$ and a Gaussian deformation decoder $\mathcal{D}$ for predicting the deformation of each 3D Gaussian.

Spatial-Temporal Structure Encoder. Nearby 3D Gaussians always share similar spatial and temporal information. To model the features of 3D Gaussians effectively, we introduce an efficient spatial-temporal structure encoder $\mathcal{H}$ including a multi-resolution HexPlane $R(i,j)$ and a tiny MLP $\phi_d$, inspired by [5, 8, 11, 44]. While a vanilla 4D neural voxel is memory-consuming, we adopt a 4D K-Planes [11] module to decompose the 4D neural voxel into 6 planes. All 3D Gaussians in a certain area can be contained in the bounding plane voxels, and the Gaussians' deformations can also be encoded in nearby temporal voxels.

Specifically, the spatial-temporal structure encoder $\mathcal{H}$ contains 6 multi-resolution plane modules $R_{l}(i,j)$ and a tiny MLP $\phi_d$, i.e. $\mathcal{H}(\mathcal{G},t) = \{R_l(i,j),\phi_d|(i,j)\in \{(x,y),(x,z),(y,z),(x,t),(y,t),(z,t)\}, l\in \{1,2\}\}$. The position $\mu = (x,y,z)$ is the mean value of the 3D Gaussians $\mathcal{G}$. Each voxel module is defined by $R(i,j)\in \mathbb{R}^{h\times lN_i\times lN_j}$, where $h$ stands for the hidden dimension of the features, $N$ denotes the basic resolution of the voxel grid, and $l$ is the upsampling scale. This entails encoding the information of the 3D Gaussians within six 2D voxel planes while considering temporal information.

![](images/589f60be5cc17db5ac9d7db98e30f7ded964ca1f15788b98b84e05478d29dd59.jpg)
Figure 5. Visualization of synthetic datasets compared with other models [5, 8, 11, 17, 19, 49]. The rendering results of [11] are displayed with a default green background. We have adopted their rendering settings.

The formula for computing the separate voxel features is as follows:

$$
f_{h} = \bigcup_{l} \prod \operatorname{interp} \left(R_{l}(i, j)\right), \tag {7}
$$

$$
(i, j) \in \{(x, y), (x, z), (y, z), (x, t), (y, t), (z, t)\}.
$$

$f_{h} \in \mathbb{R}^{h * l}$ is the feature of the neural voxels. 'interp' denotes bilinear interpolation for querying the voxel features located at the 4 vertices of the grid. The production process is similar to [11]. Then a tiny MLP $\phi_{d}$ merges all the features by $f_{d} = \phi_{d}(f_{h})$.

Multi-head Gaussian Deformation Decoder. When all the features of the 3D Gaussians are encoded, we can compute any desired variable with a multi-head Gaussian deformation decoder $\mathcal{D} = \{\phi_x,\phi_r,\phi_s\}$. Separate MLPs are employed to compute the deformation of position $\Delta \mathcal{X} = \phi_{x}(f_{d})$, rotation $\Delta r = \phi_r(f_d)$, and scaling $\Delta s = \phi_s(f_d)$.
Then, the deformed feature $(\mathcal{X}',r',s')$ can be addressed as: + +$$ +\left(\mathcal {X} ^ {\prime}, r ^ {\prime}, s ^ {\prime}\right) = \left(\mathcal {X} + \Delta \mathcal {X}, r + \Delta r, s + \Delta s\right). \tag {8} +$$ + +Finally, we obtain the deformed 3D Gaussians $\mathcal{G}' = \{\mathcal{X}', s', r', \sigma, \mathcal{C}\}$ . + +# 4.3. Optimization + +3D Gaussian Initialization. [19] shows that 3D Gaussians can be well-trained with structure from motion (SfM) [43] points initialization. Similarly, 4D Gaussians should also be fine-tuned in proper 3D Gaussian initialization. We optimize 3D Gaussians at initial 3000 iterations for warm-up and then render images with 3D Gaussians $\hat{I} = S(M, \mathcal{G})$ instead of 4D Gaussians $\hat{I} = S(M, \mathcal{G}')$ . The illustration of the optimization process is shown in Fig. 4. + +Loss Function. Similar to other reconstruction methods [8, 19, 39], we use the L1 color loss to supervise the training process. A grid-based total-variational loss [5, 8, 11, 47] $\mathcal{L}_{tv}$ is also applied. + +$$ +\mathcal {L} = | \hat {I} - I | + \mathcal {L} _ {t v}. \tag {9} +$$ + +# 5. Experiment + +In this section, we mainly introduce the hyperparameters and datasets of our settings in Sec. 5.1 and the results between different datasets will be compared with [2, 5, 8, 11, 19, 27, 45, 49, 50] in Sec. 5.2. Then, ablation studies are proposed to prove the effectiveness of our approaches in Sec. 5.3 and more discussion about 4D-GS in Sec. 5.4. Finally, we discuss the limitation of our proposed 4D-GS in Sec. 5.5. + +# 5.1. Experimental Settings + +Our implementation is primarily based on the PyTorch [37] framework and tested in a single RTX 3090 GPU, and we've fine-tuned our optimization parameters by the configuration outlined in the 3D-GS [19]. More hyperparameters will be shown in the appendix. + +Synthetic Dataset. We primarily assess the performance of our model using synthetic datasets, as introduced by D-NeRF [39]. These datasets are designed for monocular settings, although it's worth noting that the camera poses for each timestamp are close to randomly generated. Each scene within these datasets contains dynamic frames, ranging from 50 to 200 in number. + +Real-world Datasets. We utilize datasets provided by HyperNeRF [36] and Neu3D's [22] as benchmark datasets to evaluate the performance of our model in real-world scenarios. The Nerfies dataset is captured using one or two + +![](images/2d0635783a41ee2783b754c9eaae16cb23001622ef636a0efded0f08bd6a891f.jpg) +Figure 6. Visualization of the HyperNeRF [36] datasets compared with other methods [8, 17, 19, 36]. 'GT' stands for ground truth images. + +Table 1. Quantitative results on the synthesis dataset. The best and the second best results are denoted by pink and yellow. The rendering resolution is set to $800 \times 800$ . "Time" in the table stands for training times. + +
| Model | PSNR (dB) ↑ | SSIM ↑ | LPIPS ↓ | Time ↓ | FPS ↑ | Storage (MB) ↓ |
| --- | --- | --- | --- | --- | --- | --- |
| TiNeuVox-B [8] | 32.67 | 0.97 | 0.04 | 28 mins | 1.5 | 48 |
| KPlanes [11] | 31.61 | 0.97 | - | 52 mins | 0.97 | 418 |
| HexPlane-Slim [5] | 31.04 | 0.97 | 0.04 | 11m 30s | 2.5 | 38 |
| 3D-GS [19] | 23.19 | 0.93 | 0.08 | 10 mins | 170 | 10 |
| FFDNeRF [17] | 32.68 | 0.97 | 0.04 | - | < 1 | 440 |
| MSTH [49] | 31.34 | 0.98 | 0.02 | 6 mins | - | - |
| V4D [12] | 33.72 | 0.98 | 0.02 | 6.9 hours | 2.08 | 377 |
| Ours | 34.05 | 0.98 | 0.02 | 8 mins | 82 | 18 |
cameras, following straightforward camera motion, while the Neu3D dataset is captured using 15 to 20 static cameras, involving extended periods and intricate camera motions. We use the points computed by SfM [43] from the first frame of each video in the Neu3D dataset and 200 frames randomly selected in HyperNeRF's.

# 5.2. Results

We primarily assess our experimental results using various metrics, encompassing peak signal-to-noise ratio (PSNR), the perceptual quality measure LPIPS [60], the structural similarity index (SSIM) [52] and its extensions including the structural dissimilarity index measure (DSSIM) and the multi-scale structural similarity index (MS-SSIM), as well as FPS, training time, and storage.

To assess the quality of novel view synthesis, we benchmark against several state-of-the-art methods, including [5, 8, 11, 12, 17, 19, 27, 49]. The results are summarized in Tab. 1. While current dynamic hybrid representations can produce high-quality results, they often come with the drawback of rendering speed. The lack of dynamic motion modeling makes [19] fail to reconstruct dynamic scenes. In contrast, our method enjoys both the highest rendering quality on the synthetic dataset and exceptionally fast rendering speed, while keeping extremely low storage consumption and convergence time.

Additionally, the results obtained on real-world datasets are presented in Tab. 2 and Tab. 3. It becomes apparent that some NeRFs [2, 5, 45] suffer from slow convergence, and other grid-based NeRF methods [5, 8, 11, 49] encounter difficulties when attempting to capture intricate object details. In stark contrast, our method reaches comparable rendering quality with fast convergence and excels in free-view rendering speed in indoor cases. Though [27] achieves higher quality than ours, its need for multi-camera setups makes it hard to model monocular scenes, and other methods are also limited in free-view rendering speed and storage.

# 5.3. Ablation Study

Spatial-Temporal Structure Encoder. The explicit HexPlane encoder $R_{l}(i,j)$ possesses the capacity to retain the 3D Gaussians' spatial and temporal information, which reduces storage consumption in comparison with purely explicit methods [30]. Discarding this module, we observe

Table 2. Quantitative results on HyperNeRF's [36] vrig dataset. Rendering resolution is set to $960 \times 540$.
| Model | PSNR (dB) ↑ | MS-SSIM ↑ | Time ↓ | FPS ↑ | Storage (MB) ↓ |
| --- | --- | --- | --- | --- | --- |
| Nerfies [35] | 22.2 | 0.803 | ~ hours | < 1 | - |
| HyperNeRF [36] | 22.4 | 0.814 | 32 hours | < 1 | - |
| TiNeuVox-B [8] | 24.3 | 0.836 | 30 mins | 1 | 48 |
| 3D-GS [19] | 19.7 | 0.680 | 40 mins | 55 | 52 |
| FFDNeRF [17] | 24.2 | 0.842 | - | 0.05 | 440 |
| V4D [12] | 24.8 | 0.832 | 5.5 hours | 0.29 | 377 |
| Ours | 25.2 | 0.845 | 30 mins | 34 | 61 |
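To make the plane query of Eq. 7 and the spatial-temporal structure encoder ablated above more concrete, the following is a minimal PyTorch-style sketch. The class name, feature dimension, base resolution, number of levels, and initialization are illustrative assumptions, not the released 4D-GS implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class HexPlaneField(nn.Module):
    """Minimal sketch of the multi-resolution plane query in Eq. 7 (assumed shapes)."""

    # the six (i, j) index pairs over the (x, y, z, t) axes
    PAIRS = [(0, 1), (0, 2), (1, 2), (0, 3), (1, 3), (2, 3)]

    def __init__(self, feat_dim=32, base_res=64, levels=(1, 2)):
        super().__init__()
        self.planes = nn.ParameterList()
        for l in levels:
            for _ in self.PAIRS:
                res = base_res * l
                # one learnable 2D feature plane R_l(i, j)
                self.planes.append(nn.Parameter(0.1 * torch.randn(1, feat_dim, res, res)))

    def forward(self, xyzt):
        # xyzt: (N, 4) Gaussian centers plus timestamp, normalized to [-1, 1]
        feats = []
        for idx, plane in enumerate(self.planes):
            i, j = self.PAIRS[idx % len(self.PAIRS)]
            grid = xyzt[:, [i, j]].view(1, -1, 1, 2)            # (1, N, 1, 2)
            f = F.grid_sample(plane, grid, align_corners=True)  # bilinear, (1, C, N, 1)
            feats.append(f[0, :, :, 0].t())                     # (N, C)
        # product over the six planes of each level, concatenation over levels
        per_level = [torch.stack(feats[k:k + 6]).prod(0) for k in range(0, len(feats), 6)]
        return torch.cat(per_level, dim=-1)                     # (N, C * num_levels)

# Example: HexPlaneField()(torch.rand(100, 4) * 2 - 1) has shape (100, 64).
```

Multiplying the six per-level plane features and concatenating across levels mirrors the K-Planes-style decomposition referenced in Sec. 4.2; a tiny MLP $\phi_d$ would then merge the result into $f_d$.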
+ +Table 3. Quantitative results on the Neu3D's [22] dataset, rendering resolution is set to $1352 \times 1014$ . + +
| Model | PSNR (dB) ↑ | D-SSIM ↓ | LPIPS ↓ | Time ↓ | FPS ↑ | Storage (MB) ↓ |
| --- | --- | --- | --- | --- | --- | --- |
| NeRFPlayer [45] | 30.69 | 0.034 | 0.111 | 6 hours | 0.045 | - |
| HyperReel [2] | 31.10 | 0.036 | 0.096 | 9 hours | 2.0 | 360 |
| HexPlane-all* [5] | 31.70 | 0.014 | 0.075 | 12 hours | 0.2 | 250 |
| KPlanes [11] | 31.63 | - | - | 1.8 hours | 0.3 | 309 |
| Im4D [27] | 32.58 | - | 0.208 | 28 mins | ~5 | 93 |
| MSTH [49] | 32.37 | 0.015 | 0.056 | 20 mins | 2 (15‡) | 135 |
| Ours | 31.15 | 0.016 | 0.049 | 40 mins | 30 | 90 |
*: The metrics of this model are tested without "coffee martini" and the resolution is set to $1024 \times 768$.
‡: The FPS is tested with fixed-view rendering.

that using only a shallow MLP $\phi_d$ falls short in modeling complex deformations across various settings. Tab. 4 demonstrates that, while this variant incurs minimal memory cost, it comes at the expense of rendering quality.

Gaussian Deformation Decoder. Our proposed Gaussian deformation decoder $\mathcal{D}$ decodes the features from the spatial-temporal structure encoder $\mathcal{H}$. All the changes of the 3D Gaussians can be explained by the separate MLPs $\{\phi_x, \phi_r, \phi_s\}$. As shown in Tab. 4, 4D Gaussians cannot fit dynamic scenes well without modeling 3D Gaussian motion. Meanwhile, the movement of human body joints typically manifests as stretching and twisting of surface details at a macroscopic level. To model these movements accurately, the size and shape of the 3D Gaussians should also be adjusted accordingly; otherwise, details may be underfitted during excessive stretching, or the movement of objects at a microscopic level cannot be simulated correctly.

3D Gaussian Initialization. In some cases without SfM [43] point initialization, training 4D-GS directly may cause difficulty in convergence. Optimizing 3D Gaussians for warm-up helps by: (a) keeping some 3D Gaussians in the dynamic part, which relieves the pressure of learning large deformations with 4D Gaussians, as shown in Fig. 4; (b) learning proper 3D Gaussians $\mathcal{G}$ and letting the deformation field pay more attention to the dynamic part; (c) avoiding numeric errors when optimizing the Gaussian deformation network $\mathcal{F}$ and keeping the training process stable.

![](images/d5ca3ea5b0c79315f75c77d126f979c7bcaaa1db0e04ac6bdbcae8033e1e4a6d.jpg)
Figure 7. Visualization of tracking with 3D Gaussians. Each line in the second row shows the trajectory of a 3D Gaussian.

Tab. 4 also shows that if we train our model without the warm-up coarse stage, the rendering quality suffers.

# 5.4. Discussions

Tracking with 3D Gaussians. Tracking in 3D is also an important task, and [17] also shows tracking of objects' motion in 3D. Different from Dynamic3DGS [30], our method can even track objects in monocular settings with very low storage, i.e., 10 MB for the 3D Gaussians $\mathcal{G}$ and 8 MB for the Gaussian deformation field network $\mathcal{F}$. Fig. 7 shows the 3D Gaussians' deformation at certain timestamps.

Table 4. Ablation studies on synthetic datasets using our proposed methods.
| Model | PSNR (dB) ↑ | SSIM ↑ | LPIPS ↓ | Time ↓ | FPS ↑ | Storage (MB) ↓ |
| --- | --- | --- | --- | --- | --- | --- |
| Ours w/o HexPlane $R_l(i,j)$ | 27.05 | 0.95 | 0.05 | 4 mins | 140 | 12 |
| Ours w/o initialization | 31.91 | 0.97 | 0.03 | 7.5 mins | 79 | 18 |
| Ours w/o $\phi_x$ | 26.67 | 0.95 | 0.07 | 8 mins | 82 | 17 |
| Ours w/o $\phi_r$ | 33.08 | 0.98 | 0.03 | 8 mins | 83 | 17 |
| Ours w/o $\phi_s$ | 33.02 | 0.98 | 0.03 | 8 mins | 82 | 17 |
| Ours | 34.05 | 0.98 | 0.02 | 8 mins | 82 | 18 |
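As a concrete illustration of the components ablated in Tab. 4, below is a minimal sketch of the separate deformation heads $\phi_x$, $\phi_r$, $\phi_s$ from Eq. 8, combined with the 3000-iteration warm-up described in Sec. 4.3. Layer widths and names are assumptions for illustration, not the authors' released configuration.

```python
import torch
import torch.nn as nn

def tiny_mlp(in_dim, out_dim, hidden=64):
    # small two-layer head; the width is an illustrative guess
    return nn.Sequential(nn.Linear(in_dim, hidden), nn.ReLU(), nn.Linear(hidden, out_dim))

class DeformationDecoder(nn.Module):
    """Sketch of the multi-head decoder phi_x / phi_r / phi_s ablated in Tab. 4 (Eq. 8)."""

    def __init__(self, feat_dim=64):
        super().__init__()
        self.phi_x = tiny_mlp(feat_dim, 3)  # position offset, delta X
        self.phi_r = tiny_mlp(feat_dim, 4)  # rotation offset, delta r (quaternion)
        self.phi_s = tiny_mlp(feat_dim, 3)  # scaling offset, delta s

    def forward(self, f_d, xyz, rot, scale, step, warmup_steps=3000):
        # During the first ~3000 warm-up iterations only the static 3D Gaussians
        # are optimized, so no deformation is applied (Sec. 4.3).
        if step < warmup_steps:
            return xyz, rot, scale
        return (xyz + self.phi_x(f_d),
                rot + self.phi_r(f_d),
                scale + self.phi_s(f_d))
```

Removing `phi_x` corresponds to the "Ours w/o $\phi_x$" row above (no motion is modeled), while removing `phi_r` or `phi_s` disables the shape adjustments needed for stretching and twisting surfaces.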
![](images/c6bd6b65e62c5b3d545a492d97594d68dd04549c7125eda500e39c47fc4e9d18.jpg)
Figure 8. Visualization of composition with 4D Gaussians.

![](images/32fa9e66fe525318a6f0c790ad66bfed2b98622fa8fd0d2a3b3a3133fb4dc8ba.jpg)
Figure 9. Visualization of the relationship between rendering speed and the number of 3D Gaussians in the rendered screen. All the tests are performed on the synthetic dataset.

Composition with 4D Gaussians. Similar to Dynamic3DGS [30], our proposed method also supports editing and composition with 4D Gaussians, as shown in Fig. 8. Thanks to the explicit representation of 3D Gaussians, all the trained models can predict deformed 3D Gaussians in the same space following $\mathcal{G}' = \{\mathcal{G}_1', \mathcal{G}_2', \dots, \mathcal{G}_n'\}$, and differentiable rendering [57] can project all the point clouds onto the viewpoint by $\hat{I} = \mathcal{S}(M, \mathcal{G}')$.

Analysis of Rendering Speed. As shown in Fig. 9, we also test the relationship between the number of points in the rendered screen and the rendering speed at a resolution of $800 \times 800$.

We find that if fewer than 30,000 points are rendered, the rendering speed can reach up to 90 FPS. The configuration of the Gaussian deformation field is discussed in the appendix. To achieve real-time rendering speed, one should strike a balance among the rendering resolution, the 4D Gaussian representation (including the number of 3D Gaussians), the capacity of the Gaussian deformation field network, and any other hardware constraints.

# 5.5. Limitations

Though 4D-GS can indeed attain rapid convergence and yield real-time rendering in many scenarios, there are a few key challenges to address. First, large motions, the absence of background points, and imprecise camera poses make it hard to optimize 4D Gaussians. Moreover, 4D-GS still cannot separate the joint motion of static and dynamic parts under monocular settings without any additional supervision. Finally, a more compact algorithm needs to be designed to handle urban-scale reconstruction, since huge numbers of 3D Gaussians make querying the Gaussian deformation field expensive.

# 6. Conclusion

This paper proposes 4D Gaussian splatting to achieve real-time dynamic scene rendering. An efficient deformation field network is constructed to accurately model Gaussian motions and shape deformations, where adjacent Gaussians are connected via a spatial-temporal structure encoder. Connections between Gaussians lead to more complete deformed geometry, effectively avoiding avulsion. Our 4D Gaussians can not only model dynamic scenes but also have the potential for 4D object tracking and editing.

# Acknowledgments

This work was supported by the National Natural Science Foundation of China (No. 62376102). The authors would like to thank Haotong Lin for providing the quantitative results of Im4D [27].

# References

[1] Jad Abou-Chakra, Feras Dayoub, and Niko Sünderhauf. Particlenerf: Particle based encoding for online neural radiance fields in dynamic scenes. arXiv preprint arXiv:2211.04041, 2022. 3
[2] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 5, 6, 7
[3] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan.
Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2 +[4] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2 +[5] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 130-141, 2023. 1, 2, 4, 5, 6, 7 +[6] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (ToG), 34(4):1-13, 2015. 2 +[7] Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. ACM Siggraph Computer Graphics, 22(4): 65-74, 1988. 1 +[8] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In SIGGRAPH Asia 2022 Conference Papers, pages 1-9, 2022. 1, 2, 4, 5, 6, 7 +[9] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snively, and Richard Tucker. Deepview: View synthesis with learned gradient descent. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 2 +[10] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 1, 3 +[11] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12479–12488, 2023. 1, 2, 4, 5, 6, 7 + +[12] Wanshui Gan, Hongbin Xu, Yi Huang, Shifeng Chen, and Naoto Yokoya. V4d: Voxel for 4d novel view synthesis. IEEE Transactions on Visualization and Computer Graphics, 2023. 2, 6, 7 +[13] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5712-5721, 2021. 2 +[14] Xiangjun Gao, Jiaolong Yang, Jongyoo Kim, Sida Peng, Zicheng Liu, and Xin Tong. Mps-nerf: Generalizable 3d human rendering from multiview images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2 +[15] Kaiwen Guo, Feng Xu, Yangang Wang, Yebin Liu, and Qionghai Dai. Robust non-rigid motion tracking and surface reconstruction using 10 regularization. In Proceedings of the IEEE International Conference on Computer Vision, pages 3083-3091, 2015. 2 +[16] Kaiwen Guo, Peter Lincoln, Philip Davidson, Jay Busch, Xueming Yu, Matt Whalen, Geoff Harvey, Sergio Orts-Escolano, Rohit Pandey, Jason Dourgarian, et al. The relightables: Volumetric performance capture of humans with realistic relighting. ACM Transactions on Graphics (ToG), 38(6):1–19, 2019. 2 +[17] Xiang Guo, Jiadai Sun, Yuchao Dai, Guanying Chen, Xiaoting Ye, Xiao Tan, Errui Ding, Yumeng Zhang, and Jingdong Wang. Forward flow for novel view synthesis of dynamic scenes. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16022-16033, 2023. 2, 5, 6, 7 +[18] Tao Hu, Tao Yu, Zerong Zheng, He Zhang, Yebin Liu, and Matthias Zwicker. Hvtr: Hybrid volumetric-textural rendering for human avatars. In 2022 International Conference on 3D Vision (3DV), pages 197-208. IEEE, 2022. 2 +[19] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG), 42(4):1-14, 2023. 1, 3, 4, 5, 6, 7 +[20] Leonid Keselman and Martial Hebert. Approximate differentiable rendering with algebraic surfaces. In European Conference on Computer Vision, pages 596-614. Springer, 2022. 3 +[21] Leonid Keselman and Martial Hebert. Flexible techniques for differentiable rendering with 3d gaussians. arXiv preprint arXiv:2308.14737, 2023. 3 +[22] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 2, 5, 7 +[23] Zhong Li, Yu Ji, Wei Yang, Jinwei Ye, and Jingyi Yu. Robust 3d human motion reconstruction via dynamic template construction. In 2017 International Conference on 3D Vision (3DV), pages 496-505. IEEE, 2017. 2 +[24] Zhong Li, Minye Wu, Wangyiteng Zhou, and Jingyi Yu. 4d human body correspondences from panoramic depth maps. + +In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2877-2886, 2018. 2 +[25] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6498-6508, 2021. 2 +[26] Zhan Li, Zhang Chen, Zhong Li, and Yi Xu. Spacetime gaussian feature splatting for real-time dynamic view synthesis. arXiv preprint arXiv:2312.16812, 2023. 3 +[27] Haotong Lin, Sida Peng, Zhen Xu, Tao Xie, Xingyi He, Hu-jun Bao, and Xiaowei Zhou. High-fidelity and real-time novel view synthesis for dynamic scenes. In SIGGRAPH Asia Conference Proceedings, 2023. 2, 5, 6, 7, 8 +[28] Xingyu Liu, Mengyuan Yan, and Jeannette Bohg. Meteornet: Deep learning on dynamic 3d point cloud sequences. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9246-9255, 2019. 3 +[29] Yu-Lun Liu, Chen Gao, Andreas Meuleman, Hung-Yu Tseng, Ayush Saraf, Changil Kim, Yung-Yu Chuang, Johannes Kopf, and Jia-Bin Huang. Robust dynamic radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13-23, 2023. 2 +[30] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In 3DV, 2024. 2, 3, 6, 7, 8 +[31] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7210-7219, 2021. 4 +[32] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 
1, 2, 3 +[33] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 1, 3 +[34] Byeongjun Park and Changick Kim. Point-dynrf: Point-based dynamic radiance fields from a monocular video. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3171-3181, 2024. 3 +[35] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 2, 7 +[36] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 2, 4,5,6,7 + +[37] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019. 5 +[38] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4252-4262, 2023. 2 +[39] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10318–10327, 2021. 2, 5 +[40] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017, 3 +[41] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 3 +[42] Darius Rückert, Linus Franke, and Marc Stamminger. Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics (ToG), 41(4):1-14, 2022. 3 +[43] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4104-4113, 2016. 5, 6, 7 +[44] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16632-16642, 2023. 1, 2, 4 +[45] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 5, 6, 7 +[46] Zhuo Su, Lan Xu, Zerong Zheng, Tao Yu, Yebin Liu, and Lu Fang. Robustfusion: Human volumetric capture with data-driven visual cues using a rgbd camera. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part IV 16, pages 246-264. Springer, 2020. 
2 +[47] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5459-5469, 2022. 1, 3, 5 +[48] Fengrui Tian, Shaoyi Du, and Yueqi Duan. Mononerf: Learning a generalizable dynamic radiance field from + +monocular videos. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17903-17913, 2023. 2 +[49] Feng Wang, Zilong Chen, Guokang Wang, Yafei Song, and Huaping Liu. Masked space-time hash encoding for efficient dynamic scene reconstruction. Advances in neural information processing systems, 2023. 2, 5, 6, 7 +[50] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, Yafei Song, and Huaping Liu. Mixed neural voxels for fast multiview video synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19706-19716, 2023. 2, 5 +[51] Yiming Wang, Qin Han, Marc Habermann, Kostas Dani-ilidis, Christian Theobalt, and Lingjie Liu. Neus2: Fast learning of neural implicit surfaces for multi-view reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3295–3306, 2023. 2 +[52] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6 +[53] Qingshan Xu, Weihang Kong, Wenbing Tao, and Marc Pollefeys. Multi-scale geometric consistency guided and planar prior assisted multi-view stereo. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(4):4945-4963, 2022. 2 +[54] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5438–5448, 2022. 3 +[55] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv:2310.10642, 2023. 2, 3 +[56] Taoran Yi, Jiemin Fang, Xinggang Wang, and Wenyu Liu. Generalizable neural voxels for fast human radiance fields. arXiv preprint arXiv:2303.15387, 2023. 2 +[57] Wang Yifan, Felice Serena, Shihao Wu, Cengiz Öz Tireli, and Olga Sorkine-Hornung. Differentiable surface splatting for point-based geometry processing. ACM Transactions on Graphics (TOG), 38(6):1-14, 2019. 2, 3, 4, 8 +[58] Lequan Yu, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-net: Point cloud upsampling network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2790–2799, 2018. 3 +[59] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2 +[60] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 6 +[61] Kaichen Zhou, Jia-Xing Zhong, Sangyun Shin, Kai Lu, Yiyuan Yang, Andrew Markham, and Niki Trigoni. Dynpoint: Dynamic neural point for view synthesis. Advances in Neural Information Processing Systems, 36, 2024. 2, 3 + +[62] Matthias Zwicker, Hanspeter Pfister, Jeroen Van Baar, and Markus Gross. Surface splatting. 
In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 371-378, 2001. 3 \ No newline at end of file diff --git a/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/images.zip b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..1a641fa044be2410e7ba92f8c1ecc6b417d2f254 --- /dev/null +++ b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b9bfd120dc7f8bb3b12b07a5c6ea4bfe81a83dcd507a81e44fdc21f4aa500b9 +size 735919 diff --git a/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/layout.json b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..22060cf67c2b51d92abed44f0eeb73a874692df0 --- /dev/null +++ b/2024/4D Gaussian Splatting for Real-Time Dynamic Scene Rendering/layout.json @@ -0,0 +1,10101 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 99, + 103, + 496, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 103, + 496, + 121 + ], + "spans": [ + { + "bbox": [ + 99, + 103, + 496, + 121 + ], + "type": "text", + "content": "4D Gaussian Splatting for Real-Time Dynamic Scene Rendering" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "spans": [ + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "content": "Guanjun Wu" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "content": ", Taoran Yi" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "content": ", Jiemin Fang" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{3\\dagger}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "content": ", Lingxi Xie" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "content": ", Xiaopeng Zhang" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "content": ", Wei Wei" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "content": ", Wenyu Liu" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "content": ", Qi Tian" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "content": ", Xinggang Wang" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{2\\dagger}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": 
"text", + "content": "School of CS, Huazhong University of Science and Technology \n" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "content": "School of EIC, Huazhong University of Science and Technology \n" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 96, + 142, + 496, + 228 + ], + "type": "text", + "content": "Huawei Inc. {guajuwu, taoranyi, weiw, liuwy, xgwang}@hust.edu.cn {jaminfong, 198808xc, zxphistory}@gmail.com tian.qil@huawei.com" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 48, + 245, + 187, + 370 + ], + "blocks": [ + { + "bbox": [ + 48, + 245, + 187, + 370 + ], + "lines": [ + { + "bbox": [ + 48, + 245, + 187, + 370 + ], + "spans": [ + { + "bbox": [ + 48, + 245, + 187, + 370 + ], + "type": "image", + "image_path": "d73bdb002bf5289b6d8907683ba93efef171e8ac01f88f48bdee539a002af360.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 187, + 245, + 351, + 370 + ], + "blocks": [ + { + "bbox": [ + 187, + 245, + 351, + 370 + ], + "lines": [ + { + "bbox": [ + 187, + 245, + 351, + 370 + ], + "spans": [ + { + "bbox": [ + 187, + 245, + 351, + 370 + ], + "type": "image", + "image_path": "5a870c6bfe26650dfb77339e293ba392d3e580979a11e9a8ea07d02cdc240ba5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 350, + 242, + 536, + 392 + ], + "blocks": [ + { + "bbox": [ + 350, + 242, + 536, + 392 + ], + "lines": [ + { + "bbox": [ + 350, + 242, + 536, + 392 + ], + "spans": [ + { + "bbox": [ + 350, + 242, + 536, + 392 + ], + "type": "image", + "image_path": "73b2ae604c887edc3943e5666529abad127dbbc92887896cfe92677c2de6f41c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 396, + 546, + 437 + ], + "lines": [ + { + "bbox": [ + 46, + 396, + 546, + 437 + ], + "spans": [ + { + "bbox": [ + 46, + 396, + 546, + 437 + ], + "type": "text", + "content": "Figure 1. Our method achieves real-time rendering " + }, + { + "bbox": [ + 46, + 396, + 546, + 437 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 46, + 396, + 546, + 437 + ], + "type": "text", + "content": " for dynamic scenes at high image resolutions while maintaining high rendering quality. The right figure is tested on synthetic datasets, where the radius of the dot corresponds to the training time. \"Res\": resolution. " + }, + { + "bbox": [ + 46, + 396, + 546, + 437 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 46, + 396, + 546, + 437 + ], + "type": "text", + "content": " The rendering speed not only depends on the image resolution but also the number of 3D Gaussians and the scale of deformation fields which are determined by the complexity of the scene." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 435, + 192, + 447 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 435, + 192, + 447 + ], + "spans": [ + { + "bbox": [ + 143, + 435, + 192, + 447 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 460, + 289, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 460, + 289, + 676 + ], + "spans": [ + { + "bbox": [ + 45, + 460, + 289, + 676 + ], + "type": "text", + "content": "Representing and rendering dynamic scenes has been an important but challenging task. Especially, to accurately model complex motions, high efficiency is usually hard to guarantee. To achieve real-time dynamic scene rendering while also enjoying high training and storage efficiency, we propose 4D Gaussian Splatting (4D-GS) as a holistic representation for dynamic scenes rather than applying 3D-GS for each individual frame. In 4D-GS, a novel explicit representation containing both 3D Gaussians and 4D neural voxels is proposed. A decomposed neural voxel encoding algorithm inspired by HexPlane is proposed to efficiently build Gaussian features from 4D neural voxels and then a lightweight MLP is applied to predict Gaussian deformations at novel timestamps. Our 4D-GS method achieves real-time rendering under high resolutions, 82 FPS at an " + }, + { + "bbox": [ + 45, + 460, + 289, + 676 + ], + "type": "inline_equation", + "content": "800 \\times 800" + }, + { + "bbox": [ + 45, + 460, + 289, + 676 + ], + "type": "text", + "content": " resolution on an RTX 3090 GPU while maintaining comparable or better quality than previous state-of-the-art methods. More demos and code are available at" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 436, + 508, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 436, + 508, + 448 + ], + "spans": [ + { + "bbox": [ + 306, + 436, + 508, + 448 + ], + "type": "text", + "content": "https://guanjunwu.github.io/4dgs/." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 474, + 387, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 474, + 387, + 487 + ], + "spans": [ + { + "bbox": [ + 306, + 474, + 387, + 487 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 495, + 545, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 495, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 495, + 545, + 604 + ], + "type": "text", + "content": "Novel view synthesis (NVS) stands as a critical task in the domain of 3D vision and plays a vital role in many applications, e.g. VR, AR, and movie production. NVS aims at rendering images from any desired viewpoint or timestamp of a scene, usually requiring modeling the scene accurately from several 2D images. Dynamic scenes are quite common in real scenarios, rendering which is important but challenging as complex motions need to be modeled with both spatially and temporally sparse input." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 604, + 545, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 604, + 545, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 604, + 545, + 700 + ], + "type": "text", + "content": "NeRF [32] has achieved great success in synthesizing novel view images by representing scenes with implicit functions. The volume rendering techniques [7] are introduced to connect 2D images and 3D scenes. However, the original NeRF method bears big training and rendering costs. Though some NeRF variants [5, 8, 10, 11, 33, 44, 47] reduce the training time from days to minutes, the rendering process still bears a non-negligible latency." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 700, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 700, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 317, + 700, + 545, + 714 + ], + "type": "text", + "content": "Recent 3D Gaussian Splatting (3D-GS) [19] signifi" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 683, + 129, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 683, + 129, + 693 + ], + "spans": [ + { + "bbox": [ + 57, + 683, + 129, + 693 + ], + "type": "text", + "content": "*Equal contributions." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 59, + 693, + 107, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 693, + 107, + 703 + ], + "spans": [ + { + "bbox": [ + 59, + 693, + 107, + 703 + ], + "type": "text", + "content": "†Project Lead." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 59, + 703, + 135, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 135, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 135, + 712 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 59, + 703, + 135, + 712 + ], + "type": "text", + "content": " Corresponding author." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20310" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": "cantly boosts the rendering speed to a real-time level by representing the scene as 3D Gaussians. 
The cumbersome volume rendering in the original NeRF is replaced with efficient differentiable splatting [57], which directly projects 3D Gaussian onto the 2D image plane. 3D-GS not only enjoys real-time rendering speed but also represents the scene more explicitly, making it easier to manipulate the scene representation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 167, + 289, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 167, + 289, + 490 + ], + "spans": [ + { + "bbox": [ + 46, + 167, + 289, + 490 + ], + "type": "text", + "content": "However, 3D-GS focuses on the static scenes. Extending it to dynamic scenes as a 4D representation is a reasonable, important but difficult topic. The key challenge lies in modeling complicated point motions from sparse input. 3D-GS holds a natural geometry prior by representing scenes with point-like Gaussians. One direct and effective extension approach is to construct 3D Gaussians at each timestamp [30] but the storage/memory cost will multiply especially for long input sequences. Our goal is to construct a compact representation while maintaining both training and rendering efficiency, i.e. 4D Gaussian Splatting (4D-GS). To this end, we propose to represent Gaussian motions and shape changes by an efficient Gaussian deformation field network, containing a temporal-spatial structure encoder and an extremely tiny multi-head Gaussian deformation decoder. Only one set of canonical 3D Gaussians is maintained. For each timestamp, the canonical 3D Gaussians will be transformed by the Gaussian deformation field into new positions with new shapes. The transformation process represents both the Gaussian motion and deformation. Note that different from modeling motions of each Gaussian separately [30, 55], the spatial-temporal structure encoder can connect different adjacent 3D Gaussians to predict more accurate motions and shape deformation. Then the deformed 3D Gaussians can be directly splatted for rendering the according-timestamp image. Our contributions can be summarized as follows." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 492, + 287, + 647 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 47, + 492, + 287, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 492, + 287, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 492, + 287, + 538 + ], + "type": "text", + "content": "- An efficient 4D Gaussian splating framework with an efficient Gaussian deformation field is proposed by modeling both Gaussian motion and Gaussian shape changes across time." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 540, + 287, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 540, + 287, + 574 + ], + "spans": [ + { + "bbox": [ + 47, + 540, + 287, + 574 + ], + "type": "text", + "content": "- A multi-resolution encoding method is proposed to connect the nearby 3D Gaussians and build rich 3D Gaussian features by an efficient spatial-temporal structure encoder." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 575, + 287, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 575, + 287, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 575, + 287, + 647 + ], + "type": "text", + "content": "- 4D-GS achieves real-time rendering on dynamic scenes, up to 82 FPS at a resolution of " + }, + { + "bbox": [ + 47, + 575, + 287, + 647 + ], + "type": "inline_equation", + "content": "800 \\times 800" + }, + { + "bbox": [ + 47, + 575, + 287, + 647 + ], + "type": "text", + "content": " for synthetic datasets and 30 FPS at a resolution of " + }, + { + "bbox": [ + 47, + 575, + 287, + 647 + ], + "type": "inline_equation", + "content": "1352 \\times 1014" + }, + { + "bbox": [ + 47, + 575, + 287, + 647 + ], + "type": "text", + "content": " in real datasets, while maintaining comparable or superior performance than previous state-of-the-art (SOTA) methods and shows potential for editing and tracking in 4D scenes." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 657, + 138, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 657, + 138, + 670 + ], + "spans": [ + { + "bbox": [ + 47, + 657, + 138, + 670 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "In this section, we simply review the difference of dynamic NeRFs in Sec. 2.1, then discuss the point clouds-based neural rendering algorithm in Sec. 2.2." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 323, + 71, + 413, + 106 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 323, + 71, + 403, + 79 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 71, + 403, + 79 + ], + "spans": [ + { + "bbox": [ + 323, + 71, + 403, + 79 + ], + "type": "text", + "content": "- Original Sampled Points" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 323, + 80, + 406, + 88 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 80, + 406, + 88 + ], + "spans": [ + { + "bbox": [ + 323, + 80, + 406, + 88 + ], + "type": "text", + "content": "- Canonical Mapped Points" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 323, + 89, + 397, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 89, + 397, + 97 + ], + "spans": [ + { + "bbox": [ + 323, + 89, + 397, + 97 + ], + "type": "text", + "content": "The Original Cast Ray" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 323, + 98, + 413, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 98, + 413, + 106 + ], + "spans": [ + { + "bbox": [ + 323, + 98, + 413, + 106 + ], + "type": "text", + "content": "The Canonical Mapped Ray" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 432, + 72, + 527, + 97 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 435, + 72, + 518, + 79 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 435, + 72, + 518, + 79 + ], + "spans": [ + { + "bbox": [ + 435, + 72, + 518, + 79 + ], + "type": "text", + "content": "- Original Sampled Points" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 446, + 80, + 513, + 88 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 446, + 80, + 513, + 88 + ], + "spans": [ + { + "bbox": [ + 446, + 80, + 513, + 88 + ], + "type": "text", + "content": "The Original Cast Ray" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 432, + 89, + 527, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 432, + 89, + 527, + 97 + ], + "spans": [ + { + "bbox": [ + 432, + 89, + 527, + 97 + ], + "type": "text", + "content": "Time Features of the Points" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 325, + 113, + 411, + 158 + ], + "blocks": [ + { + "bbox": [ + 325, + 113, + 411, + 158 + ], + "lines": [ + { + "bbox": [ + 325, + 113, + 411, + 158 + ], + "spans": [ + { + "bbox": [ + 325, + 113, + 411, + 158 + ], + "type": "image", + "image_path": "ef1850bc3b5475d2432be61a873fecf2df6b0c6264f71e71debb0238f3bb80f6.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 162, + 430, + 171 + ], + "lines": [ + { + "bbox": [ + 306, + 162, + 430, + 171 + ], + "spans": [ + { + "bbox": [ + 306, + 162, + 430, + 171 + ], + "type": "text", + "content": "(a) Canonical Mapping Volume Rendering" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 443, + 105, + 530, + 163 + ], + "blocks": [ + { + "bbox": [ + 443, + 105, + 530, + 163 + ], + "lines": [ + { + "bbox": [ + 443, + 105, + 530, + 163 + ], + "spans": [ + { + "bbox": [ + 443, + 105, + 530, + 163 + ], + "type": "image", + "image_path": "9d40cb415fcf5860dbe341a088ea28c52200b20dcaed0dabcd4d4d035f716dc6.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 435, + 163, + 537, + 171 + ], + "lines": [ + { + "bbox": [ + 435, + 163, + 537, + 171 + ], + "spans": [ + { + "bbox": [ + 435, + 163, + 537, + 171 + ], + "type": "text", + "content": "(b) Time-aware Volume Rendering" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 316, + 178, + 411, + 239 + ], + "blocks": [ + { + "bbox": [ + 316, + 178, + 411, + 239 + ], + "lines": [ + { + "bbox": [ + 316, + 178, + 411, + 239 + ], + "spans": [ + { + "bbox": [ + 316, + 178, + 411, + 239 + ], + "type": "image", + "image_path": "b15ec9426dcf64169ad93fcee106d21566e9bd5e5efb974d017d601a668cb17e.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 391, + 239, + 468, + 249 + ], + "lines": [ + { + "bbox": [ + 391, + 239, + 468, + 249 + ], + "spans": [ + { + "bbox": [ + 391, + 239, + 468, + 249 + ], + "type": "text", + "content": "(c) 4D Gaussian Splitting" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 258, + 545, + 346 + ], + "lines": [ + { + "bbox": [ + 304, + 258, + 545, + 346 + ], + "spans": [ + { + "bbox": [ + 304, + 258, + 545, + 346 + ], + "type": "text", + "content": "Figure 2. Illustration of different dynamic scene rendering methods. (a) Points are sampled in the casted ray during volume rendering. The point deformation fields proposed in [8, 39] map the points into a canonical space. (b) Time-aware volume rendering computes the features of each point directly and does not change the rendering path. (c) The Gaussian deformation field converts original 3D Gaussians into another group of 3D Gaussians with a certain timestamp." 
+ } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "bbox": [ + 426, + 180, + 543, + 215 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 427, + 180, + 509, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 427, + 180, + 509, + 188 + ], + "spans": [ + { + "bbox": [ + 427, + 180, + 509, + 188 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 427, + 180, + 509, + 188 + ], + "type": "text", + "content": " Original 3D Gaussians G" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 427, + 189, + 516, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 427, + 189, + 516, + 197 + ], + "spans": [ + { + "bbox": [ + 427, + 189, + 516, + 197 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 427, + 189, + 516, + 197 + ], + "type": "text", + "content": " Deformed 3D Gaussians " + }, + { + "bbox": [ + 427, + 189, + 516, + 197 + ], + "type": "inline_equation", + "content": "G^{\\prime}" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 426, + 198, + 543, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 426, + 198, + 543, + 207 + ], + "spans": [ + { + "bbox": [ + 426, + 198, + 543, + 207 + ], + "type": "text", + "content": "Gaussian Deformation Field " + }, + { + "bbox": [ + 426, + 198, + 543, + 207 + ], + "type": "inline_equation", + "content": "\\mathrm{F}(G, t_i)" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 426, + 208, + 520, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 426, + 208, + 520, + 215 + ], + "spans": [ + { + "bbox": [ + 426, + 208, + 520, + 215 + ], + "type": "text", + "content": "Gaussian Rasterization Paths" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 359, + 427, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 359, + 427, + 371 + ], + "spans": [ + { + "bbox": [ + 306, + 359, + 427, + 371 + ], + "type": "text", + "content": "2.1. Novel View Synthesis" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 304, + 378, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 378, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 378, + 545, + 713 + ], + "type": "text", + "content": "Novel view synthesis is a important and challenging task in 3D reconstruction. Much approaches are proposed to represent a 3D object and render novel views. Efficient representations such as light fields [4], mesh [6, 15, 24, 46], voxels [16, 18, 23], multi-planes [9] can render high quality image with enough supervisions. NeRF-based approaches [3, 32, 59] demonstrate that implicit radiance fields can effectively learn scene representations and synthesize high-quality novel views. [35, 36, 39] have challenged the static hypothesis, expanding the boundary of novel view synthesis for dynamic scenes. [8] proposes to use an explicit voxel grid to model temporal information, accelerating the learning time for dynamic scenes to half an hour and applied in [17, 29, 56]. The proposed deformation-based neural rendering methods are shown in Fig. 2 (a). Flow-based [13, 25, 29, 48, 61] methods adopting warping algorithm to synthesis novel views by blending nearby frames. [5, 11, 12, 22, 44, 49] represent further advancements in faster dynamic scene learning by adopting decomposed neural voxels. 
They treat sampled points in each timestamp individually as shown in Fig. 2 (b). [14, 27, 38, 50, 51, 53] are efficient methods to handle multi-view setups. The aforementioned methods though achieve fast training speed, real-time rendering for dynamic scenes is still challenging, especially for monocular input. Our method aims at constructing a highly efficient training and rendering pipeline in Fig. 2 (c), while maintaining the quality, even for sparse inputs." + } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20311" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 73, + 545, + 238 + ], + "blocks": [ + { + "bbox": [ + 47, + 73, + 545, + 238 + ], + "lines": [ + { + "bbox": [ + 47, + 73, + 545, + 238 + ], + "spans": [ + { + "bbox": [ + 47, + 73, + 545, + 238 + ], + "type": "image", + "image_path": "79abdf0621f574dc702f76de86f319a1f46faf3cc8d3c4e6ffe3deb822ac0349.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "lines": [ + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "spans": [ + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "type": "text", + "content": "Figure 3. The overall pipeline of our model. Given a group of 3D Gaussians " + }, + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "type": "text", + "content": ", we extract the center coordinate of each 3D Gaussian " + }, + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "type": "text", + "content": " and timestamp " + }, + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "type": "text", + "content": " to compute the voxel feature by querying multi-resolution voxel planes. Then a tiny multi-head Gaussian deformation decoder is used to decode the feature and get the deformed 3D Gaussians " + }, + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "type": "inline_equation", + "content": "\\mathcal{G}'" + }, + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "type": "text", + "content": " at timestamp " + }, + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 247, + 547, + 291 + ], + "type": "text", + "content": ". The deformed Gaussians are then splatted to the rendered image." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 301, + 241, + 314 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 301, + 241, + 314 + ], + "spans": [ + { + "bbox": [ + 47, + 301, + 241, + 314 + ], + "type": "text", + "content": "2.2. 
Neural Rendering with Point Clouds" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 331, + 288, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 331, + 288, + 462 + ], + "spans": [ + { + "bbox": [ + 46, + 331, + 288, + 462 + ], + "type": "text", + "content": "Effectively representing 3D scenes remains a challenging topic. The community has explored various neural representations [32], e.g. meshes, point clouds [54], voxels [10], and hybrid approaches [33, 47]. Point-cloud-based methods [28, 40, 41, 58] initially target 3D segmentation and classification. A representative approach for rendering presented in [1, 54] combines point cloud representations with volume rendering, achieving rapid convergence speed even for dynamic novel view synthesis [34, 61]. [20, 21, 42] adopt differential point rendering techniques for scene reconstruction." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "text", + "content": "Recently, 3D-GS [19] is notable for its pure explicit representation and differential point-based splatting methods, enabling real-time rendering of novel views. Dynamic3DGS [30] models dynamic scenes by tracking the position and variance of each 3D Gaussian at each timestamp " + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "text", + "content": ". An explicit table is utilized to store information about each 3D Gaussian at every timestamp, leading to a linear memory consumption increase, denoted as " + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "inline_equation", + "content": "O(t\\mathcal{N})" + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "text", + "content": ", in which " + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{N}" + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "text", + "content": " is the number of 3D Gaussians. For long-term scene reconstruction, the storage cost will become non-negligible. The memory complexity of our approach only depends on the number of 3D Gaussians and the parameters of the Gaussian deformation field network " + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "text", + "content": ", which is denoted as " + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "inline_equation", + "content": "O(\\mathcal{N} + \\mathcal{F})" + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "text", + "content": ". [55] adds a marginal temporal Gaussian distribution to the original 3D Gaussians, which uplifts 3D Gaussians into 4D. However, it may cause each 3D Gaussian to only focus on its local temporal space. [26] tracks each 3D Gaussian individually. Our approach also models 3D Gaussian motions but with a compact network, resulting in highly efficient training and real-time rendering." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 300, + 383, + 314 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 300, + 383, + 314 + ], + "spans": [ + { + "bbox": [ + 306, + 300, + 383, + 314 + ], + "type": "text", + "content": "3. 
Preliminary" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 321, + 546, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 321, + 546, + 357 + ], + "spans": [ + { + "bbox": [ + 305, + 321, + 546, + 357 + ], + "type": "text", + "content": "In this section, we simply review the representation and rendering process of 3D-GS [19] in Sec. 3.1 and the formula of dynamic NeRFs in Sec. 3.2." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 364, + 434, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 364, + 434, + 377 + ], + "spans": [ + { + "bbox": [ + 305, + 364, + 434, + 377 + ], + "type": "text", + "content": "3.1. 3D Gaussian Splatting" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 382, + 546, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 382, + 546, + 430 + ], + "spans": [ + { + "bbox": [ + 305, + 382, + 546, + 430 + ], + "type": "text", + "content": "3D Gaussians [19] is an explicit 3D scene representation in the form of point clouds. Each 3D Gaussian is characterized by a covariance matrix " + }, + { + "bbox": [ + 305, + 382, + 546, + 430 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 305, + 382, + 546, + 430 + ], + "type": "text", + "content": " and a center point " + }, + { + "bbox": [ + 305, + 382, + 546, + 430 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 305, + 382, + 546, + 430 + ], + "type": "text", + "content": ", which is referred to as the mean value of the Gaussian:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 377, + 437, + 545, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 377, + 437, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 377, + 437, + 545, + 453 + ], + "type": "interline_equation", + "content": "G (X) = e ^ {- \\frac {1}{2} \\mathcal {X} ^ {T} \\Sigma^ {- 1} \\mathcal {X}}. \\tag {1}", + "image_path": "5ff4793d6da76c6b93b9280883202b4b1ef342d54eddbfe238bc3d2f2b1ea34e.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 461, + 545, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 461, + 545, + 495 + ], + "spans": [ + { + "bbox": [ + 305, + 461, + 545, + 495 + ], + "type": "text", + "content": "For differentiable optimization, the covariance matrix " + }, + { + "bbox": [ + 305, + 461, + 545, + 495 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 305, + 461, + 545, + 495 + ], + "type": "text", + "content": " can be decomposed into a scaling matrix " + }, + { + "bbox": [ + 305, + 461, + 545, + 495 + ], + "type": "inline_equation", + "content": "\\mathbf{S}" + }, + { + "bbox": [ + 305, + 461, + 545, + 495 + ], + "type": "text", + "content": " and a rotation matrix " + }, + { + "bbox": [ + 305, + 461, + 545, + 495 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 305, + 461, + 545, + 495 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 391, + 496, + 545, + 509 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 496, + 545, + 509 + ], + "spans": [ + { + "bbox": [ + 391, + 496, + 545, + 509 + ], + "type": "interline_equation", + "content": "\\Sigma = \\mathbf {R} \\mathbf {S} \\mathbf {S} ^ {T} \\mathbf {R} ^ {T}. 
\\tag {2}", + "image_path": "a8c234fdbb97e93ef9677d2896b8856f87d6755635d92fc23058b5cabbe0510c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 514, + 545, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 514, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 305, + 514, + 545, + 586 + ], + "type": "text", + "content": "When rendering novel views, differential splatting [57] is employed for the 3D Gaussians within the camera planes. As introduced by [62], using a viewing transform matrix " + }, + { + "bbox": [ + 305, + 514, + 545, + 586 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 305, + 514, + 545, + 586 + ], + "type": "text", + "content": " and the Jacobian matrix " + }, + { + "bbox": [ + 305, + 514, + 545, + 586 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 305, + 514, + 545, + 586 + ], + "type": "text", + "content": " of the affine approximation of the projective transformation, the covariance matrix " + }, + { + "bbox": [ + 305, + 514, + 545, + 586 + ], + "type": "inline_equation", + "content": "\\Sigma'" + }, + { + "bbox": [ + 305, + 514, + 545, + 586 + ], + "type": "text", + "content": " in camera coordinates can be computed as" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 383, + 594, + 545, + 609 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 383, + 594, + 545, + 609 + ], + "spans": [ + { + "bbox": [ + 383, + 594, + 545, + 609 + ], + "type": "interline_equation", + "content": "\\Sigma^ {\\prime} = J W \\Sigma W ^ {T} J ^ {T}. \\tag {3}", + "image_path": "a0bf15724b04e460711b391cdde5532d9f6d7d7149ff4ada338290666e64f72a.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "content": "In summary, each 3D Gaussian is characterized by the following attributes: position " + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{X} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "content": ", color defined by spherical harmonic (SH) coefficients " + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{C} \\in \\mathbb{R}^k" + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "content": " (where " + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "content": " represents nums of SH functions), opacity " + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\alpha \\in \\mathbb{R}" + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "content": ", rotation factor " + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "inline_equation", + "content": "r \\in \\mathbb{R}^4" + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "content": ", and scaling factor " + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "inline_equation", + "content": "s \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "content": ". 
Specifically, for each pixel, the color and opacity of all the Gaussians are computed using the Gaussian's representation Eq. 1. The blending of " + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "content": " ordered points that overlap the pixel is given by" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20312" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 100, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 100, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 100, + 83 + ], + "type": "text", + "content": "the formula:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 91, + 287, + 125 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 91, + 287, + 125 + ], + "spans": [ + { + "bbox": [ + 111, + 91, + 287, + 125 + ], + "type": "interline_equation", + "content": "C = \\sum_ {i \\in N} c _ {i} \\alpha_ {i} \\prod_ {j = 1} ^ {i - 1} (1 - \\alpha_ {i}). \\tag {4}", + "image_path": "d1f0395df699d768168bc454cb4bb1e6a7b17356eddab4b0c130f87d57ac7c77.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 133, + 287, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 133, + 287, + 181 + ], + "spans": [ + { + "bbox": [ + 47, + 133, + 287, + 181 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 47, + 133, + 287, + 181 + ], + "type": "inline_equation", + "content": "c_{i}" + }, + { + "bbox": [ + 47, + 133, + 287, + 181 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 133, + 287, + 181 + ], + "type": "inline_equation", + "content": "\\alpha_{i}" + }, + { + "bbox": [ + 47, + 133, + 287, + 181 + ], + "type": "text", + "content": " represents the density and color of this point computed by a 3D Gaussian " + }, + { + "bbox": [ + 47, + 133, + 287, + 181 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 47, + 133, + 287, + 181 + ], + "type": "text", + "content": " with covariance " + }, + { + "bbox": [ + 47, + 133, + 287, + 181 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 47, + 133, + 287, + 181 + ], + "type": "text", + "content": " multiplied by an estimizable per-point opacity and SH color coefficients." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 189, + 263, + 202 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 189, + 263, + 202 + ], + "spans": [ + { + "bbox": [ + 47, + 189, + 263, + 202 + ], + "type": "text", + "content": "3.2. 
Dynamic NeRFs with Deformation Fields" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 208, + 274, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 208, + 274, + 220 + ], + "spans": [ + { + "bbox": [ + 47, + 208, + 274, + 220 + ], + "type": "text", + "content": "All the dynamic NeRF algorithms can be formulated as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 123, + 230, + 287, + 243 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 230, + 287, + 243 + ], + "spans": [ + { + "bbox": [ + 123, + 230, + 287, + 243 + ], + "type": "interline_equation", + "content": "c, \\sigma = \\mathcal {M} (\\mathbf {x}, d, t, \\lambda), \\tag {5}", + "image_path": "380f62300ef30ffe87bc2aa1ab652e1e1fe69fc8c587f9842728ac43d06acd69.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "text", + "content": " is a mapping that maps 8D space " + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "inline_equation", + "content": "(\\mathbf{x},d,t,\\lambda)" + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "text", + "content": " to 4D space " + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "inline_equation", + "content": "(c,\\sigma)" + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "text", + "content": " refers to the spatial point, " + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "text", + "content": " is the optional input used to build topological and appearance changes in [36], and " + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 252, + 287, + 300 + ], + "type": "text", + "content": " stands for view-dependency." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 300, + 287, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 300, + 287, + 372 + ], + "spans": [ + { + "bbox": [ + 47, + 300, + 287, + 372 + ], + "type": "text", + "content": "As shown in Fig. 2 (a), all the deformation-based NeRF methods estimate the world-to-canonical mapping by a deformation network " + }, + { + "bbox": [ + 47, + 300, + 287, + 372 + ], + "type": "inline_equation", + "content": "\\phi_t:(\\mathbf{x},t)\\to \\Delta \\mathbf{x}" + }, + { + "bbox": [ + 47, + 300, + 287, + 372 + ], + "type": "text", + "content": ". Then a network is introduced to compute volume density and view-dependent RGB color from each ray. 
The formula for rendering can be expressed as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 382, + 287, + 395 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 382, + 287, + 395 + ], + "spans": [ + { + "bbox": [ + 107, + 382, + 287, + 395 + ], + "type": "interline_equation", + "content": "c, \\sigma = \\operatorname {N e R F} (\\mathbf {x} + \\Delta \\mathbf {x}, d, \\lambda), \\tag {6}", + "image_path": "6315826dcc7fd435504f025181ed3bd33179993cb472056f00b98b9fbe69d5c8.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 404, + 287, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 404, + 287, + 440 + ], + "spans": [ + { + "bbox": [ + 47, + 404, + 287, + 440 + ], + "type": "text", + "content": "where 'NeRF' stands for the vanilla NeRF pipeline, " + }, + { + "bbox": [ + 47, + 404, + 287, + 440 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 47, + 404, + 287, + 440 + ], + "type": "text", + "content": " is a frame-dependent code to model the topological and appearance changes [31, 36]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 441, + 287, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 441, + 287, + 512 + ], + "spans": [ + { + "bbox": [ + 47, + 441, + 287, + 512 + ], + "type": "text", + "content": "However, our 4D Gaussian splatting framework presents a novel rendering technique. We successfully compute the canonical-to-world mapping by a Gaussian deformation field network " + }, + { + "bbox": [ + 47, + 441, + 287, + 512 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 47, + 441, + 287, + 512 + ], + "type": "text", + "content": " at the time " + }, + { + "bbox": [ + 47, + 441, + 287, + 512 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 441, + 287, + 512 + ], + "type": "text", + "content": " directly, and differential splatting [19] is followed, which enables the computation of backward flow and the tracking of 3D Gaussians." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 523, + 103, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 523, + 103, + 534 + ], + "spans": [ + { + "bbox": [ + 47, + 523, + 103, + 534 + ], + "type": "text", + "content": "4. Method" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 543, + 287, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 543, + 287, + 591 + ], + "spans": [ + { + "bbox": [ + 47, + 543, + 287, + 591 + ], + "type": "text", + "content": "Sec. 4.1 introduces the overall 4D Gaussian Splatting framework. Then, the Gaussian deformation field is proposed in Sec. 4.2. Finally, we describe the optimization process in Sec. 4.3." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 599, + 233, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 599, + 233, + 612 + ], + "spans": [ + { + "bbox": [ + 47, + 599, + 233, + 612 + ], + "type": "text", + "content": "4.1. 4D Gaussian Splatting Framework" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "text", + "content": "As shown in Fig. 
3, given a view matrix " + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "inline_equation", + "content": "M = [R,T]" + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "text", + "content": ", timestamp " + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "text", + "content": ", our 4D Gaussian splatting framework includes 3D Gaussians " + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "text", + "content": " and Gaussian deformation field network " + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "text", + "content": ". Then a novel-view image " + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\hat{I}" + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "text", + "content": " is rendered by differential splatting [57] " + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "text", + "content": " following " + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\hat{I} = \\mathcal{S}(M,\\mathcal{G}^{\\prime})" + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\mathcal{G}^{\\prime} = \\Delta \\mathcal{G} + \\mathcal{G}" + }, + { + "bbox": [ + 47, + 617, + 287, + 677 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": "Specifically, the deformation of 3D Gaussians " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\Delta \\mathcal{G}" + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": " is introduced by the Gaussian deformation field network " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\Delta \\mathcal{G} = \\mathcal{F}(\\mathcal{G},t)" + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": ", in which the spatial-temporal structure encoder " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 307, + 69, + 542, + 215 + ], + "blocks": [ + { + "bbox": [ + 307, + 69, + 542, + 215 + ], + "lines": [ + { + "bbox": [ + 307, + 69, + 542, + 215 + ], + "spans": [ + { + "bbox": [ + 307, + 69, + 542, + 215 + ], + "type": "image", + "image_path": "3ec68fe898331e76563915399716aa1fd62987de6864ea790d836aecf93278fa.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 223, + 545, + 256 + ], + "lines": [ + { + "bbox": [ + 305, + 223, + 545, + 256 + ], + "spans": [ + { + "bbox": [ + 305, + 223, + 545, + 256 + ], + "type": "text", + "content": "Figure 4. Illustration of the optimization process. 
With static 3D Gaussian initialization, our model can learn high-quality 3D Gaussians of the motion part." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 266, + 545, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 266, + 545, + 326 + ], + "spans": [ + { + "bbox": [ + 305, + 266, + 545, + 326 + ], + "type": "text", + "content": "can encode both the temporal and spatial features of 3D Gaussians " + }, + { + "bbox": [ + 305, + 266, + 545, + 326 + ], + "type": "inline_equation", + "content": "f_{d} = \\mathcal{H}(\\mathcal{G},t)" + }, + { + "bbox": [ + 305, + 266, + 545, + 326 + ], + "type": "text", + "content": ", and the multi-head Gaussian deformation decoder " + }, + { + "bbox": [ + 305, + 266, + 545, + 326 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 305, + 266, + 545, + 326 + ], + "type": "text", + "content": " can decode the features and predict each 3D Gaussian's deformation " + }, + { + "bbox": [ + 305, + 266, + 545, + 326 + ], + "type": "inline_equation", + "content": "\\Delta \\mathcal{G} = \\mathcal{D}(f)" + }, + { + "bbox": [ + 305, + 266, + 545, + 326 + ], + "type": "text", + "content": ", then the deformed 3D Gaussians " + }, + { + "bbox": [ + 305, + 266, + 545, + 326 + ], + "type": "inline_equation", + "content": "\\mathcal{G}'" + }, + { + "bbox": [ + 305, + 266, + 545, + 326 + ], + "type": "text", + "content": " can be introduced." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 327, + 545, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 327, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 305, + 327, + 545, + 387 + ], + "type": "text", + "content": "The rendering process of our 4D Gaussian Splatting is depicted in Fig. 2 (c). Our 4D Gaussian splatting converts the original 3D Gaussians " + }, + { + "bbox": [ + 305, + 327, + 545, + 387 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 305, + 327, + 545, + 387 + ], + "type": "text", + "content": " into another group of 3D Gaussians " + }, + { + "bbox": [ + 305, + 327, + 545, + 387 + ], + "type": "inline_equation", + "content": "\\mathcal{G}'" + }, + { + "bbox": [ + 305, + 327, + 545, + 387 + ], + "type": "text", + "content": " given a timestamp " + }, + { + "bbox": [ + 305, + 327, + 545, + 387 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 305, + 327, + 545, + 387 + ], + "type": "text", + "content": ", maintaining the effectiveness of the differential splatting as referred in [57]." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 393, + 504, + 405 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 393, + 504, + 405 + ], + "spans": [ + { + "bbox": [ + 306, + 393, + 504, + 405 + ], + "type": "text", + "content": "4.2. 
Gaussian Deformation Field Network" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 411, + 545, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 411, + 545, + 459 + ], + "spans": [ + { + "bbox": [ + 305, + 411, + 545, + 459 + ], + "type": "text", + "content": "The network to learn the Gaussian deformation field includes an efficient spatial-temporal structure encoder " + }, + { + "bbox": [ + 305, + 411, + 545, + 459 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 305, + 411, + 545, + 459 + ], + "type": "text", + "content": " and a Gaussian deformation decoder " + }, + { + "bbox": [ + 305, + 411, + 545, + 459 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 305, + 411, + 545, + 459 + ], + "type": "text", + "content": " for predicting the deformation of each 3D Gaussian." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 474, + 545, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 474, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 305, + 474, + 545, + 605 + ], + "type": "text", + "content": "Spatial-Temporal Structure Encoder. Nearby 3D Gaussians always share similar spatial and temporal information. To model 3D Gaussians' features effectively, we introduce an efficient spatial-temporal structure encoder " + }, + { + "bbox": [ + 305, + 474, + 545, + 605 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 305, + 474, + 545, + 605 + ], + "type": "text", + "content": " including a multi-resolution HexPlane " + }, + { + "bbox": [ + 305, + 474, + 545, + 605 + ], + "type": "inline_equation", + "content": "R(i,j)" + }, + { + "bbox": [ + 305, + 474, + 545, + 605 + ], + "type": "text", + "content": " and a tiny MLP " + }, + { + "bbox": [ + 305, + 474, + 545, + 605 + ], + "type": "inline_equation", + "content": "\\phi_d" + }, + { + "bbox": [ + 305, + 474, + 545, + 605 + ], + "type": "text", + "content": " inspired by [5, 8, 11, 44]. While the vanilla 4D neural voxel is memory-consuming, we adopt a 4D K-Planes [11] module to decompose the 4D neural voxel into 6 planes. All 3D Gaussians in a certain area can be contained in the bounding plane voxels and Gaussian's deformation can also be encoded in nearby temporal voxels." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "content": "Specifically, the spatial-temporal structure encoder " + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "content": " contains 6 multi-resolution plane modules " + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "inline_equation", + "content": "R_{l}(i,j)" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "content": " and a tiny MLP " + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\phi_d" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "content": ", i.e. 
" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{H}(\\mathcal{G},t) = \\{R_l(i,j),\\phi_d|(i,j)\\in" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\{(x,y),(x,z),(y,z),(x,t),(y,t),(z,t)\\} ,l\\in \\{1,2\\} \\}" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "content": ". The position " + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mu = (x,y,z)" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "content": " is the mean value of 3D Gaussians " + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "content": ". Each voxel module is defined by " + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "inline_equation", + "content": "R(i,j)\\in \\mathbb{R}^{h\\times lN_i\\times lN_j}" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "content": " stands for the hidden dim of features, " + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "content": " denotes the basic resolution of voxel grid and " + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 305, + 606, + 545, + 713 + ], + "type": "text", + "content": " equals to the upsampling scale. This entails encoding information of the 3D" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20313" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 68, + 548, + 233 + ], + "blocks": [ + { + "bbox": [ + 47, + 68, + 548, + 233 + ], + "lines": [ + { + "bbox": [ + 47, + 68, + 548, + 233 + ], + "spans": [ + { + "bbox": [ + 47, + 68, + 548, + 233 + ], + "type": "image", + "image_path": "589f60be5cc17db5ac9d7db98e30f7ded964ca1f15788b98b84e05478d29dd59.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 239, + 547, + 264 + ], + "lines": [ + { + "bbox": [ + 46, + 239, + 547, + 264 + ], + "spans": [ + { + "bbox": [ + 46, + 239, + 547, + 264 + ], + "type": "text", + "content": "Figure 5. Visualization of synthesized datasets compared with other models [5, 8, 11, 17, 19, 49]. The rendering results of [11] are displayed with a default green background. We have adopted their rendering settings." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 272, + 288, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 272, + 288, + 308 + ], + "spans": [ + { + "bbox": [ + 46, + 272, + 288, + 308 + ], + "type": "text", + "content": "Gaussians within the 62D voxel planes while considering temporal information. 
The formula for computing separate voxel features is as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 316, + 287, + 343 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 316, + 287, + 343 + ], + "spans": [ + { + "bbox": [ + 69, + 316, + 287, + 343 + ], + "type": "interline_equation", + "content": "f _ {h} = \\bigcup_ {l} \\prod \\operatorname {i n t e r p} \\left(R _ {l} (i, j)\\right), \\tag {7}", + "image_path": "4282518762933e41986a10cb94c69cfa499ebb2d8b07ca00441dafa5278a833a.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 60, + 344, + 263, + 359 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 344, + 263, + 359 + ], + "spans": [ + { + "bbox": [ + 60, + 344, + 263, + 359 + ], + "type": "interline_equation", + "content": "(i, j) \\in \\{(x, y), (x, z), (y, z), (x, t), (y, t), (z, t) \\}.", + "image_path": "101337f97538b082e30d9822008b3a0c2b48bf430bb7449978f54ca0ba8eed5e.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 368, + 288, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 368, + 288, + 430 + ], + "spans": [ + { + "bbox": [ + 46, + 368, + 288, + 430 + ], + "type": "inline_equation", + "content": "f_{h} \\in \\mathbb{R}^{h * l}" + }, + { + "bbox": [ + 46, + 368, + 288, + 430 + ], + "type": "text", + "content": " is the feature of neural voxels. 'interp' denotes the bilinear interpolation for querying the voxel features located at 4 vertices of the grid. The discussion of the production process is similar to [11]. Then a tiny MLP " + }, + { + "bbox": [ + 46, + 368, + 288, + 430 + ], + "type": "inline_equation", + "content": "\\phi_{d}" + }, + { + "bbox": [ + 46, + 368, + 288, + 430 + ], + "type": "text", + "content": " merges all the features by " + }, + { + "bbox": [ + 46, + 368, + 288, + 430 + ], + "type": "inline_equation", + "content": "f_{d} = \\phi_{d}(f_{h})" + }, + { + "bbox": [ + 46, + 368, + 288, + 430 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "spans": [ + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "text", + "content": "Multi-head Gaussian Deformation Decoder. When all the features of 3D Gaussians are encoded, we can compute any desired variable with a multi-head Gaussian deformation decoder " + }, + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{\\phi_x,\\phi_r,\\phi_s\\}" + }, + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "text", + "content": ". Separate MLPs are employed to compute the deformation of position " + }, + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "inline_equation", + "content": "\\Delta \\mathcal{X} = \\phi_{x}(f_{d})" + }, + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "text", + "content": ", rotation " + }, + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "inline_equation", + "content": "\\Delta r = \\phi_r(f_d)" + }, + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "text", + "content": ", and scaling " + }, + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "inline_equation", + "content": "\\Delta s = \\phi_s(f_d)" + }, + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "text", + "content": ". 
Then, the deformed feature " + }, + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "inline_equation", + "content": "(\\mathcal{X}',r',s')" + }, + { + "bbox": [ + 46, + 446, + 287, + 531 + ], + "type": "text", + "content": " can be addressed as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 79, + 541, + 287, + 555 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 541, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 79, + 541, + 287, + 555 + ], + "type": "interline_equation", + "content": "\\left(\\mathcal {X} ^ {\\prime}, r ^ {\\prime}, s ^ {\\prime}\\right) = \\left(\\mathcal {X} + \\Delta \\mathcal {X}, r + \\Delta r, s + \\Delta s\\right). \\tag {8}", + "image_path": "a3f1507968bd109b66f81dea289cceae9df3a96ac130e1e6772f109dcd525c9d.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 565, + 287, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 565, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 47, + 565, + 287, + 590 + ], + "type": "text", + "content": "Finally, we obtain the deformed 3D Gaussians " + }, + { + "bbox": [ + 47, + 565, + 287, + 590 + ], + "type": "inline_equation", + "content": "\\mathcal{G}' = \\{\\mathcal{X}', s', r', \\sigma, \\mathcal{C}\\}" + }, + { + "bbox": [ + 47, + 565, + 287, + 590 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 598, + 133, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 598, + 133, + 611 + ], + "spans": [ + { + "bbox": [ + 47, + 598, + 133, + 611 + ], + "type": "text", + "content": "4.3. Optimization" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": "3D Gaussian Initialization. [19] shows that 3D Gaussians can be well-trained with structure from motion (SfM) [43] points initialization. Similarly, 4D Gaussians should also be fine-tuned in proper 3D Gaussian initialization. We optimize 3D Gaussians at initial 3000 iterations for warm-up and then render images with 3D Gaussians " + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\hat{I} = S(M, \\mathcal{G})" + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": " instead of 4D Gaussians " + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\hat{I} = S(M, \\mathcal{G}')" + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": ". The illustration of the optimization process is shown in Fig. 4." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 272, + 546, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 272, + 546, + 321 + ], + "spans": [ + { + "bbox": [ + 305, + 272, + 546, + 321 + ], + "type": "text", + "content": "Loss Function. Similar to other reconstruction methods [8, 19, 39], we use the L1 color loss to supervise the training process. A grid-based total-variational loss [5, 8, 11, 47] " + }, + { + "bbox": [ + 305, + 272, + 546, + 321 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{tv}" + }, + { + "bbox": [ + 305, + 272, + 546, + 321 + ], + "type": "text", + "content": " is also applied." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 384, + 329, + 546, + 344 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 329, + 546, + 344 + ], + "spans": [ + { + "bbox": [ + 384, + 329, + 546, + 344 + ], + "type": "interline_equation", + "content": "\mathcal {L} = | \hat {I} - I | + \mathcal {L} _ {t v}. \tag {9}", + "image_path": "39bf0509cdaac913a9d2a4d0b804c6fb43eddd4c160c9d9cdaab9f18e1930b5e.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 348, + 383, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 348, + 383, + 363 + ], + "spans": [ + { + "bbox": [ + 305, + 348, + 383, + 363 + ], + "type": "text", + "content": "5. Experiment" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 369, + 546, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 369, + 546, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 369, + 546, + 464 + ], + "type": "text", + "content": "In this section, we mainly introduce the hyperparameters and datasets of our settings in Sec. 5.1, and the results on different datasets are compared with [2, 5, 8, 11, 19, 27, 45, 49, 50] in Sec. 5.2. Then, ablation studies are conducted to demonstrate the effectiveness of our approaches in Sec. 5.3, and more discussions about 4D-GS are given in Sec. 5.4. Finally, we discuss the limitations of our proposed 4D-GS in Sec. 5.5." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 472, + 433, + 486 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 472, + 433, + 486 + ], + "spans": [ + { + "bbox": [ + 305, + 472, + 433, + 486 + ], + "type": "text", + "content": "5.1. Experimental Settings" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 491, + 546, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 491, + 546, + 552 + ], + "spans": [ + { + "bbox": [ + 304, + 491, + 546, + 552 + ], + "type": "text", + "content": "Our implementation is primarily based on the PyTorch [37] framework and tested on a single RTX 3090 GPU, and we fine-tuned our optimization parameters following the configuration outlined in 3D-GS [19]. More hyperparameters are provided in the appendix." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 566, + 546, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 566, + 546, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 566, + 546, + 651 + ], + "type": "text", + "content": "Synthetic Dataset. We primarily assess the performance of our model using synthetic datasets, as introduced by D-NeRF [39]. These datasets are designed for monocular settings, although it is worth noting that the camera poses for each timestamp are close to randomly generated. Each scene within these datasets contains dynamic frames, ranging from 50 to 200 in number." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "Real-world Datasets. We utilize the datasets provided by HyperNeRF [36] and Neu3D's [22] as benchmarks to evaluate the performance of our model in real-world scenarios.
The Nerfies dataset is captured using one or two" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20314" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 68, + 545, + 244 + ], + "blocks": [ + { + "bbox": [ + 47, + 68, + 545, + 244 + ], + "lines": [ + { + "bbox": [ + 47, + 68, + 545, + 244 + ], + "spans": [ + { + "bbox": [ + 47, + 68, + 545, + 244 + ], + "type": "image", + "image_path": "2d0635783a41ee2783b754c9eaae16cb23001622ef636a0efded0f08bd6a891f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 253, + 546, + 265 + ], + "lines": [ + { + "bbox": [ + 46, + 253, + 546, + 265 + ], + "spans": [ + { + "bbox": [ + 46, + 253, + 546, + 265 + ], + "type": "text", + "content": "Figure 6. Visualization of the HyperNeRF [36] datasets compared with other methods [8, 17, 19, 36]. 'GT' stands for ground truth images." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 121, + 308, + 474, + 418 + ], + "blocks": [ + { + "bbox": [ + 46, + 276, + 546, + 300 + ], + "lines": [ + { + "bbox": [ + 46, + 276, + 546, + 300 + ], + "spans": [ + { + "bbox": [ + 46, + 276, + 546, + 300 + ], + "type": "text", + "content": "Table 1. Quantitative results on the synthesis dataset. The best and the second best results are denoted by pink and yellow. The rendering resolution is set to " + }, + { + "bbox": [ + 46, + 276, + 546, + 300 + ], + "type": "inline_equation", + "content": "800 \\times 800" + }, + { + "bbox": [ + 46, + 276, + 546, + 300 + ], + "type": "text", + "content": ". \"Time\" in the table stands for training times." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 121, + 308, + 474, + 418 + ], + "lines": [ + { + "bbox": [ + 121, + 308, + 474, + 418 + ], + "spans": [ + { + "bbox": [ + 121, + 308, + 474, + 418 + ], + "type": "table", + "html": "
ModelPSNR(dB)↑SSIM↑LPIPS↓Time↓FPS ↑Storage (MB)↓
TiNeuVox-B [8]32.670.970.0428 mins1.548
KPlanes [11]31.610.97-52 mins0.97418
HexPlane-Slim [5]31.040.970.0411m 30s2.538
3D-GS [19]23.190.930.0810 mins17010
FFDNeRF [17]32.680.970.04-< 1440
MSTH [49]31.340.980.026 mins--
V4D [12]33.720.980.026.9 hours2.08377
Ours34.050.980.028 mins8218
", + "image_path": "6182eeb9a23f23372907884edb109992e044e885d60e986ff087ff139ecd01c2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 437, + 287, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 287, + 510 + ], + "type": "text", + "content": "cameras, following straightforward camera motion, while the Neu3D's dataset is captured using 15 to 20 static cameras, involving extended periods and intricate camera motions. We use the points computed by SfM [43] from the first frame of each video in Neu3D's dataset and 200 frames randomly selected in HyperNeRF's." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 515, + 105, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 515, + 105, + 528 + ], + "spans": [ + { + "bbox": [ + 47, + 515, + 105, + 528 + ], + "type": "text", + "content": "5.2. Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 534, + 287, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 287, + 617 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 287, + 617 + ], + "type": "text", + "content": "We primarily assess our experimental results using various metrics, encompassing peak-signal-to-noise ratio (PSNR), perceptual quality measure LPIPS [60], structural similarity index (SSIM) [52] and its extensions including structural dissimilarity index measure (DSSIM), multiscale structural similarity index (MS-SSIM), FPS, training times and Storage." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "content": "To assess the quality of novel view synthesis, we conducted benchmarking against several state-of-the-art methods in the field, including [5, 8, 11, 12, 17, 19, 27, 49]. The results are summarized in Tab. 1. While current dynamic hybrid representations can produce high-quality results, they often come with the drawback of rendering speed. The lack of modeling dynamic motion part makes [19] fail to reconstruct dynamic scenes. In contrast, our method en" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 437, + 546, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 546, + 484 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 546, + 484 + ], + "type": "text", + "content": "joys both the highest rendering quality within the synthesis dataset and exceptionally fast rendering speeds while keeping extremely low storage consumption and convergence time." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 486, + 547, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 486, + 547, + 630 + ], + "spans": [ + { + "bbox": [ + 304, + 486, + 547, + 630 + ], + "type": "text", + "content": "Additionally, the results obtained from real-world datasets are presented in Tab. 2 and Tab. 3. It becomes apparent that some NeRFs [2, 5, 45] suffer from slow convergence speed, and the other grid-based NeRF methods [5, 8, 11, 49] encounter difficulties when attempting to capture intricate object details. In stark contrast, our methods research comparable rendering quality, fast convergence, and excel in free-view rendering speed in indoor cases. 
Though [27] addresses the high quality in comparison to ours, the need for multi-cam setups makes it hard to model monocular scenes and other methods also limit free-view rendering speed and storage." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 634, + 400, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 634, + 400, + 647 + ], + "spans": [ + { + "bbox": [ + 306, + 634, + 400, + 647 + ], + "type": "text", + "content": "5.3. Ablation Study" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 653, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 547, + 715 + ], + "type": "text", + "content": "Spatial-Temporal Structure Encoder. The explicit Hex-Plane encoder " + }, + { + "bbox": [ + 304, + 653, + 547, + 715 + ], + "type": "inline_equation", + "content": "R_{l}(i,j)" + }, + { + "bbox": [ + 304, + 653, + 547, + 715 + ], + "type": "text", + "content": " possesses the capacity to retain 3D Gaussians' spatial and temporal information, which can reduce storage consumption in comparison with purely explicit methods [30]. Discarding this module, we observe" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20315" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 132, + 91, + 460, + 192 + ], + "blocks": [ + { + "bbox": [ + 108, + 70, + 485, + 83 + ], + "lines": [ + { + "bbox": [ + 108, + 70, + 485, + 83 + ], + "spans": [ + { + "bbox": [ + 108, + 70, + 485, + 83 + ], + "type": "text", + "content": "Table 2. Quantitative results on HyperNeRF's [36] vrig dataset. Rendering resolution is set to " + }, + { + "bbox": [ + 108, + 70, + 485, + 83 + ], + "type": "inline_equation", + "content": "{960} \\times {540}" + }, + { + "bbox": [ + 108, + 70, + 485, + 83 + ], + "type": "text", + "content": " ." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 132, + 91, + 460, + 192 + ], + "lines": [ + { + "bbox": [ + 132, + 91, + 460, + 192 + ], + "spans": [ + { + "bbox": [ + 132, + 91, + 460, + 192 + ], + "type": "table", + "html": "
ModelPSNR(dB)↑MS-SSIM↑Times↓FPS↑Storage(MB)↓
Nerfies [35]22.20.803~ hours< 1-
HyperNeRF [36]22.40.81432 hours< 1-
TiNeuVox-B [8]24.30.83630 mins148
3D-GS [19]19.70.68040 mins5552
FFDNeRF [17]24.20.842-0.05440
V4D [12]24.80.8325.5 hours0.29377
Ours25.20.84530 mins3461
", + "image_path": "930d62bc2e1de7987c899d965fc13bf3358595be102923045f2b7fc51d2b487f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 222, + 491, + 319 + ], + "blocks": [ + { + "bbox": [ + 115, + 202, + 477, + 214 + ], + "lines": [ + { + "bbox": [ + 115, + 202, + 477, + 214 + ], + "spans": [ + { + "bbox": [ + 115, + 202, + 477, + 214 + ], + "type": "text", + "content": "Table 3. Quantitative results on the Neu3D's [22] dataset, rendering resolution is set to " + }, + { + "bbox": [ + 115, + 202, + 477, + 214 + ], + "type": "inline_equation", + "content": "1352 \\times 1014" + }, + { + "bbox": [ + 115, + 202, + 477, + 214 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 222, + 491, + 319 + ], + "lines": [ + { + "bbox": [ + 106, + 222, + 491, + 319 + ], + "spans": [ + { + "bbox": [ + 106, + 222, + 491, + 319 + ], + "type": "table", + "html": "
ModelPSNR(dB)↑D-SSIM↓LPIPS↓Time ↓FPS↑Storage (MB)↓
NeRFPlayer [45]30.690.0340.1116 hours0.045-
HyperReel [2]31.100.0360.0969 hours2.0360
HexPlane-all* [5]31.700.0140.07512 hours0.2250
KPlanes [11]31.63--1.8 hours0.3309
Im4D [27]32.58-0.20828 mins~593
MSTH [49]32.370.0150.05620 mins2(15‡)135
Ours31.150.0160.04940 mins3090
", + "image_path": "ab4efbced946b9a4130d7b209ac4d6acfa3926315a16071e28e78ffb92ec8787.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 62, + 320, + 378, + 329 + ], + "lines": [ + { + "bbox": [ + 62, + 320, + 378, + 329 + ], + "spans": [ + { + "bbox": [ + 62, + 320, + 378, + 329 + ], + "type": "text", + "content": "*: The metrics of the model are tested without \"coffee martini\" and resolution is set to " + }, + { + "bbox": [ + 62, + 320, + 378, + 329 + ], + "type": "inline_equation", + "content": "1024 \\times 768" + }, + { + "bbox": [ + 62, + 320, + 378, + 329 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 62, + 329, + 216, + 339 + ], + "lines": [ + { + "bbox": [ + 62, + 329, + 216, + 339 + ], + "spans": [ + { + "bbox": [ + 62, + 329, + 216, + 339 + ], + "type": "text", + "content": ": The FPS is tested with fixed-view rendering." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 359, + 287, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 359, + 287, + 408 + ], + "spans": [ + { + "bbox": [ + 46, + 359, + 287, + 408 + ], + "type": "text", + "content": "that using only a shallow MLP " + }, + { + "bbox": [ + 46, + 359, + 287, + 408 + ], + "type": "inline_equation", + "content": "\\phi_d" + }, + { + "bbox": [ + 46, + 359, + 287, + 408 + ], + "type": "text", + "content": " falls short in modeling complex deformations across various settings. Tab. 4 demonstrates that, while the model incurs minimal memory costs, it does come at the expense of rendering quality." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 417, + 287, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 417, + 287, + 583 + ], + "spans": [ + { + "bbox": [ + 46, + 417, + 287, + 583 + ], + "type": "text", + "content": "Gaussian Deformation Decoder. Our proposed Gaussian deformation decoder " + }, + { + "bbox": [ + 46, + 417, + 287, + 583 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 46, + 417, + 287, + 583 + ], + "type": "text", + "content": " decodes the features from the spatial-temporal structure encoder " + }, + { + "bbox": [ + 46, + 417, + 287, + 583 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 46, + 417, + 287, + 583 + ], + "type": "text", + "content": ". All the changes in 3D Gaussians can be explained by separate MLPs " + }, + { + "bbox": [ + 46, + 417, + 287, + 583 + ], + "type": "inline_equation", + "content": "\\{\\phi_x, \\phi_r, \\phi_s\\}" + }, + { + "bbox": [ + 46, + 417, + 287, + 583 + ], + "type": "text", + "content": ". As is shown in Tab. 4, 4D Gaussians cannot fit dynamic scenes well without modeling 3D Gaussian motion. Meanwhile, the movement of human body joints is typically manifested as stretching and twisting of surface details in a macroscopic view. If one aims to accurately model these movements, the size and shape of 3D Gaussians should also be adjusted accordingly. Otherwise, there may be underfitting of details during excessive stretching, or an inability to correctly simulate the movement of objects at a microscopic level." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": "3D Gaussian Initialization. In some cases without SfM [43] point initialization, directly training 4D-GS may cause difficulty in convergence. Optimizing 3D Gaussians for warm-up has several benefits: (a) making some 3D Gaussians stay in the dynamic part, which relieves the pressure of learning large deformations by 4D Gaussians, as shown in Fig. 4. (b) learning proper 3D Gaussians " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "\mathcal{G}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " and encouraging the deformation fields to pay more attention to the dynamic part. (c) avoiding numeric errors in optimizing the Gaussian deformation network " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "\mathcal{F}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " and keeping the training process stable." + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 319, + 358, + 533, + 526 + ], + "blocks": [ + { + "bbox": [ + 319, + 358, + 533, + 526 + ], + "lines": [ + { + "bbox": [ + 319, + 358, + 533, + 526 + ], + "spans": [ + { + "bbox": [ + 319, + 358, + 533, + 526 + ], + "type": "image", + "image_path": "d5ca3ea5b0c79315f75c77d126f979c7bcaaa1db0e04ac6bdbcae8033e1e4a6d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 534, + 545, + 556 + ], + "lines": [ + { + "bbox": [ + 305, + 534, + 545, + 556 + ], + "spans": [ + { + "bbox": [ + 305, + 534, + 545, + 556 + ], + "type": "text", + "content": "Figure 7. Visualization of tracking with 3D Gaussians. Each line in the second row stands for the trajectory of a 3D Gaussian." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 578, + 545, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 578, + 545, + 603 + ], + "spans": [ + { + "bbox": [ + 305, + 578, + 545, + 603 + ], + "type": "text", + "content": "Tab. 4 also shows that if we train our model without the warm-up coarse stage, the rendering quality will suffer." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 611, + 382, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 611, + 382, + 622 + ], + "spans": [ + { + "bbox": [ + 306, + 611, + 382, + 622 + ], + "type": "text", + "content": "5.4. Discussions" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "content": "Tracking with 3D Gaussians. Tracking in 3D is also an important task. [17] also shows tracking of objects' motion in 3D. Different from dynamic3DGS [30], our method can even track objects in monocular settings with pretty low storage, i.e.,
10MB in 3D Gaussians " + }, + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "content": " and 8 MB in Gaussian deformation field network " + }, + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "content": ". Fig. 7 shows the 3D Gaussian's deformation at certain timestamps." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20316" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 77, + 91, + 511, + 188 + ], + "blocks": [ + { + "bbox": [ + 159, + 71, + 434, + 82 + ], + "lines": [ + { + "bbox": [ + 159, + 71, + 434, + 82 + ], + "spans": [ + { + "bbox": [ + 159, + 71, + 434, + 82 + ], + "type": "text", + "content": "Table 4. Ablation studies on synthetic datasets using our proposed methods." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 77, + 91, + 511, + 188 + ], + "lines": [ + { + "bbox": [ + 77, + 91, + 511, + 188 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 511, + 188 + ], + "type": "table", + "html": "
ModelPSNR(dB)↑SSIM↑LPIPS↓Time↓FPS↑Storage (MB)↓
Ours w/o HexPlane RL(i,j)27.050.950.054 mins14012
Ours w/o initialization31.910.970.037.5 mins7918
Ours w/o φx26.670.950.078 mins8217
Ours w/o φr33.080.980.038 mins8317
Ours w/o φs33.020.980.038 mins8217
Ours34.050.980.028 mins8218
", + "image_path": "c365f2b894e193d77b3ab32acf3bf460c65f591ee3365c751895f3edd1a6255e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 49, + 196, + 286, + 287 + ], + "blocks": [ + { + "bbox": [ + 49, + 196, + 286, + 287 + ], + "lines": [ + { + "bbox": [ + 49, + 196, + 286, + 287 + ], + "spans": [ + { + "bbox": [ + 49, + 196, + 286, + 287 + ], + "type": "image", + "image_path": "c6bd6b65e62c5b3d545a492d97594d68dd04549c7125eda500e39c47fc4e9d18.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 59, + 296, + 273, + 307 + ], + "lines": [ + { + "bbox": [ + 59, + 296, + 273, + 307 + ], + "spans": [ + { + "bbox": [ + 59, + 296, + 273, + 307 + ], + "type": "text", + "content": "Figure 8. Visualization of composition with 4D Gaussians." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 47, + 318, + 286, + 503 + ], + "blocks": [ + { + "bbox": [ + 47, + 318, + 286, + 503 + ], + "lines": [ + { + "bbox": [ + 47, + 318, + 286, + 503 + ], + "spans": [ + { + "bbox": [ + 47, + 318, + 286, + 503 + ], + "type": "image", + "image_path": "32fa9e66fe525318a6f0c790ad66bfed2b98622fa8fd0d2a3b3a3133fb4dc8ba.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 511, + 287, + 544 + ], + "lines": [ + { + "bbox": [ + 46, + 511, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 511, + 287, + 544 + ], + "type": "text", + "content": "Figure 9. Visualization of the relationship between rendering speed and numbers of 3D Gaussians in the rendered screens. All the tests are finished in the synthesis dataset." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 566, + 287, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 566, + 287, + 662 + ], + "spans": [ + { + "bbox": [ + 46, + 566, + 287, + 662 + ], + "type": "text", + "content": "Composition with 4D Gaussians. Similar to dynamic3DGS [30], our proposed methods can also propose editing in 4D Gaussians in Fig. 8. Thanks to the explicit representation of 3D Gaussians, all the trained models can predict deformed 3D Gaussians in the same space following " + }, + { + "bbox": [ + 46, + 566, + 287, + 662 + ], + "type": "inline_equation", + "content": "\\mathcal{G}' = \\{\\mathcal{G}_1', \\mathcal{G}_2', \\dots, \\mathcal{G}_n'\\}" + }, + { + "bbox": [ + 46, + 566, + 287, + 662 + ], + "type": "text", + "content": " and differential rendering [57] can project all the point clouds into viewpoints by " + }, + { + "bbox": [ + 46, + 566, + 287, + 662 + ], + "type": "inline_equation", + "content": "\\hat{I} = \\mathcal{S}(M, \\mathcal{G}')" + }, + { + "bbox": [ + 46, + 566, + 287, + 662 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": "Analysis of Rendering Speed. As is shown in Fig. 
9, we also test the relationship between the number of points in the rendered screen and the rendering speed at the resolution of " + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "inline_equation", + "content": "800 \times 800" + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 198, + 545, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 198, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 304, + 198, + 545, + 293 + ], + "type": "text", + "content": "We found that if the number of rendered points is lower than 30000, the rendering speed can reach up to 90 FPS. The configuration of the Gaussian deformation fields is discussed in the appendix. To achieve real-time rendering speed, we should strike a balance among the rendering resolution, the 4D Gaussian representation (including the number of 3D Gaussians), the capacity of the Gaussian deformation field network, and any other hardware constraints." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 309, + 383, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 309, + 383, + 320 + ], + "spans": [ + { + "bbox": [ + 306, + 309, + 383, + 320 + ], + "type": "text", + "content": "5.5. Limitations" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 331, + 545, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 545, + 473 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 545, + 473 + ], + "type": "text", + "content": "Though 4D-GS can indeed attain rapid convergence and yield real-time rendering outcomes in many scenarios, there are a few key challenges to address. First, large motions, the absence of background points, and imprecise camera poses make it a struggle to optimize 4D Gaussians. What is more, 4D-GS still cannot split the joint motion of static and dynamic Gaussian parts under the monocular settings without any additional supervision. Finally, a more compact algorithm needs to be designed to handle urban-scale reconstruction due to the heavy querying of Gaussian deformation fields by huge numbers of 3D Gaussians." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 494, + 378, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 494, + 378, + 506 + ], + "spans": [ + { + "bbox": [ + 306, + 494, + 378, + 506 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 516, + 545, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 516, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 304, + 516, + 545, + 624 + ], + "type": "text", + "content": "This paper proposes 4D Gaussian splatting to achieve real-time dynamic scene rendering. An efficient deformation field network is constructed to accurately model Gaussian motions and shape deformations, where adjacent Gaussians are connected via a spatial-temporal structure encoder. Connections between Gaussians lead to more complete deformed geometry, effectively avoiding avulsion. Our 4D Gaussians can not only model dynamic scenes but also have the potential for 4D object tracking and editing."
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 643, + 403, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 643, + 403, + 656 + ], + "spans": [ + { + "bbox": [ + 306, + 643, + 403, + 656 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "This work was supported by the National Natural Science Foundation of China (No. 62376102). The authors would like to thank Haotong Lin for providing the quantitative results of Im4D [27]." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20317" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "text", + "content": "[1] Jad Abou-Chakra, Feras Dayoub, and Niko Sünderhauf. Particlererf: Particle based encoding for online neural radiance fields in dynamic scenes. arXiv preprint arXiv:2211.04041, 2022. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 203 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 203 + ], + "type": "text", + "content": "[2] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 5, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 205, + 288, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 288, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 288, + 270 + ], + "type": "text", + "content": "[3] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 272, + 288, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 272, + 288, + 327 + ], + "spans": [ + { + "bbox": [ + 53, + 272, + 288, + 327 + ], + "type": "text", + "content": "[4] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 328, + 288, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 328, + 288, + 373 + ], + "spans": [ + { + "bbox": [ + 53, + 328, + 288, + 373 + ], + "type": "text", + "content": "[5] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 130-141, 2023. 1, 2, 4, 5, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 374, + 288, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 374, + 288, + 430 + ], + "spans": [ + { + "bbox": [ + 53, + 374, + 288, + 430 + ], + "type": "text", + "content": "[6] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (ToG), 34(4):1-13, 2015. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 431, + 288, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 431, + 288, + 464 + ], + "spans": [ + { + "bbox": [ + 53, + 431, + 288, + 464 + ], + "type": "text", + "content": "[7] Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. ACM Siggraph Computer Graphics, 22(4): 65-74, 1988. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 466, + 288, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 466, + 288, + 520 + ], + "spans": [ + { + "bbox": [ + 53, + 466, + 288, + 520 + ], + "type": "text", + "content": "[8] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In SIGGRAPH Asia 2022 Conference Papers, pages 1-9, 2022. 1, 2, 4, 5, 6, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 522, + 288, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 522, + 288, + 588 + ], + "spans": [ + { + "bbox": [ + 53, + 522, + 288, + 588 + ], + "type": "text", + "content": "[9] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snively, and Richard Tucker. Deepview: View synthesis with learned gradient descent. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 590, + 288, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 288, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 288, + 645 + ], + "type": "text", + "content": "[10] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 
1, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 647, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 712 + ], + "type": "text", + "content": "[11] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12479–12488, 2023. 1, 2, 4, 5, 6, 7" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 714 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[12] Wanshui Gan, Hongbin Xu, Yi Huang, Shifeng Chen, and Naoto Yokoya. V4d: Voxel for 4d novel view synthesis. IEEE Transactions on Visualization and Computer Graphics, 2023. 2, 6, 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 118, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 162 + ], + "type": "text", + "content": "[13] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5712-5721, 2021. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 163, + 545, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 207 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 207 + ], + "type": "text", + "content": "[14] Xiangjun Gao, Jiaolong Yang, Jongyoo Kim, Sida Peng, Zicheng Liu, and Xin Tong. Mps-nerf: Generalizable 3d human rendering from multiview images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 209, + 545, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 209, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 209, + 545, + 262 + ], + "type": "text", + "content": "[15] Kaiwen Guo, Feng Xu, Yangang Wang, Yebin Liu, and Qionghai Dai. Robust non-rigid motion tracking and surface reconstruction using 10 regularization. In Proceedings of the IEEE International Conference on Computer Vision, pages 3083-3091, 2015. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 264, + 545, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 264, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 308, + 264, + 545, + 330 + ], + "type": "text", + "content": "[16] Kaiwen Guo, Peter Lincoln, Philip Davidson, Jay Busch, Xueming Yu, Matt Whalen, Geoff Harvey, Sergio Orts-Escolano, Rohit Pandey, Jason Dourgarian, et al. The relightables: Volumetric performance capture of humans with realistic relighting. ACM Transactions on Graphics (ToG), 38(6):1–19, 2019. 
2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 332, + 545, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 332, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 308, + 332, + 545, + 396 + ], + "type": "text", + "content": "[17] Xiang Guo, Jiadai Sun, Yuchao Dai, Guanying Chen, Xiaoting Ye, Xiao Tan, Errui Ding, Yumeng Zhang, and Jingdong Wang. Forward flow for novel view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16022-16033, 2023. 2, 5, 6, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 399, + 545, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 399, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 308, + 399, + 545, + 442 + ], + "type": "text", + "content": "[18] Tao Hu, Tao Yu, Zerong Zheng, He Zhang, Yebin Liu, and Matthias Zwicker. Hvtr: Hybrid volumetric-textural rendering for human avatars. In 2022 International Conference on 3D Vision (3DV), pages 197-208. IEEE, 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 444, + 545, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 444, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 308, + 444, + 545, + 487 + ], + "type": "text", + "content": "[19] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG), 42(4):1-14, 2023. 1, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 489, + 545, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 489, + 545, + 531 + ], + "spans": [ + { + "bbox": [ + 308, + 489, + 545, + 531 + ], + "type": "text", + "content": "[20] Leonid Keselman and Martial Hebert. Approximate differentiable rendering with algebraic surfaces. In European Conference on Computer Vision, pages 596-614. Springer, 2022. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 534, + 545, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 534, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 308, + 534, + 545, + 566 + ], + "type": "text", + "content": "[21] Leonid Keselman and Martial Hebert. Flexible techniques for differentiable rendering with 3d gaussians. arXiv preprint arXiv:2308.14737, 2023. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 568, + 545, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 568, + 545, + 643 + ], + "spans": [ + { + "bbox": [ + 308, + 568, + 545, + 643 + ], + "type": "text", + "content": "[22] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 2, 5, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 646, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 646, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 646, + 545, + 689 + ], + "type": "text", + "content": "[23] Zhong Li, Yu Ji, Wei Yang, Jinwei Ye, and Jingyi Yu. Robust 3d human motion reconstruction via dynamic template construction. In 2017 International Conference on 3D Vision (3DV), pages 496-505. IEEE, 2017. 
2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 691, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 714 + ], + "type": "text", + "content": "[24] Zhong Li, Minye Wu, Wangyiteng Zhou, and Jingyi Yu. 4d human body correspondences from panoramic depth maps." + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20318" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "text", + "content": "In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2877-2886, 2018. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 150 + ], + "type": "text", + "content": "[25] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6498-6508, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 287, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 287, + 185 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 287, + 185 + ], + "type": "text", + "content": "[26] Zhan Li, Zhang Chen, Zhong Li, and Yi Xu. Spacetime gaussian feature splatting for real-time dynamic view synthesis. arXiv preprint arXiv:2312.16812, 2023. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "type": "text", + "content": "[27] Haotong Lin, Sida Peng, Zhen Xu, Tao Xie, Xingyi He, Hu-jun Bao, and Xiaowei Zhou. High-fidelity and real-time novel view synthesis for dynamic scenes. In SIGGRAPH Asia Conference Proceedings, 2023. 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 232, + 287, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 232, + 287, + 275 + ], + "spans": [ + { + "bbox": [ + 48, + 232, + 287, + 275 + ], + "type": "text", + "content": "[28] Xingyu Liu, Mengyuan Yan, and Jeannette Bohg. Meteornet: Deep learning on dynamic 3d point cloud sequences. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9246-9255, 2019. 
3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 277, + 287, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 277, + 287, + 331 + ], + "spans": [ + { + "bbox": [ + 48, + 277, + 287, + 331 + ], + "type": "text", + "content": "[29] Yu-Lun Liu, Chen Gao, Andreas Meuleman, Hung-Yu Tseng, Ayush Saraf, Changil Kim, Yung-Yu Chuang, Johannes Kopf, and Jia-Bin Huang. Robust dynamic radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13-23, 2023. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 333, + 287, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 333, + 287, + 376 + ], + "spans": [ + { + "bbox": [ + 48, + 333, + 287, + 376 + ], + "type": "text", + "content": "[30] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In 3DV, 2024. 2, 3, 6, 7, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 377, + 287, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 377, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 48, + 377, + 287, + 443 + ], + "type": "text", + "content": "[31] Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7210-7219, 2021. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 445, + 287, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 445, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 48, + 445, + 287, + 498 + ], + "type": "text", + "content": "[32] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 1, 2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 501, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 501, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 501, + 287, + 544 + ], + "type": "text", + "content": "[33] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 1, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 546, + 287, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 546, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 48, + 546, + 287, + 590 + ], + "type": "text", + "content": "[34] Byeongjun Park and Changick Kim. Point-dynrf: Point-based dynamic radiance fields from a monocular video. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3171-3181, 2024. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 591, + 287, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 591, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 591, + 287, + 646 + ], + "type": "text", + "content": "[35] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 2, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "text", + "content": "[36] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 2, 4,5,6,7" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 137 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 137 + ], + "type": "text", + "content": "[37] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019. 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 140, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 140, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 140, + 545, + 194 + ], + "type": "text", + "content": "[38] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4252-4262, 2023. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 197, + 545, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 545, + 250 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 545, + 250 + ], + "type": "text", + "content": "[39] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10318–10327, 2021. 2, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 252, + 545, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 252, + 545, + 305 + ], + "spans": [ + { + "bbox": [ + 307, + 252, + 545, + 305 + ], + "type": "text", + "content": "[40] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 308, + 545, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 308, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 307, + 308, + 545, + 353 + ], + "type": "text", + "content": "[41] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 
3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 354, + 545, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 545, + 387 + ], + "type": "text", + "content": "[42] Darius Rückert, Linus Franke, and Marc Stamminger. Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics (ToG), 41(4):1-14, 2022. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 388, + 545, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 431 + ], + "type": "text", + "content": "[43] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4104-4113, 2016. 5, 6, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 434, + 545, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 434, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 307, + 434, + 545, + 498 + ], + "type": "text", + "content": "[44] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16632-16642, 2023. 1, 2, 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 501, + 545, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 501, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 307, + 501, + 545, + 566 + ], + "type": "text", + "content": "[45] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 5, 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 568, + 545, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 545, + 633 + ], + "type": "text", + "content": "[46] Zhuo Su, Lan Xu, Zerong Zheng, Tao Yu, Yebin Liu, and Lu Fang. Robustfusion: Human volumetric capture with data-driven visual cues using a rgbd camera. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part IV 16, pages 246-264. Springer, 2020. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "type": "text", + "content": "[47] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5459-5469, 2022. 1, 3, 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "text", + "content": "[48] Fengrui Tian, Shaoyi Du, and Yueqi Duan. 
Mononerf: Learning a generalizable dynamic radiance field from" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20319" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 105 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 105 + ], + "type": "text", + "content": "monocular videos. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17903-17913, 2023. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 106, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 106, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 106, + 287, + 149 + ], + "type": "text", + "content": "[49] Feng Wang, Zilong Chen, Guokang Wang, Yafei Song, and Huaping Liu. Masked space-time hash encoding for efficient dynamic scene reconstruction. Advances in neural information processing systems, 2023. 2, 5, 6, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 150, + 287, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 150, + 287, + 204 + ], + "spans": [ + { + "bbox": [ + 48, + 150, + 287, + 204 + ], + "type": "text", + "content": "[50] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, Yafei Song, and Huaping Liu. Mixed neural voxels for fast multiview video synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19706-19716, 2023. 2, 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 205, + 287, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 205, + 287, + 260 + ], + "spans": [ + { + "bbox": [ + 48, + 205, + 287, + 260 + ], + "type": "text", + "content": "[51] Yiming Wang, Qin Han, Marc Habermann, Kostas Dani-ilidis, Christian Theobalt, and Lingjie Liu. Neus2: Fast learning of neural implicit surfaces for multi-view reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3295–3306, 2023. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 261, + 287, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 261, + 287, + 304 + ], + "spans": [ + { + "bbox": [ + 48, + 261, + 287, + 304 + ], + "type": "text", + "content": "[52] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 304, + 287, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 304, + 287, + 358 + ], + "spans": [ + { + "bbox": [ + 48, + 304, + 287, + 358 + ], + "type": "text", + "content": "[53] Qingshan Xu, Weihang Kong, Wenbing Tao, and Marc Pollefeys. Multi-scale geometric consistency guided and planar prior assisted multi-view stereo. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(4):4945-4963, 2022. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 360, + 287, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 360, + 287, + 415 + ], + "spans": [ + { + "bbox": [ + 48, + 360, + 287, + 415 + ], + "type": "text", + "content": "[54] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5438–5448, 2022. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 415, + 287, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 415, + 287, + 458 + ], + "spans": [ + { + "bbox": [ + 48, + 415, + 287, + 458 + ], + "type": "text", + "content": "[55] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv:2310.10642, 2023. 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 460, + 287, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 460, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 48, + 460, + 287, + 491 + ], + "type": "text", + "content": "[56] Taoran Yi, Jiemin Fang, Xinggang Wang, and Wenyu Liu. Generalizable neural voxels for fast human radiance fields. arXiv preprint arXiv:2303.15387, 2023. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 492, + 287, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 492, + 287, + 536 + ], + "spans": [ + { + "bbox": [ + 48, + 492, + 287, + 536 + ], + "type": "text", + "content": "[57] Wang Yifan, Felice Serena, Shihao Wu, Cengiz Öz Tireli, and Olga Sorkine-Hornung. Differentiable surface splatting for point-based geometry processing. ACM Transactions on Graphics (TOG), 38(6):1-14, 2019. 2, 3, 4, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 537, + 287, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 537, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 48, + 537, + 287, + 581 + ], + "type": "text", + "content": "[58] Lequan Yu, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-net: Point cloud upsampling network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2790–2799, 2018. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 582, + 287, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 582, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 48, + 582, + 287, + 613 + ], + "type": "text", + "content": "[59] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 614, + 287, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 287, + 669 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 287, + 669 + ], + "type": "text", + "content": "[60] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 
6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "text", + "content": "[61] Kaichen Zhou, Jia-Xing Zhong, Sangyun Shin, Kai Lu, Yiyuan Yang, Andrew Markham, and Niki Trigoni. Dynpoint: Dynamic neural point for view synthesis. Advances in Neural Information Processing Systems, 36, 2024. 2, 3" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 72, + 546, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 72, + 546, + 118 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 546, + 118 + ], + "type": "text", + "content": "[62] Matthias Zwicker, Hanspeter Pfister, Jeroen Van Baar, and Markus Gross. Surface splatting. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 371-378, 2001. 3" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20320" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/cd2548ca-a539-45a3-adaf-36a364d6da68_content_list.json b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/cd2548ca-a539-45a3-adaf-36a364d6da68_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..56b1724f37ad2d7e83aa51c614734f33dddebf79 --- /dev/null +++ b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/cd2548ca-a539-45a3-adaf-36a364d6da68_content_list.json @@ -0,0 +1,1363 @@ +[ + { + "type": "text", + "text": "4D-DRESS: A 4D Dataset of Real-World Human Clothing With Semantic Annotations", + "text_level": 1, + "bbox": [ + 196, + 130, + 776, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wenbo Wang $^{*1}$ Hsuan-I Ho $^{*1}$ Chen Guo $^{1}$ Boxiang Rong $^{1}$ Artur Grigorev $^{1,2}$ \nJie Song $^{1}$ Juan Jose Zarate $^{\\dagger 1}$ Otmar Hilliges $^{1}$", + "bbox": [ + 91, + 200, + 869, + 248 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Department of Computer Science, ETH Zürich Max Planck Institute for Intelligent Systems, Tübingen", + "bbox": [ + 266, + 255, + 702, + 291 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://ait.ethz.ch/4d-dress", + "bbox": [ + 356, + 292, + 607, + 306 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5658c0cc0dd26a6d35c17dc2035b6186240029f9a36a4eee95c9df6c12a11f6a.jpg", + "image_caption": [ + "Figure 1. Overview of 4D-DRESS. We propose the first real-world 4D dataset of human clothing, capturing 64 human outfits in more than 520 motion sequences. These sequences include a) high-quality 4D textured scans; for each scan, we annotate b) vertex-level semantic labels, thereby obtaining c) the corresponding garment meshes and fitted SMPL(-X) body meshes." 
+ ], + "image_footnote": [], + "bbox": [ + 96, + 340, + 859, + 578 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 633, + 313, + 648 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The studies of human clothing for digital avatars have predominantly relied on synthetic datasets. While easy to collect, synthetic data often fall short in realism and fail to capture authentic clothing dynamics. Addressing this gap, we introduce 4D-DRESS, the first real-world 4D dataset advancing human clothing research with its high-quality 4D textured scans and garment meshes. 4D-DRESS captures 64 outfitsits in 520 human motion sequences, amounting to 78k textured scans. Creating a real-world clothing dataset is challenging, particularly in annotating and segmenting the extensive and complex 4D human scans. To address this, we develop a semi-automatic 4D human parsing pipeline. We efficiently combine a human-in-the-loop process with automation to accurately label 4D scans in di", + "bbox": [ + 75, + 665, + 472, + 876 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "verse garments and body movements. Leveraging precise annotations and high-quality garment meshes, we establish several benchmarks for clothing simulation and reconstruction. 4D-DRESS offers realistic and challenging data that complements synthetic sources, paving the way for advancements in research of lifelike human clothing.", + "bbox": [ + 500, + 635, + 892, + 726 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 738, + 632, + 753 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Human clothing is crucial in various applications such as 3D games, animations, and virtual try-on. Researchers are actively investigating algorithms for clothing reconstruction [12, 24, 34] and simulation [4, 5, 15], to achieve realistic clothing behavior, enhance user engagement, and enable cross-industry applications. These algorithms are frequently developed and assessed using synthetic datasets [3, 7, 53], since they comprise a) meshes covering various garment types and outfits and b) parametric body mod", + "bbox": [ + 496, + 763, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contributors † Corresponding author", + "bbox": [ + 76, + 886, + 334, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "550", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/8457d3e5f3e7044d85e2feb5ee14c7e6408f0c2dad00d8d8024116778b0e982a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset# of Outfits# of FramesData FormatTexturedSemantic LabelsLoose Garments
TailorNet [35]95.5kSMPL + Garments
ReSynth [33]2430kSMPLX + Point Clouds
CLOTH3D [3]8.5k2.1MSMPL + Garments
CLOTH4D [53]1k100kMesh + Garments
BEDLAM [7]111380kSMPL-X + Garments
D-LAYERS [39]5k700kSMPL + Garments
BUFF [51]614kScans + SMPL
CAPE [32]15140kSMPL+D
ActorsHQ [23]839kScans
X-Humans [40]2035kScans + SMPL-(X)
4DHumanOutfit [2]14459kScans + SMPL
4D-DRESS (Ours)6478kScans + SMPL(-X) + Garments
", + "bbox": [ + 163, + 88, + 803, + 296 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table 1. Summary of 4D clothed human datasets. The datasets highlighted in gray color are synthetic datasets while the others are real-world scans. # of Outfits: number of outfits included; # of Frames: total number of 3D human frames; Data Format: 3D representations of human bodies and garments; Textured: with textured map or not; Semantic Labels: with semantic labels for clothing or not; Loose Garments: containing challenging loose clothing such as dresses or not. 4D-DRESS demonstrates outstanding features against others.", + "bbox": [ + 75, + 304, + 893, + 363 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "els with diverse motions. While synthetic datasets lead in outfit quantity and the number of frames provided (refer to Tab. 1), there also presents a significant challenge in bridging the domain gap between the synthetic and real garments. Despite the recently released real-world 4D human datasets such as X-Humans [40], ActorsHQ [23], and 4DHumanOutfit [2], a key limitation persists: they lack accurately segmented garment meshes, offering only raw human scans. Moreover, these datasets are limited in the number of loose garments (e.g., jackets and dresses) or dynamic motions, which reduces their applicability as test benches. These challenges highlight the need for a real-world 4D dataset that provides semantic annotations and captures diverse garments across various body motions.", + "bbox": [ + 75, + 372, + 472, + 585 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we contribute 4D-DRESS, the first real-world dataset of human clothing with 4D semantic segmentation. We aim to provide an evaluation testbench with real-world data for tasks related to human clothing in computer vision and graphics. We capture over 520 human motion sequences featuring 64 distinct real-world human outfits in a high-end multi-view volumetric capture system, similar to the one used in [11]. The complete dataset comprises a total of 78k frames, each composed of an 80k-face triangle mesh, a 1k resolution textured map, and a set of 1k resolution multi-view images. As illustrated in Fig. 1, we provide a) high-quality 4D textured scans, b) vertex-level semantic labels for various clothing types, such as upper, lower, and outer garments, and c) garment meshes along with their registered SMPL(-X) body models.", + "bbox": [ + 75, + 590, + 468, + 818 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Capturing real-world 4D sequences of humans wearing various clothing and performing diverse motions requires dedicated high-end capture facilities. Moreover, processing these clips into accurately annotated and segmented 4D human scans presents significant challenges. To develop our", + "bbox": [ + 75, + 824, + 470, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "dataset, we tackled the task of labeling 78k high-resolution meshes at the vertex level. Given that the mesh topologies of consecutive frames do not inherently correspond, consistently propagating 3D vertex labels from one frame to the next is non-trivial. While previous methods [6, 36] attempted to fit a fixed-topology parametric body model to the scans, these template-based approaches still struggle with scenarios such as a jacket being lifted to reveal a shirt or the emergence of new vertices on a flowing coat as illustrated in the example shown in Fig. 3. 
Consequently, we opted for an alternative approach. We developed a semi-automatic and template-free 4D human parsing pipeline. Leveraging semantic maps from a 2D human parser [14] and a segmentation model [27], we extended these techniques to 4D, considering both multi-view and temporal consistency. Our pipeline accurately assigns vertex labels without manual intervention in $96.8\\%$ of frames. Within the remaining scans, only $1.5\\%$ of vertices require further rectification, addressed via a human-in-the-loop process.", + "bbox": [ + 496, + 372, + 893, + 660 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The quality of the ground-truth data in 4D-DRESS allows us to establish several evaluation benchmarks for diverse tasks, including clothing simulation, reconstruction, and human parsing. Our evaluation and analysis demonstrate that 4D-DRESS offers realistic and challenging human clothing that cannot be readily modeled by existing algorithms, thereby opening avenues for further research. In summary, our contributions include:", + "bbox": [ + 496, + 672, + 893, + 796 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- the first real-world 4D human clothing dataset comprising 4D textured scans, vertex-level semantic labels, garment meshes, and corresponding parametric body meshes.", + "- a semi-automatic and template-free 4D human parsing pipeline for efficient data annotation.", + "- evaluation benchmarks showing the utility of our dataset." + ], + "bbox": [ + 500, + 806, + 890, + 897 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "551", + "bbox": [ + 486, + 945, + 509, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 89, + 218, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4D clothed human dataset. Datasets featuring clothed humans can be divided into two categories. Firstly, synthetic datasets [3, 7, 33, 35, 39, 53] create large volume of synthetic data using graphic engines [44] and simulation tools [10] (Tab. 1 top). These datasets are easy to scale with ground truth semantic labels available by design. However, they often lack realism in human appearances, clothing deformations, and motion dynamics. Even though recent work [7, 46] attempted to achieve photorealistic human textures with manual efforts, it is challenging to precisely mimic the way real-world clothing moves and deforms. Therefore, it is essential to create datasets of real-world human clothing by capturing these intricate details.", + "bbox": [ + 75, + 109, + 468, + 305 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The second category (Tab. 1 bottom) involves using multi-view volumetric capture systems [11, 26] to collect datasets of people dressed in real-world clothing [2, 17, 20, 22, 23, 32, 40, 41, 43, 50, 51]. However, the resources required for capturing, storing, and processing this data are substantial, which limits the size of these publicly available datasets [2, 40, 51]. Moreover, these methods do not inherently provide labeled annotations, offering only temporally uncorrelated scans. This makes the raw data on these datasets less suitable for research focusing on human clothing. 
4D-DRESS gathers a variety of human subjects and outfits providing accurate semantic labels of human clothing, garment meshes, and SMPL/SMPL-X fits.", + "bbox": [ + 75, + 306, + 470, + 503 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Human parsing. Human parsing [49] is a specific task within semantic segmentation aimed at identifying detailed body parts and clothing labels. Conventionally, this challenge is tackled using deep neural networks, trained on images with their corresponding semantic labels [9, 13, 29]. Although these methods have been successful in 2D [14, 18, 19, 28, 30, 45], applying them to annotate 3D and 4D scans is still a challenge. Previous work has explored it using two distinct strategies. One strategy, used by SIZER [43] and MGN [6], involves rendering multi-view images and projecting parsing labels onto 3D meshes through a voting process. While this method considers consistency across multiple views, it overlooks temporal consistency and falls short of accurately labeling 4D scans. Another approach, used by ClothCap [36], registers all scans to a fixed-topology SMPL model [31] with per-vertex displacements. Yet, this method struggles with handling large motions and complex clothing due to limited template resolutions and model-fitting capabilities. This results in noisy labels near boundaries and loose garments. In contrast, our approach combines multiview voting and optical warping in a template-free pipeline, achieving both multi-view and temporal consistency.", + "bbox": [ + 75, + 507, + 470, + 840 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methodology", + "text_level": 1, + "bbox": [ + 76, + 849, + 212, + 867 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To accurately label each vertex within our 4D textured scan sequences, we leverage a semi-automatic parsing pipeline", + "bbox": [ + 76, + 869, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "that incorporates but minimizes manual efforts during the labeling process. Fig. 2 depicts the overall workflow of our pipeline. We first render 24 multi-view images of the current frame textured scan. We combine those images with the previous frame's multi-view images and labels to deploy three state-of-the-art tools to vote candidate labels for each rendered pixel (Sec. 3.1): a) human image parser, b) optical flow transfer, and c) segmentation masks. Next, we re-project and fuse all the 2D label votes via a Graph Cut optimization to obtain vertex-level semantic labels, considering neighboring and temporal consistency (Sec. 3.2). For those challenging frames where further labeling refinement is needed (around $3\\%$ in our dataset), we refined their semantic labels with a manual rectification step that we feed back into the optimization (Sec. 3.3). We describe the details of the pipeline within this section.", + "bbox": [ + 496, + 90, + 890, + 332 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Multi-view Parsing", + "text_level": 1, + "bbox": [ + 500, + 342, + 684, + 357 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "At each frame $k \\in \\{1, \\dots, N_{\\text{frame}}\\}$ , we render the 3D-mesh into a set of multi-view images, consisting of twelve horizontal, six upper, and six lower uniformly distributed views. We note this as $I_{img,n,k}$ with $n \\in \\{1, \\dots, N_{\\text{view}} = 24\\}$ . 
Within the multi-view space, we tackle the problem of assigning a label vote $l$ to each pixel $p$ using multi-view image-based models. The label $l$ varies for human skin, hair, shoes, upper clothing (shirts, hoodies), lower clothing (shorts, pants), and outer clothing (jackets, coats). For clarity, we omit the frame index $(k)$ in the following unless they are strictly needed. Please refer to Fig. 2 and the Supp. Mat. for more label definitions and the versatility of our parsing method with new labels like belts and socks.", + "bbox": [ + 496, + 358, + 890, + 554 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Human image parser (PAR). Our primary source of labels is a deep-learning image parser, which provides pixel-level votes for body parts and clothes. Specifically, we apply Graphonomy [14] to each view $n$ and store the labels as a new set of images $\\{I_{par}\\}$ (see Fig. 2). These labels are then accessible by the vote function $f_{par,n}(p,l)$ that checks if the image $I_{par,n}$ matches the value $l$ at the pixel $p$ , in which case returns 1, or 0 otherwise. This vote function and the other two defined below will be crucial later when setting our full-mesh optimization (Sec. 3.2).", + "bbox": [ + 496, + 559, + 890, + 710 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Optical flow transfer (OPT). This block leverages the previous frame's multi-view labels to provide temporal consistency. Specifically, we use the optical flow predictor RAFT [42] to transfer multi-view labels in the $k - 1$ frame to the current $k$ frame using the texture features on the rendered multi-view images. Similarly to the image parser above, the optical flow output goes to a set $\\{I_{opt}\\}$ . These labels are accessible via the vote function $f_{opt,n}(p,l)$ , which checks $I_{opt,n}$ and returns 1 if label $l$ is in $p$ and 0 otherwise.", + "bbox": [ + 496, + 714, + 890, + 851 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Segmentation masks (SAM). The multi-view votes generated by the Human Image Parser sometimes lack 3D consistency, particularly when dealing with open garments un", + "bbox": [ + 496, + 854, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "552", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/be342b7739a18ae48556547e268b25e016e371e890d2983624a6ab38295105ed.jpg", + "image_caption": [ + "Figure 2. 4D Human parsing method. We first render current and previous frame scans into multi-view images and labels. Then collect multi-view parsing results from the image parser, optical flows, and segmentation masks (Sec. 3.1). Finally, we project multi-view labels to 3D vertices and optimize vertex labels using the Graph Cut algorithm with vertex-wise unary energy and edge-wise binary energy (Sec. 3.2). The manual rectification labels can be easily introduced by checking multi-view rendered labels. (Sec. 3.3)." + ], + "image_footnote": [], + "bbox": [ + 125, + 85, + 843, + 303 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "der dynamic motions (cf. Fig. 3). While the votes derived from the optical flows provide a cross-frame prior, they may not accurately track every human part and can't identify newly emerging regions. Therefore, we introduce segmentation masks to regularize the label consistency within each masked region. 
We apply the Segment Anything Model [27] to each rendered image and obtain a self-define group of masks $M_{m,n}$ , with the index $m \\in \\{1, \\dots, N_{mask,n}\\}$ . Within a mask $M_{m,n}$ we compute the score function $S(l, M_{m,n})$ that fuses the votes of the image parser and the optical flow, normalized by the area of the mask:", + "bbox": [ + 75, + 376, + 473, + 541 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {S} (l, M _ {m, n}) = \\frac {\\sum_ {p \\in M _ {m , n}} \\left[ f _ {p a r , n} (p , l) + \\lambda_ {p o} f _ {o p t , n} (p , l) \\right]}{\\sum_ {p \\in M _ {m , n}} \\left(1 + \\lambda_ {p o}\\right)}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 561, + 468, + 631 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the factor $\\lambda_{po}$ weights the contribution of OPT over PAR. We now define a check function, $\\mathcal{C}(p,M_{m,n})$ , that returns 1 if the input evaluation pixel $p$ is in the mask $M_{m,n}$ and 0 otherwise. Finally, we obtain the corresponding vote function by summing over all the masks in the image:", + "bbox": [ + 76, + 631, + 468, + 708 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nf _ {s a m, n} (p, l) = \\sum_ {m \\in 1: N _ {m a s k, n}} \\mathcal {C} (p, M _ {m, n}) * \\mathcal {S} (l, M _ {m, n}). \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 715, + 468, + 750 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Graph Cut Optimization for Vertex Parsing", + "text_level": 1, + "bbox": [ + 76, + 762, + 449, + 779 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The next step in our semi-automatic process is combining all the labels obtained in Sec. 3.1 to assign a unique label to each scan vertex $v_{i}$ , with $i \\in \\{1, \\dots, N_{\\text{vert}}\\}$ . We frame this 3D semantic segmentation problem as a graph cut optimization: each 3D frame is interpreted as a graph $G$ , where vertices are now nodes and mesh edges are connections. Note that in a traditional Graph Cut, the values of the nodes are fixed, and the optimization computes only the", + "bbox": [ + 75, + 780, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "cost of breaking a connection. In our case, we have several votes for a vertex label, coming from three different tools and from concurrent multi-view projections. We define our cost function that consists of two terms,", + "bbox": [ + 496, + 376, + 892, + 436 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nE (L) = \\sum_ {i \\in 1: N _ {v e r t}} E _ {v e r t} \\left(l _ {i}\\right) + \\sum_ {i, j \\in 1: N _ {v e r t}} E _ {e d g e} \\left(l _ {i}, l _ {j}\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 441, + 890, + 477 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $L = \\{l_i\\}$ represents all the vertex labels in current frame. As described below, the term $E_{vert}$ combines the different votes into a single cost function, while $E_{edge}$ evaluates neighboring labels for consistent 3D segmentation. We follow an approach similar to [8].", + "bbox": [ + 496, + 484, + 890, + 559 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Vertex-wise unary energy. The cost function per node or Unary energy comes from combining the different votes obtained in the multi-view image processing (see Sec. 
3.1):", + "bbox": [ + 498, + 559, + 890, + 604 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nE _ {v e r t} \\left(l _ {i}\\right) = \\sum_ {n \\in 1: N _ {v i e w}} \\frac {\\lambda_ {p} E _ {p a r , n} + \\lambda_ {o} E _ {o p t , n} + \\lambda_ {s} E _ {s a m , n}}{N _ {v i e w}}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 501, + 611, + 890, + 662 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where we combine the human image parser $(E_{par})$ , the cross-frame optical prior $(E_{opt})$ , and the segmentation masks regularization $(E_{sam})$ contributions. All these energy terms can be written with the same equation by using the notation $\\mathcal{X} = \\{par, opt, sam\\}$ :", + "bbox": [ + 496, + 662, + 890, + 739 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nE _ {\\mathcal {X}, n} \\left(l _ {i}\\right) = \\sum_ {p \\in P \\left(v _ {i}, n\\right)} - w _ {\\mathcal {X}} \\left(p, v _ {i}\\right) f _ {\\mathcal {X}, n} \\left(p, l _ {i}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 540, + 747, + 890, + 781 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "meaning that energy of the method $\\mathcal{X}$ , calculated for a proposed label $l_{i}$ , is obtained by summing over those pixels $p\\in P(v_i,n)$ whose projections are within a triangle of $v_{i}$ . The weights for the cases of $E_{par}$ and $E_{opt}$ are set to the barycentric distance from the projected pixel $p$ to the vertex $v_{i}$ , which means $w_{par} = w_{opt} = u$ as in Fig. 2. For $E_{sam}$ instead, we set the weight $w_{sam}$ to the constant value 1 given that we look for an across-vertex regularization.", + "bbox": [ + 496, + 789, + 890, + 910 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "553", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/084aa22d3dd3b5dcb324e58f8a0f1888981cbd6de4d78f1a19dec9664621bae9.jpg", + "image_caption": [ + "Figure 3. Qualitative ablation study. We visualize the effectiveness of our 4D human parsing method on our 4D-DRESS dataset. From left to right, we show the improvements after adding the optical flow labels and mask scores to the multi-view image parser labels. The manual rectification efforts can be easily introduced from multi-view rendered labels, with which we achieve high-quality vertex annotations. The problem of isolated labels can be relieved by introducing the edge-wise binary energy term." + ], + "image_footnote": [], + "bbox": [ + 117, + 85, + 851, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Edge-wise binary energy. The Binary energy term penalizes the case of adjacent vertices with different labels, encouraging neighboring vertices to take the same label. 
Being $A$ the adjacency matrix of the graph $G$ and $\\delta$ the Dirac delta function, the edge cost can be calculated as follows:", + "bbox": [ + 75, + 371, + 468, + 446 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nE _ {e d g e} \\left(l _ {i}, l _ {j}\\right) = \\lambda_ {b} A _ {i, j} \\left(1 - \\delta \\left(l _ {i}, l _ {j}\\right)\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 145, + 453, + 468, + 470 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "which increases the energy by $\\lambda_{b}$ in the case that the adjacent vertices $v_{i}, v_{j}$ take different labels $l_{i} \\neq l_{j}$ .", + "bbox": [ + 75, + 474, + 468, + 506 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Manual Rectification of 3D Labels", + "text_level": 1, + "bbox": [ + 76, + 512, + 375, + 526 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When manual rectification is needed, we introduce it back into the multi-view space as an additional 2D annotation, and we recalculate the steps in Sec. 3.2. Concretely, we ran the graph cut optimization for the first time. Then, we rendered the vertex labels into multi-view labels, from which we let a person introduce corrections by comparing the resulting labels with the textured multi-view images. Similarly to the vote functions of the image parser and optical flow, we create a vote function $f_{man}(p,l)$ that accesses this set of images with rectified annotations and returns 1 if the label $l$ is assigned to the pixel $p$ and 0 otherwise.", + "bbox": [ + 75, + 529, + 468, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Similar to previous cases, we define a per-view manual energy $(E_{man})$ by using the variable $\\mathcal{X} = man$ in Eq. (5), and we added it to the global per-node energy $E_{vert}$ in Eq. (4). We use a constant large weight for $w_{man}$ to favor the manual annotation over other sources of voting where we rectified the labels. The final vertex labels $L^{*} = \\{l^{*}_{i}\\}$ are obtained after the second round of graph cut optimization. This manual rectification process finally changed $1.5\\%$ of vertices within $3.2\\%$ of all frames. The rectification process is detailed in Supp. Mat.", + "bbox": [ + 75, + 696, + 470, + 847 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 851, + 209, + 868 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To validate the effectiveness of our method, we conducted controlled experiments on two synthetic datasets,", + "bbox": [ + 75, + 869, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/c6427b8607437ca67316658e4a898a3c4029f3cdd50a73c4adeddb6f018483c2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CLOTH4D [53] | BEDLAM [7]
Method | Inner | Inner | Outer
SMPL+D [36] | 0.872 | 0.846 | 0.765
PAR Only [43] | 0.961 | 0.910 | 0.714
PAR+OPT | 0.969 | 0.963 | 0.942
PAR+OPT+SAM | 0.995 | 0.993 | 0.988
", + "bbox": [ + 519, + 369, + 867, + 479 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2. Baseline and ablation study. Mean accuracy of 4D human parsing methods applied on synthetic datasets. The Inner and Outer outfits are selected according to our definition in Sec. 5", + "bbox": [ + 498, + 482, + 890, + 523 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "CLOTH4D [53] and BEDLAM [7], where ground-truth semantic labels are available. We first compare our parsing method with a template-based baseline [36], that uses a semantic template (SMPL model with per-vertex displacements) to track and parse the clothed human scans. Due to the limited resolution and the fixed topology nature of the SMPL+D model, its parsing accuracy is lower than $90\\%$ on all synthetic outfits (see Tab. 2).", + "bbox": [ + 496, + 536, + 890, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We then compare our 4D parsing pipeline with several ablations and report them in Tab. 2. We use an example scan from 4D-DRESS to support the visualization of the ablation study in Fig. 3. Using PAR only shows reasonable results for upper and lower clothes. Yet, it predicts inconsistent labels at open garments like jackets and coats (Fig. 3 PAR Only), resulting in only $71.4\\%$ parsing accuracy on the BEDLAM dataset. The optical flow labels from the previous frame can serve as a cross-frame prior, yet accuracy may vary, particularly in fast-moving arms and cloth boundaries (Fig. 3 PAR+OPT). By fusing both of the previous multi-view labels via the segmentation masks, we achieve better boundary labels (Fig. 3 PAR+OPT+SAM), with $98.8\\%$ accuracy on the outer outfits in BEDLAM, with challenging open garments. Finally, we show the effect of introducing manual efforts to rectify incorrect labels (Fig. 3", + "bbox": [ + 496, + 659, + 892, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "554", + "bbox": [ + 485, + 944, + 509, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/2be5552cd65c63b91fa805f1893d335137d57f11e7c54e70a528ffcac67cca89.jpg", + "image_caption": [ + "Figure 4. Qualitative examples for clothing simulation methods. On the left are templates used for simulations. On the right are ground-truth geometries and original scans, LBS baseline results in body penetrations and overly stretched areas. Compared to other methods, HOOD better models dress and jackets and, with tuned material parameters, HOOD* achieves simulations closest to the ground truth." + ], + "image_footnote": [], + "bbox": [ + 99, + 88, + 869, + 303 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "With Manual). Our parsing method can also be deployed to annotate other existing 4D human datasets. We present examples of BUFF[51], X-Humans [40], and ActorsHQ[23] and additional qualitative results in Supp. Mat.", + "bbox": [ + 75, + 352, + 470, + 414 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Dataset Description", + "text_level": 1, + "bbox": [ + 76, + 424, + 266, + 441 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4D-DRESS contains 520 motion sequences (150 frames at 30 fps) in 64 real-world human outfits with a total of 78k frames. Each frame consists of multi-view images at 1k resolution, an 80k-face triangle 3D mesh with vertex annotations, and a 1k-resolution texture map. We also provide each garment with its canonical template to benefit the clothing simulation study. 
Finally, each 3D scan is accurately registered by SMPL/SMPL-X body models.", + "bbox": [ + 75, + 444, + 468, + 566 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To record 4D-DRESS we recruited 32 participants (18 female), with an average age of 24. The dataset consists of 4 dresses, 30 upper, 28 lower, and 32 outer garments. Participants were instructed to perform different dynamic motions for each 5-second sequence. For each participant, we capture two types of outfits: Inner Outfit comprising the inner layer dress/upper, and lower garments; and Outer Outfit with an additional layer of garment, such as open jackets or coats. A unique feature of 4D-DRESS is the challenging clothing deformations we captured. To quantify these deformations, we compute the mean distances from the garments to the registered SMPL body surfaces. The inner and outer outfits exhibit distance ranges up to $7.12\\mathrm{cm}$ and $14.76\\mathrm{cm}$ over all frames. This is twice as much as what we observed in the X-Humans dataset [40], for example. In the $10\\%$ most challenging frames, this increases to $20.09\\mathrm{cm}$ for outer outfits, highlighting the prevalence of challenging garments. Please refer to Supp. Mat. for dataset details.", + "bbox": [ + 75, + 566, + 470, + 839 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6. Benchmark Evaluation", + "text_level": 1, + "bbox": [ + 76, + 849, + 294, + 864 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "With high-quality 4D scans and diverse garment meshes in dynamic motions, 4D-DRESS serves as an ideal ground", + "bbox": [ + 75, + 869, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/722ba6aa6b62ca97512813f5db26a11ab151c89e7e072b787ce7348b55d39601.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Lower | Upper | Dress | Outer
Method | CD ↓ | EStr ↓ | CD ↓ | EStr ↓ | CD ↓ | EStr ↓ | CD ↓ | EStr ↓
LBS | 1.767 | 0.333 | 2.167 | 0.095 | 4.461 | 1.293 | 4.626 | 0.811
PBNS [4] | 1.885 | 0.107 | 2.687 | 0.040 | 4.869 | 0.643 | 4.859 | 0.107
NCS [5] | 1.716 | 0.017 | 2.112 | 0.016 | 4.548 | 0.031 | 4.738 | 0.025
HOOD [15] | 2.070 | 0.008 | 2.668 | 0.013 | 4.292 | 0.010 | 5.355 | 0.011
HOOD* | 0.924 | 0.010 | 1.308 | 0.015 | 2.463 | 0.009 | 2.833 | 0.009
", + "bbox": [ + 503, + 349, + 890, + 463 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3. Clothing simulation benchmark. CD is Chamfer Distance between the simulation and ground truth. $E_{str}$ denotes stretching energy with respect to the template.", + "bbox": [ + 498, + 465, + 890, + 508 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "truth for a variety of computer vision and graphics benchmarks. In our work, we outline several standard benchmarks conducted in these fields using our dataset. Our primary focus is on tasks related to clothing simulation (Sec. 6.1) and clothed human reconstruction (Sec. 6.2). Additionally, benchmarks on human parsing and human representation learning are included in our Supp. Mat.", + "bbox": [ + 498, + 513, + 890, + 619 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.1. Clothing Simulation", + "text_level": 1, + "bbox": [ + 500, + 626, + 691, + 641 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Experimental setup. We introduce a new benchmark for clothing simulation, leveraging the garment meshes from 4D-DRESS, which capture dynamical real-world clothing deformations. This benchmark evaluates three methods for modeling garment dynamics: PBNS [4], Neural Cloth Simulator (NCS [5]), and HOOD [15], as well as a baseline method that applies SMPL-based linear blend-skinning (LBS) to the template. We ran the simulations using T-posed templates extracted from static scans and compared the results to the ground-truth garment meshes across various pose sequences. Our evaluation metrics include the Chamfer Distance (CD), which compares the resulting mesh sequences with ground-truth point clouds, and the average stretching energy $(E_{str})$ calculated by measuring the difference in edge lengths between the simulated and template meshes. The experiments were conducted across four categories of garments (Lower, Upper, Dress, and Outer),", + "bbox": [ + 496, + 643, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "555", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f65e0803ab4942356dd538da773ef30e828b43dfc0b40e6ec6b0d78baf4e4f32.jpg", + "image_caption": [ + "Figure 5. Examples of clothed human reconstruction on 4D-DRESS. We evaluate state-of-the-art methods using both inner (Top) and outer (Bottom) outfits. We show that existing methods generally struggle with the challenging loose garments. Moreover, these approaches cannot faithfully recover realistic details such as clothing wrinkles." + ], + "image_footnote": [], + "bbox": [ + 122, + 89, + 851, + 277 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "with four garment templates in each category. We simulated clothing deformation for each garment in six different pose sequences, providing a comprehensive comparison of their ability to generate realistic motions.", + "bbox": [ + 75, + 330, + 470, + 390 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fine-tuning material parameters. To demonstrate the advantages of real-world garment meshes in 4D-DRESS, we also introduce a simple optimization-based strategy for inverse simulation using HOOD. Specifically, we optimize the material parameters fed into the HOOD model to minimize the simulations' Chamfer Distance to the ground-truth sequences and their stretching energy. This optimized version is denoted as HOOD*. For more details on the material optimization experiments, please refer to Supp. 
Mat.", + "bbox": [ + 75, + 392, + 468, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation results. The quantitative and qualitative comparisons of the clothing simulation methods are presented in Tab. 3 and Fig. 4 respectively. The LBS baseline and LBS-based approaches (PBNS and NCS) perform better with upper and lower garments, which exhibit limited free-flowing motions compared with the dress and outer garments. Conversely, HOOD excels with dresses, generating more natural, free-flowing motions and achieving lower stretching energy. However, if HOOD fails to generate realistic motions for a single frame, this error propagates to all subsequent frames. This issue does not occur in the LBS-based methods, which generate geometries independently for each frame. With finely-tuned material parameters, HOD* produces garment sequences that more faithfully replicate real-world behavior. We anticipate that future research in learned garment simulation will increasingly focus on modeling real-world garments made from complex heterogeneous materials. This will be a major step in creating realistically animated digital avatars, and we believe 4D-DRESS will be highly instrumental in this task.", + "bbox": [ + 75, + 531, + 470, + 834 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.2. Clothed Human Reconstruction", + "text_level": 1, + "bbox": [ + 76, + 837, + 357, + 852 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Experimental setup. We create a new benchmark for evaluating state-of-the-art clothed human reconstruction methods on the 4D-DRESS dataset. This benchmark is di", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/0730da84cd7f9816b3ec583e4c98d517649d9038ef2a86bd96f55d5e01e7b243.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Inner | Outer
Method | CD↓ | NC↑ | IoU↑ | CD↓ | NC↑ | IoU↑
PIFu [37] | 2.696 | 0.792 | 0.690 | 2.783 | 0.759 | 0.697
PIFuHD [38] | 2.426 | 0.793 | 0.739 | 2.393 | 0.763 | 0.743
PaMIR [52] | 2.520 | 0.805 | 0.706 | 2.608 | 0.777 | 0.715
ICON [47] | 2.473 | 0.798 | 0.752 | 2.832 | 0.762 | 0.756
PHORHUM [1] | 3.944 | 0.725 | 0.580 | 3.762 | 0.705 | 0.603
ECON [48] | 2.543 | 0.796 | 0.736 | 2.852 | 0.760 | 0.728
SiTH [21] | 2.110 | 0.824 | 0.755 | 2.322 | 0.794 | 0.749
", + "bbox": [ + 501, + 327, + 888, + 469 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4. Clothed human reconstruction benchmark. We computed Chamfer distance (CD), normal consistency (NC), and Intersection over Union (IoU) between ground truth and reconstructed meshes obtained from different baselines.", + "bbox": [ + 498, + 470, + 892, + 526 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "vided into three subtasks. First, we evaluate single-view human reconstruction utilizing images and high-quality 3D scans from our dataset. In addition, benefiting from the garment meshes in our dataset, we establish the first real-world benchmark for evaluating single-view clothing reconstruction. Finally, we assess video-based human reconstruction approaches leveraging the sequences in 4D-DRESS that capture rich motion dynamics of both human bodies and garments. In all the experiments, we report 3D metrics including Chamfer Distance (CD), Normal Consistency (NC), and Intersection over Union (IoU) to compare the predictions with ground-truth meshes.", + "bbox": [ + 496, + 532, + 890, + 715 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Single-view human reconstruction. We use the two test sets defined in Sec. 5 (denote as Outer and Inner) to evaluate the following single-view reconstruction methods: PIFu [37], PIFuHD [38], PaMIR [52], ICON [47], PHORHUM [1], ECON [48], and SiTH [21]. The evaluation results are summarized in Fig. 5 and Tab. 4. We observed that methods leveraging SMPL body models as guidance (i.e., ICON, ECON, SiTH) performed better in reconstructing inner clothing. However, their performance significantly declined when dealing with outer garments. On the other hand, end-to-end models like PIFu and PIFuHD demonstrated more stability with both clothing types. This", + "bbox": [ + 496, + 719, + 892, + 901 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "556", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/243a3f5a2c20eacf8a888d8b463bdd71c08c62fcbffd8fa9b5354e4927b91411.jpg", + "image_caption": [ + "Figure 6. Examples of clothing reconstruction on 4D-DRESS. We visualize the reconstructed garment meshes from different approaches. These methods trained on synthetic datasets failed to predict accurate clothing sizes and detailed wrinkles." + ], + "image_footnote": [], + "bbox": [ + 107, + 88, + 436, + 275 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/cd2f4ff89344ecf4b87a3ec600949615be2747c40d95021474396e6d3e0e5a60.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Shoes | Lower | Upper | Outer
Method | CD ↓ | IoU↑ | CD ↓ | IoU↑ | CD ↓ | IoU↑ | CD ↓ | IoU↑
BCNet [24] | - | - | 2.533 | 0.675 | 2.079 | 0.700 | 3.600 | 0.639
SMPLicit [12] | 2.619 | 0.621 | 2.101 | 0.698 | 2.452 | 0.617 | 3.359 | 0.618
ClothWild [34] | 3.657 | 0.548 | 2.690 | 0.582 | 3.279 | 0.533 | 4.163 | 0.588
", + "bbox": [ + 81, + 335, + 467, + 425 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "leads to an intriguing research question: whether the human body prior is necessary for reconstructing clothing. Qualitatively, we see that even the best-performing methods cannot perfectly reconstruct realistic free-flowing jackets as shown in Tab. 4. We believe 4D-DRESS will offer more valuable insights for research in clothed human reconstruction.", + "bbox": [ + 75, + 474, + 467, + 565 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Single-view clothes reconstruction. Clothes reconstruction has received relatively little attention compared to full-body human reconstruction. Leveraging the garment meshes in 4D-DRESS, we introduce the first real-world benchmark to assess prior art, including BCNet [24], SMPLicit [12], and ClothWild [34]. The results of different clothing types, as shown in Fig. 6, indicate a significant gap between the reconstructed and real clothing. Firstly, the clothing sizes produced by these methods are often inaccurate, suggesting a lack of effective use of image information for guidance. Moreover, the results typically lack geometric details like clothing wrinkles compared to full-body reconstruction. We report quantitative results in Tab. 5. We observed that the data-driven method (BCNet) performs better with inner clothing, while the generative fitting method (SMPLicit) shows more robustness to outer clothing, such as coats. However, none of these methods is designed for or trained on real-world data. The domain gap between synthetic and real data still limits their capability to produce accurate shapes and fine-grained details. We expect our benchmark and dataset will draw more research attention to the topic of real-world clothing reconstruction.", + "bbox": [ + 75, + 568, + 467, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0e4c67b1af81e24c5ffb4527ca8890f4ccb08b968c83878ae911823f9ff57700.jpg", + "image_caption": [ + "Figure 7. Video-based human reconstruction. Qualitative results of video-based human reconstruction methods on 4D-DRESS. Prior works struggle to reconstruct 3D human with challenging outfits and cannot recover the fine-grained surface details." + ], + "image_footnote": [], + "bbox": [ + 519, + 75, + 870, + 167 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/63da0d45b1dd49d21684720cba6b4466c949ffe5db37a7c47dbd6e087505b24a.jpg", + "table_caption": [ + "Table 5. Clothing reconstruction benchmark. We report Chamfer Distance (CD), and Intersection over Union (IoU) between the ground-truth garment meshes and the reconstructed clothing." + ], + "table_footnote": [], + "table_body": "
Inner | Outer
Method | CD↓ | NC↑ | IoU↑ | CD↓ | NC↑ | IoU↑
SelfRecon [25] | 3.180 | 0.729 | 0.754 | 4.027 | 0.683 | 0.745
Vid2Avatar [16] | 2.870 | 0.750 | 0.772 | 3.014 | 0.725 | 0.787
", + "bbox": [ + 501, + 229, + 890, + 303 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 6. Video-based human reconstruction. Results of video-based human reconstruction methods on 4D-DRESS.", + "bbox": [ + 500, + 305, + 890, + 332 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Video-based human reconstruction Leveraging the sequential 4D data in our dataset, we create a new benchmark for evaluating video-based human reconstruction methods. We applied Vid2Avatar [16] and SelfRecon [25] to obtain 4D reconstructions and compared them with the provided ground-truth 4D scans. As observed in Fig. 7, both methods struggle with diverse clothing styles and face challenges in reconstructing surface parts that greatly differ in topology from the human body, such as the open jacket. Moreover, there remains a noticeable discrepancy between the real geometry and the recovered surface details. Quantitatively, the existing methods cannot achieve satisfactory reconstruction results with outer garments, as demonstrated by a large performance degradation in Tab. 6. We believe 4D-DRESS provides essential data for advancing video-based human reconstruction methods, particularly in achieving detailed geometry recovery for challenging clothing.", + "bbox": [ + 496, + 337, + 890, + 593 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Discussion", + "text_level": 1, + "bbox": [ + 500, + 594, + 612, + 608 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations. Our current pipeline requires substantial computational time. The offline manual rectification process and garment mesh extraction also demand expertise in 3D editing and additional human efforts. These factors constrain the scalability of our dataset. With a goal of expanding more diverse subjects and clothing, real-time 4D annotation and rectification/editing will be exciting future work. Conclusion. 4D-DRESS is the first real-world 4D clothed human dataset with semantic annotations, aiming to bridge the gap between existing clothing algorithms and real-world human clothing. We demonstrate that 4D-DRESS is not only a novel data source but also a challenging benchmark for clothing simulation, reconstruction, and other related tasks. We believe that 4D-DRESS can support a wide range of endeavors and foster research progress by providing high-quality 4D data in life like human clothing.", + "bbox": [ + 496, + 613, + 890, + 853 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. This work was partially supported by the Swiss SERI Consolidation Grant \"AI-PERCEIVE\". AG was supported in part by the Max Planck ETH CLS.", + "bbox": [ + 498, + 854, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "557", + "bbox": [ + 485, + 944, + 509, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Thiemo Alldieck, Mihai Zanfir, and Cristian Sminchisescu. Photorealistic monocular 3d reconstruction of humans wearing clothing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 7", + "[2] Matthieu Armando, Laurence Boissieux, Edmond Boyer, Jean-Sebastien Franco, Martin Humenberger, Christophe Legras, Vincent Leroy, Mathieu Marsot, Julien Pansiot, Sergi Pujades, Rim Rekik, Gregory Rogez, Anilkumar Swamy, and Stefanie Wuhrer. 
4dhumanoutfit: a multi-subject 4d dataset of human motion sequences in varying outfits exhibiting large displacements. Computer Vision and Image Understanding, 2023. 2, 3", + "[3] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Cloth3d: clothed 3d humans. In Proceedings of the European Conference on Computer Vision (ECCV), pages 344-359. Springer, 2020. 1, 2, 3", + "[4] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Pbs: Physically based neural simulation for unsupervised garment pose space deformation. ACM Transactions on Graphics (TOG), 40(6), 2021. 1, 6", + "[5] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Neural cloth simulation. ACM Transactions on Graphics (TOG), 41(6):1-14, 2022. 1, 6", + "[6] Bharat Lal Bhatnagar, Garvita Tiwari, Christian Theobalt, and Gerard Pons-Moll. Multi-garment net: Learning to dress 3d people from images. In Proceedings of the IEEE International Conference on Computer Vision (ICCV). IEEE, 2019. 2, 3", + "[7] Michael J. Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. BEDLAM: A synthetic dataset of bodies exhibiting detailed lifelike animated motion. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 8726-8737, 2023. 1, 2, 3, 5", + "[8] Y. Boykov, O. Veksler, and R. Zabih. Fast approximate energy minimization via graph cuts. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 23(11): 1222-1239, 2001. 4", + "[9] Xianjie Chen, Roozbeh Mottaghi, Xiaobai Liu, Sanja Fidler, Raquel Urtasun, and Alan Yuille. Detect what you can: Detecting and representing objects using holistic models and body parts. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2014. 3", + "[10] CLO. https://www.clo3d.com, 2022. 3", + "[11] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (TOG), 34(4):1-13, 2015. 2, 3", + "[12] Enric Corona, Albert Pumarola, Guillem Alenyà, Gerard Pons-Moll, and Francesc Moreno-Noguer. Semplicit: Topology-aware generative model for clothed people. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1, 8", + "[13] Ke Gong, Xiaodan Liang, Yicheng Li, Yimin Chen, Ming Yang, and Liang Lin. Instance-level human parsing via part" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "grouping network. In Proceedings of the European Conference on Computer Vision (ECCV), pages 770-785, 2018. 3", + "[14] Ke Gong, Yiming Gao, Xiaodan Liang, Xiaohui Shen, Meng Wang, and Liang Lin. Graphonomy: Universal human parsing via graph transfer learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3", + "[15] Artur Grigorev, Bernhard Thomaszewski, Michael J. Black, and Otmar Hilliges. Hood: Hierarchical graphs for generalized modelling of clothing dynamics. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 16965-16974, 2023. 1, 6", + "[16] Chen Guo, Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Vid2 avatar: 3d avatar reconstruction from videos in the wild via self-supervised scene decomposition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 
8", + "[17] Sang-Hun Han, Min-Gyu Park, Ju Hong Yoon, Ju-Mi Kang, Young-Jae Park, and Hae-Gon Jeon. High-fidelity 3d human digitization from single 2k resolution images. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3", + "[18] Haoyu He, Jing Zhang, Qiming Zhang, and Dacheng Tao. Grapy-ml: Graph pyramid mutual learning for cross-dataset human parsing. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2020. 3", + "[19] Haoyu He, Jing Zhang, Bhavani Thuraisingham, and Dacheng Tao. Progressive one-shot human parsing. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2021. 3", + "[20] Zhu Heming, Cao Yu, Jin Hang, Chen Weikai, Du Dong, Wang Zhangye, Cui Shuguang, and Han Xiaoguang. Deep fashion3d: A dataset and benchmark for 3d garment reconstruction from single images. In Proceedings of the European Conference on Computer Vision (ECCV), pages 512-530. Springer International Publishing, 2020. 3", + "[21] Hsuan-I Ho, Jie Song, and Otmar Hilliges. Sith: Single-view textured human reconstruction with image-conditioned diffusion. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 7", + "[22] Jie Song Hsuan-I Ho, Lixin Xue and Otmar Hilliges. Learning locally editable virtual humans. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3", + "[23] Mustafa Işik, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 2, 3, 6", + "[24] Boyi Jiang, Juyong Zhang, Yang Hong, Jinhao Luo, Ligang Liu, and Hujun Bao. BCnet: Learning body and cloth shape from a single image. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 1, 8", + "[25] Boyi Jiang, Yang Hong, Hujun Bao, and Juyong Zhang. Selfrecon: Self reconstruction your digital avatar from monocular video. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 8" + ], + "bbox": [ + 503, + 92, + 893, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "558", + "bbox": [ + 486, + 945, + 511, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[26] Hanbyul Joo, Tomas Simon, Xulong Li, Hao Liu, Lei Tan, Lin Gui, Sean Banerjee, Timothy Scott Godisart, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social interaction capture. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2017. 3", + "[27] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollar, and Ross Girshick. Segment anything. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 4015-4026, 2023. 2, 4", + "[28] Peike Li, Yunqiu Xu, Yunchao Wei, and Yi Yang. Self-correction for human parsing. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2020. 3", + "[29] X. Liang, K. Gong, X. Shen, and L. Lin. Look into person: Joint body parsing & pose estimation network and a new benchmark. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 41(04):871-885, 2019. 3", + "[30] Kunliang Liu, Ouk Choi, Jianming Wang, and Wonjun Hwang. 
Cdgnet: Class distribution guided network for human parsing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4473-4482, 2022. 3", + "[31] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multiperson linear model. ACM Transactions on Graphics (TOG), 34(6):248:1-248:16, 2015. 3", + "[32] Qianli Ma, Jinlong Yang, Anurag Ranjan, Sergi Pujades, Gerard Pons-Moll, Siyu Tang, and Michael J. Black. Learning to Dress 3D People in Generative Clothing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 3", + "[33] Qianli Ma, Jinlong Yang, Siyu Tang, and Michael J. Black. The power of points for modeling humans in clothing. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2, 3", + "[34] Gyeongsik Moon, Hyeongjin Nam, Takaaki Shiratori, and Kyoung Mu Lee. 3d clothed human reconstruction in the wild. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 1, 8", + "[35] Chaitanya Patel, Zhouyingcheng Liao, and Gerard Pons-Moll. Tailornet: Predicting clothing in 3d as a function of human pose, shape and garment style. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). IEEE, 2020. 2, 3", + "[36] Gerard Pons-Moll, Sergi Pujades, Sonny Hu, and Michael J. Black. Clothcap: Seamless 4d clothing capture and retargeting. ACM Transactions on Graphics (TOG), 36(4), 2017. 2, 3, 5", + "[37] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019. 7", + "[38] Shunsuke Saito, Tomas Simon, Jason Saragih, and Hanbyul Joo. Pifuhd: Multi-level pixel-aligned implicit function for" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "high-resolution 3d human digitization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 7", + "[39] Yidi Shao, Chen Change Loy, and Bo Dai. Towards multilayered 3d garments animation. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2, 3", + "[40] Kaiyue Shen, Chen Guo, Manuel Kaufmann, Juan Zarate, Julien Valentin, Jie Song, and Otmar Hilliges. X-avatar: Expressive human avatars. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3, 6", + "[41] Zhaoqi Su, Tao Yu, Yangang Wang, and Yebin Liu. Deepcloth: Neural garment representation for shape and style editing. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 45(2):1581-1593, 2023. 3", + "[42] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision (ECCV), pages 402-419. Springer, 2020. 3", + "[43] Garvita Tiwari, Bharat Lal Bhatnagar, Tony Tung, and Gerard Pons-Moll. Sizer: A dataset and model for parsing 3d clothing and learning size sensitive 3d clothing. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 3, 5", + "[44] Unreal Engine 5. https://www.unrealengine.com, 2022.3", + "[45] Wenguan Wang, Hailong Zhu, Jifeng Dai, Yanwei Pang, Jianbing Shen, and Ling Shao. Hierarchical human parsing with typed part-relation reasoning. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3", + "[46] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. Fake it till you make it: face analysis in the wild using synthetic data alone. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 3681-3691, 2021. 3", + "[47] Yuliang Xiu, Jinlong Yang, Dimitrios Tzionas, and Michael J. Black. ICON: Implicit Clothed humans Obtained from Normals. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 7", + "[48] Yuliang Xiu, Jinlong Yang, Xu Cao, Dimitrios Tzionas, and Michael J. Black. ECON: Explicit Clothed humans Optimized via Normal integration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 7", + "[49] Lu Yang, Wenhe Jia, Shan Li, and Qing Song. Deep learning technique for human parsing: A survey and outlook. arXiv preprint arXiv:2301.00394, 2023. 3", + "[50] Tao Yu, Zerong Zheng, Kaiwen Guo, Pengpeng Liu, Qionghai Dai, and Yebin Liu. Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3", + "[51] Chao Zhang, Sergi Pujades, Michael J. Black, and Gerard Pons-Moll. Detailed, accurate, human shape estimation from clothed 3d scan sequences. In Proceedings of the IEEE" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "559", + "bbox": [ + 486, + 945, + 511, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 2, 3, 6", + "[52] Zerong Zheng, Tao Yu, Yebin Liu, and Qionghai Dai. Pamir: Parametric model-conditioned implicit representation for image-based human reconstruction. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2021. 7", + "[53] Xingxing Zou, Xintong Han, and Waikeung Wong. Cloth4d: A dataset for clothed human reconstruction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 12847-12857, 2023. 1, 2, 3, 5" + ], + "bbox": [ + 78, + 90, + 468, + 247 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "560", + "bbox": [ + 486, + 945, + 509, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/cd2548ca-a539-45a3-adaf-36a364d6da68_model.json b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/cd2548ca-a539-45a3-adaf-36a364d6da68_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f2cf291c52874f6dec4bf2a18402126f8e6e3b --- /dev/null +++ b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/cd2548ca-a539-45a3-adaf-36a364d6da68_model.json @@ -0,0 +1,1982 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.197, + 0.131, + 0.777, + 0.177 + ], + "angle": 0, + "content": "4D-DRESS: A 4D Dataset of Real-World Human Clothing With Semantic Annotations" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.202, + 0.87, + 0.249 + ], + "angle": 0, + "content": "Wenbo Wang\\(^{*1}\\) Hsuan-I Ho\\(^{*1}\\) Chen Guo\\(^{1}\\) Boxiang Rong\\(^{1}\\) Artur Grigorev\\(^{1,2}\\) \nJie Song\\(^{1}\\) Juan Jose Zarate\\(^{\\dagger 1}\\) Otmar Hilliges\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.267, + 0.256, + 0.704, + 0.292 + ], + "angle": 0, + "content": "Department of Computer Science, ETH Zürich Max Planck Institute for Intelligent Systems, Tübingen" + }, + { + "type": "text", + "bbox": [ + 0.357, + 0.294, + 0.608, + 0.308 + ], + "angle": 0, + "content": "https://ait.ethz.ch/4d-dress" + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.342, + 0.861, + 0.579 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.58, + 0.893, + 0.622 + ], + "angle": 0, + "content": "Figure 1. Overview of 4D-DRESS. We propose the first real-world 4D dataset of human clothing, capturing 64 human outfits in more than 520 motion sequences. These sequences include a) high-quality 4D textured scans; for each scan, we annotate b) vertex-level semantic labels, thereby obtaining c) the corresponding garment meshes and fitted SMPL(-X) body meshes." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.634, + 0.314, + 0.65 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.666, + 0.473, + 0.877 + ], + "angle": 0, + "content": "The studies of human clothing for digital avatars have predominantly relied on synthetic datasets. While easy to collect, synthetic data often fall short in realism and fail to capture authentic clothing dynamics. Addressing this gap, we introduce 4D-DRESS, the first real-world 4D dataset advancing human clothing research with its high-quality 4D textured scans and garment meshes. 4D-DRESS captures 64 outfits in 520 human motion sequences, amounting to 78k textured scans. Creating a real-world clothing dataset is challenging, particularly in annotating and segmenting the extensive and complex 4D human scans. To address this, we develop a semi-automatic 4D human parsing pipeline. We efficiently combine a human-in-the-loop process with automation to accurately label 4D scans in di
These algorithms are frequently developed and assessed using synthetic datasets [3, 7, 53], since they comprise a) meshes covering various garment types and outfits and b) parametric body mod" + }, + { + "type": "page_footnote", + "bbox": [ + 0.078, + 0.887, + 0.336, + 0.901 + ], + "angle": 0, + "content": "* Equal contributors † Corresponding author" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "550" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.164, + 0.089, + 0.805, + 0.297 + ], + "angle": 0, + "content": "
Dataset# of Outfits# of FramesData FormatTexturedSemantic LabelsLoose Garments
TailorNet [35]95.5kSMPL + Garments
ReSynth [33]2430kSMPLX + Point Clouds
CLOTH3D [3]8.5k2.1MSMPL + Garments
CLOTH4D [53]1k100kMesh + Garments
BEDLAM [7]111380kSMPL-X + Garments
D-LAYERS [39]5k700kSMPL + Garments
BUFF [51]614kScans + SMPL
CAPE [32]15140kSMPL+D
ActorsHQ [23]839kScans
X-Humans [40]2035kScans + SMPL-(X)
4DHumanOutfit [2]14459kScans + SMPL
4D-DRESS (Ours)6478kScans + SMPL(-X) + Garments
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.305, + 0.895, + 0.364 + ], + "angle": 0, + "content": "Table 1. Summary of 4D clothed human datasets. The datasets highlighted in gray color are synthetic datasets while the others are real-world scans. # of Outfits: number of outfits included; # of Frames: total number of 3D human frames; Data Format: 3D representations of human bodies and garments; Textured: with textured map or not; Semantic Labels: with semantic labels for clothing or not; Loose Garments: containing challenging loose clothing such as dresses or not. 4D-DRESS demonstrates outstanding features against others." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.373, + 0.473, + 0.587 + ], + "angle": 0, + "content": "els with diverse motions. While synthetic datasets lead in outfit quantity and the number of frames provided (refer to Tab. 1), there also presents a significant challenge in bridging the domain gap between the synthetic and real garments. Despite the recently released real-world 4D human datasets such as X-Humans [40], ActorsHQ [23], and 4DHumanOutfit [2], a key limitation persists: they lack accurately segmented garment meshes, offering only raw human scans. Moreover, these datasets are limited in the number of loose garments (e.g., jackets and dresses) or dynamic motions, which reduces their applicability as test benches. These challenges highlight the need for a real-world 4D dataset that provides semantic annotations and captures diverse garments across various body motions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.592, + 0.47, + 0.819 + ], + "angle": 0, + "content": "In this work, we contribute 4D-DRESS, the first real-world dataset of human clothing with 4D semantic segmentation. We aim to provide an evaluation testbench with real-world data for tasks related to human clothing in computer vision and graphics. We capture over 520 human motion sequences featuring 64 distinct real-world human outfits in a high-end multi-view volumetric capture system, similar to the one used in [11]. The complete dataset comprises a total of 78k frames, each composed of an 80k-face triangle mesh, a 1k resolution textured map, and a set of 1k resolution multi-view images. As illustrated in Fig. 1, we provide a) high-quality 4D textured scans, b) vertex-level semantic labels for various clothing types, such as upper, lower, and outer garments, and c) garment meshes along with their registered SMPL(-X) body models." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Capturing real-world 4D sequences of humans wearing various clothing and performing diverse motions requires dedicated high-end capture facilities. Moreover, processing these clips into accurately annotated and segmented 4D human scans presents significant challenges. To develop our" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.373, + 0.895, + 0.661 + ], + "angle": 0, + "content": "dataset, we tackled the task of labeling 78k high-resolution meshes at the vertex level. Given that the mesh topologies of consecutive frames do not inherently correspond, consistently propagating 3D vertex labels from one frame to the next is non-trivial. 
While previous methods [6, 36] attempted to fit a fixed-topology parametric body model to the scans, these template-based approaches still struggle with scenarios such as a jacket being lifted to reveal a shirt or the emergence of new vertices on a flowing coat as illustrated in the example shown in Fig. 3. Consequently, we opted for an alternative approach. We developed a semi-automatic and template-free 4D human parsing pipeline. Leveraging semantic maps from a 2D human parser [14] and a segmentation model [27], we extended these techniques to 4D, considering both multi-view and temporal consistency. Our pipeline accurately assigns vertex labels without manual intervention in \\(96.8\\%\\) of frames. Within the remaining scans, only \\(1.5\\%\\) of vertices require further rectification, addressed via a human-in-the-loop process." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.673, + 0.895, + 0.797 + ], + "angle": 0, + "content": "The quality of the ground-truth data in 4D-DRESS allows us to establish several evaluation benchmarks for diverse tasks, including clothing simulation, reconstruction, and human parsing. Our evaluation and analysis demonstrate that 4D-DRESS offers realistic and challenging human clothing that cannot be readily modeled by existing algorithms, thereby opening avenues for further research. In summary, our contributions include:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.808, + 0.892, + 0.852 + ], + "angle": 0, + "content": "- the first real-world 4D human clothing dataset comprising 4D textured scans, vertex-level semantic labels, garment meshes, and corresponding parametric body meshes." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.854, + 0.891, + 0.882 + ], + "angle": 0, + "content": "- a semi-automatic and template-free 4D human parsing pipeline for efficient data annotation." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.884, + 0.891, + 0.898 + ], + "angle": 0, + "content": "- evaluation benchmarks showing the utility of our dataset." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.808, + 0.892, + 0.898 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.51, + 0.957 + ], + "angle": 0, + "content": "551" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.09, + 0.22, + 0.106 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.11, + 0.47, + 0.306 + ], + "angle": 0, + "content": "4D clothed human dataset. Datasets featuring clothed humans can be divided into two categories. Firstly, synthetic datasets [3, 7, 33, 35, 39, 53] create large volume of synthetic data using graphic engines [44] and simulation tools [10] (Tab. 1 top). These datasets are easy to scale with ground truth semantic labels available by design. However, they often lack realism in human appearances, clothing deformations, and motion dynamics. Even though recent work [7, 46] attempted to achieve photorealistic human textures with manual efforts, it is challenging to precisely mimic the way real-world clothing moves and deforms. Therefore, it is essential to create datasets of real-world human clothing by capturing these intricate details." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.307, + 0.471, + 0.504 + ], + "angle": 0, + "content": "The second category (Tab. 1 bottom) involves using multi-view volumetric capture systems [11, 26] to collect datasets of people dressed in real-world clothing [2, 17, 20, 22, 23, 32, 40, 41, 43, 50, 51]. 
However, the resources required for capturing, storing, and processing this data are substantial, which limits the size of these publicly available datasets [2, 40, 51]. Moreover, these methods do not inherently provide labeled annotations, offering only temporally uncorrelated scans. This makes the raw data on these datasets less suitable for research focusing on human clothing. 4D-DRESS gathers a variety of human subjects and outfits providing accurate semantic labels of human clothing, garment meshes, and SMPL/SMPL-X fits." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.508, + 0.471, + 0.842 + ], + "angle": 0, + "content": "Human parsing. Human parsing [49] is a specific task within semantic segmentation aimed at identifying detailed body parts and clothing labels. Conventionally, this challenge is tackled using deep neural networks, trained on images with their corresponding semantic labels [9, 13, 29]. Although these methods have been successful in 2D [14, 18, 19, 28, 30, 45], applying them to annotate 3D and 4D scans is still a challenge. Previous work has explored it using two distinct strategies. One strategy, used by SIZER [43] and MGN [6], involves rendering multi-view images and projecting parsing labels onto 3D meshes through a voting process. While this method considers consistency across multiple views, it overlooks temporal consistency and falls short of accurately labeling 4D scans. Another approach, used by ClothCap [36], registers all scans to a fixed-topology SMPL model [31] with per-vertex displacements. Yet, this method struggles with handling large motions and complex clothing due to limited template resolutions and model-fitting capabilities. This results in noisy labels near boundaries and loose garments. In contrast, our approach combines multiview voting and optical warping in a template-free pipeline, achieving both multi-view and temporal consistency." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.851, + 0.214, + 0.868 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.902 + ], + "angle": 0, + "content": "To accurately label each vertex within our 4D textured scan sequences, we leverage a semi-automatic parsing pipeline" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.333 + ], + "angle": 0, + "content": "that incorporates but minimizes manual efforts during the labeling process. Fig. 2 depicts the overall workflow of our pipeline. We first render 24 multi-view images of the current frame textured scan. We combine those images with the previous frame's multi-view images and labels to deploy three state-of-the-art tools to vote candidate labels for each rendered pixel (Sec. 3.1): a) human image parser, b) optical flow transfer, and c) segmentation masks. Next, we re-project and fuse all the 2D label votes via a Graph Cut optimization to obtain vertex-level semantic labels, considering neighboring and temporal consistency (Sec. 3.2). For those challenging frames where further labeling refinement is needed (around \\(3\\%\\) in our dataset), we refined their semantic labels with a manual rectification step that we feed back into the optimization (Sec. 3.3). We describe the details of the pipeline within this section." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.343, + 0.685, + 0.358 + ], + "angle": 0, + "content": "3.1. 
Multi-view Parsing" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.359, + 0.892, + 0.555 + ], + "angle": 0, + "content": "At each frame \\( k \\in \\{1, \\dots, N_{\\text{frame}}\\} \\), we render the 3D-mesh into a set of multi-view images, consisting of twelve horizontal, six upper, and six lower uniformly distributed views. We note this as \\( I_{img,n,k} \\) with \\( n \\in \\{1, \\dots, N_{\\text{view}} = 24\\} \\). Within the multi-view space, we tackle the problem of assigning a label vote \\( l \\) to each pixel \\( p \\) using multi-view image-based models. The label \\( l \\) varies for human skin, hair, shoes, upper clothing (shirts, hoodies), lower clothing (shorts, pants), and outer clothing (jackets, coats). For clarity, we omit the frame index \\( (k) \\) in the following unless they are strictly needed. Please refer to Fig. 2 and the Supp. Mat. for more label definitions and the versatility of our parsing method with new labels like belts and socks." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.56, + 0.892, + 0.711 + ], + "angle": 0, + "content": "Human image parser (PAR). Our primary source of labels is a deep-learning image parser, which provides pixel-level votes for body parts and clothes. Specifically, we apply Graphonomy [14] to each view \\( n \\) and store the labels as a new set of images \\( \\{I_{par}\\} \\) (see Fig. 2). These labels are then accessible by the vote function \\( f_{par,n}(p,l) \\) that checks if the image \\( I_{par,n} \\) matches the value \\( l \\) at the pixel \\( p \\), in which case returns 1, or 0 otherwise. This vote function and the other two defined below will be crucial later when setting our full-mesh optimization (Sec. 3.2)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.715, + 0.892, + 0.852 + ], + "angle": 0, + "content": "Optical flow transfer (OPT). This block leverages the previous frame's multi-view labels to provide temporal consistency. Specifically, we use the optical flow predictor RAFT [42] to transfer multi-view labels in the \\(k - 1\\) frame to the current \\(k\\) frame using the texture features on the rendered multi-view images. Similarly to the image parser above, the optical flow output goes to a set \\(\\{I_{opt}\\}\\). These labels are accessible via the vote function \\(f_{opt,n}(p,l)\\), which checks \\(I_{opt,n}\\) and returns 1 if label \\(l\\) is in \\(p\\) and 0 otherwise." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Segmentation masks (SAM). The multi-view votes generated by the Human Image Parser sometimes lack 3D consistency, particularly when dealing with open garments un" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "552" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.126, + 0.087, + 0.844, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.306, + 0.895, + 0.364 + ], + "angle": 0, + "content": "Figure 2. 4D Human parsing method. We first render current and previous frame scans into multi-view images and labels. Then collect multi-view parsing results from the image parser, optical flows, and segmentation masks (Sec. 3.1). Finally, we project multi-view labels to 3D vertices and optimize vertex labels using the Graph Cut algorithm with vertex-wise unary energy and edge-wise binary energy (Sec. 3.2). The manual rectification labels can be easily introduced by checking multi-view rendered labels. (Sec. 
3.3)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.377, + 0.474, + 0.542 + ], + "angle": 0, + "content": "der dynamic motions (cf. Fig. 3). While the votes derived from the optical flows provide a cross-frame prior, they may not accurately track every human part and can't identify newly emerging regions. Therefore, we introduce segmentation masks to regularize the label consistency within each masked region. We apply the Segment Anything Model [27] to each rendered image and obtain a self-define group of masks \\( M_{m,n} \\), with the index \\( m \\in \\{1, \\dots, N_{mask,n}\\} \\). Within a mask \\( M_{m,n} \\) we compute the score function \\( S(l, M_{m,n}) \\) that fuses the votes of the image parser and the optical flow, normalized by the area of the mask:" + }, + { + "type": "equation", + "bbox": [ + 0.091, + 0.562, + 0.47, + 0.632 + ], + "angle": 0, + "content": "\\[\n\\mathcal {S} (l, M _ {m, n}) = \\frac {\\sum_ {p \\in M _ {m , n}} \\left[ f _ {p a r , n} (p , l) + \\lambda_ {p o} f _ {o p t , n} (p , l) \\right]}{\\sum_ {p \\in M _ {m , n}} \\left(1 + \\lambda_ {p o}\\right)}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.632, + 0.47, + 0.709 + ], + "angle": 0, + "content": "where the factor \\(\\lambda_{po}\\) weights the contribution of OPT over PAR. We now define a check function, \\(\\mathcal{C}(p,M_{m,n})\\), that returns 1 if the input evaluation pixel \\(p\\) is in the mask \\(M_{m,n}\\) and 0 otherwise. Finally, we obtain the corresponding vote function by summing over all the masks in the image:" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.717, + 0.47, + 0.751 + ], + "angle": 0, + "content": "\\[\nf _ {s a m, n} (p, l) = \\sum_ {m \\in 1: N _ {m a s k, n}} \\mathcal {C} (p, M _ {m, n}) * \\mathcal {S} (l, M _ {m, n}). \\tag {2}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.763, + 0.45, + 0.78 + ], + "angle": 0, + "content": "3.2. Graph Cut Optimization for Vertex Parsing" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.471, + 0.902 + ], + "angle": 0, + "content": "The next step in our semi-automatic process is combining all the labels obtained in Sec. 3.1 to assign a unique label to each scan vertex \\( v_{i} \\), with \\( i \\in \\{1, \\dots, N_{\\text{vert}}\\} \\). We frame this 3D semantic segmentation problem as a graph cut optimization: each 3D frame is interpreted as a graph \\( G \\), where vertices are now nodes and mesh edges are connections. Note that in a traditional Graph Cut, the values of the nodes are fixed, and the optimization computes only the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.377, + 0.893, + 0.437 + ], + "angle": 0, + "content": "cost of breaking a connection. In our case, we have several votes for a vertex label, coming from three different tools and from concurrent multi-view projections. We define our cost function that consists of two terms," + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.443, + 0.892, + 0.478 + ], + "angle": 0, + "content": "\\[\nE (L) = \\sum_ {i \\in 1: N _ {v e r t}} E _ {v e r t} \\left(l _ {i}\\right) + \\sum_ {i, j \\in 1: N _ {v e r t}} E _ {e d g e} \\left(l _ {i}, l _ {j}\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.485, + 0.892, + 0.56 + ], + "angle": 0, + "content": "where \\( L = \\{l_i\\} \\) represents all the vertex labels in current frame. 
As described below, the term \\( E_{vert} \\) combines the different votes into a single cost function, while \\( E_{edge} \\) evaluates neighboring labels for consistent 3D segmentation. We follow an approach similar to [8]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.56, + 0.892, + 0.606 + ], + "angle": 0, + "content": "Vertex-wise unary energy. The cost function per node or Unary energy comes from combining the different votes obtained in the multi-view image processing (see Sec. 3.1):" + }, + { + "type": "equation", + "bbox": [ + 0.503, + 0.612, + 0.892, + 0.663 + ], + "angle": 0, + "content": "\\[\nE _ {v e r t} \\left(l _ {i}\\right) = \\sum_ {n \\in 1: N _ {v i e w}} \\frac {\\lambda_ {p} E _ {p a r , n} + \\lambda_ {o} E _ {o p t , n} + \\lambda_ {s} E _ {s a m , n}}{N _ {v i e w}}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.664, + 0.892, + 0.74 + ], + "angle": 0, + "content": "where we combine the human image parser \\((E_{par})\\), the cross-frame optical prior \\((E_{opt})\\), and the segmentation masks regularization \\((E_{sam})\\) contributions. All these energy terms can be written with the same equation by using the notation \\(\\mathcal{X} = \\{par, opt, sam\\}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.542, + 0.748, + 0.892, + 0.782 + ], + "angle": 0, + "content": "\\[\nE _ {\\mathcal {X}, n} \\left(l _ {i}\\right) = \\sum_ {p \\in P \\left(v _ {i}, n\\right)} - w _ {\\mathcal {X}} \\left(p, v _ {i}\\right) f _ {\\mathcal {X}, n} \\left(p, l _ {i}\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.79, + 0.892, + 0.911 + ], + "angle": 0, + "content": "meaning that energy of the method \\(\\mathcal{X}\\), calculated for a proposed label \\(l_{i}\\), is obtained by summing over those pixels \\(p\\in P(v_i,n)\\) whose projections are within a triangle of \\(v_{i}\\). The weights for the cases of \\(E_{par}\\) and \\(E_{opt}\\) are set to the barycentric distance from the projected pixel \\(p\\) to the vertex \\(v_{i}\\), which means \\(w_{par} = w_{opt} = u\\) as in Fig. 2. For \\(E_{sam}\\) instead, we set the weight \\(w_{sam}\\) to the constant value 1 given that we look for an across-vertex regularization." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "553" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.087, + 0.852, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.302, + 0.894, + 0.359 + ], + "angle": 0, + "content": "Figure 3. Qualitative ablation study. We visualize the effectiveness of our 4D human parsing method on our 4D-DRESS dataset. From left to right, we show the improvements after adding the optical flow labels and mask scores to the multi-view image parser labels. The manual rectification efforts can be easily introduced from multi-view rendered labels, with which we achieve high-quality vertex annotations. The problem of isolated labels can be relieved by introducing the edge-wise binary energy term." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.372, + 0.47, + 0.448 + ], + "angle": 0, + "content": "Edge-wise binary energy. The Binary energy term penalizes the case of adjacent vertices with different labels, encouraging neighboring vertices to take the same label. 
Being \\( A \\) the adjacency matrix of the graph \\( G \\) and \\( \\delta \\) the Dirac delta function, the edge cost can be calculated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.146, + 0.454, + 0.469, + 0.471 + ], + "angle": 0, + "content": "\\[\nE _ {e d g e} \\left(l _ {i}, l _ {j}\\right) = \\lambda_ {b} A _ {i, j} \\left(1 - \\delta \\left(l _ {i}, l _ {j}\\right)\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.476, + 0.47, + 0.507 + ], + "angle": 0, + "content": "which increases the energy by \\(\\lambda_{b}\\) in the case that the adjacent vertices \\(v_{i}, v_{j}\\) take different labels \\(l_{i} \\neq l_{j}\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.513, + 0.377, + 0.527 + ], + "angle": 0, + "content": "3.3. Manual Rectification of 3D Labels" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.53, + 0.47, + 0.697 + ], + "angle": 0, + "content": "When manual rectification is needed, we introduce it back into the multi-view space as an additional 2D annotation, and we recalculate the steps in Sec. 3.2. Concretely, we ran the graph cut optimization for the first time. Then, we rendered the vertex labels into multi-view labels, from which we let a person introduce corrections by comparing the resulting labels with the textured multi-view images. Similarly to the vote functions of the image parser and optical flow, we create a vote function \\( f_{man}(p,l) \\) that accesses this set of images with rectified annotations and returns 1 if the label \\( l \\) is assigned to the pixel \\( p \\) and 0 otherwise." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.697, + 0.471, + 0.848 + ], + "angle": 0, + "content": "Similar to previous cases, we define a per-view manual energy \\((E_{man})\\) by using the variable \\(\\mathcal{X} = man\\) in Eq. (5), and we added it to the global per-node energy \\(E_{vert}\\) in Eq. (4). We use a constant large weight for \\(w_{man}\\) to favor the manual annotation over other sources of voting where we rectified the labels. The final vertex labels \\(L^{*} = \\{l^{*}_{i}\\}\\) are obtained after the second round of graph cut optimization. This manual rectification process finally changed \\(1.5\\%\\) of vertices within \\(3.2\\%\\) of all frames. The rectification process is detailed in Supp. Mat." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.852, + 0.21, + 0.869 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "To validate the effectiveness of our method, we conducted controlled experiments on two synthetic datasets," + }, + { + "type": "table", + "bbox": [ + 0.521, + 0.37, + 0.869, + 0.48 + ], + "angle": 0, + "content": "
Method | CLOTH4D [53] Inner | BEDLAM [7] Inner | BEDLAM [7] Outer
SMPL+D [36] | 0.872 | 0.846 | 0.765
PAR Only [43] | 0.961 | 0.910 | 0.714
PAR+OPT | 0.969 | 0.963 | 0.942
PAR+OPT+SAM | 0.995 | 0.993 | 0.988
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.483, + 0.892, + 0.525 + ], + "angle": 0, + "content": "Table 2. Baseline and ablation study. Mean accuracy of 4D human parsing methods applied on synthetic datasets. The Inner and Outer outfits are selected according to our definition in Sec. 5" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.537, + 0.892, + 0.658 + ], + "angle": 0, + "content": "CLOTH4D [53] and BEDLAM [7], where ground-truth semantic labels are available. We first compare our parsing method with a template-based baseline [36], that uses a semantic template (SMPL model with per-vertex displacements) to track and parse the clothed human scans. Due to the limited resolution and the fixed topology nature of the SMPL+D model, its parsing accuracy is lower than \\(90\\%\\) on all synthetic outfits (see Tab. 2)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.893, + 0.901 + ], + "angle": 0, + "content": "We then compare our 4D parsing pipeline with several ablations and report them in Tab. 2. We use an example scan from 4D-DRESS to support the visualization of the ablation study in Fig. 3. Using PAR only shows reasonable results for upper and lower clothes. Yet, it predicts inconsistent labels at open garments like jackets and coats (Fig. 3 PAR Only), resulting in only \\(71.4\\%\\) parsing accuracy on the BEDLAM dataset. The optical flow labels from the previous frame can serve as a cross-frame prior, yet accuracy may vary, particularly in fast-moving arms and cloth boundaries (Fig. 3 PAR+OPT). By fusing both of the previous multi-view labels via the segmentation masks, we achieve better boundary labels (Fig. 3 PAR+OPT+SAM), with \\(98.8\\%\\) accuracy on the outer outfits in BEDLAM, with challenging open garments. Finally, we show the effect of introducing manual efforts to rectify incorrect labels (Fig. 3" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.945, + 0.511, + 0.957 + ], + "angle": 0, + "content": "554" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.101, + 0.089, + 0.87, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.304, + 0.893, + 0.348 + ], + "angle": 0, + "content": "Figure 4. Qualitative examples for clothing simulation methods. On the left are templates used for simulations. On the right are ground-truth geometries and original scans, LBS baseline results in body penetrations and overly stretched areas. Compared to other methods, HOOD better models dress and jackets and, with tuned material parameters, HOOD* achieves simulations closest to the ground truth." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.353, + 0.471, + 0.415 + ], + "angle": 0, + "content": "With Manual). Our parsing method can also be deployed to annotate other existing 4D human datasets. We present examples of BUFF[51], X-Humans [40], and ActorsHQ[23] and additional qualitative results in Supp. Mat." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.425, + 0.267, + 0.442 + ], + "angle": 0, + "content": "5. Dataset Description" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.445, + 0.47, + 0.567 + ], + "angle": 0, + "content": "4D-DRESS contains 520 motion sequences (150 frames at 30 fps) in 64 real-world human outfits with a total of 78k frames. Each frame consists of multi-view images at 1k resolution, an 80k-face triangle 3D mesh with vertex annotations, and a 1k-resolution texture map. 
We also provide each garment with its canonical template to benefit the clothing simulation study. Finally, each 3D scan is accurately registered by SMPL/SMPL-X body models." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.567, + 0.471, + 0.84 + ], + "angle": 0, + "content": "To record 4D-DRESS we recruited 32 participants (18 female), with an average age of 24. The dataset consists of 4 dresses, 30 upper, 28 lower, and 32 outer garments. Participants were instructed to perform different dynamic motions for each 5-second sequence. For each participant, we capture two types of outfits: Inner Outfit comprising the inner layer dress/upper, and lower garments; and Outer Outfit with an additional layer of garment, such as open jackets or coats. A unique feature of 4D-DRESS is the challenging clothing deformations we captured. To quantify these deformations, we compute the mean distances from the garments to the registered SMPL body surfaces. The inner and outer outfits exhibit distance ranges up to \\(7.12\\mathrm{cm}\\) and \\(14.76\\mathrm{cm}\\) over all frames. This is twice as much as what we observed in the X-Humans dataset [40], for example. In the \\(10\\%\\) most challenging frames, this increases to \\(20.09\\mathrm{cm}\\) for outer outfits, highlighting the prevalence of challenging garments. Please refer to Supp. Mat. for dataset details." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.851, + 0.295, + 0.866 + ], + "angle": 0, + "content": "6. Benchmark Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.47, + 0.901 + ], + "angle": 0, + "content": "With high-quality 4D scans and diverse garment meshes in dynamic motions, 4D-DRESS serves as an ideal ground" + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.351, + 0.892, + 0.464 + ], + "angle": 0, + "content": "
Method | Lower CD↓ | Lower EStr↓ | Upper CD↓ | Upper EStr↓ | Dress CD↓ | Dress EStr↓ | Outer CD↓ | Outer EStr↓
LBS | 1.767 | 0.333 | 2.167 | 0.095 | 4.461 | 1.293 | 4.626 | 0.811
PBNS [4] | 1.885 | 0.107 | 2.687 | 0.040 | 4.869 | 0.643 | 4.859 | 0.107
NCS [5] | 1.716 | 0.017 | 2.112 | 0.016 | 4.548 | 0.031 | 4.738 | 0.025
HOOD [15] | 2.070 | 0.008 | 2.668 | 0.013 | 4.292 | 0.010 | 5.355 | 0.011
HOOD* | 0.924 | 0.010 | 1.308 | 0.015 | 2.463 | 0.009 | 2.833 | 0.009
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.467, + 0.892, + 0.509 + ], + "angle": 0, + "content": "Table 3. Clothing simulation benchmark. CD is Chamfer Distance between the simulation and ground truth. \\( E_{str} \\) denotes stretching energy with respect to the template." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.515, + 0.892, + 0.621 + ], + "angle": 0, + "content": "truth for a variety of computer vision and graphics benchmarks. In our work, we outline several standard benchmarks conducted in these fields using our dataset. Our primary focus is on tasks related to clothing simulation (Sec. 6.1) and clothed human reconstruction (Sec. 6.2). Additionally, benchmarks on human parsing and human representation learning are included in our Supp. Mat." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.627, + 0.692, + 0.642 + ], + "angle": 0, + "content": "6.1. Clothing Simulation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Experimental setup. We introduce a new benchmark for clothing simulation, leveraging the garment meshes from 4D-DRESS, which capture dynamical real-world clothing deformations. This benchmark evaluates three methods for modeling garment dynamics: PBNS [4], Neural Cloth Simulator (NCS [5]), and HOOD [15], as well as a baseline method that applies SMPL-based linear blend-skinning (LBS) to the template. We ran the simulations using T-posed templates extracted from static scans and compared the results to the ground-truth garment meshes across various pose sequences. Our evaluation metrics include the Chamfer Distance (CD), which compares the resulting mesh sequences with ground-truth point clouds, and the average stretching energy \\((E_{str})\\) calculated by measuring the difference in edge lengths between the simulated and template meshes. The experiments were conducted across four categories of garments (Lower, Upper, Dress, and Outer)," + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "555" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.09, + 0.852, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.281, + 0.894, + 0.325 + ], + "angle": 0, + "content": "Figure 5. Examples of clothed human reconstruction on 4D-DRESS. We evaluate state-of-the-art methods using both inner (Top) and outer (Bottom) outfits. We show that existing methods generally struggle with the challenging loose garments. Moreover, these approaches cannot faithfully recover realistic details such as clothing wrinkles." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.331, + 0.471, + 0.391 + ], + "angle": 0, + "content": "with four garment templates in each category. We simulated clothing deformation for each garment in six different pose sequences, providing a comprehensive comparison of their ability to generate realistic motions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.393, + 0.47, + 0.529 + ], + "angle": 0, + "content": "Fine-tuning material parameters. To demonstrate the advantages of real-world garment meshes in 4D-DRESS, we also introduce a simple optimization-based strategy for inverse simulation using HOOD. Specifically, we optimize the material parameters fed into the HOOD model to minimize the simulations' Chamfer Distance to the ground-truth sequences and their stretching energy. This optimized version is denoted as HOOD*. 
For more details on the material optimization experiments, please refer to Supp. Mat." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.532, + 0.471, + 0.835 + ], + "angle": 0, + "content": "Evaluation results. The quantitative and qualitative comparisons of the clothing simulation methods are presented in Tab. 3 and Fig. 4 respectively. The LBS baseline and LBS-based approaches (PBNS and NCS) perform better with upper and lower garments, which exhibit limited free-flowing motions compared with the dress and outer garments. Conversely, HOOD excels with dresses, generating more natural, free-flowing motions and achieving lower stretching energy. However, if HOOD fails to generate realistic motions for a single frame, this error propagates to all subsequent frames. This issue does not occur in the LBS-based methods, which generate geometries independently for each frame. With finely-tuned material parameters, HOOD* produces garment sequences that more faithfully replicate real-world behavior. We anticipate that future research in learned garment simulation will increasingly focus on modeling real-world garments made from complex heterogeneous materials. This will be a major step in creating realistically animated digital avatars, and we believe 4D-DRESS will be highly instrumental in this task." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.838, + 0.358, + 0.853 + ], + "angle": 0, + "content": "6.2. Clothed Human Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Experimental setup. We create a new benchmark for evaluating state-of-the-art clothed human reconstruction methods on the 4D-DRESS dataset. This benchmark is di" + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.328, + 0.89, + 0.47 + ], + "angle": 0, + "content": "
Method | Inner CD↓ | Inner NC↑ | Inner IoU↑ | Outer CD↓ | Outer NC↑ | Outer IoU↑
PIFu [37] | 2.696 | 0.792 | 0.690 | 2.783 | 0.759 | 0.697
PIFuHD [38] | 2.426 | 0.793 | 0.739 | 2.393 | 0.763 | 0.743
PaMIR [52] | 2.520 | 0.805 | 0.706 | 2.608 | 0.777 | 0.715
ICON [47] | 2.473 | 0.798 | 0.752 | 2.832 | 0.762 | 0.756
PHORHUM [1] | 3.944 | 0.725 | 0.580 | 3.762 | 0.705 | 0.603
ECON [48] | 2.543 | 0.796 | 0.736 | 2.852 | 0.760 | 0.728
SiTH [21] | 2.110 | 0.824 | 0.755 | 2.322 | 0.794 | 0.749
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.472, + 0.893, + 0.527 + ], + "angle": 0, + "content": "Table 4. Clothed human reconstruction benchmark. We computed Chamfer distance (CD), normal consistency (NC), and Intersection over Union (IoU) between ground truth and reconstructed meshes obtained from different baselines." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.533, + 0.892, + 0.716 + ], + "angle": 0, + "content": "vided into three subtasks. First, we evaluate single-view human reconstruction utilizing images and high-quality 3D scans from our dataset. In addition, benefiting from the garment meshes in our dataset, we establish the first real-world benchmark for evaluating single-view clothing reconstruction. Finally, we assess video-based human reconstruction approaches leveraging the sequences in 4D-DRESS that capture rich motion dynamics of both human bodies and garments. In all the experiments, we report 3D metrics including Chamfer Distance (CD), Normal Consistency (NC), and Intersection over Union (IoU) to compare the predictions with ground-truth meshes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.893, + 0.902 + ], + "angle": 0, + "content": "Single-view human reconstruction. We use the two test sets defined in Sec. 5 (denote as Outer and Inner) to evaluate the following single-view reconstruction methods: PIFu [37], PIFuHD [38], PaMIR [52], ICON [47], PHORHUM [1], ECON [48], and SiTH [21]. The evaluation results are summarized in Fig. 5 and Tab. 4. We observed that methods leveraging SMPL body models as guidance (i.e., ICON, ECON, SiTH) performed better in reconstructing inner clothing. However, their performance significantly declined when dealing with outer garments. On the other hand, end-to-end models like PIFu and PIFuHD demonstrated more stability with both clothing types. This" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "556" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.109, + 0.089, + 0.437, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.278, + 0.468, + 0.334 + ], + "angle": 0, + "content": "Figure 6. Examples of clothing reconstruction on 4D-DRESS. We visualize the reconstructed garment meshes from different approaches. These methods trained on synthetic datasets failed to predict accurate clothing sizes and detailed wrinkles." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.337, + 0.468, + 0.426 + ], + "angle": 0, + "content": "
Method | Shoes CD↓ | Shoes IoU↑ | Lower CD↓ | Lower IoU↑ | Upper CD↓ | Upper IoU↑ | Outer CD↓ | Outer IoU↑
BCNet [24] | - | - | 2.533 | 0.675 | 2.079 | 0.700 | 3.600 | 0.639
SMPLicit [12] | 2.619 | 0.621 | 2.101 | 0.698 | 2.452 | 0.617 | 3.359 | 0.618
ClothWild [34] | 3.657 | 0.548 | 2.690 | 0.582 | 3.279 | 0.533 | 4.163 | 0.588
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.427, + 0.468, + 0.469 + ], + "angle": 0, + "content": "Table 5. Clothing reconstruction benchmark. We report Chamfer Distance (CD), and Intersection over Union (IoU) between the ground-truth garment meshes and the reconstructed clothing." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.475, + 0.468, + 0.566 + ], + "angle": 0, + "content": "leads to an intriguing research question: whether the human body prior is necessary for reconstructing clothing. Qualitatively, we see that even the best-performing methods cannot perfectly reconstruct realistic free-flowing jackets as shown in Tab. 4. We believe 4D-DRESS will offer more valuable insights for research in clothed human reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.569, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Single-view clothes reconstruction. Clothes reconstruction has received relatively little attention compared to full-body human reconstruction. Leveraging the garment meshes in 4D-DRESS, we introduce the first real-world benchmark to assess prior art, including BCNet [24], SMPLicit [12], and ClothWild [34]. The results of different clothing types, as shown in Fig. 6, indicate a significant gap between the reconstructed and real clothing. Firstly, the clothing sizes produced by these methods are often inaccurate, suggesting a lack of effective use of image information for guidance. Moreover, the results typically lack geometric details like clothing wrinkles compared to full-body reconstruction. We report quantitative results in Tab. 5. We observed that the data-driven method (BCNet) performs better with inner clothing, while the generative fitting method (SMPLicit) shows more robustness to outer clothing, such as coats. However, none of these methods is designed for or trained on real-world data. The domain gap between synthetic and real data still limits their capability to produce accurate shapes and fine-grained details. We expect our benchmark and dataset will draw more research attention to the topic of real-world clothing reconstruction." + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.077, + 0.871, + 0.169 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.172, + 0.892, + 0.228 + ], + "angle": 0, + "content": "Figure 7. Video-based human reconstruction. Qualitative results of video-based human reconstruction methods on 4D-DRESS. Prior works struggle to reconstruct 3D human with challenging outfits and cannot recover the fine-grained surface details." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.23, + 0.892, + 0.304 + ], + "angle": 0, + "content": "
Method | Inner CD↓ | Inner NC↑ | Inner IoU↑ | Outer CD↓ | Outer NC↑ | Outer IoU↑
SelfRecon [25] | 3.180 | 0.729 | 0.754 | 4.027 | 0.683 | 0.745
Vid2Avatar [16] | 2.870 | 0.750 | 0.772 | 3.014 | 0.725 | 0.787
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.306, + 0.891, + 0.333 + ], + "angle": 0, + "content": "Table 6. Video-based human reconstruction. Results of video-based human reconstruction methods on 4D-DRESS." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.338, + 0.892, + 0.594 + ], + "angle": 0, + "content": "Video-based human reconstruction Leveraging the sequential 4D data in our dataset, we create a new benchmark for evaluating video-based human reconstruction methods. We applied Vid2Avatar [16] and SelfRecon [25] to obtain 4D reconstructions and compared them with the provided ground-truth 4D scans. As observed in Fig. 7, both methods struggle with diverse clothing styles and face challenges in reconstructing surface parts that greatly differ in topology from the human body, such as the open jacket. Moreover, there remains a noticeable discrepancy between the real geometry and the recovered surface details. Quantitatively, the existing methods cannot achieve satisfactory reconstruction results with outer garments, as demonstrated by a large performance degradation in Tab. 6. We believe 4D-DRESS provides essential data for advancing video-based human reconstruction methods, particularly in achieving detailed geometry recovery for challenging clothing." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.595, + 0.613, + 0.609 + ], + "angle": 0, + "content": "7. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.614, + 0.892, + 0.854 + ], + "angle": 0, + "content": "Limitations. Our current pipeline requires substantial computational time. The offline manual rectification process and garment mesh extraction also demand expertise in 3D editing and additional human efforts. These factors constrain the scalability of our dataset. With a goal of expanding more diverse subjects and clothing, real-time 4D annotation and rectification/editing will be exciting future work. Conclusion. 4D-DRESS is the first real-world 4D clothed human dataset with semantic annotations, aiming to bridge the gap between existing clothing algorithms and real-world human clothing. We demonstrate that 4D-DRESS is not only a novel data source but also a challenging benchmark for clothing simulation, reconstruction, and other related tasks. We believe that 4D-DRESS can support a wide range of endeavors and foster research progress by providing high-quality 4D data in life like human clothing." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Acknowledgements. This work was partially supported by the Swiss SERI Consolidation Grant \"AI-PERCEIVE\". AG was supported in part by the Max Planck ETH CLS." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.945, + 0.511, + 0.956 + ], + "angle": 0, + "content": "557" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Thiemo Alldieck, Mihai Zanfir, and Cristian Sminchisescu. Photorealistic monocular 3d reconstruction of humans wearing clothing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.284 + ], + "angle": 0, + "content": "[2] Matthieu Armando, Laurence Boissieux, Edmond Boyer, Jean-Sebastien Franco, Martin Humenberger, Christophe Legras, Vincent Leroy, Mathieu Marsot, Julien Pansiot, Sergi Pujades, Rim Rekik, Gregory Rogez, Anilkumar Swamy, and Stefanie Wuhrer. 4dhumanoutfit: a multi-subject 4d dataset of human motion sequences in varying outfits exhibiting large displacements. Computer Vision and Image Understanding, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.286, + 0.472, + 0.342 + ], + "angle": 0, + "content": "[3] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Cloth3d: clothed 3d humans. In Proceedings of the European Conference on Computer Vision (ECCV), pages 344-359. Springer, 2020. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.343, + 0.472, + 0.399 + ], + "angle": 0, + "content": "[4] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Pbs: Physically based neural simulation for unsupervised garment pose space deformation. ACM Transactions on Graphics (TOG), 40(6), 2021. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.401, + 0.472, + 0.441 + ], + "angle": 0, + "content": "[5] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Neural cloth simulation. ACM Transactions on Graphics (TOG), 41(6):1-14, 2022. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.444, + 0.472, + 0.513 + ], + "angle": 0, + "content": "[6] Bharat Lal Bhatnagar, Garvita Tiwari, Christian Theobalt, and Gerard Pons-Moll. Multi-garment net: Learning to dress 3d people from images. In Proceedings of the IEEE International Conference on Computer Vision (ICCV). IEEE, 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.515, + 0.472, + 0.584 + ], + "angle": 0, + "content": "[7] Michael J. Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. BEDLAM: A synthetic dataset of bodies exhibiting detailed lifelike animated motion. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 8726-8737, 2023. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.586, + 0.472, + 0.642 + ], + "angle": 0, + "content": "[8] Y. Boykov, O. Veksler, and R. Zabih. Fast approximate energy minimization via graph cuts. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 23(11): 1222-1239, 2001. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.644, + 0.472, + 0.713 + ], + "angle": 0, + "content": "[9] Xianjie Chen, Roozbeh Mottaghi, Xiaobai Liu, Sanja Fidler, Raquel Urtasun, and Alan Yuille. Detect what you can: Detecting and representing objects using holistic models and body parts. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2014. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.715, + 0.393, + 0.729 + ], + "angle": 0, + "content": "[10] CLO. https://www.clo3d.com, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.731, + 0.47, + 0.799 + ], + "angle": 0, + "content": "[11] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (TOG), 34(4):1-13, 2015. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.802, + 0.47, + 0.871 + ], + "angle": 0, + "content": "[12] Enric Corona, Albert Pumarola, Guillem Alenyà, Gerard Pons-Moll, and Francesc Moreno-Noguer. 
Semplicit: Topology-aware generative model for clothed people. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.47, + 0.902 + ], + "angle": 0, + "content": "[13] Ke Gong, Xiaodan Liang, Yicheng Li, Yimin Chen, Ming Yang, and Liang Lin. Instance-level human parsing via part" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.121 + ], + "angle": 0, + "content": "grouping network. In Proceedings of the European Conference on Computer Vision (ECCV), pages 770-785, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.123, + 0.894, + 0.19 + ], + "angle": 0, + "content": "[14] Ke Gong, Yiming Gao, Xiaodan Liang, Xiaohui Shen, Meng Wang, and Liang Lin. Graphonomy: Universal human parsing via graph transfer learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.193, + 0.894, + 0.262 + ], + "angle": 0, + "content": "[15] Artur Grigorev, Bernhard Thomaszewski, Michael J. Black, and Otmar Hilliges. Hood: Hierarchical graphs for generalized modelling of clothing dynamics. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 16965-16974, 2023. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.264, + 0.894, + 0.332 + ], + "angle": 0, + "content": "[16] Chen Guo, Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Vid2 avatar: 3d avatar reconstruction from videos in the wild via self-supervised scene decomposition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.334, + 0.894, + 0.403 + ], + "angle": 0, + "content": "[17] Sang-Hun Han, Min-Gyu Park, Ju Hong Yoon, Ju-Mi Kang, Young-Jae Park, and Hae-Gon Jeon. High-fidelity 3d human digitization from single 2k resolution images. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.405, + 0.892, + 0.461 + ], + "angle": 0, + "content": "[18] Haoyu He, Jing Zhang, Qiming Zhang, and Dacheng Tao. Grapy-ml: Graph pyramid mutual learning for cross-dataset human parsing. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.462, + 0.892, + 0.517 + ], + "angle": 0, + "content": "[19] Haoyu He, Jing Zhang, Bhavani Thuraisingham, and Dacheng Tao. Progressive one-shot human parsing. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.519, + 0.892, + 0.603 + ], + "angle": 0, + "content": "[20] Zhu Heming, Cao Yu, Jin Hang, Chen Weikai, Du Dong, Wang Zhangye, Cui Shuguang, and Han Xiaoguang. Deep fashion3d: A dataset and benchmark for 3d garment reconstruction from single images. In Proceedings of the European Conference on Computer Vision (ECCV), pages 512-530. Springer International Publishing, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.604, + 0.892, + 0.659 + ], + "angle": 0, + "content": "[21] Hsuan-I Ho, Jie Song, and Otmar Hilliges. Sith: Single-view textured human reconstruction with image-conditioned diffusion. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.661, + 0.892, + 0.716 + ], + "angle": 0, + "content": "[22] Jie Song Hsuan-I Ho, Lixin Xue and Otmar Hilliges. Learning locally editable virtual humans. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.718, + 0.892, + 0.786 + ], + "angle": 0, + "content": "[23] Mustafa Işik, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.788, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[24] Boyi Jiang, Juyong Zhang, Yang Hong, Jinhao Luo, Ligang Liu, and Hujun Bao. BCnet: Learning body and cloth shape from a single image. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[25] Boyi Jiang, Yang Hong, Hujun Bao, and Juyong Zhang. Selfrecon: Self reconstruction your digital avatar from monocular video. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 8" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "558" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[26] Hanbyul Joo, Tomas Simon, Xulong Li, Hao Liu, Lei Tan, Lin Gui, Sean Banerjee, Timothy Scott Godisart, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social interaction capture. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.178, + 0.471, + 0.261 + ], + "angle": 0, + "content": "[27] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollar, and Ross Girshick. Segment anything. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 4015-4026, 2023. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.263, + 0.471, + 0.304 + ], + "angle": 0, + "content": "[28] Peike Li, Yunqiu Xu, Yunchao Wei, and Yi Yang. Self-correction for human parsing. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.471, + 0.36 + ], + "angle": 0, + "content": "[29] X. Liang, K. Gong, X. Shen, and L. Lin. Look into person: Joint body parsing & pose estimation network and a new benchmark. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 41(04):871-885, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.362, + 0.47, + 0.431 + ], + "angle": 0, + "content": "[30] Kunliang Liu, Ouk Choi, Jianming Wang, and Wonjun Hwang. Cdgnet: Class distribution guided network for human parsing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4473-4482, 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.433, + 0.47, + 0.487 + ], + "angle": 0, + "content": "[31] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multiperson linear model. ACM Transactions on Graphics (TOG), 34(6):248:1-248:16, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.49, + 0.47, + 0.559 + ], + "angle": 0, + "content": "[32] Qianli Ma, Jinlong Yang, Anurag Ranjan, Sergi Pujades, Gerard Pons-Moll, Siyu Tang, and Michael J. Black. Learning to Dress 3D People in Generative Clothing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.561, + 0.47, + 0.616 + ], + "angle": 0, + "content": "[33] Qianli Ma, Jinlong Yang, Siyu Tang, and Michael J. Black. The power of points for modeling humans in clothing. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.618, + 0.47, + 0.673 + ], + "angle": 0, + "content": "[34] Gyeongsik Moon, Hyeongjin Nam, Takaaki Shiratori, and Kyoung Mu Lee. 3d clothed human reconstruction in the wild. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.675, + 0.47, + 0.743 + ], + "angle": 0, + "content": "[35] Chaitanya Patel, Zhouyingcheng Liao, and Gerard Pons-Moll. Tailornet: Predicting clothing in 3d as a function of human pose, shape and garment style. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). IEEE, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.47, + 0.8 + ], + "angle": 0, + "content": "[36] Gerard Pons-Moll, Sergi Pujades, Sonny Hu, and Michael J. Black. Clothcap: Seamless 4d clothing capture and retargeting. ACM Transactions on Graphics (TOG), 36(4), 2017. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.47, + 0.871 + ], + "angle": 0, + "content": "[37] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[38] Shunsuke Saito, Tomas Simon, Jason Saragih, and Hanbyul Joo. Pifuhd: Multi-level pixel-aligned implicit function for" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "high-resolution 3d human digitization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[39] Yidi Shao, Chen Change Loy, and Bo Dai. Towards multilayered 3d garments animation. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.193, + 0.892, + 0.261 + ], + "angle": 0, + "content": "[40] Kaiyue Shen, Chen Guo, Manuel Kaufmann, Juan Zarate, Julien Valentin, Jie Song, and Otmar Hilliges. X-avatar: Expressive human avatars. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.264, + 0.892, + 0.319 + ], + "angle": 0, + "content": "[41] Zhaoqi Su, Tao Yu, Yangang Wang, and Yebin Liu. Deepcloth: Neural garment representation for shape and style editing. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 45(2):1581-1593, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.321, + 0.892, + 0.375 + ], + "angle": 0, + "content": "[42] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision (ECCV), pages 402-419. Springer, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.378, + 0.892, + 0.446 + ], + "angle": 0, + "content": "[43] Garvita Tiwari, Bharat Lal Bhatnagar, Tony Tung, and Gerard Pons-Moll. Sizer: A dataset and model for parsing 3d clothing and learning size sensitive 3d clothing. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.448, + 0.892, + 0.474 + ], + "angle": 0, + "content": "[44] Unreal Engine 5. https://www.unrealengine.com, 2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.477, + 0.892, + 0.545 + ], + "angle": 0, + "content": "[45] Wenguan Wang, Hailong Zhu, Jifeng Dai, Yanwei Pang, Jianbing Shen, and Ling Shao. Hierarchical human parsing with typed part-relation reasoning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.548, + 0.892, + 0.617 + ], + "angle": 0, + "content": "[46] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. Fake it till you make it: face analysis in the wild using synthetic data alone. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 3681-3691, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.619, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[47] Yuliang Xiu, Jinlong Yang, Dimitrios Tzionas, and Michael J. Black. ICON: Implicit Clothed humans Obtained from Normals. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.676, + 0.892, + 0.743 + ], + "angle": 0, + "content": "[48] Yuliang Xiu, Jinlong Yang, Xu Cao, Dimitrios Tzionas, and Michael J. Black. ECON: Explicit Clothed humans Optimized via Normal integration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.746, + 0.892, + 0.788 + ], + "angle": 0, + "content": "[49] Lu Yang, Wenhe Jia, Shan Li, and Qing Song. Deep learning technique for human parsing: A survey and outlook. arXiv preprint arXiv:2301.00394, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.79, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[50] Tao Yu, Zerong Zheng, Kaiwen Guo, Pengpeng Liu, Qionghai Dai, and Yebin Liu. Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[51] Chao Zhang, Sergi Pujades, Michael J. 
Black, and Gerard Pons-Moll. Detailed, accurate, human shape estimation from clothed 3d scan sequences. In Proceedings of the IEEE" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "559" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.189 + ], + "angle": 0, + "content": "[52] Zerong Zheng, Tao Yu, Yebin Liu, and Qionghai Dai. Pamir: Parametric model-conditioned implicit representation for image-based human reconstruction. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.469, + 0.248 + ], + "angle": 0, + "content": "[53] Xingxing Zou, Xintong Han, and Waikeung Wong. Cloth4d: A dataset for clothed human reconstruction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 12847-12857, 2023. 1, 2, 3, 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.511, + 0.956 + ], + "angle": 0, + "content": "560" + } + ] +] \ No newline at end of file diff --git a/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/cd2548ca-a539-45a3-adaf-36a364d6da68_origin.pdf b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/cd2548ca-a539-45a3-adaf-36a364d6da68_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d930c9db8a6c4de16bfe703381fe9a440029e6df --- /dev/null +++ b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/cd2548ca-a539-45a3-adaf-36a364d6da68_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:498d1bde71a78ab2fbf9cce794c615c5b222d6183b82a2bc06c6790d9e78fd43 +size 8254705 diff --git a/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/full.md b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8b9e37a0b6aa2b9d25fe7838bb855d25092baa80 --- /dev/null +++ b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/full.md @@ -0,0 +1,268 @@ +# 4D-DRESS: A 4D Dataset of Real-World Human Clothing With Semantic Annotations + +Wenbo Wang $^{*1}$ Hsuan-I Ho $^{*1}$ Chen Guo $^{1}$ Boxiang Rong $^{1}$ Artur Grigorev $^{1,2}$ +Jie Song $^{1}$ Juan Jose Zarate $^{\dagger 1}$ Otmar Hilliges $^{1}$ + +Department of Computer Science, ETH Zürich Max Planck Institute for Intelligent Systems, Tübingen + +https://ait.ethz.ch/4d-dress + +![](images/5658c0cc0dd26a6d35c17dc2035b6186240029f9a36a4eee95c9df6c12a11f6a.jpg) +Figure 1. Overview of 4D-DRESS. We propose the first real-world 4D dataset of human clothing, capturing 64 human outfits in more than 520 motion sequences. These sequences include a) high-quality 4D textured scans; for each scan, we annotate b) vertex-level semantic labels, thereby obtaining c) the corresponding garment meshes and fitted SMPL(-X) body meshes. 
+ 

# Abstract

The studies of human clothing for digital avatars have predominantly relied on synthetic datasets. While easy to collect, synthetic data often fall short in realism and fail to capture authentic clothing dynamics. Addressing this gap, we introduce 4D-DRESS, the first real-world 4D dataset advancing human clothing research with its high-quality 4D textured scans and garment meshes. 4D-DRESS captures 64 outfits in 520 human motion sequences, amounting to 78k textured scans. Creating a real-world clothing dataset is challenging, particularly in annotating and segmenting the extensive and complex 4D human scans. To address this, we develop a semi-automatic 4D human parsing pipeline. We efficiently combine a human-in-the-loop process with automation to accurately label 4D scans in diverse garments and body movements. Leveraging precise annotations and high-quality garment meshes, we establish several benchmarks for clothing simulation and reconstruction. 4D-DRESS offers realistic and challenging data that complements synthetic sources, paving the way for advancements in research on lifelike human clothing.

# 1. Introduction

Human clothing is crucial in various applications such as 3D games, animations, and virtual try-on. Researchers are actively investigating algorithms for clothing reconstruction [12, 24, 34] and simulation [4, 5, 15] to achieve realistic clothing behavior, enhance user engagement, and enable cross-industry applications. These algorithms are frequently developed and assessed using synthetic datasets [3, 7, 53], since they comprise a) meshes covering various garment types and outfits and b) parametric body mod
Dataset# of Outfits# of FramesData FormatTexturedSemantic LabelsLoose Garments
TailorNet [35]95.5kSMPL + Garments
ReSynth [33]2430kSMPLX + Point Clouds
CLOTH3D [3]8.5k2.1MSMPL + Garments
CLOTH4D [53]1k100kMesh + Garments
BEDLAM [7]111380kSMPL-X + Garments
D-LAYERS [39]5k700kSMPL + Garments
BUFF [51]614kScans + SMPL
CAPE [32]15140kSMPL+D
ActorsHQ [23]839kScans
X-Humans [40]2035kScans + SMPL-(X)
4DHumanOutfit [2]14459kScans + SMPL
4D-DRESS (Ours)6478kScans + SMPL(-X) + Garments
+ +Table 1. Summary of 4D clothed human datasets. The datasets highlighted in gray color are synthetic datasets while the others are real-world scans. # of Outfits: number of outfits included; # of Frames: total number of 3D human frames; Data Format: 3D representations of human bodies and garments; Textured: with textured map or not; Semantic Labels: with semantic labels for clothing or not; Loose Garments: containing challenging loose clothing such as dresses or not. 4D-DRESS demonstrates outstanding features against others. + +els with diverse motions. While synthetic datasets lead in outfit quantity and the number of frames provided (refer to Tab. 1), there also presents a significant challenge in bridging the domain gap between the synthetic and real garments. Despite the recently released real-world 4D human datasets such as X-Humans [40], ActorsHQ [23], and 4DHumanOutfit [2], a key limitation persists: they lack accurately segmented garment meshes, offering only raw human scans. Moreover, these datasets are limited in the number of loose garments (e.g., jackets and dresses) or dynamic motions, which reduces their applicability as test benches. These challenges highlight the need for a real-world 4D dataset that provides semantic annotations and captures diverse garments across various body motions. + +In this work, we contribute 4D-DRESS, the first real-world dataset of human clothing with 4D semantic segmentation. We aim to provide an evaluation testbench with real-world data for tasks related to human clothing in computer vision and graphics. We capture over 520 human motion sequences featuring 64 distinct real-world human outfits in a high-end multi-view volumetric capture system, similar to the one used in [11]. The complete dataset comprises a total of 78k frames, each composed of an 80k-face triangle mesh, a 1k resolution textured map, and a set of 1k resolution multi-view images. As illustrated in Fig. 1, we provide a) high-quality 4D textured scans, b) vertex-level semantic labels for various clothing types, such as upper, lower, and outer garments, and c) garment meshes along with their registered SMPL(-X) body models. + +Capturing real-world 4D sequences of humans wearing various clothing and performing diverse motions requires dedicated high-end capture facilities. Moreover, processing these clips into accurately annotated and segmented 4D human scans presents significant challenges. To develop our + +dataset, we tackled the task of labeling 78k high-resolution meshes at the vertex level. Given that the mesh topologies of consecutive frames do not inherently correspond, consistently propagating 3D vertex labels from one frame to the next is non-trivial. While previous methods [6, 36] attempted to fit a fixed-topology parametric body model to the scans, these template-based approaches still struggle with scenarios such as a jacket being lifted to reveal a shirt or the emergence of new vertices on a flowing coat as illustrated in the example shown in Fig. 3. Consequently, we opted for an alternative approach. We developed a semi-automatic and template-free 4D human parsing pipeline. Leveraging semantic maps from a 2D human parser [14] and a segmentation model [27], we extended these techniques to 4D, considering both multi-view and temporal consistency. Our pipeline accurately assigns vertex labels without manual intervention in $96.8\%$ of frames. Within the remaining scans, only $1.5\%$ of vertices require further rectification, addressed via a human-in-the-loop process. 
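To make the flow of this semi-automatic pipeline concrete, a minimal structural sketch of the per-frame labeling loop is given below. All helper names (`render_views`, `parse_views`, `warp_previous_labels`, `segment_views`, `graph_cut_fuse`, `render_label_views`) are hypothetical stand-ins for the components detailed in Sec. 3, not the released tooling; the stubs only return dummy data so the skeleton runs end to end.

```python
import numpy as np

N_VIEWS, N_LABELS, N_VERTS, H, W = 24, 6, 1000, 64, 64

# --- hypothetical stand-ins for the components described in Sec. 3 ---
def render_views(scan):                          # textured multi-view renders
    return np.zeros((N_VIEWS, H, W, 3))

def parse_views(images):                         # 2D human parser votes (PAR)
    return np.random.randint(0, N_LABELS, (N_VIEWS, H, W))

def warp_previous_labels(prev_imgs, imgs, prev_lbls):  # optical-flow transfer (OPT)
    return prev_lbls                             # first frame: None (no temporal prior)

def segment_views(images):                       # segmentation masks (SAM)
    return np.zeros((N_VIEWS, H, W), dtype=int)

def graph_cut_fuse(scan, par, opt, sam):         # multi-view fusion + graph cut (Sec. 3.2)
    return np.random.randint(0, N_LABELS, N_VERTS)

def render_label_views(scan, vertex_labels):     # vertex labels rendered back to the views
    return np.zeros((N_VIEWS, H, W), dtype=int)

def label_sequence(scans):
    """Per-frame loop: label each scan and propagate its labels to the next frame."""
    prev_imgs, prev_lbls = None, None
    out = []
    for scan in scans:
        imgs = render_views(scan)
        par = parse_views(imgs)
        opt = warp_previous_labels(prev_imgs, imgs, prev_lbls)
        sam = segment_views(imgs)
        verts = graph_cut_fuse(scan, par, opt, sam)
        out.append(verts)
        # frames flagged for manual rectification would be re-optimized here (Sec. 3.3)
        prev_imgs, prev_lbls = imgs, render_label_views(scan, verts)
    return out

labels = label_sequence([None, None, None])      # dummy three-frame "sequence"
print(len(labels), labels[0].shape)
```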
+ +The quality of the ground-truth data in 4D-DRESS allows us to establish several evaluation benchmarks for diverse tasks, including clothing simulation, reconstruction, and human parsing. Our evaluation and analysis demonstrate that 4D-DRESS offers realistic and challenging human clothing that cannot be readily modeled by existing algorithms, thereby opening avenues for further research. In summary, our contributions include: + +- the first real-world 4D human clothing dataset comprising 4D textured scans, vertex-level semantic labels, garment meshes, and corresponding parametric body meshes. +- a semi-automatic and template-free 4D human parsing pipeline for efficient data annotation. +- evaluation benchmarks showing the utility of our dataset. + +# 2. Related Work + +4D clothed human dataset. Datasets featuring clothed humans can be divided into two categories. Firstly, synthetic datasets [3, 7, 33, 35, 39, 53] create large volume of synthetic data using graphic engines [44] and simulation tools [10] (Tab. 1 top). These datasets are easy to scale with ground truth semantic labels available by design. However, they often lack realism in human appearances, clothing deformations, and motion dynamics. Even though recent work [7, 46] attempted to achieve photorealistic human textures with manual efforts, it is challenging to precisely mimic the way real-world clothing moves and deforms. Therefore, it is essential to create datasets of real-world human clothing by capturing these intricate details. + +The second category (Tab. 1 bottom) involves using multi-view volumetric capture systems [11, 26] to collect datasets of people dressed in real-world clothing [2, 17, 20, 22, 23, 32, 40, 41, 43, 50, 51]. However, the resources required for capturing, storing, and processing this data are substantial, which limits the size of these publicly available datasets [2, 40, 51]. Moreover, these methods do not inherently provide labeled annotations, offering only temporally uncorrelated scans. This makes the raw data on these datasets less suitable for research focusing on human clothing. 4D-DRESS gathers a variety of human subjects and outfits providing accurate semantic labels of human clothing, garment meshes, and SMPL/SMPL-X fits. + +Human parsing. Human parsing [49] is a specific task within semantic segmentation aimed at identifying detailed body parts and clothing labels. Conventionally, this challenge is tackled using deep neural networks, trained on images with their corresponding semantic labels [9, 13, 29]. Although these methods have been successful in 2D [14, 18, 19, 28, 30, 45], applying them to annotate 3D and 4D scans is still a challenge. Previous work has explored it using two distinct strategies. One strategy, used by SIZER [43] and MGN [6], involves rendering multi-view images and projecting parsing labels onto 3D meshes through a voting process. While this method considers consistency across multiple views, it overlooks temporal consistency and falls short of accurately labeling 4D scans. Another approach, used by ClothCap [36], registers all scans to a fixed-topology SMPL model [31] with per-vertex displacements. Yet, this method struggles with handling large motions and complex clothing due to limited template resolutions and model-fitting capabilities. This results in noisy labels near boundaries and loose garments. In contrast, our approach combines multiview voting and optical warping in a template-free pipeline, achieving both multi-view and temporal consistency. + +# 3. 
Methodology + +To accurately label each vertex within our 4D textured scan sequences, we leverage a semi-automatic parsing pipeline + +that incorporates but minimizes manual efforts during the labeling process. Fig. 2 depicts the overall workflow of our pipeline. We first render 24 multi-view images of the current frame textured scan. We combine those images with the previous frame's multi-view images and labels to deploy three state-of-the-art tools to vote candidate labels for each rendered pixel (Sec. 3.1): a) human image parser, b) optical flow transfer, and c) segmentation masks. Next, we re-project and fuse all the 2D label votes via a Graph Cut optimization to obtain vertex-level semantic labels, considering neighboring and temporal consistency (Sec. 3.2). For those challenging frames where further labeling refinement is needed (around $3\%$ in our dataset), we refined their semantic labels with a manual rectification step that we feed back into the optimization (Sec. 3.3). We describe the details of the pipeline within this section. + +# 3.1. Multi-view Parsing + +At each frame $k \in \{1, \dots, N_{\text{frame}}\}$ , we render the 3D-mesh into a set of multi-view images, consisting of twelve horizontal, six upper, and six lower uniformly distributed views. We note this as $I_{img,n,k}$ with $n \in \{1, \dots, N_{\text{view}} = 24\}$ . Within the multi-view space, we tackle the problem of assigning a label vote $l$ to each pixel $p$ using multi-view image-based models. The label $l$ varies for human skin, hair, shoes, upper clothing (shirts, hoodies), lower clothing (shorts, pants), and outer clothing (jackets, coats). For clarity, we omit the frame index $(k)$ in the following unless they are strictly needed. Please refer to Fig. 2 and the Supp. Mat. for more label definitions and the versatility of our parsing method with new labels like belts and socks. + +Human image parser (PAR). Our primary source of labels is a deep-learning image parser, which provides pixel-level votes for body parts and clothes. Specifically, we apply Graphonomy [14] to each view $n$ and store the labels as a new set of images $\{I_{par}\}$ (see Fig. 2). These labels are then accessible by the vote function $f_{par,n}(p,l)$ that checks if the image $I_{par,n}$ matches the value $l$ at the pixel $p$ , in which case returns 1, or 0 otherwise. This vote function and the other two defined below will be crucial later when setting our full-mesh optimization (Sec. 3.2). + +Optical flow transfer (OPT). This block leverages the previous frame's multi-view labels to provide temporal consistency. Specifically, we use the optical flow predictor RAFT [42] to transfer multi-view labels in the $k - 1$ frame to the current $k$ frame using the texture features on the rendered multi-view images. Similarly to the image parser above, the optical flow output goes to a set $\{I_{opt}\}$ . These labels are accessible via the vote function $f_{opt,n}(p,l)$ , which checks $I_{opt,n}$ and returns 1 if label $l$ is in $p$ and 0 otherwise. + +Segmentation masks (SAM). The multi-view votes generated by the Human Image Parser sometimes lack 3D consistency, particularly when dealing with open garments un + +![](images/be342b7739a18ae48556547e268b25e016e371e890d2983624a6ab38295105ed.jpg) +Figure 2. 4D Human parsing method. We first render current and previous frame scans into multi-view images and labels. Then collect multi-view parsing results from the image parser, optical flows, and segmentation masks (Sec. 3.1). 
Finally, we project multi-view labels to 3D vertices and optimize vertex labels using the Graph Cut algorithm with vertex-wise unary energy and edge-wise binary energy (Sec. 3.2). The manual rectification labels can be easily introduced by checking multi-view rendered labels. (Sec. 3.3). + +der dynamic motions (cf. Fig. 3). While the votes derived from the optical flows provide a cross-frame prior, they may not accurately track every human part and can't identify newly emerging regions. Therefore, we introduce segmentation masks to regularize the label consistency within each masked region. We apply the Segment Anything Model [27] to each rendered image and obtain a self-define group of masks $M_{m,n}$ , with the index $m \in \{1, \dots, N_{mask,n}\}$ . Within a mask $M_{m,n}$ we compute the score function $S(l, M_{m,n})$ that fuses the votes of the image parser and the optical flow, normalized by the area of the mask: + +$$ +\mathcal {S} (l, M _ {m, n}) = \frac {\sum_ {p \in M _ {m , n}} \left[ f _ {p a r , n} (p , l) + \lambda_ {p o} f _ {o p t , n} (p , l) \right]}{\sum_ {p \in M _ {m , n}} \left(1 + \lambda_ {p o}\right)}, \tag {1} +$$ + +where the factor $\lambda_{po}$ weights the contribution of OPT over PAR. We now define a check function, $\mathcal{C}(p,M_{m,n})$ , that returns 1 if the input evaluation pixel $p$ is in the mask $M_{m,n}$ and 0 otherwise. Finally, we obtain the corresponding vote function by summing over all the masks in the image: + +$$ +f _ {s a m, n} (p, l) = \sum_ {m \in 1: N _ {m a s k, n}} \mathcal {C} (p, M _ {m, n}) * \mathcal {S} (l, M _ {m, n}). \tag {2} +$$ + +# 3.2. Graph Cut Optimization for Vertex Parsing + +The next step in our semi-automatic process is combining all the labels obtained in Sec. 3.1 to assign a unique label to each scan vertex $v_{i}$ , with $i \in \{1, \dots, N_{\text{vert}}\}$ . We frame this 3D semantic segmentation problem as a graph cut optimization: each 3D frame is interpreted as a graph $G$ , where vertices are now nodes and mesh edges are connections. Note that in a traditional Graph Cut, the values of the nodes are fixed, and the optimization computes only the + +cost of breaking a connection. In our case, we have several votes for a vertex label, coming from three different tools and from concurrent multi-view projections. We define our cost function that consists of two terms, + +$$ +E (L) = \sum_ {i \in 1: N _ {v e r t}} E _ {v e r t} \left(l _ {i}\right) + \sum_ {i, j \in 1: N _ {v e r t}} E _ {e d g e} \left(l _ {i}, l _ {j}\right), \tag {3} +$$ + +where $L = \{l_i\}$ represents all the vertex labels in current frame. As described below, the term $E_{vert}$ combines the different votes into a single cost function, while $E_{edge}$ evaluates neighboring labels for consistent 3D segmentation. We follow an approach similar to [8]. + +Vertex-wise unary energy. The cost function per node or Unary energy comes from combining the different votes obtained in the multi-view image processing (see Sec. 3.1): + +$$ +E _ {v e r t} \left(l _ {i}\right) = \sum_ {n \in 1: N _ {v i e w}} \frac {\lambda_ {p} E _ {p a r , n} + \lambda_ {o} E _ {o p t , n} + \lambda_ {s} E _ {s a m , n}}{N _ {v i e w}}, \tag {4} +$$ + +where we combine the human image parser $(E_{par})$ , the cross-frame optical prior $(E_{opt})$ , and the segmentation masks regularization $(E_{sam})$ contributions. 
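Before formalizing these terms, a toy numerical illustration of Eq. (4) is given below: per-pixel votes from the three sources are accumulated into a per-vertex cost table over all views. The arrays and weights are synthetic placeholders, and the pairwise edge term of the graph cut (Eq. (6)) is omitted, so the final argmin reflects only the unary part of the optimization.

```python
import numpy as np

rng = np.random.default_rng(0)
n_verts, n_labels, n_views, n_pix = 200, 6, 24, 500
lam_p, lam_o, lam_s = 1.0, 1.0, 1.0          # placeholder weights for PAR / OPT / SAM

# E_vert[v, l]: unary cost of assigning label l to vertex v (lower is better)
E_vert = np.zeros((n_verts, n_labels))

for _ in range(n_views):
    # toy stand-ins for one rendered view:
    pix_vert = rng.integers(0, n_verts, n_pix)                      # vertex hit by each pixel
    bary_w = rng.random(n_pix)                                      # barycentric weight u
    f_par = np.eye(n_labels)[rng.integers(0, n_labels, n_pix)]      # image-parser vote
    f_opt = np.eye(n_labels)[rng.integers(0, n_labels, n_pix)]      # optical-flow vote
    f_sam = rng.random((n_pix, n_labels))                           # mask score per pixel

    # per-pixel contribution: -w_X(p, v) * f_X(p, l), summed into each vertex row
    contrib = -(lam_p * bary_w[:, None] * f_par
                + lam_o * bary_w[:, None] * f_opt
                + lam_s * 1.0 * f_sam)                              # w_sam is a constant 1
    np.add.at(E_vert, pix_vert, contrib)

E_vert /= n_views
labels = E_vert.argmin(axis=1)               # unary-only assignment, no smoothness term
print(labels[:10])
```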
All these energy terms can be written with the same equation by using the notation $\mathcal{X} = \{par, opt, sam\}$ : + +$$ +E _ {\mathcal {X}, n} \left(l _ {i}\right) = \sum_ {p \in P \left(v _ {i}, n\right)} - w _ {\mathcal {X}} \left(p, v _ {i}\right) f _ {\mathcal {X}, n} \left(p, l _ {i}\right), \tag {5} +$$ + +meaning that energy of the method $\mathcal{X}$ , calculated for a proposed label $l_{i}$ , is obtained by summing over those pixels $p\in P(v_i,n)$ whose projections are within a triangle of $v_{i}$ . The weights for the cases of $E_{par}$ and $E_{opt}$ are set to the barycentric distance from the projected pixel $p$ to the vertex $v_{i}$ , which means $w_{par} = w_{opt} = u$ as in Fig. 2. For $E_{sam}$ instead, we set the weight $w_{sam}$ to the constant value 1 given that we look for an across-vertex regularization. + +![](images/084aa22d3dd3b5dcb324e58f8a0f1888981cbd6de4d78f1a19dec9664621bae9.jpg) +Figure 3. Qualitative ablation study. We visualize the effectiveness of our 4D human parsing method on our 4D-DRESS dataset. From left to right, we show the improvements after adding the optical flow labels and mask scores to the multi-view image parser labels. The manual rectification efforts can be easily introduced from multi-view rendered labels, with which we achieve high-quality vertex annotations. The problem of isolated labels can be relieved by introducing the edge-wise binary energy term. + +Edge-wise binary energy. The Binary energy term penalizes the case of adjacent vertices with different labels, encouraging neighboring vertices to take the same label. Being $A$ the adjacency matrix of the graph $G$ and $\delta$ the Dirac delta function, the edge cost can be calculated as follows: + +$$ +E _ {e d g e} \left(l _ {i}, l _ {j}\right) = \lambda_ {b} A _ {i, j} \left(1 - \delta \left(l _ {i}, l _ {j}\right)\right), \tag {6} +$$ + +which increases the energy by $\lambda_{b}$ in the case that the adjacent vertices $v_{i}, v_{j}$ take different labels $l_{i} \neq l_{j}$ . + +# 3.3. Manual Rectification of 3D Labels + +When manual rectification is needed, we introduce it back into the multi-view space as an additional 2D annotation, and we recalculate the steps in Sec. 3.2. Concretely, we ran the graph cut optimization for the first time. Then, we rendered the vertex labels into multi-view labels, from which we let a person introduce corrections by comparing the resulting labels with the textured multi-view images. Similarly to the vote functions of the image parser and optical flow, we create a vote function $f_{man}(p,l)$ that accesses this set of images with rectified annotations and returns 1 if the label $l$ is assigned to the pixel $p$ and 0 otherwise. + +Similar to previous cases, we define a per-view manual energy $(E_{man})$ by using the variable $\mathcal{X} = man$ in Eq. (5), and we added it to the global per-node energy $E_{vert}$ in Eq. (4). We use a constant large weight for $w_{man}$ to favor the manual annotation over other sources of voting where we rectified the labels. The final vertex labels $L^{*} = \{l^{*}_{i}\}$ are obtained after the second round of graph cut optimization. This manual rectification process finally changed $1.5\%$ of vertices within $3.2\%$ of all frames. The rectification process is detailed in Supp. Mat. + +# 4. Experiments + +To validate the effectiveness of our method, we conducted controlled experiments on two synthetic datasets, + +
| Method | CLOTH4D [53] Inner | BEDLAM [7] Inner | BEDLAM [7] Outer |
| --- | --- | --- | --- |
| SMPL+D [36] | 0.872 | 0.846 | 0.765 |
| PAR Only [43] | 0.961 | 0.910 | 0.714 |
| PAR+OPT | 0.969 | 0.963 | 0.942 |
| PAR+OPT+SAM | 0.995 | 0.993 | 0.988 |
+ +Table 2. Baseline and ablation study. Mean accuracy of 4D human parsing methods applied on synthetic datasets. The Inner and Outer outfits are selected according to our definition in Sec. 5 + +CLOTH4D [53] and BEDLAM [7], where ground-truth semantic labels are available. We first compare our parsing method with a template-based baseline [36], that uses a semantic template (SMPL model with per-vertex displacements) to track and parse the clothed human scans. Due to the limited resolution and the fixed topology nature of the SMPL+D model, its parsing accuracy is lower than $90\%$ on all synthetic outfits (see Tab. 2). + +We then compare our 4D parsing pipeline with several ablations and report them in Tab. 2. We use an example scan from 4D-DRESS to support the visualization of the ablation study in Fig. 3. Using PAR only shows reasonable results for upper and lower clothes. Yet, it predicts inconsistent labels at open garments like jackets and coats (Fig. 3 PAR Only), resulting in only $71.4\%$ parsing accuracy on the BEDLAM dataset. The optical flow labels from the previous frame can serve as a cross-frame prior, yet accuracy may vary, particularly in fast-moving arms and cloth boundaries (Fig. 3 PAR+OPT). By fusing both of the previous multi-view labels via the segmentation masks, we achieve better boundary labels (Fig. 3 PAR+OPT+SAM), with $98.8\%$ accuracy on the outer outfits in BEDLAM, with challenging open garments. Finally, we show the effect of introducing manual efforts to rectify incorrect labels (Fig. 3 + +![](images/2be5552cd65c63b91fa805f1893d335137d57f11e7c54e70a528ffcac67cca89.jpg) +Figure 4. Qualitative examples for clothing simulation methods. On the left are templates used for simulations. On the right are ground-truth geometries and original scans, LBS baseline results in body penetrations and overly stretched areas. Compared to other methods, HOOD better models dress and jackets and, with tuned material parameters, HOOD* achieves simulations closest to the ground truth. + +With Manual). Our parsing method can also be deployed to annotate other existing 4D human datasets. We present examples of BUFF[51], X-Humans [40], and ActorsHQ[23] and additional qualitative results in Supp. Mat. + +# 5. Dataset Description + +4D-DRESS contains 520 motion sequences (150 frames at 30 fps) in 64 real-world human outfits with a total of 78k frames. Each frame consists of multi-view images at 1k resolution, an 80k-face triangle 3D mesh with vertex annotations, and a 1k-resolution texture map. We also provide each garment with its canonical template to benefit the clothing simulation study. Finally, each 3D scan is accurately registered by SMPL/SMPL-X body models. + +To record 4D-DRESS we recruited 32 participants (18 female), with an average age of 24. The dataset consists of 4 dresses, 30 upper, 28 lower, and 32 outer garments. Participants were instructed to perform different dynamic motions for each 5-second sequence. For each participant, we capture two types of outfits: Inner Outfit comprising the inner layer dress/upper, and lower garments; and Outer Outfit with an additional layer of garment, such as open jackets or coats. A unique feature of 4D-DRESS is the challenging clothing deformations we captured. To quantify these deformations, we compute the mean distances from the garments to the registered SMPL body surfaces. The inner and outer outfits exhibit distance ranges up to $7.12\mathrm{cm}$ and $14.76\mathrm{cm}$ over all frames. 
This is twice as much as what we observed in the X-Humans dataset [40], for example. In the $10\%$ most challenging frames, this increases to $20.09\mathrm{cm}$ for outer outfits, highlighting the prevalence of challenging garments. Please refer to Supp. Mat. for dataset details. + +# 6. Benchmark Evaluation + +With high-quality 4D scans and diverse garment meshes in dynamic motions, 4D-DRESS serves as an ideal ground + +
| Method | Lower CD↓ | Lower $E_{str}$↓ | Upper CD↓ | Upper $E_{str}$↓ | Dress CD↓ | Dress $E_{str}$↓ | Outer CD↓ | Outer $E_{str}$↓ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| LBS | 1.767 | 0.333 | 2.167 | 0.095 | 4.461 | 1.293 | 4.626 | 0.811 |
| PBNS [4] | 1.885 | 0.107 | 2.687 | 0.040 | 4.869 | 0.643 | 4.859 | 0.107 |
| NCS [5] | 1.716 | 0.017 | 2.112 | 0.016 | 4.548 | 0.031 | 4.738 | 0.025 |
| HOOD [15] | 2.070 | 0.008 | 2.668 | 0.013 | 4.292 | 0.010 | 5.355 | 0.011 |
| HOOD* | 0.924 | 0.010 | 1.308 | 0.015 | 2.463 | 0.009 | 2.833 | 0.009 |
+ +Table 3. Clothing simulation benchmark. CD is Chamfer Distance between the simulation and ground truth. $E_{str}$ denotes stretching energy with respect to the template. + +truth for a variety of computer vision and graphics benchmarks. In our work, we outline several standard benchmarks conducted in these fields using our dataset. Our primary focus is on tasks related to clothing simulation (Sec. 6.1) and clothed human reconstruction (Sec. 6.2). Additionally, benchmarks on human parsing and human representation learning are included in our Supp. Mat. + +# 6.1. Clothing Simulation + +Experimental setup. We introduce a new benchmark for clothing simulation, leveraging the garment meshes from 4D-DRESS, which capture dynamical real-world clothing deformations. This benchmark evaluates three methods for modeling garment dynamics: PBNS [4], Neural Cloth Simulator (NCS [5]), and HOOD [15], as well as a baseline method that applies SMPL-based linear blend-skinning (LBS) to the template. We ran the simulations using T-posed templates extracted from static scans and compared the results to the ground-truth garment meshes across various pose sequences. Our evaluation metrics include the Chamfer Distance (CD), which compares the resulting mesh sequences with ground-truth point clouds, and the average stretching energy $(E_{str})$ calculated by measuring the difference in edge lengths between the simulated and template meshes. The experiments were conducted across four categories of garments (Lower, Upper, Dress, and Outer), + +![](images/f65e0803ab4942356dd538da773ef30e828b43dfc0b40e6ec6b0d78baf4e4f32.jpg) +Figure 5. Examples of clothed human reconstruction on 4D-DRESS. We evaluate state-of-the-art methods using both inner (Top) and outer (Bottom) outfits. We show that existing methods generally struggle with the challenging loose garments. Moreover, these approaches cannot faithfully recover realistic details such as clothing wrinkles. + +with four garment templates in each category. We simulated clothing deformation for each garment in six different pose sequences, providing a comprehensive comparison of their ability to generate realistic motions. + +Fine-tuning material parameters. To demonstrate the advantages of real-world garment meshes in 4D-DRESS, we also introduce a simple optimization-based strategy for inverse simulation using HOOD. Specifically, we optimize the material parameters fed into the HOOD model to minimize the simulations' Chamfer Distance to the ground-truth sequences and their stretching energy. This optimized version is denoted as HOOD*. For more details on the material optimization experiments, please refer to Supp. Mat. + +Evaluation results. The quantitative and qualitative comparisons of the clothing simulation methods are presented in Tab. 3 and Fig. 4 respectively. The LBS baseline and LBS-based approaches (PBNS and NCS) perform better with upper and lower garments, which exhibit limited free-flowing motions compared with the dress and outer garments. Conversely, HOOD excels with dresses, generating more natural, free-flowing motions and achieving lower stretching energy. However, if HOOD fails to generate realistic motions for a single frame, this error propagates to all subsequent frames. This issue does not occur in the LBS-based methods, which generate geometries independently for each frame. With finely-tuned material parameters, HOD* produces garment sequences that more faithfully replicate real-world behavior. 
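The two metrics in Tab. 3 are straightforward to compute, and the same quantities can serve as the objective for the material fine-tuning behind HOOD*. The sketch below shows one possible version, assuming nothing beyond the definitions above: Chamfer Distance via nearest neighbors, stretching energy as the mean squared relative change in edge length, and a toy grid search over a single scalar material parameter of a stub simulator. The stub `simulate`, the parameter range, and the loss weighting are illustrative placeholders, not the actual HOOD interface or the protocol in Supp. Mat.

```python
import numpy as np
from scipy.spatial import cKDTree

def chamfer_distance(a, b):
    """Symmetric Chamfer Distance between two point sets of shape (N, 3) and (M, 3)."""
    return cKDTree(b).query(a)[0].mean() + cKDTree(a).query(b)[0].mean()

def stretching_energy(verts, template_verts, edges):
    """Mean squared relative edge-length change w.r.t. the template mesh."""
    def edge_len(v):
        return np.linalg.norm(v[edges[:, 0]] - v[edges[:, 1]], axis=1)
    rest, cur = edge_len(template_verts), edge_len(verts)
    return np.mean(((cur - rest) / rest) ** 2)

# toy setup: a flat "garment" patch and a fake captured deformation as ground truth
rng = np.random.default_rng(1)
template = np.c_[rng.random((300, 2)), np.zeros(300)]
edges = np.stack([np.arange(299), np.arange(1, 300)], axis=1)    # dummy connectivity
gt = template * 1.05 + np.array([0.0, 0.0, 0.02])

def simulate(template_verts, stiffness):
    """Stand-in for a learned cloth simulator such as HOOD; not the real model."""
    return template_verts * (1.0 + 0.1 / stiffness)

# grid search over one hypothetical material parameter, minimizing CD + lambda * E_str
best = min(
    (chamfer_distance(simulate(template, s), gt)
     + 0.1 * stretching_energy(simulate(template, s), template, edges), s)
    for s in np.linspace(0.5, 5.0, 10)
)
print(f"best loss {best[0]:.4f} at stiffness {best[1]:.2f}")
```

In practice the searched parameters would be per-garment material fields rather than one scalar, but the objective structure (fit the captured sequence while penalizing unrealistic stretching) is the same.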
We anticipate that future research in learned garment simulation will increasingly focus on modeling real-world garments made from complex heterogeneous materials. This will be a major step in creating realistically animated digital avatars, and we believe 4D-DRESS will be highly instrumental in this task. + +# 6.2. Clothed Human Reconstruction + +Experimental setup. We create a new benchmark for evaluating state-of-the-art clothed human reconstruction methods on the 4D-DRESS dataset. This benchmark is di + +
| Method | Inner CD↓ | Inner NC↑ | Inner IoU↑ | Outer CD↓ | Outer NC↑ | Outer IoU↑ |
| --- | --- | --- | --- | --- | --- | --- |
| PIFu [37] | 2.696 | 0.792 | 0.690 | 2.783 | 0.759 | 0.697 |
| PIFuHD [38] | 2.426 | 0.793 | 0.739 | 2.393 | 0.763 | 0.743 |
| PaMIR [52] | 2.520 | 0.805 | 0.706 | 2.608 | 0.777 | 0.715 |
| ICON [47] | 2.473 | 0.798 | 0.752 | 2.832 | 0.762 | 0.756 |
| PHORHUM [1] | 3.944 | 0.725 | 0.580 | 3.762 | 0.705 | 0.603 |
| ECON [48] | 2.543 | 0.796 | 0.736 | 2.852 | 0.760 | 0.728 |
| SiTH [21] | 2.110 | 0.824 | 0.755 | 2.322 | 0.794 | 0.749 |
+ +Table 4. Clothed human reconstruction benchmark. We computed Chamfer distance (CD), normal consistency (NC), and Intersection over Union (IoU) between ground truth and reconstructed meshes obtained from different baselines. + +vided into three subtasks. First, we evaluate single-view human reconstruction utilizing images and high-quality 3D scans from our dataset. In addition, benefiting from the garment meshes in our dataset, we establish the first real-world benchmark for evaluating single-view clothing reconstruction. Finally, we assess video-based human reconstruction approaches leveraging the sequences in 4D-DRESS that capture rich motion dynamics of both human bodies and garments. In all the experiments, we report 3D metrics including Chamfer Distance (CD), Normal Consistency (NC), and Intersection over Union (IoU) to compare the predictions with ground-truth meshes. + +Single-view human reconstruction. We use the two test sets defined in Sec. 5 (denote as Outer and Inner) to evaluate the following single-view reconstruction methods: PIFu [37], PIFuHD [38], PaMIR [52], ICON [47], PHORHUM [1], ECON [48], and SiTH [21]. The evaluation results are summarized in Fig. 5 and Tab. 4. We observed that methods leveraging SMPL body models as guidance (i.e., ICON, ECON, SiTH) performed better in reconstructing inner clothing. However, their performance significantly declined when dealing with outer garments. On the other hand, end-to-end models like PIFu and PIFuHD demonstrated more stability with both clothing types. This + +![](images/243a3f5a2c20eacf8a888d8b463bdd71c08c62fcbffd8fa9b5354e4927b91411.jpg) +Figure 6. Examples of clothing reconstruction on 4D-DRESS. We visualize the reconstructed garment meshes from different approaches. These methods trained on synthetic datasets failed to predict accurate clothing sizes and detailed wrinkles. + +
| Method | Shoes CD↓ | Shoes IoU↑ | Lower CD↓ | Lower IoU↑ | Upper CD↓ | Upper IoU↑ | Outer CD↓ | Outer IoU↑ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| BCNet [24] | - | - | 2.533 | 0.675 | 2.079 | 0.700 | 3.600 | 0.639 |
| SMPLicit [12] | 2.619 | 0.621 | 2.101 | 0.698 | 2.452 | 0.617 | 3.359 | 0.618 |
| ClothWild [34] | 3.657 | 0.548 | 2.690 | 0.582 | 3.279 | 0.533 | 4.163 | 0.588 |
+ +leads to an intriguing research question: whether the human body prior is necessary for reconstructing clothing. Qualitatively, we see that even the best-performing methods cannot perfectly reconstruct realistic free-flowing jackets as shown in Tab. 4. We believe 4D-DRESS will offer more valuable insights for research in clothed human reconstruction. + +Single-view clothes reconstruction. Clothes reconstruction has received relatively little attention compared to full-body human reconstruction. Leveraging the garment meshes in 4D-DRESS, we introduce the first real-world benchmark to assess prior art, including BCNet [24], SMPLicit [12], and ClothWild [34]. The results of different clothing types, as shown in Fig. 6, indicate a significant gap between the reconstructed and real clothing. Firstly, the clothing sizes produced by these methods are often inaccurate, suggesting a lack of effective use of image information for guidance. Moreover, the results typically lack geometric details like clothing wrinkles compared to full-body reconstruction. We report quantitative results in Tab. 5. We observed that the data-driven method (BCNet) performs better with inner clothing, while the generative fitting method (SMPLicit) shows more robustness to outer clothing, such as coats. However, none of these methods is designed for or trained on real-world data. The domain gap between synthetic and real data still limits their capability to produce accurate shapes and fine-grained details. We expect our benchmark and dataset will draw more research attention to the topic of real-world clothing reconstruction. + +![](images/0e4c67b1af81e24c5ffb4527ca8890f4ccb08b968c83878ae911823f9ff57700.jpg) +Figure 7. Video-based human reconstruction. Qualitative results of video-based human reconstruction methods on 4D-DRESS. Prior works struggle to reconstruct 3D human with challenging outfits and cannot recover the fine-grained surface details. + +Table 5. Clothing reconstruction benchmark. We report Chamfer Distance (CD), and Intersection over Union (IoU) between the ground-truth garment meshes and the reconstructed clothing. + +
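For reference, the snippet below sketches how two of the reported reconstruction metrics could be computed for a pair of meshes: Chamfer Distance on surface samples and a volumetric IoU on a uniform grid of occupancy test points. It assumes `trimesh` and roughly watertight meshes; the sampling count, grid resolution, and toy box meshes are illustrative choices, not the exact benchmark protocol.

```python
import numpy as np
import trimesh
from scipy.spatial import cKDTree

def chamfer_distance(mesh_a, mesh_b, n_samples=10000):
    """Symmetric Chamfer Distance between surface point samples of two meshes."""
    pa, pb = mesh_a.sample(n_samples), mesh_b.sample(n_samples)
    return cKDTree(pb).query(pa)[0].mean() + cKDTree(pa).query(pb)[0].mean()

def volumetric_iou(mesh_a, mesh_b, resolution=64):
    """Occupancy IoU over a uniform grid spanning both bounding boxes."""
    lo = np.minimum(mesh_a.bounds[0], mesh_b.bounds[0])
    hi = np.maximum(mesh_a.bounds[1], mesh_b.bounds[1])
    axes = [np.linspace(lo[i], hi[i], resolution) for i in range(3)]
    pts = np.stack(np.meshgrid(*axes, indexing="ij"), axis=-1).reshape(-1, 3)
    in_a, in_b = mesh_a.contains(pts), mesh_b.contains(pts)   # needs watertight meshes
    union = np.logical_or(in_a, in_b).sum()
    return np.logical_and(in_a, in_b).sum() / max(union, 1)

# toy example: two boxes standing in for a ground-truth garment and a reconstruction
gt = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
pred = trimesh.creation.box(extents=(1.0, 1.0, 0.8))
print(chamfer_distance(gt, pred), volumetric_iou(gt, pred, resolution=32))
```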
| Method | Inner CD↓ | Inner NC↑ | Inner IoU↑ | Outer CD↓ | Outer NC↑ | Outer IoU↑ |
| --- | --- | --- | --- | --- | --- | --- |
| SelfRecon [25] | 3.180 | 0.729 | 0.754 | 4.027 | 0.683 | 0.745 |
| Vid2Avatar [16] | 2.870 | 0.750 | 0.772 | 3.014 | 0.725 | 0.787 |
+ +Table 6. Video-based human reconstruction. Results of video-based human reconstruction methods on 4D-DRESS. + +Video-based human reconstruction Leveraging the sequential 4D data in our dataset, we create a new benchmark for evaluating video-based human reconstruction methods. We applied Vid2Avatar [16] and SelfRecon [25] to obtain 4D reconstructions and compared them with the provided ground-truth 4D scans. As observed in Fig. 7, both methods struggle with diverse clothing styles and face challenges in reconstructing surface parts that greatly differ in topology from the human body, such as the open jacket. Moreover, there remains a noticeable discrepancy between the real geometry and the recovered surface details. Quantitatively, the existing methods cannot achieve satisfactory reconstruction results with outer garments, as demonstrated by a large performance degradation in Tab. 6. We believe 4D-DRESS provides essential data for advancing video-based human reconstruction methods, particularly in achieving detailed geometry recovery for challenging clothing. + +# 7. Discussion + +Limitations. Our current pipeline requires substantial computational time. The offline manual rectification process and garment mesh extraction also demand expertise in 3D editing and additional human efforts. These factors constrain the scalability of our dataset. With a goal of expanding more diverse subjects and clothing, real-time 4D annotation and rectification/editing will be exciting future work. Conclusion. 4D-DRESS is the first real-world 4D clothed human dataset with semantic annotations, aiming to bridge the gap between existing clothing algorithms and real-world human clothing. We demonstrate that 4D-DRESS is not only a novel data source but also a challenging benchmark for clothing simulation, reconstruction, and other related tasks. We believe that 4D-DRESS can support a wide range of endeavors and foster research progress by providing high-quality 4D data in life like human clothing. + +Acknowledgements. This work was partially supported by the Swiss SERI Consolidation Grant "AI-PERCEIVE". AG was supported in part by the Max Planck ETH CLS. + +# References + +[1] Thiemo Alldieck, Mihai Zanfir, and Cristian Sminchisescu. Photorealistic monocular 3d reconstruction of humans wearing clothing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 7 +[2] Matthieu Armando, Laurence Boissieux, Edmond Boyer, Jean-Sebastien Franco, Martin Humenberger, Christophe Legras, Vincent Leroy, Mathieu Marsot, Julien Pansiot, Sergi Pujades, Rim Rekik, Gregory Rogez, Anilkumar Swamy, and Stefanie Wuhrer. 4dhumanoutfit: a multi-subject 4d dataset of human motion sequences in varying outfits exhibiting large displacements. Computer Vision and Image Understanding, 2023. 2, 3 +[3] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Cloth3d: clothed 3d humans. In Proceedings of the European Conference on Computer Vision (ECCV), pages 344-359. Springer, 2020. 1, 2, 3 +[4] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Pbs: Physically based neural simulation for unsupervised garment pose space deformation. ACM Transactions on Graphics (TOG), 40(6), 2021. 1, 6 +[5] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Neural cloth simulation. ACM Transactions on Graphics (TOG), 41(6):1-14, 2022. 1, 6 +[6] Bharat Lal Bhatnagar, Garvita Tiwari, Christian Theobalt, and Gerard Pons-Moll. Multi-garment net: Learning to dress 3d people from images. 
In Proceedings of the IEEE International Conference on Computer Vision (ICCV). IEEE, 2019. 2, 3 +[7] Michael J. Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. BEDLAM: A synthetic dataset of bodies exhibiting detailed lifelike animated motion. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 8726-8737, 2023. 1, 2, 3, 5 +[8] Y. Boykov, O. Veksler, and R. Zabih. Fast approximate energy minimization via graph cuts. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 23(11): 1222-1239, 2001. 4 +[9] Xianjie Chen, Roozbeh Mottaghi, Xiaobai Liu, Sanja Fidler, Raquel Urtasun, and Alan Yuille. Detect what you can: Detecting and representing objects using holistic models and body parts. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2014. 3 +[10] CLO. https://www.clo3d.com, 2022. 3 +[11] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (TOG), 34(4):1-13, 2015. 2, 3 +[12] Enric Corona, Albert Pumarola, Guillem Alenyà, Gerard Pons-Moll, and Francesc Moreno-Noguer. Smplicit: Topology-aware generative model for clothed people. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1, 8 +[13] Ke Gong, Xiaodan Liang, Yicheng Li, Yimin Chen, Ming Yang, and Liang Lin. Instance-level human parsing via part grouping network. In Proceedings of the European Conference on Computer Vision (ECCV), pages 770-785, 2018. 3 +[14] Ke Gong, Yiming Gao, Xiaodan Liang, Xiaohui Shen, Meng Wang, and Liang Lin. Graphonomy: Universal human parsing via graph transfer learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3 +[15] Artur Grigorev, Bernhard Thomaszewski, Michael J. Black, and Otmar Hilliges. Hood: Hierarchical graphs for generalized modelling of clothing dynamics. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 16965-16974, 2023. 1, 6 +[16] Chen Guo, Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Vid2Avatar: 3d avatar reconstruction from videos in the wild via self-supervised scene decomposition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 8 +[17] Sang-Hun Han, Min-Gyu Park, Ju Hong Yoon, Ju-Mi Kang, Young-Jae Park, and Hae-Gon Jeon. High-fidelity 3d human digitization from single 2k resolution images. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3 +[18] Haoyu He, Jing Zhang, Qiming Zhang, and Dacheng Tao. Grapy-ml: Graph pyramid mutual learning for cross-dataset human parsing. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2020. 3 +[19] Haoyu He, Jing Zhang, Bhavani Thuraisingham, and Dacheng Tao. Progressive one-shot human parsing. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2021. 3 +[20] Heming Zhu, Yu Cao, Hang Jin, Weikai Chen, Dong Du, Zhangye Wang, Shuguang Cui, and Xiaoguang Han. Deep fashion3d: A dataset and benchmark for 3d garment reconstruction from single images. In Proceedings of the European Conference on Computer Vision (ECCV), pages 512-530. Springer International Publishing, 2020. 3 +[21] Hsuan-I Ho, Jie Song, and Otmar Hilliges. Sith: Single-view textured human reconstruction with image-conditioned diffusion.
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 7 +[22] Hsuan-I Ho, Lixin Xue, Jie Song, and Otmar Hilliges. Learning locally editable virtual humans. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3 +[23] Mustafa Işik, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 2, 3, 6 +[24] Boyi Jiang, Juyong Zhang, Yang Hong, Jinhao Luo, Ligang Liu, and Hujun Bao. BCnet: Learning body and cloth shape from a single image. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 1, 8 +[25] Boyi Jiang, Yang Hong, Hujun Bao, and Juyong Zhang. Selfrecon: Self reconstruction your digital avatar from monocular video. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 8 +[26] Hanbyul Joo, Tomas Simon, Xulong Li, Hao Liu, Lei Tan, Lin Gui, Sean Banerjee, Timothy Scott Godisart, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social interaction capture. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2017. 3 +[27] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollar, and Ross Girshick. Segment anything. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 4015-4026, 2023. 2, 4 +[28] Peike Li, Yunqiu Xu, Yunchao Wei, and Yi Yang. Self-correction for human parsing. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2020. 3 +[29] X. Liang, K. Gong, X. Shen, and L. Lin. Look into person: Joint body parsing & pose estimation network and a new benchmark. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 41(04):871-885, 2019. 3 +[30] Kunliang Liu, Ouk Choi, Jianming Wang, and Wonjun Hwang. Cdgnet: Class distribution guided network for human parsing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4473-4482, 2022. 3 +[31] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multi-person linear model. ACM Transactions on Graphics (TOG), 34(6):248:1-248:16, 2015. 3 +[32] Qianli Ma, Jinlong Yang, Anurag Ranjan, Sergi Pujades, Gerard Pons-Moll, Siyu Tang, and Michael J. Black. Learning to Dress 3D People in Generative Clothing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 3 +[33] Qianli Ma, Jinlong Yang, Siyu Tang, and Michael J. Black. The power of points for modeling humans in clothing. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2, 3 +[34] Gyeongsik Moon, Hyeongjin Nam, Takaaki Shiratori, and Kyoung Mu Lee. 3d clothed human reconstruction in the wild. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 1, 8 +[35] Chaitanya Patel, Zhouyingcheng Liao, and Gerard Pons-Moll. Tailornet: Predicting clothing in 3d as a function of human pose, shape and garment style. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). IEEE, 2020. 2, 3 +[36] Gerard Pons-Moll, Sergi Pujades, Sonny Hu, and Michael J. Black.
Clothcap: Seamless 4d clothing capture and retargeting. ACM Transactions on Graphics (TOG), 36(4), 2017. 2, 3, 5 +[37] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019. 7 +[38] Shunsuke Saito, Tomas Simon, Jason Saragih, and Hanbyul Joo. Pifuhd: Multi-level pixel-aligned implicit function for high-resolution 3d human digitization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 7 +[39] Yidi Shao, Chen Change Loy, and Bo Dai. Towards multi-layered 3d garments animation. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2, 3 +[40] Kaiyue Shen, Chen Guo, Manuel Kaufmann, Juan Zarate, Julien Valentin, Jie Song, and Otmar Hilliges. X-avatar: Expressive human avatars. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3, 6 +[41] Zhaoqi Su, Tao Yu, Yangang Wang, and Yebin Liu. Deepcloth: Neural garment representation for shape and style editing. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 45(2):1581-1593, 2023. 3 +[42] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision (ECCV), pages 402-419. Springer, 2020. 3 +[43] Garvita Tiwari, Bharat Lal Bhatnagar, Tony Tung, and Gerard Pons-Moll. Sizer: A dataset and model for parsing 3d clothing and learning size sensitive 3d clothing. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 3, 5 +[44] Unreal Engine 5. https://www.unrealengine.com, 2022. 3 +[45] Wenguan Wang, Hailong Zhu, Jifeng Dai, Yanwei Pang, Jianbing Shen, and Ling Shao. Hierarchical human parsing with typed part-relation reasoning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3 +[46] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. Fake it till you make it: face analysis in the wild using synthetic data alone. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 3681-3691, 2021. 3 +[47] Yuliang Xiu, Jinlong Yang, Dimitrios Tzionas, and Michael J. Black. ICON: Implicit Clothed humans Obtained from Normals. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 7 +[48] Yuliang Xiu, Jinlong Yang, Xu Cao, Dimitrios Tzionas, and Michael J. Black. ECON: Explicit Clothed humans Optimized via Normal integration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 7 +[49] Lu Yang, Wenhe Jia, Shan Li, and Qing Song. Deep learning technique for human parsing: A survey and outlook. arXiv preprint arXiv:2301.00394, 2023. 3 +[50] Tao Yu, Zerong Zheng, Kaiwen Guo, Pengpeng Liu, Qionghai Dai, and Yebin Liu. Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3 +[51] Chao Zhang, Sergi Pujades, Michael J. Black, and Gerard Pons-Moll. Detailed, accurate, human shape estimation from clothed 3d scan sequences. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 2, 3, 6 +[52] Zerong Zheng, Tao Yu, Yebin Liu, and Qionghai Dai.
Pamir: Parametric model-conditioned implicit representation for image-based human reconstruction. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2021. 7 +[53] Xingxing Zou, Xintong Han, and Waikeung Wong. Cloth4d: A dataset for clothed human reconstruction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 12847-12857, 2023. 1, 2, 3, 5 \ No newline at end of file diff --git a/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/images.zip b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..bade73f99748c7fc0e110f42bb8e0f57dcd27372 --- /dev/null +++ b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28abc93c4c46b579687d91bf36460ffc803d146433de49898b092c3b0764f369 +size 718425 diff --git a/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/layout.json b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..c56d0870aa17f48cb8a675bf2a0d7ddffa9ac4c2 --- /dev/null +++ b/2024/4D-DRESS_ A 4D Dataset of Real-World Human Clothing With Semantic Annotations/layout.json @@ -0,0 +1,7745 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 120, + 103, + 475, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 103, + 475, + 140 + ], + "spans": [ + { + "bbox": [ + 120, + 103, + 475, + 140 + ], + "type": "text", + "content": "4D-DRESS: A 4D Dataset of Real-World Human Clothing With Semantic Annotations" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "spans": [ + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "text", + "content": "Wenbo Wang" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "inline_equation", + "content": "^{*1}" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "text", + "content": " Hsuan-I Ho" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "inline_equation", + "content": "^{*1}" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "text", + "content": " Chen Guo" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "text", + "content": " Boxiang Rong" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "text", + "content": " Artur Grigorev" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "text", + "content": " \nJie Song" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "text", + "content": " Juan Jose Zarate" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "inline_equation", + "content": "^{\\dagger 1}" + }, + { + "bbox": [ + 56, + 159, + 532, + 197 + ], + "type": "text", + "content": " Otmar Hilliges" + }, + { + "bbox": [ + 56, + 159, + 532, + 
197 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 163, + 202, + 430, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 202, + 430, + 231 + ], + "spans": [ + { + "bbox": [ + 163, + 202, + 430, + 231 + ], + "type": "text", + "content": "Department of Computer Science, ETH Zürich Max Planck Institute for Intelligent Systems, Tübingen" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 218, + 232, + 372, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 232, + 372, + 243 + ], + "spans": [ + { + "bbox": [ + 218, + 232, + 372, + 243 + ], + "type": "text", + "content": "https://ait.ethz.ch/4d-dress" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 59, + 270, + 526, + 458 + ], + "blocks": [ + { + "bbox": [ + 59, + 270, + 526, + 458 + ], + "lines": [ + { + "bbox": [ + 59, + 270, + 526, + 458 + ], + "spans": [ + { + "bbox": [ + 59, + 270, + 526, + 458 + ], + "type": "image", + "image_path": "5658c0cc0dd26a6d35c17dc2035b6186240029f9a36a4eee95c9df6c12a11f6a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 459, + 546, + 492 + ], + "lines": [ + { + "bbox": [ + 45, + 459, + 546, + 492 + ], + "spans": [ + { + "bbox": [ + 45, + 459, + 546, + 492 + ], + "type": "text", + "content": "Figure 1. Overview of 4D-DRESS. We propose the first real-world 4D dataset of human clothing, capturing 64 human outfits in more than 520 motion sequences. These sequences include a) high-quality 4D textured scans; for each scan, we annotate b) vertex-level semantic labels, thereby obtaining c) the corresponding garment meshes and fitted SMPL(-X) body meshes." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 502, + 192, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 502, + 192, + 514 + ], + "spans": [ + { + "bbox": [ + 143, + 502, + 192, + 514 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 527, + 289, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 527, + 289, + 694 + ], + "spans": [ + { + "bbox": [ + 46, + 527, + 289, + 694 + ], + "type": "text", + "content": "The studies of human clothing for digital avatars have predominantly relied on synthetic datasets. While easy to collect, synthetic data often fall short in realism and fail to capture authentic clothing dynamics. Addressing this gap, we introduce 4D-DRESS, the first real-world 4D dataset advancing human clothing research with its high-quality 4D textured scans and garment meshes. 4D-DRESS captures 64 outfitsits in 520 human motion sequences, amounting to 78k textured scans. Creating a real-world clothing dataset is challenging, particularly in annotating and segmenting the extensive and complex 4D human scans. To address this, we develop a semi-automatic 4D human parsing pipeline. We efficiently combine a human-in-the-loop process with automation to accurately label 4D scans in di" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 503, + 546, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 503, + 546, + 575 + ], + "spans": [ + { + "bbox": [ + 306, + 503, + 546, + 575 + ], + "type": "text", + "content": "verse garments and body movements. 
Leveraging precise annotations and high-quality garment meshes, we establish several benchmarks for clothing simulation and reconstruction. 4D-DRESS offers realistic and challenging data that complements synthetic sources, paving the way for advancements in research of lifelike human clothing." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 585, + 387, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 585, + 387, + 597 + ], + "spans": [ + { + "bbox": [ + 306, + 585, + 387, + 597 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": "Human clothing is crucial in various applications such as 3D games, animations, and virtual try-on. Researchers are actively investigating algorithms for clothing reconstruction [12, 24, 34] and simulation [4, 5, 15], to achieve realistic clothing behavior, enhance user engagement, and enable cross-industry applications. These algorithms are frequently developed and assessed using synthetic datasets [3, 7, 53], since they comprise a) meshes covering various garment types and outfits and b) parametric body mod" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 702, + 205, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 702, + 205, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 702, + 205, + 713 + ], + "type": "text", + "content": "* Equal contributors † Corresponding author" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "550" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 100, + 70, + 492, + 235 + ], + "blocks": [ + { + "bbox": [ + 100, + 70, + 492, + 235 + ], + "lines": [ + { + "bbox": [ + 100, + 70, + 492, + 235 + ], + "spans": [ + { + "bbox": [ + 100, + 70, + 492, + 235 + ], + "type": "table", + "html": "
Dataset# of Outfits# of FramesData FormatTexturedSemantic LabelsLoose Garments
TailorNet [35]95.5kSMPL + Garments
ReSynth [33]2430kSMPLX + Point Clouds
CLOTH3D [3]8.5k2.1MSMPL + Garments
CLOTH4D [53]1k100kMesh + Garments
BEDLAM [7]111380kSMPL-X + Garments
D-LAYERS [39]5k700kSMPL + Garments
BUFF [51]614kScans + SMPL
CAPE [32]15140kSMPL+D
ActorsHQ [23]839kScans
X-Humans [40]2035kScans + SMPL-(X)
4DHumanOutfit [2]14459kScans + SMPL
4D-DRESS (Ours)6478kScans + SMPL(-X) + Garments
", + "image_path": "8457d3e5f3e7044d85e2feb5ee14c7e6408f0c2dad00d8d8024116778b0e982a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 241, + 547, + 288 + ], + "lines": [ + { + "bbox": [ + 46, + 241, + 547, + 288 + ], + "spans": [ + { + "bbox": [ + 46, + 241, + 547, + 288 + ], + "type": "text", + "content": "Table 1. Summary of 4D clothed human datasets. The datasets highlighted in gray color are synthetic datasets while the others are real-world scans. # of Outfits: number of outfits included; # of Frames: total number of 3D human frames; Data Format: 3D representations of human bodies and garments; Textured: with textured map or not; Semantic Labels: with semantic labels for clothing or not; Loose Garments: containing challenging loose clothing such as dresses or not. 4D-DRESS demonstrates outstanding features against others." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 295, + 289, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 295, + 289, + 464 + ], + "spans": [ + { + "bbox": [ + 46, + 295, + 289, + 464 + ], + "type": "text", + "content": "els with diverse motions. While synthetic datasets lead in outfit quantity and the number of frames provided (refer to Tab. 1), there also presents a significant challenge in bridging the domain gap between the synthetic and real garments. Despite the recently released real-world 4D human datasets such as X-Humans [40], ActorsHQ [23], and 4DHumanOutfit [2], a key limitation persists: they lack accurately segmented garment meshes, offering only raw human scans. Moreover, these datasets are limited in the number of loose garments (e.g., jackets and dresses) or dynamic motions, which reduces their applicability as test benches. These challenges highlight the need for a real-world 4D dataset that provides semantic annotations and captures diverse garments across various body motions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 468, + 287, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 468, + 287, + 648 + ], + "spans": [ + { + "bbox": [ + 46, + 468, + 287, + 648 + ], + "type": "text", + "content": "In this work, we contribute 4D-DRESS, the first real-world dataset of human clothing with 4D semantic segmentation. We aim to provide an evaluation testbench with real-world data for tasks related to human clothing in computer vision and graphics. We capture over 520 human motion sequences featuring 64 distinct real-world human outfits in a high-end multi-view volumetric capture system, similar to the one used in [11]. The complete dataset comprises a total of 78k frames, each composed of an 80k-face triangle mesh, a 1k resolution textured map, and a set of 1k resolution multi-view images. As illustrated in Fig. 1, we provide a) high-quality 4D textured scans, b) vertex-level semantic labels for various clothing types, such as upper, lower, and outer garments, and c) garment meshes along with their registered SMPL(-X) body models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": "Capturing real-world 4D sequences of humans wearing various clothing and performing diverse motions requires dedicated high-end capture facilities. 
Moreover, processing these clips into accurately annotated and segmented 4D human scans presents significant challenges. To develop our" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 295, + 547, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 295, + 547, + 523 + ], + "spans": [ + { + "bbox": [ + 304, + 295, + 547, + 523 + ], + "type": "text", + "content": "dataset, we tackled the task of labeling 78k high-resolution meshes at the vertex level. Given that the mesh topologies of consecutive frames do not inherently correspond, consistently propagating 3D vertex labels from one frame to the next is non-trivial. While previous methods [6, 36] attempted to fit a fixed-topology parametric body model to the scans, these template-based approaches still struggle with scenarios such as a jacket being lifted to reveal a shirt or the emergence of new vertices on a flowing coat as illustrated in the example shown in Fig. 3. Consequently, we opted for an alternative approach. We developed a semi-automatic and template-free 4D human parsing pipeline. Leveraging semantic maps from a 2D human parser [14] and a segmentation model [27], we extended these techniques to 4D, considering both multi-view and temporal consistency. Our pipeline accurately assigns vertex labels without manual intervention in " + }, + { + "bbox": [ + 304, + 295, + 547, + 523 + ], + "type": "inline_equation", + "content": "96.8\\%" + }, + { + "bbox": [ + 304, + 295, + 547, + 523 + ], + "type": "text", + "content": " of frames. Within the remaining scans, only " + }, + { + "bbox": [ + 304, + 295, + 547, + 523 + ], + "type": "inline_equation", + "content": "1.5\\%" + }, + { + "bbox": [ + 304, + 295, + 547, + 523 + ], + "type": "text", + "content": " of vertices require further rectification, addressed via a human-in-the-loop process." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 533, + 547, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 547, + 631 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 547, + 631 + ], + "type": "text", + "content": "The quality of the ground-truth data in 4D-DRESS allows us to establish several evaluation benchmarks for diverse tasks, including clothing simulation, reconstruction, and human parsing. Our evaluation and analysis demonstrate that 4D-DRESS offers realistic and challenging human clothing that cannot be readily modeled by existing algorithms, thereby opening avenues for further research. In summary, our contributions include:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 639, + 545, + 711 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 306, + 639, + 545, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 639, + 545, + 674 + ], + "spans": [ + { + "bbox": [ + 306, + 639, + 545, + 674 + ], + "type": "text", + "content": "- the first real-world 4D human clothing dataset comprising 4D textured scans, vertex-level semantic labels, garment meshes, and corresponding parametric body meshes." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 676, + 545, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 676, + 545, + 698 + ], + "spans": [ + { + "bbox": [ + 306, + 676, + 545, + 698 + ], + "type": "text", + "content": "- a semi-automatic and template-free 4D human parsing pipeline for efficient data annotation." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 700, + 545, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 700, + 545, + 711 + ], + "spans": [ + { + "bbox": [ + 306, + 700, + 545, + 711 + ], + "type": "text", + "content": "- evaluation benchmarks showing the utility of our dataset." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "text", + "content": "551" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 134, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 134, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 134, + 83 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 87, + 287, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 87, + 287, + 242 + ], + "spans": [ + { + "bbox": [ + 46, + 87, + 287, + 242 + ], + "type": "text", + "content": "4D clothed human dataset. Datasets featuring clothed humans can be divided into two categories. Firstly, synthetic datasets [3, 7, 33, 35, 39, 53] create large volume of synthetic data using graphic engines [44] and simulation tools [10] (Tab. 1 top). These datasets are easy to scale with ground truth semantic labels available by design. However, they often lack realism in human appearances, clothing deformations, and motion dynamics. Even though recent work [7, 46] attempted to achieve photorealistic human textures with manual efforts, it is challenging to precisely mimic the way real-world clothing moves and deforms. Therefore, it is essential to create datasets of real-world human clothing by capturing these intricate details." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 243, + 288, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 243, + 288, + 399 + ], + "spans": [ + { + "bbox": [ + 46, + 243, + 288, + 399 + ], + "type": "text", + "content": "The second category (Tab. 1 bottom) involves using multi-view volumetric capture systems [11, 26] to collect datasets of people dressed in real-world clothing [2, 17, 20, 22, 23, 32, 40, 41, 43, 50, 51]. However, the resources required for capturing, storing, and processing this data are substantial, which limits the size of these publicly available datasets [2, 40, 51]. Moreover, these methods do not inherently provide labeled annotations, offering only temporally uncorrelated scans. This makes the raw data on these datasets less suitable for research focusing on human clothing. 4D-DRESS gathers a variety of human subjects and outfits providing accurate semantic labels of human clothing, garment meshes, and SMPL/SMPL-X fits." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 402, + 288, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 402, + 288, + 666 + ], + "spans": [ + { + "bbox": [ + 46, + 402, + 288, + 666 + ], + "type": "text", + "content": "Human parsing. Human parsing [49] is a specific task within semantic segmentation aimed at identifying detailed body parts and clothing labels. 
Conventionally, this challenge is tackled using deep neural networks, trained on images with their corresponding semantic labels [9, 13, 29]. Although these methods have been successful in 2D [14, 18, 19, 28, 30, 45], applying them to annotate 3D and 4D scans is still a challenge. Previous work has explored it using two distinct strategies. One strategy, used by SIZER [43] and MGN [6], involves rendering multi-view images and projecting parsing labels onto 3D meshes through a voting process. While this method considers consistency across multiple views, it overlooks temporal consistency and falls short of accurately labeling 4D scans. Another approach, used by ClothCap [36], registers all scans to a fixed-topology SMPL model [31] with per-vertex displacements. Yet, this method struggles with handling large motions and complex clothing due to limited template resolutions and model-fitting capabilities. This results in noisy labels near boundaries and loose garments. In contrast, our approach combines multiview voting and optical warping in a template-free pipeline, achieving both multi-view and temporal consistency." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 673, + 130, + 687 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 673, + 130, + 687 + ], + "spans": [ + { + "bbox": [ + 47, + 673, + 130, + 687 + ], + "type": "text", + "content": "3. Methodology" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": "To accurately label each vertex within our 4D textured scan sequences, we leverage a semi-automatic parsing pipeline" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 263 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 263 + ], + "type": "text", + "content": "that incorporates but minimizes manual efforts during the labeling process. Fig. 2 depicts the overall workflow of our pipeline. We first render 24 multi-view images of the current frame textured scan. We combine those images with the previous frame's multi-view images and labels to deploy three state-of-the-art tools to vote candidate labels for each rendered pixel (Sec. 3.1): a) human image parser, b) optical flow transfer, and c) segmentation masks. Next, we re-project and fuse all the 2D label votes via a Graph Cut optimization to obtain vertex-level semantic labels, considering neighboring and temporal consistency (Sec. 3.2). For those challenging frames where further labeling refinement is needed (around " + }, + { + "bbox": [ + 304, + 72, + 545, + 263 + ], + "type": "inline_equation", + "content": "3\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 263 + ], + "type": "text", + "content": " in our dataset), we refined their semantic labels with a manual rectification step that we feed back into the optimization (Sec. 3.3). We describe the details of the pipeline within this section." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 271, + 419, + 283 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 271, + 419, + 283 + ], + "spans": [ + { + "bbox": [ + 306, + 271, + 419, + 283 + ], + "type": "text", + "content": "3.1. 
Multi-view Parsing" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "spans": [ + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "text", + "content": "At each frame " + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "inline_equation", + "content": "k \\in \\{1, \\dots, N_{\\text{frame}}\\}" + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "text", + "content": ", we render the 3D-mesh into a set of multi-view images, consisting of twelve horizontal, six upper, and six lower uniformly distributed views. We note this as " + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "inline_equation", + "content": "I_{img,n,k}" + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "inline_equation", + "content": "n \\in \\{1, \\dots, N_{\\text{view}} = 24\\}" + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "text", + "content": ". Within the multi-view space, we tackle the problem of assigning a label vote " + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "text", + "content": " to each pixel " + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "text", + "content": " using multi-view image-based models. The label " + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "text", + "content": " varies for human skin, hair, shoes, upper clothing (shirts, hoodies), lower clothing (shorts, pants), and outer clothing (jackets, coats). For clarity, we omit the frame index " + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "inline_equation", + "content": "(k)" + }, + { + "bbox": [ + 304, + 284, + 545, + 439 + ], + "type": "text", + "content": " in the following unless they are strictly needed. Please refer to Fig. 2 and the Supp. Mat. for more label definitions and the versatility of our parsing method with new labels like belts and socks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "spans": [ + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "text", + "content": "Human image parser (PAR). Our primary source of labels is a deep-learning image parser, which provides pixel-level votes for body parts and clothes. Specifically, we apply Graphonomy [14] to each view " + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "text", + "content": " and store the labels as a new set of images " + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "inline_equation", + "content": "\\{I_{par}\\}" + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "text", + "content": " (see Fig. 2). 
These labels are then accessible by the vote function " + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "inline_equation", + "content": "f_{par,n}(p,l)" + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "text", + "content": " that checks if the image " + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "inline_equation", + "content": "I_{par,n}" + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "text", + "content": " matches the value " + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "text", + "content": " at the pixel " + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 443, + 545, + 563 + ], + "type": "text", + "content": ", in which case returns 1, or 0 otherwise. This vote function and the other two defined below will be crucial later when setting our full-mesh optimization (Sec. 3.2)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "spans": [ + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "text", + "content": "Optical flow transfer (OPT). This block leverages the previous frame's multi-view labels to provide temporal consistency. Specifically, we use the optical flow predictor RAFT [42] to transfer multi-view labels in the " + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "inline_equation", + "content": "k - 1" + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "text", + "content": " frame to the current " + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "text", + "content": " frame using the texture features on the rendered multi-view images. Similarly to the image parser above, the optical flow output goes to a set " + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "inline_equation", + "content": "\\{I_{opt}\\}" + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "text", + "content": ". These labels are accessible via the vote function " + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "inline_equation", + "content": "f_{opt,n}(p,l)" + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "text", + "content": ", which checks " + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "inline_equation", + "content": "I_{opt,n}" + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "text", + "content": " and returns 1 if label " + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "text", + "content": " is in " + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 566, + 545, + 674 + ], + "type": "text", + "content": " and 0 otherwise." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 677, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 714 + ], + "type": "text", + "content": "Segmentation masks (SAM). 
The multi-view votes generated by the Human Image Parser sometimes lack 3D consistency, particularly when dealing with open garments un" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "552" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 77, + 68, + 516, + 240 + ], + "blocks": [ + { + "bbox": [ + 77, + 68, + 516, + 240 + ], + "lines": [ + { + "bbox": [ + 77, + 68, + 516, + 240 + ], + "spans": [ + { + "bbox": [ + 77, + 68, + 516, + 240 + ], + "type": "image", + "image_path": "be342b7739a18ae48556547e268b25e016e371e890d2983624a6ab38295105ed.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 242, + 547, + 288 + ], + "lines": [ + { + "bbox": [ + 46, + 242, + 547, + 288 + ], + "spans": [ + { + "bbox": [ + 46, + 242, + 547, + 288 + ], + "type": "text", + "content": "Figure 2. 4D Human parsing method. We first render current and previous frame scans into multi-view images and labels. Then collect multi-view parsing results from the image parser, optical flows, and segmentation masks (Sec. 3.1). Finally, we project multi-view labels to 3D vertices and optimize vertex labels using the Graph Cut algorithm with vertex-wise unary energy and edge-wise binary energy (Sec. 3.2). The manual rectification labels can be easily introduced by checking multi-view rendered labels. (Sec. 3.3)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 298, + 290, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 298, + 290, + 429 + ], + "spans": [ + { + "bbox": [ + 46, + 298, + 290, + 429 + ], + "type": "text", + "content": "der dynamic motions (cf. Fig. 3). While the votes derived from the optical flows provide a cross-frame prior, they may not accurately track every human part and can't identify newly emerging regions. Therefore, we introduce segmentation masks to regularize the label consistency within each masked region. We apply the Segment Anything Model [27] to each rendered image and obtain a self-define group of masks " + }, + { + "bbox": [ + 46, + 298, + 290, + 429 + ], + "type": "inline_equation", + "content": "M_{m,n}" + }, + { + "bbox": [ + 46, + 298, + 290, + 429 + ], + "type": "text", + "content": ", with the index " + }, + { + "bbox": [ + 46, + 298, + 290, + 429 + ], + "type": "inline_equation", + "content": "m \\in \\{1, \\dots, N_{mask,n}\\}" + }, + { + "bbox": [ + 46, + 298, + 290, + 429 + ], + "type": "text", + "content": ". 
Within a mask " + }, + { + "bbox": [ + 46, + 298, + 290, + 429 + ], + "type": "inline_equation", + "content": "M_{m,n}" + }, + { + "bbox": [ + 46, + 298, + 290, + 429 + ], + "type": "text", + "content": " we compute the score function " + }, + { + "bbox": [ + 46, + 298, + 290, + 429 + ], + "type": "inline_equation", + "content": "S(l, M_{m,n})" + }, + { + "bbox": [ + 46, + 298, + 290, + 429 + ], + "type": "text", + "content": " that fuses the votes of the image parser and the optical flow, normalized by the area of the mask:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 445, + 287, + 500 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 445, + 287, + 500 + ], + "spans": [ + { + "bbox": [ + 55, + 445, + 287, + 500 + ], + "type": "interline_equation", + "content": "\\mathcal {S} (l, M _ {m, n}) = \\frac {\\sum_ {p \\in M _ {m , n}} \\left[ f _ {p a r , n} (p , l) + \\lambda_ {p o} f _ {o p t , n} (p , l) \\right]}{\\sum_ {p \\in M _ {m , n}} \\left(1 + \\lambda_ {p o}\\right)}, \\tag {1}", + "image_path": "57c5ec64e1c497b5602bf587474b47082b4887ead54ad3f56c61628983f633c5.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 500, + 287, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 500, + 287, + 561 + ], + "spans": [ + { + "bbox": [ + 47, + 500, + 287, + 561 + ], + "type": "text", + "content": "where the factor " + }, + { + "bbox": [ + 47, + 500, + 287, + 561 + ], + "type": "inline_equation", + "content": "\\lambda_{po}" + }, + { + "bbox": [ + 47, + 500, + 287, + 561 + ], + "type": "text", + "content": " weights the contribution of OPT over PAR. We now define a check function, " + }, + { + "bbox": [ + 47, + 500, + 287, + 561 + ], + "type": "inline_equation", + "content": "\\mathcal{C}(p,M_{m,n})" + }, + { + "bbox": [ + 47, + 500, + 287, + 561 + ], + "type": "text", + "content": ", that returns 1 if the input evaluation pixel " + }, + { + "bbox": [ + 47, + 500, + 287, + 561 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 47, + 500, + 287, + 561 + ], + "type": "text", + "content": " is in the mask " + }, + { + "bbox": [ + 47, + 500, + 287, + 561 + ], + "type": "inline_equation", + "content": "M_{m,n}" + }, + { + "bbox": [ + 47, + 500, + 287, + 561 + ], + "type": "text", + "content": " and 0 otherwise. Finally, we obtain the corresponding vote function by summing over all the masks in the image:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 567, + 287, + 594 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 567, + 287, + 594 + ], + "spans": [ + { + "bbox": [ + 52, + 567, + 287, + 594 + ], + "type": "interline_equation", + "content": "f _ {s a m, n} (p, l) = \\sum_ {m \\in 1: N _ {m a s k, n}} \\mathcal {C} (p, M _ {m, n}) * \\mathcal {S} (l, M _ {m, n}). \\tag {2}", + "image_path": "5e58ce1a1c826ec3450db3558a9ba9a51ac807fd74d402d61fbcf4cdf381d222.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 604, + 275, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 604, + 275, + 617 + ], + "spans": [ + { + "bbox": [ + 47, + 604, + 275, + 617 + ], + "type": "text", + "content": "3.2. 
Graph Cut Optimization for Vertex Parsing" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "content": "The next step in our semi-automatic process is combining all the labels obtained in Sec. 3.1 to assign a unique label to each scan vertex " + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "inline_equation", + "content": "i \\in \\{1, \\dots, N_{\\text{vert}}\\}" + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "content": ". We frame this 3D semantic segmentation problem as a graph cut optimization: each 3D frame is interpreted as a graph " + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "content": ", where vertices are now nodes and mesh edges are connections. Note that in a traditional Graph Cut, the values of the nodes are fixed, and the optimization computes only the" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 298, + 546, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 298, + 546, + 346 + ], + "spans": [ + { + "bbox": [ + 304, + 298, + 546, + 346 + ], + "type": "text", + "content": "cost of breaking a connection. In our case, we have several votes for a vertex label, coming from three different tools and from concurrent multi-view projections. We define our cost function that consists of two terms," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 312, + 350, + 545, + 378 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 350, + 545, + 378 + ], + "spans": [ + { + "bbox": [ + 312, + 350, + 545, + 378 + ], + "type": "interline_equation", + "content": "E (L) = \\sum_ {i \\in 1: N _ {v e r t}} E _ {v e r t} \\left(l _ {i}\\right) + \\sum_ {i, j \\in 1: N _ {v e r t}} E _ {e d g e} \\left(l _ {i}, l _ {j}\\right), \\tag {3}", + "image_path": "ef80698bddb43ccbb033ac9639b21a311921d4286ff09724564811567df0256d.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "inline_equation", + "content": "L = \\{l_i\\}" + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "text", + "content": " represents all the vertex labels in current frame. As described below, the term " + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "inline_equation", + "content": "E_{vert}" + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "text", + "content": " combines the different votes into a single cost function, while " + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "inline_equation", + "content": "E_{edge}" + }, + { + "bbox": [ + 304, + 384, + 545, + 443 + ], + "type": "text", + "content": " evaluates neighboring labels for consistent 3D segmentation. We follow an approach similar to [8]." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 443, + 545, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 443, + 545, + 479 + ], + "spans": [ + { + "bbox": [ + 305, + 443, + 545, + 479 + ], + "type": "text", + "content": "Vertex-wise unary energy. The cost function per node or Unary energy comes from combining the different votes obtained in the multi-view image processing (see Sec. 3.1):" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 484, + 545, + 525 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 484, + 545, + 525 + ], + "spans": [ + { + "bbox": [ + 307, + 484, + 545, + 525 + ], + "type": "interline_equation", + "content": "E _ {v e r t} \\left(l _ {i}\\right) = \\sum_ {n \\in 1: N _ {v i e w}} \\frac {\\lambda_ {p} E _ {p a r , n} + \\lambda_ {o} E _ {o p t , n} + \\lambda_ {s} E _ {s a m , n}}{N _ {v i e w}}, \\tag {4}", + "image_path": "7dc922e0821aad9739977b2e35a22c815e5c3b8302a5f2a1520fce9d4328269e.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 525, + 545, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 525, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 304, + 525, + 545, + 586 + ], + "type": "text", + "content": "where we combine the human image parser " + }, + { + "bbox": [ + 304, + 525, + 545, + 586 + ], + "type": "inline_equation", + "content": "(E_{par})" + }, + { + "bbox": [ + 304, + 525, + 545, + 586 + ], + "type": "text", + "content": ", the cross-frame optical prior " + }, + { + "bbox": [ + 304, + 525, + 545, + 586 + ], + "type": "inline_equation", + "content": "(E_{opt})" + }, + { + "bbox": [ + 304, + 525, + 545, + 586 + ], + "type": "text", + "content": ", and the segmentation masks regularization " + }, + { + "bbox": [ + 304, + 525, + 545, + 586 + ], + "type": "inline_equation", + "content": "(E_{sam})" + }, + { + "bbox": [ + 304, + 525, + 545, + 586 + ], + "type": "text", + "content": " contributions. 
All these energy terms can be written with the same equation by using the notation " + }, + { + "bbox": [ + 304, + 525, + 545, + 586 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = \\{par, opt, sam\\}" + }, + { + "bbox": [ + 304, + 525, + 545, + 586 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 331, + 592, + 545, + 619 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 592, + 545, + 619 + ], + "spans": [ + { + "bbox": [ + 331, + 592, + 545, + 619 + ], + "type": "interline_equation", + "content": "E _ {\\mathcal {X}, n} \\left(l _ {i}\\right) = \\sum_ {p \\in P \\left(v _ {i}, n\\right)} - w _ {\\mathcal {X}} \\left(p, v _ {i}\\right) f _ {\\mathcal {X}, n} \\left(p, l _ {i}\\right), \\tag {5}", + "image_path": "b76682f6ab2a60c8d6da693e7958a1d2f9d2e6e6e3f84a37df00620cc6669c60.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "spans": [ + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": "meaning that energy of the method " + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": ", calculated for a proposed label " + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "inline_equation", + "content": "l_{i}" + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": ", is obtained by summing over those pixels " + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "inline_equation", + "content": "p\\in P(v_i,n)" + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": " whose projections are within a triangle of " + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": ". The weights for the cases of " + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "inline_equation", + "content": "E_{par}" + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "inline_equation", + "content": "E_{opt}" + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": " are set to the barycentric distance from the projected pixel " + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": " to the vertex " + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": ", which means " + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "inline_equation", + "content": "w_{par} = w_{opt} = u" + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": " as in Fig. 2. 
For " + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "inline_equation", + "content": "E_{sam}" + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": " instead, we set the weight " + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "inline_equation", + "content": "w_{sam}" + }, + { + "bbox": [ + 304, + 625, + 545, + 721 + ], + "type": "text", + "content": " to the constant value 1 given that we look for an across-vertex regularization." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "553" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 68, + 521, + 236 + ], + "blocks": [ + { + "bbox": [ + 72, + 68, + 521, + 236 + ], + "lines": [ + { + "bbox": [ + 72, + 68, + 521, + 236 + ], + "spans": [ + { + "bbox": [ + 72, + 68, + 521, + 236 + ], + "type": "image", + "image_path": "084aa22d3dd3b5dcb324e58f8a0f1888981cbd6de4d78f1a19dec9664621bae9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 239, + 547, + 284 + ], + "lines": [ + { + "bbox": [ + 46, + 239, + 547, + 284 + ], + "spans": [ + { + "bbox": [ + 46, + 239, + 547, + 284 + ], + "type": "text", + "content": "Figure 3. Qualitative ablation study. We visualize the effectiveness of our 4D human parsing method on our 4D-DRESS dataset. From left to right, we show the improvements after adding the optical flow labels and mask scores to the multi-view image parser labels. The manual rectification efforts can be easily introduced from multi-view rendered labels, with which we achieve high-quality vertex annotations. The problem of isolated labels can be relieved by introducing the edge-wise binary energy term." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 294, + 287, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 294, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 294, + 287, + 354 + ], + "type": "text", + "content": "Edge-wise binary energy. The Binary energy term penalizes the case of adjacent vertices with different labels, encouraging neighboring vertices to take the same label. 
Being " + }, + { + "bbox": [ + 46, + 294, + 287, + 354 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 294, + 287, + 354 + ], + "type": "text", + "content": " the adjacency matrix of the graph " + }, + { + "bbox": [ + 46, + 294, + 287, + 354 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 46, + 294, + 287, + 354 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 294, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 46, + 294, + 287, + 354 + ], + "type": "text", + "content": " the Dirac delta function, the edge cost can be calculated as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 89, + 359, + 287, + 373 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 359, + 287, + 373 + ], + "spans": [ + { + "bbox": [ + 89, + 359, + 287, + 373 + ], + "type": "interline_equation", + "content": "E _ {e d g e} \\left(l _ {i}, l _ {j}\\right) = \\lambda_ {b} A _ {i, j} \\left(1 - \\delta \\left(l _ {i}, l _ {j}\\right)\\right), \\tag {6}", + "image_path": "51350eb3142de419074496d5f915211f940dd52caa40a71ae01900320192438f.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 376, + 287, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 376, + 287, + 401 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 287, + 401 + ], + "type": "text", + "content": "which increases the energy by " + }, + { + "bbox": [ + 46, + 376, + 287, + 401 + ], + "type": "inline_equation", + "content": "\\lambda_{b}" + }, + { + "bbox": [ + 46, + 376, + 287, + 401 + ], + "type": "text", + "content": " in the case that the adjacent vertices " + }, + { + "bbox": [ + 46, + 376, + 287, + 401 + ], + "type": "inline_equation", + "content": "v_{i}, v_{j}" + }, + { + "bbox": [ + 46, + 376, + 287, + 401 + ], + "type": "text", + "content": " take different labels " + }, + { + "bbox": [ + 46, + 376, + 287, + 401 + ], + "type": "inline_equation", + "content": "l_{i} \\neq l_{j}" + }, + { + "bbox": [ + 46, + 376, + 287, + 401 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 406, + 230, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 406, + 230, + 417 + ], + "spans": [ + { + "bbox": [ + 47, + 406, + 230, + 417 + ], + "type": "text", + "content": "3.3. Manual Rectification of 3D Labels" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 419, + 287, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 419, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 46, + 419, + 287, + 552 + ], + "type": "text", + "content": "When manual rectification is needed, we introduce it back into the multi-view space as an additional 2D annotation, and we recalculate the steps in Sec. 3.2. Concretely, we ran the graph cut optimization for the first time. Then, we rendered the vertex labels into multi-view labels, from which we let a person introduce corrections by comparing the resulting labels with the textured multi-view images. 
Similarly to the vote functions of the image parser and optical flow, we create a vote function " + }, + { + "bbox": [ + 46, + 419, + 287, + 552 + ], + "type": "inline_equation", + "content": "f_{man}(p,l)" + }, + { + "bbox": [ + 46, + 419, + 287, + 552 + ], + "type": "text", + "content": " that accesses this set of images with rectified annotations and returns 1 if the label " + }, + { + "bbox": [ + 46, + 419, + 287, + 552 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 46, + 419, + 287, + 552 + ], + "type": "text", + "content": " is assigned to the pixel " + }, + { + "bbox": [ + 46, + 419, + 287, + 552 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 46, + 419, + 287, + 552 + ], + "type": "text", + "content": " and 0 otherwise." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "spans": [ + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "text", + "content": "Similar to previous cases, we define a per-view manual energy " + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "inline_equation", + "content": "(E_{man})" + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "text", + "content": " by using the variable " + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = man" + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "text", + "content": " in Eq. (5), and we added it to the global per-node energy " + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "inline_equation", + "content": "E_{vert}" + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "text", + "content": " in Eq. (4). We use a constant large weight for " + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "inline_equation", + "content": "w_{man}" + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "text", + "content": " to favor the manual annotation over other sources of voting where we rectified the labels. The final vertex labels " + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "inline_equation", + "content": "L^{*} = \\{l^{*}_{i}\\}" + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "text", + "content": " are obtained after the second round of graph cut optimization. This manual rectification process finally changed " + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "inline_equation", + "content": "1.5\\%" + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "text", + "content": " of vertices within " + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "inline_equation", + "content": "3.2\\%" + }, + { + "bbox": [ + 46, + 552, + 288, + 671 + ], + "type": "text", + "content": " of all frames. The rectification process is detailed in Supp. Mat." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 674, + 128, + 688 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 674, + 128, + 688 + ], + "spans": [ + { + "bbox": [ + 47, + 674, + 128, + 688 + ], + "type": "text", + "content": "4. 
Experiments" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "content": "To validate the effectiveness of our method, we conducted controlled experiments on two synthetic datasets," + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 318, + 293, + 531, + 380 + ], + "blocks": [ + { + "bbox": [ + 318, + 293, + 531, + 380 + ], + "lines": [ + { + "bbox": [ + 318, + 293, + 531, + 380 + ], + "spans": [ + { + "bbox": [ + 318, + 293, + 531, + 380 + ], + "type": "table", + "html": "
<table><tr><td></td><td>CLOTH4D [53]</td><td colspan="2">BEDLAM [7]</td></tr>
<tr><td>Method</td><td>Inner</td><td>Inner</td><td>Outer</td></tr>
<tr><td>SMPL+D [36]</td><td>0.872</td><td>0.846</td><td>0.765</td></tr>
<tr><td>PAR Only [43]</td><td>0.961</td><td>0.910</td><td>0.714</td></tr>
<tr><td>PAR+OPT</td><td>0.969</td><td>0.963</td><td>0.942</td></tr>
<tr><td>PAR+OPT+SAM</td><td>0.995</td><td>0.993</td><td>0.988</td></tr></table>
", + "image_path": "c6427b8607437ca67316658e4a898a3c4029f3cdd50a73c4adeddb6f018483c2.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 382, + 545, + 415 + ], + "lines": [ + { + "bbox": [ + 305, + 382, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 305, + 382, + 545, + 415 + ], + "type": "text", + "content": "Table 2. Baseline and ablation study. Mean accuracy of 4D human parsing methods applied on synthetic datasets. The Inner and Outer outfits are selected according to our definition in Sec. 5" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 425, + 545, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 425, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 304, + 425, + 545, + 521 + ], + "type": "text", + "content": "CLOTH4D [53] and BEDLAM [7], where ground-truth semantic labels are available. We first compare our parsing method with a template-based baseline [36], that uses a semantic template (SMPL model with per-vertex displacements) to track and parse the clothed human scans. Due to the limited resolution and the fixed topology nature of the SMPL+D model, its parsing accuracy is lower than " + }, + { + "bbox": [ + 304, + 425, + 545, + 521 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 304, + 425, + 545, + 521 + ], + "type": "text", + "content": " on all synthetic outfits (see Tab. 2)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 522, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 546, + 713 + ], + "type": "text", + "content": "We then compare our 4D parsing pipeline with several ablations and report them in Tab. 2. We use an example scan from 4D-DRESS to support the visualization of the ablation study in Fig. 3. Using PAR only shows reasonable results for upper and lower clothes. Yet, it predicts inconsistent labels at open garments like jackets and coats (Fig. 3 PAR Only), resulting in only " + }, + { + "bbox": [ + 304, + 522, + 546, + 713 + ], + "type": "inline_equation", + "content": "71.4\\%" + }, + { + "bbox": [ + 304, + 522, + 546, + 713 + ], + "type": "text", + "content": " parsing accuracy on the BEDLAM dataset. The optical flow labels from the previous frame can serve as a cross-frame prior, yet accuracy may vary, particularly in fast-moving arms and cloth boundaries (Fig. 3 PAR+OPT). By fusing both of the previous multi-view labels via the segmentation masks, we achieve better boundary labels (Fig. 3 PAR+OPT+SAM), with " + }, + { + "bbox": [ + 304, + 522, + 546, + 713 + ], + "type": "inline_equation", + "content": "98.8\\%" + }, + { + "bbox": [ + 304, + 522, + 546, + 713 + ], + "type": "text", + "content": " accuracy on the outer outfits in BEDLAM, with challenging open garments. Finally, we show the effect of introducing manual efforts to rectify incorrect labels (Fig. 
3" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "type": "text", + "content": "554" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 70, + 532, + 240 + ], + "blocks": [ + { + "bbox": [ + 61, + 70, + 532, + 240 + ], + "lines": [ + { + "bbox": [ + 61, + 70, + 532, + 240 + ], + "spans": [ + { + "bbox": [ + 61, + 70, + 532, + 240 + ], + "type": "image", + "image_path": "2be5552cd65c63b91fa805f1893d335137d57f11e7c54e70a528ffcac67cca89.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 240, + 546, + 275 + ], + "lines": [ + { + "bbox": [ + 46, + 240, + 546, + 275 + ], + "spans": [ + { + "bbox": [ + 46, + 240, + 546, + 275 + ], + "type": "text", + "content": "Figure 4. Qualitative examples for clothing simulation methods. On the left are templates used for simulations. On the right are ground-truth geometries and original scans, LBS baseline results in body penetrations and overly stretched areas. Compared to other methods, HOOD better models dress and jackets and, with tuned material parameters, HOOD* achieves simulations closest to the ground truth." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 279, + 288, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 279, + 288, + 328 + ], + "spans": [ + { + "bbox": [ + 46, + 279, + 288, + 328 + ], + "type": "text", + "content": "With Manual). Our parsing method can also be deployed to annotate other existing 4D human datasets. We present examples of BUFF[51], X-Humans [40], and ActorsHQ[23] and additional qualitative results in Supp. Mat." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 336, + 163, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 336, + 163, + 350 + ], + "spans": [ + { + "bbox": [ + 47, + 336, + 163, + 350 + ], + "type": "text", + "content": "5. Dataset Description" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 352, + 287, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 352, + 287, + 449 + ], + "spans": [ + { + "bbox": [ + 46, + 352, + 287, + 449 + ], + "type": "text", + "content": "4D-DRESS contains 520 motion sequences (150 frames at 30 fps) in 64 real-world human outfits with a total of 78k frames. Each frame consists of multi-view images at 1k resolution, an 80k-face triangle 3D mesh with vertex annotations, and a 1k-resolution texture map. We also provide each garment with its canonical template to benefit the clothing simulation study. Finally, each 3D scan is accurately registered by SMPL/SMPL-X body models." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 449, + 288, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 449, + 288, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 449, + 288, + 665 + ], + "type": "text", + "content": "To record 4D-DRESS we recruited 32 participants (18 female), with an average age of 24. The dataset consists of 4 dresses, 30 upper, 28 lower, and 32 outer garments. Participants were instructed to perform different dynamic motions for each 5-second sequence. 
For each participant, we capture two types of outfits: Inner Outfit comprising the inner layer dress/upper, and lower garments; and Outer Outfit with an additional layer of garment, such as open jackets or coats. A unique feature of 4D-DRESS is the challenging clothing deformations we captured. To quantify these deformations, we compute the mean distances from the garments to the registered SMPL body surfaces. The inner and outer outfits exhibit distance ranges up to " + }, + { + "bbox": [ + 46, + 449, + 288, + 665 + ], + "type": "inline_equation", + "content": "7.12\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 449, + 288, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 449, + 288, + 665 + ], + "type": "inline_equation", + "content": "14.76\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 449, + 288, + 665 + ], + "type": "text", + "content": " over all frames. This is twice as much as what we observed in the X-Humans dataset [40], for example. In the " + }, + { + "bbox": [ + 46, + 449, + 288, + 665 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 46, + 449, + 288, + 665 + ], + "type": "text", + "content": " most challenging frames, this increases to " + }, + { + "bbox": [ + 46, + 449, + 288, + 665 + ], + "type": "inline_equation", + "content": "20.09\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 449, + 288, + 665 + ], + "type": "text", + "content": " for outer outfits, highlighting the prevalence of challenging garments. Please refer to Supp. Mat. for dataset details." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 673, + 180, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 673, + 180, + 685 + ], + "spans": [ + { + "bbox": [ + 47, + 673, + 180, + 685 + ], + "type": "text", + "content": "6. Benchmark Evaluation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "content": "With high-quality 4D scans and diverse garment meshes in dynamic motions, 4D-DRESS serves as an ideal ground" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 308, + 277, + 545, + 367 + ], + "blocks": [ + { + "bbox": [ + 308, + 277, + 545, + 367 + ], + "lines": [ + { + "bbox": [ + 308, + 277, + 545, + 367 + ], + "spans": [ + { + "bbox": [ + 308, + 277, + 545, + 367 + ], + "type": "table", + "html": "
<table><tr><td></td><td colspan="2">Lower</td><td colspan="2">Upper</td><td colspan="2">Dress</td><td colspan="2">Outer</td></tr>
<tr><td>Method</td><td>CD ↓</td><td>EStr ↓</td><td>CD ↓</td><td>EStr ↓</td><td>CD ↓</td><td>EStr ↓</td><td>CD ↓</td><td>EStr ↓</td></tr>
<tr><td>LBS</td><td>1.767</td><td>0.333</td><td>2.167</td><td>0.095</td><td>4.461</td><td>1.293</td><td>4.626</td><td>0.811</td></tr>
<tr><td>PBNS [4]</td><td>1.885</td><td>0.107</td><td>2.687</td><td>0.040</td><td>4.869</td><td>0.643</td><td>4.859</td><td>0.107</td></tr>
<tr><td>NCS [5]</td><td>1.716</td><td>0.017</td><td>2.112</td><td>0.016</td><td>4.548</td><td>0.031</td><td>4.738</td><td>0.025</td></tr>
<tr><td>HOOD [15]</td><td>2.070</td><td>0.008</td><td>2.668</td><td>0.013</td><td>4.292</td><td>0.010</td><td>5.355</td><td>0.011</td></tr>
<tr><td>HOOD*</td><td>0.924</td><td>0.010</td><td>1.308</td><td>0.015</td><td>2.463</td><td>0.009</td><td>2.833</td><td>0.009</td></tr></table>
", + "image_path": "722ba6aa6b62ca97512813f5db26a11ab151c89e7e072b787ce7348b55d39601.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 369, + 545, + 403 + ], + "lines": [ + { + "bbox": [ + 305, + 369, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 305, + 369, + 545, + 403 + ], + "type": "text", + "content": "Table 3. Clothing simulation benchmark. CD is Chamfer Distance between the simulation and ground truth. " + }, + { + "bbox": [ + 305, + 369, + 545, + 403 + ], + "type": "inline_equation", + "content": "E_{str}" + }, + { + "bbox": [ + 305, + 369, + 545, + 403 + ], + "type": "text", + "content": " denotes stretching energy with respect to the template." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 407, + 545, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 407, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 305, + 407, + 545, + 491 + ], + "type": "text", + "content": "truth for a variety of computer vision and graphics benchmarks. In our work, we outline several standard benchmarks conducted in these fields using our dataset. Our primary focus is on tasks related to clothing simulation (Sec. 6.1) and clothed human reconstruction (Sec. 6.2). Additionally, benchmarks on human parsing and human representation learning are included in our Supp. Mat." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 496, + 423, + 508 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 496, + 423, + 508 + ], + "spans": [ + { + "bbox": [ + 306, + 496, + 423, + 508 + ], + "type": "text", + "content": "6.1. Clothing Simulation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": "Experimental setup. We introduce a new benchmark for clothing simulation, leveraging the garment meshes from 4D-DRESS, which capture dynamical real-world clothing deformations. This benchmark evaluates three methods for modeling garment dynamics: PBNS [4], Neural Cloth Simulator (NCS [5]), and HOOD [15], as well as a baseline method that applies SMPL-based linear blend-skinning (LBS) to the template. We ran the simulations using T-posed templates extracted from static scans and compared the results to the ground-truth garment meshes across various pose sequences. Our evaluation metrics include the Chamfer Distance (CD), which compares the resulting mesh sequences with ground-truth point clouds, and the average stretching energy " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "(E_{str})" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": " calculated by measuring the difference in edge lengths between the simulated and template meshes. 
The experiments were conducted across four categories of garments (Lower, Upper, Dress, and Outer)," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "555" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 71, + 521, + 220 + ], + "blocks": [ + { + "bbox": [ + 75, + 71, + 521, + 220 + ], + "lines": [ + { + "bbox": [ + 75, + 71, + 521, + 220 + ], + "spans": [ + { + "bbox": [ + 75, + 71, + 521, + 220 + ], + "type": "image", + "image_path": "f65e0803ab4942356dd538da773ef30e828b43dfc0b40e6ec6b0d78baf4e4f32.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 222, + 547, + 257 + ], + "lines": [ + { + "bbox": [ + 46, + 222, + 547, + 257 + ], + "spans": [ + { + "bbox": [ + 46, + 222, + 547, + 257 + ], + "type": "text", + "content": "Figure 5. Examples of clothed human reconstruction on 4D-DRESS. We evaluate state-of-the-art methods using both inner (Top) and outer (Bottom) outfits. We show that existing methods generally struggle with the challenging loose garments. Moreover, these approaches cannot faithfully recover realistic details such as clothing wrinkles." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 262, + 288, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 262, + 288, + 309 + ], + "spans": [ + { + "bbox": [ + 46, + 262, + 288, + 309 + ], + "type": "text", + "content": "with four garment templates in each category. We simulated clothing deformation for each garment in six different pose sequences, providing a comprehensive comparison of their ability to generate realistic motions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 311, + 287, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 311, + 287, + 418 + ], + "spans": [ + { + "bbox": [ + 46, + 311, + 287, + 418 + ], + "type": "text", + "content": "Fine-tuning material parameters. To demonstrate the advantages of real-world garment meshes in 4D-DRESS, we also introduce a simple optimization-based strategy for inverse simulation using HOOD. Specifically, we optimize the material parameters fed into the HOOD model to minimize the simulations' Chamfer Distance to the ground-truth sequences and their stretching energy. This optimized version is denoted as HOOD*. For more details on the material optimization experiments, please refer to Supp. Mat." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 421, + 288, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 421, + 288, + 661 + ], + "spans": [ + { + "bbox": [ + 46, + 421, + 288, + 661 + ], + "type": "text", + "content": "Evaluation results. The quantitative and qualitative comparisons of the clothing simulation methods are presented in Tab. 3 and Fig. 4 respectively. The LBS baseline and LBS-based approaches (PBNS and NCS) perform better with upper and lower garments, which exhibit limited free-flowing motions compared with the dress and outer garments. Conversely, HOOD excels with dresses, generating more natural, free-flowing motions and achieving lower stretching energy. 
However, if HOOD fails to generate realistic motions for a single frame, this error propagates to all subsequent frames. This issue does not occur in the LBS-based methods, which generate geometries independently for each frame. With finely-tuned material parameters, HOD* produces garment sequences that more faithfully replicate real-world behavior. We anticipate that future research in learned garment simulation will increasingly focus on modeling real-world garments made from complex heterogeneous materials. This will be a major step in creating realistically animated digital avatars, and we believe 4D-DRESS will be highly instrumental in this task." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 663, + 219, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 663, + 219, + 675 + ], + "spans": [ + { + "bbox": [ + 47, + 663, + 219, + 675 + ], + "type": "text", + "content": "6.2. Clothed Human Reconstruction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Experimental setup. We create a new benchmark for evaluating state-of-the-art clothed human reconstruction methods on the 4D-DRESS dataset. This benchmark is di" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 307, + 259, + 544, + 372 + ], + "blocks": [ + { + "bbox": [ + 307, + 259, + 544, + 372 + ], + "lines": [ + { + "bbox": [ + 307, + 259, + 544, + 372 + ], + "spans": [ + { + "bbox": [ + 307, + 259, + 544, + 372 + ], + "type": "table", + "html": "
<table><tr><td></td><td colspan="3">Inner</td><td colspan="3">Outer</td></tr>
<tr><td>Method</td><td>CD↓</td><td>NC↑</td><td>IoU↑</td><td>CD↓</td><td>NC↑</td><td>IoU↑</td></tr>
<tr><td>PIFu [37]</td><td>2.696</td><td>0.792</td><td>0.690</td><td>2.783</td><td>0.759</td><td>0.697</td></tr>
<tr><td>PIFuHD [38]</td><td>2.426</td><td>0.793</td><td>0.739</td><td>2.393</td><td>0.763</td><td>0.743</td></tr>
<tr><td>PaMIR [52]</td><td>2.520</td><td>0.805</td><td>0.706</td><td>2.608</td><td>0.777</td><td>0.715</td></tr>
<tr><td>ICON [47]</td><td>2.473</td><td>0.798</td><td>0.752</td><td>2.832</td><td>0.762</td><td>0.756</td></tr>
<tr><td>PHORHUM [1]</td><td>3.944</td><td>0.725</td><td>0.580</td><td>3.762</td><td>0.705</td><td>0.603</td></tr>
<tr><td>ECON [48]</td><td>2.543</td><td>0.796</td><td>0.736</td><td>2.852</td><td>0.760</td><td>0.728</td></tr>
<tr><td>SiTH [21]</td><td>2.110</td><td>0.824</td><td>0.755</td><td>2.322</td><td>0.794</td><td>0.749</td></tr></table>
", + "image_path": "0730da84cd7f9816b3ec583e4c98d517649d9038ef2a86bd96f55d5e01e7b243.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 373, + 546, + 417 + ], + "lines": [ + { + "bbox": [ + 305, + 373, + 546, + 417 + ], + "spans": [ + { + "bbox": [ + 305, + 373, + 546, + 417 + ], + "type": "text", + "content": "Table 4. Clothed human reconstruction benchmark. We computed Chamfer distance (CD), normal consistency (NC), and Intersection over Union (IoU) between ground truth and reconstructed meshes obtained from different baselines." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 422, + 545, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 422, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 422, + 545, + 567 + ], + "type": "text", + "content": "vided into three subtasks. First, we evaluate single-view human reconstruction utilizing images and high-quality 3D scans from our dataset. In addition, benefiting from the garment meshes in our dataset, we establish the first real-world benchmark for evaluating single-view clothing reconstruction. Finally, we assess video-based human reconstruction approaches leveraging the sequences in 4D-DRESS that capture rich motion dynamics of both human bodies and garments. In all the experiments, we report 3D metrics including Chamfer Distance (CD), Normal Consistency (NC), and Intersection over Union (IoU) to compare the predictions with ground-truth meshes." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 570, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 546, + 714 + ], + "type": "text", + "content": "Single-view human reconstruction. We use the two test sets defined in Sec. 5 (denote as Outer and Inner) to evaluate the following single-view reconstruction methods: PIFu [37], PIFuHD [38], PaMIR [52], ICON [47], PHORHUM [1], ECON [48], and SiTH [21]. The evaluation results are summarized in Fig. 5 and Tab. 4. We observed that methods leveraging SMPL body models as guidance (i.e., ICON, ECON, SiTH) performed better in reconstructing inner clothing. However, their performance significantly declined when dealing with outer garments. On the other hand, end-to-end models like PIFu and PIFuHD demonstrated more stability with both clothing types. 
This" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "556" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 66, + 70, + 267, + 218 + ], + "blocks": [ + { + "bbox": [ + 66, + 70, + 267, + 218 + ], + "lines": [ + { + "bbox": [ + 66, + 70, + 267, + 218 + ], + "spans": [ + { + "bbox": [ + 66, + 70, + 267, + 218 + ], + "type": "image", + "image_path": "243a3f5a2c20eacf8a888d8b463bdd71c08c62fcbffd8fa9b5354e4927b91411.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 220, + 286, + 264 + ], + "lines": [ + { + "bbox": [ + 47, + 220, + 286, + 264 + ], + "spans": [ + { + "bbox": [ + 47, + 220, + 286, + 264 + ], + "type": "text", + "content": "Figure 6. Examples of clothing reconstruction on 4D-DRESS. We visualize the reconstructed garment meshes from different approaches. These methods trained on synthetic datasets failed to predict accurate clothing sizes and detailed wrinkles." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 266, + 286, + 337 + ], + "blocks": [ + { + "bbox": [ + 50, + 266, + 286, + 337 + ], + "lines": [ + { + "bbox": [ + 50, + 266, + 286, + 337 + ], + "spans": [ + { + "bbox": [ + 50, + 266, + 286, + 337 + ], + "type": "table", + "html": "
<table><tr><td></td><td colspan="2">Shoes</td><td colspan="2">Lower</td><td colspan="2">Upper</td><td colspan="2">Outer</td></tr>
<tr><td>Method</td><td>CD ↓</td><td>IoU↑</td><td>CD ↓</td><td>IoU↑</td><td>CD ↓</td><td>IoU↑</td><td>CD ↓</td><td>IoU↑</td></tr>
<tr><td>BCNet [24]</td><td colspan="2">-</td><td>2.533</td><td>0.675</td><td>2.079</td><td>0.700</td><td>3.600</td><td>0.639</td></tr>
<tr><td>SMPLicit [12]</td><td>2.619</td><td>0.621</td><td>2.101</td><td>0.698</td><td>2.452</td><td>0.617</td><td>3.359</td><td>0.618</td></tr>
<tr><td>ClothWild [34]</td><td>3.657</td><td>0.548</td><td>2.690</td><td>0.582</td><td>3.279</td><td>0.533</td><td>4.163</td><td>0.588</td></tr></table>
", + "image_path": "cd2f4ff89344ecf4b87a3ec600949615be2747c40d95021474396e6d3e0e5a60.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 376, + 286, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 376, + 286, + 448 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 286, + 448 + ], + "type": "text", + "content": "leads to an intriguing research question: whether the human body prior is necessary for reconstructing clothing. Qualitatively, we see that even the best-performing methods cannot perfectly reconstruct realistic free-flowing jackets as shown in Tab. 4. We believe 4D-DRESS will offer more valuable insights for research in clothed human reconstruction." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 450, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 450, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 450, + 286, + 713 + ], + "type": "text", + "content": "Single-view clothes reconstruction. Clothes reconstruction has received relatively little attention compared to full-body human reconstruction. Leveraging the garment meshes in 4D-DRESS, we introduce the first real-world benchmark to assess prior art, including BCNet [24], SMPLicit [12], and ClothWild [34]. The results of different clothing types, as shown in Fig. 6, indicate a significant gap between the reconstructed and real clothing. Firstly, the clothing sizes produced by these methods are often inaccurate, suggesting a lack of effective use of image information for guidance. Moreover, the results typically lack geometric details like clothing wrinkles compared to full-body reconstruction. We report quantitative results in Tab. 5. We observed that the data-driven method (BCNet) performs better with inner clothing, while the generative fitting method (SMPLicit) shows more robustness to outer clothing, such as coats. However, none of these methods is designed for or trained on real-world data. The domain gap between synthetic and real data still limits their capability to produce accurate shapes and fine-grained details. We expect our benchmark and dataset will draw more research attention to the topic of real-world clothing reconstruction." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 318, + 60, + 533, + 133 + ], + "blocks": [ + { + "bbox": [ + 318, + 60, + 533, + 133 + ], + "lines": [ + { + "bbox": [ + 318, + 60, + 533, + 133 + ], + "spans": [ + { + "bbox": [ + 318, + 60, + 533, + 133 + ], + "type": "image", + "image_path": "0e4c67b1af81e24c5ffb4527ca8890f4ccb08b968c83878ae911823f9ff57700.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 136, + 545, + 180 + ], + "lines": [ + { + "bbox": [ + 305, + 136, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 305, + 136, + 545, + 180 + ], + "type": "text", + "content": "Figure 7. Video-based human reconstruction. Qualitative results of video-based human reconstruction methods on 4D-DRESS. Prior works struggle to reconstruct 3D human with challenging outfits and cannot recover the fine-grained surface details." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 307, + 182, + 545, + 240 + ], + "blocks": [ + { + "bbox": [ + 47, + 338, + 286, + 371 + ], + "lines": [ + { + "bbox": [ + 47, + 338, + 286, + 371 + ], + "spans": [ + { + "bbox": [ + 47, + 338, + 286, + 371 + ], + "type": "text", + "content": "Table 5. Clothing reconstruction benchmark. We report Chamfer Distance (CD), and Intersection over Union (IoU) between the ground-truth garment meshes and the reconstructed clothing." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 182, + 545, + 240 + ], + "lines": [ + { + "bbox": [ + 307, + 182, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 307, + 182, + 545, + 240 + ], + "type": "table", + "html": "
<table><tr><td></td><td colspan="3">Inner</td><td colspan="3">Outer</td></tr>
<tr><td>Method</td><td>CD↓</td><td>NC↑</td><td>IoU↑</td><td>CD↓</td><td>NC↑</td><td>IoU↑</td></tr>
<tr><td>SelfRecon [25]</td><td>3.180</td><td>0.729</td><td>0.754</td><td>4.027</td><td>0.683</td><td>0.745</td></tr>
<tr><td>Vid2Avatar [16]</td><td>2.870</td><td>0.750</td><td>0.772</td><td>3.014</td><td>0.725</td><td>0.787</td></tr></table>
", + "image_path": "63da0d45b1dd49d21684720cba6b4466c949ffe5db37a7c47dbd6e087505b24a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 242, + 545, + 263 + ], + "lines": [ + { + "bbox": [ + 306, + 242, + 545, + 263 + ], + "spans": [ + { + "bbox": [ + 306, + 242, + 545, + 263 + ], + "type": "text", + "content": "Table 6. Video-based human reconstruction. Results of video-based human reconstruction methods on 4D-DRESS." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 267, + 545, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 267, + 545, + 470 + ], + "spans": [ + { + "bbox": [ + 304, + 267, + 545, + 470 + ], + "type": "text", + "content": "Video-based human reconstruction Leveraging the sequential 4D data in our dataset, we create a new benchmark for evaluating video-based human reconstruction methods. We applied Vid2Avatar [16] and SelfRecon [25] to obtain 4D reconstructions and compared them with the provided ground-truth 4D scans. As observed in Fig. 7, both methods struggle with diverse clothing styles and face challenges in reconstructing surface parts that greatly differ in topology from the human body, such as the open jacket. Moreover, there remains a noticeable discrepancy between the real geometry and the recovered surface details. Quantitatively, the existing methods cannot achieve satisfactory reconstruction results with outer garments, as demonstrated by a large performance degradation in Tab. 6. We believe 4D-DRESS provides essential data for advancing video-based human reconstruction methods, particularly in achieving detailed geometry recovery for challenging clothing." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 471, + 375, + 482 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 471, + 375, + 482 + ], + "spans": [ + { + "bbox": [ + 306, + 471, + 375, + 482 + ], + "type": "text", + "content": "7. Discussion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 486, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 486, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 486, + 545, + 676 + ], + "type": "text", + "content": "Limitations. Our current pipeline requires substantial computational time. The offline manual rectification process and garment mesh extraction also demand expertise in 3D editing and additional human efforts. These factors constrain the scalability of our dataset. With a goal of expanding more diverse subjects and clothing, real-time 4D annotation and rectification/editing will be exciting future work. Conclusion. 4D-DRESS is the first real-world 4D clothed human dataset with semantic annotations, aiming to bridge the gap between existing clothing algorithms and real-world human clothing. We demonstrate that 4D-DRESS is not only a novel data source but also a challenging benchmark for clothing simulation, reconstruction, and other related tasks. We believe that 4D-DRESS can support a wide range of endeavors and foster research progress by providing high-quality 4D data in life like human clothing." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgements. 
This work was partially supported by the Swiss SERI Consolidation Grant \"AI-PERCEIVE\". AG was supported in part by the Max Planck ETH CLS." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "type": "text", + "content": "557" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Thiemo Alldieck, Mihai Zanfir, and Cristian Sminchisescu. Photorealistic monocular 3d reconstruction of humans wearing clothing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 224 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 224 + ], + "type": "text", + "content": "[2] Matthieu Armando, Laurence Boissieux, Edmond Boyer, Jean-Sebastien Franco, Martin Humenberger, Christophe Legras, Vincent Leroy, Mathieu Marsot, Julien Pansiot, Sergi Pujades, Rim Rekik, Gregory Rogez, Anilkumar Swamy, and Stefanie Wuhrer. 4dhumanoutfit: a multi-subject 4d dataset of human motion sequences in varying outfits exhibiting large displacements. Computer Vision and Image Understanding, 2023. 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 226, + 288, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 226, + 288, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 226, + 288, + 270 + ], + "type": "text", + "content": "[3] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Cloth3d: clothed 3d humans. In Proceedings of the European Conference on Computer Vision (ECCV), pages 344-359. Springer, 2020. 1, 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 271, + 288, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 271, + 288, + 316 + ], + "spans": [ + { + "bbox": [ + 53, + 271, + 288, + 316 + ], + "type": "text", + "content": "[4] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Pbs: Physically based neural simulation for unsupervised garment pose space deformation. ACM Transactions on Graphics (TOG), 40(6), 2021. 1, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 317, + 288, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 317, + 288, + 349 + ], + "spans": [ + { + "bbox": [ + 53, + 317, + 288, + 349 + ], + "type": "text", + "content": "[5] Hugo Bertiche, Meysam Madadi, and Sergio Escalera. Neural cloth simulation. ACM Transactions on Graphics (TOG), 41(6):1-14, 2022. 
1, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 351, + 288, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 351, + 288, + 406 + ], + "spans": [ + { + "bbox": [ + 53, + 351, + 288, + 406 + ], + "type": "text", + "content": "[6] Bharat Lal Bhatnagar, Garvita Tiwari, Christian Theobalt, and Gerard Pons-Moll. Multi-garment net: Learning to dress 3d people from images. In Proceedings of the IEEE International Conference on Computer Vision (ICCV). IEEE, 2019. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 407, + 288, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 407, + 288, + 462 + ], + "spans": [ + { + "bbox": [ + 53, + 407, + 288, + 462 + ], + "type": "text", + "content": "[7] Michael J. Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. BEDLAM: A synthetic dataset of bodies exhibiting detailed lifelike animated motion. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 8726-8737, 2023. 1, 2, 3, 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 464, + 288, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 464, + 288, + 508 + ], + "spans": [ + { + "bbox": [ + 53, + 464, + 288, + 508 + ], + "type": "text", + "content": "[8] Y. Boykov, O. Veksler, and R. Zabih. Fast approximate energy minimization via graph cuts. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 23(11): 1222-1239, 2001. 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 510, + 288, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 510, + 288, + 564 + ], + "spans": [ + { + "bbox": [ + 53, + 510, + 288, + 564 + ], + "type": "text", + "content": "[9] Xianjie Chen, Roozbeh Mottaghi, Xiaobai Liu, Sanja Fidler, Raquel Urtasun, and Alan Yuille. Detect what you can: Detecting and representing objects using holistic models and body parts. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2014. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 566, + 240, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 566, + 240, + 577 + ], + "spans": [ + { + "bbox": [ + 48, + 566, + 240, + 577 + ], + "type": "text", + "content": "[10] CLO. https://www.clo3d.com, 2022. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 578, + 287, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 287, + 632 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 287, + 632 + ], + "type": "text", + "content": "[11] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (TOG), 34(4):1-13, 2015. 2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "text", + "content": "[12] Enric Corona, Albert Pumarola, Guillem Alenyà, Gerard Pons-Moll, and Francesc Moreno-Noguer. Semplicit: Topology-aware generative model for clothed people. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
1, 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "text", + "content": "[13] Ke Gong, Xiaodan Liang, Yicheng Li, Yimin Chen, Ming Yang, and Liang Lin. Instance-level human parsing via part" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 714 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "grouping network. In Proceedings of the European Conference on Computer Vision (ECCV), pages 770-785, 2018. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 97, + 547, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 97, + 547, + 150 + ], + "spans": [ + { + "bbox": [ + 308, + 97, + 547, + 150 + ], + "type": "text", + "content": "[14] Ke Gong, Yiming Gao, Xiaodan Liang, Xiaohui Shen, Meng Wang, and Liang Lin. Graphonomy: Universal human parsing via graph transfer learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 152, + 547, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 152, + 547, + 207 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 547, + 207 + ], + "type": "text", + "content": "[15] Artur Grigorev, Bernhard Thomaszewski, Michael J. Black, and Otmar Hilliges. Hood: Hierarchical graphs for generalized modelling of clothing dynamics. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 16965-16974, 2023. 1, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 209, + 547, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 209, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 308, + 209, + 547, + 262 + ], + "type": "text", + "content": "[16] Chen Guo, Tianjian Jiang, Xu Chen, Jie Song, and Otmar Hilliges. Vid2 avatar: 3d avatar reconstruction from videos in the wild via self-supervised scene decomposition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 264, + 547, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 264, + 547, + 319 + ], + "spans": [ + { + "bbox": [ + 308, + 264, + 547, + 319 + ], + "type": "text", + "content": "[17] Sang-Hun Han, Min-Gyu Park, Ju Hong Yoon, Ju-Mi Kang, Young-Jae Park, and Hae-Gon Jeon. High-fidelity 3d human digitization from single 2k resolution images. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 320, + 545, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 320, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 308, + 320, + 545, + 365 + ], + "type": "text", + "content": "[18] Haoyu He, Jing Zhang, Qiming Zhang, and Dacheng Tao. Grapy-ml: Graph pyramid mutual learning for cross-dataset human parsing. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2020. 
3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 365, + 545, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 365, + 545, + 409 + ], + "spans": [ + { + "bbox": [ + 308, + 365, + 545, + 409 + ], + "type": "text", + "content": "[19] Haoyu He, Jing Zhang, Bhavani Thuraisingham, and Dacheng Tao. Progressive one-shot human parsing. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2021. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 411, + 545, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 411, + 545, + 477 + ], + "spans": [ + { + "bbox": [ + 308, + 411, + 545, + 477 + ], + "type": "text", + "content": "[20] Zhu Heming, Cao Yu, Jin Hang, Chen Weikai, Du Dong, Wang Zhangye, Cui Shuguang, and Han Xiaoguang. Deep fashion3d: A dataset and benchmark for 3d garment reconstruction from single images. In Proceedings of the European Conference on Computer Vision (ECCV), pages 512-530. Springer International Publishing, 2020. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 478, + 545, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 478, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 308, + 478, + 545, + 521 + ], + "type": "text", + "content": "[21] Hsuan-I Ho, Jie Song, and Otmar Hilliges. Sith: Single-view textured human reconstruction with image-conditioned diffusion. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 523, + 545, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 523, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 308, + 523, + 545, + 567 + ], + "type": "text", + "content": "[22] Jie Song Hsuan-I Ho, Lixin Xue and Otmar Hilliges. Learning locally editable virtual humans. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 568, + 545, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 568, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 308, + 568, + 545, + 622 + ], + "type": "text", + "content": "[23] Mustafa Işik, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 2, 3, 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 624, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 545, + 669 + ], + "type": "text", + "content": "[24] Boyi Jiang, Juyong Zhang, Yang Hong, Jinhao Luo, Ligang Liu, and Hujun Bao. BCnet: Learning body and cloth shape from a single image. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 1, 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "type": "text", + "content": "[25] Boyi Jiang, Yang Hong, Hujun Bao, and Juyong Zhang. Selfrecon: Self reconstruction your digital avatar from monocular video. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 8" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "type": "text", + "content": "558" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "text", + "content": "[26] Hanbyul Joo, Tomas Simon, Xulong Li, Hao Liu, Lei Tan, Lin Gui, Sean Banerjee, Timothy Scott Godisart, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social interaction capture. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2017. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 140, + 288, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 288, + 206 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 288, + 206 + ], + "type": "text", + "content": "[27] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollar, and Ross Girshick. Segment anything. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 4015-4026, 2023. 2, 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 208, + 288, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 208, + 288, + 240 + ], + "spans": [ + { + "bbox": [ + 48, + 208, + 288, + 240 + ], + "type": "text", + "content": "[28] Peike Li, Yunqiu Xu, Yunchao Wei, and Yi Yang. Self-correction for human parsing. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2020. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 241, + 288, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 288, + 285 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 288, + 285 + ], + "type": "text", + "content": "[29] X. Liang, K. Gong, X. Shen, and L. Lin. Look into person: Joint body parsing & pose estimation network and a new benchmark. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 41(04):871-885, 2019. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 286, + 287, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 286, + 287, + 341 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 287, + 341 + ], + "type": "text", + "content": "[30] Kunliang Liu, Ouk Choi, Jianming Wang, and Wonjun Hwang. Cdgnet: Class distribution guided network for human parsing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4473-4482, 2022. 
3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 342, + 287, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 287, + 385 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 287, + 385 + ], + "type": "text", + "content": "[31] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multiperson linear model. ACM Transactions on Graphics (TOG), 34(6):248:1-248:16, 2015. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 388, + 287, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 287, + 442 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 287, + 442 + ], + "type": "text", + "content": "[32] Qianli Ma, Jinlong Yang, Anurag Ranjan, Sergi Pujades, Gerard Pons-Moll, Siyu Tang, and Michael J. Black. Learning to Dress 3D People in Generative Clothing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 444, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 444, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 48, + 444, + 287, + 487 + ], + "type": "text", + "content": "[33] Qianli Ma, Jinlong Yang, Siyu Tang, and Michael J. Black. The power of points for modeling humans in clothing. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2021. 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 489, + 287, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 489, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 48, + 489, + 287, + 533 + ], + "type": "text", + "content": "[34] Gyeongsik Moon, Hyeongjin Nam, Takaaki Shiratori, and Kyoung Mu Lee. 3d clothed human reconstruction in the wild. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 1, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 534, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 534, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 534, + 287, + 588 + ], + "type": "text", + "content": "[35] Chaitanya Patel, Zhouyingcheng Liao, and Gerard Pons-Moll. Tailornet: Predicting clothing in 3d as a function of human pose, shape and garment style. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). IEEE, 2020. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 590, + 287, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 633 + ], + "type": "text", + "content": "[36] Gerard Pons-Moll, Sergi Pujades, Sonny Hu, and Michael J. Black. Clothcap: Seamless 4d clothing capture and retargeting. ACM Transactions on Graphics (TOG), 36(4), 2017. 2, 3, 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "text", + "content": "[37] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019. 
7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[38] Shunsuke Saito, Tomas Simon, Jason Saragih, and Hanbyul Joo. Pifuhd: Multi-level pixel-aligned implicit function for" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "text", + "content": "high-resolution 3d human digitization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 107, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 545, + 150 + ], + "type": "text", + "content": "[39] Yidi Shao, Chen Change Loy, and Bo Dai. Towards multilayered 3d garments animation. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2023. 2, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 152, + 545, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 152, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 545, + 206 + ], + "type": "text", + "content": "[40] Kaiyue Shen, Chen Guo, Manuel Kaufmann, Juan Zarate, Julien Valentin, Jie Song, and Otmar Hilliges. X-avatar: Expressive human avatars. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 3, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 209, + 545, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 209, + 545, + 252 + ], + "spans": [ + { + "bbox": [ + 308, + 209, + 545, + 252 + ], + "type": "text", + "content": "[41] Zhaoqi Su, Tao Yu, Yangang Wang, and Yebin Liu. Deepcloth: Neural garment representation for shape and style editing. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 45(2):1581-1593, 2023. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 254, + 545, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 254, + 545, + 297 + ], + "spans": [ + { + "bbox": [ + 308, + 254, + 545, + 297 + ], + "type": "text", + "content": "[42] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision (ECCV), pages 402-419. Springer, 2020. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 299, + 545, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 299, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 308, + 299, + 545, + 353 + ], + "type": "text", + "content": "[43] Garvita Tiwari, Bharat Lal Bhatnagar, Tony Tung, and Gerard Pons-Moll. Sizer: A dataset and model for parsing 3d clothing and learning size sensitive 3d clothing. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 
3, 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 354, + 545, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 354, + 545, + 375 + ], + "spans": [ + { + "bbox": [ + 308, + 354, + 545, + 375 + ], + "type": "text", + "content": "[44] Unreal Engine 5. https://www.unrealengine.com, 2022.3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 377, + 545, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 377, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 308, + 377, + 545, + 431 + ], + "type": "text", + "content": "[45] Wenguan Wang, Hailong Zhu, Jifeng Dai, Yanwei Pang, Jianbing Shen, and Ling Shao. Hierarchical human parsing with typed part-relation reasoning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 434, + 545, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 434, + 545, + 488 + ], + "spans": [ + { + "bbox": [ + 308, + 434, + 545, + 488 + ], + "type": "text", + "content": "[46] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. Fake it till you make it: face analysis in the wild using synthetic data alone. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 3681-3691, 2021. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 490, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 490, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 308, + 490, + 545, + 533 + ], + "type": "text", + "content": "[47] Yuliang Xiu, Jinlong Yang, Dimitrios Tzionas, and Michael J. Black. ICON: Implicit Clothed humans Obtained from Normals. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 535, + 545, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 535, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 308, + 535, + 545, + 588 + ], + "type": "text", + "content": "[48] Yuliang Xiu, Jinlong Yang, Xu Cao, Dimitrios Tzionas, and Michael J. Black. ECON: Explicit Clothed humans Optimized via Normal integration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 590, + 545, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 590, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 308, + 590, + 545, + 624 + ], + "type": "text", + "content": "[49] Lu Yang, Wenhe Jia, Shan Li, and Qing Song. Deep learning technique for human parsing: A survey and outlook. arXiv preprint arXiv:2301.00394, 2023. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 625, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 625, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 308, + 625, + 545, + 678 + ], + "type": "text", + "content": "[50] Tao Yu, Zerong Zheng, Kaiwen Guo, Pengpeng Liu, Qionghai Dai, and Yebin Liu. Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "text", + "content": "[51] Chao Zhang, Sergi Pujades, Michael J. Black, and Gerard Pons-Moll. Detailed, accurate, human shape estimation from clothed 3d scan sequences. In Proceedings of the IEEE" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "type": "text", + "content": "559" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 196 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "text", + "content": "Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 2, 3, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "text", + "content": "[52] Zerong Zheng, Tao Yu, Yebin Liu, and Qionghai Dai. Pamir: Parametric model-conditioned implicit representation for image-based human reconstruction. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2021. 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 287, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 287, + 196 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 287, + 196 + ], + "type": "text", + "content": "[53] Xingxing Zou, Xintong Han, and Waikeung Wong. Cloth4d: A dataset for clothed human reconstruction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 12847-12857, 2023. 
1, 2, 3, 5" + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "text", + "content": "560" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/7295c7f1-f21d-431e-aa65-0a0fb95fe12c_content_list.json b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/7295c7f1-f21d-431e-aa65-0a0fb95fe12c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a1e1ec9484b404cb475e09e44084e9e3aafa7034 --- /dev/null +++ b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/7295c7f1-f21d-431e-aa65-0a0fb95fe12c_content_list.json @@ -0,0 +1,1430 @@ +[ + { + "type": "text", + "text": "4D-fly: Text-to-4D Generation Using Hybrid Score Distillation Sampling", + "text_level": 1, + "bbox": [ + 122, + 130, + 846, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sherwin Bahmani $^{1,2}$ Ivan Skorokhodov $^{3,4}$ Victor Rong $^{1,2}$ Gordon Wetzstein $^{5}$ Leonidas Guibas $^{5}$ Peter Wonka $^{3}$ Sergey Tulyakov $^{4}$ Jeong Joon Park $^{6}$ Andrea Tagliafasacchi $^{1,7,8}$ David B. Lindell $^{1,2}$", + "bbox": [ + 89, + 179, + 877, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1University of Toronto 2Vector Institute 3KAUST 4Snap Inc. 5Stanford University 6University of Michigan 7SFU 8Google", + "bbox": [ + 104, + 217, + 861, + 233 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/be373046b6b89e238603e355261a367a82d8185543c47f7d9a676c2027f87d0b.jpg", + "image_caption": [ + "Figure 1. Text-to-4D Synthesis. We present 4D-fy, a technique that synthesizes 4D (i.e., dynamic 3D) scenes from a text prompt. We show scenes generated from two text prompts for different viewpoints (vertical dimension) at different time steps (horizontal dimension). Video results can be viewed on our website: https://sherwinbahmani.github.io/4dfy." + ], + "image_footnote": [], + "bbox": [ + 81, + 257, + 493, + 488 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5f03646c6c9ed74bf919e9380dcef17e32af727fed8ff565857e397d0696a8f3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 257, + 893, + 474 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 555, + 313, + 570 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent breakthroughs in text-to-4D generation rely on pre-trained text-to-image and text-to-video models to generate dynamic 3D scenes. However, current text-to-4D methods face a three-way tradeoff between the quality of scene appearance, 3D structure, and motion. For example, text-to-image models and their 3D-aware variants are trained on internet-scale image datasets and can be used to produce scenes with realistic appearance and 3D structure—but no motion. Text-to-video models are trained on relatively smaller video datasets and can produce scenes with motion, but poorer appearance and 3D structure. 
While these models have complementary strengths, they also have opposing weaknesses, making it difficult to combine them in a way that alleviates this three-way tradeoff. Here, we introduce hybrid score distillation sampling, an alternating optimization procedure that blends supervision signals from multiple pre-trained diffusion models and incorporates benefits of each for high-fidelity text-to-4D generation. Using hybrid SDS, we demonstrate synthesis of 4D scenes with compelling appearance, 3D structure, and motion.", + "bbox": [ + 75, + 580, + 472, + 882 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 501, + 555, + 630, + 570 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The advent of internet-scale image-text datasets [54] and advances in diffusion models [20, 58, 60] have led to new capabilities in stable, high-fidelity image generation from text prompts [6, 51, 52]. Recent methods have also shown that large-scale text-to-image or text-to-video [56] diffusion models learn useful priors for 3D [25, 44] and 4D scene generation [57]. Our work focuses on text-to-4D scene generation (Fig. 1), which promises exciting new capabilities for applications in augmented and virtual reality, computer animation, and industrial design.", + "bbox": [ + 496, + 580, + 893, + 733 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Current techniques for generating 3D or 4D scenes from text prompts typically iteratively optimize a representation of the scene using supervisory signals from a diffusion model [44, 67, 71]. Specifically, these methods render an image of a 3D scene, add noise to the rendered image, use a pre-trained diffusion model to denoise the rendered image, and estimate gradients used to update the 3D representation [44, 67]. This procedure, known as score distillation sampling (SDS) [44], underpins most recent methods for text-conditioned scene generation.", + "bbox": [ + 496, + 733, + 893, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Using SDS for text-to-4D generation requires navigating", + "bbox": [ + 519, + 885, + 890, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "7996", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/38f4dbfb27ef9c614c3f1eb265f908f2269e9efb30ee59b7e8e283a661a149ec.jpg", + "table_caption": [ + "Table 1. Text-to-4D models face a tradeoff between the quality of appearance, 3D structure, and motion depending on the type of generative model used for score distillation sampling (SDS): text-to-image (T2I), 3D-aware T2I, or, text-to-video (T2V)." + ], + "table_footnote": [], + "table_body": "
SDS modelappearance3D structuremotion
T2I [6, 51, 52, 79]highlowN/A
3D-aware T2I [29, 55]mediumhighN/A
T2V [7, 21, 56, 69, 72]lowlowhigh
Our methodmediumhighmedium
", + "bbox": [ + 76, + 155, + 467, + 243 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "a three-way tradeoff between the quality of appearance, 3D structure, and motion (see Table 1); existing techniques obtain satisfactory results in just one or two of these categories. For example, while SDS produces images that appear realistic when rendering a generated scene from any particular viewpoint, inspecting multiple viewpoints can reveal that the scene has several faces or heads, replicated appendages, or other incorrectly repeated 3D structures—an issue now referred to as the \"Janus problem\" [55].1 One way to improve 3D structure is to use SDS with a 3D-aware diffusion model that is trained to generate images from different camera viewpoints [33]. But 3D-aware models sacrifice appearance quality as they require fine-tuning on synthetic datasets of posed images [55]. Incorporating motion into a scene using SDS with a text-to-video model [69] typically degrades the appearance relative to static scenes generated with text-to-image models, which are more realistic (see Fig. 2). While different types of diffusion models thus have complementary qualities, they also have opposing weaknesses (Table 1). Therefore, it is not trivial to combine them in a way that yields text-to-4D generation with high-quality appearance, 3D structure, and motion.", + "bbox": [ + 75, + 270, + 472, + 601 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Here, we propose a method for text-to-4D scene generation that alleviates this three-way tradeoff using hybrid SDS, an alternating optimization scheme that blends gradient updates from multiple pre-trained diffusion models and synthesizes 4D scenes using the best qualities of each. The method consists of three stages of optimization: (1) we use a 3D-aware text-to-image model [55] to generate an initial static 3D scene (without the Janus problem); (2) we continue the optimization by blending in alternating supervision with variational SDS [71] and a text-to-image model to improve appearance; (3) we blend in alternating supervision using video SDS with a text-to-video model [69] to add motion to the scene. By smoothly incorporating supervisory signals from these three diffusion models throughout the training process, we achieve text-driven 4D scene generation with state-of-the-art quality in terms of appearance, 3D structure, and motion. Overall we provide the following contributions. We introduce hybrid SDS, a technique that extracts desir", + "bbox": [ + 75, + 603, + 470, + 875 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0408112f472c85c72761b8513b059cddd1634e8e624b099869a98277e5d3c693.jpg", + "image_caption": [ + "Figure 2. Comparing text-to-image and text-to-video models. Rendered frames from Stable Diffusion version 2.1 (top; text-to-image) [1] and Zeroscope version 2 (bottom; text-to-video) [3] show significant disparity in appearance, with the text-to-image model appearing far more realistic." 
+ ], + "image_footnote": [], + "bbox": [ + 501, + 88, + 893, + 309 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "able qualities from multiple pre-trained diffusion models and alleviates a tradeoff between appearance, 3D structure, and motion in text-to-4D scene generation.", + "bbox": [ + 511, + 411, + 890, + 455 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We provide a quantitative and qualitative evaluation of the method, and we explore the three-way tradeoff space with ablation studies to facilitate future research.", + "- We demonstrate text-to-4D generation based on open-source pretrained models and will make all codes and evaluation procedures publicly available.", + "- We present state-of-the-art results for the task of text-to-4D generation." + ], + "bbox": [ + 500, + 455, + 893, + 575 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 593, + 640, + 609 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our method is related to techniques from multiple areas of generative modeling, including text-to-image, text-to-video, and text-to-3D models. For more extensive discussions of related works, we refer readers to a recent state-of-the-art report on diffusion models [43].", + "bbox": [ + 496, + 619, + 893, + 696 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Text-to-image generation. Methods for generating images from text prompts are a relatively new innovation, first demonstrated using generative adversarial networks [49, 73, 80]. The problem itself is also related to other methods for text-based image retrieval [34] or image-conditioned text generation [61, 77]. More recently, models trained on text-image datasets with billions of samples [54] have become the state of the art for this task [51].", + "bbox": [ + 496, + 703, + 893, + 823 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Diffusion models [20, 59] are a popular architecture for generative modeling on large-scale datasets, and autoregressive models have also shown promising results [47, 78]. Typically, these methods exploit a pretrained text encoder, such as CLIP [46], to encode the text prompt into a feature", + "bbox": [ + 496, + 825, + 893, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1Referring to the two-faced Roman god of beginnings and endings.", + "bbox": [ + 94, + 886, + 450, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "7997", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "vector used to condition the diffusion model [40, 48]. In diffusion models, high-resolution (i.e., megapixel) image generation is achieved by applying repeated upsampling layers [22, 48] or performing diffusion in the lower-resolution latent space of an autoencoder and then decoding the result to recover an image at the nominal resolution [16, 51]. Our work incorporates two open-source text-to-image diffusion models: Stable Diffusion [51] and MVDream [55] (a recent 3D-aware diffusion model) to enable 4D scene generation.", + "bbox": [ + 75, + 90, + 472, + 227 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Text-to-video generation. Our work relies on the burgeoning field of video generation via diffusion models, an area that is somewhat constrained by the limited scale of video datasets. 
To counteract this, methods often utilize a hybrid training approach on both image and video datasets, such as WebVid-10M [5], HD-VG-130M [69], or HD-VILA-100M [74]. Recent approaches in this field typically employ variations of pixel-space upsampling (both in space and time) [21] or latent space upsampling to improve spatial and temporal resolution [17, 18, 70, 83]. Autoregressive models distinguish themselves by their ability to generate videos of varying lengths [65]. Further improvements in video synthesis have been achieved by finetuning pre-trained text-to-image diffusion models on video data [7, 56, 72], or separating the content and motion generation process by using an initial image frame as a starting point [17, 72]. Despite recent advances in text-to-video synthesis, the fidelity of generated videos still lags behind that of static image generation (see Fig. 2) and so they perform poorly when used directly with SDS for text-to-4D generation. Instead, our work leverages an open-source latent space text-to-video diffusion model called Zeroscope [3] (extended from the Modelscope architecture [68]) together with other pre-trained, open-source diffusion models using hybrid SDS.", + "bbox": [ + 75, + 237, + 472, + 599 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Text-to-3D generation. Early methods for text-to-3D generation relied on parsers to convert input text to a semantic representation and synthesized scenes from an object database [4, 10, 12]. Later, automated, data-driven methods used multi-modal datasets [11], and pre-trained models, such as CLIP [46], to edit or stylize an input 3D mesh [14, 26] or a radiance field [66]. More recently, CLIP-based supervision enabled synthesis of entire 3D scenes [25, 53], and these techniques evolved into the most recent approaches, which optimize a mesh or radiance field based on SDS supervision [30, 44, 71]. The quality of their 3D structures has been improved by applying diffusion models that consider multiple viewpoints [31, 33, 55]. Alternatively, recent advancements have seen a shift towards using diffusion or transformer models to transform an input 2D image into a 3D representation for novel-view synthesis [9, 15, 35, 45, 62, 63, 76]. Still, these techniques do not yet support generating 4D scenes.", + "bbox": [ + 75, + 609, + 472, + 881 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our work is most closely related to Make-A-", + "bbox": [ + 96, + 885, + 472, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Video3D (MAV3D) [57], a recent method for text-to-4D generation that integrates SDS-based supervision in two separate stages: first with a text-to-image model and subsequently with a text-to-video model. Similar to MAV3D, we aim to generate dynamic 3D scenes; however, our approach uses hybrid SDS, which allows gradient updates from multiple models to be smoothly blended together in an alternating optimization. Our approach generates high-quality dynamic 3D scenes and does not suffer from Janus problems.", + "bbox": [ + 496, + 90, + 893, + 227 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Concurrent works. Concurrent works on text-to-4D [32, 82], image-to-4D [50, 81, 82], and video-to-4D [27, 41, 75] similarly use recent diffusion models for 4D generation.", + "bbox": [ + 496, + 233, + 893, + 279 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. 
Method", + "text_level": 1, + "bbox": [ + 500, + 290, + 589, + 306 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our approach for text-to-4D generation builds upon a hash-encoding-based neural representation [39] that implicitly decomposes the scene into static and dynamic feature grids [64]. In this section we overview our representation for 4D neural rendering and describe the optimization procedure based on hybrid SDS (see Fig. 3).", + "bbox": [ + 496, + 316, + 893, + 407 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 4D Neural Rendering", + "text_level": 1, + "bbox": [ + 500, + 415, + 702, + 431 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Volumetric neural rendering methods represent a scene using a neural representation to parameterize the attenuation and emission of light at every point in 3D space [36, 38]. We can use such a representation to render an image by casting a ray from the camera center of projection, through each pixel location, and into the scene. For sampled points along the ray $\\pmb{\\mu} \\in \\mathbb{R}^3$ , we query a neural representation to retrieve a volumetric density $\\tau \\in \\mathbb{R}_+$ and color $\\mathbf{c} \\in \\mathbb{R}_+^3$ , which describe attenuation and emission of light, respectively, at a particular point. Then, the resulting density and color samples are alpha-composed to recover the color of a rendered pixel $\\mathbf{C}$ as", + "bbox": [ + 496, + 438, + 893, + 619 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {C} = \\sum_ {i} w _ {i} \\mathbf {c} _ {i}, w _ {i} = \\alpha \\prod_ {j < i} (1 - \\alpha_ {j}), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 571, + 627, + 893, + 660 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\alpha_{i} = 1 - e^{-\\tau_{i}\\| \\pmb{\\mu}_{i} - \\pmb{\\mu}_{i + 1}\\|$ . We query the neural representation using an additional input time variable $t$ , which enables modeling time-varying density and color.", + "bbox": [ + 496, + 669, + 893, + 715 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We illustrate the neural representation in Fig. 3; it consists of two multi-resolution hash tables to disentangle static and dynamic scene modeling.", + "bbox": [ + 496, + 715, + 890, + 760 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Following Müller et al. [39], the static hash table stores learnable feature vectors that are indexed by a voxel-lookup and hashing operation and decoded into density and color using two small multilayer perceptrons (MLPs). Concretely, we consider the neural representation", + "bbox": [ + 496, + 761, + 893, + 835 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {N} _ {\\theta}: \\boldsymbol {\\mu}, t \\rightarrow \\tau , \\mathbf {c} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 637, + 845, + 890, + 862 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "with $\\theta = \\{\\theta_{\\mathrm{static}},\\theta_{\\mathrm{dynamic}},\\theta_{\\mathrm{MLP}}\\}$ denoting all learnable parameters from the static and dynamic hash tables and the MLPs.", + "bbox": [ + 496, + 869, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "7998", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a24e7e6dc53088c14d5889b41abcd7ed78b98954b9f9997f6848a9c43dbad622.jpg", + "image_caption": [ + "Figure 3. Overview. 
A 4D radiance field is parameterized using a neural representation with a static and dynamic multiscale hash table of features. Images and videos are rendered from the representation using volume rendering, and we supervise the representation using hybrid score distillation sampling—a technique that combines gradients from multiple types of pre-trained diffusion models. In the first stage of training we use gradients $\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}}$ from a 3D-aware text-to-image model (3D-T2I) to iteratively optimize a representation without the Janus problem. Next, we blend in gradient supervision using variational SDS with a text-to-image model (T2I) to improve the appearance (i.e., we alternate supervision between $\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}$ and $\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}}$ ). In the last stage we incorporate gradients $(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{VID}})$ from a text-to-video model (T2V) to add motion to the scene, and we update the scene using the other models in an alternating fashion." + ], + "image_footnote": [], + "bbox": [ + 81, + 88, + 897, + 233 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For a given $\\mu$ , we query the static hash table by identifying the closest voxel at each scale $1 \\leq s \\leq S$ . Then, we trilinearly interpolate the feature values from the voxel vertices after retrieving them from the hash table. Retrieved features from each scale are concatenated as $\\mathbf{f}_{\\mathrm{static}} = \\mathbf{f}_{\\mathrm{static}}^{(1)} \\oplus \\dots \\oplus \\mathbf{f}_{\\mathrm{static}}^{(S)}$ . We follow the same procedure to query the dynamic hash table given $(\\mu, t)$ , except we use quadrilinear interpolation to interpolate feature values. The resulting features from the static and dynamic hash tables are added as $\\mathbf{f} = \\mathbf{f}_{\\mathrm{static}} + \\mathbf{f}_{\\mathrm{dynamic}}$ . We do not model view-dependent effects in the feature encoding. Finally, we decode density and color as $\\mathrm{MLP}_{\\tau}(\\mathbf{f})$ and $\\mathrm{MLP}_{\\mathrm{c}}(\\mathbf{f})$ , respectively.", + "bbox": [ + 75, + 375, + 473, + 556 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Hybrid Score Distillation Sampling", + "text_level": 1, + "bbox": [ + 76, + 566, + 383, + 583 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We leverage the 4D representation along with SDS to create dynamic 3D scenes from a text prompt. Our hybrid approach incorporates three different flavors of SDS that are smoothly merged during an alternating optimization procedure to improve the structure and quality of the 4D model:", + "bbox": [ + 75, + 590, + 470, + 666 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. SDS applied to a 3D-aware text-to-image diffusion model to optimize a static scene without the Janus problem.", + "2. Variational score distillation sampling (VSD; a modified version of SDS [71]) using a standard text-to-image model [51] to improve the appearance of the static scene.", + "3. Video SDS using a text-to-video model [69], which extends SDS to multiple video frames and adds motion to the scene." + ], + "bbox": [ + 76, + 667, + 470, + 786 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the following, we describe each type of SDS and how it is used for text-to-4D generation.", + "bbox": [ + 75, + 787, + 468, + 819 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D-aware scene optimization. 
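Before detailing the 3D-aware optimization, the feature lookup just described can be made concrete with a simplified sketch: one small dense grid stands in for each multiresolution hash table (a trilinear lookup for the static table, a quadrilinear one for the dynamic table), and two tiny linear heads stand in for the density and color MLPs. Resolutions, feature sizes, and activations below are assumptions for illustration, not the paper's actual configuration.

```python
import itertools
import numpy as np

def nlerp(grid, coord):
    """N-linear interpolation of a dense feature grid at a continuous coordinate.

    grid:  array with one spatial axis per coordinate dimension plus a feature axis
    coord: array of shape (D,) with values in [0, 1]
    Stands in for the hashed multiresolution lookup: trilinear for the static
    (x, y, z) table and quadrilinear for the dynamic (x, y, z, t) table.
    """
    D = coord.shape[0]
    res = np.array(grid.shape[:D]) - 1
    pos = coord * res
    lo = np.floor(pos).astype(int)
    frac = pos - lo
    feat = np.zeros(grid.shape[-1])
    for corner in itertools.product((0, 1), repeat=D):
        corner = np.array(corner)
        idx = tuple(np.clip(lo + corner, 0, res))
        w = np.prod(np.where(corner == 1, frac, 1.0 - frac))
        feat += w * grid[idx]
    return feat

rng = np.random.default_rng(0)
F = 4                                            # features per table (illustrative)
static_grid  = rng.normal(size=(8, 8, 8, F))     # one level of the static table
dynamic_grid = rng.normal(size=(8, 8, 8, 8, F))  # one level of the dynamic (space-time) table
W_tau, W_c = rng.normal(size=(F,)), rng.normal(size=(F, 3))  # stand-ins for the decoder MLPs

mu, t = np.array([0.3, 0.7, 0.5]), 0.25
f = nlerp(static_grid, mu) + nlerp(dynamic_grid, np.append(mu, t))  # f = f_static + f_dynamic
tau = np.log1p(np.exp(f @ W_tau))                # softplus keeps density non-negative
c = 1.0 / (1.0 + np.exp(-(f @ W_c)))             # sigmoid keeps color in [0, 1]
print(tau, c)
```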
We first consider optimizing a static scene using SDS with a 3D-aware text-to-image diffusion model [55]. The diffusion model is pre-trained using a stochastic forward process that slowly adds Gaussian noise to multiview images $\\mathbf{x}$ over timesteps $0\\leq t_{d}\\leq T_{d}$ .", + "bbox": [ + 75, + 825, + 472, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "With increasing $t_d$ , the process yields noisy images $\\mathbf{z}_{t_d}$ that, at $t_d = T_d$ , are close to zero-mean Gaussian. After training, the model reverses this process to add structure to the noisy images. It predicts $\\hat{\\mathbf{x}}_{\\phi}(\\mathbf{z}_{t_d}; t_d, \\mathbf{y}, \\mathbf{T})$ , which approximates the output of an optimal denoiser at each timestep $t_d$ , conditioned on a text embedding $\\mathbf{y}$ [48, 51, 52] and the camera extrinsics $\\mathbf{T}$ corresponding to each image. In practice, text-to-image diffusion models typically predict the noise content $\\epsilon_{\\phi}$ rather than the denoised image $\\hat{\\mathbf{x}}_{\\phi}$ . But note that the denoised image can still be obtained as $\\hat{\\mathbf{x}}_{\\phi}(\\mathbf{z}_{t_d}; t_d, \\mathbf{y}, \\mathbf{T}) \\propto \\mathbf{z}_{t_d} - \\epsilon_{\\phi}(\\mathbf{z}_{t_d}; t_d, \\mathbf{y}, \\mathbf{T})$ , i.e., by subtracting the predicted noise from the noisy image [20]. We implement 3D-aware SDS by rendering multiple images $\\mathbf{x}_{\\theta}$ from the neural representation, adding noise $\\epsilon$ , and using the 3D-aware diffusion model [55] to predict the noise $\\epsilon_{\\phi}$ using classifier-free guidance [19]. To update the parameters $\\theta$ of the neural representation, we use the 3D-aware SDS gradient:", + "bbox": [ + 496, + 375, + 893, + 645 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\theta} \\mathcal {L} _ {\\mathrm {3 D}} = \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}, \\mathbf {T}} \\left[ w (t _ {d}) \\left(\\boldsymbol {\\epsilon} _ {\\phi} \\left(\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}, \\mathbf {T}\\right) - \\boldsymbol {\\epsilon}\\right) \\frac {\\partial \\mathbf {x} _ {\\theta}}{\\partial \\theta} \\right], \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 508, + 652, + 890, + 696 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $w(t_{d})$ is a weighting function that depends on the diffusion timestep, and we add a stop gradient to the output of the diffusion model [55]. Intuitively, the SDS loss queries the diffusion model to see how it adds structure to an image, then this information is used to backpropagate gradients to the scene representation.", + "bbox": [ + 496, + 698, + 893, + 787 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Improving appearance using VSD. We incorporate an additional loss term based on VSD [71] to improve the appearance of images rendered from the scene. This term uses a pre-trained text-to-image model [51] along with a finetuning scheme that improves image quality over the 3D-aware text-to-image model alone. We follow Wang et al. [71] and augment the standard SDS gradient with the output of an", + "bbox": [ + 496, + 794, + 895, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "7999", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "additional text-to-image diffusion model that is finetuned using a low-rank adaptation [24], during scene optimization. 
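A schematic PyTorch version of the 3D-aware SDS update in Eq. (3) is sketched below. The renderer, denoiser, and noise schedule are placeholder stand-ins, not MVDream's or any specific library's API; the essential points are that the predicted noise is treated as a constant and that gradients reach the scene parameters only through the rendered images.

```python
import torch

def sds_update(render_fn, denoiser, text_emb, cams, guidance_scale=50.0, T=1000):
    """One 3D-aware SDS step in the spirit of Eq. (3); all callables are stand-ins."""
    x = render_fn(cams)                                     # renders differentiable w.r.t. scene params
    t = torch.randint(20, T, (1,))
    a_bar = torch.cos(0.5 * torch.pi * t.float() / T) ** 2  # assumed cosine schedule for alpha-bar
    eps = torch.randn_like(x)
    z_t = a_bar.sqrt() * x + (1 - a_bar).sqrt() * eps       # noisy rendering

    with torch.no_grad():                                   # no gradients through the diffusion model
        e_cond = denoiser(z_t, t, text_emb, cams)
        e_uncond = denoiser(z_t, t, None, cams)
        e_pred = e_uncond + guidance_scale * (e_cond - e_uncond)   # classifier-free guidance

    w = 1.0 - a_bar                                         # one common choice of w(t_d)
    loss = (w * (e_pred - eps) * x).sum()                   # d(loss)/d(theta) reproduces Eq. (3)
    loss.backward()

# toy smoke test with trivial stand-ins
scene = torch.randn(4, 3, 32, 32, requires_grad=True)       # pretend the "scene" is its rendered views
sds_update(render_fn=lambda cams: scene,
           denoiser=lambda z, t, y, c: torch.zeros_like(z),
           text_emb=None, cams=None)
print(scene.grad.shape)
```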
Specifically, we have", + "bbox": [ + 76, + 90, + 470, + 136 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\theta} \\mathcal {L} _ {\\mathrm {I M G}} = \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}, \\mathbf {T}} \\left[ w (t _ {d}) \\left(\\boldsymbol {\\epsilon} _ {\\phi} (\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}) - \\boldsymbol {\\epsilon} _ {\\phi} ^ {\\prime} (\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}, \\mathbf {T})\\right) \\frac {\\partial \\mathbf {x} _ {\\theta}}{\\partial \\theta} \\right],\n$$\n", + "text_format": "latex", + "bbox": [ + 78, + 143, + 472, + 176 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\epsilon_{\\phi}^{\\prime}$ is the noise predicted using a finetuned version of the diffusion model that incorporates additional conditioning from the camera extrinsics $\\mathbf{T}$ ; here, we let $\\mathbf{z}_{t_d}$ represent a noisy version of a single image rendered from $\\mathcal{N}_{\\theta}$ . The model is finetuned using the standard diffusion objective", + "bbox": [ + 76, + 179, + 468, + 253 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\theta} \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}, \\mathbf {T}} \\left[ \\| \\boldsymbol {\\epsilon} _ {\\phi} ^ {\\prime} \\left(\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}, \\mathbf {T}\\right) - \\boldsymbol {\\epsilon} \\| _ {2} ^ {2} \\right]. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 137, + 263, + 468, + 285 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that, different from the original description of VSD [71], we find we can omit the simultaneous optimization over multiple scene samples (i.e. the variational component of [71]), which reduces memory requirements without significantly degrading appearance.", + "bbox": [ + 76, + 294, + 470, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Adding motion with Video SDS. Last, we use supervision from a text-to-video diffusion model [69] to add motion to the generated scene. This procedure extends the original SDS gradient by incorporating structure added by the diffusion model to all noisy video frames [57]. The video SDS gradient is given as", + "bbox": [ + 76, + 376, + 470, + 467 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\theta} \\mathcal {L} _ {\\mathrm {V I D}} = \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}} \\left[ w (t _ {d}) \\left(\\epsilon_ {\\phi} \\left(\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}\\right) - \\boldsymbol {\\epsilon}\\right) \\frac {\\partial \\mathbf {X} _ {\\theta}}{\\partial \\theta} \\right]. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 474, + 468, + 508 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To simplify notation, we re-use $\\epsilon_{\\phi}$ and $\\epsilon$ to here represent the predicted and actual noise for each video frame, and we let $\\mathbf{X}_{\\theta}$ be a collection of $V$ video frames $\\mathbf{X}_{\\theta} = [\\mathbf{x}_{\\theta}^{(1)},\\dots,\\mathbf{x}_{\\theta}^{(V)}]^{T}$ rendered from the representation.", + "bbox": [ + 76, + 516, + 468, + 579 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Optimization procedure - Algorithm 1. 
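Before the staged procedure is laid out, here is a schematic sketch of how the VSD update (Eqs. 4-5) differs from plain SDS: the scene gradient is driven by the difference between the frozen pretrained prediction and a LoRA-finetuned, camera-conditioned copy, and that copy is itself trained online on renders of the current scene. The video SDS term in Eq. (6) has the same structure as Eq. (3), just applied to a stack of rendered frames. All model callables below are toy stand-ins under assumed shapes, not ProlificDreamer's or Stable Diffusion's API.

```python
import torch
import torch.nn.functional as F

def vsd_update(x, t, text_emb, cams, eps_pretrained, eps_lora, lora_opt, a_bar):
    """One VSD-style step (Eqs. 4-5); every callable here is a placeholder."""
    noise = torch.randn_like(x)
    z_t = a_bar.sqrt() * x + (1 - a_bar).sqrt() * noise

    # Eq. (4): scene gradient from the difference of the two noise predictions.
    with torch.no_grad():
        residual = eps_pretrained(z_t, t, text_emb) - eps_lora(z_t, t, text_emb, cams)
    ((1 - a_bar) * residual * x).sum().backward()

    # Eq. (5): finetune the LoRA copy to denoise renders of the current scene.
    lora_opt.zero_grad()
    pred = eps_lora(z_t.detach(), t, text_emb, cams)
    F.mse_loss(pred, noise).backward()
    lora_opt.step()

# toy smoke test
lora = torch.nn.Conv2d(3, 3, kernel_size=1)                  # stands in for the finetuned copy
x = torch.randn(1, 3, 16, 16, requires_grad=True)            # stands in for a rendered view
vsd_update(x, torch.tensor([500]), None, None,
           eps_pretrained=lambda z, t, y: torch.zeros_like(z),
           eps_lora=lambda z, t, y, c: lora(z),
           lora_opt=torch.optim.Adam(lora.parameters(), lr=1e-3),
           a_bar=torch.tensor(0.5))
print(x.grad.shape)
```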
We optimize the 4D representation in three stages that smoothly blend supervision in alternating steps from (1) 3D-aware SDS, (2) VSD, and (3) video SDS.", + "bbox": [ + 76, + 583, + 468, + 642 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stage 1. In the first stage of optimization, we update $\mathcal{N}_{\theta}$ using gradients from 3D-aware SDS until convergence. Since this stage focuses on optimizing a static scene, we freeze (i.e. do not update) the parameters of the dynamic hash table $\mathbf{f}_{\mathrm{dynamic}}$ and only update the static hash table and decoder MLP. We set the total number of first-stage iterations $N_{\mathrm{stage - 1}}$ to match that of Shi et al. [55], which allows the optimization to proceed until there are no distinguishable changes in the rendered scene from one iteration to the next.", + "bbox": [ + 76, + 643, + 470, + 779 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stage 2. Next, we add VSD gradients using an alternating optimization procedure. At each iteration, we randomly select to update the model using $\nabla_{\theta}\mathcal{L}_{3\mathrm{D}}$ or $\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}$ with probability $P_{3\mathrm{D}}$ and $P_{\mathrm{IMG}}$ . We continue this alternating optimization for $N_{\mathrm{stage - 2}}$ iterations, until convergence. As we show in the next section, this stage of optimization results in improved appearance compared to using $\nabla_{\theta}\mathcal{L}_{3\mathrm{D}}$ alone while also being free of the Janus problem.", + "bbox": [ + 76, + 779, + 470, + 901 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Hybrid Score Distillation Sampling" + ], + "code_body": "Require: $\mathcal{N}_{\theta}$ ▷ 4D neural representation \n $N_{\mathrm{stage-1}}, N_{\mathrm{stage-2}}, N_{\mathrm{stage-3}}$ ▷ iterations for each stage \n $P_{\mathrm{3D}}, P_{\mathrm{IMG}}$ ▷ update probabilities \n $\nabla_{\theta}\mathcal{L}_{\mathrm{3D}}, \nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}, \nabla_{\theta}\mathcal{L}_{\mathrm{VID}}$ ▷ SDS grads. (Eqs. 3, 4, 6) \n1: // Stage 1 \n2: freeze dynamic hash map ( $\theta_{\text{dynamic}}$ ) \n3: for iter in $N_{stage-1}$ do ▷ 3D update \n4: grad $=$ $\nabla_{\theta}\mathcal{L}_{\mathrm{3D}}$ \n5: UPDATE(grad) \n6: // Stage 2 \n7: for iter in $N_{stage-2}$ do ▷ 3D or IMG update \n8: grad $=$ $\nabla_{\theta}\mathcal{L}_{\mathrm{3D}}$ with probability $P_{\mathrm{3D}}$ , else $\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}$ \n9: UPDATE(grad) \n10: // Stage 3 \n11: decrease learning rate of static hash map ( $\theta_{\text{static}}$ ) \n12: for iter in $N_{stage-3}$ do ▷ 3D, IMG, or VID update \n13: grad $=$ $\nabla_{\theta}\mathcal{L}_{\mathrm{3D}}$ with probability $P_{\mathrm{3D}}$ , $\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}$ with probability $P_{\mathrm{3D}} \cdot P_{\mathrm{IMG}}$ , else $\nabla_{\theta}\mathcal{L}_{\mathrm{VID}}$ \n14: if VID, unfreeze $\theta_{\text{dynamic}}$ \n15: UPDATE(grad) \n16: procedure UPDATE(grad) \n17: $\mathbf{x} \leftarrow \mathcal{N}_{\theta}$ ▷ render images (Eq. 
1) \n18: take gradient step on grad ▷ optimize N0 \n19: if IMG, take finetuning step (Eq. 5) \n20: end procedure", + "bbox": [ + 501, + 107, + 893, + 573 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stage 3. Last, we update the representation using a combination of all gradient updates. Specifically, we randomly select to update the model at each iteration using $\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{3D}}$ , $\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}$ , or $\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{VID}}$ with probability $P_{\\mathrm{3D}}$ , $P_{\\mathrm{3D}}\\cdot P_{\\mathrm{IMG}}$ , and $1 - P_{\\mathrm{3D}}\\cdot P_{\\mathrm{IMG}}$ , respectively. Since we now aim to incorporate motion into the representation, we unfreeze the parameters of the dynamic hash table during the update with $\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{VID}}$ but keep them frozen for updates using the text-to-image models. We also decrease the learning rate of the static hash table to preserve the high-quality appearance from the previous stage. We repeat the alternating optimization in the final stage until convergence, which we find occurs consistently within $N_{\\mathrm{stage-3}}$ iterations. Overall, hybrid SDS effectively combines the strengths of each pre-trained diffusion model while avoiding quality degradations that result from naively combining gradients from each model.", + "bbox": [ + 496, + 595, + 893, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Implementation", + "text_level": 1, + "bbox": [ + 500, + 847, + 658, + 863 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We implement hybrid SDS based on the threestudio framework [2], which includes implementations of MV-", + "bbox": [ + 498, + 869, + 893, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "8000", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/0bc5e717e74819f668e110a46ef21906fa72852d57b5716238a2cb542b344c29.jpg", + "image_caption": [ + "Figure 4. Text-to-4D Comparison. We compare against MAV3D [57], and observe our approach obtains significantly higher quality results." + ], + "image_footnote": [], + "bbox": [ + 76, + 88, + 893, + 518 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dream [55] (for 3D-aware text-to-image diffusion and SDS), ProlificDreamer [71] with Stable Diffusion [51] (text-to-image diffusion and VSD), and we implement the video SDS updates using Zeroscope [3, 69].", + "bbox": [ + 75, + 563, + 470, + 625 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Hyperparameter values. We initialize the 4D neural representation following [30, 44] and add an offset to the density predicted by the network in the center of the scene to promote object-centric reconstructions. We set the learning rates for the static hash map to 0.01, for the dynamic hash map to 0.01, and for the MLP to 0.001. We drop the learning rate for the static hash map to 0.0001 before the last stage to focus the gradient updates on the dynamic hash map. The values of $N_{\\mathrm{stage - 1}}$ , $N_{\\mathrm{stage - 2}}$ , and $N_{\\mathrm{stage - 3}}$ are set to 10000, 10000, and 100000, respectively. We set the probabilities for hybrid SDS to $P_{\\mathrm{3D}} = 0.5$ and $P_{\\mathrm{IMG}} = 0.5$ for a reasonable tradeoff with respect to appearance, 3D structure, and motion.", + "bbox": [ + 75, + 633, + 468, + 815 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Rendering. 
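The training settings reported in the two paragraphs above can be collected into one small configuration sketch before the rendering resolutions are given. The field names are ours and this is not threestudio's actual configuration schema; it simply gathers the stated values in one place.

```python
# Our summary of the reported settings; not the released configuration format.
HYBRID_SDS_CONFIG = {
    "stage_1": {"iterations": 10_000,  "supervision": ["3D"]},
    "stage_2": {"iterations": 10_000,  "supervision": ["3D", "IMG"]},
    "stage_3": {"iterations": 100_000, "supervision": ["3D", "IMG", "VID"]},
    "update_probabilities": {"P_3D": 0.5, "P_IMG": 0.5},
    "learning_rates": {
        "static_hash_map": 0.01,           # reduced before the last stage
        "static_hash_map_stage_3": 0.0001,
        "dynamic_hash_map": 0.01,
        "mlp": 0.001,
    },
    "guidance_models": {
        "3D": "MVDream (3D-aware text-to-image)",
        "IMG": "Stable Diffusion with VSD (ProlificDreamer-style)",
        "VID": "Zeroscope (text-to-video)",
    },
}
```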
Each of the diffusion models has a different native resolution, so we render images from $\\mathcal{N}_{\\theta}$ accordingly. We render four images from different camera positions for the 3D-aware SDS at the native $(256\\times 256$ pixel) resolution of the 3D-aware text-to-image model. The VSD update", + "bbox": [ + 75, + 824, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "is computed by rendering a $256 \\times 256$ image and bilinearly upsampling the image to the native resolution of Stable Diffusion $(512 \\times 512)$ . Finally, the video SDS update is computed by rendering 16 video frames at $160 \\times 288$ resolution and upsampling to the native $320 \\times 576$ resolution of Zeroscope.", + "bbox": [ + 496, + 563, + 893, + 640 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 657, + 632, + 676 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Metrics", + "text_level": 1, + "bbox": [ + 500, + 685, + 596, + 700 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We assess our method using CLIP Score [42] and a user study. We compare our model against MAV3D for 28 prompts and against our ablations for a subset of 5 prompts. Current text-to-4D models are costly to train, and many researchers in academia do not have access to the scale of resources available to large tech companies. Hence, we only used a subset due to computational limitations. To promote future research in this field, we open source the evaluation protocol for the user study along the code: https://github.com/sherwinbahmani/4dfy.", + "bbox": [ + 496, + 710, + 893, + 862 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "CLIP Score. CLIP Score [42] evaluates the correlation between a text prompt and an image. Specifically, this cor", + "bbox": [ + 498, + 869, + 893, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "8001", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/10903998fb787604436c7f5b976326d94139a58c29e56ec6b97627a0427fb085.jpg", + "table_caption": [ + "Table 2. Quantitative results. We compare our method against MAV3D and variations of 4D-fy with different loss terms or backbone architectures (i.e., with HexPlane [8]). The methods are evaluated in terms of CLIP Score (CLIP) and human preference based on appearance quality (AQ), 3D structure quality (SQ), motion quality (MQ), text alignment (TA), and overall preference (Overall). The numbers reported for human preference are the percentages of users who voted for our method over the corresponding method in head-to-head comparisons." + ], + "table_footnote": [], + "table_body": "
MethodCLIPHuman Preference
AQSQMQTAOverall
MAV3D [57]33.992%89%41%52%67%
4D-fy34.2
Ablation Study
4D-fy35.0
w/o ΦθL3D/IMG29.3100%100%78%86%94%
w/o ΦθL3D35.188%89%95%92%91%
w/o ΦθLIMG34.570%68%68%69%70%
w/o hybrid SDS33.8100%100%78%88%95%
w/ HexPlane34.595%92%90%92%95%
", + "bbox": [ + 80, + 238, + 468, + 401 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "responds to the cosine similarity between textual CLIP [46] embedding and visual CLIP [46] embedding. The score is bound between 0 and 100, where 100 is best. We calculate the CLIP score for MAV3D using the same procedure we use for our method. Specifically, for each input text prompt, we render a video using the same camera trajectory as MAV3D, i.e., moving around the scene in azimuth with a fixed elevation angle. Subsequently, we score each video frame with CLIP ViT-B/32 and average the scores over all frames and text prompts to derive the final CLIP score.", + "bbox": [ + 75, + 421, + 468, + 571 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "User study. We conduct qualitative comparisons between our method and the baseline, MAV3D, by surveying 26 human evaluators. We use the same head-to-head comparison model as the user survey conducted by MAV3D. Specifically, we present text prompts alongside the corresponding outputs of our method and the baseline method in random order. Evaluators are requested to specify their overall preference for a video, as well as evaluate four specific properties: appearance quality, 3D structure quality, motion quality, and text alignment. In Table 2, we report the percentage of users who prefer each method overall and based on each of the four properties. We conduct $\\chi^2$ -tests to evaluate statistical significance at the $p < 0.05$ level. Further details on the user study are included in the supplementary.", + "bbox": [ + 75, + 579, + 468, + 791 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Results", + "text_level": 1, + "bbox": [ + 76, + 801, + 171, + 816 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We visualize spatio-temporal renderings along with depth maps in comparison to MAV3D in Fig. 4. Although both methods can synthesize 4D scenes, MAV3D noticeably lacks detail. In contrast, our method produces realistic renderings across space and time. We report quantitative metrics in", + "bbox": [ + 75, + 824, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. In terms of CLIP Score and overall preference in the user study 4D-fy outperforms MAV3D. Users indicated a statistically significant preference towards 4D-fy compared to MAV3D in terms of appearance quality, 3D structure quality, text alignment, and overall preference. They rated the motion quality roughly on par with MAV3D, which used a proprietary text-to-video model. For example, overall, $67\\%$ of users prefer our method over $33\\%$ for MAV3D.", + "bbox": [ + 496, + 90, + 890, + 210 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Ablations", + "text_level": 1, + "bbox": [ + 500, + 223, + 609, + 238 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We provide an in-depth analysis motivating our hybrid SDS training scheme by ablating each component and evaluating the use of a 4D neural representation more similar to that of MAV3D. We provide ablations in Table 2 and in Fig. 5.", + "bbox": [ + 496, + 246, + 890, + 306 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Image guidance (w/o $\\nabla_{\\theta}\\mathcal{L}_{3D / \\mathrm{IMG}}$ ). Technically, learning a dynamic 3D scene solely from a text-to-video model without text-to-image guidance is possible. 
To demonstrate the drawbacks of this approach, we present results where we skip the first two stages and directly train the model with text-to-video guidance only. This corresponds to setting $P_{3\\mathrm{D}} = 0$ and $P_{\\mathrm{IMG}} = 0$ . Our experiments reveal that the text-to-video model fails to provide realistic 3D structure and high-quality appearance for generating a dynamic 3D scene.", + "bbox": [ + 496, + 314, + 893, + 450 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3D-aware guidance (w/o $\\nabla_{\\theta}\\mathcal{L}_{\\mathbf{3D}}$ ). We find that using a 3D-aware diffusion model is crucial for generating realistic 3D structures. If we remove the 3D-aware diffusion model, i.e., by setting $P_{\\mathrm{3D}} = 0$ , we can generate scenes with similar motion and high-quality appearance, but the 3D structure is degraded. This is evident for both scenes in Fig. 5.", + "bbox": [ + 496, + 455, + 890, + 547 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "VSD guidance (w/o $\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}$ ). We find that VSD helps provide a realistic scene appearance; if we disable it during scene generation, i.e., $P_{\\mathrm{IMG}} = 0$ , there are some negative effects. For example in Fig. 5, the ice cream cone in the bucket (top row) is more detailed, and the dog's face (bottom row) is sharper (please zoom in).", + "bbox": [ + 496, + 554, + 890, + 645 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Hybrid SDS. To illustrate the impact of our hybrid SDS approach we disable image guidance after the second stage by setting $P_{\\mathrm{3D}} = 0$ and $P_{\\mathrm{IMG}} = 0$ for the third stage only. This aligns with the MAV3D training scheme, where a static model is pre-trained with text-to-image and subsequently fine-tuned with text-to-video. Our quantitative and qualitative analysis shows that this approach results in degraded appearance and 3D structure. We find that incorporating text-to-image, 3D-aware text-to-image, and text-to-video via hybrid SDS in the final optimization stage preserves a realistic appearance and high-quality 3D structure.", + "bbox": [ + 496, + 651, + 890, + 818 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Backbone architecture. Finally, we ablate the hash-grid-based 4D representation by replacing it with the HexPlane [8, 13] architecture. This representation similarly disentangles static and dynamic scene components and can be readily integrated into our pipeline. The HexPlane approach", + "bbox": [ + 496, + 824, + 893, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "8002", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d1b8cf07818ff185d04d318b5afa06dfacda54a5971ea22323fa7f4b80b6cda7.jpg", + "image_caption": [ + "Figure 5. Ablation study. We assess the qualitative impact of removing gradient updates from different models during optimization. Our method without image guidance $(\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D} / \\mathrm{IMG}})$ does not produce realistic appearance and 3D structure. Removing the 3D-aware guidance $(\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}})$ generates high-quality appearance but low-quality 3D structure. Our approach without VSD $(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}})$ reduces the appearance quality. Hybrid SDS is crucial for appearance and 3D structure, while using HexPlane reduces the appearance quality. Best viewed digitally." 
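For reference, the ablation variants discussed above map onto the optimization settings roughly as follows; this is our summary of the stated configurations, phrased with the probability names from Algorithm 1.

```python
# Our reading of the ablation settings described above (Table 2 / Figure 5).
ABLATION_SETTINGS = {
    "w/o 3D and IMG guidance": {"P_3D": 0.0, "P_IMG": 0.0,
                                "note": "skip stages 1-2; text-to-video guidance only"},
    "w/o 3D-aware guidance":   {"P_3D": 0.0},
    "w/o VSD guidance":        {"P_IMG": 0.0},
    "w/o hybrid SDS":          {"P_3D": 0.0, "P_IMG": 0.0, "applies_to": "stage 3 only"},
    "w/ HexPlane":             {"backbone": "HexPlane",
                                "note": "replaces the hash-grid 4D representation"},
}
```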
+ ], + "image_footnote": [], + "bbox": [ + 76, + 88, + 893, + 407 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "fails to match the appearance quality of the hash-grid-based representation. MAV3D uses HexPlane but implements a multi-scale variant with a large 5-layer decoding MLP featuring 128 hidden units. We could not re-implement this approach as the model does not fit on an 80 GB A100 GPU. To allow for a fair comparison, we instead increased the capacity of HexPlane to match the memory consumption of our hash-grid-based representation. We expect that increasing the capacity of HexPlane and longer training times could lead to similar results as our representation.", + "bbox": [ + 75, + 502, + 472, + 652 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 76, + 670, + 194, + 686 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our method synthesizes high-quality 4D scenes from text prompts using a novel hybrid score distillation sampling procedure. Our work alleviates a three-way tradeoff between appearance, 3D structure, and motion and is the first to build on open-source models. We will release the code to facilitate future research in text-to-4D generation.", + "bbox": [ + 75, + 696, + 468, + 787 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations. Although our method produces compelling dynamic 3D scenes, there are several limitations and avenues for future work. First, the complexity of motion in our scenes is limited to simple movements. We believe that our method will directly benefit from future progress in text-to-video generation, as current text-to-video models suffer from low-quality renderings and unrealistic motion. Another way", + "bbox": [ + 75, + 794, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "to improve motion could be exploiting recently proposed dynamic representations, e.g., dynamic 3D Gaussians [37]. Moreover, current metrics in text-to-3D generation are not sufficient, as they mainly rely on image-based metrics and user studies. Designing more sophisticated 3D and 4D metrics is an important direction for future work. Lastly, generating each scene takes a significant amount of time. Concurrent text-to-3D works [23, 28] alleviate this problem by training a large-scale model on 3D data, allowing generation within seconds. Incorporating our hybrid optimization procedure to blend between large-scale pre-training on 2D, 3D, and video data could enable fast text-to-4D generation.", + "bbox": [ + 496, + 502, + 893, + 683 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ethics Statement. We condemn the application of our method for creating realistic fake content intended to harm specific entities or propagate misinformation.", + "bbox": [ + 496, + 690, + 892, + 737 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 750, + 687, + 767 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported by the Natural Sciences and Engineering Research Council of Canada (NSERC) Discovery Grant program, the Digital Research Alliance of Canada, and by the Advanced Research Computing at Simon Fraser University. 
It was also supported in part by ARL grant W911NF-21-2-0104, a Vannevar Bush Faculty Fellowship, a gift from the Adobe Corporation, a PECASE by the ARO, NSF award 1839974, Stanford HAI, and a Samsung GRO.", + "bbox": [ + 496, + 773, + 893, + 887 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8003", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Stable Diffusion version 2. https://github.com/Stability-AI/stablediffusion. Accessed: 2023-10-31. 2", + "[2] Threestudio Github page. https://github.com/threestudio-project/threestudio. Accessed: 2023-10-31.5", + "[3] Zeroscope text-to-video model. https://huggingface.co/cerspense/zeroscope_v2_576w. Accessed: 2023-10-31. 2, 3, 6", + "[4] Giovanni Adorni and Mauro Di Manzo. Natural language input for scene generation. In Proc. EACL, 1983. 3", + "[5] Max Bain, Arsha Nagrani, Gül Varol, and Andrew Zisserman. Frozen in time: A joint video and image encoder for end-to-end retrieval. In Proc. ICCV, 2021. 3", + "[6] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Ji-aming Song, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, Bryan Catanzaro, et al. eDiff-I: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 1, 2", + "[7] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proc. CVPR, 2023. 2, 3", + "[8] Ang Cao and Justin Johnson. HexPlane: A fast representation for dynamic scenes. In Proc. CVPR, 2023. 7", + "[9] Eric R Chan, Koki Nagano, Matthew A Chan, Alexander W Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3D-aware diffusion models. In Proc. ICCV, 2023. 3", + "[10] Angel Chang, Manolis Savva, and Christopher D Manning. Learning spatial knowledge for text to 3D scene generation. In Proc. EMNLP, 2014. 3", + "[11] Kevin Chen, Christopher B Choy, Manolis Savva, Angel X Chang, Thomas Funkhouser, and Silvio Savarese. Text2shape: Generating shapes from natural language by learning joint embeddings. In Proc. ACCV, 2018. 3", + "[12] Bob Coyne and Richard Sproat. Wordseye: An automatic text-to-scene conversion system. In Proc. SIGGRAPH, 2001. 3", + "[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbæk Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proc. CVPR, 2023. 7", + "[14] William Gao, Noam Aigerman, Thibault Groueix, Vova Kim, and Rana Hanocka. Textdeformer: Geometry manipulation using text guidance. In Proc. SIGGRAPH, 2023. 3", + "[15] Jiatao Gu, Alex Trevithick, Kai-En Lin, Joshua M Susskind, Christian Theobalt, Lingjie Liu, and Ravi Ramamoorthi. Nerfdiff: Single-image view synthesis with Nerf-guided distillation from 3d-aware diffusion. In Proc. ICML, 2023. 3", + "[16] Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, and Baining Guo. Vector quantized diffusion model for text-to-image synthesis. In Proc. CVPR, 2022. 
3" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[17] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Imagine your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 3", + "[18] Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. Latent video diffusion models for high-fidelity video generation with arbitrary lengths. arXiv preprint arXiv:2211.13221, 2022. 3", + "[19] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. In Proc. NeurIPS Workshop on Deep Generative Models, 2021. 4", + "[20] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Proc. NeurIPS, 2020. 1, 2, 4", + "[21] Jonathan Ho, William Chan, Chitwan Sahara, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 2, 3", + "[22] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. The Journal of Machine Learning Research, 23(1):2249-2281, 2022. 3", + "[23] Yicong Hong, Kai Zhang, Jiuming Gu, Sai Bi, Yang Zhou, Difan Liu, Feng Liu, Kalyan Sunkavalli, Trung Bui, and Hao Tan. Lrm: Large reconstruction model for single image to 3D. arXiv preprint arXiv:2311.04400, 2023. 8", + "[24] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. In Proc. ICLR, 2021. 5", + "[25] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proc. CVPR, 2022. 1, 3", + "[26] Nikolay Jetchev. Clipmatrix: Text-controlled creation of 3D textured meshes. arXiv preprint arXiv:2109.12922, 2021. 3", + "[27] Yanqin Jiang, Li Zhang, Jin Gao, Weimin Hu, and Yao Yao. Consistent4D: Consistent $360^{\\circ}$ dynamic object generation from monocular video. arXiv preprint arXiv:2311.02848, 2023. 3", + "[28] Jiahao Li, Hao Tan, Kai Zhang, Zexiang Xu, Fujun Luan, Yinghao Xu, Yicong Hong, Kalyan Sunkavalli, Greg Shakhnarovich, and Sai Bi. Instant3D: Fast text-to-3D with sparse-view generation and large reconstruction model. arXiv preprint arXiv:2311.06214, 2023. 8", + "[29] Weiyu Li, Rui Chen, Xuelin Chen, and Ping Tan. Sweetdreamer: Aligning geometric priors in 2D diffusion for consistent text-to-3D. arXiv preprint arXiv:2310.02596, 2023. 2", + "[30] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3D: High-resolution text-to-3D content creation. In Proc. CVPR, 2023. 3, 6", + "[31] Yukang Lin, Haonan Han, Chaoqun Gong, Zunnan Xu, Yachao Zhang, and Xiu Li. Consistent123: One image to highly consistent 3D asset using case-aware diffusion priors. arXiv preprint arXiv:2309.17261, 2023. 3" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "8004", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[32] Huan Ling, Seung Wook Kim, Antonio Torralba, Sanja Fidler, and Karsten Kreis. 
Align your gaussians: Text-to-4D with dynamic 4D gaussians and composed diffusion models. arXiv preprint arXiv:2312.13763, 2023. 3", + "[33] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. In Proc. ICCV, 2023. 2, 3", + "[34] Ying Liu, Dengsheng Zhang, Guojun Lu, and Wei-Ying Ma. A survey of content-based image retrieval with high-level semantics. Pattern Recognition, 40(1):262-282, 2007. 2", + "[35] Yuan Liu, Cheng Lin, Zijiao Zeng, Xiaoxiao Long, Lingjie Liu, Taku Komura, and Wenping Wang. Syncdreamer: Generating multiview-consistent images from a single-view image. arXiv preprint arXiv:2309.03453, 2023. 3", + "[36] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. ACM Trans. Graph., 2019. 3", + "[37] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3D Gaussians: Tracking by persistent dynamic view synthesis. arXiv preprint arXiv:2308.09713, 2023. 8", + "[38] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3", + "[39] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 2022. 3", + "[40] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. In Proc. ICML, 2022. 3", + "[41] Zijie Pan, Zeyu Yang, Xiatian Zhu, and Li Zhang. Fast dynamic 3D object generation from a single-view video. arXiv preprint arXiv:2401.08742, 2024. 3", + "[42] Dong Huk Park, Samaneh Azadi, Xihui Liu, Trevor Darrell, and Anna Rohrbach. Benchmark for compositional text-to-image synthesis. In Proc. NeurIPS, 2021. 6", + "[43] Ryan Po, Wang Yifan, Vladislav Golyanik, Kfir Aberman, Jonathan T Barron, Amit H Bermano, Eric Ryan Chan, Tali Dekel, Aleksander Holynski, Angjoo Kanazawa, et al. State of the art on diffusion models for visual computing. arXiv preprint arXiv:2310.07204, 2023. 2", + "[44] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. DreamFusion: Text-to-3D using 2D diffusion. In Proc. ICLR, 2023. 1, 3, 6", + "[45] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3D object generation using both 2D and 3D diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 3", + "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Proc. ICML, 2021. 2, 3, 7", + "[47] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In Proc. ICML, 2021. 2", + "[48] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. 
Hierarchical text-conditional image generation with CLIP latents. arXiv preprint arXiv:2204.06125, 2022. 3, 4", + "[49] Scott Reed, Zeynep Akata, Xinchen Yan, Lajanugen Logeswaran, Bernt Schiele, and Honglak Lee. Generative adversarial text to image synthesis. In Proc. ICML, 2016. 2", + "[50] Jiawei Ren, Liang Pan, Jiaxiang Tang, Chi Zhang, Ang Cao, Gang Zeng, and Ziwei Liu. DreamGaussian4D: Generative 4D Gaussian splatting. arXiv preprint arXiv:2312.17142, 2023. 3", + "[51] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Bjorn Ommer. High-resolution image synthesis with latent diffusion models. In Proc. CVPR, 2022, 1, 2, 3, 4, 6", + "[52] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Proc. NeurIPS, 2022. 1, 2, 4", + "[53] Aditya Sanghi, Hang Chu, Joseph G Lambourne, Ye Wang, Chin-Yi Cheng, Marco Fumero, and Kamal Rahimi Malekshan. Clip-forge: Towards zero-shot text-to-shape generation. In Proc. CVPR, 2022. 3", + "[54] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. Proc. NeurIPS, 2022. 1, 2", + "[55] Yichun Shi, Peng Wang, Jianglong Ye, Mai Long, Kejie Li, and Xiao Yang. MVDream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512, 2023. 2, 3, 4, 5, 6", + "[56] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022. 1, 2, 3", + "[57] Uriel Singer, Shelly Sheynin, Adam Polyak, Oron Ashual, Iurii Makarov, Filippos Kokkinos, Naman Goyal, Andrea Vedaldi, Devi Parikh, Justin Johnson, et al. Text-to-4d dynamic scene generation. In Proc. ICML, 2023. 1, 3, 5, 6, 7", + "[58] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In Proc. ICML, 2015. 1", + "[59] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. Proc. ICLR, 2021. 2", + "[60] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In Proc. ICLR, 2021. 1" + ], + "bbox": [ + 503, + 92, + 893, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "8005", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[61] Matteo Stefanini, Marcella Cornia, Lorenzo Baraldi, Silvia Cascianelli, Giuseppe Fiameni, and Rita Cucchiara. From show to tell: A survey on deep learning-based image captioning. IEEE Trans. Pattern Anal. Mach. Intell., 2022. 2", + "[62] Junshu Tang, Tengfei Wang, Bo Zhang, Ting Zhang, Ran Yi, Lizhuang Ma, and Dong Chen. Make-it-3D: High-fidelity 3D creation from a single image with diffusion prior. arXiv preprint arXiv:2303.14184, 2023. 3", + "[63] Ayush Tewari, Tianwei Yin, George Cazenavette, Semon Rezchikov, Joshua B Tenenbaum, Frédo Durand, William T Freeman, and Vincent Sitzmann. Diffusion with forward models: Solving stochastic inverse problems without direct supervision. 
arXiv preprint arXiv:2306.11719, 2023. 3", + "[64] Haithem Turki, Jason Y Zhang, Francesco Ferroni, and Deva Ramanan. Suds: Scalable urban dynamic scenes. In Proc CVPR, 2023. 3", + "[65] Ruben Villegas, Mohammad Babaeizadeh, Pieter-Jan Kindermans, Hernan Moraldo, Han Zhang, Mohammad Taghi Saffar, Santiago Castro, Julius Kunze, and Dumitru Erhan. Phenaki: Variable length video generation from open domain textual description. arXiv preprint arXiv:2210.02399, 2022. 3", + "[66] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-NeRF: Text-and-image driven manipulation of neural radiance fields. In Proc. CVPR, 2022. 3", + "[67] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A Yeh, and Greg Shakhnarovich. Score Jacobian Chaining: Lifting pretrained 2D diffusion models for 3D generation. In Proc. CVPR, 2023. 1", + "[68] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 3", + "[69] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 2, 3, 4, 5, 6", + "[70] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. arXiv preprint arXiv:2306.02018, 2023. 3", + "[71] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3D generation with variational score distillation. Proc. NeurIPS, 2023. 1, 2, 3, 4, 5, 6", + "[72] Ruiqi Wu, Liangyu Chen, Tong Yang, Chunle Guo, Chongyi Li, and Xiangyu Zhang. Lamp: Learn a motion pattern for few-shot-based video generation. arXiv preprint arXiv:2310.10769, 2023. 2, 3", + "[73] Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. AttnGAN: Fine-grained text to image generation with attentional generative adversarial networks. In Proc. CVPR, 2018. 2", + "[74] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In Proc. CVPR, 2022. 3" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[75] Yuyang Yin, Dejia Xu, Zhangyang Wang, Yao Zhao, and Yunchao Wei. 4DGen: Grounded 4D content generation with spatial-temporal consistency. arXiv preprint arXiv:2312.17225, 2023. 3", + "[76] Paul Yoo, Jiaxian Guo, Yutaka Matsuo, and Shixiang Shane Gu. Dreamsparse: Escaping from Plato's cave with 2d diffusion model given sparse views. arXiv preprint arXiv:2306.03414, 2023. 3", + "[77] Quanzeng You, Hailin Jin, Zhaowen Wang, Chen Fang, and Jiebo Luo. Image captioning with semantic attention. In Proc. CVPR, 2016. 2", + "[78] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, Ben Hutchinson, Wei Han, Zarana Parekh, Xin Li, Han Zhang, Jason Baldridge, and Yonghui Wu. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022. 2", + "[79] Lili Yu, Bowen Shi, Ramakanth Pasunuru, Benjamin Muller, Olga Golovneva, Tianlu Wang, Arun Babu, Binh Tang, Brian Karrer, Shelly Sheynin, et al. 
Scaling autoregressive multimodal models: Pretraining and instruction tuning. arXiv preprint arXiv:2309.02591, 2023. 2", + "[80] Han Zhang, Tao Xu, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, and Dimitris N Metaxas. StackGAN: Text to photo-realistic image synthesis with stacked generative adversarial networks. In Proc. ICCV, 2017. 2", + "[81] Yuyang Zhao, Zhiwen Yan, Enze Xie, Lanqing Hong, Zhenguo Li, and Gim Hee Lee. Animate124: Animating one image to 4D dynamic scene. arXiv preprint arXiv:2311.14603, 2023. 3", + "[82] Yufeng Zheng, Xueting Li, Koki Nagano, Sifei Liu, Otmar Hilliges, and Shalini De Mello. A unified approach for text-and image-guided 4D scene generation. arXiv preprint arXiv:2311.16854, 2023. 3", + "[83] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 3" + ], + "bbox": [ + 501, + 92, + 893, + 641 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "8006", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/7295c7f1-f21d-431e-aa65-0a0fb95fe12c_model.json b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/7295c7f1-f21d-431e-aa65-0a0fb95fe12c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..7cbf006b3ac430fca93b33520dffe5f9de118e9b --- /dev/null +++ b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/7295c7f1-f21d-431e-aa65-0a0fb95fe12c_model.json @@ -0,0 +1,2367 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.123, + 0.131, + 0.848, + 0.154 + ], + "angle": 0, + "content": "4D-fly: Text-to-4D Generation Using Hybrid Score Distillation Sampling" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.18, + 0.878, + 0.217 + ], + "angle": 0, + "content": "Sherwin Bahmani\\(^{1,2}\\) Ivan Skorokhodov\\(^{3,4}\\) Victor Rong\\(^{1,2}\\) Gordon Wetzstein\\(^{5}\\) Leonidas Guibas\\(^{5}\\) Peter Wonka\\(^{3}\\) Sergey Tulyakov\\(^{4}\\) Jeong Joon Park\\(^{6}\\) Andrea Tagliafasacchi\\(^{1,7,8}\\) David B. Lindell\\(^{1,2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.218, + 0.862, + 0.234 + ], + "angle": 0, + "content": "1University of Toronto 2Vector Institute 3KAUST 4Snap Inc. 5Stanford University 6University of Michigan 7SFU 8Google" + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.258, + 0.495, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.258, + 0.895, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.501, + 0.893, + 0.543 + ], + "angle": 0, + "content": "Figure 1. Text-to-4D Synthesis. We present 4D-fy, a technique that synthesizes 4D (i.e., dynamic 3D) scenes from a text prompt. We show scenes generated from two text prompts for different viewpoints (vertical dimension) at different time steps (horizontal dimension). 
Video results can be viewed on our website: https://sherwinbahmani.github.io/4dfy." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.556, + 0.314, + 0.571 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.581, + 0.473, + 0.883 + ], + "angle": 0, + "content": "Recent breakthroughs in text-to-4D generation rely on pre-trained text-to-image and text-to-video models to generate dynamic 3D scenes. However, current text-to-4D methods face a three-way tradeoff between the quality of scene appearance, 3D structure, and motion. For example, text-to-image models and their 3D-aware variants are trained on internet-scale image datasets and can be used to produce scenes with realistic appearance and 3D structure—but no motion. Text-to-video models are trained on relatively smaller video datasets and can produce scenes with motion, but poorer appearance and 3D structure. While these models have complementary strengths, they also have opposing weaknesses, making it difficult to combine them in a way that alleviates this three-way tradeoff. Here, we introduce hybrid score distillation sampling, an alternating optimization procedure that blends supervision signals from multiple pre-trained diffusion models and incorporates benefits of each for high-fidelity text-to-4D generation. Using hybrid SDS, we demonstrate synthesis of 4D scenes with compelling appearance, 3D structure, and motion." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.556, + 0.631, + 0.571 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.582, + 0.895, + 0.734 + ], + "angle": 0, + "content": "The advent of internet-scale image-text datasets [54] and advances in diffusion models [20, 58, 60] have led to new capabilities in stable, high-fidelity image generation from text prompts [6, 51, 52]. Recent methods have also shown that large-scale text-to-image or text-to-video [56] diffusion models learn useful priors for 3D [25, 44] and 4D scene generation [57]. Our work focuses on text-to-4D scene generation (Fig. 1), which promises exciting new capabilities for applications in augmented and virtual reality, computer animation, and industrial design." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.734, + 0.895, + 0.885 + ], + "angle": 0, + "content": "Current techniques for generating 3D or 4D scenes from text prompts typically iteratively optimize a representation of the scene using supervisory signals from a diffusion model [44, 67, 71]. Specifically, these methods render an image of a 3D scene, add noise to the rendered image, use a pre-trained diffusion model to denoise the rendered image, and estimate gradients used to update the 3D representation [44, 67]. This procedure, known as score distillation sampling (SDS) [44], underpins most recent methods for text-conditioned scene generation." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.886, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Using SDS for text-to-4D generation requires navigating" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7996" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.473, + 0.146 + ], + "angle": 0, + "content": "Table 1. 
Text-to-4D models face a tradeoff between the quality of appearance, 3D structure, and motion depending on the type of generative model used for score distillation sampling (SDS): text-to-image (T2I), 3D-aware T2I, or text-to-video (T2V)." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.156, + 0.468, + 0.244 + ], + "angle": 0, + "content": "
SDS modelappearance3D structuremotion
T2I [6, 51, 52, 79]highlowN/A
3D-aware T2I [29, 55]mediumhighN/A
T2V [7, 21, 56, 69, 72]lowlowhigh
Our methodmediumhighmedium
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.271, + 0.473, + 0.602 + ], + "angle": 0, + "content": "a three-way tradeoff between the quality of appearance, 3D structure, and motion (see Table 1); existing techniques obtain satisfactory results in just one or two of these categories. For example, while SDS produces images that appear realistic when rendering a generated scene from any particular viewpoint, inspecting multiple viewpoints can reveal that the scene has several faces or heads, replicated appendages, or other incorrectly repeated 3D structures—an issue now referred to as the \"Janus problem\" [55].1 One way to improve 3D structure is to use SDS with a 3D-aware diffusion model that is trained to generate images from different camera viewpoints [33]. But 3D-aware models sacrifice appearance quality as they require fine-tuning on synthetic datasets of posed images [55]. Incorporating motion into a scene using SDS with a text-to-video model [69] typically degrades the appearance relative to static scenes generated with text-to-image models, which are more realistic (see Fig. 2). While different types of diffusion models thus have complementary qualities, they also have opposing weaknesses (Table 1). Therefore, it is not trivial to combine them in a way that yields text-to-4D generation with high-quality appearance, 3D structure, and motion." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.604, + 0.472, + 0.876 + ], + "angle": 0, + "content": "Here, we propose a method for text-to-4D scene generation that alleviates this three-way tradeoff using hybrid SDS, an alternating optimization scheme that blends gradient updates from multiple pre-trained diffusion models and synthesizes 4D scenes using the best qualities of each. The method consists of three stages of optimization: (1) we use a 3D-aware text-to-image model [55] to generate an initial static 3D scene (without the Janus problem); (2) we continue the optimization by blending in alternating supervision with variational SDS [71] and a text-to-image model to improve appearance; (3) we blend in alternating supervision using video SDS with a text-to-video model [69] to add motion to the scene. By smoothly incorporating supervisory signals from these three diffusion models throughout the training process, we achieve text-driven 4D scene generation with state-of-the-art quality in terms of appearance, 3D structure, and motion. Overall we provide the following contributions. We introduce hybrid SDS, a technique that extracts desir" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.089, + 0.895, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.322, + 0.895, + 0.392 + ], + "angle": 0, + "content": "Figure 2. Comparing text-to-image and text-to-video models. Rendered frames from Stable Diffusion version 2.1 (top; text-to-image) [1] and Zeroscope version 2 (bottom; text-to-video) [3] show significant disparity in appearance, with the text-to-image model appearing far more realistic." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.412, + 0.892, + 0.456 + ], + "angle": 0, + "content": "able qualities from multiple pre-trained diffusion models and alleviates a tradeoff between appearance, 3D structure, and motion in text-to-4D scene generation." 
+ }, + { + "type": "text", + "bbox": [ + 0.501, + 0.457, + 0.892, + 0.501 + ], + "angle": 0, + "content": "- We provide a quantitative and qualitative evaluation of the method, and we explore the three-way tradeoff space with ablation studies to facilitate future research." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.502, + 0.894, + 0.547 + ], + "angle": 0, + "content": "- We demonstrate text-to-4D generation based on open-source pretrained models and will make all codes and evaluation procedures publicly available." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.548, + 0.892, + 0.577 + ], + "angle": 0, + "content": "- We present state-of-the-art results for the task of text-to-4D generation." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.457, + 0.894, + 0.577 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.594, + 0.642, + 0.61 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.621, + 0.894, + 0.697 + ], + "angle": 0, + "content": "Our method is related to techniques from multiple areas of generative modeling, including text-to-image, text-to-video, and text-to-3D models. For more extensive discussions of related works, we refer readers to a recent state-of-the-art report on diffusion models [43]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.704, + 0.895, + 0.824 + ], + "angle": 0, + "content": "Text-to-image generation. Methods for generating images from text prompts are a relatively new innovation, first demonstrated using generative adversarial networks [49, 73, 80]. The problem itself is also related to other methods for text-based image retrieval [34] or image-conditioned text generation [61, 77]. More recently, models trained on text-image datasets with billions of samples [54] have become the state of the art for this task [51]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Diffusion models [20, 59] are a popular architecture for generative modeling on large-scale datasets, and autoregressive models have also shown promising results [47, 78]. Typically, these methods exploit a pretrained text encoder, such as CLIP [46], to encode the text prompt into a feature" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.451, + 0.901 + ], + "angle": 0, + "content": "1Referring to the two-faced Roman god of beginnings and endings." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7997" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.228 + ], + "angle": 0, + "content": "vector used to condition the diffusion model [40, 48]. In diffusion models, high-resolution (i.e., megapixel) image generation is achieved by applying repeated upsampling layers [22, 48] or performing diffusion in the lower-resolution latent space of an autoencoder and then decoding the result to recover an image at the nominal resolution [16, 51]. Our work incorporates two open-source text-to-image diffusion models: Stable Diffusion [51] and MVDream [55] (a recent 3D-aware diffusion model) to enable 4D scene generation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.238, + 0.473, + 0.601 + ], + "angle": 0, + "content": "Text-to-video generation. Our work relies on the burgeoning field of video generation via diffusion models, an area that is somewhat constrained by the limited scale of video datasets. 
To counteract this, methods often utilize a hybrid training approach on both image and video datasets, such as WebVid-10M [5], HD-VG-130M [69], or HD-VILA-100M [74]. Recent approaches in this field typically employ variations of pixel-space upsampling (both in space and time) [21] or latent space upsampling to improve spatial and temporal resolution [17, 18, 70, 83]. Autoregressive models distinguish themselves by their ability to generate videos of varying lengths [65]. Further improvements in video synthesis have been achieved by finetuning pre-trained text-to-image diffusion models on video data [7, 56, 72], or separating the content and motion generation process by using an initial image frame as a starting point [17, 72]. Despite recent advances in text-to-video synthesis, the fidelity of generated videos still lags behind that of static image generation (see Fig. 2) and so they perform poorly when used directly with SDS for text-to-4D generation. Instead, our work leverages an open-source latent space text-to-video diffusion model called Zeroscope [3] (extended from the Modelscope architecture [68]) together with other pre-trained, open-source diffusion models using hybrid SDS." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.61, + 0.473, + 0.882 + ], + "angle": 0, + "content": "Text-to-3D generation. Early methods for text-to-3D generation relied on parsers to convert input text to a semantic representation and synthesized scenes from an object database [4, 10, 12]. Later, automated, data-driven methods used multi-modal datasets [11], and pre-trained models, such as CLIP [46], to edit or stylize an input 3D mesh [14, 26] or a radiance field [66]. More recently, CLIP-based supervision enabled synthesis of entire 3D scenes [25, 53], and these techniques evolved into the most recent approaches, which optimize a mesh or radiance field based on SDS supervision [30, 44, 71]. The quality of their 3D structures has been improved by applying diffusion models that consider multiple viewpoints [31, 33, 55]. Alternatively, recent advancements have seen a shift towards using diffusion or transformer models to transform an input 2D image into a 3D representation for novel-view synthesis [9, 15, 35, 45, 62, 63, 76]. Still, these techniques do not yet support generating 4D scenes." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.473, + 0.901 + ], + "angle": 0, + "content": "Our work is most closely related to Make-A-" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.228 + ], + "angle": 0, + "content": "Video3D (MAV3D) [57], a recent method for text-to-4D generation that integrates SDS-based supervision in two separate stages: first with a text-to-image model and subsequently with a text-to-video model. Similar to MAV3D, we aim to generate dynamic 3D scenes; however, our approach uses hybrid SDS, which allows gradient updates from multiple models to be smoothly blended together in an alternating optimization. Our approach generates high-quality dynamic 3D scenes and does not suffer from Janus problems." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.234, + 0.895, + 0.28 + ], + "angle": 0, + "content": "Concurrent works. Concurrent works on text-to-4D [32, 82], image-to-4D [50, 81, 82], and video-to-4D [27, 41, 75] similarly use recent diffusion models for 4D generation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.291, + 0.591, + 0.307 + ], + "angle": 0, + "content": "3. 
Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.317, + 0.894, + 0.408 + ], + "angle": 0, + "content": "Our approach for text-to-4D generation builds upon a hash-encoding-based neural representation [39] that implicitly decomposes the scene into static and dynamic feature grids [64]. In this section we overview our representation for 4D neural rendering and describe the optimization procedure based on hybrid SDS (see Fig. 3)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.416, + 0.704, + 0.433 + ], + "angle": 0, + "content": "3.1. 4D Neural Rendering" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.439, + 0.895, + 0.62 + ], + "angle": 0, + "content": "Volumetric neural rendering methods represent a scene using a neural representation to parameterize the attenuation and emission of light at every point in 3D space [36, 38]. We can use such a representation to render an image by casting a ray from the camera center of projection, through each pixel location, and into the scene. For sampled points along the ray \\(\\pmb{\\mu} \\in \\mathbb{R}^3\\), we query a neural representation to retrieve a volumetric density \\(\\tau \\in \\mathbb{R}_+\\) and color \\(\\mathbf{c} \\in \\mathbb{R}_+^3\\), which describe attenuation and emission of light, respectively, at a particular point. Then, the resulting density and color samples are alpha-composed to recover the color of a rendered pixel \\(\\mathbf{C}\\) as" + }, + { + "type": "equation", + "bbox": [ + 0.573, + 0.628, + 0.895, + 0.661 + ], + "angle": 0, + "content": "\\[\n\\mathbf {C} = \\sum_ {i} w _ {i} \\mathbf {c} _ {i}, w _ {i} = \\alpha \\prod_ {j < i} (1 - \\alpha_ {j}), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.67, + 0.895, + 0.717 + ], + "angle": 0, + "content": "where \\(\\alpha_{i} = 1 - e^{-\\tau_{i}\\| \\pmb{\\mu}_{i} - \\pmb{\\mu}_{i + 1}\\|\\). We query the neural representation using an additional input time variable \\(t\\), which enables modeling time-varying density and color." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.717, + 0.892, + 0.761 + ], + "angle": 0, + "content": "We illustrate the neural representation in Fig. 3; it consists of two multi-resolution hash tables to disentangle static and dynamic scene modeling." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.762, + 0.894, + 0.837 + ], + "angle": 0, + "content": "Following Müller et al. [39], the static hash table stores learnable feature vectors that are indexed by a voxel-lookup and hashing operation and decoded into density and color using two small multilayer perceptrons (MLPs). Concretely, we consider the neural representation" + }, + { + "type": "equation", + "bbox": [ + 0.638, + 0.846, + 0.892, + 0.863 + ], + "angle": 0, + "content": "\\[\n\\mathcal {N} _ {\\theta}: \\boldsymbol {\\mu}, t \\rightarrow \\tau , \\mathbf {c} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.895, + 0.901 + ], + "angle": 0, + "content": "with \\(\\theta = \\{\\theta_{\\mathrm{static}},\\theta_{\\mathrm{dynamic}},\\theta_{\\mathrm{MLP}}\\}\\) denoting all learnable parameters from the static and dynamic hash tables and the MLPs." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7998" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.089, + 0.898, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.256, + 0.896, + 0.355 + ], + "angle": 0, + "content": "Figure 3. Overview. 
A 4D radiance field is parameterized using a neural representation with a static and dynamic multiscale hash table of features. Images and videos are rendered from the representation using volume rendering, and we supervise the representation using hybrid score distillation sampling—a technique that combines gradients from multiple types of pre-trained diffusion models. In the first stage of training we use gradients \\(\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}}\\) from a 3D-aware text-to-image model (3D-T2I) to iteratively optimize a representation without the Janus problem. Next, we blend in gradient supervision using variational SDS with a text-to-image model (T2I) to improve the appearance (i.e., we alternate supervision between \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}\\) and \\(\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}}\\)). In the last stage we incorporate gradients \\((\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{VID}})\\) from a text-to-video model (T2V) to add motion to the scene, and we update the scene using the other models in an alternating fashion." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.375, + 0.474, + 0.558 + ], + "angle": 0, + "content": "For a given \\(\\mu\\), we query the static hash table by identifying the closest voxel at each scale \\(1 \\leq s \\leq S\\). Then, we trilinearly interpolate the feature values from the voxel vertices after retrieving them from the hash table. Retrieved features from each scale are concatenated as \\(\\mathbf{f}_{\\mathrm{static}} = \\mathbf{f}_{\\mathrm{static}}^{(1)} \\oplus \\dots \\oplus \\mathbf{f}_{\\mathrm{static}}^{(S)}\\). We follow the same procedure to query the dynamic hash table given \\((\\mu, t)\\), except we use quadrilinear interpolation to interpolate feature values. The resulting features from the static and dynamic hash tables are added as \\(\\mathbf{f} = \\mathbf{f}_{\\mathrm{static}} + \\mathbf{f}_{\\mathrm{dynamic}}\\). We do not model view-dependent effects in the feature encoding. Finally, we decode density and color as \\(\\mathrm{MLP}_{\\tau}(\\mathbf{f})\\) and \\(\\mathrm{MLP}_{\\mathrm{c}}(\\mathbf{f})\\), respectively." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.568, + 0.384, + 0.584 + ], + "angle": 0, + "content": "3.2. Hybrid Score Distillation Sampling" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.592, + 0.471, + 0.667 + ], + "angle": 0, + "content": "We leverage the 4D representation along with SDS to create dynamic 3D scenes from a text prompt. Our hybrid approach incorporates three different flavors of SDS that are smoothly merged during an alternating optimization procedure to improve the structure and quality of the 4D model:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.668, + 0.47, + 0.698 + ], + "angle": 0, + "content": "1. SDS applied to a 3D-aware text-to-image diffusion model to optimize a static scene without the Janus problem." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.698, + 0.472, + 0.742 + ], + "angle": 0, + "content": "2. Variational score distillation sampling (VSD; a modified version of SDS [71]) using a standard text-to-image model [51] to improve the appearance of the static scene." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.743, + 0.471, + 0.787 + ], + "angle": 0, + "content": "3. Video SDS using a text-to-video model [69], which extends SDS to multiple video frames and adds motion to the scene." 
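A minimal NumPy sketch of the alpha compositing in Eq. (1) of Sec. 3.1 above, with hypothetical densities, colors, and sample spacings standing in for values queried from the 4D representation (this is an illustrative sketch, not the paper's released code):

import numpy as np

def composite_ray(tau, colors, deltas):
    """Alpha-composite density/color samples along one ray (Eq. 1).

    tau:    (N,)   volumetric densities at the sampled points
    colors: (N, 3) emitted RGB colors at the sampled points
    deltas: (N,)   spacings ||mu_i - mu_{i+1}|| between consecutive samples
    """
    alpha = 1.0 - np.exp(-tau * deltas)                            # per-sample opacity alpha_i
    trans = np.cumprod(np.concatenate(([1.0], 1.0 - alpha[:-1])))  # prod_{j<i} (1 - alpha_j)
    weights = alpha * trans                                        # w_i in Eq. (1)
    return (weights[:, None] * colors).sum(axis=0)                 # pixel color C

# toy values for a single ray with three samples
print(composite_ray(np.array([0.5, 2.0, 4.0]), np.eye(3), np.full(3, 0.1)))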
+ }, + { + "type": "list", + "bbox": [ + 0.078, + 0.668, + 0.472, + 0.787 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.789, + 0.47, + 0.82 + ], + "angle": 0, + "content": "In the following, we describe each type of SDS and how it is used for text-to-4D generation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.473, + 0.902 + ], + "angle": 0, + "content": "3D-aware scene optimization. We first consider optimizing a static scene using SDS with a 3D-aware text-to-image diffusion model [55]. The diffusion model is pre-trained using a stochastic forward process that slowly adds Gaussian noise to multiview images \\(\\mathbf{x}\\) over timesteps \\(0\\leq t_{d}\\leq T_{d}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.375, + 0.895, + 0.646 + ], + "angle": 0, + "content": "With increasing \\( t_d \\), the process yields noisy images \\( \\mathbf{z}_{t_d} \\) that, at \\( t_d = T_d \\), are close to zero-mean Gaussian. After training, the model reverses this process to add structure to the noisy images. It predicts \\( \\hat{\\mathbf{x}}_{\\phi}(\\mathbf{z}_{t_d}; t_d, \\mathbf{y}, \\mathbf{T}) \\), which approximates the output of an optimal denoiser at each timestep \\( t_d \\), conditioned on a text embedding \\( \\mathbf{y} \\) [48, 51, 52] and the camera extrinsics \\( \\mathbf{T} \\) corresponding to each image. In practice, text-to-image diffusion models typically predict the noise content \\( \\epsilon_{\\phi} \\) rather than the denoised image \\( \\hat{\\mathbf{x}}_{\\phi} \\). But note that the denoised image can still be obtained as \\( \\hat{\\mathbf{x}}_{\\phi}(\\mathbf{z}_{t_d}; t_d, \\mathbf{y}, \\mathbf{T}) \\propto \\mathbf{z}_{t_d} - \\epsilon_{\\phi}(\\mathbf{z}_{t_d}; t_d, \\mathbf{y}, \\mathbf{T}) \\), i.e., by subtracting the predicted noise from the noisy image [20]. We implement 3D-aware SDS by rendering multiple images \\( \\mathbf{x}_{\\theta} \\) from the neural representation, adding noise \\( \\epsilon \\), and using the 3D-aware diffusion model [55] to predict the noise \\( \\epsilon_{\\phi} \\) using classifier-free guidance [19]. To update the parameters \\( \\theta \\) of the neural representation, we use the 3D-aware SDS gradient:" + }, + { + "type": "equation", + "bbox": [ + 0.509, + 0.653, + 0.892, + 0.698 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\theta} \\mathcal {L} _ {\\mathrm {3 D}} = \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}, \\mathbf {T}} \\left[ w (t _ {d}) \\left(\\boldsymbol {\\epsilon} _ {\\phi} \\left(\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}, \\mathbf {T}\\right) - \\boldsymbol {\\epsilon}\\right) \\frac {\\partial \\mathbf {x} _ {\\theta}}{\\partial \\theta} \\right], \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.699, + 0.895, + 0.789 + ], + "angle": 0, + "content": "where \\( w(t_{d}) \\) is a weighting function that depends on the diffusion timestep, and we add a stop gradient to the output of the diffusion model [55]. Intuitively, the SDS loss queries the diffusion model to see how it adds structure to an image, then this information is used to backpropagate gradients to the scene representation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.896, + 0.902 + ], + "angle": 0, + "content": "Improving appearance using VSD. We incorporate an additional loss term based on VSD [71] to improve the appearance of images rendered from the scene. 
This term uses a pre-trained text-to-image model [51] along with a finetuning scheme that improves image quality over the 3D-aware text-to-image model alone. We follow Wang et al. [71] and augment the standard SDS gradient with the output of an" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7999" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.471, + 0.137 + ], + "angle": 0, + "content": "additional text-to-image diffusion model that is finetuned using a low-rank adaptation [24], during scene optimization. Specifically, we have" + }, + { + "type": "equation", + "bbox": [ + 0.08, + 0.144, + 0.473, + 0.178 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\theta} \\mathcal {L} _ {\\mathrm {I M G}} = \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}, \\mathbf {T}} \\left[ w (t _ {d}) \\left(\\boldsymbol {\\epsilon} _ {\\phi} (\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}) - \\boldsymbol {\\epsilon} _ {\\phi} ^ {\\prime} (\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}, \\mathbf {T})\\right) \\frac {\\partial \\mathbf {x} _ {\\theta}}{\\partial \\theta} \\right],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.18, + 0.47, + 0.255 + ], + "angle": 0, + "content": "where \\(\\epsilon_{\\phi}^{\\prime}\\) is the noise predicted using a finetuned version of the diffusion model that incorporates additional conditioning from the camera extrinsics \\(\\mathbf{T}\\); here, we let \\(\\mathbf{z}_{t_d}\\) represent a noisy version of a single image rendered from \\(\\mathcal{N}_{\\theta}\\). The model is finetuned using the standard diffusion objective" + }, + { + "type": "equation", + "bbox": [ + 0.138, + 0.264, + 0.47, + 0.286 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\theta} \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}, \\mathbf {T}} \\left[ \\| \\boldsymbol {\\epsilon} _ {\\phi} ^ {\\prime} \\left(\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}, \\mathbf {T}\\right) - \\boldsymbol {\\epsilon} \\| _ {2} ^ {2} \\right]. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.295, + 0.471, + 0.371 + ], + "angle": 0, + "content": "Note that, different from the original description of VSD [71], we find we can omit the simultaneous optimization over multiple scene samples (i.e. the variational component of [71]), which reduces memory requirements without significantly degrading appearance." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.377, + 0.471, + 0.468 + ], + "angle": 0, + "content": "Adding motion with Video SDS. Last, we use supervision from a text-to-video diffusion model [69] to add motion to the generated scene. This procedure extends the original SDS gradient by incorporating structure added by the diffusion model to all noisy video frames [57]. The video SDS gradient is given as" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.476, + 0.47, + 0.51 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\theta} \\mathcal {L} _ {\\mathrm {V I D}} = \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}} \\left[ w (t _ {d}) \\left(\\epsilon_ {\\phi} \\left(\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}\\right) - \\boldsymbol {\\epsilon}\\right) \\frac {\\partial \\mathbf {X} _ {\\theta}}{\\partial \\theta} \\right]. 
\\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.517, + 0.47, + 0.58 + ], + "angle": 0, + "content": "To simplify notation, we re-use \\(\\epsilon_{\\phi}\\) and \\(\\epsilon\\) to here represent the predicted and actual noise for each video frame, and we let \\(\\mathbf{X}_{\\theta}\\) be a collection of \\(V\\) video frames \\(\\mathbf{X}_{\\theta} = [\\mathbf{x}_{\\theta}^{(1)},\\dots,\\mathbf{x}_{\\theta}^{(V)}]^{T}\\) rendered from the representation." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.584, + 0.47, + 0.643 + ], + "angle": 0, + "content": "Optimization procedure - Algorithm 1. We optimize the 4D representation in three stages that smoothly blend supervision in alternating steps from (1) 3D-aware SDS, (2) VSD, and (3) video SDS." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.645, + 0.471, + 0.78 + ], + "angle": 0, + "content": "Stage 1. In the first stage of optimization, we update \\(\\mathcal{N}_{\\theta}\\) using gradients from 3D-aware SDS until convergence. Since this stage focuses on optimizing a static scene, we freeze (i.e. do not update) the parameters of the dynamic hash table \\(\\mathbf{f}_{\\mathrm{dynamic}}\\) and only update the static hash table and decoder MLP. We set the total number of first-stage iterations \\(N_{\\mathrm{stage - 1}}\\) to match that of Shi et al. [55], which allows the optimization to proceed until there are no distinguishable changes in the rendered scene from one iteration to the next." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.78, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Stage 2. Next, we add VSD gradients using an alternating optimization procedure. At each iteration, we randomly select to update the model using \\(\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}}\\) or \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}\\) with probability \\(P_{3\\mathrm{D}}\\) and \\(P_{\\mathrm{IMG}}\\). We continue this alternating optimization for \\(N_{\\mathrm{stage - 2}}\\) iterations, until convergence. As we show in the next section, this stage of optimization results in improved appearance compared to using \\(\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}}\\) alone while also being free of the Janus problem." 
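A minimal PyTorch-style sketch of how an SDS update of the form in Eq. (3) is commonly realized: the stop-gradient residual w(t_d)(eps_phi - eps) is turned into a surrogate loss whose gradient equals w(t_d)(eps_phi - eps) dx/dtheta. The render and denoiser callables and the weighting choice are hypothetical stand-ins, not the actual MVDream, Stable Diffusion, or Zeroscope interfaces:

import torch

def sds_step(render, denoiser, alphas_cumprod, text_emb, cams):
    """One score-distillation update in the style of Eq. (3).

    render(cams) -> images differentiable w.r.t. the scene parameters;
    denoiser(z_t, t, text_emb, cams) -> predicted noise eps_phi (both are
    hypothetical placeholders for the pre-trained diffusion models).
    """
    x = render(cams)                                   # images rendered from N_theta
    t = int(torch.randint(20, 980, (1,)))              # random diffusion timestep t_d
    a = alphas_cumprod[t]
    eps = torch.randn_like(x)
    z_t = a.sqrt() * x + (1.0 - a).sqrt() * eps        # noised rendering
    with torch.no_grad():
        eps_phi = denoiser(z_t, t, text_emb, cams)     # frozen model's noise prediction
    w = 1.0 - a                                        # one common choice of w(t_d)
    grad = w * (eps_phi - eps)                         # Eq. (3) residual, constant w.r.t. theta
    (grad * x).sum().backward()                        # gradient: w * (eps_phi - eps) * dx/dtheta

# toy smoke test with dummy stand-ins (shapes only, no real diffusion model)
theta = torch.nn.Parameter(torch.rand(1, 3, 8, 8))
sds_step(render=lambda cams: theta * 1.0,
         denoiser=lambda z, t, y, c: torch.zeros_like(z),
         alphas_cumprod=torch.linspace(0.999, 0.01, 1000),
         text_emb=None, cams=None)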
+ }, + { + "type": "code_caption", + "bbox": [ + 0.501, + 0.091, + 0.826, + 0.107 + ], + "angle": 0, + "content": "Algorithm 1 Hybrid Score Distillation Sampling" + }, + { + "type": "algorithm", + "bbox": [ + 0.502, + 0.108, + 0.895, + 0.574 + ], + "angle": 0, + "content": "Require: \n\\(\\mathcal{N}_{\\theta}\\) ▷ 4D neural representation \n\\(N_{\\mathrm{stage-1}}, N_{\\mathrm{stage-2}}, N_{\\mathrm{stage-3}}\\) ▷ iterations for each stage \n\\(P_{\\mathrm{3D}}, P_{\\mathrm{IMG}}\\) ▷ update probabilities \n\\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{3D}}, \\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}, \\nabla_{\\theta}\\mathcal{L}_{\\mathrm{VID}}\\) ▷ SDS grads. (Eqs. 3, 4, 6) \n1: // Stage 1 \n2: freeze dynamic hash map (\\(\\theta_{\\mathrm{dynamic}}\\)) \n3: for iter in \\(N_{\\mathrm{stage-1}}\\) do ▷ 3D update \n4: grad = \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{3D}}\\) \n5: UPDATE(grad) \n6: // Stage 2 \n7: for iter in \\(N_{\\mathrm{stage-2}}\\) do ▷ 3D or IMG update \n8: grad = \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{3D}}\\) with probability \\(P_{\\mathrm{3D}}\\), otherwise \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}\\) \n9: UPDATE(grad) \n10: // Stage 3 \n11: decrease learning rate of static hash map (\\(\\theta_{\\mathrm{static}}\\)) \n12: for iter in \\(N_{\\mathrm{stage-3}}\\) do ▷ 3D, IMG, or VID update \n13: grad = \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{3D}}\\) with probability \\(P_{\\mathrm{3D}}\\), \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}\\) with probability \\(P_{\\mathrm{3D}}\\cdot P_{\\mathrm{IMG}}\\), otherwise \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{VID}}\\) \n14: if VID, unfreeze \\(\\theta_{\\mathrm{dynamic}}\\) \n15: UPDATE(grad) \n16: procedure UPDATE(grad) \n17: \\(\\mathbf{x} \\leftarrow \\mathcal{N}_{\\theta}\\) ▷ render images (Eq. 1) \n18: take gradient step on grad ▷ optimize \\(\\mathcal{N}_{\\theta}\\) \n19: if IMG, take finetuning step (Eq. 5) \n20: end procedure"
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.597, + 0.895, + 0.84 + ], + "angle": 0, + "content": "Stage 3. Last, we update the representation using a combination of all gradient updates. Specifically, we randomly select to update the model at each iteration using \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{3D}}\\), \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}\\), or \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{VID}}\\) with probability \\(P_{\\mathrm{3D}}\\), \\(P_{\\mathrm{3D}}\\cdot P_{\\mathrm{IMG}}\\), and \\(1 - P_{\\mathrm{3D}}\\cdot P_{\\mathrm{IMG}}\\), respectively. Since we now aim to incorporate motion into the representation, we unfreeze the parameters of the dynamic hash table during the update with \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{VID}}\\) but keep them frozen for updates using the text-to-image models. We also decrease the learning rate of the static hash table to preserve the high-quality appearance from the previous stage. We repeat the alternating optimization in the final stage until convergence, which we find occurs consistently within \\(N_{\\mathrm{stage-3}}\\) iterations. Overall, hybrid SDS effectively combines the strengths of each pre-trained diffusion model while avoiding quality degradations that result from naively combining gradients from each model."
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.848, + 0.66, + 0.864 + ], + "angle": 0, + "content": "3.3. Implementation" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.895, + 0.901 + ], + "angle": 0, + "content": "We implement hybrid SDS based on the threestudio framework [2], which includes implementations of MV-" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8000" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.089, + 0.895, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.529, + 0.891, + 0.545 + ], + "angle": 0, + "content": "Figure 4. Text-to-4D Comparison. We compare against MAV3D [57], and observe our approach obtains significantly higher quality results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.564, + 0.471, + 0.626 + ], + "angle": 0, + "content": "Dream [55] (for 3D-aware text-to-image diffusion and SDS), ProlificDreamer [71] with Stable Diffusion [51] (text-to-image diffusion and VSD), and we implement the video SDS updates using Zeroscope [3, 69]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.634, + 0.47, + 0.816 + ], + "angle": 0, + "content": "Hyperparameter values. We initialize the 4D neural representation following [30, 44] and add an offset to the density predicted by the network in the center of the scene to promote object-centric reconstructions. We set the learning rates for the static hash map to 0.01, for the dynamic hash map to 0.01, and for the MLP to 0.001. We drop the learning rate for the static hash map to 0.0001 before the last stage to focus the gradient updates on the dynamic hash map. The values of \\( N_{\\mathrm{stage - 1}} \\), \\( N_{\\mathrm{stage - 2}} \\), and \\( N_{\\mathrm{stage - 3}} \\) are set to 10000, 10000, and 100000, respectively. We set the probabilities for hybrid SDS to \\( P_{\\mathrm{3D}} = 0.5 \\) and \\( P_{\\mathrm{IMG}} = 0.5 \\) for a reasonable tradeoff with respect to appearance, 3D structure, and motion." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Rendering. Each of the diffusion models has a different native resolution, so we render images from \\(\\mathcal{N}_{\\theta}\\) accordingly. We render four images from different camera positions for the 3D-aware SDS at the native \\((256\\times 256\\) pixel) resolution of the 3D-aware text-to-image model. The VSD update" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.564, + 0.895, + 0.641 + ], + "angle": 0, + "content": "is computed by rendering a \\(256 \\times 256\\) image and bilinearly upsampling the image to the native resolution of Stable Diffusion \\((512 \\times 512)\\). Finally, the video SDS update is computed by rendering 16 video frames at \\(160 \\times 288\\) resolution and upsampling to the native \\(320 \\times 576\\) resolution of Zeroscope." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.659, + 0.633, + 0.677 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.686, + 0.597, + 0.701 + ], + "angle": 0, + "content": "4.1. Metrics" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.711, + 0.895, + 0.863 + ], + "angle": 0, + "content": "We assess our method using CLIP Score [42] and a user study. We compare our model against MAV3D for 28 prompts and against our ablations for a subset of 5 prompts. 
Current text-to-4D models are costly to train, and many researchers in academia do not have access to the scale of resources available to large tech companies. Hence, we only used a subset due to computational limitations. To promote future research in this field, we open source the evaluation protocol for the user study along the code: https://github.com/sherwinbahmani/4dfy." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.895, + 0.902 + ], + "angle": 0, + "content": "CLIP Score. CLIP Score [42] evaluates the correlation between a text prompt and an image. Specifically, this cor" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "8001" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.472, + 0.215 + ], + "angle": 0, + "content": "Table 2. Quantitative results. We compare our method against MAV3D and variations of 4D-fy with different loss terms or backbone architectures (i.e., with HexPlane [8]). The methods are evaluated in terms of CLIP Score (CLIP) and human preference based on appearance quality (AQ), 3D structure quality (SQ), motion quality (MQ), text alignment (TA), and overall preference (Overall). The numbers reported for human preference are the percentages of users who voted for our method over the corresponding method in head-to-head comparisons." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.239, + 0.47, + 0.402 + ], + "angle": 0, + "content": "
MethodCLIPHuman Preference
AQSQMQTAOverall
MAV3D [57]33.992%89%41%52%67%
4D-fy34.2
Ablation Study
4D-fy35.0
w/o ΦθL3D/IMG29.3100%100%78%86%94%
w/o ΦθL3D35.188%89%95%92%91%
w/o ΦθLIMG34.570%68%68%69%70%
w/o hybrid SDS33.8100%100%78%88%95%
w/ HexPlane34.595%92%90%92%95%
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.422, + 0.47, + 0.573 + ], + "angle": 0, + "content": "responds to the cosine similarity between textual CLIP [46] embedding and visual CLIP [46] embedding. The score is bound between 0 and 100, where 100 is best. We calculate the CLIP score for MAV3D using the same procedure we use for our method. Specifically, for each input text prompt, we render a video using the same camera trajectory as MAV3D, i.e., moving around the scene in azimuth with a fixed elevation angle. Subsequently, we score each video frame with CLIP ViT-B/32 and average the scores over all frames and text prompts to derive the final CLIP score." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.58, + 0.47, + 0.792 + ], + "angle": 0, + "content": "User study. We conduct qualitative comparisons between our method and the baseline, MAV3D, by surveying 26 human evaluators. We use the same head-to-head comparison model as the user survey conducted by MAV3D. Specifically, we present text prompts alongside the corresponding outputs of our method and the baseline method in random order. Evaluators are requested to specify their overall preference for a video, as well as evaluate four specific properties: appearance quality, 3D structure quality, motion quality, and text alignment. In Table 2, we report the percentage of users who prefer each method overall and based on each of the four properties. We conduct \\(\\chi^2\\)-tests to evaluate statistical significance at the \\(p < 0.05\\) level. Further details on the user study are included in the supplementary." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.802, + 0.172, + 0.817 + ], + "angle": 0, + "content": "4.2. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.468, + 0.901 + ], + "angle": 0, + "content": "We visualize spatio-temporal renderings along with depth maps in comparison to MAV3D in Fig. 4. Although both methods can synthesize 4D scenes, MAV3D noticeably lacks detail. In contrast, our method produces realistic renderings across space and time. We report quantitative metrics in" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.091, + 0.892, + 0.212 + ], + "angle": 0, + "content": "Table 2. In terms of CLIP Score and overall preference in the user study 4D-fy outperforms MAV3D. Users indicated a statistically significant preference towards 4D-fy compared to MAV3D in terms of appearance quality, 3D structure quality, text alignment, and overall preference. They rated the motion quality roughly on par with MAV3D, which used a proprietary text-to-video model. For example, overall, \\(67\\%\\) of users prefer our method over \\(33\\%\\) for MAV3D." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.224, + 0.611, + 0.239 + ], + "angle": 0, + "content": "4.3. Ablations" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.247, + 0.892, + 0.308 + ], + "angle": 0, + "content": "We provide an in-depth analysis motivating our hybrid SDS training scheme by ablating each component and evaluating the use of a 4D neural representation more similar to that of MAV3D. We provide ablations in Table 2 and in Fig. 5." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.315, + 0.895, + 0.451 + ], + "angle": 0, + "content": "Image guidance (w/o \\(\\nabla_{\\theta}\\mathcal{L}_{3D / \\mathrm{IMG}}\\)). Technically, learning a dynamic 3D scene solely from a text-to-video model without text-to-image guidance is possible. 
To demonstrate the drawbacks of this approach, we present results where we skip the first two stages and directly train the model with text-to-video guidance only. This corresponds to setting \\(P_{3\\mathrm{D}} = 0\\) and \\(P_{\\mathrm{IMG}} = 0\\). Our experiments reveal that the text-to-video model fails to provide realistic 3D structure and high-quality appearance for generating a dynamic 3D scene." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.457, + 0.892, + 0.548 + ], + "angle": 0, + "content": "3D-aware guidance (w/o \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathbf{3D}}\\)). We find that using a 3D-aware diffusion model is crucial for generating realistic 3D structures. If we remove the 3D-aware diffusion model, i.e., by setting \\(P_{\\mathrm{3D}} = 0\\), we can generate scenes with similar motion and high-quality appearance, but the 3D structure is degraded. This is evident for both scenes in Fig. 5." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.555, + 0.892, + 0.646 + ], + "angle": 0, + "content": "VSD guidance (w/o \\(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}\\)). We find that VSD helps provide a realistic scene appearance; if we disable it during scene generation, i.e., \\(P_{\\mathrm{IMG}} = 0\\), there are some negative effects. For example in Fig. 5, the ice cream cone in the bucket (top row) is more detailed, and the dog's face (bottom row) is sharper (please zoom in)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.652, + 0.892, + 0.819 + ], + "angle": 0, + "content": "Hybrid SDS. To illustrate the impact of our hybrid SDS approach we disable image guidance after the second stage by setting \\( P_{\\mathrm{3D}} = 0 \\) and \\( P_{\\mathrm{IMG}} = 0 \\) for the third stage only. This aligns with the MAV3D training scheme, where a static model is pre-trained with text-to-image and subsequently fine-tuned with text-to-video. Our quantitative and qualitative analysis shows that this approach results in degraded appearance and 3D structure. We find that incorporating text-to-image, 3D-aware text-to-image, and text-to-video via hybrid SDS in the final optimization stage preserves a realistic appearance and high-quality 3D structure." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Backbone architecture. Finally, we ablate the hash-grid-based 4D representation by replacing it with the HexPlane [8, 13] architecture. This representation similarly disentangles static and dynamic scene components and can be readily integrated into our pipeline. The HexPlane approach" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8002" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.089, + 0.895, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.421, + 0.893, + 0.478 + ], + "angle": 0, + "content": "Figure 5. Ablation study. We assess the qualitative impact of removing gradient updates from different models during optimization. Our method without image guidance \\((\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D} / \\mathrm{IMG}})\\) does not produce realistic appearance and 3D structure. Removing the 3D-aware guidance \\((\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}})\\) generates high-quality appearance but low-quality 3D structure. Our approach without VSD \\((\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}})\\) reduces the appearance quality. 
Hybrid SDS is crucial for appearance and 3D structure, while using HexPlane reduces the appearance quality. Best viewed digitally." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.503, + 0.473, + 0.653 + ], + "angle": 0, + "content": "fails to match the appearance quality of the hash-grid-based representation. MAV3D uses HexPlane but implements a multi-scale variant with a large 5-layer decoding MLP featuring 128 hidden units. We could not re-implement this approach as the model does not fit on an 80 GB A100 GPU. To allow for a fair comparison, we instead increased the capacity of HexPlane to match the memory consumption of our hash-grid-based representation. We expect that increasing the capacity of HexPlane and longer training times could lead to similar results as our representation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.671, + 0.196, + 0.687 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.697, + 0.47, + 0.789 + ], + "angle": 0, + "content": "Our method synthesizes high-quality 4D scenes from text prompts using a novel hybrid score distillation sampling procedure. Our work alleviates a three-way tradeoff between appearance, 3D structure, and motion and is the first to build on open-source models. We will release the code to facilitate future research in text-to-4D generation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Limitations. Although our method produces compelling dynamic 3D scenes, there are several limitations and avenues for future work. First, the complexity of motion in our scenes is limited to simple movements. We believe that our method will directly benefit from future progress in text-to-video generation, as current text-to-video models suffer from low-quality renderings and unrealistic motion. Another way" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.503, + 0.895, + 0.684 + ], + "angle": 0, + "content": "to improve motion could be exploiting recently proposed dynamic representations, e.g., dynamic 3D Gaussians [37]. Moreover, current metrics in text-to-3D generation are not sufficient, as they mainly rely on image-based metrics and user studies. Designing more sophisticated 3D and 4D metrics is an important direction for future work. Lastly, generating each scene takes a significant amount of time. Concurrent text-to-3D works [23, 28] alleviate this problem by training a large-scale model on 3D data, allowing generation within seconds. Incorporating our hybrid optimization procedure to blend between large-scale pre-training on 2D, 3D, and video data could enable fast text-to-4D generation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.691, + 0.893, + 0.738 + ], + "angle": 0, + "content": "Ethics Statement. We condemn the application of our method for creating realistic fake content intended to harm specific entities or propagate misinformation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.751, + 0.688, + 0.768 + ], + "angle": 0, + "content": "6. Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.775, + 0.895, + 0.888 + ], + "angle": 0, + "content": "This work was supported by the Natural Sciences and Engineering Research Council of Canada (NSERC) Discovery Grant program, the Digital Research Alliance of Canada, and by the Advanced Research Computing at Simon Fraser University. 
It was also supported in part by ARL grant W911NF-21-2-0104, a Vannevar Bush Faculty Fellowship, a gift from the Adobe Corporation, a PECASE by the ARO, NSF award 1839974, Stanford HAI, and a Samsung GRO." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8003" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.472, + 0.156 + ], + "angle": 0, + "content": "[1] Stable Diffusion version 2. https://github.com/Stability-AI/stablediffusion. Accessed: 2023-10-31. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.472, + 0.199 + ], + "angle": 0, + "content": "[2] Threestudio Github page. https://github.com/threestudio-project/threestudio. Accessed: 2023-10-31.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.202, + 0.472, + 0.242 + ], + "angle": 0, + "content": "[3] Zeroscope text-to-video model. https://huggingface.co/cerspense/zeroscope_v2_576w. Accessed: 2023-10-31. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.245, + 0.47, + 0.273 + ], + "angle": 0, + "content": "[4] Giovanni Adorni and Mauro Di Manzo. Natural language input for scene generation. In Proc. EACL, 1983. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.275, + 0.472, + 0.315 + ], + "angle": 0, + "content": "[5] Max Bain, Arsha Nagrani, Gül Varol, and Andrew Zisserman. Frozen in time: A joint video and image encoder for end-to-end retrieval. In Proc. ICCV, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.318, + 0.472, + 0.386 + ], + "angle": 0, + "content": "[6] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Ji-aming Song, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, Bryan Catanzaro, et al. eDiff-I: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.388, + 0.472, + 0.443 + ], + "angle": 0, + "content": "[7] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proc. CVPR, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.446, + 0.47, + 0.472 + ], + "angle": 0, + "content": "[8] Ang Cao and Justin Johnson. HexPlane: A fast representation for dynamic scenes. In Proc. CVPR, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.475, + 0.47, + 0.543 + ], + "angle": 0, + "content": "[9] Eric R Chan, Koki Nagano, Matthew A Chan, Alexander W Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3D-aware diffusion models. In Proc. ICCV, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.546, + 0.472, + 0.585 + ], + "angle": 0, + "content": "[10] Angel Chang, Manolis Savva, and Christopher D Manning. Learning spatial knowledge for text to 3D scene generation. In Proc. EMNLP, 2014. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.589, + 0.472, + 0.643 + ], + "angle": 0, + "content": "[11] Kevin Chen, Christopher B Choy, Manolis Savva, Angel X Chang, Thomas Funkhouser, and Silvio Savarese. Text2shape: Generating shapes from natural language by learning joint embeddings. In Proc. ACCV, 2018. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.646, + 0.472, + 0.685 + ], + "angle": 0, + "content": "[12] Bob Coyne and Richard Sproat. Wordseye: An automatic text-to-scene conversion system. In Proc. SIGGRAPH, 2001. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.688, + 0.472, + 0.743 + ], + "angle": 0, + "content": "[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbæk Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proc. CVPR, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.472, + 0.787 + ], + "angle": 0, + "content": "[14] William Gao, Noam Aigerman, Thibault Groueix, Vova Kim, and Rana Hanocka. Textdeformer: Geometry manipulation using text guidance. In Proc. SIGGRAPH, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.472, + 0.843 + ], + "angle": 0, + "content": "[15] Jiatao Gu, Alex Trevithick, Kai-En Lin, Joshua M Susskind, Christian Theobalt, Lingjie Liu, and Ravi Ramamoorthi. Nerfdiff: Single-image view synthesis with Nerf-guided distillation from 3d-aware diffusion. In Proc. ICML, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.472, + 0.9 + ], + "angle": 0, + "content": "[16] Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, and Baining Guo. Vector quantized diffusion model for text-to-image synthesis. In Proc. CVPR, 2022. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.148 + ], + "angle": 0, + "content": "[17] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Imagine your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.151, + 0.894, + 0.205 + ], + "angle": 0, + "content": "[18] Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. Latent video diffusion models for high-fidelity video generation with arbitrary lengths. arXiv preprint arXiv:2211.13221, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.208, + 0.894, + 0.248 + ], + "angle": 0, + "content": "[19] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. In Proc. NeurIPS Workshop on Deep Generative Models, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.252, + 0.894, + 0.292 + ], + "angle": 0, + "content": "[20] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Proc. NeurIPS, 2020. 1, 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.296, + 0.894, + 0.365 + ], + "angle": 0, + "content": "[21] Jonathan Ho, William Chan, Chitwan Sahara, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.368, + 0.894, + 0.422 + ], + "angle": 0, + "content": "[22] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. The Journal of Machine Learning Research, 23(1):2249-2281, 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.425, + 0.894, + 0.48 + ], + "angle": 0, + "content": "[23] Yicong Hong, Kai Zhang, Jiuming Gu, Sai Bi, Yang Zhou, Difan Liu, Feng Liu, Kalyan Sunkavalli, Trung Bui, and Hao Tan. Lrm: Large reconstruction model for single image to 3D. arXiv preprint arXiv:2311.04400, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.483, + 0.894, + 0.525 + ], + "angle": 0, + "content": "[24] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. In Proc. ICLR, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.528, + 0.894, + 0.568 + ], + "angle": 0, + "content": "[25] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proc. CVPR, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.571, + 0.894, + 0.598 + ], + "angle": 0, + "content": "[26] Nikolay Jetchev. Clipmatrix: Text-controlled creation of 3D textured meshes. arXiv preprint arXiv:2109.12922, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.601, + 0.894, + 0.655 + ], + "angle": 0, + "content": "[27] Yanqin Jiang, Li Zhang, Jin Gao, Weimin Hu, and Yao Yao. Consistent4D: Consistent \\(360^{\\circ}\\) dynamic object generation from monocular video. arXiv preprint arXiv:2311.02848, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.659, + 0.894, + 0.728 + ], + "angle": 0, + "content": "[28] Jiahao Li, Hao Tan, Kai Zhang, Zexiang Xu, Fujun Luan, Yinghao Xu, Yicong Hong, Kalyan Sunkavalli, Greg Shakhnarovich, and Sai Bi. Instant3D: Fast text-to-3D with sparse-view generation and large reconstruction model. arXiv preprint arXiv:2311.06214, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.731, + 0.894, + 0.784 + ], + "angle": 0, + "content": "[29] Weiyu Li, Rui Chen, Xuelin Chen, and Ping Tan. Sweetdreamer: Aligning geometric priors in 2D diffusion for consistent text-to-3D. arXiv preprint arXiv:2310.02596, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.788, + 0.894, + 0.843 + ], + "angle": 0, + "content": "[30] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3D: High-resolution text-to-3D content creation. In Proc. CVPR, 2023. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.894, + 0.9 + ], + "angle": 0, + "content": "[31] Yukang Lin, Haonan Han, Chaoqun Gong, Zunnan Xu, Yachao Zhang, and Xiu Li. Consistent123: One image to highly consistent 3D asset using case-aware diffusion priors. arXiv preprint arXiv:2309.17261, 2023. 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "8004" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[32] Huan Ling, Seung Wook Kim, Antonio Torralba, Sanja Fidler, and Karsten Kreis. Align your gaussians: Text-to-4D with dynamic 4D gaussians and composed diffusion models. arXiv preprint arXiv:2312.13763, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.471, + 0.19 + ], + "angle": 0, + "content": "[33] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. 
Zero-1-to-3: Zero-shot one image to 3d object. In Proc. ICCV, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.471, + 0.234 + ], + "angle": 0, + "content": "[34] Ying Liu, Dengsheng Zhang, Guojun Lu, and Wei-Ying Ma. A survey of content-based image retrieval with high-level semantics. Pattern Recognition, 40(1):262-282, 2007. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.236, + 0.471, + 0.29 + ], + "angle": 0, + "content": "[35] Yuan Liu, Cheng Lin, Zijiao Zeng, Xiaoxiao Long, Lingjie Liu, Taku Komura, and Wenping Wang. Syncdreamer: Generating multiview-consistent images from a single-view image. arXiv preprint arXiv:2309.03453, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.292, + 0.471, + 0.347 + ], + "angle": 0, + "content": "[36] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. ACM Trans. Graph., 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.349, + 0.471, + 0.403 + ], + "angle": 0, + "content": "[37] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3D Gaussians: Tracking by persistent dynamic view synthesis. arXiv preprint arXiv:2308.09713, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.405, + 0.471, + 0.473 + ], + "angle": 0, + "content": "[38] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.476, + 0.471, + 0.517 + ], + "angle": 0, + "content": "[39] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.519, + 0.471, + 0.587 + ], + "angle": 0, + "content": "[40] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. In Proc. ICML, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.589, + 0.471, + 0.63 + ], + "angle": 0, + "content": "[41] Zijie Pan, Zeyu Yang, Xiatian Zhu, and Li Zhang. Fast dynamic 3D object generation from a single-view video. arXiv preprint arXiv:2401.08742, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.632, + 0.471, + 0.673 + ], + "angle": 0, + "content": "[42] Dong Huk Park, Samaneh Azadi, Xihui Liu, Trevor Darrell, and Anna Rohrbach. Benchmark for compositional text-to-image synthesis. In Proc. NeurIPS, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.675, + 0.471, + 0.744 + ], + "angle": 0, + "content": "[43] Ryan Po, Wang Yifan, Vladislav Golyanik, Kfir Aberman, Jonathan T Barron, Amit H Bermano, Eric Ryan Chan, Tali Dekel, Aleksander Holynski, Angjoo Kanazawa, et al. State of the art on diffusion models for visual computing. arXiv preprint arXiv:2310.07204, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.471, + 0.786 + ], + "angle": 0, + "content": "[44] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. DreamFusion: Text-to-3D using 2D diffusion. In Proc. ICLR, 2023. 
1, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.471, + 0.858 + ], + "angle": 0, + "content": "[45] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3D object generation using both 2D and 3D diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.471, + 0.9 + ], + "angle": 0, + "content": "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.893, + 0.134 + ], + "angle": 0, + "content": "Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Proc. ICML, 2021. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.135, + 0.894, + 0.176 + ], + "angle": 0, + "content": "[47] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In Proc. ICML, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.178, + 0.894, + 0.23 + ], + "angle": 0, + "content": "[48] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. arXiv preprint arXiv:2204.06125, 2022. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.232, + 0.894, + 0.274 + ], + "angle": 0, + "content": "[49] Scott Reed, Zeynep Akata, Xinchen Yan, Lajanugen Logeswaran, Bernt Schiele, and Honglak Lee. Generative adversarial text to image synthesis. In Proc. ICML, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.275, + 0.893, + 0.327 + ], + "angle": 0, + "content": "[50] Jiawei Ren, Liang Pan, Jiaxiang Tang, Chi Zhang, Ang Cao, Gang Zeng, and Ziwei Liu. DreamGaussian4D: Generative 4D Gaussian splatting. arXiv preprint arXiv:2312.17142, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.33, + 0.894, + 0.384 + ], + "angle": 0, + "content": "[51] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Bjorn Ommer. High-resolution image synthesis with latent diffusion models. In Proc. CVPR, 2022, 1, 2, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.385, + 0.893, + 0.455 + ], + "angle": 0, + "content": "[52] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Proc. NeurIPS, 2022. 1, 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.456, + 0.894, + 0.509 + ], + "angle": 0, + "content": "[53] Aditya Sanghi, Hang Chu, Joseph G Lambourne, Ye Wang, Chin-Yi Cheng, Marco Fumero, and Kamal Rahimi Malekshan. Clip-forge: Towards zero-shot text-to-shape generation. In Proc. CVPR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.511, + 0.894, + 0.579 + ], + "angle": 0, + "content": "[54] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. 
Laion-5b: An open large-scale dataset for training next generation image-text models. Proc. NeurIPS, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.581, + 0.893, + 0.634 + ], + "angle": 0, + "content": "[55] Yichun Shi, Peng Wang, Jianglong Ye, Mai Long, Kejie Li, and Xiao Yang. MVDream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512, 2023. 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.637, + 0.893, + 0.703 + ], + "angle": 0, + "content": "[56] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.706, + 0.893, + 0.773 + ], + "angle": 0, + "content": "[57] Uriel Singer, Shelly Sheynin, Adam Polyak, Oron Ashual, Iurii Makarov, Filippos Kokkinos, Naman Goyal, Andrea Vedaldi, Devi Parikh, Justin Johnson, et al. Text-to-4d dynamic scene generation. In Proc. ICML, 2023. 1, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.776, + 0.893, + 0.817 + ], + "angle": 0, + "content": "[58] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In Proc. ICML, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.818, + 0.893, + 0.844 + ], + "angle": 0, + "content": "[59] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. Proc. ICLR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.894, + 0.9 + ], + "angle": 0, + "content": "[60] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In Proc. ICLR, 2021. 1" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "8005" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.148 + ], + "angle": 0, + "content": "[61] Matteo Stefanini, Marcella Cornia, Lorenzo Baraldi, Silvia Cascianelli, Giuseppe Fiameni, and Rita Cucchiara. From show to tell: A survey on deep learning-based image captioning. IEEE Trans. Pattern Anal. Mach. Intell., 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.151, + 0.472, + 0.207 + ], + "angle": 0, + "content": "[62] Junshu Tang, Tengfei Wang, Bo Zhang, Ting Zhang, Ran Yi, Lizhuang Ma, and Dong Chen. Make-it-3D: High-fidelity 3D creation from a single image with diffusion prior. arXiv preprint arXiv:2303.14184, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.209, + 0.472, + 0.278 + ], + "angle": 0, + "content": "[63] Ayush Tewari, Tianwei Yin, George Cazenavette, Semon Rezchikov, Joshua B Tenenbaum, Frédo Durand, William T Freeman, and Vincent Sitzmann. Diffusion with forward models: Solving stochastic inverse problems without direct supervision. arXiv preprint arXiv:2306.11719, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.281, + 0.472, + 0.322 + ], + "angle": 0, + "content": "[64] Haithem Turki, Jason Y Zhang, Francesco Ferroni, and Deva Ramanan. Suds: Scalable urban dynamic scenes. In Proc CVPR, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.324, + 0.472, + 0.393 + ], + "angle": 0, + "content": "[65] Ruben Villegas, Mohammad Babaeizadeh, Pieter-Jan Kindermans, Hernan Moraldo, Han Zhang, Mohammad Taghi Saffar, Santiago Castro, Julius Kunze, and Dumitru Erhan. Phenaki: Variable length video generation from open domain textual description. arXiv preprint arXiv:2210.02399, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.396, + 0.472, + 0.438 + ], + "angle": 0, + "content": "[66] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-NeRF: Text-and-image driven manipulation of neural radiance fields. In Proc. CVPR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.44, + 0.472, + 0.495 + ], + "angle": 0, + "content": "[67] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A Yeh, and Greg Shakhnarovich. Score Jacobian Chaining: Lifting pretrained 2D diffusion models for 3D generation. In Proc. CVPR, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.498, + 0.472, + 0.54 + ], + "angle": 0, + "content": "[68] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.542, + 0.472, + 0.598 + ], + "angle": 0, + "content": "[69] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.601, + 0.472, + 0.668 + ], + "angle": 0, + "content": "[70] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. arXiv preprint arXiv:2306.02018, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.672, + 0.472, + 0.727 + ], + "angle": 0, + "content": "[71] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3D generation with variational score distillation. Proc. NeurIPS, 2023. 1, 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.73, + 0.472, + 0.785 + ], + "angle": 0, + "content": "[72] Ruiqi Wu, Liangyu Chen, Tong Yang, Chunle Guo, Chongyi Li, and Xiangyu Zhang. Lamp: Learn a motion pattern for few-shot-based video generation. arXiv preprint arXiv:2310.10769, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.472, + 0.843 + ], + "angle": 0, + "content": "[73] Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. AttnGAN: Fine-grained text to image generation with attentional generative adversarial networks. In Proc. CVPR, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.472, + 0.901 + ], + "angle": 0, + "content": "[74] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In Proc. CVPR, 2022. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.147 + ], + "angle": 0, + "content": "[75] Yuyang Yin, Dejia Xu, Zhangyang Wang, Yao Zhao, and Yunchao Wei. 
4DGen: Grounded 4D content generation with spatial-temporal consistency. arXiv preprint arXiv:2312.17225, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.894, + 0.205 + ], + "angle": 0, + "content": "[76] Paul Yoo, Jiaxian Guo, Yutaka Matsuo, and Shixiang Shane Gu. Dreamsparse: Escaping from Plato's cave with 2d diffusion model given sparse views. arXiv preprint arXiv:2306.03414, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.207, + 0.894, + 0.247 + ], + "angle": 0, + "content": "[77] Quanzeng You, Hailin Jin, Zhaowen Wang, Chen Fang, and Jiebo Luo. Image captioning with semantic attention. In Proc. CVPR, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.894, + 0.344 + ], + "angle": 0, + "content": "[78] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, Ben Hutchinson, Wei Han, Zarana Parekh, Xin Li, Han Zhang, Jason Baldridge, and Yonghui Wu. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.347, + 0.894, + 0.416 + ], + "angle": 0, + "content": "[79] Lili Yu, Bowen Shi, Ramakanth Pasunuru, Benjamin Muller, Olga Golovneva, Tianlu Wang, Arun Babu, Binh Tang, Brian Karrer, Shelly Sheynin, et al. Scaling autoregressive multimodal models: Pretraining and instruction tuning. arXiv preprint arXiv:2309.02591, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.418, + 0.894, + 0.473 + ], + "angle": 0, + "content": "[80] Han Zhang, Tao Xu, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, and Dimitris N Metaxas. StackGAN: Text to photo-realistic image synthesis with stacked generative adversarial networks. In Proc. ICCV, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.475, + 0.894, + 0.529 + ], + "angle": 0, + "content": "[81] Yuyang Zhao, Zhiwen Yan, Enze Xie, Lanqing Hong, Zhenguo Li, and Gim Hee Lee. Animate124: Animating one image to 4D dynamic scene. arXiv preprint arXiv:2311.14603, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.531, + 0.894, + 0.585 + ], + "angle": 0, + "content": "[82] Yufeng Zheng, Xueting Li, Koki Nagano, Sifei Liu, Otmar Hilliges, and Shalini De Mello. A unified approach for text-and image-guided 4D scene generation. arXiv preprint arXiv:2311.16854, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.587, + 0.894, + 0.642 + ], + "angle": 0, + "content": "[83] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 
3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.956 + ], + "angle": 0, + "content": "8006" + } + ] +] \ No newline at end of file diff --git a/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/7295c7f1-f21d-431e-aa65-0a0fb95fe12c_origin.pdf b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/7295c7f1-f21d-431e-aa65-0a0fb95fe12c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..93352205ffcc979971859887966bfc618bca50c3 --- /dev/null +++ b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/7295c7f1-f21d-431e-aa65-0a0fb95fe12c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29eff8683b84513548e53ecef0f35184984e95514247bafe3ae20ab8e0cc217b +size 3228052 diff --git a/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/full.md b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/full.md new file mode 100644 index 0000000000000000000000000000000000000000..bdb59929cfdebd03856bd8ebb152176d3b617837 --- /dev/null +++ b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/full.md @@ -0,0 +1,326 @@ +# 4D-fly: Text-to-4D Generation Using Hybrid Score Distillation Sampling + +Sherwin Bahmani $^{1,2}$ Ivan Skorokhodov $^{3,4}$ Victor Rong $^{1,2}$ Gordon Wetzstein $^{5}$ Leonidas Guibas $^{5}$ Peter Wonka $^{3}$ Sergey Tulyakov $^{4}$ Jeong Joon Park $^{6}$ Andrea Tagliafasacchi $^{1,7,8}$ David B. Lindell $^{1,2}$ + +1University of Toronto 2Vector Institute 3KAUST 4Snap Inc. 5Stanford University 6University of Michigan 7SFU 8Google + +![](images/be373046b6b89e238603e355261a367a82d8185543c47f7d9a676c2027f87d0b.jpg) +Figure 1. Text-to-4D Synthesis. We present 4D-fy, a technique that synthesizes 4D (i.e., dynamic 3D) scenes from a text prompt. We show scenes generated from two text prompts for different viewpoints (vertical dimension) at different time steps (horizontal dimension). Video results can be viewed on our website: https://sherwinbahmani.github.io/4dfy. + +![](images/5f03646c6c9ed74bf919e9380dcef17e32af727fed8ff565857e397d0696a8f3.jpg) + +# Abstract + +Recent breakthroughs in text-to-4D generation rely on pre-trained text-to-image and text-to-video models to generate dynamic 3D scenes. However, current text-to-4D methods face a three-way tradeoff between the quality of scene appearance, 3D structure, and motion. For example, text-to-image models and their 3D-aware variants are trained on internet-scale image datasets and can be used to produce scenes with realistic appearance and 3D structure—but no motion. Text-to-video models are trained on relatively smaller video datasets and can produce scenes with motion, but poorer appearance and 3D structure. While these models have complementary strengths, they also have opposing weaknesses, making it difficult to combine them in a way that alleviates this three-way tradeoff. Here, we introduce hybrid score distillation sampling, an alternating optimization procedure that blends supervision signals from multiple pre-trained diffusion models and incorporates benefits of each for high-fidelity text-to-4D generation. Using hybrid SDS, we demonstrate synthesis of 4D scenes with compelling appearance, 3D structure, and motion. + +# 1. 
Introduction + +The advent of internet-scale image-text datasets [54] and advances in diffusion models [20, 58, 60] have led to new capabilities in stable, high-fidelity image generation from text prompts [6, 51, 52]. Recent methods have also shown that large-scale text-to-image or text-to-video [56] diffusion models learn useful priors for 3D [25, 44] and 4D scene generation [57]. Our work focuses on text-to-4D scene generation (Fig. 1), which promises exciting new capabilities for applications in augmented and virtual reality, computer animation, and industrial design. + +Current techniques for generating 3D or 4D scenes from text prompts typically iteratively optimize a representation of the scene using supervisory signals from a diffusion model [44, 67, 71]. Specifically, these methods render an image of a 3D scene, add noise to the rendered image, use a pre-trained diffusion model to denoise the rendered image, and estimate gradients used to update the 3D representation [44, 67]. This procedure, known as score distillation sampling (SDS) [44], underpins most recent methods for text-conditioned scene generation. + +Using SDS for text-to-4D generation requires navigating + +Table 1. Text-to-4D models face a tradeoff between the quality of appearance, 3D structure, and motion depending on the type of generative model used for score distillation sampling (SDS): text-to-image (T2I), 3D-aware T2I, or, text-to-video (T2V). + +
SDS modelappearance3D structuremotion
T2I [6, 51, 52, 79]highlowN/A
3D-aware T2I [29, 55]mediumhighN/A
T2V [7, 21, 56, 69, 72]lowlowhigh
Our methodmediumhighmedium
+ +a three-way tradeoff between the quality of appearance, 3D structure, and motion (see Table 1); existing techniques obtain satisfactory results in just one or two of these categories. For example, while SDS produces images that appear realistic when rendering a generated scene from any particular viewpoint, inspecting multiple viewpoints can reveal that the scene has several faces or heads, replicated appendages, or other incorrectly repeated 3D structures—an issue now referred to as the "Janus problem" [55].1 One way to improve 3D structure is to use SDS with a 3D-aware diffusion model that is trained to generate images from different camera viewpoints [33]. But 3D-aware models sacrifice appearance quality as they require fine-tuning on synthetic datasets of posed images [55]. Incorporating motion into a scene using SDS with a text-to-video model [69] typically degrades the appearance relative to static scenes generated with text-to-image models, which are more realistic (see Fig. 2). While different types of diffusion models thus have complementary qualities, they also have opposing weaknesses (Table 1). Therefore, it is not trivial to combine them in a way that yields text-to-4D generation with high-quality appearance, 3D structure, and motion. + +Here, we propose a method for text-to-4D scene generation that alleviates this three-way tradeoff using hybrid SDS, an alternating optimization scheme that blends gradient updates from multiple pre-trained diffusion models and synthesizes 4D scenes using the best qualities of each. The method consists of three stages of optimization: (1) we use a 3D-aware text-to-image model [55] to generate an initial static 3D scene (without the Janus problem); (2) we continue the optimization by blending in alternating supervision with variational SDS [71] and a text-to-image model to improve appearance; (3) we blend in alternating supervision using video SDS with a text-to-video model [69] to add motion to the scene. By smoothly incorporating supervisory signals from these three diffusion models throughout the training process, we achieve text-driven 4D scene generation with state-of-the-art quality in terms of appearance, 3D structure, and motion. Overall we provide the following contributions. We introduce hybrid SDS, a technique that extracts desir + +![](images/0408112f472c85c72761b8513b059cddd1634e8e624b099869a98277e5d3c693.jpg) +Figure 2. Comparing text-to-image and text-to-video models. Rendered frames from Stable Diffusion version 2.1 (top; text-to-image) [1] and Zeroscope version 2 (bottom; text-to-video) [3] show significant disparity in appearance, with the text-to-image model appearing far more realistic. + +able qualities from multiple pre-trained diffusion models and alleviates a tradeoff between appearance, 3D structure, and motion in text-to-4D scene generation. + +- We provide a quantitative and qualitative evaluation of the method, and we explore the three-way tradeoff space with ablation studies to facilitate future research. +- We demonstrate text-to-4D generation based on open-source pretrained models and will make all codes and evaluation procedures publicly available. +- We present state-of-the-art results for the task of text-to-4D generation. + +# 2. Related Work + +Our method is related to techniques from multiple areas of generative modeling, including text-to-image, text-to-video, and text-to-3D models. 
For more extensive discussions of related works, we refer readers to a recent state-of-the-art report on diffusion models [43]. + +Text-to-image generation. Methods for generating images from text prompts are a relatively new innovation, first demonstrated using generative adversarial networks [49, 73, 80]. The problem itself is also related to other methods for text-based image retrieval [34] or image-conditioned text generation [61, 77]. More recently, models trained on text-image datasets with billions of samples [54] have become the state of the art for this task [51]. + +Diffusion models [20, 59] are a popular architecture for generative modeling on large-scale datasets, and autoregressive models have also shown promising results [47, 78]. Typically, these methods exploit a pretrained text encoder, such as CLIP [46], to encode the text prompt into a feature + +vector used to condition the diffusion model [40, 48]. In diffusion models, high-resolution (i.e., megapixel) image generation is achieved by applying repeated upsampling layers [22, 48] or performing diffusion in the lower-resolution latent space of an autoencoder and then decoding the result to recover an image at the nominal resolution [16, 51]. Our work incorporates two open-source text-to-image diffusion models: Stable Diffusion [51] and MVDream [55] (a recent 3D-aware diffusion model) to enable 4D scene generation. + +Text-to-video generation. Our work relies on the burgeoning field of video generation via diffusion models, an area that is somewhat constrained by the limited scale of video datasets. To counteract this, methods often utilize a hybrid training approach on both image and video datasets, such as WebVid-10M [5], HD-VG-130M [69], or HD-VILA-100M [74]. Recent approaches in this field typically employ variations of pixel-space upsampling (both in space and time) [21] or latent space upsampling to improve spatial and temporal resolution [17, 18, 70, 83]. Autoregressive models distinguish themselves by their ability to generate videos of varying lengths [65]. Further improvements in video synthesis have been achieved by finetuning pre-trained text-to-image diffusion models on video data [7, 56, 72], or separating the content and motion generation process by using an initial image frame as a starting point [17, 72]. Despite recent advances in text-to-video synthesis, the fidelity of generated videos still lags behind that of static image generation (see Fig. 2) and so they perform poorly when used directly with SDS for text-to-4D generation. Instead, our work leverages an open-source latent space text-to-video diffusion model called Zeroscope [3] (extended from the Modelscope architecture [68]) together with other pre-trained, open-source diffusion models using hybrid SDS. + +Text-to-3D generation. Early methods for text-to-3D generation relied on parsers to convert input text to a semantic representation and synthesized scenes from an object database [4, 10, 12]. Later, automated, data-driven methods used multi-modal datasets [11], and pre-trained models, such as CLIP [46], to edit or stylize an input 3D mesh [14, 26] or a radiance field [66]. More recently, CLIP-based supervision enabled synthesis of entire 3D scenes [25, 53], and these techniques evolved into the most recent approaches, which optimize a mesh or radiance field based on SDS supervision [30, 44, 71]. The quality of their 3D structures has been improved by applying diffusion models that consider multiple viewpoints [31, 33, 55]. 
Alternatively, recent advancements have seen a shift towards using diffusion or transformer models to transform an input 2D image into a 3D representation for novel-view synthesis [9, 15, 35, 45, 62, 63, 76]. Still, these techniques do not yet support generating 4D scenes.

Our work is most closely related to Make-A-Video3D (MAV3D) [57], a recent method for text-to-4D generation that integrates SDS-based supervision in two separate stages: first with a text-to-image model and subsequently with a text-to-video model. Similar to MAV3D, we aim to generate dynamic 3D scenes; however, our approach uses hybrid SDS, which allows gradient updates from multiple models to be smoothly blended together in an alternating optimization. Our approach generates high-quality dynamic 3D scenes and does not suffer from Janus problems.

Concurrent works. Concurrent works on text-to-4D [32, 82], image-to-4D [50, 81, 82], and video-to-4D [27, 41, 75] similarly use recent diffusion models for 4D generation.

# 3. Method

Our approach for text-to-4D generation builds upon a hash-encoding-based neural representation [39] that implicitly decomposes the scene into static and dynamic feature grids [64]. In this section we overview our representation for 4D neural rendering and describe the optimization procedure based on hybrid SDS (see Fig. 3).

# 3.1. 4D Neural Rendering

Volumetric neural rendering methods represent a scene using a neural representation to parameterize the attenuation and emission of light at every point in 3D space [36, 38]. We can use such a representation to render an image by casting a ray from the camera center of projection, through each pixel location, and into the scene. For sampled points along the ray $\pmb{\mu} \in \mathbb{R}^3$, we query a neural representation to retrieve a volumetric density $\tau \in \mathbb{R}_+$ and color $\mathbf{c} \in \mathbb{R}_+^3$, which describe attenuation and emission of light, respectively, at a particular point. Then, the resulting density and color samples are alpha-composited to recover the color of a rendered pixel $\mathbf{C}$ as

$$
\mathbf{C} = \sum_{i} w_{i} \mathbf{c}_{i}, \quad w_{i} = \alpha_{i} \prod_{j < i} (1 - \alpha_{j}), \tag{1}
$$

where $\alpha_{i} = 1 - e^{-\tau_{i}\|\pmb{\mu}_{i} - \pmb{\mu}_{i + 1}\|}$. We query the neural representation using an additional input time variable $t$, which enables modeling time-varying density and color.

We illustrate the neural representation in Fig. 3; it consists of two multi-resolution hash tables to disentangle static and dynamic scene modeling.

Following Müller et al. [39], the static hash table stores learnable feature vectors that are indexed by a voxel-lookup and hashing operation and decoded into density and color using two small multilayer perceptrons (MLPs). Concretely, we consider the neural representation

$$
\mathcal{N}_{\theta}: \boldsymbol{\mu}, t \rightarrow \tau, \mathbf{c} \tag{2}
$$

with $\theta = \{\theta_{\mathrm{static}},\theta_{\mathrm{dynamic}},\theta_{\mathrm{MLP}}\}$ denoting all learnable parameters from the static and dynamic hash tables and the MLPs.
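To make Eqs. (1) and (2) concrete, the following is a minimal PyTorch sketch of the 4D field and the volume rendering step. It is an illustration only, not the released implementation: it uses a single hash-grid level per table (rather than the multiscale tables described next), small illustrative sizes, and hypothetical names (`HashGrid`, `FourDField`, `composite_ray`).

```python
import torch
import torch.nn as nn

PRIMES = torch.tensor([1, 2654435761, 805459861, 3674653429])  # hashing primes in the style of [39]

class HashGrid(nn.Module):
    """Single-level hash grid with multilinear interpolation (the full method uses S levels)."""
    def __init__(self, dim_in, n_entries=2**16, dim_feat=8, res=32):
        super().__init__()
        self.dim_in, self.res, self.n_entries = dim_in, res, n_entries
        self.table = nn.Parameter(1e-4 * torch.randn(n_entries, dim_feat))

    def forward(self, x):  # x in [0, 1]^dim_in, shape (B, dim_in)
        xg = x * (self.res - 1)
        lo, frac = xg.floor().long(), xg - xg.floor()
        feat = 0.0
        for corner in range(2 ** self.dim_in):  # tri- or quadrilinear interpolation
            offs = torch.tensor([(corner >> d) & 1 for d in range(self.dim_in)])
            idx = ((lo + offs) * PRIMES[: self.dim_in]).sum(-1) % self.n_entries
            w = torch.where(offs.bool(), frac, 1.0 - frac).prod(-1, keepdim=True)
            feat = feat + w * self.table[idx]
        return feat

class FourDField(nn.Module):
    """N_theta: (mu, t) -> (tau, c) with summed static and dynamic features (Eq. 2)."""
    def __init__(self, dim_feat=8):
        super().__init__()
        self.static = HashGrid(3, dim_feat=dim_feat)
        self.dynamic = HashGrid(4, dim_feat=dim_feat)
        self.mlp_tau = nn.Sequential(nn.Linear(dim_feat, 64), nn.ReLU(), nn.Linear(64, 1), nn.Softplus())
        self.mlp_c = nn.Sequential(nn.Linear(dim_feat, 64), nn.ReLU(), nn.Linear(64, 3), nn.Sigmoid())

    def forward(self, mu, t):
        f = self.static(mu) + self.dynamic(torch.cat([mu, t], dim=-1))  # f = f_static + f_dynamic
        return self.mlp_tau(f).squeeze(-1), self.mlp_c(f)

def composite_ray(tau, mu, c):
    """Alpha-composite density/color samples along one ray into a pixel color (Eq. 1)."""
    deltas = torch.cat([(mu[1:] - mu[:-1]).norm(dim=-1), tau.new_tensor([1e10])])
    alphas = 1.0 - torch.exp(-tau * deltas)                               # alpha_i
    trans = torch.cumprod(torch.cat([alphas.new_ones(1), 1.0 - alphas[:-1]]), dim=0)
    return ((alphas * trans)[:, None] * c).sum(0)                         # sum_i w_i c_i

# Toy usage: 64 ordered samples along one ray, queried at time t = 0.3.
field = FourDField()
mu = torch.sort(torch.rand(64, 3), dim=0).values
tau, c = field(mu, torch.full((64, 1), 0.3))
pixel = composite_ray(tau, mu, c)
```

A production implementation (e.g., the Instant NGP encoding used by threestudio) would use many resolution levels and fused kernels, but the data flow above mirrors the description in this section.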
![](images/a24e7e6dc53088c14d5889b41abcd7ed78b98954b9f9997f6848a9c43dbad622.jpg)
Figure 3. Overview. A 4D radiance field is parameterized using a neural representation with a static and dynamic multiscale hash table of features. Images and videos are rendered from the representation using volume rendering, and we supervise the representation using hybrid score distillation sampling—a technique that combines gradients from multiple types of pre-trained diffusion models. In the first stage of training we use gradients $\nabla_{\theta}\mathcal{L}_{3\mathrm{D}}$ from a 3D-aware text-to-image model (3D-T2I) to iteratively optimize a representation without the Janus problem. Next, we blend in gradient supervision using variational SDS with a text-to-image model (T2I) to improve the appearance (i.e., we alternate supervision between $\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}$ and $\nabla_{\theta}\mathcal{L}_{3\mathrm{D}}$). In the last stage we incorporate gradients $(\nabla_{\theta}\mathcal{L}_{\mathrm{VID}})$ from a text-to-video model (T2V) to add motion to the scene, and we update the scene using the other models in an alternating fashion.

For a given $\mu$, we query the static hash table by identifying the closest voxel at each scale $1 \leq s \leq S$. Then, we trilinearly interpolate the feature values from the voxel vertices after retrieving them from the hash table. Retrieved features from each scale are concatenated as $\mathbf{f}_{\mathrm{static}} = \mathbf{f}_{\mathrm{static}}^{(1)} \oplus \dots \oplus \mathbf{f}_{\mathrm{static}}^{(S)}$. We follow the same procedure to query the dynamic hash table given $(\mu, t)$, except we use quadrilinear interpolation to interpolate feature values. The resulting features from the static and dynamic hash tables are added as $\mathbf{f} = \mathbf{f}_{\mathrm{static}} + \mathbf{f}_{\mathrm{dynamic}}$. We do not model view-dependent effects in the feature encoding. Finally, we decode density and color as $\mathrm{MLP}_{\tau}(\mathbf{f})$ and $\mathrm{MLP}_{\mathrm{c}}(\mathbf{f})$, respectively.

# 3.2. Hybrid Score Distillation Sampling

We leverage the 4D representation along with SDS to create dynamic 3D scenes from a text prompt. Our hybrid approach incorporates three different flavors of SDS that are smoothly merged during an alternating optimization procedure to improve the structure and quality of the 4D model:

1. SDS applied to a 3D-aware text-to-image diffusion model to optimize a static scene without the Janus problem.
2. Variational score distillation sampling (VSD; a modified version of SDS [71]) using a standard text-to-image model [51] to improve the appearance of the static scene.
3. Video SDS using a text-to-video model [69], which extends SDS to multiple video frames and adds motion to the scene.

In the following, we describe each type of SDS and how it is used for text-to-4D generation.

3D-aware scene optimization. We first consider optimizing a static scene using SDS with a 3D-aware text-to-image diffusion model [55]. The diffusion model is pre-trained using a stochastic forward process that slowly adds Gaussian noise to multiview images $\mathbf{x}$ over timesteps $0\leq t_{d}\leq T_{d}$.
With increasing $t_d$, the process yields noisy images $\mathbf{z}_{t_d}$ that, at $t_d = T_d$, are close to zero-mean Gaussian. After training, the model reverses this process to add structure to the noisy images. It predicts $\hat{\mathbf{x}}_{\phi}(\mathbf{z}_{t_d}; t_d, \mathbf{y}, \mathbf{T})$, which approximates the output of an optimal denoiser at each timestep $t_d$, conditioned on a text embedding $\mathbf{y}$ [48, 51, 52] and the camera extrinsics $\mathbf{T}$ corresponding to each image. In practice, text-to-image diffusion models typically predict the noise content $\epsilon_{\phi}$ rather than the denoised image $\hat{\mathbf{x}}_{\phi}$. But note that the denoised image can still be obtained as $\hat{\mathbf{x}}_{\phi}(\mathbf{z}_{t_d}; t_d, \mathbf{y}, \mathbf{T}) \propto \mathbf{z}_{t_d} - \epsilon_{\phi}(\mathbf{z}_{t_d}; t_d, \mathbf{y}, \mathbf{T})$, i.e., by subtracting the predicted noise from the noisy image [20]. We implement 3D-aware SDS by rendering multiple images $\mathbf{x}_{\theta}$ from the neural representation, adding noise $\boldsymbol{\epsilon}$, and using the 3D-aware diffusion model [55] to predict the noise $\epsilon_{\phi}$ using classifier-free guidance [19]. To update the parameters $\theta$ of the neural representation, we use the 3D-aware SDS gradient:

$$
\nabla_{\theta}\mathcal{L}_{\mathrm{3D}} = \mathbb{E}_{t_{d},\boldsymbol{\epsilon},\mathbf{T}}\left[ w(t_{d}) \left(\boldsymbol{\epsilon}_{\phi}\left(\mathbf{z}_{t_{d}}; t_{d}, \mathbf{y}, \mathbf{T}\right) - \boldsymbol{\epsilon}\right) \frac{\partial \mathbf{x}_{\theta}}{\partial \theta} \right], \tag{3}
$$

where $w(t_{d})$ is a weighting function that depends on the diffusion timestep, and we add a stop gradient to the output of the diffusion model [55]. Intuitively, the SDS loss queries the diffusion model to see how it adds structure to an image; this information is then used to backpropagate gradients to the scene representation.

Improving appearance using VSD. We incorporate an additional loss term based on VSD [71] to improve the appearance of images rendered from the scene. This term uses a pre-trained text-to-image model [51] along with a finetuning scheme that improves image quality over the 3D-aware text-to-image model alone. We follow Wang et al. [71] and augment the standard SDS gradient with the output of an additional text-to-image diffusion model that is finetuned using a low-rank adaptation [24] during scene optimization. Specifically, we have

$$
\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}} = \mathbb{E}_{t_{d},\boldsymbol{\epsilon},\mathbf{T}}\left[ w(t_{d}) \left(\boldsymbol{\epsilon}_{\phi}(\mathbf{z}_{t_{d}}; t_{d}, \mathbf{y}) - \boldsymbol{\epsilon}_{\phi}^{\prime}(\mathbf{z}_{t_{d}}; t_{d}, \mathbf{y}, \mathbf{T})\right) \frac{\partial \mathbf{x}_{\theta}}{\partial \theta} \right], \tag{4}
$$

where $\epsilon_{\phi}^{\prime}$ is the noise predicted using a finetuned version of the diffusion model that incorporates additional conditioning from the camera extrinsics $\mathbf{T}$; here, we let $\mathbf{z}_{t_d}$ represent a noisy version of a single image rendered from $\mathcal{N}_{\theta}$. The model is finetuned using the standard diffusion objective

$$
\min_{\phi^{\prime}} \mathbb{E}_{t_{d},\boldsymbol{\epsilon},\mathbf{T}}\left[ \|\boldsymbol{\epsilon}_{\phi}^{\prime}\left(\mathbf{z}_{t_{d}}; t_{d}, \mathbf{y}, \mathbf{T}\right) - \boldsymbol{\epsilon}\|_{2}^{2} \right]. \tag{5}
$$

Note that, different from the original description of VSD [71], we find we can omit the simultaneous optimization over multiple scene samples (i.e., the variational component of [71]), which reduces memory requirements without significantly degrading appearance.
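In practice, gradients of the form of Eqs. (3) and (4) are usually injected through the renderer with a surrogate loss, since the denoiser itself is not differentiated through. The sketch below illustrates this pattern; `render_fn` and `diffusion` are hypothetical interfaces (the denoiser is assumed to expose `alphas_cumprod` and a `pred_noise(...)` method), and real pipelines such as threestudio organize these details differently.

```python
import torch

def sds_step(render_fn, diffusion, text_emb, cams, optimizer,
             t_range=(20, 980), guidance_scale=50.0):
    """One score-distillation update in the style of Eq. (3): render, noise, denoise, backprop."""
    x = render_fn(cams)                                   # rendered image(s), differentiable w.r.t. theta
    t = torch.randint(*t_range, (1,), device=x.device)    # random diffusion timestep t_d
    a_bar = diffusion.alphas_cumprod[t]
    eps = torch.randn_like(x)
    z_t = a_bar.sqrt() * x + (1.0 - a_bar).sqrt() * eps   # forward-diffuse the rendering
    with torch.no_grad():                                 # stop-gradient through the denoiser
        eps_pred = diffusion.pred_noise(z_t, t, text_emb, cams, guidance_scale)
    w = 1.0 - a_bar                                       # one common choice of w(t_d)
    grad = w * (eps_pred - eps)                           # residual that drives Eq. (3)
    # Multiplying the detached residual with x makes autograd produce grad * dx/dtheta
    # on backward(), i.e., exactly the SDS gradient with respect to the scene parameters.
    loss = (grad.detach() * x).sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```

For the VSD update in Eq. (4), the residual `eps_pred - eps` is replaced by the difference between the pre-trained and LoRA-finetuned noise predictions, and a separate optimizer step trains the LoRA model with the objective in Eq. (5).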
Adding motion with Video SDS. Last, we use supervision from a text-to-video diffusion model [69] to add motion to the generated scene. This procedure extends the original SDS gradient by incorporating structure added by the diffusion model to all noisy video frames [57]. The video SDS gradient is given as

$$
\nabla_{\theta}\mathcal{L}_{\mathrm{VID}} = \mathbb{E}_{t_{d},\boldsymbol{\epsilon}}\left[ w(t_{d}) \left(\boldsymbol{\epsilon}_{\phi}\left(\mathbf{z}_{t_{d}}; t_{d}, \mathbf{y}\right) - \boldsymbol{\epsilon}\right) \frac{\partial \mathbf{X}_{\theta}}{\partial \theta} \right]. \tag{6}
$$

To simplify notation, we re-use $\boldsymbol{\epsilon}_{\phi}$ and $\boldsymbol{\epsilon}$ here to represent the predicted and actual noise for each video frame, and we let $\mathbf{X}_{\theta}$ be a collection of $V$ video frames $\mathbf{X}_{\theta} = [\mathbf{x}_{\theta}^{(1)},\dots,\mathbf{x}_{\theta}^{(V)}]^{T}$ rendered from the representation.
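Structurally, the only change relative to the single-image case is that the denoiser sees all $V$ frames jointly, so temporal structure in its prediction flows back into the dynamic features. A hedged sketch, with `render_frame_fn` and `video_diffusion` as assumed interfaces rather than the released code:

```python
import torch

def video_sds_loss(render_frame_fn, video_diffusion, text_emb, n_frames=16):
    """Surrogate loss whose gradient matches Eq. (6); assumes a hypothetical
    `video_diffusion` exposing `alphas_cumprod` and `pred_noise(z_t, t, text_emb)`."""
    times = torch.linspace(0.0, 1.0, n_frames)
    X = torch.stack([render_frame_fn(t) for t in times])   # (V, 3, H, W) rendered frames
    t_d = torch.randint(20, 980, (1,))
    a_bar = video_diffusion.alphas_cumprod[t_d]
    eps = torch.randn_like(X)
    z_t = a_bar.sqrt() * X + (1.0 - a_bar).sqrt() * eps    # noise the whole clip at once
    with torch.no_grad():
        eps_pred = video_diffusion.pred_noise(z_t, t_d, text_emb)
    grad = (1.0 - a_bar) * (eps_pred - eps)
    return (grad.detach() * X).sum()                        # backward() yields Eq. (6)
```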
Optimization procedure - Algorithm 1. We optimize the 4D representation in three stages that smoothly blend supervision in alternating steps from (1) 3D-aware SDS, (2) VSD, and (3) video SDS.

Stage 1. In the first stage of optimization, we update $\mathcal{N}_{\theta}$ using gradients from 3D-aware SDS until convergence. Since this stage focuses on optimizing a static scene, we freeze (i.e., do not update) the parameters of the dynamic hash table $\theta_{\mathrm{dynamic}}$ and only update the static hash table and decoder MLP. We set the total number of first-stage iterations $N_{\mathrm{stage-1}}$ to match that of Shi et al. [55], which allows the optimization to proceed until there are no distinguishable changes in the rendered scene from one iteration to the next.

Stage 2. Next, we add VSD gradients using an alternating optimization procedure. At each iteration, we randomly select to update the model using $\nabla_{\theta}\mathcal{L}_{3\mathrm{D}}$ or $\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}$ with probability $P_{3\mathrm{D}}$ and $P_{\mathrm{IMG}}$. We continue this alternating optimization for $N_{\mathrm{stage-2}}$ iterations, until convergence. As we show in the next section, this stage of optimization results in improved appearance compared to using $\nabla_{\theta}\mathcal{L}_{3\mathrm{D}}$ alone while also being free of the Janus problem.

Algorithm 1 Hybrid Score Distillation Sampling
Require: $\mathcal{N}_{\theta}$ ▷ 4D neural representation
Require: $N_{\mathrm{stage-1}}, N_{\mathrm{stage-2}}, N_{\mathrm{stage-3}}$ ▷ iterations for each stage
Require: $P_{\mathrm{3D}}, P_{\mathrm{IMG}}$ ▷ update probabilities
Require: $\nabla_{\theta}\mathcal{L}_{\mathrm{3D}}, \nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}, \nabla_{\theta}\mathcal{L}_{\mathrm{VID}}$ ▷ SDS grads. (Eqs. 3, 4, 6)
1: // Stage 1
2: freeze dynamic hash map ($\theta_{\mathrm{dynamic}}$)
3: for iter in $N_{\mathrm{stage-1}}$ do ▷ 3D update
4: grad = $\nabla_{\theta}\mathcal{L}_{\mathrm{3D}}$
5: UPDATE(grad)
6: // Stage 2
7: for iter in $N_{\mathrm{stage-2}}$ do ▷ 3D or IMG update
8: grad = $\nabla_{\theta}\mathcal{L}_{\mathrm{3D}}$ with probability $P_{\mathrm{3D}}$, otherwise $\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}$
9: UPDATE(grad)
10: // Stage 3
11: decrease learning rate of static hash map ($\theta_{\mathrm{static}}$)
12: for iter in $N_{\mathrm{stage-3}}$ do ▷ 3D, IMG, or VID update
13: grad = $\nabla_{\theta}\mathcal{L}_{\mathrm{3D}}$ with probability $P_{\mathrm{3D}}$; $\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}$ with probability $P_{\mathrm{3D}}\cdot P_{\mathrm{IMG}}$; otherwise $\nabla_{\theta}\mathcal{L}_{\mathrm{VID}}$
14: if VID, unfreeze $\theta_{\mathrm{dynamic}}$
15: UPDATE(grad)
16: procedure UPDATE(grad)
17: $\mathbf{x} \leftarrow \mathcal{N}_{\theta}$ ▷ render images (Eq. 1)
18: take gradient step on grad ▷ optimize $\mathcal{N}_{\theta}$
19: if IMG, take finetuning step (Eq. 5)
20: end procedure

Stage 3. Last, we update the representation using a combination of all gradient updates. Specifically, we randomly select to update the model at each iteration using $\nabla_{\theta}\mathcal{L}_{\mathrm{3D}}$, $\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}$, or $\nabla_{\theta}\mathcal{L}_{\mathrm{VID}}$ with probability $P_{\mathrm{3D}}$, $P_{\mathrm{3D}}\cdot P_{\mathrm{IMG}}$, and $1 - P_{\mathrm{3D}} - P_{\mathrm{3D}}\cdot P_{\mathrm{IMG}}$, respectively. Since we now aim to incorporate motion into the representation, we unfreeze the parameters of the dynamic hash table during the update with $\nabla_{\theta}\mathcal{L}_{\mathrm{VID}}$ but keep them frozen for updates using the text-to-image models. We also decrease the learning rate of the static hash table to preserve the high-quality appearance from the previous stage. We repeat the alternating optimization in the final stage until convergence, which we find occurs consistently within $N_{\mathrm{stage-3}}$ iterations. Overall, hybrid SDS effectively combines the strengths of each pre-trained diffusion model while avoiding quality degradations that result from naively combining gradients from each model.
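The alternating schedule of Algorithm 1 can be summarized in a few lines of Python. This is a sketch with illustrative callback names (`update_3d`, `update_img`, `update_vid`, etc.) standing in for the actual rendering, loss, and optimizer logic; it is not the training code released with the method.

```python
import random

def hybrid_sds(update_3d, update_img, update_vid,
               freeze_dynamic, unfreeze_dynamic, set_static_lr,
               n_stage=(10_000, 10_000, 100_000), p_3d=0.5, p_img=0.5):
    """Three-stage alternating optimization following Algorithm 1 (sketch)."""
    # Stage 1: static scene from 3D-aware SDS only.
    freeze_dynamic()
    for _ in range(n_stage[0]):
        update_3d()
    # Stage 2: alternate 3D-aware SDS and VSD updates to improve appearance.
    for _ in range(n_stage[1]):
        (update_3d if random.random() < p_3d else update_img)()
    # Stage 3: blend in video SDS to add motion.
    set_static_lr(1e-4)                      # preserve the appearance learned in stage 2
    for _ in range(n_stage[2]):
        r = random.random()
        if r < p_3d:
            update_3d()
        elif r < p_3d + p_3d * p_img:
            update_img()                     # also takes the LoRA finetuning step (Eq. 5)
        else:
            unfreeze_dynamic()               # dynamic hash table trains only on video updates
            update_vid()
            freeze_dynamic()
```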
# 3.3. Implementation

We implement hybrid SDS based on the threestudio framework [2], which includes implementations of MVDream [55] (for 3D-aware text-to-image diffusion and SDS) and ProlificDreamer [71] with Stable Diffusion [51] (text-to-image diffusion and VSD); we implement the video SDS updates using Zeroscope [3, 69].

![](images/0bc5e717e74819f668e110a46ef21906fa72852d57b5716238a2cb542b344c29.jpg)
Figure 4. Text-to-4D Comparison. We compare against MAV3D [57] and observe that our approach obtains significantly higher quality results.

Hyperparameter values. We initialize the 4D neural representation following [30, 44] and add an offset to the density predicted by the network in the center of the scene to promote object-centric reconstructions. We set the learning rates for the static hash map to 0.01, for the dynamic hash map to 0.01, and for the MLP to 0.001. We drop the learning rate for the static hash map to 0.0001 before the last stage to focus the gradient updates on the dynamic hash map. The values of $N_{\mathrm{stage-1}}$, $N_{\mathrm{stage-2}}$, and $N_{\mathrm{stage-3}}$ are set to 10000, 10000, and 100000, respectively. We set the probabilities for hybrid SDS to $P_{\mathrm{3D}} = 0.5$ and $P_{\mathrm{IMG}} = 0.5$ for a reasonable tradeoff with respect to appearance, 3D structure, and motion.

Rendering. Each of the diffusion models has a different native resolution, so we render images from $\mathcal{N}_{\theta}$ accordingly. We render four images from different camera positions for the 3D-aware SDS at the native ($256\times 256$ pixel) resolution of the 3D-aware text-to-image model. The VSD update is computed by rendering a $256 \times 256$ image and bilinearly upsampling the image to the native resolution of Stable Diffusion ($512 \times 512$). Finally, the video SDS update is computed by rendering 16 video frames at $160 \times 288$ resolution and upsampling to the native $320 \times 576$ resolution of Zeroscope.

# 4. Experiments

# 4.1. Metrics

We assess our method using CLIP Score [42] and a user study. We compare our model against MAV3D for 28 prompts and against our ablations for a subset of 5 prompts. Current text-to-4D models are costly to train, and many researchers in academia do not have access to the scale of resources available to large tech companies. Hence, we only used a subset due to computational limitations. To promote future research in this field, we open source the evaluation protocol for the user study along with the code: https://github.com/sherwinbahmani/4dfy.

CLIP Score. CLIP Score [42] evaluates the correlation between a text prompt and an image.

Table 2. Quantitative results. We compare our method against MAV3D and variations of 4D-fy with different loss terms or backbone architectures (i.e., with HexPlane [8]). The methods are evaluated in terms of CLIP Score (CLIP) and human preference based on appearance quality (AQ), 3D structure quality (SQ), motion quality (MQ), text alignment (TA), and overall preference (Overall). The numbers reported for human preference are the percentages of users who voted for our method over the corresponding method in head-to-head comparisons.

<table>
MethodCLIPHuman Preference
AQSQMQTAOverall
MAV3D [57]33.992%89%41%52%67%
4D-fy34.2
Ablation Study
4D-fy35.0
w/o ΦθL3D/IMG29.3100%100%78%86%94%
w/o ΦθL3D35.188%89%95%92%91%
w/o ΦθLIMG34.570%68%68%69%70%
w/o hybrid SDS33.8100%100%78%88%95%
w/ HexPlane34.595%92%90%92%95%
+ +responds to the cosine similarity between textual CLIP [46] embedding and visual CLIP [46] embedding. The score is bound between 0 and 100, where 100 is best. We calculate the CLIP score for MAV3D using the same procedure we use for our method. Specifically, for each input text prompt, we render a video using the same camera trajectory as MAV3D, i.e., moving around the scene in azimuth with a fixed elevation angle. Subsequently, we score each video frame with CLIP ViT-B/32 and average the scores over all frames and text prompts to derive the final CLIP score. + +User study. We conduct qualitative comparisons between our method and the baseline, MAV3D, by surveying 26 human evaluators. We use the same head-to-head comparison model as the user survey conducted by MAV3D. Specifically, we present text prompts alongside the corresponding outputs of our method and the baseline method in random order. Evaluators are requested to specify their overall preference for a video, as well as evaluate four specific properties: appearance quality, 3D structure quality, motion quality, and text alignment. In Table 2, we report the percentage of users who prefer each method overall and based on each of the four properties. We conduct $\chi^2$ -tests to evaluate statistical significance at the $p < 0.05$ level. Further details on the user study are included in the supplementary. + +# 4.2. Results + +We visualize spatio-temporal renderings along with depth maps in comparison to MAV3D in Fig. 4. Although both methods can synthesize 4D scenes, MAV3D noticeably lacks detail. In contrast, our method produces realistic renderings across space and time. We report quantitative metrics in + +Table 2. In terms of CLIP Score and overall preference in the user study 4D-fy outperforms MAV3D. Users indicated a statistically significant preference towards 4D-fy compared to MAV3D in terms of appearance quality, 3D structure quality, text alignment, and overall preference. They rated the motion quality roughly on par with MAV3D, which used a proprietary text-to-video model. For example, overall, $67\%$ of users prefer our method over $33\%$ for MAV3D. + +# 4.3. Ablations + +We provide an in-depth analysis motivating our hybrid SDS training scheme by ablating each component and evaluating the use of a 4D neural representation more similar to that of MAV3D. We provide ablations in Table 2 and in Fig. 5. + +Image guidance (w/o $\nabla_{\theta}\mathcal{L}_{3D / \mathrm{IMG}}$ ). Technically, learning a dynamic 3D scene solely from a text-to-video model without text-to-image guidance is possible. To demonstrate the drawbacks of this approach, we present results where we skip the first two stages and directly train the model with text-to-video guidance only. This corresponds to setting $P_{3\mathrm{D}} = 0$ and $P_{\mathrm{IMG}} = 0$ . Our experiments reveal that the text-to-video model fails to provide realistic 3D structure and high-quality appearance for generating a dynamic 3D scene. + +3D-aware guidance (w/o $\nabla_{\theta}\mathcal{L}_{\mathbf{3D}}$ ). We find that using a 3D-aware diffusion model is crucial for generating realistic 3D structures. If we remove the 3D-aware diffusion model, i.e., by setting $P_{\mathrm{3D}} = 0$ , we can generate scenes with similar motion and high-quality appearance, but the 3D structure is degraded. This is evident for both scenes in Fig. 5. + +VSD guidance (w/o $\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}$ ). 
# 4.2. Results

We visualize spatio-temporal renderings along with depth maps in comparison to MAV3D in Fig. 4. Although both methods can synthesize 4D scenes, MAV3D noticeably lacks detail. In contrast, our method produces realistic renderings across space and time. We report quantitative metrics in Table 2. In terms of CLIP Score and overall preference in the user study, 4D-fy outperforms MAV3D. Users indicated a statistically significant preference towards 4D-fy compared to MAV3D in terms of appearance quality, 3D structure quality, text alignment, and overall preference. They rated the motion quality roughly on par with MAV3D, which used a proprietary text-to-video model. For example, overall, $67\%$ of users prefer our method over $33\%$ for MAV3D.

# 4.3. Ablations

We provide an in-depth analysis motivating our hybrid SDS training scheme by ablating each component and evaluating the use of a 4D neural representation more similar to that of MAV3D. We provide ablations in Table 2 and in Fig. 5.

Image guidance (w/o $\nabla_{\theta}\mathcal{L}_{3D / \mathrm{IMG}}$). Technically, learning a dynamic 3D scene solely from a text-to-video model without text-to-image guidance is possible. To demonstrate the drawbacks of this approach, we present results where we skip the first two stages and directly train the model with text-to-video guidance only. This corresponds to setting $P_{3\mathrm{D}} = 0$ and $P_{\mathrm{IMG}} = 0$. Our experiments reveal that the text-to-video model fails to provide realistic 3D structure and high-quality appearance for generating a dynamic 3D scene.

3D-aware guidance (w/o $\nabla_{\theta}\mathcal{L}_{\mathbf{3D}}$). We find that using a 3D-aware diffusion model is crucial for generating realistic 3D structures. If we remove the 3D-aware diffusion model, i.e., by setting $P_{\mathrm{3D}} = 0$, we can generate scenes with similar motion and high-quality appearance, but the 3D structure is degraded. This is evident for both scenes in Fig. 5.

VSD guidance (w/o $\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}$). We find that VSD helps provide a realistic scene appearance; if we disable it during scene generation, i.e., $P_{\mathrm{IMG}} = 0$, there are some negative effects. For example, with VSD included (Fig. 5), the ice cream cone in the bucket (top row) is more detailed, and the dog's face (bottom row) is sharper (please zoom in).

Hybrid SDS. To illustrate the impact of our hybrid SDS approach we disable image guidance after the second stage by setting $P_{\mathrm{3D}} = 0$ and $P_{\mathrm{IMG}} = 0$ for the third stage only. This aligns with the MAV3D training scheme, where a static model is pre-trained with text-to-image and subsequently fine-tuned with text-to-video. Our quantitative and qualitative analysis shows that this approach results in degraded appearance and 3D structure. We find that incorporating text-to-image, 3D-aware text-to-image, and text-to-video via hybrid SDS in the final optimization stage preserves a realistic appearance and high-quality 3D structure.

Backbone architecture. Finally, we ablate the hash-grid-based 4D representation by replacing it with the HexPlane [8, 13] architecture. This representation similarly disentangles static and dynamic scene components and can be readily integrated into our pipeline. The HexPlane approach fails to match the appearance quality of the hash-grid-based representation. MAV3D uses HexPlane but implements a multi-scale variant with a large 5-layer decoding MLP featuring 128 hidden units. We could not re-implement this approach as the model does not fit on an 80 GB A100 GPU. To allow for a fair comparison, we instead increased the capacity of HexPlane to match the memory consumption of our hash-grid-based representation. We expect that increasing the capacity of HexPlane and longer training times could lead to similar results as our representation.

![](images/d1b8cf07818ff185d04d318b5afa06dfacda54a5971ea22323fa7f4b80b6cda7.jpg)
Figure 5. Ablation study. We assess the qualitative impact of removing gradient updates from different models during optimization. Our method without image guidance $(\nabla_{\theta}\mathcal{L}_{3\mathrm{D} / \mathrm{IMG}})$ does not produce realistic appearance and 3D structure. Removing the 3D-aware guidance $(\nabla_{\theta}\mathcal{L}_{3\mathrm{D}})$ generates high-quality appearance but low-quality 3D structure. Our approach without VSD $(\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}})$ reduces the appearance quality. Hybrid SDS is crucial for appearance and 3D structure, while using HexPlane reduces the appearance quality. Best viewed digitally.

# 5. Conclusion

Our method synthesizes high-quality 4D scenes from text prompts using a novel hybrid score distillation sampling procedure. Our work alleviates a three-way tradeoff between appearance, 3D structure, and motion and is the first to build on open-source models. We will release the code to facilitate future research in text-to-4D generation.

Limitations. Although our method produces compelling dynamic 3D scenes, there are several limitations and avenues for future work. First, the complexity of motion in our scenes is limited to simple movements. We believe that our method will directly benefit from future progress in text-to-video generation, as current text-to-video models suffer from low-quality renderings and unrealistic motion. Another way to improve motion could be exploiting recently proposed dynamic representations, e.g., dynamic 3D Gaussians [37].
Moreover, current metrics in text-to-3D generation are not sufficient, as they mainly rely on image-based metrics and user studies. Designing more sophisticated 3D and 4D metrics is an important direction for future work. Lastly, generating each scene takes a significant amount of time. Concurrent text-to-3D works [23, 28] alleviate this problem by training a large-scale model on 3D data, allowing generation within seconds. Incorporating our hybrid optimization procedure to blend between large-scale pre-training on 2D, 3D, and video data could enable fast text-to-4D generation. + +Ethics Statement. We condemn the application of our method for creating realistic fake content intended to harm specific entities or propagate misinformation. + +# 6. Acknowledgements + +This work was supported by the Natural Sciences and Engineering Research Council of Canada (NSERC) Discovery Grant program, the Digital Research Alliance of Canada, and by the Advanced Research Computing at Simon Fraser University. It was also supported in part by ARL grant W911NF-21-2-0104, a Vannevar Bush Faculty Fellowship, a gift from the Adobe Corporation, a PECASE by the ARO, NSF award 1839974, Stanford HAI, and a Samsung GRO. + +# References + +[1] Stable Diffusion version 2. https://github.com/Stability-AI/stablediffusion. Accessed: 2023-10-31. 2 +[2] Threestudio Github page. https://github.com/threestudio-project/threestudio. Accessed: 2023-10-31.5 +[3] Zeroscope text-to-video model. https://huggingface.co/cerspense/zeroscope_v2_576w. Accessed: 2023-10-31. 2, 3, 6 +[4] Giovanni Adorni and Mauro Di Manzo. Natural language input for scene generation. In Proc. EACL, 1983. 3 +[5] Max Bain, Arsha Nagrani, Gül Varol, and Andrew Zisserman. Frozen in time: A joint video and image encoder for end-to-end retrieval. In Proc. ICCV, 2021. 3 +[6] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Ji-aming Song, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, Bryan Catanzaro, et al. eDiff-I: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 1, 2 +[7] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proc. CVPR, 2023. 2, 3 +[8] Ang Cao and Justin Johnson. HexPlane: A fast representation for dynamic scenes. In Proc. CVPR, 2023. 7 +[9] Eric R Chan, Koki Nagano, Matthew A Chan, Alexander W Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3D-aware diffusion models. In Proc. ICCV, 2023. 3 +[10] Angel Chang, Manolis Savva, and Christopher D Manning. Learning spatial knowledge for text to 3D scene generation. In Proc. EMNLP, 2014. 3 +[11] Kevin Chen, Christopher B Choy, Manolis Savva, Angel X Chang, Thomas Funkhouser, and Silvio Savarese. Text2shape: Generating shapes from natural language by learning joint embeddings. In Proc. ACCV, 2018. 3 +[12] Bob Coyne and Richard Sproat. Wordseye: An automatic text-to-scene conversion system. In Proc. SIGGRAPH, 2001. 3 +[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbæk Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proc. CVPR, 2023. 7 +[14] William Gao, Noam Aigerman, Thibault Groueix, Vova Kim, and Rana Hanocka. Textdeformer: Geometry manipulation using text guidance. In Proc. SIGGRAPH, 2023. 
3 +[15] Jiatao Gu, Alex Trevithick, Kai-En Lin, Joshua M Susskind, Christian Theobalt, Lingjie Liu, and Ravi Ramamoorthi. Nerfdiff: Single-image view synthesis with Nerf-guided distillation from 3d-aware diffusion. In Proc. ICML, 2023. 3 +[16] Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, and Baining Guo. Vector quantized diffusion model for text-to-image synthesis. In Proc. CVPR, 2022. 3 + +[17] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Imagine your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 3 +[18] Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. Latent video diffusion models for high-fidelity video generation with arbitrary lengths. arXiv preprint arXiv:2211.13221, 2022. 3 +[19] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. In Proc. NeurIPS Workshop on Deep Generative Models, 2021. 4 +[20] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Proc. NeurIPS, 2020. 1, 2, 4 +[21] Jonathan Ho, William Chan, Chitwan Sahara, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 2, 3 +[22] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. The Journal of Machine Learning Research, 23(1):2249-2281, 2022. 3 +[23] Yicong Hong, Kai Zhang, Jiuming Gu, Sai Bi, Yang Zhou, Difan Liu, Feng Liu, Kalyan Sunkavalli, Trung Bui, and Hao Tan. Lrm: Large reconstruction model for single image to 3D. arXiv preprint arXiv:2311.04400, 2023. 8 +[24] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. In Proc. ICLR, 2021. 5 +[25] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proc. CVPR, 2022. 1, 3 +[26] Nikolay Jetchev. Clipmatrix: Text-controlled creation of 3D textured meshes. arXiv preprint arXiv:2109.12922, 2021. 3 +[27] Yanqin Jiang, Li Zhang, Jin Gao, Weimin Hu, and Yao Yao. Consistent4D: Consistent $360^{\circ}$ dynamic object generation from monocular video. arXiv preprint arXiv:2311.02848, 2023. 3 +[28] Jiahao Li, Hao Tan, Kai Zhang, Zexiang Xu, Fujun Luan, Yinghao Xu, Yicong Hong, Kalyan Sunkavalli, Greg Shakhnarovich, and Sai Bi. Instant3D: Fast text-to-3D with sparse-view generation and large reconstruction model. arXiv preprint arXiv:2311.06214, 2023. 8 +[29] Weiyu Li, Rui Chen, Xuelin Chen, and Ping Tan. Sweetdreamer: Aligning geometric priors in 2D diffusion for consistent text-to-3D. arXiv preprint arXiv:2310.02596, 2023. 2 +[30] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3D: High-resolution text-to-3D content creation. In Proc. CVPR, 2023. 3, 6 +[31] Yukang Lin, Haonan Han, Chaoqun Gong, Zunnan Xu, Yachao Zhang, and Xiu Li. Consistent123: One image to highly consistent 3D asset using case-aware diffusion priors. arXiv preprint arXiv:2309.17261, 2023. 3 + +[32] Huan Ling, Seung Wook Kim, Antonio Torralba, Sanja Fidler, and Karsten Kreis. Align your gaussians: Text-to-4D with dynamic 4D gaussians and composed diffusion models. 
arXiv preprint arXiv:2312.13763, 2023. 3 +[33] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. In Proc. ICCV, 2023. 2, 3 +[34] Ying Liu, Dengsheng Zhang, Guojun Lu, and Wei-Ying Ma. A survey of content-based image retrieval with high-level semantics. Pattern Recognition, 40(1):262-282, 2007. 2 +[35] Yuan Liu, Cheng Lin, Zijiao Zeng, Xiaoxiao Long, Lingjie Liu, Taku Komura, and Wenping Wang. Syncdreamer: Generating multiview-consistent images from a single-view image. arXiv preprint arXiv:2309.03453, 2023. 3 +[36] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. ACM Trans. Graph., 2019. 3 +[37] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3D Gaussians: Tracking by persistent dynamic view synthesis. arXiv preprint arXiv:2308.09713, 2023. 8 +[38] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3 +[39] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 2022. 3 +[40] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. In Proc. ICML, 2022. 3 +[41] Zijie Pan, Zeyu Yang, Xiatian Zhu, and Li Zhang. Fast dynamic 3D object generation from a single-view video. arXiv preprint arXiv:2401.08742, 2024. 3 +[42] Dong Huk Park, Samaneh Azadi, Xihui Liu, Trevor Darrell, and Anna Rohrbach. Benchmark for compositional text-to-image synthesis. In Proc. NeurIPS, 2021. 6 +[43] Ryan Po, Wang Yifan, Vladislav Golyanik, Kfir Aberman, Jonathan T Barron, Amit H Bermano, Eric Ryan Chan, Tali Dekel, Aleksander Holynski, Angjoo Kanazawa, et al. State of the art on diffusion models for visual computing. arXiv preprint arXiv:2310.07204, 2023. 2 +[44] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. DreamFusion: Text-to-3D using 2D diffusion. In Proc. ICLR, 2023. 1, 3, 6 +[45] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3D object generation using both 2D and 3D diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 3 +[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen + +Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Proc. ICML, 2021. 2, 3, 7 +[47] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In Proc. ICML, 2021. 2 +[48] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. arXiv preprint arXiv:2204.06125, 2022. 3, 4 +[49] Scott Reed, Zeynep Akata, Xinchen Yan, Lajanugen Logeswaran, Bernt Schiele, and Honglak Lee. Generative adversarial text to image synthesis. In Proc. ICML, 2016. 
2 +[50] Jiawei Ren, Liang Pan, Jiaxiang Tang, Chi Zhang, Ang Cao, Gang Zeng, and Ziwei Liu. DreamGaussian4D: Generative 4D Gaussian splatting. arXiv preprint arXiv:2312.17142, 2023. 3 +[51] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Bjorn Ommer. High-resolution image synthesis with latent diffusion models. In Proc. CVPR, 2022, 1, 2, 3, 4, 6 +[52] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Proc. NeurIPS, 2022. 1, 2, 4 +[53] Aditya Sanghi, Hang Chu, Joseph G Lambourne, Ye Wang, Chin-Yi Cheng, Marco Fumero, and Kamal Rahimi Malekshan. Clip-forge: Towards zero-shot text-to-shape generation. In Proc. CVPR, 2022. 3 +[54] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. Proc. NeurIPS, 2022. 1, 2 +[55] Yichun Shi, Peng Wang, Jianglong Ye, Mai Long, Kejie Li, and Xiao Yang. MVDream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512, 2023. 2, 3, 4, 5, 6 +[56] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022. 1, 2, 3 +[57] Uriel Singer, Shelly Sheynin, Adam Polyak, Oron Ashual, Iurii Makarov, Filippos Kokkinos, Naman Goyal, Andrea Vedaldi, Devi Parikh, Justin Johnson, et al. Text-to-4d dynamic scene generation. In Proc. ICML, 2023. 1, 3, 5, 6, 7 +[58] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In Proc. ICML, 2015. 1 +[59] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. Proc. ICLR, 2021. 2 +[60] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In Proc. ICLR, 2021. 1 + +[61] Matteo Stefanini, Marcella Cornia, Lorenzo Baraldi, Silvia Cascianelli, Giuseppe Fiameni, and Rita Cucchiara. From show to tell: A survey on deep learning-based image captioning. IEEE Trans. Pattern Anal. Mach. Intell., 2022. 2 +[62] Junshu Tang, Tengfei Wang, Bo Zhang, Ting Zhang, Ran Yi, Lizhuang Ma, and Dong Chen. Make-it-3D: High-fidelity 3D creation from a single image with diffusion prior. arXiv preprint arXiv:2303.14184, 2023. 3 +[63] Ayush Tewari, Tianwei Yin, George Cazenavette, Semon Rezchikov, Joshua B Tenenbaum, Frédo Durand, William T Freeman, and Vincent Sitzmann. Diffusion with forward models: Solving stochastic inverse problems without direct supervision. arXiv preprint arXiv:2306.11719, 2023. 3 +[64] Haithem Turki, Jason Y Zhang, Francesco Ferroni, and Deva Ramanan. Suds: Scalable urban dynamic scenes. In Proc CVPR, 2023. 3 +[65] Ruben Villegas, Mohammad Babaeizadeh, Pieter-Jan Kindermans, Hernan Moraldo, Han Zhang, Mohammad Taghi Saffar, Santiago Castro, Julius Kunze, and Dumitru Erhan. Phenaki: Variable length video generation from open domain textual description. arXiv preprint arXiv:2210.02399, 2022. 3 +[66] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. 
Clip-NeRF: Text-and-image driven manipulation of neural radiance fields. In Proc. CVPR, 2022. 3 +[67] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A Yeh, and Greg Shakhnarovich. Score Jacobian Chaining: Lifting pretrained 2D diffusion models for 3D generation. In Proc. CVPR, 2023. 1 +[68] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 3 +[69] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 2, 3, 4, 5, 6 +[70] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. arXiv preprint arXiv:2306.02018, 2023. 3 +[71] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3D generation with variational score distillation. Proc. NeurIPS, 2023. 1, 2, 3, 4, 5, 6 +[72] Ruiqi Wu, Liangyu Chen, Tong Yang, Chunle Guo, Chongyi Li, and Xiangyu Zhang. Lamp: Learn a motion pattern for few-shot-based video generation. arXiv preprint arXiv:2310.10769, 2023. 2, 3 +[73] Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. AttnGAN: Fine-grained text to image generation with attentional generative adversarial networks. In Proc. CVPR, 2018. 2 +[74] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In Proc. CVPR, 2022. 3 + +[75] Yuyang Yin, Dejia Xu, Zhangyang Wang, Yao Zhao, and Yunchao Wei. 4DGen: Grounded 4D content generation with spatial-temporal consistency. arXiv preprint arXiv:2312.17225, 2023. 3 +[76] Paul Yoo, Jiaxian Guo, Yutaka Matsuo, and Shixiang Shane Gu. Dreamsparse: Escaping from Plato's cave with 2d diffusion model given sparse views. arXiv preprint arXiv:2306.03414, 2023. 3 +[77] Quanzeng You, Hailin Jin, Zhaowen Wang, Chen Fang, and Jiebo Luo. Image captioning with semantic attention. In Proc. CVPR, 2016. 2 +[78] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, Ben Hutchinson, Wei Han, Zarana Parekh, Xin Li, Han Zhang, Jason Baldridge, and Yonghui Wu. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022. 2 +[79] Lili Yu, Bowen Shi, Ramakanth Pasunuru, Benjamin Muller, Olga Golovneva, Tianlu Wang, Arun Babu, Binh Tang, Brian Karrer, Shelly Sheynin, et al. Scaling autoregressive multimodal models: Pretraining and instruction tuning. arXiv preprint arXiv:2309.02591, 2023. 2 +[80] Han Zhang, Tao Xu, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, and Dimitris N Metaxas. StackGAN: Text to photo-realistic image synthesis with stacked generative adversarial networks. In Proc. ICCV, 2017. 2 +[81] Yuyang Zhao, Zhiwen Yan, Enze Xie, Lanqing Hong, Zhenguo Li, and Gim Hee Lee. Animate124: Animating one image to 4D dynamic scene. arXiv preprint arXiv:2311.14603, 2023. 3 +[82] Yufeng Zheng, Xueting Li, Koki Nagano, Sifei Liu, Otmar Hilliges, and Shalini De Mello. A unified approach for text-and image-guided 4D scene generation. arXiv preprint arXiv:2311.16854, 2023. 
3 +[83] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 3 \ No newline at end of file diff --git a/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/images.zip b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..ee7f6338d20650894790710f2a145737c1d0d654 --- /dev/null +++ b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b8ab4f9b6e2825f789338707c5675748442ed1e8a4e8123cc3d9a0e2c55de5f +size 597895 diff --git a/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/layout.json b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..acf81da7a1b65ab44b8e66e87851a85493bde8b8 --- /dev/null +++ b/2024/4D-fy_ Text-to-4D Generation Using Hybrid Score Distillation Sampling/layout.json @@ -0,0 +1,9311 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 75, + 103, + 518, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 103, + 518, + 121 + ], + "spans": [ + { + "bbox": [ + 75, + 103, + 518, + 121 + ], + "type": "text", + "content": "4D-fly: Text-to-4D Generation Using Hybrid Score Distillation Sampling" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "spans": [ + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "text", + "content": "Sherwin Bahmani" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "text", + "content": " Ivan Skorokhodov" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "inline_equation", + "content": "^{3,4}" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "text", + "content": " Victor Rong" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "text", + "content": " Gordon Wetzstein" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "text", + "content": " Leonidas Guibas" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "text", + "content": " Peter Wonka" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "text", + "content": " Sergey Tulyakov" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "text", + "content": " Jeong Joon Park" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "text", + "content": " Andrea Tagliafasacchi" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "inline_equation", + "content": 
"^{1,7,8}" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "text", + "content": " David B. Lindell" + }, + { + "bbox": [ + 55, + 142, + 537, + 171 + ], + "type": "inline_equation", + "content": "^{1,2}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 64, + 172, + 527, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 172, + 527, + 185 + ], + "spans": [ + { + "bbox": [ + 64, + 172, + 527, + 185 + ], + "type": "text", + "content": "1University of Toronto 2Vector Institute 3KAUST 4Snap Inc. 5Stanford University 6University of Michigan 7SFU 8Google" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 50, + 204, + 302, + 387 + ], + "blocks": [ + { + "bbox": [ + 50, + 204, + 302, + 387 + ], + "lines": [ + { + "bbox": [ + 50, + 204, + 302, + 387 + ], + "spans": [ + { + "bbox": [ + 50, + 204, + 302, + 387 + ], + "type": "image", + "image_path": "be373046b6b89e238603e355261a367a82d8185543c47f7d9a676c2027f87d0b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 396, + 546, + 430 + ], + "lines": [ + { + "bbox": [ + 46, + 396, + 546, + 430 + ], + "spans": [ + { + "bbox": [ + 46, + 396, + 546, + 430 + ], + "type": "text", + "content": "Figure 1. Text-to-4D Synthesis. We present 4D-fy, a technique that synthesizes 4D (i.e., dynamic 3D) scenes from a text prompt. We show scenes generated from two text prompts for different viewpoints (vertical dimension) at different time steps (horizontal dimension). Video results can be viewed on our website: https://sherwinbahmani.github.io/4dfy." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 306, + 204, + 547, + 376 + ], + "blocks": [ + { + "bbox": [ + 306, + 204, + 547, + 376 + ], + "lines": [ + { + "bbox": [ + 306, + 204, + 547, + 376 + ], + "spans": [ + { + "bbox": [ + 306, + 204, + 547, + 376 + ], + "type": "image", + "image_path": "5f03646c6c9ed74bf919e9380dcef17e32af727fed8ff565857e397d0696a8f3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 440, + 192, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 440, + 192, + 452 + ], + "spans": [ + { + "bbox": [ + 143, + 440, + 192, + 452 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 460, + 289, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 460, + 289, + 699 + ], + "spans": [ + { + "bbox": [ + 46, + 460, + 289, + 699 + ], + "type": "text", + "content": "Recent breakthroughs in text-to-4D generation rely on pre-trained text-to-image and text-to-video models to generate dynamic 3D scenes. However, current text-to-4D methods face a three-way tradeoff between the quality of scene appearance, 3D structure, and motion. For example, text-to-image models and their 3D-aware variants are trained on internet-scale image datasets and can be used to produce scenes with realistic appearance and 3D structure—but no motion. Text-to-video models are trained on relatively smaller video datasets and can produce scenes with motion, but poorer appearance and 3D structure. While these models have complementary strengths, they also have opposing weaknesses, making it difficult to combine them in a way that alleviates this three-way tradeoff. 
Here, we introduce hybrid score distillation sampling, an alternating optimization procedure that blends supervision signals from multiple pre-trained diffusion models and incorporates benefits of each for high-fidelity text-to-4D generation. Using hybrid SDS, we demonstrate synthesis of 4D scenes with compelling appearance, 3D structure, and motion." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 440, + 386, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 440, + 386, + 452 + ], + "spans": [ + { + "bbox": [ + 307, + 440, + 386, + 452 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 460, + 547, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 547, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 547, + 581 + ], + "type": "text", + "content": "The advent of internet-scale image-text datasets [54] and advances in diffusion models [20, 58, 60] have led to new capabilities in stable, high-fidelity image generation from text prompts [6, 51, 52]. Recent methods have also shown that large-scale text-to-image or text-to-video [56] diffusion models learn useful priors for 3D [25, 44] and 4D scene generation [57]. Our work focuses on text-to-4D scene generation (Fig. 1), which promises exciting new capabilities for applications in augmented and virtual reality, computer animation, and industrial design." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 581, + 547, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 547, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 547, + 700 + ], + "type": "text", + "content": "Current techniques for generating 3D or 4D scenes from text prompts typically iteratively optimize a representation of the scene using supervisory signals from a diffusion model [44, 67, 71]. Specifically, these methods render an image of a 3D scene, add noise to the rendered image, use a pre-trained diffusion model to denoise the rendered image, and estimate gradients used to update the 3D representation [44, 67]. This procedure, known as score distillation sampling (SDS) [44], underpins most recent methods for text-conditioned scene generation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 318, + 701, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 701, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 318, + 701, + 545, + 714 + ], + "type": "text", + "content": "Using SDS for text-to-4D generation requires navigating" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "7996" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 123, + 286, + 193 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 289, + 115 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 289, + 115 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 289, + 115 + ], + "type": "text", + "content": "Table 1. Text-to-4D models face a tradeoff between the quality of appearance, 3D structure, and motion depending on the type of generative model used for score distillation sampling (SDS): text-to-image (T2I), 3D-aware T2I, or text-to-video (T2V)." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 123, + 286, + 193 + ], + "lines": [ + { + "bbox": [ + 47, + 123, + 286, + 193 + ], + "spans": [ + { + "bbox": [ + 47, + 123, + 286, + 193 + ], + "type": "table", + "html": "
<table><tr><td>SDS model</td><td>appearance</td><td>3D structure</td><td>motion</td></tr>
<tr><td>T2I [6, 51, 52, 79]</td><td>high</td><td>low</td><td>N/A</td></tr>
<tr><td>3D-aware T2I [29, 55]</td><td>medium</td><td>high</td><td>N/A</td></tr>
<tr><td>T2V [7, 21, 56, 69, 72]</td><td>low</td><td>low</td><td>high</td></tr>
<tr><td>Our method</td><td>medium</td><td>high</td><td>medium</td></tr></table>
", + "image_path": "38f4dbfb27ef9c614c3f1eb265f908f2269e9efb30ee59b7e8e283a661a149ec.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 214, + 289, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 214, + 289, + 476 + ], + "spans": [ + { + "bbox": [ + 46, + 214, + 289, + 476 + ], + "type": "text", + "content": "a three-way tradeoff between the quality of appearance, 3D structure, and motion (see Table 1); existing techniques obtain satisfactory results in just one or two of these categories. For example, while SDS produces images that appear realistic when rendering a generated scene from any particular viewpoint, inspecting multiple viewpoints can reveal that the scene has several faces or heads, replicated appendages, or other incorrectly repeated 3D structures—an issue now referred to as the \"Janus problem\" [55].1 One way to improve 3D structure is to use SDS with a 3D-aware diffusion model that is trained to generate images from different camera viewpoints [33]. But 3D-aware models sacrifice appearance quality as they require fine-tuning on synthetic datasets of posed images [55]. Incorporating motion into a scene using SDS with a text-to-video model [69] typically degrades the appearance relative to static scenes generated with text-to-image models, which are more realistic (see Fig. 2). While different types of diffusion models thus have complementary qualities, they also have opposing weaknesses (Table 1). Therefore, it is not trivial to combine them in a way that yields text-to-4D generation with high-quality appearance, 3D structure, and motion." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 478, + 288, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 478, + 288, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 478, + 288, + 693 + ], + "type": "text", + "content": "Here, we propose a method for text-to-4D scene generation that alleviates this three-way tradeoff using hybrid SDS, an alternating optimization scheme that blends gradient updates from multiple pre-trained diffusion models and synthesizes 4D scenes using the best qualities of each. The method consists of three stages of optimization: (1) we use a 3D-aware text-to-image model [55] to generate an initial static 3D scene (without the Janus problem); (2) we continue the optimization by blending in alternating supervision with variational SDS [71] and a text-to-image model to improve appearance; (3) we blend in alternating supervision using video SDS with a text-to-video model [69] to add motion to the scene. By smoothly incorporating supervisory signals from these three diffusion models throughout the training process, we achieve text-driven 4D scene generation with state-of-the-art quality in terms of appearance, 3D structure, and motion. Overall we provide the following contributions. 
We introduce hybrid SDS, a technique that extracts desir" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 70, + 547, + 245 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 547, + 245 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 547, + 245 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 547, + 245 + ], + "type": "image", + "image_path": "0408112f472c85c72761b8513b059cddd1634e8e624b099869a98277e5d3c693.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 255, + 547, + 310 + ], + "lines": [ + { + "bbox": [ + 305, + 255, + 547, + 310 + ], + "spans": [ + { + "bbox": [ + 305, + 255, + 547, + 310 + ], + "type": "text", + "content": "Figure 2. Comparing text-to-image and text-to-video models. Rendered frames from Stable Diffusion version 2.1 (top; text-to-image) [1] and Zeroscope version 2 (bottom; text-to-video) [3] show significant disparity in appearance, with the text-to-image model appearing far more realistic." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 326, + 545, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 326, + 545, + 361 + ], + "spans": [ + { + "bbox": [ + 313, + 326, + 545, + 361 + ], + "type": "text", + "content": "able qualities from multiple pre-trained diffusion models and alleviates a tradeoff between appearance, 3D structure, and motion in text-to-4D scene generation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 361, + 547, + 456 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 306, + 361, + 545, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 361, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 306, + 361, + 545, + 396 + ], + "type": "text", + "content": "- We provide a quantitative and qualitative evaluation of the method, and we explore the three-way tradeoff space with ablation studies to facilitate future research." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 397, + 547, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 397, + 547, + 433 + ], + "spans": [ + { + "bbox": [ + 306, + 397, + 547, + 433 + ], + "type": "text", + "content": "- We demonstrate text-to-4D generation based on open-source pretrained models and will make all codes and evaluation procedures publicly available." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 434, + 545, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 434, + 545, + 456 + ], + "spans": [ + { + "bbox": [ + 306, + 434, + 545, + 456 + ], + "type": "text", + "content": "- We present state-of-the-art results for the task of text-to-4D generation." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 470, + 392, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 470, + 392, + 483 + ], + "spans": [ + { + "bbox": [ + 306, + 470, + 392, + 483 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 491, + 547, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 491, + 547, + 552 + ], + "spans": [ + { + "bbox": [ + 304, + 491, + 547, + 552 + ], + "type": "text", + "content": "Our method is related to techniques from multiple areas of generative modeling, including text-to-image, text-to-video, and text-to-3D models. 
For more extensive discussions of related works, we refer readers to a recent state-of-the-art report on diffusion models [43]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 557, + 547, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 547, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 547, + 652 + ], + "type": "text", + "content": "Text-to-image generation. Methods for generating images from text prompts are a relatively new innovation, first demonstrated using generative adversarial networks [49, 73, 80]. The problem itself is also related to other methods for text-based image retrieval [34] or image-conditioned text generation [61, 77]. More recently, models trained on text-image datasets with billions of samples [54] have become the state of the art for this task [51]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "content": "Diffusion models [20, 59] are a popular architecture for generative modeling on large-scale datasets, and autoregressive models have also shown promising results [47, 78]. Typically, these methods exploit a pretrained text encoder, such as CLIP [46], to encode the text prompt into a feature" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 58, + 702, + 276, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 276, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 276, + 713 + ], + "type": "text", + "content": "1Referring to the two-faced Roman god of beginnings and endings." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7997" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": "vector used to condition the diffusion model [40, 48]. In diffusion models, high-resolution (i.e., megapixel) image generation is achieved by applying repeated upsampling layers [22, 48] or performing diffusion in the lower-resolution latent space of an autoencoder and then decoding the result to recover an image at the nominal resolution [16, 51]. Our work incorporates two open-source text-to-image diffusion models: Stable Diffusion [51] and MVDream [55] (a recent 3D-aware diffusion model) to enable 4D scene generation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 188, + 289, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 188, + 289, + 475 + ], + "spans": [ + { + "bbox": [ + 46, + 188, + 289, + 475 + ], + "type": "text", + "content": "Text-to-video generation. Our work relies on the burgeoning field of video generation via diffusion models, an area that is somewhat constrained by the limited scale of video datasets. 
To counteract this, methods often utilize a hybrid training approach on both image and video datasets, such as WebVid-10M [5], HD-VG-130M [69], or HD-VILA-100M [74]. Recent approaches in this field typically employ variations of pixel-space upsampling (both in space and time) [21] or latent space upsampling to improve spatial and temporal resolution [17, 18, 70, 83]. Autoregressive models distinguish themselves by their ability to generate videos of varying lengths [65]. Further improvements in video synthesis have been achieved by finetuning pre-trained text-to-image diffusion models on video data [7, 56, 72], or separating the content and motion generation process by using an initial image frame as a starting point [17, 72]. Despite recent advances in text-to-video synthesis, the fidelity of generated videos still lags behind that of static image generation (see Fig. 2) and so they perform poorly when used directly with SDS for text-to-4D generation. Instead, our work leverages an open-source latent space text-to-video diffusion model called Zeroscope [3] (extended from the Modelscope architecture [68]) together with other pre-trained, open-source diffusion models using hybrid SDS." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 483, + 289, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 483, + 289, + 698 + ], + "spans": [ + { + "bbox": [ + 46, + 483, + 289, + 698 + ], + "type": "text", + "content": "Text-to-3D generation. Early methods for text-to-3D generation relied on parsers to convert input text to a semantic representation and synthesized scenes from an object database [4, 10, 12]. Later, automated, data-driven methods used multi-modal datasets [11], and pre-trained models, such as CLIP [46], to edit or stylize an input 3D mesh [14, 26] or a radiance field [66]. More recently, CLIP-based supervision enabled synthesis of entire 3D scenes [25, 53], and these techniques evolved into the most recent approaches, which optimize a mesh or radiance field based on SDS supervision [30, 44, 71]. The quality of their 3D structures has been improved by applying diffusion models that consider multiple viewpoints [31, 33, 55]. Alternatively, recent advancements have seen a shift towards using diffusion or transformer models to transform an input 2D image into a 3D representation for novel-view synthesis [9, 15, 35, 45, 62, 63, 76]. Still, these techniques do not yet support generating 4D scenes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 701, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 289, + 713 + ], + "type": "text", + "content": "Our work is most closely related to Make-A-" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "type": "text", + "content": "Video3D (MAV3D) [57], a recent method for text-to-4D generation that integrates SDS-based supervision in two separate stages: first with a text-to-image model and subsequently with a text-to-video model. Similar to MAV3D, we aim to generate dynamic 3D scenes; however, our approach uses hybrid SDS, which allows gradient updates from multiple models to be smoothly blended together in an alternating optimization. 
Our approach generates high-quality dynamic 3D scenes and does not suffer from Janus problems." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 185, + 547, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 185, + 547, + 221 + ], + "spans": [ + { + "bbox": [ + 304, + 185, + 547, + 221 + ], + "type": "text", + "content": "Concurrent works. Concurrent works on text-to-4D [32, 82], image-to-4D [50, 81, 82], and video-to-4D [27, 41, 75] similarly use recent diffusion models for 4D generation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 230, + 361, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 230, + 361, + 243 + ], + "spans": [ + { + "bbox": [ + 306, + 230, + 361, + 243 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 251, + 547, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 251, + 547, + 323 + ], + "spans": [ + { + "bbox": [ + 304, + 251, + 547, + 323 + ], + "type": "text", + "content": "Our approach for text-to-4D generation builds upon a hash-encoding-based neural representation [39] that implicitly decomposes the scene into static and dynamic feature grids [64]. In this section we overview our representation for 4D neural rendering and describe the optimization procedure based on hybrid SDS (see Fig. 3)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 329, + 430, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 329, + 430, + 342 + ], + "spans": [ + { + "bbox": [ + 306, + 329, + 430, + 342 + ], + "type": "text", + "content": "3.1. 4D Neural Rendering" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 347, + 547, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 347, + 547, + 491 + ], + "spans": [ + { + "bbox": [ + 304, + 347, + 547, + 491 + ], + "type": "text", + "content": "Volumetric neural rendering methods represent a scene using a neural representation to parameterize the attenuation and emission of light at every point in 3D space [36, 38]. We can use such a representation to render an image by casting a ray from the camera center of projection, through each pixel location, and into the scene. For sampled points along the ray " + }, + { + "bbox": [ + 304, + 347, + 547, + 491 + ], + "type": "inline_equation", + "content": "\\pmb{\\mu} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 347, + 547, + 491 + ], + "type": "text", + "content": ", we query a neural representation to retrieve a volumetric density " + }, + { + "bbox": [ + 304, + 347, + 547, + 491 + ], + "type": "inline_equation", + "content": "\\tau \\in \\mathbb{R}_+" + }, + { + "bbox": [ + 304, + 347, + 547, + 491 + ], + "type": "text", + "content": " and color " + }, + { + "bbox": [ + 304, + 347, + 547, + 491 + ], + "type": "inline_equation", + "content": "\\mathbf{c} \\in \\mathbb{R}_+^3" + }, + { + "bbox": [ + 304, + 347, + 547, + 491 + ], + "type": "text", + "content": ", which describe attenuation and emission of light, respectively, at a particular point. 
Then, the resulting density and color samples are alpha-composed to recover the color of a rendered pixel " + }, + { + "bbox": [ + 304, + 347, + 547, + 491 + ], + "type": "inline_equation", + "content": "\mathbf{C}" + }, + { + "bbox": [ + 304, + 347, + 547, + 491 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 350, + 497, + 547, + 523 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 497, + 547, + 523 + ], + "spans": [ + { + "bbox": [ + 350, + 497, + 547, + 523 + ], + "type": "interline_equation", + "content": "\mathbf{C} = \sum_{i} w_{i} \mathbf{c}_{i}, \quad w_{i} = \alpha_{i} \prod_{j < i} (1 - \alpha_{j}), \tag {1}", + "image_path": "7b95fd81fb75bd13893baa8818b8773a8aa6069f24726bfc7018afed0199912e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 530, + 547, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 530, + 547, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 530, + 547, + 567 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 530, + 547, + 567 + ], + "type": "inline_equation", + "content": "\alpha_{i} = 1 - e^{-\tau_{i}\| \pmb{\mu}_{i} - \pmb{\mu}_{i + 1}\|}" + }, + { + "bbox": [ + 304, + 530, + 547, + 567 + ], + "type": "text", + "content": ". We query the neural representation using an additional input time variable " + }, + { + "bbox": [ + 304, + 530, + 547, + 567 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 530, + 547, + 567 + ], + "type": "text", + "content": ", which enables modeling time-varying density and color." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 567, + 545, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 567, + 545, + 602 + ], + "spans": [ + { + "bbox": [ + 304, + 567, + 545, + 602 + ], + "type": "text", + "content": "We illustrate the neural representation in Fig. 3; it consists of two multi-resolution hash tables to disentangle static and dynamic scene modeling." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 603, + 547, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 603, + 547, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 603, + 547, + 662 + ], + "type": "text", + "content": "Following Müller et al. [39], the static hash table stores learnable feature vectors that are indexed by a voxel-lookup and hashing operation and decoded into density and color using two small multilayer perceptrons (MLPs). 
Concretely, we consider the neural representation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 390, + 670, + 545, + 683 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 390, + 670, + 545, + 683 + ], + "spans": [ + { + "bbox": [ + 390, + 670, + 545, + 683 + ], + "type": "interline_equation", + "content": "\\mathcal {N} _ {\\theta}: \\boldsymbol {\\mu}, t \\rightarrow \\tau , \\mathbf {c} \\tag {2}", + "image_path": "e05fff74f89db24682d0411c632cceb77950666c6314da1d4629b8dda10e35c8.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 689, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 689, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 689, + 547, + 713 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 304, + 689, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\theta = \\{\\theta_{\\mathrm{static}},\\theta_{\\mathrm{dynamic}},\\theta_{\\mathrm{MLP}}\\}" + }, + { + "bbox": [ + 304, + 689, + 547, + 713 + ], + "type": "text", + "content": " denoting all learnable parameters from the static and dynamic hash tables and the MLPs." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7998" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 70, + 549, + 185 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 549, + 185 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 549, + 185 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 549, + 185 + ], + "type": "image", + "image_path": "a24e7e6dc53088c14d5889b41abcd7ed78b98954b9f9997f6848a9c43dbad622.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 202, + 548, + 281 + ], + "lines": [ + { + "bbox": [ + 46, + 202, + 548, + 281 + ], + "spans": [ + { + "bbox": [ + 46, + 202, + 548, + 281 + ], + "type": "text", + "content": "Figure 3. Overview. A 4D radiance field is parameterized using a neural representation with a static and dynamic multiscale hash table of features. Images and videos are rendered from the representation using volume rendering, and we supervise the representation using hybrid score distillation sampling—a technique that combines gradients from multiple types of pre-trained diffusion models. In the first stage of training we use gradients " + }, + { + "bbox": [ + 46, + 202, + 548, + 281 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 46, + 202, + 548, + 281 + ], + "type": "text", + "content": " from a 3D-aware text-to-image model (3D-T2I) to iteratively optimize a representation without the Janus problem. 
Next, we blend in gradient supervision using variational SDS with a text-to-image model (T2I) to improve the appearance (i.e., we alternate supervision between " + }, + { + "bbox": [ + 46, + 202, + 548, + 281 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}" + }, + { + "bbox": [ + 46, + 202, + 548, + 281 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 202, + 548, + 281 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 46, + 202, + 548, + 281 + ], + "type": "text", + "content": "). In the last stage we incorporate gradients " + }, + { + "bbox": [ + 46, + 202, + 548, + 281 + ], + "type": "inline_equation", + "content": "(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{VID}})" + }, + { + "bbox": [ + 46, + 202, + 548, + 281 + ], + "type": "text", + "content": " from a text-to-video model (T2V) to add motion to the scene, and we update the scene using the other models in an alternating fashion." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "spans": [ + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "text", + "content": "For a given " + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "text", + "content": ", we query the static hash table by identifying the closest voxel at each scale " + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "inline_equation", + "content": "1 \\leq s \\leq S" + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "text", + "content": ". Then, we trilinearly interpolate the feature values from the voxel vertices after retrieving them from the hash table. Retrieved features from each scale are concatenated as " + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{\\mathrm{static}} = \\mathbf{f}_{\\mathrm{static}}^{(1)} \\oplus \\dots \\oplus \\mathbf{f}_{\\mathrm{static}}^{(S)}" + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "text", + "content": ". We follow the same procedure to query the dynamic hash table given " + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "inline_equation", + "content": "(\\mu, t)" + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "text", + "content": ", except we use quadrilinear interpolation to interpolate feature values. The resulting features from the static and dynamic hash tables are added as " + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "inline_equation", + "content": "\\mathbf{f} = \\mathbf{f}_{\\mathrm{static}} + \\mathbf{f}_{\\mathrm{dynamic}}" + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "text", + "content": ". We do not model view-dependent effects in the feature encoding. 
Finally, we decode density and color as " + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_{\\tau}(\\mathbf{f})" + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_{\\mathrm{c}}(\\mathbf{f})" + }, + { + "bbox": [ + 46, + 297, + 290, + 441 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 449, + 235, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 449, + 235, + 462 + ], + "spans": [ + { + "bbox": [ + 47, + 449, + 235, + 462 + ], + "type": "text", + "content": "3.2. Hybrid Score Distillation Sampling" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 468, + 288, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 468, + 288, + 528 + ], + "spans": [ + { + "bbox": [ + 46, + 468, + 288, + 528 + ], + "type": "text", + "content": "We leverage the 4D representation along with SDS to create dynamic 3D scenes from a text prompt. Our hybrid approach incorporates three different flavors of SDS that are smoothly merged during an alternating optimization procedure to improve the structure and quality of the 4D model:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 529, + 288, + 623 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 47, + 529, + 287, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 529, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 47, + 529, + 287, + 552 + ], + "type": "text", + "content": "1. SDS applied to a 3D-aware text-to-image diffusion model to optimize a static scene without the Janus problem." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 552, + 288, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 552, + 288, + 587 + ], + "spans": [ + { + "bbox": [ + 47, + 552, + 288, + 587 + ], + "type": "text", + "content": "2. Variational score distillation sampling (VSD; a modified version of SDS [71]) using a standard text-to-image model [51] to improve the appearance of the static scene." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 588, + 288, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 588, + 288, + 623 + ], + "spans": [ + { + "bbox": [ + 47, + 588, + 288, + 623 + ], + "type": "text", + "content": "3. Video SDS using a text-to-video model [69], which extends SDS to multiple video frames and adds motion to the scene." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 624, + 287, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 624, + 287, + 649 + ], + "spans": [ + { + "bbox": [ + 46, + 624, + 287, + 649 + ], + "type": "text", + "content": "In the following, we describe each type of SDS and how it is used for text-to-4D generation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 654, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 289, + 714 + ], + "type": "text", + "content": "3D-aware scene optimization. We first consider optimizing a static scene using SDS with a 3D-aware text-to-image diffusion model [55]. 
The diffusion model is pre-trained using a stochastic forward process that slowly adds Gaussian noise to multiview images " + }, + { + "bbox": [ + 46, + 654, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 654, + 289, + 714 + ], + "type": "text", + "content": " over timesteps " + }, + { + "bbox": [ + 46, + 654, + 289, + 714 + ], + "type": "inline_equation", + "content": "0\\leq t_{d}\\leq T_{d}" + }, + { + "bbox": [ + 46, + 654, + 289, + 714 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "spans": [ + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": "With increasing " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "t_d" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": ", the process yields noisy images " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_{t_d}" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": " that, at " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "t_d = T_d" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": ", are close to zero-mean Gaussian. After training, the model reverses this process to add structure to the noisy images. It predicts " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}_{\\phi}(\\mathbf{z}_{t_d}; t_d, \\mathbf{y}, \\mathbf{T})" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": ", which approximates the output of an optimal denoiser at each timestep " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "t_d" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": ", conditioned on a text embedding " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": " [48, 51, 52] and the camera extrinsics " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "\\mathbf{T}" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": " corresponding to each image. In practice, text-to-image diffusion models typically predict the noise content " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\phi}" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": " rather than the denoised image " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}_{\\phi}" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": ". 
But note that the denoised image can still be obtained as " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}_{\\phi}(\\mathbf{z}_{t_d}; t_d, \\mathbf{y}, \\mathbf{T}) \\propto \\mathbf{z}_{t_d} - \\epsilon_{\\phi}(\\mathbf{z}_{t_d}; t_d, \\mathbf{y}, \\mathbf{T})" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": ", i.e., by subtracting the predicted noise from the noisy image [20]. We implement 3D-aware SDS by rendering multiple images " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_{\\theta}" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": " from the neural representation, adding noise " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": ", and using the 3D-aware diffusion model [55] to predict the noise " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\phi}" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": " using classifier-free guidance [19]. To update the parameters " + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 297, + 547, + 511 + ], + "type": "text", + "content": " of the neural representation, we use the 3D-aware SDS gradient:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 311, + 517, + 545, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 517, + 545, + 552 + ], + "spans": [ + { + "bbox": [ + 311, + 517, + 545, + 552 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta} \\mathcal {L} _ {\\mathrm {3 D}} = \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}, \\mathbf {T}} \\left[ w (t _ {d}) \\left(\\boldsymbol {\\epsilon} _ {\\phi} \\left(\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}, \\mathbf {T}\\right) - \\boldsymbol {\\epsilon}\\right) \\frac {\\partial \\mathbf {x} _ {\\theta}}{\\partial \\theta} \\right], \\tag {3}", + "image_path": "3721977508914ff0ae8040c56dd3df8452eebb56ca49b4c03f3486164640e412.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 553, + 547, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 553, + 547, + 624 + ], + "spans": [ + { + "bbox": [ + 304, + 553, + 547, + 624 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 553, + 547, + 624 + ], + "type": "inline_equation", + "content": "w(t_{d})" + }, + { + "bbox": [ + 304, + 553, + 547, + 624 + ], + "type": "text", + "content": " is a weighting function that depends on the diffusion timestep, and we add a stop gradient to the output of the diffusion model [55]. Intuitively, the SDS loss queries the diffusion model to see how it adds structure to an image, then this information is used to backpropagate gradients to the scene representation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 629, + 548, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 548, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 548, + 714 + ], + "type": "text", + "content": "Improving appearance using VSD. We incorporate an additional loss term based on VSD [71] to improve the appearance of images rendered from the scene. 
This term uses a pre-trained text-to-image model [51] along with a finetuning scheme that improves image quality over the 3D-aware text-to-image model alone. We follow Wang et al. [71] and augment the standard SDS gradient with the output of an" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7999" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 288, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 288, + 108 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 288, + 108 + ], + "type": "text", + "content": "additional text-to-image diffusion model that is finetuned using a low-rank adaptation [24], during scene optimization. Specifically, we have" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 114, + 289, + 140 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 114, + 289, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 114, + 289, + 140 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta} \\mathcal {L} _ {\\mathrm {I M G}} = \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}, \\mathbf {T}} \\left[ w (t _ {d}) \\left(\\boldsymbol {\\epsilon} _ {\\phi} (\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}) - \\boldsymbol {\\epsilon} _ {\\phi} ^ {\\prime} (\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}, \\mathbf {T})\\right) \\frac {\\partial \\mathbf {x} _ {\\theta}}{\\partial \\theta} \\right],", + "image_path": "89738fa714f2b34bae6cf5f94e3c5354c9cf92f53dda390eb1f47eb4297aacd1.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 142, + 287, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 142, + 287, + 201 + ], + "spans": [ + { + "bbox": [ + 47, + 142, + 287, + 201 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 142, + 287, + 201 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\phi}^{\\prime}" + }, + { + "bbox": [ + 47, + 142, + 287, + 201 + ], + "type": "text", + "content": " is the noise predicted using a finetuned version of the diffusion model that incorporates additional conditioning from the camera extrinsics " + }, + { + "bbox": [ + 47, + 142, + 287, + 201 + ], + "type": "inline_equation", + "content": "\\mathbf{T}" + }, + { + "bbox": [ + 47, + 142, + 287, + 201 + ], + "type": "text", + "content": "; here, we let " + }, + { + "bbox": [ + 47, + 142, + 287, + 201 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_{t_d}" + }, + { + "bbox": [ + 47, + 142, + 287, + 201 + ], + "type": "text", + "content": " represent a noisy version of a single image rendered from " + }, + { + "bbox": [ + 47, + 142, + 287, + 201 + ], + "type": "inline_equation", + "content": "\\mathcal{N}_{\\theta}" + }, + { + "bbox": [ + 47, + 142, + 287, + 201 + ], + "type": "text", + "content": ". 
The model is finetuned using the standard diffusion objective" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 84, + 209, + 287, + 226 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 209, + 287, + 226 + ], + "spans": [ + { + "bbox": [ + 84, + 209, + 287, + 226 + ], + "type": "interline_equation", + "content": "\\min _ {\\theta} \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}, \\mathbf {T}} \\left[ \\| \\boldsymbol {\\epsilon} _ {\\phi} ^ {\\prime} \\left(\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}, \\mathbf {T}\\right) - \\boldsymbol {\\epsilon} \\| _ {2} ^ {2} \\right]. \\tag {5}", + "image_path": "27ccf23ff7e518bc28c2d57ad5ea8db967246b0ded3c9c8db41286f58a1fb9bb.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 233, + 288, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 233, + 288, + 293 + ], + "spans": [ + { + "bbox": [ + 47, + 233, + 288, + 293 + ], + "type": "text", + "content": "Note that, different from the original description of VSD [71], we find we can omit the simultaneous optimization over multiple scene samples (i.e. the variational component of [71]), which reduces memory requirements without significantly degrading appearance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 298, + 288, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 298, + 288, + 370 + ], + "spans": [ + { + "bbox": [ + 47, + 298, + 288, + 370 + ], + "type": "text", + "content": "Adding motion with Video SDS. Last, we use supervision from a text-to-video diffusion model [69] to add motion to the generated scene. This procedure extends the original SDS gradient by incorporating structure added by the diffusion model to all noisy video frames [57]. The video SDS gradient is given as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 376, + 287, + 403 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 376, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 55, + 376, + 287, + 403 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta} \\mathcal {L} _ {\\mathrm {V I D}} = \\mathbb {E} _ {t _ {d}, \\boldsymbol {\\epsilon}} \\left[ w (t _ {d}) \\left(\\epsilon_ {\\phi} \\left(\\mathbf {z} _ {t _ {d}}; t _ {d}, \\mathbf {y}\\right) - \\boldsymbol {\\epsilon}\\right) \\frac {\\partial \\mathbf {X} _ {\\theta}}{\\partial \\theta} \\right]. 
\\tag {6}", + "image_path": "207cc878c622ad567e74ed695b254a87e61a62c3db272e072762ca85bc89f02a.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "spans": [ + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "text", + "content": "To simplify notation, we re-use " + }, + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\phi}" + }, + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "text", + "content": " to here represent the predicted and actual noise for each video frame, and we let " + }, + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{\\theta}" + }, + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "text", + "content": " be a collection of " + }, + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "text", + "content": " video frames " + }, + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{\\theta} = [\\mathbf{x}_{\\theta}^{(1)},\\dots,\\mathbf{x}_{\\theta}^{(V)}]^{T}" + }, + { + "bbox": [ + 47, + 409, + 287, + 459 + ], + "type": "text", + "content": " rendered from the representation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 462, + 287, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 462, + 287, + 509 + ], + "spans": [ + { + "bbox": [ + 47, + 462, + 287, + 509 + ], + "type": "text", + "content": "Optimization procedure - Algorithm 1. We optimize the 4D representation in three stages that smoothly blend supervision in alternating steps from (1) 3D-aware SDS, (2) VSD, and (3) video SDS." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 510, + 288, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 510, + 288, + 617 + ], + "spans": [ + { + "bbox": [ + 47, + 510, + 288, + 617 + ], + "type": "text", + "content": "Stage 1. In the first stage of optimization, we update " + }, + { + "bbox": [ + 47, + 510, + 288, + 617 + ], + "type": "inline_equation", + "content": "\\mathcal{N}_{\\theta}" + }, + { + "bbox": [ + 47, + 510, + 288, + 617 + ], + "type": "text", + "content": " using gradients from 3D-aware SDS until convergence. Since this stage focuses on optimizing a static scene, we freeze (i.e. do not update) the parameters of the dynamic hash table " + }, + { + "bbox": [ + 47, + 510, + 288, + 617 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{\\mathrm{dynamic}}" + }, + { + "bbox": [ + 47, + 510, + 288, + 617 + ], + "type": "text", + "content": " and only update the static hash table and decoder MLP. We set the total number of first-stage iterations " + }, + { + "bbox": [ + 47, + 510, + 288, + 617 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{stage - 1}}" + }, + { + "bbox": [ + 47, + 510, + 288, + 617 + ], + "type": "text", + "content": " to match that of Shi et al. [55], which allows the optimization to proceed until there are no distinguishable changes in the rendered scene from one iteration to the next." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "text", + "content": "Stage 2. Next, we add VSD gradients using an alternating optimization procedure. At each iteration, we randomly select to update the model using " + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}" + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "text", + "content": " with probability " + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "P_{3\\mathrm{D}}" + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{IMG}}" + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "text", + "content": ". We continue this alternating optimization for " + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{stage - 2}}" + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "text", + "content": " iterations, until convergence. As we show in the next section, this stage of optimization results in improved appearance compared to using " + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 47, + 617, + 288, + 714 + ], + "type": "text", + "content": " alone while also being free of the Janus problem." 
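Stages 1-3 amount to a per-iteration choice of which gradient to apply, summarized in Algorithm 1 below. A minimal sketch of that scheduling logic follows, assuming the stage-3 probabilities are read as P_3D for the 3D-aware update and P_3D * P_IMG for the image update, with the video update taken otherwise; the names are illustrative and not taken from the paper's code.

import random

def hybrid_sds_schedule(n_stage1, n_stage2, n_stage3, p_3d, p_img):
    # Stage 1: only the 3D-aware SDS gradient (dynamic hash table stays frozen).
    for _ in range(n_stage1):
        yield "3D"
    # Stage 2: alternate 3D-aware SDS and VSD image updates.
    for _ in range(n_stage2):
        yield "3D" if random.random() < p_3d else "IMG"
    # Stage 3: 3D-aware, image, or video SDS updates.
    for _ in range(n_stage3):
        r = random.random()
        if r < p_3d:
            yield "3D"
        elif r < p_3d + p_3d * p_img:
            yield "IMG"
        else:
            yield "VID"

Under this reading, the paper's setting P_3D = P_IMG = 0.5 would devote about half of the stage-3 iterations to the 3D-aware gradient, a quarter to the image gradient, and the remainder to the video gradient.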
+ } + ] + } + ], + "index": 10 + }, + { + "type": "code", + "bbox": [ + 307, + 85, + 547, + 454 + ], + "blocks": [ + { + "bbox": [ + 306, + 72, + 505, + 84 + ], + "lines": [ + { + "bbox": [ + 306, + 72, + 505, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 505, + 84 + ], + "type": "text", + "content": "Algorithm 1 Hybrid Score Distillation Sampling" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 307, + 85, + 547, + 454 + ], + "lines": [ + { + "bbox": [ + 307, + 85, + 547, + 454 + ], + "spans": [ + { + "bbox": [ + 307, + 85, + 547, + 454 + ], + "type": "text", + "content": "Require: N_θ ▷ 4D neural representation \nRequire: N_stage-1, N_stage-2, N_stage-3 ▷ iterations for each stage \nRequire: P_3D, P_IMG ▷ update probabilities \nRequire: ∇_θ L_3D, ∇_θ L_IMG, ∇_θ L_VID ▷ SDS grads. (Eqs. 3, 4, 6) \n1: // Stage 1 \n2: freeze dynamic hash map (θ_dynamic) \n3: for iter in N_stage-1 do ▷ 3D update \n4: grad = ∇_θ L_3D \n5: UPDATE(grad) \n6: // Stage 2 \n7: for iter in N_stage-2 do ▷ 3D or IMG update \n8: grad = ∇_θ L_3D with probability P_3D, otherwise ∇_θ L_IMG \n9: UPDATE(grad) \n10: // Stage 3 \n11: decrease learning rate of static hash map (θ_static) \n12: for iter in N_stage-3 do ▷ 3D, IMG, or VID update \n13: grad = ∇_θ L_3D with probability P_3D, ∇_θ L_IMG with probability P_3D · P_IMG, otherwise ∇_θ L_VID \n14: if VID, unfreeze θ_dynamic \n15: UPDATE(grad) \n16: procedure UPDATE(grad) \n17: x ← N_θ ▷ render images (Eq. 1) \n18: take gradient step on grad ▷ optimize N_θ \n19: if IMG, take finetuning step (Eq. 5) \n20: end procedure" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "algorithm" + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "text", + "content": "Stage 3. Last, we update the representation using a combination of all gradient updates. Specifically, we randomly select to update the model at each iteration using " + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "inline_equation", + "content": "\nabla_{\theta}\mathcal{L}_{\mathrm{3D}}" + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "inline_equation", + "content": "\nabla_{\theta}\mathcal{L}_{\mathrm{IMG}}" + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "text", + "content": ", or " + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "inline_equation", + "content": "\nabla_{\theta}\mathcal{L}_{\mathrm{VID}}" + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "text", + "content": " with probability " + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "inline_equation", + "content": "P_{\mathrm{3D}}" + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "inline_equation", + "content": "P_{\mathrm{3D}}\cdot P_{\mathrm{IMG}}" + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "inline_equation", + "content": "1 - P_{\mathrm{3D}}\cdot P_{\mathrm{IMG}}" + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "text", + "content": ", respectively. Since we now aim to incorporate motion into the representation, we unfreeze the parameters of the dynamic hash table during the update with " + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "inline_equation", + "content": "\nabla_{\theta}\mathcal{L}_{\mathrm{VID}}" + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "text", + "content": " but keep them frozen for updates using the text-to-image models. We also decrease the learning rate of the static hash table to preserve the high-quality appearance from the previous stage. 
We repeat the alternating optimization in the final stage until convergence, which we find occurs consistently within " + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{stage-3}}" + }, + { + "bbox": [ + 304, + 472, + 547, + 665 + ], + "type": "text", + "content": " iterations. Overall, hybrid SDS effectively combines the strengths of each pre-trained diffusion model while avoiding quality degradations that result from naively combining gradients from each model." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 671, + 403, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 671, + 403, + 684 + ], + "spans": [ + { + "bbox": [ + 306, + 671, + 403, + 684 + ], + "type": "text", + "content": "3.3. Implementation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 689, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 547, + 713 + ], + "type": "text", + "content": "We implement hybrid SDS based on the threestudio framework [2], which includes implementations of MV-" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8000" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 70, + 547, + 411 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 547, + 411 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 547, + 411 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 547, + 411 + ], + "type": "image", + "image_path": "0bc5e717e74819f668e110a46ef21906fa72852d57b5716238a2cb542b344c29.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 418, + 545, + 431 + ], + "lines": [ + { + "bbox": [ + 46, + 418, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 46, + 418, + 545, + 431 + ], + "type": "text", + "content": "Figure 4. Text-to-4D Comparison. We compare against MAV3D [57], and observe our approach obtains significantly higher quality results." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 446, + 288, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 446, + 288, + 495 + ], + "spans": [ + { + "bbox": [ + 46, + 446, + 288, + 495 + ], + "type": "text", + "content": "Dream [55] (for 3D-aware text-to-image diffusion and SDS), ProlificDreamer [71] with Stable Diffusion [51] (text-to-image diffusion and VSD), and we implement the video SDS updates using Zeroscope [3, 69]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "text", + "content": "Hyperparameter values. We initialize the 4D neural representation following [30, 44] and add an offset to the density predicted by the network in the center of the scene to promote object-centric reconstructions. We set the learning rates for the static hash map to 0.01, for the dynamic hash map to 0.01, and for the MLP to 0.001. 
We drop the learning rate for the static hash map to 0.0001 before the last stage to focus the gradient updates on the dynamic hash map. The values of " + }, + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{stage - 1}}" + }, + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{stage - 2}}" + }, + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{stage - 3}}" + }, + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "text", + "content": " are set to 10000, 10000, and 100000, respectively. We set the probabilities for hybrid SDS to " + }, + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{3D}} = 0.5" + }, + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{IMG}} = 0.5" + }, + { + "bbox": [ + 46, + 502, + 287, + 646 + ], + "type": "text", + "content": " for a reasonable tradeoff with respect to appearance, 3D structure, and motion." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": "Rendering. Each of the diffusion models has a different native resolution, so we render images from " + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{N}_{\\theta}" + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": " accordingly. We render four images from different camera positions for the 3D-aware SDS at the native " + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "(256\\times 256" + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": " pixel) resolution of the 3D-aware text-to-image model. The VSD update" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 446, + 547, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 446, + 547, + 507 + ], + "spans": [ + { + "bbox": [ + 304, + 446, + 547, + 507 + ], + "type": "text", + "content": "is computed by rendering a " + }, + { + "bbox": [ + 304, + 446, + 547, + 507 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 304, + 446, + 547, + 507 + ], + "type": "text", + "content": " image and bilinearly upsampling the image to the native resolution of Stable Diffusion " + }, + { + "bbox": [ + 304, + 446, + 547, + 507 + ], + "type": "inline_equation", + "content": "(512 \\times 512)" + }, + { + "bbox": [ + 304, + 446, + 547, + 507 + ], + "type": "text", + "content": ". 
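A compact configuration object can make the hyperparameters and render settings quoted above easier to track. The dataclass and field names below are invented for illustration; only the numeric values come from the text.

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class HybridSDSConfig:
    # Learning rates (the static hash map drops to 1e-4 before the last stage).
    lr_static_hash: float = 0.01
    lr_dynamic_hash: float = 0.01
    lr_mlp: float = 0.001
    lr_static_hash_final: float = 0.0001
    # Iterations per stage.
    n_stage: tuple = (10_000, 10_000, 100_000)
    # Hybrid SDS selection probabilities.
    p_3d: float = 0.5
    p_img: float = 0.5
    # Rendering for the image-based updates.
    mvdream_views: int = 4             # cameras per 3D-aware SDS update
    mvdream_res: tuple = (256, 256)    # native resolution of the 3D-aware model
    sd_render_res: tuple = (256, 256)  # rendered, then bilinearly upsampled
    sd_native_res: tuple = (512, 512)  # to Stable Diffusion's native resolution

cfg = HybridSDSConfig()
assert len(cfg.n_stage) == 3
```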
Finally, the video SDS update is computed by rendering 16 video frames at " + }, + { + "bbox": [ + 304, + 446, + 547, + 507 + ], + "type": "inline_equation", + "content": "160 \\times 288" + }, + { + "bbox": [ + 304, + 446, + 547, + 507 + ], + "type": "text", + "content": " resolution and upsampling to the native " + }, + { + "bbox": [ + 304, + 446, + 547, + 507 + ], + "type": "inline_equation", + "content": "320 \\times 576" + }, + { + "bbox": [ + 304, + 446, + 547, + 507 + ], + "type": "text", + "content": " resolution of Zeroscope." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 521, + 387, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 521, + 387, + 536 + ], + "spans": [ + { + "bbox": [ + 306, + 521, + 387, + 536 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 543, + 365, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 543, + 365, + 555 + ], + "spans": [ + { + "bbox": [ + 306, + 543, + 365, + 555 + ], + "type": "text", + "content": "4.1. Metrics" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 563, + 547, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 563, + 547, + 683 + ], + "spans": [ + { + "bbox": [ + 304, + 563, + 547, + 683 + ], + "type": "text", + "content": "We assess our method using CLIP Score [42] and a user study. We compare our model against MAV3D for 28 prompts and against our ablations for a subset of 5 prompts. Current text-to-4D models are costly to train, and many researchers in academia do not have access to the scale of resources available to large tech companies. Hence, we only used a subset due to computational limitations. To promote future research in this field, we open source the evaluation protocol for the user study along the code: https://github.com/sherwinbahmani/4dfy." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "text", + "content": "CLIP Score. CLIP Score [42] evaluates the correlation between a text prompt and an image. Specifically, this cor" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "8001" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 189, + 287, + 318 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 288, + 170 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 288, + 170 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 288, + 170 + ], + "type": "text", + "content": "Table 2. Quantitative results. We compare our method against MAV3D and variations of 4D-fy with different loss terms or backbone architectures (i.e., with HexPlane [8]). The methods are evaluated in terms of CLIP Score (CLIP) and human preference based on appearance quality (AQ), 3D structure quality (SQ), motion quality (MQ), text alignment (TA), and overall preference (Overall). The numbers reported for human preference are the percentages of users who voted for our method over the corresponding method in head-to-head comparisons." 
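To illustrate the rendering scheme above (low-resolution renders bilinearly upsampled to each diffusion model's native resolution, e.g. 16 frames at 160×288 upsampled to 320×576 for the video model), here is a small PyTorch sketch; `render_frames` stands in for rendering from the 4D representation and is not part of any released API.

```python
import torch
import torch.nn.functional as F

def frames_for_video_sds(render_frames, num_frames=16,
                         render_res=(160, 288), native_res=(320, 576)):
    """Render a short clip at reduced resolution, then upsample it to the
    video model's native resolution before the video SDS loss is computed."""
    frames = render_frames(num_frames, render_res)   # (T, 3, H, W), values in [0, 1]
    return F.interpolate(frames, size=native_res,
                         mode="bilinear", align_corners=False)

# Quick check with a dummy renderer standing in for the 4D representation:
dummy_renderer = lambda t, res: torch.rand(t, 3, *res)
print(frames_for_video_sds(dummy_renderer).shape)    # torch.Size([16, 3, 320, 576])
```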
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 189, + 287, + 318 + ], + "lines": [ + { + "bbox": [ + 49, + 189, + 287, + 318 + ], + "spans": [ + { + "bbox": [ + 49, + 189, + 287, + 318 + ], + "type": "table", + "html": "
<table><thead><tr><td rowspan=2>Method</td><td rowspan=2>CLIP</td><td colspan=5>Human Preference</td></tr>
<tr><td>AQ</td><td>SQ</td><td>MQ</td><td>TA</td><td>Overall</td></tr></thead><tbody>
<tr><td>MAV3D [57]</td><td>33.9</td><td>92%</td><td>89%</td><td>41%</td><td>52%</td><td>67%</td></tr>
<tr><td>4D-fy</td><td>34.2</td><td></td><td></td><td></td><td></td><td></td></tr>
<tr><td colspan=7>Ablation Study</td></tr>
<tr><td>4D-fy</td><td>35.0</td><td></td><td></td><td></td><td></td><td></td></tr>
<tr><td>w/o ∇θL3D/IMG</td><td>29.3</td><td>100%</td><td>100%</td><td>78%</td><td>86%</td><td>94%</td></tr>
<tr><td>w/o ∇θL3D</td><td>35.1</td><td>88%</td><td>89%</td><td>95%</td><td>92%</td><td>91%</td></tr>
<tr><td>w/o ∇θLIMG</td><td>34.5</td><td>70%</td><td>68%</td><td>68%</td><td>69%</td><td>70%</td></tr>
<tr><td>w/o hybrid SDS</td><td>33.8</td><td>100%</td><td>100%</td><td>78%</td><td>88%</td><td>95%</td></tr>
<tr><td>w/ HexPlane</td><td>34.5</td><td>95%</td><td>92%</td><td>90%</td><td>92%</td><td>95%</td></tr>
</tbody></table>
", + "image_path": "10903998fb787604436c7f5b976326d94139a58c29e56ec6b97627a0427fb085.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 334, + 287, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 334, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 46, + 334, + 287, + 453 + ], + "type": "text", + "content": "responds to the cosine similarity between textual CLIP [46] embedding and visual CLIP [46] embedding. The score is bound between 0 and 100, where 100 is best. We calculate the CLIP score for MAV3D using the same procedure we use for our method. Specifically, for each input text prompt, we render a video using the same camera trajectory as MAV3D, i.e., moving around the scene in azimuth with a fixed elevation angle. Subsequently, we score each video frame with CLIP ViT-B/32 and average the scores over all frames and text prompts to derive the final CLIP score." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 459, + 287, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 459, + 287, + 627 + ], + "spans": [ + { + "bbox": [ + 46, + 459, + 287, + 627 + ], + "type": "text", + "content": "User study. We conduct qualitative comparisons between our method and the baseline, MAV3D, by surveying 26 human evaluators. We use the same head-to-head comparison model as the user survey conducted by MAV3D. Specifically, we present text prompts alongside the corresponding outputs of our method and the baseline method in random order. Evaluators are requested to specify their overall preference for a video, as well as evaluate four specific properties: appearance quality, 3D structure quality, motion quality, and text alignment. In Table 2, we report the percentage of users who prefer each method overall and based on each of the four properties. We conduct " + }, + { + "bbox": [ + 46, + 459, + 287, + 627 + ], + "type": "inline_equation", + "content": "\\chi^2" + }, + { + "bbox": [ + 46, + 459, + 287, + 627 + ], + "type": "text", + "content": "-tests to evaluate statistical significance at the " + }, + { + "bbox": [ + 46, + 459, + 287, + 627 + ], + "type": "inline_equation", + "content": "p < 0.05" + }, + { + "bbox": [ + 46, + 459, + 287, + 627 + ], + "type": "text", + "content": " level. Further details on the user study are included in the supplementary." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 635, + 105, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 635, + 105, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 635, + 105, + 647 + ], + "type": "text", + "content": "4.2. Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 653, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 286, + 713 + ], + "type": "text", + "content": "We visualize spatio-temporal renderings along with depth maps in comparison to MAV3D in Fig. 4. Although both methods can synthesize 4D scenes, MAV3D noticeably lacks detail. In contrast, our method produces realistic renderings across space and time. We report quantitative metrics in" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "content": "Table 2. 
In terms of CLIP Score and overall preference in the user study 4D-fy outperforms MAV3D. Users indicated a statistically significant preference towards 4D-fy compared to MAV3D in terms of appearance quality, 3D structure quality, text alignment, and overall preference. They rated the motion quality roughly on par with MAV3D, which used a proprietary text-to-video model. For example, overall, " + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "67\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "content": " of users prefer our method over " + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "33\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "content": " for MAV3D." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 177, + 373, + 189 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 177, + 373, + 189 + ], + "spans": [ + { + "bbox": [ + 306, + 177, + 373, + 189 + ], + "type": "text", + "content": "4.3. Ablations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 195, + 545, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 195, + 545, + 243 + ], + "spans": [ + { + "bbox": [ + 304, + 195, + 545, + 243 + ], + "type": "text", + "content": "We provide an in-depth analysis motivating our hybrid SDS training scheme by ablating each component and evaluating the use of a 4D neural representation more similar to that of MAV3D. We provide ablations in Table 2 and in Fig. 5." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 249, + 547, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 249, + 547, + 357 + ], + "spans": [ + { + "bbox": [ + 304, + 249, + 547, + 357 + ], + "type": "text", + "content": "Image guidance (w/o " + }, + { + "bbox": [ + 304, + 249, + 547, + 357 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}\\mathcal{L}_{3D / \\mathrm{IMG}}" + }, + { + "bbox": [ + 304, + 249, + 547, + 357 + ], + "type": "text", + "content": "). Technically, learning a dynamic 3D scene solely from a text-to-video model without text-to-image guidance is possible. To demonstrate the drawbacks of this approach, we present results where we skip the first two stages and directly train the model with text-to-video guidance only. This corresponds to setting " + }, + { + "bbox": [ + 304, + 249, + 547, + 357 + ], + "type": "inline_equation", + "content": "P_{3\\mathrm{D}} = 0" + }, + { + "bbox": [ + 304, + 249, + 547, + 357 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 249, + 547, + 357 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{IMG}} = 0" + }, + { + "bbox": [ + 304, + 249, + 547, + 357 + ], + "type": "text", + "content": ". Our experiments reveal that the text-to-video model fails to provide realistic 3D structure and high-quality appearance for generating a dynamic 3D scene." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 361, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 361, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 304, + 361, + 545, + 434 + ], + "type": "text", + "content": "3D-aware guidance (w/o " + }, + { + "bbox": [ + 304, + 361, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}\\mathcal{L}_{\\mathbf{3D}}" + }, + { + "bbox": [ + 304, + 361, + 545, + 434 + ], + "type": "text", + "content": "). 
We find that using a 3D-aware diffusion model is crucial for generating realistic 3D structures. If we remove the 3D-aware diffusion model, i.e., by setting " + }, + { + "bbox": [ + 304, + 361, + 545, + 434 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{3D}} = 0" + }, + { + "bbox": [ + 304, + 361, + 545, + 434 + ], + "type": "text", + "content": ", we can generate scenes with similar motion and high-quality appearance, but the 3D structure is degraded. This is evident for both scenes in Fig. 5." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 439, + 545, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 439, + 545, + 511 + ], + "spans": [ + { + "bbox": [ + 304, + 439, + 545, + 511 + ], + "type": "text", + "content": "VSD guidance (w/o " + }, + { + "bbox": [ + 304, + 439, + 545, + 511 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}}" + }, + { + "bbox": [ + 304, + 439, + 545, + 511 + ], + "type": "text", + "content": "). We find that VSD helps provide a realistic scene appearance; if we disable it during scene generation, i.e., " + }, + { + "bbox": [ + 304, + 439, + 545, + 511 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{IMG}} = 0" + }, + { + "bbox": [ + 304, + 439, + 545, + 511 + ], + "type": "text", + "content": ", there are some negative effects. For example in Fig. 5, the ice cream cone in the bucket (top row) is more detailed, and the dog's face (bottom row) is sharper (please zoom in)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 516, + 545, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 516, + 545, + 648 + ], + "spans": [ + { + "bbox": [ + 304, + 516, + 545, + 648 + ], + "type": "text", + "content": "Hybrid SDS. To illustrate the impact of our hybrid SDS approach we disable image guidance after the second stage by setting " + }, + { + "bbox": [ + 304, + 516, + 545, + 648 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{3D}} = 0" + }, + { + "bbox": [ + 304, + 516, + 545, + 648 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 516, + 545, + 648 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{IMG}} = 0" + }, + { + "bbox": [ + 304, + 516, + 545, + 648 + ], + "type": "text", + "content": " for the third stage only. This aligns with the MAV3D training scheme, where a static model is pre-trained with text-to-image and subsequently fine-tuned with text-to-video. Our quantitative and qualitative analysis shows that this approach results in degraded appearance and 3D structure. We find that incorporating text-to-image, 3D-aware text-to-image, and text-to-video via hybrid SDS in the final optimization stage preserves a realistic appearance and high-quality 3D structure." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "text", + "content": "Backbone architecture. Finally, we ablate the hash-grid-based 4D representation by replacing it with the HexPlane [8, 13] architecture. This representation similarly disentangles static and dynamic scene components and can be readily integrated into our pipeline. 
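The guidance ablations above mostly amount to zeroing out one of the selection probabilities (and, for the hybrid-SDS ablation, only during the final stage). The mapping below is an illustrative Python summary of those settings, not a configuration taken from the released code.

```python
# Keys and field names are invented for this sketch; the probability settings
# follow the ablation descriptions above.
ABLATIONS = {
    "w/o grad_3D/IMG": {"p_3d": 0.0, "p_img": 0.0, "skip_stages": (1, 2)},  # video guidance only
    "w/o grad_3D":     {"p_3d": 0.0},                                       # drop 3D-aware SDS
    "w/o grad_IMG":    {"p_img": 0.0},                                      # drop VSD refinement
    "w/o hybrid SDS":  {"stage3_p_3d": 0.0, "stage3_p_img": 0.0},           # MAV3D-style fine-tuning
    "w/ HexPlane":     {"backbone": "hexplane"},                            # swap the 4D backbone
}
```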
The HexPlane approach" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8002" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 70, + 547, + 323 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 547, + 323 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 547, + 323 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 547, + 323 + ], + "type": "image", + "image_path": "d1b8cf07818ff185d04d318b5afa06dfacda54a5971ea22323fa7f4b80b6cda7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 333, + 546, + 378 + ], + "lines": [ + { + "bbox": [ + 46, + 333, + 546, + 378 + ], + "spans": [ + { + "bbox": [ + 46, + 333, + 546, + 378 + ], + "type": "text", + "content": "Figure 5. Ablation study. We assess the qualitative impact of removing gradient updates from different models during optimization. Our method without image guidance " + }, + { + "bbox": [ + 46, + 333, + 546, + 378 + ], + "type": "inline_equation", + "content": "(\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D} / \\mathrm{IMG}})" + }, + { + "bbox": [ + 46, + 333, + 546, + 378 + ], + "type": "text", + "content": " does not produce realistic appearance and 3D structure. Removing the 3D-aware guidance " + }, + { + "bbox": [ + 46, + 333, + 546, + 378 + ], + "type": "inline_equation", + "content": "(\\nabla_{\\theta}\\mathcal{L}_{3\\mathrm{D}})" + }, + { + "bbox": [ + 46, + 333, + 546, + 378 + ], + "type": "text", + "content": " generates high-quality appearance but low-quality 3D structure. Our approach without VSD " + }, + { + "bbox": [ + 46, + 333, + 546, + 378 + ], + "type": "inline_equation", + "content": "(\\nabla_{\\theta}\\mathcal{L}_{\\mathrm{IMG}})" + }, + { + "bbox": [ + 46, + 333, + 546, + 378 + ], + "type": "text", + "content": " reduces the appearance quality. Hybrid SDS is crucial for appearance and 3D structure, while using HexPlane reduces the appearance quality. Best viewed digitally." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 398, + 289, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 398, + 289, + 517 + ], + "spans": [ + { + "bbox": [ + 46, + 398, + 289, + 517 + ], + "type": "text", + "content": "fails to match the appearance quality of the hash-grid-based representation. MAV3D uses HexPlane but implements a multi-scale variant with a large 5-layer decoding MLP featuring 128 hidden units. We could not re-implement this approach as the model does not fit on an 80 GB A100 GPU. To allow for a fair comparison, we instead increased the capacity of HexPlane to match the memory consumption of our hash-grid-based representation. We expect that increasing the capacity of HexPlane and longer training times could lead to similar results as our representation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 531, + 119, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 531, + 119, + 544 + ], + "spans": [ + { + "bbox": [ + 47, + 531, + 119, + 544 + ], + "type": "text", + "content": "5. 
Conclusion" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 552, + 287, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 552, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 46, + 552, + 287, + 624 + ], + "type": "text", + "content": "Our method synthesizes high-quality 4D scenes from text prompts using a novel hybrid score distillation sampling procedure. Our work alleviates a three-way tradeoff between appearance, 3D structure, and motion and is the first to build on open-source models. We will release the code to facilitate future research in text-to-4D generation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": "Limitations. Although our method produces compelling dynamic 3D scenes, there are several limitations and avenues for future work. First, the complexity of motion in our scenes is limited to simple movements. We believe that our method will directly benefit from future progress in text-to-video generation, as current text-to-video models suffer from low-quality renderings and unrealistic motion. Another way" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 398, + 547, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 398, + 547, + 541 + ], + "spans": [ + { + "bbox": [ + 304, + 398, + 547, + 541 + ], + "type": "text", + "content": "to improve motion could be exploiting recently proposed dynamic representations, e.g., dynamic 3D Gaussians [37]. Moreover, current metrics in text-to-3D generation are not sufficient, as they mainly rely on image-based metrics and user studies. Designing more sophisticated 3D and 4D metrics is an important direction for future work. Lastly, generating each scene takes a significant amount of time. Concurrent text-to-3D works [23, 28] alleviate this problem by training a large-scale model on 3D data, allowing generation within seconds. Incorporating our hybrid optimization procedure to blend between large-scale pre-training on 2D, 3D, and video data could enable fast text-to-4D generation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 547, + 546, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 547, + 546, + 584 + ], + "spans": [ + { + "bbox": [ + 304, + 547, + 546, + 584 + ], + "type": "text", + "content": "Ethics Statement. We condemn the application of our method for creating realistic fake content intended to harm specific entities or propagate misinformation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 594, + 421, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 594, + 421, + 608 + ], + "spans": [ + { + "bbox": [ + 306, + 594, + 421, + 608 + ], + "type": "text", + "content": "6. Acknowledgements" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 613, + 547, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 613, + 547, + 703 + ], + "spans": [ + { + "bbox": [ + 304, + 613, + 547, + 703 + ], + "type": "text", + "content": "This work was supported by the Natural Sciences and Engineering Research Council of Canada (NSERC) Discovery Grant program, the Digital Research Alliance of Canada, and by the Advanced Research Computing at Simon Fraser University. 
It was also supported in part by ARL grant W911NF-21-2-0104, a Vannevar Bush Faculty Fellowship, a gift from the Adobe Corporation, a PECASE by the ARO, NSF award 1839974, Stanford HAI, and a Samsung GRO." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8003" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 53, + 91, + 288, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 288, + 123 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 288, + 123 + ], + "type": "text", + "content": "[1] Stable Diffusion version 2. https://github.com/Stability-AI/stablediffusion. Accessed: 2023-10-31. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 288, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 288, + 157 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 288, + 157 + ], + "type": "text", + "content": "[2] Threestudio Github page. https://github.com/threestudio-project/threestudio. Accessed: 2023-10-31.5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 159, + 288, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 288, + 191 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 288, + 191 + ], + "type": "text", + "content": "[3] Zeroscope text-to-video model. https://huggingface.co/cerspense/zeroscope_v2_576w. Accessed: 2023-10-31. 2, 3, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 194, + 287, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 194, + 287, + 216 + ], + "spans": [ + { + "bbox": [ + 53, + 194, + 287, + 216 + ], + "type": "text", + "content": "[4] Giovanni Adorni and Mauro Di Manzo. Natural language input for scene generation. In Proc. EACL, 1983. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 217, + 288, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 217, + 288, + 249 + ], + "spans": [ + { + "bbox": [ + 53, + 217, + 288, + 249 + ], + "type": "text", + "content": "[5] Max Bain, Arsha Nagrani, Gül Varol, and Andrew Zisserman. Frozen in time: A joint video and image encoder for end-to-end retrieval. In Proc. ICCV, 2021. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 251, + 288, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 251, + 288, + 305 + ], + "spans": [ + { + "bbox": [ + 53, + 251, + 288, + 305 + ], + "type": "text", + "content": "[6] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Ji-aming Song, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, Bryan Catanzaro, et al. eDiff-I: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 
1, 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 307, + 288, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 307, + 288, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 307, + 288, + 350 + ], + "type": "text", + "content": "[7] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proc. CVPR, 2023. 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 353, + 287, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 353, + 287, + 373 + ], + "spans": [ + { + "bbox": [ + 53, + 353, + 287, + 373 + ], + "type": "text", + "content": "[8] Ang Cao and Justin Johnson. HexPlane: A fast representation for dynamic scenes. In Proc. CVPR, 2023. 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 376, + 287, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 376, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 53, + 376, + 287, + 430 + ], + "type": "text", + "content": "[9] Eric R Chan, Koki Nagano, Matthew A Chan, Alexander W Bergman, Jeong Joon Park, Axel Levy, Miika Aittala, Shalini De Mello, Tero Karras, and Gordon Wetzstein. Generative novel view synthesis with 3D-aware diffusion models. In Proc. ICCV, 2023. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 432, + 288, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 432, + 288, + 463 + ], + "spans": [ + { + "bbox": [ + 48, + 432, + 288, + 463 + ], + "type": "text", + "content": "[10] Angel Chang, Manolis Savva, and Christopher D Manning. Learning spatial knowledge for text to 3D scene generation. In Proc. EMNLP, 2014. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 466, + 288, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 466, + 288, + 509 + ], + "spans": [ + { + "bbox": [ + 48, + 466, + 288, + 509 + ], + "type": "text", + "content": "[11] Kevin Chen, Christopher B Choy, Manolis Savva, Angel X Chang, Thomas Funkhouser, and Silvio Savarese. Text2shape: Generating shapes from natural language by learning joint embeddings. In Proc. ACCV, 2018. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 511, + 288, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 511, + 288, + 542 + ], + "spans": [ + { + "bbox": [ + 48, + 511, + 288, + 542 + ], + "type": "text", + "content": "[12] Bob Coyne and Richard Sproat. Wordseye: An automatic text-to-scene conversion system. In Proc. SIGGRAPH, 2001. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 544, + 288, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 288, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 288, + 588 + ], + "type": "text", + "content": "[13] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbæk Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proc. CVPR, 2023. 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 590, + 288, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 288, + 623 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 288, + 623 + ], + "type": "text", + "content": "[14] William Gao, Noam Aigerman, Thibault Groueix, Vova Kim, and Rana Hanocka. 
Textdeformer: Geometry manipulation using text guidance. In Proc. SIGGRAPH, 2023. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "type": "text", + "content": "[15] Jiatao Gu, Alex Trevithick, Kai-En Lin, Joshua M Susskind, Christian Theobalt, Lingjie Liu, and Ravi Ramamoorthi. Nerfdiff: Single-image view synthesis with Nerf-guided distillation from 3d-aware diffusion. In Proc. ICML, 2023. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "text", + "content": "[16] Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, and Baining Guo. Vector quantized diffusion model for text-to-image synthesis. In Proc. CVPR, 2022. 3" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 117 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 117 + ], + "type": "text", + "content": "[17] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Imagine your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 119, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 119, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 547, + 162 + ], + "type": "text", + "content": "[18] Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. Latent video diffusion models for high-fidelity video generation with arbitrary lengths. arXiv preprint arXiv:2211.13221, 2022. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 164, + 547, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 164, + 547, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 164, + 547, + 196 + ], + "type": "text", + "content": "[19] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. In Proc. NeurIPS Workshop on Deep Generative Models, 2021. 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 199, + 547, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 199, + 547, + 231 + ], + "spans": [ + { + "bbox": [ + 307, + 199, + 547, + 231 + ], + "type": "text", + "content": "[20] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Proc. NeurIPS, 2020. 1, 2, 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 234, + 547, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 234, + 547, + 289 + ], + "spans": [ + { + "bbox": [ + 307, + 234, + 547, + 289 + ], + "type": "text", + "content": "[21] Jonathan Ho, William Chan, Chitwan Sahara, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 
2, 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 291, + 547, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 291, + 547, + 334 + ], + "spans": [ + { + "bbox": [ + 307, + 291, + 547, + 334 + ], + "type": "text", + "content": "[22] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. The Journal of Machine Learning Research, 23(1):2249-2281, 2022. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 336, + 547, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 336, + 547, + 380 + ], + "spans": [ + { + "bbox": [ + 307, + 336, + 547, + 380 + ], + "type": "text", + "content": "[23] Yicong Hong, Kai Zhang, Jiuming Gu, Sai Bi, Yang Zhou, Difan Liu, Feng Liu, Kalyan Sunkavalli, Trung Bui, and Hao Tan. Lrm: Large reconstruction model for single image to 3D. arXiv preprint arXiv:2311.04400, 2023. 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 382, + 547, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 382, + 547, + 415 + ], + "spans": [ + { + "bbox": [ + 307, + 382, + 547, + 415 + ], + "type": "text", + "content": "[24] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. In Proc. ICLR, 2021. 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 418, + 547, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 418, + 547, + 449 + ], + "spans": [ + { + "bbox": [ + 307, + 418, + 547, + 449 + ], + "type": "text", + "content": "[25] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proc. CVPR, 2022. 1, 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 452, + 547, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 452, + 547, + 473 + ], + "spans": [ + { + "bbox": [ + 307, + 452, + 547, + 473 + ], + "type": "text", + "content": "[26] Nikolay Jetchev. Clipmatrix: Text-controlled creation of 3D textured meshes. arXiv preprint arXiv:2109.12922, 2021. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 475, + 547, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 475, + 547, + 518 + ], + "spans": [ + { + "bbox": [ + 307, + 475, + 547, + 518 + ], + "type": "text", + "content": "[27] Yanqin Jiang, Li Zhang, Jin Gao, Weimin Hu, and Yao Yao. Consistent4D: Consistent " + }, + { + "bbox": [ + 307, + 475, + 547, + 518 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 307, + 475, + 547, + 518 + ], + "type": "text", + "content": " dynamic object generation from monocular video. arXiv preprint arXiv:2311.02848, 2023. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 521, + 547, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 521, + 547, + 576 + ], + "spans": [ + { + "bbox": [ + 307, + 521, + 547, + 576 + ], + "type": "text", + "content": "[28] Jiahao Li, Hao Tan, Kai Zhang, Zexiang Xu, Fujun Luan, Yinghao Xu, Yicong Hong, Kalyan Sunkavalli, Greg Shakhnarovich, and Sai Bi. Instant3D: Fast text-to-3D with sparse-view generation and large reconstruction model. arXiv preprint arXiv:2311.06214, 2023. 
8" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 578, + 547, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 578, + 547, + 620 + ], + "spans": [ + { + "bbox": [ + 307, + 578, + 547, + 620 + ], + "type": "text", + "content": "[29] Weiyu Li, Rui Chen, Xuelin Chen, and Ping Tan. Sweetdreamer: Aligning geometric priors in 2D diffusion for consistent text-to-3D. arXiv preprint arXiv:2310.02596, 2023. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 624, + 547, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 547, + 667 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 547, + 667 + ], + "type": "text", + "content": "[30] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3D: High-resolution text-to-3D content creation. In Proc. CVPR, 2023. 3, 6" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 670, + 547, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 547, + 712 + ], + "type": "text", + "content": "[31] Yukang Lin, Haonan Han, Chaoqun Gong, Zunnan Xu, Yachao Zhang, and Xiu Li. Consistent123: One image to highly consistent 3D asset using case-aware diffusion priors. arXiv preprint arXiv:2309.17261, 2023. 3" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8004" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[32] Huan Ling, Seung Wook Kim, Antonio Torralba, Sanja Fidler, and Karsten Kreis. Align your gaussians: Text-to-4D with dynamic 4D gaussians and composed diffusion models. arXiv preprint arXiv:2312.13763, 2023. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 288, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 288, + 150 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 288, + 150 + ], + "type": "text", + "content": "[33] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. In Proc. ICCV, 2023. 2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 288, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 288, + 185 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 288, + 185 + ], + "type": "text", + "content": "[34] Ying Liu, Dengsheng Zhang, Guojun Lu, and Wei-Ying Ma. A survey of content-based image retrieval with high-level semantics. Pattern Recognition, 40(1):262-282, 2007. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 288, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 288, + 229 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 288, + 229 + ], + "type": "text", + "content": "[35] Yuan Liu, Cheng Lin, Zijiao Zeng, Xiaoxiao Long, Lingjie Liu, Taku Komura, and Wenping Wang. Syncdreamer: Generating multiview-consistent images from a single-view image. arXiv preprint arXiv:2309.03453, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 231, + 288, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 231, + 288, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 231, + 288, + 274 + ], + "type": "text", + "content": "[36] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. ACM Trans. Graph., 2019. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 276, + 288, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 276, + 288, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 276, + 288, + 319 + ], + "type": "text", + "content": "[37] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3D Gaussians: Tracking by persistent dynamic view synthesis. arXiv preprint arXiv:2308.09713, 2023. 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 320, + 288, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 320, + 288, + 374 + ], + "spans": [ + { + "bbox": [ + 48, + 320, + 288, + 374 + ], + "type": "text", + "content": "[38] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 376, + 288, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 376, + 288, + 409 + ], + "spans": [ + { + "bbox": [ + 48, + 376, + 288, + 409 + ], + "type": "text", + "content": "[39] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 2022. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 411, + 288, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 411, + 288, + 464 + ], + "spans": [ + { + "bbox": [ + 48, + 411, + 288, + 464 + ], + "type": "text", + "content": "[40] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. In Proc. ICML, 2022. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 466, + 288, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 466, + 288, + 498 + ], + "spans": [ + { + "bbox": [ + 48, + 466, + 288, + 498 + ], + "type": "text", + "content": "[41] Zijie Pan, Zeyu Yang, Xiatian Zhu, and Li Zhang. Fast dynamic 3D object generation from a single-view video. arXiv preprint arXiv:2401.08742, 2024. 
3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 500, + 288, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 500, + 288, + 533 + ], + "spans": [ + { + "bbox": [ + 48, + 500, + 288, + 533 + ], + "type": "text", + "content": "[42] Dong Huk Park, Samaneh Azadi, Xihui Liu, Trevor Darrell, and Anna Rohrbach. Benchmark for compositional text-to-image synthesis. In Proc. NeurIPS, 2021. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 534, + 288, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 534, + 288, + 589 + ], + "spans": [ + { + "bbox": [ + 48, + 534, + 288, + 589 + ], + "type": "text", + "content": "[43] Ryan Po, Wang Yifan, Vladislav Golyanik, Kfir Aberman, Jonathan T Barron, Amit H Bermano, Eric Ryan Chan, Tali Dekel, Aleksander Holynski, Angjoo Kanazawa, et al. State of the art on diffusion models for visual computing. arXiv preprint arXiv:2310.07204, 2023. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 590, + 288, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 288, + 622 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 288, + 622 + ], + "type": "text", + "content": "[44] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. DreamFusion: Text-to-3D using 2D diffusion. In Proc. ICLR, 2023. 1, 3, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 624, + 288, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 679 + ], + "type": "text", + "content": "[45] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3D object generation using both 2D and 3D diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 680, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 288, + 712 + ], + "type": "text", + "content": "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 327, + 73, + 546, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 546, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 546, + 106 + ], + "type": "text", + "content": "Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Proc. ICML, 2021. 2, 3, 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 106, + 547, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 106, + 547, + 139 + ], + "spans": [ + { + "bbox": [ + 308, + 106, + 547, + 139 + ], + "type": "text", + "content": "[47] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In Proc. ICML, 2021. 
2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 140, + 547, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 140, + 547, + 182 + ], + "spans": [ + { + "bbox": [ + 308, + 140, + 547, + 182 + ], + "type": "text", + "content": "[48] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. arXiv preprint arXiv:2204.06125, 2022. 3, 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 183, + 547, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 183, + 547, + 217 + ], + "spans": [ + { + "bbox": [ + 308, + 183, + 547, + 217 + ], + "type": "text", + "content": "[49] Scott Reed, Zeynep Akata, Xinchen Yan, Lajanugen Logeswaran, Bernt Schiele, and Honglak Lee. Generative adversarial text to image synthesis. In Proc. ICML, 2016. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 217, + 546, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 217, + 546, + 258 + ], + "spans": [ + { + "bbox": [ + 308, + 217, + 546, + 258 + ], + "type": "text", + "content": "[50] Jiawei Ren, Liang Pan, Jiaxiang Tang, Chi Zhang, Ang Cao, Gang Zeng, and Ziwei Liu. DreamGaussian4D: Generative 4D Gaussian splatting. arXiv preprint arXiv:2312.17142, 2023. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 261, + 547, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 261, + 547, + 304 + ], + "spans": [ + { + "bbox": [ + 308, + 261, + 547, + 304 + ], + "type": "text", + "content": "[51] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Bjorn Ommer. High-resolution image synthesis with latent diffusion models. In Proc. CVPR, 2022, 1, 2, 3, 4, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 304, + 546, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 304, + 546, + 360 + ], + "spans": [ + { + "bbox": [ + 308, + 304, + 546, + 360 + ], + "type": "text", + "content": "[52] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Proc. NeurIPS, 2022. 1, 2, 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 361, + 547, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 361, + 547, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 361, + 547, + 403 + ], + "type": "text", + "content": "[53] Aditya Sanghi, Hang Chu, Joseph G Lambourne, Ye Wang, Chin-Yi Cheng, Marco Fumero, and Kamal Rahimi Malekshan. Clip-forge: Towards zero-shot text-to-shape generation. In Proc. CVPR, 2022. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 404, + 547, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 404, + 547, + 458 + ], + "spans": [ + { + "bbox": [ + 308, + 404, + 547, + 458 + ], + "type": "text", + "content": "[54] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. Proc. NeurIPS, 2022. 
1, 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 460, + 546, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 460, + 546, + 502 + ], + "spans": [ + { + "bbox": [ + 308, + 460, + 546, + 502 + ], + "type": "text", + "content": "[55] Yichun Shi, Peng Wang, Jianglong Ye, Mai Long, Kejie Li, and Xiao Yang. MVDream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512, 2023. 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 504, + 546, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 504, + 546, + 556 + ], + "spans": [ + { + "bbox": [ + 308, + 504, + 546, + 556 + ], + "type": "text", + "content": "[56] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022. 1, 2, 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 559, + 546, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 559, + 546, + 612 + ], + "spans": [ + { + "bbox": [ + 308, + 559, + 546, + 612 + ], + "type": "text", + "content": "[57] Uriel Singer, Shelly Sheynin, Adam Polyak, Oron Ashual, Iurii Makarov, Filippos Kokkinos, Naman Goyal, Andrea Vedaldi, Devi Parikh, Justin Johnson, et al. Text-to-4d dynamic scene generation. In Proc. ICML, 2023. 1, 3, 5, 6, 7" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 614, + 546, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 614, + 546, + 647 + ], + "spans": [ + { + "bbox": [ + 308, + 614, + 546, + 647 + ], + "type": "text", + "content": "[58] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In Proc. ICML, 2015. 1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 647, + 546, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 546, + 668 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 546, + 668 + ], + "type": "text", + "content": "[59] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. Proc. ICLR, 2021. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 670, + 547, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 547, + 712 + ], + "type": "text", + "content": "[60] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In Proc. ICLR, 2021. 
1" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8005" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 288, + 117 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 288, + 117 + ], + "type": "text", + "content": "[61] Matteo Stefanini, Marcella Cornia, Lorenzo Baraldi, Silvia Cascianelli, Giuseppe Fiameni, and Rita Cucchiara. From show to tell: A survey on deep learning-based image captioning. IEEE Trans. Pattern Anal. Mach. Intell., 2022. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 119, + 288, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 119, + 288, + 163 + ], + "spans": [ + { + "bbox": [ + 48, + 119, + 288, + 163 + ], + "type": "text", + "content": "[62] Junshu Tang, Tengfei Wang, Bo Zhang, Ting Zhang, Ran Yi, Lizhuang Ma, and Dong Chen. Make-it-3D: High-fidelity 3D creation from a single image with diffusion prior. arXiv preprint arXiv:2303.14184, 2023. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 165, + 288, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 165, + 288, + 220 + ], + "spans": [ + { + "bbox": [ + 48, + 165, + 288, + 220 + ], + "type": "text", + "content": "[63] Ayush Tewari, Tianwei Yin, George Cazenavette, Semon Rezchikov, Joshua B Tenenbaum, Frédo Durand, William T Freeman, and Vincent Sitzmann. Diffusion with forward models: Solving stochastic inverse problems without direct supervision. arXiv preprint arXiv:2306.11719, 2023. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 222, + 288, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 222, + 288, + 255 + ], + "spans": [ + { + "bbox": [ + 48, + 222, + 288, + 255 + ], + "type": "text", + "content": "[64] Haithem Turki, Jason Y Zhang, Francesco Ferroni, and Deva Ramanan. Suds: Scalable urban dynamic scenes. In Proc CVPR, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 256, + 288, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 256, + 288, + 311 + ], + "spans": [ + { + "bbox": [ + 48, + 256, + 288, + 311 + ], + "type": "text", + "content": "[65] Ruben Villegas, Mohammad Babaeizadeh, Pieter-Jan Kindermans, Hernan Moraldo, Han Zhang, Mohammad Taghi Saffar, Santiago Castro, Julius Kunze, and Dumitru Erhan. Phenaki: Variable length video generation from open domain textual description. arXiv preprint arXiv:2210.02399, 2022. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 313, + 288, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 313, + 288, + 346 + ], + "spans": [ + { + "bbox": [ + 48, + 313, + 288, + 346 + ], + "type": "text", + "content": "[66] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-NeRF: Text-and-image driven manipulation of neural radiance fields. In Proc. CVPR, 2022. 
3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 348, + 288, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 348, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 348, + 288, + 392 + ], + "type": "text", + "content": "[67] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A Yeh, and Greg Shakhnarovich. Score Jacobian Chaining: Lifting pretrained 2D diffusion models for 3D generation. In Proc. CVPR, 2023. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 394, + 288, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 394, + 288, + 427 + ], + "spans": [ + { + "bbox": [ + 48, + 394, + 288, + 427 + ], + "type": "text", + "content": "[68] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 429, + 288, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 429, + 288, + 473 + ], + "spans": [ + { + "bbox": [ + 48, + 429, + 288, + 473 + ], + "type": "text", + "content": "[69] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 475, + 288, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 475, + 288, + 529 + ], + "spans": [ + { + "bbox": [ + 48, + 475, + 288, + 529 + ], + "type": "text", + "content": "[70] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. arXiv preprint arXiv:2306.02018, 2023. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 532, + 288, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 532, + 288, + 575 + ], + "spans": [ + { + "bbox": [ + 48, + 532, + 288, + 575 + ], + "type": "text", + "content": "[71] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3D generation with variational score distillation. Proc. NeurIPS, 2023. 1, 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 578, + 288, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 288, + 621 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 288, + 621 + ], + "type": "text", + "content": "[72] Ruiqi Wu, Liangyu Chen, Tong Yang, Chunle Guo, Chongyi Li, and Xiangyu Zhang. Lamp: Learn a motion pattern for few-shot-based video generation. arXiv preprint arXiv:2310.10769, 2023. 2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "type": "text", + "content": "[73] Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. AttnGAN: Fine-grained text to image generation with attentional generative adversarial networks. In Proc. CVPR, 2018. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "type": "text", + "content": "[74] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In Proc. CVPR, 2022. 3" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 508 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "text", + "content": "[75] Yuyang Yin, Dejia Xu, Zhangyang Wang, Yao Zhao, and Yunchao Wei. 4DGen: Grounded 4D content generation with spatial-temporal consistency. arXiv preprint arXiv:2312.17225, 2023. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 118, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 547, + 162 + ], + "type": "text", + "content": "[76] Paul Yoo, Jiaxian Guo, Yutaka Matsuo, and Shixiang Shane Gu. Dreamsparse: Escaping from Plato's cave with 2d diffusion model given sparse views. arXiv preprint arXiv:2306.03414, 2023. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 163, + 547, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 547, + 195 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 547, + 195 + ], + "type": "text", + "content": "[77] Quanzeng You, Hailin Jin, Zhaowen Wang, Chen Fang, and Jiebo Luo. Image captioning with semantic attention. In Proc. CVPR, 2016. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 197, + 547, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 547, + 272 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 547, + 272 + ], + "type": "text", + "content": "[78] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, Ben Hutchinson, Wei Han, Zarana Parekh, Xin Li, Han Zhang, Jason Baldridge, and Yonghui Wu. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 274, + 547, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 274, + 547, + 329 + ], + "spans": [ + { + "bbox": [ + 307, + 274, + 547, + 329 + ], + "type": "text", + "content": "[79] Lili Yu, Bowen Shi, Ramakanth Pasunuru, Benjamin Muller, Olga Golovneva, Tianlu Wang, Arun Babu, Binh Tang, Brian Karrer, Shelly Sheynin, et al. Scaling autoregressive multimodal models: Pretraining and instruction tuning. arXiv preprint arXiv:2309.02591, 2023. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 331, + 547, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 331, + 547, + 374 + ], + "spans": [ + { + "bbox": [ + 307, + 331, + 547, + 374 + ], + "type": "text", + "content": "[80] Han Zhang, Tao Xu, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, and Dimitris N Metaxas. 
StackGAN: Text to photo-realistic image synthesis with stacked generative adversarial networks. In Proc. ICCV, 2017. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 376, + 547, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 376, + 547, + 418 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 547, + 418 + ], + "type": "text", + "content": "[81] Yuyang Zhao, Zhiwen Yan, Enze Xie, Lanqing Hong, Zhenguo Li, and Gim Hee Lee. Animate124: Animating one image to 4D dynamic scene. arXiv preprint arXiv:2311.14603, 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 420, + 547, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 420, + 547, + 463 + ], + "spans": [ + { + "bbox": [ + 307, + 420, + 547, + 463 + ], + "type": "text", + "content": "[82] Yufeng Zheng, Xueting Li, Koki Nagano, Sifei Liu, Otmar Hilliges, and Shalini De Mello. A unified approach for text-and image-guided 4D scene generation. arXiv preprint arXiv:2311.16854, 2023. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 464, + 547, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 464, + 547, + 508 + ], + "spans": [ + { + "bbox": [ + 307, + 464, + 547, + 508 + ], + "type": "text", + "content": "[83] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 3" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "8006" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/6d74ca33-515d-4b03-96e1-8cccfa68be60_content_list.json b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/6d74ca33-515d-4b03-96e1-8cccfa68be60_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..93e21a419dc53e631d69d50b84011ee1189ad664 --- /dev/null +++ b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/6d74ca33-515d-4b03-96e1-8cccfa68be60_content_list.json @@ -0,0 +1,1658 @@ +[ + { + "type": "text", + "text": "4K4D: Real-Time 4D View Synthesis at 4K Resolution", + "text_level": 1, + "bbox": [ + 210, + 130, + 759, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhen Xu $^{1}$ Sida Peng $^{1}$ Haotong Lin $^{1}$ Guangzhao He $^{1}$ \nJiaming Sun $^{1}$ Yujun Shen $^{2}$ Hujun Bao $^{1}$ Xiaowei Zhou $^{1*}$", + "bbox": [ + 233, + 167, + 733, + 208 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Zhejiang University $^{2}$ Ant Group", + "bbox": [ + 331, + 215, + 633, + 236 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b0fb1ae180e4f2553cc3a444670a453b8a845be6541eb9f3574892ad7f2f6b5a.jpg", + "image_caption": [ + "Figure 1. Photorealistic and real-time rendering of dynamic 3D scenes. 
Our proposed method reconstructs a 4D neural representation from multi-view videos, which can be rendered at $1125 \\times 1536$ resolution with a speed of over 200 FPS using an RTX 3090 GPU while maintaining state-of-the-art quality on the DNA-Rendering [12] dataset. It is also noteworthy that our method reaches over 80 FPS when rendering 4K images with an RTX 4090. Detailed performance under different resolutions using different GPUs can be found in Tab. 5." + ], + "image_footnote": [], + "bbox": [ + 76, + 250, + 269, + 441 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3f817075de57a429debe67895115d7c6df1e274d885f35bf8b436ebc8086815b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 287, + 250, + 478, + 441 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/28cd712e6c35670581fbf72f353f01453419038cb2a31a7a8d9021b2a0f10d68.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 252, + 684, + 440 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ed1b4d46da56e20fd18e50a6c13f84f93f52b87e7220afe789fbc560d6e54fcc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 252, + 890, + 441 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 522, + 313, + 537 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper targets high-fidelity and real-time view synthesis of dynamic 3D scenes at $4K$ resolution. Recent methods on dynamic view synthesis have shown impressive rendering quality. However, their speed is still limited when rendering high-resolution images. To overcome this problem, we propose 4K4D, a 4D point cloud representation that supports hardware rasterization and network pre-computation to enable unprecedented rendering speed with a high rendering quality. Our representation is built on a 4D feature grid so that the points are naturally regularized and can be robustly optimized. In addition, we design a novel hybrid appearance model that significantly boosts the rendering quality while preserving efficiency. Moreover, we develop a differentiable depth peeling algorithm to effectively learn the proposed model from RGB videos. Experiments show that our representation can be rendered at over 400 FPS on the DNA-Rendering dataset at 1080p resolution and 80 FPS on the ENeRF-Outdoor dataset at $4K$ resolution using an RTX 4090 GPU, which is $30\\times$ faster than previous methods and achieves the state-of-the-art rendering quality. Our project page is available at https://zju3dv.github.io/4k4d.", + "bbox": [ + 75, + 550, + 473, + 867 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 501, + 522, + 630, + 537 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dynamic view synthesis aims to reconstruct dynamic 3D scenes from captured videos and create free-viewpoint and immersive virtual playback, which is a long-standing research problem in computer vision and computer graphics. Essential to the practicality of this technique is its ability to be rendered in real-time with high fidelity. Traditional methods [7, 13, 15, 16, 26, 61, 62, 84, 99, 100] represent dynamic 3D scenes as textured mesh sequences which can be rendered efficiently. 
However, high-quality mesh reconstruction requires complicated capture hardware and is limited to controlled environments.", + "bbox": [ + 496, + 550, + 893, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, implicit neural representations [19, 42, 58] have shown great success in reconstructing dynamic 3D scenes from RGB videos via differentiable rendering. For example, Li et al. [42] model the target scene as a dynamic neural radiance field and leverage volume rendering [17] to synthesize images. Despite impressive view synthesis results, existing approaches typically require seconds or even minutes to render an image at $1080\\mathrm{p}$ resolution due to the costly network evaluation, as discussed by Peng et al. [68]. Inspired by static view synthesis approaches [20, 33, 97], some dynamic view synthesis methods [2, 49, 68, 89] increase the rendering speed by decreasing either the", + "bbox": [ + 496, + 719, + 895, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "The authors from Zhejiang University are affiliated with the State Key Lab of CAD&CG. *Corresponding author: Xiaowei Zhou.", + "bbox": [ + 76, + 875, + 468, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "20029", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "network size or the number of network evaluations. With these strategies, such methods achieve over 40 FPS when rendering moderate-resolution images $(384 \\times 512)$ [49, 68], but are still not fast enough to achieve real-time performance when rendering high-resolution images. For instance, when rendering 4K resolution images, their speed reduces to only 1 or 2 FPS [2, 49, 68].", + "bbox": [ + 75, + 90, + 470, + 196 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose a novel neural representation, named 4K4D, for modeling and rendering dynamic 3D scenes. As illustrated in Fig. 1, 4K4D significantly outperforms previous dynamic view synthesis approaches [19, 49] in terms of the rendering speed, while being competitive in the rendering quality. Our core innovation lies in a 4D point cloud representation and a hybrid appearance model.", + "bbox": [ + 75, + 210, + 470, + 316 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Specifically, for the dynamic scene, we obtain the coarse point cloud sequence using space carving [37] and model the position of each point as a learnable vector. A 4D feature grid is introduced for assigning a feature vector to each point, which is fed into MLP networks to predict the point's radius, density, and spherical harmonics (SH) coefficients [59]. The 4D feature grid naturally applies spatial regularization on the point clouds and makes the optimization more robust (Sec. 5.2). During inference, the point's radius, density and SH coefficients can be pre-computed, which eliminates network evaluations to achieve unprecedented rendering speed. 
Moreover, we develop a differentiable depth peeling algorithm that exploits the hardware rasterizer to further significantly accelerate the rendering.", + "bbox": [ + 75, + 330, + 470, + 541 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We empirically find that the image blending model [49] achieves higher rendering quality than the SH model used by 3DGS [33]. However, the image blending model of previous methods [48, 49, 90] requires slow network evaluations during inference, limiting their rendering speed. To alleviate this, we introduce a novel design where we make the image blending network independent of the viewing direction, so the network evaluation can be pre-computed and thereby boost the rendering speed. As a two-edged sword, this strategy makes the appearance model discrete along the viewing direction. This downside is compensated for by using another continuous SH model.", + "bbox": [ + 75, + 555, + 470, + 736 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To validate the effectiveness of the proposed pipeline, we evaluate 4K4D on multiple widely used datasets for multi-view dynamic novel view synthesis, including NHR [93], ENeRF-Outdoor [49], DNA-Rendering [12], and Neural3DV [41]. Extensive experiments show that 4K4D could not only be rendered orders of magnitude faster but also notably outperform the baselines in terms of rendering quality. With an RTX 4090 GPU, our method reaches 400 FPS on the DNA-Rendering dataset at $1080\\mathrm{p}$ resolution and 80 FPS on the ENeRF-Outdoor dataset at 4K resolution.", + "bbox": [ + 75, + 750, + 470, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 89, + 640, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Traditional scene representations. In the domain of novel view synthesis, various approaches based on different representations have been proposed, including multi-view image-based methods [6, 8, 18, 31, 69, 103], multi-plane image representations [47, 56, 65, 83, 86, 86], light-field techniques [14, 21, 39] as well as explicit surface or voxel-based methods [5, 13, 15, 22, 44, 61, 62, 100]. The seminal work [13] utilizes depth sensors and multi-view stereo techniques to consolidate per-view depth information into a coherent mesh sequence, producing high-quality volumetric video. These methods require intricate hardware setups and studio arrangements, thus constraining their accessibility.", + "bbox": [ + 496, + 117, + 893, + 299 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural scene representations. Recently, implicit neural scene representations[3, 24, 27, 30, 32, 51, 52, 58, 76, 79-81, 85, 91] have attracted significant interest among researchers. NeRF[58] encodes the radiance fields of static scenes using coordinate-based Multi-Layer Perceptrons (MLP), achieving exceptional novel view synthesis quality. Building upon NeRF, a collection of studies [28, 42, 45, 63, 64, 70, 93] have made extensions to accommodate for dynamic scenes. Another line of studies [10, 46, 90, 98] has focused on integrating image features into the NeRF rendering pipeline. This approach is easily applicable to dynamic scenes, as multi-view videos can be directly decomposed into multiview images. 
However, NeRF-based approaches often suffer from substantial network evaluation costs during the volume rendering process, which significantly limits their rendering speed and thus hinders their practicality.", + "bbox": [ + 496, + 301, + 895, + 544 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Accelerating neural scene representations. To accelerate NeRF's rendering, multiple works propose to distill implicit MLP networks into explicit structures that offer fast query capabilities, including voxel grids [20, 25, 40, 60, 72, 96, 97], explicit surfaces [11, 23, 29, 36, 54, 67] and point-based representations [1, 33, 35, 38, 71, 73, 101]. These methods effectively reduce the cost or the number of NeRF's MLP evaluations required. Inspired by their success, several approaches [2, 9, 48, 49, 53, 68, 75, 82, 82, 87, 88] have explored the possibility of real-time dynamic view synthesis. HyperReel [2] employs a primitive prediction module to reduce the number of network evaluations, thereby achieving real-time speed at moderate resolutions. However, it should be noted that their rendering speed decreases significantly when rendering higher-resolution images.", + "bbox": [ + 496, + 550, + 893, + 776 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Gaussian Splatting. One notable advancement for accelerating NeRF is the development of 3D Gaussian Splatting (3DGS) [33] which introduces a differentiable Gaussian ellipsoids splatting algorithm for fast and differentiable volume rendering [4, 17]. By effectively eliminating the slow ray marching operation of NeRF with forward splatting and SH [59], they attain both high-fidelity and high-speed rendering. However, the storage cost of 3DGS limits its application", + "bbox": [ + 496, + 779, + 895, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "20030", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/8263b0399b19b14a758fd575b36e4ce00332f08d44187388974d28064e0dae2c.jpg", + "image_caption": [ + "Figure 2. Overview of our proposed pipeline. (a) By applying the space-carving algorithm [37], we extract the initial cloud sequence $\\mathbf{x}, t$ of the target scene. A 4D feature grid [19] is predefined to assign a feature vector to each point, which is then fed into MLPs for the scene geometry and appearance. (b) The geometry model is based on the point location, radius, and density, which forms a semi-transparent point cloud. (c) The appearance model consists of a piece-wise constant IBR term $\\mathbf{c}_{ibr}$ and a continuous SH model $\\mathbf{c}_{sh}$ . (d) The proposed representation is learned from multi-view RGB videos through the differentiable depth peeling algorithm." + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 326, + 275 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/7de42945a0f4e745cdc3461bc94436f18b6ef2629702323ceb0f5e8325f1b54b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 330, + 88, + 658, + 277 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/329e908cc5e9c60be22f7d7ed333720940f31ab0bc5c81894b25a155989e7ae4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 88, + 890, + 277 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "on dynamic scenes. In contrast, the 4D feature grid and image blending model of 4K4D could not only maintain similar rendering quality but also significantly reduce the storage cost for modeling dynamic scenes. 
Moreover, the simpler point cloud representation and the 4D feature grid regularization also make 4K4D less prone to overfitting training views than 3DGS. Some recent concurrent works [43, 55, 92, 94, 95] have also reported real-time rendering speeds by incorporating temporal correspondence or time-dependency into 3DGS. However, these methods either do not show results on datasets with large and fast motions [43, 95] (like NHR [93]) or only report real-time speed at moderate resolution $(800\\times 800$ [92] and $640\\times 480$ [55]). In contrast, 4K4D is capable of real-time rendering even at 4K resolution while concurrently maintaining state-of-the-art view-synthesis quality on large-motion data.", + "bbox": [ + 75, + 362, + 472, + 604 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Proposed Approach", + "text_level": 1, + "bbox": [ + 76, + 627, + 267, + 645 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a multi-view video capturing a dynamic 3D scene, our goal is to reconstruct the target scene and perform novel view synthesis in real time. To this end, we extract coarse point clouds of the scene using the space-carving algorithm [37] (Sec. 4) and build a point cloud-based neural scene representation, which can be robustly learned from input videos and enable the hardware-accelerated rendering.", + "bbox": [ + 75, + 655, + 468, + 762 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The overview of the proposed model is presented in Fig. 2. In this section, we first describe how to represent the geometry and appearance of dynamic scenes based on point clouds and neural networks (Sec. 3.1). Then, we develop a differentiable depth peeling algorithm for rendering our representation (Sec. 3.2), which is supported by the hardware rasterizer, thereby significantly improving the rendering speed. Finally, we discuss how to optimize the proposed model on input RGB videos (Sec. 3.3).", + "bbox": [ + 75, + 763, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Modeling Dynamic Scenes with Point Clouds", + "text_level": 1, + "bbox": [ + 498, + 361, + 879, + 378 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4D embedding. Given the coarse point clouds of the target scene, we represent its dynamic geometry and appearance using neural networks and feature grids. Specifically, our method first defines six feature planes $\\theta_{xy}, \\theta_{xz}, \\theta_{yz}, \\theta_{tx}, \\theta_{ty}$ , and $\\theta_{tz}$ . To assign a feature vector $\\mathbf{f}$ to any point $\\mathbf{x}$ at frame $t$ , we adopt the strategy of K-Planes [19] to model a 4D feature field $\\Theta(\\mathbf{x}, t)$ using these six planes:", + "bbox": [ + 496, + 385, + 893, + 491 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {f} = \\Theta (\\mathbf {x}, t) = \\theta_ {x y} (x, y) \\oplus \\theta_ {x z} (x, z) \\oplus \\theta_ {y z} (y, z) \\oplus \\\\ \\theta_ {t x} (t, x) \\oplus \\theta_ {t y} (t, y) \\oplus \\theta_ {t z} (t, z), \\tag {1} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 506, + 890, + 542 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{x} = (x,y,z)$ is the input point, and $\\oplus$ indicates the concatenation operator. Please refer to K-Planes [19] for more implementation details.", + "bbox": [ + 496, + 553, + 893, + 598 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Geometry model. 
Based on coarse point clouds, the dynamic scene geometry is represented by learning three entries on each point: position $\\mathbf{p} \\in R^3$ , radius $r \\in R$ , and density $\\sigma \\in R$ . Using these point entries, we calculate the volume density of space point $\\mathbf{x}$ with respect to an image pixel $\\mathbf{u}$ for the volume rendering, which will be described in Sec. 3.2. The point position $\\mathbf{p}$ is modeled as an estimizable vector. The radius $r$ and density $\\sigma$ are predicted by feeding the feature vector $\\mathbf{f}$ in Eq. (1) to an MLP network.", + "bbox": [ + 496, + 601, + 890, + 737 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Appearance model. As illustrated in Fig. 2c, we use the image blending technique and the spherical harmonics (SH) model [59, 97] to build a hybrid appearance model, where the image blending technique represents the discrete view-dependent appearance $\\mathbf{c}_{ibr}$ and the SH model represents the continuous view-dependent appearance $\\mathbf{c}_{sh}$ . For point $\\mathbf{x}$ at frame $t$ , its color with viewing direction $\\mathbf{d}$ is:", + "bbox": [ + 496, + 739, + 893, + 845 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {c} (\\mathbf {x}, t, \\mathbf {d}) = \\mathbf {c} _ {i b r} (\\mathbf {x}, t, \\mathbf {d}) + \\mathbf {c} _ {s h} (\\mathbf {s}, \\mathbf {d}), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 568, + 857, + 890, + 875 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $s$ means SH coefficients at point $x$ .", + "bbox": [ + 500, + 885, + 777, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "20031", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The discrete view-dependent appearance $\\mathbf{c}_{ibr}$ is inferred based on input images. Specifically, for a point $\\mathbf{x}$ , we first project it into the input image to retrieve the corresponding RGB color $\\mathbf{c}_{img}^i$ . Then, to blend input RGB colors, we calculate the corresponding blending weight $w^i$ based on the point coordinate and the input image. Note that the blending weight is independent from the viewing direction. Next, to achieve the view-dependent effect, we select the $N'$ nearest input views according to the viewing direction. Finally, the color $\\mathbf{c}_{ibr}$ is computed as $\\sum_{i=1}^{N'} w^i \\mathbf{c}_{img}^i$ . Because the $N'$ input views are obtained through the nearest neighbor retrieval, the $\\mathbf{c}_{ibr}$ is inevitably discrete along the viewing direction. To achieve the continuous view-dependent effect, we append the fine-level color $\\mathbf{c}_{sh}$ represented by the SH model, as shown in Fig. 2c.", + "bbox": [ + 75, + 90, + 472, + 320 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In practice, our method regresses the SH coefficients $\\mathbf{s}$ by passing the point feature $\\mathbf{f}$ in Eq. (1) into an MLP network. To predict the blending weight $w^{i}$ in the image blending model $\\mathbf{c}_{ibr}$ , we first project point $\\mathbf{x}$ onto the input image to retrieve the image feature $\\mathbf{f}_{img}^{i}$ , and then concatenate it with the point feature $\\mathbf{f}$ , which is fed into another MLP network to predict the blending weight. The image feature $\\mathbf{f}_{img}^{i}$ is extracted using a 2D CNN network.", + "bbox": [ + 75, + 321, + 472, + 444 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Discussion. 
Our appearance model is the key to achieving the low-storage, high-fidelity, and real-time view synthesis of dynamic scenes. There are three alternative ways to represent the dynamic appearance, but they cannot perform on par with our model. 1) Defining explicit SH coefficients on each point, as in 3D Gaussian splatting [33]. When the degree of SH coefficients is high and the amount of points of dynamic scenes is large, this model's size could be too big to train on a consumer GPU. 2) MLP-based SH model. Using an MLP to predict SH coefficients of each point can effectively decrease the model size. However, our experiments found that MLP-based SH model struggles to render high-quality images (Sec. 5.2). 3) Continuous view-dependent image blending model, as in ENeRF [49]. We found that representing the appearance with the image blending model exhibits better rendering quality than only with the MLP-based SH model. However, the color network in ENeRF takes the viewing direction as input and thus cannot be easily pre-computed, limiting the rendering speed during inference.", + "bbox": [ + 75, + 446, + 472, + 734 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In contrast to these three methods, our appearance model combines a discrete image blending model $\\mathbf{c}_{ibr}$ with a continuous SH model $\\mathbf{c}_{sh}$ . The image blending model $\\mathbf{c}_{ibr}$ boosts the rendering performance. In addition, it supports the pre-computation, as its network does not take the viewing direction as input. The SH model $\\mathbf{c}_{sh}$ enables the view-dependent effect for any viewing direction. During training, our model represents the scene appearance using networks, so its model size is reasonable. During inference, we pre-compute the network outputs to achieve the real-time rendering, which will be described in Sec. 3.4.", + "bbox": [ + 75, + 734, + 472, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Differentiable Depth Peeling", + "text_level": 1, + "bbox": [ + 498, + 90, + 756, + 107 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our proposed dynamic scene representation can be rendered into images by performing volume rendering [17] on rasterized points. This forward process is much faster than NeRF's backward ray-marching operation [57] since it requires no network evaluation and explicit sampling. The volume rendering equation requires the color and transparency values to be integrated in order [4], thus we utilize the depth-peeling algorithm for acquiring the corresponding ordered points for pixels. Thanks to the point cloud representation, we can leverage the hardware rasterizer to significantly speed up the depth peeling and blending process. Moreover, it is easy to make this rendering process differentiable, enabling us to learn our model from input RGB videos.", + "bbox": [ + 496, + 114, + 893, + 311 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We develop a custom shader to implement the depth peeling algorithm that consists of $K$ rendering passes. Consider a particular image pixel $\\mathbf{u}$ . In the first pass, our method first uses the hardware rasterizer to render point clouds onto the image, which assigns the closest-to-camera point $\\mathbf{x}_0$ to the pixel $\\mathbf{u}$ . Denote the depth of point $\\mathbf{x}_0$ as $t_0$ . 
Subsequently, in the $k$ -th rendering pass, all points with depth value $t_k$ smaller than the recorded depth of the previous pass $t_{k-1}$ are discarded, thereby resulting in the $k$ -th closest-to-camera point $\\mathbf{x}_k$ for the pixel $\\mathbf{u}$ . Discarding closer points is implemented in our custom shader, so it still supports the hardware rasterization. After $K$ rendering passes, pixel $\\mathbf{u}$ has a set of sorted points $\\{\\mathbf{x}_k | k = 1, \\dots, K\\}$ .", + "bbox": [ + 496, + 313, + 893, + 511 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Based on the sorted points, we use the volume rendering technique to synthesize the color of pixel $\\mathbf{u}$ . The densities of these points for pixel $\\mathbf{u}$ are defined based on the distance between the projected point and pixel $\\mathbf{u}$ on the 2D image:", + "bbox": [ + 496, + 511, + 893, + 574 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha (\\mathbf {u}, \\mathbf {x}) = \\sigma \\cdot \\max (1 - \\frac {| | \\pi (\\mathbf {x}) - \\mathbf {u} | | _ {2} ^ {2}}{r ^ {2}}, 0), \\qquad (3)\n$$\n", + "text_format": "latex", + "bbox": [ + 553, + 585, + 893, + 619 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\pi$ is the camera projection function. $\\sigma$ and $r$ are the density and radius of point $\\mathbf{x}$ , which are described in Sec. 3.1. Intuitively, Eq. (3) defines a semi-transparent point representation where the density is the highest around the center and quadratically decreases along its radius. During training, we implement the projection function $\\pi$ using the PyTorch [66], so Eq. (3) is naturally differentiable. During inference, we leverage the hardware rasterization process to efficiently obtain the distance $\\| \\pi (\\mathbf{x}) - \\mathbf{u}\\| _2^2$ , which is implemented using OpenGL [77].", + "bbox": [ + 496, + 630, + 893, + 781 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Denote the density of point $\\mathbf{x}_k$ as $\\alpha_{k}$ . The color of pixel $\\mathbf{u}$ from the volume rendering is formulated as:", + "bbox": [ + 498, + 782, + 893, + 813 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nC (\\mathbf {u}) = \\sum_ {k = 1} ^ {K} T _ {k} \\alpha_ {k} \\mathbf {c} _ {k}, \\text {w h e r e} T _ {k} = \\prod_ {j = 1} ^ {k - 1} (1 - \\alpha_ {j}), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 535, + 828, + 893, + 871 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{c}_k$ is the color of point $\\mathbf{x}_k$ , as described in Eq. (2).", + "bbox": [ + 500, + 885, + 877, + 901 + ], + "page_idx": 3 + }, + { + "type": "footer", + "text": "20032", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. 
Training", + "text_level": 1, + "bbox": [ + 76, + 90, + 181, + 107 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given the rendered pixel color $C(\\mathbf{u})$ , we compare it with the ground-truth pixel color $C_{gt}(\\mathbf{u})$ to optimize our model in an end-to-end fashion using the following loss function:", + "bbox": [ + 76, + 113, + 468, + 159 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {i m g} = \\sum_ {\\mathbf {u} \\in \\mathcal {U}} | | C (\\mathbf {u}) - C _ {g t} (\\mathbf {u}) | | _ {2} ^ {2}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 160, + 170, + 468, + 202 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{U}$ is the set of image pixels. In addition to the MSE loss $L_{img}$ , we also apply the perceptual loss $L_{lpips}$ [102].", + "bbox": [ + 76, + 213, + 468, + 244 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {l p i p s} = \\left\\| \\Phi (I) - \\Phi \\left(I _ {g t}\\right) \\right\\| _ {1}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 256, + 468, + 273 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\Phi$ is the perceptual function (a VGG16 network) and $I, I_{gt}$ are the rendered and ground-truth images, respectively. The perceptual loss [102] computes the difference in image features extracted from the VGG model [78]. Our experiments in Sec. 5.2 show that it effectively improves the perceived quality of the rendered image.", + "bbox": [ + 76, + 284, + 468, + 375 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To regularize the optimization process of our proposed representation, we additionally apply mask supervision to dynamic regions of the target scene. We solely render point clouds of dynamic regions to obtain their masks, where the pixel value is obtained by:", + "bbox": [ + 75, + 375, + 468, + 450 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nM (\\mathbf {u}) = \\sum_ {k = 1} ^ {K} T _ {k} \\alpha_ {k}, \\text {w h e r e} T _ {k} = \\prod_ {j = 1} ^ {k - 1} (1 - \\alpha_ {j}). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 462, + 468, + 505 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The mask loss is defined as:", + "bbox": [ + 76, + 516, + 264, + 530 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {m s k} = - \\sum_ {\\mathbf {u} \\in \\mathcal {U} ^ {\\prime}} | | M (\\mathbf {u}) - M _ {g t} (\\mathbf {u}) | | _ {2} ^ {2}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 540, + 468, + 573 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{U}'$ means the set of pixels of the rendered mask, and $M_{gt}$ is the ground-truth mask of 2D dynamic regions. 
This effectively regularizes the optimization of the geometry of dynamic regions by confining it to the visual hulls.", + "bbox": [ + 76, + 584, + 468, + 645 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The final loss function is defined as", + "bbox": [ + 96, + 645, + 333, + 659 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL = L _ {i m g} + \\lambda_ {l p i p s} L _ {l p i p s} + \\lambda_ {m s k} L _ {m s k}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 672, + 468, + 689 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda_{lpips}$ and $\\lambda_{msk}$ are hyperparameters controlling weights of correspondings losses.", + "bbox": [ + 76, + 700, + 468, + 731 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Inference", + "text_level": 1, + "bbox": [ + 76, + 741, + 186, + 755 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After training, we apply a few acceleration techniques to boost the rendering speed of our model. First, we precompute the point location $\\mathbf{p}$ , radius $r$ , density $\\sigma$ , SH coefficients $\\mathbf{s}$ and color blending weights $w_{i}$ before inference, which are stored at the main memory. During rendering, these properties are asynchronously streamed onto the graphics card, overlapping rasterization with memory copy to achieve an optimal rendering speed [74, 77]. After applying this technique, the runtime computation is reduced to only a", + "bbox": [ + 75, + 763, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "depth peeling evaluation (Sec. 3.2) and a spherical harmonics evaluation (Eq. (2)). Second, we convert the model from 32-bit floats to 16-bits for efficient memory access, which increases FPS by 20 and leads to no visible performance loss. Third, the number of rendering passes $K$ for the differentiable depth peeling algorithm is reduced from 15 to 12, also leading to a 20 FPS speedup with no visual quality change. Detailed analyses of rendering speed can be found in Sec. 5.2 and the supplementary material.", + "bbox": [ + 496, + 90, + 890, + 227 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Implementation Details", + "text_level": 1, + "bbox": [ + 498, + 241, + 720, + 258 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "**Optimization.** 4K4D is trained using the PyTorch framework [66]. Using the Adam optimizer [34] with a learning rate $5e^{-3}$ , our models typically converge after 800k iterations for a sequence length of 200 frames, which takes around 24 hours on a single RTX 4090 GPU. Specifically, the learning rate of point positions is set to $1e^{-5}$ , and the regularization loss weights $\\lambda_{lpips}$ and $\\lambda_{msk}$ are set to $1e^{-3}$ . During training, the number of passes $K$ for the differentiable depth peeling is set to 15, and the number of nearest input views $N'$ is set to 4. The rendering speed of our method is reported on an RTX 3090 GPU for the experiments in Sec. 5 unless otherwise stated.", + "bbox": [ + 496, + 267, + 893, + 449 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Initialization of point clouds. We leverage existing multi-view reconstruction methods to initialize the point clouds. For dynamic regions, we use segmentation methods [50] to obtain their masks in input images and utilize the space carving algorithm [37] to extract their coarse geometry. 
For static background regions, we leverage foreground masks to compute the mask-weighted average of background pixels along all frames, producing background images without the foreground content. Then, an Instant-NGP [60] model is trained on these images, from which we obtain the initial point clouds. After the initialization, the number of points for the dynamic regions is typically 250k per frame, and the static background regions typically consist of 300k points.", + "bbox": [ + 496, + 452, + 893, + 648 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 500, + 662, + 632, + 680 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. We train and evaluate our method 4K4D on multiple widely used multi-view datasets, including DNA-Rendering [12], ENeRF-Outdoor [49] and NHR [93]. DNA-Rendering [12] records 10-second clips of dynamic humans and objects at 15 FPS using 4K and 2K cameras with 60 views. This dataset is very challenging due to the complex clothing and fast motions. We conduct experiments on 4 sequences of DNA-Rendering, with $90\\%$ of the views as training set and the rest as evaluation set. ENeRF-Outdoor [49] records multiple dynamic humans and objects in an outdoor environment at 30FPS using 1080p cameras. We select three 100-frame sequences with 6 different actors (2 for each sequence) holding objects for evaluation. This dataset is difficult for dynamic view synthesis in that not", + "bbox": [ + 496, + 688, + 893, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "20033", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/da01e3d1ddadfb94305be6ee9665ddfa42d9879fee5e3749e0013cae36d2789b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 80, + 89, + 279, + 224 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d6a4a05671ed372f6cc4eaf2356e03e5851e55f31dfed36e5afa7ab9cfdc5987.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 90, + 482, + 224 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8448b6205d5f3750fdd32862e4c2e07339d1e31c341f9093e0b692bfa213231b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 486, + 90, + 684, + 224 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/54c759ca7544129ece803df48055b02a5160ce6d41db1949ca24e86e9ce82588.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 691, + 90, + 890, + 224 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8f56c783ddcdeebf5901ee9125abb11118f008d803cf8d5646b153d7fc16d626.jpg", + "image_caption": [ + "Ground Truth" + ], + "image_footnote": [], + "bbox": [ + 80, + 227, + 279, + 364 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7c4fcdde4f5179c67115d00467d021fbb193d34092ad9b99e4921e7228f1ffdc.jpg", + "image_caption": [ + "Ours (141.7 FPS)" + ], + "image_footnote": [], + "bbox": [ + 282, + 227, + 482, + 364 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9ca5a62f0b9b3a0fc51c8298f082a99c38a2c2599c9a05a58470a6e6a8cd5746.jpg", + "image_caption": [ + "ENeRF (11.3 FPS)", + "Figure 3. Qualitative comparison on the ENeRF-Outdoor [49] dataset that contains $960 \\times 540$ images. Our method achieves much higher rendering quality and can be rendered $14 \\times$ faster than ENeRF[49]. More dynamic results can be found in the supplementary video." 
+ ], + "image_footnote": [], + "bbox": [ + 486, + 227, + 684, + 364 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7ce9783f5b032071f106eee239d5ee3eb79e70e82d662012b281522fb2d7278c.jpg", + "image_caption": [ + "KPlanes (1.4 FPS)" + ], + "image_footnote": [], + "bbox": [ + 691, + 227, + 890, + 364 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "only are there multiple moving humans and objects, but the background is also dynamic due to cast shadows. More details can be found in the supplementary.", + "bbox": [ + 75, + 433, + 468, + 479 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Comparison Experiments", + "text_level": 1, + "bbox": [ + 76, + 492, + 310, + 508 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison on DNA-Rendering [12]. Qualitative and quantitative comparisons on DNA-Rendering [12] are shown in Fig. 4 and Tabs. 1 and 3 respectively. As evident in Tab. 1, our method renders $30\\mathrm{x}$ faster than the SOTA real-time dynamic view synthesis method ENeRF [49] with superior quality. Even when compared with concurrent work [48], our method still achieves $13\\mathrm{x}$ speedup and produces consistently higher quality images. As shown in Fig. 4, KPlanes [19] could not recover the highly detailed appearance and geometry of the 4D dynamic scene. Other image-based methods [48, 49, 90] produce high-quality appearance. However, they tend to produce blurry results around occlusions and edges, leading to degradation of the visual quality while maintaining interactive framerate at best. When compared with 3DGS [33] on the first frame of each sequence, our method achieves a much better storage efficiency $(50\\times)$ thanks to our compact 4D feature grid and image blending model. Moreover, due to the simplicity of our point-based representation, our method is less prone to overfit the training views. More details of the comparison with 3DGS can be found in the supplementary material.", + "bbox": [ + 75, + 518, + 470, + 835 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison on ENeRF-Outdoor [49]. Fig. 3 and Tabs. 2 and 3 provides qualitative and quantitative results on the ENeRF-Outdoor [49] dataset. Even on the challenging ENeRF-Outdoor dataset with multiple actors and the back", + "bbox": [ + 75, + 839, + 470, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/0cefeaeea22cd1a61d33a81749a748dab225c5792011fa6fc61b953ea6130cc0.jpg", + "table_caption": [ + "Table 1. Quantitative comparison on the DNA-Rendering [12] dataset. Image resolutions are ${1024} \\times {1224}$ and ${1125} \\times {1536}$ . Metrics are averaged over all scenes. Green and yellow cell colors indicate the best and the second best results, respectively." + ], + "table_footnote": [], + "table_body": "
Method | PSNR ↑ | SSIM ↑ | LPIPS ↓ | FPS
ENeRF [49] | 28.108 | 0.972 | 0.056 | 6.011
IBRNet [90] | 27.844 | 0.967 | 0.081 | 0.100
KPlanes [19] | 27.452 | 0.952 | 0.118 | 0.640
Im4D [48] | 28.991 | 0.973 | 0.062 | 15.360
Ours | 31.173 | 0.976 | 0.055 | 203.610
", + "bbox": [ + 501, + 489, + 890, + 595 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/34401f1811756711cd316a74bc1cd941f9b4e20aa29a4328384a23d71f8d066a.jpg", + "table_caption": [ + "Table 2. Quantitative comparison on the ENeRF-Ourdoor [49] dataset. This dataset includes ${960} \\times {540}$ images. Green and yellow cell colors indicate the best and the second-best results, respectively." + ], + "table_footnote": [], + "table_body": "
Method | PSNR ↑ | SSIM ↑ | LPIPS ↓ | FPS
ENeRF [49] | 25.452 | 0.809 | 0.273 | 11.309
IBRNet [90] | 24.966 | 0.929 | 0.172 | 0.140
KPlanes [19] | 21.310 | 0.735 | 0.454 | 1.370
Ours | 25.815 | 0.898 | 0.147 | 141.665
", + "bbox": [ + 501, + 648, + 890, + 739 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ground, our method still achieves notably better results while rendering at over 140 FPS. ENeRF [49] produces blurry results on this challenging dataset, and the rendering results of IBRNet [90] contain black artifacts around the edges of the images as shown in Fig. 3. K-Planse [19] fails to reconstruct the dynamic humans and varying background regions. 3DGS [33] not only introduces much higher storage cost than our method $(45\\times)$ , but also faces even more pronounced overfitting problem with smaller number of views (18 for ENeRF-Outdoor). As evident in Tab. 3 and the", + "bbox": [ + 496, + 750, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "20034", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ccffc7287480a9bc28cc74c65427161fb4779d7b7afd299255e044e104a274e3.jpg", + "image_caption": [ + "Figure 4. Qualitative comparison on the DNA-Rendering [12] dataset that contains $1024 \\times 1224$ (and $1125 \\times 1536$ ) images. Our method can produce high-fidelity images at over 200 FPS while other competitors fail to produce high-quality results for highly dynamic scenes." + ], + "image_footnote": [], + "bbox": [ + 91, + 88, + 890, + 469 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "supplementary material, the overfitting severely degrades the rendering quality. Their rendering speed is slower than ours due to excessive point count. More details of the comparison with 3DGS are present in the supplementary material.", + "bbox": [ + 75, + 515, + 470, + 578 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Ablation Studies", + "text_level": 1, + "bbox": [ + 76, + 585, + 240, + 599 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We perform ablation studies on the proposed components on the 150-frame 0013_01 sequence of the DNA-Rendering [12] dataset. Our method can be rendered at over 200 FPS with state-of-the-art quality and maintains a only 2MB per frame storage overhead. More detailed rendering speed analysis and breakdown and storage cost analysis can be found in the supplementary material.", + "bbox": [ + 75, + 608, + 468, + 715 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation study on the 4D embedding. The \"w/o f\" variant removes the proposed 4D embedding (Sec. 3.1) module and replaces it with a per-frame and per-point estimizable position, radius, density, and scale. As shown in Fig. 5 and Tab. 4, the \"w/o f\" variant produces blurry and noisy geometry without the 4D embedding $\\Theta$ , which leads to the inferior rendering quality.", + "bbox": [ + 75, + 715, + 468, + 823 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation study on the hybrid appearance model. The \"w/o $\\mathbf{c}_{ibr}$ \" variant removes $\\mathbf{c}_{ibr}$ in the appearance formulation Eq. (2), which not only leads to less details on the recovered appearance but also significantly impedes the quality of the geometry. Adding an additional degree for the SH", + "bbox": [ + 75, + 824, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/2908afefe3d34c69f40f4a3ad8c8b8d0ae2e4ce746b3cc087e5dce9c459531ae.jpg", + "table_caption": [ + "Table 3. Quantitative comparison on the first frame of all sequences of DNA-Rendering [12] (1024×1224 (and 1125×1536) images) and ENeRF-Outdoor [49] (960×540 images). 
Metrics are averaged for each dataset. \"Storage\" indicates the disk file size of the trained models (including source images for our method)." + ], + "table_footnote": [], + "table_body": "
Dataset | Method | PSNR | LPIPS | FPS | Storage | Training
DNA-Rendering | 3DGS [33] | 31.16 | 0.049 | 113.2 | 224 MB | 5 min
Ours | 31.87 | 0.046 | 241.7 | 4.7 MB | 15 min
ENeRF-Outdoor | 3DGS [33] | 21.63 | 0.349 | 88.4 | 715 MB | 10 min
Ours | 26.54 | 0.145 | 148.6 | 16.0 MB | 30 min
", + "bbox": [ + 500, + 587, + 893, + 670 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "coefficients does not lead to a significant performance change (PSNR 30.129 vs. 30.259). Comparatively, our proposed method produces high-fidelity rendering with better details. A visualization of the view-dependent effect produced by $\\mathbf{c}_{sh}$ can be found in the supplementary material.", + "bbox": [ + 496, + 681, + 893, + 756 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation study on loss functions. As shown in Tab. 4, removing the $L_{lpips}$ term not only reduces the perceptual quality (LPIPS score) but also leads to the degradation of other performance metrics. For the highly dynamic DNA-Rendering [12] dataset, the mask loss $L_{msk}$ helps with regularizing the optimization of the dynamic geometry.", + "bbox": [ + 496, + 760, + 893, + 851 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Rendering speed on different GPUs and resolutions. We additionally report the rendering speed of our method on different hardware (RTX 3060, RTX 3090, and RTX 4090)", + "bbox": [ + 496, + 854, + 893, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "20035", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/26be9865e872a28d203df62580367e5c0a2e13957b3ebcc4f9fe1d7cb443b183.jpg", + "image_caption": [ + "Figure 5. Ablation studies on the 0013_01 sequence of DNA-Rendering [12]. Removing our proposed components leads to noisy geometry and blurry appearance. Our method produces high-fidelity results with perceptually accurate shapes and colors. See Sec. 5.2 for more details." + ], + "image_footnote": [], + "bbox": [ + 76, + 84, + 897, + 335 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "with different resolutions (720p, 1080p, and 4K (2160p)) in Tab. 5. The rendering speed reported here contains the overhead of the interactive GUI. 4K4D achieves real-time rendering speed even when rendering 4K (2160p) images on commodity hardware as shown in the table. More real-time rendering demos can be found in the supplementary video.", + "bbox": [ + 75, + 393, + 472, + 488 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion and Discussion", + "text_level": 1, + "bbox": [ + 76, + 507, + 326, + 523 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we provide a neural point cloud-based representation, 4K4D, for real-time rendering of dynamic 3D scenes at 4K resolution. We build 4K4D upon a 4D feature grid to naturally regularize the points and develop a novel hybrid appearance model for high-quality rendering. Furthermore, we develop a differentiable depth peeling algorithm that utilizes the hardware rasterization pipeline to effectively optimize and efficiently render the proposed model. In our experiments, we demonstrate that 4K4D not only achieves state-of-the-art rendering quality but also exhibits a more than $30 \\times$ increase in rendering speed (over 200FPS at 1080p on an RTX 3090 GPU).", + "bbox": [ + 75, + 535, + 468, + 717 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "However, our method still has some limitations. For one, 4K4D cannot produce correspondences of points across frames, which are important for some downstream tasks. Moreover, the storage cost for 4K4D increases linearly with the number of video frames, so our method has difficulty in modeling long volumetric videos. 
How to model correspondences and reduce the storage cost for long videos could be two interesting problems for future work. Moreover, the rendering quality of our method also depends on the resolution of input images. While our method achieves real-time rendering at 4K resolution, 4K-quality rendering can only be achieved with sufficient input resolution.", + "bbox": [ + 75, + 719, + 470, + 901 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/4d32d1cc5f5bfd01b024a159ea5c6f2d6b78af6624763ee21fe07f3baa3a1c70.jpg", + "table_caption": [ + "Table 4. Ablation studies on the 150-frame 0013_01 sequence of the DNA-Rendering dataset [12]. \"w/o f\" indicates replacing the 4D embedding with a per-frame and per-point optimizable position, radius, density, and scale. See Sec. 5.2 for detailed descriptions of the abbreviations." + ], + "table_footnote": [], + "table_body": "
Variant | PSNR ↑ | SSIM ↑ | LPIPS ↓ | Model Size
w/o f | 29.779 | 0.967 | 0.057 | 1304.0 MiB
w/o c_ibr | 30.259 | 0.973 | 0.054 | 225.0 MiB
w/o c_sh | 31.946 | 0.981 | 0.040 | 225.0 MiB
w/o L_lpips | 31.661 | 0.979 | 0.063 | 225.0 MiB
w/o L_msk | 29.115 | 0.965 | 0.073 | 225.0 MiB
Ours | 31.990 | 0.982 | 0.040 | 225.0 MiB
", + "bbox": [ + 501, + 465, + 890, + 585 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/9524b68635e6393af8e1923efa4f5dc5fd39eb9f1f4d5e52f60f4cfeb01dd385.jpg", + "table_caption": [ + "Table 5. Rendering speed on different GPUs and resolutions. The results are recorded on the first frame of the 0013_01 sequence of DNA-Rendering [12] and the actor1_4 sequence of ENeRF-Outdoor [49] with the interactive GUI. Resolutions are set to 720p $(720\\times 1280)$ , 1080p $(1080\\times 1920)$ , and 4K $(2160\\times 3840)$ . Even with the overhead of the interactive GUI (\"w/ GUI\"), our method still achieves unprecedented rendering speed. More real-time rendering results can be found in the supplementary video." + ], + "table_footnote": [], + "table_body": "
Dataset | Res. | RTX 3060 | RTX 3090 | RTX 4090
DNA-Rendering [12] w/ GUI | 720p | 173.8 FPS | 246.9 FPS | 431.0 FPS
1080p | 138.7 FPS | 233.1 FPS | 409.8 FPS
4K | 90.0 FPS | 147.4 FPS | 288.8 FPS
ENeRF-Outdoor [49] w/ GUI | 720p | 90.5 FPS | 130.5 FPS | 351.5 FPS
1080p | 66.1 FPS | 103.6 FPS | 249.7 FPS
4K | 25.1 FPS | 47.2 FPS | 85.1 FPS
", + "bbox": [ + 501, + 703, + 890, + 823 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement", + "text_level": 1, + "bbox": [ + 500, + 830, + 660, + 848 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The authors would like to acknowledge support from NSFC (No. 62172364) and Information Technology Center and State Key Lab of CAD&CG, Zhejiang University.", + "bbox": [ + 496, + 854, + 893, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "20036", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Kara-Ali Aliev, Artem Sevastopolsky, Maria Kolos, Dmitry Ulyanov, and Victor Lempitsky. Neural point-based graphics. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXII 16, pages 696-712. Springer, 2020. 2", + "[2] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 1, 2", + "[3] Benjamin Attal, Jia-Bin Huang, Michael Zollhöfer, Johannes Kopf, and Changil Kim. Learning neural light fields with ray-space embedding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19819-19829, 2022. 2", + "[4] Louis Bavoil and Kevin Myers. Order independent transparency with dual depth peeling. NVIDIA OpenGL SDK, 1:12, 2008. 2, 4", + "[5] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2", + "[6] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, SIGGRAPH '01, page 425-432, New York, NY, USA, 2001. Association for Computing Machinery. 2", + "[7] Dan Casas, Marco Volino, John Collomosse, and Adrian Hilton. 4d video textures for interactive character appearance. In Computer Graphics Forum, pages 371-380. Wiley Online Library, 2014. 1", + "[8] Gaurav Chaurasia, Sylvain Duchene, Olga Sorkine-Hornung, and George Drettakis. Depth synthesis and local warps for plausible image-based navigation. ACM TOG, 2013. 2", + "[9] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. arXiv, 2022. 2", + "[10] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In ICCV, 2021. 2", + "[11] Zhiqin Chen, Thomas Funkhouser, Peter Hedman, and Andrea Tagliasacchi. Mobilenerf: Exploiting the polygon rasterization pipeline for efficient neural field rendering on mobile architectures. arXiv preprint arXiv:2208.00277, 2022. 2", + "[12] Wei Cheng, Ruixiang Chen, Wanqi Yin, Siming Fan, Keyu Chen, Honglin He, Huiwen Luo, Zhongang Cai, Jingbo Wang, Yang Gao, et al. Dna-rendering: A diverse neural actor repository for high-fidelity human-centric rendering. arXiv preprint arXiv:2307.10173, 2023. 
1, 2, 5, 6, 7, 8", + "[13] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (ToG)," + ], + "bbox": [ + 86, + 114, + 470, + 888 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "34(4):1-13,2015.1,2", + "[14] Abe Davis, Marc Levoy, and Fredo Durand. Unstructured light fields. In Computer Graphics Forum, pages 305-314. Wiley Online Library, 2012. 2", + "[15] Mingsong Dou, Sameh Khamis, Yury Degtyarev, Philip Davidson, Sean Ryan Fanello, Adarsh Kowdle, Sergio Orts Escolano, Christoph Rhemann, David Kim, Jonathan Taylor, et al. Fusion4d: Real-time performance capture of challenging scenes. ACM TOG, 2016. 1, 2", + "[16] Mingsong Dou, Jonathan Taylor, Henry Fuchs, Andrew Fitzgibbon, and Shahram Izadi. 3d scanning deformable objects with a single rgbd sensor. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 493-501, 2015. 1", + "[17] Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. ACM Siggraph Computer Graphics, 22(4):65-74, 1988. 1, 2, 4", + "[18] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. Deepstereo: Learning to predict new views from the world's imagery. In CVPR, June 2016. 2", + "[19] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12479–12488, 2023. 1, 2, 3, 6", + "[20] Stephan J Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. Fastnerf: High-fidelity neural rendering at 200fps. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14346-14355, 2021. 1, 2", + "[21] Steven J Gortler, Radek Grzesczuk, Richard Szeliski, and Michael F Cohen. The lumigraph. In SIGGRAPH, 1996. 2", + "[22] Kaiwen Guo, Peter Lincoln, Philip Davidson, Jay Busch, Xueming Yu, Matt Whalen, Geoff Harvey, Sergio Orts-Escolano, Rohit Pandey, Jason Dourgarian, et al. The relightables: Volumetric performance capture of humans with realistic relighting. ACM Transactions on Graphics (ToG), 38(6):1-19, 2019. 2", + "[23] Jon Hasselgren, Nikolai Hofmann, and Jacob Munkberg. Shape, light, and material decomposition from images using monte carlo rendering and denoising. Advances in Neural Information Processing Systems, 35:22856-22869, 2022. 2", + "[24] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM TOG, 2018. 2", + "[25] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In ICCV, 2021. 2", + "[26] Anna Hilsmann, Philipp Fechteler, Wieland Morgenstern, Wolfgang Paier, Ingo Feldmann, Oliver Schreer, and Peter Eisert. Going beyond free viewpoint: creating animatable volumetric video of human performances. IET Computer Vision, pages 350-358, 2020. 1", + "[27] Tao Hu, Tao Yu, Zerong Zheng, He Zhang, Yebin Liu, and Matthias Zwicker. Hvtr: Hybrid volumetric-textural rendering for human avatars. In 2022 International Conference on 3D Vision (3DV), pages 197-208. IEEE, 2022." 
+ ], + "bbox": [ + 509, + 93, + 893, + 891 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "20037", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "2", + "[28] Mustafa Isik, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. arXiv preprint arXiv:2305.06356, 2023. 2", + "[29] Shubhendu Jena, Franck Multon, and Adnane Boukhayma. Neural mesh-based graphics. In European Conference on Computer Vision, pages 739-757. Springer, 2022. 2", + "[30] Yue Jiang, Dantong Ji, Zhizhong Han, and Matthias Zwicker. Sdfdiff: Differentiable rendering of signed distance fields for 3d shape optimization. In CVPR, 2020. 2", + "[31] Nima Khademi Kalantari, Ting-Chun Wang, and Ravi Ramamoorthi. Learning-based view synthesis for light field cameras. ACM TOG, 2016. 2", + "[32] Petr Kellnhofer, Lars C Jebe, Andrew Jones, Ryan Spicer, Kari Pulli, and Gordon Wetzstein. Neural lumigraph rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4287-4297, 2021. 2", + "[33] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (TOG), 42(4):1-14, 2023. 1, 2, 4, 6, 7", + "[34] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5", + "[35] Georgios Kopanas, Julien Philip, Thomas Leimkuhler, and George Drettakis. Point-based neural rendering with perview optimization. In Computer Graphics Forum, volume 40, pages 29-43. Wiley Online Library, 2021. 2", + "[36] Jonas Kulhanek and Torsten Sattler. Tetra-nerf: Representing neural radiance fields using tetrahedra. arXiv preprint arXiv:2304.09987, 2023. 2", + "[37] Kiriakos N Kutulakos and Steven M Seitz. A theory of shape by space carving. International journal of computer vision, 38:199-218, 2000. 2, 3, 5", + "[38] Christoph Lassner and Michael Zollhofer. Pulsar: Efficient sphere-based neural rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1440-1449, 2021. 2", + "[39] Marc Levoy and Pat Hanrahan. Light field rendering. In SIGGRAPH, 1996. 2", + "[40] Ruilong Li, Hang Gao, Matthew Tancik, and Angjoo Kanazawa. Nerfacc: Efficient sampling accelerates nerfs. arXiv preprint arXiv:2305.04966, 2023. 2", + "[41] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, and Zhaoyang Lv. Neural 3d video synthesis. arXiv preprint arXiv:2103.02597, 2021. 2", + "[42] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 1, 2", + "[43] Zhan Li, Zhang Chen, Zhong Li, and Yi Xu. Spacetime" + ], + "bbox": [ + 86, + 93, + 470, + 888 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "gaussian feature splatting for real-time dynamic view synthesis. arXiv preprint arXiv:2312.16812, 2023. 3", + "[44] Zhong Li, Yu Ji, Wei Yang, Jinwei Ye, and Jingyi Yu. 
Robust 3d human motion reconstruction via dynamic template construction. In 2017 International Conference on 3D Vision (3DV), pages 496-505. IEEE, 2017. 2", + "[45] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In CVPR, 2021. 2", + "[46] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4273-4284, 2023. 2", + "[47] Zhengqi Li, Wenqi Xian, Abe Davis, and Noah Snavely. Crowdsampling the plenoptic function. In ECCV, 2020. 2", + "[48] Haotong Lin, Sida Peng, Zhen Xu, Tao Xie, Xingyi He, Hujun Bao, and Xiaowei Zhou. High-fidelity and real-time novel view synthesis for dynamic scenes. In SIGGRAPH Asia Conference Proceedings, 2023. 2, 6", + "[49] Haotong Lin, Sida Peng, Zhen Xu, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Efficient neural radiance fields for interactive free-viewpoint video. In SIGGRAPH Asia Conference Proceedings, 2022. 1, 2, 4, 5, 6, 7, 8", + "[50] Shanchuan Lin, Linjie Yang, Imran Saleemi, and Soumyadip Sengupta. Robust high-resolution video matting with temporal guidance. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 238-247, 2022. 5", + "[51] Shichen Liu, Shunsuke Saito, Weikai Chen, and Hao Li. Learning to infer implicit surfaces without 3d supervision. NeurIPS, 2019. 2", + "[52] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. In SIGGRAPH, 2019. 2", + "[53] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. ACM Transactions on Graphics (TOG), 40(4):1-13, 2021. 2", + "[54] Fan Lu, Yan Xu, Guang Chen, Hongsheng Li, Kwan-Yee Lin, and Changjun Jiang. Urban radiance field representation with deformable neural mesh primitives. arXiv preprint arXiv:2307.10776, 2023. 2", + "[55] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. arXiv preprint arXiv:2308.09713, 2023. 3", + "[56] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM TOG, 2019. 2", + "[57] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. 2020. 4", + "[58] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf:" + ], + "bbox": [ + 509, + 93, + 890, + 888 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "20038", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 1, 2", + "[59] Claus Müller. Spherical harmonics, volume 17. Springer, 2006. 2, 3", + "[60] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 
2, 5", + "[61] Richard A Newcombe, Dieter Fox, and Steven M Seitz. Dynamicfusion: Reconstruction and tracking of non-rigid scenes in real-time. In CVPR, 2015. 1, 2", + "[62] Sergio Orts-Escolano, Christoph Rhemann, Sean Fanello, Wayne Chang, Adarsh Kowdle, Yury Degtyarev, David Kim, Philip L Davidson, Sameh Khamis, Mingsong Dou, et al. Holoportation: Virtual 3d teleportation in real-time. In UIST, 2016. 1, 2", + "[63] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In ICCV, 2021. 2", + "[64] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 2", + "[65] Steven Parker, Peter Shirley, and Brian Smits. Single sample soft shadows. Technical report, Technical Report UUCS-98-019, Computer Science Department, University of Utah, 1998. 2", + "[66] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In NeurIPS, 2019. 4, 5", + "[67] Nikolay Patakin, Dmitry Senushkin, Anna Vorontsova, and Anton Konushin. Neural global illumination for inverse rendering. In 2023 IEEE International Conference on Image Processing (ICIP), pages 1580-1584. IEEE, 2023. 2", + "[68] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4252-4262, 2023. 1, 2", + "[69] Eric Penner and Li Zhang. Soft 3d reconstruction for view synthesis. ACM TOG, 2017. 2", + "[70] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In CVPR, 2021. 2", + "[71] Ruslan Rakhimov, Andrei-Timotei Ardelean, Victor Lempitsky, and Evgeny Burnaev. Npbg++: Accelerating neural point-based graphics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15969-15979, 2022. 2", + "[72] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with" + ], + "bbox": [ + 86, + 92, + 470, + 891 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "thousands of tiny mpls. In ICCV, pages 14335-14345, 2021. 2", + "[73] Darius Rückert, Linus Franke, and Marc Stamminger. Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics (ToG), 41(4):1-14, 2022. 2", + "[74] Jason Sanders and Edward Kandrot. CUDA by example: an introduction to general-purpose GPU programming. Addison-Wesley Professional, 2010. 5", + "[75] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. arXiv, 2022. 2", + "[76] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. In CVPR, 2020. 2", + "[77] Dave Shreiner et al. 
OpenGL programming guide: the official guide to learning OpenGL, versions 3.0 and 3.1. Pearson Education, 2009. 4, 5", + "[78] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 5", + "[79] Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems, 34:19313-19325, 2021. 2", + "[80] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhöfer. Deepvoxels: Learning persistent 3d feature embeddings. In CVPR, 2019.", + "[81] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In NeurIPS, 2019. 2", + "[82] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 2", + "[83] Pratul P Srinivasan, Richard Tucker, Jonathan T Barron, Ravi Ramamoorthi, Ren Ng, and Noah Snively. Pushing the boundaries of view extrapolation with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 175-184, 2019. 2", + "[84] Zhuo Su, Lan Xu, Zerong Zheng, Tao Yu, Yebin Liu, and Lu Fang. Robustfusion: Human volumetric capture with data-driven visual cues using a rgbd camera. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part IV 16, pages 246-264. Springer, 2020. 1", + "[85] Mohammed Suhail, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Light field neural rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8269-8279, June 2022. 2", + "[86] Richard Szeliski and Polina Golland. Stereo matching with transparency and matting. In Sixth International Conference on Computer Vision (IEEE Cat. No. 98CH36271), pages 517-524. IEEE, 1998. 2" + ], + "bbox": [ + 509, + 92, + 893, + 890 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "20039", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[87] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, and Huaping Liu. Mixed neural voxels for fast multi-view video synthesis. arXiv preprint arXiv:2212.00190, 2022. 2", + "[88] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 76-87, 2023. 2", + "[89] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 1", + "[90] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2021. 
2, 6", + "[91] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time view synthesis with neural basis expansion. In CVPR, 2021. 2", + "[92] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3", + "[93] Minye Wu, Yuehao Wang, Qiang Hu, and Jingyi Yu. Multiview neural human rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1682-1691, 2020. 2, 3, 5", + "[94] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 3", + "[95] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv 2310.10642, 2023. 3", + "[96] Alex Yu, Sara Fridovich-Keil, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. CVPR, 2022. 2", + "[97] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 1, 2, 3", + "[98] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2", + "[99] Tao Yu, Zerong Zheng, Kaiwen Guo, Pengpeng Liu, Qionghai Dai, and Yebin Liu. Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5746-5756, 2021. 1", + "[100] Tao Yu, Zerong Zheng, Kaiwen Guo, Jianhui Zhao, Qionghai Dai, Hao Li, Gerard Pons-Moll, and Yebin Liu. Doublefu" + ], + "bbox": [ + 78, + 90, + 470, + 891 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "sion: Real-time capture of human performances with inner body shapes from a single depth sensor. In CVPR, 2018. 1, 2", + "[101] Qiang Zhang, Seung-Hwan Baek, Szymon Rusinkiewicz, and Felix Heide. Differentiable point-based radiance fields for efficient view synthesis. In SIGGRAPH Asia 2022 Conference Papers, pages 1-12, 2022. 2", + "[102] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 5", + "[103] C Lawrence Zitnick, Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM TOG, 2004. 
2" + ], + "bbox": [ + 501, + 90, + 893, + 309 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "20040", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/6d74ca33-515d-4b03-96e1-8cccfa68be60_model.json b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/6d74ca33-515d-4b03-96e1-8cccfa68be60_model.json new file mode 100644 index 0000000000000000000000000000000000000000..128db9086cda0b17010eff5ca00a9743cc4954e2 --- /dev/null +++ b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/6d74ca33-515d-4b03-96e1-8cccfa68be60_model.json @@ -0,0 +1,2809 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.212, + 0.131, + 0.76, + 0.152 + ], + "angle": 0, + "content": "4K4D: Real-Time 4D View Synthesis at 4K Resolution" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.168, + 0.734, + 0.209 + ], + "angle": 0, + "content": "Zhen Xu\\(^{1}\\) Sida Peng\\(^{1}\\) Haotong Lin\\(^{1}\\) Guangzhao He\\(^{1}\\) \nJiaming Sun\\(^{1}\\) Yujun Shen\\(^{2}\\) Hujun Bao\\(^{1}\\) Xiaowei Zhou\\(^{1*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.332, + 0.217, + 0.635, + 0.237 + ], + "angle": 0, + "content": "\\(^{1}\\)Zhejiang University \\(^{2}\\)Ant Group" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.25, + 0.271, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.289, + 0.251, + 0.48, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.253, + 0.685, + 0.441 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.253, + 0.891, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.452, + 0.894, + 0.509 + ], + "angle": 0, + "content": "Figure 1. Photorealistic and real-time rendering of dynamic 3D scenes. Our proposed method reconstructs a 4D neural representation from multi-view videos, which can be rendered at \\(1125 \\times 1536\\) resolution with a speed of over 200 FPS using an RTX 3090 GPU while maintaining state-of-the-art quality on the DNA-Rendering [12] dataset. It is also noteworthy that our method reaches over 80 FPS when rendering 4K images with an RTX 4090. Detailed performance under different resolutions using different GPUs can be found in Tab. 5." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.523, + 0.314, + 0.538 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.551, + 0.474, + 0.868 + ], + "angle": 0, + "content": "This paper targets high-fidelity and real-time view synthesis of dynamic 3D scenes at \\(4K\\) resolution. Recent methods on dynamic view synthesis have shown impressive rendering quality. However, their speed is still limited when rendering high-resolution images. To overcome this problem, we propose 4K4D, a 4D point cloud representation that supports hardware rasterization and network pre-computation to enable unprecedented rendering speed with a high rendering quality. 
Our representation is built on a 4D feature grid so that the points are naturally regularized and can be robustly optimized. In addition, we design a novel hybrid appearance model that significantly boosts the rendering quality while preserving efficiency. Moreover, we develop a differentiable depth peeling algorithm to effectively learn the proposed model from RGB videos. Experiments show that our representation can be rendered at over 400 FPS on the DNA-Rendering dataset at 1080p resolution and 80 FPS on the ENeRF-Outdoor dataset at \\(4K\\) resolution using an RTX 4090 GPU, which is \\(30\\times\\) faster than previous methods and achieves the state-of-the-art rendering quality. Our project page is available at https://zju3dv.github.io/4k4d." + }, + { + "type": "page_footnote", + "bbox": [ + 0.077, + 0.875, + 0.47, + 0.9 + ], + "angle": 0, + "content": "The authors from Zhejiang University are affiliated with the State Key Lab of CAD&CG. *Corresponding author: Xiaowei Zhou." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.523, + 0.631, + 0.538 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.551, + 0.895, + 0.717 + ], + "angle": 0, + "content": "Dynamic view synthesis aims to reconstruct dynamic 3D scenes from captured videos and create free-viewpoint and immersive virtual playback, which is a long-standing research problem in computer vision and computer graphics. Essential to the practicality of this technique is its ability to be rendered in real-time with high fidelity. Traditional methods [7, 13, 15, 16, 26, 61, 62, 84, 99, 100] represent dynamic 3D scenes as textured mesh sequences which can be rendered efficiently. However, high-quality mesh reconstruction requires complicated capture hardware and is limited to controlled environments." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.896, + 0.901 + ], + "angle": 0, + "content": "Recently, implicit neural representations [19, 42, 58] have shown great success in reconstructing dynamic 3D scenes from RGB videos via differentiable rendering. For example, Li et al. [42] model the target scene as a dynamic neural radiance field and leverage volume rendering [17] to synthesize images. Despite impressive view synthesis results, existing approaches typically require seconds or even minutes to render an image at \\(1080\\mathrm{p}\\) resolution due to the costly network evaluation, as discussed by Peng et al. [68]. Inspired by static view synthesis approaches [20, 33, 97], some dynamic view synthesis methods [2, 49, 68, 89] increase the rendering speed by decreasing either the" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20029" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.198 + ], + "angle": 0, + "content": "network size or the number of network evaluations. With these strategies, such methods achieve over 40 FPS when rendering moderate-resolution images \\((384 \\times 512)\\) [49, 68], but are still not fast enough to achieve real-time performance when rendering high-resolution images. For instance, when rendering 4K resolution images, their speed reduces to only 1 or 2 FPS [2, 49, 68]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.211, + 0.472, + 0.318 + ], + "angle": 0, + "content": "In this paper, we propose a novel neural representation, named 4K4D, for modeling and rendering dynamic 3D scenes. As illustrated in Fig. 
1, 4K4D significantly outperforms previous dynamic view synthesis approaches [19, 49] in terms of the rendering speed, while being competitive in the rendering quality. Our core innovation lies in a 4D point cloud representation and a hybrid appearance model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.331, + 0.472, + 0.542 + ], + "angle": 0, + "content": "Specifically, for the dynamic scene, we obtain the coarse point cloud sequence using space carving [37] and model the position of each point as a learnable vector. A 4D feature grid is introduced for assigning a feature vector to each point, which is fed into MLP networks to predict the point's radius, density, and spherical harmonics (SH) coefficients [59]. The 4D feature grid naturally applies spatial regularization on the point clouds and makes the optimization more robust (Sec. 5.2). During inference, the point's radius, density and SH coefficients can be pre-computed, which eliminates network evaluations to achieve unprecedented rendering speed. Moreover, we develop a differentiable depth peeling algorithm that exploits the hardware rasterizer to further significantly accelerate the rendering." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.556, + 0.471, + 0.737 + ], + "angle": 0, + "content": "We empirically find that the image blending model [49] achieves higher rendering quality than the SH model used by 3DGS [33]. However, the image blending model of previous methods [48, 49, 90] requires slow network evaluations during inference, limiting their rendering speed. To alleviate this, we introduce a novel design where we make the image blending network independent of the viewing direction, so the network evaluation can be pre-computed and thereby boost the rendering speed. As a two-edged sword, this strategy makes the appearance model discrete along the viewing direction. This downside is compensated for by using another continuous SH model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.472, + 0.901 + ], + "angle": 0, + "content": "To validate the effectiveness of the proposed pipeline, we evaluate 4K4D on multiple widely used datasets for multi-view dynamic novel view synthesis, including NHR [93], ENeRF-Outdoor [49], DNA-Rendering [12], and Neural3DV [41]. Extensive experiments show that 4K4D could not only be rendered orders of magnitude faster but also notably outperform the baselines in terms of rendering quality. With an RTX 4090 GPU, our method reaches 400 FPS on the DNA-Rendering dataset at \\(1080\\mathrm{p}\\) resolution and 80 FPS on the ENeRF-Outdoor dataset at 4K resolution." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.09, + 0.642, + 0.107 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.118, + 0.895, + 0.3 + ], + "angle": 0, + "content": "Traditional scene representations. In the domain of novel view synthesis, various approaches based on different representations have been proposed, including multi-view image-based methods [6, 8, 18, 31, 69, 103], multi-plane image representations [47, 56, 65, 83, 86, 86], light-field techniques [14, 21, 39] as well as explicit surface or voxel-based methods [5, 13, 15, 22, 44, 61, 62, 100]. The seminal work [13] utilizes depth sensors and multi-view stereo techniques to consolidate per-view depth information into a coherent mesh sequence, producing high-quality volumetric video. These methods require intricate hardware setups and studio arrangements, thus constraining their accessibility." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.303, + 0.897, + 0.545 + ], + "angle": 0, + "content": "Neural scene representations. Recently, implicit neural scene representations[3, 24, 27, 30, 32, 51, 52, 58, 76, 79-81, 85, 91] have attracted significant interest among researchers. NeRF[58] encodes the radiance fields of static scenes using coordinate-based Multi-Layer Perceptrons (MLP), achieving exceptional novel view synthesis quality. Building upon NeRF, a collection of studies [28, 42, 45, 63, 64, 70, 93] have made extensions to accommodate for dynamic scenes. Another line of studies [10, 46, 90, 98] has focused on integrating image features into the NeRF rendering pipeline. This approach is easily applicable to dynamic scenes, as multi-view videos can be directly decomposed into multiview images. However, NeRF-based approaches often suffer from substantial network evaluation costs during the volume rendering process, which significantly limits their rendering speed and thus hinders their practicality." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.551, + 0.895, + 0.777 + ], + "angle": 0, + "content": "Accelerating neural scene representations. To accelerate NeRF's rendering, multiple works propose to distill implicit MLP networks into explicit structures that offer fast query capabilities, including voxel grids [20, 25, 40, 60, 72, 96, 97], explicit surfaces [11, 23, 29, 36, 54, 67] and point-based representations [1, 33, 35, 38, 71, 73, 101]. These methods effectively reduce the cost or the number of NeRF's MLP evaluations required. Inspired by their success, several approaches [2, 9, 48, 49, 53, 68, 75, 82, 82, 87, 88] have explored the possibility of real-time dynamic view synthesis. HyperReel [2] employs a primitive prediction module to reduce the number of network evaluations, thereby achieving real-time speed at moderate resolutions. However, it should be noted that their rendering speed decreases significantly when rendering higher-resolution images." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.897, + 0.903 + ], + "angle": 0, + "content": "Gaussian Splatting. One notable advancement for accelerating NeRF is the development of 3D Gaussian Splatting (3DGS) [33] which introduces a differentiable Gaussian ellipsoids splatting algorithm for fast and differentiable volume rendering [4, 17]. By effectively eliminating the slow ray marching operation of NeRF with forward splatting and SH [59], they attain both high-fidelity and high-speed rendering. However, the storage cost of 3DGS limits its application" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20030" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.327, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.089, + 0.659, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.089, + 0.892, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.281, + 0.894, + 0.352 + ], + "angle": 0, + "content": "Figure 2. Overview of our proposed pipeline. (a) By applying the space-carving algorithm [37], we extract the initial cloud sequence \\(\\mathbf{x}, t\\) of the target scene. A 4D feature grid [19] is predefined to assign a feature vector to each point, which is then fed into MLPs for the scene geometry and appearance. 
(b) The geometry model is based on the point location, radius, and density, which forms a semi-transparent point cloud. (c) The appearance model consists of a piece-wise constant IBR term \\(\\mathbf{c}_{ibr}\\) and a continuous SH model \\(\\mathbf{c}_{sh}\\). (d) The proposed representation is learned from multi-view RGB videos through the differentiable depth peeling algorithm." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.363, + 0.473, + 0.606 + ], + "angle": 0, + "content": "on dynamic scenes. In contrast, the 4D feature grid and image blending model of 4K4D could not only maintain similar rendering quality but also significantly reduce the storage cost for modeling dynamic scenes. Moreover, the simpler point cloud representation and the 4D feature grid regularization also make 4K4D less prone to overfitting training views than 3DGS. Some recent concurrent works [43, 55, 92, 94, 95] have also reported real-time rendering speeds by incorporating temporal correspondence or time-dependency into 3DGS. However, these methods either do not show results on datasets with large and fast motions [43, 95] (like NHR [93]) or only report real-time speed at moderate resolution \\((800\\times 800\\) [92] and \\(640\\times 480\\) [55]). In contrast, 4K4D is capable of real-time rendering even at 4K resolution while concurrently maintaining state-of-the-art view-synthesis quality on large-motion data." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.628, + 0.269, + 0.646 + ], + "angle": 0, + "content": "3. Proposed Approach" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.656, + 0.47, + 0.763 + ], + "angle": 0, + "content": "Given a multi-view video capturing a dynamic 3D scene, our goal is to reconstruct the target scene and perform novel view synthesis in real time. To this end, we extract coarse point clouds of the scene using the space-carving algorithm [37] (Sec. 4) and build a point cloud-based neural scene representation, which can be robustly learned from input videos and enable the hardware-accelerated rendering." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.471, + 0.903 + ], + "angle": 0, + "content": "The overview of the proposed model is presented in Fig. 2. In this section, we first describe how to represent the geometry and appearance of dynamic scenes based on point clouds and neural networks (Sec. 3.1). Then, we develop a differentiable depth peeling algorithm for rendering our representation (Sec. 3.2), which is supported by the hardware rasterizer, thereby significantly improving the rendering speed. Finally, we discuss how to optimize the proposed model on input RGB videos (Sec. 3.3)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.362, + 0.88, + 0.379 + ], + "angle": 0, + "content": "3.1. Modeling Dynamic Scenes with Point Clouds" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.386, + 0.895, + 0.492 + ], + "angle": 0, + "content": "4D embedding. Given the coarse point clouds of the target scene, we represent its dynamic geometry and appearance using neural networks and feature grids. Specifically, our method first defines six feature planes \\(\\theta_{xy}, \\theta_{xz}, \\theta_{yz}, \\theta_{tx}, \\theta_{ty}\\), and \\(\\theta_{tz}\\). 
To assign a feature vector \\(\\mathbf{f}\\) to any point \\(\\mathbf{x}\\) at frame \\(t\\), we adopt the strategy of K-Planes [19] to model a 4D feature field \\(\\Theta(\\mathbf{x}, t)\\) using these six planes:" + }, + { + "type": "equation", + "bbox": [ + 0.515, + 0.507, + 0.892, + 0.543 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {f} = \\Theta (\\mathbf {x}, t) = \\theta_ {x y} (x, y) \\oplus \\theta_ {x z} (x, z) \\oplus \\theta_ {y z} (y, z) \\oplus \\\\ \\theta_ {t x} (t, x) \\oplus \\theta_ {t y} (t, y) \\oplus \\theta_ {t z} (t, z), \\tag {1} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.554, + 0.894, + 0.599 + ], + "angle": 0, + "content": "where \\(\\mathbf{x} = (x,y,z)\\) is the input point, and \\(\\oplus\\) indicates the concatenation operator. Please refer to K-Planes [19] for more implementation details." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.602, + 0.892, + 0.738 + ], + "angle": 0, + "content": "Geometry model. Based on coarse point clouds, the dynamic scene geometry is represented by learning three entries on each point: position \\(\\mathbf{p} \\in R^3\\), radius \\(r \\in R\\), and density \\(\\sigma \\in R\\). Using these point entries, we calculate the volume density of space point \\(\\mathbf{x}\\) with respect to an image pixel \\(\\mathbf{u}\\) for the volume rendering, which will be described in Sec. 3.2. The point position \\(\\mathbf{p}\\) is modeled as an estimizable vector. The radius \\(r\\) and density \\(\\sigma\\) are predicted by feeding the feature vector \\(\\mathbf{f}\\) in Eq. (1) to an MLP network." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.74, + 0.894, + 0.846 + ], + "angle": 0, + "content": "Appearance model. As illustrated in Fig. 2c, we use the image blending technique and the spherical harmonics (SH) model [59, 97] to build a hybrid appearance model, where the image blending technique represents the discrete view-dependent appearance \\(\\mathbf{c}_{ibr}\\) and the SH model represents the continuous view-dependent appearance \\(\\mathbf{c}_{sh}\\). For point \\(\\mathbf{x}\\) at frame \\(t\\), its color with viewing direction \\(\\mathbf{d}\\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.569, + 0.858, + 0.892, + 0.875 + ], + "angle": 0, + "content": "\\[\n\\mathbf {c} (\\mathbf {x}, t, \\mathbf {d}) = \\mathbf {c} _ {i b r} (\\mathbf {x}, t, \\mathbf {d}) + \\mathbf {c} _ {s h} (\\mathbf {s}, \\mathbf {d}), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.886, + 0.779, + 0.902 + ], + "angle": 0, + "content": "where \\( s \\) means SH coefficients at point \\( x \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "20031" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.321 + ], + "angle": 0, + "content": "The discrete view-dependent appearance \\(\\mathbf{c}_{ibr}\\) is inferred based on input images. Specifically, for a point \\(\\mathbf{x}\\), we first project it into the input image to retrieve the corresponding RGB color \\(\\mathbf{c}_{img}^i\\). Then, to blend input RGB colors, we calculate the corresponding blending weight \\(w^i\\) based on the point coordinate and the input image. Note that the blending weight is independent from the viewing direction. Next, to achieve the view-dependent effect, we select the \\(N'\\) nearest input views according to the viewing direction. 
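To make the 4D embedding of Eq. (1) and the geometry heads of Sec. 3.1 concrete, the following PyTorch sketch bilinearly samples six learnable 2D feature planes, concatenates the results into the point feature f, and predicts a radius and density from it. This is an illustrative sketch only: the plane resolution, feature width, and MLP sizes are placeholder values, not the paper's settings.

```python
# Minimal sketch of the six-plane 4D feature interpolation (Eq. (1)) and the
# radius/density heads of Sec. 3.1. Hyperparameters below are made up.
import torch
import torch.nn as nn
import torch.nn.functional as F


class FourDEmbedding(nn.Module):
    def __init__(self, feat_dim=8, res=64):
        super().__init__()
        # Six learnable 2D feature planes: (xy, xz, yz, tx, ty, tz).
        self.planes = nn.ParameterList(
            [nn.Parameter(0.1 * torch.randn(1, feat_dim, res, res)) for _ in range(6)]
        )
        # Small MLP head predicting per-point radius and density from f.
        self.geometry_head = nn.Sequential(
            nn.Linear(6 * feat_dim, 64), nn.ReLU(), nn.Linear(64, 2)
        )

    def sample_plane(self, plane, u, v):
        # Bilinear interpolation of one plane at normalized coords in [-1, 1].
        grid = torch.stack([u, v], dim=-1).view(1, -1, 1, 2)
        feat = F.grid_sample(plane, grid, align_corners=True)  # (1, C, N, 1)
        return feat.view(plane.shape[1], -1).t()               # (N, C)

    def forward(self, x, t):
        # x: (N, 3) point coordinates in [-1, 1]; t: (N,) normalized time.
        xs, ys, zs = x[:, 0], x[:, 1], x[:, 2]
        pairs = [(xs, ys), (xs, zs), (ys, zs), (t, xs), (t, ys), (t, zs)]
        # Eq. (1): concatenate the six plane features into f.
        f = torch.cat(
            [self.sample_plane(p, u, v) for p, (u, v) in zip(self.planes, pairs)], dim=-1
        )
        radius, density = self.geometry_head(f).split(1, dim=-1)
        return f, F.softplus(radius), torch.sigmoid(density)


# Toy usage: 1024 points of one frame.
emb = FourDEmbedding()
pts = torch.rand(1024, 3) * 2 - 1
time = torch.full((1024,), -0.5)
f, r, sigma = emb(pts, time)
print(f.shape, r.shape, sigma.shape)  # (1024, 48), (1024, 1), (1024, 1)
```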
Finally, the color \\(\\mathbf{c}_{ibr}\\) is computed as \\(\\sum_{i=1}^{N'} w^i \\mathbf{c}_{img}^i\\). Because the \\(N'\\) input views are obtained through the nearest neighbor retrieval, the \\(\\mathbf{c}_{ibr}\\) is inevitably discrete along the viewing direction. To achieve the continuous view-dependent effect, we append the fine-level color \\(\\mathbf{c}_{sh}\\) represented by the SH model, as shown in Fig. 2c." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.323, + 0.473, + 0.445 + ], + "angle": 0, + "content": "In practice, our method regresses the SH coefficients \\( \\mathbf{s} \\) by passing the point feature \\( \\mathbf{f} \\) in Eq. (1) into an MLP network. To predict the blending weight \\( w^{i} \\) in the image blending model \\( \\mathbf{c}_{ibr} \\), we first project point \\( \\mathbf{x} \\) onto the input image to retrieve the image feature \\( \\mathbf{f}_{img}^{i} \\), and then concatenate it with the point feature \\( \\mathbf{f} \\), which is fed into another MLP network to predict the blending weight. The image feature \\( \\mathbf{f}_{img}^{i} \\) is extracted using a 2D CNN network." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.447, + 0.473, + 0.735 + ], + "angle": 0, + "content": "Discussion. Our appearance model is the key to achieving the low-storage, high-fidelity, and real-time view synthesis of dynamic scenes. There are three alternative ways to represent the dynamic appearance, but they cannot perform on par with our model. 1) Defining explicit SH coefficients on each point, as in 3D Gaussian splatting [33]. When the degree of SH coefficients is high and the amount of points of dynamic scenes is large, this model's size could be too big to train on a consumer GPU. 2) MLP-based SH model. Using an MLP to predict SH coefficients of each point can effectively decrease the model size. However, our experiments found that MLP-based SH model struggles to render high-quality images (Sec. 5.2). 3) Continuous view-dependent image blending model, as in ENeRF [49]. We found that representing the appearance with the image blending model exhibits better rendering quality than only with the MLP-based SH model. However, the color network in ENeRF takes the viewing direction as input and thus cannot be easily pre-computed, limiting the rendering speed during inference." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.473, + 0.903 + ], + "angle": 0, + "content": "In contrast to these three methods, our appearance model combines a discrete image blending model \\(\\mathbf{c}_{ibr}\\) with a continuous SH model \\(\\mathbf{c}_{sh}\\). The image blending model \\(\\mathbf{c}_{ibr}\\) boosts the rendering performance. In addition, it supports the pre-computation, as its network does not take the viewing direction as input. The SH model \\(\\mathbf{c}_{sh}\\) enables the view-dependent effect for any viewing direction. During training, our model represents the scene appearance using networks, so its model size is reasonable. During inference, we pre-compute the network outputs to achieve the real-time rendering, which will be described in Sec. 3.4." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.091, + 0.757, + 0.108 + ], + "angle": 0, + "content": "3.2. Differentiable Depth Peeling" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.116, + 0.895, + 0.312 + ], + "angle": 0, + "content": "Our proposed dynamic scene representation can be rendered into images by performing volume rendering [17] on rasterized points. 
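The hybrid appearance model of Eq. (2) can be sketched as follows: the discrete term c_ibr blends colors fetched from the N' source views closest to the query viewing direction using view-direction-independent (and therefore pre-computable) blend weights, and the continuous term c_sh adds a low-degree spherical harmonics correction. The softmax normalization of the selected weights and the degree-1 SH truncation below are illustrative assumptions; in the paper, the blend weights are predicted by an MLP from point and image features.

```python
# Sketch of the hybrid appearance model in Eq. (2); toy data and an assumed
# normalization scheme, not the authors' implementation.
import torch
import torch.nn.functional as F


def eval_sh_deg1(sh, d):
    """sh: (N, 3, 4) SH coefficients per RGB channel, d: (N, 3) unit view dirs."""
    x, y, z = d[:, 0], d[:, 1], d[:, 2]
    basis = torch.stack(
        [torch.full_like(x, 0.282095), 0.488603 * y, 0.488603 * z, 0.488603 * x], dim=-1
    )  # real SH basis up to degree 1
    return (sh * basis[:, None, :]).sum(-1)  # (N, 3)


def hybrid_color(view_dirs, cam_dirs, colors_per_view, logits_per_view, sh, n_nearest=4):
    """
    view_dirs:       (N, 3) query viewing direction per point.
    cam_dirs:        (V, 3) direction towards each source camera.
    colors_per_view: (N, V, 3) colors fetched by projecting each point into each view.
    logits_per_view: (N, V) precomputed, view-direction-independent blend logits.
    sh:              (N, 3, 4) SH coefficients predicted per point.
    """
    # Pick the N' source views whose directions best align with the query direction.
    sim = view_dirs @ cam_dirs.t()                       # (N, V)
    idx = sim.topk(n_nearest, dim=-1).indices            # (N, N')
    sel_logits = torch.gather(logits_per_view, 1, idx)   # (N, N')
    w = F.softmax(sel_logits, dim=-1)                    # assumed normalization
    sel_colors = torch.gather(
        colors_per_view, 1, idx[..., None].expand(-1, -1, 3)
    )                                                    # (N, N', 3)
    c_ibr = (w[..., None] * sel_colors).sum(1)           # discrete IBR term
    c_sh = eval_sh_deg1(sh, view_dirs)                   # continuous correction
    return (c_ibr + c_sh).clamp(0.0, 1.0)                # Eq. (2)


# Toy usage: 1000 points, 18 source views.
N, V = 1000, 18
d = F.normalize(torch.randn(N, 3), dim=-1)
cams = F.normalize(torch.randn(V, 3), dim=-1)
c = hybrid_color(d, cams, torch.rand(N, V, 3), torch.randn(N, V), 0.01 * torch.randn(N, 3, 4))
print(c.shape)  # torch.Size([1000, 3])
```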
This forward process is much faster than NeRF's backward ray-marching operation [57] since it requires no network evaluation and explicit sampling. The volume rendering equation requires the color and transparency values to be integrated in order [4], thus we utilize the depth-peeling algorithm for acquiring the corresponding ordered points for pixels. Thanks to the point cloud representation, we can leverage the hardware rasterizer to significantly speed up the depth peeling and blending process. Moreover, it is easy to make this rendering process differentiable, enabling us to learn our model from input RGB videos." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.314, + 0.895, + 0.512 + ], + "angle": 0, + "content": "We develop a custom shader to implement the depth peeling algorithm that consists of \\( K \\) rendering passes. Consider a particular image pixel \\( \\mathbf{u} \\). In the first pass, our method first uses the hardware rasterizer to render point clouds onto the image, which assigns the closest-to-camera point \\( \\mathbf{x}_0 \\) to the pixel \\( \\mathbf{u} \\). Denote the depth of point \\( \\mathbf{x}_0 \\) as \\( t_0 \\). Subsequently, in the \\( k \\)-th rendering pass, all points with depth value \\( t_k \\) smaller than the recorded depth of the previous pass \\( t_{k-1} \\) are discarded, thereby resulting in the \\( k \\)-th closest-to-camera point \\( \\mathbf{x}_k \\) for the pixel \\( \\mathbf{u} \\). Discarding closer points is implemented in our custom shader, so it still supports the hardware rasterization. After \\( K \\) rendering passes, pixel \\( \\mathbf{u} \\) has a set of sorted points \\( \\{\\mathbf{x}_k | k = 1, \\dots, K\\} \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.512, + 0.895, + 0.575 + ], + "angle": 0, + "content": "Based on the sorted points, we use the volume rendering technique to synthesize the color of pixel \\(\\mathbf{u}\\). The densities of these points for pixel \\(\\mathbf{u}\\) are defined based on the distance between the projected point and pixel \\(\\mathbf{u}\\) on the 2D image:" + }, + { + "type": "equation", + "bbox": [ + 0.554, + 0.587, + 0.895, + 0.62 + ], + "angle": 0, + "content": "\\[\n\\alpha (\\mathbf {u}, \\mathbf {x}) = \\sigma \\cdot \\max (1 - \\frac {| | \\pi (\\mathbf {x}) - \\mathbf {u} | | _ {2} ^ {2}}{r ^ {2}}, 0), \\qquad (3)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.631, + 0.894, + 0.782 + ], + "angle": 0, + "content": "where \\(\\pi\\) is the camera projection function. \\(\\sigma\\) and \\(r\\) are the density and radius of point \\(\\mathbf{x}\\), which are described in Sec. 3.1. Intuitively, Eq. (3) defines a semi-transparent point representation where the density is the highest around the center and quadratically decreases along its radius. During training, we implement the projection function \\(\\pi\\) using the PyTorch [66], so Eq. (3) is naturally differentiable. During inference, we leverage the hardware rasterization process to efficiently obtain the distance \\(\\| \\pi (\\mathbf{x}) - \\mathbf{u}\\| _2^2\\), which is implemented using OpenGL [77]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.784, + 0.895, + 0.814 + ], + "angle": 0, + "content": "Denote the density of point \\(\\mathbf{x}_k\\) as \\(\\alpha_{k}\\). 
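A per-pixel emulation helps make the density of Eq. (3) and the K-pass peeling loop concrete. The actual renderer performs the K passes with the hardware rasterizer and a custom shader over the whole image; the toy snippet below simply loops over candidate splats for a single pixel and is meant only as a reference for the math, with all inputs being made-up data.

```python
# CPU sketch of depth peeling and front-to-back compositing for one pixel u.
# Illustrative only; the real pipeline uses K hardware rasterization passes.
import torch


def point_alpha(uv, proj_xy, radius, density):
    # Eq. (3): alpha peaks at the splat centre and falls off quadratically
    # to zero at the point radius (measured in pixels here).
    d2 = ((proj_xy - uv) ** 2).sum(-1)
    return density * (1.0 - d2 / radius**2).clamp(min=0.0)


def depth_peel_pixel(uv, proj_xy, depth, radius, density, color, K=12):
    """Composite the K closest points covering pixel uv, front to back."""
    covering = ((proj_xy - uv) ** 2).sum(-1) < radius**2  # splats covering u
    C = torch.zeros(3)
    T = torch.tensor(1.0)                                  # accumulated transmittance
    last_depth = torch.tensor(-float("inf"))
    for _ in range(K):
        # k-th pass: closest covering point strictly behind the last peeled depth.
        valid = covering & (depth > last_depth)
        if not valid.any():
            break
        k = torch.where(valid, depth, torch.full_like(depth, float("inf"))).argmin()
        alpha = point_alpha(uv, proj_xy[k], radius[k], density[k])
        C = C + T * alpha * color[k]                       # accumulate color
        T = T * (1.0 - alpha)
        last_depth = depth[k]
    return C


# Toy usage: 200 random splats around the pixel (64, 64).
N = 200
uv = torch.tensor([64.0, 64.0])
C = depth_peel_pixel(
    uv,
    proj_xy=uv + 8.0 * torch.randn(N, 2),
    depth=torch.rand(N) * 5.0 + 1.0,
    radius=torch.full((N,), 3.0),
    density=torch.rand(N) * 0.8,
    color=torch.rand(N, 3),
)
print(C)
```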
The color of pixel \\(\\mathbf{u}\\) from the volume rendering is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.536, + 0.829, + 0.895, + 0.872 + ], + "angle": 0, + "content": "\\[\nC (\\mathbf {u}) = \\sum_ {k = 1} ^ {K} T _ {k} \\alpha_ {k} \\mathbf {c} _ {k}, \\text {w h e r e} T _ {k} = \\prod_ {j = 1} ^ {k - 1} (1 - \\alpha_ {j}), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.886, + 0.878, + 0.902 + ], + "angle": 0, + "content": "where \\(\\mathbf{c}_k\\) is the color of point \\(\\mathbf{x}_k\\), as described in Eq. (2)." + }, + { + "type": "footer", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20032" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.091, + 0.182, + 0.108 + ], + "angle": 0, + "content": "3.3. Training" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.114, + 0.47, + 0.16 + ], + "angle": 0, + "content": "Given the rendered pixel color \\( C(\\mathbf{u}) \\), we compare it with the ground-truth pixel color \\( C_{gt}(\\mathbf{u}) \\) to optimize our model in an end-to-end fashion using the following loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.161, + 0.171, + 0.47, + 0.203 + ], + "angle": 0, + "content": "\\[\nL _ {i m g} = \\sum_ {\\mathbf {u} \\in \\mathcal {U}} | | C (\\mathbf {u}) - C _ {g t} (\\mathbf {u}) | | _ {2} ^ {2}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.214, + 0.469, + 0.246 + ], + "angle": 0, + "content": "where \\(\\mathcal{U}\\) is the set of image pixels. In addition to the MSE loss \\(L_{img}\\), we also apply the perceptual loss \\(L_{lpips}\\) [102]." + }, + { + "type": "equation", + "bbox": [ + 0.176, + 0.257, + 0.47, + 0.274 + ], + "angle": 0, + "content": "\\[\nL _ {l p i p s} = \\left\\| \\Phi (I) - \\Phi \\left(I _ {g t}\\right) \\right\\| _ {1}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.285, + 0.47, + 0.376 + ], + "angle": 0, + "content": "where \\(\\Phi\\) is the perceptual function (a VGG16 network) and \\(I, I_{gt}\\) are the rendered and ground-truth images, respectively. The perceptual loss [102] computes the difference in image features extracted from the VGG model [78]. Our experiments in Sec. 5.2 show that it effectively improves the perceived quality of the rendered image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.376, + 0.469, + 0.452 + ], + "angle": 0, + "content": "To regularize the optimization process of our proposed representation, we additionally apply mask supervision to dynamic regions of the target scene. We solely render point clouds of dynamic regions to obtain their masks, where the pixel value is obtained by:" + }, + { + "type": "equation", + "bbox": [ + 0.119, + 0.463, + 0.47, + 0.506 + ], + "angle": 0, + "content": "\\[\nM (\\mathbf {u}) = \\sum_ {k = 1} ^ {K} T _ {k} \\alpha_ {k}, \\text {w h e r e} T _ {k} = \\prod_ {j = 1} ^ {k - 1} (1 - \\alpha_ {j}). 
\\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.517, + 0.265, + 0.531 + ], + "angle": 0, + "content": "The mask loss is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.145, + 0.541, + 0.47, + 0.574 + ], + "angle": 0, + "content": "\\[\nL _ {m s k} = - \\sum_ {\\mathbf {u} \\in \\mathcal {U} ^ {\\prime}} | | M (\\mathbf {u}) - M _ {g t} (\\mathbf {u}) | | _ {2} ^ {2}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.585, + 0.469, + 0.646 + ], + "angle": 0, + "content": "where \\(\\mathbf{U}'\\) means the set of pixels of the rendered mask, and \\(M_{gt}\\) is the ground-truth mask of 2D dynamic regions. This effectively regularizes the optimization of the geometry of dynamic regions by confining it to the visual hulls." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.646, + 0.334, + 0.66 + ], + "angle": 0, + "content": "The final loss function is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.14, + 0.674, + 0.47, + 0.69 + ], + "angle": 0, + "content": "\\[\nL = L _ {i m g} + \\lambda_ {l p i p s} L _ {l p i p s} + \\lambda_ {m s k} L _ {m s k}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.702, + 0.469, + 0.732 + ], + "angle": 0, + "content": "where \\(\\lambda_{lpips}\\) and \\(\\lambda_{msk}\\) are hyperparameters controlling weights of correspondings losses." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.742, + 0.187, + 0.756 + ], + "angle": 0, + "content": "3.4. Inference" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.902 + ], + "angle": 0, + "content": "After training, we apply a few acceleration techniques to boost the rendering speed of our model. First, we precompute the point location \\(\\mathbf{p}\\), radius \\(r\\), density \\(\\sigma\\), SH coefficients \\(\\mathbf{s}\\) and color blending weights \\(w_{i}\\) before inference, which are stored at the main memory. During rendering, these properties are asynchronously streamed onto the graphics card, overlapping rasterization with memory copy to achieve an optimal rendering speed [74, 77]. After applying this technique, the runtime computation is reduced to only a" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.228 + ], + "angle": 0, + "content": "depth peeling evaluation (Sec. 3.2) and a spherical harmonics evaluation (Eq. (2)). Second, we convert the model from 32-bit floats to 16-bits for efficient memory access, which increases FPS by 20 and leads to no visible performance loss. Third, the number of rendering passes \\( K \\) for the differentiable depth peeling algorithm is reduced from 15 to 12, also leading to a 20 FPS speedup with no visual quality change. Detailed analyses of rendering speed can be found in Sec. 5.2 and the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.242, + 0.722, + 0.26 + ], + "angle": 0, + "content": "4. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.268, + 0.894, + 0.45 + ], + "angle": 0, + "content": "**Optimization.** 4K4D is trained using the PyTorch framework [66]. Using the Adam optimizer [34] with a learning rate \\(5e^{-3}\\), our models typically converge after 800k iterations for a sequence length of 200 frames, which takes around 24 hours on a single RTX 4090 GPU. Specifically, the learning rate of point positions is set to \\(1e^{-5}\\), and the regularization loss weights \\(\\lambda_{lpips}\\) and \\(\\lambda_{msk}\\) are set to \\(1e^{-3}\\). 
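For reference, the full objective of Eqs. (5), (6), (8) and (9) can be assembled roughly as in the following sketch. The image shapes are assumed, the perceptual term uses the LPIPS package corresponding to [102], the default weights follow the values stated above, and the mask term is written as a plain squared error, assuming that is the intended form of Eq. (8).

```python
# Sketch of the total objective in Eq. (9):
# L = L_img + lambda_lpips * L_lpips + lambda_msk * L_msk.
import torch
import lpips  # pip install lpips

perceptual = lpips.LPIPS(net='vgg')

def total_loss(pred_rgb, gt_rgb, pred_mask, gt_mask,
               lambda_lpips=1e-3, lambda_msk=1e-3):
    # pred_rgb, gt_rgb:   (1, 3, H, W) rendered / ground-truth images in [0, 1]
    # pred_mask, gt_mask: (1, 1, H, W) rendered / ground-truth dynamic-region masks
    l_img = ((pred_rgb - gt_rgb) ** 2).sum()                       # Eq. (5)
    l_lpips = perceptual(pred_rgb * 2 - 1, gt_rgb * 2 - 1).mean()  # Eq. (6), inputs in [-1, 1]
    l_msk = ((pred_mask - gt_mask) ** 2).sum()                     # Eq. (8), squared error
    return l_img + lambda_lpips * l_lpips + lambda_msk * l_msk
```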
During training, the number of passes \\(K\\) for the differentiable depth peeling is set to 15, and the number of nearest input views \\(N'\\) is set to 4. The rendering speed of our method is reported on an RTX 3090 GPU for the experiments in Sec. 5 unless otherwise stated." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.453, + 0.894, + 0.65 + ], + "angle": 0, + "content": "Initialization of point clouds. We leverage existing multi-view reconstruction methods to initialize the point clouds. For dynamic regions, we use segmentation methods [50] to obtain their masks in input images and utilize the space carving algorithm [37] to extract their coarse geometry. For static background regions, we leverage foreground masks to compute the mask-weighted average of background pixels along all frames, producing background images without the foreground content. Then, an Instant-NGP [60] model is trained on these images, from which we obtain the initial point clouds. After the initialization, the number of points for the dynamic regions is typically 250k per frame, and the static background regions typically consist of 300k points." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.664, + 0.633, + 0.681 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Datasets. We train and evaluate our method 4K4D on multiple widely used multi-view datasets, including DNA-Rendering [12], ENeRF-Outdoor [49] and NHR [93]. DNA-Rendering [12] records 10-second clips of dynamic humans and objects at 15 FPS using 4K and 2K cameras with 60 views. This dataset is very challenging due to the complex clothing and fast motions. We conduct experiments on 4 sequences of DNA-Rendering, with \\(90\\%\\) of the views as training set and the rest as evaluation set. ENeRF-Outdoor [49] records multiple dynamic humans and objects in an outdoor environment at 30FPS using 1080p cameras. We select three 100-frame sequences with 6 different actors (2 for each sequence) holding objects for evaluation. 
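Returning briefly to the point-cloud initialization described in Sec. 4 above, the sketch below illustrates one possible mask-based space-carving procedure; the camera convention, grid resolution, and voting threshold are assumptions, and the actual pipeline may differ.

```python
# Sketch (assumed camera convention, grid size, and threshold) of mask-based space
# carving: keep voxels that project inside the foreground mask in nearly all views.
import numpy as np

def carve_points(masks, Ks, Rs, ts, bounds, res=128, keep_frac=0.95):
    # masks: list of (H, W) boolean foreground masks, one per input view
    # Ks, Rs, ts: per-view intrinsics (3, 3), rotations (3, 3), translations (3,)
    # bounds: ((xmin, ymin, zmin), (xmax, ymax, zmax)) region of interest
    lo, hi = np.asarray(bounds[0]), np.asarray(bounds[1])
    axes = [np.linspace(lo[i], hi[i], res) for i in range(3)]
    grid = np.stack(np.meshgrid(*axes, indexing='ij'), axis=-1).reshape(-1, 3)
    votes = np.zeros(len(grid))
    for mask, K, R, t in zip(masks, Ks, Rs, ts):
        cam = grid @ R.T + t
        uv = cam @ K.T
        uv = uv[:, :2] / np.clip(uv[:, 2:3], 1e-6, None)
        u = np.round(uv[:, 0]).astype(int)
        v = np.round(uv[:, 1]).astype(int)
        ok = (u >= 0) & (u < mask.shape[1]) & (v >= 0) & (v < mask.shape[0]) & (cam[:, 2] > 0)
        hit = np.zeros(len(grid), dtype=bool)
        hit[ok] = mask[v[ok], u[ok]]
        votes += hit
    return grid[votes >= keep_frac * len(masks)]   # coarse points of the dynamic region
```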
This dataset is difficult for dynamic view synthesis in that not" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20033" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.09, + 0.28, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.091, + 0.483, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.487, + 0.091, + 0.686, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.091, + 0.891, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.228, + 0.28, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.138, + 0.37, + 0.223, + 0.381 + ], + "angle": 0, + "content": "Ground Truth" + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.228, + 0.483, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.336, + 0.37, + 0.43, + 0.381 + ], + "angle": 0, + "content": "Ours (141.7 FPS)" + }, + { + "type": "image", + "bbox": [ + 0.487, + 0.228, + 0.686, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.533, + 0.37, + 0.632, + 0.381 + ], + "angle": 0, + "content": "ENeRF (11.3 FPS)" + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.228, + 0.891, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.741, + 0.37, + 0.842, + 0.381 + ], + "angle": 0, + "content": "KPlanes (1.4 FPS)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.386, + 0.893, + 0.415 + ], + "angle": 0, + "content": "Figure 3. Qualitative comparison on the ENeRF-Outdoor [49] dataset that contains \\(960 \\times 540\\) images. Our method achieves much higher rendering quality and can be rendered \\(14 \\times\\) faster than ENeRF[49]. More dynamic results can be found in the supplementary video." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.434, + 0.47, + 0.48 + ], + "angle": 0, + "content": "only are there multiple moving humans and objects, but the background is also dynamic due to cast shadows. More details can be found in the supplementary." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.493, + 0.312, + 0.51 + ], + "angle": 0, + "content": "5.1. Comparison Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.52, + 0.471, + 0.837 + ], + "angle": 0, + "content": "Comparison on DNA-Rendering [12]. Qualitative and quantitative comparisons on DNA-Rendering [12] are shown in Fig. 4 and Tabs. 1 and 3 respectively. As evident in Tab. 1, our method renders \\(30\\mathrm{x}\\) faster than the SOTA real-time dynamic view synthesis method ENeRF [49] with superior quality. Even when compared with concurrent work [48], our method still achieves \\(13\\mathrm{x}\\) speedup and produces consistently higher quality images. As shown in Fig. 4, KPlanes [19] could not recover the highly detailed appearance and geometry of the 4D dynamic scene. Other image-based methods [48, 49, 90] produce high-quality appearance. However, they tend to produce blurry results around occlusions and edges, leading to degradation of the visual quality while maintaining interactive framerate at best. When compared with 3DGS [33] on the first frame of each sequence, our method achieves a much better storage efficiency \\((50\\times)\\) thanks to our compact 4D feature grid and image blending model. 
Moreover, due to the simplicity of our point-based representation, our method is less prone to overfit the training views. More details of the comparison with 3DGS can be found in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Comparison on ENeRF-Outdoor [49]. Fig. 3 and Tabs. 2 and 3 provides qualitative and quantitative results on the ENeRF-Outdoor [49] dataset. Even on the challenging ENeRF-Outdoor dataset with multiple actors and the back" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.432, + 0.894, + 0.488 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison on the DNA-Rendering [12] dataset. Image resolutions are \\( {1024} \\times {1224} \\) and \\( {1125} \\times {1536} \\) . Metrics are averaged over all scenes. Green and yellow cell colors indicate the best and the second best results, respectively." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.491, + 0.892, + 0.596 + ], + "angle": 0, + "content": "
| Method | PSNR ↑ | SSIM ↑ | LPIPS ↓ | FPS |
| ENeRF [49] | 28.108 | 0.972 | 0.056 | 6.011 |
| IBRNet [90] | 27.844 | 0.967 | 0.081 | 0.100 |
| KPlanes [19] | 27.452 | 0.952 | 0.118 | 0.640 |
| Im4D [48] | 28.991 | 0.973 | 0.062 | 15.360 |
| Ours | 31.173 | 0.976 | 0.055 | 203.610 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.607, + 0.892, + 0.648 + ], + "angle": 0, + "content": "Table 2. Quantitative comparison on the ENeRF-Ourdoor [49] dataset. This dataset includes \\( {960} \\times {540} \\) images. Green and yellow cell colors indicate the best and the second-best results, respectively." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.65, + 0.892, + 0.741 + ], + "angle": 0, + "content": "
| Method | PSNR ↑ | SSIM ↑ | LPIPS ↓ | FPS |
| ENeRF [49] | 25.452 | 0.809 | 0.273 | 11.309 |
| IBRNet [90] | 24.966 | 0.929 | 0.172 | 0.140 |
| KPlanes [19] | 21.310 | 0.735 | 0.454 | 1.370 |
| Ours | 25.815 | 0.898 | 0.147 | 141.665 |
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.894, + 0.901 + ], + "angle": 0, + "content": "ground, our method still achieves notably better results while rendering at over 140 FPS. ENeRF [49] produces blurry results on this challenging dataset, and the rendering results of IBRNet [90] contain black artifacts around the edges of the images as shown in Fig. 3. K-Planse [19] fails to reconstruct the dynamic humans and varying background regions. 3DGS [33] not only introduces much higher storage cost than our method \\((45\\times)\\), but also faces even more pronounced overfitting problem with smaller number of views (18 for ENeRF-Outdoor). As evident in Tab. 3 and the" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "20034" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.089, + 0.891, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.474, + 0.893, + 0.503 + ], + "angle": 0, + "content": "Figure 4. Qualitative comparison on the DNA-Rendering [12] dataset that contains \\(1024 \\times 1224\\) (and \\(1125 \\times 1536\\)) images. Our method can produce high-fidelity images at over 200 FPS while other competitors fail to produce high-quality results for highly dynamic scenes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.516, + 0.471, + 0.579 + ], + "angle": 0, + "content": "supplementary material, the overfitting severely degrades the rendering quality. Their rendering speed is slower than ours due to excessive point count. More details of the comparison with 3DGS are present in the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.586, + 0.241, + 0.601 + ], + "angle": 0, + "content": "5.2. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.609, + 0.47, + 0.716 + ], + "angle": 0, + "content": "We perform ablation studies on the proposed components on the 150-frame 0013_01 sequence of the DNA-Rendering [12] dataset. Our method can be rendered at over 200 FPS with state-of-the-art quality and maintains a only 2MB per frame storage overhead. More detailed rendering speed analysis and breakdown and storage cost analysis can be found in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.717, + 0.47, + 0.824 + ], + "angle": 0, + "content": "Ablation study on the 4D embedding. The \"w/o f\" variant removes the proposed 4D embedding (Sec. 3.1) module and replaces it with a per-frame and per-point estimizable position, radius, density, and scale. As shown in Fig. 5 and Tab. 4, the \"w/o f\" variant produces blurry and noisy geometry without the 4D embedding \\(\\Theta\\), which leads to the inferior rendering quality." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Ablation study on the hybrid appearance model. The \"w/o \\(\\mathbf{c}_{ibr}\\)\" variant removes \\(\\mathbf{c}_{ibr}\\) in the appearance formulation Eq. (2), which not only leads to less details on the recovered appearance but also significantly impedes the quality of the geometry. Adding an additional degree for the SH" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.515, + 0.895, + 0.584 + ], + "angle": 0, + "content": "Table 3. Quantitative comparison on the first frame of all sequences of DNA-Rendering [12] (1024×1224 (and 1125×1536) images) and ENeRF-Outdoor [49] (960×540 images). Metrics are averaged for each dataset. 
\"Storage\" indicates the disk file size of the trained models (including source images for our method)." + }, + { + "type": "table", + "bbox": [ + 0.501, + 0.588, + 0.894, + 0.671 + ], + "angle": 0, + "content": "
| Dataset | Method | PSNR | LPIPS | FPS | Storage | Training |
| DNA-Rendering | 3DGS [33] | 31.16 | 0.049 | 113.2 | 224 MB | 5 min |
| DNA-Rendering | Ours | 31.87 | 0.046 | 241.7 | 4.7 MB | 15 min |
| ENeRF-Outdoor | 3DGS [33] | 21.63 | 0.349 | 88.4 | 715 MB | 10 min |
| ENeRF-Outdoor | Ours | 26.54 | 0.145 | 148.6 | 16.0 MB | 30 min |
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.682, + 0.894, + 0.757 + ], + "angle": 0, + "content": "coefficients does not lead to a significant performance change (PSNR 30.129 vs. 30.259). Comparatively, our proposed method produces high-fidelity rendering with better details. A visualization of the view-dependent effect produced by \\(\\mathbf{c}_{sh}\\) can be found in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.761, + 0.894, + 0.852 + ], + "angle": 0, + "content": "Ablation study on loss functions. As shown in Tab. 4, removing the \\( L_{lpips} \\) term not only reduces the perceptual quality (LPIPS score) but also leads to the degradation of other performance metrics. For the highly dynamic DNA-Rendering [12] dataset, the mask loss \\( L_{msk} \\) helps with regularizing the optimization of the dynamic geometry." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Rendering speed on different GPUs and resolutions. We additionally report the rendering speed of our method on different hardware (RTX 3060, RTX 3090, and RTX 4090)" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20035" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.085, + 0.898, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.347, + 0.895, + 0.378 + ], + "angle": 0, + "content": "Figure 5. Ablation studies on the 0013_01 sequence of DNA-Rendering [12]. Removing our proposed components leads to noisy geometry and blurry appearance. Our method produces high-fidelity results with perceptually accurate shapes and colors. See Sec. 5.2 for more details." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.395, + 0.473, + 0.489 + ], + "angle": 0, + "content": "with different resolutions (720p, 1080p, and 4K (2160p)) in Tab. 5. The rendering speed reported here contains the overhead of the interactive GUI. 4K4D achieves real-time rendering speed even when rendering 4K (2160p) images on commodity hardware as shown in the table. More real-time rendering demos can be found in the supplementary video." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.508, + 0.327, + 0.525 + ], + "angle": 0, + "content": "6. Conclusion and Discussion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.536, + 0.47, + 0.718 + ], + "angle": 0, + "content": "In this paper, we provide a neural point cloud-based representation, 4K4D, for real-time rendering of dynamic 3D scenes at 4K resolution. We build 4K4D upon a 4D feature grid to naturally regularize the points and develop a novel hybrid appearance model for high-quality rendering. Furthermore, we develop a differentiable depth peeling algorithm that utilizes the hardware rasterization pipeline to effectively optimize and efficiently render the proposed model. In our experiments, we demonstrate that 4K4D not only achieves state-of-the-art rendering quality but also exhibits a more than \\(30 \\times\\) increase in rendering speed (over 200FPS at 1080p on an RTX 3090 GPU)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.471, + 0.902 + ], + "angle": 0, + "content": "However, our method still has some limitations. For one, 4K4D cannot produce correspondences of points across frames, which are important for some downstream tasks. 
Moreover, the storage cost for 4K4D increases linearly with the number of video frames, so our method has difficulty in modeling long volumetric videos. How to model correspondences and reduce the storage cost for long videos could be two interesting problems for future work. In addition, the rendering quality of our method also depends on the resolution of input images. While our method achieves real-time rendering at 4K resolution, 4K-quality rendering can only be achieved with sufficient input resolution." + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.393, + 0.895, + 0.463 + ], + "angle": 0, + "content": "Table 4. Ablation studies on the 150-frame 0013_01 sequence of the DNA-Rendering dataset [12]. \"w/o f\" indicates replacing the 4D embedding with a per-frame and per-point optimizable position, radius, density, and scale. See Sec. 5.2 for more detailed descriptions of the abbreviations." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.466, + 0.892, + 0.586 + ], + "angle": 0, + "content": "
| Variant | PSNR ↑ | SSIM ↑ | LPIPS ↓ | Model Size |
| w/o f | 29.779 | 0.967 | 0.057 | 1304.0 MiB |
| w/o c_ibr | 30.259 | 0.973 | 0.054 | 225.0 MiB |
| w/o c_sh | 31.946 | 0.981 | 0.040 | 225.0 MiB |
| w/o L_lpips | 31.661 | 0.979 | 0.063 | 225.0 MiB |
| w/o L_msk | 29.115 | 0.965 | 0.073 | 225.0 MiB |
| Ours | 31.990 | 0.982 | 0.040 | 225.0 MiB |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.589, + 0.895, + 0.702 + ], + "angle": 0, + "content": "Table 5. Rendering speed on different GPUs and resolutions. The results are recorded on the first frame of the 0013_01 sequence of DNA-Rendering [12] and the actor1_4 sequence of ENeRF-Outdoor [49] with the interactive GUI. Resolutions are set to 720p \\((720\\times 1280)\\), 1080p \\((1080\\times 1920)\\), and 4K \\((2160\\times 3840)\\). Even with the overhead of the interactive GUI (\"w/ GUI\"), our method still achieves unprecedented rendering speed. More real-time rendering results can be found in the supplementary video." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.704, + 0.892, + 0.824 + ], + "angle": 0, + "content": "
| Dataset | Res. | RTX 3060 | RTX 3090 | RTX 4090 |
| DNA-Rendering [12] w/ GUI | 720p | 173.8 FPS | 246.9 FPS | 431.0 FPS |
| DNA-Rendering [12] w/ GUI | 1080p | 138.7 FPS | 233.1 FPS | 409.8 FPS |
| DNA-Rendering [12] w/ GUI | 4K | 90.0 FPS | 147.4 FPS | 288.8 FPS |
| ENeRF-Outdoor [49] w/ GUI | 720p | 90.5 FPS | 130.5 FPS | 351.5 FPS |
| ENeRF-Outdoor [49] w/ GUI | 1080p | 66.1 FPS | 103.6 FPS | 249.7 FPS |
| ENeRF-Outdoor [49] w/ GUI | 4K | 25.1 FPS | 47.2 FPS | 85.1 FPS |
" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.661, + 0.849 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.895, + 0.903 + ], + "angle": 0, + "content": "The authors would like to acknowledge support from NSFC (No. 62172364) and Information Technology Center and State Key Lab of CAD&CG, Zhejiang University." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20036" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.116, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Kara-Ali Aliev, Artem Sevastopolsky, Maria Kolos, Dmitry Ulyanov, and Victor Lempitsky. Neural point-based graphics. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXII 16, pages 696-712. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.184, + 0.472, + 0.266 + ], + "angle": 0, + "content": "[2] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.266, + 0.471, + 0.333 + ], + "angle": 0, + "content": "[3] Benjamin Attal, Jia-Bin Huang, Michael Zollhöfer, Johannes Kopf, and Changil Kim. Learning neural light fields with ray-space embedding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19819-19829, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.334, + 0.471, + 0.374 + ], + "angle": 0, + "content": "[4] Louis Bavoil and Kevin Myers. Order independent transparency with dual depth peeling. NVIDIA OpenGL SDK, 1:12, 2008. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.374, + 0.471, + 0.442 + ], + "angle": 0, + "content": "[5] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.442, + 0.471, + 0.524 + ], + "angle": 0, + "content": "[6] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, SIGGRAPH '01, page 425-432, New York, NY, USA, 2001. Association for Computing Machinery. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.524, + 0.471, + 0.578 + ], + "angle": 0, + "content": "[7] Dan Casas, Marco Volino, John Collomosse, and Adrian Hilton. 4d video textures for interactive character appearance. In Computer Graphics Forum, pages 371-380. Wiley Online Library, 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.578, + 0.471, + 0.619 + ], + "angle": 0, + "content": "[8] Gaurav Chaurasia, Sylvain Duchene, Olga Sorkine-Hornung, and George Drettakis. Depth synthesis and local warps for plausible image-based navigation. ACM TOG, 2013. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.619, + 0.471, + 0.644 + ], + "angle": 0, + "content": "[9] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. arXiv, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.645, + 0.471, + 0.698 + ], + "angle": 0, + "content": "[10] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In ICCV, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.699, + 0.471, + 0.765 + ], + "angle": 0, + "content": "[11] Zhiqin Chen, Thomas Funkhouser, Peter Hedman, and Andrea Tagliasacchi. Mobilenerf: Exploiting the polygon rasterization pipeline for efficient neural field rendering on mobile architectures. arXiv preprint arXiv:2208.00277, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.766, + 0.471, + 0.835 + ], + "angle": 0, + "content": "[12] Wei Cheng, Ruixiang Chen, Wanqi Yin, Siming Fan, Keyu Chen, Honglin He, Huiwen Luo, Zhongang Cai, Jingbo Wang, Yang Gao, et al. Dna-rendering: A diverse neural actor repository for high-fidelity human-centric rendering. arXiv preprint arXiv:2307.10173, 2023. 1, 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.835, + 0.471, + 0.889 + ], + "angle": 0, + "content": "[13] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (ToG)," + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.116, + 0.472, + 0.889 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.543, + 0.094, + 0.679, + 0.105 + ], + "angle": 0, + "content": "34(4):1-13,2015.1,2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.106, + 0.894, + 0.146 + ], + "angle": 0, + "content": "[14] Abe Davis, Marc Levoy, and Fredo Durand. Unstructured light fields. In Computer Graphics Forum, pages 305-314. Wiley Online Library, 2012. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.147, + 0.894, + 0.214 + ], + "angle": 0, + "content": "[15] Mingsong Dou, Sameh Khamis, Yury Degtyarev, Philip Davidson, Sean Ryan Fanello, Adarsh Kowdle, Sergio Orts Escolano, Christoph Rhemann, David Kim, Jonathan Taylor, et al. Fusion4d: Real-time performance capture of challenging scenes. ACM TOG, 2016. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.215, + 0.894, + 0.282 + ], + "angle": 0, + "content": "[16] Mingsong Dou, Jonathan Taylor, Henry Fuchs, Andrew Fitzgibbon, and Shahram Izadi. 3d scanning deformable objects with a single rgbd sensor. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 493-501, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.283, + 0.894, + 0.322 + ], + "angle": 0, + "content": "[17] Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. ACM Siggraph Computer Graphics, 22(4):65-74, 1988. 1, 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.322, + 0.893, + 0.363 + ], + "angle": 0, + "content": "[18] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. Deepstereo: Learning to predict new views from the world's imagery. In CVPR, June 2016. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.363, + 0.894, + 0.444 + ], + "angle": 0, + "content": "[19] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12479–12488, 2023. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.444, + 0.894, + 0.512 + ], + "angle": 0, + "content": "[20] Stephan J Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. Fastnerf: High-fidelity neural rendering at 200fps. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14346-14355, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.512, + 0.893, + 0.539 + ], + "angle": 0, + "content": "[21] Steven J Gortler, Radek Grzesczuk, Richard Szeliski, and Michael F Cohen. The lumigraph. In SIGGRAPH, 1996. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.539, + 0.894, + 0.62 + ], + "angle": 0, + "content": "[22] Kaiwen Guo, Peter Lincoln, Philip Davidson, Jay Busch, Xueming Yu, Matt Whalen, Geoff Harvey, Sergio Orts-Escolano, Rohit Pandey, Jason Dourgarian, et al. The relightables: Volumetric performance capture of humans with realistic relighting. ACM Transactions on Graphics (ToG), 38(6):1-19, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.621, + 0.894, + 0.675 + ], + "angle": 0, + "content": "[23] Jon Hasselgren, Nikolai Hofmann, and Jacob Munkberg. Shape, light, and material decomposition from images using monte carlo rendering and denoising. Advances in Neural Information Processing Systems, 35:22856-22869, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.675, + 0.893, + 0.715 + ], + "angle": 0, + "content": "[24] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM TOG, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.715, + 0.894, + 0.768 + ], + "angle": 0, + "content": "[25] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In ICCV, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.769, + 0.893, + 0.837 + ], + "angle": 0, + "content": "[26] Anna Hilsmann, Philipp Fechteler, Wieland Morgenstern, Wolfgang Paier, Ingo Feldmann, Oliver Schreer, and Peter Eisert. Going beyond free viewpoint: creating animatable volumetric video of human performances. IET Computer Vision, pages 350-358, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.837, + 0.894, + 0.892 + ], + "angle": 0, + "content": "[27] Tao Hu, Tao Yu, Zerong Zheng, He Zhang, Yebin Liu, and Matthias Zwicker. Hvtr: Hybrid volumetric-textural rendering for human avatars. In 2022 International Conference on 3D Vision (3DV), pages 197-208. IEEE, 2022." 
+ }, + { + "type": "list", + "bbox": [ + 0.511, + 0.094, + 0.894, + 0.892 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20037" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.094, + 0.131, + 0.104 + ], + "angle": 0, + "content": "2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.106, + 0.471, + 0.173 + ], + "angle": 0, + "content": "[28] Mustafa Isik, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. arXiv preprint arXiv:2305.06356, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.174, + 0.471, + 0.214 + ], + "angle": 0, + "content": "[29] Shubhendu Jena, Franck Multon, and Adnane Boukhayma. Neural mesh-based graphics. In European Conference on Computer Vision, pages 739-757. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.215, + 0.471, + 0.254 + ], + "angle": 0, + "content": "[30] Yue Jiang, Dantong Ji, Zhizhong Han, and Matthias Zwicker. Sdfdiff: Differentiable rendering of signed distance fields for 3d shape optimization. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.255, + 0.471, + 0.294 + ], + "angle": 0, + "content": "[31] Nima Khademi Kalantari, Ting-Chun Wang, and Ravi Ramamoorthi. Learning-based view synthesis for light field cameras. ACM TOG, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.295, + 0.471, + 0.361 + ], + "angle": 0, + "content": "[32] Petr Kellnhofer, Lars C Jebe, Andrew Jones, Ryan Spicer, Kari Pulli, and Gordon Wetzstein. Neural lumigraph rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4287-4297, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.362, + 0.471, + 0.417 + ], + "angle": 0, + "content": "[33] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (TOG), 42(4):1-14, 2023. 1, 2, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.418, + 0.471, + 0.456 + ], + "angle": 0, + "content": "[34] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.457, + 0.471, + 0.512 + ], + "angle": 0, + "content": "[35] Georgios Kopanas, Julien Philip, Thomas Leimkuhler, and George Drettakis. Point-based neural rendering with perview optimization. In Computer Graphics Forum, volume 40, pages 29-43. Wiley Online Library, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.512, + 0.471, + 0.55 + ], + "angle": 0, + "content": "[36] Jonas Kulhanek and Torsten Sattler. Tetra-nerf: Representing neural radiance fields using tetrahedra. arXiv preprint arXiv:2304.09987, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.551, + 0.471, + 0.591 + ], + "angle": 0, + "content": "[37] Kiriakos N Kutulakos and Steven M Seitz. A theory of shape by space carving. International journal of computer vision, 38:199-218, 2000. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.592, + 0.471, + 0.646 + ], + "angle": 0, + "content": "[38] Christoph Lassner and Michael Zollhofer. Pulsar: Efficient sphere-based neural rendering. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1440-1449, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.646, + 0.471, + 0.672 + ], + "angle": 0, + "content": "[39] Marc Levoy and Pat Hanrahan. Light field rendering. In SIGGRAPH, 1996. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.673, + 0.471, + 0.712 + ], + "angle": 0, + "content": "[40] Ruilong Li, Hang Gao, Matthew Tancik, and Angjoo Kanazawa. Nerfacc: Efficient sampling accelerates nerfs. arXiv preprint arXiv:2305.04966, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.712, + 0.471, + 0.78 + ], + "angle": 0, + "content": "[41] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, and Zhaoyang Lv. Neural 3d video synthesis. arXiv preprint arXiv:2103.02597, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.78, + 0.471, + 0.874 + ], + "angle": 0, + "content": "[42] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.875, + 0.471, + 0.89 + ], + "angle": 0, + "content": "[43] Zhan Li, Zhang Chen, Zhong Li, and Yi Xu. Spacetime" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.094, + 0.471, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.094, + 0.892, + 0.12 + ], + "angle": 0, + "content": "gaussian feature splatting for real-time dynamic view synthesis. arXiv preprint arXiv:2312.16812, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.12, + 0.892, + 0.174 + ], + "angle": 0, + "content": "[44] Zhong Li, Yu Ji, Wei Yang, Jinwei Ye, and Jingyi Yu. Robust 3d human motion reconstruction via dynamic template construction. In 2017 International Conference on 3D Vision (3DV), pages 496-505. IEEE, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.174, + 0.892, + 0.214 + ], + "angle": 0, + "content": "[45] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.215, + 0.892, + 0.281 + ], + "angle": 0, + "content": "[46] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4273-4284, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.282, + 0.892, + 0.309 + ], + "angle": 0, + "content": "[47] Zhengqi Li, Wenqi Xian, Abe Davis, and Noah Snavely. Crowdsampling the plenoptic function. In ECCV, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.309, + 0.892, + 0.363 + ], + "angle": 0, + "content": "[48] Haotong Lin, Sida Peng, Zhen Xu, Tao Xie, Xingyi He, Hujun Bao, and Xiaowei Zhou. High-fidelity and real-time novel view synthesis for dynamic scenes. In SIGGRAPH Asia Conference Proceedings, 2023. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.363, + 0.892, + 0.417 + ], + "angle": 0, + "content": "[49] Haotong Lin, Sida Peng, Zhen Xu, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. 
Efficient neural radiance fields for interactive free-viewpoint video. In SIGGRAPH Asia Conference Proceedings, 2022. 1, 2, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.418, + 0.892, + 0.484 + ], + "angle": 0, + "content": "[50] Shanchuan Lin, Linjie Yang, Imran Saleemi, and Soumyadip Sengupta. Robust high-resolution video matting with temporal guidance. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 238-247, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.485, + 0.892, + 0.524 + ], + "angle": 0, + "content": "[51] Shichen Liu, Shunsuke Saito, Weikai Chen, and Hao Li. Learning to infer implicit surfaces without 3d supervision. NeurIPS, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.525, + 0.892, + 0.579 + ], + "angle": 0, + "content": "[52] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. In SIGGRAPH, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.58, + 0.892, + 0.633 + ], + "angle": 0, + "content": "[53] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. ACM Transactions on Graphics (TOG), 40(4):1-13, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.633, + 0.892, + 0.687 + ], + "angle": 0, + "content": "[54] Fan Lu, Yan Xu, Guang Chen, Hongsheng Li, Kwan-Yee Lin, and Changjun Jiang. Urban radiance field representation with deformable neural mesh primitives. arXiv preprint arXiv:2307.10776, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.687, + 0.892, + 0.741 + ], + "angle": 0, + "content": "[55] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. arXiv preprint arXiv:2308.09713, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.741, + 0.892, + 0.808 + ], + "angle": 0, + "content": "[56] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM TOG, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.809, + 0.892, + 0.864 + ], + "angle": 0, + "content": "[57] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.864, + 0.892, + 0.89 + ], + "angle": 0, + "content": "[58] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf:" + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.094, + 0.892, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20038" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.093, + 0.471, + 0.133 + ], + "angle": 0, + "content": "Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.134, + 0.471, + 0.159 + ], + "angle": 0, + "content": "[59] Claus Müller. Spherical harmonics, volume 17. Springer, 2006. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.16, + 0.471, + 0.214 + ], + "angle": 0, + "content": "[60] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.215, + 0.471, + 0.254 + ], + "angle": 0, + "content": "[61] Richard A Newcombe, Dieter Fox, and Steven M Seitz. Dynamicfusion: Reconstruction and tracking of non-rigid scenes in real-time. In CVPR, 2015. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.255, + 0.471, + 0.322 + ], + "angle": 0, + "content": "[62] Sergio Orts-Escolano, Christoph Rhemann, Sean Fanello, Wayne Chang, Adarsh Kowdle, Yury Degtyarev, David Kim, Philip L Davidson, Sameh Khamis, Mingsong Dou, et al. Holoportation: Virtual 3d teleportation in real-time. In UIST, 2016. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.323, + 0.471, + 0.376 + ], + "angle": 0, + "content": "[63] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In ICCV, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.377, + 0.471, + 0.444 + ], + "angle": 0, + "content": "[64] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.445, + 0.471, + 0.497 + ], + "angle": 0, + "content": "[65] Steven Parker, Peter Shirley, and Brian Smits. Single sample soft shadows. Technical report, Technical Report UUCS-98-019, Computer Science Department, University of Utah, 1998. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.499, + 0.471, + 0.608 + ], + "angle": 0, + "content": "[66] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In NeurIPS, 2019. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.609, + 0.471, + 0.662 + ], + "angle": 0, + "content": "[67] Nikolay Patakin, Dmitry Senushkin, Anna Vorontsova, and Anton Konushin. Neural global illumination for inverse rendering. In 2023 IEEE International Conference on Image Processing (ICIP), pages 1580-1584. IEEE, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.663, + 0.471, + 0.729 + ], + "angle": 0, + "content": "[68] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4252-4262, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.73, + 0.471, + 0.756 + ], + "angle": 0, + "content": "[69] Eric Penner and Li Zhang. Soft 3d reconstruction for view synthesis. ACM TOG, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.757, + 0.471, + 0.796 + ], + "angle": 0, + "content": "[70] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. 
D-nerf: Neural radiance fields for dynamic scenes. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.797, + 0.471, + 0.864 + ], + "angle": 0, + "content": "[71] Ruslan Rakhimov, Andrei-Timotei Ardelean, Victor Lempitsky, and Evgeny Burnaev. Npbg++: Accelerating neural point-based graphics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15969-15979, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.865, + 0.471, + 0.892 + ], + "angle": 0, + "content": "[72] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.093, + 0.471, + 0.892 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.093, + 0.894, + 0.119 + ], + "angle": 0, + "content": "thousands of tiny mpls. In ICCV, pages 14335-14345, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.121, + 0.894, + 0.16 + ], + "angle": 0, + "content": "[73] Darius Rückert, Linus Franke, and Marc Stamminger. Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics (ToG), 41(4):1-14, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.161, + 0.894, + 0.2 + ], + "angle": 0, + "content": "[74] Jason Sanders and Edward Kandrot. CUDA by example: an introduction to general-purpose GPU programming. Addison-Wesley Professional, 2010. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.201, + 0.894, + 0.254 + ], + "angle": 0, + "content": "[75] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. arXiv, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.255, + 0.894, + 0.294 + ], + "angle": 0, + "content": "[76] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.295, + 0.894, + 0.334 + ], + "angle": 0, + "content": "[77] Dave Shreiner et al. OpenGL programming guide: the official guide to learning OpenGL, versions 3.0 and 3.1. Pearson Education, 2009. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.335, + 0.894, + 0.375 + ], + "angle": 0, + "content": "[78] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.376, + 0.894, + 0.442 + ], + "angle": 0, + "content": "[79] Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems, 34:19313-19325, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.444, + 0.894, + 0.496 + ], + "angle": 0, + "content": "[80] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhöfer. Deepvoxels: Learning persistent 3d feature embeddings. In CVPR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.497, + 0.894, + 0.537 + ], + "angle": 0, + "content": "[81] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In NeurIPS, 2019. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.538, + 0.894, + 0.618 + ], + "angle": 0, + "content": "[82] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.62, + 0.894, + 0.687 + ], + "angle": 0, + "content": "[83] Pratul P Srinivasan, Richard Tucker, Jonathan T Barron, Ravi Ramamoorthi, Ren Ng, and Noah Snively. Pushing the boundaries of view extrapolation with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 175-184, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.688, + 0.894, + 0.769 + ], + "angle": 0, + "content": "[84] Zhuo Su, Lan Xu, Zerong Zheng, Tao Yu, Yebin Liu, and Lu Fang. Robustfusion: Human volumetric capture with data-driven visual cues using a rgbd camera. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part IV 16, pages 246-264. Springer, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.77, + 0.894, + 0.836 + ], + "angle": 0, + "content": "[85] Mohammed Suhail, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Light field neural rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8269-8279, June 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.837, + 0.894, + 0.891 + ], + "angle": 0, + "content": "[86] Richard Szeliski and Polina Golland. Stereo matching with transparency and matting. In Sixth International Conference on Computer Vision (IEEE Cat. No. 98CH36271), pages 517-524. IEEE, 1998. 2" + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.093, + 0.894, + 0.891 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20039" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "[87] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, and Huaping Liu. Mixed neural voxels for fast multi-view video synthesis. arXiv preprint arXiv:2212.00190, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.134, + 0.472, + 0.202 + ], + "angle": 0, + "content": "[88] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 76-87, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.202, + 0.472, + 0.284 + ], + "angle": 0, + "content": "[89] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.284, + 0.472, + 0.378 + ], + "angle": 0, + "content": "[90] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2021. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.379, + 0.472, + 0.433 + ], + "angle": 0, + "content": "[91] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time view synthesis with neural basis expansion. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.433, + 0.472, + 0.488 + ], + "angle": 0, + "content": "[92] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.488, + 0.472, + 0.542 + ], + "angle": 0, + "content": "[93] Minye Wu, Yuehao Wang, Qiang Hu, and Jingyi Yu. Multiview neural human rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1682-1691, 2020. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.542, + 0.472, + 0.596 + ], + "angle": 0, + "content": "[94] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.596, + 0.472, + 0.649 + ], + "angle": 0, + "content": "[95] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv 2310.10642, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.649, + 0.472, + 0.69 + ], + "angle": 0, + "content": "[96] Alex Yu, Sara Fridovich-Keil, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.69, + 0.472, + 0.758 + ], + "angle": 0, + "content": "[97] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.758, + 0.472, + 0.798 + ], + "angle": 0, + "content": "[98] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.798, + 0.472, + 0.867 + ], + "angle": 0, + "content": "[99] Tao Yu, Zerong Zheng, Kaiwen Guo, Pengpeng Liu, Qionghai Dai, and Yebin Liu. Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5746-5756, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.867, + 0.471, + 0.892 + ], + "angle": 0, + "content": "[100] Tao Yu, Zerong Zheng, Kaiwen Guo, Jianhui Zhao, Qionghai Dai, Hao Li, Gerard Pons-Moll, and Yebin Liu. Doublefu" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.892 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.092, + 0.894, + 0.133 + ], + "angle": 0, + "content": "sion: Real-time capture of human performances with inner body shapes from a single depth sensor. In CVPR, 2018. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.134, + 0.894, + 0.188 + ], + "angle": 0, + "content": "[101] Qiang Zhang, Seung-Hwan Baek, Szymon Rusinkiewicz, and Felix Heide. Differentiable point-based radiance fields for efficient view synthesis. In SIGGRAPH Asia 2022 Conference Papers, pages 1-12, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.188, + 0.894, + 0.256 + ], + "angle": 0, + "content": "[102] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.256, + 0.894, + 0.31 + ], + "angle": 0, + "content": "[103] C Lawrence Zitnick, Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM TOG, 2004. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.092, + 0.894, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "20040" + } + ] +] \ No newline at end of file diff --git a/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/6d74ca33-515d-4b03-96e1-8cccfa68be60_origin.pdf b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/6d74ca33-515d-4b03-96e1-8cccfa68be60_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f5e1e2fbf3b9ad46627b4d02732d44544d8c35dc --- /dev/null +++ b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/6d74ca33-515d-4b03-96e1-8cccfa68be60_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15901be89dca723e014c8bf1273e2e525fe8a50cbb091f4d43519ada128ace74 +size 5149594 diff --git a/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/full.md b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/full.md new file mode 100644 index 0000000000000000000000000000000000000000..2a8d3f169bb34771cc992df7fd832439c56945ba --- /dev/null +++ b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/full.md @@ -0,0 +1,368 @@ +# 4K4D: Real-Time 4D View Synthesis at 4K Resolution + +Zhen Xu $^{1}$ Sida Peng $^{1}$ Haotong Lin $^{1}$ Guangzhao He $^{1}$ +Jiaming Sun $^{1}$ Yujun Shen $^{2}$ Hujun Bao $^{1}$ Xiaowei Zhou $^{1*}$ + +$^{1}$ Zhejiang University $^{2}$ Ant Group + +![](images/b0fb1ae180e4f2553cc3a444670a453b8a845be6541eb9f3574892ad7f2f6b5a.jpg) +Figure 1. Photorealistic and real-time rendering of dynamic 3D scenes. Our proposed method reconstructs a 4D neural representation from multi-view videos, which can be rendered at $1125 \times 1536$ resolution with a speed of over 200 FPS using an RTX 3090 GPU while maintaining state-of-the-art quality on the DNA-Rendering [12] dataset. It is also noteworthy that our method reaches over 80 FPS when rendering 4K images with an RTX 4090. Detailed performance under different resolutions using different GPUs can be found in Tab. 5. + +![](images/3f817075de57a429debe67895115d7c6df1e274d885f35bf8b436ebc8086815b.jpg) + +![](images/28cd712e6c35670581fbf72f353f01453419038cb2a31a7a8d9021b2a0f10d68.jpg) + +![](images/ed1b4d46da56e20fd18e50a6c13f84f93f52b87e7220afe789fbc560d6e54fcc.jpg) + +# Abstract + +This paper targets high-fidelity and real-time view synthesis of dynamic 3D scenes at $4K$ resolution. Recent methods on dynamic view synthesis have shown impressive rendering quality. 
However, their speed is still limited when rendering high-resolution images. To overcome this problem, we propose 4K4D, a 4D point cloud representation that supports hardware rasterization and network pre-computation to enable unprecedented rendering speed with a high rendering quality. Our representation is built on a 4D feature grid so that the points are naturally regularized and can be robustly optimized. In addition, we design a novel hybrid appearance model that significantly boosts the rendering quality while preserving efficiency. Moreover, we develop a differentiable depth peeling algorithm to effectively learn the proposed model from RGB videos. Experiments show that our representation can be rendered at over 400 FPS on the DNA-Rendering dataset at 1080p resolution and 80 FPS on the ENeRF-Outdoor dataset at $4K$ resolution using an RTX 4090 GPU, which is $30\times$ faster than previous methods and achieves the state-of-the-art rendering quality. Our project page is available at https://zju3dv.github.io/4k4d. + +# 1. Introduction + +Dynamic view synthesis aims to reconstruct dynamic 3D scenes from captured videos and create free-viewpoint and immersive virtual playback, which is a long-standing research problem in computer vision and computer graphics. Essential to the practicality of this technique is its ability to be rendered in real-time with high fidelity. Traditional methods [7, 13, 15, 16, 26, 61, 62, 84, 99, 100] represent dynamic 3D scenes as textured mesh sequences which can be rendered efficiently. However, high-quality mesh reconstruction requires complicated capture hardware and is limited to controlled environments. + +Recently, implicit neural representations [19, 42, 58] have shown great success in reconstructing dynamic 3D scenes from RGB videos via differentiable rendering. For example, Li et al. [42] model the target scene as a dynamic neural radiance field and leverage volume rendering [17] to synthesize images. Despite impressive view synthesis results, existing approaches typically require seconds or even minutes to render an image at $1080\mathrm{p}$ resolution due to the costly network evaluation, as discussed by Peng et al. [68]. Inspired by static view synthesis approaches [20, 33, 97], some dynamic view synthesis methods [2, 49, 68, 89] increase the rendering speed by decreasing either the + +network size or the number of network evaluations. With these strategies, such methods achieve over 40 FPS when rendering moderate-resolution images $(384 \times 512)$ [49, 68], but are still not fast enough to achieve real-time performance when rendering high-resolution images. For instance, when rendering 4K resolution images, their speed reduces to only 1 or 2 FPS [2, 49, 68]. + +In this paper, we propose a novel neural representation, named 4K4D, for modeling and rendering dynamic 3D scenes. As illustrated in Fig. 1, 4K4D significantly outperforms previous dynamic view synthesis approaches [19, 49] in terms of the rendering speed, while being competitive in the rendering quality. Our core innovation lies in a 4D point cloud representation and a hybrid appearance model. + +Specifically, for the dynamic scene, we obtain the coarse point cloud sequence using space carving [37] and model the position of each point as a learnable vector. A 4D feature grid is introduced for assigning a feature vector to each point, which is fed into MLP networks to predict the point's radius, density, and spherical harmonics (SH) coefficients [59]. 
The 4D feature grid naturally applies spatial regularization on the point clouds and makes the optimization more robust (Sec. 5.2). During inference, the point's radius, density and SH coefficients can be pre-computed, which eliminates network evaluations to achieve unprecedented rendering speed. Moreover, we develop a differentiable depth peeling algorithm that exploits the hardware rasterizer to further significantly accelerate the rendering. + +We empirically find that the image blending model [49] achieves higher rendering quality than the SH model used by 3DGS [33]. However, the image blending model of previous methods [48, 49, 90] requires slow network evaluations during inference, limiting their rendering speed. To alleviate this, we introduce a novel design where we make the image blending network independent of the viewing direction, so the network evaluation can be pre-computed and thereby boost the rendering speed. As a two-edged sword, this strategy makes the appearance model discrete along the viewing direction. This downside is compensated for by using another continuous SH model. + +To validate the effectiveness of the proposed pipeline, we evaluate 4K4D on multiple widely used datasets for multi-view dynamic novel view synthesis, including NHR [93], ENeRF-Outdoor [49], DNA-Rendering [12], and Neural3DV [41]. Extensive experiments show that 4K4D could not only be rendered orders of magnitude faster but also notably outperform the baselines in terms of rendering quality. With an RTX 4090 GPU, our method reaches 400 FPS on the DNA-Rendering dataset at $1080\mathrm{p}$ resolution and 80 FPS on the ENeRF-Outdoor dataset at 4K resolution. + +# 2. Related Work + +Traditional scene representations. In the domain of novel view synthesis, various approaches based on different representations have been proposed, including multi-view image-based methods [6, 8, 18, 31, 69, 103], multi-plane image representations [47, 56, 65, 83, 86, 86], light-field techniques [14, 21, 39] as well as explicit surface or voxel-based methods [5, 13, 15, 22, 44, 61, 62, 100]. The seminal work [13] utilizes depth sensors and multi-view stereo techniques to consolidate per-view depth information into a coherent mesh sequence, producing high-quality volumetric video. These methods require intricate hardware setups and studio arrangements, thus constraining their accessibility. + +Neural scene representations. Recently, implicit neural scene representations[3, 24, 27, 30, 32, 51, 52, 58, 76, 79-81, 85, 91] have attracted significant interest among researchers. NeRF[58] encodes the radiance fields of static scenes using coordinate-based Multi-Layer Perceptrons (MLP), achieving exceptional novel view synthesis quality. Building upon NeRF, a collection of studies [28, 42, 45, 63, 64, 70, 93] have made extensions to accommodate for dynamic scenes. Another line of studies [10, 46, 90, 98] has focused on integrating image features into the NeRF rendering pipeline. This approach is easily applicable to dynamic scenes, as multi-view videos can be directly decomposed into multiview images. However, NeRF-based approaches often suffer from substantial network evaluation costs during the volume rendering process, which significantly limits their rendering speed and thus hinders their practicality. + +Accelerating neural scene representations. 
To accelerate NeRF's rendering, multiple works propose to distill implicit MLP networks into explicit structures that offer fast query capabilities, including voxel grids [20, 25, 40, 60, 72, 96, 97], explicit surfaces [11, 23, 29, 36, 54, 67] and point-based representations [1, 33, 35, 38, 71, 73, 101]. These methods effectively reduce the cost or the number of NeRF's MLP evaluations required. Inspired by their success, several approaches [2, 9, 48, 49, 53, 68, 75, 82, 82, 87, 88] have explored the possibility of real-time dynamic view synthesis. HyperReel [2] employs a primitive prediction module to reduce the number of network evaluations, thereby achieving real-time speed at moderate resolutions. However, it should be noted that their rendering speed decreases significantly when rendering higher-resolution images. + +Gaussian Splatting. One notable advancement for accelerating NeRF is the development of 3D Gaussian Splatting (3DGS) [33] which introduces a differentiable Gaussian ellipsoids splatting algorithm for fast and differentiable volume rendering [4, 17]. By effectively eliminating the slow ray marching operation of NeRF with forward splatting and SH [59], they attain both high-fidelity and high-speed rendering. However, the storage cost of 3DGS limits its application + +![](images/8263b0399b19b14a758fd575b36e4ce00332f08d44187388974d28064e0dae2c.jpg) +Figure 2. Overview of our proposed pipeline. (a) By applying the space-carving algorithm [37], we extract the initial cloud sequence $\mathbf{x}, t$ of the target scene. A 4D feature grid [19] is predefined to assign a feature vector to each point, which is then fed into MLPs for the scene geometry and appearance. (b) The geometry model is based on the point location, radius, and density, which forms a semi-transparent point cloud. (c) The appearance model consists of a piece-wise constant IBR term $\mathbf{c}_{ibr}$ and a continuous SH model $\mathbf{c}_{sh}$ . (d) The proposed representation is learned from multi-view RGB videos through the differentiable depth peeling algorithm. + +![](images/7de42945a0f4e745cdc3461bc94436f18b6ef2629702323ceb0f5e8325f1b54b.jpg) + +![](images/329e908cc5e9c60be22f7d7ed333720940f31ab0bc5c81894b25a155989e7ae4.jpg) + +on dynamic scenes. In contrast, the 4D feature grid and image blending model of 4K4D could not only maintain similar rendering quality but also significantly reduce the storage cost for modeling dynamic scenes. Moreover, the simpler point cloud representation and the 4D feature grid regularization also make 4K4D less prone to overfitting training views than 3DGS. Some recent concurrent works [43, 55, 92, 94, 95] have also reported real-time rendering speeds by incorporating temporal correspondence or time-dependency into 3DGS. However, these methods either do not show results on datasets with large and fast motions [43, 95] (like NHR [93]) or only report real-time speed at moderate resolution $(800\times 800$ [92] and $640\times 480$ [55]). In contrast, 4K4D is capable of real-time rendering even at 4K resolution while concurrently maintaining state-of-the-art view-synthesis quality on large-motion data. + +# 3. Proposed Approach + +Given a multi-view video capturing a dynamic 3D scene, our goal is to reconstruct the target scene and perform novel view synthesis in real time. To this end, we extract coarse point clouds of the scene using the space-carving algorithm [37] (Sec. 
4) and build a point cloud-based neural scene representation, which can be robustly learned from input videos and enable the hardware-accelerated rendering. + +The overview of the proposed model is presented in Fig. 2. In this section, we first describe how to represent the geometry and appearance of dynamic scenes based on point clouds and neural networks (Sec. 3.1). Then, we develop a differentiable depth peeling algorithm for rendering our representation (Sec. 3.2), which is supported by the hardware rasterizer, thereby significantly improving the rendering speed. Finally, we discuss how to optimize the proposed model on input RGB videos (Sec. 3.3). + +# 3.1. Modeling Dynamic Scenes with Point Clouds + +4D embedding. Given the coarse point clouds of the target scene, we represent its dynamic geometry and appearance using neural networks and feature grids. Specifically, our method first defines six feature planes $\theta_{xy}, \theta_{xz}, \theta_{yz}, \theta_{tx}, \theta_{ty}$ , and $\theta_{tz}$ . To assign a feature vector $\mathbf{f}$ to any point $\mathbf{x}$ at frame $t$ , we adopt the strategy of K-Planes [19] to model a 4D feature field $\Theta(\mathbf{x}, t)$ using these six planes: + +$$ +\begin{array}{l} \mathbf {f} = \Theta (\mathbf {x}, t) = \theta_ {x y} (x, y) \oplus \theta_ {x z} (x, z) \oplus \theta_ {y z} (y, z) \oplus \\ \theta_ {t x} (t, x) \oplus \theta_ {t y} (t, y) \oplus \theta_ {t z} (t, z), \tag {1} \\ \end{array} +$$ + +where $\mathbf{x} = (x,y,z)$ is the input point, and $\oplus$ indicates the concatenation operator. Please refer to K-Planes [19] for more implementation details. + +Geometry model. Based on coarse point clouds, the dynamic scene geometry is represented by learning three entries on each point: position $\mathbf{p} \in R^3$ , radius $r \in R$ , and density $\sigma \in R$ . Using these point entries, we calculate the volume density of space point $\mathbf{x}$ with respect to an image pixel $\mathbf{u}$ for the volume rendering, which will be described in Sec. 3.2. The point position $\mathbf{p}$ is modeled as an estimizable vector. The radius $r$ and density $\sigma$ are predicted by feeding the feature vector $\mathbf{f}$ in Eq. (1) to an MLP network. + +Appearance model. As illustrated in Fig. 2c, we use the image blending technique and the spherical harmonics (SH) model [59, 97] to build a hybrid appearance model, where the image blending technique represents the discrete view-dependent appearance $\mathbf{c}_{ibr}$ and the SH model represents the continuous view-dependent appearance $\mathbf{c}_{sh}$ . For point $\mathbf{x}$ at frame $t$ , its color with viewing direction $\mathbf{d}$ is: + +$$ +\mathbf {c} (\mathbf {x}, t, \mathbf {d}) = \mathbf {c} _ {i b r} (\mathbf {x}, t, \mathbf {d}) + \mathbf {c} _ {s h} (\mathbf {s}, \mathbf {d}), \tag {2} +$$ + +where $s$ means SH coefficients at point $x$ . + +The discrete view-dependent appearance $\mathbf{c}_{ibr}$ is inferred based on input images. Specifically, for a point $\mathbf{x}$ , we first project it into the input image to retrieve the corresponding RGB color $\mathbf{c}_{img}^i$ . Then, to blend input RGB colors, we calculate the corresponding blending weight $w^i$ based on the point coordinate and the input image. Note that the blending weight is independent from the viewing direction. Next, to achieve the view-dependent effect, we select the $N'$ nearest input views according to the viewing direction. 
Finally, the color $\mathbf{c}_{ibr}$ is computed as $\sum_{i=1}^{N'} w^i \mathbf{c}_{img}^i$ . Because the $N'$ input views are obtained through the nearest neighbor retrieval, the $\mathbf{c}_{ibr}$ is inevitably discrete along the viewing direction. To achieve the continuous view-dependent effect, we append the fine-level color $\mathbf{c}_{sh}$ represented by the SH model, as shown in Fig. 2c. + +In practice, our method regresses the SH coefficients $\mathbf{s}$ by passing the point feature $\mathbf{f}$ in Eq. (1) into an MLP network. To predict the blending weight $w^{i}$ in the image blending model $\mathbf{c}_{ibr}$ , we first project point $\mathbf{x}$ onto the input image to retrieve the image feature $\mathbf{f}_{img}^{i}$ , and then concatenate it with the point feature $\mathbf{f}$ , which is fed into another MLP network to predict the blending weight. The image feature $\mathbf{f}_{img}^{i}$ is extracted using a 2D CNN network. + +Discussion. Our appearance model is the key to achieving the low-storage, high-fidelity, and real-time view synthesis of dynamic scenes. There are three alternative ways to represent the dynamic appearance, but they cannot perform on par with our model. 1) Defining explicit SH coefficients on each point, as in 3D Gaussian splatting [33]. When the degree of SH coefficients is high and the amount of points of dynamic scenes is large, this model's size could be too big to train on a consumer GPU. 2) MLP-based SH model. Using an MLP to predict SH coefficients of each point can effectively decrease the model size. However, our experiments found that MLP-based SH model struggles to render high-quality images (Sec. 5.2). 3) Continuous view-dependent image blending model, as in ENeRF [49]. We found that representing the appearance with the image blending model exhibits better rendering quality than only with the MLP-based SH model. However, the color network in ENeRF takes the viewing direction as input and thus cannot be easily pre-computed, limiting the rendering speed during inference. + +In contrast to these three methods, our appearance model combines a discrete image blending model $\mathbf{c}_{ibr}$ with a continuous SH model $\mathbf{c}_{sh}$ . The image blending model $\mathbf{c}_{ibr}$ boosts the rendering performance. In addition, it supports the pre-computation, as its network does not take the viewing direction as input. The SH model $\mathbf{c}_{sh}$ enables the view-dependent effect for any viewing direction. During training, our model represents the scene appearance using networks, so its model size is reasonable. During inference, we pre-compute the network outputs to achieve the real-time rendering, which will be described in Sec. 3.4. + +# 3.2. Differentiable Depth Peeling + +Our proposed dynamic scene representation can be rendered into images by performing volume rendering [17] on rasterized points. This forward process is much faster than NeRF's backward ray-marching operation [57] since it requires no network evaluation and explicit sampling. The volume rendering equation requires the color and transparency values to be integrated in order [4], thus we utilize the depth-peeling algorithm for acquiring the corresponding ordered points for pixels. Thanks to the point cloud representation, we can leverage the hardware rasterizer to significantly speed up the depth peeling and blending process. Moreover, it is easy to make this rendering process differentiable, enabling us to learn our model from input RGB videos. 
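For intuition, the per-pixel compositing that this section builds up to can be summarized in a short PyTorch-style sketch. This is only an illustrative reference under assumed tensor shapes: the function below and the assumption that the $K$ depth-sorted layers hitting a pixel are already available are ours, not taken from the paper's implementation, which runs these steps in a custom shader on the hardware rasterizer as described next.

```python
import torch

def composite_peeled_layers(proj_xy, pix_xy, sigma, radius, rgb):
    """Front-to-back compositing over K depth-peeled layers of one pixel.

    proj_xy: (K, 2) projected 2D positions pi(x_k) of the K sorted points,
             closest to the camera first.
    pix_xy:  (2,)   the pixel center u.
    sigma:   (K,)   per-point density.
    radius:  (K,)   per-point radius in screen space.
    rgb:     (K, 3) per-point color c_k (e.g. the sum of c_ibr and c_sh).
    """
    # Per-pixel opacity: maximal at the point center and falling off
    # quadratically to zero at the point radius (cf. Eq. (3) below).
    d2 = ((proj_xy - pix_xy) ** 2).sum(dim=-1)
    alpha = sigma * torch.clamp(1.0 - d2 / radius ** 2, min=0.0)

    # Transmittance T_k = prod_{j < k} (1 - alpha_j), accumulated in depth order.
    trans = torch.cumprod(
        torch.cat([torch.ones_like(alpha[:1]), 1.0 - alpha[:-1]]), dim=0)

    # Volume-rendered pixel color C(u) = sum_k T_k * alpha_k * c_k (cf. Eq. (4) below).
    return ((trans * alpha).unsqueeze(-1) * rgb).sum(dim=0)
```

Written this way the whole chain is differentiable, so gradients can reach the point positions, radii, densities, and colors during training; at inference time the equivalent arithmetic is carried out by the rasterization passes instead.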
+ +We develop a custom shader to implement the depth peeling algorithm that consists of $K$ rendering passes. Consider a particular image pixel $\mathbf{u}$ . In the first pass, our method first uses the hardware rasterizer to render point clouds onto the image, which assigns the closest-to-camera point $\mathbf{x}_0$ to the pixel $\mathbf{u}$ . Denote the depth of point $\mathbf{x}_0$ as $t_0$ . Subsequently, in the $k$ -th rendering pass, all points with depth value $t_k$ smaller than the recorded depth of the previous pass $t_{k-1}$ are discarded, thereby resulting in the $k$ -th closest-to-camera point $\mathbf{x}_k$ for the pixel $\mathbf{u}$ . Discarding closer points is implemented in our custom shader, so it still supports the hardware rasterization. After $K$ rendering passes, pixel $\mathbf{u}$ has a set of sorted points $\{\mathbf{x}_k | k = 1, \dots, K\}$ . + +Based on the sorted points, we use the volume rendering technique to synthesize the color of pixel $\mathbf{u}$ . The densities of these points for pixel $\mathbf{u}$ are defined based on the distance between the projected point and pixel $\mathbf{u}$ on the 2D image: + +$$ +\alpha (\mathbf {u}, \mathbf {x}) = \sigma \cdot \max (1 - \frac {| | \pi (\mathbf {x}) - \mathbf {u} | | _ {2} ^ {2}}{r ^ {2}}, 0), \qquad (3) +$$ + +where $\pi$ is the camera projection function. $\sigma$ and $r$ are the density and radius of point $\mathbf{x}$ , which are described in Sec. 3.1. Intuitively, Eq. (3) defines a semi-transparent point representation where the density is the highest around the center and quadratically decreases along its radius. During training, we implement the projection function $\pi$ using the PyTorch [66], so Eq. (3) is naturally differentiable. During inference, we leverage the hardware rasterization process to efficiently obtain the distance $\| \pi (\mathbf{x}) - \mathbf{u}\| _2^2$ , which is implemented using OpenGL [77]. + +Denote the density of point $\mathbf{x}_k$ as $\alpha_{k}$ . The color of pixel $\mathbf{u}$ from the volume rendering is formulated as: + +$$ +C (\mathbf {u}) = \sum_ {k = 1} ^ {K} T _ {k} \alpha_ {k} \mathbf {c} _ {k}, \text {w h e r e} T _ {k} = \prod_ {j = 1} ^ {k - 1} (1 - \alpha_ {j}), \tag {4} +$$ + +where $\mathbf{c}_k$ is the color of point $\mathbf{x}_k$ , as described in Eq. (2). + +# 3.3. Training + +Given the rendered pixel color $C(\mathbf{u})$ , we compare it with the ground-truth pixel color $C_{gt}(\mathbf{u})$ to optimize our model in an end-to-end fashion using the following loss function: + +$$ +L _ {i m g} = \sum_ {\mathbf {u} \in \mathcal {U}} | | C (\mathbf {u}) - C _ {g t} (\mathbf {u}) | | _ {2} ^ {2}, \tag {5} +$$ + +where $\mathcal{U}$ is the set of image pixels. In addition to the MSE loss $L_{img}$ , we also apply the perceptual loss $L_{lpips}$ [102]. + +$$ +L _ {l p i p s} = \left\| \Phi (I) - \Phi \left(I _ {g t}\right) \right\| _ {1}, \tag {6} +$$ + +where $\Phi$ is the perceptual function (a VGG16 network) and $I, I_{gt}$ are the rendered and ground-truth images, respectively. The perceptual loss [102] computes the difference in image features extracted from the VGG model [78]. Our experiments in Sec. 5.2 show that it effectively improves the perceived quality of the rendered image. + +To regularize the optimization process of our proposed representation, we additionally apply mask supervision to dynamic regions of the target scene. 
We solely render point clouds of dynamic regions to obtain their masks, where the pixel value is obtained by: + +$$ +M (\mathbf {u}) = \sum_ {k = 1} ^ {K} T _ {k} \alpha_ {k}, \text {w h e r e} T _ {k} = \prod_ {j = 1} ^ {k - 1} (1 - \alpha_ {j}). \tag {7} +$$ + +The mask loss is defined as: + +$$ +L _ {m s k} = - \sum_ {\mathbf {u} \in \mathcal {U} ^ {\prime}} | | M (\mathbf {u}) - M _ {g t} (\mathbf {u}) | | _ {2} ^ {2}, \tag {8} +$$ + +where $\mathbf{U}'$ means the set of pixels of the rendered mask, and $M_{gt}$ is the ground-truth mask of 2D dynamic regions. This effectively regularizes the optimization of the geometry of dynamic regions by confining it to the visual hulls. + +The final loss function is defined as + +$$ +L = L _ {i m g} + \lambda_ {l p i p s} L _ {l p i p s} + \lambda_ {m s k} L _ {m s k}, \tag {9} +$$ + +where $\lambda_{lpips}$ and $\lambda_{msk}$ are hyperparameters controlling weights of correspondings losses. + +# 3.4. Inference + +After training, we apply a few acceleration techniques to boost the rendering speed of our model. First, we precompute the point location $\mathbf{p}$ , radius $r$ , density $\sigma$ , SH coefficients $\mathbf{s}$ and color blending weights $w_{i}$ before inference, which are stored at the main memory. During rendering, these properties are asynchronously streamed onto the graphics card, overlapping rasterization with memory copy to achieve an optimal rendering speed [74, 77]. After applying this technique, the runtime computation is reduced to only a + +depth peeling evaluation (Sec. 3.2) and a spherical harmonics evaluation (Eq. (2)). Second, we convert the model from 32-bit floats to 16-bits for efficient memory access, which increases FPS by 20 and leads to no visible performance loss. Third, the number of rendering passes $K$ for the differentiable depth peeling algorithm is reduced from 15 to 12, also leading to a 20 FPS speedup with no visual quality change. Detailed analyses of rendering speed can be found in Sec. 5.2 and the supplementary material. + +# 4. Implementation Details + +**Optimization.** 4K4D is trained using the PyTorch framework [66]. Using the Adam optimizer [34] with a learning rate $5e^{-3}$ , our models typically converge after 800k iterations for a sequence length of 200 frames, which takes around 24 hours on a single RTX 4090 GPU. Specifically, the learning rate of point positions is set to $1e^{-5}$ , and the regularization loss weights $\lambda_{lpips}$ and $\lambda_{msk}$ are set to $1e^{-3}$ . During training, the number of passes $K$ for the differentiable depth peeling is set to 15, and the number of nearest input views $N'$ is set to 4. The rendering speed of our method is reported on an RTX 3090 GPU for the experiments in Sec. 5 unless otherwise stated. + +Initialization of point clouds. We leverage existing multi-view reconstruction methods to initialize the point clouds. For dynamic regions, we use segmentation methods [50] to obtain their masks in input images and utilize the space carving algorithm [37] to extract their coarse geometry. For static background regions, we leverage foreground masks to compute the mask-weighted average of background pixels along all frames, producing background images without the foreground content. Then, an Instant-NGP [60] model is trained on these images, from which we obtain the initial point clouds. 
After the initialization, the number of points for the dynamic regions is typically 250k per frame, and the static background regions typically consist of 300k points. + +# 5. Experiments + +Datasets. We train and evaluate our method 4K4D on multiple widely used multi-view datasets, including DNA-Rendering [12], ENeRF-Outdoor [49] and NHR [93]. DNA-Rendering [12] records 10-second clips of dynamic humans and objects at 15 FPS using 4K and 2K cameras with 60 views. This dataset is very challenging due to the complex clothing and fast motions. We conduct experiments on 4 sequences of DNA-Rendering, with $90\%$ of the views as training set and the rest as evaluation set. ENeRF-Outdoor [49] records multiple dynamic humans and objects in an outdoor environment at 30FPS using 1080p cameras. We select three 100-frame sequences with 6 different actors (2 for each sequence) holding objects for evaluation. This dataset is difficult for dynamic view synthesis in that not + +![](images/da01e3d1ddadfb94305be6ee9665ddfa42d9879fee5e3749e0013cae36d2789b.jpg) + +![](images/d6a4a05671ed372f6cc4eaf2356e03e5851e55f31dfed36e5afa7ab9cfdc5987.jpg) + +![](images/8448b6205d5f3750fdd32862e4c2e07339d1e31c341f9093e0b692bfa213231b.jpg) + +![](images/54c759ca7544129ece803df48055b02a5160ce6d41db1949ca24e86e9ce82588.jpg) + +![](images/8f56c783ddcdeebf5901ee9125abb11118f008d803cf8d5646b153d7fc16d626.jpg) +Ground Truth + +![](images/7c4fcdde4f5179c67115d00467d021fbb193d34092ad9b99e4921e7228f1ffdc.jpg) +Ours (141.7 FPS) + +![](images/9ca5a62f0b9b3a0fc51c8298f082a99c38a2c2599c9a05a58470a6e6a8cd5746.jpg) +ENeRF (11.3 FPS) +Figure 3. Qualitative comparison on the ENeRF-Outdoor [49] dataset that contains $960 \times 540$ images. Our method achieves much higher rendering quality and can be rendered $14 \times$ faster than ENeRF[49]. More dynamic results can be found in the supplementary video. + +![](images/7ce9783f5b032071f106eee239d5ee3eb79e70e82d662012b281522fb2d7278c.jpg) +KPlanes (1.4 FPS) + +only are there multiple moving humans and objects, but the background is also dynamic due to cast shadows. More details can be found in the supplementary. + +# 5.1. Comparison Experiments + +Comparison on DNA-Rendering [12]. Qualitative and quantitative comparisons on DNA-Rendering [12] are shown in Fig. 4 and Tabs. 1 and 3 respectively. As evident in Tab. 1, our method renders $30\mathrm{x}$ faster than the SOTA real-time dynamic view synthesis method ENeRF [49] with superior quality. Even when compared with concurrent work [48], our method still achieves $13\mathrm{x}$ speedup and produces consistently higher quality images. As shown in Fig. 4, KPlanes [19] could not recover the highly detailed appearance and geometry of the 4D dynamic scene. Other image-based methods [48, 49, 90] produce high-quality appearance. However, they tend to produce blurry results around occlusions and edges, leading to degradation of the visual quality while maintaining interactive framerate at best. When compared with 3DGS [33] on the first frame of each sequence, our method achieves a much better storage efficiency $(50\times)$ thanks to our compact 4D feature grid and image blending model. Moreover, due to the simplicity of our point-based representation, our method is less prone to overfit the training views. More details of the comparison with 3DGS can be found in the supplementary material. + +Comparison on ENeRF-Outdoor [49]. Fig. 3 and Tabs. 2 and 3 provides qualitative and quantitative results on the ENeRF-Outdoor [49] dataset. 
Even on the challenging ENeRF-Outdoor dataset with multiple actors and the back + +Table 1. Quantitative comparison on the DNA-Rendering [12] dataset. Image resolutions are ${1024} \times {1224}$ and ${1125} \times {1536}$ . Metrics are averaged over all scenes. Green and yellow cell colors indicate the best and the second best results, respectively. + +
| Method | PSNR ↑ | SSIM ↑ | LPIPS ↓ | FPS |
| --- | --- | --- | --- | --- |
| ENeRF [49] | 28.108 | 0.972 | 0.056 | 6.011 |
| IBRNet [90] | 27.844 | 0.967 | 0.081 | 0.100 |
| KPlanes [19] | 27.452 | 0.952 | 0.118 | 0.640 |
| Im4D [48] | 28.991 | 0.973 | 0.062 | 15.360 |
| Ours | 31.173 | 0.976 | 0.055 | 203.610 |
+ +Table 2. Quantitative comparison on the ENeRF-Ourdoor [49] dataset. This dataset includes ${960} \times {540}$ images. Green and yellow cell colors indicate the best and the second-best results, respectively. + +
| Method | PSNR ↑ | SSIM ↑ | LPIPS ↓ | FPS |
| --- | --- | --- | --- | --- |
| ENeRF [49] | 25.452 | 0.809 | 0.273 | 11.309 |
| IBRNet [90] | 24.966 | 0.929 | 0.172 | 0.140 |
| KPlanes [19] | 21.310 | 0.735 | 0.454 | 1.370 |
| Ours | 25.815 | 0.898 | 0.147 | 141.665 |
+ +ground, our method still achieves notably better results while rendering at over 140 FPS. ENeRF [49] produces blurry results on this challenging dataset, and the rendering results of IBRNet [90] contain black artifacts around the edges of the images as shown in Fig. 3. K-Planse [19] fails to reconstruct the dynamic humans and varying background regions. 3DGS [33] not only introduces much higher storage cost than our method $(45\times)$ , but also faces even more pronounced overfitting problem with smaller number of views (18 for ENeRF-Outdoor). As evident in Tab. 3 and the + +![](images/ccffc7287480a9bc28cc74c65427161fb4779d7b7afd299255e044e104a274e3.jpg) +Figure 4. Qualitative comparison on the DNA-Rendering [12] dataset that contains $1024 \times 1224$ (and $1125 \times 1536$ ) images. Our method can produce high-fidelity images at over 200 FPS while other competitors fail to produce high-quality results for highly dynamic scenes. + +supplementary material, the overfitting severely degrades the rendering quality. Their rendering speed is slower than ours due to excessive point count. More details of the comparison with 3DGS are present in the supplementary material. + +# 5.2. Ablation Studies + +We perform ablation studies on the proposed components on the 150-frame 0013_01 sequence of the DNA-Rendering [12] dataset. Our method can be rendered at over 200 FPS with state-of-the-art quality and maintains a only 2MB per frame storage overhead. More detailed rendering speed analysis and breakdown and storage cost analysis can be found in the supplementary material. + +Ablation study on the 4D embedding. The "w/o f" variant removes the proposed 4D embedding (Sec. 3.1) module and replaces it with a per-frame and per-point estimizable position, radius, density, and scale. As shown in Fig. 5 and Tab. 4, the "w/o f" variant produces blurry and noisy geometry without the 4D embedding $\Theta$ , which leads to the inferior rendering quality. + +Ablation study on the hybrid appearance model. The "w/o $\mathbf{c}_{ibr}$ " variant removes $\mathbf{c}_{ibr}$ in the appearance formulation Eq. (2), which not only leads to less details on the recovered appearance but also significantly impedes the quality of the geometry. Adding an additional degree for the SH + +Table 3. Quantitative comparison on the first frame of all sequences of DNA-Rendering [12] (1024×1224 (and 1125×1536) images) and ENeRF-Outdoor [49] (960×540 images). Metrics are averaged for each dataset. "Storage" indicates the disk file size of the trained models (including source images for our method). + +
| Dataset | Method | PSNR | LPIPS | FPS | Storage | Training |
| --- | --- | --- | --- | --- | --- | --- |
| DNA-Rendering | 3DGS [33] | 31.16 | 0.049 | 113.2 | 224 MB | 5 min |
| DNA-Rendering | Ours | 31.87 | 0.046 | 241.7 | 4.7 MB | 15 min |
| ENeRF-Outdoor | 3DGS [33] | 21.63 | 0.349 | 88.4 | 715 MB | 10 min |
| ENeRF-Outdoor | Ours | 26.54 | 0.145 | 148.6 | 16.0 MB | 30 min |
+ +coefficients does not lead to a significant performance change (PSNR 30.129 vs. 30.259). Comparatively, our proposed method produces high-fidelity rendering with better details. A visualization of the view-dependent effect produced by $\mathbf{c}_{sh}$ can be found in the supplementary material. + +Ablation study on loss functions. As shown in Tab. 4, removing the $L_{lpips}$ term not only reduces the perceptual quality (LPIPS score) but also leads to the degradation of other performance metrics. For the highly dynamic DNA-Rendering [12] dataset, the mask loss $L_{msk}$ helps with regularizing the optimization of the dynamic geometry. + +Rendering speed on different GPUs and resolutions. We additionally report the rendering speed of our method on different hardware (RTX 3060, RTX 3090, and RTX 4090) + +![](images/26be9865e872a28d203df62580367e5c0a2e13957b3ebcc4f9fe1d7cb443b183.jpg) +Figure 5. Ablation studies on the 0013_01 sequence of DNA-Rendering [12]. Removing our proposed components leads to noisy geometry and blurry appearance. Our method produces high-fidelity results with perceptually accurate shapes and colors. See Sec. 5.2 for more details. + +with different resolutions (720p, 1080p, and 4K (2160p)) in Tab. 5. The rendering speed reported here contains the overhead of the interactive GUI. 4K4D achieves real-time rendering speed even when rendering 4K (2160p) images on commodity hardware as shown in the table. More real-time rendering demos can be found in the supplementary video. + +# 6. Conclusion and Discussion + +In this paper, we provide a neural point cloud-based representation, 4K4D, for real-time rendering of dynamic 3D scenes at 4K resolution. We build 4K4D upon a 4D feature grid to naturally regularize the points and develop a novel hybrid appearance model for high-quality rendering. Furthermore, we develop a differentiable depth peeling algorithm that utilizes the hardware rasterization pipeline to effectively optimize and efficiently render the proposed model. In our experiments, we demonstrate that 4K4D not only achieves state-of-the-art rendering quality but also exhibits a more than $30 \times$ increase in rendering speed (over 200FPS at 1080p on an RTX 3090 GPU). + +However, our method still has some limitations. For one, 4K4D cannot produce correspondences of points across frames, which are important for some downstream tasks. Moreover, the storage cost for 4K4D increases linearly with the number of video frames, so our method has difficulty in modeling long volumetric videos. How to model correspondences and reduce the storage cost for long videos could be two interesting problems for future works. Moreover, the rendering quality of our method also depends on the resolution of input images. While our method achieves real-time rendering at 4K resolution, 4K-quality rendering can only be achieved with sufficient input resolution. + +Table 4. Ablation studies on the 150-frame 0013_01 sequence of the DNA-Rendering dataset [12]. "w/o f" indicates replacing the 4D embedding with a per-frame and per-pointizable position, radius, density, and scale. See Sec. 5.2 for more detailed descriptions for the abbreviations. + +
| Variant | PSNR ↑ | SSIM ↑ | LPIPS ↓ | Model Size |
| --- | --- | --- | --- | --- |
| w/o $\mathbf{f}$ | 29.779 | 0.967 | 0.057 | 1304.0 MiB |
| w/o $\mathbf{c}_{ibr}$ | 30.259 | 0.973 | 0.054 | 225.0 MiB |
| w/o $\mathbf{c}_{sh}$ | 31.946 | 0.981 | 0.040 | 225.0 MiB |
| w/o $L_{lpips}$ | 31.661 | 0.979 | 0.063 | 225.0 MiB |
| w/o $L_{msk}$ | 29.115 | 0.965 | 0.073 | 225.0 MiB |
| Ours | 31.990 | 0.982 | 0.040 | 225.0 MiB |
+ +Table 5. Rendering speed on different GPUs and resolutions. The results are recorded on the first frame of the 0013_01 sequence of DNA-Rendering [12] and the actor1_4 sequence of ENeRF-Outdoor [49] with the interactive GUI. Resolutions are set to 720p $(720\times 1280)$ , 1080p $(1080\times 1920)$ , and 4K $(2160\times 3840)$ . Even with the overhead of the interactive GUI ("w/ GUI"), our method still achieves unprecedented rendering speed. More real-time rendering results can be found in the supplementary video. + +
| Dataset | Res. | RTX 3060 | RTX 3090 | RTX 4090 |
| --- | --- | --- | --- | --- |
| DNA-Rendering [12] w/ GUI | 720p | 173.8 FPS | 246.9 FPS | 431.0 FPS |
| DNA-Rendering [12] w/ GUI | 1080p | 138.7 FPS | 233.1 FPS | 409.8 FPS |
| DNA-Rendering [12] w/ GUI | 4K | 90.0 FPS | 147.4 FPS | 288.8 FPS |
| ENeRF-Outdoor [49] w/ GUI | 720p | 90.5 FPS | 130.5 FPS | 351.5 FPS |
| ENeRF-Outdoor [49] w/ GUI | 1080p | 66.1 FPS | 103.6 FPS | 249.7 FPS |
| ENeRF-Outdoor [49] w/ GUI | 4K | 25.1 FPS | 47.2 FPS | 85.1 FPS |
+ +# Acknowledgement + +The authors would like to acknowledge support from NSFC (No. 62172364) and Information Technology Center and State Key Lab of CAD&CG, Zhejiang University. + +# References + +[1] Kara-Ali Aliev, Artem Sevastopolsky, Maria Kolos, Dmitry Ulyanov, and Victor Lempitsky. Neural point-based graphics. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXII 16, pages 696-712. Springer, 2020. 2 +[2] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 1, 2 +[3] Benjamin Attal, Jia-Bin Huang, Michael Zollhöfer, Johannes Kopf, and Changil Kim. Learning neural light fields with ray-space embedding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19819-19829, 2022. 2 +[4] Louis Bavoil and Kevin Myers. Order independent transparency with dual depth peeling. NVIDIA OpenGL SDK, 1:12, 2008. 2, 4 +[5] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2 +[6] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, SIGGRAPH '01, page 425-432, New York, NY, USA, 2001. Association for Computing Machinery. 2 +[7] Dan Casas, Marco Volino, John Collomosse, and Adrian Hilton. 4d video textures for interactive character appearance. In Computer Graphics Forum, pages 371-380. Wiley Online Library, 2014. 1 +[8] Gaurav Chaurasia, Sylvain Duchene, Olga Sorkine-Hornung, and George Drettakis. Depth synthesis and local warps for plausible image-based navigation. ACM TOG, 2013. 2 +[9] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. arXiv, 2022. 2 +[10] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In ICCV, 2021. 2 +[11] Zhiqin Chen, Thomas Funkhouser, Peter Hedman, and Andrea Tagliasacchi. Mobilenerf: Exploiting the polygon rasterization pipeline for efficient neural field rendering on mobile architectures. arXiv preprint arXiv:2208.00277, 2022. 2 +[12] Wei Cheng, Ruixiang Chen, Wanqi Yin, Siming Fan, Keyu Chen, Honglin He, Huiwen Luo, Zhongang Cai, Jingbo Wang, Yang Gao, et al. Dna-rendering: A diverse neural actor repository for high-fidelity human-centric rendering. arXiv preprint arXiv:2307.10173, 2023. 1, 2, 5, 6, 7, 8 +[13] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (ToG), + +34(4):1-13,2015.1,2 +[14] Abe Davis, Marc Levoy, and Fredo Durand. Unstructured light fields. In Computer Graphics Forum, pages 305-314. Wiley Online Library, 2012. 2 +[15] Mingsong Dou, Sameh Khamis, Yury Degtyarev, Philip Davidson, Sean Ryan Fanello, Adarsh Kowdle, Sergio Orts Escolano, Christoph Rhemann, David Kim, Jonathan Taylor, et al. 
Fusion4d: Real-time performance capture of challenging scenes. ACM TOG, 2016. 1, 2 +[16] Mingsong Dou, Jonathan Taylor, Henry Fuchs, Andrew Fitzgibbon, and Shahram Izadi. 3d scanning deformable objects with a single rgbd sensor. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 493-501, 2015. 1 +[17] Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. ACM Siggraph Computer Graphics, 22(4):65-74, 1988. 1, 2, 4 +[18] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. Deepstereo: Learning to predict new views from the world's imagery. In CVPR, June 2016. 2 +[19] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12479–12488, 2023. 1, 2, 3, 6 +[20] Stephan J Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. Fastnerf: High-fidelity neural rendering at 200fps. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14346-14355, 2021. 1, 2 +[21] Steven J Gortler, Radek Grzesczuk, Richard Szeliski, and Michael F Cohen. The lumigraph. In SIGGRAPH, 1996. 2 +[22] Kaiwen Guo, Peter Lincoln, Philip Davidson, Jay Busch, Xueming Yu, Matt Whalen, Geoff Harvey, Sergio Orts-Escolano, Rohit Pandey, Jason Dourgarian, et al. The relightables: Volumetric performance capture of humans with realistic relighting. ACM Transactions on Graphics (ToG), 38(6):1-19, 2019. 2 +[23] Jon Hasselgren, Nikolai Hofmann, and Jacob Munkberg. Shape, light, and material decomposition from images using monte carlo rendering and denoising. Advances in Neural Information Processing Systems, 35:22856-22869, 2022. 2 +[24] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM TOG, 2018. 2 +[25] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In ICCV, 2021. 2 +[26] Anna Hilsmann, Philipp Fechteler, Wieland Morgenstern, Wolfgang Paier, Ingo Feldmann, Oliver Schreer, and Peter Eisert. Going beyond free viewpoint: creating animatable volumetric video of human performances. IET Computer Vision, pages 350-358, 2020. 1 +[27] Tao Hu, Tao Yu, Zerong Zheng, He Zhang, Yebin Liu, and Matthias Zwicker. Hvtr: Hybrid volumetric-textural rendering for human avatars. In 2022 International Conference on 3D Vision (3DV), pages 197-208. IEEE, 2022. + +2 +[28] Mustafa Isik, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. arXiv preprint arXiv:2305.06356, 2023. 2 +[29] Shubhendu Jena, Franck Multon, and Adnane Boukhayma. Neural mesh-based graphics. In European Conference on Computer Vision, pages 739-757. Springer, 2022. 2 +[30] Yue Jiang, Dantong Ji, Zhizhong Han, and Matthias Zwicker. Sdfdiff: Differentiable rendering of signed distance fields for 3d shape optimization. In CVPR, 2020. 2 +[31] Nima Khademi Kalantari, Ting-Chun Wang, and Ravi Ramamoorthi. Learning-based view synthesis for light field cameras. ACM TOG, 2016. 2 +[32] Petr Kellnhofer, Lars C Jebe, Andrew Jones, Ryan Spicer, Kari Pulli, and Gordon Wetzstein. Neural lumigraph rendering. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4287-4297, 2021. 2 +[33] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (TOG), 42(4):1-14, 2023. 1, 2, 4, 6, 7 +[34] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5 +[35] Georgios Kopanas, Julien Philip, Thomas Leimkuhler, and George Drettakis. Point-based neural rendering with perview optimization. In Computer Graphics Forum, volume 40, pages 29-43. Wiley Online Library, 2021. 2 +[36] Jonas Kulhanek and Torsten Sattler. Tetra-nerf: Representing neural radiance fields using tetrahedra. arXiv preprint arXiv:2304.09987, 2023. 2 +[37] Kiriakos N Kutulakos and Steven M Seitz. A theory of shape by space carving. International journal of computer vision, 38:199-218, 2000. 2, 3, 5 +[38] Christoph Lassner and Michael Zollhofer. Pulsar: Efficient sphere-based neural rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1440-1449, 2021. 2 +[39] Marc Levoy and Pat Hanrahan. Light field rendering. In SIGGRAPH, 1996. 2 +[40] Ruilong Li, Hang Gao, Matthew Tancik, and Angjoo Kanazawa. Nerfacc: Efficient sampling accelerates nerfs. arXiv preprint arXiv:2305.04966, 2023. 2 +[41] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, and Zhaoyang Lv. Neural 3d video synthesis. arXiv preprint arXiv:2103.02597, 2021. 2 +[42] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 1, 2 +[43] Zhan Li, Zhang Chen, Zhong Li, and Yi Xu. Spacetime + +gaussian feature splatting for real-time dynamic view synthesis. arXiv preprint arXiv:2312.16812, 2023. 3 +[44] Zhong Li, Yu Ji, Wei Yang, Jinwei Ye, and Jingyi Yu. Robust 3d human motion reconstruction via dynamic template construction. In 2017 International Conference on 3D Vision (3DV), pages 496-505. IEEE, 2017. 2 +[45] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In CVPR, 2021. 2 +[46] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4273-4284, 2023. 2 +[47] Zhengqi Li, Wenqi Xian, Abe Davis, and Noah Snavely. Crowdsampling the plenoptic function. In ECCV, 2020. 2 +[48] Haotong Lin, Sida Peng, Zhen Xu, Tao Xie, Xingyi He, Hujun Bao, and Xiaowei Zhou. High-fidelity and real-time novel view synthesis for dynamic scenes. In SIGGRAPH Asia Conference Proceedings, 2023. 2, 6 +[49] Haotong Lin, Sida Peng, Zhen Xu, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Efficient neural radiance fields for interactive free-viewpoint video. In SIGGRAPH Asia Conference Proceedings, 2022. 1, 2, 4, 5, 6, 7, 8 +[50] Shanchuan Lin, Linjie Yang, Imran Saleemi, and Soumyadip Sengupta. Robust high-resolution video matting with temporal guidance. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 238-247, 2022. 
5 +[51] Shichen Liu, Shunsuke Saito, Weikai Chen, and Hao Li. Learning to infer implicit surfaces without 3d supervision. NeurIPS, 2019. 2 +[52] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. In SIGGRAPH, 2019. 2 +[53] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. ACM Transactions on Graphics (TOG), 40(4):1-13, 2021. 2 +[54] Fan Lu, Yan Xu, Guang Chen, Hongsheng Li, Kwan-Yee Lin, and Changjun Jiang. Urban radiance field representation with deformable neural mesh primitives. arXiv preprint arXiv:2307.10776, 2023. 2 +[55] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. arXiv preprint arXiv:2308.09713, 2023. 3 +[56] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM TOG, 2019. 2 +[57] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. 2020. 4 +[58] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: + +Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 1, 2 +[59] Claus Müller. Spherical harmonics, volume 17. Springer, 2006. 2, 3 +[60] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 2, 5 +[61] Richard A Newcombe, Dieter Fox, and Steven M Seitz. Dynamicfusion: Reconstruction and tracking of non-rigid scenes in real-time. In CVPR, 2015. 1, 2 +[62] Sergio Orts-Escolano, Christoph Rhemann, Sean Fanello, Wayne Chang, Adarsh Kowdle, Yury Degtyarev, David Kim, Philip L Davidson, Sameh Khamis, Mingsong Dou, et al. Holoportation: Virtual 3d teleportation in real-time. In UIST, 2016. 1, 2 +[63] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In ICCV, 2021. 2 +[64] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 2 +[65] Steven Parker, Peter Shirley, and Brian Smits. Single sample soft shadows. Technical report, Technical Report UUCS-98-019, Computer Science Department, University of Utah, 1998. 2 +[66] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In NeurIPS, 2019. 4, 5 +[67] Nikolay Patakin, Dmitry Senushkin, Anna Vorontsova, and Anton Konushin. Neural global illumination for inverse rendering. 
In 2023 IEEE International Conference on Image Processing (ICIP), pages 1580-1584. IEEE, 2023. 2 +[68] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4252-4262, 2023. 1, 2 +[69] Eric Penner and Li Zhang. Soft 3d reconstruction for view synthesis. ACM TOG, 2017. 2 +[70] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In CVPR, 2021. 2 +[71] Ruslan Rakhimov, Andrei-Timotei Ardelean, Victor Lempitsky, and Evgeny Burnaev. Npbg++: Accelerating neural point-based graphics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15969-15979, 2022. 2 +[72] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with + +thousands of tiny mpls. In ICCV, pages 14335-14345, 2021. 2 +[73] Darius Rückert, Linus Franke, and Marc Stamminger. Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics (ToG), 41(4):1-14, 2022. 2 +[74] Jason Sanders and Edward Kandrot. CUDA by example: an introduction to general-purpose GPU programming. Addison-Wesley Professional, 2010. 5 +[75] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. arXiv, 2022. 2 +[76] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. In CVPR, 2020. 2 +[77] Dave Shreiner et al. OpenGL programming guide: the official guide to learning OpenGL, versions 3.0 and 3.1. Pearson Education, 2009. 4, 5 +[78] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 5 +[79] Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems, 34:19313-19325, 2021. 2 +[80] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhöfer. Deepvoxels: Learning persistent 3d feature embeddings. In CVPR, 2019. +[81] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In NeurIPS, 2019. 2 +[82] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 2 +[83] Pratul P Srinivasan, Richard Tucker, Jonathan T Barron, Ravi Ramamoorthi, Ren Ng, and Noah Snively. Pushing the boundaries of view extrapolation with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 175-184, 2019. 2 +[84] Zhuo Su, Lan Xu, Zerong Zheng, Tao Yu, Yebin Liu, and Lu Fang. Robustfusion: Human volumetric capture with data-driven visual cues using a rgbd camera. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part IV 16, pages 246-264. Springer, 2020. 1 +[85] Mohammed Suhail, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Light field neural rendering. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8269-8279, June 2022. 2 +[86] Richard Szeliski and Polina Golland. Stereo matching with transparency and matting. In Sixth International Conference on Computer Vision (IEEE Cat. No. 98CH36271), pages 517-524. IEEE, 1998. 2 + +[87] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, and Huaping Liu. Mixed neural voxels for fast multi-view video synthesis. arXiv preprint arXiv:2212.00190, 2022. 2 +[88] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 76-87, 2023. 2 +[89] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 1 +[90] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2021. 2, 6 +[91] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time view synthesis with neural basis expansion. In CVPR, 2021. 2 +[92] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3 +[93] Minye Wu, Yuehao Wang, Qiang Hu, and Jingyi Yu. Multiview neural human rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1682-1691, 2020. 2, 3, 5 +[94] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 3 +[95] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv 2310.10642, 2023. 3 +[96] Alex Yu, Sara Fridovich-Keil, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. CVPR, 2022. 2 +[97] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 1, 2, 3 +[98] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2 +[99] Tao Yu, Zerong Zheng, Kaiwen Guo, Pengpeng Liu, Qionghai Dai, and Yebin Liu. Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5746-5756, 2021. 1 +[100] Tao Yu, Zerong Zheng, Kaiwen Guo, Jianhui Zhao, Qionghai Dai, Hao Li, Gerard Pons-Moll, and Yebin Liu. Doublefu + +sion: Real-time capture of human performances with inner body shapes from a single depth sensor. In CVPR, 2018. 
1, 2 +[101] Qiang Zhang, Seung-Hwan Baek, Szymon Rusinkiewicz, and Felix Heide. Differentiable point-based radiance fields for efficient view synthesis. In SIGGRAPH Asia 2022 Conference Papers, pages 1-12, 2022. 2 +[102] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 5 +[103] C Lawrence Zitnick, Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM TOG, 2004. 2 \ No newline at end of file diff --git a/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/images.zip b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..d4e52c658a41d3d4f9c127f72f810177d54dd48a --- /dev/null +++ b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b5c6d348848616e68e58bf4239ff188cc0abd2a1d3882186aeefad24864a84c +size 664951 diff --git a/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/layout.json b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..8c36501eaaa68228568f3219cca35f4c064a1c1d --- /dev/null +++ b/2024/4K4D_ Real-Time 4D View Synthesis at 4K Resolution/layout.json @@ -0,0 +1,11453 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 129, + 103, + 465, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 103, + 465, + 120 + ], + "spans": [ + { + "bbox": [ + 129, + 103, + 465, + 120 + ], + "type": "text", + "content": "4K4D: Real-Time 4D View Synthesis at 4K Resolution" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "spans": [ + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "text", + "content": "Zhen Xu" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "text", + "content": " Sida Peng" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "text", + "content": " Haotong Lin" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "text", + "content": " Guangzhao He" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "text", + "content": " \nJiaming Sun" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "text", + "content": " Yujun Shen" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "text", + "content": " Hujun Bao" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "text", + "content": " 
Xiaowei Zhou" + }, + { + "bbox": [ + 143, + 133, + 449, + 165 + ], + "type": "inline_equation", + "content": "^{1*}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 203, + 171, + 388, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 171, + 388, + 187 + ], + "spans": [ + { + "bbox": [ + 203, + 171, + 388, + 187 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 203, + 171, + 388, + 187 + ], + "type": "text", + "content": "Zhejiang University " + }, + { + "bbox": [ + 203, + 171, + 388, + 187 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 203, + 171, + 388, + 187 + ], + "type": "text", + "content": "Ant Group" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 47, + 198, + 165, + 350 + ], + "blocks": [ + { + "bbox": [ + 47, + 198, + 165, + 350 + ], + "lines": [ + { + "bbox": [ + 47, + 198, + 165, + 350 + ], + "spans": [ + { + "bbox": [ + 47, + 198, + 165, + 350 + ], + "type": "image", + "image_path": "b0fb1ae180e4f2553cc3a444670a453b8a845be6541eb9f3574892ad7f2f6b5a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 357, + 547, + 403 + ], + "lines": [ + { + "bbox": [ + 45, + 357, + 547, + 403 + ], + "spans": [ + { + "bbox": [ + 45, + 357, + 547, + 403 + ], + "type": "text", + "content": "Figure 1. Photorealistic and real-time rendering of dynamic 3D scenes. Our proposed method reconstructs a 4D neural representation from multi-view videos, which can be rendered at " + }, + { + "bbox": [ + 45, + 357, + 547, + 403 + ], + "type": "inline_equation", + "content": "1125 \\times 1536" + }, + { + "bbox": [ + 45, + 357, + 547, + 403 + ], + "type": "text", + "content": " resolution with a speed of over 200 FPS using an RTX 3090 GPU while maintaining state-of-the-art quality on the DNA-Rendering [12] dataset. It is also noteworthy that our method reaches over 80 FPS when rendering 4K images with an RTX 4090. Detailed performance under different resolutions using different GPUs can be found in Tab. 5." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 176, + 198, + 293, + 350 + ], + "blocks": [ + { + "bbox": [ + 176, + 198, + 293, + 350 + ], + "lines": [ + { + "bbox": [ + 176, + 198, + 293, + 350 + ], + "spans": [ + { + "bbox": [ + 176, + 198, + 293, + 350 + ], + "type": "image", + "image_path": "3f817075de57a429debe67895115d7c6df1e274d885f35bf8b436ebc8086815b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 304, + 200, + 419, + 349 + ], + "blocks": [ + { + "bbox": [ + 304, + 200, + 419, + 349 + ], + "lines": [ + { + "bbox": [ + 304, + 200, + 419, + 349 + ], + "spans": [ + { + "bbox": [ + 304, + 200, + 419, + 349 + ], + "type": "image", + "image_path": "28cd712e6c35670581fbf72f353f01453419038cb2a31a7a8d9021b2a0f10d68.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 432, + 200, + 545, + 350 + ], + "blocks": [ + { + "bbox": [ + 432, + 200, + 545, + 350 + ], + "lines": [ + { + "bbox": [ + 432, + 200, + 545, + 350 + ], + "spans": [ + { + "bbox": [ + 432, + 200, + 545, + 350 + ], + "type": "image", + "image_path": "ed1b4d46da56e20fd18e50a6c13f84f93f52b87e7220afe789fbc560d6e54fcc.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 143, + 414, + 192, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 414, + 192, + 426 + ], + "spans": [ + { + "bbox": [ + 143, + 414, + 192, + 426 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 436, + 290, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 436, + 290, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 436, + 290, + 687 + ], + "type": "text", + "content": "This paper targets high-fidelity and real-time view synthesis of dynamic 3D scenes at " + }, + { + "bbox": [ + 46, + 436, + 290, + 687 + ], + "type": "inline_equation", + "content": "4K" + }, + { + "bbox": [ + 46, + 436, + 290, + 687 + ], + "type": "text", + "content": " resolution. Recent methods on dynamic view synthesis have shown impressive rendering quality. However, their speed is still limited when rendering high-resolution images. To overcome this problem, we propose 4K4D, a 4D point cloud representation that supports hardware rasterization and network pre-computation to enable unprecedented rendering speed with a high rendering quality. Our representation is built on a 4D feature grid so that the points are naturally regularized and can be robustly optimized. In addition, we design a novel hybrid appearance model that significantly boosts the rendering quality while preserving efficiency. Moreover, we develop a differentiable depth peeling algorithm to effectively learn the proposed model from RGB videos. 
Experiments show that our representation can be rendered at over 400 FPS on the DNA-Rendering dataset at 1080p resolution and 80 FPS on the ENeRF-Outdoor dataset at " + }, + { + "bbox": [ + 46, + 436, + 290, + 687 + ], + "type": "inline_equation", + "content": "4K" + }, + { + "bbox": [ + 46, + 436, + 290, + 687 + ], + "type": "text", + "content": " resolution using an RTX 4090 GPU, which is " + }, + { + "bbox": [ + 46, + 436, + 290, + 687 + ], + "type": "inline_equation", + "content": "30\\times" + }, + { + "bbox": [ + 46, + 436, + 290, + 687 + ], + "type": "text", + "content": " faster than previous methods and achieves the state-of-the-art rendering quality. Our project page is available at https://zju3dv.github.io/4k4d." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 414, + 386, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 414, + 386, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 414, + 386, + 426 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 436, + 547, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 436, + 547, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 436, + 547, + 567 + ], + "type": "text", + "content": "Dynamic view synthesis aims to reconstruct dynamic 3D scenes from captured videos and create free-viewpoint and immersive virtual playback, which is a long-standing research problem in computer vision and computer graphics. Essential to the practicality of this technique is its ability to be rendered in real-time with high fidelity. Traditional methods [7, 13, 15, 16, 26, 61, 62, 84, 99, 100] represent dynamic 3D scenes as textured mesh sequences which can be rendered efficiently. However, high-quality mesh reconstruction requires complicated capture hardware and is limited to controlled environments." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 570, + 548, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 548, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 548, + 713 + ], + "type": "text", + "content": "Recently, implicit neural representations [19, 42, 58] have shown great success in reconstructing dynamic 3D scenes from RGB videos via differentiable rendering. For example, Li et al. [42] model the target scene as a dynamic neural radiance field and leverage volume rendering [17] to synthesize images. Despite impressive view synthesis results, existing approaches typically require seconds or even minutes to render an image at " + }, + { + "bbox": [ + 304, + 570, + 548, + 713 + ], + "type": "inline_equation", + "content": "1080\\mathrm{p}" + }, + { + "bbox": [ + 304, + 570, + 548, + 713 + ], + "type": "text", + "content": " resolution due to the costly network evaluation, as discussed by Peng et al. [68]. 
Inspired by static view synthesis approaches [20, 33, 97], some dynamic view synthesis methods [2, 49, 68, 89] increase the rendering speed by decreasing either the" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "type": "text", + "content": "The authors from Zhejiang University are affiliated with the State Key Lab of CAD&CG. *Corresponding author: Xiaowei Zhou." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20029" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "content": "network size or the number of network evaluations. With these strategies, such methods achieve over 40 FPS when rendering moderate-resolution images " + }, + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "inline_equation", + "content": "(384 \\times 512)" + }, + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "content": " [49, 68], but are still not fast enough to achieve real-time performance when rendering high-resolution images. For instance, when rendering 4K resolution images, their speed reduces to only 1 or 2 FPS [2, 49, 68]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 167, + 288, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 167, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 167, + 288, + 251 + ], + "type": "text", + "content": "In this paper, we propose a novel neural representation, named 4K4D, for modeling and rendering dynamic 3D scenes. As illustrated in Fig. 1, 4K4D significantly outperforms previous dynamic view synthesis approaches [19, 49] in terms of the rendering speed, while being competitive in the rendering quality. Our core innovation lies in a 4D point cloud representation and a hybrid appearance model." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 262, + 288, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 262, + 288, + 429 + ], + "spans": [ + { + "bbox": [ + 46, + 262, + 288, + 429 + ], + "type": "text", + "content": "Specifically, for the dynamic scene, we obtain the coarse point cloud sequence using space carving [37] and model the position of each point as a learnable vector. A 4D feature grid is introduced for assigning a feature vector to each point, which is fed into MLP networks to predict the point's radius, density, and spherical harmonics (SH) coefficients [59]. The 4D feature grid naturally applies spatial regularization on the point clouds and makes the optimization more robust (Sec. 5.2). During inference, the point's radius, density and SH coefficients can be pre-computed, which eliminates network evaluations to achieve unprecedented rendering speed. Moreover, we develop a differentiable depth peeling algorithm that exploits the hardware rasterizer to further significantly accelerate the rendering." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 440, + 288, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 440, + 288, + 583 + ], + "spans": [ + { + "bbox": [ + 46, + 440, + 288, + 583 + ], + "type": "text", + "content": "We empirically find that the image blending model [49] achieves higher rendering quality than the SH model used by 3DGS [33]. However, the image blending model of previous methods [48, 49, 90] requires slow network evaluations during inference, limiting their rendering speed. To alleviate this, we introduce a novel design where we make the image blending network independent of the viewing direction, so the network evaluation can be pre-computed and thereby boost the rendering speed. As a two-edged sword, this strategy makes the appearance model discrete along the viewing direction. This downside is compensated for by using another continuous SH model." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "text", + "content": "To validate the effectiveness of the proposed pipeline, we evaluate 4K4D on multiple widely used datasets for multi-view dynamic novel view synthesis, including NHR [93], ENeRF-Outdoor [49], DNA-Rendering [12], and Neural3DV [41]. Extensive experiments show that 4K4D could not only be rendered orders of magnitude faster but also notably outperform the baselines in terms of rendering quality. With an RTX 4090 GPU, our method reaches 400 FPS on the DNA-Rendering dataset at " + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "inline_equation", + "content": "1080\\mathrm{p}" + }, + { + "bbox": [ + 46, + 594, + 288, + 713 + ], + "type": "text", + "content": " resolution and 80 FPS on the ENeRF-Outdoor dataset at 4K resolution." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "type": "text", + "content": "2. 
Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 93, + 547, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 93, + 547, + 237 + ], + "spans": [ + { + "bbox": [ + 304, + 93, + 547, + 237 + ], + "type": "text", + "content": "Traditional scene representations. In the domain of novel view synthesis, various approaches based on different representations have been proposed, including multi-view image-based methods [6, 8, 18, 31, 69, 103], multi-plane image representations [47, 56, 65, 83, 86, 86], light-field techniques [14, 21, 39] as well as explicit surface or voxel-based methods [5, 13, 15, 22, 44, 61, 62, 100]. The seminal work [13] utilizes depth sensors and multi-view stereo techniques to consolidate per-view depth information into a coherent mesh sequence, producing high-quality volumetric video. These methods require intricate hardware setups and studio arrangements, thus constraining their accessibility." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 239, + 548, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 239, + 548, + 431 + ], + "spans": [ + { + "bbox": [ + 304, + 239, + 548, + 431 + ], + "type": "text", + "content": "Neural scene representations. Recently, implicit neural scene representations[3, 24, 27, 30, 32, 51, 52, 58, 76, 79-81, 85, 91] have attracted significant interest among researchers. NeRF[58] encodes the radiance fields of static scenes using coordinate-based Multi-Layer Perceptrons (MLP), achieving exceptional novel view synthesis quality. Building upon NeRF, a collection of studies [28, 42, 45, 63, 64, 70, 93] have made extensions to accommodate for dynamic scenes. Another line of studies [10, 46, 90, 98] has focused on integrating image features into the NeRF rendering pipeline. This approach is easily applicable to dynamic scenes, as multi-view videos can be directly decomposed into multiview images. However, NeRF-based approaches often suffer from substantial network evaluation costs during the volume rendering process, which significantly limits their rendering speed and thus hinders their practicality." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 436, + 547, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 436, + 547, + 615 + ], + "spans": [ + { + "bbox": [ + 304, + 436, + 547, + 615 + ], + "type": "text", + "content": "Accelerating neural scene representations. To accelerate NeRF's rendering, multiple works propose to distill implicit MLP networks into explicit structures that offer fast query capabilities, including voxel grids [20, 25, 40, 60, 72, 96, 97], explicit surfaces [11, 23, 29, 36, 54, 67] and point-based representations [1, 33, 35, 38, 71, 73, 101]. These methods effectively reduce the cost or the number of NeRF's MLP evaluations required. Inspired by their success, several approaches [2, 9, 48, 49, 53, 68, 75, 82, 82, 87, 88] have explored the possibility of real-time dynamic view synthesis. HyperReel [2] employs a primitive prediction module to reduce the number of network evaluations, thereby achieving real-time speed at moderate resolutions. However, it should be noted that their rendering speed decreases significantly when rendering higher-resolution images." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 617, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 548, + 715 + ], + "type": "text", + "content": "Gaussian Splatting. One notable advancement for accelerating NeRF is the development of 3D Gaussian Splatting (3DGS) [33] which introduces a differentiable Gaussian ellipsoids splatting algorithm for fast and differentiable volume rendering [4, 17]. By effectively eliminating the slow ray marching operation of NeRF with forward splatting and SH [59], they attain both high-fidelity and high-speed rendering. However, the storage cost of 3DGS limits its application" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20030" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 200, + 218 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 200, + 218 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 200, + 218 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 200, + 218 + ], + "type": "image", + "image_path": "8263b0399b19b14a758fd575b36e4ce00332f08d44187388974d28064e0dae2c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 222, + 547, + 278 + ], + "lines": [ + { + "bbox": [ + 46, + 222, + 547, + 278 + ], + "spans": [ + { + "bbox": [ + 46, + 222, + 547, + 278 + ], + "type": "text", + "content": "Figure 2. Overview of our proposed pipeline. (a) By applying the space-carving algorithm [37], we extract the initial cloud sequence " + }, + { + "bbox": [ + 46, + 222, + 547, + 278 + ], + "type": "inline_equation", + "content": "\\mathbf{x}, t" + }, + { + "bbox": [ + 46, + 222, + 547, + 278 + ], + "type": "text", + "content": " of the target scene. A 4D feature grid [19] is predefined to assign a feature vector to each point, which is then fed into MLPs for the scene geometry and appearance. (b) The geometry model is based on the point location, radius, and density, which forms a semi-transparent point cloud. (c) The appearance model consists of a piece-wise constant IBR term " + }, + { + "bbox": [ + 46, + 222, + 547, + 278 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ibr}" + }, + { + "bbox": [ + 46, + 222, + 547, + 278 + ], + "type": "text", + "content": " and a continuous SH model " + }, + { + "bbox": [ + 46, + 222, + 547, + 278 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{sh}" + }, + { + "bbox": [ + 46, + 222, + 547, + 278 + ], + "type": "text", + "content": ". (d) The proposed representation is learned from multi-view RGB videos through the differentiable depth peeling algorithm." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 202, + 70, + 403, + 220 + ], + "blocks": [ + { + "bbox": [ + 202, + 70, + 403, + 220 + ], + "lines": [ + { + "bbox": [ + 202, + 70, + 403, + 220 + ], + "spans": [ + { + "bbox": [ + 202, + 70, + 403, + 220 + ], + "type": "image", + "image_path": "7de42945a0f4e745cdc3461bc94436f18b6ef2629702323ceb0f5e8325f1b54b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 408, + 70, + 545, + 220 + ], + "blocks": [ + { + "bbox": [ + 408, + 70, + 545, + 220 + ], + "lines": [ + { + "bbox": [ + 408, + 70, + 545, + 220 + ], + "spans": [ + { + "bbox": [ + 408, + 70, + 545, + 220 + ], + "type": "image", + "image_path": "329e908cc5e9c60be22f7d7ed333720940f31ab0bc5c81894b25a155989e7ae4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 287, + 289, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 287, + 289, + 479 + ], + "spans": [ + { + "bbox": [ + 46, + 287, + 289, + 479 + ], + "type": "text", + "content": "on dynamic scenes. In contrast, the 4D feature grid and image blending model of 4K4D could not only maintain similar rendering quality but also significantly reduce the storage cost for modeling dynamic scenes. Moreover, the simpler point cloud representation and the 4D feature grid regularization also make 4K4D less prone to overfitting training views than 3DGS. Some recent concurrent works [43, 55, 92, 94, 95] have also reported real-time rendering speeds by incorporating temporal correspondence or time-dependency into 3DGS. However, these methods either do not show results on datasets with large and fast motions [43, 95] (like NHR [93]) or only report real-time speed at moderate resolution " + }, + { + "bbox": [ + 46, + 287, + 289, + 479 + ], + "type": "inline_equation", + "content": "(800\\times 800" + }, + { + "bbox": [ + 46, + 287, + 289, + 479 + ], + "type": "text", + "content": " [92] and " + }, + { + "bbox": [ + 46, + 287, + 289, + 479 + ], + "type": "inline_equation", + "content": "640\\times 480" + }, + { + "bbox": [ + 46, + 287, + 289, + 479 + ], + "type": "text", + "content": " [55]). In contrast, 4K4D is capable of real-time rendering even at 4K resolution while concurrently maintaining state-of-the-art view-synthesis quality on large-motion data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 497, + 164, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 497, + 164, + 511 + ], + "spans": [ + { + "bbox": [ + 47, + 497, + 164, + 511 + ], + "type": "text", + "content": "3. Proposed Approach" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 519, + 287, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 519, + 287, + 604 + ], + "spans": [ + { + "bbox": [ + 46, + 519, + 287, + 604 + ], + "type": "text", + "content": "Given a multi-view video capturing a dynamic 3D scene, our goal is to reconstruct the target scene and perform novel view synthesis in real time. To this end, we extract coarse point clouds of the scene using the space-carving algorithm [37] (Sec. 4) and build a point cloud-based neural scene representation, which can be robustly learned from input videos and enable the hardware-accelerated rendering." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "type": "text", + "content": "The overview of the proposed model is presented in Fig. 2. In this section, we first describe how to represent the geometry and appearance of dynamic scenes based on point clouds and neural networks (Sec. 3.1). Then, we develop a differentiable depth peeling algorithm for rendering our representation (Sec. 3.2), which is supported by the hardware rasterizer, thereby significantly improving the rendering speed. Finally, we discuss how to optimize the proposed model on input RGB videos (Sec. 3.3)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 286, + 538, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 286, + 538, + 300 + ], + "spans": [ + { + "bbox": [ + 305, + 286, + 538, + 300 + ], + "type": "text", + "content": "3.1. Modeling Dynamic Scenes with Point Clouds" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "spans": [ + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "text", + "content": "4D embedding. Given the coarse point clouds of the target scene, we represent its dynamic geometry and appearance using neural networks and feature grids. Specifically, our method first defines six feature planes " + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "inline_equation", + "content": "\\theta_{xy}, \\theta_{xz}, \\theta_{yz}, \\theta_{tx}, \\theta_{ty}" + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "inline_equation", + "content": "\\theta_{tz}" + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "text", + "content": ". 
To assign a feature vector " + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "text", + "content": " to any point " + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "text", + "content": " at frame " + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "text", + "content": ", we adopt the strategy of K-Planes [19] to model a 4D feature field " + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "inline_equation", + "content": "\\Theta(\\mathbf{x}, t)" + }, + { + "bbox": [ + 304, + 305, + 547, + 389 + ], + "type": "text", + "content": " using these six planes:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 401, + 545, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 401, + 545, + 430 + ], + "spans": [ + { + "bbox": [ + 315, + 401, + 545, + 430 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {f} = \\Theta (\\mathbf {x}, t) = \\theta_ {x y} (x, y) \\oplus \\theta_ {x z} (x, z) \\oplus \\theta_ {y z} (y, z) \\oplus \\\\ \\theta_ {t x} (t, x) \\oplus \\theta_ {t y} (t, y) \\oplus \\theta_ {t z} (t, z), \\tag {1} \\\\ \\end{array}", + "image_path": "d55d197beefbce2b43842f2d8fb971c63cbc2af3f257a0f5e454d167a95fd53d.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 438, + 547, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 438, + 547, + 474 + ], + "spans": [ + { + "bbox": [ + 304, + 438, + 547, + 474 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 438, + 547, + 474 + ], + "type": "inline_equation", + "content": "\\mathbf{x} = (x,y,z)" + }, + { + "bbox": [ + 304, + 438, + 547, + 474 + ], + "type": "text", + "content": " is the input point, and " + }, + { + "bbox": [ + 304, + 438, + 547, + 474 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 304, + 438, + 547, + 474 + ], + "type": "text", + "content": " indicates the concatenation operator. Please refer to K-Planes [19] for more implementation details." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "spans": [ + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "text", + "content": "Geometry model. Based on coarse point clouds, the dynamic scene geometry is represented by learning three entries on each point: position " + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "inline_equation", + "content": "\\mathbf{p} \\in R^3" + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "text", + "content": ", radius " + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "inline_equation", + "content": "r \\in R" + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "text", + "content": ", and density " + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "inline_equation", + "content": "\\sigma \\in R" + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "text", + "content": ". 
Using these point entries, we calculate the volume density of space point " + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "text", + "content": " with respect to an image pixel " + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "text", + "content": " for the volume rendering, which will be described in Sec. 3.2. The point position " + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "inline_equation", + "content": "\\mathbf{p}" + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "text", + "content": " is modeled as an estimizable vector. The radius " + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "text", + "content": " and density " + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "text", + "content": " are predicted by feeding the feature vector " + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 304, + 476, + 545, + 584 + ], + "type": "text", + "content": " in Eq. (1) to an MLP network." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "spans": [ + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "text", + "content": "Appearance model. As illustrated in Fig. 2c, we use the image blending technique and the spherical harmonics (SH) model [59, 97] to build a hybrid appearance model, where the image blending technique represents the discrete view-dependent appearance " + }, + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ibr}" + }, + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "text", + "content": " and the SH model represents the continuous view-dependent appearance " + }, + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{sh}" + }, + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "text", + "content": ". 
For point " + }, + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "text", + "content": " at frame " + }, + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "text", + "content": ", its color with viewing direction " + }, + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "inline_equation", + "content": "\\mathbf{d}" + }, + { + "bbox": [ + 304, + 586, + 547, + 670 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 348, + 679, + 545, + 693 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 679, + 545, + 693 + ], + "spans": [ + { + "bbox": [ + 348, + 679, + 545, + 693 + ], + "type": "interline_equation", + "content": "\\mathbf {c} (\\mathbf {x}, t, \\mathbf {d}) = \\mathbf {c} _ {i b r} (\\mathbf {x}, t, \\mathbf {d}) + \\mathbf {c} _ {s h} (\\mathbf {s}, \\mathbf {d}), \\tag {2}", + "image_path": "3f906c2c3dd61e7c98808b5a034f8ea509bd91813572caa26cbf757fa45981a6.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 701, + 476, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 701, + 476, + 714 + ], + "spans": [ + { + "bbox": [ + 306, + 701, + 476, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 701, + 476, + 714 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 306, + 701, + 476, + 714 + ], + "type": "text", + "content": " means SH coefficients at point " + }, + { + "bbox": [ + 306, + 701, + 476, + 714 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 306, + 701, + 476, + 714 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20031" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "content": "The discrete view-dependent appearance " + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ibr}" + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "content": " is inferred based on input images. Specifically, for a point " + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "content": ", we first project it into the input image to retrieve the corresponding RGB color " + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{img}^i" + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "content": ". 
Then, to blend input RGB colors, we calculate the corresponding blending weight " + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "inline_equation", + "content": "w^i" + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "content": " based on the point coordinate and the input image. Note that the blending weight is independent from the viewing direction. Next, to achieve the view-dependent effect, we select the " + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "inline_equation", + "content": "N'" + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "content": " nearest input views according to the viewing direction. Finally, the color " + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ibr}" + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "content": " is computed as " + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{N'} w^i \\mathbf{c}_{img}^i" + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "content": ". Because the " + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "inline_equation", + "content": "N'" + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "content": " input views are obtained through the nearest neighbor retrieval, the " + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ibr}" + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "content": " is inevitably discrete along the viewing direction. To achieve the continuous view-dependent effect, we append the fine-level color " + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{sh}" + }, + { + "bbox": [ + 46, + 72, + 289, + 254 + ], + "type": "text", + "content": " represented by the SH model, as shown in Fig. 2c." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "text", + "content": "In practice, our method regresses the SH coefficients " + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "text", + "content": " by passing the point feature " + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "text", + "content": " in Eq. (1) into an MLP network. 
To predict the blending weight " + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "inline_equation", + "content": "w^{i}" + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "text", + "content": " in the image blending model " + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ibr}" + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "text", + "content": ", we first project point " + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "text", + "content": " onto the input image to retrieve the image feature " + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{img}^{i}" + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "text", + "content": ", and then concatenate it with the point feature " + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "text", + "content": ", which is fed into another MLP network to predict the blending weight. The image feature " + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{img}^{i}" + }, + { + "bbox": [ + 46, + 255, + 289, + 352 + ], + "type": "text", + "content": " is extracted using a 2D CNN network." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 354, + 289, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 354, + 289, + 582 + ], + "spans": [ + { + "bbox": [ + 46, + 354, + 289, + 582 + ], + "type": "text", + "content": "Discussion. Our appearance model is the key to achieving the low-storage, high-fidelity, and real-time view synthesis of dynamic scenes. There are three alternative ways to represent the dynamic appearance, but they cannot perform on par with our model. 1) Defining explicit SH coefficients on each point, as in 3D Gaussian splatting [33]. When the degree of SH coefficients is high and the amount of points of dynamic scenes is large, this model's size could be too big to train on a consumer GPU. 2) MLP-based SH model. Using an MLP to predict SH coefficients of each point can effectively decrease the model size. However, our experiments found that MLP-based SH model struggles to render high-quality images (Sec. 5.2). 3) Continuous view-dependent image blending model, as in ENeRF [49]. We found that representing the appearance with the image blending model exhibits better rendering quality than only with the MLP-based SH model. However, the color network in ENeRF takes the viewing direction as input and thus cannot be easily pre-computed, limiting the rendering speed during inference." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": "In contrast to these three methods, our appearance model combines a discrete image blending model " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ibr}" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " with a continuous SH model " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{sh}" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": ". The image blending model " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ibr}" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " boosts the rendering performance. In addition, it supports the pre-computation, as its network does not take the viewing direction as input. The SH model " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{sh}" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " enables the view-dependent effect for any viewing direction. During training, our model represents the scene appearance using networks, so its model size is reasonable. During inference, we pre-compute the network outputs to achieve the real-time rendering, which will be described in Sec. 3.4." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 305, + 72, + 463, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 463, + 85 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 463, + 85 + ], + "type": "text", + "content": "3.2. Differentiable Depth Peeling" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 91, + 547, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 547, + 247 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 547, + 247 + ], + "type": "text", + "content": "Our proposed dynamic scene representation can be rendered into images by performing volume rendering [17] on rasterized points. This forward process is much faster than NeRF's backward ray-marching operation [57] since it requires no network evaluation and explicit sampling. The volume rendering equation requires the color and transparency values to be integrated in order [4], thus we utilize the depth-peeling algorithm for acquiring the corresponding ordered points for pixels. Thanks to the point cloud representation, we can leverage the hardware rasterizer to significantly speed up the depth peeling and blending process. Moreover, it is easy to make this rendering process differentiable, enabling us to learn our model from input RGB videos." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "spans": [ + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": "We develop a custom shader to implement the depth peeling algorithm that consists of " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": " rendering passes. 
Consider a particular image pixel " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": ". In the first pass, our method first uses the hardware rasterizer to render point clouds onto the image, which assigns the closest-to-camera point " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_0" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": " to the pixel " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": ". Denote the depth of point " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_0" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": ". Subsequently, in the " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": "-th rendering pass, all points with depth value " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "t_k" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": " smaller than the recorded depth of the previous pass " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "t_{k-1}" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": " are discarded, thereby resulting in the " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": "-th closest-to-camera point " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_k" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": " for the pixel " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": ". Discarding closer points is implemented in our custom shader, so it still supports the hardware rasterization. After " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": " rendering passes, pixel " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": " has a set of sorted points " + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{x}_k | k = 1, \\dots, K\\}" + }, + { + "bbox": [ + 304, + 248, + 547, + 405 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 405, + 547, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 405, + 547, + 455 + ], + "spans": [ + { + "bbox": [ + 304, + 405, + 547, + 455 + ], + "type": "text", + "content": "Based on the sorted points, we use the volume rendering technique to synthesize the color of pixel " + }, + { + "bbox": [ + 304, + 405, + 547, + 455 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 304, + 405, + 547, + 455 + ], + "type": "text", + "content": ". The densities of these points for pixel " + }, + { + "bbox": [ + 304, + 405, + 547, + 455 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 304, + 405, + 547, + 455 + ], + "type": "text", + "content": " are defined based on the distance between the projected point and pixel " + }, + { + "bbox": [ + 304, + 405, + 547, + 455 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 304, + 405, + 547, + 455 + ], + "type": "text", + "content": " on the 2D image:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 339, + 464, + 547, + 491 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 464, + 547, + 491 + ], + "spans": [ + { + "bbox": [ + 339, + 464, + 547, + 491 + ], + "type": "interline_equation", + "content": "\\alpha (\\mathbf {u}, \\mathbf {x}) = \\sigma \\cdot \\max (1 - \\frac {| | \\pi (\\mathbf {x}) - \\mathbf {u} | | _ {2} ^ {2}}{r ^ {2}}, 0), \\qquad (3)", + "image_path": "6389c3c3a1f87f6f891e92a66b6da05edb7599a28cfc8d6b840fd7500bba9db4.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "spans": [ + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "text", + "content": " is the camera projection function. " + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "text", + "content": " are the density and radius of point " + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "text", + "content": ", which are described in Sec. 3.1. Intuitively, Eq. (3) defines a semi-transparent point representation where the density is the highest around the center and quadratically decreases along its radius. During training, we implement the projection function " + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "text", + "content": " using the PyTorch [66], so Eq. (3) is naturally differentiable. 
During inference, we leverage the hardware rasterization process to efficiently obtain the distance " + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "inline_equation", + "content": "\\| \\pi (\\mathbf{x}) - \\mathbf{u}\\| _2^2" + }, + { + "bbox": [ + 304, + 499, + 547, + 619 + ], + "type": "text", + "content": ", which is implemented using OpenGL [77]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 620, + 547, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 620, + 547, + 644 + ], + "spans": [ + { + "bbox": [ + 305, + 620, + 547, + 644 + ], + "type": "text", + "content": "Denote the density of point " + }, + { + "bbox": [ + 305, + 620, + 547, + 644 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_k" + }, + { + "bbox": [ + 305, + 620, + 547, + 644 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 305, + 620, + 547, + 644 + ], + "type": "inline_equation", + "content": "\\alpha_{k}" + }, + { + "bbox": [ + 305, + 620, + 547, + 644 + ], + "type": "text", + "content": ". The color of pixel " + }, + { + "bbox": [ + 305, + 620, + 547, + 644 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 305, + 620, + 547, + 644 + ], + "type": "text", + "content": " from the volume rendering is formulated as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 328, + 656, + 547, + 690 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 656, + 547, + 690 + ], + "spans": [ + { + "bbox": [ + 328, + 656, + 547, + 690 + ], + "type": "interline_equation", + "content": "C (\\mathbf {u}) = \\sum_ {k = 1} ^ {K} T _ {k} \\alpha_ {k} \\mathbf {c} _ {k}, \\text {w h e r e} T _ {k} = \\prod_ {j = 1} ^ {k - 1} (1 - \\alpha_ {j}), \\tag {4}", + "image_path": "03d3858265c7d2fe3eed4ccc6cbcdd1ee9e99c2c95e0af19a95ec77119ad8033.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 701, + 537, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 701, + 537, + 714 + ], + "spans": [ + { + "bbox": [ + 306, + 701, + 537, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 701, + 537, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_k" + }, + { + "bbox": [ + 306, + 701, + 537, + 714 + ], + "type": "text", + "content": " is the color of point " + }, + { + "bbox": [ + 306, + 701, + 537, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_k" + }, + { + "bbox": [ + 306, + 701, + 537, + 714 + ], + "type": "text", + "content": ", as described in Eq. (2)." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20032" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 111, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 111, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 111, + 85 + ], + "type": "text", + "content": "3.3. 
Training" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "spans": [ + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "text", + "content": "Given the rendered pixel color " + }, + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "inline_equation", + "content": "C(\\mathbf{u})" + }, + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "text", + "content": ", we compare it with the ground-truth pixel color " + }, + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "inline_equation", + "content": "C_{gt}(\\mathbf{u})" + }, + { + "bbox": [ + 47, + 90, + 287, + 126 + ], + "type": "text", + "content": " to optimize our model in an end-to-end fashion using the following loss function:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 98, + 135, + 287, + 160 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 135, + 287, + 160 + ], + "spans": [ + { + "bbox": [ + 98, + 135, + 287, + 160 + ], + "type": "interline_equation", + "content": "L _ {i m g} = \\sum_ {\\mathbf {u} \\in \\mathcal {U}} | | C (\\mathbf {u}) - C _ {g t} (\\mathbf {u}) | | _ {2} ^ {2}, \\tag {5}", + "image_path": "967c0ca306c4e4a7f664e9d76d58582e7fdbc566fdc53c1406050d495ce8dd73.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 169, + 287, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 169, + 287, + 194 + ], + "spans": [ + { + "bbox": [ + 47, + 169, + 287, + 194 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 169, + 287, + 194 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 47, + 169, + 287, + 194 + ], + "type": "text", + "content": " is the set of image pixels. In addition to the MSE loss " + }, + { + "bbox": [ + 47, + 169, + 287, + 194 + ], + "type": "inline_equation", + "content": "L_{img}" + }, + { + "bbox": [ + 47, + 169, + 287, + 194 + ], + "type": "text", + "content": ", we also apply the perceptual loss " + }, + { + "bbox": [ + 47, + 169, + 287, + 194 + ], + "type": "inline_equation", + "content": "L_{lpips}" + }, + { + "bbox": [ + 47, + 169, + 287, + 194 + ], + "type": "text", + "content": " [102]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 203, + 287, + 217 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 203, + 287, + 217 + ], + "spans": [ + { + "bbox": [ + 107, + 203, + 287, + 217 + ], + "type": "interline_equation", + "content": "L _ {l p i p s} = \\left\\| \\Phi (I) - \\Phi \\left(I _ {g t}\\right) \\right\\| _ {1}, \\tag {6}", + "image_path": "b15c37d54526a6c4febefb6c85e82af8641db6b81817e24a00cb9c875b136488.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": " is the perceptual function (a VGG16 network) and " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "I, I_{gt}" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": " are the rendered and ground-truth images, respectively. 
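As a rough illustration of Eqs. (5) and (6), the two image-space losses can be written as below. The VGG16 choice for Phi follows the text; the exact LPIPS normalization of [102] is omitted, and the function names are placeholders, not the released code.

```python
import torch

def image_loss(rendered: torch.Tensor, gt: torch.Tensor) -> torch.Tensor:
    """Eq. (5): sum of squared color errors over the set of sampled pixels U.
    `rendered` and `gt` hold the pixel colors C(u) and C_gt(u), shape (N, 3)."""
    return ((rendered - gt) ** 2).sum()

def perceptual_loss(phi: torch.nn.Module,
                    img: torch.Tensor,
                    img_gt: torch.Tensor) -> torch.Tensor:
    """Eq. (6): L1 distance between deep features of the rendered and
    ground-truth images. `phi` stands for the frozen perceptual network Phi,
    e.g. the feature stack of a pretrained VGG16 [78], mapping (B, 3, H, W)
    images to a feature tensor."""
    return (phi(img) - phi(img_gt)).abs().sum()

# One possible (illustrative) choice for phi:
#   phi = torchvision.models.vgg16(weights="IMAGENET1K_V1").features.eval()
```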
The perceptual loss [102] computes the difference in image features extracted from the VGG model [78]. Our experiments in Sec. 5.2 show that it effectively improves the perceived quality of the rendered image." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 297, + 287, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 297, + 287, + 357 + ], + "spans": [ + { + "bbox": [ + 46, + 297, + 287, + 357 + ], + "type": "text", + "content": "To regularize the optimization process of our proposed representation, we additionally apply mask supervision to dynamic regions of the target scene. We solely render point clouds of dynamic regions to obtain their masks, where the pixel value is obtained by:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 72, + 366, + 287, + 400 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 366, + 287, + 400 + ], + "spans": [ + { + "bbox": [ + 72, + 366, + 287, + 400 + ], + "type": "interline_equation", + "content": "M (\\mathbf {u}) = \\sum_ {k = 1} ^ {K} T _ {k} \\alpha_ {k}, \\text {w h e r e} T _ {k} = \\prod_ {j = 1} ^ {k - 1} (1 - \\alpha_ {j}). \\tag {7}", + "image_path": "db03f21bb039e75092f0b55b62cfb69e454f1e674351a237e2403f3b95fcdc41.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 409, + 162, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 409, + 162, + 420 + ], + "spans": [ + { + "bbox": [ + 47, + 409, + 162, + 420 + ], + "type": "text", + "content": "The mask loss is defined as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 88, + 428, + 287, + 454 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 428, + 287, + 454 + ], + "spans": [ + { + "bbox": [ + 88, + 428, + 287, + 454 + ], + "type": "interline_equation", + "content": "L _ {m s k} = - \\sum_ {\\mathbf {u} \\in \\mathcal {U} ^ {\\prime}} | | M (\\mathbf {u}) - M _ {g t} (\\mathbf {u}) | | _ {2} ^ {2}, \\tag {8}", + "image_path": "223d38823a4f07ab6bcc35dd34d39554f8e59ac265ad42d656aafbd7775e476d.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 463, + 287, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 463, + 287, + 511 + ], + "spans": [ + { + "bbox": [ + 47, + 463, + 287, + 511 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 463, + 287, + 511 + ], + "type": "inline_equation", + "content": "\\mathbf{U}'" + }, + { + "bbox": [ + 47, + 463, + 287, + 511 + ], + "type": "text", + "content": " means the set of pixels of the rendered mask, and " + }, + { + "bbox": [ + 47, + 463, + 287, + 511 + ], + "type": "inline_equation", + "content": "M_{gt}" + }, + { + "bbox": [ + 47, + 463, + 287, + 511 + ], + "type": "text", + "content": " is the ground-truth mask of 2D dynamic regions. This effectively regularizes the optimization of the geometry of dynamic regions by confining it to the visual hulls." 
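Eqs. (4) and (7) share the same front-to-back compositing over the K depth-peeled layers, and Eq. (8) then penalizes the squared error between the rendered and ground-truth dynamic-region masks. A compact sketch, with illustrative tensor shapes rather than the actual renderer, is:

```python
import torch

def composite(alphas, colors=None):
    """Front-to-back compositing shared by Eq. (4) (color) and Eq. (7) (mask).

    alphas: (N, K) per-pixel densities alpha_k, sorted near-to-far by the depth
    peeling passes; colors: optional (N, K, 3) per-layer colors c_k.
    T_k = prod_{j<k} (1 - alpha_j) is the transmittance in front of layer k.
    """
    trans = torch.cumprod(1.0 - alphas, dim=-1)
    trans = torch.cat([torch.ones_like(trans[..., :1]), trans[..., :-1]], dim=-1)
    weights = trans * alphas                                  # T_k * alpha_k, (N, K)
    mask = weights.sum(dim=-1)                                # Eq. (7): M(u)
    if colors is None:
        return mask
    color = (weights.unsqueeze(-1) * colors).sum(dim=-2)      # Eq. (4): C(u)
    return color, mask

def mask_loss(mask: torch.Tensor, mask_gt: torch.Tensor) -> torch.Tensor:
    """Eq. (8): squared error against the ground-truth visual-hull mask M_gt."""
    return ((mask - mask_gt) ** 2).sum()
```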
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 59, + 511, + 204, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 511, + 204, + 522 + ], + "spans": [ + { + "bbox": [ + 59, + 511, + 204, + 522 + ], + "type": "text", + "content": "The final loss function is defined as" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 85, + 533, + 287, + 546 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 533, + 287, + 546 + ], + "spans": [ + { + "bbox": [ + 85, + 533, + 287, + 546 + ], + "type": "interline_equation", + "content": "L = L _ {i m g} + \\lambda_ {l p i p s} L _ {l p i p s} + \\lambda_ {m s k} L _ {m s k}, \\tag {9}", + "image_path": "7c6b35aed66e68d57db807a2441b39978dabbc0f080e5d6ef243cc09e1fb30d4.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 555, + 287, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 555, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 47, + 555, + 287, + 579 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 555, + 287, + 579 + ], + "type": "inline_equation", + "content": "\\lambda_{lpips}" + }, + { + "bbox": [ + 47, + 555, + 287, + 579 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 555, + 287, + 579 + ], + "type": "inline_equation", + "content": "\\lambda_{msk}" + }, + { + "bbox": [ + 47, + 555, + 287, + 579 + ], + "type": "text", + "content": " are hyperparameters controlling weights of correspondings losses." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 587, + 114, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 587, + 114, + 598 + ], + "spans": [ + { + "bbox": [ + 47, + 587, + 114, + 598 + ], + "type": "text", + "content": "3.4. Inference" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": "After training, we apply a few acceleration techniques to boost the rendering speed of our model. First, we precompute the point location " + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{p}" + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": ", radius " + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": ", density " + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": ", SH coefficients " + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": " and color blending weights " + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "inline_equation", + "content": "w_{i}" + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": " before inference, which are stored at the main memory. During rendering, these properties are asynchronously streamed onto the graphics card, overlapping rasterization with memory copy to achieve an optimal rendering speed [74, 77]. 
After applying this technique, the runtime computation is reduced to only a" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": "depth peeling evaluation (Sec. 3.2) and a spherical harmonics evaluation (Eq. (2)). Second, we convert the model from 32-bit floats to 16-bits for efficient memory access, which increases FPS by 20 and leads to no visible performance loss. Third, the number of rendering passes " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": " for the differentiable depth peeling algorithm is reduced from 15 to 12, also leading to a 20 FPS speedup with no visual quality change. Detailed analyses of rendering speed can be found in Sec. 5.2 and the supplementary material." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 191, + 441, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 191, + 441, + 205 + ], + "spans": [ + { + "bbox": [ + 305, + 191, + 441, + 205 + ], + "type": "text", + "content": "4. Implementation Details" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "spans": [ + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "text", + "content": "**Optimization.** 4K4D is trained using the PyTorch framework [66]. Using the Adam optimizer [34] with a learning rate " + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "inline_equation", + "content": "5e^{-3}" + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "text", + "content": ", our models typically converge after 800k iterations for a sequence length of 200 frames, which takes around 24 hours on a single RTX 4090 GPU. Specifically, the learning rate of point positions is set to " + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "inline_equation", + "content": "1e^{-5}" + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "text", + "content": ", and the regularization loss weights " + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "inline_equation", + "content": "\\lambda_{lpips}" + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "inline_equation", + "content": "\\lambda_{msk}" + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "text", + "content": " are set to " + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "inline_equation", + "content": "1e^{-3}" + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "text", + "content": ". During training, the number of passes " + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "text", + "content": " for the differentiable depth peeling is set to 15, and the number of nearest input views " + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "inline_equation", + "content": "N'" + }, + { + "bbox": [ + 304, + 212, + 547, + 356 + ], + "type": "text", + "content": " is set to 4. 
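The optimization settings quoted above map directly onto PyTorch parameter groups. The sketch below is only indicative, with made-up module names standing in for the actual model components; it is not the released training code.

```python
import torch

# Illustrative stand-ins: an appearance/feature network trained at the base
# learning rate, and per-point positions trained with a much smaller one.
appearance_net = torch.nn.Sequential(
    torch.nn.Linear(32, 64), torch.nn.ReLU(), torch.nn.Linear(64, 3))
point_positions = torch.nn.Parameter(torch.zeros(250_000, 3))  # ~250k points per frame

optimizer = torch.optim.Adam([
    {"params": appearance_net.parameters(), "lr": 5e-3},  # base learning rate (Sec. 4)
    {"params": [point_positions],           "lr": 1e-5},  # point positions move slowly
])

lambda_lpips, lambda_msk = 1e-3, 1e-3  # loss weights of Eq. (9)

def training_step(compute_losses):
    """One schematic iteration out of the ~800k used for a 200-frame sequence.
    `compute_losses` is a placeholder for the full render-and-compare pipeline
    returning (L_img, L_lpips, L_msk)."""
    l_img, l_lpips, l_msk = compute_losses()
    loss = l_img + lambda_lpips * l_lpips + lambda_msk * l_msk  # Eq. (9)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```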
The rendering speed of our method is reported on an RTX 3090 GPU for the experiments in Sec. 5 unless otherwise stated." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 358, + 547, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 358, + 547, + 514 + ], + "spans": [ + { + "bbox": [ + 304, + 358, + 547, + 514 + ], + "type": "text", + "content": "Initialization of point clouds. We leverage existing multi-view reconstruction methods to initialize the point clouds. For dynamic regions, we use segmentation methods [50] to obtain their masks in input images and utilize the space carving algorithm [37] to extract their coarse geometry. For static background regions, we leverage foreground masks to compute the mask-weighted average of background pixels along all frames, producing background images without the foreground content. Then, an Instant-NGP [60] model is trained on these images, from which we obtain the initial point clouds. After the initialization, the number of points for the dynamic regions is typically 250k per frame, and the static background regions typically consist of 300k points." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 525, + 387, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 525, + 387, + 539 + ], + "spans": [ + { + "bbox": [ + 306, + 525, + 387, + 539 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 545, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 547, + 713 + ], + "type": "text", + "content": "Datasets. We train and evaluate our method 4K4D on multiple widely used multi-view datasets, including DNA-Rendering [12], ENeRF-Outdoor [49] and NHR [93]. DNA-Rendering [12] records 10-second clips of dynamic humans and objects at 15 FPS using 4K and 2K cameras with 60 views. This dataset is very challenging due to the complex clothing and fast motions. We conduct experiments on 4 sequences of DNA-Rendering, with " + }, + { + "bbox": [ + 304, + 545, + 547, + 713 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 304, + 545, + 547, + 713 + ], + "type": "text", + "content": " of the views as training set and the rest as evaluation set. ENeRF-Outdoor [49] records multiple dynamic humans and objects in an outdoor environment at 30FPS using 1080p cameras. We select three 100-frame sequences with 6 different actors (2 for each sequence) holding objects for evaluation. 
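For the static-background initialization described above, the per-camera background image is a mask-weighted average over all frames: each pixel averages only the frames in which the foreground masks leave it uncovered. A small sketch, with illustrative array names:

```python
import torch

def background_image(frames: torch.Tensor,    # (T, H, W, 3) all frames of one camera
                     fg_masks: torch.Tensor   # (T, H, W)   1 = dynamic foreground
                     ) -> torch.Tensor:
    """Mask-weighted average over time: each pixel averages only the frames in
    which it is not covered by the foreground, yielding a clean background
    image on which an Instant-NGP [60] model can then be trained."""
    bg_weight = (1.0 - fg_masks).unsqueeze(-1)        # (T, H, W, 1)
    summed = (frames * bg_weight).sum(dim=0)          # (H, W, 3)
    counts = bg_weight.sum(dim=0).clamp(min=1e-6)     # avoid division by zero
    return summed / counts
```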
This dataset is difficult for dynamic view synthesis in that not" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20033" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 71, + 171, + 178 + ], + "blocks": [ + { + "bbox": [ + 49, + 71, + 171, + 178 + ], + "lines": [ + { + "bbox": [ + 49, + 71, + 171, + 178 + ], + "spans": [ + { + "bbox": [ + 49, + 71, + 171, + 178 + ], + "type": "image", + "image_path": "da01e3d1ddadfb94305be6ee9665ddfa42d9879fee5e3749e0013cae36d2789b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 173, + 72, + 295, + 178 + ], + "blocks": [ + { + "bbox": [ + 173, + 72, + 295, + 178 + ], + "lines": [ + { + "bbox": [ + 173, + 72, + 295, + 178 + ], + "spans": [ + { + "bbox": [ + 173, + 72, + 295, + 178 + ], + "type": "image", + "image_path": "d6a4a05671ed372f6cc4eaf2356e03e5851e55f31dfed36e5afa7ab9cfdc5987.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 298, + 72, + 419, + 178 + ], + "blocks": [ + { + "bbox": [ + 298, + 72, + 419, + 178 + ], + "lines": [ + { + "bbox": [ + 298, + 72, + 419, + 178 + ], + "spans": [ + { + "bbox": [ + 298, + 72, + 419, + 178 + ], + "type": "image", + "image_path": "8448b6205d5f3750fdd32862e4c2e07339d1e31c341f9093e0b692bfa213231b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 423, + 72, + 545, + 178 + ], + "blocks": [ + { + "bbox": [ + 423, + 72, + 545, + 178 + ], + "lines": [ + { + "bbox": [ + 423, + 72, + 545, + 178 + ], + "spans": [ + { + "bbox": [ + 423, + 72, + 545, + 178 + ], + "type": "image", + "image_path": "54c759ca7544129ece803df48055b02a5160ce6d41db1949ca24e86e9ce82588.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 49, + 180, + 171, + 289 + ], + "blocks": [ + { + "bbox": [ + 49, + 180, + 171, + 289 + ], + "lines": [ + { + "bbox": [ + 49, + 180, + 171, + 289 + ], + "spans": [ + { + "bbox": [ + 49, + 180, + 171, + 289 + ], + "type": "image", + "image_path": "8f56c783ddcdeebf5901ee9125abb11118f008d803cf8d5646b153d7fc16d626.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 84, + 293, + 136, + 301 + ], + "lines": [ + { + "bbox": [ + 84, + 293, + 136, + 301 + ], + "spans": [ + { + "bbox": [ + 84, + 293, + 136, + 301 + ], + "type": "text", + "content": "Ground Truth" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 173, + 180, + 295, + 289 + ], + "blocks": [ + { + "bbox": [ + 173, + 180, + 295, + 289 + ], + "lines": [ + { + "bbox": [ + 173, + 180, + 295, + 289 + ], + "spans": [ + { + "bbox": [ + 173, + 180, + 295, + 289 + ], + "type": "image", + "image_path": "7c4fcdde4f5179c67115d00467d021fbb193d34092ad9b99e4921e7228f1ffdc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 205, + 293, + 263, + 301 + ], + "lines": [ + { + "bbox": [ + 205, + 293, + 263, + 
301 + ], + "spans": [ + { + "bbox": [ + 205, + 293, + 263, + 301 + ], + "type": "text", + "content": "Ours (141.7 FPS)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 298, + 180, + 419, + 289 + ], + "blocks": [ + { + "bbox": [ + 298, + 180, + 419, + 289 + ], + "lines": [ + { + "bbox": [ + 298, + 180, + 419, + 289 + ], + "spans": [ + { + "bbox": [ + 298, + 180, + 419, + 289 + ], + "type": "image", + "image_path": "9ca5a62f0b9b3a0fc51c8298f082a99c38a2c2599c9a05a58470a6e6a8cd5746.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 326, + 293, + 386, + 301 + ], + "lines": [ + { + "bbox": [ + 326, + 293, + 386, + 301 + ], + "spans": [ + { + "bbox": [ + 326, + 293, + 386, + 301 + ], + "type": "text", + "content": "ENeRF (11.3 FPS)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 305, + 546, + 328 + ], + "lines": [ + { + "bbox": [ + 46, + 305, + 546, + 328 + ], + "spans": [ + { + "bbox": [ + 46, + 305, + 546, + 328 + ], + "type": "text", + "content": "Figure 3. Qualitative comparison on the ENeRF-Outdoor [49] dataset that contains " + }, + { + "bbox": [ + 46, + 305, + 546, + 328 + ], + "type": "inline_equation", + "content": "960 \\times 540" + }, + { + "bbox": [ + 46, + 305, + 546, + 328 + ], + "type": "text", + "content": " images. Our method achieves much higher rendering quality and can be rendered " + }, + { + "bbox": [ + 46, + 305, + 546, + 328 + ], + "type": "inline_equation", + "content": "14 \\times" + }, + { + "bbox": [ + 46, + 305, + 546, + 328 + ], + "type": "text", + "content": " faster than ENeRF[49]. More dynamic results can be found in the supplementary video." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 423, + 180, + 545, + 289 + ], + "blocks": [ + { + "bbox": [ + 423, + 180, + 545, + 289 + ], + "lines": [ + { + "bbox": [ + 423, + 180, + 545, + 289 + ], + "spans": [ + { + "bbox": [ + 423, + 180, + 545, + 289 + ], + "type": "image", + "image_path": "7ce9783f5b032071f106eee239d5ee3eb79e70e82d662012b281522fb2d7278c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 453, + 293, + 515, + 301 + ], + "lines": [ + { + "bbox": [ + 453, + 293, + 515, + 301 + ], + "spans": [ + { + "bbox": [ + 453, + 293, + 515, + 301 + ], + "type": "text", + "content": "KPlanes (1.4 FPS)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 343, + 287, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 343, + 287, + 380 + ], + "spans": [ + { + "bbox": [ + 46, + 343, + 287, + 380 + ], + "type": "text", + "content": "only are there multiple moving humans and objects, but the background is also dynamic due to cast shadows. More details can be found in the supplementary." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 390, + 190, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 390, + 190, + 403 + ], + "spans": [ + { + "bbox": [ + 47, + 390, + 190, + 403 + ], + "type": "text", + "content": "5.1. 
Comparison Experiments" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 411, + 288, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 411, + 288, + 662 + ], + "spans": [ + { + "bbox": [ + 46, + 411, + 288, + 662 + ], + "type": "text", + "content": "Comparison on DNA-Rendering [12]. Qualitative and quantitative comparisons on DNA-Rendering [12] are shown in Fig. 4 and Tabs. 1 and 3 respectively. As evident in Tab. 1, our method renders " + }, + { + "bbox": [ + 46, + 411, + 288, + 662 + ], + "type": "inline_equation", + "content": "30\\mathrm{x}" + }, + { + "bbox": [ + 46, + 411, + 288, + 662 + ], + "type": "text", + "content": " faster than the SOTA real-time dynamic view synthesis method ENeRF [49] with superior quality. Even when compared with concurrent work [48], our method still achieves " + }, + { + "bbox": [ + 46, + 411, + 288, + 662 + ], + "type": "inline_equation", + "content": "13\\mathrm{x}" + }, + { + "bbox": [ + 46, + 411, + 288, + 662 + ], + "type": "text", + "content": " speedup and produces consistently higher quality images. As shown in Fig. 4, KPlanes [19] could not recover the highly detailed appearance and geometry of the 4D dynamic scene. Other image-based methods [48, 49, 90] produce high-quality appearance. However, they tend to produce blurry results around occlusions and edges, leading to degradation of the visual quality while maintaining interactive framerate at best. When compared with 3DGS [33] on the first frame of each sequence, our method achieves a much better storage efficiency " + }, + { + "bbox": [ + 46, + 411, + 288, + 662 + ], + "type": "inline_equation", + "content": "(50\\times)" + }, + { + "bbox": [ + 46, + 411, + 288, + 662 + ], + "type": "text", + "content": " thanks to our compact 4D feature grid and image blending model. Moreover, due to the simplicity of our point-based representation, our method is less prone to overfit the training views. More details of the comparison with 3DGS can be found in the supplementary material." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 665, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 713 + ], + "type": "text", + "content": "Comparison on ENeRF-Outdoor [49]. Fig. 3 and Tabs. 2 and 3 provides qualitative and quantitative results on the ENeRF-Outdoor [49] dataset. Even on the challenging ENeRF-Outdoor dataset with multiple actors and the back" + } + ] + } + ], + "index": 16 + }, + { + "type": "table", + "bbox": [ + 307, + 388, + 545, + 472 + ], + "blocks": [ + { + "bbox": [ + 305, + 342, + 547, + 386 + ], + "lines": [ + { + "bbox": [ + 305, + 342, + 547, + 386 + ], + "spans": [ + { + "bbox": [ + 305, + 342, + 547, + 386 + ], + "type": "text", + "content": "Table 1. Quantitative comparison on the DNA-Rendering [12] dataset. Image resolutions are " + }, + { + "bbox": [ + 305, + 342, + 547, + 386 + ], + "type": "inline_equation", + "content": "{1024} \\times {1224}" + }, + { + "bbox": [ + 305, + 342, + 547, + 386 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 342, + 547, + 386 + ], + "type": "inline_equation", + "content": "{1125} \\times {1536}" + }, + { + "bbox": [ + 305, + 342, + 547, + 386 + ], + "type": "text", + "content": " . Metrics are averaged over all scenes. Green and yellow cell colors indicate the best and the second best results, respectively." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 388, + 545, + 472 + ], + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 472 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 472 + ], + "type": "table", + "html": "
<table>
<tr><td></td><td>PSNR ↑</td><td>SSIM ↑</td><td>LPIPS ↓</td><td>FPS</td></tr>
<tr><td>ENeRF [49]</td><td>28.108</td><td>0.972</td><td>0.056</td><td>6.011</td></tr>
<tr><td>IBRNet [90]</td><td>27.844</td><td>0.967</td><td>0.081</td><td>0.100</td></tr>
<tr><td>KPlanes [19]</td><td>27.452</td><td>0.952</td><td>0.118</td><td>0.640</td></tr>
<tr><td>Im4D [48]</td><td>28.991</td><td>0.973</td><td>0.062</td><td>15.360</td></tr>
<tr><td>Ours</td><td>31.173</td><td>0.976</td><td>0.055</td><td>203.610</td></tr>
</table>
", + "image_path": "0cefeaeea22cd1a61d33a81749a748dab225c5792011fa6fc61b953ea6130cc0.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_body" + } + ], + "index": 18 + }, + { + "type": "table", + "bbox": [ + 307, + 514, + 545, + 586 + ], + "blocks": [ + { + "bbox": [ + 305, + 480, + 545, + 513 + ], + "lines": [ + { + "bbox": [ + 305, + 480, + 545, + 513 + ], + "spans": [ + { + "bbox": [ + 305, + 480, + 545, + 513 + ], + "type": "text", + "content": "Table 2. Quantitative comparison on the ENeRF-Ourdoor [49] dataset. This dataset includes " + }, + { + "bbox": [ + 305, + 480, + 545, + 513 + ], + "type": "inline_equation", + "content": "{960} \\times {540}" + }, + { + "bbox": [ + 305, + 480, + 545, + 513 + ], + "type": "text", + "content": " images. Green and yellow cell colors indicate the best and the second-best results, respectively." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 514, + 545, + 586 + ], + "lines": [ + { + "bbox": [ + 307, + 514, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 307, + 514, + 545, + 586 + ], + "type": "table", + "html": "
<table>
<tr><td></td><td>PSNR ↑</td><td>SSIM ↑</td><td>LPIPS ↓</td><td>FPS</td></tr>
<tr><td>ENeRF [49]</td><td>25.452</td><td>0.809</td><td>0.273</td><td>11.309</td></tr>
<tr><td>IBRNet [90]</td><td>24.966</td><td>0.929</td><td>0.172</td><td>0.140</td></tr>
<tr><td>KPlanes [19]</td><td>21.310</td><td>0.735</td><td>0.454</td><td>1.370</td></tr>
<tr><td>Ours</td><td>25.815</td><td>0.898</td><td>0.147</td><td>141.665</td></tr>
</table>
", + "image_path": "34401f1811756711cd316a74bc1cd941f9b4e20aa29a4328384a23d71f8d066a.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": "ground, our method still achieves notably better results while rendering at over 140 FPS. ENeRF [49] produces blurry results on this challenging dataset, and the rendering results of IBRNet [90] contain black artifacts around the edges of the images as shown in Fig. 3. K-Planse [19] fails to reconstruct the dynamic humans and varying background regions. 3DGS [33] not only introduces much higher storage cost than our method " + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "inline_equation", + "content": "(45\\times)" + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": ", but also faces even more pronounced overfitting problem with smaller number of views (18 for ENeRF-Outdoor). As evident in Tab. 3 and the" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20034" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 70, + 545, + 372 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 545, + 372 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 545, + 372 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 545, + 372 + ], + "type": "image", + "image_path": "ccffc7287480a9bc28cc74c65427161fb4779d7b7afd299255e044e104a274e3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 375, + 546, + 398 + ], + "lines": [ + { + "bbox": [ + 46, + 375, + 546, + 398 + ], + "spans": [ + { + "bbox": [ + 46, + 375, + 546, + 398 + ], + "type": "text", + "content": "Figure 4. Qualitative comparison on the DNA-Rendering [12] dataset that contains " + }, + { + "bbox": [ + 46, + 375, + 546, + 398 + ], + "type": "inline_equation", + "content": "1024 \\times 1224" + }, + { + "bbox": [ + 46, + 375, + 546, + 398 + ], + "type": "text", + "content": " (and " + }, + { + "bbox": [ + 46, + 375, + 546, + 398 + ], + "type": "inline_equation", + "content": "1125 \\times 1536" + }, + { + "bbox": [ + 46, + 375, + 546, + 398 + ], + "type": "text", + "content": ") images. Our method can produce high-fidelity images at over 200 FPS while other competitors fail to produce high-quality results for highly dynamic scenes." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 408, + 288, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 408, + 288, + 458 + ], + "spans": [ + { + "bbox": [ + 46, + 408, + 288, + 458 + ], + "type": "text", + "content": "supplementary material, the overfitting severely degrades the rendering quality. Their rendering speed is slower than ours due to excessive point count. More details of the comparison with 3DGS are present in the supplementary material." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 464, + 147, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 464, + 147, + 475 + ], + "spans": [ + { + "bbox": [ + 47, + 464, + 147, + 475 + ], + "type": "text", + "content": "5.2. Ablation Studies" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 482, + 287, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 482, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 46, + 482, + 287, + 567 + ], + "type": "text", + "content": "We perform ablation studies on the proposed components on the 150-frame 0013_01 sequence of the DNA-Rendering [12] dataset. Our method can be rendered at over 200 FPS with state-of-the-art quality and maintains a only 2MB per frame storage overhead. More detailed rendering speed analysis and breakdown and storage cost analysis can be found in the supplementary material." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 567, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 567, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 567, + 287, + 652 + ], + "type": "text", + "content": "Ablation study on the 4D embedding. The \"w/o f\" variant removes the proposed 4D embedding (Sec. 3.1) module and replaces it with a per-frame and per-point estimizable position, radius, density, and scale. As shown in Fig. 5 and Tab. 4, the \"w/o f\" variant produces blurry and noisy geometry without the 4D embedding " + }, + { + "bbox": [ + 46, + 567, + 287, + 652 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 46, + 567, + 287, + 652 + ], + "type": "text", + "content": ", which leads to the inferior rendering quality." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 653, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 288, + 714 + ], + "type": "text", + "content": "Ablation study on the hybrid appearance model. The \"w/o " + }, + { + "bbox": [ + 46, + 653, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ibr}" + }, + { + "bbox": [ + 46, + 653, + 288, + 714 + ], + "type": "text", + "content": "\" variant removes " + }, + { + "bbox": [ + 46, + 653, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ibr}" + }, + { + "bbox": [ + 46, + 653, + 288, + 714 + ], + "type": "text", + "content": " in the appearance formulation Eq. (2), which not only leads to less details on the recovered appearance but also significantly impedes the quality of the geometry. Adding an additional degree for the SH" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 306, + 465, + 547, + 531 + ], + "blocks": [ + { + "bbox": [ + 304, + 407, + 547, + 462 + ], + "lines": [ + { + "bbox": [ + 304, + 407, + 547, + 462 + ], + "spans": [ + { + "bbox": [ + 304, + 407, + 547, + 462 + ], + "type": "text", + "content": "Table 3. Quantitative comparison on the first frame of all sequences of DNA-Rendering [12] (1024×1224 (and 1125×1536) images) and ENeRF-Outdoor [49] (960×540 images). Metrics are averaged for each dataset. \"Storage\" indicates the disk file size of the trained models (including source images for our method)." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 306, + 465, + 547, + 531 + ], + "lines": [ + { + "bbox": [ + 306, + 465, + 547, + 531 + ], + "spans": [ + { + "bbox": [ + 306, + 465, + 547, + 531 + ], + "type": "table", + "html": "
<table>
<tr><td>Dataset</td><td>Method</td><td>PSNR</td><td>LPIPS</td><td>FPS</td><td>Storage</td><td>Training</td></tr>
<tr><td rowspan="2">DNA-Rendering</td><td>3DGS [33]</td><td>31.16</td><td>0.049</td><td>113.2</td><td>224 MB</td><td>5min</td></tr>
<tr><td>Ours</td><td>31.87</td><td>0.046</td><td>241.7</td><td>4.7 MB</td><td>15min</td></tr>
<tr><td rowspan="2">ENeRF-Outdoor</td><td>3DGS [33]</td><td>21.63</td><td>0.349</td><td>88.4</td><td>715 MB</td><td>10min</td></tr>
<tr><td>Ours</td><td>26.54</td><td>0.145</td><td>148.6</td><td>16.0 MB</td><td>30min</td></tr>
</table>
", + "image_path": "2908afefe3d34c69f40f4a3ad8c8b8d0ae2e4ce746b3cc087e5dce9c459531ae.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 540, + 547, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 540, + 547, + 599 + ], + "spans": [ + { + "bbox": [ + 304, + 540, + 547, + 599 + ], + "type": "text", + "content": "coefficients does not lead to a significant performance change (PSNR 30.129 vs. 30.259). Comparatively, our proposed method produces high-fidelity rendering with better details. A visualization of the view-dependent effect produced by " + }, + { + "bbox": [ + 304, + 540, + 547, + 599 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{sh}" + }, + { + "bbox": [ + 304, + 540, + 547, + 599 + ], + "type": "text", + "content": " can be found in the supplementary material." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 602, + 547, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 602, + 547, + 674 + ], + "spans": [ + { + "bbox": [ + 304, + 602, + 547, + 674 + ], + "type": "text", + "content": "Ablation study on loss functions. As shown in Tab. 4, removing the " + }, + { + "bbox": [ + 304, + 602, + 547, + 674 + ], + "type": "inline_equation", + "content": "L_{lpips}" + }, + { + "bbox": [ + 304, + 602, + 547, + 674 + ], + "type": "text", + "content": " term not only reduces the perceptual quality (LPIPS score) but also leads to the degradation of other performance metrics. For the highly dynamic DNA-Rendering [12] dataset, the mask loss " + }, + { + "bbox": [ + 304, + 602, + 547, + 674 + ], + "type": "inline_equation", + "content": "L_{msk}" + }, + { + "bbox": [ + 304, + 602, + 547, + 674 + ], + "type": "text", + "content": " helps with regularizing the optimization of the dynamic geometry." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "content": "Rendering speed on different GPUs and resolutions. We additionally report the rendering speed of our method on different hardware (RTX 3060, RTX 3090, and RTX 4090)" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20035" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 67, + 549, + 266 + ], + "blocks": [ + { + "bbox": [ + 47, + 67, + 549, + 266 + ], + "lines": [ + { + "bbox": [ + 47, + 67, + 549, + 266 + ], + "spans": [ + { + "bbox": [ + 47, + 67, + 549, + 266 + ], + "type": "image", + "image_path": "26be9865e872a28d203df62580367e5c0a2e13957b3ebcc4f9fe1d7cb443b183.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 274, + 547, + 299 + ], + "lines": [ + { + "bbox": [ + 46, + 274, + 547, + 299 + ], + "spans": [ + { + "bbox": [ + 46, + 274, + 547, + 299 + ], + "type": "text", + "content": "Figure 5. Ablation studies on the 0013_01 sequence of DNA-Rendering [12]. Removing our proposed components leads to noisy geometry and blurry appearance. 
Our method produces high-fidelity results with perceptually accurate shapes and colors. See Sec. 5.2 for more details." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 312, + 289, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 312, + 289, + 387 + ], + "spans": [ + { + "bbox": [ + 46, + 312, + 289, + 387 + ], + "type": "text", + "content": "with different resolutions (720p, 1080p, and 4K (2160p)) in Tab. 5. The rendering speed reported here contains the overhead of the interactive GUI. 4K4D achieves real-time rendering speed even when rendering 4K (2160p) images on commodity hardware as shown in the table. More real-time rendering demos can be found in the supplementary video." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 402, + 200, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 402, + 200, + 415 + ], + "spans": [ + { + "bbox": [ + 47, + 402, + 200, + 415 + ], + "type": "text", + "content": "6. Conclusion and Discussion" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 424, + 287, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 424, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 424, + 287, + 568 + ], + "type": "text", + "content": "In this paper, we provide a neural point cloud-based representation, 4K4D, for real-time rendering of dynamic 3D scenes at 4K resolution. We build 4K4D upon a 4D feature grid to naturally regularize the points and develop a novel hybrid appearance model for high-quality rendering. Furthermore, we develop a differentiable depth peeling algorithm that utilizes the hardware rasterization pipeline to effectively optimize and efficiently render the proposed model. In our experiments, we demonstrate that 4K4D not only achieves state-of-the-art rendering quality but also exhibits a more than " + }, + { + "bbox": [ + 46, + 424, + 287, + 568 + ], + "type": "inline_equation", + "content": "30 \\times" + }, + { + "bbox": [ + 46, + 424, + 287, + 568 + ], + "type": "text", + "content": " increase in rendering speed (over 200FPS at 1080p on an RTX 3090 GPU)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": "However, our method still has some limitations. For one, 4K4D cannot produce correspondences of points across frames, which are important for some downstream tasks. Moreover, the storage cost for 4K4D increases linearly with the number of video frames, so our method has difficulty in modeling long volumetric videos. How to model correspondences and reduce the storage cost for long videos could be two interesting problems for future works. Moreover, the rendering quality of our method also depends on the resolution of input images. While our method achieves real-time rendering at 4K resolution, 4K-quality rendering can only be achieved with sufficient input resolution." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 307, + 369, + 545, + 464 + ], + "blocks": [ + { + "bbox": [ + 304, + 311, + 547, + 366 + ], + "lines": [ + { + "bbox": [ + 304, + 311, + 547, + 366 + ], + "spans": [ + { + "bbox": [ + 304, + 311, + 547, + 366 + ], + "type": "text", + "content": "Table 4. 
Ablation studies on the 150-frame 0013_01 sequence of the DNA-Rendering dataset [12]. \"w/o f\" indicates replacing the 4D embedding with a per-frame and per-point optimizable position, radius, density, and scale. See Sec. 5.2 for more detailed descriptions of the abbreviations." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 369, + 545, + 464 + ], + "lines": [ + { + "bbox": [ + 307, + 369, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 307, + 369, + 545, + 464 + ], + "type": "table", + "html": "
<table>
<tr><td></td><td>PSNR ↑</td><td>SSIM ↑</td><td>LPIPS ↓</td><td>Model Size</td></tr>
<tr><td>w/o f</td><td>29.779</td><td>0.967</td><td>0.057</td><td>1304.0 MiB</td></tr>
<tr><td>w/o c_ibr</td><td>30.259</td><td>0.973</td><td>0.054</td><td>225.0 MiB</td></tr>
<tr><td>w/o c_sh</td><td>31.946</td><td>0.981</td><td>0.040</td><td>225.0 MiB</td></tr>
<tr><td>w/o L_lpips</td><td>31.661</td><td>0.979</td><td>0.063</td><td>225.0 MiB</td></tr>
<tr><td>w/o L_msk</td><td>29.115</td><td>0.965</td><td>0.073</td><td>225.0 MiB</td></tr>
<tr><td>Ours</td><td>31.990</td><td>0.982</td><td>0.040</td><td>225.0 MiB</td></tr>
</table>
", + "image_path": "4d32d1cc5f5bfd01b024a159ea5c6f2d6b78af6624763ee21fe07f3baa3a1c70.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 307, + 557, + 545, + 652 + ], + "blocks": [ + { + "bbox": [ + 304, + 466, + 547, + 555 + ], + "lines": [ + { + "bbox": [ + 304, + 466, + 547, + 555 + ], + "spans": [ + { + "bbox": [ + 304, + 466, + 547, + 555 + ], + "type": "text", + "content": "Table 5. Rendering speed on different GPUs and resolutions. The results are recorded on the first frame of the 0013_01 sequence of DNA-Rendering [12] and the actor1_4 sequence of ENeRF-Outdoor [49] with the interactive GUI. Resolutions are set to 720p " + }, + { + "bbox": [ + 304, + 466, + 547, + 555 + ], + "type": "inline_equation", + "content": "(720\\times 1280)" + }, + { + "bbox": [ + 304, + 466, + 547, + 555 + ], + "type": "text", + "content": ", 1080p " + }, + { + "bbox": [ + 304, + 466, + 547, + 555 + ], + "type": "inline_equation", + "content": "(1080\\times 1920)" + }, + { + "bbox": [ + 304, + 466, + 547, + 555 + ], + "type": "text", + "content": ", and 4K " + }, + { + "bbox": [ + 304, + 466, + 547, + 555 + ], + "type": "inline_equation", + "content": "(2160\\times 3840)" + }, + { + "bbox": [ + 304, + 466, + 547, + 555 + ], + "type": "text", + "content": ". Even with the overhead of the interactive GUI (\"w/ GUI\"), our method still achieves unprecedented rendering speed. More real-time rendering results can be found in the supplementary video." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 557, + 545, + 652 + ], + "lines": [ + { + "bbox": [ + 307, + 557, + 545, + 652 + ], + "spans": [ + { + "bbox": [ + 307, + 557, + 545, + 652 + ], + "type": "table", + "html": "
<table>
<tr><td>Dataset</td><td>Res.</td><td>RTX 3060</td><td>RTX 3090</td><td>RTX 4090</td></tr>
<tr><td rowspan="3">DNA-Rendering [12] w/ GUI</td><td>720p</td><td>173.8 FPS</td><td>246.9 FPS</td><td>431.0 FPS</td></tr>
<tr><td>1080p</td><td>138.7 FPS</td><td>233.1 FPS</td><td>409.8 FPS</td></tr>
<tr><td>4K</td><td>90.0 FPS</td><td>147.4 FPS</td><td>288.8 FPS</td></tr>
<tr><td rowspan="3">ENeRF-Outdoor [49] w/ GUI</td><td>720p</td><td>90.5 FPS</td><td>130.5 FPS</td><td>351.5 FPS</td></tr>
<tr><td>1080p</td><td>66.1 FPS</td><td>103.6 FPS</td><td>249.7 FPS</td></tr>
<tr><td>4K</td><td>25.1 FPS</td><td>47.2 FPS</td><td>85.1 FPS</td></tr>
</table>
", + "image_path": "9524b68635e6393af8e1923efa4f5dc5fd39eb9f1f4d5e52f60f4cfeb01dd385.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 658, + 404, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 404, + 672 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 404, + 672 + ], + "type": "text", + "content": "Acknowledgement" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "content": "The authors would like to acknowledge support from NSFC (No. 62172364) and Information Technology Center and State Key Lab of CAD&CG, Zhejiang University." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20036" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 91, + 288, + 704 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 58, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 58, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Kara-Ali Aliev, Artem Sevastopolsky, Maria Kolos, Dmitry Ulyanov, and Victor Lempitsky. Neural point-based graphics. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXII 16, pages 696-712. Springer, 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 145, + 288, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 145, + 288, + 210 + ], + "spans": [ + { + "bbox": [ + 58, + 145, + 288, + 210 + ], + "type": "text", + "content": "[2] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023. 1, 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 58, + 210, + 288, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 210, + 288, + 263 + ], + "spans": [ + { + "bbox": [ + 58, + 210, + 288, + 263 + ], + "type": "text", + "content": "[3] Benjamin Attal, Jia-Bin Huang, Michael Zollhöfer, Johannes Kopf, and Changil Kim. Learning neural light fields with ray-space embedding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19819-19829, 2022. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 264, + 288, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 264, + 288, + 296 + ], + "spans": [ + { + "bbox": [ + 58, + 264, + 288, + 296 + ], + "type": "text", + "content": "[4] Louis Bavoil and Kevin Myers. Order independent transparency with dual depth peeling. NVIDIA OpenGL SDK, 1:12, 2008. 2, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 296, + 288, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 296, + 288, + 350 + ], + "spans": [ + { + "bbox": [ + 58, + 296, + 288, + 350 + ], + "type": "text", + "content": "[5] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 350, + 288, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 350, + 288, + 415 + ], + "spans": [ + { + "bbox": [ + 58, + 350, + 288, + 415 + ], + "type": "text", + "content": "[6] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, SIGGRAPH '01, page 425-432, New York, NY, USA, 2001. Association for Computing Machinery. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 58, + 415, + 288, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 415, + 288, + 457 + ], + "spans": [ + { + "bbox": [ + 58, + 415, + 288, + 457 + ], + "type": "text", + "content": "[7] Dan Casas, Marco Volino, John Collomosse, and Adrian Hilton. 4d video textures for interactive character appearance. In Computer Graphics Forum, pages 371-380. Wiley Online Library, 2014. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 58, + 457, + 288, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 457, + 288, + 490 + ], + "spans": [ + { + "bbox": [ + 58, + 457, + 288, + 490 + ], + "type": "text", + "content": "[8] Gaurav Chaurasia, Sylvain Duchene, Olga Sorkine-Hornung, and George Drettakis. Depth synthesis and local warps for plausible image-based navigation. ACM TOG, 2013. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 58, + 490, + 288, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 490, + 288, + 510 + ], + "spans": [ + { + "bbox": [ + 58, + 490, + 288, + 510 + ], + "type": "text", + "content": "[9] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. arXiv, 2022. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 510, + 288, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 510, + 288, + 552 + ], + "spans": [ + { + "bbox": [ + 53, + 510, + 288, + 552 + ], + "type": "text", + "content": "[10] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In ICCV, 2021. 
2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 553, + 288, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 553, + 288, + 605 + ], + "spans": [ + { + "bbox": [ + 53, + 553, + 288, + 605 + ], + "type": "text", + "content": "[11] Zhiqin Chen, Thomas Funkhouser, Peter Hedman, and Andrea Tagliasacchi. Mobilenerf: Exploiting the polygon rasterization pipeline for efficient neural field rendering on mobile architectures. arXiv preprint arXiv:2208.00277, 2022. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 606, + 288, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 606, + 288, + 661 + ], + "spans": [ + { + "bbox": [ + 53, + 606, + 288, + 661 + ], + "type": "text", + "content": "[12] Wei Cheng, Ruixiang Chen, Wanqi Yin, Siming Fan, Keyu Chen, Honglin He, Huiwen Luo, Zhongang Cai, Jingbo Wang, Yang Gao, et al. Dna-rendering: A diverse neural actor repository for high-fidelity human-centric rendering. arXiv preprint arXiv:2307.10173, 2023. 1, 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 661, + 288, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 661, + 288, + 704 + ], + "spans": [ + { + "bbox": [ + 53, + 661, + 288, + 704 + ], + "type": "text", + "content": "[13] Alvaro Collet, Ming Chuang, Pat Sweeney, Don Gillett, Dennis Evseev, David Calabrese, Hugues Hoppe, Adam Kirk, and Steve Sullivan. High-quality streamable free-viewpoint video. ACM Transactions on Graphics (ToG)," + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 74, + 547, + 706 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 332, + 74, + 415, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 74, + 415, + 83 + ], + "spans": [ + { + "bbox": [ + 332, + 74, + 415, + 83 + ], + "type": "text", + "content": "34(4):1-13,2015.1,2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 83, + 547, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 83, + 547, + 115 + ], + "spans": [ + { + "bbox": [ + 312, + 83, + 547, + 115 + ], + "type": "text", + "content": "[14] Abe Davis, Marc Levoy, and Fredo Durand. Unstructured light fields. In Computer Graphics Forum, pages 305-314. Wiley Online Library, 2012. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 116, + 547, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 116, + 547, + 169 + ], + "spans": [ + { + "bbox": [ + 312, + 116, + 547, + 169 + ], + "type": "text", + "content": "[15] Mingsong Dou, Sameh Khamis, Yury Degtyarev, Philip Davidson, Sean Ryan Fanello, Adarsh Kowdle, Sergio Orts Escolano, Christoph Rhemann, David Kim, Jonathan Taylor, et al. Fusion4d: Real-time performance capture of challenging scenes. ACM TOG, 2016. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 170, + 547, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 170, + 547, + 223 + ], + "spans": [ + { + "bbox": [ + 312, + 170, + 547, + 223 + ], + "type": "text", + "content": "[16] Mingsong Dou, Jonathan Taylor, Henry Fuchs, Andrew Fitzgibbon, and Shahram Izadi. 3d scanning deformable objects with a single rgbd sensor. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 493-501, 2015. 
1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 224, + 547, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 224, + 547, + 255 + ], + "spans": [ + { + "bbox": [ + 312, + 224, + 547, + 255 + ], + "type": "text", + "content": "[17] Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. ACM Siggraph Computer Graphics, 22(4):65-74, 1988. 1, 2, 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 255, + 546, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 255, + 546, + 287 + ], + "spans": [ + { + "bbox": [ + 312, + 255, + 546, + 287 + ], + "type": "text", + "content": "[18] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. Deepstereo: Learning to predict new views from the world's imagery. In CVPR, June 2016. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 287, + 547, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 287, + 547, + 351 + ], + "spans": [ + { + "bbox": [ + 312, + 287, + 547, + 351 + ], + "type": "text", + "content": "[19] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbaek Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12479–12488, 2023. 1, 2, 3, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 351, + 547, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 351, + 547, + 405 + ], + "spans": [ + { + "bbox": [ + 312, + 351, + 547, + 405 + ], + "type": "text", + "content": "[20] Stephan J Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. Fastnerf: High-fidelity neural rendering at 200fps. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14346-14355, 2021. 1, 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 405, + 546, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 405, + 546, + 426 + ], + "spans": [ + { + "bbox": [ + 312, + 405, + 546, + 426 + ], + "type": "text", + "content": "[21] Steven J Gortler, Radek Grzesczuk, Richard Szeliski, and Michael F Cohen. The lumigraph. In SIGGRAPH, 1996. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 426, + 547, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 426, + 547, + 491 + ], + "spans": [ + { + "bbox": [ + 312, + 426, + 547, + 491 + ], + "type": "text", + "content": "[22] Kaiwen Guo, Peter Lincoln, Philip Davidson, Jay Busch, Xueming Yu, Matt Whalen, Geoff Harvey, Sergio Orts-Escolano, Rohit Pandey, Jason Dourgarian, et al. The relightables: Volumetric performance capture of humans with realistic relighting. ACM Transactions on Graphics (ToG), 38(6):1-19, 2019. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 491, + 547, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 491, + 547, + 534 + ], + "spans": [ + { + "bbox": [ + 312, + 491, + 547, + 534 + ], + "type": "text", + "content": "[23] Jon Hasselgren, Nikolai Hofmann, and Jacob Munkberg. Shape, light, and material decomposition from images using monte carlo rendering and denoising. Advances in Neural Information Processing Systems, 35:22856-22869, 2022. 
2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 534, + 546, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 534, + 546, + 566 + ], + "spans": [ + { + "bbox": [ + 312, + 534, + 546, + 566 + ], + "type": "text", + "content": "[24] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM TOG, 2018. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 312, + 566, + 547, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 566, + 547, + 608 + ], + "spans": [ + { + "bbox": [ + 312, + 566, + 547, + 608 + ], + "type": "text", + "content": "[25] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In ICCV, 2021. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 312, + 609, + 546, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 609, + 546, + 662 + ], + "spans": [ + { + "bbox": [ + 312, + 609, + 546, + 662 + ], + "type": "text", + "content": "[26] Anna Hilsmann, Philipp Fechteler, Wieland Morgenstern, Wolfgang Paier, Ingo Feldmann, Oliver Schreer, and Peter Eisert. Going beyond free viewpoint: creating animatable volumetric video of human performances. IET Computer Vision, pages 350-358, 2020. 1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 312, + 662, + 547, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 662, + 547, + 706 + ], + "spans": [ + { + "bbox": [ + 312, + 662, + 547, + 706 + ], + "type": "text", + "content": "[27] Tao Hu, Tao Yu, Zerong Zheng, He Zhang, Yebin Liu, and Matthias Zwicker. Hvtr: Hybrid volumetric-textural rendering for human avatars. In 2022 International Conference on 3D Vision (3DV), pages 197-208. IEEE, 2022." + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20037" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 74, + 288, + 704 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 72, + 74, + 80, + 82 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 74, + 80, + 82 + ], + "spans": [ + { + "bbox": [ + 72, + 74, + 80, + 82 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 83, + 288, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 83, + 288, + 137 + ], + "spans": [ + { + "bbox": [ + 53, + 83, + 288, + 137 + ], + "type": "text", + "content": "[28] Mustafa Isik, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. arXiv preprint arXiv:2305.06356, 2023. 
2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 169 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 169 + ], + "type": "text", + "content": "[29] Shubhendu Jena, Franck Multon, and Adnane Boukhayma. Neural mesh-based graphics. In European Conference on Computer Vision, pages 739-757. Springer, 2022. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 170, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 170, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 170, + 288, + 201 + ], + "type": "text", + "content": "[30] Yue Jiang, Dantong Ji, Zhizhong Han, and Matthias Zwicker. Sdfdiff: Differentiable rendering of signed distance fields for 3d shape optimization. In CVPR, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 201, + 288, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 201, + 288, + 232 + ], + "spans": [ + { + "bbox": [ + 53, + 201, + 288, + 232 + ], + "type": "text", + "content": "[31] Nima Khademi Kalantari, Ting-Chun Wang, and Ravi Ramamoorthi. Learning-based view synthesis for light field cameras. ACM TOG, 2016. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 233, + 288, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 233, + 288, + 285 + ], + "spans": [ + { + "bbox": [ + 53, + 233, + 288, + 285 + ], + "type": "text", + "content": "[32] Petr Kellnhofer, Lars C Jebe, Andrew Jones, Ryan Spicer, Kari Pulli, and Gordon Wetzstein. Neural lumigraph rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4287-4297, 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 286, + 288, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 286, + 288, + 330 + ], + "spans": [ + { + "bbox": [ + 53, + 286, + 288, + 330 + ], + "type": "text", + "content": "[33] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (TOG), 42(4):1-14, 2023. 1, 2, 4, 6, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 331, + 288, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 331, + 288, + 361 + ], + "spans": [ + { + "bbox": [ + 53, + 331, + 288, + 361 + ], + "type": "text", + "content": "[34] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 361, + 288, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 361, + 288, + 405 + ], + "spans": [ + { + "bbox": [ + 53, + 361, + 288, + 405 + ], + "type": "text", + "content": "[35] Georgios Kopanas, Julien Philip, Thomas Leimkuhler, and George Drettakis. Point-based neural rendering with perview optimization. In Computer Graphics Forum, volume 40, pages 29-43. Wiley Online Library, 2021. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 405, + 288, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 405, + 288, + 435 + ], + "spans": [ + { + "bbox": [ + 53, + 405, + 288, + 435 + ], + "type": "text", + "content": "[36] Jonas Kulhanek and Torsten Sattler. Tetra-nerf: Representing neural radiance fields using tetrahedra. 
arXiv preprint arXiv:2304.09987, 2023. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 436, + 288, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 436, + 288, + 468 + ], + "spans": [ + { + "bbox": [ + 53, + 436, + 288, + 468 + ], + "type": "text", + "content": "[37] Kiriakos N Kutulakos and Steven M Seitz. A theory of shape by space carving. International journal of computer vision, 38:199-218, 2000. 2, 3, 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 468, + 288, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 468, + 288, + 511 + ], + "spans": [ + { + "bbox": [ + 53, + 468, + 288, + 511 + ], + "type": "text", + "content": "[38] Christoph Lassner and Michael Zollhofer. Pulsar: Efficient sphere-based neural rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1440-1449, 2021. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 511, + 288, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 511, + 288, + 532 + ], + "spans": [ + { + "bbox": [ + 53, + 511, + 288, + 532 + ], + "type": "text", + "content": "[39] Marc Levoy and Pat Hanrahan. Light field rendering. In SIGGRAPH, 1996. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 533, + 288, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 533, + 288, + 563 + ], + "spans": [ + { + "bbox": [ + 53, + 533, + 288, + 563 + ], + "type": "text", + "content": "[40] Ruilong Li, Hang Gao, Matthew Tancik, and Angjoo Kanazawa. Nerfacc: Efficient sampling accelerates nerfs. arXiv preprint arXiv:2305.04966, 2023. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 563, + 288, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 563, + 288, + 617 + ], + "spans": [ + { + "bbox": [ + 53, + 563, + 288, + 617 + ], + "type": "text", + "content": "[41] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, and Zhaoyang Lv. Neural 3d video synthesis. arXiv preprint arXiv:2103.02597, 2021. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 617, + 288, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 617, + 288, + 692 + ], + "spans": [ + { + "bbox": [ + 53, + 617, + 288, + 692 + ], + "type": "text", + "content": "[42] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 693, + 288, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 693, + 288, + 704 + ], + "spans": [ + { + "bbox": [ + 53, + 693, + 288, + 704 + ], + "type": "text", + "content": "[43] Zhan Li, Zhang Chen, Zhong Li, and Yi Xu. 
Spacetime" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 74, + 545, + 704 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 331, + 74, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 74, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 331, + 74, + 545, + 95 + ], + "type": "text", + "content": "gaussian feature splatting for real-time dynamic view synthesis. arXiv preprint arXiv:2312.16812, 2023. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 95, + 545, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 95, + 545, + 137 + ], + "spans": [ + { + "bbox": [ + 312, + 95, + 545, + 137 + ], + "type": "text", + "content": "[44] Zhong Li, Yu Ji, Wei Yang, Jinwei Ye, and Jingyi Yu. Robust 3d human motion reconstruction via dynamic template construction. In 2017 International Conference on 3D Vision (3DV), pages 496-505. IEEE, 2017. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 137, + 545, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 137, + 545, + 169 + ], + "spans": [ + { + "bbox": [ + 312, + 137, + 545, + 169 + ], + "type": "text", + "content": "[45] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In CVPR, 2021. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 170, + 545, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 170, + 545, + 222 + ], + "spans": [ + { + "bbox": [ + 312, + 170, + 545, + 222 + ], + "type": "text", + "content": "[46] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4273-4284, 2023. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 223, + 545, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 223, + 545, + 244 + ], + "spans": [ + { + "bbox": [ + 312, + 223, + 545, + 244 + ], + "type": "text", + "content": "[47] Zhengqi Li, Wenqi Xian, Abe Davis, and Noah Snavely. Crowdsampling the plenoptic function. In ECCV, 2020. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 244, + 545, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 244, + 545, + 287 + ], + "spans": [ + { + "bbox": [ + 312, + 244, + 545, + 287 + ], + "type": "text", + "content": "[48] Haotong Lin, Sida Peng, Zhen Xu, Tao Xie, Xingyi He, Hujun Bao, and Xiaowei Zhou. High-fidelity and real-time novel view synthesis for dynamic scenes. In SIGGRAPH Asia Conference Proceedings, 2023. 2, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 287, + 545, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 287, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 312, + 287, + 545, + 330 + ], + "type": "text", + "content": "[49] Haotong Lin, Sida Peng, Zhen Xu, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Efficient neural radiance fields for interactive free-viewpoint video. In SIGGRAPH Asia Conference Proceedings, 2022. 
1, 2, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 331, + 545, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 331, + 545, + 383 + ], + "spans": [ + { + "bbox": [ + 312, + 331, + 545, + 383 + ], + "type": "text", + "content": "[50] Shanchuan Lin, Linjie Yang, Imran Saleemi, and Soumyadip Sengupta. Robust high-resolution video matting with temporal guidance. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 238-247, 2022. 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 384, + 545, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 384, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 312, + 384, + 545, + 415 + ], + "type": "text", + "content": "[51] Shichen Liu, Shunsuke Saito, Weikai Chen, and Hao Li. Learning to infer implicit surfaces without 3d supervision. NeurIPS, 2019. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 312, + 415, + 545, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 415, + 545, + 458 + ], + "spans": [ + { + "bbox": [ + 312, + 415, + 545, + 458 + ], + "type": "text", + "content": "[52] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. In SIGGRAPH, 2019. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 312, + 459, + 545, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 459, + 545, + 501 + ], + "spans": [ + { + "bbox": [ + 312, + 459, + 545, + 501 + ], + "type": "text", + "content": "[53] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. ACM Transactions on Graphics (TOG), 40(4):1-13, 2021. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 312, + 501, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 501, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 312, + 501, + 545, + 544 + ], + "type": "text", + "content": "[54] Fan Lu, Yan Xu, Guang Chen, Hongsheng Li, Kwan-Yee Lin, and Changjun Jiang. Urban radiance field representation with deformable neural mesh primitives. arXiv preprint arXiv:2307.10776, 2023. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 312, + 544, + 545, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 544, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 312, + 544, + 545, + 586 + ], + "type": "text", + "content": "[55] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. arXiv preprint arXiv:2308.09713, 2023. 3" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 312, + 586, + 545, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 586, + 545, + 639 + ], + "spans": [ + { + "bbox": [ + 312, + 586, + 545, + 639 + ], + "type": "text", + "content": "[56] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM TOG, 2019. 
2" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 312, + 640, + 545, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 640, + 545, + 684 + ], + "spans": [ + { + "bbox": [ + 312, + 640, + 545, + 684 + ], + "type": "text", + "content": "[57] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. 2020. 4" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 312, + 684, + 545, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 684, + 545, + 704 + ], + "spans": [ + { + "bbox": [ + 312, + 684, + 545, + 704 + ], + "type": "text", + "content": "[58] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf:" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "20038" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 73, + 288, + 706 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 72, + 73, + 288, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 73, + 288, + 105 + ], + "spans": [ + { + "bbox": [ + 72, + 73, + 288, + 105 + ], + "type": "text", + "content": "Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 1, 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 106, + 288, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 106, + 288, + 125 + ], + "spans": [ + { + "bbox": [ + 53, + 106, + 288, + 125 + ], + "type": "text", + "content": "[59] Claus Müller. Spherical harmonics, volume 17. Springer, 2006. 2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 126, + 288, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 126, + 288, + 169 + ], + "spans": [ + { + "bbox": [ + 53, + 126, + 288, + 169 + ], + "type": "text", + "content": "[60] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 2, 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 170, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 170, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 170, + 288, + 201 + ], + "type": "text", + "content": "[61] Richard A Newcombe, Dieter Fox, and Steven M Seitz. Dynamicfusion: Reconstruction and tracking of non-rigid scenes in real-time. In CVPR, 2015. 1, 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 201, + 288, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 201, + 288, + 255 + ], + "spans": [ + { + "bbox": [ + 53, + 201, + 288, + 255 + ], + "type": "text", + "content": "[62] Sergio Orts-Escolano, Christoph Rhemann, Sean Fanello, Wayne Chang, Adarsh Kowdle, Yury Degtyarev, David Kim, Philip L Davidson, Sameh Khamis, Mingsong Dou, et al. Holoportation: Virtual 3d teleportation in real-time. In UIST, 2016. 
1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 255, + 288, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 255, + 288, + 297 + ], + "spans": [ + { + "bbox": [ + 53, + 255, + 288, + 297 + ], + "type": "text", + "content": "[63] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In ICCV, 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 298, + 288, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 298, + 288, + 351 + ], + "spans": [ + { + "bbox": [ + 53, + 298, + 288, + 351 + ], + "type": "text", + "content": "[64] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 352, + 288, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 352, + 288, + 393 + ], + "spans": [ + { + "bbox": [ + 53, + 352, + 288, + 393 + ], + "type": "text", + "content": "[65] Steven Parker, Peter Shirley, and Brian Smits. Single sample soft shadows. Technical report, Technical Report UUCS-98-019, Computer Science Department, University of Utah, 1998. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 395, + 288, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 395, + 288, + 481 + ], + "spans": [ + { + "bbox": [ + 53, + 395, + 288, + 481 + ], + "type": "text", + "content": "[66] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In NeurIPS, 2019. 4, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 482, + 288, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 482, + 288, + 524 + ], + "spans": [ + { + "bbox": [ + 53, + 482, + 288, + 524 + ], + "type": "text", + "content": "[67] Nikolay Patakin, Dmitry Senushkin, Anna Vorontsova, and Anton Konushin. Neural global illumination for inverse rendering. In 2023 IEEE International Conference on Image Processing (ICIP), pages 1580-1584. IEEE, 2023. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 525, + 288, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 525, + 288, + 577 + ], + "spans": [ + { + "bbox": [ + 53, + 525, + 288, + 577 + ], + "type": "text", + "content": "[68] Sida Peng, Yunzhi Yan, Qing Shuai, Hujun Bao, and Xiaowei Zhou. Representing volumetric videos as dynamic mlp maps. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4252-4262, 2023. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 578, + 288, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 578, + 288, + 598 + ], + "spans": [ + { + "bbox": [ + 53, + 578, + 288, + 598 + ], + "type": "text", + "content": "[69] Eric Penner and Li Zhang. Soft 3d reconstruction for view synthesis. ACM TOG, 2017. 
2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 599, + 288, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 599, + 288, + 630 + ], + "spans": [ + { + "bbox": [ + 53, + 599, + 288, + 630 + ], + "type": "text", + "content": "[70] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In CVPR, 2021. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 631, + 288, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 631, + 288, + 684 + ], + "spans": [ + { + "bbox": [ + 53, + 631, + 288, + 684 + ], + "type": "text", + "content": "[71] Ruslan Rakhimov, Andrei-Timotei Ardelean, Victor Lempitsky, and Evgeny Burnaev. Npbg++: Accelerating neural point-based graphics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15969-15979, 2022. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 685, + 288, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 685, + 288, + 706 + ], + "spans": [ + { + "bbox": [ + 53, + 685, + 288, + 706 + ], + "type": "text", + "content": "[72] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 73, + 547, + 705 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 331, + 73, + 547, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 73, + 547, + 94 + ], + "spans": [ + { + "bbox": [ + 331, + 73, + 547, + 94 + ], + "type": "text", + "content": "thousands of tiny mpls. In ICCV, pages 14335-14345, 2021. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 95, + 547, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 95, + 547, + 126 + ], + "spans": [ + { + "bbox": [ + 312, + 95, + 547, + 126 + ], + "type": "text", + "content": "[73] Darius Rückert, Linus Franke, and Marc Stamminger. Adop: Approximate differentiable one-pixel point rendering. ACM Transactions on Graphics (ToG), 41(4):1-14, 2022. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 127, + 547, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 127, + 547, + 158 + ], + "spans": [ + { + "bbox": [ + 312, + 127, + 547, + 158 + ], + "type": "text", + "content": "[74] Jason Sanders and Edward Kandrot. CUDA by example: an introduction to general-purpose GPU programming. Addison-Wesley Professional, 2010. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 159, + 547, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 159, + 547, + 201 + ], + "spans": [ + { + "bbox": [ + 312, + 159, + 547, + 201 + ], + "type": "text", + "content": "[75] Ruizhi Shao, Zerong Zheng, Hanzhang Tu, Boning Liu, Hongwen Zhang, and Yebin Liu. Tensor4d: Efficient neural 4d decomposition for high-fidelity dynamic reconstruction and rendering. arXiv, 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 201, + 547, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 201, + 547, + 232 + ], + "spans": [ + { + "bbox": [ + 312, + 201, + 547, + 232 + ], + "type": "text", + "content": "[76] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. 
In CVPR, 2020. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 233, + 547, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 233, + 547, + 264 + ], + "spans": [ + { + "bbox": [ + 312, + 233, + 547, + 264 + ], + "type": "text", + "content": "[77] Dave Shreiner et al. OpenGL programming guide: the official guide to learning OpenGL, versions 3.0 and 3.1. Pearson Education, 2009. 4, 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 265, + 547, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 265, + 547, + 297 + ], + "spans": [ + { + "bbox": [ + 312, + 265, + 547, + 297 + ], + "type": "text", + "content": "[78] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 297, + 547, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 297, + 547, + 350 + ], + "spans": [ + { + "bbox": [ + 312, + 297, + 547, + 350 + ], + "type": "text", + "content": "[79] Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems, 34:19313-19325, 2021. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 351, + 547, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 351, + 547, + 392 + ], + "spans": [ + { + "bbox": [ + 312, + 351, + 547, + 392 + ], + "type": "text", + "content": "[80] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhöfer. Deepvoxels: Learning persistent 3d feature embeddings. In CVPR, 2019." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 393, + 547, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 393, + 547, + 425 + ], + "spans": [ + { + "bbox": [ + 312, + 393, + 547, + 425 + ], + "type": "text", + "content": "[81] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In NeurIPS, 2019. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 426, + 547, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 426, + 547, + 489 + ], + "spans": [ + { + "bbox": [ + 312, + 426, + 547, + 489 + ], + "type": "text", + "content": "[82] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 312, + 491, + 547, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 491, + 547, + 544 + ], + "spans": [ + { + "bbox": [ + 312, + 491, + 547, + 544 + ], + "type": "text", + "content": "[83] Pratul P Srinivasan, Richard Tucker, Jonathan T Barron, Ravi Ramamoorthi, Ren Ng, and Noah Snively. Pushing the boundaries of view extrapolation with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 175-184, 2019. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 312, + 544, + 547, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 544, + 547, + 609 + ], + "spans": [ + { + "bbox": [ + 312, + 544, + 547, + 609 + ], + "type": "text", + "content": "[84] Zhuo Su, Lan Xu, Zerong Zheng, Tao Yu, Yebin Liu, and Lu Fang. Robustfusion: Human volumetric capture with data-driven visual cues using a rgbd camera. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part IV 16, pages 246-264. Springer, 2020. 1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 312, + 609, + 547, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 609, + 547, + 662 + ], + "spans": [ + { + "bbox": [ + 312, + 609, + 547, + 662 + ], + "type": "text", + "content": "[85] Mohammed Suhail, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Light field neural rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8269-8279, June 2022. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 312, + 662, + 547, + 705 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 662, + 547, + 705 + ], + "spans": [ + { + "bbox": [ + 312, + 662, + 547, + 705 + ], + "type": "text", + "content": "[86] Richard Szeliski and Polina Golland. Stereo matching with transparency and matting. In Sixth International Conference on Computer Vision (IEEE Cat. No. 98CH36271), pages 517-524. IEEE, 1998. 2" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "20039" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 706 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 287, + 106 + ], + "type": "text", + "content": "[87] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, and Huaping Liu. Mixed neural voxels for fast multi-view video synthesis. arXiv preprint arXiv:2212.00190, 2022. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 106, + 288, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 106, + 288, + 159 + ], + "spans": [ + { + "bbox": [ + 53, + 106, + 288, + 159 + ], + "type": "text", + "content": "[88] Liao Wang, Qiang Hu, Qihan He, Ziyu Wang, Jingyi Yu, Tinne Tuytelaars, Lan Xu, and Minye Wu. Neural residual radiance fields for streamably free-viewpoint videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 76-87, 2023. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 159, + 288, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 288, + 224 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 288, + 224 + ], + "type": "text", + "content": "[89] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 224, + 288, + 299 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 288, + 299 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 288, + 299 + ], + "type": "text", + "content": "[90] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2021. 2, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 300, + 288, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 300, + 288, + 342 + ], + "spans": [ + { + "bbox": [ + 53, + 300, + 288, + 342 + ], + "type": "text", + "content": "[91] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time view synthesis with neural basis expansion. In CVPR, 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 342, + 288, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 342, + 288, + 386 + ], + "spans": [ + { + "bbox": [ + 53, + 342, + 288, + 386 + ], + "type": "text", + "content": "[92] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Wang Xinggang. 4d gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 386, + 288, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 386, + 288, + 429 + ], + "spans": [ + { + "bbox": [ + 53, + 386, + 288, + 429 + ], + "type": "text", + "content": "[93] Minye Wu, Yuehao Wang, Qiang Hu, and Jingyi Yu. Multiview neural human rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1682-1691, 2020. 2, 3, 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 429, + 288, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 429, + 288, + 472 + ], + "spans": [ + { + "bbox": [ + 53, + 429, + 288, + 472 + ], + "type": "text", + "content": "[94] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. arXiv preprint arXiv:2309.13101, 2023. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 472, + 288, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 472, + 288, + 514 + ], + "spans": [ + { + "bbox": [ + 53, + 472, + 288, + 514 + ], + "type": "text", + "content": "[95] Zeyu Yang, Hongye Yang, Zijie Pan, Xiatian Zhu, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv 2310.10642, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 514, + 288, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 514, + 288, + 546 + ], + "spans": [ + { + "bbox": [ + 53, + 514, + 288, + 546 + ], + "type": "text", + "content": "[96] Alex Yu, Sara Fridovich-Keil, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. CVPR, 2022. 
2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 546, + 288, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 546, + 288, + 600 + ], + "spans": [ + { + "bbox": [ + 53, + 546, + 288, + 600 + ], + "type": "text", + "content": "[97] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 1, 2, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 600, + 288, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 600, + 288, + 632 + ], + "spans": [ + { + "bbox": [ + 53, + 600, + 288, + 632 + ], + "type": "text", + "content": "[98] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 632, + 288, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 632, + 288, + 686 + ], + "spans": [ + { + "bbox": [ + 53, + 632, + 288, + 686 + ], + "type": "text", + "content": "[99] Tao Yu, Zerong Zheng, Kaiwen Guo, Pengpeng Liu, Qionghai Dai, and Yebin Liu. Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5746-5756, 2021. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 686, + 288, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 686, + 288, + 706 + ], + "spans": [ + { + "bbox": [ + 48, + 686, + 288, + 706 + ], + "type": "text", + "content": "[100] Tao Yu, Zerong Zheng, Kaiwen Guo, Jianhui Zhao, Qionghai Dai, Hao Li, Gerard Pons-Moll, and Yebin Liu. Doublefu" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 72, + 547, + 245 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 331, + 72, + 547, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 72, + 547, + 105 + ], + "spans": [ + { + "bbox": [ + 331, + 72, + 547, + 105 + ], + "type": "text", + "content": "sion: Real-time capture of human performances with inner body shapes from a single depth sensor. In CVPR, 2018. 1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 106, + 547, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 106, + 547, + 148 + ], + "spans": [ + { + "bbox": [ + 307, + 106, + 547, + 148 + ], + "type": "text", + "content": "[101] Qiang Zhang, Seung-Hwan Baek, Szymon Rusinkiewicz, and Felix Heide. Differentiable point-based radiance fields for efficient view synthesis. In SIGGRAPH Asia 2022 Conference Papers, pages 1-12, 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 148, + 547, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 148, + 547, + 202 + ], + "spans": [ + { + "bbox": [ + 307, + 148, + 547, + 202 + ], + "type": "text", + "content": "[102] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 
5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 202, + 547, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 202, + 547, + 245 + ], + "spans": [ + { + "bbox": [ + 308, + 202, + 547, + 245 + ], + "type": "text", + "content": "[103] C Lawrence Zitnick, Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM TOG, 2004. 2" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20040" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/1d3927f2-2533-4713-91b0-b3f9e13c8aed_content_list.json b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/1d3927f2-2533-4713-91b0-b3f9e13c8aed_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..02082e810ed8fb4766371cdf9e24f0ff035ce530 --- /dev/null +++ b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/1d3927f2-2533-4713-91b0-b3f9e13c8aed_content_list.json @@ -0,0 +1,1715 @@ +[ + { + "type": "text", + "text": "6D-Diff: A Keypoint Diffusion Framework for 6D Object Pose Estimation", + "text_level": 1, + "bbox": [ + 112, + 130, + 854, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Li Xu $^{1\\dagger}$ Haoxuan Qu $^{1\\dagger}$ Yujun Cai $^{2}$ Jun Liu $^{1\\dagger}$ $^{1}$ Singapore University of Technology and Design \n $^{2}$ Nanyang Technological University", + "bbox": [ + 282, + 178, + 684, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{li_xu, haoxuan_qu}@mysmail.sutd.edu.sg, yujun001@e.ntu.edu.sg, jun.liu@sutd.edu.sg", + "bbox": [ + 218, + 236, + 746, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 318, + 313, + 334 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Estimating the 6D object pose from a single RGB image often involves noise and indeterminacy due to challenges such as occlusions and cluttered backgrounds. Meanwhile, diffusion models have shown appealing performance in generating high-quality images from random noise with high indeterminacy through step-by-step denoising. Inspired by their denoising capability, we propose a novel diffusion-based framework (6D-Diff) to handle the noise and indeterminacy in object pose estimation for better performance. In our framework, to establish accurate 2D-3D correspondence, we formulate 2D keypoints detection as a reverse diffusion (denoising) process. To facilitate such a denoising process, we design a Mixture-of-Cauchy-based forward diffusion process and condition the reverse process on the object appearance features. Extensive experiments on the LM-O and YCB-V datasets demonstrate the effectiveness of our framework.", + "bbox": [ + 75, + 351, + 473, + 609 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 637, + 209, + 652 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "6D object pose estimation aims to estimate the 6D pose of an object including its location and orientation, which has a wide range of applications, such as augmented reality [39, 47], robotic manipulation [3, 45], and automatic driving [62]. Recently, various methods [4, 5, 19, 22, 27, 44, 53, 61, 64] have been proposed to conduct RGB-based 6D object pose estimation since RGB images are easy to obtain. Despite the increased efforts, a variety of challenges persist in RGB-based 6D object pose estimation, including occlusions, cluttered backgrounds, and changeable environments [8, 40, 44, 60, 63]. These challenges can introduce significant noise and indeterminacy into the pose estimation process, leading to error-prone predictions [8, 40, 44].", + "bbox": [ + 75, + 662, + 468, + 859 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Meanwhile, diffusion models [18, 52] have achieved ap", + "bbox": [ + 96, + 861, + 468, + 876 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/008ea54ca7496cefe0d23c976582eb6ff19287170f86f620cff90d37b7f76d12.jpg", + "image_caption": [ + "Figure 1. Overview of our proposed 6D-Diff framework. As shown, given the 3D keypoints from the object 3D CAD model, we aim to detect the corresponding 2D keypoints in the image to obtain the 6D object pose. Note that when detecting keypoints, there are often challenges such as occlusions (including self-occlusions) and cluttered backgrounds that can introduce noise and indeterminacy into the process, impacting the accuracy of pose prediction." + ], + "image_footnote": [], + "bbox": [ + 503, + 303, + 890, + 409 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "pealing results in various generation tasks such as image synthesis [7, 18] and image editing [41]. Specifically, diffusion models are able to recover high-quality determinate samples (e.g., clean images) from a noisy and indeterminate input data distribution (e.g., random noise) via a step-by-step denoising process [18, 52]. Motivated by such a strong denoising capability [11, 12, 18], we aim to leverage diffusion models to handle the RGB-based 6D object pose estimation task, since this task also involves tackling noise and indeterminacy. However, it can be difficult to directly use diffusion models to estimate the object pose, because diffusion models often start denoising from random Gaussian noise [18, 52]. Meanwhile, in RGB-based 6D object pose estimation, the object pose is often extracted from an intermediate representation, such as keypoint heatmaps [5], pixel-wise voting vectors [44], or object surface keypoint features [4]. Such an intermediate representation encodes useful distribution priors about the object pose. Thus starting denoising from such an representation shall effectively assist the diffusion model in recovering accurate object poses [11]. To achieve this, we propose a novel diffusion-based object pose estimation framework (6D-Diff) that can exploit prior distribution knowledge from the intermediate representation for better performance.", + "bbox": [ + 496, + 537, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Equal contribution; ‡ Corresponding author", + "bbox": [ + 99, + 886, + 346, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "9676", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Overall, our framework is a correspondence-based framework, in which to predict an object pose, given the 3D keypoints pre-selected from the object 3D CAD model, we first predict the coordinates of the 2D image keypoints corresponding to the pre-selected 3D keypoints. We then use the 3D keypoints together with the predicted 2D keypoints coordinates to compute the 6D object pose using a Perspective-n-Point (PnP) solver [10, 31]. As shown in Fig. 1, to predict the 2D keypoints coordinates, we first extract an intermediate representation (the 2D keypoints heatmaps) through a keypoints distribution initializer. As discussed before, due to various factors, there often exists noise and indeterminacy in the keypoints detection process and the extracted heatmaps can be noisy as shown in Fig. 2. Thus we pass the distribution modeled from these keypoints heatmaps into a diffusion model to perform the denoising process to obtain the final keypoints coordinates prediction.", + "bbox": [ + 76, + 90, + 472, + 349 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Analogous to non-equilibrium thermodynamics [50], given a 2D image keypoint, we can consider all its possible locations in the image as particles in thermodynamics. Under low indeterminacy, the particles (possible locations) w.r.t. each 2D keypoint gather, and each keypoint can be determinately and accurately localized. In contrast, under high indeterminacy, these particles can stochastically spread over the input image, and it is difficult to localize each keypoint. The process of converting particles from low indeterminacy to high indeterminacy is called the forward process of the diffusion model. The goal of the diffusion model is to reverse the above forward process (through a reverse process), i.e., converting the particles from high indeterminacy to low indeterminacy. Here in our case, we aim to convert the indeterminate keypoints coordinates distribution modeled from the heatmaps into the determinate distribution. Below we briefly introduce the forward process and the reverse process in our diffusion model.", + "bbox": [ + 76, + 352, + 472, + 625 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In the forward process, we aim to generate supervision signals that will be used to optimize the diffusion model during the reverse process. Specifically, given a set of pre-selected 3D keypoints, we first acquire ground-truth coordinates of their corresponding 2D keypoints using the ground-truth object pose. Then these determinate ground-truth 2D coordinates are gradually diffused towards the indeterminate distribution modeled from the intermediate representation, and the distributions generated along the way will be used as supervision signals. Note that, as the distribution modeled from the intermediate representation can be complex and irregular, it is difficult to characterize such a distribution via the Gaussian distribution. 
This means that simply applying diffusion models in most existing generation works [7, 18, 52], which start denoising from the random Gaussian noise, can introduce potentially large errors. To tackle this challenge, we draw inspiration from the fact that the Mixture of Cauchy (MoC) model can effectively char", + "bbox": [ + 76, + 628, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/2e07f0c6a7b355d4d6990059a2f244260c20f27c9a178a6394542083d017d832.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
3D CAD modelImageHeatmap
(a)
(b)
", + "bbox": [ + 540, + 90, + 849, + 243 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Figure 2. Above we show two examples of keypoint heatmaps, which serve as the intermediate representation [4, 5, 44] in our framework. The red dots indicate the ground-truth locations of the keypoints. In the example (a), the target object is the pink cat, which is heavily occluded in the image and is shown in a different pose compared to the 3D model. As shown above, due to occlusions and cluttered backgrounds, the keypoint heatmaps are noisy, which reflects the noise and indeterminacy during the keypoints detection process.", + "bbox": [ + 496, + 244, + 893, + 368 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "acterize complex and intractable distributions. Moreover, the MoC model is robust to potential outliers in the distribution to be characterized [26]. Thus we propose to model the intermediate representation using a MoC distribution instead of simply treating it as a random Gaussian noise. In this way, we gradually diffuse the determinate distribution (ground truth) of keypoints coordinates towards the modeled MoC distribution during the forward process.", + "bbox": [ + 496, + 395, + 893, + 518 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Correspondingly, in the reverse process, starting from the MoC distribution modeled in the forward process, we aim to learn to recover the ground-truth keypoints coordinates. To achieve this, we leverage the distributions generated step-by-step during the forward process as the supervision signals to train the diffusion model to learn the reverse process. In this way, the diffusion model can learn to convert the indeterminate MoC distribution of keypoints coordinates into a determinate one smoothly and effectively. After the reverse process, the 2D keypoints coordinates obtained from the final determinate distribution are used to compute the 6D object pose with the pre-selected 3D keypoints. Moreover, we further facilitate the model learning of such a reverse process by injecting object appearance features as context information.", + "bbox": [ + 496, + 527, + 895, + 755 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our work makes the following contributions. 1) We propose a novel 6D-Diff framework, in which we formulate keypoints detection for 6D object pose estimation as a reverse diffusion process to effectively eliminate the noise and indeterminacy in object pose estimation. 2) To take advantage of the intermediate representation that encodes useful prior distribution knowledge for handling this task, we propose a novel MoC-based diffusion process. Besides, we facilitate the model learning by utilizing object features.", + "bbox": [ + 496, + 763, + 895, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "9677", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 89, + 218, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "RGB-based 6D Object Pose Estimation has received a lot of attention [4, 13-16, 23, 32, 33, 36, 38, 43, 44, 46, 53, 54, 56, 63-67]. Some works [22, 27, 61, 63] proposed to directly regress object poses. However, the non-linearity of the rotation space makes direct regression of object poses difficult [32]. 
Compared to this type of direct methods, correspondence-based methods [5, 19, 43, 44, 46, 53, 56] often demonstrate better performance, which estimate 6D object poses via learning 2D-3D correspondences between the observed image and the object 3D model.", + "bbox": [ + 75, + 122, + 472, + 273 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Among correspondence-based methods, several works [42, 44, 46, 48, 56] aim to predict the 2D keypoints coordinates corresponding to specific 3D keypoints. BB8 [46] proposed to detect the 2D keypoints corresponding to the 8 corners of the object's 3D bounding box. Later, PVNet [44] achieved better performance by estimating 2D keypoints for sampled points on the surface of the object 3D model via pixel-wise voting. Moreover, various methods [19, 43, 53, 61, 67] establish 2D-3D correspondences by localizing the 3D model point corresponding to each observed object pixel. Among these methods, DPOD [67] explored the use of UV texture maps to facilitate model training, and ZebraPose [53] proposed to encode the surface of the object 3D model efficiently through a hierarchical binary grouping. Besides, several pose refinement methods [23, 33, 38, 64] have been proposed, which conducted pose refinement given an initial pose estimation.", + "bbox": [ + 75, + 281, + 472, + 537 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this paper, we also regard object pose estimation as a 2D-3D correspondence estimation problem. Different from previous works, here by formulating 2D-3D correspondence estimation as a distribution transformation process (denoising process), we propose a new framework (6D-Diff) that trains a diffusion model to perform progressive denoising from an indeterminate keypoints distribution to the desired keypoints distribution with low indeterminacy.", + "bbox": [ + 75, + 546, + 472, + 667 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Diffusion Models [7, 9, 18, 50, 52] are originally introduced for image synthesis. Showing appealing generation capabilities, diffusion models have also been explored in various other tasks [11, 12, 20, 25, 30, 37, 41, 58], such as image editing [41] and image inpainting [37]. Here we explore a new framework that tackles object pose estimation with a diffusion model. Different from previous generation works [7, 37, 41] that start denoising from random noise, to aid the denoising process for 6D object pose estimation, we design a novel MoC-based diffusion mechanism that enables the diffusion model to start denoising from a distribution containing useful prior distribution knowledge regarding the object pose. Moreover, we condition the denoising process on the object appearance features, to further guide the diffusion model to obtain accurate predictions.", + "bbox": [ + 75, + 672, + 472, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 89, + 591, + 106 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To handle the noise and indeterminacy in RGB-based 6D object pose estimation, inspired by [11], from a novel perspective of distribution transformation with progressive denoising, we propose a framework (6D-Diff) that represents a new brand of diffusion-based solution for 6D object pose estimation. Below we first revisit diffusion models in Sec. 3.1. Then we discuss our proposed framework in Sec. 3.2, and introduce its training and testing scheme in Sec. 3.3. We finally detail the model architecture in Sec. 
3.4.", + "bbox": [ + 496, + 114, + 893, + 252 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Revisiting Diffusion Models", + "text_level": 1, + "bbox": [ + 498, + 260, + 750, + 276 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The diffusion model [18, 52], which is a kind of probabilistic generative model, consists of two parts, namely the forward process and the reverse process. Specifically, given an original sample $d_0$ (e.g., a clean image), the process of diffusing the sample $d_0$ iteratively towards the noise (typically Gaussian noise) $d_K \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})$ (i.e., $d_0 \\to d_1 \\to \\ldots \\to d_K$ ) is called the forward process. In contrast, the process of denoising the noise $d_K$ iteratively towards the sample $d_0$ (i.e., $d_K \\to d_{K-1} \\to \\ldots \\to d_0$ ) is called the reverse process. Each process is defined as a Markov chain.", + "bbox": [ + 496, + 282, + 893, + 434 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Forward Process. To obtain supervision signals for training the diffusion model to learn to perform the reverse process in a stepwise manner, we need to acquire the intermediate step results $\\{d_k\\}_{k=1}^{K-1}$ . Thus the forward process is first performed to generate these intermediate step results for training purpose. Specifically, the posterior distribution $q(d_{1:K}|d_0)$ from $d_1$ to $d_K$ is formulated as:", + "bbox": [ + 496, + 434, + 893, + 541 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(d _ {1: K} \\mid d _ {0}\\right) = \\prod_ {k = 1} ^ {K} q \\left(d _ {k} \\mid d _ {k - 1}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 553, + 542, + 890, + 585 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(d _ {k} \\mid d _ {k - 1}\\right) = \\mathcal {N} \\left(d _ {k}; \\sqrt {1 - \\beta_ {k}} d _ {k - 1}, \\beta_ {k} \\mathbf {I}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 588, + 836, + 607 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\{\\beta_{k}\\in (0,1)\\}_{k = 1}^{K}$ denotes a set of fixed variance controllers that control the scale of the injected noise at different steps. According to Eq. (1), we can derive $q(d_k|d_0)$ in closed form as:", + "bbox": [ + 496, + 609, + 893, + 669 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(d _ {k} \\mid d _ {0}\\right) = \\mathcal {N} \\left(d _ {k}; \\sqrt {\\bar {\\alpha} _ {k}} d _ {0}, (1 - \\bar {\\alpha} _ {k}) \\mathbf {I}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 557, + 672, + 890, + 691 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\alpha_{k} = 1 - \\beta_{k}$ and $\\overline{\\alpha}_k = \\prod_{s = 1}^k\\alpha_s$ . Based on Eq. (2), $d_{k}$ can be further expressed as:", + "bbox": [ + 496, + 695, + 890, + 727 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nd _ {k} = \\sqrt {\\bar {\\alpha} _ {k}} d _ {0} + \\sqrt {1 - \\bar {\\alpha} _ {k}} \\epsilon \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 598, + 729, + 890, + 746 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\epsilon \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})$ . From Eq. 
(3), we can observe that when the number of diffusion steps $K$ is sufficiently large and $\\overline{\\alpha}_K$ correspondingly decreases to nearly zero, the distribution of $d_K$ is approximately a standard Gaussian distribution, i.e., $d_K \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})$ . This means $d_0$ is gradually corrupted into Gaussian noise, which conforms to the nonequilibrium thermodynamics phenomenon of the diffusion process [50].", + "bbox": [ + 496, + 750, + 893, + 869 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Reverse Process. With the intermediate step results $\\{d_k\\}_{k=1}^{K-1}$ acquired in the forward process, the diffusion", + "bbox": [ + 500, + 869, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "9678", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "model is trained to learn to perform the reverse process. Specifically, in the reverse process, each step can be formulated as a function $f$ that takes $d_{k}$ and the diffusion model $M_{diff}$ as inputs and generate $d_{k-1}$ as the output, i.e., $d_{k-1} = f(d_{k}, M_{diff})$ .", + "bbox": [ + 75, + 90, + 467, + 167 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After training the diffusion model, during inference, we do not need to conduct the forward process. Instead, we only conduct the reverse process, which converts a random Gaussian noise $d_{K} \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})$ into a sample $d_0$ of the desired distribution using the trained diffusion model.", + "bbox": [ + 75, + 167, + 467, + 242 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Proposed Framework", + "text_level": 1, + "bbox": [ + 76, + 253, + 279, + 271 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Similar to previous works [21, 44, 53], our framework predicts 6D object poses via a two-stage pipeline. Specifically, (i) we first select $N$ 3D keypoints on the object CAD model and detect the corresponding $N$ 2D keypoints in the image; (ii) we then compute the 6D pose using a PnP solver. Here we mainly focus on the first stage and aim to produce more accurate keypoint detection results.", + "bbox": [ + 75, + 279, + 467, + 383 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "When detecting 2D keypoints, factors like occlusions and cluttered backgrounds can bring noise and indeterminacy into this process, and affect the accuracy of detection results [21, 44]. To handle this problem, inspired by that diffusion models can iteratively reduce indeterminacy and noise in the initial distribution (e.g., standard Gaussian distribution) to generate determinate and high-quality samples of the desired distribution [11, 12], we formulate keypoints detection as generating a determinate distribution of keypoints coordinates $(D_0)$ from an indeterminate initial distribution $(D_K)$ via a diffusion model.", + "bbox": [ + 75, + 385, + 467, + 551 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Moreover, to effectively adapt to the 6D object pose estimation task, the diffusion model in our framework does not start the reverse process from the common initial distribution (i.e., the standard Gaussian distribution) as in most existing diffusion works [7, 18, 52]. 
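For reference, the common Gaussian formulation summarized in Sec. 3.1 (Eqs. (1)-(3)) can be sketched as follows. This is only an illustrative sketch, not the implementation used in the paper; the linear variance schedule and the placeholder denoiser are assumptions made for the example.

```python
# Illustrative sketch (not the authors' code) of the standard Gaussian diffusion
# formulation in Sec. 3.1. The linear beta schedule and the placeholder denoiser
# below are assumptions made only for this example.
import numpy as np

K = 100                                    # number of diffusion steps
betas = np.linspace(1e-4, 0.02, K)         # assumed variance controllers {beta_k}
alphas = 1.0 - betas
alpha_bars = np.cumprod(alphas)            # \bar{alpha}_k = prod_{s<=k} alpha_s

def forward_sample(d0, k, rng):
    """Eq. (3): draw d_k ~ q(d_k | d_0) in closed form; d0 has shape (N, 2)."""
    eps = rng.standard_normal(d0.shape)
    return np.sqrt(alpha_bars[k - 1]) * d0 + np.sqrt(1.0 - alpha_bars[k - 1]) * eps

def reverse_process(dK, denoiser):
    """Iteratively apply d_{k-1} = f(d_k, M_diff) from k = K down to k = 1."""
    d = dK
    for k in range(K, 0, -1):
        d = denoiser(d, k)
    return d

rng = np.random.default_rng(0)
d0 = np.zeros((128, 2))                        # e.g. N = 128 keypoint coordinates
dK = forward_sample(d0, K, rng)                # close to standard Gaussian for large K
d0_hat = reverse_process(dK, lambda d, k: d)   # placeholder denoiser (identity)
```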
Instead, inspired by recent 6D object pose estimation works [4, 5, 61], we first extract an intermediate representation (e.g., heatmaps), and use this representation to initialize a keypoints coordinates distribution (i.e., $D_K$ ), which will serve as the starting point of the reverse process. Such an intermediate representation encodes useful prior distribution information about keypoints coordinates. Thus by starting the reverse process from this representation, we effectively exploit the distribution priors in the representation to aid the diffusion model in recovering accurate keypoints coordinates [11]. Below, we first describe how we initialize the keypoints distribution $D_K$ , and then discuss the corresponding forward and reverse processes in our new framework.", + "bbox": [ + 75, + 551, + 467, + 823 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Keypoints Distribution Initialization. We initialize the keypoints coordinates distribution $D_K$ with extracted heatmaps. Specifically, similar to [29, 34, 53], we first use an off-the-shelf object detector (e.g., Faster RCNN [49]) to detect the bounding box of the target object, and then crop", + "bbox": [ + 75, + 824, + 467, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the detected Region of Interest (ROI) from the input image. We send the ROI into a sub-network (i.e., the keypoints distribution initializer) to predict a number of heatmaps where each heatmap corresponds to one 2D keypoint. We then normalize each heatmap to convert it to a probability distribution. In this way, each normalized heatmap naturally represents the distribution of the corresponding keypoint coordinates, and thus we can use these heatmaps to initialize $D_K$ .", + "bbox": [ + 496, + 90, + 890, + 226 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Forward Process. After distribution initialization, the next step is to iteratively reduce the noise and indeterminacy in the initialized distribution $D_K$ by performing the reverse process $(D_K \\to D_{K-1} \\to \\ldots \\to D_0)$ . To train the diffusion model to perform such a reverse process, we need to obtain the distributions generated along the way (i.e., $\\{D_k\\}_{k=1}^{K-1}$ ) as the supervision signals. Thus, we first need to conduct the forward process to obtain samples from $\\{D_k\\}_{k=1}^{K-1}$ . Specifically, given the ground-truth keypoints coordinates distribution $D_0$ , we define the forward process as: $D_0 \\to D_1 \\to \\ldots \\to D_K$ , where $K$ is the number of diffusion steps. In this forward process, we iteratively add noise to the determinate distribution $D_0$ , i.e., increasing the indeterminacy of generated distributions, to transform it into the initialized distribution $D_K$ with indeterminacy. Via this process, we can generate $\\{D_k\\}_{k=1}^{K-1}$ along the way and use them as supervision signals to train the diffusion model to perform the reverse process.", + "bbox": [ + 496, + 229, + 890, + 500 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "However, in our framework, we do not aim to transform the ground-truth keypoints coordinates distribution $D_0$ towards a standard Gaussian distribution via the forward process, because our initialized distribution $D_K$ is not a random noise. Instead, as discussed before, $D_K$ is initialized with heatmaps (as shown in Fig. 3), since the heatmaps can provide rough estimations about the keypoints coordinates distribution. 
To effectively utilize such priors in $D_K$ to facilitate the reverse process, we aim to enable the diffusion model to start the reverse process (denoising process) from $D_K$ instead of random Gaussian noise [11]. Thus, the basic forward process (described in Sec. 3.1) in existing generative diffusion models is not suitable in our framework, which motivates us to design a new forward process for our task.", + "bbox": [ + 496, + 503, + 890, + 728 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "However, it is non-trivial to design such a forward process, as the initialized distribution $D_K$ is based on extracted heatmaps, and thus $D_K$ can be complex and irregular, as shown in Fig. 4. Hence modeling $D_K$ as a Gaussian distribution can result in potentially large errors. To handle this challenge, motivated by that the Mixture of Cauchy (MoC) model can effectively and reliably characterize complex and intractable distributions [26], we leverage MoC to characterize $D_K$ . Based on the characterized distribution, we can then perform a corresponding MoC-based forward process.", + "bbox": [ + 496, + 732, + 890, + 883 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, we denote the number of Cauchy kernels", + "bbox": [ + 517, + 885, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "9679", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/437c5b1d10ef8e5b67324541356b9f8aca372444fd4742e3719751b343fb62ea.jpg", + "image_caption": [ + "Figure 3. Illustration of our framework. During testing, given an input image, we first crop the Region of Interest (ROI) from the image through an object detector. After that, we feed the cropped ROI to the keypoints distribution initializer to obtain the heatmaps that can provide useful distribution priors about keypoints, to initialize $D_K$ . Meanwhile, we can obtain object appearance features $f_{\\mathrm{app}}$ . Next, we pass $f_{\\mathrm{app}}$ into the encoder, and the output of the encoder will serve as conditional information to aid the reverse process in the decoder. We sample $M$ sets of 2D keypoints coordinates from $D_K$ , and feed these $M$ sets of coordinates into the decoder to perform the reverse process iteratively together with the step embedding $f_D^k$ . At the final reverse step ( $K$ -th step), we average $\\{d_0^i\\}_{i=1}^M$ as the final keypoints coordinates prediction $d_0$ , and use $d_0$ to compute the 6D pose with the pre-selected 3D keypoints via a PnP solver." + ], + "image_footnote": [], + "bbox": [ + 125, + 94, + 851, + 280 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "in the MoC distribution as $U$ , and use the Expectation-Maximum-type (EM) algorithm [26, 55] to optimize the MoC parameters $\\eta^{\\mathrm{MoC}}$ to characterize the distribution $D_K$ as:", + "bbox": [ + 75, + 398, + 470, + 457 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\eta_ {*} ^ {\\mathrm {M o C}} = \\operatorname {E M} \\left(\\prod_ {v = 1} ^ {V} \\sum_ {u = 1} ^ {U} \\pi_ {u} \\operatorname {C a u c h y} \\left(d _ {K} ^ {v} \\mid \\mu_ {u}, \\gamma_ {u}\\right)\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 458, + 468, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\{d_K^v\\}_{v = 1}^V$ denotes $V$ sets of keypoints coordinates sampled from the distribution $D_{K}$ . 
Note each set of keypoints coordinates $d_K^v$ contains all the $N$ keypoints coordinates (i.e., $d_K^v\\in \\mathbb{R}^{N\\times 2}$ ). $\\pi_u$ denotes the weight of the $u$ -th Cauchy kernel ( $\\sum_{u = 1}^{U}\\pi_{u} = 1$ ), and $\\eta^{\\mathrm{MoC}} = \\{\\mu_1,\\gamma_1,\\dots,\\mu_U,\\gamma_U\\}$ denotes the MoC parameters in which $\\mu_{u}$ and $\\gamma_{u}$ are the location and scale of the $u$ -th Cauchy kernel. Via the above optimization, we can use the optimized parameters $\\eta_*^{\\mathrm{MoC}}$ to model $D_K$ as the characterized distribution $(\\hat{D}_K)$ . Given $\\hat{D}_K$ , we aim to conduct the forward process from the ground-truth keypoints coordinates distribution $D_0$ , so that after $K$ steps of forward diffusion, the generated distribution reaches $\\hat{D}_K$ . To this end, we modify Eq. (3) as follows:", + "bbox": [ + 75, + 502, + 472, + 718 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {d} _ {k} = \\sqrt {\\overline {{\\alpha}} _ {k}} d _ {0} + (1 - \\sqrt {\\overline {{\\alpha}} _ {k}}) \\mu^ {\\mathrm {M o C}} + \\sqrt {1 - \\overline {{\\alpha}} _ {k}} \\epsilon^ {\\mathrm {M o C}} \\qquad (5)\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 727, + 468, + 747 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\hat{d}_k\\in \\mathbb{R}^{N\\times 2}$ represents a sample (i.e., a set of $N$ keypoints coordinates) from the generated distribution $\\tilde{D}_k$ , $\\mu^{\\mathrm{MoC}} = \\sum_{u = 1}^{U}\\mathbb{1}_{u}\\mu_{u}$ , and $\\epsilon^{\\mathrm{MoC}}\\sim$ Cauchy(0, $\\sum_{u = 1}^{U}(\\mathbb{1}_{u}\\gamma_{u})$ ). Note that $\\mathbb{1}_u$ is a zero-one indicator and $\\sum_{u = 1}^{U}\\mathbb{1}_u = 1$ and $\\operatorname {Prob}(\\mathbb{1}_u = 1) = \\pi_u$ .", + "bbox": [ + 75, + 758, + 468, + 840 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "From Eq. (5), we can observe that when $K$ is sufficiently large and $\\overline{\\alpha}_K$ correspondingly decreases to nearly zero, the distribution of $\\hat{d}_K$ reaches the MoC distribution, i.e., $\\hat{d}_K = \\mu^{\\mathrm{MoC}} + \\epsilon^{\\mathrm{MoC}}\\sim \\mathrm{Cauchy}(\\sum_{u = 1}^{U}(\\mathbb{1}_{u}\\mu_{u}),\\sum_{u = 1}^{U}(\\mathbb{1}_{u}\\gamma_{u}))$ .", + "bbox": [ + 75, + 840, + 470, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ter the above MoC-based forward process, we can use the generated $\\{\\hat{D}_k\\}_{k=1}^{K-1}$ as supervision signals to train the diffusion model $M_{\\mathrm{diff}}$ to learn the reverse process. More details about Eq. (5) can be found in Supplementary material. Such a forward process is only conducted to generate supervision signals for training the diffusion model, while we only need to conduct the reverse process during testing.", + "bbox": [ + 496, + 398, + 893, + 503 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Reverse Process. In the reverse process, we aim to recover a desired determinate keypoints distribution $D_0$ from the initial distribution $D_K$ . 
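For concreteness, the MoC-based forward sampling of Eq. (5), which produces the supervision signals mentioned above, can be sketched as follows. This is an illustrative sketch only: the MoC parameters below are placeholders standing in for the EM-fitted values of Eq. (4), and reusing one kernel index for both $\mu^{\mathrm{MoC}}$ and $\epsilon^{\mathrm{MoC}}$ is an assumption.

```python
# Illustrative sketch (not the authors' code) of one MoC-based forward step, Eq. (5).
# The MoC parameters are placeholders standing in for the EM-fitted values of Eq. (4);
# sharing one kernel index u between mu and the Cauchy noise is an assumption.
import numpy as np

rng = np.random.default_rng(0)
N, U = 128, 9                                   # keypoints and Cauchy kernels (U = 9 in Sec. 4.2)
pi = np.full(U, 1.0 / U)                        # kernel weights, sum to 1
mu = rng.uniform(0.0, 1.0, size=(U, N, 2))      # kernel locations (placeholder values)
gamma = np.full((U, N, 2), 0.05)                # kernel scales (placeholder values)

def moc_forward_sample(d0, alpha_bar_k):
    """Draw hat{d}_k from d_0 and \bar{alpha}_k following Eq. (5)."""
    u = rng.choice(U, p=pi)                     # indicator 1_u = 1 with probability pi_u
    eps_moc = gamma[u] * rng.standard_cauchy(size=d0.shape)   # Cauchy(0, gamma_u) noise
    return (np.sqrt(alpha_bar_k) * d0
            + (1.0 - np.sqrt(alpha_bar_k)) * mu[u]
            + np.sqrt(1.0 - alpha_bar_k) * eps_moc)

d0 = rng.uniform(0.0, 1.0, size=(N, 2))         # ground-truth keypoint coordinates (normalized)
d_hat_k = moc_forward_sample(d0, alpha_bar_k=0.5)
```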
As discussed above, we characterize $D_K$ via a MoC model and then generate $\\{\\hat{D}_k\\}_{k=1}^{K-1}$ as supervision signals to optimize the diffusion model to learn to perform the reverse process $(\\hat{D}_K \\to \\hat{D}_{K-1} \\to \\dots \\to D_0)$ , in which the model iteratively reduces the noise and indeterminacy in $\\hat{D}_K$ to generate $D_0$ .", + "bbox": [ + 496, + 505, + 893, + 626 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "However, it can still be difficult to generate $D_0$ by directly performing the reverse process from $\\hat{D}_K$ , because the object appearance features are lacking in $\\hat{D}_K$ . Such features can help constrain the model reverse process based on the input image to get accurate predictions. Thus we further leverage the appearance features from the image as context to guide $M_{\\mathrm{diff}}$ in the reverse process. Specifically, we reuse the features extracted from the keypoints distribution initializer as the appearance features $f_{\\mathrm{app}}$ and feed $f_{\\mathrm{app}}$ into the diffusion model, as shown in Fig. 3.", + "bbox": [ + 496, + 627, + 895, + 779 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our reverse process aims to generate a determinate distribution $D_0$ from the indeterminate distribution $\\hat{D}_K$ (during training) or $D_K$ (during testing). Below we describe the reverse process during testing. We first obtain $f_{\\mathrm{app}}$ from the input image. Then to help the diffusion model to learn to perform denoising at each reverse step, following [18, 52], we generate the unique step embedding $f_D^k$ to inject the step number $(k)$ information into the model. In this way, given a", + "bbox": [ + 496, + 780, + 895, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "9680", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "set of noisy keypoints coordinates $d_{k}\\in \\mathbb{R}^{N\\times 2}$ drawn from $D_{k}$ at the $k^{th}$ step, we use diffusion model $M_{\\mathrm{diff}}$ conditioned on the step embedding $f_{D}^{k}$ and the object appearance features $f_{\\mathrm{app}}$ to recover $d_{k - 1}$ from $d_{k}$ as:", + "bbox": [ + 76, + 90, + 468, + 151 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nd _ {k - 1} = M _ {\\text {d i f f}} \\left(d _ {k}, f _ {\\text {a p p}}, f _ {D} ^ {k}\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 155, + 468, + 172 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3. Training and Testing", + "text_level": 1, + "bbox": [ + 76, + 181, + 272, + 196 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training. Following [44], we first select $N$ 3D keypoints from the surface of the object CAD model using the farthest point sampling (FPS) algorithm. Then we conduct the training process in the following two stages.", + "bbox": [ + 75, + 205, + 468, + 265 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the first stage, to initialize the distribution $D_K$ , we optimize the keypoints distribution initializer. Specifically, for each training sample, given the pre-selected $N$ 3D keypoints, we can obtain the ground-truth coordinates of the corresponding $N$ 2D keypoints using the ground-truth 6D object pose. Then for each keypoints, based on the corresponding ground-truth coordinates, we generate a ground-truth heatmap following [42] for training the initializer. 
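A common way to render such a ground-truth heatmap is a 2D Gaussian centered at the keypoint, sketched below; the exact construction in [42], as well as the resolution and sigma chosen here, are assumptions rather than details from the paper.

```python
# Illustrative sketch (not the authors' code) of rendering one ground-truth heatmap.
# A 2D Gaussian centered at the keypoint is a common choice; the exact construction
# in [42], and the resolution and sigma used here, are assumptions.
import numpy as np

def gt_heatmap(kpt_xy, height=64, width=64, sigma=2.0):
    """kpt_xy = (x, y): ground-truth keypoint location in heatmap coordinates."""
    ys, xs = np.mgrid[0:height, 0:width]
    d2 = (xs - kpt_xy[0]) ** 2 + (ys - kpt_xy[1]) ** 2
    h = np.exp(-d2 / (2.0 * sigma ** 2))
    return h / h.sum()                          # normalize so each heatmap sums to one

# one heatmap per keypoint, stacked into an (N, H/4, W/4)-style array
H_gt = np.stack([gt_heatmap(xy) for xy in [(10.5, 20.0), (32.0, 40.0)]])
```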
Thus for each training sample, we generate $N$ ground-truth heatmaps. In this way, the loss function $L_{\\mathrm{init}}$ for optimizing the initializer can be formulated as:", + "bbox": [ + 75, + 266, + 468, + 430 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {i n i t}} = \\left\\| \\mathbf {H} _ {\\text {p r e d}} - \\mathbf {H} _ {\\mathrm {G T}} \\right\\| _ {2} ^ {2} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 431, + 468, + 460 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathbf{H}_{\\mathrm{pred}}$ and $\\mathbf{H}_{\\mathrm{GT}}$ denote the predicted heatmaps and ground-truth heatmaps, respectively.", + "bbox": [ + 75, + 460, + 468, + 491 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the second stage, we optimize the diffusion model $M_{\\mathrm{diff}}$ . For each training sample, to optimize $M_{\\mathrm{diff}}$ , we perform the following steps. (1) We first send the input image into an off-the-shelf object detector [57] and then feed the detected ROI into the trained initializer to obtain $N$ heatmaps. Meanwhile, we can also obtain $f_{\\mathrm{app}}$ . (2) We use the $N$ predicted heatmaps to initialize $D_K$ , and leverage the EM-type algorithm to characterize $D_K$ as a MoC distribution $\\hat{D}_K$ . (3) Based on $\\hat{D}_K$ , we use the ground-truth keypoints coordinates $d_0$ to directly generate $M$ sets of $(\\hat{d}_1, \\dots, \\hat{d}_K)$ (i.e., $\\{\\hat{d}_1^i, \\dots, \\hat{d}_K^i\\}_{i=1}^M$ ) via the forward process (Eq. (5)). (4) Then, we aim to optimize the diffusion model $M_{\\mathrm{diff}}$ to recover $\\hat{d}_{k-1}^i$ from $\\hat{d}_k^i$ iteratively. Following previous diffusion works [18, 52], we formulate the loss $L_{\\mathrm{diff}}$ for optimizing $M_{\\mathrm{diff}}$ as follows $(\\hat{d}_0^i = d_0$ for all $i$ ):", + "bbox": [ + 75, + 491, + 468, + 718 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {d i f f}} = \\sum_ {i = 1} ^ {M} \\sum_ {k = 1} ^ {K} \\left\\| M _ {\\text {d i f f}} \\left(\\hat {d} _ {k} ^ {i}, f _ {\\text {a p p}}, f _ {D} ^ {k}\\right) - \\hat {d} _ {k - 1} ^ {i} \\right\\| _ {2} ^ {2} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 119, + 720, + 468, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Testing. During testing, for each testing sample, by feeding the input image to the object detector and the keypoints distribution initializer consecutively, we can initialize $D_K$ and meanwhile obtain $f_{\\mathrm{app}}$ . Then, we perform the reverse process. During the reverse process, we sample $M$ sets of noisy keypoints coordinates from $D_K$ (i.e., $\\{d_K^i\\}_{i = 1}^M$ ) and feed them into the trained diffusion model. Here we sample $M$ sets of keypoints coordinates, because we are converting from a distribution $(D_K)$ towards another distribution $(D_0)$ .", + "bbox": [ + 75, + 763, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Then the model iteratively performs the reverse steps. After $K$ reverse diffusion steps, we obtain $M$ sets of predicted keypoints coordinates (i.e., $\\{d_0^i\\}_{i = 1}^M$ ). To obtain the final keypoints coordinates prediction $d_{0}$ , we compute the mean of the $M$ predictions. Finally, we can solve for the 6D object pose using a PnP solver, like [44, 53].", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4. 
Model Architecture", + "text_level": 1, + "bbox": [ + 498, + 191, + 687, + 207 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our framework mainly consists of the diffusion model $(M_{\\mathrm{diff}})$ and the keypoints distribution initializer.", + "bbox": [ + 496, + 215, + 890, + 246 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Diffusion Model $M_{\\mathrm{diff}}$ . As illustrated in Fig. 3, our proposed diffusion model $M_{\\mathrm{diff}}$ mainly consists of a transformer encoder-decoder architecture. The appearance features $f_{\\mathrm{app}}$ are sent into the encoder for extracting context information to aid the reverse process in the decoder. $f_{D}^{k}$ and $\\{d_k^i\\}_{i=1}^M$ (or $\\{\\hat{d}_k^i\\}_{i=1}^M$ during training) are sent into the decoder for the reverse process. Both the encoder and the decoder contain a stack of three transformer layers.", + "bbox": [ + 496, + 247, + 890, + 369 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "More specifically, as for the encoder part, we first map $f_{\\mathrm{app}} \\in \\mathbb{R}^{16 \\times 16 \\times 512}$ through a $1 \\times 1$ convolution layer to a latent embedding $e_{\\mathrm{app}} \\in \\mathbb{R}^{16 \\times 16 \\times 128}$ . To retain the spatial information, following [59], we further incorporate positional encodings into $e_{\\mathrm{app}}$ . Afterwards, we flatten $e_{\\mathrm{app}}$ into a feature sequence $(\\mathbb{R}^{256 \\times 128})$ , and send it into the encoder. The encoder output $f_{\\mathrm{enc}}$ containing the extracted object information will be sent into the decoder to aid the reverse process. Note that during testing, for each sample, we only need to conduct the above computation process once to obtain the corresponding $f_{\\mathrm{enc}}$ .", + "bbox": [ + 496, + 369, + 890, + 537 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The decoder part iteratively performs the reverse process. For notation simplicity, below we describe the reverse process for a single sample $d_{k}$ instead of the $M$ samples $\\left(\\{d_1^i,\\dots,d_K^i\\}_{i = 1}^M\\right)$ . Specifically, at the $k$ -th reverse step, to inject the current step number $(k)$ information into the decoder, we first generate the step embedding $f_{D}^{k}\\in \\mathbb{R}^{1\\times 128}$ using the sinusoidal function following [18, 52]. Meanwhile, we use an FC layer to map the input $d_{k}\\in \\mathbb{R}^{N\\times 2}$ to a latent embedding $e_k\\in \\mathbb{R}^{N\\times 128}$ . Then we concatenate $f_{D}^{k}$ and $e_k$ along the first dimension, and send it into the decoder. By interacting with the encoder output $f_{\\mathrm{enc}}$ (extracted object information) via cross-attention at each layer, the decoder produces $f_{\\mathrm{dec}}$ , which is further mapped into the keypoints coordinates prediction $d_{k - 1}\\in \\mathbb{R}^{N\\times 2}$ via an FC layer. Then we send $d_{k - 1}$ back to the decoder as the input to perform the next reverse step.", + "bbox": [ + 496, + 537, + 890, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Keypoints Distribution Initializer. The initializer adopts a ResNet-34 backbone, which is commonly used in 6D pose estimation methods [4, 53, 61]. 
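For illustration, a minimal PyTorch sketch of the transformer encoder-decoder denoiser described above is given below, before returning to the initializer. The 128-dimensional latents and the three encoder and decoder layers follow the text, while the number of attention heads, the feed-forward width, the learned positional encoding, and the handling of the step token are assumptions.

```python
# Minimal PyTorch sketch (not the authors' code) of the denoiser M_diff described above.
# Latent width 128 and 3 encoder / 3 decoder layers follow the text; the head count,
# feed-forward width, learned positional encoding and step-token handling are assumptions.
import math
import torch
import torch.nn as nn

class Denoiser(nn.Module):
    def __init__(self, n_kpts=128, d_model=128, n_layers=3, n_heads=4):
        super().__init__()
        self.proj_app = nn.Conv2d(512, d_model, kernel_size=1)       # 1x1 conv on f_app
        self.pos_emb = nn.Parameter(torch.zeros(16 * 16, d_model))   # positional enc. (assumed learned)
        self.coord_fc = nn.Linear(2, d_model)                        # maps d_k to e_k
        enc_layer = nn.TransformerEncoderLayer(d_model, n_heads, batch_first=True)
        dec_layer = nn.TransformerDecoderLayer(d_model, n_heads, batch_first=True)
        self.encoder = nn.TransformerEncoder(enc_layer, n_layers)
        self.decoder = nn.TransformerDecoder(dec_layer, n_layers)
        self.out_fc = nn.Linear(d_model, 2)                          # back to N x 2 coordinates
        self.d_model = d_model

    def step_embedding(self, k, batch):
        """Sinusoidal embedding of the step number k, shape (batch, 1, d_model)."""
        half = self.d_model // 2
        freqs = torch.exp(-math.log(10000.0) * torch.arange(half) / half)
        ang = k * freqs
        emb = torch.cat([torch.sin(ang), torch.cos(ang)])
        return emb.expand(batch, 1, -1)

    def forward(self, d_k, f_app, k):
        """d_k: (B, N, 2) noisy keypoints; f_app: (B, 512, 16, 16); k: step number."""
        e_app = self.proj_app(f_app).flatten(2).transpose(1, 2)      # (B, 256, 128)
        f_enc = self.encoder(e_app + self.pos_emb)                   # encoder output
        e_k = self.coord_fc(d_k)                                     # (B, N, 128)
        tokens = torch.cat([self.step_embedding(k, d_k.shape[0]), e_k], dim=1)
        f_dec = self.decoder(tokens, f_enc)                          # cross-attention to f_enc
        return self.out_fc(f_dec[:, 1:])                             # predicted d_{k-1}, (B, N, 2)

model = Denoiser()
d_prev = model(torch.randn(1, 128, 2), torch.randn(1, 512, 16, 16), k=50)
```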
To generate heatmaps to initialize the distribution $D_K$ , we add two deconvolution layers followed by a $1 \\times 1$ convolution layer after the ResNet-34 backbone, and then we obtain predicted heatmaps $\\mathbf{H}_{\\mathrm{pred}} \\in \\mathbb{R}^{N \\times \\frac{H}{4} \\times \\frac{W}{4}}$ where $H$ and $W$ denote the height and width of the input ROI image respec", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "9681", + "bbox": [ + 480, + 944, + 513, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b61da41b8944ffeb55074eed5c6f67f97a2a423b9bee6f0569047e61a97a81c9.jpg", + "image_caption": [ + "input image" + ], + "image_footnote": [], + "bbox": [ + 158, + 89, + 274, + 179 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0b4b9acb5c85467644b3dd29dc4d8ebc1a7f72cded7d35668f0c854a9786e635.jpg", + "image_caption": [ + "Figure 4. Visualization of the denoising process of a sample with our framework. In this example, the target object is the yellow duck and for clarity, we here show three keypoints only. The red dots indicate the ground-truth locations of these three keypoints. The noisy heatmap before denoising reflects that factors like occlusions and clutter in the scene can introduce noise and indeterminacy when detecting keypoints. As shown, our diffusion model can effectively and smoothly reduce the noise and indeterminacy in the initial distribution step by step, finally recovering a high-quality and determinate distribution of keypoints coordinates. (Better viewed in color)" + ], + "image_footnote": [], + "bbox": [ + 300, + 89, + 428, + 188 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/04c376050a06a7e26e7256b49519f8986a0b703a6aa603b8969c3eed2c6e5963.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 429, + 90, + 545, + 185 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c7f21552bc1e952f449a1464eca28832e2ab5f7c261b243fc3d30f4403e51998.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 557, + 90, + 683, + 185 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0def7fb1407240b98f521abcde04b9fa3bf1679cada464ac1d6b5e5a723577cb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 691, + 90, + 803, + 186 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/481fef7279a38397312ba15e17794116f09ff211cb010d3974ff71280207d39b.jpg", + "table_caption": [ + "Table 1. Comparisons with RGB-based 6D object pose estimation methods on the LM-O dataset. (*) denotes symmetric objects." + ], + "table_footnote": [], + "table_body": "
Method | PVNet [44] | HybridPose [51] | RePose [24] | DeepIM [33] | GDR-Net [61] | SO-Pose [8] | CRT-6D [4] | ZebraPose [53] | CheckerPose [35] | Ours
ape | 15.8 | 20.9 | 31.1 | 59.2 | 46.8 | 48.4 | 53.4 | 57.9 | 58.3 | 60.6
can | 63.3 | 75.3 | 80.0 | 63.5 | 90.8 | 85.8 | 92.0 | 95.0 | 95.7 | 97.9
cat | 16.7 | 24.9 | 25.6 | 26.2 | 40.5 | 32.7 | 42.0 | 60.6 | 62.3 | 63.2
driller | 65.7 | 70.2 | 73.1 | 55.6 | 82.6 | 77.4 | 81.4 | 94.8 | 93.7 | 96.6
duck | 25.2 | 27.9 | 43.0 | 52.4 | 46.9 | 48.9 | 44.9 | 64.5 | 69.9 | 67.2
eggbox* | 50.2 | 52.4 | 51.7 | 63.0 | 54.2 | 52.4 | 62.7 | 70.9 | 70.0 | 73.5
glue* | 49.6 | 53.8 | 54.3 | 71.7 | 75.8 | 78.3 | 80.2 | 88.7 | 86.4 | 92.0
holepuncher | 39.7 | 54.2 | 53.6 | 52.5 | 60.1 | 75.3 | 74.3 | 83.0 | 83.8 | 85.5
Mean | 40.8 | 47.5 | 51.6 | 55.5 | 62.2 | 62.3 | 66.3 | 76.9 | 77.5 | 79.6
", + "bbox": [ + 106, + 279, + 859, + 378 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/21a22844c4fedf440150406bf351ef49223aa0dc64883c425cd53c2d8c624806.jpg", + "table_caption": [ + "Table 2. Comparisons with RGB-based 6D object pose estimation methods on the YCB-V dataset. (-) indicates the corresponding result is not reported in the original paper." + ], + "table_footnote": [], + "table_body": "
Method | ADD(-S) | AUC of ADD-S | AUC of ADD(-S)
SegDriven [21] | 39.0 | - | -
SingleStage [22] | 53.9 | - | -
CosyPose [29] | - | 89.8 | 84.5
RePose [24] | 62.1 | 88.5 | 82.0
GDR-Net [61] | 60.1 | 91.6 | 84.4
SO-Pose [8] | 56.8 | 90.9 | 83.9
ZebraPose [53] | 80.5 | 90.1 | 85.3
CheckerPose [35] | 81.4 | 91.3 | 86.4
Ours | 83.8 | 91.5 | 87.0
", + "bbox": [ + 96, + 431, + 447, + 549 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tively. Moreover, the features outputted by the ResNet-34 backbone, combined with features obtained from methods [35, 53], are used as the object features $f_{\\mathrm{app}}$ .", + "bbox": [ + 75, + 561, + 467, + 609 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 623, + 207, + 638 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1. Datasets & Evaluation Metrics", + "text_level": 1, + "bbox": [ + 76, + 648, + 348, + 664 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Given that previous works [8, 24, 67] have reported the evaluation accuracy over $95\\%$ on the Linemod (LM) dataset [17], the performance on this dataset has become saturated. Thus recent works [4, 53] mainly focus on using the LM-O dataset [2] and the YCB-V dataset [63] that are more challenging, which we follow.", + "bbox": [ + 75, + 672, + 467, + 762 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "LM-O Dataset. The Linemod Occlusion (LM-O) dataset contains 1214 images and is a challenging subset of the LM dataset. In this dataset, around 8 objects are annotated on each image and the objects are often heavily occluded. Following [4, 53], we use both the real images from the LM dataset and the publicly available physically-based rendering (pbr) images [6] as the training images for LM-O. Following [53, 61], on LM-O dataset, we evaluate the model performance using the commonly-used ADD(-S) metric.", + "bbox": [ + 75, + 763, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For this metric, we compute the mean distance between the model points transformed using the predicted pose and the same model points transformed using the ground-truth pose. For symmetric objects, following [63], the mean distance is computed based on the closest point distance. If the mean distance is less than $10\\%$ of the model diameter, the predicted pose is regarded as correct.", + "bbox": [ + 496, + 392, + 890, + 496 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "YCB-V Dataset. The YCB-V dataset is a large-scale dataset containing 21 objects and over 100k real images. The samples in this dataset often exhibit occlusions and cluttered backgrounds. Following [4, 53], we use both the real images from the training set of the YCB-V dataset and the publicly available pbr images as the training images for YCB-V. Following [53, 61], we evaluate the model performance using the following metrics: ADD(-S), AUC (Area Under the Curve) of ADD-S, and AUC of ADD(-S). For calculating AUC, we set the maximum distance threshold to $10\\mathrm{cm}$ following [63].", + "bbox": [ + 496, + 500, + 890, + 665 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 679, + 715, + 695 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conduct our experiments on an Nvidia V100 GPU. We set the number of pre-selected 3D keypoints $N$ to 128. During training, following [34, 53], we utilize the dynamic zoom-in strategy to produce augmented ROI images. During testing, we use the detected bounding box with Faster RCNN [49] and FCOS [57] provided by CDPNv2 [34]. The cropped ROI image is resized to the shape of $3 \\times 256 \\times 256$ ( $H = W = 256$ ). We characterize $D_K$ via a MoC model with 9 Cauchy kernels ( $U = 9$ ) for the forward diffusion process. 
We optimize the diffusion model $M_{\\mathrm{diff}}$ for 1500 epochs using the Adam optimizer [28] with an initial learning rate of 4e-5. Moreover, we set the number of sampled sets $M$ to 5, and the number of diffusion steps $K$ to", + "bbox": [ + 496, + 703, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "9682", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b903ced34bd62194809d38eeea7ebb4deac90adda7a6ec6b6b1dd15de31bce23.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 78, + 87, + 168, + 154 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f6f8a61b91946b5b4da0d6bd2058734361d49f5d4f4dad94c810fad530f14455.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 88, + 267, + 152 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1392556afa4f233e07d526d0ac630e2647172bb4c7341fb837a8ae7e94d8e0fe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 279, + 88, + 367, + 152 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9413cf6a51c687734673050f9721c34d24fda513a1c75409289e7c7fd8e3ec01.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 379, + 88, + 468, + 152 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f861959908df876c547b3fda9766273dec454dc9782689ad252270167f42ff13.jpg", + "image_caption": [ + "Figure 5. Qualitative results. Green bounding boxes represent the ground-truth poses and blue bounding boxes represent the predicted poses of our method. As shown, even facing severe occlusions, clutter in the scene or varying environment, our framework can still accurately recover the object poses, showing the effectiveness of our method for handling the noise and indeterminacy caused by various factors in object pose estimation." + ], + "image_footnote": [], + "bbox": [ + 78, + 160, + 168, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bed618827a36a469dfd3434e06bbbf66692ee482a9da0015ba12e6b8f6a884bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 160, + 267, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/19561a8956a2b49d227d14d1059e3c4bf04463a967e6703eeb7c05efe7904dd0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 279, + 160, + 367, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/40ce7df9b492104dfd35e3a68336179bde926a73fbd8a86faa6c71e5fd6b68ba.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 379, + 160, + 468, + 224 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "100. Following [53], we use Progressive-X [1] as the PnP solver. Note that during testing, instead of performing the reverse process with all the $K$ steps, we accelerate the process with DDIM [52], a recently proposed diffusion acceleration method. With DDIM acceleration, we only need to perform 10 steps to finish the reverse process during testing.", + "bbox": [ + 75, + 347, + 468, + 439 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. Comparison with State-of-the-art Methods", + "text_level": 1, + "bbox": [ + 76, + 449, + 441, + 465 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Results on LM-O Dataset. As shown in Tab. 1, compared to existing methods, our method achieves the best mean performance, showing the superiority of our method. We also show qualitative results on the LM-O dataset in Fig. 5. 
As shown, even in the presence of large occlusions (including self-occlusions) and cluttered backgrounds, our method still produces accurate predictions.", + "bbox": [ + 75, + 472, + 468, + 577 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Results on YCB-V Dataset. As shown in Tab. 2, our framework achieves the best performance on both the ADD(-S) and the AUC of ADD(-S) metrics, and is comparable to the state-of-the-art method on the AUC of ADD-S metric, showing the effectiveness of our method.", + "bbox": [ + 75, + 579, + 468, + 654 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Ablation Studies", + "text_level": 1, + "bbox": [ + 76, + 665, + 240, + 679 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conduct extensive ablation experiments on the LM-O dataset, and we report the model performance on ADD(-S) metric averaged over all the objects.", + "bbox": [ + 75, + 688, + 468, + 733 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Impact of denoising process. In our framework, we predict keypoints coordinates via performing the denoising process. To evaluate the efficacy of this process, we test three variants. In the first variant (Variant A), we remove the diffusion model", + "bbox": [ + 75, + 734, + 295, + 853 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7bacf8a365e25ae6fcc0afc71753e368414288d184ae4124d81596ca2aee8244.jpg", + "table_caption": [ + "Table 3. Evaluation on the effectiveness of the denoising process." + ], + "table_footnote": [], + "table_body": "
Method | ADD(-S)
Variant A | 49.2
Variant B | 57.3
Variant C | 61.1
6D-Diff | 79.6
", + "bbox": [ + 313, + 779, + 468, + 851 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "$M_{\\mathrm{diff}}$ and predict keypoints coordinates directly from the heatmaps produced by the keypoints distribution initializer. The second variant (Variant $B$ ) has the same model architec", + "bbox": [ + 76, + 854, + 468, + 898 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ture as our framework, but the diffusion model is optimized to directly predict the coordinates instead of learning the reverse process. Same as Variant $B$ , the third variant (Variant $C$ ) is also optimized to directly predict coordinates without denoising process. For Variant $C$ , we stack our diffusion model structure multiple times to produce a deep network, which has similar computation complexity with our framework. As shown in Tab. 3, compared to our framework, the performance of these variants significantly drops, showing that the effectiveness of our framework mainly lies in the designed denoising process.", + "bbox": [ + 496, + 90, + 890, + 256 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Impact of object appearance features $f_{\\mathrm{app}}$ In our framework, we send the appearance features $f_{\\mathrm{app}}$ into the diffusion model $M_{\\mathrm{diff}}$ to aid the reverse process. To evaluate its effect,", + "bbox": [ + 498, + 257, + 718, + 347 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/479485e488975fa8d81d403a39dcd7c24ada569bfce8a0b863238f459f3dac0b.jpg", + "table_caption": [ + "Table 4. Evaluation on the effectiveness of the object appearance features $f_{\\mathrm{app}}$" + ], + "table_footnote": [], + "table_body": "
Method | ADD(-S)
w/o $f_{\mathrm{app}}$ | 74.4
6D-Diff | 79.6
", + "bbox": [ + 736, + 305, + 870, + 345 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "we test a variant in which we do not send $f_{\\mathrm{app}}$ into $M_{\\mathrm{diff}}$ ( $w / o \\, f_{\\mathrm{app}}$ ). As shown in Tab. 4, our framework performs better than this variant, showing that $f_{\\mathrm{app}}$ can aid $M_{\\mathrm{diff}}$ to get more accurate predictions.", + "bbox": [ + 498, + 348, + 890, + 407 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Impact of MoC design. During training, we model the distribution $D_K$ from the intermediate representation", + "bbox": [ + 500, + 409, + 640, + 497 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/8fbb26489f136caeefd614faf3467693341cb2af5a56a7d4ab853d9272ec87ed.jpg", + "table_caption": [ + "Table 5. Evaluation on the effectiveness of the MoC design." + ], + "table_footnote": [], + "table_body": "
Method | ADD(-S)
Standard diffusion w/o MoC | 73.1
Heatmaps as condition | 76.2
6D-Diff | 79.6
", + "bbox": [ + 658, + 439, + 890, + 489 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(heatmaps) as a MoC distribution $\\hat{D}_K$ , and train the diffusion model $M_{\\mathrm{diff}}$ to perform the reverse process from $\\hat{D}_K$ . To investigate the impact of this design, we evaluate two variants that train $M_{\\mathrm{diff}}$ in different ways. In the first variant (Standard diffusion w/o MoC), we train the model to start the reverse process from the standard Gaussian noise, i.e., following the basic forward process in Eq. (3) for model training. In the second variant (Heatmaps as condition), we still train the model to start denoising from the random Gaussian noise but we use the heatmaps as the condition for the reverse process. As shown in Tab. 5, our framework consistently outperforms both variants, showing effectiveness of the designed MoC-based forward process.", + "bbox": [ + 496, + 498, + 890, + 695 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 709, + 617, + 724 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we proposed a novel diffusion-based 6D object pose estimation framework, which effectively handles noise and indeterminacy in object pose estimation. In our framework, we formulate object keypoints detection as a carefully-designed reverse diffusion process. We design a novel MoC-based forward process to effectively utilize the distribution priors in intermediate representations. Our framework achieves superior performance.", + "bbox": [ + 496, + 734, + 890, + 853 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. This work was supported by the National Research Foundation Singapore under the AI Singapore Programme (Award Number: AISG-100E-2023-121).", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "9683", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Daniel Barath and Jiri Matas. Progressive-x: Efficient, anytime, multi-model fitting algorithm. In Proceedings of the IEEE/CVF international conference on computer vision, pages 3780-3788, 2019. 8", + "[2] Eric Brachmann, Frank Michel, Alexander Krull, Michael Ying Yang, Stefan Gumhold, et al. Uncertainty-driven 6d pose estimation of objects and scenes from a single rgb image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3364-3372, 2016. 7", + "[3] Benjamin Busam, Marco Esposito, Simon Che'Rose, Nassir Navab, and Benjamin Frisch. A stereo vision approach for cooperative robotic movement therapy. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 127-135, 2015. 1", + "[4] Pedro Castro and Tae-Kyun Kim. Crt-6d: Fast 6d object pose estimation with cascaded refinement transformers. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 5746-5755, 2023. 1, 2, 3, 4, 6, 7", + "[5] Bo Chen, Alvaro Parra, Jiewei Cao, Nan Li, and Tat-Jun Chin. End-to-end learnable geometric vision by backpropagating pnp optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8100-8109, 2020. 
1, 2, 3, 4", + "[6] Maximilian Denninger, Martin Sundermeyer, Dominik Winkelbauer, Youssef Zidan, Dmitry Olefir, Mohamad Elbadrawy, Ahsan Lodhi, and Harinandan Katam. Blenderproc. arXiv preprint arXiv:1911.01911, 2019. 7", + "[7] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021. 1, 2, 3, 4", + "[8] Yan Di, Fabian Manhardt, Gu Wang, Xiangyang Ji, Nassir Navab, and Federico Tombari. So-pose: Exploiting self-occlusion for direct 6d pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12396–12405, 2021. 1, 7", + "[9] Lin Geng Foo, Hossein Rahmani, and Jun Liu. Aigc for various data modalities: A survey. arXiv preprint arXiv:2308.14177, 2023. 3", + "[10] Xiao-Shan Gao, Xiao-Rong Hou, Jianliang Tang, and Hang-Fei Cheng. Complete solution classification for the perspective-three-point problem. IEEE transactions on pattern analysis and machine intelligence, 25(8):930-943, 2003. 2", + "[11] Jia Gong, Lin Geng Foo, Zhipeng Fan, Qiuhong Ke, Hossein Rahmani, and Jun Liu. Diffpose: Toward more reliable 3d pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13041-13051, 2023. 1, 3, 4", + "[12] Tianpei Gu, Guangyi Chen, Junlong Li, Chunze Lin, Yongming Rao, Jie Zhou, and Jiwen Lu. Stochastic trajectory prediction via motion indeterminacy diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17113-17122, 2022. 1, 3, 4" + ], + "bbox": [ + 78, + 114, + 472, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Shuxuan Guo, Yinlin Hu, Jose M Alvarez, and Mathieu Salzmann. Knowledge distillation for 6d pose estimation by aligning distributions of local predictions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18633-18642, 2023. 3", + "[14] Yang Hai, Rui Song, Jiaojiao Li, and Yinlin Hu. Shape-constraint recurrent flow for 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4831-4840, 2023.", + "[15] Yang Hai, Rui Song, Jiaojiao Li, Mathieu Salzmann, and Yinlin Hu. Rigidity-aware detection for 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8927-8936, 2023.", + "[16] Rasmus Laurvig Haugaard and Anders Glent Buch. Surfemb: Dense and continuous correspondence distributions for object pose estimation with learnt surface embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6749-6758, 2022. 3", + "[17] Stefan Hinterstoisser, Vincent Lepetit, Slobodan Ilic, Stefan Holzer, Gary Bradski, Kurt Konolige, and Nassir Navab. Model based training, detection and pose estimation of texture-less 3d objects in heavily cluttered scenes. In Computer Vision-ACCV 2012: 11th Asian Conference on Computer Vision, Daejeon, Korea, November 5-9, 2012, Revised Selected Papers, Part I 11, pages 548-562. Springer, 2013. 7", + "[18] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, pages 6840-6851. Curran Associates, Inc., 2020. 1, 2, 3, 4, 5, 6", + "[19] Tomas Hodan, Daniel Barath, and Jiri Matas. Epos: Estimating 6d pose of objects with symmetries. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11703-11712, 2020. 1, 3", + "[20] Tsu-Ching Hsiao, Hao-Wei Chen, Hsuan-Kung Yang, and Chun-Yi Lee. Confronting ambiguity in 6d object pose estimation via score-based diffusion on se (3). arXiv preprint arXiv:2305.15873, 2023. 3", + "[21] Yinlin Hu, Joachim Hugonot, Pascal Fua, and Mathieu Salzmann. Segmentation-driven 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3385-3394, 2019. 4, 7", + "[22] Yinlin Hu, Pascal Fua, Wei Wang, and Mathieu Salzmann. Single-stage 6d object pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2930-2939, 2020. 1, 3, 7", + "[23] Shun Iwase, Xingyu Liu, Rawal Khirodkar, Rio Yokota, and Kris M. Kitani. Repose: Fast 6d object pose refinement via deep texture rendering. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 3303-3312, 2021. 3", + "[24] Shun Iwase, Xingyu Liu, Rawal Khirodkar, Rio Yokota, and Kris M Kitani. Repose: Fast 6d object pose refinement via deep texture rendering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3303-3312, 2021. 7" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9684", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[25] Haobo Jiang, Mathieu Salzmann, Zheng Dang, Jin Xie, and Jian Yang. Se (3) diffusion model-based point cloud registration for robust 6d object pose estimation. Advances in Neural Information Processing Systems, 36, 2024. 3", + "[26] Zakiah I. Kalantan and Jochen Einbeck. Quantile-based estimation of the finite cauchy mixture model. Symmetry, 11 (9), 2019. 2, 4, 5", + "[27] Wadim Kehl, Fabian Manhardt, Federico Tombari, Slobodan Ilic, and Nassir Navab. Ssd-6d: Making rgb-based 3d detection and 6d pose estimation great again. In Proceedings of the IEEE international conference on computer vision, pages 1521–1529, 2017. 1, 3", + "[28] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.7", + "[29] Yann Labbe, Justin Carpentier, Mathieu Aubry, and Josef Sivic. Cosypose: Consistent multi-view multi-object 6d pose estimation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVII 16, pages 574-591. Springer, 2020. 4, 7", + "[30] Junhyeok Lee, Junghwa Kang, Yoonho Nam, and TaeYoung Lee. Bias field correction in MRI with hampel noise denoising diffusion probabilistic model. In Medical Imaging with Deep Learning, short paper track, 2023. 3", + "[31] Vincent Lepetit, Francesc Moreno-Noguer, and Pascal Fua. Ep n p: An accurate o (n) solution to the p np problem. International journal of computer vision, 81:155-166, 2009. 2", + "[32] Hongyang Li, Jiehong Lin, and Kui Jia. Dcl-net: Deep correspondence learning network for 6d pose estimation. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part IX, pages 369-385. Springer, 2022. 3", + "[33] Yi Li, Gu Wang, Xiangyang Ji, Yu Xiang, and Dieter Fox. Deepim: Deep iterative matching for 6d pose estimation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 683-698, 2018. 3, 7", + "[34] Zhigang Li, Gu Wang, and Xiangyang Ji. 
Cdpn: Coordinates-based disentangled pose network for real-time rgb-based 6-dof object pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7678-7687, 2019. 4, 7", + "[35] Ruyi Lian and Haibin Ling. Checkerpose: Progressive dense keypoint localization for object pose estimation with graph neural network. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14022-14033, 2023. 7", + "[36] Xingyu Liu, Ruida Zhang, Chenyangguang Zhang, Bowen Fu, Jiwen Tang, Xiquan Liang, Jingyi Tang, Xiaotian Cheng, Yukang Zhang, Gu Wang, and Xiangyang Ji. Gdnpp. https://github.com/shanice-1/gdrnpp_bop2022, 2022.3", + "[37] Andreas Lugmayr, Martin Danelljan, Andres Romero, Fisher Yu, Radu Timofte, and Luc Van Gool. Repaint: Inpainting using denoising diffusion probabilistic models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11461-11471, 2022. 3" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[38] Fabian Manhardt, Wadim Kehl, Nassir Navab, and Federico Tombari. Deep model-based 6d pose refinement in rgb. In The European Conference on Computer Vision (ECCV), 2018. 3", + "[39] Eric Marchand, Hideaki Uchiyama, and Fabien Spindler. Pose estimation for augmented reality: a hands-on survey. IEEE transactions on visualization and computer graphics, 22(12):2633-2651, 2015. 1", + "[40] Jianhan Mei, Xudong Jiang, and Henghui Ding. Spatial feature mapping for 6 dof object pose estimation. Pattern Recognition, 131:108835, 2022. 1", + "[41] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. Sdedit: Guided image synthesis and editing with stochastic differential equations. In International Conference on Learning Representations, 2021. 1, 3", + "[42] Markus Oberweger, Mahdi Rad, and Vincent Lepetit. Making deep heatmaps robust to partial occlusions for 3d object pose estimation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 119-134, 2018. 3, 6", + "[43] Kiru Park, Timothy Patten, and Markus Vincze. Pix2pose: Pixel-wise coordinate regression of objects for 6d pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7668-7677, 2019. 3", + "[44] Sida Peng, Yuan Liu, Qixing Huang, Xiaowei Zhou, and Hujun Bao. Pvnet: Pixel-wise voting network for 6dof pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4561-4570, 2019. 1, 2, 3, 4, 6, 7", + "[45] Luis Pérez, Inigo Rodríguez, Nuria Rodríguez, Rubén Usamentiaga, and Daniel F García. Robot guidance using machine vision techniques in industrial environments: A comparative review. Sensors, 16(3):335, 2016. 1", + "[46] Mahdi Rad and Vincent Lepetit. Bb8: A scalable, accurate, robust to partial occlusion method for predicting the 3d poses of challenging objects without using depth. In Proceedings of the IEEE international conference on computer vision, pages 3828-3836, 2017. 3", + "[47] Jason Raphael Rambach, Alain Pagani, Michael Schneider, Oleksandr Artemenko, and Didier Stricker. 6dof object tracking based on 3d scans for augmented reality remote live support. Comput., 7:6, 2018. 1", + "[48] Hong Ren, Lin Lin, Yanjie Wang, and Xin Dong. Robust 6-dof pose estimation under hybrid constraints. Sensors, 22 (22):8758, 2022. 3", + "[49] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. 
Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 4, 7", + "[50] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pages 2256-2265. PMLR, 2015. 2, 3", + "[51] Chen Song, Jiaru Song, and Qixing Huang. Hybridpose: 6d object pose estimation under hybrid representations. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 431-440, 2020. 7" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "9685", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[52] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021. 1, 2, 3, 4, 5, 6, 8", + "[53] Yongzhi Su, Mahdi Saleh, Torben Fetzer, Jason Rambach, Nassir Navab, Benjamin Busam, Didier Stricker, and Federico Tombari. Zebrapose: Coarse to fine surface encoding for 6 dof object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6738-6748, 2022. 1, 3, 4, 6, 7, 8", + "[54] Martin Sundermeyer, Zoltán-Csaba Marton, Maximilian Durner, Manuel Brucker, and Rudolph Triebel. Implicit 3d orientation learning for 6d object detection from rgb images. In European Conference on Computer Vision, 2018. 3", + "[55] Mahdi Teimouri. Statistical inference for mixture of cauchy distributions. arXiv preprint arXiv:1809.05722, 2018. 5", + "[56] Bugra Tekin, Sudipta N Sinha, and Pascal Fua. Real-time seamless single shot 6d object pose prediction. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 292-301, 2018. 3", + "[57] Zhi Tian, Chunhua Shen, Hao Chen, and Tong He. Fcos: Fully convolutional one-stage object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9627-9636, 2019. 6, 7", + "[58] Julien Urain, Niklas Funk, Jan Peters, and Georgia Chalvatzaki. Se(3)-diffusionfields: Learning smooth cost functions for joint grasp and motion optimization through diffusion. IEEE International Conference on Robotics and Automation (ICRA), 2023. 3", + "[59] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 6", + "[60] Gu Wang, Fabian Manhardt, Xingyu Liu, Xiangyang Ji, and Federico Tombari. Occlusion-aware self-supervised monocular 6d object pose estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 1", + "[61] Gu Wang, Fabian Manhardt, Federico Tombari, and Xi-angyang Ji. Gdr-net: Geometry-guided direct regression network for monocular 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16611-16621, 2021. 1, 3, 4, 6, 7", + "[62] Di Wu, Zhaoyong Zhuang, Canqun Xiang, Wenbin Zou, and Xia Li. 6d-vnet: End-to-end 6-dof vehicle pose estimation from monocular rgb images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 1", + "[63] Yu Xiang, Tanner Schmidt, Venkatraman Narayanan, and Dieter Fox. 
PoseCNN: A convolutional neural network for 6d object pose estimation in cluttered scenes. 2018. 1, 3, 7", + "[64] Yan Xu, Kwan-Yee Lin, Guofeng Zhang, Xiaogang Wang, and Hongsheng Li. Rnnpose: Recurrent 6-dof object pose refinement with robust correspondence field estimation and pose optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022. 1, 3", + "[65] Heng Yang and Marco Pavone. Object pose estimation with statistical guarantees: Conformal keypoint detection" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "and geometric uncertainty propagation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8947-8958, 2023.", + "[66] Jun Yang, Wenjie Xue, Sahar Ghavidel, and Steven L Waslander. 6d pose estimation for textureless objects on rgb frames using multi-view optimization. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 2905-2912. IEEE, 2023.", + "[67] Sergey Zakharov, Ivan S. Shugurov, and Slobodan Ilic. Dpod: 6d pose object detector and refiner. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 1941-1950, 2019. 3, 7" + ], + "bbox": [ + 501, + 92, + 892, + 258 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "9686", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/1d3927f2-2533-4713-91b0-b3f9e13c8aed_model.json b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/1d3927f2-2533-4713-91b0-b3f9e13c8aed_model.json new file mode 100644 index 0000000000000000000000000000000000000000..23916c721c44076db0db36a5984b4febcbe005af --- /dev/null +++ b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/1d3927f2-2533-4713-91b0-b3f9e13c8aed_model.json @@ -0,0 +1,2422 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.131, + 0.856, + 0.152 + ], + "angle": 0, + "content": "6D-Diff: A Keypoint Diffusion Framework for 6D Object Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.283, + 0.179, + 0.686, + 0.234 + ], + "angle": 0, + "content": "Li Xu\\(^{1\\dagger}\\) Haoxuan Qu\\(^{1\\dagger}\\) Yujun Cai\\(^{2}\\) Jun Liu\\(^{1\\dagger}\\) \n\\(^{1}\\)Singapore University of Technology and Design \n\\(^{2}\\)Nanyang Technological University" + }, + { + "type": "text", + "bbox": [ + 0.22, + 0.237, + 0.748, + 0.27 + ], + "angle": 0, + "content": "{li_xu, haoxuan_qu}@mysmail.sutd.edu.sg, yujun001@e.ntu.edu.sg, jun.liu@sutd.edu.sg" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.319, + 0.314, + 0.335 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.352, + 0.474, + 0.61 + ], + "angle": 0, + "content": "Estimating the 6D object pose from a single RGB image often involves noise and indeterminacy due to challenges such as occlusions and cluttered backgrounds. 
Meanwhile, diffusion models have shown appealing performance in generating high-quality images from random noise with high indeterminacy through step-by-step denoising. Inspired by their denoising capability, we propose a novel diffusion-based framework (6D-Diff) to handle the noise and indeterminacy in object pose estimation for better performance. In our framework, to establish accurate 2D-3D correspondence, we formulate 2D keypoints detection as a reverse diffusion (denoising) process. To facilitate such a denoising process, we design a Mixture-of-Cauchy-based forward diffusion process and condition the reverse process on the object appearance features. Extensive experiments on the LM-O and YCB-V datasets demonstrate the effectiveness of our framework." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.638, + 0.21, + 0.654 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.664, + 0.47, + 0.861 + ], + "angle": 0, + "content": "6D object pose estimation aims to estimate the 6D pose of an object including its location and orientation, which has a wide range of applications, such as augmented reality [39, 47], robotic manipulation [3, 45], and automatic driving [62]. Recently, various methods [4, 5, 19, 22, 27, 44, 53, 61, 64] have been proposed to conduct RGB-based 6D object pose estimation since RGB images are easy to obtain. Despite the increased efforts, a variety of challenges persist in RGB-based 6D object pose estimation, including occlusions, cluttered backgrounds, and changeable environments [8, 40, 44, 60, 63]. These challenges can introduce significant noise and indeterminacy into the pose estimation process, leading to error-prone predictions [8, 40, 44]." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.862, + 0.469, + 0.877 + ], + "angle": 0, + "content": "Meanwhile, diffusion models [18, 52] have achieved ap" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.304, + 0.891, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.426, + 0.893, + 0.526 + ], + "angle": 0, + "content": "Figure 1. Overview of our proposed 6D-Diff framework. As shown, given the 3D keypoints from the object 3D CAD model, we aim to detect the corresponding 2D keypoints in the image to obtain the 6D object pose. Note that when detecting keypoints, there are often challenges such as occlusions (including self-occlusions) and cluttered backgrounds that can introduce noise and indeterminacy into the process, impacting the accuracy of pose prediction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.539, + 0.895, + 0.903 + ], + "angle": 0, + "content": "pealing results in various generation tasks such as image synthesis [7, 18] and image editing [41]. Specifically, diffusion models are able to recover high-quality determinate samples (e.g., clean images) from a noisy and indeterminate input data distribution (e.g., random noise) via a step-by-step denoising process [18, 52]. Motivated by such a strong denoising capability [11, 12, 18], we aim to leverage diffusion models to handle the RGB-based 6D object pose estimation task, since this task also involves tackling noise and indeterminacy. However, it can be difficult to directly use diffusion models to estimate the object pose, because diffusion models often start denoising from random Gaussian noise [18, 52]. 
Meanwhile, in RGB-based 6D object pose estimation, the object pose is often extracted from an intermediate representation, such as keypoint heatmaps [5], pixel-wise voting vectors [44], or object surface keypoint features [4]. Such an intermediate representation encodes useful distribution priors about the object pose. Thus starting denoising from such an representation shall effectively assist the diffusion model in recovering accurate object poses [11]. To achieve this, we propose a novel diffusion-based object pose estimation framework (6D-Diff) that can exploit prior distribution knowledge from the intermediate representation for better performance." + }, + { + "type": "page_footnote", + "bbox": [ + 0.101, + 0.887, + 0.348, + 0.901 + ], + "angle": 0, + "content": "† Equal contribution; ‡ Corresponding author" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9676" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.35 + ], + "angle": 0, + "content": "Overall, our framework is a correspondence-based framework, in which to predict an object pose, given the 3D keypoints pre-selected from the object 3D CAD model, we first predict the coordinates of the 2D image keypoints corresponding to the pre-selected 3D keypoints. We then use the 3D keypoints together with the predicted 2D keypoints coordinates to compute the 6D object pose using a Perspective-n-Point (PnP) solver [10, 31]. As shown in Fig. 1, to predict the 2D keypoints coordinates, we first extract an intermediate representation (the 2D keypoints heatmaps) through a keypoints distribution initializer. As discussed before, due to various factors, there often exists noise and indeterminacy in the keypoints detection process and the extracted heatmaps can be noisy as shown in Fig. 2. Thus we pass the distribution modeled from these keypoints heatmaps into a diffusion model to perform the denoising process to obtain the final keypoints coordinates prediction." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.353, + 0.473, + 0.626 + ], + "angle": 0, + "content": "Analogous to non-equilibrium thermodynamics [50], given a 2D image keypoint, we can consider all its possible locations in the image as particles in thermodynamics. Under low indeterminacy, the particles (possible locations) w.r.t. each 2D keypoint gather, and each keypoint can be determinately and accurately localized. In contrast, under high indeterminacy, these particles can stochastically spread over the input image, and it is difficult to localize each keypoint. The process of converting particles from low indeterminacy to high indeterminacy is called the forward process of the diffusion model. The goal of the diffusion model is to reverse the above forward process (through a reverse process), i.e., converting the particles from high indeterminacy to low indeterminacy. Here in our case, we aim to convert the indeterminate keypoints coordinates distribution modeled from the heatmaps into the determinate distribution. Below we briefly introduce the forward process and the reverse process in our diffusion model." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.629, + 0.473, + 0.903 + ], + "angle": 0, + "content": "In the forward process, we aim to generate supervision signals that will be used to optimize the diffusion model during the reverse process. 
Specifically, given a set of pre-selected 3D keypoints, we first acquire ground-truth coordinates of their corresponding 2D keypoints using the ground-truth object pose. Then these determinate ground-truth 2D coordinates are gradually diffused towards the indeterminate distribution modeled from the intermediate representation, and the distributions generated along the way will be used as supervision signals. Note that, as the distribution modeled from the intermediate representation can be complex and irregular, it is difficult to characterize such a distribution via the Gaussian distribution. This means that simply applying diffusion models in most existing generation works [7, 18, 52], which start denoising from the random Gaussian noise, can introduce potentially large errors. To tackle this challenge, we draw inspiration from the fact that the Mixture of Cauchy (MoC) model can effectively char" + }, + { + "type": "table", + "bbox": [ + 0.541, + 0.091, + 0.851, + 0.244 + ], + "angle": 0, + "content": "
3D CAD modelImageHeatmap
(a)
(b)
" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.245, + 0.895, + 0.369 + ], + "angle": 0, + "content": "Figure 2. Above we show two examples of keypoint heatmaps, which serve as the intermediate representation [4, 5, 44] in our framework. The red dots indicate the ground-truth locations of the keypoints. In the example (a), the target object is the pink cat, which is heavily occluded in the image and is shown in a different pose compared to the 3D model. As shown above, due to occlusions and cluttered backgrounds, the keypoint heatmaps are noisy, which reflects the noise and indeterminacy during the keypoints detection process." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.396, + 0.895, + 0.519 + ], + "angle": 0, + "content": "acterize complex and intractable distributions. Moreover, the MoC model is robust to potential outliers in the distribution to be characterized [26]. Thus we propose to model the intermediate representation using a MoC distribution instead of simply treating it as a random Gaussian noise. In this way, we gradually diffuse the determinate distribution (ground truth) of keypoints coordinates towards the modeled MoC distribution during the forward process." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.528, + 0.896, + 0.756 + ], + "angle": 0, + "content": "Correspondingly, in the reverse process, starting from the MoC distribution modeled in the forward process, we aim to learn to recover the ground-truth keypoints coordinates. To achieve this, we leverage the distributions generated step-by-step during the forward process as the supervision signals to train the diffusion model to learn the reverse process. In this way, the diffusion model can learn to convert the indeterminate MoC distribution of keypoints coordinates into a determinate one smoothly and effectively. After the reverse process, the 2D keypoints coordinates obtained from the final determinate distribution are used to compute the 6D object pose with the pre-selected 3D keypoints. Moreover, we further facilitate the model learning of such a reverse process by injecting object appearance features as context information." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.897, + 0.903 + ], + "angle": 0, + "content": "Our work makes the following contributions. 1) We propose a novel 6D-Diff framework, in which we formulate keypoints detection for 6D object pose estimation as a reverse diffusion process to effectively eliminate the noise and indeterminacy in object pose estimation. 2) To take advantage of the intermediate representation that encodes useful prior distribution knowledge for handling this task, we propose a novel MoC-based diffusion process. Besides, we facilitate the model learning by utilizing object features." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9677" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.09, + 0.22, + 0.106 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.123, + 0.473, + 0.275 + ], + "angle": 0, + "content": "RGB-based 6D Object Pose Estimation has received a lot of attention [4, 13-16, 23, 32, 33, 36, 38, 43, 44, 46, 53, 54, 56, 63-67]. Some works [22, 27, 61, 63] proposed to directly regress object poses. However, the non-linearity of the rotation space makes direct regression of object poses difficult [32]. 
Compared to this type of direct methods, correspondence-based methods [5, 19, 43, 44, 46, 53, 56] often demonstrate better performance, which estimate 6D object poses via learning 2D-3D correspondences between the observed image and the object 3D model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.282, + 0.473, + 0.539 + ], + "angle": 0, + "content": "Among correspondence-based methods, several works [42, 44, 46, 48, 56] aim to predict the 2D keypoints coordinates corresponding to specific 3D keypoints. BB8 [46] proposed to detect the 2D keypoints corresponding to the 8 corners of the object's 3D bounding box. Later, PVNet [44] achieved better performance by estimating 2D keypoints for sampled points on the surface of the object 3D model via pixel-wise voting. Moreover, various methods [19, 43, 53, 61, 67] establish 2D-3D correspondences by localizing the 3D model point corresponding to each observed object pixel. Among these methods, DPOD [67] explored the use of UV texture maps to facilitate model training, and ZebraPose [53] proposed to encode the surface of the object 3D model efficiently through a hierarchical binary grouping. Besides, several pose refinement methods [23, 33, 38, 64] have been proposed, which conducted pose refinement given an initial pose estimation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.547, + 0.473, + 0.669 + ], + "angle": 0, + "content": "In this paper, we also regard object pose estimation as a 2D-3D correspondence estimation problem. Different from previous works, here by formulating 2D-3D correspondence estimation as a distribution transformation process (denoising process), we propose a new framework (6D-Diff) that trains a diffusion model to perform progressive denoising from an indeterminate keypoints distribution to the desired keypoints distribution with low indeterminacy." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.674, + 0.473, + 0.902 + ], + "angle": 0, + "content": "Diffusion Models [7, 9, 18, 50, 52] are originally introduced for image synthesis. Showing appealing generation capabilities, diffusion models have also been explored in various other tasks [11, 12, 20, 25, 30, 37, 41, 58], such as image editing [41] and image inpainting [37]. Here we explore a new framework that tackles object pose estimation with a diffusion model. Different from previous generation works [7, 37, 41] that start denoising from random noise, to aid the denoising process for 6D object pose estimation, we design a novel MoC-based diffusion mechanism that enables the diffusion model to start denoising from a distribution containing useful prior distribution knowledge regarding the object pose. Moreover, we condition the denoising process on the object appearance features, to further guide the diffusion model to obtain accurate predictions." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.09, + 0.593, + 0.107 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.115, + 0.895, + 0.253 + ], + "angle": 0, + "content": "To handle the noise and indeterminacy in RGB-based 6D object pose estimation, inspired by [11], from a novel perspective of distribution transformation with progressive denoising, we propose a framework (6D-Diff) that represents a new brand of diffusion-based solution for 6D object pose estimation. Below we first revisit diffusion models in Sec. 3.1. Then we discuss our proposed framework in Sec. 3.2, and introduce its training and testing scheme in Sec. 3.3. 
We finally detail the model architecture in Sec. 3.4." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.261, + 0.75, + 0.277 + ], + "angle": 0, + "content": "3.1. Revisiting Diffusion Models" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.284, + 0.895, + 0.435 + ], + "angle": 0, + "content": "The diffusion model [18, 52], which is a kind of probabilistic generative model, consists of two parts, namely the forward process and the reverse process. Specifically, given an original sample \\( d_0 \\) (e.g., a clean image), the process of diffusing the sample \\( d_0 \\) iteratively towards the noise (typically Gaussian noise) \\( d_K \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I}) \\) (i.e., \\( d_0 \\to d_1 \\to \\ldots \\to d_K \\)) is called the forward process. In contrast, the process of denoising the noise \\( d_K \\) iteratively towards the sample \\( d_0 \\) (i.e., \\( d_K \\to d_{K-1} \\to \\ldots \\to d_0 \\)) is called the reverse process. Each process is defined as a Markov chain." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.435, + 0.895, + 0.542 + ], + "angle": 0, + "content": "Forward Process. To obtain supervision signals for training the diffusion model to learn to perform the reverse process in a stepwise manner, we need to acquire the intermediate step results \\(\\{d_k\\}_{k=1}^{K-1}\\). Thus the forward process is first performed to generate these intermediate step results for training purpose. Specifically, the posterior distribution \\(q(d_{1:K}|d_0)\\) from \\(d_1\\) to \\(d_K\\) is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.555, + 0.544, + 0.892, + 0.586 + ], + "angle": 0, + "content": "\\[\nq \\left(d _ {1: K} \\mid d _ {0}\\right) = \\prod_ {k = 1} ^ {K} q \\left(d _ {k} \\mid d _ {k - 1}\\right) \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.556, + 0.589, + 0.837, + 0.608 + ], + "angle": 0, + "content": "\\[\nq \\left(d _ {k} \\mid d _ {k - 1}\\right) = \\mathcal {N} \\left(d _ {k}; \\sqrt {1 - \\beta_ {k}} d _ {k - 1}, \\beta_ {k} \\mathbf {I}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.61, + 0.895, + 0.67 + ], + "angle": 0, + "content": "where \\(\\{\\beta_{k}\\in (0,1)\\}_{k = 1}^{K}\\) denotes a set of fixed variance controllers that control the scale of the injected noise at different steps. According to Eq. (1), we can derive \\(q(d_k|d_0)\\) in closed form as:" + }, + { + "type": "equation", + "bbox": [ + 0.558, + 0.674, + 0.892, + 0.693 + ], + "angle": 0, + "content": "\\[\nq \\left(d _ {k} \\mid d _ {0}\\right) = \\mathcal {N} \\left(d _ {k}; \\sqrt {\\bar {\\alpha} _ {k}} d _ {0}, (1 - \\bar {\\alpha} _ {k}) \\mathbf {I}\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.696, + 0.892, + 0.728 + ], + "angle": 0, + "content": "where \\(\\alpha_{k} = 1 - \\beta_{k}\\) and \\(\\overline{\\alpha}_k = \\prod_{s = 1}^k\\alpha_s\\) . Based on Eq. (2), \\(d_{k}\\) can be further expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.599, + 0.73, + 0.892, + 0.747 + ], + "angle": 0, + "content": "\\[\nd _ {k} = \\sqrt {\\bar {\\alpha} _ {k}} d _ {0} + \\sqrt {1 - \\bar {\\alpha} _ {k}} \\epsilon \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.895, + 0.87 + ], + "angle": 0, + "content": "where \\(\\epsilon \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})\\). From Eq. 
(3), we can observe that when the number of diffusion steps \\(K\\) is sufficiently large and \\(\\overline{\\alpha}_K\\) correspondingly decreases to nearly zero, the distribution of \\(d_K\\) is approximately a standard Gaussian distribution, i.e., \\(d_K \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})\\). This means \\(d_0\\) is gradually corrupted into Gaussian noise, which conforms to the nonequilibrium thermodynamics phenomenon of the diffusion process [50]." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Reverse Process. With the intermediate step results \\(\\{d_k\\}_{k=1}^{K-1}\\) acquired in the forward process, the diffusion" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9678" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.468, + 0.168 + ], + "angle": 0, + "content": "model is trained to learn to perform the reverse process. Specifically, in the reverse process, each step can be formulated as a function \\( f \\) that takes \\( d_{k} \\) and the diffusion model \\( M_{diff} \\) as inputs and generate \\( d_{k-1} \\) as the output, i.e., \\( d_{k-1} = f(d_{k}, M_{diff}) \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.169, + 0.468, + 0.243 + ], + "angle": 0, + "content": "After training the diffusion model, during inference, we do not need to conduct the forward process. Instead, we only conduct the reverse process, which converts a random Gaussian noise \\( d_{K} \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I}) \\) into a sample \\( d_0 \\) of the desired distribution using the trained diffusion model." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.255, + 0.281, + 0.272 + ], + "angle": 0, + "content": "3.2. Proposed Framework" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.28, + 0.468, + 0.384 + ], + "angle": 0, + "content": "Similar to previous works [21, 44, 53], our framework predicts 6D object poses via a two-stage pipeline. Specifically, (i) we first select \\(N\\) 3D keypoints on the object CAD model and detect the corresponding \\(N\\) 2D keypoints in the image; (ii) we then compute the 6D pose using a PnP solver. Here we mainly focus on the first stage and aim to produce more accurate keypoint detection results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.386, + 0.468, + 0.552 + ], + "angle": 0, + "content": "When detecting 2D keypoints, factors like occlusions and cluttered backgrounds can bring noise and indeterminacy into this process, and affect the accuracy of detection results [21, 44]. To handle this problem, inspired by that diffusion models can iteratively reduce indeterminacy and noise in the initial distribution (e.g., standard Gaussian distribution) to generate determinate and high-quality samples of the desired distribution [11, 12], we formulate keypoints detection as generating a determinate distribution of keypoints coordinates \\((D_0)\\) from an indeterminate initial distribution \\((D_K)\\) via a diffusion model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.553, + 0.468, + 0.824 + ], + "angle": 0, + "content": "Moreover, to effectively adapt to the 6D object pose estimation task, the diffusion model in our framework does not start the reverse process from the common initial distribution (i.e., the standard Gaussian distribution) as in most existing diffusion works [7, 18, 52]. 
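As a concrete reference for the standard process described above, where the forward diffusion of Eqs. (1)-(3) ends in approximately Gaussian noise (which is why generative diffusion models usually start the reverse process from random Gaussian noise), a minimal NumPy sketch of the closed-form forward step in Eq. (3) could look as follows. This is a generic illustration rather than code from the paper; the linear beta schedule and all constants are placeholder assumptions.

```python
import numpy as np

# Minimal sketch of the standard forward step of Eq. (3):
# d_k = sqrt(alpha_bar_k) * d_0 + sqrt(1 - alpha_bar_k) * eps, with eps ~ N(0, I).
K = 100                                    # number of diffusion steps (assumed)
betas = np.linspace(1e-4, 0.05, K)         # variance controllers beta_k (assumed schedule)
alpha_bars = np.cumprod(1.0 - betas)       # alpha_bar_k = prod_{s<=k} (1 - beta_s)

def forward_sample(d0, k, rng=np.random.default_rng(0)):
    """Draw d_k ~ q(d_k | d_0) for N 2D keypoints; d0 has shape (N, 2), k is 1-indexed."""
    eps = rng.standard_normal(d0.shape)    # Gaussian noise
    a_bar = alpha_bars[k - 1]
    return np.sqrt(a_bar) * d0 + np.sqrt(1.0 - a_bar) * eps

d0 = np.random.default_rng(1).random((128, 2))   # e.g. normalized keypoint coordinates
d_K = forward_sample(d0, K)                      # approaches N(0, I) as alpha_bar_K -> 0
```

The MoC-based forward process used in this framework (Eq. (5)) modifies exactly this step so that the end point is the heatmap-derived Cauchy mixture rather than standard Gaussian noise; a corresponding sketch is given after Eq. (5) below.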
Instead, inspired by recent 6D object pose estimation works [4, 5, 61], we first extract an intermediate representation (e.g., heatmaps), and use this representation to initialize a keypoints coordinates distribution (i.e., \\( D_K \\)), which will serve as the starting point of the reverse process. Such an intermediate representation encodes useful prior distribution information about keypoints coordinates. Thus by starting the reverse process from this representation, we effectively exploit the distribution priors in the representation to aid the diffusion model in recovering accurate keypoints coordinates [11]. Below, we first describe how we initialize the keypoints distribution \\( D_K \\), and then discuss the corresponding forward and reverse processes in our new framework." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Keypoints Distribution Initialization. We initialize the keypoints coordinates distribution \\(D_K\\) with extracted heatmaps. Specifically, similar to [29, 34, 53], we first use an off-the-shelf object detector (e.g., Faster RCNN [49]) to detect the bounding box of the target object, and then crop" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.227 + ], + "angle": 0, + "content": "the detected Region of Interest (ROI) from the input image. We send the ROI into a sub-network (i.e., the keypoints distribution initializer) to predict a number of heatmaps where each heatmap corresponds to one 2D keypoint. We then normalize each heatmap to convert it to a probability distribution. In this way, each normalized heatmap naturally represents the distribution of the corresponding keypoint coordinates, and thus we can use these heatmaps to initialize \\( D_K \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.23, + 0.892, + 0.501 + ], + "angle": 0, + "content": "Forward Process. After distribution initialization, the next step is to iteratively reduce the noise and indeterminacy in the initialized distribution \\( D_K \\) by performing the reverse process \\( (D_K \\to D_{K-1} \\to \\ldots \\to D_0) \\). To train the diffusion model to perform such a reverse process, we need to obtain the distributions generated along the way (i.e., \\( \\{D_k\\}_{k=1}^{K-1} \\)) as the supervision signals. Thus, we first need to conduct the forward process to obtain samples from \\( \\{D_k\\}_{k=1}^{K-1} \\). Specifically, given the ground-truth keypoints coordinates distribution \\( D_0 \\), we define the forward process as: \\( D_0 \\to D_1 \\to \\ldots \\to D_K \\), where \\( K \\) is the number of diffusion steps. In this forward process, we iteratively add noise to the determinate distribution \\( D_0 \\), i.e., increasing the indeterminacy of generated distributions, to transform it into the initialized distribution \\( D_K \\) with indeterminacy. Via this process, we can generate \\( \\{D_k\\}_{k=1}^{K-1} \\) along the way and use them as supervision signals to train the diffusion model to perform the reverse process." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.504, + 0.892, + 0.729 + ], + "angle": 0, + "content": "However, in our framework, we do not aim to transform the ground-truth keypoints coordinates distribution \\(D_0\\) towards a standard Gaussian distribution via the forward process, because our initialized distribution \\(D_K\\) is not a random noise. Instead, as discussed before, \\(D_K\\) is initialized with heatmaps (as shown in Fig. 
3), since the heatmaps can provide rough estimations about the keypoints coordinates distribution. To effectively utilize such priors in \\(D_K\\) to facilitate the reverse process, we aim to enable the diffusion model to start the reverse process (denoising process) from \\(D_K\\) instead of random Gaussian noise [11]. Thus, the basic forward process (described in Sec. 3.1) in existing generative diffusion models is not suitable in our framework, which motivates us to design a new forward process for our task." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.733, + 0.892, + 0.884 + ], + "angle": 0, + "content": "However, it is non-trivial to design such a forward process, as the initialized distribution \\( D_K \\) is based on extracted heatmaps, and thus \\( D_K \\) can be complex and irregular, as shown in Fig. 4. Hence modeling \\( D_K \\) as a Gaussian distribution can result in potentially large errors. To handle this challenge, motivated by that the Mixture of Cauchy (MoC) model can effectively and reliably characterize complex and intractable distributions [26], we leverage MoC to characterize \\( D_K \\). Based on the characterized distribution, we can then perform a corresponding MoC-based forward process." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.886, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Specifically, we denote the number of Cauchy kernels" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9679" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.127, + 0.095, + 0.852, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.293, + 0.895, + 0.394 + ], + "angle": 0, + "content": "Figure 3. Illustration of our framework. During testing, given an input image, we first crop the Region of Interest (ROI) from the image through an object detector. After that, we feed the cropped ROI to the keypoints distribution initializer to obtain the heatmaps that can provide useful distribution priors about keypoints, to initialize \\( D_K \\). Meanwhile, we can obtain object appearance features \\( f_{\\mathrm{app}} \\). Next, we pass \\( f_{\\mathrm{app}} \\) into the encoder, and the output of the encoder will serve as conditional information to aid the reverse process in the decoder. We sample \\( M \\) sets of 2D keypoints coordinates from \\( D_K \\), and feed these \\( M \\) sets of coordinates into the decoder to perform the reverse process iteratively together with the step embedding \\( f_D^k \\). At the final reverse step (\\( K \\)-th step), we average \\( \\{d_0^i\\}_{i=1}^M \\) as the final keypoints coordinates prediction \\( d_0 \\), and use \\( d_0 \\) to compute the 6D pose with the pre-selected 3D keypoints via a PnP solver." 
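To make the data flow in the caption above easier to follow, here is a schematic Python sketch of the inference pipeline. Every function in it (keypoints_initializer, sample_from_heatmaps, diffusion_reverse) is a dummy placeholder standing in for the corresponding component in Fig. 3 rather than the authors' implementation, and the array shapes are illustrative assumptions.

```python
import numpy as np

rng = np.random.default_rng(0)
N, M = 128, 5   # number of keypoints and number of sampled coordinate sets (paper settings)

def keypoints_initializer(roi):
    """Dummy initializer: returns N heatmaps on a 64x64 grid plus appearance features f_app."""
    heatmaps = rng.random((N, 64, 64))
    heatmaps /= heatmaps.sum(axis=(1, 2), keepdims=True)   # normalize to probability maps
    f_app = rng.random((16, 16, 512))                       # appearance features
    return heatmaps, f_app

def sample_from_heatmaps(heatmaps, m):
    """Sample m sets of 2D keypoint coordinates from the normalized heatmaps (i.e., from D_K)."""
    n, h, w = heatmaps.shape
    coords = np.empty((m, n, 2))
    for i in range(n):
        idx = rng.choice(h * w, size=m, p=heatmaps[i].ravel())
        coords[:, i, 0], coords[:, i, 1] = idx % w, idx // w   # (x, y) grid positions
    return coords

def diffusion_reverse(d_K, f_app):
    """Dummy stand-in for the iterative reverse (denoising) process of the diffusion model."""
    return d_K   # the trained model would denoise d_K conditioned on f_app and f_D^k

roi = rng.random((256, 256, 3))                   # cropped ROI from the object detector
heatmaps, f_app = keypoints_initializer(roi)      # initialize D_K and obtain f_app
d_K = sample_from_heatmaps(heatmaps, M)           # M sets of noisy keypoint coordinates
d_0 = diffusion_reverse(d_K, f_app).mean(axis=0)  # average the M denoised sets (N x 2)
# d_0 would then be passed, together with the N pre-selected 3D keypoints, to a PnP solver.
```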
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.399, + 0.471, + 0.458 + ], + "angle": 0, + "content": "in the MoC distribution as \\(U\\), and use the Expectation-Maximum-type (EM) algorithm [26, 55] to optimize the MoC parameters \\(\\eta^{\\mathrm{MoC}}\\) to characterize the distribution \\(D_K\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.118, + 0.459, + 0.47, + 0.5 + ], + "angle": 0, + "content": "\\[\n\\eta_ {*} ^ {\\mathrm {M o C}} = \\operatorname {E M} \\left(\\prod_ {v = 1} ^ {V} \\sum_ {u = 1} ^ {U} \\pi_ {u} \\operatorname {C a u c h y} \\left(d _ {K} ^ {v} \\mid \\mu_ {u}, \\gamma_ {u}\\right)\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.503, + 0.473, + 0.719 + ], + "angle": 0, + "content": "where \\(\\{d_K^v\\}_{v = 1}^V\\) denotes \\(V\\) sets of keypoints coordinates sampled from the distribution \\(D_{K}\\). Note each set of keypoints coordinates \\(d_K^v\\) contains all the \\(N\\) keypoints coordinates (i.e., \\(d_K^v\\in \\mathbb{R}^{N\\times 2}\\)). \\(\\pi_u\\) denotes the weight of the \\(u\\)-th Cauchy kernel (\\(\\sum_{u = 1}^{U}\\pi_{u} = 1\\)), and \\(\\eta^{\\mathrm{MoC}} = \\{\\mu_1,\\gamma_1,\\dots,\\mu_U,\\gamma_U\\}\\) denotes the MoC parameters in which \\(\\mu_{u}\\) and \\(\\gamma_{u}\\) are the location and scale of the \\(u\\)-th Cauchy kernel. Via the above optimization, we can use the optimized parameters \\(\\eta_*^{\\mathrm{MoC}}\\) to model \\(D_K\\) as the characterized distribution \\((\\hat{D}_K)\\). Given \\(\\hat{D}_K\\), we aim to conduct the forward process from the ground-truth keypoints coordinates distribution \\(D_0\\), so that after \\(K\\) steps of forward diffusion, the generated distribution reaches \\(\\hat{D}_K\\). To this end, we modify Eq. (3) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.728, + 0.47, + 0.748 + ], + "angle": 0, + "content": "\\[\n\\hat {d} _ {k} = \\sqrt {\\overline {{\\alpha}} _ {k}} d _ {0} + (1 - \\sqrt {\\overline {{\\alpha}} _ {k}}) \\mu^ {\\mathrm {M o C}} + \\sqrt {1 - \\overline {{\\alpha}} _ {k}} \\epsilon^ {\\mathrm {M o C}} \\qquad (5)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.759, + 0.47, + 0.841 + ], + "angle": 0, + "content": "where \\(\\hat{d}_k\\in \\mathbb{R}^{N\\times 2}\\) represents a sample (i.e., a set of \\(N\\) keypoints coordinates) from the generated distribution \\(\\tilde{D}_k\\), \\(\\mu^{\\mathrm{MoC}} = \\sum_{u = 1}^{U}\\mathbb{1}_{u}\\mu_{u}\\), and \\(\\epsilon^{\\mathrm{MoC}}\\sim\\) Cauchy(0, \\(\\sum_{u = 1}^{U}(\\mathbb{1}_{u}\\gamma_{u})\\)). Note that \\(\\mathbb{1}_u\\) is a zero-one indicator and \\(\\sum_{u = 1}^{U}\\mathbb{1}_u = 1\\) and \\(\\operatorname {Prob}(\\mathbb{1}_u = 1) = \\pi_u\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.841, + 0.471, + 0.904 + ], + "angle": 0, + "content": "From Eq. (5), we can observe that when \\(K\\) is sufficiently large and \\(\\overline{\\alpha}_K\\) correspondingly decreases to nearly zero, the distribution of \\(\\hat{d}_K\\) reaches the MoC distribution, i.e., \\(\\hat{d}_K = \\mu^{\\mathrm{MoC}} + \\epsilon^{\\mathrm{MoC}}\\sim \\mathrm{Cauchy}(\\sum_{u = 1}^{U}(\\mathbb{1}_{u}\\mu_{u}),\\sum_{u = 1}^{U}(\\mathbb{1}_{u}\\gamma_{u}))\\)." 
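A minimal NumPy sketch of the MoC-based forward step in Eq. (5) is shown below. The mixture parameters pi, mu and gamma are made-up placeholders here (in the framework they come from the EM-type fitting of Eq. (4)), and the variance schedule is likewise an illustrative assumption; drawing the kernel index with probability pi_u realizes the indicator 1_u in Eq. (5).

```python
import numpy as np

rng = np.random.default_rng(0)
N, K, U = 128, 100, 9                      # keypoints, diffusion steps, Cauchy kernels
betas = np.linspace(1e-4, 0.05, K)         # illustrative variance controllers beta_k
alpha_bars = np.cumprod(1.0 - betas)       # cumulative products alpha_bar_k

# Placeholder MoC parameters; Eq. (4) fits these to samples drawn from D_K with EM.
pi = np.full(U, 1.0 / U)                   # kernel weights, sum to 1
mu = rng.random((U, N, 2))                 # kernel locations mu_u
gamma = np.full((U, N, 2), 0.05)           # kernel scales gamma_u

def moc_forward_sample(d0, k):
    """Eq. (5): d_hat_k = sqrt(ab_k)*d_0 + (1 - sqrt(ab_k))*mu_u + sqrt(1 - ab_k)*eps,
    where u is drawn with probability pi_u and eps ~ Cauchy(0, gamma_u)."""
    u = rng.choice(U, p=pi)                              # realize the indicator 1_u
    eps = rng.standard_cauchy((N, 2)) * gamma[u]         # Cauchy(0, gamma_u) noise
    sqrt_ab, ab = np.sqrt(alpha_bars[k - 1]), alpha_bars[k - 1]
    return sqrt_ab * d0 + (1.0 - sqrt_ab) * mu[u] + np.sqrt(1.0 - ab) * eps

d0 = rng.random((N, 2))                                  # ground-truth 2D keypoints
supervision = [moc_forward_sample(d0, k) for k in range(1, K + 1)]   # d_hat_1 ... d_hat_K
```

As k grows and alpha_bar_k shrinks, the d_0 term vanishes and the samples approach the characterized MoC distribution, matching the limit discussed above.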
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.399, + 0.895, + 0.505 + ], + "angle": 0, + "content": "ter the above MoC-based forward process, we can use the generated \\(\\{\\hat{D}_k\\}_{k=1}^{K-1}\\) as supervision signals to train the diffusion model \\(M_{\\mathrm{diff}}\\) to learn the reverse process. More details about Eq. (5) can be found in Supplementary material. Such a forward process is only conducted to generate supervision signals for training the diffusion model, while we only need to conduct the reverse process during testing." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.506, + 0.895, + 0.627 + ], + "angle": 0, + "content": "Reverse Process. In the reverse process, we aim to recover a desired determinate keypoints distribution \\( D_0 \\) from the initial distribution \\( D_K \\). As discussed above, we characterize \\( D_K \\) via a MoC model and then generate \\( \\{\\hat{D}_k\\}_{k=1}^{K-1} \\) as supervision signals to optimize the diffusion model to learn to perform the reverse process \\( (\\hat{D}_K \\to \\hat{D}_{K-1} \\to \\dots \\to D_0) \\), in which the model iteratively reduces the noise and indeterminacy in \\( \\hat{D}_K \\) to generate \\( D_0 \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.628, + 0.896, + 0.78 + ], + "angle": 0, + "content": "However, it can still be difficult to generate \\( D_0 \\) by directly performing the reverse process from \\( \\hat{D}_K \\), because the object appearance features are lacking in \\( \\hat{D}_K \\). Such features can help constrain the model reverse process based on the input image to get accurate predictions. Thus we further leverage the appearance features from the image as context to guide \\( M_{\\mathrm{diff}} \\) in the reverse process. Specifically, we reuse the features extracted from the keypoints distribution initializer as the appearance features \\( f_{\\mathrm{app}} \\) and feed \\( f_{\\mathrm{app}} \\) into the diffusion model, as shown in Fig. 3." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.781, + 0.897, + 0.903 + ], + "angle": 0, + "content": "Our reverse process aims to generate a determinate distribution \\(D_0\\) from the indeterminate distribution \\(\\hat{D}_K\\) (during training) or \\(D_K\\) (during testing). Below we describe the reverse process during testing. We first obtain \\(f_{\\mathrm{app}}\\) from the input image. Then to help the diffusion model to learn to perform denoising at each reverse step, following [18, 52], we generate the unique step embedding \\(f_D^k\\) to inject the step number \\((k)\\) information into the model. In this way, given a" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "9680" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.091, + 0.47, + 0.152 + ], + "angle": 0, + "content": "set of noisy keypoints coordinates \\(d_{k}\\in \\mathbb{R}^{N\\times 2}\\) drawn from \\(D_{k}\\) at the \\(k^{th}\\) step, we use diffusion model \\(M_{\\mathrm{diff}}\\) conditioned on the step embedding \\(f_{D}^{k}\\) and the object appearance features \\(f_{\\mathrm{app}}\\) to recover \\(d_{k - 1}\\) from \\(d_{k}\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.156, + 0.469, + 0.174 + ], + "angle": 0, + "content": "\\[\nd _ {k - 1} = M _ {\\text {d i f f}} \\left(d _ {k}, f _ {\\text {a p p}}, f _ {D} ^ {k}\\right) \\tag {6}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.182, + 0.274, + 0.198 + ], + "angle": 0, + "content": "3.3. 
Training and Testing" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.206, + 0.469, + 0.266 + ], + "angle": 0, + "content": "Training. Following [44], we first select \\(N\\) 3D keypoints from the surface of the object CAD model using the farthest point sampling (FPS) algorithm. Then we conduct the training process in the following two stages." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.267, + 0.469, + 0.431 + ], + "angle": 0, + "content": "In the first stage, to initialize the distribution \\(D_K\\), we optimize the keypoints distribution initializer. Specifically, for each training sample, given the pre-selected \\(N\\) 3D keypoints, we can obtain the ground-truth coordinates of the corresponding \\(N\\) 2D keypoints using the ground-truth 6D object pose. Then for each keypoints, based on the corresponding ground-truth coordinates, we generate a ground-truth heatmap following [42] for training the initializer. Thus for each training sample, we generate \\(N\\) ground-truth heatmaps. In this way, the loss function \\(L_{\\mathrm{init}}\\) for optimizing the initializer can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.433, + 0.469, + 0.461 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {i n i t}} = \\left\\| \\mathbf {H} _ {\\text {p r e d}} - \\mathbf {H} _ {\\mathrm {G T}} \\right\\| _ {2} ^ {2} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.462, + 0.469, + 0.492 + ], + "angle": 0, + "content": "where \\(\\mathbf{H}_{\\mathrm{pred}}\\) and \\(\\mathbf{H}_{\\mathrm{GT}}\\) denote the predicted heatmaps and ground-truth heatmaps, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.492, + 0.469, + 0.719 + ], + "angle": 0, + "content": "In the second stage, we optimize the diffusion model \\( M_{\\mathrm{diff}} \\). For each training sample, to optimize \\( M_{\\mathrm{diff}} \\), we perform the following steps. (1) We first send the input image into an off-the-shelf object detector [57] and then feed the detected ROI into the trained initializer to obtain \\( N \\) heatmaps. Meanwhile, we can also obtain \\( f_{\\mathrm{app}} \\). (2) We use the \\( N \\) predicted heatmaps to initialize \\( D_K \\), and leverage the EM-type algorithm to characterize \\( D_K \\) as a MoC distribution \\( \\hat{D}_K \\). (3) Based on \\( \\hat{D}_K \\), we use the ground-truth keypoints coordinates \\( d_0 \\) to directly generate \\( M \\) sets of \\( (\\hat{d}_1, \\dots, \\hat{d}_K) \\) (i.e., \\( \\{\\hat{d}_1^i, \\dots, \\hat{d}_K^i\\}_{i=1}^M \\)) via the forward process (Eq. (5)). (4) Then, we aim to optimize the diffusion model \\( M_{\\mathrm{diff}} \\) to recover \\( \\hat{d}_{k-1}^i \\) from \\( \\hat{d}_k^i \\) iteratively. Following previous diffusion works [18, 52], we formulate the loss \\( L_{\\mathrm{diff}} \\) for optimizing \\( M_{\\mathrm{diff}} \\) as follows \\( (\\hat{d}_0^i = d_0 \\) for all \\( i \\)):" + }, + { + "type": "equation", + "bbox": [ + 0.12, + 0.722, + 0.469, + 0.763 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {d i f f}} = \\sum_ {i = 1} ^ {M} \\sum_ {k = 1} ^ {K} \\left\\| M _ {\\text {d i f f}} \\left(\\hat {d} _ {k} ^ {i}, f _ {\\text {a p p}}, f _ {D} ^ {k}\\right) - \\hat {d} _ {k - 1} ^ {i} \\right\\| _ {2} ^ {2} \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Testing. 
During testing, for each testing sample, by feeding the input image to the object detector and the keypoints distribution initializer consecutively, we can initialize \\(D_K\\) and meanwhile obtain \\(f_{\\mathrm{app}}\\). Then, we perform the reverse process. During the reverse process, we sample \\(M\\) sets of noisy keypoints coordinates from \\(D_K\\) (i.e., \\(\\{d_K^i\\}_{i = 1}^M\\)) and feed them into the trained diffusion model. Here we sample \\(M\\) sets of keypoints coordinates, because we are converting from a distribution \\((D_K)\\) towards another distribution \\((D_0)\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.183 + ], + "angle": 0, + "content": "Then the model iteratively performs the reverse steps. After \\(K\\) reverse diffusion steps, we obtain \\(M\\) sets of predicted keypoints coordinates (i.e., \\(\\{d_0^i\\}_{i = 1}^M\\)). To obtain the final keypoints coordinates prediction \\(d_{0}\\), we compute the mean of the \\(M\\) predictions. Finally, we can solve for the 6D object pose using a PnP solver, like [44, 53]." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.193, + 0.688, + 0.208 + ], + "angle": 0, + "content": "3.4. Model Architecture" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.217, + 0.892, + 0.247 + ], + "angle": 0, + "content": "Our framework mainly consists of the diffusion model \\((M_{\\mathrm{diff}})\\) and the keypoints distribution initializer." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.248, + 0.892, + 0.37 + ], + "angle": 0, + "content": "Diffusion Model \\( M_{\\mathrm{diff}} \\). As illustrated in Fig. 3, our proposed diffusion model \\( M_{\\mathrm{diff}} \\) mainly consists of a transformer encoder-decoder architecture. The appearance features \\( f_{\\mathrm{app}} \\) are sent into the encoder for extracting context information to aid the reverse process in the decoder. \\( f_{D}^{k} \\) and \\( \\{d_k^i\\}_{i=1}^M \\) (or \\( \\{\\hat{d}_k^i\\}_{i=1}^M \\) during training) are sent into the decoder for the reverse process. Both the encoder and the decoder contain a stack of three transformer layers." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.371, + 0.892, + 0.538 + ], + "angle": 0, + "content": "More specifically, as for the encoder part, we first map \\( f_{\\mathrm{app}} \\in \\mathbb{R}^{16 \\times 16 \\times 512} \\) through a \\( 1 \\times 1 \\) convolution layer to a latent embedding \\( e_{\\mathrm{app}} \\in \\mathbb{R}^{16 \\times 16 \\times 128} \\). To retain the spatial information, following [59], we further incorporate positional encodings into \\( e_{\\mathrm{app}} \\). Afterwards, we flatten \\( e_{\\mathrm{app}} \\) into a feature sequence \\( (\\mathbb{R}^{256 \\times 128}) \\), and send it into the encoder. The encoder output \\( f_{\\mathrm{enc}} \\) containing the extracted object information will be sent into the decoder to aid the reverse process. Note that during testing, for each sample, we only need to conduct the above computation process once to obtain the corresponding \\( f_{\\mathrm{enc}} \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.539, + 0.892, + 0.78 + ], + "angle": 0, + "content": "The decoder part iteratively performs the reverse process. For notation simplicity, below we describe the reverse process for a single sample \\(d_{k}\\) instead of the \\(M\\) samples \\(\\left(\\{d_1^i,\\dots,d_K^i\\}_{i = 1}^M\\right)\\). 
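Before the step-by-step description that follows, the loop being described (iterating Eq. (6) over the reverse steps for each of the M sampled sets and then averaging, as in the testing procedure above) can be sketched as follows. Here model and step_embedding are dummy placeholders for the trained M_diff and the sinusoidal step embedding f_D^k, so the code only illustrates the control flow under those assumptions.

```python
import numpy as np

rng = np.random.default_rng(0)
N, M, K = 128, 5, 100                      # paper settings: keypoints, sampled sets, steps

def step_embedding(k, dim=128):
    """Sinusoidal step embedding f_D^k (standard formulation, assumed here)."""
    freqs = np.exp(-np.log(10000.0) * np.arange(dim // 2) / (dim // 2))
    return np.concatenate([np.sin(k * freqs), np.cos(k * freqs)])

def model(d_k, f_app, f_Dk):
    """Dummy stand-in for the trained diffusion model M_diff of Eq. (6)."""
    return 0.99 * d_k                      # placeholder denoising update

f_app = rng.random((16, 16, 512))          # appearance features from the initializer
d = rng.random((M, N, 2))                  # M coordinate sets sampled from D_K

for k in range(K, 0, -1):                  # reverse process: d_k -> d_{k-1}
    f_Dk = step_embedding(k)
    d = np.stack([model(d[i], f_app, f_Dk) for i in range(M)])
    # In practice the reverse schedule can be shortened (the paper reports DDIM acceleration).

d_0 = d.mean(axis=0)                       # final keypoint prediction (N x 2) for the PnP solver
```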
Specifically, at the \\(k\\)-th reverse step, to inject the current step number \\((k)\\) information into the decoder, we first generate the step embedding \\(f_{D}^{k}\\in \\mathbb{R}^{1\\times 128}\\) using the sinusoidal function following [18, 52]. Meanwhile, we use an FC layer to map the input \\(d_{k}\\in \\mathbb{R}^{N\\times 2}\\) to a latent embedding \\(e_k\\in \\mathbb{R}^{N\\times 128}\\). Then we concatenate \\(f_{D}^{k}\\) and \\(e_k\\) along the first dimension, and send it into the decoder. By interacting with the encoder output \\(f_{\\mathrm{enc}}\\) (extracted object information) via cross-attention at each layer, the decoder produces \\(f_{\\mathrm{dec}}\\), which is further mapped into the keypoints coordinates prediction \\(d_{k - 1}\\in \\mathbb{R}^{N\\times 2}\\) via an FC layer. Then we send \\(d_{k - 1}\\) back to the decoder as the input to perform the next reverse step." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Keypoints Distribution Initializer. The initializer adopts a ResNet-34 backbone, which is commonly used in 6D pose estimation methods [4, 53, 61]. To generate heatmaps to initialize the distribution \\( D_K \\), we add two deconvolution layers followed by a \\( 1 \\times 1 \\) convolution layer after the ResNet-34 backbone, and then we obtain predicted heatmaps \\( \\mathbf{H}_{\\mathrm{pred}} \\in \\mathbb{R}^{N \\times \\frac{H}{4} \\times \\frac{W}{4}} \\) where \\( H \\) and \\( W \\) denote the height and width of the input ROI image respec" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "9681" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.16, + 0.09, + 0.275, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.188, + 0.179, + 0.246, + 0.188 + ], + "angle": 0, + "content": "input image" + }, + { + "type": "image", + "bbox": [ + 0.302, + 0.09, + 0.429, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.43, + 0.091, + 0.547, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.558, + 0.091, + 0.684, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.091, + 0.805, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.191, + 0.892, + 0.261 + ], + "angle": 0, + "content": "Figure 4. Visualization of the denoising process of a sample with our framework. In this example, the target object is the yellow duck and for clarity, we here show three keypoints only. The red dots indicate the ground-truth locations of these three keypoints. The noisy heatmap before denoising reflects that factors like occlusions and clutter in the scene can introduce noise and indeterminacy when detecting keypoints. As shown, our diffusion model can effectively and smoothly reduce the noise and indeterminacy in the initial distribution step by step, finally recovering a high-quality and determinate distribution of keypoints coordinates. (Better viewed in color)" + }, + { + "type": "table_caption", + "bbox": [ + 0.104, + 0.265, + 0.862, + 0.278 + ], + "angle": 0, + "content": "Table 1. Comparisons with RGB-based 6D object pose estimation methods on the LM-O dataset. (*) denotes symmetric objects." + }, + { + "type": "table", + "bbox": [ + 0.107, + 0.28, + 0.861, + 0.38 + ], + "angle": 0, + "content": "
MethodPVNet [44]HybridPose [51]RePose [24]DeepIM [33]GDR-Net [61]SO-Pose [8]CRT-6D [4]ZebraPose [53]CheckerPose [35]Ours
ape15.820.931.159.246.848.453.457.958.360.6
can63.375.380.063.590.885.892.095.095.797.9
cat16.724.925.626.240.532.742.060.662.363.2
driller65.770.273.155.682.677.481.494.893.796.6
duck25.227.943.052.446.948.944.964.569.967.2
eggbox*50.252.451.763.054.252.462.770.970.073.5
glue*49.653.854.371.775.878.380.288.786.492.0
holepuncher39.754.253.652.560.175.374.383.083.885.5
Mean40.847.551.655.562.262.366.376.977.579.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.39, + 0.468, + 0.431 + ], + "angle": 0, + "content": "Table 2. Comparisons with RGB-based 6D object pose estimation methods on the YCB-V dataset. (-) indicates the corresponding result is not reported in the original paper." + }, + { + "type": "table", + "bbox": [ + 0.097, + 0.433, + 0.449, + 0.55 + ], + "angle": 0, + "content": "
MethodADD(-S)AUC of ADD-SAUC of ADD(-S)
SegDriven[21]39.0--
SingleStage[22]53.9--
CosyPose [29]-89.884.5
RePose [24]62.188.582.0
GDR-Net [61]60.191.684.4
SO-Pose [8]56.890.983.9
ZebraPose [53]80.590.185.3
CheckerPose [35]81.491.386.4
Ours83.891.587.0
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.563, + 0.468, + 0.61 + ], + "angle": 0, + "content": "tively. Moreover, the features outputted by the ResNet-34 backbone, combined with features obtained from methods [35, 53], are used as the object features \\( f_{\\mathrm{app}} \\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.624, + 0.208, + 0.64 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.65, + 0.349, + 0.665 + ], + "angle": 0, + "content": "4.1. Datasets & Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.674, + 0.468, + 0.763 + ], + "angle": 0, + "content": "Given that previous works [8, 24, 67] have reported the evaluation accuracy over \\(95\\%\\) on the Linemod (LM) dataset [17], the performance on this dataset has become saturated. Thus recent works [4, 53] mainly focus on using the LM-O dataset [2] and the YCB-V dataset [63] that are more challenging, which we follow." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.468, + 0.901 + ], + "angle": 0, + "content": "LM-O Dataset. The Linemod Occlusion (LM-O) dataset contains 1214 images and is a challenging subset of the LM dataset. In this dataset, around 8 objects are annotated on each image and the objects are often heavily occluded. Following [4, 53], we use both the real images from the LM dataset and the publicly available physically-based rendering (pbr) images [6] as the training images for LM-O. Following [53, 61], on LM-O dataset, we evaluate the model performance using the commonly-used ADD(-S) metric." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.393, + 0.892, + 0.497 + ], + "angle": 0, + "content": "For this metric, we compute the mean distance between the model points transformed using the predicted pose and the same model points transformed using the ground-truth pose. For symmetric objects, following [63], the mean distance is computed based on the closest point distance. If the mean distance is less than \\(10\\%\\) of the model diameter, the predicted pose is regarded as correct." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.5, + 0.892, + 0.666 + ], + "angle": 0, + "content": "YCB-V Dataset. The YCB-V dataset is a large-scale dataset containing 21 objects and over 100k real images. The samples in this dataset often exhibit occlusions and cluttered backgrounds. Following [4, 53], we use both the real images from the training set of the YCB-V dataset and the publicly available pbr images as the training images for YCB-V. Following [53, 61], we evaluate the model performance using the following metrics: ADD(-S), AUC (Area Under the Curve) of ADD-S, and AUC of ADD(-S). For calculating AUC, we set the maximum distance threshold to \\(10\\mathrm{cm}\\) following [63]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.68, + 0.716, + 0.696 + ], + "angle": 0, + "content": "4.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.704, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We conduct our experiments on an Nvidia V100 GPU. We set the number of pre-selected 3D keypoints \\(N\\) to 128. During training, following [34, 53], we utilize the dynamic zoom-in strategy to produce augmented ROI images. During testing, we use the detected bounding box with Faster RCNN [49] and FCOS [57] provided by CDPNv2 [34]. The cropped ROI image is resized to the shape of \\(3 \\times 256 \\times 256\\) (\\(H = W = 256\\)). 
We characterize \\(D_K\\) via a MoC model with 9 Cauchy kernels (\\(U = 9\\)) for the forward diffusion process. We optimize the diffusion model \\(M_{\\mathrm{diff}}\\) for 1500 epochs using the Adam optimizer [28] with an initial learning rate of 4e-5. Moreover, we set the number of sampled sets \\(M\\) to 5, and the number of diffusion steps \\(K\\) to" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9682" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.079, + 0.088, + 0.169, + 0.155 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.089, + 0.269, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.28, + 0.089, + 0.369, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.089, + 0.469, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.079, + 0.161, + 0.169, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.161, + 0.268, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.28, + 0.161, + 0.369, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.161, + 0.469, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.241, + 0.47, + 0.339 + ], + "angle": 0, + "content": "Figure 5. Qualitative results. Green bounding boxes represent the ground-truth poses and blue bounding boxes represent the predicted poses of our method. As shown, even facing severe occlusions, clutter in the scene or varying environment, our framework can still accurately recover the object poses, showing the effectiveness of our method for handling the noise and indeterminacy caused by various factors in object pose estimation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.348, + 0.469, + 0.44 + ], + "angle": 0, + "content": "100. Following [53], we use Progressive-X [1] as the PnP solver. Note that during testing, instead of performing the reverse process with all the \\(K\\) steps, we accelerate the process with DDIM [52], a recently proposed diffusion acceleration method. With DDIM acceleration, we only need to perform 10 steps to finish the reverse process during testing." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.45, + 0.442, + 0.466 + ], + "angle": 0, + "content": "4.3. Comparison with State-of-the-art Methods" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.473, + 0.469, + 0.578 + ], + "angle": 0, + "content": "Results on LM-O Dataset. As shown in Tab. 1, compared to existing methods, our method achieves the best mean performance, showing the superiority of our method. We also show qualitative results on the LM-O dataset in Fig. 5. As shown, even in the presence of large occlusions (including self-occlusions) and cluttered backgrounds, our method still produces accurate predictions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.58, + 0.469, + 0.655 + ], + "angle": 0, + "content": "Results on YCB-V Dataset. As shown in Tab. 2, our framework achieves the best performance on both the ADD(-S) and the AUC of ADD(-S) metrics, and is comparable to the state-of-the-art method on the AUC of ADD-S metric, showing the effectiveness of our method." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.666, + 0.241, + 0.68 + ], + "angle": 0, + "content": "4.4. 
Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.469, + 0.734 + ], + "angle": 0, + "content": "We conduct extensive ablation experiments on the LM-O dataset, and we report the model performance on ADD(-S) metric averaged over all the objects." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.297, + 0.854 + ], + "angle": 0, + "content": "Impact of denoising process. In our framework, we predict keypoints coordinates via performing the denoising process. To evaluate the efficacy of this process, we test three variants. In the first variant (Variant A), we remove the diffusion model" + }, + { + "type": "table_caption", + "bbox": [ + 0.31, + 0.738, + 0.469, + 0.779 + ], + "angle": 0, + "content": "Table 3. Evaluation on the effectiveness of the denoising process." + }, + { + "type": "table", + "bbox": [ + 0.315, + 0.78, + 0.469, + 0.852 + ], + "angle": 0, + "content": "
Method | ADD(-S)
Variant A | 49.2
Variant B | 57.3
Variant C | 61.1
6D-Diff | 79.6
" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.469, + 0.9 + ], + "angle": 0, + "content": "\\(M_{\\mathrm{diff}}\\) and predict keypoints coordinates directly from the heatmaps produced by the keypoints distribution initializer. The second variant (Variant \\(B\\)) has the same model architec" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.257 + ], + "angle": 0, + "content": "ture as our framework, but the diffusion model is optimized to directly predict the coordinates instead of learning the reverse process. Same as Variant \\( B \\), the third variant (Variant \\( C \\)) is also optimized to directly predict coordinates without denoising process. For Variant \\( C \\), we stack our diffusion model structure multiple times to produce a deep network, which has similar computation complexity with our framework. As shown in Tab. 3, compared to our framework, the performance of these variants significantly drops, showing that the effectiveness of our framework mainly lies in the designed denoising process." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.258, + 0.72, + 0.348 + ], + "angle": 0, + "content": "Impact of object appearance features \\(f_{\\mathrm{app}}\\) In our framework, we send the appearance features \\(f_{\\mathrm{app}}\\) into the diffusion model \\(M_{\\mathrm{diff}}\\) to aid the reverse process. To evaluate its effect," + }, + { + "type": "table_caption", + "bbox": [ + 0.732, + 0.263, + 0.892, + 0.305 + ], + "angle": 0, + "content": "Table 4. Evaluation on the effectiveness of the object appearance features \\(f_{\\mathrm{app}}\\)" + }, + { + "type": "table", + "bbox": [ + 0.737, + 0.306, + 0.872, + 0.347 + ], + "angle": 0, + "content": "
Method | ADD(-S)
w/o fapp | 74.4
6D-Diff | 79.6
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.349, + 0.892, + 0.408 + ], + "angle": 0, + "content": "we test a variant in which we do not send \\( f_{\\mathrm{app}} \\) into \\( M_{\\mathrm{diff}} \\) (\\( w / o \\, f_{\\mathrm{app}} \\)). As shown in Tab. 4, our framework performs better than this variant, showing that \\( f_{\\mathrm{app}} \\) can aid \\( M_{\\mathrm{diff}} \\) to get more accurate predictions." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.41, + 0.642, + 0.498 + ], + "angle": 0, + "content": "Impact of MoC design. During training, we model the distribution \\(D_K\\) from the intermediate representation" + }, + { + "type": "table_caption", + "bbox": [ + 0.655, + 0.412, + 0.891, + 0.439 + ], + "angle": 0, + "content": "Table 5. Evaluation on the effectiveness of the MoC design." + }, + { + "type": "table", + "bbox": [ + 0.66, + 0.44, + 0.892, + 0.491 + ], + "angle": 0, + "content": "
Method | ADD(-S)
Standard diffusion w/o MoC | 73.1
Heatmaps as condition | 76.2
6D-Diff | 79.6
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.499, + 0.892, + 0.696 + ], + "angle": 0, + "content": "(heatmaps) as a MoC distribution \\(\\hat{D}_K\\), and train the diffusion model \\(M_{\\mathrm{diff}}\\) to perform the reverse process from \\(\\hat{D}_K\\). To investigate the impact of this design, we evaluate two variants that train \\(M_{\\mathrm{diff}}\\) in different ways. In the first variant (Standard diffusion w/o MoC), we train the model to start the reverse process from the standard Gaussian noise, i.e., following the basic forward process in Eq. (3) for model training. In the second variant (Heatmaps as condition), we still train the model to start denoising from the random Gaussian noise but we use the heatmaps as the condition for the reverse process. As shown in Tab. 5, our framework consistently outperforms both variants, showing effectiveness of the designed MoC-based forward process." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.71, + 0.619, + 0.725 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.854 + ], + "angle": 0, + "content": "In this paper, we proposed a novel diffusion-based 6D object pose estimation framework, which effectively handles noise and indeterminacy in object pose estimation. In our framework, we formulate object keypoints detection as a carefully-designed reverse diffusion process. We design a novel MoC-based forward process to effectively utilize the distribution priors in intermediate representations. Our framework achieves superior performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Acknowledgement. This work was supported by the National Research Foundation Singapore under the AI Singapore Programme (Award Number: AISG-100E-2023-121)." + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9683" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.473, + 0.171 + ], + "angle": 0, + "content": "[1] Daniel Barath and Jiri Matas. Progressive-x: Efficient, anytime, multi-model fitting algorithm. In Proceedings of the IEEE/CVF international conference on computer vision, pages 3780-3788, 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.257 + ], + "angle": 0, + "content": "[2] Eric Brachmann, Frank Michel, Alexander Krull, Michael Ying Yang, Stefan Gumhold, et al. Uncertainty-driven 6d pose estimation of objects and scenes from a single rgb image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3364-3372, 2016. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.258, + 0.472, + 0.328 + ], + "angle": 0, + "content": "[3] Benjamin Busam, Marco Esposito, Simon Che'Rose, Nassir Navab, and Benjamin Frisch. A stereo vision approach for cooperative robotic movement therapy. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 127-135, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.33, + 0.472, + 0.399 + ], + "angle": 0, + "content": "[4] Pedro Castro and Tae-Kyun Kim. Crt-6d: Fast 6d object pose estimation with cascaded refinement transformers. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 5746-5755, 2023. 
1, 2, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.401, + 0.472, + 0.471 + ], + "angle": 0, + "content": "[5] Bo Chen, Alvaro Parra, Jiewei Cao, Nan Li, and Tat-Jun Chin. End-to-end learnable geometric vision by backpropagating pnp optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8100-8109, 2020. 1, 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.473, + 0.472, + 0.528 + ], + "angle": 0, + "content": "[6] Maximilian Denninger, Martin Sundermeyer, Dominik Winkelbauer, Youssef Zidan, Dmitry Olefir, Mohamad Elbadrawy, Ahsan Lodhi, and Harinandan Katam. Blenderproc. arXiv preprint arXiv:1911.01911, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.53, + 0.472, + 0.572 + ], + "angle": 0, + "content": "[7] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021. 1, 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.574, + 0.472, + 0.644 + ], + "angle": 0, + "content": "[8] Yan Di, Fabian Manhardt, Gu Wang, Xiangyang Ji, Nassir Navab, and Federico Tombari. So-pose: Exploiting self-occlusion for direct 6d pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12396–12405, 2021. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.645, + 0.472, + 0.687 + ], + "angle": 0, + "content": "[9] Lin Geng Foo, Hossein Rahmani, and Jun Liu. Aigc for various data modalities: A survey. arXiv preprint arXiv:2308.14177, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.689, + 0.472, + 0.758 + ], + "angle": 0, + "content": "[10] Xiao-Shan Gao, Xiao-Rong Hou, Jianliang Tang, and Hang-Fei Cheng. Complete solution classification for the perspective-three-point problem. IEEE transactions on pattern analysis and machine intelligence, 25(8):930-943, 2003. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.76, + 0.472, + 0.829 + ], + "angle": 0, + "content": "[11] Jia Gong, Lin Geng Foo, Zhipeng Fan, Qiuhong Ke, Hossein Rahmani, and Jun Liu. Diffpose: Toward more reliable 3d pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13041-13051, 2023. 1, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.831, + 0.472, + 0.902 + ], + "angle": 0, + "content": "[12] Tianpei Gu, Guangyi Chen, Junlong Li, Chunze Lin, Yongming Rao, Jie Zhou, and Jiwen Lu. Stochastic trajectory prediction via motion indeterminacy diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17113-17122, 2022. 1, 3, 4" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.473, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.163 + ], + "angle": 0, + "content": "[13] Shuxuan Guo, Yinlin Hu, Jose M Alvarez, and Mathieu Salzmann. Knowledge distillation for 6d pose estimation by aligning distributions of local predictions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18633-18642, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.893, + 0.22 + ], + "angle": 0, + "content": "[14] Yang Hai, Rui Song, Jiaojiao Li, and Yinlin Hu. Shape-constraint recurrent flow for 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4831-4840, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.894, + 0.29 + ], + "angle": 0, + "content": "[15] Yang Hai, Rui Song, Jiaojiao Li, Mathieu Salzmann, and Yinlin Hu. Rigidity-aware detection for 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8927-8936, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.291, + 0.893, + 0.362 + ], + "angle": 0, + "content": "[16] Rasmus Laurvig Haugaard and Anders Glent Buch. Surfemb: Dense and continuous correspondence distributions for object pose estimation with learnt surface embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6749-6758, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.363, + 0.894, + 0.461 + ], + "angle": 0, + "content": "[17] Stefan Hinterstoisser, Vincent Lepetit, Slobodan Ilic, Stefan Holzer, Gary Bradski, Kurt Konolige, and Nassir Navab. Model based training, detection and pose estimation of texture-less 3d objects in heavily cluttered scenes. In Computer Vision-ACCV 2012: 11th Asian Conference on Computer Vision, Daejeon, Korea, November 5-9, 2012, Revised Selected Papers, Part I 11, pages 548-562. Springer, 2013. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.462, + 0.893, + 0.517 + ], + "angle": 0, + "content": "[18] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, pages 6840-6851. Curran Associates, Inc., 2020. 1, 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.518, + 0.893, + 0.574 + ], + "angle": 0, + "content": "[19] Tomas Hodan, Daniel Barath, and Jiri Matas. Epos: Estimating 6d pose of objects with symmetries. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11703-11712, 2020. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.576, + 0.893, + 0.631 + ], + "angle": 0, + "content": "[20] Tsu-Ching Hsiao, Hao-Wei Chen, Hsuan-Kung Yang, and Chun-Yi Lee. Confronting ambiguity in 6d object pose estimation via score-based diffusion on se (3). arXiv preprint arXiv:2305.15873, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.632, + 0.893, + 0.701 + ], + "angle": 0, + "content": "[21] Yinlin Hu, Joachim Hugonot, Pascal Fua, and Mathieu Salzmann. Segmentation-driven 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3385-3394, 2019. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.703, + 0.894, + 0.76 + ], + "angle": 0, + "content": "[22] Yinlin Hu, Pascal Fua, Wei Wang, and Mathieu Salzmann. Single-stage 6d object pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2930-2939, 2020. 1, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.76, + 0.893, + 0.829 + ], + "angle": 0, + "content": "[23] Shun Iwase, Xingyu Liu, Rawal Khirodkar, Rio Yokota, and Kris M. Kitani. Repose: Fast 6d object pose refinement via deep texture rendering. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 3303-3312, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.831, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[24] Shun Iwase, Xingyu Liu, Rawal Khirodkar, Rio Yokota, and Kris M Kitani. Repose: Fast 6d object pose refinement via deep texture rendering. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3303-3312, 2021. 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "9684" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[25] Haobo Jiang, Mathieu Salzmann, Zheng Dang, Jin Xie, and Jian Yang. Se (3) diffusion model-based point cloud registration for robust 6d object pose estimation. Advances in Neural Information Processing Systems, 36, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.468, + 0.189 + ], + "angle": 0, + "content": "[26] Zakiah I. Kalantan and Jochen Einbeck. Quantile-based estimation of the finite cauchy mixture model. Symmetry, 11 (9), 2019. 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.191, + 0.47, + 0.259 + ], + "angle": 0, + "content": "[27] Wadim Kehl, Fabian Manhardt, Federico Tombari, Slobodan Ilic, and Nassir Navab. Ssd-6d: Making rgb-based 3d detection and 6d pose estimation great again. In Proceedings of the IEEE international conference on computer vision, pages 1521–1529, 2017. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.261, + 0.468, + 0.3 + ], + "angle": 0, + "content": "[28] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.302, + 0.468, + 0.383 + ], + "angle": 0, + "content": "[29] Yann Labbe, Justin Carpentier, Mathieu Aubry, and Josef Sivic. Cosypose: Consistent multi-view multi-object 6d pose estimation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVII 16, pages 574-591. Springer, 2020. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.386, + 0.468, + 0.441 + ], + "angle": 0, + "content": "[30] Junhyeok Lee, Junghwa Kang, Yoonho Nam, and TaeYoung Lee. Bias field correction in MRI with hampel noise denoising diffusion probabilistic model. In Medical Imaging with Deep Learning, short paper track, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.443, + 0.468, + 0.495 + ], + "angle": 0, + "content": "[31] Vincent Lepetit, Francesc Moreno-Noguer, and Pascal Fua. Ep n p: An accurate o (n) solution to the p np problem. International journal of computer vision, 81:155-166, 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.498, + 0.468, + 0.566 + ], + "angle": 0, + "content": "[32] Hongyang Li, Jiehong Lin, and Kui Jia. Dcl-net: Deep correspondence learning network for 6d pose estimation. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part IX, pages 369-385. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.568, + 0.468, + 0.622 + ], + "angle": 0, + "content": "[33] Yi Li, Gu Wang, Xiangyang Ji, Yu Xiang, and Dieter Fox. Deepim: Deep iterative matching for 6d pose estimation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 683-698, 2018. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.624, + 0.468, + 0.691 + ], + "angle": 0, + "content": "[34] Zhigang Li, Gu Wang, and Xiangyang Ji. Cdpn: Coordinates-based disentangled pose network for real-time rgb-based 6-dof object pose estimation. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7678-7687, 2019. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.693, + 0.468, + 0.761 + ], + "angle": 0, + "content": "[35] Ruyi Lian and Haibin Ling. Checkerpose: Progressive dense keypoint localization for object pose estimation with graph neural network. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14022-14033, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.763, + 0.468, + 0.831 + ], + "angle": 0, + "content": "[36] Xingyu Liu, Ruida Zhang, Chenyangguang Zhang, Bowen Fu, Jiwen Tang, Xiquan Liang, Jingyi Tang, Xiaotian Cheng, Yukang Zhang, Gu Wang, and Xiangyang Ji. Gdnpp. https://github.com/shanice-1/gdrnpp_bop2022, 2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.833, + 0.468, + 0.901 + ], + "angle": 0, + "content": "[37] Andreas Lugmayr, Martin Danelljan, Andres Romero, Fisher Yu, Radu Timofte, and Luc Van Gool. Repaint: Inpainting using denoising diffusion probabilistic models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11461-11471, 2022. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.146 + ], + "angle": 0, + "content": "[38] Fabian Manhardt, Wadim Kehl, Nassir Navab, and Federico Tombari. Deep model-based 6d pose refinement in rgb. In The European Conference on Computer Vision (ECCV), 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.149, + 0.892, + 0.203 + ], + "angle": 0, + "content": "[39] Eric Marchand, Hideaki Uchiyama, and Fabien Spindler. Pose estimation for augmented reality: a hands-on survey. IEEE transactions on visualization and computer graphics, 22(12):2633-2651, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.205, + 0.892, + 0.245 + ], + "angle": 0, + "content": "[40] Jianhan Mei, Xudong Jiang, and Henghui Ding. Spatial feature mapping for 6 dof object pose estimation. Pattern Recognition, 131:108835, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.247, + 0.892, + 0.314 + ], + "angle": 0, + "content": "[41] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. Sdedit: Guided image synthesis and editing with stochastic differential equations. In International Conference on Learning Representations, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.316, + 0.892, + 0.372 + ], + "angle": 0, + "content": "[42] Markus Oberweger, Mahdi Rad, and Vincent Lepetit. Making deep heatmaps robust to partial occlusions for 3d object pose estimation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 119-134, 2018. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.373, + 0.892, + 0.427 + ], + "angle": 0, + "content": "[43] Kiru Park, Timothy Patten, and Markus Vincze. Pix2pose: Pixel-wise coordinate regression of objects for 6d pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7668-7677, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.429, + 0.892, + 0.495 + ], + "angle": 0, + "content": "[44] Sida Peng, Yuan Liu, Qixing Huang, Xiaowei Zhou, and Hujun Bao. Pvnet: Pixel-wise voting network for 6dof pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4561-4570, 2019. 
1, 2, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.498, + 0.892, + 0.552 + ], + "angle": 0, + "content": "[45] Luis Pérez, Inigo Rodríguez, Nuria Rodríguez, Rubén Usamentiaga, and Daniel F García. Robot guidance using machine vision techniques in industrial environments: A comparative review. Sensors, 16(3):335, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.554, + 0.892, + 0.621 + ], + "angle": 0, + "content": "[46] Mahdi Rad and Vincent Lepetit. Bb8: A scalable, accurate, robust to partial occlusion method for predicting the 3d poses of challenging objects without using depth. In Proceedings of the IEEE international conference on computer vision, pages 3828-3836, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.623, + 0.892, + 0.678 + ], + "angle": 0, + "content": "[47] Jason Raphael Rambach, Alain Pagani, Michael Schneider, Oleksandr Artemenko, and Didier Stricker. 6dof object tracking based on 3d scans for augmented reality remote live support. Comput., 7:6, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.892, + 0.718 + ], + "angle": 0, + "content": "[48] Hong Ren, Lin Lin, Yanjie Wang, and Xin Dong. Robust 6-dof pose estimation under hybrid constraints. Sensors, 22 (22):8758, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.721, + 0.892, + 0.775 + ], + "angle": 0, + "content": "[49] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.777, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[50] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pages 2256-2265. PMLR, 2015. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[51] Chen Song, Jiaru Song, and Qixing Huang. Hybridpose: 6d object pose estimation under hybrid representations. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 431-440, 2020. 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.515, + 0.956 + ], + "angle": 0, + "content": "9685" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "[52] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021. 1, 2, 3, 4, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.472, + 0.219 + ], + "angle": 0, + "content": "[53] Yongzhi Su, Mahdi Saleh, Torben Fetzer, Jason Rambach, Nassir Navab, Benjamin Busam, Didier Stricker, and Federico Tombari. Zebrapose: Coarse to fine surface encoding for 6 dof object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6738-6748, 2022. 1, 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.221, + 0.47, + 0.276 + ], + "angle": 0, + "content": "[54] Martin Sundermeyer, Zoltán-Csaba Marton, Maximilian Durner, Manuel Brucker, and Rudolph Triebel. Implicit 3d orientation learning for 6d object detection from rgb images. 
In European Conference on Computer Vision, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.277, + 0.469, + 0.304 + ], + "angle": 0, + "content": "[55] Mahdi Teimouri. Statistical inference for mixture of cauchy distributions. arXiv preprint arXiv:1809.05722, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.469, + 0.361 + ], + "angle": 0, + "content": "[56] Bugra Tekin, Sudipta N Sinha, and Pascal Fua. Real-time seamless single shot 6d object pose prediction. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 292-301, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.363, + 0.469, + 0.417 + ], + "angle": 0, + "content": "[57] Zhi Tian, Chunhua Shen, Hao Chen, and Tong He. Fcos: Fully convolutional one-stage object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9627-9636, 2019. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.42, + 0.469, + 0.488 + ], + "angle": 0, + "content": "[58] Julien Urain, Niklas Funk, Jan Peters, and Georgia Chalvatzaki. Se(3)-diffusionfields: Learning smooth cost functions for joint grasp and motion optimization through diffusion. IEEE International Conference on Robotics and Automation (ICRA), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.49, + 0.469, + 0.545 + ], + "angle": 0, + "content": "[59] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.548, + 0.469, + 0.603 + ], + "angle": 0, + "content": "[60] Gu Wang, Fabian Manhardt, Xingyu Liu, Xiangyang Ji, and Federico Tombari. Occlusion-aware self-supervised monocular 6d object pose estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.604, + 0.469, + 0.673 + ], + "angle": 0, + "content": "[61] Gu Wang, Fabian Manhardt, Federico Tombari, and Xi-angyang Ji. Gdr-net: Geometry-guided direct regression network for monocular 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16611-16621, 2021. 1, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.676, + 0.469, + 0.744 + ], + "angle": 0, + "content": "[62] Di Wu, Zhaoyong Zhuang, Canqun Xiang, Wenbin Zou, and Xia Li. 6d-vnet: End-to-end 6-dof vehicle pose estimation from monocular rgb images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.469, + 0.787 + ], + "angle": 0, + "content": "[63] Yu Xiang, Tanner Schmidt, Venkatraman Narayanan, and Dieter Fox. PoseCNN: A convolutional neural network for 6d object pose estimation in cluttered scenes. 2018. 1, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.469, + 0.87 + ], + "angle": 0, + "content": "[64] Yan Xu, Kwan-Yee Lin, Guofeng Zhang, Xiaogang Wang, and Hongsheng Li. Rnnpose: Recurrent 6-dof object pose refinement with robust correspondence field estimation and pose optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[65] Heng Yang and Marco Pavone. 
Object pose estimation with statistical guarantees: Conformal keypoint detection" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.893, + 0.134 + ], + "angle": 0, + "content": "and geometric uncertainty propagation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8947-8958, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.893, + 0.205 + ], + "angle": 0, + "content": "[66] Jun Yang, Wenjie Xue, Sahar Ghavidel, and Steven L Waslander. 6d pose estimation for textureless objects on rgb frames using multi-view optimization. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 2905-2912. IEEE, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.207, + 0.893, + 0.26 + ], + "angle": 0, + "content": "[67] Sergey Zakharov, Ivan S. Shugurov, and Slobodan Ilic. Dpod: 6d pose object detector and refiner. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 1941-1950, 2019. 3, 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.893, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9686" + } + ] +] \ No newline at end of file diff --git a/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/1d3927f2-2533-4713-91b0-b3f9e13c8aed_origin.pdf b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/1d3927f2-2533-4713-91b0-b3f9e13c8aed_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..88663c9ac7c23920f8cce6394d6ca28c0a59de32 --- /dev/null +++ b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/1d3927f2-2533-4713-91b0-b3f9e13c8aed_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae7f9374cdfeaa865ba618fe3e9e0d7221b27b66420f43651e524ef083f68802 +size 3700664 diff --git a/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/full.md b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5729926b413ace588d466797d2af0542ef9ec6f0 --- /dev/null +++ b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/full.md @@ -0,0 +1,347 @@ +# 6D-Diff: A Keypoint Diffusion Framework for 6D Object Pose Estimation + +Li Xu $^{1\dagger}$ Haoxuan Qu $^{1\dagger}$ Yujun Cai $^{2}$ Jun Liu $^{1\dagger}$ $^{1}$ Singapore University of Technology and Design + $^{2}$ Nanyang Technological University + +{li_xu, haoxuan_qu}@mysmail.sutd.edu.sg, yujun001@e.ntu.edu.sg, jun.liu@sutd.edu.sg + +# Abstract + +Estimating the 6D object pose from a single RGB image often involves noise and indeterminacy due to challenges such as occlusions and cluttered backgrounds. Meanwhile, diffusion models have shown appealing performance in generating high-quality images from random noise with high indeterminacy through step-by-step denoising. Inspired by their denoising capability, we propose a novel diffusion-based framework (6D-Diff) to handle the noise and indeterminacy in object pose estimation for better performance. In our framework, to establish accurate 2D-3D correspondence, we formulate 2D keypoints detection as a reverse diffusion (denoising) process. 
To facilitate such a denoising process, we design a Mixture-of-Cauchy-based forward diffusion process and condition the reverse process on the object appearance features. Extensive experiments on the LM-O and YCB-V datasets demonstrate the effectiveness of our framework. + +# 1. Introduction + +6D object pose estimation aims to estimate the 6D pose of an object including its location and orientation, which has a wide range of applications, such as augmented reality [39, 47], robotic manipulation [3, 45], and automatic driving [62]. Recently, various methods [4, 5, 19, 22, 27, 44, 53, 61, 64] have been proposed to conduct RGB-based 6D object pose estimation since RGB images are easy to obtain. Despite the increased efforts, a variety of challenges persist in RGB-based 6D object pose estimation, including occlusions, cluttered backgrounds, and changeable environments [8, 40, 44, 60, 63]. These challenges can introduce significant noise and indeterminacy into the pose estimation process, leading to error-prone predictions [8, 40, 44]. + +Meanwhile, diffusion models [18, 52] have achieved ap + +![](images/008ea54ca7496cefe0d23c976582eb6ff19287170f86f620cff90d37b7f76d12.jpg) +Figure 1. Overview of our proposed 6D-Diff framework. As shown, given the 3D keypoints from the object 3D CAD model, we aim to detect the corresponding 2D keypoints in the image to obtain the 6D object pose. Note that when detecting keypoints, there are often challenges such as occlusions (including self-occlusions) and cluttered backgrounds that can introduce noise and indeterminacy into the process, impacting the accuracy of pose prediction. + +pealing results in various generation tasks such as image synthesis [7, 18] and image editing [41]. Specifically, diffusion models are able to recover high-quality determinate samples (e.g., clean images) from a noisy and indeterminate input data distribution (e.g., random noise) via a step-by-step denoising process [18, 52]. Motivated by such a strong denoising capability [11, 12, 18], we aim to leverage diffusion models to handle the RGB-based 6D object pose estimation task, since this task also involves tackling noise and indeterminacy. However, it can be difficult to directly use diffusion models to estimate the object pose, because diffusion models often start denoising from random Gaussian noise [18, 52]. Meanwhile, in RGB-based 6D object pose estimation, the object pose is often extracted from an intermediate representation, such as keypoint heatmaps [5], pixel-wise voting vectors [44], or object surface keypoint features [4]. Such an intermediate representation encodes useful distribution priors about the object pose. Thus starting denoising from such an representation shall effectively assist the diffusion model in recovering accurate object poses [11]. To achieve this, we propose a novel diffusion-based object pose estimation framework (6D-Diff) that can exploit prior distribution knowledge from the intermediate representation for better performance. + +Overall, our framework is a correspondence-based framework, in which to predict an object pose, given the 3D keypoints pre-selected from the object 3D CAD model, we first predict the coordinates of the 2D image keypoints corresponding to the pre-selected 3D keypoints. We then use the 3D keypoints together with the predicted 2D keypoints coordinates to compute the 6D object pose using a Perspective-n-Point (PnP) solver [10, 31]. As shown in Fig. 
1, to predict the 2D keypoints coordinates, we first extract an intermediate representation (the 2D keypoints heatmaps) through a keypoints distribution initializer. As discussed before, due to various factors, there often exists noise and indeterminacy in the keypoints detection process and the extracted heatmaps can be noisy as shown in Fig. 2. Thus we pass the distribution modeled from these keypoints heatmaps into a diffusion model to perform the denoising process to obtain the final keypoints coordinates prediction. + +Analogous to non-equilibrium thermodynamics [50], given a 2D image keypoint, we can consider all its possible locations in the image as particles in thermodynamics. Under low indeterminacy, the particles (possible locations) w.r.t. each 2D keypoint gather, and each keypoint can be determinately and accurately localized. In contrast, under high indeterminacy, these particles can stochastically spread over the input image, and it is difficult to localize each keypoint. The process of converting particles from low indeterminacy to high indeterminacy is called the forward process of the diffusion model. The goal of the diffusion model is to reverse the above forward process (through a reverse process), i.e., converting the particles from high indeterminacy to low indeterminacy. Here in our case, we aim to convert the indeterminate keypoints coordinates distribution modeled from the heatmaps into the determinate distribution. Below we briefly introduce the forward process and the reverse process in our diffusion model. + +In the forward process, we aim to generate supervision signals that will be used to optimize the diffusion model during the reverse process. Specifically, given a set of pre-selected 3D keypoints, we first acquire ground-truth coordinates of their corresponding 2D keypoints using the ground-truth object pose. Then these determinate ground-truth 2D coordinates are gradually diffused towards the indeterminate distribution modeled from the intermediate representation, and the distributions generated along the way will be used as supervision signals. Note that, as the distribution modeled from the intermediate representation can be complex and irregular, it is difficult to characterize such a distribution via the Gaussian distribution. This means that simply applying diffusion models in most existing generation works [7, 18, 52], which start denoising from the random Gaussian noise, can introduce potentially large errors. To tackle this challenge, we draw inspiration from the fact that the Mixture of Cauchy (MoC) model can effectively char + +
[Figure 2 panels: rows (a) and (b) each show, from left to right, the object 3D CAD model, the input image, and the corresponding keypoint heatmap.]
+ +Figure 2. Above we show two examples of keypoint heatmaps, which serve as the intermediate representation [4, 5, 44] in our framework. The red dots indicate the ground-truth locations of the keypoints. In the example (a), the target object is the pink cat, which is heavily occluded in the image and is shown in a different pose compared to the 3D model. As shown above, due to occlusions and cluttered backgrounds, the keypoint heatmaps are noisy, which reflects the noise and indeterminacy during the keypoints detection process. + +acterize complex and intractable distributions. Moreover, the MoC model is robust to potential outliers in the distribution to be characterized [26]. Thus we propose to model the intermediate representation using a MoC distribution instead of simply treating it as a random Gaussian noise. In this way, we gradually diffuse the determinate distribution (ground truth) of keypoints coordinates towards the modeled MoC distribution during the forward process. + +Correspondingly, in the reverse process, starting from the MoC distribution modeled in the forward process, we aim to learn to recover the ground-truth keypoints coordinates. To achieve this, we leverage the distributions generated step-by-step during the forward process as the supervision signals to train the diffusion model to learn the reverse process. In this way, the diffusion model can learn to convert the indeterminate MoC distribution of keypoints coordinates into a determinate one smoothly and effectively. After the reverse process, the 2D keypoints coordinates obtained from the final determinate distribution are used to compute the 6D object pose with the pre-selected 3D keypoints. Moreover, we further facilitate the model learning of such a reverse process by injecting object appearance features as context information. + +Our work makes the following contributions. 1) We propose a novel 6D-Diff framework, in which we formulate keypoints detection for 6D object pose estimation as a reverse diffusion process to effectively eliminate the noise and indeterminacy in object pose estimation. 2) To take advantage of the intermediate representation that encodes useful prior distribution knowledge for handling this task, we propose a novel MoC-based diffusion process. Besides, we facilitate the model learning by utilizing object features. + +# 2. Related Work + +RGB-based 6D Object Pose Estimation has received a lot of attention [4, 13-16, 23, 32, 33, 36, 38, 43, 44, 46, 53, 54, 56, 63-67]. Some works [22, 27, 61, 63] proposed to directly regress object poses. However, the non-linearity of the rotation space makes direct regression of object poses difficult [32]. Compared to this type of direct methods, correspondence-based methods [5, 19, 43, 44, 46, 53, 56] often demonstrate better performance, which estimate 6D object poses via learning 2D-3D correspondences between the observed image and the object 3D model. + +Among correspondence-based methods, several works [42, 44, 46, 48, 56] aim to predict the 2D keypoints coordinates corresponding to specific 3D keypoints. BB8 [46] proposed to detect the 2D keypoints corresponding to the 8 corners of the object's 3D bounding box. Later, PVNet [44] achieved better performance by estimating 2D keypoints for sampled points on the surface of the object 3D model via pixel-wise voting. Moreover, various methods [19, 43, 53, 61, 67] establish 2D-3D correspondences by localizing the 3D model point corresponding to each observed object pixel. 
Among these methods, DPOD [67] explored the use of UV texture maps to facilitate model training, and ZebraPose [53] proposed to encode the surface of the object 3D model efficiently through a hierarchical binary grouping. Besides, several pose refinement methods [23, 33, 38, 64] have been proposed, which conducted pose refinement given an initial pose estimation. + +In this paper, we also regard object pose estimation as a 2D-3D correspondence estimation problem. Different from previous works, here by formulating 2D-3D correspondence estimation as a distribution transformation process (denoising process), we propose a new framework (6D-Diff) that trains a diffusion model to perform progressive denoising from an indeterminate keypoints distribution to the desired keypoints distribution with low indeterminacy. + +Diffusion Models [7, 9, 18, 50, 52] are originally introduced for image synthesis. Showing appealing generation capabilities, diffusion models have also been explored in various other tasks [11, 12, 20, 25, 30, 37, 41, 58], such as image editing [41] and image inpainting [37]. Here we explore a new framework that tackles object pose estimation with a diffusion model. Different from previous generation works [7, 37, 41] that start denoising from random noise, to aid the denoising process for 6D object pose estimation, we design a novel MoC-based diffusion mechanism that enables the diffusion model to start denoising from a distribution containing useful prior distribution knowledge regarding the object pose. Moreover, we condition the denoising process on the object appearance features, to further guide the diffusion model to obtain accurate predictions. + +# 3. Method + +To handle the noise and indeterminacy in RGB-based 6D object pose estimation, inspired by [11], from a novel perspective of distribution transformation with progressive denoising, we propose a framework (6D-Diff) that represents a new brand of diffusion-based solution for 6D object pose estimation. Below we first revisit diffusion models in Sec. 3.1. Then we discuss our proposed framework in Sec. 3.2, and introduce its training and testing scheme in Sec. 3.3. We finally detail the model architecture in Sec. 3.4. + +# 3.1. Revisiting Diffusion Models + +The diffusion model [18, 52], which is a kind of probabilistic generative model, consists of two parts, namely the forward process and the reverse process. Specifically, given an original sample $d_0$ (e.g., a clean image), the process of diffusing the sample $d_0$ iteratively towards the noise (typically Gaussian noise) $d_K \sim \mathcal{N}(\mathbf{0},\mathbf{I})$ (i.e., $d_0 \to d_1 \to \ldots \to d_K$ ) is called the forward process. In contrast, the process of denoising the noise $d_K$ iteratively towards the sample $d_0$ (i.e., $d_K \to d_{K-1} \to \ldots \to d_0$ ) is called the reverse process. Each process is defined as a Markov chain. + +Forward Process. To obtain supervision signals for training the diffusion model to learn to perform the reverse process in a stepwise manner, we need to acquire the intermediate step results $\{d_k\}_{k=1}^{K-1}$ . Thus the forward process is first performed to generate these intermediate step results for training purpose. 
Specifically, the posterior distribution $q(d_{1:K}|d_0)$ from $d_1$ to $d_K$ is formulated as: + +$$ +q \left(d _ {1: K} \mid d _ {0}\right) = \prod_ {k = 1} ^ {K} q \left(d _ {k} \mid d _ {k - 1}\right) \tag {1} +$$ + +$$ +q \left(d _ {k} \mid d _ {k - 1}\right) = \mathcal {N} \left(d _ {k}; \sqrt {1 - \beta_ {k}} d _ {k - 1}, \beta_ {k} \mathbf {I}\right) +$$ + +where $\{\beta_{k}\in (0,1)\}_{k = 1}^{K}$ denotes a set of fixed variance controllers that control the scale of the injected noise at different steps. According to Eq. (1), we can derive $q(d_k|d_0)$ in closed form as: + +$$ +q \left(d _ {k} \mid d _ {0}\right) = \mathcal {N} \left(d _ {k}; \sqrt {\bar {\alpha} _ {k}} d _ {0}, (1 - \bar {\alpha} _ {k}) \mathbf {I}\right) \tag {2} +$$ + +where $\alpha_{k} = 1 - \beta_{k}$ and $\overline{\alpha}_k = \prod_{s = 1}^k\alpha_s$ . Based on Eq. (2), $d_{k}$ can be further expressed as: + +$$ +d _ {k} = \sqrt {\bar {\alpha} _ {k}} d _ {0} + \sqrt {1 - \bar {\alpha} _ {k}} \epsilon \tag {3} +$$ + +where $\epsilon \sim \mathcal{N}(\mathbf{0},\mathbf{I})$ . From Eq. (3), we can observe that when the number of diffusion steps $K$ is sufficiently large and $\overline{\alpha}_K$ correspondingly decreases to nearly zero, the distribution of $d_K$ is approximately a standard Gaussian distribution, i.e., $d_K \sim \mathcal{N}(\mathbf{0},\mathbf{I})$ . This means $d_0$ is gradually corrupted into Gaussian noise, which conforms to the nonequilibrium thermodynamics phenomenon of the diffusion process [50]. + +Reverse Process. With the intermediate step results $\{d_k\}_{k=1}^{K-1}$ acquired in the forward process, the diffusion + +model is trained to learn to perform the reverse process. Specifically, in the reverse process, each step can be formulated as a function $f$ that takes $d_{k}$ and the diffusion model $M_{diff}$ as inputs and generate $d_{k-1}$ as the output, i.e., $d_{k-1} = f(d_{k}, M_{diff})$ . + +After training the diffusion model, during inference, we do not need to conduct the forward process. Instead, we only conduct the reverse process, which converts a random Gaussian noise $d_{K} \sim \mathcal{N}(\mathbf{0},\mathbf{I})$ into a sample $d_0$ of the desired distribution using the trained diffusion model. + +# 3.2. Proposed Framework + +Similar to previous works [21, 44, 53], our framework predicts 6D object poses via a two-stage pipeline. Specifically, (i) we first select $N$ 3D keypoints on the object CAD model and detect the corresponding $N$ 2D keypoints in the image; (ii) we then compute the 6D pose using a PnP solver. Here we mainly focus on the first stage and aim to produce more accurate keypoint detection results. + +When detecting 2D keypoints, factors like occlusions and cluttered backgrounds can bring noise and indeterminacy into this process, and affect the accuracy of detection results [21, 44]. To handle this problem, inspired by that diffusion models can iteratively reduce indeterminacy and noise in the initial distribution (e.g., standard Gaussian distribution) to generate determinate and high-quality samples of the desired distribution [11, 12], we formulate keypoints detection as generating a determinate distribution of keypoints coordinates $(D_0)$ from an indeterminate initial distribution $(D_K)$ via a diffusion model. 
+ +Moreover, to effectively adapt to the 6D object pose estimation task, the diffusion model in our framework does not start the reverse process from the common initial distribution (i.e., the standard Gaussian distribution) as in most existing diffusion works [7, 18, 52]. Instead, inspired by recent 6D object pose estimation works [4, 5, 61], we first extract an intermediate representation (e.g., heatmaps), and use this representation to initialize a keypoints coordinates distribution (i.e., $D_K$ ), which will serve as the starting point of the reverse process. Such an intermediate representation encodes useful prior distribution information about keypoints coordinates. Thus by starting the reverse process from this representation, we effectively exploit the distribution priors in the representation to aid the diffusion model in recovering accurate keypoints coordinates [11]. Below, we first describe how we initialize the keypoints distribution $D_K$ , and then discuss the corresponding forward and reverse processes in our new framework. + +Keypoints Distribution Initialization. We initialize the keypoints coordinates distribution $D_K$ with extracted heatmaps. Specifically, similar to [29, 34, 53], we first use an off-the-shelf object detector (e.g., Faster RCNN [49]) to detect the bounding box of the target object, and then crop + +the detected Region of Interest (ROI) from the input image. We send the ROI into a sub-network (i.e., the keypoints distribution initializer) to predict a number of heatmaps where each heatmap corresponds to one 2D keypoint. We then normalize each heatmap to convert it to a probability distribution. In this way, each normalized heatmap naturally represents the distribution of the corresponding keypoint coordinates, and thus we can use these heatmaps to initialize $D_K$ . + +Forward Process. After distribution initialization, the next step is to iteratively reduce the noise and indeterminacy in the initialized distribution $D_K$ by performing the reverse process $(D_K \to D_{K-1} \to \ldots \to D_0)$ . To train the diffusion model to perform such a reverse process, we need to obtain the distributions generated along the way (i.e., $\{D_k\}_{k=1}^{K-1}$ ) as the supervision signals. Thus, we first need to conduct the forward process to obtain samples from $\{D_k\}_{k=1}^{K-1}$ . Specifically, given the ground-truth keypoints coordinates distribution $D_0$ , we define the forward process as: $D_0 \to D_1 \to \ldots \to D_K$ , where $K$ is the number of diffusion steps. In this forward process, we iteratively add noise to the determinate distribution $D_0$ , i.e., increasing the indeterminacy of generated distributions, to transform it into the initialized distribution $D_K$ with indeterminacy. Via this process, we can generate $\{D_k\}_{k=1}^{K-1}$ along the way and use them as supervision signals to train the diffusion model to perform the reverse process. + +However, in our framework, we do not aim to transform the ground-truth keypoints coordinates distribution $D_0$ towards a standard Gaussian distribution via the forward process, because our initialized distribution $D_K$ is not a random noise. Instead, as discussed before, $D_K$ is initialized with heatmaps (as shown in Fig. 3), since the heatmaps can provide rough estimations about the keypoints coordinates distribution. 
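(As a minimal, hypothetical sketch rather than the authors' released code: each normalized heatmap can be treated as a categorical distribution over pixel locations, so sets of candidate keypoint coordinates can be drawn from $D_K$; such sampled sets are what the MoC fitting below and the test-time reverse process operate on. The function and variable names here are illustrative only.)

```python
import torch

def sample_keypoint_sets(heatmaps: torch.Tensor, num_sets: int) -> torch.Tensor:
    """Draw sets of 2D keypoint coordinates from normalized heatmaps.

    heatmaps: (N, H, W) tensor; each of the N maps sums to 1 and is treated
              as a categorical distribution over pixel locations.
    Returns a (num_sets, N, 2) tensor of sampled (x, y) pixel coordinates.
    """
    n, h, w = heatmaps.shape
    probs = heatmaps.reshape(n, h * w)                           # flatten each map
    idx = torch.multinomial(probs, num_sets, replacement=True)   # (N, num_sets)
    xs = (idx % w).float()
    ys = torch.div(idx, w, rounding_mode="floor").float()
    return torch.stack([xs, ys], dim=-1).permute(1, 0, 2)        # (num_sets, N, 2)
```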
To effectively utilize such priors in $D_K$ to facilitate the reverse process, we aim to enable the diffusion model to start the reverse process (denoising process) from $D_K$ instead of random Gaussian noise [11]. Thus, the basic forward process (described in Sec. 3.1) in existing generative diffusion models is not suitable in our framework, which motivates us to design a new forward process for our task. + +However, it is non-trivial to design such a forward process, as the initialized distribution $D_K$ is based on extracted heatmaps, and thus $D_K$ can be complex and irregular, as shown in Fig. 4. Hence modeling $D_K$ as a Gaussian distribution can result in potentially large errors. To handle this challenge, motivated by that the Mixture of Cauchy (MoC) model can effectively and reliably characterize complex and intractable distributions [26], we leverage MoC to characterize $D_K$ . Based on the characterized distribution, we can then perform a corresponding MoC-based forward process. + +Specifically, we denote the number of Cauchy kernels + +![](images/437c5b1d10ef8e5b67324541356b9f8aca372444fd4742e3719751b343fb62ea.jpg) +Figure 3. Illustration of our framework. During testing, given an input image, we first crop the Region of Interest (ROI) from the image through an object detector. After that, we feed the cropped ROI to the keypoints distribution initializer to obtain the heatmaps that can provide useful distribution priors about keypoints, to initialize $D_K$ . Meanwhile, we can obtain object appearance features $f_{\mathrm{app}}$ . Next, we pass $f_{\mathrm{app}}$ into the encoder, and the output of the encoder will serve as conditional information to aid the reverse process in the decoder. We sample $M$ sets of 2D keypoints coordinates from $D_K$ , and feed these $M$ sets of coordinates into the decoder to perform the reverse process iteratively together with the step embedding $f_D^k$ . At the final reverse step ( $K$ -th step), we average $\{d_0^i\}_{i=1}^M$ as the final keypoints coordinates prediction $d_0$ , and use $d_0$ to compute the 6D pose with the pre-selected 3D keypoints via a PnP solver. + +in the MoC distribution as $U$ , and use the Expectation-Maximum-type (EM) algorithm [26, 55] to optimize the MoC parameters $\eta^{\mathrm{MoC}}$ to characterize the distribution $D_K$ as: + +$$ +\eta_ {*} ^ {\mathrm {M o C}} = \operatorname {E M} \left(\prod_ {v = 1} ^ {V} \sum_ {u = 1} ^ {U} \pi_ {u} \operatorname {C a u c h y} \left(d _ {K} ^ {v} \mid \mu_ {u}, \gamma_ {u}\right)\right) \tag {4} +$$ + +where $\{d_K^v\}_{v = 1}^V$ denotes $V$ sets of keypoints coordinates sampled from the distribution $D_{K}$ . Note each set of keypoints coordinates $d_K^v$ contains all the $N$ keypoints coordinates (i.e., $d_K^v\in \mathbb{R}^{N\times 2}$ ). $\pi_u$ denotes the weight of the $u$ -th Cauchy kernel ( $\sum_{u = 1}^{U}\pi_{u} = 1$ ), and $\eta^{\mathrm{MoC}} = \{\mu_1,\gamma_1,\dots,\mu_U,\gamma_U\}$ denotes the MoC parameters in which $\mu_{u}$ and $\gamma_{u}$ are the location and scale of the $u$ -th Cauchy kernel. Via the above optimization, we can use the optimized parameters $\eta_*^{\mathrm{MoC}}$ to model $D_K$ as the characterized distribution $(\hat{D}_K)$ . Given $\hat{D}_K$ , we aim to conduct the forward process from the ground-truth keypoints coordinates distribution $D_0$ , so that after $K$ steps of forward diffusion, the generated distribution reaches $\hat{D}_K$ . To this end, we modify Eq. 
(3) as follows: + +$$ +\hat {d} _ {k} = \sqrt {\overline {{\alpha}} _ {k}} d _ {0} + (1 - \sqrt {\overline {{\alpha}} _ {k}}) \mu^ {\mathrm {M o C}} + \sqrt {1 - \overline {{\alpha}} _ {k}} \epsilon^ {\mathrm {M o C}} \qquad (5) +$$ + +where $\hat{d}_k\in \mathbb{R}^{N\times 2}$ represents a sample (i.e., a set of $N$ keypoints coordinates) from the generated distribution $\tilde{D}_k$ , $\mu^{\mathrm{MoC}} = \sum_{u = 1}^{U}\mathbb{1}_{u}\mu_{u}$ , and $\epsilon^{\mathrm{MoC}}\sim$ Cauchy(0, $\sum_{u = 1}^{U}(\mathbb{1}_{u}\gamma_{u})$ ). Note that $\mathbb{1}_u$ is a zero-one indicator and $\sum_{u = 1}^{U}\mathbb{1}_u = 1$ and $\operatorname {Prob}(\mathbb{1}_u = 1) = \pi_u$ . + +From Eq. (5), we can observe that when $K$ is sufficiently large and $\overline{\alpha}_K$ correspondingly decreases to nearly zero, the distribution of $\hat{d}_K$ reaches the MoC distribution, i.e., $\hat{d}_K = \mu^{\mathrm{MoC}} + \epsilon^{\mathrm{MoC}}\sim \mathrm{Cauchy}(\sum_{u = 1}^{U}(\mathbb{1}_{u}\mu_{u}),\sum_{u = 1}^{U}(\mathbb{1}_{u}\gamma_{u}))$ . + +ter the above MoC-based forward process, we can use the generated $\{\hat{D}_k\}_{k=1}^{K-1}$ as supervision signals to train the diffusion model $M_{\mathrm{diff}}$ to learn the reverse process. More details about Eq. (5) can be found in Supplementary material. Such a forward process is only conducted to generate supervision signals for training the diffusion model, while we only need to conduct the reverse process during testing. + +Reverse Process. In the reverse process, we aim to recover a desired determinate keypoints distribution $D_0$ from the initial distribution $D_K$ . As discussed above, we characterize $D_K$ via a MoC model and then generate $\{\hat{D}_k\}_{k=1}^{K-1}$ as supervision signals to optimize the diffusion model to learn to perform the reverse process $(\hat{D}_K \to \hat{D}_{K-1} \to \dots \to D_0)$ , in which the model iteratively reduces the noise and indeterminacy in $\hat{D}_K$ to generate $D_0$ . + +However, it can still be difficult to generate $D_0$ by directly performing the reverse process from $\hat{D}_K$ , because the object appearance features are lacking in $\hat{D}_K$ . Such features can help constrain the model reverse process based on the input image to get accurate predictions. Thus we further leverage the appearance features from the image as context to guide $M_{\mathrm{diff}}$ in the reverse process. Specifically, we reuse the features extracted from the keypoints distribution initializer as the appearance features $f_{\mathrm{app}}$ and feed $f_{\mathrm{app}}$ into the diffusion model, as shown in Fig. 3. + +Our reverse process aims to generate a determinate distribution $D_0$ from the indeterminate distribution $\hat{D}_K$ (during training) or $D_K$ (during testing). Below we describe the reverse process during testing. We first obtain $f_{\mathrm{app}}$ from the input image. Then to help the diffusion model to learn to perform denoising at each reverse step, following [18, 52], we generate the unique step embedding $f_D^k$ to inject the step number $(k)$ information into the model. 
Reverse Process. In the reverse process, we aim to recover a desired determinate keypoints distribution $D_0$ from the initial distribution $D_K$. As discussed above, we characterize $D_K$ via a MoC model and then generate $\{\hat{D}_k\}_{k=1}^{K-1}$ as supervision signals to optimize the diffusion model to learn to perform the reverse process $(\hat{D}_K \to \hat{D}_{K-1} \to \dots \to D_0)$, in which the model iteratively reduces the noise and indeterminacy in $\hat{D}_K$ to generate $D_0$. + +However, it can still be difficult to generate $D_0$ by directly performing the reverse process from $\hat{D}_K$, because the object appearance features are lacking in $\hat{D}_K$; such features can help constrain the reverse process on the input image so that the model obtains accurate predictions. Thus, we further leverage the appearance features from the image as context to guide $M_{\mathrm{diff}}$ in the reverse process. Specifically, we reuse the features extracted from the keypoints distribution initializer as the appearance features $f_{\mathrm{app}}$ and feed $f_{\mathrm{app}}$ into the diffusion model, as shown in Fig. 3. + +Our reverse process aims to generate a determinate distribution $D_0$ from the indeterminate distribution $\hat{D}_K$ (during training) or $D_K$ (during testing). Below we describe the reverse process during testing. We first obtain $f_{\mathrm{app}}$ from the input image. Then, to help the diffusion model perform denoising at each reverse step, following [18, 52], we generate a unique step embedding $f_D^k$ to inject the step number ($k$) information into the model. In this way, given a set of noisy keypoints coordinates $d_{k}\in \mathbb{R}^{N\times 2}$ drawn from $D_{k}$ at the $k$-th step, we use the diffusion model $M_{\mathrm{diff}}$, conditioned on the step embedding $f_{D}^{k}$ and the object appearance features $f_{\mathrm{app}}$, to recover $d_{k-1}$ from $d_{k}$ as: + +$$ +d_{k-1} = M_{\mathrm{diff}}\left(d_{k}, f_{\mathrm{app}}, f_{D}^{k}\right) \tag{6} +$$ + +# 3.3. Training and Testing + +Training. Following [44], we first select $N$ 3D keypoints from the surface of the object CAD model using the farthest point sampling (FPS) algorithm. Then we conduct the training process in the following two stages. + +In the first stage, to initialize the distribution $D_K$, we optimize the keypoints distribution initializer. Specifically, for each training sample, given the pre-selected $N$ 3D keypoints, we can obtain the ground-truth coordinates of the corresponding $N$ 2D keypoints using the ground-truth 6D object pose. Then, for each keypoint, based on the corresponding ground-truth coordinates, we generate a ground-truth heatmap following [42] for training the initializer. Thus, for each training sample, we generate $N$ ground-truth heatmaps. In this way, the loss function $L_{\mathrm{init}}$ for optimizing the initializer can be formulated as: + +$$ +L_{\mathrm{init}} = \left\| \mathbf{H}_{\mathrm{pred}} - \mathbf{H}_{\mathrm{GT}} \right\|_{2}^{2} \tag{7} +$$ + +where $\mathbf{H}_{\mathrm{pred}}$ and $\mathbf{H}_{\mathrm{GT}}$ denote the predicted heatmaps and ground-truth heatmaps, respectively. + +In the second stage, we optimize the diffusion model $M_{\mathrm{diff}}$. For each training sample, to optimize $M_{\mathrm{diff}}$, we perform the following steps. (1) We first send the input image into an off-the-shelf object detector [57] and then feed the detected ROI into the trained initializer to obtain $N$ heatmaps. Meanwhile, we can also obtain $f_{\mathrm{app}}$. (2) We use the $N$ predicted heatmaps to initialize $D_K$, and leverage the EM-type algorithm to characterize $D_K$ as a MoC distribution $\hat{D}_K$. (3) Based on $\hat{D}_K$, we use the ground-truth keypoints coordinates $d_0$ to directly generate $M$ sets of $(\hat{d}_1, \dots, \hat{d}_K)$ (i.e., $\{\hat{d}_1^i, \dots, \hat{d}_K^i\}_{i=1}^M$) via the forward process (Eq. (5)). (4) Then, we optimize the diffusion model $M_{\mathrm{diff}}$ to recover $\hat{d}_{k-1}^i$ from $\hat{d}_k^i$ iteratively. Following previous diffusion works [18, 52], we formulate the loss $L_{\mathrm{diff}}$ for optimizing $M_{\mathrm{diff}}$ as follows ($\hat{d}_0^i = d_0$ for all $i$): + +$$ +L_{\mathrm{diff}} = \sum_{i=1}^{M} \sum_{k=1}^{K} \left\| M_{\mathrm{diff}}\left(\hat{d}_{k}^{i}, f_{\mathrm{app}}, f_{D}^{k}\right) - \hat{d}_{k-1}^{i} \right\|_{2}^{2} \tag{8} +$$ + +Testing. During testing, for each testing sample, by feeding the input image to the object detector and the keypoints distribution initializer consecutively, we can initialize $D_K$ and meanwhile obtain $f_{\mathrm{app}}$. Then, we perform the reverse process. During the reverse process, we sample $M$ sets of noisy keypoints coordinates from $D_K$ (i.e., $\{d_K^i\}_{i=1}^M$) and feed them into the trained diffusion model. Here we sample $M$ sets of keypoints coordinates because we are converting one distribution $(D_K)$ into another distribution $(D_0)$. + +Then the model iteratively performs the reverse steps.
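A minimal sketch of this test-time reverse loop is given below, assuming a trained denoiser `m_diff` that implements one step of Eq. (6); the NumPy-style interface and the function name are illustrative only.

```python
import numpy as np

def reverse_inference(m_diff, d_K, f_app, K=100):
    """Run the reverse process D_K -> ... -> D_0 and return the averaged prediction d_0.

    m_diff(d_k, f_app, k) -> d_{k-1} performs one denoising step (Eq. 6), conditioned
    on the appearance features f_app and the step index k (mapped to the step
    embedding f_D^k inside the model). d_K holds the M sets of noisy keypoint
    coordinates sampled from D_K, with shape (M, N, 2).
    """
    d = np.asarray(d_K, dtype=np.float64)
    for k in range(K, 0, -1):                              # iterate the K reverse steps
        d = np.stack([m_diff(d[i], f_app, k) for i in range(d.shape[0])])
    return d.mean(axis=0)                                  # average the M predictions
```

The averaged coordinates are then paired with the pre-selected 3D keypoints and passed to a PnP solver to obtain the 6D pose; for instance, OpenCV's `cv2.solvePnPRansac(kpts_3d, d_0, cam_K, None)` could serve as a simple stand-in for the PnP solver.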
After $K$ reverse diffusion steps, we obtain $M$ sets of predicted keypoints coordinates (i.e., $\{d_0^i\}_{i=1}^M$). To obtain the final keypoints coordinates prediction $d_{0}$, we compute the mean of the $M$ predictions. Finally, we can solve for the 6D object pose using a PnP solver, as in [44, 53]. + +# 3.4. Model Architecture + +Our framework mainly consists of the diffusion model $(M_{\mathrm{diff}})$ and the keypoints distribution initializer. + +Diffusion Model $M_{\mathrm{diff}}$. As illustrated in Fig. 3, our proposed diffusion model $M_{\mathrm{diff}}$ mainly consists of a transformer encoder-decoder architecture. The appearance features $f_{\mathrm{app}}$ are fed into the encoder to extract context information that aids the reverse process in the decoder. $f_{D}^{k}$ and $\{d_k^i\}_{i=1}^M$ (or $\{\hat{d}_k^i\}_{i=1}^M$ during training) are fed into the decoder to perform the reverse process. Both the encoder and the decoder contain a stack of three transformer layers. + +More specifically, as for the encoder part, we first map $f_{\mathrm{app}} \in \mathbb{R}^{16 \times 16 \times 512}$ through a $1 \times 1$ convolution layer to a latent embedding $e_{\mathrm{app}} \in \mathbb{R}^{16 \times 16 \times 128}$. To retain the spatial information, following [59], we further incorporate positional encodings into $e_{\mathrm{app}}$. Afterwards, we flatten $e_{\mathrm{app}}$ into a feature sequence $(\mathbb{R}^{256 \times 128})$ and send it into the encoder. The encoder output $f_{\mathrm{enc}}$, which contains the extracted object information, is then sent into the decoder to aid the reverse process. Note that during testing, for each sample, we only need to conduct the above computation once to obtain the corresponding $f_{\mathrm{enc}}$. + +The decoder part iteratively performs the reverse process. For notational simplicity, below we describe the reverse process for a single sample $d_{k}$ instead of the $M$ samples $\left(\{d_1^i,\dots,d_K^i\}_{i=1}^M\right)$. Specifically, at the $k$-th reverse step, to inject the current step number ($k$) information into the decoder, we first generate the step embedding $f_{D}^{k}\in \mathbb{R}^{1\times 128}$ using the sinusoidal function following [18, 52]. Meanwhile, we use an FC layer to map the input $d_{k}\in \mathbb{R}^{N\times 2}$ to a latent embedding $e_k\in \mathbb{R}^{N\times 128}$. Then we concatenate $f_{D}^{k}$ and $e_k$ along the first dimension and send the result into the decoder. By interacting with the encoder output $f_{\mathrm{enc}}$ (the extracted object information) via cross-attention at each layer, the decoder produces $f_{\mathrm{dec}}$, which is further mapped into the keypoints coordinates prediction $d_{k-1}\in \mathbb{R}^{N\times 2}$ via an FC layer. Then we send $d_{k-1}$ back to the decoder as the input to perform the next reverse step. An illustrative sketch of this encoder-decoder is given below. + +Keypoints Distribution Initializer. The initializer adopts a ResNet-34 backbone, which is commonly used in 6D pose estimation methods [4, 53, 61].
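The following PyTorch-style sketch shows how the $M_{\mathrm{diff}}$ encoder-decoder described above could be assembled. The dimensions follow this subsection (128-dimensional embeddings, three transformer layers on each side, a 256-token appearance sequence, and an FC layer on each side of the decoder), while the number of attention heads, the use of learned positional embeddings, the channels-first layout of $f_{\mathrm{app}}$, and the names `DiffusionDenoiser` and `step_embedding` are assumptions made purely for illustration.

```python
import math
import torch
import torch.nn as nn

def step_embedding(k, dim=128):
    """Sinusoidal embedding of diffusion step indices k; returns shape (B, 1, dim)."""
    k = torch.as_tensor(k, dtype=torch.float32).reshape(-1)                 # (B,)
    half = dim // 2
    freqs = torch.exp(-math.log(10000.0) * torch.arange(half, dtype=torch.float32) / half)
    ang = k[:, None] * freqs[None, :]                                       # (B, half)
    return torch.cat([torch.sin(ang), torch.cos(ang)], dim=-1)[:, None, :]  # (B, 1, dim)

class DiffusionDenoiser(nn.Module):
    """Transformer encoder-decoder sketch of M_diff (dimensions as in Sec. 3.4)."""

    def __init__(self, d_model=128, n_layers=3, n_heads=4):
        super().__init__()
        self.app_proj = nn.Conv2d(512, d_model, kernel_size=1)      # f_app -> e_app
        self.pos = nn.Parameter(torch.zeros(1, 16 * 16, d_model))   # learned positional embeddings
        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model, n_heads, batch_first=True), n_layers)
        self.decoder = nn.TransformerDecoder(
            nn.TransformerDecoderLayer(d_model, n_heads, batch_first=True), n_layers)
        self.kpt_in = nn.Linear(2, d_model)       # d_k -> e_k
        self.kpt_out = nn.Linear(d_model, 2)      # decoder features f_dec -> d_{k-1}

    def encode(self, f_app):
        """f_app: (B, 512, 16, 16) -> f_enc: (B, 256, d_model); run once per test image."""
        e_app = self.app_proj(f_app).flatten(2).transpose(1, 2)     # (B, 256, d_model)
        return self.encoder(e_app + self.pos)

    def forward(self, d_k, f_enc, k):
        """One reverse step (Eq. 6): noisy coordinates (B, N, 2) and step k -> (B, N, 2)."""
        e_k = self.kpt_in(d_k)                                       # (B, N, d_model)
        f_d = step_embedding(k, e_k.shape[-1]).to(d_k.device)        # step token f_D^k
        tgt = torch.cat([f_d, e_k], dim=1)                           # prepend the step token
        f_dec = self.decoder(tgt, f_enc)                             # cross-attends to f_enc
        return self.kpt_out(f_dec[:, 1:, :])                         # drop the step token

# toy usage: a single denoising step on random inputs
model = DiffusionDenoiser()
f_enc = model.encode(torch.randn(1, 512, 16, 16))
d_prev = model(torch.rand(1, 128, 2), f_enc, torch.tensor([100]))
```

During the second training stage, such a module would be supervised with the per-step objective of Eq. (8), i.e. a squared error between `model(d_hat_k, f_enc, k)` and $\hat{d}_{k-1}$, accumulated over the $M$ sampled sets and the $K$ steps.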
To generate heatmaps to initialize the distribution $D_K$ , we add two deconvolution layers followed by a $1 \times 1$ convolution layer after the ResNet-34 backbone, and then we obtain predicted heatmaps $\mathbf{H}_{\mathrm{pred}} \in \mathbb{R}^{N \times \frac{H}{4} \times \frac{W}{4}}$ where $H$ and $W$ denote the height and width of the input ROI image respec + +![](images/b61da41b8944ffeb55074eed5c6f67f97a2a423b9bee6f0569047e61a97a81c9.jpg) +input image + +![](images/0b4b9acb5c85467644b3dd29dc4d8ebc1a7f72cded7d35668f0c854a9786e635.jpg) +Figure 4. Visualization of the denoising process of a sample with our framework. In this example, the target object is the yellow duck and for clarity, we here show three keypoints only. The red dots indicate the ground-truth locations of these three keypoints. The noisy heatmap before denoising reflects that factors like occlusions and clutter in the scene can introduce noise and indeterminacy when detecting keypoints. As shown, our diffusion model can effectively and smoothly reduce the noise and indeterminacy in the initial distribution step by step, finally recovering a high-quality and determinate distribution of keypoints coordinates. (Better viewed in color) + +![](images/04c376050a06a7e26e7256b49519f8986a0b703a6aa603b8969c3eed2c6e5963.jpg) + +![](images/c7f21552bc1e952f449a1464eca28832e2ab5f7c261b243fc3d30f4403e51998.jpg) + +![](images/0def7fb1407240b98f521abcde04b9fa3bf1679cada464ac1d6b5e5a723577cb.jpg) + +Table 1. Comparisons with RGB-based 6D object pose estimation methods on the LM-O dataset. (*) denotes symmetric objects. + +
MethodPVNet [44]HybridPose [51]RePose [24]DeepIM [33]GDR-Net [61]SO-Pose [8]CRT-6D [4]ZebraPose [53]CheckerPose [35]Ours
ape15.820.931.159.246.848.453.457.958.360.6
can63.375.380.063.590.885.892.095.095.797.9
cat16.724.925.626.240.532.742.060.662.363.2
driller65.770.273.155.682.677.481.494.893.796.6
duck25.227.943.052.446.948.944.964.569.967.2
eggbox*50.252.451.763.054.252.462.770.970.073.5
glue*49.653.854.371.775.878.380.288.786.492.0
holepuncher39.754.253.652.560.175.374.383.083.885.5
Mean40.847.551.655.562.262.366.376.977.579.6
+ +Table 2. Comparisons with RGB-based 6D object pose estimation methods on the YCB-V dataset. (-) indicates the corresponding result is not reported in the original paper. + +
MethodADD(-S)AUC of ADD-SAUC of ADD(-S)
SegDriven[21]39.0--
SingleStage[22]53.9--
CosyPose [29]-89.884.5
RePose [24]62.188.582.0
GDR-Net [61]60.191.684.4
SO-Pose [8]56.890.983.9
ZebraPose [53]80.590.185.3
CheckerPose [35]81.491.386.4
Ours83.891.587.0
+ +tively. Moreover, the features outputted by the ResNet-34 backbone, combined with features obtained from methods [35, 53], are used as the object features $f_{\mathrm{app}}$ . + +# 4. Experiments + +# 4.1. Datasets & Evaluation Metrics + +Given that previous works [8, 24, 67] have reported the evaluation accuracy over $95\%$ on the Linemod (LM) dataset [17], the performance on this dataset has become saturated. Thus recent works [4, 53] mainly focus on using the LM-O dataset [2] and the YCB-V dataset [63] that are more challenging, which we follow. + +LM-O Dataset. The Linemod Occlusion (LM-O) dataset contains 1214 images and is a challenging subset of the LM dataset. In this dataset, around 8 objects are annotated on each image and the objects are often heavily occluded. Following [4, 53], we use both the real images from the LM dataset and the publicly available physically-based rendering (pbr) images [6] as the training images for LM-O. Following [53, 61], on LM-O dataset, we evaluate the model performance using the commonly-used ADD(-S) metric. + +For this metric, we compute the mean distance between the model points transformed using the predicted pose and the same model points transformed using the ground-truth pose. For symmetric objects, following [63], the mean distance is computed based on the closest point distance. If the mean distance is less than $10\%$ of the model diameter, the predicted pose is regarded as correct. + +YCB-V Dataset. The YCB-V dataset is a large-scale dataset containing 21 objects and over 100k real images. The samples in this dataset often exhibit occlusions and cluttered backgrounds. Following [4, 53], we use both the real images from the training set of the YCB-V dataset and the publicly available pbr images as the training images for YCB-V. Following [53, 61], we evaluate the model performance using the following metrics: ADD(-S), AUC (Area Under the Curve) of ADD-S, and AUC of ADD(-S). For calculating AUC, we set the maximum distance threshold to $10\mathrm{cm}$ following [63]. + +# 4.2. Implementation Details + +We conduct our experiments on an Nvidia V100 GPU. We set the number of pre-selected 3D keypoints $N$ to 128. During training, following [34, 53], we utilize the dynamic zoom-in strategy to produce augmented ROI images. During testing, we use the detected bounding box with Faster RCNN [49] and FCOS [57] provided by CDPNv2 [34]. The cropped ROI image is resized to the shape of $3 \times 256 \times 256$ ( $H = W = 256$ ). We characterize $D_K$ via a MoC model with 9 Cauchy kernels ( $U = 9$ ) for the forward diffusion process. We optimize the diffusion model $M_{\mathrm{diff}}$ for 1500 epochs using the Adam optimizer [28] with an initial learning rate of 4e-5. Moreover, we set the number of sampled sets $M$ to 5, and the number of diffusion steps $K$ to + +![](images/b903ced34bd62194809d38eeea7ebb4deac90adda7a6ec6b6b1dd15de31bce23.jpg) + +![](images/f6f8a61b91946b5b4da0d6bd2058734361d49f5d4f4dad94c810fad530f14455.jpg) + +![](images/1392556afa4f233e07d526d0ac630e2647172bb4c7341fb837a8ae7e94d8e0fe.jpg) + +![](images/9413cf6a51c687734673050f9721c34d24fda513a1c75409289e7c7fd8e3ec01.jpg) + +![](images/f861959908df876c547b3fda9766273dec454dc9782689ad252270167f42ff13.jpg) +Figure 5. Qualitative results. Green bounding boxes represent the ground-truth poses and blue bounding boxes represent the predicted poses of our method. 
As shown, even facing severe occlusions, clutter in the scene or varying environment, our framework can still accurately recover the object poses, showing the effectiveness of our method for handling the noise and indeterminacy caused by various factors in object pose estimation. + +![](images/bed618827a36a469dfd3434e06bbbf66692ee482a9da0015ba12e6b8f6a884bf.jpg) + +![](images/19561a8956a2b49d227d14d1059e3c4bf04463a967e6703eeb7c05efe7904dd0.jpg) + +![](images/40ce7df9b492104dfd35e3a68336179bde926a73fbd8a86faa6c71e5fd6b68ba.jpg) + +100. Following [53], we use Progressive-X [1] as the PnP solver. Note that during testing, instead of performing the reverse process with all the $K$ steps, we accelerate the process with DDIM [52], a recently proposed diffusion acceleration method. With DDIM acceleration, we only need to perform 10 steps to finish the reverse process during testing. + +# 4.3. Comparison with State-of-the-art Methods + +Results on LM-O Dataset. As shown in Tab. 1, compared to existing methods, our method achieves the best mean performance, showing the superiority of our method. We also show qualitative results on the LM-O dataset in Fig. 5. As shown, even in the presence of large occlusions (including self-occlusions) and cluttered backgrounds, our method still produces accurate predictions. + +Results on YCB-V Dataset. As shown in Tab. 2, our framework achieves the best performance on both the ADD(-S) and the AUC of ADD(-S) metrics, and is comparable to the state-of-the-art method on the AUC of ADD-S metric, showing the effectiveness of our method. + +# 4.4. Ablation Studies + +We conduct extensive ablation experiments on the LM-O dataset, and we report the model performance on ADD(-S) metric averaged over all the objects. + +Impact of denoising process. In our framework, we predict keypoints coordinates via performing the denoising process. To evaluate the efficacy of this process, we test three variants. In the first variant (Variant A), we remove the diffusion model + +Table 3. Evaluation on the effectiveness of the denoising process. + +
MethodADD(-S)
Variant A49.2
Variant B57.3
Variant C61.1
6D-Diff79.6
+ +$M_{\mathrm{diff}}$ and predict keypoints coordinates directly from the heatmaps produced by the keypoints distribution initializer. The second variant (Variant $B$ ) has the same model architec + +ture as our framework, but the diffusion model is optimized to directly predict the coordinates instead of learning the reverse process. Same as Variant $B$ , the third variant (Variant $C$ ) is also optimized to directly predict coordinates without denoising process. For Variant $C$ , we stack our diffusion model structure multiple times to produce a deep network, which has similar computation complexity with our framework. As shown in Tab. 3, compared to our framework, the performance of these variants significantly drops, showing that the effectiveness of our framework mainly lies in the designed denoising process. + +Impact of object appearance features $f_{\mathrm{app}}$ In our framework, we send the appearance features $f_{\mathrm{app}}$ into the diffusion model $M_{\mathrm{diff}}$ to aid the reverse process. To evaluate its effect, + +Table 4. Evaluation on the effectiveness of the object appearance features $f_{\mathrm{app}}$ + +
MethodADD(-S)
w/o fapp74.4
6D-Diff79.6
+ +we test a variant in which we do not send $f_{\mathrm{app}}$ into $M_{\mathrm{diff}}$ ( $w / o \, f_{\mathrm{app}}$ ). As shown in Tab. 4, our framework performs better than this variant, showing that $f_{\mathrm{app}}$ can aid $M_{\mathrm{diff}}$ to get more accurate predictions. + +Impact of MoC design. During training, we model the distribution $D_K$ from the intermediate representation + +Table 5. Evaluation on the effectiveness of the MoC design. + +
MethodADD(-S)
Standard diffusion w/o MoC73.1
Heatmaps as condition76.2
6D-Diff79.6
+ +(heatmaps) as a MoC distribution $\hat{D}_K$ , and train the diffusion model $M_{\mathrm{diff}}$ to perform the reverse process from $\hat{D}_K$ . To investigate the impact of this design, we evaluate two variants that train $M_{\mathrm{diff}}$ in different ways. In the first variant (Standard diffusion w/o MoC), we train the model to start the reverse process from the standard Gaussian noise, i.e., following the basic forward process in Eq. (3) for model training. In the second variant (Heatmaps as condition), we still train the model to start denoising from the random Gaussian noise but we use the heatmaps as the condition for the reverse process. As shown in Tab. 5, our framework consistently outperforms both variants, showing effectiveness of the designed MoC-based forward process. + +# 5. Conclusion + +In this paper, we proposed a novel diffusion-based 6D object pose estimation framework, which effectively handles noise and indeterminacy in object pose estimation. In our framework, we formulate object keypoints detection as a carefully-designed reverse diffusion process. We design a novel MoC-based forward process to effectively utilize the distribution priors in intermediate representations. Our framework achieves superior performance. + +Acknowledgement. This work was supported by the National Research Foundation Singapore under the AI Singapore Programme (Award Number: AISG-100E-2023-121). + +# References + +[1] Daniel Barath and Jiri Matas. Progressive-x: Efficient, anytime, multi-model fitting algorithm. In Proceedings of the IEEE/CVF international conference on computer vision, pages 3780-3788, 2019. 8 +[2] Eric Brachmann, Frank Michel, Alexander Krull, Michael Ying Yang, Stefan Gumhold, et al. Uncertainty-driven 6d pose estimation of objects and scenes from a single rgb image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3364-3372, 2016. 7 +[3] Benjamin Busam, Marco Esposito, Simon Che'Rose, Nassir Navab, and Benjamin Frisch. A stereo vision approach for cooperative robotic movement therapy. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 127-135, 2015. 1 +[4] Pedro Castro and Tae-Kyun Kim. Crt-6d: Fast 6d object pose estimation with cascaded refinement transformers. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 5746-5755, 2023. 1, 2, 3, 4, 6, 7 +[5] Bo Chen, Alvaro Parra, Jiewei Cao, Nan Li, and Tat-Jun Chin. End-to-end learnable geometric vision by backpropagating pnp optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8100-8109, 2020. 1, 2, 3, 4 +[6] Maximilian Denninger, Martin Sundermeyer, Dominik Winkelbauer, Youssef Zidan, Dmitry Olefir, Mohamad Elbadrawy, Ahsan Lodhi, and Harinandan Katam. Blenderproc. arXiv preprint arXiv:1911.01911, 2019. 7 +[7] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021. 1, 2, 3, 4 +[8] Yan Di, Fabian Manhardt, Gu Wang, Xiangyang Ji, Nassir Navab, and Federico Tombari. So-pose: Exploiting self-occlusion for direct 6d pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12396–12405, 2021. 1, 7 +[9] Lin Geng Foo, Hossein Rahmani, and Jun Liu. Aigc for various data modalities: A survey. arXiv preprint arXiv:2308.14177, 2023. 3 +[10] Xiao-Shan Gao, Xiao-Rong Hou, Jianliang Tang, and Hang-Fei Cheng. 
Complete solution classification for the perspective-three-point problem. IEEE transactions on pattern analysis and machine intelligence, 25(8):930-943, 2003. 2 +[11] Jia Gong, Lin Geng Foo, Zhipeng Fan, Qiuhong Ke, Hossein Rahmani, and Jun Liu. Diffpose: Toward more reliable 3d pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13041-13051, 2023. 1, 3, 4 +[12] Tianpei Gu, Guangyi Chen, Junlong Li, Chunze Lin, Yongming Rao, Jie Zhou, and Jiwen Lu. Stochastic trajectory prediction via motion indeterminacy diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17113-17122, 2022. 1, 3, 4 + +[13] Shuxuan Guo, Yinlin Hu, Jose M Alvarez, and Mathieu Salzmann. Knowledge distillation for 6d pose estimation by aligning distributions of local predictions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18633-18642, 2023. 3 +[14] Yang Hai, Rui Song, Jiaojiao Li, and Yinlin Hu. Shape-constraint recurrent flow for 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4831-4840, 2023. +[15] Yang Hai, Rui Song, Jiaojiao Li, Mathieu Salzmann, and Yinlin Hu. Rigidity-aware detection for 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8927-8936, 2023. +[16] Rasmus Laurvig Haugaard and Anders Glent Buch. Surfemb: Dense and continuous correspondence distributions for object pose estimation with learnt surface embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6749-6758, 2022. 3 +[17] Stefan Hinterstoisser, Vincent Lepetit, Slobodan Ilic, Stefan Holzer, Gary Bradski, Kurt Konolige, and Nassir Navab. Model based training, detection and pose estimation of texture-less 3d objects in heavily cluttered scenes. In Computer Vision-ACCV 2012: 11th Asian Conference on Computer Vision, Daejeon, Korea, November 5-9, 2012, Revised Selected Papers, Part I 11, pages 548-562. Springer, 2013. 7 +[18] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, pages 6840-6851. Curran Associates, Inc., 2020. 1, 2, 3, 4, 5, 6 +[19] Tomas Hodan, Daniel Barath, and Jiri Matas. Epos: Estimating 6d pose of objects with symmetries. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11703-11712, 2020. 1, 3 +[20] Tsu-Ching Hsiao, Hao-Wei Chen, Hsuan-Kung Yang, and Chun-Yi Lee. Confronting ambiguity in 6d object pose estimation via score-based diffusion on se (3). arXiv preprint arXiv:2305.15873, 2023. 3 +[21] Yinlin Hu, Joachim Hugonot, Pascal Fua, and Mathieu Salzmann. Segmentation-driven 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3385-3394, 2019. 4, 7 +[22] Yinlin Hu, Pascal Fua, Wei Wang, and Mathieu Salzmann. Single-stage 6d object pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2930-2939, 2020. 1, 3, 7 +[23] Shun Iwase, Xingyu Liu, Rawal Khirodkar, Rio Yokota, and Kris M. Kitani. Repose: Fast 6d object pose refinement via deep texture rendering. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 3303-3312, 2021. 3 +[24] Shun Iwase, Xingyu Liu, Rawal Khirodkar, Rio Yokota, and Kris M Kitani. 
Repose: Fast 6d object pose refinement via deep texture rendering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3303-3312, 2021. 7 + +[25] Haobo Jiang, Mathieu Salzmann, Zheng Dang, Jin Xie, and Jian Yang. Se (3) diffusion model-based point cloud registration for robust 6d object pose estimation. Advances in Neural Information Processing Systems, 36, 2024. 3 +[26] Zakiah I. Kalantan and Jochen Einbeck. Quantile-based estimation of the finite cauchy mixture model. Symmetry, 11 (9), 2019. 2, 4, 5 +[27] Wadim Kehl, Fabian Manhardt, Federico Tombari, Slobodan Ilic, and Nassir Navab. Ssd-6d: Making rgb-based 3d detection and 6d pose estimation great again. In Proceedings of the IEEE international conference on computer vision, pages 1521–1529, 2017. 1, 3 +[28] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.7 +[29] Yann Labbe, Justin Carpentier, Mathieu Aubry, and Josef Sivic. Cosypose: Consistent multi-view multi-object 6d pose estimation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVII 16, pages 574-591. Springer, 2020. 4, 7 +[30] Junhyeok Lee, Junghwa Kang, Yoonho Nam, and TaeYoung Lee. Bias field correction in MRI with hampel noise denoising diffusion probabilistic model. In Medical Imaging with Deep Learning, short paper track, 2023. 3 +[31] Vincent Lepetit, Francesc Moreno-Noguer, and Pascal Fua. Ep n p: An accurate o (n) solution to the p np problem. International journal of computer vision, 81:155-166, 2009. 2 +[32] Hongyang Li, Jiehong Lin, and Kui Jia. Dcl-net: Deep correspondence learning network for 6d pose estimation. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part IX, pages 369-385. Springer, 2022. 3 +[33] Yi Li, Gu Wang, Xiangyang Ji, Yu Xiang, and Dieter Fox. Deepim: Deep iterative matching for 6d pose estimation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 683-698, 2018. 3, 7 +[34] Zhigang Li, Gu Wang, and Xiangyang Ji. Cdpn: Coordinates-based disentangled pose network for real-time rgb-based 6-dof object pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7678-7687, 2019. 4, 7 +[35] Ruyi Lian and Haibin Ling. Checkerpose: Progressive dense keypoint localization for object pose estimation with graph neural network. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14022-14033, 2023. 7 +[36] Xingyu Liu, Ruida Zhang, Chenyangguang Zhang, Bowen Fu, Jiwen Tang, Xiquan Liang, Jingyi Tang, Xiaotian Cheng, Yukang Zhang, Gu Wang, and Xiangyang Ji. Gdnpp. https://github.com/shanice-1/gdrnpp_bop2022, 2022.3 +[37] Andreas Lugmayr, Martin Danelljan, Andres Romero, Fisher Yu, Radu Timofte, and Luc Van Gool. Repaint: Inpainting using denoising diffusion probabilistic models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11461-11471, 2022. 3 + +[38] Fabian Manhardt, Wadim Kehl, Nassir Navab, and Federico Tombari. Deep model-based 6d pose refinement in rgb. In The European Conference on Computer Vision (ECCV), 2018. 3 +[39] Eric Marchand, Hideaki Uchiyama, and Fabien Spindler. Pose estimation for augmented reality: a hands-on survey. IEEE transactions on visualization and computer graphics, 22(12):2633-2651, 2015. 1 +[40] Jianhan Mei, Xudong Jiang, and Henghui Ding. 
Spatial feature mapping for 6 dof object pose estimation. Pattern Recognition, 131:108835, 2022. 1 +[41] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. Sdedit: Guided image synthesis and editing with stochastic differential equations. In International Conference on Learning Representations, 2021. 1, 3 +[42] Markus Oberweger, Mahdi Rad, and Vincent Lepetit. Making deep heatmaps robust to partial occlusions for 3d object pose estimation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 119-134, 2018. 3, 6 +[43] Kiru Park, Timothy Patten, and Markus Vincze. Pix2pose: Pixel-wise coordinate regression of objects for 6d pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7668-7677, 2019. 3 +[44] Sida Peng, Yuan Liu, Qixing Huang, Xiaowei Zhou, and Hujun Bao. Pvnet: Pixel-wise voting network for 6dof pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4561-4570, 2019. 1, 2, 3, 4, 6, 7 +[45] Luis Pérez, Inigo Rodríguez, Nuria Rodríguez, Rubén Usamentiaga, and Daniel F García. Robot guidance using machine vision techniques in industrial environments: A comparative review. Sensors, 16(3):335, 2016. 1 +[46] Mahdi Rad and Vincent Lepetit. Bb8: A scalable, accurate, robust to partial occlusion method for predicting the 3d poses of challenging objects without using depth. In Proceedings of the IEEE international conference on computer vision, pages 3828-3836, 2017. 3 +[47] Jason Raphael Rambach, Alain Pagani, Michael Schneider, Oleksandr Artemenko, and Didier Stricker. 6dof object tracking based on 3d scans for augmented reality remote live support. Comput., 7:6, 2018. 1 +[48] Hong Ren, Lin Lin, Yanjie Wang, and Xin Dong. Robust 6-dof pose estimation under hybrid constraints. Sensors, 22 (22):8758, 2022. 3 +[49] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 4, 7 +[50] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pages 2256-2265. PMLR, 2015. 2, 3 +[51] Chen Song, Jiaru Song, and Qixing Huang. Hybridpose: 6d object pose estimation under hybrid representations. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 431-440, 2020. 7 + +[52] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021. 1, 2, 3, 4, 5, 6, 8 +[53] Yongzhi Su, Mahdi Saleh, Torben Fetzer, Jason Rambach, Nassir Navab, Benjamin Busam, Didier Stricker, and Federico Tombari. Zebrapose: Coarse to fine surface encoding for 6 dof object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6738-6748, 2022. 1, 3, 4, 6, 7, 8 +[54] Martin Sundermeyer, Zoltán-Csaba Marton, Maximilian Durner, Manuel Brucker, and Rudolph Triebel. Implicit 3d orientation learning for 6d object detection from rgb images. In European Conference on Computer Vision, 2018. 3 +[55] Mahdi Teimouri. Statistical inference for mixture of cauchy distributions. arXiv preprint arXiv:1809.05722, 2018. 5 +[56] Bugra Tekin, Sudipta N Sinha, and Pascal Fua. Real-time seamless single shot 6d object pose prediction. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 292-301, 2018. 3 +[57] Zhi Tian, Chunhua Shen, Hao Chen, and Tong He. Fcos: Fully convolutional one-stage object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9627-9636, 2019. 6, 7 +[58] Julien Urain, Niklas Funk, Jan Peters, and Georgia Chalvatzaki. Se(3)-diffusionfields: Learning smooth cost functions for joint grasp and motion optimization through diffusion. IEEE International Conference on Robotics and Automation (ICRA), 2023. 3 +[59] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 6 +[60] Gu Wang, Fabian Manhardt, Xingyu Liu, Xiangyang Ji, and Federico Tombari. Occlusion-aware self-supervised monocular 6d object pose estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 1 +[61] Gu Wang, Fabian Manhardt, Federico Tombari, and Xi-angyang Ji. Gdr-net: Geometry-guided direct regression network for monocular 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16611-16621, 2021. 1, 3, 4, 6, 7 +[62] Di Wu, Zhaoyong Zhuang, Canqun Xiang, Wenbin Zou, and Xia Li. 6d-vnet: End-to-end 6-dof vehicle pose estimation from monocular rgb images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 1 +[63] Yu Xiang, Tanner Schmidt, Venkatraman Narayanan, and Dieter Fox. PoseCNN: A convolutional neural network for 6d object pose estimation in cluttered scenes. 2018. 1, 3, 7 +[64] Yan Xu, Kwan-Yee Lin, Guofeng Zhang, Xiaogang Wang, and Hongsheng Li. Rnnpose: Recurrent 6-dof object pose refinement with robust correspondence field estimation and pose optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022. 1, 3 +[65] Heng Yang and Marco Pavone. Object pose estimation with statistical guarantees: Conformal keypoint detection + +and geometric uncertainty propagation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8947-8958, 2023. +[66] Jun Yang, Wenjie Xue, Sahar Ghavidel, and Steven L Waslander. 6d pose estimation for textureless objects on rgb frames using multi-view optimization. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 2905-2912. IEEE, 2023. +[67] Sergey Zakharov, Ivan S. Shugurov, and Slobodan Ilic. Dpod: 6d pose object detector and refiner. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 1941-1950, 2019. 
3, 7 \ No newline at end of file diff --git a/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/images.zip b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..97be7dea26e43d51d3eede816b545ef87873f8e0 --- /dev/null +++ b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:514bd0da1fc1c6235c16c43df3266d84359a1a0616660b8f28d36bf83cc2761d +size 310212 diff --git a/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/layout.json b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a84c0522278fd24d19aca7af4b153ae3eb220791 --- /dev/null +++ b/2024/6D-Diff_ A Keypoint Diffusion Framework for 6D Object Pose Estimation/layout.json @@ -0,0 +1,12364 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 69, + 103, + 523, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 103, + 523, + 120 + ], + "spans": [ + { + "bbox": [ + 69, + 103, + 523, + 120 + ], + "type": "text", + "content": "6D-Diff: A Keypoint Diffusion Framework for 6D Object Pose Estimation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "spans": [ + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "text", + "content": "Li Xu" + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "text", + "content": " Haoxuan Qu" + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "text", + "content": " Yujun Cai" + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "text", + "content": " Jun Liu" + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "text", + "content": "Singapore University of Technology and Design \n" + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 173, + 141, + 419, + 185 + ], + "type": "text", + "content": "Nanyang Technological University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 134, + 187, + 457, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 187, + 457, + 213 + ], + "spans": [ + { + "bbox": [ + 134, + 187, + 457, + 213 + ], + "type": "text", + "content": "{li_xu, haoxuan_qu}@mysmail.sutd.edu.sg, yujun001@e.ntu.edu.sg, jun.liu@sutd.edu.sg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 252, + 192, + 265 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 252, + 192, + 265 + ], + "spans": [ + { + "bbox": [ + 143, + 252, + 192, + 265 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 278, + 290, + 483 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 46, + 278, + 290, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 278, + 290, + 483 + ], + "type": "text", + "content": "Estimating the 6D object pose from a single RGB image often involves noise and indeterminacy due to challenges such as occlusions and cluttered backgrounds. Meanwhile, diffusion models have shown appealing performance in generating high-quality images from random noise with high indeterminacy through step-by-step denoising. Inspired by their denoising capability, we propose a novel diffusion-based framework (6D-Diff) to handle the noise and indeterminacy in object pose estimation for better performance. In our framework, to establish accurate 2D-3D correspondence, we formulate 2D keypoints detection as a reverse diffusion (denoising) process. To facilitate such a denoising process, we design a Mixture-of-Cauchy-based forward diffusion process and condition the reverse process on the object appearance features. Extensive experiments on the LM-O and YCB-V datasets demonstrate the effectiveness of our framework." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 505, + 128, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 505, + 128, + 517 + ], + "spans": [ + { + "bbox": [ + 47, + 505, + 128, + 517 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 525, + 287, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 525, + 287, + 681 + ], + "spans": [ + { + "bbox": [ + 46, + 525, + 287, + 681 + ], + "type": "text", + "content": "6D object pose estimation aims to estimate the 6D pose of an object including its location and orientation, which has a wide range of applications, such as augmented reality [39, 47], robotic manipulation [3, 45], and automatic driving [62]. Recently, various methods [4, 5, 19, 22, 27, 44, 53, 61, 64] have been proposed to conduct RGB-based 6D object pose estimation since RGB images are easy to obtain. Despite the increased efforts, a variety of challenges persist in RGB-based 6D object pose estimation, including occlusions, cluttered backgrounds, and changeable environments [8, 40, 44, 60, 63]. These challenges can introduce significant noise and indeterminacy into the pose estimation process, leading to error-prone predictions [8, 40, 44]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 59, + 682, + 287, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 682, + 287, + 694 + ], + "spans": [ + { + "bbox": [ + 59, + 682, + 287, + 694 + ], + "type": "text", + "content": "Meanwhile, diffusion models [18, 52] have achieved ap" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 308, + 240, + 545, + 324 + ], + "blocks": [ + { + "bbox": [ + 308, + 240, + 545, + 324 + ], + "lines": [ + { + "bbox": [ + 308, + 240, + 545, + 324 + ], + "spans": [ + { + "bbox": [ + 308, + 240, + 545, + 324 + ], + "type": "image", + "image_path": "008ea54ca7496cefe0d23c976582eb6ff19287170f86f620cff90d37b7f76d12.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 337, + 546, + 416 + ], + "lines": [ + { + "bbox": [ + 304, + 337, + 546, + 416 + ], + "spans": [ + { + "bbox": [ + 304, + 337, + 546, + 416 + ], + "type": "text", + "content": "Figure 1. Overview of our proposed 6D-Diff framework. 
As shown, given the 3D keypoints from the object 3D CAD model, we aim to detect the corresponding 2D keypoints in the image to obtain the 6D object pose. Note that when detecting keypoints, there are often challenges such as occlusions (including self-occlusions) and cluttered backgrounds that can introduce noise and indeterminacy into the process, impacting the accuracy of pose prediction." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 426, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 426, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 426, + 547, + 715 + ], + "type": "text", + "content": "pealing results in various generation tasks such as image synthesis [7, 18] and image editing [41]. Specifically, diffusion models are able to recover high-quality determinate samples (e.g., clean images) from a noisy and indeterminate input data distribution (e.g., random noise) via a step-by-step denoising process [18, 52]. Motivated by such a strong denoising capability [11, 12, 18], we aim to leverage diffusion models to handle the RGB-based 6D object pose estimation task, since this task also involves tackling noise and indeterminacy. However, it can be difficult to directly use diffusion models to estimate the object pose, because diffusion models often start denoising from random Gaussian noise [18, 52]. Meanwhile, in RGB-based 6D object pose estimation, the object pose is often extracted from an intermediate representation, such as keypoint heatmaps [5], pixel-wise voting vectors [44], or object surface keypoint features [4]. Such an intermediate representation encodes useful distribution priors about the object pose. Thus starting denoising from such an representation shall effectively assist the diffusion model in recovering accurate object poses [11]. To achieve this, we propose a novel diffusion-based object pose estimation framework (6D-Diff) that can exploit prior distribution knowledge from the intermediate representation for better performance." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 702, + 212, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 702, + 212, + 713 + ], + "spans": [ + { + "bbox": [ + 61, + 702, + 212, + 713 + ], + "type": "text", + "content": "† Equal contribution; ‡ Corresponding author" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "9676" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": "Overall, our framework is a correspondence-based framework, in which to predict an object pose, given the 3D keypoints pre-selected from the object 3D CAD model, we first predict the coordinates of the 2D image keypoints corresponding to the pre-selected 3D keypoints. We then use the 3D keypoints together with the predicted 2D keypoints coordinates to compute the 6D object pose using a Perspective-n-Point (PnP) solver [10, 31]. As shown in Fig. 1, to predict the 2D keypoints coordinates, we first extract an intermediate representation (the 2D keypoints heatmaps) through a keypoints distribution initializer. As discussed before, due to various factors, there often exists noise and indeterminacy in the keypoints detection process and the extracted heatmaps can be noisy as shown in Fig. 2. Thus we pass the distribution modeled from these keypoints heatmaps into a diffusion model to perform the denoising process to obtain the final keypoints coordinates prediction." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 279, + 289, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 279, + 289, + 495 + ], + "spans": [ + { + "bbox": [ + 47, + 279, + 289, + 495 + ], + "type": "text", + "content": "Analogous to non-equilibrium thermodynamics [50], given a 2D image keypoint, we can consider all its possible locations in the image as particles in thermodynamics. Under low indeterminacy, the particles (possible locations) w.r.t. each 2D keypoint gather, and each keypoint can be determinately and accurately localized. In contrast, under high indeterminacy, these particles can stochastically spread over the input image, and it is difficult to localize each keypoint. The process of converting particles from low indeterminacy to high indeterminacy is called the forward process of the diffusion model. The goal of the diffusion model is to reverse the above forward process (through a reverse process), i.e., converting the particles from high indeterminacy to low indeterminacy. Here in our case, we aim to convert the indeterminate keypoints coordinates distribution modeled from the heatmaps into the determinate distribution. Below we briefly introduce the forward process and the reverse process in our diffusion model." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 498, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 498, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 498, + 289, + 715 + ], + "type": "text", + "content": "In the forward process, we aim to generate supervision signals that will be used to optimize the diffusion model during the reverse process. Specifically, given a set of pre-selected 3D keypoints, we first acquire ground-truth coordinates of their corresponding 2D keypoints using the ground-truth object pose. Then these determinate ground-truth 2D coordinates are gradually diffused towards the indeterminate distribution modeled from the intermediate representation, and the distributions generated along the way will be used as supervision signals. Note that, as the distribution modeled from the intermediate representation can be complex and irregular, it is difficult to characterize such a distribution via the Gaussian distribution. This means that simply applying diffusion models in most existing generation works [7, 18, 52], which start denoising from the random Gaussian noise, can introduce potentially large errors. To tackle this challenge, we draw inspiration from the fact that the Mixture of Cauchy (MoC) model can effectively char" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 331, + 72, + 520, + 193 + ], + "blocks": [ + { + "bbox": [ + 331, + 72, + 520, + 193 + ], + "lines": [ + { + "bbox": [ + 331, + 72, + 520, + 193 + ], + "spans": [ + { + "bbox": [ + 331, + 72, + 520, + 193 + ], + "type": "table", + "html": "
3D CAD modelImageHeatmap
(a)
(b)
", + "image_path": "2e07f0c6a7b355d4d6990059a2f244260c20f27c9a178a6394542083d017d832.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 194, + 547, + 292 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 194, + 547, + 292 + ], + "spans": [ + { + "bbox": [ + 304, + 194, + 547, + 292 + ], + "type": "text", + "content": "Figure 2. Above we show two examples of keypoint heatmaps, which serve as the intermediate representation [4, 5, 44] in our framework. The red dots indicate the ground-truth locations of the keypoints. In the example (a), the target object is the pink cat, which is heavily occluded in the image and is shown in a different pose compared to the 3D model. As shown above, due to occlusions and cluttered backgrounds, the keypoint heatmaps are noisy, which reflects the noise and indeterminacy during the keypoints detection process." + } + ] + } + ], + "index": 4, + "type": "text" + }, + { + "bbox": [ + 304, + 313, + 547, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 313, + 547, + 411 + ], + "spans": [ + { + "bbox": [ + 304, + 313, + 547, + 411 + ], + "type": "text", + "content": "acterize complex and intractable distributions. Moreover, the MoC model is robust to potential outliers in the distribution to be characterized [26]. Thus we propose to model the intermediate representation using a MoC distribution instead of simply treating it as a random Gaussian noise. In this way, we gradually diffuse the determinate distribution (ground truth) of keypoints coordinates towards the modeled MoC distribution during the forward process." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 418, + 548, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 418, + 548, + 598 + ], + "spans": [ + { + "bbox": [ + 304, + 418, + 548, + 598 + ], + "type": "text", + "content": "Correspondingly, in the reverse process, starting from the MoC distribution modeled in the forward process, we aim to learn to recover the ground-truth keypoints coordinates. To achieve this, we leverage the distributions generated step-by-step during the forward process as the supervision signals to train the diffusion model to learn the reverse process. In this way, the diffusion model can learn to convert the indeterminate MoC distribution of keypoints coordinates into a determinate one smoothly and effectively. After the reverse process, the 2D keypoints coordinates obtained from the final determinate distribution are used to compute the 6D object pose with the pre-selected 3D keypoints. Moreover, we further facilitate the model learning of such a reverse process by injecting object appearance features as context information." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 605, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 548, + 715 + ], + "type": "text", + "content": "Our work makes the following contributions. 1) We propose a novel 6D-Diff framework, in which we formulate keypoints detection for 6D object pose estimation as a reverse diffusion process to effectively eliminate the noise and indeterminacy in object pose estimation. 2) To take advantage of the intermediate representation that encodes useful prior distribution knowledge for handling this task, we propose a novel MoC-based diffusion process. 
Besides, we facilitate the model learning by utilizing object features." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "9677" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 134, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 134, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 134, + 83 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 97, + 289, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 97, + 289, + 217 + ], + "spans": [ + { + "bbox": [ + 46, + 97, + 289, + 217 + ], + "type": "text", + "content": "RGB-based 6D Object Pose Estimation has received a lot of attention [4, 13-16, 23, 32, 33, 36, 38, 43, 44, 46, 53, 54, 56, 63-67]. Some works [22, 27, 61, 63] proposed to directly regress object poses. However, the non-linearity of the rotation space makes direct regression of object poses difficult [32]. Compared to this type of direct methods, correspondence-based methods [5, 19, 43, 44, 46, 53, 56] often demonstrate better performance, which estimate 6D object poses via learning 2D-3D correspondences between the observed image and the object 3D model." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 223, + 289, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 223, + 289, + 426 + ], + "spans": [ + { + "bbox": [ + 46, + 223, + 289, + 426 + ], + "type": "text", + "content": "Among correspondence-based methods, several works [42, 44, 46, 48, 56] aim to predict the 2D keypoints coordinates corresponding to specific 3D keypoints. BB8 [46] proposed to detect the 2D keypoints corresponding to the 8 corners of the object's 3D bounding box. Later, PVNet [44] achieved better performance by estimating 2D keypoints for sampled points on the surface of the object 3D model via pixel-wise voting. Moreover, various methods [19, 43, 53, 61, 67] establish 2D-3D correspondences by localizing the 3D model point corresponding to each observed object pixel. Among these methods, DPOD [67] explored the use of UV texture maps to facilitate model training, and ZebraPose [53] proposed to encode the surface of the object 3D model efficiently through a hierarchical binary grouping. Besides, several pose refinement methods [23, 33, 38, 64] have been proposed, which conducted pose refinement given an initial pose estimation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 433, + 289, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 433, + 289, + 529 + ], + "spans": [ + { + "bbox": [ + 46, + 433, + 289, + 529 + ], + "type": "text", + "content": "In this paper, we also regard object pose estimation as a 2D-3D correspondence estimation problem. Different from previous works, here by formulating 2D-3D correspondence estimation as a distribution transformation process (denoising process), we propose a new framework (6D-Diff) that trains a diffusion model to perform progressive denoising from an indeterminate keypoints distribution to the desired keypoints distribution with low indeterminacy." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 533, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 289, + 714 + ], + "type": "text", + "content": "Diffusion Models [7, 9, 18, 50, 52] are originally introduced for image synthesis. Showing appealing generation capabilities, diffusion models have also been explored in various other tasks [11, 12, 20, 25, 30, 37, 41, 58], such as image editing [41] and image inpainting [37]. Here we explore a new framework that tackles object pose estimation with a diffusion model. Different from previous generation works [7, 37, 41] that start denoising from random noise, to aid the denoising process for 6D object pose estimation, we design a novel MoC-based diffusion mechanism that enables the diffusion model to start denoising from a distribution containing useful prior distribution knowledge regarding the object pose. Moreover, we condition the denoising process on the object appearance features, to further guide the diffusion model to obtain accurate predictions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 71, + 362, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 362, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 362, + 84 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 91, + 547, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 547, + 200 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 547, + 200 + ], + "type": "text", + "content": "To handle the noise and indeterminacy in RGB-based 6D object pose estimation, inspired by [11], from a novel perspective of distribution transformation with progressive denoising, we propose a framework (6D-Diff) that represents a new brand of diffusion-based solution for 6D object pose estimation. Below we first revisit diffusion models in Sec. 3.1. Then we discuss our proposed framework in Sec. 3.2, and introduce its training and testing scheme in Sec. 3.3. We finally detail the model architecture in Sec. 3.4." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 206, + 459, + 219 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 206, + 459, + 219 + ], + "spans": [ + { + "bbox": [ + 305, + 206, + 459, + 219 + ], + "type": "text", + "content": "3.1. Revisiting Diffusion Models" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "spans": [ + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "text", + "content": "The diffusion model [18, 52], which is a kind of probabilistic generative model, consists of two parts, namely the forward process and the reverse process. 
Specifically, given an original sample " + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "text", + "content": " (e.g., a clean image), the process of diffusing the sample " + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "text", + "content": " iteratively towards the noise (typically Gaussian noise) " + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "inline_equation", + "content": "d_K \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})" + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "inline_equation", + "content": "d_0 \\to d_1 \\to \\ldots \\to d_K" + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "text", + "content": ") is called the forward process. In contrast, the process of denoising the noise " + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "inline_equation", + "content": "d_K" + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "text", + "content": " iteratively towards the sample " + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "inline_equation", + "content": "d_K \\to d_{K-1} \\to \\ldots \\to d_0" + }, + { + "bbox": [ + 304, + 224, + 547, + 344 + ], + "type": "text", + "content": ") is called the reverse process. Each process is defined as a Markov chain." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 344, + 547, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 344, + 547, + 429 + ], + "spans": [ + { + "bbox": [ + 304, + 344, + 547, + 429 + ], + "type": "text", + "content": "Forward Process. To obtain supervision signals for training the diffusion model to learn to perform the reverse process in a stepwise manner, we need to acquire the intermediate step results " + }, + { + "bbox": [ + 304, + 344, + 547, + 429 + ], + "type": "inline_equation", + "content": "\\{d_k\\}_{k=1}^{K-1}" + }, + { + "bbox": [ + 304, + 344, + 547, + 429 + ], + "type": "text", + "content": ". Thus the forward process is first performed to generate these intermediate step results for training purpose. 
Specifically, the posterior distribution " + }, + { + "bbox": [ + 304, + 344, + 547, + 429 + ], + "type": "inline_equation", + "content": "q(d_{1:K}|d_0)" + }, + { + "bbox": [ + 304, + 344, + 547, + 429 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 304, + 344, + 547, + 429 + ], + "type": "inline_equation", + "content": "d_1" + }, + { + "bbox": [ + 304, + 344, + 547, + 429 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 344, + 547, + 429 + ], + "type": "inline_equation", + "content": "d_K" + }, + { + "bbox": [ + 304, + 344, + 547, + 429 + ], + "type": "text", + "content": " is formulated as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 339, + 430, + 545, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 430, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 339, + 430, + 545, + 464 + ], + "type": "interline_equation", + "content": "q \\left(d _ {1: K} \\mid d _ {0}\\right) = \\prod_ {k = 1} ^ {K} q \\left(d _ {k} \\mid d _ {k - 1}\\right) \\tag {1}", + "image_path": "cfe1fd6c8b1415ceacd888aff386c4d446cedf81e021c86b6abf3dee65fc8189.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 340, + 466, + 512, + 481 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 466, + 512, + 481 + ], + "spans": [ + { + "bbox": [ + 340, + 466, + 512, + 481 + ], + "type": "interline_equation", + "content": "q \\left(d _ {k} \\mid d _ {k - 1}\\right) = \\mathcal {N} \\left(d _ {k}; \\sqrt {1 - \\beta_ {k}} d _ {k - 1}, \\beta_ {k} \\mathbf {I}\\right)", + "image_path": "6faeb704f73ab03104ff7fc2b7524b51f26bbcd7fa33eb6588875b77f20b1f6c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 483, + 547, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 547, + 530 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 547, + 530 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 483, + 547, + 530 + ], + "type": "inline_equation", + "content": "\\{\\beta_{k}\\in (0,1)\\}_{k = 1}^{K}" + }, + { + "bbox": [ + 304, + 483, + 547, + 530 + ], + "type": "text", + "content": " denotes a set of fixed variance controllers that control the scale of the injected noise at different steps. According to Eq. 
(1), we can derive " + }, + { + "bbox": [ + 304, + 483, + 547, + 530 + ], + "type": "inline_equation", + "content": "q(d_k|d_0)" + }, + { + "bbox": [ + 304, + 483, + 547, + 530 + ], + "type": "text", + "content": " in closed form as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 341, + 533, + 545, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 533, + 545, + 548 + ], + "spans": [ + { + "bbox": [ + 341, + 533, + 545, + 548 + ], + "type": "interline_equation", + "content": "q \\left(d _ {k} \\mid d _ {0}\\right) = \\mathcal {N} \\left(d _ {k}; \\sqrt {\\bar {\\alpha} _ {k}} d _ {0}, (1 - \\bar {\\alpha} _ {k}) \\mathbf {I}\\right) \\tag {2}", + "image_path": "1eceb907037a796224d789f2b9181c68cb7d51f379ffa16d1e9b5a2addedd4f8.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 551, + 545, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 551, + 545, + 576 + ], + "spans": [ + { + "bbox": [ + 304, + 551, + 545, + 576 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 551, + 545, + 576 + ], + "type": "inline_equation", + "content": "\\alpha_{k} = 1 - \\beta_{k}" + }, + { + "bbox": [ + 304, + 551, + 545, + 576 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 551, + 545, + 576 + ], + "type": "inline_equation", + "content": "\\overline{\\alpha}_k = \\prod_{s = 1}^k\\alpha_s" + }, + { + "bbox": [ + 304, + 551, + 545, + 576 + ], + "type": "text", + "content": " . Based on Eq. (2), " + }, + { + "bbox": [ + 304, + 551, + 545, + 576 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 304, + 551, + 545, + 576 + ], + "type": "text", + "content": " can be further expressed as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 366, + 578, + 545, + 591 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 366, + 578, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 366, + 578, + 545, + 591 + ], + "type": "interline_equation", + "content": "d _ {k} = \\sqrt {\\bar {\\alpha} _ {k}} d _ {0} + \\sqrt {1 - \\bar {\\alpha} _ {k}} \\epsilon \\tag {3}", + "image_path": "69f0abb3faf06e976df8a504a9218e3b6b214d197a3ec8dd6c8ff9956c8135aa.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "inline_equation", + "content": "\\epsilon \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})" + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "text", + "content": ". From Eq. 
(3), we can observe that when the number of diffusion steps " + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "text", + "content": " is sufficiently large and " + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "inline_equation", + "content": "\\overline{\\alpha}_K" + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "text", + "content": " correspondingly decreases to nearly zero, the distribution of " + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "inline_equation", + "content": "d_K" + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "text", + "content": " is approximately a standard Gaussian distribution, i.e., " + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "inline_equation", + "content": "d_K \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})" + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "text", + "content": ". This means " + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 304, + 594, + 547, + 689 + ], + "type": "text", + "content": " is gradually corrupted into Gaussian noise, which conforms to the nonequilibrium thermodynamics phenomenon of the diffusion process [50]." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 689, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 547, + 715 + ], + "type": "text", + "content": "Reverse Process. With the intermediate step results " + }, + { + "bbox": [ + 306, + 689, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\{d_k\\}_{k=1}^{K-1}" + }, + { + "bbox": [ + 306, + 689, + 547, + 715 + ], + "type": "text", + "content": " acquired in the forward process, the diffusion" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9678" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "text", + "content": "model is trained to learn to perform the reverse process. 
Specifically, in the reverse process, each step can be formulated as a function " + }, + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "text", + "content": " that takes " + }, + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "text", + "content": " and the diffusion model " + }, + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "inline_equation", + "content": "M_{diff}" + }, + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "text", + "content": " as inputs and generate " + }, + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "inline_equation", + "content": "d_{k-1}" + }, + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "text", + "content": " as the output, i.e., " + }, + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "inline_equation", + "content": "d_{k-1} = f(d_{k}, M_{diff})" + }, + { + "bbox": [ + 46, + 72, + 286, + 133 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 133, + 286, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 133, + 286, + 192 + ], + "spans": [ + { + "bbox": [ + 46, + 133, + 286, + 192 + ], + "type": "text", + "content": "After training the diffusion model, during inference, we do not need to conduct the forward process. Instead, we only conduct the reverse process, which converts a random Gaussian noise " + }, + { + "bbox": [ + 46, + 133, + 286, + 192 + ], + "type": "inline_equation", + "content": "d_{K} \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})" + }, + { + "bbox": [ + 46, + 133, + 286, + 192 + ], + "type": "text", + "content": " into a sample " + }, + { + "bbox": [ + 46, + 133, + 286, + 192 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 46, + 133, + 286, + 192 + ], + "type": "text", + "content": " of the desired distribution using the trained diffusion model." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 201, + 171, + 215 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 201, + 171, + 215 + ], + "spans": [ + { + "bbox": [ + 47, + 201, + 171, + 215 + ], + "type": "text", + "content": "3.2. Proposed Framework" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 221, + 286, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 221, + 286, + 304 + ], + "spans": [ + { + "bbox": [ + 46, + 221, + 286, + 304 + ], + "type": "text", + "content": "Similar to previous works [21, 44, 53], our framework predicts 6D object poses via a two-stage pipeline. Specifically, (i) we first select " + }, + { + "bbox": [ + 46, + 221, + 286, + 304 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 221, + 286, + 304 + ], + "type": "text", + "content": " 3D keypoints on the object CAD model and detect the corresponding " + }, + { + "bbox": [ + 46, + 221, + 286, + 304 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 221, + 286, + 304 + ], + "type": "text", + "content": " 2D keypoints in the image; (ii) we then compute the 6D pose using a PnP solver. Here we mainly focus on the first stage and aim to produce more accurate keypoint detection results." 
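The two-stage pipeline described in the extracted paragraph above (detect N 2D keypoints matching N pre-selected 3D model keypoints, then solve for the pose) ends in a standard PnP step. Below is a minimal illustrative sketch of that second stage using OpenCV's solvePnP; the keypoint arrays and camera intrinsics are hypothetical placeholders, and this is not the authors' implementation.

```python
# Sketch of the second stage: recover the 6D pose from N 2D-3D keypoint
# correspondences with a PnP solver. All values below are placeholders.
import cv2
import numpy as np

N = 8
kpts_3d = np.random.rand(N, 3).astype(np.float32)        # pre-selected 3D keypoints (object frame)
kpts_2d = (np.random.rand(N, 2) * 256).astype(np.float32) # predicted 2D keypoints (pixels)
cam_K = np.array([[572.4, 0.0, 128.0],
                  [0.0, 573.6, 128.0],
                  [0.0, 0.0, 1.0]], dtype=np.float32)     # hypothetical camera intrinsics

ok, rvec, tvec = cv2.solvePnP(kpts_3d, kpts_2d, cam_K, None, flags=cv2.SOLVEPNP_EPNP)
R, _ = cv2.Rodrigues(rvec)                                 # rotation part of the 6D pose
print(ok, R.shape, tvec.ravel())
```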
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 305, + 286, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 305, + 286, + 437 + ], + "spans": [ + { + "bbox": [ + 46, + 305, + 286, + 437 + ], + "type": "text", + "content": "When detecting 2D keypoints, factors like occlusions and cluttered backgrounds can bring noise and indeterminacy into this process, and affect the accuracy of detection results [21, 44]. To handle this problem, inspired by that diffusion models can iteratively reduce indeterminacy and noise in the initial distribution (e.g., standard Gaussian distribution) to generate determinate and high-quality samples of the desired distribution [11, 12], we formulate keypoints detection as generating a determinate distribution of keypoints coordinates " + }, + { + "bbox": [ + 46, + 305, + 286, + 437 + ], + "type": "inline_equation", + "content": "(D_0)" + }, + { + "bbox": [ + 46, + 305, + 286, + 437 + ], + "type": "text", + "content": " from an indeterminate initial distribution " + }, + { + "bbox": [ + 46, + 305, + 286, + 437 + ], + "type": "inline_equation", + "content": "(D_K)" + }, + { + "bbox": [ + 46, + 305, + 286, + 437 + ], + "type": "text", + "content": " via a diffusion model." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 437, + 286, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 286, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 286, + 652 + ], + "type": "text", + "content": "Moreover, to effectively adapt to the 6D object pose estimation task, the diffusion model in our framework does not start the reverse process from the common initial distribution (i.e., the standard Gaussian distribution) as in most existing diffusion works [7, 18, 52]. Instead, inspired by recent 6D object pose estimation works [4, 5, 61], we first extract an intermediate representation (e.g., heatmaps), and use this representation to initialize a keypoints coordinates distribution (i.e., " + }, + { + "bbox": [ + 46, + 437, + 286, + 652 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 437, + 286, + 652 + ], + "type": "text", + "content": "), which will serve as the starting point of the reverse process. Such an intermediate representation encodes useful prior distribution information about keypoints coordinates. Thus by starting the reverse process from this representation, we effectively exploit the distribution priors in the representation to aid the diffusion model in recovering accurate keypoints coordinates [11]. Below, we first describe how we initialize the keypoints distribution " + }, + { + "bbox": [ + 46, + 437, + 286, + 652 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 437, + 286, + 652 + ], + "type": "text", + "content": ", and then discuss the corresponding forward and reverse processes in our new framework." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 653, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 286, + 713 + ], + "type": "text", + "content": "Keypoints Distribution Initialization. We initialize the keypoints coordinates distribution " + }, + { + "bbox": [ + 46, + 653, + 286, + 713 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 653, + 286, + 713 + ], + "type": "text", + "content": " with extracted heatmaps. 
Specifically, similar to [29, 34, 53], we first use an off-the-shelf object detector (e.g., Faster RCNN [49]) to detect the bounding box of the target object, and then crop" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": "the detected Region of Interest (ROI) from the input image. We send the ROI into a sub-network (i.e., the keypoints distribution initializer) to predict a number of heatmaps where each heatmap corresponds to one 2D keypoint. We then normalize each heatmap to convert it to a probability distribution. In this way, each normalized heatmap naturally represents the distribution of the corresponding keypoint coordinates, and thus we can use these heatmaps to initialize " + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "content": "Forward Process. After distribution initialization, the next step is to iteratively reduce the noise and indeterminacy in the initialized distribution " + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "content": " by performing the reverse process " + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "inline_equation", + "content": "(D_K \\to D_{K-1} \\to \\ldots \\to D_0)" + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "content": ". To train the diffusion model to perform such a reverse process, we need to obtain the distributions generated along the way (i.e., " + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "inline_equation", + "content": "\\{D_k\\}_{k=1}^{K-1}" + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "content": ") as the supervision signals. Thus, we first need to conduct the forward process to obtain samples from " + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "inline_equation", + "content": "\\{D_k\\}_{k=1}^{K-1}" + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "content": ". Specifically, given the ground-truth keypoints coordinates distribution " + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "inline_equation", + "content": "D_0" + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "content": ", we define the forward process as: " + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "inline_equation", + "content": "D_0 \\to D_1 \\to \\ldots \\to D_K" + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "content": " is the number of diffusion steps. 
In this forward process, we iteratively add noise to the determinate distribution " + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "inline_equation", + "content": "D_0" + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "content": ", i.e., increasing the indeterminacy of generated distributions, to transform it into the initialized distribution " + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "content": " with indeterminacy. Via this process, we can generate " + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "inline_equation", + "content": "\\{D_k\\}_{k=1}^{K-1}" + }, + { + "bbox": [ + 304, + 182, + 545, + 396 + ], + "type": "text", + "content": " along the way and use them as supervision signals to train the diffusion model to perform the reverse process." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "spans": [ + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "text", + "content": "However, in our framework, we do not aim to transform the ground-truth keypoints coordinates distribution " + }, + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "inline_equation", + "content": "D_0" + }, + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "text", + "content": " towards a standard Gaussian distribution via the forward process, because our initialized distribution " + }, + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "text", + "content": " is not a random noise. Instead, as discussed before, " + }, + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "text", + "content": " is initialized with heatmaps (as shown in Fig. 3), since the heatmaps can provide rough estimations about the keypoints coordinates distribution. To effectively utilize such priors in " + }, + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "text", + "content": " to facilitate the reverse process, we aim to enable the diffusion model to start the reverse process (denoising process) from " + }, + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 399, + 545, + 577 + ], + "type": "text", + "content": " instead of random Gaussian noise [11]. Thus, the basic forward process (described in Sec. 3.1) in existing generative diffusion models is not suitable in our framework, which motivates us to design a new forward process for our task." 
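The extracted text above explains that each predicted heatmap is normalized into a probability distribution and used to initialize the keypoints coordinate distribution D_K. The following is a small sketch, under assumed shapes, of treating each normalized heatmap as a categorical distribution over pixel locations and drawing M candidate coordinate sets from it; it is illustrative only, not the paper's code.

```python
# Illustrative sketch: sample M sets of keypoint coordinates from per-keypoint
# heatmaps, i.e., draw samples d_K^i from the heatmap-initialized distribution D_K.
import numpy as np

def sample_from_heatmaps(heatmaps, M, rng=np.random.default_rng(0)):
    """heatmaps: (N, H, W) non-negative scores. Returns (M, N, 2) xy samples."""
    N, H, W = heatmaps.shape
    probs = heatmaps.reshape(N, -1)
    probs = probs / probs.sum(axis=1, keepdims=True)   # normalize each heatmap to a distribution
    samples = np.empty((M, N, 2), dtype=np.float32)
    for n in range(N):
        idx = rng.choice(H * W, size=M, p=probs[n])    # sample pixel indices per keypoint
        samples[:, n, 0] = idx % W                      # x coordinate
        samples[:, n, 1] = idx // W                     # y coordinate
    return samples

d_K = sample_from_heatmaps(np.random.rand(8, 64, 64), M=16)
print(d_K.shape)  # (16, 8, 2)
```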
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 580, + 545, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 580, + 545, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 580, + 545, + 700 + ], + "type": "text", + "content": "However, it is non-trivial to design such a forward process, as the initialized distribution " + }, + { + "bbox": [ + 304, + 580, + 545, + 700 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 580, + 545, + 700 + ], + "type": "text", + "content": " is based on extracted heatmaps, and thus " + }, + { + "bbox": [ + 304, + 580, + 545, + 700 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 580, + 545, + 700 + ], + "type": "text", + "content": " can be complex and irregular, as shown in Fig. 4. Hence modeling " + }, + { + "bbox": [ + 304, + 580, + 545, + 700 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 580, + 545, + 700 + ], + "type": "text", + "content": " as a Gaussian distribution can result in potentially large errors. To handle this challenge, motivated by that the Mixture of Cauchy (MoC) model can effectively and reliably characterize complex and intractable distributions [26], we leverage MoC to characterize " + }, + { + "bbox": [ + 304, + 580, + 545, + 700 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 580, + 545, + 700 + ], + "type": "text", + "content": ". Based on the characterized distribution, we can then perform a corresponding MoC-based forward process." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "content": "Specifically, we denote the number of Cauchy kernels" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9679" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 77, + 75, + 521, + 222 + ], + "blocks": [ + { + "bbox": [ + 77, + 75, + 521, + 222 + ], + "lines": [ + { + "bbox": [ + 77, + 75, + 521, + 222 + ], + "spans": [ + { + "bbox": [ + 77, + 75, + 521, + 222 + ], + "type": "image", + "image_path": "437c5b1d10ef8e5b67324541356b9f8aca372444fd4742e3719751b343fb62ea.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "lines": [ + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "spans": [ + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": "Figure 3. Illustration of our framework. During testing, given an input image, we first crop the Region of Interest (ROI) from the image through an object detector. After that, we feed the cropped ROI to the keypoints distribution initializer to obtain the heatmaps that can provide useful distribution priors about keypoints, to initialize " + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": ". 
Meanwhile, we can obtain object appearance features " + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": ". Next, we pass " + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": " into the encoder, and the output of the encoder will serve as conditional information to aid the reverse process in the decoder. We sample " + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": " sets of 2D keypoints coordinates from " + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": ", and feed these " + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": " sets of coordinates into the decoder to perform the reverse process iteratively together with the step embedding " + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "inline_equation", + "content": "f_D^k" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": ". At the final reverse step (" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": "-th step), we average " + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "inline_equation", + "content": "\\{d_0^i\\}_{i=1}^M" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": " as the final keypoints coordinates prediction " + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": ", and use " + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 46, + 232, + 547, + 312 + ], + "type": "text", + "content": " to compute the 6D pose with the pre-selected 3D keypoints via a PnP solver." 
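The figure caption just extracted summarizes the test-time pipeline: sample M coordinate sets from D_K, run K reverse steps conditioned on appearance features and a step embedding, then average the M outputs before the PnP solver. A minimal sketch of that loop follows; the model call, step embedding, and shapes are stand-ins and not the released implementation.

```python
# Sketch of the test-time reverse loop described in the caption (placeholder model).
import numpy as np

def reverse_inference(model, d_K_samples, f_app, step_embed, K):
    """d_K_samples: (M, N, 2) samples from D_K; model performs one reverse step (Eq. 6)."""
    d = d_K_samples
    for k in range(K, 0, -1):
        d = model(d, f_app, step_embed(k))   # d_{k-1} = M_diff(d_k, f_app, f_D^k)
    return d.mean(axis=0)                     # average the M predictions into d_0

# dummy stand-ins so the sketch runs end-to-end
dummy_model = lambda d, f_app, f_k: 0.9 * d
d0 = reverse_inference(dummy_model, np.random.rand(16, 8, 2),
                       f_app=None, step_embed=lambda k: k, K=100)
print(d0.shape)  # (8, 2): final keypoint coordinates passed to the PnP solver
```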
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 316, + 288, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 316, + 288, + 362 + ], + "spans": [ + { + "bbox": [ + 46, + 316, + 288, + 362 + ], + "type": "text", + "content": "in the MoC distribution as " + }, + { + "bbox": [ + 46, + 316, + 288, + 362 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 46, + 316, + 288, + 362 + ], + "type": "text", + "content": ", and use the Expectation-Maximum-type (EM) algorithm [26, 55] to optimize the MoC parameters " + }, + { + "bbox": [ + 46, + 316, + 288, + 362 + ], + "type": "inline_equation", + "content": "\\eta^{\\mathrm{MoC}}" + }, + { + "bbox": [ + 46, + 316, + 288, + 362 + ], + "type": "text", + "content": " to characterize the distribution " + }, + { + "bbox": [ + 46, + 316, + 288, + 362 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 316, + 288, + 362 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 363, + 287, + 396 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 363, + 287, + 396 + ], + "spans": [ + { + "bbox": [ + 72, + 363, + 287, + 396 + ], + "type": "interline_equation", + "content": "\\eta_ {*} ^ {\\mathrm {M o C}} = \\operatorname {E M} \\left(\\prod_ {v = 1} ^ {V} \\sum_ {u = 1} ^ {U} \\pi_ {u} \\operatorname {C a u c h y} \\left(d _ {K} ^ {v} \\mid \\mu_ {u}, \\gamma_ {u}\\right)\\right) \\tag {4}", + "image_path": "8e91156fd789fcb5921bc9029ff9892706d82fd6440c755f81cebbdb49d42090.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "spans": [ + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "\\{d_K^v\\}_{v = 1}^V" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": " denotes " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": " sets of keypoints coordinates sampled from the distribution " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "D_{K}" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": ". Note each set of keypoints coordinates " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "d_K^v" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": " contains all the " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": " keypoints coordinates (i.e., " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "d_K^v\\in \\mathbb{R}^{N\\times 2}" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": "). 
" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "\\pi_u" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": " denotes the weight of the " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": "-th Cauchy kernel (" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "\\sum_{u = 1}^{U}\\pi_{u} = 1" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": "), and " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "\\eta^{\\mathrm{MoC}} = \\{\\mu_1,\\gamma_1,\\dots,\\mu_U,\\gamma_U\\}" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": " denotes the MoC parameters in which " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "\\mu_{u}" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "\\gamma_{u}" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": " are the location and scale of the " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": "-th Cauchy kernel. Via the above optimization, we can use the optimized parameters " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "\\eta_*^{\\mathrm{MoC}}" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": " to model " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": " as the characterized distribution " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "(\\hat{D}_K)" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": ". Given " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "\\hat{D}_K" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": ", we aim to conduct the forward process from the ground-truth keypoints coordinates distribution " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "D_0" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": ", so that after " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": " steps of forward diffusion, the generated distribution reaches " + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "inline_equation", + "content": "\\hat{D}_K" + }, + { + "bbox": [ + 46, + 398, + 289, + 569 + ], + "type": "text", + "content": ". To this end, we modify Eq. 
(3) as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 576, + 287, + 592 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 576, + 287, + 592 + ], + "spans": [ + { + "bbox": [ + 55, + 576, + 287, + 592 + ], + "type": "interline_equation", + "content": "\\hat {d} _ {k} = \\sqrt {\\overline {{\\alpha}} _ {k}} d _ {0} + (1 - \\sqrt {\\overline {{\\alpha}} _ {k}}) \\mu^ {\\mathrm {M o C}} + \\sqrt {1 - \\overline {{\\alpha}} _ {k}} \\epsilon^ {\\mathrm {M o C}} \\qquad (5)", + "image_path": "c85b618d9c7710b1a31e4a413b1022ed09943998357f3aff69fd5f492b62a509.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "inline_equation", + "content": "\\hat{d}_k\\in \\mathbb{R}^{N\\times 2}" + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "text", + "content": " represents a sample (i.e., a set of " + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "text", + "content": " keypoints coordinates) from the generated distribution " + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "inline_equation", + "content": "\\tilde{D}_k" + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "inline_equation", + "content": "\\mu^{\\mathrm{MoC}} = \\sum_{u = 1}^{U}\\mathbb{1}_{u}\\mu_{u}" + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "inline_equation", + "content": "\\epsilon^{\\mathrm{MoC}}\\sim" + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "text", + "content": " Cauchy(0, " + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "inline_equation", + "content": "\\sum_{u = 1}^{U}(\\mathbb{1}_{u}\\gamma_{u})" + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "text", + "content": "). Note that " + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "inline_equation", + "content": "\\mathbb{1}_u" + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "text", + "content": " is a zero-one indicator and " + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "inline_equation", + "content": "\\sum_{u = 1}^{U}\\mathbb{1}_u = 1" + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "inline_equation", + "content": "\\operatorname {Prob}(\\mathbb{1}_u = 1) = \\pi_u" + }, + { + "bbox": [ + 46, + 601, + 287, + 666 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "text", + "content": "From Eq. 
(5), we can observe that when " + }, + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "text", + "content": " is sufficiently large and " + }, + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\overline{\\alpha}_K" + }, + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "text", + "content": " correspondingly decreases to nearly zero, the distribution of " + }, + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\hat{d}_K" + }, + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "text", + "content": " reaches the MoC distribution, i.e., " + }, + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\hat{d}_K = \\mu^{\\mathrm{MoC}} + \\epsilon^{\\mathrm{MoC}}\\sim \\mathrm{Cauchy}(\\sum_{u = 1}^{U}(\\mathbb{1}_{u}\\mu_{u}),\\sum_{u = 1}^{U}(\\mathbb{1}_{u}\\gamma_{u}))" + }, + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 316, + 547, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 547, + 399 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 547, + 399 + ], + "type": "text", + "content": "ter the above MoC-based forward process, we can use the generated " + }, + { + "bbox": [ + 304, + 316, + 547, + 399 + ], + "type": "inline_equation", + "content": "\\{\\hat{D}_k\\}_{k=1}^{K-1}" + }, + { + "bbox": [ + 304, + 316, + 547, + 399 + ], + "type": "text", + "content": " as supervision signals to train the diffusion model " + }, + { + "bbox": [ + 304, + 316, + 547, + 399 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 304, + 316, + 547, + 399 + ], + "type": "text", + "content": " to learn the reverse process. More details about Eq. (5) can be found in Supplementary material. Such a forward process is only conducted to generate supervision signals for training the diffusion model, while we only need to conduct the reverse process during testing." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "spans": [ + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "text", + "content": "Reverse Process. In the reverse process, we aim to recover a desired determinate keypoints distribution " + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "inline_equation", + "content": "D_0" + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "text", + "content": " from the initial distribution " + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "text", + "content": ". 
As discussed above, we characterize " + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "text", + "content": " via a MoC model and then generate " + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "inline_equation", + "content": "\\{\\hat{D}_k\\}_{k=1}^{K-1}" + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "text", + "content": " as supervision signals to optimize the diffusion model to learn to perform the reverse process " + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "inline_equation", + "content": "(\\hat{D}_K \\to \\hat{D}_{K-1} \\to \\dots \\to D_0)" + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "text", + "content": ", in which the model iteratively reduces the noise and indeterminacy in " + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "inline_equation", + "content": "\\hat{D}_K" + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "text", + "content": " to generate " + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "inline_equation", + "content": "D_0" + }, + { + "bbox": [ + 304, + 400, + 547, + 496 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "text", + "content": "However, it can still be difficult to generate " + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "inline_equation", + "content": "D_0" + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "text", + "content": " by directly performing the reverse process from " + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "inline_equation", + "content": "\\hat{D}_K" + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "text", + "content": ", because the object appearance features are lacking in " + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "inline_equation", + "content": "\\hat{D}_K" + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "text", + "content": ". Such features can help constrain the model reverse process based on the input image to get accurate predictions. Thus we further leverage the appearance features from the image as context to guide " + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "text", + "content": " in the reverse process. Specifically, we reuse the features extracted from the keypoints distribution initializer as the appearance features " + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "text", + "content": " and feed " + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 304, + 497, + 548, + 617 + ], + "type": "text", + "content": " into the diffusion model, as shown in Fig. 3." 
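The MoC-based forward step of Eq. (5) extracted above pushes the ground-truth keypoints toward a Cauchy-mixture location and injects Cauchy-distributed noise rather than Gaussian noise. Below is a hedged sketch of a single such step; the kernel weights, locations, and scales are arbitrary placeholders used only to make the formula concrete.

```python
# Sketch of one MoC-based forward step per Eq. (5):
# d_hat_k = sqrt(a_bar_k) * d_0 + (1 - sqrt(a_bar_k)) * mu_MoC + sqrt(1 - a_bar_k) * eps_MoC
import numpy as np

def moc_forward_step(d0, alpha_bar_k, pi, mu, gamma, rng=np.random.default_rng(0)):
    """d0: (N, 2) ground-truth keypoints; pi: (U,) weights; mu: (U, N, 2); gamma: (U,)."""
    u = rng.choice(len(pi), p=pi)                         # indicator: pick one Cauchy kernel
    eps = rng.standard_cauchy(size=d0.shape) * gamma[u]   # eps^MoC ~ Cauchy(0, gamma_u)
    s = np.sqrt(alpha_bar_k)
    return s * d0 + (1.0 - s) * mu[u] + np.sqrt(1.0 - alpha_bar_k) * eps

d_hat_k = moc_forward_step(np.random.rand(8, 2) * 256, alpha_bar_k=0.5,
                           pi=np.array([0.7, 0.3]),
                           mu=np.random.rand(2, 8, 2) * 256,
                           gamma=np.array([2.0, 5.0]))
print(d_hat_k.shape)  # (8, 2): one sample from the generated distribution
```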
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "text", + "content": "Our reverse process aims to generate a determinate distribution " + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "inline_equation", + "content": "D_0" + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "text", + "content": " from the indeterminate distribution " + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "inline_equation", + "content": "\\hat{D}_K" + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "text", + "content": " (during training) or " + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "text", + "content": " (during testing). Below we describe the reverse process during testing. We first obtain " + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "text", + "content": " from the input image. Then to help the diffusion model to learn to perform denoising at each reverse step, following [18, 52], we generate the unique step embedding " + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "inline_equation", + "content": "f_D^k" + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "text", + "content": " to inject the step number " + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "inline_equation", + "content": "(k)" + }, + { + "bbox": [ + 304, + 618, + 548, + 715 + ], + "type": "text", + "content": " information into the model. 
In this way, given a" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "9680" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": "set of noisy keypoints coordinates " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "d_{k}\\in \\mathbb{R}^{N\\times 2}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " drawn from " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "D_{k}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " at the " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "k^{th}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " step, we use diffusion model " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " conditioned on the step embedding " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "f_{D}^{k}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " and the object appearance features " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " to recover " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "d_{k - 1}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 123, + 287, + 137 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 123, + 287, + 137 + ], + "spans": [ + { + "bbox": [ + 111, + 123, + 287, + 137 + ], + "type": "interline_equation", + "content": "d _ {k - 1} = M _ {\\text {d i f f}} \\left(d _ {k}, f _ {\\text {a p p}}, f _ {D} ^ {k}\\right) \\tag {6}", + "image_path": "7e6c25726eda337007a3825ab6e00f50a5563c09ca5f7ec358f728e73df22859.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 144, + 167, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 144, + 167, + 156 + ], + "spans": [ + { + "bbox": [ + 47, + 144, + 167, + 156 + ], + "type": "text", + "content": "3.3. Training and Testing" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 163, + 287, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 163, + 287, + 210 + ], + "spans": [ + { + "bbox": [ + 46, + 163, + 287, + 210 + ], + "type": "text", + "content": "Training. 
Following [44], we first select " + }, + { + "bbox": [ + 46, + 163, + 287, + 210 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 163, + 287, + 210 + ], + "type": "text", + "content": " 3D keypoints from the surface of the object CAD model using the farthest point sampling (FPS) algorithm. Then we conduct the training process in the following two stages." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "spans": [ + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "text", + "content": "In the first stage, to initialize the distribution " + }, + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "text", + "content": ", we optimize the keypoints distribution initializer. Specifically, for each training sample, given the pre-selected " + }, + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "text", + "content": " 3D keypoints, we can obtain the ground-truth coordinates of the corresponding " + }, + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "text", + "content": " 2D keypoints using the ground-truth 6D object pose. Then for each keypoints, based on the corresponding ground-truth coordinates, we generate a ground-truth heatmap following [42] for training the initializer. Thus for each training sample, we generate " + }, + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "text", + "content": " ground-truth heatmaps. In this way, the loss function " + }, + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{init}}" + }, + { + "bbox": [ + 46, + 211, + 287, + 341 + ], + "type": "text", + "content": " for optimizing the initializer can be formulated as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 342, + 287, + 365 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 342, + 287, + 365 + ], + "spans": [ + { + "bbox": [ + 117, + 342, + 287, + 365 + ], + "type": "interline_equation", + "content": "L _ {\\text {i n i t}} = \\left\\| \\mathbf {H} _ {\\text {p r e d}} - \\mathbf {H} _ {\\mathrm {G T}} \\right\\| _ {2} ^ {2} \\tag {7}", + "image_path": "07f7eb9539835b7208d8873c7c931d5cb19fd5e0878a2568fefdbd37cc89523c.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 365, + 287, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 287, + 389 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 287, + 389 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 365, + 287, + 389 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_{\\mathrm{pred}}" + }, + { + "bbox": [ + 46, + 365, + 287, + 389 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 365, + 287, + 389 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_{\\mathrm{GT}}" + }, + { + "bbox": [ + 46, + 365, + 287, + 389 + ], + "type": "text", + "content": " denote the predicted heatmaps and ground-truth heatmaps, respectively." 
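The first training stage described above supervises the keypoints distribution initializer with per-keypoint ground-truth heatmaps and the squared L2 loss of Eq. (7). The sketch below renders Gaussian-style ground-truth heatmaps from ground-truth 2D coordinates and evaluates that loss; the heatmap size and sigma are assumptions, not values from the paper.

```python
# Sketch of stage-one supervision: build a ground-truth heatmap per keypoint and
# compute L_init = ||H_pred - H_GT||_2^2 (Eq. 7). Shapes and sigma are assumed.
import numpy as np

def gt_heatmaps(kpts_2d, H, W, sigma=2.0):
    """kpts_2d: (N, 2) ground-truth xy coordinates -> (N, H, W) Gaussian heatmaps."""
    ys, xs = np.mgrid[0:H, 0:W]
    maps = [np.exp(-((xs - x) ** 2 + (ys - y) ** 2) / (2 * sigma ** 2)) for x, y in kpts_2d]
    return np.stack(maps)

def l_init(h_pred, h_gt):
    return np.sum((h_pred - h_gt) ** 2)   # squared L2 loss between heatmap stacks

h_gt = gt_heatmaps(np.random.rand(8, 2) * 64, H=64, W=64)
print(h_gt.shape, l_init(np.random.rand(8, 64, 64), h_gt))
```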
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "spans": [ + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": "In the second stage, we optimize the diffusion model " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": ". For each training sample, to optimize " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": ", we perform the following steps. (1) We first send the input image into an off-the-shelf object detector [57] and then feed the detected ROI into the trained initializer to obtain " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " heatmaps. Meanwhile, we can also obtain " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": ". (2) We use the " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " predicted heatmaps to initialize " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": ", and leverage the EM-type algorithm to characterize " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " as a MoC distribution " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\hat{D}_K" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": ". (3) Based on " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\hat{D}_K" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": ", we use the ground-truth keypoints coordinates " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " to directly generate " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " sets of " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "(\\hat{d}_1, \\dots, \\hat{d}_K)" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\{\\hat{d}_1^i, \\dots, \\hat{d}_K^i\\}_{i=1}^M" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": ") via the forward process (Eq. (5)). 
(4) Then, we aim to optimize the diffusion model " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " to recover " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\hat{d}_{k-1}^i" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\hat{d}_k^i" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " iteratively. Following previous diffusion works [18, 52], we formulate the loss " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{diff}}" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " for optimizing " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " as follows " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "(\\hat{d}_0^i = d_0" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 389, + 287, + 569 + ], + "type": "text", + "content": "):" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 73, + 571, + 287, + 604 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 571, + 287, + 604 + ], + "spans": [ + { + "bbox": [ + 73, + 571, + 287, + 604 + ], + "type": "interline_equation", + "content": "L _ {\\text {d i f f}} = \\sum_ {i = 1} ^ {M} \\sum_ {k = 1} ^ {K} \\left\\| M _ {\\text {d i f f}} \\left(\\hat {d} _ {k} ^ {i}, f _ {\\text {a p p}}, f _ {D} ^ {k}\\right) - \\hat {d} _ {k - 1} ^ {i} \\right\\| _ {2} ^ {2} \\tag {8}", + "image_path": "bd513da553f2377e8ed89956bdd3e0a75d48cdd6051a4141d6c3213233643972.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": "Testing. During testing, for each testing sample, by feeding the input image to the object detector and the keypoints distribution initializer consecutively, we can initialize " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " and meanwhile obtain " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": ". Then, we perform the reverse process. 
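As a rough illustration of Eq. (8), the sketch below computes the stage-2 training loss for one sample, assuming the M noisy keypoint sets for every step have already been generated with the MoC-based forward process of Eq. (5). The callable `model` stands in for M_diff, and the tensor shapes are assumptions chosen for readability.

```python
import torch

def diffusion_training_loss(model, d_noisy, f_app, step_embeds):
    """Sketch of L_diff (Eq. (8)) for a single training sample.

    d_noisy:     (M, K + 1, N, 2) noisy keypoint sets from the forward process,
                 with d_noisy[:, 0] equal to the ground-truth coordinates d_0.
    f_app:       appearance features used as conditioning.
    step_embeds: (K + 1, D) step embeddings f_D^k.
    `model(d_k, f_app, f_k)` is a placeholder for M_diff predicting d_{k-1}.
    """
    M, K_plus_1 = d_noisy.shape[:2]
    loss = d_noisy.new_zeros(())
    for i in range(M):
        for k in range(1, K_plus_1):
            pred = model(d_noisy[i, k], f_app, step_embeds[k])
            loss = loss + ((pred - d_noisy[i, k - 1]) ** 2).sum()
    return loss
```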
During the reverse process, we sample " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " sets of noisy keypoints coordinates from " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\{d_K^i\\}_{i = 1}^M" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": ") and feed them into the trained diffusion model. Here we sample " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " sets of keypoints coordinates, because we are converting from a distribution " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "(D_K)" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " towards another distribution " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "(D_0)" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "Then the model iteratively performs the reverse steps. After " + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": " reverse diffusion steps, we obtain " + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": " sets of predicted keypoints coordinates (i.e., " + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "inline_equation", + "content": "\\{d_0^i\\}_{i = 1}^M" + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "). To obtain the final keypoints coordinates prediction " + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "inline_equation", + "content": "d_{0}" + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": ", we compute the mean of the " + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": " predictions. Finally, we can solve for the 6D object pose using a PnP solver, like [44, 53]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 152, + 421, + 164 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 152, + 421, + 164 + ], + "spans": [ + { + "bbox": [ + 305, + 152, + 421, + 164 + ], + "type": "text", + "content": "3.4. 
Model Architecture" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 171, + 545, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 171, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 304, + 171, + 545, + 195 + ], + "type": "text", + "content": "Our framework mainly consists of the diffusion model " + }, + { + "bbox": [ + 304, + 171, + 545, + 195 + ], + "type": "inline_equation", + "content": "(M_{\\mathrm{diff}})" + }, + { + "bbox": [ + 304, + 171, + 545, + 195 + ], + "type": "text", + "content": " and the keypoints distribution initializer." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "text", + "content": "Diffusion Model " + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "text", + "content": ". As illustrated in Fig. 3, our proposed diffusion model " + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "text", + "content": " mainly consists of a transformer encoder-decoder architecture. The appearance features " + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "text", + "content": " are sent into the encoder for extracting context information to aid the reverse process in the decoder. " + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "inline_equation", + "content": "f_{D}^{k}" + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "inline_equation", + "content": "\\{d_k^i\\}_{i=1}^M" + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "text", + "content": " (or " + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "inline_equation", + "content": "\\{\\hat{d}_k^i\\}_{i=1}^M" + }, + { + "bbox": [ + 304, + 196, + 545, + 293 + ], + "type": "text", + "content": " during training) are sent into the decoder for the reverse process. Both the encoder and the decoder contain a stack of three transformer layers." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "text", + "content": "More specifically, as for the encoder part, we first map " + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}} \\in \\mathbb{R}^{16 \\times 16 \\times 512}" + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "text", + "content": " through a " + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "text", + "content": " convolution layer to a latent embedding " + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "inline_equation", + "content": "e_{\\mathrm{app}} \\in \\mathbb{R}^{16 \\times 16 \\times 128}" + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "text", + "content": ". To retain the spatial information, following [59], we further incorporate positional encodings into " + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "inline_equation", + "content": "e_{\\mathrm{app}}" + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "text", + "content": ". Afterwards, we flatten " + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "inline_equation", + "content": "e_{\\mathrm{app}}" + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "text", + "content": " into a feature sequence " + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "inline_equation", + "content": "(\\mathbb{R}^{256 \\times 128})" + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "text", + "content": ", and send it into the encoder. The encoder output " + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{enc}}" + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "text", + "content": " containing the extracted object information will be sent into the decoder to aid the reverse process. Note that during testing, for each sample, we only need to conduct the above computation process once to obtain the corresponding " + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{enc}}" + }, + { + "bbox": [ + 304, + 293, + 545, + 426 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": "The decoder part iteratively performs the reverse process. 
For notation simplicity, below we describe the reverse process for a single sample " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": " instead of the " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": " samples " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "\\left(\\{d_1^i,\\dots,d_K^i\\}_{i = 1}^M\\right)" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": ". Specifically, at the " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": "-th reverse step, to inject the current step number " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "(k)" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": " information into the decoder, we first generate the step embedding " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "f_{D}^{k}\\in \\mathbb{R}^{1\\times 128}" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": " using the sinusoidal function following [18, 52]. Meanwhile, we use an FC layer to map the input " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "d_{k}\\in \\mathbb{R}^{N\\times 2}" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": " to a latent embedding " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "e_k\\in \\mathbb{R}^{N\\times 128}" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": ". Then we concatenate " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "f_{D}^{k}" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "e_k" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": " along the first dimension, and send it into the decoder. By interacting with the encoder output " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{enc}}" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": " (extracted object information) via cross-attention at each layer, the decoder produces " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{dec}}" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": ", which is further mapped into the keypoints coordinates prediction " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "d_{k - 1}\\in \\mathbb{R}^{N\\times 2}" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": " via an FC layer. 
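A condensed PyTorch sketch of the encoder-decoder just described is given below. The token dimension (128), the three encoder and decoder layers, the 1×1 projection of f_app, the sinusoidal step embedding, and the FC heads follow the text; the head count, the learned positional encodings, and the remaining hyper-parameters are assumptions, not the paper's exact configuration.

```python
import math
import torch
import torch.nn as nn

class KeypointDiffusionSketch(nn.Module):
    """Rough sketch of the transformer encoder-decoder described above."""

    def __init__(self, n_kpts=128, d_model=128, n_layers=3, n_heads=4):
        super().__init__()
        self.proj_app = nn.Conv2d(512, d_model, kernel_size=1)   # f_app -> e_app
        self.pos_emb = nn.Parameter(torch.zeros(256, d_model))   # 16*16 tokens (assumed learned)
        enc_layer = nn.TransformerEncoderLayer(d_model, n_heads, batch_first=True)
        dec_layer = nn.TransformerDecoderLayer(d_model, n_heads, batch_first=True)
        self.encoder = nn.TransformerEncoder(enc_layer, n_layers)
        self.decoder = nn.TransformerDecoder(dec_layer, n_layers)
        self.embed_kpts = nn.Linear(2, d_model)                   # d_k -> e_k
        self.head = nn.Linear(d_model, 2)                         # f_dec -> d_{k-1}
        self.d_model = d_model

    def step_embedding(self, k):
        """Sinusoidal embedding of the diffusion step k, shape (1, d_model)."""
        half = self.d_model // 2
        freqs = torch.exp(-math.log(10000.0) * torch.arange(half) / half)
        angles = k * freqs
        return torch.cat([torch.sin(angles), torch.cos(angles)]).view(1, -1)

    def encode(self, f_app):
        """f_app: (B, 512, 16, 16) -> f_enc: (B, 256, d_model). Run once per image."""
        e_app = self.proj_app(f_app).flatten(2).transpose(1, 2) + self.pos_emb
        return self.encoder(e_app)

    def reverse_step(self, d_k, k, f_enc):
        """One reverse step: predict d_{k-1} from d_k of shape (B, N, 2)."""
        e_k = self.embed_kpts(d_k)                                 # (B, N, d_model)
        f_dk = self.step_embedding(k).to(d_k).expand(d_k.shape[0], 1, -1)
        tokens = torch.cat([f_dk, e_k], dim=1)                     # (B, N+1, d_model)
        f_dec = self.decoder(tokens, f_enc)                        # cross-attend to f_enc
        return self.head(f_dec[:, 1:])                             # drop the step token
```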
Then we send " + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "inline_equation", + "content": "d_{k - 1}" + }, + { + "bbox": [ + 304, + 426, + 545, + 617 + ], + "type": "text", + "content": " back to the decoder as the input to perform the next reverse step." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "Keypoints Distribution Initializer. The initializer adopts a ResNet-34 backbone, which is commonly used in 6D pose estimation methods [4, 53, 61]. To generate heatmaps to initialize the distribution " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": ", we add two deconvolution layers followed by a " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": " convolution layer after the ResNet-34 backbone, and then we obtain predicted heatmaps " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_{\\mathrm{pred}} \\in \\mathbb{R}^{N \\times \\frac{H}{4} \\times \\frac{W}{4}}" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": " denote the height and width of the input ROI image respec" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 314, + 757 + ], + "type": "text", + "content": "9681" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 97, + 71, + 168, + 142 + ], + "blocks": [ + { + "bbox": [ + 97, + 71, + 168, + 142 + ], + "lines": [ + { + "bbox": [ + 97, + 71, + 168, + 142 + ], + "spans": [ + { + "bbox": [ + 97, + 71, + 168, + 142 + ], + "type": "image", + "image_path": "b61da41b8944ffeb55074eed5c6f67f97a2a423b9bee6f0569047e61a97a81c9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 115, + 141, + 150, + 148 + ], + "lines": [ + { + "bbox": [ + 115, + 141, + 150, + 148 + ], + "spans": [ + { + "bbox": [ + 115, + 141, + 150, + 148 + ], + "type": "text", + "content": "input image" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 184, + 71, + 262, + 149 + ], + "blocks": [ + { + "bbox": [ + 184, + 71, + 262, + 149 + ], + "lines": [ + { + "bbox": [ + 184, + 71, + 262, + 149 + ], + "spans": [ + { + "bbox": [ + 184, + 71, + 262, + 149 + ], + "type": "image", + "image_path": "0b4b9acb5c85467644b3dd29dc4d8ebc1a7f72cded7d35668f0c854a9786e635.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + 
"type": "image_body" + }, + { + "bbox": [ + 46, + 151, + 545, + 206 + ], + "lines": [ + { + "bbox": [ + 46, + 151, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 46, + 151, + 545, + 206 + ], + "type": "text", + "content": "Figure 4. Visualization of the denoising process of a sample with our framework. In this example, the target object is the yellow duck and for clarity, we here show three keypoints only. The red dots indicate the ground-truth locations of these three keypoints. The noisy heatmap before denoising reflects that factors like occlusions and clutter in the scene can introduce noise and indeterminacy when detecting keypoints. As shown, our diffusion model can effectively and smoothly reduce the noise and indeterminacy in the initial distribution step by step, finally recovering a high-quality and determinate distribution of keypoints coordinates. (Better viewed in color)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 263, + 72, + 334, + 147 + ], + "blocks": [ + { + "bbox": [ + 263, + 72, + 334, + 147 + ], + "lines": [ + { + "bbox": [ + 263, + 72, + 334, + 147 + ], + "spans": [ + { + "bbox": [ + 263, + 72, + 334, + 147 + ], + "type": "image", + "image_path": "04c376050a06a7e26e7256b49519f8986a0b703a6aa603b8969c3eed2c6e5963.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 341, + 72, + 418, + 147 + ], + "blocks": [ + { + "bbox": [ + 341, + 72, + 418, + 147 + ], + "lines": [ + { + "bbox": [ + 341, + 72, + 418, + 147 + ], + "spans": [ + { + "bbox": [ + 341, + 72, + 418, + 147 + ], + "type": "image", + "image_path": "c7f21552bc1e952f449a1464eca28832e2ab5f7c261b243fc3d30f4403e51998.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 423, + 72, + 492, + 148 + ], + "blocks": [ + { + "bbox": [ + 423, + 72, + 492, + 148 + ], + "lines": [ + { + "bbox": [ + 423, + 72, + 492, + 148 + ], + "spans": [ + { + "bbox": [ + 423, + 72, + 492, + 148 + ], + "type": "image", + "image_path": "0def7fb1407240b98f521abcde04b9fa3bf1679cada464ac1d6b5e5a723577cb.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 65, + 221, + 526, + 300 + ], + "blocks": [ + { + "bbox": [ + 63, + 209, + 527, + 220 + ], + "lines": [ + { + "bbox": [ + 63, + 209, + 527, + 220 + ], + "spans": [ + { + "bbox": [ + 63, + 209, + 527, + 220 + ], + "type": "text", + "content": "Table 1. Comparisons with RGB-based 6D object pose estimation methods on the LM-O dataset. (*) denotes symmetric objects." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 65, + 221, + 526, + 300 + ], + "lines": [ + { + "bbox": [ + 65, + 221, + 526, + 300 + ], + "spans": [ + { + "bbox": [ + 65, + 221, + 526, + 300 + ], + "type": "table", + "html": "
<table>
<tr><td>Method</td><td>PVNet [44]</td><td>HybridPose [51]</td><td>RePose [24]</td><td>DeepIM [33]</td><td>GDR-Net [61]</td><td>SO-Pose [8]</td><td>CRT-6D [4]</td><td>ZebraPose [53]</td><td>CheckerPose [35]</td><td>Ours</td></tr>
<tr><td>ape</td><td>15.8</td><td>20.9</td><td>31.1</td><td>59.2</td><td>46.8</td><td>48.4</td><td>53.4</td><td>57.9</td><td>58.3</td><td>60.6</td></tr>
<tr><td>can</td><td>63.3</td><td>75.3</td><td>80.0</td><td>63.5</td><td>90.8</td><td>85.8</td><td>92.0</td><td>95.0</td><td>95.7</td><td>97.9</td></tr>
<tr><td>cat</td><td>16.7</td><td>24.9</td><td>25.6</td><td>26.2</td><td>40.5</td><td>32.7</td><td>42.0</td><td>60.6</td><td>62.3</td><td>63.2</td></tr>
<tr><td>driller</td><td>65.7</td><td>70.2</td><td>73.1</td><td>55.6</td><td>82.6</td><td>77.4</td><td>81.4</td><td>94.8</td><td>93.7</td><td>96.6</td></tr>
<tr><td>duck</td><td>25.2</td><td>27.9</td><td>43.0</td><td>52.4</td><td>46.9</td><td>48.9</td><td>44.9</td><td>64.5</td><td>69.9</td><td>67.2</td></tr>
<tr><td>eggbox*</td><td>50.2</td><td>52.4</td><td>51.7</td><td>63.0</td><td>54.2</td><td>52.4</td><td>62.7</td><td>70.9</td><td>70.0</td><td>73.5</td></tr>
<tr><td>glue*</td><td>49.6</td><td>53.8</td><td>54.3</td><td>71.7</td><td>75.8</td><td>78.3</td><td>80.2</td><td>88.7</td><td>86.4</td><td>92.0</td></tr>
<tr><td>holepuncher</td><td>39.7</td><td>54.2</td><td>53.6</td><td>52.5</td><td>60.1</td><td>75.3</td><td>74.3</td><td>83.0</td><td>83.8</td><td>85.5</td></tr>
<tr><td>Mean</td><td>40.8</td><td>47.5</td><td>51.6</td><td>55.5</td><td>62.2</td><td>62.3</td><td>66.3</td><td>76.9</td><td>77.5</td><td>79.6</td></tr>
</table>
", + "image_path": "481fef7279a38397312ba15e17794116f09ff211cb010d3974ff71280207d39b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 59, + 342, + 274, + 435 + ], + "blocks": [ + { + "bbox": [ + 46, + 308, + 286, + 341 + ], + "lines": [ + { + "bbox": [ + 46, + 308, + 286, + 341 + ], + "spans": [ + { + "bbox": [ + 46, + 308, + 286, + 341 + ], + "type": "text", + "content": "Table 2. Comparisons with RGB-based 6D object pose estimation methods on the YCB-V dataset. (-) indicates the corresponding result is not reported in the original paper." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 342, + 274, + 435 + ], + "lines": [ + { + "bbox": [ + 59, + 342, + 274, + 435 + ], + "spans": [ + { + "bbox": [ + 59, + 342, + 274, + 435 + ], + "type": "table", + "html": "
<table>
<tr><td>Method</td><td>ADD(-S)</td><td>AUC of ADD-S</td><td>AUC of ADD(-S)</td></tr>
<tr><td>SegDriven [21]</td><td>39.0</td><td>-</td><td>-</td></tr>
<tr><td>SingleStage [22]</td><td>53.9</td><td>-</td><td>-</td></tr>
<tr><td>CosyPose [29]</td><td>-</td><td>89.8</td><td>84.5</td></tr>
<tr><td>RePose [24]</td><td>62.1</td><td>88.5</td><td>82.0</td></tr>
<tr><td>GDR-Net [61]</td><td>60.1</td><td>91.6</td><td>84.4</td></tr>
<tr><td>SO-Pose [8]</td><td>56.8</td><td>90.9</td><td>83.9</td></tr>
<tr><td>ZebraPose [53]</td><td>80.5</td><td>90.1</td><td>85.3</td></tr>
<tr><td>CheckerPose [35]</td><td>81.4</td><td>91.3</td><td>86.4</td></tr>
<tr><td>Ours</td><td>83.8</td><td>91.5</td><td>87.0</td></tr>
</table>
", + "image_path": "21a22844c4fedf440150406bf351ef49223aa0dc64883c425cd53c2d8c624806.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 445, + 286, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 445, + 286, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 445, + 286, + 483 + ], + "type": "text", + "content": "tively. Moreover, the features outputted by the ResNet-34 backbone, combined with features obtained from methods [35, 53], are used as the object features " + }, + { + "bbox": [ + 46, + 445, + 286, + 483 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 46, + 445, + 286, + 483 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 494, + 127, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 494, + 127, + 506 + ], + "spans": [ + { + "bbox": [ + 47, + 494, + 127, + 506 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 514, + 213, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 514, + 213, + 526 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 213, + 526 + ], + "type": "text", + "content": "4.1. Datasets & Evaluation Metrics" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 533, + 286, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 286, + 604 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 286, + 604 + ], + "type": "text", + "content": "Given that previous works [8, 24, 67] have reported the evaluation accuracy over " + }, + { + "bbox": [ + 46, + 533, + 286, + 604 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 46, + 533, + 286, + 604 + ], + "type": "text", + "content": " on the Linemod (LM) dataset [17], the performance on this dataset has become saturated. Thus recent works [4, 53] mainly focus on using the LM-O dataset [2] and the YCB-V dataset [63] that are more challenging, which we follow." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 605, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 286, + 713 + ], + "type": "text", + "content": "LM-O Dataset. The Linemod Occlusion (LM-O) dataset contains 1214 images and is a challenging subset of the LM dataset. In this dataset, around 8 objects are annotated on each image and the objects are often heavily occluded. Following [4, 53], we use both the real images from the LM dataset and the publicly available physically-based rendering (pbr) images [6] as the training images for LM-O. Following [53, 61], on LM-O dataset, we evaluate the model performance using the commonly-used ADD(-S) metric." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 311, + 545, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 311, + 545, + 393 + ], + "spans": [ + { + "bbox": [ + 304, + 311, + 545, + 393 + ], + "type": "text", + "content": "For this metric, we compute the mean distance between the model points transformed using the predicted pose and the same model points transformed using the ground-truth pose. For symmetric objects, following [63], the mean distance is computed based on the closest point distance. 
If the mean distance is less than " + }, + { + "bbox": [ + 304, + 311, + 545, + 393 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 304, + 311, + 545, + 393 + ], + "type": "text", + "content": " of the model diameter, the predicted pose is regarded as correct." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 396, + 545, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 396, + 545, + 527 + ], + "spans": [ + { + "bbox": [ + 304, + 396, + 545, + 527 + ], + "type": "text", + "content": "YCB-V Dataset. The YCB-V dataset is a large-scale dataset containing 21 objects and over 100k real images. The samples in this dataset often exhibit occlusions and cluttered backgrounds. Following [4, 53], we use both the real images from the training set of the YCB-V dataset and the publicly available pbr images as the training images for YCB-V. Following [53, 61], we evaluate the model performance using the following metrics: ADD(-S), AUC (Area Under the Curve) of ADD-S, and AUC of ADD(-S). For calculating AUC, we set the maximum distance threshold to " + }, + { + "bbox": [ + 304, + 396, + 545, + 527 + ], + "type": "inline_equation", + "content": "10\\mathrm{cm}" + }, + { + "bbox": [ + 304, + 396, + 545, + 527 + ], + "type": "text", + "content": " following [63]." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 538, + 438, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 538, + 438, + 551 + ], + "spans": [ + { + "bbox": [ + 306, + 538, + 438, + 551 + ], + "type": "text", + "content": "4.2. Implementation Details" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "text", + "content": "We conduct our experiments on an Nvidia V100 GPU. We set the number of pre-selected 3D keypoints " + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "text", + "content": " to 128. During training, following [34, 53], we utilize the dynamic zoom-in strategy to produce augmented ROI images. During testing, we use the detected bounding box with Faster RCNN [49] and FCOS [57] provided by CDPNv2 [34]. The cropped ROI image is resized to the shape of " + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "3 \\times 256 \\times 256" + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "H = W = 256" + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "text", + "content": "). We characterize " + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "text", + "content": " via a MoC model with 9 Cauchy kernels (" + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "U = 9" + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "text", + "content": ") for the forward diffusion process. 
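The ADD(-S) criterion described above can be written compactly; the sketch below is a generic NumPy implementation of that metric (not code from the paper), using a brute-force nearest-point search for the symmetric ADD-S case.

```python
import numpy as np

def add_s_correct(pts, R_pred, t_pred, R_gt, t_gt, diameter, symmetric=False):
    """Return True if the predicted pose passes the ADD(-S) check.

    pts: (P, 3) object model points; R_*: (3, 3) rotations; t_*: (3,) translations;
    diameter: object model diameter, with the 10% threshold described above.
    """
    pred = pts @ R_pred.T + t_pred
    gt = pts @ R_gt.T + t_gt
    if symmetric:
        # ADD-S: for each ground-truth point, take the closest predicted point.
        dists = np.linalg.norm(gt[:, None, :] - pred[None, :, :], axis=-1).min(axis=1)
    else:
        # ADD: compare corresponding points directly.
        dists = np.linalg.norm(gt - pred, axis=-1)
    return float(dists.mean()) < 0.1 * diameter
```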
We optimize the diffusion model " + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "text", + "content": " for 1500 epochs using the Adam optimizer [28] with an initial learning rate of 4e-5. Moreover, we set the number of sampled sets " + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "text", + "content": " to 5, and the number of diffusion steps " + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 557, + 545, + 713 + ], + "type": "text", + "content": " to" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "9682" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 69, + 103, + 122 + ], + "blocks": [ + { + "bbox": [ + 48, + 69, + 103, + 122 + ], + "lines": [ + { + "bbox": [ + 48, + 69, + 103, + 122 + ], + "spans": [ + { + "bbox": [ + 48, + 69, + 103, + 122 + ], + "type": "image", + "image_path": "b903ced34bd62194809d38eeea7ebb4deac90adda7a6ec6b6b1dd15de31bce23.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 109, + 70, + 164, + 121 + ], + "blocks": [ + { + "bbox": [ + 109, + 70, + 164, + 121 + ], + "lines": [ + { + "bbox": [ + 109, + 70, + 164, + 121 + ], + "spans": [ + { + "bbox": [ + 109, + 70, + 164, + 121 + ], + "type": "image", + "image_path": "f6f8a61b91946b5b4da0d6bd2058734361d49f5d4f4dad94c810fad530f14455.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 171, + 70, + 225, + 121 + ], + "blocks": [ + { + "bbox": [ + 171, + 70, + 225, + 121 + ], + "lines": [ + { + "bbox": [ + 171, + 70, + 225, + 121 + ], + "spans": [ + { + "bbox": [ + 171, + 70, + 225, + 121 + ], + "type": "image", + "image_path": "1392556afa4f233e07d526d0ac630e2647172bb4c7341fb837a8ae7e94d8e0fe.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 232, + 70, + 287, + 121 + ], + "blocks": [ + { + "bbox": [ + 232, + 70, + 287, + 121 + ], + "lines": [ + { + "bbox": [ + 232, + 70, + 287, + 121 + ], + "spans": [ + { + "bbox": [ + 232, + 70, + 287, + 121 + ], + "type": "image", + "image_path": "9413cf6a51c687734673050f9721c34d24fda513a1c75409289e7c7fd8e3ec01.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 48, + 127, + 103, + 178 + ], + "blocks": [ + { + "bbox": [ + 48, + 127, + 103, + 178 + ], + "lines": [ + { + "bbox": [ + 48, + 127, + 103, + 178 + ], + "spans": [ + { + "bbox": [ + 48, + 127, + 103, + 178 + ], + "type": "image", + "image_path": "f861959908df876c547b3fda9766273dec454dc9782689ad252270167f42ff13.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 190, + 287, + 268 + ], + "lines": [ + { + "bbox": [ + 46, + 190, + 287, + 268 + ], + "spans": [ + { + 
"bbox": [ + 46, + 190, + 287, + 268 + ], + "type": "text", + "content": "Figure 5. Qualitative results. Green bounding boxes represent the ground-truth poses and blue bounding boxes represent the predicted poses of our method. As shown, even facing severe occlusions, clutter in the scene or varying environment, our framework can still accurately recover the object poses, showing the effectiveness of our method for handling the noise and indeterminacy caused by various factors in object pose estimation." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 109, + 127, + 164, + 178 + ], + "blocks": [ + { + "bbox": [ + 109, + 127, + 164, + 178 + ], + "lines": [ + { + "bbox": [ + 109, + 127, + 164, + 178 + ], + "spans": [ + { + "bbox": [ + 109, + 127, + 164, + 178 + ], + "type": "image", + "image_path": "bed618827a36a469dfd3434e06bbbf66692ee482a9da0015ba12e6b8f6a884bf.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 171, + 127, + 225, + 178 + ], + "blocks": [ + { + "bbox": [ + 171, + 127, + 225, + 178 + ], + "lines": [ + { + "bbox": [ + 171, + 127, + 225, + 178 + ], + "spans": [ + { + "bbox": [ + 171, + 127, + 225, + 178 + ], + "type": "image", + "image_path": "19561a8956a2b49d227d14d1059e3c4bf04463a967e6703eeb7c05efe7904dd0.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 232, + 127, + 287, + 178 + ], + "blocks": [ + { + "bbox": [ + 232, + 127, + 287, + 178 + ], + "lines": [ + { + "bbox": [ + 232, + 127, + 287, + 178 + ], + "spans": [ + { + "bbox": [ + 232, + 127, + 287, + 178 + ], + "type": "image", + "image_path": "40ce7df9b492104dfd35e3a68336179bde926a73fbd8a86faa6c71e5fd6b68ba.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 275, + 287, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 275, + 287, + 348 + ], + "spans": [ + { + "bbox": [ + 46, + 275, + 287, + 348 + ], + "type": "text", + "content": "100. Following [53], we use Progressive-X [1] as the PnP solver. Note that during testing, instead of performing the reverse process with all the " + }, + { + "bbox": [ + 46, + 275, + 287, + 348 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 275, + 287, + 348 + ], + "type": "text", + "content": " steps, we accelerate the process with DDIM [52], a recently proposed diffusion acceleration method. With DDIM acceleration, we only need to perform 10 steps to finish the reverse process during testing." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 356, + 270, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 356, + 270, + 369 + ], + "spans": [ + { + "bbox": [ + 47, + 356, + 270, + 369 + ], + "type": "text", + "content": "4.3. Comparison with State-of-the-art Methods" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 374, + 287, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 374, + 287, + 457 + ], + "spans": [ + { + "bbox": [ + 46, + 374, + 287, + 457 + ], + "type": "text", + "content": "Results on LM-O Dataset. As shown in Tab. 1, compared to existing methods, our method achieves the best mean performance, showing the superiority of our method. We also show qualitative results on the LM-O dataset in Fig. 5. 
As shown, even in the presence of large occlusions (including self-occlusions) and cluttered backgrounds, our method still produces accurate predictions." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 459, + 287, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 459, + 287, + 518 + ], + "spans": [ + { + "bbox": [ + 46, + 459, + 287, + 518 + ], + "type": "text", + "content": "Results on YCB-V Dataset. As shown in Tab. 2, our framework achieves the best performance on both the ADD(-S) and the AUC of ADD(-S) metrics, and is comparable to the state-of-the-art method on the AUC of ADD-S metric, showing the effectiveness of our method." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 527, + 147, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 147, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 147, + 538 + ], + "type": "text", + "content": "4.4. Ablation Studies" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 545, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 287, + 581 + ], + "type": "text", + "content": "We conduct extensive ablation experiments on the LM-O dataset, and we report the model performance on ADD(-S) metric averaged over all the objects." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 582, + 181, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 181, + 676 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 181, + 676 + ], + "type": "text", + "content": "Impact of denoising process. In our framework, we predict keypoints coordinates via performing the denoising process. To evaluate the efficacy of this process, we test three variants. In the first variant (Variant A), we remove the diffusion model" + } + ] + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 192, + 617, + 287, + 674 + ], + "blocks": [ + { + "bbox": [ + 189, + 584, + 287, + 616 + ], + "lines": [ + { + "bbox": [ + 189, + 584, + 287, + 616 + ], + "spans": [ + { + "bbox": [ + 189, + 584, + 287, + 616 + ], + "type": "text", + "content": "Table 3. Evaluation on the effectiveness of the denoising process." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 192, + 617, + 287, + 674 + ], + "lines": [ + { + "bbox": [ + 192, + 617, + 287, + 674 + ], + "spans": [ + { + "bbox": [ + 192, + 617, + 287, + 674 + ], + "type": "table", + "html": "
<table>
<tr><td>Method</td><td>ADD(-S)</td></tr>
<tr><td>Variant A</td><td>49.2</td></tr>
<tr><td>Variant B</td><td>57.3</td></tr>
<tr><td>Variant C</td><td>61.1</td></tr>
<tr><td>6D-Diff</td><td>79.6</td></tr>
</table>
", + "image_path": "7bacf8a365e25ae6fcc0afc71753e368414288d184ae4124d81596ca2aee8244.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 677, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 712 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 47, + 677, + 287, + 712 + ], + "type": "text", + "content": " and predict keypoints coordinates directly from the heatmaps produced by the keypoints distribution initializer. The second variant (Variant " + }, + { + "bbox": [ + 47, + 677, + 287, + 712 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 47, + 677, + 287, + 712 + ], + "type": "text", + "content": ") has the same model architec" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": "ture as our framework, but the diffusion model is optimized to directly predict the coordinates instead of learning the reverse process. Same as Variant " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": ", the third variant (Variant " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": ") is also optimized to directly predict coordinates without denoising process. For Variant " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": ", we stack our diffusion model structure multiple times to produce a deep network, which has similar computation complexity with our framework. As shown in Tab. 3, compared to our framework, the performance of these variants significantly drops, showing that the effectiveness of our framework mainly lies in the designed denoising process." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 204, + 440, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 204, + 440, + 275 + ], + "spans": [ + { + "bbox": [ + 305, + 204, + 440, + 275 + ], + "type": "text", + "content": "Impact of object appearance features " + }, + { + "bbox": [ + 305, + 204, + 440, + 275 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 305, + 204, + 440, + 275 + ], + "type": "text", + "content": " In our framework, we send the appearance features " + }, + { + "bbox": [ + 305, + 204, + 440, + 275 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 305, + 204, + 440, + 275 + ], + "type": "text", + "content": " into the diffusion model " + }, + { + "bbox": [ + 305, + 204, + 440, + 275 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 305, + 204, + 440, + 275 + ], + "type": "text", + "content": " to aid the reverse process. 
To evaluate its effect," + } + ] + } + ], + "index": 20 + }, + { + "type": "table", + "bbox": [ + 451, + 242, + 533, + 274 + ], + "blocks": [ + { + "bbox": [ + 447, + 208, + 545, + 241 + ], + "lines": [ + { + "bbox": [ + 447, + 208, + 545, + 241 + ], + "spans": [ + { + "bbox": [ + 447, + 208, + 545, + 241 + ], + "type": "text", + "content": "Table 4. Evaluation on the effectiveness of the object appearance features " + }, + { + "bbox": [ + 447, + 208, + 545, + 241 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 451, + 242, + 533, + 274 + ], + "lines": [ + { + "bbox": [ + 451, + 242, + 533, + 274 + ], + "spans": [ + { + "bbox": [ + 451, + 242, + 533, + 274 + ], + "type": "table", + "html": "
<table>
<tr><td>Method</td><td>ADD(-S)</td></tr>
<tr><td>w/o f_app</td><td>74.4</td></tr>
<tr><td>6D-Diff</td><td>79.6</td></tr>
</table>
", + "image_path": "479485e488975fa8d81d403a39dcd7c24ada569bfce8a0b863238f459f3dac0b.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "table_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "text", + "content": "we test a variant in which we do not send " + }, + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "inline_equation", + "content": "w / o \\, f_{\\mathrm{app}}" + }, + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "text", + "content": "). As shown in Tab. 4, our framework performs better than this variant, showing that " + }, + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{app}}" + }, + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "text", + "content": " can aid " + }, + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 305, + 276, + 545, + 323 + ], + "type": "text", + "content": " to get more accurate predictions." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 324, + 392, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 324, + 392, + 394 + ], + "spans": [ + { + "bbox": [ + 306, + 324, + 392, + 394 + ], + "type": "text", + "content": "Impact of MoC design. During training, we model the distribution " + }, + { + "bbox": [ + 306, + 324, + 392, + 394 + ], + "type": "inline_equation", + "content": "D_K" + }, + { + "bbox": [ + 306, + 324, + 392, + 394 + ], + "type": "text", + "content": " from the intermediate representation" + } + ] + } + ], + "index": 24 + }, + { + "type": "table", + "bbox": [ + 403, + 348, + 545, + 388 + ], + "blocks": [ + { + "bbox": [ + 400, + 326, + 545, + 347 + ], + "lines": [ + { + "bbox": [ + 400, + 326, + 545, + 347 + ], + "spans": [ + { + "bbox": [ + 400, + 326, + 545, + 347 + ], + "type": "text", + "content": "Table 5. Evaluation on the effectiveness of the MoC design." + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 403, + 348, + 545, + 388 + ], + "lines": [ + { + "bbox": [ + 403, + 348, + 545, + 388 + ], + "spans": [ + { + "bbox": [ + 403, + 348, + 545, + 388 + ], + "type": "table", + "html": "
<table>
<tr><td>Method</td><td>ADD(-S)</td></tr>
<tr><td>Standard diffusion w/o MoC</td><td>73.1</td></tr>
<tr><td>Heatmaps as condition</td><td>76.2</td></tr>
<tr><td>6D-Diff</td><td>79.6</td></tr>
</table>
", + "image_path": "8fbb26489f136caeefd614faf3467693341cb2af5a56a7d4ab853d9272ec87ed.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "table_body" + } + ], + "index": 26 + }, + { + "bbox": [ + 304, + 395, + 545, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 395, + 545, + 551 + ], + "spans": [ + { + "bbox": [ + 304, + 395, + 545, + 551 + ], + "type": "text", + "content": "(heatmaps) as a MoC distribution " + }, + { + "bbox": [ + 304, + 395, + 545, + 551 + ], + "type": "inline_equation", + "content": "\\hat{D}_K" + }, + { + "bbox": [ + 304, + 395, + 545, + 551 + ], + "type": "text", + "content": ", and train the diffusion model " + }, + { + "bbox": [ + 304, + 395, + 545, + 551 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 304, + 395, + 545, + 551 + ], + "type": "text", + "content": " to perform the reverse process from " + }, + { + "bbox": [ + 304, + 395, + 545, + 551 + ], + "type": "inline_equation", + "content": "\\hat{D}_K" + }, + { + "bbox": [ + 304, + 395, + 545, + 551 + ], + "type": "text", + "content": ". To investigate the impact of this design, we evaluate two variants that train " + }, + { + "bbox": [ + 304, + 395, + 545, + 551 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{diff}}" + }, + { + "bbox": [ + 304, + 395, + 545, + 551 + ], + "type": "text", + "content": " in different ways. In the first variant (Standard diffusion w/o MoC), we train the model to start the reverse process from the standard Gaussian noise, i.e., following the basic forward process in Eq. (3) for model training. In the second variant (Heatmaps as condition), we still train the model to start denoising from the random Gaussian noise but we use the heatmaps as the condition for the reverse process. As shown in Tab. 5, our framework consistently outperforms both variants, showing effectiveness of the designed MoC-based forward process." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 306, + 562, + 378, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 562, + 378, + 574 + ], + "spans": [ + { + "bbox": [ + 306, + 562, + 378, + 574 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 304, + 582, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 676 + ], + "type": "text", + "content": "In this paper, we proposed a novel diffusion-based 6D object pose estimation framework, which effectively handles noise and indeterminacy in object pose estimation. In our framework, we formulate object keypoints detection as a carefully-designed reverse diffusion process. We design a novel MoC-based forward process to effectively utilize the distribution priors in intermediate representations. Our framework achieves superior performance." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgement. This work was supported by the National Research Foundation Singapore under the AI Singapore Programme (Award Number: AISG-100E-2023-121)." 
+ } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9683" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 289, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 289, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 289, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 289, + 135 + ], + "type": "text", + "content": "[1] Daniel Barath and Jiri Matas. Progressive-x: Efficient, anytime, multi-model fitting algorithm. In Proceedings of the IEEE/CVF international conference on computer vision, pages 3780-3788, 2019. 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 203 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 203 + ], + "type": "text", + "content": "[2] Eric Brachmann, Frank Michel, Alexander Krull, Michael Ying Yang, Stefan Gumhold, et al. Uncertainty-driven 6d pose estimation of objects and scenes from a single rgb image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3364-3372, 2016. 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 204, + 288, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 204, + 288, + 259 + ], + "spans": [ + { + "bbox": [ + 53, + 204, + 288, + 259 + ], + "type": "text", + "content": "[3] Benjamin Busam, Marco Esposito, Simon Che'Rose, Nassir Navab, and Benjamin Frisch. A stereo vision approach for cooperative robotic movement therapy. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 127-135, 2015. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 261, + 288, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 261, + 288, + 316 + ], + "spans": [ + { + "bbox": [ + 53, + 261, + 288, + 316 + ], + "type": "text", + "content": "[4] Pedro Castro and Tae-Kyun Kim. Crt-6d: Fast 6d object pose estimation with cascaded refinement transformers. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 5746-5755, 2023. 1, 2, 3, 4, 6, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 317, + 288, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 317, + 288, + 373 + ], + "spans": [ + { + "bbox": [ + 53, + 317, + 288, + 373 + ], + "type": "text", + "content": "[5] Bo Chen, Alvaro Parra, Jiewei Cao, Nan Li, and Tat-Jun Chin. End-to-end learnable geometric vision by backpropagating pnp optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8100-8109, 2020. 
1, 2, 3, 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 374, + 288, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 374, + 288, + 418 + ], + "spans": [ + { + "bbox": [ + 53, + 374, + 288, + 418 + ], + "type": "text", + "content": "[6] Maximilian Denninger, Martin Sundermeyer, Dominik Winkelbauer, Youssef Zidan, Dmitry Olefir, Mohamad Elbadrawy, Ahsan Lodhi, and Harinandan Katam. Blenderproc. arXiv preprint arXiv:1911.01911, 2019. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 419, + 288, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 419, + 288, + 453 + ], + "spans": [ + { + "bbox": [ + 53, + 419, + 288, + 453 + ], + "type": "text", + "content": "[7] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021. 1, 2, 3, 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 454, + 288, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 454, + 288, + 510 + ], + "spans": [ + { + "bbox": [ + 53, + 454, + 288, + 510 + ], + "type": "text", + "content": "[8] Yan Di, Fabian Manhardt, Gu Wang, Xiangyang Ji, Nassir Navab, and Federico Tombari. So-pose: Exploiting self-occlusion for direct 6d pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12396–12405, 2021. 1, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 510, + 288, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 510, + 288, + 544 + ], + "spans": [ + { + "bbox": [ + 53, + 510, + 288, + 544 + ], + "type": "text", + "content": "[9] Lin Geng Foo, Hossein Rahmani, and Jun Liu. Aigc for various data modalities: A survey. arXiv preprint arXiv:2308.14177, 2023. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 545, + 288, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 545, + 288, + 600 + ], + "spans": [ + { + "bbox": [ + 48, + 545, + 288, + 600 + ], + "type": "text", + "content": "[10] Xiao-Shan Gao, Xiao-Rong Hou, Jianliang Tang, and Hang-Fei Cheng. Complete solution classification for the perspective-three-point problem. IEEE transactions on pattern analysis and machine intelligence, 25(8):930-943, 2003. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 601, + 288, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 288, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 288, + 656 + ], + "type": "text", + "content": "[11] Jia Gong, Lin Geng Foo, Zhipeng Fan, Qiuhong Ke, Hossein Rahmani, and Jun Liu. Diffpose: Toward more reliable 3d pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13041-13051, 2023. 1, 3, 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 658, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 288, + 714 + ], + "type": "text", + "content": "[12] Tianpei Gu, Guangyi Chen, Junlong Li, Chunze Lin, Yongming Rao, Jie Zhou, and Jiwen Lu. Stochastic trajectory prediction via motion indeterminacy diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17113-17122, 2022. 
1, 3, 4" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 129 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 129 + ], + "type": "text", + "content": "[13] Shuxuan Guo, Yinlin Hu, Jose M Alvarez, and Mathieu Salzmann. Knowledge distillation for 6d pose estimation by aligning distributions of local predictions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18633-18642, 2023. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 129, + 546, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 546, + 174 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 546, + 174 + ], + "type": "text", + "content": "[14] Yang Hai, Rui Song, Jiaojiao Li, and Yinlin Hu. Shape-constraint recurrent flow for 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4831-4840, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 175, + 547, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 547, + 229 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 547, + 229 + ], + "type": "text", + "content": "[15] Yang Hai, Rui Song, Jiaojiao Li, Mathieu Salzmann, and Yinlin Hu. Rigidity-aware detection for 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8927-8936, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 230, + 546, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 230, + 546, + 286 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 546, + 286 + ], + "type": "text", + "content": "[16] Rasmus Laurvig Haugaard and Anders Glent Buch. Surfemb: Dense and continuous correspondence distributions for object pose estimation with learnt surface embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6749-6758, 2022. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 287, + 547, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 287, + 547, + 365 + ], + "spans": [ + { + "bbox": [ + 307, + 287, + 547, + 365 + ], + "type": "text", + "content": "[17] Stefan Hinterstoisser, Vincent Lepetit, Slobodan Ilic, Stefan Holzer, Gary Bradski, Kurt Konolige, and Nassir Navab. Model based training, detection and pose estimation of texture-less 3d objects in heavily cluttered scenes. In Computer Vision-ACCV 2012: 11th Asian Conference on Computer Vision, Daejeon, Korea, November 5-9, 2012, Revised Selected Papers, Part I 11, pages 548-562. Springer, 2013. 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 365, + 546, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 365, + 546, + 409 + ], + "spans": [ + { + "bbox": [ + 307, + 365, + 546, + 409 + ], + "type": "text", + "content": "[18] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, pages 6840-6851. Curran Associates, Inc., 2020. 
1, 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 410, + 546, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 410, + 546, + 454 + ], + "spans": [ + { + "bbox": [ + 307, + 410, + 546, + 454 + ], + "type": "text", + "content": "[19] Tomas Hodan, Daniel Barath, and Jiri Matas. Epos: Estimating 6d pose of objects with symmetries. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11703-11712, 2020. 1, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 456, + 546, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 546, + 499 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 546, + 499 + ], + "type": "text", + "content": "[20] Tsu-Ching Hsiao, Hao-Wei Chen, Hsuan-Kung Yang, and Chun-Yi Lee. Confronting ambiguity in 6d object pose estimation via score-based diffusion on se (3). arXiv preprint arXiv:2305.15873, 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 500, + 546, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 500, + 546, + 555 + ], + "spans": [ + { + "bbox": [ + 307, + 500, + 546, + 555 + ], + "type": "text", + "content": "[21] Yinlin Hu, Joachim Hugonot, Pascal Fua, and Mathieu Salzmann. Segmentation-driven 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3385-3394, 2019. 4, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 556, + 547, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 556, + 547, + 601 + ], + "spans": [ + { + "bbox": [ + 307, + 556, + 547, + 601 + ], + "type": "text", + "content": "[22] Yinlin Hu, Pascal Fua, Wei Wang, and Mathieu Salzmann. Single-stage 6d object pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2930-2939, 2020. 1, 3, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 601, + 546, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 601, + 546, + 656 + ], + "spans": [ + { + "bbox": [ + 307, + 601, + 546, + 656 + ], + "type": "text", + "content": "[23] Shun Iwase, Xingyu Liu, Rawal Khirodkar, Rio Yokota, and Kris M. Kitani. Repose: Fast 6d object pose refinement via deep texture rendering. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 3303-3312, 2021. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 658, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 547, + 713 + ], + "type": "text", + "content": "[24] Shun Iwase, Xingyu Liu, Rawal Khirodkar, Rio Yokota, and Kris M Kitani. Repose: Fast 6d object pose refinement via deep texture rendering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3303-3312, 2021. 
7" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "9684" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[25] Haobo Jiang, Mathieu Salzmann, Zheng Dang, Jin Xie, and Jian Yang. Se (3) diffusion model-based point cloud registration for robust 6d object pose estimation. Advances in Neural Information Processing Systems, 36, 2024. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 286, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 286, + 149 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 286, + 149 + ], + "type": "text", + "content": "[26] Zakiah I. Kalantan and Jochen Einbeck. Quantile-based estimation of the finite cauchy mixture model. Symmetry, 11 (9), 2019. 2, 4, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 151, + 287, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 151, + 287, + 205 + ], + "spans": [ + { + "bbox": [ + 49, + 151, + 287, + 205 + ], + "type": "text", + "content": "[27] Wadim Kehl, Fabian Manhardt, Federico Tombari, Slobodan Ilic, and Nassir Navab. Ssd-6d: Making rgb-based 3d detection and 6d pose estimation great again. In Proceedings of the IEEE international conference on computer vision, pages 1521–1529, 2017. 1, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 206, + 286, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 206, + 286, + 237 + ], + "spans": [ + { + "bbox": [ + 49, + 206, + 286, + 237 + ], + "type": "text", + "content": "[28] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 239, + 286, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 239, + 286, + 303 + ], + "spans": [ + { + "bbox": [ + 49, + 239, + 286, + 303 + ], + "type": "text", + "content": "[29] Yann Labbe, Justin Carpentier, Mathieu Aubry, and Josef Sivic. Cosypose: Consistent multi-view multi-object 6d pose estimation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVII 16, pages 574-591. Springer, 2020. 4, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 305, + 286, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 305, + 286, + 349 + ], + "spans": [ + { + "bbox": [ + 49, + 305, + 286, + 349 + ], + "type": "text", + "content": "[30] Junhyeok Lee, Junghwa Kang, Yoonho Nam, and TaeYoung Lee. Bias field correction in MRI with hampel noise denoising diffusion probabilistic model. In Medical Imaging with Deep Learning, short paper track, 2023. 
3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 350, + 286, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 350, + 286, + 392 + ], + "spans": [ + { + "bbox": [ + 49, + 350, + 286, + 392 + ], + "type": "text", + "content": "[31] Vincent Lepetit, Francesc Moreno-Noguer, and Pascal Fua. Ep n p: An accurate o (n) solution to the p np problem. International journal of computer vision, 81:155-166, 2009. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 394, + 286, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 394, + 286, + 448 + ], + "spans": [ + { + "bbox": [ + 49, + 394, + 286, + 448 + ], + "type": "text", + "content": "[32] Hongyang Li, Jiehong Lin, and Kui Jia. Dcl-net: Deep correspondence learning network for 6d pose estimation. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part IX, pages 369-385. Springer, 2022. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 449, + 286, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 449, + 286, + 492 + ], + "spans": [ + { + "bbox": [ + 49, + 449, + 286, + 492 + ], + "type": "text", + "content": "[33] Yi Li, Gu Wang, Xiangyang Ji, Yu Xiang, and Dieter Fox. Deepim: Deep iterative matching for 6d pose estimation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 683-698, 2018. 3, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 494, + 286, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 494, + 286, + 547 + ], + "spans": [ + { + "bbox": [ + 49, + 494, + 286, + 547 + ], + "type": "text", + "content": "[34] Zhigang Li, Gu Wang, and Xiangyang Ji. Cdpn: Coordinates-based disentangled pose network for real-time rgb-based 6-dof object pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7678-7687, 2019. 4, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 548, + 286, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 548, + 286, + 602 + ], + "spans": [ + { + "bbox": [ + 49, + 548, + 286, + 602 + ], + "type": "text", + "content": "[35] Ruyi Lian and Haibin Ling. Checkerpose: Progressive dense keypoint localization for object pose estimation with graph neural network. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14022-14033, 2023. 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 604, + 286, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 604, + 286, + 658 + ], + "spans": [ + { + "bbox": [ + 49, + 604, + 286, + 658 + ], + "type": "text", + "content": "[36] Xingyu Liu, Ruida Zhang, Chenyangguang Zhang, Bowen Fu, Jiwen Tang, Xiquan Liang, Jingyi Tang, Xiaotian Cheng, Yukang Zhang, Gu Wang, and Xiangyang Ji. Gdnpp. https://github.com/shanice-1/gdrnpp_bop2022, 2022.3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 659, + 286, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 659, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 49, + 659, + 286, + 713 + ], + "type": "text", + "content": "[37] Andreas Lugmayr, Martin Danelljan, Andres Romero, Fisher Yu, Radu Timofte, and Luc Van Gool. Repaint: Inpainting using denoising diffusion probabilistic models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11461-11471, 2022. 
3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "type": "text", + "content": "[38] Fabian Manhardt, Wadim Kehl, Nassir Navab, and Federico Tombari. Deep model-based 6d pose refinement in rgb. In The European Conference on Computer Vision (ECCV), 2018. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 118, + 545, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 118, + 545, + 160 + ], + "spans": [ + { + "bbox": [ + 308, + 118, + 545, + 160 + ], + "type": "text", + "content": "[39] Eric Marchand, Hideaki Uchiyama, and Fabien Spindler. Pose estimation for augmented reality: a hands-on survey. IEEE transactions on visualization and computer graphics, 22(12):2633-2651, 2015. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 162, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 162, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 308, + 162, + 545, + 194 + ], + "type": "text", + "content": "[40] Jianhan Mei, Xudong Jiang, and Henghui Ding. Spatial feature mapping for 6 dof object pose estimation. Pattern Recognition, 131:108835, 2022. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 195, + 545, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 195, + 545, + 248 + ], + "spans": [ + { + "bbox": [ + 308, + 195, + 545, + 248 + ], + "type": "text", + "content": "[41] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. Sdedit: Guided image synthesis and editing with stochastic differential equations. In International Conference on Learning Representations, 2021. 1, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 250, + 545, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 250, + 545, + 294 + ], + "spans": [ + { + "bbox": [ + 308, + 250, + 545, + 294 + ], + "type": "text", + "content": "[42] Markus Oberweger, Mahdi Rad, and Vincent Lepetit. Making deep heatmaps robust to partial occlusions for 3d object pose estimation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 119-134, 2018. 3, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 295, + 545, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 295, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 308, + 295, + 545, + 338 + ], + "type": "text", + "content": "[43] Kiru Park, Timothy Patten, and Markus Vincze. Pix2pose: Pixel-wise coordinate regression of objects for 6d pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7668-7677, 2019. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 339, + 545, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 339, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 308, + 339, + 545, + 392 + ], + "type": "text", + "content": "[44] Sida Peng, Yuan Liu, Qixing Huang, Xiaowei Zhou, and Hujun Bao. Pvnet: Pixel-wise voting network for 6dof pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4561-4570, 2019. 
1, 2, 3, 4, 6, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 394, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 394, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 308, + 394, + 545, + 437 + ], + "type": "text", + "content": "[45] Luis Pérez, Inigo Rodríguez, Nuria Rodríguez, Rubén Usamentiaga, and Daniel F García. Robot guidance using machine vision techniques in industrial environments: A comparative review. Sensors, 16(3):335, 2016. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 438, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 438, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 308, + 438, + 545, + 491 + ], + "type": "text", + "content": "[46] Mahdi Rad and Vincent Lepetit. Bb8: A scalable, accurate, robust to partial occlusion method for predicting the 3d poses of challenging objects without using depth. In Proceedings of the IEEE international conference on computer vision, pages 3828-3836, 2017. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "type": "text", + "content": "[47] Jason Raphael Rambach, Alain Pagani, Michael Schneider, Oleksandr Artemenko, and Didier Stricker. 6dof object tracking based on 3d scans for augmented reality remote live support. Comput., 7:6, 2018. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 537, + 545, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 545, + 568 + ], + "type": "text", + "content": "[48] Hong Ren, Lin Lin, Yanjie Wang, and Xin Dong. Robust 6-dof pose estimation under hybrid constraints. Sensors, 22 (22):8758, 2022. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 571, + 545, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 571, + 545, + 613 + ], + "spans": [ + { + "bbox": [ + 308, + 571, + 545, + 613 + ], + "type": "text", + "content": "[49] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 4, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 615, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 615, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 308, + 615, + 545, + 667 + ], + "type": "text", + "content": "[50] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pages 2256-2265. PMLR, 2015. 2, 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "text", + "content": "[51] Chen Song, Jiaru Song, and Qixing Huang. Hybridpose: 6d object pose estimation under hybrid representations. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 431-440, 2020. 
7" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "9685" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[52] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021. 1, 2, 3, 4, 5, 6, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 288, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 288, + 173 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 288, + 173 + ], + "type": "text", + "content": "[53] Yongzhi Su, Mahdi Saleh, Torben Fetzer, Jason Rambach, Nassir Navab, Benjamin Busam, Didier Stricker, and Federico Tombari. Zebrapose: Coarse to fine surface encoding for 6 dof object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6738-6748, 2022. 1, 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "type": "text", + "content": "[54] Martin Sundermeyer, Zoltán-Csaba Marton, Maximilian Durner, Manuel Brucker, and Rudolph Triebel. Implicit 3d orientation learning for 6d object detection from rgb images. In European Conference on Computer Vision, 2018. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 219, + 287, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 219, + 287, + 240 + ], + "spans": [ + { + "bbox": [ + 48, + 219, + 287, + 240 + ], + "type": "text", + "content": "[55] Mahdi Teimouri. Statistical inference for mixture of cauchy distributions. arXiv preprint arXiv:1809.05722, 2018. 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 241, + 287, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 287, + 285 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 287, + 285 + ], + "type": "text", + "content": "[56] Bugra Tekin, Sudipta N Sinha, and Pascal Fua. Real-time seamless single shot 6d object pose prediction. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 292-301, 2018. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 287, + 287, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 287, + 287, + 330 + ], + "spans": [ + { + "bbox": [ + 48, + 287, + 287, + 330 + ], + "type": "text", + "content": "[57] Zhi Tian, Chunhua Shen, Hao Chen, and Tong He. Fcos: Fully convolutional one-stage object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9627-9636, 2019. 
6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 332, + 287, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 332, + 287, + 386 + ], + "spans": [ + { + "bbox": [ + 48, + 332, + 287, + 386 + ], + "type": "text", + "content": "[58] Julien Urain, Niklas Funk, Jan Peters, and Georgia Chalvatzaki. Se(3)-diffusionfields: Learning smooth cost functions for joint grasp and motion optimization through diffusion. IEEE International Conference on Robotics and Automation (ICRA), 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 388, + 287, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 287, + 431 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 287, + 431 + ], + "type": "text", + "content": "[59] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 434, + 287, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 434, + 287, + 477 + ], + "spans": [ + { + "bbox": [ + 48, + 434, + 287, + 477 + ], + "type": "text", + "content": "[60] Gu Wang, Fabian Manhardt, Xingyu Liu, Xiangyang Ji, and Federico Tombari. Occlusion-aware self-supervised monocular 6d object pose estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 478, + 287, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 478, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 48, + 478, + 287, + 533 + ], + "type": "text", + "content": "[61] Gu Wang, Fabian Manhardt, Federico Tombari, and Xi-angyang Ji. Gdr-net: Geometry-guided direct regression network for monocular 6d object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16611-16621, 2021. 1, 3, 4, 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 535, + 287, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 535, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 48, + 535, + 287, + 589 + ], + "type": "text", + "content": "[62] Di Wu, Zhaoyong Zhuang, Canqun Xiang, Wenbin Zou, and Xia Li. 6d-vnet: End-to-end 6-dof vehicle pose estimation from monocular rgb images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 590, + 287, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 623 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 623 + ], + "type": "text", + "content": "[63] Yu Xiang, Tanner Schmidt, Venkatraman Narayanan, and Dieter Fox. PoseCNN: A convolutional neural network for 6d object pose estimation in cluttered scenes. 2018. 1, 3, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "type": "text", + "content": "[64] Yan Xu, Kwan-Yee Lin, Guofeng Zhang, Xiaogang Wang, and Hongsheng Li. Rnnpose: Recurrent 6-dof object pose refinement with robust correspondence field estimation and pose optimization. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022. 1, 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[65] Heng Yang and Marco Pavone. Object pose estimation with statistical guarantees: Conformal keypoint detection" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 546, + 205 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 327, + 73, + 546, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 546, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 546, + 106 + ], + "type": "text", + "content": "and geometric uncertainty propagation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8947-8958, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 107, + 546, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 546, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 546, + 162 + ], + "type": "text", + "content": "[66] Jun Yang, Wenjie Xue, Sahar Ghavidel, and Steven L Waslander. 6d pose estimation for textureless objects on rgb frames using multi-view optimization. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 2905-2912. IEEE, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 163, + 546, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 546, + 205 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 546, + 205 + ], + "type": "text", + "content": "[67] Sergey Zakharov, Ivan S. Shugurov, and Slobodan Ilic. Dpod: 6d pose object detector and refiner. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 1941-1950, 2019. 
3, 7" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "9686" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/b19cc746-13a8-4ac2-b79c-a1691351681c_content_list.json b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/b19cc746-13a8-4ac2-b79c-a1691351681c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..88fd225403b31c13dd48815e851f7db9d9f61a60 --- /dev/null +++ b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/b19cc746-13a8-4ac2-b79c-a1691351681c_content_list.json @@ -0,0 +1,1378 @@ +[ + { + "type": "text", + "text": "A Backpack Full of Skills: Egocentric Video Understanding with Diverse Task Perspectives", + "text_level": 1, + "bbox": [ + 161, + 130, + 807, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Simone Alberto Peirone1 Francesca Pistilli1 Antonio Alliegro1,2 Giuseppe Averta1 \n1 Politecnico di Torino, 2 Istituto Italiano di Tecnologia \nfirstname.lastname@polito.it", + "bbox": [ + 130, + 202, + 834, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 291, + 313, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Human comprehension of a video stream is naturally broad: in a few instants, we are able to understand what is happening, the relevance and relationship of objects, and forecast what will follow in the near future, everything all at once. We believe that - to effectively transfer such an holistic perception to intelligent machines - an important role is played by learning to correlate concepts and to abstract knowledge coming from different tasks, to synergistically exploit them when learning novel skills. To accomplish this, we look for a unified approach to video understanding which combines shared temporal modelling of human actions with minimal overhead, to support multiple downstream tasks and enable cooperation when learning novel skills. We then propose EgoPack, a solution that creates a collection of task perspectives that can be carried across downstream tasks and used as a potential source of additional insights, as a backpack of skills that a robot can carry around and use when needed. We demonstrate the effectiveness and efficiency of our approach on four Ego4D benchmarks, outperforming current state-of-the-art methods. Project webpage: sapeirone.github.io/EgoPack.", + "bbox": [ + 75, + 323, + 473, + 642 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 662, + 209, + 679 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Our daily living activities are extremely complex and diverse, nonetheless humans have the extraordinary ability to reason on the behaviour itself in just a few instants from a visual input. 
We are able to spot what another person is doing, predict their next actions based on current observations, and understand the implications of an activity, for instance whether its effects are reversible. Observing someone in the kitchen by the worktable, where there is a pack of flour and a jug of water, we can identify that they are a chef kneading flour (reasoning about current activity). We can also forecast that the next step will involve mixing the flour with water (reasoning about the future), and finally obtaining dough (reasoning about implications of these actions). This type of holistic reasoning, which is natural for humans,", + "bbox": [ + 75, + 688, + 470, + 901 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0d10b8eac08614be2ccc8d3f6eaefd5c661cee07f42c1a02dcdd3b319a3eb533.jpg", + "image_caption": [ + "Figure 1. Given a video stream, a robot is asked to learn a novel task, e.g. Object State Change Classification (OSCC). To learn the new skill, the robot can access previously gained knowledge regarding different tasks, such as Point of No Return (PNR), Long Term Anticipation (LTA) and Action Recognition (AR), and use it during the learning process to enhance downstream task performance. This knowledge is stored as graphs inside the robot's backpack, always ready to boost a new skill." + ], + "image_footnote": [], + "bbox": [ + 514, + 291, + 883, + 554 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "is still a distant goal for artificial intelligence systems. The challenge arises not only from the requirement of executing multiple tasks with a single architecture, but also from the necessity of being able to abstract and repurpose such knowledge across-tasks, for example to foster and enhance the learning of novel skills. Current research trends in human activity understanding predominantly focus on creating several, hyper-specialised, models. This approach splits the understanding of human activities into distinct skills, with each model being independently trained to rely only on \"task-specific\" clues for prediction [69, 73, 76]. However, this approach disregards the valuable insights that could be gleaned from different task perspectives. A first step in this direction relies on Multi-Task Learning (MTL) to exploit", + "bbox": [ + 496, + 688, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "18275", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the intuition that knowledge sharing between tasks may improve performance. However, the proposed multi-task models have some limitations [40], mostly concerning a negative transfer between tasks, making it difficult to outperform single-task models. 
Most importantly, MTL usually assumes the availability of supervision for all tasks at training time, limiting the extension of the models at a later time.", + "bbox": [ + 75, + 90, + 467, + 196 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The recently proposed EgoT2 framework [68] offers a unified solution to integrate various egocentric video tasks. It employs an ensemble of diverse, task-specific models and learns to translate task-specific clues through a transformer-based encoder-decoder to benefit one of the tasks. Although this approach fosters positive interactions between tasks, it has significant limitations: i) the primary task should be \"known\" at training time and present within the task-specific models collection, ii) it necessitates an extensive pretraining process and iii) it lacks a knowledge abstraction, as it relies on task-specific models rather than creating transferable concepts.", + "bbox": [ + 75, + 199, + 467, + 380 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Indeed, we argue that an important key to advance the learning capabilities of intelligent systems and to move a step closer to a generalised reasoning on visual understanding involves not only sharing information across tasks, but also abstracting task-specific knowledge for application in new scenarios. Considering an ensemble of vision tasks, each offers a distinct perspective on the input stream and extracts different types of information. Our goal is to encapsulate this diverse knowledge to be leveraged in the future to positively impact the learning of a novel skill. We focus on egocentric video understanding as it is the perfect harbour to study human activities and synergies between tasks. There is a strong connection between egocentric tasks. For instance, specific actions, like peeling a potato, directly result in a change in the state of the object (the potato in this case), illustrating the interconnected nature of these tasks.", + "bbox": [ + 75, + 382, + 467, + 625 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "All the above considerations motivate us in investigating new alternatives and we propose a novel framework for knowledge abstraction and sharing called EgoPack. Our underlying idea, is to exploit a set of known tasks, each one able to interpret an input stream according to its own perspective, to learn reusable knowledge that can aid the learning process of a novel task. We show this concept in Fig. 1, where a robot is equipped with a backpack that figuratively summarises all the knowledge gained from a set of tasks. To learn a new skill, the robot can \"take-out\" task-related knowledge from the backpack and leverage it within the learning process. The task-specific perspectives are collected in a single pretraining step of a novel multi-task network under the form of prototypes. We exploit a new versatile temporal graph-based architecture shared across all the tasks, with minimal overhead to support each task.", + "bbox": [ + 75, + 626, + 467, + 867 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "When learning a new skill, EgoPack promotes the interaction between the different tasks by learning which rele", + "bbox": [ + 76, + 869, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "vant knowledge to extract from the different perspectives. 
The architecture of EgoPack is notably flexible, enabling easy adaptation to novel tasks by reusing the previous tasks to facilitate the learning process of any novel task.", + "bbox": [ + 498, + 90, + 890, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We demonstrate the effectiveness and efficiency of our approach on Ego4D [25], a large-scale egocentric vision dataset. To summarise, our main contributions are:", + "bbox": [ + 498, + 152, + 890, + 196 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. We present a unified architecture to learn multiple egocentric vision tasks with minimal task-specific overhead;", + "2. We introduce EgoPack, a novel approach that leverages different task perspectives to build a robust knowledge abstraction which can foster the learning of a novel task;", + "3. Our approach outperforms both specialised single and multi-task baselines by leveraging the unique synergies and distinct perspectives of different tasks;", + "4. EgoPack achieves competitive performance on Ego4D [25] for all the considered benchmarks, outperforming the state-of-the-art on some." + ], + "bbox": [ + 500, + 196, + 890, + 362 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 500, + 375, + 648, + 388 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Egocentric Vision Egocentric vision captures human activities from the privileged perspective of the camera wearer, allowing a unique point of view on their actions [3, 49]. Recently, the field has seen rapid development thanks to the release of several large-scale egocentric vision datasets [11, 12, 25, 30, 34, 56]. The rich annotations of these datasets [12, 25] allow to tackle a large number of tasks, including action recognition [46], action anticipation [18, 23, 76], next active object prediction [19], action segmentation [33, 73] and episodic memory [52]. Previous works in egocentric vision have focused on domain adaptation [6, 44, 48, 50, 70], multimodal learning [20, 62, 70] and large-scale video-language pretraining [1, 51, 75] to learn better representation for downstream tasks.", + "bbox": [ + 498, + 398, + 890, + 611 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Graph Neural Networks for vision tasks Traditional neural networks, including Convolutional Neural Networks (CNNs), have been widely used in computer vision, showing impressive performance on a variety of problems [26, 39, 41]. However, these models often assume data lying on a regular domain, such as images that have a grid-like structure. In recent years, the interest in developing methods able to provide a more general and powerful type of processing has been growing and particular attention has been given to learning methods on graphs. Graph Neural Networks (GNNs) have the innate ability to effectively handle data that lie on irregular domains, such as 3D data [58, 66], robotics [47], molecular chemistry [37], and social or financial networks [15], and to model complex data relations [55]. Recently, transformer-based architectures had a great impact on vision application. 
Despite Transformers and GNNs share some similarities in their ability to handle various data types, they are fundamentally different in their", + "bbox": [ + 496, + 628, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "18276", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "core architectures and the specific ways they process data. GNNs can model the topology of a graph and the relations between nodes while also inheriting all the desirable properties of classic convolutions: locality, hierarchical structures and efficient weights reuse. In video understanding GNNs have been applied to action localisation [22, 33, 53, 72], to build a knowledge graph from human actions [21], to model human-object interactions [13, 14] or to build a topological map of the environment [45].", + "bbox": [ + 75, + 90, + 472, + 227 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Multi-Task Learning MTL [5, 74] tackles the problem of learning to solve multiple tasks simultaneously. The development of this strategy is justified by the intuition that complex settings require solving multiple tasks, for instance autonomous driving [31], robotics and natural language processing. Furthermore, these networks can bring the theoretical advantage of sharing complementary information to improve performance. Several works have been done in this direction [7, 8, 10, 17, 31, 32, 40, 57], focusing on which parameters or tasks is better to share [28, 35, 60, 61] and promoting synergies between tasks [36, 65]. Such methods encounter the problem of negative transfer [40] and sharing with unrelated tasks [28, 60] consequently suffering of task competition and not being able to benefit from information sharing between tasks. To overcome these limitations, several methods have been proposed to balance task-related losses [9, 38, 59], to dynamically prioritise tasks [27], to reduce gradient interference between tasks [71] or to exploit task interactions at multiple scales [63]. Unfortunately, all these solutions require extensive task-specific tuning, and are not able to build an holistic perception across tasks. Few works have explored MTL in the field of egocentric vision [32, 36, 65, 68]. Among these, the recently proposed EgoT2 [68] builds an ensemble of diverse, task-specific models. The features of the different models are projected into a common feature space and processed through a transformer-based encoder-decoder to translate the contributions of different tasks and generate predictions for the primary task. Notably, the primary task has to be part of the task-specific models. This approach fosters positive interactions between tasks, resulting in improved performance compared to the single-task models. However, it has some limitations, as it is not able to build knowledge abstractions that can be easily transferred to novel tasks. Instead, we propose a model that can build a robust backpack of task perspectives that can be used in learning any novel tasks.", + "bbox": [ + 75, + 244, + 472, + 789 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 800, + 169, + 814 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We tackle a task cooperation setting, in which an egocentric vision model is able to exploit previously acquired knowledge over task perspectives to foster the learning process of any novel task. We formulate the proposed setting in Sec. 3.1. 
We present a unified temporal architecture to", + "bbox": [ + 75, + 824, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "model multiple tasks in Sec. 3.2, a key step to enable knowledge sharing between tasks. Finally, Sec. 3.3 presents our novel approach EgoPack to enable efficient transfer of different task perspectives to novel tasks.", + "bbox": [ + 496, + 90, + 890, + 151 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Setting", + "text_level": 1, + "bbox": [ + 498, + 162, + 591, + 178 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A task $\\mathcal{T}$ is associated with a dataset $\\mathcal{D} = \\{(v_i,y_i)\\}_{i = 1}^N$ where $v_{i}$ is a video segment of arbitrary length, $y_{i}$ is the associated ground truth label and $N$ is the number of segments. Our approach follows a two-stages training process. First, a model $\\mathcal{M}$ is trained on a set of $K$ tasks $\\{\\mathcal{T}_0,\\dots ,\\mathcal{T}_K\\}$ , under a Multi-Task Learning framework with hard-parameter sharing [54] to encourage the model to learn more general and task-agnostic representations thanks to the joint supervision of multiple tasks. Then, the model is presented with a new task $\\mathcal{T}_{K + 1}$ to learn, without access to the supervision of the previous tasks. In this scenario, the new task may benefit from potential semantic affinities with the previously seen tasks. For example, a model that has learnt to detect object changes may apply this knowledge for action recognition and vice-versa, as some actions are associated with object changes, e.g. cutting something, while others are not, e.g. moving an object. Our goal is to make these semantic affinities more explicit (and exploitable) so that the new task can learn to repurpose these perspectives from previous tasks to improve performance, a step towards more holistic models that seamlessly share knowledge between tasks.", + "bbox": [ + 496, + 185, + 890, + 518 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. A Unified Architecture for Egocentric Tasks", + "text_level": 1, + "bbox": [ + 498, + 529, + 872, + 545 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The main premise of our method is that different egocentric vision tasks can be modelled using a shared architecture with minimal differences between tasks. Under this assumption, videos can be seen as a sequence of $N$ temporal segments encoded as $\\mathbf{x} = \\{\\mathbf{x}_1,\\mathbf{x}_2,\\dots ,\\mathbf{x}_N\\}$ , where $\\mathbf{x}_i\\in \\mathbb{R}^D$ represents the $D$ -dimensional features of segment $v_{i}$ extracted using some video features extractor $\\mathcal{F}$ , e.g. SlowFast [16] or Omnivore [24]. Such sequence could be interpreted as a temporal graph $\\mathcal{G}(\\mathcal{X},\\mathcal{E})$ , whose nodes $\\mathbf{x}_i\\in \\mathcal{X}$ represent the segments of the video, and edges $e_{ij}\\in \\mathcal{E}$ connect nodes $\\mathbf{x}_i$ and $\\mathbf{x}_j$ with a temporal distance considered relevant when lower than a threshold $\\tau$ . The connectivity of the graph defines the extent of its temporal modelling, i.e. connecting further apart nodes enables longer range temporal understanding which could benefit for example anticipation tasks. The threshold $\\tau$ depends on the task at hand and more implementation details are provided in Sec. 4.1. 
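To make the temporal-graph construction above concrete, here is a minimal sketch (our illustration, not the authors' released code) of how the edge set of $\mathcal{G}(\mathcal{X},\mathcal{E})$ can be derived from segment timestamps and the threshold $\tau$. The 2-second segment spacing and the value of tau in the usage example are placeholders.

```python
import torch

def build_temporal_graph(features: torch.Tensor, timestamps: torch.Tensor, tau: float):
    """features: (N, D) per-segment features from the video backbone,
    timestamps: (N,) segment times in seconds, tau: temporal threshold."""
    dist = (timestamps[:, None] - timestamps[None, :]).abs()              # (N, N) pairwise temporal distances
    adj = (dist <= tau) & ~torch.eye(len(timestamps), dtype=torch.bool)   # keep close pairs, drop self-loops
    edge_index = adj.nonzero(as_tuple=False).t()                          # (2, num_edges), rows = [src; dst]
    return features, edge_index

# Usage: 8 segments of 1536-d features (e.g. Omnivore), one segment every 2 s,
# edges between segments at most 4 s apart (tau is task-dependent, see Sec. 4.1).
x, t = torch.randn(8, 1536), torch.arange(8, dtype=torch.float) * 2.0
x, edge_index = build_temporal_graph(x, t, tau=4.0)
```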
The temporal position of each node in the sequence is encoded by adding to the node embeddings a positional encoding [64].", + "bbox": [ + 496, + 551, + 890, + 853 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This formulation enables the use of Graph Neural Networks (GNNs) to learn the complex temporal relations between video segments and to cast different egocentric vision", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "18277", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/b2f5e1b612ee56559f47d71736cc2ad955ce3dbb0b93273aa55ae4086be6a0e1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 150, + 78, + 468, + 172 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/785ed7da5144e5810ec295e13bc5c0574be4bd65490bc94fcb4b717051fbba72.jpg", + "image_caption": [ + "Figure 2. Architecture of EgoPack when Object State Change Classification (OSCC) is the novel task. Videos are interpreted as a graph, whose nodes $\\mathbf{x}_i$ represent actions, encoded as features, and edges connect temporally close segments. This representation enables the design of a Unified Temporal Backbone to learn multiple tasks with a shared architecture and minimal Task-Specific Heads, leveraging GNNs for temporal modelling. We exploit this architecture to jointly learn $K$ tasks, e.g. AR, LTA and PNR. After this training process, we extract a set of prototypes $\\mathbf{P}^k$ that summarise what the network has learnt from each task $\\mathcal{T}_k$ , like a backpack of skills that we can carry over. In this Cross-Tasks Interaction phase, the network can peek at these different task-perspective to enrich the learning of the novel task." + ], + "image_footnote": [], + "bbox": [ + 174, + 186, + 419, + 324 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a950485f5d0e64e5819580c243221b5a24c344ea28f7feae7d3246b50880e136.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 78, + 836, + 333 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "tasks as graph prediction tasks, such as node-level or graph-level classification, as shown in Fig. 3. This assumption is reflected in our approach by decomposing the multi-task model $\\mathcal{M}$ into two components: a general temporal backbone $\\mathcal{M}_t:\\mathbb{R}^D\\mapsto \\mathbb{R}^{D_t}$ , and a set of task-specific projection heads $\\mathcal{H}_k:\\mathbb{R}^{D_t}\\mapsto \\mathbb{R}^{D_k}$ mapping the graph and/or the nodes to the features space of task $\\mathcal{T}_k$ with dimension $D_{k}$ as shown in Fig.2. $\\mathcal{M}_t$ is a GNN with $L$ layers that takes as input the temporal sequence $\\mathbf{x}$ and provides as output the updated feature vectors $\\mathbf{f} = \\{\\mathbf{f}_1,\\mathbf{f}_2,\\dots ,\\mathbf{f}_N\\}$ . 
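As a hedged sketch of what one layer of the temporal backbone $\mathcal{M}_t$ can look like, the snippet below implements a GraphSAGE-style update of the kind spelled out in Eqs. (1)-(2) just below: a root transform plus a mean over projected neighbours, followed by Layer Normalization and LeakyReLU. Class and variable names are ours and the LeakyReLU slope is a placeholder.

```python
import torch
import torch.nn as nn

class TemporalSAGELayer(nn.Module):
    """One GraphSAGE-style layer: W_r f_i + W * mean_j(phi(W_p f_j + b_p)) + b."""

    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.lin_root = nn.Linear(in_dim, out_dim)              # W_r and bias b
        self.lin_neigh = nn.Linear(in_dim, in_dim)              # W_p, b_p (applied before aggregation)
        self.lin_agg = nn.Linear(in_dim, out_dim, bias=False)   # W
        self.act = nn.LeakyReLU(0.2)
        self.norm = nn.LayerNorm(out_dim)

    def forward(self, x: torch.Tensor, edge_index: torch.Tensor) -> torch.Tensor:
        src, dst = edge_index                                   # messages flow src -> dst
        msg = self.act(self.lin_neigh(x))[src]                  # phi(W_p f_j + b_p) for each edge
        agg = torch.zeros_like(x).index_add_(0, dst, msg)       # sum messages per destination node
        deg = torch.bincount(dst, minlength=x.size(0)).clamp(min=1)
        agg = agg / deg.unsqueeze(-1)                           # mean over the temporal neighbourhood
        out = self.lin_root(x) + self.lin_agg(agg)
        return self.act(self.norm(out))                         # LayerNorm + LeakyReLU, as in the text
```

Stacking L such layers iteratively widens each node's temporal receptive field; the paper additionally keeps a residual connection around the whole temporal GNN, which is not shown here.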
At layer $l$ , node embeddings are projected and combined with their neighbours, following the GraphSAGE architecture [29]:", + "bbox": [ + 75, + 448, + 472, + 630 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {f} _ {i} ^ {(l + 1)} = \\mathbf {W} _ {r} ^ {(l)} \\mathbf {f} _ {i} ^ {(l)} + \\mathbf {W} ^ {(l)} \\cdot \\mathbf {g} _ {i} ^ {(l + 1)} + \\mathbf {b} ^ {(l)}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 140, + 638, + 468, + 657 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{f}_i^{(l)}\\in \\mathbb{R}^{D_t^{(l)}}$ are the features of node $\\mathbf{x}_i$ , $\\mathbf{b}^{(l)}\\in \\mathbb{R}^{D_t^{(l + 1)}}$ is a bias term, $\\mathbf{W}_r^{(l)},\\mathbf{W}^{(l)}\\in \\mathbb{R}^{D_t^{(l + 1)}\\times D_t^{(l)}}$ are the weight matrices associated to the root node and the aggregated neighbours' contribution $\\mathbf{g}_i^{(l + 1)}$ respectively. The latter is computed as:", + "bbox": [ + 76, + 666, + 468, + 751 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {g} _ {i} ^ {(l + 1)} = \\underset {\\mathbf {f} _ {i} \\in \\mathcal {N} _ {i}} {\\text {m e a n}} \\left(\\phi \\left(\\mathbf {W} _ {p} ^ {(l)} \\mathbf {f} _ {j} ^ {(l)} + \\mathbf {b} _ {p} ^ {(l)}\\right)\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 140, + 762, + 468, + 789 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{W}_p^{(l)}\\in \\mathbb{R}^{D_t^{(l)}\\times D_t^{(l)}}$ projects the neighbours before the aggregation step, $\\phi$ is a non-linearity, $\\mathbf{b}_p^{(l)}\\in \\mathbb{R}^{D_t^{(l + 1)}}$ is a bias term and $\\mathcal{N}_i$ is the set of neighbours of node $\\mathbf{x}_i$ . Each layer is followed by Layer Normalization [2] and a LeakyReLU activation function. A residual connection around the temporal GNN allows the network to preserve", + "bbox": [ + 75, + 803, + 470, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the original features. Intuitively, the neighbourhood $\\mathcal{N}_i$ reflects the temporal dependencies of the input sequence and the GNN allows to iteratively extend the temporal receptive field of each node.", + "bbox": [ + 498, + 448, + 890, + 508 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Task-specific heads The output features of the temporal backbone $\\mathcal{M}_t$ are shared across the different downstream tasks. To project these features into task-specific components, we add a set of projection heads $\\mathcal{H}_k$ , one for each task $\\mathcal{T}_k$ . For graph classification tasks, the nodes of each graph are aggregated using max pooling to obtain a unique features representation. In each head, a MultiLayer Perceptron outputs the task-specific features $\\mathbf{f}_i^k \\in \\mathbb{R}^{D^k}$ and is followed by a linear layer to compute the task logits $\\mathbf{y}_i^k \\in \\mathbb{R}^{D_o^k}$ where $D_o^k$ is the number of labels for task $\\mathcal{T}_k$ . By limiting the task-specific portion of the network to the heads while sharing the temporal backbone, we can obtain the perspective of all tasks with a single forward through the latter. The network is trained on all the tasks by averaging their losses.", + "bbox": [ + 496, + 542, + 892, + 760 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. 
Learning a novel task with a backpack", + "text_level": 1, + "bbox": [ + 500, + 781, + 831, + 797 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To solve the new task, the naive approach would be to finetune the model, adding a new head $\\mathcal{H}_{K + 1}$ and possibly updating the temporal backbone $\\mathcal{M}_t$ . However, finetuning may not fully leverage the insights from other tasks as it could result in the loss of the previously acquired knowledge, as confirmed experimentally in Sec. 4.2.", + "bbox": [ + 496, + 809, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "18278", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Building the backpack We propose to explicitly model the perspectives of the different tasks as a set of task-specific prototypes that abstract the knowledge gained by the previously seen tasks and can be accessed by novel tasks. We call this approach EgoPack and provide an overview in Fig. 2. These task-specific prototypes are collected from videos annotated for action recognition, as human actions can be seen as the common thread behind the different tasks. Practically, we forward these samples through the temporal backbone and take the output of the different task-specific projection heads, thus encoding the perspective of each task given the same input video. Finally, the features obtained from each task are aggregated according to the verb and noun labels of the action, effectively summarising the perspective of each task given the same input action. The result is a set of prototypes $\\mathbf{P}^k = \\{\\mathbf{p}_0^k,\\mathbf{p}_2^k,\\dots ,\\mathbf{p}_P^k\\} \\in \\mathbb{R}^{P\\times D_k}$ for each task $\\mathcal{T}_k$ , where $P$ is the number of unique (verb, noun) pairs in the dataset and $D_{k}$ is the size of the task-specific features. These prototypes are frozen and represent a \"summary\" of what the models has learnt during the multi-task pretraining process, creating an abstraction of the gained knowledge. They can be then reused when learning novel tasks, like a backpack of skills that the model can carry over.", + "bbox": [ + 76, + 90, + 472, + 454 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Leveraging the backpack During the learning process of the novel task $\\mathcal{T}_{K + 1}$ , the model can exploit the task prototypes obtained via the task-specific heads. As before, the output of the temporal backbone $\\mathbf{f}_i$ is forwarded through all the projection heads to obtain the task-specific features $\\mathbf{f}_i^k$ . These features are used as queries to match the corresponding task prototypes $\\mathbf{P}^k$ , selecting the $k$ -Nearest Neighbours among the prototypes using cosine similarity in the features space. Task features and their neighbouring prototypes form a graph-like structure, on which message passing can be used to enrich the task-specific features $\\mathbf{f}_i^k$ , following an iterative refinement approach. 
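Before the refinement rule of Eq. (3) below, the "Building the backpack" step described above can be sketched as follows (our illustration): the task-specific head outputs computed on the AR training clips are aggregated per (verb, noun) pair into the prototype set $\mathbf{P}^k$. The paper does not commit to a specific aggregation operator, so the mean used here is an assumption, and action_ids is a hypothetical integer index of the (verb, noun) pairs.

```python
import torch

@torch.no_grad()
def build_task_prototypes(task_feats: torch.Tensor, action_ids: torch.Tensor) -> torch.Tensor:
    """task_feats: (N, D_k) outputs of one task-specific head on AR training clips,
    action_ids: (N,) int64 index of the (verb, noun) pair of each clip."""
    num_actions = int(action_ids.max().item()) + 1
    protos = torch.zeros(num_actions, task_feats.size(1))
    counts = torch.zeros(num_actions)
    protos.index_add_(0, action_ids, task_feats)                # sum features per action
    counts.index_add_(0, action_ids, torch.ones(len(action_ids)))
    return protos / counts.clamp(min=1).unsqueeze(-1)           # P^k: (P, D_k), kept frozen afterwards
```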
In particular, at each layer $l$ we select the closest prototypes with $k$ -NN and update the features $\mathbf{f}_i^{(l),k}$ according to the following rule:", + "bbox": [ + 76, + 474, + 472, + 690 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {f} _ {i} ^ {(l + 1), k} = \\mathbf {W} _ {r} ^ {(l)} \\mathbf {f} _ {i} ^ {(l), k} + \\mathbf {W} ^ {(l)} \\cdot \\max _ {\\mathbf {p} _ {j} ^ {k} \\in \\mathcal {N} _ {i} ^ {(l), k}} \\mathbf {p} _ {j} ^ {k}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 130, + 698, + 468, + 729 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\mathbf{p}_j^k\in \mathcal{N}_i^{(l),k}$ are the closest prototypes in $\mathbf{P}^k$ to $\mathbf{f}_i^{(l),k}$ and $\mathbf{W}_r^{(l)}$ , $\mathbf{W}^{(l)}\in \mathbb{R}^{D^k\times D^k}$ are the weight matrices associated with the input features and the aggregated neighbours respectively. Notably, only the task features are updated while the task prototypes remain frozen to preserve the original perspectives seen by the network.", + "bbox": [ + 76, + 741, + 468, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this process, the task-specific heads $\mathcal{H}_k$ are initialised from the multi-task training and possibly updated during the task-specific finetuning process, allowing the model to freely explore the set of task prototypes and to select the", + "bbox": [ + 75, + 839, + 470, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "most informative ones for each input sample. After the interaction phase, the refined features $\tilde{\mathbf{f}}_i^k$ are fed to a classifier module to obtain the task logits $\mathbf{y}_i^k\in \mathbb{R}^{D_o^k}$ for each task $\mathcal{T}_k$ in the backpack. The final prediction is the sum of the pre-softmax logits coming from the different tasks and the output of a new head $\mathcal{H}_{K + 1}$ for the novel task. Intuitively, we allow each task to cast a vote on the final prediction, based on its perspective on the same video segment. In this phase, the temporal network, the task-specific heads and the weights of the GNNs are trained jointly using only the supervision of the novel task $\mathcal{T}_{K + 1}$ .", + "bbox": [ + 496, + 90, + 893, + 261 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 498, + 275, + 633, + 292 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluate EgoPack on four Ego4D Human-Object Interaction benchmarks: Action Recognition (AR)$^{1}$, Long Term Action Anticipation (LTA), Object State Change Classification (OSCC) and Point Of No Return (PNR). We report verb and noun top-1 accuracy for AR, accuracy for OSCC, edit distance for LTA and temporal localisation error (in seconds) for PNR.", + "bbox": [ + 496, + 301, + 893, + 407 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Implementation Details", + "text_level": 1, + "bbox": [ + 498, + 417, + 718, + 434 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use Omnivore Swin-L [24] features pre-trained on Kinetics-400 [4], released as part of Ego4D [25] and extracted using dense sampling over a window of 32 frames with a stride of 16 frames and a feature size of 1536. In principle, EgoPack is agnostic to the underlying feature extractor and could adopt other architectures. Following previous works on Ego4D [50] we use TRN [77] to temporally aggregate features from the three sub-segments of each input sample. 
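Stepping back to the cross-task interaction of Eq. (3), the mechanism itself is small: each task-specific feature picks its $k$ closest prototypes by cosine similarity and is updated through two learnable weight matrices with a max-aggregation of the selected prototypes. The snippet below is only an illustration of that rule; the use of plain `nn.Linear` layers, the toy value of $k$ and all variable names are our assumptions, not the authors' implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class PrototypeInteractionLayer(nn.Module):
    """One interaction step in the spirit of Eq. (3): task features attend to
    their k closest (frozen) task prototypes and are updated with a
    max-aggregation of the selected prototypes."""

    def __init__(self, dim: int, k: int = 4):
        super().__init__()
        self.k = k
        self.w_root = nn.Linear(dim, dim, bias=False)    # plays the role of W_r^(l)
        self.w_neigh = nn.Linear(dim, dim, bias=False)   # plays the role of W^(l)

    def forward(self, feats: torch.Tensor, prototypes: torch.Tensor) -> torch.Tensor:
        # feats: (N, D) task-specific features; prototypes: (P, D) frozen prototypes
        sim = F.normalize(feats, dim=-1) @ F.normalize(prototypes, dim=-1).T  # (N, P) cosine similarity
        idx = sim.topk(self.k, dim=-1).indices                                # k-NN indices per sample
        neigh = prototypes[idx]                                               # (N, k, D) selected prototypes
        aggregated = neigh.max(dim=1).values                                  # max over the k neighbours
        return self.w_root(feats) + self.w_neigh(aggregated)

# toy usage: 8 samples, 128-d task features, 50 prototypes
feats = torch.randn(8, 128)
protos = torch.randn(50, 128)   # stays frozen: no gradient flows into it here
refined = PrototypeInteractionLayer(dim=128, k=4)(feats, protos)
print(refined.shape)            # torch.Size([8, 128])
```

In the full model one such refinement would run per task head, and the resulting features would feed that task's classifier, whose pre-softmax logits are summed with those of the new head as described above.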
The mapping between videos of each task and its corresponding temporal graph is task dependent, as shown in Fig. 3:", + "bbox": [ + 496, + 441, + 893, + 608 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Action Recognition (AR): actions are mapped to the nodes of the temporal graph $\\mathcal{G}$ , and edges connect each node to the previous and next (Fig. 3a). To account for the variable length of videos, actions are processed in fixed size windows.", + "- Long Term Anticipation (LTA): each input clip is mapped to a node in $\\mathcal{G}$ . Then, a sequence of new nodes is added to the graph, equivalent in number to the clips to forecast. These nodes are initialised with the mean features of the input clips and are connected to the previous and subsequent nodes in the sequence, as well as to the input clips (Fig. 3b).", + "- Object State Change Classification (OSCC) and Point of No Return (PNR): each input segment is further split into $n$ sub-segments to account for the finer temporal granularity required by these tasks. Each sub-segment is" + ], + "bbox": [ + 500, + 609, + 893, + 852 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "$^{1}$ AR is not an official Ego4D benchmark and was derived from the LTA annotations by [68]. To be consistent with previous works, we use the v1 version of the LTA annotations.", + "bbox": [ + 500, + 862, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "18279", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/388c0eb9864c315baa2798b5c69beb5fc0cd38e346851bb86c754bcb621cc228.jpg", + "image_caption": [ + "(a) Node classification (AR, PNR)" + ], + "image_footnote": [], + "bbox": [ + 112, + 93, + 339, + 165 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4c993074ff57171ad3e42f2b5f9f4f95e8778928bbc940f94ece5c91fc4a9d80.jpg", + "image_caption": [ + "(b) Future node classification (LTA)", + "Figure 3. Egocentric vision tasks as graph prediction tasks. In AR and LTA, each node is an action within a temporal sequence and the objective is to predict the verb and noun labels of the nodes. In OSCC and PNR, nodes represent different temporal segments of the video clip and the goal is to output a global prediction for the entire graph (OSCC) or the individual nodes (PNR)." + ], + "image_footnote": [], + "bbox": [ + 375, + 93, + 604, + 166 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a9b3d2e25d485fd2bce6c57a91464f62adbe3d42ecece4c3b1e1614c2a61cc38.jpg", + "image_caption": [ + "(c) Graph classification (OSCC)" + ], + "image_footnote": [], + "bbox": [ + 653, + 95, + 864, + 162 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "mapped to a node in $\\mathcal{G}$ , and edges connect each node to the previous and next (Fig. 3c).", + "bbox": [ + 89, + 251, + 468, + 280 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Tasks have different annotations and are modelled as separate graphs, even though the temporal model is shared. The task prototypes are built using samples from the train split of the AR dataset. Tasks are trained with standard cross entropy loss, with the exception of PNR which uses binary cross entropy. EgoPack is trained for 30, 40 and 10 epochs for AR, LTA and OSCC/PNR respectively, with a learning rate of $1e - 4$ and $1e - 6$ for AR/LTA and OSCC/PNR respectively using Adam optimiser and batch size 16. 
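For readers who prefer code to notation, the temporal-graph formulation shared by all tasks (segments as nodes, edges between segments whose temporal distance is within a threshold $\tau$, and the mean-aggregation update of Eqs. (1)-(2)) can be sketched as follows. The threshold value, feature sizes and helper names are hypothetical; this is a minimal illustration under those assumptions, not the released implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def temporal_edges(num_nodes: int, tau: int = 1) -> torch.Tensor:
    """Connect segments whose temporal distance is at most tau (no self loops).
    Returns a (2, E) edge index."""
    idx = torch.arange(num_nodes)
    dist = (idx[None, :] - idx[:, None]).abs()
    src, dst = torch.nonzero((dist > 0) & (dist <= tau), as_tuple=True)
    return torch.stack([src, dst])

class TemporalSAGELayer(nn.Module):
    """GraphSAGE-style update in the spirit of Eqs. (1)-(2): neighbours are
    projected, mean-aggregated and combined with the root node features."""

    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.proj = nn.Linear(in_dim, in_dim)                 # W_p^(l), b_p^(l)
        self.root = nn.Linear(in_dim, out_dim)                # W_r^(l), b^(l)
        self.neigh = nn.Linear(in_dim, out_dim, bias=False)   # W^(l)

    def forward(self, x: torch.Tensor, edge_index: torch.Tensor) -> torch.Tensor:
        src, dst = edge_index
        msg = F.leaky_relu(self.proj(x[src]))                  # phi(W_p f_j + b_p)
        agg = torch.zeros_like(x).index_add_(0, dst, msg)      # sum messages per node
        deg = torch.zeros(x.size(0)).index_add_(0, dst, torch.ones_like(dst, dtype=torch.float))
        agg = agg / deg.clamp(min=1).unsqueeze(-1)             # mean over the neighbourhood
        return self.root(x) + self.neigh(agg)

# toy usage: a clip split into 8 temporal segments with 1536-d features
x = torch.randn(8, 1536)
edges = temporal_edges(8, tau=2)
out = TemporalSAGELayer(1536, 256)(x, edges)
print(out.shape)  # torch.Size([8, 256])
```

Each task would build its own graph of this kind on top of the shared backbone, with nodes corresponding to actions (AR, LTA) or to finer sub-segments (OSCC, PNR) as sketched in Fig. 3.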
All tasks share the same temporal and cross-task interaction architecture, with minimal task-specific hyper-parameter tuning. More details are reported in the supplementary.", + "bbox": [ + 75, + 281, + 470, + 464 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Quantitative results", + "text_level": 1, + "bbox": [ + 76, + 474, + 264, + 489 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We show the main results of EgoPack in Table 1. To assess the validity of our approach, we proceed incrementally starting from single-task models, i.e. each task is trained separately. In this setting, we compare a simple MLP baseline trained on the temporally aggregated features against our temporal graph methodology, which exhibits superior average performance. The improvement is particularly evident in the PNR task, e.g. from $1.76s$ to $0.61s$ , where the subpar outcomes of the MLP can be attributed to the lack of explicit temporal modelling. In addition to higher performance, the temporal graph enables all the tasks to be modelled using a unified architecture which makes it possible to train all the tasks at the same time (MTL). With the MTL model, we observe a significant drop in average performance, mostly driven by worse accuracy in AR and OSCC. This behaviour is the result of negative transfers between tasks when they are trained together [67].", + "bbox": [ + 75, + 500, + 468, + 756 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Cross-Task Interactions We compare our approach EgoPack for efficient cross-task interaction with EgoT2 [68], which learns to combine multiple task-specific frozen models to solve one of them. Unlike EgoPack, the learning process of EgoT2 is divided into two stages, i.e. a pre-training step where each individual task is learned from scratch and a task-specific translation step, where just one task of the collection is fine-tuned. Notably,", + "bbox": [ + 75, + 779, + 467, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "both steps require the supervision of the downstream task. On the contrary, the multi-task pre-training of EgoPack is agnostic to the novel downstream task, potentially allowing the gained knowledge to be transferred to any new task. To ensure a fair comparison with EgoPack, we re-implemented the task translation mechanism proposed by EgoT2 on top of our Temporal Graph single-task models using Omnivore features. This approach is indicated as Task Translation in Table 1. Additional details on its implementation are provided in the supplementary. One of the main benefits of our approach is that it requires a single forward pass through the feature extraction and temporal backbones to obtain the perspectives of the different tasks, unlike EgoT2 which requires a forward pass for each single task model. Notably, we also highlight that EgoPack obtains better or comparable performance even though the backbone used for feature extraction was not trained on Ego4D.", + "bbox": [ + 496, + 250, + 890, + 508 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Ablation of the different contributions We summarise the main steps leading to EgoPack in Table 2, using an aggregated metric to capture the overall improvement across the various tasks when compared to the baseline. The metric is computed as an average of the individual task metrics. 
We adjusted the metrics by taking one minus the score for LTA and PNR, as lower values are preferable, and clipped the PNR localisation error at 1.0 to have the same scale across all the metrics. Temporal modelling alone greatly improves the score compared to the baseline. Although MTL makes it possible to train under a multi-task objective, it clearly underperforms the temporal model due to negative transfers [40]. Task Translation partially recovers this gap on some tasks as shown in Table 1, but overall the aggregated metric is comparable with MTL. We speculate that the marginal improvement of Task Translation compared to MTL lies in the limited task-specific context the former has access to, as it can peek at the different perspectives of the auxiliary tasks only for the input video at hand, rather than looking at the entire knowledge gained by the model. On the contrary, the task prototypes of EgoPack carry a more complete summary of what the model has learnt, from which it can extract useful knowledge based on the sample and the task at hand. To validate that the benefits of EgoPack were not brought by", + "bbox": [ + 496, + 537, + 892, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "18280", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/629260494700091afe8154de6b852a3c31b1ef6495bfafa202a8165c7a6b8986.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Trained on frozen featuresAROSCCLTAPNR
Verbs Top-1 (%)Nouns Top-1 (%)Acc. (%)Verbs ED (↓)Nouns ED (↓)Loc. Err. (s) (↓)
Ego4D Baselines [25]X22.1821.5568.220.7460.7890.62
EgoT2s [68]X23.0423.2872.690.7310.7690.61
MLP24.0830.4570.470.7630.7421.76
Temporal Graph24.2530.4371.260.7540.7520.61
Multi-Task Learning22.0529.4471.100.7400.7460.62
Task Translation†23.6828.2871.480.7400.7560.61
EgoPack25.1031.1071.830.7280.7520.61
", + "bbox": [ + 78, + 88, + 893, + 234 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/002291742573c6c79a2b3e4292b6052ded236efe4e4707f141fd6827602c5b4b.jpg", + "table_caption": [ + "Table 1. EgoPack on Ego4D HOI tasks. MLP is a simple baseline consisting of a few linear layers, while Temporal Graph models all tasks using a unified temporal graph-based architecture. MTL [54] uses hard parameter sharing to jointly learn all tasks, which may result in negative transfers. Ego-T2s [68] learns to translate features across tasks to optimise the primary task. EgoPack builds on the unified architecture of the Temporal Graph and learns to exploit the perspective of different tasks for efficient transfers to the novel task. Performances of EgoPack are evaluated over three runs using accuracy for AR and OSCC, Edit Distance for LTA and temporal localisation error for PNR. Our implementation of the task translation mechanism from EgoT2 [68] using Omnivore features." + ], + "table_footnote": [], + "table_body": "
Temp. modelMulti-Task ObjectiveCross-Task InteractionMetrics Average (Δ)
MLPXXX0.416
Temp.XX0.433 (+4.22%)
Task Transl.X0.431 (+3.61%)
MTL0.430 (+3.50%)
MTL+FT0.437 (+5.02%)
EgoPackX0.441 (+6.10%)
", + "bbox": [ + 78, + 344, + 468, + 463 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Ablation of the different contributions in EgoPack, measured according to an aggregated score, computed as the mean of the standardised metrics across tasks.", + "bbox": [ + 75, + 472, + 470, + 513 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the MTL pre-training alone, we also introduce a $MTL + FT$ baseline where a new task-specific head is finetuned for the novel task, without access to the output of the other heads. The limited performance of this configuration could be explained by the model losing the knowledge learnt during the multi-task learning, without a significant improvement over the single-task baselines, thus only partially reusing the gained knowledge. On the contrary, EgoPack preserves this knowledge in the form of prototypes, which proves to be effective for retaining the model's knowledge when learning a new task.", + "bbox": [ + 75, + 523, + 470, + 689 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Depth of the GNN and the selection of $k$ We observe that EgoPack is quite robust to the number of GNN layers in the interaction stage between the input features and the task prototypes, as shown in Fig. 4. Regarding the selection of the $k$ parameter, we compare the $MTL + FT$ baseline ( $k = 0$ ) with EgoPack. The best performance is achieved at $k = 4$ with a saturating trend afterwards, showing that interacting with a limited number of prototypes is sufficient.", + "bbox": [ + 75, + 704, + 468, + 828 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results on the test set We compare EgoPack on the test set of PNR, OSCC and LTA benchmarks, to validate the improvements and soundness of EgoPack. In this setting, a fair comparison between methods is challenging because", + "bbox": [ + 75, + 839, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d85c23916c0ac7b3b660d8d0486042fa357246551d5ca799417a8948cf7ee0f1.jpg", + "image_caption": [ + "Figure 4. Parameter analysis for the cross-tasks interaction module of EgoPack. We analyse the impact on performance of GNN depth and the number of nearest neighbours denoted as $k$ -NN." + ], + "image_footnote": [], + "bbox": [ + 488, + 343, + 867, + 460 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "of the use of different backbones, supervision levels, ensemble strategies and challenge-specific tuning, such as training also on the validation set. Remarkably, we achieve SOTA performances in LTA, outperforming the other methods that finetune the entire backbone, with a more evident benefit in the verbs edit distance. In PNR, we closely match other approaches while the improvement is more limited in the OSCC task. In this task, we notice a relevant impact of the Ego4D pretraining on the performance. We provide a more in-depth description of the differences between these methods in the supplementary materials.", + "bbox": [ + 496, + 516, + 890, + 684 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Qualitative results", + "text_level": 1, + "bbox": [ + 500, + 695, + 678, + 710 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Closest Task Prototypes We evaluate which are the closest task-specific prototypes in Fig. 5. In this example, OSCC is the novel task and the model has access to the prototypes of the learnt tasks. 
We focus on the prototypes from the AR and PNR tasks and group together nodes that share the same verb label to make the picture more readable. Looking at the number of occurrences of the prototypes, we observe that some nodes are more discriminative to detect a state change, e.g. peel and hold actions are typically associated (peel) or not (hold) with state changes, and therefore show more evident peaks for positive and negative classes, indicating the network is using these clues to solve the task.", + "bbox": [ + 496, + 719, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "18281", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/a0c2aa8f7d439faa9470bfd4743bb6f38e97e2166c025bf3266e999de914314d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
PNREgo4D Pt.Loc. Error (s) (↓)
CNN LSTM [25]X0.76
EgoVLP [42]0.67
EgoT2 [68]X0.66
EgoPackX0.66
", + "bbox": [ + 83, + 88, + 460, + 162 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/de2f25a829463a8146789b54d966c70d4c024e3b3b59a0fcbed334d62aa878da.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
OSCCEgo4D Pt.Accuracy (%)
I3D RN-50 [25]X67.6
EgoVLP [42]74.0
EgoT2 (EgoVLP) [68]75.0
EgoT2 (I3D) [68]X71.0
EgoPackX72.1
", + "bbox": [ + 83, + 164, + 460, + 250 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/4124e64d5d8f20e654f0c7ae67fdcc581ef8c15c7df61d594cb753602b021fea.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
LTAEgo4D Pt.Verb (↓)Noun (↓)Action (↓)
SlowFast [25]X0.7390.7800.943
EgoT2 [68]X0.7220.7640.935
HierVL [1]0.7240.7350.928
I-CVAE [43]X0.7410.7400.930
EgoPackX0.7210.7350.925
", + "bbox": [ + 83, + 251, + 460, + 339 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3. Comparison of EgoPack on the test set of the Ego4D benchmarks. For a fair comparison, we distinguish between methods pretrained on full Ego4D (✓) and those that have been trained only on the benchmark data $(\\mathcal{X})$ , which includes EgoPack.", + "bbox": [ + 75, + 345, + 468, + 402 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2f17ff001c1c4ec47a1319a176c19e654c30aa954cca0ce9ecfe10e0a281603f.jpg", + "image_caption": [ + "(a) AR Task Prototypes" + ], + "image_footnote": [], + "bbox": [ + 83, + 414, + 431, + 510 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c821f609a776382e3041d9fd57b41189ba631e628bb01fe75e6f84195a72ada0.jpg", + "image_caption": [ + "(b) PNR Task Prototypes", + "Figure 5. Closest nodes to the OSCC samples among AR and PNR task prototypes. Some nodes appear to be more discriminative of the presence or absence of an object state change." + ], + "image_footnote": [], + "bbox": [ + 83, + 531, + 431, + 628 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Confusion matrices In Fig. 6, we compare the confusion matrix of EgoPack with the MLP model for the top-20 largest verb and noun classes in the AR task, grouping the remaining classes in a \"rest\" pseudo-class. Overall, we observe an evident improvement on the noun labels, due to the positive effect of cross-tasks interaction. For example, the network appears to better disambiguate between objects that may appear at the same time in the scene, e.g. \"pants\" and \"cloth\" or \"bottle\" and \"lid\", which we speculate to be the result of a better ability of other tasks, namely OSCC, to identify active objects. Regarding the verbs, we also observe notable improvements, in addition to better recognition of verbs that are the temporal inverse of each other, e.g.", + "bbox": [ + 75, + 703, + 470, + 901 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2c3b3965355a45dbad3becf47a2c4f9e78e02723f18ce7093eb77da2f001e841.jpg", + "image_caption": [ + "(a) Verb (MLP)" + ], + "image_footnote": [], + "bbox": [ + 501, + 84, + 669, + 184 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/322879cd38cbdf568bd80ab18f98c46cd658e83b7a251df651c7ea00b37cb42d.jpg", + "image_caption": [ + "(b) Verb (EgoPack)" + ], + "image_footnote": [], + "bbox": [ + 715, + 85, + 880, + 184 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dcf9cbafae1971c11a58b3bca6956ab8ac72d84ad48e42ee1fae0887394f3542.jpg", + "image_caption": [ + "(c)Noun (MLP)" + ], + "image_footnote": [], + "bbox": [ + 500, + 204, + 676, + 310 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d27174a9056017ffc8d0c894a426a221e6ef9eb6ea592042663eba3a83949b8f.jpg", + "image_caption": [ + "(d) Noun (EgoPack)", + "Figure 6. Action Recognition confusion matrix of EgoPack compared to the MLP baseline for the top-20 verb and noun classes." + ], + "image_footnote": [], + "bbox": [ + 707, + 205, + 883, + 310 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "\"put\" and \"take\" or \"open\" and \"close\", thanks to the improved temporal reasoning of our unified model.", + "bbox": [ + 498, + 377, + 890, + 407 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. 
Conclusions and future work", + "text_level": 1, + "bbox": [ + 500, + 424, + 766, + 439 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We presented EgoPack, a framework that allows knowledge sharing between different egocentric vision tasks, enabling an efficient use of the perspectives that each task can provide. We built EgoPack on top of a unified temporal architecture that can model distinct tasks with a shared backbone and minimal task-specific overhead. EgoPack overcomes the main limitation posed by traditional multi-task learning approaches, namely the unrealistic expectation that supervision is available for all tasks at training time. Indeed, the prototypes mechanism behind EgoPack makes it possible to create a summary of what the model has learnt so far, as it abstracts the task-specific knowledge that could be used in novel tasks. The model can then be updated to any new task, while also peeking at the perspective of the previous tasks. Results on Ego4D validate our approach, showing competitive performance with other methods.", + "bbox": [ + 496, + 450, + 890, + 691 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 708, + 666, + 724 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This study was carried out within the FAIR - Future Artificial Intelligence Research and received funding from the European Union Next-GenerationEU (PIANO NAZIONALE DI RIPRESA E RESILIENZA (PNRR) - MISSIONE 4 COMPONENTE 2, INVESTIMENTO 1.3 - D.D. 1555 11/10/2022, PE00000013). This manuscript reflects only the authors' views and opinions; neither the European Union nor the European Commission can be considered responsible for them. We acknowledge the CINECA award under the ISCRA initiative, for the availability of high performance computing resources and support.", + "bbox": [ + 496, + 734, + 893, + 901 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "18282", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Kumar Ashutosh, Rohit Girdhar, Lorenzo Torresani, and Kristen Grauman. Hiervl: Learning hierarchical video-language embeddings. In CVPR, 2023. 2, 8", + "[2] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 4", + "[3] Alejandro Betancourt, Pietro Morerio, Carlo S Regazzoni, and Matthias Rauterberg. The evolution of first person vision methods: A survey. IEEE TCSVT, 2015. 2", + "[4] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In CVPR, 2017. 5", + "[5] Rich Caruana. Multitask learning. Machine learning, 28: 41-75, 1997. 3", + "[6] Min-Hung Chen, Zsolt Kira, Ghassan AlRegib, Jaekwon Yoo, Ruxin Chen, and Jian Zheng. Temporal attentive alignment for large-scale video domain adaptation. In ICCV, 2019. 2", + "[7] Ting Chen, Saurabh Saxena, Lala Li, Tsung-Yi Lin, David J Fleet, and Geoffrey E Hinton. A unified sequence interface for vision tasks. In NeurIPS, 2022. 3", + "[8] Tianlong Chen, Xuxi Chen, Xianzhi Du, Abdullah Rashwan, Fan Yang, Huizhong Chen, Zhangyang Wang, and Yeqing Li. Adamv-moe: Adaptive multi-task vision mixture-of-experts. In ICCV, 2023. 3", + "[9] Zhao Chen, Vijay Badrinarayanan, Chen-Yu Lee, and Andrew Rabinovich. 
Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In ICML, 2018. 3", + "[10] Yuanzheng Ci, Yizhou Wang, Meilin Chen, Shixiang Tang, Lei Bai, Feng Zhu, Rui Zhao, Fengwei Yu, Donglian Qi, and Wanli Ouyang. Unihcp: A unified model for human-centric perceptions. In CVPR, 2023. 3", + "[11] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, and Michael Wray. The epic-kitchens dataset: Collection, challenges and baselines. IEEE TPAMI, 2021. 2", + "[12] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Antonino Furnari, Jian Ma, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, and Michael Wray. Rescaling egocentric vision: Collection, pipeline and challenges for epic-kitchens-100. IJCV, 2022. 2", + "[13] Eadom Dessalene, Michael Maynard, Chinmaya Devaraj, Cornelia Fermuller, and Yiannis Aloimonos. Egocentric object manipulation graphs. arXiv preprint arXiv:2006.03201, 2020. 3", + "[14] Eadem Dessalene, Chinmaya Devaraj, Michael Maynard, Cornelia Fermuller, and Yiannis Aloimonos. Forecasting action through contact representations from first person video. IEEE TPAMI, 2021. 3", + "[15] Wenqi Fan, Yao Ma, Qing Li, Yuan He, Eric Zhao, Jiliang Tang, and Dawei Yin. Graph neural networks for social recommendation. In The world wide web conference, 2019. 2" + ], + "bbox": [ + 78, + 114, + 467, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In ICCV, 2019. 3", + "[17] Chris Fifty, Ehsan Amid, Zhe Zhao, Tianhe Yu, Rohan Anil, and Chelsea Finn. Efficiently identifying task groupings for multi-task learning. In NeurIPS, 2021. 3", + "[18] Antonino Furnari and Giovanni Maria Farinella. Rolling-unrolling lstms for action anticipation from first-person video. IEEE TPAMI, 2020. 2", + "[19] Antonino Furnari, Sebastiano Battiato, Kristen Grauman, and Giovanni Maria Farinella. Next-active-object prediction from egocentric videos. Journal of Visual Communication and Image Representation, 2017. 2", + "[20] Ruohan Gao, Tae-Hyun Oh, Kristen Grauman, and Lorenzo Torresani. Listen to look: Action recognition by previewing audio. In CVPR, 2020. 2", + "[21] Pallabi Ghosh, Nirat Saini, Larry S Davis, and Abhinav Shrivastava. All about knowledge graphs for actions. arXiv preprint arXiv:2008.12432, 2020. 3", + "[22] Pallabi Ghosh, Yi Yao, Larry Davis, and Ajay Divakaran. Stacked spatio-temporal graph convolutional networks for action segmentation. In WACV, 2020. 3", + "[23] Rohit Girdhar and Kristen Grauman. Anticipative video transformer. In ICCV, 2021. 2", + "[24] Rohit Girdhar, Mannat Singh, Nikhila Ravi, Laurens van der Maaten, Armand Joulin, and Ishan Misra. Omnivore: A single model for many visual modalities. In CVPR, 2022. 3, 5", + "[25] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In CVPR, 2022. 2, 5, 7, 8", + "[26] Jieuxiang Gu, Zhenhua Wang, Jason Kuen, Lianyang Ma, Amir Shahroudy, Bing Shuai, Ting Liu, Xingxing Wang, Gang Wang, Jianfei Cai, et al. Recent advances in convolutional neural networks. PR, 2018. 2", + "[27] Michelle Guo, Albert Haque, De-An Huang, Serena Yeung, and Li Fei-Fei. 
Dynamic task prioritization for multitask learning. In ECCV, 2018. 3", + "[28] Pengsheng Guo, Chen-Yu Lee, and Daniel Ulbricht. Learning to branch for multi-task learning. In ICML, 2020. 3", + "[29] Will Hamilton, Zhitao Ying, and Jure Leskovec. Inductive representation learning on large graphs. In NeurIPS, 2017. 4", + "[30] Dan Witzner Hansen and Qiang Ji. In the eye of the beholder: A survey of models for eyes and gaze. IEEE TPAMI, 2009. 2", + "[31] Thomas E. Huang, Yifan Liu, Luc Van Gool, and Fisher Yu. Video task decathlon: Unifying image and video tasks in autonomous driving. In ICCV, 2023. 3", + "[32] Yifei Huang, Minjie Cai, Zhenqiang Li, Feng Lu, and Yoichi Sato. Mutual context network for jointly estimating egocentric gaze and action. IEEE TIP, 2020. 3", + "[33] Yifei Huang, Yusuke Sugano, and Yoichi Sato. Improving action segmentation via graph-based temporal reasoning. In CVPR, 2020. 2, 3" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "18283", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[34] Youngkyoon Jang, Brian Sullivan, Casimir Ludwig, Iain Gilchrist, Dima Damen, and Walterio Mayol-Cuevas. Epictent: An egocentric video dataset for camping tent assembly. In ICCVW, 2019. 2", + "[35] Zhuoliang Kang, Kristen Grauman, and Fei Sha. Learning with whom to share in multi-task feature learning. In ICML, 2011. 3", + "[36] Georgios Kapidis, Ronald Poppe, Elsbeth van Dam, Lucas Noldus, and Remco Veltkamp. Multitask learning to improve egocentric action recognition. In ICCVW, 2019. 3", + "[37] Steven Kearnes, Kevin McCloskey, Marc Berndl, Vijay Pande, and Patrick Riley. Molecular graph convolutions: moving beyond fingerprints. Journal of computer-aided molecular design, 2016. 2", + "[38] Alex Kendall, Yarin Gal, and Roberto Cipolla. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In CVPR, 2018. 3", + "[39] Asifullah Khan, Anabia Sohail, Umme Zahoora, and Aqsa Saeed Qureshi. A survey of the recent architectures of deep convolutional neural networks. Artificial intelligence review, 2020. 2", + "[40] Iasonas Kokkinos. Ethernet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In CVPR, 2017. 2, 3, 6", + "[41] Zewen Li, Fan Liu, Wenjie Yang, Shouheng Peng, and Jun Zhou. A survey of convolutional neural networks: analysis, applications, and prospects. IEEE transactions on neural networks and learning systems, 2021. 2", + "[42] Kevin Qinghong Lin, Jinping Wang, Mattia Soldan, Michael Wray, Rui Yan, Eric Z XU, Difei Gao, Rong-Cheng Tu, Wenzhe Zhao, Weijie Kong, et al. Egocentric video-language pretraining. In NeurIPS, 2022. 8", + "[43] Esteve Valls Mascaró, Hyemin Ahn, and Dongheui Lee. Intention-conditioned long-term human egocentric action anticipation. In WACV, 2023. 8", + "[44] Jonathan Munro and Dima Damen. Multi-modal domain adaptation for fine-grained action recognition. In CVPR, 2020. 2", + "[45] Tushar Nagarajan, Yanghao Li, Christoph Feichtenhofer, and Kristen Grauman. Ego-topo: Environment affordances from egocentric video. In CVPR, 2020. 3", + "[46] Adrián Núñez-Marcos, Gorka Azkune, and Ignacio Arganda-Carreras. Egocentric vision-based action recognition: A survey. Neurocomputing, 2022. 2", + "[47] Francesca Pistilli and Giuseppe Averta. Graph learning in robotics: a survey. IEEE Access, 2023. 
2", + "[48] Mirco Planamente, Chiara Plizzari, Simone Alberto Peirone, Barbara Caputo, and Andrea Bottino. Relative norm alignment for tackling domain shift in deep multi-modal classification. IJCV, 2024. 2", + "[49] Chiara Plizzari, Gabriele Goletto, Antonino Furnari, Siddhant Bansal, Francesco Ragusa, Giovanni Maria Farinella, Dima Damen, and Tatiana Tommasi. An outlook into the future of egocentric vision. arXiv preprint arXiv:2308.07123, 2023. 2" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[50] Chiara Plizzari, Toby Perrett, Barbara Caputo, and Dima Damen. What can a cook in italy teach a mechanic in india? action recognition generalisation over scenarios and locations. In ICCV, 2023. 2, 5", + "[51] Shraman Pramanick, Yale Song, Sayan Nag, Kevin Qinghong Lin, Hardik Shah, Mike Zheng Shou, Rama Chellappa, and Pengchuan Zhang. Egovlpv2: Egocentric video-language pre-training with fusion in the backbone. In ICCV, 2023. 2", + "[52] Santhosh K. Ramakrishnan, Ziad Al-Halah, and Kristen Grauman. Spotem: Efficient video search for episodic memory. In ICLR, 2023. 2", + "[53] Maheen Rashid, Hedvig Kjellstrom, and Yong Jae Lee. Action graphs: Weakly-supervised action localization with graph convolution networks. In WACV, 2020. 3", + "[54] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 3, 7", + "[55] Alvaro Sanchez-Gonzalez, Jonathan Godwin, Tobias Pfaff, Rex Ying, Jure Leskovec, and Peter Battaglia. Learning to simulate complex physics with graph networks. In ICML, 2020. 2", + "[56] Fadime Sener, Dibyadip Chatterjee, Daniel Shelepov, Kun He, Dipika Singhania, Robert Wang, and Angela Yao. Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In CVPR, 2022. 2", + "[57] Haosen Shi, Shen Ren, Tianwei Zhang, and Sinno Jialin Pan. Deep multitask learning with progressive parameter sharing. In ICCV, 2023. 3", + "[58] Martin Simonovsky and Nikos Komodakis. Dynamic edge-conditioned filters in convolutional neural networks on graphs. In CVPR, 2017. 2", + "[59] Ayan Sinha, Zhao Chen, Vijay Badrinarayanan, and Andrew Rabinovich. Gradient adversarial training of neural networks. arXiv preprint arXiv:1806.08028, 2018. 3", + "[60] Trevor Standley, Amir Zamir, Dawn Chen, Leonidas Guibas, Jitendra Malik, and Silvio Savarese. Which tasks should be learned together in multi-task learning? In ICML, 2020. 3", + "[61] Ximeng Sun, Rameswar Panda, Rogerio Feris, and Kate Saenko. Adashare: Learning what to share for efficient deep multi-task learning. In NeurIPS, 2020. 3", + "[62] Zehua Sun, Qiuhong Ke, Hossein Rahmani, Mohammed Bennamoun, Gang Wang, and Jun Liu. Human action recognition from various data modalities: A review. IEEE TPAMI, 2023. 2", + "[63] Simon Vandenhende, Stamatios Georgoulis, and Luc Van Gool. Mti-net: Multi-scale task interaction networks for multi-task learning. In ECCV, 2020. 3", + "[64] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 3", + "[65] Xiaohan Wang, Linchao Zhu, Heng Wang, and Yi Yang. Interactive prototype learning for egocentric action recognition. In ICCV, 2021. 3", + "[66] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics, 2019. 
2" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "18284", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[67] Sen Wu, Hongyang R. Zhang, and Christopher Ré. Understanding and improving information transfer in multi-task learning. In ICLR, 2020. 6", + "[68] Zihui Xue, Yale Song, Kristen Grauman, and Lorenzo Torresani. Egocentric video task translation. In CVPR, 2023. 2, 3, 5, 6, 7, 8", + "[69] Shen Yan, Xuehan Xiong, Anurag Arnab, Zhichao Lu, Mi Zhang, Chen Sun, and Cordelia Schmid. Multiview transformers for video recognition. In CVPR, 2022. 1", + "[70] Lijin Yang, Yifei Huang, Yusuke Sugano, and Yoichi Sato. Interact before align: Leveraging cross-modal knowledge for domain adaptive action recognition. In CVPR, 2022. 2", + "[71] Tianhe Yu, Saurabh Kumar, Abhishek Gupta, Sergey Levine, Karol Hausman, and Chelsea Finn. Gradient surgery for multi-task learning. In NeurIPS, 2020. 3", + "[72] Runhao Zeng, Wenbing Huang, Mingkui Tan, Yu Rong, Peilin Zhao, Junzhou Huang, and Chuang Gan. Graph convolutional networks for temporal action localization. In ICCV, 2019. 3", + "[73] Chen-Lin Zhang, Jianxin Wu, and Yin Li. Actionformer: Localizing moments of actions with transformers. In ECCV, 2022. 1, 2", + "[74] Yu Zhang and Qiang Yang. A survey on multi-task learning. IEEE Transactions on Knowledge and Data Engineering, 34 (12):5586-5609, 2021. 3", + "[75] Yue Zhao, Ishan Misra, Philipp Krahenbuhl, and Rohit Girdhar. Learning video representations from large language models. In CVPR, 2023. 2", + "[76] Zeyun Zhong, David Schneider, Michael Voit, Rainer Stiefelhagen, and Jürgen Beyerer. Anticipative feature fusion transformer for multi-modal action anticipation. In WACV, 2023. 1, 2", + "[77] Bolei Zhou, Alex Andonian, Aude Oliva, and Antonio Torralba. Temporal relational reasoning in videos. In ECCV, 2018. 5" + ], + "bbox": [ + 78, + 90, + 468, + 585 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "18285", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/b19cc746-13a8-4ac2-b79c-a1691351681c_model.json b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/b19cc746-13a8-4ac2-b79c-a1691351681c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ab7f0698fad27342f819a305a9ebfead969b9d4b --- /dev/null +++ b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/b19cc746-13a8-4ac2-b79c-a1691351681c_model.json @@ -0,0 +1,2312 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.163, + 0.131, + 0.808, + 0.177 + ], + "angle": 0, + "content": "A Backpack Full of Skills: Egocentric Video Understanding with Diverse Task Perspectives" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.203, + 0.836, + 0.257 + ], + "angle": 0, + "content": "Simone Alberto Peirone1 Francesca Pistilli1 Antonio Alliegro1,2 Giuseppe Averta1 \n1 Politecnico di Torino, 2 Istituto Italiano di Tecnologia \nfirstname.lastname@polito.it" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.292, + 0.314, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.474, + 0.643 + ], + "angle": 0, + "content": "Human comprehension of a video stream is naturally broad: in a few instants, we are able to understand what is happening, the relevance and relationship of objects, and forecast what will follow in the near future, everything all at once. We believe that - to effectively transfer such an holistic perception to intelligent machines - an important role is played by learning to correlate concepts and to abstract knowledge coming from different tasks, to synergistically exploit them when learning novel skills. To accomplish this, we look for a unified approach to video understanding which combines shared temporal modelling of human actions with minimal overhead, to support multiple downstream tasks and enable cooperation when learning novel skills. We then propose EgoPack, a solution that creates a collection of task perspectives that can be carried across downstream tasks and used as a potential source of additional insights, as a backpack of skills that a robot can carry around and use when needed. We demonstrate the effectiveness and efficiency of our approach on four Ego4D benchmarks, outperforming current state-of-the-art methods. Project webpage: sapeirone.github.io/EgoPack." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.664, + 0.21, + 0.68 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Our daily living activities are extremely complex and diverse, nonetheless humans have the extraordinary ability to reason on the behaviour itself in just a few instants from a visual input. We are able to spot what another person is doing, predict their next actions based on current observations, and understand the implications of an activity, for instance whether its effects are reversible. Observing someone in the kitchen by the worktable, where there is a pack of flour and a jug of water, we can identify that they are a chef kneading flour (reasoning about current activity). We can also forecast that the next step will involve mixing the flour with water (reasoning about the future), and finally obtaining dough (reasoning about implications of these actions). This type of holistic reasoning, which is natural for humans," + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.292, + 0.885, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.569, + 0.895, + 0.682 + ], + "angle": 0, + "content": "Figure 1. Given a video stream, a robot is asked to learn a novel task, e.g. Object State Change Classification (OSCC). 
To learn the new skill, the robot can access previously gained knowledge regarding different tasks, such as Point of No Return (PNR), Long Term Anticipation (LTA) and Action Recognition (AR), and use it during the learning process to enhance downstream task performance. This knowledge is stored as graphs inside the robot's backpack, always ready to boost a new skill." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.895, + 0.903 + ], + "angle": 0, + "content": "is still a distant goal for artificial intelligence systems. The challenge arises not only from the requirement of executing multiple tasks with a single architecture, but also from the necessity of being able to abstract and repurpose such knowledge across-tasks, for example to foster and enhance the learning of novel skills. Current research trends in human activity understanding predominantly focus on creating several, hyper-specialised, models. This approach splits the understanding of human activities into distinct skills, with each model being independently trained to rely only on \"task-specific\" clues for prediction [69, 73, 76]. However, this approach disregards the valuable insights that could be gleaned from different task perspectives. A first step in this direction relies on Multi-Task Learning (MTL) to exploit" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18275" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.468, + 0.198 + ], + "angle": 0, + "content": "the intuition that knowledge sharing between tasks may improve performance. However, the proposed multi-task models have some limitations [40], mostly concerning a negative transfer between tasks, making it difficult to outperform single-task models. Most importantly, MTL usually assumes the availability of supervision for all tasks at training time, limiting the extension of the models at a later time." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.2, + 0.468, + 0.381 + ], + "angle": 0, + "content": "The recently proposed EgoT2 framework [68] offers a unified solution to integrate various egocentric video tasks. It employs an ensemble of diverse, task-specific models and learns to translate task-specific clues through a transformer-based encoder-decoder to benefit one of the tasks. Although this approach fosters positive interactions between tasks, it has significant limitations: i) the primary task should be \"known\" at training time and present within the task-specific models collection, ii) it necessitates an extensive pretraining process and iii) it lacks a knowledge abstraction, as it relies on task-specific models rather than creating transferable concepts." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.383, + 0.468, + 0.625 + ], + "angle": 0, + "content": "Indeed, we argue that an important key to advance the learning capabilities of intelligent systems and to move a step closer to a generalised reasoning on visual understanding involves not only sharing information across tasks, but also abstracting task-specific knowledge for application in new scenarios. Considering an ensemble of vision tasks, each offers a distinct perspective on the input stream and extracts different types of information. Our goal is to encapsulate this diverse knowledge to be leveraged in the future to positively impact the learning of a novel skill. 
We focus on egocentric video understanding as it is the perfect harbour to study human activities and synergies between tasks. There is a strong connection between egocentric tasks. For instance, specific actions, like peeling a potato, directly result in a change in the state of the object (the potato in this case), illustrating the interconnected nature of these tasks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.627, + 0.468, + 0.868 + ], + "angle": 0, + "content": "All the above considerations motivate us in investigating new alternatives and we propose a novel framework for knowledge abstraction and sharing called EgoPack. Our underlying idea, is to exploit a set of known tasks, each one able to interpret an input stream according to its own perspective, to learn reusable knowledge that can aid the learning process of a novel task. We show this concept in Fig. 1, where a robot is equipped with a backpack that figuratively summarises all the knowledge gained from a set of tasks. To learn a new skill, the robot can \"take-out\" task-related knowledge from the backpack and leverage it within the learning process. The task-specific perspectives are collected in a single pretraining step of a novel multi-task network under the form of prototypes. We exploit a new versatile temporal graph-based architecture shared across all the tasks, with minimal overhead to support each task." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.468, + 0.901 + ], + "angle": 0, + "content": "When learning a new skill, EgoPack promotes the interaction between the different tasks by learning which rele" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.891, + 0.152 + ], + "angle": 0, + "content": "vant knowledge to extract from the different perspectives. The architecture of EgoPack is notably flexible, enabling easy adaptation to novel tasks by reusing the previous tasks to facilitate the learning process of any novel task." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.153, + 0.892, + 0.197 + ], + "angle": 0, + "content": "We demonstrate the effectiveness and efficiency of our approach on Ego4D [25], a large-scale egocentric vision dataset. To summarise, our main contributions are:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.198, + 0.891, + 0.227 + ], + "angle": 0, + "content": "1. We present a unified architecture to learn multiple egocentric vision tasks with minimal task-specific overhead;" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.228, + 0.891, + 0.272 + ], + "angle": 0, + "content": "2. We introduce EgoPack, a novel approach that leverages different task perspectives to build a robust knowledge abstraction which can foster the learning of a novel task;" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.273, + 0.891, + 0.317 + ], + "angle": 0, + "content": "3. Our approach outperforms both specialised single and multi-task baselines by leveraging the unique synergies and distinct perspectives of different tasks;" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.318, + 0.891, + 0.363 + ], + "angle": 0, + "content": "4. EgoPack achieves competitive performance on Ego4D [25] for all the considered benchmarks, outperforming the state-of-the-art on some." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.198, + 0.891, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.375, + 0.649, + 0.39 + ], + "angle": 0, + "content": "2. 
Related Works" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.4, + 0.892, + 0.612 + ], + "angle": 0, + "content": "Egocentric Vision Egocentric vision captures human activities from the privileged perspective of the camera wearer, allowing a unique point of view on their actions [3, 49]. Recently, the field has seen rapid development thanks to the release of several large-scale egocentric vision datasets [11, 12, 25, 30, 34, 56]. The rich annotations of these datasets [12, 25] allow to tackle a large number of tasks, including action recognition [46], action anticipation [18, 23, 76], next active object prediction [19], action segmentation [33, 73] and episodic memory [52]. Previous works in egocentric vision have focused on domain adaptation [6, 44, 48, 50, 70], multimodal learning [20, 62, 70] and large-scale video-language pretraining [1, 51, 75] to learn better representation for downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.629, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Graph Neural Networks for vision tasks Traditional neural networks, including Convolutional Neural Networks (CNNs), have been widely used in computer vision, showing impressive performance on a variety of problems [26, 39, 41]. However, these models often assume data lying on a regular domain, such as images that have a grid-like structure. In recent years, the interest in developing methods able to provide a more general and powerful type of processing has been growing and particular attention has been given to learning methods on graphs. Graph Neural Networks (GNNs) have the innate ability to effectively handle data that lie on irregular domains, such as 3D data [58, 66], robotics [47], molecular chemistry [37], and social or financial networks [15], and to model complex data relations [55]. Recently, transformer-based architectures had a great impact on vision application. Despite Transformers and GNNs share some similarities in their ability to handle various data types, they are fundamentally different in their" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18276" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.228 + ], + "angle": 0, + "content": "core architectures and the specific ways they process data. GNNs can model the topology of a graph and the relations between nodes while also inheriting all the desirable properties of classic convolutions: locality, hierarchical structures and efficient weights reuse. In video understanding GNNs have been applied to action localisation [22, 33, 53, 72], to build a knowledge graph from human actions [21], to model human-object interactions [13, 14] or to build a topological map of the environment [45]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.245, + 0.473, + 0.79 + ], + "angle": 0, + "content": "Multi-Task Learning MTL [5, 74] tackles the problem of learning to solve multiple tasks simultaneously. The development of this strategy is justified by the intuition that complex settings require solving multiple tasks, for instance autonomous driving [31], robotics and natural language processing. Furthermore, these networks can bring the theoretical advantage of sharing complementary information to improve performance. Several works have been done in this direction [7, 8, 10, 17, 31, 32, 40, 57], focusing on which parameters or tasks is better to share [28, 35, 60, 61] and promoting synergies between tasks [36, 65]. 
Such methods encounter the problem of negative transfer [40] and sharing with unrelated tasks [28, 60] consequently suffering of task competition and not being able to benefit from information sharing between tasks. To overcome these limitations, several methods have been proposed to balance task-related losses [9, 38, 59], to dynamically prioritise tasks [27], to reduce gradient interference between tasks [71] or to exploit task interactions at multiple scales [63]. Unfortunately, all these solutions require extensive task-specific tuning, and are not able to build an holistic perception across tasks. Few works have explored MTL in the field of egocentric vision [32, 36, 65, 68]. Among these, the recently proposed EgoT2 [68] builds an ensemble of diverse, task-specific models. The features of the different models are projected into a common feature space and processed through a transformer-based encoder-decoder to translate the contributions of different tasks and generate predictions for the primary task. Notably, the primary task has to be part of the task-specific models. This approach fosters positive interactions between tasks, resulting in improved performance compared to the single-task models. However, it has some limitations, as it is not able to build knowledge abstractions that can be easily transferred to novel tasks. Instead, we propose a model that can build a robust backpack of task perspectives that can be used in learning any novel tasks." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.801, + 0.17, + 0.815 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.902 + ], + "angle": 0, + "content": "We tackle a task cooperation setting, in which an egocentric vision model is able to exploit previously acquired knowledge over task perspectives to foster the learning process of any novel task. We formulate the proposed setting in Sec. 3.1. We present a unified temporal architecture to" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.152 + ], + "angle": 0, + "content": "model multiple tasks in Sec. 3.2, a key step to enable knowledge sharing between tasks. Finally, Sec. 3.3 presents our novel approach EgoPack to enable efficient transfer of different task perspectives to novel tasks." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.163, + 0.593, + 0.179 + ], + "angle": 0, + "content": "3.1. Setting" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.186, + 0.892, + 0.519 + ], + "angle": 0, + "content": "A task \\(\\mathcal{T}\\) is associated with a dataset \\(\\mathcal{D} = \\{(v_i,y_i)\\}_{i = 1}^N\\) where \\(v_{i}\\) is a video segment of arbitrary length, \\(y_{i}\\) is the associated ground truth label and \\(N\\) is the number of segments. Our approach follows a two-stages training process. First, a model \\(\\mathcal{M}\\) is trained on a set of \\(K\\) tasks \\(\\{\\mathcal{T}_0,\\dots ,\\mathcal{T}_K\\}\\), under a Multi-Task Learning framework with hard-parameter sharing [54] to encourage the model to learn more general and task-agnostic representations thanks to the joint supervision of multiple tasks. Then, the model is presented with a new task \\(\\mathcal{T}_{K + 1}\\) to learn, without access to the supervision of the previous tasks. In this scenario, the new task may benefit from potential semantic affinities with the previously seen tasks. 
For example, a model that has learnt to detect object changes may apply this knowledge for action recognition and vice-versa, as some actions are associated with object changes, e.g. cutting something, while others are not, e.g. moving an object. Our goal is to make these semantic affinities more explicit (and exploitable) so that the new task can learn to repurpose these perspectives from previous tasks to improve performance, a step towards more holistic models that seamlessly share knowledge between tasks." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.53, + 0.874, + 0.546 + ], + "angle": 0, + "content": "3.2. A Unified Architecture for Egocentric Tasks" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.553, + 0.892, + 0.854 + ], + "angle": 0, + "content": "The main premise of our method is that different egocentric vision tasks can be modelled using a shared architecture with minimal differences between tasks. Under this assumption, videos can be seen as a sequence of \\(N\\) temporal segments encoded as \\(\\mathbf{x} = \\{\\mathbf{x}_1,\\mathbf{x}_2,\\dots ,\\mathbf{x}_N\\}\\), where \\(\\mathbf{x}_i\\in \\mathbb{R}^D\\) represents the \\(D\\)-dimensional features of segment \\(v_{i}\\) extracted using some video features extractor \\(\\mathcal{F}\\), e.g. SlowFast [16] or Omnivore [24]. Such sequence could be interpreted as a temporal graph \\(\\mathcal{G}(\\mathcal{X},\\mathcal{E})\\), whose nodes \\(\\mathbf{x}_i\\in \\mathcal{X}\\) represent the segments of the video, and edges \\(e_{ij}\\in \\mathcal{E}\\) connect nodes \\(\\mathbf{x}_i\\) and \\(\\mathbf{x}_j\\) with a temporal distance considered relevant when lower than a threshold \\(\\tau\\). The connectivity of the graph defines the extent of its temporal modelling, i.e. connecting further apart nodes enables longer range temporal understanding which could benefit for example anticipation tasks. The threshold \\(\\tau\\) depends on the task at hand and more implementation details are provided in Sec. 4.1. The temporal position of each node in the sequence is encoded by adding to the node embeddings a positional encoding [64]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This formulation enables the use of Graph Neural Networks (GNNs) to learn the complex temporal relations between video segments and to cast different egocentric vision" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18277" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.151, + 0.079, + 0.47, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.187, + 0.42, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.079, + 0.837, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.348, + 0.892, + 0.432 + ], + "angle": 0, + "content": "Figure 2. Architecture of EgoPack when Object State Change Classification (OSCC) is the novel task. Videos are interpreted as a graph, whose nodes \\(\\mathbf{x}_i\\) represent actions, encoded as features, and edges connect temporally close segments. This representation enables the design of a Unified Temporal Backbone to learn multiple tasks with a shared architecture and minimal Task-Specific Heads, leveraging GNNs for temporal modelling. We exploit this architecture to jointly learn \\(K\\) tasks, e.g. AR, LTA and PNR. 
After this training process, we extract a set of prototypes \\(\\mathbf{P}^k\\) that summarise what the network has learnt from each task \\(\\mathcal{T}_k\\), like a backpack of skills that we can carry over. In this Cross-Tasks Interaction phase, the network can peek at these different task-perspective to enrich the learning of the novel task." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.449, + 0.473, + 0.631 + ], + "angle": 0, + "content": "tasks as graph prediction tasks, such as node-level or graph-level classification, as shown in Fig. 3. This assumption is reflected in our approach by decomposing the multi-task model \\(\\mathcal{M}\\) into two components: a general temporal backbone \\(\\mathcal{M}_t:\\mathbb{R}^D\\mapsto \\mathbb{R}^{D_t}\\), and a set of task-specific projection heads \\(\\mathcal{H}_k:\\mathbb{R}^{D_t}\\mapsto \\mathbb{R}^{D_k}\\) mapping the graph and/or the nodes to the features space of task \\(\\mathcal{T}_k\\) with dimension \\(D_{k}\\) as shown in Fig.2. \\(\\mathcal{M}_t\\) is a GNN with \\(L\\) layers that takes as input the temporal sequence \\(\\mathbf{x}\\) and provides as output the updated feature vectors \\(\\mathbf{f} = \\{\\mathbf{f}_1,\\mathbf{f}_2,\\dots ,\\mathbf{f}_N\\}\\). At layer \\(l\\), node embeddings are projected and combined with their neighbours, following the GraphSAGE architecture [29]:" + }, + { + "type": "equation", + "bbox": [ + 0.141, + 0.639, + 0.47, + 0.658 + ], + "angle": 0, + "content": "\\[\n\\mathbf {f} _ {i} ^ {(l + 1)} = \\mathbf {W} _ {r} ^ {(l)} \\mathbf {f} _ {i} ^ {(l)} + \\mathbf {W} ^ {(l)} \\cdot \\mathbf {g} _ {i} ^ {(l + 1)} + \\mathbf {b} ^ {(l)}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.667, + 0.47, + 0.752 + ], + "angle": 0, + "content": "where \\(\\mathbf{f}_i^{(l)}\\in \\mathbb{R}^{D_t^{(l)}}\\) are the features of node \\(\\mathbf{x}_i\\), \\(\\mathbf{b}^{(l)}\\in \\mathbb{R}^{D_t^{(l + 1)}}\\) is a bias term, \\(\\mathbf{W}_r^{(l)},\\mathbf{W}^{(l)}\\in \\mathbb{R}^{D_t^{(l + 1)}\\times D_t^{(l)}}\\) are the weight matrices associated to the root node and the aggregated neighbours' contribution \\(\\mathbf{g}_i^{(l + 1)}\\) respectively. The latter is computed as:" + }, + { + "type": "equation", + "bbox": [ + 0.141, + 0.763, + 0.469, + 0.79 + ], + "angle": 0, + "content": "\\[\n\\mathbf {g} _ {i} ^ {(l + 1)} = \\underset {\\mathbf {f} _ {i} \\in \\mathcal {N} _ {i}} {\\text {m e a n}} \\left(\\phi \\left(\\mathbf {W} _ {p} ^ {(l)} \\mathbf {f} _ {j} ^ {(l)} + \\mathbf {b} _ {p} ^ {(l)}\\right)\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.804, + 0.471, + 0.901 + ], + "angle": 0, + "content": "where \\(\\mathbf{W}_p^{(l)}\\in \\mathbb{R}^{D_t^{(l)}\\times D_t^{(l)}}\\) projects the neighbours before the aggregation step, \\(\\phi\\) is a non-linearity, \\(\\mathbf{b}_p^{(l)}\\in \\mathbb{R}^{D_t^{(l + 1)}}\\) is a bias term and \\(\\mathcal{N}_i\\) is the set of neighbours of node \\(\\mathbf{x}_i\\). Each layer is followed by Layer Normalization [2] and a LeakyReLU activation function. A residual connection around the temporal GNN allows the network to preserve" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.449, + 0.892, + 0.509 + ], + "angle": 0, + "content": "the original features. Intuitively, the neighbourhood \\(\\mathcal{N}_i\\) reflects the temporal dependencies of the input sequence and the GNN allows to iteratively extend the temporal receptive field of each node." 
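To make the temporal layer of Eqs. (1)-(2) concrete, below is a minimal sketch in plain PyTorch; it is not the authors' released code, the τ-threshold adjacency, the feature sizes, the LeakyReLU slope and all names are illustrative assumptions, and the positional encoding and the outer residual connection mentioned above are omitted for brevity.

```python
# Hedged sketch of one temporal GraphSAGE-style layer (Eqs. 1-2), followed by
# LayerNorm and LeakyReLU as described above. Plain PyTorch, illustrative sizes.
import torch
import torch.nn as nn


class TemporalSAGELayer(nn.Module):
    """f_i' = W_r f_i + W * mean_{j in N_i} phi(W_p f_j + b_p) + b."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.root = nn.Linear(dim_in, dim_out)               # W_r and bias b
        self.neigh = nn.Linear(dim_in, dim_out, bias=False)  # W
        self.pre = nn.Linear(dim_in, dim_in)                  # W_p, b_p (applied before aggregation)
        self.act = nn.LeakyReLU(0.2)                          # slope is an assumption
        self.norm = nn.LayerNorm(dim_out)

    def forward(self, x: torch.Tensor, adj: torch.Tensor) -> torch.Tensor:
        # x: (N, dim_in) segment features, adj: (N, N) binary temporal adjacency
        msgs = self.act(self.pre(x))                          # phi(W_p f_j + b_p), Eq. (2)
        deg = adj.sum(dim=1, keepdim=True).clamp(min=1.0)     # guard against isolated nodes
        agg = adj @ msgs / deg                                # mean over temporal neighbours
        out = self.root(x) + self.neigh(agg)                  # Eq. (1)
        return self.act(self.norm(out))


def temporal_adjacency(num_nodes: int, tau: int = 2) -> torch.Tensor:
    """Connect segments whose temporal distance is positive and at most tau."""
    idx = torch.arange(num_nodes)
    dist = (idx[:, None] - idx[None, :]).abs()
    return ((dist > 0) & (dist <= tau)).float()


if __name__ == "__main__":
    feats = torch.randn(8, 1536)                              # e.g. Omnivore-sized features
    layer = TemporalSAGELayer(1536, 256)
    print(layer(feats, temporal_adjacency(8)).shape)          # torch.Size([8, 256])
```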
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.544, + 0.893, + 0.761 + ], + "angle": 0, + "content": "Task-specific heads The output features of the temporal backbone \\(\\mathcal{M}_t\\) are shared across the different downstream tasks. To project these features into task-specific components, we add a set of projection heads \\(\\mathcal{H}_k\\), one for each task \\(\\mathcal{T}_k\\). For graph classification tasks, the nodes of each graph are aggregated using max pooling to obtain a unique features representation. In each head, a MultiLayer Perceptron outputs the task-specific features \\(\\mathbf{f}_i^k \\in \\mathbb{R}^{D^k}\\) and is followed by a linear layer to compute the task logits \\(\\mathbf{y}_i^k \\in \\mathbb{R}^{D_o^k}\\) where \\(D_o^k\\) is the number of labels for task \\(\\mathcal{T}_k\\). By limiting the task-specific portion of the network to the heads while sharing the temporal backbone, we can obtain the perspective of all tasks with a single forward through the latter. The network is trained on all the tasks by averaging their losses." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.782, + 0.833, + 0.799 + ], + "angle": 0, + "content": "3.3. Learning a novel task with a backpack" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.892, + 0.901 + ], + "angle": 0, + "content": "To solve the new task, the naive approach would be to finetune the model, adding a new head \\(\\mathcal{H}_{K + 1}\\) and possibly updating the temporal backbone \\(\\mathcal{M}_t\\). However, finetuning may not fully leverage the insights from other tasks as it could result in the loss of the previously acquired knowledge, as confirmed experimentally in Sec. 4.2." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18278" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.455 + ], + "angle": 0, + "content": "Building the backpack We propose to explicitly model the perspectives of the different tasks as a set of task-specific prototypes that abstract the knowledge gained by the previously seen tasks and can be accessed by novel tasks. We call this approach EgoPack and provide an overview in Fig. 2. These task-specific prototypes are collected from videos annotated for action recognition, as human actions can be seen as the common thread behind the different tasks. Practically, we forward these samples through the temporal backbone and take the output of the different task-specific projection heads, thus encoding the perspective of each task given the same input video. Finally, the features obtained from each task are aggregated according to the verb and noun labels of the action, effectively summarising the perspective of each task given the same input action. The result is a set of prototypes \\(\\mathbf{P}^k = \\{\\mathbf{p}_0^k,\\mathbf{p}_2^k,\\dots ,\\mathbf{p}_P^k\\} \\in \\mathbb{R}^{P\\times D_k}\\) for each task \\(\\mathcal{T}_k\\), where \\(P\\) is the number of unique (verb, noun) pairs in the dataset and \\(D_{k}\\) is the size of the task-specific features. These prototypes are frozen and represent a \"summary\" of what the models has learnt during the multi-task pretraining process, creating an abstraction of the gained knowledge. They can be then reused when learning novel tasks, like a backpack of skills that the model can carry over." 
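As a complement to the description above, here is a hedged sketch of how such per-task prototypes might be assembled. It is not the released implementation: the mean aggregation per (verb, noun) pair and every name in the snippet are assumptions, and the graph forward pass is collapsed into generic callables for brevity. The cross-task interaction that consumes these frozen prototypes is described next.

```python
# Assumed sketch: task-head outputs of AR-annotated segments are grouped by (verb, noun)
# pair and averaged into one frozen prototype per pair and per task.
from collections import defaultdict
from typing import Callable, Dict, List, Tuple

import torch


@torch.no_grad()
def build_prototypes(
    backbone: Callable[[torch.Tensor], torch.Tensor],           # shared temporal backbone M_t
    heads: Dict[str, Callable[[torch.Tensor], torch.Tensor]],   # task-specific heads H_k
    segments: List[torch.Tensor],                                # AR training segments
    labels: List[Tuple[int, int]],                               # (verb, noun) pair per segment
) -> Dict[str, torch.Tensor]:
    """Return, for each task k, a prototype matrix P^k of shape (P, D_k)."""
    buckets = {k: defaultdict(list) for k in heads}
    for x, pair in zip(segments, labels):
        shared = backbone(x)                                     # one forward through M_t
        for k, head in heads.items():
            buckets[k][pair].append(head(shared))                # task-k view of this action
    prototypes = {}
    for k, per_pair in buckets.items():
        pairs = sorted(per_pair)                                 # fixed ordering of the P pairs
        prototypes[k] = torch.stack(
            [torch.stack(per_pair[p]).mean(dim=0) for p in pairs]
        )                                                        # (P, D_k), kept frozen afterwards
    return prototypes


if __name__ == "__main__":
    protos = build_prototypes(
        backbone=torch.nn.Identity(),                            # placeholder for the temporal GNN
        heads={"AR": torch.nn.Linear(1536, 128), "OSCC": torch.nn.Linear(1536, 128)},
        segments=[torch.randn(1536) for _ in range(6)],
        labels=[(0, 1), (0, 1), (2, 3), (2, 3), (0, 4), (0, 4)],
    )
    print({k: v.shape for k, v in protos.items()})               # 3 unique pairs -> (3, 128) per task
```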
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.476, + 0.473, + 0.691 + ], + "angle": 0, + "content": "Leveraging the backpack During the learning process of the novel task \\(\\mathcal{T}_{K + 1}\\), the model can exploit the task prototypes obtained via the task-specific heads. As before, the output of the temporal backbone \\(\\mathbf{f}_i\\) is forwarded through all the projection heads to obtain the task-specific features \\(\\mathbf{f}_i^k\\). These features are used as queries to match the corresponding task prototypes \\(\\mathbf{P}^k\\), selecting the \\(k\\)-Nearest Neighbours among the prototypes using cosine similarity in the features space. Task features and their neighbouring prototypes form a graph-like structure, on which message passing can be used to enrich the task-specific features \\(\\mathbf{f}_i^k\\), following an iterative refinement approach. In particular, at each layer \\(l\\) we select the closest prototypes with \\(k\\)-NN and update the features \\(\\mathbf{f}_i^{(l),k}\\) according to the following rule:" + }, + { + "type": "equation", + "bbox": [ + 0.132, + 0.699, + 0.47, + 0.731 + ], + "angle": 0, + "content": "\\[\n\\mathbf {f} _ {i} ^ {(l + 1), k} = \\mathbf {W} _ {r} ^ {(l)} \\mathbf {f} _ {i} ^ {(l), k} + \\mathbf {W} ^ {(l)} \\cdot \\max _ {\\mathbf {p} _ {j} ^ {k} \\in \\mathcal {N} _ {i} ^ {(l), k}} \\mathbf {p} _ {j} ^ {k}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.742, + 0.47, + 0.84 + ], + "angle": 0, + "content": "where \\(\\mathbf{p}_j^k\\in \\mathcal{N}_i^{(l),k}\\) are the closest prototypes in \\(\\mathbf{P}^k\\) to \\(\\mathbf{f}_i^{(l),k}\\) and \\(\\mathbf{W}_r^{(l)}\\), \\(\\mathbf{W}^{(l)}\\in \\mathbb{R}^{D^k\\times D^k}\\) are the weight matrices associated to the input features and the aggregated neighbours respectively. Notably, only the task features are updated while the task prototypes remain frozen to preserve the original perspectives seen by the network." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.902 + ], + "angle": 0, + "content": "In this process, the task-specific heads \\(\\mathcal{H}_k\\) are initialised from the multi-task training and possibly updated during the task-specific finetuning process, allowing the model to freely explore the set of task prototypes and to select the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.262 + ], + "angle": 0, + "content": "most informative ones for each input sample. After the interaction phase, the refined features \\(\\tilde{\\mathbf{f}}_i^k\\) are fed to a classifier module to obtain the task logits \\(\\mathbf{y}_i^k\\in \\mathbb{R}^{D_o^k}\\) for each task \\(\\mathcal{T}_k\\) in the backpack. The final prediction is the sum of the pre-softmax logits coming from the different tasks and the output of a new head \\(\\mathcal{H}_{K + 1}\\) for the novel task. Intuitively, we allow each task to cast a vote on the final prediction, based on its perspective on the same video segment. In this phase, the temporal network, the task-specific heads and the weights of the GNNs are trained jointly using only the supervision of the novel task \\(\\mathcal{T}_{K + 1}\\)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.276, + 0.634, + 0.293 + ], + "angle": 0, + "content": "4. 
Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.302, + 0.895, + 0.409 + ], + "angle": 0, + "content": "We evaluate EgoPack on four Ego4d Human-Object Interaction benchmarks: Action Recognition (AR)1, Long Term Action Anticipation (LTA), Object State Change Classification (OSCC) and Point Of No Return (PNR). We report verb and noun top-1 accuracy for AR, accuracy for OSCC, edit distance for LTA and temporal localisation error (in seconds) for PNR." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.419, + 0.719, + 0.435 + ], + "angle": 0, + "content": "4.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.443, + 0.895, + 0.609 + ], + "angle": 0, + "content": "We use Omnivore Swin-L [24] features pre-trained on Kinetics-400 [4], released as part of Ego4D [25] and extracted using dense sampling over a window of 32 frames with a stride of 16 frames and features size 1536. In principle, EgoPack is agnostic to the underlying features extractor and could adopt other architectures. Following previous works on Ego4D [50] we use TRN [77] to temporally aggregate features from the three sub-segments of each input sample. The mapping between videos of each task and its corresponding temporal graph is task dependent, as shown in Fig. 3:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.61, + 0.894, + 0.685 + ], + "angle": 0, + "content": "- Action Recognition (AR): actions are mapped to the nodes of the temporal graph \\(\\mathcal{G}\\), and edges connect each node to the previous and next (Fig. 3a). To account for the variable length of videos, actions are processed in fixed size windows." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.686, + 0.895, + 0.79 + ], + "angle": 0, + "content": "- Long Term Anticipation (LTA): each input clip is mapped to a node in \\(\\mathcal{G}\\). Then, a sequence of new nodes is added to the graph, equivalent in number to the clips to forecast. These nodes are initialised with the mean features of the input clips and are connected to the previous and subsequent nodes in the sequence, as well as to the input clips (Fig. 3b)." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.791, + 0.895, + 0.853 + ], + "angle": 0, + "content": "- Object State Change Classification (OSCC) and Point of No Return (PNR): each input segment is further split into \\( n \\) sub-segments to account for the finer temporal granularity required by these tasks. Each sub-segment is" + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.61, + 0.895, + 0.853 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.863, + 0.892, + 0.901 + ], + "angle": 0, + "content": "\\(^{1}\\)AR is not an official Ego4D benchmark and was derived from the LTA annotations by [68]. To be consistent with previous works, we use the v1 version of the LTA annotations." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "18279" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.114, + 0.094, + 0.341, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.134, + 0.171, + 0.316, + 0.183 + ], + "angle": 0, + "content": "(a) Node classification (AR, PNR)" + }, + { + "type": "image", + "bbox": [ + 0.377, + 0.094, + 0.605, + 0.167 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.4, + 0.171, + 0.59, + 0.183 + ], + "angle": 0, + "content": "(b) Future node classification (LTA)" + }, + { + "type": "image", + "bbox": [ + 0.654, + 0.097, + 0.865, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.679, + 0.171, + 0.849, + 0.183 + ], + "angle": 0, + "content": "(c) Graph classification (OSCC)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.195, + 0.894, + 0.239 + ], + "angle": 0, + "content": "Figure 3. Egocentric vision tasks as graph prediction tasks. In AR and LTA, each node is an action within a temporal sequence and the objective is to predict the verb and noun labels of the nodes. In OSCC and PNR, nodes represent different temporal segments of the video clip and the goal is to output a global prediction for the entire graph (OSCC) or the individual nodes (PNR)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.252, + 0.469, + 0.281 + ], + "angle": 0, + "content": "mapped to a node in \\(\\mathcal{G}\\), and edges connect each node to the previous and next (Fig. 3c)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.282, + 0.471, + 0.465 + ], + "angle": 0, + "content": "Tasks have different annotations and are modelled as separate graphs, even though the temporal model is shared. The task prototypes are built using samples from the train split of the AR dataset. Tasks are trained with standard cross entropy loss, with the exception of PNR which uses binary cross entropy. EgoPack is trained for 30, 40 and 10 epochs for AR, LTA and OSCC/PNR respectively, with a learning rate of \\(1e - 4\\) and \\(1e - 6\\) for AR/LTA and OSCC/PNR respectively using Adam optimiser and batch size 16. All tasks share the same temporal and cross-task interaction architecture, with minimal task-specific hyper-parameter tuning. More details are reported in the supplementary." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.476, + 0.266, + 0.491 + ], + "angle": 0, + "content": "4.2. Quantitative results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.5, + 0.47, + 0.757 + ], + "angle": 0, + "content": "We show the main results of EgoPack in Table 1. To assess the validity of our approach, we proceed incrementally starting from single tasks models, i.e. each task is trained separately. In this setting, we compare a simple MLP baseline trained on the temporally aggregated features against our temporal graph methodology, which exhibits superior average performance. The improvement is particularly evident in the PNR task, e.g. from \\(1.76s\\) to \\(0.61s\\), where the subpar outcomes of the MLP can be attributed to the lack of explicit temporal modelling. In addition to higher performance, the temporal graph enables all the tasks to be modelled using a unified architecture which allows to train all the tasks at the same time (MTL). With the MTL model, we observe a significant drop in average performance, mostly driven by worse accuracy in AR and OSCC. 
This behaviour is the result of negative transfers between tasks when they are trained together [67]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Cross-Task Interactions We compare our approach EgoPack for efficient cross task interaction with EgoT2 [68], which learns to combine multiple task-specific frozen models to solve one of them. Unlike EgoPack, the learning process of EgoT2 is divided in two stages, i.e. a pre-training step where each individual task is learned from scratch and a task-specific translation step, where just one task of the collection is fine-tuned. Notably," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.251, + 0.892, + 0.509 + ], + "angle": 0, + "content": "both steps require the supervision of the downstream task. On the contrary, the multi-task pre-training of EgoPack is agnostic to the novel downstream task, potentially allowing to transfer the gained knowledge to any new task. To ensure a fair comparison with EgoPack, we re-implemented the task translation mechanism proposed by EgoT2 on top of our Temporal Graph single task models using Omnivore features. This approach is indicated as Task Translation in Table 1. Additional details on its implementation are provided in the supplementary. One of the main benefit of our approach is that it requires a single forward pass through the features extraction and temporal backbones to obtain the perspectives of the different tasks, unlike EgoT2 which requires a forward pass for each single task model. Notably, we also highlight that EgoPack obtains better or comparable performance even though the backbone used for features extraction was not trained on Ego4D." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.539, + 0.893, + 0.903 + ], + "angle": 0, + "content": "Ablation of the different contributions We summarise the main steps leading to EgoPack in Table 2, using an aggregated metric to capture the overall improvement across the various tasks when compared to the baseline. The metric is computed as an average of the individual task metrics. We adjusted the metrics by taking one minus the score for LTA and PNR, as lower values are preferable, and clipped the PNR localisation error at 1.0 to have the same scale across all the metrics. Temporal modelling alone greatly improves the score compared to the baseline. Although MTL allows to train under a multi-task objective, it clearly underperforms the temporal model due to negative transfers [40]. Task Translation partially recovers this gap on some tasks as shown in Table 1, but overall the aggregated metric is comparable with MTL. We speculate that the marginal improvement of Task Translation compared to MTL lies in the limited task-specific context the former has access to, as it can peek at the different perspectives of the auxiliary tasks only for the input video at hand, rather than looking at the entire knowledge gained by the model. On the contrary, the task prototypes of EgoPack allow to carry a more complete summary of what the models has learnt from which it can extract useful knowledge based on the sample and the task at hand. To validate that the benefits of EgoPack were not brought by" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18280" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.079, + 0.089, + 0.894, + 0.235 + ], + "angle": 0, + "content": "
Trained on frozen featuresAROSCCLTAPNR
Verbs Top-1 (%)Nouns Top-1 (%)Acc. (%)Verbs ED (↓)Nouns ED (↓)Loc. Err. (s) (↓)
Ego4D Baselines [25]X22.1821.5568.220.7460.7890.62
EgoT2s [68]X23.0423.2872.690.7310.7690.61
MLP24.0830.4570.470.7630.7421.76
Temporal Graph24.2530.4371.260.7540.7520.61
Multi-Task Learning22.0529.4471.100.7400.7460.62
Task Translation†23.6828.2871.480.7400.7560.61
EgoPack25.1031.1071.830.7280.7520.61
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.245, + 0.895, + 0.331 + ], + "angle": 0, + "content": "Table 1. EgoPack on Ego4D HOI tasks. MLP is a simple baseline consisting of a few linear layers, while Temporal Graph models all tasks using a unified temporal graph-based architecture. MTL [54] uses hard parameter sharing to jointly learn all tasks, which may result in negative transfers. Ego-T2s [68] learns to translate features across tasks to optimise the primary task. EgoPack builds on the unified architecture of the Temporal Graph and learns to exploit the perspective of different tasks for efficient transfers to the novel task. Performances of EgoPack are evaluated over three runs using accuracy for AR and OSCC, Edit Distance for LTA and temporal localisation error for PNR. Our implementation of the task translation mechanism from EgoT2 [68] using Omnivore features." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.345, + 0.47, + 0.464 + ], + "angle": 0, + "content": "
Temp. modelMulti-Task ObjectiveCross-Task InteractionMetrics Average (Δ)
MLPXXX0.416
Temp.XX0.433 (+4.22%)
Task Transl.X0.431 (+3.61%)
MTL0.430 (+3.50%)
MTL+FT0.437 (+5.02%)
EgoPackX0.441 (+6.10%)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.473, + 0.471, + 0.515 + ], + "angle": 0, + "content": "Table 2. Ablation of the different contributions in EgoPack, measured according to an aggregated score, computed as the mean of the standardised metrics across tasks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.524, + 0.471, + 0.69 + ], + "angle": 0, + "content": "the MTL pre-training alone, we also introduce a \\(MTL + FT\\) baseline where a new task-specific head is finetuned for the novel task, without access to the output of the other heads. The limited performance of this configuration could be explained by the model losing the knowledge learnt during the multi-task learning, without a significant improvement over the single-task baselines, thus only partially reusing the gained knowledge. On the contrary, EgoPack preserves this knowledge in the form of prototypes, which proves to be effective for retaining the model's knowledge when learning a new task." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.47, + 0.829 + ], + "angle": 0, + "content": "Depth of the GNN and the selection of \\( k \\) We observe that EgoPack is quite robust to the number of GNN layers in the interaction stage between the input features and the task prototypes, as shown in Fig. 4. Regarding the selection of the \\( k \\) parameter, we compare the \\( MTL + FT \\) baseline (\\( k = 0 \\)) with EgoPack. The best performance is achieved at \\( k = 4 \\) with a saturating trend afterwards, showing that interacting with a limited number of prototypes is sufficient." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Results on the test set We compare EgoPack on the test set of PNR, OSCC and LTA benchmarks, to validate the improvements and soundness of EgoPack. In this setting, a fair comparison between methods is challenging because" + }, + { + "type": "image", + "bbox": [ + 0.49, + 0.344, + 0.868, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.465, + 0.892, + 0.507 + ], + "angle": 0, + "content": "Figure 4. Parameter analysis for the cross-tasks interaction module of EgoPack. We analyse the impact on performance of GNN depth and the number of nearest neighbours denoted as \\( k \\)-NN." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.517, + 0.892, + 0.685 + ], + "angle": 0, + "content": "of the use of different backbones, supervision levels, ensemble strategies and challenge-specific tuning, such as training also on the validation set. Remarkably, we achieve SOTA performances in LTA, outperforming the other methods that finetune the entire backbone, with a more evident benefit in the verbs edit distance. In PNR, we closely match other approaches while the improvement is more limited in the OSCC task. In this task, we notice a relevant impact of the Ego4D pretraining on the performance. We provide a more in-depth description of the differences between these methods in the supplementary materials." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.696, + 0.679, + 0.712 + ], + "angle": 0, + "content": "4.3. Qualitative results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Closest Task Prototypes We evaluate which are the closest task-specific prototypes in Fig. 5. In this example, OSCC is the novel task and the model has access to the prototypes of the learnt tasks. 
We focus on the prototypes from the AR and PNR tasks and group together nodes that share the same verb label to make the picture more readable. Looking at the number of occurrences of the prototypes, we observe that some nodes are more discriminative for detecting a state change, e.g. peel and hold actions are typically associated (peel) or not (hold) with state changes, and therefore show more evident peaks for the positive and negative classes, indicating that the network uses these cues to solve the task." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "18281" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.084, + 0.089, + 0.462, + 0.164 + ], + "angle": 0, + "content": "
PNREgo4D Pt.Loc. Error (s) (↓)
CNN LSTM [25]X0.76
 EgoVLP [42]0.67 
EgoT2 [68]X0.66
EgoPackX0.66
" + }, + { + "type": "table", + "bbox": [ + 0.084, + 0.165, + 0.462, + 0.25 + ], + "angle": 0, + "content": "
OSCCEgo4D Pt.Accuracy (%)
I3D RN-50 [25]X67.6
 EgoVLP [42]74.0 
 EgoT2 (EgoVLP) [68]75.0 
EgoT2 (I3D) [68]X71.0
EgoPackX72.1
" + }, + { + "type": "table", + "bbox": [ + 0.084, + 0.252, + 0.462, + 0.34 + ], + "angle": 0, + "content": "
LTAEgo4D Pt.Verb (↓)Noun (↓)Action (↓)
SlowFast [25]X0.7390.7800.943
EgoT2 [68]X0.7220.7640.935
 HierVL [1]0.7240.7350.928 
I-CVAE [43]X0.7410.7400.930
EgoPackX0.7210.7350.925
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.347, + 0.47, + 0.404 + ], + "angle": 0, + "content": "Table 3. Comparison of EgoPack on the test set of the Ego4D benchmarks. For a fair comparison, we distinguish between methods pretrained on full Ego4D (✓) and those that have been trained only on the benchmark data \\((\\mathcal{X})\\), which includes EgoPack." + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.415, + 0.433, + 0.511 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.212, + 0.515, + 0.337, + 0.526 + ], + "angle": 0, + "content": "(a) AR Task Prototypes" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.532, + 0.433, + 0.629 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.208, + 0.632, + 0.341, + 0.644 + ], + "angle": 0, + "content": "(b) PNR Task Prototypes" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.653, + 0.47, + 0.696 + ], + "angle": 0, + "content": "Figure 5. Closest nodes to the OSCC samples among AR and PNR task prototypes. Some nodes appear to be more discriminative of the presence or absence of an object state change." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.704, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Confusion matrices In Fig. 6, we compare the confusion matrix of EgoPack with the MLP model for the top-20 largest verb and noun classes in the AR task, grouping the remaining classes in a \"rest\" pseudo-class. Overall, we observe an evident improvement on the noun labels, due to the positive effect of cross-tasks interaction. For example, the network appears to better disambiguate between objects that may appear at the same time in the scene, e.g. \"pants\" and \"cloth\" or \"bottle\" and \"lid\", which we speculate to be the result of a better ability of other tasks, namely OSCC, to identify active objects. Regarding the verbs, we also observe notable improvements, in addition to better recognition of verbs that are the temporal inverse of each other, e.g." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.085, + 0.671, + 0.185 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.189, + 0.632, + 0.201 + ], + "angle": 0, + "content": "(a) Verb (MLP)" + }, + { + "type": "image", + "bbox": [ + 0.716, + 0.086, + 0.882, + 0.185 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.752, + 0.189, + 0.855, + 0.201 + ], + "angle": 0, + "content": "(b) Verb (EgoPack)" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.205, + 0.677, + 0.311 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.548, + 0.315, + 0.635, + 0.327 + ], + "angle": 0, + "content": "(c)Noun (MLP)" + }, + { + "type": "image", + "bbox": [ + 0.709, + 0.206, + 0.885, + 0.311 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.746, + 0.315, + 0.854, + 0.328 + ], + "angle": 0, + "content": "(d) Noun (EgoPack)" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.336, + 0.892, + 0.365 + ], + "angle": 0, + "content": "Figure 6. Action Recognition confusion matrix of EgoPack compared to the MLP baseline for the top-20 verb and noun classes." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.378, + 0.892, + 0.409 + ], + "angle": 0, + "content": "\"put\" and \"take\" or \"open\" and \"close\", thanks to the improved temporal reasoning of our unified model." 
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.425, + 0.767, + 0.44 + ], + "angle": 0, + "content": "5. Conclusions and future work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.451, + 0.892, + 0.692 + ], + "angle": 0, + "content": "We presented EgoPack, a framework that allows knowledge sharing between different egocentric vision tasks, enabling an efficient use of the perspectives that each task can provide. We built EgoPack on top of a unified temporal architecture that can model distinct tasks with a shared backbone and minimal task-specific overhead. EgoPack overcomes the main limitation posed by traditional multi-task learning approaches, namely the unrealistic expectation that supervision is available for all tasks at training time. Indeed, the prototypes mechanism behind EgoPack allows to create a summary of what the model has learnt so far as it abstracts the task-specific knowledge that could be used in novel tasks. The model can then be updated to the any new task, while also peeking at the perspective of the previous tasks. Results on Ego4D validate our approach, showing competitive performance with other methods." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.709, + 0.667, + 0.725 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.894, + 0.902 + ], + "angle": 0, + "content": "This study was carried out within the FAIR - Future Artificial Intelligence Research and received funding from the European Union Next-GenerationEU (PIANO NAZIONALE DI RIPRESA E RESILLENZA (PNRR) - MISSIONE 4 COMPONENTE 2, INVESTIMENTO 1.3 - D.D. 1555 11/10/2022, PE00000013). This manuscript reflects only the authors' views and opinions, neither the European Union nor the European Commission can be considered responsible for them. We acknowledge the CINECA award under the ISCRA initiative, for the availability of high performance computing resources and support." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "18282" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.468, + 0.156 + ], + "angle": 0, + "content": "[1] Kumar Ashutosh, Rohit Girdhar, Lorenzo Torresani, and Kristen Grauman. Hiervl: Learning hierarchical videolanguage embeddings. In CVPR, 2023. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.468, + 0.199 + ], + "angle": 0, + "content": "[2] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.202, + 0.468, + 0.243 + ], + "angle": 0, + "content": "[3] Alejandro Betancourt, Pietro Morerio, Carlo S Regazzoni, and Matthias Rauterberg. The evolution of first person vision methods: A survey. IEEE TCSVT, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.246, + 0.468, + 0.285 + ], + "angle": 0, + "content": "[4] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In CVPR, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.288, + 0.468, + 0.315 + ], + "angle": 0, + "content": "[5] Rich Caruana. Multitask learning. Machine learning, 28: 41-75, 1997. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.318, + 0.468, + 0.372 + ], + "angle": 0, + "content": "[6] Min-Hung Chen, Zsolt Kira, Ghassan AlRegib, Jaekwon Yoo, Ruxin Chen, and Jian Zheng. Temporal attentive alignment for large-scale video domain adaptation. In ICCV, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.375, + 0.468, + 0.415 + ], + "angle": 0, + "content": "[7] Ting Chen, Saurabh Saxena, Lala Li, Tsung-Yi Lin, David J Fleet, and Geoffrey E Hinton. A unified sequence interface for vision tasks. In NeurIPS, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.418, + 0.468, + 0.473 + ], + "angle": 0, + "content": "[8] Tianlong Chen, Xuxi Chen, Xianzhi Du, Abdullah Rashwan, Fan Yang, Huizhong Chen, Zhangyang Wang, and Yeqing Li. Adamv-moe: Adaptive multi-task vision mixture-of-experts. In ICCV, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.476, + 0.468, + 0.53 + ], + "angle": 0, + "content": "[9] Zhao Chen, Vijay Badrinarayanan, Chen-Yu Lee, and Andrew Rabinovich. Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In ICML, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.533, + 0.468, + 0.587 + ], + "angle": 0, + "content": "[10] Yuanzheng Ci, Yizhou Wang, Meilin Chen, Shixiang Tang, Lei Bai, Feng Zhu, Rui Zhao, Fengwei Yu, Donglian Qi, and Wanli Ouyang. Unihcp: A unified model for human-centric perceptions. In CVPR, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.59, + 0.468, + 0.658 + ], + "angle": 0, + "content": "[11] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, and Michael Wray. The epic-kitchens dataset: Collection, challenges and baselines. IEEE TPAMI, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.66, + 0.468, + 0.741 + ], + "angle": 0, + "content": "[12] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Antonino Furnari, Jian Ma, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, and Michael Wray. Rescaling egocentric vision: Collection, pipeline and challenges for epic-kitchens-100. IJCV, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.745, + 0.468, + 0.799 + ], + "angle": 0, + "content": "[13] Eadom Dessalene, Michael Maynard, Chinmaya Devaraj, Cornelia Fermuller, and Yiannis Aloimonos. Egocentric object manipulation graphs. arXiv preprint arXiv:2006.03201, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.468, + 0.856 + ], + "angle": 0, + "content": "[14] Eadem Dessalene, Chinmaya Devaraj, Michael Maynard, Cornelia Fermuller, and Yiannis Aloimonos. Forecasting action through contact representations from first person video. IEEE TPAMI, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.468, + 0.9 + ], + "angle": 0, + "content": "[15] Wenqi Fan, Yao Ma, Qing Li, Yuan He, Eric Zhao, Jiliang Tang, and Dawei Yin. Graph neural networks for social recommendation. In The world wide web conference, 2019. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.468, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.133 + ], + "angle": 0, + "content": "[16] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In ICCV, 2019. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.177 + ], + "angle": 0, + "content": "[17] Chris Fifty, Ehsan Amid, Zhe Zhao, Tianhe Yu, Rohan Anil, and Chelsea Finn. Efficiently identifying task groupings for multi-task learning. In NeurIPS, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.18, + 0.892, + 0.22 + ], + "angle": 0, + "content": "[18] Antonino Furnari and Giovanni Maria Farinella. Rolling-unrolling lstms for action anticipation from first-person video. IEEE TPAMI, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.224, + 0.892, + 0.278 + ], + "angle": 0, + "content": "[19] Antonino Furnari, Sebastiano Battiato, Kristen Grauman, and Giovanni Maria Farinella. Next-active-object prediction from egocentric videos. Journal of Visual Communication and Image Representation, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.281, + 0.892, + 0.32 + ], + "angle": 0, + "content": "[20] Ruohan Gao, Tae-Hyun Oh, Kristen Grauman, and Lorenzo Torresani. Listen to look: Action recognition by previewing audio. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.324, + 0.892, + 0.365 + ], + "angle": 0, + "content": "[21] Pallabi Ghosh, Nirat Saini, Larry S Davis, and Abhinav Shrivastava. All about knowledge graphs for actions. arXiv preprint arXiv:2008.12432, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.368, + 0.892, + 0.408 + ], + "angle": 0, + "content": "[22] Pallabi Ghosh, Yi Yao, Larry Davis, and Ajay Divakaran. Stacked spatio-temporal graph convolutional networks for action segmentation. In WACV, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.411, + 0.892, + 0.437 + ], + "angle": 0, + "content": "[23] Rohit Girdhar and Kristen Grauman. Anticipative video transformer. In ICCV, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.441, + 0.892, + 0.493 + ], + "angle": 0, + "content": "[24] Rohit Girdhar, Mannat Singh, Nikhila Ravi, Laurens van der Maaten, Armand Joulin, and Ishan Misra. Omnivore: A single model for many visual modalities. In CVPR, 2022. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.498, + 0.892, + 0.566 + ], + "angle": 0, + "content": "[25] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In CVPR, 2022. 2, 5, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.569, + 0.892, + 0.623 + ], + "angle": 0, + "content": "[26] Jieuxiang Gu, Zhenhua Wang, Jason Kuen, Lianyang Ma, Amir Shahroudy, Bing Shuai, Ting Liu, Xingxing Wang, Gang Wang, Jianfei Cai, et al. Recent advances in convolutional neural networks. PR, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.626, + 0.892, + 0.667 + ], + "angle": 0, + "content": "[27] Michelle Guo, Albert Haque, De-An Huang, Serena Yeung, and Li Fei-Fei. Dynamic task prioritization for multitask learning. In ECCV, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.67, + 0.892, + 0.696 + ], + "angle": 0, + "content": "[28] Pengsheng Guo, Chen-Yu Lee, and Daniel Ulbricht. Learning to branch for multi-task learning. In ICML, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.699, + 0.892, + 0.727 + ], + "angle": 0, + "content": "[29] Will Hamilton, Zhitao Ying, and Jure Leskovec. Inductive representation learning on large graphs. In NeurIPS, 2017. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.729, + 0.892, + 0.769 + ], + "angle": 0, + "content": "[30] Dan Witzner Hansen and Qiang Ji. In the eye of the beholder: A survey of models for eyes and gaze. IEEE TPAMI, 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.773, + 0.892, + 0.813 + ], + "angle": 0, + "content": "[31] Thomas E. Huang, Yifan Liu, Luc Van Gool, and Fisher Yu. Video task decathlon: Unifying image and video tasks in autonomous driving. In ICCV, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.816, + 0.892, + 0.856 + ], + "angle": 0, + "content": "[32] Yifei Huang, Minjie Cai, Zhenqiang Li, Feng Lu, and Yoichi Sato. Mutual context network for jointly estimating egocentric gaze and action. IEEE TIP, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.859, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[33] Yifei Huang, Yusuke Sugano, and Yoichi Sato. Improving action segmentation via graph-based temporal reasoning. In CVPR, 2020. 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "18283" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[34] Youngkyoon Jang, Brian Sullivan, Casimir Ludwig, Iain Gilchrist, Dima Damen, and Walterio Mayol-Cuevas. Epictent: An egocentric video dataset for camping tent assembly. In ICCVW, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.15, + 0.47, + 0.19 + ], + "angle": 0, + "content": "[35] Zhuoliang Kang, Kristen Grauman, and Fei Sha. Learning with whom to share in multi-task feature learning. In ICML, 2011. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.194, + 0.47, + 0.235 + ], + "angle": 0, + "content": "[36] Georgios Kapidis, Ronald Poppe, Elsbeth van Dam, Lucas Noldus, and Remco Veltkamp. Multitask learning to improve egocentric action recognition. In ICCVW, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.238, + 0.471, + 0.293 + ], + "angle": 0, + "content": "[37] Steven Kearnes, Kevin McCloskey, Marc Berndl, Vijay Pande, and Patrick Riley. Molecular graph convolutions: moving beyond fingerprints. Journal of computer-aided molecular design, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.296, + 0.47, + 0.336 + ], + "angle": 0, + "content": "[38] Alex Kendall, Yarin Gal, and Roberto Cipolla. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In CVPR, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.339, + 0.471, + 0.394 + ], + "angle": 0, + "content": "[39] Asifullah Khan, Anabia Sohail, Umme Zahoora, and Aqsa Saeed Qureshi. A survey of the recent architectures of deep convolutional neural networks. Artificial intelligence review, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.397, + 0.47, + 0.451 + ], + "angle": 0, + "content": "[40] Iasonas Kokkinos. Ethernet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In CVPR, 2017. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.454, + 0.47, + 0.509 + ], + "angle": 0, + "content": "[41] Zewen Li, Fan Liu, Wenjie Yang, Shouheng Peng, and Jun Zhou. A survey of convolutional neural networks: analysis, applications, and prospects. 
IEEE transactions on neural networks and learning systems, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.512, + 0.47, + 0.567 + ], + "angle": 0, + "content": "[42] Kevin Qinghong Lin, Jinping Wang, Mattia Soldan, Michael Wray, Rui Yan, Eric Z XU, Difei Gao, Rong-Cheng Tu, Wenzhe Zhao, Weijie Kong, et al. Egocentric video-language pretraining. In NeurIPS, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.569, + 0.47, + 0.61 + ], + "angle": 0, + "content": "[43] Esteve Valls Mascaró, Hyemin Ahn, and Dongheui Lee. Intention-conditioned long-term human egocentric action anticipation. In WACV, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.613, + 0.47, + 0.653 + ], + "angle": 0, + "content": "[44] Jonathan Munro and Dima Damen. Multi-modal domain adaptation for fine-grained action recognition. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.657, + 0.47, + 0.697 + ], + "angle": 0, + "content": "[45] Tushar Nagarajan, Yanghao Li, Christoph Feichtenhofer, and Kristen Grauman. Ego-topo: Environment affordances from egocentric video. In CVPR, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.701, + 0.47, + 0.742 + ], + "angle": 0, + "content": "[46] Adrián Núñez-Marcos, Gorka Azkune, and Ignacio Arganda-Carreras. Egocentric vision-based action recognition: A survey. Neurocomputing, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.745, + 0.47, + 0.771 + ], + "angle": 0, + "content": "[47] Francesca Pistilli and Giuseppe Averta. Graph learning in robotics: a survey. IEEE Access, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.47, + 0.828 + ], + "angle": 0, + "content": "[48] Mirco Planamente, Chiara Plizzari, Simone Alberto Peirone, Barbara Caputo, and Andrea Bottino. Relative norm alignment for tackling domain shift in deep multi-modal classification. IJCV, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.47, + 0.899 + ], + "angle": 0, + "content": "[49] Chiara Plizzari, Gabriele Goletto, Antonino Furnari, Siddhant Bansal, Francesco Ragusa, Giovanni Maria Farinella, Dima Damen, and Tatiana Tommasi. An outlook into the future of egocentric vision. arXiv preprint arXiv:2308.07123, 2023. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.146 + ], + "angle": 0, + "content": "[50] Chiara Plizzari, Toby Perrett, Barbara Caputo, and Dima Damen. What can a cook in italy teach a mechanic in india? action recognition generalisation over scenarios and locations. In ICCV, 2023. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.216 + ], + "angle": 0, + "content": "[51] Shraman Pramanick, Yale Song, Sayan Nag, Kevin Qinghong Lin, Hardik Shah, Mike Zheng Shou, Rama Chellappa, and Pengchuan Zhang. Egovlpv2: Egocentric video-language pre-training with fusion in the backbone. In ICCV, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.219, + 0.892, + 0.259 + ], + "angle": 0, + "content": "[52] Santhosh K. Ramakrishnan, Ziad Al-Halah, and Kristen Grauman. Spotem: Efficient video search for episodic memory. In ICLR, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.261, + 0.892, + 0.3 + ], + "angle": 0, + "content": "[53] Maheen Rashid, Hedvig Kjellstrom, and Yong Jae Lee. Action graphs: Weakly-supervised action localization with graph convolution networks. In WACV, 2020. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.302, + 0.892, + 0.341 + ], + "angle": 0, + "content": "[54] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.344, + 0.892, + 0.397 + ], + "angle": 0, + "content": "[55] Alvaro Sanchez-Gonzalez, Jonathan Godwin, Tobias Pfaff, Rex Ying, Jure Leskovec, and Peter Battaglia. Learning to simulate complex physics with graph networks. In ICML, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.4, + 0.892, + 0.453 + ], + "angle": 0, + "content": "[56] Fadime Sener, Dibyadip Chatterjee, Daniel Shelepov, Kun He, Dipika Singhania, Robert Wang, and Angela Yao. Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.455, + 0.892, + 0.495 + ], + "angle": 0, + "content": "[57] Haosen Shi, Shen Ren, Tianwei Zhang, and Sinno Jialin Pan. Deep multitask learning with progressive parameter sharing. In ICCV, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.498, + 0.892, + 0.537 + ], + "angle": 0, + "content": "[58] Martin Simonovsky and Nikos Komodakis. Dynamic edge-conditioned filters in convolutional neural networks on graphs. In CVPR, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.54, + 0.892, + 0.579 + ], + "angle": 0, + "content": "[59] Ayan Sinha, Zhao Chen, Vijay Badrinarayanan, and Andrew Rabinovich. Gradient adversarial training of neural networks. arXiv preprint arXiv:1806.08028, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.581, + 0.892, + 0.622 + ], + "angle": 0, + "content": "[60] Trevor Standley, Amir Zamir, Dawn Chen, Leonidas Guibas, Jitendra Malik, and Silvio Savarese. Which tasks should be learned together in multi-task learning? In ICML, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.624, + 0.892, + 0.663 + ], + "angle": 0, + "content": "[61] Ximeng Sun, Rameswar Panda, Rogerio Feris, and Kate Saenko. Adashare: Learning what to share for efficient deep multi-task learning. In NeurIPS, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.665, + 0.892, + 0.718 + ], + "angle": 0, + "content": "[62] Zehua Sun, Qiuhong Ke, Hossein Rahmani, Mohammed Bennamoun, Gang Wang, and Jun Liu. Human action recognition from various data modalities: A review. IEEE TPAMI, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.72, + 0.892, + 0.761 + ], + "angle": 0, + "content": "[63] Simon Vandenhende, Stamatios Georgoulis, and Luc Van Gool. Mti-net: Multi-scale task interaction networks for multi-task learning. In ECCV, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.763, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[64] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.804, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[65] Xiaohan Wang, Linchao Zhu, Heng Wang, and Yi Yang. Interactive prototype learning for egocentric action recognition. In ICCV, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[66] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. 
ACM Transactions on Graphics, 2019. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "18284" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "[67] Sen Wu, Hongyang R. Zhang, and Christopher Ré. Understanding and improving information transfer in multi-task learning. In ICLR, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.136, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[68] Zihui Xue, Yale Song, Kristen Grauman, and Lorenzo Torresani. Egocentric video task translation. In CVPR, 2023. 2, 3, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.179, + 0.469, + 0.22 + ], + "angle": 0, + "content": "[69] Shen Yan, Xuehan Xiong, Anurag Arnab, Zhichao Lu, Mi Zhang, Chen Sun, and Cordelia Schmid. Multiview transformers for video recognition. In CVPR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.222, + 0.469, + 0.262 + ], + "angle": 0, + "content": "[70] Lijin Yang, Yifei Huang, Yusuke Sugano, and Yoichi Sato. Interact before align: Leveraging cross-modal knowledge for domain adaptive action recognition. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.265, + 0.469, + 0.305 + ], + "angle": 0, + "content": "[71] Tianhe Yu, Saurabh Kumar, Abhishek Gupta, Sergey Levine, Karol Hausman, and Chelsea Finn. Gradient surgery for multi-task learning. In NeurIPS, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.307, + 0.469, + 0.36 + ], + "angle": 0, + "content": "[72] Runhao Zeng, Wenbing Huang, Mingkui Tan, Yu Rong, Peilin Zhao, Junzhou Huang, and Chuang Gan. Graph convolutional networks for temporal action localization. In ICCV, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.364, + 0.469, + 0.403 + ], + "angle": 0, + "content": "[73] Chen-Lin Zhang, Jianxin Wu, and Yin Li. Actionformer: Localizing moments of actions with transformers. In ECCV, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.407, + 0.468, + 0.447 + ], + "angle": 0, + "content": "[74] Yu Zhang and Qiang Yang. A survey on multi-task learning. IEEE Transactions on Knowledge and Data Engineering, 34 (12):5586-5609, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.449, + 0.468, + 0.489 + ], + "angle": 0, + "content": "[75] Yue Zhao, Ishan Misra, Philipp Krahenbuhl, and Rohit Girdhar. Learning video representations from large language models. In CVPR, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.492, + 0.468, + 0.545 + ], + "angle": 0, + "content": "[76] Zeyun Zhong, David Schneider, Michael Voit, Rainer Stiefelhagen, and Jürgen Beyerer. Anticipative feature fusion transformer for multi-modal action anticipation. In WACV, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.549, + 0.468, + 0.587 + ], + "angle": 0, + "content": "[77] Bolei Zhou, Alex Andonian, Aude Oliva, and Antonio Torralba. Temporal relational reasoning in videos. In ECCV, 2018. 
5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.587 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.956 + ], + "angle": 0, + "content": "18285" + } + ] +] \ No newline at end of file diff --git a/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/b19cc746-13a8-4ac2-b79c-a1691351681c_origin.pdf b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/b19cc746-13a8-4ac2-b79c-a1691351681c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d9823d8e6d9340a3aff8e63682fe970201f26997 --- /dev/null +++ b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/b19cc746-13a8-4ac2-b79c-a1691351681c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a9bfb0b1277abfcb9fa592e52204e8ac3ec6e336246e73f405c675d667b9d71 +size 2281898 diff --git a/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/full.md b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/full.md new file mode 100644 index 0000000000000000000000000000000000000000..06436c581fe8ac232a7112517a44557d47d3ad6f --- /dev/null +++ b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/full.md @@ -0,0 +1,289 @@ +# A Backpack Full of Skills: Egocentric Video Understanding with Diverse Task Perspectives + +Simone Alberto Peirone1 Francesca Pistilli1 Antonio Alliegro1,2 Giuseppe Averta1 +1 Politecnico di Torino, 2 Istituto Italiano di Tecnologia +firstname.lastname@polito.it + +# Abstract + +Human comprehension of a video stream is naturally broad: in a few instants, we are able to understand what is happening, the relevance and relationship of objects, and forecast what will follow in the near future, everything all at once. We believe that - to effectively transfer such an holistic perception to intelligent machines - an important role is played by learning to correlate concepts and to abstract knowledge coming from different tasks, to synergistically exploit them when learning novel skills. To accomplish this, we look for a unified approach to video understanding which combines shared temporal modelling of human actions with minimal overhead, to support multiple downstream tasks and enable cooperation when learning novel skills. We then propose EgoPack, a solution that creates a collection of task perspectives that can be carried across downstream tasks and used as a potential source of additional insights, as a backpack of skills that a robot can carry around and use when needed. We demonstrate the effectiveness and efficiency of our approach on four Ego4D benchmarks, outperforming current state-of-the-art methods. Project webpage: sapeirone.github.io/EgoPack. + +# 1. Introduction + +Our daily living activities are extremely complex and diverse, nonetheless humans have the extraordinary ability to reason on the behaviour itself in just a few instants from a visual input. We are able to spot what another person is doing, predict their next actions based on current observations, and understand the implications of an activity, for instance whether its effects are reversible. Observing someone in the kitchen by the worktable, where there is a pack of flour and a jug of water, we can identify that they are a chef kneading flour (reasoning about current activity). 
We can also forecast that the next step will involve mixing the flour with water (reasoning about the future), and finally obtaining dough (reasoning about implications of these actions). This type of holistic reasoning, which is natural for humans, + +![](images/0d10b8eac08614be2ccc8d3f6eaefd5c661cee07f42c1a02dcdd3b319a3eb533.jpg) +Figure 1. Given a video stream, a robot is asked to learn a novel task, e.g. Object State Change Classification (OSCC). To learn the new skill, the robot can access previously gained knowledge regarding different tasks, such as Point of No Return (PNR), Long Term Anticipation (LTA) and Action Recognition (AR), and use it during the learning process to enhance downstream task performance. This knowledge is stored as graphs inside the robot's backpack, always ready to boost a new skill. + +is still a distant goal for artificial intelligence systems. The challenge arises not only from the requirement of executing multiple tasks with a single architecture, but also from the necessity of being able to abstract and repurpose such knowledge across-tasks, for example to foster and enhance the learning of novel skills. Current research trends in human activity understanding predominantly focus on creating several, hyper-specialised, models. This approach splits the understanding of human activities into distinct skills, with each model being independently trained to rely only on "task-specific" clues for prediction [69, 73, 76]. However, this approach disregards the valuable insights that could be gleaned from different task perspectives. A first step in this direction relies on Multi-Task Learning (MTL) to exploit + +the intuition that knowledge sharing between tasks may improve performance. However, the proposed multi-task models have some limitations [40], mostly concerning a negative transfer between tasks, making it difficult to outperform single-task models. Most importantly, MTL usually assumes the availability of supervision for all tasks at training time, limiting the extension of the models at a later time. + +The recently proposed EgoT2 framework [68] offers a unified solution to integrate various egocentric video tasks. It employs an ensemble of diverse, task-specific models and learns to translate task-specific clues through a transformer-based encoder-decoder to benefit one of the tasks. Although this approach fosters positive interactions between tasks, it has significant limitations: i) the primary task should be "known" at training time and present within the task-specific models collection, ii) it necessitates an extensive pretraining process and iii) it lacks a knowledge abstraction, as it relies on task-specific models rather than creating transferable concepts. + +Indeed, we argue that an important key to advance the learning capabilities of intelligent systems and to move a step closer to a generalised reasoning on visual understanding involves not only sharing information across tasks, but also abstracting task-specific knowledge for application in new scenarios. Considering an ensemble of vision tasks, each offers a distinct perspective on the input stream and extracts different types of information. Our goal is to encapsulate this diverse knowledge to be leveraged in the future to positively impact the learning of a novel skill. We focus on egocentric video understanding as it is the perfect harbour to study human activities and synergies between tasks. There is a strong connection between egocentric tasks. 
For instance, specific actions, like peeling a potato, directly result in a change in the state of the object (the potato in this case), illustrating the interconnected nature of these tasks.

All the above considerations motivate us to investigate new alternatives, and we propose a novel framework for knowledge abstraction and sharing called EgoPack. Our underlying idea is to exploit a set of known tasks, each one able to interpret an input stream according to its own perspective, to learn reusable knowledge that can aid the learning process of a novel task. We show this concept in Fig. 1, where a robot is equipped with a backpack that figuratively summarises all the knowledge gained from a set of tasks. To learn a new skill, the robot can "take out" task-related knowledge from the backpack and leverage it within the learning process. The task-specific perspectives are collected in a single pretraining step of a novel multi-task network under the form of prototypes. We exploit a new versatile temporal graph-based architecture shared across all the tasks, with minimal overhead to support each task.

When learning a new skill, EgoPack promotes the interaction between the different tasks by learning which relevant knowledge to extract from the different perspectives. The architecture of EgoPack is notably flexible, enabling easy adaptation to novel tasks by reusing the previous tasks to facilitate the learning process of any novel task.

We demonstrate the effectiveness and efficiency of our approach on Ego4D [25], a large-scale egocentric vision dataset. To summarise, our main contributions are:

1. We present a unified architecture to learn multiple egocentric vision tasks with minimal task-specific overhead;
2. We introduce EgoPack, a novel approach that leverages different task perspectives to build a robust knowledge abstraction which can foster the learning of a novel task;
3. Our approach outperforms both specialised single and multi-task baselines by leveraging the unique synergies and distinct perspectives of different tasks;
4. EgoPack achieves competitive performance on Ego4D [25] for all the considered benchmarks, outperforming the state-of-the-art on some.

# 2. Related Works

Egocentric Vision Egocentric vision captures human activities from the privileged perspective of the camera wearer, allowing a unique point of view on their actions [3, 49]. Recently, the field has seen rapid development thanks to the release of several large-scale egocentric vision datasets [11, 12, 25, 30, 34, 56]. The rich annotations of these datasets [12, 25] make it possible to tackle a large number of tasks, including action recognition [46], action anticipation [18, 23, 76], next active object prediction [19], action segmentation [33, 73] and episodic memory [52]. Previous works in egocentric vision have focused on domain adaptation [6, 44, 48, 50, 70], multimodal learning [20, 62, 70] and large-scale video-language pretraining [1, 51, 75] to learn better representations for downstream tasks.

Graph Neural Networks for vision tasks Traditional neural networks, including Convolutional Neural Networks (CNNs), have been widely used in computer vision, showing impressive performance on a variety of problems [26, 39, 41]. However, these models often assume data lying on a regular domain, such as images that have a grid-like structure.
In recent years, interest in methods able to provide a more general and powerful type of processing has been growing, and particular attention has been given to learning methods on graphs. Graph Neural Networks (GNNs) have the innate ability to effectively handle data that lie on irregular domains, such as 3D data [58, 66], robotics [47], molecular chemistry [37], and social or financial networks [15], and to model complex data relations [55]. Recently, transformer-based architectures have had a great impact on vision applications. Although Transformers and GNNs share some similarities in their ability to handle various data types, they are fundamentally different in their core architectures and the specific ways they process data. GNNs can model the topology of a graph and the relations between nodes while also inheriting all the desirable properties of classic convolutions: locality, hierarchical structures and efficient weight reuse. In video understanding, GNNs have been applied to action localisation [22, 33, 53, 72], to build a knowledge graph from human actions [21], to model human-object interactions [13, 14] or to build a topological map of the environment [45].

Multi-Task Learning MTL [5, 74] tackles the problem of learning to solve multiple tasks simultaneously. The development of this strategy is justified by the intuition that complex settings require solving multiple tasks, for instance autonomous driving [31], robotics and natural language processing. Furthermore, these networks can bring the theoretical advantage of sharing complementary information to improve performance. Several works have been proposed in this direction [7, 8, 10, 17, 31, 32, 40, 57], focusing on which parameters or tasks should be shared [28, 35, 60, 61] and on promoting synergies between tasks [36, 65]. Such methods encounter the problems of negative transfer [40] and sharing with unrelated tasks [28, 60], consequently suffering from task competition and failing to benefit from information sharing between tasks. To overcome these limitations, several methods have been proposed to balance task-related losses [9, 38, 59], to dynamically prioritise tasks [27], to reduce gradient interference between tasks [71] or to exploit task interactions at multiple scales [63]. Unfortunately, all these solutions require extensive task-specific tuning, and are not able to build an holistic perception across tasks. Few works have explored MTL in the field of egocentric vision [32, 36, 65, 68]. Among these, the recently proposed EgoT2 [68] builds an ensemble of diverse, task-specific models. The features of the different models are projected into a common feature space and processed through a transformer-based encoder-decoder to translate the contributions of different tasks and generate predictions for the primary task. Notably, the primary task has to be part of the task-specific models. This approach fosters positive interactions between tasks, resulting in improved performance compared to the single-task models. However, it has some limitations, as it is not able to build knowledge abstractions that can be easily transferred to novel tasks. Instead, we propose a model that can build a robust backpack of task perspectives that can be used when learning any novel task.

# 3. Method

We tackle a task cooperation setting, in which an egocentric vision model is able to exploit previously acquired knowledge over task perspectives to foster the learning process of any novel task.
We formulate the proposed setting in Sec. 3.1. We present a unified temporal architecture to + +model multiple tasks in Sec. 3.2, a key step to enable knowledge sharing between tasks. Finally, Sec. 3.3 presents our novel approach EgoPack to enable efficient transfer of different task perspectives to novel tasks. + +# 3.1. Setting + +A task $\mathcal{T}$ is associated with a dataset $\mathcal{D} = \{(v_i,y_i)\}_{i = 1}^N$ where $v_{i}$ is a video segment of arbitrary length, $y_{i}$ is the associated ground truth label and $N$ is the number of segments. Our approach follows a two-stages training process. First, a model $\mathcal{M}$ is trained on a set of $K$ tasks $\{\mathcal{T}_0,\dots ,\mathcal{T}_K\}$ , under a Multi-Task Learning framework with hard-parameter sharing [54] to encourage the model to learn more general and task-agnostic representations thanks to the joint supervision of multiple tasks. Then, the model is presented with a new task $\mathcal{T}_{K + 1}$ to learn, without access to the supervision of the previous tasks. In this scenario, the new task may benefit from potential semantic affinities with the previously seen tasks. For example, a model that has learnt to detect object changes may apply this knowledge for action recognition and vice-versa, as some actions are associated with object changes, e.g. cutting something, while others are not, e.g. moving an object. Our goal is to make these semantic affinities more explicit (and exploitable) so that the new task can learn to repurpose these perspectives from previous tasks to improve performance, a step towards more holistic models that seamlessly share knowledge between tasks. + +# 3.2. A Unified Architecture for Egocentric Tasks + +The main premise of our method is that different egocentric vision tasks can be modelled using a shared architecture with minimal differences between tasks. Under this assumption, videos can be seen as a sequence of $N$ temporal segments encoded as $\mathbf{x} = \{\mathbf{x}_1,\mathbf{x}_2,\dots ,\mathbf{x}_N\}$ , where $\mathbf{x}_i\in \mathbb{R}^D$ represents the $D$ -dimensional features of segment $v_{i}$ extracted using some video features extractor $\mathcal{F}$ , e.g. SlowFast [16] or Omnivore [24]. Such sequence could be interpreted as a temporal graph $\mathcal{G}(\mathcal{X},\mathcal{E})$ , whose nodes $\mathbf{x}_i\in \mathcal{X}$ represent the segments of the video, and edges $e_{ij}\in \mathcal{E}$ connect nodes $\mathbf{x}_i$ and $\mathbf{x}_j$ with a temporal distance considered relevant when lower than a threshold $\tau$ . The connectivity of the graph defines the extent of its temporal modelling, i.e. connecting further apart nodes enables longer range temporal understanding which could benefit for example anticipation tasks. The threshold $\tau$ depends on the task at hand and more implementation details are provided in Sec. 4.1. The temporal position of each node in the sequence is encoded by adding to the node embeddings a positional encoding [64]. + +This formulation enables the use of Graph Neural Networks (GNNs) to learn the complex temporal relations between video segments and to cast different egocentric vision + +![](images/b2f5e1b612ee56559f47d71736cc2ad955ce3dbb0b93273aa55ae4086be6a0e1.jpg) + +![](images/785ed7da5144e5810ec295e13bc5c0574be4bd65490bc94fcb4b717051fbba72.jpg) +Figure 2. Architecture of EgoPack when Object State Change Classification (OSCC) is the novel task. 
Videos are interpreted as a graph, whose nodes $\mathbf{x}_i$ represent actions, encoded as features, and edges connect temporally close segments. This representation enables the design of a Unified Temporal Backbone to learn multiple tasks with a shared architecture and minimal Task-Specific Heads, leveraging GNNs for temporal modelling. We exploit this architecture to jointly learn $K$ tasks, e.g. AR, LTA and PNR. After this training process, we extract a set of prototypes $\mathbf{P}^k$ that summarise what the network has learnt from each task $\mathcal{T}_k$, like a backpack of skills that we can carry over. In this Cross-Tasks Interaction phase, the network can peek at these different task perspectives to enrich the learning of the novel task.

![](images/a950485f5d0e64e5819580c243221b5a24c344ea28f7feae7d3246b50880e136.jpg)

tasks as graph prediction tasks, such as node-level or graph-level classification, as shown in Fig. 3. This assumption is reflected in our approach by decomposing the multi-task model $\mathcal{M}$ into two components: a general temporal backbone $\mathcal{M}_t:\mathbb{R}^D\mapsto \mathbb{R}^{D_t}$, and a set of task-specific projection heads $\mathcal{H}_k:\mathbb{R}^{D_t}\mapsto \mathbb{R}^{D_k}$ mapping the graph and/or the nodes to the feature space of task $\mathcal{T}_k$ with dimension $D_{k}$, as shown in Fig. 2. $\mathcal{M}_t$ is a GNN with $L$ layers that takes as input the temporal sequence $\mathbf{x}$ and provides as output the updated feature vectors $\mathbf{f} = \{\mathbf{f}_1,\mathbf{f}_2,\dots ,\mathbf{f}_N\}$. At layer $l$, node embeddings are projected and combined with their neighbours, following the GraphSAGE architecture [29]:

$$
\mathbf{f}_{i}^{(l+1)} = \mathbf{W}_{r}^{(l)} \mathbf{f}_{i}^{(l)} + \mathbf{W}^{(l)} \cdot \mathbf{g}_{i}^{(l+1)} + \mathbf{b}^{(l)}, \tag{1}
$$

where $\mathbf{f}_i^{(l)}\in \mathbb{R}^{D_t^{(l)}}$ are the features of node $\mathbf{x}_i$, $\mathbf{b}^{(l)}\in \mathbb{R}^{D_t^{(l + 1)}}$ is a bias term, and $\mathbf{W}_r^{(l)},\mathbf{W}^{(l)}\in \mathbb{R}^{D_t^{(l + 1)}\times D_t^{(l)}}$ are the weight matrices associated with the root node and the aggregated neighbours' contribution $\mathbf{g}_i^{(l + 1)}$, respectively. The latter is computed as:

$$
\mathbf{g}_{i}^{(l+1)} = \operatorname*{mean}_{\mathbf{f}_{j} \in \mathcal{N}_{i}} \left( \phi\left( \mathbf{W}_{p}^{(l)} \mathbf{f}_{j}^{(l)} + \mathbf{b}_{p}^{(l)} \right) \right), \tag{2}
$$

where $\mathbf{W}_p^{(l)}\in \mathbb{R}^{D_t^{(l)}\times D_t^{(l)}}$ projects the neighbours before the aggregation step, $\phi$ is a non-linearity, $\mathbf{b}_p^{(l)}\in \mathbb{R}^{D_t^{(l + 1)}}$ is a bias term and $\mathcal{N}_i$ is the set of neighbours of node $\mathbf{x}_i$. Each layer is followed by Layer Normalization [2] and a LeakyReLU activation function. A residual connection around the temporal GNN allows the network to preserve the original features. Intuitively, the neighbourhood $\mathcal{N}_i$ reflects the temporal dependencies of the input sequence and the GNN allows to iteratively extend the temporal receptive field of each node.

Task-specific heads The output features of the temporal backbone $\mathcal{M}_t$ are shared across the different downstream tasks. To project these features into task-specific components, we add a set of projection heads $\mathcal{H}_k$, one for each task $\mathcal{T}_k$.
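To make the shape bookkeeping concrete, the sketch below implements one such temporal layer (Eqs. 1 and 2) and a task head in plain PyTorch. It is only an illustration under our own assumptions: class and variable names, the dense adjacency mask and the two-layer toy backbone are not taken from the released implementation.

```python
import torch
import torch.nn as nn

class TemporalSAGELayer(nn.Module):
    """One GraphSAGE-style temporal update (Eqs. 1-2): root projection
    plus the mean of the projected temporal neighbours."""
    def __init__(self, d_in: int, d_out: int):
        super().__init__()
        self.w_root = nn.Linear(d_in, d_out)                # W_r^(l) f_i + b^(l)
        self.w_neigh = nn.Linear(d_out, d_out, bias=False)  # W^(l) applied to g_i^(l+1)
        self.w_pre = nn.Linear(d_in, d_out)                 # W_p^(l) f_j + b_p^(l)
        self.act = nn.LeakyReLU()
        self.norm = nn.LayerNorm(d_out)

    def forward(self, f: torch.Tensor, adj: torch.Tensor) -> torch.Tensor:
        # f: (N, d_in) segment features; adj: (N, N) bool, True for temporal neighbours
        proj = self.act(self.w_pre(f))                      # phi(W_p f_j + b_p)
        deg = adj.sum(dim=1, keepdim=True).clamp(min=1)
        g = (adj.float() @ proj) / deg                      # mean over N_i (Eq. 2)
        out = self.w_root(f) + self.w_neigh(g)              # Eq. 1
        return self.act(self.norm(out))                     # LayerNorm + LeakyReLU

def temporal_adjacency(n: int, tau: int = 2) -> torch.Tensor:
    """Connect segments whose temporal distance is at most tau (no self-loops)."""
    dist = (torch.arange(n)[:, None] - torch.arange(n)[None, :]).abs()
    return (dist <= tau) & (dist > 0)

# Toy usage: a video of N segments, a two-layer shared backbone and one task head.
N, D, D_t, D_k, n_cls = 8, 1536, 256, 128, 10
x = torch.randn(N, D)                                       # e.g. pre-extracted video features
adj = temporal_adjacency(N)
l1, l2 = TemporalSAGELayer(D, D_t), TemporalSAGELayer(D_t, D_t)
f = l2(l1(x, adj), adj)                                     # shared temporal features (N, D_t)
head = nn.Sequential(nn.Linear(D_t, D_k), nn.ReLU(), nn.Linear(D_k, n_cls))
logits = head(f)                                            # per-node task logits (e.g. AR)
```

In the full model, a positional encoding is added to the node features before the temporal GNN and a residual connection around it preserves the original features, as described above.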
For graph classification tasks, the nodes of each graph are aggregated using max pooling to obtain a unique feature representation. In each head, a MultiLayer Perceptron outputs the task-specific features $\mathbf{f}_i^k \in \mathbb{R}^{D^k}$ and is followed by a linear layer to compute the task logits $\mathbf{y}_i^k \in \mathbb{R}^{D_o^k}$, where $D_o^k$ is the number of labels for task $\mathcal{T}_k$. By limiting the task-specific portion of the network to the heads while sharing the temporal backbone, we can obtain the perspective of all tasks with a single forward pass through the latter. The network is trained on all the tasks by averaging their losses.

# 3.3. Learning a novel task with a backpack

To solve the new task, the naive approach would be to finetune the model, adding a new head $\mathcal{H}_{K + 1}$ and possibly updating the temporal backbone $\mathcal{M}_t$. However, finetuning may not fully leverage the insights from other tasks as it could result in the loss of the previously acquired knowledge, as confirmed experimentally in Sec. 4.2.

Building the backpack We propose to explicitly model the perspectives of the different tasks as a set of task-specific prototypes that abstract the knowledge gained by the previously seen tasks and can be accessed by novel tasks. We call this approach EgoPack and provide an overview in Fig. 2. These task-specific prototypes are collected from videos annotated for action recognition, as human actions can be seen as the common thread behind the different tasks. Practically, we forward these samples through the temporal backbone and take the output of the different task-specific projection heads, thus encoding the perspective of each task given the same input video. Finally, the features obtained from each task are aggregated according to the verb and noun labels of the action, effectively summarising the perspective of each task given the same input action. The result is a set of prototypes $\mathbf{P}^k = \{\mathbf{p}_1^k,\mathbf{p}_2^k,\dots ,\mathbf{p}_P^k\} \in \mathbb{R}^{P\times D_k}$ for each task $\mathcal{T}_k$, where $P$ is the number of unique (verb, noun) pairs in the dataset and $D_{k}$ is the size of the task-specific features. These prototypes are frozen and represent a "summary" of what the model has learnt during the multi-task pretraining process, creating an abstraction of the gained knowledge. They can then be reused when learning novel tasks, like a backpack of skills that the model can carry over.

Leveraging the backpack During the learning process of the novel task $\mathcal{T}_{K + 1}$, the model can exploit the task prototypes obtained via the task-specific heads. As before, the output of the temporal backbone $\mathbf{f}_i$ is forwarded through all the projection heads to obtain the task-specific features $\mathbf{f}_i^k$. These features are used as queries to match the corresponding task prototypes $\mathbf{P}^k$, selecting the $k$-Nearest Neighbours among the prototypes using cosine similarity in the feature space. Task features and their neighbouring prototypes form a graph-like structure, on which message passing can be used to enrich the task-specific features $\mathbf{f}_i^k$, following an iterative refinement approach.
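The exact update rule is formalised in Eq. (3) below; as a rough illustrative sketch (function and class names, the scatter-style averaging and the cosine top-k selection are our own assumptions, not the released implementation), prototype construction and a single refinement step could look as follows:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def build_prototypes(task_feats: torch.Tensor, action_ids: torch.Tensor, n_actions: int) -> torch.Tensor:
    """Average the task-specific features of all AR samples that share the same (verb, noun) pair."""
    protos = torch.zeros(n_actions, task_feats.shape[1])
    counts = torch.zeros(n_actions, 1)
    protos.index_add_(0, action_ids, task_feats)
    counts.index_add_(0, action_ids, torch.ones(len(action_ids), 1))
    return protos / counts.clamp(min=1)                      # (P, D_k), kept frozen afterwards

class PrototypeRefinement(nn.Module):
    """One refinement layer: query the k closest prototypes (cosine similarity)
    and combine their max-pooled features with the query (cf. Eq. 3)."""
    def __init__(self, d_k: int, k: int = 4):
        super().__init__()
        self.w_root = nn.Linear(d_k, d_k, bias=False)        # W_r^(l)
        self.w_neigh = nn.Linear(d_k, d_k, bias=False)       # W^(l)
        self.k = k

    def forward(self, f_task: torch.Tensor, protos: torch.Tensor) -> torch.Tensor:
        sim = F.normalize(f_task, dim=-1) @ F.normalize(protos, dim=-1).T  # (N, P)
        idx = sim.topk(self.k, dim=-1).indices                             # k-NN per query
        neigh = protos[idx].amax(dim=1)                      # max over the selected prototypes
        return self.w_root(f_task) + self.w_neigh(neigh)     # prototypes themselves stay frozen

# Toy usage: 16 AR training samples, 5 distinct (verb, noun) pairs, 128-d task features.
feats, labels = torch.randn(16, 128), torch.randint(0, 5, (16,))
P = build_prototypes(feats, labels, n_actions=5)
refine = PrototypeRefinement(d_k=128, k=4)
refined = refine(torch.randn(3, 128), P)                     # refined features for 3 new segments
```

Only the task features are refined in this sketch: the prototypes are kept frozen, so the perspectives collected during the multi-task stage are preserved.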
In particular, at each layer $l$ we select the closest prototypes with $k$ -NN and update the features $\mathbf{f}_i^{(l),k}$ according to the following rule: + +$$ +\mathbf {f} _ {i} ^ {(l + 1), k} = \mathbf {W} _ {r} ^ {(l)} \mathbf {f} _ {i} ^ {(l), k} + \mathbf {W} ^ {(l)} \cdot \max _ {\mathbf {p} _ {j} ^ {k} \in \mathcal {N} _ {i} ^ {(l), k}} \mathbf {p} _ {j} ^ {k}, \tag {3} +$$ + +where $\mathbf{p}_j^k\in \mathcal{N}_i^{(l),k}$ are the closest prototypes in $\mathbf{P}^k$ to $\mathbf{f}_i^{(l),k}$ and $\mathbf{W}_r^{(l)}$ , $\mathbf{W}^{(l)}\in \mathbb{R}^{D^k\times D^k}$ are the weight matrices associated to the input features and the aggregated neighbours respectively. Notably, only the task features are updated while the task prototypes remain frozen to preserve the original perspectives seen by the network. + +In this process, the task-specific heads $\mathcal{H}_k$ are initialised from the multi-task training and possibly updated during the task-specific finetuning process, allowing the model to freely explore the set of task prototypes and to select the + +most informative ones for each input sample. After the interaction phase, the refined features $\tilde{\mathbf{f}}_i^k$ are fed to a classifier module to obtain the task logits $\mathbf{y}_i^k\in \mathbb{R}^{D_o^k}$ for each task $\mathcal{T}_k$ in the backpack. The final prediction is the sum of the pre-softmax logits coming from the different tasks and the output of a new head $\mathcal{H}_{K + 1}$ for the novel task. Intuitively, we allow each task to cast a vote on the final prediction, based on its perspective on the same video segment. In this phase, the temporal network, the task-specific heads and the weights of the GNNs are trained jointly using only the supervision of the novel task $\mathcal{T}_{K + 1}$ . + +# 4. Experiments + +We evaluate EgoPack on four Ego4d Human-Object Interaction benchmarks: Action Recognition (AR)1, Long Term Action Anticipation (LTA), Object State Change Classification (OSCC) and Point Of No Return (PNR). We report verb and noun top-1 accuracy for AR, accuracy for OSCC, edit distance for LTA and temporal localisation error (in seconds) for PNR. + +# 4.1. Implementation Details + +We use Omnivore Swin-L [24] features pre-trained on Kinetics-400 [4], released as part of Ego4D [25] and extracted using dense sampling over a window of 32 frames with a stride of 16 frames and features size 1536. In principle, EgoPack is agnostic to the underlying features extractor and could adopt other architectures. Following previous works on Ego4D [50] we use TRN [77] to temporally aggregate features from the three sub-segments of each input sample. The mapping between videos of each task and its corresponding temporal graph is task dependent, as shown in Fig. 3: + +- Action Recognition (AR): actions are mapped to the nodes of the temporal graph $\mathcal{G}$ , and edges connect each node to the previous and next (Fig. 3a). To account for the variable length of videos, actions are processed in fixed size windows. +- Long Term Anticipation (LTA): each input clip is mapped to a node in $\mathcal{G}$ . Then, a sequence of new nodes is added to the graph, equivalent in number to the clips to forecast. These nodes are initialised with the mean features of the input clips and are connected to the previous and subsequent nodes in the sequence, as well as to the input clips (Fig. 3b). 
+- Object State Change Classification (OSCC) and Point of No Return (PNR): each input segment is further split into $n$ sub-segments to account for the finer temporal granularity required by these tasks. Each sub-segment is + +![](images/388c0eb9864c315baa2798b5c69beb5fc0cd38e346851bb86c754bcb621cc228.jpg) +(a) Node classification (AR, PNR) + +![](images/4c993074ff57171ad3e42f2b5f9f4f95e8778928bbc940f94ece5c91fc4a9d80.jpg) +(b) Future node classification (LTA) +Figure 3. Egocentric vision tasks as graph prediction tasks. In AR and LTA, each node is an action within a temporal sequence and the objective is to predict the verb and noun labels of the nodes. In OSCC and PNR, nodes represent different temporal segments of the video clip and the goal is to output a global prediction for the entire graph (OSCC) or the individual nodes (PNR). + +![](images/a9b3d2e25d485fd2bce6c57a91464f62adbe3d42ecece4c3b1e1614c2a61cc38.jpg) +(c) Graph classification (OSCC) + +mapped to a node in $\mathcal{G}$ , and edges connect each node to the previous and next (Fig. 3c). + +Tasks have different annotations and are modelled as separate graphs, even though the temporal model is shared. The task prototypes are built using samples from the train split of the AR dataset. Tasks are trained with standard cross entropy loss, with the exception of PNR which uses binary cross entropy. EgoPack is trained for 30, 40 and 10 epochs for AR, LTA and OSCC/PNR respectively, with a learning rate of $1e - 4$ and $1e - 6$ for AR/LTA and OSCC/PNR respectively using Adam optimiser and batch size 16. All tasks share the same temporal and cross-task interaction architecture, with minimal task-specific hyper-parameter tuning. More details are reported in the supplementary. + +# 4.2. Quantitative results + +We show the main results of EgoPack in Table 1. To assess the validity of our approach, we proceed incrementally starting from single tasks models, i.e. each task is trained separately. In this setting, we compare a simple MLP baseline trained on the temporally aggregated features against our temporal graph methodology, which exhibits superior average performance. The improvement is particularly evident in the PNR task, e.g. from $1.76s$ to $0.61s$ , where the subpar outcomes of the MLP can be attributed to the lack of explicit temporal modelling. In addition to higher performance, the temporal graph enables all the tasks to be modelled using a unified architecture which allows to train all the tasks at the same time (MTL). With the MTL model, we observe a significant drop in average performance, mostly driven by worse accuracy in AR and OSCC. This behaviour is the result of negative transfers between tasks when they are trained together [67]. + +Cross-Task Interactions We compare our approach EgoPack for efficient cross task interaction with EgoT2 [68], which learns to combine multiple task-specific frozen models to solve one of them. Unlike EgoPack, the learning process of EgoT2 is divided in two stages, i.e. a pre-training step where each individual task is learned from scratch and a task-specific translation step, where just one task of the collection is fine-tuned. Notably, + +both steps require the supervision of the downstream task. On the contrary, the multi-task pre-training of EgoPack is agnostic to the novel downstream task, potentially allowing to transfer the gained knowledge to any new task. 
To ensure a fair comparison with EgoPack, we re-implemented the task translation mechanism proposed by EgoT2 on top of our Temporal Graph single task models using Omnivore features. This approach is indicated as Task Translation in Table 1. Additional details on its implementation are provided in the supplementary. One of the main benefits of our approach is that it requires a single forward pass through the feature extraction and temporal backbones to obtain the perspectives of the different tasks, unlike EgoT2 which requires a forward pass for each single task model. Notably, we also highlight that EgoPack obtains better or comparable performance even though the backbone used for feature extraction was not trained on Ego4D.

Ablation of the different contributions We summarise the main steps leading to EgoPack in Table 2, using an aggregated metric to capture the overall improvement across the various tasks when compared to the baseline. The metric is computed as an average of the individual task metrics. We adjusted the metrics by taking one minus the score for LTA and PNR, as lower values are preferable, and clipped the PNR localisation error at 1.0 to have the same scale across all the metrics. Temporal modelling alone greatly improves the score compared to the baseline. Although MTL allows to train under a multi-task objective, it clearly underperforms the temporal model due to negative transfers [40]. Task Translation partially recovers this gap on some tasks as shown in Table 1, but overall the aggregated metric is comparable with MTL. We speculate that the marginal improvement of Task Translation compared to MTL lies in the limited task-specific context the former has access to, as it can peek at the different perspectives of the auxiliary tasks only for the input video at hand, rather than looking at the entire knowledge gained by the model. On the contrary, the task prototypes of EgoPack allow the model to carry a more complete summary of what it has learnt, from which it can extract useful knowledge based on the sample and the task at hand.
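To make this aggregation concrete, a minimal sketch of the score is given below; the function name and the example values are hypothetical placeholders (they are not numbers from the tables), and the assumption that accuracies enter as fractions in $[0, 1]$ is ours.

```python
def aggregated_score(ar_verb_acc, ar_noun_acc, oscc_acc,
                     lta_verb_ed, lta_noun_ed, pnr_err_s):
    """Mean of the per-task metrics: 'lower is better' metrics are flipped to 1 - x,
    and the PNR localisation error (seconds) is clipped at 1.0 to share the same scale."""
    terms = [
        ar_verb_acc, ar_noun_acc, oscc_acc,      # accuracies, assumed in [0, 1]
        1.0 - lta_verb_ed, 1.0 - lta_noun_ed,    # LTA edit distances, lower is better
        1.0 - min(pnr_err_s, 1.0),               # PNR error, clipped then flipped
    ]
    return sum(terms) / len(terms)

# Hypothetical example (placeholder values, not results from the paper):
print(round(aggregated_score(0.24, 0.30, 0.70, 0.76, 0.74, 0.62), 3))
```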
| Method | Trained on frozen features | AR Verbs Top-1 (%) | AR Nouns Top-1 (%) | OSCC Acc. (%) | LTA Verbs ED (↓) | LTA Nouns ED (↓) | PNR Loc. Err. (s) (↓) |
|---|---|---|---|---|---|---|---|
| Ego4D Baselines [25] | ✗ | 22.18 | 21.55 | 68.22 | 0.746 | 0.789 | 0.62 |
| EgoT2s [68] | ✗ | 23.04 | 23.28 | 72.69 | 0.731 | 0.769 | 0.61 |
| MLP | ✓ | 24.08 | 30.45 | 70.47 | 0.763 | 0.742 | 1.76 |
| Temporal Graph | ✓ | 24.25 | 30.43 | 71.26 | 0.754 | 0.752 | 0.61 |
| Multi-Task Learning | ✓ | 22.05 | 29.44 | 71.10 | 0.740 | 0.746 | 0.62 |
| Task Translation† | ✓ | 23.68 | 28.28 | 71.48 | 0.740 | 0.756 | 0.61 |
| EgoPack | ✓ | 25.10 | 31.10 | 71.83 | 0.728 | 0.752 | 0.61 |

Table 1. EgoPack on Ego4D HOI tasks. MLP is a simple baseline consisting of a few linear layers, while Temporal Graph models all tasks using a unified temporal graph-based architecture. MTL [54] uses hard parameter sharing to jointly learn all tasks, which may result in negative transfers. EgoT2s [68] learns to translate features across tasks to optimise the primary task. EgoPack builds on the unified architecture of the Temporal Graph and learns to exploit the perspective of different tasks for efficient transfers to the novel task. Performances of EgoPack are evaluated over three runs using accuracy for AR and OSCC, Edit Distance for LTA and temporal localisation error for PNR. † Our implementation of the task translation mechanism from EgoT2 [68] using Omnivore features.
| Method | Temp. model | Multi-Task Objective | Cross-Task Interaction | Metrics Average (Δ) |
|---|---|---|---|---|
| MLP | ✗ | ✗ | ✗ | 0.416 |
| Temp. | ✓ | ✗ | ✗ | 0.433 (+4.22%) |
| Task Transl. | ✓ | ✗ | ✓ | 0.431 (+3.61%) |
| MTL | ✓ | ✓ | ✗ | 0.430 (+3.50%) |
| MTL+FT | ✓ | ✓ | ✗ | 0.437 (+5.02%) |
| EgoPack | ✓ | ✓ | ✓ | 0.441 (+6.10%) |
Table 2. Ablation of the different contributions in EgoPack, measured according to an aggregated score, computed as the mean of the standardised metrics across tasks.

To validate that the benefits of EgoPack were not brought by the MTL pre-training alone, we also introduce an MTL + FT baseline where a new task-specific head is finetuned for the novel task, without access to the output of the other heads. The limited performance of this configuration could be explained by the model losing the knowledge learnt during the multi-task training, without a significant improvement over the single-task baselines, thus only partially reusing the gained knowledge. On the contrary, EgoPack preserves this knowledge in the form of prototypes, which proves to be effective for retaining the model's knowledge when learning a new task.

Depth of the GNN and the selection of $k$ We observe that EgoPack is quite robust to the number of GNN layers in the interaction stage between the input features and the task prototypes, as shown in Fig. 4. Regarding the selection of the $k$ parameter, we compare the MTL + FT baseline ($k = 0$) with EgoPack. The best performance is achieved at $k = 4$ with a saturating trend afterwards, showing that interacting with a limited number of prototypes is sufficient.

Results on the test set We compare EgoPack on the test set of the PNR, OSCC and LTA benchmarks, to validate the improvements and soundness of EgoPack. In this setting, a fair comparison between methods is challenging because of the use of different backbones, supervision levels, ensemble strategies and challenge-specific tuning, such as training also on the validation set. Remarkably, we achieve SOTA performance in LTA, outperforming the other methods that finetune the entire backbone, with a more evident benefit in the verbs edit distance. In PNR, we closely match other approaches, while the improvement is more limited in the OSCC task. In this task, we notice a relevant impact of the Ego4D pretraining on the performance. We provide a more in-depth description of the differences between these methods in the supplementary materials.

![](images/d85c23916c0ac7b3b660d8d0486042fa357246551d5ca799417a8948cf7ee0f1.jpg)
Figure 4. Parameter analysis for the cross-task interaction module of EgoPack. We analyse the impact on performance of the GNN depth and of the number of nearest neighbours, denoted as $k$-NN.

# 4.3. Qualitative results

Closest Task Prototypes We evaluate which are the closest task-specific prototypes in Fig. 5. In this example, OSCC is the novel task and the model has access to the prototypes of the learnt tasks. We focus on the prototypes from the AR and PNR tasks and group together nodes that share the same verb label to make the picture more readable. Looking at the number of occurrences of the prototypes, we observe that some nodes are more discriminative for detecting a state change, e.g. peel and hold actions are typically associated (peel) or not (hold) with state changes, and therefore show more evident peaks for positive and negative classes, indicating that the network is using these clues to solve the task.
| PNR | Ego4D Pt. | Loc. Error (s) (↓) |
|---|---|---|
| CNN LSTM [25] | ✗ | 0.76 |
| EgoVLP [42] | ✓ | 0.67 |
| EgoT2 [68] | ✗ | 0.66 |
| EgoPack | ✗ | 0.66 |

| OSCC | Ego4D Pt. | Accuracy (%) |
|---|---|---|
| I3D RN-50 [25] | ✗ | 67.6 |
| EgoVLP [42] | ✓ | 74.0 |
| EgoT2 (EgoVLP) [68] | ✓ | 75.0 |
| EgoT2 (I3D) [68] | ✗ | 71.0 |
| EgoPack | ✗ | 72.1 |

| LTA | Ego4D Pt. | Verb (↓) | Noun (↓) | Action (↓) |
|---|---|---|---|---|
| SlowFast [25] | ✗ | 0.739 | 0.780 | 0.943 |
| EgoT2 [68] | ✗ | 0.722 | 0.764 | 0.935 |
| HierVL [1] | ✓ | 0.724 | 0.735 | 0.928 |
| I-CVAE [43] | ✗ | 0.741 | 0.740 | 0.930 |
| EgoPack | ✗ | 0.721 | 0.735 | 0.925 |
Table 3. Comparison of EgoPack on the test set of the Ego4D benchmarks. For a fair comparison, we distinguish between methods pretrained on full Ego4D (✓) and those that have been trained only on the benchmark data (✗), which includes EgoPack.

![](images/2f17ff001c1c4ec47a1319a176c19e654c30aa954cca0ce9ecfe10e0a281603f.jpg)
(a) AR Task Prototypes

![](images/c821f609a776382e3041d9fd57b41189ba631e628bb01fe75e6f84195a72ada0.jpg)
(b) PNR Task Prototypes

Figure 5. Closest nodes to the OSCC samples among AR and PNR task prototypes. Some nodes appear to be more discriminative of the presence or absence of an object state change.

Confusion matrices In Fig. 6, we compare the confusion matrix of EgoPack with that of the MLP model for the top-20 largest verb and noun classes in the AR task, grouping the remaining classes into a "rest" pseudo-class. Overall, we observe an evident improvement on the noun labels, due to the positive effect of cross-task interaction. For example, the network appears to better disambiguate between objects that may appear at the same time in the scene, e.g. "pants" and "cloth" or "bottle" and "lid", which we speculate to be the result of a better ability of other tasks, namely OSCC, to identify active objects. Regarding the verbs, we also observe notable improvements, in addition to better recognition of verbs that are the temporal inverse of each other, e.g. "put" and "take" or "open" and "close", thanks to the improved temporal reasoning of our unified model.

![](images/2c3b3965355a45dbad3becf47a2c4f9e78e02723f18ce7093eb77da2f001e841.jpg)
(a) Verb (MLP)

![](images/322879cd38cbdf568bd80ab18f98c46cd658e83b7a251df651c7ea00b37cb42d.jpg)
(b) Verb (EgoPack)

![](images/dcf9cbafae1971c11a58b3bca6956ab8ac72d84ad48e42ee1fae0887394f3542.jpg)
(c) Noun (MLP)

![](images/d27174a9056017ffc8d0c894a426a221e6ef9eb6ea592042663eba3a83949b8f.jpg)
(d) Noun (EgoPack)

Figure 6. Action Recognition confusion matrices of EgoPack compared to the MLP baseline for the top-20 verb and noun classes.

# 5. Conclusions and future work

We presented EgoPack, a framework that allows knowledge sharing between different egocentric vision tasks, enabling an efficient use of the perspectives that each task can provide. We built EgoPack on top of a unified temporal architecture that can model distinct tasks with a shared backbone and minimal task-specific overhead. EgoPack overcomes the main limitation posed by traditional multi-task learning approaches, namely the unrealistic expectation that supervision is available for all tasks at training time. Indeed, the prototype mechanism behind EgoPack creates a summary of what the model has learnt so far, abstracting task-specific knowledge that can be reused in novel tasks. The model can then be updated to any new task, while also peeking at the perspectives of the previous tasks. Results on Ego4D validate our approach, showing competitive performance with other methods.

# Acknowledgements

This study was carried out within the FAIR - Future Artificial Intelligence Research and received funding from the European Union Next-GenerationEU (PIANO NAZIONALE DI RIPRESA E RESILIENZA (PNRR) - MISSIONE 4 COMPONENTE 2, INVESTIMENTO 1.3 - D.D. 1555 11/10/2022, PE00000013). This manuscript reflects only the authors' views and opinions; neither the European Union nor the European Commission can be considered responsible for them.
We acknowledge the CINECA award under the ISCRA initiative, for the availability of high performance computing resources and support. + +# References + +[1] Kumar Ashutosh, Rohit Girdhar, Lorenzo Torresani, and Kristen Grauman. Hiervl: Learning hierarchical videolanguage embeddings. In CVPR, 2023. 2, 8 +[2] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 4 +[3] Alejandro Betancourt, Pietro Morerio, Carlo S Regazzoni, and Matthias Rauterberg. The evolution of first person vision methods: A survey. IEEE TCSVT, 2015. 2 +[4] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In CVPR, 2017. 5 +[5] Rich Caruana. Multitask learning. Machine learning, 28: 41-75, 1997. 3 +[6] Min-Hung Chen, Zsolt Kira, Ghassan AlRegib, Jaekwon Yoo, Ruxin Chen, and Jian Zheng. Temporal attentive alignment for large-scale video domain adaptation. In ICCV, 2019. 2 +[7] Ting Chen, Saurabh Saxena, Lala Li, Tsung-Yi Lin, David J Fleet, and Geoffrey E Hinton. A unified sequence interface for vision tasks. In NeurIPS, 2022. 3 +[8] Tianlong Chen, Xuxi Chen, Xianzhi Du, Abdullah Rashwan, Fan Yang, Huizhong Chen, Zhangyang Wang, and Yeqing Li. Adamv-moe: Adaptive multi-task vision mixture-of-experts. In ICCV, 2023. 3 +[9] Zhao Chen, Vijay Badrinarayanan, Chen-Yu Lee, and Andrew Rabinovich. Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In ICML, 2018. 3 +[10] Yuanzheng Ci, Yizhou Wang, Meilin Chen, Shixiang Tang, Lei Bai, Feng Zhu, Rui Zhao, Fengwei Yu, Donglian Qi, and Wanli Ouyang. Unihcp: A unified model for human-centric perceptions. In CVPR, 2023. 3 +[11] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, and Michael Wray. The epic-kitchens dataset: Collection, challenges and baselines. IEEE TPAMI, 2021. 2 +[12] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Antonino Furnari, Jian Ma, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, and Michael Wray. Rescaling egocentric vision: Collection, pipeline and challenges for epic-kitchens-100. IJCV, 2022. 2 +[13] Eadom Dessalene, Michael Maynard, Chinmaya Devaraj, Cornelia Fermuller, and Yiannis Aloimonos. Egocentric object manipulation graphs. arXiv preprint arXiv:2006.03201, 2020. 3 +[14] Eadem Dessalene, Chinmaya Devaraj, Michael Maynard, Cornelia Fermuller, and Yiannis Aloimonos. Forecasting action through contact representations from first person video. IEEE TPAMI, 2021. 3 +[15] Wenqi Fan, Yao Ma, Qing Li, Yuan He, Eric Zhao, Jiliang Tang, and Dawei Yin. Graph neural networks for social recommendation. In The world wide web conference, 2019. 2 + +[16] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In ICCV, 2019. 3 +[17] Chris Fifty, Ehsan Amid, Zhe Zhao, Tianhe Yu, Rohan Anil, and Chelsea Finn. Efficiently identifying task groupings for multi-task learning. In NeurIPS, 2021. 3 +[18] Antonino Furnari and Giovanni Maria Farinella. Rolling-unrolling lstms for action anticipation from first-person video. IEEE TPAMI, 2020. 2 +[19] Antonino Furnari, Sebastiano Battiato, Kristen Grauman, and Giovanni Maria Farinella. Next-active-object prediction from egocentric videos. Journal of Visual Communication and Image Representation, 2017. 2 +[20] Ruohan Gao, Tae-Hyun Oh, Kristen Grauman, and Lorenzo Torresani. 
Listen to look: Action recognition by previewing audio. In CVPR, 2020. 2 +[21] Pallabi Ghosh, Nirat Saini, Larry S Davis, and Abhinav Shrivastava. All about knowledge graphs for actions. arXiv preprint arXiv:2008.12432, 2020. 3 +[22] Pallabi Ghosh, Yi Yao, Larry Davis, and Ajay Divakaran. Stacked spatio-temporal graph convolutional networks for action segmentation. In WACV, 2020. 3 +[23] Rohit Girdhar and Kristen Grauman. Anticipative video transformer. In ICCV, 2021. 2 +[24] Rohit Girdhar, Mannat Singh, Nikhila Ravi, Laurens van der Maaten, Armand Joulin, and Ishan Misra. Omnivore: A single model for many visual modalities. In CVPR, 2022. 3, 5 +[25] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In CVPR, 2022. 2, 5, 7, 8 +[26] Jieuxiang Gu, Zhenhua Wang, Jason Kuen, Lianyang Ma, Amir Shahroudy, Bing Shuai, Ting Liu, Xingxing Wang, Gang Wang, Jianfei Cai, et al. Recent advances in convolutional neural networks. PR, 2018. 2 +[27] Michelle Guo, Albert Haque, De-An Huang, Serena Yeung, and Li Fei-Fei. Dynamic task prioritization for multitask learning. In ECCV, 2018. 3 +[28] Pengsheng Guo, Chen-Yu Lee, and Daniel Ulbricht. Learning to branch for multi-task learning. In ICML, 2020. 3 +[29] Will Hamilton, Zhitao Ying, and Jure Leskovec. Inductive representation learning on large graphs. In NeurIPS, 2017. 4 +[30] Dan Witzner Hansen and Qiang Ji. In the eye of the beholder: A survey of models for eyes and gaze. IEEE TPAMI, 2009. 2 +[31] Thomas E. Huang, Yifan Liu, Luc Van Gool, and Fisher Yu. Video task decathlon: Unifying image and video tasks in autonomous driving. In ICCV, 2023. 3 +[32] Yifei Huang, Minjie Cai, Zhenqiang Li, Feng Lu, and Yoichi Sato. Mutual context network for jointly estimating egocentric gaze and action. IEEE TIP, 2020. 3 +[33] Yifei Huang, Yusuke Sugano, and Yoichi Sato. Improving action segmentation via graph-based temporal reasoning. In CVPR, 2020. 2, 3 + +[34] Youngkyoon Jang, Brian Sullivan, Casimir Ludwig, Iain Gilchrist, Dima Damen, and Walterio Mayol-Cuevas. Epictent: An egocentric video dataset for camping tent assembly. In ICCVW, 2019. 2 +[35] Zhuoliang Kang, Kristen Grauman, and Fei Sha. Learning with whom to share in multi-task feature learning. In ICML, 2011. 3 +[36] Georgios Kapidis, Ronald Poppe, Elsbeth van Dam, Lucas Noldus, and Remco Veltkamp. Multitask learning to improve egocentric action recognition. In ICCVW, 2019. 3 +[37] Steven Kearnes, Kevin McCloskey, Marc Berndl, Vijay Pande, and Patrick Riley. Molecular graph convolutions: moving beyond fingerprints. Journal of computer-aided molecular design, 2016. 2 +[38] Alex Kendall, Yarin Gal, and Roberto Cipolla. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In CVPR, 2018. 3 +[39] Asifullah Khan, Anabia Sohail, Umme Zahoora, and Aqsa Saeed Qureshi. A survey of the recent architectures of deep convolutional neural networks. Artificial intelligence review, 2020. 2 +[40] Iasonas Kokkinos. Ethernet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In CVPR, 2017. 2, 3, 6 +[41] Zewen Li, Fan Liu, Wenjie Yang, Shouheng Peng, and Jun Zhou. A survey of convolutional neural networks: analysis, applications, and prospects. IEEE transactions on neural networks and learning systems, 2021. 
2 +[42] Kevin Qinghong Lin, Jinping Wang, Mattia Soldan, Michael Wray, Rui Yan, Eric Z XU, Difei Gao, Rong-Cheng Tu, Wenzhe Zhao, Weijie Kong, et al. Egocentric video-language pretraining. In NeurIPS, 2022. 8 +[43] Esteve Valls Mascaró, Hyemin Ahn, and Dongheui Lee. Intention-conditioned long-term human egocentric action anticipation. In WACV, 2023. 8 +[44] Jonathan Munro and Dima Damen. Multi-modal domain adaptation for fine-grained action recognition. In CVPR, 2020. 2 +[45] Tushar Nagarajan, Yanghao Li, Christoph Feichtenhofer, and Kristen Grauman. Ego-topo: Environment affordances from egocentric video. In CVPR, 2020. 3 +[46] Adrián Núñez-Marcos, Gorka Azkune, and Ignacio Arganda-Carreras. Egocentric vision-based action recognition: A survey. Neurocomputing, 2022. 2 +[47] Francesca Pistilli and Giuseppe Averta. Graph learning in robotics: a survey. IEEE Access, 2023. 2 +[48] Mirco Planamente, Chiara Plizzari, Simone Alberto Peirone, Barbara Caputo, and Andrea Bottino. Relative norm alignment for tackling domain shift in deep multi-modal classification. IJCV, 2024. 2 +[49] Chiara Plizzari, Gabriele Goletto, Antonino Furnari, Siddhant Bansal, Francesco Ragusa, Giovanni Maria Farinella, Dima Damen, and Tatiana Tommasi. An outlook into the future of egocentric vision. arXiv preprint arXiv:2308.07123, 2023. 2 + +[50] Chiara Plizzari, Toby Perrett, Barbara Caputo, and Dima Damen. What can a cook in italy teach a mechanic in india? action recognition generalisation over scenarios and locations. In ICCV, 2023. 2, 5 +[51] Shraman Pramanick, Yale Song, Sayan Nag, Kevin Qinghong Lin, Hardik Shah, Mike Zheng Shou, Rama Chellappa, and Pengchuan Zhang. Egovlpv2: Egocentric video-language pre-training with fusion in the backbone. In ICCV, 2023. 2 +[52] Santhosh K. Ramakrishnan, Ziad Al-Halah, and Kristen Grauman. Spotem: Efficient video search for episodic memory. In ICLR, 2023. 2 +[53] Maheen Rashid, Hedvig Kjellstrom, and Yong Jae Lee. Action graphs: Weakly-supervised action localization with graph convolution networks. In WACV, 2020. 3 +[54] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 3, 7 +[55] Alvaro Sanchez-Gonzalez, Jonathan Godwin, Tobias Pfaff, Rex Ying, Jure Leskovec, and Peter Battaglia. Learning to simulate complex physics with graph networks. In ICML, 2020. 2 +[56] Fadime Sener, Dibyadip Chatterjee, Daniel Shelepov, Kun He, Dipika Singhania, Robert Wang, and Angela Yao. Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In CVPR, 2022. 2 +[57] Haosen Shi, Shen Ren, Tianwei Zhang, and Sinno Jialin Pan. Deep multitask learning with progressive parameter sharing. In ICCV, 2023. 3 +[58] Martin Simonovsky and Nikos Komodakis. Dynamic edge-conditioned filters in convolutional neural networks on graphs. In CVPR, 2017. 2 +[59] Ayan Sinha, Zhao Chen, Vijay Badrinarayanan, and Andrew Rabinovich. Gradient adversarial training of neural networks. arXiv preprint arXiv:1806.08028, 2018. 3 +[60] Trevor Standley, Amir Zamir, Dawn Chen, Leonidas Guibas, Jitendra Malik, and Silvio Savarese. Which tasks should be learned together in multi-task learning? In ICML, 2020. 3 +[61] Ximeng Sun, Rameswar Panda, Rogerio Feris, and Kate Saenko. Adashare: Learning what to share for efficient deep multi-task learning. In NeurIPS, 2020. 3 +[62] Zehua Sun, Qiuhong Ke, Hossein Rahmani, Mohammed Bennamoun, Gang Wang, and Jun Liu. Human action recognition from various data modalities: A review. IEEE TPAMI, 2023. 
2 +[63] Simon Vandenhende, Stamatios Georgoulis, and Luc Van Gool. Mti-net: Multi-scale task interaction networks for multi-task learning. In ECCV, 2020. 3 +[64] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 3 +[65] Xiaohan Wang, Linchao Zhu, Heng Wang, and Yi Yang. Interactive prototype learning for egocentric action recognition. In ICCV, 2021. 3 +[66] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics, 2019. 2 + +[67] Sen Wu, Hongyang R. Zhang, and Christopher Ré. Understanding and improving information transfer in multi-task learning. In ICLR, 2020. 6 +[68] Zihui Xue, Yale Song, Kristen Grauman, and Lorenzo Torresani. Egocentric video task translation. In CVPR, 2023. 2, 3, 5, 6, 7, 8 +[69] Shen Yan, Xuehan Xiong, Anurag Arnab, Zhichao Lu, Mi Zhang, Chen Sun, and Cordelia Schmid. Multiview transformers for video recognition. In CVPR, 2022. 1 +[70] Lijin Yang, Yifei Huang, Yusuke Sugano, and Yoichi Sato. Interact before align: Leveraging cross-modal knowledge for domain adaptive action recognition. In CVPR, 2022. 2 +[71] Tianhe Yu, Saurabh Kumar, Abhishek Gupta, Sergey Levine, Karol Hausman, and Chelsea Finn. Gradient surgery for multi-task learning. In NeurIPS, 2020. 3 +[72] Runhao Zeng, Wenbing Huang, Mingkui Tan, Yu Rong, Peilin Zhao, Junzhou Huang, and Chuang Gan. Graph convolutional networks for temporal action localization. In ICCV, 2019. 3 +[73] Chen-Lin Zhang, Jianxin Wu, and Yin Li. Actionformer: Localizing moments of actions with transformers. In ECCV, 2022. 1, 2 +[74] Yu Zhang and Qiang Yang. A survey on multi-task learning. IEEE Transactions on Knowledge and Data Engineering, 34 (12):5586-5609, 2021. 3 +[75] Yue Zhao, Ishan Misra, Philipp Krahenbuhl, and Rohit Girdhar. Learning video representations from large language models. In CVPR, 2023. 2 +[76] Zeyun Zhong, David Schneider, Michael Voit, Rainer Stiefelhagen, and Jürgen Beyerer. Anticipative feature fusion transformer for multi-modal action anticipation. In WACV, 2023. 1, 2 +[77] Bolei Zhou, Alex Andonian, Aude Oliva, and Antonio Torralba. Temporal relational reasoning in videos. In ECCV, 2018. 
5 \ No newline at end of file diff --git a/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/images.zip b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..3d9b2a73f10a637f82b1ca25809e439cdff33d78 --- /dev/null +++ b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f9a049490217eb30a9c6c8e074e16a2db12401accfcee73613458c41d9c44b2 +size 375816 diff --git a/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/layout.json b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..49aa552278786ce0ace340b3628712e4caf67648 --- /dev/null +++ b/2024/A Backpack Full of Skills_ Egocentric Video Understanding with Diverse Task Perspectives/layout.json @@ -0,0 +1,9024 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 99, + 103, + 494, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 103, + 494, + 140 + ], + "spans": [ + { + "bbox": [ + 99, + 103, + 494, + 140 + ], + "type": "text", + "content": "A Backpack Full of Skills: Egocentric Video Understanding with Diverse Task Perspectives" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 80, + 160, + 511, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 160, + 511, + 203 + ], + "spans": [ + { + "bbox": [ + 80, + 160, + 511, + 203 + ], + "type": "text", + "content": "Simone Alberto Peirone1 Francesca Pistilli1 Antonio Alliegro1,2 Giuseppe Averta1 \n1 Politecnico di Torino, 2 Istituto Italiano di Tecnologia \nfirstname.lastname@polito.it" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "spans": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 256, + 290, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 256, + 290, + 509 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 290, + 509 + ], + "type": "text", + "content": "Human comprehension of a video stream is naturally broad: in a few instants, we are able to understand what is happening, the relevance and relationship of objects, and forecast what will follow in the near future, everything all at once. We believe that - to effectively transfer such an holistic perception to intelligent machines - an important role is played by learning to correlate concepts and to abstract knowledge coming from different tasks, to synergistically exploit them when learning novel skills. To accomplish this, we look for a unified approach to video understanding which combines shared temporal modelling of human actions with minimal overhead, to support multiple downstream tasks and enable cooperation when learning novel skills. We then propose EgoPack, a solution that creates a collection of task perspectives that can be carried across downstream tasks and used as a potential source of additional insights, as a backpack of skills that a robot can carry around and use when needed. 
We demonstrate the effectiveness and efficiency of our approach on four Ego4D benchmarks, outperforming current state-of-the-art methods. Project webpage: sapeirone.github.io/EgoPack." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 525, + 128, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 525, + 128, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 525, + 128, + 538 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "text", + "content": "Our daily living activities are extremely complex and diverse, nonetheless humans have the extraordinary ability to reason on the behaviour itself in just a few instants from a visual input. We are able to spot what another person is doing, predict their next actions based on current observations, and understand the implications of an activity, for instance whether its effects are reversible. Observing someone in the kitchen by the worktable, where there is a pack of flour and a jug of water, we can identify that they are a chef kneading flour (reasoning about current activity). We can also forecast that the next step will involve mixing the flour with water (reasoning about the future), and finally obtaining dough (reasoning about implications of these actions). This type of holistic reasoning, which is natural for humans," + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 315, + 231, + 541, + 439 + ], + "blocks": [ + { + "bbox": [ + 315, + 231, + 541, + 439 + ], + "lines": [ + { + "bbox": [ + 315, + 231, + 541, + 439 + ], + "spans": [ + { + "bbox": [ + 315, + 231, + 541, + 439 + ], + "type": "image", + "image_path": "0d10b8eac08614be2ccc8d3f6eaefd5c661cee07f42c1a02dcdd3b319a3eb533.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 450, + 547, + 540 + ], + "lines": [ + { + "bbox": [ + 304, + 450, + 547, + 540 + ], + "spans": [ + { + "bbox": [ + 304, + 450, + 547, + 540 + ], + "type": "text", + "content": "Figure 1. Given a video stream, a robot is asked to learn a novel task, e.g. Object State Change Classification (OSCC). To learn the new skill, the robot can access previously gained knowledge regarding different tasks, such as Point of No Return (PNR), Long Term Anticipation (LTA) and Action Recognition (AR), and use it during the learning process to enhance downstream task performance. This knowledge is stored as graphs inside the robot's backpack, always ready to boost a new skill." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 545, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 547, + 715 + ], + "type": "text", + "content": "is still a distant goal for artificial intelligence systems. The challenge arises not only from the requirement of executing multiple tasks with a single architecture, but also from the necessity of being able to abstract and repurpose such knowledge across-tasks, for example to foster and enhance the learning of novel skills. Current research trends in human activity understanding predominantly focus on creating several, hyper-specialised, models. 
This approach splits the understanding of human activities into distinct skills, with each model being independently trained to rely only on \"task-specific\" clues for prediction [69, 73, 76]. However, this approach disregards the valuable insights that could be gleaned from different task perspectives. A first step in this direction relies on Multi-Task Learning (MTL) to exploit" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18275" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 286, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 286, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 286, + 156 + ], + "type": "text", + "content": "the intuition that knowledge sharing between tasks may improve performance. However, the proposed multi-task models have some limitations [40], mostly concerning a negative transfer between tasks, making it difficult to outperform single-task models. Most importantly, MTL usually assumes the availability of supervision for all tasks at training time, limiting the extension of the models at a later time." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 158, + 286, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 158, + 286, + 301 + ], + "spans": [ + { + "bbox": [ + 46, + 158, + 286, + 301 + ], + "type": "text", + "content": "The recently proposed EgoT2 framework [68] offers a unified solution to integrate various egocentric video tasks. It employs an ensemble of diverse, task-specific models and learns to translate task-specific clues through a transformer-based encoder-decoder to benefit one of the tasks. Although this approach fosters positive interactions between tasks, it has significant limitations: i) the primary task should be \"known\" at training time and present within the task-specific models collection, ii) it necessitates an extensive pretraining process and iii) it lacks a knowledge abstraction, as it relies on task-specific models rather than creating transferable concepts." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 303, + 286, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 303, + 286, + 495 + ], + "spans": [ + { + "bbox": [ + 46, + 303, + 286, + 495 + ], + "type": "text", + "content": "Indeed, we argue that an important key to advance the learning capabilities of intelligent systems and to move a step closer to a generalised reasoning on visual understanding involves not only sharing information across tasks, but also abstracting task-specific knowledge for application in new scenarios. Considering an ensemble of vision tasks, each offers a distinct perspective on the input stream and extracts different types of information. Our goal is to encapsulate this diverse knowledge to be leveraged in the future to positively impact the learning of a novel skill. We focus on egocentric video understanding as it is the perfect harbour to study human activities and synergies between tasks. There is a strong connection between egocentric tasks. For instance, specific actions, like peeling a potato, directly result in a change in the state of the object (the potato in this case), illustrating the interconnected nature of these tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 496, + 286, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 496, + 286, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 496, + 286, + 687 + ], + "type": "text", + "content": "All the above considerations motivate us in investigating new alternatives and we propose a novel framework for knowledge abstraction and sharing called EgoPack. Our underlying idea, is to exploit a set of known tasks, each one able to interpret an input stream according to its own perspective, to learn reusable knowledge that can aid the learning process of a novel task. We show this concept in Fig. 1, where a robot is equipped with a backpack that figuratively summarises all the knowledge gained from a set of tasks. To learn a new skill, the robot can \"take-out\" task-related knowledge from the backpack and leverage it within the learning process. The task-specific perspectives are collected in a single pretraining step of a novel multi-task network under the form of prototypes. We exploit a new versatile temporal graph-based architecture shared across all the tasks, with minimal overhead to support each task." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "content": "When learning a new skill, EgoPack promotes the interaction between the different tasks by learning which rele" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 72, + 545, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 120 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 120 + ], + "type": "text", + "content": "vant knowledge to extract from the different perspectives. The architecture of EgoPack is notably flexible, enabling easy adaptation to novel tasks by reusing the previous tasks to facilitate the learning process of any novel task." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 121, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 121, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 305, + 121, + 545, + 156 + ], + "type": "text", + "content": "We demonstrate the effectiveness and efficiency of our approach on Ego4D [25], a large-scale egocentric vision dataset. To summarise, our main contributions are:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 156, + 545, + 287 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 306, + 156, + 545, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 156, + 545, + 179 + ], + "spans": [ + { + "bbox": [ + 306, + 156, + 545, + 179 + ], + "type": "text", + "content": "1. We present a unified architecture to learn multiple egocentric vision tasks with minimal task-specific overhead;" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 180, + 545, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 180, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 306, + 180, + 545, + 215 + ], + "type": "text", + "content": "2. We introduce EgoPack, a novel approach that leverages different task perspectives to build a robust knowledge abstraction which can foster the learning of a novel task;" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 216, + 545, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 216, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 306, + 216, + 545, + 251 + ], + "type": "text", + "content": "3. Our approach outperforms both specialised single and multi-task baselines by leveraging the unique synergies and distinct perspectives of different tasks;" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 251, + 545, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 251, + 545, + 287 + ], + "spans": [ + { + "bbox": [ + 306, + 251, + 545, + 287 + ], + "type": "text", + "content": "4. EgoPack achieves competitive performance on Ego4D [25] for all the considered benchmarks, outperforming the state-of-the-art on some." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 297, + 397, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 297, + 397, + 308 + ], + "spans": [ + { + "bbox": [ + 306, + 297, + 397, + 308 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 316, + 545, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 316, + 545, + 484 + ], + "spans": [ + { + "bbox": [ + 305, + 316, + 545, + 484 + ], + "type": "text", + "content": "Egocentric Vision Egocentric vision captures human activities from the privileged perspective of the camera wearer, allowing a unique point of view on their actions [3, 49]. Recently, the field has seen rapid development thanks to the release of several large-scale egocentric vision datasets [11, 12, 25, 30, 34, 56]. The rich annotations of these datasets [12, 25] allow to tackle a large number of tasks, including action recognition [46], action anticipation [18, 23, 76], next active object prediction [19], action segmentation [33, 73] and episodic memory [52]. 
Previous works in egocentric vision have focused on domain adaptation [6, 44, 48, 50, 70], multimodal learning [20, 62, 70] and large-scale video-language pretraining [1, 51, 75] to learn better representation for downstream tasks." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": "Graph Neural Networks for vision tasks Traditional neural networks, including Convolutional Neural Networks (CNNs), have been widely used in computer vision, showing impressive performance on a variety of problems [26, 39, 41]. However, these models often assume data lying on a regular domain, such as images that have a grid-like structure. In recent years, the interest in developing methods able to provide a more general and powerful type of processing has been growing and particular attention has been given to learning methods on graphs. Graph Neural Networks (GNNs) have the innate ability to effectively handle data that lie on irregular domains, such as 3D data [58, 66], robotics [47], molecular chemistry [37], and social or financial networks [15], and to model complex data relations [55]. Recently, transformer-based architectures had a great impact on vision application. Despite Transformers and GNNs share some similarities in their ability to handle various data types, they are fundamentally different in their" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18276" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": "core architectures and the specific ways they process data. GNNs can model the topology of a graph and the relations between nodes while also inheriting all the desirable properties of classic convolutions: locality, hierarchical structures and efficient weights reuse. In video understanding GNNs have been applied to action localisation [22, 33, 53, 72], to build a knowledge graph from human actions [21], to model human-object interactions [13, 14] or to build a topological map of the environment [45]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 194, + 289, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 194, + 289, + 625 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 289, + 625 + ], + "type": "text", + "content": "Multi-Task Learning MTL [5, 74] tackles the problem of learning to solve multiple tasks simultaneously. The development of this strategy is justified by the intuition that complex settings require solving multiple tasks, for instance autonomous driving [31], robotics and natural language processing. Furthermore, these networks can bring the theoretical advantage of sharing complementary information to improve performance. 
Several works have been done in this direction [7, 8, 10, 17, 31, 32, 40, 57], focusing on which parameters or tasks is better to share [28, 35, 60, 61] and promoting synergies between tasks [36, 65]. Such methods encounter the problem of negative transfer [40] and sharing with unrelated tasks [28, 60] consequently suffering of task competition and not being able to benefit from information sharing between tasks. To overcome these limitations, several methods have been proposed to balance task-related losses [9, 38, 59], to dynamically prioritise tasks [27], to reduce gradient interference between tasks [71] or to exploit task interactions at multiple scales [63]. Unfortunately, all these solutions require extensive task-specific tuning, and are not able to build an holistic perception across tasks. Few works have explored MTL in the field of egocentric vision [32, 36, 65, 68]. Among these, the recently proposed EgoT2 [68] builds an ensemble of diverse, task-specific models. The features of the different models are projected into a common feature space and processed through a transformer-based encoder-decoder to translate the contributions of different tasks and generate predictions for the primary task. Notably, the primary task has to be part of the task-specific models. This approach fosters positive interactions between tasks, resulting in improved performance compared to the single-task models. However, it has some limitations, as it is not able to build knowledge abstractions that can be easily transferred to novel tasks. Instead, we propose a model that can build a robust backpack of task perspectives that can be used in learning any novel tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 634, + 104, + 645 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 634, + 104, + 645 + ], + "spans": [ + { + "bbox": [ + 47, + 634, + 104, + 645 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 653, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 714 + ], + "type": "text", + "content": "We tackle a task cooperation setting, in which an egocentric vision model is able to exploit previously acquired knowledge over task perspectives to foster the learning process of any novel task. We formulate the proposed setting in Sec. 3.1. We present a unified temporal architecture to" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "type": "text", + "content": "model multiple tasks in Sec. 3.2, a key step to enable knowledge sharing between tasks. Finally, Sec. 3.3 presents our novel approach EgoPack to enable efficient transfer of different task perspectives to novel tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 129, + 362, + 141 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 129, + 362, + 141 + ], + "spans": [ + { + "bbox": [ + 305, + 129, + 362, + 141 + ], + "type": "text", + "content": "3.1. 
Setting" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "spans": [ + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "text", + "content": "A task " + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "text", + "content": " is associated with a dataset " + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{(v_i,y_i)\\}_{i = 1}^N" + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "text", + "content": " is a video segment of arbitrary length, " + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "inline_equation", + "content": "y_{i}" + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "text", + "content": " is the associated ground truth label and " + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "text", + "content": " is the number of segments. Our approach follows a two-stages training process. First, a model " + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "text", + "content": " is trained on a set of " + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "text", + "content": " tasks " + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{T}_0,\\dots ,\\mathcal{T}_K\\}" + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "text", + "content": ", under a Multi-Task Learning framework with hard-parameter sharing [54] to encourage the model to learn more general and task-agnostic representations thanks to the joint supervision of multiple tasks. Then, the model is presented with a new task " + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{K + 1}" + }, + { + "bbox": [ + 304, + 147, + 545, + 411 + ], + "type": "text", + "content": " to learn, without access to the supervision of the previous tasks. In this scenario, the new task may benefit from potential semantic affinities with the previously seen tasks. For example, a model that has learnt to detect object changes may apply this knowledge for action recognition and vice-versa, as some actions are associated with object changes, e.g. cutting something, while others are not, e.g. moving an object. Our goal is to make these semantic affinities more explicit (and exploitable) so that the new task can learn to repurpose these perspectives from previous tasks to improve performance, a step towards more holistic models that seamlessly share knowledge between tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 419, + 534, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 419, + 534, + 432 + ], + "spans": [ + { + "bbox": [ + 305, + 419, + 534, + 432 + ], + "type": "text", + "content": "3.2. 
A Unified Architecture for Egocentric Tasks" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": "The main premise of our method is that different egocentric vision tasks can be modelled using a shared architecture with minimal differences between tasks. Under this assumption, videos can be seen as a sequence of " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": " temporal segments encoded as " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\mathbf{x} = \\{\\mathbf{x}_1,\\mathbf{x}_2,\\dots ,\\mathbf{x}_N\\}" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i\\in \\mathbb{R}^D" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": " represents the " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": "-dimensional features of segment " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": " extracted using some video features extractor " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": ", e.g. SlowFast [16] or Omnivore [24]. Such sequence could be interpreted as a temporal graph " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\mathcal{G}(\\mathcal{X},\\mathcal{E})" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": ", whose nodes " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i\\in \\mathcal{X}" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": " represent the segments of the video, and edges " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "e_{ij}\\in \\mathcal{E}" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": " connect nodes " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_j" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": " with a temporal distance considered relevant when lower than a threshold " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": ". The connectivity of the graph defines the extent of its temporal modelling, i.e. 
connecting further apart nodes enables longer range temporal understanding which could benefit for example anticipation tasks. The threshold " + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 437, + 545, + 676 + ], + "type": "text", + "content": " depends on the task at hand and more implementation details are provided in Sec. 4.1. The temporal position of each node in the sequence is encoded by adding to the node embeddings a positional encoding [64]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "This formulation enables the use of Graph Neural Networks (GNNs) to learn the complex temporal relations between video segments and to cast different egocentric vision" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18277" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 92, + 62, + 287, + 137 + ], + "blocks": [ + { + "bbox": [ + 92, + 62, + 287, + 137 + ], + "lines": [ + { + "bbox": [ + 92, + 62, + 287, + 137 + ], + "spans": [ + { + "bbox": [ + 92, + 62, + 287, + 137 + ], + "type": "image", + "image_path": "b2f5e1b612ee56559f47d71736cc2ad955ce3dbb0b93273aa55ae4086be6a0e1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 107, + 148, + 257, + 257 + ], + "blocks": [ + { + "bbox": [ + 107, + 148, + 257, + 257 + ], + "lines": [ + { + "bbox": [ + 107, + 148, + 257, + 257 + ], + "spans": [ + { + "bbox": [ + 107, + 148, + 257, + 257 + ], + "type": "image", + "image_path": "785ed7da5144e5810ec295e13bc5c0574be4bd65490bc94fcb4b717051fbba72.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 275, + 545, + 342 + ], + "lines": [ + { + "bbox": [ + 46, + 275, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 46, + 275, + 545, + 342 + ], + "type": "text", + "content": "Figure 2. Architecture of EgoPack when Object State Change Classification (OSCC) is the novel task. Videos are interpreted as a graph, whose nodes " + }, + { + "bbox": [ + 46, + 275, + 545, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i" + }, + { + "bbox": [ + 46, + 275, + 545, + 342 + ], + "type": "text", + "content": " represent actions, encoded as features, and edges connect temporally close segments. This representation enables the design of a Unified Temporal Backbone to learn multiple tasks with a shared architecture and minimal Task-Specific Heads, leveraging GNNs for temporal modelling. We exploit this architecture to jointly learn " + }, + { + "bbox": [ + 46, + 275, + 545, + 342 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 275, + 545, + 342 + ], + "type": "text", + "content": " tasks, e.g. AR, LTA and PNR. 
After this training process, we extract a set of prototypes " + }, + { + "bbox": [ + 46, + 275, + 545, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^k" + }, + { + "bbox": [ + 46, + 275, + 545, + 342 + ], + "type": "text", + "content": " that summarise what the network has learnt from each task " + }, + { + "bbox": [ + 46, + 275, + 545, + 342 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_k" + }, + { + "bbox": [ + 46, + 275, + 545, + 342 + ], + "type": "text", + "content": ", like a backpack of skills that we can carry over. In this Cross-Tasks Interaction phase, the network can peek at these different task-perspective to enrich the learning of the novel task." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 304, + 62, + 512, + 264 + ], + "blocks": [ + { + "bbox": [ + 304, + 62, + 512, + 264 + ], + "lines": [ + { + "bbox": [ + 304, + 62, + 512, + 264 + ], + "spans": [ + { + "bbox": [ + 304, + 62, + 512, + 264 + ], + "type": "image", + "image_path": "a950485f5d0e64e5819580c243221b5a24c344ea28f7feae7d3246b50880e136.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "spans": [ + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "content": "tasks as graph prediction tasks, such as node-level or graph-level classification, as shown in Fig. 3. This assumption is reflected in our approach by decomposing the multi-task model " + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "content": " into two components: a general temporal backbone " + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_t:\\mathbb{R}^D\\mapsto \\mathbb{R}^{D_t}" + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "content": ", and a set of task-specific projection heads " + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_k:\\mathbb{R}^{D_t}\\mapsto \\mathbb{R}^{D_k}" + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "content": " mapping the graph and/or the nodes to the features space of task " + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_k" + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "content": " with dimension " + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "inline_equation", + "content": "D_{k}" + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "content": " as shown in Fig.2. 
" + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_t" + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "content": " is a GNN with " + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "content": " layers that takes as input the temporal sequence " + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "content": " and provides as output the updated feature vectors " + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "inline_equation", + "content": "\\mathbf{f} = \\{\\mathbf{f}_1,\\mathbf{f}_2,\\dots ,\\mathbf{f}_N\\}" + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "content": ". At layer " + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 46, + 355, + 289, + 499 + ], + "type": "text", + "content": ", node embeddings are projected and combined with their neighbours, following the GraphSAGE architecture [29]:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 86, + 506, + 287, + 521 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 506, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 86, + 506, + 287, + 521 + ], + "type": "interline_equation", + "content": "\\mathbf {f} _ {i} ^ {(l + 1)} = \\mathbf {W} _ {r} ^ {(l)} \\mathbf {f} _ {i} ^ {(l)} + \\mathbf {W} ^ {(l)} \\cdot \\mathbf {g} _ {i} ^ {(l + 1)} + \\mathbf {b} ^ {(l)}, \\tag {1}", + "image_path": "366403f0929902723a63dfdf78b31314657c6b6000ded71160f58b8ddedf18b8.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "spans": [ + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i^{(l)}\\in \\mathbb{R}^{D_t^{(l)}}" + }, + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "text", + "content": " are the features of node " + }, + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i" + }, + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "inline_equation", + "content": "\\mathbf{b}^{(l)}\\in \\mathbb{R}^{D_t^{(l + 1)}}" + }, + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "text", + "content": " is a bias term, " + }, + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_r^{(l)},\\mathbf{W}^{(l)}\\in \\mathbb{R}^{D_t^{(l + 1)}\\times D_t^{(l)}}" + }, + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "text", + "content": " are the weight matrices associated to the root node and the aggregated neighbours' contribution " + }, + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "inline_equation", + "content": "\\mathbf{g}_i^{(l + 1)}" + }, + { + "bbox": [ + 47, + 528, + 287, + 595 + ], + "type": "text", + "content": " respectively. 
The latter is computed as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 86, + 604, + 287, + 625 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 604, + 287, + 625 + ], + "spans": [ + { + "bbox": [ + 86, + 604, + 287, + 625 + ], + "type": "interline_equation", + "content": "\\mathbf {g} _ {i} ^ {(l + 1)} = \\underset {\\mathbf {f} _ {i} \\in \\mathcal {N} _ {i}} {\\text {m e a n}} \\left(\\phi \\left(\\mathbf {W} _ {p} ^ {(l)} \\mathbf {f} _ {j} ^ {(l)} + \\mathbf {b} _ {p} ^ {(l)}\\right)\\right), \\tag {2}", + "image_path": "4fa17ed1e3fa6c927811ef960da92b45b0fce661457219e1e7d83babc1ecad1b.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_p^{(l)}\\in \\mathbb{R}^{D_t^{(l)}\\times D_t^{(l)}}" + }, + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "text", + "content": " projects the neighbours before the aggregation step, " + }, + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "text", + "content": " is a non-linearity, " + }, + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{b}_p^{(l)}\\in \\mathbb{R}^{D_t^{(l + 1)}}" + }, + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "text", + "content": " is a bias term and " + }, + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{N}_i" + }, + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "text", + "content": " is the set of neighbours of node " + }, + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i" + }, + { + "bbox": [ + 46, + 636, + 288, + 713 + ], + "type": "text", + "content": ". Each layer is followed by Layer Normalization [2] and a LeakyReLU activation function. A residual connection around the temporal GNN allows the network to preserve" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 355, + 545, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 355, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 305, + 355, + 545, + 403 + ], + "type": "text", + "content": "the original features. Intuitively, the neighbourhood " + }, + { + "bbox": [ + 305, + 355, + 545, + 403 + ], + "type": "inline_equation", + "content": "\\mathcal{N}_i" + }, + { + "bbox": [ + 305, + 355, + 545, + 403 + ], + "type": "text", + "content": " reflects the temporal dependencies of the input sequence and the GNN allows to iteratively extend the temporal receptive field of each node." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "spans": [ + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "text", + "content": "Task-specific heads The output features of the temporal backbone " + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_t" + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "text", + "content": " are shared across the different downstream tasks. 
To project these features into task-specific components, we add a set of projection heads " + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_k" + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "text", + "content": ", one for each task " + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_k" + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "text", + "content": ". For graph classification tasks, the nodes of each graph are aggregated using max pooling to obtain a unique features representation. In each head, a MultiLayer Perceptron outputs the task-specific features " + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i^k \\in \\mathbb{R}^{D^k}" + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "text", + "content": " and is followed by a linear layer to compute the task logits " + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_i^k \\in \\mathbb{R}^{D_o^k}" + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "inline_equation", + "content": "D_o^k" + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "text", + "content": " is the number of labels for task " + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_k" + }, + { + "bbox": [ + 304, + 430, + 546, + 602 + ], + "type": "text", + "content": ". By limiting the task-specific portion of the network to the heads while sharing the temporal backbone, we can obtain the perspective of all tasks with a single forward through the latter. The network is trained on all the tasks by averaging their losses." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 619, + 509, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 619, + 509, + 632 + ], + "spans": [ + { + "bbox": [ + 306, + 619, + 509, + 632 + ], + "type": "text", + "content": "3.3. Learning a novel task with a backpack" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "content": "To solve the new task, the naive approach would be to finetune the model, adding a new head " + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{K + 1}" + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "content": " and possibly updating the temporal backbone " + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_t" + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "content": ". However, finetuning may not fully leverage the insights from other tasks as it could result in the loss of the previously acquired knowledge, as confirmed experimentally in Sec. 4.2." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18278" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 360 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 360 + ], + "type": "text", + "content": "Building the backpack We propose to explicitly model the perspectives of the different tasks as a set of task-specific prototypes that abstract the knowledge gained by the previously seen tasks and can be accessed by novel tasks. We call this approach EgoPack and provide an overview in Fig. 2. These task-specific prototypes are collected from videos annotated for action recognition, as human actions can be seen as the common thread behind the different tasks. Practically, we forward these samples through the temporal backbone and take the output of the different task-specific projection heads, thus encoding the perspective of each task given the same input video. Finally, the features obtained from each task are aggregated according to the verb and noun labels of the action, effectively summarising the perspective of each task given the same input action. The result is a set of prototypes " + }, + { + "bbox": [ + 47, + 72, + 289, + 360 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^k = \\{\\mathbf{p}_0^k,\\mathbf{p}_2^k,\\dots ,\\mathbf{p}_P^k\\} \\in \\mathbb{R}^{P\\times D_k}" + }, + { + "bbox": [ + 47, + 72, + 289, + 360 + ], + "type": "text", + "content": " for each task " + }, + { + "bbox": [ + 47, + 72, + 289, + 360 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_k" + }, + { + "bbox": [ + 47, + 72, + 289, + 360 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 72, + 289, + 360 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 47, + 72, + 289, + 360 + ], + "type": "text", + "content": " is the number of unique (verb, noun) pairs in the dataset and " + }, + { + "bbox": [ + 47, + 72, + 289, + 360 + ], + "type": "inline_equation", + "content": "D_{k}" + }, + { + "bbox": [ + 47, + 72, + 289, + 360 + ], + "type": "text", + "content": " is the size of the task-specific features. These prototypes are frozen and represent a \"summary\" of what the models has learnt during the multi-task pretraining process, creating an abstraction of the gained knowledge. They can be then reused when learning novel tasks, like a backpack of skills that the model can carry over." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "spans": [ + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "text", + "content": "Leveraging the backpack During the learning process of the novel task " + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{K + 1}" + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "text", + "content": ", the model can exploit the task prototypes obtained via the task-specific heads. 
As before, the output of the temporal backbone " + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i" + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "text", + "content": " is forwarded through all the projection heads to obtain the task-specific features " + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i^k" + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "text", + "content": ". These features are used as queries to match the corresponding task prototypes " + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^k" + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "text", + "content": ", selecting the " + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "text", + "content": "-Nearest Neighbours among the prototypes using cosine similarity in the features space. Task features and their neighbouring prototypes form a graph-like structure, on which message passing can be used to enrich the task-specific features " + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i^k" + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "text", + "content": ", following an iterative refinement approach. In particular, at each layer " + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "text", + "content": " we select the closest prototypes with " + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "text", + "content": "-NN and update the features " + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i^{(l),k}" + }, + { + "bbox": [ + 47, + 376, + 289, + 547 + ], + "type": "text", + "content": " according to the following rule:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 80, + 553, + 287, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 553, + 287, + 578 + ], + "spans": [ + { + "bbox": [ + 80, + 553, + 287, + 578 + ], + "type": "interline_equation", + "content": "\\mathbf {f} _ {i} ^ {(l + 1), k} = \\mathbf {W} _ {r} ^ {(l)} \\mathbf {f} _ {i} ^ {(l), k} + \\mathbf {W} ^ {(l)} \\cdot \\max _ {\\mathbf {p} _ {j} ^ {k} \\in \\mathcal {N} _ {i} ^ {(l), k}} \\mathbf {p} _ {j} ^ {k}, \\tag {3}", + "image_path": "dbe25d6ce13953ff2c8f4a7c099618f2fb2ce9b3fdcbc2ff4fb743f63fc42d8e.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{p}_j^k\\in \\mathcal{N}_i^{(l),k}" + }, + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "type": "text", + "content": " are the closest prototypes in " + }, + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^k" + }, + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 
47, + 587, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i^{(l),k}" + }, + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_r^{(l)}" + }, + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{W}^{(l)}\\in \\mathbb{R}^{D^k\\times D^k}" + }, + { + "bbox": [ + 47, + 587, + 287, + 665 + ], + "type": "text", + "content": " are the weight matrices associated to the input features and the aggregated neighbours respectively. Notably, only the task features are updated while the task prototypes remain frozen to preserve the original perspectives seen by the network." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": "In this process, the task-specific heads " + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_k" + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": " are initialised from the multi-task training and possibly updated during the task-specific finetuning process, allowing the model to freely explore the set of task prototypes and to select the" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "text", + "content": "most informative ones for each input sample. After the interaction phase, the refined features " + }, + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{f}}_i^k" + }, + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "text", + "content": " are fed to a classifier module to obtain the task logits " + }, + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_i^k\\in \\mathbb{R}^{D_o^k}" + }, + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "text", + "content": " for each task " + }, + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_k" + }, + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "text", + "content": " in the backpack. The final prediction is the sum of the pre-softmax logits coming from the different tasks and the output of a new head " + }, + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{K + 1}" + }, + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "text", + "content": " for the novel task. Intuitively, we allow each task to cast a vote on the final prediction, based on its perspective on the same video segment. In this phase, the temporal network, the task-specific heads and the weights of the GNNs are trained jointly using only the supervision of the novel task " + }, + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{K + 1}" + }, + { + "bbox": [ + 304, + 72, + 547, + 207 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 218, + 388, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 218, + 388, + 232 + ], + "spans": [ + { + "bbox": [ + 305, + 218, + 388, + 232 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 239, + 547, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 239, + 547, + 323 + ], + "spans": [ + { + "bbox": [ + 304, + 239, + 547, + 323 + ], + "type": "text", + "content": "We evaluate EgoPack on four Ego4d Human-Object Interaction benchmarks: Action Recognition (AR)1, Long Term Action Anticipation (LTA), Object State Change Classification (OSCC) and Point Of No Return (PNR). We report verb and noun top-1 accuracy for AR, accuracy for OSCC, edit distance for LTA and temporal localisation error (in seconds) for PNR." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 331, + 440, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 331, + 440, + 344 + ], + "spans": [ + { + "bbox": [ + 305, + 331, + 440, + 344 + ], + "type": "text", + "content": "4.1. Implementation Details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 350, + 547, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 547, + 482 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 547, + 482 + ], + "type": "text", + "content": "We use Omnivore Swin-L [24] features pre-trained on Kinetics-400 [4], released as part of Ego4D [25] and extracted using dense sampling over a window of 32 frames with a stride of 16 frames and features size 1536. In principle, EgoPack is agnostic to the underlying features extractor and could adopt other architectures. Following previous works on Ego4D [50] we use TRN [77] to temporally aggregate features from the three sub-segments of each input sample. The mapping between videos of each task and its corresponding temporal graph is task dependent, as shown in Fig. 3:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 483, + 547, + 675 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 306, + 483, + 547, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 483, + 547, + 542 + ], + "spans": [ + { + "bbox": [ + 306, + 483, + 547, + 542 + ], + "type": "text", + "content": "- Action Recognition (AR): actions are mapped to the nodes of the temporal graph " + }, + { + "bbox": [ + 306, + 483, + 547, + 542 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 306, + 483, + 547, + 542 + ], + "type": "text", + "content": ", and edges connect each node to the previous and next (Fig. 3a). To account for the variable length of videos, actions are processed in fixed size windows." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 543, + 547, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 543, + 547, + 625 + ], + "spans": [ + { + "bbox": [ + 306, + 543, + 547, + 625 + ], + "type": "text", + "content": "- Long Term Anticipation (LTA): each input clip is mapped to a node in " + }, + { + "bbox": [ + 306, + 543, + 547, + 625 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 306, + 543, + 547, + 625 + ], + "type": "text", + "content": ". Then, a sequence of new nodes is added to the graph, equivalent in number to the clips to forecast. 
These nodes are initialised with the mean features of the input clips and are connected to the previous and subsequent nodes in the sequence, as well as to the input clips (Fig. 3b)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 626, + 547, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 626, + 547, + 675 + ], + "spans": [ + { + "bbox": [ + 306, + 626, + 547, + 675 + ], + "type": "text", + "content": "- Object State Change Classification (OSCC) and Point of No Return (PNR): each input segment is further split into " + }, + { + "bbox": [ + 306, + 626, + 547, + 675 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 306, + 626, + 547, + 675 + ], + "type": "text", + "content": " sub-segments to account for the finer temporal granularity required by these tasks. Each sub-segment is" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "text", + "content": "AR is not an official Ego4D benchmark and was derived from the LTA annotations by [68]. To be consistent with previous works, we use the v1 version of the LTA annotations." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "18279" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 74, + 208, + 131 + ], + "blocks": [ + { + "bbox": [ + 69, + 74, + 208, + 131 + ], + "lines": [ + { + "bbox": [ + 69, + 74, + 208, + 131 + ], + "spans": [ + { + "bbox": [ + 69, + 74, + 208, + 131 + ], + "type": "image", + "image_path": "388c0eb9864c315baa2798b5c69beb5fc0cd38e346851bb86c754bcb621cc228.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 82, + 135, + 193, + 144 + ], + "lines": [ + { + "bbox": [ + 82, + 135, + 193, + 144 + ], + "spans": [ + { + "bbox": [ + 82, + 135, + 193, + 144 + ], + "type": "text", + "content": "(a) Node classification (AR, PNR)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 230, + 74, + 370, + 132 + ], + "blocks": [ + { + "bbox": [ + 230, + 74, + 370, + 132 + ], + "lines": [ + { + "bbox": [ + 230, + 74, + 370, + 132 + ], + "spans": [ + { + "bbox": [ + 230, + 74, + 370, + 132 + ], + "type": "image", + "image_path": "4c993074ff57171ad3e42f2b5f9f4f95e8778928bbc940f94ece5c91fc4a9d80.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 244, + 135, + 361, + 144 + ], + "lines": [ + { + "bbox": [ + 244, + 135, + 361, + 144 + ], + "spans": [ + { + "bbox": [ + 244, + 135, + 361, + 144 + ], + "type": "text", + "content": "(b) Future node classification (LTA)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 154, + 547, + 189 + ], + "lines": [ + { + "bbox": [ + 46, + 154, + 547, + 189 + ], + "spans": [ + { + "bbox": [ + 46, + 154, + 547, + 189 + ], + "type": 
"text", + "content": "Figure 3. Egocentric vision tasks as graph prediction tasks. In AR and LTA, each node is an action within a temporal sequence and the objective is to predict the verb and noun labels of the nodes. In OSCC and PNR, nodes represent different temporal segments of the video clip and the goal is to output a global prediction for the entire graph (OSCC) or the individual nodes (PNR)." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 400, + 76, + 529, + 129 + ], + "blocks": [ + { + "bbox": [ + 400, + 76, + 529, + 129 + ], + "lines": [ + { + "bbox": [ + 400, + 76, + 529, + 129 + ], + "spans": [ + { + "bbox": [ + 400, + 76, + 529, + 129 + ], + "type": "image", + "image_path": "a9b3d2e25d485fd2bce6c57a91464f62adbe3d42ecece4c3b1e1614c2a61cc38.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 415, + 135, + 519, + 144 + ], + "lines": [ + { + "bbox": [ + 415, + 135, + 519, + 144 + ], + "spans": [ + { + "bbox": [ + 415, + 135, + 519, + 144 + ], + "type": "text", + "content": "(c) Graph classification (OSCC)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 199, + 287, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 199, + 287, + 222 + ], + "spans": [ + { + "bbox": [ + 55, + 199, + 287, + 222 + ], + "type": "text", + "content": "mapped to a node in " + }, + { + "bbox": [ + 55, + 199, + 287, + 222 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 55, + 199, + 287, + 222 + ], + "type": "text", + "content": ", and edges connect each node to the previous and next (Fig. 3c)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 223, + 288, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 223, + 288, + 368 + ], + "spans": [ + { + "bbox": [ + 46, + 223, + 288, + 368 + ], + "type": "text", + "content": "Tasks have different annotations and are modelled as separate graphs, even though the temporal model is shared. The task prototypes are built using samples from the train split of the AR dataset. Tasks are trained with standard cross entropy loss, with the exception of PNR which uses binary cross entropy. EgoPack is trained for 30, 40 and 10 epochs for AR, LTA and OSCC/PNR respectively, with a learning rate of " + }, + { + "bbox": [ + 46, + 223, + 288, + 368 + ], + "type": "inline_equation", + "content": "1e - 4" + }, + { + "bbox": [ + 46, + 223, + 288, + 368 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 223, + 288, + 368 + ], + "type": "inline_equation", + "content": "1e - 6" + }, + { + "bbox": [ + 46, + 223, + 288, + 368 + ], + "type": "text", + "content": " for AR/LTA and OSCC/PNR respectively using Adam optimiser and batch size 16. All tasks share the same temporal and cross-task interaction architecture, with minimal task-specific hyper-parameter tuning. More details are reported in the supplementary." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 376, + 162, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 376, + 162, + 388 + ], + "spans": [ + { + "bbox": [ + 47, + 376, + 162, + 388 + ], + "type": "text", + "content": "4.2. 
Quantitative results" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 396, + 287, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 396, + 287, + 599 + ], + "spans": [ + { + "bbox": [ + 46, + 396, + 287, + 599 + ], + "type": "text", + "content": "We show the main results of EgoPack in Table 1. To assess the validity of our approach, we proceed incrementally starting from single tasks models, i.e. each task is trained separately. In this setting, we compare a simple MLP baseline trained on the temporally aggregated features against our temporal graph methodology, which exhibits superior average performance. The improvement is particularly evident in the PNR task, e.g. from " + }, + { + "bbox": [ + 46, + 396, + 287, + 599 + ], + "type": "inline_equation", + "content": "1.76s" + }, + { + "bbox": [ + 46, + 396, + 287, + 599 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 396, + 287, + 599 + ], + "type": "inline_equation", + "content": "0.61s" + }, + { + "bbox": [ + 46, + 396, + 287, + 599 + ], + "type": "text", + "content": ", where the subpar outcomes of the MLP can be attributed to the lack of explicit temporal modelling. In addition to higher performance, the temporal graph enables all the tasks to be modelled using a unified architecture which allows to train all the tasks at the same time (MTL). With the MTL model, we observe a significant drop in average performance, mostly driven by worse accuracy in AR and OSCC. This behaviour is the result of negative transfers between tasks when they are trained together [67]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "content": "Cross-Task Interactions We compare our approach EgoPack for efficient cross task interaction with EgoT2 [68], which learns to combine multiple task-specific frozen models to solve one of them. Unlike EgoPack, the learning process of EgoT2 is divided in two stages, i.e. a pre-training step where each individual task is learned from scratch and a task-specific translation step, where just one task of the collection is fine-tuned. Notably," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 198, + 545, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 198, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 304, + 198, + 545, + 403 + ], + "type": "text", + "content": "both steps require the supervision of the downstream task. On the contrary, the multi-task pre-training of EgoPack is agnostic to the novel downstream task, potentially allowing to transfer the gained knowledge to any new task. To ensure a fair comparison with EgoPack, we re-implemented the task translation mechanism proposed by EgoT2 on top of our Temporal Graph single task models using Omnivore features. This approach is indicated as Task Translation in Table 1. Additional details on its implementation are provided in the supplementary. One of the main benefit of our approach is that it requires a single forward pass through the features extraction and temporal backbones to obtain the perspectives of the different tasks, unlike EgoT2 which requires a forward pass for each single task model. Notably, we also highlight that EgoPack obtains better or comparable performance even though the backbone used for features extraction was not trained on Ego4D." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 426, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 426, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 426, + 546, + 715 + ], + "type": "text", + "content": "Ablation of the different contributions We summarise the main steps leading to EgoPack in Table 2, using an aggregated metric to capture the overall improvement across the various tasks when compared to the baseline. The metric is computed as an average of the individual task metrics. We adjusted the metrics by taking one minus the score for LTA and PNR, as lower values are preferable, and clipped the PNR localisation error at 1.0 to have the same scale across all the metrics. Temporal modelling alone greatly improves the score compared to the baseline. Although MTL allows to train under a multi-task objective, it clearly underperforms the temporal model due to negative transfers [40]. Task Translation partially recovers this gap on some tasks as shown in Table 1, but overall the aggregated metric is comparable with MTL. We speculate that the marginal improvement of Task Translation compared to MTL lies in the limited task-specific context the former has access to, as it can peek at the different perspectives of the auxiliary tasks only for the input video at hand, rather than looking at the entire knowledge gained by the model. On the contrary, the task prototypes of EgoPack allow to carry a more complete summary of what the models has learnt from which it can extract useful knowledge based on the sample and the task at hand. To validate that the benefits of EgoPack were not brought by" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18280" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 70, + 547, + 186 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 547, + 186 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 547, + 186 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 547, + 186 + ], + "type": "table", + "html": "
<table><thead><tr><td></td><td>Trained on frozen features</td><td colspan="2">AR</td><td>OSCC</td><td colspan="2">LTA</td><td>PNR</td></tr>
<tr><td></td><td></td><td>Verbs Top-1 (%)</td><td>Nouns Top-1 (%)</td><td>Acc. (%)</td><td>Verbs ED (↓)</td><td>Nouns ED (↓)</td><td>Loc. Err. (s) (↓)</td></tr></thead>
<tbody><tr><td>Ego4D Baselines [25]</td><td>X</td><td>22.18</td><td>21.55</td><td>68.22</td><td>0.746</td><td>0.789</td><td>0.62</td></tr>
<tr><td>EgoT2s [68]</td><td>X</td><td>23.04</td><td>23.28</td><td>72.69</td><td>0.731</td><td>0.769</td><td>0.61</td></tr>
<tr><td>MLP</td><td>✓</td><td>24.08</td><td>30.45</td><td>70.47</td><td>0.763</td><td>0.742</td><td>1.76</td></tr>
<tr><td>Temporal Graph</td><td>✓</td><td>24.25</td><td>30.43</td><td>71.26</td><td>0.754</td><td>0.752</td><td>0.61</td></tr>
<tr><td>Multi-Task Learning</td><td>✓</td><td>22.05</td><td>29.44</td><td>71.10</td><td>0.740</td><td>0.746</td><td>0.62</td></tr>
<tr><td>Task Translation†</td><td>✓</td><td>23.68</td><td>28.28</td><td>71.48</td><td>0.740</td><td>0.756</td><td>0.61</td></tr>
<tr><td>EgoPack</td><td>✓</td><td>25.10</td><td>31.10</td><td>71.83</td><td>0.728</td><td>0.752</td><td>0.61</td></tr></tbody></table>
", + "image_path": "629260494700091afe8154de6b852a3c31b1ef6495bfafa202a8165c7a6b8986.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 48, + 273, + 287, + 367 + ], + "blocks": [ + { + "bbox": [ + 46, + 194, + 547, + 262 + ], + "lines": [ + { + "bbox": [ + 46, + 194, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 547, + 262 + ], + "type": "text", + "content": "Table 1. EgoPack on Ego4D HOI tasks. MLP is a simple baseline consisting of a few linear layers, while Temporal Graph models all tasks using a unified temporal graph-based architecture. MTL [54] uses hard parameter sharing to jointly learn all tasks, which may result in negative transfers. Ego-T2s [68] learns to translate features across tasks to optimise the primary task. EgoPack builds on the unified architecture of the Temporal Graph and learns to exploit the perspective of different tasks for efficient transfers to the novel task. Performances of EgoPack are evaluated over three runs using accuracy for AR and OSCC, Edit Distance for LTA and temporal localisation error for PNR. Our implementation of the task translation mechanism from EgoT2 [68] using Omnivore features." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 273, + 287, + 367 + ], + "lines": [ + { + "bbox": [ + 48, + 273, + 287, + 367 + ], + "spans": [ + { + "bbox": [ + 48, + 273, + 287, + 367 + ], + "type": "table", + "html": "
<table><thead><tr><td></td><td>Temp. model</td><td>Multi-Task Objective</td><td>Cross-Task Interaction</td><td>Metrics Average (Δ)</td></tr></thead>
<tbody><tr><td>MLP</td><td>X</td><td>X</td><td>X</td><td>0.416</td></tr>
<tr><td>Temp.</td><td>✓</td><td>X</td><td>X</td><td>0.433 (+4.22%)</td></tr>
<tr><td>Task Transl.</td><td>✓</td><td>X</td><td>✓</td><td>0.431 (+3.61%)</td></tr>
<tr><td>MTL</td><td>✓</td><td>✓</td><td></td><td>0.430 (+3.50%)</td></tr>
<tr><td>MTL+FT</td><td>✓</td><td>✓</td><td></td><td>0.437 (+5.02%)</td></tr>
<tr><td>EgoPack</td><td>✓</td><td>X</td><td>✓</td><td>0.441 (+6.10%)</td></tr></tbody></table>
", + "image_path": "002291742573c6c79a2b3e4292b6052ded236efe4e4707f141fd6827602c5b4b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 374, + 288, + 407 + ], + "lines": [ + { + "bbox": [ + 46, + 374, + 288, + 407 + ], + "spans": [ + { + "bbox": [ + 46, + 374, + 288, + 407 + ], + "type": "text", + "content": "Table 2. Ablation of the different contributions in EgoPack, measured according to an aggregated score, computed as the mean of the standardised metrics across tasks." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 415, + 288, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 415, + 288, + 546 + ], + "spans": [ + { + "bbox": [ + 46, + 415, + 288, + 546 + ], + "type": "text", + "content": "the MTL pre-training alone, we also introduce a " + }, + { + "bbox": [ + 46, + 415, + 288, + 546 + ], + "type": "inline_equation", + "content": "MTL + FT" + }, + { + "bbox": [ + 46, + 415, + 288, + 546 + ], + "type": "text", + "content": " baseline where a new task-specific head is finetuned for the novel task, without access to the output of the other heads. The limited performance of this configuration could be explained by the model losing the knowledge learnt during the multi-task learning, without a significant improvement over the single-task baselines, thus only partially reusing the gained knowledge. On the contrary, EgoPack preserves this knowledge in the form of prototypes, which proves to be effective for retaining the model's knowledge when learning a new task." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "text", + "content": "Depth of the GNN and the selection of " + }, + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "text", + "content": " We observe that EgoPack is quite robust to the number of GNN layers in the interaction stage between the input features and the task prototypes, as shown in Fig. 4. Regarding the selection of the " + }, + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "text", + "content": " parameter, we compare the " + }, + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "inline_equation", + "content": "MTL + FT" + }, + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "text", + "content": " baseline (" + }, + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "inline_equation", + "content": "k = 0" + }, + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "text", + "content": ") with EgoPack. The best performance is achieved at " + }, + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "inline_equation", + "content": "k = 4" + }, + { + "bbox": [ + 46, + 558, + 287, + 656 + ], + "type": "text", + "content": " with a saturating trend afterwards, showing that interacting with a limited number of prototypes is sufficient." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": "Results on the test set We compare EgoPack on the test set of PNR, OSCC and LTA benchmarks, to validate the improvements and soundness of EgoPack. In this setting, a fair comparison between methods is challenging because" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 299, + 272, + 531, + 365 + ], + "blocks": [ + { + "bbox": [ + 299, + 272, + 531, + 365 + ], + "lines": [ + { + "bbox": [ + 299, + 272, + 531, + 365 + ], + "spans": [ + { + "bbox": [ + 299, + 272, + 531, + 365 + ], + "type": "image", + "image_path": "d85c23916c0ac7b3b660d8d0486042fa357246551d5ca799417a8948cf7ee0f1.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 368, + 545, + 401 + ], + "lines": [ + { + "bbox": [ + 304, + 368, + 545, + 401 + ], + "spans": [ + { + "bbox": [ + 304, + 368, + 545, + 401 + ], + "type": "text", + "content": "Figure 4. Parameter analysis for the cross-tasks interaction module of EgoPack. We analyse the impact on performance of GNN depth and the number of nearest neighbours denoted as " + }, + { + "bbox": [ + 304, + 368, + 545, + 401 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 368, + 545, + 401 + ], + "type": "text", + "content": "-NN." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 409, + 545, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 409, + 545, + 542 + ], + "spans": [ + { + "bbox": [ + 304, + 409, + 545, + 542 + ], + "type": "text", + "content": "of the use of different backbones, supervision levels, ensemble strategies and challenge-specific tuning, such as training also on the validation set. Remarkably, we achieve SOTA performances in LTA, outperforming the other methods that finetune the entire backbone, with a more evident benefit in the verbs edit distance. In PNR, we closely match other approaches while the improvement is more limited in the OSCC task. In this task, we notice a relevant impact of the Ego4D pretraining on the performance. We provide a more in-depth description of the differences between these methods in the supplementary materials." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 551, + 415, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 551, + 415, + 563 + ], + "spans": [ + { + "bbox": [ + 306, + 551, + 415, + 563 + ], + "type": "text", + "content": "4.3. Qualitative results" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": "Closest Task Prototypes We evaluate which are the closest task-specific prototypes in Fig. 5. In this example, OSCC is the novel task and the model has access to the prototypes of the learnt tasks. We focus on the prototypes from the AR and PNR tasks and group together nodes that share the same verb label to make the picture more readable. Looking at the number of occurrences of the prototypes, we observe that some nodes are more discriminative to detect a state change, e.g. 
peel and hold actions are typically associated (peel) or not (hold) with state changes, and therefore show more evident peaks for positive and negative classes, indicating the network is using these clues to solve the task." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18281" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 51, + 70, + 282, + 129 + ], + "blocks": [ + { + "bbox": [ + 51, + 70, + 282, + 129 + ], + "lines": [ + { + "bbox": [ + 51, + 70, + 282, + 129 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 282, + 129 + ], + "type": "table", + "html": "
<table><thead><tr><td>PNR</td><td>Ego4D Pt.</td><td>Loc. Error (s) (↓)</td></tr></thead>
<tbody><tr><td>CNN LSTM [25]</td><td>X</td><td>0.76</td></tr>
<tr><td>EgoVLP [42]</td><td>✓</td><td>0.67</td></tr>
<tr><td>EgoT2 [68]</td><td>X</td><td>0.66</td></tr>
<tr><td>EgoPack</td><td>X</td><td>0.66</td></tr></tbody></table>
", + "image_path": "a0c2aa8f7d439faa9470bfd4743bb6f38e97e2166c025bf3266e999de914314d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 51, + 130, + 282, + 198 + ], + "blocks": [ + { + "bbox": [ + 51, + 130, + 282, + 198 + ], + "lines": [ + { + "bbox": [ + 51, + 130, + 282, + 198 + ], + "spans": [ + { + "bbox": [ + 51, + 130, + 282, + 198 + ], + "type": "table", + "html": "
<table><thead><tr><td>OSCC</td><td>Ego4D Pt.</td><td>Accuracy (%)</td></tr></thead>
<tbody><tr><td>I3D RN-50 [25]</td><td>X</td><td>67.6</td></tr>
<tr><td>EgoVLP [42]</td><td>✓</td><td>74.0</td></tr>
<tr><td>EgoT2 (EgoVLP) [68]</td><td>✓</td><td>75.0</td></tr>
<tr><td>EgoT2 (I3D) [68]</td><td>X</td><td>71.0</td></tr>
<tr><td>EgoPack</td><td>X</td><td>72.1</td></tr></tbody></table>
", + "image_path": "de2f25a829463a8146789b54d966c70d4c024e3b3b59a0fcbed334d62aa878da.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 51, + 199, + 282, + 269 + ], + "blocks": [ + { + "bbox": [ + 51, + 199, + 282, + 269 + ], + "lines": [ + { + "bbox": [ + 51, + 199, + 282, + 269 + ], + "spans": [ + { + "bbox": [ + 51, + 199, + 282, + 269 + ], + "type": "table", + "html": "
<table><thead><tr><td>LTA</td><td>Ego4D Pt.</td><td>Verb (↓)</td><td>Noun (↓)</td><td>Action (↓)</td></tr></thead>
<tbody><tr><td>SlowFast [25]</td><td>X</td><td>0.739</td><td>0.780</td><td>0.943</td></tr>
<tr><td>EgoT2 [68]</td><td>X</td><td>0.722</td><td>0.764</td><td>0.935</td></tr>
<tr><td>HierVL [1]</td><td>✓</td><td>0.724</td><td>0.735</td><td>0.928</td></tr>
<tr><td>I-CVAE [43]</td><td>X</td><td>0.741</td><td>0.740</td><td>0.930</td></tr>
<tr><td>EgoPack</td><td>X</td><td>0.721</td><td>0.735</td><td>0.925</td></tr></tbody></table>
", + "image_path": "4124e64d5d8f20e654f0c7ae67fdcc581ef8c15c7df61d594cb753602b021fea.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 274, + 287, + 319 + ], + "lines": [ + { + "bbox": [ + 46, + 274, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 46, + 274, + 287, + 319 + ], + "type": "text", + "content": "Table 3. Comparison of EgoPack on the test set of the Ego4D benchmarks. For a fair comparison, we distinguish between methods pretrained on full Ego4D (✓) and those that have been trained only on the benchmark data " + }, + { + "bbox": [ + 46, + 274, + 287, + 319 + ], + "type": "inline_equation", + "content": "(\\mathcal{X})" + }, + { + "bbox": [ + 46, + 274, + 287, + 319 + ], + "type": "text", + "content": ", which includes EgoPack." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 51, + 328, + 264, + 404 + ], + "blocks": [ + { + "bbox": [ + 51, + 328, + 264, + 404 + ], + "lines": [ + { + "bbox": [ + 51, + 328, + 264, + 404 + ], + "spans": [ + { + "bbox": [ + 51, + 328, + 264, + 404 + ], + "type": "image", + "image_path": "2f17ff001c1c4ec47a1319a176c19e654c30aa954cca0ce9ecfe10e0a281603f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 129, + 407, + 206, + 416 + ], + "lines": [ + { + "bbox": [ + 129, + 407, + 206, + 416 + ], + "spans": [ + { + "bbox": [ + 129, + 407, + 206, + 416 + ], + "type": "text", + "content": "(a) AR Task Prototypes" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 51, + 421, + 264, + 498 + ], + "blocks": [ + { + "bbox": [ + 51, + 421, + 264, + 498 + ], + "lines": [ + { + "bbox": [ + 51, + 421, + 264, + 498 + ], + "spans": [ + { + "bbox": [ + 51, + 421, + 264, + 498 + ], + "type": "image", + "image_path": "c821f609a776382e3041d9fd57b41189ba631e628bb01fe75e6f84195a72ada0.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 127, + 500, + 208, + 510 + ], + "lines": [ + { + "bbox": [ + 127, + 500, + 208, + 510 + ], + "spans": [ + { + "bbox": [ + 127, + 500, + 208, + 510 + ], + "type": "text", + "content": "(b) PNR Task Prototypes" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 517, + 287, + 551 + ], + "lines": [ + { + "bbox": [ + 46, + 517, + 287, + 551 + ], + "spans": [ + { + "bbox": [ + 46, + 517, + 287, + 551 + ], + "type": "text", + "content": "Figure 5. Closest nodes to the OSCC samples among AR and PNR task prototypes. Some nodes appear to be more discriminative of the presence or absence of an object state change." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 557, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 557, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 557, + 288, + 714 + ], + "type": "text", + "content": "Confusion matrices In Fig. 6, we compare the confusion matrix of EgoPack with the MLP model for the top-20 largest verb and noun classes in the AR task, grouping the remaining classes in a \"rest\" pseudo-class. Overall, we observe an evident improvement on the noun labels, due to the positive effect of cross-tasks interaction. For example, the network appears to better disambiguate between objects that may appear at the same time in the scene, e.g. 
\"pants\" and \"cloth\" or \"bottle\" and \"lid\", which we speculate to be the result of a better ability of other tasks, namely OSCC, to identify active objects. Regarding the verbs, we also observe notable improvements, in addition to better recognition of verbs that are the temporal inverse of each other, e.g." + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 307, + 67, + 410, + 146 + ], + "blocks": [ + { + "bbox": [ + 307, + 67, + 410, + 146 + ], + "lines": [ + { + "bbox": [ + 307, + 67, + 410, + 146 + ], + "spans": [ + { + "bbox": [ + 307, + 67, + 410, + 146 + ], + "type": "image", + "image_path": "2c3b3965355a45dbad3becf47a2c4f9e78e02723f18ce7093eb77da2f001e841.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 336, + 149, + 386, + 159 + ], + "lines": [ + { + "bbox": [ + 336, + 149, + 386, + 159 + ], + "spans": [ + { + "bbox": [ + 336, + 149, + 386, + 159 + ], + "type": "text", + "content": "(a) Verb (MLP)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 438, + 68, + 539, + 146 + ], + "blocks": [ + { + "bbox": [ + 438, + 68, + 539, + 146 + ], + "lines": [ + { + "bbox": [ + 438, + 68, + 539, + 146 + ], + "spans": [ + { + "bbox": [ + 438, + 68, + 539, + 146 + ], + "type": "image", + "image_path": "322879cd38cbdf568bd80ab18f98c46cd658e83b7a251df651c7ea00b37cb42d.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 460, + 149, + 523, + 159 + ], + "lines": [ + { + "bbox": [ + 460, + 149, + 523, + 159 + ], + "spans": [ + { + "bbox": [ + 460, + 149, + 523, + 159 + ], + "type": "text", + "content": "(b) Verb (EgoPack)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 306, + 162, + 414, + 246 + ], + "blocks": [ + { + "bbox": [ + 306, + 162, + 414, + 246 + ], + "lines": [ + { + "bbox": [ + 306, + 162, + 414, + 246 + ], + "spans": [ + { + "bbox": [ + 306, + 162, + 414, + 246 + ], + "type": "image", + "image_path": "dcf9cbafae1971c11a58b3bca6956ab8ac72d84ad48e42ee1fae0887394f3542.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 335, + 249, + 388, + 258 + ], + "lines": [ + { + "bbox": [ + 335, + 249, + 388, + 258 + ], + "spans": [ + { + "bbox": [ + 335, + 249, + 388, + 258 + ], + "type": "text", + "content": "(c)Noun (MLP)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 433, + 163, + 541, + 246 + ], + "blocks": [ + { + "bbox": [ + 433, + 163, + 541, + 246 + ], + "lines": [ + { + "bbox": [ + 433, + 163, + 541, + 246 + ], + "spans": [ + { + "bbox": [ + 433, + 163, + 541, + 246 + ], + "type": "image", + "image_path": "d27174a9056017ffc8d0c894a426a221e6ef9eb6ea592042663eba3a83949b8f.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 456, + 249, + 522, + 259 + ], + "lines": [ + { + "bbox": [ + 456, + 249, + 522, + 259 + ], + "spans": [ + { + "bbox": [ + 456, + 249, + 522, + 259 + ], + "type": "text", + "content": "(d) Noun (EgoPack)" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 266, + 545, + 289 + ], + "lines": [ + { + "bbox": [ + 305, + 266, + 545, + 289 + ], + "spans": [ + { + "bbox": [ + 305, + 266, + 545, + 289 + ], + "type": "text", + "content": "Figure 6. 
Action Recognition confusion matrix of EgoPack compared to the MLP baseline for the top-20 verb and noun classes." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 299, + 545, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 299, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 305, + 299, + 545, + 323 + ], + "type": "text", + "content": "\"put\" and \"take\" or \"open\" and \"close\", thanks to the improved temporal reasoning of our unified model." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 336, + 469, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 336, + 469, + 348 + ], + "spans": [ + { + "bbox": [ + 306, + 336, + 469, + 348 + ], + "type": "text", + "content": "5. Conclusions and future work" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 357, + 545, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 357, + 545, + 548 + ], + "spans": [ + { + "bbox": [ + 304, + 357, + 545, + 548 + ], + "type": "text", + "content": "We presented EgoPack, a framework that allows knowledge sharing between different egocentric vision tasks, enabling an efficient use of the perspectives that each task can provide. We built EgoPack on top of a unified temporal architecture that can model distinct tasks with a shared backbone and minimal task-specific overhead. EgoPack overcomes the main limitation posed by traditional multi-task learning approaches, namely the unrealistic expectation that supervision is available for all tasks at training time. Indeed, the prototypes mechanism behind EgoPack allows to create a summary of what the model has learnt so far as it abstracts the task-specific knowledge that could be used in novel tasks. The model can then be updated to the any new task, while also peeking at the perspective of the previous tasks. Results on Ego4D validate our approach, showing competitive performance with other methods." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 561, + 408, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 561, + 408, + 574 + ], + "spans": [ + { + "bbox": [ + 306, + 561, + 408, + 574 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 582, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 547, + 714 + ], + "type": "text", + "content": "This study was carried out within the FAIR - Future Artificial Intelligence Research and received funding from the European Union Next-GenerationEU (PIANO NAZIONALE DI RIPRESA E RESILLENZA (PNRR) - MISSIONE 4 COMPONENTE 2, INVESTIMENTO 1.3 - D.D. 1555 11/10/2022, PE00000013). This manuscript reflects only the authors' views and opinions, neither the European Union nor the European Commission can be considered responsible for them. We acknowledge the CINECA award under the ISCRA initiative, for the availability of high performance computing resources and support." 
+ } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18282" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 286, + 712 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "text", + "content": "[1] Kumar Ashutosh, Rohit Girdhar, Lorenzo Torresani, and Kristen Grauman. Hiervl: Learning hierarchical videolanguage embeddings. In CVPR, 2023. 2, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 286, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 286, + 157 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 286, + 157 + ], + "type": "text", + "content": "[2] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 159, + 286, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 286, + 192 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 286, + 192 + ], + "type": "text", + "content": "[3] Alejandro Betancourt, Pietro Morerio, Carlo S Regazzoni, and Matthias Rauterberg. The evolution of first person vision methods: A survey. IEEE TCSVT, 2015. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 194, + 286, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 194, + 286, + 225 + ], + "spans": [ + { + "bbox": [ + 53, + 194, + 286, + 225 + ], + "type": "text", + "content": "[4] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In CVPR, 2017. 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 228, + 286, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 228, + 286, + 249 + ], + "spans": [ + { + "bbox": [ + 53, + 228, + 286, + 249 + ], + "type": "text", + "content": "[5] Rich Caruana. Multitask learning. Machine learning, 28: 41-75, 1997. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 251, + 286, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 251, + 286, + 294 + ], + "spans": [ + { + "bbox": [ + 53, + 251, + 286, + 294 + ], + "type": "text", + "content": "[6] Min-Hung Chen, Zsolt Kira, Ghassan AlRegib, Jaekwon Yoo, Ruxin Chen, and Jian Zheng. Temporal attentive alignment for large-scale video domain adaptation. In ICCV, 2019. 
2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 297, + 286, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 297, + 286, + 328 + ], + "spans": [ + { + "bbox": [ + 53, + 297, + 286, + 328 + ], + "type": "text", + "content": "[7] Ting Chen, Saurabh Saxena, Lala Li, Tsung-Yi Lin, David J Fleet, and Geoffrey E Hinton. A unified sequence interface for vision tasks. In NeurIPS, 2022. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 331, + 286, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 331, + 286, + 374 + ], + "spans": [ + { + "bbox": [ + 53, + 331, + 286, + 374 + ], + "type": "text", + "content": "[8] Tianlong Chen, Xuxi Chen, Xianzhi Du, Abdullah Rashwan, Fan Yang, Huizhong Chen, Zhangyang Wang, and Yeqing Li. Adamv-moe: Adaptive multi-task vision mixture-of-experts. In ICCV, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 376, + 286, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 376, + 286, + 419 + ], + "spans": [ + { + "bbox": [ + 53, + 376, + 286, + 419 + ], + "type": "text", + "content": "[9] Zhao Chen, Vijay Badrinarayanan, Chen-Yu Lee, and Andrew Rabinovich. Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In ICML, 2018. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 422, + 286, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 422, + 286, + 464 + ], + "spans": [ + { + "bbox": [ + 48, + 422, + 286, + 464 + ], + "type": "text", + "content": "[10] Yuanzheng Ci, Yizhou Wang, Meilin Chen, Shixiang Tang, Lei Bai, Feng Zhu, Rui Zhao, Fengwei Yu, Donglian Qi, and Wanli Ouyang. Unihcp: A unified model for human-centric perceptions. In CVPR, 2023. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 467, + 286, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 467, + 286, + 521 + ], + "spans": [ + { + "bbox": [ + 48, + 467, + 286, + 521 + ], + "type": "text", + "content": "[11] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, and Michael Wray. The epic-kitchens dataset: Collection, challenges and baselines. IEEE TPAMI, 2021. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 522, + 286, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 522, + 286, + 586 + ], + "spans": [ + { + "bbox": [ + 48, + 522, + 286, + 586 + ], + "type": "text", + "content": "[12] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Antonino Furnari, Jian Ma, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, and Michael Wray. Rescaling egocentric vision: Collection, pipeline and challenges for epic-kitchens-100. IJCV, 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 590, + 286, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 286, + 632 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 286, + 632 + ], + "type": "text", + "content": "[13] Eadom Dessalene, Michael Maynard, Chinmaya Devaraj, Cornelia Fermuller, and Yiannis Aloimonos. Egocentric object manipulation graphs. arXiv preprint arXiv:2006.03201, 2020. 
3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 635, + 286, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 286, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 286, + 677 + ], + "type": "text", + "content": "[14] Eadem Dessalene, Chinmaya Devaraj, Michael Maynard, Cornelia Fermuller, and Yiannis Aloimonos. Forecasting action through contact representations from first person video. IEEE TPAMI, 2021. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 680, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 286, + 712 + ], + "type": "text", + "content": "[15] Wenqi Fan, Yao Ma, Qing Li, Yuan He, Eric Zhao, Jiliang Tang, and Dawei Yin. Graph neural networks for social recommendation. In The world wide web conference, 2019. 2" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "text", + "content": "[16] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In ICCV, 2019. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 107, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 140 + ], + "type": "text", + "content": "[17] Chris Fifty, Ehsan Amid, Zhe Zhao, Tianhe Yu, Rohan Anil, and Chelsea Finn. Efficiently identifying task groupings for multi-task learning. In NeurIPS, 2021. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 142, + 545, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 142, + 545, + 174 + ], + "spans": [ + { + "bbox": [ + 307, + 142, + 545, + 174 + ], + "type": "text", + "content": "[18] Antonino Furnari and Giovanni Maria Farinella. Rolling-unrolling lstms for action anticipation from first-person video. IEEE TPAMI, 2020. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 177, + 545, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 177, + 545, + 220 + ], + "spans": [ + { + "bbox": [ + 307, + 177, + 545, + 220 + ], + "type": "text", + "content": "[19] Antonino Furnari, Sebastiano Battiato, Kristen Grauman, and Giovanni Maria Farinella. Next-active-object prediction from egocentric videos. Journal of Visual Communication and Image Representation, 2017. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 222, + 545, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 222, + 545, + 253 + ], + "spans": [ + { + "bbox": [ + 307, + 222, + 545, + 253 + ], + "type": "text", + "content": "[20] Ruohan Gao, Tae-Hyun Oh, Kristen Grauman, and Lorenzo Torresani. Listen to look: Action recognition by previewing audio. In CVPR, 2020. 
2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 256, + 545, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 256, + 545, + 289 + ], + "spans": [ + { + "bbox": [ + 307, + 256, + 545, + 289 + ], + "type": "text", + "content": "[21] Pallabi Ghosh, Nirat Saini, Larry S Davis, and Abhinav Shrivastava. All about knowledge graphs for actions. arXiv preprint arXiv:2008.12432, 2020. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 291, + 545, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 291, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 307, + 291, + 545, + 323 + ], + "type": "text", + "content": "[22] Pallabi Ghosh, Yi Yao, Larry Davis, and Ajay Divakaran. Stacked spatio-temporal graph convolutional networks for action segmentation. In WACV, 2020. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 325, + 545, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 325, + 545, + 346 + ], + "spans": [ + { + "bbox": [ + 307, + 325, + 545, + 346 + ], + "type": "text", + "content": "[23] Rohit Girdhar and Kristen Grauman. Anticipative video transformer. In ICCV, 2021. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 349, + 545, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 349, + 545, + 390 + ], + "spans": [ + { + "bbox": [ + 307, + 349, + 545, + 390 + ], + "type": "text", + "content": "[24] Rohit Girdhar, Mannat Singh, Nikhila Ravi, Laurens van der Maaten, Armand Joulin, and Ishan Misra. Omnivore: A single model for many visual modalities. In CVPR, 2022. 3, 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 394, + 545, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 394, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 307, + 394, + 545, + 448 + ], + "type": "text", + "content": "[25] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In CVPR, 2022. 2, 5, 7, 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 450, + 545, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 450, + 545, + 493 + ], + "spans": [ + { + "bbox": [ + 307, + 450, + 545, + 493 + ], + "type": "text", + "content": "[26] Jieuxiang Gu, Zhenhua Wang, Jason Kuen, Lianyang Ma, Amir Shahroudy, Bing Shuai, Ting Liu, Xingxing Wang, Gang Wang, Jianfei Cai, et al. Recent advances in convolutional neural networks. PR, 2018. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 495, + 545, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 495, + 545, + 528 + ], + "spans": [ + { + "bbox": [ + 307, + 495, + 545, + 528 + ], + "type": "text", + "content": "[27] Michelle Guo, Albert Haque, De-An Huang, Serena Yeung, and Li Fei-Fei. Dynamic task prioritization for multitask learning. In ECCV, 2018. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 530, + 545, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 530, + 545, + 551 + ], + "spans": [ + { + "bbox": [ + 307, + 530, + 545, + 551 + ], + "type": "text", + "content": "[28] Pengsheng Guo, Chen-Yu Lee, and Daniel Ulbricht. Learning to branch for multi-task learning. In ICML, 2020. 
3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 553, + 545, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 553, + 545, + 575 + ], + "spans": [ + { + "bbox": [ + 307, + 553, + 545, + 575 + ], + "type": "text", + "content": "[29] Will Hamilton, Zhitao Ying, and Jure Leskovec. Inductive representation learning on large graphs. In NeurIPS, 2017. 4" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 577, + 545, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 577, + 545, + 609 + ], + "spans": [ + { + "bbox": [ + 307, + 577, + 545, + 609 + ], + "type": "text", + "content": "[30] Dan Witzner Hansen and Qiang Ji. In the eye of the beholder: A survey of models for eyes and gaze. IEEE TPAMI, 2009. 2" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 612, + 545, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 612, + 545, + 643 + ], + "spans": [ + { + "bbox": [ + 307, + 612, + 545, + 643 + ], + "type": "text", + "content": "[31] Thomas E. Huang, Yifan Liu, Luc Van Gool, and Fisher Yu. Video task decathlon: Unifying image and video tasks in autonomous driving. In ICCV, 2023. 3" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 646, + 545, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 646, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 307, + 646, + 545, + 677 + ], + "type": "text", + "content": "[32] Yifei Huang, Minjie Cai, Zhenqiang Li, Feng Lu, and Yoichi Sato. Mutual context network for jointly estimating egocentric gaze and action. IEEE TIP, 2020. 3" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 680, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 545, + 712 + ], + "type": "text", + "content": "[33] Yifei Huang, Yusuke Sugano, and Yoichi Sato. Improving action segmentation via graph-based temporal reasoning. In CVPR, 2020. 2, 3" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18283" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[34] Youngkyoon Jang, Brian Sullivan, Casimir Ludwig, Iain Gilchrist, Dima Damen, and Walterio Mayol-Cuevas. Epictent: An egocentric video dataset for camping tent assembly. In ICCVW, 2019. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 287, + 150 + ], + "type": "text", + "content": "[35] Zhuoliang Kang, Kristen Grauman, and Fei Sha. Learning with whom to share in multi-task feature learning. In ICML, 2011. 
3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 153, + 287, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 153, + 287, + 186 + ], + "spans": [ + { + "bbox": [ + 48, + 153, + 287, + 186 + ], + "type": "text", + "content": "[36] Georgios Kapidis, Ronald Poppe, Elsbeth van Dam, Lucas Noldus, and Remco Veltkamp. Multitask learning to improve egocentric action recognition. In ICCVW, 2019. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 188, + 288, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 188, + 288, + 232 + ], + "spans": [ + { + "bbox": [ + 48, + 188, + 288, + 232 + ], + "type": "text", + "content": "[37] Steven Kearnes, Kevin McCloskey, Marc Berndl, Vijay Pande, and Patrick Riley. Molecular graph convolutions: moving beyond fingerprints. Journal of computer-aided molecular design, 2016. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 234, + 287, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 234, + 287, + 266 + ], + "spans": [ + { + "bbox": [ + 48, + 234, + 287, + 266 + ], + "type": "text", + "content": "[38] Alex Kendall, Yarin Gal, and Roberto Cipolla. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In CVPR, 2018. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 268, + 288, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 268, + 288, + 312 + ], + "spans": [ + { + "bbox": [ + 48, + 268, + 288, + 312 + ], + "type": "text", + "content": "[39] Asifullah Khan, Anabia Sohail, Umme Zahoora, and Aqsa Saeed Qureshi. A survey of the recent architectures of deep convolutional neural networks. Artificial intelligence review, 2020. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 314, + 287, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 314, + 287, + 357 + ], + "spans": [ + { + "bbox": [ + 48, + 314, + 287, + 357 + ], + "type": "text", + "content": "[40] Iasonas Kokkinos. Ethernet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In CVPR, 2017. 2, 3, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 359, + 287, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 359, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 48, + 359, + 287, + 403 + ], + "type": "text", + "content": "[41] Zewen Li, Fan Liu, Wenjie Yang, Shouheng Peng, and Jun Zhou. A survey of convolutional neural networks: analysis, applications, and prospects. IEEE transactions on neural networks and learning systems, 2021. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 405, + 287, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 405, + 287, + 449 + ], + "spans": [ + { + "bbox": [ + 48, + 405, + 287, + 449 + ], + "type": "text", + "content": "[42] Kevin Qinghong Lin, Jinping Wang, Mattia Soldan, Michael Wray, Rui Yan, Eric Z XU, Difei Gao, Rong-Cheng Tu, Wenzhe Zhao, Weijie Kong, et al. Egocentric video-language pretraining. In NeurIPS, 2022. 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 450, + 287, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 450, + 287, + 483 + ], + "spans": [ + { + "bbox": [ + 48, + 450, + 287, + 483 + ], + "type": "text", + "content": "[43] Esteve Valls Mascaró, Hyemin Ahn, and Dongheui Lee. 
Intention-conditioned long-term human egocentric action anticipation. In WACV, 2023. 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 485, + 287, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 485, + 287, + 517 + ], + "spans": [ + { + "bbox": [ + 48, + 485, + 287, + 517 + ], + "type": "text", + "content": "[44] Jonathan Munro and Dima Damen. Multi-modal domain adaptation for fine-grained action recognition. In CVPR, 2020. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 520, + 287, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 520, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 48, + 520, + 287, + 552 + ], + "type": "text", + "content": "[45] Tushar Nagarajan, Yanghao Li, Christoph Feichtenhofer, and Kristen Grauman. Ego-topo: Environment affordances from egocentric video. In CVPR, 2020. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 555, + 287, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 287, + 587 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 287, + 587 + ], + "type": "text", + "content": "[46] Adrián Núñez-Marcos, Gorka Azkune, and Ignacio Arganda-Carreras. Egocentric vision-based action recognition: A survey. Neurocomputing, 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 590, + 287, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 610 + ], + "type": "text", + "content": "[47] Francesca Pistilli and Giuseppe Averta. Graph learning in robotics: a survey. IEEE Access, 2023. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 613, + 287, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 655 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 655 + ], + "type": "text", + "content": "[48] Mirco Planamente, Chiara Plizzari, Simone Alberto Peirone, Barbara Caputo, and Andrea Bottino. Relative norm alignment for tackling domain shift in deep multi-modal classification. IJCV, 2024. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[49] Chiara Plizzari, Gabriele Goletto, Antonino Furnari, Siddhant Bansal, Francesco Ragusa, Giovanni Maria Farinella, Dima Damen, and Tatiana Tommasi. An outlook into the future of egocentric vision. arXiv preprint arXiv:2308.07123, 2023. 2" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "type": "text", + "content": "[50] Chiara Plizzari, Toby Perrett, Barbara Caputo, and Dima Damen. What can a cook in italy teach a mechanic in india? action recognition generalisation over scenarios and locations. In ICCV, 2023. 
2, 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 171 + ], + "type": "text", + "content": "[51] Shraman Pramanick, Yale Song, Sayan Nag, Kevin Qinghong Lin, Hardik Shah, Mike Zheng Shou, Rama Chellappa, and Pengchuan Zhang. Egovlpv2: Egocentric video-language pre-training with fusion in the backbone. In ICCV, 2023. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 173, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 173, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 307, + 173, + 545, + 205 + ], + "type": "text", + "content": "[52] Santhosh K. Ramakrishnan, Ziad Al-Halah, and Kristen Grauman. Spotem: Efficient video search for episodic memory. In ICLR, 2023. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 206, + 545, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 206, + 545, + 237 + ], + "spans": [ + { + "bbox": [ + 307, + 206, + 545, + 237 + ], + "type": "text", + "content": "[53] Maheen Rashid, Hedvig Kjellstrom, and Yong Jae Lee. Action graphs: Weakly-supervised action localization with graph convolution networks. In WACV, 2020. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 239, + 545, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 239, + 545, + 270 + ], + "spans": [ + { + "bbox": [ + 307, + 239, + 545, + 270 + ], + "type": "text", + "content": "[54] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 3, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 272, + 545, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 272, + 545, + 314 + ], + "spans": [ + { + "bbox": [ + 307, + 272, + 545, + 314 + ], + "type": "text", + "content": "[55] Alvaro Sanchez-Gonzalez, Jonathan Godwin, Tobias Pfaff, Rex Ying, Jure Leskovec, and Peter Battaglia. Learning to simulate complex physics with graph networks. In ICML, 2020. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 316, + 545, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 316, + 545, + 358 + ], + "spans": [ + { + "bbox": [ + 307, + 316, + 545, + 358 + ], + "type": "text", + "content": "[56] Fadime Sener, Dibyadip Chatterjee, Daniel Shelepov, Kun He, Dipika Singhania, Robert Wang, and Angela Yao. Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In CVPR, 2022. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 360, + 545, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 360, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 307, + 360, + 545, + 392 + ], + "type": "text", + "content": "[57] Haosen Shi, Shen Ren, Tianwei Zhang, and Sinno Jialin Pan. Deep multitask learning with progressive parameter sharing. In ICCV, 2023. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 394, + 545, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 394, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 307, + 394, + 545, + 425 + ], + "type": "text", + "content": "[58] Martin Simonovsky and Nikos Komodakis. Dynamic edge-conditioned filters in convolutional neural networks on graphs. In CVPR, 2017. 
2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 427, + 545, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 427, + 545, + 458 + ], + "spans": [ + { + "bbox": [ + 307, + 427, + 545, + 458 + ], + "type": "text", + "content": "[59] Ayan Sinha, Zhao Chen, Vijay Badrinarayanan, and Andrew Rabinovich. Gradient adversarial training of neural networks. arXiv preprint arXiv:1806.08028, 2018. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 460, + 545, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 460, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 307, + 460, + 545, + 492 + ], + "type": "text", + "content": "[60] Trevor Standley, Amir Zamir, Dawn Chen, Leonidas Guibas, Jitendra Malik, and Silvio Savarese. Which tasks should be learned together in multi-task learning? In ICML, 2020. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 494, + 545, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 494, + 545, + 525 + ], + "spans": [ + { + "bbox": [ + 307, + 494, + 545, + 525 + ], + "type": "text", + "content": "[61] Ximeng Sun, Rameswar Panda, Rogerio Feris, and Kate Saenko. Adashare: Learning what to share for efficient deep multi-task learning. In NeurIPS, 2020. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 526, + 545, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 526, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 307, + 526, + 545, + 568 + ], + "type": "text", + "content": "[62] Zehua Sun, Qiuhong Ke, Hossein Rahmani, Mohammed Bennamoun, Gang Wang, and Jun Liu. Human action recognition from various data modalities: A review. IEEE TPAMI, 2023. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 570, + 545, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 570, + 545, + 602 + ], + "spans": [ + { + "bbox": [ + 307, + 570, + 545, + 602 + ], + "type": "text", + "content": "[63] Simon Vandenhende, Stamatios Georgoulis, and Luc Van Gool. Mti-net: Multi-scale task interaction networks for multi-task learning. In ECCV, 2020. 3" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 604, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 604, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 604, + 545, + 635 + ], + "type": "text", + "content": "[64] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 3" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 636, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 636, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 307, + 636, + 545, + 668 + ], + "type": "text", + "content": "[65] Xiaohan Wang, Linchao Zhu, Heng Wang, and Yi Yang. Interactive prototype learning for egocentric action recognition. In ICCV, 2021. 3" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "text", + "content": "[66] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics, 2019. 
2" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "18284" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 464 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[67] Sen Wu, Hongyang R. Zhang, and Christopher Ré. Understanding and improving information transfer in multi-task learning. In ICLR, 2020. 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 107, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 107, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 49, + 107, + 287, + 139 + ], + "type": "text", + "content": "[68] Zihui Xue, Yale Song, Kristen Grauman, and Lorenzo Torresani. Egocentric video task translation. In CVPR, 2023. 2, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 141, + 287, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 141, + 287, + 174 + ], + "spans": [ + { + "bbox": [ + 49, + 141, + 287, + 174 + ], + "type": "text", + "content": "[69] Shen Yan, Xuehan Xiong, Anurag Arnab, Zhichao Lu, Mi Zhang, Chen Sun, and Cordelia Schmid. Multiview transformers for video recognition. In CVPR, 2022. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 175, + 287, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 175, + 287, + 207 + ], + "spans": [ + { + "bbox": [ + 49, + 175, + 287, + 207 + ], + "type": "text", + "content": "[70] Lijin Yang, Yifei Huang, Yusuke Sugano, and Yoichi Sato. Interact before align: Leveraging cross-modal knowledge for domain adaptive action recognition. In CVPR, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 209, + 287, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 209, + 287, + 241 + ], + "spans": [ + { + "bbox": [ + 49, + 209, + 287, + 241 + ], + "type": "text", + "content": "[71] Tianhe Yu, Saurabh Kumar, Abhishek Gupta, Sergey Levine, Karol Hausman, and Chelsea Finn. Gradient surgery for multi-task learning. In NeurIPS, 2020. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 243, + 287, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 243, + 287, + 285 + ], + "spans": [ + { + "bbox": [ + 49, + 243, + 287, + 285 + ], + "type": "text", + "content": "[72] Runhao Zeng, Wenbing Huang, Mingkui Tan, Yu Rong, Peilin Zhao, Junzhou Huang, and Chuang Gan. Graph convolutional networks for temporal action localization. In ICCV, 2019. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 288, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 288, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 49, + 288, + 287, + 319 + ], + "type": "text", + "content": "[73] Chen-Lin Zhang, Jianxin Wu, and Yin Li. Actionformer: Localizing moments of actions with transformers. In ECCV, 2022. 
1, 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 322, + 286, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 322, + 286, + 354 + ], + "spans": [ + { + "bbox": [ + 49, + 322, + 286, + 354 + ], + "type": "text", + "content": "[74] Yu Zhang and Qiang Yang. A survey on multi-task learning. IEEE Transactions on Knowledge and Data Engineering, 34 (12):5586-5609, 2021. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 355, + 286, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 355, + 286, + 387 + ], + "spans": [ + { + "bbox": [ + 49, + 355, + 286, + 387 + ], + "type": "text", + "content": "[75] Yue Zhao, Ishan Misra, Philipp Krahenbuhl, and Rohit Girdhar. Learning video representations from large language models. In CVPR, 2023. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 389, + 286, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 389, + 286, + 431 + ], + "spans": [ + { + "bbox": [ + 49, + 389, + 286, + 431 + ], + "type": "text", + "content": "[76] Zeyun Zhong, David Schneider, Michael Voit, Rainer Stiefelhagen, and Jürgen Beyerer. Anticipative feature fusion transformer for multi-modal action anticipation. In WACV, 2023. 1, 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 434, + 286, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 434, + 286, + 464 + ], + "spans": [ + { + "bbox": [ + 49, + 434, + 286, + 464 + ], + "type": "text", + "content": "[77] Bolei Zhou, Alex Andonian, Aude Oliva, and Antonio Torralba. Temporal relational reasoning in videos. In ECCV, 2018. 5" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "18285" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Bayesian Approach to OOD Robustness in Image Classification/d0e61d97-e025-4ae4-a494-3d44cf79404b_content_list.json b/2024/A Bayesian Approach to OOD Robustness in Image Classification/d0e61d97-e025-4ae4-a494-3d44cf79404b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..8895c06633670385b8623fbea9e4bfbce5a86372 --- /dev/null +++ b/2024/A Bayesian Approach to OOD Robustness in Image Classification/d0e61d97-e025-4ae4-a494-3d44cf79404b_content_list.json @@ -0,0 +1,1593 @@ +[ + { + "type": "text", + "text": "A Bayesian Approach to OOD Robustness in Image Classification", + "text_level": 1, + "bbox": [ + 153, + 130, + 818, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Prakhar Kaushik \nJohns Hopkins University \npkaushi1@jh.edu", + "bbox": [ + 132, + 181, + 339, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Adam Kortylewski \nUniversity of Freiburg \nakortyle@mpi-inf.mpg.de", + "bbox": [ + 380, + 181, + 588, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Alan Yuille Johns Hopkins University ayuille1@jh.edu", + "bbox": [ + 629, + 181, + 836, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + }, + { + "type": 
"text", + "text": "An important and unsolved problem in computer vision is to ensure that the algorithms are robust to changes in image domains. We address this problem in the scenario where we only have access to images from the target domains. Motivated by the challenges of the OOD-CV [45] benchmark where we encounter real world Out-of-Domain (OOD) nuisances and occlusion, we introduce a novel Bayesian approach to OOD robustness for object classification. Our work extends Compositional Neural Networks (CompNets), which have been shown to be robust to occlusion but degrade badly when tested on OOD data. We exploit the fact that CompNets contain a generative head defined over feature vectors represented by von Mises-Fisher (vMF) kernels, which correspond roughly to object parts, and can be learned without supervision. We obverse that some vMF kernels are similar between different domains, while others are not. This enables us to learn a transitional dictionary of vMF kernels that are intermediate between the source and target domains and train the generative model on this dictionary using the annotations on the source domain, followed by iterative refinement. This approach, termed Unsupervised Generative Transition (UGT), performs very well in OOD scenarios even when occlusion is present. UGT is evaluated on different OOD benchmarks including the OOD-CV dataset, several popular datasets (e.g., ImageNet-C [9]), artificial image corruptions (including adding occluders), and synthetic-to-real domain transfer, and does well in all scenarios.", + "bbox": [ + 76, + 300, + 473, + 724 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 753, + 209, + 768 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, machine learning algorithms have been extremely successful for tasks like object classification when evaluated on benchmarked datasets like ImageNet. But these successes require that the training and test data (or", + "bbox": [ + 75, + 779, + 470, + 840 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This work has been supported by Army Research Laboratory award W911NF2320008 and ONR with N00014-21-1-2812. A Kortylewski acknowledges support via his Emmy Noether Research Group funded by the German Science Foundation (DFG) under Grant No. 468670075.", + "bbox": [ + 75, + 851, + 470, + 900 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the source domain and the target domain data) be identically and independently distributed (IID) from some underlying source. However, in practice, it is important to ensure that the algorithms generalize to data that differ from the training data. For example, in real-world applications, an algorithm for car detection may encounter cars with unusual shapes and textures (Fig. 3), which did not occur in the training set.", + "bbox": [ + 496, + 268, + 893, + 391 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Existing OOD methods [9-12, 28] have shown success in dealing with robustness issues when evaluated on early robustness datasets, such as Imagenet-C [9], Imagenet-R [11], and Imagenet-A [12], where the domain differences are due to synthetic corruptions, adversarial images, rendered images, and similar factors [45]. But these algorithms performed less well on a newer benchmark, OODCV [45], which focuses on systematic analysis of real-world nuisances, e.g. changes in texture, 3D pose, weather, shape, and context. 
From a related perspective, OOD-CV studies the causal factors that result in the domain gap [4]. In addition, previous works have rarely been evaluated for robustness to occlusion, an important OOD robustness metric.", + "bbox": [ + 496, + 393, + 893, + 592 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work, we address OOD robustness on OOD-CV, and related datasets, focusing on real-world domain differences and occlusion. We build on a class of Bayesian neural models called Compositional Neural Networks (CompNets), as they have been shown to be robust to partial occlusion [20, 21, 36, 42]. This is achieved by replacing the discriminative head of a CNN with a generative model of the feature vectors based on the objects' spatial geometry. However, CompNets are fully supervised and are not robust to OOD nuisances. In this work, we develop an unsupervised approach, Unsupervised Generative Transition (UGT), which generalizes CompNets to OOD scenarios.", + "bbox": [ + 496, + 594, + 895, + 776 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "UGT relies on intuition that in OOD scenarios, the appearance of object parts is highly variable (due to changes like texture or weather), while the spatial geometry of objects is often fairly similar between domains. We analyze CompNets and modify them to take advantage of the intuition mentioned above. By introducing a transitional dictionary of von Mises-Fisher [17] kernels (Fig. 1), which shares the properties of both domains, we can intuitively", + "bbox": [ + 496, + 779, + 895, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "22988", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4a2af56e786ba51eb767d4e63b195f119c66421f7dc321df88dc6faa9a2fa515.jpg", + "image_caption": [ + "Figure 1. Illustration of the key principle underlying our Bayesian approach. Related work has shown that clusters of feature vectors learned in an unsupervised manner resemble part-like patterns [21, 39]. We observe that some feature clusters (represented here on a vMF manifold) are very similar in both IID and OOD data (illustrated in blue and red boxes), whereas for other feature clusters there is no corresponding equivalent in the other domain. Our Bayesian approach exploits this property by first learning a generative model of feature clusters and their spatial combinations on the IID data and subsequently adapting the model to OOD data via an unsupervised adaptation of the vMF cluster dictionary, while retaining the spatial relations between clusters." + ], + "image_footnote": [], + "bbox": [ + 117, + 88, + 851, + 220 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "learn the spatial geometry of the source and transfer it to the target domain. UGT leverages the property that the hierarchical structure of generative models like CompNets can be learned in a two-stage manner. 1) An unsupervised learning stage of a dictionary of neural network features, called vMF kernels, using clustering in both source and target domains. 
The vMF kernels intuitively represent local object part structures. 2) A supervised learning stage of the spatial relations of the vMF kernels on the source domain.", + "bbox": [ + 75, + 344, + 472, + 478 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We primarily evaluate UGT on the OOD-CV benchmark [45]. In addition, to challenge UGT, we add occluders to OOD-CV and create a new dataset called Occluded-OOD-CV (Sec. 4.1). We also test UGT on Imagenet-C corruptions and Synthetic-to-Real domain robustness. Our studies show that UGT performs well on all these tasks and significantly outperforms the SOTA baselines.", + "bbox": [ + 75, + 479, + 468, + 585 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We make several important contributions in this paper.", + "bbox": [ + 96, + 587, + 455, + 602 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. We model objects by a generative model on feature vectors. Our method, UGT, extends CompNets [21] by decoupling the learning into unsupervised learning of vMF kernels and supervised learning of the spatial geometry enabling us to learn transitional dictionaries.", + "2. UGT achieves state-of-the-art results on the real-world OOD robustness problem on the OOD-CV dataset [45] and demonstrates exceptional performance on generalizing under the synthetic corruptions of Imagenet-C.", + "3. UGT also achieves strong results for the Synthetic-to-Real scenario (UDAParts [24] to Pascal3d+) dataset.", + "4. We introduce the Occluded-OOD-CV dataset by adding occladers to OOD-CV and show that UGT is robust to this compounded problem of occlusion and nuisance." + ], + "bbox": [ + 76, + 604, + 468, + 823 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 76, + 843, + 225, + 858 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "OOD robustness can be considered a subset of the larger unsupervised domain adaptation problem and is closely re", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "lated to domain generalization and transfer learning. Although related to both, our work focuses on OOD robustness. Our aim is to generalize well to an unlabelled target domain which is parameterized by real world nuisance factors like weather, shape, pose, texture changes and partial occlusion - which often leads to drastic changes to visual scenes and objects not found in the source dataset.", + "bbox": [ + 496, + 344, + 890, + 449 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In the past few years, there has been an increase in the number of works [9-12, 28] that characterize model performance on OOD data and treat this as a measure of robustness. The common idea that underlies most works is to leverage a property of the unlabeled target domain to allow generalization of a model trained on the source domain. There have been successful efforts to use feature statistics to adapt to the new domain; e.g., Sun et al. [35] try to minimize domain shift by aligning the second-order statistics of source and target distributions; Bug et al. [1] employ feature aware normalization with gating elements from Long Short-Term Memory units for normalization among different spatial regions of interest. Some methods employ techniques based on adaptive batch normalization and weight normalisation [32]. 
Other methods include self-learning using entropy minimization [38], adaptive pseudo-labeling techniques [5, 14, 33, 34] and robust lost functions [6, 44].", + "bbox": [ + 496, + 450, + 892, + 705 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although, current works have been successful at dealing with robustness problems when evaluated on earlier robustness datasets [9, 11, 12] they have been shown to struggle with real world nuisances (OOD-CV [45]) and occlusion [16, 21]. Few generative Bayesian methods such as CompNets [21, 36, 39] have shown their relative robustness to occlusion, but still struggle with other OOD nuisances.", + "bbox": [ + 496, + 708, + 890, + 813 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 828, + 589, + 843 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We address OOD robustness from a Bayesian perspective which, to the best of our knowledge, is novel. Our starting point is a class of generative models, described in Sec. 3.1,", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "22989", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c64f7ca4d5d0801a0959d327d0671933cb3a7fd5d3ef194de6e05ec1403e385e.jpg", + "image_caption": [ + "Figure 2. Rough illustration of our Bayesian method. $(\\dashrightarrow, \\dashrightarrow)$ A DCNN backbone is used to extract the source (IID) $F^{\\mathcal{S}}$ and target (OOD) features $F^{\\mathcal{R}}$ . The source feature vectors $F^{\\mathcal{S}}$ are then used to learn the source vMF kernels that are then adapted to the transitional vMF kernels using target domain features $F^{\\mathcal{R}}$ and the adaptation coefficients $\\psi_{k}$ in an unsupervised manner. $(\\longrightarrow)$ Transitional Spatial coefficients $(A^{\\mathcal{R}})$ are then learned using the transitional vMF likelihood $L^{\\mathcal{R}}$ i.e. non-linear activation applied to a convolution of $F^{\\mathcal{S}}$ and transitional kernels using source labels. $(\\longrightarrow)$ These spatial coefficients are then finetuned $(A^{\\mathcal{R}'})$ using pseudo-scores $\\{\\hat{s}\\}$ generated using the transitional mixture likelihood $E^{\\mathcal{R}}$ of target domain features $F^{\\mathcal{R}}$ . $(\\longrightarrow)$ shows the final feedforward pipeline during inference." + ], + "image_footnote": [], + "bbox": [ + 138, + 88, + 831, + 319 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "which have been shown to be robust to occlusion [21] when not dealing with other OOD nuisances. We describe method motivation in Sec. 3.2 and the technical details in Sec. 3.3.", + "bbox": [ + 75, + 441, + 468, + 486 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Bayesian Neural Architecture", + "text_level": 1, + "bbox": [ + 76, + 494, + 341, + 508 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our base architecture is similar to CompNets [21] and is explained in this section to help readers unfamiliar with them. Our method extends this class of neural models by non-trivially modifying the training methodology to enable OOD robustness along with occlusion robustness.", + "bbox": [ + 75, + 517, + 468, + 590 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This class of models differs from conventional Deep Networks by replacing the discriminative head by a generative model of feature vectors. 
For each object $y$ we learn a generative model $P(F|y)$ for the feature vectors $F$ . This model is formulated as a mixture model $P(F|y) = \\sum_{m} P(F|y, m)$ where the mixture variable $m$ roughly corresponds to the viewpoint of the object. The conditional distributions $P(F|y, m)$ for the features are factorizable in terms of position so that $P(F|y, m) = \\prod_{a \\in \\mathcal{D}} P(f_a|y, m)$ , where $a \\in \\mathcal{D}$ specifies the position in the image. These distributions $P(f_a|y, m)$ are specified in terms of von Mises-Fisher (vMF) dictionaries, with parameters $\\Lambda = \\{\\sigma_k, \\mu_k\\}$ and by spatial coefficients with parameters $\\mathcal{A} = \\{\\alpha_{a,k}^{y,m}\\}$ . We use the following generative probability distribution for the neural features $F$ conditioned on an object $y$ [20, 21]:", + "bbox": [ + 75, + 590, + 470, + 801 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nP (F | y) = \\sum_ {m} P (F | y, m) = \\sum_ {m} \\prod_ {a \\in \\mathcal {D}} P _ {a} \\left(f _ {a} | y, m\\right) P (m), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 806, + 468, + 832 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nP _ {a} \\left(f _ {a} \\mid y, m\\right) = P _ {a} \\left(f _ {a} \\mid \\mathcal {A}, \\Lambda\\right) = \\sum_ {k} \\alpha_ {a, k} ^ {y, m} P \\left(f _ {a} \\mid \\sigma_ {k}, \\mu_ {k}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 101, + 835, + 468, + 863 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nP (f | \\sigma_ {k}, \\mu_ {k}) = \\frac {e ^ {\\sigma_ {k} \\mu_ {k} ^ {T} f}}{Z (\\sigma_ {k})}, \\| f \\| = 1, \\| \\mu_ {k} \\| = 1, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 101, + 864, + 468, + 898 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We typically use 4 mixture components in our method and $P(m)$ is an uniform prior over the mixture components. As shown in [21, 39] each vMF kernel can be qualitatively interpreted as a subpart of the object (i.e., all image patches with feature responses close to $\\mu_{k}$ look like visually similar object subparts). We use von Mises-Fisher distributions instead of Gaussian distributions because the feature vectors $f_{a}$ and the means $\\mu_{k}$ must have a unit norm [7, 8]. The spatial coefficients $\\mathcal{A} = \\{\\alpha_{a,k}^{y,m}\\}$ specify the probability that the vMF kernel $k$ occurs at the position $a$ conditioned on the object $y$ and its mixture component $m$ .", + "bbox": [ + 500, + 441, + 890, + 607 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Inference. After learning, inference on an image with feature vectors $F$ is performed by a forward pass which estimates which object is more likely to generate the features $F$ of the input image, $\\hat{y} = \\arg\\max_{y} P(F|y)$ [21, 36].", + "bbox": [ + 498, + 607, + 890, + 667 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Occlusion modeling. To make the model, described above, robust to occlusion (in non-OOD data), an outlier process is added to allow for some of the image features to be generated by the object and others by a separate outlier process [20, 36]. 
This is formalised by:", + "bbox": [ + 498, + 667, + 890, + 738 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nP (F | y) = \\prod_ {a \\in \\mathcal {D}} \\sum_ {m} P _ {a} \\left(f _ {a} | y, m\\right) ^ {z _ {a}} Q \\left(f _ {a}\\right) ^ {1 - z _ {a}} P (m) P \\left(z _ {a}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 739, + 890, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $Q(f_{a})$ is a vMF distribution for a feature generated by an occluder which can be estimated from non-annotated images [19, 21, 42]. The latent variable $z_{a} \\in \\{0,1\\}$ indicates whether pixel $a$ is occluded or not occluded ( $z_{a} = \\{1,0\\}$ respectively) and the prior $P(z_{a})$ indicates the prior probability of a pixel being occluded. Note that we could also, in theory, sum over $z$ (we currently take a max).", + "bbox": [ + 498, + 763, + 890, + 869 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Training CompNets [21, 36, 42] are trained end-to-end to optimize the model parameters $\\Lambda, \\mathcal{A}$ using the standard", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "22990", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "supervision for object classification (e.g., the mixture components and the vMF kernels are treated as latent variables). In an OOD scenario the image features no longer correspond well with the learned generative model and without labels, we cannot trivially finetune the model. UGT utilizes an insightful training strategy to solve this problem.", + "bbox": [ + 75, + 90, + 472, + 183 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Motivation on Generalizing to OOD Data", + "text_level": 1, + "bbox": [ + 76, + 193, + 431, + 209 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "UGT builds upon by the aforementioned Bayesian model because it gives a natural way to formulate an occluder process. These models, however, do not do well on OOD data (Sec. 4). To solve this problem in an unsupervised manner requires reformulation of the training process. We motivate our solution for OOD, UGT, in following stages.", + "bbox": [ + 75, + 215, + 468, + 306 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Firstly, the vMF kernel dictionaries (i.e., the subparts of the object) can be learnt without supervision and hence can be found on both the source (annotated) and the target (non-annotated) domains. Secondly, we observe that some of the vMF kernels are similar between different domains (intuitively some subparts are similar between both domains). Thirdly, we can build on this observation to learn a transitional dictionary, which encourages vMF kernels in both domains to be similar if possible, and which works well in both domains. 
Fourthly, we note that the spatial coefficients capture the spatial activity pattern of the vMF kernels and these patterns depend on the spatial structure of the objects and so are mostly invariant to the domain, which suggests that we can learn the spatial coefficient on the source domain (where annotations are available), provided we use the transitional dictionary of vMF kernels, and that these spatial coefficients give a good initial estimate for the spatial coefficients on the target domain (which can be improved by simple pseudo-labeling).", + "bbox": [ + 75, + 308, + 472, + 595 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the first stage we learn the vMF dictionaries $\\Lambda$ without supervision by maximum likelihood estimation (MLE) assuming that the feature vectors $\\{f_a\\}$ of all the images (and at all positions) in each domain are generated by a mixture of von-Mises-Fisher distributions $P(f|\\Lambda) = \\sum_{k} \\pi_k e^{\\sigma_k \\mu_k^T f} / Z[\\sigma_k]$ . This is essentially clustering similar to that used in earlier studies [21, 39]. After the $\\Lambda$ are learnt, if annotations are available (i.e., we know the object $y$ ) then we can learn the spatial coefficients $\\mathcal{A}$ from the data $\\{F_n\\}$ in the annotated (source) domain by MLE from the distribution $\\sum_{m} \\prod_{a \\in D} \\sum_{k} \\alpha_{a,k}^{y,m} P(f_a | \\sigma_k, \\mu_k)$ .", + "bbox": [ + 75, + 595, + 472, + 765 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the second stage, we compare the vMF dictionaries $(\\Lambda^S)$ and $(\\Lambda^T)$ on the source (S) and target (T) domain respectively. We observe that a subset of the dictionary vectors are similar, as measured by cosine similarity in the vMF feature space (Fig. 1). We conjecture that this is because a subset of the vMF kernels, which correspond roughly to object subparts [39], is invariant to the nuisance variables which cause the differences between the domains. For example, for an object like a car or bus, some subparts like", + "bbox": [ + 75, + 765, + 472, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "wheels and license plates may be very similar between the source and target domains but others may not (Fig. 1).", + "bbox": [ + 496, + 90, + 890, + 121 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "These observations motivate us to learn a transitional vMF dictionary $(\\Lambda^{\\mathcal{R}})$ . This dictionary is learnt by learning the dictionary on the target domain but adding a prior (or regularization constraint) that the dictionary elements in both domains are similar. Finally, we learn the spatial coefficients $\\mathcal{A}$ on the source domain, but using the transitional dictionary (Sec. 3.3.2). This allows us to utilize object geometry knowledge from the source domain in the target domain. As we show in our experiments and ablation (Sec. 4, Sec. 4.3), this model already works well on the target domain and can be improved by pseudo-labelling techniques.", + "bbox": [ + 496, + 121, + 893, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. 
Training UGT", + "text_level": 1, + "bbox": [ + 498, + 296, + 648, + 313 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our Bayesian method, UGT, involves 3 steps - 1) primarily, learning transitional dictionary $\\Lambda^{\\mathcal{R}}$ , 2) learning transitional spatial coefficients $\\mathcal{A}^{\\mathcal{R}}$ using $f^S$ and $\\Lambda^{\\mathcal{R}}$ , and lastly 3) fin-tuning the transitional parameters $(\\Lambda^{\\mathcal{R}},\\mathcal{A}^{\\mathcal{R}})$ using simple pseudo-labelling. Refer to Fig. 2 for a simple illustration.", + "bbox": [ + 496, + 319, + 893, + 397 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.1 Learning Transitional Dictionary", + "text_level": 1, + "bbox": [ + 498, + 412, + 785, + 429 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We initialize the transitional von Mises-Fisher(vMF) dictionary vectors with the learnt source domain vMF dictionary vectors, i.e., $\\Lambda^{\\mathcal{R}} = \\Lambda^{S}$ . The source domain vMF dictionaries i.e., $(\\Lambda^{\\mathcal{S}}(\\mu ,\\sigma))$ are learnt from the features $f^{\\mathcal{S}}$ in source domain by MLE as described in Sec. 3.1 using the EM algorithm [39]. We can learn the transitional vMF dictionary parameters $\\Lambda^{\\mathcal{R}}$ from the target domain feature vectors $f^{\\mathcal{R}}$ through a few ways. We can maximize the regularized likelihood shown in Eq. (5) using the EM algorithm used to calculate the source domain parameters. Eq. (5) shows the Bayesian parameterization of our transitional model and can be seen as a penalized or regularized form of maximum likelihood estimation. We penalize the distance between the initialized transitional mean vectors (which are the source parameters) and the learnt ones. This regularization (like others) also helps in avoiding overfitting. Since, we fix $\\sigma_{k}$ as constant to reduce computation, the normalization term $Z(\\sigma)$ reduces to a constant, and we can derive the penalized log-likelihood term as shown in Eq. (6). $\\psi$ is a adaptation parameter discussed later.", + "bbox": [ + 496, + 436, + 893, + 715 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} p \\left(f ^ {\\mathcal {R}} \\mid \\Lambda^ {\\mathcal {R}}\\right) = \\prod_ {n} \\sum_ {k} \\alpha_ {k} P \\left(f _ {a} \\mid \\sigma_ {k}, \\mu_ {k}\\right) \\\\ \\exp \\left(- \\psi_ {k} \\sum_ {k} \\left(\\left| \\left| \\mu_ {k} - \\mu_ {k} ^ {S} \\right| \\right|\\right)\\right) \\tag {5} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 504, + 722, + 890, + 782 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} l \\left(\\Lambda^ {\\mathcal {R}}\\right) = \\sum^ {n} \\log \\left(\\sum^ {k} \\pi_ {k} \\frac {e ^ {\\sigma_ {k} \\mu_ {k} ^ {T} f _ {i}}}{Z \\left(\\sigma_ {k}\\right)}\\right) - \\psi_ {k} \\sum^ {n} \\sum^ {k} \\left(\\left| \\left| \\mu_ {k} - \\mu_ {k} ^ {\\mathcal {S}} \\right| \\right|\\right) \\tag {6} \\\\ \\left| \\left| f \\right| \\right| = 1, \\left| \\left| \\mu_ {k} \\right| \\right| = 1, \\sigma = 1 \\Longrightarrow Z (\\sigma) = c o n s t. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 506, + 786, + 890, + 849 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The Expectation step for learning the transitional parameters is similar to the source version. 
In the first step, we calculate the summary statistics for the transitional parameters", + "bbox": [ + 496, + 857, + 893, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "22991", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Algorithm 1 Unsupervised Generative Transition", + "text_level": 1, + "bbox": [ + 78, + 90, + 406, + 106 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "1: Input: Set of source domain images $I^S = \\{I_1^S, \\dots, I_n^S\\}$ , target domain images $I^T = \\{I_1^T, \\dots, I_N^T\\}$ , source domain labels $y = \\{y_1^S, \\dots, y_n^S\\}$ , deep network backbone $\\Gamma(., \\zeta)$ , background images $\\mathcal{B}_{i=1}^r$", + "bbox": [ + 86, + 109, + 890, + 143 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2: Output: Target domain model parameters $\\mathcal{T} = (\\mathcal{A},\\Lambda)$ , background model $\\beta_{r}$", + "bbox": [ + 86, + 143, + 617, + 157 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3: procedure UGT $(I^{\\mathcal{S}}, I^T, y, \\Gamma, \\beta_r)$", + "bbox": [ + 86, + 157, + 331, + 172 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4: $\\{F^{\\mathcal{S}}\\} ,\\{F^{\\mathcal{R}}\\} \\longleftarrow \\Gamma ((\\{I^{\\mathcal{S}}\\} ,\\{I^T\\}),\\zeta)$", + "bbox": [ + 86, + 172, + 382, + 186 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5: \\(\\Lambda^{\\mathcal{S}}(\\mu_k)\\gets\\) cluster & MLE(\\({\\cal F}^{\\mathcal{S}}\\})", + "bbox": [ + 86, + 186, + 361, + 202 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "6: $\\Lambda_{initial}^{\\mathcal{R}}(\\mu) \\longleftarrow \\Lambda^{\\mathcal{S}}(\\mu_k)$", + "bbox": [ + 86, + 202, + 292, + 217 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "7: $\\Lambda^{\\mathcal{R}}(\\tilde{\\mu})\\longleftarrow \\mathrm{MLE}(F^T,\\Delta (\\psi ,\\Lambda^{\\mathcal{S}},\\Lambda^{\\mathcal{R}}))$", + "bbox": [ + 86, + 217, + 387, + 233 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "8: $\\{L^{\\mathcal{R}}\\} \\longleftarrow \\sum_{k}\\pi_{k}e^{\\sigma_{k}\\mu_{k}^{T}f^{\\mathcal{S}}} / Z[\\sigma_{k}](F*\\Lambda^{\\mathcal{R}}(\\mu_{k}))$", + "bbox": [ + 86, + 246, + 449, + 266 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "9: $\\mathcal{A}^{\\mathcal{R}}_{y_s,m}\\longleftrightarrow$ cluster&MLE $(\\{L^{\\mathcal{R}}\\} ,y_S)$", + "bbox": [ + 86, + 277, + 385, + 296 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "10: $y_{\\hat{T}} \\longleftarrow \\operatorname{argmax}_{y} P(F|\\Lambda^{\\mathcal{R}}, \\mathcal{A}^{\\mathcal{R}})$", + "11: $\\mathcal{A}_{y_{\\hat{T},m}}^{\\mathcal{R}'} \\longleftrightarrow \\text{cluster&MLE}(\\{L^{\\mathcal{R}}\\}, y_{\\hat{T}})$", + "12: $\\mathcal{T}\\longleftarrow$ optimize $(\\mathcal{L}_{\\mathrm{gce}} + \\psi_v\\mathcal{L} + \\psi_\\alpha \\mathcal{L})$" + ], + "bbox": [ + 81, + 306, + 392, + 358 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "13: end procedure", + "text_level": 1, + "bbox": [ + 81, + 358, + 210, + 372 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "> Extract source & target featuremaps from DCNN backbone", + "$\\triangleright$ Initialize source vMF kernels by kmeans & learn using MLE", + "$\\triangleright$ Initialise transitional vMF kernels with source vMF kernels", + "$\\triangleright$ Learn transitional vMF features using regularized MLE with target domain data (Sec. 3.3.1, Eq. 
(5)-Eq. (9))", + "$\\triangleright$ Compute regularized transitional vMF likelihood with source featuremaps and transitional vMF kernels", + "$\\triangleright$ Calculate spatial coefficients using transitional vMF likelihood and source feature vectors (Sec. 3.3.2)", + "$\\triangleright$ Pseudo-label target domain data using transitional model", + "$\\triangleright$ Finetune spatial coefficients using pseudolabelled data $y_{\\hat{T}}$", + "$\\triangleright$ Optionally, finetune entire model using $y_{\\hat{T}}$ (Eq. (11))" + ], + "bbox": [ + 480, + 172, + 890, + 358 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "using the new data. For posterior probability defined as", + "bbox": [ + 76, + 402, + 442, + 419 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nP (k \\mid f _ {i}, \\Lambda) = \\frac {\\pi_ {k} p \\left(f _ {i} \\mid \\mu_ {k} , \\sigma_ {k}\\right)}{\\sum^ {K} \\pi_ {k} p \\left(f _ {i} \\mid \\mu_ {k} , \\sigma_ {k}\\right)} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 422, + 468, + 455 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "for the $k^{th}$ mixture and where $p(f|\\mu_k,\\sigma_k)$ is defined in Eq. (3), we update the mixture parameters in the maximization step in a regularized manner as follows,", + "bbox": [ + 75, + 460, + 468, + 503 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\pi} _ {k} = \\nu \\left[ \\psi_ {k} ^ {\\pi} \\frac {1}{n} \\sum_ {i = 1} ^ {n} P \\left(k \\mid f _ {i}, \\Lambda\\right) + \\left(1 - \\psi_ {k} ^ {\\pi}\\right) \\pi_ {k} ^ {S} \\right] \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 510, + 468, + 545 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mu} _ {k} = \\psi_ {k} ^ {\\mu} \\mathcal {E} _ {k} + \\left(1 - \\psi_ {k} ^ {\\mu}\\right) \\mu_ {k} ^ {S} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 547, + 468, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where, $\\mathcal{E}_k$ is the first moment or mean of the $k^{\\text{th}}$ mixture calculated on the new data, $\\nu$ is a scaling parameter to ensure that $\\sum_{k} \\pi_{k} = 1$ and $\\psi_{k}$ is an adaptation coefficient which is defined for each parameter and mixture. It can be defined in a data-dependent manner [29], i.e., $\\psi_{k}^{\\mu, \\pi} = \\left( \\frac{\\omega_{k}}{P(k|f_{i},\\Lambda)} + 1 \\right)^{-1}$ where $w_{k}$ is an empirically set hyperparameter which controls the adaptation emphasis between source and transitional parameters. Empirically, we observed that the adaptation coefficient is not very sensitive to changes to its value and therefore, we increase it monotonically during the EM iterations. A $\\psi_{k}$ for a specific vMF kernel $\\mu_{k}$ at time-step $t$ in $\\Lambda^{\\mathcal{R}}$ stabilizes if the change in its likelihood component is below a threshold value over the previous EM iteration step t-1 and then $\\psi_{k}$ value. We find that only using the parameter update works well. 
For simpler datasets, even directly learning the transitional dictionary would suffice.", + "bbox": [ + 75, + 570, + 468, + 829 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.2 Learning Transitional Spatial Coefficients", + "text_level": 1, + "bbox": [ + 76, + 845, + 423, + 863 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After learning $\\Lambda^{\\mathcal{R}}$ , we use it to estimate the transitional spatial coefficients $(\\mathcal{A}^{\\mathcal{R}}(\\alpha))$ using the labeled source domain", + "bbox": [ + 75, + 869, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "features $f^S$ (using MLE). The spatial coefficients represent the expected activation of a calculated vMF kernel $\\mu_k$ at a position $a$ in the feature map for a specific class $y$ .", + "bbox": [ + 498, + 402, + 890, + 449 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nP _ {a} \\left(f _ {a} \\mid y _ {s}, m; \\mathcal {A} ^ {\\mathcal {R}}, \\Lambda^ {R}\\right) = \\sum_ {k} \\alpha_ {a, k} ^ {y _ {s}, m} P \\left(f _ {a} \\mid \\Lambda^ {\\mathcal {R}} \\left(\\sigma_ {k}, \\mu_ {k}\\right)\\right) \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 508, + 460, + 890, + 491 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We can leverage the learnt transitional vMF kernel dictionary $\\Lambda^{\\mathcal{R}}$ to learn spatial coefficients $\\mathcal{A}^{\\mathcal{R}}(\\alpha)$ which represent the spatial relationships of the vMF dictionary vectors over the source domain data $D_{S}$ . As these spatial coefficients $\\mathcal{A}^{\\mathcal{R}}$ are conditioned on $\\Lambda^{\\mathcal{R}}$ , they also correspond to parts of target domain features even when they are learned using $f^{S}$ , thus creating a transitional model with parameters $(\\Lambda^{\\mathcal{R}},\\mathcal{A}^{\\mathcal{R}})$ that we can use to classify target domain data.", + "bbox": [ + 496, + 502, + 890, + 623 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This combination of conditioned transitional vMF dictionary $(\\Lambda^{\\mathcal{R}})$ and spatial coefficients $(\\mathcal{A}^{\\mathcal{R}})$ can be leveraged to label a subset of target domain features, especially since we can focus on the subset of transitional vMF kernels $(\\Lambda^{\\mathcal{R}})$ which are similar to their source counterparts. We can use these pseudo labeled feature vectors $(y_{\\hat{T}})$ , along with $\\Lambda^{\\mathcal{R}}$ to finetune the current spatial coefficients $\\mathcal{A}^{\\mathcal{R}}$ which leads to improved spatial coefficients $\\mathcal{A}^{\\mathcal{R}'}$ .", + "bbox": [ + 496, + 625, + 890, + 744 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finetuning spatial coefficients. Transitional spatial coefficients $(\\mathcal{A}^{\\mathcal{R}})$ are initialized with the values describing the expected activation of transitional vMF dictionary vectors $\\Lambda^{\\mathcal{R}}(\\mu_k)$ for the source data features $f^{\\mathcal{S}}$ at a position $a$ on a feature map $f_{a}$ . Subsequently, we finetune these spatial coefficients $\\mathcal{A}^{\\mathcal{R}}$ using a subset of target domain images that present high activations for the robust set of transitional vMF dictionary vectors $\\Lambda^{\\mathcal{R}}$ . 
Optionally, we can also finetune $\\Lambda^{\\mathcal{R}}$ by relearning them without any initialization and", + "bbox": [ + 496, + 763, + 890, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "22992", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "regularization constraints. Although our model is trained by partitioning into two parts, it is still fully differentiable and trainable from end to end [20, 36, 42]. We use this model property to finetune the entire model. The loss function (Eq. (11)) consists of a generalized cross entropy [44] term calculated using the model predictions and two regularization parameters for the vMF dictionary and the spatial coefficient parameters. This is to encourage the vMF clusters to be similar to the feature vectors $f_{a}$ . In Eq. (11), $\\zeta_{\\{v,\\alpha\\}}$ represent the trade-off hyperparameters of the regularizing loss terms,", + "bbox": [ + 75, + 90, + 472, + 257 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {\\mathrm {g c e}} \\left(y _ {p r e d}, y _ {\\hat {T}}\\right) + \\zeta_ {v} \\mathcal {L} (F, \\Lambda) + \\zeta_ {\\alpha} \\mathcal {L} (F, \\mathcal {A}), \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 263, + 468, + 282 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For a constant vMF variance $\\sigma_{k}$ (which also reduces the normalisation term to a constant) and assuming hard assignment of features $f_{a}$ to vMF dictionary clusters[21],", + "bbox": [ + 76, + 287, + 468, + 333 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} \\left(F, \\Lambda^ {\\mathcal {R}}\\right) = - \\sum_ {a} \\max _ {k} \\log p \\left(f _ {a} \\mid \\Lambda^ {\\mathcal {R}} \\left(\\mu_ {k}\\right)\\right) \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 342, + 468, + 367 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} \\left(F, \\mathcal {A} ^ {\\mathcal {R} ^ {\\prime}}\\right) = - \\sum_ {a} \\left(1 - z _ {a}\\right) \\log \\left[ \\sum_ {k} \\alpha_ {a, k} ^ {y _ {\\hat {T}}, m} p \\left(f _ {a} \\mid \\Lambda^ {\\mathcal {R}} \\left(\\mu_ {k}\\right)\\right) \\right] \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 369, + 468, + 397 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Latent variable $z_{a}\\in \\{0,1\\}$ is explained in Sec. 3.1.", + "bbox": [ + 76, + 407, + 418, + 422 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 435, + 209, + 452 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our experiments evaluate robustness of vision classification models in an extended out-of-domain setup i.e., generalizing to target domains with individual nuisance factors and partial occlusion. This allows us to thoroughly evaluate the efficacy of current methods which have been shown to perform well on other OOD robustness datasets on OOD-CV[45] (which enables a systematic analysis of nuisances on real-world data), Occluded-OOD-CV (which allows us to evaluate models on a combination of partial occlusion with individual nuisances) and Imagenet-C corruptions (for analysis of synthetic corruptions). Lastly, we also show some initial results on Synthetic to Real OOD robustness using the UDAParts [24] dataset.", + "bbox": [ + 75, + 460, + 472, + 657 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. 
Setup and Data", + "text_level": 1, + "bbox": [ + 76, + 665, + 233, + 681 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets. For primary evaluation, we use the OOD-CV [45] dataset. OOD-CV dataset consists of test subcategories which vary from the training data in terms of a main nuisance factor, namely, context, weather, texture, pose and shape. We use $L0$ for the (0%) occlusion level to represent this data setup in Tab. 1 and Supplementary Sec. B.", + "bbox": [ + 75, + 688, + 468, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Occluded-OOD-CV. In addition to OOD-CV, we experiment with a more complex robustness analysis setup involving partial occlusion. In this setup, models that have been adapted in an unsupervised manner to target domains with nuisance factors are then evaluated on data with partial occlusion in addition to the real-world nuisances. For this purpose, we create a dataset named Occluded-OOD-CV where we superimpose occluders on the OOD-CV test", + "bbox": [ + 75, + 780, + 470, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "images objects in order to approximate real-life occlusion. These occluders have been cropped from the MS-COCO dataset, similar to [20] and are superimposed on objects in the OOD-CV test set. There are three levels of partial occlusions - $L1(20 - 40\\%)$ , $L2(40 - 60\\%)$ and $L3(60 - 80\\%)$ which allows us to diversely analyze the occlusion robustness of the model (in addition to individual nuisance factors). Fig. 3 shows some example images from our dataset. Previous works [18, 21] have shown that using cropped occluders, as done in Occluded-OOD-CV, is akin to the use of real occluders for classification evaluation. We also use", + "bbox": [ + 496, + 90, + 890, + 257 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/247b46dcecdd1129006577d352d0238e6c1a830e35f3d27817fe3e06676027ae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 266, + 640, + 333 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3e8b036d4dfbd44b03042a2f86d67ea5b222ece38e72c7102cadbc8063546a53.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 268, + 769, + 338 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/350cbe9ac78a261ffb8acc50be5b0ef545f2c17cff9365cbf4fc3c64ac59bc08.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 787, + 268, + 870, + 335 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7aee95b83785551127aae01db6a52fea3e177fdc028df72f2e8716beba8b022a.jpg", + "image_caption": [ + "Context (60-80%) Weather (20-40%) Texture (40-60%)" + ], + "image_footnote": [], + "bbox": [ + 504, + 337, + 620, + 404 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/10b86bdb032d1a58917c7bbe15b8a8370803129ac613383e8e00f4b2911825b2.jpg", + "image_caption": [ + "Figure 3. Occluded-OOD-CV dataset examples. Each object category is identified by its nuisance factor and occlusion percentage" + ], + "image_footnote": [], + "bbox": [ + 630, + 343, + 772, + 402 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f0b40b433831a803979e805e5df8d1ae295722fae4eeaba261a3c90aa992f2a9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 781, + 337, + 887, + 402 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Imagenet-C[9] corruptions in the Pascal3D+ dataset for robustness evaluation with conventionally used synthetic corruptions. 
We also evaluate models in a synthetic (UDAParts [24]) to real data (Pascal3D+ [41]) setup.", + "bbox": [ + 496, + 477, + 890, + 537 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In summary, we have 5 different real world nuisance data subcategories (context, weather, texture, pose, shape), at least seven synthetic corruption categories (fog, pixelate, motion blur, etc.), one synthetic source dataset and 4 partial occlusion levels (including no occlusion) for each experiment. We also run experiments on all the combined nuisance subcategories (Tab. 1). So, in total we have 24 sets of data and experiments for our (extended) OOD robustness setup on the OOD-CV dataset alone.", + "bbox": [ + 496, + 537, + 890, + 672 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Models. We compare our work with our baseline method CompNets [21], other well known recent works [30, 32] which have been shown to be SOTA on various robustness datasets [9, 11, 12] as well as many well-known UDA methods [3, 13, 15, 22, 23, 25-27, 40, 43]. We focus on VGG16 and Resnet-50 backbones as they have been commonly used in most current methods[20, 30, 32, 44].", + "bbox": [ + 496, + 674, + 890, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training Setup. All models are trained on the source data with corresponding labels. Models can access some unlabeled nuisance (target) data, which could be a single nuisance (OOD-CV, Imagenet-C), combined nuisances (Tab. 1) or real data (when source data are synthetic). Models do not have access to images with partial occlusion at any time, and partially occluded images are only used for inference. We also avoid using different types of data aug-", + "bbox": [ + 496, + 780, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "22993", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/58acb526a404c7f9ccd6897549d6f07f07ad56d247f160f6805dbeac29fa6acd.jpg", + "table_caption": [ + "Table 1. OOD-CV Nuisances Top-1 Classification Results. Occlusion levels greater than $0\\%$ represent Occluded-OOD-CV dataset." + ], + "table_footnote": [ + "** Pretrained Imagenet Backbone used (Resnet-50) / Pretrained UDA model used." + ], + "table_body": "
Method | Combined | Context | Weather
\(Occlusion\rightarrow\) | 0% | 20-40% | 40-60% | 60-80% | 0% | 20-40% | 40-60% | 60-80% | 0% | 20-40% | 40-60% | 60-80%
CDAN [25]**.760.531.420.380.710.541.436.397.745.476.335.299
BSP [2]**.753.506.401.351.610.511.419.385.730.391.266.254
MDD [43]**.780.551.469.410.761.531.436.410.802.439.306.271
MCD [31]**.772.556.461.403.798.523.426.374.810.447.336.286
MCC [15]**.785.582.492.434.730.577.454.420.767.503.376.362
FixBi [27]**.821.534.478.399.802.542.445.409.755.489.358.335
MIC [13]**.837.540.376.262.755.602.532.499.817.612.496.427
ToAlign [40]**.761.507.411.346.712.501.393.382.720.381.252.213
CST [23]**.840.579.539.477.687.491.452.411.813.558.397.356
DUA [26]**.699.523.480.403.667.471.434.401.701.465.391.210
DINE [22]**.835.600.493.443.867.515.418.397.798.423.290.261
RPL [30].664.430.346.300.675.457.368.315.642.247.138.122
BNA [32].653.426.343.298.580.397.342.278.635.295.179.171
CompNet [21].720.506.462.415.790.517.454.369.683.434.398.362
UGT (Ours).850.620.570.501.875.624.565.511.856.600.528.465
TexturePoseShape
CDAN [25]**.820.532.420.364.844.620.521.450.773.561.491.441
BSP [2]**.696.444.384.315.831.610.510.423.757.535.485.434
MDD [43]**.895.518.427.400.870.611.534.469.836.541.459.386
MCD [31]**.896.522.432.392.865.623.532.471.834.538.456.397
MCC [15]**.874.671.547.495.867.611.521.460.818.601.524.460
FixBi [27]**.854.574.445.369.842.533.472.446.801.500.435.373
MIC [13]**.821.706.631.576.799.613.509.455.807.608.565.467
ToAlign [40]**.594.413.312.273.788.574.503.418.719.548.460.391
CST [23]**.858.657.538.477.887.617.525.451.831.617.495.441
DUA [26]**.918.691.514.468.755.511.423.355.695.455.386.345
DINE [22]**.911.572.432.401.885.618.543.448.838.520.426.360
RPL [30].703.371.238.227.730.493.400.329.670.426.340.311
BNA [32].701.383.247.239.737.510.407.355.662.436.350.311
CompNet [21].747.539.462.426.768.581.538.458.698.466.451.400
UGT (Ours).936.726.665.635.892.632.555.481.852.644.601.567
", + "bbox": [ + 86, + 114, + 883, + 631 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/082e94c437441cd35dd2ef4e44cb7e9fe84cd6491af224fcd1219644e4ef33e2.jpg", + "table_caption": [ + "Table 2. Imagenet-C Corruptions on Pascal3D+ dataset - Classification Results (Vgg16)" + ], + "table_footnote": [], + "table_body": "
Model\nOcclusion→Elastic TransformGaussian BlurSnow
0%20-40%40-60%60-80%0%20-40%40-60%60-80%0%20-40%40-60%60-80%
RPL [30].830.597.461.371.855.541.403.320.842.592.435.408
BNA [32].793.601.498.400.833.618.484.300.767.627.542.454
CompNet [21].268.183.157.146.732.395.296.241.529.348.258.210
UGT (Ours).872.712.712.494.909.720.613.509.890.742.634.523
Motion BlurContrastFrost
RPL [30].862.629.481.373.901.610.433.321.850.670.511.402
BNA [32].844.623.481.355.899.601.401.315.845.654.501.399
CompNet [21].639.362.287.241.760.472.374.312.740.481.360.301
UGT (Ours).891.763.673.567.923.701.534.412.911.782.672.561
", + "bbox": [ + 86, + 684, + 883, + 868 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "22994", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/1670f7bf9d510b5da4eef1a2d006f7793f45f0726e0e8d68e6a09aa727931988.jpg", + "table_caption": [ + "Table 3. Ablation analysis for (a) OOD-CV [45] Combined (b) OOD-CV Texture (c) Imagenet-C (Snow) Corruption" + ], + "table_footnote": [], + "table_body": "
Occlusion→L0L1L2L3L0L1L2L3L0L1L2L3
Baseline(B).698.466.451.400.715.575.475.409.529.348.258.210
+ΛR+AR.816.598.524.498.785.660.559.515.781.671.582.480
+ΛR+AR'.852.644.601.567.843.764.656.623.885.742.634.523
", + "bbox": [ + 135, + 107, + 834, + 186 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "mentations and additional data training to have a fairer comparison amongst all the works. Although, our Bayesian model does not use pretrained Imagenet backbones for feature extraction for fairness, a number of our comparative methods [2, 15, 25, 26, 43] perform poorly without one, so we relax this constraint for them. Our method is still capable of surpassing them in terms of classification accuracy. Further details are provided in Supplementary Section C.", + "bbox": [ + 75, + 194, + 470, + 315 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2. Results", + "text_level": 1, + "bbox": [ + 76, + 323, + 171, + 338 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "OOD robustness to individual nuisances. Tab. 1 (L0 columns) shows classification results on entire OOD-CV test data (combined nuisances) as well as five individual nuisances. We see that our model achieves state-of-the-art results in all experiments. In Tab. 2, we observe that our model also performs exceedingly well when dealing with synthetic Imagenet-C corruptions. Refer to Supplementary Sec.C2 and Tables 5-11 for additional Imagenet-C results.", + "bbox": [ + 75, + 345, + 468, + 467 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Synthetic to Real. Tab. 4 shows our results on both normal and extended OOD robustness scenario in a synthetic to real setup, showing that our unsupervised method can robustly close the gap between its supervised counterpart while outperforming other methods by large margins.", + "bbox": [ + 75, + 484, + 468, + 561 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/0c1a5e98087aaf73f5a4fc36613f12541a68d28271554085881486153e2811e7.jpg", + "table_caption": [ + "Table 4. Synthetic (UDAParts) [24] to Real (Pascal3D+) [41] dataset - Classification Results on Resnet50" + ], + "table_footnote": [], + "table_body": "
Model0%20-40%40-60%60-80%
RPL [30].822.432.370.335
BNA [32].950.684.484.356
CompNet [21].940.650.475.347
UGT (Ours).992.957.861.753
", + "bbox": [ + 84, + 612, + 460, + 705 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Extended OOD robustness under partial Occlusion. In Tab. 1, Tab. 2 and Supplementary Tables 1-3, 5-11, our model outperforms other methods by significant margins in the extended OOD scenarios of nuisance parameters with partial occlusion. We observe that the performance of other models which have been adapted to the target domain data drops drastically when encountering partial occlusion along with nuisance factors. This underlines the increased complexity of the extended OOD robustness scenario relative to the vanilla OOD robustness setup and how our Bayesian model is able to perform exceedingly well compared to conventional methods.", + "bbox": [ + 75, + 719, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. Ablation Analysis", + "text_level": 1, + "bbox": [ + 500, + 193, + 671, + 209 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Tab. 3 and Supplementary Sec. D & Tables 12-17 show the extensive results of the ablation study for UGT, underlying how each component contributes to the overall compositional model. We can see that just calculating the transitional vMF kernel dictionary $(\\Lambda^{\\mathcal{R}})$ and the transitional spatial coefficients $\\mathcal{A}^{\\mathcal{R}}$ improves the results significantly over the baseline method[21]. Further finetuning the spatial coefficients $(\\mathcal{A}^{\\mathcal{R}'})$ using pseudo-labelled target domain features boosts the performance. We ablate our hypothesis regarding similar vMF kernels in source and target domains by visualizing image patches that are activated by similar cross-domain kernels (Supplementary Figures 9-11). We also ablate our hypothesis regarding robust spatial geometry by visualizing images activated by the same spatial coefficient in both source and target domains (using source and transitional vMF dictionaries) in Supp. Fig 4 and 7. Analysis of adaptation coefficient is discussed in Supp. Sec. E.", + "bbox": [ + 496, + 224, + 890, + 482 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion and Future Work", + "text_level": 1, + "bbox": [ + 498, + 520, + 769, + 536 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we addressed the problem of developing object classification algorithms that are robust to OOD factors such as weather, context and occlusion. We generalize CompNets[21] for OOD robustness by observing that they could be learned in two uncoupled steps: (i) unsupervised learning of a dictionary of vMF kernels (roughly corresponding to the subparts of the object) and (ii) supervised learning of the spatial structure of the objects (intuitively where the subparts occur). This enabled us to: (a) learn a transitional dictionary which captured the feature properties of both domains, and (b) learn the distribution of spatial structure on the source domain and transfer it to the target. This model is very successful and could be improved by simple pseudo-labeling techniques. Our empirical results on the OOD-CV[45], synthetic Imagenet-C corruptions, and the synthetic UDA-Parts dataset display the strong and versatile SOTA performance of our method. In addition, we developed a more challenging dataset Occluded-OOD-CV by introducing occlusion into OOD-CV and show that our Bayesian method, UGT, performed well in this difficult challenge. 
Our Bayesian approach could be extended to other tasks such as semantic segmentation, exploiting properties of CompNets[36, 37]. We give a qualitative proof of concept in the Supplementary.", + "bbox": [ + 496, + 553, + 890, + 888 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "22995", + "bbox": [ + 478, + 944, + 517, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] D. Bug, S. Schneider, A. Grote, E. Oswald, F. Feuerhake, J. Schüler, and D. Merhof. Context-based normalization of histological stains using deep convolutional features. Lecture Notes in Computer Science, pages 135-142, 2017. 2", + "[2] Xinyang Chen, Sinan Wang, Mingsheng Long, and Jianmin Wang. Transferability vs. discriminability: Batch spectral penalization for adversarial domain adaptation. In Proceedings of the 36th International Conference on Machine Learning, pages 1081-1090. PMLR, 2019. 7, 8", + "[3] Shuhao Cui, Shuhui Wang, Junbao Zhuo, Liang Li, Qingming Huang, and Qi Tian. Towards discriminability and diversity: Batch nuclear-norm maximization under label insufficient situations. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6", + "[4] Nathan Drenkow, Numair Sani, Ilya Shpitser, and Mathias Unberath. Robustness in deep learning for computer vision: Mind the gap? CoRR, abs/2112.00639, 2021. 1", + "[5] Aram Galstyan and Paul R Cohen. Empirical comparison of \"hard\" and \"soft\" label propagation for relational classification. In International Conference on Inductive Logic Programming, pages 98-111. Springer, Berlin, Heidelberg, 2007. 2", + "[6] Aritra Ghosh, Himanshu Kumar, and P. S. Sastry. Robust loss functions under label noise for deep neural networks, 2017. 2", + "[7] Siddharth Gopal and Yiming Yang. Von mises-fisher clustering models. In Proceedings of the 31st International Conference on Machine Learning, pages 154-162, Beijing, China, 2014. PMLR. 3", + "[8] Md Hasnat, Julien Bohné, Jonathan Milgram, Stéphane Gentic, Liming Chen, et al. von mises-fisher mixture model-based deep learning: Application to face verification. arXiv preprint arXiv:1706.04264, 2017. 3", + "[9] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations, 2019. 1, 2, 6", + "[10] Dan Hendrycks, Xiaoyuan Liu, Eric Wallace, Adam Dziedzic, Rishabh Krishnan, and Dawn Song. Pretrained transformers improve out-of-distribution robustness. Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, 2020.", + "[11] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization, 2021. 1, 2, 6", + "[12] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples, 2021. 1, 2, 6", + "[13] Lukas Hoyer, Dengxin Dai, Haoran Wang, and Luc Van Gool. Mic: Masked image consistency for context-enhanced domain adaptation, 2023. 6, 7", + "[14] Dong hyun Lee. Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. 
2" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Ying Jin, Ximei Wang, Mingsheng Long, and Jianmin Wang. Minimum class confusion for versatile domain adaptation, 2019. 6, 7, 8", + "[16] Prakhar Kaushik, Aayush Mishra, Adam Kortylewski, and Alan Yuille. Source-free and image-only unsupervised domain adaptation for category level object pose estimation, 2024. 2", + "[17] Toru Kitagawa and Jeff Rowley. von mises-fisher distributions and their statistical divergence, 2022. 1", + "[18] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. Pare: Part attention regressor for 3d human body estimation, 2021. 6", + "[19] Adam Kortylewski, Bernhard Egger, Andreas Schneider, Thomas Gereg, Andreas Morel-Forster, and Thomas Vetter. Empirically analyzing the effect of dataset biases on deep face recognition systems. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2018. 3", + "[20] Adam Kortylewski, Ju He, Qing Liu, and Alan Loddon Yuille. Compositional convolutional neural networks: A deep architecture with innate robustness to partial occlusion. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8937-8946, 2020. 1, 3, 6", + "[21] Adam Kortylewski, Qing Liu, Angtian Wang, Yihong Sun, and Alan Yuille. Compositional convolutional neural networks: A robust and interpretable model for object recognition under occlusion. International Journal of Computer Vision, 129(3):736-760, 2021. 1, 2, 3, 4, 6, 7, 8", + "[22] Jian Liang, Dapeng Hu, Jiashi Feng, and Ran He. Dine: Domain adaptation from single and multiple black-box predictors, 2022. 6, 7", + "[23] Hong Liu, Jianmin Wang, and Mingsheng Long. Cycle self-training for domain adaptation, 2021. 6, 7", + "[24] Qing Liu, Adam Kortylewski, Zhishuai Zhang, Zizhang Li, Mengqi Guo, Qihao Liu, Xiaoding Yuan, Jiteng Mu, Weichao Qiu, and Alan Yuille. Learning part segmentation through unsupervised domain adaptation from synthetic vehicles. In CVPR, 2022. 2, 6, 8", + "[25] Mingsheng Long, ZHANGJIE CAO, Jianmin Wang, and Michael I Jordan. Conditional adversarial domain adaptation. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2018. 6, 7, 8", + "[26] M. Jehanzeb Mirza, Jakub Micorek, Horst Possegger, and Horst Bischof. The norm must go on: Dynamic unsupervised domain adaptation by normalization, 2022. 7, 8", + "[27] Jaemin Na, Heechul Jung, Hyung Jin Chang, and Wonjun Hwang. Fixbi: Bridging domain spaces for unsupervised domain adaptation, 2021. 6, 7", + "[28] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. DoImagenet classifiers generalize toImagenet?, 2019. 1, 2", + "[29] Douglas A. Reynolds, Thomas F. Quatieri, and Robert B. Dunn. Speaker verification using adapted gaussian mixture models. Digital Signal Processing, 10(1):19-41, 2000. 5", + "[30] Evgenia Rusak, Steffen Schneider, Peter Gehler, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Adapting imagenet-scale models to complex distribution shifts with self-learning, 2021. 6, 7, 8" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "22996", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[31] Kuniaki Saito, Kohei Watanabe, Yoshitaka Ushiku, and Tatsuya Harada. 
Maximum classifier discrepancy for unsupervised domain adaptation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2018. 7", + "[32] Steffen Schneider, Evgenia Rusak, Luisa Eck, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Improving robustness against common corruptions by covariate shift adaptation, 2020. 2, 6, 7, 8", + "[33] Ozan Sener, Hyun Oh Song, Ashutosh Saxena, and Silvio Savarese. Learning transferrable representations for unsupervised domain adaptation. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2016. 2", + "[34] Hwanjun Song, Minseok Kim, Dongmin Park, Yooju Shin, and Jae-Gil Lee. Learning from noisy labels with deep neural networks: A survey, 2020. 2", + "[35] Baochen Sun, Jiashi Feng, and Kate Saenko. Correlation alignment for unsupervised domain adaptation. Advances in Computer Vision and Pattern Recognition, pages 153-171, 2017. 2", + "[36] Yihong Sun, Adam Kortylewski, and Alan Yuille. Weakly-supervised amodal instance segmentation with compositional priors. arXiv preprint arXiv:2010.13175, 2020. 1, 2, 3, 6, 8", + "[37] Angtian Wang, Yihong Sun, Adam Kortylewski, and Alan L Yuille. Robust object detection under occlusion with context-aware compositionalnets. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12645-12654, 2020. 8", + "[38] Dequan Wang, Evan Shelhamer, Shaoteng Liu, Bruno Olshausen, and Trevor Darrell. Tent: Fully test-time adaptation by entropy minimization, 2020. 2", + "[39] Jianyu Wang, Zhishuai Zhang, Cihang Xie, Yuyin Zhou, Vittal Premachandran, Jun Zhu, Lingxi Xie, and Alan Yuille. Visual concepts and compositional voting, 2017. 2, 3, 4", + "[40] Guoqiang Wei, Cuiling Lan, Wenjun Zeng, Zhizheng Zhang, and Zhibo Chen. Toalign: Task-oriented alignment for unsupervised domain adaptation, 2021. 6, 7", + "[41] Yu Xiang, Roozbeh Mottaghi, and Silvio Savarese. Beyond Pascal: A benchmark for 3d object detection in the wild. In IEEE Winter Conference on Applications of Computer Vision (WACV), 2014. 6, 8", + "[42] Xiaoding Yuan, Adam Kortylewski, Yihong Sun, and Alan Yuille. Robust instance segmentation through reasoning about multi-object occlusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11141-11150, 2021. 1, 3, 6", + "[43] Yuchen Zhang, Tianle Liu, Mingsheng Long, and Michael I. Jordan. Bridging theory and algorithm for domain adaptation, 2019. 6, 7, 8", + "[44] Zhilu Zhang and Mert R. Sabuncu. Generalized cross entropy loss for training deep neural networks with noisy labels, 2018. 2, 6", + "[45] Bingchen Zhao, Shaozuo Yu, Wufei Ma, Mingxin Yu, Shenxiao Mei, Angtian Wang, Ju He, Alan Yuille, and Adam Kortylewski. Ood-cv: A benchmark for robustness to out-of-distribution shifts of individual nuisances in natural images. Proceedings of the European Conference on Computer Vision (ECCV), 2022. 
1, 2, 6, 8" + ], + "bbox": [ + 78, + 92, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "22997", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Bayesian Approach to OOD Robustness in Image Classification/d0e61d97-e025-4ae4-a494-3d44cf79404b_model.json b/2024/A Bayesian Approach to OOD Robustness in Image Classification/d0e61d97-e025-4ae4-a494-3d44cf79404b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..3ccaa02fe38bb4c9cf103d5d7e9caec9ba69b95a --- /dev/null +++ b/2024/A Bayesian Approach to OOD Robustness in Image Classification/d0e61d97-e025-4ae4-a494-3d44cf79404b_model.json @@ -0,0 +1,2233 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.155, + 0.131, + 0.819, + 0.154 + ], + "angle": 0, + "content": "A Bayesian Approach to OOD Robustness in Image Classification" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.182, + 0.34, + 0.234 + ], + "angle": 0, + "content": "Prakhar Kaushik \nJohns Hopkins University \npkaushi1@jh.edu" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.182, + 0.589, + 0.234 + ], + "angle": 0, + "content": "Adam Kortylewski \nUniversity of Freiburg \nakortyle@mpi-inf.mpg.de" + }, + { + "type": "text", + "bbox": [ + 0.63, + 0.182, + 0.837, + 0.234 + ], + "angle": 0, + "content": "Alan Yuille Johns Hopkins University ayuille1@jh.edu" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.301, + 0.474, + 0.725 + ], + "angle": 0, + "content": "An important and unsolved problem in computer vision is to ensure that the algorithms are robust to changes in image domains. We address this problem in the scenario where we only have access to images from the target domains. Motivated by the challenges of the OOD-CV [45] benchmark where we encounter real world Out-of-Domain (OOD) nuisances and occlusion, we introduce a novel Bayesian approach to OOD robustness for object classification. Our work extends Compositional Neural Networks (CompNets), which have been shown to be robust to occlusion but degrade badly when tested on OOD data. We exploit the fact that CompNets contain a generative head defined over feature vectors represented by von Mises-Fisher (vMF) kernels, which correspond roughly to object parts, and can be learned without supervision. We obverse that some vMF kernels are similar between different domains, while others are not. This enables us to learn a transitional dictionary of vMF kernels that are intermediate between the source and target domains and train the generative model on this dictionary using the annotations on the source domain, followed by iterative refinement. This approach, termed Unsupervised Generative Transition (UGT), performs very well in OOD scenarios even when occlusion is present. 
UGT is evaluated on different OOD benchmarks including the OOD-CV dataset, several popular datasets (e.g., ImageNet-C [9]), artificial image corruptions (including adding occluders), and synthetic-to-real domain transfer, and does well in all scenarios." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.754, + 0.21, + 0.77 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.471, + 0.842 + ], + "angle": 0, + "content": "In recent years, machine learning algorithms have been extremely successful for tasks like object classification when evaluated on benchmarked datasets like ImageNet. But these successes require that the training and test data (or" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.852, + 0.471, + 0.901 + ], + "angle": 0, + "content": "This work has been supported by Army Research Laboratory award W911NF2320008 and ONR with N00014-21-1-2812. A Kortylewski acknowledges support via his Emmy Noether Research Group funded by the German Science Foundation (DFG) under Grant No. 468670075." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.27, + 0.895, + 0.392 + ], + "angle": 0, + "content": "the source domain and the target domain data) be identically and independently distributed (IID) from some underlying source. However, in practice, it is important to ensure that the algorithms generalize to data that differ from the training data. For example, in real-world applications, an algorithm for car detection may encounter cars with unusual shapes and textures (Fig. 3), which did not occur in the training set." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.395, + 0.895, + 0.593 + ], + "angle": 0, + "content": "Existing OOD methods [9-12, 28] have shown success in dealing with robustness issues when evaluated on early robustness datasets, such as Imagenet-C [9], Imagenet-R [11], and Imagenet-A [12], where the domain differences are due to synthetic corruptions, adversarial images, rendered images, and similar factors [45]. But these algorithms performed less well on a newer benchmark, OODCV [45], which focuses on systematic analysis of real-world nuisances, e.g. changes in texture, 3D pose, weather, shape, and context. From a related perspective, OOD-CV studies the causal factors that result in the domain gap [4]. In addition, previous works have rarely been evaluated for robustness to occlusion, an important OOD robustness metric." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.595, + 0.896, + 0.777 + ], + "angle": 0, + "content": "In this work, we address OOD robustness on OOD-CV, and related datasets, focusing on real-world domain differences and occlusion. We build on a class of Bayesian neural models called Compositional Neural Networks (CompNets), as they have been shown to be robust to partial occlusion [20, 21, 36, 42]. This is achieved by replacing the discriminative head of a CNN with a generative model of the feature vectors based on the objects' spatial geometry. However, CompNets are fully supervised and are not robust to OOD nuisances. In this work, we develop an unsupervised approach, Unsupervised Generative Transition (UGT), which generalizes CompNets to OOD scenarios." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.897, + 0.903 + ], + "angle": 0, + "content": "UGT relies on intuition that in OOD scenarios, the appearance of object parts is highly variable (due to changes like texture or weather), while the spatial geometry of objects is often fairly similar between domains. 
We analyze CompNets and modify them to take advantage of the intuition mentioned above. By introducing a transitional dictionary of von Mises-Fisher [17] kernels (Fig. 1), which shares the properties of both domains, we can intuitively" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "22988" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.089, + 0.852, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.234, + 0.895, + 0.318 + ], + "angle": 0, + "content": "Figure 1. Illustration of the key principle underlying our Bayesian approach. Related work has shown that clusters of feature vectors learned in an unsupervised manner resemble part-like patterns [21, 39]. We observe that some feature clusters (represented here on a vMF manifold) are very similar in both IID and OOD data (illustrated in blue and red boxes), whereas for other feature clusters there is no corresponding equivalent in the other domain. Our Bayesian approach exploits this property by first learning a generative model of feature clusters and their spatial combinations on the IID data and subsequently adapting the model to OOD data via an unsupervised adaptation of the vMF cluster dictionary, while retaining the spatial relations between clusters." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.345, + 0.473, + 0.479 + ], + "angle": 0, + "content": "learn the spatial geometry of the source and transfer it to the target domain. UGT leverages the property that the hierarchical structure of generative models like CompNets can be learned in a two-stage manner. 1) An unsupervised learning stage of a dictionary of neural network features, called vMF kernels, using clustering in both source and target domains. The vMF kernels intuitively represent local object part structures. 2) A supervised learning stage of the spatial relations of the vMF kernels on the source domain." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.481, + 0.47, + 0.586 + ], + "angle": 0, + "content": "We primarily evaluate UGT on the OOD-CV benchmark [45]. In addition, to challenge UGT, we add occluders to OOD-CV and create a new dataset called Occluded-OOD-CV (Sec. 4.1). We also test UGT on Imagenet-C corruptions and Synthetic-to-Real domain robustness. Our studies show that UGT performs well on all these tasks and significantly outperforms the SOTA baselines." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.588, + 0.457, + 0.603 + ], + "angle": 0, + "content": "We make several important contributions in this paper." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.606, + 0.469, + 0.68 + ], + "angle": 0, + "content": "1. We model objects by a generative model on feature vectors. Our method, UGT, extends CompNets [21] by decoupling the learning into unsupervised learning of vMF kernels and supervised learning of the spatial geometry enabling us to learn transitional dictionaries." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.684, + 0.469, + 0.743 + ], + "angle": 0, + "content": "2. UGT achieves state-of-the-art results on the real-world OOD robustness problem on the OOD-CV dataset [45] and demonstrates exceptional performance on generalizing under the synthetic corruptions of Imagenet-C." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.746, + 0.468, + 0.775 + ], + "angle": 0, + "content": "3. UGT also achieves strong results for the Synthetic-to-Real scenario (UDAParts [24] to Pascal3d+) dataset." 
+ }, + { + "type": "text", + "bbox": [ + 0.078, + 0.779, + 0.468, + 0.824 + ], + "angle": 0, + "content": "4. We introduce the Occluded-OOD-CV dataset by adding occladers to OOD-CV and show that UGT is robust to this compounded problem of occlusion and nuisance." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.606, + 0.469, + 0.824 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.844, + 0.227, + 0.859 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.871, + 0.47, + 0.901 + ], + "angle": 0, + "content": "OOD robustness can be considered a subset of the larger unsupervised domain adaptation problem and is closely re" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.345, + 0.892, + 0.45 + ], + "angle": 0, + "content": "lated to domain generalization and transfer learning. Although related to both, our work focuses on OOD robustness. Our aim is to generalize well to an unlabelled target domain which is parameterized by real world nuisance factors like weather, shape, pose, texture changes and partial occlusion - which often leads to drastic changes to visual scenes and objects not found in the source dataset." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.451, + 0.893, + 0.707 + ], + "angle": 0, + "content": "In the past few years, there has been an increase in the number of works [9-12, 28] that characterize model performance on OOD data and treat this as a measure of robustness. The common idea that underlies most works is to leverage a property of the unlabeled target domain to allow generalization of a model trained on the source domain. There have been successful efforts to use feature statistics to adapt to the new domain; e.g., Sun et al. [35] try to minimize domain shift by aligning the second-order statistics of source and target distributions; Bug et al. [1] employ feature aware normalization with gating elements from Long Short-Term Memory units for normalization among different spatial regions of interest. Some methods employ techniques based on adaptive batch normalization and weight normalisation [32]. Other methods include self-learning using entropy minimization [38], adaptive pseudo-labeling techniques [5, 14, 33, 34] and robust lost functions [6, 44]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.709, + 0.892, + 0.814 + ], + "angle": 0, + "content": "Although, current works have been successful at dealing with robustness problems when evaluated on earlier robustness datasets [9, 11, 12] they have been shown to struggle with real world nuisances (OOD-CV [45]) and occlusion [16, 21]. Few generative Bayesian methods such as CompNets [21, 36, 39] have shown their relative robustness to occlusion, but still struggle with other OOD nuisances." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.829, + 0.591, + 0.844 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We address OOD robustness from a Bayesian perspective which, to the best of our knowledge, is novel. Our starting point is a class of generative models, described in Sec. 
3.1," + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22989" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.14, + 0.089, + 0.833, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.33, + 0.895, + 0.416 + ], + "angle": 0, + "content": "Figure 2. Rough illustration of our Bayesian method. \\((\\dashrightarrow, \\dashrightarrow)\\) A DCNN backbone is used to extract the source (IID) \\(F^{\\mathcal{S}}\\) and target (OOD) features \\(F^{\\mathcal{R}}\\). The source feature vectors \\(F^{\\mathcal{S}}\\) are then used to learn the source vMF kernels that are then adapted to the transitional vMF kernels using target domain features \\(F^{\\mathcal{R}}\\) and the adaptation coefficients \\(\\psi_{k}\\) in an unsupervised manner. \\((\\longrightarrow)\\) Transitional Spatial coefficients \\((A^{\\mathcal{R}})\\) are then learned using the transitional vMF likelihood \\(L^{\\mathcal{R}}\\) i.e. non-linear activation applied to a convolution of \\(F^{\\mathcal{S}}\\) and transitional kernels using source labels. \\((\\longrightarrow)\\) These spatial coefficients are then finetuned \\((A^{\\mathcal{R}'})\\) using pseudo-scores \\(\\{\\hat{s}\\}\\) generated using the transitional mixture likelihood \\(E^{\\mathcal{R}}\\) of target domain features \\(F^{\\mathcal{R}}\\). \\((\\longrightarrow)\\) shows the final feedforward pipeline during inference." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.442, + 0.47, + 0.487 + ], + "angle": 0, + "content": "which have been shown to be robust to occlusion [21] when not dealing with other OOD nuisances. We describe method motivation in Sec. 3.2 and the technical details in Sec. 3.3." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.495, + 0.342, + 0.51 + ], + "angle": 0, + "content": "3.1. Bayesian Neural Architecture" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.518, + 0.47, + 0.592 + ], + "angle": 0, + "content": "Our base architecture is similar to CompNets [21] and is explained in this section to help readers unfamiliar with them. Our method extends this class of neural models by non-trivially modifying the training methodology to enable OOD robustness along with occlusion robustness." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.592, + 0.471, + 0.803 + ], + "angle": 0, + "content": "This class of models differs from conventional Deep Networks by replacing the discriminative head by a generative model of feature vectors. For each object \\(y\\) we learn a generative model \\(P(F|y)\\) for the feature vectors \\(F\\). This model is formulated as a mixture model \\(P(F|y) = \\sum_{m} P(F|y, m)\\) where the mixture variable \\(m\\) roughly corresponds to the viewpoint of the object. The conditional distributions \\(P(F|y, m)\\) for the features are factorizable in terms of position so that \\(P(F|y, m) = \\prod_{a \\in \\mathcal{D}} P(f_a|y, m)\\), where \\(a \\in \\mathcal{D}\\) specifies the position in the image. These distributions \\(P(f_a|y, m)\\) are specified in terms of von Mises-Fisher (vMF) dictionaries, with parameters \\(\\Lambda = \\{\\sigma_k, \\mu_k\\}\\) and by spatial coefficients with parameters \\(\\mathcal{A} = \\{\\alpha_{a,k}^{y,m}\\}\\). 
We use the following generative probability distribution for the neural features \\(F\\) conditioned on an object \\(y\\) [20, 21]:" + }, + { + "type": "equation", + "bbox": [ + 0.101, + 0.807, + 0.47, + 0.833 + ], + "angle": 0, + "content": "\\[\nP (F | y) = \\sum_ {m} P (F | y, m) = \\sum_ {m} \\prod_ {a \\in \\mathcal {D}} P _ {a} \\left(f _ {a} | y, m\\right) P (m), \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.102, + 0.836, + 0.47, + 0.864 + ], + "angle": 0, + "content": "\\[\nP _ {a} \\left(f _ {a} \\mid y, m\\right) = P _ {a} \\left(f _ {a} \\mid \\mathcal {A}, \\Lambda\\right) = \\sum_ {k} \\alpha_ {a, k} ^ {y, m} P \\left(f _ {a} \\mid \\sigma_ {k}, \\mu_ {k}\\right), \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.102, + 0.866, + 0.469, + 0.899 + ], + "angle": 0, + "content": "\\[\nP (f | \\sigma_ {k}, \\mu_ {k}) = \\frac {e ^ {\\sigma_ {k} \\mu_ {k} ^ {T} f}}{Z (\\sigma_ {k})}, \\| f \\| = 1, \\| \\mu_ {k} \\| = 1, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.442, + 0.892, + 0.608 + ], + "angle": 0, + "content": "We typically use 4 mixture components in our method and \\( P(m) \\) is an uniform prior over the mixture components. As shown in [21, 39] each vMF kernel can be qualitatively interpreted as a subpart of the object (i.e., all image patches with feature responses close to \\( \\mu_{k} \\) look like visually similar object subparts). We use von Mises-Fisher distributions instead of Gaussian distributions because the feature vectors \\( f_{a} \\) and the means \\( \\mu_{k} \\) must have a unit norm [7, 8]. The spatial coefficients \\( \\mathcal{A} = \\{\\alpha_{a,k}^{y,m}\\} \\) specify the probability that the vMF kernel \\( k \\) occurs at the position \\( a \\) conditioned on the object \\( y \\) and its mixture component \\( m \\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.608, + 0.892, + 0.669 + ], + "angle": 0, + "content": "Inference. After learning, inference on an image with feature vectors \\( F \\) is performed by a forward pass which estimates which object is more likely to generate the features \\( F \\) of the input image, \\( \\hat{y} = \\arg\\max_{y} P(F|y) \\) [21, 36]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.668, + 0.892, + 0.739 + ], + "angle": 0, + "content": "Occlusion modeling. To make the model, described above, robust to occlusion (in non-OOD data), an outlier process is added to allow for some of the image features to be generated by the object and others by a separate outlier process [20, 36]. This is formalised by:" + }, + { + "type": "equation", + "bbox": [ + 0.521, + 0.741, + 0.892, + 0.765 + ], + "angle": 0, + "content": "\\[\nP (F | y) = \\prod_ {a \\in \\mathcal {D}} \\sum_ {m} P _ {a} \\left(f _ {a} | y, m\\right) ^ {z _ {a}} Q \\left(f _ {a}\\right) ^ {1 - z _ {a}} P (m) P \\left(z _ {a}\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.765, + 0.892, + 0.871 + ], + "angle": 0, + "content": "where \\( Q(f_{a}) \\) is a vMF distribution for a feature generated by an occluder which can be estimated from non-annotated images [19, 21, 42]. The latent variable \\( z_{a} \\in \\{0,1\\} \\) indicates whether pixel \\( a \\) is occluded or not occluded (\\( z_{a} = \\{1,0\\} \\) respectively) and the prior \\( P(z_{a}) \\) indicates the prior probability of a pixel being occluded. Note that we could also, in theory, sum over \\( z \\) (we currently take a max)." 
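To make the generative head above concrete, here is a minimal NumPy sketch (an illustration under assumed array shapes, not the released CompNet/UGT code) of scoring one normalized feature map: per-kernel vMF log-likelihoods (Eq. 3 without its normalizer), mixed by the spatial coefficients (Eq. 2), a per-position outlier branch for occlusion (Eq. 4), and classification by the largest total score.

```python
import numpy as np

def vmf_scores(F, mu, sigma=1.0):
    """sigma * mu_k^T f_a for every position/kernel pair (Eq. 3 up to log Z(sigma)).
    F: (P, D) unit-norm features at P positions; mu: (K, D) unit-norm vMF means."""
    return sigma * F @ mu.T                                    # (P, K)

def class_log_likelihood(F, alpha, mu, occ_logp, prior_occ=0.1, sigma=1.0):
    """Occlusion-robust log P(F | y) for one class.
    alpha:    (M, P, K) spatial coefficients alpha_{a,k}^{y,m} for M mixtures.
    occ_logp: (P,) log-likelihood of each position under the occluder model Q."""
    vmf = np.exp(vmf_scores(F, mu, sigma))                     # (P, K)
    p_fg = np.einsum('mpk,pk->mp', alpha, vmf)                 # Eq. 2, per mixture and position
    log_fg = np.log(p_fg + 1e-12) + np.log(1.0 - prior_occ)
    log_bg = occ_logp + np.log(prior_occ)                      # outlier branch of Eq. 4
    per_pos = np.maximum(log_fg, log_bg[None, :])              # hard z_a, i.e. the per-pixel max
    # Sum over positions a; max over mixtures m (a simple stand-in for the sum with uniform P(m)).
    return per_pos.sum(axis=1).max()

def classify(F, per_class_params, occ_logp):
    """hat{y} = argmax_y log P(F | y); per_class_params maps y -> (alpha_y, mu)."""
    return max(per_class_params,
               key=lambda y: class_log_likelihood(F, *per_class_params[y], occ_logp))
```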
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Training CompNets [21, 36, 42] are trained end-to-end to optimize the model parameters \\(\\Lambda, \\mathcal{A}\\) using the standard" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "22990" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.184 + ], + "angle": 0, + "content": "supervision for object classification (e.g., the mixture components and the vMF kernels are treated as latent variables). In an OOD scenario the image features no longer correspond well with the learned generative model and without labels, we cannot trivially finetune the model. UGT utilizes an insightful training strategy to solve this problem." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.194, + 0.433, + 0.21 + ], + "angle": 0, + "content": "3.2. Motivation on Generalizing to OOD Data" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.217, + 0.47, + 0.308 + ], + "angle": 0, + "content": "UGT builds upon by the aforementioned Bayesian model because it gives a natural way to formulate an occluder process. These models, however, do not do well on OOD data (Sec. 4). To solve this problem in an unsupervised manner requires reformulation of the training process. We motivate our solution for OOD, UGT, in following stages." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.309, + 0.473, + 0.596 + ], + "angle": 0, + "content": "Firstly, the vMF kernel dictionaries (i.e., the subparts of the object) can be learnt without supervision and hence can be found on both the source (annotated) and the target (non-annotated) domains. Secondly, we observe that some of the vMF kernels are similar between different domains (intuitively some subparts are similar between both domains). Thirdly, we can build on this observation to learn a transitional dictionary, which encourages vMF kernels in both domains to be similar if possible, and which works well in both domains. Fourthly, we note that the spatial coefficients capture the spatial activity pattern of the vMF kernels and these patterns depend on the spatial structure of the objects and so are mostly invariant to the domain, which suggests that we can learn the spatial coefficient on the source domain (where annotations are available), provided we use the transitional dictionary of vMF kernels, and that these spatial coefficients give a good initial estimate for the spatial coefficients on the target domain (which can be improved by simple pseudo-labeling)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.597, + 0.473, + 0.766 + ], + "angle": 0, + "content": "In the first stage we learn the vMF dictionaries \\(\\Lambda\\) without supervision by maximum likelihood estimation (MLE) assuming that the feature vectors \\(\\{f_a\\}\\) of all the images (and at all positions) in each domain are generated by a mixture of von-Mises-Fisher distributions \\(P(f|\\Lambda) = \\sum_{k} \\pi_k e^{\\sigma_k \\mu_k^T f} / Z[\\sigma_k]\\). This is essentially clustering similar to that used in earlier studies [21, 39]. After the \\(\\Lambda\\) are learnt, if annotations are available (i.e., we know the object \\(y\\)) then we can learn the spatial coefficients \\(\\mathcal{A}\\) from the data \\(\\{F_n\\}\\) in the annotated (source) domain by MLE from the distribution \\(\\sum_{m} \\prod_{a \\in D} \\sum_{k} \\alpha_{a,k}^{y,m} P(f_a | \\sigma_k, \\mu_k)\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.766, + 0.473, + 0.902 + ], + "angle": 0, + "content": "In the second stage, we compare the vMF dictionaries \\((\\Lambda^S)\\) and \\((\\Lambda^T)\\) on the source (S) and target (T) domain respectively. We observe that a subset of the dictionary vectors are similar, as measured by cosine similarity in the vMF feature space (Fig. 1). We conjecture that this is because a subset of the vMF kernels, which correspond roughly to object subparts [39], is invariant to the nuisance variables which cause the differences between the domains. For example, for an object like a car or bus, some subparts like" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.122 + ], + "angle": 0, + "content": "wheels and license plates may be very similar between the source and target domains but others may not (Fig. 1)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.122, + 0.894, + 0.289 + ], + "angle": 0, + "content": "These observations motivate us to learn a transitional vMF dictionary \\((\\Lambda^{\\mathcal{R}})\\). This dictionary is learnt by learning the dictionary on the target domain but adding a prior (or regularization constraint) that the dictionary elements in both domains are similar. Finally, we learn the spatial coefficients \\(\\mathcal{A}\\) on the source domain, but using the transitional dictionary (Sec. 3.3.2). This allows us to utilize object geometry knowledge from the source domain in the target domain. As we show in our experiments and ablation (Sec. 4, Sec. 4.3), this model already works well on the target domain and can be improved by pseudo-labelling techniques." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.297, + 0.649, + 0.314 + ], + "angle": 0, + "content": "3.3. Training UGT" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.32, + 0.894, + 0.398 + ], + "angle": 0, + "content": "Our Bayesian method, UGT, involves 3 steps - 1) primarily, learning transitional dictionary \\(\\Lambda^{\\mathcal{R}}\\), 2) learning transitional spatial coefficients \\(\\mathcal{A}^{\\mathcal{R}}\\) using \\(f^S\\) and \\(\\Lambda^{\\mathcal{R}}\\), and lastly 3) fin-tuning the transitional parameters \\((\\Lambda^{\\mathcal{R}},\\mathcal{A}^{\\mathcal{R}})\\) using simple pseudo-labelling. Refer to Fig. 2 for a simple illustration." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.414, + 0.787, + 0.43 + ], + "angle": 0, + "content": "3.3.1 Learning Transitional Dictionary" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.437, + 0.895, + 0.716 + ], + "angle": 0, + "content": "We initialize the transitional von Mises-Fisher(vMF) dictionary vectors with the learnt source domain vMF dictionary vectors, i.e., \\(\\Lambda^{\\mathcal{R}} = \\Lambda^{S}\\). The source domain vMF dictionaries i.e., \\((\\Lambda^{\\mathcal{S}}(\\mu ,\\sigma))\\) are learnt from the features \\(f^{\\mathcal{S}}\\) in source domain by MLE as described in Sec. 3.1 using the EM algorithm [39]. We can learn the transitional vMF dictionary parameters \\(\\Lambda^{\\mathcal{R}}\\) from the target domain feature vectors \\(f^{\\mathcal{R}}\\) through a few ways. We can maximize the regularized likelihood shown in Eq. (5) using the EM algorithm used to calculate the source domain parameters. Eq. (5) shows the Bayesian parameterization of our transitional model and can be seen as a penalized or regularized form of maximum likelihood estimation. 
We penalize the distance between the initialized transitional mean vectors (which are the source parameters) and the learnt ones. This regularization (like others) also helps in avoiding overfitting. Since, we fix \\(\\sigma_{k}\\) as constant to reduce computation, the normalization term \\(Z(\\sigma)\\) reduces to a constant, and we can derive the penalized log-likelihood term as shown in Eq. (6). \\(\\psi\\) is a adaptation parameter discussed later." + }, + { + "type": "equation", + "bbox": [ + 0.505, + 0.723, + 0.892, + 0.784 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} p \\left(f ^ {\\mathcal {R}} \\mid \\Lambda^ {\\mathcal {R}}\\right) = \\prod_ {n} \\sum_ {k} \\alpha_ {k} P \\left(f _ {a} \\mid \\sigma_ {k}, \\mu_ {k}\\right) \\\\ \\exp \\left(- \\psi_ {k} \\sum_ {k} \\left(\\left| \\left| \\mu_ {k} - \\mu_ {k} ^ {S} \\right| \\right|\\right)\\right) \\tag {5} \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.507, + 0.787, + 0.892, + 0.85 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} l \\left(\\Lambda^ {\\mathcal {R}}\\right) = \\sum^ {n} \\log \\left(\\sum^ {k} \\pi_ {k} \\frac {e ^ {\\sigma_ {k} \\mu_ {k} ^ {T} f _ {i}}}{Z \\left(\\sigma_ {k}\\right)}\\right) - \\psi_ {k} \\sum^ {n} \\sum^ {k} \\left(\\left| \\left| \\mu_ {k} - \\mu_ {k} ^ {\\mathcal {S}} \\right| \\right|\\right) \\tag {6} \\\\ \\left| \\left| f \\right| \\right| = 1, \\left| \\left| \\mu_ {k} \\right| \\right| = 1, \\sigma = 1 \\Longrightarrow Z (\\sigma) = c o n s t. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.858, + 0.894, + 0.902 + ], + "angle": 0, + "content": "The Expectation step for learning the transitional parameters is similar to the source version. In the first step, we calculate the summary statistics for the transitional parameters" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "22991" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.091, + 0.408, + 0.107 + ], + "angle": 0, + "content": "Algorithm 1 Unsupervised Generative Transition" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.111, + 0.892, + 0.144 + ], + "angle": 0, + "content": "1: Input: Set of source domain images \\( I^S = \\{I_1^S, \\dots, I_n^S\\} \\), target domain images \\( I^T = \\{I_1^T, \\dots, I_N^T\\} \\), source domain labels \\( y = \\{y_1^S, \\dots, y_n^S\\} \\), deep network backbone \\( \\Gamma(., \\zeta) \\), background images \\( \\mathcal{B}_{i=1}^r \\)" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.144, + 0.619, + 0.158 + ], + "angle": 0, + "content": "2: Output: Target domain model parameters \\(\\mathcal{T} = (\\mathcal{A},\\Lambda)\\), background model \\(\\beta_{r}\\)" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.158, + 0.333, + 0.173 + ], + "angle": 0, + "content": "3: procedure UGT \\((I^{\\mathcal{S}}, I^T, y, \\Gamma, \\beta_r)\\)" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.173, + 0.383, + 0.188 + ], + "angle": 0, + "content": "4: \\(\\{F^{\\mathcal{S}}\\} ,\\{F^{\\mathcal{R}}\\} \\longleftarrow \\Gamma ((\\{I^{\\mathcal{S}}\\} ,\\{I^T\\}),\\zeta)\\)" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.188, + 0.362, + 0.203 + ], + "angle": 0, + "content": "5: \\(\\Lambda^{\\mathcal{S}}(\\mu_k)\\gets\\) cluster & MLE(\\({\\cal F}^{\\mathcal{S}}\\})" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.203, + 0.293, + 0.218 + ], + "angle": 0, + "content": "6: \\(\\Lambda_{initial}^{\\mathcal{R}}(\\mu) \\longleftarrow 
\\Lambda^{\\mathcal{S}}(\\mu_k)\\)" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.218, + 0.388, + 0.234 + ], + "angle": 0, + "content": "7: \\(\\Lambda^{\\mathcal{R}}(\\tilde{\\mu})\\longleftarrow \\mathrm{MLE}(F^T,\\Delta (\\psi ,\\Lambda^{\\mathcal{S}},\\Lambda^{\\mathcal{R}}))\\)" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.247, + 0.45, + 0.267 + ], + "angle": 0, + "content": "8: \\(\\{L^{\\mathcal{R}}\\} \\longleftarrow \\sum_{k}\\pi_{k}e^{\\sigma_{k}\\mu_{k}^{T}f^{\\mathcal{S}}} / Z[\\sigma_{k}](F*\\Lambda^{\\mathcal{R}}(\\mu_{k}))\\)" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.278, + 0.387, + 0.297 + ], + "angle": 0, + "content": "9: \\(\\mathcal{A}^{\\mathcal{R}}_{y_s,m}\\longleftrightarrow\\) cluster&MLE \\((\\{L^{\\mathcal{R}}\\} ,y_S)\\)" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.307, + 0.351, + 0.325 + ], + "angle": 0, + "content": "10: \\(y_{\\hat{T}} \\longleftarrow \\operatorname{argmax}_{y} P(F|\\Lambda^{\\mathcal{R}}, \\mathcal{A}^{\\mathcal{R}})\\)" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.325, + 0.393, + 0.343 + ], + "angle": 0, + "content": "11: \\(\\mathcal{A}_{y_{\\hat{T},m}}^{\\mathcal{R}'} \\longleftrightarrow \\text{cluster&MLE}(\\{L^{\\mathcal{R}}\\}, y_{\\hat{T}})\\)" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.343, + 0.382, + 0.359 + ], + "angle": 0, + "content": "12: \\(\\mathcal{T}\\longleftarrow\\) optimize \\((\\mathcal{L}_{\\mathrm{gce}} + \\psi_v\\mathcal{L} + \\psi_\\alpha \\mathcal{L})\\)" + }, + { + "type": "list", + "bbox": [ + 0.082, + 0.307, + 0.393, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.359, + 0.211, + 0.373 + ], + "angle": 0, + "content": "13: end procedure" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.173, + 0.888, + 0.188 + ], + "angle": 0, + "content": "> Extract source & target featuremaps from DCNN backbone" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.188, + 0.891, + 0.203 + ], + "angle": 0, + "content": "\\(\\triangleright\\) Initialize source vMF kernels by kmeans & learn using MLE" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.204, + 0.891, + 0.218 + ], + "angle": 0, + "content": "\\(\\triangleright\\) Initialise transitional vMF kernels with source vMF kernels" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.218, + 0.891, + 0.248 + ], + "angle": 0, + "content": "\\(\\triangleright\\) Learn transitional vMF features using regularized MLE with target domain data (Sec. 3.3.1, Eq. (5)-Eq. (9))" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.25, + 0.891, + 0.279 + ], + "angle": 0, + "content": "\\(\\triangleright\\) Compute regularized transitional vMF likelihood with source featuremaps and transitional vMF kernels" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.28, + 0.891, + 0.309 + ], + "angle": 0, + "content": "\\(\\triangleright\\) Calculate spatial coefficients using transitional vMF likelihood and source feature vectors (Sec. 
3.3.2)" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.31, + 0.872, + 0.324 + ], + "angle": 0, + "content": "\\(\\triangleright\\) Pseudo-label target domain data using transitional model" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.326, + 0.877, + 0.342 + ], + "angle": 0, + "content": "\\(\\triangleright\\) Finetune spatial coefficients using pseudolabelled data \\(y_{\\hat{T}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.343, + 0.845, + 0.359 + ], + "angle": 0, + "content": "\\(\\triangleright\\) Optionally, finetune entire model using \\(y_{\\hat{T}}\\) (Eq. (11))" + }, + { + "type": "list", + "bbox": [ + 0.482, + 0.173, + 0.891, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.403, + 0.443, + 0.42 + ], + "angle": 0, + "content": "using the new data. For posterior probability defined as" + }, + { + "type": "equation", + "bbox": [ + 0.165, + 0.424, + 0.47, + 0.456 + ], + "angle": 0, + "content": "\\[\nP (k \\mid f _ {i}, \\Lambda) = \\frac {\\pi_ {k} p \\left(f _ {i} \\mid \\mu_ {k} , \\sigma_ {k}\\right)}{\\sum^ {K} \\pi_ {k} p \\left(f _ {i} \\mid \\mu_ {k} , \\sigma_ {k}\\right)} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.461, + 0.47, + 0.505 + ], + "angle": 0, + "content": "for the \\(k^{th}\\) mixture and where \\(p(f|\\mu_k,\\sigma_k)\\) is defined in Eq. (3), we update the mixture parameters in the maximization step in a regularized manner as follows," + }, + { + "type": "equation", + "bbox": [ + 0.135, + 0.511, + 0.47, + 0.546 + ], + "angle": 0, + "content": "\\[\n\\hat {\\pi} _ {k} = \\nu \\left[ \\psi_ {k} ^ {\\pi} \\frac {1}{n} \\sum_ {i = 1} ^ {n} P \\left(k \\mid f _ {i}, \\Lambda\\right) + \\left(1 - \\psi_ {k} ^ {\\pi}\\right) \\pi_ {k} ^ {S} \\right] \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.136, + 0.548, + 0.469, + 0.567 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mu} _ {k} = \\psi_ {k} ^ {\\mu} \\mathcal {E} _ {k} + \\left(1 - \\psi_ {k} ^ {\\mu}\\right) \\mu_ {k} ^ {S} \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.571, + 0.47, + 0.83 + ], + "angle": 0, + "content": "where, \\(\\mathcal{E}_k\\) is the first moment or mean of the \\(k^{\\text{th}}\\) mixture calculated on the new data, \\(\\nu\\) is a scaling parameter to ensure that \\(\\sum_{k} \\pi_{k} = 1\\) and \\(\\psi_{k}\\) is an adaptation coefficient which is defined for each parameter and mixture. It can be defined in a data-dependent manner [29], i.e., \\(\\psi_{k}^{\\mu, \\pi} = \\left( \\frac{\\omega_{k}}{P(k|f_{i},\\Lambda)} + 1 \\right)^{-1}\\) where \\(w_{k}\\) is an empirically set hyperparameter which controls the adaptation emphasis between source and transitional parameters. Empirically, we observed that the adaptation coefficient is not very sensitive to changes to its value and therefore, we increase it monotonically during the EM iterations. A \\(\\psi_{k}\\) for a specific vMF kernel \\(\\mu_{k}\\) at time-step \\(t\\) in \\(\\Lambda^{\\mathcal{R}}\\) stabilizes if the change in its likelihood component is below a threshold value over the previous EM iteration step t-1 and then \\(\\psi_{k}\\) value. We find that only using the parameter update works well. For simpler datasets, even directly learning the transitional dictionary would suffice." 
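The adaptation step in Eqs. (7)-(9) above can be sketched as follows (a hedged illustration; the variable names, the fixed concentration, and the data-dependent form of the adaptation coefficient are assumptions in the spirit of the MAP-style adaptation cited in the text): responsibilities are computed on target-domain features, and the M-step interpolates the new sufficient statistics with the frozen source parameters through psi_k.

```python
import numpy as np

def transitional_em_step(F_t, mu, pi, mu_src, pi_src, sigma=1.0, omega=8.0):
    """One regularized EM update of the transitional vMF dictionary.

    F_t:            (N, D) unit-norm target-domain feature vectors.
    mu, pi:         current transitional means/mixing weights (initialized from the source).
    mu_src, pi_src: frozen source-domain parameters acting as the prior.
    omega:          relevance factor; larger values keep the update closer to the source.
    """
    # E-step (Eq. 7): responsibilities P(k | f_i, Lambda), with Z(sigma) constant.
    logits = sigma * F_t @ mu.T + np.log(pi + 1e-12)           # (N, K)
    r = np.exp(logits - logits.max(axis=1, keepdims=True))
    r /= r.sum(axis=1, keepdims=True)

    n_k = r.sum(axis=0)                                        # responsibility mass per kernel
    E_k = (r.T @ F_t) / (n_k[:, None] + 1e-12)                 # first moment on the new data

    # Data-dependent adaptation coefficient psi_k in [0, 1).
    psi = n_k / (n_k + omega)

    # M-step (Eqs. 8-9): interpolate new statistics with the source parameters.
    pi_new = psi * (n_k / len(F_t)) + (1.0 - psi) * pi_src
    pi_new /= pi_new.sum()                                     # the scaling nu, so the weights sum to 1
    mu_new = psi[:, None] * E_k + (1.0 - psi[:, None]) * mu_src
    mu_new /= np.linalg.norm(mu_new, axis=1, keepdims=True) + 1e-12  # keep ||mu_k|| = 1 as Eq. (3) requires
    return mu_new, pi_new
```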
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.847, + 0.424, + 0.864 + ], + "angle": 0, + "content": "3.3.2 Learning Transitional Spatial Coefficients" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.87, + 0.47, + 0.902 + ], + "angle": 0, + "content": "After learning \\(\\Lambda^{\\mathcal{R}}\\), we use it to estimate the transitional spatial coefficients \\((\\mathcal{A}^{\\mathcal{R}}(\\alpha))\\) using the labeled source domain" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.403, + 0.892, + 0.45 + ], + "angle": 0, + "content": "features \\( f^S \\) (using MLE). The spatial coefficients represent the expected activation of a calculated vMF kernel \\( \\mu_k \\) at a position \\( a \\) in the feature map for a specific class \\( y \\)." + }, + { + "type": "equation", + "bbox": [ + 0.509, + 0.461, + 0.892, + 0.492 + ], + "angle": 0, + "content": "\\[\nP _ {a} \\left(f _ {a} \\mid y _ {s}, m; \\mathcal {A} ^ {\\mathcal {R}}, \\Lambda^ {R}\\right) = \\sum_ {k} \\alpha_ {a, k} ^ {y _ {s}, m} P \\left(f _ {a} \\mid \\Lambda^ {\\mathcal {R}} \\left(\\sigma_ {k}, \\mu_ {k}\\right)\\right) \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.503, + 0.892, + 0.624 + ], + "angle": 0, + "content": "We can leverage the learnt transitional vMF kernel dictionary \\(\\Lambda^{\\mathcal{R}}\\) to learn spatial coefficients \\(\\mathcal{A}^{\\mathcal{R}}(\\alpha)\\) which represent the spatial relationships of the vMF dictionary vectors over the source domain data \\(D_{S}\\). As these spatial coefficients \\(\\mathcal{A}^{\\mathcal{R}}\\) are conditioned on \\(\\Lambda^{\\mathcal{R}}\\), they also correspond to parts of target domain features even when they are learned using \\(f^{S}\\), thus creating a transitional model with parameters \\((\\Lambda^{\\mathcal{R}},\\mathcal{A}^{\\mathcal{R}})\\) that we can use to classify target domain data." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.625, + 0.892, + 0.746 + ], + "angle": 0, + "content": "This combination of conditioned transitional vMF dictionary \\((\\Lambda^{\\mathcal{R}})\\) and spatial coefficients \\((\\mathcal{A}^{\\mathcal{R}})\\) can be leveraged to label a subset of target domain features, especially since we can focus on the subset of transitional vMF kernels \\((\\Lambda^{\\mathcal{R}})\\) which are similar to their source counterparts. We can use these pseudo labeled feature vectors \\((y_{\\hat{T}})\\), along with \\(\\Lambda^{\\mathcal{R}}\\) to finetune the current spatial coefficients \\(\\mathcal{A}^{\\mathcal{R}}\\) which leads to improved spatial coefficients \\(\\mathcal{A}^{\\mathcal{R}'}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Finetuning spatial coefficients. Transitional spatial coefficients \\((\\mathcal{A}^{\\mathcal{R}})\\) are initialized with the values describing the expected activation of transitional vMF dictionary vectors \\(\\Lambda^{\\mathcal{R}}(\\mu_k)\\) for the source data features \\(f^{\\mathcal{S}}\\) at a position \\(a\\) on a feature map \\(f_{a}\\). Subsequently, we finetune these spatial coefficients \\(\\mathcal{A}^{\\mathcal{R}}\\) using a subset of target domain images that present high activations for the robust set of transitional vMF dictionary vectors \\(\\Lambda^{\\mathcal{R}}\\). 
Optionally, we can also finetune \\(\\Lambda^{\\mathcal{R}}\\) by relearning them without any initialization and" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "22992" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.258 + ], + "angle": 0, + "content": "regularization constraints. Although our model is trained by partitioning into two parts, it is still fully differentiable and trainable from end to end [20, 36, 42]. We use this model property to finetune the entire model. The loss function (Eq. (11)) consists of a generalized cross entropy [44] term calculated using the model predictions and two regularization parameters for the vMF dictionary and the spatial coefficient parameters. This is to encourage the vMF clusters to be similar to the feature vectors \\( f_{a} \\). In Eq. (11), \\( \\zeta_{\\{v,\\alpha\\}} \\) represent the trade-off hyperparameters of the regularizing loss terms," + }, + { + "type": "equation", + "bbox": [ + 0.101, + 0.265, + 0.47, + 0.283 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {\\mathrm {g c e}} \\left(y _ {p r e d}, y _ {\\hat {T}}\\right) + \\zeta_ {v} \\mathcal {L} (F, \\Lambda) + \\zeta_ {\\alpha} \\mathcal {L} (F, \\mathcal {A}), \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.288, + 0.47, + 0.334 + ], + "angle": 0, + "content": "For a constant vMF variance \\(\\sigma_{k}\\) (which also reduces the normalisation term to a constant) and assuming hard assignment of features \\(f_{a}\\) to vMF dictionary clusters[21]," + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.343, + 0.47, + 0.368 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} \\left(F, \\Lambda^ {\\mathcal {R}}\\right) = - \\sum_ {a} \\max _ {k} \\log p \\left(f _ {a} \\mid \\Lambda^ {\\mathcal {R}} \\left(\\mu_ {k}\\right)\\right) \\tag {12}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.371, + 0.469, + 0.398 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} \\left(F, \\mathcal {A} ^ {\\mathcal {R} ^ {\\prime}}\\right) = - \\sum_ {a} \\left(1 - z _ {a}\\right) \\log \\left[ \\sum_ {k} \\alpha_ {a, k} ^ {y _ {\\hat {T}}, m} p \\left(f _ {a} \\mid \\Lambda^ {\\mathcal {R}} \\left(\\mu_ {k}\\right)\\right) \\right] \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.408, + 0.419, + 0.423 + ], + "angle": 0, + "content": "Latent variable \\(z_{a}\\in \\{0,1\\}\\) is explained in Sec. 3.1." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.436, + 0.21, + 0.453 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.462, + 0.473, + 0.658 + ], + "angle": 0, + "content": "Our experiments evaluate robustness of vision classification models in an extended out-of-domain setup i.e., generalizing to target domains with individual nuisance factors and partial occlusion. This allows us to thoroughly evaluate the efficacy of current methods which have been shown to perform well on other OOD robustness datasets on OOD-CV[45] (which enables a systematic analysis of nuisances on real-world data), Occluded-OOD-CV (which allows us to evaluate models on a combination of partial occlusion with individual nuisances) and Imagenet-C corruptions (for analysis of synthetic corruptions). Lastly, we also show some initial results on Synthetic to Real OOD robustness using the UDAParts [24] dataset." 
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.666, + 0.234, + 0.683 + ], + "angle": 0, + "content": "4.1. Setup and Data" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.47, + 0.78 + ], + "angle": 0, + "content": "Datasets. For primary evaluation, we use the OOD-CV [45] dataset. OOD-CV dataset consists of test subcategories which vary from the training data in terms of a main nuisance factor, namely, context, weather, texture, pose and shape. We use \\( L0 \\) for the (0%) occlusion level to represent this data setup in Tab. 1 and Supplementary Sec. B." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Occluded-OOD-CV. In addition to OOD-CV, we experiment with a more complex robustness analysis setup involving partial occlusion. In this setup, models that have been adapted in an unsupervised manner to target domains with nuisance factors are then evaluated on data with partial occlusion in addition to the real-world nuisances. For this purpose, we create a dataset named Occluded-OOD-CV where we superimpose occluders on the OOD-CV test" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.258 + ], + "angle": 0, + "content": "images objects in order to approximate real-life occlusion. These occluders have been cropped from the MS-COCO dataset, similar to [20] and are superimposed on objects in the OOD-CV test set. There are three levels of partial occlusions - \\( L1(20 - 40\\%) \\), \\( L2(40 - 60\\%) \\) and \\( L3(60 - 80\\%) \\) which allows us to diversely analyze the occlusion robustness of the model (in addition to individual nuisance factors). Fig. 3 shows some example images from our dataset. Previous works [18, 21] have shown that using cropped occluders, as done in Occluded-OOD-CV, is akin to the use of real occluders for classification evaluation. We also use" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.267, + 0.642, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.269, + 0.771, + 0.339 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.789, + 0.269, + 0.871, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.338, + 0.621, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.344, + 0.774, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.782, + 0.338, + 0.888, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.406, + 0.89, + 0.421 + ], + "angle": 0, + "content": "Context (60-80%) Weather (20-40%) Texture (40-60%)" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.432, + 0.892, + 0.461 + ], + "angle": 0, + "content": "Figure 3. Occluded-OOD-CV dataset examples. Each object category is identified by its nuisance factor and occlusion percentage" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.478, + 0.891, + 0.538 + ], + "angle": 0, + "content": "Imagenet-C[9] corruptions in the Pascal3D+ dataset for robustness evaluation with conventionally used synthetic corruptions. We also evaluate models in a synthetic (UDAParts [24]) to real data (Pascal3D+ [41]) setup." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.539, + 0.892, + 0.674 + ], + "angle": 0, + "content": "In summary, we have 5 different real world nuisance data subcategories (context, weather, texture, pose, shape), at least seven synthetic corruption categories (fog, pixelate, motion blur, etc.), one synthetic source dataset and 4 partial occlusion levels (including no occlusion) for each experiment. We also run experiments on all the combined nuisance subcategories (Tab. 1). So, in total we have 24 sets of data and experiments for our (extended) OOD robustness setup on the OOD-CV dataset alone." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.78 + ], + "angle": 0, + "content": "Models. We compare our work with our baseline method CompNets [21], other well known recent works [30, 32] which have been shown to be SOTA on various robustness datasets [9, 11, 12] as well as many well-known UDA methods [3, 13, 15, 22, 23, 25-27, 40, 43]. We focus on VGG16 and Resnet-50 backbones as they have been commonly used in most current methods[20, 30, 32, 44]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.781, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Training Setup. All models are trained on the source data with corresponding labels. Models can access some unlabeled nuisance (target) data, which could be a single nuisance (OOD-CV, Imagenet-C), combined nuisances (Tab. 1) or real data (when source data are synthetic). Models do not have access to images with partial occlusion at any time, and partially occluded images are only used for inference. We also avoid using different types of data aug-" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22993" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.097, + 0.09, + 0.873, + 0.104 + ], + "angle": 0, + "content": "Table 1. OOD-CV Nuisances Top-1 Classification Results. Occlusion levels greater than \\(0\\%\\) represent Occluded-OOD-CV dataset." + }, + { + "type": "table", + "bbox": [ + 0.087, + 0.116, + 0.884, + 0.632 + ], + "angle": 0, + "content": "
MethodCombinedContextWeather
\\(Occlusion\\rightarrow\\)0%20-40%40-60%60-80%0%20-40%40-60%60-80%0%20-40%40-60%60-80%
CDAN [25]**.760.531.420.380.710.541.436.397.745.476.335.299
BSP [2]**.753.506.401.351.610.511.419.385.730.391.266.254
MDD [43]**.780.551469.410.761.531.436.410.802.439.306.271
MCD [31]**.772.556.461.403.798.523.426.374.810.447.336.286
MCC [15]**.785.582.492.434.730.577.454.420.767.503.376.362
FixBi [27]**.821.534.478.399.802.542.445.409.755.489.358.335
MIC [13]**.837.540.376.262.755.602.532.499.817.612.496.427
ToAlign [40]**.761.507.411.346.712.501.393.382.720.381.252.213
CST [23]**.840.579.539.477.687.491.452.411.813.558.397.356
DUA [26]**.699.523.480.403.667.471.434.401.701.465.391.210
DINE [22]**.835.600.493.443.867.515.418.397.798.423.290.261
RPL [30].664.430.346.300.675.457.368.315.642.247.138.122
BNA [32].653.426.343.298.580.397.342.278.635.295.179.171
CompNet [21].720.506.462.415.790.517.454.369.683.434.398.362
UGT (Ours).850.620.570.501.875.624.565.511.856.600.528.465
TexturePoseShape
CDAN [25]**.820.532.420.364.844.620.521.450.773.561.491.441
BSP [2]**.696.444.384.315.831.610.510.423.757.535.485.434
MDD [43]**.895.518.427.400.870.611.534.469.836.541.459.386
MCD [31]**.896.522.432.392.865.623.532.471.834.538.456.397
MCC [15]**.874.671.547.495.867.611.521.460.818.601.524.460
FixBi [27]**.854.574.445.369.842.533.472.446.801.500.435.373
MIC [13]**.821.706.631.576.799.613.509.455.807.608.565.467
ToAlign [40]**.594.413.312.273.788.574.503.418.719.548.460.391
CST [23]**.858.657.538.477.887.617.525.451.831.617.495.441
DUA [26]**.918.691.514.468.755.511.423.355.695.455.386.345
DINE [22]**.911.572.432.401.885.618.543.448.838.520.426.360
RPL [30].703.371.238.227.730.493.400.329.670.426.340.311
BNA [32].701.383.247.239.737.510.407.355.662.436.350.311
CompNet [21].747.539.462.426.768.581.538.458.698.466.451.400
UGT (Ours).936.726.665.635.892.632.555.481.852.644.601.567
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.1, + 0.633, + 0.558, + 0.646 + ], + "angle": 0, + "content": "** Pretrained Imagenet Backbone used (Resnet-50) / Pretrained UDA model used." + }, + { + "type": "table_caption", + "bbox": [ + 0.223, + 0.659, + 0.746, + 0.674 + ], + "angle": 0, + "content": "Table 2. Imagenet-C Corruptions on Pascal3D+ dataset - Classification Results (Vgg16)" + }, + { + "type": "table", + "bbox": [ + 0.087, + 0.685, + 0.885, + 0.869 + ], + "angle": 0, + "content": "
Model\nOcclusion→Elastic TransformGaussian BlurSnow
0%20-40%40-60%60-80%0%20-40%40-60%60-80%0%20-40%40-60%60-80%
RPL [30].830.597.461.371.855.541.403.320.842.592.435.408
BNA [32].793.601.498.400.833.618.484.300.767.627.542.454
CompNet [21].268.183.157.146.732.395.296.241.529.348.258.210
UGT (Ours).872.712.712.494.909.720.613.509.890.742.634.523
Motion BlurContrastFrost
RPL [30].862.629.481.373.901.610.433.321.850.670.511.402
BNA [32].844.623.481.355.899.601.401.315.845.654.501.399
CompNet [21].639.362.287.241.760.472.374.312.740.481.360.301
UGT (Ours).891.763.673.567.923.701.534.412.911.782.672.561
" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22994" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.14, + 0.082, + 0.83, + 0.098 + ], + "angle": 0, + "content": "Table 3. Ablation analysis for (a) OOD-CV [45] Combined (b) OOD-CV Texture (c) Imagenet-C (Snow) Corruption" + }, + { + "type": "table", + "bbox": [ + 0.137, + 0.108, + 0.835, + 0.187 + ], + "angle": 0, + "content": "
Occlusion→L0L1L2L3L0L1L2L3L0L1L2L3
Baseline(B).698.466.451.400.715.575.475.409.529.348.258.210
+ΛR+AR.816.598.524.498.785.660.559.515.781.671.582.480
+ΛR+AR'.852.644.601.567.843.764.656.623.885.742.634.523
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.195, + 0.471, + 0.316 + ], + "angle": 0, + "content": "mentations and additional data training to have a fairer comparison amongst all the works. Although, our Bayesian model does not use pretrained Imagenet backbones for feature extraction for fairness, a number of our comparative methods [2, 15, 25, 26, 43] perform poorly without one, so we relax this constraint for them. Our method is still capable of surpassing them in terms of classification accuracy. Further details are provided in Supplementary Section C." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.324, + 0.172, + 0.339 + ], + "angle": 0, + "content": "4.2. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.347, + 0.47, + 0.468 + ], + "angle": 0, + "content": "OOD robustness to individual nuisances. Tab. 1 (L0 columns) shows classification results on entire OOD-CV test data (combined nuisances) as well as five individual nuisances. We see that our model achieves state-of-the-art results in all experiments. In Tab. 2, we observe that our model also performs exceedingly well when dealing with synthetic Imagenet-C corruptions. Refer to Supplementary Sec.C2 and Tables 5-11 for additional Imagenet-C results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.486, + 0.47, + 0.563 + ], + "angle": 0, + "content": "Synthetic to Real. Tab. 4 shows our results on both normal and extended OOD robustness scenario in a synthetic to real setup, showing that our unsupervised method can robustly close the gap between its supervised counterpart while outperforming other methods by large margins." + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.574, + 0.469, + 0.601 + ], + "angle": 0, + "content": "Table 4. Synthetic (UDAParts) [24] to Real (Pascal3D+) [41] dataset - Classification Results on Resnet50" + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.613, + 0.462, + 0.706 + ], + "angle": 0, + "content": "
Model0%20-40%40-60%60-80%
RPL [30].822.432.370.335
BNA [32].950.684.484.356
CompNet [21].940.650.475.347
UGT (Ours).992.957.861.753
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Extended OOD robustness under partial Occlusion. In Tab. 1, Tab. 2 and Supplementary Tables 1-3, 5-11, our model outperforms other methods by significant margins in the extended OOD scenarios of nuisance parameters with partial occlusion. We observe that the performance of other models which have been adapted to the target domain data drops drastically when encountering partial occlusion along with nuisance factors. This underlines the increased complexity of the extended OOD robustness scenario relative to the vanilla OOD robustness setup and how our Bayesian model is able to perform exceedingly well compared to conventional methods." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.194, + 0.673, + 0.21 + ], + "angle": 0, + "content": "4.3. Ablation Analysis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.225, + 0.892, + 0.483 + ], + "angle": 0, + "content": "Tab. 3 and Supplementary Sec. D & Tables 12-17 show the extensive results of the ablation study for UGT, underlying how each component contributes to the overall compositional model. We can see that just calculating the transitional vMF kernel dictionary \\((\\Lambda^{\\mathcal{R}})\\) and the transitional spatial coefficients \\(\\mathcal{A}^{\\mathcal{R}}\\) improves the results significantly over the baseline method[21]. Further finetuning the spatial coefficients \\((\\mathcal{A}^{\\mathcal{R}'})\\) using pseudo-labelled target domain features boosts the performance. We ablate our hypothesis regarding similar vMF kernels in source and target domains by visualizing image patches that are activated by similar cross-domain kernels (Supplementary Figures 9-11). We also ablate our hypothesis regarding robust spatial geometry by visualizing images activated by the same spatial coefficient in both source and target domains (using source and transitional vMF dictionaries) in Supp. Fig 4 and 7. Analysis of adaptation coefficient is discussed in Supp. Sec. E." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.521, + 0.771, + 0.537 + ], + "angle": 0, + "content": "5. Conclusion and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.554, + 0.892, + 0.889 + ], + "angle": 0, + "content": "In this work, we addressed the problem of developing object classification algorithms that are robust to OOD factors such as weather, context and occlusion. We generalize CompNets[21] for OOD robustness by observing that they could be learned in two uncoupled steps: (i) unsupervised learning of a dictionary of vMF kernels (roughly corresponding to the subparts of the object) and (ii) supervised learning of the spatial structure of the objects (intuitively where the subparts occur). This enabled us to: (a) learn a transitional dictionary which captured the feature properties of both domains, and (b) learn the distribution of spatial structure on the source domain and transfer it to the target. This model is very successful and could be improved by simple pseudo-labeling techniques. Our empirical results on the OOD-CV[45], synthetic Imagenet-C corruptions, and the synthetic UDA-Parts dataset display the strong and versatile SOTA performance of our method. In addition, we developed a more challenging dataset Occluded-OOD-CV by introducing occlusion into OOD-CV and show that our Bayesian method, UGT, performed well in this difficult challenge. 
Our Bayesian approach could be extended to other tasks such as semantic segmentation, exploiting properties of CompNets[36, 37]. We give a qualitative proof of concept in the Supplementary." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.958 + ], + "angle": 0, + "content": "22995" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] D. Bug, S. Schneider, A. Grote, E. Oswald, F. Feuerhake, J. Schüler, and D. Merhof. Context-based normalization of histological stains using deep convolutional features. Lecture Notes in Computer Science, pages 135-142, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.243 + ], + "angle": 0, + "content": "[2] Xinyang Chen, Sinan Wang, Mingsheng Long, and Jianmin Wang. Transferability vs. discriminability: Batch spectral penalization for adversarial domain adaptation. In Proceedings of the 36th International Conference on Machine Learning, pages 1081-1090. PMLR, 2019. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.245, + 0.471, + 0.313 + ], + "angle": 0, + "content": "[3] Shuhao Cui, Shuhui Wang, Junbao Zhuo, Liang Li, Qingming Huang, and Qi Tian. Towards discriminability and diversity: Batch nuclear-norm maximization under label insufficient situations. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.315, + 0.471, + 0.357 + ], + "angle": 0, + "content": "[4] Nathan Drenkow, Numair Sani, Ilya Shpitser, and Mathias Unberath. Robustness in deep learning for computer vision: Mind the gap? CoRR, abs/2112.00639, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.359, + 0.47, + 0.426 + ], + "angle": 0, + "content": "[5] Aram Galstyan and Paul R Cohen. Empirical comparison of \"hard\" and \"soft\" label propagation for relational classification. In International Conference on Inductive Logic Programming, pages 98-111. Springer, Berlin, Heidelberg, 2007. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.429, + 0.47, + 0.47 + ], + "angle": 0, + "content": "[6] Aritra Ghosh, Himanshu Kumar, and P. S. Sastry. Robust loss functions under label noise for deep neural networks, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.473, + 0.47, + 0.528 + ], + "angle": 0, + "content": "[7] Siddharth Gopal and Yiming Yang. Von mises-fisher clustering models. In Proceedings of the 31st International Conference on Machine Learning, pages 154-162, Beijing, China, 2014. PMLR. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.53, + 0.47, + 0.586 + ], + "angle": 0, + "content": "[8] Md Hasnat, Julien Bohné, Jonathan Milgram, Stéphane Gentic, Liming Chen, et al. von mises-fisher mixture model-based deep learning: Application to face verification. arXiv preprint arXiv:1706.04264, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.588, + 0.47, + 0.628 + ], + "angle": 0, + "content": "[9] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations, 2019. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.631, + 0.47, + 0.699 + ], + "angle": 0, + "content": "[10] Dan Hendrycks, Xiaoyuan Liu, Eric Wallace, Adam Dziedzic, Rishabh Krishnan, and Dawn Song. Pretrained transformers improve out-of-distribution robustness. 
Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.702, + 0.47, + 0.77 + ], + "angle": 0, + "content": "[11] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization, 2021. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.773, + 0.469, + 0.813 + ], + "angle": 0, + "content": "[12] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples, 2021. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.47, + 0.857 + ], + "angle": 0, + "content": "[13] Lukas Hoyer, Dengxin Dai, Haoran Wang, and Luc Van Gool. Mic: Masked image consistency for context-enhanced domain adaptation, 2023. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.47, + 0.899 + ], + "angle": 0, + "content": "[14] Dong hyun Lee. Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[15] Ying Jin, Ximei Wang, Mingsheng Long, and Jianmin Wang. Minimum class confusion for versatile domain adaptation, 2019. 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.189 + ], + "angle": 0, + "content": "[16] Prakhar Kaushik, Aayush Mishra, Adam Kortylewski, and Alan Yuille. Source-free and image-only unsupervised domain adaptation for category level object pose estimation, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.191, + 0.892, + 0.218 + ], + "angle": 0, + "content": "[17] Toru Kitagawa and Jeff Rowley. von mises-fisher distributions and their statistical divergence, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.22, + 0.892, + 0.259 + ], + "angle": 0, + "content": "[18] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. Pare: Part attention regressor for 3d human body estimation, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.261, + 0.892, + 0.343 + ], + "angle": 0, + "content": "[19] Adam Kortylewski, Bernhard Egger, Andreas Schneider, Thomas Gereg, Andreas Morel-Forster, and Thomas Vetter. Empirically analyzing the effect of dataset biases on deep face recognition systems. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.345, + 0.892, + 0.413 + ], + "angle": 0, + "content": "[20] Adam Kortylewski, Ju He, Qing Liu, and Alan Loddon Yuille. Compositional convolutional neural networks: A deep architecture with innate robustness to partial occlusion. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8937-8946, 2020. 1, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.415, + 0.892, + 0.482 + ], + "angle": 0, + "content": "[21] Adam Kortylewski, Qing Liu, Angtian Wang, Yihong Sun, and Alan Yuille. Compositional convolutional neural networks: A robust and interpretable model for object recognition under occlusion. International Journal of Computer Vision, 129(3):736-760, 2021. 
1, 2, 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.484, + 0.892, + 0.524 + ], + "angle": 0, + "content": "[22] Jian Liang, Dapeng Hu, Jiashi Feng, and Ran He. Dine: Domain adaptation from single and multiple black-box predictors, 2022. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.526, + 0.892, + 0.552 + ], + "angle": 0, + "content": "[23] Hong Liu, Jianmin Wang, and Mingsheng Long. Cycle self-training for domain adaptation, 2021. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.554, + 0.892, + 0.621 + ], + "angle": 0, + "content": "[24] Qing Liu, Adam Kortylewski, Zhishuai Zhang, Zizhang Li, Mengqi Guo, Qihao Liu, Xiaoding Yuan, Jiteng Mu, Weichao Qiu, and Alan Yuille. Learning part segmentation through unsupervised domain adaptation from synthetic vehicles. In CVPR, 2022. 2, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.623, + 0.892, + 0.677 + ], + "angle": 0, + "content": "[25] Mingsheng Long, ZHANGJIE CAO, Jianmin Wang, and Michael I Jordan. Conditional adversarial domain adaptation. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2018. 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.892, + 0.719 + ], + "angle": 0, + "content": "[26] M. Jehanzeb Mirza, Jakub Micorek, Horst Possegger, and Horst Bischof. The norm must go on: Dynamic unsupervised domain adaptation by normalization, 2022. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.721, + 0.892, + 0.761 + ], + "angle": 0, + "content": "[27] Jaemin Na, Heechul Jung, Hyung Jin Chang, and Wonjun Hwang. Fixbi: Bridging domain spaces for unsupervised domain adaptation, 2021. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.763, + 0.892, + 0.803 + ], + "angle": 0, + "content": "[28] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. DoImagenet classifiers generalize toImagenet?, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.805, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[29] Douglas A. Reynolds, Thomas F. Quatieri, and Robert B. Dunn. Speaker verification using adapted gaussian mixture models. Digital Signal Processing, 10(1):19-41, 2000. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.847, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[30] Evgenia Rusak, Steffen Schneider, Peter Gehler, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Adapting imagenet-scale models to complex distribution shifts with self-learning, 2021. 6, 7, 8" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "22996" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.093, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[31] Kuniaki Saito, Kohei Watanabe, Yoshitaka Ushiku, and Tatsuya Harada. Maximum classifier discrepancy for unsupervised domain adaptation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2018. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.47, + 0.203 + ], + "angle": 0, + "content": "[32] Steffen Schneider, Evgenia Rusak, Luisa Eck, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Improving robustness against common corruptions by covariate shift adaptation, 2020. 
2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.205, + 0.469, + 0.259 + ], + "angle": 0, + "content": "[33] Ozan Sener, Hyun Oh Song, Ashutosh Saxena, and Silvio Savarese. Learning transferrable representations for unsupervised domain adaptation. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.261, + 0.469, + 0.3 + ], + "angle": 0, + "content": "[34] Hwanjun Song, Minseok Kim, Dongmin Park, Yooju Shin, and Jae-Gil Lee. Learning from noisy labels with deep neural networks: A survey, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.302, + 0.469, + 0.355 + ], + "angle": 0, + "content": "[35] Baochen Sun, Jiashi Feng, and Kate Saenko. Correlation alignment for unsupervised domain adaptation. Advances in Computer Vision and Pattern Recognition, pages 153-171, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.358, + 0.469, + 0.412 + ], + "angle": 0, + "content": "[36] Yihong Sun, Adam Kortylewski, and Alan Yuille. Weakly-supervised amodal instance segmentation with compositional priors. arXiv preprint arXiv:2010.13175, 2020. 1, 2, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.414, + 0.469, + 0.482 + ], + "angle": 0, + "content": "[37] Angtian Wang, Yihong Sun, Adam Kortylewski, and Alan L Yuille. Robust object detection under occlusion with context-aware compositionalnets. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12645-12654, 2020. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.484, + 0.469, + 0.524 + ], + "angle": 0, + "content": "[38] Dequan Wang, Evan Shelhamer, Shaoteng Liu, Bruno Olshausen, and Trevor Darrell. Tent: Fully test-time adaptation by entropy minimization, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.526, + 0.469, + 0.566 + ], + "angle": 0, + "content": "[39] Jianyu Wang, Zhishuai Zhang, Cihang Xie, Yuyin Zhou, Vittal Premachandran, Jun Zhu, Lingxi Xie, and Alan Yuille. Visual concepts and compositional voting, 2017. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.568, + 0.469, + 0.607 + ], + "angle": 0, + "content": "[40] Guoqiang Wei, Cuiling Lan, Wenjun Zeng, Zhizheng Zhang, and Zhibo Chen. Toalign: Task-oriented alignment for unsupervised domain adaptation, 2021. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.609, + 0.469, + 0.663 + ], + "angle": 0, + "content": "[41] Yu Xiang, Roozbeh Mottaghi, and Silvio Savarese. Beyond Pascal: A benchmark for 3d object detection in the wild. In IEEE Winter Conference on Applications of Computer Vision (WACV), 2014. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.665, + 0.469, + 0.733 + ], + "angle": 0, + "content": "[42] Xiaoding Yuan, Adam Kortylewski, Yihong Sun, and Alan Yuille. Robust instance segmentation through reasoning about multi-object occlusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11141-11150, 2021. 1, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.735, + 0.469, + 0.774 + ], + "angle": 0, + "content": "[43] Yuchen Zhang, Tianle Liu, Mingsheng Long, and Michael I. Jordan. Bridging theory and algorithm for domain adaptation, 2019. 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.469, + 0.816 + ], + "angle": 0, + "content": "[44] Zhilu Zhang and Mert R. Sabuncu. Generalized cross entropy loss for training deep neural networks with noisy labels, 2018. 
2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[45] Bingchen Zhao, Shaozuo Yu, Wufei Ma, Mingxin Yu, Shenxiao Mei, Angtian Wang, Ju He, Alan Yuille, and Adam Kortylewski. Ood-cv: A benchmark for robustness to out-of-distribution shifts of individual nuisances in natural images. Proceedings of the European Conference on Computer Vision (ECCV), 2022. 1, 2, 6, 8" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22997" + } + ] +] \ No newline at end of file diff --git a/2024/A Bayesian Approach to OOD Robustness in Image Classification/d0e61d97-e025-4ae4-a494-3d44cf79404b_origin.pdf b/2024/A Bayesian Approach to OOD Robustness in Image Classification/d0e61d97-e025-4ae4-a494-3d44cf79404b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..56fffa1fefad111636757e0e1778d12a0ad73a21 --- /dev/null +++ b/2024/A Bayesian Approach to OOD Robustness in Image Classification/d0e61d97-e025-4ae4-a494-3d44cf79404b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6110af476041dfc19ba5e6990470a963c62fa2c0392eec281ef22822d670b557 +size 4237939 diff --git a/2024/A Bayesian Approach to OOD Robustness in Image Classification/full.md b/2024/A Bayesian Approach to OOD Robustness in Image Classification/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3f566a547b077a18b6c789b526ea30dde0bfedba --- /dev/null +++ b/2024/A Bayesian Approach to OOD Robustness in Image Classification/full.md @@ -0,0 +1,337 @@ +# A Bayesian Approach to OOD Robustness in Image Classification + +Prakhar Kaushik +Johns Hopkins University +pkaushi1@jh.edu + +Adam Kortylewski +University of Freiburg +akortyle@mpi-inf.mpg.de + +Alan Yuille Johns Hopkins University ayuille1@jh.edu + +# Abstract + +An important and unsolved problem in computer vision is to ensure that the algorithms are robust to changes in image domains. We address this problem in the scenario where we only have access to images from the target domains. Motivated by the challenges of the OOD-CV [45] benchmark where we encounter real world Out-of-Domain (OOD) nuisances and occlusion, we introduce a novel Bayesian approach to OOD robustness for object classification. Our work extends Compositional Neural Networks (CompNets), which have been shown to be robust to occlusion but degrade badly when tested on OOD data. We exploit the fact that CompNets contain a generative head defined over feature vectors represented by von Mises-Fisher (vMF) kernels, which correspond roughly to object parts, and can be learned without supervision. We obverse that some vMF kernels are similar between different domains, while others are not. This enables us to learn a transitional dictionary of vMF kernels that are intermediate between the source and target domains and train the generative model on this dictionary using the annotations on the source domain, followed by iterative refinement. This approach, termed Unsupervised Generative Transition (UGT), performs very well in OOD scenarios even when occlusion is present. UGT is evaluated on different OOD benchmarks including the OOD-CV dataset, several popular datasets (e.g., ImageNet-C [9]), artificial image corruptions (including adding occluders), and synthetic-to-real domain transfer, and does well in all scenarios. + +# 1. 
Introduction

In recent years, machine learning algorithms have been extremely successful for tasks like object classification when evaluated on benchmark datasets like ImageNet. But these successes require that the training and test data (or the source domain and the target domain data) be identically and independently distributed (IID) from some underlying source. However, in practice, it is important to ensure that the algorithms generalize to data that differ from the training data. For example, in real-world applications, an algorithm for car detection may encounter cars with unusual shapes and textures (Fig. 3), which did not occur in the training set.

This work has been supported by Army Research Laboratory award W911NF2320008 and ONR with N00014-21-1-2812. A Kortylewski acknowledges support via his Emmy Noether Research Group funded by the German Science Foundation (DFG) under Grant No. 468670075.

Existing OOD methods [9-12, 28] have shown success in dealing with robustness issues when evaluated on early robustness datasets, such as Imagenet-C [9], Imagenet-R [11], and Imagenet-A [12], where the domain differences are due to synthetic corruptions, adversarial images, rendered images, and similar factors [45]. But these algorithms performed less well on a newer benchmark, OOD-CV [45], which focuses on systematic analysis of real-world nuisances, e.g., changes in texture, 3D pose, weather, shape, and context. From a related perspective, OOD-CV studies the causal factors that result in the domain gap [4]. In addition, previous works have rarely been evaluated for robustness to occlusion, an important OOD robustness metric.

In this work, we address OOD robustness on OOD-CV and related datasets, focusing on real-world domain differences and occlusion. We build on a class of Bayesian neural models called Compositional Neural Networks (CompNets), as they have been shown to be robust to partial occlusion [20, 21, 36, 42]. This is achieved by replacing the discriminative head of a CNN with a generative model of the feature vectors based on the objects' spatial geometry. However, CompNets are fully supervised and are not robust to OOD nuisances. In this work, we develop an unsupervised approach, Unsupervised Generative Transition (UGT), which generalizes CompNets to OOD scenarios.

UGT relies on the intuition that in OOD scenarios the appearance of object parts is highly variable (due to changes like texture or weather), while the spatial geometry of objects is often fairly similar between domains. We analyze CompNets and modify them to take advantage of this intuition. By introducing a transitional dictionary of von Mises-Fisher [17] kernels (Fig. 1), which shares the properties of both domains, we can intuitively
learn the spatial geometry of the source domain and transfer it to the target domain. UGT leverages the property that the hierarchical structure of generative models like CompNets can be learned in a two-stage manner: 1) an unsupervised learning stage of a dictionary of neural network features, called vMF kernels, using clustering in both source and target domains (the vMF kernels intuitively represent local object part structures); and 2) a supervised learning stage of the spatial relations of the vMF kernels on the source domain.

![](images/4a2af56e786ba51eb767d4e63b195f119c66421f7dc321df88dc6faa9a2fa515.jpg)
Figure 1. Illustration of the key principle underlying our Bayesian approach. Related work has shown that clusters of feature vectors learned in an unsupervised manner resemble part-like patterns [21, 39]. We observe that some feature clusters (represented here on a vMF manifold) are very similar in both IID and OOD data (illustrated in blue and red boxes), whereas for other feature clusters there is no corresponding equivalent in the other domain. Our Bayesian approach exploits this property by first learning a generative model of feature clusters and their spatial combinations on the IID data and subsequently adapting the model to OOD data via an unsupervised adaptation of the vMF cluster dictionary, while retaining the spatial relations between clusters.

We primarily evaluate UGT on the OOD-CV benchmark [45]. In addition, to challenge UGT, we add occluders to OOD-CV and create a new dataset called Occluded-OOD-CV (Sec. 4.1). We also test UGT on Imagenet-C corruptions and Synthetic-to-Real domain robustness. Our studies show that UGT performs well on all these tasks and significantly outperforms the SOTA baselines.

We make several important contributions in this paper.

1. We model objects by a generative model on feature vectors. Our method, UGT, extends CompNets [21] by decoupling the learning into unsupervised learning of vMF kernels and supervised learning of the spatial geometry, enabling us to learn transitional dictionaries.
2. UGT achieves state-of-the-art results on the real-world OOD robustness problem on the OOD-CV dataset [45] and demonstrates exceptional performance on generalizing under the synthetic corruptions of Imagenet-C.
3. UGT also achieves strong results in the Synthetic-to-Real scenario (UDAParts [24] to Pascal3D+).
4. We introduce the Occluded-OOD-CV dataset by adding occluders to OOD-CV and show that UGT is robust to this compounded problem of occlusion and nuisance.

# 2. Related Works

OOD robustness can be considered a subset of the larger unsupervised domain adaptation problem and is closely related to domain generalization and transfer learning. Although related to both, our work focuses on OOD robustness. Our aim is to generalize well to an unlabelled target domain which is parameterized by real-world nuisance factors like weather, shape, pose, texture changes and partial occlusion, which often lead to drastic changes to visual scenes and objects not found in the source dataset.

In the past few years, there has been an increase in the number of works [9-12, 28] that characterize model performance on OOD data and treat this as a measure of robustness. The common idea that underlies most works is to leverage a property of the unlabeled target domain to allow generalization of a model trained on the source domain. There have been successful efforts to use feature statistics to adapt to the new domain; e.g., Sun et al. [35] try to minimize domain shift by aligning the second-order statistics of source and target distributions; Bug et al. [1] employ feature-aware normalization with gating elements from Long Short-Term Memory units for normalization among different spatial regions of interest. Some methods employ techniques based on adaptive batch normalization and weight normalisation [32]. Other methods include self-learning using entropy minimization [38], adaptive pseudo-labeling techniques [5, 14, 33, 34] and robust loss functions [6, 44].
Although current works have been successful at dealing with robustness problems when evaluated on earlier robustness datasets [9, 11, 12], they have been shown to struggle with real-world nuisances (OOD-CV [45]) and occlusion [16, 21]. A few generative Bayesian methods, such as CompNets [21, 36, 39], have shown relative robustness to occlusion, but they still struggle with other OOD nuisances.

# 3. Method

We address OOD robustness from a Bayesian perspective which, to the best of our knowledge, is novel. Our starting point is a class of generative models, described in Sec. 3.1, which have been shown to be robust to occlusion [21] as long as no other OOD nuisances are present. We describe the method motivation in Sec. 3.2 and the technical details in Sec. 3.3.

![](images/c64f7ca4d5d0801a0959d327d0671933cb3a7fd5d3ef194de6e05ec1403e385e.jpg)
Figure 2. Rough illustration of our Bayesian method. $(\dashrightarrow, \dashrightarrow)$ A DCNN backbone is used to extract the source (IID) features $F^{\mathcal{S}}$ and target (OOD) features $F^{\mathcal{R}}$ . The source feature vectors $F^{\mathcal{S}}$ are then used to learn the source vMF kernels, which are then adapted to the transitional vMF kernels using target domain features $F^{\mathcal{R}}$ and the adaptation coefficients $\psi_{k}$ in an unsupervised manner. $(\longrightarrow)$ Transitional spatial coefficients $(A^{\mathcal{R}})$ are then learned using the transitional vMF likelihood $L^{\mathcal{R}}$ , i.e., a non-linear activation applied to a convolution of $F^{\mathcal{S}}$ and the transitional kernels, using source labels. $(\longrightarrow)$ These spatial coefficients are then finetuned $(A^{\mathcal{R}'})$ using pseudo-scores $\{\hat{s}\}$ generated using the transitional mixture likelihood $E^{\mathcal{R}}$ of target domain features $F^{\mathcal{R}}$ . $(\longrightarrow)$ shows the final feedforward pipeline during inference.

# 3.1. Bayesian Neural Architecture

Our base architecture is similar to CompNets [21] and is explained in this section to help readers unfamiliar with them. Our method extends this class of neural models by non-trivially modifying the training methodology to enable OOD robustness along with occlusion robustness.

This class of models differs from conventional Deep Networks by replacing the discriminative head with a generative model of feature vectors. For each object $y$ we learn a generative model $P(F|y)$ for the feature vectors $F$ . This model is formulated as a mixture model $P(F|y) = \sum_{m} P(F|y, m)$ , where the mixture variable $m$ roughly corresponds to the viewpoint of the object. The conditional distributions $P(F|y, m)$ for the features are factorizable in terms of position so that $P(F|y, m) = \prod_{a \in \mathcal{D}} P(f_a|y, m)$ , where $a \in \mathcal{D}$ specifies the position in the image. These distributions $P(f_a|y, m)$ are specified in terms of von Mises-Fisher (vMF) dictionaries, with parameters $\Lambda = \{\sigma_k, \mu_k\}$ , and by spatial coefficients with parameters $\mathcal{A} = \{\alpha_{a,k}^{y,m}\}$ .
We use the following generative probability distribution for the neural features $F$ conditioned on an object $y$ [20, 21]:

$$
P (F | y) = \sum_ {m} P (F | y, m) = \sum_ {m} \prod_ {a \in \mathcal {D}} P _ {a} \left(f _ {a} | y, m\right) P (m), \tag {1}
$$

$$
P _ {a} \left(f _ {a} \mid y, m\right) = P _ {a} \left(f _ {a} \mid \mathcal {A}, \Lambda\right) = \sum_ {k} \alpha_ {a, k} ^ {y, m} P \left(f _ {a} \mid \sigma_ {k}, \mu_ {k}\right), \tag {2}
$$

$$
P (f | \sigma_ {k}, \mu_ {k}) = \frac {e ^ {\sigma_ {k} \mu_ {k} ^ {T} f}}{Z (\sigma_ {k})}, \quad \| f \| = 1, \; \| \mu_ {k} \| = 1. \tag {3}
$$

We typically use 4 mixture components in our method, and $P(m)$ is a uniform prior over the mixture components. As shown in [21, 39], each vMF kernel can be qualitatively interpreted as a subpart of the object (i.e., all image patches with feature responses close to $\mu_{k}$ look like visually similar object subparts). We use von Mises-Fisher distributions instead of Gaussian distributions because the feature vectors $f_{a}$ and the means $\mu_{k}$ must have a unit norm [7, 8]. The spatial coefficients $\mathcal{A} = \{\alpha_{a,k}^{y,m}\}$ specify the probability that the vMF kernel $k$ occurs at the position $a$ conditioned on the object $y$ and its mixture component $m$ .

Inference. After learning, inference on an image with feature vectors $F$ is performed by a forward pass which estimates which object is most likely to generate the features $F$ of the input image, $\hat{y} = \arg\max_{y} P(F|y)$ [21, 36].

Occlusion modeling. To make the model described above robust to occlusion (in non-OOD data), an outlier process is added, allowing some of the image features to be generated by the object and others by a separate outlier process [20, 36]. This is formalised by:

$$
P (F | y) = \prod_ {a \in \mathcal {D}} \sum_ {m} P _ {a} \left(f _ {a} | y, m\right) ^ {z _ {a}} Q \left(f _ {a}\right) ^ {1 - z _ {a}} P (m) P \left(z _ {a}\right) \tag {4}
$$

where $Q(f_{a})$ is a vMF distribution for a feature generated by an occluder, which can be estimated from non-annotated images [19, 21, 42]. The latent variable $z_{a} \in \{0,1\}$ indicates whether pixel $a$ is occluded or not occluded ( $z_{a} = \{1,0\}$ respectively), and the prior $P(z_{a})$ indicates the prior probability of a pixel being occluded. Note that we could also, in theory, sum over $z$ (we currently take a max).

Training. CompNets [21, 36, 42] are trained end-to-end to optimize the model parameters $\Lambda, \mathcal{A}$ using the standard supervision for object classification (e.g., the mixture components and the vMF kernels are treated as latent variables). In an OOD scenario, the image features no longer correspond well to the learned generative model, and without labels we cannot trivially finetune the model. UGT utilizes an insightful training strategy to solve this problem.

# 3.2. Motivation on Generalizing to OOD Data

UGT builds upon the aforementioned Bayesian model because it gives a natural way to formulate an occluder process. These models, however, do not do well on OOD data (Sec. 4). Solving this problem in an unsupervised manner requires a reformulation of the training process. We motivate our solution, UGT, in the following stages.

Firstly, the vMF kernel dictionaries (i.e., the subparts of the object) can be learnt without supervision and hence can be found on both the source (annotated) and the target (non-annotated) domains.
Secondly, we observe that some of the vMF kernels are similar between different domains (intuitively, some subparts are similar in both domains). Thirdly, we can build on this observation to learn a transitional dictionary, which encourages vMF kernels in both domains to be similar if possible, and which works well in both domains. Fourthly, we note that the spatial coefficients capture the spatial activity pattern of the vMF kernels; these patterns depend on the spatial structure of the objects and so are mostly invariant to the domain. This suggests that we can learn the spatial coefficients on the source domain (where annotations are available), provided we use the transitional dictionary of vMF kernels, and that these spatial coefficients give a good initial estimate for the spatial coefficients on the target domain (which can be improved by simple pseudo-labeling).

In the first stage, we learn the vMF dictionaries $\Lambda$ without supervision by maximum likelihood estimation (MLE), assuming that the feature vectors $\{f_a\}$ of all the images (and at all positions) in each domain are generated by a mixture of von Mises-Fisher distributions $P(f|\Lambda) = \sum_{k} \pi_k e^{\sigma_k \mu_k^T f} / Z[\sigma_k]$ . This is essentially clustering, similar to that used in earlier studies [21, 39]. After the $\Lambda$ are learnt, if annotations are available (i.e., we know the object $y$ ), then we can learn the spatial coefficients $\mathcal{A}$ from the data $\{F_n\}$ in the annotated (source) domain by MLE from the distribution $\sum_{m} \prod_{a \in D} \sum_{k} \alpha_{a,k}^{y,m} P(f_a | \sigma_k, \mu_k)$ .

In the second stage, we compare the vMF dictionaries $(\Lambda^S)$ and $(\Lambda^T)$ on the source (S) and target (T) domains, respectively. We observe that a subset of the dictionary vectors are similar, as measured by cosine similarity in the vMF feature space (Fig. 1). We conjecture that this is because a subset of the vMF kernels, which correspond roughly to object subparts [39], is invariant to the nuisance variables which cause the differences between the domains. For example, for an object like a car or bus, some subparts like wheels and license plates may be very similar between the source and target domains but others may not (Fig. 1).

These observations motivate us to learn a transitional vMF dictionary $(\Lambda^{\mathcal{R}})$ . This dictionary is learnt on the target domain, but with a prior (or regularization constraint) that encourages the dictionary elements in both domains to be similar. Finally, we learn the spatial coefficients $\mathcal{A}$ on the source domain, but using the transitional dictionary (Sec. 3.3.2). This allows us to utilize object geometry knowledge from the source domain in the target domain. As we show in our experiments and ablation (Sec. 4, Sec. 4.3), this model already works well on the target domain and can be improved by pseudo-labelling techniques.

# 3.3. Training UGT

Our Bayesian method, UGT, involves three steps: 1) learning the transitional dictionary $\Lambda^{\mathcal{R}}$ , 2) learning the transitional spatial coefficients $\mathcal{A}^{\mathcal{R}}$ using $f^S$ and $\Lambda^{\mathcal{R}}$ , and lastly 3) fine-tuning the transitional parameters $(\Lambda^{\mathcal{R}},\mathcal{A}^{\mathcal{R}})$ using simple pseudo-labelling. Refer to Fig. 2 for a simple illustration.
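To make the dictionary-adaptation step concrete, the following is a minimal NumPy sketch of a regularized vMF mixture update of the kind formalized in Sec. 3.3.1 (Eqs. (7)-(9)). It is illustrative only and not our released implementation: it assumes unit-normalized feature vectors, a constant concentration $\sigma$ , and a single scalar adaptation coefficient `psi` in place of the per-parameter, per-kernel $\psi_{k}$ .

```python
# Minimal sketch (assumptions as stated above) of an EM-style update that pulls the
# transitional vMF means towards the target-domain statistics while keeping them
# close to the source kernels, in the spirit of Eqs. (7)-(9).
import numpy as np

def vmf_responsibilities(F, mu, pi, sigma=1.0):
    """E-step: posterior P(k | f_i) for unit-norm features F (n, d) and means mu (K, d)."""
    logits = sigma * F @ mu.T + np.log(pi)        # log pi_k + sigma * mu_k^T f_i; Z(sigma) is constant
    logits -= logits.max(axis=1, keepdims=True)   # numerical stability
    resp = np.exp(logits)
    return resp / resp.sum(axis=1, keepdims=True)

def adapt_dictionary(F_target, mu_src, pi_src, psi=0.5, n_iter=10):
    """Regularized M-step: interpolate between target-domain moments and source kernels."""
    mu, pi = mu_src.copy(), pi_src.copy()
    for _ in range(n_iter):
        resp = vmf_responsibilities(F_target, mu, pi)                  # E-step
        n_k = resp.sum(axis=0)                                         # soft counts per kernel
        first_moment = (resp.T @ F_target) / np.maximum(n_k[:, None], 1e-8)
        mu = psi * first_moment + (1.0 - psi) * mu_src                 # Eq. (9)-style interpolation
        mu /= np.linalg.norm(mu, axis=1, keepdims=True)                # keep ||mu_k|| = 1
        pi = psi * (n_k / len(F_target)) + (1.0 - psi) * pi_src        # Eq. (8)-style interpolation
        pi /= pi.sum()                                                 # nu: rescale so sum_k pi_k = 1
    return mu, pi

# Toy usage with random unit-norm vectors standing in for DCNN feature vectors.
rng = np.random.default_rng(0)
f_t = rng.normal(size=(256, 64)); f_t /= np.linalg.norm(f_t, axis=1, keepdims=True)
mu_s = rng.normal(size=(8, 64)); mu_s /= np.linalg.norm(mu_s, axis=1, keepdims=True)
pi_s = np.full(8, 1.0 / 8)
mu_r, pi_r = adapt_dictionary(f_t, mu_s, pi_s)
```

In practice, the per-kernel $\psi_{k}$ would be scheduled over the EM iterations as described in Sec. 3.3.1 rather than held fixed, but the interpolation structure between source and target statistics is the same.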
# 3.3.1 Learning Transitional Dictionary

We initialize the transitional von Mises-Fisher (vMF) dictionary vectors with the learnt source domain vMF dictionary vectors, i.e., $\Lambda^{\mathcal{R}} = \Lambda^{S}$ . The source domain vMF dictionaries, i.e., $\Lambda^{\mathcal{S}}(\mu ,\sigma)$ , are learnt from the features $f^{\mathcal{S}}$ in the source domain by MLE, as described in Sec. 3.1, using the EM algorithm [39]. We can learn the transitional vMF dictionary parameters $\Lambda^{\mathcal{R}}$ from the target domain feature vectors $f^{\mathcal{R}}$ in a few ways. We can maximize the regularized likelihood shown in Eq. (5) using the same EM algorithm used to calculate the source domain parameters. Eq. (5) shows the Bayesian parameterization of our transitional model and can be seen as a penalized or regularized form of maximum likelihood estimation. We penalize the distance between the initialized transitional mean vectors (which are the source parameters) and the learnt ones. This regularization (like others) also helps in avoiding overfitting. Since we fix $\sigma_{k}$ to a constant to reduce computation, the normalization term $Z(\sigma)$ reduces to a constant, and we can derive the penalized log-likelihood term as shown in Eq. (6). $\psi$ is an adaptation parameter discussed later.

$$
p \left(f ^ {\mathcal {R}} \mid \Lambda^ {\mathcal {R}}\right) = \prod_ {n} \sum_ {k} \alpha_ {k} P \left(f _ {a} \mid \sigma_ {k}, \mu_ {k}\right) \exp \left(- \psi_ {k} \sum_ {k} \left\| \mu_ {k} - \mu_ {k} ^ {S} \right\| \right) \tag {5}
$$

$$
\begin{array}{l} l \left(\Lambda^ {\mathcal {R}}\right) = \sum^ {n} \log \left(\sum^ {k} \pi_ {k} \frac {e ^ {\sigma_ {k} \mu_ {k} ^ {T} f _ {i}}}{Z \left(\sigma_ {k}\right)}\right) - \psi_ {k} \sum^ {n} \sum^ {k} \left\| \mu_ {k} - \mu_ {k} ^ {\mathcal {S}} \right\| \\ \| f \| = 1, \; \| \mu_ {k} \| = 1, \; \sigma = 1 \Longrightarrow Z (\sigma) = \mathrm {const.} \end{array} \tag {6}
$$

The Expectation step for learning the transitional parameters is similar to the source version.
In the first step, we calculate the summary statistics for the transitional parameters using the new data.

# Algorithm 1 Unsupervised Generative Transition

1: Input: Set of source domain images $I^S = \{I_1^S, \dots, I_n^S\}$ , target domain images $I^T = \{I_1^T, \dots, I_N^T\}$ , source domain labels $y = \{y_1^S, \dots, y_n^S\}$ , deep network backbone $\Gamma(., \zeta)$ , background images $\mathcal{B}_{i=1}^r$

2: Output: Target domain model parameters $\mathcal{T} = (\mathcal{A},\Lambda)$ , background model $\beta_{r}$

3: procedure UGT $(I^{\mathcal{S}}, I^T, y, \Gamma, \beta_r)$

4: $\{F^{\mathcal{S}}\} ,\{F^{\mathcal{R}}\} \longleftarrow \Gamma ((\{I^{\mathcal{S}}\} ,\{I^T\}),\zeta)$ $\quad\triangleright$ Extract source & target featuremaps from the DCNN backbone

5: $\Lambda^{\mathcal{S}}(\mu_k) \longleftarrow$ cluster & MLE $(F^{\mathcal{S}})$ $\quad\triangleright$ Initialize source vMF kernels by kmeans & learn using MLE

6: $\Lambda_{initial}^{\mathcal{R}}(\mu) \longleftarrow \Lambda^{\mathcal{S}}(\mu_k)$ $\quad\triangleright$ Initialise transitional vMF kernels with source vMF kernels

7: $\Lambda^{\mathcal{R}}(\tilde{\mu})\longleftarrow \mathrm{MLE}(F^T,\Delta (\psi ,\Lambda^{\mathcal{S}},\Lambda^{\mathcal{R}}))$ $\quad\triangleright$ Learn transitional vMF kernels using regularized MLE with target domain data (Sec. 3.3.1, Eq. (5)-Eq. (9))

8: $\{L^{\mathcal{R}}\} \longleftarrow \sum_{k}\pi_{k}e^{\sigma_{k}\mu_{k}^{T}f^{\mathcal{S}}} / Z[\sigma_{k}]$ $(F*\Lambda^{\mathcal{R}}(\mu_{k}))$ $\quad\triangleright$ Compute regularized transitional vMF likelihood with source featuremaps and transitional vMF kernels

9: $\mathcal{A}^{\mathcal{R}}_{y_s,m} \longleftarrow$ cluster & MLE $(\{L^{\mathcal{R}}\} ,y_S)$ $\quad\triangleright$ Calculate spatial coefficients using transitional vMF likelihood and source feature vectors (Sec. 3.3.2)

10: $y_{\hat{T}} \longleftarrow \operatorname{argmax}_{y} P(F|\Lambda^{\mathcal{R}}, \mathcal{A}^{\mathcal{R}})$ $\quad\triangleright$ Pseudo-label target domain data using the transitional model

11: $\mathcal{A}_{y_{\hat{T}},m}^{\mathcal{R}'} \longleftarrow$ cluster & MLE $(\{L^{\mathcal{R}}\}, y_{\hat{T}})$ $\quad\triangleright$ Finetune spatial coefficients using pseudo-labelled data $y_{\hat{T}}$

12: $\mathcal{T}\longleftarrow$ optimize $(\mathcal{L}_{\mathrm{gce}} + \psi_v\mathcal{L} + \psi_\alpha \mathcal{L})$ $\quad\triangleright$ Optionally, finetune entire model using $y_{\hat{T}}$ (Eq. (11))

13: end procedure

For the posterior probability defined as

$$
P (k \mid f _ {i}, \Lambda) = \frac {\pi_ {k} p \left(f _ {i} \mid \mu_ {k} , \sigma_ {k}\right)}{\sum_ {k = 1} ^ {K} \pi_ {k} p \left(f _ {i} \mid \mu_ {k} , \sigma_ {k}\right)} \tag {7}
$$

for the $k^{th}$ mixture, where $p(f|\mu_k,\sigma_k)$ is defined in Eq. (3), we update the mixture parameters in the maximization step in a regularized manner as follows,

$$
\hat {\pi} _ {k} = \nu \left[ \psi_ {k} ^ {\pi} \frac {1}{n} \sum_ {i = 1} ^ {n} P \left(k \mid f _ {i}, \Lambda\right) + \left(1 - \psi_ {k} ^ {\pi}\right) \pi_ {k} ^ {S} \right] \tag {8}
$$

$$
\hat {\mu} _ {k} = \psi_ {k} ^ {\mu} \mathcal {E} _ {k} + \left(1 - \psi_ {k} ^ {\mu}\right) \mu_ {k} ^ {S} \tag {9}
$$

where $\mathcal{E}_k$ is the first moment or mean of the $k^{\text{th}}$ mixture calculated on the new data, $\nu$ is a scaling parameter to ensure that $\sum_{k} \pi_{k} = 1$ , and $\psi_{k}$ is an adaptation coefficient defined for each parameter and mixture.
The adaptation coefficient can be defined in a data-dependent manner [29], i.e., $\psi_{k}^{\mu, \pi} = \left(\frac{\omega_{k}}{P(k \mid f_{i}, \Lambda)} + 1\right)^{-1}$, where $\omega_{k}$ is an empirically set hyperparameter that controls the adaptation emphasis between the source and transitional parameters. Empirically, we observed that the adaptation coefficient is not very sensitive to changes in its value, and we therefore increase it monotonically during the EM iterations. The $\psi_{k}$ of a specific vMF kernel $\mu_{k}$ in $\Lambda^{\mathcal{R}}$ is considered stable at EM iteration $t$ if the change in its likelihood component relative to the previous iteration $t-1$ falls below a threshold, after which its value is kept fixed. We find that using only these parameter updates works well. For simpler datasets, even directly learning the transitional dictionary would suffice.

# 3.3.2 Learning Transitional Spatial Coefficients

After learning $\Lambda^{\mathcal{R}}$, we use it to estimate the transitional spatial coefficients $(\mathcal{A}^{\mathcal{R}}(\alpha))$ from the labeled source domain features $f^{\mathcal{S}}$ via MLE. The spatial coefficients represent the expected activation of a given vMF kernel $\mu_{k}$ at a position $a$ in the feature map for a specific class $y$.

$$
P_{a}\left(f_{a} \mid y_{s}, m; \mathcal{A}^{\mathcal{R}}, \Lambda^{\mathcal{R}}\right) = \sum_{k} \alpha_{a, k}^{y_{s}, m} P\left(f_{a} \mid \Lambda^{\mathcal{R}}\left(\sigma_{k}, \mu_{k}\right)\right) \tag{10}
$$

We leverage the learnt transitional vMF kernel dictionary $\Lambda^{\mathcal{R}}$ to learn spatial coefficients $\mathcal{A}^{\mathcal{R}}(\alpha)$, which represent the spatial relationships of the vMF dictionary vectors over the source domain data $D_{\mathcal{S}}$. Because these spatial coefficients $\mathcal{A}^{\mathcal{R}}$ are conditioned on $\Lambda^{\mathcal{R}}$, they also correspond to parts of target domain features even though they are learned using $f^{\mathcal{S}}$, yielding a transitional model with parameters $(\Lambda^{\mathcal{R}}, \mathcal{A}^{\mathcal{R}})$ that we can use to classify target domain data.

This combination of the conditioned transitional vMF dictionary $(\Lambda^{\mathcal{R}})$ and spatial coefficients $(\mathcal{A}^{\mathcal{R}})$ can be leveraged to label a subset of the target domain features, especially since we can focus on the subset of transitional vMF kernels that are similar to their source counterparts. We can then use these pseudo-labeled feature vectors $(y_{\hat{T}})$, along with $\Lambda^{\mathcal{R}}$, to finetune the current spatial coefficients $\mathcal{A}^{\mathcal{R}}$, which yields improved spatial coefficients $\mathcal{A}^{\mathcal{R}'}$.

Finetuning spatial coefficients. The transitional spatial coefficients $(\mathcal{A}^{\mathcal{R}})$ are initialized with the values describing the expected activation of the transitional vMF dictionary vectors $\Lambda^{\mathcal{R}}(\mu_{k})$ for the source data features $f^{\mathcal{S}}$ at a position $a$ on a feature map $f_{a}$. Subsequently, we finetune these spatial coefficients $\mathcal{A}^{\mathcal{R}}$ using a subset of target domain images that show high activations for the robust set of transitional vMF dictionary vectors $\Lambda^{\mathcal{R}}$. Optionally, we can also finetune $\Lambda^{\mathcal{R}}$ by relearning it without any initialization and regularization constraints. Although our model is trained in two decoupled stages, it is still fully differentiable and trainable from end to end [20, 36, 42].
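Before the end-to-end finetuning step, the following simplified sketch shows how the spatial coefficients of Eq. (10) and the pseudo-labels of Algorithm 1 (step 10) could be computed; it drops the viewpoint mixture $m$ and the occlusion/background terms, and the array shapes and function names are assumptions made purely for illustration:

```python
import numpy as np

def estimate_spatial_coefficients(vmf_like, labels, num_classes):
    """Estimate spatial coefficients alpha_{a,k}^{y} from per-position vMF
    likelihoods (Eq. (10)), ignoring the viewpoint mixture m for brevity.

    vmf_like: (n, H, W, K) vMF kernel likelihoods per image position.
    labels:   (n,) class labels of the (source) images.
    """
    n, H, W, K = vmf_like.shape
    alpha = np.zeros((num_classes, H, W, K))
    for y in range(num_classes):
        sel = vmf_like[labels == y]                           # images of class y
        if sel.size:
            # Expected (normalized) activation of each kernel at each position
            resp = sel / np.maximum(sel.sum(axis=-1, keepdims=True), 1e-8)
            alpha[y] = resp.mean(axis=0)                      # sums to 1 over k per position
    return alpha

def pseudo_label(vmf_like, alpha):
    """Pseudo-label images with the class whose spatial model best explains
    them (log of Eq. (2) summed over positions), as in step 10 of Algorithm 1."""
    # scores[n, y] = sum_a log sum_k alpha[y, a, k] * vmf_like[n, a, k]
    scores = np.log(np.einsum('nhwk,yhwk->nyhw', vmf_like, alpha) + 1e-12).sum(axis=(2, 3))
    return scores.argmax(axis=1)
```

In the actual model the same quantities are maintained per mixture component $m$ and combined with the outlier process of Eq. (4).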
We use this end-to-end differentiability to finetune the entire model. The loss function (Eq. (11)) consists of a generalized cross-entropy [44] term computed on the model predictions, plus two regularization terms on the vMF dictionary and the spatial coefficient parameters; the regularizers encourage the vMF clusters to remain similar to the feature vectors $f_{a}$. In Eq. (11), $\zeta_{\{v,\alpha\}}$ denote the trade-off hyperparameters of the regularizing loss terms,

$$
\mathcal{L} = \mathcal{L}_{\mathrm{gce}}\left(y_{pred}, y_{\hat{T}}\right) + \zeta_{v} \mathcal{L}(F, \Lambda) + \zeta_{\alpha} \mathcal{L}(F, \mathcal{A}), \tag{11}
$$

For a constant vMF variance $\sigma_{k}$ (which also reduces the normalisation term to a constant) and assuming hard assignment of features $f_{a}$ to vMF dictionary clusters [21],

$$
\mathcal{L}\left(F, \Lambda^{\mathcal{R}}\right) = -\sum_{a} \max_{k} \log p\left(f_{a} \mid \Lambda^{\mathcal{R}}\left(\mu_{k}\right)\right) \tag{12}
$$

$$
\mathcal{L}\left(F, \mathcal{A}^{\mathcal{R}'}\right) = -\sum_{a}\left(1 - z_{a}\right) \log \left[\sum_{k} \alpha_{a, k}^{y_{\hat{T}}, m}\, p\left(f_{a} \mid \Lambda^{\mathcal{R}}\left(\mu_{k}\right)\right)\right] \tag{13}
$$

The latent variable $z_{a} \in \{0,1\}$ is explained in Sec. 3.1.

# 4. Experiments

Our experiments evaluate the robustness of vision classification models in an extended out-of-domain setup, i.e., generalizing to target domains with individual nuisance factors and partial occlusion. This allows us to thoroughly evaluate the efficacy of current methods, which have been shown to perform well on other OOD robustness datasets, on OOD-CV [45] (which enables a systematic analysis of nuisances on real-world data), on Occluded-OOD-CV (which combines partial occlusion with individual nuisances), and on Imagenet-C corruptions (for an analysis of synthetic corruptions). Lastly, we also show initial results on synthetic-to-real OOD robustness using the UDAParts [24] dataset.

# 4.1. Setup and Data

Datasets. For our primary evaluation, we use the OOD-CV [45] dataset. It consists of test subcategories that differ from the training data in terms of a main nuisance factor, namely context, weather, texture, pose, or shape. We use $L0$ for the 0% occlusion level to represent this data setup in Tab. 1 and Supplementary Sec. B.

Occluded-OOD-CV. In addition to OOD-CV, we experiment with a more complex robustness analysis setup involving partial occlusion. In this setup, models that have been adapted in an unsupervised manner to target domains with nuisance factors are then evaluated on data with partial occlusion in addition to the real-world nuisances. For this purpose, we create a dataset named Occluded-OOD-CV in which we superimpose occluders on objects in the OOD-CV test images in order to approximate real-life occlusion. These occluders are cropped from the MS-COCO dataset, similar to [20], and are superimposed on objects in the OOD-CV test set. There are three levels of partial occlusion, $L1$ (20-40%), $L2$ (40-60%) and $L3$ (60-80%), which allows us to analyze the occlusion robustness of the model across a range of severities (in addition to individual nuisance factors). Fig. 3 shows example images from our dataset. Previous works [18, 21] have shown that using cropped occluders, as done in Occluded-OOD-CV, is comparable to using real occluders for classification evaluation.
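As a rough illustration of how such partially occluded test images can be produced, the PIL-based sketch below pastes a segmented occluder crop onto an object's bounding box at an approximate occlusion level; the function name, the centre-placement policy, and the way the level is measured are all simplifying assumptions rather than the exact construction of Occluded-OOD-CV, which follows the MS-COCO occluder protocol of [20]:

```python
from PIL import Image
import numpy as np

def superimpose_occluder(image, occluder, occluder_mask, bbox, target_level=(0.4, 0.6)):
    """Paste a cropped occluder onto an object bounding box so that the covered
    fraction of the box lies roughly inside `target_level` (e.g. L2 = 40-60%).

    image:         PIL image from the OOD-CV test set.
    occluder:      PIL image of an occluder crop (e.g. from MS-COCO).
    occluder_mask: boolean numpy array marking the occluder's foreground pixels.
    bbox:          (x0, y0, x1, y1) object bounding box in pixel coordinates.
    """
    x0, y0, x1, y1 = bbox
    box_area = (x1 - x0) * (y1 - y0)
    lo, hi = target_level
    # Scale the occluder so its visible area covers roughly the requested fraction
    target_area = box_area * (lo + hi) / 2.0
    scale = (target_area / max(occluder_mask.sum(), 1)) ** 0.5
    new_size = (max(1, int(occluder.width * scale)), max(1, int(occluder.height * scale)))
    occluder = occluder.resize(new_size)
    mask = Image.fromarray((occluder_mask * 255).astype(np.uint8)).resize(new_size)
    # Centre the occluder on the object box and paste it with its mask
    cx, cy = (x0 + x1) // 2, (y0 + y1) // 2
    out = image.copy()
    out.paste(occluder, (cx - new_size[0] // 2, cy - new_size[1] // 2), mask)
    return out
```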
![](images/247b46dcecdd1129006577d352d0238e6c1a830e35f3d27817fe3e06676027ae.jpg)

![](images/3e8b036d4dfbd44b03042a2f86d67ea5b222ece38e72c7102cadbc8063546a53.jpg)

![](images/350cbe9ac78a261ffb8acc50be5b0ef545f2c17cff9365cbf4fc3c64ac59bc08.jpg)

![](images/7aee95b83785551127aae01db6a52fea3e177fdc028df72f2e8716beba8b022a.jpg)
Context (60-80%) Weather (20-40%) Texture (40-60%)

![](images/10b86bdb032d1a58917c7bbe15b8a8370803129ac613383e8e00f4b2911825b2.jpg)
Figure 3. Occluded-OOD-CV dataset examples. Each object category is identified by its nuisance factor and occlusion percentage.

![](images/f0b40b433831a803979e805e5df8d1ae295722fae4eeaba261a3c90aa992f2a9.jpg)

We also use Imagenet-C [9] corruptions on the Pascal3D+ dataset for robustness evaluation with conventionally used synthetic corruptions. We further evaluate models in a synthetic (UDAParts [24]) to real (Pascal3D+ [41]) setup.

In summary, for each experiment we have five real-world nuisance subcategories (context, weather, texture, pose, shape), at least seven synthetic corruption categories (fog, pixelate, motion blur, etc.), one synthetic source dataset, and four partial occlusion levels (including no occlusion). We also run experiments on all the combined nuisance subcategories (Tab. 1). In total, this amounts to 24 sets of data and experiments for our (extended) OOD robustness setup on the OOD-CV dataset alone.

Models. We compare our work with our baseline method CompNets [21], with other well-known recent works [30, 32] that have been shown to be SOTA on various robustness datasets [9, 11, 12], as well as with many well-known UDA methods [3, 13, 15, 22, 23, 25-27, 40, 43]. We focus on VGG-16 and ResNet-50 backbones, as they are commonly used in most current methods [20, 30, 32, 44].

Training Setup. All models are trained on the source data with the corresponding labels. Models can access some unlabeled nuisance (target) data, which can be a single nuisance (OOD-CV, Imagenet-C), combined nuisances (Tab. 1), or real data (when the source data are synthetic). Models do not have access to images with partial occlusion at any time; partially occluded images are only used for inference. We also avoid using different types of data aug-

Table 1. OOD-CV Nuisances Top-1 Classification Results. Occlusion levels greater than $0\%$ represent the Occluded-OOD-CV dataset.
| Method | Combined | | | | Context | | | | Weather | | | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Occlusion → | 0% | 20-40% | 40-60% | 60-80% | 0% | 20-40% | 40-60% | 60-80% | 0% | 20-40% | 40-60% | 60-80% |
| CDAN [25]** | .760 | .531 | .420 | .380 | .710 | .541 | .436 | .397 | .745 | .476 | .335 | .299 |
| BSP [2]** | .753 | .506 | .401 | .351 | .610 | .511 | .419 | .385 | .730 | .391 | .266 | .254 |
| MDD [43]** | .780 | .551 | .469 | .410 | .761 | .531 | .436 | .410 | .802 | .439 | .306 | .271 |
| MCD [31]** | .772 | .556 | .461 | .403 | .798 | .523 | .426 | .374 | .810 | .447 | .336 | .286 |
| MCC [15]** | .785 | .582 | .492 | .434 | .730 | .577 | .454 | .420 | .767 | .503 | .376 | .362 |
| FixBi [27]** | .821 | .534 | .478 | .399 | .802 | .542 | .445 | .409 | .755 | .489 | .358 | .335 |
| MIC [13]** | .837 | .540 | .376 | .262 | .755 | .602 | .532 | .499 | .817 | .612 | .496 | .427 |
| ToAlign [40]** | .761 | .507 | .411 | .346 | .712 | .501 | .393 | .382 | .720 | .381 | .252 | .213 |
| CST [23]** | .840 | .579 | .539 | .477 | .687 | .491 | .452 | .411 | .813 | .558 | .397 | .356 |
| DUA [26]** | .699 | .523 | .480 | .403 | .667 | .471 | .434 | .401 | .701 | .465 | .391 | .210 |
| DINE [22]** | .835 | .600 | .493 | .443 | .867 | .515 | .418 | .397 | .798 | .423 | .290 | .261 |
| RPL [30] | .664 | .430 | .346 | .300 | .675 | .457 | .368 | .315 | .642 | .247 | .138 | .122 |
| BNA [32] | .653 | .426 | .343 | .298 | .580 | .397 | .342 | .278 | .635 | .295 | .179 | .171 |
| CompNet [21] | .720 | .506 | .462 | .415 | .790 | .517 | .454 | .369 | .683 | .434 | .398 | .362 |
| UGT (Ours) | .850 | .620 | .570 | .501 | .875 | .624 | .565 | .511 | .856 | .600 | .528 | .465 |

| Method | Texture | | | | Pose | | | | Shape | | | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Occlusion → | 0% | 20-40% | 40-60% | 60-80% | 0% | 20-40% | 40-60% | 60-80% | 0% | 20-40% | 40-60% | 60-80% |
| CDAN [25]** | .820 | .532 | .420 | .364 | .844 | .620 | .521 | .450 | .773 | .561 | .491 | .441 |
| BSP [2]** | .696 | .444 | .384 | .315 | .831 | .610 | .510 | .423 | .757 | .535 | .485 | .434 |
| MDD [43]** | .895 | .518 | .427 | .400 | .870 | .611 | .534 | .469 | .836 | .541 | .459 | .386 |
| MCD [31]** | .896 | .522 | .432 | .392 | .865 | .623 | .532 | .471 | .834 | .538 | .456 | .397 |
| MCC [15]** | .874 | .671 | .547 | .495 | .867 | .611 | .521 | .460 | .818 | .601 | .524 | .460 |
| FixBi [27]** | .854 | .574 | .445 | .369 | .842 | .533 | .472 | .446 | .801 | .500 | .435 | .373 |
| MIC [13]** | .821 | .706 | .631 | .576 | .799 | .613 | .509 | .455 | .807 | .608 | .565 | .467 |
| ToAlign [40]** | .594 | .413 | .312 | .273 | .788 | .574 | .503 | .418 | .719 | .548 | .460 | .391 |
| CST [23]** | .858 | .657 | .538 | .477 | .887 | .617 | .525 | .451 | .831 | .617 | .495 | .441 |
| DUA [26]** | .918 | .691 | .514 | .468 | .755 | .511 | .423 | .355 | .695 | .455 | .386 | .345 |
| DINE [22]** | .911 | .572 | .432 | .401 | .885 | .618 | .543 | .448 | .838 | .520 | .426 | .360 |
| RPL [30] | .703 | .371 | .238 | .227 | .730 | .493 | .400 | .329 | .670 | .426 | .340 | .311 |
| BNA [32] | .701 | .383 | .247 | .239 | .737 | .510 | .407 | .355 | .662 | .436 | .350 | .311 |
| CompNet [21] | .747 | .539 | .462 | .426 | .768 | .581 | .538 | .458 | .698 | .466 | .451 | .400 |
| UGT (Ours) | .936 | .726 | .665 | .635 | .892 | .632 | .555 | .481 | .852 | .644 | .601 | .567 |
+ +** Pretrained Imagenet Backbone used (Resnet-50) / Pretrained UDA model used. + +Table 2. Imagenet-C Corruptions on Pascal3D+ dataset - Classification Results (Vgg16) + +
| Model | Elastic Transform | | | | Gaussian Blur | | | | Snow | | | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Occlusion → | 0% | 20-40% | 40-60% | 60-80% | 0% | 20-40% | 40-60% | 60-80% | 0% | 20-40% | 40-60% | 60-80% |
| RPL [30] | .830 | .597 | .461 | .371 | .855 | .541 | .403 | .320 | .842 | .592 | .435 | .408 |
| BNA [32] | .793 | .601 | .498 | .400 | .833 | .618 | .484 | .300 | .767 | .627 | .542 | .454 |
| CompNet [21] | .268 | .183 | .157 | .146 | .732 | .395 | .296 | .241 | .529 | .348 | .258 | .210 |
| UGT (Ours) | .872 | .712 | .712 | .494 | .909 | .720 | .613 | .509 | .890 | .742 | .634 | .523 |

| Model | Motion Blur | | | | Contrast | | | | Frost | | | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Occlusion → | 0% | 20-40% | 40-60% | 60-80% | 0% | 20-40% | 40-60% | 60-80% | 0% | 20-40% | 40-60% | 60-80% |
| RPL [30] | .862 | .629 | .481 | .373 | .901 | .610 | .433 | .321 | .850 | .670 | .511 | .402 |
| BNA [32] | .844 | .623 | .481 | .355 | .899 | .601 | .401 | .315 | .845 | .654 | .501 | .399 |
| CompNet [21] | .639 | .362 | .287 | .241 | .760 | .472 | .374 | .312 | .740 | .481 | .360 | .301 |
| UGT (Ours) | .891 | .763 | .673 | .567 | .923 | .701 | .534 | .412 | .911 | .782 | .672 | .561 |
+ +Table 3. Ablation analysis for (a) OOD-CV [45] Combined (b) OOD-CV Texture (c) Imagenet-C (Snow) Corruption + +
| Method | (a) Combined | | | | (b) Texture | | | | (c) Snow | | | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Occlusion → | L0 | L1 | L2 | L3 | L0 | L1 | L2 | L3 | L0 | L1 | L2 | L3 |
| Baseline (B) | .698 | .466 | .451 | .400 | .715 | .575 | .475 | .409 | .529 | .348 | .258 | .210 |
| +$\Lambda^{\mathcal{R}}$+$\mathcal{A}^{\mathcal{R}}$ | .816 | .598 | .524 | .498 | .785 | .660 | .559 | .515 | .781 | .671 | .582 | .480 |
| +$\Lambda^{\mathcal{R}}$+$\mathcal{A}^{\mathcal{R}'}$ | .852 | .644 | .601 | .567 | .843 | .764 | .656 | .623 | .885 | .742 | .634 | .523 |
+ +mentations and additional data training to have a fairer comparison amongst all the works. Although, our Bayesian model does not use pretrained Imagenet backbones for feature extraction for fairness, a number of our comparative methods [2, 15, 25, 26, 43] perform poorly without one, so we relax this constraint for them. Our method is still capable of surpassing them in terms of classification accuracy. Further details are provided in Supplementary Section C. + +# 4.2. Results + +OOD robustness to individual nuisances. Tab. 1 (L0 columns) shows classification results on entire OOD-CV test data (combined nuisances) as well as five individual nuisances. We see that our model achieves state-of-the-art results in all experiments. In Tab. 2, we observe that our model also performs exceedingly well when dealing with synthetic Imagenet-C corruptions. Refer to Supplementary Sec.C2 and Tables 5-11 for additional Imagenet-C results. + +Synthetic to Real. Tab. 4 shows our results on both normal and extended OOD robustness scenario in a synthetic to real setup, showing that our unsupervised method can robustly close the gap between its supervised counterpart while outperforming other methods by large margins. + +Table 4. Synthetic (UDAParts) [24] to Real (Pascal3D+) [41] dataset - Classification Results on Resnet50 + +
| Model | 0% | 20-40% | 40-60% | 60-80% |
|---|---|---|---|---|
| RPL [30] | .822 | .432 | .370 | .335 |
| BNA [32] | .950 | .684 | .484 | .356 |
| CompNet [21] | .940 | .650 | .475 | .347 |
| UGT (Ours) | .992 | .957 | .861 | .753 |
+ +Extended OOD robustness under partial Occlusion. In Tab. 1, Tab. 2 and Supplementary Tables 1-3, 5-11, our model outperforms other methods by significant margins in the extended OOD scenarios of nuisance parameters with partial occlusion. We observe that the performance of other models which have been adapted to the target domain data drops drastically when encountering partial occlusion along with nuisance factors. This underlines the increased complexity of the extended OOD robustness scenario relative to the vanilla OOD robustness setup and how our Bayesian model is able to perform exceedingly well compared to conventional methods. + +# 4.3. Ablation Analysis + +Tab. 3 and Supplementary Sec. D & Tables 12-17 show the extensive results of the ablation study for UGT, underlying how each component contributes to the overall compositional model. We can see that just calculating the transitional vMF kernel dictionary $(\Lambda^{\mathcal{R}})$ and the transitional spatial coefficients $\mathcal{A}^{\mathcal{R}}$ improves the results significantly over the baseline method[21]. Further finetuning the spatial coefficients $(\mathcal{A}^{\mathcal{R}'})$ using pseudo-labelled target domain features boosts the performance. We ablate our hypothesis regarding similar vMF kernels in source and target domains by visualizing image patches that are activated by similar cross-domain kernels (Supplementary Figures 9-11). We also ablate our hypothesis regarding robust spatial geometry by visualizing images activated by the same spatial coefficient in both source and target domains (using source and transitional vMF dictionaries) in Supp. Fig 4 and 7. Analysis of adaptation coefficient is discussed in Supp. Sec. E. + +# 5. Conclusion and Future Work + +In this work, we addressed the problem of developing object classification algorithms that are robust to OOD factors such as weather, context and occlusion. We generalize CompNets[21] for OOD robustness by observing that they could be learned in two uncoupled steps: (i) unsupervised learning of a dictionary of vMF kernels (roughly corresponding to the subparts of the object) and (ii) supervised learning of the spatial structure of the objects (intuitively where the subparts occur). This enabled us to: (a) learn a transitional dictionary which captured the feature properties of both domains, and (b) learn the distribution of spatial structure on the source domain and transfer it to the target. This model is very successful and could be improved by simple pseudo-labeling techniques. Our empirical results on the OOD-CV[45], synthetic Imagenet-C corruptions, and the synthetic UDA-Parts dataset display the strong and versatile SOTA performance of our method. In addition, we developed a more challenging dataset Occluded-OOD-CV by introducing occlusion into OOD-CV and show that our Bayesian method, UGT, performed well in this difficult challenge. Our Bayesian approach could be extended to other tasks such as semantic segmentation, exploiting properties of CompNets[36, 37]. We give a qualitative proof of concept in the Supplementary. + +# References + +[1] D. Bug, S. Schneider, A. Grote, E. Oswald, F. Feuerhake, J. Schüler, and D. Merhof. Context-based normalization of histological stains using deep convolutional features. Lecture Notes in Computer Science, pages 135-142, 2017. 2 +[2] Xinyang Chen, Sinan Wang, Mingsheng Long, and Jianmin Wang. Transferability vs. discriminability: Batch spectral penalization for adversarial domain adaptation. 
In Proceedings of the 36th International Conference on Machine Learning, pages 1081-1090. PMLR, 2019. 7, 8 +[3] Shuhao Cui, Shuhui Wang, Junbao Zhuo, Liang Li, Qingming Huang, and Qi Tian. Towards discriminability and diversity: Batch nuclear-norm maximization under label insufficient situations. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6 +[4] Nathan Drenkow, Numair Sani, Ilya Shpitser, and Mathias Unberath. Robustness in deep learning for computer vision: Mind the gap? CoRR, abs/2112.00639, 2021. 1 +[5] Aram Galstyan and Paul R Cohen. Empirical comparison of "hard" and "soft" label propagation for relational classification. In International Conference on Inductive Logic Programming, pages 98-111. Springer, Berlin, Heidelberg, 2007. 2 +[6] Aritra Ghosh, Himanshu Kumar, and P. S. Sastry. Robust loss functions under label noise for deep neural networks, 2017. 2 +[7] Siddharth Gopal and Yiming Yang. Von mises-fisher clustering models. In Proceedings of the 31st International Conference on Machine Learning, pages 154-162, Beijing, China, 2014. PMLR. 3 +[8] Md Hasnat, Julien Bohné, Jonathan Milgram, Stéphane Gentic, Liming Chen, et al. von mises-fisher mixture model-based deep learning: Application to face verification. arXiv preprint arXiv:1706.04264, 2017. 3 +[9] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations, 2019. 1, 2, 6 +[10] Dan Hendrycks, Xiaoyuan Liu, Eric Wallace, Adam Dziedzic, Rishabh Krishnan, and Dawn Song. Pretrained transformers improve out-of-distribution robustness. Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, 2020. +[11] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization, 2021. 1, 2, 6 +[12] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples, 2021. 1, 2, 6 +[13] Lukas Hoyer, Dengxin Dai, Haoran Wang, and Luc Van Gool. Mic: Masked image consistency for context-enhanced domain adaptation, 2023. 6, 7 +[14] Dong hyun Lee. Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. 2 + +[15] Ying Jin, Ximei Wang, Mingsheng Long, and Jianmin Wang. Minimum class confusion for versatile domain adaptation, 2019. 6, 7, 8 +[16] Prakhar Kaushik, Aayush Mishra, Adam Kortylewski, and Alan Yuille. Source-free and image-only unsupervised domain adaptation for category level object pose estimation, 2024. 2 +[17] Toru Kitagawa and Jeff Rowley. von mises-fisher distributions and their statistical divergence, 2022. 1 +[18] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. Pare: Part attention regressor for 3d human body estimation, 2021. 6 +[19] Adam Kortylewski, Bernhard Egger, Andreas Schneider, Thomas Gereg, Andreas Morel-Forster, and Thomas Vetter. Empirically analyzing the effect of dataset biases on deep face recognition systems. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2018. 3 +[20] Adam Kortylewski, Ju He, Qing Liu, and Alan Loddon Yuille. Compositional convolutional neural networks: A deep architecture with innate robustness to partial occlusion. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8937-8946, 2020. 
1, 3, 6 +[21] Adam Kortylewski, Qing Liu, Angtian Wang, Yihong Sun, and Alan Yuille. Compositional convolutional neural networks: A robust and interpretable model for object recognition under occlusion. International Journal of Computer Vision, 129(3):736-760, 2021. 1, 2, 3, 4, 6, 7, 8 +[22] Jian Liang, Dapeng Hu, Jiashi Feng, and Ran He. Dine: Domain adaptation from single and multiple black-box predictors, 2022. 6, 7 +[23] Hong Liu, Jianmin Wang, and Mingsheng Long. Cycle self-training for domain adaptation, 2021. 6, 7 +[24] Qing Liu, Adam Kortylewski, Zhishuai Zhang, Zizhang Li, Mengqi Guo, Qihao Liu, Xiaoding Yuan, Jiteng Mu, Weichao Qiu, and Alan Yuille. Learning part segmentation through unsupervised domain adaptation from synthetic vehicles. In CVPR, 2022. 2, 6, 8 +[25] Mingsheng Long, ZHANGJIE CAO, Jianmin Wang, and Michael I Jordan. Conditional adversarial domain adaptation. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2018. 6, 7, 8 +[26] M. Jehanzeb Mirza, Jakub Micorek, Horst Possegger, and Horst Bischof. The norm must go on: Dynamic unsupervised domain adaptation by normalization, 2022. 7, 8 +[27] Jaemin Na, Heechul Jung, Hyung Jin Chang, and Wonjun Hwang. Fixbi: Bridging domain spaces for unsupervised domain adaptation, 2021. 6, 7 +[28] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. DoImagenet classifiers generalize toImagenet?, 2019. 1, 2 +[29] Douglas A. Reynolds, Thomas F. Quatieri, and Robert B. Dunn. Speaker verification using adapted gaussian mixture models. Digital Signal Processing, 10(1):19-41, 2000. 5 +[30] Evgenia Rusak, Steffen Schneider, Peter Gehler, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Adapting imagenet-scale models to complex distribution shifts with self-learning, 2021. 6, 7, 8 + +[31] Kuniaki Saito, Kohei Watanabe, Yoshitaka Ushiku, and Tatsuya Harada. Maximum classifier discrepancy for unsupervised domain adaptation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2018. 7 +[32] Steffen Schneider, Evgenia Rusak, Luisa Eck, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Improving robustness against common corruptions by covariate shift adaptation, 2020. 2, 6, 7, 8 +[33] Ozan Sener, Hyun Oh Song, Ashutosh Saxena, and Silvio Savarese. Learning transferrable representations for unsupervised domain adaptation. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2016. 2 +[34] Hwanjun Song, Minseok Kim, Dongmin Park, Yooju Shin, and Jae-Gil Lee. Learning from noisy labels with deep neural networks: A survey, 2020. 2 +[35] Baochen Sun, Jiashi Feng, and Kate Saenko. Correlation alignment for unsupervised domain adaptation. Advances in Computer Vision and Pattern Recognition, pages 153-171, 2017. 2 +[36] Yihong Sun, Adam Kortylewski, and Alan Yuille. Weakly-supervised amodal instance segmentation with compositional priors. arXiv preprint arXiv:2010.13175, 2020. 1, 2, 3, 6, 8 +[37] Angtian Wang, Yihong Sun, Adam Kortylewski, and Alan L Yuille. Robust object detection under occlusion with context-aware compositionalnets. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12645-12654, 2020. 8 +[38] Dequan Wang, Evan Shelhamer, Shaoteng Liu, Bruno Olshausen, and Trevor Darrell. Tent: Fully test-time adaptation by entropy minimization, 2020. 2 +[39] Jianyu Wang, Zhishuai Zhang, Cihang Xie, Yuyin Zhou, Vittal Premachandran, Jun Zhu, Lingxi Xie, and Alan Yuille. 
Visual concepts and compositional voting, 2017. 2, 3, 4 +[40] Guoqiang Wei, Cuiling Lan, Wenjun Zeng, Zhizheng Zhang, and Zhibo Chen. Toalign: Task-oriented alignment for unsupervised domain adaptation, 2021. 6, 7 +[41] Yu Xiang, Roozbeh Mottaghi, and Silvio Savarese. Beyond Pascal: A benchmark for 3d object detection in the wild. In IEEE Winter Conference on Applications of Computer Vision (WACV), 2014. 6, 8 +[42] Xiaoding Yuan, Adam Kortylewski, Yihong Sun, and Alan Yuille. Robust instance segmentation through reasoning about multi-object occlusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11141-11150, 2021. 1, 3, 6 +[43] Yuchen Zhang, Tianle Liu, Mingsheng Long, and Michael I. Jordan. Bridging theory and algorithm for domain adaptation, 2019. 6, 7, 8 +[44] Zhilu Zhang and Mert R. Sabuncu. Generalized cross entropy loss for training deep neural networks with noisy labels, 2018. 2, 6 +[45] Bingchen Zhao, Shaozuo Yu, Wufei Ma, Mingxin Yu, Shenxiao Mei, Angtian Wang, Ju He, Alan Yuille, and Adam Kortylewski. Ood-cv: A benchmark for robustness to out-of-distribution shifts of individual nuisances in natural images. Proceedings of the European Conference on Computer Vision (ECCV), 2022. 1, 2, 6, 8 \ No newline at end of file diff --git a/2024/A Bayesian Approach to OOD Robustness in Image Classification/images.zip b/2024/A Bayesian Approach to OOD Robustness in Image Classification/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..c926e396978a034e377e6fb928acd6d3c3aca0ad --- /dev/null +++ b/2024/A Bayesian Approach to OOD Robustness in Image Classification/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01d809dc122309fc7377ecc5ba9e96527fab80f282df03916ea7d21d31d38856 +size 695523 diff --git a/2024/A Bayesian Approach to OOD Robustness in Image Classification/layout.json b/2024/A Bayesian Approach to OOD Robustness in Image Classification/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f1d176eba3abb072a1319e74ec28dc70c99aff02 --- /dev/null +++ b/2024/A Bayesian Approach to OOD Robustness in Image Classification/layout.json @@ -0,0 +1,9716 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 94, + 103, + 501, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 103, + 501, + 121 + ], + "spans": [ + { + "bbox": [ + 94, + 103, + 501, + 121 + ], + "type": "text", + "content": "A Bayesian Approach to OOD Robustness in Image Classification" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 81, + 144, + 208, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 144, + 208, + 185 + ], + "spans": [ + { + "bbox": [ + 81, + 144, + 208, + 185 + ], + "type": "text", + "content": "Prakhar Kaushik \nJohns Hopkins University \npkaushi1@jh.edu" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 233, + 144, + 360, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 144, + 360, + 185 + ], + "spans": [ + { + "bbox": [ + 233, + 144, + 360, + 185 + ], + "type": "text", + "content": "Adam Kortylewski \nUniversity of Freiburg \nakortyle@mpi-inf.mpg.de" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 385, + 144, + 512, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 385, + 144, + 512, + 185 + ], + "spans": [ + { + "bbox": [ + 385, + 144, + 512, + 185 + ], + "type": "text", + "content": "Alan Yuille Johns Hopkins University 
ayuille1@jh.edu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 238, + 290, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 238, + 290, + 574 + ], + "spans": [ + { + "bbox": [ + 47, + 238, + 290, + 574 + ], + "type": "text", + "content": "An important and unsolved problem in computer vision is to ensure that the algorithms are robust to changes in image domains. We address this problem in the scenario where we only have access to images from the target domains. Motivated by the challenges of the OOD-CV [45] benchmark where we encounter real world Out-of-Domain (OOD) nuisances and occlusion, we introduce a novel Bayesian approach to OOD robustness for object classification. Our work extends Compositional Neural Networks (CompNets), which have been shown to be robust to occlusion but degrade badly when tested on OOD data. We exploit the fact that CompNets contain a generative head defined over feature vectors represented by von Mises-Fisher (vMF) kernels, which correspond roughly to object parts, and can be learned without supervision. We obverse that some vMF kernels are similar between different domains, while others are not. This enables us to learn a transitional dictionary of vMF kernels that are intermediate between the source and target domains and train the generative model on this dictionary using the annotations on the source domain, followed by iterative refinement. This approach, termed Unsupervised Generative Transition (UGT), performs very well in OOD scenarios even when occlusion is present. UGT is evaluated on different OOD benchmarks including the OOD-CV dataset, several popular datasets (e.g., ImageNet-C [9]), artificial image corruptions (including adding occluders), and synthetic-to-real domain transfer, and does well in all scenarios." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 597, + 128, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 128, + 609 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 128, + 609 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 617, + 288, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 666 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 666 + ], + "type": "text", + "content": "In recent years, machine learning algorithms have been extremely successful for tasks like object classification when evaluated on benchmarked datasets like ImageNet. But these successes require that the training and test data (or" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 674, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 674, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 674, + 288, + 713 + ], + "type": "text", + "content": "This work has been supported by Army Research Laboratory award W911NF2320008 and ONR with N00014-21-1-2812. A Kortylewski acknowledges support via his Emmy Noether Research Group funded by the German Science Foundation (DFG) under Grant No. 468670075." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 213, + 547, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 213, + 547, + 310 + ], + "spans": [ + { + "bbox": [ + 304, + 213, + 547, + 310 + ], + "type": "text", + "content": "the source domain and the target domain data) be identically and independently distributed (IID) from some underlying source. However, in practice, it is important to ensure that the algorithms generalize to data that differ from the training data. For example, in real-world applications, an algorithm for car detection may encounter cars with unusual shapes and textures (Fig. 3), which did not occur in the training set." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 312, + 547, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 312, + 547, + 469 + ], + "spans": [ + { + "bbox": [ + 304, + 312, + 547, + 469 + ], + "type": "text", + "content": "Existing OOD methods [9-12, 28] have shown success in dealing with robustness issues when evaluated on early robustness datasets, such as Imagenet-C [9], Imagenet-R [11], and Imagenet-A [12], where the domain differences are due to synthetic corruptions, adversarial images, rendered images, and similar factors [45]. But these algorithms performed less well on a newer benchmark, OODCV [45], which focuses on systematic analysis of real-world nuisances, e.g. changes in texture, 3D pose, weather, shape, and context. From a related perspective, OOD-CV studies the causal factors that result in the domain gap [4]. In addition, previous works have rarely been evaluated for robustness to occlusion, an important OOD robustness metric." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 471, + 548, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 471, + 548, + 615 + ], + "spans": [ + { + "bbox": [ + 304, + 471, + 548, + 615 + ], + "type": "text", + "content": "In this work, we address OOD robustness on OOD-CV, and related datasets, focusing on real-world domain differences and occlusion. We build on a class of Bayesian neural models called Compositional Neural Networks (CompNets), as they have been shown to be robust to partial occlusion [20, 21, 36, 42]. This is achieved by replacing the discriminative head of a CNN with a generative model of the feature vectors based on the objects' spatial geometry. However, CompNets are fully supervised and are not robust to OOD nuisances. In this work, we develop an unsupervised approach, Unsupervised Generative Transition (UGT), which generalizes CompNets to OOD scenarios." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 617, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 548, + 715 + ], + "type": "text", + "content": "UGT relies on intuition that in OOD scenarios, the appearance of object parts is highly variable (due to changes like texture or weather), while the spatial geometry of objects is often fairly similar between domains. We analyze CompNets and modify them to take advantage of the intuition mentioned above. By introducing a transitional dictionary of von Mises-Fisher [17] kernels (Fig. 
1), which shares the properties of both domains, we can intuitively" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "22988" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 70, + 521, + 175 + ], + "blocks": [ + { + "bbox": [ + 72, + 70, + 521, + 175 + ], + "lines": [ + { + "bbox": [ + 72, + 70, + 521, + 175 + ], + "spans": [ + { + "bbox": [ + 72, + 70, + 521, + 175 + ], + "type": "image", + "image_path": "4a2af56e786ba51eb767d4e63b195f119c66421f7dc321df88dc6faa9a2fa515.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 185, + 547, + 251 + ], + "lines": [ + { + "bbox": [ + 46, + 185, + 547, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 185, + 547, + 251 + ], + "type": "text", + "content": "Figure 1. Illustration of the key principle underlying our Bayesian approach. Related work has shown that clusters of feature vectors learned in an unsupervised manner resemble part-like patterns [21, 39]. We observe that some feature clusters (represented here on a vMF manifold) are very similar in both IID and OOD data (illustrated in blue and red boxes), whereas for other feature clusters there is no corresponding equivalent in the other domain. Our Bayesian approach exploits this property by first learning a generative model of feature clusters and their spatial combinations on the IID data and subsequently adapting the model to OOD data via an unsupervised adaptation of the vMF cluster dictionary, while retaining the spatial relations between clusters." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 273, + 289, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 273, + 289, + 379 + ], + "spans": [ + { + "bbox": [ + 46, + 273, + 289, + 379 + ], + "type": "text", + "content": "learn the spatial geometry of the source and transfer it to the target domain. UGT leverages the property that the hierarchical structure of generative models like CompNets can be learned in a two-stage manner. 1) An unsupervised learning stage of a dictionary of neural network features, called vMF kernels, using clustering in both source and target domains. The vMF kernels intuitively represent local object part structures. 2) A supervised learning stage of the spatial relations of the vMF kernels on the source domain." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 380, + 287, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 380, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 46, + 380, + 287, + 464 + ], + "type": "text", + "content": "We primarily evaluate UGT on the OOD-CV benchmark [45]. In addition, to challenge UGT, we add occluders to OOD-CV and create a new dataset called Occluded-OOD-CV (Sec. 4.1). We also test UGT on Imagenet-C corruptions and Synthetic-to-Real domain robustness. Our studies show that UGT performs well on all these tasks and significantly outperforms the SOTA baselines." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 465, + 279, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 465, + 279, + 477 + ], + "spans": [ + { + "bbox": [ + 59, + 465, + 279, + 477 + ], + "type": "text", + "content": "We make several important contributions in this paper." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 479, + 287, + 652 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 48, + 479, + 287, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 479, + 287, + 538 + ], + "spans": [ + { + "bbox": [ + 48, + 479, + 287, + 538 + ], + "type": "text", + "content": "1. We model objects by a generative model on feature vectors. Our method, UGT, extends CompNets [21] by decoupling the learning into unsupervised learning of vMF kernels and supervised learning of the spatial geometry enabling us to learn transitional dictionaries." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 541, + 287, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 541, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 47, + 541, + 287, + 588 + ], + "type": "text", + "content": "2. UGT achieves state-of-the-art results on the real-world OOD robustness problem on the OOD-CV dataset [45] and demonstrates exceptional performance on generalizing under the synthetic corruptions of Imagenet-C." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 590, + 286, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 590, + 286, + 613 + ], + "spans": [ + { + "bbox": [ + 47, + 590, + 286, + 613 + ], + "type": "text", + "content": "3. UGT also achieves strong results for the Synthetic-to-Real scenario (UDAParts [24] to Pascal3d+) dataset." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 616, + 286, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 616, + 286, + 652 + ], + "spans": [ + { + "bbox": [ + 47, + 616, + 286, + 652 + ], + "type": "text", + "content": "4. We introduce the Occluded-OOD-CV dataset by adding occladers to OOD-CV and show that UGT is robust to this compounded problem of occlusion and nuisance." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 668, + 138, + 680 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 668, + 138, + 680 + ], + "spans": [ + { + "bbox": [ + 47, + 668, + 138, + 680 + ], + "type": "text", + "content": "2. 
Related Works" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "OOD robustness can be considered a subset of the larger unsupervised domain adaptation problem and is closely re" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 273, + 545, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 273, + 545, + 356 + ], + "spans": [ + { + "bbox": [ + 304, + 273, + 545, + 356 + ], + "type": "text", + "content": "lated to domain generalization and transfer learning. Although related to both, our work focuses on OOD robustness. Our aim is to generalize well to an unlabelled target domain which is parameterized by real world nuisance factors like weather, shape, pose, texture changes and partial occlusion - which often leads to drastic changes to visual scenes and objects not found in the source dataset." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 357, + 546, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 357, + 546, + 559 + ], + "spans": [ + { + "bbox": [ + 304, + 357, + 546, + 559 + ], + "type": "text", + "content": "In the past few years, there has been an increase in the number of works [9-12, 28] that characterize model performance on OOD data and treat this as a measure of robustness. The common idea that underlies most works is to leverage a property of the unlabeled target domain to allow generalization of a model trained on the source domain. There have been successful efforts to use feature statistics to adapt to the new domain; e.g., Sun et al. [35] try to minimize domain shift by aligning the second-order statistics of source and target distributions; Bug et al. [1] employ feature aware normalization with gating elements from Long Short-Term Memory units for normalization among different spatial regions of interest. Some methods employ techniques based on adaptive batch normalization and weight normalisation [32]. Other methods include self-learning using entropy minimization [38], adaptive pseudo-labeling techniques [5, 14, 33, 34] and robust lost functions [6, 44]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 561, + 545, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 561, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 304, + 561, + 545, + 644 + ], + "type": "text", + "content": "Although, current works have been successful at dealing with robustness problems when evaluated on earlier robustness datasets [9, 11, 12] they have been shown to struggle with real world nuisances (OOD-CV [45]) and occlusion [16, 21]. Few generative Bayesian methods such as CompNets [21, 36, 39] have shown their relative robustness to occlusion, but still struggle with other OOD nuisances." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 656, + 361, + 668 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 656, + 361, + 668 + ], + "spans": [ + { + "bbox": [ + 306, + 656, + 361, + 668 + ], + "type": "text", + "content": "3. 
Method" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "We address OOD robustness from a Bayesian perspective which, to the best of our knowledge, is novel. Our starting point is a class of generative models, described in Sec. 3.1," + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22989" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 85, + 70, + 509, + 253 + ], + "blocks": [ + { + "bbox": [ + 85, + 70, + 509, + 253 + ], + "lines": [ + { + "bbox": [ + 85, + 70, + 509, + 253 + ], + "spans": [ + { + "bbox": [ + 85, + 70, + 509, + 253 + ], + "type": "image", + "image_path": "c64f7ca4d5d0801a0959d327d0671933cb3a7fd5d3ef194de6e05ec1403e385e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "lines": [ + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "spans": [ + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": "Figure 2. Rough illustration of our Bayesian method. " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "(\\dashrightarrow, \\dashrightarrow)" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " A DCNN backbone is used to extract the source (IID) " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "F^{\\mathcal{S}}" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " and target (OOD) features " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "F^{\\mathcal{R}}" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": ". The source feature vectors " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "F^{\\mathcal{S}}" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " are then used to learn the source vMF kernels that are then adapted to the transitional vMF kernels using target domain features " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "F^{\\mathcal{R}}" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " and the adaptation coefficients " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "\\psi_{k}" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " in an unsupervised manner. 
" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "(\\longrightarrow)" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " Transitional Spatial coefficients " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "(A^{\\mathcal{R}})" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " are then learned using the transitional vMF likelihood " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "L^{\\mathcal{R}}" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " i.e. non-linear activation applied to a convolution of " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "F^{\\mathcal{S}}" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " and transitional kernels using source labels. " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "(\\longrightarrow)" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " These spatial coefficients are then finetuned " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "(A^{\\mathcal{R}'})" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " using pseudo-scores " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "\\{\\hat{s}\\}" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " generated using the transitional mixture likelihood " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "E^{\\mathcal{R}}" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " of target domain features " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "F^{\\mathcal{R}}" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "inline_equation", + "content": "(\\longrightarrow)" + }, + { + "bbox": [ + 46, + 261, + 547, + 329 + ], + "type": "text", + "content": " shows the final feedforward pipeline during inference." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 350, + 287, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 350, + 287, + 385 + ], + "spans": [ + { + "bbox": [ + 46, + 350, + 287, + 385 + ], + "type": "text", + "content": "which have been shown to be robust to occlusion [21] when not dealing with other OOD nuisances. We describe method motivation in Sec. 3.2 and the technical details in Sec. 3.3." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 392, + 209, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 392, + 209, + 403 + ], + "spans": [ + { + "bbox": [ + 47, + 392, + 209, + 403 + ], + "type": "text", + "content": "3.1. 
Bayesian Neural Architecture" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 410, + 287, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 410, + 287, + 468 + ], + "spans": [ + { + "bbox": [ + 46, + 410, + 287, + 468 + ], + "type": "text", + "content": "Our base architecture is similar to CompNets [21] and is explained in this section to help readers unfamiliar with them. Our method extends this class of neural models by non-trivially modifying the training methodology to enable OOD robustness along with occlusion robustness." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "spans": [ + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": "This class of models differs from conventional Deep Networks by replacing the discriminative head by a generative model of feature vectors. For each object " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": " we learn a generative model " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "P(F|y)" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": " for the feature vectors " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": ". This model is formulated as a mixture model " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "P(F|y) = \\sum_{m} P(F|y, m)" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": " where the mixture variable " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": " roughly corresponds to the viewpoint of the object. The conditional distributions " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "P(F|y, m)" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": " for the features are factorizable in terms of position so that " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "P(F|y, m) = \\prod_{a \\in \\mathcal{D}} P(f_a|y, m)" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "a \\in \\mathcal{D}" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": " specifies the position in the image. 
These distributions " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "P(f_a|y, m)" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": " are specified in terms of von Mises-Fisher (vMF) dictionaries, with parameters " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "\\Lambda = \\{\\sigma_k, \\mu_k\\}" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": " and by spatial coefficients with parameters " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "\\mathcal{A} = \\{\\alpha_{a,k}^{y,m}\\}" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": ". We use the following generative probability distribution for the neural features " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": " conditioned on an object " + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 468, + 288, + 635 + ], + "type": "text", + "content": " [20, 21]:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 639, + 287, + 659 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 639, + 287, + 659 + ], + "spans": [ + { + "bbox": [ + 61, + 639, + 287, + 659 + ], + "type": "interline_equation", + "content": "P (F | y) = \\sum_ {m} P (F | y, m) = \\sum_ {m} \\prod_ {a \\in \\mathcal {D}} P _ {a} \\left(f _ {a} | y, m\\right) P (m), \\tag {1}", + "image_path": "553784bf5cf28a1aec68073188fcd17f00cc54e5bdfbb3c6879b9997d31ac7c9.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 662, + 287, + 684 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 662, + 287, + 684 + ], + "spans": [ + { + "bbox": [ + 62, + 662, + 287, + 684 + ], + "type": "interline_equation", + "content": "P _ {a} \\left(f _ {a} \\mid y, m\\right) = P _ {a} \\left(f _ {a} \\mid \\mathcal {A}, \\Lambda\\right) = \\sum_ {k} \\alpha_ {a, k} ^ {y, m} P \\left(f _ {a} \\mid \\sigma_ {k}, \\mu_ {k}\\right), \\tag {2}", + "image_path": "6c2c5ba0bf86403ff9106525a782bfb0c0631b51d4fcc21f420edb17f95134a0.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 685, + 287, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 685, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 62, + 685, + 287, + 712 + ], + "type": "interline_equation", + "content": "P (f | \\sigma_ {k}, \\mu_ {k}) = \\frac {e ^ {\\sigma_ {k} \\mu_ {k} ^ {T} f}}{Z (\\sigma_ {k})}, \\| f \\| = 1, \\| \\mu_ {k} \\| = 1, \\tag {3}", + "image_path": "fee1522d350602c12425bc459a350b11882e7402bb413d199fb694e2f06b2e6c.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "spans": [ + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "text", + "content": "We typically use 4 mixture components in our method and " + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "inline_equation", + "content": "P(m)" + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "text", + "content": " is an uniform prior over the mixture components. 
As shown in [21, 39] each vMF kernel can be qualitatively interpreted as a subpart of the object (i.e., all image patches with feature responses close to " + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "inline_equation", + "content": "\\mu_{k}" + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "text", + "content": " look like visually similar object subparts). We use von Mises-Fisher distributions instead of Gaussian distributions because the feature vectors " + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "inline_equation", + "content": "f_{a}" + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "text", + "content": " and the means " + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "inline_equation", + "content": "\\mu_{k}" + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "text", + "content": " must have a unit norm [7, 8]. The spatial coefficients " + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "inline_equation", + "content": "\\mathcal{A} = \\{\\alpha_{a,k}^{y,m}\\}" + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "text", + "content": " specify the probability that the vMF kernel " + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "text", + "content": " occurs at the position " + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "text", + "content": " conditioned on the object " + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "text", + "content": " and its mixture component " + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 306, + 350, + 545, + 481 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 481, + 545, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 481, + 545, + 529 + ], + "spans": [ + { + "bbox": [ + 305, + 481, + 545, + 529 + ], + "type": "text", + "content": "Inference. After learning, inference on an image with feature vectors " + }, + { + "bbox": [ + 305, + 481, + 545, + 529 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 305, + 481, + 545, + 529 + ], + "type": "text", + "content": " is performed by a forward pass which estimates which object is more likely to generate the features " + }, + { + "bbox": [ + 305, + 481, + 545, + 529 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 305, + 481, + 545, + 529 + ], + "type": "text", + "content": " of the input image, " + }, + { + "bbox": [ + 305, + 481, + 545, + 529 + ], + "type": "inline_equation", + "content": "\\hat{y} = \\arg\\max_{y} P(F|y)" + }, + { + "bbox": [ + 305, + 481, + 545, + 529 + ], + "type": "text", + "content": " [21, 36]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 529, + 545, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 529, + 545, + 585 + ], + "spans": [ + { + "bbox": [ + 305, + 529, + 545, + 585 + ], + "type": "text", + "content": "Occlusion modeling. 
To make the model, described above, robust to occlusion (in non-OOD data), an outlier process is added to allow for some of the image features to be generated by the object and others by a separate outlier process [20, 36]. This is formalised by:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 318, + 586, + 545, + 605 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 586, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 318, + 586, + 545, + 605 + ], + "type": "interline_equation", + "content": "P (F | y) = \\prod_ {a \\in \\mathcal {D}} \\sum_ {m} P _ {a} \\left(f _ {a} | y, m\\right) ^ {z _ {a}} Q \\left(f _ {a}\\right) ^ {1 - z _ {a}} P (m) P \\left(z _ {a}\\right) \\tag {4}", + "image_path": "6ab9e8d73393120e60b657d4f18a66a6d06ab008ef546120b23d89ffc316308a.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "inline_equation", + "content": "Q(f_{a})" + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "text", + "content": " is a vMF distribution for a feature generated by an occluder which can be estimated from non-annotated images [19, 21, 42]. The latent variable " + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "inline_equation", + "content": "z_{a} \\in \\{0,1\\}" + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "text", + "content": " indicates whether pixel " + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "text", + "content": " is occluded or not occluded (" + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "inline_equation", + "content": "z_{a} = \\{1,0\\}" + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "text", + "content": " respectively) and the prior " + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "inline_equation", + "content": "P(z_{a})" + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "text", + "content": " indicates the prior probability of a pixel being occluded. Note that we could also, in theory, sum over " + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 305, + 605, + 545, + 689 + ], + "type": "text", + "content": " (we currently take a max)." 
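The generative scoring in Eqs. (1)-(4) and the argmax inference rule can be summarised in a short sketch. The NumPy code below is illustrative only: the array names and shapes, the occlusion prior value, and the treatment of Z(sigma) as a shared constant (so class scores are comparable only up to a common offset) are our assumptions, not the paper's released implementation.

```python
# Minimal sketch of Eqs. (1)-(4): vMF mixture likelihood with an outlier process.
import numpy as np

def vmf_log_prob(F, mu, sigma=30.0):
    """log P(f_a | sigma_k, mu_k) up to the constant -log Z(sigma).
    F: (A, C) unit-norm features, mu: (K, C) unit-norm kernel means."""
    return sigma * F @ mu.T                                    # (A, K)

def class_log_likelihood(F, mu, alpha, q_mu=None, occ_prior=0.3, sigma=30.0):
    """Occlusion-aware log P(F | y) for one class.
    alpha: (M, A, K) spatial coefficients for this class's M viewpoint mixtures."""
    log_vmf = vmf_log_prob(F, mu, sigma)                       # (A, K)
    # Eq. (2): mixture over vMF kernels at every position a, for each viewpoint m.
    p_fg = np.einsum('mak,ak->ma', alpha, np.exp(log_vmf))     # (M, A)
    log_pos = np.log(p_fg + 1e-12) + np.log(1.0 - occ_prior)
    if q_mu is not None:
        # Eq. (4): per-position occluder model Q(f_a); hard max over the occlusion flag z_a.
        log_bg = vmf_log_prob(F, q_mu[None, :], sigma)[:, 0] + np.log(occ_prior)
        log_pos = np.maximum(log_pos, log_bg[None, :])
    # Eq. (1): product over positions, then a uniform P(m) over the mixture components.
    per_mixture = log_pos.sum(axis=1)                          # (M,)
    return np.logaddexp.reduce(per_mixture) - np.log(len(per_mixture))

def classify(F, mu, spatial_coeffs, **kw):
    """Inference: y_hat = argmax_y P(F | y); spatial_coeffs maps class y -> (M, A, K)."""
    scores = {y: class_log_likelihood(F, mu, a_y, **kw) for y, a_y in spatial_coeffs.items()}
    return max(scores, key=scores.get)
```

A call such as classify(F, mu, spatial_coeffs, q_mu=q_mu) then returns the class whose generative model best explains the feature map, which is the inference rule quoted above.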
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "Training CompNets [21, 36, 42] are trained end-to-end to optimize the model parameters " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\Lambda, \\mathcal{A}" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " using the standard" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "22990" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "content": "supervision for object classification (e.g., the mixture components and the vMF kernels are treated as latent variables). In an OOD scenario the image features no longer correspond well with the learned generative model and without labels, we cannot trivially finetune the model. UGT utilizes an insightful training strategy to solve this problem." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 153, + 264, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 153, + 264, + 166 + ], + "spans": [ + { + "bbox": [ + 47, + 153, + 264, + 166 + ], + "type": "text", + "content": "3.2. Motivation on Generalizing to OOD Data" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 171, + 287, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 171, + 287, + 243 + ], + "spans": [ + { + "bbox": [ + 46, + 171, + 287, + 243 + ], + "type": "text", + "content": "UGT builds upon by the aforementioned Bayesian model because it gives a natural way to formulate an occluder process. These models, however, do not do well on OOD data (Sec. 4). To solve this problem in an unsupervised manner requires reformulation of the training process. We motivate our solution for OOD, UGT, in following stages." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 244, + 289, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 244, + 289, + 472 + ], + "spans": [ + { + "bbox": [ + 46, + 244, + 289, + 472 + ], + "type": "text", + "content": "Firstly, the vMF kernel dictionaries (i.e., the subparts of the object) can be learnt without supervision and hence can be found on both the source (annotated) and the target (non-annotated) domains. Secondly, we observe that some of the vMF kernels are similar between different domains (intuitively some subparts are similar between both domains). Thirdly, we can build on this observation to learn a transitional dictionary, which encourages vMF kernels in both domains to be similar if possible, and which works well in both domains. 
Fourthly, we note that the spatial coefficients capture the spatial activity pattern of the vMF kernels and these patterns depend on the spatial structure of the objects and so are mostly invariant to the domain, which suggests that we can learn the spatial coefficient on the source domain (where annotations are available), provided we use the transitional dictionary of vMF kernels, and that these spatial coefficients give a good initial estimate for the spatial coefficients on the target domain (which can be improved by simple pseudo-labeling)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "text", + "content": "In the first stage we learn the vMF dictionaries " + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "inline_equation", + "content": "\\Lambda" + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "text", + "content": " without supervision by maximum likelihood estimation (MLE) assuming that the feature vectors " + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "inline_equation", + "content": "\\{f_a\\}" + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "text", + "content": " of all the images (and at all positions) in each domain are generated by a mixture of von-Mises-Fisher distributions " + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "inline_equation", + "content": "P(f|\\Lambda) = \\sum_{k} \\pi_k e^{\\sigma_k \\mu_k^T f} / Z[\\sigma_k]" + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "text", + "content": ". This is essentially clustering similar to that used in earlier studies [21, 39]. After the " + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "inline_equation", + "content": "\\Lambda" + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "text", + "content": " are learnt, if annotations are available (i.e., we know the object " + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "text", + "content": ") then we can learn the spatial coefficients " + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "text", + "content": " from the data " + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "inline_equation", + "content": "\\{F_n\\}" + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "text", + "content": " in the annotated (source) domain by MLE from the distribution " + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "inline_equation", + "content": "\\sum_{m} \\prod_{a \\in D} \\sum_{k} \\alpha_{a,k}^{y,m} P(f_a | \\sigma_k, \\mu_k)" + }, + { + "bbox": [ + 46, + 472, + 289, + 606 + ], + "type": "text", + "content": "." 
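As a concrete illustration of the first stage, the sketch below fits the vMF dictionary by maximum likelihood on pooled unit-norm feature vectors; with a fixed concentration sigma this reduces to EM for a spherical mixture, essentially the clustering referred to above. The values of K, sigma, the iteration count and the initialisation are assumptions, and the snippet presumes at least K feature vectors are available.

```python
# Illustrative EM for a fixed-concentration vMF mixture P(f | Lambda) = sum_k pi_k e^{sigma mu_k^T f} / Z.
import numpy as np

def fit_vmf_dictionary(F, K=512, sigma=30.0, iters=20, seed=0):
    """F: (N, C) unit-norm features. Returns mixing weights pi (K,) and unit-norm means mu (K, C)."""
    rng = np.random.default_rng(seed)
    mu = F[rng.choice(len(F), K, replace=False)]          # initialise kernels from the data (N >= K assumed)
    pi = np.full(K, 1.0 / K)
    for _ in range(iters):
        # E-step: posterior P(k | f_i, Lambda); Z(sigma) cancels because sigma is shared.
        logits = sigma * F @ mu.T + np.log(pi)             # (N, K)
        logits -= logits.max(axis=1, keepdims=True)
        post = np.exp(logits)
        post /= post.sum(axis=1, keepdims=True)
        # M-step: update pi and the mean directions, re-normalised to the unit sphere.
        Nk = post.sum(axis=0) + 1e-8
        pi = Nk / Nk.sum()
        mu = post.T @ F
        mu /= np.linalg.norm(mu, axis=1, keepdims=True) + 1e-8
    return pi, mu
```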
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 606, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 606, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 606, + 289, + 714 + ], + "type": "text", + "content": "In the second stage, we compare the vMF dictionaries " + }, + { + "bbox": [ + 46, + 606, + 289, + 714 + ], + "type": "inline_equation", + "content": "(\\Lambda^S)" + }, + { + "bbox": [ + 46, + 606, + 289, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 606, + 289, + 714 + ], + "type": "inline_equation", + "content": "(\\Lambda^T)" + }, + { + "bbox": [ + 46, + 606, + 289, + 714 + ], + "type": "text", + "content": " on the source (S) and target (T) domain respectively. We observe that a subset of the dictionary vectors are similar, as measured by cosine similarity in the vMF feature space (Fig. 1). We conjecture that this is because a subset of the vMF kernels, which correspond roughly to object subparts [39], is invariant to the nuisance variables which cause the differences between the domains. For example, for an object like a car or bus, some subparts like" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 96 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 96 + ], + "type": "text", + "content": "wheels and license plates may be very similar between the source and target domains but others may not (Fig. 1)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 96, + 547, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 96, + 547, + 228 + ], + "spans": [ + { + "bbox": [ + 304, + 96, + 547, + 228 + ], + "type": "text", + "content": "These observations motivate us to learn a transitional vMF dictionary " + }, + { + "bbox": [ + 304, + 96, + 547, + 228 + ], + "type": "inline_equation", + "content": "(\\Lambda^{\\mathcal{R}})" + }, + { + "bbox": [ + 304, + 96, + 547, + 228 + ], + "type": "text", + "content": ". This dictionary is learnt by learning the dictionary on the target domain but adding a prior (or regularization constraint) that the dictionary elements in both domains are similar. Finally, we learn the spatial coefficients " + }, + { + "bbox": [ + 304, + 96, + 547, + 228 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 304, + 96, + 547, + 228 + ], + "type": "text", + "content": " on the source domain, but using the transitional dictionary (Sec. 3.3.2). This allows us to utilize object geometry knowledge from the source domain in the target domain. As we show in our experiments and ablation (Sec. 4, Sec. 4.3), this model already works well on the target domain and can be improved by pseudo-labelling techniques." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 235, + 397, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 235, + 397, + 248 + ], + "spans": [ + { + "bbox": [ + 305, + 235, + 397, + 248 + ], + "type": "text", + "content": "3.3. 
Training UGT" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "spans": [ + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "text", + "content": "Our Bayesian method, UGT, involves 3 steps - 1) primarily, learning transitional dictionary " + }, + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "text", + "content": ", 2) learning transitional spatial coefficients " + }, + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "text", + "content": " using " + }, + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "inline_equation", + "content": "f^S" + }, + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "text", + "content": ", and lastly 3) fin-tuning the transitional parameters " + }, + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "inline_equation", + "content": "(\\Lambda^{\\mathcal{R}},\\mathcal{A}^{\\mathcal{R}})" + }, + { + "bbox": [ + 304, + 253, + 547, + 315 + ], + "type": "text", + "content": " using simple pseudo-labelling. Refer to Fig. 2 for a simple illustration." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 327, + 481, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 327, + 481, + 340 + ], + "spans": [ + { + "bbox": [ + 305, + 327, + 481, + 340 + ], + "type": "text", + "content": "3.3.1 Learning Transitional Dictionary" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "text", + "content": "We initialize the transitional von Mises-Fisher(vMF) dictionary vectors with the learnt source domain vMF dictionary vectors, i.e., " + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}} = \\Lambda^{S}" + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "text", + "content": ". The source domain vMF dictionaries i.e., " + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "inline_equation", + "content": "(\\Lambda^{\\mathcal{S}}(\\mu ,\\sigma))" + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "text", + "content": " are learnt from the features " + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "inline_equation", + "content": "f^{\\mathcal{S}}" + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "text", + "content": " in source domain by MLE as described in Sec. 3.1 using the EM algorithm [39]. 
We can learn the transitional vMF dictionary parameters " + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "text", + "content": " from the target domain feature vectors " + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "inline_equation", + "content": "f^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "text", + "content": " through a few ways. We can maximize the regularized likelihood shown in Eq. (5) using the EM algorithm used to calculate the source domain parameters. Eq. (5) shows the Bayesian parameterization of our transitional model and can be seen as a penalized or regularized form of maximum likelihood estimation. We penalize the distance between the initialized transitional mean vectors (which are the source parameters) and the learnt ones. This regularization (like others) also helps in avoiding overfitting. Since, we fix " + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "inline_equation", + "content": "\\sigma_{k}" + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "text", + "content": " as constant to reduce computation, the normalization term " + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "inline_equation", + "content": "Z(\\sigma)" + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "text", + "content": " reduces to a constant, and we can derive the penalized log-likelihood term as shown in Eq. (6). " + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 304, + 346, + 547, + 567 + ], + "type": "text", + "content": " is a adaptation parameter discussed later." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 572, + 545, + 620 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 572, + 545, + 620 + ], + "spans": [ + { + "bbox": [ + 309, + 572, + 545, + 620 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} p \\left(f ^ {\\mathcal {R}} \\mid \\Lambda^ {\\mathcal {R}}\\right) = \\prod_ {n} \\sum_ {k} \\alpha_ {k} P \\left(f _ {a} \\mid \\sigma_ {k}, \\mu_ {k}\\right) \\\\ \\exp \\left(- \\psi_ {k} \\sum_ {k} \\left(\\left| \\left| \\mu_ {k} - \\mu_ {k} ^ {S} \\right| \\right|\\right)\\right) \\tag {5} \\\\ \\end{array}", + "image_path": "229ed0c17f01215c95bc966dbeaff3ac29451e26ed723ffe59d5adb93a72e572.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 623, + 545, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 623, + 545, + 673 + ], + "spans": [ + { + "bbox": [ + 310, + 623, + 545, + 673 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} l \\left(\\Lambda^ {\\mathcal {R}}\\right) = \\sum^ {n} \\log \\left(\\sum^ {k} \\pi_ {k} \\frac {e ^ {\\sigma_ {k} \\mu_ {k} ^ {T} f _ {i}}}{Z \\left(\\sigma_ {k}\\right)}\\right) - \\psi_ {k} \\sum^ {n} \\sum^ {k} \\left(\\left| \\left| \\mu_ {k} - \\mu_ {k} ^ {\\mathcal {S}} \\right| \\right|\\right) \\tag {6} \\\\ \\left| \\left| f \\right| \\right| = 1, \\left| \\left| \\mu_ {k} \\right| \\right| = 1, \\sigma = 1 \\Longrightarrow Z (\\sigma) = c o n s t. 
\\\\ \\end{array}", + "image_path": "cf84e2eb52a89072593d13584c2e482ea65a0502b2d33886973583c0e9408f65.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 679, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 679, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 679, + 547, + 714 + ], + "type": "text", + "content": "The Expectation step for learning the transitional parameters is similar to the source version. In the first step, we calculate the summary statistics for the transitional parameters" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22991" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 249, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 249, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 249, + 84 + ], + "type": "text", + "content": "Algorithm 1 Unsupervised Generative Transition" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "spans": [ + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "type": "text", + "content": "1: Input: Set of source domain images " + }, + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "type": "inline_equation", + "content": "I^S = \\{I_1^S, \\dots, I_n^S\\}" + }, + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "type": "text", + "content": ", target domain images " + }, + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "type": "inline_equation", + "content": "I^T = \\{I_1^T, \\dots, I_N^T\\}" + }, + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "type": "text", + "content": ", source domain labels " + }, + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "type": "inline_equation", + "content": "y = \\{y_1^S, \\dots, y_n^S\\}" + }, + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "type": "text", + "content": ", deep network backbone " + }, + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "type": "inline_equation", + "content": "\\Gamma(., \\zeta)" + }, + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "type": "text", + "content": ", background images " + }, + { + "bbox": [ + 53, + 87, + 545, + 114 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_{i=1}^r" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 114, + 378, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 114, + 378, + 125 + ], + "spans": [ + { + "bbox": [ + 53, + 114, + 378, + 125 + ], + "type": "text", + "content": "2: Output: Target domain model parameters " + }, + { + "bbox": [ + 53, + 114, + 378, + 125 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = (\\mathcal{A},\\Lambda)" + }, + { + "bbox": [ + 53, + 114, + 378, + 125 + ], + "type": "text", + "content": ", background model " + }, + { + "bbox": [ + 53, + 114, + 378, + 125 + ], + "type": "inline_equation", + "content": "\\beta_{r}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 125, + 203, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 203, + 137 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 203, + 137 + ], + "type": "text", + "content": "3: procedure UGT " + 
}, + { + "bbox": [ + 53, + 125, + 203, + 137 + ], + "type": "inline_equation", + "content": "(I^{\\mathcal{S}}, I^T, y, \\Gamma, \\beta_r)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 137, + 234, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 234, + 148 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 234, + 148 + ], + "type": "text", + "content": "4: " + }, + { + "bbox": [ + 53, + 137, + 234, + 148 + ], + "type": "inline_equation", + "content": "\\{F^{\\mathcal{S}}\\} ,\\{F^{\\mathcal{R}}\\} \\longleftarrow \\Gamma ((\\{I^{\\mathcal{S}}\\} ,\\{I^T\\}),\\zeta)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 148, + 221, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 148, + 221, + 160 + ], + "spans": [ + { + "bbox": [ + 53, + 148, + 221, + 160 + ], + "type": "text", + "content": "5: \\(\\Lambda^{\\mathcal{S}}(\\mu_k)\\gets\\) cluster & MLE(\\({\\cal F}^{\\mathcal{S}}\\})" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 160, + 179, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 160, + 179, + 172 + ], + "spans": [ + { + "bbox": [ + 53, + 160, + 179, + 172 + ], + "type": "text", + "content": "6: " + }, + { + "bbox": [ + 53, + 160, + 179, + 172 + ], + "type": "inline_equation", + "content": "\\Lambda_{initial}^{\\mathcal{R}}(\\mu) \\longleftarrow \\Lambda^{\\mathcal{S}}(\\mu_k)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 172, + 237, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 172, + 237, + 185 + ], + "spans": [ + { + "bbox": [ + 53, + 172, + 237, + 185 + ], + "type": "text", + "content": "7: " + }, + { + "bbox": [ + 53, + 172, + 237, + 185 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}(\\tilde{\\mu})\\longleftarrow \\mathrm{MLE}(F^T,\\Delta (\\psi ,\\Lambda^{\\mathcal{S}},\\Lambda^{\\mathcal{R}}))" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 195, + 275, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 195, + 275, + 211 + ], + "spans": [ + { + "bbox": [ + 53, + 195, + 275, + 211 + ], + "type": "text", + "content": "8: " + }, + { + "bbox": [ + 53, + 195, + 275, + 211 + ], + "type": "inline_equation", + "content": "\\{L^{\\mathcal{R}}\\} \\longleftarrow \\sum_{k}\\pi_{k}e^{\\sigma_{k}\\mu_{k}^{T}f^{\\mathcal{S}}} / Z[\\sigma_{k}](F*\\Lambda^{\\mathcal{R}}(\\mu_{k}))" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 220, + 236, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 220, + 236, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 220, + 236, + 235 + ], + "type": "text", + "content": "9: " + }, + { + "bbox": [ + 53, + 220, + 236, + 235 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathcal{R}}_{y_s,m}\\longleftrightarrow" + }, + { + "bbox": [ + 53, + 220, + 236, + 235 + ], + "type": "text", + "content": " cluster&MLE " + }, + { + "bbox": [ + 53, + 220, + 236, + 235 + ], + "type": "inline_equation", + "content": "(\\{L^{\\mathcal{R}}\\} ,y_S)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 243, + 240, + 284 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 50, + 243, + 214, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 243, + 214, + 257 + ], + "spans": [ + { + "bbox": [ + 50, + 243, + 214, + 257 + ], + "type": "text", + "content": "10: " + }, + { + "bbox": [ + 50, + 243, + 214, + 257 + ], + "type": 
"inline_equation", + "content": "y_{\\hat{T}} \\longleftarrow \\operatorname{argmax}_{y} P(F|\\Lambda^{\\mathcal{R}}, \\mathcal{A}^{\\mathcal{R}})" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 257, + 240, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 257, + 240, + 271 + ], + "spans": [ + { + "bbox": [ + 50, + 257, + 240, + 271 + ], + "type": "text", + "content": "11: " + }, + { + "bbox": [ + 50, + 257, + 240, + 271 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{y_{\\hat{T},m}}^{\\mathcal{R}'} \\longleftrightarrow \\text{cluster&MLE}(\\{L^{\\mathcal{R}}\\}, y_{\\hat{T}})" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 271, + 233, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 271, + 233, + 284 + ], + "spans": [ + { + "bbox": [ + 50, + 271, + 233, + 284 + ], + "type": "text", + "content": "12: " + }, + { + "bbox": [ + 50, + 271, + 233, + 284 + ], + "type": "inline_equation", + "content": "\\mathcal{T}\\longleftarrow" + }, + { + "bbox": [ + 50, + 271, + 233, + 284 + ], + "type": "text", + "content": " optimize " + }, + { + "bbox": [ + 50, + 271, + 233, + 284 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{\\mathrm{gce}} + \\psi_v\\mathcal{L} + \\psi_\\alpha \\mathcal{L})" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 284, + 129, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 284, + 129, + 295 + ], + "spans": [ + { + "bbox": [ + 50, + 284, + 129, + 295 + ], + "type": "text", + "content": "13: end procedure" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 294, + 137, + 545, + 284 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 294, + 137, + 543, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 137, + 543, + 148 + ], + "spans": [ + { + "bbox": [ + 294, + 137, + 543, + 148 + ], + "type": "text", + "content": "> Extract source & target featuremaps from DCNN backbone" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 294, + 148, + 545, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 148, + 545, + 160 + ], + "spans": [ + { + "bbox": [ + 294, + 148, + 545, + 160 + ], + "type": "inline_equation", + "content": "\\triangleright" + }, + { + "bbox": [ + 294, + 148, + 545, + 160 + ], + "type": "text", + "content": " Initialize source vMF kernels by kmeans & learn using MLE" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 294, + 161, + 545, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 161, + 545, + 172 + ], + "spans": [ + { + "bbox": [ + 294, + 161, + 545, + 172 + ], + "type": "inline_equation", + "content": "\\triangleright" + }, + { + "bbox": [ + 294, + 161, + 545, + 172 + ], + "type": "text", + "content": " Initialise transitional vMF kernels with source vMF kernels" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 294, + 172, + 545, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 172, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 294, + 172, + 545, + 196 + ], + "type": "inline_equation", + "content": "\\triangleright" + }, + { + "bbox": [ + 294, + 172, + 545, + 196 + ], + "type": "text", + "content": " Learn transitional vMF features using regularized MLE with target domain data (Sec. 3.3.1, Eq. (5)-Eq. 
(9))" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 294, + 198, + 545, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 198, + 545, + 220 + ], + "spans": [ + { + "bbox": [ + 294, + 198, + 545, + 220 + ], + "type": "inline_equation", + "content": "\\triangleright" + }, + { + "bbox": [ + 294, + 198, + 545, + 220 + ], + "type": "text", + "content": " Compute regularized transitional vMF likelihood with source featuremaps and transitional vMF kernels" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 294, + 221, + 545, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 221, + 545, + 244 + ], + "spans": [ + { + "bbox": [ + 294, + 221, + 545, + 244 + ], + "type": "inline_equation", + "content": "\\triangleright" + }, + { + "bbox": [ + 294, + 221, + 545, + 244 + ], + "type": "text", + "content": " Calculate spatial coefficients using transitional vMF likelihood and source feature vectors (Sec. 3.3.2)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 294, + 245, + 533, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 245, + 533, + 256 + ], + "spans": [ + { + "bbox": [ + 294, + 245, + 533, + 256 + ], + "type": "inline_equation", + "content": "\\triangleright" + }, + { + "bbox": [ + 294, + 245, + 533, + 256 + ], + "type": "text", + "content": " Pseudo-label target domain data using transitional model" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 294, + 258, + 536, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 258, + 536, + 270 + ], + "spans": [ + { + "bbox": [ + 294, + 258, + 536, + 270 + ], + "type": "inline_equation", + "content": "\\triangleright" + }, + { + "bbox": [ + 294, + 258, + 536, + 270 + ], + "type": "text", + "content": " Finetune spatial coefficients using pseudolabelled data " + }, + { + "bbox": [ + 294, + 258, + 536, + 270 + ], + "type": "inline_equation", + "content": "y_{\\hat{T}}" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 294, + 271, + 517, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 271, + 517, + 284 + ], + "spans": [ + { + "bbox": [ + 294, + 271, + 517, + 284 + ], + "type": "inline_equation", + "content": "\\triangleright" + }, + { + "bbox": [ + 294, + 271, + 517, + 284 + ], + "type": "text", + "content": " Optionally, finetune entire model using " + }, + { + "bbox": [ + 294, + 271, + 517, + 284 + ], + "type": "inline_equation", + "content": "y_{\\hat{T}}" + }, + { + "bbox": [ + 294, + 271, + 517, + 284 + ], + "type": "text", + "content": " (Eq. (11))" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 319, + 271, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 319, + 271, + 332 + ], + "spans": [ + { + "bbox": [ + 47, + 319, + 271, + 332 + ], + "type": "text", + "content": "using the new data. 
For posterior probability defined as" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 100, + 335, + 287, + 361 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 335, + 287, + 361 + ], + "spans": [ + { + "bbox": [ + 100, + 335, + 287, + 361 + ], + "type": "interline_equation", + "content": "P (k \\mid f _ {i}, \\Lambda) = \\frac {\\pi_ {k} p \\left(f _ {i} \\mid \\mu_ {k} , \\sigma_ {k}\\right)}{\\sum^ {K} \\pi_ {k} p \\left(f _ {i} \\mid \\mu_ {k} , \\sigma_ {k}\\right)} \\tag {7}", + "image_path": "eeca18c3a7a2942c3ca3b22a4342396b4b61b90a3321c4bffef1020727e78014.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 46, + 365, + 287, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 287, + 399 + ], + "type": "text", + "content": "for the " + }, + { + "bbox": [ + 46, + 365, + 287, + 399 + ], + "type": "inline_equation", + "content": "k^{th}" + }, + { + "bbox": [ + 46, + 365, + 287, + 399 + ], + "type": "text", + "content": " mixture and where " + }, + { + "bbox": [ + 46, + 365, + 287, + 399 + ], + "type": "inline_equation", + "content": "p(f|\\mu_k,\\sigma_k)" + }, + { + "bbox": [ + 46, + 365, + 287, + 399 + ], + "type": "text", + "content": " is defined in Eq. (3), we update the mixture parameters in the maximization step in a regularized manner as follows," + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 82, + 404, + 287, + 432 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 404, + 287, + 432 + ], + "spans": [ + { + "bbox": [ + 82, + 404, + 287, + 432 + ], + "type": "interline_equation", + "content": "\\hat {\\pi} _ {k} = \\nu \\left[ \\psi_ {k} ^ {\\pi} \\frac {1}{n} \\sum_ {i = 1} ^ {n} P \\left(k \\mid f _ {i}, \\Lambda\\right) + \\left(1 - \\psi_ {k} ^ {\\pi}\\right) \\pi_ {k} ^ {S} \\right] \\tag {8}", + "image_path": "36df217e15f1747e50e087f424ee93f471a958180336eccd6484249dad6ae68b.jpg" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 83, + 434, + 287, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 434, + 287, + 449 + ], + "spans": [ + { + "bbox": [ + 83, + 434, + 287, + 449 + ], + "type": "interline_equation", + "content": "\\hat {\\mu} _ {k} = \\psi_ {k} ^ {\\mu} \\mathcal {E} _ {k} + \\left(1 - \\psi_ {k} ^ {\\mu}\\right) \\mu_ {k} ^ {S} \\tag {9}", + "image_path": "4d780c83ac6869646e5ea135beacf30b970396cfa66f28a8ad337dbb7c993c41.jpg" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "spans": [ + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_k" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " is the first moment or mean of the " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "inline_equation", + "content": "k^{\\text{th}}" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " mixture calculated on the new data, " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "inline_equation", + "content": "\\nu" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " is a scaling parameter to ensure that " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + 
"type": "inline_equation", + "content": "\\sum_{k} \\pi_{k} = 1" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "inline_equation", + "content": "\\psi_{k}" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " is an adaptation coefficient which is defined for each parameter and mixture. It can be defined in a data-dependent manner [29], i.e., " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "inline_equation", + "content": "\\psi_{k}^{\\mu, \\pi} = \\left( \\frac{\\omega_{k}}{P(k|f_{i},\\Lambda)} + 1 \\right)^{-1}" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "inline_equation", + "content": "w_{k}" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " is an empirically set hyperparameter which controls the adaptation emphasis between source and transitional parameters. Empirically, we observed that the adaptation coefficient is not very sensitive to changes to its value and therefore, we increase it monotonically during the EM iterations. A " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "inline_equation", + "content": "\\psi_{k}" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " for a specific vMF kernel " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "inline_equation", + "content": "\\mu_{k}" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " at time-step " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " stabilizes if the change in its likelihood component is below a threshold value over the previous EM iteration step t-1 and then " + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "inline_equation", + "content": "\\psi_{k}" + }, + { + "bbox": [ + 46, + 452, + 287, + 657 + ], + "type": "text", + "content": " value. We find that only using the parameter update works well. For simpler datasets, even directly learning the transitional dictionary would suffice." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 47, + 670, + 259, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 259, + 684 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 259, + 684 + ], + "type": "text", + "content": "3.3.2 Learning Transitional Spatial Coefficients" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "content": "After learning " + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}" + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "content": ", we use it to estimate the transitional spatial coefficients " + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "(\\mathcal{A}^{\\mathcal{R}}(\\alpha))" + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "content": " using the labeled source domain" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 305, + 319, + 545, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 319, + 545, + 356 + ], + "spans": [ + { + "bbox": [ + 305, + 319, + 545, + 356 + ], + "type": "text", + "content": "features " + }, + { + "bbox": [ + 305, + 319, + 545, + 356 + ], + "type": "inline_equation", + "content": "f^S" + }, + { + "bbox": [ + 305, + 319, + 545, + 356 + ], + "type": "text", + "content": " (using MLE). The spatial coefficients represent the expected activation of a calculated vMF kernel " + }, + { + "bbox": [ + 305, + 319, + 545, + 356 + ], + "type": "inline_equation", + "content": "\\mu_k" + }, + { + "bbox": [ + 305, + 319, + 545, + 356 + ], + "type": "text", + "content": " at a position " + }, + { + "bbox": [ + 305, + 319, + 545, + 356 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 305, + 319, + 545, + 356 + ], + "type": "text", + "content": " in the feature map for a specific class " + }, + { + "bbox": [ + 305, + 319, + 545, + 356 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 305, + 319, + 545, + 356 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 311, + 365, + 545, + 389 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 365, + 545, + 389 + ], + "spans": [ + { + "bbox": [ + 311, + 365, + 545, + 389 + ], + "type": "interline_equation", + "content": "P _ {a} \\left(f _ {a} \\mid y _ {s}, m; \\mathcal {A} ^ {\\mathcal {R}}, \\Lambda^ {R}\\right) = \\sum_ {k} \\alpha_ {a, k} ^ {y _ {s}, m} P \\left(f _ {a} \\mid \\Lambda^ {\\mathcal {R}} \\left(\\sigma_ {k}, \\mu_ {k}\\right)\\right) \\tag {10}", + "image_path": "d053df8b149bcad9482afe8cfb2c466fc36474a96c9b9c76ce3afee027de6abf.jpg" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "spans": [ + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "text", + "content": "We can leverage the learnt transitional vMF kernel dictionary " + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "text", + "content": " to learn spatial coefficients " + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathcal{R}}(\\alpha)" + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "text", + "content": " which represent the spatial relationships of the vMF dictionary vectors over the source domain data " + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "inline_equation", + "content": "D_{S}" + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "text", + "content": ". As these spatial coefficients " + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "text", + "content": " are conditioned on " + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "text", + "content": ", they also correspond to parts of target domain features even when they are learned using " + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "inline_equation", + "content": "f^{S}" + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "text", + "content": ", thus creating a transitional model with parameters " + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "inline_equation", + "content": "(\\Lambda^{\\mathcal{R}},\\mathcal{A}^{\\mathcal{R}})" + }, + { + "bbox": [ + 304, + 398, + 545, + 494 + ], + "type": "text", + "content": " that we can use to classify target domain data." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "text", + "content": "This combination of conditioned transitional vMF dictionary " + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "inline_equation", + "content": "(\\Lambda^{\\mathcal{R}})" + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "text", + "content": " and spatial coefficients " + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "inline_equation", + "content": "(\\mathcal{A}^{\\mathcal{R}})" + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "text", + "content": " can be leveraged to label a subset of target domain features, especially since we can focus on the subset of transitional vMF kernels " + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "inline_equation", + "content": "(\\Lambda^{\\mathcal{R}})" + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "text", + "content": " which are similar to their source counterparts. We can use these pseudo labeled feature vectors " + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "inline_equation", + "content": "(y_{\\hat{T}})" + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "text", + "content": ", along with " + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "text", + "content": " to finetune the current spatial coefficients " + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "text", + "content": " which leads to improved spatial coefficients " + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathcal{R}'}" + }, + { + "bbox": [ + 304, + 495, + 545, + 590 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "content": "Finetuning spatial coefficients. 
Transitional spatial coefficients " + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "inline_equation", + "content": "(\\mathcal{A}^{\\mathcal{R}})" + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "content": " are initialized with the values describing the expected activation of transitional vMF dictionary vectors " + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}(\\mu_k)" + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "content": " for the source data features " + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "inline_equation", + "content": "f^{\\mathcal{S}}" + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "content": " at a position " + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "content": " on a feature map " + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "inline_equation", + "content": "f_{a}" + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "content": ". Subsequently, we finetune these spatial coefficients " + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "content": " using a subset of target domain images that present high activations for the robust set of transitional vMF dictionary vectors " + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "content": ". Optionally, we can also finetune " + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\Lambda^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "content": " by relearning them without any initialization and" + } + ] + } + ], + "index": 37 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "22992" + } + ] + } + ], + "index": 38 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "type": "text", + "content": "regularization constraints. Although our model is trained by partitioning into two parts, it is still fully differentiable and trainable from end to end [20, 36, 42]. We use this model property to finetune the entire model. The loss function (Eq. (11)) consists of a generalized cross entropy [44] term calculated using the model predictions and two regularization parameters for the vMF dictionary and the spatial coefficient parameters. This is to encourage the vMF clusters to be similar to the feature vectors " + }, + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "type": "inline_equation", + "content": "f_{a}" + }, + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "type": "text", + "content": ". In Eq. 
(11), " + }, + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "type": "inline_equation", + "content": "\\zeta_{\\{v,\\alpha\\}}" + }, + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "type": "text", + "content": " represent the trade-off hyperparameters of the regularizing loss terms," + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 209, + 287, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 209, + 287, + 224 + ], + "spans": [ + { + "bbox": [ + 61, + 209, + 287, + 224 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {\\mathrm {g c e}} \\left(y _ {p r e d}, y _ {\\hat {T}}\\right) + \\zeta_ {v} \\mathcal {L} (F, \\Lambda) + \\zeta_ {\\alpha} \\mathcal {L} (F, \\mathcal {A}), \\tag {11}", + "image_path": "e2b140486c21e897671e425799aada91aab8ded55e6a5f4d7ebb5401a0dfba65.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "spans": [ + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "content": "For a constant vMF variance " + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "inline_equation", + "content": "\\sigma_{k}" + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "content": " (which also reduces the normalisation term to a constant) and assuming hard assignment of features " + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "inline_equation", + "content": "f_{a}" + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "content": " to vMF dictionary clusters[21]," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 271, + 287, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 271, + 287, + 291 + ], + "spans": [ + { + "bbox": [ + 55, + 271, + 287, + 291 + ], + "type": "interline_equation", + "content": "\\mathcal {L} \\left(F, \\Lambda^ {\\mathcal {R}}\\right) = - \\sum_ {a} \\max _ {k} \\log p \\left(f _ {a} \\mid \\Lambda^ {\\mathcal {R}} \\left(\\mu_ {k}\\right)\\right) \\tag {12}", + "image_path": "9c70cfd78a49e7b81006286a65aaa1de8d153e808799afb731e28bb94e62fde0.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 293, + 287, + 315 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 293, + 287, + 315 + ], + "spans": [ + { + "bbox": [ + 55, + 293, + 287, + 315 + ], + "type": "interline_equation", + "content": "\\mathcal {L} \\left(F, \\mathcal {A} ^ {\\mathcal {R} ^ {\\prime}}\\right) = - \\sum_ {a} \\left(1 - z _ {a}\\right) \\log \\left[ \\sum_ {k} \\alpha_ {a, k} ^ {y _ {\\hat {T}}, m} p \\left(f _ {a} \\mid \\Lambda^ {\\mathcal {R}} \\left(\\mu_ {k}\\right)\\right) \\right] \\tag {13}", + "image_path": "6ff50c9e65be83a4ba3b65903e9609ca3c13acc17b32be1a869d287ba1727c06.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 323, + 256, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 323, + 256, + 335 + ], + "spans": [ + { + "bbox": [ + 47, + 323, + 256, + 335 + ], + "type": "text", + "content": "Latent variable " + }, + { + "bbox": [ + 47, + 323, + 256, + 335 + ], + "type": "inline_equation", + "content": "z_{a}\\in \\{0,1\\}" + }, + { + "bbox": [ + 47, + 323, + 256, + 335 + ], + "type": "text", + "content": " is explained in Sec. 3.1." 
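Since the model is fully differentiable, the fine-tuning objective in Eqs. (11)-(13) can be written directly as a loss. The PyTorch-style sketch below assumes that framework, assumes the per-sample spatial coefficients have already been gathered for each sample's pseudo-label and mixture, and uses illustrative values for q, zeta_v and zeta_a.

```python
# Hedged sketch of the fine-tuning loss in Eqs. (11)-(13).
import torch
import torch.nn.functional as F_  # aliased to avoid clashing with the feature notation F

def gce_loss(logits, targets, q=0.7):
    """Generalized cross entropy [44]: (1 - p_y^q) / q, robust to noisy pseudo-labels."""
    p_y = F_.softmax(logits, dim=1).gather(1, targets[:, None]).squeeze(1)
    return ((1.0 - p_y.clamp_min(1e-6) ** q) / q).mean()

def ugt_finetune_loss(logits, pseudo_labels, feats, mu, alpha_ym, visible,
                      sigma=30.0, zeta_v=0.1, zeta_a=0.1):
    """feats: (B, A, C) unit-norm features; mu: (K, C) transitional kernels; alpha_ym: (B, A, K)
    spatial coefficients gathered for each sample's pseudo-label and mixture;
    visible: (B, A) mask equal to 1 - z_a (1 where the position is not occluded)."""
    vmf_log = sigma * feats @ mu.t()                                         # (B, A, K), Z(sigma) constant
    # Eq. (12): hard assignment of every feature to its best-matching vMF kernel.
    loss_v = -vmf_log.max(dim=-1).values.mean()
    # Eq. (13): log-likelihood under the spatial coefficients, masked by occlusion.
    log_mix = torch.logsumexp(vmf_log + torch.log(alpha_ym + 1e-8), dim=-1)  # (B, A)
    loss_a = -(visible * log_mix).sum() / visible.sum().clamp_min(1.0)
    # Eq. (11): total objective with the two trade-off hyperparameters.
    return gce_loss(logits, pseudo_labels) + zeta_v * loss_v + zeta_a * loss_a
```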
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 345, + 128, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 345, + 128, + 358 + ], + "spans": [ + { + "bbox": [ + 47, + 345, + 128, + 358 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 365, + 289, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 289, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 289, + 521 + ], + "type": "text", + "content": "Our experiments evaluate robustness of vision classification models in an extended out-of-domain setup i.e., generalizing to target domains with individual nuisance factors and partial occlusion. This allows us to thoroughly evaluate the efficacy of current methods which have been shown to perform well on other OOD robustness datasets on OOD-CV[45] (which enables a systematic analysis of nuisances on real-world data), Occluded-OOD-CV (which allows us to evaluate models on a combination of partial occlusion with individual nuisances) and Imagenet-C corruptions (for analysis of synthetic corruptions). Lastly, we also show some initial results on Synthetic to Real OOD robustness using the UDAParts [24] dataset." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 527, + 143, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 143, + 540 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 143, + 540 + ], + "type": "text", + "content": "4.1. Setup and Data" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 545, + 287, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 287, + 617 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 287, + 617 + ], + "type": "text", + "content": "Datasets. For primary evaluation, we use the OOD-CV [45] dataset. OOD-CV dataset consists of test subcategories which vary from the training data in terms of a main nuisance factor, namely, context, weather, texture, pose and shape. We use " + }, + { + "bbox": [ + 46, + 545, + 287, + 617 + ], + "type": "inline_equation", + "content": "L0" + }, + { + "bbox": [ + 46, + 545, + 287, + 617 + ], + "type": "text", + "content": " for the (0%) occlusion level to represent this data setup in Tab. 1 and Supplementary Sec. B." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 618, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 288, + 713 + ], + "type": "text", + "content": "Occluded-OOD-CV. In addition to OOD-CV, we experiment with a more complex robustness analysis setup involving partial occlusion. In this setup, models that have been adapted in an unsupervised manner to target domains with nuisance factors are then evaluated on data with partial occlusion in addition to the real-world nuisances. For this purpose, we create a dataset named Occluded-OOD-CV where we superimpose occluders on the OOD-CV test" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": "images objects in order to approximate real-life occlusion. These occluders have been cropped from the MS-COCO dataset, similar to [20] and are superimposed on objects in the OOD-CV test set. 
There are three levels of partial occlusions - " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "L1(20 - 40\\%)" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "L2(40 - 60\\%)" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "L3(60 - 80\\%)" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " which allows us to diversely analyze the occlusion robustness of the model (in addition to individual nuisance factors). Fig. 3 shows some example images from our dataset. Previous works [18, 21] have shown that using cropped occluders, as done in Occluded-OOD-CV, is akin to the use of real occluders for classification evaluation. We also use" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 308, + 211, + 392, + 264 + ], + "blocks": [ + { + "bbox": [ + 308, + 211, + 392, + 264 + ], + "lines": [ + { + "bbox": [ + 308, + 211, + 392, + 264 + ], + "spans": [ + { + "bbox": [ + 308, + 211, + 392, + 264 + ], + "type": "image", + "image_path": "247b46dcecdd1129006577d352d0238e6c1a830e35f3d27817fe3e06676027ae.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 399, + 213, + 471, + 268 + ], + "blocks": [ + { + "bbox": [ + 399, + 213, + 471, + 268 + ], + "lines": [ + { + "bbox": [ + 399, + 213, + 471, + 268 + ], + "spans": [ + { + "bbox": [ + 399, + 213, + 471, + 268 + ], + "type": "image", + "image_path": "3e8b036d4dfbd44b03042a2f86d67ea5b222ece38e72c7102cadbc8063546a53.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 482, + 213, + 533, + 266 + ], + "blocks": [ + { + "bbox": [ + 482, + 213, + 533, + 266 + ], + "lines": [ + { + "bbox": [ + 482, + 213, + 533, + 266 + ], + "spans": [ + { + "bbox": [ + 482, + 213, + 533, + 266 + ], + "type": "image", + "image_path": "350cbe9ac78a261ffb8acc50be5b0ef545f2c17cff9365cbf4fc3c64ac59bc08.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 309, + 267, + 380, + 320 + ], + "blocks": [ + { + "bbox": [ + 309, + 267, + 380, + 320 + ], + "lines": [ + { + "bbox": [ + 309, + 267, + 380, + 320 + ], + "spans": [ + { + "bbox": [ + 309, + 267, + 380, + 320 + ], + "type": "image", + "image_path": "7aee95b83785551127aae01db6a52fea3e177fdc028df72f2e8716beba8b022a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 321, + 544, + 333 + ], + "lines": [ + { + "bbox": [ + 306, + 321, + 544, + 333 + ], + "spans": [ + { + "bbox": [ + 306, + 321, + 544, + 333 + ], + "type": "text", + "content": "Context (60-80%) Weather (20-40%) Texture (40-60%)" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 386, + 272, + 473, + 319 + ], + "blocks": [ + { + "bbox": [ + 386, + 272, + 473, + 319 + ], + "lines": [ + { + "bbox": [ + 386, + 272, + 473, + 319 + ], + "spans": [ + { + "bbox": [ + 386, + 272, + 473, + 319 + ], + "type": "image", + "image_path": "10b86bdb032d1a58917c7bbe15b8a8370803129ac613383e8e00f4b2911825b2.jpg" + } + ] + } + ], + 
"index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 342, + 545, + 365 + ], + "lines": [ + { + "bbox": [ + 304, + 342, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 545, + 365 + ], + "type": "text", + "content": "Figure 3. Occluded-OOD-CV dataset examples. Each object category is identified by its nuisance factor and occlusion percentage" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 478, + 267, + 543, + 319 + ], + "blocks": [ + { + "bbox": [ + 478, + 267, + 543, + 319 + ], + "lines": [ + { + "bbox": [ + 478, + 267, + 543, + 319 + ], + "spans": [ + { + "bbox": [ + 478, + 267, + 543, + 319 + ], + "type": "image", + "image_path": "f0b40b433831a803979e805e5df8d1ae295722fae4eeaba261a3c90aa992f2a9.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 378, + 545, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 378, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 304, + 378, + 545, + 426 + ], + "type": "text", + "content": "Imagenet-C[9] corruptions in the Pascal3D+ dataset for robustness evaluation with conventionally used synthetic corruptions. We also evaluate models in a synthetic (UDAParts [24]) to real data (Pascal3D+ [41]) setup." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 426, + 545, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 426, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 426, + 545, + 533 + ], + "type": "text", + "content": "In summary, we have 5 different real world nuisance data subcategories (context, weather, texture, pose, shape), at least seven synthetic corruption categories (fog, pixelate, motion blur, etc.), one synthetic source dataset and 4 partial occlusion levels (including no occlusion) for each experiment. We also run experiments on all the combined nuisance subcategories (Tab. 1). So, in total we have 24 sets of data and experiments for our (extended) OOD robustness setup on the OOD-CV dataset alone." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 534, + 545, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 617 + ], + "type": "text", + "content": "Models. We compare our work with our baseline method CompNets [21], other well known recent works [30, 32] which have been shown to be SOTA on various robustness datasets [9, 11, 12] as well as many well-known UDA methods [3, 13, 15, 22, 23, 25-27, 40, 43]. We focus on VGG16 and Resnet-50 backbones as they have been commonly used in most current methods[20, 30, 32, 44]." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "type": "text", + "content": "Training Setup. All models are trained on the source data with corresponding labels. Models can access some unlabeled nuisance (target) data, which could be a single nuisance (OOD-CV, Imagenet-C), combined nuisances (Tab. 1) or real data (when source data are synthetic). Models do not have access to images with partial occlusion at any time, and partially occluded images are only used for inference. 
We also avoid using different types of data aug-" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22993" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 91, + 541, + 500 + ], + "blocks": [ + { + "bbox": [ + 59, + 71, + 534, + 82 + ], + "lines": [ + { + "bbox": [ + 59, + 71, + 534, + 82 + ], + "spans": [ + { + "bbox": [ + 59, + 71, + 534, + 82 + ], + "type": "text", + "content": "Table 1. OOD-CV Nuisances Top-1 Classification Results. Occlusion levels greater than " + }, + { + "bbox": [ + 59, + 71, + 534, + 82 + ], + "type": "inline_equation", + "content": "0\\%" + }, + { + "bbox": [ + 59, + 71, + 534, + 82 + ], + "type": "text", + "content": " represent Occluded-OOD-CV dataset." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 91, + 541, + 500 + ], + "lines": [ + { + "bbox": [ + 53, + 91, + 541, + 500 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 541, + 500 + ], + "type": "table", + "html": "
MethodCombinedContextWeather
\\(Occlusion\\rightarrow\\)0%20-40%40-60%60-80%0%20-40%40-60%60-80%0%20-40%40-60%60-80%
CDAN [25]**.760.531.420.380.710.541.436.397.745.476.335.299
BSP [2]**.753.506.401.351.610.511.419.385.730.391.266.254
MDD [43]**.780.551.469.410.761.531.436.410.802.439.306.271
MCD [31]**.772.556.461.403.798.523.426.374.810.447.336.286
MCC [15]**.785.582.492.434.730.577.454.420.767.503.376.362
FixBi [27]**.821.534.478.399.802.542.445.409.755.489.358.335
MIC [13]**.837.540.376.262.755.602.532.499.817.612.496.427
ToAlign [40]**.761.507.411.346.712.501.393.382.720.381.252.213
CST [23]**.840.579.539.477.687.491.452.411.813.558.397.356
DUA [26]**.699.523.480.403.667.471.434.401.701.465.391.210
DINE [22]**.835.600.493.443.867.515.418.397.798.423.290.261
RPL [30].664.430.346.300.675.457.368.315.642.247.138.122
BNA [32].653.426.343.298.580.397.342.278.635.295.179.171
CompNet [21].720.506.462.415.790.517.454.369.683.434.398.362
UGT (Ours).850.620.570.501.875.624.565.511.856.600.528.465
TexturePoseShape
CDAN [25]**.820.532.420.364.844.620.521.450.773.561.491.441
BSP [2]**.696.444.384.315.831.610.510.423.757.535.485.434
MDD [43]**.895.518.427.400.870.611.534.469.836.541.459.386
MCD [31]**.896.522.432.392.865.623.532.471.834.538.456.397
MCC [15]**.874.671.547.495.867.611.521.460.818.601.524.460
FixBi [27]**.854.574.445.369.842.533.472.446.801.500.435.373
MIC [13]**.821.706.631.576.799.613.509.455.807.608.565.467
ToAlign [40]**.594.413.312.273.788.574.503.418.719.548.460.391
CST [23]**.858.657.538.477.887.617.525.451.831.617.495.441
DUA [26]**.918.691.514.468.755.511.423.355.695.455.386.345
DINE [22]**.911.572.432.401.885.618.543.448.838.520.426.360
RPL [30].703.371.238.227.730.493.400.329.670.426.340.311
BNA [32].701.383.247.239.737.510.407.355.662.436.350.311
CompNet [21].747.539.462.426.768.581.538.458.698.466.451.400
UGT (Ours).936.726.665.635.892.632.555.481.852.644.601.567
", + "image_path": "58acb526a404c7f9ccd6897549d6f07f07ad56d247f160f6805dbeac29fa6acd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 61, + 501, + 341, + 511 + ], + "lines": [ + { + "bbox": [ + 61, + 501, + 341, + 511 + ], + "spans": [ + { + "bbox": [ + 61, + 501, + 341, + 511 + ], + "type": "text", + "content": "** Pretrained Imagenet Backbone used (Resnet-50) / Pretrained UDA model used." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 53, + 542, + 541, + 688 + ], + "blocks": [ + { + "bbox": [ + 136, + 521, + 456, + 533 + ], + "lines": [ + { + "bbox": [ + 136, + 521, + 456, + 533 + ], + "spans": [ + { + "bbox": [ + 136, + 521, + 456, + 533 + ], + "type": "text", + "content": "Table 2. Imagenet-C Corruptions on Pascal3D+ dataset - Classification Results (Vgg16)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 542, + 541, + 688 + ], + "lines": [ + { + "bbox": [ + 53, + 542, + 541, + 688 + ], + "spans": [ + { + "bbox": [ + 53, + 542, + 541, + 688 + ], + "type": "table", + "html": "
Model\nOcclusion→Elastic TransformGaussian BlurSnow
0%20-40%40-60%60-80%0%20-40%40-60%60-80%0%20-40%40-60%60-80%
RPL [30].830.597.461.371.855.541.403.320.842.592.435.408
BNA [32].793.601.498.400.833.618.484.300.767.627.542.454
CompNet [21].268.183.157.146.732.395.296.241.529.348.258.210
UGT (Ours).872.712.712.494.909.720.613.509.890.742.634.523
Motion BlurContrastFrost
RPL [30].862.629.481.373.901.610.433.321.850.670.511.402
BNA [32].844.623.481.355.899.601.401.315.845.654.501.399
CompNet [21].639.362.287.241.760.472.374.312.740.481.360.301
UGT (Ours).891.763.673.567.923.701.534.412.911.782.672.561
", + "image_path": "082e94c437441cd35dd2ef4e44cb7e9fe84cd6491af224fcd1219644e4ef33e2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "22994" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 83, + 85, + 511, + 148 + ], + "blocks": [ + { + "bbox": [ + 85, + 64, + 507, + 77 + ], + "lines": [ + { + "bbox": [ + 85, + 64, + 507, + 77 + ], + "spans": [ + { + "bbox": [ + 85, + 64, + 507, + 77 + ], + "type": "text", + "content": "Table 3. Ablation analysis for (a) OOD-CV [45] Combined (b) OOD-CV Texture (c) Imagenet-C (Snow) Corruption" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 83, + 85, + 511, + 148 + ], + "lines": [ + { + "bbox": [ + 83, + 85, + 511, + 148 + ], + "spans": [ + { + "bbox": [ + 83, + 85, + 511, + 148 + ], + "type": "table", + "html": "
Occlusion→L0L1L2L3L0L1L2L3L0L1L2L3
Baseline(B).698.466.451.400.715.575.475.409.529.348.258.210
+ΛR+AR.816.598.524.498.785.660.559.515.781.671.582.480
+ΛR+AR'.852.644.601.567.843.764.656.623.885.742.634.523
", + "image_path": "1670f7bf9d510b5da4eef1a2d006f7793f45f0726e0e8d68e6a09aa727931988.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 154, + 288, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 154, + 288, + 250 + ], + "spans": [ + { + "bbox": [ + 46, + 154, + 288, + 250 + ], + "type": "text", + "content": "mentations and additional data training to have a fairer comparison amongst all the works. Although, our Bayesian model does not use pretrained Imagenet backbones for feature extraction for fairness, a number of our comparative methods [2, 15, 25, 26, 43] perform poorly without one, so we relax this constraint for them. Our method is still capable of surpassing them in terms of classification accuracy. Further details are provided in Supplementary Section C." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 256, + 105, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 256, + 105, + 268 + ], + "spans": [ + { + "bbox": [ + 47, + 256, + 105, + 268 + ], + "type": "text", + "content": "4.2. Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 274, + 287, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 274, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 46, + 274, + 287, + 370 + ], + "type": "text", + "content": "OOD robustness to individual nuisances. Tab. 1 (L0 columns) shows classification results on entire OOD-CV test data (combined nuisances) as well as five individual nuisances. We see that our model achieves state-of-the-art results in all experiments. In Tab. 2, we observe that our model also performs exceedingly well when dealing with synthetic Imagenet-C corruptions. Refer to Supplementary Sec.C2 and Tables 5-11 for additional Imagenet-C results." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 384, + 287, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 384, + 287, + 445 + ], + "spans": [ + { + "bbox": [ + 46, + 384, + 287, + 445 + ], + "type": "text", + "content": "Synthetic to Real. Tab. 4 shows our results on both normal and extended OOD robustness scenario in a synthetic to real setup, showing that our unsupervised method can robustly close the gap between its supervised counterpart while outperforming other methods by large margins." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 52, + 485, + 282, + 559 + ], + "blocks": [ + { + "bbox": [ + 47, + 454, + 287, + 475 + ], + "lines": [ + { + "bbox": [ + 47, + 454, + 287, + 475 + ], + "spans": [ + { + "bbox": [ + 47, + 454, + 287, + 475 + ], + "type": "text", + "content": "Table 4. Synthetic (UDAParts) [24] to Real (Pascal3D+) [41] dataset - Classification Results on Resnet50" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 485, + 282, + 559 + ], + "lines": [ + { + "bbox": [ + 52, + 485, + 282, + 559 + ], + "spans": [ + { + "bbox": [ + 52, + 485, + 282, + 559 + ], + "type": "table", + "html": "
Model0%20-40%40-60%60-80%
RPL [30].822.432.370.335
BNA [32].950.684.484.356
CompNet [21].940.650.475.347
UGT (Ours).992.957.861.753
", + "image_path": "0c1a5e98087aaf73f5a4fc36613f12541a68d28271554085881486153e2811e7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 570, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 287, + 713 + ], + "type": "text", + "content": "Extended OOD robustness under partial Occlusion. In Tab. 1, Tab. 2 and Supplementary Tables 1-3, 5-11, our model outperforms other methods by significant margins in the extended OOD scenarios of nuisance parameters with partial occlusion. We observe that the performance of other models which have been adapted to the target domain data drops drastically when encountering partial occlusion along with nuisance factors. This underlines the increased complexity of the extended OOD robustness scenario relative to the vanilla OOD robustness setup and how our Bayesian model is able to perform exceedingly well compared to conventional methods." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 153, + 411, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 153, + 411, + 166 + ], + "spans": [ + { + "bbox": [ + 306, + 153, + 411, + 166 + ], + "type": "text", + "content": "4.3. Ablation Analysis" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 178, + 545, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 178, + 545, + 382 + ], + "spans": [ + { + "bbox": [ + 304, + 178, + 545, + 382 + ], + "type": "text", + "content": "Tab. 3 and Supplementary Sec. D & Tables 12-17 show the extensive results of the ablation study for UGT, underlying how each component contributes to the overall compositional model. We can see that just calculating the transitional vMF kernel dictionary " + }, + { + "bbox": [ + 304, + 178, + 545, + 382 + ], + "type": "inline_equation", + "content": "(\\Lambda^{\\mathcal{R}})" + }, + { + "bbox": [ + 304, + 178, + 545, + 382 + ], + "type": "text", + "content": " and the transitional spatial coefficients " + }, + { + "bbox": [ + 304, + 178, + 545, + 382 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathcal{R}}" + }, + { + "bbox": [ + 304, + 178, + 545, + 382 + ], + "type": "text", + "content": " improves the results significantly over the baseline method[21]. Further finetuning the spatial coefficients " + }, + { + "bbox": [ + 304, + 178, + 545, + 382 + ], + "type": "inline_equation", + "content": "(\\mathcal{A}^{\\mathcal{R}'})" + }, + { + "bbox": [ + 304, + 178, + 545, + 382 + ], + "type": "text", + "content": " using pseudo-labelled target domain features boosts the performance. We ablate our hypothesis regarding similar vMF kernels in source and target domains by visualizing image patches that are activated by similar cross-domain kernels (Supplementary Figures 9-11). We also ablate our hypothesis regarding robust spatial geometry by visualizing images activated by the same spatial coefficient in both source and target domains (using source and transitional vMF dictionaries) in Supp. Fig 4 and 7. Analysis of adaptation coefficient is discussed in Supp. Sec. E." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 412, + 471, + 425 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 412, + 471, + 425 + ], + "spans": [ + { + "bbox": [ + 305, + 412, + 471, + 425 + ], + "type": "text", + "content": "5. 
Conclusion and Future Work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 438, + 545, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 438, + 545, + 704 + ], + "spans": [ + { + "bbox": [ + 304, + 438, + 545, + 704 + ], + "type": "text", + "content": "In this work, we addressed the problem of developing object classification algorithms that are robust to OOD factors such as weather, context and occlusion. We generalize CompNets[21] for OOD robustness by observing that they could be learned in two uncoupled steps: (i) unsupervised learning of a dictionary of vMF kernels (roughly corresponding to the subparts of the object) and (ii) supervised learning of the spatial structure of the objects (intuitively where the subparts occur). This enabled us to: (a) learn a transitional dictionary which captured the feature properties of both domains, and (b) learn the distribution of spatial structure on the source domain and transfer it to the target. This model is very successful and could be improved by simple pseudo-labeling techniques. Our empirical results on the OOD-CV[45], synthetic Imagenet-C corruptions, and the synthetic UDA-Parts dataset display the strong and versatile SOTA performance of our method. In addition, we developed a more challenging dataset Occluded-OOD-CV by introducing occlusion into OOD-CV and show that our Bayesian method, UGT, performed well in this difficult challenge. Our Bayesian approach could be extended to other tasks such as semantic segmentation, exploiting properties of CompNets[36, 37]. We give a qualitative proof of concept in the Supplementary." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "text", + "content": "22995" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] D. Bug, S. Schneider, A. Grote, E. Oswald, F. Feuerhake, J. Schüler, and D. Merhof. Context-based normalization of histological stains using deep convolutional features. Lecture Notes in Computer Science, pages 135-142, 2017. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 192 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 192 + ], + "type": "text", + "content": "[2] Xinyang Chen, Sinan Wang, Mingsheng Long, and Jianmin Wang. Transferability vs. discriminability: Batch spectral penalization for adversarial domain adaptation. In Proceedings of the 36th International Conference on Machine Learning, pages 1081-1090. PMLR, 2019. 
7, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 194, + 288, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 194, + 288, + 247 + ], + "spans": [ + { + "bbox": [ + 53, + 194, + 288, + 247 + ], + "type": "text", + "content": "[3] Shuhao Cui, Shuhui Wang, Junbao Zhuo, Liang Li, Qingming Huang, and Qi Tian. Towards discriminability and diversity: Batch nuclear-norm maximization under label insufficient situations. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 249, + 288, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 249, + 288, + 282 + ], + "spans": [ + { + "bbox": [ + 53, + 249, + 288, + 282 + ], + "type": "text", + "content": "[4] Nathan Drenkow, Numair Sani, Ilya Shpitser, and Mathias Unberath. Robustness in deep learning for computer vision: Mind the gap? CoRR, abs/2112.00639, 2021. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 284, + 287, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 284, + 287, + 337 + ], + "spans": [ + { + "bbox": [ + 53, + 284, + 287, + 337 + ], + "type": "text", + "content": "[5] Aram Galstyan and Paul R Cohen. Empirical comparison of \"hard\" and \"soft\" label propagation for relational classification. In International Conference on Inductive Logic Programming, pages 98-111. Springer, Berlin, Heidelberg, 2007. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 339, + 287, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 339, + 287, + 372 + ], + "spans": [ + { + "bbox": [ + 53, + 339, + 287, + 372 + ], + "type": "text", + "content": "[6] Aritra Ghosh, Himanshu Kumar, and P. S. Sastry. Robust loss functions under label noise for deep neural networks, 2017. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 374, + 287, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 374, + 287, + 418 + ], + "spans": [ + { + "bbox": [ + 53, + 374, + 287, + 418 + ], + "type": "text", + "content": "[7] Siddharth Gopal and Yiming Yang. Von mises-fisher clustering models. In Proceedings of the 31st International Conference on Machine Learning, pages 154-162, Beijing, China, 2014. PMLR. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 419, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 419, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 53, + 419, + 287, + 464 + ], + "type": "text", + "content": "[8] Md Hasnat, Julien Bohné, Jonathan Milgram, Stéphane Gentic, Liming Chen, et al. von mises-fisher mixture model-based deep learning: Application to face verification. arXiv preprint arXiv:1706.04264, 2017. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 465, + 287, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 465, + 287, + 497 + ], + "spans": [ + { + "bbox": [ + 53, + 465, + 287, + 497 + ], + "type": "text", + "content": "[9] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations, 2019. 
1, 2, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 499, + 287, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 499, + 287, + 553 + ], + "spans": [ + { + "bbox": [ + 48, + 499, + 287, + 553 + ], + "type": "text", + "content": "[10] Dan Hendrycks, Xiaoyuan Liu, Eric Wallace, Adam Dziedzic, Rishabh Krishnan, and Dawn Song. Pretrained transformers improve out-of-distribution robustness. Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 555, + 287, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 287, + 609 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 287, + 609 + ], + "type": "text", + "content": "[11] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization, 2021. 1, 2, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 612, + 287, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 612, + 287, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 612, + 287, + 643 + ], + "type": "text", + "content": "[12] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples, 2021. 1, 2, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 646, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 287, + 678 + ], + "type": "text", + "content": "[13] Lukas Hoyer, Dengxin Dai, Haoran Wang, and Luc Van Gool. Mic: Masked image consistency for context-enhanced domain adaptation, 2023. 6, 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "text", + "content": "[14] Dong hyun Lee. Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. 2" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[15] Ying Jin, Ximei Wang, Mingsheng Long, and Jianmin Wang. Minimum class confusion for versatile domain adaptation, 2019. 6, 7, 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "type": "text", + "content": "[16] Prakhar Kaushik, Aayush Mishra, Adam Kortylewski, and Alan Yuille. Source-free and image-only unsupervised domain adaptation for category level object pose estimation, 2024. 
2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 151, + 545, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 151, + 545, + 172 + ], + "spans": [ + { + "bbox": [ + 307, + 151, + 545, + 172 + ], + "type": "text", + "content": "[17] Toru Kitagawa and Jeff Rowley. von mises-fisher distributions and their statistical divergence, 2022. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 174, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 174, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 307, + 174, + 545, + 205 + ], + "type": "text", + "content": "[18] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. Pare: Part attention regressor for 3d human body estimation, 2021. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 206, + 545, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 206, + 545, + 271 + ], + "spans": [ + { + "bbox": [ + 308, + 206, + 545, + 271 + ], + "type": "text", + "content": "[19] Adam Kortylewski, Bernhard Egger, Andreas Schneider, Thomas Gereg, Andreas Morel-Forster, and Thomas Vetter. Empirically analyzing the effect of dataset biases on deep face recognition systems. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2018. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 273, + 545, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 273, + 545, + 327 + ], + "spans": [ + { + "bbox": [ + 308, + 273, + 545, + 327 + ], + "type": "text", + "content": "[20] Adam Kortylewski, Ju He, Qing Liu, and Alan Loddon Yuille. Compositional convolutional neural networks: A deep architecture with innate robustness to partial occlusion. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8937-8946, 2020. 1, 3, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 328, + 545, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 328, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 308, + 328, + 545, + 381 + ], + "type": "text", + "content": "[21] Adam Kortylewski, Qing Liu, Angtian Wang, Yihong Sun, and Alan Yuille. Compositional convolutional neural networks: A robust and interpretable model for object recognition under occlusion. International Journal of Computer Vision, 129(3):736-760, 2021. 1, 2, 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 383, + 545, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 383, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 308, + 383, + 545, + 415 + ], + "type": "text", + "content": "[22] Jian Liang, Dapeng Hu, Jiashi Feng, and Ran He. Dine: Domain adaptation from single and multiple black-box predictors, 2022. 6, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 416, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 416, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 308, + 416, + 545, + 437 + ], + "type": "text", + "content": "[23] Hong Liu, Jianmin Wang, and Mingsheng Long. Cycle self-training for domain adaptation, 2021. 
6, 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 438, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 438, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 308, + 438, + 545, + 491 + ], + "type": "text", + "content": "[24] Qing Liu, Adam Kortylewski, Zhishuai Zhang, Zizhang Li, Mengqi Guo, Qihao Liu, Xiaoding Yuan, Jiteng Mu, Weichao Qiu, and Alan Yuille. Learning part segmentation through unsupervised domain adaptation from synthetic vehicles. In CVPR, 2022. 2, 6, 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "type": "text", + "content": "[25] Mingsheng Long, ZHANGJIE CAO, Jianmin Wang, and Michael I Jordan. Conditional adversarial domain adaptation. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2018. 6, 7, 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 537, + 545, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 545, + 569 + ], + "type": "text", + "content": "[26] M. Jehanzeb Mirza, Jakub Micorek, Horst Possegger, and Horst Bischof. The norm must go on: Dynamic unsupervised domain adaptation by normalization, 2022. 7, 8" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 571, + 545, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 571, + 545, + 602 + ], + "spans": [ + { + "bbox": [ + 308, + 571, + 545, + 602 + ], + "type": "text", + "content": "[27] Jaemin Na, Heechul Jung, Hyung Jin Chang, and Wonjun Hwang. Fixbi: Bridging domain spaces for unsupervised domain adaptation, 2021. 6, 7" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 604, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 604, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 308, + 604, + 545, + 635 + ], + "type": "text", + "content": "[28] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. DoImagenet classifiers generalize toImagenet?, 2019. 1, 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 637, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 637, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 637, + 545, + 669 + ], + "type": "text", + "content": "[29] Douglas A. Reynolds, Thomas F. Quatieri, and Robert B. Dunn. Speaker verification using adapted gaussian mixture models. Digital Signal Processing, 10(1):19-41, 2000. 5" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "text", + "content": "[30] Evgenia Rusak, Steffen Schneider, Peter Gehler, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Adapting imagenet-scale models to complex distribution shifts with self-learning, 2021. 
6, 7, 8" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "22996" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 73, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 73, + 287, + 116 + ], + "type": "text", + "content": "[31] Kuniaki Saito, Kohei Watanabe, Yoshitaka Ushiku, and Tatsuya Harada. Maximum classifier discrepancy for unsupervised domain adaptation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2018. 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 160 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 160 + ], + "type": "text", + "content": "[32] Steffen Schneider, Evgenia Rusak, Luisa Eck, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Improving robustness against common corruptions by covariate shift adaptation, 2020. 2, 6, 7, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 162, + 287, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 162, + 287, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 162, + 287, + 205 + ], + "type": "text", + "content": "[33] Ozan Sener, Hyun Oh Song, Ashutosh Saxena, and Silvio Savarese. Learning transferrable representations for unsupervised domain adaptation. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2016. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 206, + 287, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 206, + 287, + 237 + ], + "spans": [ + { + "bbox": [ + 48, + 206, + 287, + 237 + ], + "type": "text", + "content": "[34] Hwanjun Song, Minseok Kim, Dongmin Park, Yooju Shin, and Jae-Gil Lee. Learning from noisy labels with deep neural networks: A survey, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 239, + 287, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 239, + 287, + 281 + ], + "spans": [ + { + "bbox": [ + 48, + 239, + 287, + 281 + ], + "type": "text", + "content": "[35] Baochen Sun, Jiashi Feng, and Kate Saenko. Correlation alignment for unsupervised domain adaptation. Advances in Computer Vision and Pattern Recognition, pages 153-171, 2017. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 283, + 287, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 283, + 287, + 326 + ], + "spans": [ + { + "bbox": [ + 48, + 283, + 287, + 326 + ], + "type": "text", + "content": "[36] Yihong Sun, Adam Kortylewski, and Alan Yuille. Weakly-supervised amodal instance segmentation with compositional priors. arXiv preprint arXiv:2010.13175, 2020. 
1, 2, 3, 6, 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 327, + 287, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 287, + 381 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 287, + 381 + ], + "type": "text", + "content": "[37] Angtian Wang, Yihong Sun, Adam Kortylewski, and Alan L Yuille. Robust object detection under occlusion with context-aware compositionalnets. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12645-12654, 2020. 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 383, + 287, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 383, + 287, + 415 + ], + "spans": [ + { + "bbox": [ + 48, + 383, + 287, + 415 + ], + "type": "text", + "content": "[38] Dequan Wang, Evan Shelhamer, Shaoteng Liu, Bruno Olshausen, and Trevor Darrell. Tent: Fully test-time adaptation by entropy minimization, 2020. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 416, + 287, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 416, + 287, + 448 + ], + "spans": [ + { + "bbox": [ + 48, + 416, + 287, + 448 + ], + "type": "text", + "content": "[39] Jianyu Wang, Zhishuai Zhang, Cihang Xie, Yuyin Zhou, Vittal Premachandran, Jun Zhu, Lingxi Xie, and Alan Yuille. Visual concepts and compositional voting, 2017. 2, 3, 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 449, + 287, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 287, + 480 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 287, + 480 + ], + "type": "text", + "content": "[40] Guoqiang Wei, Cuiling Lan, Wenjun Zeng, Zhizheng Zhang, and Zhibo Chen. Toalign: Task-oriented alignment for unsupervised domain adaptation, 2021. 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 482, + 287, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 482, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 48, + 482, + 287, + 525 + ], + "type": "text", + "content": "[41] Yu Xiang, Roozbeh Mottaghi, and Silvio Savarese. Beyond Pascal: A benchmark for 3d object detection in the wild. In IEEE Winter Conference on Applications of Computer Vision (WACV), 2014. 6, 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 526, + 287, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 526, + 287, + 580 + ], + "spans": [ + { + "bbox": [ + 48, + 526, + 287, + 580 + ], + "type": "text", + "content": "[42] Xiaoding Yuan, Adam Kortylewski, Yihong Sun, and Alan Yuille. Robust instance segmentation through reasoning about multi-object occlusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11141-11150, 2021. 1, 3, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 582, + 287, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 582, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 48, + 582, + 287, + 613 + ], + "type": "text", + "content": "[43] Yuchen Zhang, Tianle Liu, Mingsheng Long, and Michael I. Jordan. Bridging theory and algorithm for domain adaptation, 2019. 
6, 7, 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 614, + 287, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 287, + 646 + ], + "type": "text", + "content": "[44] Zhilu Zhang and Mert R. Sabuncu. Generalized cross entropy loss for training deep neural networks with noisy labels, 2018. 2, 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "text", + "content": "[45] Bingchen Zhao, Shaozuo Yu, Wufei Ma, Mingxin Yu, Shenxiao Mei, Angtian Wang, Ju He, Alan Yuille, and Adam Kortylewski. Ood-cv: A benchmark for robustness to out-of-distribution shifts of individual nuisances in natural images. Proceedings of the European Conference on Computer Vision (ECCV), 2022. 1, 2, 6, 8" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "22997" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/393f0825-ecb8-44e6-bdfa-5dde4b82ecdb_content_list.json b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/393f0825-ecb8-44e6-bdfa-5dde4b82ecdb_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..45811cb0a3a6d3a988a4a2ff673fc981a6871b9d --- /dev/null +++ b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/393f0825-ecb8-44e6-bdfa-5dde4b82ecdb_content_list.json @@ -0,0 +1,1310 @@ +[ + { + "type": "text", + "text": "A Call to Reflect on Evaluation Practices for Age Estimation: Comparative Analysis of the State-of-the-Art and a Unified Benchmark", + "text_level": 1, + "bbox": [ + 125, + 130, + 844, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jakub Paplhám \nDepartment of Cybernetics \nFaculty of Electrical Engineering \nCzech Technical University in Prague", + "bbox": [ + 161, + 204, + 460, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "paplhjak@fel(cvut.cz", + "bbox": [ + 220, + 277, + 401, + 291 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Vojtěch Franc \nDepartment of Cybernetics \nFaculty of Electrical Engineering \nCzech Technical University in Prague", + "bbox": [ + 506, + 204, + 807, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "xfrancv@cmp.felk.cvut.cz", + "bbox": [ + 547, + 277, + 761, + 291 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 325, + 313, + 343 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Comparing different age estimation methods poses a challenge due to the unreliability of published results stemming from inconsistencies in the benchmarking process. 
Previous studies have reported continuous performance improvements over the past decade using specialized methods; however, our findings challenge these claims. This paper identifies two trivial, yet persistent issues with the currently used evaluation protocol and describes how to resolve them. We offer an extensive comparative analysis for state-of-the-art facial age estimation methods. Surprisingly, we find that the performance differences between the methods are negligible compared to the effect of other factors, such as facial alignment, facial coverage, image resolution, model architecture, or the amount of data used for pretraining. We use the gained insights to propose using FaRL as the backbone model and demonstrate its effectiveness on all public datasets. We make the source code and exact data splits public on GitHub and in the supplementary material.", + "bbox": [ + 73, + 358, + 472, + 632 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 662, + 209, + 678 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Age estimation has received significant interest in recent years. However, a closer examination of the evaluation process reveals two underlying issues. First, no standardized data splits are defined for most public datasets, and the used splits are rarely made public, making the results irreproducible. Second, methods often modify multiple components of the age estimation system, making it unclear which modification is responsible for the performance gains.", + "bbox": [ + 75, + 686, + 468, + 809 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper aims to critically analyze the evaluation practices in age estimation research, highlight the issues, and appeal to the community to follow good evaluation practices to resolve them. We benchmark and fairly compare recent deep-learning methods for age estimation from facial images. We focus on state-of-the-art methods that", + "bbox": [ + 75, + 810, + 470, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "adapt a generic architecture by changing its last layer or the loss function to suit the age estimation task. Although this may appear restrictive, it is essential to note that most of the methods proposed in the field fall into this category $(\\approx 70\\%)$ . By comparing methods that modify only a small part of the network, we aim to ensure a fair evaluation, as the remaining setup can be kept identical. Besides the usual intra-class performance, we also evaluate their cross-dataset generalization, which has been neglected in the age prediction literature so far. Surprisingly, we find that the influence of the loss function and the decision layer on the results, usually the primary component that distinguishes different methods, is negligible compared to other factors.", + "bbox": [ + 496, + 328, + 893, + 525 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Contributions", + "text_level": 1, + "bbox": [ + 500, + 547, + 602, + 561 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We show that existing evaluation practices in age estimation do not provide consistent results. 
This leads to obstacles for researchers aiming to advance prior work and for practitioners striving to pinpoint the most effective approach for their application.", + "- We define a proper evaluation protocol, offer an extensive comparative analysis for state-of-the-art facial age estimation methods, and publish our code.", + "- We show that the performance difference caused by using a different decision layer or training loss is significantly smaller than that caused by other parts of the prediction pipeline.", + "- We identify that the amount of data used for pre-training is the most influential factor and use the observation to propose using FaRL [30] as the backbone architecture. We demonstrate its effectiveness on public datasets." + ], + "bbox": [ + 500, + 563, + 890, + 805 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2. Issues with Current Evaluation Practices", + "text_level": 1, + "bbox": [ + 500, + 820, + 867, + 835 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2.1. Data Splits", + "text_level": 1, + "bbox": [ + 500, + 845, + 620, + 861 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Publications focused on age estimation evaluate their methods on several datasets [1, 4, 20, 21, 23, 25, 29]. The most", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1196", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5822d4cf3b9407a2245cbe0fa17e78ab05bb18c23164a7649a052192ee02992d.jpg", + "image_caption": [ + "Figure 1. Mean Absolute Error (MAE) $\\downarrow$ of age estimation methods on the MORPH dataset, as reported in the existing literature and measured by us, viewed over time. Random splitting remains the prevalent data splitting strategy. The consistent performance improvements over time are attributed in the literature to specialized loss functions for age estimation. Subject-exclusive (identity-disjoint) data splitting is rarely employed. With unified subject-exclusive data splitting and all factors except the loss function fixed, all evaluated methods yield comparable results, failing to achieve the performance gains promised by the random splitting." + ], + "image_footnote": [], + "bbox": [ + 78, + 87, + 467, + 303 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "commonly used of these is the MORPH [23] dataset. However, the evaluation procedures between the publications are not unified. For instance, OR-CNN [21] randomly divides the dataset into two parts: $80\\%$ for training and $20\\%$ for testing. No mention is made of a validation set for model selection. Random splitting (RS) protocol is also used in [3, 5, 12, 13, 19, 28], but the specific data splits differ between studies as they are rarely made public. Since the dataset contains multiple images per person (many captured at the same age), the same individual can be present in both the training and testing sets. This overlap introduces a bias, resulting in overly optimistic evaluation outcomes. 
The degree of data leakage can vary when using random splitting, making certain data splits more challenging than others. Further, this fundamentally changes the entire setup; rarely will one want to deploy the age estimation system on the people present in the training data. Consequently, comparison of different methods and discerning which method stands out as the most effective based on the published results becomes problematic.", + "bbox": [ + 75, + 488, + 468, + 790 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Only some publications [22, 27] recognize this bias introduced by the splitting strategy and address it by implementing subject-exclusive (SE) [22] splitting. This approach ensures that all images of an individual are exclusively in the (i) training, (ii) validation, or (iii) testing part. The terminology here is not fully established. One might encounter either identity-disjoint or person-disjoint instead", + "bbox": [ + 75, + 794, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "of subject-exclusive in the literature.", + "bbox": [ + 498, + 90, + 740, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To assess how prevalent random splitting (RS) on the MORPH dataset truly is, we conducted a survey of all age estimation papers presented at the CVPR and ICCV since 2013. We found 16 papers focused on age estimation, of which nine use RS, two use SE, five use specialized splits, and three do not utilize MORPH. We further surveyed other research conferences and journals, namely: IJCAI, BMVC, ACCV, IEEE TIP, Pattern Recognit. Lett., Pattern Anal. Appl., and find eight influential age estimation papers that use MORPH. Of those, seven use RS, and one uses a specialized split. By specialized splits, we are referring to non-standard strategies such as ethnically balanced partitions.", + "bbox": [ + 496, + 107, + 890, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Altogether, we discover that only $\\approx 10\\%$ of papers that utilize MORPH use the SE protocol. This finding is concerning, as MORPH [23] is the most popular dataset used to compare age estimation approaches. Other datasets do not provide a reliable benchmark either, as standardized data splits are provided only for two public age estimation datasets: (i) the ChaLearn Looking at People Challenge 2016 (CLAP2016) dataset [1], which is relatively small, consisting of fewer than 8000 images, and (ii) the Cross-Age Celebrity Dataset (CACD2000) [4], which has noisy training annotations and is not intended for age estimation. Comparing methods using only these datasets is, therefore, not satisfactory either. Other popular datasets, AgeDB dataset [20] and Asian Face Age Dataset (AFAD) [21], also consist of multiple images per person, requiring SE splitting. However, they lack any data splits accepted by the community and often are used with the RS protocol. As such, they suffer from the same issues as MORPH [23].", + "bbox": [ + 496, + 289, + 892, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Pipeline Ablation", + "text_level": 1, + "bbox": [ + 500, + 573, + 669, + 589 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To fairly compare multiple methods, an identical experimental setup should be used for each of them. 
The current state-of-the-art age estimation approaches adhere to a common framework encompassing: (i) data collection, (ii) data preprocessing, (iii) model design, including the decision layer and the loss function, and (iv) training and evaluation. Most novel approaches introduce distinct changes to the component (iii); namely they design a specialized loss function to exploit the ordinal nature of age. However, they frequently alter multiple components of the framework simultaneously, complicating the attribution of performance improvements to the claimed modifications.", + "bbox": [ + 496, + 597, + 890, + 777 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To compare different loss functions, e.g., [3, 10, 12, 13, 16, 17, 21, 22], the other components of the framework should be kept constant, allowing us to isolate the impact of the selected method on the performance. This is trivial, yet the age estimation community mostly ignores it. Further, many publications hand-wave the other components and do not precisely specify them, making future comparisons meaningless. It's important to question whether the", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "1197", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "reported enhancement in a research paper truly stems from the novel loss function it proposes or if it could be attributed to a different modification. We strongly advocate that each component be addressed in isolation and that the experimental setup be precisely described.", + "bbox": [ + 75, + 90, + 470, + 167 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Over the past decade, numerous novel age estimation methods have been introduced, promising continuous performance improvements every year. However, motivated by these findings, we raise the question: how reliable are the published age estimation results? In Sec. 3 we aim to establish a proper evaluation protocol and use it in Sec. 4 to compare the methods [10, 12, 13, 17, 21, 22, 24] reliably. Figure 1 illustrates the contrast between the performance of state-of-the-art methods as reported in their respective studies and the outcomes as measured by our implementation.", + "bbox": [ + 75, + 167, + 468, + 319 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Evaluation Protocol", + "text_level": 1, + "bbox": [ + 76, + 337, + 267, + 353 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We identified two trivial yet persistent issues that prevent a reliable comparison of age estimation methods. In this section, we address the initial challenge concerning consistent data partitioning. We provide clear guidelines for the evaluation protocol to ensure replicable and fair assessments. Specifically, the protocol should establish a reproducible approach for defining the data used in both (i) training and (ii) performance evaluation. When specifying the training data, one needs to state whether the training dataset is the sole source of information, or if the model was pretrained with additional data. Additionally, the evaluation can be subdivided based on the data used for model evaluation into intra-dataset, and cross-dataset results. 
We describe how to evaluate models in these settings below.", + "bbox": [ + 75, + 364, + 470, + 575 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Intra-dataset performance To evaluate intra-dataset performance, a single dataset is used for both training and evaluation of the age estimation system. In this case, one should (i) randomly split the dataset into subject-exclusive training, validation, and test sets $^{1}$, (ii) train the model on the training set, (iii) measure the model's performance on the validation set, (iv) possibly return to step (ii) and train the model again, (v) evaluate the model's performance on the test set, then (vi) publish the results on the test set along with a detailed description of the system components and the data used. If the dataset consists of a limited number of examples, it is possible to create multiple splits of the data into training, validation and test sets through step (i). Following this, steps (ii) through (v) are iterated $n$ times, where $n$ is the number of generated splits. It is advisable to present the average test performance along with its standard deviation when reporting the results.", + "bbox": [ + 75, + 602, + 468, + 859 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Cross-dataset performance To evaluate cross-dataset performance, the data split in step (i) of the aforementioned evaluation process is generated from a collection of multiple datasets, ensuring that the chosen dataset is employed entirely for evaluation, effectively constituting the designated test set. The remaining steps of the evaluation procedure remain unaltered.", + "bbox": [ + 500, + 90, + 890, + 196 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Regardless of the scenario, whether it is intra-dataset or cross-dataset, each system needs to be evaluated against the test data only once, and the results published. All prior model development and hyperparameter tuning must be based solely on the results on the validation set. Furthermore, it should be indicated whether the training data are the only source of information used for training, or whether the model was pretrained with additional data. In the latter scenario, a detailed description of the additional data and their utilization should also be provided.", + "bbox": [ + 496, + 196, + 892, + 348 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4. Comparative Method Analysis", + "text_level": 1, + "bbox": [ + 498, + 361, + 779, + 378 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This section applies the evaluation protocol to compare state-of-the-art age estimation methods. We maintain a consistent preprocessing procedure, model architecture, and dataset while selectively altering the decision layer and loss function to incorporate modifications proposed in prominent works such as [10, 12, 13, 17, 21, 22, 24].", + "bbox": [ + 496, + 386, + 890, + 476 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1. Methodology", + "text_level": 1, + "bbox": [ + 500, + 486, + 635, + 502 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Datasets We evaluate the methods using 7 datasets: AgeDB [20], AFAD [21], CACD2000 [4], CLAP2016 [1], FG-NET [15], MORPH [23], and UTKFace [29].
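The intra-dataset protocol above can be summarized as a short driver loop; the sketch below assumes hypothetical helpers train_and_select and mean_absolute_error_on and pre-generated subject-exclusive splits, and is not the published benchmark code.

```python
# Sketch of the intra-dataset protocol: steps (ii)-(v) are repeated once per
# subject-exclusive split, and step (vi) reports the mean and standard deviation of
# the test MAE. `train_and_select` and `mean_absolute_error_on` are hypothetical
# helpers; `splits` is a list of dicts with pre-generated "train"/"val"/"test" parts.
import statistics

def run_intra_dataset_protocol(splits, train_and_select, mean_absolute_error_on):
    test_maes = []
    for split in splits:                                              # step (i): n SE splits
        model = train_and_select(split["train"], split["val"])        # steps (ii)-(iv)
        test_maes.append(mean_absolute_error_on(model, split["test"]))  # step (v), once per split
    # step (vi): report average test MAE and its standard deviation (assumes n >= 2 splits)
    return statistics.mean(test_maes), statistics.stdev(test_maes)
```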
We also use the IMDB-WIKI dataset [25] for pre-training with clean labels from Franc and Čech [11].", + "bbox": [ + 496, + 508, + 890, + 584 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Splits For the CLAP2016 and CACD2000 datasets, we use the single data split provided by the dataset authors. For the remaining datasets, we create five subject-exclusive (SE) data splits. To generate the split, we partition the dataset such that $60\%$ of the dataset is used for training, $20\%$ for model selection (validation), and $20\%$ for evaluating the model performance (test). Additionally, we ensure that each partition has the same age distribution. Due to its small size, we only use FG-NET for evaluation. We make our data splits and code public at Facial-Age-Benchmark$^{2}$.", + "bbox": [ + 496, + 602, + 890, + 753 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Model Architecture & Weight Initialization We use ResNet-50 [14] as the backbone architecture. We always start the training of the methods from the same initialization. We run the experiments with (i) random initialization, (ii) weights pre-trained on ImageNet (TorchVision's Imagenet1K_V2), and (iii) weights pre-trained on ImageNet and then further trained on IMDB-WIKI for age estimation", + "bbox": [ + 496, + 772, + 890, + 878 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "$^{1}$ The generated training, validation and test sets will usually be a partition of the dataset; however, in any case their intersection must be empty.", + "bbox": [ + 75, + 875, + 467, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://github.com/paplhjak/Facial-Age-Estimation-Benchmark", + "bbox": [ + 514, + 886, + 857, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "1198", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "with cross-entropy. After the pre-training, the last layer of the model is replaced with a layer specific to the desired method. The models are then fine-tuned on the downstream dataset. It is important to note that for the baseline cross-entropy, we also replace the final layer before fine-tuning. This ensures that the experimental setup remains identical to that of the other methods.", + "bbox": [ + 75, + 90, + 472, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Training Details We utilize the Adam optimizer with the parameters $\beta_{1} = 0.9$, $\beta_{2} = 0.999$. For pre-training on the IMDB-WIKI dataset, we set the learning rate to $\alpha = 10^{-3}$ and train the model for a total of 100 epochs. For fine-tuning on the remaining datasets, we reduce the learning rate to $\alpha = 10^{-4}$ and train the model for 50 epochs. We use a batch size of 100. The best model is selected based on the MAE metric computed on the validation set. We utilize two data augmentations during training: (i) horizontal mirroring, and (ii) cropping out an $80\%$ to $100\%$ portion of the bounding box and resizing it to the model input shape. We do not tune the hyperparameters of the methods [10, 12, 13, 17, 21, 22] on the validation set. We apply them in the original configurations. We argue that if any of the loss functions is a significant improvement over the baseline, we should observe a performance improvement across a broad range of hyperparameters and preprocessing pipelines.
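The optimizer and augmentation settings just listed map onto standard PyTorch and TorchVision components; the following sketch is a rough reconstruction rather than the authors' training script, and reading the "80% to 100% portion of the bounding box" as an area fraction for RandomResizedCrop is an assumption of the example.

```python
# Rough reconstruction of the training configuration described above (not the authors'
# script): Adam with beta1=0.9, beta2=0.999, lr 1e-3 for IMDB-WIKI pre-training and
# 1e-4 for fine-tuning, batch size 100, horizontal mirroring and random crop-and-resize.
# Treating the 80-100% crop as an *area* fraction is an assumption of this sketch.
import torch
from torchvision import models, transforms

backbone = models.resnet50(weights="IMAGENET1K_V2")       # initialization (ii) above

train_augmentation = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),               # (i) horizontal mirroring
    transforms.RandomResizedCrop(256, scale=(0.8, 1.0)),  # (ii) crop 80-100%, resize to input
    transforms.ToTensor(),
])

pretrain_optimizer = torch.optim.Adam(backbone.parameters(), lr=1e-3, betas=(0.9, 0.999))
finetune_optimizer = torch.optim.Adam(backbone.parameters(), lr=1e-4, betas=(0.9, 0.999))
batch_size = 100
```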
We consider our training parameters to be reasonable and to provide a comparison of the methods as if employed out-of-the-box.", + "bbox": [ + 75, + 218, + 472, + 505 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Preprocessing We use the RetinaFace model developed by Deng et al. [8] for face detection and facial landmark detection. We use complete facial coverage, i.e., the images encompass the entire head. We resize the images to a resolution of $256 \\times 256$ pixels and normalize the pixel values of the images. To this end, we subtract the mean and divide by the standard deviation of colors on ImageNet [7].", + "bbox": [ + 75, + 526, + 470, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Metrics We use the Mean Absolute Error (MAE) calculated on the test data as the performance measure. To determine whether any method is consistently better than others, we employ the Friedman test and the Nemenyi critical difference test (FN test) as described by Demšar [6]. The main statistic used in the test is the average ranking (1 is best) of a method computed on multiple datasets. Differences in the average ranking are then used to decide whether a method is significantly better than others or whether the improvement is due to randomness (the null hypothesis). We use a common significance level (p-value) of $\\alpha = 5\\%$ .", + "bbox": [ + 75, + 654, + 472, + 820 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Results", + "text_level": 1, + "bbox": [ + 76, + 830, + 171, + 845 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Intra-Dataset Performance The intra-dataset results can be seen in Tab. 6, highlighted with a grey background. When starting from random initialization, training with the", + "bbox": [ + 75, + 854, + 470, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Unimodal loss [17] tends to be unstable. Excluding the Unimodal loss [17] from the evaluation, we apply the FN test. The results indicate that three methods: OR-CNN [21], DLDL [12], and the Mean-Variance loss [22], demonstrate a significant performance improvement over the baseline cross-entropy. With limited data availability, when pretraining is not possible, it is advisable to utilize one of the aforementioned methods.", + "bbox": [ + 496, + 90, + 890, + 210 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "With pre-training, either on ImageNet or IMDB-WIKI, none of the methods is significantly better than the cross-entropy. In other words, we do not observe any systematic improvement by deviating from the standard approach.", + "bbox": [ + 496, + 210, + 890, + 272 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Cross-Dataset Generalization Cross-dataset results, shown in Tab. 6 with white background, were obtained by evaluating the performance of models on datasets that were not used for their training. The cross-dataset performance is unsurprisingly significantly worse than the intra-dataset performance for all of the methods. Using the FN test, we conclude that there is no significant difference in generalization capability between any of the methods [10, 12, 13, 17, 21, 22] and the cross-entropy, regardless of pre-training. 
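The FN test from the Metrics paragraph can be reproduced with SciPy's Friedman test plus Demšar's critical-difference formula; the MAE matrix below is made-up toy data, and the hard-coded q values are the standard Nemenyi constants for a 5% significance level.

```python
# Sketch of the Friedman + Nemenyi (FN) test described in the Metrics paragraph:
# Friedman test over per-dataset MAE values, average ranks (1 = best), and Demsar's
# critical difference CD = q_alpha * sqrt(k(k+1)/(6N)). The MAE matrix is toy data,
# not results from the paper.
import numpy as np
from scipy import stats

mae = np.array([            # rows = datasets (N), columns = methods (k); lower is better
    [5.81, 5.78, 5.80],
    [3.14, 3.16, 3.12],
    [2.81, 2.83, 2.80],
    [4.38, 4.40, 4.37],
])

statistic, p_value = stats.friedmanchisquare(*mae.T)      # one sample per method
avg_ranks = stats.rankdata(mae, axis=1).mean(axis=0)      # rank 1 = lowest MAE per dataset

n_datasets, k_methods = mae.shape
q_alpha = {2: 1.960, 3: 2.343, 4: 2.569, 5: 2.728}[k_methods]   # Nemenyi, alpha = 0.05
cd = q_alpha * np.sqrt(k_methods * (k_methods + 1) / (6.0 * n_datasets))
print(p_value, avg_ranks, cd)   # two methods differ significantly if their avg. ranks differ by > cd
```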
In other words, though the loss functions may reduce overfitting, they do not help in the presence of covariate shift.", + "bbox": [ + 496, + 290, + 890, + 469 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "None of the methods perform well when evaluated on a different dataset than the one they were trained on. The best cross-dataset results are achieved by training on either UTKFace or CLAP2016. The worst performance across databases is observed when models are trained on AFAD or MORPH. This discrepancy can be attributed to UTKFace and CLAP2016 having a broader range of images, which allows them to generalize effectively to other datasets. Conversely, the limited diversity in MORPH or AFAD datasets, such as AFAD mainly comprising images of people of Asian ethnicity and around $80\\%$ of MORPH being composed of individuals of African American ethnicity, contributes to the poor knowledge transfer. The significant decrease in the performance of models trained on the MORPH dataset when applied to other age estimation datasets underscores the importance of not relying solely on the MORPH dataset as the benchmark for age estimation. To ensure a reliable evaluation of different methods, it is crucial to incorporate results from alternative datasets as well.", + "bbox": [ + 496, + 470, + 890, + 758 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "5. Component Analysis", + "text_level": 1, + "bbox": [ + 500, + 768, + 697, + 787 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we analyze the influence of the backbone architecture and the data preparation pipeline on model performance. We show that changes to these components can have a much more significant impact on the final performance than the choice of a loss function. When altering a component, we maintain all other components at their defaults, presented as the Cross-Entropy approach in Sec. 4.", + "bbox": [ + 496, + 794, + 890, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "1199", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We use the gained insight to propose a strong baseline age estimation model using the FaRL [30] backbone.", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. Model Architecture", + "text_level": 1, + "bbox": [ + 76, + 130, + 264, + 145 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multiple different backbone architectures can be found in the age estimation literature. Among these architectures, VGG16 [10, 13, 17, 22, 27, 28] and ResNet-50 [2, 3, 19] stand out as the most common choice. We evaluate the influence of the architecture choice on the performance and extend the comparison to include more recent advancements, EfficientNet-B4 and ViT-B-16. We present our findings in Tab. 5. No backbone emerges as universally best across all datasets. Notably, changes in the backbone have a more substantial impact on performance than changes to the loss function, see Tab. 6. This highlights the importance of a thorough ablation, as changes in the backbone architecture could obscure the impact of the loss function.", + "bbox": [ + 75, + 154, + 470, + 349 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.2. 
Data Preparation Pipeline", + "text_level": 1, + "bbox": [ + 76, + 359, + 313, + 376 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Age estimation models require only a specific region of an image, specifically the person's face, as input, rather than the entire image. However, the influence of this selection process on the model's performance is not known a priori. Should the model be presented with a tight crop of the face or the entire head? Additionally, facial images can differ in terms of scale and resolution since they originate from various sources and as such need to be resized to a uniform resolution. In this section, we examine the impact of the aforementioned data preparation pipeline on the performance of age estimation models. We demonstrate that changes in the preprocessing have a more substantial impact on performance than changes to the loss function.", + "bbox": [ + 75, + 383, + 468, + 580 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Facial Alignment Numerous studies lack an explanation of their facial alignment procedure. Others merely mention the utilization of facial landmarks. To assess whether a standardized alignment is needed for a fair comparison of multiple methods, we adopt three distinct alignment procedures and evaluate their effect on model performance. Firstly, we (i) perform no alignment and employ the bounding box proposed by the facial detection model [8] as the simplest approach. The bounding box sides are parallel to the axes of the image. Secondly, (ii) we utilize the proposed bounding box but rotate it to horizontally align the eyes. Lastly, (iii) we use an alignment procedure that normalizes the rotation, positioning, and scale. For details, refer to the implementation. A visual representation of these facial alignment methods is depicted in Fig. 2. The performance of models trained using the various alignment procedures is presented in Tab. 1. When working with pre-aligned datasets like AFAD, we observe that procedure (iii) does not yield significant improvements compared to the simpler variants (i) or (ii). Similar results are obtained", + "bbox": [ + 75, + 598, + 470, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "on datasets collected under standardized conditions, such as the MORPH dataset. However, when dealing with in-the-wild datasets like AgeDB and CLAP2016, we find that alignment (iii) leads to noticeable improvements over the simpler methods. Interestingly, on the UTKFace dataset, which also contains in-the-wild images, approach (ii) of solely rotating the proposed bounding boxes achieves the best outcomes. However, the disparities among the various alignment procedures are not substantial. We therefore argue that any facial alignment technique that effectively normalizes the position, rotation, and scale of the faces would yield comparable results.", + "bbox": [ + 496, + 90, + 893, + 272 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Facial Coverage While facial alignment defines the positioning, orientation, and scale of facial landmarks, the extent to which the face is visible in an image also needs to be specified. We refer to this notion as facial coverage. It measures how much of the face is shown in an image and can range from minimal coverage, where only the eyes and mouth are visible, to complete coverage, where the entire head is visible. The optimal compromise between complete facial coverage and minimal coverage is not immediately clear.
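One way the coverage levels could be realized is by rescaling the detector's face box around its center before cropping; the sketch below and its scale factors are illustrative guesses rather than the benchmark's actual crop definitions (Pillow pads out-of-image regions with black, which matches the black-bar effect noted for AFAD below).

```python
# Sketch: producing different facial-coverage crops by scaling the detector's face box
# around its center before cropping. The scale factors are illustrative guesses, not
# the benchmark's crop definitions. Pillow pads out-of-image regions with black.
from PIL import Image

def crop_with_coverage(image: Image.Image, box, scale: float) -> Image.Image:
    """box = (x1, y1, x2, y2) from a face detector; scale > 1 widens the crop."""
    x1, y1, x2, y2 = box
    cx, cy = (x1 + x2) / 2.0, (y1 + y2) / 2.0
    half_w, half_h = (x2 - x1) / 2.0 * scale, (y2 - y1) / 2.0 * scale
    return image.crop((round(cx - half_w), round(cy - half_h),
                       round(cx + half_w), round(cy + half_h)))

COVERAGE_SCALES = {"eyes_mouth": 0.6, "chin_forehead": 1.0, "head": 1.6}  # illustrative only
```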
Complete facial coverage provides a comprehensive view of the face, allowing age estimation algorithms to consider a broader range of facial cues. On the other hand, partial coverage may help reduce overfitting by eliminating irrelevant facial cues and features with high variance. For a visual demonstration of various facial coverage levels, refer to Fig. 3. The concept of facial coverage has received limited attention in age estimation literature. Consequently, the extent of facial coverage utilized in previous studies can only be inferred from the images presented in those works. For instance, Berg et al. [2] seemingly employ minimal coverage, showing slightly more than just the mouth and eyes. The majority of other works [3, 12, 13, 17, 21, 27] tend to adopt partial coverage, where a significant portion of the face, including the chin and forehead, is visible, but not the entire head and hair. In the works of Pan et al. [22], Rothe et al. [24], and Zhang et al. [28], the entire head is shown.", + "bbox": [ + 496, + 295, + 893, + 702 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The performance of models trained with the different coverage levels is presented in Tab. 2. Generally, complete facial coverage, which includes the entire head in the model input, yields the best results across the majority of datasets. However, specifically for AFAD dataset and the MORPH dataset, partial coverage performs better. It is important to note that the AFAD dataset contains preprocessed images that do not capture the entire head. Consequently, using complete facial coverage with this dataset results in the presence of black bars and a decrease in the effective pixel resolution of the face. It is then to be expected that increased facial coverage yields inferior results. The smallest coverage, limited to the facial region up to the eyes and mouth,", + "bbox": [ + 496, + 704, + 893, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "1200", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1e53304d80e10cd67dbd8eab24e4992df4994ac8712e1a03f48a45fc1539fe0a.jpg", + "image_caption": [ + "(a) Crop." + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 187, + 172 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5e27e3ab6f333a541c855371847e0b8229d43e62789118a10a2f63d443029e49.jpg", + "image_caption": [ + "(b) Rotation." + ], + "image_footnote": [], + "bbox": [ + 210, + 88, + 318, + 172 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f7cea1e324fb70130baaa7384dc596c9f93a5c808325b7f61a466b016b00e442.jpg", + "image_caption": [ + "(c) Rot., Trans., Scale.", + "Figure 2. Comparison of different alignment methods using the average face from the FG-NET dataset." + ], + "image_footnote": [], + "bbox": [ + 341, + 88, + 450, + 172 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "consistently performs the worst. With sufficient pixel resolution, the full facial coverage performs the best.", + "bbox": [ + 75, + 253, + 467, + 285 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Input Resolution To investigate the influence of input resolution on age estimation, we performed experiments using multiple resolutions on all datasets: specifically, $256 \\times 256$ , $128 \\times 128$ , and $64 \\times 64$ pixels. The results are presented in Tab. 3. 
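The resolution sweep, together with the ImageNet-statistics normalization from the Preprocessing paragraph, can be expressed as a small transform factory; the sketch mirrors the textual description rather than the benchmark's exact pipeline.

```python
# Sketch of the evaluation-time input pipeline for the three tested resolutions, using
# the ImageNet mean/std normalization described in the Preprocessing paragraph. This
# mirrors the textual description, not the benchmark's exact code.
from torchvision import transforms

IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]

def eval_transform(resolution: int) -> transforms.Compose:
    return transforms.Compose([
        transforms.Resize((resolution, resolution)),
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
    ])

pipelines = {r: eval_transform(r) for r in (64, 128, 256)}   # resolutions from Tab. 3
```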
Our findings indicate that an increase in image resolution consistently results in improved model performance across all datasets. Hence, the best performance was achieved with a resolution of $256 \\times 256$ pixels.", + "bbox": [ + 75, + 303, + 467, + 425 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the literature, one can find resolutions ranging from $60 \\times 60$ to $256 \\times 256$ pixels, where newer works tend to use larger resolution images. As the resolution increase can directly be observed to improve the results; and the resolutions increased with years; it is difficult to say whether newly proposed methods are better overall, or whether they perform better due to using higher resolution images.", + "bbox": [ + 75, + 425, + 467, + 531 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Input Transform Finally, we examined the input transformation proposed by Lin et al. [18], which involves converting a face image into a tanh-polar representation. This approach has shown large performance improvements in face semantic segmentation. Lin et al. then modified the network for age estimation, reporting impressive results [19]. We explored the potential benefits of applying this transformation for age estimation. However, our findings indicate that the transformation does not improve the results compared to the baseline, as shown in Tab. 4. Therefore, we conclude that the improved age estimation performance observed by Lin et al. [19] does not arise from the use of a different representation, but rather from pre-training on semantic segmentation or their model architecture.", + "bbox": [ + 75, + 549, + 467, + 761 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. FaRL Backbone", + "text_level": 1, + "bbox": [ + 76, + 771, + 238, + 786 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We observed that adjustments to the decision layer and loss function have minimal impact on the final model performance. Conversely, large performance disparities arise when modifying other components of the prediction pipeline. Notably, the pretraining data appear to be the most influential factor. Based on this insight, we opt against creating a specialized loss function to enhance the age esti", + "bbox": [ + 75, + 794, + 467, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b33461ff630b18e174f8bf28e104b8ea3cf5ec860777f6f15ab3b0da56ddcec5.jpg", + "image_caption": [ + "(a) Eyes & Mouth.", + "Figure 3. Comparison of different facial coverage levels using the average face from the FG-NET dataset." + ], + "image_footnote": [], + "bbox": [ + 501, + 88, + 611, + 172 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ac9a8548a957f52dba0296747203c80b65c3e53b33219a81a6696567eed36afc.jpg", + "image_caption": [ + "(b) Chin & Forehead." + ], + "image_footnote": [], + "bbox": [ + 633, + 89, + 741, + 172 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3148bc6fde0c3c8feb58bd0a464313f9081bc4ba87a394a01e6373042a7d1d53.jpg", + "image_caption": [ + "(c) Head." + ], + "image_footnote": [], + "bbox": [ + 764, + 89, + 872, + 172 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/6c9b796269f3f02f9bb42ce667e5430cc5ca55e0cfa3b0a86aa6eb400d99146a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetAlignment
CropRotationRot. + Trans. + Scale
AgeDB5.935.925.84
AFAD3.123.113.11
CACD20004.014.004.00
CLAP20164.684.574.49
MORPH2.812.782.79
UTKFace4.494.424.44
", + "bbox": [ + 514, + 241, + 874, + 358 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/90da2433b41cafc22734df1e980f17dc01557eadd82d4d55fa319801bca39b6a.jpg", + "table_caption": [ + "Table 1. MAE $\\downarrow$ of ResNet-50 models with different facial alignment. The models were pre-trained on IMDB-WIKI." + ], + "table_footnote": [], + "table_body": "
DatasetFacial Coverage
Eyes & MouthChin & ForeheadHead
AgeDB6.065.845.81
AFAD3.173.113.14
CACD20004.024.003.96
CLAP20165.064.494.49
MORPH2.882.792.81
UTKFace4.634.444.38
", + "bbox": [ + 509, + 411, + 879, + 527 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/31fc96f4ed07a97905584da5117ba15e0c19b6fea78869540bacfa6bd2e75c3d.jpg", + "table_caption": [ + "Table 2. MAE $\\downarrow$ of ResNet-50 models with different facial coverages. The models were pre-trained on IMDB-WIKI." + ], + "table_footnote": [], + "table_body": "
DatasetImage Resolution
64 × 64128 × 128256 × 256
AgeDB8.436.905.81
AFAD3.363.253.14
CACD20005.014.553.96
CLAP201611.345.904.49
MORPH3.333.072.81
UTKFace5.834.814.38
", + "bbox": [ + 529, + 580, + 857, + 699 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3. MAE $\\downarrow$ of ResNet-50 models with different image resolutions. The models were pre-trained on IMDB-WIKI.", + "bbox": [ + 500, + 709, + 888, + 736 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "mation system. Instead, we leverage the FaRL backbone by Zheng et al. [30], utilizing a ViT-B-16 [9] model. The FaRL model is trained through a combination of (i) contrastive loss on image-text pairs and (ii) prediction of masked image patches. Training takes place on an extensive collection of facial images (50 million) from the image-text pair LAION dataset [26]. We retain the feature representation extracted by FaRL without altering the model's weights. Our decision to use FaRL is driven solely by the extensive amount", + "bbox": [ + 496, + 763, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "1201", + "bbox": [ + 483, + 944, + 513, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/846c927a4cbe24a421f86eed275ccb17b8251fd0f753df468b2a8973b8d2c785.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetTransform
No TransformRoI Tanh-polar [18]
AgeDB5.815.93
AFAD3.143.15
CACD20003.964.07
CLAP20164.494.71
MORPH2.812.80
UTKFace4.384.39
", + "bbox": [ + 106, + 88, + 439, + 205 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c0872ca7b3a63adeed30742d984c892cb18b4e8644593e54383b96d40287b21d.jpg", + "table_caption": [ + "Table 4. MAE $\\downarrow$ of ResNet-50 models with different input transformations. The models were pre-trained on IMDB-WIKI [25]." + ], + "table_footnote": [], + "table_body": "
DatasetBackbone
ResNet-50Eff.Net-B4ViT-B-16VGG-16
AgeDB5.815.769.076.02
AFAD3.143.204.043.22
CACD20003.964.006.223.92
CLAP20164.494.068.554.65
MORPH2.812.874.352.88
UTKFace4.384.236.884.64
", + "bbox": [ + 81, + 261, + 460, + 373 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. Intra-dataset MAE $\\downarrow$ with different backbone architectures. The models were pre-trained on IMDB-WIKI [25].", + "bbox": [ + 76, + 383, + 467, + 412 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "of pre-training data it incorporates, rather than specific characteristics of the backbone. Different image encoders could be trained in the same manner. However, due to the costs associated with training such models, we have chosen to use the available FaRL ViT-B-16 backbone. We employ a simple multilayer perceptron (MLP) over the FaRL-extracted features, consisting of 2 layers with 512 neurons each, followed by ReLU activation. Cross-entropy serves as the chosen loss function. For each downstream dataset, we pretrain the MLP on IMDB-WIKI or initialize it to random weights. We choose the preferred option based on validation loss on the downstream dataset. As previously, we replace the final layer before fine-tuning on downstream datasets.", + "bbox": [ + 75, + 443, + 467, + 638 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This straightforward modification outperformed all other models on AgeDB, CLAP2016, and UTKFace datasets. It also achieved superior results on AFAD, matched the performance of other models on CACD2000, but demonstrated worse performance on MORPH. Applying the FN test revealed statistically significant improvements of this model over others in both intra-dataset and cross-dataset evaluations, see Tab. 6. We attribute the poor performance of FaRL on MORPH to the fact that the distributions of images in LAION [26] and MORPH [23] are vastly different. As we do not finetune the feature representation of FaRL [30], it is possible that the representation learned on LAION is superior on the other datasets but deficient on MORPH.", + "bbox": [ + 75, + 641, + 467, + 835 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We do not claim this model to be the ultimate solution, but the results achieved with the FaRL backbone along with our public implementation offer a robust and straightforward baseline for a comparison with future methods.", + "bbox": [ + 75, + 839, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6. Discussion and Conclusions", + "text_level": 1, + "bbox": [ + 500, + 89, + 756, + 106 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this paper, we aimed to establish a fair comparison framework for evaluating various approaches for age estimation. We conducted a comprehensive analysis on seven different datasets, namely AgeDB [20], AFAD [21], CACD2000 [4], CLAP2016 [1], FG-NET [15], MORPH [23], and UTK-Face [29], comparing the models based on their Mean Absolute Error (MAE). To determine if any method outperformed the others, we employed the Friedman test and the Nemenyi critical difference test. When pre-training the models on a large dataset, we did not observe any statistically significant improvement by using the specialized loss functions designed for age estimation. With random model initialization, we observed some improvement over the baseline cross-entropy on small datasets. Specifically, for Mean-Variance loss [22], OR-CNN [21], and DLDL [12]. 
These improvements can be attributed to implicit regularization provided by these methods.", + "bbox": [ + 496, + 114, + 890, + 372 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Previously published results reported continuous performance improvements over time (as depicted in Fig. 1). Our findings challenge these claims. We argue that the reported improvements can be attributed to either the random data splitting strategy or hyperparameter tuning to achieve the best test set performance. Our analysis of the data preparation pipeline revealed that factors such as the extent of facial coverage or input resolution exert a more significant impact on the results than the choice of the age estimation specific loss function. Guided by these findings, we use the FaRL [30] model as a backbone for age estimation and demonstrated its effectiveness. In summary:", + "bbox": [ + 496, + 371, + 890, + 551 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We show that existing evaluation practices in age estimation do not provide a consistent comparison of the state-of-the-art methods. We define a proper evaluation protocol which addresses the issue.", + "- We show that improvements in age estimation results over recent years can not be attributed to the specialized loss functions introduced in [10, 12, 13, 17, 21, 22], as is claimed in the published literature.", + "- Using the insight gained from analyzing different components of the age estimation pipeline, we construct a prediction model with the FaRL [30] backbone and demonstrate its effectiveness.", + "- To facilitate reproducibility and simple future comparisons, we have made our implementation framework and the exact data splits publicly available." + ], + "bbox": [ + 500, + 553, + 890, + 779 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Acknowledgment", + "text_level": 1, + "bbox": [ + 500, + 792, + 651, + 810 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This research was supported by the Grant Agency of the Czech Technical University in Prague, grant No. SGS23/176/OHK3/3T/13 and by the Grant agency of the Ministry of Interior Czech Republic, project FACIS grant. No. VJ02010041", + "bbox": [ + 496, + 816, + 890, + 892 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "1202", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/6272592e706aa02b2a2f126821780c05ea30cdff2ba5ff8dc79f1637651b6959.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodInit.Evaluation Dataset
AgeDBAFADCACD2000CLAP2016FG-NETMORPHUTKFace
IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.
AgeDBCross-Entropy5.817.207.657.8312.6114.705.908.108.736.8310.8612.4110.8216.2718.874.836.746.938.4511.8811.82
Regression6.236.547.608.0512.1314.196.767.568.748.329.9512.3210.5613.8318.005.646.666.949.4210.4212.14
OR-CNN [21]5.786.517.527.4711.8613.806.057.718.556.6410.1811.749.7413.8217.404.736.617.058.2010.7511.41
DLDL [12]5.806.957.467.811.8514.925.997.968.526.5110.8411.489.2315.6316.954.746.346.827.9711.8711.24
DLDL-v2 [13]5.806.877.587.6112.9816.195.918.158.616.4810.8812.509.9115.3619.014.927.537.207.9711.6111.24
SORD [10]5.816.937.587.8112.8015.425.967.908.776.6110.3712.229.7214.7617.854.766.587.148.1211.5311.60
Mean-Var. [22]5.856.697.337.2612.4014.356.007.898.356.7010.5011.9010.5514.3217.434.996.877.338.2510.7711.64
Unimodal [17]5.907.1115.498.3713.1120.876.228.2416.116.7311.1421.3110.1516.1332.774.846.7817.428.2311.8623.09
FaRL + MLP5.64--7.82--7.41--9.32--9.15--4.73--9.92--
AFADCross-Entropy15.7017.3118.053.143.173.329.5411.1811.218.9610.2310.3210.9211.3811.966.806.838.1912.1013.0713.29
Regression13.6715.9117.213.173.163.308.7210.5110.728.339.9110.0211.2011.8912.356.277.347.9911.2312.8312.96
OR-CNN [21]12.0815.6516.723.163.173.288.8711.0510.897.859.739.9210.6311.9412.586.686.857.8110.5012.4312.74
DLDL [12]14.1215.7017.213.143.163.259.4010.7011.068.689.549.9811.3111.6412.077.046.757.8211.5212.4312.82
DLDL-v2 [13]13.9016.3317.783.153.173.289.4610.6811.028.609.7610.3210.8311.8112.646.926.797.9411.2912.6113.18
SORD [10]14.3016.0817.493.143.153.249.4510.7011.098.649.7910.1011.2111.6312.196.876.827.9311.5912.7913.10
Mean-Var. [22]12.5415.0716.683.163.163.268.9810.3310.757.939.339.7810.9612.2412.436.616.767.8810.5712.0012.62
Unimodal [17]13.9915.8920.973.203.249.309.2310.6814.568.649.7914.5111.3111.8318.297.077.3212.5311.2612.3317.47
FaRL + MLP16.41--3.12--10.95--8.57--12.24--6.62--11.64--
CLAP2000Cross-Entropy9.6611.8410.6010.708.5013.083.964.594.898.428.6410.5117.4523.6420.867.2112.2010.3911.1611.3812.61
Regression10.9110.4410.7610.237.2311.664.064.524.838.847.759.9817.5519.5019.608.618.8111.7911.3410.3811.78
OR-CNN [21]10.4311.0211.859.669.4812.174.014.604.748.578.8510.2918.4724.3220.857.5210.0411.0511.1712.3012.27
DLDL [12]9.8410.7911.2810.099.3013.203.964.424.768.398.499.9918.3818.9921.527.279.1611.0111.1911.9412.27
DLDL-v2 [13]9.9012.3111.208.0311.5011.513.964.574.697.678.889.4318.1122.8919.027.2013.469.7310.5212.3211.47
SORD [10]9.7710.9011.0410.359.5511.953.964.424.708.388.519.8918.0520.8421.737.238.9811.5911.1812.0612.22
Mean-Var. [22]10.8111.4210.839.7110.8211.494.074.604.788.889.2010.0820.4822.6820.148.1412.5911.7211.7412.2912.23
Unimodal [17]10.4611.0446.2610.639.8525.744.104.7337.419.198.9230.9619.3719.7515.848.9411.6432.6311.8911.7532.98
FaRL + MLP11.32--9.08--3.96--8.57--19.63--6.56--11.27--
Cross-Entropy7.3510.1512.265.417.035.346.658.119.114.495.968.735.929.2812.024.966.616.905.747.218.58
Regression7.518.5211.746.075.195.956.867.249.454.654.777.894.856.3110.145.095.498.836.025.938.66
OR-CNN [21]6.838.7411.245.835.925.446.737.258.654.134.607.385.096.479.224.925.786.525.435.957.68
DLDL [12]7.209.3311.395.576.905.856.857.649.264.185.107.395.267.449.184.895.926.525.516.377.87
DLDL-v2 [13]7.149.4212.365.475.956.456.697.999.344.234.878.525.227.048.754.856.047.295.536.128.23
SORD [10]7.199.6012.165.477.746.626.638.099.664.275.347.815.597.777.624.926.016.625.486.468.08
Mean-Var. [22]7.089.1612.585.186.305.386.647.379.944.284.877.955.456.6911.144.967.387.495.526.168.65
Unimodal [17]7.019.7720.715.586.105.546.478.2013.084.175.3913.835.136.3915.134.806.0510.025.446.6715.27
FaRL + MLP7.50--4.34--6.57--3.38--4.95--4.47--4.85-
Cross-Entropy9.6611.7312.636.697.7810.368.5310.8310.116.908.9610.649.4511.9615.382.812.963.018.9710.8111.92
Regression10.4812.9912.566.606.6510.669.8211.479.687.839.2710.679.2410.1316.692.832.742.978.4010.97
OR-CNN [21]9.3511.6512.826.787.7811.818.3911.3410.236.848.7311.059.5811.0917.472.832.852.998.8210.3712.06
DLDL [12]9.4112.0012.666.587.7811.768.5811.9210.106.859.2611.159.4411.4316.942.812.922.988.8010.8112.46
DLDL-v2 [13]9.7911.4912.686.608.2212.458.7910.989.816.988.9811.229.5211.6317.572.822.933.008.9710.7012.47
SORD [10]9.4811.8412.736.547.9111.198.7311.1810.136.848.9910.729.3411.0815.902.812.912.998.8310.8511.97
Mean-Var. [22]9.7011.6212.936.687.8110.418.6510.5910.117.038.8010.569.5111.4515.812.832.892.958.9410.5911.95
Unimodal [17]9.9312.3117.446.637.048.188.6810.1112.037.198.9512.389.8012.1717.832.782.908.669.0710.7515.45
FaRL + MLP8.40--4.67--7.45--6.21--9.28--3.04--
Cross-Entropy6.618.889.585.516.426.756.569.108.984.827.347.504.786.627.645.096.617.354.384.755.32
Regression7.017.798.835.966.266.436.777.878.615.245.936.674.415.077.275.415.956.714.724.535.34
OR-CNN [21]6.718.298.755.566.746.526.618.898.374.956.796.704.545.716.555.266.076.764.404.435.15
DLDL [12]6.658.609.005.426.686.196.529.018.844.817.197.464.855.877.285.166.257.034.394.665.30
DLDL-v2 [13]6.798.439.915.427.186.326.529.428.694.827.878.784.836.547.555.146.367.284.364.685.25
SORD [10]6.618.969.115.427.186.326.529.428.694.827.878.784.836.547.555.146.367.284.364.685.25
Mean-Var. [22]6.798.368.535.416.546.326.558.558.325.046.816.325.056.306.905.376.156.394.424.575.05
Unimodal [17]6.688.6622.425.357.6816.646.589.2817.174.867.6018.834.556.2522.985.225.9616.444.474.7821.01
FaRL + MLP7.16-
", + "bbox": [ + 84, + 135, + 890, + 797 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 6. Intra-dataset and cross-dataset Mean Absolute Error (MAE) $\\downarrow$ of ResNet-50 models. Results marked as Initialization: IMDB are of models that are initialized to ImageNet weights, then trained with Cross-Entropy on IMDB-WIKI [25] and then finetuned on the downstream dataset. Imag. signifies initialization to weights pre-trained on ImageNet. Rand. denotes random initialization.", + "bbox": [ + 78, + 808, + 892, + 849 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "1203", + "bbox": [ + 483, + 945, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] E. Agustsson, R. Timofte, S. Escalera, X. Baro, I. Guyon, and R. Rothe. Apparent and real age estimation in still images with deep residual regressors on appa-real database. In 12th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), 2017. IEEE, 2017. 1, 2, 3, 7", + "[2] A. Berg, M. Oskarsson, and M. O'Connor. Deep ordinal regression with label diversity. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 2740-2747, Los Alamitos, CA, USA, 2021. IEEE Computer Society. 5", + "[3] Wenzhi Cao, Vahid Mirjalili, and Sebastian Raschka. Rank consistent ordinal regression for neural networks with application to age estimation. Pattern Recognition Letters, 140: 325-331, 2020. 2, 5", + "[4] Bor-Chun Chen, Chu-Song Chen, and Winston H. Hsu. Cross-age reference coding for age-invariant face recognition and retrieval. In Computer Vision – ECCV 2014, pages 768–783, Cham, 2014. Springer International Publishing. 1, 2, 3, 7", + "[5] Shixing Chen, Caojin Zhang, Ming Dong, Jialiang Le, and Mike Rao. Using ranking-cnn for age estimation. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 742-751, 2017. 2", + "[6] Janez Demšar. Statistical comparisons of classifiers over multiple data sets. The Journal of Machine learning research, 7:1-30, 2006. 4", + "[7] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 4", + "[8] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5203-5212, 2020. 4, 5", + "[9] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. 6", + "[10] Raul Díaz and Amit Marathe. Soft labels for ordinal regression. In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4733-4742, 2019. 2, 3, 4, 5, 7, 8", + "[11] Vojtech Franc and Jan Cech. Learning cnns from weakly annotated facial images. Image and Vision Computing, 2018. 3", + "[12] Bin-Bin Gao, Chao Xing, Chen-Wei Xie, Jianxin Wu, and Xin Geng. 
Deep label distribution learning with label ambiguity. IEEE Transactions on Image Processing, 26(6):2825-2838, 2017. 2, 3, 4, 5, 7, 8" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Bin-Bin Gao, Hong-Yu Zhou, Jianxin Wu, and Xin Geng. Age estimation using expectation of label distribution learning. In Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-18, pages 712-718. International Joint Conferences on Artificial Intelligence Organization, 2018. 2, 3, 4, 5, 7, 8", + "[14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016. 3", + "[15] A. Lanitis, C.J. Taylor, and T.F. Cootes. Toward automatic simulation of aging effects on face images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 24(4):442-455, 2002. 3, 7", + "[16] Ling Li and Hsuan-Tien Lin. Ordinal regression by extended binary classification. In Advances in Neural Information Processing Systems, pages 865-872, 2007. 2", + "[17] Qiang Li, Jingjing Wang, Zhaoliang Yao, Yachun Li, Pengju Yang, Jingwei Yan, Chunmao Wang, and Shiliang Pu. Unimodal-concentrated loss: Fully adaptive label distribution learning for ordinal regression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20513-20522, 2022. 2, 3, 4, 5, 7, 8", + "[18] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. Roi tanh-polar transformer network for face parsing in the wild. Image and Vision Computing, 112, 2021. 6, 7", + "[19] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. Fpage: Leveraging face parsing attention for facial age estimation in the wild. IEEE Transactions on Image Processing, 2022. 2, 5, 6", + "[20] Stylianos Moschoglou, Athanasios Papaioannou, Christos Sagonas, Jiankang Deng, Irene Kotsia, and Stefanos Zafeiriou. Agedb: the first manually collected, in-the-wild age database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshop, 2017. 1, 2, 3, 7", + "[21] Zhenxing Niu, Mo Zhou, Le Wang, Xinbo Gao, and Gang Hua. Ordinal regression with multiple output cnn for age estimation. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4920-4928, 2016. 1, 2, 3, 4, 5, 7, 8", + "[22] Hongyu Pan, Hu Han, Shiguang Shan, and Xilin Chen. Mean-variance loss for deep age estimation from a face. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5285-5294, 2018. 2, 3, 4, 5, 7, 8", + "[23] K. Ricanek and T. Tesafaye. Morph: a longitudinal image database of normal adult age-progression. In 7th International Conference on Automatic Face and Gesture Recognition (FGR06), pages 341-345, 2006. 1, 2, 3, 7", + "[24] Rasmus Rothe, Radu Timofte, and Luc Van Gool. Dex: Deep expectation of apparent age from a single image. In 2015 IEEE International Conference on Computer Vision Workshop (ICCVW), pages 252-257, 2015. 3, 5", + "[25] Rasmus Rothe, Radu Timofte, and Luc Van Gool. Deep expectation of real and apparent age from a single image without facial landmarks. International Journal of Computer Vision, 126(2-4):144-157, 2018.
1, 3, 7, 8" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "1204", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[26] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade W Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa R Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. LAION-5b: An open large-scale dataset for training next generation image-text models. In Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2022. 6, 7", + "[27] Nyeong-Ho Shin, Seon-Ho Lee, and Chang-Su Kim. Moving window regression: A novel approach to ordinal regression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18760-18769, 2022. 2, 5", + "[28] Yunxuan Zhang, Li Liu, Cheng Li, and Chen-Change Loy. Quantifying facial age by posterior of age comparisons. In Proceedings of the British Machine Vision Conference (BMVC), pages 108.1–108.12. BMVA Press, 2017. 2, 5", + "[29] Zhifei Zhang, Yang Song, and Hairong Qi. Age progression/regression by conditional adversarial autoencoder. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR). IEEE, 2017. 1, 3, 7", + "[30] Yinglin Zheng, Hao Yang, Ting Zhang, Jianmin Bao, Dongdong Chen, Yangyu Huang, Lu Yuan, Dong Chen, Ming Zeng, and Fang Wen. General facial representation learning in a visual-linguistic manner. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18697-18709, 2022. 1, 5, 6, 7" + ], + "bbox": [ + 78, + 90, + 468, + 486 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "1205", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/393f0825-ecb8-44e6-bdfa-5dde4b82ecdb_model.json b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/393f0825-ecb8-44e6-bdfa-5dde4b82ecdb_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c54188d66d08f186c85999f88835c4ee3f8300af --- /dev/null +++ b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/393f0825-ecb8-44e6-bdfa-5dde4b82ecdb_model.json @@ -0,0 +1,1760 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.127, + 0.131, + 0.845, + 0.177 + ], + "angle": 0, + "content": "A Call to Reflect on Evaluation Practices for Age Estimation: Comparative Analysis of the State-of-the-Art and a Unified Benchmark" + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.205, + 0.462, + 0.275 + ], + "angle": 0, + "content": "Jakub Paplhám \nDepartment of Cybernetics \nFaculty of Electrical Engineering \nCzech Technical University in Prague" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.278, + 0.402, + 0.292 + ], + "angle": 0, + "content": "paplhjak@fel(cvut.cz" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.205, + 0.808, + 0.275 + ], + "angle": 0, + "content": "Vojtěch Franc \nDepartment of Cybernetics \nFaculty of Electrical Engineering \nCzech Technical University in Prague" + }, + { + "type": "text", + "bbox": [ + 0.548, + 0.278, + 0.763, + 0.292 + ], + "angle": 0, + "content": "xfrancv@cmp.felk.cvut.cz" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.327, + 0.314, + 0.344 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.359, + 0.473, + 0.633 + ], + "angle": 0, + "content": "Comparing different age estimation methods poses a challenge due to the unreliability of published results stemming from inconsistencies in the benchmarking process. Previous studies have reported continuous performance improvements over the past decade using specialized methods; however, our findings challenge these claims. This paper identifies two trivial, yet persistent issues with the currently used evaluation protocol and describes how to resolve them. We offer an extensive comparative analysis for state-of-the-art facial age estimation methods. Surprisingly, we find that the performance differences between the methods are negligible compared to the effect of other factors, such as facial alignment, facial coverage, image resolution, model architecture, or the amount of data used for pretraining. We use the gained insights to propose using FaRL as the backbone model and demonstrate its effectiveness on all public datasets. We make the source code and exact data splits public on GitHub and in the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.663, + 0.21, + 0.679 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.688, + 0.47, + 0.81 + ], + "angle": 0, + "content": "Age estimation has received significant interest in recent years. However, a closer examination of the evaluation process reveals two underlying issues. First, no standardized data splits are defined for most public datasets, and the used splits are rarely made public, making the results irreproducible. Second, methods often modify multiple components of the age estimation system, making it unclear which modification is responsible for the performance gains." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.902 + ], + "angle": 0, + "content": "This paper aims to critically analyze the evaluation practices in age estimation research, highlight the issues, and appeal to the community to follow good evaluation practices to resolve them. We benchmark and fairly compare recent deep-learning methods for age estimation from facial images. 
We focus on state-of-the-art methods that" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.329, + 0.895, + 0.526 + ], + "angle": 0, + "content": "adapt a generic architecture by changing its last layer or the loss function to suit the age estimation task. Although this may appear restrictive, it is essential to note that most of the methods proposed in the field fall into this category \\((\\approx 70\\%)\\). By comparing methods that modify only a small part of the network, we aim to ensure a fair evaluation, as the remaining setup can be kept identical. Besides the usual intra-class performance, we also evaluate their cross-dataset generalization, which has been neglected in the age prediction literature so far. Surprisingly, we find that the influence of the loss function and the decision layer on the results, usually the primary component that distinguishes different methods, is negligible compared to other factors." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.548, + 0.604, + 0.562 + ], + "angle": 0, + "content": "Contributions" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.564, + 0.892, + 0.639 + ], + "angle": 0, + "content": "- We show that existing evaluation practices in age estimation do not provide consistent results. This leads to obstacles for researchers aiming to advance prior work and for practitioners striving to pinpoint the most effective approach for their application." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.64, + 0.892, + 0.684 + ], + "angle": 0, + "content": "- We define a proper evaluation protocol, offer an extensive comparative analysis for state-of-the-art facial age estimation methods, and publish our code." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.685, + 0.892, + 0.745 + ], + "angle": 0, + "content": "- We show that the performance difference caused by using a different decision layer or training loss is significantly smaller than that caused by other parts of the prediction pipeline." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.745, + 0.892, + 0.806 + ], + "angle": 0, + "content": "- We identify that the amount of data used for pre-training is the most influential factor and use the observation to propose using FaRL [30] as the backbone architecture. We demonstrate its effectiveness on public datasets." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.564, + 0.892, + 0.806 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.821, + 0.868, + 0.837 + ], + "angle": 0, + "content": "2. Issues with Current Evaluation Practices" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.847, + 0.622, + 0.862 + ], + "angle": 0, + "content": "2.1. Data Splits" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Publications focused on age estimation evaluate their methods on several datasets [1, 4, 20, 21, 23, 25, 29]. The most" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "1196" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.08, + 0.088, + 0.468, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.316, + 0.47, + 0.455 + ], + "angle": 0, + "content": "Figure 1. Mean Absolute Error (MAE) \\(\\downarrow\\) of age estimation methods on the MORPH dataset, as reported in the existing literature and measured by us, viewed over time. Random splitting remains the prevalent data splitting strategy. 
The consistent performance improvements over time are attributed in the literature to specialized loss functions for age estimation. Subject-exclusive (identity-disjoint) data splitting is rarely employed. With unified subject-exclusive data splitting and all factors except the loss function fixed, all evaluated methods yield comparable results, failing to achieve the performance gains promised by the random splitting." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.489, + 0.47, + 0.791 + ], + "angle": 0, + "content": "commonly used of these is the MORPH [23] dataset. However, the evaluation procedures between the publications are not unified. For instance, OR-CNN [21] randomly divides the dataset into two parts: \\(80\\%\\) for training and \\(20\\%\\) for testing. No mention is made of a validation set for model selection. Random splitting (RS) protocol is also used in [3, 5, 12, 13, 19, 28], but the specific data splits differ between studies as they are rarely made public. Since the dataset contains multiple images per person (many captured at the same age), the same individual can be present in both the training and testing sets. This overlap introduces a bias, resulting in overly optimistic evaluation outcomes. The degree of data leakage can vary when using random splitting, making certain data splits more challenging than others. Further, this fundamentally changes the entire setup; rarely will one want to deploy the age estimation system on the people present in the training data. Consequently, comparison of different methods and discerning which method stands out as the most effective based on the published results becomes problematic." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Only some publications [22, 27] recognize this bias introduced by the splitting strategy and address it by implementing subject-exclusive (SE) [22] splitting. This approach ensures that all images of an individual are exclusively in the (i) training, (ii) validation, or (iii) testing part. The terminology here is not fully established. One might encounter either identity-disjoint or person-disjoint instead" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.741, + 0.106 + ], + "angle": 0, + "content": "of subject-exclusive in the literature." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.108, + 0.892, + 0.289 + ], + "angle": 0, + "content": "To assess how prevalent random splitting (RS) on the MORPH dataset truly is, we conducted a survey of all age estimation papers presented at the CVPR and ICCV since 2013. We found 16 papers focused on age estimation, of which nine use RS, two use SE, five use specialized splits, and three do not utilize MORPH. We further surveyed other research conferences and journals, namely: IJCAI, BMVC, ACCV, IEEE TIP, Pattern Recognit. Lett., Pattern Anal. Appl., and find eight influential age estimation papers that use MORPH. Of those, seven use RS, and one uses a specialized split. By specialized splits, we are referring to non-standard strategies such as ethnically balanced partitions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.29, + 0.893, + 0.562 + ], + "angle": 0, + "content": "Altogether, we discover that only \\(\\approx 10\\%\\) of papers that utilize MORPH use the SE protocol. This finding is concerning, as MORPH [23] is the most popular dataset used to compare age estimation approaches. 
Other datasets do not provide a reliable benchmark either, as standardized data splits are provided only for two public age estimation datasets: (i) the ChaLearn Looking at People Challenge 2016 (CLAP2016) dataset [1], which is relatively small, consisting of fewer than 8000 images, and (ii) the Cross-Age Celebrity Dataset (CACD2000) [4], which has noisy training annotations and is not intended for age estimation. Comparing methods using only these datasets is, therefore, not satisfactory either. Other popular datasets, AgeDB dataset [20] and Asian Face Age Dataset (AFAD) [21], also consist of multiple images per person, requiring SE splitting. However, they lack any data splits accepted by the community and often are used with the RS protocol. As such, they suffer from the same issues as MORPH [23]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.574, + 0.671, + 0.59 + ], + "angle": 0, + "content": "2.2. Pipeline Ablation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.892, + 0.779 + ], + "angle": 0, + "content": "To fairly compare multiple methods, an identical experimental setup should be used for each of them. The current state-of-the-art age estimation approaches adhere to a common framework encompassing: (i) data collection, (ii) data preprocessing, (iii) model design, including the decision layer and the loss function, and (iv) training and evaluation. Most novel approaches introduce distinct changes to the component (iii); namely they design a specialized loss function to exploit the ordinal nature of age. However, they frequently alter multiple components of the framework simultaneously, complicating the attribution of performance improvements to the claimed modifications." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "To compare different loss functions, e.g., [3, 10, 12, 13, 16, 17, 21, 22], the other components of the framework should be kept constant, allowing us to isolate the impact of the selected method on the performance. This is trivial, yet the age estimation community mostly ignores it. Further, many publications hand-wave the other components and do not precisely specify them, making future comparisons meaningless. It's important to question whether the" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1197" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.168 + ], + "angle": 0, + "content": "reported enhancement in a research paper truly stems from the novel loss function it proposes or if it could be attributed to a different modification. We strongly advocate that each component be addressed in isolation and that the experimental setup be precisely described." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.169, + 0.47, + 0.32 + ], + "angle": 0, + "content": "Over the past decade, numerous novel age estimation methods have been introduced, promising continuous performance improvements every year. However, motivated by these findings, we raise the question: how reliable are the published age estimation results? In Sec. 3 we aim to establish a proper evaluation protocol and use it in Sec. 4 to compare the methods [10, 12, 13, 17, 21, 22, 24] reliably. Figure 1 illustrates the contrast between the performance of state-of-the-art methods as reported in their respective studies and the outcomes as measured by our implementation." 
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.338, + 0.269, + 0.354 + ], + "angle": 0, + "content": "3. Evaluation Protocol" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.365, + 0.471, + 0.576 + ], + "angle": 0, + "content": "We identified two trivial yet persistent issues that prevent a reliable comparison of age estimation methods. In this section, we address the initial challenge concerning consistent data partitioning. We provide clear guidelines for the evaluation protocol to ensure replicable and fair assessments. Specifically, the protocol should establish a reproducible approach for defining the data used in both (i) training and (ii) performance evaluation. When specifying the training data, one needs to state whether the training dataset is the sole source of information, or if the model was pretrained with additional data. Additionally, the evaluation can be subdivided based on the data used for model evaluation into intra-dataset, and cross-dataset results. We describe how to evaluate models in these settings below." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.603, + 0.47, + 0.86 + ], + "angle": 0, + "content": "Intra-dataset performance To evaluate intra-dataset performance, a single dataset is used for both training and evaluation of the age estimation system. In this case one should (i) randomly split the dataset into subject-exclusive training, validation, and test set \\(^{1}\\), (ii) train the model on the training set, (iii) measure the model's performance on the validation set, (iv) possibly revert back to step (ii) and train the model again, (v) evaluate the model's performance on the test set, then (vi) publish the results on the test set along with a detailed description of the system components and the data used. If the dataset consists of limited number of examples, it is possible to create multiple splits of the data into training, validation and test set through step (i). Following this, steps (ii) through (v) are iterated \\(n\\) times, where \\(n\\) is the number of generated splits. It is advisable to present the average test performance along with its standard deviation when reporting the results." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.892, + 0.197 + ], + "angle": 0, + "content": "Cross-dataset performance To evaluate cross-dataset performance, the data split in step (i) of the aforementioned evaluation process is generated from a collection of multiple datasets, ensuring that the complete chosen dataset must be employed entirely for evaluation, effectively constituting the designated test set. The remaining steps of the evaluation procedure remain unaltered." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.198, + 0.893, + 0.349 + ], + "angle": 0, + "content": "Regardless of the scenario, whether it is intra-dataset or cross-dataset, each system needs to be evaluated against the test data only once, and the results published. All prior model development and hyperparameter tuning must be based solely on the results on the validation set. Furthermore, it should be indicated whether the training data are the only source of information used for training, or whether the model was pretrained with additional data. In the letter scenario, a detailed description of the additional data and their utilization should also be provided." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.362, + 0.78, + 0.379 + ], + "angle": 0, + "content": "4. 
Comparative Method Analysis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.387, + 0.892, + 0.477 + ], + "angle": 0, + "content": "This section applies the evaluation protocol to compare state-of-the-art age estimation methods. We maintain a consistent preprocessing procedure, model architecture, and dataset while selectively altering the decision layer and loss function to incorporate modifications proposed in prominent works such as [10, 12, 13, 17, 21, 22, 24]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.487, + 0.637, + 0.503 + ], + "angle": 0, + "content": "4.1. Methodology" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.51, + 0.892, + 0.585 + ], + "angle": 0, + "content": "Datasets We evaluate the methods using 7 datasets: AgeDB [20], AFAD [21], CACD2000 [4], CLAP2016 [1], FG-NET [15], MORPH [23], and UTKFace [29]. We also use the IMDB-WIKI dataset [25] for pre-training with clean labels from Franc and Čech [11]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.603, + 0.892, + 0.755 + ], + "angle": 0, + "content": "Data Splits For the CLAP2016 and CACD2000 datasets, we use the single data split provided by the dataset authors. For the remaining datasets, we create five subject-exclusive (SE) data splits. To generate the split, we partition the dataset such that \\(60\\%\\) of the dataset is used for training, \\(20\\%\\) for model selection (validation), and \\(20\\%\\) for evaluating the model performance (test). Additionally, we ensure that each partition has the same age distribution. Due to its small size, we only use FG-NET for evaluation. We make our data splits and code public at Facial-Age-Benchmark2." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.773, + 0.892, + 0.879 + ], + "angle": 0, + "content": "Model Architecture & Weight Initialization We use ResNet-50 [14] as the backbone architecture. We always start the training of the methods from the same initialization. We run the experiments with (i) random initialization, (ii) weights pre-trained on ImageNet (TorchVision'sImagenet1K_V2), and (iii) weights pre-trained on ImageNet and then further trained on IMDB-WIKI for age estimation" + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.875, + 0.468, + 0.901 + ], + "angle": 0, + "content": "1The generated training, validation and test sets will usually be a partition of the dataset, however, in any case their intersection must be empty." + }, + { + "type": "page_footnote", + "bbox": [ + 0.516, + 0.887, + 0.858, + 0.901 + ], + "angle": 0, + "content": "\\(^{2}\\)https://github.com/paplhjak/Facial-Age-Estimation-Benchmark" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1198" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.198 + ], + "angle": 0, + "content": "with cross-entropy. After the pre-training, the last layer of the model is replaced with a layer specific to the desired method. The models are then fine-tuned on the downstream dataset. It is important to note that for the baseline cross-entropy, we also replace the final layer before fine-tuning. This ensures that the experimental setup remains identical to that of the other methods." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.219, + 0.473, + 0.506 + ], + "angle": 0, + "content": "Training Details We utilize the Adam optimizer with the parameters \\(\\beta_{1} = 0.9\\), \\(\\beta_{2} = 0.999\\). 
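The subject-exclusive splitting required by the protocol in Sec. 3 and used to generate these data splits is straightforward to implement. Below is a minimal Python sketch, assuming a table with one row per image and a hypothetical `identity` column; the age-distribution matching described above is omitted and the exact procedure is in the released code.

```python
import numpy as np
import pandas as pd

def subject_exclusive_split(df: pd.DataFrame, train=0.6, val=0.2, seed=0):
    """Assign whole identities to train/val/test so no person appears in two parts."""
    rng = np.random.default_rng(seed)
    ids = df["identity"].unique()
    rng.shuffle(ids)
    n_train, n_val = int(train * len(ids)), int(val * len(ids))
    groups = {
        "train": ids[:n_train],
        "val": ids[n_train:n_train + n_val],
        "test": ids[n_train + n_val:],
    }
    return {part: df[df["identity"].isin(members)] for part, members in groups.items()}
```

Splitting by identity rather than by image is exactly what removes the leakage described in Sec. 2.1, where random splitting lets the same person appear on both sides of the split.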
For pre-training on the IMDB-WIKI dataset, we set the learning rate to \\(\\alpha = 10^{-3}\\) and train the model for a total of 100 epochs. For fine-tuning on the remaining datasets we reduce the learning rate to \\(\\alpha = 10^{-4}\\) and train the model for 50 epochs. We use a batch size of 100. The best model is selected based on the MAE metric computed on the validation set. We utilize two data augmentations during training, (i) horizontal mirroring, and (ii) cropping out an \\(80\\%\\) to \\(100\\%\\) portion of the bounding box and resizing it to the model input shape. We do not tune the hyperparameters of the methods [10, 12, 13, 17, 21, 22] on the validation set. We apply them in the original configurations. We argue that if any of the loss functions is a significant improvement over the baseline, we should observe a performance improvement across a broad range of hyperparameters and preprocessing pipelines. We consider our training parameters to be reasonable and to provide a comparison of the methods as if employed out-of-the-box." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.527, + 0.472, + 0.634 + ], + "angle": 0, + "content": "Preprocessing We use the RetinaFace model developed by Deng et al. [8] for face detection and facial landmark detection. We use complete facial coverage, i.e., the images encompass the entire head. We resize the images to a resolution of \\(256 \\times 256\\) pixels and normalize the pixel values of the images. To this end, we subtract the mean and divide by the standard deviation of colors on ImageNet [7]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.655, + 0.473, + 0.821 + ], + "angle": 0, + "content": "Metrics We use the Mean Absolute Error (MAE) calculated on the test data as the performance measure. To determine whether any method is consistently better than others, we employ the Friedman test and the Nemenyi critical difference test (FN test) as described by Demšar [6]. The main statistic used in the test is the average ranking (1 is best) of a method computed on multiple datasets. Differences in the average ranking are then used to decide whether a method is significantly better than others or whether the improvement is due to randomness (the null hypothesis). We use a common significance level (p-value) of \\(\\alpha = 5\\%\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.832, + 0.172, + 0.846 + ], + "angle": 0, + "content": "4.2. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.472, + 0.903 + ], + "angle": 0, + "content": "Intra-Dataset Performance The intra-dataset results can be seen in Tab. 6, highlighted with a grey background. When starting from random initialization, training with the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.212 + ], + "angle": 0, + "content": "Unimodal loss [17] tends to be unstable. Excluding the Unimodal loss [17] from the evaluation, we apply the FN test. The results indicate that three methods: OR-CNN [21], DLDL [12], and the Mean-Variance loss [22], demonstrate a significant performance improvement over the baseline cross-entropy. With limited data availability, when pretraining is not possible, it is advisable to utilize one of the aforementioned methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.212, + 0.892, + 0.273 + ], + "angle": 0, + "content": "With pre-training, either on ImageNet or IMDB-WIKI, none of the methods is significantly better than the cross-entropy. 
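The rank-based comparison described in the Metrics paragraph can be reproduced with standard tooling. The sketch below uses SciPy on a hypothetical MAE matrix (rows are datasets or splits, columns are methods); the numbers are illustrative placeholders, not results from this paper.

```python
import numpy as np
from scipy.stats import friedmanchisquare, rankdata

# mae[d, m] = MAE of method m on dataset/split d (illustrative values only)
mae = np.array([
    [5.81, 5.78, 5.80, 5.85],
    [3.14, 3.16, 3.14, 3.16],
    [4.49, 4.64, 4.51, 4.70],
    [2.81, 2.73, 2.74, 2.85],
])

# Friedman test: do the methods rank consistently differently across datasets?
stat, p_value = friedmanchisquare(*mae.T)

# Average rank per method (1 = best); the Nemenyi critical difference is then
# computed from these ranks using the studentized-range constant from Demšar [6].
avg_rank = rankdata(mae, axis=1).mean(axis=0)
print(f"Friedman p-value: {p_value:.3f}, average ranks: {avg_rank}")
```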
In other words, we do not observe any systematic improvement by deviating from the standard approach." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.291, + 0.892, + 0.47 + ], + "angle": 0, + "content": "Cross-Dataset Generalization Cross-dataset results, shown in Tab. 6 with white background, were obtained by evaluating the performance of models on datasets that were not used for their training. The cross-dataset performance is unsurprisingly significantly worse than the intra-dataset performance for all of the methods. Using the FN test, we conclude that there is no significant difference in generalization capability between any of the methods [10, 12, 13, 17, 21, 22] and the cross-entropy, regardless of pre-training. In other words, though the loss functions may reduce overfitting, they do not help in the presence of covariate shift." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.471, + 0.892, + 0.759 + ], + "angle": 0, + "content": "None of the methods perform well when evaluated on a different dataset than the one they were trained on. The best cross-dataset results are achieved by training on either UTKFace or CLAP2016. The worst performance across databases is observed when models are trained on AFAD or MORPH. This discrepancy can be attributed to UTKFace and CLAP2016 having a broader range of images, which allows them to generalize effectively to other datasets. Conversely, the limited diversity in MORPH or AFAD datasets, such as AFAD mainly comprising images of people of Asian ethnicity and around \\(80\\%\\) of MORPH being composed of individuals of African American ethnicity, contributes to the poor knowledge transfer. The significant decrease in the performance of models trained on the MORPH dataset when applied to other age estimation datasets underscores the importance of not relying solely on the MORPH dataset as the benchmark for age estimation. To ensure a reliable evaluation of different methods, it is crucial to incorporate results from alternative datasets as well." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.77, + 0.698, + 0.788 + ], + "angle": 0, + "content": "5. Component Analysis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.892, + 0.903 + ], + "angle": 0, + "content": "In this section, we analyze the influence of the backbone architecture and the data preparation pipeline on model performance. We show that changes to these components can have a much more significant impact on the final performance than the choice of a loss function. When altering a component, we maintain all other components at their defaults, presented as the Cross-Entropy approach in Sec. 4." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1199" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "We use the gained insight to propose a strong baseline age estimation model using the FaRL [30] backbone." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.131, + 0.266, + 0.146 + ], + "angle": 0, + "content": "5.1. Model Architecture" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.155, + 0.471, + 0.351 + ], + "angle": 0, + "content": "Multiple different backbone architectures can be found in the age estimation literature. Among these architectures, VGG16 [10, 13, 17, 22, 27, 28] and ResNet-50 [2, 3, 19] stand out as the most common choice. 
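Swapping the backbone while keeping the rest of the pipeline fixed only requires replacing each network's classification head, mirroring how the decision layer is swapped for the loss-function comparison. A sketch with torchvision models follows; the number of age classes is a placeholder, not a value taken from this paper.

```python
import torch.nn as nn
from torchvision import models

NUM_AGES = 101  # placeholder for the number of discrete age labels

def build_model(name: str) -> nn.Module:
    """Build a backbone and replace only its classification head."""
    if name == "resnet50":
        m = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V2)
        m.fc = nn.Linear(m.fc.in_features, NUM_AGES)
    elif name == "efficientnet_b4":
        m = models.efficientnet_b4(weights=models.EfficientNet_B4_Weights.IMAGENET1K_V1)
        m.classifier[-1] = nn.Linear(m.classifier[-1].in_features, NUM_AGES)
    elif name == "vit_b_16":
        m = models.vit_b_16(weights=models.ViT_B_16_Weights.IMAGENET1K_V1)
        m.heads.head = nn.Linear(m.heads.head.in_features, NUM_AGES)
    else:
        raise ValueError(f"unknown backbone: {name}")
    return m
```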
We evaluate the influence of the architecture choice on the performance and extend the comparison to include more recent advancements, EfficientNet-B4 and ViT-B-16. We present our findings in Tab. 5. No backbone emerges as universally best across all datasets. Notably, changes in the backbone have a more substantial impact on performance than changes to the loss function, see Tab. 6. This highlights the importance of a thorough ablation, as changes in the backbone architecture could obscure the impact of the loss function." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.361, + 0.315, + 0.377 + ], + "angle": 0, + "content": "5.2. Data Preparation Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.384, + 0.47, + 0.581 + ], + "angle": 0, + "content": "Age estimation models require only a specific region of an image, specifically the person's face, as input, rather than the entire image. However, the influence of this selection process on the model's performance is not apriori known. Should the model be presented with a tight crop of the face or the entire head? Additionally, facial images can differ in terms of scale and resolution since they originate from various sources and as such need to be resized to a uniform resolution. In this section, we examine the impact of the aforementioned data preparation pipeline on the performance of age estimation models. We demonstrate that changes in the preprocessing have a more substantial impact on performance than changes to the loss function." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Facial Alignment Numerous studies lack an explanation of their facial alignment procedure. Others merely mention the utilization of facial landmarks. To assess whether a standardized alignment is needed for a fair comparison of multiple methods, we adopt three distinct alignment procedures and evaluate their effect on model performance. Firstly, we (i) perform no alignment and employ the bounding box proposed by the facial detection model [8] as the simplest approach. The bounding box sides are parallel to the axes of the image. Secondly, (ii) we utilize the proposed bounding box but rotate it to horizontally align the eyes. Lastly, (iii) we use an alignment procedure, which normalizes the rotation, positioning, and scale. For details, refer to the implementation. A visual representation of these facial alignment methods is depicted in Fig. 2. The performance of models trained using the various alignment procedures is presented in Tab. 1. When working with pre-aligned datasets like AFAD, we observe that procedure (iii) does not yield significant improvements compared to the simpler variants (i) or (ii). Similar results are obtained" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.273 + ], + "angle": 0, + "content": "on datasets collected under standardized conditions, such as the MORPH dataset. However, when dealing with inthe-wild datasets like AgeDB and CLAP2016, we find that alignment (iii) leads to noticeable improvements over the simpler methods. Interestingly, on the UTKFace dataset, which also contains in-the-wild images, approach (ii) of solely rotating the proposed bounding boxes achieves the best outcomes. However, the disparities among the various alignment procedures are not substantial. We therefore argue that any facial alignment technique that effectively normalizes the position, rotation, and scale of the faces would yield comparable results." 
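Alignment variant (iii) can be realized as a similarity transform estimated from the detected landmarks. The OpenCV sketch below illustrates this; the five-point template coordinates are illustrative placeholders, not necessarily those used in the released implementation.

```python
import cv2
import numpy as np

# Illustrative canonical positions (in a 256x256 crop) for the five RetinaFace
# landmarks: left eye, right eye, nose tip, left and right mouth corner.
TEMPLATE = np.float32([[89, 110], [167, 110], [128, 152], [99, 192], [157, 192]])

def align_face(image, landmarks, size=256):
    """Normalize rotation, translation and scale via a similarity transform."""
    src = np.float32(landmarks).reshape(5, 2)
    matrix, _ = cv2.estimateAffinePartial2D(src, TEMPLATE)
    return cv2.warpAffine(image, matrix, (size, size))
```

Variants (i) and (ii) then correspond to skipping the transform entirely and to applying only its rotation component, respectively.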
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.296, + 0.895, + 0.703 + ], + "angle": 0, + "content": "Facial Coverage While facial alignment defines the positioning, orientation, and scale of facial landmarks, the extent to which the face is visible in an image also needs to be specified. We refer to this notion as facial coverage. It measures how much of the face is shown in an image and can range from minimal coverage, where only the eyes and mouth are visible, to complete coverage, where the entire head is visible. Determining the optimal compromise between complete facial coverage and minimal coverage is not immediately clear. Complete facial coverage provides a comprehensive view of the face, allowing age estimation algorithms to consider a broader range of facial cues. On the other hand, partial coverage may help reduce overfitting by eliminating irrelevant facial cues and features with high variance. For a visual demonstration of various facial coverage levels, refer to Fig. 3. The concept of facial coverage has received limited attention in age estimation literature. Consequently, the extent of facial coverage utilized in previous studies can only be inferred from the images presented in those works. For instance, Berg et al. [2] seemingly employ minimal coverage, showing slightly more than just the mouth and eyes. The majority of other works [3, 12, 13, 17, 21, 27] tend to adopt partial coverage, where a significant portion of the face, including the chin and forehead, is visible, but not the entire head and hair. In the works of Pan et al. [22], Rothe et al. [24], and Zhang et al. [28], the entire head is shown." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.895, + 0.903 + ], + "angle": 0, + "content": "The performance of models trained with the different coverage levels is presented in Tab. 2. Generally, complete facial coverage, which includes the entire head in the model input, yields the best results across the majority of datasets. However, specifically for AFAD dataset and the MORPH dataset, partial coverage performs better. It is important to note that the AFAD dataset contains preprocessed images that do not capture the entire head. Consequently, using complete facial coverage with this dataset results in the presence of black bars and a decrease in the effective pixel resolution of the face. It is then to be expected that increased facial coverage yields inferior results. The smallest coverage, limited to the facial region up to the eyes and mouth," + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1200" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.189, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.119, + 0.176, + 0.167, + 0.188 + ], + "angle": 0, + "content": "(a) Crop." + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.089, + 0.32, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.24, + 0.176, + 0.308, + 0.187 + ], + "angle": 0, + "content": "(b) Rotation." + }, + { + "type": "image", + "bbox": [ + 0.343, + 0.089, + 0.451, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.347, + 0.176, + 0.465, + 0.187 + ], + "angle": 0, + "content": "(c) Rot., Trans., Scale." + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.2, + 0.468, + 0.228 + ], + "angle": 0, + "content": "Figure 2. 
Comparison of different alignment methods using the average face from the FG-NET dataset." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.255, + 0.468, + 0.286 + ], + "angle": 0, + "content": "consistently performs the worst. With sufficient pixel resolution, the full facial coverage performs the best." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.304, + 0.468, + 0.426 + ], + "angle": 0, + "content": "Input Resolution To investigate the influence of input resolution on age estimation, we performed experiments using multiple resolutions on all datasets: specifically, \\(256 \\times 256\\), \\(128 \\times 128\\), and \\(64 \\times 64\\) pixels. The results are presented in Tab. 3. Our findings indicate that an increase in image resolution consistently results in improved model performance across all datasets. Hence, the best performance was achieved with a resolution of \\(256 \\times 256\\) pixels." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.426, + 0.468, + 0.532 + ], + "angle": 0, + "content": "In the literature, one can find resolutions ranging from \\(60 \\times 60\\) to \\(256 \\times 256\\) pixels, where newer works tend to use larger resolution images. As the resolution increase can directly be observed to improve the results; and the resolutions increased with years; it is difficult to say whether newly proposed methods are better overall, or whether they perform better due to using higher resolution images." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.55, + 0.468, + 0.762 + ], + "angle": 0, + "content": "Input Transform Finally, we examined the input transformation proposed by Lin et al. [18], which involves converting a face image into a tanh-polar representation. This approach has shown large performance improvements in face semantic segmentation. Lin et al. then modified the network for age estimation, reporting impressive results [19]. We explored the potential benefits of applying this transformation for age estimation. However, our findings indicate that the transformation does not improve the results compared to the baseline, as shown in Tab. 4. Therefore, we conclude that the improved age estimation performance observed by Lin et al. [19] does not arise from the use of a different representation, but rather from pre-training on semantic segmentation or their model architecture." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.772, + 0.239, + 0.787 + ], + "angle": 0, + "content": "5.3. FaRL Backbone" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.468, + 0.901 + ], + "angle": 0, + "content": "We observed that adjustments to the decision layer and loss function have minimal impact on the final model performance. Conversely, large performance disparities arise when modifying other components of the prediction pipeline. Notably, the pretraining data appear to be the most influential factor. Based on this insight, we opt against creating a specialized loss function to enhance the age esti" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.089, + 0.612, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.515, + 0.176, + 0.615, + 0.187 + ], + "angle": 0, + "content": "(a) Eyes & Mouth." + }, + { + "type": "image", + "bbox": [ + 0.635, + 0.09, + 0.743, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.64, + 0.175, + 0.754, + 0.187 + ], + "angle": 0, + "content": "(b) Chin & Forehead." 
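The text does not pin down how coverage is controlled in code; one straightforward realization, assumed here for illustration, is to scale the detector's face box about its center before cropping. The coverage factors below are illustrative, not values from this paper.

```python
from PIL import Image

def crop_with_coverage(image: Image.Image, box, coverage=1.4, size=256):
    """Crop a square region around the face box scaled by `coverage`.

    Small factors approximate the eyes-and-mouth setting, large ones include
    the whole head; cropping past the image border yields the black bars
    mentioned for AFAD.
    """
    x0, y0, x1, y1 = box
    cx, cy = (x0 + x1) / 2.0, (y0 + y1) / 2.0
    half = max(x1 - x0, y1 - y0) * coverage / 2.0
    region = image.crop((int(cx - half), int(cy - half), int(cx + half), int(cy + half)))
    return region.resize((size, size))
```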
+ }, + { + "type": "image", + "bbox": [ + 0.766, + 0.09, + 0.874, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.803, + 0.176, + 0.853, + 0.187 + ], + "angle": 0, + "content": "(c) Head." + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.2, + 0.892, + 0.228 + ], + "angle": 0, + "content": "Figure 3. Comparison of different facial coverage levels using the average face from the FG-NET dataset." + }, + { + "type": "table", + "bbox": [ + 0.516, + 0.242, + 0.875, + 0.359 + ], + "angle": 0, + "content": "
Dataset | Alignment: Crop | Rotation | Rot. + Trans. + Scale
AgeDB | 5.93 | 5.92 | 5.84
AFAD | 3.12 | 3.11 | 3.11
CACD2000 | 4.01 | 4.00 | 4.00
CLAP2016 | 4.68 | 4.57 | 4.49
MORPH | 2.81 | 2.78 | 2.79
UTKFace | 4.49 | 4.42 | 4.44
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.37, + 0.891, + 0.398 + ], + "angle": 0, + "content": "Table 1. MAE \\( \\downarrow \\) of ResNet-50 models with different facial alignment. The models were pre-trained on IMDB-WIKI." + }, + { + "type": "table", + "bbox": [ + 0.51, + 0.412, + 0.88, + 0.529 + ], + "angle": 0, + "content": "
Dataset | Facial Coverage: Eyes & Mouth | Chin & Forehead | Head
AgeDB | 6.06 | 5.84 | 5.81
AFAD | 3.17 | 3.11 | 3.14
CACD2000 | 4.02 | 4.00 | 3.96
CLAP2016 | 5.06 | 4.49 | 4.49
MORPH | 2.88 | 2.79 | 2.81
UTKFace | 4.63 | 4.44 | 4.38
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.54, + 0.89, + 0.568 + ], + "angle": 0, + "content": "Table 2. MAE \\( \\downarrow \\) of ResNet-50 models with different facial coverages. The models were pre-trained on IMDB-WIKI." + }, + { + "type": "table", + "bbox": [ + 0.531, + 0.582, + 0.858, + 0.7 + ], + "angle": 0, + "content": "
Dataset | Image Resolution: 64 × 64 | 128 × 128 | 256 × 256
AgeDB | 8.43 | 6.90 | 5.81
AFAD | 3.36 | 3.25 | 3.14
CACD2000 | 5.01 | 4.55 | 3.96
CLAP2016 | 11.34 | 5.90 | 4.49
MORPH | 3.33 | 3.07 | 2.81
UTKFace | 5.83 | 4.81 | 4.38
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.71, + 0.89, + 0.737 + ], + "angle": 0, + "content": "Table 3. MAE \\( \\downarrow \\) of ResNet-50 models with different image resolutions. The models were pre-trained on IMDB-WIKI." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.901 + ], + "angle": 0, + "content": "mation system. Instead, we leverage the FaRL backbone by Zheng et al. [30], utilizing a ViT-B-16 [9] model. The FaRL model is trained through a combination of (i) contrastive loss on image-text pairs and (ii) prediction of masked image patches. Training takes place on an extensive collection of facial images (50 million) from the image-text pair LAION dataset [26]. We retain the feature representation extracted by FaRL without altering the model's weights. Our decision to use FaRL is driven solely by the extensive amount" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "1201" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.107, + 0.089, + 0.44, + 0.206 + ], + "angle": 0, + "content": "
Dataset | Transform: No Transform | RoI Tanh-polar [18]
AgeDB | 5.81 | 5.93
AFAD | 3.14 | 3.15
CACD2000 | 3.96 | 4.07
CLAP2016 | 4.49 | 4.71
MORPH | 2.81 | 2.80
UTKFace | 4.38 | 4.39
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.216, + 0.47, + 0.245 + ], + "angle": 0, + "content": "Table 4. MAE \\( \\downarrow \\) of ResNet-50 models with different input transformations. The models were pre-trained on IMDB-WIKI [25]." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.262, + 0.462, + 0.374 + ], + "angle": 0, + "content": "
Dataset | Backbone: ResNet-50 | Eff.Net-B4 | ViT-B-16 | VGG-16
AgeDB | 5.81 | 5.76 | 9.07 | 6.02
AFAD | 3.14 | 3.20 | 4.04 | 3.22
CACD2000 | 3.96 | 4.00 | 6.22 | 3.92
CLAP2016 | 4.49 | 4.06 | 8.55 | 4.65
MORPH | 2.81 | 2.87 | 4.35 | 2.88
UTKFace | 4.38 | 4.23 | 6.88 | 4.64
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.384, + 0.468, + 0.413 + ], + "angle": 0, + "content": "Table 5. Intra-dataset MAE \\( \\downarrow \\) with different backbone architectures. The models were pre-trained on IMDB-WIKI [25]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.444, + 0.468, + 0.639 + ], + "angle": 0, + "content": "of pre-training data it incorporates, rather than specific characteristics of the backbone. Different image encoders could be trained in the same manner. However, due to the costs associated with training such models, we have chosen to use the available FaRL ViT-B-16 backbone. We employ a simple multilayer perceptron (MLP) over the FaRL-extracted features, consisting of 2 layers with 512 neurons each, followed by ReLU activation. Cross-entropy serves as the chosen loss function. For each downstream dataset, we pretrain the MLP on IMDB-WIKI or initialize it to random weights. We choose the preferred option based on validation loss on the downstream dataset. As previously, we replace the final layer before fine-tuning on downstream datasets." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.642, + 0.468, + 0.837 + ], + "angle": 0, + "content": "This straightforward modification outperformed all other models on AgeDB, CLAP2016, and UTKFace datasets. It also achieved superior results on AFAD, matched the performance of other models on CACD2000, but demonstrated worse performance on MORPH. Applying the FN test revealed statistically significant improvements of this model over others in both intra-dataset and cross-dataset evaluations, see Tab. 6. We attribute the poor performance of FaRL on MORPH to the fact that the distributions of images in LAION [26] and MORPH [23] are vastly different. As we do not finetune the feature representation of FaRL [30], it is possible that the representation learned on LAION is superior on the other datasets but deficient on MORPH." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.468, + 0.901 + ], + "angle": 0, + "content": "We do not claim this model to be the ultimate solution, but the results achieved with the FaRL backbone along with our public implementation offer a robust and straightforward baseline for a comparison with future methods." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.09, + 0.757, + 0.107 + ], + "angle": 0, + "content": "6. Discussion and Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.115, + 0.892, + 0.373 + ], + "angle": 0, + "content": "In this paper, we aimed to establish a fair comparison framework for evaluating various approaches for age estimation. We conducted a comprehensive analysis on seven different datasets, namely AgeDB [20], AFAD [21], CACD2000 [4], CLAP2016 [1], FG-NET [15], MORPH [23], and UTK-Face [29], comparing the models based on their Mean Absolute Error (MAE). To determine if any method outperformed the others, we employed the Friedman test and the Nemenyi critical difference test. When pre-training the models on a large dataset, we did not observe any statistically significant improvement by using the specialized loss functions designed for age estimation. With random model initialization, we observed some improvement over the baseline cross-entropy on small datasets. Specifically, for Mean-Variance loss [22], OR-CNN [21], and DLDL [12]. These improvements can be attributed to implicit regularization provided by these methods." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.372, + 0.892, + 0.553 + ], + "angle": 0, + "content": "Previously published results reported continuous performance improvements over time (as depicted in Fig. 1). Our findings challenge these claims. We argue that the reported improvements can be attributed to either the random data splitting strategy or hyperparameter tuning to achieve the best test set performance. Our analysis of the data preparation pipeline revealed that factors such as the extent of facial coverage or input resolution exert a more significant impact on the results than the choice of the age estimation specific loss function. Guided by these findings, we use the FaRL [30] model as a backbone for age estimation and demonstrated its effectiveness. In summary:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.554, + 0.891, + 0.613 + ], + "angle": 0, + "content": "- We show that existing evaluation practices in age estimation do not provide a consistent comparison of the state-of-the-art methods. We define a proper evaluation protocol which addresses the issue." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.614, + 0.892, + 0.674 + ], + "angle": 0, + "content": "- We show that improvements in age estimation results over recent years can not be attributed to the specialized loss functions introduced in [10, 12, 13, 17, 21, 22], as is claimed in the published literature." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.674, + 0.891, + 0.734 + ], + "angle": 0, + "content": "- Using the insight gained from analyzing different components of the age estimation pipeline, we construct a prediction model with the FaRL [30] backbone and demonstrate its effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.735, + 0.891, + 0.78 + ], + "angle": 0, + "content": "- To facilitate reproducibility and simple future comparisons, we have made our implementation framework and the exact data splits publicly available." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.554, + 0.892, + 0.78 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.794, + 0.652, + 0.811 + ], + "angle": 0, + "content": "Acknowledgment" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.818, + 0.892, + 0.893 + ], + "angle": 0, + "content": "This research was supported by the Grant Agency of the Czech Technical University in Prague, grant No. SGS23/176/OHK3/3T/13 and by the Grant agency of the Ministry of Interior Czech Republic, project FACIS grant. No. VJ02010041" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1202" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.085, + 0.136, + 0.891, + 0.798 + ], + "angle": 0, + "content": "
Method | Init. | Evaluation Dataset
AgeDB | AFAD | CACD2000 | CLAP2016 | FG-NET | MORPH | UTKFace
IMDB | Imag. | Rand. | IMDB | Imag. | Rand. | IMDB | Imag. | Rand. | IMDB | Imag. | Rand. | IMDB | Imag. | Rand. | IMDB | Imag. | Rand. | IMDB | Imag. | Rand.
AgeDBCross-Entropy5.817.207.657.8312.6114.705.908.108.736.8310.8612.4110.8216.2718.874.836.746.938.4511.8811.82
Regression6.236.547.608.0512.1314.196.767.568.748.329.9512.3210.5613.8318.005.646.666.949.4210.4212.14
OR-CNN [21]5.786.517.527.4711.8613.806.057.718.556.6410.1811.749.7413.8217.404.736.617.058.2010.7511.41
DLDL [12]5.806.957.467.811.8514.925.997.968.526.5110.8411.489.2315.6316.954.746.346.827.9711.8711.24
DLDL-v2 [13]5.806.877.587.6112.9816.195.918.158.616.4810.8812.509.9115.3619.014.927.537.207.9711.6111.24
SORD [10]5.816.937.587.8112.8015.425.967.908.776.6110.3712.229.7214.7617.854.766.587.148.1211.5311.60
Mean-Var. [22]5.856.697.337.2612.4014.356.007.898.356.7010.5011.9010.5514.3217.434.996.877.338.2510.7711.64
Unimodal [17]5.907.1115.498.3713.1120.876.228.2416.116.7311.1421.3110.1516.1332.774.846.7817.428.2311.8623.09
FaRL + MLP5.64--7.82--7.41--9.32--9.15--4.73--9.92--
AFADCross-Entropy15.7017.3118.053.143.173.329.5411.1811.218.9610.2310.3210.9211.3811.966.806.838.1912.1013.0713.29
Regression13.6715.9117.213.173.163.308.7210.5110.728.339.9110.0211.2011.8912.356.277.347.9911.2312.8312.96
OR-CNN [21]12.0815.6516.723.163.173.288.8711.0510.897.859.739.9210.6311.9412.586.686.857.8110.5012.4312.74
DLDL [12]14.1215.7017.213.143.163.259.4010.7011.068.689.549.9811.3111.6412.077.046.757.8211.5212.4312.82
DLDL-v2 [13]13.9016.3317.783.153.173.289.4610.6811.028.609.7610.3210.8311.8112.646.926.797.9411.2912.6113.18
SORD [10]14.3016.0817.493.143.153.249.4510.7011.098.649.7910.1011.2111.6312.196.876.827.9311.5912.7913.10
Mean-Var. [22]12.5415.0716.683.163.163.268.9810.3310.757.939.339.7810.9612.2412.436.616.767.8810.5712.0012.62
Unimodal [17]13.9915.8920.973.203.249.309.2310.6814.568.649.7914.5111.3111.8318.297.077.3212.5311.2612.3317.47
FaRL + MLP16.41--3.12--10.95--8.57--12.24--6.62--11.64--
CLAP2000Cross-Entropy9.6611.8410.6010.708.5013.083.964.594.898.428.6410.5117.4523.6420.867.2112.2010.3911.1611.3812.61
Regression10.9110.4410.7610.237.2311.664.064.524.838.847.759.9817.5519.5019.608.618.8111.7911.3410.3811.78
OR-CNN [21]10.4311.0211.859.669.4812.174.014.604.748.578.8510.2918.4724.3220.857.5210.0411.0511.1712.3012.27
DLDL [12]9.8410.7911.2810.099.3013.203.964.424.768.398.499.9918.3818.9921.527.279.1611.0111.1911.9412.27
DLDL-v2 [13]9.9012.3111.208.0311.5011.513.964.574.697.678.889.4318.1122.8919.027.2013.469.7310.5212.3211.47
SORD [10]9.7710.9011.0410.359.5511.953.964.424.708.388.519.8918.0520.8421.737.238.9811.5911.1812.0612.22
Mean-Var. [22]10.8111.4210.839.7110.8211.494.074.604.788.889.2010.0820.4822.6820.148.1412.5911.7211.7412.2912.23
Unimodal [17]10.4611.0446.2610.639.8525.744.104.7337.419.198.9230.9619.3719.7515.848.9411.6432.6311.8911.7532.98
FaRL + MLP11.32--9.08--3.96--8.57--19.63--6.56--11.27--
Cross-Entropy7.3510.1512.265.417.035.346.658.119.114.495.968.735.929.2812.024.966.616.905.747.218.58
Regression7.518.5211.746.075.195.956.867.249.454.654.777.894.856.3110.145.095.498.836.025.938.66
OR-CNN [21]6.838.7411.245.835.925.446.737.258.654.134.607.385.096.479.224.925.786.525.435.957.68
DLDL [12]7.209.3311.395.576.905.856.857.649.264.185.107.395.267.449.184.895.926.525.516.377.87
DLDL-v2 [13]7.149.4212.365.475.956.456.697.999.344.234.878.525.227.048.754.856.047.295.536.128.23
SORD [10]7.199.6012.165.477.746.626.638.099.664.275.347.815.597.777.624.926.016.625.486.468.08
Mean-Var. [22]7.089.1612.585.186.305.386.647.379.944.284.877.955.456.6911.144.967.387.495.526.168.65
Unimodal [17]7.019.7720.715.586.105.546.478.2013.084.175.3913.835.136.3915.134.806.0510.025.446.6715.27
FaRL + MLP7.50--4.34--6.57--3.38--4.95--4.47--4.85-
Cross-Entropy9.6611.7312.636.697.7810.368.5310.8310.116.908.9610.649.4511.9615.382.812.963.018.9710.8111.92
Regression10.4812.9912.566.606.6510.669.8211.479.687.839.2710.679.2410.1316.692.832.742.978.4010.97
OR-CNN [21]9.3511.6512.826.787.7811.818.3911.3410.236.848.7311.059.5811.0917.472.832.852.998.8210.3712.06
DLDL [12]9.4112.0012.666.587.7811.768.5811.9210.106.859.2611.159.4411.4316.942.812.922.988.8010.8112.46
DLDL-v2 [13]9.7911.4912.686.608.2212.458.7910.989.816.988.9811.229.5211.6317.572.822.933.008.9710.7012.47
SORD [10]9.4811.8412.736.547.9111.198.7311.1810.136.848.9910.729.3411.0815.902.812.912.998.8310.8511.97
Mean-Var. [22]9.7011.6212.936.687.8110.418.6510.5910.117.038.8010.569.5111.4515.812.832.892.958.9410.5911.95
Unimodal [17]9.9312.3117.446.637.048.188.6810.1112.037.198.9512.389.8012.1717.832.782.908.669.0710.7515.45
FaRL + MLP8.40--4.67--7.45--6.21--9.28--3.04--
Cross-Entropy6.618.889.585.516.426.756.569.108.984.827.347.504.786.627.645.096.617.354.384.755.32
Regression7.017.798.835.966.266.436.777.878.615.245.936.674.415.077.275.415.956.714.724.535.34
OR-CNN [21]6.718.298.755.566.746.526.618.898.374.956.796.704.545.716.555.266.076.764.404.435.15
DLDL [12]6.658.609.005.426.686.196.529.018.844.817.197.464.855.877.285.166.257.034.394.665.30
DLDL-v2 [13]6.798.439.915.427.186.326.529.428.694.827.878.784.836.547.555.146.367.284.364.685.25
SORD [10]6.618.969.115.427.186.326.529.428.694.827.878.784.836.547.555.146.367.284.364.685.25
Mean-Var. [22]6.798.368.535.416.546.326.558.558.325.046.816.325.056.306.905.376.156.394.424.575.05
Unimodal [17]6.688.6622.425.357.6816.646.589.2817.174.867.6018.834.556.2522.985.225.9616.444.474.7821.01
FaRL + MLP7.16-
" + }, + { + "type": "table_caption", + "bbox": [ + 0.079, + 0.809, + 0.893, + 0.851 + ], + "angle": 0, + "content": "Table 6. Intra-dataset and cross-dataset Mean Absolute Error (MAE) \\( \\downarrow \\) of ResNet-50 models. Results marked as Initialization: IMDB are of models that are initialized to ImageNet weights, then trained with Cross-Entropy on IMDB-WIKI [25] and then finetuned on the downstream dataset. Imag. signifies initialization to weights pre-trained on ImageNet. Rand. denotes random initialization." + }, + { + "type": "page_number", + "bbox": [ + 0.485, + 0.946, + 0.515, + 0.957 + ], + "angle": 0, + "content": "1203" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.198 + ], + "angle": 0, + "content": "[1] E. Agustsson, R. Timofte, S. Escalera, X. Baro, I. Guyon, and R. Rothe. Apparent and real age estimation in still images with deep residual regressors on appa-real database. In 12th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), 2017. IEEE, 2017. 1, 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.201, + 0.472, + 0.27 + ], + "angle": 0, + "content": "[2] A. Berg, M. Oskarsson, and M. O'Connor. Deep ordinal regression with label diversity. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 2740-2747, Los Alamitos, CA, USA, 2021. IEEE Computer Society. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.272, + 0.47, + 0.327 + ], + "angle": 0, + "content": "[3] Wenzhi Cao, Vahid Mirjalili, and Sebastian Raschka. Rank consistent ordinal regression for neural networks with application to age estimation. Pattern Recognition Letters, 140: 325-331, 2020. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.329, + 0.47, + 0.398 + ], + "angle": 0, + "content": "[4] Bor-Chun Chen, Chu-Song Chen, and Winston H. Hsu. Cross-age reference coding for age-invariant face recognition and retrieval. In Computer Vision – ECCV 2014, pages 768–783, Cham, 2014. Springer International Publishing. 1, 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.401, + 0.47, + 0.457 + ], + "angle": 0, + "content": "[5] Shixing Chen, Caojin Zhang, Ming Dong, Jialiang Le, and Mike Rao. Using ranking-cnn for age estimation. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 742-751, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.459, + 0.47, + 0.5 + ], + "angle": 0, + "content": "[6] Janez Demšar. Statistical comparisons of classifiers over multiple data sets. The Journal of Machine learning research, 7:1-30, 2006. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.502, + 0.47, + 0.558 + ], + "angle": 0, + "content": "[7] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.56, + 0.47, + 0.63 + ], + "angle": 0, + "content": "[8] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5203-5212, 2020. 
4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.631, + 0.47, + 0.742 + ], + "angle": 0, + "content": "[9] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.744, + 0.469, + 0.799 + ], + "angle": 0, + "content": "[10] Raul Díaz and Amit Marathe. Soft labels for ordinal regression. In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4733-4742, 2019. 2, 3, 4, 5, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.802, + 0.469, + 0.841 + ], + "angle": 0, + "content": "[11] Vojtech Franc and Jan Cech. Learning cnns from weakly annotated facial images. Image and Vision Computing, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.47, + 0.9 + ], + "angle": 0, + "content": "[12] Bin-Bin Gao, Chao Xing, Chen-Wei Xie, Jianxin Wu, and Xin Geng. Deep label distribution learning with label ambiguity. IEEE Transactions on Image Processing, 26(6):2825-2838, 2017. 2, 3, 4, 5, 7, 8" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[13] Bin-Bin Gao, Hong-Yu Zhou, Jianxin Wu, and Xin Geng. Age estimation using expectation of label distribution learning. In Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-18, pages 712-718. International Joint Conferences on Artificial Intelligence Organization, 2018. 2, 3, 4, 5, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.177, + 0.892, + 0.232 + ], + "angle": 0, + "content": "[14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.233, + 0.892, + 0.287 + ], + "angle": 0, + "content": "[15] A. Lanitis, C.J. Taylor, and T.F. Cootes. Toward automatic simulation of aging effects on face images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 24(4): 442-455, 2002. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.288, + 0.892, + 0.33 + ], + "angle": 0, + "content": "[16] Ling Li and Hsuan-Tien Lin. Ordinal regression by extended binary classification. In Advances in Neural Information Processing Systems, page 865 - 872, 2007. Cited by: 195. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.33, + 0.892, + 0.425 + ], + "angle": 0, + "content": "[17] Qiang Li, Jingjing Wang, Zhaoliang Yao, Yachun Li, Pengju Yang, Jingwei Yan, Chunmao Wang, and Shiliang Pu. Unimodal-concentrated loss: Fully adaptive label distribution learning for ordinal regression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20513-20522, 2022. 2, 3, 4, 5, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.427, + 0.892, + 0.469 + ], + "angle": 0, + "content": "[18] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. Roi tanh-polar transformer network for face parsing in the wild. 
Image and Vision Computing, 112, 2021. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.47, + 0.892, + 0.523 + ], + "angle": 0, + "content": "[19] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. Fpage: Leveraging face parsing attention for facial age estimation in the wild. IEEE Transactions on Image Processing, 2022. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.525, + 0.892, + 0.607 + ], + "angle": 0, + "content": "[20] Stylianos Moschoglou, Athanasios Papaioannou, Christos Sagonas, Jiankang Deng, Irene Kotsia, and Stefanos Zafeiriou. Agedb: the first manually collected, in-the-wild age database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshop, 2017. 1, 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.609, + 0.892, + 0.677 + ], + "angle": 0, + "content": "[21] Zhenxing Niu, Mo Zhou, Le Wang, Xinbo Gao, and Gang Hua. Ordinal regression with multiple output cnn for age estimation. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4920-4928, 2016. 1, 2, 3, 4, 5, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.892, + 0.734 + ], + "angle": 0, + "content": "[22] Hongyu Pan, Hu Han, Shiguang Shan, and Xilin Chen. Mean-variance loss for deep age estimation from a face. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5285-5294, 2018. 2, 3, 4, 5, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.735, + 0.892, + 0.789 + ], + "angle": 0, + "content": "[23] K. Ricanek and T. Tesafaye. Morph: a longitudinal image database of normal adult age-progression. In 7th International Conference on Automatic Face and Gesture Recognition (FGR06), pages 341-345, 2006. 1, 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.79, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[24] Rasmus Rothe, Radu Timofte, and Luc Van Gool. Dex: Deep expectation of apparent age from a single image. In 2015 IEEE International Conference on Computer Vision Workshop (ICCVW), pages 252-257, 2015. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[25] Rasmus Rothe, Radu Timofte, and Luc Van Gool. Deep expectation of real and apparent age from a single image without facial landmarks. International Journal of Computer Vision, 126(2-4):144-157, 2018. 1, 3, 7, 8" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1204" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.217 + ], + "angle": 0, + "content": "[26] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade W Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa R Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. LAION-5b: An open large-scale dataset for training next generation image-text models. In Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2022. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.218, + 0.47, + 0.288 + ], + "angle": 0, + "content": "[27] Nyeong-Ho Shin, Seon-Ho Lee, and Chang-Su Kim. Moving window regression: A novel approach to ordinal regression. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18760-18769, 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.289, + 0.47, + 0.346 + ], + "angle": 0, + "content": "[28] Yunxuan Zhang, Li Liu, Cheng Li, and Chen-Change Loy. Quantifying facial age by posterior of age comparisons. In Proceedings of the British Machine Vision Conference (BMVC), pages 108.1–108.12. BMVA Press, 2017. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.346, + 0.47, + 0.401 + ], + "angle": 0, + "content": "[29] Zhifei Zhang, Yang Song, and Hairong Qi. Age progression/regression by conditional adversarial autoencoder. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR). IEEE, 2017. 1, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.402, + 0.47, + 0.487 + ], + "angle": 0, + "content": "[30] Yinglin Zheng, Hao Yang, Ting Zhang, Jianmin Bao, Dongdong Chen, Yangyu Huang, Lu Yuan, Dong Chen, Ming Zeng, and Fang Wen. General facial representation learning in a visual-linguistic manner. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18697-18709, 2022. 1, 5, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.487 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "1205" + } + ] +] \ No newline at end of file diff --git a/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/393f0825-ecb8-44e6-bdfa-5dde4b82ecdb_origin.pdf b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/393f0825-ecb8-44e6-bdfa-5dde4b82ecdb_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ad20fd1e373a4967ec7c62fed954937e4ee2ac28 --- /dev/null +++ b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/393f0825-ecb8-44e6-bdfa-5dde4b82ecdb_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:563d99fa1a8a8efae4136a0896d2054d2c8cfde08ca53ee6f4cfa07f07ea7765 +size 541474 diff --git a/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/full.md b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/full.md new file mode 100644 index 0000000000000000000000000000000000000000..9849e682f2f35f79b91916d15e906877c7e8b16a --- /dev/null +++ b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/full.md @@ -0,0 +1,241 @@ +# A Call to Reflect on Evaluation Practices for Age Estimation: Comparative Analysis of the State-of-the-Art and a Unified Benchmark + +Jakub Paplhám +Department of Cybernetics +Faculty of Electrical Engineering +Czech Technical University in Prague + +paplhjak@fel(cvut.cz + +Vojtěch Franc +Department of Cybernetics +Faculty of Electrical Engineering +Czech Technical University in Prague + +xfrancv@cmp.felk.cvut.cz + +# Abstract + +Comparing different age estimation methods poses a challenge due to the unreliability of published results stemming from inconsistencies in the benchmarking process. 
Previous studies have reported continuous performance improvements over the past decade using specialized methods; however, our findings challenge these claims. This paper identifies two trivial, yet persistent issues with the currently used evaluation protocol and describes how to resolve them. We offer an extensive comparative analysis for state-of-the-art facial age estimation methods. Surprisingly, we find that the performance differences between the methods are negligible compared to the effect of other factors, such as facial alignment, facial coverage, image resolution, model architecture, or the amount of data used for pretraining. We use the gained insights to propose using FaRL as the backbone model and demonstrate its effectiveness on all public datasets. We make the source code and exact data splits public on GitHub and in the supplementary material. + +# 1. Introduction + +Age estimation has received significant interest in recent years. However, a closer examination of the evaluation process reveals two underlying issues. First, no standardized data splits are defined for most public datasets, and the used splits are rarely made public, making the results irreproducible. Second, methods often modify multiple components of the age estimation system, making it unclear which modification is responsible for the performance gains. + +This paper aims to critically analyze the evaluation practices in age estimation research, highlight the issues, and appeal to the community to follow good evaluation practices to resolve them. We benchmark and fairly compare recent deep-learning methods for age estimation from facial images. We focus on state-of-the-art methods that + +adapt a generic architecture by changing its last layer or the loss function to suit the age estimation task. Although this may appear restrictive, it is essential to note that most of the methods proposed in the field fall into this category $(\approx 70\%)$ . By comparing methods that modify only a small part of the network, we aim to ensure a fair evaluation, as the remaining setup can be kept identical. Besides the usual intra-class performance, we also evaluate their cross-dataset generalization, which has been neglected in the age prediction literature so far. Surprisingly, we find that the influence of the loss function and the decision layer on the results, usually the primary component that distinguishes different methods, is negligible compared to other factors. + +# Contributions + +- We show that existing evaluation practices in age estimation do not provide consistent results. This leads to obstacles for researchers aiming to advance prior work and for practitioners striving to pinpoint the most effective approach for their application. +- We define a proper evaluation protocol, offer an extensive comparative analysis for state-of-the-art facial age estimation methods, and publish our code. +- We show that the performance difference caused by using a different decision layer or training loss is significantly smaller than that caused by other parts of the prediction pipeline. +- We identify that the amount of data used for pre-training is the most influential factor and use the observation to propose using FaRL [30] as the backbone architecture. We demonstrate its effectiveness on public datasets. + +# 2. Issues with Current Evaluation Practices + +# 2.1. Data Splits + +Publications focused on age estimation evaluate their methods on several datasets [1, 4, 20, 21, 23, 25, 29]. 
The most + +![](images/5822d4cf3b9407a2245cbe0fa17e78ab05bb18c23164a7649a052192ee02992d.jpg) +Figure 1. Mean Absolute Error (MAE) $\downarrow$ of age estimation methods on the MORPH dataset, as reported in the existing literature and measured by us, viewed over time. Random splitting remains the prevalent data splitting strategy. The consistent performance improvements over time are attributed in the literature to specialized loss functions for age estimation. Subject-exclusive (identity-disjoint) data splitting is rarely employed. With unified subject-exclusive data splitting and all factors except the loss function fixed, all evaluated methods yield comparable results, failing to achieve the performance gains promised by the random splitting. + +commonly used of these is the MORPH [23] dataset. However, the evaluation procedures between the publications are not unified. For instance, OR-CNN [21] randomly divides the dataset into two parts: $80\%$ for training and $20\%$ for testing. No mention is made of a validation set for model selection. Random splitting (RS) protocol is also used in [3, 5, 12, 13, 19, 28], but the specific data splits differ between studies as they are rarely made public. Since the dataset contains multiple images per person (many captured at the same age), the same individual can be present in both the training and testing sets. This overlap introduces a bias, resulting in overly optimistic evaluation outcomes. The degree of data leakage can vary when using random splitting, making certain data splits more challenging than others. Further, this fundamentally changes the entire setup; rarely will one want to deploy the age estimation system on the people present in the training data. Consequently, comparison of different methods and discerning which method stands out as the most effective based on the published results becomes problematic. + +Only some publications [22, 27] recognize this bias introduced by the splitting strategy and address it by implementing subject-exclusive (SE) [22] splitting. This approach ensures that all images of an individual are exclusively in the (i) training, (ii) validation, or (iii) testing part. The terminology here is not fully established. One might encounter either identity-disjoint or person-disjoint instead + +of subject-exclusive in the literature. + +To assess how prevalent random splitting (RS) on the MORPH dataset truly is, we conducted a survey of all age estimation papers presented at the CVPR and ICCV since 2013. We found 16 papers focused on age estimation, of which nine use RS, two use SE, five use specialized splits, and three do not utilize MORPH. We further surveyed other research conferences and journals, namely: IJCAI, BMVC, ACCV, IEEE TIP, Pattern Recognit. Lett., Pattern Anal. Appl., and find eight influential age estimation papers that use MORPH. Of those, seven use RS, and one uses a specialized split. By specialized splits, we are referring to non-standard strategies such as ethnically balanced partitions. + +Altogether, we discover that only $\approx 10\%$ of papers that utilize MORPH use the SE protocol. This finding is concerning, as MORPH [23] is the most popular dataset used to compare age estimation approaches. 
Other datasets do not provide a reliable benchmark either, as standardized data splits are provided only for two public age estimation datasets: (i) the ChaLearn Looking at People Challenge 2016 (CLAP2016) dataset [1], which is relatively small, consisting of fewer than 8000 images, and (ii) the Cross-Age Celebrity Dataset (CACD2000) [4], which has noisy training annotations and is not intended for age estimation. Comparing methods using only these datasets is, therefore, not satisfactory either. Other popular datasets, AgeDB dataset [20] and Asian Face Age Dataset (AFAD) [21], also consist of multiple images per person, requiring SE splitting. However, they lack any data splits accepted by the community and often are used with the RS protocol. As such, they suffer from the same issues as MORPH [23]. + +# 2.2. Pipeline Ablation + +To fairly compare multiple methods, an identical experimental setup should be used for each of them. The current state-of-the-art age estimation approaches adhere to a common framework encompassing: (i) data collection, (ii) data preprocessing, (iii) model design, including the decision layer and the loss function, and (iv) training and evaluation. Most novel approaches introduce distinct changes to the component (iii); namely they design a specialized loss function to exploit the ordinal nature of age. However, they frequently alter multiple components of the framework simultaneously, complicating the attribution of performance improvements to the claimed modifications. + +To compare different loss functions, e.g., [3, 10, 12, 13, 16, 17, 21, 22], the other components of the framework should be kept constant, allowing us to isolate the impact of the selected method on the performance. This is trivial, yet the age estimation community mostly ignores it. Further, many publications hand-wave the other components and do not precisely specify them, making future comparisons meaningless. It's important to question whether the + +reported enhancement in a research paper truly stems from the novel loss function it proposes or if it could be attributed to a different modification. We strongly advocate that each component be addressed in isolation and that the experimental setup be precisely described. + +Over the past decade, numerous novel age estimation methods have been introduced, promising continuous performance improvements every year. However, motivated by these findings, we raise the question: how reliable are the published age estimation results? In Sec. 3 we aim to establish a proper evaluation protocol and use it in Sec. 4 to compare the methods [10, 12, 13, 17, 21, 22, 24] reliably. Figure 1 illustrates the contrast between the performance of state-of-the-art methods as reported in their respective studies and the outcomes as measured by our implementation. + +# 3. Evaluation Protocol + +We identified two trivial yet persistent issues that prevent a reliable comparison of age estimation methods. In this section, we address the initial challenge concerning consistent data partitioning. We provide clear guidelines for the evaluation protocol to ensure replicable and fair assessments. Specifically, the protocol should establish a reproducible approach for defining the data used in both (i) training and (ii) performance evaluation. When specifying the training data, one needs to state whether the training dataset is the sole source of information, or if the model was pretrained with additional data. 
Additionally, the evaluation can be subdivided based on the data used for model evaluation into intra-dataset, and cross-dataset results. We describe how to evaluate models in these settings below. + +Intra-dataset performance To evaluate intra-dataset performance, a single dataset is used for both training and evaluation of the age estimation system. In this case one should (i) randomly split the dataset into subject-exclusive training, validation, and test set $^{1}$ , (ii) train the model on the training set, (iii) measure the model's performance on the validation set, (iv) possibly revert back to step (ii) and train the model again, (v) evaluate the model's performance on the test set, then (vi) publish the results on the test set along with a detailed description of the system components and the data used. If the dataset consists of limited number of examples, it is possible to create multiple splits of the data into training, validation and test set through step (i). Following this, steps (ii) through (v) are iterated $n$ times, where $n$ is the number of generated splits. It is advisable to present the average test performance along with its standard deviation when reporting the results. + +Cross-dataset performance To evaluate cross-dataset performance, the data split in step (i) of the aforementioned evaluation process is generated from a collection of multiple datasets, ensuring that the complete chosen dataset must be employed entirely for evaluation, effectively constituting the designated test set. The remaining steps of the evaluation procedure remain unaltered. + +Regardless of the scenario, whether it is intra-dataset or cross-dataset, each system needs to be evaluated against the test data only once, and the results published. All prior model development and hyperparameter tuning must be based solely on the results on the validation set. Furthermore, it should be indicated whether the training data are the only source of information used for training, or whether the model was pretrained with additional data. In the letter scenario, a detailed description of the additional data and their utilization should also be provided. + +# 4. Comparative Method Analysis + +This section applies the evaluation protocol to compare state-of-the-art age estimation methods. We maintain a consistent preprocessing procedure, model architecture, and dataset while selectively altering the decision layer and loss function to incorporate modifications proposed in prominent works such as [10, 12, 13, 17, 21, 22, 24]. + +# 4.1. Methodology + +Datasets We evaluate the methods using 7 datasets: AgeDB [20], AFAD [21], CACD2000 [4], CLAP2016 [1], FG-NET [15], MORPH [23], and UTKFace [29]. We also use the IMDB-WIKI dataset [25] for pre-training with clean labels from Franc and Čech [11]. + +Data Splits For the CLAP2016 and CACD2000 datasets, we use the single data split provided by the dataset authors. For the remaining datasets, we create five subject-exclusive (SE) data splits. To generate the split, we partition the dataset such that $60\%$ of the dataset is used for training, $20\%$ for model selection (validation), and $20\%$ for evaluating the model performance (test). Additionally, we ensure that each partition has the same age distribution. Due to its small size, we only use FG-NET for evaluation. We make our data splits and code public at Facial-Age-Benchmark2. + +Model Architecture & Weight Initialization We use ResNet-50 [14] as the backbone architecture. 
We always start the training of the methods from the same initialization. We run the experiments with (i) random initialization, (ii) weights pre-trained on ImageNet (TorchVision'sImagenet1K_V2), and (iii) weights pre-trained on ImageNet and then further trained on IMDB-WIKI for age estimation + +with cross-entropy. After the pre-training, the last layer of the model is replaced with a layer specific to the desired method. The models are then fine-tuned on the downstream dataset. It is important to note that for the baseline cross-entropy, we also replace the final layer before fine-tuning. This ensures that the experimental setup remains identical to that of the other methods. + +Training Details We utilize the Adam optimizer with the parameters $\beta_{1} = 0.9$ , $\beta_{2} = 0.999$ . For pre-training on the IMDB-WIKI dataset, we set the learning rate to $\alpha = 10^{-3}$ and train the model for a total of 100 epochs. For fine-tuning on the remaining datasets we reduce the learning rate to $\alpha = 10^{-4}$ and train the model for 50 epochs. We use a batch size of 100. The best model is selected based on the MAE metric computed on the validation set. We utilize two data augmentations during training, (i) horizontal mirroring, and (ii) cropping out an $80\%$ to $100\%$ portion of the bounding box and resizing it to the model input shape. We do not tune the hyperparameters of the methods [10, 12, 13, 17, 21, 22] on the validation set. We apply them in the original configurations. We argue that if any of the loss functions is a significant improvement over the baseline, we should observe a performance improvement across a broad range of hyperparameters and preprocessing pipelines. We consider our training parameters to be reasonable and to provide a comparison of the methods as if employed out-of-the-box. + +Preprocessing We use the RetinaFace model developed by Deng et al. [8] for face detection and facial landmark detection. We use complete facial coverage, i.e., the images encompass the entire head. We resize the images to a resolution of $256 \times 256$ pixels and normalize the pixel values of the images. To this end, we subtract the mean and divide by the standard deviation of colors on ImageNet [7]. + +Metrics We use the Mean Absolute Error (MAE) calculated on the test data as the performance measure. To determine whether any method is consistently better than others, we employ the Friedman test and the Nemenyi critical difference test (FN test) as described by Demšar [6]. The main statistic used in the test is the average ranking (1 is best) of a method computed on multiple datasets. Differences in the average ranking are then used to decide whether a method is significantly better than others or whether the improvement is due to randomness (the null hypothesis). We use a common significance level (p-value) of $\alpha = 5\%$ . + +# 4.2. Results + +Intra-Dataset Performance The intra-dataset results can be seen in Tab. 6, highlighted with a grey background. When starting from random initialization, training with the + +Unimodal loss [17] tends to be unstable. Excluding the Unimodal loss [17] from the evaluation, we apply the FN test. The results indicate that three methods: OR-CNN [21], DLDL [12], and the Mean-Variance loss [22], demonstrate a significant performance improvement over the baseline cross-entropy. With limited data availability, when pretraining is not possible, it is advisable to utilize one of the aforementioned methods. 
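To make the evaluation protocol of Secs. 3 and 4.1 concrete, the sketch below shows one possible way to build a subject-exclusive 60/20/20 split and to compare methods by average rank with the Friedman test. This is an illustrative sketch only, not the authors' released implementation: the pandas DataFrame with `identity` and `age` columns, the greedy assignment strategy, and the helper names are assumptions, and the age-distribution matching used for the published splits is omitted.

```python
# Minimal sketch of subject-exclusive splitting and rank-based method comparison.
# Assumes a pandas DataFrame with one row per image and an "identity" column.
import numpy as np
import pandas as pd
from scipy.stats import friedmanchisquare, rankdata

def subject_exclusive_split(df, seed=0, fractions=(0.6, 0.2, 0.2)):
    """Assign whole identities to train/val/test so no person spans two parts."""
    rng = np.random.default_rng(seed)
    identities = rng.permutation(df["identity"].unique())
    counts = df["identity"].value_counts()
    targets = np.array(fractions) * len(df)   # desired image counts per part
    filled = np.zeros(3)
    part_of = {}
    for ident in identities:
        # Greedily place each identity into the part furthest below its target.
        k = int(np.argmin(filled / targets))
        part_of[ident] = ("train", "val", "test")[k]
        filled[k] += counts[ident]
    return df["identity"].map(part_of)

def mae(y_true, y_pred):
    """Mean Absolute Error in years."""
    return float(np.mean(np.abs(np.asarray(y_true) - np.asarray(y_pred))))

def compare_methods(mae_table):
    """mae_table: 2-D array, rows = datasets (blocks), columns = methods."""
    avg_rank = rankdata(mae_table, axis=1).mean(axis=0)   # 1 = best (lowest MAE)
    stat, p = friedmanchisquare(*np.asarray(mae_table).T)  # one sample per method
    return avg_rank, p
```

The Nemenyi critical-difference test of Demšar [6] would then be applied to the resulting average ranks to decide which pairwise differences between methods are statistically significant.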
+ +With pre-training, either on ImageNet or IMDB-WIKI, none of the methods is significantly better than the cross-entropy. In other words, we do not observe any systematic improvement by deviating from the standard approach. + +Cross-Dataset Generalization Cross-dataset results, shown in Tab. 6 with white background, were obtained by evaluating the performance of models on datasets that were not used for their training. The cross-dataset performance is unsurprisingly significantly worse than the intra-dataset performance for all of the methods. Using the FN test, we conclude that there is no significant difference in generalization capability between any of the methods [10, 12, 13, 17, 21, 22] and the cross-entropy, regardless of pre-training. In other words, though the loss functions may reduce overfitting, they do not help in the presence of covariate shift. + +None of the methods perform well when evaluated on a different dataset than the one they were trained on. The best cross-dataset results are achieved by training on either UTKFace or CLAP2016. The worst performance across databases is observed when models are trained on AFAD or MORPH. This discrepancy can be attributed to UTKFace and CLAP2016 having a broader range of images, which allows them to generalize effectively to other datasets. Conversely, the limited diversity in MORPH or AFAD datasets, such as AFAD mainly comprising images of people of Asian ethnicity and around $80\%$ of MORPH being composed of individuals of African American ethnicity, contributes to the poor knowledge transfer. The significant decrease in the performance of models trained on the MORPH dataset when applied to other age estimation datasets underscores the importance of not relying solely on the MORPH dataset as the benchmark for age estimation. To ensure a reliable evaluation of different methods, it is crucial to incorporate results from alternative datasets as well. + +# 5. Component Analysis + +In this section, we analyze the influence of the backbone architecture and the data preparation pipeline on model performance. We show that changes to these components can have a much more significant impact on the final performance than the choice of a loss function. When altering a component, we maintain all other components at their defaults, presented as the Cross-Entropy approach in Sec. 4. + +We use the gained insight to propose a strong baseline age estimation model using the FaRL [30] backbone. + +# 5.1. Model Architecture + +Multiple different backbone architectures can be found in the age estimation literature. Among these architectures, VGG16 [10, 13, 17, 22, 27, 28] and ResNet-50 [2, 3, 19] stand out as the most common choice. We evaluate the influence of the architecture choice on the performance and extend the comparison to include more recent advancements, EfficientNet-B4 and ViT-B-16. We present our findings in Tab. 5. No backbone emerges as universally best across all datasets. Notably, changes in the backbone have a more substantial impact on performance than changes to the loss function, see Tab. 6. This highlights the importance of a thorough ablation, as changes in the backbone architecture could obscure the impact of the loss function. + +# 5.2. Data Preparation Pipeline + +Age estimation models require only a specific region of an image, specifically the person's face, as input, rather than the entire image. However, the influence of this selection process on the model's performance is not apriori known. 
Should the model be presented with a tight crop of the face or the entire head? Additionally, facial images can differ in terms of scale and resolution since they originate from various sources and as such need to be resized to a uniform resolution. In this section, we examine the impact of the aforementioned data preparation pipeline on the performance of age estimation models. We demonstrate that changes in the preprocessing have a more substantial impact on performance than changes to the loss function. + +Facial Alignment Numerous studies lack an explanation of their facial alignment procedure. Others merely mention the utilization of facial landmarks. To assess whether a standardized alignment is needed for a fair comparison of multiple methods, we adopt three distinct alignment procedures and evaluate their effect on model performance. Firstly, we (i) perform no alignment and employ the bounding box proposed by the facial detection model [8] as the simplest approach. The bounding box sides are parallel to the axes of the image. Secondly, (ii) we utilize the proposed bounding box but rotate it to horizontally align the eyes. Lastly, (iii) we use an alignment procedure, which normalizes the rotation, positioning, and scale. For details, refer to the implementation. A visual representation of these facial alignment methods is depicted in Fig. 2. The performance of models trained using the various alignment procedures is presented in Tab. 1. When working with pre-aligned datasets like AFAD, we observe that procedure (iii) does not yield significant improvements compared to the simpler variants (i) or (ii). Similar results are obtained + +on datasets collected under standardized conditions, such as the MORPH dataset. However, when dealing with inthe-wild datasets like AgeDB and CLAP2016, we find that alignment (iii) leads to noticeable improvements over the simpler methods. Interestingly, on the UTKFace dataset, which also contains in-the-wild images, approach (ii) of solely rotating the proposed bounding boxes achieves the best outcomes. However, the disparities among the various alignment procedures are not substantial. We therefore argue that any facial alignment technique that effectively normalizes the position, rotation, and scale of the faces would yield comparable results. + +Facial Coverage While facial alignment defines the positioning, orientation, and scale of facial landmarks, the extent to which the face is visible in an image also needs to be specified. We refer to this notion as facial coverage. It measures how much of the face is shown in an image and can range from minimal coverage, where only the eyes and mouth are visible, to complete coverage, where the entire head is visible. Determining the optimal compromise between complete facial coverage and minimal coverage is not immediately clear. Complete facial coverage provides a comprehensive view of the face, allowing age estimation algorithms to consider a broader range of facial cues. On the other hand, partial coverage may help reduce overfitting by eliminating irrelevant facial cues and features with high variance. For a visual demonstration of various facial coverage levels, refer to Fig. 3. The concept of facial coverage has received limited attention in age estimation literature. Consequently, the extent of facial coverage utilized in previous studies can only be inferred from the images presented in those works. For instance, Berg et al. 
[2] seemingly employ minimal coverage, showing slightly more than just the mouth and eyes. The majority of other works [3, 12, 13, 17, 21, 27] tend to adopt partial coverage, where a significant portion of the face, including the chin and forehead, is visible, but not the entire head and hair. In the works of Pan et al. [22], Rothe et al. [24], and Zhang et al. [28], the entire head is shown. + +The performance of models trained with the different coverage levels is presented in Tab. 2. Generally, complete facial coverage, which includes the entire head in the model input, yields the best results across the majority of datasets. However, specifically for AFAD dataset and the MORPH dataset, partial coverage performs better. It is important to note that the AFAD dataset contains preprocessed images that do not capture the entire head. Consequently, using complete facial coverage with this dataset results in the presence of black bars and a decrease in the effective pixel resolution of the face. It is then to be expected that increased facial coverage yields inferior results. The smallest coverage, limited to the facial region up to the eyes and mouth, + +![](images/1e53304d80e10cd67dbd8eab24e4992df4994ac8712e1a03f48a45fc1539fe0a.jpg) +(a) Crop. + +![](images/5e27e3ab6f333a541c855371847e0b8229d43e62789118a10a2f63d443029e49.jpg) +(b) Rotation. + +![](images/f7cea1e324fb70130baaa7384dc596c9f93a5c808325b7f61a466b016b00e442.jpg) +(c) Rot., Trans., Scale. +Figure 2. Comparison of different alignment methods using the average face from the FG-NET dataset. + +consistently performs the worst. With sufficient pixel resolution, the full facial coverage performs the best. + +Input Resolution To investigate the influence of input resolution on age estimation, we performed experiments using multiple resolutions on all datasets: specifically, $256 \times 256$ , $128 \times 128$ , and $64 \times 64$ pixels. The results are presented in Tab. 3. Our findings indicate that an increase in image resolution consistently results in improved model performance across all datasets. Hence, the best performance was achieved with a resolution of $256 \times 256$ pixels. + +In the literature, one can find resolutions ranging from $60 \times 60$ to $256 \times 256$ pixels, where newer works tend to use larger resolution images. As the resolution increase can directly be observed to improve the results; and the resolutions increased with years; it is difficult to say whether newly proposed methods are better overall, or whether they perform better due to using higher resolution images. + +Input Transform Finally, we examined the input transformation proposed by Lin et al. [18], which involves converting a face image into a tanh-polar representation. This approach has shown large performance improvements in face semantic segmentation. Lin et al. then modified the network for age estimation, reporting impressive results [19]. We explored the potential benefits of applying this transformation for age estimation. However, our findings indicate that the transformation does not improve the results compared to the baseline, as shown in Tab. 4. Therefore, we conclude that the improved age estimation performance observed by Lin et al. [19] does not arise from the use of a different representation, but rather from pre-training on semantic segmentation or their model architecture. + +# 5.3. FaRL Backbone + +We observed that adjustments to the decision layer and loss function have minimal impact on the final model performance. 
Conversely, large performance disparities arise when modifying other components of the prediction pipeline. Notably, the pretraining data appear to be the most influential factor. Based on this insight, we opt against creating a specialized loss function to enhance the age esti + +![](images/b33461ff630b18e174f8bf28e104b8ea3cf5ec860777f6f15ab3b0da56ddcec5.jpg) +(a) Eyes & Mouth. +Figure 3. Comparison of different facial coverage levels using the average face from the FG-NET dataset. + +![](images/ac9a8548a957f52dba0296747203c80b65c3e53b33219a81a6696567eed36afc.jpg) +(b) Chin & Forehead. + +![](images/3148bc6fde0c3c8feb58bd0a464313f9081bc4ba87a394a01e6373042a7d1d53.jpg) +(c) Head. + +
| Dataset | Crop | Rotation | Rot. + Trans. + Scale |
| --- | --- | --- | --- |
| AgeDB | 5.93 | 5.92 | 5.84 |
| AFAD | 3.12 | 3.11 | 3.11 |
| CACD2000 | 4.01 | 4.00 | 4.00 |
| CLAP2016 | 4.68 | 4.57 | 4.49 |
| MORPH | 2.81 | 2.78 | 2.79 |
| UTKFace | 4.49 | 4.42 | 4.44 |
+ +Table 1. MAE $\downarrow$ of ResNet-50 models with different facial alignment. The models were pre-trained on IMDB-WIKI. + +
| Dataset | Eyes & Mouth | Chin & Forehead | Head |
| --- | --- | --- | --- |
| AgeDB | 6.06 | 5.84 | 5.81 |
| AFAD | 3.17 | 3.11 | 3.14 |
| CACD2000 | 4.02 | 4.00 | 3.96 |
| CLAP2016 | 5.06 | 4.49 | 4.49 |
| MORPH | 2.88 | 2.79 | 2.81 |
| UTKFace | 4.63 | 4.44 | 4.38 |
+ +Table 2. MAE $\downarrow$ of ResNet-50 models with different facial coverages. The models were pre-trained on IMDB-WIKI. + +
| Dataset | 64 × 64 | 128 × 128 | 256 × 256 |
| --- | --- | --- | --- |
| AgeDB | 8.43 | 6.90 | 5.81 |
| AFAD | 3.36 | 3.25 | 3.14 |
| CACD2000 | 5.01 | 4.55 | 3.96 |
| CLAP2016 | 11.34 | 5.90 | 4.49 |
| MORPH | 3.33 | 3.07 | 2.81 |
| UTKFace | 5.83 | 4.81 | 4.38 |
+ +Table 3. MAE $\downarrow$ of ResNet-50 models with different image resolutions. The models were pre-trained on IMDB-WIKI. + +mation system. Instead, we leverage the FaRL backbone by Zheng et al. [30], utilizing a ViT-B-16 [9] model. The FaRL model is trained through a combination of (i) contrastive loss on image-text pairs and (ii) prediction of masked image patches. Training takes place on an extensive collection of facial images (50 million) from the image-text pair LAION dataset [26]. We retain the feature representation extracted by FaRL without altering the model's weights. Our decision to use FaRL is driven solely by the extensive amount + +
| Dataset | No Transform | RoI Tanh-polar [18] |
| --- | --- | --- |
| AgeDB | 5.81 | 5.93 |
| AFAD | 3.14 | 3.15 |
| CACD2000 | 3.96 | 4.07 |
| CLAP2016 | 4.49 | 4.71 |
| MORPH | 2.81 | 2.80 |
| UTKFace | 4.38 | 4.39 |
+ +Table 4. MAE $\downarrow$ of ResNet-50 models with different input transformations. The models were pre-trained on IMDB-WIKI [25]. + +
| Dataset | ResNet-50 | Eff.Net-B4 | ViT-B-16 | VGG-16 |
| --- | --- | --- | --- | --- |
| AgeDB | 5.81 | 5.76 | 9.07 | 6.02 |
| AFAD | 3.14 | 3.20 | 4.04 | 3.22 |
| CACD2000 | 3.96 | 4.00 | 6.22 | 3.92 |
| CLAP2016 | 4.49 | 4.06 | 8.55 | 4.65 |
| MORPH | 2.81 | 2.87 | 4.35 | 2.88 |
| UTKFace | 4.38 | 4.23 | 6.88 | 4.64 |
+ +Table 5. Intra-dataset MAE $\downarrow$ with different backbone architectures. The models were pre-trained on IMDB-WIKI [25]. + +of pre-training data it incorporates, rather than specific characteristics of the backbone. Different image encoders could be trained in the same manner. However, due to the costs associated with training such models, we have chosen to use the available FaRL ViT-B-16 backbone. We employ a simple multilayer perceptron (MLP) over the FaRL-extracted features, consisting of 2 layers with 512 neurons each, followed by ReLU activation. Cross-entropy serves as the chosen loss function. For each downstream dataset, we pretrain the MLP on IMDB-WIKI or initialize it to random weights. We choose the preferred option based on validation loss on the downstream dataset. As previously, we replace the final layer before fine-tuning on downstream datasets. + +This straightforward modification outperformed all other models on AgeDB, CLAP2016, and UTKFace datasets. It also achieved superior results on AFAD, matched the performance of other models on CACD2000, but demonstrated worse performance on MORPH. Applying the FN test revealed statistically significant improvements of this model over others in both intra-dataset and cross-dataset evaluations, see Tab. 6. We attribute the poor performance of FaRL on MORPH to the fact that the distributions of images in LAION [26] and MORPH [23] are vastly different. As we do not finetune the feature representation of FaRL [30], it is possible that the representation learned on LAION is superior on the other datasets but deficient on MORPH. + +We do not claim this model to be the ultimate solution, but the results achieved with the FaRL backbone along with our public implementation offer a robust and straightforward baseline for a comparison with future methods. + +# 6. Discussion and Conclusions + +In this paper, we aimed to establish a fair comparison framework for evaluating various approaches for age estimation. We conducted a comprehensive analysis on seven different datasets, namely AgeDB [20], AFAD [21], CACD2000 [4], CLAP2016 [1], FG-NET [15], MORPH [23], and UTK-Face [29], comparing the models based on their Mean Absolute Error (MAE). To determine if any method outperformed the others, we employed the Friedman test and the Nemenyi critical difference test. When pre-training the models on a large dataset, we did not observe any statistically significant improvement by using the specialized loss functions designed for age estimation. With random model initialization, we observed some improvement over the baseline cross-entropy on small datasets. Specifically, for Mean-Variance loss [22], OR-CNN [21], and DLDL [12]. These improvements can be attributed to implicit regularization provided by these methods. + +Previously published results reported continuous performance improvements over time (as depicted in Fig. 1). Our findings challenge these claims. We argue that the reported improvements can be attributed to either the random data splitting strategy or hyperparameter tuning to achieve the best test set performance. Our analysis of the data preparation pipeline revealed that factors such as the extent of facial coverage or input resolution exert a more significant impact on the results than the choice of the age estimation specific loss function. Guided by these findings, we use the FaRL [30] model as a backbone for age estimation and demonstrated its effectiveness. 
In summary: + +- We show that existing evaluation practices in age estimation do not provide a consistent comparison of the state-of-the-art methods. We define a proper evaluation protocol which addresses the issue. +- We show that improvements in age estimation results over recent years can not be attributed to the specialized loss functions introduced in [10, 12, 13, 17, 21, 22], as is claimed in the published literature. +- Using the insight gained from analyzing different components of the age estimation pipeline, we construct a prediction model with the FaRL [30] backbone and demonstrate its effectiveness. +- To facilitate reproducibility and simple future comparisons, we have made our implementation framework and the exact data splits publicly available. + +# Acknowledgment + +This research was supported by the Grant Agency of the Czech Technical University in Prague, grant No. SGS23/176/OHK3/3T/13 and by the Grant agency of the Ministry of Interior Czech Republic, project FACIS grant. No. VJ02010041 + +
MethodInit.Evaluation Dataset
AgeDBAFADCACD2000CLAP2016FG-NETMORPHUTKFace
IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.
AgeDBCross-Entropy5.817.207.657.8312.6114.705.908.108.736.8310.8612.4110.8216.2718.874.836.746.938.4511.8811.82
Regression6.236.547.608.0512.1314.196.767.568.748.329.9512.3210.5613.8318.005.646.666.949.4210.4212.14
OR-CNN [21]5.786.517.527.4711.8613.806.057.718.556.6410.1811.749.7413.8217.404.736.617.058.2010.7511.41
DLDL [12]5.806.957.467.811.8514.925.997.968.526.5110.8411.489.2315.6316.954.746.346.827.9711.8711.24
DLDL-v2 [13]5.806.877.587.6112.9816.195.918.158.616.4810.8812.509.9115.3619.014.927.537.207.9711.6111.24
SORD [10]5.816.937.587.8112.8015.425.967.908.776.6110.3712.229.7214.7617.854.766.587.148.1211.5311.60
Mean-Var. [22]5.856.697.337.2612.4014.356.007.898.356.7010.5011.9010.5514.3217.434.996.877.338.2510.7711.64
Unimodal [17]5.907.1115.498.3713.1120.876.228.2416.116.7311.1421.3110.1516.1332.774.846.7817.428.2311.8623.09
FaRL + MLP5.64--7.82--7.41--9.32--9.15--4.73--9.92--
AFADCross-Entropy15.7017.3118.053.143.173.329.5411.1811.218.9610.2310.3210.9211.3811.966.806.838.1912.1013.0713.29
Regression13.6715.9117.213.173.163.308.7210.5110.728.339.9110.0211.2011.8912.356.277.347.9911.2312.8312.96
OR-CNN [21]12.0815.6516.723.163.173.288.8711.0510.897.859.739.9210.6311.9412.586.686.857.8110.5012.4312.74
DLDL [12]14.1215.7017.213.143.163.259.4010.7011.068.689.549.9811.3111.6412.077.046.757.8211.5212.4312.82
DLDL-v2 [13]13.9016.3317.783.153.173.289.4610.6811.028.609.7610.3210.8311.8112.646.926.797.9411.2912.6113.18
SORD [10]14.3016.0817.493.143.153.249.4510.7011.098.649.7910.1011.2111.6312.196.876.827.9311.5912.7913.10
Mean-Var. [22]12.5415.0716.683.163.163.268.9810.3310.757.939.339.7810.9612.2412.436.616.767.8810.5712.0012.62
Unimodal [17]13.9915.8920.973.203.249.309.2310.6814.568.649.7914.5111.3111.8318.297.077.3212.5311.2612.3317.47
FaRL + MLP16.41--3.12--10.95--8.57--12.24--6.62--11.64--
CLAP2000Cross-Entropy9.6611.8410.6010.708.5013.083.964.594.898.428.6410.5117.4523.6420.867.2112.2010.3911.1611.3812.61
Regression10.9110.4410.7610.237.2311.664.064.524.838.847.759.9817.5519.5019.608.618.8111.7911.3410.3811.78
OR-CNN [21]10.4311.0211.859.669.4812.174.014.604.748.578.8510.2918.4724.3220.857.5210.0411.0511.1712.3012.27
DLDL [12]9.8410.7911.2810.099.3013.203.964.424.768.398.499.9918.3818.9921.527.279.1611.0111.1911.9412.27
DLDL-v2 [13]9.9012.3111.208.0311.5011.513.964.574.697.678.889.4318.1122.8919.027.2013.469.7310.5212.3211.47
SORD [10]9.7710.9011.0410.359.5511.953.964.424.708.388.519.8918.0520.8421.737.238.9811.5911.1812.0612.22
Mean-Var. [22]10.8111.4210.839.7110.8211.494.074.604.788.889.2010.0820.4822.6820.148.1412.5911.7211.7412.2912.23
Unimodal [17]10.4611.0446.2610.639.8525.744.104.7337.419.198.9230.9619.3719.7515.848.9411.6432.6311.8911.7532.98
FaRL + MLP11.32--9.08--3.96--8.57--19.63--6.56--11.27--
Cross-Entropy7.3510.1512.265.417.035.346.658.119.114.495.968.735.929.2812.024.966.616.905.747.218.58
Regression7.518.5211.746.075.195.956.867.249.454.654.777.894.856.3110.145.095.498.836.025.938.66
OR-CNN [21]6.838.7411.245.835.925.446.737.258.654.134.607.385.096.479.224.925.786.525.435.957.68
DLDL [12]7.209.3311.395.576.905.856.857.649.264.185.107.395.267.449.184.895.926.525.516.377.87
DLDL-v2 [13]7.149.4212.365.475.956.456.697.999.344.234.878.525.227.048.754.856.047.295.536.128.23
SORD [10]7.199.6012.165.477.746.626.638.099.664.275.347.815.597.777.624.926.016.625.486.468.08
Mean-Var. [22]7.089.1612.585.186.305.386.647.379.944.284.877.955.456.6911.144.967.387.495.526.168.65
Unimodal [17]7.019.7720.715.586.105.546.478.2013.084.175.3913.835.136.3915.134.806.0510.025.446.6715.27
FaRL + MLP7.50--4.34--6.57--3.38--4.95--4.47--4.85-
Cross-Entropy9.6611.7312.636.697.7810.368.5310.8310.116.908.9610.649.4511.9615.382.812.963.018.9710.8111.92
Regression10.4812.9912.566.606.6510.669.8211.479.687.839.2710.679.2410.1316.692.832.742.978.4010.97
OR-CNN [21]9.3511.6512.826.787.7811.818.3911.3410.236.848.7311.059.5811.0917.472.832.852.998.8210.3712.06
DLDL [12]9.4112.0012.666.587.7811.768.5811.9210.106.859.2611.159.4411.4316.942.812.922.988.8010.8112.46
DLDL-v2 [13]9.7911.4912.686.608.2212.458.7910.989.816.988.9811.229.5211.6317.572.822.933.008.9710.7012.47
SORD [10]9.4811.8412.736.547.9111.198.7311.1810.136.848.9910.729.3411.0815.902.812.912.998.8310.8511.97
Mean-Var. [22]9.7011.6212.936.687.8110.418.6510.5910.117.038.8010.569.5111.4515.812.832.892.958.9410.5911.95
Unimodal [17]9.9312.3117.446.637.048.188.6810.1112.037.198.9512.389.8012.1717.832.782.908.669.0710.7515.45
FaRL + MLP8.40--4.67--7.45--6.21--9.28--3.04--
Cross-Entropy6.618.889.585.516.426.756.569.108.984.827.347.504.786.627.645.096.617.354.384.755.32
Regression7.017.798.835.966.266.436.777.878.615.245.936.674.415.077.275.415.956.714.724.535.34
OR-CNN [21]6.718.298.755.566.746.526.618.898.374.956.796.704.545.716.555.266.076.764.404.435.15
DLDL [12]6.658.609.005.426.686.196.529.018.844.817.197.464.855.877.285.166.257.034.394.665.30
DLDL-v2 [13]6.798.439.915.427.186.326.529.428.694.827.878.784.836.547.555.146.367.284.364.685.25
SORD [10]6.618.969.115.427.186.326.529.428.694.827.878.784.836.547.555.146.367.284.364.685.25
Mean-Var. [22]6.798.368.535.416.546.326.558.558.325.046.816.325.056.306.905.376.156.394.424.575.05
Unimodal [17]6.688.6622.425.357.6816.646.589.2817.174.867.6018.834.556.2522.985.225.9616.444.474.7821.01
FaRL + MLP7.16-
+ +Table 6. Intra-dataset and cross-dataset Mean Absolute Error (MAE) $\downarrow$ of ResNet-50 models. Results marked as Initialization: IMDB are of models that are initialized to ImageNet weights, then trained with Cross-Entropy on IMDB-WIKI [25] and then finetuned on the downstream dataset. Imag. signifies initialization to weights pre-trained on ImageNet. Rand. denotes random initialization. + +# References + +[1] E. Agustsson, R. Timofte, S. Escalera, X. Baro, I. Guyon, and R. Rothe. Apparent and real age estimation in still images with deep residual regressors on appa-real database. In 12th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), 2017. IEEE, 2017. 1, 2, 3, 7 +[2] A. Berg, M. Oskarsson, and M. O'Connor. Deep ordinal regression with label diversity. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 2740-2747, Los Alamitos, CA, USA, 2021. IEEE Computer Society. 5 +[3] Wenzhi Cao, Vahid Mirjalili, and Sebastian Raschka. Rank consistent ordinal regression for neural networks with application to age estimation. Pattern Recognition Letters, 140: 325-331, 2020. 2, 5 +[4] Bor-Chun Chen, Chu-Song Chen, and Winston H. Hsu. Cross-age reference coding for age-invariant face recognition and retrieval. In Computer Vision – ECCV 2014, pages 768–783, Cham, 2014. Springer International Publishing. 1, 2, 3, 7 +[5] Shixing Chen, Caojin Zhang, Ming Dong, Jialiang Le, and Mike Rao. Using ranking-cnn for age estimation. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 742-751, 2017. 2 +[6] Janez Demšar. Statistical comparisons of classifiers over multiple data sets. The Journal of Machine learning research, 7:1-30, 2006. 4 +[7] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 4 +[8] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5203-5212, 2020. 4, 5 +[9] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. 6 +[10] Raul Díaz and Amit Marathe. Soft labels for ordinal regression. In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4733-4742, 2019. 2, 3, 4, 5, 7, 8 +[11] Vojtech Franc and Jan Cech. Learning cnns from weakly annotated facial images. Image and Vision Computing, 2018. 3 +[12] Bin-Bin Gao, Chao Xing, Chen-Wei Xie, Jianxin Wu, and Xin Geng. Deep label distribution learning with label ambiguity. IEEE Transactions on Image Processing, 26(6):2825-2838, 2017. 2, 3, 4, 5, 7, 8 + +[13] Bin-Bin Gao, Hong-Yu Zhou, Jianxin Wu, and Xin Geng. Age estimation using expectation of label distribution learning. In Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-18, pages 712-718. International Joint Conferences on Artificial Intelligence Organization, 2018. 
2, 3, 4, 5, 7, 8 +[14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016. 3 +[15] A. Lanitis, C.J. Taylor, and T.F. Cootes. Toward automatic simulation of aging effects on face images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 24(4): 442-455, 2002. 3, 7 +[16] Ling Li and Hsuan-Tien Lin. Ordinal regression by extended binary classification. In Advances in Neural Information Processing Systems, page 865 - 872, 2007. Cited by: 195. 2 +[17] Qiang Li, Jingjing Wang, Zhaoliang Yao, Yachun Li, Pengju Yang, Jingwei Yan, Chunmao Wang, and Shiliang Pu. Unimodal-concentrated loss: Fully adaptive label distribution learning for ordinal regression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20513-20522, 2022. 2, 3, 4, 5, 7, 8 +[18] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. Roi tanh-polar transformer network for face parsing in the wild. Image and Vision Computing, 112, 2021. 6, 7 +[19] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. Fpage: Leveraging face parsing attention for facial age estimation in the wild. IEEE Transactions on Image Processing, 2022. 2, 5, 6 +[20] Stylianos Moschoglou, Athanasios Papaioannou, Christos Sagonas, Jiankang Deng, Irene Kotsia, and Stefanos Zafeiriou. Agedb: the first manually collected, in-the-wild age database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshop, 2017. 1, 2, 3, 7 +[21] Zhenxing Niu, Mo Zhou, Le Wang, Xinbo Gao, and Gang Hua. Ordinal regression with multiple output cnn for age estimation. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4920-4928, 2016. 1, 2, 3, 4, 5, 7, 8 +[22] Hongyu Pan, Hu Han, Shiguang Shan, and Xilin Chen. Mean-variance loss for deep age estimation from a face. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5285-5294, 2018. 2, 3, 4, 5, 7, 8 +[23] K. Ricanek and T. Tesafaye. Morph: a longitudinal image database of normal adult age-progression. In 7th International Conference on Automatic Face and Gesture Recognition (FGR06), pages 341-345, 2006. 1, 2, 3, 7 +[24] Rasmus Rothe, Radu Timofte, and Luc Van Gool. Dex: Deep expectation of apparent age from a single image. In 2015 IEEE International Conference on Computer Vision Workshop (ICCVW), pages 252-257, 2015. 3, 5 +[25] Rasmus Rothe, Radu Timofte, and Luc Van Gool. Deep expectation of real and apparent age from a single image without facial landmarks. International Journal of Computer Vision, 126(2-4):144-157, 2018. 1, 3, 7, 8 + +[26] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade W Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa R Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. LAION-5b: An open large-scale dataset for training next generation image-text models. In Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2022. 6, 7 +[27] Nyeong-Ho Shin, Seon-Ho Lee, and Chang-Su Kim. Moving window regression: A novel approach to ordinal regression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18760-18769, 2022. 2, 5 +[28] Yunxuan Zhang, Li Liu, Cheng Li, and Chen-Change Loy. 
Quantifying facial age by posterior of age comparisons. In Proceedings of the British Machine Vision Conference (BMVC), pages 108.1–108.12. BMVA Press, 2017. 2, 5 +[29] Zhifei Zhang, Yang Song, and Hairong Qi. Age progression/regression by conditional adversarial autoencoder. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR). IEEE, 2017. 1, 3, 7 +[30] Yinglin Zheng, Hao Yang, Ting Zhang, Jianmin Bao, Dongdong Chen, Yangyu Huang, Lu Yuan, Dong Chen, Ming Zeng, and Fang Wen. General facial representation learning in a visual-linguistic manner. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18697-18709, 2022. 1, 5, 6, 7 \ No newline at end of file diff --git a/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/images.zip b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..6f6566094c9a3a9843d4369db9625382a0b8b32d --- /dev/null +++ b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f0544ff386934df29ce78401ce36cabf71fb4e5da4cef58bddc878b013532c9 +size 621337 diff --git a/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/layout.json b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..c3f490ae545d32c8034cabf29e65a772e2a30e87 --- /dev/null +++ b/2024/A Call to Reflect on Evaluation Practices for Age Estimation_ Comparative Analysis of the State-of-the-Art and a Unified Benchmark/layout.json @@ -0,0 +1,6047 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 77, + 103, + 517, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 103, + 517, + 140 + ], + "spans": [ + { + "bbox": [ + 77, + 103, + 517, + 140 + ], + "type": "text", + "content": "A Call to Reflect on Evaluation Practices for Age Estimation: Comparative Analysis of the State-of-the-Art and a Unified Benchmark" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 99, + 162, + 282, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 162, + 282, + 217 + ], + "spans": [ + { + "bbox": [ + 99, + 162, + 282, + 217 + ], + "type": "text", + "content": "Jakub Paplhám \nDepartment of Cybernetics \nFaculty of Electrical Engineering \nCzech Technical University in Prague" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 135, + 220, + 246, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 220, + 246, + 231 + ], + "spans": [ + { + "bbox": [ + 135, + 220, + 246, + 231 + ], + "type": "text", + "content": "paplhjak@fel(cvut.cz" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 310, + 162, + 494, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 162, + 494, + 217 + ], + "spans": [ + { + "bbox": [ + 310, + 162, + 494, + 217 + ], + "type": "text", + "content": "Vojtěch Franc \nDepartment of Cybernetics \nFaculty of Electrical Engineering \nCzech Technical University in Prague" + } + ] + } + ], + 
"index": 5 + }, + { + "bbox": [ + 335, + 220, + 466, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 220, + 466, + 231 + ], + "spans": [ + { + "bbox": [ + 335, + 220, + 466, + 231 + ], + "type": "text", + "content": "xfrancv@cmp.felk.cvut.cz" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 258, + 192, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 258, + 192, + 272 + ], + "spans": [ + { + "bbox": [ + 143, + 258, + 192, + 272 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 284, + 289, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 284, + 289, + 501 + ], + "spans": [ + { + "bbox": [ + 45, + 284, + 289, + 501 + ], + "type": "text", + "content": "Comparing different age estimation methods poses a challenge due to the unreliability of published results stemming from inconsistencies in the benchmarking process. Previous studies have reported continuous performance improvements over the past decade using specialized methods; however, our findings challenge these claims. This paper identifies two trivial, yet persistent issues with the currently used evaluation protocol and describes how to resolve them. We offer an extensive comparative analysis for state-of-the-art facial age estimation methods. Surprisingly, we find that the performance differences between the methods are negligible compared to the effect of other factors, such as facial alignment, facial coverage, image resolution, model architecture, or the amount of data used for pretraining. We use the gained insights to propose using FaRL as the backbone model and demonstrate its effectiveness on all public datasets. We make the source code and exact data splits public on GitHub and in the supplementary material." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 525, + 128, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 525, + 128, + 537 + ], + "spans": [ + { + "bbox": [ + 47, + 525, + 128, + 537 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 544, + 287, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 641 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 641 + ], + "type": "text", + "content": "Age estimation has received significant interest in recent years. However, a closer examination of the evaluation process reveals two underlying issues. First, no standardized data splits are defined for most public datasets, and the used splits are rarely made public, making the results irreproducible. Second, methods often modify multiple components of the age estimation system, making it unclear which modification is responsible for the performance gains." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "text", + "content": "This paper aims to critically analyze the evaluation practices in age estimation research, highlight the issues, and appeal to the community to follow good evaluation practices to resolve them. We benchmark and fairly compare recent deep-learning methods for age estimation from facial images. 
We focus on state-of-the-art methods that" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 260, + 547, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 260, + 547, + 416 + ], + "spans": [ + { + "bbox": [ + 304, + 260, + 547, + 416 + ], + "type": "text", + "content": "adapt a generic architecture by changing its last layer or the loss function to suit the age estimation task. Although this may appear restrictive, it is essential to note that most of the methods proposed in the field fall into this category " + }, + { + "bbox": [ + 304, + 260, + 547, + 416 + ], + "type": "inline_equation", + "content": "(\\approx 70\\%)" + }, + { + "bbox": [ + 304, + 260, + 547, + 416 + ], + "type": "text", + "content": ". By comparing methods that modify only a small part of the network, we aim to ensure a fair evaluation, as the remaining setup can be kept identical. Besides the usual intra-class performance, we also evaluate their cross-dataset generalization, which has been neglected in the age prediction literature so far. Surprisingly, we find that the influence of the loss function and the decision layer on the results, usually the primary component that distinguishes different methods, is negligible compared to other factors." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 434, + 369, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 434, + 369, + 445 + ], + "spans": [ + { + "bbox": [ + 306, + 434, + 369, + 445 + ], + "type": "text", + "content": "Contributions" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 446, + 545, + 638 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 306, + 446, + 545, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 446, + 545, + 506 + ], + "spans": [ + { + "bbox": [ + 306, + 446, + 545, + 506 + ], + "type": "text", + "content": "- We show that existing evaluation practices in age estimation do not provide consistent results. This leads to obstacles for researchers aiming to advance prior work and for practitioners striving to pinpoint the most effective approach for their application." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 506, + 545, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 506, + 545, + 541 + ], + "spans": [ + { + "bbox": [ + 306, + 506, + 545, + 541 + ], + "type": "text", + "content": "- We define a proper evaluation protocol, offer an extensive comparative analysis for state-of-the-art facial age estimation methods, and publish our code." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 542, + 545, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 542, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 306, + 542, + 545, + 590 + ], + "type": "text", + "content": "- We show that the performance difference caused by using a different decision layer or training loss is significantly smaller than that caused by other parts of the prediction pipeline." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 590, + 545, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 590, + 545, + 638 + ], + "spans": [ + { + "bbox": [ + 306, + 590, + 545, + 638 + ], + "type": "text", + "content": "- We identify that the amount of data used for pre-training is the most influential factor and use the observation to propose using FaRL [30] as the backbone architecture. 
We demonstrate its effectiveness on public datasets." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 650, + 531, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 650, + 531, + 662 + ], + "spans": [ + { + "bbox": [ + 306, + 650, + 531, + 662 + ], + "type": "text", + "content": "2. Issues with Current Evaluation Practices" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 670, + 380, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 380, + 682 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 380, + 682 + ], + "type": "text", + "content": "2.1. Data Splits" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "Publications focused on age estimation evaluate their methods on several datasets [1, 4, 20, 21, 23, 25, 29]. The most" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "1196" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 69, + 286, + 240 + ], + "blocks": [ + { + "bbox": [ + 48, + 69, + 286, + 240 + ], + "lines": [ + { + "bbox": [ + 48, + 69, + 286, + 240 + ], + "spans": [ + { + "bbox": [ + 48, + 69, + 286, + 240 + ], + "type": "image", + "image_path": "5822d4cf3b9407a2245cbe0fa17e78ab05bb18c23164a7649a052192ee02992d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 250, + 287, + 360 + ], + "lines": [ + { + "bbox": [ + 46, + 250, + 287, + 360 + ], + "spans": [ + { + "bbox": [ + 46, + 250, + 287, + 360 + ], + "type": "text", + "content": "Figure 1. Mean Absolute Error (MAE) " + }, + { + "bbox": [ + 46, + 250, + 287, + 360 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 46, + 250, + 287, + 360 + ], + "type": "text", + "content": " of age estimation methods on the MORPH dataset, as reported in the existing literature and measured by us, viewed over time. Random splitting remains the prevalent data splitting strategy. The consistent performance improvements over time are attributed in the literature to specialized loss functions for age estimation. Subject-exclusive (identity-disjoint) data splitting is rarely employed. 
With unified subject-exclusive data splitting and all factors except the loss function fixed, all evaluated methods yield comparable results, failing to achieve the performance gains promised by the random splitting." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 387, + 287, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 387, + 287, + 626 + ], + "spans": [ + { + "bbox": [ + 46, + 387, + 287, + 626 + ], + "type": "text", + "content": "commonly used of these is the MORPH [23] dataset. However, the evaluation procedures between the publications are not unified. For instance, OR-CNN [21] randomly divides the dataset into two parts: " + }, + { + "bbox": [ + 46, + 387, + 287, + 626 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 46, + 387, + 287, + 626 + ], + "type": "text", + "content": " for training and " + }, + { + "bbox": [ + 46, + 387, + 287, + 626 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 46, + 387, + 287, + 626 + ], + "type": "text", + "content": " for testing. No mention is made of a validation set for model selection. Random splitting (RS) protocol is also used in [3, 5, 12, 13, 19, 28], but the specific data splits differ between studies as they are rarely made public. Since the dataset contains multiple images per person (many captured at the same age), the same individual can be present in both the training and testing sets. This overlap introduces a bias, resulting in overly optimistic evaluation outcomes. The degree of data leakage can vary when using random splitting, making certain data splits more challenging than others. Further, this fundamentally changes the entire setup; rarely will one want to deploy the age estimation system on the people present in the training data. Consequently, comparison of different methods and discerning which method stands out as the most effective based on the published results becomes problematic." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 629, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 286, + 713 + ], + "type": "text", + "content": "Only some publications [22, 27] recognize this bias introduced by the splitting strategy and address it by implementing subject-exclusive (SE) [22] splitting. This approach ensures that all images of an individual are exclusively in the (i) training, (ii) validation, or (iii) testing part. The terminology here is not fully established. One might encounter either identity-disjoint or person-disjoint instead" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 305, + 72, + 453, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 453, + 83 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 453, + 83 + ], + "type": "text", + "content": "of subject-exclusive in the literature." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 85, + 545, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 85, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 304, + 85, + 545, + 228 + ], + "type": "text", + "content": "To assess how prevalent random splitting (RS) on the MORPH dataset truly is, we conducted a survey of all age estimation papers presented at the CVPR and ICCV since 2013. 
We found 16 papers focused on age estimation, of which nine use RS, two use SE, five use specialized splits, and three do not utilize MORPH. We further surveyed other research conferences and journals, namely: IJCAI, BMVC, ACCV, IEEE TIP, Pattern Recognit. Lett., Pattern Anal. Appl., and find eight influential age estimation papers that use MORPH. Of those, seven use RS, and one uses a specialized split. By specialized splits, we are referring to non-standard strategies such as ethnically balanced partitions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 229, + 546, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 229, + 546, + 445 + ], + "spans": [ + { + "bbox": [ + 304, + 229, + 546, + 445 + ], + "type": "text", + "content": "Altogether, we discover that only " + }, + { + "bbox": [ + 304, + 229, + 546, + 445 + ], + "type": "inline_equation", + "content": "\\approx 10\\%" + }, + { + "bbox": [ + 304, + 229, + 546, + 445 + ], + "type": "text", + "content": " of papers that utilize MORPH use the SE protocol. This finding is concerning, as MORPH [23] is the most popular dataset used to compare age estimation approaches. Other datasets do not provide a reliable benchmark either, as standardized data splits are provided only for two public age estimation datasets: (i) the ChaLearn Looking at People Challenge 2016 (CLAP2016) dataset [1], which is relatively small, consisting of fewer than 8000 images, and (ii) the Cross-Age Celebrity Dataset (CACD2000) [4], which has noisy training annotations and is not intended for age estimation. Comparing methods using only these datasets is, therefore, not satisfactory either. Other popular datasets, AgeDB dataset [20] and Asian Face Age Dataset (AFAD) [21], also consist of multiple images per person, requiring SE splitting. However, they lack any data splits accepted by the community and often are used with the RS protocol. As such, they suffer from the same issues as MORPH [23]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 454, + 410, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 454, + 410, + 467 + ], + "spans": [ + { + "bbox": [ + 306, + 454, + 410, + 467 + ], + "type": "text", + "content": "2.2. Pipeline Ablation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 473, + 545, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 545, + 616 + ], + "type": "text", + "content": "To fairly compare multiple methods, an identical experimental setup should be used for each of them. The current state-of-the-art age estimation approaches adhere to a common framework encompassing: (i) data collection, (ii) data preprocessing, (iii) model design, including the decision layer and the loss function, and (iv) training and evaluation. Most novel approaches introduce distinct changes to the component (iii); namely they design a specialized loss function to exploit the ordinal nature of age. However, they frequently alter multiple components of the framework simultaneously, complicating the attribution of performance improvements to the claimed modifications." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "To compare different loss functions, e.g., [3, 10, 12, 13, 16, 17, 21, 22], the other components of the framework should be kept constant, allowing us to isolate the impact of the selected method on the performance. This is trivial, yet the age estimation community mostly ignores it. Further, many publications hand-wave the other components and do not precisely specify them, making future comparisons meaningless. It's important to question whether the" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "1197" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 133 + ], + "type": "text", + "content": "reported enhancement in a research paper truly stems from the novel loss function it proposes or if it could be attributed to a different modification. We strongly advocate that each component be addressed in isolation and that the experimental setup be precisely described." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 133, + 287, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 133, + 287, + 253 + ], + "spans": [ + { + "bbox": [ + 46, + 133, + 287, + 253 + ], + "type": "text", + "content": "Over the past decade, numerous novel age estimation methods have been introduced, promising continuous performance improvements every year. However, motivated by these findings, we raise the question: how reliable are the published age estimation results? In Sec. 3 we aim to establish a proper evaluation protocol and use it in Sec. 4 to compare the methods [10, 12, 13, 17, 21, 22, 24] reliably. Figure 1 illustrates the contrast between the performance of state-of-the-art methods as reported in their respective studies and the outcomes as measured by our implementation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 267, + 164, + 280 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 267, + 164, + 280 + ], + "spans": [ + { + "bbox": [ + 47, + 267, + 164, + 280 + ], + "type": "text", + "content": "3. Evaluation Protocol" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 289, + 288, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 289, + 288, + 456 + ], + "spans": [ + { + "bbox": [ + 46, + 289, + 288, + 456 + ], + "type": "text", + "content": "We identified two trivial yet persistent issues that prevent a reliable comparison of age estimation methods. In this section, we address the initial challenge concerning consistent data partitioning. We provide clear guidelines for the evaluation protocol to ensure replicable and fair assessments. Specifically, the protocol should establish a reproducible approach for defining the data used in both (i) training and (ii) performance evaluation. 
When specifying the training data, one needs to state whether the training dataset is the sole source of information, or if the model was pretrained with additional data. Additionally, the evaluation can be subdivided based on the data used for model evaluation into intra-dataset, and cross-dataset results. We describe how to evaluate models in these settings below." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 477, + 287, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 477, + 287, + 681 + ], + "spans": [ + { + "bbox": [ + 46, + 477, + 287, + 681 + ], + "type": "text", + "content": "Intra-dataset performance To evaluate intra-dataset performance, a single dataset is used for both training and evaluation of the age estimation system. In this case one should (i) randomly split the dataset into subject-exclusive training, validation, and test set " + }, + { + "bbox": [ + 46, + 477, + 287, + 681 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 46, + 477, + 287, + 681 + ], + "type": "text", + "content": ", (ii) train the model on the training set, (iii) measure the model's performance on the validation set, (iv) possibly revert back to step (ii) and train the model again, (v) evaluate the model's performance on the test set, then (vi) publish the results on the test set along with a detailed description of the system components and the data used. If the dataset consists of limited number of examples, it is possible to create multiple splits of the data into training, validation and test set through step (i). Following this, steps (ii) through (v) are iterated " + }, + { + "bbox": [ + 46, + 477, + 287, + 681 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 477, + 287, + 681 + ], + "type": "text", + "content": " times, where " + }, + { + "bbox": [ + 46, + 477, + 287, + 681 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 477, + 287, + 681 + ], + "type": "text", + "content": " is the number of generated splits. It is advisable to present the average test performance along with its standard deviation when reporting the results." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 545, + 156 + ], + "type": "text", + "content": "Cross-dataset performance To evaluate cross-dataset performance, the data split in step (i) of the aforementioned evaluation process is generated from a collection of multiple datasets, ensuring that the complete chosen dataset must be employed entirely for evaluation, effectively constituting the designated test set. The remaining steps of the evaluation procedure remain unaltered." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 156, + 546, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 156, + 546, + 276 + ], + "spans": [ + { + "bbox": [ + 304, + 156, + 546, + 276 + ], + "type": "text", + "content": "Regardless of the scenario, whether it is intra-dataset or cross-dataset, each system needs to be evaluated against the test data only once, and the results published. All prior model development and hyperparameter tuning must be based solely on the results on the validation set. 
Furthermore, it should be indicated whether the training data are the only source of information used for training, or whether the model was pretrained with additional data. In the latter scenario, a detailed description of the additional data and their utilization should also be provided." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 286, + 477, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 286, + 477, + 300 + ], + "spans": [ + { + "bbox": [ + 305, + 286, + 477, + 300 + ], + "type": "text", + "content": "4. Comparative Method Analysis" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 306, + 545, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 306, + 545, + 377 + ], + "spans": [ + { + "bbox": [ + 304, + 306, + 545, + 377 + ], + "type": "text", + "content": "This section applies the evaluation protocol to compare state-of-the-art age estimation methods. We maintain a consistent preprocessing procedure, model architecture, and dataset while selectively altering the decision layer and loss function to incorporate modifications proposed in prominent works such as [10, 12, 13, 17, 21, 22, 24]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 385, + 389, + 398 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 385, + 389, + 398 + ], + "spans": [ + { + "bbox": [ + 306, + 385, + 389, + 398 + ], + "type": "text", + "content": "4.1. Methodology" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 403, + 545, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 403, + 545, + 463 + ], + "spans": [ + { + "bbox": [ + 304, + 403, + 545, + 463 + ], + "type": "text", + "content": "Datasets We evaluate the methods using 7 datasets: AgeDB [20], AFAD [21], CACD2000 [4], CLAP2016 [1], FG-NET [15], MORPH [23], and UTKFace [29]. We also use the IMDB-WIKI dataset [25] for pre-training with clean labels from Franc and Čech [11]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 477, + 545, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 477, + 545, + 597 + ], + "spans": [ + { + "bbox": [ + 304, + 477, + 545, + 597 + ], + "type": "text", + "content": "Data Splits For the CLAP2016 and CACD2000 datasets, we use the single data split provided by the dataset authors. For the remaining datasets, we create five subject-exclusive (SE) data splits. To generate the split, we partition the dataset such that " + }, + { + "bbox": [ + 304, + 477, + 545, + 597 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 304, + 477, + 545, + 597 + ], + "type": "text", + "content": " of the dataset is used for training, " + }, + { + "bbox": [ + 304, + 477, + 545, + 597 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 304, + 477, + 545, + 597 + ], + "type": "text", + "content": " for model selection (validation), and " + }, + { + "bbox": [ + 304, + 477, + 545, + 597 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 304, + 477, + 545, + 597 + ], + "type": "text", + "content": " for evaluating the model performance (test). Additionally, we ensure that each partition has the same age distribution. Due to its small size, we only use FG-NET for evaluation. We make our data splits and code public at Facial-Age-Benchmark2."
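The "Data Splits" paragraph above describes identity-disjoint 60/20/20 partitioning with matched age distributions. A minimal sketch of that idea follows; the annotation columns (img_path, age, person_id) and the CSV file name are hypothetical placeholders, and the authoritative splits are the ones the authors publish in their repository, not this code.

```python
# Sketch of a subject-exclusive (SE) 60/20/20 split, assuming a hypothetical
# annotation table with columns: img_path, age, person_id. StratifiedGroupKFold
# keeps every person_id in exactly one fold while roughly balancing the age
# distribution across folds (ages are binned into decades for stratification).
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedGroupKFold

def subject_exclusive_split(df: pd.DataFrame, seed: int = 0):
    cv = StratifiedGroupKFold(n_splits=5, shuffle=True, random_state=seed)
    folds = [idx for _, idx in cv.split(df, y=df["age"] // 10, groups=df["person_id"])]
    train = df.iloc[np.concatenate(folds[:3])]  # ~60% of images, disjoint identities
    val = df.iloc[folds[3]]                     # ~20% for model selection
    test = df.iloc[folds[4]]                    # ~20% touched only once, for reporting
    assert not set(train["person_id"]) & set(test["person_id"])  # no identity leakage
    return train, val, test

# e.g. five seeds -> five SE splits, reported as mean MAE and standard deviation
# train, val, test = subject_exclusive_split(pd.read_csv("annotations.csv"), seed=0)
```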
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 612, + 545, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 612, + 545, + 696 + ], + "spans": [ + { + "bbox": [ + 304, + 612, + 545, + 696 + ], + "type": "text", + "content": "Model Architecture & Weight Initialization We use ResNet-50 [14] as the backbone architecture. We always start the training of the methods from the same initialization. We run the experiments with (i) random initialization, (ii) weights pre-trained on ImageNet (TorchVision'sImagenet1K_V2), and (iii) weights pre-trained on ImageNet and then further trained on IMDB-WIKI for age estimation" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 693, + 286, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 693, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 693, + 286, + 713 + ], + "type": "text", + "content": "1The generated training, validation and test sets will usually be a partition of the dataset, however, in any case their intersection must be empty." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 702, + 525, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 702, + 525, + 713 + ], + "spans": [ + { + "bbox": [ + 315, + 702, + 525, + 713 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 702, + 525, + 713 + ], + "type": "text", + "content": "https://github.com/paplhjak/Facial-Age-Estimation-Benchmark" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "1198" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "content": "with cross-entropy. After the pre-training, the last layer of the model is replaced with a layer specific to the desired method. The models are then fine-tuned on the downstream dataset. It is important to note that for the baseline cross-entropy, we also replace the final layer before fine-tuning. This ensures that the experimental setup remains identical to that of the other methods." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "spans": [ + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "text", + "content": "Training Details We utilize the Adam optimizer with the parameters " + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.999" + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "text", + "content": ". 
For pre-training on the IMDB-WIKI dataset, we set the learning rate to " + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "inline_equation", + "content": "\\alpha = 10^{-3}" + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "text", + "content": " and train the model for a total of 100 epochs. For fine-tuning on the remaining datasets we reduce the learning rate to " + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "inline_equation", + "content": "\\alpha = 10^{-4}" + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "text", + "content": " and train the model for 50 epochs. We use a batch size of 100. The best model is selected based on the MAE metric computed on the validation set. We utilize two data augmentations during training, (i) horizontal mirroring, and (ii) cropping out an " + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 46, + 173, + 289, + 400 + ], + "type": "text", + "content": " portion of the bounding box and resizing it to the model input shape. We do not tune the hyperparameters of the methods [10, 12, 13, 17, 21, 22] on the validation set. We apply them in the original configurations. We argue that if any of the loss functions is a significant improvement over the baseline, we should observe a performance improvement across a broad range of hyperparameters and preprocessing pipelines. We consider our training parameters to be reasonable and to provide a comparison of the methods as if employed out-of-the-box." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 417, + 288, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 417, + 288, + 502 + ], + "spans": [ + { + "bbox": [ + 46, + 417, + 288, + 502 + ], + "type": "text", + "content": "Preprocessing We use the RetinaFace model developed by Deng et al. [8] for face detection and facial landmark detection. We use complete facial coverage, i.e., the images encompass the entire head. We resize the images to a resolution of " + }, + { + "bbox": [ + 46, + 417, + 288, + 502 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 46, + 417, + 288, + 502 + ], + "type": "text", + "content": " pixels and normalize the pixel values of the images. To this end, we subtract the mean and divide by the standard deviation of colors on ImageNet [7]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 518, + 289, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 518, + 289, + 650 + ], + "spans": [ + { + "bbox": [ + 46, + 518, + 289, + 650 + ], + "type": "text", + "content": "Metrics We use the Mean Absolute Error (MAE) calculated on the test data as the performance measure. To determine whether any method is consistently better than others, we employ the Friedman test and the Nemenyi critical difference test (FN test) as described by Demšar [6]. The main statistic used in the test is the average ranking (1 is best) of a method computed on multiple datasets. Differences in the average ranking are then used to decide whether a method is significantly better than others or whether the improvement is due to randomness (the null hypothesis). 
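The "Training Details" and "Preprocessing" paragraphs above fully specify the optimizer, schedule, and augmentations. A short PyTorch sketch of that recipe is given below; the model object is a placeholder and the 80 to 100% bounding-box crop is only approximated with torchvision's RandomResizedCrop, so this illustrates the stated settings rather than reproducing the authors' code.

```python
# Sketch of the stated training recipe: Adam (beta1=0.9, beta2=0.999), lr 1e-3 for
# IMDB-WIKI pre-training (100 epochs) and 1e-4 for fine-tuning (50 epochs), batch
# size 100, horizontal mirroring, 80-100% crops resized to 256x256, and ImageNet
# color normalization.
import torch
from torchvision import transforms

train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),                    # (i) horizontal mirroring
    transforms.RandomResizedCrop(256, scale=(0.8, 1.0),   # (ii) crop roughly 80-100% of
                                 ratio=(1.0, 1.0)),       #      the face box, resize to 256x256
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],      # ImageNet channel statistics
                         std=[0.229, 0.224, 0.225]),
])

def make_optimizer(model: torch.nn.Module, pretraining: bool) -> torch.optim.Optimizer:
    lr = 1e-3 if pretraining else 1e-4  # 100 epochs on IMDB-WIKI, 50 when fine-tuning
    return torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))
```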
We use a common significance level (p-value) of " + }, + { + "bbox": [ + 46, + 518, + 289, + 650 + ], + "type": "inline_equation", + "content": "\\alpha = 5\\%" + }, + { + "bbox": [ + 46, + 518, + 289, + 650 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 658, + 105, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 658, + 105, + 670 + ], + "spans": [ + { + "bbox": [ + 47, + 658, + 105, + 670 + ], + "type": "text", + "content": "4.2. Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": "Intra-Dataset Performance The intra-dataset results can be seen in Tab. 6, highlighted with a grey background. When starting from random initialization, training with the" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "content": "Unimodal loss [17] tends to be unstable. Excluding the Unimodal loss [17] from the evaluation, we apply the FN test. The results indicate that three methods: OR-CNN [21], DLDL [12], and the Mean-Variance loss [22], demonstrate a significant performance improvement over the baseline cross-entropy. With limited data availability, when pretraining is not possible, it is advisable to utilize one of the aforementioned methods." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 167, + 545, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 167, + 545, + 216 + ], + "spans": [ + { + "bbox": [ + 304, + 167, + 545, + 216 + ], + "type": "text", + "content": "With pre-training, either on ImageNet or IMDB-WIKI, none of the methods is significantly better than the cross-entropy. In other words, we do not observe any systematic improvement by deviating from the standard approach." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 230, + 545, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 230, + 545, + 372 + ], + "spans": [ + { + "bbox": [ + 304, + 230, + 545, + 372 + ], + "type": "text", + "content": "Cross-Dataset Generalization Cross-dataset results, shown in Tab. 6 with white background, were obtained by evaluating the performance of models on datasets that were not used for their training. The cross-dataset performance is unsurprisingly significantly worse than the intra-dataset performance for all of the methods. Using the FN test, we conclude that there is no significant difference in generalization capability between any of the methods [10, 12, 13, 17, 21, 22] and the cross-entropy, regardless of pre-training. In other words, though the loss functions may reduce overfitting, they do not help in the presence of covariate shift." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 373, + 545, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 373, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 304, + 373, + 545, + 601 + ], + "type": "text", + "content": "None of the methods perform well when evaluated on a different dataset than the one they were trained on. The best cross-dataset results are achieved by training on either UTKFace or CLAP2016. 
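The "Metrics" paragraph above compares methods with the Friedman test and the Nemenyi critical-difference analysis over average ranks, following Demšar. The sketch below illustrates that computation on a dummy MAE matrix; the method names and numbers are placeholders, not results from the paper's tables.

```python
# Sketch of the Friedman / Nemenyi rank comparison described in the "Metrics"
# paragraph (Demsar, 2006). The MAE matrix below is dummy data for illustration.
import numpy as np
from scipy import stats

methods = ["CE", "OR-CNN", "DLDL", "Mean-Variance"]
mae = np.array([[5.9, 5.8, 5.8, 5.8],      # rows: datasets, columns: methods
                [3.2, 3.1, 3.1, 3.1],
                [4.1, 4.0, 4.0, 4.0],
                [4.6, 4.5, 4.4, 4.5]])

stat, p = stats.friedmanchisquare(*mae.T)             # null: all methods perform the same
avg_rank = stats.rankdata(mae, axis=1).mean(axis=0)   # average rank per method, 1 = best
k, n = mae.shape[1], mae.shape[0]
cd = 2.569 * np.sqrt(k * (k + 1) / (6 * n))           # Nemenyi CD, tabulated q_0.05 for k=4
print(dict(zip(methods, avg_rank.round(2))), round(p, 3), round(cd, 2))
```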
The worst performance across databases is observed when models are trained on AFAD or MORPH. This discrepancy can be attributed to UTKFace and CLAP2016 having a broader range of images, which allows them to generalize effectively to other datasets. Conversely, the limited diversity in MORPH or AFAD datasets, such as AFAD mainly comprising images of people of Asian ethnicity and around " + }, + { + "bbox": [ + 304, + 373, + 545, + 601 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 304, + 373, + 545, + 601 + ], + "type": "text", + "content": " of MORPH being composed of individuals of African American ethnicity, contributes to the poor knowledge transfer. The significant decrease in the performance of models trained on the MORPH dataset when applied to other age estimation datasets underscores the importance of not relying solely on the MORPH dataset as the benchmark for age estimation. To ensure a reliable evaluation of different methods, it is crucial to incorporate results from alternative datasets as well." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 609, + 427, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 609, + 427, + 624 + ], + "spans": [ + { + "bbox": [ + 306, + 609, + 427, + 624 + ], + "type": "text", + "content": "5. Component Analysis" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 629, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 545, + 715 + ], + "type": "text", + "content": "In this section, we analyze the influence of the backbone architecture and the data preparation pipeline on model performance. We show that changes to these components can have a much more significant impact on the final performance than the choice of a loss function. When altering a component, we maintain all other components at their defaults, presented as the Cross-Entropy approach in Sec. 4." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "1199" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": "We use the gained insight to propose a strong baseline age estimation model using the FaRL [30] backbone." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 103, + 162, + 115 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 103, + 162, + 115 + ], + "spans": [ + { + "bbox": [ + 47, + 103, + 162, + 115 + ], + "type": "text", + "content": "5.1. Model Architecture" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 122, + 288, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 122, + 288, + 277 + ], + "spans": [ + { + "bbox": [ + 46, + 122, + 288, + 277 + ], + "type": "text", + "content": "Multiple different backbone architectures can be found in the age estimation literature. Among these architectures, VGG16 [10, 13, 17, 22, 27, 28] and ResNet-50 [2, 3, 19] stand out as the most common choice. 
We evaluate the influence of the architecture choice on the performance and extend the comparison to include more recent advancements, EfficientNet-B4 and ViT-B-16. We present our findings in Tab. 5. No backbone emerges as universally best across all datasets. Notably, changes in the backbone have a more substantial impact on performance than changes to the loss function, see Tab. 6. This highlights the importance of a thorough ablation, as changes in the backbone architecture could obscure the impact of the loss function." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 285, + 192, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 192, + 298 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 192, + 298 + ], + "type": "text", + "content": "5.2. Data Preparation Pipeline" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 304, + 287, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 304, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 304, + 287, + 460 + ], + "type": "text", + "content": "Age estimation models require only a specific region of an image, specifically the person's face, as input, rather than the entire image. However, the influence of this selection process on the model's performance is not apriori known. Should the model be presented with a tight crop of the face or the entire head? Additionally, facial images can differ in terms of scale and resolution since they originate from various sources and as such need to be resized to a uniform resolution. In this section, we examine the impact of the aforementioned data preparation pipeline on the performance of age estimation models. We demonstrate that changes in the preprocessing have a more substantial impact on performance than changes to the loss function." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 288, + 714 + ], + "type": "text", + "content": "Facial Alignment Numerous studies lack an explanation of their facial alignment procedure. Others merely mention the utilization of facial landmarks. To assess whether a standardized alignment is needed for a fair comparison of multiple methods, we adopt three distinct alignment procedures and evaluate their effect on model performance. Firstly, we (i) perform no alignment and employ the bounding box proposed by the facial detection model [8] as the simplest approach. The bounding box sides are parallel to the axes of the image. Secondly, (ii) we utilize the proposed bounding box but rotate it to horizontally align the eyes. Lastly, (iii) we use an alignment procedure, which normalizes the rotation, positioning, and scale. For details, refer to the implementation. A visual representation of these facial alignment methods is depicted in Fig. 2. The performance of models trained using the various alignment procedures is presented in Tab. 1. When working with pre-aligned datasets like AFAD, we observe that procedure (iii) does not yield significant improvements compared to the simpler variants (i) or (ii). 
Similar results are obtained" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "type": "text", + "content": "on datasets collected under standardized conditions, such as the MORPH dataset. However, when dealing with inthe-wild datasets like AgeDB and CLAP2016, we find that alignment (iii) leads to noticeable improvements over the simpler methods. Interestingly, on the UTKFace dataset, which also contains in-the-wild images, approach (ii) of solely rotating the proposed bounding boxes achieves the best outcomes. However, the disparities among the various alignment procedures are not substantial. We therefore argue that any facial alignment technique that effectively normalizes the position, rotation, and scale of the faces would yield comparable results." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 234, + 547, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 234, + 547, + 556 + ], + "spans": [ + { + "bbox": [ + 304, + 234, + 547, + 556 + ], + "type": "text", + "content": "Facial Coverage While facial alignment defines the positioning, orientation, and scale of facial landmarks, the extent to which the face is visible in an image also needs to be specified. We refer to this notion as facial coverage. It measures how much of the face is shown in an image and can range from minimal coverage, where only the eyes and mouth are visible, to complete coverage, where the entire head is visible. Determining the optimal compromise between complete facial coverage and minimal coverage is not immediately clear. Complete facial coverage provides a comprehensive view of the face, allowing age estimation algorithms to consider a broader range of facial cues. On the other hand, partial coverage may help reduce overfitting by eliminating irrelevant facial cues and features with high variance. For a visual demonstration of various facial coverage levels, refer to Fig. 3. The concept of facial coverage has received limited attention in age estimation literature. Consequently, the extent of facial coverage utilized in previous studies can only be inferred from the images presented in those works. For instance, Berg et al. [2] seemingly employ minimal coverage, showing slightly more than just the mouth and eyes. The majority of other works [3, 12, 13, 17, 21, 27] tend to adopt partial coverage, where a significant portion of the face, including the chin and forehead, is visible, but not the entire head and hair. In the works of Pan et al. [22], Rothe et al. [24], and Zhang et al. [28], the entire head is shown." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 558, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 547, + 715 + ], + "type": "text", + "content": "The performance of models trained with the different coverage levels is presented in Tab. 2. Generally, complete facial coverage, which includes the entire head in the model input, yields the best results across the majority of datasets. However, specifically for AFAD dataset and the MORPH dataset, partial coverage performs better. It is important to note that the AFAD dataset contains preprocessed images that do not capture the entire head. 
Consequently, using complete facial coverage with this dataset results in the presence of black bars and a decrease in the effective pixel resolution of the face. It is then to be expected that increased facial coverage yields inferior results. The smallest coverage, limited to the facial region up to the eyes and mouth," + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1200" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 115, + 137 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 115, + 137 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 115, + 137 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 115, + 137 + ], + "type": "image", + "image_path": "1e53304d80e10cd67dbd8eab24e4992df4994ac8712e1a03f48a45fc1539fe0a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 72, + 139, + 102, + 148 + ], + "lines": [ + { + "bbox": [ + 72, + 139, + 102, + 148 + ], + "spans": [ + { + "bbox": [ + 72, + 139, + 102, + 148 + ], + "type": "text", + "content": "(a) Crop." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 129, + 70, + 195, + 137 + ], + "blocks": [ + { + "bbox": [ + 129, + 70, + 195, + 137 + ], + "lines": [ + { + "bbox": [ + 129, + 70, + 195, + 137 + ], + "spans": [ + { + "bbox": [ + 129, + 70, + 195, + 137 + ], + "type": "image", + "image_path": "5e27e3ab6f333a541c855371847e0b8229d43e62789118a10a2f63d443029e49.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 139, + 188, + 148 + ], + "lines": [ + { + "bbox": [ + 146, + 139, + 188, + 148 + ], + "spans": [ + { + "bbox": [ + 146, + 139, + 188, + 148 + ], + "type": "text", + "content": "(b) Rotation." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 209, + 70, + 276, + 137 + ], + "blocks": [ + { + "bbox": [ + 209, + 70, + 276, + 137 + ], + "lines": [ + { + "bbox": [ + 209, + 70, + 276, + 137 + ], + "spans": [ + { + "bbox": [ + 209, + 70, + 276, + 137 + ], + "type": "image", + "image_path": "f7cea1e324fb70130baaa7384dc596c9f93a5c808325b7f61a466b016b00e442.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 212, + 139, + 284, + 148 + ], + "lines": [ + { + "bbox": [ + 212, + 139, + 284, + 148 + ], + "spans": [ + { + "bbox": [ + 212, + 139, + 284, + 148 + ], + "type": "text", + "content": "(c) Rot., Trans., Scale." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 158, + 286, + 180 + ], + "lines": [ + { + "bbox": [ + 47, + 158, + 286, + 180 + ], + "spans": [ + { + "bbox": [ + 47, + 158, + 286, + 180 + ], + "type": "text", + "content": "Figure 2. Comparison of different alignment methods using the average face from the FG-NET dataset." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 201, + 286, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 201, + 286, + 226 + ], + "spans": [ + { + "bbox": [ + 46, + 201, + 286, + 226 + ], + "type": "text", + "content": "consistently performs the worst. With sufficient pixel resolution, the full facial coverage performs the best." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 240, + 286, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 240, + 286, + 337 + ], + "spans": [ + { + "bbox": [ + 46, + 240, + 286, + 337 + ], + "type": "text", + "content": "Input Resolution To investigate the influence of input resolution on age estimation, we performed experiments using multiple resolutions on all datasets: specifically, " + }, + { + "bbox": [ + 46, + 240, + 286, + 337 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 46, + 240, + 286, + 337 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 240, + 286, + 337 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 46, + 240, + 286, + 337 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 240, + 286, + 337 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 46, + 240, + 286, + 337 + ], + "type": "text", + "content": " pixels. The results are presented in Tab. 3. Our findings indicate that an increase in image resolution consistently results in improved model performance across all datasets. Hence, the best performance was achieved with a resolution of " + }, + { + "bbox": [ + 46, + 240, + 286, + 337 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 46, + 240, + 286, + 337 + ], + "type": "text", + "content": " pixels." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 337, + 286, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 337, + 286, + 421 + ], + "spans": [ + { + "bbox": [ + 46, + 337, + 286, + 421 + ], + "type": "text", + "content": "In the literature, one can find resolutions ranging from " + }, + { + "bbox": [ + 46, + 337, + 286, + 421 + ], + "type": "inline_equation", + "content": "60 \\times 60" + }, + { + "bbox": [ + 46, + 337, + 286, + 421 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 337, + 286, + 421 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 46, + 337, + 286, + 421 + ], + "type": "text", + "content": " pixels, where newer works tend to use larger resolution images. As the resolution increase can directly be observed to improve the results; and the resolutions increased with years; it is difficult to say whether newly proposed methods are better overall, or whether they perform better due to using higher resolution images." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 435, + 286, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 435, + 286, + 603 + ], + "spans": [ + { + "bbox": [ + 46, + 435, + 286, + 603 + ], + "type": "text", + "content": "Input Transform Finally, we examined the input transformation proposed by Lin et al. [18], which involves converting a face image into a tanh-polar representation. This approach has shown large performance improvements in face semantic segmentation. Lin et al. 
then modified the network for age estimation, reporting impressive results [19]. We explored the potential benefits of applying this transformation for age estimation. However, our findings indicate that the transformation does not improve the results compared to the baseline, as shown in Tab. 4. Therefore, we conclude that the improved age estimation performance observed by Lin et al. [19] does not arise from the use of a different representation, but rather from pre-training on semantic segmentation or their model architecture." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 611, + 146, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 611, + 146, + 623 + ], + "spans": [ + { + "bbox": [ + 47, + 611, + 146, + 623 + ], + "type": "text", + "content": "5.3. FaRL Backbone" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 629, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 286, + 713 + ], + "type": "text", + "content": "We observed that adjustments to the decision layer and loss function have minimal impact on the final model performance. Conversely, large performance disparities arise when modifying other components of the prediction pipeline. Notably, the pretraining data appear to be the most influential factor. Based on this insight, we opt against creating a specialized loss function to enhance the age esti" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 307, + 70, + 374, + 137 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 374, + 137 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 374, + 137 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 374, + 137 + ], + "type": "image", + "image_path": "b33461ff630b18e174f8bf28e104b8ea3cf5ec860777f6f15ab3b0da56ddcec5.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 139, + 376, + 148 + ], + "lines": [ + { + "bbox": [ + 315, + 139, + 376, + 148 + ], + "spans": [ + { + "bbox": [ + 315, + 139, + 376, + 148 + ], + "type": "text", + "content": "(a) Eyes & Mouth." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 158, + 545, + 180 + ], + "lines": [ + { + "bbox": [ + 306, + 158, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 306, + 158, + 545, + 180 + ], + "type": "text", + "content": "Figure 3. Comparison of different facial coverage levels using the average face from the FG-NET dataset." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 388, + 71, + 454, + 137 + ], + "blocks": [ + { + "bbox": [ + 388, + 71, + 454, + 137 + ], + "lines": [ + { + "bbox": [ + 388, + 71, + 454, + 137 + ], + "spans": [ + { + "bbox": [ + 388, + 71, + 454, + 137 + ], + "type": "image", + "image_path": "ac9a8548a957f52dba0296747203c80b65c3e53b33219a81a6696567eed36afc.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 391, + 138, + 461, + 148 + ], + "lines": [ + { + "bbox": [ + 391, + 138, + 461, + 148 + ], + "spans": [ + { + "bbox": [ + 391, + 138, + 461, + 148 + ], + "type": "text", + "content": "(b) Chin & Forehead." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 468, + 71, + 534, + 137 + ], + "blocks": [ + { + "bbox": [ + 468, + 71, + 534, + 137 + ], + "lines": [ + { + "bbox": [ + 468, + 71, + 534, + 137 + ], + "spans": [ + { + "bbox": [ + 468, + 71, + 534, + 137 + ], + "type": "image", + "image_path": "3148bc6fde0c3c8feb58bd0a464313f9081bc4ba87a394a01e6373042a7d1d53.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 491, + 139, + 522, + 148 + ], + "lines": [ + { + "bbox": [ + 491, + 139, + 522, + 148 + ], + "spans": [ + { + "bbox": [ + 491, + 139, + 522, + 148 + ], + "type": "text", + "content": "(c) Head." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "table", + "bbox": [ + 315, + 191, + 535, + 284 + ], + "blocks": [ + { + "bbox": [ + 315, + 191, + 535, + 284 + ], + "lines": [ + { + "bbox": [ + 315, + 191, + 535, + 284 + ], + "spans": [ + { + "bbox": [ + 315, + 191, + 535, + 284 + ], + "type": "table", + "html": "
DatasetAlignment
CropRotationRot. + Trans. + Scale
AgeDB5.935.925.84
AFAD3.123.113.11
CACD20004.014.004.00
CLAP20164.684.574.49
MORPH2.812.782.79
UTKFace4.494.424.44
", + "image_path": "6c9b796269f3f02f9bb42ce667e5430cc5ca55e0cfa3b0a86aa6eb400d99146a.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_body" + } + ], + "index": 20 + }, + { + "type": "table", + "bbox": [ + 312, + 326, + 538, + 418 + ], + "blocks": [ + { + "bbox": [ + 306, + 293, + 545, + 315 + ], + "lines": [ + { + "bbox": [ + 306, + 293, + 545, + 315 + ], + "spans": [ + { + "bbox": [ + 306, + 293, + 545, + 315 + ], + "type": "text", + "content": "Table 1. MAE " + }, + { + "bbox": [ + 306, + 293, + 545, + 315 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 306, + 293, + 545, + 315 + ], + "type": "text", + "content": " of ResNet-50 models with different facial alignment. The models were pre-trained on IMDB-WIKI." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 312, + 326, + 538, + 418 + ], + "lines": [ + { + "bbox": [ + 312, + 326, + 538, + 418 + ], + "spans": [ + { + "bbox": [ + 312, + 326, + 538, + 418 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Dataset</td><td colspan="3">Facial Coverage</td></tr>
<tr><td>Eyes & Mouth</td><td>Chin & Forehead</td><td>Head</td></tr>
<tr><td>AgeDB</td><td>6.06</td><td>5.84</td><td>5.81</td></tr>
<tr><td>AFAD</td><td>3.17</td><td>3.11</td><td>3.14</td></tr>
<tr><td>CACD2000</td><td>4.02</td><td>4.00</td><td>3.96</td></tr>
<tr><td>CLAP2016</td><td>5.06</td><td>4.49</td><td>4.49</td></tr>
<tr><td>MORPH</td><td>2.88</td><td>2.79</td><td>2.81</td></tr>
<tr><td>UTKFace</td><td>4.63</td><td>4.44</td><td>4.38</td></tr></table>
", + "image_path": "90da2433b41cafc22734df1e980f17dc01557eadd82d4d55fa319801bca39b6a.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "table_body" + } + ], + "index": 22 + }, + { + "type": "table", + "bbox": [ + 324, + 460, + 525, + 554 + ], + "blocks": [ + { + "bbox": [ + 306, + 427, + 544, + 449 + ], + "lines": [ + { + "bbox": [ + 306, + 427, + 544, + 449 + ], + "spans": [ + { + "bbox": [ + 306, + 427, + 544, + 449 + ], + "type": "text", + "content": "Table 2. MAE " + }, + { + "bbox": [ + 306, + 427, + 544, + 449 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 306, + 427, + 544, + 449 + ], + "type": "text", + "content": " of ResNet-50 models with different facial coverages. The models were pre-trained on IMDB-WIKI." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 324, + 460, + 525, + 554 + ], + "lines": [ + { + "bbox": [ + 324, + 460, + 525, + 554 + ], + "spans": [ + { + "bbox": [ + 324, + 460, + 525, + 554 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Dataset</td><td colspan="3">Image Resolution</td></tr>
<tr><td>64 × 64</td><td>128 × 128</td><td>256 × 256</td></tr>
<tr><td>AgeDB</td><td>8.43</td><td>6.90</td><td>5.81</td></tr>
<tr><td>AFAD</td><td>3.36</td><td>3.25</td><td>3.14</td></tr>
<tr><td>CACD2000</td><td>5.01</td><td>4.55</td><td>3.96</td></tr>
<tr><td>CLAP2016</td><td>11.34</td><td>5.90</td><td>4.49</td></tr>
<tr><td>MORPH</td><td>3.33</td><td>3.07</td><td>2.81</td></tr>
<tr><td>UTKFace</td><td>5.83</td><td>4.81</td><td>4.38</td></tr></table>
", + "image_path": "31fc96f4ed07a97905584da5117ba15e0c19b6fea78869540bacfa6bd2e75c3d.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "table_body" + } + ], + "index": 24 + }, + { + "bbox": [ + 306, + 562, + 544, + 583 + ], + "lines": [ + { + "bbox": [ + 306, + 562, + 544, + 583 + ], + "spans": [ + { + "bbox": [ + 306, + 562, + 544, + 583 + ], + "type": "text", + "content": "Table 3. MAE " + }, + { + "bbox": [ + 306, + 562, + 544, + 583 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 306, + 562, + 544, + 583 + ], + "type": "text", + "content": " of ResNet-50 models with different image resolutions. The models were pre-trained on IMDB-WIKI." + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "type": "text", + "content": "mation system. Instead, we leverage the FaRL backbone by Zheng et al. [30], utilizing a ViT-B-16 [9] model. The FaRL model is trained through a combination of (i) contrastive loss on image-text pairs and (ii) prediction of masked image patches. Training takes place on an extensive collection of facial images (50 million) from the image-text pair LAION dataset [26]. We retain the feature representation extracted by FaRL without altering the model's weights. Our decision to use FaRL is driven solely by the extensive amount" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "type": "text", + "content": "1201" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 65, + 70, + 269, + 163 + ], + "blocks": [ + { + "bbox": [ + 65, + 70, + 269, + 163 + ], + "lines": [ + { + "bbox": [ + 65, + 70, + 269, + 163 + ], + "spans": [ + { + "bbox": [ + 65, + 70, + 269, + 163 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Dataset</td><td colspan="2">Transform</td></tr>
<tr><td>No Transform</td><td>RoI Tanh-polar [18]</td></tr>
<tr><td>AgeDB</td><td>5.81</td><td>5.93</td></tr>
<tr><td>AFAD</td><td>3.14</td><td>3.15</td></tr>
<tr><td>CACD2000</td><td>3.96</td><td>4.07</td></tr>
<tr><td>CLAP2016</td><td>4.49</td><td>4.71</td></tr>
<tr><td>MORPH</td><td>2.81</td><td>2.80</td></tr>
<tr><td>UTKFace</td><td>4.38</td><td>4.39</td></tr></table>
", + "image_path": "846c927a4cbe24a421f86eed275ccb17b8251fd0f753df468b2a8973b8d2c785.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 207, + 282, + 296 + ], + "blocks": [ + { + "bbox": [ + 47, + 171, + 287, + 194 + ], + "lines": [ + { + "bbox": [ + 47, + 171, + 287, + 194 + ], + "spans": [ + { + "bbox": [ + 47, + 171, + 287, + 194 + ], + "type": "text", + "content": "Table 4. MAE " + }, + { + "bbox": [ + 47, + 171, + 287, + 194 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 47, + 171, + 287, + 194 + ], + "type": "text", + "content": " of ResNet-50 models with different input transformations. The models were pre-trained on IMDB-WIKI [25]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 207, + 282, + 296 + ], + "lines": [ + { + "bbox": [ + 50, + 207, + 282, + 296 + ], + "spans": [ + { + "bbox": [ + 50, + 207, + 282, + 296 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Dataset</td><td colspan="4">Backbone</td></tr>
<tr><td>ResNet-50</td><td>Eff.Net-B4</td><td>ViT-B-16</td><td>VGG-16</td></tr>
<tr><td>AgeDB</td><td>5.81</td><td>5.76</td><td>9.07</td><td>6.02</td></tr>
<tr><td>AFAD</td><td>3.14</td><td>3.20</td><td>4.04</td><td>3.22</td></tr>
<tr><td>CACD2000</td><td>3.96</td><td>4.00</td><td>6.22</td><td>3.92</td></tr>
<tr><td>CLAP2016</td><td>4.49</td><td>4.06</td><td>8.55</td><td>4.65</td></tr>
<tr><td>MORPH</td><td>2.81</td><td>2.87</td><td>4.35</td><td>2.88</td></tr>
<tr><td>UTKFace</td><td>4.38</td><td>4.23</td><td>6.88</td><td>4.64</td></tr></table>
", + "image_path": "c0872ca7b3a63adeed30742d984c892cb18b4e8644593e54383b96d40287b21d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 304, + 286, + 327 + ], + "lines": [ + { + "bbox": [ + 47, + 304, + 286, + 327 + ], + "spans": [ + { + "bbox": [ + 47, + 304, + 286, + 327 + ], + "type": "text", + "content": "Table 5. Intra-dataset MAE " + }, + { + "bbox": [ + 47, + 304, + 286, + 327 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 47, + 304, + 286, + 327 + ], + "type": "text", + "content": " with different backbone architectures. The models were pre-trained on IMDB-WIKI [25]." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 351, + 286, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 351, + 286, + 506 + ], + "spans": [ + { + "bbox": [ + 46, + 351, + 286, + 506 + ], + "type": "text", + "content": "of pre-training data it incorporates, rather than specific characteristics of the backbone. Different image encoders could be trained in the same manner. However, due to the costs associated with training such models, we have chosen to use the available FaRL ViT-B-16 backbone. We employ a simple multilayer perceptron (MLP) over the FaRL-extracted features, consisting of 2 layers with 512 neurons each, followed by ReLU activation. Cross-entropy serves as the chosen loss function. For each downstream dataset, we pretrain the MLP on IMDB-WIKI or initialize it to random weights. We choose the preferred option based on validation loss on the downstream dataset. As previously, we replace the final layer before fine-tuning on downstream datasets." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 508, + 286, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 508, + 286, + 662 + ], + "spans": [ + { + "bbox": [ + 46, + 508, + 286, + 662 + ], + "type": "text", + "content": "This straightforward modification outperformed all other models on AgeDB, CLAP2016, and UTKFace datasets. It also achieved superior results on AFAD, matched the performance of other models on CACD2000, but demonstrated worse performance on MORPH. Applying the FN test revealed statistically significant improvements of this model over others in both intra-dataset and cross-dataset evaluations, see Tab. 6. We attribute the poor performance of FaRL on MORPH to the fact that the distributions of images in LAION [26] and MORPH [23] are vastly different. As we do not finetune the feature representation of FaRL [30], it is possible that the representation learned on LAION is superior on the other datasets but deficient on MORPH." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "text", + "content": "We do not claim this model to be the ultimate solution, but the results achieved with the FaRL backbone along with our public implementation offer a robust and straightforward baseline for a comparison with future methods." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 71, + 463, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 463, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 463, + 84 + ], + "type": "text", + "content": "6. 
Discussion and Conclusions" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 91, + 545, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 545, + 295 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 545, + 295 + ], + "type": "text", + "content": "In this paper, we aimed to establish a fair comparison framework for evaluating various approaches for age estimation. We conducted a comprehensive analysis on seven different datasets, namely AgeDB [20], AFAD [21], CACD2000 [4], CLAP2016 [1], FG-NET [15], MORPH [23], and UTK-Face [29], comparing the models based on their Mean Absolute Error (MAE). To determine if any method outperformed the others, we employed the Friedman test and the Nemenyi critical difference test. When pre-training the models on a large dataset, we did not observe any statistically significant improvement by using the specialized loss functions designed for age estimation. With random model initialization, we observed some improvement over the baseline cross-entropy on small datasets. Specifically, for Mean-Variance loss [22], OR-CNN [21], and DLDL [12]. These improvements can be attributed to implicit regularization provided by these methods." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 294, + 545, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 294, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 294, + 545, + 437 + ], + "type": "text", + "content": "Previously published results reported continuous performance improvements over time (as depicted in Fig. 1). Our findings challenge these claims. We argue that the reported improvements can be attributed to either the random data splitting strategy or hyperparameter tuning to achieve the best test set performance. Our analysis of the data preparation pipeline revealed that factors such as the extent of facial coverage or input resolution exert a more significant impact on the results than the choice of the age estimation specific loss function. Guided by these findings, we use the FaRL [30] model as a backbone for age estimation and demonstrated its effectiveness. In summary:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 438, + 545, + 617 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 306, + 438, + 545, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 438, + 545, + 485 + ], + "spans": [ + { + "bbox": [ + 306, + 438, + 545, + 485 + ], + "type": "text", + "content": "- We show that existing evaluation practices in age estimation do not provide a consistent comparison of the state-of-the-art methods. We define a proper evaluation protocol which addresses the issue." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 486, + 545, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 486, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 306, + 486, + 545, + 533 + ], + "type": "text", + "content": "- We show that improvements in age estimation results over recent years can not be attributed to the specialized loss functions introduced in [10, 12, 13, 17, 21, 22], as is claimed in the published literature." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 533, + 545, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 533, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 306, + 533, + 545, + 581 + ], + "type": "text", + "content": "- Using the insight gained from analyzing different components of the age estimation pipeline, we construct a prediction model with the FaRL [30] backbone and demonstrate its effectiveness." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 582, + 545, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 582, + 545, + 617 + ], + "spans": [ + { + "bbox": [ + 306, + 582, + 545, + 617 + ], + "type": "text", + "content": "- To facilitate reproducibility and simple future comparisons, we have made our implementation framework and the exact data splits publicly available." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 628, + 399, + 642 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 628, + 399, + 642 + ], + "spans": [ + { + "bbox": [ + 306, + 628, + 399, + 642 + ], + "type": "text", + "content": "Acknowledgment" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 647, + 545, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 647, + 545, + 707 + ], + "spans": [ + { + "bbox": [ + 304, + 647, + 545, + 707 + ], + "type": "text", + "content": "This research was supported by the Grant Agency of the Czech Technical University in Prague, grant No. SGS23/176/OHK3/3T/13 and by the Grant agency of the Ministry of Interior Czech Republic, project FACIS grant. No. VJ02010041" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1202" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 107, + 545, + 632 + ], + "blocks": [ + { + "bbox": [ + 52, + 107, + 545, + 632 + ], + "lines": [ + { + "bbox": [ + 52, + 107, + 545, + 632 + ], + "spans": [ + { + "bbox": [ + 52, + 107, + 545, + 632 + ], + "type": "table", + "html": "
MethodInit.Evaluation Dataset
AgeDBAFADCACD2000CLAP2016FG-NETMORPHUTKFace
IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.IMDBImag.Rand.
AgeDBCross-Entropy5.817.207.657.8312.6114.705.908.108.736.8310.8612.4110.8216.2718.874.836.746.938.4511.8811.82
Regression6.236.547.608.0512.1314.196.767.568.748.329.9512.3210.5613.8318.005.646.666.949.4210.4212.14
OR-CNN [21]5.786.517.527.4711.8613.806.057.718.556.6410.1811.749.7413.8217.404.736.617.058.2010.7511.41
DLDL [12]5.806.957.467.811.8514.925.997.968.526.5110.8411.489.2315.6316.954.746.346.827.9711.8711.24
DLDL-v2 [13]5.806.877.587.6112.9816.195.918.158.616.4810.8812.509.9115.3619.014.927.537.207.9711.6111.24
SORD [10]5.816.937.587.8112.8015.425.967.908.776.6110.3712.229.7214.7617.854.766.587.148.1211.5311.60
Mean-Var. [22]5.856.697.337.2612.4014.356.007.898.356.7010.5011.9010.5514.3217.434.996.877.338.2510.7711.64
Unimodal [17]5.907.1115.498.3713.1120.876.228.2416.116.7311.1421.3110.1516.1332.774.846.7817.428.2311.8623.09
FaRL + MLP5.64--7.82--7.41--9.32--9.15--4.73--9.92--
AFADCross-Entropy15.7017.3118.053.143.173.329.5411.1811.218.9610.2310.3210.9211.3811.966.806.838.1912.1013.0713.29
Regression13.6715.9117.213.173.163.308.7210.5110.728.339.9110.0211.2011.8912.356.277.347.9911.2312.8312.96
OR-CNN [21]12.0815.6516.723.163.173.288.8711.0510.897.859.739.9210.6311.9412.586.686.857.8110.5012.4312.74
DLDL [12]14.1215.7017.213.143.163.259.4010.7011.068.689.549.9811.3111.6412.077.046.757.8211.5212.4312.82
DLDL-v2 [13]13.9016.3317.783.153.173.289.4610.6811.028.609.7610.3210.8311.8112.646.926.797.9411.2912.6113.18
SORD [10]14.3016.0817.493.143.153.249.4510.7011.098.649.7910.1011.2111.6312.196.876.827.9311.5912.7913.10
Mean-Var. [22]12.5415.0716.683.163.163.268.9810.3310.757.939.339.7810.9612.2412.436.616.767.8810.5712.0012.62
Unimodal [17]13.9915.8920.973.203.249.309.2310.6814.568.649.7914.5111.3111.8318.297.077.3212.5311.2612.3317.47
FaRL + MLP16.41--3.12--10.95--8.57--12.24--6.62--11.64--
CLAP2000Cross-Entropy9.6611.8410.6010.708.5013.083.964.594.898.428.6410.5117.4523.6420.867.2112.2010.3911.1611.3812.61
Regression10.9110.4410.7610.237.2311.664.064.524.838.847.759.9817.5519.5019.608.618.8111.7911.3410.3811.78
OR-CNN [21]10.4311.0211.859.669.4812.174.014.604.748.578.8510.2918.4724.3220.857.5210.0411.0511.1712.3012.27
DLDL [12]9.8410.7911.2810.099.3013.203.964.424.768.398.499.9918.3818.9921.527.279.1611.0111.1911.9412.27
DLDL-v2 [13]9.9012.3111.208.0311.5011.513.964.574.697.678.889.4318.1122.8919.027.2013.469.7310.5212.3211.47
SORD [10]9.7710.9011.0410.359.5511.953.964.424.708.388.519.8918.0520.8421.737.238.9811.5911.1812.0612.22
Mean-Var. [22]10.8111.4210.839.7110.8211.494.074.604.788.889.2010.0820.4822.6820.148.1412.5911.7211.7412.2912.23
Unimodal [17]10.4611.0446.2610.639.8525.744.104.7337.419.198.9230.9619.3719.7515.848.9411.6432.6311.8911.7532.98
FaRL + MLP11.32--9.08--3.96--8.57--19.63--6.56--11.27--
Cross-Entropy7.3510.1512.265.417.035.346.658.119.114.495.968.735.929.2812.024.966.616.905.747.218.58
Regression7.518.5211.746.075.195.956.867.249.454.654.777.894.856.3110.145.095.498.836.025.938.66
OR-CNN [21]6.838.7411.245.835.925.446.737.258.654.134.607.385.096.479.224.925.786.525.435.957.68
DLDL [12]7.209.3311.395.576.905.856.857.649.264.185.107.395.267.449.184.895.926.525.516.377.87
DLDL-v2 [13]7.149.4212.365.475.956.456.697.999.344.234.878.525.227.048.754.856.047.295.536.128.23
SORD [10]7.199.6012.165.477.746.626.638.099.664.275.347.815.597.777.624.926.016.625.486.468.08
Mean-Var. [22]7.089.1612.585.186.305.386.647.379.944.284.877.955.456.6911.144.967.387.495.526.168.65
Unimodal [17]7.019.7720.715.586.105.546.478.2013.084.175.3913.835.136.3915.134.806.0510.025.446.6715.27
FaRL + MLP7.50--4.34--6.57--3.38--4.95--4.47--4.85-
Cross-Entropy9.6611.7312.636.697.7810.368.5310.8310.116.908.9610.649.4511.9615.382.812.963.018.9710.8111.92
Regression10.4812.9912.566.606.6510.669.8211.479.687.839.2710.679.2410.1316.692.832.742.978.4010.97
OR-CNN [21]9.3511.6512.826.787.7811.818.3911.3410.236.848.7311.059.5811.0917.472.832.852.998.8210.3712.06
DLDL [12]9.4112.0012.666.587.7811.768.5811.9210.106.859.2611.159.4411.4316.942.812.922.988.8010.8112.46
DLDL-v2 [13]9.7911.4912.686.608.2212.458.7910.989.816.988.9811.229.5211.6317.572.822.933.008.9710.7012.47
SORD [10]9.4811.8412.736.547.9111.198.7311.1810.136.848.9910.729.3411.0815.902.812.912.998.8310.8511.97
Mean-Var. [22]9.7011.6212.936.687.8110.418.6510.5910.117.038.8010.569.5111.4515.812.832.892.958.9410.5911.95
Unimodal [17]9.9312.3117.446.637.048.188.6810.1112.037.198.9512.389.8012.1717.832.782.908.669.0710.7515.45
FaRL + MLP8.40--4.67--7.45--6.21--9.28--3.04--
Cross-Entropy6.618.889.585.516.426.756.569.108.984.827.347.504.786.627.645.096.617.354.384.755.32
Regression7.017.798.835.966.266.436.777.878.615.245.936.674.415.077.275.415.956.714.724.535.34
OR-CNN [21]6.718.298.755.566.746.526.618.898.374.956.796.704.545.716.555.266.076.764.404.435.15
DLDL [12]6.658.609.005.426.686.196.529.018.844.817.197.464.855.877.285.166.257.034.394.665.30
DLDL-v2 [13]6.798.439.915.427.186.326.529.428.694.827.878.784.836.547.555.146.367.284.364.685.25
SORD [10]6.618.969.115.427.186.326.529.428.694.827.878.784.836.547.555.146.367.284.364.685.25
Mean-Var. [22]6.798.368.535.416.546.326.558.558.325.046.816.325.056.306.905.376.156.394.424.575.05
Unimodal [17]6.688.6622.425.357.6816.646.589.2817.174.867.6018.834.556.2522.985.225.9616.444.474.7821.01
FaRL + MLP7.16-
", + "image_path": "6272592e706aa02b2a2f126821780c05ea30cdff2ba5ff8dc79f1637651b6959.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 640, + 546, + 673 + ], + "lines": [ + { + "bbox": [ + 48, + 640, + 546, + 673 + ], + "spans": [ + { + "bbox": [ + 48, + 640, + 546, + 673 + ], + "type": "text", + "content": "Table 6. Intra-dataset and cross-dataset Mean Absolute Error (MAE) " + }, + { + "bbox": [ + 48, + 640, + 546, + 673 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 48, + 640, + 546, + 673 + ], + "type": "text", + "content": " of ResNet-50 models. Results marked as Initialization: IMDB are of models that are initialized to ImageNet weights, then trained with Cross-Entropy on IMDB-WIKI [25] and then finetuned on the downstream dataset. Imag. signifies initialization to weights pre-trained on ImageNet. Rand. denotes random initialization." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "text", + "content": "1203" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "text", + "content": "[1] E. Agustsson, R. Timofte, S. Escalera, X. Baro, I. Guyon, and R. Rothe. Apparent and real age estimation in still images with deep residual regressors on appa-real database. In 12th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), 2017. IEEE, 2017. 1, 2, 3, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 159, + 288, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 288, + 213 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 288, + 213 + ], + "type": "text", + "content": "[2] A. Berg, M. Oskarsson, and M. O'Connor. Deep ordinal regression with label diversity. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 2740-2747, Los Alamitos, CA, USA, 2021. IEEE Computer Society. 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 215, + 287, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 215, + 287, + 258 + ], + "spans": [ + { + "bbox": [ + 53, + 215, + 287, + 258 + ], + "type": "text", + "content": "[3] Wenzhi Cao, Vahid Mirjalili, and Sebastian Raschka. Rank consistent ordinal regression for neural networks with application to age estimation. Pattern Recognition Letters, 140: 325-331, 2020. 
2, 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 260, + 287, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 260, + 287, + 315 + ], + "spans": [ + { + "bbox": [ + 53, + 260, + 287, + 315 + ], + "type": "text", + "content": "[4] Bor-Chun Chen, Chu-Song Chen, and Winston H. Hsu. Cross-age reference coding for age-invariant face recognition and retrieval. In Computer Vision – ECCV 2014, pages 768–783, Cham, 2014. Springer International Publishing. 1, 2, 3, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 317, + 287, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 317, + 287, + 361 + ], + "spans": [ + { + "bbox": [ + 53, + 317, + 287, + 361 + ], + "type": "text", + "content": "[5] Shixing Chen, Caojin Zhang, Ming Dong, Jialiang Le, and Mike Rao. Using ranking-cnn for age estimation. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 742-751, 2017. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 363, + 287, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 363, + 287, + 396 + ], + "spans": [ + { + "bbox": [ + 53, + 363, + 287, + 396 + ], + "type": "text", + "content": "[6] Janez Demšar. Statistical comparisons of classifiers over multiple data sets. The Journal of Machine learning research, 7:1-30, 2006. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 397, + 287, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 397, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 397, + 287, + 441 + ], + "type": "text", + "content": "[7] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 443, + 287, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 443, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 53, + 443, + 287, + 498 + ], + "type": "text", + "content": "[8] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5203-5212, 2020. 4, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 499, + 287, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 499, + 287, + 587 + ], + "spans": [ + { + "bbox": [ + 53, + 499, + 287, + 587 + ], + "type": "text", + "content": "[9] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 589, + 287, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 589, + 287, + 632 + ], + "spans": [ + { + "bbox": [ + 48, + 589, + 287, + 632 + ], + "type": "text", + "content": "[10] Raul Díaz and Amit Marathe. Soft labels for ordinal regression. 
In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4733-4742, 2019. 2, 3, 4, 5, 7, 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 635, + 287, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 666 + ], + "type": "text", + "content": "[11] Vojtech Franc and Jan Cech. Learning cnns from weakly annotated facial images. Image and Vision Computing, 2018. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "text", + "content": "[12] Bin-Bin Gao, Chao Xing, Chen-Wei Xie, Jianxin Wu, and Xin Geng. Deep label distribution learning with label ambiguity. IEEE Transactions on Image Processing, 26(6):2825-2838, 2017. 2, 3, 4, 5, 7, 8" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 139 + ], + "type": "text", + "content": "[13] Bin-Bin Gao, Hong-Yu Zhou, Jianxin Wu, and Xin Geng. Age estimation using expectation of label distribution learning. In Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-18, pages 712-718. International Joint Conferences on Artificial Intelligence Organization, 2018. 2, 3, 4, 5, 7, 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 140, + 545, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 140, + 545, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 140, + 545, + 183 + ], + "type": "text", + "content": "[14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 184, + 545, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 184, + 545, + 227 + ], + "spans": [ + { + "bbox": [ + 308, + 184, + 545, + 227 + ], + "type": "text", + "content": "[15] A. Lanitis, C.J. Taylor, and T.F. Cootes. Toward automatic simulation of aging effects on face images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 24(4): 442-455, 2002. 3, 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 228, + 545, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 228, + 545, + 261 + ], + "spans": [ + { + "bbox": [ + 308, + 228, + 545, + 261 + ], + "type": "text", + "content": "[16] Ling Li and Hsuan-Tien Lin. Ordinal regression by extended binary classification. In Advances in Neural Information Processing Systems, page 865 - 872, 2007. Cited by: 195. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 261, + 545, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 261, + 545, + 336 + ], + "spans": [ + { + "bbox": [ + 308, + 261, + 545, + 336 + ], + "type": "text", + "content": "[17] Qiang Li, Jingjing Wang, Zhaoliang Yao, Yachun Li, Pengju Yang, Jingwei Yan, Chunmao Wang, and Shiliang Pu. 
Unimodal-concentrated loss: Fully adaptive label distribution learning for ordinal regression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20513-20522, 2022. 2, 3, 4, 5, 7, 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 338, + 545, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 338, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 308, + 338, + 545, + 371 + ], + "type": "text", + "content": "[18] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. Roi tanh-polar transformer network for face parsing in the wild. Image and Vision Computing, 112, 2021. 6, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 372, + 545, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 372, + 545, + 414 + ], + "spans": [ + { + "bbox": [ + 308, + 372, + 545, + 414 + ], + "type": "text", + "content": "[19] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. Fpage: Leveraging face parsing attention for facial age estimation in the wild. IEEE Transactions on Image Processing, 2022. 2, 5, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 415, + 545, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 415, + 545, + 480 + ], + "spans": [ + { + "bbox": [ + 308, + 415, + 545, + 480 + ], + "type": "text", + "content": "[20] Stylianos Moschoglou, Athanasios Papaioannou, Christos Sagonas, Jiankang Deng, Irene Kotsia, and Stefanos Zafeiriou. Agedb: the first manually collected, in-the-wild age database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshop, 2017. 1, 2, 3, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 482, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 482, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 308, + 482, + 545, + 536 + ], + "type": "text", + "content": "[21] Zhenxing Niu, Mo Zhou, Le Wang, Xinbo Gao, and Gang Hua. Ordinal regression with multiple output cnn for age estimation. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4920-4928, 2016. 1, 2, 3, 4, 5, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 537, + 545, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 545, + 581 + ], + "type": "text", + "content": "[22] Hongyu Pan, Hu Han, Shiguang Shan, and Xilin Chen. Mean-variance loss for deep age estimation from a face. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5285-5294, 2018. 2, 3, 4, 5, 7, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 582, + 545, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 582, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 308, + 582, + 545, + 624 + ], + "type": "text", + "content": "[23] K. Ricanek and T. Tesafaye. Morph: a longitudinal image database of normal adult age-progression. In 7th International Conference on Automatic Face and Gesture Recognition (FGR06), pages 341-345, 2006. 1, 2, 3, 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 625, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 625, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 625, + 545, + 669 + ], + "type": "text", + "content": "[24] Rasmus Rothe, Radu Timofte, and Luc Van Gool. 
Dex: Deep expectation of apparent age from a single image. In 2015 IEEE International Conference on Computer Vision Workshop (ICCVW), pages 252-257, 2015. 3, 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "type": "text", + "content": "[25] Rasmus Rothe, Radu Timofte, and Luc Van Gool. Deep expectation of real and apparent age from a single image without facial landmarks. International Journal of Computer Vision, 126(2-4):144-157, 2018. 1, 3, 7, 8" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1204" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 385 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 171 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 171 + ], + "type": "text", + "content": "[26] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade W Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa R Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. LAION-5b: An open large-scale dataset for training next generation image-text models. In Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2022. 6, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 172, + 287, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 172, + 287, + 228 + ], + "spans": [ + { + "bbox": [ + 49, + 172, + 287, + 228 + ], + "type": "text", + "content": "[27] Nyeong-Ho Shin, Seon-Ho Lee, and Chang-Su Kim. Moving window regression: A novel approach to ordinal regression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18760-18769, 2022. 2, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 228, + 287, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 228, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 49, + 228, + 287, + 274 + ], + "type": "text", + "content": "[28] Yunxuan Zhang, Li Liu, Cheng Li, and Chen-Change Loy. Quantifying facial age by posterior of age comparisons. In Proceedings of the British Machine Vision Conference (BMVC), pages 108.1–108.12. BMVA Press, 2017. 2, 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 274, + 287, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 274, + 287, + 317 + ], + "spans": [ + { + "bbox": [ + 49, + 274, + 287, + 317 + ], + "type": "text", + "content": "[29] Zhifei Zhang, Yang Song, and Hairong Qi. Age progression/regression by conditional adversarial autoencoder. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR). IEEE, 2017. 
1, 3, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 318, + 287, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 318, + 287, + 385 + ], + "spans": [ + { + "bbox": [ + 49, + 318, + 287, + 385 + ], + "type": "text", + "content": "[30] Yinglin Zheng, Hao Yang, Ting Zhang, Jianmin Bao, Dongdong Chen, Yangyu Huang, Lu Yuan, Dong Chen, Ming Zeng, and Fang Wen. General facial representation learning in a visual-linguistic manner. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18697-18709, 2022. 1, 5, 6, 7" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1205" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Category Agnostic Model for Visual Rearrangment/4b328694-69ab-47a2-83d3-ce2efe00b0f0_content_list.json b/2024/A Category Agnostic Model for Visual Rearrangment/4b328694-69ab-47a2-83d3-ce2efe00b0f0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..0ddcdbb18967d1110fd8f2d67b754e1bbc852950 --- /dev/null +++ b/2024/A Category Agnostic Model for Visual Rearrangment/4b328694-69ab-47a2-83d3-ce2efe00b0f0_content_list.json @@ -0,0 +1,1502 @@ +[ + { + "type": "text", + "text": "A Category Agnostic Model for Visual Rearrangement", + "text_level": 1, + "bbox": [ + 214, + 130, + 756, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yuyi Liu $^{1,2}$ , Xinhang Song $^{1,2}$ , Weijie Li $^{1,2}$ , Xiaohan Wang $^{1}$ , Shuqiang Jiang $^{1,2}$", + "bbox": [ + 176, + 179, + 792, + 200 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Key Lab of Intelligent Information Processing Laboratory of the Chinese Academy of Sciences (CAS), Institute of Computing Technology, Beijing $^{2}$ University of Chinese Academy of Sciences, Beijing {yuyi.liu, xinhang song, weijie.li, xiaohan.wang}@vipl.ict.ac.cn sqjiang@ict.ac.cn", + "bbox": [ + 176, + 200, + 792, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 289, + 313, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper presents a novel category agnostic model for visual rearrangement task, which can help an embodied agent to physically recover the shuffled scene configuration without any category concepts to the goal configuration. Previous methods usually follow a similar architecture, completing the rearrangement task by aligning the scene changes of the goal and shuffled configuration, according to the semantic scene graphs. However, constructing scene graphs requires the inference of category labels, which not only causes the accuracy drop of the entire task but also limits the application in real world scenario. In this paper, we delve deep into the essence of visual rearrangement task and focus on the two most essential issues, scene change detection and scene change matching. We utilize the movement and the protrusion of point cloud to accurately identify the scene changes and match these changes depending on the similarity of category agnostic appearance feature. 
Moreover, to assist the agent to explore the environment more efficiently and comprehensively, we propose a closer-aligned-retrace exploration policy, aiming to observe more details of the scene at a closer distance. We conduct extensive experiments on AI2THOR Rearrangement Challenge based on RoomR dataset and a new multi-room multi-instance dataset MrMiR collected by us. The experimental results demonstrate the effectiveness of our proposed method.", + "bbox": [ + 75, + 323, + 473, + 717 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 751, + 209, + 767 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Rearrangement task remains a practical challenge for embodied agents that assist humans in real life, whose goal is to bring a given physical environment into the goal state with a goal specification [2]. In this paper, we focus on a branch of the general rearrangement task based on ExperienceGoal, i.e., visual rearrangement task[45], which requires an agent to recover the scene configuration after it was shuffled randomly. Due to the excessive complexity of", + "bbox": [ + 75, + 779, + 470, + 901 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c58d68c513c2feb8e0e7df93515cd0b68fe93a15b8ae7b9077124b7472caffd2.jpg", + "image_caption": [ + "Figure 1. Influence of different scene change representations on scene change detection sensitivity and scene change matching simplicity. To strike a balance between these two issues, we select point cloud as our scene change representation." + ], + "image_footnote": [], + "bbox": [ + 503, + 287, + 890, + 470 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the state space, the end-to-end deep reinforcement learning methods previously used for navigation struggle to cover this task, resulting in performance only marginally above chance [18, 45]. Recent works demonstrate that the modular methods, such as MaSS [41] and TIDEE [38], effectively reduce the complexity of the rearrangement task by dividing the task into several modules. These methods use a pre-trained detector to assign category labels to each object and infer the rearrangement goals through matching the semantic scene graphs of both the goal and shuffled configuration.", + "bbox": [ + 496, + 566, + 892, + 731 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, the introduction of category information may not be that necessary as the essential goal of visual rearrangement task is equal to \"make it like what it was before\". Even without the category labels, we can still perform the task by memorizing the appearance characteristics and the state information of objects in the scene. Besides, due to the limited accuracy of the detector, the transition from visual input to category information will inevitably lead to errors, which can accumulate and propagate to subsequent modules, thereby causing the accuracy drop of the entire task. Previous works achieve large gains with ground-truth se", + "bbox": [ + 496, + 734, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "16457", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "mantic segmentation[18, 41]. Moreover, there are inherent limitations of the methodologies based on category information. Once the detector is trained, these methods are restricted to a fixed set of categories and powerless against the object categories not previously observed in training environment. Using zero-shot methods, such as SAM [22] combined with CLIP [33], can considerably expand the known categories, but they are still within a limited set. It is impractical to retrain the model every time a new object category emerges due to the extensive resources required.", + "bbox": [ + 75, + 90, + 472, + 242 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the above problems, our motivation is to identify all scene changes in the room and restore them, regardless of any category. Previous methods use semantic labels for SCM because these labels provide a high-level representation of objects and make SCM straightforward. However, the scene changes can be represented in numerous ways, ranging from pixel to point cloud, and up to label combined with positional information. As shown in Fig. 1, there is an inherent trade-off: while simpler representations minimize information loss during conversion and enhance the sensitivity of SCD, they simultaneously complicate the process of SCM.", + "bbox": [ + 75, + 244, + 472, + 425 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Point cloud can serve as an appropriate representation of scene change, as it captures rich geometric, positional, and scale information of objects and remains robust against varied observation angles and obstructions from other objects. Leveraging point cloud facilitates efficient SCD and also provides richer appearance information for SCM. However, due to the inherent unordered nature and rotational invariance of point cloud, it is difficult to match the point cloud directly. We need to extract high-dimensional appearance features from point cloud for SCM.", + "bbox": [ + 75, + 428, + 472, + 580 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Based on these observations, we propose a category ag- nostic model for visual rearrangement task called CAVR, to the best of our knowledge, this is the first attempt for visual rearrangement without category inferring. By utilizing point cloud as the scene change representation, CAVR can recover the scene configuration to its goal state without any category concepts. In CAVR, we introduce a closeraligned-retrace exploration policy to help agent conduct exploration effectively for SCD. Meanwhile, we maintain a diff-cloud, which consists of two components, one for the point cloud moved and another for the point cloud protruding in the shuffled scene configuration, compared to the goal configuration. The diff-cloud precisely captures the variations occurring throughout the scene. After exploration, we utilize the pre-trained appearance feature extractor to embed the diff-cloud and then match the scene changes across various locations based on the similarity of appearance feature, resulting in a series of rearrangement goals. 
Then we use a planning-based policy to restore them to their goal states in succession.", + "bbox": [ + 75, + 580, + 472, + 883 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We conduct experiments on AI2THOR Rearrangement", + "bbox": [ + 96, + 885, + 470, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Challenge based on the RoomR dataset[45] and shows improvements on both the success rate and the portion of successfully fixed objects. To cater to more practical demands, we introduce a multi-room multi-instance rearrangement dataset MrMiR based on ProcTHOR simulator[12]. The experimental results on MrMiR dataset fully demonstrate the effectiveness of our method in the complex multi-room environment.", + "bbox": [ + 496, + 90, + 893, + 212 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 500, + 224, + 650, + 242 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Rearrangement The general rearrangement problem [2] aims to transform the environment from an initial state to a goal state through interaction. We focus on an instantiation of the rearrangement problem[45], in which the goal state is specified by immersing the agent in the goal environment and allowing the agent to explore autonomously. Prior works can be classified into two categories, end-to-end reinforcement learning and modular methods. The end-to-end methods [18, 45] perform poorly mainly due to the large action space and complex stages in the task. Comparatively, the modular methods [38, 41] have shown surprising progress in improving the success rate. In detail, Mass[41] proposes a semantic policy with a voxel-based semantic map to find and match the changed objects. TIDEE[38] utilizes the spatial relationships between objects to determine the changed objects. Motivated by prior works, we also propose a modular method, while our model can perform the task without any category information.", + "bbox": [ + 496, + 250, + 893, + 523 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Visual exploration Visual exploration refers to the process in which an agent collects information about the 3D environment through motion and perception [14, 29, 30, 35]. For visual exploration, efficiency is of utmost significance, involving how to access a broader range of regions [3, 6, 17, 39], observe more objects [16] and obtain a larger volume of environmental information relevant to downstream tasks (such as navigation) [25, 43, 44, 46-49] within a certain budget.", + "bbox": [ + 496, + 523, + 893, + 657 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To improve the efficiency of exploration, several methods have employed ideas like curiosity [5, 7, 30, 31], coverage [6, 11] and reconstruction [21, 34]. Most related to ours is the coverage-based works, which try to maximize the area seen in the environment [6, 11]. In our exploration policy, both the area explored and the observation distance are considered simultaneously to accurately observe more details of the scene.", + "bbox": [ + 496, + 659, + 893, + 779 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Scene Change Detection Scene change detection (SCD) refers to the task of identifying and localizing changes of a scene captured at different times[9, 26, 27, 36, 37, 40, 42]. Depending on the types of scene representation, methods are classified into two categories, respectively, 2D domain and 3D domain[37]. 
The first one devises specific neural networks to process the image pair taken at different times and generate a pixel-level prediction, namely, each pixel is", + "bbox": [ + 496, + 780, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "16458", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "classified into a category of change[1, 4, 9, 10, 19, 36, 37, 42]. Some studies focus on the scene change detection in 3D domain. They aim to reconstruct a time-varying 3D model from images taken from multiple viewpoints at different times and represent the temporal scene changes over several decades[26, 27, 40]. In our task, we not only focus on identifying changes in the 3D environment, but also emphasize the importance of matching changes across various locations, which is crucial for enabling the agent to accurately recover the scene configuration.", + "bbox": [ + 75, + 90, + 472, + 243 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 253, + 169, + 271 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the intrinsic complexity of visual rearrangement task, in this section, we present a modular approach to tackle the task, decomposing it into manageable subtasks including visual exploration, scene change detection and scene change matching. Our pipeline is illustrated in Fig. 2. We start this section by giving the definition of visual rearrangement task in Sec. 3.1. Then we describe the three modules separately. The visual exploration module (Sec. 3.2) requires the agent to explore the environment efficiently and comprehensively while retaining memory of the environment. Subsequently, the scene change detection module (Sec. 3.3) utilizes the agent's memory of the goal environment and compares it with the current environment to identify all scene changes. Then to recover the goal configuration, the scene change matching module (Sec. 3.4) is proposed to correlate these changes across different areas within the scene and infer the rearrangement goals.", + "bbox": [ + 75, + 279, + 472, + 537 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Visual Rearrangement Task", + "text_level": 1, + "bbox": [ + 76, + 545, + 328, + 561 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "According to the commonly accepted norms in the community[2], the rearrangement task is defined in a general form, where an agent is initialized in a starting state $s^0$ and required to transform the environment from $s^0$ to the goal state $s^* \\in S^*$ with the possible actions $a \\in A$ . The environment state space is denoted as the Cartesian product of the pose spaces of all rigid parts: $S = (R^3 \\times SO3) \\times (R^3 \\times SO3) \\ldots \\times (R^3 \\times SO3)$ , where $R^3$ and $SO3$ represent the 3D locations and rotations space. Follow the Partially Observable Markov Decision Processes (POMDP), the agent typically has no access to any state space and must operate purely based on the sensory observations $o \\in O$ and the given goal specification $g = \\phi(s^0, S^*)$ . 
Based on different goal specification forms (GeometricGoal, ImageGoal, LanguageGoal, ExperienceGoal, et al.), the general rearrangement task has various levels of difficulty.", + "bbox": [ + 75, + 568, + 472, + 809 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We consider an instance of rearrangement task proposed by Weihs et al.[45], which adopts the ExperienceGoal as the goal specification $g$ and is defined as a two-stage task, including the walkthrough and unshuffle stages. During the walkthrough stage, the agent is immersed in a room of goal state $s^*$ and allowed to explore autonomously. Sequentially,", + "bbox": [ + 75, + 810, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the walkthrough environment is shuffled and some objects' states are changed, denoted as the unshuffle stage, where the agent officially starts the rearrangement task and reorganizes the shuffled scene configuration back.", + "bbox": [ + 496, + 90, + 890, + 152 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Visual Exploration", + "text_level": 1, + "bbox": [ + 498, + 160, + 681, + 176 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Under the two-stage rearrangement task, the initial exploration of the target environment is critical for the subsequent stages, since the agent is expected to acquire more object information in the fewest number of steps. Previous works adopt coverage-based exploration [38] or a search policy based on the expert distribution of objects [41]. However, there are usually many small-sized objects distributed across the scene, which can be easily overlooked or obscured by large entities when observed from a distance. Therefore, we propose a closer-aligned-retrace exploration policy, aiming to observe more objects at a closer distance to improve the observation accuracy and completeness.", + "bbox": [ + 496, + 183, + 890, + 364 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The core idea of the proposed policy is to build an observation distance map $m_{o} \\in \\mathbb{R}^{H \\times W}$ , where each grid denotes the minimum distance at which the current coordinate point is observed by the agent. Through the optimization of $m_{o}$ , the agent can be guided to observe objects closer.", + "bbox": [ + 496, + 364, + 890, + 439 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In exploration, at each timestep, the agent obtains visual observation RGBD and updates its own pose. Following previous work [8, 28, 41], we also build a 2D obstacle map $m_{t} \\in \\mathbb{R}^{H \\times W}$ with the proposed observation distance map $m_{o}$ . At the beginning of the exploration, due to the limited range of movement, the observation distance map $m_{o}$ predominantly consists of high distance values. Therefore, the visual exploration policy $\\pi$ can be represented by optimizing a function $f$ of $m_{o}$ and a distance thresh $\\epsilon_{d}$ .", + "bbox": [ + 496, + 440, + 890, + 575 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\pi (a) = f \\left(m _ {o}, \\epsilon_ {d}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 633, + 585, + 756, + 602 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The goal of optimization is to minimize the observation distance map (i.e., $\\min(m_o) \\leq \\epsilon_d$ ). We employ an analytical approach to obtain the solution. 
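A minimal sketch (Python with NumPy; not the authors' released code) of how the observation distance map m_o could be maintained and a waypoint picked from it. The grid resolution, the sentinel value for unseen cells, and the helper names are illustrative assumptions.

import numpy as np

UNSEEN = 1e6  # sentinel distance for grid cells that have never been observed

def update_observation_distance_map(m_o, visible_cells, agent_cell, cell_size=0.05):
    # Lower m_o for every cell visible from the current pose: each entry keeps the
    # minimum distance at which that cell has been observed so far.
    for r, c in visible_cells:
        d = np.hypot(r - agent_cell[0], c - agent_cell[1]) * cell_size
        m_o[r, c] = min(m_o[r, c], d)
    return m_o

def select_waypoint(m_o, free_mask, eps_d=1.0):
    # Prefer the free cell whose best observation distance is still the largest, so the
    # agent is driven to look at it from closer; stop once min(m_o) <= eps_d holds for
    # all reachable cells, matching the optimization target above.
    candidates = np.where(free_mask, m_o, -np.inf)
    if candidates.max() <= eps_d:
        return None  # exploration objective satisfied
    return np.unravel_index(int(np.argmax(candidates)), m_o.shape)

The selected waypoint would then be handed to the path planner on the obstacle map, as described next.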
Specifically, based on the current observation distance map $m_o$ , we select a waypoint as the next exploration goal and apply the route planning Dijkstra algorithm [13] to generate a path on the obstacle map $m_t$ . As to the waypoint selection, we prioritize selecting those with higher distance values on the distance map, aiming to observe objects closer. To better compare the shuffled and goal state of the scene for rearrangement goals inference, in the unshuffle stage, the agent tries its best to replicate the trajectory of the walkthrough stage.", + "bbox": [ + 496, + 612, + 890, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Scene Change Detection", + "text_level": 1, + "bbox": [ + 498, + 801, + 720, + 816 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Detecting changes within the scene is a critical capability for an agent to perform rearrangement tasks. We maintain a diff-cloud to represent the scene changes. As shown in Fig. 2 (b), the diff-cloud consists of two parts. The red and blue points respectively represent the moved and protruding", + "bbox": [ + 496, + 825, + 890, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "16459", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/8f9314053a3dc254d64dfd5ec5e18e3612266664ae3466209d2a713feaf4dbee.jpg", + "image_caption": [ + "Figure 2. Pipeline of our CAVR model. (a) The gradient color transitioning from blue to red in the observation distance map represents the distances ranging from $0\\mathrm{m}$ to $5\\mathrm{m}$ . We adopt a closer-aligned-retrace exploration policy to observe more details by optimizing a function of the distance map. (b) Scene change detection is performed by comparing the point clouds corresponding to the goal configuration and the shuffled configuration of the scene, recording the moved part (blue points) and the protruding part (red points) to construct the diff-cloud. (c) We extract the entity-layer information from the two parts of the diff-cloud and match these entities depending on the similarity of category agnostic appearance feature. (d) After the matching process, we obtain a series of rearrangement goals with their goal states (indicated by the dashed bounding boxes) and shuffled states (indicated by the solid bounding boxes)." + ], + "image_footnote": [], + "bbox": [ + 81, + 92, + 890, + 409 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "point clouds in the shuffled configuration, compared to the goal configuration. Next, we explain how to construct the diff-cloud using visual inputs from the two stages.", + "bbox": [ + 75, + 553, + 470, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During the walkthrough stage, at each pose $p^w$ of the agent, we employ the depth information $D_{p^w}$ to generate an egocentric point cloud $c_{p^w}^{ego}$ . Each point in $c_{p^w}^{ego}$ is associated with a pixel in depth $D_{p^w}$ . Then we convert $c_{p^w}^{ego}$ from the agent's coordinate system to global coordinate system, resulting in a geocentric point cloud $c_{p^w}^{geo}$ . 
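A minimal sketch of this back-projection, assuming a pinhole camera with known intrinsics and an agent pose given as a world-frame rotation matrix and translation (the exact conventions are not spelled out in the paper, so the names below are illustrative):

```python
import numpy as np

def depth_to_geocentric(depth, fx, fy, cx, cy, R_wc, t_wc):
    """Lift a depth image into a geocentric (world-frame) point cloud.

    depth          : HxW depth map in metres.
    fx, fy, cx, cy : pinhole intrinsics of the agent's camera.
    R_wc, t_wc     : 3x3 rotation and 3-vector translation taking camera
                     coordinates into the global coordinate system.
    Returns an (H*W)x3 array with one 3D point per depth pixel, so every
    point stays associated with its source pixel by index.
    """
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    z = depth.reshape(-1)
    x = (u.reshape(-1) - cx) * z / fx      # egocentric X
    y = (v.reshape(-1) - cy) * z / fy      # egocentric Y
    pts_ego = np.stack([x, y, z], axis=1)  # egocentric point cloud
    return pts_ego @ R_wc.T + t_wc         # geocentric point cloud
```

Because each row of the returned array corresponds to one depth pixel, the per-pixel comparison between the walkthrough and unshuffle clouds at aligned poses reduces to comparing rows with the same index.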
For the observed RGB image $I_{p^w}$ , we adopt the pre-trained resnet18 model [20] provided by the official PyTorch to extract a visual feature map $f_{p^w}$ .", + "bbox": [ + 75, + 606, + 468, + 743 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During the unshuffle stage, we use the same method to generate the geocentric point cloud $c_{p^u}^{geo}$ and the feature map $f_{p^u}$ for each pose $p^u$ . If $p^u$ aligns with a previous pose $p^w$ in the walkthrough stage, we compare the two corresponding point clouds, $c_{p^w}^{geo}$ and $c_{p^u}^{geo}$ . A considerable shift between two point coordinates associated with the same pixel indicates the changes have occurred in this location. Specifically, the increase in distance from the agent suggests removal of some objects, while the decrease signifies objects addition. Based on the distance variations, these", + "bbox": [ + 75, + 750, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "points are allocated to the moved part and the protruding part of the diff-cloud, respectively. Moreover, for the area implying scene changes, we extract the corresponding visual feature from the feature map and assign it to each point in that region. Each point in the diff-cloud is represented as $\\{x,y,z,v\\}$ , where $x,y,z$ is the 3D coordinate in the global coordinate system and $v$ is the visual feature.", + "bbox": [ + 498, + 553, + 893, + 659 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Scene Change Matching", + "text_level": 1, + "bbox": [ + 500, + 676, + 723, + 693 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After exploration of the walkthrough stage and the unshuffle stage, we acquire the comprehensive diff-cloud that encompasses changes in all areas of the scene. Note that in the visual rearrangement task settings, objects cannot disappear into thin air, they are simply moved from one place to another. Therefore, to recover the scene configuration, we need to match changes across various locations in the scene. Since the diff-cloud contains only some points in space, we first extract the entity-layer information from it and then perform matching operations on the entity-level.", + "bbox": [ + 496, + 700, + 890, + 853 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We apply the density-based clustering algorithm DBSCAN [15] separately to the two parts of the diff-cloud, resulting in two sets of entities, a moved entity set", + "bbox": [ + 498, + 854, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "16460", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\Omega^m = \\{\\omega_1^m,\\omega_2^m,\\dots ,\\omega_k^m\\}$ and an protruding entity set $\\Omega^p = \\{\\omega_1^p,\\omega_2^p,\\dots ,\\omega_l^p\\}$ . Each entity in these two sets is a collection of some points in the diff-cloud: $\\omega = \\{(x_{1},y_{1},z_{1},v_{1}),(x_{2},y_{2},z_{2},v_{2}),\\ldots ,(x_{n},y_{n},z_{n},v_{n})\\}$ .", + "bbox": [ + 76, + 90, + 468, + 151 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Fig. 2(c), the scene change matching process can be regarded as the weighted bipartite graph matching between $\\Omega^m$ and $\\Omega^p$ . We construct a bipartite graph $G = (\\Omega^m \\cup \\Omega^p, E)$ , where $\\Omega^m \\cup \\Omega^p$ is the node set and $E$ represents the all fully connected edge set. 
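As a rough illustration of this clustering-and-matching step, the sketch below groups each part of the diff-cloud with scikit-learn's DBSCAN and resolves the correspondence with the Hungarian algorithm from SciPy, using the appearance-similarity weights defined below; the eps/min_samples values, the feature averaging and the geo_embed/cosim helpers are illustrative assumptions rather than the paper's exact settings.

```python
import numpy as np
from sklearn.cluster import DBSCAN
from scipy.optimize import linear_sum_assignment

def extract_entities(points, feats, eps=0.05, min_samples=10):
    """Cluster one part of the diff-cloud into entities.

    points : Nx3 coordinates; feats : NxD per-point visual features.
    Returns one (points, mean visual feature) tuple per entity; DBSCAN's
    noise label (-1) is discarded.
    """
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(points)
    return [(points[labels == k], feats[labels == k].mean(axis=0))
            for k in sorted(set(labels) - {-1})]

def match_entities(moved, protruding, geo_embed, cosim):
    """Maximum-weight bipartite matching between the two entity sets.

    geo_embed maps an entity's raw points to a geometric feature (e.g. a
    PointNet++-style encoder); cosim is cosine similarity of two vectors.
    """
    W = np.zeros((len(moved), len(protruding)))
    for i, (pts_m, vis_m) in enumerate(moved):
        for j, (pts_p, vis_p) in enumerate(protruding):
            W[i, j] = cosim(geo_embed(pts_m), geo_embed(pts_p)) \
                      + cosim(vis_m, vis_p)
    rows, cols = linear_sum_assignment(W, maximize=True)   # Kuhn-Munkres
    return list(zip(rows, cols))                           # matched index pairs
```

Each returned index pair corresponds to one rearrangement goal: the moved entity gives the goal state and the protruding entity the current state of the same instance.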
Every edge $e \\in E$ has one end node in $\\Omega^m$ and the other end node in $\\Omega^p$ . The function $\\phi$ assigns a positive weight value to each edge. A matching $M$ is a subset of $E$ such that each node in $\\Omega^m \\cup \\Omega^p$ appears in at most one edge in $M$ . Our goal is to find the maximum matching:", + "bbox": [ + 75, + 152, + 470, + 303 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nM ^ {*} = \\operatorname * {a r g m a x} _ {M} \\sum_ {e \\in M} \\phi (e),\n$$\n", + "text_format": "latex", + "bbox": [ + 183, + 311, + 361, + 344 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $e = e(\\omega_i^m,\\omega_j^p)$ represents the edge matching node $\\omega_{i}^{m}$ and $\\omega_{j}^{p}$", + "bbox": [ + 75, + 354, + 468, + 387 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The role of the weight function $\\phi(e)$ is to determine the possibility that $\\omega_i^m$ and $\\omega_j^p$ belong to the same instance. Based on this, we design the weight function to calculate the similarity in appearance of these two nodes. The appearance of each entity $\\omega$ is considered from two aspects: geometric feature geo and visual feature vis. For vis, we use the average of the visual features of all the points in this entity. In terms of geo, we train a geometric feature extractor, which builds upon PointNet++[32] and embeds the raw point cloud data. The weight function $\\phi$ is specifically defined as", + "bbox": [ + 75, + 386, + 468, + 551 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\phi \\big (e \\left(\\omega_ {i} ^ {m}, \\omega_ {j} ^ {p}\\right) \\big) = C o s i m \\left(g e o _ {i} ^ {m}, g e o _ {j} ^ {p}\\right) + C o s i m \\left(v i s _ {i} ^ {m}, v i s _ {j} ^ {p}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 561, + 478, + 582 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where Cosim refers to the cosine similarity.", + "bbox": [ + 76, + 590, + 372, + 604 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Then the Kuhn-Munkres algorithm [24] is adopted to solve this maximum matching problem. Once entities are matched, we acquire entity pairs $\\{(\\omega_1^m,\\omega_{j_1}^p),(\\omega_2^m,\\omega_{j_2}^p),\\ldots ,(\\omega_t^m,\\omega_{j_t}^p)\\}$ as rearrangement goals. Each entity pair $(\\omega_i^m,\\omega_{j_i}^p)$ represents the two different states of the same instance, where $\\omega_{i}^{m}$ denotes the goal state and $\\omega_{j_i}^p$ denotes the current state of the instance. Subsequently, for the inferred rearrangement goals, we transport them to their goal states in succession, during which, we leverage the 2D obstacle map and Dijkstra algorithm [13] to conduct obstacle avoidance and navigation path planning.", + "bbox": [ + 75, + 604, + 468, + 787 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 800, + 209, + 816 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Rearrangement task remains a practical challenge for embodied agents that assist humans in real life, whose goal is to bring a given physical environment into the goal state with a goal specification [2]. Each pixel is classified into a category of change[36].", + "bbox": [ + 75, + 824, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/a02fbf47f9b823e571f022d7775f6bc487000e61c6cc5b71cbb8c22ffc8e088d.jpg", + "table_caption": [ + "Table 1. 
Comparison on RoomR dataset" + ], + "table_footnote": [ + "\"Suc\": Success; \"FS\": Fixed Strict; \"E\": Energy Remain; \"Mis\": Misplaced." + ], + "table_body": "
<tr><td>Method</td><td>Suc (%)↑</td><td>FS (%)↑</td><td>E↓</td><td>Mis ↓</td></tr>
<tr><td>TIDEE</td><td>11.7</td><td>28.9</td><td>0.715</td><td>0.734</td></tr>
<tr><td>MaSS</td><td>4.7</td><td>16.5</td><td>1.016</td><td>1.018</td></tr>
<tr><td>Our</td><td>14.2</td><td>33.1</td><td>0.714</td><td>0.707</td></tr>
", + "bbox": [ + 501, + 114, + 893, + 191 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/6d67523976df72675ffd5e9ace7f19559a81963645191a5e449f6063511b9558.jpg", + "table_caption": [ + "Table 2. Comparison on our MrMiR dataset" + ], + "table_footnote": [ + "\"Suc\": Success; \"FS\": Fixed Strict; \"E\": Energy Remain; \"Mis\": Misplaced." + ], + "table_body": "
<tr><td>Method</td><td>Suc (%)↑</td><td>FS (%)↑</td><td>E↓</td><td>Mis ↓</td></tr>
<tr><td>TIDEE</td><td>1.0</td><td>14.1</td><td>0.917</td><td>0.924</td></tr>
<tr><td>MaSS</td><td>0.6</td><td>10.5</td><td>1.019</td><td>1.026</td></tr>
<tr><td>Our</td><td>5.0</td><td>28.7</td><td>0.7327</td><td>0.7134</td></tr>
", + "bbox": [ + 500, + 267, + 893, + 345 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experiment Setup", + "text_level": 1, + "bbox": [ + 500, + 406, + 674, + 422 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Dataset We evaluate our method on the AI2THOR Rearrangement Challenge based on the RoomR dataset[45], which consists of 80 rooms and 4000 tasks for training, and 20 rooms with 1000 tasks each for both validation and test. Each task in RoomR involves 1 to 5 objects with state changes, characterized by object locations or openness.", + "bbox": [ + 496, + 430, + 890, + 521 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In RoomR[45] dataset, the spatial range of object changes is limited due to the confined area with single-room scenes and the target objects to be rearranged are mainly category-wise, i.e., most categories only have one instance. To cater for the prevalent characteristics of indoor environments in reality, we build a more practical and challenging dataset MrMiR for the two-stage rearrangement task on the ProcTHOR simulator[12], where the change in the state of an object can involve a broader spatial range, even extending across different rooms. Besides, there exists multiple instances within the same category that have different appearance. The simulator ProcTHOR[12] respectively provides 10,000 training, 1000 valid and 1000 test apartments. For our task need, we totally select 6000 apartments in the simulator, splitting 5000 apartments for training, 500 apartments for validation, and 500 apartments for test. Each apartment contains multiple instances within the same category that have different appearance. For each apartment, we randomly generate one rearrangement task. Therefore, our MrMiR dataset totally contains 6000 rearrangement tasks, the same as RoomR. Fig. 3 illustrates the comparison of scene area distribution between our MrMiR dataset and RoomR dataset. It can be seen that our dataset encompasses a diverse range of scene area, while RoomR mainly focusing on small rooms under $100\\mathrm{m}^2$ .", + "bbox": [ + 496, + 523, + 892, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "16461", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To train the geometric feature extractor based on PointNet++[32], which embeds point cloud, we generate a dataset using AI2THOR[23]. We collect 77K sample pairs, of which 70K are used for training and 7K for testing. Each sample pair is composed of two point clouds, which may either represent the same instance (the positive pair) or different instances (the negative pair). The distribution of the positive and negative pairs is balanced, with a 1:1 ratio. Within each room of AI2THOR, we generate positive sample pairs by applying different transformation operations to the point cloud of the same object. We also perform transformation operations on the point clouds of two different objects to generate negative sample pairs. For the transformation operations, we consider random rotation, adding random noise, and randomly deleting $20\\%$ of the original point cloud data.", + "bbox": [ + 75, + 90, + 472, + 332 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics To evaluate an agent's performance, we consider several metrics as follows: (1) Success. The success metric is a binary indicator of each task, which is strictly defined as whether the whole objects' states have been restored to their goal states. 
(2) Fixed Strict. This metric records the proportion of successfully fixed objects per task. If there are any newly misplaced objects at the end of a task, this metric will be set as 0. (3) Misplaced. This metric is denoted as the number of misplaced objects after the unshuffle stage divided by the number of misplaced objects at the start of the unsuffle stage. (4) Energy Remaining (E). The above metrics are quite strict, which is not possible to measure the distance to task completion. The energy is used to represent the difference between two possible states of an object, which can be functioned as $D: S \\times S \\Rightarrow [0,1]$ . The larger the energy value, the greater the difference between the two states, whereas if the two states are approximately equal, the energy value is 0. Therefore, this metric can be computed as the sum of all objects' energy after the unshuffle stage, divided by the sum of all objects' energy at the beginning of the unshuffle stage.", + "bbox": [ + 75, + 337, + 472, + 654 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation details The distance threshold $\\epsilon_{d}$ is set to $1.5\\mathrm{m}$ , which is determined through hyper-parameter tuning, as detailed in Sec. 4.4. To ensure a fair comparison, we limit the maximum step number for both the exploration and rearrangement stages. In RoomR dataset[45], the exploration step limit is set to 300 and the navigation step limit for each object's rearrangement is set to 50. In our MrMiR dataset, we categorize the apartments by area into five levels: $< 10\\mathrm{m}^2$ , $10 - 60\\mathrm{m}^2$ , $60 - 150\\mathrm{m}^2$ , $150 - 300\\mathrm{m}^2$ , $>300\\mathrm{m}^2$ . Correspondingly, the exploration step limits are set to 50, 200, 300, 500 and 800 and the navigation step limits for each object's rearrangement are set to 50, 80, 100, 200, 300. When we train the geometric feature extractor, we use Adam as our optimizer and the hyper-parameters $(lr,\\beta_1,\\beta_2,\\epsilon)$ are set to $(0.001,0.9,0.999,1\\mathrm{e} - 8)$ . The parameters and models are tuned only on the RoomR dataset", + "bbox": [ + 75, + 659, + 472, + 901 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d4c18e530969ea7ba8d0b7fee367d8bf7fccb542eb8852b903f1d4cc70683665.jpg", + "image_caption": [ + "Figure 3. Comparison of scene area distribution between MrMiR and RoomR[45] datasets." + ], + "image_footnote": [], + "bbox": [ + 540, + 89, + 815, + 247 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "and are directly tested on the MrMiR dataset.", + "bbox": [ + 500, + 320, + 799, + 335 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Comparisons with Related Works", + "text_level": 1, + "bbox": [ + 500, + 357, + 794, + 373 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We report the quantitative comparisons on the RoomR dataset in Table 1 and the MrMiR dataset in Table 2 with the two state-of-the-art modular methods MaSS[41] and TIDEE[38].", + "bbox": [ + 498, + 385, + 890, + 445 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "MaSS [41] employs a Gaussian mixture model to train a semantic search strategy, aiming to guide the agent towards regions where the likelihood of object occurrence is higher. 
During the exploration process, the 3D voxel semantic map is constructed, which is then used to match and identify objects that need to be rearranged.", + "bbox": [ + 498, + 450, + 890, + 541 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "TIDEE [38] employs a coverage-based exploration policy to extract the spatial relationships between objects. After the exploration of two stages, the relationship changes are used to identify the rearrangement goals.", + "bbox": [ + 498, + 544, + 890, + 604 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Given that the original work of TIDEE is based on category-level (i.e., only records the category information of objects, and for multiple instances under the same category, only chooses one as the target), it cannot be directly applicable to our MrMiR dataset. To be fair, we make modifications to TIDEE by extracting all spatial relationships between instances when testing on the MrMiR dataset.", + "bbox": [ + 496, + 609, + 890, + 715 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Table 1, our proposed method CAVR outperforms the related works in all metrics. Specifically, it improves the success rate by $2.5\\%$ and the proportion of successfully fixed objects by $5.38\\%$ . Beyond the primary improvements, the decrease in energy and misplaced metrics suggests that our CAVR method could rearrange the environment closer to the goal configuration, even without fully completing the task. As shown in Table 2, the disparity between the related works and our CAVR method has further increased, fully demonstrating the superiority of our method in dealing with more complex and challenging environment.", + "bbox": [ + 496, + 719, + 893, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "16462", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/9d18ca77476744f81c67f3738229e54a11cc9b76bf222ba77490156193f9f44a.jpg", + "table_caption": [ + "Table 3. Ablation Study" + ], + "table_footnote": [ + "“√” represents utilizing our proposed corresponding modules (closer-aligned-retrace exploration policy and scene change matching based on similarity of appearance including visual feature and geometric feature introduced in Sec. 3); “E”: Energy Remaining." + ], + "table_body": "
<tr><td>Visual Exploration</td><td>Scene Change Matching</td><td>Success (%)↑</td><td>FixedStrict (%)↑</td><td>E↓</td><td>Misplaced ↓</td></tr>
<tr><td>coverage</td><td>√</td><td>13.1</td><td>31.0</td><td>0.722</td><td>0.717</td></tr>
<tr><td>MaSS's</td><td>√</td><td>8.7</td><td>25.8</td><td>0.763</td><td>0.754</td></tr>
<tr><td>√</td><td>uniform</td><td>11.3</td><td>24.6</td><td>0.818</td><td>0.807</td></tr>
<tr><td>√</td><td>visual</td><td>14.0</td><td>32.6</td><td>0.724</td><td>0.720</td></tr>
<tr><td>√</td><td>geometric</td><td>14.2</td><td>32.3</td><td>0.723</td><td>0.717</td></tr>
<tr><td>√</td><td>√</td><td>14.2</td><td>33.1</td><td>0.714</td><td>0.707</td></tr>
", + "bbox": [ + 78, + 114, + 895, + 258 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/93c766c9dfdecea3fff6c54193df27b7ed4e58ba90e68c43885c4d45f889d290.jpg", + "image_caption": [ + "Figure 4. Rearrangement performance relative to distance threshold $\\epsilon_{d}$ . The blue lines represent the average metrics across the tasks of validation set of RoomR[45], with the shaded area representing the $68\\%$ confidence interval. Higher values of Success and %FixedStrict indicate superior performance, whereas lower EnergyRemaining and %Misplaced indicate better results." + ], + "image_footnote": [], + "bbox": [ + 78, + 305, + 272, + 426 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/45ca38cc6e3eb03e7b0171879b3b040a00bde5abe31f9af4c036a4e8a34e40ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 277, + 304, + 472, + 426 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f90728106f2e49f787f5d606a0cbe7e34e97e606d06968d43178eb1eac61ede1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 480, + 304, + 684, + 426 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b85f8f4a863c972d24acb24515355f9eeb599822145d46b83387421dc550afd5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 304, + 887, + 426 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 508, + 230, + 525 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Considering the complexity of visual rearrangement task, we conduct ablation studies on RoomR dataset [45] to further investigate the importance of different modules within the overall task. In the ablation studies, we keep the diffcloud as the representation of scene changes.", + "bbox": [ + 75, + 534, + 468, + 609 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation on the visual exploration module We replace our closer-aligned-retrace exploration policy with: a) Coverage-based exploration policy This strategy randomly selects target points from unexplored areas, which are used in TIDEE [38]. b) MaSS's semantic policy This ablation directly adopts the semantic policy proposed in [41], which trains a network to search the object distribution.", + "bbox": [ + 75, + 611, + 468, + 731 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation on the scene change matching module The process of scene change matching can be abstracted as a maximum weight matching problem in bipartite graph. We substitute the weights of edges with: a) Uniform weights This ablation set all edge weights to the same value regardless of the objects' appearance, which leads to a random matching. b) Similarity of visual feature This ablation only utilize the similarity of visual feature as the weight. c) Similarity of geometric feature This ablation only use the similarity of geometric feature as the weight.", + "bbox": [ + 75, + 733, + 470, + 883 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The experimental results are presented in Table 3. In the", + "bbox": [ + 96, + 885, + 470, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ablation study on the visual exploration module, the model with MaSS's exploration policy perform worst due to the substantial variation in objects distribution within rooms, making it challenging to model them effectively with a uniform network. 
The model with coverage-based policy also underperform as it is likely to overlook minor changes when the observation distance is considerable. In the ablation study on the scene change matching module, removing any part of the appearance feature clearly decreases the performance in all metrics, which illustrates the noticeable impact of our extracted appearance feature on the visual rearrangement task.", + "bbox": [ + 496, + 508, + 893, + 690 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Hyper-parameter Tuning", + "text_level": 1, + "bbox": [ + 498, + 708, + 732, + 724 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conduct experiments on the validation set of RoomR[45] to determine the distance threshold in the optimization criteria for our closer-aligned-retrace exploration policy. A very small threshold value means visiting nearly every grid space on the map, while a large threshold value ignores the underlying concern of non-ambiguous scene change detection. The exploration happens in the unshuffle stage as well and our exploration policy leads the agent to try its best to replicate the previous trajectory. Therefore the threshold value determines the trade-off between optimality in terms of the agent traversal for exploration and a", + "bbox": [ + 496, + 734, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "16463", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7149785c55bd1885dd7bea58448324f51893f61179ef7663851dd92c6d18cd79.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 127, + 90, + 272, + 200 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/adc897f0f598b5a8bb756f5d60318946a76e4cea141b8c8bec3d8ec551f46c54.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 279, + 90, + 424, + 200 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a3ca2b936f2a943a404682ffc09a2039627478f63645225030f3d7e571aeef64.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 433, + 90, + 573, + 200 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/32bd738717ba6245ba55d90d0a596038d86b182a6c3a2fac58395a0bb9c67392.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 584, + 90, + 727, + 200 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2b986476f189378ddf65c842c834717dea5348edb3917e689ddc4be186cf1037.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 733, + 90, + 877, + 200 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1feea33d1266b6a38b1b4a7399bb33a5050d3a58a4c59e0f911b4c05f989b7a5.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 127, + 219, + 272, + 329 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/609d8de081d1ff07f78a683bd68426856d88c5ba3a48c303aa5ae73f9506fa18.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 279, + 219, + 421, + 329 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/905de26f5b4e979d979697de92735960e763e7523578e028756b0cce0204ffd2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 431, + 219, + 573, + 329 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f6dc3cc380e7ad7a9920a73ab658606957d8acf47dc6e70761942af6e851f305.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 583, + 219, + 725, + 329 + ], + "page_idx": 7 + }, + { + "type": 
"image", + "img_path": "images/38ae046bc51695003b76ee14f83d191f54dd59fc7d8daef874d5aefc127e2e6d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 733, + 219, + 877, + 329 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3072faf8aa403bb33acae04c23293120ea0ec29efd0ee520a6358a1df1e52e21.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 127, + 330, + 272, + 441 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f6f52c9fb5660bf092baad4d53b3d1e76e32e77b17846a92bd1843d32e56c6a6.jpg", + "image_caption": [ + "Figure 5. Visualization of optimization process of observation distance map and construction of diff-cloud (a) In the walkthrough stage, objects begin in the positions indicated by the dashed blue bounding boxes. Observation distance map is positioned at the top right corner of each image. The color transitioning from blue to red represents the distances ranging from $0\\mathrm{m}$ to $5\\mathrm{m}$ . (b) In the unshuffle stage, objects are moved to the locations indicated by the solid red box. (c) The diff-cloud is gradually built up, including the moved part (blue points) and the protruding part (red points)." + ], + "image_footnote": [], + "bbox": [ + 279, + 330, + 421, + 441 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/afb6c381cd2f5eb5f626adbbb9058e8592df17a1995d2efb0f05a8698483c1e3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 431, + 330, + 573, + 441 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7fdfcd37d4774f6eddae9031b1466958d031873a45a25bcb35d4754dbf10c20c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 583, + 330, + 725, + 441 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2b6d53dc284f29ff6d02a8b6b72dd6bf3b0bf6af88038db82bd1cd80e86d7192.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 733, + 330, + 875, + 441 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "non-ambiguous scene change detection.", + "bbox": [ + 76, + 551, + 341, + 566 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in the Fig.4, we set observation distance thresholds from $1\\mathrm{m}$ to $7\\mathrm{m}$ and compute the average metrics of 1000 tasks. Optimal performance on the validation set is achieved with a distance threshold at $1.5\\mathrm{m}$ , which is the threshold consistently applied in the other experiments throughout this paper. In this experiment, error bars are calculated based on a $68\\%$ confidence interval.", + "bbox": [ + 75, + 569, + 468, + 675 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Visualization", + "text_level": 1, + "bbox": [ + 76, + 693, + 215, + 708 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We visualize and analyze the optimization of the observation distance map during the walkthrough stage and the construction of the diff-cloud in the unshuffle stage, as shown in Fig. 5. As the exploration progresses, the distance map increasingly exhibit hues of blue, which indicates that our exploration policy enables the agent to observe the scene details up close. In the unshuffle stage, as the diff-cloud is gradually built up, we develop a distinct understanding of the changes occurring throughout the scene. 
After matching these changes according to the similarity of their appearance, we can carry out the rearrangement execution procedurally.", + "bbox": [ + 75, + 719, + 472, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 550, + 619, + 566 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We propose a category agnostic model for visual rearrangement task in this paper. Our method is composed of a closer-aligned-retrace exploration policy, a scene change detection module based on point cloud and a scene change matching module utilizing the similarity of appearance feature, each specifically designed to recover the scene configuration regardless of any category labels. To validate the proposed method, we conduct experiments on the RoomR dataset and a more practical dataset MrMiR collected by us, where multiple instances distribute across multiple rooms. Experimental results on these two datasets demonstrate that our method is able to perform the visual rearrangement task effectively without any category information.", + "bbox": [ + 496, + 580, + 893, + 779 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements: This work was supported by the National Natural Science Foundation of China under Grant 62125207, 62272443, 62032022 and U23B2012, in part by Beijing Natural Science Foundation under Grant JQ22012, Z190020.", + "bbox": [ + 496, + 825, + 893, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "16464", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Pablo F Alcantarilla, Simon Stent, German Ros, Roberto Arroyo, and Riccardo Gherardi. Street-view change detection with deconvolutional networks. Autonomous Robots, 42: 1301-1322, 2018. 3", + "[2] Dhruv Batra, Angel X. Chang, Sonia Chernova, Andrew J. Davison, Jia Deng, Vladlen Koltun, Sergey Levine, Jitendra Malik, Igor Mordatch, Roozbeh Mottaghi, Manolis Savva, and Hao Su. Rearrangement: A challenge for embodied ai, 2020. 1, 2, 3, 5", + "[3] Edward Beeching, Jilles Dibangoye, Olivier Simonin, and Christian Wolf. Learning to plan with uncertain topological maps. In European Conference on Computer Vision, pages 473-490. Springer, 2020. 2", + "[4] Shuhui Bu, Qing Li, Pengcheng Han, Pengyu Leng, and Ke Li. Mask-cdnet: A mask based pixel change detection network. Neurocomputing, 378:166-178, 2020. 3", + "[5] Yuri Burda, Harri Edwards, Deepak Pathak, Amos Storkey, Trevor Darrell, and Alexei A. Efros. Large-scale study of curiosity-driven learning. In ICLR, 2019. 2", + "[6] Devendra Singh Chaplot, Dhiraj Gandhi, Saurabh Gupta, Abhinav Gupta, and Ruslan Salakhutdinov. Learning to explore using active neural slam. In International Conference on Learning Representations (ICLR), 2020. 2", + "[7] Devendra Singh Chaplot, Helen Jiang, Saurabh Gupta, and Abhinav Gupta. Semantic curiosity for active visual learning. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part VI 16, pages 309–326. Springer, 2020. 2", + "[8] Devendra Singh Chaplot, Murtaza Dalal, Saurabh Gupta, Jitendra Malik, and Russ R Salakhutdinov. Seal: Self-supervised embodied active learning using exploration and 3d consistency. Advances in neural information processing systems, 34:13086-13098, 2021. 
3", + "[9] Chao-Peng Chen, Jun-Wei Hsieh, Ping-Yang Chen, Yi-Kuan Hsieh, and Bor-Shiun Wang. Saras-net: scale and relation aware siamese network for change detection. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 14187-14195, 2023. 2, 3", + "[10] Shuo Chen, Kailun Yang, and Rainer Stiefelhagen. Drtanet: Dynamic receptive temporal attention network for street scene change detection. In 2021 IEEE Intelligent Vehicles Symposium (IV), pages 502-509. IEEE, 2021. 3", + "[11] Tao Chen, Saurabh Gupta, and Abhinav Gupta. Learning exploration policies for navigation. In International Conference on Learning Representations, 2019. 2", + "[12] Matt Deitke, Eli VanderBilt, Alvaro Herrasti, Luca Weihs, Jordi Salvador, Kiana Ehsani, Winson Han, Eric Kolve, Ali Farhadi, Aniruddha Kembhavi, et al. Procthor: Large-scale embodied ai using procedural generation. arXiv preprint arXiv:2206.06994, 2022. 2, 5", + "[13] Edsger W Dijkstra. A note on two problems in connexion with graphs. Numerische mathematik, 1(1):269-271, 1959. 3, 5", + "[14] H. Durrant-Whyte and T. Bailey. Simultaneous localization and mapping: part i. IEEE Robotics & Automation Magazine, 13(2):99-110, 2006. 2" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Martin Ester, Hans-Peter Kriegel, Jörg Sander, Xiaowei Xu, et al. A density-based algorithm for discovering clusters in large spatial databases with noise. In kdd, pages 226-231, 1996. 4", + "[16] Kuan Fang, Alexander Toshev, Li Fei-Fei, and Silvio Savarese. Scene memory transformer for embodied agents in long-horizon tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[17] Kuan Fang, Alexander Toshev, Li Fei-Fei, and Silvio Savarese. Scene memory transformer for embodied agents in long-horizon tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 538–547, 2019. 2", + "[18] Samir Yitzhak Gadre, Kiana Ehsani, Shuran Song, and Roozbeh Mottaghi. Continuous scene representations for embodied ai. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14849-14859, 2022. 1, 2", + "[19] Enqiang Guo, Xinsha Fu, Jiawei Zhu, Min Deng, Yu Liu, Qing Zhu, and Haifeng Li. Learning to measure change: Fully convolutional siamese metric networks for scene change detection. arXiv preprint arXiv:1810.09111, 2018.3", + "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 4", + "[21] Dinesh Jayaraman and Kristen Grauman. Learning to look around: Intelligently exploring unseen environments for unknown tasks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2", + "[22] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 2", + "[23] Eric Kolve, Roozbeh Mottaghi, Winson Han, Eli VanderBilt, Luca Weihs, Alvaro Herrasti, Matt Deitke, Kiana Ehsani, Daniel Gordon, Yuke Zhu, et al. Ai2-thor: An interactive 3d environment for visual ai. arXiv preprint arXiv:1712.05474, 2017.6", + "[24] Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly, 2(1-2):83-97, 1955. 
5", + "[25] Weijie Li, Xinhang Song, Yubing Bai, Sixian Zhang, and Shuqiang Jiang. ION: instance-level object navigation. In MM '21: ACM Multimedia Conference, Virtual Event, China, October 20 - 24, 2021, pages 4343-4352. ACM, 2021. 2", + "[26] Haotong Lin, Qianqian Wang, Ruojin Cai, Sida Peng, Hadar Averbuch-Elor, Xiaowei Zhou, and Noah Snively. Neural scene chronology. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20752-20761, 2023. 2, 3", + "[27] Kevin Matzen and Noah Snavely. Scene chronology. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pages 615-630. Springer, 2014. 2, 3" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "16465", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] So Yeon Min, Devendra Singh Chaplot, Pradeep Ravikumar, Yonatan Bisk, and Ruslan Salakhutdinov. Film: Following instructions in language with modular methods. arXiv preprint arXiv:2110.07342, 2021. 3", + "[29] Medhini Narasimhan, Erik Wijmans, Xinlei Chen, Trevor Darrell, Dhruv Batra, Devi Parikh, and Amanpreet Singh. Seeing the un-scene: Learning amodal semantic maps for room navigation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVIII 16, pages 513-529. Springer, 2020. 2", + "[30] Deepak Pathak, Pulkit Agrawal, Alexei A Efros, and Trevor Darrell. Curiosity-driven exploration by self-supervised prediction. In International conference on machine learning, pages 2778-2787. PMLR, 2017. 2", + "[31] Deepak Pathak, Dhiraj Gandhi, and Abhinav Gupta. Self-supervised exploration via disagreement. In International conference on machine learning, pages 5062-5071. PMLR, 2019. 2", + "[32] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 5, 6", + "[33] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2", + "[34] Santhosh K Ramakrishnan, Ziad Al-Halah, and Kristen Grauman. Occupancy anticipation for efficient exploration and navigation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part V 16, pages 400-418. Springer, 2020. 2", + "[35] Santhosh K Ramakrishnan, Dinesh Jayaraman, and Kristen Grauman. An exploration of embodied visual exploration. International Journal of Computer Vision, 129:1616-1649, 2021. 2", + "[36] Vijaya Raghavan T Ramkumar, Elahe Arani, and Bahram Zonooz. Differencing based self-supervised pretraining for scene change detection. In Conference on Lifelong Learning Agents, pages 952-965. PMLR, 2022. 2, 3, 5", + "[37] Ken Sakurada, Mikiya Shibuya, and Weimin Wang. Weakly supervised silhouette-based semantic scene change detection. In 2020 IEEE International conference on robotics and automation (ICRA), pages 6861-6867. IEEE, 2020. 2, 3", + "[38] Gabriel Sarch, Zhaoyuan Fang, Adam W Harley, Paul Schydlo, Michael J Tarr, Saurabh Gupta, and Katerina Fragkiadaki. 
Tidee: Tidying up novel rooms using visuo-semantic commonsense priors. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIX, pages 480-496. Springer, 2022. 1, 2, 3, 6, 7", + "[39] Nikolay Savinov, Anton Raichuk, Raphaël Marinier, Damien Vincent, Marc Pollefeys, Timothy Lillicrap, and Sylvain Gelly. Episodic curiosity through reachability. In International Conference on Learning Representations (ICLR), 2019. 2" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[40] Grant Schindler and Frank Dellaert. Probabilistic temporal inference on reconstructed 3d scenes. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1410-1417. IEEE, 2010. 2, 3", + "[41] Brandon Trabucco, Gunnar Sigurdsson, Robinson Piramuthu, Gaurav S Sukhatme, and Ruslan Salakhutdinov. A simple approach for visual rearrangement: 3d mapping and semantic search. arXiv preprint arXiv:2206.13396, 2022. 1, 2, 3, 6, 7", + "[42] Guo-Hua Wang, Bin-Bin Gao, and Chengjie Wang. How to reduce change detection to semantic segmentation. Pattern Recognition, 138:109384, 2023. 2, 3", + "[43] Xiaohan Wang, Yuehu Liu, Xinhang Song, Beibei Wang, and Shuqiang Jiang. Generating explanations for embodied action decision from visual observation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 2838-2846, 2023. 2", + "[44] Xiaohan Wang, Yuehu Liu, Xinhang Song, Beibei Wang, and Shuqiang Jiang. Camp: Causal multi-policy planning for interactive navigation in multi-room scenes. Advances in Neural Information Processing Systems, 36, 2024. 2", + "[45] Luca Weihs, Matt Deitke, Aniruddha Kembhavi, and Roozbeh Mottaghi. Visual room rearrangement. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5922-5931, 2021. 1, 2, 3, 5, 6, 7", + "[46] Haitao Zeng, Xinhang Song, and Shuqiang Jiang. Multi-object navigation using potential target position policy function. IEEE Transactions on Image Processing, 2023. 2", + "[47] Sixian Zhang, Weijie Li, Xinhang Song, Yubing Bai, and Shuqiang Jiang. Generative meta-adversarial network for unseen object navigation. In Computer Vision - ECCV 2022 - 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIX, pages 301-320.", + "[48] Sixian Zhang, Xinhang Song, Yubing Bai, Weijie Li, Yakui Chu, and Shuqiang Jiang. Hierarchical object-to-zone graph for object navigation. In 2021 IEEE/CVF International Conference on Computer Vision, ICCV 2021, Montreal, QC, Canada, October 10-17, 2021, pages 15110-15120. IEEE, 2021.", + "[49] Sixian Zhang, Xinhang Song, Weijie Li, Yubing Bai, Xinyao Yu, and Shuqiang Jiang. Layout-based causal inference for object navigation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10792-10802, 2023. 
2" + ], + "bbox": [ + 501, + 92, + 890, + 712 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "16466", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Category Agnostic Model for Visual Rearrangment/4b328694-69ab-47a2-83d3-ce2efe00b0f0_model.json b/2024/A Category Agnostic Model for Visual Rearrangment/4b328694-69ab-47a2-83d3-ce2efe00b0f0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f83d7ebeb401f703095f4e9ed4864c5796f623f2 --- /dev/null +++ b/2024/A Category Agnostic Model for Visual Rearrangment/4b328694-69ab-47a2-83d3-ce2efe00b0f0_model.json @@ -0,0 +1,2057 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.131, + 0.757, + 0.154 + ], + "angle": 0, + "content": "A Category Agnostic Model for Visual Rearrangement" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.18, + 0.793, + 0.201 + ], + "angle": 0, + "content": "Yuyi Liu\\(^{1,2}\\), Xinhang Song\\(^{1,2}\\), Weijie Li\\(^{1,2}\\), Xiaohan Wang\\(^{1}\\), Shuqiang Jiang\\(^{1,2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.201, + 0.793, + 0.27 + ], + "angle": 0, + "content": "\\(^{1}\\)Key Lab of Intelligent Information Processing Laboratory of the Chinese Academy of Sciences (CAS), Institute of Computing Technology, Beijing \\(^{2}\\)University of Chinese Academy of Sciences, Beijing {yuyi.liu, xinhang song, weijie.li, xiaohan.wang}@vipl.ict.ac.cn sqjiang@ict.ac.cn" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.29, + 0.314, + 0.305 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.474, + 0.718 + ], + "angle": 0, + "content": "This paper presents a novel category agnostic model for visual rearrangement task, which can help an embodied agent to physically recover the shuffled scene configuration without any category concepts to the goal configuration. Previous methods usually follow a similar architecture, completing the rearrangement task by aligning the scene changes of the goal and shuffled configuration, according to the semantic scene graphs. However, constructing scene graphs requires the inference of category labels, which not only causes the accuracy drop of the entire task but also limits the application in real world scenario. In this paper, we delve deep into the essence of visual rearrangement task and focus on the two most essential issues, scene change detection and scene change matching. We utilize the movement and the protrusion of point cloud to accurately identify the scene changes and match these changes depending on the similarity of category agnostic appearance feature. Moreover, to assist the agent to explore the environment more efficiently and comprehensively, we propose a closer-aligned-retrace exploration policy, aiming to observe more details of the scene at a closer distance. We conduct extensive experiments on AI2THOR Rearrangement Challenge based on RoomR dataset and a new multi-room multi-instance dataset MrMiR collected by us. 
The experimental results demonstrate the effectiveness of our proposed method." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.752, + 0.21, + 0.768 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Rearrangement task remains a practical challenge for embodied agents that assist humans in real life, whose goal is to bring a given physical environment into the goal state with a goal specification [2]. In this paper, we focus on a branch of the general rearrangement task based on ExperienceGoal, i.e., visual rearrangement task[45], which requires an agent to recover the scene configuration after it was shuffled randomly. Due to the excessive complexity of" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.288, + 0.891, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.481, + 0.894, + 0.538 + ], + "angle": 0, + "content": "Figure 1. Influence of different scene change representations on scene change detection sensitivity and scene change matching simplicity. To strike a balance between these two issues, we select point cloud as our scene change representation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.893, + 0.732 + ], + "angle": 0, + "content": "the state space, the end-to-end deep reinforcement learning methods previously used for navigation struggle to cover this task, resulting in performance only marginally above chance [18, 45]. Recent works demonstrate that the modular methods, such as MaSS [41] and TIDEE [38], effectively reduce the complexity of the rearrangement task by dividing the task into several modules. These methods use a pre-trained detector to assign category labels to each object and infer the rearrangement goals through matching the semantic scene graphs of both the goal and shuffled configuration." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.895, + 0.903 + ], + "angle": 0, + "content": "However, the introduction of category information may not be that necessary as the essential goal of visual rearrangement task is equal to \"make it like what it was before\". Even without the category labels, we can still perform the task by memorizing the appearance characteristics and the state information of objects in the scene. Besides, due to the limited accuracy of the detector, the transition from visual input to category information will inevitably lead to errors, which can accumulate and propagate to subsequent modules, thereby causing the accuracy drop of the entire task. Previous works achieve large gains with ground-truth se" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "16457" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.243 + ], + "angle": 0, + "content": "mantic segmentation[18, 41]. Moreover, there are inherent limitations of the methodologies based on category information. Once the detector is trained, these methods are restricted to a fixed set of categories and powerless against the object categories not previously observed in training environment. Using zero-shot methods, such as SAM [22] combined with CLIP [33], can considerably expand the known categories, but they are still within a limited set. It is impractical to retrain the model every time a new object category emerges due to the extensive resources required." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.245, + 0.473, + 0.426 + ], + "angle": 0, + "content": "To address the above problems, our motivation is to identify all scene changes in the room and restore them, regardless of any category. Previous methods use semantic labels for SCM because these labels provide a high-level representation of objects and make SCM straightforward. However, the scene changes can be represented in numerous ways, ranging from pixel to point cloud, and up to label combined with positional information. As shown in Fig. 1, there is an inherent trade-off: while simpler representations minimize information loss during conversion and enhance the sensitivity of SCD, they simultaneously complicate the process of SCM." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.429, + 0.473, + 0.581 + ], + "angle": 0, + "content": "Point cloud can serve as an appropriate representation of scene change, as it captures rich geometric, positional, and scale information of objects and remains robust against varied observation angles and obstructions from other objects. Leveraging point cloud facilitates efficient SCD and also provides richer appearance information for SCM. However, due to the inherent unordered nature and rotational invariance of point cloud, it is difficult to match the point cloud directly. We need to extract high-dimensional appearance features from point cloud for SCM." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.582, + 0.473, + 0.884 + ], + "angle": 0, + "content": "Based on these observations, we propose a category ag- nostic model for visual rearrangement task called CAVR, to the best of our knowledge, this is the first attempt for visual rearrangement without category inferring. By utilizing point cloud as the scene change representation, CAVR can recover the scene configuration to its goal state without any category concepts. In CAVR, we introduce a closeraligned-retrace exploration policy to help agent conduct exploration effectively for SCD. Meanwhile, we maintain a diff-cloud, which consists of two components, one for the point cloud moved and another for the point cloud protruding in the shuffled scene configuration, compared to the goal configuration. The diff-cloud precisely captures the variations occurring throughout the scene. After exploration, we utilize the pre-trained appearance feature extractor to embed the diff-cloud and then match the scene changes across various locations based on the similarity of appearance feature, resulting in a series of rearrangement goals. Then we use a planning-based policy to restore them to their goal states in succession." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.472, + 0.902 + ], + "angle": 0, + "content": "We conduct experiments on AI2THOR Rearrangement" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.213 + ], + "angle": 0, + "content": "Challenge based on the RoomR dataset[45] and shows improvements on both the success rate and the portion of successfully fixed objects. To cater to more practical demands, we introduce a multi-room multi-instance rearrangement dataset MrMiR based on ProcTHOR simulator[12]. The experimental results on MrMiR dataset fully demonstrate the effectiveness of our method in the complex multi-room environment." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.226, + 0.651, + 0.243 + ], + "angle": 0, + "content": "2. 
Related Works" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.251, + 0.895, + 0.524 + ], + "angle": 0, + "content": "Rearrangement The general rearrangement problem [2] aims to transform the environment from an initial state to a goal state through interaction. We focus on an instantiation of the rearrangement problem[45], in which the goal state is specified by immersing the agent in the goal environment and allowing the agent to explore autonomously. Prior works can be classified into two categories, end-to-end reinforcement learning and modular methods. The end-to-end methods [18, 45] perform poorly mainly due to the large action space and complex stages in the task. Comparatively, the modular methods [38, 41] have shown surprising progress in improving the success rate. In detail, Mass[41] proposes a semantic policy with a voxel-based semantic map to find and match the changed objects. TIDEE[38] utilizes the spatial relationships between objects to determine the changed objects. Motivated by prior works, we also propose a modular method, while our model can perform the task without any category information." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.524, + 0.895, + 0.659 + ], + "angle": 0, + "content": "Visual exploration Visual exploration refers to the process in which an agent collects information about the 3D environment through motion and perception [14, 29, 30, 35]. For visual exploration, efficiency is of utmost significance, involving how to access a broader range of regions [3, 6, 17, 39], observe more objects [16] and obtain a larger volume of environmental information relevant to downstream tasks (such as navigation) [25, 43, 44, 46-49] within a certain budget." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.895, + 0.78 + ], + "angle": 0, + "content": "To improve the efficiency of exploration, several methods have employed ideas like curiosity [5, 7, 30, 31], coverage [6, 11] and reconstruction [21, 34]. Most related to ours is the coverage-based works, which try to maximize the area seen in the environment [6, 11]. In our exploration policy, both the area explored and the observation distance are considered simultaneously to accurately observe more details of the scene." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.781, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Scene Change Detection Scene change detection (SCD) refers to the task of identifying and localizing changes of a scene captured at different times[9, 26, 27, 36, 37, 40, 42]. Depending on the types of scene representation, methods are classified into two categories, respectively, 2D domain and 3D domain[37]. The first one devises specific neural networks to process the image pair taken at different times and generate a pixel-level prediction, namely, each pixel is" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "16458" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.244 + ], + "angle": 0, + "content": "classified into a category of change[1, 4, 9, 10, 19, 36, 37, 42]. Some studies focus on the scene change detection in 3D domain. They aim to reconstruct a time-varying 3D model from images taken from multiple viewpoints at different times and represent the temporal scene changes over several decades[26, 27, 40]. 
In our task, we not only focus on identifying changes in the 3D environment, but also emphasize the importance of matching changes across various locations, which is crucial for enabling the agent to accurately recover the scene configuration." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.255, + 0.17, + 0.272 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.28, + 0.473, + 0.538 + ], + "angle": 0, + "content": "Given the intrinsic complexity of visual rearrangement task, in this section, we present a modular approach to tackle the task, decomposing it into manageable subtasks including visual exploration, scene change detection and scene change matching. Our pipeline is illustrated in Fig. 2. We start this section by giving the definition of visual rearrangement task in Sec. 3.1. Then we describe the three modules separately. The visual exploration module (Sec. 3.2) requires the agent to explore the environment efficiently and comprehensively while retaining memory of the environment. Subsequently, the scene change detection module (Sec. 3.3) utilizes the agent's memory of the goal environment and compares it with the current environment to identify all scene changes. Then to recover the goal configuration, the scene change matching module (Sec. 3.4) is proposed to correlate these changes across different areas within the scene and infer the rearrangement goals." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.546, + 0.329, + 0.563 + ], + "angle": 0, + "content": "3.1. Visual Rearrangement Task" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.569, + 0.473, + 0.81 + ], + "angle": 0, + "content": "According to the commonly accepted norms in the community[2], the rearrangement task is defined in a general form, where an agent is initialized in a starting state \\( s^0 \\) and required to transform the environment from \\( s^0 \\) to the goal state \\( s^* \\in S^* \\) with the possible actions \\( a \\in A \\). The environment state space is denoted as the Cartesian product of the pose spaces of all rigid parts: \\( S = (R^3 \\times SO3) \\times (R^3 \\times SO3) \\ldots \\times (R^3 \\times SO3) \\), where \\( R^3 \\) and \\( SO3 \\) represent the 3D locations and rotations space. Follow the Partially Observable Markov Decision Processes (POMDP), the agent typically has no access to any state space and must operate purely based on the sensory observations \\( o \\in O \\) and the given goal specification \\( g = \\phi(s^0, S^*) \\). Based on different goal specification forms (GeometricGoal, ImageGoal, LanguageGoal, ExperienceGoal, et al.), the general rearrangement task has various levels of difficulty." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.472, + 0.903 + ], + "angle": 0, + "content": "We consider an instance of rearrangement task proposed by Weihs et al.[45], which adopts the ExperienceGoal as the goal specification \\( g \\) and is defined as a two-stage task, including the walkthrough and unshuffle stages. During the walkthrough stage, the agent is immersed in a room of goal state \\( s^* \\) and allowed to explore autonomously. Sequentially," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.153 + ], + "angle": 0, + "content": "the walkthrough environment is shuffled and some objects' states are changed, denoted as the unshuffle stage, where the agent officially starts the rearrangement task and reorganizes the shuffled scene configuration back." 
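 +For concreteness, the sketch below spells out one ExperienceGoal episode under this formulation; the `env` and `agent` interfaces, the pose tolerances, and the helper names are hypothetical illustrations for exposition, not part of the RoomR or AI2THOR API. + +```python +from dataclasses import dataclass +import numpy as np + +@dataclass +class RigidState: + position: np.ndarray # location in R^3 + rotation: np.ndarray # 3x3 rotation matrix in SO(3) + +def poses_match(a: RigidState, b: RigidState, pos_tol=0.05, rot_tol=0.1) -> bool: + """Approximate equality of two rigid-part states (tolerances are illustrative).""" + return (np.linalg.norm(a.position - b.position) < pos_tol + and np.linalg.norm(a.rotation - b.rotation) < rot_tol) + +def run_episode(env, agent): + """Skeleton of the two-stage task: walkthrough (observe s*), then unshuffle (restore it).""" + env.reset_to_goal_state() # walkthrough stage: agent is immersed in s* + for obs in agent.explore(env): # only sensory observations o in O are available + agent.memorize(obs, stage="walkthrough") + + env.shuffle() # unshuffle stage: some object states are changed + for obs in agent.explore(env): + agent.memorize(obs, stage="unshuffle") + agent.rearrange(env) # interact through actions a in A + + # The episode succeeds only if every rigid part is back in its goal pose. + return all(poses_match(s, s_goal) for s, s_goal in env.compare_states()) +``` +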
+ }, + { + "type": "title", + "bbox": [ + 0.499, + 0.161, + 0.682, + 0.177 + ], + "angle": 0, + "content": "3.2. Visual Exploration" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.184, + 0.892, + 0.365 + ], + "angle": 0, + "content": "Under the two-stage rearrangement task, the initial exploration of the target environment is critical for the subsequent stages, since the agent is expected to acquire more object information in the fewest number of steps. Previous works adopt coverage-based exploration [38] or a search policy based on the expert distribution of objects [41]. However, there are usually many small-sized objects distributed across the scene, which can be easily overlooked or obscured by large entities when observed from a distance. Therefore, we propose a closer-aligned-retrace exploration policy, aiming to observe more objects at a closer distance to improve the observation accuracy and completeness." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.366, + 0.892, + 0.44 + ], + "angle": 0, + "content": "The core idea of the proposed policy is to build an observation distance map \\(m_{o} \\in \\mathbb{R}^{H \\times W}\\), where each grid denotes the minimum distance at which the current coordinate point is observed by the agent. Through the optimization of \\(m_{o}\\), the agent can be guided to observe objects closer." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.441, + 0.892, + 0.577 + ], + "angle": 0, + "content": "In exploration, at each timestep, the agent obtains visual observation RGBD and updates its own pose. Following previous work [8, 28, 41], we also build a 2D obstacle map \\( m_{t} \\in \\mathbb{R}^{H \\times W} \\) with the proposed observation distance map \\( m_{o} \\). At the beginning of the exploration, due to the limited range of movement, the observation distance map \\( m_{o} \\) predominantly consists of high distance values. Therefore, the visual exploration policy \\( \\pi \\) can be represented by optimizing a function \\( f \\) of \\( m_{o} \\) and a distance thresh \\( \\epsilon_{d} \\)." + }, + { + "type": "equation", + "bbox": [ + 0.634, + 0.587, + 0.758, + 0.603 + ], + "angle": 0, + "content": "\\[\n\\pi (a) = f \\left(m _ {o}, \\epsilon_ {d}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.613, + 0.892, + 0.794 + ], + "angle": 0, + "content": "The goal of optimization is to minimize the observation distance map (i.e., \\(\\min(m_o) \\leq \\epsilon_d\\)). We employ an analytical approach to obtain the solution. Specifically, based on the current observation distance map \\(m_o\\), we select a waypoint as the next exploration goal and apply the route planning Dijkstra algorithm [13] to generate a path on the obstacle map \\(m_t\\). As to the waypoint selection, we prioritize selecting those with higher distance values on the distance map, aiming to observe objects closer. To better compare the shuffled and goal state of the scene for rearrangement goals inference, in the unshuffle stage, the agent tries its best to replicate the trajectory of the walkthrough stage." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.802, + 0.722, + 0.818 + ], + "angle": 0, + "content": "3.3. Scene Change Detection" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Detecting changes within the scene is a critical capability for an agent to perform rearrangement tasks. We maintain a diff-cloud to represent the scene changes. As shown in Fig. 
2 (b), the diff-cloud consists of two parts. The red and blue points respectively represent the moved and protruding" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "16459" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.093, + 0.891, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.43, + 0.896, + 0.528 + ], + "angle": 0, + "content": "Figure 2. Pipeline of our CAVR model. (a) The gradient color transitioning from blue to red in the observation distance map represents the distances ranging from \\(0\\mathrm{m}\\) to \\(5\\mathrm{m}\\). We adopt a closer-aligned-retrace exploration policy to observe more details by optimizing a function of the distance map. (b) Scene change detection is performed by comparing the point clouds corresponding to the goal configuration and the shuffled configuration of the scene, recording the moved part (blue points) and the protruding part (red points) to construct the diff-cloud. (c) We extract the entity-layer information from the two parts of the diff-cloud and match these entities depending on the similarity of category agnostic appearance feature. (d) After the matching process, we obtain a series of rearrangement goals with their goal states (indicated by the dashed bounding boxes) and shuffled states (indicated by the solid bounding boxes)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.554, + 0.471, + 0.601 + ], + "angle": 0, + "content": "point clouds in the shuffled configuration, compared to the goal configuration. Next, we explain how to construct the diff-cloud using visual inputs from the two stages." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.607, + 0.47, + 0.744 + ], + "angle": 0, + "content": "During the walkthrough stage, at each pose \\( p^w \\) of the agent, we employ the depth information \\( D_{p^w} \\) to generate an egocentric point cloud \\( c_{p^w}^{ego} \\). Each point in \\( c_{p^w}^{ego} \\) is associated with a pixel in depth \\( D_{p^w} \\). Then we convert \\( c_{p^w}^{ego} \\) from the agent's coordinate system to global coordinate system, resulting in a geocentric point cloud \\( c_{p^w}^{geo} \\). For the observed RGB image \\( I_{p^w} \\), we adopt the pre-trained resnet18 model [20] provided by the official PyTorch to extract a visual feature map \\( f_{p^w} \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.471, + 0.902 + ], + "angle": 0, + "content": "During the unshuffle stage, we use the same method to generate the geocentric point cloud \\( c_{p^u}^{geo} \\) and the feature map \\( f_{p^u} \\) for each pose \\( p^u \\). If \\( p^u \\) aligns with a previous pose \\( p^w \\) in the walkthrough stage, we compare the two corresponding point clouds, \\( c_{p^w}^{geo} \\) and \\( c_{p^u}^{geo} \\). A considerable shift between two point coordinates associated with the same pixel indicates the changes have occurred in this location. Specifically, the increase in distance from the agent suggests removal of some objects, while the decrease signifies objects addition. Based on the distance variations, these" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.554, + 0.895, + 0.66 + ], + "angle": 0, + "content": "points are allocated to the moved part and the protruding part of the diff-cloud, respectively. Moreover, for the area implying scene changes, we extract the corresponding visual feature from the feature map and assign it to each point in that region. 
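 +As a concrete illustration of this construction, the sketch below back-projects a depth image into a geocentric point cloud and splits pose-aligned walkthrough/unshuffle observations into the moved and protruding parts. The camera intrinsics, the 0.1 m shift threshold, and the function names are illustrative assumptions, and the per-point visual feature \(v\) taken from the ResNet18 feature map is omitted for brevity. + +```python +import numpy as np + +def depth_to_world_points(depth, intrinsics, cam_to_world): + """Back-project a depth image (H, W) into a geocentric point cloud (H*W, 3).""" + fx, fy, cx, cy = intrinsics + h, w = depth.shape + u, v = np.meshgrid(np.arange(w), np.arange(h)) + pts_ego = np.stack([(u - cx) * depth / fx, # egocentric coordinates + (v - cy) * depth / fy, + depth], axis=-1).reshape(-1, 3) + pts_h = np.concatenate([pts_ego, np.ones((len(pts_ego), 1))], axis=1) + return (pts_h @ cam_to_world.T)[:, :3] # geocentric coordinates + +def update_diff_cloud(depth_walk, depth_unshuffle, pose, intrinsics, shift_thresh=0.1): + """Split pose-aligned observations into the two parts of the diff-cloud. + + A pixel whose depth increased suggests an object was removed there, so its + walkthrough points join the moved part; a pixel whose depth decreased + suggests an added object, so its unshuffle points join the protruding part. + """ + pts_w = depth_to_world_points(depth_walk, intrinsics, pose) + pts_u = depth_to_world_points(depth_unshuffle, intrinsics, pose) + shift = (depth_unshuffle - depth_walk).reshape(-1) + moved = pts_w[shift > shift_thresh] + protruding = pts_u[shift < -shift_thresh] + return moved, protruding + +# Toy usage: identical 4x4 depth maps except one pixel where an object now protrudes. +intr = (100.0, 100.0, 2.0, 2.0) # fx, fy, cx, cy (illustrative values) +pose = np.eye(4) # camera-to-world transform at an aligned pose +d_walk = np.full((4, 4), 2.0) +d_unsh = d_walk.copy() +d_unsh[1, 1] = 1.0 +moved, protruding = update_diff_cloud(d_walk, d_unsh, pose, intr) +print(len(moved), len(protruding)) # -> 0 1 +``` +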
Each point in the diff-cloud is represented as \\(\\{x,y,z,v\\}\\), where \\(x,y,z\\) is the 3D coordinate in the global coordinate system and \\(v\\) is the visual feature." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.677, + 0.724, + 0.694 + ], + "angle": 0, + "content": "3.4. Scene Change Matching" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.702, + 0.892, + 0.854 + ], + "angle": 0, + "content": "After exploration of the walkthrough stage and the unshuffle stage, we acquire the comprehensive diff-cloud that encompasses changes in all areas of the scene. Note that in the visual rearrangement task settings, objects cannot disappear into thin air, they are simply moved from one place to another. Therefore, to recover the scene configuration, we need to match changes across various locations in the scene. Since the diff-cloud contains only some points in space, we first extract the entity-layer information from it and then perform matching operations on the entity-level." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.895, + 0.903 + ], + "angle": 0, + "content": "We apply the density-based clustering algorithm DBSCAN [15] separately to the two parts of the diff-cloud, resulting in two sets of entities, a moved entity set" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "16460" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.091, + 0.47, + 0.152 + ], + "angle": 0, + "content": "\\(\\Omega^m = \\{\\omega_1^m,\\omega_2^m,\\dots ,\\omega_k^m\\}\\) and an protruding entity set \\(\\Omega^p = \\{\\omega_1^p,\\omega_2^p,\\dots ,\\omega_l^p\\}\\). Each entity in these two sets is a collection of some points in the diff-cloud: \\(\\omega = \\{(x_{1},y_{1},z_{1},v_{1}),(x_{2},y_{2},z_{2},v_{2}),\\ldots ,(x_{n},y_{n},z_{n},v_{n})\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.153, + 0.471, + 0.304 + ], + "angle": 0, + "content": "As shown in Fig. 2(c), the scene change matching process can be regarded as the weighted bipartite graph matching between \\(\\Omega^m\\) and \\(\\Omega^p\\). We construct a bipartite graph \\(G = (\\Omega^m \\cup \\Omega^p, E)\\), where \\(\\Omega^m \\cup \\Omega^p\\) is the node set and \\(E\\) represents the all fully connected edge set. Every edge \\(e \\in E\\) has one end node in \\(\\Omega^m\\) and the other end node in \\(\\Omega^p\\). The function \\(\\phi\\) assigns a positive weight value to each edge. A matching \\(M\\) is a subset of \\(E\\) such that each node in \\(\\Omega^m \\cup \\Omega^p\\) appears in at most one edge in \\(M\\). Our goal is to find the maximum matching:" + }, + { + "type": "equation", + "bbox": [ + 0.184, + 0.313, + 0.362, + 0.345 + ], + "angle": 0, + "content": "\\[\nM ^ {*} = \\operatorname * {a r g m a x} _ {M} \\sum_ {e \\in M} \\phi (e),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.356, + 0.469, + 0.388 + ], + "angle": 0, + "content": "where \\(e = e(\\omega_i^m,\\omega_j^p)\\) represents the edge matching node \\(\\omega_{i}^{m}\\) and \\(\\omega_{j}^{p}\\)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.387, + 0.47, + 0.552 + ], + "angle": 0, + "content": "The role of the weight function \\(\\phi(e)\\) is to determine the possibility that \\(\\omega_i^m\\) and \\(\\omega_j^p\\) belong to the same instance. Based on this, we design the weight function to calculate the similarity in appearance of these two nodes. 
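 +A minimal sketch of this entity extraction and matching step is given below, using scikit-learn's DBSCAN and SciPy's Hungarian solver in place of a dedicated Kuhn-Munkres implementation; a single placeholder feature vector per entity stands in for the combined geometric and visual appearance embedding described in this section. + +```python +import numpy as np +from scipy.optimize import linear_sum_assignment +from sklearn.cluster import DBSCAN + +def cluster_entities(points, eps=0.3, min_samples=10): + """Group diff-cloud points (N, 3) into entities via DBSCAN; label -1 marks noise.""" + labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(points) + return [points[labels == k] for k in sorted(set(labels)) if k != -1] + +def cosim(a, b): + return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8)) + +def match_entities(feats_m, feats_p): + """Maximum-weight bipartite matching between moved and protruding entity features. + + Returns index pairs (i, j): entity i of the moved set and entity j of the + protruding set are interpreted as the goal and shuffled state of one instance. + """ + weights = np.array([[cosim(fm, fp) for fp in feats_p] for fm in feats_m]) + rows, cols = linear_sum_assignment(-weights) # negate to maximize the total weight + return list(zip(rows.tolist(), cols.tolist())) + +# Toy usage: two separated blobs are recovered as two entities, and permuted +# near-copies of random placeholder features are matched back to their originals. +rng = np.random.default_rng(0) +blob = rng.normal(scale=0.05, size=(50, 3)) +print(len(cluster_entities(np.vstack([blob, blob + 2.0])))) # -> 2 +feats_m = [rng.normal(size=8) for _ in range(3)] +feats_p = [feats_m[i] + 0.01 * rng.normal(size=8) for i in (2, 0, 1)] +print(match_entities(feats_m, feats_p)) # -> [(0, 1), (1, 2), (2, 0)] +``` +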
The appearance of each entity \\(\\omega\\) is considered from two aspects: geometric feature geo and visual feature vis. For vis, we use the average of the visual features of all the points in this entity. In terms of geo, we train a geometric feature extractor, which builds upon PointNet++[32] and embeds the raw point cloud data. The weight function \\(\\phi\\) is specifically defined as" + }, + { + "type": "equation", + "bbox": [ + 0.077, + 0.563, + 0.479, + 0.583 + ], + "angle": 0, + "content": "\\[\n\\phi \\big (e \\left(\\omega_ {i} ^ {m}, \\omega_ {j} ^ {p}\\right) \\big) = C o s i m \\left(g e o _ {i} ^ {m}, g e o _ {j} ^ {p}\\right) + C o s i m \\left(v i s _ {i} ^ {m}, v i s _ {j} ^ {p}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.591, + 0.373, + 0.606 + ], + "angle": 0, + "content": "where Cosim refers to the cosine similarity." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.606, + 0.47, + 0.789 + ], + "angle": 0, + "content": "Then the Kuhn-Munkres algorithm [24] is adopted to solve this maximum matching problem. Once entities are matched, we acquire entity pairs \\(\\{(\\omega_1^m,\\omega_{j_1}^p),(\\omega_2^m,\\omega_{j_2}^p),\\ldots ,(\\omega_t^m,\\omega_{j_t}^p)\\}\\) as rearrangement goals. Each entity pair \\((\\omega_i^m,\\omega_{j_i}^p)\\) represents the two different states of the same instance, where \\(\\omega_{i}^{m}\\) denotes the goal state and \\(\\omega_{j_i}^p\\) denotes the current state of the instance. Subsequently, for the inferred rearrangement goals, we transport them to their goal states in succession, during which, we leverage the 2D obstacle map and Dijkstra algorithm [13] to conduct obstacle avoidance and navigation path planning." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.801, + 0.21, + 0.817 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Rearrangement task remains a practical challenge for embodied agents that assist humans in real life, whose goal is to bring a given physical environment into the goal state with a goal specification [2]. Each pixel is classified into a category of change[36]." + }, + { + "type": "table_caption", + "bbox": [ + 0.576, + 0.09, + 0.816, + 0.104 + ], + "angle": 0, + "content": "Table 1. Comparison on RoomR dataset" + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.115, + 0.894, + 0.192 + ], + "angle": 0, + "content": "
 +Method | Suc (%)↑ | FS (%)↑ | E↓ | Mis ↓ +
 +TIDEE | 11.7 | 28.9 | 0.715 | 0.734 +
 +MaSS | 4.7 | 16.5 | 1.016 | 1.018 +
 +Our | 14.2 | 33.1 | 0.714 | 0.707 +
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.5, + 0.196, + 0.88, + 0.227 + ], + "angle": 0, + "content": "\"Suc\": Success; \"FS\": Fixed Strict; \"E\": Energy Remain; \"Mis\": Misplaced." + }, + { + "type": "table_caption", + "bbox": [ + 0.565, + 0.243, + 0.828, + 0.257 + ], + "angle": 0, + "content": "Table 2. Comparison on our MrMiR dataset" + }, + { + "type": "table", + "bbox": [ + 0.501, + 0.268, + 0.894, + 0.346 + ], + "angle": 0, + "content": "
 +Method | Suc (%)↑ | FS (%)↑ | E↓ | Mis ↓ +
 +TIDEE | 1.0 | 14.1 | 0.917 | 0.924 +
 +MaSS | 0.6 | 10.5 | 1.019 | 1.026 +
 +Our | 5.0 | 28.7 | 0.7327 | 0.7134 +
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.5, + 0.349, + 0.88, + 0.38 + ], + "angle": 0, + "content": "\"Suc\": Success; \"FS\": Fixed Strict; \"E\": Energy Remain; \"Mis\": Misplaced." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.407, + 0.676, + 0.423 + ], + "angle": 0, + "content": "4.1. Experiment Setup" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.431, + 0.892, + 0.522 + ], + "angle": 0, + "content": "Dataset We evaluate our method on the AI2THOR Rearrangement Challenge based on the RoomR dataset[45], which consists of 80 rooms and 4000 tasks for training, and 20 rooms with 1000 tasks each for both validation and test. Each task in RoomR involves 1 to 5 objects with state changes, characterized by object locations or openness." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.524, + 0.893, + 0.901 + ], + "angle": 0, + "content": "In RoomR[45] dataset, the spatial range of object changes is limited due to the confined area with single-room scenes and the target objects to be rearranged are mainly category-wise, i.e., most categories only have one instance. To cater for the prevalent characteristics of indoor environments in reality, we build a more practical and challenging dataset MrMiR for the two-stage rearrangement task on the ProcTHOR simulator[12], where the change in the state of an object can involve a broader spatial range, even extending across different rooms. Besides, there exists multiple instances within the same category that have different appearance. The simulator ProcTHOR[12] respectively provides 10,000 training, 1000 valid and 1000 test apartments. For our task need, we totally select 6000 apartments in the simulator, splitting 5000 apartments for training, 500 apartments for validation, and 500 apartments for test. Each apartment contains multiple instances within the same category that have different appearance. For each apartment, we randomly generate one rearrangement task. Therefore, our MrMiR dataset totally contains 6000 rearrangement tasks, the same as RoomR. Fig. 3 illustrates the comparison of scene area distribution between our MrMiR dataset and RoomR dataset. It can be seen that our dataset encompasses a diverse range of scene area, while RoomR mainly focusing on small rooms under \\(100\\mathrm{m}^2\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "16461" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.333 + ], + "angle": 0, + "content": "To train the geometric feature extractor based on PointNet++[32], which embeds point cloud, we generate a dataset using AI2THOR[23]. We collect 77K sample pairs, of which 70K are used for training and 7K for testing. Each sample pair is composed of two point clouds, which may either represent the same instance (the positive pair) or different instances (the negative pair). The distribution of the positive and negative pairs is balanced, with a 1:1 ratio. Within each room of AI2THOR, we generate positive sample pairs by applying different transformation operations to the point cloud of the same object. We also perform transformation operations on the point clouds of two different objects to generate negative sample pairs. For the transformation operations, we consider random rotation, adding random noise, and randomly deleting \\(20\\%\\) of the original point cloud data." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.338, + 0.473, + 0.655 + ], + "angle": 0, + "content": "Metrics To evaluate an agent's performance, we consider several metrics as follows: (1) Success. The success metric is a binary indicator of each task, which is strictly defined as whether the whole objects' states have been restored to their goal states. (2) Fixed Strict. This metric records the proportion of successfully fixed objects per task. If there are any newly misplaced objects at the end of a task, this metric will be set as 0. (3) Misplaced. This metric is denoted as the number of misplaced objects after the unshuffle stage divided by the number of misplaced objects at the start of the unsuffle stage. (4) Energy Remaining (E). The above metrics are quite strict, which is not possible to measure the distance to task completion. The energy is used to represent the difference between two possible states of an object, which can be functioned as \\( D: S \\times S \\Rightarrow [0,1] \\). The larger the energy value, the greater the difference between the two states, whereas if the two states are approximately equal, the energy value is 0. Therefore, this metric can be computed as the sum of all objects' energy after the unshuffle stage, divided by the sum of all objects' energy at the beginning of the unshuffle stage." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.473, + 0.902 + ], + "angle": 0, + "content": "Implementation details The distance threshold \\(\\epsilon_{d}\\) is set to \\(1.5\\mathrm{m}\\), which is determined through hyper-parameter tuning, as detailed in Sec. 4.4. To ensure a fair comparison, we limit the maximum step number for both the exploration and rearrangement stages. In RoomR dataset[45], the exploration step limit is set to 300 and the navigation step limit for each object's rearrangement is set to 50. In our MrMiR dataset, we categorize the apartments by area into five levels: \\(< 10\\mathrm{m}^2\\), \\(10 - 60\\mathrm{m}^2\\), \\(60 - 150\\mathrm{m}^2\\), \\(150 - 300\\mathrm{m}^2\\), \\(>300\\mathrm{m}^2\\). Correspondingly, the exploration step limits are set to 50, 200, 300, 500 and 800 and the navigation step limits for each object's rearrangement are set to 50, 80, 100, 200, 300. When we train the geometric feature extractor, we use Adam as our optimizer and the hyper-parameters \\((lr,\\beta_1,\\beta_2,\\epsilon)\\) are set to \\((0.001,0.9,0.999,1\\mathrm{e} - 8)\\). The parameters and models are tuned only on the RoomR dataset" + }, + { + "type": "image", + "bbox": [ + 0.541, + 0.09, + 0.816, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.257, + 0.892, + 0.286 + ], + "angle": 0, + "content": "Figure 3. Comparison of scene area distribution between MrMiR and RoomR[45] datasets." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.321, + 0.8, + 0.336 + ], + "angle": 0, + "content": "and are directly tested on the MrMiR dataset." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.358, + 0.795, + 0.374 + ], + "angle": 0, + "content": "4.2. Comparisons with Related Works" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.386, + 0.892, + 0.446 + ], + "angle": 0, + "content": "We report the quantitative comparisons on the RoomR dataset in Table 1 and the MrMiR dataset in Table 2 with the two state-of-the-art modular methods MaSS[41] and TIDEE[38]." 
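 +The sketch below computes the four metrics reported in Tables 1 and 2 from per-object bookkeeping; it reflects one plausible reading of the definitions above, and the official challenge implementation may differ in details. + +```python +def rearrangement_metrics(start_misplaced, still_misplaced, newly_misplaced, + start_energy, end_energy): + """Per-task metrics, following one reading of the definitions in Sec. 4.1. + + start_misplaced: objects misplaced at the start of the unshuffle stage. + still_misplaced: the subset of start_misplaced not restored by the agent. + newly_misplaced: objects that were correct at the start but disturbed by the agent. + start_energy / end_energy: per-object energies D(s, s*) in [0, 1] before / after. + """ + n_start = max(len(start_misplaced), 1) + success = float(not still_misplaced and not newly_misplaced) + fixed = start_misplaced - still_misplaced + fixed_strict = 0.0 if newly_misplaced else len(fixed) / n_start + misplaced = (len(still_misplaced) + len(newly_misplaced)) / n_start + energy = sum(end_energy.values()) / max(sum(start_energy.values()), 1e-8) + return {"Success": success, "FixedStrict": fixed_strict, + "Misplaced": misplaced, "Energy": energy} + +# Toy usage: two of three shuffled objects are restored and nothing new is disturbed. +print(rearrangement_metrics( + start_misplaced={"mug", "book", "plate"}, still_misplaced={"plate"}, + newly_misplaced=set(), + start_energy={"mug": 0.8, "book": 0.6, "plate": 0.7}, + end_energy={"mug": 0.0, "book": 0.0, "plate": 0.7})) +``` +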
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.451, + 0.892, + 0.542 + ], + "angle": 0, + "content": "MaSS [41] employs a Gaussian mixture model to train a semantic search strategy, aiming to guide the agent towards regions where the likelihood of object occurrence is higher. During the exploration process, the 3D voxel semantic map is constructed, which is then used to match and identify objects that need to be rearranged." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.545, + 0.892, + 0.606 + ], + "angle": 0, + "content": "TIDEE [38] employs a coverage-based exploration policy to extract the spatial relationships between objects. After the exploration of two stages, the relationship changes are used to identify the rearrangement goals." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.61, + 0.892, + 0.716 + ], + "angle": 0, + "content": "Given that the original work of TIDEE is based on category-level (i.e., only records the category information of objects, and for multiple instances under the same category, only chooses one as the target), it cannot be directly applicable to our MrMiR dataset. To be fair, we make modifications to TIDEE by extracting all spatial relationships between instances when testing on the MrMiR dataset." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.9 + ], + "angle": 0, + "content": "As shown in Table 1, our proposed method CAVR outperforms the related works in all metrics. Specifically, it improves the success rate by \\(2.5\\%\\) and the proportion of successfully fixed objects by \\(5.38\\%\\). Beyond the primary improvements, the decrease in energy and misplaced metrics suggests that our CAVR method could rearrange the environment closer to the goal configuration, even without fully completing the task. As shown in Table 2, the disparity between the related works and our CAVR method has further increased, fully demonstrating the superiority of our method in dealing with more complex and challenging environment." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "16462" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.412, + 0.09, + 0.559, + 0.104 + ], + "angle": 0, + "content": "Table 3. Ablation Study" + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.115, + 0.896, + 0.26 + ], + "angle": 0, + "content": "
 +Visual Exploration | Scene Change Matching | Success (%)↑ | FixedStrict (%)↑ | E↓ | Misplaced ↓ +
 +coverage | √ | 13.1 | 31.0 | 0.722 | 0.717 +
 +MaSS's | √ | 8.7 | 25.8 | 0.763 | 0.754 +
 +√ | uniform | 11.3 | 24.6 | 0.818 | 0.807 +
 +√ | visual | 14.0 | 32.6 | 0.724 | 0.720 +
 +√ | geometric | 14.2 | 32.3 | 0.723 | 0.717 +
 +√ | √ | 14.2 | 33.1 | 0.714 | 0.707 +
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.1, + 0.261, + 0.894, + 0.292 + ], + "angle": 0, + "content": "“√” represents utilizing our proposed corresponding modules (closer-aligned-retrace exploration policy and scene change matching based on similarity of appearance including visual feature and geometric feature introduced in Sec. 3); “E”: Energy Remaining." + }, + { + "type": "image", + "bbox": [ + 0.079, + 0.306, + 0.273, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.279, + 0.305, + 0.473, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.482, + 0.305, + 0.685, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.689, + 0.305, + 0.888, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.442, + 0.896, + 0.486 + ], + "angle": 0, + "content": "Figure 4. Rearrangement performance relative to distance threshold \\(\\epsilon_{d}\\). The blue lines represent the average metrics across the tasks of validation set of RoomR[45], with the shaded area representing the \\(68\\%\\) confidence interval. Higher values of Success and %FixedStrict indicate superior performance, whereas lower EnergyRemaining and %Misplaced indicate better results." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.51, + 0.232, + 0.526 + ], + "angle": 0, + "content": "4.3. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.535, + 0.47, + 0.61 + ], + "angle": 0, + "content": "Considering the complexity of visual rearrangement task, we conduct ablation studies on RoomR dataset [45] to further investigate the importance of different modules within the overall task. In the ablation studies, we keep the diffcloud as the representation of scene changes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.612, + 0.47, + 0.732 + ], + "angle": 0, + "content": "Ablation on the visual exploration module We replace our closer-aligned-retrace exploration policy with: a) Coverage-based exploration policy This strategy randomly selects target points from unexplored areas, which are used in TIDEE [38]. b) MaSS's semantic policy This ablation directly adopts the semantic policy proposed in [41], which trains a network to search the object distribution." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.734, + 0.471, + 0.885 + ], + "angle": 0, + "content": "Ablation on the scene change matching module The process of scene change matching can be abstracted as a maximum weight matching problem in bipartite graph. We substitute the weights of edges with: a) Uniform weights This ablation set all edge weights to the same value regardless of the objects' appearance, which leads to a random matching. b) Similarity of visual feature This ablation only utilize the similarity of visual feature as the weight. c) Similarity of geometric feature This ablation only use the similarity of geometric feature as the weight." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.471, + 0.901 + ], + "angle": 0, + "content": "The experimental results are presented in Table 3. In the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.51, + 0.895, + 0.691 + ], + "angle": 0, + "content": "ablation study on the visual exploration module, the model with MaSS's exploration policy perform worst due to the substantial variation in objects distribution within rooms, making it challenging to model them effectively with a uniform network. 
The model with coverage-based policy also underperform as it is likely to overlook minor changes when the observation distance is considerable. In the ablation study on the scene change matching module, removing any part of the appearance feature clearly decreases the performance in all metrics, which illustrates the noticeable impact of our extracted appearance feature on the visual rearrangement task." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.709, + 0.733, + 0.726 + ], + "angle": 0, + "content": "4.4. Hyper-parameter Tuning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.895, + 0.903 + ], + "angle": 0, + "content": "We conduct experiments on the validation set of RoomR[45] to determine the distance threshold in the optimization criteria for our closer-aligned-retrace exploration policy. A very small threshold value means visiting nearly every grid space on the map, while a large threshold value ignores the underlying concern of non-ambiguous scene change detection. The exploration happens in the unshuffle stage as well and our exploration policy leads the agent to try its best to replicate the previous trajectory. Therefore the threshold value determines the trade-off between optimality in terms of the agent traversal for exploration and a" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "16463" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.139, + 0.11, + 0.156 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.091, + 0.273, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.281, + 0.091, + 0.425, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.434, + 0.091, + 0.575, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.091, + 0.728, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.735, + 0.091, + 0.878, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.267, + 0.11, + 0.285 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.22, + 0.273, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.281, + 0.22, + 0.423, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.433, + 0.22, + 0.575, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.584, + 0.22, + 0.726, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.735, + 0.22, + 0.878, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.38, + 0.11, + 0.397 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.332, + 0.273, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.28, + 0.332, + 0.423, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.433, + 0.332, + 0.574, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.584, + 0.332, + 0.726, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.735, + 0.332, + 0.877, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.456, + 0.895, + 0.527 + ], + "angle": 0, + 
"content": "Figure 5. Visualization of optimization process of observation distance map and construction of diff-cloud (a) In the walkthrough stage, objects begin in the positions indicated by the dashed blue bounding boxes. Observation distance map is positioned at the top right corner of each image. The color transitioning from blue to red represents the distances ranging from \\(0\\mathrm{m}\\) to \\(5\\mathrm{m}\\). (b) In the unshuffle stage, objects are moved to the locations indicated by the solid red box. (c) The diff-cloud is gradually built up, including the moved part (blue points) and the protruding part (red points)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.553, + 0.343, + 0.568 + ], + "angle": 0, + "content": "non-ambiguous scene change detection." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.57, + 0.47, + 0.676 + ], + "angle": 0, + "content": "As shown in the Fig.4, we set observation distance thresholds from \\(1\\mathrm{m}\\) to \\(7\\mathrm{m}\\) and compute the average metrics of 1000 tasks. Optimal performance on the validation set is achieved with a distance threshold at \\(1.5\\mathrm{m}\\), which is the threshold consistently applied in the other experiments throughout this paper. In this experiment, error bars are calculated based on a \\(68\\%\\) confidence interval." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.694, + 0.216, + 0.709 + ], + "angle": 0, + "content": "4.5. Visualization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.473, + 0.903 + ], + "angle": 0, + "content": "We visualize and analyze the optimization of the observation distance map during the walkthrough stage and the construction of the diff-cloud in the unshuffle stage, as shown in Fig. 5. As the exploration progresses, the distance map increasingly exhibit hues of blue, which indicates that our exploration policy enables the agent to observe the scene details up close. In the unshuffle stage, as the diff-cloud is gradually built up, we develop a distinct understanding of the changes occurring throughout the scene. After matching these changes according to the similarity of their appearance, we can carry out the rearrangement execution procedurally." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.551, + 0.62, + 0.567 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.582, + 0.895, + 0.78 + ], + "angle": 0, + "content": "We propose a category agnostic model for visual rearrangement task in this paper. Our method is composed of a closer-aligned-retrace exploration policy, a scene change detection module based on point cloud and a scene change matching module utilizing the similarity of appearance feature, each specifically designed to recover the scene configuration regardless of any category labels. To validate the proposed method, we conduct experiments on the RoomR dataset and a more practical dataset MrMiR collected by us, where multiple instances distribute across multiple rooms. Experimental results on these two datasets demonstrate that our method is able to perform the visual rearrangement task effectively without any category information." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.895, + 0.9 + ], + "angle": 0, + "content": "Acknowledgements: This work was supported by the National Natural Science Foundation of China under Grant 62125207, 62272443, 62032022 and U23B2012, in part by Beijing Natural Science Foundation under Grant JQ22012, Z190020." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "16464" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.169 + ], + "angle": 0, + "content": "[1] Pablo F Alcantarilla, Simon Stent, German Ros, Roberto Arroyo, and Riccardo Gherardi. Street-view change detection with deconvolutional networks. Autonomous Robots, 42: 1301-1322, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.472, + 0.24 + ], + "angle": 0, + "content": "[2] Dhruv Batra, Angel X. Chang, Sonia Chernova, Andrew J. Davison, Jia Deng, Vladlen Koltun, Sergey Levine, Jitendra Malik, Igor Mordatch, Roozbeh Mottaghi, Manolis Savva, and Hao Su. Rearrangement: A challenge for embodied ai, 2020. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.242, + 0.471, + 0.297 + ], + "angle": 0, + "content": "[3] Edward Beeching, Jilles Dibangoye, Olivier Simonin, and Christian Wolf. Learning to plan with uncertain topological maps. In European Conference on Computer Vision, pages 473-490. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.298, + 0.47, + 0.339 + ], + "angle": 0, + "content": "[4] Shuhui Bu, Qing Li, Pengcheng Han, Pengyu Leng, and Ke Li. Mask-cdnet: A mask based pixel change detection network. Neurocomputing, 378:166-178, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.341, + 0.471, + 0.382 + ], + "angle": 0, + "content": "[5] Yuri Burda, Harri Edwards, Deepak Pathak, Amos Storkey, Trevor Darrell, and Alexei A. Efros. Large-scale study of curiosity-driven learning. In ICLR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.383, + 0.471, + 0.437 + ], + "angle": 0, + "content": "[6] Devendra Singh Chaplot, Dhiraj Gandhi, Saurabh Gupta, Abhinav Gupta, and Ruslan Salakhutdinov. Learning to explore using active neural slam. In International Conference on Learning Representations (ICLR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.438, + 0.47, + 0.508 + ], + "angle": 0, + "content": "[7] Devendra Singh Chaplot, Helen Jiang, Saurabh Gupta, and Abhinav Gupta. Semantic curiosity for active visual learning. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part VI 16, pages 309–326. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.509, + 0.47, + 0.577 + ], + "angle": 0, + "content": "[8] Devendra Singh Chaplot, Murtaza Dalal, Saurabh Gupta, Jitendra Malik, and Russ R Salakhutdinov. Seal: Self-supervised embodied active learning using exploration and 3d consistency. Advances in neural information processing systems, 34:13086-13098, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.579, + 0.47, + 0.647 + ], + "angle": 0, + "content": "[9] Chao-Peng Chen, Jun-Wei Hsieh, Ping-Yang Chen, Yi-Kuan Hsieh, and Bor-Shiun Wang. Saras-net: scale and relation aware siamese network for change detection. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 14187-14195, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.649, + 0.47, + 0.704 + ], + "angle": 0, + "content": "[10] Shuo Chen, Kailun Yang, and Rainer Stiefelhagen. Drtanet: Dynamic receptive temporal attention network for street scene change detection. In 2021 IEEE Intelligent Vehicles Symposium (IV), pages 502-509. IEEE, 2021. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.469, + 0.746 + ], + "angle": 0, + "content": "[11] Tao Chen, Saurabh Gupta, and Abhinav Gupta. Learning exploration policies for navigation. In International Conference on Learning Representations, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.747, + 0.47, + 0.816 + ], + "angle": 0, + "content": "[12] Matt Deitke, Eli VanderBilt, Alvaro Herrasti, Luca Weihs, Jordi Salvador, Kiana Ehsani, Winson Han, Eric Kolve, Ali Farhadi, Aniruddha Kembhavi, et al. Procthor: Large-scale embodied ai using procedural generation. arXiv preprint arXiv:2206.06994, 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.47, + 0.857 + ], + "angle": 0, + "content": "[13] Edsger W Dijkstra. A note on two problems in connexion with graphs. Numerische mathematik, 1(1):269-271, 1959. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[14] H. Durrant-Whyte and T. Bailey. Simultaneous localization and mapping: part i. IEEE Robotics & Automation Magazine, 13(2):99-110, 2006. 2" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[15] Martin Ester, Hans-Peter Kriegel, Jörg Sander, Xiaowei Xu, et al. A density-based algorithm for discovering clusters in large spatial databases with noise. In kdd, pages 226-231, 1996. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.894, + 0.216 + ], + "angle": 0, + "content": "[16] Kuan Fang, Alexander Toshev, Li Fei-Fei, and Silvio Savarese. Scene memory transformer for embodied agents in long-horizon tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.218, + 0.892, + 0.286 + ], + "angle": 0, + "content": "[17] Kuan Fang, Alexander Toshev, Li Fei-Fei, and Silvio Savarese. Scene memory transformer for embodied agents in long-horizon tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 538–547, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.288, + 0.892, + 0.356 + ], + "angle": 0, + "content": "[18] Samir Yitzhak Gadre, Kiana Ehsani, Shuran Song, and Roozbeh Mottaghi. Continuous scene representations for embodied ai. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14849-14859, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.358, + 0.892, + 0.425 + ], + "angle": 0, + "content": "[19] Enqiang Guo, Xinsha Fu, Jiawei Zhu, Min Deng, Yu Liu, Qing Zhu, and Haifeng Li. Learning to measure change: Fully convolutional siamese metric networks for scene change detection. arXiv preprint arXiv:1810.09111, 2018.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.427, + 0.892, + 0.482 + ], + "angle": 0, + "content": "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.484, + 0.892, + 0.538 + ], + "angle": 0, + "content": "[21] Dinesh Jayaraman and Kristen Grauman. Learning to look around: Intelligently exploring unseen environments for unknown tasks. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.539, + 0.892, + 0.594 + ], + "angle": 0, + "content": "[22] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.595, + 0.892, + 0.662 + ], + "angle": 0, + "content": "[23] Eric Kolve, Roozbeh Mottaghi, Winson Han, Eli VanderBilt, Luca Weihs, Alvaro Herrasti, Matt Deitke, Kiana Ehsani, Daniel Gordon, Yuke Zhu, et al. Ai2-thor: An interactive 3d environment for visual ai. arXiv preprint arXiv:1712.05474, 2017.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.665, + 0.892, + 0.704 + ], + "angle": 0, + "content": "[24] Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly, 2(1-2):83-97, 1955. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.706, + 0.892, + 0.773 + ], + "angle": 0, + "content": "[25] Weijie Li, Xinhang Song, Yubing Bai, Sixian Zhang, and Shuqiang Jiang. ION: instance-level object navigation. In MM '21: ACM Multimedia Conference, Virtual Event, China, October 20 - 24, 2021, pages 4343-4352. ACM, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.776, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[26] Haotong Lin, Qianqian Wang, Ruojin Cai, Sida Peng, Hadar Averbuch-Elor, Xiaowei Zhou, and Noah Snively. Neural scene chronology. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20752-20761, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[27] Kevin Matzen and Noah Snavely. Scene chronology. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pages 615-630. Springer, 2014. 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "16465" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[28] So Yeon Min, Devendra Singh Chaplot, Pradeep Ravikumar, Yonatan Bisk, and Ruslan Salakhutdinov. Film: Following instructions in language with modular methods. arXiv preprint arXiv:2110.07342, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.471, + 0.243 + ], + "angle": 0, + "content": "[29] Medhini Narasimhan, Erik Wijmans, Xinlei Chen, Trevor Darrell, Dhruv Batra, Devi Parikh, and Amanpreet Singh. Seeing the un-scene: Learning amodal semantic maps for room navigation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVIII 16, pages 513-529. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.246, + 0.471, + 0.3 + ], + "angle": 0, + "content": "[30] Deepak Pathak, Pulkit Agrawal, Alexei A Efros, and Trevor Darrell. Curiosity-driven exploration by self-supervised prediction. In International conference on machine learning, pages 2778-2787. PMLR, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.302, + 0.471, + 0.356 + ], + "angle": 0, + "content": "[31] Deepak Pathak, Dhiraj Gandhi, and Abhinav Gupta. 
Self-supervised exploration via disagreement. In International conference on machine learning, pages 5062-5071. PMLR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.358, + 0.471, + 0.413 + ], + "angle": 0, + "content": "[32] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.414, + 0.471, + 0.495 + ], + "angle": 0, + "content": "[33] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.497, + 0.471, + 0.566 + ], + "angle": 0, + "content": "[34] Santhosh K Ramakrishnan, Ziad Al-Halah, and Kristen Grauman. Occupancy anticipation for efficient exploration and navigation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part V 16, pages 400-418. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.567, + 0.471, + 0.62 + ], + "angle": 0, + "content": "[35] Santhosh K Ramakrishnan, Dinesh Jayaraman, and Kristen Grauman. An exploration of embodied visual exploration. International Journal of Computer Vision, 129:1616-1649, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.623, + 0.471, + 0.678 + ], + "angle": 0, + "content": "[36] Vijaya Raghavan T Ramkumar, Elahe Arani, and Bahram Zonooz. Differencing based self-supervised pretraining for scene change detection. In Conference on Lifelong Learning Agents, pages 952-965. PMLR, 2022. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.679, + 0.471, + 0.734 + ], + "angle": 0, + "content": "[37] Ken Sakurada, Mikiya Shibuya, and Weimin Wang. Weakly supervised silhouette-based semantic scene change detection. In 2020 IEEE International conference on robotics and automation (ICRA), pages 6861-6867. IEEE, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.735, + 0.471, + 0.831 + ], + "angle": 0, + "content": "[38] Gabriel Sarch, Zhaoyuan Fang, Adam W Harley, Paul Schydlo, Michael J Tarr, Saurabh Gupta, and Katerina Fragkiadaki. Tidee: Tidying up novel rooms using visuo-semantic commonsense priors. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIX, pages 480-496. Springer, 2022. 1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.471, + 0.899 + ], + "angle": 0, + "content": "[39] Nikolay Savinov, Anton Raichuk, Raphaël Marinier, Damien Vincent, Marc Pollefeys, Timothy Lillicrap, and Sylvain Gelly. Episodic curiosity through reachability. In International Conference on Learning Representations (ICLR), 2019. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[40] Grant Schindler and Frank Dellaert. Probabilistic temporal inference on reconstructed 3d scenes. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1410-1417. IEEE, 2010. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.218 + ], + "angle": 0, + "content": "[41] Brandon Trabucco, Gunnar Sigurdsson, Robinson Piramuthu, Gaurav S Sukhatme, and Ruslan Salakhutdinov. A simple approach for visual rearrangement: 3d mapping and semantic search. arXiv preprint arXiv:2206.13396, 2022. 1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.22, + 0.892, + 0.261 + ], + "angle": 0, + "content": "[42] Guo-Hua Wang, Bin-Bin Gao, and Chengjie Wang. How to reduce change detection to semantic segmentation. Pattern Recognition, 138:109384, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.263, + 0.892, + 0.33 + ], + "angle": 0, + "content": "[43] Xiaohan Wang, Yuehu Liu, Xinhang Song, Beibei Wang, and Shuqiang Jiang. Generating explanations for embodied action decision from visual observation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 2838-2846, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.333, + 0.892, + 0.388 + ], + "angle": 0, + "content": "[44] Xiaohan Wang, Yuehu Liu, Xinhang Song, Beibei Wang, and Shuqiang Jiang. Camp: Causal multi-policy planning for interactive navigation in multi-room scenes. Advances in Neural Information Processing Systems, 36, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.39, + 0.892, + 0.445 + ], + "angle": 0, + "content": "[45] Luca Weihs, Matt Deitke, Aniruddha Kembhavi, and Roozbeh Mottaghi. Visual room rearrangement. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5922-5931, 2021. 1, 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.447, + 0.892, + 0.488 + ], + "angle": 0, + "content": "[46] Haitao Zeng, Xinhang Song, and Shuqiang Jiang. Multi-object navigation using potential target position policy function. IEEE Transactions on Image Processing, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.489, + 0.892, + 0.559 + ], + "angle": 0, + "content": "[47] Sixian Zhang, Weijie Li, Xinhang Song, Yubing Bai, and Shuqiang Jiang. Generative meta-adversarial network for unseen object navigation. In Computer Vision - ECCV 2022 - 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIX, pages 301-320." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.56, + 0.892, + 0.642 + ], + "angle": 0, + "content": "[48] Sixian Zhang, Xinhang Song, Yubing Bai, Weijie Li, Yakui Chu, and Shuqiang Jiang. Hierarchical object-to-zone graph for object navigation. In 2021 IEEE/CVF International Conference on Computer Vision, ICCV 2021, Montreal, QC, Canada, October 10-17, 2021, pages 15110-15120. IEEE, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.644, + 0.892, + 0.713 + ], + "angle": 0, + "content": "[49] Sixian Zhang, Xinhang Song, Weijie Li, Yubing Bai, Xinyao Yu, and Shuqiang Jiang. Layout-based causal inference for object navigation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10792-10802, 2023. 
2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "16466" + } + ] +] \ No newline at end of file diff --git a/2024/A Category Agnostic Model for Visual Rearrangment/4b328694-69ab-47a2-83d3-ce2efe00b0f0_origin.pdf b/2024/A Category Agnostic Model for Visual Rearrangment/4b328694-69ab-47a2-83d3-ce2efe00b0f0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b4e1258770275d7d0afe0fe0c0996eac20e6e0ab --- /dev/null +++ b/2024/A Category Agnostic Model for Visual Rearrangment/4b328694-69ab-47a2-83d3-ce2efe00b0f0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d98b4ec975c722da3cc7037ecdf79b6623efe876ad667a0f412a14cab466933 +size 7094659 diff --git a/2024/A Category Agnostic Model for Visual Rearrangment/full.md b/2024/A Category Agnostic Model for Visual Rearrangment/full.md new file mode 100644 index 0000000000000000000000000000000000000000..37e668cac8ea01e916e2704dd84ea7ad4fddf82c --- /dev/null +++ b/2024/A Category Agnostic Model for Visual Rearrangment/full.md @@ -0,0 +1,290 @@ +# A Category Agnostic Model for Visual Rearrangement + +Yuyi Liu $^{1,2}$ , Xinhang Song $^{1,2}$ , Weijie Li $^{1,2}$ , Xiaohan Wang $^{1}$ , Shuqiang Jiang $^{1,2}$ + +$^{1}$ Key Lab of Intelligent Information Processing Laboratory of the Chinese Academy of Sciences (CAS), Institute of Computing Technology, Beijing $^{2}$ University of Chinese Academy of Sciences, Beijing {yuyi.liu, xinhang song, weijie.li, xiaohan.wang}@vipl.ict.ac.cn sqjiang@ict.ac.cn + +# Abstract + +This paper presents a novel category agnostic model for visual rearrangement task, which can help an embodied agent to physically recover the shuffled scene configuration without any category concepts to the goal configuration. Previous methods usually follow a similar architecture, completing the rearrangement task by aligning the scene changes of the goal and shuffled configuration, according to the semantic scene graphs. However, constructing scene graphs requires the inference of category labels, which not only causes the accuracy drop of the entire task but also limits the application in real world scenario. In this paper, we delve deep into the essence of visual rearrangement task and focus on the two most essential issues, scene change detection and scene change matching. We utilize the movement and the protrusion of point cloud to accurately identify the scene changes and match these changes depending on the similarity of category agnostic appearance feature. Moreover, to assist the agent to explore the environment more efficiently and comprehensively, we propose a closer-aligned-retrace exploration policy, aiming to observe more details of the scene at a closer distance. We conduct extensive experiments on AI2THOR Rearrangement Challenge based on RoomR dataset and a new multi-room multi-instance dataset MrMiR collected by us. The experimental results demonstrate the effectiveness of our proposed method. + +# 1. Introduction + +Rearrangement task remains a practical challenge for embodied agents that assist humans in real life, whose goal is to bring a given physical environment into the goal state with a goal specification [2]. 
In this paper, we focus on a branch of the general rearrangement task based on ExperienceGoal, i.e., the visual rearrangement task [45], which requires an agent to recover the scene configuration after it has been shuffled randomly. Due to the excessive complexity of + +![](images/c58d68c513c2feb8e0e7df93515cd0b68fe93a15b8ae7b9077124b7472caffd2.jpg) +Figure 1. Influence of different scene change representations on scene change detection sensitivity and scene change matching simplicity. To strike a balance between these two issues, we select point cloud as our scene change representation. + +the state space, the end-to-end deep reinforcement learning methods previously used for navigation struggle to cover this task, resulting in performance only marginally above chance [18, 45]. Recent works demonstrate that modular methods, such as MaSS [41] and TIDEE [38], effectively reduce the complexity of the rearrangement task by dividing it into several modules. These methods use a pre-trained detector to assign category labels to each object and infer the rearrangement goals by matching the semantic scene graphs of the goal and shuffled configurations. + +However, the introduction of category information may not be that necessary, as the essential goal of the visual rearrangement task amounts to "make it like what it was before". Even without category labels, we can still perform the task by memorizing the appearance characteristics and the state information of objects in the scene. Besides, due to the limited accuracy of the detector, the transition from visual input to category information inevitably introduces errors, which accumulate and propagate to subsequent modules, thereby causing an accuracy drop for the entire task. Previous works achieve large gains with ground-truth semantic segmentation [18, 41]. Moreover, there are inherent limitations to methodologies based on category information. Once the detector is trained, these methods are restricted to a fixed set of categories and are powerless against object categories not previously observed in the training environment. Using zero-shot methods, such as SAM [22] combined with CLIP [33], can considerably expand the known categories, but they still cover only a limited set. It is impractical to retrain the model every time a new object category emerges due to the extensive resources required. + +To address the above problems, our motivation is to identify all scene changes in the room and restore them, regardless of any category. Previous methods use semantic labels for scene change matching (SCM) because these labels provide a high-level representation of objects and make SCM straightforward. However, scene changes can be represented in numerous ways, ranging from raw pixels to point clouds, up to a category label combined with positional information. As shown in Fig. 1, there is an inherent trade-off: while simpler representations minimize information loss during conversion and enhance the sensitivity of scene change detection (SCD), they simultaneously complicate the process of SCM. + +Point cloud can serve as an appropriate representation of scene change, as it captures rich geometric, positional, and scale information of objects and remains robust against varied observation angles and obstructions from other objects. Leveraging point cloud facilitates efficient SCD and also provides richer appearance information for SCM. However, due to the inherent unordered nature and rotational invariance of point cloud, it is difficult to match point clouds directly.
We need to extract high-dimensional appearance features from the point cloud for SCM. + +Based on these observations, we propose a category agnostic model for the visual rearrangement task called CAVR; to the best of our knowledge, this is the first attempt at visual rearrangement without category inference. By utilizing point cloud as the scene change representation, CAVR can recover the scene configuration to its goal state without any category concepts. In CAVR, we introduce a closer-aligned-retrace exploration policy to help the agent conduct exploration effectively for SCD. Meanwhile, we maintain a diff-cloud, which consists of two components, one for the point cloud moved and another for the point cloud protruding in the shuffled scene configuration, compared to the goal configuration. The diff-cloud precisely captures the variations occurring throughout the scene. After exploration, we utilize the pre-trained appearance feature extractor to embed the diff-cloud and then match the scene changes across various locations based on the similarity of appearance features, resulting in a series of rearrangement goals. Then we use a planning-based policy to restore them to their goal states in succession. + +We conduct experiments on the AI2THOR Rearrangement Challenge based on the RoomR dataset [45] and show improvements on both the success rate and the proportion of successfully fixed objects. To cater to more practical demands, we introduce a multi-room multi-instance rearrangement dataset, MrMiR, based on the ProcTHOR simulator [12]. The experimental results on the MrMiR dataset fully demonstrate the effectiveness of our method in complex multi-room environments. + +# 2. Related Works + +Rearrangement The general rearrangement problem [2] aims to transform the environment from an initial state to a goal state through interaction. We focus on an instantiation of the rearrangement problem [45], in which the goal state is specified by immersing the agent in the goal environment and allowing it to explore autonomously. Prior works can be classified into two categories: end-to-end reinforcement learning and modular methods. The end-to-end methods [18, 45] perform poorly mainly due to the large action space and complex stages of the task. Comparatively, the modular methods [38, 41] have shown surprising progress in improving the success rate. In detail, MaSS [41] proposes a semantic policy with a voxel-based semantic map to find and match the changed objects. TIDEE [38] utilizes the spatial relationships between objects to determine the changed objects. Motivated by prior works, we also propose a modular method, while our model can perform the task without any category information. + +Visual exploration Visual exploration refers to the process in which an agent collects information about the 3D environment through motion and perception [14, 29, 30, 35]. For visual exploration, efficiency is of utmost significance, involving how to access a broader range of regions [3, 6, 17, 39], observe more objects [16] and obtain a larger volume of environmental information relevant to downstream tasks (such as navigation) [25, 43, 44, 46-49] within a certain budget. + +To improve the efficiency of exploration, several methods have employed ideas like curiosity [5, 7, 30, 31], coverage [6, 11] and reconstruction [21, 34]. Most related to ours are the coverage-based works, which try to maximize the area seen in the environment [6, 11].
In our exploration policy, both the area explored and the observation distance are considered simultaneously to accurately observe more details of the scene. + +Scene Change Detection Scene change detection (SCD) refers to the task of identifying and localizing changes of a scene captured at different times[9, 26, 27, 36, 37, 40, 42]. Depending on the types of scene representation, methods are classified into two categories, respectively, 2D domain and 3D domain[37]. The first one devises specific neural networks to process the image pair taken at different times and generate a pixel-level prediction, namely, each pixel is + +classified into a category of change[1, 4, 9, 10, 19, 36, 37, 42]. Some studies focus on the scene change detection in 3D domain. They aim to reconstruct a time-varying 3D model from images taken from multiple viewpoints at different times and represent the temporal scene changes over several decades[26, 27, 40]. In our task, we not only focus on identifying changes in the 3D environment, but also emphasize the importance of matching changes across various locations, which is crucial for enabling the agent to accurately recover the scene configuration. + +# 3. Method + +Given the intrinsic complexity of visual rearrangement task, in this section, we present a modular approach to tackle the task, decomposing it into manageable subtasks including visual exploration, scene change detection and scene change matching. Our pipeline is illustrated in Fig. 2. We start this section by giving the definition of visual rearrangement task in Sec. 3.1. Then we describe the three modules separately. The visual exploration module (Sec. 3.2) requires the agent to explore the environment efficiently and comprehensively while retaining memory of the environment. Subsequently, the scene change detection module (Sec. 3.3) utilizes the agent's memory of the goal environment and compares it with the current environment to identify all scene changes. Then to recover the goal configuration, the scene change matching module (Sec. 3.4) is proposed to correlate these changes across different areas within the scene and infer the rearrangement goals. + +# 3.1. Visual Rearrangement Task + +According to the commonly accepted norms in the community[2], the rearrangement task is defined in a general form, where an agent is initialized in a starting state $s^0$ and required to transform the environment from $s^0$ to the goal state $s^* \in S^*$ with the possible actions $a \in A$ . The environment state space is denoted as the Cartesian product of the pose spaces of all rigid parts: $S = (R^3 \times SO3) \times (R^3 \times SO3) \ldots \times (R^3 \times SO3)$ , where $R^3$ and $SO3$ represent the 3D locations and rotations space. Follow the Partially Observable Markov Decision Processes (POMDP), the agent typically has no access to any state space and must operate purely based on the sensory observations $o \in O$ and the given goal specification $g = \phi(s^0, S^*)$ . Based on different goal specification forms (GeometricGoal, ImageGoal, LanguageGoal, ExperienceGoal, et al.), the general rearrangement task has various levels of difficulty. + +We consider an instance of rearrangement task proposed by Weihs et al.[45], which adopts the ExperienceGoal as the goal specification $g$ and is defined as a two-stage task, including the walkthrough and unshuffle stages. During the walkthrough stage, the agent is immersed in a room of goal state $s^*$ and allowed to explore autonomously. 
Sequentially, + +the walkthrough environment is shuffled and some objects' states are changed, denoted as the unshuffle stage, where the agent officially starts the rearrangement task and reorganizes the shuffled scene configuration back. + +# 3.2. Visual Exploration + +Under the two-stage rearrangement task, the initial exploration of the target environment is critical for the subsequent stages, since the agent is expected to acquire more object information in the fewest number of steps. Previous works adopt coverage-based exploration [38] or a search policy based on the expert distribution of objects [41]. However, there are usually many small-sized objects distributed across the scene, which can be easily overlooked or obscured by large entities when observed from a distance. Therefore, we propose a closer-aligned-retrace exploration policy, aiming to observe more objects at a closer distance to improve the observation accuracy and completeness. + +The core idea of the proposed policy is to build an observation distance map $m_{o} \in \mathbb{R}^{H \times W}$ , where each grid denotes the minimum distance at which the current coordinate point is observed by the agent. Through the optimization of $m_{o}$ , the agent can be guided to observe objects closer. + +In exploration, at each timestep, the agent obtains visual observation RGBD and updates its own pose. Following previous work [8, 28, 41], we also build a 2D obstacle map $m_{t} \in \mathbb{R}^{H \times W}$ with the proposed observation distance map $m_{o}$ . At the beginning of the exploration, due to the limited range of movement, the observation distance map $m_{o}$ predominantly consists of high distance values. Therefore, the visual exploration policy $\pi$ can be represented by optimizing a function $f$ of $m_{o}$ and a distance thresh $\epsilon_{d}$ . + +$$ +\pi (a) = f \left(m _ {o}, \epsilon_ {d}\right) +$$ + +The goal of optimization is to minimize the observation distance map (i.e., $\min(m_o) \leq \epsilon_d$ ). We employ an analytical approach to obtain the solution. Specifically, based on the current observation distance map $m_o$ , we select a waypoint as the next exploration goal and apply the route planning Dijkstra algorithm [13] to generate a path on the obstacle map $m_t$ . As to the waypoint selection, we prioritize selecting those with higher distance values on the distance map, aiming to observe objects closer. To better compare the shuffled and goal state of the scene for rearrangement goals inference, in the unshuffle stage, the agent tries its best to replicate the trajectory of the walkthrough stage. + +# 3.3. Scene Change Detection + +Detecting changes within the scene is a critical capability for an agent to perform rearrangement tasks. We maintain a diff-cloud to represent the scene changes. As shown in Fig. 2 (b), the diff-cloud consists of two parts. The red and blue points respectively represent the moved and protruding + +![](images/8f9314053a3dc254d64dfd5ec5e18e3612266664ae3466209d2a713feaf4dbee.jpg) +Figure 2. Pipeline of our CAVR model. (a) The gradient color transitioning from blue to red in the observation distance map represents the distances ranging from $0\mathrm{m}$ to $5\mathrm{m}$ . We adopt a closer-aligned-retrace exploration policy to observe more details by optimizing a function of the distance map. 
(b) Scene change detection is performed by comparing the point clouds corresponding to the goal configuration and the shuffled configuration of the scene, recording the moved part (blue points) and the protruding part (red points) to construct the diff-cloud. (c) We extract the entity-layer information from the two parts of the diff-cloud and match these entities depending on the similarity of category agnostic appearance features. (d) After the matching process, we obtain a series of rearrangement goals with their goal states (indicated by the dashed bounding boxes) and shuffled states (indicated by the solid bounding boxes). + +point clouds in the shuffled configuration, compared to the goal configuration. Next, we explain how to construct the diff-cloud using the visual inputs from the two stages. + +During the walkthrough stage, at each pose $p^w$ of the agent, we employ the depth information $D_{p^w}$ to generate an egocentric point cloud $c_{p^w}^{ego}$. Each point in $c_{p^w}^{ego}$ is associated with a pixel in the depth map $D_{p^w}$. Then we convert $c_{p^w}^{ego}$ from the agent's coordinate system to the global coordinate system, resulting in a geocentric point cloud $c_{p^w}^{geo}$. For the observed RGB image $I_{p^w}$, we adopt the pre-trained ResNet-18 model [20] provided by the official PyTorch implementation to extract a visual feature map $f_{p^w}$. + +During the unshuffle stage, we use the same method to generate the geocentric point cloud $c_{p^u}^{geo}$ and the feature map $f_{p^u}$ for each pose $p^u$. If $p^u$ aligns with a previous pose $p^w$ in the walkthrough stage, we compare the two corresponding point clouds, $c_{p^w}^{geo}$ and $c_{p^u}^{geo}$. A considerable shift between the two point coordinates associated with the same pixel indicates that changes have occurred at this location. Specifically, an increase in distance from the agent suggests the removal of objects, while a decrease signifies the addition of objects. Based on the distance variations, these points are allocated to the moved part and the protruding part of the diff-cloud, respectively. Moreover, for the areas indicating scene changes, we extract the corresponding visual feature from the feature map and assign it to each point in that region. Each point in the diff-cloud is represented as $\{x,y,z,v\}$, where $x,y,z$ is the 3D coordinate in the global coordinate system and $v$ is the visual feature. + +# 3.4. Scene Change Matching + +After exploration of the walkthrough stage and the unshuffle stage, we acquire a comprehensive diff-cloud that encompasses changes in all areas of the scene. Note that in the visual rearrangement task settings, objects cannot disappear into thin air; they are simply moved from one place to another. Therefore, to recover the scene configuration, we need to match changes across various locations in the scene. Since the diff-cloud contains only some points in space, we first extract the entity-layer information from it and then perform matching operations at the entity level. + +We apply the density-based clustering algorithm DBSCAN [15] separately to the two parts of the diff-cloud, resulting in two sets of entities, a moved entity set $\Omega^m = \{\omega_1^m,\omega_2^m,\dots ,\omega_k^m\}$ and a protruding entity set $\Omega^p = \{\omega_1^p,\omega_2^p,\dots ,\omega_l^p\}$. Each entity in these two sets is a collection of points in the diff-cloud: $\omega = \{(x_{1},y_{1},z_{1},v_{1}),(x_{2},y_{2},z_{2},v_{2}),\ldots ,(x_{n},y_{n},z_{n},v_{n})\}$. + +As shown in Fig.
2(c), the scene change matching process can be regarded as a weighted bipartite graph matching between $\Omega^m$ and $\Omega^p$. We construct a bipartite graph $G = (\Omega^m \cup \Omega^p, E)$, where $\Omega^m \cup \Omega^p$ is the node set and $E$ represents the fully connected edge set. Every edge $e \in E$ has one end node in $\Omega^m$ and the other end node in $\Omega^p$. The function $\phi$ assigns a positive weight value to each edge. A matching $M$ is a subset of $E$ such that each node in $\Omega^m \cup \Omega^p$ appears in at most one edge of $M$. Our goal is to find the maximum matching: + +$$ +M^{*} = \operatorname*{argmax}_{M} \sum_{e \in M} \phi(e), +$$ + +where $e = e(\omega_i^m,\omega_j^p)$ represents the edge connecting node $\omega_{i}^{m}$ and node $\omega_{j}^{p}$. + +The role of the weight function $\phi(e)$ is to determine the possibility that $\omega_i^m$ and $\omega_j^p$ belong to the same instance. Based on this, we design the weight function to calculate the similarity in appearance of these two nodes. The appearance of each entity $\omega$ is considered from two aspects: a geometric feature $geo$ and a visual feature $vis$. For $vis$, we use the average of the visual features of all the points in the entity. In terms of $geo$, we train a geometric feature extractor, which builds upon PointNet++ [32] and embeds the raw point cloud data. The weight function $\phi$ is specifically defined as + +$$ +\phi\big(e(\omega_{i}^{m}, \omega_{j}^{p})\big) = \mathrm{Cosim}\left(geo_{i}^{m}, geo_{j}^{p}\right) + \mathrm{Cosim}\left(vis_{i}^{m}, vis_{j}^{p}\right), +$$ + +where $\mathrm{Cosim}$ refers to the cosine similarity. + +Then the Kuhn-Munkres algorithm [24] is adopted to solve this maximum matching problem (an illustrative sketch of this matching step is given after Sec. 4.2). Once entities are matched, we acquire entity pairs $\{(\omega_1^m,\omega_{j_1}^p),(\omega_2^m,\omega_{j_2}^p),\ldots ,(\omega_t^m,\omega_{j_t}^p)\}$ as rearrangement goals. Each entity pair $(\omega_i^m,\omega_{j_i}^p)$ represents two different states of the same instance, where $\omega_{i}^{m}$ denotes the goal state and $\omega_{j_i}^p$ denotes the current state of the instance. Subsequently, for the inferred rearrangement goals, we transport the corresponding objects to their goal states in succession, during which we leverage the 2D obstacle map and the Dijkstra algorithm [13] to conduct obstacle avoidance and navigation path planning. + +# 4. Experiments + +Table 1. Comparison on RoomR dataset + +
| Method | Suc (%)↑ | FS (%)↑ | E↓ | Mis ↓ |
| --- | --- | --- | --- | --- |
| TIDEE | 11.7 | 28.9 | 0.715 | 0.734 |
| MaSS | 4.7 | 16.5 | 1.016 | 1.018 |
| Our | 14.2 | 33.1 | 0.714 | 0.707 |
+ +"Suc": Success; "FS": Fixed Strict; "E": Energy Remain; "Mis": Misplaced. + +Table 2. Comparison on our MrMiR dataset + +
| Method | Suc (%)↑ | FS (%)↑ | E↓ | Mis ↓ |
| --- | --- | --- | --- | --- |
| TIDEE | 1.0 | 14.1 | 0.917 | 0.924 |
| MaSS | 0.6 | 10.5 | 1.019 | 1.026 |
| Our | 5.0 | 28.7 | 0.7327 | 0.7134 |
+ +"Suc": Success; "FS": Fixed Strict; "E": Energy Remain; "Mis": Misplaced. + +# 4.1. Experiment Setup + +Dataset We evaluate our method on the AI2THOR Rearrangement Challenge based on the RoomR dataset[45], which consists of 80 rooms and 4000 tasks for training, and 20 rooms with 1000 tasks each for both validation and test. Each task in RoomR involves 1 to 5 objects with state changes, characterized by object locations or openness. + +In RoomR[45] dataset, the spatial range of object changes is limited due to the confined area with single-room scenes and the target objects to be rearranged are mainly category-wise, i.e., most categories only have one instance. To cater for the prevalent characteristics of indoor environments in reality, we build a more practical and challenging dataset MrMiR for the two-stage rearrangement task on the ProcTHOR simulator[12], where the change in the state of an object can involve a broader spatial range, even extending across different rooms. Besides, there exists multiple instances within the same category that have different appearance. The simulator ProcTHOR[12] respectively provides 10,000 training, 1000 valid and 1000 test apartments. For our task need, we totally select 6000 apartments in the simulator, splitting 5000 apartments for training, 500 apartments for validation, and 500 apartments for test. Each apartment contains multiple instances within the same category that have different appearance. For each apartment, we randomly generate one rearrangement task. Therefore, our MrMiR dataset totally contains 6000 rearrangement tasks, the same as RoomR. Fig. 3 illustrates the comparison of scene area distribution between our MrMiR dataset and RoomR dataset. It can be seen that our dataset encompasses a diverse range of scene area, while RoomR mainly focusing on small rooms under $100\mathrm{m}^2$ . + +To train the geometric feature extractor based on PointNet++[32], which embeds point cloud, we generate a dataset using AI2THOR[23]. We collect 77K sample pairs, of which 70K are used for training and 7K for testing. Each sample pair is composed of two point clouds, which may either represent the same instance (the positive pair) or different instances (the negative pair). The distribution of the positive and negative pairs is balanced, with a 1:1 ratio. Within each room of AI2THOR, we generate positive sample pairs by applying different transformation operations to the point cloud of the same object. We also perform transformation operations on the point clouds of two different objects to generate negative sample pairs. For the transformation operations, we consider random rotation, adding random noise, and randomly deleting $20\%$ of the original point cloud data. + +Metrics To evaluate an agent's performance, we consider several metrics as follows: (1) Success. The success metric is a binary indicator of each task, which is strictly defined as whether the whole objects' states have been restored to their goal states. (2) Fixed Strict. This metric records the proportion of successfully fixed objects per task. If there are any newly misplaced objects at the end of a task, this metric will be set as 0. (3) Misplaced. This metric is denoted as the number of misplaced objects after the unshuffle stage divided by the number of misplaced objects at the start of the unsuffle stage. (4) Energy Remaining (E). The above metrics are quite strict, which is not possible to measure the distance to task completion. 
The energy is used to represent the difference between two possible states of an object, which can be functioned as $D: S \times S \Rightarrow [0,1]$ . The larger the energy value, the greater the difference between the two states, whereas if the two states are approximately equal, the energy value is 0. Therefore, this metric can be computed as the sum of all objects' energy after the unshuffle stage, divided by the sum of all objects' energy at the beginning of the unshuffle stage. + +Implementation details The distance threshold $\epsilon_{d}$ is set to $1.5\mathrm{m}$ , which is determined through hyper-parameter tuning, as detailed in Sec. 4.4. To ensure a fair comparison, we limit the maximum step number for both the exploration and rearrangement stages. In RoomR dataset[45], the exploration step limit is set to 300 and the navigation step limit for each object's rearrangement is set to 50. In our MrMiR dataset, we categorize the apartments by area into five levels: $< 10\mathrm{m}^2$ , $10 - 60\mathrm{m}^2$ , $60 - 150\mathrm{m}^2$ , $150 - 300\mathrm{m}^2$ , $>300\mathrm{m}^2$ . Correspondingly, the exploration step limits are set to 50, 200, 300, 500 and 800 and the navigation step limits for each object's rearrangement are set to 50, 80, 100, 200, 300. When we train the geometric feature extractor, we use Adam as our optimizer and the hyper-parameters $(lr,\beta_1,\beta_2,\epsilon)$ are set to $(0.001,0.9,0.999,1\mathrm{e} - 8)$ . The parameters and models are tuned only on the RoomR dataset + +![](images/d4c18e530969ea7ba8d0b7fee367d8bf7fccb542eb8852b903f1d4cc70683665.jpg) +Figure 3. Comparison of scene area distribution between MrMiR and RoomR[45] datasets. + +and are directly tested on the MrMiR dataset. + +# 4.2. Comparisons with Related Works + +We report the quantitative comparisons on the RoomR dataset in Table 1 and the MrMiR dataset in Table 2 with the two state-of-the-art modular methods MaSS[41] and TIDEE[38]. + +MaSS [41] employs a Gaussian mixture model to train a semantic search strategy, aiming to guide the agent towards regions where the likelihood of object occurrence is higher. During the exploration process, the 3D voxel semantic map is constructed, which is then used to match and identify objects that need to be rearranged. + +TIDEE [38] employs a coverage-based exploration policy to extract the spatial relationships between objects. After the exploration of two stages, the relationship changes are used to identify the rearrangement goals. + +Given that the original work of TIDEE is based on category-level (i.e., only records the category information of objects, and for multiple instances under the same category, only chooses one as the target), it cannot be directly applicable to our MrMiR dataset. To be fair, we make modifications to TIDEE by extracting all spatial relationships between instances when testing on the MrMiR dataset. + +As shown in Table 1, our proposed method CAVR outperforms the related works in all metrics. Specifically, it improves the success rate by $2.5\%$ and the proportion of successfully fixed objects by $5.38\%$ . Beyond the primary improvements, the decrease in energy and misplaced metrics suggests that our CAVR method could rearrange the environment closer to the goal configuration, even without fully completing the task. As shown in Table 2, the disparity between the related works and our CAVR method has further increased, fully demonstrating the superiority of our method in dealing with more complex and challenging environment. 
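As a concrete illustration of the entity matching step described in Sec. 3.4 (referenced there), the sketch below shows how a diff-cloud could be clustered into entities and how the maximum-weight bipartite matching could be solved with off-the-shelf tools. This is only an illustrative sketch, not the authors' released implementation: the DBSCAN parameters, the feature dimensionality, and the random placeholder features (standing in for the PointNet++ geometric features and the ResNet-18 visual features described in the paper) are assumptions for demonstration.

```python
# Illustrative sketch (not the authors' code) of the Sec. 3.4 matching step:
# cluster diff-cloud points into entities, then match moved vs. protruding
# entities by summed cosine similarity of geometric and visual features.
# Feature extraction is stubbed with random vectors; all names are hypothetical.
import numpy as np
from scipy.optimize import linear_sum_assignment
from sklearn.cluster import DBSCAN


def cluster_entities(points, eps=0.05, min_samples=10):
    """Group diff-cloud points of shape (N, 3) into entities with DBSCAN.

    Returns a list of index arrays, one per entity; noise points (label -1) are dropped.
    """
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(points)
    return [np.where(labels == k)[0] for k in sorted(set(labels)) if k != -1]


def cosine_sim(a, b):
    """Pairwise cosine similarity between rows of a (k, d) and rows of b (l, d)."""
    a = a / (np.linalg.norm(a, axis=1, keepdims=True) + 1e-8)
    b = b / (np.linalg.norm(b, axis=1, keepdims=True) + 1e-8)
    return a @ b.T


def match_entities(geo_m, vis_m, geo_p, vis_p):
    """Maximum-weight bipartite matching between moved (m) and protruding (p) entities.

    Edge weight = Cosim(geo) + Cosim(vis); solved with the Hungarian
    (Kuhn-Munkres) algorithm. Returns (i, j) pairs: goal-state entity i
    matched to current-state entity j.
    """
    weights = cosine_sim(geo_m, geo_p) + cosine_sim(vis_m, vis_p)
    rows, cols = linear_sum_assignment(weights, maximize=True)
    return list(zip(rows.tolist(), cols.tolist()))


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    k, l, d = 4, 5, 128  # 4 moved entities, 5 protruding entities, feature dim 128
    geo_m, vis_m = rng.normal(size=(k, d)), rng.normal(size=(k, d))
    geo_p, vis_p = rng.normal(size=(l, d)), rng.normal(size=(l, d))
    print(match_entities(geo_m, vis_m, geo_p, vis_p))
```

Here `linear_sum_assignment` with `maximize=True` plays the role of the Kuhn-Munkres solver for the maximum-weight matching, and the summed cosine similarities mirror the form of the weight function $\phi$ defined in Sec. 3.4.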
+ +Table 3. Ablation Study + +
| Visual Exploration | Scene Change Matching | Success (%)↑ | FixedStrict (%)↑ | E↓ | Misplaced ↓ |
| --- | --- | --- | --- | --- | --- |
| coverage | √ | 13.1 | 31.0 | 0.722 | 0.717 |
| MaSS's | √ | 8.7 | 25.8 | 0.763 | 0.754 |
| √ | uniform | 11.3 | 24.6 | 0.818 | 0.807 |
| √ | visual | 14.0 | 32.6 | 0.724 | 0.720 |
| √ | geometric | 14.2 | 32.3 | 0.723 | 0.717 |
| √ | √ | 14.2 | 33.1 | 0.714 | 0.707 |
+ +“√” represents utilizing our proposed corresponding modules (closer-aligned-retrace exploration policy and scene change matching based on similarity of appearance including visual feature and geometric feature introduced in Sec. 3); “E”: Energy Remaining. + +![](images/93c766c9dfdecea3fff6c54193df27b7ed4e58ba90e68c43885c4d45f889d290.jpg) +Figure 4. Rearrangement performance relative to distance threshold $\epsilon_{d}$ . The blue lines represent the average metrics across the tasks of validation set of RoomR[45], with the shaded area representing the $68\%$ confidence interval. Higher values of Success and %FixedStrict indicate superior performance, whereas lower EnergyRemaining and %Misplaced indicate better results. + +![](images/45ca38cc6e3eb03e7b0171879b3b040a00bde5abe31f9af4c036a4e8a34e40ef.jpg) + +![](images/f90728106f2e49f787f5d606a0cbe7e34e97e606d06968d43178eb1eac61ede1.jpg) + +![](images/b85f8f4a863c972d24acb24515355f9eeb599822145d46b83387421dc550afd5.jpg) + +# 4.3. Ablation Study + +Considering the complexity of visual rearrangement task, we conduct ablation studies on RoomR dataset [45] to further investigate the importance of different modules within the overall task. In the ablation studies, we keep the diffcloud as the representation of scene changes. + +Ablation on the visual exploration module We replace our closer-aligned-retrace exploration policy with: a) Coverage-based exploration policy This strategy randomly selects target points from unexplored areas, which are used in TIDEE [38]. b) MaSS's semantic policy This ablation directly adopts the semantic policy proposed in [41], which trains a network to search the object distribution. + +Ablation on the scene change matching module The process of scene change matching can be abstracted as a maximum weight matching problem in bipartite graph. We substitute the weights of edges with: a) Uniform weights This ablation set all edge weights to the same value regardless of the objects' appearance, which leads to a random matching. b) Similarity of visual feature This ablation only utilize the similarity of visual feature as the weight. c) Similarity of geometric feature This ablation only use the similarity of geometric feature as the weight. + +The experimental results are presented in Table 3. In the + +ablation study on the visual exploration module, the model with MaSS's exploration policy perform worst due to the substantial variation in objects distribution within rooms, making it challenging to model them effectively with a uniform network. The model with coverage-based policy also underperform as it is likely to overlook minor changes when the observation distance is considerable. In the ablation study on the scene change matching module, removing any part of the appearance feature clearly decreases the performance in all metrics, which illustrates the noticeable impact of our extracted appearance feature on the visual rearrangement task. + +# 4.4. Hyper-parameter Tuning + +We conduct experiments on the validation set of RoomR[45] to determine the distance threshold in the optimization criteria for our closer-aligned-retrace exploration policy. A very small threshold value means visiting nearly every grid space on the map, while a large threshold value ignores the underlying concern of non-ambiguous scene change detection. The exploration happens in the unshuffle stage as well and our exploration policy leads the agent to try its best to replicate the previous trajectory. 
Therefore the threshold value determines the trade-off between optimality in terms of the agent traversal for exploration and a + +![](images/7149785c55bd1885dd7bea58448324f51893f61179ef7663851dd92c6d18cd79.jpg) +(a) + +![](images/adc897f0f598b5a8bb756f5d60318946a76e4cea141b8c8bec3d8ec551f46c54.jpg) + +![](images/a3ca2b936f2a943a404682ffc09a2039627478f63645225030f3d7e571aeef64.jpg) + +![](images/32bd738717ba6245ba55d90d0a596038d86b182a6c3a2fac58395a0bb9c67392.jpg) + +![](images/2b986476f189378ddf65c842c834717dea5348edb3917e689ddc4be186cf1037.jpg) + +![](images/1feea33d1266b6a38b1b4a7399bb33a5050d3a58a4c59e0f911b4c05f989b7a5.jpg) +(b) + +![](images/609d8de081d1ff07f78a683bd68426856d88c5ba3a48c303aa5ae73f9506fa18.jpg) + +![](images/905de26f5b4e979d979697de92735960e763e7523578e028756b0cce0204ffd2.jpg) + +![](images/f6dc3cc380e7ad7a9920a73ab658606957d8acf47dc6e70761942af6e851f305.jpg) + +![](images/38ae046bc51695003b76ee14f83d191f54dd59fc7d8daef874d5aefc127e2e6d.jpg) + +![](images/3072faf8aa403bb33acae04c23293120ea0ec29efd0ee520a6358a1df1e52e21.jpg) +(c) + +![](images/f6f52c9fb5660bf092baad4d53b3d1e76e32e77b17846a92bd1843d32e56c6a6.jpg) +Figure 5. Visualization of optimization process of observation distance map and construction of diff-cloud (a) In the walkthrough stage, objects begin in the positions indicated by the dashed blue bounding boxes. Observation distance map is positioned at the top right corner of each image. The color transitioning from blue to red represents the distances ranging from $0\mathrm{m}$ to $5\mathrm{m}$ . (b) In the unshuffle stage, objects are moved to the locations indicated by the solid red box. (c) The diff-cloud is gradually built up, including the moved part (blue points) and the protruding part (red points). + +![](images/afb6c381cd2f5eb5f626adbbb9058e8592df17a1995d2efb0f05a8698483c1e3.jpg) + +![](images/7fdfcd37d4774f6eddae9031b1466958d031873a45a25bcb35d4754dbf10c20c.jpg) + +![](images/2b6d53dc284f29ff6d02a8b6b72dd6bf3b0bf6af88038db82bd1cd80e86d7192.jpg) + +non-ambiguous scene change detection. + +As shown in the Fig.4, we set observation distance thresholds from $1\mathrm{m}$ to $7\mathrm{m}$ and compute the average metrics of 1000 tasks. Optimal performance on the validation set is achieved with a distance threshold at $1.5\mathrm{m}$ , which is the threshold consistently applied in the other experiments throughout this paper. In this experiment, error bars are calculated based on a $68\%$ confidence interval. + +# 4.5. Visualization + +We visualize and analyze the optimization of the observation distance map during the walkthrough stage and the construction of the diff-cloud in the unshuffle stage, as shown in Fig. 5. As the exploration progresses, the distance map increasingly exhibit hues of blue, which indicates that our exploration policy enables the agent to observe the scene details up close. In the unshuffle stage, as the diff-cloud is gradually built up, we develop a distinct understanding of the changes occurring throughout the scene. After matching these changes according to the similarity of their appearance, we can carry out the rearrangement execution procedurally. + +# 5. Conclusion + +We propose a category agnostic model for visual rearrangement task in this paper. 
Our method is composed of a closer-aligned-retrace exploration policy, a scene change detection module based on point cloud and a scene change matching module utilizing the similarity of appearance feature, each specifically designed to recover the scene configuration regardless of any category labels. To validate the proposed method, we conduct experiments on the RoomR dataset and a more practical dataset MrMiR collected by us, where multiple instances distribute across multiple rooms. Experimental results on these two datasets demonstrate that our method is able to perform the visual rearrangement task effectively without any category information. + +Acknowledgements: This work was supported by the National Natural Science Foundation of China under Grant 62125207, 62272443, 62032022 and U23B2012, in part by Beijing Natural Science Foundation under Grant JQ22012, Z190020. + +# References + +[1] Pablo F Alcantarilla, Simon Stent, German Ros, Roberto Arroyo, and Riccardo Gherardi. Street-view change detection with deconvolutional networks. Autonomous Robots, 42: 1301-1322, 2018. 3 +[2] Dhruv Batra, Angel X. Chang, Sonia Chernova, Andrew J. Davison, Jia Deng, Vladlen Koltun, Sergey Levine, Jitendra Malik, Igor Mordatch, Roozbeh Mottaghi, Manolis Savva, and Hao Su. Rearrangement: A challenge for embodied ai, 2020. 1, 2, 3, 5 +[3] Edward Beeching, Jilles Dibangoye, Olivier Simonin, and Christian Wolf. Learning to plan with uncertain topological maps. In European Conference on Computer Vision, pages 473-490. Springer, 2020. 2 +[4] Shuhui Bu, Qing Li, Pengcheng Han, Pengyu Leng, and Ke Li. Mask-cdnet: A mask based pixel change detection network. Neurocomputing, 378:166-178, 2020. 3 +[5] Yuri Burda, Harri Edwards, Deepak Pathak, Amos Storkey, Trevor Darrell, and Alexei A. Efros. Large-scale study of curiosity-driven learning. In ICLR, 2019. 2 +[6] Devendra Singh Chaplot, Dhiraj Gandhi, Saurabh Gupta, Abhinav Gupta, and Ruslan Salakhutdinov. Learning to explore using active neural slam. In International Conference on Learning Representations (ICLR), 2020. 2 +[7] Devendra Singh Chaplot, Helen Jiang, Saurabh Gupta, and Abhinav Gupta. Semantic curiosity for active visual learning. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part VI 16, pages 309–326. Springer, 2020. 2 +[8] Devendra Singh Chaplot, Murtaza Dalal, Saurabh Gupta, Jitendra Malik, and Russ R Salakhutdinov. Seal: Self-supervised embodied active learning using exploration and 3d consistency. Advances in neural information processing systems, 34:13086-13098, 2021. 3 +[9] Chao-Peng Chen, Jun-Wei Hsieh, Ping-Yang Chen, Yi-Kuan Hsieh, and Bor-Shiun Wang. Saras-net: scale and relation aware siamese network for change detection. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 14187-14195, 2023. 2, 3 +[10] Shuo Chen, Kailun Yang, and Rainer Stiefelhagen. Drtanet: Dynamic receptive temporal attention network for street scene change detection. In 2021 IEEE Intelligent Vehicles Symposium (IV), pages 502-509. IEEE, 2021. 3 +[11] Tao Chen, Saurabh Gupta, and Abhinav Gupta. Learning exploration policies for navigation. In International Conference on Learning Representations, 2019. 2 +[12] Matt Deitke, Eli VanderBilt, Alvaro Herrasti, Luca Weihs, Jordi Salvador, Kiana Ehsani, Winson Han, Eric Kolve, Ali Farhadi, Aniruddha Kembhavi, et al. Procthor: Large-scale embodied ai using procedural generation. arXiv preprint arXiv:2206.06994, 2022. 2, 5 +[13] Edsger W Dijkstra. 
A note on two problems in connexion with graphs. Numerische mathematik, 1(1):269-271, 1959. 3, 5 +[14] H. Durrant-Whyte and T. Bailey. Simultaneous localization and mapping: part i. IEEE Robotics & Automation Magazine, 13(2):99-110, 2006. 2 + +[15] Martin Ester, Hans-Peter Kriegel, Jörg Sander, Xiaowei Xu, et al. A density-based algorithm for discovering clusters in large spatial databases with noise. In kdd, pages 226-231, 1996. 4 +[16] Kuan Fang, Alexander Toshev, Li Fei-Fei, and Silvio Savarese. Scene memory transformer for embodied agents in long-horizon tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[17] Kuan Fang, Alexander Toshev, Li Fei-Fei, and Silvio Savarese. Scene memory transformer for embodied agents in long-horizon tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 538–547, 2019. 2 +[18] Samir Yitzhak Gadre, Kiana Ehsani, Shuran Song, and Roozbeh Mottaghi. Continuous scene representations for embodied ai. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14849-14859, 2022. 1, 2 +[19] Enqiang Guo, Xinsha Fu, Jiawei Zhu, Min Deng, Yu Liu, Qing Zhu, and Haifeng Li. Learning to measure change: Fully convolutional siamese metric networks for scene change detection. arXiv preprint arXiv:1810.09111, 2018.3 +[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 4 +[21] Dinesh Jayaraman and Kristen Grauman. Learning to look around: Intelligently exploring unseen environments for unknown tasks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2 +[22] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 2 +[23] Eric Kolve, Roozbeh Mottaghi, Winson Han, Eli VanderBilt, Luca Weihs, Alvaro Herrasti, Matt Deitke, Kiana Ehsani, Daniel Gordon, Yuke Zhu, et al. Ai2-thor: An interactive 3d environment for visual ai. arXiv preprint arXiv:1712.05474, 2017.6 +[24] Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly, 2(1-2):83-97, 1955. 5 +[25] Weijie Li, Xinhang Song, Yubing Bai, Sixian Zhang, and Shuqiang Jiang. ION: instance-level object navigation. In MM '21: ACM Multimedia Conference, Virtual Event, China, October 20 - 24, 2021, pages 4343-4352. ACM, 2021. 2 +[26] Haotong Lin, Qianqian Wang, Ruojin Cai, Sida Peng, Hadar Averbuch-Elor, Xiaowei Zhou, and Noah Snively. Neural scene chronology. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20752-20761, 2023. 2, 3 +[27] Kevin Matzen and Noah Snavely. Scene chronology. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pages 615-630. Springer, 2014. 2, 3 + +[28] So Yeon Min, Devendra Singh Chaplot, Pradeep Ravikumar, Yonatan Bisk, and Ruslan Salakhutdinov. Film: Following instructions in language with modular methods. arXiv preprint arXiv:2110.07342, 2021. 3 +[29] Medhini Narasimhan, Erik Wijmans, Xinlei Chen, Trevor Darrell, Dhruv Batra, Devi Parikh, and Amanpreet Singh. Seeing the un-scene: Learning amodal semantic maps for room navigation. 
In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVIII 16, pages 513-529. Springer, 2020. 2 +[30] Deepak Pathak, Pulkit Agrawal, Alexei A Efros, and Trevor Darrell. Curiosity-driven exploration by self-supervised prediction. In International conference on machine learning, pages 2778-2787. PMLR, 2017. 2 +[31] Deepak Pathak, Dhiraj Gandhi, and Abhinav Gupta. Self-supervised exploration via disagreement. In International conference on machine learning, pages 5062-5071. PMLR, 2019. 2 +[32] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 5, 6 +[33] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2 +[34] Santhosh K Ramakrishnan, Ziad Al-Halah, and Kristen Grauman. Occupancy anticipation for efficient exploration and navigation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part V 16, pages 400-418. Springer, 2020. 2 +[35] Santhosh K Ramakrishnan, Dinesh Jayaraman, and Kristen Grauman. An exploration of embodied visual exploration. International Journal of Computer Vision, 129:1616-1649, 2021. 2 +[36] Vijaya Raghavan T Ramkumar, Elahe Arani, and Bahram Zonooz. Differencing based self-supervised pretraining for scene change detection. In Conference on Lifelong Learning Agents, pages 952-965. PMLR, 2022. 2, 3, 5 +[37] Ken Sakurada, Mikiya Shibuya, and Weimin Wang. Weakly supervised silhouette-based semantic scene change detection. In 2020 IEEE International conference on robotics and automation (ICRA), pages 6861-6867. IEEE, 2020. 2, 3 +[38] Gabriel Sarch, Zhaoyuan Fang, Adam W Harley, Paul Schydlo, Michael J Tarr, Saurabh Gupta, and Katerina Fragkiadaki. Tidee: Tidying up novel rooms using visuo-semantic commonsense priors. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIX, pages 480-496. Springer, 2022. 1, 2, 3, 6, 7 +[39] Nikolay Savinov, Anton Raichuk, Raphaël Marinier, Damien Vincent, Marc Pollefeys, Timothy Lillicrap, and Sylvain Gelly. Episodic curiosity through reachability. In International Conference on Learning Representations (ICLR), 2019. 2 + +[40] Grant Schindler and Frank Dellaert. Probabilistic temporal inference on reconstructed 3d scenes. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1410-1417. IEEE, 2010. 2, 3 +[41] Brandon Trabucco, Gunnar Sigurdsson, Robinson Piramuthu, Gaurav S Sukhatme, and Ruslan Salakhutdinov. A simple approach for visual rearrangement: 3d mapping and semantic search. arXiv preprint arXiv:2206.13396, 2022. 1, 2, 3, 6, 7 +[42] Guo-Hua Wang, Bin-Bin Gao, and Chengjie Wang. How to reduce change detection to semantic segmentation. Pattern Recognition, 138:109384, 2023. 2, 3 +[43] Xiaohan Wang, Yuehu Liu, Xinhang Song, Beibei Wang, and Shuqiang Jiang. Generating explanations for embodied action decision from visual observation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 2838-2846, 2023. 2 +[44] Xiaohan Wang, Yuehu Liu, Xinhang Song, Beibei Wang, and Shuqiang Jiang. 
Camp: Causal multi-policy planning for interactive navigation in multi-room scenes. Advances in Neural Information Processing Systems, 36, 2024. 2 +[45] Luca Weihs, Matt Deitke, Aniruddha Kembhavi, and Roozbeh Mottaghi. Visual room rearrangement. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5922-5931, 2021. 1, 2, 3, 5, 6, 7 +[46] Haitao Zeng, Xinhang Song, and Shuqiang Jiang. Multi-object navigation using potential target position policy function. IEEE Transactions on Image Processing, 2023. 2 +[47] Sixian Zhang, Weijie Li, Xinhang Song, Yubing Bai, and Shuqiang Jiang. Generative meta-adversarial network for unseen object navigation. In Computer Vision - ECCV 2022 - 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIX, pages 301-320. +[48] Sixian Zhang, Xinhang Song, Yubing Bai, Weijie Li, Yakui Chu, and Shuqiang Jiang. Hierarchical object-to-zone graph for object navigation. In 2021 IEEE/CVF International Conference on Computer Vision, ICCV 2021, Montreal, QC, Canada, October 10-17, 2021, pages 15110-15120. IEEE, 2021. +[49] Sixian Zhang, Xinhang Song, Weijie Li, Yubing Bai, Xinyao Yu, and Shuqiang Jiang. Layout-based causal inference for object navigation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10792-10802, 2023. 2 \ No newline at end of file diff --git a/2024/A Category Agnostic Model for Visual Rearrangment/images.zip b/2024/A Category Agnostic Model for Visual Rearrangment/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..aec6221c8501906f16d578e3ed72e337c7ad67c4 --- /dev/null +++ b/2024/A Category Agnostic Model for Visual Rearrangment/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ace2b14b45c2be884900b966ed92139f31dac25273744ad6f10b1ab8be0add99 +size 407618 diff --git a/2024/A Category Agnostic Model for Visual Rearrangment/layout.json b/2024/A Category Agnostic Model for Visual Rearrangment/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..c8126d90b81d55348fee096fcc9b7a1e34f94c3c --- /dev/null +++ b/2024/A Category Agnostic Model for Visual Rearrangment/layout.json @@ -0,0 +1,8431 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 131, + 103, + 463, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 103, + 463, + 121 + ], + "spans": [ + { + "bbox": [ + 131, + 103, + 463, + 121 + ], + "type": "text", + "content": "A Category Agnostic Model for Visual Rearrangement" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "spans": [ + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "type": "text", + "content": "Yuyi Liu" + }, + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "type": "text", + "content": ", Xinhang Song" + }, + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "type": "text", + "content": ", Weijie Li" + }, + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "type": "text", + "content": ", Xiaohan Wang" + }, + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "type": 
"inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "type": "text", + "content": ", Shuqiang Jiang" + }, + { + "bbox": [ + 108, + 142, + 485, + 159 + ], + "type": "inline_equation", + "content": "^{1,2}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 108, + 159, + 485, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 159, + 485, + 213 + ], + "spans": [ + { + "bbox": [ + 108, + 159, + 485, + 213 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 108, + 159, + 485, + 213 + ], + "type": "text", + "content": "Key Lab of Intelligent Information Processing Laboratory of the Chinese Academy of Sciences (CAS), Institute of Computing Technology, Beijing " + }, + { + "bbox": [ + 108, + 159, + 485, + 213 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 108, + 159, + 485, + 213 + ], + "type": "text", + "content": "University of Chinese Academy of Sciences, Beijing {yuyi.liu, xinhang song, weijie.li, xiaohan.wang}@vipl.ict.ac.cn sqjiang@ict.ac.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 229, + 192, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 229, + 192, + 241 + ], + "spans": [ + { + "bbox": [ + 143, + 229, + 192, + 241 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 256, + 290, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 256, + 290, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 290, + 568 + ], + "type": "text", + "content": "This paper presents a novel category agnostic model for visual rearrangement task, which can help an embodied agent to physically recover the shuffled scene configuration without any category concepts to the goal configuration. Previous methods usually follow a similar architecture, completing the rearrangement task by aligning the scene changes of the goal and shuffled configuration, according to the semantic scene graphs. However, constructing scene graphs requires the inference of category labels, which not only causes the accuracy drop of the entire task but also limits the application in real world scenario. In this paper, we delve deep into the essence of visual rearrangement task and focus on the two most essential issues, scene change detection and scene change matching. We utilize the movement and the protrusion of point cloud to accurately identify the scene changes and match these changes depending on the similarity of category agnostic appearance feature. Moreover, to assist the agent to explore the environment more efficiently and comprehensively, we propose a closer-aligned-retrace exploration policy, aiming to observe more details of the scene at a closer distance. We conduct extensive experiments on AI2THOR Rearrangement Challenge based on RoomR dataset and a new multi-room multi-instance dataset MrMiR collected by us. The experimental results demonstrate the effectiveness of our proposed method." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 595, + 128, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 595, + 128, + 608 + ], + "spans": [ + { + "bbox": [ + 47, + 595, + 128, + 608 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "content": "Rearrangement task remains a practical challenge for embodied agents that assist humans in real life, whose goal is to bring a given physical environment into the goal state with a goal specification [2]. In this paper, we focus on a branch of the general rearrangement task based on ExperienceGoal, i.e., visual rearrangement task[45], which requires an agent to recover the scene configuration after it was shuffled randomly. Due to the excessive complexity of" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 308, + 228, + 545, + 373 + ], + "blocks": [ + { + "bbox": [ + 308, + 228, + 545, + 373 + ], + "lines": [ + { + "bbox": [ + 308, + 228, + 545, + 373 + ], + "spans": [ + { + "bbox": [ + 308, + 228, + 545, + 373 + ], + "type": "image", + "image_path": "c58d68c513c2feb8e0e7df93515cd0b68fe93a15b8ae7b9077124b7472caffd2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 380, + 547, + 426 + ], + "lines": [ + { + "bbox": [ + 305, + 380, + 547, + 426 + ], + "spans": [ + { + "bbox": [ + 305, + 380, + 547, + 426 + ], + "type": "text", + "content": "Figure 1. Influence of different scene change representations on scene change detection sensitivity and scene change matching simplicity. To strike a balance between these two issues, we select point cloud as our scene change representation." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 449, + 546, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 546, + 579 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 546, + 579 + ], + "type": "text", + "content": "the state space, the end-to-end deep reinforcement learning methods previously used for navigation struggle to cover this task, resulting in performance only marginally above chance [18, 45]. Recent works demonstrate that the modular methods, such as MaSS [41] and TIDEE [38], effectively reduce the complexity of the rearrangement task by dividing the task into several modules. These methods use a pre-trained detector to assign category labels to each object and infer the rearrangement goals through matching the semantic scene graphs of both the goal and shuffled configuration." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "type": "text", + "content": "However, the introduction of category information may not be that necessary as the essential goal of visual rearrangement task is equal to \"make it like what it was before\". Even without the category labels, we can still perform the task by memorizing the appearance characteristics and the state information of objects in the scene. Besides, due to the limited accuracy of the detector, the transition from visual input to category information will inevitably lead to errors, which can accumulate and propagate to subsequent modules, thereby causing the accuracy drop of the entire task. 
Previous works achieve large gains with ground-truth se" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "16457" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 192 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 192 + ], + "type": "text", + "content": "mantic segmentation[18, 41]. Moreover, there are inherent limitations of the methodologies based on category information. Once the detector is trained, these methods are restricted to a fixed set of categories and powerless against the object categories not previously observed in training environment. Using zero-shot methods, such as SAM [22] combined with CLIP [33], can considerably expand the known categories, but they are still within a limited set. It is impractical to retrain the model every time a new object category emerges due to the extensive resources required." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 194, + 289, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 194, + 289, + 337 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 289, + 337 + ], + "type": "text", + "content": "To address the above problems, our motivation is to identify all scene changes in the room and restore them, regardless of any category. Previous methods use semantic labels for SCM because these labels provide a high-level representation of objects and make SCM straightforward. However, the scene changes can be represented in numerous ways, ranging from pixel to point cloud, and up to label combined with positional information. As shown in Fig. 1, there is an inherent trade-off: while simpler representations minimize information loss during conversion and enhance the sensitivity of SCD, they simultaneously complicate the process of SCM." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 339, + 289, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 339, + 289, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 339, + 289, + 460 + ], + "type": "text", + "content": "Point cloud can serve as an appropriate representation of scene change, as it captures rich geometric, positional, and scale information of objects and remains robust against varied observation angles and obstructions from other objects. 
Leveraging point cloud facilitates efficient SCD and also provides richer appearance information for SCM. However, due to the inherent unordered nature and rotational invariance of point cloud, it is difficult to match the point cloud directly. We need to extract high-dimensional appearance features from point cloud for SCM." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 460, + 289, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 460, + 289, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 460, + 289, + 700 + ], + "type": "text", + "content": "Based on these observations, we propose a category ag- nostic model for visual rearrangement task called CAVR, to the best of our knowledge, this is the first attempt for visual rearrangement without category inferring. By utilizing point cloud as the scene change representation, CAVR can recover the scene configuration to its goal state without any category concepts. In CAVR, we introduce a closeraligned-retrace exploration policy to help agent conduct exploration effectively for SCD. Meanwhile, we maintain a diff-cloud, which consists of two components, one for the point cloud moved and another for the point cloud protruding in the shuffled scene configuration, compared to the goal configuration. The diff-cloud precisely captures the variations occurring throughout the scene. After exploration, we utilize the pre-trained appearance feature extractor to embed the diff-cloud and then match the scene changes across various locations based on the similarity of appearance feature, resulting in a series of rearrangement goals. Then we use a planning-based policy to restore them to their goal states in succession." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "type": "text", + "content": "We conduct experiments on AI2THOR Rearrangement" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 547, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 168 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 168 + ], + "type": "text", + "content": "Challenge based on the RoomR dataset[45] and shows improvements on both the success rate and the portion of successfully fixed objects. To cater to more practical demands, we introduce a multi-room multi-instance rearrangement dataset MrMiR based on ProcTHOR simulator[12]. The experimental results on MrMiR dataset fully demonstrate the effectiveness of our method in the complex multi-room environment." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 178, + 398, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 178, + 398, + 192 + ], + "spans": [ + { + "bbox": [ + 306, + 178, + 398, + 192 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 198, + 547, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 198, + 547, + 415 + ], + "spans": [ + { + "bbox": [ + 304, + 198, + 547, + 415 + ], + "type": "text", + "content": "Rearrangement The general rearrangement problem [2] aims to transform the environment from an initial state to a goal state through interaction. 
We focus on an instantiation of the rearrangement problem[45], in which the goal state is specified by immersing the agent in the goal environment and allowing the agent to explore autonomously. Prior works can be classified into two categories, end-to-end reinforcement learning and modular methods. The end-to-end methods [18, 45] perform poorly mainly due to the large action space and complex stages in the task. Comparatively, the modular methods [38, 41] have shown surprising progress in improving the success rate. In detail, Mass[41] proposes a semantic policy with a voxel-based semantic map to find and match the changed objects. TIDEE[38] utilizes the spatial relationships between objects to determine the changed objects. Motivated by prior works, we also propose a modular method, while our model can perform the task without any category information." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 415, + 547, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 547, + 521 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 547, + 521 + ], + "type": "text", + "content": "Visual exploration Visual exploration refers to the process in which an agent collects information about the 3D environment through motion and perception [14, 29, 30, 35]. For visual exploration, efficiency is of utmost significance, involving how to access a broader range of regions [3, 6, 17, 39], observe more objects [16] and obtain a larger volume of environmental information relevant to downstream tasks (such as navigation) [25, 43, 44, 46-49] within a certain budget." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 522, + 547, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 547, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 547, + 617 + ], + "type": "text", + "content": "To improve the efficiency of exploration, several methods have employed ideas like curiosity [5, 7, 30, 31], coverage [6, 11] and reconstruction [21, 34]. Most related to ours is the coverage-based works, which try to maximize the area seen in the environment [6, 11]. In our exploration policy, both the area explored and the observation distance are considered simultaneously to accurately observe more details of the scene." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 618, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 618, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 618, + 547, + 715 + ], + "type": "text", + "content": "Scene Change Detection Scene change detection (SCD) refers to the task of identifying and localizing changes of a scene captured at different times[9, 26, 27, 36, 37, 40, 42]. Depending on the types of scene representation, methods are classified into two categories, respectively, 2D domain and 3D domain[37]. 
The first one devises specific neural networks to process the image pair taken at different times and generate a pixel-level prediction, namely, each pixel is" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "16458" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "type": "text", + "content": "classified into a category of change[1, 4, 9, 10, 19, 36, 37, 42]. Some studies focus on the scene change detection in 3D domain. They aim to reconstruct a time-varying 3D model from images taken from multiple viewpoints at different times and represent the temporal scene changes over several decades[26, 27, 40]. In our task, we not only focus on identifying changes in the 3D environment, but also emphasize the importance of matching changes across various locations, which is crucial for enabling the agent to accurately recover the scene configuration." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 201, + 104, + 215 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 201, + 104, + 215 + ], + "spans": [ + { + "bbox": [ + 47, + 201, + 104, + 215 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 221, + 289, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 221, + 289, + 426 + ], + "spans": [ + { + "bbox": [ + 46, + 221, + 289, + 426 + ], + "type": "text", + "content": "Given the intrinsic complexity of visual rearrangement task, in this section, we present a modular approach to tackle the task, decomposing it into manageable subtasks including visual exploration, scene change detection and scene change matching. Our pipeline is illustrated in Fig. 2. We start this section by giving the definition of visual rearrangement task in Sec. 3.1. Then we describe the three modules separately. The visual exploration module (Sec. 3.2) requires the agent to explore the environment efficiently and comprehensively while retaining memory of the environment. Subsequently, the scene change detection module (Sec. 3.3) utilizes the agent's memory of the goal environment and compares it with the current environment to identify all scene changes. Then to recover the goal configuration, the scene change matching module (Sec. 3.4) is proposed to correlate these changes across different areas within the scene and infer the rearrangement goals." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 432, + 201, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 432, + 201, + 445 + ], + "spans": [ + { + "bbox": [ + 47, + 432, + 201, + 445 + ], + "type": "text", + "content": "3.1. 
Visual Rearrangement Task" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "spans": [ + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "text", + "content": "According to the commonly accepted norms in the community[2], the rearrangement task is defined in a general form, where an agent is initialized in a starting state " + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "inline_equation", + "content": "s^0" + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "text", + "content": " and required to transform the environment from " + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "inline_equation", + "content": "s^0" + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "text", + "content": " to the goal state " + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "inline_equation", + "content": "s^* \\in S^*" + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "text", + "content": " with the possible actions " + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "inline_equation", + "content": "a \\in A" + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "text", + "content": ". The environment state space is denoted as the Cartesian product of the pose spaces of all rigid parts: " + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "inline_equation", + "content": "S = (R^3 \\times SO3) \\times (R^3 \\times SO3) \\ldots \\times (R^3 \\times SO3)" + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "inline_equation", + "content": "R^3" + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "inline_equation", + "content": "SO3" + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "text", + "content": " represent the 3D locations and rotations space. Follow the Partially Observable Markov Decision Processes (POMDP), the agent typically has no access to any state space and must operate purely based on the sensory observations " + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "inline_equation", + "content": "o \\in O" + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "text", + "content": " and the given goal specification " + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "inline_equation", + "content": "g = \\phi(s^0, S^*)" + }, + { + "bbox": [ + 46, + 450, + 289, + 641 + ], + "type": "text", + "content": ". Based on different goal specification forms (GeometricGoal, ImageGoal, LanguageGoal, ExperienceGoal, et al.), the general rearrangement task has various levels of difficulty." 
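The paragraph above formalizes the environment state as a product of per-object pose spaces in R^3 x SO(3) and defines the task as bringing every pose back to its goal value. Purely as an illustration of that abstraction (not the paper's code; class and function names and the tolerance values below are assumptions), a minimal Python sketch of a pose-based state and a goal check could look like this:

```python
# Minimal sketch of the pose-based state abstraction described above.
# All names (Pose, is_rearranged) and tolerances are illustrative assumptions.
from dataclasses import dataclass
import numpy as np

@dataclass
class Pose:
    position: np.ndarray   # (3,) location in the global frame (R^3)
    rotation: np.ndarray   # (3, 3) rotation matrix (SO(3))

def pose_distance(a: Pose, b: Pose) -> tuple[float, float]:
    """Translation distance (m) and rotation angle (rad) between two poses."""
    t = float(np.linalg.norm(a.position - b.position))
    cos_theta = (np.trace(a.rotation.T @ b.rotation) - 1.0) / 2.0
    r = float(np.arccos(np.clip(cos_theta, -1.0, 1.0)))
    return t, r

def is_rearranged(current: dict[str, Pose], goal: dict[str, Pose],
                  pos_tol: float = 0.05, rot_tol: float = 0.1) -> bool:
    """True if every object's pose is within tolerance of its goal pose."""
    for name, goal_pose in goal.items():
        t, r = pose_distance(current[name], goal_pose)
        if t > pos_tol or r > rot_tol:
            return False
    return True
```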
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "content": "We consider an instance of rearrangement task proposed by Weihs et al.[45], which adopts the ExperienceGoal as the goal specification " + }, + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "content": " and is defined as a two-stage task, including the walkthrough and unshuffle stages. During the walkthrough stage, the agent is immersed in a room of goal state " + }, + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "inline_equation", + "content": "s^*" + }, + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "content": " and allowed to explore autonomously. Sequentially," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 121 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 121 + ], + "type": "text", + "content": "the walkthrough environment is shuffled and some objects' states are changed, denoted as the unshuffle stage, where the agent officially starts the rearrangement task and reorganizes the shuffled scene configuration back." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 127, + 417, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 127, + 417, + 140 + ], + "spans": [ + { + "bbox": [ + 305, + 127, + 417, + 140 + ], + "type": "text", + "content": "3.2. Visual Exploration" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 145, + 545, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 145, + 545, + 289 + ], + "spans": [ + { + "bbox": [ + 304, + 145, + 545, + 289 + ], + "type": "text", + "content": "Under the two-stage rearrangement task, the initial exploration of the target environment is critical for the subsequent stages, since the agent is expected to acquire more object information in the fewest number of steps. Previous works adopt coverage-based exploration [38] or a search policy based on the expert distribution of objects [41]. However, there are usually many small-sized objects distributed across the scene, which can be easily overlooked or obscured by large entities when observed from a distance. Therefore, we propose a closer-aligned-retrace exploration policy, aiming to observe more objects at a closer distance to improve the observation accuracy and completeness." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 289, + 545, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 289, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 304, + 289, + 545, + 348 + ], + "type": "text", + "content": "The core idea of the proposed policy is to build an observation distance map " + }, + { + "bbox": [ + 304, + 289, + 545, + 348 + ], + "type": "inline_equation", + "content": "m_{o} \\in \\mathbb{R}^{H \\times W}" + }, + { + "bbox": [ + 304, + 289, + 545, + 348 + ], + "type": "text", + "content": ", where each grid denotes the minimum distance at which the current coordinate point is observed by the agent. 
Through the optimization of " + }, + { + "bbox": [ + 304, + 289, + 545, + 348 + ], + "type": "inline_equation", + "content": "m_{o}" + }, + { + "bbox": [ + 304, + 289, + 545, + 348 + ], + "type": "text", + "content": ", the agent can be guided to observe objects closer." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "spans": [ + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "text", + "content": "In exploration, at each timestep, the agent obtains visual observation RGBD and updates its own pose. Following previous work [8, 28, 41], we also build a 2D obstacle map " + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "inline_equation", + "content": "m_{t} \\in \\mathbb{R}^{H \\times W}" + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "text", + "content": " with the proposed observation distance map " + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "inline_equation", + "content": "m_{o}" + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "text", + "content": ". At the beginning of the exploration, due to the limited range of movement, the observation distance map " + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "inline_equation", + "content": "m_{o}" + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "text", + "content": " predominantly consists of high distance values. Therefore, the visual exploration policy " + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "text", + "content": " can be represented by optimizing a function " + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "inline_equation", + "content": "m_{o}" + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "text", + "content": " and a distance thresh " + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "inline_equation", + "content": "\\epsilon_{d}" + }, + { + "bbox": [ + 304, + 349, + 545, + 456 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 388, + 464, + 463, + 477 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 464, + 463, + 477 + ], + "spans": [ + { + "bbox": [ + 388, + 464, + 463, + 477 + ], + "type": "interline_equation", + "content": "\\pi (a) = f \\left(m _ {o}, \\epsilon_ {d}\\right)", + "image_path": "3779df1d719ff61c056a607d3709ef023401d1a9c2585ca664a05be749458ab1.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 485, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 485, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 485, + 545, + 628 + ], + "type": "text", + "content": "The goal of optimization is to minimize the observation distance map (i.e., " + }, + { + "bbox": [ + 304, + 485, + 545, + 628 + ], + "type": "inline_equation", + "content": "\\min(m_o) \\leq \\epsilon_d" + }, + { + "bbox": [ + 304, + 485, + 545, + 628 + ], + "type": "text", + "content": "). We employ an analytical approach to obtain the solution. 
Specifically, based on the current observation distance map " + }, + { + "bbox": [ + 304, + 485, + 545, + 628 + ], + "type": "inline_equation", + "content": "m_o" + }, + { + "bbox": [ + 304, + 485, + 545, + 628 + ], + "type": "text", + "content": ", we select a waypoint as the next exploration goal and apply the route planning Dijkstra algorithm [13] to generate a path on the obstacle map " + }, + { + "bbox": [ + 304, + 485, + 545, + 628 + ], + "type": "inline_equation", + "content": "m_t" + }, + { + "bbox": [ + 304, + 485, + 545, + 628 + ], + "type": "text", + "content": ". As to the waypoint selection, we prioritize selecting those with higher distance values on the distance map, aiming to observe objects closer. To better compare the shuffled and goal state of the scene for rearrangement goals inference, in the unshuffle stage, the agent tries its best to replicate the trajectory of the walkthrough stage." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 635, + 441, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 635, + 441, + 647 + ], + "spans": [ + { + "bbox": [ + 305, + 635, + 441, + 647 + ], + "type": "text", + "content": "3.3. Scene Change Detection" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "content": "Detecting changes within the scene is a critical capability for an agent to perform rearrangement tasks. We maintain a diff-cloud to represent the scene changes. As shown in Fig. 2 (b), the diff-cloud consists of two parts. The red and blue points respectively represent the moved and protruding" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "16459" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 73, + 545, + 324 + ], + "blocks": [ + { + "bbox": [ + 50, + 73, + 545, + 324 + ], + "lines": [ + { + "bbox": [ + 50, + 73, + 545, + 324 + ], + "spans": [ + { + "bbox": [ + 50, + 73, + 545, + 324 + ], + "type": "image", + "image_path": "8f9314053a3dc254d64dfd5ec5e18e3612266664ae3466209d2a713feaf4dbee.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 340, + 548, + 418 + ], + "lines": [ + { + "bbox": [ + 46, + 340, + 548, + 418 + ], + "spans": [ + { + "bbox": [ + 46, + 340, + 548, + 418 + ], + "type": "text", + "content": "Figure 2. Pipeline of our CAVR model. (a) The gradient color transitioning from blue to red in the observation distance map represents the distances ranging from " + }, + { + "bbox": [ + 46, + 340, + 548, + 418 + ], + "type": "inline_equation", + "content": "0\\mathrm{m}" + }, + { + "bbox": [ + 46, + 340, + 548, + 418 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 340, + 548, + 418 + ], + "type": "inline_equation", + "content": "5\\mathrm{m}" + }, + { + "bbox": [ + 46, + 340, + 548, + 418 + ], + "type": "text", + "content": ". We adopt a closer-aligned-retrace exploration policy to observe more details by optimizing a function of the distance map. 
(b) Scene change detection is performed by comparing the point clouds corresponding to the goal configuration and the shuffled configuration of the scene, recording the moved part (blue points) and the protruding part (red points) to construct the diff-cloud. (c) We extract the entity-layer information from the two parts of the diff-cloud and match these entities depending on the similarity of category agnostic appearance feature. (d) After the matching process, we obtain a series of rearrangement goals with their goal states (indicated by the dashed bounding boxes) and shuffled states (indicated by the solid bounding boxes)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 438, + 288, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 438, + 288, + 475 + ], + "spans": [ + { + "bbox": [ + 46, + 438, + 288, + 475 + ], + "type": "text", + "content": "point clouds in the shuffled configuration, compared to the goal configuration. Next, we explain how to construct the diff-cloud using visual inputs from the two stages." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "text", + "content": "During the walkthrough stage, at each pose " + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "inline_equation", + "content": "p^w" + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "text", + "content": " of the agent, we employ the depth information " + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "inline_equation", + "content": "D_{p^w}" + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "text", + "content": " to generate an egocentric point cloud " + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "inline_equation", + "content": "c_{p^w}^{ego}" + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "text", + "content": ". Each point in " + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "inline_equation", + "content": "c_{p^w}^{ego}" + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "text", + "content": " is associated with a pixel in depth " + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "inline_equation", + "content": "D_{p^w}" + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "text", + "content": ". Then we convert " + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "inline_equation", + "content": "c_{p^w}^{ego}" + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "text", + "content": " from the agent's coordinate system to global coordinate system, resulting in a geocentric point cloud " + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "inline_equation", + "content": "c_{p^w}^{geo}" + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "text", + "content": ". 
For the observed RGB image " + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "inline_equation", + "content": "I_{p^w}" + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "text", + "content": ", we adopt the pre-trained resnet18 model [20] provided by the official PyTorch to extract a visual feature map " + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "inline_equation", + "content": "f_{p^w}" + }, + { + "bbox": [ + 46, + 480, + 287, + 589 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": "During the unshuffle stage, we use the same method to generate the geocentric point cloud " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "c_{p^u}^{geo}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " and the feature map " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "f_{p^u}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " for each pose " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "p^u" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": ". If " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "p^u" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " aligns with a previous pose " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "p^w" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " in the walkthrough stage, we compare the two corresponding point clouds, " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "c_{p^w}^{geo}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "c_{p^u}^{geo}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": ". A considerable shift between two point coordinates associated with the same pixel indicates the changes have occurred in this location. Specifically, the increase in distance from the agent suggests removal of some objects, while the decrease signifies objects addition. Based on the distance variations, these" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 438, + 547, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 438, + 547, + 522 + ], + "spans": [ + { + "bbox": [ + 305, + 438, + 547, + 522 + ], + "type": "text", + "content": "points are allocated to the moved part and the protruding part of the diff-cloud, respectively. Moreover, for the area implying scene changes, we extract the corresponding visual feature from the feature map and assign it to each point in that region. 
Each point in the diff-cloud is represented as " + }, + { + "bbox": [ + 305, + 438, + 547, + 522 + ], + "type": "inline_equation", + "content": "\\{x,y,z,v\\}" + }, + { + "bbox": [ + 305, + 438, + 547, + 522 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 438, + 547, + 522 + ], + "type": "inline_equation", + "content": "x,y,z" + }, + { + "bbox": [ + 305, + 438, + 547, + 522 + ], + "type": "text", + "content": " is the 3D coordinate in the global coordinate system and " + }, + { + "bbox": [ + 305, + 438, + 547, + 522 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 305, + 438, + 547, + 522 + ], + "type": "text", + "content": " is the visual feature." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 536, + 443, + 549 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 536, + 443, + 549 + ], + "spans": [ + { + "bbox": [ + 306, + 536, + 443, + 549 + ], + "type": "text", + "content": "3.4. Scene Change Matching" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 555, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 555, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 555, + 545, + 676 + ], + "type": "text", + "content": "After exploration of the walkthrough stage and the unshuffle stage, we acquire the comprehensive diff-cloud that encompasses changes in all areas of the scene. Note that in the visual rearrangement task settings, objects cannot disappear into thin air, they are simply moved from one place to another. Therefore, to recover the scene configuration, we need to match changes across various locations in the scene. Since the diff-cloud contains only some points in space, we first extract the entity-layer information from it and then perform matching operations on the entity-level." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "type": "text", + "content": "We apply the density-based clustering algorithm DBSCAN [15] separately to the two parts of the diff-cloud, resulting in two sets of entities, a moved entity set" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "16460" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\Omega^m = \\{\\omega_1^m,\\omega_2^m,\\dots ,\\omega_k^m\\}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " and an protruding entity set " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\Omega^p = \\{\\omega_1^p,\\omega_2^p,\\dots ,\\omega_l^p\\}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": ". 
Each entity in these two sets is a collection of some points in the diff-cloud: " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\omega = \\{(x_{1},y_{1},z_{1},v_{1}),(x_{2},y_{2},z_{2},v_{2}),\\ldots ,(x_{n},y_{n},z_{n},v_{n})\\}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "spans": [ + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": "As shown in Fig. 2(c), the scene change matching process can be regarded as the weighted bipartite graph matching between " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "\\Omega^m" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "\\Omega^p" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": ". We construct a bipartite graph " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "G = (\\Omega^m \\cup \\Omega^p, E)" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "\\Omega^m \\cup \\Omega^p" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": " is the node set and " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": " represents the all fully connected edge set. Every edge " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "e \\in E" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": " has one end node in " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "\\Omega^m" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": " and the other end node in " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "\\Omega^p" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": ". The function " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": " assigns a positive weight value to each edge. 
A matching " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": " is a subset of " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": " such that each node in " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "\\Omega^m \\cup \\Omega^p" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": " appears in at most one edge in " + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 121, + 288, + 240 + ], + "type": "text", + "content": ". Our goal is to find the maximum matching:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 112, + 247, + 221, + 273 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 247, + 221, + 273 + ], + "spans": [ + { + "bbox": [ + 112, + 247, + 221, + 273 + ], + "type": "interline_equation", + "content": "M ^ {*} = \\operatorname * {a r g m a x} _ {M} \\sum_ {e \\in M} \\phi (e),", + "image_path": "af09ef43128a2168e20b9e614e0509c21a9b5560643bf0aa049c988bbe10d806.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 281, + 287, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 281, + 287, + 307 + ], + "spans": [ + { + "bbox": [ + 46, + 281, + 287, + 307 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 281, + 287, + 307 + ], + "type": "inline_equation", + "content": "e = e(\\omega_i^m,\\omega_j^p)" + }, + { + "bbox": [ + 46, + 281, + 287, + 307 + ], + "type": "text", + "content": " represents the edge matching node " + }, + { + "bbox": [ + 46, + 281, + 287, + 307 + ], + "type": "inline_equation", + "content": "\\omega_{i}^{m}" + }, + { + "bbox": [ + 46, + 281, + 287, + 307 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 281, + 287, + 307 + ], + "type": "inline_equation", + "content": "\\omega_{j}^{p}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "text", + "content": "The role of the weight function " + }, + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "inline_equation", + "content": "\\phi(e)" + }, + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "text", + "content": " is to determine the possibility that " + }, + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "inline_equation", + "content": "\\omega_i^m" + }, + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "inline_equation", + "content": "\\omega_j^p" + }, + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "text", + "content": " belong to the same instance. Based on this, we design the weight function to calculate the similarity in appearance of these two nodes. The appearance of each entity " + }, + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "text", + "content": " is considered from two aspects: geometric feature geo and visual feature vis. 
For vis, we use the average of the visual features of all the points in this entity. In terms of geo, we train a geometric feature extractor, which builds upon PointNet++[32] and embeds the raw point cloud data. The weight function " + }, + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 46, + 306, + 287, + 437 + ], + "type": "text", + "content": " is specifically defined as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 445, + 293, + 461 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 445, + 293, + 461 + ], + "spans": [ + { + "bbox": [ + 47, + 445, + 293, + 461 + ], + "type": "interline_equation", + "content": "\\phi \\big (e \\left(\\omega_ {i} ^ {m}, \\omega_ {j} ^ {p}\\right) \\big) = C o s i m \\left(g e o _ {i} ^ {m}, g e o _ {j} ^ {p}\\right) + C o s i m \\left(v i s _ {i} ^ {m}, v i s _ {j} ^ {p}\\right),", + "image_path": "d405af52fe9c038672fa9d9611ad3ab73c1202e5ac6ad88279f871e9ddc59d6c.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 468, + 228, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 468, + 228, + 479 + ], + "spans": [ + { + "bbox": [ + 47, + 468, + 228, + 479 + ], + "type": "text", + "content": "where Cosim refers to the cosine similarity." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 479, + 287, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 479, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 46, + 479, + 287, + 624 + ], + "type": "text", + "content": "Then the Kuhn-Munkres algorithm [24] is adopted to solve this maximum matching problem. Once entities are matched, we acquire entity pairs " + }, + { + "bbox": [ + 46, + 479, + 287, + 624 + ], + "type": "inline_equation", + "content": "\\{(\\omega_1^m,\\omega_{j_1}^p),(\\omega_2^m,\\omega_{j_2}^p),\\ldots ,(\\omega_t^m,\\omega_{j_t}^p)\\}" + }, + { + "bbox": [ + 46, + 479, + 287, + 624 + ], + "type": "text", + "content": " as rearrangement goals. Each entity pair " + }, + { + "bbox": [ + 46, + 479, + 287, + 624 + ], + "type": "inline_equation", + "content": "(\\omega_i^m,\\omega_{j_i}^p)" + }, + { + "bbox": [ + 46, + 479, + 287, + 624 + ], + "type": "text", + "content": " represents the two different states of the same instance, where " + }, + { + "bbox": [ + 46, + 479, + 287, + 624 + ], + "type": "inline_equation", + "content": "\\omega_{i}^{m}" + }, + { + "bbox": [ + 46, + 479, + 287, + 624 + ], + "type": "text", + "content": " denotes the goal state and " + }, + { + "bbox": [ + 46, + 479, + 287, + 624 + ], + "type": "inline_equation", + "content": "\\omega_{j_i}^p" + }, + { + "bbox": [ + 46, + 479, + 287, + 624 + ], + "type": "text", + "content": " denotes the current state of the instance. Subsequently, for the inferred rearrangement goals, we transport them to their goal states in succession, during which, we leverage the 2D obstacle map and Dijkstra algorithm [13] to conduct obstacle avoidance and navigation path planning." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 634, + 128, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 634, + 128, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 634, + 128, + 647 + ], + "type": "text", + "content": "4. 
Experiments" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 653, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 714 + ], + "type": "text", + "content": "Rearrangement task remains a practical challenge for embodied agents that assist humans in real life, whose goal is to bring a given physical environment into the goal state with a goal specification [2]. Each pixel is classified into a category of change[36]." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 307, + 91, + 547, + 152 + ], + "blocks": [ + { + "bbox": [ + 352, + 71, + 499, + 82 + ], + "lines": [ + { + "bbox": [ + 352, + 71, + 499, + 82 + ], + "spans": [ + { + "bbox": [ + 352, + 71, + 499, + 82 + ], + "type": "text", + "content": "Table 1. Comparison on RoomR dataset" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 91, + 547, + 152 + ], + "lines": [ + { + "bbox": [ + 307, + 91, + 547, + 152 + ], + "spans": [ + { + "bbox": [ + 307, + 91, + 547, + 152 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Suc (%)↑</td><td>FS (%)↑</td><td>E↓</td><td>Mis ↓</td></tr>
<tr><td>TIDEE</td><td>11.7</td><td>28.9</td><td>0.715</td><td>0.734</td></tr>
<tr><td>MaSS</td><td>4.7</td><td>16.5</td><td>1.016</td><td>1.018</td></tr>
<tr><td>Our</td><td>14.2</td><td>33.1</td><td>0.714</td><td>0.707</td></tr></table>
", + "image_path": "a02fbf47f9b823e571f022d7775f6bc487000e61c6cc5b71cbb8c22ffc8e088d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 306, + 155, + 538, + 179 + ], + "lines": [ + { + "bbox": [ + 306, + 155, + 538, + 179 + ], + "spans": [ + { + "bbox": [ + 306, + 155, + 538, + 179 + ], + "type": "text", + "content": "\"Suc\": Success; \"FS\": Fixed Strict; \"E\": Energy Remain; \"Mis\": Misplaced." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 306, + 212, + 547, + 274 + ], + "blocks": [ + { + "bbox": [ + 345, + 192, + 506, + 203 + ], + "lines": [ + { + "bbox": [ + 345, + 192, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 345, + 192, + 506, + 203 + ], + "type": "text", + "content": "Table 2. Comparison on our MrMiR dataset" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 306, + 212, + 547, + 274 + ], + "lines": [ + { + "bbox": [ + 306, + 212, + 547, + 274 + ], + "spans": [ + { + "bbox": [ + 306, + 212, + 547, + 274 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Suc (%)↑</td><td>FS (%)↑</td><td>E↓</td><td>Mis ↓</td></tr>
<tr><td>TIDEE</td><td>1.0</td><td>14.1</td><td>0.917</td><td>0.924</td></tr>
<tr><td>MaSS</td><td>0.6</td><td>10.5</td><td>1.019</td><td>1.026</td></tr>
<tr><td>Our</td><td>5.0</td><td>28.7</td><td>0.7327</td><td>0.7134</td></tr></table>
", + "image_path": "6d67523976df72675ffd5e9ace7f19559a81963645191a5e449f6063511b9558.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 306, + 276, + 538, + 300 + ], + "lines": [ + { + "bbox": [ + 306, + 276, + 538, + 300 + ], + "spans": [ + { + "bbox": [ + 306, + 276, + 538, + 300 + ], + "type": "text", + "content": "\"Suc\": Success; \"FS\": Fixed Strict; \"E\": Energy Remain; \"Mis\": Misplaced." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 322, + 413, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 322, + 413, + 335 + ], + "spans": [ + { + "bbox": [ + 306, + 322, + 413, + 335 + ], + "type": "text", + "content": "4.1. Experiment Setup" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 341, + 545, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 341, + 545, + 413 + ], + "spans": [ + { + "bbox": [ + 304, + 341, + 545, + 413 + ], + "type": "text", + "content": "Dataset We evaluate our method on the AI2THOR Rearrangement Challenge based on the RoomR dataset[45], which consists of 80 rooms and 4000 tasks for training, and 20 rooms with 1000 tasks each for both validation and test. Each task in RoomR involves 1 to 5 objects with state changes, characterized by object locations or openness." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 415, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 546, + 713 + ], + "type": "text", + "content": "In RoomR[45] dataset, the spatial range of object changes is limited due to the confined area with single-room scenes and the target objects to be rearranged are mainly category-wise, i.e., most categories only have one instance. To cater for the prevalent characteristics of indoor environments in reality, we build a more practical and challenging dataset MrMiR for the two-stage rearrangement task on the ProcTHOR simulator[12], where the change in the state of an object can involve a broader spatial range, even extending across different rooms. Besides, there exists multiple instances within the same category that have different appearance. The simulator ProcTHOR[12] respectively provides 10,000 training, 1000 valid and 1000 test apartments. For our task need, we totally select 6000 apartments in the simulator, splitting 5000 apartments for training, 500 apartments for validation, and 500 apartments for test. Each apartment contains multiple instances within the same category that have different appearance. For each apartment, we randomly generate one rearrangement task. Therefore, our MrMiR dataset totally contains 6000 rearrangement tasks, the same as RoomR. Fig. 3 illustrates the comparison of scene area distribution between our MrMiR dataset and RoomR dataset. It can be seen that our dataset encompasses a diverse range of scene area, while RoomR mainly focusing on small rooms under " + }, + { + "bbox": [ + 304, + 415, + 546, + 713 + ], + "type": "inline_equation", + "content": "100\\mathrm{m}^2" + }, + { + "bbox": [ + 304, + 415, + 546, + 713 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "16461" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 263 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 263 + ], + "type": "text", + "content": "To train the geometric feature extractor based on PointNet++[32], which embeds point cloud, we generate a dataset using AI2THOR[23]. We collect 77K sample pairs, of which 70K are used for training and 7K for testing. Each sample pair is composed of two point clouds, which may either represent the same instance (the positive pair) or different instances (the negative pair). The distribution of the positive and negative pairs is balanced, with a 1:1 ratio. Within each room of AI2THOR, we generate positive sample pairs by applying different transformation operations to the point cloud of the same object. We also perform transformation operations on the point clouds of two different objects to generate negative sample pairs. For the transformation operations, we consider random rotation, adding random noise, and randomly deleting " + }, + { + "bbox": [ + 46, + 72, + 289, + 263 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 46, + 72, + 289, + 263 + ], + "type": "text", + "content": " of the original point cloud data." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 267, + 289, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 267, + 289, + 518 + ], + "spans": [ + { + "bbox": [ + 46, + 267, + 289, + 518 + ], + "type": "text", + "content": "Metrics To evaluate an agent's performance, we consider several metrics as follows: (1) Success. The success metric is a binary indicator of each task, which is strictly defined as whether the whole objects' states have been restored to their goal states. (2) Fixed Strict. This metric records the proportion of successfully fixed objects per task. If there are any newly misplaced objects at the end of a task, this metric will be set as 0. (3) Misplaced. This metric is denoted as the number of misplaced objects after the unshuffle stage divided by the number of misplaced objects at the start of the unsuffle stage. (4) Energy Remaining (E). The above metrics are quite strict, which is not possible to measure the distance to task completion. The energy is used to represent the difference between two possible states of an object, which can be functioned as " + }, + { + "bbox": [ + 46, + 267, + 289, + 518 + ], + "type": "inline_equation", + "content": "D: S \\times S \\Rightarrow [0,1]" + }, + { + "bbox": [ + 46, + 267, + 289, + 518 + ], + "type": "text", + "content": ". The larger the energy value, the greater the difference between the two states, whereas if the two states are approximately equal, the energy value is 0. Therefore, this metric can be computed as the sum of all objects' energy after the unshuffle stage, divided by the sum of all objects' energy at the beginning of the unshuffle stage." 
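As a concrete reading of the metric definitions above, here is a minimal sketch (an assumption-laden illustration, not the official evaluation code) that computes the four metrics from per-object energies, where `energy` is any difference function D: S x S -> [0, 1] with values near 0 for approximately equal states and an object counts as misplaced when its energy exceeds a small threshold:

```python
# Illustrative sketch of the metrics described above; names and the
# "misplaced" threshold eps are assumptions, not values from the paper.
def rearrangement_metrics(start_energy, end_energy, eps=1e-3):
    """start_energy / end_energy: dict object_id -> energy w.r.t. the goal state,
    measured at the start and at the end of the unshuffle stage."""
    start_misplaced = {o for o, e in start_energy.items() if e > eps}
    end_misplaced = {o for o, e in end_energy.items() if e > eps}

    # Success: binary, all objects restored to their goal states.
    success = float(len(end_misplaced) == 0)

    # Fixed Strict: fraction of initially misplaced objects that were fixed,
    # forced to 0 if any object became newly misplaced during the episode.
    newly_misplaced = end_misplaced - start_misplaced
    fixed = start_misplaced - end_misplaced
    fixed_strict = 0.0 if newly_misplaced else len(fixed) / max(len(start_misplaced), 1)

    # Misplaced: misplaced objects at the end / misplaced objects at the start.
    misplaced = len(end_misplaced) / max(len(start_misplaced), 1)

    # Energy Remaining: total end energy / total start energy.
    energy_remaining = sum(end_energy.values()) / max(sum(start_energy.values()), 1e-8)

    return {"Success": success, "FixedStrict": fixed_strict,
            "Misplaced": misplaced, "EnergyRemaining": energy_remaining}
```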
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "text", + "content": "Implementation details The distance threshold " + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\epsilon_{d}" + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "text", + "content": " is set to " + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "inline_equation", + "content": "1.5\\mathrm{m}" + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "text", + "content": ", which is determined through hyper-parameter tuning, as detailed in Sec. 4.4. To ensure a fair comparison, we limit the maximum step number for both the exploration and rearrangement stages. In RoomR dataset[45], the exploration step limit is set to 300 and the navigation step limit for each object's rearrangement is set to 50. In our MrMiR dataset, we categorize the apartments by area into five levels: " + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "inline_equation", + "content": "< 10\\mathrm{m}^2" + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "inline_equation", + "content": "10 - 60\\mathrm{m}^2" + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "inline_equation", + "content": "60 - 150\\mathrm{m}^2" + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "inline_equation", + "content": "150 - 300\\mathrm{m}^2" + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "inline_equation", + "content": ">300\\mathrm{m}^2" + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "text", + "content": ". Correspondingly, the exploration step limits are set to 50, 200, 300, 500 and 800 and the navigation step limits for each object's rearrangement are set to 50, 80, 100, 200, 300. When we train the geometric feature extractor, we use Adam as our optimizer and the hyper-parameters " + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "inline_equation", + "content": "(lr,\\beta_1,\\beta_2,\\epsilon)" + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "text", + "content": " are set to " + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "inline_equation", + "content": "(0.001,0.9,0.999,1\\mathrm{e} - 8)" + }, + { + "bbox": [ + 46, + 522, + 289, + 714 + ], + "type": "text", + "content": ". 
The parameters and models are tuned only on the RoomR dataset" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 331, + 71, + 499, + 196 + ], + "blocks": [ + { + "bbox": [ + 331, + 71, + 499, + 196 + ], + "lines": [ + { + "bbox": [ + 331, + 71, + 499, + 196 + ], + "spans": [ + { + "bbox": [ + 331, + 71, + 499, + 196 + ], + "type": "image", + "image_path": "d4c18e530969ea7ba8d0b7fee367d8bf7fccb542eb8852b903f1d4cc70683665.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 203, + 545, + 226 + ], + "lines": [ + { + "bbox": [ + 305, + 203, + 545, + 226 + ], + "spans": [ + { + "bbox": [ + 305, + 203, + 545, + 226 + ], + "type": "text", + "content": "Figure 3. Comparison of scene area distribution between MrMiR and RoomR[45] datasets." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 306, + 254, + 489, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 254, + 489, + 266 + ], + "spans": [ + { + "bbox": [ + 306, + 254, + 489, + 266 + ], + "type": "text", + "content": "and are directly tested on the MrMiR dataset." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 283, + 486, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 283, + 486, + 296 + ], + "spans": [ + { + "bbox": [ + 306, + 283, + 486, + 296 + ], + "type": "text", + "content": "4.2. Comparisons with Related Works" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 305, + 545, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 305, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 305, + 305, + 545, + 353 + ], + "type": "text", + "content": "We report the quantitative comparisons on the RoomR dataset in Table 1 and the MrMiR dataset in Table 2 with the two state-of-the-art modular methods MaSS[41] and TIDEE[38]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 357, + 545, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 357, + 545, + 429 + ], + "spans": [ + { + "bbox": [ + 305, + 357, + 545, + 429 + ], + "type": "text", + "content": "MaSS [41] employs a Gaussian mixture model to train a semantic search strategy, aiming to guide the agent towards regions where the likelihood of object occurrence is higher. During the exploration process, the 3D voxel semantic map is constructed, which is then used to match and identify objects that need to be rearranged." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 431, + 545, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 431, + 545, + 479 + ], + "spans": [ + { + "bbox": [ + 305, + 431, + 545, + 479 + ], + "type": "text", + "content": "TIDEE [38] employs a coverage-based exploration policy to extract the spatial relationships between objects. After the exploration of two stages, the relationship changes are used to identify the rearrangement goals." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 483, + 545, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 545, + 567 + ], + "type": "text", + "content": "Given that the original work of TIDEE is based on category-level (i.e., only records the category information of objects, and for multiple instances under the same category, only chooses one as the target), it cannot be directly applicable to our MrMiR dataset. To be fair, we make modifications to TIDEE by extracting all spatial relationships between instances when testing on the MrMiR dataset." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 570, + 547, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 712 + ], + "type": "text", + "content": "As shown in Table 1, our proposed method CAVR outperforms the related works in all metrics. Specifically, it improves the success rate by " + }, + { + "bbox": [ + 304, + 570, + 547, + 712 + ], + "type": "inline_equation", + "content": "2.5\\%" + }, + { + "bbox": [ + 304, + 570, + 547, + 712 + ], + "type": "text", + "content": " and the proportion of successfully fixed objects by " + }, + { + "bbox": [ + 304, + 570, + 547, + 712 + ], + "type": "inline_equation", + "content": "5.38\\%" + }, + { + "bbox": [ + 304, + 570, + 547, + 712 + ], + "type": "text", + "content": ". Beyond the primary improvements, the decrease in energy and misplaced metrics suggests that our CAVR method could rearrange the environment closer to the goal configuration, even without fully completing the task. As shown in Table 2, the disparity between the related works and our CAVR method has further increased, fully demonstrating the superiority of our method in dealing with more complex and challenging environment." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "16462" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 91, + 548, + 205 + ], + "blocks": [ + { + "bbox": [ + 252, + 71, + 342, + 82 + ], + "lines": [ + { + "bbox": [ + 252, + 71, + 342, + 82 + ], + "spans": [ + { + "bbox": [ + 252, + 71, + 342, + 82 + ], + "type": "text", + "content": "Table 3. Ablation Study" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 91, + 548, + 205 + ], + "lines": [ + { + "bbox": [ + 48, + 91, + 548, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 91, + 548, + 205 + ], + "type": "table", + "html": "
<table><tr><td>Visual Exploration</td><td>Scene Change Matching</td><td>Success (%)↑</td><td>FixedStrict (%)↑</td><td>E↓</td><td>Misplaced ↓</td></tr>
<tr><td>coverage</td><td>√</td><td>13.1</td><td>31.0</td><td>0.722</td><td>0.717</td></tr>
<tr><td>MaSS's</td><td>√</td><td>8.7</td><td>25.8</td><td>0.763</td><td>0.754</td></tr>
<tr><td>√</td><td>uniform</td><td>11.3</td><td>24.6</td><td>0.818</td><td>0.807</td></tr>
<tr><td>√</td><td>visual</td><td>14.0</td><td>32.6</td><td>0.724</td><td>0.720</td></tr>
<tr><td>√</td><td>geometric</td><td>14.2</td><td>32.3</td><td>0.723</td><td>0.717</td></tr>
<tr><td>√</td><td>√</td><td>14.2</td><td>33.1</td><td>0.714</td><td>0.707</td></tr></table>
", + "image_path": "9d18ca77476744f81c67f3738229e54a11cc9b76bf222ba77490156193f9f44a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 61, + 206, + 547, + 231 + ], + "lines": [ + { + "bbox": [ + 61, + 206, + 547, + 231 + ], + "spans": [ + { + "bbox": [ + 61, + 206, + 547, + 231 + ], + "type": "text", + "content": "“√” represents utilizing our proposed corresponding modules (closer-aligned-retrace exploration policy and scene change matching based on similarity of appearance including visual feature and geometric feature introduced in Sec. 3); “E”: Energy Remaining." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 48, + 242, + 167, + 338 + ], + "blocks": [ + { + "bbox": [ + 48, + 242, + 167, + 338 + ], + "lines": [ + { + "bbox": [ + 48, + 242, + 167, + 338 + ], + "spans": [ + { + "bbox": [ + 48, + 242, + 167, + 338 + ], + "type": "image", + "image_path": "93c766c9dfdecea3fff6c54193df27b7ed4e58ba90e68c43885c4d45f889d290.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 350, + 548, + 384 + ], + "lines": [ + { + "bbox": [ + 46, + 350, + 548, + 384 + ], + "spans": [ + { + "bbox": [ + 46, + 350, + 548, + 384 + ], + "type": "text", + "content": "Figure 4. Rearrangement performance relative to distance threshold " + }, + { + "bbox": [ + 46, + 350, + 548, + 384 + ], + "type": "inline_equation", + "content": "\\epsilon_{d}" + }, + { + "bbox": [ + 46, + 350, + 548, + 384 + ], + "type": "text", + "content": ". The blue lines represent the average metrics across the tasks of validation set of RoomR[45], with the shaded area representing the " + }, + { + "bbox": [ + 46, + 350, + 548, + 384 + ], + "type": "inline_equation", + "content": "68\\%" + }, + { + "bbox": [ + 46, + 350, + 548, + 384 + ], + "type": "text", + "content": " confidence interval. Higher values of Success and %FixedStrict indicate superior performance, whereas lower EnergyRemaining and %Misplaced indicate better results." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 170, + 241, + 289, + 338 + ], + "blocks": [ + { + "bbox": [ + 170, + 241, + 289, + 338 + ], + "lines": [ + { + "bbox": [ + 170, + 241, + 289, + 338 + ], + "spans": [ + { + "bbox": [ + 170, + 241, + 289, + 338 + ], + "type": "image", + "image_path": "45ca38cc6e3eb03e7b0171879b3b040a00bde5abe31f9af4c036a4e8a34e40ef.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 294, + 241, + 419, + 338 + ], + "blocks": [ + { + "bbox": [ + 294, + 241, + 419, + 338 + ], + "lines": [ + { + "bbox": [ + 294, + 241, + 419, + 338 + ], + "spans": [ + { + "bbox": [ + 294, + 241, + 419, + 338 + ], + "type": "image", + "image_path": "f90728106f2e49f787f5d606a0cbe7e34e97e606d06968d43178eb1eac61ede1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 421, + 241, + 543, + 338 + ], + "blocks": [ + { + "bbox": [ + 421, + 241, + 543, + 338 + ], + "lines": [ + { + "bbox": [ + 421, + 241, + 543, + 338 + ], + "spans": [ + { + "bbox": [ + 421, + 241, + 543, + 338 + ], + "type": "image", + "image_path": "b85f8f4a863c972d24acb24515355f9eeb599822145d46b83387421dc550afd5.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 403, + 141, + 416 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 403, + 141, + 416 + ], + "spans": [ + { + "bbox": [ + 47, + 403, + 141, + 416 + ], + "type": "text", + "content": "4.3. Ablation Study" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 423, + 287, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 423, + 287, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 423, + 287, + 483 + ], + "type": "text", + "content": "Considering the complexity of visual rearrangement task, we conduct ablation studies on RoomR dataset [45] to further investigate the importance of different modules within the overall task. In the ablation studies, we keep the diffcloud as the representation of scene changes." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 484, + 287, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 484, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 46, + 484, + 287, + 579 + ], + "type": "text", + "content": "Ablation on the visual exploration module We replace our closer-aligned-retrace exploration policy with: a) Coverage-based exploration policy This strategy randomly selects target points from unexplored areas, which are used in TIDEE [38]. b) MaSS's semantic policy This ablation directly adopts the semantic policy proposed in [41], which trains a network to search the object distribution." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 581, + 288, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 581, + 288, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 581, + 288, + 700 + ], + "type": "text", + "content": "Ablation on the scene change matching module The process of scene change matching can be abstracted as a maximum weight matching problem in bipartite graph. We substitute the weights of edges with: a) Uniform weights This ablation set all edge weights to the same value regardless of the objects' appearance, which leads to a random matching. 
b) Similarity of visual feature This ablation only utilize the similarity of visual feature as the weight. c) Similarity of geometric feature This ablation only use the similarity of geometric feature as the weight." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 59, + 701, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 288, + 713 + ], + "type": "text", + "content": "The experimental results are presented in Table 3. In the" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 403, + 547, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 403, + 547, + 547 + ], + "spans": [ + { + "bbox": [ + 304, + 403, + 547, + 547 + ], + "type": "text", + "content": "ablation study on the visual exploration module, the model with MaSS's exploration policy perform worst due to the substantial variation in objects distribution within rooms, making it challenging to model them effectively with a uniform network. The model with coverage-based policy also underperform as it is likely to overlook minor changes when the observation distance is considerable. In the ablation study on the scene change matching module, removing any part of the appearance feature clearly decreases the performance in all metrics, which illustrates the noticeable impact of our extracted appearance feature on the visual rearrangement task." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 561, + 448, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 561, + 448, + 574 + ], + "spans": [ + { + "bbox": [ + 305, + 561, + 448, + 574 + ], + "type": "text", + "content": "4.4. Hyper-parameter Tuning" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "type": "text", + "content": "We conduct experiments on the validation set of RoomR[45] to determine the distance threshold in the optimization criteria for our closer-aligned-retrace exploration policy. A very small threshold value means visiting nearly every grid space on the map, while a large threshold value ignores the underlying concern of non-ambiguous scene change detection. The exploration happens in the unshuffle stage as well and our exploration policy leads the agent to try its best to replicate the previous trajectory. 
Therefore the threshold value determines the trade-off between optimality in terms of the agent traversal for exploration and a" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "16463" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 72, + 167, + 159 + ], + "blocks": [ + { + "bbox": [ + 52, + 110, + 67, + 123 + ], + "lines": [ + { + "bbox": [ + 52, + 110, + 67, + 123 + ], + "spans": [ + { + "bbox": [ + 52, + 110, + 67, + 123 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 78, + 72, + 167, + 159 + ], + "lines": [ + { + "bbox": [ + 78, + 72, + 167, + 159 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 167, + 159 + ], + "type": "image", + "image_path": "7149785c55bd1885dd7bea58448324f51893f61179ef7663851dd92c6d18cd79.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 171, + 72, + 260, + 159 + ], + "blocks": [ + { + "bbox": [ + 171, + 72, + 260, + 159 + ], + "lines": [ + { + "bbox": [ + 171, + 72, + 260, + 159 + ], + "spans": [ + { + "bbox": [ + 171, + 72, + 260, + 159 + ], + "type": "image", + "image_path": "adc897f0f598b5a8bb756f5d60318946a76e4cea141b8c8bec3d8ec551f46c54.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 265, + 72, + 351, + 159 + ], + "blocks": [ + { + "bbox": [ + 265, + 72, + 351, + 159 + ], + "lines": [ + { + "bbox": [ + 265, + 72, + 351, + 159 + ], + "spans": [ + { + "bbox": [ + 265, + 72, + 351, + 159 + ], + "type": "image", + "image_path": "a3ca2b936f2a943a404682ffc09a2039627478f63645225030f3d7e571aeef64.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 358, + 72, + 445, + 159 + ], + "blocks": [ + { + "bbox": [ + 358, + 72, + 445, + 159 + ], + "lines": [ + { + "bbox": [ + 358, + 72, + 445, + 159 + ], + "spans": [ + { + "bbox": [ + 358, + 72, + 445, + 159 + ], + "type": "image", + "image_path": "32bd738717ba6245ba55d90d0a596038d86b182a6c3a2fac58395a0bb9c67392.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 449, + 72, + 537, + 159 + ], + "blocks": [ + { + "bbox": [ + 449, + 72, + 537, + 159 + ], + "lines": [ + { + "bbox": [ + 449, + 72, + 537, + 159 + ], + "spans": [ + { + "bbox": [ + 449, + 72, + 537, + 159 + ], + "type": "image", + "image_path": "2b986476f189378ddf65c842c834717dea5348edb3917e689ddc4be186cf1037.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 78, + 174, + 167, + 261 + ], + "blocks": [ + { + "bbox": [ + 51, + 211, + 67, + 225 + ], + "lines": [ + { + "bbox": [ + 51, + 211, + 67, + 225 + ], + "spans": [ + { + "bbox": [ + 51, + 211, + 67, + 225 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 78, + 174, + 167, + 261 + ], + "lines": [ + { + "bbox": [ + 78, + 174, + 167, + 261 + ], + "spans": [ + { 
+ "bbox": [ + 78, + 174, + 167, + 261 + ], + "type": "image", + "image_path": "1feea33d1266b6a38b1b4a7399bb33a5050d3a58a4c59e0f911b4c05f989b7a5.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 171, + 174, + 258, + 261 + ], + "blocks": [ + { + "bbox": [ + 171, + 174, + 258, + 261 + ], + "lines": [ + { + "bbox": [ + 171, + 174, + 258, + 261 + ], + "spans": [ + { + "bbox": [ + 171, + 174, + 258, + 261 + ], + "type": "image", + "image_path": "609d8de081d1ff07f78a683bd68426856d88c5ba3a48c303aa5ae73f9506fa18.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 264, + 174, + 351, + 261 + ], + "blocks": [ + { + "bbox": [ + 264, + 174, + 351, + 261 + ], + "lines": [ + { + "bbox": [ + 264, + 174, + 351, + 261 + ], + "spans": [ + { + "bbox": [ + 264, + 174, + 351, + 261 + ], + "type": "image", + "image_path": "905de26f5b4e979d979697de92735960e763e7523578e028756b0cce0204ffd2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 357, + 174, + 444, + 261 + ], + "blocks": [ + { + "bbox": [ + 357, + 174, + 444, + 261 + ], + "lines": [ + { + "bbox": [ + 357, + 174, + 444, + 261 + ], + "spans": [ + { + "bbox": [ + 357, + 174, + 444, + 261 + ], + "type": "image", + "image_path": "f6dc3cc380e7ad7a9920a73ab658606957d8acf47dc6e70761942af6e851f305.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 449, + 174, + 537, + 261 + ], + "blocks": [ + { + "bbox": [ + 449, + 174, + 537, + 261 + ], + "lines": [ + { + "bbox": [ + 449, + 174, + 537, + 261 + ], + "spans": [ + { + "bbox": [ + 449, + 174, + 537, + 261 + ], + "type": "image", + "image_path": "38ae046bc51695003b76ee14f83d191f54dd59fc7d8daef874d5aefc127e2e6d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 78, + 262, + 167, + 350 + ], + "blocks": [ + { + "bbox": [ + 52, + 300, + 67, + 314 + ], + "lines": [ + { + "bbox": [ + 52, + 300, + 67, + 314 + ], + "spans": [ + { + "bbox": [ + 52, + 300, + 67, + 314 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 78, + 262, + 167, + 350 + ], + "lines": [ + { + "bbox": [ + 78, + 262, + 167, + 350 + ], + "spans": [ + { + "bbox": [ + 78, + 262, + 167, + 350 + ], + "type": "image", + "image_path": "3072faf8aa403bb33acae04c23293120ea0ec29efd0ee520a6358a1df1e52e21.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 171, + 262, + 258, + 350 + ], + "blocks": [ + { + "bbox": [ + 171, + 262, + 258, + 350 + ], + "lines": [ + { + "bbox": [ + 171, + 262, + 258, + 350 + ], + "spans": [ + { + "bbox": [ + 171, + 262, + 258, + 350 + ], + "type": "image", + "image_path": "f6f52c9fb5660bf092baad4d53b3d1e76e32e77b17846a92bd1843d32e56c6a6.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 361, + 547, + 417 + ], + "lines": [ + { + "bbox": [ + 46, + 361, + 547, + 417 + ], + "spans": [ + { + "bbox": [ + 46, + 361, + 547, + 417 + ], + "type": "text", + "content": "Figure 5. 
Visualization of optimization process of observation distance map and construction of diff-cloud (a) In the walkthrough stage, objects begin in the positions indicated by the dashed blue bounding boxes. Observation distance map is positioned at the top right corner of each image. The color transitioning from blue to red represents the distances ranging from " + }, + { + "bbox": [ + 46, + 361, + 547, + 417 + ], + "type": "inline_equation", + "content": "0\\mathrm{m}" + }, + { + "bbox": [ + 46, + 361, + 547, + 417 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 361, + 547, + 417 + ], + "type": "inline_equation", + "content": "5\\mathrm{m}" + }, + { + "bbox": [ + 46, + 361, + 547, + 417 + ], + "type": "text", + "content": ". (b) In the unshuffle stage, objects are moved to the locations indicated by the solid red box. (c) The diff-cloud is gradually built up, including the moved part (blue points) and the protruding part (red points)." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 264, + 262, + 351, + 350 + ], + "blocks": [ + { + "bbox": [ + 264, + 262, + 351, + 350 + ], + "lines": [ + { + "bbox": [ + 264, + 262, + 351, + 350 + ], + "spans": [ + { + "bbox": [ + 264, + 262, + 351, + 350 + ], + "type": "image", + "image_path": "afb6c381cd2f5eb5f626adbbb9058e8592df17a1995d2efb0f05a8698483c1e3.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 357, + 262, + 444, + 350 + ], + "blocks": [ + { + "bbox": [ + 357, + 262, + 444, + 350 + ], + "lines": [ + { + "bbox": [ + 357, + 262, + 444, + 350 + ], + "spans": [ + { + "bbox": [ + 357, + 262, + 444, + 350 + ], + "type": "image", + "image_path": "7fdfcd37d4774f6eddae9031b1466958d031873a45a25bcb35d4754dbf10c20c.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 449, + 262, + 536, + 350 + ], + "blocks": [ + { + "bbox": [ + 449, + 262, + 536, + 350 + ], + "lines": [ + { + "bbox": [ + 449, + 262, + 536, + 350 + ], + "spans": [ + { + "bbox": [ + 449, + 262, + 536, + 350 + ], + "type": "image", + "image_path": "2b6d53dc284f29ff6d02a8b6b72dd6bf3b0bf6af88038db82bd1cd80e86d7192.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 437, + 209, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 437, + 209, + 449 + ], + "spans": [ + { + "bbox": [ + 47, + 437, + 209, + 449 + ], + "type": "text", + "content": "non-ambiguous scene change detection." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 46, + 451, + 287, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 451, + 287, + 535 + ], + "spans": [ + { + "bbox": [ + 46, + 451, + 287, + 535 + ], + "type": "text", + "content": "As shown in the Fig.4, we set observation distance thresholds from " + }, + { + "bbox": [ + 46, + 451, + 287, + 535 + ], + "type": "inline_equation", + "content": "1\\mathrm{m}" + }, + { + "bbox": [ + 46, + 451, + 287, + 535 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 451, + 287, + 535 + ], + "type": "inline_equation", + "content": "7\\mathrm{m}" + }, + { + "bbox": [ + 46, + 451, + 287, + 535 + ], + "type": "text", + "content": " and compute the average metrics of 1000 tasks. 
Optimal performance on the validation set is achieved with a distance threshold at " + }, + { + "bbox": [ + 46, + 451, + 287, + 535 + ], + "type": "inline_equation", + "content": "1.5\\mathrm{m}" + }, + { + "bbox": [ + 46, + 451, + 287, + 535 + ], + "type": "text", + "content": ", which is the threshold consistently applied in the other experiments throughout this paper. In this experiment, error bars are calculated based on a " + }, + { + "bbox": [ + 46, + 451, + 287, + 535 + ], + "type": "inline_equation", + "content": "68\\%" + }, + { + "bbox": [ + 46, + 451, + 287, + 535 + ], + "type": "text", + "content": " confidence interval." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 549, + 132, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 549, + 132, + 561 + ], + "spans": [ + { + "bbox": [ + 47, + 549, + 132, + 561 + ], + "type": "text", + "content": "4.5. Visualization" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": "We visualize and analyze the optimization of the observation distance map during the walkthrough stage and the construction of the diff-cloud in the unshuffle stage, as shown in Fig. 5. As the exploration progresses, the distance map increasingly exhibit hues of blue, which indicates that our exploration policy enables the agent to observe the scene details up close. In the unshuffle stage, as the diff-cloud is gradually built up, we develop a distinct understanding of the changes occurring throughout the scene. After matching these changes according to the similarity of their appearance, we can carry out the rearrangement execution procedurally." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 436, + 379, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 436, + 379, + 449 + ], + "spans": [ + { + "bbox": [ + 306, + 436, + 379, + 449 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 460, + 547, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 547, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 547, + 617 + ], + "type": "text", + "content": "We propose a category agnostic model for visual rearrangement task in this paper. Our method is composed of a closer-aligned-retrace exploration policy, a scene change detection module based on point cloud and a scene change matching module utilizing the similarity of appearance feature, each specifically designed to recover the scene configuration regardless of any category labels. To validate the proposed method, we conduct experiments on the RoomR dataset and a more practical dataset MrMiR collected by us, where multiple instances distribute across multiple rooms. Experimental results on these two datasets demonstrate that our method is able to perform the visual rearrangement task effectively without any category information." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 654, + 547, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 712 + ], + "type": "text", + "content": "Acknowledgements: This work was supported by the National Natural Science Foundation of China under Grant 62125207, 62272443, 62032022 and U23B2012, in part by Beijing Natural Science Foundation under Grant JQ22012, Z190020." + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "16464" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "text", + "content": "[1] Pablo F Alcantarilla, Simon Stent, German Ros, Roberto Arroyo, and Riccardo Gherardi. Street-view change detection with deconvolutional networks. Autonomous Robots, 42: 1301-1322, 2018. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 135, + 288, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 288, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 288, + 190 + ], + "type": "text", + "content": "[2] Dhruv Batra, Angel X. Chang, Sonia Chernova, Andrew J. Davison, Jia Deng, Vladlen Koltun, Sergey Levine, Jitendra Malik, Igor Mordatch, Roozbeh Mottaghi, Manolis Savva, and Hao Su. Rearrangement: A challenge for embodied ai, 2020. 1, 2, 3, 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 191, + 288, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 191, + 288, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 288, + 235 + ], + "type": "text", + "content": "[3] Edward Beeching, Jilles Dibangoye, Olivier Simonin, and Christian Wolf. Learning to plan with uncertain topological maps. In European Conference on Computer Vision, pages 473-490. Springer, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 236, + 287, + 268 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 287, + 268 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 287, + 268 + ], + "type": "text", + "content": "[4] Shuhui Bu, Qing Li, Pengcheng Han, Pengyu Leng, and Ke Li. Mask-cdnet: A mask based pixel change detection network. Neurocomputing, 378:166-178, 2020. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 270, + 288, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 270, + 288, + 302 + ], + "spans": [ + { + "bbox": [ + 53, + 270, + 288, + 302 + ], + "type": "text", + "content": "[5] Yuri Burda, Harri Edwards, Deepak Pathak, Amos Storkey, Trevor Darrell, and Alexei A. Efros. 
Large-scale study of curiosity-driven learning. In ICLR, 2019. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 303, + 288, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 303, + 288, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 303, + 288, + 346 + ], + "type": "text", + "content": "[6] Devendra Singh Chaplot, Dhiraj Gandhi, Saurabh Gupta, Abhinav Gupta, and Ruslan Salakhutdinov. Learning to explore using active neural slam. In International Conference on Learning Representations (ICLR), 2020. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 346, + 287, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 346, + 287, + 402 + ], + "spans": [ + { + "bbox": [ + 53, + 346, + 287, + 402 + ], + "type": "text", + "content": "[7] Devendra Singh Chaplot, Helen Jiang, Saurabh Gupta, and Abhinav Gupta. Semantic curiosity for active visual learning. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part VI 16, pages 309–326. Springer, 2020. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 403, + 287, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 403, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 403, + 287, + 456 + ], + "type": "text", + "content": "[8] Devendra Singh Chaplot, Murtaza Dalal, Saurabh Gupta, Jitendra Malik, and Russ R Salakhutdinov. Seal: Self-supervised embodied active learning using exploration and 3d consistency. Advances in neural information processing systems, 34:13086-13098, 2021. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 458, + 287, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 458, + 287, + 512 + ], + "spans": [ + { + "bbox": [ + 53, + 458, + 287, + 512 + ], + "type": "text", + "content": "[9] Chao-Peng Chen, Jun-Wei Hsieh, Ping-Yang Chen, Yi-Kuan Hsieh, and Bor-Shiun Wang. Saras-net: scale and relation aware siamese network for change detection. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 14187-14195, 2023. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "type": "text", + "content": "[10] Shuo Chen, Kailun Yang, and Rainer Stiefelhagen. Drtanet: Dynamic receptive temporal attention network for street scene change detection. In 2021 IEEE Intelligent Vehicles Symposium (IV), pages 502-509. IEEE, 2021. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 558, + 287, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 590 + ], + "type": "text", + "content": "[11] Tao Chen, Saurabh Gupta, and Abhinav Gupta. Learning exploration policies for navigation. In International Conference on Learning Representations, 2019. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 591, + 287, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 591, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 591, + 287, + 646 + ], + "type": "text", + "content": "[12] Matt Deitke, Eli VanderBilt, Alvaro Herrasti, Luca Weihs, Jordi Salvador, Kiana Ehsani, Winson Han, Eric Kolve, Ali Farhadi, Aniruddha Kembhavi, et al. 
Procthor: Large-scale embodied ai using procedural generation. arXiv preprint arXiv:2206.06994, 2022. 2, 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 647, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 678 + ], + "type": "text", + "content": "[13] Edsger W Dijkstra. A note on two problems in connexion with graphs. Numerische mathematik, 1(1):269-271, 1959. 3, 5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "text", + "content": "[14] H. Durrant-Whyte and T. Bailey. Simultaneous localization and mapping: part i. IEEE Robotics & Automation Magazine, 13(2):99-110, 2006. 2" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[15] Martin Ester, Hans-Peter Kriegel, Jörg Sander, Xiaowei Xu, et al. A density-based algorithm for discovering clusters in large spatial databases with noise. In kdd, pages 226-231, 1996. 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 118, + 547, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 547, + 171 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 547, + 171 + ], + "type": "text", + "content": "[16] Kuan Fang, Alexander Toshev, Li Fei-Fei, and Silvio Savarese. Scene memory transformer for embodied agents in long-horizon tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 172, + 545, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 172, + 545, + 226 + ], + "spans": [ + { + "bbox": [ + 307, + 172, + 545, + 226 + ], + "type": "text", + "content": "[17] Kuan Fang, Alexander Toshev, Li Fei-Fei, and Silvio Savarese. Scene memory transformer for embodied agents in long-horizon tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 538–547, 2019. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 228, + 545, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 228, + 545, + 281 + ], + "spans": [ + { + "bbox": [ + 307, + 228, + 545, + 281 + ], + "type": "text", + "content": "[18] Samir Yitzhak Gadre, Kiana Ehsani, Shuran Song, and Roozbeh Mottaghi. Continuous scene representations for embodied ai. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14849-14859, 2022. 1, 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 283, + 545, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 283, + 545, + 336 + ], + "spans": [ + { + "bbox": [ + 307, + 283, + 545, + 336 + ], + "type": "text", + "content": "[19] Enqiang Guo, Xinsha Fu, Jiawei Zhu, Min Deng, Yu Liu, Qing Zhu, and Haifeng Li. Learning to measure change: Fully convolutional siamese metric networks for scene change detection. 
arXiv preprint arXiv:1810.09111, 2018.3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 338, + 545, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 338, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 307, + 338, + 545, + 381 + ], + "type": "text", + "content": "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 383, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 383, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 383, + 545, + 426 + ], + "type": "text", + "content": "[21] Dinesh Jayaraman and Kristen Grauman. Learning to look around: Intelligently exploring unseen environments for unknown tasks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 426, + 545, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 426, + 545, + 470 + ], + "spans": [ + { + "bbox": [ + 307, + 426, + 545, + 470 + ], + "type": "text", + "content": "[22] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 471, + 545, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 471, + 545, + 524 + ], + "spans": [ + { + "bbox": [ + 307, + 471, + 545, + 524 + ], + "type": "text", + "content": "[23] Eric Kolve, Roozbeh Mottaghi, Winson Han, Eli VanderBilt, Luca Weihs, Alvaro Herrasti, Matt Deitke, Kiana Ehsani, Daniel Gordon, Yuke Zhu, et al. Ai2-thor: An interactive 3d environment for visual ai. arXiv preprint arXiv:1712.05474, 2017.6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 526, + 545, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 526, + 545, + 557 + ], + "spans": [ + { + "bbox": [ + 307, + 526, + 545, + 557 + ], + "type": "text", + "content": "[24] Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly, 2(1-2):83-97, 1955. 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 559, + 545, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 559, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 307, + 559, + 545, + 612 + ], + "type": "text", + "content": "[25] Weijie Li, Xinhang Song, Yubing Bai, Sixian Zhang, and Shuqiang Jiang. ION: instance-level object navigation. In MM '21: ACM Multimedia Conference, Virtual Event, China, October 20 - 24, 2021, pages 4343-4352. ACM, 2021. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 614, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 614, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 307, + 614, + 545, + 668 + ], + "type": "text", + "content": "[26] Haotong Lin, Qianqian Wang, Ruojin Cai, Sida Peng, Hadar Averbuch-Elor, Xiaowei Zhou, and Noah Snively. Neural scene chronology. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20752-20761, 2023. 
2, 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "type": "text", + "content": "[27] Kevin Matzen and Noah Snavely. Scene chronology. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pages 615-630. Springer, 2014. 2, 3" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "16465" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[28] So Yeon Min, Devendra Singh Chaplot, Pradeep Ravikumar, Yonatan Bisk, and Ruslan Salakhutdinov. Film: Following instructions in language with modular methods. arXiv preprint arXiv:2110.07342, 2021. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 288, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 288, + 192 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 288, + 192 + ], + "type": "text", + "content": "[29] Medhini Narasimhan, Erik Wijmans, Xinlei Chen, Trevor Darrell, Dhruv Batra, Devi Parikh, and Amanpreet Singh. Seeing the un-scene: Learning amodal semantic maps for room navigation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVIII 16, pages 513-529. Springer, 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 194, + 288, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 194, + 288, + 237 + ], + "spans": [ + { + "bbox": [ + 48, + 194, + 288, + 237 + ], + "type": "text", + "content": "[30] Deepak Pathak, Pulkit Agrawal, Alexei A Efros, and Trevor Darrell. Curiosity-driven exploration by self-supervised prediction. In International conference on machine learning, pages 2778-2787. PMLR, 2017. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 239, + 288, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 239, + 288, + 281 + ], + "spans": [ + { + "bbox": [ + 48, + 239, + 288, + 281 + ], + "type": "text", + "content": "[31] Deepak Pathak, Dhiraj Gandhi, and Abhinav Gupta. Self-supervised exploration via disagreement. In International conference on machine learning, pages 5062-5071. PMLR, 2019. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 283, + 288, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 283, + 288, + 327 + ], + "spans": [ + { + "bbox": [ + 48, + 283, + 288, + 327 + ], + "type": "text", + "content": "[32] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 
5, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 327, + 288, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 288, + 392 + ], + "type": "text", + "content": "[33] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 393, + 288, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 393, + 288, + 448 + ], + "spans": [ + { + "bbox": [ + 48, + 393, + 288, + 448 + ], + "type": "text", + "content": "[34] Santhosh K Ramakrishnan, Ziad Al-Halah, and Kristen Grauman. Occupancy anticipation for efficient exploration and navigation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part V 16, pages 400-418. Springer, 2020. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 449, + 288, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 288, + 491 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 288, + 491 + ], + "type": "text", + "content": "[35] Santhosh K Ramakrishnan, Dinesh Jayaraman, and Kristen Grauman. An exploration of embodied visual exploration. International Journal of Computer Vision, 129:1616-1649, 2021. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 493, + 288, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 493, + 288, + 536 + ], + "spans": [ + { + "bbox": [ + 48, + 493, + 288, + 536 + ], + "type": "text", + "content": "[36] Vijaya Raghavan T Ramkumar, Elahe Arani, and Bahram Zonooz. Differencing based self-supervised pretraining for scene change detection. In Conference on Lifelong Learning Agents, pages 952-965. PMLR, 2022. 2, 3, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 537, + 288, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 537, + 288, + 581 + ], + "spans": [ + { + "bbox": [ + 48, + 537, + 288, + 581 + ], + "type": "text", + "content": "[37] Ken Sakurada, Mikiya Shibuya, and Weimin Wang. Weakly supervised silhouette-based semantic scene change detection. In 2020 IEEE International conference on robotics and automation (ICRA), pages 6861-6867. IEEE, 2020. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 582, + 288, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 582, + 288, + 658 + ], + "spans": [ + { + "bbox": [ + 48, + 582, + 288, + 658 + ], + "type": "text", + "content": "[38] Gabriel Sarch, Zhaoyuan Fang, Adam W Harley, Paul Schydlo, Michael J Tarr, Saurabh Gupta, and Katerina Fragkiadaki. Tidee: Tidying up novel rooms using visuo-semantic commonsense priors. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIX, pages 480-496. Springer, 2022. 
1, 2, 3, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "type": "text", + "content": "[39] Nikolay Savinov, Anton Raichuk, Raphaël Marinier, Damien Vincent, Marc Pollefeys, Timothy Lillicrap, and Sylvain Gelly. Episodic curiosity through reachability. In International Conference on Learning Representations (ICLR), 2019. 2" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 564 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[40] Grant Schindler and Frank Dellaert. Probabilistic temporal inference on reconstructed 3d scenes. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1410-1417. IEEE, 2010. 2, 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 118, + 545, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 172 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 172 + ], + "type": "text", + "content": "[41] Brandon Trabucco, Gunnar Sigurdsson, Robinson Piramuthu, Gaurav S Sukhatme, and Ruslan Salakhutdinov. A simple approach for visual rearrangement: 3d mapping and semantic search. arXiv preprint arXiv:2206.13396, 2022. 1, 2, 3, 6, 7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 174, + 545, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 174, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 174, + 545, + 206 + ], + "type": "text", + "content": "[42] Guo-Hua Wang, Bin-Bin Gao, and Chengjie Wang. How to reduce change detection to semantic segmentation. Pattern Recognition, 138:109384, 2023. 2, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 208, + 545, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 208, + 545, + 261 + ], + "spans": [ + { + "bbox": [ + 307, + 208, + 545, + 261 + ], + "type": "text", + "content": "[43] Xiaohan Wang, Yuehu Liu, Xinhang Song, Beibei Wang, and Shuqiang Jiang. Generating explanations for embodied action decision from visual observation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 2838-2846, 2023. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 263, + 545, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 263, + 545, + 307 + ], + "spans": [ + { + "bbox": [ + 307, + 263, + 545, + 307 + ], + "type": "text", + "content": "[44] Xiaohan Wang, Yuehu Liu, Xinhang Song, Beibei Wang, and Shuqiang Jiang. Camp: Causal multi-policy planning for interactive navigation in multi-room scenes. Advances in Neural Information Processing Systems, 36, 2024. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 308, + 545, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 308, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 307, + 308, + 545, + 352 + ], + "type": "text", + "content": "[45] Luca Weihs, Matt Deitke, Aniruddha Kembhavi, and Roozbeh Mottaghi. Visual room rearrangement. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5922-5931, 2021. 1, 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 354, + 545, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 545, + 386 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 545, + 386 + ], + "type": "text", + "content": "[46] Haitao Zeng, Xinhang Song, and Shuqiang Jiang. Multi-object navigation using potential target position policy function. IEEE Transactions on Image Processing, 2023. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 387, + 545, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 387, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 307, + 387, + 545, + 442 + ], + "type": "text", + "content": "[47] Sixian Zhang, Weijie Li, Xinhang Song, Yubing Bai, and Shuqiang Jiang. Generative meta-adversarial network for unseen object navigation. In Computer Vision - ECCV 2022 - 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIX, pages 301-320." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 443, + 545, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 443, + 545, + 508 + ], + "spans": [ + { + "bbox": [ + 307, + 443, + 545, + 508 + ], + "type": "text", + "content": "[48] Sixian Zhang, Xinhang Song, Yubing Bai, Weijie Li, Yakui Chu, and Shuqiang Jiang. Hierarchical object-to-zone graph for object navigation. In 2021 IEEE/CVF International Conference on Computer Vision, ICCV 2021, Montreal, QC, Canada, October 10-17, 2021, pages 15110-15120. IEEE, 2021." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 510, + 545, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 510, + 545, + 564 + ], + "spans": [ + { + "bbox": [ + 307, + 510, + 545, + 564 + ], + "type": "text", + "content": "[49] Sixian Zhang, Xinhang Song, Weijie Li, Yubing Bai, Xinyao Yu, and Shuqiang Jiang. Layout-based causal inference for object navigation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10792-10802, 2023. 
2" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "16466" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/69aa9b91-03a6-4c14-a53b-96602951c67b_content_list.json b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/69aa9b91-03a6-4c14-a53b-96602951c67b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..56c7357a39eb70e93941ba99e54051996446a44f --- /dev/null +++ b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/69aa9b91-03a6-4c14-a53b-96602951c67b_content_list.json @@ -0,0 +1,1528 @@ +[ + { + "type": "text", + "text": "A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models", + "text_level": 1, + "bbox": [ + 94, + 130, + 875, + 155 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Julio Silva-Rodriguez", + "bbox": [ + 166, + 180, + 354, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sina Hajimiri", + "bbox": [ + 395, + 180, + 504, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ismail Ben Ayed", + "bbox": [ + 545, + 181, + 679, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ÉTS Montreal", + "bbox": [ + 426, + 199, + 542, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "julio-jose.silva-rodriguez@etsmt1.ca", + "bbox": [ + 310, + 219, + 651, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jose Dolz", + "bbox": [ + 718, + 181, + 800, + 196 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/82130ed8dda85ff2f0317020867ca340011005508c804a9c8616604948ccad77.jpg", + "image_caption": [ + "(a) CLIP-Adapter [11]", + "Figure 1. Pitfalls of few-shot adapters due to the absence of a model selection strategy. The cross-shift model selection matrices $(i,j)$ depict the relative improvement w.r.t. a zero-shot initialized Linear Probing when using the optimal hyperparameters for the dataset $i$ (rows), for adapting in another task $j$ (columns), for each SoTA method (first three plots) and our approach (last plot)." 
+ ], + "image_footnote": [], + "bbox": [ + 63, + 286, + 264, + 417 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c75b44f3de5192e7ec83212cf6ff6a9895b8fc7bf3d5600d20371e35bbd6cdb7.jpg", + "image_caption": [ + "(b) TIP-Adapter(f) [42]" + ], + "image_footnote": [], + "bbox": [ + 264, + 286, + 467, + 417 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8dbbc2575cf75c7536dfa70a8a994c4f1a2b0d453b2e52f716a7a85f50678148.jpg", + "image_caption": [ + "(c) TaskRes [40]" + ], + "image_footnote": [], + "bbox": [ + 467, + 286, + 666, + 417 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a61f88c5dda613b069ca788c6d83166a342f21da1bd87aa1d8d1b6b168bb7c3d.jpg", + "image_caption": [ + "(d) CLAP (Ours)" + ], + "image_footnote": [], + "bbox": [ + 666, + 286, + 897, + 417 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 483, + 313, + 498 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Efficient transfer learning (ETL) is receiving increasing attention to adapt large pre-trained language-vision models on downstream tasks with a few labeled samples. While significant progress has been made, we reveal that state-of-the-art ETL approaches exhibit strong performance only in narrowly-defined experimental setups, and with a careful adjustment of hyperparameters based on a large corpus of labeled samples. In particular, we make two interesting, and surprising empirical observations. First, to outperform a simple Linear Probing baseline, these methods require to optimize their hyper-parameters on each target task. And second, they typically underperform -sometimes dramatically-standard zero-shot predictions in the presence of distributional drifts. Motivated by the unrealistic assumptions made in the existing literature, i.e., access to a large validation set and case-specific grid-search for optimal hyperparameters, we propose a novel approach that meets the requirements of real-world scenarios. More concretely, we introduce a Class-Adaptive linear Probe (CLAP) objective, whose balancing term is optimized via an adaptation of the general Augmented Lagrangian method tailored to this context. We comprehensively evaluate CLAP on a broad span of datasets and scenarios, demonstrating that it consistently outperforms SoTA approaches, while yet being a much more efficient alternative. Code available at", + "bbox": [ + 73, + 523, + 472, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://github.com/jusiro/CLAP.", + "bbox": [ + 500, + 484, + 810, + 500 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 527, + 632, + 542 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large vision-language models (VLMs), such as CLIP [30], are reshaping the research landscape with their unprecedented performance. These models undergo training on an extensive dataset consisting of hundreds of millions of image-text pairs, which are leveraged via contrastive learning [30]. Once trained, VLMs offer a remarkable zero-shot performance on a wide span of visual recognition problems thanks to the rich learned representations [27, 30]. Nevertheless, the extensive hardware and data-driven resources that such training demands [3] suggest that these models can only be trained on singular occasions. 
Furthermore, the large scale of these networks poses important challenges when it comes to adjusting their parameters on small downstream tasks that involve only a few labeled samples, making the full fine-tuning of the entire model impractical.", + "bbox": [ + 496, + 551, + 890, + 779 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "An emerging alternative to alleviate this issue consists in fine-tuning VLMs by adding a small set of learnable parameters, whose values are optimized during the adaptation step [11, 19, 42, 45, 46]. These tunable weights can be introduced in the input space as visual [19] or text prompts [45, 46], or added in the form of adapters across the network [11, 40, 42]. While both families of approaches fit within the Efficient Transfer Learning (ETL) literature,", + "bbox": [ + 496, + 780, + 892, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "23681", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "prompt learning still requires backpropagating the gradients through the entire network. Thus, besides introducing a burden on resource reuse, these methods preclude black-box adaptation, introducing a potential concern about leaking the source data, which is paramount in privacy-oriented applications. In contrast, strategies based on adapters only need gradients on the extra set of parameters, typically in the last layer, avoiding costly fine-tuning processes and data leakage, yet yielding state-of-the-art performance [24, 40].", + "bbox": [ + 75, + 90, + 472, + 227 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite the progress observed in adapter-based methods for fine-tuning VLMs under the few-shot learning paradigm, improving the performance on the target task while preserving their generalization capabilities remains still a challenge [46]. We argue that this is likely due to the severe overfitting to the support set samples employed during few-shot adaptation, which significantly deviates the updated class prototypes from the zero-shot prototypes initially provided by the pre-trained model. In fact, popular adapter-based ETL strategies, such as CLIP-Adapter [11] and TIP-Adapter [42], carefully adjust the model-specific hyperparameters, in conjunction with other key hyperparameters related to the learning scheduler, to control the trade-off between initial zero-shot inference and the integration of new information from the support set. Furthermore, recent evidence [24] suggests that these works apparently use the large-scale test set to adjust their hyperparameters.", + "bbox": [ + 75, + 229, + 472, + 487 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A significant limitation becomes evident in that these hyperparameters, when optimized for one specific task, do not exhibit strong generalizability to other tasks, as illustrated in Fig. 1. Indeed, state-of-the-art (SoTA) methods struggle to find a homogeneous configuration that outperforms a simple well-initialized Linear Probing (LP) adaptation. 
Notably, in a realistic adaptation scenario (Fig. 1), we can observe dramatic performance degradations, up to $21\\%$ , compared to this simple baseline. These practices virtually bias the model selection process, as assuming access to a significantly larger set of labeled samples, and adjusting the model hyperparameters in a case-specific manner, is not only unrealistic but also impractical (grid-search must be done for each case). Thus, we argue that if an ETL method's model selection strategy is not solely based on the support samples, the method is incomplete, and impractical for real-world few-shot adaptation problems.", + "bbox": [ + 75, + 489, + 472, + 747 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we seek to redirect the efforts on few-shot ETL to a more strict, but realistic scenario, in which only the support samples are accessible during training. The absence of an evaluation subset urges novel adapters to include a model selection strategy, robust across a large spectrum of tasks. Interestingly, we empirically observed that a carefully designed Linear Probing (ZS-LP), whose weights are initialized with the zero-shot prototypes from CLIP, is a strong baseline that outperforms more convoluted ETL solutions. To further improve the baseline ZS-LP and opti", + "bbox": [ + 75, + 750, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "mize the trade-off between initial zero-shot representations and updated class prototypes on novel tasks, we propose penalizing large deviations from the original zero-shot prototypes during adaptation. The resulting learning objective, however, presents two major issues. First, the penalty included to control the deviation between original and updated prototypes is a scalar value, uniform across all classes, which can detrimentally affect the model's performance in the presence of harder-to-learn classes. Second, the penalty balancing weight must be set using a validation set, which juxtaposes with our validation-free scenario. To address these limitations, we propose CLass-Adaptive linear Probe (CLAP), which is based on an Augmented Lagrangian Multiplier approach. We can summarize our contributions as:", + "bbox": [ + 496, + 90, + 893, + 303 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We empirically observe that SoTA few-shot ETL adapters require careful adjustment of a set of key hyperparameters for each task, which is unrealistic and impractical in real-world settings. Surprisingly, if a fixed configuration is adopted across tasks, these methods are likely to substantially underperform a simple Linear Probing strategy initialized with the zero-shot prototypes from CLIP.", + "- We propose a principled solution to tackle the trade-off between original and updated class prototypes in Linear Probing, which integrates a penalty term to penalize large deviations from zero-shot prototypes. To address the underlying challenges from the resulting constrained optimization problem, we present a modified Augmented Lagrangian Multiplier (ALM) method. This alleviates the need of having to fine-tune the penalty balancing weight, which is learned in the outer iteration of the optimization process. 
In order to adapt ALM to the presented scenario, two critical choices were made: $i$ Leveraging class prototypes, as well as data augmentation, motivate the use of class-wise multipliers, instead of sample and class-wise multipliers as in the original ALM; $ii$ In the presented scenario, there is no access to a validation set, and the only feedback available is from the support samples. Hence, we only perform one outer-step update, which can avoid potential overfitting on the support set.", + "- We provide extensive experiments to assess the performance of CLAP in the proposed scenario, including few-shot adaptation on 11 popular classification benchmarks, domain generalization, comparison to full fine-tuning methods, and ablation studies to validate our choices. As shown in Fig. 1 and in the experimental section, CLAP delivers consistent performance across different tasks with a homogeneous configuration, and largely outperforms SoTA ETL approaches in all scenarios." + ], + "bbox": [ + 500, + 304, + 893, + 818 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related work", + "text_level": 1, + "bbox": [ + 500, + 830, + 638, + 845 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Vision-language pre-trained models. The field of machine learning is in the midst of a paradigm shift with the emerging rise of vision-language models (VLMs). These", + "bbox": [ + 498, + 854, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "23682", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "networks have gained increasing popularity, especially fueled by the significant improvements achieved in computer vision and natural language processing tasks [5, 18, 30, 41]. The prevailing learning paradigm consists of a dual stream of data, which separately encodes images and their text counterparts, leveraging contrastive learning at a large scale to bridge image and text representations in the latent space. Particularly, models such as CLIP [30] and ALIGN [18] have successfully mitigated the distribution discrepancy between text and images, and have shown tremendous zero-shot capabilities on visual recognition tasks, primarily in the context of classification.", + "bbox": [ + 75, + 90, + 472, + 271 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Full fine-tuning. A body of work proposes fine-tuning the entire VLMs to adapt to a specific task [12, 22, 36]. This strategy, however, presents several drawbacks. Concretely, fine-tuning increases the complexity of the model being optimized, makes the optimization process more time-consuming compared to ETL methods, and requires access to the backbone weights, which does not allow a black-box adaptation. Furthermore, full fine-tuning methods typically tend to overfit when trained on small datasets, requiring a large corpus of labeled data for the target task, which may be impractical in many real-world scenarios.", + "bbox": [ + 75, + 273, + 472, + 439 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Efficient transfer leaning attempts to address these issues by updating a small set of learnable parameters and leveraging a limited amount of annotated samples. Current ETL literature can be categorized into Prompt Learning [20, 38, 39, 45-47] and Adapter-based [11, 40, 42] approaches. Prompt Learning represents a recent advancement in the realm of natural language processing [23, 43], which has been recently adopted with success in VLMs. 
In these methods, only the text tokens provided to the model are optimized. Nevertheless, these techniques require long training steps due to backpropagating the gradient over the entire network, which juxtaposes with the spirit of efficient adaptation. Furthermore, black-box adaptation is also not possible in prompt learning. Adapter-based methods, in contrast, offer a much lighter alternative as only a small subset of parameters, typically at the latest layers, are adjusted. For example, CLIP-Adapter [11] integrates a two-layer MLP to modify the visual embedding generated by CLIP. In TIP-Adapter [42], the visual prototypes obtained from the few-shot support samples are leveraged to compute the similarity with the visual embedding of the test image, which is later used to modify the CLIP visual embedding.", + "bbox": [ + 75, + 441, + 472, + 773 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Preliminaries", + "text_level": 1, + "bbox": [ + 76, + 789, + 217, + 806 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Contrastive vision-language pre-training", + "text_level": 1, + "bbox": [ + 76, + 815, + 426, + 832 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Large-scale VLMs, such as CLIP [30], are trained on large heterogeneous datasets, encouraging image and text representations to correlate in a joint embedding space. Formally, CLIP comprises a vision encoder, $f_{\\theta}(\\cdot)$ , and a text encoder,", + "bbox": [ + 75, + 839, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$f_{\\phi}(\\cdot)$ , each aiming at learning a rich representation of their data points. These points are projected in an $\\ell_2$ -normalized shared embedding space, yielding the corresponding visual $\\mathbf{v}$ and text $\\mathbf{t}$ embeddings. The whole network is optimized to maximize the similarity between the projected embeddings of paired images and texts, using a contrastive loss.", + "bbox": [ + 496, + 90, + 893, + 183 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Transferability", + "text_level": 1, + "bbox": [ + 498, + 189, + 653, + 205 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Zero-shot inference. For a particular downstream image classification task, CLIP-based models are able to provide predictions based on the similarity between category prompts, i.e., text descriptions of target classes, and testing images. Given a set of $C$ categories, and an ensemble of $N$ text prompts for each one, $\\{\\{T_{n,c}\\}_{n=1}^{N}\\}_{c=1}^{C}$ , a common practice is to obtain a zero-shot prototype for each target category by computing the center of the $\\ell_2$ -normalized text embeddings for each class, $\\mathbf{t}_c = \\frac{1}{N}\\sum_{n=1}^{N}f_\\phi(T_{n,c})$ . 
Thus, for a given query image $\\mathbf{x}$ , the zero-shot prediction is obtained from the softmax cosine similarity between the vision embedding $\\mathbf{v} = f_\\theta(\\mathbf{x})$ , and category prototypes $\\mathbf{t}_c$ :", + "bbox": [ + 496, + 212, + 893, + 395 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {y} _ {c} = \\frac {\\exp \\left(\\mathbf {v} \\cdot \\mathbf {t} _ {c} ^ {\\top} / \\tau\\right)}{\\sum_ {i = 1} ^ {C} \\exp \\left(\\mathbf {v} \\cdot \\mathbf {t} _ {i} ^ {\\top} / \\tau\\right)}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 602, + 411, + 890, + 450 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\tau$ is a temperature parameter learned during the pretraining stage, and $\\mathbf{v} \\cdot \\mathbf{t}^{\\top}$ the dot product operator, which is equivalent to cosine similarity, as vectors are $\\ell_{2}$ -normalized.", + "bbox": [ + 496, + 453, + 893, + 501 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Few-shot learning. This scenario assumes access to limited supervisory information on the downstream tasks, in the form of a few examples for each target category, so-called shots. Formally, we denote a support set, $S = \\{(\\mathbf{x}^{(m)},\\mathbf{y}^{(m)})\\}_{m = 1}^{M = K\\times C}$ , composed of $K$ images for each target category, such that $K$ takes a small value, e.g., $K\\in \\{1,2,4,8,16\\}$ , and where $\\mathbf{y}\\in \\{0,1\\} ^C$ is the corresponding one-hot label for a given image $\\mathbf{x}$ . The objective is to adapt the pre-trained model using this limited support set.", + "bbox": [ + 496, + 516, + 893, + 654 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Efficient transfer learning with adapters", + "text_level": 1, + "bbox": [ + 498, + 659, + 846, + 676 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In their general form, ETL methods based on adapters learn a set of transformations over the pre-trained features $(\\mathbf{v}^{\\prime},\\mathbf{t}^{\\prime} = f_{\\psi}(\\mathbf{v},\\mathbf{t}))$ , parameterized by the so-called adapter $\\psi$ , which produces softmax scores for the new tasks following Eq. (1). The adapter $\\psi$ can be optimized by minimizing the popular cross-entropy (CE) loss, $\\mathcal{H}(\\mathbf{y},\\hat{\\mathbf{y}}) = -\\sum_{c = 1}^{C}y_{c}\\log \\hat{y}_{c}$ , over the support set samples:", + "bbox": [ + 496, + 683, + 893, + 791 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\psi} \\frac {1}{M} \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right). \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 598, + 799, + 890, + 840 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.4. Pitfalls of existing few-shot ETL methods", + "text_level": 1, + "bbox": [ + 498, + 847, + 852, + 863 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recent ETL methods tailored to VLMs focus on enhancing the supervision provided by the support samples with", + "bbox": [ + 496, + 869, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "23683", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "priors learned by the VLMs at the task at hand. The pretrained model gathers robust knowledge and is able to align visual and textual concepts. 
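To make the zero-shot inference of Eq. (1) concrete, the following minimal Python/PyTorch sketch builds the class prototypes as the mean of the ℓ2-normalized prompt embeddings and returns the temperature-scaled softmax scores. It assumes the prompt and image embeddings have already been extracted with CLIP's encoders; all names (zero_shot_probs, text_embeds, tau) are illustrative assumptions, not the paper's code.

```python
# Hedged sketch of Eq. (1): zero-shot prediction from precomputed CLIP embeddings.
import torch
import torch.nn.functional as F

def zero_shot_probs(image_embed: torch.Tensor,   # (D,) visual embedding v = f_theta(x)
                    text_embeds: torch.Tensor,   # (C, N, D) embeddings of N prompts per class
                    tau: float = 0.01) -> torch.Tensor:
    """Softmax over temperature-scaled cosine similarities to the class prototypes."""
    # Class prototype t_c: mean of the l2-normalized prompt embeddings
    # (re-normalizing the mean, as commonly done for CLIP zero-shot classifiers).
    prototypes = F.normalize(text_embeds, dim=-1).mean(dim=1)
    prototypes = F.normalize(prototypes, dim=-1)              # (C, D)
    v = F.normalize(image_embed, dim=-1)                      # (D,)
    logits = v @ prototypes.t() / tau                         # cosine similarity / temperature
    return logits.softmax(dim=-1)                             # \hat{y}_c of Eq. (1)
```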
Retaining this prior knowledge can therefore produce more robust adapters, able to generalize beyond the specific bias introduced in the few support samples, to more general concepts. In this context, the zero-shot prototypes from CLIP act as a proxy to initialize the learning procedure into a reliable region. For instance, CLIP-Adapter [11] maintains the zero-shot prototypes based inference as in Eq. (1), but includes a residual multi-layered perceptron to modify visual features, such as $\\mathbf{v}' = \\mathbf{v} + \\alpha_{\\mathrm{r}}f_{\\psi}(\\mathbf{v})$ . TIP-Adapter [42] includes an additional complexity layer, by combining the similarity of the zero-shot prototypes with a weighted similarity to the support samples, $f_{\\psi}(\\cdot ,\\beta)$ , controlled by the hyperparameter $\\beta$ , such that the predicted logits are $\\mathbf{l}_c = \\alpha_{\\mathrm{tipA}}f_{\\psi}(\\mathbf{v},\\beta) + \\mathbf{v}\\cdot \\mathbf{t}_c^\\top /\\tau$ . Finally, TaskRes [40] learns a modification of the initial zero-shot prototypes, $\\mathbf{w}_{TR}$ , using the support samples. The divergence between the initial and final prototypes is controlled by a residual ratio: $\\mathbf{t}' = \\mathbf{t} + \\alpha_{\\mathrm{TR}}\\mathbf{w}_{TR}$ . Nevertheless, these methods lack a model selection strategy to set these hyperparameters (See Supp. Sec. A for details).", + "bbox": [ + 76, + 90, + 472, + 425 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Proposed approach", + "text_level": 1, + "bbox": [ + 76, + 436, + 264, + 454 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Revisiting Linear Probing", + "text_level": 1, + "bbox": [ + 76, + 460, + 313, + 478 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The most straightforward approach used to adapt VLMs is Linear Probing [30], which refers to fitting a multiclass logistic regression linear classifier on top of the pre-trained features. Formally, the objective is to learn a set of class-wise prototypes, $\\mathbf{w}_c$ , to provide softmax class scores for a given visual embedding $\\mathbf{v}$ :", + "bbox": [ + 75, + 484, + 468, + 575 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {y} _ {c} = \\frac {\\exp \\left(\\mathbf {v} \\cdot \\mathbf {w} _ {c} ^ {\\top} / \\tau\\right)}{\\sum_ {i = 1} ^ {C} \\exp \\left(\\mathbf {v} \\cdot \\mathbf {w} _ {i} ^ {\\top} / \\tau\\right)}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 585, + 468, + 625 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The $\\mathbf{w}_{\\mathrm{c}}$ prototypes can be trained to minimize the cross-entropy loss on the support samples, as in Eq. (2), using standard SGD. Besides, a common practice in ETL is to regularize the trained weights [24, 30, 40] by minimizing its $\\ell_{2}$ -norm with an additional term, weighted by an empirically-optimized non-negative balancing term $\\lambda_{wd}$ . Despite its limited performance shown for few-shot adaptation [11, 30], we believe that this requires further exploration, as LP is a lightweight adaptation strategy, especially convenient due to its convexity during optimization. In this work, we present an updated view of Linear Probing. First, the class weights are initialized using the CLIP zero-shot prototypes, as SoTA ETL methods do [11, 40, 42]. 
Second, we replace the weight decay in the loss function and explicitly perform an $\\ell_{2}$ -normalization of the prototypes after each update, to exactly meet the pre-training scenario during adaptation, inspired by [12]. Similarly, cosine similarity is also scaled with CLIP's pre-trained temperature", + "bbox": [ + 75, + 628, + 472, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\tau$ . Last, we incorporate data augmentation, usually not included in LP. We refer to this updated Linear Probing version for vision-language models as ZS-LP1. Interestingly, ZS-LP serves as a strong baseline (see Tab. 1), which does not require adjusting specific hyperparameters per task.", + "bbox": [ + 496, + 90, + 890, + 167 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Constrained Linear Probing", + "text_level": 1, + "bbox": [ + 498, + 176, + 756, + 191 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Albeit a well-initialized Linear Probing offers a strong baseline for efficient transfer learning, the updated prototypes might deviate from the initial regions offering strong generalization. This is especially the case in the few-shot setting, where the few provided support samples might be under-representative and contain specific biases that produce spurious correlations, hence harming the generalization after adaptation [34, 44]. Thus, to retain the strong basis provided by the VLM model, and avoid prototype degradation, we resort to a constrained formulation of the loss in Eq. (2).", + "bbox": [ + 496, + 199, + 890, + 351 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Retaining prior knowledge. A direct form to avoid prototype degradation from zero-shot points is to constrain the cross-entropy minimization to enforce the resulting prototypes to remain close to the initial solution (i.e., initial set of prototypes $\\mathcal{T} = [\\mathbf{t}_1,\\dots ,\\mathbf{t}_c]$ ). Specifically, this constrained optimization problem can be defined as follows:", + "bbox": [ + 496, + 369, + 890, + 460 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\mathcal {W}} \\quad \\frac {1}{M} \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 472, + 890, + 513 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l l} \\text {s . t .} & \\mathbf {w} _ {c} = \\mathbf {t} _ {c} \\quad \\forall c \\in \\{1, \\dots , C \\}, \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 581, + 516, + 808, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "with $\\mathcal{W} = [\\mathbf{w}_1, \\dots, \\mathbf{w}_C]$ the set of learnable class prototypes. We can approximate the minimum of the constrained problem in Eq. 
(4) by a penalty-based optimization approach, transforming the above formulation into an unconstrained problem, and using an $\\ell_2$ -penalty between the class prototypes and the set of zero-shot anchors:", + "bbox": [ + 496, + 539, + 890, + 630 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\mathcal {W}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\lambda \\sum_ {m = 1} ^ {M} \\sum_ {c = 1} ^ {C} \\left\\| \\mathbf {t} _ {c} - \\mathbf {w} _ {c} ^ {(m)} \\right\\| _ {2} ^ {2}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 641, + 890, + 695 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\lambda \\in \\mathbb{R}_+$ is a scalar weight controlling the contribution of the corresponding penalty. Note that $\\mathbf{w}_c^{(m)}$ is the optimal class prototype for the support sample $m$ that minimizes the left term. For clarity in the presentation, we have omitted the normalization by the cardinality of each set.", + "bbox": [ + 496, + 696, + 890, + 773 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Sample and class-specific constraints. The associated constrained problem in Eq. (4) is approximated by an unconstrained formulation, which uses a single uniform penalty without considering individual data samples or", + "bbox": [ + 496, + 792, + 890, + 853 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "1Although the recent work in [24] explores some of these LP improvements, they still resort to a weight-decay regularization of the LP parameters, whose optimum relative weight is found in a few-shot validation set.", + "bbox": [ + 500, + 862, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "23684", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "classes. Certainly, all samples and categories within a given dataset may indeed present different intrinsic learning challenges. Thus, the problem in Eq. (5) is not solved accurately. A better alternative would consist in integrating multiple penalty weights $\\lambda$ , one for each sample and class, producing a set of penalty weights $\\Lambda \\in \\mathbb{R}_{+}^{M \\times C}$ . The resulting optimization problem can then be defined as:", + "bbox": [ + 75, + 90, + 472, + 196 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\mathcal {W}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\sum_ {m = 1} ^ {M} \\sum_ {c = 1} ^ {C} \\boldsymbol {\\Lambda} _ {m c} \\left\\| \\mathbf {t} _ {c} - \\mathbf {w} _ {c} ^ {(m)} \\right\\| _ {2} ^ {2}. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 83, + 220, + 468, + 273 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Now, from an optimization standpoint, if we suppose that there exists an optimal set of class-prototypes $\\mathcal{W}^*$ for the problem presented in Eq. (4), there also exists $\\Lambda^{*}\\in \\mathbb{R}_{+}^{M\\times C}$ such that $(\\mathcal{W}^{*},\\Lambda^{*})$ represents a saddle point of the Lagrangian associated to Eq. (4). In this scenario, $\\Lambda^{*}$ are the Lagrange multipliers of the presented problem, and is intuitive to consider $\\Lambda = \\Lambda^{*}$ as the best choice to solve Eq. 
(6).", + "bbox": [ + 75, + 275, + 468, + 381 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Nevertheless, using the Lagrange multipliers $\\Lambda^{*}$ as the weights for the penalties in Eq. (6) may not be feasible in practice. In particular, a number of conventional strategies employed to train deep neural networks hinder straightforward minimization. First, the use of mini-batch gradient descent averages the updated prototypes for every single observation into a mean prototype per class, making a sample-wise constraint hard to achieve. Furthermore, performing data augmentation over the support samples may yield distinct penalty weights for the augmented versions, which could be harder or easier to classify than their original counterparts.", + "bbox": [ + 75, + 381, + 468, + 561 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To alleviate the aforementioned challenges, we propose to relax the sample-wise penalties, which results in solving:", + "bbox": [ + 76, + 561, + 468, + 594 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\mathcal {W}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\sum_ {c = 1} ^ {C} \\lambda_ {c} \\left\\| \\mathbf {t} _ {c} - \\mathbf {w} _ {c} \\right\\| _ {2} ^ {2}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 616, + 468, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda \\in \\mathbb{R}_+^C$ is a set of $C$ class-wise penalty weights. While the problem complexity has been reduced by removing sample-wise penalty weights, we still need to choose $C$ weights for the class-wise penalties. This poses a challenge in the optimization, particularly for datasets that contain a large number of categories, such as ImageNet [8] ( $C = 1000$ ), where properly selecting the penalty weights $\\lambda \\in \\mathbb{R}_+^C$ can be a laborious process. Furthermore, choosing these values \"by hand\" juxtaposes with our goal of providing a validation-free solution for ETL.", + "bbox": [ + 75, + 670, + 468, + 821 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Class Adaptive Constraint for Linear Probing", + "text_level": 1, + "bbox": [ + 76, + 830, + 467, + 848 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "General Augmented Lagrangian. Augmented Lagrangian Multiplier (ALM) methods present an appealing alternative for learning the penalty weights. These popular methods", + "bbox": [ + 75, + 854, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "in optimization, which solve a constrained problem by the interplay of penalties and primal-dual steps, present well-known advantages [1, 32]. Formally, we can define a general constrained optimization problem as:", + "bbox": [ + 496, + 90, + 892, + 151 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {x} \\quad g (x) \\quad \\text {s . t .} \\quad h _ {i} (x) \\leq 0, \\quad i = 1, \\dots , n \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 542, + 174, + 890, + 195 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "with $g: \\mathbb{R}^d \\to \\mathbb{R}$ the objective function and $h_i: \\mathbb{R}^d \\to \\mathbb{R}, i = 1, \\dots, n$ the set of constraint functions. 
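Before the ALM machinery is introduced, a minimal sketch of the class-wise penalized Linear Probing objective of Eq. (7) may help fix ideas. It assumes precomputed, ℓ2-normalized support features, the zero-shot prototypes t_c, and a fixed vector of class-wise weights λ_c (later learned by the ALM scheme); variable names are illustrative and normalization constants are omitted, as in the paper.

```python
# Hedged sketch of Eq. (7): cross-entropy on the support set plus class-wise
# l2 penalties keeping the learned prototypes close to the zero-shot ones.
import torch
import torch.nn.functional as F

def penalized_lp_loss(W: torch.Tensor,        # (C, D) learnable class prototypes w_c
                      T: torch.Tensor,        # (C, D) zero-shot text prototypes t_c
                      feats: torch.Tensor,    # (M, D) l2-normalized support embeddings
                      labels: torch.Tensor,   # (M,)  integer class labels
                      lam: torch.Tensor,      # (C,)  class-wise penalty weights lambda_c
                      tau: float = 0.01) -> torch.Tensor:
    # Keep prototypes on the unit sphere, mirroring the explicit l2-normalization of ZS-LP.
    W_n = F.normalize(W, dim=-1)
    logits = feats @ W_n.t() / tau                         # Eq. (3) scores
    ce = F.cross_entropy(logits, labels)                   # data-fitting term
    penalty = (lam * ((T - W_n) ** 2).sum(dim=-1)).sum()   # sum_c lambda_c * ||t_c - w_c||_2^2
    return ce + penalty
```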
This problem is generally tackled by solving a succession of $j \\in \\mathbb{N}$ unconstrained problems, each solved approximately w.r.t $x$ :", + "bbox": [ + 496, + 200, + 893, + 263 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {x, \\lambda} \\quad \\mathcal {L} ^ {(j)} (x) = g (x) + \\sum_ {i = 1} ^ {n} P \\left(h _ {i} (x), \\rho_ {i} ^ {(j)}, \\lambda_ {i} ^ {(j)}\\right), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 517, + 281, + 890, + 321 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "with $P:\\mathbb{R}\\times \\mathbb{R}_{+ + }\\times \\mathbb{R}_{+ + }\\to \\mathbb{R}$ a penalty-Lagrangian function, whose derivative w.r.t. its first variable $P^{\\prime}(z,\\rho ,\\lambda)\\equiv$ $\\frac{\\partial}{\\partial z} P(z,\\rho ,\\lambda)$ exists, is positive and continuous for all $z\\in \\mathbb{R}$ and $(\\rho ,\\lambda)\\in (\\mathbb{R}_{+ + })^{2}$ . The set of axioms that any penalty function $P$ must satisfy [2] are detailed in Supp. Sec. B. Furthermore, $\\pmb {\\rho}^{(j)} = (\\rho_i^{(j)})_{1\\leq i\\leq n}\\in \\mathbb{R}_{+ + }^n$ and $\\pmb{\\lambda}^{(j)} =$ $(\\lambda_i^{(j)})_{1\\leq i\\leq n}\\in \\mathbb{R}_{+ + }^n$ denote the penalty parameters and multipliers associated to the penalty $P$ at the iteration $j$", + "bbox": [ + 498, + 325, + 892, + 452 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The ALM can be split into two iterations: outer iterations (indexed by $j$ ), where the penalty multipliers $\\lambda$ and the penalty parameters $\\rho$ are updated, and the inner iterations, where $\\mathcal{L}^{(j)}$ (Eq. (9)) is minimized using the previous solution as initialization. In particular, the penalty multipliers $\\lambda^{(j)}$ are updated to the derivative of $P$ w.r.t. to the solution obtained during the last inner step:", + "bbox": [ + 496, + 452, + 893, + 559 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda_ {i} ^ {(j + 1)} = P ^ {\\prime} \\left(h _ {i} (x), \\rho_ {i} ^ {(j)}, \\lambda_ {i} ^ {(j)}\\right). \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 569, + 890, + 590 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "By doing this, the penalty multipliers increase when the constraint is violated, and decrease otherwise. Thus, this strategy enables an adaptive and learnable way for determining the penalty weights.", + "bbox": [ + 496, + 593, + 890, + 652 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our solution. We propose to use an ALM approach to solve the problem in Eq. (7). In particular, we reformulate this problem integrating a penalty function $P$ parameterized by $(\\rho, \\lambda) \\in \\mathbb{R}_{++}^{C} \\times \\mathbb{R}_{++}^{C}$ , formally defined as:", + "bbox": [ + 496, + 652, + 890, + 715 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\mathcal {W}, \\boldsymbol {\\lambda}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\sum_ {c = 1} ^ {C} P \\left(\\mathbf {t} _ {c} - \\mathbf {w} _ {c}, \\rho_ {c}, \\lambda_ {c}\\right). \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 732, + 890, + 785 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Following our realistic validation-free scenario, the only data from which we can obtain feedback during adaptation is the support set $S$ . 
Thus, the penalty multiplier for class $c$ at epoch $j + 1$ can be defined as:", + "bbox": [ + 496, + 787, + 893, + 847 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda_ {c} ^ {(j + 1)} = \\frac {1}{| \\mathcal {S} |} \\sum_ {(\\mathbf {x}, \\mathbf {y}) \\in \\mathcal {S}} P ^ {\\prime} \\left(\\mathbf {t} _ {c} - \\mathbf {w} _ {c}, \\rho_ {c} ^ {(j)}, \\lambda_ {c} ^ {(j)}\\right). \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 529, + 864, + 890, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "23685", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As suggested by prior work [2, 25], we employ the PHR function as penalty $P$ , defined as:", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {P H R} (z, \\rho , \\lambda) = \\left\\{ \\begin{array}{l l} \\lambda z + \\frac {1}{2} \\rho z ^ {2} & \\text {i f} \\quad \\lambda + \\rho z \\geq 0; \\\\ - \\frac {\\lambda^ {2}}{2 \\rho} & \\text {o t h e r w i s e .} \\end{array} \\right. \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 96, + 140, + 468, + 181 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Nevertheless, as we empirically found in our experiments (Supp. Sec. C.3), estimating Lagrange multipliers from the support samples might overfit the training data. As we do not have access to additional data points, we follow a simple strategy, consisting in performing only one iteration of the $\\lambda$ update. For a given target task, we rely on text embeddings as an anchor that offers a generalizable representation of concrete concepts along different visual domains. Thus, we consider the zero-shot prototypes $\\mathbf{t}_c$ as the initial approximation of the problem in Eq. (12) (first inner step). Instead of initializing $\\lambda$ randomly, which might hamper the convergence, we compute the penalty weight for a given class as the average of the zero-shot softmax scores for all support samples belonging to that class, such that $\\lambda_c^* = \\frac{1}{|\\mathcal{B}_c^+|}\\sum_{i\\in \\mathcal{B}_c^+}\\hat{y}_c^{(i)}$ , with $\\mathcal{B}_c^+ = \\{i|i\\in M,y_c^{(i)} = 1\\}$ . Note that these values are obtained by replacing $\\mathbf{w}_c$ with the solution found in the inner step $(\\mathbf{t}_c)$ in Eq. (3), which indeed satisfies the constraint $\\mathbf{w}_c = \\mathbf{t}_c$ , resulting in a zero penalty. Taking now the derivative w.r.t. $z$ of PHR, it is straightforward to see that the learned value of $\\lambda$ after one iteration is indeed $\\lambda_c^*$ .", + "bbox": [ + 76, + 186, + 470, + 508 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 520, + 209, + 537 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Setup", + "text_level": 1, + "bbox": [ + 76, + 545, + 158, + 561 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets: Few-shot adaptation. We follow prior ETL literature [11, 40, 42] and benchmark all the methods on 11 datasets: Imagenet [8], Caltech101 [10], OxfordPets [29], StanfordCars [21], Flowers102 [28], Food101 [4], FGVCAircraft [26], SUN397 [37], DTD [7], EuroSAT [15], and UCF101 [33]. These cover a diverse set of computer vision classification tasks, from general objects to actions or fine-grained categories in specialized applications. 
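For completeness, a hedged sketch of the two CLAP ingredients described in Sec. 4.3 above follows: the PHR penalty of Eq. (13), applied elementwise to t_c − w_c, and the class-wise multiplier initialization λ_c*, i.e., the average zero-shot softmax score of the support samples of class c (used for the single outer update argued above). All names are illustrative assumptions, not the authors' implementation.

```python
# Hedged sketch of Eq. (13) (PHR penalty) and the lambda_c* initialization used by CLAP.
import torch

def phr(z: torch.Tensor, rho: torch.Tensor, lam: torch.Tensor) -> torch.Tensor:
    """PHR(z, rho, lambda), elementwise: quadratic branch when lambda + rho*z >= 0."""
    quadratic = lam * z + 0.5 * rho * z ** 2
    constant = -(lam ** 2) / (2.0 * rho)
    return torch.where(lam + rho * z >= 0, quadratic, constant)

def init_class_multipliers(zs_probs: torch.Tensor,   # (M, C) zero-shot scores from Eq. (1)
                           labels: torch.Tensor,     # (M,)  support labels
                           num_classes: int) -> torch.Tensor:
    """lambda_c* = mean zero-shot softmax score of the support samples of class c."""
    lam = torch.zeros(num_classes)
    for c in range(num_classes):
        lam[c] = zs_probs[labels == c, c].mean()
    return lam
```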
To train the few-shot adapters, we randomly retrieve $K$ shots ( $K \\in \\{1, 2, 4, 8, 16\\}$ ) for each class. Last, for evaluation, we used the test sets provided in each dataset, with the same data splits as [40, 46]. Domain generalization capabilities. We further assess the model's robustness to domain shifts by following existing ETL works. We used ImageNet as a source domain for adaptation, and its variants as target tasks, which include: ImageNetV2 [31], ImageNet-Sketch [35], ImageNet-A [16], and ImageNet-R [17]. In this scenario, the model only sees a few labeled samples from the source domain, and target data are used exclusively for testing. In addition, we also employ this setting to motivate the use of efficient adapters vs fine-tuning the entire VLM [12, 22, 40].", + "bbox": [ + 76, + 568, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation details. All experiments are based on CLIP [30] pre-trained features, using different backbones: ResNet-50 [14] and ViT-B/16 [9] (results for other backbones in Supp. Sec. C.2). We resort to ResNet-50 as backbone in the ablation studies. For each downstream task we first extract all pre-trained features of the support shots and then run adaptation experiments over those. Data augmentation is applied during the feature extraction stage using random zoom, crops, and flips, following [40, 45]. The number of augmentations per support sample is set to 20. We used the same text prompts per dataset as in [40, 46]. Following our claim that using a validation set on few-shot adaptation is unrealistic, we trained ZS-LP and CLAP using the same configuration for all datasets, number of shots, and visual backbones. Concretely, we optimize the adapter for 300 epochs, using SGD optimizer with Momentum of 0.9. We use a relatively large initial learning rate of 0.1 to avoid underfitting on the support set, whose value decreases during training following a cosine decay scheduler. We ran all experiments with three different random seeds, and the results were averaged across runs.", + "bbox": [ + 496, + 90, + 890, + 407 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines and adaptation protocol. We selected adapter-based methods as our main competitors based on the similarity to our approach, including Clip-Adapter [11], TIP-Adapter [42], TaskRes [40], and Cross-Modal [24]. It is important to highlight that prior works [11, 40, 42] apparently leverage either the extensive test set, or an independent additional validation subset, to adjust important hyperparameters for few-shot adaptation, such as the learning rate, training epochs, and particular parameters that control each method [24]. Nevertheless, as we exposed in Fig. 1, their performance dramatically decreases when the set of hyperparameters is not adjusted for the testing scenario. To adhere to real-world requirements, we define a strict few-shot adaptation protocol, in which no validation or test samples are available to find the best case-specific configuration for each method, and hyperparameters remain fixed across tasks (details in Supp. Sec. A.4).", + "bbox": [ + 496, + 409, + 892, + 667 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Results", + "text_level": 1, + "bbox": [ + 500, + 679, + 591, + 694 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Efficient transfer learning. We report in Tab. 
1 the performance of adapter-based approaches averaged across 11 datasets, in the more realistic and practical validation-free experimental setting. Furthermore, for prompt-learning-based approaches, we include the results reported in prior literature, for a more comprehensive comparison. From these values, we can make interesting observations. First, a well-initialized Linear Probe, i.e., using the CLIP zero-shot weights, does not show the performance degradation discussed in prior works, and it is indeed a competitive alternative to SoTA approaches. Second, and more surprisingly, more complex approaches such as CLIP-Adapter, or TIP-Adapter, show a significant decline in performance com", + "bbox": [ + 496, + 704, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "23686", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "pared to their original results when no validation set is available for model selection. Interestingly, TaskRes(e), which is some sort of two-stage zero-shot initialization Linear Probing with an updated text projection, also offers robust performance. Nevertheless, the absence of a detailed explanation of how the enhanced version is obtained in the original work hampers fair comparisons. Third, constraining the weights update to remain close to the zero-shot knowledge (CLAP) shows consistent improvements across different shots, especially in the very low data regime. This suggests that retaining the previous base knowledge from VLMs is important to avoid diverging because of unrepresentative shots during adaptation. Results per dataset are detailed in Supp. Fig. 8 and Supp. Tab. 9.", + "bbox": [ + 75, + 90, + 472, + 303 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ef04845cc7ab5531e1e0a9bfcb616fdbd62e17647894940e67b0e36bd3b61a10.jpg", + "table_caption": [ + "Table 1. Comparison to state-of-the-art methods for few-shot adaptation of CLIP-based models, using ResNet-50 backbone. ETL methods are trained under the same protocol, i.e., absence of a validation set and using a fixed configuration across datasets, and results are averaged across 11 datasets. Prompt-learning methods results are directly extracted from [6, 13]. Best results in bold." + ], + "table_footnote": [], + "table_body": "
Method | K=1 | K=2 | K=4 | K=8 | K=16
Prompt-learning methods
CoOp IJCV'22 [46] | 59.56 | 61.78 | 66.47 | 69.85 | 73.33
ProGrad ICCV'23 [13] | 62.61 | 64.90 | 68.45 | 71.41 | 74.28
PLOT ICLR'23 [6] | 62.59 | 65.23 | 68.60 | 71.23 | 73.94
Efficient transfer learning - a.k.a. adapters
Zero-Shot ICML'21 [30] | 57.71 | 57.71 | 57.71 | 57.71 | 57.71
Rand. Init LP ICML'21 [30] | 30.42 | 41.86 | 51.69 | 60.84 | 67.54
CLIP-Adapter IJCV'23 [11] | 58.43 | 62.46 | 66.18 | 69.87 | 73.35
TIP-Adapter ECCV'22 [42] | 58.86 | 60.33 | 61.49 | 63.15 | 64.61
TIP-Adapter(f) ECCV'22 [42] | 60.29 | 62.26 | 65.32 | 68.35 | 71.40
CrossModal-LP CVPR'23 [24] | 62.24 | 64.48 | 66.67 | 70.36 | 73.65
TaskRes(e) CVPR'23 [40] | 61.44 | 65.26 | 68.35 | 71.66 | 74.42
ZS-LP | 61.28 | 64.88 | 67.98 | 71.43 | 74.37
CLAP | 62.79 | 66.07 | 69.13 | 72.08 | 74.57
", + "bbox": [ + 80, + 402, + 460, + 599 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Domain generalization. If adaptation is not carefully conducted, the resulting model might distort the pre-trained knowledge and underperform when new data with domain drifts is involved [22], even below the zero-shot (no adaptation) performance. Thus, evaluating the robustness of novel adapters under this scenario of domain generalization is of special interest. To do so, adapters are optimized on ImageNet using 16 shots per class, and directly evaluated on ImageNet variants. In this setting, we also assume the absence of a validation dataset, and hence all adapters are trained until convergence, using the same configuration across backbones. A summary of the results is reported in Tab. 2, while specific numbers across datasets and additional backbones are included in Supp. Tab. 10. From these experiments, we make two striking observations. First, ZS-LP is a strong baseline compared to other more complex adapters on the source domain. Even more remarkably, prior SoTA adapters, such as CLIP-Adapter or TIP-Adapter, fail to generalize to unseen domains. In", + "bbox": [ + 75, + 613, + 472, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "deed, when using recent vision transformers, which are overtaking convolutional neural networks, none of existing adapters-based approaches outperform standard zero-shot prediction in the presence of distributional drifts. In contrast, CLAP yields the best in-distribution performance and also shows consistent improvements under domain shifts across all backbones.", + "bbox": [ + 496, + 90, + 893, + 196 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/09b97238e490ec5763ba80ebf91660f82564cae94b2edfb0efc95cec3596a6a7.jpg", + "table_caption": [ + "Table 2. Robustness to domain shifts. Adapters are adjusted on ImageNet and evaluated at out-of-distribution generalization on 4 ImageNet shifts. Bold indicates best performance. Differences with respect to no adaptation (a.k.a zero-shot) are highlighted." + ], + "table_footnote": [], + "table_body": "
Backbone | Method | Source (ImageNet) | Target (Average)
ResNet-50 | Zero-Shot ICML'21 [30] | 60.35 | 40.61
 | Rand. Init LP ICML'21 [30] | 52.24 (−8.11)↓ | 24.61 (−16.00)↓
 | CLIP-Adapter IJCV'23 [11] | 59.02 (−1.33)↓ | 31.21 (−9.40)↓
 | TIP-Adapter ECCV'22 [42] | 57.81 (−2.54)↓ | 40.69 (+0.08)↑
 | TIP-Adapter(f) ECCV'22 [42] | 62.27 (+1.92)↑ | 41.36 (+0.75)↑
 | TaskRes(e) CVPR'23 [40] | 60.85 (+0.50)↑ | 41.28 (+0.67)↑
 | ZS-LP | 61.00 (+0.65)↑ | 36.58 (−4.03)↓
 | CLAP | 65.02 (+4.67)↑ | 42.91 (+2.30)↑
ViT-B/16 | Zero-Shot ICML'21 [30] | 68.71 | 57.17
 | Rand. Init LP ICML'21 [30] | 62.95 (−5.76)↓ | 40.41 (−16.76)↓
 | CLIP-Adapter IJCV'23 [11] | 68.46 (−0.25)↓ | 50.72 (−6.45)↓
 | TIP-Adapter ECCV'22 [42] | 53.81 (−14.90)↓ | 41.55 (−15.62)↓
 | TIP-Adapter(f) ECCV'22 [42] | 51.71 (−17.00)↓ | 35.58 (−21.6)↓
 | TaskRes(e) CVPR'23 [40] | 70.84 (+2.13)↑ | 55.35 (−1.82)↓
 | ZS-LP | 69.73 (+1.02)↑ | 53.65 (−3.52)↓
 | CLAP | 73.38 (+4.67)↑ | 60.04 (+2.87)↑
", + "bbox": [ + 506, + 267, + 883, + 465 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ce9371f7ec7ac2457a804f5af74c896af421b56dc708c0c47a4fa06901e07cbe.jpg", + "table_caption": [ + "Table 3. Fine-tuning (FT) vs. efficient transfer learning (ETL). A benchmark for the low data regime, i.e., 8 shots for each class. For the sake of fairness, FT methods (above the dashed line) are trained with 4 shots and early-stopped using a validation set containing 4 shots. On the other hand, ETL methods (below the dashed line) are trained using 8 shots and rely solely on the support set. All methods use ViT-B/16 as CLIP backbone." + ], + "table_footnote": [ + "*Specific numbers for FT, LP-FT, WiSE-FT, and FLYP are retrieved from [12]." + ], + "table_body": "
Method | Source (ImageNet) | Target -V2 | Target -Sketch | Target -A | Target -R | Target Avg.
Fine-tuning (FT) | 69.88 | 62.44 | 47.07 | 47.52 | 76.08 | 58.28
LP-FT ICLR'23 [22] | 71.29 | 64.04 | 48.50 | 49.49 | 77.63 | 59.92
WiSE CVPR'22 [36] | 71.17 | 63.81 | 49.38 | 50.59 | 78.56 | 60.59
FLYP CVPR'23 [12] | 71.51 | 64.59 | 49.50 | 51.32 | 78.52 | 60.98
Zero-Shot | 68.71 | 60.76 | 46.18 | 47.76 | 73.98 | 57.17
Rand. Init LP | 56.58 | 47.17 | 25.82 | 27.03 | 47.05 | 36.77
ZS-LP | 68.49 | 60.07 | 42.77 | 42.39 | 71.73 | 54.24
CLAP | 71.75 | 64.06 | 47.66 | 48.40 | 76.70 | 59.21
", + "bbox": [ + 503, + 578, + 883, + 698 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Is it worth optimizing the entire model? We now compare CLAP to end-to-end full fine-tuning (FT) approaches: LP-FT [22], WiSE-FT [36], and FLYP [12]. The former two methods require a validation set for early stopping, and the latter two use it for both early stopping and tuning the mixing coefficient hyperparameter $\\alpha$ . Therefore, for a $K$ -shot problem, these methods actually require $2K$ shots for each class, $K$ for training, and $K$ for validation. As the balancing penalty term in CLAP is optimized with the support set, and does not require a validation set, a fair comparison would be to evaluate the $K$ -shot performance of fine-tuning methods against our method's $2K$ -shot results. Thus, Tab. 3 in", + "bbox": [ + 496, + 719, + 893, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "23687", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "cludes the performance of all the models when 8 labeled images are available for each class overall. Analyzing the results, we can conclude that in the low data regime, full finetuning is not necessarily superior to ETL when compared properly. More specifically, our approach outperforms finetuning methods in in-distribution performance and performs reasonably well on OOD datasets, while having a fraction of the estimizable parameters of fine-tuning methods.", + "bbox": [ + 75, + 90, + 472, + 212 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3. Ablation experiments", + "text_level": 1, + "bbox": [ + 76, + 220, + 279, + 237 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "On the need for model selection strategies. Relevant methods (e.g., CLIP-Adapter [11], TIP-Adapter [42], or TaskRes [40]) include different hyperparameters that directly control their performance. Nevertheless, these methods are incomplete, since they do not include any strategy for adjusting these parameters, typically referred to as model selection. In contrast, and as previously stressed, there is evidence that these works use a large evaluation subset to adapt their settings to each scenario [24]. To investigate this observation, we evaluate these methods in cross-dataset model selection experiments. The best hyperparameters values for a task (i.e., dataset), which are found in an Oracle scenario using the entire test subset, are used during adaptation to another dataset. The matrices showing the relative improvements over a zero-shot initialized Linear Probing (ZS-LP) are depicted in Fig. 1. These results show empirically that the hyperparameters values are highly task-dependent, and that SoTA methods must adjust their hyperparameters on the target task to outperform this simple baseline, which is unrealistic in practice. In contrast, the proposed CLAP is more robust, showing consistent results across all datasets, even in the worst degradation case, as it does not require particular modifications per task.", + "bbox": [ + 75, + 244, + 472, + 593 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/a2c46c28d3f5c753e8d05b3ce3bda45b2739b52899683c25037468ffd7d1d56d.jpg", + "table_caption": [ + "Table 4. Improving Linear Probing. Using as baseline the proposed ZS-LP configuration detailed in Sec. 4.1, we isolate the effect of removing different parts of the model, while keeping the rest static. Results are averaged across 11 datasets." 
+ ], + "table_footnote": [], + "table_body": "
Method | K=1 | K=2 | K=4
ZS-LP | 61.28 | 64.88 | 67.98
w/o DA | 57.72 (−3.5)↓ | 61.94 (−2.9)↓ | 65.41 (−2.5)↓
w/o Temp. Scaling (τ) | 58.33 (−2.9)↓ | 59.85 (−5.0)↓ | 59.91 (−8.0)↓
w/o L2-norm | 48.67 (−12.6)↓ | 55.29 (−9.6)↓ | 61.16 (−6.8)↓
Rand. Init. | 30.42 (−30.8)↓ | 41.86 (−23.0)↓ | 51.69 (−16.2)↓
", + "bbox": [ + 81, + 664, + 460, + 750 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Details in Linear Probing matter. As described earlier in Sec. 4.1, LP has been discouraged in the prior literature due to its limited performance in few-shot adaptation [11, 30]. Nevertheless, we argue that this behavior stems from the original way in which LP was introduced in [30], inspired by prior self-supervised learning methods. Indeed, a strategy tailored to contrastive VLMs alleviates the performance drop of LP observed in prior works. In particular, using zero-shot initialization, the same temperature scaling as", + "bbox": [ + 75, + 763, + 468, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "pre-training, and explicit $\\ell_2$ -normalization of the class prototypes, considerably improves the generalization of few-shot adaptation (Tab. 4). This aligns with relevant literature on other topics such as FT [12], which suggests that the adaptation conditions should match the pre-training setting. Also, including other heuristics such as data augmentation (DA), usually omitted in LP [40, 42], is of special relevance.", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Using a few-shot validation set. Cross-Modal adapter [24] uses a validation set composed of $(\\min(K, 4))$ samples to adjust the experimental setting and early stopping. Even though this setting is more appropriate, it still requires an additional number of shots for model selection. Nevertheless, for the sake of fairness, the performance comparison to methods that do not require a validation set should be carried out by training the latter methods using $K + \\min(K, 4)$ shots. When this fair benchmark is established (see Tab. 5), simple ZS-LP excels again as a strong baseline, outperforming more complex methods on the low-shot regime. Only when using a large number of shots ( $K > 8$ ) partial finetuning and ETL methods marginally benefit from validation samples. However, model selection using a validation set increases the computational workload and processing times during adaptation due to its grid search nature.", + "bbox": [ + 496, + 220, + 892, + 464 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/49f9a338f2b7a0091f9a129c24014a786739d9885db789aac22adac6595be69f.jpg", + "table_caption": [ + "Table 5. Using a few-shot validation set. Results for priors works on this setting are obtained from [24]. Average across 11 datasets." + ], + "table_footnote": [], + "table_body": "
Method | K=1 | K=2 | K=4 | K=8 | K=16
Protocol in [24]: K-shots for train + min(K, 4) for validation
TIP-Adapter [42] | 63.3 | 65.9 | 69.0 | 72.2 | 75.1
CrossModal LP [24] | 64.1 | 67.0 | 70.3 | 73.0 | 76.0
CrossModal Adapter [24] | 64.4 | 67.6 | 70.8 | 73.4 | 75.9
CrossModal PartialFT [24] | 64.7 | 67.2 | 70.5 | 73.6 | 77.1
Ours: using K + min(K, 4) shots for training
ZS-LP | 64.9 | 68.0 | 71.4 | 73.1 | 75.0
CLAP | 66.1 | 69.1 | 72.1 | 73.5 | 75.1
", + "bbox": [ + 501, + 510, + 893, + 633 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Limitations", + "text_level": 1, + "bbox": [ + 500, + 648, + 624, + 666 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we have introduced a Class-Adaptive linear Probe (CLAP) objective, based on an adaptation of the general Augmented Lagrangian method, for efficient adaptation of large vision-language models in realistic scenarios. Despite its superiority, our empirical validation suggests that the benefits of our approach diminish as the number of shots increases, indicating that other strategies might be privileged if the number of adaptation samples is large.", + "bbox": [ + 496, + 675, + 890, + 797 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 500, + 813, + 658, + 830 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work is supported by the National Science and Engineering Research Council of Canada (NSERC) and Fonds de recherche du Québec (FRQNT). We also thank Calcul Quebec and Compute Canada.", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "23688", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 80, + 90, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Dimitri P. Bertsekas. Constrained Optimization and Lagrange Multiplier Methods (Optimization and Neural Computation Series). Athena Scientific, 1 edition, 1996.", + "[2] Ernesto G Birgin, Romulo A Castillo, and José Mario Martínez. Numerical comparison of augmented lagrangian algorithms for nonconvex problems. Computational Optimization and Applications, 31(1):31-55, 2005.", + "[3] Rishi Bommasani et al. On the opportunities and risks of foundation models. ArXiv, 2021.", + "[4] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 - mining discriminative components with random forests. In European Conference on Computer Vision (ECCV), 2014.", + "[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in Neural Information Processing Systems (NeurIPS), 33:1877-1901, 2020.", + "[6] Guangyi Chen, Weiran Yao, Xiangchen Song, Xinyue Li, Yongming Rao, and Kun Zhang. Prompt learning with optimal transport for vision-language models. In International Conference on Learning Representations (ICLR), 2023.", + "[7] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3606-3613, 2014.", + "[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 248-255, 2009.", + "[9] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. International Conference on Learning Representations (ICLR), 2021.", + "[10] Li Fei-Fei, R. 
Fergus, and P. Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Worskshops (CVPRW), pages 178–178, 2004.", + "[11] Peng Gao, Shijie Geng, Renrui Zhang, Teli Ma, Rongyao Fang, Yongfeng Zhang, Hongsheng Li, and Yu Qiao. Clip-adapter: Better vision-language models with feature adapters. International Journal of Computer Vision (IJCV), 2023.", + "[12] Sachin Goyal, Ananya Kumar, Sankalp Garg, Zico Kolter, and Aditi Raghunathan. Finetune like you pretrain: Improved finetuning of zero-shot vision models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19338-19347, 2023." + ], + "bbox": [ + 78, + 114, + 467, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Changsheng Xu Hantao Yao, Rui Zhang. Visual-language prompt tuning with knowledge-guided context optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023.", + "[14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2016.", + "[15] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. Introducing eurosat: A novel dataset and deep learning benchmark for land use and land cover classification. In IEEE International Geoscience and Remote Sensing Symposium (IGARSS), pages 3606-3613, 2018.", + "[16] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15262-15271, 2019.", + "[17] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadayath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), page 8340-8349, 2021.", + "[18] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning (ICML), pages 4904-4916, 2021.", + "[19] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision (ECCV), pages 709-727, 2022.", + "[20] Muhammad Uzair Khattak, Hanoona Rasheed, Muhammad Maaz, Salman Khan, and Fahad Shahbaz Khan. Maple: Multi-modal prompt learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19113-19122, 2023.", + "[21] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), page 3498–3505, 2012.", + "[22] Ananya Kumar, Aditi Raghunathan, Robbie Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. 
In International Conference on Learning Representations (ICLR), pages 1-42, 2022.", + "[23] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. In _Conference on Empirical Methods in Natural Language Processing (EMNLP)_, pages 3045-3059, 2021.", + "[24] Zhiqiu Lin, Samuel Yu, Zhiyi Kuang, Deepak Pathak, and Deva Ramanan. Multimodality helps unimodality: Cross-modal few-shot learning with multimodal models. In Pro" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "23689", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023.", + "[25] Bingyuan Liu, Jérôme Rony, Adrian Galdran, Jose Dolz, and Ismail Ben Ayed. Class adaptive network calibration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16070-16079, 2023.", + "[26] S. Maji, J. Kannala, E. Rahtu, M. Blaschko, and A. Vedaldi. Fine-grained visual classification of aircraft. In ArXiv Preprint, 2013.", + "[27] Sachit Menon and Carl Vondrick. Visual classification via description from large language models. In International Conference on Learning Representations (ICLR), pages 1-17, 2023.", + "[28] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In Indian Conference on Computer Vision, Graphics and Image Processing, 2008.", + "[29] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), page 3498-3505, 2012.", + "[30] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (ICML), pages 8748-8763, 2021.", + "[31] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. Do imagenet classifiers generalize to imagenet? In International Conference on Machine Learning (ICML), pages 5389-5400, 2019.", + "[32] Sara Sangalli, Ertunc Erdil, Andeas Hotker, Olivio F Donati, and Ender Konukoglu. Constrained optimization to train neural networks on critical and under-represented classes. In Advances in Neural Information Processing Systems (NeurIPS), 2021.", + "[33] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. In ArXiv Preprint, 2012.", + "[34] Rohan Taori, Achal Dave, Vaishaal Shankar, Nicholas Carlini, Benjamin Recht, and Ludwig Schmidt. Measuring robustness to natural distribution shifts in image classification. In Advances in Neural Information Processing Systems (NeurIPS), 2020.", + "[35] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In Advances in Neural Information Processing Systems (NeurIPS), 2019.", + "[36] Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo-Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, and Ludwig Schmidt. Robust fine-tuning of zero-shot models. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7959-7971, 2022.", + "[37] Jianxiong Xiao, James Hays, Krista A. Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene" + ], + "bbox": [ + 78, + 92, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "recognition from abbey to zoo. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3485-3492, 2010.", + "[38] Yinghui Xing, Qirui Wu, De Cheng, Shizhou Zhang, Guoqiang Liang, Peng Wang, and Yanning Zhang. Dual modality prompt tuning for vision-language pre-trained model. IEEE Transactions on Multimedia, 2023.", + "[39] Hantao Yao, Rui Zhang, and Changsheng Xu. Visual-language prompt tuning with knowledge-guided context optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6757-6767, 2023.", + "[40] Tao Yu, Zhihe Lu, Xin Jin, Zhibo Chen, and Xinchao Wang. Task residual for tuning vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10899-10909, 2023.", + "[41] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18123-18133, 2022.", + "[42] Renrui Zhang, Rongyao Fang, Wei Zhang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li. Tip-adapter: Training-free clip-adapter for better vision-language modeling. In European Conference on Computer Vision (ECCV), pages 1-19, 2022.", + "[43] Zexuan Zhong, Dan Friedman, and Danqi Chen. Factual probing is [mask]: Learning vs. learning to recall. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5017-5033, 2021.", + "[44] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 45:4396-4415, 2022.", + "[45] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Conditional prompt learning for vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022.", + "[46] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision (IJCV), 2022.", + "[47] Beier Zhu, Yulei Niu, Yucheng Han, Yue Wu, and Hanwang Zhang. Prompt-aligned gradient for prompt tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15659-15669, 2023." 
+ ], + "bbox": [ + 503, + 92, + 890, + 755 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "23690", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/69aa9b91-03a6-4c14-a53b-96602951c67b_model.json b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/69aa9b91-03a6-4c14-a53b-96602951c67b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..de717e9a44b0ea4c534e28515589f7f16e390d0d --- /dev/null +++ b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/69aa9b91-03a6-4c14-a53b-96602951c67b_model.json @@ -0,0 +1,2101 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.096, + 0.131, + 0.877, + 0.156 + ], + "angle": 0, + "content": "A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models" + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.181, + 0.356, + 0.2 + ], + "angle": 0, + "content": "Julio Silva-Rodriguez" + }, + { + "type": "text", + "bbox": [ + 0.396, + 0.181, + 0.506, + 0.199 + ], + "angle": 0, + "content": "Sina Hajimiri" + }, + { + "type": "text", + "bbox": [ + 0.546, + 0.182, + 0.681, + 0.199 + ], + "angle": 0, + "content": "Ismail Ben Ayed" + }, + { + "type": "text", + "bbox": [ + 0.428, + 0.2, + 0.543, + 0.217 + ], + "angle": 0, + "content": "ÉTS Montreal" + }, + { + "type": "text", + "bbox": [ + 0.312, + 0.22, + 0.652, + 0.235 + ], + "angle": 0, + "content": "julio-jose.silva-rodriguez@etsmt1.ca" + }, + { + "type": "text", + "bbox": [ + 0.72, + 0.182, + 0.802, + 0.198 + ], + "angle": 0, + "content": "Jose Dolz" + }, + { + "type": "image_caption", + "bbox": [ + 0.107, + 0.275, + 0.22, + 0.287 + ], + "angle": 0, + "content": "(a) CLIP-Adapter [11]" + }, + { + "type": "image", + "bbox": [ + 0.064, + 0.287, + 0.265, + 0.418 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.307, + 0.275, + 0.423, + 0.287 + ], + "angle": 0, + "content": "(b) TIP-Adapter(f) [42]" + }, + { + "type": "image", + "bbox": [ + 0.265, + 0.287, + 0.468, + 0.418 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.526, + 0.275, + 0.606, + 0.287 + ], + "angle": 0, + "content": "(c) TaskRes [40]" + }, + { + "type": "image", + "bbox": [ + 0.468, + 0.287, + 0.668, + 0.418 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.728, + 0.275, + 0.811, + 0.287 + ], + "angle": 0, + "content": "(d) CLAP (Ours)" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.287, + 0.898, + 0.418 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.429, + 0.893, + 0.471 + ], + "angle": 0, + "content": "Figure 1. Pitfalls of few-shot adapters due to the absence of a model selection strategy. The cross-shift model selection matrices \\((i,j)\\) depict the relative improvement w.r.t. 
a zero-shot initialized Linear Probing when using the optimal hyperparameters for the dataset \\(i\\) (rows), for adapting in another task \\(j\\) (columns), for each SoTA method (first three plots) and our approach (last plot)." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.484, + 0.314, + 0.499 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.524, + 0.473, + 0.902 + ], + "angle": 0, + "content": "Efficient transfer learning (ETL) is receiving increasing attention to adapt large pre-trained language-vision models on downstream tasks with a few labeled samples. While significant progress has been made, we reveal that state-of-the-art ETL approaches exhibit strong performance only in narrowly-defined experimental setups, and with a careful adjustment of hyperparameters based on a large corpus of labeled samples. In particular, we make two interesting, and surprising empirical observations. First, to outperform a simple Linear Probing baseline, these methods require to optimize their hyper-parameters on each target task. And second, they typically underperform -sometimes dramatically-standard zero-shot predictions in the presence of distributional drifts. Motivated by the unrealistic assumptions made in the existing literature, i.e., access to a large validation set and case-specific grid-search for optimal hyperparameters, we propose a novel approach that meets the requirements of real-world scenarios. More concretely, we introduce a Class-Adaptive linear Probe (CLAP) objective, whose balancing term is optimized via an adaptation of the general Augmented Lagrangian method tailored to this context. We comprehensively evaluate CLAP on a broad span of datasets and scenarios, demonstrating that it consistently outperforms SoTA approaches, while yet being a much more efficient alternative. Code available at" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.485, + 0.811, + 0.5 + ], + "angle": 0, + "content": "https://github.com/jusiro/CLAP." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.528, + 0.633, + 0.544 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.553, + 0.892, + 0.78 + ], + "angle": 0, + "content": "Large vision-language models (VLMs), such as CLIP [30], are reshaping the research landscape with their unprecedented performance. These models undergo training on an extensive dataset consisting of hundreds of millions of image-text pairs, which are leveraged via contrastive learning [30]. Once trained, VLMs offer a remarkable zero-shot performance on a wide span of visual recognition problems thanks to the rich learned representations [27, 30]. Nevertheless, the extensive hardware and data-driven resources that such training demands [3] suggest that these models can only be trained on singular occasions. Furthermore, the large scale of these networks poses important challenges when it comes to adjusting their parameters on small downstream tasks that involve only a few labeled samples, making the full fine-tuning of the entire model impractical." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.781, + 0.893, + 0.902 + ], + "angle": 0, + "content": "An emerging alternative to alleviate this issue consists in fine-tuning VLMs by adding a small set of learnable parameters, whose values are optimized during the adaptation step [11, 19, 42, 45, 46]. 
These tunable weights can be introduced in the input space as visual [19] or text prompts [45, 46], or added in the form of adapters across the network [11, 40, 42]. While both families of approaches fit within the Efficient Transfer Learning (ETL) literature," + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "23681" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.228 + ], + "angle": 0, + "content": "prompt learning still requires backpropagating the gradients through the entire network. Thus, besides introducing a burden on resource reuse, these methods preclude black-box adaptation, introducing a potential concern about leaking the source data, which is paramount in privacy-oriented applications. In contrast, strategies based on adapters only need gradients on the extra set of parameters, typically in the last layer, avoiding costly fine-tuning processes and data leakage, yet yielding state-of-the-art performance [24, 40]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.23, + 0.473, + 0.488 + ], + "angle": 0, + "content": "Despite the progress observed in adapter-based methods for fine-tuning VLMs under the few-shot learning paradigm, improving the performance on the target task while preserving their generalization capabilities remains still a challenge [46]. We argue that this is likely due to the severe overfitting to the support set samples employed during few-shot adaptation, which significantly deviates the updated class prototypes from the zero-shot prototypes initially provided by the pre-trained model. In fact, popular adapter-based ETL strategies, such as CLIP-Adapter [11] and TIP-Adapter [42], carefully adjust the model-specific hyperparameters, in conjunction with other key hyperparameters related to the learning scheduler, to control the trade-off between initial zero-shot inference and the integration of new information from the support set. Furthermore, recent evidence [24] suggests that these works apparently use the large-scale test set to adjust their hyperparameters." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.491, + 0.473, + 0.748 + ], + "angle": 0, + "content": "A significant limitation becomes evident in that these hyperparameters, when optimized for one specific task, do not exhibit strong generalizability to other tasks, as illustrated in Fig. 1. Indeed, state-of-the-art (SoTA) methods struggle to find a homogeneous configuration that outperforms a simple well-initialized Linear Probing (LP) adaptation. Notably, in a realistic adaptation scenario (Fig. 1), we can observe dramatic performance degradations, up to \\(21\\%\\), compared to this simple baseline. These practices virtually bias the model selection process, as assuming access to a significantly larger set of labeled samples, and adjusting the model hyperparameters in a case-specific manner, is not only unrealistic but also impractical (grid-search must be done for each case). Thus, we argue that if an ETL method's model selection strategy is not solely based on the support samples, the method is incomplete, and impractical for real-world few-shot adaptation problems." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.473, + 0.903 + ], + "angle": 0, + "content": "In this work, we seek to redirect the efforts on few-shot ETL to a more strict, but realistic scenario, in which only the support samples are accessible during training. 
The absence of an evaluation subset urges novel adapters to include a model selection strategy, robust across a large spectrum of tasks. Interestingly, we empirically observed that a carefully designed Linear Probing (ZS-LP), whose weights are initialized with the zero-shot prototypes from CLIP, is a strong baseline that outperforms more convoluted ETL solutions. To further improve the baseline ZS-LP and opti" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.304 + ], + "angle": 0, + "content": "mize the trade-off between initial zero-shot representations and updated class prototypes on novel tasks, we propose penalizing large deviations from the original zero-shot prototypes during adaptation. The resulting learning objective, however, presents two major issues. First, the penalty included to control the deviation between original and updated prototypes is a scalar value, uniform across all classes, which can detrimentally affect the model's performance in the presence of harder-to-learn classes. Second, the penalty balancing weight must be set using a validation set, which juxtaposes with our validation-free scenario. To address these limitations, we propose CLass-Adaptive linear Probe (CLAP), which is based on an Augmented Lagrangian Multiplier approach. We can summarize our contributions as:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.305, + 0.892, + 0.41 + ], + "angle": 0, + "content": "- We empirically observe that SoTA few-shot ETL adapters require careful adjustment of a set of key hyperparameters for each task, which is unrealistic and impractical in real-world settings. Surprisingly, if a fixed configuration is adopted across tasks, these methods are likely to substantially underperform a simple Linear Probing strategy initialized with the zero-shot prototypes from CLIP." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.411, + 0.893, + 0.682 + ], + "angle": 0, + "content": "- We propose a principled solution to tackle the trade-off between original and updated class prototypes in Linear Probing, which integrates a penalty term to penalize large deviations from zero-shot prototypes. To address the underlying challenges from the resulting constrained optimization problem, we present a modified Augmented Lagrangian Multiplier (ALM) method. This alleviates the need of having to fine-tune the penalty balancing weight, which is learned in the outer iteration of the optimization process. In order to adapt ALM to the presented scenario, two critical choices were made: \\(i\\) Leveraging class prototypes, as well as data augmentation, motivate the use of class-wise multipliers, instead of sample and class-wise multipliers as in the original ALM; \\(ii\\) In the presented scenario, there is no access to a validation set, and the only feedback available is from the support samples. Hence, we only perform one outer-step update, which can avoid potential overfitting on the support set." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.683, + 0.894, + 0.819 + ], + "angle": 0, + "content": "- We provide extensive experiments to assess the performance of CLAP in the proposed scenario, including few-shot adaptation on 11 popular classification benchmarks, domain generalization, comparison to full fine-tuning methods, and ablation studies to validate our choices. As shown in Fig. 1 and in the experimental section, CLAP delivers consistent performance across different tasks with a homogeneous configuration, and largely outperforms SoTA ETL approaches in all scenarios." 
+ }, + { + "type": "list", + "bbox": [ + 0.5, + 0.305, + 0.894, + 0.819 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.831, + 0.64, + 0.847 + ], + "angle": 0, + "content": "2. Related work" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Vision-language pre-trained models. The field of machine learning is in the midst of a paradigm shift with the emerging rise of vision-language models (VLMs). These" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "23682" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.272 + ], + "angle": 0, + "content": "networks have gained increasing popularity, especially fueled by the significant improvements achieved in computer vision and natural language processing tasks [5, 18, 30, 41]. The prevailing learning paradigm consists of a dual stream of data, which separately encodes images and their text counterparts, leveraging contrastive learning at a large scale to bridge image and text representations in the latent space. Particularly, models such as CLIP [30] and ALIGN [18] have successfully mitigated the distribution discrepancy between text and images, and have shown tremendous zero-shot capabilities on visual recognition tasks, primarily in the context of classification." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.274, + 0.473, + 0.44 + ], + "angle": 0, + "content": "Full fine-tuning. A body of work proposes fine-tuning the entire VLMs to adapt to a specific task [12, 22, 36]. This strategy, however, presents several drawbacks. Concretely, fine-tuning increases the complexity of the model being optimized, makes the optimization process more time-consuming compared to ETL methods, and requires access to the backbone weights, which does not allow a black-box adaptation. Furthermore, full fine-tuning methods typically tend to overfit when trained on small datasets, requiring a large corpus of labeled data for the target task, which may be impractical in many real-world scenarios." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.442, + 0.473, + 0.775 + ], + "angle": 0, + "content": "Efficient transfer leaning attempts to address these issues by updating a small set of learnable parameters and leveraging a limited amount of annotated samples. Current ETL literature can be categorized into Prompt Learning [20, 38, 39, 45-47] and Adapter-based [11, 40, 42] approaches. Prompt Learning represents a recent advancement in the realm of natural language processing [23, 43], which has been recently adopted with success in VLMs. In these methods, only the text tokens provided to the model are optimized. Nevertheless, these techniques require long training steps due to backpropagating the gradient over the entire network, which juxtaposes with the spirit of efficient adaptation. Furthermore, black-box adaptation is also not possible in prompt learning. Adapter-based methods, in contrast, offer a much lighter alternative as only a small subset of parameters, typically at the latest layers, are adjusted. For example, CLIP-Adapter [11] integrates a two-layer MLP to modify the visual embedding generated by CLIP. In TIP-Adapter [42], the visual prototypes obtained from the few-shot support samples are leveraged to compute the similarity with the visual embedding of the test image, which is later used to modify the CLIP visual embedding." 
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.79, + 0.218, + 0.807 + ], + "angle": 0, + "content": "3. Preliminaries" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.816, + 0.427, + 0.833 + ], + "angle": 0, + "content": "3.1. Contrastive vision-language pre-training" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.472, + 0.903 + ], + "angle": 0, + "content": "Large-scale VLMs, such as CLIP [30], are trained on large heterogeneous datasets, encouraging image and text representations to correlate in a joint embedding space. Formally, CLIP comprises a vision encoder, \\( f_{\\theta}(\\cdot) \\), and a text encoder," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.184 + ], + "angle": 0, + "content": "\\(f_{\\phi}(\\cdot)\\), each aiming at learning a rich representation of their data points. These points are projected in an \\(\\ell_2\\)-normalized shared embedding space, yielding the corresponding visual \\(\\mathbf{v}\\) and text \\(\\mathbf{t}\\) embeddings. The whole network is optimized to maximize the similarity between the projected embeddings of paired images and texts, using a contrastive loss." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.19, + 0.654, + 0.207 + ], + "angle": 0, + "content": "3.2. Transferability" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.213, + 0.895, + 0.396 + ], + "angle": 0, + "content": "Zero-shot inference. For a particular downstream image classification task, CLIP-based models are able to provide predictions based on the similarity between category prompts, i.e., text descriptions of target classes, and testing images. Given a set of \\( C \\) categories, and an ensemble of \\( N \\) text prompts for each one, \\( \\{\\{T_{n,c}\\}_{n=1}^{N}\\}_{c=1}^{C} \\), a common practice is to obtain a zero-shot prototype for each target category by computing the center of the \\( \\ell_2 \\)-normalized text embeddings for each class, \\( \\mathbf{t}_c = \\frac{1}{N}\\sum_{n=1}^{N}f_\\phi(T_{n,c}) \\). Thus, for a given query image \\( \\mathbf{x} \\), the zero-shot prediction is obtained from the softmax cosine similarity between the vision embedding \\( \\mathbf{v} = f_\\theta(\\mathbf{x}) \\), and category prototypes \\( \\mathbf{t}_c \\):" + }, + { + "type": "equation", + "bbox": [ + 0.603, + 0.412, + 0.892, + 0.451 + ], + "angle": 0, + "content": "\\[\n\\hat {y} _ {c} = \\frac {\\exp \\left(\\mathbf {v} \\cdot \\mathbf {t} _ {c} ^ {\\top} / \\tau\\right)}{\\sum_ {i = 1} ^ {C} \\exp \\left(\\mathbf {v} \\cdot \\mathbf {t} _ {i} ^ {\\top} / \\tau\\right)}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.454, + 0.895, + 0.502 + ], + "angle": 0, + "content": "where \\(\\tau\\) is a temperature parameter learned during the pretraining stage, and \\(\\mathbf{v} \\cdot \\mathbf{t}^{\\top}\\) the dot product operator, which is equivalent to cosine similarity, as vectors are \\(\\ell_{2}\\)-normalized." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.517, + 0.895, + 0.655 + ], + "angle": 0, + "content": "Few-shot learning. This scenario assumes access to limited supervisory information on the downstream tasks, in the form of a few examples for each target category, so-called shots. 
Formally, we denote a support set, \\( S = \\{(\\mathbf{x}^{(m)},\\mathbf{y}^{(m)})\\}_{m = 1}^{M = K\\times C} \\), composed of \\( K \\) images for each target category, such that \\( K \\) takes a small value, e.g., \\( K\\in \\{1,2,4,8,16\\} \\), and where \\( \\mathbf{y}\\in \\{0,1\\} ^C \\) is the corresponding one-hot label for a given image \\( \\mathbf{x} \\). The objective is to adapt the pre-trained model using this limited support set." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.66, + 0.848, + 0.677 + ], + "angle": 0, + "content": "3.3. Efficient transfer learning with adapters" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.684, + 0.894, + 0.792 + ], + "angle": 0, + "content": "In their general form, ETL methods based on adapters learn a set of transformations over the pre-trained features \\((\\mathbf{v}^{\\prime},\\mathbf{t}^{\\prime} = f_{\\psi}(\\mathbf{v},\\mathbf{t}))\\) , parameterized by the so-called adapter \\(\\psi\\) , which produces softmax scores for the new tasks following Eq. (1). The adapter \\(\\psi\\) can be optimized by minimizing the popular cross-entropy (CE) loss, \\(\\mathcal{H}(\\mathbf{y},\\hat{\\mathbf{y}}) = -\\sum_{c = 1}^{C}y_{c}\\log \\hat{y}_{c}\\) , over the support set samples:" + }, + { + "type": "equation", + "bbox": [ + 0.599, + 0.8, + 0.892, + 0.842 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\psi} \\frac {1}{M} \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right). \\tag {2}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.848, + 0.854, + 0.864 + ], + "angle": 0, + "content": "3.4. Pitfalls of existing few-shot ETL methods" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Recent ETL methods tailored to VLMs focus on enhancing the supervision provided by the support samples with" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "23683" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.426 + ], + "angle": 0, + "content": "priors learned by the VLMs at the task at hand. The pretrained model gathers robust knowledge and is able to align visual and textual concepts. Retaining this prior knowledge can therefore produce more robust adapters, able to generalize beyond the specific bias introduced in the few support samples, to more general concepts. In this context, the zero-shot prototypes from CLIP act as a proxy to initialize the learning procedure into a reliable region. For instance, CLIP-Adapter [11] maintains the zero-shot prototypes based inference as in Eq. (1), but includes a residual multi-layered perceptron to modify visual features, such as \\(\\mathbf{v}' = \\mathbf{v} + \\alpha_{\\mathrm{r}}f_{\\psi}(\\mathbf{v})\\). TIP-Adapter [42] includes an additional complexity layer, by combining the similarity of the zero-shot prototypes with a weighted similarity to the support samples, \\(f_{\\psi}(\\cdot ,\\beta)\\), controlled by the hyperparameter \\(\\beta\\), such that the predicted logits are \\(\\mathbf{l}_c = \\alpha_{\\mathrm{tipA}}f_{\\psi}(\\mathbf{v},\\beta) + \\mathbf{v}\\cdot \\mathbf{t}_c^\\top /\\tau\\). Finally, TaskRes [40] learns a modification of the initial zero-shot prototypes, \\(\\mathbf{w}_{TR}\\), using the support samples. The divergence between the initial and final prototypes is controlled by a residual ratio: \\(\\mathbf{t}' = \\mathbf{t} + \\alpha_{\\mathrm{TR}}\\mathbf{w}_{TR}\\). 
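All of these adapters build on the zero-shot rule of Eq. (1). For concreteness, a minimal sketch of that rule is given below; it is an illustrative outline only (not the authors' code), and it assumes the prompt embeddings and the image embedding have already been computed and \(\ell_2\)-normalized with CLIP's encoders, with `tau` standing in for the pre-trained temperature.

```python
import torch
import torch.nn.functional as F

def zero_shot_prototypes(prompt_embeddings):
    # prompt_embeddings: (N, C, d) tensor of l2-normalized text embeddings, N prompts per class.
    # The class prototype t_c is the mean prompt embedding, re-normalized so that its dot
    # product with an image embedding is a cosine similarity, as assumed in Eq. (1).
    t = prompt_embeddings.mean(dim=0)      # (C, d)
    return F.normalize(t, dim=-1)

def zero_shot_scores(v, prototypes, tau=0.01):
    # v: (d,) l2-normalized image embedding; prototypes: (C, d);
    # tau: temperature (0.01 is only a placeholder for CLIP's learned value).
    logits = prototypes @ v / tau          # scaled cosine similarities
    return logits.softmax(dim=-1)          # softmax scores of Eq. (1)
```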
Nevertheless, these methods lack a model selection strategy to set these hyperparameters (See Supp. Sec. A for details)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.438, + 0.265, + 0.455 + ], + "angle": 0, + "content": "4. Proposed approach" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.462, + 0.314, + 0.479 + ], + "angle": 0, + "content": "4.1. Revisiting Linear Probing" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.486, + 0.47, + 0.577 + ], + "angle": 0, + "content": "The most straightforward approach used to adapt VLMs is Linear Probing [30], which refers to fitting a multiclass logistic regression linear classifier on top of the pre-trained features. Formally, the objective is to learn a set of class-wise prototypes, \\(\\mathbf{w}_c\\), to provide softmax class scores for a given visual embedding \\(\\mathbf{v}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.178, + 0.587, + 0.47, + 0.625 + ], + "angle": 0, + "content": "\\[\n\\hat {y} _ {c} = \\frac {\\exp \\left(\\mathbf {v} \\cdot \\mathbf {w} _ {c} ^ {\\top} / \\tau\\right)}{\\sum_ {i = 1} ^ {C} \\exp \\left(\\mathbf {v} \\cdot \\mathbf {w} _ {i} ^ {\\top} / \\tau\\right)}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.629, + 0.473, + 0.902 + ], + "angle": 0, + "content": "The \\(\\mathbf{w}_{\\mathrm{c}}\\) prototypes can be trained to minimize the cross-entropy loss on the support samples, as in Eq. (2), using standard SGD. Besides, a common practice in ETL is to regularize the trained weights [24, 30, 40] by minimizing its \\(\\ell_{2}\\)-norm with an additional term, weighted by an empirically-optimized non-negative balancing term \\(\\lambda_{wd}\\). Despite its limited performance shown for few-shot adaptation [11, 30], we believe that this requires further exploration, as LP is a lightweight adaptation strategy, especially convenient due to its convexity during optimization. In this work, we present an updated view of Linear Probing. First, the class weights are initialized using the CLIP zero-shot prototypes, as SoTA ETL methods do [11, 40, 42]. Second, we replace the weight decay in the loss function and explicitly perform an \\(\\ell_{2}\\)-normalization of the prototypes after each update, to exactly meet the pre-training scenario during adaptation, inspired by [12]. Similarly, cosine similarity is also scaled with CLIP's pre-trained temperature" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.168 + ], + "angle": 0, + "content": "\\(\\tau\\). Last, we incorporate data augmentation, usually not included in LP. We refer to this updated Linear Probing version for vision-language models as ZS-LP1. Interestingly, ZS-LP serves as a strong baseline (see Tab. 1), which does not require adjusting specific hyperparameters per task." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.177, + 0.757, + 0.193 + ], + "angle": 0, + "content": "4.2. Constrained Linear Probing" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.2, + 0.892, + 0.352 + ], + "angle": 0, + "content": "Albeit a well-initialized Linear Probing offers a strong baseline for efficient transfer learning, the updated prototypes might deviate from the initial regions offering strong generalization. This is especially the case in the few-shot setting, where the few provided support samples might be under-representative and contain specific biases that produce spurious correlations, hence harming the generalization after adaptation [34, 44]. 
Thus, to retain the strong basis provided by the VLM model, and avoid prototype degradation, we resort to a constrained formulation of the loss in Eq. (2)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.371, + 0.892, + 0.461 + ], + "angle": 0, + "content": "Retaining prior knowledge. A direct form to avoid prototype degradation from zero-shot points is to constrain the cross-entropy minimization to enforce the resulting prototypes to remain close to the initial solution (i.e., initial set of prototypes \\(\\mathcal{T} = [\\mathbf{t}_1,\\dots ,\\mathbf{t}_c]\\)). Specifically, this constrained optimization problem can be defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.591, + 0.473, + 0.892, + 0.514 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\mathcal {W}} \\quad \\frac {1}{M} \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.582, + 0.517, + 0.81, + 0.534 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l l} \\text {s . t .} & \\mathbf {w} _ {c} = \\mathbf {t} _ {c} \\quad \\forall c \\in \\{1, \\dots , C \\}, \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.54, + 0.892, + 0.631 + ], + "angle": 0, + "content": "with \\(\\mathcal{W} = [\\mathbf{w}_1, \\dots, \\mathbf{w}_C]\\) the set of learnable class prototypes. We can approximate the minimum of the constrained problem in Eq. (4) by a penalty-based optimization approach, transforming the above formulation into an unconstrained problem, and using an \\(\\ell_2\\)-penalty between the class prototypes and the set of zero-shot anchors:" + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.642, + 0.892, + 0.696 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\mathcal {W}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\lambda \\sum_ {m = 1} ^ {M} \\sum_ {c = 1} ^ {C} \\left\\| \\mathbf {t} _ {c} - \\mathbf {w} _ {c} ^ {(m)} \\right\\| _ {2} ^ {2}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.697, + 0.892, + 0.775 + ], + "angle": 0, + "content": "where \\(\\lambda \\in \\mathbb{R}_+\\) is a scalar weight controlling the contribution of the corresponding penalty. Note that \\(\\mathbf{w}_c^{(m)}\\) is the optimal class prototype for the support sample \\(m\\) that minimizes the left term. For clarity in the presentation, we have omitted the normalization by the cardinality of each set." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.793, + 0.892, + 0.854 + ], + "angle": 0, + "content": "Sample and class-specific constraints. The associated constrained problem in Eq. (4) is approximated by an unconstrained formulation, which uses a single uniform penalty without considering individual data samples or" + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.863, + 0.892, + 0.901 + ], + "angle": 0, + "content": "1Although the recent work in [24] explores some of these LP improvements, they still resort to a weight-decay regularization of the LP parameters, whose optimum relative weight is found in a few-shot validation set." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "23684" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.198 + ], + "angle": 0, + "content": "classes. Certainly, all samples and categories within a given dataset may indeed present different intrinsic learning challenges. 
Thus, the problem in Eq. (5) is not solved accurately. A better alternative would consist in integrating multiple penalty weights \\(\\lambda\\), one for each sample and class, producing a set of penalty weights \\(\\Lambda \\in \\mathbb{R}_{+}^{M \\times C}\\). The resulting optimization problem can then be defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.084, + 0.221, + 0.47, + 0.274 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\mathcal {W}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\sum_ {m = 1} ^ {M} \\sum_ {c = 1} ^ {C} \\boldsymbol {\\Lambda} _ {m c} \\left\\| \\mathbf {t} _ {c} - \\mathbf {w} _ {c} ^ {(m)} \\right\\| _ {2} ^ {2}. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.276, + 0.47, + 0.382 + ], + "angle": 0, + "content": "Now, from an optimization standpoint, if we suppose that there exists an optimal set of class-prototypes \\(\\mathcal{W}^*\\) for the problem presented in Eq. (4), there also exists \\(\\Lambda^{*}\\in \\mathbb{R}_{+}^{M\\times C}\\) such that \\((\\mathcal{W}^{*},\\Lambda^{*})\\) represents a saddle point of the Lagrangian associated to Eq. (4). In this scenario, \\(\\Lambda^{*}\\) are the Lagrange multipliers of the presented problem, and is intuitive to consider \\(\\Lambda = \\Lambda^{*}\\) as the best choice to solve Eq. (6)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.382, + 0.469, + 0.563 + ], + "angle": 0, + "content": "Nevertheless, using the Lagrange multipliers \\(\\Lambda^{*}\\) as the weights for the penalties in Eq. (6) may not be feasible in practice. In particular, a number of conventional strategies employed to train deep neural networks hinder straightforward minimization. First, the use of mini-batch gradient descent averages the updated prototypes for every single observation into a mean prototype per class, making a sample-wise constraint hard to achieve. Furthermore, performing data augmentation over the support samples may yield distinct penalty weights for the augmented versions, which could be harder or easier to classify than their original counterparts." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.563, + 0.47, + 0.595 + ], + "angle": 0, + "content": "To alleviate the aforementioned challenges, we propose to relax the sample-wise penalties, which results in solving:" + }, + { + "type": "equation", + "bbox": [ + 0.101, + 0.617, + 0.47, + 0.658 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\mathcal {W}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\sum_ {c = 1} ^ {C} \\lambda_ {c} \\left\\| \\mathbf {t} _ {c} - \\mathbf {w} _ {c} \\right\\| _ {2} ^ {2}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.671, + 0.47, + 0.822 + ], + "angle": 0, + "content": "where \\(\\lambda \\in \\mathbb{R}_+^C\\) is a set of \\(C\\) class-wise penalty weights. While the problem complexity has been reduced by removing sample-wise penalty weights, we still need to choose \\(C\\) weights for the class-wise penalties. This poses a challenge in the optimization, particularly for datasets that contain a large number of categories, such as ImageNet [8] (\\(C = 1000\\)), where properly selecting the penalty weights \\(\\lambda \\in \\mathbb{R}_+^C\\) can be a laborious process. Furthermore, choosing these values \"by hand\" juxtaposes with our goal of providing a validation-free solution for ETL." 
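To make the formulation concrete, a minimal PyTorch-style sketch of the class-wise penalized linear probe of Eq. (7) is given below, combined with the zero-shot initialization and prototype re-normalization of Sec. 4.1. It is a simplified illustration under our reading of the objective (normalization constants, data augmentation, and the cosine learning-rate decay are omitted), not the reference implementation; all variable names are ours.

```python
import torch
import torch.nn.functional as F

def penalized_linear_probe(feats, labels, zs_prototypes, lmbda, tau=0.01,
                           epochs=300, lr=0.1):
    """Class-wise penalized linear probe, Eq. (7).

    feats:          (M, d) l2-normalized support embeddings
    labels:         (M,)   integer class labels
    zs_prototypes:  (C, d) zero-shot text prototypes t_c
    lmbda:          (C,)   non-negative class-wise penalty weights lambda_c
    """
    w = zs_prototypes.clone().requires_grad_(True)      # zero-shot initialization (ZS-LP)
    opt = torch.optim.SGD([w], lr=lr, momentum=0.9)
    for _ in range(epochs):
        logits = feats @ w.t() / tau                     # cosine similarities, Eq. (3)
        ce = F.cross_entropy(logits, labels)             # cross-entropy over the support set
        penalty = (lmbda * (zs_prototypes - w).pow(2).sum(-1)).sum()  # sum_c lambda_c ||t_c - w_c||^2
        loss = ce + penalty
        opt.zero_grad()
        loss.backward()
        opt.step()
        with torch.no_grad():                            # keep prototypes on the unit sphere (Sec. 4.1)
            w.data = F.normalize(w.data, dim=-1)
    return w.detach()
```

Setting every \(\lambda_c\) to zero recovers the plain ZS-LP baseline, while larger values keep the learned prototypes anchored to the zero-shot ones.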
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.832, + 0.468, + 0.849 + ], + "angle": 0, + "content": "4.3. Class Adaptive Constraint for Linear Probing" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.902 + ], + "angle": 0, + "content": "General Augmented Lagrangian. Augmented Lagrangian Multiplier (ALM) methods present an appealing alternative for learning the penalty weights. These popular methods" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.893, + 0.152 + ], + "angle": 0, + "content": "in optimization, which solve a constrained problem by the interplay of penalties and primal-dual steps, present well-known advantages [1, 32]. Formally, we can define a general constrained optimization problem as:" + }, + { + "type": "equation", + "bbox": [ + 0.543, + 0.175, + 0.892, + 0.196 + ], + "angle": 0, + "content": "\\[\n\\min _ {x} \\quad g (x) \\quad \\text {s . t .} \\quad h _ {i} (x) \\leq 0, \\quad i = 1, \\dots , n \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.202, + 0.894, + 0.265 + ], + "angle": 0, + "content": "with \\( g: \\mathbb{R}^d \\to \\mathbb{R} \\) the objective function and \\( h_i: \\mathbb{R}^d \\to \\mathbb{R}, i = 1, \\dots, n \\) the set of constraint functions. This problem is generally tackled by solving a succession of \\( j \\in \\mathbb{N} \\) unconstrained problems, each solved approximately w.r.t \\( x \\):" + }, + { + "type": "equation", + "bbox": [ + 0.519, + 0.282, + 0.892, + 0.322 + ], + "angle": 0, + "content": "\\[\n\\min _ {x, \\lambda} \\quad \\mathcal {L} ^ {(j)} (x) = g (x) + \\sum_ {i = 1} ^ {n} P \\left(h _ {i} (x), \\rho_ {i} ^ {(j)}, \\lambda_ {i} ^ {(j)}\\right), \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.326, + 0.893, + 0.453 + ], + "angle": 0, + "content": "with \\(P:\\mathbb{R}\\times \\mathbb{R}_{+ + }\\times \\mathbb{R}_{+ + }\\to \\mathbb{R}\\) a penalty-Lagrangian function, whose derivative w.r.t. its first variable \\(P^{\\prime}(z,\\rho ,\\lambda)\\equiv\\) \\(\\frac{\\partial}{\\partial z} P(z,\\rho ,\\lambda)\\) exists, is positive and continuous for all \\(z\\in \\mathbb{R}\\) and \\((\\rho ,\\lambda)\\in (\\mathbb{R}_{+ + })^{2}\\) . The set of axioms that any penalty function \\(P\\) must satisfy [2] are detailed in Supp. Sec. B. Furthermore, \\(\\pmb {\\rho}^{(j)} = (\\rho_i^{(j)})_{1\\leq i\\leq n}\\in \\mathbb{R}_{+ + }^n\\) and \\(\\pmb{\\lambda}^{(j)} =\\) \\((\\lambda_i^{(j)})_{1\\leq i\\leq n}\\in \\mathbb{R}_{+ + }^n\\) denote the penalty parameters and multipliers associated to the penalty \\(P\\) at the iteration \\(j\\)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.453, + 0.894, + 0.56 + ], + "angle": 0, + "content": "The ALM can be split into two iterations: outer iterations (indexed by \\( j \\)), where the penalty multipliers \\( \\lambda \\) and the penalty parameters \\( \\rho \\) are updated, and the inner iterations, where \\( \\mathcal{L}^{(j)} \\) (Eq. (9)) is minimized using the previous solution as initialization. In particular, the penalty multipliers \\( \\lambda^{(j)} \\) are updated to the derivative of \\( P \\) w.r.t. to the solution obtained during the last inner step:" + }, + { + "type": "equation", + "bbox": [ + 0.591, + 0.57, + 0.892, + 0.591 + ], + "angle": 0, + "content": "\\[\n\\lambda_ {i} ^ {(j + 1)} = P ^ {\\prime} \\left(h _ {i} (x), \\rho_ {i} ^ {(j)}, \\lambda_ {i} ^ {(j)}\\right). 
\\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.594, + 0.892, + 0.654 + ], + "angle": 0, + "content": "By doing this, the penalty multipliers increase when the constraint is violated, and decrease otherwise. Thus, this strategy enables an adaptive and learnable way for determining the penalty weights." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.654, + 0.892, + 0.716 + ], + "angle": 0, + "content": "Our solution. We propose to use an ALM approach to solve the problem in Eq. (7). In particular, we reformulate this problem integrating a penalty function \\(P\\) parameterized by \\((\\rho, \\lambda) \\in \\mathbb{R}_{++}^{C} \\times \\mathbb{R}_{++}^{C}\\), formally defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.733, + 0.892, + 0.786 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\mathcal {W}, \\boldsymbol {\\lambda}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\sum_ {c = 1} ^ {C} P \\left(\\mathbf {t} _ {c} - \\mathbf {w} _ {c}, \\rho_ {c}, \\lambda_ {c}\\right). \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.788, + 0.894, + 0.848 + ], + "angle": 0, + "content": "Following our realistic validation-free scenario, the only data from which we can obtain feedback during adaptation is the support set \\( S \\). Thus, the penalty multiplier for class \\( c \\) at epoch \\( j + 1 \\) can be defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.531, + 0.865, + 0.892, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\lambda_ {c} ^ {(j + 1)} = \\frac {1}{| \\mathcal {S} |} \\sum_ {(\\mathbf {x}, \\mathbf {y}) \\in \\mathcal {S}} P ^ {\\prime} \\left(\\mathbf {t} _ {c} - \\mathbf {w} _ {c}, \\rho_ {c} ^ {(j)}, \\lambda_ {c} ^ {(j)}\\right). \\tag {12}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23685" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.078, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "As suggested by prior work [2, 25], we employ the PHR function as penalty \\(P\\), defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.097, + 0.141, + 0.47, + 0.182 + ], + "angle": 0, + "content": "\\[\n\\operatorname {P H R} (z, \\rho , \\lambda) = \\left\\{ \\begin{array}{l l} \\lambda z + \\frac {1}{2} \\rho z ^ {2} & \\text {i f} \\quad \\lambda + \\rho z \\geq 0; \\\\ - \\frac {\\lambda^ {2}}{2 \\rho} & \\text {o t h e r w i s e .} \\end{array} \\right. \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.187, + 0.471, + 0.509 + ], + "angle": 0, + "content": "Nevertheless, as we empirically found in our experiments (Supp. Sec. C.3), estimating Lagrange multipliers from the support samples might overfit the training data. As we do not have access to additional data points, we follow a simple strategy, consisting in performing only one iteration of the \\(\\lambda\\) update. For a given target task, we rely on text embeddings as an anchor that offers a generalizable representation of concrete concepts along different visual domains. Thus, we consider the zero-shot prototypes \\(\\mathbf{t}_c\\) as the initial approximation of the problem in Eq. (12) (first inner step). 
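As a compact reference, the PHR penalty of Eq. (13) and its derivative, which is what the multiplier updates in Eqs. (10) and (12) evaluate, can be transcribed as follows. This is only a schematic transcription of the formulas; how the vector-valued constraint \(\mathbf{t}_c - \mathbf{w}_c\) enters the penalty (e.g., coordinate-wise) follows the formulation above.

```python
import torch

def phr(z, rho, lam):
    # PHR penalty of Eq. (13); z, rho, lam are broadcastable tensors with rho > 0, lam > 0.
    return torch.where(lam + rho * z >= 0,
                       lam * z + 0.5 * rho * z ** 2,
                       -lam ** 2 / (2 * rho))

def phr_derivative(z, rho, lam):
    # dPHR/dz = max(lam + rho * z, 0): the multiplier update rule of Eqs. (10) and (12).
    return torch.clamp(lam + rho * z, min=0.0)
```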
Instead of initializing \\(\\lambda\\) randomly, which might hamper the convergence, we compute the penalty weight for a given class as the average of the zero-shot softmax scores for all support samples belonging to that class, such that \\(\\lambda_c^* = \\frac{1}{|\\mathcal{B}_c^+|}\\sum_{i\\in \\mathcal{B}_c^+}\\hat{y}_c^{(i)}\\), with \\(\\mathcal{B}_c^+ = \\{i|i\\in M,y_c^{(i)} = 1\\}\\). Note that these values are obtained by replacing \\(\\mathbf{w}_c\\) with the solution found in the inner step \\((\\mathbf{t}_c)\\) in Eq. (3), which indeed satisfies the constraint \\(\\mathbf{w}_c = \\mathbf{t}_c\\), resulting in a zero penalty. Taking now the derivative w.r.t. \\(z\\) of PHR, it is straightforward to see that the learned value of \\(\\lambda\\) after one iteration is indeed \\(\\lambda_c^*\\)." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.521, + 0.21, + 0.538 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.546, + 0.16, + 0.563 + ], + "angle": 0, + "content": "5.1. Setup" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.569, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Datasets: Few-shot adaptation. We follow prior ETL literature [11, 40, 42] and benchmark all the methods on 11 datasets: Imagenet [8], Caltech101 [10], OxfordPets [29], StanfordCars [21], Flowers102 [28], Food101 [4], FGVCAircraft [26], SUN397 [37], DTD [7], EuroSAT [15], and UCF101 [33]. These cover a diverse set of computer vision classification tasks, from general objects to actions or fine-grained categories in specialized applications. To train the few-shot adapters, we randomly retrieve \\( K \\) shots (\\( K \\in \\{1, 2, 4, 8, 16\\} \\)) for each class. Last, for evaluation, we used the test sets provided in each dataset, with the same data splits as [40, 46]. Domain generalization capabilities. We further assess the model's robustness to domain shifts by following existing ETL works. We used ImageNet as a source domain for adaptation, and its variants as target tasks, which include: ImageNetV2 [31], ImageNet-Sketch [35], ImageNet-A [16], and ImageNet-R [17]. In this scenario, the model only sees a few labeled samples from the source domain, and target data are used exclusively for testing. In addition, we also employ this setting to motivate the use of efficient adapters vs fine-tuning the entire VLM [12, 22, 40]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.409 + ], + "angle": 0, + "content": "Implementation details. All experiments are based on CLIP [30] pre-trained features, using different backbones: ResNet-50 [14] and ViT-B/16 [9] (results for other backbones in Supp. Sec. C.2). We resort to ResNet-50 as backbone in the ablation studies. For each downstream task we first extract all pre-trained features of the support shots and then run adaptation experiments over those. Data augmentation is applied during the feature extraction stage using random zoom, crops, and flips, following [40, 45]. The number of augmentations per support sample is set to 20. We used the same text prompts per dataset as in [40, 46]. Following our claim that using a validation set on few-shot adaptation is unrealistic, we trained ZS-LP and CLAP using the same configuration for all datasets, number of shots, and visual backbones. Concretely, we optimize the adapter for 300 epochs, using SGD optimizer with Momentum of 0.9. 
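For completeness, the class-adaptive initialization of the penalty weights described in Sec. 4.3, i.e., the average zero-shot softmax score of each class over its own support shots, can be sketched as below; names are illustrative and this is not the released code.

```python
import torch

def init_class_multipliers(zs_probs, labels, num_classes):
    # zs_probs: (M, C) zero-shot softmax scores of the support samples (Eq. (1));
    # labels:   (M,)   their integer class labels.
    # lambda_c is the mean zero-shot probability of class c over its own shots (Sec. 4.3).
    lmbda = torch.zeros(num_classes)
    for c in range(num_classes):
        lmbda[c] = zs_probs[labels == c, c].mean()
    return lmbda
```

The resulting vector plays the role of the class-wise weights `lmbda` in the penalized probe sketched after Sec. 4.2.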
We use a relatively large initial learning rate of 0.1 to avoid underfitting on the support set, whose value decreases during training following a cosine decay scheduler. We ran all experiments with three different random seeds, and the results were averaged across runs." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.41, + 0.893, + 0.668 + ], + "angle": 0, + "content": "Baselines and adaptation protocol. We selected adapter-based methods as our main competitors based on the similarity to our approach, including Clip-Adapter [11], TIP-Adapter [42], TaskRes [40], and Cross-Modal [24]. It is important to highlight that prior works [11, 40, 42] apparently leverage either the extensive test set, or an independent additional validation subset, to adjust important hyperparameters for few-shot adaptation, such as the learning rate, training epochs, and particular parameters that control each method [24]. Nevertheless, as we exposed in Fig. 1, their performance dramatically decreases when the set of hyperparameters is not adjusted for the testing scenario. To adhere to real-world requirements, we define a strict few-shot adaptation protocol, in which no validation or test samples are available to find the best case-specific configuration for each method, and hyperparameters remain fixed across tasks (details in Supp. Sec. A.4)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.68, + 0.593, + 0.695 + ], + "angle": 0, + "content": "5.2. Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Efficient transfer learning. We report in Tab. 1 the performance of adapter-based approaches averaged across 11 datasets, in the more realistic and practical validation-free experimental setting. Furthermore, for prompt-learning-based approaches, we include the results reported in prior literature, for a more comprehensive comparison. From these values, we can make interesting observations. First, a well-initialized Linear Probe, i.e., using the CLIP zero-shot weights, does not show the performance degradation discussed in prior works, and it is indeed a competitive alternative to SoTA approaches. Second, and more surprisingly, more complex approaches such as CLIP-Adapter, or TIP-Adapter, show a significant decline in performance com" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.946, + 0.521, + 0.957 + ], + "angle": 0, + "content": "23686" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.304 + ], + "angle": 0, + "content": "pared to their original results when no validation set is available for model selection. Interestingly, TaskRes(e), which is some sort of two-stage zero-shot initialization Linear Probing with an updated text projection, also offers robust performance. Nevertheless, the absence of a detailed explanation of how the enhanced version is obtained in the original work hampers fair comparisons. Third, constraining the weights update to remain close to the zero-shot knowledge (CLAP) shows consistent improvements across different shots, especially in the very low data regime. This suggests that retaining the previous base knowledge from VLMs is important to avoid diverging because of unrepresentative shots during adaptation. Results per dataset are detailed in Supp. Fig. 8 and Supp. Tab. 9." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.315, + 0.472, + 0.399 + ], + "angle": 0, + "content": "Table 1. 
Comparison to state-of-the-art methods for few-shot adaptation of CLIP-based models, using ResNet-50 backbone. ETL methods are trained under the same protocol, i.e., absence of a validation set and using a fixed configuration across datasets, and results are averaged across 11 datasets. Prompt-learning methods results are directly extracted from [6, 13]. Best results in bold." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.403, + 0.462, + 0.601 + ], + "angle": 0, + "content": "
| Method | K=1 | K=2 | K=4 | K=8 | K=16 |
| --- | --- | --- | --- | --- | --- |
| Prompt-learning methods | | | | | |
| CoOp IJCV'22 [46] | 59.56 | 61.78 | 66.47 | 69.85 | 73.33 |
| ProGrad ICCV'23 [13] | 62.61 | 64.90 | 68.45 | 71.41 | 74.28 |
| PLOT ICLR'23 [6] | 62.59 | 65.23 | 68.60 | 71.23 | 73.94 |
| Efficient transfer learning - a.k.a. Adapters | | | | | |
| Zero-Shot ICML'21 [30] | 57.71 | 57.71 | 57.71 | 57.71 | 57.71 |
| Rand. Init LP ICML'21 [30] | 30.42 | 41.86 | 51.69 | 60.84 | 67.54 |
| CLIP-Adapter IJCV'23 [11] | 58.43 | 62.46 | 66.18 | 69.87 | 73.35 |
| TIP-Adapter ECCV'22 [42] | 58.86 | 60.33 | 61.49 | 63.15 | 64.61 |
| TIP-Adapter(f) ECCV'22 [42] | 60.29 | 62.26 | 65.32 | 68.35 | 71.40 |
| CrossModal-LP CVPR'23 [24] | 62.24 | 64.48 | 66.67 | 70.36 | 73.65 |
| TaskRes(e) CVPR'23 [40] | 61.44 | 65.26 | 68.35 | 71.66 | 74.42 |
| ZS-LP | 61.28 | 64.88 | 67.98 | 71.43 | 74.37 |
| CLAP | 62.79 | 66.07 | 69.13 | 72.08 | 74.57 |
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.473, + 0.902 + ], + "angle": 0, + "content": "Domain generalization. If adaptation is not carefully conducted, the resulting model might distort the pre-trained knowledge and underperform when new data with domain drifts is involved [22], even below the zero-shot (no adaptation) performance. Thus, evaluating the robustness of novel adapters under this scenario of domain generalization is of special interest. To do so, adapters are optimized on ImageNet using 16 shots per class, and directly evaluated on ImageNet variants. In this setting, we also assume the absence of a validation dataset, and hence all adapters are trained until convergence, using the same configuration across backbones. A summary of the results is reported in Tab. 2, while specific numbers across datasets and additional backbones are included in Supp. Tab. 10. From these experiments, we make two striking observations. First, ZS-LP is a strong baseline compared to other more complex adapters on the source domain. Even more remarkably, prior SoTA adapters, such as CLIP-Adapter or TIP-Adapter, fail to generalize to unseen domains. In" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.198 + ], + "angle": 0, + "content": "deed, when using recent vision transformers, which are overtaking convolutional neural networks, none of existing adapters-based approaches outperform standard zero-shot prediction in the presence of distributional drifts. In contrast, CLAP yields the best in-distribution performance and also shows consistent improvements under domain shifts across all backbones." + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.208, + 0.895, + 0.265 + ], + "angle": 0, + "content": "Table 2. Robustness to domain shifts. Adapters are adjusted on ImageNet and evaluated at out-of-distribution generalization on 4 ImageNet shifts. Bold indicates best performance. Differences with respect to no adaptation (a.k.a zero-shot) are highlighted." + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.268, + 0.885, + 0.466 + ], + "angle": 0, + "content": "
| Backbone | Method | Source (ImageNet) | Target (Average) |
| --- | --- | --- | --- |
| ResNet-50 | Zero-Shot ICML'21 [30] | 60.35 | 40.61 |
| | Rand. Init LP ICML'21 [30] | 52.24 (−8.11)↓ | 24.61 (−16.00)↓ |
| | CLIP-Adapter IJCV'23 [11] | 59.02 (−1.33)↓ | 31.21 (−9.40)↓ |
| | TIP-Adapter ECCV'22 [42] | 57.81 (−2.54)↓ | 40.69 (+0.08)↑ |
| | TIP-Adapter(f) ECCV'22 [42] | 62.27 (+1.92)↑ | 41.36 (+0.75)↑ |
| | TaskRes(e) CVPR'23 [40] | 60.85 (+0.50)↑ | 41.28 (+0.67)↑ |
| | ZS-LP | 61.00 (+0.65)↑ | 36.58 (−4.03)↓ |
| | CLAP | 65.02 (+4.67)↑ | 42.91 (+2.30)↑ |
| ViT-B/16 | Zero-Shot ICML'21 [30] | 68.71 | 57.17 |
| | Rand. Init LP ICML'21 [30] | 62.95 (−5.76)↓ | 40.41 (−16.76)↓ |
| | CLIP-Adapter IJCV'23 [11] | 68.46 (−0.25)↓ | 50.72 (−6.45)↓ |
| | TIP-Adapter ECCV'22 [42] | 53.81 (−14.90)↓ | 41.55 (−15.62)↓ |
| | TIP-Adapter(f) ECCV'22 [42] | 51.71 (−17.00)↓ | 35.58 (−21.6)↓ |
| | TaskRes(e) CVPR'23 [40] | 70.84 (+2.13)↑ | 55.35 (−1.82)↓ |
| | ZS-LP | 69.73 (+1.02)↑ | 53.65 (−3.52)↓ |
| | CLAP | 73.38 (+4.67)↑ | 60.04 (+2.87)↑ |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.478, + 0.892, + 0.574 + ], + "angle": 0, + "content": "Table 3. Fine-tuning (FT) vs. efficient transfer learning (ETL). A benchmark for the low data regime, i.e., 8 shots for each class. For the sake of fairness, FT methods (above the dashed line) are trained with 4 shots and early-stopped using a validation set containing 4 shots. On the other hand, ETL methods (below the dashed line) are trained using 8 shots and rely solely on the support set. All methods use ViT-B/16 as CLIP backbone." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.579, + 0.885, + 0.699 + ], + "angle": 0, + "content": "
| Method | Source (ImageNet) | Target -V2 | Target -Sketch | Target -A | Target -R | Target Avg. |
| --- | --- | --- | --- | --- | --- | --- |
| Fine-tuning (FT) | 69.88 | 62.44 | 47.07 | 47.52 | 76.08 | 58.28 |
| LP-FT ICLR'23 [22] | 71.29 | 64.04 | 48.50 | 49.49 | 77.63 | 59.92 |
| WiSE CVPR'22 [36] | 71.17 | 63.81 | 49.38 | 50.59 | 78.56 | 60.59 |
| FLYP CVPR'23 [12] | 71.51 | 64.59 | 49.50 | 51.32 | 78.52 | 60.98 |
| Zero-Shot | 68.71 | 60.76 | 46.18 | 47.76 | 73.98 | 57.17 |
| Rand. Init LP | 56.58 | 47.17 | 25.82 | 27.03 | 47.05 | 36.77 |
| ZS-LP | 68.49 | 60.07 | 42.77 | 42.39 | 71.73 | 54.24 |
| CLAP | 71.75 | 64.06 | 47.66 | 48.40 | 76.70 | 59.21 |
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.51, + 0.699, + 0.876, + 0.71 + ], + "angle": 0, + "content": "*Specific numbers for FT, LP-FT, WiSE-FT, and FLYP are retrieved from [12]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Is it worth optimizing the entire model? We now compare CLAP to end-to-end full fine-tuning (FT) approaches: LP-FT [22], WiSE-FT [36], and FLYP [12]. The former two methods require a validation set for early stopping, and the latter two use it for both early stopping and tuning the mixing coefficient hyperparameter \\(\\alpha\\). Therefore, for a \\(K\\)-shot problem, these methods actually require \\(2K\\) shots for each class, \\(K\\) for training, and \\(K\\) for validation. As the balancing penalty term in CLAP is optimized with the support set, and does not require a validation set, a fair comparison would be to evaluate the \\(K\\)-shot performance of fine-tuning methods against our method's \\(2K\\)-shot results. Thus, Tab. 3 in" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "23687" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.213 + ], + "angle": 0, + "content": "cludes the performance of all the models when 8 labeled images are available for each class overall. Analyzing the results, we can conclude that in the low data regime, full finetuning is not necessarily superior to ETL when compared properly. More specifically, our approach outperforms finetuning methods in in-distribution performance and performs reasonably well on OOD datasets, while having a fraction of the estimizable parameters of fine-tuning methods." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.222, + 0.281, + 0.238 + ], + "angle": 0, + "content": "5.3. Ablation experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.245, + 0.473, + 0.594 + ], + "angle": 0, + "content": "On the need for model selection strategies. Relevant methods (e.g., CLIP-Adapter [11], TIP-Adapter [42], or TaskRes [40]) include different hyperparameters that directly control their performance. Nevertheless, these methods are incomplete, since they do not include any strategy for adjusting these parameters, typically referred to as model selection. In contrast, and as previously stressed, there is evidence that these works use a large evaluation subset to adapt their settings to each scenario [24]. To investigate this observation, we evaluate these methods in cross-dataset model selection experiments. The best hyperparameters values for a task (i.e., dataset), which are found in an Oracle scenario using the entire test subset, are used during adaptation to another dataset. The matrices showing the relative improvements over a zero-shot initialized Linear Probing (ZS-LP) are depicted in Fig. 1. These results show empirically that the hyperparameters values are highly task-dependent, and that SoTA methods must adjust their hyperparameters on the target task to outperform this simple baseline, which is unrealistic in practice. In contrast, the proposed CLAP is more robust, showing consistent results across all datasets, even in the worst degradation case, as it does not require particular modifications per task." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.606, + 0.47, + 0.661 + ], + "angle": 0, + "content": "Table 4. Improving Linear Probing. Using as baseline the proposed ZS-LP configuration detailed in Sec. 
4.1, we isolate the effect of removing different parts of the model, while keeping the rest static. Results are averaged across 11 datasets." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.665, + 0.462, + 0.75 + ], + "angle": 0, + "content": "
| Method | K=1 | K=2 | K=4 |
| --- | --- | --- | --- |
| ZS-LP | 61.28 | 64.88 | 67.98 |
| w/o DA | 57.72 (−3.5)↓ | 61.94 (−2.9)↓ | 65.41 (−2.5)↓ |
| w/o Temp. Scaling (τ) | 58.33 (−2.9)↓ | 59.85 (−5.0)↓ | 59.91 (−8.0)↓ |
| w/o L2-norm | 48.67 (−12.6)↓ | 55.29 (−9.6)↓ | 61.16 (−6.8)↓ |
| Rand. Init. | 30.42 (−30.8)↓ | 41.86 (−23.0)↓ | 51.69 (−16.2)↓ |
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Details in Linear Probing matter. As described earlier in Sec. 4.1, LP has been discouraged in the prior literature due to its limited performance in few-shot adaptation [11, 30]. Nevertheless, we argue that this behavior stems from the original way in which LP was introduced in [30], inspired by prior self-supervised learning methods. Indeed, a strategy tailored to contrastive VLMs alleviates the performance drop of LP observed in prior works. In particular, using zero-shot initialization, the same temperature scaling as" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.198 + ], + "angle": 0, + "content": "pre-training, and explicit \\(\\ell_2\\)-normalization of the class prototypes, considerably improves the generalization of few-shot adaptation (Tab. 4). This aligns with relevant literature on other topics such as FT [12], which suggests that the adaptation conditions should match the pre-training setting. Also, including other heuristics such as data augmentation (DA), usually omitted in LP [40, 42], is of special relevance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.222, + 0.893, + 0.465 + ], + "angle": 0, + "content": "Using a few-shot validation set. Cross-Modal adapter [24] uses a validation set composed of \\((\\min(K, 4))\\) samples to adjust the experimental setting and early stopping. Even though this setting is more appropriate, it still requires an additional number of shots for model selection. Nevertheless, for the sake of fairness, the performance comparison to methods that do not require a validation set should be carried out by training the latter methods using \\(K + \\min(K, 4)\\) shots. When this fair benchmark is established (see Tab. 5), simple ZS-LP excels again as a strong baseline, outperforming more complex methods on the low-shot regime. Only when using a large number of shots (\\(K > 8\\)) partial finetuning and ETL methods marginally benefit from validation samples. However, model selection using a validation set increases the computational workload and processing times during adaptation due to its grid search nature." + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.479, + 0.892, + 0.507 + ], + "angle": 0, + "content": "Table 5. Using a few-shot validation set. Results for priors works on this setting are obtained from [24]. Average across 11 datasets." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.511, + 0.895, + 0.634 + ], + "angle": 0, + "content": "
| Method | K=1 | K=2 | K=4 | K=8 | K=16 |
| --- | --- | --- | --- | --- | --- |
| Protocol in [24]: K-shots for train + min(K, 4) for validation | | | | | |
| TIP-Adapter [42] | 63.3 | 65.9 | 69.0 | 72.2 | 75.1 |
| CrossModal LP [24] | 64.1 | 67.0 | 70.3 | 73.0 | 76.0 |
| CrossModal Adapter [24] | 64.4 | 67.6 | 70.8 | 73.4 | 75.9 |
| CrossModal PartialFT [24] | 64.7 | 67.2 | 70.5 | 73.6 | 77.1 |
| Ours: using K + min(K, 4) shots for training | | | | | |
| ZS-LP | 64.9 | 68.0 | 71.4 | 73.1 | 75.0 |
| CLAP | 66.1 | 69.1 | 72.1 | 73.5 | 75.1 |
" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.65, + 0.625, + 0.667 + ], + "angle": 0, + "content": "6. Limitations" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.676, + 0.892, + 0.798 + ], + "angle": 0, + "content": "In this work, we have introduced a Class-Adaptive linear Probe (CLAP) objective, based on an adaptation of the general Augmented Lagrangian method, for efficient adaptation of large vision-language models in realistic scenarios. Despite its superiority, our empirical validation suggests that the benefits of our approach diminish as the number of shots increases, indicating that other strategies might be privileged if the number of adaptation samples is large." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.814, + 0.66, + 0.831 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This work is supported by the National Science and Engineering Research Council of Canada (NSERC) and Fonds de recherche du Québec (FRQNT). We also thank Calcul Quebec and Compute Canada." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "23688" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.081, + 0.091, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.468, + 0.156 + ], + "angle": 0, + "content": "[1] Dimitri P. Bertsekas. Constrained Optimization and Lagrange Multiplier Methods (Optimization and Neural Computation Series). Athena Scientific, 1 edition, 1996." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.158, + 0.468, + 0.212 + ], + "angle": 0, + "content": "[2] Ernesto G Birgin, Romulo A Castillo, and José Mario Martínez. Numerical comparison of augmented lagrangian algorithms for nonconvex problems. Computational Optimization and Applications, 31(1):31-55, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.215, + 0.468, + 0.24 + ], + "angle": 0, + "content": "[3] Rishi Bommasani et al. On the opportunities and risks of foundation models. ArXiv, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.243, + 0.468, + 0.297 + ], + "angle": 0, + "content": "[4] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 - mining discriminative components with random forests. In European Conference on Computer Vision (ECCV), 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.3, + 0.468, + 0.38 + ], + "angle": 0, + "content": "[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in Neural Information Processing Systems (NeurIPS), 33:1877-1901, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.383, + 0.468, + 0.437 + ], + "angle": 0, + "content": "[6] Guangyi Chen, Weiran Yao, Xiangchen Song, Xinyue Li, Yongming Rao, and Kun Zhang. Prompt learning with optimal transport for vision-language models. In International Conference on Learning Representations (ICLR), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.44, + 0.468, + 0.507 + ], + "angle": 0, + "content": "[7] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3606-3613, 2014." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.51, + 0.468, + 0.577 + ], + "angle": 0, + "content": "[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 248-255, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.581, + 0.468, + 0.675 + ], + "angle": 0, + "content": "[9] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. International Conference on Learning Representations (ICLR), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.678, + 0.468, + 0.759 + ], + "angle": 0, + "content": "[10] Li Fei-Fei, R. Fergus, and P. Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Worskshops (CVPRW), pages 178–178, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.762, + 0.468, + 0.829 + ], + "angle": 0, + "content": "[11] Peng Gao, Shijie Geng, Renrui Zhang, Teli Ma, Rongyao Fang, Yongfeng Zhang, Hongsheng Li, and Yu Qiao. Clip-adapter: Better vision-language models with feature adapters. International Journal of Computer Vision (IJCV), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.468, + 0.9 + ], + "angle": 0, + "content": "[12] Sachin Goyal, Ananya Kumar, Sankalp Garg, Zico Kolter, and Aditi Raghunathan. Finetune like you pretrain: Improved finetuning of zero-shot vision models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19338-19347, 2023." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.468, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[13] Changsheng Xu Hantao Yao, Rui Zhang. Visual-language prompt tuning with knowledge-guided context optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.151, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.208, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[15] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. Introducing eurosat: A novel dataset and deep learning benchmark for land use and land cover classification. In IEEE International Geoscience and Remote Sensing Symposium (IGARSS), pages 3606-3613, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.279, + 0.892, + 0.345 + ], + "angle": 0, + "content": "[16] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15262-15271, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.349, + 0.892, + 0.445 + ], + "angle": 0, + "content": "[17] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadayath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), page 8340-8349, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.448, + 0.892, + 0.529 + ], + "angle": 0, + "content": "[18] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning (ICML), pages 4904-4916, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.533, + 0.892, + 0.587 + ], + "angle": 0, + "content": "[19] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision (ECCV), pages 709-727, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.59, + 0.892, + 0.657 + ], + "angle": 0, + "content": "[20] Muhammad Uzair Khattak, Hanoona Rasheed, Muhammad Maaz, Salman Khan, and Fahad Shahbaz Khan. Maple: Multi-modal prompt learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19113-19122, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.661, + 0.892, + 0.728 + ], + "angle": 0, + "content": "[21] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), page 3498–3505, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.732, + 0.892, + 0.798 + ], + "angle": 0, + "content": "[22] Ananya Kumar, Aditi Raghunathan, Robbie Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. In International Conference on Learning Representations (ICLR), pages 1-42, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.803, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[23] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. In _Conference on Empirical Methods in Natural Language Processing (EMNLP)_, pages 3045-3059, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.86, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[24] Zhiqiu Lin, Samuel Yu, Zhiyi Kuang, Deepak Pathak, and Deva Ramanan. Multimodality helps unimodality: Cross-modal few-shot learning with multimodal models. In Pro" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23689" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.093, + 0.469, + 0.12 + ], + "angle": 0, + "content": "ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.177 + ], + "angle": 0, + "content": "[25] Bingyuan Liu, Jérôme Rony, Adrian Galdran, Jose Dolz, and Ismail Ben Ayed. Class adaptive network calibration. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16070-16079, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.22 + ], + "angle": 0, + "content": "[26] S. Maji, J. Kannala, E. Rahtu, M. Blaschko, and A. Vedaldi. Fine-grained visual classification of aircraft. In ArXiv Preprint, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.222, + 0.469, + 0.276 + ], + "angle": 0, + "content": "[27] Sachit Menon and Carl Vondrick. Visual classification via description from large language models. In International Conference on Learning Representations (ICLR), pages 1-17, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.279, + 0.469, + 0.333 + ], + "angle": 0, + "content": "[28] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In Indian Conference on Computer Vision, Graphics and Image Processing, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.336, + 0.469, + 0.39 + ], + "angle": 0, + "content": "[29] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), page 3498-3505, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.393, + 0.469, + 0.475 + ], + "angle": 0, + "content": "[30] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (ICML), pages 8748-8763, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.478, + 0.469, + 0.532 + ], + "angle": 0, + "content": "[31] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. Do imagenet classifiers generalize to imagenet? In International Conference on Machine Learning (ICML), pages 5389-5400, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.534, + 0.469, + 0.602 + ], + "angle": 0, + "content": "[32] Sara Sangalli, Ertunc Erdil, Andeas Hotker, Olivio F Donati, and Ender Konukoglu. Constrained optimization to train neural networks on critical and under-represented classes. In Advances in Neural Information Processing Systems (NeurIPS), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.605, + 0.469, + 0.646 + ], + "angle": 0, + "content": "[33] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. In ArXiv Preprint, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.648, + 0.469, + 0.716 + ], + "angle": 0, + "content": "[34] Rohan Taori, Achal Dave, Vaishaal Shankar, Nicholas Carlini, Benjamin Recht, and Ludwig Schmidt. Measuring robustness to natural distribution shifts in image classification. In Advances in Neural Information Processing Systems (NeurIPS), 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.719, + 0.469, + 0.773 + ], + "angle": 0, + "content": "[35] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In Advances in Neural Information Processing Systems (NeurIPS), 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[36] Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo-Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, and Ludwig Schmidt. Robust fine-tuning of zero-shot models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7959-7971, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.874, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[37] Jianxiong Xiao, James Hays, Krista A. Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "recognition from abbey to zoo. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3485-3492, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[38] Yinghui Xing, Qirui Wu, De Cheng, Shizhou Zhang, Guoqiang Liang, Peng Wang, and Yanning Zhang. Dual modality prompt tuning for vision-language pre-trained model. IEEE Transactions on Multimedia, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.193, + 0.892, + 0.26 + ], + "angle": 0, + "content": "[39] Hantao Yao, Rui Zhang, and Changsheng Xu. Visual-language prompt tuning with knowledge-guided context optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6757-6767, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.263, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[40] Tao Yu, Zhihe Lu, Xin Jin, Zhibo Chen, and Xinchao Wang. Task residual for tuning vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10899-10909, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.32, + 0.892, + 0.4 + ], + "angle": 0, + "content": "[41] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18123-18133, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.404, + 0.892, + 0.472 + ], + "angle": 0, + "content": "[42] Renrui Zhang, Rongyao Fang, Wei Zhang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li. Tip-adapter: Training-free clip-adapter for better vision-language modeling. In European Conference on Computer Vision (ECCV), pages 1-19, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.475, + 0.892, + 0.543 + ], + "angle": 0, + "content": "[43] Zexuan Zhong, Dan Friedman, and Danqi Chen. Factual probing is [mask]: Learning vs. learning to recall. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5017-5033, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.545, + 0.892, + 0.599 + ], + "angle": 0, + "content": "[44] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 45:4396-4415, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.602, + 0.892, + 0.657 + ], + "angle": 0, + "content": "[45] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Conditional prompt learning for vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.659, + 0.892, + 0.699 + ], + "angle": 0, + "content": "[46] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision (IJCV), 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.701, + 0.892, + 0.756 + ], + "angle": 0, + "content": "[47] Beier Zhu, Yulei Niu, Yucheng Han, Yue Wu, and Hanwang Zhang. Prompt-aligned gradient for prompt tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15659-15669, 2023." + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.756 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23690" + } + ] +] \ No newline at end of file diff --git a/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/69aa9b91-03a6-4c14-a53b-96602951c67b_origin.pdf b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/69aa9b91-03a6-4c14-a53b-96602951c67b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d88564efad3d2edf5fcb7e425cdb089decd45e00 --- /dev/null +++ b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/69aa9b91-03a6-4c14-a53b-96602951c67b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f203abd9a45d1871a045abf3972ed70a33afbec6f4a8b27c5a7e6edecfd83892 +size 1545744 diff --git a/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/full.md b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/full.md new file mode 100644 index 0000000000000000000000000000000000000000..c1e7fd1e13a96005c66a0c5ada3f2818235423c0 --- /dev/null +++ b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/full.md @@ -0,0 +1,317 @@ +# A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models + +Julio Silva-Rodriguez + +Sina Hajimiri + +Ismail Ben Ayed + +ÉTS Montreal + +julio-jose.silva-rodriguez@etsmt1.ca + +Jose Dolz + +![](images/82130ed8dda85ff2f0317020867ca340011005508c804a9c8616604948ccad77.jpg) +(a) CLIP-Adapter [11] +Figure 1. Pitfalls of few-shot adapters due to the absence of a model selection strategy. The cross-shift model selection matrices $(i,j)$ depict the relative improvement w.r.t. a zero-shot initialized Linear Probing when using the optimal hyperparameters for the dataset $i$ (rows), for adapting in another task $j$ (columns), for each SoTA method (first three plots) and our approach (last plot). + +![](images/c75b44f3de5192e7ec83212cf6ff6a9895b8fc7bf3d5600d20371e35bbd6cdb7.jpg) +(b) TIP-Adapter(f) [42] + +![](images/8dbbc2575cf75c7536dfa70a8a994c4f1a2b0d453b2e52f716a7a85f50678148.jpg) +(c) TaskRes [40] + +![](images/a61f88c5dda613b069ca788c6d83166a342f21da1bd87aa1d8d1b6b168bb7c3d.jpg) +(d) CLAP (Ours) + +# Abstract + +Efficient transfer learning (ETL) is receiving increasing attention to adapt large pre-trained language-vision models on downstream tasks with a few labeled samples. 
While significant progress has been made, we reveal that state-of-the-art ETL approaches exhibit strong performance only in narrowly-defined experimental setups, and with a careful adjustment of hyperparameters based on a large corpus of labeled samples. In particular, we make two interesting, and surprising empirical observations. First, to outperform a simple Linear Probing baseline, these methods require to optimize their hyper-parameters on each target task. And second, they typically underperform -sometimes dramatically-standard zero-shot predictions in the presence of distributional drifts. Motivated by the unrealistic assumptions made in the existing literature, i.e., access to a large validation set and case-specific grid-search for optimal hyperparameters, we propose a novel approach that meets the requirements of real-world scenarios. More concretely, we introduce a Class-Adaptive linear Probe (CLAP) objective, whose balancing term is optimized via an adaptation of the general Augmented Lagrangian method tailored to this context. We comprehensively evaluate CLAP on a broad span of datasets and scenarios, demonstrating that it consistently outperforms SoTA approaches, while yet being a much more efficient alternative. Code available at + +https://github.com/jusiro/CLAP. + +# 1. Introduction + +Large vision-language models (VLMs), such as CLIP [30], are reshaping the research landscape with their unprecedented performance. These models undergo training on an extensive dataset consisting of hundreds of millions of image-text pairs, which are leveraged via contrastive learning [30]. Once trained, VLMs offer a remarkable zero-shot performance on a wide span of visual recognition problems thanks to the rich learned representations [27, 30]. Nevertheless, the extensive hardware and data-driven resources that such training demands [3] suggest that these models can only be trained on singular occasions. Furthermore, the large scale of these networks poses important challenges when it comes to adjusting their parameters on small downstream tasks that involve only a few labeled samples, making the full fine-tuning of the entire model impractical. + +An emerging alternative to alleviate this issue consists in fine-tuning VLMs by adding a small set of learnable parameters, whose values are optimized during the adaptation step [11, 19, 42, 45, 46]. These tunable weights can be introduced in the input space as visual [19] or text prompts [45, 46], or added in the form of adapters across the network [11, 40, 42]. While both families of approaches fit within the Efficient Transfer Learning (ETL) literature, + +prompt learning still requires backpropagating the gradients through the entire network. Thus, besides introducing a burden on resource reuse, these methods preclude black-box adaptation, introducing a potential concern about leaking the source data, which is paramount in privacy-oriented applications. In contrast, strategies based on adapters only need gradients on the extra set of parameters, typically in the last layer, avoiding costly fine-tuning processes and data leakage, yet yielding state-of-the-art performance [24, 40]. + +Despite the progress observed in adapter-based methods for fine-tuning VLMs under the few-shot learning paradigm, improving the performance on the target task while preserving their generalization capabilities remains still a challenge [46]. 
We argue that this is likely due to the severe overfitting to the support set samples employed during few-shot adaptation, which significantly deviates the updated class prototypes from the zero-shot prototypes initially provided by the pre-trained model. In fact, popular adapter-based ETL strategies, such as CLIP-Adapter [11] and TIP-Adapter [42], carefully adjust the model-specific hyperparameters, in conjunction with other key hyperparameters related to the learning scheduler, to control the trade-off between initial zero-shot inference and the integration of new information from the support set. Furthermore, recent evidence [24] suggests that these works apparently use the large-scale test set to adjust their hyperparameters. + +A significant limitation becomes evident in that these hyperparameters, when optimized for one specific task, do not exhibit strong generalizability to other tasks, as illustrated in Fig. 1. Indeed, state-of-the-art (SoTA) methods struggle to find a homogeneous configuration that outperforms a simple well-initialized Linear Probing (LP) adaptation. Notably, in a realistic adaptation scenario (Fig. 1), we can observe dramatic performance degradations, up to $21\%$ , compared to this simple baseline. These practices virtually bias the model selection process, as assuming access to a significantly larger set of labeled samples, and adjusting the model hyperparameters in a case-specific manner, is not only unrealistic but also impractical (grid-search must be done for each case). Thus, we argue that if an ETL method's model selection strategy is not solely based on the support samples, the method is incomplete, and impractical for real-world few-shot adaptation problems. + +In this work, we seek to redirect the efforts on few-shot ETL to a more strict, but realistic scenario, in which only the support samples are accessible during training. The absence of an evaluation subset urges novel adapters to include a model selection strategy, robust across a large spectrum of tasks. Interestingly, we empirically observed that a carefully designed Linear Probing (ZS-LP), whose weights are initialized with the zero-shot prototypes from CLIP, is a strong baseline that outperforms more convoluted ETL solutions. To further improve the baseline ZS-LP and opti + +mize the trade-off between initial zero-shot representations and updated class prototypes on novel tasks, we propose penalizing large deviations from the original zero-shot prototypes during adaptation. The resulting learning objective, however, presents two major issues. First, the penalty included to control the deviation between original and updated prototypes is a scalar value, uniform across all classes, which can detrimentally affect the model's performance in the presence of harder-to-learn classes. Second, the penalty balancing weight must be set using a validation set, which juxtaposes with our validation-free scenario. To address these limitations, we propose CLass-Adaptive linear Probe (CLAP), which is based on an Augmented Lagrangian Multiplier approach. We can summarize our contributions as: + +- We empirically observe that SoTA few-shot ETL adapters require careful adjustment of a set of key hyperparameters for each task, which is unrealistic and impractical in real-world settings. Surprisingly, if a fixed configuration is adopted across tasks, these methods are likely to substantially underperform a simple Linear Probing strategy initialized with the zero-shot prototypes from CLIP. 
+- We propose a principled solution to tackle the trade-off between original and updated class prototypes in Linear Probing, which integrates a penalty term to penalize large deviations from zero-shot prototypes. To address the underlying challenges from the resulting constrained optimization problem, we present a modified Augmented Lagrangian Multiplier (ALM) method. This alleviates the need of having to fine-tune the penalty balancing weight, which is learned in the outer iteration of the optimization process. In order to adapt ALM to the presented scenario, two critical choices were made: $i$ Leveraging class prototypes, as well as data augmentation, motivate the use of class-wise multipliers, instead of sample and class-wise multipliers as in the original ALM; $ii$ In the presented scenario, there is no access to a validation set, and the only feedback available is from the support samples. Hence, we only perform one outer-step update, which can avoid potential overfitting on the support set. +- We provide extensive experiments to assess the performance of CLAP in the proposed scenario, including few-shot adaptation on 11 popular classification benchmarks, domain generalization, comparison to full fine-tuning methods, and ablation studies to validate our choices. As shown in Fig. 1 and in the experimental section, CLAP delivers consistent performance across different tasks with a homogeneous configuration, and largely outperforms SoTA ETL approaches in all scenarios. + +# 2. Related work + +Vision-language pre-trained models. The field of machine learning is in the midst of a paradigm shift with the emerging rise of vision-language models (VLMs). These + +networks have gained increasing popularity, especially fueled by the significant improvements achieved in computer vision and natural language processing tasks [5, 18, 30, 41]. The prevailing learning paradigm consists of a dual stream of data, which separately encodes images and their text counterparts, leveraging contrastive learning at a large scale to bridge image and text representations in the latent space. Particularly, models such as CLIP [30] and ALIGN [18] have successfully mitigated the distribution discrepancy between text and images, and have shown tremendous zero-shot capabilities on visual recognition tasks, primarily in the context of classification. + +Full fine-tuning. A body of work proposes fine-tuning the entire VLMs to adapt to a specific task [12, 22, 36]. This strategy, however, presents several drawbacks. Concretely, fine-tuning increases the complexity of the model being optimized, makes the optimization process more time-consuming compared to ETL methods, and requires access to the backbone weights, which does not allow a black-box adaptation. Furthermore, full fine-tuning methods typically tend to overfit when trained on small datasets, requiring a large corpus of labeled data for the target task, which may be impractical in many real-world scenarios. + +Efficient transfer leaning attempts to address these issues by updating a small set of learnable parameters and leveraging a limited amount of annotated samples. Current ETL literature can be categorized into Prompt Learning [20, 38, 39, 45-47] and Adapter-based [11, 40, 42] approaches. Prompt Learning represents a recent advancement in the realm of natural language processing [23, 43], which has been recently adopted with success in VLMs. In these methods, only the text tokens provided to the model are optimized. 
Nevertheless, these techniques require long training steps due to backpropagating the gradient over the entire network, which juxtaposes with the spirit of efficient adaptation. Furthermore, black-box adaptation is also not possible in prompt learning. Adapter-based methods, in contrast, offer a much lighter alternative as only a small subset of parameters, typically at the latest layers, are adjusted. For example, CLIP-Adapter [11] integrates a two-layer MLP to modify the visual embedding generated by CLIP. In TIP-Adapter [42], the visual prototypes obtained from the few-shot support samples are leveraged to compute the similarity with the visual embedding of the test image, which is later used to modify the CLIP visual embedding. + +# 3. Preliminaries + +# 3.1. Contrastive vision-language pre-training + +Large-scale VLMs, such as CLIP [30], are trained on large heterogeneous datasets, encouraging image and text representations to correlate in a joint embedding space. Formally, CLIP comprises a vision encoder, $f_{\theta}(\cdot)$ , and a text encoder, + +$f_{\phi}(\cdot)$ , each aiming at learning a rich representation of their data points. These points are projected in an $\ell_2$ -normalized shared embedding space, yielding the corresponding visual $\mathbf{v}$ and text $\mathbf{t}$ embeddings. The whole network is optimized to maximize the similarity between the projected embeddings of paired images and texts, using a contrastive loss. + +# 3.2. Transferability + +Zero-shot inference. For a particular downstream image classification task, CLIP-based models are able to provide predictions based on the similarity between category prompts, i.e., text descriptions of target classes, and testing images. Given a set of $C$ categories, and an ensemble of $N$ text prompts for each one, $\{\{T_{n,c}\}_{n=1}^{N}\}_{c=1}^{C}$ , a common practice is to obtain a zero-shot prototype for each target category by computing the center of the $\ell_2$ -normalized text embeddings for each class, $\mathbf{t}_c = \frac{1}{N}\sum_{n=1}^{N}f_\phi(T_{n,c})$ . Thus, for a given query image $\mathbf{x}$ , the zero-shot prediction is obtained from the softmax cosine similarity between the vision embedding $\mathbf{v} = f_\theta(\mathbf{x})$ , and category prototypes $\mathbf{t}_c$ : + +$$ +\hat {y} _ {c} = \frac {\exp \left(\mathbf {v} \cdot \mathbf {t} _ {c} ^ {\top} / \tau\right)}{\sum_ {i = 1} ^ {C} \exp \left(\mathbf {v} \cdot \mathbf {t} _ {i} ^ {\top} / \tau\right)}, \tag {1} +$$ + +where $\tau$ is a temperature parameter learned during the pretraining stage, and $\mathbf{v} \cdot \mathbf{t}^{\top}$ the dot product operator, which is equivalent to cosine similarity, as vectors are $\ell_{2}$ -normalized. + +Few-shot learning. This scenario assumes access to limited supervisory information on the downstream tasks, in the form of a few examples for each target category, so-called shots. Formally, we denote a support set, $S = \{(\mathbf{x}^{(m)},\mathbf{y}^{(m)})\}_{m = 1}^{M = K\times C}$ , composed of $K$ images for each target category, such that $K$ takes a small value, e.g., $K\in \{1,2,4,8,16\}$ , and where $\mathbf{y}\in \{0,1\} ^C$ is the corresponding one-hot label for a given image $\mathbf{x}$ . The objective is to adapt the pre-trained model using this limited support set. + +# 3.3. 
Efficient transfer learning with adapters + +In their general form, ETL methods based on adapters learn a set of transformations over the pre-trained features $(\mathbf{v}^{\prime},\mathbf{t}^{\prime} = f_{\psi}(\mathbf{v},\mathbf{t}))$ , parameterized by the so-called adapter $\psi$ , which produces softmax scores for the new tasks following Eq. (1). The adapter $\psi$ can be optimized by minimizing the popular cross-entropy (CE) loss, $\mathcal{H}(\mathbf{y},\hat{\mathbf{y}}) = -\sum_{c = 1}^{C}y_{c}\log \hat{y}_{c}$ , over the support set samples: + +$$ +\min _ {\psi} \frac {1}{M} \sum_ {m = 1} ^ {M} \mathcal {H} \left(\mathbf {y} ^ {(m)}, \hat {\mathbf {y}} ^ {(m)}\right). \tag {2} +$$ + +# 3.4. Pitfalls of existing few-shot ETL methods + +Recent ETL methods tailored to VLMs focus on enhancing the supervision provided by the support samples with + +priors learned by the VLMs at the task at hand. The pretrained model gathers robust knowledge and is able to align visual and textual concepts. Retaining this prior knowledge can therefore produce more robust adapters, able to generalize beyond the specific bias introduced in the few support samples, to more general concepts. In this context, the zero-shot prototypes from CLIP act as a proxy to initialize the learning procedure into a reliable region. For instance, CLIP-Adapter [11] maintains the zero-shot prototypes based inference as in Eq. (1), but includes a residual multi-layered perceptron to modify visual features, such as $\mathbf{v}' = \mathbf{v} + \alpha_{\mathrm{r}}f_{\psi}(\mathbf{v})$ . TIP-Adapter [42] includes an additional complexity layer, by combining the similarity of the zero-shot prototypes with a weighted similarity to the support samples, $f_{\psi}(\cdot ,\beta)$ , controlled by the hyperparameter $\beta$ , such that the predicted logits are $\mathbf{l}_c = \alpha_{\mathrm{tipA}}f_{\psi}(\mathbf{v},\beta) + \mathbf{v}\cdot \mathbf{t}_c^\top /\tau$ . Finally, TaskRes [40] learns a modification of the initial zero-shot prototypes, $\mathbf{w}_{TR}$ , using the support samples. The divergence between the initial and final prototypes is controlled by a residual ratio: $\mathbf{t}' = \mathbf{t} + \alpha_{\mathrm{TR}}\mathbf{w}_{TR}$ . Nevertheless, these methods lack a model selection strategy to set these hyperparameters (See Supp. Sec. A for details). + +# 4. Proposed approach + +# 4.1. Revisiting Linear Probing + +The most straightforward approach used to adapt VLMs is Linear Probing [30], which refers to fitting a multiclass logistic regression linear classifier on top of the pre-trained features. Formally, the objective is to learn a set of class-wise prototypes, $\mathbf{w}_c$ , to provide softmax class scores for a given visual embedding $\mathbf{v}$ : + +$$ +\hat {y} _ {c} = \frac {\exp \left(\mathbf {v} \cdot \mathbf {w} _ {c} ^ {\top} / \tau\right)}{\sum_ {i = 1} ^ {C} \exp \left(\mathbf {v} \cdot \mathbf {w} _ {i} ^ {\top} / \tau\right)}. \tag {3} +$$ + +The $\mathbf{w}_{\mathrm{c}}$ prototypes can be trained to minimize the cross-entropy loss on the support samples, as in Eq. (2), using standard SGD. Besides, a common practice in ETL is to regularize the trained weights [24, 30, 40] by minimizing its $\ell_{2}$ -norm with an additional term, weighted by an empirically-optimized non-negative balancing term $\lambda_{wd}$ . 
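To make the prototype-based scoring shared by Eqs. (1) and (3) concrete, the following sketch builds the zero-shot class prototypes from prompt embeddings and computes softmax scores over temperature-scaled cosine similarities. It is a minimal sketch assuming CLIP features have already been extracted; the function names and the temperature default are illustrative.

```python
import torch
import torch.nn.functional as F

def zero_shot_prototypes(text_embeddings):
    """Class prototype t_c: center of the l2-normalized embeddings of the N prompts of class c."""
    # text_embeddings: (C, N, D) text features for N prompt templates per class
    return F.normalize(text_embeddings, dim=-1).mean(dim=1)   # (C, D)

def prototype_scores(visual_feats, prototypes, tau=0.01):
    """Softmax over temperature-scaled cosine similarities, as in Eqs. (1) and (3)."""
    v = F.normalize(visual_feats, dim=-1)                      # (B, D) image embeddings
    w = F.normalize(prototypes, dim=-1)                        # (C, D) class prototypes
    return (v @ w.t() / tau).softmax(dim=-1)                   # (B, C) class scores
```

Re-normalizing the prototypes inside the scoring function anticipates the explicit l2-normalization applied after every update in the ZS-LP recipe described next; for plain zero-shot inference the class centers can be used as they are.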
Despite its limited performance shown for few-shot adaptation [11, 30], we believe that this requires further exploration, as LP is a lightweight adaptation strategy, especially convenient due to its convexity during optimization. In this work, we present an updated view of Linear Probing. First, the class weights are initialized using the CLIP zero-shot prototypes, as SoTA ETL methods do [11, 40, 42]. Second, we replace the weight decay in the loss function and explicitly perform an $\ell_{2}$ -normalization of the prototypes after each update, to exactly meet the pre-training scenario during adaptation, inspired by [12]. Similarly, cosine similarity is also scaled with CLIP's pre-trained temperature + +$\tau$ . Last, we incorporate data augmentation, usually not included in LP. We refer to this updated Linear Probing version for vision-language models as ZS-LP1. Interestingly, ZS-LP serves as a strong baseline (see Tab. 1), which does not require adjusting specific hyperparameters per task. + +# 4.2. Constrained Linear Probing + +Albeit a well-initialized Linear Probing offers a strong baseline for efficient transfer learning, the updated prototypes might deviate from the initial regions offering strong generalization. This is especially the case in the few-shot setting, where the few provided support samples might be under-representative and contain specific biases that produce spurious correlations, hence harming the generalization after adaptation [34, 44]. Thus, to retain the strong basis provided by the VLM model, and avoid prototype degradation, we resort to a constrained formulation of the loss in Eq. (2). + +Retaining prior knowledge. A direct form to avoid prototype degradation from zero-shot points is to constrain the cross-entropy minimization to enforce the resulting prototypes to remain close to the initial solution (i.e., initial set of prototypes $\mathcal{T} = [\mathbf{t}_1,\dots ,\mathbf{t}_c]$ ). Specifically, this constrained optimization problem can be defined as follows: + +$$ +\min _ {\mathcal {W}} \quad \frac {1}{M} \sum_ {m = 1} ^ {M} \mathcal {H} \left(\mathbf {y} ^ {(m)}, \hat {\mathbf {y}} ^ {(m)}\right) \tag {4} +$$ + +$$ +\begin{array}{l l} \text {s . t .} & \mathbf {w} _ {c} = \mathbf {t} _ {c} \quad \forall c \in \{1, \dots , C \}, \end{array} +$$ + +with $\mathcal{W} = [\mathbf{w}_1, \dots, \mathbf{w}_C]$ the set of learnable class prototypes. We can approximate the minimum of the constrained problem in Eq. (4) by a penalty-based optimization approach, transforming the above formulation into an unconstrained problem, and using an $\ell_2$ -penalty between the class prototypes and the set of zero-shot anchors: + +$$ +\min _ {\mathcal {W}} \quad \sum_ {m = 1} ^ {M} \mathcal {H} \left(\mathbf {y} ^ {(m)}, \hat {\mathbf {y}} ^ {(m)}\right) + \lambda \sum_ {m = 1} ^ {M} \sum_ {c = 1} ^ {C} \left\| \mathbf {t} _ {c} - \mathbf {w} _ {c} ^ {(m)} \right\| _ {2} ^ {2}, \tag {5} +$$ + +where $\lambda \in \mathbb{R}_+$ is a scalar weight controlling the contribution of the corresponding penalty. Note that $\mathbf{w}_c^{(m)}$ is the optimal class prototype for the support sample $m$ that minimizes the left term. For clarity in the presentation, we have omitted the normalization by the cardinality of each set. + +Sample and class-specific constraints. The associated constrained problem in Eq. (4) is approximated by an unconstrained formulation, which uses a single uniform penalty without considering individual data samples or + +classes. 
Certainly, all samples and categories within a given dataset may indeed present different intrinsic learning challenges. Thus, the problem in Eq. (5) is not solved accurately. A better alternative would consist in integrating multiple penalty weights $\lambda$ , one for each sample and class, producing a set of penalty weights $\Lambda \in \mathbb{R}_{+}^{M \times C}$ . The resulting optimization problem can then be defined as: + +$$ +\min _ {\mathcal {W}} \quad \sum_ {m = 1} ^ {M} \mathcal {H} \left(\mathbf {y} ^ {(m)}, \hat {\mathbf {y}} ^ {(m)}\right) + \sum_ {m = 1} ^ {M} \sum_ {c = 1} ^ {C} \boldsymbol {\Lambda} _ {m c} \left\| \mathbf {t} _ {c} - \mathbf {w} _ {c} ^ {(m)} \right\| _ {2} ^ {2}. \tag {6} +$$ + +Now, from an optimization standpoint, if we suppose that there exists an optimal set of class-prototypes $\mathcal{W}^*$ for the problem presented in Eq. (4), there also exists $\Lambda^{*}\in \mathbb{R}_{+}^{M\times C}$ such that $(\mathcal{W}^{*},\Lambda^{*})$ represents a saddle point of the Lagrangian associated to Eq. (4). In this scenario, $\Lambda^{*}$ are the Lagrange multipliers of the presented problem, and is intuitive to consider $\Lambda = \Lambda^{*}$ as the best choice to solve Eq. (6). + +Nevertheless, using the Lagrange multipliers $\Lambda^{*}$ as the weights for the penalties in Eq. (6) may not be feasible in practice. In particular, a number of conventional strategies employed to train deep neural networks hinder straightforward minimization. First, the use of mini-batch gradient descent averages the updated prototypes for every single observation into a mean prototype per class, making a sample-wise constraint hard to achieve. Furthermore, performing data augmentation over the support samples may yield distinct penalty weights for the augmented versions, which could be harder or easier to classify than their original counterparts. + +To alleviate the aforementioned challenges, we propose to relax the sample-wise penalties, which results in solving: + +$$ +\min _ {\mathcal {W}} \quad \sum_ {m = 1} ^ {M} \mathcal {H} \left(\mathbf {y} ^ {(m)}, \hat {\mathbf {y}} ^ {(m)}\right) + \sum_ {c = 1} ^ {C} \lambda_ {c} \left\| \mathbf {t} _ {c} - \mathbf {w} _ {c} \right\| _ {2} ^ {2}, \tag {7} +$$ + +where $\lambda \in \mathbb{R}_+^C$ is a set of $C$ class-wise penalty weights. While the problem complexity has been reduced by removing sample-wise penalty weights, we still need to choose $C$ weights for the class-wise penalties. This poses a challenge in the optimization, particularly for datasets that contain a large number of categories, such as ImageNet [8] ( $C = 1000$ ), where properly selecting the penalty weights $\lambda \in \mathbb{R}_+^C$ can be a laborious process. Furthermore, choosing these values "by hand" juxtaposes with our goal of providing a validation-free solution for ETL. + +# 4.3. Class Adaptive Constraint for Linear Probing + +General Augmented Lagrangian. Augmented Lagrangian Multiplier (ALM) methods present an appealing alternative for learning the penalty weights. These popular methods + +in optimization, which solve a constrained problem by the interplay of penalties and primal-dual steps, present well-known advantages [1, 32]. Formally, we can define a general constrained optimization problem as: + +$$ +\min _ {x} \quad g (x) \quad \text {s . 
$$
\min_{x} \quad g(x) \quad \text{s.t.} \quad h_{i}(x) \leq 0, \quad i = 1, \dots, n \tag{8}
$$

with $g: \mathbb{R}^d \to \mathbb{R}$ the objective function and $h_i: \mathbb{R}^d \to \mathbb{R}$, $i = 1, \dots, n$, the set of constraint functions. This problem is generally tackled by solving a succession of $j \in \mathbb{N}$ unconstrained problems, each solved approximately w.r.t. $x$:

$$
\min_{x, \lambda} \quad \mathcal{L}^{(j)}(x) = g(x) + \sum_{i=1}^{n} P\left(h_{i}(x), \rho_{i}^{(j)}, \lambda_{i}^{(j)}\right), \tag{9}
$$

with $P:\mathbb{R}\times \mathbb{R}_{++}\times \mathbb{R}_{++}\to \mathbb{R}$ a penalty-Lagrangian function, whose derivative w.r.t. its first variable, $P^{\prime}(z,\rho,\lambda)\equiv \frac{\partial}{\partial z} P(z,\rho,\lambda)$, exists and is positive and continuous for all $z\in \mathbb{R}$ and $(\rho,\lambda)\in (\mathbb{R}_{++})^{2}$. The set of axioms that any penalty function $P$ must satisfy [2] is detailed in Supp. Sec. B. Furthermore, $\boldsymbol{\rho}^{(j)} = (\rho_i^{(j)})_{1\leq i\leq n}\in \mathbb{R}_{++}^n$ and $\boldsymbol{\lambda}^{(j)} = (\lambda_i^{(j)})_{1\leq i\leq n}\in \mathbb{R}_{++}^n$ denote the penalty parameters and multipliers associated with the penalty $P$ at iteration $j$.

The ALM can be split into two types of iterations: outer iterations (indexed by $j$), where the penalty multipliers $\boldsymbol{\lambda}$ and the penalty parameters $\boldsymbol{\rho}$ are updated, and inner iterations, where $\mathcal{L}^{(j)}$ (Eq. (9)) is minimized using the previous solution as initialization. In particular, the penalty multipliers $\boldsymbol{\lambda}^{(j)}$ are updated to the derivative of $P$ evaluated at the solution obtained during the last inner step:

$$
\lambda_{i}^{(j+1)} = P^{\prime}\left(h_{i}(x), \rho_{i}^{(j)}, \lambda_{i}^{(j)}\right). \tag{10}
$$

By doing this, the penalty multipliers increase when the corresponding constraint is violated, and decrease otherwise. Thus, this strategy provides an adaptive and learnable way of determining the penalty weights.

Our solution. We propose to use an ALM approach to solve the problem in Eq. (7). In particular, we reformulate this problem by integrating a penalty function $P$ parameterized by $(\boldsymbol{\rho}, \boldsymbol{\lambda}) \in \mathbb{R}_{++}^{C} \times \mathbb{R}_{++}^{C}$, formally defined as:

$$
\min_{\mathcal{W}, \boldsymbol{\lambda}} \quad \sum_{m=1}^{M} \mathcal{H}\left(\mathbf{y}^{(m)}, \hat{\mathbf{y}}^{(m)}\right) + \sum_{c=1}^{C} P\left(\mathbf{t}_{c} - \mathbf{w}_{c}, \rho_{c}, \lambda_{c}\right). \tag{11}
$$

Following our realistic validation-free scenario, the only data from which we can obtain feedback during adaptation is the support set $\mathcal{S}$. Thus, the penalty multiplier for class $c$ at epoch $j+1$ can be defined as:

$$
\lambda_{c}^{(j+1)} = \frac{1}{|\mathcal{S}|} \sum_{(\mathbf{x}, \mathbf{y}) \in \mathcal{S}} P^{\prime}\left(\mathbf{t}_{c} - \mathbf{w}_{c}, \rho_{c}^{(j)}, \lambda_{c}^{(j)}\right). \tag{12}
$$

As suggested by prior work [2, 25], we employ the PHR function as the penalty $P$, defined as:

$$
\operatorname{PHR}(z, \rho, \lambda) = \begin{cases} \lambda z + \frac{1}{2} \rho z^{2} & \text{if } \lambda + \rho z \geq 0, \\ -\frac{\lambda^{2}}{2\rho} & \text{otherwise.} \end{cases} \tag{13}
$$
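For reference, the following is a small, self-contained sketch of the PHR penalty of Eq. (13) and of the generic multiplier update of Eq. (10), in elementwise form; it is illustrative only and not the paper's released implementation.

```python
import torch

def phr(z, rho, lam):
    """PHR(z, rho, lam) of Eq. (13), applied elementwise."""
    return torch.where(lam + rho * z >= 0,
                       lam * z + 0.5 * rho * z ** 2,
                       -lam ** 2 / (2.0 * rho))

def phr_prime(z, rho, lam):
    """dPHR/dz: equals lam + rho * z when lam + rho * z >= 0, and 0 otherwise."""
    return torch.clamp(lam + rho * z, min=0.0)

# Eq. (10): the new multiplier is this derivative evaluated at the current solution,
# so it grows when a constraint h_i(x) > 0 is violated and shrinks otherwise.
z = torch.tensor([0.3, -0.3])                 # one violated and one satisfied constraint
rho, lam = torch.tensor(1.0), torch.tensor(0.5)
print(phr_prime(z, rho, lam))                 # tensor([0.8000, 0.2000])
```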
Nevertheless, as we empirically found in our experiments (Supp. Sec. C.3), estimating the Lagrange multipliers from the support samples might overfit the training data. As we do not have access to additional data points, we follow a simple strategy, which consists of performing only one iteration of the $\boldsymbol{\lambda}$ update. For a given target task, we rely on the text embeddings as an anchor that offers a generalizable representation of concrete concepts across different visual domains. Thus, we consider the zero-shot prototypes $\mathbf{t}_c$ as the initial approximation of the problem in Eq. (12) (first inner step). Instead of initializing $\boldsymbol{\lambda}$ randomly, which might hamper convergence, we compute the penalty weight for a given class as the average of the zero-shot softmax scores over all support samples belonging to that class, such that $\lambda_c^* = \frac{1}{|\mathcal{B}_c^+|}\sum_{i\in \mathcal{B}_c^+}\hat{y}_c^{(i)}$, with $\mathcal{B}_c^+ = \{i \mid i\in M, y_c^{(i)} = 1\}$. Note that these values are obtained by replacing $\mathbf{w}_c$ with the solution found in the inner step ($\mathbf{t}_c$) in Eq. (3), which indeed satisfies the constraint $\mathbf{w}_c = \mathbf{t}_c$, resulting in a zero penalty. Taking now the derivative of PHR w.r.t. $z$, it is straightforward to see that the learned value of $\boldsymbol{\lambda}$ after one iteration is indeed $\lambda_c^*$.
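A hedged sketch of this one-step initialization is given below; it assumes the same pre-extracted, $\ell_2$-normalized support features as before, and the names (`clap_class_multipliers`, `tau`) are illustrative.

```python
import torch
import torch.nn.functional as F

def clap_class_multipliers(feats, labels, zs_prototypes, tau=0.01):
    """feats: (M, D) l2-normalized support features; labels: (M,) class indices;
    zs_prototypes: (C, D) l2-normalized text prototypes; returns the (C,) weights lambda_c*."""
    zs_probs = F.softmax(feats @ zs_prototypes.t() / tau, dim=-1)   # zero-shot softmax scores
    num_classes = zs_prototypes.shape[0]
    lam = torch.zeros(num_classes)
    for c in range(num_classes):
        mask = labels == c
        if mask.any():                       # average score of the true class over its support shots
            lam[c] = zs_probs[mask][:, c].mean()
    return lam
```

Under the one-outer-step strategy described above, these $\lambda_c^*$ then act as the class-wise multipliers of the PHR penalty in Eq. (11) while the prototypes are adapted.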
# 5. Experiments

# 5.1. Setup

Datasets: Few-shot adaptation. We follow the prior ETL literature [11, 40, 42] and benchmark all the methods on 11 datasets: ImageNet [8], Caltech101 [10], OxfordPets [29], StanfordCars [21], Flowers102 [28], Food101 [4], FGVCAircraft [26], SUN397 [37], DTD [7], EuroSAT [15], and UCF101 [33]. These cover a diverse set of computer vision classification tasks, from general objects to actions or fine-grained categories in specialized applications. To train the few-shot adapters, we randomly retrieve $K$ shots ($K \in \{1, 2, 4, 8, 16\}$) for each class. Lastly, for evaluation, we used the test sets provided with each dataset, with the same data splits as [40, 46].

Domain generalization capabilities. We further assess the model's robustness to domain shifts by following existing ETL works. We used ImageNet as the source domain for adaptation, and its variants as target tasks: ImageNetV2 [31], ImageNet-Sketch [35], ImageNet-A [16], and ImageNet-R [17]. In this scenario, the model only sees a few labeled samples from the source domain, and target data are used exclusively for testing. In addition, we also employ this setting to motivate the use of efficient adapters vs. fine-tuning the entire VLM [12, 22, 40].

Implementation details. All experiments are based on CLIP [30] pre-trained features, using different backbones: ResNet-50 [14] and ViT-B/16 [9] (results for other backbones are in Supp. Sec. C.2). We resort to ResNet-50 as the backbone in the ablation studies. For each downstream task, we first extract all pre-trained features of the support shots and then run the adaptation experiments over those. Data augmentation is applied during the feature extraction stage using random zooms, crops, and flips, following [40, 45]. The number of augmentations per support sample is set to 20. We used the same text prompts per dataset as in [40, 46]. Following our claim that using a validation set for few-shot adaptation is unrealistic, we trained ZS-LP and CLAP using the same configuration for all datasets, numbers of shots, and visual backbones. Concretely, we optimize the adapter for 300 epochs, using the SGD optimizer with a momentum of 0.9. We use a relatively large initial learning rate of 0.1 to avoid underfitting on the support set, which decreases during training following a cosine decay scheduler. We ran all experiments with three different random seeds, and the results were averaged across runs.

Baselines and adaptation protocol. We selected adapter-based methods as our main competitors, based on their similarity to our approach, including CLIP-Adapter [11], TIP-Adapter [42], TaskRes [40], and Cross-Modal [24]. It is important to highlight that prior works [11, 40, 42] apparently leverage either the extensive test set or an additional independent validation subset to adjust important hyperparameters for few-shot adaptation, such as the learning rate, the number of training epochs, and the particular parameters that control each method [24]. Nevertheless, as we showed in Fig. 1, their performance dramatically decreases when the set of hyperparameters is not adjusted for the testing scenario. To adhere to real-world requirements, we define a strict few-shot adaptation protocol, in which no validation or test samples are available to find the best case-specific configuration for each method, and hyperparameters remain fixed across tasks (details in Supp. Sec. A.4).

# 5.2. Results

Efficient transfer learning. We report in Tab. 1 the performance of adapter-based approaches averaged across the 11 datasets, in the more realistic and practical validation-free experimental setting. Furthermore, for prompt-learning-based approaches, we include the results reported in the prior literature, for a more comprehensive comparison. From these values, we can make interesting observations. First, a well-initialized Linear Probe, i.e., one using the CLIP zero-shot weights, does not show the performance degradation discussed in prior works, and it is indeed a competitive alternative to SoTA approaches. Second, and more surprisingly, more complex approaches such as CLIP-Adapter or TIP-Adapter show a significant decline in performance compared to their original results when no validation set is available for model selection. Interestingly, TaskRes(e), which is essentially a two-stage, zero-shot-initialized Linear Probing with an updated text projection, also offers robust performance. Nevertheless, the absence in the original work of a detailed explanation of how the enhanced version is obtained hampers fair comparisons. Third, constraining the weight updates to remain close to the zero-shot knowledge (CLAP) shows consistent improvements across different numbers of shots, especially in the very low data regime. This suggests that retaining the base knowledge of VLMs is important to avoid diverging due to unrepresentative shots during adaptation. Results per dataset are detailed in Supp. Fig. 8 and Supp. Tab. 9.

Table 1. Comparison to state-of-the-art methods for few-shot adaptation of CLIP-based models, using a ResNet-50 backbone. ETL methods are trained under the same protocol, i.e., in the absence of a validation set and using a fixed configuration across datasets, and results are averaged across the 11 datasets. Results for prompt-learning methods are directly extracted from [6, 13]. Best results in bold.
| Method | K=1 | K=2 | K=4 | K=8 | K=16 |
|---|---|---|---|---|---|
| *Prompt-learning methods* | | | | | |
| CoOp IJCV'22 [46] | 59.56 | 61.78 | 66.47 | 69.85 | 73.33 |
| ProGrad ICCV'23 [13] | 62.61 | 64.90 | 68.45 | 71.41 | 74.28 |
| PLOT ICLR'23 [6] | 62.59 | 65.23 | 68.60 | 71.23 | 73.94 |
| *Efficient transfer learning - a.k.a. Adapters* | | | | | |
| Zero-Shot ICML'21 [30] | 57.71 | 57.71 | 57.71 | 57.71 | 57.71 |
| Rand. Init LP ICML'21 [30] | 30.42 | 41.86 | 51.69 | 60.84 | 67.54 |
| CLIP-Adapter IJCV'23 [11] | 58.43 | 62.46 | 66.18 | 69.87 | 73.35 |
| TIP-Adapter ECCV'22 [42] | 58.86 | 60.33 | 61.49 | 63.15 | 64.61 |
| TIP-Adapter(f) ECCV'22 [42] | 60.29 | 62.26 | 65.32 | 68.35 | 71.40 |
| CrossModal-LP CVPR'23 [24] | 62.24 | 64.48 | 66.67 | 70.36 | 73.65 |
| TaskRes(e) CVPR'23 [40] | 61.44 | 65.26 | 68.35 | 71.66 | 74.42 |
| ZS-LP | 61.28 | 64.88 | 67.98 | 71.43 | 74.37 |
| CLAP | **62.79** | **66.07** | **69.13** | **72.08** | **74.57** |
Domain generalization. If adaptation is not carefully conducted, the resulting model might distort the pre-trained knowledge and underperform when new data with domain drifts are involved [22], even falling below the zero-shot (no adaptation) performance. Thus, evaluating the robustness of novel adapters under this domain generalization scenario is of special interest. To do so, adapters are optimized on ImageNet using 16 shots per class, and directly evaluated on the ImageNet variants. In this setting, we also assume the absence of a validation dataset, and hence all adapters are trained until convergence, using the same configuration across backbones. A summary of the results is reported in Tab. 2, while specific numbers across datasets and additional backbones are included in Supp. Tab. 10. From these experiments, we make two striking observations. First, ZS-LP is a strong baseline compared to other, more complex adapters on the source domain. Even more remarkably, prior SoTA adapters, such as CLIP-Adapter or TIP-Adapter, fail to generalize to unseen domains. Indeed, when using recent vision transformers, which are overtaking convolutional neural networks, none of the existing adapter-based approaches outperforms standard zero-shot prediction in the presence of distributional drifts. In contrast, CLAP yields the best in-distribution performance and also shows consistent improvements under domain shifts across all backbones.

Table 2. Robustness to domain shifts. Adapters are adjusted on ImageNet and evaluated on out-of-distribution generalization over 4 ImageNet shifts. Bold indicates best performance. Differences with respect to no adaptation (a.k.a. zero-shot) are highlighted.
| Backbone | Method | Source (ImageNet) | Target (Average) |
|---|---|---|---|
| ResNet-50 | Zero-Shot ICML'21 [30] | 60.35 | 40.61 |
| ResNet-50 | Rand. Init LP ICML'21 [30] | 52.24 (−8.11)↓ | 24.61 (−16.00)↓ |
| ResNet-50 | CLIP-Adapter IJCV'23 [11] | 59.02 (−1.33)↓ | 31.21 (−9.40)↓ |
| ResNet-50 | TIP-Adapter ECCV'22 [42] | 57.81 (−2.54)↓ | 40.69 (+0.08)↑ |
| ResNet-50 | TIP-Adapter(f) ECCV'22 [42] | 62.27 (+1.92)↑ | 41.36 (+0.75)↑ |
| ResNet-50 | TaskRes(e) CVPR'23 [40] | 60.85 (+0.50)↑ | 41.28 (+0.67)↑ |
| ResNet-50 | ZS-LP | 61.00 (+0.65)↑ | 36.58 (−4.03)↓ |
| ResNet-50 | CLAP | **65.02 (+4.67)↑** | **42.91 (+2.30)↑** |
| ViT-B/16 | Zero-Shot ICML'21 [30] | 68.71 | 57.17 |
| ViT-B/16 | Rand. Init LP ICML'21 [30] | 62.95 (−5.76)↓ | 40.41 (−16.76)↓ |
| ViT-B/16 | CLIP-Adapter IJCV'23 [11] | 68.46 (−0.25)↓ | 50.72 (−6.45)↓ |
| ViT-B/16 | TIP-Adapter ECCV'22 [42] | 53.81 (−14.90)↓ | 41.55 (−15.62)↓ |
| ViT-B/16 | TIP-Adapter(f) ECCV'22 [42] | 51.71 (−17.00)↓ | 35.58 (−21.6)↓ |
| ViT-B/16 | TaskRes(e) CVPR'23 [40] | 70.84 (+2.13)↑ | 55.35 (−1.82)↓ |
| ViT-B/16 | ZS-LP | 69.73 (+1.02)↑ | 53.65 (−3.52)↓ |
| ViT-B/16 | CLAP | **73.38 (+4.67)↑** | **60.04 (+2.87)↑** |
Table 3. Fine-tuning (FT) vs. efficient transfer learning (ETL). A benchmark for the low data regime, i.e., 8 shots for each class. For the sake of fairness, FT methods (first four rows) are trained with 4 shots and early-stopped using a validation set containing 4 shots. In contrast, ETL methods (last four rows) are trained using 8 shots and rely solely on the support set. All methods use ViT-B/16 as the CLIP backbone.
| Method | Source (ImageNet) | -V2 | -Sketch | -A | -R | Target Avg. |
|---|---|---|---|---|---|---|
| Fine-tuning (FT) | 69.88 | 62.44 | 47.07 | 47.52 | 76.08 | 58.28 |
| LP-FT ICLR'23 [22] | 71.29 | 64.04 | 48.50 | 49.49 | 77.63 | 59.92 |
| WiSE-FT CVPR'22 [36] | 71.17 | 63.81 | 49.38 | 50.59 | 78.56 | 60.59 |
| FLYP CVPR'23 [12] | 71.51 | 64.59 | 49.50 | 51.32 | 78.52 | 60.98 |
| Zero-Shot | 68.71 | 60.76 | 46.18 | 47.76 | 73.98 | 57.17 |
| Rand. Init LP | 56.58 | 47.17 | 25.82 | 27.03 | 47.05 | 36.77 |
| ZS-LP | 68.49 | 60.07 | 42.77 | 42.39 | 71.73 | 54.24 |
| CLAP | 71.75 | 64.06 | 47.66 | 48.40 | 76.70 | 59.21 |
*Specific numbers for FT, LP-FT, WiSE-FT, and FLYP are retrieved from [12].

Is it worth optimizing the entire model? We now compare CLAP to end-to-end full fine-tuning (FT) approaches: LP-FT [22], WiSE-FT [36], and FLYP [12]. The former two methods require a validation set for early stopping, and the latter two use it for both early stopping and for tuning the mixing coefficient hyperparameter $\alpha$. Therefore, for a $K$-shot problem, these methods actually require $2K$ shots for each class: $K$ for training and $K$ for validation. As the balancing penalty term in CLAP is optimized with the support set and does not require a validation set, a fair comparison is to evaluate the $K$-shot performance of fine-tuning methods against our method's $2K$-shot results. Thus, Tab. 3 includes the performance of all the models when 8 labeled images are available for each class overall. Analyzing the results, we can conclude that in the low data regime, full fine-tuning is not necessarily superior to ETL when compared properly. More specifically, our approach outperforms fine-tuning methods in in-distribution performance and performs reasonably well on OOD datasets, while having a fraction of the learnable parameters of fine-tuning methods.

# 5.3. Ablation experiments

On the need for model selection strategies. Relevant methods (e.g., CLIP-Adapter [11], TIP-Adapter [42], or TaskRes [40]) include different hyperparameters that directly control their performance. Nevertheless, these methods are incomplete, since they do not include any strategy for adjusting these parameters, typically referred to as model selection. In contrast, and as previously stressed, there is evidence that these works use a large evaluation subset to adapt their settings to each scenario [24]. To investigate this observation, we evaluate these methods in cross-dataset model selection experiments. The best hyperparameter values for a task (i.e., dataset), which are found in an oracle scenario using the entire test subset, are used during adaptation to another dataset. The matrices showing the relative improvements over a zero-shot initialized Linear Probing (ZS-LP) are depicted in Fig. 1. These results show empirically that the hyperparameter values are highly task-dependent, and that SoTA methods must adjust their hyperparameters on the target task to outperform this simple baseline, which is unrealistic in practice. In contrast, the proposed CLAP is more robust, showing consistent results across all datasets, even in the worst degradation case, as it does not require particular modifications per task.

Table 4. Improving Linear Probing. Using as a baseline the proposed ZS-LP configuration detailed in Sec. 4.1, we isolate the effect of removing different parts of the model, while keeping the rest static. Results are averaged across the 11 datasets.
| Method | K=1 | K=2 | K=4 |
|---|---|---|---|
| ZS-LP | 61.28 | 64.88 | 67.98 |
| w/o DA | 57.72 (−3.5)↓ | 61.94 (−2.9)↓ | 65.41 (−2.5)↓ |
| w/o Temp. Scaling (τ) | 58.33 (−2.9)↓ | 59.85 (−5.0)↓ | 59.91 (−8.0)↓ |
| w/o L2-norm | 48.67 (−12.6)↓ | 55.29 (−9.6)↓ | 61.16 (−6.8)↓ |
| Rand. Init. | 30.42 (−30.8)↓ | 41.86 (−23.0)↓ | 51.69 (−16.2)↓ |
Details in Linear Probing matter. As described earlier in Sec. 4.1, LP has been discouraged in the prior literature due to its limited performance in few-shot adaptation [11, 30]. Nevertheless, we argue that this behavior stems from the original way in which LP was introduced in [30], inspired by prior self-supervised learning methods. Indeed, a strategy tailored to contrastive VLMs alleviates the performance drop of LP observed in prior works. In particular, using zero-shot initialization, the same temperature scaling as pre-training, and explicit $\ell_2$-normalization of the class prototypes considerably improves the generalization of few-shot adaptation (Tab. 4). This aligns with relevant literature on other topics, such as FT [12], which suggests that the adaptation conditions should match the pre-training setting. Also, including other heuristics such as data augmentation (DA), usually omitted in LP [40, 42], is of special relevance.

Using a few-shot validation set. Cross-Modal Adapter [24] uses a validation set composed of $\min(K, 4)$ samples to adjust the experimental setting and for early stopping. Even though this setting is more appropriate, it still requires an additional number of shots for model selection. Nevertheless, for the sake of fairness, the performance comparison to methods that do not require a validation set should be carried out by training the latter using $K + \min(K, 4)$ shots. When this fair benchmark is established (see Tab. 5), the simple ZS-LP excels again as a strong baseline, outperforming more complex methods in the low-shot regime. Only when using a large number of shots ($K > 8$) do partial fine-tuning and ETL methods marginally benefit from validation samples. However, model selection using a validation set increases the computational workload and processing times during adaptation due to its grid-search nature.

Table 5. Using a few-shot validation set. Results for prior works in this setting are obtained from [24]. Average across the 11 datasets.
| Method | K=1 | K=2 | K=4 | K=8 | K=16 |
|---|---|---|---|---|---|
| *Protocol in [24]: K shots for training + min(K, 4) for validation* | | | | | |
| TIP-Adapter [42] | 63.3 | 65.9 | 69.0 | 72.2 | 75.1 |
| CrossModal LP [24] | 64.1 | 67.0 | 70.3 | 73.0 | 76.0 |
| CrossModal Adapter [24] | 64.4 | 67.6 | 70.8 | 73.4 | 75.9 |
| CrossModal PartialFT [24] | 64.7 | 67.2 | 70.5 | 73.6 | 77.1 |
| *Ours: using K + min(K, 4) shots for training* | | | | | |
| ZS-LP | 64.9 | 68.0 | 71.4 | 73.1 | 75.0 |
| CLAP | 66.1 | 69.1 | 72.1 | 73.5 | 75.1 |
+ +# 6. Limitations + +In this work, we have introduced a Class-Adaptive linear Probe (CLAP) objective, based on an adaptation of the general Augmented Lagrangian method, for efficient adaptation of large vision-language models in realistic scenarios. Despite its superiority, our empirical validation suggests that the benefits of our approach diminish as the number of shots increases, indicating that other strategies might be privileged if the number of adaptation samples is large. + +# Acknowledgments + +This work is supported by the National Science and Engineering Research Council of Canada (NSERC) and Fonds de recherche du Québec (FRQNT). We also thank Calcul Quebec and Compute Canada. + +# References + +[1] Dimitri P. Bertsekas. Constrained Optimization and Lagrange Multiplier Methods (Optimization and Neural Computation Series). Athena Scientific, 1 edition, 1996. +[2] Ernesto G Birgin, Romulo A Castillo, and José Mario Martínez. Numerical comparison of augmented lagrangian algorithms for nonconvex problems. Computational Optimization and Applications, 31(1):31-55, 2005. +[3] Rishi Bommasani et al. On the opportunities and risks of foundation models. ArXiv, 2021. +[4] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 - mining discriminative components with random forests. In European Conference on Computer Vision (ECCV), 2014. +[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in Neural Information Processing Systems (NeurIPS), 33:1877-1901, 2020. +[6] Guangyi Chen, Weiran Yao, Xiangchen Song, Xinyue Li, Yongming Rao, and Kun Zhang. Prompt learning with optimal transport for vision-language models. In International Conference on Learning Representations (ICLR), 2023. +[7] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3606-3613, 2014. +[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 248-255, 2009. +[9] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. International Conference on Learning Representations (ICLR), 2021. +[10] Li Fei-Fei, R. Fergus, and P. Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Worskshops (CVPRW), pages 178–178, 2004. +[11] Peng Gao, Shijie Geng, Renrui Zhang, Teli Ma, Rongyao Fang, Yongfeng Zhang, Hongsheng Li, and Yu Qiao. Clip-adapter: Better vision-language models with feature adapters. International Journal of Computer Vision (IJCV), 2023. +[12] Sachin Goyal, Ananya Kumar, Sankalp Garg, Zico Kolter, and Aditi Raghunathan. Finetune like you pretrain: Improved finetuning of zero-shot vision models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19338-19347, 2023. 
+ +[13] Changsheng Xu Hantao Yao, Rui Zhang. Visual-language prompt tuning with knowledge-guided context optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. +[14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2016. +[15] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. Introducing eurosat: A novel dataset and deep learning benchmark for land use and land cover classification. In IEEE International Geoscience and Remote Sensing Symposium (IGARSS), pages 3606-3613, 2018. +[16] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15262-15271, 2019. +[17] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadayath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), page 8340-8349, 2021. +[18] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning (ICML), pages 4904-4916, 2021. +[19] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision (ECCV), pages 709-727, 2022. +[20] Muhammad Uzair Khattak, Hanoona Rasheed, Muhammad Maaz, Salman Khan, and Fahad Shahbaz Khan. Maple: Multi-modal prompt learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19113-19122, 2023. +[21] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), page 3498–3505, 2012. +[22] Ananya Kumar, Aditi Raghunathan, Robbie Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. In International Conference on Learning Representations (ICLR), pages 1-42, 2022. +[23] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. In _Conference on Empirical Methods in Natural Language Processing (EMNLP)_, pages 3045-3059, 2021. +[24] Zhiqiu Lin, Samuel Yu, Zhiyi Kuang, Deepak Pathak, and Deva Ramanan. Multimodality helps unimodality: Cross-modal few-shot learning with multimodal models. In Pro + +ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. +[25] Bingyuan Liu, Jérôme Rony, Adrian Galdran, Jose Dolz, and Ismail Ben Ayed. Class adaptive network calibration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16070-16079, 2023. +[26] S. Maji, J. Kannala, E. Rahtu, M. Blaschko, and A. Vedaldi. Fine-grained visual classification of aircraft. In ArXiv Preprint, 2013. +[27] Sachit Menon and Carl Vondrick. Visual classification via description from large language models. 
In International Conference on Learning Representations (ICLR), pages 1-17, 2023. +[28] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In Indian Conference on Computer Vision, Graphics and Image Processing, 2008. +[29] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), page 3498-3505, 2012. +[30] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (ICML), pages 8748-8763, 2021. +[31] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. Do imagenet classifiers generalize to imagenet? In International Conference on Machine Learning (ICML), pages 5389-5400, 2019. +[32] Sara Sangalli, Ertunc Erdil, Andeas Hotker, Olivio F Donati, and Ender Konukoglu. Constrained optimization to train neural networks on critical and under-represented classes. In Advances in Neural Information Processing Systems (NeurIPS), 2021. +[33] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. In ArXiv Preprint, 2012. +[34] Rohan Taori, Achal Dave, Vaishaal Shankar, Nicholas Carlini, Benjamin Recht, and Ludwig Schmidt. Measuring robustness to natural distribution shifts in image classification. In Advances in Neural Information Processing Systems (NeurIPS), 2020. +[35] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In Advances in Neural Information Processing Systems (NeurIPS), 2019. +[36] Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo-Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, and Ludwig Schmidt. Robust fine-tuning of zero-shot models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7959-7971, 2022. +[37] Jianxiong Xiao, James Hays, Krista A. Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene + +recognition from abbey to zoo. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3485-3492, 2010. +[38] Yinghui Xing, Qirui Wu, De Cheng, Shizhou Zhang, Guoqiang Liang, Peng Wang, and Yanning Zhang. Dual modality prompt tuning for vision-language pre-trained model. IEEE Transactions on Multimedia, 2023. +[39] Hantao Yao, Rui Zhang, and Changsheng Xu. Visual-language prompt tuning with knowledge-guided context optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6757-6767, 2023. +[40] Tao Yu, Zhihe Lu, Xin Jin, Zhibo Chen, and Xinchao Wang. Task residual for tuning vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10899-10909, 2023. +[41] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18123-18133, 2022. +[42] Renrui Zhang, Rongyao Fang, Wei Zhang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li. 
Tip-adapter: Training-free clip-adapter for better vision-language modeling. In European Conference on Computer Vision (ECCV), pages 1-19, 2022. +[43] Zexuan Zhong, Dan Friedman, and Danqi Chen. Factual probing is [mask]: Learning vs. learning to recall. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5017-5033, 2021. +[44] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 45:4396-4415, 2022. +[45] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Conditional prompt learning for vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. +[46] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision (IJCV), 2022. +[47] Beier Zhu, Yulei Niu, Yucheng Han, Yue Wu, and Hanwang Zhang. Prompt-aligned gradient for prompt tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15659-15669, 2023. \ No newline at end of file diff --git a/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/images.zip b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..21220d6d24da35c1a81024c16d4b21f1235bbf25 --- /dev/null +++ b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31e978fdab2321f251cc4a3cadbdf68a741ac6e3dbbfc754e60ae0111806452f +size 455857 diff --git a/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/layout.json b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..2d6a1a0e11265c620f222ee6f0b2eb7c008ad841 --- /dev/null +++ b/2024/A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models/layout.json @@ -0,0 +1,8635 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 58, + 103, + 536, + 123 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 103, + 536, + 123 + ], + "spans": [ + { + "bbox": [ + 58, + 103, + 536, + 123 + ], + "type": "text", + "content": "A Closer Look at the Few-Shot Adaptation of Large Vision-Language Models" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 102, + 143, + 217, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 143, + 217, + 158 + ], + "spans": [ + { + "bbox": [ + 102, + 143, + 217, + 158 + ], + "type": "text", + "content": "Julio Silva-Rodriguez" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 242, + 143, + 309, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 143, + 309, + 157 + ], + "spans": [ + { + "bbox": [ + 242, + 143, + 309, + 157 + ], + "type": "text", + "content": "Sina Hajimiri" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 334, + 144, + 416, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 144, + 416, + 157 + ], + "spans": [ + { + "bbox": [ + 334, + 144, + 416, + 157 + ], + "type": "text", + "content": "Ismail Ben Ayed" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 261, + 158, + 332, + 171 
+ ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 158, + 332, + 171 + ], + "spans": [ + { + "bbox": [ + 261, + 158, + 332, + 171 + ], + "type": "text", + "content": "ÉTS Montreal" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 190, + 174, + 399, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 174, + 399, + 186 + ], + "spans": [ + { + "bbox": [ + 190, + 174, + 399, + 186 + ], + "type": "text", + "content": "julio-jose.silva-rodriguez@etsmt1.ca" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 440, + 144, + 490, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 440, + 144, + 490, + 156 + ], + "spans": [ + { + "bbox": [ + 440, + 144, + 490, + 156 + ], + "type": "text", + "content": "Jose Dolz" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 39, + 227, + 162, + 331 + ], + "blocks": [ + { + "bbox": [ + 65, + 217, + 134, + 227 + ], + "lines": [ + { + "bbox": [ + 65, + 217, + 134, + 227 + ], + "spans": [ + { + "bbox": [ + 65, + 217, + 134, + 227 + ], + "type": "text", + "content": "(a) CLIP-Adapter [11]" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 39, + 227, + 162, + 331 + ], + "lines": [ + { + "bbox": [ + 39, + 227, + 162, + 331 + ], + "spans": [ + { + "bbox": [ + 39, + 227, + 162, + 331 + ], + "type": "image", + "image_path": "82130ed8dda85ff2f0317020867ca340011005508c804a9c8616604948ccad77.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 339, + 546, + 373 + ], + "lines": [ + { + "bbox": [ + 45, + 339, + 546, + 373 + ], + "spans": [ + { + "bbox": [ + 45, + 339, + 546, + 373 + ], + "type": "text", + "content": "Figure 1. Pitfalls of few-shot adapters due to the absence of a model selection strategy. The cross-shift model selection matrices " + }, + { + "bbox": [ + 45, + 339, + 546, + 373 + ], + "type": "inline_equation", + "content": "(i,j)" + }, + { + "bbox": [ + 45, + 339, + 546, + 373 + ], + "type": "text", + "content": " depict the relative improvement w.r.t. a zero-shot initialized Linear Probing when using the optimal hyperparameters for the dataset " + }, + { + "bbox": [ + 45, + 339, + 546, + 373 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 45, + 339, + 546, + 373 + ], + "type": "text", + "content": " (rows), for adapting in another task " + }, + { + "bbox": [ + 45, + 339, + 546, + 373 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 45, + 339, + 546, + 373 + ], + "type": "text", + "content": " (columns), for each SoTA method (first three plots) and our approach (last plot)." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 162, + 227, + 286, + 331 + ], + "blocks": [ + { + "bbox": [ + 187, + 217, + 258, + 227 + ], + "lines": [ + { + "bbox": [ + 187, + 217, + 258, + 227 + ], + "spans": [ + { + "bbox": [ + 187, + 217, + 258, + 227 + ], + "type": "text", + "content": "(b) TIP-Adapter(f) [42]" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 162, + 227, + 286, + 331 + ], + "lines": [ + { + "bbox": [ + 162, + 227, + 286, + 331 + ], + "spans": [ + { + "bbox": [ + 162, + 227, + 286, + 331 + ], + "type": "image", + "image_path": "c75b44f3de5192e7ec83212cf6ff6a9895b8fc7bf3d5600d20371e35bbd6cdb7.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 286, + 227, + 408, + 331 + ], + "blocks": [ + { + "bbox": [ + 321, + 217, + 370, + 227 + ], + "lines": [ + { + "bbox": [ + 321, + 217, + 370, + 227 + ], + "spans": [ + { + "bbox": [ + 321, + 217, + 370, + 227 + ], + "type": "text", + "content": "(c) TaskRes [40]" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 286, + 227, + 408, + 331 + ], + "lines": [ + { + "bbox": [ + 286, + 227, + 408, + 331 + ], + "spans": [ + { + "bbox": [ + 286, + 227, + 408, + 331 + ], + "type": "image", + "image_path": "8dbbc2575cf75c7536dfa70a8a994c4f1a2b0d453b2e52f716a7a85f50678148.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 408, + 227, + 549, + 331 + ], + "blocks": [ + { + "bbox": [ + 445, + 217, + 496, + 227 + ], + "lines": [ + { + "bbox": [ + 445, + 217, + 496, + 227 + ], + "spans": [ + { + "bbox": [ + 445, + 217, + 496, + 227 + ], + "type": "text", + "content": "(d) CLAP (Ours)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 408, + 227, + 549, + 331 + ], + "lines": [ + { + "bbox": [ + 408, + 227, + 549, + 331 + ], + "spans": [ + { + "bbox": [ + 408, + 227, + 549, + 331 + ], + "type": "image", + "image_path": "a61f88c5dda613b069ca788c6d83166a342f21da1bd87aa1d8d1b6b168bb7c3d.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 143, + 383, + 192, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 383, + 192, + 395 + ], + "spans": [ + { + "bbox": [ + 143, + 383, + 192, + 395 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 45, + 415, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 415, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 45, + 415, + 289, + 714 + ], + "type": "text", + "content": "Efficient transfer learning (ETL) is receiving increasing attention to adapt large pre-trained language-vision models on downstream tasks with a few labeled samples. While significant progress has been made, we reveal that state-of-the-art ETL approaches exhibit strong performance only in narrowly-defined experimental setups, and with a careful adjustment of hyperparameters based on a large corpus of labeled samples. In particular, we make two interesting, and surprising empirical observations. First, to outperform a simple Linear Probing baseline, these methods require to optimize their hyper-parameters on each target task. 
And second, they typically underperform -sometimes dramatically-standard zero-shot predictions in the presence of distributional drifts. Motivated by the unrealistic assumptions made in the existing literature, i.e., access to a large validation set and case-specific grid-search for optimal hyperparameters, we propose a novel approach that meets the requirements of real-world scenarios. More concretely, we introduce a Class-Adaptive linear Probe (CLAP) objective, whose balancing term is optimized via an adaptation of the general Augmented Lagrangian method tailored to this context. We comprehensively evaluate CLAP on a broad span of datasets and scenarios, demonstrating that it consistently outperforms SoTA approaches, while yet being a much more efficient alternative. Code available at" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 384, + 496, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 384, + 496, + 396 + ], + "spans": [ + { + "bbox": [ + 306, + 384, + 496, + 396 + ], + "type": "text", + "content": "https://github.com/jusiro/CLAP." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 418, + 387, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 418, + 387, + 430 + ], + "spans": [ + { + "bbox": [ + 306, + 418, + 387, + 430 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 437, + 545, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 545, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 545, + 617 + ], + "type": "text", + "content": "Large vision-language models (VLMs), such as CLIP [30], are reshaping the research landscape with their unprecedented performance. These models undergo training on an extensive dataset consisting of hundreds of millions of image-text pairs, which are leveraged via contrastive learning [30]. Once trained, VLMs offer a remarkable zero-shot performance on a wide span of visual recognition problems thanks to the rich learned representations [27, 30]. Nevertheless, the extensive hardware and data-driven resources that such training demands [3] suggest that these models can only be trained on singular occasions. Furthermore, the large scale of these networks poses important challenges when it comes to adjusting their parameters on small downstream tasks that involve only a few labeled samples, making the full fine-tuning of the entire model impractical." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 618, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 618, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 618, + 546, + 714 + ], + "type": "text", + "content": "An emerging alternative to alleviate this issue consists in fine-tuning VLMs by adding a small set of learnable parameters, whose values are optimized during the adaptation step [11, 19, 42, 45, 46]. These tunable weights can be introduced in the input space as visual [19] or text prompts [45, 46], or added in the form of adapters across the network [11, 40, 42]. 
While both families of approaches fit within the Efficient Transfer Learning (ETL) literature," + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23681" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": "prompt learning still requires backpropagating the gradients through the entire network. Thus, besides introducing a burden on resource reuse, these methods preclude black-box adaptation, introducing a potential concern about leaking the source data, which is paramount in privacy-oriented applications. In contrast, strategies based on adapters only need gradients on the extra set of parameters, typically in the last layer, avoiding costly fine-tuning processes and data leakage, yet yielding state-of-the-art performance [24, 40]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 182, + 289, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 182, + 289, + 386 + ], + "spans": [ + { + "bbox": [ + 46, + 182, + 289, + 386 + ], + "type": "text", + "content": "Despite the progress observed in adapter-based methods for fine-tuning VLMs under the few-shot learning paradigm, improving the performance on the target task while preserving their generalization capabilities remains still a challenge [46]. We argue that this is likely due to the severe overfitting to the support set samples employed during few-shot adaptation, which significantly deviates the updated class prototypes from the zero-shot prototypes initially provided by the pre-trained model. In fact, popular adapter-based ETL strategies, such as CLIP-Adapter [11] and TIP-Adapter [42], carefully adjust the model-specific hyperparameters, in conjunction with other key hyperparameters related to the learning scheduler, to control the trade-off between initial zero-shot inference and the integration of new information from the support set. Furthermore, recent evidence [24] suggests that these works apparently use the large-scale test set to adjust their hyperparameters." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 388, + 289, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 388, + 289, + 592 + ], + "spans": [ + { + "bbox": [ + 46, + 388, + 289, + 592 + ], + "type": "text", + "content": "A significant limitation becomes evident in that these hyperparameters, when optimized for one specific task, do not exhibit strong generalizability to other tasks, as illustrated in Fig. 1. Indeed, state-of-the-art (SoTA) methods struggle to find a homogeneous configuration that outperforms a simple well-initialized Linear Probing (LP) adaptation. Notably, in a realistic adaptation scenario (Fig. 1), we can observe dramatic performance degradations, up to " + }, + { + "bbox": [ + 46, + 388, + 289, + 592 + ], + "type": "inline_equation", + "content": "21\\%" + }, + { + "bbox": [ + 46, + 388, + 289, + 592 + ], + "type": "text", + "content": ", compared to this simple baseline. These practices virtually bias the model selection process, as assuming access to a significantly larger set of labeled samples, and adjusting the model hyperparameters in a case-specific manner, is not only unrealistic but also impractical (grid-search must be done for each case). Thus, we argue that if an ETL method's model selection strategy is not solely based on the support samples, the method is incomplete, and impractical for real-world few-shot adaptation problems." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "content": "In this work, we seek to redirect the efforts on few-shot ETL to a more strict, but realistic scenario, in which only the support samples are accessible during training. The absence of an evaluation subset urges novel adapters to include a model selection strategy, robust across a large spectrum of tasks. Interestingly, we empirically observed that a carefully designed Linear Probing (ZS-LP), whose weights are initialized with the zero-shot prototypes from CLIP, is a strong baseline that outperforms more convoluted ETL solutions. To further improve the baseline ZS-LP and opti" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": "mize the trade-off between initial zero-shot representations and updated class prototypes on novel tasks, we propose penalizing large deviations from the original zero-shot prototypes during adaptation. The resulting learning objective, however, presents two major issues. First, the penalty included to control the deviation between original and updated prototypes is a scalar value, uniform across all classes, which can detrimentally affect the model's performance in the presence of harder-to-learn classes. Second, the penalty balancing weight must be set using a validation set, which juxtaposes with our validation-free scenario. To address these limitations, we propose CLass-Adaptive linear Probe (CLAP), which is based on an Augmented Lagrangian Multiplier approach. 
We can summarize our contributions as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 241, + 547, + 648 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 306, + 241, + 545, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 241, + 545, + 324 + ], + "spans": [ + { + "bbox": [ + 306, + 241, + 545, + 324 + ], + "type": "text", + "content": "- We empirically observe that SoTA few-shot ETL adapters require careful adjustment of a set of key hyperparameters for each task, which is unrealistic and impractical in real-world settings. Surprisingly, if a fixed configuration is adopted across tasks, these methods are likely to substantially underperform a simple Linear Probing strategy initialized with the zero-shot prototypes from CLIP." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 325, + 546, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 325, + 546, + 540 + ], + "spans": [ + { + "bbox": [ + 306, + 325, + 546, + 540 + ], + "type": "text", + "content": "- We propose a principled solution to tackle the trade-off between original and updated class prototypes in Linear Probing, which integrates a penalty term to penalize large deviations from zero-shot prototypes. To address the underlying challenges from the resulting constrained optimization problem, we present a modified Augmented Lagrangian Multiplier (ALM) method. This alleviates the need of having to fine-tune the penalty balancing weight, which is learned in the outer iteration of the optimization process. In order to adapt ALM to the presented scenario, two critical choices were made: " + }, + { + "bbox": [ + 306, + 325, + 546, + 540 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 306, + 325, + 546, + 540 + ], + "type": "text", + "content": " Leveraging class prototypes, as well as data augmentation, motivate the use of class-wise multipliers, instead of sample and class-wise multipliers as in the original ALM; " + }, + { + "bbox": [ + 306, + 325, + 546, + 540 + ], + "type": "inline_equation", + "content": "ii" + }, + { + "bbox": [ + 306, + 325, + 546, + 540 + ], + "type": "text", + "content": " In the presented scenario, there is no access to a validation set, and the only feedback available is from the support samples. Hence, we only perform one outer-step update, which can avoid potential overfitting on the support set." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 540, + 547, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 540, + 547, + 648 + ], + "spans": [ + { + "bbox": [ + 306, + 540, + 547, + 648 + ], + "type": "text", + "content": "- We provide extensive experiments to assess the performance of CLAP in the proposed scenario, including few-shot adaptation on 11 popular classification benchmarks, domain generalization, comparison to full fine-tuning methods, and ablation studies to validate our choices. As shown in Fig. 1 and in the experimental section, CLAP delivers consistent performance across different tasks with a homogeneous configuration, and largely outperforms SoTA ETL approaches in all scenarios." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 658, + 391, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 391, + 670 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 391, + 670 + ], + "type": "text", + "content": "2. 
Related work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "type": "text", + "content": "Vision-language pre-trained models. The field of machine learning is in the midst of a paradigm shift with the emerging rise of vision-language models (VLMs). These" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "23682" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "text", + "content": "networks have gained increasing popularity, especially fueled by the significant improvements achieved in computer vision and natural language processing tasks [5, 18, 30, 41]. The prevailing learning paradigm consists of a dual stream of data, which separately encodes images and their text counterparts, leveraging contrastive learning at a large scale to bridge image and text representations in the latent space. Particularly, models such as CLIP [30] and ALIGN [18] have successfully mitigated the distribution discrepancy between text and images, and have shown tremendous zero-shot capabilities on visual recognition tasks, primarily in the context of classification." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 217, + 289, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 217, + 289, + 348 + ], + "spans": [ + { + "bbox": [ + 46, + 217, + 289, + 348 + ], + "type": "text", + "content": "Full fine-tuning. A body of work proposes fine-tuning the entire VLMs to adapt to a specific task [12, 22, 36]. This strategy, however, presents several drawbacks. Concretely, fine-tuning increases the complexity of the model being optimized, makes the optimization process more time-consuming compared to ETL methods, and requires access to the backbone weights, which does not allow a black-box adaptation. Furthermore, full fine-tuning methods typically tend to overfit when trained on small datasets, requiring a large corpus of labeled data for the target task, which may be impractical in many real-world scenarios." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 350, + 289, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 350, + 289, + 613 + ], + "spans": [ + { + "bbox": [ + 46, + 350, + 289, + 613 + ], + "type": "text", + "content": "Efficient transfer leaning attempts to address these issues by updating a small set of learnable parameters and leveraging a limited amount of annotated samples. Current ETL literature can be categorized into Prompt Learning [20, 38, 39, 45-47] and Adapter-based [11, 40, 42] approaches. Prompt Learning represents a recent advancement in the realm of natural language processing [23, 43], which has been recently adopted with success in VLMs. In these methods, only the text tokens provided to the model are optimized. 
Nevertheless, these techniques require long training steps due to backpropagating the gradient over the entire network, which juxtaposes with the spirit of efficient adaptation. Furthermore, black-box adaptation is also not possible in prompt learning. Adapter-based methods, in contrast, offer a much lighter alternative as only a small subset of parameters, typically at the latest layers, are adjusted. For example, CLIP-Adapter [11] integrates a two-layer MLP to modify the visual embedding generated by CLIP. In TIP-Adapter [42], the visual prototypes obtained from the few-shot support samples are leveraged to compute the similarity with the visual embedding of the test image, which is later used to modify the CLIP visual embedding." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 625, + 133, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 625, + 133, + 639 + ], + "spans": [ + { + "bbox": [ + 47, + 625, + 133, + 639 + ], + "type": "text", + "content": "3. Preliminaries" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 646, + 261, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 646, + 261, + 659 + ], + "spans": [ + { + "bbox": [ + 47, + 646, + 261, + 659 + ], + "type": "text", + "content": "3.1. Contrastive vision-language pre-training" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": "Large-scale VLMs, such as CLIP [30], are trained on large heterogeneous datasets, encouraging image and text representations to correlate in a joint embedding space. Formally, CLIP comprises a vision encoder, " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "f_{\\theta}(\\cdot)" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": ", and a text encoder," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "type": "inline_equation", + "content": "f_{\\phi}(\\cdot)" + }, + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "type": "text", + "content": ", each aiming at learning a rich representation of their data points. These points are projected in an " + }, + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "type": "text", + "content": "-normalized shared embedding space, yielding the corresponding visual " + }, + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "type": "text", + "content": " and text " + }, + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "type": "text", + "content": " embeddings. The whole network is optimized to maximize the similarity between the projected embeddings of paired images and texts, using a contrastive loss." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 150, + 400, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 150, + 400, + 163 + ], + "spans": [ + { + "bbox": [ + 305, + 150, + 400, + 163 + ], + "type": "text", + "content": "3.2. Transferability" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "spans": [ + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "text", + "content": "Zero-shot inference. For a particular downstream image classification task, CLIP-based models are able to provide predictions based on the similarity between category prompts, i.e., text descriptions of target classes, and testing images. Given a set of " + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "text", + "content": " categories, and an ensemble of " + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "text", + "content": " text prompts for each one, " + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "inline_equation", + "content": "\\{\\{T_{n,c}\\}_{n=1}^{N}\\}_{c=1}^{C}" + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "text", + "content": ", a common practice is to obtain a zero-shot prototype for each target category by computing the center of the " + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "text", + "content": "-normalized text embeddings for each class, " + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_c = \\frac{1}{N}\\sum_{n=1}^{N}f_\\phi(T_{n,c})" + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "text", + "content": ". 
Thus, for a given query image " + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "text", + "content": ", the zero-shot prediction is obtained from the softmax cosine similarity between the vision embedding " + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "inline_equation", + "content": "\\mathbf{v} = f_\\theta(\\mathbf{x})" + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "text", + "content": ", and category prototypes " + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_c" + }, + { + "bbox": [ + 304, + 168, + 547, + 313 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 369, + 326, + 545, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 326, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 369, + 326, + 545, + 357 + ], + "type": "interline_equation", + "content": "\\hat {y} _ {c} = \\frac {\\exp \\left(\\mathbf {v} \\cdot \\mathbf {t} _ {c} ^ {\\top} / \\tau\\right)}{\\sum_ {i = 1} ^ {C} \\exp \\left(\\mathbf {v} \\cdot \\mathbf {t} _ {i} ^ {\\top} / \\tau\\right)}, \\tag {1}", + "image_path": "88d925c3418e9653bb7d3d8ca5bba15805d8f2412930d0b4a2159af2d0a6d70e.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 359, + 547, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 359, + 547, + 397 + ], + "spans": [ + { + "bbox": [ + 304, + 359, + 547, + 397 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 359, + 547, + 397 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 359, + 547, + 397 + ], + "type": "text", + "content": " is a temperature parameter learned during the pretraining stage, and " + }, + { + "bbox": [ + 304, + 359, + 547, + 397 + ], + "type": "inline_equation", + "content": "\\mathbf{v} \\cdot \\mathbf{t}^{\\top}" + }, + { + "bbox": [ + 304, + 359, + 547, + 397 + ], + "type": "text", + "content": " the dot product operator, which is equivalent to cosine similarity, as vectors are " + }, + { + "bbox": [ + 304, + 359, + 547, + 397 + ], + "type": "inline_equation", + "content": "\\ell_{2}" + }, + { + "bbox": [ + 304, + 359, + 547, + 397 + ], + "type": "text", + "content": "-normalized." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "spans": [ + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "text", + "content": "Few-shot learning. This scenario assumes access to limited supervisory information on the downstream tasks, in the form of a few examples for each target category, so-called shots. 
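As an illustration of the zero-shot inference in Eq. (1), the sketch below assumes the N prompt embeddings per class and the query image embedding have already been extracted with the pre-trained encoders; names and the temperature value are placeholders rather than CLIP's released code.

```python
import torch
import torch.nn.functional as F

def zero_shot_scores(prompt_emb, image_emb, tau=0.01):
    """Zero-shot class probabilities as in Eq. (1) (illustrative sketch).

    prompt_emb: (C, N, d) text embeddings, N prompts per class.
    image_emb:  (d,) visual embedding of the query image.
    """
    # Class prototypes t_c: mean of the l2-normalized prompt embeddings per class.
    t = F.normalize(prompt_emb, dim=-1).mean(dim=1)   # (C, d)
    v = F.normalize(image_emb, dim=-1)                # (d,)
    logits = t @ v / tau                              # scaled cosine similarities
    return logits.softmax(dim=0)                      # \hat{y}_c over the C classes
```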
Formally, we denote a support set, " + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "inline_equation", + "content": "S = \\{(\\mathbf{x}^{(m)},\\mathbf{y}^{(m)})\\}_{m = 1}^{M = K\\times C}" + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "text", + "content": ", composed of " + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "text", + "content": " images for each target category, such that " + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "text", + "content": " takes a small value, e.g., " + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "inline_equation", + "content": "K\\in \\{1,2,4,8,16\\}" + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "text", + "content": ", and where " + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "inline_equation", + "content": "\\mathbf{y}\\in \\{0,1\\} ^C" + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "text", + "content": " is the corresponding one-hot label for a given image " + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 409, + 547, + 518 + ], + "type": "text", + "content": ". The objective is to adapt the pre-trained model using this limited support set." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 522, + 518, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 522, + 518, + 536 + ], + "spans": [ + { + "bbox": [ + 305, + 522, + 518, + 536 + ], + "type": "text", + "content": "3.3. Efficient transfer learning with adapters" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 541, + 547, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 541, + 547, + 627 + ], + "spans": [ + { + "bbox": [ + 304, + 541, + 547, + 627 + ], + "type": "text", + "content": "In their general form, ETL methods based on adapters learn a set of transformations over the pre-trained features " + }, + { + "bbox": [ + 304, + 541, + 547, + 627 + ], + "type": "inline_equation", + "content": "(\\mathbf{v}^{\\prime},\\mathbf{t}^{\\prime} = f_{\\psi}(\\mathbf{v},\\mathbf{t}))" + }, + { + "bbox": [ + 304, + 541, + 547, + 627 + ], + "type": "text", + "content": " , parameterized by the so-called adapter " + }, + { + "bbox": [ + 304, + 541, + 547, + 627 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 304, + 541, + 547, + 627 + ], + "type": "text", + "content": " , which produces softmax scores for the new tasks following Eq. (1). 
The adapter " + }, + { + "bbox": [ + 304, + 541, + 547, + 627 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 304, + 541, + 547, + 627 + ], + "type": "text", + "content": " can be optimized by minimizing the popular cross-entropy (CE) loss, " + }, + { + "bbox": [ + 304, + 541, + 547, + 627 + ], + "type": "inline_equation", + "content": "\\mathcal{H}(\\mathbf{y},\\hat{\\mathbf{y}}) = -\\sum_{c = 1}^{C}y_{c}\\log \\hat{y}_{c}" + }, + { + "bbox": [ + 304, + 541, + 547, + 627 + ], + "type": "text", + "content": " , over the support set samples:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 366, + 633, + 545, + 666 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 366, + 633, + 545, + 666 + ], + "spans": [ + { + "bbox": [ + 366, + 633, + 545, + 666 + ], + "type": "interline_equation", + "content": "\\min _ {\\psi} \\frac {1}{M} \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right). \\tag {2}", + "image_path": "c67db423eb9e4ef78b26db45960bb80b6c3b92327f732564c1de7cc0bc0806d6.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 671, + 522, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 671, + 522, + 684 + ], + "spans": [ + { + "bbox": [ + 305, + 671, + 522, + 684 + ], + "type": "text", + "content": "3.4. Pitfalls of existing few-shot ETL methods" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 689, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 689, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 689, + 547, + 715 + ], + "type": "text", + "content": "Recent ETL methods tailored to VLMs focus on enhancing the supervision provided by the support samples with" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "23683" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "text", + "content": "priors learned by the VLMs at the task at hand. The pretrained model gathers robust knowledge and is able to align visual and textual concepts. Retaining this prior knowledge can therefore produce more robust adapters, able to generalize beyond the specific bias introduced in the few support samples, to more general concepts. In this context, the zero-shot prototypes from CLIP act as a proxy to initialize the learning procedure into a reliable region. For instance, CLIP-Adapter [11] maintains the zero-shot prototypes based inference as in Eq. (1), but includes a residual multi-layered perceptron to modify visual features, such as " + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "inline_equation", + "content": "\\mathbf{v}' = \\mathbf{v} + \\alpha_{\\mathrm{r}}f_{\\psi}(\\mathbf{v})" + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "text", + "content": ". 
TIP-Adapter [42] includes an additional complexity layer, by combining the similarity of the zero-shot prototypes with a weighted similarity to the support samples, " + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "inline_equation", + "content": "f_{\\psi}(\\cdot ,\\beta)" + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "text", + "content": ", controlled by the hyperparameter " + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "text", + "content": ", such that the predicted logits are " + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "inline_equation", + "content": "\\mathbf{l}_c = \\alpha_{\\mathrm{tipA}}f_{\\psi}(\\mathbf{v},\\beta) + \\mathbf{v}\\cdot \\mathbf{t}_c^\\top /\\tau" + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "text", + "content": ". Finally, TaskRes [40] learns a modification of the initial zero-shot prototypes, " + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{TR}" + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "text", + "content": ", using the support samples. The divergence between the initial and final prototypes is controlled by a residual ratio: " + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "inline_equation", + "content": "\\mathbf{t}' = \\mathbf{t} + \\alpha_{\\mathrm{TR}}\\mathbf{w}_{TR}" + }, + { + "bbox": [ + 47, + 72, + 289, + 337 + ], + "type": "text", + "content": ". Nevertheless, these methods lack a model selection strategy to set these hyperparameters (See Supp. Sec. A for details)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 346, + 162, + 360 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 346, + 162, + 360 + ], + "spans": [ + { + "bbox": [ + 47, + 346, + 162, + 360 + ], + "type": "text", + "content": "4. Proposed approach" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 365, + 192, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 365, + 192, + 379 + ], + "spans": [ + { + "bbox": [ + 47, + 365, + 192, + 379 + ], + "type": "text", + "content": "4.1. Revisiting Linear Probing" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 384, + 287, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 384, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 46, + 384, + 287, + 456 + ], + "type": "text", + "content": "The most straightforward approach used to adapt VLMs is Linear Probing [30], which refers to fitting a multiclass logistic regression linear classifier on top of the pre-trained features. 
Formally, the objective is to learn a set of class-wise prototypes, " + }, + { + "bbox": [ + 46, + 384, + 287, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_c" + }, + { + "bbox": [ + 46, + 384, + 287, + 456 + ], + "type": "text", + "content": ", to provide softmax class scores for a given visual embedding " + }, + { + "bbox": [ + 46, + 384, + 287, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 46, + 384, + 287, + 456 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 108, + 464, + 287, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 464, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 108, + 464, + 287, + 495 + ], + "type": "interline_equation", + "content": "\\hat {y} _ {c} = \\frac {\\exp \\left(\\mathbf {v} \\cdot \\mathbf {w} _ {c} ^ {\\top} / \\tau\\right)}{\\sum_ {i = 1} ^ {C} \\exp \\left(\\mathbf {v} \\cdot \\mathbf {w} _ {i} ^ {\\top} / \\tau\\right)}. \\tag {3}", + "image_path": "796f945ecfbd155266494892de8056c8ccb14dd4f37f3189e93f0f96cb346928.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 498, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 289, + 714 + ], + "type": "text", + "content": "The " + }, + { + "bbox": [ + 46, + 498, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{\\mathrm{c}}" + }, + { + "bbox": [ + 46, + 498, + 289, + 714 + ], + "type": "text", + "content": " prototypes can be trained to minimize the cross-entropy loss on the support samples, as in Eq. (2), using standard SGD. Besides, a common practice in ETL is to regularize the trained weights [24, 30, 40] by minimizing its " + }, + { + "bbox": [ + 46, + 498, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\ell_{2}" + }, + { + "bbox": [ + 46, + 498, + 289, + 714 + ], + "type": "text", + "content": "-norm with an additional term, weighted by an empirically-optimized non-negative balancing term " + }, + { + "bbox": [ + 46, + 498, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\lambda_{wd}" + }, + { + "bbox": [ + 46, + 498, + 289, + 714 + ], + "type": "text", + "content": ". Despite its limited performance shown for few-shot adaptation [11, 30], we believe that this requires further exploration, as LP is a lightweight adaptation strategy, especially convenient due to its convexity during optimization. In this work, we present an updated view of Linear Probing. First, the class weights are initialized using the CLIP zero-shot prototypes, as SoTA ETL methods do [11, 40, 42]. Second, we replace the weight decay in the loss function and explicitly perform an " + }, + { + "bbox": [ + 46, + 498, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\ell_{2}" + }, + { + "bbox": [ + 46, + 498, + 289, + 714 + ], + "type": "text", + "content": "-normalization of the prototypes after each update, to exactly meet the pre-training scenario during adaptation, inspired by [12]. 
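A toy sketch of this zero-shot-initialized linear probe is given below; it follows the two ingredients just described (zero-shot initialization and explicit ℓ2-normalization of the prototypes after each update), keeps the temperature scaling of Eq. (3), and uses placeholder optimizer settings rather than the authors' exact configuration.

```python
import torch
import torch.nn.functional as F

def fit_zs_lp(text_protos, feats, labels, tau, epochs=300, lr=0.1):
    """Zero-shot-initialized linear probing (illustrative sketch, not the authors' code).

    text_protos: (C, d) zero-shot prototypes t_c used as initialization.
    feats:       (M, d) l2-normalized support features.
    labels:      (M,) integer class labels of the support set.
    """
    w = F.normalize(text_protos, dim=-1).detach().clone().requires_grad_(True)
    opt = torch.optim.SGD([w], lr=lr, momentum=0.9)
    for _ in range(epochs):
        logits = feats @ w.t() / tau             # Eq. (3): scaled cosine similarities
        loss = F.cross_entropy(logits, labels)   # Eq. (2): CE over the support set
        opt.zero_grad()
        loss.backward()
        opt.step()
        with torch.no_grad():                    # explicit l2-normalization after each update
            w.copy_(F.normalize(w, dim=-1))
    return w.detach()
```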
Similarly, cosine similarity is also scaled with CLIP's pre-trained temperature" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "content": ". Last, we incorporate data augmentation, usually not included in LP. We refer to this updated Linear Probing version for vision-language models as ZS-LP1. Interestingly, ZS-LP serves as a strong baseline (see Tab. 1), which does not require adjusting specific hyperparameters per task." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 140, + 463, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 140, + 463, + 152 + ], + "spans": [ + { + "bbox": [ + 305, + 140, + 463, + 152 + ], + "type": "text", + "content": "4.2. Constrained Linear Probing" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 158, + 545, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 158, + 545, + 278 + ], + "spans": [ + { + "bbox": [ + 304, + 158, + 545, + 278 + ], + "type": "text", + "content": "Albeit a well-initialized Linear Probing offers a strong baseline for efficient transfer learning, the updated prototypes might deviate from the initial regions offering strong generalization. This is especially the case in the few-shot setting, where the few provided support samples might be under-representative and contain specific biases that produce spurious correlations, hence harming the generalization after adaptation [34, 44]. Thus, to retain the strong basis provided by the VLM model, and avoid prototype degradation, we resort to a constrained formulation of the loss in Eq. (2)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 293, + 545, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 293, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 304, + 293, + 545, + 365 + ], + "type": "text", + "content": "Retaining prior knowledge. A direct form to avoid prototype degradation from zero-shot points is to constrain the cross-entropy minimization to enforce the resulting prototypes to remain close to the initial solution (i.e., initial set of prototypes " + }, + { + "bbox": [ + 304, + 293, + 545, + 365 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = [\\mathbf{t}_1,\\dots ,\\mathbf{t}_c]" + }, + { + "bbox": [ + 304, + 293, + 545, + 365 + ], + "type": "text", + "content": "). 
Specifically, this constrained optimization problem can be defined as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 361, + 374, + 545, + 407 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 374, + 545, + 407 + ], + "spans": [ + { + "bbox": [ + 361, + 374, + 545, + 407 + ], + "type": "interline_equation", + "content": "\\min _ {\\mathcal {W}} \\quad \\frac {1}{M} \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) \\tag {4}", + "image_path": "e53abadbf2c188bec50b4b876b3557a7ad08fd48cdc637007988821f106f80f0.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 356, + 409, + 495, + 422 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 356, + 409, + 495, + 422 + ], + "spans": [ + { + "bbox": [ + 356, + 409, + 495, + 422 + ], + "type": "interline_equation", + "content": "\\begin{array}{l l} \\text {s . t .} & \\mathbf {w} _ {c} = \\mathbf {t} _ {c} \\quad \\forall c \\in \\{1, \\dots , C \\}, \\end{array}", + "image_path": "33410d05ccacc685e10b09df4f1ffad1abe706c6ba8955e03f4b6328859ed869.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "inline_equation", + "content": "\\mathcal{W} = [\\mathbf{w}_1, \\dots, \\mathbf{w}_C]" + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "content": " the set of learnable class prototypes. We can approximate the minimum of the constrained problem in Eq. (4) by a penalty-based optimization approach, transforming the above formulation into an unconstrained problem, and using an " + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "content": "-penalty between the class prototypes and the set of zero-shot anchors:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 312, + 508, + 545, + 551 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 508, + 545, + 551 + ], + "spans": [ + { + "bbox": [ + 312, + 508, + 545, + 551 + ], + "type": "interline_equation", + "content": "\\min _ {\\mathcal {W}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\lambda \\sum_ {m = 1} ^ {M} \\sum_ {c = 1} ^ {C} \\left\\| \\mathbf {t} _ {c} - \\mathbf {w} _ {c} ^ {(m)} \\right\\| _ {2} ^ {2}, \\tag {5}", + "image_path": "d2fa4437d350971d9da07bb176c7e3fe7bf85b505310ab11e0cd3ab35d7680fc.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 552, + 545, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 552, + 545, + 613 + ], + "spans": [ + { + "bbox": [ + 304, + 552, + 545, + 613 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 552, + 545, + 613 + ], + "type": "inline_equation", + "content": "\\lambda \\in \\mathbb{R}_+" + }, + { + "bbox": [ + 304, + 552, + 545, + 613 + ], + "type": "text", + "content": " is a scalar weight controlling the contribution of the corresponding penalty. 
Note that " + }, + { + "bbox": [ + 304, + 552, + 545, + 613 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_c^{(m)}" + }, + { + "bbox": [ + 304, + 552, + 545, + 613 + ], + "type": "text", + "content": " is the optimal class prototype for the support sample " + }, + { + "bbox": [ + 304, + 552, + 545, + 613 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 552, + 545, + 613 + ], + "type": "text", + "content": " that minimizes the left term. For clarity in the presentation, we have omitted the normalization by the cardinality of each set." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 628, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 628, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 628, + 545, + 676 + ], + "type": "text", + "content": "Sample and class-specific constraints. The associated constrained problem in Eq. (4) is approximated by an unconstrained formulation, which uses a single uniform penalty without considering individual data samples or" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "text", + "content": "1Although the recent work in [24] explores some of these LP improvements, they still resort to a weight-decay regularization of the LP parameters, whose optimum relative weight is found in a few-shot validation set." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "23684" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "content": "classes. Certainly, all samples and categories within a given dataset may indeed present different intrinsic learning challenges. Thus, the problem in Eq. (5) is not solved accurately. A better alternative would consist in integrating multiple penalty weights " + }, + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "content": ", one for each sample and class, producing a set of penalty weights " + }, + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "\\Lambda \\in \\mathbb{R}_{+}^{M \\times C}" + }, + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "content": ". 
The resulting optimization problem can then be defined as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 175, + 287, + 217 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 175, + 287, + 217 + ], + "spans": [ + { + "bbox": [ + 51, + 175, + 287, + 217 + ], + "type": "interline_equation", + "content": "\\min _ {\\mathcal {W}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\sum_ {m = 1} ^ {M} \\sum_ {c = 1} ^ {C} \\boldsymbol {\\Lambda} _ {m c} \\left\\| \\mathbf {t} _ {c} - \\mathbf {w} _ {c} ^ {(m)} \\right\\| _ {2} ^ {2}. \\tag {6}", + "image_path": "281db390717fd107cfb83c6a998cf460dacbfc2c34d1bba1070b4fd9a8f74a7e.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "spans": [ + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "text", + "content": "Now, from an optimization standpoint, if we suppose that there exists an optimal set of class-prototypes " + }, + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "inline_equation", + "content": "\\mathcal{W}^*" + }, + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "text", + "content": " for the problem presented in Eq. (4), there also exists " + }, + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "inline_equation", + "content": "\\Lambda^{*}\\in \\mathbb{R}_{+}^{M\\times C}" + }, + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "inline_equation", + "content": "(\\mathcal{W}^{*},\\Lambda^{*})" + }, + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "text", + "content": " represents a saddle point of the Lagrangian associated to Eq. (4). In this scenario, " + }, + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "inline_equation", + "content": "\\Lambda^{*}" + }, + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "text", + "content": " are the Lagrange multipliers of the presented problem, and is intuitive to consider " + }, + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "inline_equation", + "content": "\\Lambda = \\Lambda^{*}" + }, + { + "bbox": [ + 46, + 218, + 287, + 302 + ], + "type": "text", + "content": " as the best choice to solve Eq. (6)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 302, + 287, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 302, + 287, + 445 + ], + "spans": [ + { + "bbox": [ + 46, + 302, + 287, + 445 + ], + "type": "text", + "content": "Nevertheless, using the Lagrange multipliers " + }, + { + "bbox": [ + 46, + 302, + 287, + 445 + ], + "type": "inline_equation", + "content": "\\Lambda^{*}" + }, + { + "bbox": [ + 46, + 302, + 287, + 445 + ], + "type": "text", + "content": " as the weights for the penalties in Eq. (6) may not be feasible in practice. In particular, a number of conventional strategies employed to train deep neural networks hinder straightforward minimization. First, the use of mini-batch gradient descent averages the updated prototypes for every single observation into a mean prototype per class, making a sample-wise constraint hard to achieve. Furthermore, performing data augmentation over the support samples may yield distinct penalty weights for the augmented versions, which could be harder or easier to classify than their original counterparts." 
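As a compact illustration of the penalized objective in Eq. (5), the sketch below adds the ℓ2 penalty toward the zero-shot anchors on top of the support-set cross-entropy; `lam` is a single placeholder weight, and replacing it with a per-class vector gives the class-wise relaxation introduced next.

```python
import torch
import torch.nn.functional as F

def penalized_objective(w, feats, labels, text_protos, tau, lam=1.0):
    """Cross-entropy plus an l2 penalty toward the zero-shot prototypes (Eq. (5) sketch).

    w:           (C, d) learnable class prototypes.
    feats:       (M, d) support features; labels: (M,) integer labels.
    text_protos: (C, d) fixed zero-shot prototypes t_c.
    lam:         penalty weight; a (C,) tensor here gives class-wise weights as in Eq. (7).
    """
    logits = feats @ w.t() / tau
    ce = F.cross_entropy(logits, labels)
    penalty = ((text_protos - w) ** 2).sum(dim=-1)   # ||t_c - w_c||_2^2, shape (C,)
    return ce + (lam * penalty).sum()
```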
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 445, + 287, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 445, + 287, + 471 + ], + "spans": [ + { + "bbox": [ + 47, + 445, + 287, + 471 + ], + "type": "text", + "content": "To alleviate the aforementioned challenges, we propose to relax the sample-wise penalties, which results in solving:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 488, + 287, + 521 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 488, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 61, + 488, + 287, + 521 + ], + "type": "interline_equation", + "content": "\\min _ {\\mathcal {W}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\sum_ {c = 1} ^ {C} \\lambda_ {c} \\left\\| \\mathbf {t} _ {c} - \\mathbf {w} _ {c} \\right\\| _ {2} ^ {2}, \\tag {7}", + "image_path": "6abca0d48369b4fc6ba7617f2009683e202981ef396019a06b8033893fc7d3a5.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "spans": [ + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "inline_equation", + "content": "\\lambda \\in \\mathbb{R}_+^C" + }, + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "text", + "content": " is a set of " + }, + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "text", + "content": " class-wise penalty weights. While the problem complexity has been reduced by removing sample-wise penalty weights, we still need to choose " + }, + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "text", + "content": " weights for the class-wise penalties. This poses a challenge in the optimization, particularly for datasets that contain a large number of categories, such as ImageNet [8] (" + }, + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "inline_equation", + "content": "C = 1000" + }, + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "text", + "content": "), where properly selecting the penalty weights " + }, + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "inline_equation", + "content": "\\lambda \\in \\mathbb{R}_+^C" + }, + { + "bbox": [ + 46, + 531, + 287, + 651 + ], + "type": "text", + "content": " can be a laborious process. Furthermore, choosing these values \"by hand\" juxtaposes with our goal of providing a validation-free solution for ETL." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 658, + 286, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 658, + 286, + 672 + ], + "spans": [ + { + "bbox": [ + 47, + 658, + 286, + 672 + ], + "type": "text", + "content": "4.3. Class Adaptive Constraint for Linear Probing" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "content": "General Augmented Lagrangian. Augmented Lagrangian Multiplier (ALM) methods present an appealing alternative for learning the penalty weights. 
These popular methods" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 72, + 546, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 546, + 120 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 546, + 120 + ], + "type": "text", + "content": "in optimization, which solve a constrained problem by the interplay of penalties and primal-dual steps, present well-known advantages [1, 32]. Formally, we can define a general constrained optimization problem as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 332, + 138, + 545, + 155 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 138, + 545, + 155 + ], + "spans": [ + { + "bbox": [ + 332, + 138, + 545, + 155 + ], + "type": "interline_equation", + "content": "\\min _ {x} \\quad g (x) \\quad \\text {s . t .} \\quad h _ {i} (x) \\leq 0, \\quad i = 1, \\dots , n \\tag {8}", + "image_path": "ebd39d8497f0aa072f83b2f3f88a1e1593f9aa4146f7406a0d358cfd884b04ba.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 159, + 547, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 159, + 547, + 209 + ], + "spans": [ + { + "bbox": [ + 304, + 159, + 547, + 209 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 304, + 159, + 547, + 209 + ], + "type": "inline_equation", + "content": "g: \\mathbb{R}^d \\to \\mathbb{R}" + }, + { + "bbox": [ + 304, + 159, + 547, + 209 + ], + "type": "text", + "content": " the objective function and " + }, + { + "bbox": [ + 304, + 159, + 547, + 209 + ], + "type": "inline_equation", + "content": "h_i: \\mathbb{R}^d \\to \\mathbb{R}, i = 1, \\dots, n" + }, + { + "bbox": [ + 304, + 159, + 547, + 209 + ], + "type": "text", + "content": " the set of constraint functions. This problem is generally tackled by solving a succession of " + }, + { + "bbox": [ + 304, + 159, + 547, + 209 + ], + "type": "inline_equation", + "content": "j \\in \\mathbb{N}" + }, + { + "bbox": [ + 304, + 159, + 547, + 209 + ], + "type": "text", + "content": " unconstrained problems, each solved approximately w.r.t " + }, + { + "bbox": [ + 304, + 159, + 547, + 209 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 159, + 547, + 209 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 223, + 545, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 223, + 545, + 255 + ], + "spans": [ + { + "bbox": [ + 317, + 223, + 545, + 255 + ], + "type": "interline_equation", + "content": "\\min _ {x, \\lambda} \\quad \\mathcal {L} ^ {(j)} (x) = g (x) + \\sum_ {i = 1} ^ {n} P \\left(h _ {i} (x), \\rho_ {i} ^ {(j)}, \\lambda_ {i} ^ {(j)}\\right), \\tag {9}", + "image_path": "b584b8cae8a6255541b17a5baa3878364563471dd6884f46b7fb892e388caa90.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "spans": [ + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "inline_equation", + "content": "P:\\mathbb{R}\\times \\mathbb{R}_{+ + }\\times \\mathbb{R}_{+ + }\\to \\mathbb{R}" + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "text", + "content": " a penalty-Lagrangian function, whose derivative w.r.t. 
its first variable " + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "inline_equation", + "content": "P^{\\prime}(z,\\rho ,\\lambda)\\equiv" + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "inline_equation", + "content": "\\frac{\\partial}{\\partial z} P(z,\\rho ,\\lambda)" + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "text", + "content": " exists, is positive and continuous for all " + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "inline_equation", + "content": "z\\in \\mathbb{R}" + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "inline_equation", + "content": "(\\rho ,\\lambda)\\in (\\mathbb{R}_{+ + })^{2}" + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "text", + "content": " . The set of axioms that any penalty function " + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "text", + "content": " must satisfy [2] are detailed in Supp. Sec. B. Furthermore, " + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "inline_equation", + "content": "\\pmb {\\rho}^{(j)} = (\\rho_i^{(j)})_{1\\leq i\\leq n}\\in \\mathbb{R}_{+ + }^n" + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "inline_equation", + "content": "\\pmb{\\lambda}^{(j)} =" + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "inline_equation", + "content": "(\\lambda_i^{(j)})_{1\\leq i\\leq n}\\in \\mathbb{R}_{+ + }^n" + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "text", + "content": " denote the penalty parameters and multipliers associated to the penalty " + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "text", + "content": " at the iteration " + }, + { + "bbox": [ + 305, + 258, + 546, + 358 + ], + "type": "inline_equation", + "content": "j" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "spans": [ + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "text", + "content": "The ALM can be split into two iterations: outer iterations (indexed by " + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "text", + "content": "), where the penalty multipliers " + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "text", + "content": " and the penalty parameters " + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "text", + "content": " are updated, and the inner iterations, where " + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{(j)}" + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "text", + "content": " (Eq. (9)) is minimized using the previous solution as initialization. 
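A schematic sketch of this outer/inner interplay for a generic problem of the form of Eq. (8) is given below; it uses the PHR penalty that the paper adopts later in Eq. (13) and anticipates the multiplier update of Eq. (10). It is only an illustration of the general scheme, not the paper's implementation.

```python
import torch

def phr(z, rho, lam):
    """PHR penalty-Lagrangian (see Eq. (13))."""
    return torch.where(lam + rho * z >= 0, lam * z + 0.5 * rho * z ** 2, -lam ** 2 / (2 * rho))

def phr_prime(z, rho, lam):
    """Derivative of PHR w.r.t. its first argument, used for the multiplier update (Eq. (10))."""
    return torch.clamp(lam + rho * z, min=0.0)

def augmented_lagrangian(g, h, x0, outer=10, inner=100, lr=0.01, rho=1.0, lam0=1.0):
    """Schematic ALM solver for min g(x) s.t. h_i(x) <= 0 (Eq. (8)); illustrative only.

    g: callable returning a scalar tensor; h: callable returning a 1-D tensor of constraints.
    """
    x = x0.clone().requires_grad_(True)
    lam = torch.full_like(h(x0), lam0)          # one multiplier per constraint
    for _ in range(outer):                      # outer iterations: multipliers kept fixed
        opt = torch.optim.SGD([x], lr=lr)
        for _ in range(inner):                  # inner iterations: minimize L^(j) w.r.t. x
            loss = g(x) + phr(h(x), rho, lam).sum()
            opt.zero_grad()
            loss.backward()
            opt.step()
        with torch.no_grad():                   # outer update: lam <- P'(h(x), rho, lam)
            lam = phr_prime(h(x), rho, lam)
    return x.detach(), lam
```

In the setting of this paper, g would play the role of the support-set cross-entropy and h the prototype-deviation terms of Eq. (4); the sketch only shows the generic interplay between inner minimization and outer multiplier updates.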
In particular, the penalty multipliers " + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "inline_equation", + "content": "\\lambda^{(j)}" + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "text", + "content": " are updated to the derivative of " + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 304, + 358, + 547, + 443 + ], + "type": "text", + "content": " w.r.t. to the solution obtained during the last inner step:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 361, + 451, + 545, + 468 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 451, + 545, + 468 + ], + "spans": [ + { + "bbox": [ + 361, + 451, + 545, + 468 + ], + "type": "interline_equation", + "content": "\\lambda_ {i} ^ {(j + 1)} = P ^ {\\prime} \\left(h _ {i} (x), \\rho_ {i} ^ {(j)}, \\lambda_ {i} ^ {(j)}\\right). \\tag {10}", + "image_path": "f81ed40d64fcb65cc22f1378b3bfbb05f4b34f077851061a7c856ed4167ff43f.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 470, + 545, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 470, + 545, + 517 + ], + "spans": [ + { + "bbox": [ + 304, + 470, + 545, + 517 + ], + "type": "text", + "content": "By doing this, the penalty multipliers increase when the constraint is violated, and decrease otherwise. Thus, this strategy enables an adaptive and learnable way for determining the penalty weights." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 517, + 545, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 517, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 517, + 545, + 567 + ], + "type": "text", + "content": "Our solution. We propose to use an ALM approach to solve the problem in Eq. (7). In particular, we reformulate this problem integrating a penalty function " + }, + { + "bbox": [ + 304, + 517, + 545, + 567 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 304, + 517, + 545, + 567 + ], + "type": "text", + "content": " parameterized by " + }, + { + "bbox": [ + 304, + 517, + 545, + 567 + ], + "type": "inline_equation", + "content": "(\\rho, \\lambda) \\in \\mathbb{R}_{++}^{C} \\times \\mathbb{R}_{++}^{C}" + }, + { + "bbox": [ + 304, + 517, + 545, + 567 + ], + "type": "text", + "content": ", formally defined as:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 318, + 580, + 545, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 580, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 318, + 580, + 545, + 622 + ], + "type": "interline_equation", + "content": "\\min _ {\\mathcal {W}, \\boldsymbol {\\lambda}} \\quad \\sum_ {m = 1} ^ {M} \\mathcal {H} \\left(\\mathbf {y} ^ {(m)}, \\hat {\\mathbf {y}} ^ {(m)}\\right) + \\sum_ {c = 1} ^ {C} P \\left(\\mathbf {t} _ {c} - \\mathbf {w} _ {c}, \\rho_ {c}, \\lambda_ {c}\\right). 
\\tag {11}", + "image_path": "0025dcb1b1af5e6367775d69799b8bbcd6a2d64747051efe39b4bb9e4207f774.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 624, + 547, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 624, + 547, + 671 + ], + "spans": [ + { + "bbox": [ + 304, + 624, + 547, + 671 + ], + "type": "text", + "content": "Following our realistic validation-free scenario, the only data from which we can obtain feedback during adaptation is the support set " + }, + { + "bbox": [ + 304, + 624, + 547, + 671 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 624, + 547, + 671 + ], + "type": "text", + "content": ". Thus, the penalty multiplier for class " + }, + { + "bbox": [ + 304, + 624, + 547, + 671 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 624, + 547, + 671 + ], + "type": "text", + "content": " at epoch " + }, + { + "bbox": [ + 304, + 624, + 547, + 671 + ], + "type": "inline_equation", + "content": "j + 1" + }, + { + "bbox": [ + 304, + 624, + 547, + 671 + ], + "type": "text", + "content": " can be defined as:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 324, + 685, + 545, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 685, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 324, + 685, + 545, + 715 + ], + "type": "interline_equation", + "content": "\\lambda_ {c} ^ {(j + 1)} = \\frac {1}{| \\mathcal {S} |} \\sum_ {(\\mathbf {x}, \\mathbf {y}) \\in \\mathcal {S}} P ^ {\\prime} \\left(\\mathbf {t} _ {c} - \\mathbf {w} _ {c}, \\rho_ {c} ^ {(j)}, \\lambda_ {c} ^ {(j)}\\right). \\tag {12}", + "image_path": "3cefbd93cd105d079b119ae2a674f868b78cce63edee6076edbf5575ab4dbb84.jpg" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23685" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": "As suggested by prior work [2, 25], we employ the PHR function as penalty " + }, + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": ", defined as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 59, + 111, + 287, + 144 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 111, + 287, + 144 + ], + "spans": [ + { + "bbox": [ + 59, + 111, + 287, + 144 + ], + "type": "interline_equation", + "content": "\\operatorname {P H R} (z, \\rho , \\lambda) = \\left\\{ \\begin{array}{l l} \\lambda z + \\frac {1}{2} \\rho z ^ {2} & \\text {i f} \\quad \\lambda + \\rho z \\geq 0; \\\\ - \\frac {\\lambda^ {2}}{2 \\rho} & \\text {o t h e r w i s e .} \\end{array} \\right. 
\\tag {13}", + "image_path": "b0eb21304277e7791a7525a37cd0d24bfda0d0c5c4eaf0d3043a5f552ea9de26.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "spans": [ + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": "Nevertheless, as we empirically found in our experiments (Supp. Sec. C.3), estimating Lagrange multipliers from the support samples might overfit the training data. As we do not have access to additional data points, we follow a simple strategy, consisting in performing only one iteration of the " + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": " update. For a given target task, we rely on text embeddings as an anchor that offers a generalizable representation of concrete concepts along different visual domains. Thus, we consider the zero-shot prototypes " + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_c" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": " as the initial approximation of the problem in Eq. (12) (first inner step). Instead of initializing " + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": " randomly, which might hamper the convergence, we compute the penalty weight for a given class as the average of the zero-shot softmax scores for all support samples belonging to that class, such that " + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "inline_equation", + "content": "\\lambda_c^* = \\frac{1}{|\\mathcal{B}_c^+|}\\sum_{i\\in \\mathcal{B}_c^+}\\hat{y}_c^{(i)}" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_c^+ = \\{i|i\\in M,y_c^{(i)} = 1\\}" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": ". Note that these values are obtained by replacing " + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_c" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": " with the solution found in the inner step " + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "inline_equation", + "content": "(\\mathbf{t}_c)" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": " in Eq. (3), which indeed satisfies the constraint " + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_c = \\mathbf{t}_c" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": ", resulting in a zero penalty. Taking now the derivative w.r.t. 
" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": " of PHR, it is straightforward to see that the learned value of " + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": " after one iteration is indeed " + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "inline_equation", + "content": "\\lambda_c^*" + }, + { + "bbox": [ + 47, + 148, + 288, + 403 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 412, + 128, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 128, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 128, + 426 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 432, + 97, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 432, + 97, + 445 + ], + "spans": [ + { + "bbox": [ + 47, + 432, + 97, + 445 + ], + "type": "text", + "content": "5.1. Setup" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 450, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 450, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 450, + 287, + 713 + ], + "type": "text", + "content": "Datasets: Few-shot adaptation. We follow prior ETL literature [11, 40, 42] and benchmark all the methods on 11 datasets: Imagenet [8], Caltech101 [10], OxfordPets [29], StanfordCars [21], Flowers102 [28], Food101 [4], FGVCAircraft [26], SUN397 [37], DTD [7], EuroSAT [15], and UCF101 [33]. These cover a diverse set of computer vision classification tasks, from general objects to actions or fine-grained categories in specialized applications. To train the few-shot adapters, we randomly retrieve " + }, + { + "bbox": [ + 47, + 450, + 287, + 713 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 47, + 450, + 287, + 713 + ], + "type": "text", + "content": " shots (" + }, + { + "bbox": [ + 47, + 450, + 287, + 713 + ], + "type": "inline_equation", + "content": "K \\in \\{1, 2, 4, 8, 16\\}" + }, + { + "bbox": [ + 47, + 450, + 287, + 713 + ], + "type": "text", + "content": ") for each class. Last, for evaluation, we used the test sets provided in each dataset, with the same data splits as [40, 46]. Domain generalization capabilities. We further assess the model's robustness to domain shifts by following existing ETL works. We used ImageNet as a source domain for adaptation, and its variants as target tasks, which include: ImageNetV2 [31], ImageNet-Sketch [35], ImageNet-A [16], and ImageNet-R [17]. In this scenario, the model only sees a few labeled samples from the source domain, and target data are used exclusively for testing. In addition, we also employ this setting to motivate the use of efficient adapters vs fine-tuning the entire VLM [12, 22, 40]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 323 + ], + "type": "text", + "content": "Implementation details. All experiments are based on CLIP [30] pre-trained features, using different backbones: ResNet-50 [14] and ViT-B/16 [9] (results for other backbones in Supp. Sec. C.2). 
We resort to ResNet-50 as backbone in the ablation studies. For each downstream task we first extract all pre-trained features of the support shots and then run adaptation experiments over those. Data augmentation is applied during the feature extraction stage using random zoom, crops, and flips, following [40, 45]. The number of augmentations per support sample is set to 20. We used the same text prompts per dataset as in [40, 46]. Following our claim that using a validation set on few-shot adaptation is unrealistic, we trained ZS-LP and CLAP using the same configuration for all datasets, number of shots, and visual backbones. Concretely, we optimize the adapter for 300 epochs, using SGD optimizer with Momentum of 0.9. We use a relatively large initial learning rate of 0.1 to avoid underfitting on the support set, whose value decreases during training following a cosine decay scheduler. We ran all experiments with three different random seeds, and the results were averaged across runs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 324, + 546, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 324, + 546, + 529 + ], + "spans": [ + { + "bbox": [ + 304, + 324, + 546, + 529 + ], + "type": "text", + "content": "Baselines and adaptation protocol. We selected adapter-based methods as our main competitors based on the similarity to our approach, including Clip-Adapter [11], TIP-Adapter [42], TaskRes [40], and Cross-Modal [24]. It is important to highlight that prior works [11, 40, 42] apparently leverage either the extensive test set, or an independent additional validation subset, to adjust important hyperparameters for few-shot adaptation, such as the learning rate, training epochs, and particular parameters that control each method [24]. Nevertheless, as we exposed in Fig. 1, their performance dramatically decreases when the set of hyperparameters is not adjusted for the testing scenario. To adhere to real-world requirements, we define a strict few-shot adaptation protocol, in which no validation or test samples are available to find the best case-specific configuration for each method, and hyperparameters remain fixed across tasks (details in Supp. Sec. A.4)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 538, + 362, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 538, + 362, + 550 + ], + "spans": [ + { + "bbox": [ + 306, + 538, + 362, + 550 + ], + "type": "text", + "content": "5.2. Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "content": "Efficient transfer learning. We report in Tab. 1 the performance of adapter-based approaches averaged across 11 datasets, in the more realistic and practical validation-free experimental setting. Furthermore, for prompt-learning-based approaches, we include the results reported in prior literature, for a more comprehensive comparison. From these values, we can make interesting observations. First, a well-initialized Linear Probe, i.e., using the CLIP zero-shot weights, does not show the performance degradation discussed in prior works, and it is indeed a competitive alternative to SoTA approaches. 
Second, and more surprisingly, more complex approaches such as CLIP-Adapter, or TIP-Adapter, show a significant decline in performance com" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "23686" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "content": "pared to their original results when no validation set is available for model selection. Interestingly, TaskRes(e), which is some sort of two-stage zero-shot initialization Linear Probing with an updated text projection, also offers robust performance. Nevertheless, the absence of a detailed explanation of how the enhanced version is obtained in the original work hampers fair comparisons. Third, constraining the weights update to remain close to the zero-shot knowledge (CLAP) shows consistent improvements across different shots, especially in the very low data regime. This suggests that retaining the previous base knowledge from VLMs is important to avoid diverging because of unrepresentative shots during adaptation. Results per dataset are detailed in Supp. Fig. 8 and Supp. Tab. 9." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 49, + 319, + 282, + 475 + ], + "blocks": [ + { + "bbox": [ + 46, + 249, + 288, + 316 + ], + "lines": [ + { + "bbox": [ + 46, + 249, + 288, + 316 + ], + "spans": [ + { + "bbox": [ + 46, + 249, + 288, + 316 + ], + "type": "text", + "content": "Table 1. Comparison to state-of-the-art methods for few-shot adaptation of CLIP-based models, using ResNet-50 backbone. ETL methods are trained under the same protocol, i.e., absence of a validation set and using a fixed configuration across datasets, and results are averaged across 11 datasets. Prompt-learning methods results are directly extracted from [6, 13]. Best results in bold." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 319, + 282, + 475 + ], + "lines": [ + { + "bbox": [ + 49, + 319, + 282, + 475 + ], + "spans": [ + { + "bbox": [ + 49, + 319, + 282, + 475 + ], + "type": "table", + "html": "
MethodK=1K=2K=4K=8K=16
Prompt-learning methods
CoOp IJCV'22[46]59.5661.7866.4769.8573.33
ProGrad ICCV'23[13]62.6164.9068.4571.4174.28
PLOT ICLR'23[6]62.5965.2368.6071.2373.94
Efficient transfer learning - a.k.a Adapters
Zero-Shot ICML'21[30]57.7157.7157.7157.7157.71
Rand. Init LP ICML'21[30]30.4241.8651.6960.8467.54
CLIP-Adapter IJCV'23[11]58.4362.4666.1869.8773.35
TIP-Adapter ECCV'22[42]58.8660.3361.4963.1564.61
TIP-Adapter(f) ECCV'22[42]60.2962.2665.3268.3571.40
CrossModal-LP CVPR'23[24]62.2464.4866.6770.3673.65
TaskRes(e) CVPR'23[40]61.4465.2668.3571.6674.42
ZS-LP61.2864.8867.9871.4374.37
CLAP62.7966.0769.1372.0874.57
", + "image_path": "ef04845cc7ab5531e1e0a9bfcb616fdbd62e17647894940e67b0e36bd3b61a10.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 486, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 289, + 714 + ], + "type": "text", + "content": "Domain generalization. If adaptation is not carefully conducted, the resulting model might distort the pre-trained knowledge and underperform when new data with domain drifts is involved [22], even below the zero-shot (no adaptation) performance. Thus, evaluating the robustness of novel adapters under this scenario of domain generalization is of special interest. To do so, adapters are optimized on ImageNet using 16 shots per class, and directly evaluated on ImageNet variants. In this setting, we also assume the absence of a validation dataset, and hence all adapters are trained until convergence, using the same configuration across backbones. A summary of the results is reported in Tab. 2, while specific numbers across datasets and additional backbones are included in Supp. Tab. 10. From these experiments, we make two striking observations. First, ZS-LP is a strong baseline compared to other more complex adapters on the source domain. Even more remarkably, prior SoTA adapters, such as CLIP-Adapter or TIP-Adapter, fail to generalize to unseen domains. In" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 547, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 156 + ], + "type": "text", + "content": "deed, when using recent vision transformers, which are overtaking convolutional neural networks, none of existing adapters-based approaches outperform standard zero-shot prediction in the presence of distributional drifts. In contrast, CLAP yields the best in-distribution performance and also shows consistent improvements under domain shifts across all backbones." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 310, + 212, + 541, + 369 + ], + "blocks": [ + { + "bbox": [ + 304, + 164, + 547, + 209 + ], + "lines": [ + { + "bbox": [ + 304, + 164, + 547, + 209 + ], + "spans": [ + { + "bbox": [ + 304, + 164, + 547, + 209 + ], + "type": "text", + "content": "Table 2. Robustness to domain shifts. Adapters are adjusted on ImageNet and evaluated at out-of-distribution generalization on 4 ImageNet shifts. Bold indicates best performance. Differences with respect to no adaptation (a.k.a zero-shot) are highlighted." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 212, + 541, + 369 + ], + "lines": [ + { + "bbox": [ + 310, + 212, + 541, + 369 + ], + "spans": [ + { + "bbox": [ + 310, + 212, + 541, + 369 + ], + "type": "table", + "html": "
MethodSource (Imagenet)Target (Average)
ResNet-50Zero-Shot ICML'21[30]60.3540.61
Rand. Init LP ICML'21[30]52.24(−8.11)↓24.61(−16.00)↓
CLIP-Adapter IJCV'23[11]59.02(−1.33)↓31.21(−9.40)↓
TIP-Adapter ECCV'22[42]57.81(−2.54)↓40.69(+0.08)↑
TIP-Adapter(f) ECCV'22[42]62.27(+1.92)↑41.36(+0.75)↑
TaskRes(e) CVPR'23[40]60.85(+0.50)↑41.28(+0.67)↑
ZS-LP61.00(+0.65)↑36.58(−4.03)↓
CLAP65.02(+4.67)↑42.91(+2.30)↑
ViT-B/16Zero-Shot ICML'21[30]68.7157.17
Rand. Init LP ICML'21[30]62.95(−5.76)↓40.41(−16.76)↓
CLIP-Adapter IJCV'23[11]68.46(−0.25)↓50.72(−6.45)↓
TIP-Adapter ECCV'22[42]53.81(−14.90)↓41.55(−15.62)↓
TIP-Adapter(f) ECCV'22[42]51.71(−17.00)↓35.58(−21.59)↓
TaskRes(e) CVPR'23[40]70.84(+2.13)↑55.35(−1.82)↓
ZS-LP69.73(+1.02)↑53.65(−3.52)↓
CLAP73.38(+4.67)↑60.04(+2.87)↑
", + "image_path": "09b97238e490ec5763ba80ebf91660f82564cae94b2edfb0efc95cec3596a6a7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 308, + 458, + 541, + 553 + ], + "blocks": [ + { + "bbox": [ + 304, + 378, + 545, + 454 + ], + "lines": [ + { + "bbox": [ + 304, + 378, + 545, + 454 + ], + "spans": [ + { + "bbox": [ + 304, + 378, + 545, + 454 + ], + "type": "text", + "content": "Table 3. Fine-tuning (FT) vs. efficient transfer learning (ETL). A benchmark for the low data regime, i.e., 8 shots for each class. For the sake of fairness, FT methods (above the dashed line) are trained with 4 shots and early-stopped using a validation set containing 4 shots. On the other hand, ETL methods (below the dashed line) are trained using 8 shots and rely solely on the support set. All methods use ViT-B/16 as CLIP backbone." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 458, + 541, + 553 + ], + "lines": [ + { + "bbox": [ + 308, + 458, + 541, + 553 + ], + "spans": [ + { + "bbox": [ + 308, + 458, + 541, + 553 + ], + "type": "table", + "html": "
MethodSource ImagenetTarget
-V2-Sketch-A-RAvg.
Fine-tuning (FT)69.8862.4447.0747.5276.0858.28
LP-FT ICLR'23 [22]71.2964.0448.5049.4977.6359.92
WiSE CVPR'22 [36]71.1763.8149.3850.5978.5660.59
FLYP CVPR'23 [12]71.5164.5949.5051.3278.5260.98
Zero-Shot68.7160.7646.1847.7673.9857.17
Rand. Init LP56.5847.1725.8227.0347.0536.77
ZS-LP68.4960.0742.7742.3971.7354.24
CLAP71.7564.0647.6648.4076.7059.21
", + "image_path": "ce9371f7ec7ac2457a804f5af74c896af421b56dc708c0c47a4fa06901e07cbe.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 312, + 553, + 536, + 562 + ], + "lines": [ + { + "bbox": [ + 312, + 553, + 536, + 562 + ], + "spans": [ + { + "bbox": [ + 312, + 553, + 536, + 562 + ], + "type": "text", + "content": "*Specific numbers for FT, LP-FT, WiSE-FT, and FLYP are retrieved from [12]." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": "Is it worth optimizing the entire model? We now compare CLAP to end-to-end full fine-tuning (FT) approaches: LP-FT [22], WiSE-FT [36], and FLYP [12]. The former two methods require a validation set for early stopping, and the latter two use it for both early stopping and tuning the mixing coefficient hyperparameter " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": ". Therefore, for a " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": "-shot problem, these methods actually require " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "2K" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " shots for each class, " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " for training, and " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " for validation. As the balancing penalty term in CLAP is optimized with the support set, and does not require a validation set, a fair comparison would be to evaluate the " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": "-shot performance of fine-tuning methods against our method's " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "2K" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": "-shot results. Thus, Tab. 3 in" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "23687" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "content": "cludes the performance of all the models when 8 labeled images are available for each class overall. 
Analyzing the results, we can conclude that in the low data regime, full finetuning is not necessarily superior to ETL when compared properly. More specifically, our approach outperforms finetuning methods in in-distribution performance and performs reasonably well on OOD datasets, while having a fraction of the estimizable parameters of fine-tuning methods." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 175, + 171, + 188 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 175, + 171, + 188 + ], + "spans": [ + { + "bbox": [ + 47, + 175, + 171, + 188 + ], + "type": "text", + "content": "5.3. Ablation experiments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 194, + 289, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 194, + 289, + 470 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 289, + 470 + ], + "type": "text", + "content": "On the need for model selection strategies. Relevant methods (e.g., CLIP-Adapter [11], TIP-Adapter [42], or TaskRes [40]) include different hyperparameters that directly control their performance. Nevertheless, these methods are incomplete, since they do not include any strategy for adjusting these parameters, typically referred to as model selection. In contrast, and as previously stressed, there is evidence that these works use a large evaluation subset to adapt their settings to each scenario [24]. To investigate this observation, we evaluate these methods in cross-dataset model selection experiments. The best hyperparameters values for a task (i.e., dataset), which are found in an Oracle scenario using the entire test subset, are used during adaptation to another dataset. The matrices showing the relative improvements over a zero-shot initialized Linear Probing (ZS-LP) are depicted in Fig. 1. These results show empirically that the hyperparameters values are highly task-dependent, and that SoTA methods must adjust their hyperparameters on the target task to outperform this simple baseline, which is unrealistic in practice. In contrast, the proposed CLAP is more robust, showing consistent results across all datasets, even in the worst degradation case, as it does not require particular modifications per task." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 50, + 526, + 282, + 594 + ], + "blocks": [ + { + "bbox": [ + 46, + 479, + 287, + 523 + ], + "lines": [ + { + "bbox": [ + 46, + 479, + 287, + 523 + ], + "spans": [ + { + "bbox": [ + 46, + 479, + 287, + 523 + ], + "type": "text", + "content": "Table 4. Improving Linear Probing. Using as baseline the proposed ZS-LP configuration detailed in Sec. 4.1, we isolate the effect of removing different parts of the model, while keeping the rest static. Results are averaged across 11 datasets." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 526, + 282, + 594 + ], + "lines": [ + { + "bbox": [ + 50, + 526, + 282, + 594 + ], + "spans": [ + { + "bbox": [ + 50, + 526, + 282, + 594 + ], + "type": "table", + "html": "
MethodK=1K=2K=4
ZS-LP61.2864.8867.98
w/o DA57.72(−3.5)↓61.94(−2.9)↓65.41(−2.5)↓
w/o Temp. Scaling (τ)58.33(−2.9)↓59.85(−5.0)↓59.91(−8.0)↓
w/o L2-norm48.67(−12.6)↓55.29(−9.6)↓61.16(−6.8)↓
Rand. Init.30.42(−30.8)↓41.86(−23.0)↓51.69(−16.2)↓
", + "image_path": "a2c46c28d3f5c753e8d05b3ce3bda45b2739b52899683c25037468ffd7d1d56d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "type": "text", + "content": "Details in Linear Probing matter. As described earlier in Sec. 4.1, LP has been discouraged in the prior literature due to its limited performance in few-shot adaptation [11, 30]. Nevertheless, we argue that this behavior stems from the original way in which LP was introduced in [30], inspired by prior self-supervised learning methods. Indeed, a strategy tailored to contrastive VLMs alleviates the performance drop of LP observed in prior works. In particular, using zero-shot initialization, the same temperature scaling as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "pre-training, and explicit " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "-normalization of the class prototypes, considerably improves the generalization of few-shot adaptation (Tab. 4). This aligns with relevant literature on other topics such as FT [12], which suggests that the adaptation conditions should match the pre-training setting. Also, including other heuristics such as data augmentation (DA), usually omitted in LP [40, 42], is of special relevance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 175, + 546, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 175, + 546, + 368 + ], + "spans": [ + { + "bbox": [ + 304, + 175, + 546, + 368 + ], + "type": "text", + "content": "Using a few-shot validation set. Cross-Modal adapter [24] uses a validation set composed of " + }, + { + "bbox": [ + 304, + 175, + 546, + 368 + ], + "type": "inline_equation", + "content": "(\\min(K, 4))" + }, + { + "bbox": [ + 304, + 175, + 546, + 368 + ], + "type": "text", + "content": " samples to adjust the experimental setting and early stopping. Even though this setting is more appropriate, it still requires an additional number of shots for model selection. Nevertheless, for the sake of fairness, the performance comparison to methods that do not require a validation set should be carried out by training the latter methods using " + }, + { + "bbox": [ + 304, + 175, + 546, + 368 + ], + "type": "inline_equation", + "content": "K + \\min(K, 4)" + }, + { + "bbox": [ + 304, + 175, + 546, + 368 + ], + "type": "text", + "content": " shots. When this fair benchmark is established (see Tab. 5), simple ZS-LP excels again as a strong baseline, outperforming more complex methods on the low-shot regime. Only when using a large number of shots (" + }, + { + "bbox": [ + 304, + 175, + 546, + 368 + ], + "type": "inline_equation", + "content": "K > 8" + }, + { + "bbox": [ + 304, + 175, + 546, + 368 + ], + "type": "text", + "content": ") partial finetuning and ETL methods marginally benefit from validation samples. 
However, model selection using a validation set increases the computational workload and processing times during adaptation due to its grid search nature." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 307, + 404, + 547, + 502 + ], + "blocks": [ + { + "bbox": [ + 305, + 379, + 545, + 401 + ], + "lines": [ + { + "bbox": [ + 305, + 379, + 545, + 401 + ], + "spans": [ + { + "bbox": [ + 305, + 379, + 545, + 401 + ], + "type": "text", + "content": "Table 5. Using a few-shot validation set. Results for priors works on this setting are obtained from [24]. Average across 11 datasets." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 404, + 547, + 502 + ], + "lines": [ + { + "bbox": [ + 307, + 404, + 547, + 502 + ], + "spans": [ + { + "bbox": [ + 307, + 404, + 547, + 502 + ], + "type": "table", + "html": "
MethodK=1K=2K=4K=8K=16
Protocol in [24]: K-shots for train + min(K, 4) for validation
TIP-Adapter [42]63.365.969.072.275.1
CrossModal LP [24]64.167.070.373.076.0
CrossModal Adapter [24]64.467.670.873.475.9
CrossModal PartialFT [24]64.767.270.573.677.1
Ours: using K + min(K, 4) shots for training
ZS-LP64.968.071.473.175.0
CLAP66.169.172.173.575.1
", + "image_path": "49f9a338f2b7a0091f9a129c24014a786739d9885db789aac22adac6595be69f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 514, + 382, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 514, + 382, + 528 + ], + "spans": [ + { + "bbox": [ + 306, + 514, + 382, + 528 + ], + "type": "text", + "content": "6. Limitations" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 535, + 545, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 535, + 545, + 632 + ], + "spans": [ + { + "bbox": [ + 304, + 535, + 545, + 632 + ], + "type": "text", + "content": "In this work, we have introduced a Class-Adaptive linear Probe (CLAP) objective, based on an adaptation of the general Augmented Lagrangian method, for efficient adaptation of large vision-language models in realistic scenarios. Despite its superiority, our empirical validation suggests that the benefits of our approach diminish as the number of shots increases, indicating that other strategies might be privileged if the number of adaptation samples is large." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 644, + 403, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 644, + 403, + 658 + ], + "spans": [ + { + "bbox": [ + 306, + 644, + 403, + 658 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "This work is supported by the National Science and Engineering Research Council of Canada (NSERC) and Fonds de recherche du Québec (FRQNT). We also thank Calcul Quebec and Compute Canada." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "23688" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 49, + 72, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 72, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 49, + 72, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 286, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "text", + "content": "[1] Dimitri P. Bertsekas. Constrained Optimization and Lagrange Multiplier Methods (Optimization and Neural Computation Series). Athena Scientific, 1 edition, 1996." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 286, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 286, + 167 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 286, + 167 + ], + "type": "text", + "content": "[2] Ernesto G Birgin, Romulo A Castillo, and José Mario Martínez. Numerical comparison of augmented lagrangian algorithms for nonconvex problems. 
Computational Optimization and Applications, 31(1):31-55, 2005." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 170, + 286, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 170, + 286, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 170, + 286, + 190 + ], + "type": "text", + "content": "[3] Rishi Bommasani et al. On the opportunities and risks of foundation models. ArXiv, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 192, + 286, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 192, + 286, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 192, + 286, + 235 + ], + "type": "text", + "content": "[4] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 - mining discriminative components with random forests. In European Conference on Computer Vision (ECCV), 2014." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 237, + 286, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 237, + 286, + 300 + ], + "spans": [ + { + "bbox": [ + 53, + 237, + 286, + 300 + ], + "type": "text", + "content": "[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in Neural Information Processing Systems (NeurIPS), 33:1877-1901, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 303, + 286, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 303, + 286, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 303, + 286, + 346 + ], + "type": "text", + "content": "[6] Guangyi Chen, Weiran Yao, Xiangchen Song, Xinyue Li, Yongming Rao, and Kun Zhang. Prompt learning with optimal transport for vision-language models. In International Conference on Learning Representations (ICLR), 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 348, + 286, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 348, + 286, + 401 + ], + "spans": [ + { + "bbox": [ + 53, + 348, + 286, + 401 + ], + "type": "text", + "content": "[7] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3606-3613, 2014." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 403, + 286, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 403, + 286, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 403, + 286, + 456 + ], + "type": "text", + "content": "[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 248-255, 2009." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 460, + 286, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 460, + 286, + 534 + ], + "spans": [ + { + "bbox": [ + 53, + 460, + 286, + 534 + ], + "type": "text", + "content": "[9] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. 
International Conference on Learning Representations (ICLR), 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 536, + 286, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 536, + 286, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 536, + 286, + 601 + ], + "type": "text", + "content": "[10] Li Fei-Fei, R. Fergus, and P. Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Worskshops (CVPRW), pages 178–178, 2004." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 603, + 286, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 603, + 286, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 603, + 286, + 656 + ], + "type": "text", + "content": "[11] Peng Gao, Shijie Geng, Renrui Zhang, Teli Ma, Rongyao Fang, Yongfeng Zhang, Hongsheng Li, and Yu Qiao. Clip-adapter: Better vision-language models with feature adapters. International Journal of Computer Vision (IJCV), 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 658, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 286, + 712 + ], + "type": "text", + "content": "[12] Sachin Goyal, Ananya Kumar, Sankalp Garg, Zico Kolter, and Aditi Raghunathan. Finetune like you pretrain: Improved finetuning of zero-shot vision models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19338-19347, 2023." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[13] Changsheng Xu Hantao Yao, Rui Zhang. Visual-language prompt tuning with knowledge-guided context optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 119, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 119, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 545, + 162 + ], + "type": "text", + "content": "[14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2016." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 164, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 164, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 308, + 164, + 545, + 217 + ], + "type": "text", + "content": "[15] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. Introducing eurosat: A novel dataset and deep learning benchmark for land use and land cover classification. In IEEE International Geoscience and Remote Sensing Symposium (IGARSS), pages 3606-3613, 2018." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 220, + 545, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 220, + 545, + 273 + ], + "spans": [ + { + "bbox": [ + 308, + 220, + 545, + 273 + ], + "type": "text", + "content": "[16] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15262-15271, 2019." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 276, + 545, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 276, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 308, + 276, + 545, + 352 + ], + "type": "text", + "content": "[17] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadayath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), page 8340-8349, 2021." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 354, + 545, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 354, + 545, + 418 + ], + "spans": [ + { + "bbox": [ + 308, + 354, + 545, + 418 + ], + "type": "text", + "content": "[18] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning (ICML), pages 4904-4916, 2021." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 422, + 545, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 422, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 308, + 422, + 545, + 464 + ], + "type": "text", + "content": "[19] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision (ECCV), pages 709-727, 2022." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 467, + 545, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 467, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 308, + 467, + 545, + 520 + ], + "type": "text", + "content": "[20] Muhammad Uzair Khattak, Hanoona Rasheed, Muhammad Maaz, Salman Khan, and Fahad Shahbaz Khan. Maple: Multi-modal prompt learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19113-19122, 2023." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 523, + 545, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 523, + 545, + 576 + ], + "spans": [ + { + "bbox": [ + 308, + 523, + 545, + 576 + ], + "type": "text", + "content": "[21] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), page 3498–3505, 2012." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 579, + 545, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 579, + 545, + 632 + ], + "spans": [ + { + "bbox": [ + 308, + 579, + 545, + 632 + ], + "type": "text", + "content": "[22] Ananya Kumar, Aditi Raghunathan, Robbie Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. In International Conference on Learning Representations (ICLR), pages 1-42, 2022." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 635, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 545, + 678 + ], + "type": "text", + "content": "[23] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. In _Conference on Empirical Methods in Natural Language Processing (EMNLP)_, pages 3045-3059, 2021." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 681, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 681, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 681, + 545, + 712 + ], + "type": "text", + "content": "[24] Zhiqiu Lin, Samuel Yu, Zhiyi Kuang, Deepak Pathak, and Deva Ramanan. Multimodality helps unimodality: Cross-modal few-shot learning with multimodal models. In Pro" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "23689" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "text", + "content": "ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "text", + "content": "[25] Bingyuan Liu, Jérôme Rony, Adrian Galdran, Jose Dolz, and Ismail Ben Ayed. Class adaptive network calibration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16070-16079, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 174 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 174 + ], + "type": "text", + "content": "[26] S. Maji, J. Kannala, E. Rahtu, M. Blaschko, and A. Vedaldi. Fine-grained visual classification of aircraft. In ArXiv Preprint, 2013." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "type": "text", + "content": "[27] Sachit Menon and Carl Vondrick. Visual classification via description from large language models. In International Conference on Learning Representations (ICLR), pages 1-17, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 220, + 287, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 220, + 287, + 263 + ], + "spans": [ + { + "bbox": [ + 48, + 220, + 287, + 263 + ], + "type": "text", + "content": "[28] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In Indian Conference on Computer Vision, Graphics and Image Processing, 2008." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 266, + 287, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 266, + 287, + 308 + ], + "spans": [ + { + "bbox": [ + 48, + 266, + 287, + 308 + ], + "type": "text", + "content": "[29] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), page 3498-3505, 2012." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 311, + 287, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 311, + 287, + 376 + ], + "spans": [ + { + "bbox": [ + 48, + 311, + 287, + 376 + ], + "type": "text", + "content": "[30] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (ICML), pages 8748-8763, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 378, + 287, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 378, + 287, + 421 + ], + "spans": [ + { + "bbox": [ + 48, + 378, + 287, + 421 + ], + "type": "text", + "content": "[31] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. Do imagenet classifiers generalize to imagenet? In International Conference on Machine Learning (ICML), pages 5389-5400, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 422, + 287, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 422, + 287, + 476 + ], + "spans": [ + { + "bbox": [ + 48, + 422, + 287, + 476 + ], + "type": "text", + "content": "[32] Sara Sangalli, Ertunc Erdil, Andeas Hotker, Olivio F Donati, and Ender Konukoglu. Constrained optimization to train neural networks on critical and under-represented classes. In Advances in Neural Information Processing Systems (NeurIPS), 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 479, + 287, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 479, + 287, + 511 + ], + "spans": [ + { + "bbox": [ + 48, + 479, + 287, + 511 + ], + "type": "text", + "content": "[33] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. In ArXiv Preprint, 2012." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 513, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 513, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 513, + 287, + 567 + ], + "type": "text", + "content": "[34] Rohan Taori, Achal Dave, Vaishaal Shankar, Nicholas Carlini, Benjamin Recht, and Ludwig Schmidt. Measuring robustness to natural distribution shifts in image classification. In Advances in Neural Information Processing Systems (NeurIPS), 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 569, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 569, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 569, + 287, + 612 + ], + "type": "text", + "content": "[35] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In Advances in Neural Information Processing Systems (NeurIPS), 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 614, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 287, + 689 + ], + "type": "text", + "content": "[36] Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo-Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, and Ludwig Schmidt. Robust fine-tuning of zero-shot models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7959-7971, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 692, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 692, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 692, + 287, + 712 + ], + "type": "text", + "content": "[37] Jianxiong Xiao, James Hays, Krista A. Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 598 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "text", + "content": "recognition from abbey to zoo. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3485-3492, 2010." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 107, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 545, + 150 + ], + "type": "text", + "content": "[38] Yinghui Xing, Qirui Wu, De Cheng, Shizhou Zhang, Guoqiang Liang, Peng Wang, and Yanning Zhang. Dual modality prompt tuning for vision-language pre-trained model. IEEE Transactions on Multimedia, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 152, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 152, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 545, + 205 + ], + "type": "text", + "content": "[39] Hantao Yao, Rui Zhang, and Changsheng Xu. Visual-language prompt tuning with knowledge-guided context optimization. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6757-6767, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 208, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 208, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 308, + 208, + 545, + 251 + ], + "type": "text", + "content": "[40] Tao Yu, Zhihe Lu, Xin Jin, Zhibo Chen, and Xinchao Wang. Task residual for tuning vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10899-10909, 2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 253, + 545, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 253, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 308, + 253, + 545, + 316 + ], + "type": "text", + "content": "[41] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18123-18133, 2022." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 319, + 545, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 319, + 545, + 373 + ], + "spans": [ + { + "bbox": [ + 308, + 319, + 545, + 373 + ], + "type": "text", + "content": "[42] Renrui Zhang, Rongyao Fang, Wei Zhang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li. Tip-adapter: Training-free clip-adapter for better vision-language modeling. In European Conference on Computer Vision (ECCV), pages 1-19, 2022." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 376, + 545, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 376, + 545, + 430 + ], + "spans": [ + { + "bbox": [ + 308, + 376, + 545, + 430 + ], + "type": "text", + "content": "[43] Zexuan Zhong, Dan Friedman, and Danqi Chen. Factual probing is [mask]: Learning vs. learning to recall. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5017-5033, 2021." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 431, + 545, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 431, + 545, + 474 + ], + "spans": [ + { + "bbox": [ + 308, + 431, + 545, + 474 + ], + "type": "text", + "content": "[44] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 45:4396-4415, 2022." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 476, + 545, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 476, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 308, + 476, + 545, + 520 + ], + "type": "text", + "content": "[45] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Conditional prompt learning for vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 521, + 545, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 521, + 545, + 553 + ], + "spans": [ + { + "bbox": [ + 308, + 521, + 545, + 553 + ], + "type": "text", + "content": "[46] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision (IJCV), 2022." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 555, + 545, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 555, + 545, + 598 + ], + "spans": [ + { + "bbox": [ + 308, + 555, + 545, + 598 + ], + "type": "text", + "content": "[47] Beier Zhu, Yulei Niu, Yucheng Han, Yue Wu, and Hanwang Zhang. Prompt-aligned gradient for prompt tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15659-15669, 2023." + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "23690" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/80eefa2b-3d90-4d98-ab03-f2521d12efac_content_list.json b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/80eefa2b-3d90-4d98-ab03-f2521d12efac_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..0cd14a164585a029d471ca50f7dbf360515acf33 --- /dev/null +++ b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/80eefa2b-3d90-4d98-ab03-f2521d12efac_content_list.json @@ -0,0 +1,1765 @@ +[ + { + "type": "text", + "text": "A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling", + "text_level": 1, + "bbox": [ + 127, + 128, + 841, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wentao $\\mathbf{Q}\\mathbf{u}^{1}$ , Yuantian Shao $^{1}$ , Lingwu Meng $^{1}$ , Xiaoshui Huang $^{2*}$ , Liang Xiao $^{1*}$ Nanjing University of Science and Technology $^{1}$ , Shanghai AI Laboratory $^{2}$", + "bbox": [ + 196, + 202, + 820, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{quwentao, alvin_s, menglw815}@njust.edu.cn, huangxiaoshui@163.com, xiaoliang@mail.njust.edu.cn", + "bbox": [ + 86, + 241, + 919, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 291, + 313, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Point cloud upsampling (PCU) enriches the representation of raw point clouds, significantly improving the performance in downstream tasks such as classification and reconstruction. Most of the existing point cloud upsampling methods focus on sparse point cloud feature extraction and upsampling module design. In a different way, we dive deeper into directly modelling the gradient of data distribution from dense point clouds. In this paper, we proposed a conditional denoising diffusion probabilistic model (DDPM) for point cloud upsampling, called PUDM. 
Specifically, PUDM treats the sparse point cloud as a condition, and iteratively learns the transformation relationship between the dense point cloud and the noise. Simultaneously, PUDM aligns with a dual mapping paradigm to further improve the discernment of point features. In this context, PUDM enables learning complex geometry details in the ground truth through the dominant features, while avoiding an additional upsampling module design. Furthermore, to generate high-quality arbitrary-scale point clouds during inference, PUDM exploits the prior knowledge of the scale between sparse point clouds and dense point clouds during training by parameterizing a rate factor. Moreover, PUDM exhibits strong noise robustness in experimental results. In the quantitative and qualitative evaluations on PU1K and PUGAN, PUDM significantly outperformed existing methods in terms of Chamfer Distance (CD) and Hausdorff Distance (HD), achieving state of the art (SOTA) performance.", + "bbox": [ + 76, + 323, + 473, + 731 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 773, + 209, + 789 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Point clouds, as a most fundamental 3D representation, have been widely used in various downstream tasks such as 3D reconstruction [19, 24], autonomous driving [4, 16, 49], and robotics technology [42, 46]. However, raw point clouds captured from 3D sensors often exhibit sparsity,", + "bbox": [ + 76, + 799, + 468, + 876 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/baae014e43e709df28a42b3bb1701b0aecc9e06c095f8dfb58086e74802356ab.jpg", + "image_caption": [ + "Figure 1. Most existing methods achieving satisfactory results for input sparse point clouds with clear geometric structures (such as the hole on the green cover rear), but performing poorly for those with fuzzy geometric details (like the eyes of the red pig). However, our results, with close proximity to the ground truth." + ], + "image_footnote": [], + "bbox": [ + 501, + 287, + 893, + 435 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "noise, and non-uniformity. This is substantiated across diverse publicly available benchmark datasets, such as KITTI [8], ScanNet [5]. Hence, point cloud upsampling, which involves the transformation of sparse, incomplete, and noisy point clouds into dense, complete, and artifact-free representations, has garnered considerable research interest.", + "bbox": [ + 496, + 540, + 890, + 630 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Inspired by deep learning, the pioneering work PU-Net [44] is the first to utilize deep neural networks to address this problem. This first divides the input point cloud into multiple patches and then extracts multi-scale features. Subsequently, these features are aggregated and fed into an upsampling module to approximate the dense point cloud coordinates. Building this approach, many works [17, 18, 21, 30, 43] optimize neural networks by focusing on sparse point cloud feature extraction and upsampling module design.", + "bbox": [ + 496, + 631, + 892, + 781 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, while these methods have achieved improved results, predicting dense point cloud coordinates via sparse point cloud features is an indirect approximating approach. 
Typically, these methods first utilize an encoder to extract sparse point cloud features, and then use a carefully designed upsampling module to fit dense point cloud coordinates. This approach has three limitations. First, the non", + "bbox": [ + 496, + 782, + 893, + 888 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding Author. https://github.com/QWTforGithub/PUDM", + "bbox": [ + 101, + 883, + 470, + 897 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "20786", + "bbox": [ + 478, + 941, + 519, + 954 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "dominance of features causes the generated results to be more inclined toward input sparse point clouds, struggling to represent reasonable geometry details from the ground truth, as Fig 1 illustrated. Second, the additional upsampling module designs increase the workload for algorithm designers and often disrupt the intrinsic coordinate mappings in point clouds [30, 43, 44]. Third, they mostly require the joint supervision of the CD loss and other losses, resulting in them sensitive to noise [13, 39].", + "bbox": [ + 75, + 90, + 468, + 226 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we consider the point cloud upsampling task as a conditional generation problem. This first explores the incorporation of probabilistic models for point cloud upsampling. We propose a novel point cloud upsampling network, called PUDM, which is formally based on a conditional DDPM. Unlike previous methods, PUDM models the gradient of data distribution from dense point clouds (i.e., the ground truth), directly utilizing the dominant features to fit the ground truth, and decoupling the dependency on CD loss. Moreover, the auto-regressive nature of DDPM enables PUDM to efficiently avoid the additional upsampling module design, ensuring intrinsic point-wise mapping relationships in point clouds.", + "bbox": [ + 75, + 227, + 470, + 422 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Simultaneously, to improve the ability of perceiving point features, PUDM employs a dual mapping paradigm. This naturally establishes a dual mapping relationship: between the generated sparse point cloud and the sparse point cloud, and between the dense point cloud and the noise. In this context, PUDM has the ability to learn complex geometric structures from the ground truth, generating uniform surfaces aligned with the ground truth, as Fig 1.", + "bbox": [ + 75, + 422, + 468, + 542 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Furthermore, we found that DDPM only models fixed-scale point cloud objects during training. To overcome this, we consider parameterizing a rate factor to exploit the prior knowledge of the scale between sparse point clouds and dense point clouds. 
In this way, PUDM enables to generate high-fidelity arbitrary-scale point clouds during inference.", + "bbox": [ + 75, + 544, + 468, + 633 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In additional, benefiting from the inherent denoising architecture and the non-dependency for CD loss, PUDM demonstrates a remarkable degree of robustness in noise experiments.", + "bbox": [ + 75, + 633, + 468, + 694 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our key contributions can be summarized as:", + "bbox": [ + 94, + 695, + 395, + 709 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We systematically analyze and recognize conditional DDPM as a favorable model for generating uniform point clouds at arbitrary scales in point cloud upsampling tasks.", + "- We propose a novel network with a dual mapping for point cloud upsampling, named PUDM, which is based on conditional DDPM.", + "- By exploiting the rate prior, PUDM exhibits the ability of generating high-fidelity point clouds across arbitrary scales during inference.", + "- Comprehensive experiments demonstrate the outstanding capability of PUDM in generating geometric details in public benchmarks of point cloud upsampling." + ], + "bbox": [ + 76, + 710, + 468, + 892 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 500, + 89, + 650, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Learnable Point Cloud Upsampling. The integration of deep learning with formidable data-driven and trainable attributes has markedly accelerated progress within the 3D field. Thanks to the powerful representation capabilities of deep neural networks, directly learning features from 3D data has become achievable, such as PointNet [28], PointNet++ [29], DGCNN [27], MinkowskiEngine [2], and KPConv [38]. Benefiting from the above, PU-Net [44] stands as the pioneer in integrating deep neural networks into point cloud upsampling tasks. This first aggregates multi-scale features for each point through multiple MLPs, and then expands them into a point cloud upsampling set via a channel shuffle layer. Following this pattern, some methods have achieved more significant results, such as MPU [43], PU-GAN [17], Dis-PU [18], and PU-GCN [30]. PU-EVA [21] is the first to achieve the arbitrary-scale point clouds upsampling via edge-vector based affine combinations in one-time training. Subsequently, PUGeo [32] and NePs [7] believe that sampling points within a 2D continuous space can generate higher-quality results. Furthermore, Grad-PU [9] transforms the point cloud upsampling task into a coordinate approximation problem, avoiding the upsampling module design.", + "bbox": [ + 496, + 114, + 890, + 460 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Most methods predict the dense point cloud coordinates via sparse point cloud features, and extend the point set relying on an upsampling module. This causes them to struggle to learn complex geometry details from the ground truth. Moreover, they frequently exhibit a susceptibility to noise due to depending on CD loss during training. In this paper, we consider transforming the point cloud upsampling task into a point cloud generation problem, and first utilize conditional DDPM to address the aforementioned issues.", + "bbox": [ + 496, + 462, + 890, + 597 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "DDPM for Point Cloud Generation. 
Inspired by the success in image generation tasks [33-35], there has been greater attention on directly generating point clouds through DDPM. [22] represents the pioneering effort in applying DDPM to unconditional point cloud generation. Subsequently, [50] extends the application of DDPM to the point cloud completion task by training a point-voxel CNN [20]. However, the voxelization process introduces additional computational complexity. Furthermore, PDR [23] takes raw point clouds as input. But this requires training the two stages (coarse-to-fine) of diffusion models, resulting in a greater time overhead.", + "bbox": [ + 496, + 598, + 890, + 777 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we explore to the application of conditional DDPM to handle the point cloud upsampling task. Unlike the point cloud generation and completion task, point cloud upsampling exhibits the difference of the point cloud scale between training and inference. We overcome this issue by exploiting a rate prior. Meanwhile, our method based on a dual mapping paradigm enables to efficiently learn complex geometric details in a single-stage training.", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "20787", + "bbox": [ + 478, + 941, + 519, + 954 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Denoising Diffusion Probabilistic Models", + "text_level": 1, + "bbox": [ + 76, + 89, + 442, + 106 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Background for DDPM", + "text_level": 1, + "bbox": [ + 76, + 114, + 294, + 128 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The forward and reverse process. Given the dense point cloud $\\pmb{x}$ sampled from a meaningful point distribution $P_{data}$ , and an implicit variable $\\pmb{z}$ sampled from a tractable noise distribution $P_{latent}$ , DDPM establishes the transformation relationship between $\\pmb{x}$ and $\\pmb{z}$ through two Markov chains. This conducts an auto-regressive process: a forward process $q$ that gradually adds noise to $\\pmb{x}$ until $\\pmb{x}$ degrades to $\\pmb{z}$ , and a reverse process $p_{\\theta}$ that slowly removes noise from $\\pmb{z}$ until $\\pmb{z}$ recovers to $\\pmb{x}$ . We constrain the transformation speed using a time step $t \\sim \\mathcal{U}(T)$ ( $T = 1000$ in this paper).", + "bbox": [ + 76, + 137, + 467, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Training objective under specific conditions. Given a set of conditions $C = \\{c_i | i = 1..S\\}$ , the training objective of DDPM under specific conditions is (please refer to the supplementary materials for the detailed derivation):", + "bbox": [ + 76, + 289, + 467, + 349 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nL (\\theta) = \\mathbb {E} _ {t \\sim U (T), \\epsilon \\sim \\mathcal {N} (0, I)} | | \\epsilon - \\epsilon_ {\\boldsymbol {\\theta}} (\\boldsymbol {x} _ {t}, C, t) | | ^ {2} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 375, + 468, + 393 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\pmb{x}_{t} = \\sqrt{1 - \\overline{\\alpha}_{t}}\\pmb {\\epsilon} + \\sqrt{\\overline{\\alpha}_{t}}\\pmb{x_{0}}$ [11].", + "bbox": [ + 76, + 402, + 334, + 417 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The gradient of data distribution. 
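[Editorial sketch] As a concrete reading of the conditional objective in Eq 1, the short PyTorch-style sketch below draws a uniform time step and a Gaussian noise sample, forms x_t through the closed form x_t = sqrt(1 - alpha_bar_t) * eps + sqrt(alpha_bar_t) * x_0 given above, and regresses the injected noise under the sparse-cloud condition. The function eps_model, the precomputed alpha_bar schedule, and the tensor shapes are illustrative assumptions, not the released PUDM implementation.

import torch
import torch.nn.functional as F

def conditional_ddpm_loss(eps_model, x0, cond, alpha_bar, T=1000):
    # x0: dense ground-truth cloud (B, N, 3); cond: sparse input cloud (B, M, 3)
    # alpha_bar: cumulative product of (1 - beta_t) for t = 0..T-1, shape (T,), on x0.device
    B = x0.shape[0]
    t = torch.randint(0, T, (B,), device=x0.device)        # t ~ U(T)
    eps = torch.randn_like(x0)                              # eps ~ N(0, I)
    a_bar = alpha_bar[t].view(B, 1, 1)
    x_t = torch.sqrt(1.0 - a_bar) * eps + torch.sqrt(a_bar) * x0   # closed-form forward diffusion
    eps_pred = eps_model(x_t, cond, t)                      # epsilon_theta(x_t, C, t)
    return F.mse_loss(eps_pred, eps)                        # Eq 1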
Furthermore, we use a stochastic differential equation (SDE) to describe the process of DDPM [37]:", + "bbox": [ + 76, + 419, + 467, + 463 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\ns _ {\\theta} \\left(\\boldsymbol {x} _ {t}, t\\right) = \\nabla_ {x} \\log \\left(\\boldsymbol {x} _ {t}\\right) = - \\frac {1}{\\sqrt {1 - \\bar {\\alpha} _ {t}}} \\boldsymbol {\\epsilon} _ {\\theta} \\left(\\boldsymbol {x} _ {t}, t\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 484, + 468, + 516 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The training objective of DDPM is essentially equivalent to computing the score (the gradient of data distribution), which differs only by a constant factor $-\\frac{1}{\\sqrt{1 - \\overline{\\alpha}_t}}$ .", + "bbox": [ + 76, + 523, + 467, + 571 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Analysis of DDPM for PCU", + "text_level": 1, + "bbox": [ + 76, + 579, + 323, + 594 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We pioneer the exploration of the advantages and limitations of DDPM for PCU, hoping these insights encourage more researchers to introduce probabilistic models into PCU.", + "bbox": [ + 76, + 602, + 467, + 660 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "DDPM is an effective model for PCU. As mentioned in Sec 3.1, the auto-regressive nature of DDPM allows it to directly learn geometry details of the ground truth using the dominant features, generating closer-to-truth, fine-grained results.", + "bbox": [ + 76, + 662, + 467, + 736 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Simultaneously, the reverse process of DDPM in PCU is:", + "bbox": [ + 96, + 738, + 467, + 753 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\theta} \\left(\\boldsymbol {x} _ {\\mathbf {0}: T}, \\boldsymbol {c}\\right) = p \\left(\\boldsymbol {x} _ {T}\\right) \\prod_ {t = 1} ^ {T} p _ {\\theta} \\left(\\boldsymbol {x} _ {t - 1} \\mid \\boldsymbol {x} _ {t}, \\boldsymbol {c}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 763, + 468, + 805 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $c$ means the sparse point cloud sampled from a data distribution $P_{c}$ . According to Eq 3, the condition $c$ participates in each step of the reverse process. In fact, this is usually achieved using an additional branch network interacting with the noise network, without intrinsically disrupting the auto-regressive process of DDPM, thus cleverly", + "bbox": [ + 76, + 810, + 467, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "avoiding to design an additional upsampling module. Moreover, the process naturally defines a one-to-one point-wise mapping relationship between the dense point cloud and the noise, preserving the order of points in the diffusion process.", + "bbox": [ + 498, + 90, + 890, + 165 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Furthermore, the efficient denoising architecture and the decoupling of CD loss significantly support the strong noise robustness of DDPM.", + "bbox": [ + 498, + 167, + 890, + 210 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The limitations of DDPM in PCU. 
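[Editorial sketch] To make the conditional reverse chain of Eq 3 concrete, the sketch below performs one ancestral denoising step with the standard DDPM posterior mean; a comment records the score relation of Eq 2. As before, eps_model and the alphas / alpha_bar / sigmas schedules are assumed inputs, not the authors' code.

import torch

@torch.no_grad()
def conditional_reverse_step(eps_model, x_t, cond, t, alphas, alpha_bar, sigmas):
    # One step of p_theta(x_{t-1} | x_t, c) from Eq 3.
    # Eq 2: the score is s_theta(x_t, t) = -eps_theta(x_t, t) / sqrt(1 - alpha_bar_t).
    eps_pred = eps_model(x_t, cond, t)
    a_t = alphas[t].view(-1, 1, 1)
    a_bar_t = alpha_bar[t].view(-1, 1, 1)
    mean = (x_t - (1.0 - a_t) / torch.sqrt(1.0 - a_bar_t) * eps_pred) / torch.sqrt(a_t)
    noise = torch.randn_like(x_t) if int(t.min()) > 0 else torch.zeros_like(x_t)
    return mean + sigmas[t].view(-1, 1, 1) * noise

Note that the inference rule actually used later in the paper (Eq 9) further rescales this update by a factor gamma and adds the interpolated point cloud i; the sketch above only covers the generic conditional step of Eq 3.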
While DDPM showcases some advantageous attributes within PCU, it also harbors certain potential limitations:", + "bbox": [ + 498, + 210, + 890, + 256 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Limitation 1: The lack of effective prior knowledge in the 3D field results in the weak feature perception capability for point cloud conditional networks [14, 31, 47], significantly affecting the final generation results (Tab 8). Although some methods [23] compensate for this problem via a two-stage (coarse-to-fine) training approach, they require a higher training cost.", + "- Limitation 2: The auto-regressive nature of DDPM provides robust modeling capabilities for fixed-scale objects during training, but it struggles to generate high-quality arbitrary-scale ones during inference (Tab 9). Some works treat different scale point cloud upsampling as multiple tasks [30, 43, 44], but it's not advisable for DDPM due to the excessively high training cost." + ], + "bbox": [ + 500, + 257, + 890, + 468 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4. Methodology", + "text_level": 1, + "bbox": [ + 500, + 482, + 633, + 500 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1. Dual mapping Formulation", + "text_level": 1, + "bbox": [ + 500, + 507, + 746, + 523 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For limitation 1, we adopt a dual mapping paradigm. We first provide a formal exposition of its conception, subsequently delineating the manner in which PUDM aligns with these principles, with a particular emphasis on its role.", + "bbox": [ + 498, + 529, + 890, + 589 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given two point sets of $\\pmb{x}^1 = \\{x_i^1 \\in \\mathbb{R}^3 | i = 1..M\\}$ and $\\pmb{x}^2 = \\{x_i^2 \\in \\mathbb{R}^3 | i = 1..N\\}$ from different data distributions, a network $f_x$ with a dual-branch architecture $(f_x = \\{f_1, f_2\\})$ , and the corresponding supervision signals for these branches $(l_x = \\{l_1, l_2\\})$ , if $f_x$ satisfies:", + "bbox": [ + 498, + 590, + 890, + 666 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {y} ^ {1} = f _ {1} \\left(\\boldsymbol {x} ^ {1}\\right), \\quad \\boldsymbol {y} ^ {2} = f _ {2} \\left(\\boldsymbol {x} ^ {2}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 593, + 679, + 890, + 698 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\pmb{y}^1 = \\{y_i^1\\in \\mathbb{R}^3|i = 1..M\\}$ , $\\pmb{y}^2 = \\{y_i^2\\in \\mathbb{R}^3|i = 1..N\\}$ . $f_{x}$ can be claimed as a dual mapping network. Eq 4 means that each element in the original input has one and only one corresponding element in the final output in each branch.", + "bbox": [ + 498, + 702, + 890, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In PUDM, we only require the conditional network to meet the above condition, because the noise network inherently builds a one-to-one point-wise mapping between the input and the output [23]. 
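[Editorial sketch] The dual mapping condition of Eq 4 can be pinned down by an interface alone: each branch must return exactly one output point per input point, in the same order. The wrapper below is a minimal sketch of that contract; branch_c and branch_n are placeholders standing in for the conditional and noise branches, not the actual PUDM modules.

import torch.nn as nn

class DualMappingNet(nn.Module):
    # f_x = {f_1, f_2}: each branch preserves the cardinality and ordering of its
    # own input point set, so every input point has exactly one output point (Eq 4).
    def __init__(self, branch_c: nn.Module, branch_n: nn.Module):
        super().__init__()
        self.f1, self.f2 = branch_c, branch_n

    def forward(self, x1, x2):
        y1 = self.f1(x1)          # x1: (B, M, 3) -> y1: (B, M, 3)
        y2 = self.f2(x2)          # x2: (B, N, 3) -> y2: (B, N, 3)
        assert y1.shape == x1.shape and y2.shape == x2.shape
        return y1, y2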
Specifically, we first force the output $\\pmb{c}^{\\prime} = \\{c_{i}^{\\prime}\\in \\mathbb{R}^{3}|i = 1..M\\}$ from the conditional network $f_{\\psi}$ to approximate the sparse point cloud $\\pmb {c} = \\{c_i\\in \\mathbb{R}^3 |i = 1..M\\}$ coordinates via MLPs, and then optimize the process by the mean squared error loss:", + "bbox": [ + 498, + 779, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "20788", + "bbox": [ + 478, + 941, + 517, + 953 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nL (\\psi) = \\mathbb {E} _ {\\boldsymbol {c} \\sim P _ {c}} \\| \\boldsymbol {c} - \\boldsymbol {c} ^ {\\prime} \\| ^ {2} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 104, + 468, + 122 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Formally, this establishes a one-to-one point-wise mapping between the input and the output for the conditional network, $\\pmb{c}^{\\prime} = f_{\\psi}(\\pmb {c}) = \\mathcal{D}_{c}(\\mathcal{E}_{c}(\\pmb {c},\\mathcal{T}\\mathcal{M}(\\mathcal{E}_{n}(\\pmb{x}_{t},r,t))))$ , as shown in Fig 2. $\\mathcal{T}\\mathcal{M}(\\cdot)$ denotes the Transfer Module defined in Sec 4.3.", + "bbox": [ + 76, + 128, + 468, + 203 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For point cloud tasks with unordered structures, this pattern effectively enhances network capability in capturing point features by preserving the ordered relationships between input and output points [3, 12]. Moreover, corresponding supervision signals ensure adequate training for each branch network (Fig 7), providing an effective strategy to address the challenge of lacking robust 3D pre-trained models for conditional branch networks in point cloud generation tasks.", + "bbox": [ + 75, + 204, + 470, + 339 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Rate Modeling", + "text_level": 1, + "bbox": [ + 76, + 349, + 227, + 366 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For limitation 2, drawing inspiration from the practice of adding class labels in conditional probabilistic models [6, 10, 26], we propose a simple and effective approach to achieve high-quality arbitrary-scale sampling during inference. Specifically, we first add a rate label $r$ to each sample pair, $(c, x) \\to (c, x, r)$ (the supplementary materials provide ablation studies for different forms of the rate label $r$ ). Subsequently, we parameter the rate factor using an embedding layer. In this way, the reverse process of DDPM is:", + "bbox": [ + 75, + 372, + 468, + 508 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\theta} \\left(\\boldsymbol {x} _ {\\mathbf {0}: T}, \\boldsymbol {c}, r\\right) = p \\left(\\boldsymbol {x} _ {T}\\right) \\prod_ {t = 1} ^ {T} p _ {\\theta} \\left(\\boldsymbol {x} _ {t - 1} \\mid \\boldsymbol {x} _ {t}, \\boldsymbol {c}, r\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 532, + 468, + 574 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Eq 6 demonstrates that this simply adds an additional condition to DDPM, the rate prior $r$ , without increasing the number of samples. Unlike class labels, we found in experiments that this conditional prior we exploited can significantly improve the generation quality of unseen-scale point clouds. 
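[Editorial sketch] The two ingredients just introduced, the point-wise reconstruction loss of Eq 5 and the rate prior of Eq 6, are simple to express in code. The sketch below parameterizes the rate factor with an embedding layer and penalizes the conditional branch with a mean squared error against the input sparse coordinates. The module names and dimensions are assumptions for illustration; during training this term would be added to the noise-prediction loss, consistent with the weighted sum given later in Eq 8.

import torch.nn as nn
import torch.nn.functional as F

class RateEmbedding(nn.Module):
    # Parameterizes the integer upsampling rate r as a learned vector,
    # used as an extra global condition in the reverse process (Eq 6).
    def __init__(self, max_rate=32, dim=128):
        super().__init__()
        self.table = nn.Embedding(max_rate + 1, dim)

    def forward(self, r):               # r: (B,) integer rates, e.g. 4 for 4x
        return self.table(r)            # (B, dim)

def condition_branch_loss(c_pred, c):
    # Eq 5: force the C-Net output c' back onto the sparse input coordinates,
    # which fixes a one-to-one point-wise mapping for the conditional branch.
    return F.mse_loss(c_pred, c)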
The reason is that generating unseen-scale and seen-category objects usually are easier compared to generating seen-scale and unseen-category ones for models.", + "bbox": [ + 75, + 584, + 468, + 705 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.3. Network Architecture", + "text_level": 1, + "bbox": [ + 76, + 714, + 282, + 729 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we introduce the overall framework of PUDM, consisting of three crucial components: the conditional network (C-Net), the noise network (N-Net), and the Transfer Module (TM). This process is remarkably illustrated in Fig 2. The parameter setting and implementation details are provided in the supplementary materials.", + "bbox": [ + 75, + 737, + 468, + 827 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The Conditional Network (C-Net). We use PointNet++ [29] as the backbone. This follows the standard U-Net framework. The encoder and decoder are composed of multiple Set Abstraction (SA) layers and Feature Propagation", + "bbox": [ + 76, + 828, + 468, + 888 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/48c53407bff01a253261e6c0afaccb535c5cf92c36b66d5ac18fa84b9c22d0af.jpg", + "image_caption": [ + "Figure 2. The overall framework of PUDM: The N-Net (upper branch) and the C-Net (lower branch) both establish a one-to-one point-wise mapping between input and output using mean squared error loss. They engage in information exchange through a transfer module (TM). Simultaneously, the rate prompt is provided to exploit the prior knowledge of the scale between sparse point clouds and dense point clouds." + ], + "image_footnote": [], + "bbox": [ + 504, + 88, + 893, + 261 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "(FP) layers, respectively. Unlike PointNet++ using the max-pooling layer to filter features, we consider utilizing the self-attention layer to retain more fine-grained information [25, 48]. In addition, we only feed the sparse point cloud into the C-Net to ensure the feature extraction in a pure and effective manner.", + "bbox": [ + 496, + 393, + 890, + 482 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The Noise Network (N-Net). The N-Net and the C-Net share the same network architecture. In contrast to the C-Net, we need to introduce additional guidance information to the N-Net for modeling the diffusion steps.", + "bbox": [ + 496, + 483, + 890, + 544 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We first transform the sparse point cloud $\\mathbf{c} \\in \\mathbb{R}^{N \\times 3}$ into the interpolation point cloud $\\mathbf{i} \\in \\mathbb{R}^{rN \\times 3}$ through the midpoint interpolation [9], and then sum $\\mathbf{i}$ and $\\mathbf{x}_t$ as the input for the N-Net. Meanwhile, we extract the global features from $\\mathbf{i}$ to enhance the semantic understanding. Furthermore, to identify the noise level, we encode the time step $t$ . Finally, as mentioned in Sec 4.2, we parameterized the rate factor $r$ . These additional pieces of information are both treated as global features, and incorporated into each stage of the encoder and the decoder in the N-Net.", + "bbox": [ + 496, + 542, + 892, + 694 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The Transfer Module (TM). We propose a bidirectional interaction module (TM) to serve as an intermediary between the C-Net and the N-Net. 
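[Editorial sketch] The guidance fed to the N-Net, as described above, combines the midpoint-interpolated cloud with the noisy sample and injects the time step and rate factor as global features. The sketch below shows one plausible way to assemble these inputs; point_mlp, the pooled global feature, and the interpolated cloud passed in as interp are placeholders (the actual midpoint interpolation follows Grad-PU [9]), so treat this as an assumption-laden outline rather than the paper's code.

import math
import torch

def sinusoidal_embedding(t, dim=128):
    # Standard sinusoidal encoding of the diffusion step t: (B,) -> (B, dim).
    half = dim // 2
    freqs = torch.exp(-math.log(10000.0) * torch.arange(half, device=t.device).float() / (half - 1))
    args = t.float()[:, None] * freqs[None, :]
    return torch.cat([torch.sin(args), torch.cos(args)], dim=-1)

def build_nnet_guidance(x_t, interp, t, rate_vec, point_mlp):
    # x_t: noisy dense cloud (B, rN, 3); interp: cloud of the same size obtained from
    # the sparse input c by midpoint interpolation; rate_vec: (B, D_r) rate embedding.
    pts_in = x_t + interp                                  # summed point-wise input to the N-Net
    g_shape = point_mlp(interp).max(dim=1).values          # global feature pooled from interp
    g = torch.cat([g_shape, sinusoidal_embedding(t), rate_vec], dim=-1)
    return pts_in, g                                       # g is injected at every encoder/decoder stage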
We only place the TM at the bottleneck stage of U-Net, due to the significant computational efficiency and the abundant semantic information via the maximum receptive field [12, 15].", + "bbox": [ + 496, + 695, + 890, + 786 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given the outputs of the encoder in the C-Net and the N-Net, $F^{c} \\in \\mathbb{R}^{N_{e}^{c} \\times C_{e}^{c}}$ , $F^{n} \\in \\mathbb{R}^{N_{e}^{n} \\times C_{e}^{n}}$ separately, the TM first transforms $F^{c} \\to (Q) \\in \\mathbb{R}^{N_{e}^{c} \\times C_{i}}$ and $F^{n} \\to (K, V) \\in \\mathbb{R}^{N_{e}^{n} \\times C_{i}}$ via MLPs. Next, we can obtain the fused feature:", + "bbox": [ + 496, + 786, + 890, + 845 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nF _ {f} = M L P \\left(\\operatorname {s o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {C _ {i}}}\\right) V\\right) + F ^ {c} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 558, + 856, + 890, + 891 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "20789", + "bbox": [ + 478, + 941, + 517, + 953 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Subsequently, $F_{f}$ is fed into a feed-forward network (FFN) to output the final features. Similarly, the same operation is also applied in reverse direction, so that information flows in both directions, $F^{c} \\rightarrow F^{n}$ and $F^{n} \\rightarrow F^{c}$ .", + "bbox": [ + 76, + 90, + 468, + 151 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.4. Training and Inference", + "text_level": 1, + "bbox": [ + 76, + 161, + 290, + 176 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training. As mentioned earlier (Eq 1 and Eq 5), PUDM is a dual mapping network, and models the rate prior during training. Therefore, the training objective is:", + "bbox": [ + 76, + 184, + 468, + 229 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {m s e} = L (\\theta) + \\alpha L (\\psi) \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 244, + 468, + 260 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\alpha$ means a weighting factor ( $\\alpha = 1$ in this paper).", + "bbox": [ + 76, + 267, + 447, + 281 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Inference. We found that adding the interpolated points $\\mathbf{i}$ as the guidance information significantly improves the generated quality during inference. Therefore, we iteratively transform $\\boldsymbol{x}_t$ into $\\boldsymbol{x}_0$ based on:", + "bbox": [ + 76, + 282, + 468, + 342 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} _ {t - 1} = \\gamma \\left(\\frac {1}{\\sqrt {\\alpha_ {t}}} \\left(\\boldsymbol {x} _ {t} - \\frac {1 - a _ {t}}{\\sqrt {1 - \\bar {\\alpha} _ {t}}} \\epsilon_ {\\theta} \\left(\\boldsymbol {x} _ {t}, \\boldsymbol {c}, r, t\\right)\\right) + \\boldsymbol {\\sigma} _ {t} \\boldsymbol {\\epsilon} + \\mathbf {i}\\right) \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 78, + 352, + 468, + 397 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\gamma$ denotes a scale factor ( $\\gamma = 0.5$ in this paper).", + "bbox": [ + 76, + 398, + 434, + 414 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 426, + 209, + 444 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. 
Experiment Setup", + "text_level": 1, + "bbox": [ + 76, + 450, + 253, + 468 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Dataset. In our experiments, we utilize two public benchmarks (PUGAN [17], PU1K [30]) for evaluation. We adhere to the official training/testing partitioning protocols for these datasets. This uses Poisson disk sampling [45] to generate 24,000 and 69,000 uniform patches for training, respectively. Each patch contains 256 points, while the corresponding ground truth has 1024 points. Meanwhile, 27 and 127 point clouds are used for testing, respectively. The input sparse point clouds consist of 2048 points, and are upsampled to $2048 \\times R$ points via evaluated methods.", + "bbox": [ + 75, + 474, + 468, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Metrics. Following [9, 43, 44], we employ the Chamfer Distance $(\\mathrm{CD} \\times 10^{-3})$ , Hausdorff Distance $(\\mathrm{HD} \\times 10^{-3})$ , and Point-to-Surface Distance $(\\mathrm{P2F} \\times 10^{-3})$ as evaluation metrics in our experiments.", + "bbox": [ + 76, + 626, + 468, + 686 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.2. Comparison with SOTA", + "text_level": 1, + "bbox": [ + 76, + 695, + 297, + 710 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Results on PUGAN. We first conduct the point cloud upsampling at low upsampling rate $(4\\times)$ and high upsampling rate $(16\\times)$ on PUGAN. Tab 1 illustrates the substantial superiority of our method in geometric detail description compared to other methods, as evidenced by significantly reduced CD and HD. Because our method models the gradient of data distribution from dense point clouds, facilitating the direct approximation of geometric details from the ground truth, thereby yielding higher accuracy of our results. Fig 3 further substantiates our viewpoint, and shows that our method produces fewer outliers, aligning with more uniform surfaces, closer to the ground truth.", + "bbox": [ + 75, + 719, + 468, + 898 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In addition, despite P2F falling slightly behind Grad-PU [9] at $4\\times$ , the difference is insignificant due to the asymmetry between points and surfaces [9, 17].", + "bbox": [ + 500, + 90, + 890, + 137 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/de92d1212254e59d21be41e7f71f0b6532b51777d0ab7f3395ccd533da809c4b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methods4×16×
CD↓HD↓P2F↓CD↓HD↓P2F↓
PU-Net [44]0.5296.8054.4600.5108.2066.041
MPU [43]0.2926.6722.8220.2197.0543.085
PU-GAN [17]0.2825.5772.0160.2076.9632.556
Dis-PU [18]0.2743.6961.9430.1674.9232.261
PU-EVA [21]0.2773.9712.5240.1855.2732.972
PU-GCN [30]0.2683.2012.4890.1614.2832.632
NePS [7]0.2593.6481.9350.1524.9102.198
Grad-PU [9]0.2452.3691.8930.1082.3522.127
Ours0.1311.2201.9120.0821.1202.114
", + "bbox": [ + 503, + 151, + 890, + 267 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Arbitrary Upsampling Rates on PUGAN. Similarly to [9], we perform comparative analyses across different rates on PUGAN. Tab 2 shows that our method steadily outperforms Grad-PU [9] across nearly all metrics. In particular, our method demonstrates a significant performance advantage in terms of CD and HD, further affirming the superiority in learning complex geometric details.", + "bbox": [ + 498, + 314, + 890, + 419 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Moreover, we visualize the results at higher upsampling rates $(16\\times, 32\\times, 64\\times,$ and $128\\times)$ in Fig 4. Our results obviously exhibit more complete, uniform, and smooth compared to Grad-PU [9].", + "bbox": [ + 498, + 420, + 890, + 479 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/d61c465670b6f38236a03243ed057ba6ad3daed68d7bfda5fd5cbfcac427f697.jpg", + "table_caption": [ + "Table 1. The results of $4 \\times$ and $16 \\times$ on PUGAN. Our method significantly surpasses other methods in terms of CD and HD." + ], + "table_footnote": [], + "table_body": "
RatesGrad-PU [9]Ours
CD↓HD↓P2F↓CD↓HD↓P2F↓
0.5403.1771.7750.2471.4101.812
0.3532.6081.6540.1711.2921.785
0.2342.5491.8360.1161.2441.794
0.2252.5261.9810.1071.2351.980
0.2192.6341.9400.1061.2311.952
", + "bbox": [ + 503, + 494, + 875, + 574 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/79174611f9e2bf16bd433f503c135cddd0c2bd14f576f6227f90dedaa5f2effa.jpg", + "table_caption": [ + "Table 2. Grad-PU vs. ours at different rates on PUGAN. Benefiting from the rate modeling, our method still exhibits remarkable performance at different rates." + ], + "table_footnote": [], + "table_body": "
MethodsCD↓HD↓P2F↓
PU-Net [44]1.15515.1704.834
MPU [43]0.93513.3273.511
PU-GCN [30]0.5857.5772.499
Grad-PU [9]0.4043.7321.474
Ours0.2172.1641.477
", + "bbox": [ + 503, + 648, + 890, + 715 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 3. The results of $4 \\times$ on PU1K. We utilize the experimental results from the original paper. Our method outperforms other methods across nearly all metrics.", + "bbox": [ + 500, + 719, + 890, + 760 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Results on PU1K. Furthermore, we also conduct the evaluation at $4 \\times$ on more challenging PU1K [30]. As reported in Tab 3, our method continues to demonstrate substantial advantages in terms of CD and HD compared to other methods.", + "bbox": [ + 498, + 777, + 890, + 852 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Result on Real datasets. Additionally, we conduct the evaluation on real indoor (ScanNet [5]) and outdoor (KITTI [8]) scene datasets. Note that all methods are only trained", + "bbox": [ + 498, + 854, + 890, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "20790", + "bbox": [ + 478, + 941, + 519, + 953 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/90fff76d4415f2ac586b9b84ce1da06869b125f6214f172f5c983bf01a912df8.jpg", + "image_caption": [ + "Figure 3. Visualization results at $4\\times$ on PUGAN. Our result exhibits fewer outliers, and clearly captures geometric details from the ground truth (the holes on the casting)." + ], + "image_footnote": [], + "bbox": [ + 81, + 87, + 893, + 250 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/064b9ae13cac1aa8005976ecf0ef5d7802f449b4a9f26c572dc317b6f425c604.jpg", + "image_caption": [ + "Figure 4. Grad-PU vs. ours at large rates on PUGAN. Our method consistently generates more uniform and smooth surfaces (these results are achieved using an NVIDIA 3090 GPU)." + ], + "image_footnote": [], + "bbox": [ + 86, + 310, + 460, + 628 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "on PUGAN. Upsampling scene-level point clouds poses greater challenges than upsampling object-level ones, due to the former having more intricate geometric structures. Due to the absence of the ground truth, our analysis is confined to qualitative comparisons. In Fig 5, our method still generates reasonable and smooth surfaces on some complex structures, while other methods exhibit artifacts such as overlap and voids. Simultaneously, Fig 6 illustrates that our results show more complete and fewer outliers. Although Grad-PU [9] also demonstrates good outlier results, it generates a considerable amount of uneven surfaces.", + "bbox": [ + 75, + 705, + 472, + 875 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. Validation for Noise Robustness", + "text_level": 1, + "bbox": [ + 500, + 313, + 779, + 329 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Gaussian Noise. To demonstrate the robustness, we perturb the sparse point clouds with Gaussian noise sampled $\\mathcal{N}(0,I)$ added at different noise levels $\\tau$ .", + "bbox": [ + 498, + 337, + 890, + 382 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Tab 4, our method significantly outperforms other methods under multiple level noise perturbations ( $\\tau = 0.01$ , $\\tau = 0.02$ ). 
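[Editorial sketch] For reference, the perturbation used in this robustness study amounts to adding zero-mean, unit-variance Gaussian samples scaled by the level tau to the sparse input cloud; a minimal sketch (shapes assumed) is:

import torch

def perturb_with_gaussian(points, tau):
    # points: (B, M, 3) sparse clouds; tau: noise level, e.g. 0.01 or 0.02 as in Tab 4.
    return points + tau * torch.randn_like(points)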
Specifically, this is because our method models the noise $\\epsilon$ (the gradient of data distribution) and avoids CD loss during training.", + "bbox": [ + 498, + 383, + 892, + 458 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/b3fbec42fb03ebbc0187bd4e91a55f5b844a62f9faa499db968e8c50e019b1b2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Noise Levels Methodsτ = 0.01τ = 0.02
CD↓HD↓P2F↓CD↓HD↓P2F↓
PU-Net [44]0.6288.0689.8161.07810.86716.401
MPU [43]0.5066.9789.0590.92910.82015.621
PU-GAN [17]0.4646.0707.4980.88710.60215.088
Dis-PU [18]0.4195.4136.7230.8189.34514.376
PU-EVA [21]0.4595.3777.1890.8399.32514.652
PU-GCN [30]0.4485.5866.9890.8168.60413.798
NePS [7]0.4255.4386.5460.7989.10212.088
Grad-PU [9]0.4144.1456.4000.7667.33611.534
Ours0.2102.4306.0700.5295.4719.742
", + "bbox": [ + 503, + 470, + 890, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 4. The results of $4 \\times$ at low-level Gaussian noise on PUGAN. Our method significantly outperforms other methods in terms of noise robustness.", + "bbox": [ + 498, + 585, + 890, + 627 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Moreover, we also conduct the evaluation under more challenging noise perturbations. Tab 5 shows that our method exhibits stronger robustness results at higher level noise perturbations ( $\\tau = 0.05$ and $\\tau = 0.1$ ). This indicates that our method exhibits a trend of resilience for the noise robustness.", + "bbox": [ + 496, + 643, + 890, + 733 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Other Noise. Furthermore, we also investigated the performance of our method on uniform noise. Admittedly, while our method still keeps SOTA performance, as shown in Tab 6, the results on uniform noise show significantly lower than that on Gaussian noise.", + "bbox": [ + 496, + 734, + 890, + 808 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We provide an intuitive explanation. Eq 2 demonstrates that the training objective of DDPM is to fit the gradient of data distribution (modeling the noise $\\epsilon$ , named score) [37]. Essentially, DDPM learns the direction of noise generation. When the conditions with noise are considered, the disturbance in the direction exhibits relatively small, because the", + "bbox": [ + 496, + 810, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "20791", + "bbox": [ + 478, + 941, + 517, + 954 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1f2b71bfcd4250bb6bd32e18e2c92a351ec9a9ecc902be7c53fa8f8e0a7ddcd2.jpg", + "image_caption": [ + "Figure 5. The results of $4 \\times$ on KITTI. Our method noticeably generates more reasonable and uniform results on some complex geometric structures." + ], + "image_footnote": [], + "bbox": [ + 78, + 89, + 893, + 200 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/8ea52825783d60da96befc77db6ca76bb7be6a86bbd73a6e6ae667ca59a94cb7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Noise Levels\nMethodsτ = 0.05τ = 0.1
CD↓HD↓P2F↓CD↓HD↓P2F↓
PU-Net [44]1.37013.72923.2491.49814.19323.846
MPU [43]1.24711.64522.1891.32112.41523.841
PU-GAN [17]1.1249.09121.2521.27110.91123.174
Dis-PU [18]1.0767.92120.6031.24410.91322.845
PU-EVA [21]1.0577.91020.0441.2269.30522.296
PU-GCN [30]1.2639.86922.8351.45611.06325.213
NePS [7]1.1439.64518.6421.1989.87420.162
Grad-PU [9]0.9788.05716.9271.1188.94618.845
Ours0.6185.38614.7510.8536.23916.845
", + "bbox": [ + 81, + 263, + 467, + 375 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "noise has a similar distribution to $\\epsilon$ . Therefore, during inference, our method demonstrates robustness to approximating noise distributions of $\\epsilon$ (Gaussian noise), but performs poorly when faced with different ones (the supplementary materials provide more noise experiments to support this conclusion).", + "bbox": [ + 75, + 445, + 468, + 536 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/da6b882a0f03bb7b860f54b788d5ec60371afdf8d9fb7c960a6ca9990f7040d0.jpg", + "table_caption": [ + "Table 5. The results of $4 \\times$ at high-level Gaussian noise on PUGAN. Compared to other methods, our method demonstrates a more favorable upward trend for robustness to noise." + ], + "table_footnote": [], + "table_body": "
Noise Levels Methodsτ = 0.05τ = 0.1
CD↓HD↓P2F↓CD↓HD↓P2F↓
PU-Net [44]1.49014.47323.2231.72515.44225.251
MPU [43]1.22410.84220.4561.54511.64523.512
PU-GAN [17]1.0347.75718.6171.3279.70021.321
Dis-PU [18]1.0066.85617.8731.3147.46320.980
PU-EVA [21]1.0247.53418.1791.3348.05621.158
PU-GCN [30]1.0459.64318.8991.32510.87721.633
NePS [7]1.0487.34518.0541.3219.64521.314
Grad-PU [9]1.0676.63417.7341.3997.21521.028
Ours0.9986.11017.5581.3106.73220.564
", + "bbox": [ + 81, + 549, + 467, + 660 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.4. Effectiveness in Downstream Task", + "text_level": 1, + "bbox": [ + 76, + 718, + 375, + 734 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We evaluate the effectiveness of upsampling quality in the downstream task: point cloud classification. Meanwhile, we also conducted experiments on point cloud part segmentation, please refer to the supplementary materials.", + "bbox": [ + 75, + 741, + 467, + 801 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "PointNet [28] and PointNet++ [29] are chosen as the downstream task models due to their significant performance and widespread influence in 3D tasks. We follow the official training and testing procedures. Simultaneously, we select ModelNet40 [40] (40 categories) and ShapeNet [1] (16 categories) as the benchmarks for point cloud clas", + "bbox": [ + 75, + 801, + 467, + 893 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "sification. For a fair and effective evaluation, we use only 3D coordinates as the input. Similar to the evaluated strategy on real datasets, all point cloud upsampling methods are only trained on PUGAN.", + "bbox": [ + 496, + 263, + 890, + 324 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For evaluation, we first subsample 256/512 points from test point clouds on ModelNet40/ShapeNet. Subsequently, they are upsampled to 1024/2048 points through evaluation methods. As depicted in Tab 7, our results significantly improve the classification accuracy compared to the low-res point clouds, and consistently outperforms other methods across all metrics.", + "bbox": [ + 496, + 324, + 890, + 429 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ace120a5cd65541b12e705926704ecd157745c7a2dec3a9cbf0f1e5133d6c6c5.jpg", + "table_caption": [ + "Table 6. The results of $4 \\times$ at high-level uniform noise on PUGAN. Our method outperforms other methods on all metrics." + ], + "table_footnote": [], + "table_body": "
DatasetsModelNet40 (%)ShapeNet (%)
ModelsPointNetPointNet++PointNetPointNet++
MethodsIA↑CA↑IA↑CA↑IA↑CA↑IA↑CA↑
Low-res87.1583.1288.8784.4597.6195.0998.2096.11
High-res90.7487.1492.2489.9198.8996.6199.2798.18
PU-Net [44]88.7285.2588.9985.4397.9995.6998.5796.35
MPU [43]89.0485.8489.5486.5198.0395.9298.9496.81
PU-GAN [17]89.9585.6890.4587.2398.7595.7090.4587.23
Dis-PU [18]88.7085.3489.5686.5398.8096.0799.0097.15
PU-EVA [21]89.2785.6389.9686.8698.7295.6999.0797.58
PU-GCN [30]89.7785.3889.4586.1598.7896.0699.0397.42
NePS [7]90.0186.1590.3287.3498.9496.2099.1297.94
Grad-PU [9]90.0586.0689.9887.4998.8296.1999.1097.63
Ours90.3386.5492.1489.4298.8596.5899.1397.99
", + "bbox": [ + 503, + 441, + 890, + 569 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 7. The results of point cloud classification. \"Low-res\" refers to the point cloud subsampled, while \"High-res\" denotes the original test point cloud. Meanwhile, \"IA\" stands for instance accuracy, and \"CA\" denotes class accuracy. Our results have more reasonable, finer-grained, and closer-to-ground truth geometric structures, thereby achieving more significant classification accuracy.", + "bbox": [ + 496, + 571, + 890, + 656 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.5. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 685, + 653, + 702 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "With/Without the dual mapping paradigm. Thanks to the rich and structured data, the conditional networks for text or images can be replaced by powerful pre-trained models [34-36, 41]. However, robust pre-trained backbones are lacking in the 3D field due to scarce data and challenging feature extraction [14, 31, 47]. In this paper, we employ the dual mapping paradigm to augment the capability of perceiving point features for PUDM, ensuring the comprehensive training of the C-Net. To validate this point, we remove the supervision signal from the C-Net to disrupt this pattern. Meanwhile, we also validate the importance of the C-Net by retaining only the N-Net in PUDM.", + "bbox": [ + 496, + 708, + 890, + 890 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "20792", + "bbox": [ + 478, + 941, + 519, + 953 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/493648e1692ba9e130de1a46f1159344609e40badeb199ee00599ab89b217701.jpg", + "image_caption": [ + "Figure 6. The results of $4 \\times$ on ScanNet. Our results exhibit reduced instances of outliers, concurrently generating more uniform and complete surfaces." + ], + "image_footnote": [], + "bbox": [ + 78, + 89, + 893, + 195 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As reported in Tab 8, disrupting the dual mapping pattern leads to a significant decrease in performance due to the weakened point feature perception ability of the C-Net. Fig 7 visualizes the results of the C-Net generating input sparse points using the dual mapping paradigm.", + "bbox": [ + 75, + 258, + 468, + 335 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Meanwhile, although removing the C-Net can maintain a single mapping pattern, as demonstrated in prior research [21, 30, 44], sparse point cloud feature extraction plays a pivotal role in PCU.", + "bbox": [ + 75, + 335, + 468, + 395 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/0189e6ae053adc712c8ea52923813d633a74a1cac89c3054bc0b49bdce9b3ab9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodsCD↓HD↓P2F↓
Without the C-Net0.2122.0152.284
Without the dual mapping0.1681.4982.013
With the dual mapping0.1311.2201.912
", + "bbox": [ + 81, + 407, + 467, + 457 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0694b37cefe58ad43c6b8079627825b457931431fe8e7c100b2f1bde44e660e3.jpg", + "image_caption": [ + "Figure 7. Visualization results of the C-Net generating sparse point clouds on PUGAN. This demonstrates that the C-Net has been effectively trained." + ], + "image_footnote": [], + "bbox": [ + 81, + 513, + 465, + 683 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "With/Without the rate prior. As mentioned in Sec 4.2, we introduce the rate prior into PUDM during training to achieve high-quality generation of point clouds during inference. Tab 9 demonstrates the effectiveness of this approach. Without the rate prior, the overall performance notably decreases, and exhibits significant fluctuations (performing better at $4\\times$ , but worse at other rates).", + "bbox": [ + 75, + 750, + 468, + 853 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Single/Multiple Transfer Module. In this paper, we employ a TM positioned at the bottleneck stage of the U-Net, as its maximum receptive field provides ample con", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7ac0188c410e2749301becf02d8846423e4b249120431eebd8853da8a073ba5a.jpg", + "table_caption": [ + "Table 8. Ablation study of the dual mapping paradigm. The dual mapping pattern evidently achieves the best performance." + ], + "table_footnote": [], + "table_body": "
RatesWithout the rate modelingWith the rate modeling
CD↓HD↓P2F↓CD↓HD↓P2F↓
0.2951.8162.0140.2471.4101.812
0.2241.5441.9750.1711.2921.785
0.1581.5121.8150.1311.2201.912
0.1661.5481.9440.1161.2441.794
0.1511.5281.9560.1071.2351.980
0.1441.4251.9880.1061.2311.952
0.1391.3991.9210.1041.2151.875
", + "bbox": [ + 503, + 258, + 890, + 358 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "textual information [12, 15]. Meanwhile, we also attempt to place multiple TMs at each stage in U-Net to enable the interaction of multi-scale information [23]. Tab 10 shows that although multiple TMs lead to a slight improvement in terms of CD loss, it is not cost-effective due to the significant increase in computational cost.", + "bbox": [ + 498, + 415, + 890, + 506 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/35d5af10d25e101337c3989a41488c0d1391d6d7a02283f8ef485caa726961aa.jpg", + "table_caption": [ + "Table 9. Ablation study of the rate prior. Utilizing the rate prior significantly enhances the quality of arbitrary-scale sampling." + ], + "table_footnote": [], + "table_body": "
MethodsCD↓HD↓P2F↓Params↓
Multiple TMs0.1291.2351.95328.65M
Single TM0.1311.2201.91216.03M
", + "bbox": [ + 501, + 520, + 890, + 556 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 10. Ablation study of the Transfer Module. Using the single TM strikes a balance between performance and efficiency.", + "bbox": [ + 500, + 558, + 890, + 585 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 617, + 617, + 633 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we systematically analyze and identify the potential of DDPM as a promising model for PCU. Meanwhile, we propose PUDM based on conditional DDPM. PUDM enables to directly utilize the dominant features to generate geometric details approximating the ground truth. Additionally, we analyze the limitations of applying DDPM to PCU (the absence of efficient prior knowledge for the conditional network and the fixed-scale object modeling), and propose corresponding solutions (a dual mapping paradigm and the rate modeling). Moreover, we offer a straightforward explanation regarding the robustness to noise for PUDM observed in experiments.", + "bbox": [ + 496, + 643, + 890, + 823 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments. This work was supported in part by the Jiangsu Geological Bureau ResearchProject under Grant 2023KY11, in part by the National Natural Science Foundation of China under Grant 61871226, and in part by the National Key R&D Program of China (NO.2022ZD0160101).", + "bbox": [ + 496, + 824, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "20793", + "bbox": [ + 478, + 941, + 517, + 954 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012, 2015. 7", + "[2] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3075-3084, 2019. 2", + "[3] Christopher Choy, Jaesik Park, and Vladlen Koltun. Fully convolutional geometric features. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8958-8966, 2019. 4", + "[4] Yaodong Cui, Ren Chen, Wenbo Chu, Long Chen, Daxin Tian, Ying Li, and Dongpu Cao. Deep learning for image and point cloud fusion in autonomous driving: A review. pages 722-739. IEEE, 2021. 1", + "[5] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5828-5839, 2017. 1, 5", + "[6] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 4", + "[7] Wanquan Feng, Jin Li, Hongrui Cai, Xiaonan Luo, and Juyong Zhang. Neural points: Point cloud representation with neural fields for arbitrary upsampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18633-18642, 2022. 
2, 5, 6, 7", + "[8] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. pages 1231-1237. Sage Publications Sage UK: London, England, 2013. 1, 5", + "[9] Yun He, Danhang Tang, Yinda Zhang, Xiangyang Xue, and Yanwei Fu. Grad-pu: Arbitrary-scale point cloud upsampling via gradient descent with learned distance functions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5354-5363, 2023. 2, 4, 5, 6, 7", + "[10] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 4", + "[11] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 3", + "[12] Shengyu Huang, Zan Gojcic, Mikhail Usvyatsov, Andreas Wieser, and Konrad Schindler. Predator: Registration of 3d point clouds with low overlap. In Proceedings of the IEEE/CVF Conference on computer vision and pattern recognition, pages 4267-4276, 2021. 4, 8", + "[13] Sheng Yu Huang, Hao-Yu Hsu, and Frank Wang. Spovt: Semantic-prototype variational transformer for dense point cloud semantic completion. Advances in Neural Information Processing Systems, 35:33934–33946, 2022. 2" + ], + "bbox": [ + 78, + 114, + 470, + 891 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Xiaoshui Huang, Sheng Li, Wentao Qu, Tong He, Yifan Zuo, and Wanli Ouyang. Frozen clip model is efficient point cloud backbone. arXiv preprint arXiv:2212.04098, 2022. 3, 7", + "[15] Xiaoshui Huang, Wentao Qu, Yifan Zuo, Yuming Fang, and Xiaowei Zhao. Imfnet: Interpretable multimodal fusion for point cloud registration. IEEE Robotics and Automation Letters, 7(4):12323-12330, 2022. 4, 8", + "[16] Jiaxin Li and Gim Hee Lee. Deepi2p: Image-to-point cloud registration via deep classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15960-15969, 2021. 1", + "[17] Ruihui Li, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-gan: a point cloud upsampling adversarial network. In Proceedings of the IEEE/CVF international conference on computer vision, pages 7203–7212, 2019. 1, 2, 5", + "[18] Ruihui Li, Xianzhi Li, Pheng-Ann Heng, and Chi-Wing Fu. Point cloud upsampling via disentangled refinement. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 344–353, 2021. 1, 2, 5, 6, 7", + "[19] Chen-Hsuan Lin, Chen Kong, and Simon Lucey. Learning efficient point cloud generation for dense 3d object reconstruction. In proceedings of the AAAI Conference on Artificial Intelligence, 2018. 1", + "[20] Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Pointvoxel cnn for efficient 3d deep learning. Advances in Neural Information Processing Systems, 32, 2019. 2", + "[21] Luqing Luo, Lulu Tang, Wanyi Zhou, Shizheng Wang, and Zhi-Xin Yang. Pu-eva: An edge-vector based approximation solution for flexible-scale point cloud upsampling. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16208-16217, 2021. 1, 2, 5, 6, 7, 8", + "[22] Shitong Luo and Wei Hu. Diffusion probabilistic models for 3d point cloud generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2837-2845, 2021. 2", + "[23] Zhaoyang Lyu, Zhifeng Kong, Xudong Xu, Liang Pan, and Dahua Lin. A conditional point diffusion-refinement paradigm for 3d point cloud completion. 
arXiv preprint arXiv:2112.03530, 2021. 2, 3, 8", + "[24] Luke Melas-Kyriazi, Christian Rupprecht, and Andrea Vedaldi. Pc2: Projection-conditioned point cloud diffusion for single-image 3d reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12923-12932, 2023. 1", + "[25] Liang Pan, Xinyi Chen, Zhongang Cai, Junzhe Zhang, Haiyu Zhao, Shuai Yi, and Ziwei Liu. Variational relational point completion network. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8524-8533, 2021. 4", + "[26] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 4", + "[27] Anh Viet Phan, Minh Le Nguyen, Yen Lam Hoang Nguyen, and Lam Thu Bui. Dgcnn: A convolutional neural network over large-scale labeled graphs. Neural Networks, 108:533-543, 2018. 2" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "20794", + "bbox": [ + 478, + 941, + 519, + 953 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 2, 7", + "[29] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 2, 4, 7", + "[30] Guocheng Qian, Abdulellah Abualshour, Guohao Li, Ali Thabet, and Bernard Ghanem. Pu-gcn: Point cloud upsampling using graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11683-11692, 2021. 1, 2, 3, 5, 6, 7, 8", + "[31] Guocheng Qian, Xingdi Zhang, Abdullah Hamdi, and Bernard Ghanem. Pix4point: Image pretrained transformers for 3d point cloud understanding. 2022. 3, 7", + "[32] Yue Qian, Junhui Hou, Sam Kwong, and Ying He. Pugeonet: A geometry-centric network for 3d point cloud upsampling. In European conference on computer vision, pages 752-769. Springer, 2020. 2", + "[33] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pages 8821-8831. PMLR, 2021. 2", + "[34] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 7", + "[35] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2", + "[36] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 7", + "[37] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. 
arXiv preprint arXiv:2011.13456, 2020. 3, 6", + "[38] Hugues Thomas, Charles R Qi, Jean-Emmanuel Deschaud, Beatrix Marcotegui, François Goulette, and Leonidas J Guibas. Kpconv: Flexible and deformable convolution for point clouds. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6411–6420, 2019. 2", + "[39] Tong Wu, Liang Pan, Junzhe Zhang, Tai Wang, Ziwei Liu, and Dahua Lin. Balanced chamfer distance as a comprehensive metric for point cloud completion. Advances in Neural Information Processing Systems, 34:29088-29100, 2021. 2", + "[40] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In" + ], + "bbox": [ + 78, + 90, + 470, + 896 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 7", + "[41] Jiale Xu, Xintao Wang, Weihao Cheng, Yan-Pei Cao, Ying Shan, Xiaohu Qie, and Shenghua Gao. Dream3d: Zero-shot text-to-3d synthesis using 3d shape prior and text-to-image diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20908–20918, 2023. 7", + "[42] Lei Yang, Yanhong Liu, Jinzhu Peng, and Zize Liang. A novel system for off-line 3d seam extraction and path planning based on point cloud segmentation for arc welding robot. Robotics and Computer-Integrated Manufacturing, 64:101929, 2020. 1", + "[43] Wang Yifan, Shihao Wu, Hui Huang, Daniel Cohen-Or, and Olga Sorkine-Hornung. Patch-based progressive 3d point set upsampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5958–5967, 2019. 1, 2, 3, 5, 6, 7", + "[44] Lequan Yu, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-net: Point cloud upsampling network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2790–2799, 2018. 1, 2, 3, 5, 6, 7, 8", + "[45] Cem Yuksel. Sample elimination for generating poisson disk sample sets. In Computer Graphics Forum, pages 25-32. Wiley Online Library, 2015. 5", + "[46] Dandan Zhang, Weiyong Si, Wen Fan, Yuan Guan, and Chenguang Yang. From teleoperation to autonomous robot-assisted microsurgery: A survey. Machine Intelligence Research, 19(4):288-306, 2022. 1", + "[47] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8552-8562, 2022. 3, 7", + "[48] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 16259-16268, 2021. 4", + "[49] Yuchao Zheng, Yujie Li, Shuo Yang, and Huimin Lu. Global-pbnet: A novel point cloud registration for autonomous driving. pages 22312-22319. IEEE, 2022. 1", + "[50] Linqi Zhou, Yilun Du, and Jiajun Wu. 3d shape generation and completion through point-voxel diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5826–5835, 2021. 
2" + ], + "bbox": [ + 501, + 92, + 890, + 741 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "20795", + "bbox": [ + 478, + 941, + 519, + 953 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/80eefa2b-3d90-4d98-ab03-f2521d12efac_model.json b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/80eefa2b-3d90-4d98-ab03-f2521d12efac_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ca333b19af7122de19d14e9cacdf08ed09582c1a --- /dev/null +++ b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/80eefa2b-3d90-4d98-ab03-f2521d12efac_model.json @@ -0,0 +1,2387 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.129, + 0.13, + 0.843, + 0.178 + ], + "angle": 0, + "content": "A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.203, + 0.821, + 0.24 + ], + "angle": 0, + "content": "Wentao \\(\\mathbf{Q}\\mathbf{u}^{1}\\), Yuantian Shao\\(^{1}\\), Lingwu Meng\\(^{1}\\), Xiaoshui Huang\\(^{2*}\\), Liang Xiao\\(^{1*}\\) Nanjing University of Science and Technology\\(^{1}\\), Shanghai AI Laboratory\\(^{2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.242, + 0.921, + 0.258 + ], + "angle": 0, + "content": "{quwentao, alvin_s, menglw815}@njust.edu.cn, huangxiaoshui@163.com, xiaoliang@mail.njust.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.292, + 0.314, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.324, + 0.474, + 0.732 + ], + "angle": 0, + "content": "Point cloud upsampling (PCU) enriches the representation of raw point clouds, significantly improving the performance in downstream tasks such as classification and reconstruction. Most of the existing point cloud upsampling methods focus on sparse point cloud feature extraction and upsampling module design. In a different way, we dive deeper into directly modelling the gradient of data distribution from dense point clouds. In this paper, we proposed a conditional denoising diffusion probabilistic model (DDPM) for point cloud upsampling, called PUDM. Specifically, PUDM treats the sparse point cloud as a condition, and iteratively learns the transformation relationship between the dense point cloud and the noise. Simultaneously, PUDM aligns with a dual mapping paradigm to further improve the discernment of point features. In this context, PUDM enables learning complex geometry details in the ground truth through the dominant features, while avoiding an additional upsampling module design. Furthermore, to generate high-quality arbitrary-scale point clouds during inference, PUDM exploits the prior knowledge of the scale between sparse point clouds and dense point clouds during training by parameterizing a rate factor. Moreover, PUDM exhibits strong noise robustness in experimental results. 
In the quantitative and qualitative evaluations on PU1K and PUGAN, PUDM significantly outperformed existing methods in terms of Chamfer Distance (CD) and Hausdorff Distance (HD), achieving state of the art (SOTA) performance." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.775, + 0.21, + 0.79 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.8, + 0.47, + 0.877 + ], + "angle": 0, + "content": "Point clouds, as a most fundamental 3D representation, have been widely used in various downstream tasks such as 3D reconstruction [19, 24], autonomous driving [4, 16, 49], and robotics technology [42, 46]. However, raw point clouds captured from 3D sensors often exhibit sparsity," + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.288, + 0.895, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.445, + 0.894, + 0.515 + ], + "angle": 0, + "content": "Figure 1. Most existing methods achieving satisfactory results for input sparse point clouds with clear geometric structures (such as the hole on the green cover rear), but performing poorly for those with fuzzy geometric details (like the eyes of the red pig). However, our results, with close proximity to the ground truth." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.541, + 0.892, + 0.631 + ], + "angle": 0, + "content": "noise, and non-uniformity. This is substantiated across diverse publicly available benchmark datasets, such as KITTI [8], ScanNet [5]. Hence, point cloud upsampling, which involves the transformation of sparse, incomplete, and noisy point clouds into dense, complete, and artifact-free representations, has garnered considerable research interest." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.632, + 0.893, + 0.782 + ], + "angle": 0, + "content": "Inspired by deep learning, the pioneering work PU-Net [44] is the first to utilize deep neural networks to address this problem. This first divides the input point cloud into multiple patches and then extracts multi-scale features. Subsequently, these features are aggregated and fed into an upsampling module to approximate the dense point cloud coordinates. Building this approach, many works [17, 18, 21, 30, 43] optimize neural networks by focusing on sparse point cloud feature extraction and upsampling module design." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.783, + 0.895, + 0.889 + ], + "angle": 0, + "content": "However, while these methods have achieved improved results, predicting dense point cloud coordinates via sparse point cloud features is an indirect approximating approach. Typically, these methods first utilize an encoder to extract sparse point cloud features, and then use a carefully designed upsampling module to fit dense point cloud coordinates. This approach has three limitations. First, the non" + }, + { + "type": "page_footnote", + "bbox": [ + 0.102, + 0.885, + 0.471, + 0.898 + ], + "angle": 0, + "content": "*Corresponding Author. https://github.com/QWTforGithub/PUDM" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.943, + 0.521, + 0.955 + ], + "angle": 0, + "content": "20786" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.227 + ], + "angle": 0, + "content": "dominance of features causes the generated results to be more inclined toward input sparse point clouds, struggling to represent reasonable geometry details from the ground truth, as Fig 1 illustrated. 
Second, the additional upsampling module designs increase the workload for algorithm designers and often disrupt the intrinsic coordinate mappings in point clouds [30, 43, 44]. Third, they mostly require the joint supervision of the CD loss and other losses, resulting in them sensitive to noise [13, 39]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.228, + 0.471, + 0.424 + ], + "angle": 0, + "content": "In this paper, we consider the point cloud upsampling task as a conditional generation problem. This first explores the incorporation of probabilistic models for point cloud upsampling. We propose a novel point cloud upsampling network, called PUDM, which is formally based on a conditional DDPM. Unlike previous methods, PUDM models the gradient of data distribution from dense point clouds (i.e., the ground truth), directly utilizing the dominant features to fit the ground truth, and decoupling the dependency on CD loss. Moreover, the auto-regressive nature of DDPM enables PUDM to efficiently avoid the additional upsampling module design, ensuring intrinsic point-wise mapping relationships in point clouds." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.424, + 0.47, + 0.544 + ], + "angle": 0, + "content": "Simultaneously, to improve the ability of perceiving point features, PUDM employs a dual mapping paradigm. This naturally establishes a dual mapping relationship: between the generated sparse point cloud and the sparse point cloud, and between the dense point cloud and the noise. In this context, PUDM has the ability to learn complex geometric structures from the ground truth, generating uniform surfaces aligned with the ground truth, as Fig 1." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.545, + 0.47, + 0.635 + ], + "angle": 0, + "content": "Furthermore, we found that DDPM only models fixed-scale point cloud objects during training. To overcome this, we consider parameterizing a rate factor to exploit the prior knowledge of the scale between sparse point clouds and dense point clouds. In this way, PUDM enables to generate high-fidelity arbitrary-scale point clouds during inference." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.635, + 0.469, + 0.695 + ], + "angle": 0, + "content": "In additional, benefiting from the inherent denoising architecture and the non-dependency for CD loss, PUDM demonstrates a remarkable degree of robustness in noise experiments." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.696, + 0.397, + 0.71 + ], + "angle": 0, + "content": "Our key contributions can be summarized as:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.711, + 0.469, + 0.756 + ], + "angle": 0, + "content": "- We systematically analyze and recognize conditional DDPM as a favorable model for generating uniform point clouds at arbitrary scales in point cloud upsampling tasks." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.757, + 0.469, + 0.8 + ], + "angle": 0, + "content": "- We propose a novel network with a dual mapping for point cloud upsampling, named PUDM, which is based on conditional DDPM." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.801, + 0.469, + 0.846 + ], + "angle": 0, + "content": "- By exploiting the rate prior, PUDM exhibits the ability of generating high-fidelity point clouds across arbitrary scales during inference." 
+ }, + { + "type": "text", + "bbox": [ + 0.078, + 0.847, + 0.469, + 0.893 + ], + "angle": 0, + "content": "- Comprehensive experiments demonstrate the outstanding capability of PUDM in generating geometric details in public benchmarks of point cloud upsampling." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.711, + 0.469, + 0.893 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.09, + 0.651, + 0.107 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.115, + 0.892, + 0.462 + ], + "angle": 0, + "content": "Learnable Point Cloud Upsampling. The integration of deep learning with formidable data-driven and trainable attributes has markedly accelerated progress within the 3D field. Thanks to the powerful representation capabilities of deep neural networks, directly learning features from 3D data has become achievable, such as PointNet [28], PointNet++ [29], DGCNN [27], MinkowskiEngine [2], and KPConv [38]. Benefiting from the above, PU-Net [44] stands as the pioneer in integrating deep neural networks into point cloud upsampling tasks. This first aggregates multi-scale features for each point through multiple MLPs, and then expands them into a point cloud upsampling set via a channel shuffle layer. Following this pattern, some methods have achieved more significant results, such as MPU [43], PU-GAN [17], Dis-PU [18], and PU-GCN [30]. PU-EVA [21] is the first to achieve the arbitrary-scale point clouds upsampling via edge-vector based affine combinations in one-time training. Subsequently, PUGeo [32] and NePs [7] believe that sampling points within a 2D continuous space can generate higher-quality results. Furthermore, Grad-PU [9] transforms the point cloud upsampling task into a coordinate approximation problem, avoiding the upsampling module design." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.463, + 0.892, + 0.598 + ], + "angle": 0, + "content": "Most methods predict the dense point cloud coordinates via sparse point cloud features, and extend the point set relying on an upsampling module. This causes them to struggle to learn complex geometry details from the ground truth. Moreover, they frequently exhibit a susceptibility to noise due to depending on CD loss during training. In this paper, we consider transforming the point cloud upsampling task into a point cloud generation problem, and first utilize conditional DDPM to address the aforementioned issues." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.599, + 0.892, + 0.779 + ], + "angle": 0, + "content": "DDPM for Point Cloud Generation. Inspired by the success in image generation tasks [33-35], there has been greater attention on directly generating point clouds through DDPM. [22] represents the pioneering effort in applying DDPM to unconditional point cloud generation. Subsequently, [50] extends the application of DDPM to the point cloud completion task by training a point-voxel CNN [20]. However, the voxelization process introduces additional computational complexity. Furthermore, PDR [23] takes raw point clouds as input. But this requires training the two stages (coarse-to-fine) of diffusion models, resulting in a greater time overhead." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In this paper, we explore to the application of conditional DDPM to handle the point cloud upsampling task. 
Unlike the point cloud generation and completion task, point cloud upsampling exhibits the difference of the point cloud scale between training and inference. We overcome this issue by exploiting a rate prior. Meanwhile, our method based on a dual mapping paradigm enables to efficiently learn complex geometric details in a single-stage training." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.943, + 0.521, + 0.955 + ], + "angle": 0, + "content": "20787" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.09, + 0.444, + 0.107 + ], + "angle": 0, + "content": "3. Denoising Diffusion Probabilistic Models" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.115, + 0.295, + 0.13 + ], + "angle": 0, + "content": "3.1. Background for DDPM" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.138, + 0.468, + 0.289 + ], + "angle": 0, + "content": "The forward and reverse process. Given the dense point cloud \\( \\pmb{x} \\) sampled from a meaningful point distribution \\( P_{data} \\), and an implicit variable \\( \\pmb{z} \\) sampled from a tractable noise distribution \\( P_{latent} \\), DDPM establishes the transformation relationship between \\( \\pmb{x} \\) and \\( \\pmb{z} \\) through two Markov chains. This conducts an auto-regressive process: a forward process \\( q \\) that gradually adds noise to \\( \\pmb{x} \\) until \\( \\pmb{x} \\) degrades to \\( \\pmb{z} \\), and a reverse process \\( p_{\\theta} \\) that slowly removes noise from \\( \\pmb{z} \\) until \\( \\pmb{z} \\) recovers to \\( \\pmb{x} \\). We constrain the transformation speed using a time step \\( t \\sim \\mathcal{U}(T) \\) (\\( T = 1000 \\) in this paper)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.29, + 0.468, + 0.35 + ], + "angle": 0, + "content": "Training objective under specific conditions. Given a set of conditions \\( C = \\{c_i | i = 1..S\\} \\), the training objective of DDPM under specific conditions is (please refer to the supplementary materials for the detailed derivation):" + }, + { + "type": "equation", + "bbox": [ + 0.118, + 0.375, + 0.469, + 0.394 + ], + "angle": 0, + "content": "\\[\nL (\\theta) = \\mathbb {E} _ {t \\sim U (T), \\epsilon \\sim \\mathcal {N} (0, I)} | | \\epsilon - \\epsilon_ {\\boldsymbol {\\theta}} (\\boldsymbol {x} _ {t}, C, t) | | ^ {2} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.403, + 0.336, + 0.419 + ], + "angle": 0, + "content": "where \\(\\pmb{x}_{t} = \\sqrt{1 - \\overline{\\alpha}_{t}}\\pmb {\\epsilon} + \\sqrt{\\overline{\\alpha}_{t}}\\pmb{x_{0}}\\) [11]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.42, + 0.468, + 0.464 + ], + "angle": 0, + "content": "The gradient of data distribution. Furthermore, we use a stochastic differential equation (SDE) to describe the process of DDPM [37]:" + }, + { + "type": "equation", + "bbox": [ + 0.105, + 0.485, + 0.469, + 0.517 + ], + "angle": 0, + "content": "\\[\ns _ {\\theta} \\left(\\boldsymbol {x} _ {t}, t\\right) = \\nabla_ {x} \\log \\left(\\boldsymbol {x} _ {t}\\right) = - \\frac {1}{\\sqrt {1 - \\bar {\\alpha} _ {t}}} \\boldsymbol {\\epsilon} _ {\\theta} \\left(\\boldsymbol {x} _ {t}, t\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.524, + 0.468, + 0.573 + ], + "angle": 0, + "content": "The training objective of DDPM is essentially equivalent to computing the score (the gradient of data distribution), which differs only by a constant factor \\(-\\frac{1}{\\sqrt{1 - \\overline{\\alpha}_t}}\\)." 
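To make the conditional objective in Eq 1 and the score relation in Eq 2 concrete, here is a minimal PyTorch-style sketch of one training step; the linear β schedule and the `eps_net` callable are illustrative assumptions rather than the released PUDM implementation.

```python
import torch

T = 1000
betas = torch.linspace(1e-4, 0.02, T)           # assumed linear noise schedule
alphas = 1.0 - betas
alpha_bar = torch.cumprod(alphas, dim=0)        # \bar{alpha}_t

def conditional_ddpm_loss(eps_net, x0, cond, t=None):
    """Eq 1: predict the noise added to the dense cloud x0 given the sparse condition.

    x0:   (B, N, 3) dense ground-truth point cloud
    cond: (B, M, 3) sparse point cloud (the condition c)
    eps_net: hypothetical callable standing in for eps_theta(x_t, C, t)
    """
    B = x0.shape[0]
    if t is None:
        t = torch.randint(0, T, (B,), device=x0.device)            # t ~ U(T)
    eps = torch.randn_like(x0)                                      # eps ~ N(0, I)
    a_bar = alpha_bar.to(x0.device)[t].view(B, 1, 1)
    x_t = torch.sqrt(1.0 - a_bar) * eps + torch.sqrt(a_bar) * x0    # forward noising
    eps_pred = eps_net(x_t, cond, t)
    return ((eps - eps_pred) ** 2).mean()

def score_from_eps(eps_pred, t):
    """Eq 2: the score differs from the predicted noise only by -1/sqrt(1 - alpha_bar_t)."""
    a_bar = alpha_bar.to(eps_pred.device)[t].view(-1, 1, 1)
    return -eps_pred / torch.sqrt(1.0 - a_bar)
```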
+ }, + { + "type": "title", + "bbox": [ + 0.078, + 0.58, + 0.325, + 0.595 + ], + "angle": 0, + "content": "3.2. Analysis of DDPM for PCU" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.603, + 0.468, + 0.661 + ], + "angle": 0, + "content": "We pioneer the exploration of the advantages and limitations of DDPM for PCU, hoping these insights encourage more researchers to introduce probabilistic models into PCU." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.664, + 0.468, + 0.737 + ], + "angle": 0, + "content": "DDPM is an effective model for PCU. As mentioned in Sec 3.1, the auto-regressive nature of DDPM allows it to directly learn geometry details of the ground truth using the dominant features, generating closer-to-truth, fine-grained results." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.739, + 0.468, + 0.754 + ], + "angle": 0, + "content": "Simultaneously, the reverse process of DDPM in PCU is:" + }, + { + "type": "equation", + "bbox": [ + 0.135, + 0.765, + 0.469, + 0.806 + ], + "angle": 0, + "content": "\\[\np _ {\\theta} \\left(\\boldsymbol {x} _ {\\mathbf {0}: T}, \\boldsymbol {c}\\right) = p \\left(\\boldsymbol {x} _ {T}\\right) \\prod_ {t = 1} ^ {T} p _ {\\theta} \\left(\\boldsymbol {x} _ {t - 1} \\mid \\boldsymbol {x} _ {t}, \\boldsymbol {c}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.811, + 0.468, + 0.901 + ], + "angle": 0, + "content": "where \\( c \\) means the sparse point cloud sampled from a data distribution \\( P_{c} \\). According to Eq 3, the condition \\( c \\) participates in each step of the reverse process. In fact, this is usually achieved using an additional branch network interacting with the noise network, without intrinsically disrupting the auto-regressive process of DDPM, thus cleverly" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.166 + ], + "angle": 0, + "content": "avoiding to design an additional upsampling module. Moreover, the process naturally defines a one-to-one point-wise mapping relationship between the dense point cloud and the noise, preserving the order of points in the diffusion process." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.168, + 0.892, + 0.211 + ], + "angle": 0, + "content": "Furthermore, the efficient denoising architecture and the decoupling of CD loss significantly support the strong noise robustness of DDPM." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.212, + 0.892, + 0.257 + ], + "angle": 0, + "content": "The limitations of DDPM in PCU. While DDPM showcases some advantageous attributes within PCU, it also harbors certain potential limitations:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.258, + 0.892, + 0.363 + ], + "angle": 0, + "content": "- Limitation 1: The lack of effective prior knowledge in the 3D field results in the weak feature perception capability for point cloud conditional networks [14, 31, 47], significantly affecting the final generation results (Tab 8). Although some methods [23] compensate for this problem via a two-stage (coarse-to-fine) training approach, they require a higher training cost." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.364, + 0.892, + 0.469 + ], + "angle": 0, + "content": "- Limitation 2: The auto-regressive nature of DDPM provides robust modeling capabilities for fixed-scale objects during training, but it struggles to generate high-quality arbitrary-scale ones during inference (Tab 9). 
Some works treat different scale point cloud upsampling as multiple tasks [30, 43, 44], but it's not advisable for DDPM due to the excessively high training cost." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.258, + 0.892, + 0.469 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.483, + 0.634, + 0.5 + ], + "angle": 0, + "content": "4. Methodology" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.508, + 0.747, + 0.524 + ], + "angle": 0, + "content": "4.1. Dual mapping Formulation" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.53, + 0.892, + 0.59 + ], + "angle": 0, + "content": "For limitation 1, we adopt a dual mapping paradigm. We first provide a formal exposition of its conception, subsequently delineating the manner in which PUDM aligns with these principles, with a particular emphasis on its role." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.591, + 0.892, + 0.667 + ], + "angle": 0, + "content": "Given two point sets of \\( \\pmb{x}^1 = \\{x_i^1 \\in \\mathbb{R}^3 | i = 1..M\\} \\) and \\( \\pmb{x}^2 = \\{x_i^2 \\in \\mathbb{R}^3 | i = 1..N\\} \\) from different data distributions, a network \\( f_x \\) with a dual-branch architecture \\( (f_x = \\{f_1, f_2\\}) \\), and the corresponding supervision signals for these branches \\( (l_x = \\{l_1, l_2\\}) \\), if \\( f_x \\) satisfies:" + }, + { + "type": "equation", + "bbox": [ + 0.594, + 0.68, + 0.891, + 0.699 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {y} ^ {1} = f _ {1} \\left(\\boldsymbol {x} ^ {1}\\right), \\quad \\boldsymbol {y} ^ {2} = f _ {2} \\left(\\boldsymbol {x} ^ {2}\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.703, + 0.892, + 0.778 + ], + "angle": 0, + "content": "where \\(\\pmb{y}^1 = \\{y_i^1\\in \\mathbb{R}^3|i = 1..M\\}\\), \\(\\pmb{y}^2 = \\{y_i^2\\in \\mathbb{R}^3|i = 1..N\\}\\). \\(f_{x}\\) can be claimed as a dual mapping network. Eq 4 means that each element in the original input has one and only one corresponding element in the final output in each branch." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In PUDM, we only require the conditional network to meet the above condition, because the noise network inherently builds a one-to-one point-wise mapping between the input and the output [23]. Specifically, we first force the output \\( \\pmb{c}^{\\prime} = \\{c_{i}^{\\prime}\\in \\mathbb{R}^{3}|i = 1..M\\} \\) from the conditional network \\( f_{\\psi} \\) to approximate the sparse point cloud \\( \\pmb {c} = \\{c_i\\in \\mathbb{R}^3 |i = 1..M\\} \\) coordinates via MLPs, and then optimize the process by the mean squared error loss:" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.943, + 0.519, + 0.954 + ], + "angle": 0, + "content": "20788" + } + ], + [ + { + "type": "equation", + "bbox": [ + 0.188, + 0.105, + 0.469, + 0.123 + ], + "angle": 0, + "content": "\\[\nL (\\psi) = \\mathbb {E} _ {\\boldsymbol {c} \\sim P _ {c}} \\| \\boldsymbol {c} - \\boldsymbol {c} ^ {\\prime} \\| ^ {2} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.129, + 0.47, + 0.204 + ], + "angle": 0, + "content": "Formally, this establishes a one-to-one point-wise mapping between the input and the output for the conditional network, \\( \\pmb{c}^{\\prime} = f_{\\psi}(\\pmb {c}) = \\mathcal{D}_{c}(\\mathcal{E}_{c}(\\pmb {c},\\mathcal{T}\\mathcal{M}(\\mathcal{E}_{n}(\\pmb{x}_{t},r,t)))) \\), as shown in Fig 2. 
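As a rough sketch of how the dual mapping can be supervised, the snippet below pairs the noise-branch objective with the condition-branch reconstruction of Eq 5; the MLP head and feature dimension are assumptions, and the two terms are weighted as in the paper's total loss (α = 1).

```python
import torch
import torch.nn as nn

class SparseCoordHead(nn.Module):
    """Hypothetical MLP head mapping per-point condition features back to xyz,
    giving the one-to-one mapping c -> c' required by Eq 5 (a sketch, not PUDM's exact layers)."""
    def __init__(self, feat_dim=128):
        super().__init__()
        self.mlp = nn.Sequential(nn.Linear(feat_dim, 64), nn.ReLU(), nn.Linear(64, 3))

    def forward(self, point_feats):           # (B, M, feat_dim) -> (B, M, 3)
        return self.mlp(point_feats)

def dual_mapping_loss(eps, eps_pred, c, c_recon, alpha=1.0):
    """Total objective of the two branches: the noise MSE of Eq 1 plus the
    condition reconstruction MSE of Eq 5, weighted by alpha (alpha = 1 in the paper)."""
    l_theta = ((eps - eps_pred) ** 2).mean()  # dense/noise branch predicts the noise
    l_psi = ((c - c_recon) ** 2).mean()       # sparse/condition branch reconstructs its own input
    return l_theta + alpha * l_psi
```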
\\( \\mathcal{T}\\mathcal{M}(\\cdot) \\) denotes the Transfer Module defined in Sec 4.3." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.205, + 0.471, + 0.34 + ], + "angle": 0, + "content": "For point cloud tasks with unordered structures, this pattern effectively enhances network capability in capturing point features by preserving the ordered relationships between input and output points [3, 12]. Moreover, corresponding supervision signals ensure adequate training for each branch network (Fig 7), providing an effective strategy to address the challenge of lacking robust 3D pre-trained models for conditional branch networks in point cloud generation tasks." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.35, + 0.228, + 0.367 + ], + "angle": 0, + "content": "4.2. Rate Modeling" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.373, + 0.47, + 0.51 + ], + "angle": 0, + "content": "For limitation 2, drawing inspiration from the practice of adding class labels in conditional probabilistic models [6, 10, 26], we propose a simple and effective approach to achieve high-quality arbitrary-scale sampling during inference. Specifically, we first add a rate label \\( r \\) to each sample pair, \\( (c, x) \\to (c, x, r) \\) (the supplementary materials provide ablation studies for different forms of the rate label \\( r \\)). Subsequently, we parameter the rate factor using an embedding layer. In this way, the reverse process of DDPM is:" + }, + { + "type": "equation", + "bbox": [ + 0.119, + 0.533, + 0.47, + 0.575 + ], + "angle": 0, + "content": "\\[\np _ {\\theta} \\left(\\boldsymbol {x} _ {\\mathbf {0}: T}, \\boldsymbol {c}, r\\right) = p \\left(\\boldsymbol {x} _ {T}\\right) \\prod_ {t = 1} ^ {T} p _ {\\theta} \\left(\\boldsymbol {x} _ {t - 1} \\mid \\boldsymbol {x} _ {t}, \\boldsymbol {c}, r\\right) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.585, + 0.47, + 0.706 + ], + "angle": 0, + "content": "Eq 6 demonstrates that this simply adds an additional condition to DDPM, the rate prior \\( r \\), without increasing the number of samples. Unlike class labels, we found in experiments that this conditional prior we exploited can significantly improve the generation quality of unseen-scale point clouds. The reason is that generating unseen-scale and seen-category objects usually are easier compared to generating seen-scale and unseen-category ones for models." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.715, + 0.283, + 0.73 + ], + "angle": 0, + "content": "4.3. Network Architecture" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.738, + 0.469, + 0.828 + ], + "angle": 0, + "content": "In this section, we introduce the overall framework of PUDM, consisting of three crucial components: the conditional network (C-Net), the noise network (N-Net), and the Transfer Module (TM). This process is remarkably illustrated in Fig 2. The parameter setting and implementation details are provided in the supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.829, + 0.47, + 0.889 + ], + "angle": 0, + "content": "The Conditional Network (C-Net). We use PointNet++ [29] as the backbone. This follows the standard U-Net framework. 
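A minimal sketch of the rate-prior parameterization of Sec 4.2, assuming an `nn.Embedding` over integer rate labels plus a learned time-step embedding; the dimensions and the exact way the resulting global vector is injected at each encoder/decoder stage are assumptions.

```python
import torch
import torch.nn as nn

class GlobalConditioning(nn.Module):
    """Turns the rate label r and the time step t into a single global feature vector
    that can be added at every stage of the N-Net (a sketch of the idea in Sec 4.2)."""
    def __init__(self, max_rate=128, num_steps=1000, dim=128):
        super().__init__()
        self.rate_emb = nn.Embedding(max_rate + 1, dim)   # parameterized rate factor r
        self.time_emb = nn.Embedding(num_steps, dim)      # simple learned embedding for t

    def forward(self, r, t):
        # r, t: (B,) integer tensors -> (B, dim) global conditioning vector
        return self.rate_emb(r) + self.time_emb(t)

# usage: conditioning for a 4x-rate sample at diffusion step 500
g = GlobalConditioning()(torch.tensor([4]), torch.tensor([500]))
```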
The encoder and decoder are composed of multiple Set Abstraction (SA) layers and Feature Propagation" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.089, + 0.895, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.269, + 0.894, + 0.368 + ], + "angle": 0, + "content": "Figure 2. The overall framework of PUDM: The N-Net (upper branch) and the C-Net (lower branch) both establish a one-to-one point-wise mapping between input and output using mean squared error loss. They engage in information exchange through a transfer module (TM). Simultaneously, the rate prompt is provided to exploit the prior knowledge of the scale between sparse point clouds and dense point clouds." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.394, + 0.892, + 0.483 + ], + "angle": 0, + "content": "(FP) layers, respectively. Unlike PointNet++ using the max-pooling layer to filter features, we consider utilizing the self-attention layer to retain more fine-grained information [25, 48]. In addition, we only feed the sparse point cloud into the C-Net to ensure the feature extraction in a pure and effective manner." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.484, + 0.892, + 0.545 + ], + "angle": 0, + "content": "The Noise Network (N-Net). The N-Net and the C-Net share the same network architecture. In contrast to the C-Net, we need to introduce additional guidance information to the N-Net for modeling the diffusion steps." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.544, + 0.893, + 0.695 + ], + "angle": 0, + "content": "We first transform the sparse point cloud \\( \\mathbf{c} \\in \\mathbb{R}^{N \\times 3} \\) into the interpolation point cloud \\( \\mathbf{i} \\in \\mathbb{R}^{rN \\times 3} \\) through the midpoint interpolation [9], and then sum \\( \\mathbf{i} \\) and \\( \\mathbf{x}_t \\) as the input for the N-Net. Meanwhile, we extract the global features from \\( \\mathbf{i} \\) to enhance the semantic understanding. Furthermore, to identify the noise level, we encode the time step \\( t \\). Finally, as mentioned in Sec 4.2, we parameterized the rate factor \\( r \\). These additional pieces of information are both treated as global features, and incorporated into each stage of the encoder and the decoder in the N-Net." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.696, + 0.892, + 0.787 + ], + "angle": 0, + "content": "The Transfer Module (TM). We propose a bidirectional interaction module (TM) to serve as an intermediary between the C-Net and the N-Net. We only place the TM at the bottleneck stage of U-Net, due to the significant computational efficiency and the abundant semantic information via the maximum receptive field [12, 15]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.787, + 0.892, + 0.847 + ], + "angle": 0, + "content": "Given the outputs of the encoder in the C-Net and the N-Net, \\( F^{c} \\in \\mathbb{R}^{N_{e}^{c} \\times C_{e}^{c}} \\), \\( F^{n} \\in \\mathbb{R}^{N_{e}^{n} \\times C_{e}^{n}} \\) separately, the TM first transforms \\( F^{c} \\to (Q) \\in \\mathbb{R}^{N_{e}^{c} \\times C_{i}} \\) and \\( F^{n} \\to (K, V) \\in \\mathbb{R}^{N_{e}^{n} \\times C_{i}} \\) via MLPs. 
Next, we can obtain the fused feature:" + }, + { + "type": "equation", + "bbox": [ + 0.56, + 0.857, + 0.892, + 0.892 + ], + "angle": 0, + "content": "\\[\nF _ {f} = M L P \\left(\\operatorname {s o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {C _ {i}}}\\right) V\\right) + F ^ {c} \\tag {7}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.943, + 0.519, + 0.954 + ], + "angle": 0, + "content": "20789" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "Subsequently, \\( F_{f} \\) is fed into a feed-forward network (FFN) to output the final features. Similarly, the same operation is also applied in reverse direction, so that information flows in both directions, \\( F^{c} \\rightarrow F^{n} \\) and \\( F^{n} \\rightarrow F^{c} \\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.162, + 0.292, + 0.178 + ], + "angle": 0, + "content": "4.4. Training and Inference" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.185, + 0.469, + 0.231 + ], + "angle": 0, + "content": "Training. As mentioned earlier (Eq 1 and Eq 5), PUDM is a dual mapping network, and models the rate prior during training. Therefore, the training objective is:" + }, + { + "type": "equation", + "bbox": [ + 0.193, + 0.245, + 0.469, + 0.261 + ], + "angle": 0, + "content": "\\[\nL _ {m s e} = L (\\theta) + \\alpha L (\\psi) \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.268, + 0.449, + 0.282 + ], + "angle": 0, + "content": "where \\(\\alpha\\) means a weighting factor (\\(\\alpha = 1\\) in this paper)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.283, + 0.469, + 0.343 + ], + "angle": 0, + "content": "Inference. We found that adding the interpolated points \\( \\mathbf{i} \\) as the guidance information significantly improves the generated quality during inference. Therefore, we iteratively transform \\( \\boldsymbol{x}_t \\) into \\( \\boldsymbol{x}_0 \\) based on:" + }, + { + "type": "equation", + "bbox": [ + 0.079, + 0.353, + 0.469, + 0.398 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} _ {t - 1} = \\gamma \\left(\\frac {1}{\\sqrt {\\alpha_ {t}}} \\left(\\boldsymbol {x} _ {t} - \\frac {1 - a _ {t}}{\\sqrt {1 - \\bar {\\alpha} _ {t}}} \\epsilon_ {\\theta} \\left(\\boldsymbol {x} _ {t}, \\boldsymbol {c}, r, t\\right)\\right) + \\boldsymbol {\\sigma} _ {t} \\boldsymbol {\\epsilon} + \\mathbf {i}\\right) \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.399, + 0.435, + 0.415 + ], + "angle": 0, + "content": "where \\(\\gamma\\) denotes a scale factor (\\(\\gamma = 0.5\\) in this paper)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.428, + 0.21, + 0.445 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.452, + 0.254, + 0.469 + ], + "angle": 0, + "content": "5.1. Experiment Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.475, + 0.469, + 0.626 + ], + "angle": 0, + "content": "Dataset. In our experiments, we utilize two public benchmarks (PUGAN [17], PU1K [30]) for evaluation. We adhere to the official training/testing partitioning protocols for these datasets. This uses Poisson disk sampling [45] to generate 24,000 and 69,000 uniform patches for training, respectively. Each patch contains 256 points, while the corresponding ground truth has 1024 points. Meanwhile, 27 and 127 point clouds are used for testing, respectively. 
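Returning to the inference rule of Eq 9, the sketch below performs one reverse step with the interpolated cloud i added as guidance and γ = 0.5; setting σ_t = √β_t is a common DDPM choice and an assumption here, not a value stated in the paper.

```python
import torch

def pudm_reverse_step(eps_net, x_t, cond, rate, t, interp, betas, alpha_bar, gamma=0.5):
    """One reverse step following Eq 9: the usual DDPM update plus the midpoint-
    interpolated cloud `interp` added as guidance, with the whole term scaled by gamma.

    x_t, interp: (B, rN, 3); cond: (B, N, 3); t: scalar int; eps_net is hypothetical.
    """
    alpha_t = 1.0 - betas[t]
    a_bar_t = alpha_bar[t]
    eps_pred = eps_net(x_t, cond, rate, t)                                  # eps_theta(x_t, c, r, t)
    mean = (x_t - (1.0 - alpha_t) / torch.sqrt(1.0 - a_bar_t) * eps_pred) / torch.sqrt(alpha_t)
    noise = torch.randn_like(x_t) if t > 0 else torch.zeros_like(x_t)
    sigma_t = torch.sqrt(betas[t])                                          # assumed sigma_t
    return gamma * (mean + sigma_t * noise + interp)
```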
The input sparse point clouds consist of 2048 points, and are upsampled to \\(2048 \\times R\\) points via evaluated methods." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.627, + 0.469, + 0.687 + ], + "angle": 0, + "content": "Metrics. Following [9, 43, 44], we employ the Chamfer Distance \\((\\mathrm{CD} \\times 10^{-3})\\), Hausdorff Distance \\((\\mathrm{HD} \\times 10^{-3})\\), and Point-to-Surface Distance \\((\\mathrm{P2F} \\times 10^{-3})\\) as evaluation metrics in our experiments." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.696, + 0.298, + 0.712 + ], + "angle": 0, + "content": "5.2. Comparison with SOTA" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.469, + 0.9 + ], + "angle": 0, + "content": "Results on PUGAN. We first conduct the point cloud upsampling at low upsampling rate \\((4\\times)\\) and high upsampling rate \\((16\\times)\\) on PUGAN. Tab 1 illustrates the substantial superiority of our method in geometric detail description compared to other methods, as evidenced by significantly reduced CD and HD. Because our method models the gradient of data distribution from dense point clouds, facilitating the direct approximation of geometric details from the ground truth, thereby yielding higher accuracy of our results. Fig 3 further substantiates our viewpoint, and shows that our method produces fewer outliers, aligning with more uniform surfaces, closer to the ground truth." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.892, + 0.138 + ], + "angle": 0, + "content": "In addition, despite P2F falling slightly behind Grad-PU [9] at \\(4\\times\\), the difference is insignificant due to the asymmetry between points and surfaces [9, 17]." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.152, + 0.892, + 0.268 + ], + "angle": 0, + "content": "
Methods16×
CD↓HD↓P2F↓CD↓HD↓P2F↓
PU-Net [44]0.5296.8054.4600.5108.2066.041
MPU [43]0.2926.6722.8220.2197.0543.085
PU-GAN [30]0.2825.5772.0160.2076.9632.556
Dis-PU [18]0.2743.6961.9430.1674.9232.261
PU-EVA [21]0.2773.9712.5240.1855.2732.972
PU-GCN [30]0.2683.2012.4890.1614.2832.632
NePS [7]0.2593.6481.9350.1524.9102.198
Grad-PU [9]0.2452.3691.8930.1082.3522.127
Ours0.1311.2201.9120.0821.1202.114
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.269, + 0.892, + 0.297 + ], + "angle": 0, + "content": "Table 1. The results of \\(4 \\times\\) and \\(16 \\times\\) on PUGAN. Our method significantly surpasses other methods in terms of CD and HD." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.315, + 0.892, + 0.42 + ], + "angle": 0, + "content": "Arbitrary Upsampling Rates on PUGAN. Similarly to [9], we perform comparative analyses across different rates on PUGAN. Tab 2 shows that our method steadily outperforms Grad-PU [9] across nearly all metrics. In particular, our method demonstrates a significant performance advantage in terms of CD and HD, further affirming the superiority in learning complex geometric details." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.421, + 0.892, + 0.481 + ], + "angle": 0, + "content": "Moreover, we visualize the results at higher upsampling rates \\((16\\times, 32\\times, 64\\times,\\) and \\(128\\times)\\) in Fig 4. Our results obviously exhibit more complete, uniform, and smooth compared to Grad-PU [9]." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.495, + 0.876, + 0.575 + ], + "angle": 0, + "content": "
Rates | Grad-PU [9]: CD↓ | HD↓ | P2F↓ | Ours: CD↓ | HD↓ | P2F↓
 | 0.540 | 3.177 | 1.775 | 0.247 | 1.410 | 1.812
 | 0.353 | 2.608 | 1.654 | 0.171 | 1.292 | 1.785
 | 0.234 | 2.549 | 1.836 | 0.116 | 1.244 | 1.794
 | 0.225 | 2.526 | 1.981 | 0.107 | 1.235 | 1.980
 | 0.219 | 2.634 | 1.940 | 0.106 | 1.231 | 1.952
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.576, + 0.892, + 0.618 + ], + "angle": 0, + "content": "Table 2. Grad-PU vs. ours at different rates on PUGAN. Benefiting from the rate modeling, our method still exhibits remarkable performance at different rates." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.65, + 0.892, + 0.717 + ], + "angle": 0, + "content": "
Methods | CD↓ | HD↓ | P2F↓
PU-Net [44] | 1.155 | 15.170 | 4.834
MPU [43] | 0.935 | 13.327 | 3.511
PU-GCN [30] | 0.585 | 7.577 | 2.499
Grad-PU [9] | 0.404 | 3.732 | 1.474
Ours | 0.217 | 2.164 | 1.477
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.72, + 0.892, + 0.761 + ], + "angle": 0, + "content": "Table 3. The results of \\(4 \\times\\) on PU1K. We utilize the experimental results from the original paper. Our method outperforms other methods across nearly all metrics." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.779, + 0.892, + 0.853 + ], + "angle": 0, + "content": "Results on PU1K. Furthermore, we also conduct the evaluation at \\(4 \\times\\) on more challenging PU1K [30]. As reported in Tab 3, our method continues to demonstrate substantial advantages in terms of CD and HD compared to other methods." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.855, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Result on Real datasets. Additionally, we conduct the evaluation on real indoor (ScanNet [5]) and outdoor (KITTI [8]) scene datasets. Note that all methods are only trained" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.943, + 0.521, + 0.954 + ], + "angle": 0, + "content": "20790" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.088, + 0.895, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.26, + 0.893, + 0.289 + ], + "angle": 0, + "content": "Figure 3. Visualization results at \\(4\\times\\) on PUGAN. Our result exhibits fewer outliers, and clearly captures geometric details from the ground truth (the holes on the casting)." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.311, + 0.462, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.639, + 0.47, + 0.682 + ], + "angle": 0, + "content": "Figure 4. Grad-PU vs. ours at large rates on PUGAN. Our method consistently generates more uniform and smooth surfaces (these results are achieved using an NVIDIA 3090 GPU)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.707, + 0.473, + 0.875 + ], + "angle": 0, + "content": "on PUGAN. Upsampling scene-level point clouds poses greater challenges than upsampling object-level ones, due to the former having more intricate geometric structures. Due to the absence of the ground truth, our analysis is confined to qualitative comparisons. In Fig 5, our method still generates reasonable and smooth surfaces on some complex structures, while other methods exhibit artifacts such as overlap and voids. Simultaneously, Fig 6 illustrates that our results show more complete and fewer outliers. Although Grad-PU [9] also demonstrates good outlier results, it generates a considerable amount of uneven surfaces." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.314, + 0.78, + 0.33 + ], + "angle": 0, + "content": "5.3. Validation for Noise Robustness" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.338, + 0.892, + 0.383 + ], + "angle": 0, + "content": "Gaussian Noise. To demonstrate the robustness, we perturb the sparse point clouds with Gaussian noise sampled \\(\\mathcal{N}(0,I)\\) added at different noise levels \\(\\tau\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.384, + 0.893, + 0.459 + ], + "angle": 0, + "content": "As shown in Tab 4, our method significantly outperforms other methods under multiple level noise perturbations (\\(\\tau = 0.01\\), \\(\\tau = 0.02\\)). Specifically, this is because our method models the noise \\(\\epsilon\\) (the gradient of data distribution) and avoids CD loss during training." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.471, + 0.892, + 0.582 + ], + "angle": 0, + "content": "
Methods | τ = 0.01: CD↓ | HD↓ | P2F↓ | τ = 0.02: CD↓ | HD↓ | P2F↓
PU-Net [44] | 0.628 | 8.068 | 9.816 | 1.078 | 10.867 | 16.401
MPU [43] | 0.506 | 6.978 | 9.059 | 0.929 | 10.820 | 15.621
PU-GAN [17] | 0.464 | 6.070 | 7.498 | 0.887 | 10.602 | 15.088
Dis-PU [18] | 0.419 | 5.413 | 6.723 | 0.818 | 9.345 | 14.376
PU-EVA [21] | 0.459 | 5.377 | 7.189 | 0.839 | 9.325 | 14.652
PU-GCN [30] | 0.448 | 5.586 | 6.989 | 0.816 | 8.604 | 13.798
NePS [7] | 0.425 | 5.438 | 6.546 | 0.798 | 9.102 | 12.088
Grad-PU [9] | 0.414 | 4.145 | 6.400 | 0.766 | 7.336 | 11.534
Ours | 0.210 | 2.430 | 6.070 | 0.529 | 5.471 | 9.742
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.587, + 0.892, + 0.628 + ], + "angle": 0, + "content": "Table 4. The results of \\(4 \\times\\) at low-level Gaussian noise on PUGAN. Our method significantly outperforms other methods in terms of noise robustness." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.645, + 0.892, + 0.734 + ], + "angle": 0, + "content": "Moreover, we also conduct the evaluation under more challenging noise perturbations. Tab 5 shows that our method exhibits stronger robustness results at higher level noise perturbations (\\(\\tau = 0.05\\) and \\(\\tau = 0.1\\)). This indicates that our method exhibits a trend of resilience for the noise robustness." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.809 + ], + "angle": 0, + "content": "Other Noise. Furthermore, we also investigated the performance of our method on uniform noise. Admittedly, while our method still keeps SOTA performance, as shown in Tab 6, the results on uniform noise show significantly lower than that on Gaussian noise." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.893, + 0.901 + ], + "angle": 0, + "content": "We provide an intuitive explanation. Eq 2 demonstrates that the training objective of DDPM is to fit the gradient of data distribution (modeling the noise \\(\\epsilon\\), named score) [37]. Essentially, DDPM learns the direction of noise generation. When the conditions with noise are considered, the disturbance in the direction exhibits relatively small, because the" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.943, + 0.518, + 0.955 + ], + "angle": 0, + "content": "20791" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.08, + 0.09, + 0.895, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.21, + 0.895, + 0.237 + ], + "angle": 0, + "content": "Figure 5. The results of \\(4 \\times\\) on KITTI. Our method noticeably generates more reasonable and uniform results on some complex geometric structures." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.264, + 0.468, + 0.375 + ], + "angle": 0, + "content": "
Methods | τ = 0.05: CD↓ | HD↓ | P2F↓ | τ = 0.1: CD↓ | HD↓ | P2F↓
PU-Net [44] | 1.370 | 13.729 | 23.249 | 1.498 | 14.193 | 23.846
MPU [43] | 1.247 | 11.645 | 22.189 | 1.321 | 12.415 | 23.841
PU-GAN [17] | 1.124 | 9.091 | 21.252 | 1.271 | 10.911 | 23.174
Dis-PU [18] | 1.076 | 7.921 | 20.603 | 1.244 | 10.913 | 22.845
PU-EVA [21] | 1.057 | 7.910 | 20.044 | 1.226 | 9.305 | 22.296
PU-GCN [30] | 1.263 | 9.869 | 22.835 | 1.456 | 11.063 | 25.213
NePS [7] | 1.143 | 9.645 | 18.642 | 1.198 | 9.874 | 20.162
Grad-PU [9] | 0.978 | 8.057 | 16.927 | 1.118 | 8.946 | 18.845
Ours | 0.618 | 5.386 | 14.751 | 0.853 | 6.239 | 16.845
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.376, + 0.471, + 0.418 + ], + "angle": 0, + "content": "Table 5. The results of \\(4 \\times\\) at high-level Gaussian noise on PUGAN. Compared to other methods, our method demonstrates a more favorable upward trend for robustness to noise." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.446, + 0.47, + 0.537 + ], + "angle": 0, + "content": "noise has a similar distribution to \\(\\epsilon\\). Therefore, during inference, our method demonstrates robustness to approximating noise distributions of \\(\\epsilon\\) (Gaussian noise), but performs poorly when faced with different ones (the supplementary materials provide more noise experiments to support this conclusion)." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.55, + 0.468, + 0.661 + ], + "angle": 0, + "content": "
Methods | τ = 0.05: CD↓ | HD↓ | P2F↓ | τ = 0.1: CD↓ | HD↓ | P2F↓
PU-Net [44] | 1.490 | 14.473 | 23.223 | 1.725 | 15.442 | 25.251
MPU [43] | 1.224 | 10.842 | 20.456 | 1.545 | 11.645 | 23.512
PU-GAN [17] | 1.034 | 7.757 | 18.617 | 1.327 | 9.700 | 21.321
Dis-PU [18] | 1.006 | 6.856 | 17.873 | 1.314 | 7.463 | 20.980
PU-EVA [21] | 1.024 | 7.534 | 18.179 | 1.334 | 8.056 | 21.158
PU-GCN [30] | 1.045 | 9.643 | 18.899 | 1.325 | 10.877 | 21.633
NePS [7] | 1.048 | 7.345 | 18.054 | 1.321 | 9.645 | 21.314
Grad-PU [9] | 1.067 | 6.634 | 17.734 | 1.399 | 7.215 | 21.028
Ours | 0.998 | 6.110 | 17.558 | 1.310 | 6.732 | 20.564
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.663, + 0.468, + 0.692 + ], + "angle": 0, + "content": "Table 6. The results of \\(4 \\times\\) at high-level uniform noise on PUGAN. Our method outperforms other methods on all metrics." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.719, + 0.376, + 0.735 + ], + "angle": 0, + "content": "5.4. Effectiveness in Downstream Task" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.742, + 0.468, + 0.803 + ], + "angle": 0, + "content": "We evaluate the effectiveness of upsampling quality in the downstream task: point cloud classification. Meanwhile, we also conducted experiments on point cloud part segmentation, please refer to the supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.803, + 0.468, + 0.894 + ], + "angle": 0, + "content": "PointNet [28] and PointNet++ [29] are chosen as the downstream task models due to their significant performance and widespread influence in 3D tasks. We follow the official training and testing procedures. Simultaneously, we select ModelNet40 [40] (40 categories) and ShapeNet [1] (16 categories) as the benchmarks for point cloud clas" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.265, + 0.892, + 0.325 + ], + "angle": 0, + "content": "sification. For a fair and effective evaluation, we use only 3D coordinates as the input. Similar to the evaluated strategy on real datasets, all point cloud upsampling methods are only trained on PUGAN." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.325, + 0.892, + 0.43 + ], + "angle": 0, + "content": "For evaluation, we first subsample 256/512 points from test point clouds on ModelNet40/ShapeNet. Subsequently, they are upsampled to 1024/2048 points through evaluation methods. As depicted in Tab 7, our results significantly improve the classification accuracy compared to the low-res point clouds, and consistently outperforms other methods across all metrics." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.442, + 0.892, + 0.57 + ], + "angle": 0, + "content": "
Datasets | ModelNet40 (%) | ShapeNet (%)
Models | PointNet | PointNet++ | PointNet | PointNet++
Methods | IA↑ | CA↑ | IA↑ | CA↑ | IA↑ | CA↑ | IA↑ | CA↑
Low-res | 87.15 | 83.12 | 88.87 | 84.45 | 97.61 | 95.09 | 98.20 | 96.11
High-res | 90.74 | 87.14 | 92.24 | 89.91 | 98.89 | 96.61 | 99.27 | 98.18
PU-Net [44] | 88.72 | 85.25 | 88.99 | 85.43 | 97.99 | 95.69 | 98.57 | 96.35
MPU [43] | 89.04 | 85.84 | 89.54 | 86.51 | 98.03 | 95.92 | 98.94 | 96.81
PU-GAN [17] | 89.95 | 85.68 | 90.45 | 87.23 | 98.75 | 95.70 | 90.45 | 87.23
Dis-PU [18] | 88.70 | 85.34 | 89.56 | 86.53 | 98.80 | 96.07 | 99.00 | 97.15
PU-EVA [21] | 89.27 | 85.63 | 89.96 | 86.86 | 98.72 | 95.69 | 99.07 | 97.58
PU-GCN [30] | 89.77 | 85.38 | 89.45 | 86.15 | 98.78 | 96.06 | 99.03 | 97.42
NePS [7] | 90.01 | 86.15 | 90.32 | 87.34 | 98.94 | 96.20 | 99.12 | 97.94
Grad-PU [9] | 90.05 | 86.06 | 89.98 | 87.49 | 98.82 | 96.19 | 99.10 | 97.63
Ours | 90.33 | 86.54 | 92.14 | 89.42 | 98.85 | 96.58 | 99.13 | 97.99
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.573, + 0.892, + 0.657 + ], + "angle": 0, + "content": "Table 7. The results of point cloud classification. \"Low-res\" refers to the point cloud subsampled, while \"High-res\" denotes the original test point cloud. Meanwhile, \"IA\" stands for instance accuracy, and \"CA\" denotes class accuracy. Our results have more reasonable, finer-grained, and closer-to-ground truth geometric structures, thereby achieving more significant classification accuracy." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.686, + 0.654, + 0.703 + ], + "angle": 0, + "content": "5.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.709, + 0.892, + 0.891 + ], + "angle": 0, + "content": "With/Without the dual mapping paradigm. Thanks to the rich and structured data, the conditional networks for text or images can be replaced by powerful pre-trained models [34-36, 41]. However, robust pre-trained backbones are lacking in the 3D field due to scarce data and challenging feature extraction [14, 31, 47]. In this paper, we employ the dual mapping paradigm to augment the capability of perceiving point features for PUDM, ensuring the comprehensive training of the C-Net. To validate this point, we remove the supervision signal from the C-Net to disrupt this pattern. Meanwhile, we also validate the importance of the C-Net by retaining only the N-Net in PUDM." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.942, + 0.521, + 0.954 + ], + "angle": 0, + "content": "20792" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.08, + 0.09, + 0.895, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.205, + 0.895, + 0.234 + ], + "angle": 0, + "content": "Figure 6. The results of \\(4 \\times\\) on ScanNet. Our results exhibit reduced instances of outliers, concurrently generating more uniform and complete surfaces." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.26, + 0.47, + 0.336 + ], + "angle": 0, + "content": "As reported in Tab 8, disrupting the dual mapping pattern leads to a significant decrease in performance due to the weakened point feature perception ability of the C-Net. Fig 7 visualizes the results of the C-Net generating input sparse points using the dual mapping paradigm." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.336, + 0.47, + 0.396 + ], + "angle": 0, + "content": "Meanwhile, although removing the C-Net can maintain a single mapping pattern, as demonstrated in prior research [21, 30, 44], sparse point cloud feature extraction plays a pivotal role in PCU." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.409, + 0.468, + 0.458 + ], + "angle": 0, + "content": "
Methods | CD↓ | HD↓ | P2F↓
Without the C-Net | 0.212 | 2.015 | 2.284
Without the dual mapping | 0.168 | 1.498 | 2.013
With the dual mapping | 0.131 | 1.220 | 1.912
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.459, + 0.469, + 0.487 + ], + "angle": 0, + "content": "Table 8. Ablation study of the dual mapping paradigm. The dual mapping pattern evidently achieves the best performance." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.514, + 0.467, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.693, + 0.47, + 0.737 + ], + "angle": 0, + "content": "Figure 7. Visualization results of the C-Net generating sparse point clouds on PUGAN. This demonstrates that the C-Net has been effectively trained." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.469, + 0.854 + ], + "angle": 0, + "content": "With/Without the rate prior. As mentioned in Sec 4.2, we introduce the rate prior into PUDM during training to achieve high-quality generation of point clouds during inference. Tab 9 demonstrates the effectiveness of this approach. Without the rate prior, the overall performance notably decreases, and exhibits significant fluctuations (performing better at \\(4\\times\\), but worse at other rates)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Single/Multiple Transfer Module. In this paper, we employ a TM positioned at the bottleneck stage of the U-Net, as its maximum receptive field provides ample con" + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.259, + 0.892, + 0.359 + ], + "angle": 0, + "content": "
Rates | Without the rate modeling: CD↓ | HD↓ | P2F↓ | With the rate modeling: CD↓ | HD↓ | P2F↓
 | 0.295 | 1.816 | 2.014 | 0.247 | 1.410 | 1.812
 | 0.224 | 1.544 | 1.975 | 0.171 | 1.292 | 1.785
 | 0.158 | 1.512 | 1.815 | 0.131 | 1.220 | 1.912
 | 0.166 | 1.548 | 1.944 | 0.116 | 1.244 | 1.794
 | 0.151 | 1.528 | 1.956 | 0.107 | 1.235 | 1.980
 | 0.144 | 1.425 | 1.988 | 0.106 | 1.231 | 1.952
 | 0.139 | 1.399 | 1.921 | 0.104 | 1.215 | 1.875
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.36, + 0.892, + 0.389 + ], + "angle": 0, + "content": "Table 9. Ablation study of the rate prior. Utilizing the rate prior significantly enhances the quality of arbitrary-scale sampling." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.416, + 0.892, + 0.507 + ], + "angle": 0, + "content": "textual information [12, 15]. Meanwhile, we also attempt to place multiple TMs at each stage in U-Net to enable the interaction of multi-scale information [23]. Tab 10 shows that although multiple TMs lead to a slight improvement in terms of CD loss, it is not cost-effective due to the significant increase in computational cost." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.521, + 0.892, + 0.558 + ], + "angle": 0, + "content": "
Methods | CD↓ | HD↓ | P2F↓ | Params↓
Multiple TMs | 0.129 | 1.235 | 1.953 | 28.65M
Single TM | 0.131 | 1.220 | 1.912 | 16.03M
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.559, + 0.892, + 0.587 + ], + "angle": 0, + "content": "Table 10. Ablation study of the Transfer Module. Using the single TM strikes a balance between performance and efficiency." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.618, + 0.618, + 0.634 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.824 + ], + "angle": 0, + "content": "In this paper, we systematically analyze and identify the potential of DDPM as a promising model for PCU. Meanwhile, we propose PUDM based on conditional DDPM. PUDM enables to directly utilize the dominant features to generate geometric details approximating the ground truth. Additionally, we analyze the limitations of applying DDPM to PCU (the absence of efficient prior knowledge for the conditional network and the fixed-scale object modeling), and propose corresponding solutions (a dual mapping paradigm and the rate modeling). Moreover, we offer a straightforward explanation regarding the robustness to noise for PUDM observed in experiments." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Acknowledgments. This work was supported in part by the Jiangsu Geological Bureau ResearchProject under Grant 2023KY11, in part by the National Natural Science Foundation of China under Grant 61871226, and in part by the National Key R&D Program of China (NO.2022ZD0160101)." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.943, + 0.519, + 0.955 + ], + "angle": 0, + "content": "20793" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012, 2015. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.186, + 0.472, + 0.254 + ], + "angle": 0, + "content": "[2] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3075-3084, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.256, + 0.471, + 0.312 + ], + "angle": 0, + "content": "[3] Christopher Choy, Jaesik Park, and Vladlen Koltun. Fully convolutional geometric features. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8958-8966, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.314, + 0.471, + 0.368 + ], + "angle": 0, + "content": "[4] Yaodong Cui, Ren Chen, Wenbo Chu, Long Chen, Daxin Tian, Ying Li, and Dongpu Cao. Deep learning for image and point cloud fusion in autonomous driving: A review. pages 722-739. IEEE, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.37, + 0.471, + 0.438 + ], + "angle": 0, + "content": "[5] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5828-5839, 2017. 
1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.44, + 0.471, + 0.481 + ], + "angle": 0, + "content": "[6] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.483, + 0.472, + 0.552 + ], + "angle": 0, + "content": "[7] Wanquan Feng, Jin Li, Hongrui Cai, Xiaonan Luo, and Juyong Zhang. Neural points: Point cloud representation with neural fields for arbitrary upsampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18633-18642, 2022. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.554, + 0.471, + 0.608 + ], + "angle": 0, + "content": "[8] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. pages 1231-1237. Sage Publications Sage UK: London, England, 2013. 1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.61, + 0.471, + 0.692 + ], + "angle": 0, + "content": "[9] Yun He, Danhang Tang, Yinda Zhang, Xiangyang Xue, and Yanwei Fu. Grad-pu: Arbitrary-scale point cloud upsampling via gradient descent with learned distance functions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5354-5363, 2023. 2, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.694, + 0.47, + 0.722 + ], + "angle": 0, + "content": "[10] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.723, + 0.47, + 0.765 + ], + "angle": 0, + "content": "[11] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.766, + 0.471, + 0.835 + ], + "angle": 0, + "content": "[12] Shengyu Huang, Zan Gojcic, Mikhail Usvyatsov, Andreas Wieser, and Konrad Schindler. Predator: Registration of 3d point clouds with low overlap. In Proceedings of the IEEE/CVF Conference on computer vision and pattern recognition, pages 4267-4276, 2021. 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.836, + 0.47, + 0.892 + ], + "angle": 0, + "content": "[13] Sheng Yu Huang, Hao-Yu Hsu, and Frank Wang. Spovt: Semantic-prototype variational transformer for dense point cloud semantic completion. Advances in Neural Information Processing Systems, 35:33934–33946, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.892 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "[14] Xiaoshui Huang, Sheng Li, Wentao Qu, Tong He, Yifan Zuo, and Wanli Ouyang. Frozen clip model is efficient point cloud backbone. arXiv preprint arXiv:2212.04098, 2022. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.189 + ], + "angle": 0, + "content": "[15] Xiaoshui Huang, Wentao Qu, Yifan Zuo, Yuming Fang, and Xiaowei Zhao. Imfnet: Interpretable multimodal fusion for point cloud registration. IEEE Robotics and Automation Letters, 7(4):12323-12330, 2022. 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.19, + 0.895, + 0.245 + ], + "angle": 0, + "content": "[16] Jiaxin Li and Gim Hee Lee. Deepi2p: Image-to-point cloud registration via deep classification. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15960-15969, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.247, + 0.892, + 0.314 + ], + "angle": 0, + "content": "[17] Ruihui Li, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-gan: a point cloud upsampling adversarial network. In Proceedings of the IEEE/CVF international conference on computer vision, pages 7203–7212, 2019. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.316, + 0.892, + 0.372 + ], + "angle": 0, + "content": "[18] Ruihui Li, Xianzhi Li, Pheng-Ann Heng, and Chi-Wing Fu. Point cloud upsampling via disentangled refinement. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 344–353, 2021. 1, 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.372, + 0.892, + 0.427 + ], + "angle": 0, + "content": "[19] Chen-Hsuan Lin, Chen Kong, and Simon Lucey. Learning efficient point cloud generation for dense 3d object reconstruction. In proceedings of the AAAI Conference on Artificial Intelligence, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.428, + 0.892, + 0.469 + ], + "angle": 0, + "content": "[20] Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Pointvoxel cnn for efficient 3d deep learning. Advances in Neural Information Processing Systems, 32, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.47, + 0.892, + 0.538 + ], + "angle": 0, + "content": "[21] Luqing Luo, Lulu Tang, Wanyi Zhou, Shizheng Wang, and Zhi-Xin Yang. Pu-eva: An edge-vector based approximation solution for flexible-scale point cloud upsampling. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16208-16217, 2021. 1, 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.539, + 0.892, + 0.594 + ], + "angle": 0, + "content": "[22] Shitong Luo and Wei Hu. Diffusion probabilistic models for 3d point cloud generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2837-2845, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.596, + 0.892, + 0.65 + ], + "angle": 0, + "content": "[23] Zhaoyang Lyu, Zhifeng Kong, Xudong Xu, Liang Pan, and Dahua Lin. A conditional point diffusion-refinement paradigm for 3d point cloud completion. arXiv preprint arXiv:2112.03530, 2021. 2, 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.651, + 0.895, + 0.72 + ], + "angle": 0, + "content": "[24] Luke Melas-Kyriazi, Christian Rupprecht, and Andrea Vedaldi. Pc2: Projection-conditioned point cloud diffusion for single-image 3d reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12923-12932, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.721, + 0.892, + 0.788 + ], + "angle": 0, + "content": "[25] Liang Pan, Xinyi Chen, Zhongang Cai, Junzhe Zhang, Haiyu Zhao, Shuai Yi, and Ziwei Liu. Variational relational point completion network. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8524-8533, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.79, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[26] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.845, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[27] Anh Viet Phan, Minh Le Nguyen, Yen Lam Hoang Nguyen, and Lam Thu Bui. Dgcnn: A convolutional neural network over large-scale labeled graphs. Neural Networks, 108:533-543, 2018. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.895, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.943, + 0.52, + 0.954 + ], + "angle": 0, + "content": "20794" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.472, + 0.219 + ], + "angle": 0, + "content": "[29] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 2, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.22, + 0.472, + 0.29 + ], + "angle": 0, + "content": "[30] Guocheng Qian, Abdulellah Abualshour, Guohao Li, Ali Thabet, and Bernard Ghanem. Pu-gcn: Point cloud upsampling using graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11683-11692, 2021. 1, 2, 3, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.291, + 0.472, + 0.332 + ], + "angle": 0, + "content": "[31] Guocheng Qian, Xingdi Zhang, Abdullah Hamdi, and Bernard Ghanem. Pix4point: Image pretrained transformers for 3d point cloud understanding. 2022. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.333, + 0.472, + 0.389 + ], + "angle": 0, + "content": "[32] Yue Qian, Junhui Hou, Sam Kwong, and Ying He. Pugeonet: A geometry-centric network for 3d point cloud upsampling. In European conference on computer vision, pages 752-769. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.39, + 0.472, + 0.457 + ], + "angle": 0, + "content": "[33] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pages 8821-8831. PMLR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.46, + 0.472, + 0.515 + ], + "angle": 0, + "content": "[34] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.517, + 0.472, + 0.586 + ], + "angle": 0, + "content": "[35] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.588, + 0.472, + 0.669 + ], + "angle": 0, + "content": "[36] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. 
Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.672, + 0.472, + 0.727 + ], + "angle": 0, + "content": "[37] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.728, + 0.472, + 0.797 + ], + "angle": 0, + "content": "[38] Hugues Thomas, Charles R Qi, Jean-Emmanuel Deschaud, Beatrix Marcotegui, François Goulette, and Leonidas J Guibas. Kpconv: Flexible and deformable convolution for point clouds. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6411–6420, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.799, + 0.472, + 0.853 + ], + "angle": 0, + "content": "[39] Tong Wu, Liang Pan, Junzhe Zhang, Tai Wang, Ziwei Liu, and Dahua Lin. Balanced chamfer distance as a comprehensive metric for point cloud completion. Advances in Neural Information Processing Systems, 34:29088-29100, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.855, + 0.472, + 0.897 + ], + "angle": 0, + "content": "[40] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.121 + ], + "angle": 0, + "content": "Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[41] Jiale Xu, Xintao Wang, Weihao Cheng, Yan-Pei Cao, Ying Shan, Xiaohu Qie, and Shenghua Gao. Dream3d: Zero-shot text-to-3d synthesis using 3d shape prior and text-to-image diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20908–20918, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.206, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[42] Lei Yang, Yanhong Liu, Jinzhu Peng, and Zize Liang. A novel system for off-line 3d seam extraction and path planning based on point cloud segmentation for arc welding robot. Robotics and Computer-Integrated Manufacturing, 64:101929, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.277, + 0.892, + 0.345 + ], + "angle": 0, + "content": "[43] Wang Yifan, Shihao Wu, Hui Huang, Daniel Cohen-Or, and Olga Sorkine-Hornung. Patch-based progressive 3d point set upsampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5958–5967, 2019. 1, 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.347, + 0.892, + 0.415 + ], + "angle": 0, + "content": "[44] Lequan Yu, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-net: Point cloud upsampling network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2790–2799, 2018. 1, 2, 3, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.418, + 0.892, + 0.459 + ], + "angle": 0, + "content": "[45] Cem Yuksel. Sample elimination for generating poisson disk sample sets. 
In Computer Graphics Forum, pages 25-32. Wiley Online Library, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.461, + 0.892, + 0.515 + ], + "angle": 0, + "content": "[46] Dandan Zhang, Weiyong Si, Wen Fan, Yuan Guan, and Chenguang Yang. From teleoperation to autonomous robot-assisted microsurgery: A survey. Machine Intelligence Research, 19(4):288-306, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.517, + 0.892, + 0.585 + ], + "angle": 0, + "content": "[47] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8552-8562, 2022. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.587, + 0.892, + 0.643 + ], + "angle": 0, + "content": "[48] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 16259-16268, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.645, + 0.892, + 0.685 + ], + "angle": 0, + "content": "[49] Yuchao Zheng, Yujie Li, Shuo Yang, and Huimin Lu. Global-pbnet: A novel point cloud registration for autonomous driving. pages 22312-22319. IEEE, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.687, + 0.892, + 0.742 + ], + "angle": 0, + "content": "[50] Linqi Zhou, Yilun Du, and Jiajun Wu. 3d shape generation and completion through point-voxel diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5826–5835, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.742 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.943, + 0.52, + 0.954 + ], + "angle": 0, + "content": "20795" + } + ] +] \ No newline at end of file diff --git a/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/80eefa2b-3d90-4d98-ab03-f2521d12efac_origin.pdf b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/80eefa2b-3d90-4d98-ab03-f2521d12efac_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e72b2f4868f0d5a77646ed64c3474e8670ad4ebd --- /dev/null +++ b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/80eefa2b-3d90-4d98-ab03-f2521d12efac_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2174ab5e5cd171d0f0ce716a820d9eed231fa62bb4a2a7d05f41376a070224e5 +size 4423083 diff --git a/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/full.md b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/full.md new file mode 100644 index 0000000000000000000000000000000000000000..afbb0628f572e670fe0b5671c28ec3d2826fe812 --- /dev/null +++ b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/full.md @@ -0,0 +1,352 @@ +# A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling + +Wentao $\mathbf{Q}\mathbf{u}^{1}$ , Yuantian Shao $^{1}$ , Lingwu Meng $^{1}$ , Xiaoshui Huang $^{2*}$ , Liang Xiao $^{1*}$ Nanjing University of Science and Technology $^{1}$ , Shanghai AI Laboratory $^{2}$ + +{quwentao, alvin_s, menglw815}@njust.edu.cn, huangxiaoshui@163.com, xiaoliang@mail.njust.edu.cn + +# Abstract + +Point cloud upsampling (PCU) enriches the representation of 
raw point clouds, significantly improving the performance in downstream tasks such as classification and reconstruction. Most of the existing point cloud upsampling methods focus on sparse point cloud feature extraction and upsampling module design. In a different way, we dive deeper into directly modelling the gradient of data distribution from dense point clouds. In this paper, we proposed a conditional denoising diffusion probabilistic model (DDPM) for point cloud upsampling, called PUDM. Specifically, PUDM treats the sparse point cloud as a condition, and iteratively learns the transformation relationship between the dense point cloud and the noise. Simultaneously, PUDM aligns with a dual mapping paradigm to further improve the discernment of point features. In this context, PUDM enables learning complex geometry details in the ground truth through the dominant features, while avoiding an additional upsampling module design. Furthermore, to generate high-quality arbitrary-scale point clouds during inference, PUDM exploits the prior knowledge of the scale between sparse point clouds and dense point clouds during training by parameterizing a rate factor. Moreover, PUDM exhibits strong noise robustness in experimental results. In the quantitative and qualitative evaluations on PU1K and PUGAN, PUDM significantly outperformed existing methods in terms of Chamfer Distance (CD) and Hausdorff Distance (HD), achieving state of the art (SOTA) performance. + +# 1. Introduction + +Point clouds, as a most fundamental 3D representation, have been widely used in various downstream tasks such as 3D reconstruction [19, 24], autonomous driving [4, 16, 49], and robotics technology [42, 46]. However, raw point clouds captured from 3D sensors often exhibit sparsity, + +![](images/baae014e43e709df28a42b3bb1701b0aecc9e06c095f8dfb58086e74802356ab.jpg) +Figure 1. Most existing methods achieving satisfactory results for input sparse point clouds with clear geometric structures (such as the hole on the green cover rear), but performing poorly for those with fuzzy geometric details (like the eyes of the red pig). However, our results, with close proximity to the ground truth. + +noise, and non-uniformity. This is substantiated across diverse publicly available benchmark datasets, such as KITTI [8], ScanNet [5]. Hence, point cloud upsampling, which involves the transformation of sparse, incomplete, and noisy point clouds into dense, complete, and artifact-free representations, has garnered considerable research interest. + +Inspired by deep learning, the pioneering work PU-Net [44] is the first to utilize deep neural networks to address this problem. This first divides the input point cloud into multiple patches and then extracts multi-scale features. Subsequently, these features are aggregated and fed into an upsampling module to approximate the dense point cloud coordinates. Building this approach, many works [17, 18, 21, 30, 43] optimize neural networks by focusing on sparse point cloud feature extraction and upsampling module design. + +However, while these methods have achieved improved results, predicting dense point cloud coordinates via sparse point cloud features is an indirect approximating approach. Typically, these methods first utilize an encoder to extract sparse point cloud features, and then use a carefully designed upsampling module to fit dense point cloud coordinates. This approach has three limitations. 
First, the non + +dominance of features causes the generated results to be more inclined toward input sparse point clouds, struggling to represent reasonable geometry details from the ground truth, as Fig 1 illustrated. Second, the additional upsampling module designs increase the workload for algorithm designers and often disrupt the intrinsic coordinate mappings in point clouds [30, 43, 44]. Third, they mostly require the joint supervision of the CD loss and other losses, resulting in them sensitive to noise [13, 39]. + +In this paper, we consider the point cloud upsampling task as a conditional generation problem. This first explores the incorporation of probabilistic models for point cloud upsampling. We propose a novel point cloud upsampling network, called PUDM, which is formally based on a conditional DDPM. Unlike previous methods, PUDM models the gradient of data distribution from dense point clouds (i.e., the ground truth), directly utilizing the dominant features to fit the ground truth, and decoupling the dependency on CD loss. Moreover, the auto-regressive nature of DDPM enables PUDM to efficiently avoid the additional upsampling module design, ensuring intrinsic point-wise mapping relationships in point clouds. + +Simultaneously, to improve the ability of perceiving point features, PUDM employs a dual mapping paradigm. This naturally establishes a dual mapping relationship: between the generated sparse point cloud and the sparse point cloud, and between the dense point cloud and the noise. In this context, PUDM has the ability to learn complex geometric structures from the ground truth, generating uniform surfaces aligned with the ground truth, as Fig 1. + +Furthermore, we found that DDPM only models fixed-scale point cloud objects during training. To overcome this, we consider parameterizing a rate factor to exploit the prior knowledge of the scale between sparse point clouds and dense point clouds. In this way, PUDM enables to generate high-fidelity arbitrary-scale point clouds during inference. + +In additional, benefiting from the inherent denoising architecture and the non-dependency for CD loss, PUDM demonstrates a remarkable degree of robustness in noise experiments. + +Our key contributions can be summarized as: + +- We systematically analyze and recognize conditional DDPM as a favorable model for generating uniform point clouds at arbitrary scales in point cloud upsampling tasks. +- We propose a novel network with a dual mapping for point cloud upsampling, named PUDM, which is based on conditional DDPM. +- By exploiting the rate prior, PUDM exhibits the ability of generating high-fidelity point clouds across arbitrary scales during inference. +- Comprehensive experiments demonstrate the outstanding capability of PUDM in generating geometric details in public benchmarks of point cloud upsampling. + +# 2. Related Works + +Learnable Point Cloud Upsampling. The integration of deep learning with formidable data-driven and trainable attributes has markedly accelerated progress within the 3D field. Thanks to the powerful representation capabilities of deep neural networks, directly learning features from 3D data has become achievable, such as PointNet [28], PointNet++ [29], DGCNN [27], MinkowskiEngine [2], and KPConv [38]. Benefiting from the above, PU-Net [44] stands as the pioneer in integrating deep neural networks into point cloud upsampling tasks. 
This first aggregates multi-scale features for each point through multiple MLPs, and then expands them into a point cloud upsampling set via a channel shuffle layer. Following this pattern, some methods have achieved more significant results, such as MPU [43], PU-GAN [17], Dis-PU [18], and PU-GCN [30]. PU-EVA [21] is the first to achieve the arbitrary-scale point clouds upsampling via edge-vector based affine combinations in one-time training. Subsequently, PUGeo [32] and NePs [7] believe that sampling points within a 2D continuous space can generate higher-quality results. Furthermore, Grad-PU [9] transforms the point cloud upsampling task into a coordinate approximation problem, avoiding the upsampling module design. + +Most methods predict the dense point cloud coordinates via sparse point cloud features, and extend the point set relying on an upsampling module. This causes them to struggle to learn complex geometry details from the ground truth. Moreover, they frequently exhibit a susceptibility to noise due to depending on CD loss during training. In this paper, we consider transforming the point cloud upsampling task into a point cloud generation problem, and first utilize conditional DDPM to address the aforementioned issues. + +DDPM for Point Cloud Generation. Inspired by the success in image generation tasks [33-35], there has been greater attention on directly generating point clouds through DDPM. [22] represents the pioneering effort in applying DDPM to unconditional point cloud generation. Subsequently, [50] extends the application of DDPM to the point cloud completion task by training a point-voxel CNN [20]. However, the voxelization process introduces additional computational complexity. Furthermore, PDR [23] takes raw point clouds as input. But this requires training the two stages (coarse-to-fine) of diffusion models, resulting in a greater time overhead. + +In this paper, we explore to the application of conditional DDPM to handle the point cloud upsampling task. Unlike the point cloud generation and completion task, point cloud upsampling exhibits the difference of the point cloud scale between training and inference. We overcome this issue by exploiting a rate prior. Meanwhile, our method based on a dual mapping paradigm enables to efficiently learn complex geometric details in a single-stage training. + +# 3. Denoising Diffusion Probabilistic Models + +# 3.1. Background for DDPM + +The forward and reverse process. Given the dense point cloud $\pmb{x}$ sampled from a meaningful point distribution $P_{data}$ , and an implicit variable $\pmb{z}$ sampled from a tractable noise distribution $P_{latent}$ , DDPM establishes the transformation relationship between $\pmb{x}$ and $\pmb{z}$ through two Markov chains. This conducts an auto-regressive process: a forward process $q$ that gradually adds noise to $\pmb{x}$ until $\pmb{x}$ degrades to $\pmb{z}$ , and a reverse process $p_{\theta}$ that slowly removes noise from $\pmb{z}$ until $\pmb{z}$ recovers to $\pmb{x}$ . We constrain the transformation speed using a time step $t \sim \mathcal{U}(T)$ ( $T = 1000$ in this paper). + +Training objective under specific conditions. 
Given a set of conditions $C = \{c_i | i = 1..S\}$ , the training objective of DDPM under specific conditions is (please refer to the supplementary materials for the detailed derivation): + +$$ +L (\theta) = \mathbb {E} _ {t \sim U (T), \epsilon \sim \mathcal {N} (0, I)} | | \epsilon - \epsilon_ {\boldsymbol {\theta}} (\boldsymbol {x} _ {t}, C, t) | | ^ {2} \tag {1} +$$ + +where $\pmb{x}_{t} = \sqrt{1 - \overline{\alpha}_{t}}\pmb {\epsilon} + \sqrt{\overline{\alpha}_{t}}\pmb{x_{0}}$ [11]. + +The gradient of data distribution. Furthermore, we use a stochastic differential equation (SDE) to describe the process of DDPM [37]: + +$$ +s _ {\theta} \left(\boldsymbol {x} _ {t}, t\right) = \nabla_ {x} \log \left(\boldsymbol {x} _ {t}\right) = - \frac {1}{\sqrt {1 - \bar {\alpha} _ {t}}} \boldsymbol {\epsilon} _ {\theta} \left(\boldsymbol {x} _ {t}, t\right) \tag {2} +$$ + +The training objective of DDPM is essentially equivalent to computing the score (the gradient of data distribution), which differs only by a constant factor $-\frac{1}{\sqrt{1 - \overline{\alpha}_t}}$ . + +# 3.2. Analysis of DDPM for PCU + +We pioneer the exploration of the advantages and limitations of DDPM for PCU, hoping these insights encourage more researchers to introduce probabilistic models into PCU. + +DDPM is an effective model for PCU. As mentioned in Sec 3.1, the auto-regressive nature of DDPM allows it to directly learn geometry details of the ground truth using the dominant features, generating closer-to-truth, fine-grained results. + +Simultaneously, the reverse process of DDPM in PCU is: + +$$ +p _ {\theta} \left(\boldsymbol {x} _ {\mathbf {0}: T}, \boldsymbol {c}\right) = p \left(\boldsymbol {x} _ {T}\right) \prod_ {t = 1} ^ {T} p _ {\theta} \left(\boldsymbol {x} _ {t - 1} \mid \boldsymbol {x} _ {t}, \boldsymbol {c}\right) \tag {3} +$$ + +where $c$ means the sparse point cloud sampled from a data distribution $P_{c}$ . According to Eq 3, the condition $c$ participates in each step of the reverse process. In fact, this is usually achieved using an additional branch network interacting with the noise network, without intrinsically disrupting the auto-regressive process of DDPM, thus cleverly + +avoiding to design an additional upsampling module. Moreover, the process naturally defines a one-to-one point-wise mapping relationship between the dense point cloud and the noise, preserving the order of points in the diffusion process. + +Furthermore, the efficient denoising architecture and the decoupling of CD loss significantly support the strong noise robustness of DDPM. + +The limitations of DDPM in PCU. While DDPM showcases some advantageous attributes within PCU, it also harbors certain potential limitations: + +- Limitation 1: The lack of effective prior knowledge in the 3D field results in the weak feature perception capability for point cloud conditional networks [14, 31, 47], significantly affecting the final generation results (Tab 8). Although some methods [23] compensate for this problem via a two-stage (coarse-to-fine) training approach, they require a higher training cost. +- Limitation 2: The auto-regressive nature of DDPM provides robust modeling capabilities for fixed-scale objects during training, but it struggles to generate high-quality arbitrary-scale ones during inference (Tab 9). Some works treat different scale point cloud upsampling as multiple tasks [30, 43, 44], but it's not advisable for DDPM due to the excessively high training cost. + +# 4. Methodology + +# 4.1. 
Dual mapping Formulation + +For limitation 1, we adopt a dual mapping paradigm. We first provide a formal exposition of its conception, subsequently delineating the manner in which PUDM aligns with these principles, with a particular emphasis on its role. + +Given two point sets of $\pmb{x}^1 = \{x_i^1 \in \mathbb{R}^3 | i = 1..M\}$ and $\pmb{x}^2 = \{x_i^2 \in \mathbb{R}^3 | i = 1..N\}$ from different data distributions, a network $f_x$ with a dual-branch architecture $(f_x = \{f_1, f_2\})$ , and the corresponding supervision signals for these branches $(l_x = \{l_1, l_2\})$ , if $f_x$ satisfies: + +$$ +\boldsymbol {y} ^ {1} = f _ {1} \left(\boldsymbol {x} ^ {1}\right), \quad \boldsymbol {y} ^ {2} = f _ {2} \left(\boldsymbol {x} ^ {2}\right) \tag {4} +$$ + +where $\pmb{y}^1 = \{y_i^1\in \mathbb{R}^3|i = 1..M\}$ , $\pmb{y}^2 = \{y_i^2\in \mathbb{R}^3|i = 1..N\}$ . $f_{x}$ can be claimed as a dual mapping network. Eq 4 means that each element in the original input has one and only one corresponding element in the final output in each branch. + +In PUDM, we only require the conditional network to meet the above condition, because the noise network inherently builds a one-to-one point-wise mapping between the input and the output [23]. Specifically, we first force the output $\pmb{c}^{\prime} = \{c_{i}^{\prime}\in \mathbb{R}^{3}|i = 1..M\}$ from the conditional network $f_{\psi}$ to approximate the sparse point cloud $\pmb {c} = \{c_i\in \mathbb{R}^3 |i = 1..M\}$ coordinates via MLPs, and then optimize the process by the mean squared error loss: + +$$ +L (\psi) = \mathbb {E} _ {\boldsymbol {c} \sim P _ {c}} \| \boldsymbol {c} - \boldsymbol {c} ^ {\prime} \| ^ {2} \tag {5} +$$ + +Formally, this establishes a one-to-one point-wise mapping between the input and the output for the conditional network, $\pmb{c}^{\prime} = f_{\psi}(\pmb {c}) = \mathcal{D}_{c}(\mathcal{E}_{c}(\pmb {c},\mathcal{T}\mathcal{M}(\mathcal{E}_{n}(\pmb{x}_{t},r,t))))$ , as shown in Fig 2. $\mathcal{T}\mathcal{M}(\cdot)$ denotes the Transfer Module defined in Sec 4.3. + +For point cloud tasks with unordered structures, this pattern effectively enhances network capability in capturing point features by preserving the ordered relationships between input and output points [3, 12]. Moreover, corresponding supervision signals ensure adequate training for each branch network (Fig 7), providing an effective strategy to address the challenge of lacking robust 3D pre-trained models for conditional branch networks in point cloud generation tasks. + +# 4.2. Rate Modeling + +For limitation 2, drawing inspiration from the practice of adding class labels in conditional probabilistic models [6, 10, 26], we propose a simple and effective approach to achieve high-quality arbitrary-scale sampling during inference. Specifically, we first add a rate label $r$ to each sample pair, $(c, x) \to (c, x, r)$ (the supplementary materials provide ablation studies for different forms of the rate label $r$ ). Subsequently, we parameter the rate factor using an embedding layer. In this way, the reverse process of DDPM is: + +$$ +p _ {\theta} \left(\boldsymbol {x} _ {\mathbf {0}: T}, \boldsymbol {c}, r\right) = p \left(\boldsymbol {x} _ {T}\right) \prod_ {t = 1} ^ {T} p _ {\theta} \left(\boldsymbol {x} _ {t - 1} \mid \boldsymbol {x} _ {t}, \boldsymbol {c}, r\right) \tag {6} +$$ + +Eq 6 demonstrates that this simply adds an additional condition to DDPM, the rate prior $r$ , without increasing the number of samples. 
Unlike class labels, we found in experiments that this conditional prior we exploited can significantly improve the generation quality of unseen-scale point clouds. The reason is that generating unseen-scale and seen-category objects usually are easier compared to generating seen-scale and unseen-category ones for models. + +# 4.3. Network Architecture + +In this section, we introduce the overall framework of PUDM, consisting of three crucial components: the conditional network (C-Net), the noise network (N-Net), and the Transfer Module (TM). This process is remarkably illustrated in Fig 2. The parameter setting and implementation details are provided in the supplementary materials. + +The Conditional Network (C-Net). We use PointNet++ [29] as the backbone. This follows the standard U-Net framework. The encoder and decoder are composed of multiple Set Abstraction (SA) layers and Feature Propagation + +![](images/48c53407bff01a253261e6c0afaccb535c5cf92c36b66d5ac18fa84b9c22d0af.jpg) +Figure 2. The overall framework of PUDM: The N-Net (upper branch) and the C-Net (lower branch) both establish a one-to-one point-wise mapping between input and output using mean squared error loss. They engage in information exchange through a transfer module (TM). Simultaneously, the rate prompt is provided to exploit the prior knowledge of the scale between sparse point clouds and dense point clouds. + +(FP) layers, respectively. Unlike PointNet++ using the max-pooling layer to filter features, we consider utilizing the self-attention layer to retain more fine-grained information [25, 48]. In addition, we only feed the sparse point cloud into the C-Net to ensure the feature extraction in a pure and effective manner. + +The Noise Network (N-Net). The N-Net and the C-Net share the same network architecture. In contrast to the C-Net, we need to introduce additional guidance information to the N-Net for modeling the diffusion steps. + +We first transform the sparse point cloud $\mathbf{c} \in \mathbb{R}^{N \times 3}$ into the interpolation point cloud $\mathbf{i} \in \mathbb{R}^{rN \times 3}$ through the midpoint interpolation [9], and then sum $\mathbf{i}$ and $\mathbf{x}_t$ as the input for the N-Net. Meanwhile, we extract the global features from $\mathbf{i}$ to enhance the semantic understanding. Furthermore, to identify the noise level, we encode the time step $t$ . Finally, as mentioned in Sec 4.2, we parameterized the rate factor $r$ . These additional pieces of information are both treated as global features, and incorporated into each stage of the encoder and the decoder in the N-Net. + +The Transfer Module (TM). We propose a bidirectional interaction module (TM) to serve as an intermediary between the C-Net and the N-Net. We only place the TM at the bottleneck stage of U-Net, due to the significant computational efficiency and the abundant semantic information via the maximum receptive field [12, 15]. + +Given the outputs of the encoder in the C-Net and the N-Net, $F^{c} \in \mathbb{R}^{N_{e}^{c} \times C_{e}^{c}}$ , $F^{n} \in \mathbb{R}^{N_{e}^{n} \times C_{e}^{n}}$ separately, the TM first transforms $F^{c} \to (Q) \in \mathbb{R}^{N_{e}^{c} \times C_{i}}$ and $F^{n} \to (K, V) \in \mathbb{R}^{N_{e}^{n} \times C_{i}}$ via MLPs. Next, we can obtain the fused feature: + +$$ +F _ {f} = M L P \left(\operatorname {s o f t m a x} \left(\frac {Q K ^ {T}}{\sqrt {C _ {i}}}\right) V\right) + F ^ {c} \tag {7} +$$ + +Subsequently, $F_{f}$ is fed into a feed-forward network (FFN) to output the final features. 
Similarly, the same operation is also applied in reverse direction, so that information flows in both directions, $F^{c} \rightarrow F^{n}$ and $F^{n} \rightarrow F^{c}$ . + +# 4.4. Training and Inference + +Training. As mentioned earlier (Eq 1 and Eq 5), PUDM is a dual mapping network, and models the rate prior during training. Therefore, the training objective is: + +$$ +L _ {m s e} = L (\theta) + \alpha L (\psi) \tag {8} +$$ + +where $\alpha$ means a weighting factor ( $\alpha = 1$ in this paper). + +Inference. We found that adding the interpolated points $\mathbf{i}$ as the guidance information significantly improves the generated quality during inference. Therefore, we iteratively transform $\boldsymbol{x}_t$ into $\boldsymbol{x}_0$ based on: + +$$ +\boldsymbol {x} _ {t - 1} = \gamma \left(\frac {1}{\sqrt {\alpha_ {t}}} \left(\boldsymbol {x} _ {t} - \frac {1 - a _ {t}}{\sqrt {1 - \bar {\alpha} _ {t}}} \epsilon_ {\theta} \left(\boldsymbol {x} _ {t}, \boldsymbol {c}, r, t\right)\right) + \boldsymbol {\sigma} _ {t} \boldsymbol {\epsilon} + \mathbf {i}\right) \tag {9} +$$ + +where $\gamma$ denotes a scale factor ( $\gamma = 0.5$ in this paper). + +# 5. Experiments + +# 5.1. Experiment Setup + +Dataset. In our experiments, we utilize two public benchmarks (PUGAN [17], PU1K [30]) for evaluation. We adhere to the official training/testing partitioning protocols for these datasets. This uses Poisson disk sampling [45] to generate 24,000 and 69,000 uniform patches for training, respectively. Each patch contains 256 points, while the corresponding ground truth has 1024 points. Meanwhile, 27 and 127 point clouds are used for testing, respectively. The input sparse point clouds consist of 2048 points, and are upsampled to $2048 \times R$ points via evaluated methods. + +Metrics. Following [9, 43, 44], we employ the Chamfer Distance $(\mathrm{CD} \times 10^{-3})$ , Hausdorff Distance $(\mathrm{HD} \times 10^{-3})$ , and Point-to-Surface Distance $(\mathrm{P2F} \times 10^{-3})$ as evaluation metrics in our experiments. + +# 5.2. Comparison with SOTA + +Results on PUGAN. We first conduct the point cloud upsampling at low upsampling rate $(4\times)$ and high upsampling rate $(16\times)$ on PUGAN. Tab 1 illustrates the substantial superiority of our method in geometric detail description compared to other methods, as evidenced by significantly reduced CD and HD. Because our method models the gradient of data distribution from dense point clouds, facilitating the direct approximation of geometric details from the ground truth, thereby yielding higher accuracy of our results. Fig 3 further substantiates our viewpoint, and shows that our method produces fewer outliers, aligning with more uniform surfaces, closer to the ground truth. + +In addition, despite P2F falling slightly behind Grad-PU [9] at $4\times$ , the difference is insignificant due to the asymmetry between points and surfaces [9, 17]. + +
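For reference, the CD and HD columns in the following tables typically follow the standard nearest-neighbour formulations between two point sets. Below is a minimal PyTorch sketch of both metrics; the reduction choices and the ×10⁻³ reporting scale are assumptions about the evaluation code, which is not shown in the paper.

```python
import torch

def chamfer_hausdorff(p: torch.Tensor, q: torch.Tensor):
    """Symmetric Chamfer Distance (CD) and Hausdorff Distance (HD) between two
    point sets p (N, 3) and q (M, 3). A brute-force O(N*M) sketch; reported
    numbers are typically scaled by 1e3, and some CD variants use squared distances."""
    d = torch.cdist(p, q)                   # (N, M) pairwise Euclidean distances
    d_pq = d.min(dim=1).values              # for each point in p, its nearest neighbour in q
    d_qp = d.min(dim=0).values              # for each point in q, its nearest neighbour in p
    cd = d_pq.mean() + d_qp.mean()          # Chamfer Distance (both directions)
    hd = torch.max(d_pq.max(), d_qp.max())  # symmetric Hausdorff Distance
    return cd, hd
```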
| Methods | CD↓ (4×) | HD↓ (4×) | P2F↓ (4×) | CD↓ (16×) | HD↓ (16×) | P2F↓ (16×) |
| --- | --- | --- | --- | --- | --- | --- |
| PU-Net [44] | 0.529 | 6.805 | 4.460 | 0.510 | 8.206 | 6.041 |
| MPU [43] | 0.292 | 6.672 | 2.822 | 0.219 | 7.054 | 3.085 |
| PU-GAN [17] | 0.282 | 5.577 | 2.016 | 0.207 | 6.963 | 2.556 |
| Dis-PU [18] | 0.274 | 3.696 | 1.943 | 0.167 | 4.923 | 2.261 |
| PU-EVA [21] | 0.277 | 3.971 | 2.524 | 0.185 | 5.273 | 2.972 |
| PU-GCN [30] | 0.268 | 3.201 | 2.489 | 0.161 | 4.283 | 2.632 |
| NePs [7] | 0.259 | 3.648 | 1.935 | 0.152 | 4.910 | 2.198 |
| Grad-PU [9] | 0.245 | 2.369 | 1.893 | 0.108 | 2.352 | 2.127 |
| Ours | 0.131 | 1.220 | 1.912 | 0.082 | 1.120 | 2.114 |
Arbitrary Upsampling Rates on PUGAN. Similarly to [9], we perform comparative analyses across different upsampling rates on PUGAN. Tab 2 shows that our method steadily outperforms Grad-PU [9] on nearly all metrics. In particular, it holds a significant advantage in terms of CD and HD, further confirming its superiority in learning complex geometric details.

Moreover, we visualize the results at higher upsampling rates ($16\times$, $32\times$, $64\times$, and $128\times$) in Fig 4. Our results are clearly more complete, uniform, and smooth than those of Grad-PU [9].

Table 1. The results of $4 \times$ and $16 \times$ on PUGAN. Our method significantly surpasses other methods in terms of CD and HD.
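To make the arbitrary-rate sampling behind Tab 2 concrete, the following is a minimal sketch of the reverse process in Eq 9, conditioned on the sparse cloud $c$ and the rate label $r$. The `eps_model` interface, the midpoint-interpolated guidance points `i`, the `betas` schedule, and the choice $\sigma_t = \sqrt{\beta_t}$ are assumptions for illustration, not the released implementation.

```python
import torch

@torch.no_grad()
def sample_upsampled(eps_model, c, i, r, betas, gamma=0.5):
    """Minimal sketch of the reverse process in Eq 9.
    eps_model : callable predicting eps_theta(x_t, c, r, t)   (assumed interface)
    c         : (N, 3) sparse input cloud
    i         : (R*N, 3) guidance points, e.g. from midpoint interpolation [9]
    r         : integer rate label, betas : (T,) noise schedule."""
    alphas = 1.0 - betas
    alpha_bar = torch.cumprod(alphas, dim=0)
    x_t = torch.randn_like(i)                                  # x_T ~ N(0, I)
    for t in reversed(range(len(betas))):
        eps = eps_model(x_t, c, r, t)
        mean = (x_t - (1.0 - alphas[t]) / torch.sqrt(1.0 - alpha_bar[t]) * eps) / torch.sqrt(alphas[t])
        noise = torch.randn_like(x_t) if t > 0 else torch.zeros_like(x_t)
        sigma_t = torch.sqrt(betas[t])                          # sigma_t = sqrt(beta_t) assumed
        x_t = gamma * (mean + sigma_t * noise + i)              # Eq 9 with scale factor gamma = 0.5
    return x_t                                                  # approximated dense cloud x_0
```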
| Rates | CD↓ (Grad-PU [9]) | HD↓ (Grad-PU [9]) | P2F↓ (Grad-PU [9]) | CD↓ (Ours) | HD↓ (Ours) | P2F↓ (Ours) |
| --- | --- | --- | --- | --- | --- | --- |
|  | 0.540 | 3.177 | 1.775 | 0.247 | 1.410 | 1.812 |
|  | 0.353 | 2.608 | 1.654 | 0.171 | 1.292 | 1.785 |
|  | 0.234 | 2.549 | 1.836 | 0.116 | 1.244 | 1.794 |
|  | 0.225 | 2.526 | 1.981 | 0.107 | 1.235 | 1.980 |
|  | 0.219 | 2.634 | 1.940 | 0.106 | 1.231 | 1.952 |
+ +Table 2. Grad-PU vs. ours at different rates on PUGAN. Benefiting from the rate modeling, our method still exhibits remarkable performance at different rates. + +
| Methods | CD↓ | HD↓ | P2F↓ |
| --- | --- | --- | --- |
| PU-Net [44] | 1.155 | 15.170 | 4.834 |
| MPU [43] | 0.935 | 13.327 | 3.511 |
| PU-GCN [30] | 0.585 | 7.577 | 2.499 |
| Grad-PU [9] | 0.404 | 3.732 | 1.474 |
| Ours | 0.217 | 2.164 | 1.477 |
+ +Table 3. The results of $4 \times$ on PU1K. We utilize the experimental results from the original paper. Our method outperforms other methods across nearly all metrics. + +Results on PU1K. Furthermore, we also conduct the evaluation at $4 \times$ on more challenging PU1K [30]. As reported in Tab 3, our method continues to demonstrate substantial advantages in terms of CD and HD compared to other methods. + +Result on Real datasets. Additionally, we conduct the evaluation on real indoor (ScanNet [5]) and outdoor (KITTI [8]) scene datasets. Note that all methods are only trained + +![](images/90fff76d4415f2ac586b9b84ce1da06869b125f6214f172f5c983bf01a912df8.jpg) +Figure 3. Visualization results at $4\times$ on PUGAN. Our result exhibits fewer outliers, and clearly captures geometric details from the ground truth (the holes on the casting). + +![](images/064b9ae13cac1aa8005976ecf0ef5d7802f449b4a9f26c572dc317b6f425c604.jpg) +Figure 4. Grad-PU vs. ours at large rates on PUGAN. Our method consistently generates more uniform and smooth surfaces (these results are achieved using an NVIDIA 3090 GPU). + +on PUGAN. Upsampling scene-level point clouds poses greater challenges than upsampling object-level ones, due to the former having more intricate geometric structures. Due to the absence of the ground truth, our analysis is confined to qualitative comparisons. In Fig 5, our method still generates reasonable and smooth surfaces on some complex structures, while other methods exhibit artifacts such as overlap and voids. Simultaneously, Fig 6 illustrates that our results show more complete and fewer outliers. Although Grad-PU [9] also demonstrates good outlier results, it generates a considerable amount of uneven surfaces. + +# 5.3. Validation for Noise Robustness + +Gaussian Noise. To demonstrate the robustness, we perturb the sparse point clouds with Gaussian noise sampled $\mathcal{N}(0,I)$ added at different noise levels $\tau$ . + +As shown in Tab 4, our method significantly outperforms other methods under multiple level noise perturbations ( $\tau = 0.01$ , $\tau = 0.02$ ). Specifically, this is because our method models the noise $\epsilon$ (the gradient of data distribution) and avoids CD loss during training. + +
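As a point of reference for the noise settings in Tabs 4 and 5, such a perturbation can be written in one line; the paper does not spell out the input normalization, so the exact form below is an assumption.

```python
import torch

def perturb(points: torch.Tensor, tau: float) -> torch.Tensor:
    """Add isotropic Gaussian noise drawn from N(0, I), scaled by the noise level tau
    (tau = 0.01 / 0.02 / 0.05 / 0.1 in the experiments); an assumed, minimal form."""
    return points + tau * torch.randn_like(points)
```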
| Methods | CD↓ (τ = 0.01) | HD↓ (τ = 0.01) | P2F↓ (τ = 0.01) | CD↓ (τ = 0.02) | HD↓ (τ = 0.02) | P2F↓ (τ = 0.02) |
| --- | --- | --- | --- | --- | --- | --- |
| PU-Net [44] | 0.628 | 8.068 | 9.816 | 1.078 | 10.867 | 16.401 |
| MPU [43] | 0.506 | 6.978 | 9.059 | 0.929 | 10.820 | 15.621 |
| PU-GAN [17] | 0.464 | 6.070 | 7.498 | 0.887 | 10.602 | 15.088 |
| Dis-PU [18] | 0.419 | 5.413 | 6.723 | 0.818 | 9.345 | 14.376 |
| PU-EVA [21] | 0.459 | 5.377 | 7.189 | 0.839 | 9.325 | 14.652 |
| PU-GCN [30] | 0.448 | 5.586 | 6.989 | 0.816 | 8.604 | 13.798 |
| NePs [7] | 0.425 | 5.438 | 6.546 | 0.798 | 9.102 | 12.088 |
| Grad-PU [9] | 0.414 | 4.145 | 6.400 | 0.766 | 7.336 | 11.534 |
| Ours | 0.210 | 2.430 | 6.070 | 0.529 | 5.471 | 9.742 |
Table 4. The results of $4 \times$ at low-level Gaussian noise on PUGAN. Our method significantly outperforms other methods in terms of noise robustness.

Moreover, we also conduct the evaluation under more challenging noise perturbations. Tab 5 shows that our method remains more robust at higher noise levels ($\tau = 0.05$ and $\tau = 0.1$), indicating that its advantage in noise robustness grows as the perturbation strengthens.

Other Noise. Furthermore, we also investigate the performance of our method on uniform noise. Admittedly, while our method still achieves SOTA performance, as shown in Tab 6, the results on uniform noise are markedly worse than those on Gaussian noise.

We provide an intuitive explanation. Eq 2 shows that the training objective of DDPM is to fit the gradient of the data distribution (modeling the noise $\epsilon$, named the score) [37]. Essentially, DDPM learns the direction of noise generation. When the conditions are corrupted by noise, the disturbance to this direction remains relatively small, because the

![](images/1f2b71bfcd4250bb6bd32e18e2c92a351ec9a9ecc902be7c53fa8f8e0a7ddcd2.jpg)
Figure 5. The results of $4 \times$ on KITTI. Our method noticeably generates more reasonable and uniform results on some complex geometric structures.
| Methods | CD↓ (τ = 0.05) | HD↓ (τ = 0.05) | P2F↓ (τ = 0.05) | CD↓ (τ = 0.1) | HD↓ (τ = 0.1) | P2F↓ (τ = 0.1) |
| --- | --- | --- | --- | --- | --- | --- |
| PU-Net [44] | 1.370 | 13.729 | 23.249 | 1.498 | 14.193 | 23.846 |
| MPU [43] | 1.247 | 11.645 | 22.189 | 1.321 | 12.415 | 23.841 |
| PU-GAN [17] | 1.124 | 9.091 | 21.252 | 1.271 | 10.911 | 23.174 |
| Dis-PU [18] | 1.076 | 7.921 | 20.603 | 1.244 | 10.913 | 22.845 |
| PU-EVA [21] | 1.057 | 7.910 | 20.044 | 1.226 | 9.305 | 22.296 |
| PU-GCN [30] | 1.263 | 9.869 | 22.835 | 1.456 | 11.063 | 25.213 |
| NePs [7] | 1.143 | 9.645 | 18.642 | 1.198 | 9.874 | 20.162 |
| Grad-PU [9] | 0.978 | 8.057 | 16.927 | 1.118 | 8.946 | 18.845 |
| Ours | 0.618 | 5.386 | 14.751 | 0.853 | 6.239 | 16.845 |
noise has a distribution similar to $\epsilon$. Therefore, during inference, our method is robust to perturbations whose distribution approximates that of $\epsilon$ (Gaussian noise), but performs worse when faced with different ones (the supplementary materials provide more noise experiments to support this conclusion).

Table 5. The results of $4 \times$ at high-level Gaussian noise on PUGAN. Compared to other methods, our method shows a more favorable trend in robustness as the noise level increases.
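To make the contrast with the Gaussian setting concrete, a uniform perturbation at the same level τ could take the following form; the exact range of the uniform noise used in Tab 6 is not specified, so this is only an assumed sketch.

```python
import torch

def perturb_uniform(points: torch.Tensor, tau: float) -> torch.Tensor:
    """Uniform counterpart of the Gaussian perturbation: noise drawn from U(-1, 1)
    and scaled by tau. Its distribution differs from the Gaussian eps that the model
    fits, which is consistent with the weaker robustness observed in Tab 6."""
    return points + tau * (2.0 * torch.rand_like(points) - 1.0)
```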
| Methods | CD↓ (τ = 0.05) | HD↓ (τ = 0.05) | P2F↓ (τ = 0.05) | CD↓ (τ = 0.1) | HD↓ (τ = 0.1) | P2F↓ (τ = 0.1) |
| --- | --- | --- | --- | --- | --- | --- |
| PU-Net [44] | 1.490 | 14.473 | 23.223 | 1.725 | 15.442 | 25.251 |
| MPU [43] | 1.224 | 10.842 | 20.456 | 1.545 | 11.645 | 23.512 |
| PU-GAN [17] | 1.034 | 7.757 | 18.617 | 1.327 | 9.700 | 21.321 |
| Dis-PU [18] | 1.006 | 6.856 | 17.873 | 1.314 | 7.463 | 20.980 |
| PU-EVA [21] | 1.024 | 7.534 | 18.179 | 1.334 | 8.056 | 21.158 |
| PU-GCN [30] | 1.045 | 9.643 | 18.899 | 1.325 | 10.877 | 21.633 |
| NePs [7] | 1.048 | 7.345 | 18.054 | 1.321 | 9.645 | 21.314 |
| Grad-PU [9] | 1.067 | 6.634 | 17.734 | 1.399 | 7.215 | 21.028 |
| Ours | 0.998 | 6.110 | 17.558 | 1.310 | 6.732 | 20.564 |
# 5.4. Effectiveness in Downstream Task

We evaluate the effect of upsampling quality on a downstream task: point cloud classification. We also conducted experiments on point cloud part segmentation; please refer to the supplementary materials.

PointNet [28] and PointNet++ [29] are chosen as the downstream task models due to their strong performance and widespread influence in 3D tasks. We follow the official training and testing procedures. We select ModelNet40 [40] (40 categories) and ShapeNet [1] (16 categories) as the benchmarks for point cloud classification. For a fair and effective evaluation, we use only 3D coordinates as the input. Similar to the evaluation strategy on real datasets, all point cloud upsampling methods are only trained on PUGAN.

For evaluation, we first subsample 256/512 points from the test point clouds on ModelNet40/ShapeNet. They are then upsampled to 1024/2048 points by the evaluated methods. As depicted in Tab 7, our results significantly improve classification accuracy compared to the low-res point clouds, and consistently outperform other methods across all metrics.

Table 6. The results of $4 \times$ at high-level uniform noise on PUGAN. Our method outperforms other methods on all metrics.
| Methods | ModelNet40 PointNet IA↑ | ModelNet40 PointNet CA↑ | ModelNet40 PointNet++ IA↑ | ModelNet40 PointNet++ CA↑ | ShapeNet PointNet IA↑ | ShapeNet PointNet CA↑ | ShapeNet PointNet++ IA↑ | ShapeNet PointNet++ CA↑ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Low-res | 87.15 | 83.12 | 88.87 | 84.45 | 97.61 | 95.09 | 98.20 | 96.11 |
| High-res | 90.74 | 87.14 | 92.24 | 89.91 | 98.89 | 96.61 | 99.27 | 98.18 |
| PU-Net [44] | 88.72 | 85.25 | 88.99 | 85.43 | 97.99 | 95.69 | 98.57 | 96.35 |
| MPU [43] | 89.04 | 85.84 | 89.54 | 86.51 | 98.03 | 95.92 | 98.94 | 96.81 |
| PU-GAN [17] | 89.95 | 85.68 | 90.45 | 87.23 | 98.75 | 95.70 | 90.45 | 87.23 |
| Dis-PU [18] | 88.70 | 85.34 | 89.56 | 86.53 | 98.80 | 96.07 | 99.00 | 97.15 |
| PU-EVA [21] | 89.27 | 85.63 | 89.96 | 86.86 | 98.72 | 95.69 | 99.07 | 97.58 |
| PU-GCN [30] | 89.77 | 85.38 | 89.45 | 86.15 | 98.78 | 96.06 | 99.03 | 97.42 |
| NePs [7] | 90.01 | 86.15 | 90.32 | 87.34 | 98.94 | 96.20 | 99.12 | 97.94 |
| Grad-PU [9] | 90.05 | 86.06 | 89.98 | 87.49 | 98.82 | 96.19 | 99.10 | 97.63 |
| Ours | 90.33 | 86.54 | 92.14 | 89.42 | 98.85 | 96.58 | 99.13 | 97.99 |
+ +Table 7. The results of point cloud classification. "Low-res" refers to the point cloud subsampled, while "High-res" denotes the original test point cloud. Meanwhile, "IA" stands for instance accuracy, and "CA" denotes class accuracy. Our results have more reasonable, finer-grained, and closer-to-ground truth geometric structures, thereby achieving more significant classification accuracy. + +# 5.5. Ablation Study + +With/Without the dual mapping paradigm. Thanks to the rich and structured data, the conditional networks for text or images can be replaced by powerful pre-trained models [34-36, 41]. However, robust pre-trained backbones are lacking in the 3D field due to scarce data and challenging feature extraction [14, 31, 47]. In this paper, we employ the dual mapping paradigm to augment the capability of perceiving point features for PUDM, ensuring the comprehensive training of the C-Net. To validate this point, we remove the supervision signal from the C-Net to disrupt this pattern. Meanwhile, we also validate the importance of the C-Net by retaining only the N-Net in PUDM. + +![](images/493648e1692ba9e130de1a46f1159344609e40badeb199ee00599ab89b217701.jpg) +Figure 6. The results of $4 \times$ on ScanNet. Our results exhibit reduced instances of outliers, concurrently generating more uniform and complete surfaces. + +As reported in Tab 8, disrupting the dual mapping pattern leads to a significant decrease in performance due to the weakened point feature perception ability of the C-Net. Fig 7 visualizes the results of the C-Net generating input sparse points using the dual mapping paradigm. + +Meanwhile, although removing the C-Net can maintain a single mapping pattern, as demonstrated in prior research [21, 30, 44], sparse point cloud feature extraction plays a pivotal role in PCU. + +
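For clarity, the joint objective of Eq 8 that the ablation in Tab 8 switches on and off can be sketched as follows; tensor names are illustrative, and the weighting $\alpha = 1$ follows the paper.

```python
import torch.nn.functional as F

def dual_mapping_loss(eps_pred, eps_true, c_recon, c, alpha=1.0):
    """Sketch of Eq 8: L = L(theta) + alpha * L(psi).
    L(theta): MSE between the true noise and the N-Net prediction (Eq 1).
    L(psi)  : MSE between the sparse input c and the C-Net reconstruction c' (Eq 5)."""
    loss_noise = F.mse_loss(eps_pred, eps_true)   # supervises the noise network
    loss_cond = F.mse_loss(c_recon, c)            # supervises the conditional network
    return loss_noise + alpha * loss_cond
```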
| Methods | CD↓ | HD↓ | P2F↓ |
| --- | --- | --- | --- |
| Without the C-Net | 0.212 | 2.015 | 2.284 |
| Without the dual mapping | 0.168 | 1.498 | 2.013 |
| With the dual mapping | 0.131 | 1.220 | 1.912 |
+ +![](images/0694b37cefe58ad43c6b8079627825b457931431fe8e7c100b2f1bde44e660e3.jpg) +Figure 7. Visualization results of the C-Net generating sparse point clouds on PUGAN. This demonstrates that the C-Net has been effectively trained. + +With/Without the rate prior. As mentioned in Sec 4.2, we introduce the rate prior into PUDM during training to achieve high-quality generation of point clouds during inference. Tab 9 demonstrates the effectiveness of this approach. Without the rate prior, the overall performance notably decreases, and exhibits significant fluctuations (performing better at $4\times$ , but worse at other rates). + +Single/Multiple Transfer Module. In this paper, we employ a TM positioned at the bottleneck stage of the U-Net, as its maximum receptive field provides ample con + +Table 8. Ablation study of the dual mapping paradigm. The dual mapping pattern evidently achieves the best performance. + +
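As context for Tab 9, the rate prior of Sec 4.2 amounts to looking up the integer rate label in an embedding table and injecting it as a global feature; the table size and feature width below are assumptions, not the paper's settings.

```python
import torch
import torch.nn as nn

class RateEmbedding(nn.Module):
    """Sketch of the parameterized rate factor r (Sec 4.2): an embedding lookup
    broadcast to every point as an additional global condition."""
    def __init__(self, max_rate: int = 128, dim: int = 128):   # sizes are assumptions
        super().__init__()
        self.table = nn.Embedding(max_rate + 1, dim)

    def forward(self, r: torch.Tensor, num_points: int) -> torch.Tensor:
        feat = self.table(r)                                    # (B, dim), one vector per sample
        return feat.unsqueeze(1).expand(-1, num_points, -1)     # (B, num_points, dim)
```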
| Rates | CD↓ (without the rate modeling) | HD↓ (without) | P2F↓ (without) | CD↓ (with the rate modeling) | HD↓ (with) | P2F↓ (with) |
| --- | --- | --- | --- | --- | --- | --- |
|  | 0.295 | 1.816 | 2.014 | 0.247 | 1.410 | 1.812 |
|  | 0.224 | 1.544 | 1.975 | 0.171 | 1.292 | 1.785 |
|  | 0.158 | 1.512 | 1.815 | 0.131 | 1.220 | 1.912 |
|  | 0.166 | 1.548 | 1.944 | 0.116 | 1.244 | 1.794 |
|  | 0.151 | 1.528 | 1.956 | 0.107 | 1.235 | 1.980 |
|  | 0.144 | 1.425 | 1.988 | 0.106 | 1.231 | 1.952 |
|  | 0.139 | 1.399 | 1.921 | 0.104 | 1.215 | 1.875 |
+ +textual information [12, 15]. Meanwhile, we also attempt to place multiple TMs at each stage in U-Net to enable the interaction of multi-scale information [23]. Tab 10 shows that although multiple TMs lead to a slight improvement in terms of CD loss, it is not cost-effective due to the significant increase in computational cost. + +Table 9. Ablation study of the rate prior. Utilizing the rate prior significantly enhances the quality of arbitrary-scale sampling. + +
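To accompany the comparison in Tab 10, the following is a minimal, single-direction sketch of the fusion in Eq 7: queries from the conditional features $F^c$, keys and values from the noise features $F^n$, followed by a residual connection and an FFN. Layer widths and the exact FFN layout are assumptions.

```python
import torch
import torch.nn as nn

class TransferModule(nn.Module):
    """One direction (F^n -> F^c) of the Transfer Module fusion in Eq 7; the full
    module applies the same cross-attention in both directions."""
    def __init__(self, c_dim: int, n_dim: int, dim: int = 256):   # widths are assumptions
        super().__init__()
        self.q = nn.Linear(c_dim, dim)
        self.kv = nn.Linear(n_dim, 2 * dim)
        self.proj = nn.Linear(dim, c_dim)
        self.ffn = nn.Sequential(nn.Linear(c_dim, c_dim), nn.ReLU(), nn.Linear(c_dim, c_dim))

    def forward(self, f_c: torch.Tensor, f_n: torch.Tensor) -> torch.Tensor:
        q = self.q(f_c)                                           # (B, Nc, dim)
        k, v = self.kv(f_n).chunk(2, dim=-1)                      # (B, Nn, dim) each
        attn = torch.softmax(q @ k.transpose(-2, -1) / q.shape[-1] ** 0.5, dim=-1)
        fused = self.proj(attn @ v) + f_c                         # MLP(softmax(QK^T / sqrt(C)) V) + F^c
        return self.ffn(fused)                                    # feed-forward refinement (Eq 7)
```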
| Methods | CD↓ | HD↓ | P2F↓ | Params↓ |
| --- | --- | --- | --- | --- |
| Multiple TMs | 0.129 | 1.235 | 1.953 | 28.65M |
| Single TM | 0.131 | 1.220 | 1.912 | 16.03M |
+ +Table 10. Ablation study of the Transfer Module. Using the single TM strikes a balance between performance and efficiency. + +# 6. Conclusion + +In this paper, we systematically analyze and identify the potential of DDPM as a promising model for PCU. Meanwhile, we propose PUDM based on conditional DDPM. PUDM enables to directly utilize the dominant features to generate geometric details approximating the ground truth. Additionally, we analyze the limitations of applying DDPM to PCU (the absence of efficient prior knowledge for the conditional network and the fixed-scale object modeling), and propose corresponding solutions (a dual mapping paradigm and the rate modeling). Moreover, we offer a straightforward explanation regarding the robustness to noise for PUDM observed in experiments. + +Acknowledgments. This work was supported in part by the Jiangsu Geological Bureau ResearchProject under Grant 2023KY11, in part by the National Natural Science Foundation of China under Grant 61871226, and in part by the National Key R&D Program of China (NO.2022ZD0160101). + +# References + +[1] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012, 2015. 7 +[2] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3075-3084, 2019. 2 +[3] Christopher Choy, Jaesik Park, and Vladlen Koltun. Fully convolutional geometric features. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8958-8966, 2019. 4 +[4] Yaodong Cui, Ren Chen, Wenbo Chu, Long Chen, Daxin Tian, Ying Li, and Dongpu Cao. Deep learning for image and point cloud fusion in autonomous driving: A review. pages 722-739. IEEE, 2021. 1 +[5] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5828-5839, 2017. 1, 5 +[6] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 4 +[7] Wanquan Feng, Jin Li, Hongrui Cai, Xiaonan Luo, and Juyong Zhang. Neural points: Point cloud representation with neural fields for arbitrary upsampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18633-18642, 2022. 2, 5, 6, 7 +[8] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. pages 1231-1237. Sage Publications Sage UK: London, England, 2013. 1, 5 +[9] Yun He, Danhang Tang, Yinda Zhang, Xiangyang Xue, and Yanwei Fu. Grad-pu: Arbitrary-scale point cloud upsampling via gradient descent with learned distance functions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5354-5363, 2023. 2, 4, 5, 6, 7 +[10] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 4 +[11] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 3 +[12] Shengyu Huang, Zan Gojcic, Mikhail Usvyatsov, Andreas Wieser, and Konrad Schindler. 
Predator: Registration of 3d point clouds with low overlap. In Proceedings of the IEEE/CVF Conference on computer vision and pattern recognition, pages 4267-4276, 2021. 4, 8 +[13] Sheng Yu Huang, Hao-Yu Hsu, and Frank Wang. Spovt: Semantic-prototype variational transformer for dense point cloud semantic completion. Advances in Neural Information Processing Systems, 35:33934–33946, 2022. 2 + +[14] Xiaoshui Huang, Sheng Li, Wentao Qu, Tong He, Yifan Zuo, and Wanli Ouyang. Frozen clip model is efficient point cloud backbone. arXiv preprint arXiv:2212.04098, 2022. 3, 7 +[15] Xiaoshui Huang, Wentao Qu, Yifan Zuo, Yuming Fang, and Xiaowei Zhao. Imfnet: Interpretable multimodal fusion for point cloud registration. IEEE Robotics and Automation Letters, 7(4):12323-12330, 2022. 4, 8 +[16] Jiaxin Li and Gim Hee Lee. Deepi2p: Image-to-point cloud registration via deep classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15960-15969, 2021. 1 +[17] Ruihui Li, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-gan: a point cloud upsampling adversarial network. In Proceedings of the IEEE/CVF international conference on computer vision, pages 7203–7212, 2019. 1, 2, 5 +[18] Ruihui Li, Xianzhi Li, Pheng-Ann Heng, and Chi-Wing Fu. Point cloud upsampling via disentangled refinement. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 344–353, 2021. 1, 2, 5, 6, 7 +[19] Chen-Hsuan Lin, Chen Kong, and Simon Lucey. Learning efficient point cloud generation for dense 3d object reconstruction. In proceedings of the AAAI Conference on Artificial Intelligence, 2018. 1 +[20] Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Pointvoxel cnn for efficient 3d deep learning. Advances in Neural Information Processing Systems, 32, 2019. 2 +[21] Luqing Luo, Lulu Tang, Wanyi Zhou, Shizheng Wang, and Zhi-Xin Yang. Pu-eva: An edge-vector based approximation solution for flexible-scale point cloud upsampling. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16208-16217, 2021. 1, 2, 5, 6, 7, 8 +[22] Shitong Luo and Wei Hu. Diffusion probabilistic models for 3d point cloud generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2837-2845, 2021. 2 +[23] Zhaoyang Lyu, Zhifeng Kong, Xudong Xu, Liang Pan, and Dahua Lin. A conditional point diffusion-refinement paradigm for 3d point cloud completion. arXiv preprint arXiv:2112.03530, 2021. 2, 3, 8 +[24] Luke Melas-Kyriazi, Christian Rupprecht, and Andrea Vedaldi. Pc2: Projection-conditioned point cloud diffusion for single-image 3d reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12923-12932, 2023. 1 +[25] Liang Pan, Xinyi Chen, Zhongang Cai, Junzhe Zhang, Haiyu Zhao, Shuai Yi, and Ziwei Liu. Variational relational point completion network. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8524-8533, 2021. 4 +[26] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 4 +[27] Anh Viet Phan, Minh Le Nguyen, Yen Lam Hoang Nguyen, and Lam Thu Bui. Dgcnn: A convolutional neural network over large-scale labeled graphs. Neural Networks, 108:533-543, 2018. 2 + +[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. 
Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 2, 7 +[29] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 2, 4, 7 +[30] Guocheng Qian, Abdulellah Abualshour, Guohao Li, Ali Thabet, and Bernard Ghanem. Pu-gcn: Point cloud upsampling using graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11683-11692, 2021. 1, 2, 3, 5, 6, 7, 8 +[31] Guocheng Qian, Xingdi Zhang, Abdullah Hamdi, and Bernard Ghanem. Pix4point: Image pretrained transformers for 3d point cloud understanding. 2022. 3, 7 +[32] Yue Qian, Junhui Hou, Sam Kwong, and Ying He. Pugeonet: A geometry-centric network for 3d point cloud upsampling. In European conference on computer vision, pages 752-769. Springer, 2020. 2 +[33] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pages 8821-8831. PMLR, 2021. 2 +[34] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 7 +[35] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2 +[36] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 7 +[37] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020. 3, 6 +[38] Hugues Thomas, Charles R Qi, Jean-Emmanuel Deschaud, Beatrix Marcotegui, François Goulette, and Leonidas J Guibas. Kpconv: Flexible and deformable convolution for point clouds. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6411–6420, 2019. 2 +[39] Tong Wu, Liang Pan, Junzhe Zhang, Tai Wang, Ziwei Liu, and Dahua Lin. Balanced chamfer distance as a comprehensive metric for point cloud completion. Advances in Neural Information Processing Systems, 34:29088-29100, 2021. 2 +[40] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In + +Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 7 +[41] Jiale Xu, Xintao Wang, Weihao Cheng, Yan-Pei Cao, Ying Shan, Xiaohu Qie, and Shenghua Gao. Dream3d: Zero-shot text-to-3d synthesis using 3d shape prior and text-to-image diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20908–20918, 2023. 7 +[42] Lei Yang, Yanhong Liu, Jinzhu Peng, and Zize Liang. 
A novel system for off-line 3d seam extraction and path planning based on point cloud segmentation for arc welding robot. Robotics and Computer-Integrated Manufacturing, 64:101929, 2020. 1 +[43] Wang Yifan, Shihao Wu, Hui Huang, Daniel Cohen-Or, and Olga Sorkine-Hornung. Patch-based progressive 3d point set upsampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5958–5967, 2019. 1, 2, 3, 5, 6, 7 +[44] Lequan Yu, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-net: Point cloud upsampling network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2790–2799, 2018. 1, 2, 3, 5, 6, 7, 8 +[45] Cem Yuksel. Sample elimination for generating poisson disk sample sets. In Computer Graphics Forum, pages 25-32. Wiley Online Library, 2015. 5 +[46] Dandan Zhang, Weiyong Si, Wen Fan, Yuan Guan, and Chenguang Yang. From teleoperation to autonomous robot-assisted microsurgery: A survey. Machine Intelligence Research, 19(4):288-306, 2022. 1 +[47] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8552-8562, 2022. 3, 7 +[48] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 16259-16268, 2021. 4 +[49] Yuchao Zheng, Yujie Li, Shuo Yang, and Huimin Lu. Global-pbnet: A novel point cloud registration for autonomous driving. pages 22312-22319. IEEE, 2022. 1 +[50] Linqi Zhou, Yilun Du, and Jiajun Wu. 3d shape generation and completion through point-voxel diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5826–5835, 2021. 
2 \ No newline at end of file diff --git a/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/images.zip b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..b1f77df3f6f576d4464569ce2d90681e4d0c33f2 --- /dev/null +++ b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eab5f55909222c0f7d7d569be6044af5b3d87b9427990783c7ac8b820dc4f28c +size 763570 diff --git a/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/layout.json b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1cd55f023394cded806f37232d83988094447a88 --- /dev/null +++ b/2024/A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling/layout.json @@ -0,0 +1,9270 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 78, + 102, + 515, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 102, + 515, + 140 + ], + "spans": [ + { + "bbox": [ + 78, + 102, + 515, + 140 + ], + "type": "text", + "content": "A Conditional Denoising Diffusion Probabilistic Model for Point Cloud Upsampling" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "spans": [ + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "text", + "content": "Wentao " + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}\\mathbf{u}^{1}" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "text", + "content": ", Yuantian Shao" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "text", + "content": ", Lingwu Meng" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "text", + "content": ", Xiaoshui Huang" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "text", + "content": ", Liang Xiao" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "text", + "content": " Nanjing University of Science and Technology" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "text", + "content": ", Shanghai AI Laboratory" + }, + { + "bbox": [ + 120, + 160, + 502, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 191, + 563, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 191, + 563, + 204 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 563, + 204 + ], + "type": "text", + "content": "{quwentao, alvin_s, menglw815}@njust.edu.cn, huangxiaoshui@163.com, xiaoliang@mail.njust.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": 
"title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "spans": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 256, + 290, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 256, + 290, + 579 + ], + "spans": [ + { + "bbox": [ + 47, + 256, + 290, + 579 + ], + "type": "text", + "content": "Point cloud upsampling (PCU) enriches the representation of raw point clouds, significantly improving the performance in downstream tasks such as classification and reconstruction. Most of the existing point cloud upsampling methods focus on sparse point cloud feature extraction and upsampling module design. In a different way, we dive deeper into directly modelling the gradient of data distribution from dense point clouds. In this paper, we proposed a conditional denoising diffusion probabilistic model (DDPM) for point cloud upsampling, called PUDM. Specifically, PUDM treats the sparse point cloud as a condition, and iteratively learns the transformation relationship between the dense point cloud and the noise. Simultaneously, PUDM aligns with a dual mapping paradigm to further improve the discernment of point features. In this context, PUDM enables learning complex geometry details in the ground truth through the dominant features, while avoiding an additional upsampling module design. Furthermore, to generate high-quality arbitrary-scale point clouds during inference, PUDM exploits the prior knowledge of the scale between sparse point clouds and dense point clouds during training by parameterizing a rate factor. Moreover, PUDM exhibits strong noise robustness in experimental results. In the quantitative and qualitative evaluations on PU1K and PUGAN, PUDM significantly outperformed existing methods in terms of Chamfer Distance (CD) and Hausdorff Distance (HD), achieving state of the art (SOTA) performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 613, + 128, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 613, + 128, + 625 + ], + "spans": [ + { + "bbox": [ + 47, + 613, + 128, + 625 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 633, + 287, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 633, + 287, + 694 + ], + "spans": [ + { + "bbox": [ + 47, + 633, + 287, + 694 + ], + "type": "text", + "content": "Point clouds, as a most fundamental 3D representation, have been widely used in various downstream tasks such as 3D reconstruction [19, 24], autonomous driving [4, 16, 49], and robotics technology [42, 46]. However, raw point clouds captured from 3D sensors often exhibit sparsity," + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 307, + 228, + 547, + 345 + ], + "blocks": [ + { + "bbox": [ + 307, + 228, + 547, + 345 + ], + "lines": [ + { + "bbox": [ + 307, + 228, + 547, + 345 + ], + "spans": [ + { + "bbox": [ + 307, + 228, + 547, + 345 + ], + "type": "image", + "image_path": "baae014e43e709df28a42b3bb1701b0aecc9e06c095f8dfb58086e74802356ab.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 352, + 547, + 407 + ], + "lines": [ + { + "bbox": [ + 305, + 352, + 547, + 407 + ], + "spans": [ + { + "bbox": [ + 305, + 352, + 547, + 407 + ], + "type": "text", + "content": "Figure 1. 
Most existing methods achieving satisfactory results for input sparse point clouds with clear geometric structures (such as the hole on the green cover rear), but performing poorly for those with fuzzy geometric details (like the eyes of the red pig). However, our results, with close proximity to the ground truth." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 428, + 545, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 428, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 304, + 428, + 545, + 499 + ], + "type": "text", + "content": "noise, and non-uniformity. This is substantiated across diverse publicly available benchmark datasets, such as KITTI [8], ScanNet [5]. Hence, point cloud upsampling, which involves the transformation of sparse, incomplete, and noisy point clouds into dense, complete, and artifact-free representations, has garnered considerable research interest." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 500, + 546, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 500, + 546, + 619 + ], + "spans": [ + { + "bbox": [ + 304, + 500, + 546, + 619 + ], + "type": "text", + "content": "Inspired by deep learning, the pioneering work PU-Net [44] is the first to utilize deep neural networks to address this problem. This first divides the input point cloud into multiple patches and then extracts multi-scale features. Subsequently, these features are aggregated and fed into an upsampling module to approximate the dense point cloud coordinates. Building this approach, many works [17, 18, 21, 30, 43] optimize neural networks by focusing on sparse point cloud feature extraction and upsampling module design." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 620, + 547, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 620, + 547, + 704 + ], + "spans": [ + { + "bbox": [ + 304, + 620, + 547, + 704 + ], + "type": "text", + "content": "However, while these methods have achieved improved results, predicting dense point cloud coordinates via sparse point cloud features is an indirect approximating approach. Typically, these methods first utilize an encoder to extract sparse point cloud features, and then use a carefully designed upsampling module to fit dense point cloud coordinates. This approach has three limitations. First, the non" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 62, + 700, + 288, + 711 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 700, + 288, + 711 + ], + "spans": [ + { + "bbox": [ + 62, + 700, + 288, + 711 + ], + "type": "text", + "content": "*Corresponding Author. https://github.com/QWTforGithub/PUDM" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 293, + 746, + 318, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 746, + 318, + 756 + ], + "spans": [ + { + "bbox": [ + 293, + 746, + 318, + 756 + ], + "type": "text", + "content": "20786" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 179 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 179 + ], + "type": "text", + "content": "dominance of features causes the generated results to be more inclined toward input sparse point clouds, struggling to represent reasonable geometry details from the ground truth, as Fig 1 illustrated. Second, the additional upsampling module designs increase the workload for algorithm designers and often disrupt the intrinsic coordinate mappings in point clouds [30, 43, 44]. Third, they mostly require the joint supervision of the CD loss and other losses, resulting in them sensitive to noise [13, 39]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 180, + 288, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 180, + 288, + 335 + ], + "spans": [ + { + "bbox": [ + 46, + 180, + 288, + 335 + ], + "type": "text", + "content": "In this paper, we consider the point cloud upsampling task as a conditional generation problem. This first explores the incorporation of probabilistic models for point cloud upsampling. We propose a novel point cloud upsampling network, called PUDM, which is formally based on a conditional DDPM. Unlike previous methods, PUDM models the gradient of data distribution from dense point clouds (i.e., the ground truth), directly utilizing the dominant features to fit the ground truth, and decoupling the dependency on CD loss. Moreover, the auto-regressive nature of DDPM enables PUDM to efficiently avoid the additional upsampling module design, ensuring intrinsic point-wise mapping relationships in point clouds." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 335, + 287, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 335, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 46, + 335, + 287, + 430 + ], + "type": "text", + "content": "Simultaneously, to improve the ability of perceiving point features, PUDM employs a dual mapping paradigm. This naturally establishes a dual mapping relationship: between the generated sparse point cloud and the sparse point cloud, and between the dense point cloud and the noise. In this context, PUDM has the ability to learn complex geometric structures from the ground truth, generating uniform surfaces aligned with the ground truth, as Fig 1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 431, + 287, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 431, + 287, + 502 + ], + "spans": [ + { + "bbox": [ + 46, + 431, + 287, + 502 + ], + "type": "text", + "content": "Furthermore, we found that DDPM only models fixed-scale point cloud objects during training. 
To overcome this, we consider parameterizing a rate factor to exploit the prior knowledge of the scale between sparse point clouds and dense point clouds. In this way, PUDM enables to generate high-fidelity arbitrary-scale point clouds during inference." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 502, + 287, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 502, + 287, + 550 + ], + "spans": [ + { + "bbox": [ + 46, + 502, + 287, + 550 + ], + "type": "text", + "content": "In additional, benefiting from the inherent denoising architecture and the non-dependency for CD loss, PUDM demonstrates a remarkable degree of robustness in noise experiments." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 551, + 242, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 551, + 242, + 562 + ], + "spans": [ + { + "bbox": [ + 58, + 551, + 242, + 562 + ], + "type": "text", + "content": "Our key contributions can be summarized as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 563, + 287, + 707 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 47, + 563, + 287, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 563, + 287, + 598 + ], + "spans": [ + { + "bbox": [ + 47, + 563, + 287, + 598 + ], + "type": "text", + "content": "- We systematically analyze and recognize conditional DDPM as a favorable model for generating uniform point clouds at arbitrary scales in point cloud upsampling tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 599, + 287, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 599, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 47, + 599, + 287, + 633 + ], + "type": "text", + "content": "- We propose a novel network with a dual mapping for point cloud upsampling, named PUDM, which is based on conditional DDPM." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 634, + 287, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 634, + 287, + 670 + ], + "spans": [ + { + "bbox": [ + 47, + 634, + 287, + 670 + ], + "type": "text", + "content": "- By exploiting the rate prior, PUDM exhibits the ability of generating high-fidelity point clouds across arbitrary scales during inference." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 670, + 287, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 287, + 707 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 287, + 707 + ], + "type": "text", + "content": "- Comprehensive experiments demonstrate the outstanding capability of PUDM in generating geometric details in public benchmarks of point cloud upsampling." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 71, + 398, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 398, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 398, + 84 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 91, + 545, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 545, + 365 + ], + "type": "text", + "content": "Learnable Point Cloud Upsampling. The integration of deep learning with formidable data-driven and trainable attributes has markedly accelerated progress within the 3D field. 
Thanks to the powerful representation capabilities of deep neural networks, directly learning features from 3D data has become achievable, such as PointNet [28], PointNet++ [29], DGCNN [27], MinkowskiEngine [2], and KPConv [38]. Benefiting from the above, PU-Net [44] stands as the pioneer in integrating deep neural networks into point cloud upsampling tasks. This first aggregates multi-scale features for each point through multiple MLPs, and then expands them into a point cloud upsampling set via a channel shuffle layer. Following this pattern, some methods have achieved more significant results, such as MPU [43], PU-GAN [17], Dis-PU [18], and PU-GCN [30]. PU-EVA [21] is the first to achieve the arbitrary-scale point clouds upsampling via edge-vector based affine combinations in one-time training. Subsequently, PUGeo [32] and NePs [7] believe that sampling points within a 2D continuous space can generate higher-quality results. Furthermore, Grad-PU [9] transforms the point cloud upsampling task into a coordinate approximation problem, avoiding the upsampling module design." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 366, + 545, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 545, + 473 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 545, + 473 + ], + "type": "text", + "content": "Most methods predict the dense point cloud coordinates via sparse point cloud features, and extend the point set relying on an upsampling module. This causes them to struggle to learn complex geometry details from the ground truth. Moreover, they frequently exhibit a susceptibility to noise due to depending on CD loss during training. In this paper, we consider transforming the point cloud upsampling task into a point cloud generation problem, and first utilize conditional DDPM to address the aforementioned issues." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 474, + 545, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 474, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 474, + 545, + 616 + ], + "type": "text", + "content": "DDPM for Point Cloud Generation. Inspired by the success in image generation tasks [33-35], there has been greater attention on directly generating point clouds through DDPM. [22] represents the pioneering effort in applying DDPM to unconditional point cloud generation. Subsequently, [50] extends the application of DDPM to the point cloud completion task by training a point-voxel CNN [20]. However, the voxelization process introduces additional computational complexity. Furthermore, PDR [23] takes raw point clouds as input. But this requires training the two stages (coarse-to-fine) of diffusion models, resulting in a greater time overhead." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "In this paper, we explore to the application of conditional DDPM to handle the point cloud upsampling task. Unlike the point cloud generation and completion task, point cloud upsampling exhibits the difference of the point cloud scale between training and inference. We overcome this issue by exploiting a rate prior. Meanwhile, our method based on a dual mapping paradigm enables to efficiently learn complex geometric details in a single-stage training." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 746, + 318, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 746, + 318, + 756 + ], + "spans": [ + { + "bbox": [ + 293, + 746, + 318, + 756 + ], + "type": "text", + "content": "20787" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 271, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 271, + 84 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 271, + 84 + ], + "type": "text", + "content": "3. Denoising Diffusion Probabilistic Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 180, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 91, + 180, + 102 + ], + "spans": [ + { + "bbox": [ + 47, + 91, + 180, + 102 + ], + "type": "text", + "content": "3.1. Background for DDPM" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "spans": [ + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": "The forward and reverse process. Given the dense point cloud " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " sampled from a meaningful point distribution " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "P_{data}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": ", and an implicit variable " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{z}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " sampled from a tractable noise distribution " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "P_{latent}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": ", DDPM establishes the transformation relationship between " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{z}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " through two Markov chains. 
This conducts an auto-regressive process: a forward process " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " that gradually adds noise to " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " until " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " degrades to " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{z}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": ", and a reverse process " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "p_{\\theta}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " that slowly removes noise from " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{z}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " until " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{z}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " recovers to " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": ". We constrain the transformation speed using a time step " + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "t \\sim \\mathcal{U}(T)" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "inline_equation", + "content": "T = 1000" + }, + { + "bbox": [ + 47, + 109, + 286, + 228 + ], + "type": "text", + "content": " in this paper)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 229, + 286, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 229, + 286, + 277 + ], + "spans": [ + { + "bbox": [ + 47, + 229, + 286, + 277 + ], + "type": "text", + "content": "Training objective under specific conditions. 
Given a set of conditions " + }, + { + "bbox": [ + 47, + 229, + 286, + 277 + ], + "type": "inline_equation", + "content": "C = \\{c_i | i = 1..S\\}" + }, + { + "bbox": [ + 47, + 229, + 286, + 277 + ], + "type": "text", + "content": ", the training objective of DDPM under specific conditions is (please refer to the supplementary materials for the detailed derivation):" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 297, + 287, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 297, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 72, + 297, + 287, + 312 + ], + "type": "interline_equation", + "content": "L (\\theta) = \\mathbb {E} _ {t \\sim U (T), \\epsilon \\sim \\mathcal {N} (0, I)} | | \\epsilon - \\epsilon_ {\\boldsymbol {\\theta}} (\\boldsymbol {x} _ {t}, C, t) | | ^ {2} \\tag {1}", + "image_path": "e015ab6650a9f423d06203f7b876d0786ff6a6e2a894676db87259de15d4646f.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 319, + 205, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 319, + 205, + 331 + ], + "spans": [ + { + "bbox": [ + 47, + 319, + 205, + 331 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 319, + 205, + 331 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{t} = \\sqrt{1 - \\overline{\\alpha}_{t}}\\pmb {\\epsilon} + \\sqrt{\\overline{\\alpha}_{t}}\\pmb{x_{0}}" + }, + { + "bbox": [ + 47, + 319, + 205, + 331 + ], + "type": "text", + "content": " [11]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 332, + 286, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 332, + 286, + 367 + ], + "spans": [ + { + "bbox": [ + 47, + 332, + 286, + 367 + ], + "type": "text", + "content": "The gradient of data distribution. Furthermore, we use a stochastic differential equation (SDE) to describe the process of DDPM [37]:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 64, + 384, + 287, + 409 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 384, + 287, + 409 + ], + "spans": [ + { + "bbox": [ + 64, + 384, + 287, + 409 + ], + "type": "interline_equation", + "content": "s _ {\\theta} \\left(\\boldsymbol {x} _ {t}, t\\right) = \\nabla_ {x} \\log \\left(\\boldsymbol {x} _ {t}\\right) = - \\frac {1}{\\sqrt {1 - \\bar {\\alpha} _ {t}}} \\boldsymbol {\\epsilon} _ {\\theta} \\left(\\boldsymbol {x} _ {t}, t\\right) \\tag {2}", + "image_path": "f8ab3ac4332de58a1445af3805293cf00b5edac61ca03a8e7accc35b54a43ada.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 415, + 286, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 415, + 286, + 453 + ], + "spans": [ + { + "bbox": [ + 47, + 415, + 286, + 453 + ], + "type": "text", + "content": "The training objective of DDPM is essentially equivalent to computing the score (the gradient of data distribution), which differs only by a constant factor " + }, + { + "bbox": [ + 47, + 415, + 286, + 453 + ], + "type": "inline_equation", + "content": "-\\frac{1}{\\sqrt{1 - \\overline{\\alpha}_t}}" + }, + { + "bbox": [ + 47, + 415, + 286, + 453 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 459, + 198, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 459, + 198, + 471 + ], + "spans": [ + { + "bbox": [ + 47, + 459, + 198, + 471 + ], + "type": "text", + "content": "3.2. 
Analysis of DDPM for PCU" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 477, + 286, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 477, + 286, + 523 + ], + "spans": [ + { + "bbox": [ + 47, + 477, + 286, + 523 + ], + "type": "text", + "content": "We pioneer the exploration of the advantages and limitations of DDPM for PCU, hoping these insights encourage more researchers to introduce probabilistic models into PCU." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 525, + 286, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 525, + 286, + 583 + ], + "spans": [ + { + "bbox": [ + 47, + 525, + 286, + 583 + ], + "type": "text", + "content": "DDPM is an effective model for PCU. As mentioned in Sec 3.1, the auto-regressive nature of DDPM allows it to directly learn geometry details of the ground truth using the dominant features, generating closer-to-truth, fine-grained results." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 59, + 585, + 286, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 585, + 286, + 597 + ], + "spans": [ + { + "bbox": [ + 59, + 585, + 286, + 597 + ], + "type": "text", + "content": "Simultaneously, the reverse process of DDPM in PCU is:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 82, + 605, + 287, + 638 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 605, + 287, + 638 + ], + "spans": [ + { + "bbox": [ + 82, + 605, + 287, + 638 + ], + "type": "interline_equation", + "content": "p _ {\\theta} \\left(\\boldsymbol {x} _ {\\mathbf {0}: T}, \\boldsymbol {c}\\right) = p \\left(\\boldsymbol {x} _ {T}\\right) \\prod_ {t = 1} ^ {T} p _ {\\theta} \\left(\\boldsymbol {x} _ {t - 1} \\mid \\boldsymbol {x} _ {t}, \\boldsymbol {c}\\right) \\tag {3}", + "image_path": "e9be81c52c624a6cc9518000914f16097caf4118272387176ffd406490eb0e83.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 642, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 642, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 642, + 286, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 642, + 286, + 713 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 47, + 642, + 286, + 713 + ], + "type": "text", + "content": " means the sparse point cloud sampled from a data distribution " + }, + { + "bbox": [ + 47, + 642, + 286, + 713 + ], + "type": "inline_equation", + "content": "P_{c}" + }, + { + "bbox": [ + 47, + 642, + 286, + 713 + ], + "type": "text", + "content": ". According to Eq 3, the condition " + }, + { + "bbox": [ + 47, + 642, + 286, + 713 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 47, + 642, + 286, + 713 + ], + "type": "text", + "content": " participates in each step of the reverse process. In fact, this is usually achieved using an additional branch network interacting with the noise network, without intrinsically disrupting the auto-regressive process of DDPM, thus cleverly" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 72, + 545, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 131 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 131 + ], + "type": "text", + "content": "avoiding to design an additional upsampling module. 
Moreover, the process naturally defines a one-to-one point-wise mapping relationship between the dense point cloud and the noise, preserving the order of points in the diffusion process." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 133, + 545, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 133, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 305, + 133, + 545, + 167 + ], + "type": "text", + "content": "Furthermore, the efficient denoising architecture and the decoupling of CD loss significantly support the strong noise robustness of DDPM." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 167, + 545, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 167, + 545, + 203 + ], + "spans": [ + { + "bbox": [ + 305, + 167, + 545, + 203 + ], + "type": "text", + "content": "The limitations of DDPM in PCU. While DDPM showcases some advantageous attributes within PCU, it also harbors certain potential limitations:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 204, + 545, + 371 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 306, + 204, + 545, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 204, + 545, + 287 + ], + "spans": [ + { + "bbox": [ + 306, + 204, + 545, + 287 + ], + "type": "text", + "content": "- Limitation 1: The lack of effective prior knowledge in the 3D field results in the weak feature perception capability for point cloud conditional networks [14, 31, 47], significantly affecting the final generation results (Tab 8). Although some methods [23] compensate for this problem via a two-stage (coarse-to-fine) training approach, they require a higher training cost." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 288, + 545, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 288, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 306, + 288, + 545, + 371 + ], + "type": "text", + "content": "- Limitation 2: The auto-regressive nature of DDPM provides robust modeling capabilities for fixed-scale objects during training, but it struggles to generate high-quality arbitrary-scale ones during inference (Tab 9). Some works treat different scale point cloud upsampling as multiple tasks [30, 43, 44], but it's not advisable for DDPM due to the excessively high training cost." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 382, + 388, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 382, + 388, + 396 + ], + "spans": [ + { + "bbox": [ + 306, + 382, + 388, + 396 + ], + "type": "text", + "content": "4. Methodology" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 402, + 457, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 402, + 457, + 415 + ], + "spans": [ + { + "bbox": [ + 306, + 402, + 457, + 415 + ], + "type": "text", + "content": "4.1. Dual mapping Formulation" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 419, + 545, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 419, + 545, + 467 + ], + "spans": [ + { + "bbox": [ + 305, + 419, + 545, + 467 + ], + "type": "text", + "content": "For limitation 1, we adopt a dual mapping paradigm. We first provide a formal exposition of its conception, subsequently delineating the manner in which PUDM aligns with these principles, with a particular emphasis on its role." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "spans": [ + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "text", + "content": "Given two point sets of " + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "inline_equation", + "content": "\\pmb{x}^1 = \\{x_i^1 \\in \\mathbb{R}^3 | i = 1..M\\}" + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "inline_equation", + "content": "\\pmb{x}^2 = \\{x_i^2 \\in \\mathbb{R}^3 | i = 1..N\\}" + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "text", + "content": " from different data distributions, a network " + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "inline_equation", + "content": "f_x" + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "text", + "content": " with a dual-branch architecture " + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "inline_equation", + "content": "(f_x = \\{f_1, f_2\\})" + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "text", + "content": ", and the corresponding supervision signals for these branches " + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "inline_equation", + "content": "(l_x = \\{l_1, l_2\\})" + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "text", + "content": ", if " + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "inline_equation", + "content": "f_x" + }, + { + "bbox": [ + 305, + 468, + 545, + 528 + ], + "type": "text", + "content": " satisfies:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 363, + 538, + 545, + 553 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 538, + 545, + 553 + ], + "spans": [ + { + "bbox": [ + 363, + 538, + 545, + 553 + ], + "type": "interline_equation", + "content": "\\boldsymbol {y} ^ {1} = f _ {1} \\left(\\boldsymbol {x} ^ {1}\\right), \\quad \\boldsymbol {y} ^ {2} = f _ {2} \\left(\\boldsymbol {x} ^ {2}\\right) \\tag {4}", + "image_path": "b8822367f3409adf95cc4a97ceeec906e103a9eaf4a3047ab8d48482470839a6.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 305, + 556, + 545, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 556, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 305, + 556, + 545, + 616 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 556, + 545, + 616 + ], + "type": "inline_equation", + "content": "\\pmb{y}^1 = \\{y_i^1\\in \\mathbb{R}^3|i = 1..M\\}" + }, + { + "bbox": [ + 305, + 556, + 545, + 616 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 556, + 545, + 616 + ], + "type": "inline_equation", + "content": "\\pmb{y}^2 = \\{y_i^2\\in \\mathbb{R}^3|i = 1..N\\}" + }, + { + "bbox": [ + 305, + 556, + 545, + 616 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 305, + 556, + 545, + 616 + ], + "type": "inline_equation", + "content": "f_{x}" + }, + { + "bbox": [ + 305, + 556, + 545, + 616 + ], + "type": "text", + "content": " can be claimed as a dual mapping network. Eq 4 means that each element in the original input has one and only one corresponding element in the final output in each branch." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "text", + "content": "In PUDM, we only require the conditional network to meet the above condition, because the noise network inherently builds a one-to-one point-wise mapping between the input and the output [23]. Specifically, we first force the output " + }, + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\pmb{c}^{\\prime} = \\{c_{i}^{\\prime}\\in \\mathbb{R}^{3}|i = 1..M\\}" + }, + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "text", + "content": " from the conditional network " + }, + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "f_{\\psi}" + }, + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "text", + "content": " to approximate the sparse point cloud " + }, + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\pmb {c} = \\{c_i\\in \\mathbb{R}^3 |i = 1..M\\}" + }, + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "text", + "content": " coordinates via MLPs, and then optimize the process by the mean squared error loss:" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 746, + 317, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 746, + 317, + 755 + ], + "spans": [ + { + "bbox": [ + 293, + 746, + 317, + 755 + ], + "type": "text", + "content": "20788" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 115, + 83, + 287, + 97 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 83, + 287, + 97 + ], + "spans": [ + { + "bbox": [ + 115, + 83, + 287, + 97 + ], + "type": "interline_equation", + "content": "L (\\psi) = \\mathbb {E} _ {\\boldsymbol {c} \\sim P _ {c}} \\| \\boldsymbol {c} - \\boldsymbol {c} ^ {\\prime} \\| ^ {2} \\tag {5}", + "image_path": "34423e624441d3e45a8d8f9eff1ec838c266ca5511dc64ab41a4a1b3ff80b872.jpg" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 102, + 287, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 102, + 287, + 161 + ], + "spans": [ + { + "bbox": [ + 47, + 102, + 287, + 161 + ], + "type": "text", + "content": "Formally, this establishes a one-to-one point-wise mapping between the input and the output for the conditional network, " + }, + { + "bbox": [ + 47, + 102, + 287, + 161 + ], + "type": "inline_equation", + "content": "\\pmb{c}^{\\prime} = f_{\\psi}(\\pmb {c}) = \\mathcal{D}_{c}(\\mathcal{E}_{c}(\\pmb {c},\\mathcal{T}\\mathcal{M}(\\mathcal{E}_{n}(\\pmb{x}_{t},r,t))))" + }, + { + "bbox": [ + 47, + 102, + 287, + 161 + ], + "type": "text", + "content": ", as shown in Fig 2. " + }, + { + "bbox": [ + 47, + 102, + 287, + 161 + ], + "type": "inline_equation", + "content": "\\mathcal{T}\\mathcal{M}(\\cdot)" + }, + { + "bbox": [ + 47, + 102, + 287, + 161 + ], + "type": "text", + "content": " denotes the Transfer Module defined in Sec 4.3." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 162, + 288, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 162, + 288, + 269 + ], + "spans": [ + { + "bbox": [ + 46, + 162, + 288, + 269 + ], + "type": "text", + "content": "For point cloud tasks with unordered structures, this pattern effectively enhances network capability in capturing point features by preserving the ordered relationships between input and output points [3, 12]. Moreover, corresponding supervision signals ensure adequate training for each branch network (Fig 7), providing an effective strategy to address the challenge of lacking robust 3D pre-trained models for conditional branch networks in point cloud generation tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 277, + 139, + 290 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 277, + 139, + 290 + ], + "spans": [ + { + "bbox": [ + 47, + 277, + 139, + 290 + ], + "type": "text", + "content": "4.2. Rate Modeling" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "text", + "content": "For limitation 2, drawing inspiration from the practice of adding class labels in conditional probabilistic models [6, 10, 26], we propose a simple and effective approach to achieve high-quality arbitrary-scale sampling during inference. Specifically, we first add a rate label " + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "text", + "content": " to each sample pair, " + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "inline_equation", + "content": "(c, x) \\to (c, x, r)" + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "text", + "content": " (the supplementary materials provide ablation studies for different forms of the rate label " + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "text", + "content": "). Subsequently, we parameter the rate factor using an embedding layer. 
In this way, the reverse process of DDPM is:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 72, + 422, + 287, + 455 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 422, + 287, + 455 + ], + "spans": [ + { + "bbox": [ + 72, + 422, + 287, + 455 + ], + "type": "interline_equation", + "content": "p _ {\\theta} \\left(\\boldsymbol {x} _ {\\mathbf {0}: T}, \\boldsymbol {c}, r\\right) = p \\left(\\boldsymbol {x} _ {T}\\right) \\prod_ {t = 1} ^ {T} p _ {\\theta} \\left(\\boldsymbol {x} _ {t - 1} \\mid \\boldsymbol {x} _ {t}, \\boldsymbol {c}, r\\right) \\tag {6}", + "image_path": "ad9750ff96ef511d9f39ee252ef87c03cc3900593aa8718e6d6169926ff0f0a3.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 463, + 287, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 463, + 287, + 559 + ], + "spans": [ + { + "bbox": [ + 46, + 463, + 287, + 559 + ], + "type": "text", + "content": "Eq 6 demonstrates that this simply adds an additional condition to DDPM, the rate prior " + }, + { + "bbox": [ + 46, + 463, + 287, + 559 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 463, + 287, + 559 + ], + "type": "text", + "content": ", without increasing the number of samples. Unlike class labels, we found in experiments that this conditional prior we exploited can significantly improve the generation quality of unseen-scale point clouds. The reason is that generating unseen-scale and seen-category objects usually are easier compared to generating seen-scale and unseen-category ones for models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 566, + 173, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 566, + 173, + 578 + ], + "spans": [ + { + "bbox": [ + 47, + 566, + 173, + 578 + ], + "type": "text", + "content": "4.3. Network Architecture" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 584, + 287, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 584, + 287, + 655 + ], + "spans": [ + { + "bbox": [ + 46, + 584, + 287, + 655 + ], + "type": "text", + "content": "In this section, we introduce the overall framework of PUDM, consisting of three crucial components: the conditional network (C-Net), the noise network (N-Net), and the Transfer Module (TM). This process is remarkably illustrated in Fig 2. The parameter setting and implementation details are provided in the supplementary materials." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 656, + 287, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 656, + 287, + 704 + ], + "spans": [ + { + "bbox": [ + 47, + 656, + 287, + 704 + ], + "type": "text", + "content": "The Conditional Network (C-Net). We use PointNet++ [29] as the backbone. This follows the standard U-Net framework. 
The encoder and decoder are composed of multiple Set Abstraction (SA) layers and Feature Propagation" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 70, + 547, + 207 + ], + "blocks": [ + { + "bbox": [ + 309, + 70, + 547, + 207 + ], + "lines": [ + { + "bbox": [ + 309, + 70, + 547, + 207 + ], + "spans": [ + { + "bbox": [ + 309, + 70, + 547, + 207 + ], + "type": "image", + "image_path": "48c53407bff01a253261e6c0afaccb535c5cf92c36b66d5ac18fa84b9c22d0af.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 213, + 547, + 291 + ], + "lines": [ + { + "bbox": [ + 304, + 213, + 547, + 291 + ], + "spans": [ + { + "bbox": [ + 304, + 213, + 547, + 291 + ], + "type": "text", + "content": "Figure 2. The overall framework of PUDM: The N-Net (upper branch) and the C-Net (lower branch) both establish a one-to-one point-wise mapping between input and output using mean squared error loss. They engage in information exchange through a transfer module (TM). Simultaneously, the rate prompt is provided to exploit the prior knowledge of the scale between sparse point clouds and dense point clouds." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 312, + 545, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 312, + 545, + 382 + ], + "spans": [ + { + "bbox": [ + 304, + 312, + 545, + 382 + ], + "type": "text", + "content": "(FP) layers, respectively. Unlike PointNet++ using the max-pooling layer to filter features, we consider utilizing the self-attention layer to retain more fine-grained information [25, 48]. In addition, we only feed the sparse point cloud into the C-Net to ensure the feature extraction in a pure and effective manner." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 383, + 545, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 383, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 304, + 383, + 545, + 431 + ], + "type": "text", + "content": "The Noise Network (N-Net). The N-Net and the C-Net share the same network architecture. In contrast to the C-Net, we need to introduce additional guidance information to the N-Net for modeling the diffusion steps." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "spans": [ + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "text", + "content": "We first transform the sparse point cloud " + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "inline_equation", + "content": "\\mathbf{c} \\in \\mathbb{R}^{N \\times 3}" + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "text", + "content": " into the interpolation point cloud " + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "inline_equation", + "content": "\\mathbf{i} \\in \\mathbb{R}^{rN \\times 3}" + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "text", + "content": " through the midpoint interpolation [9], and then sum " + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "inline_equation", + "content": "\\mathbf{i}" + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "text", + "content": " as the input for the N-Net. Meanwhile, we extract the global features from " + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "inline_equation", + "content": "\\mathbf{i}" + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "text", + "content": " to enhance the semantic understanding. Furthermore, to identify the noise level, we encode the time step " + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "text", + "content": ". Finally, as mentioned in Sec 4.2, we parameterized the rate factor " + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 430, + 546, + 550 + ], + "type": "text", + "content": ". These additional pieces of information are both treated as global features, and incorporated into each stage of the encoder and the decoder in the N-Net." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 551, + 545, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 551, + 545, + 623 + ], + "spans": [ + { + "bbox": [ + 304, + 551, + 545, + 623 + ], + "type": "text", + "content": "The Transfer Module (TM). We propose a bidirectional interaction module (TM) to serve as an intermediary between the C-Net and the N-Net. We only place the TM at the bottleneck stage of U-Net, due to the significant computational efficiency and the abundant semantic information via the maximum receptive field [12, 15]." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 623, + 545, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 623, + 545, + 670 + ], + "spans": [ + { + "bbox": [ + 304, + 623, + 545, + 670 + ], + "type": "text", + "content": "Given the outputs of the encoder in the C-Net and the N-Net, " + }, + { + "bbox": [ + 304, + 623, + 545, + 670 + ], + "type": "inline_equation", + "content": "F^{c} \\in \\mathbb{R}^{N_{e}^{c} \\times C_{e}^{c}}" + }, + { + "bbox": [ + 304, + 623, + 545, + 670 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 623, + 545, + 670 + ], + "type": "inline_equation", + "content": "F^{n} \\in \\mathbb{R}^{N_{e}^{n} \\times C_{e}^{n}}" + }, + { + "bbox": [ + 304, + 623, + 545, + 670 + ], + "type": "text", + "content": " separately, the TM first transforms " + }, + { + "bbox": [ + 304, + 623, + 545, + 670 + ], + "type": "inline_equation", + "content": "F^{c} \\to (Q) \\in \\mathbb{R}^{N_{e}^{c} \\times C_{i}}" + }, + { + "bbox": [ + 304, + 623, + 545, + 670 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 623, + 545, + 670 + ], + "type": "inline_equation", + "content": "F^{n} \\to (K, V) \\in \\mathbb{R}^{N_{e}^{n} \\times C_{i}}" + }, + { + "bbox": [ + 304, + 623, + 545, + 670 + ], + "type": "text", + "content": " via MLPs. Next, we can obtain the fused feature:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 342, + 678, + 545, + 706 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 342, + 678, + 545, + 706 + ], + "spans": [ + { + "bbox": [ + 342, + 678, + 545, + 706 + ], + "type": "interline_equation", + "content": "F _ {f} = M L P \\left(\\operatorname {s o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {C _ {i}}}\\right) V\\right) + F ^ {c} \\tag {7}", + "image_path": "c14b7223810548a1044200852a1df03423625f4c2dbb89135cc91ad6d4256c70.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 746, + 317, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 746, + 317, + 755 + ], + "spans": [ + { + "bbox": [ + 293, + 746, + 317, + 755 + ], + "type": "text", + "content": "20789" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": "Subsequently, " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "F_{f}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " is fed into a feed-forward network (FFN) to output the final features. Similarly, the same operation is also applied in reverse direction, so that information flows in both directions, " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "F^{c} \\rightarrow F^{n}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "F^{n} \\rightarrow F^{c}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 128, + 178, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 128, + 178, + 140 + ], + "spans": [ + { + "bbox": [ + 47, + 128, + 178, + 140 + ], + "type": "text", + "content": "4.4. Training and Inference" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 146, + 287, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 146, + 287, + 182 + ], + "spans": [ + { + "bbox": [ + 47, + 146, + 287, + 182 + ], + "type": "text", + "content": "Training. As mentioned earlier (Eq 1 and Eq 5), PUDM is a dual mapping network, and models the rate prior during training. Therefore, the training objective is:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 118, + 194, + 287, + 206 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 194, + 287, + 206 + ], + "spans": [ + { + "bbox": [ + 118, + 194, + 287, + 206 + ], + "type": "interline_equation", + "content": "L _ {m s e} = L (\\theta) + \\alpha L (\\psi) \\tag {8}", + "image_path": "7af8edfe568e3696b298e20218b4e5c347cd9bd6ce3643d44ae4047b9378badc.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 212, + 274, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 212, + 274, + 223 + ], + "spans": [ + { + "bbox": [ + 47, + 212, + 274, + 223 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 212, + 274, + 223 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 47, + 212, + 274, + 223 + ], + "type": "text", + "content": " means a weighting factor (" + }, + { + "bbox": [ + 47, + 212, + 274, + 223 + ], + "type": "inline_equation", + "content": "\\alpha = 1" + }, + { + "bbox": [ + 47, + 212, + 274, + 223 + ], + "type": "text", + "content": " in this paper)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 224, + 287, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 224, + 287, + 271 + ], + "spans": [ + { + "bbox": [ + 47, + 224, + 287, + 271 + ], + "type": "text", + "content": "Inference. We found that adding the interpolated points " + }, + { + "bbox": [ + 47, + 224, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\mathbf{i}" + }, + { + "bbox": [ + 47, + 224, + 287, + 271 + ], + "type": "text", + "content": " as the guidance information significantly improves the generated quality during inference. 
Therefore, we iteratively transform " + }, + { + "bbox": [ + 47, + 224, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t" + }, + { + "bbox": [ + 47, + 224, + 287, + 271 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 47, + 224, + 287, + 271 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_0" + }, + { + "bbox": [ + 47, + 224, + 287, + 271 + ], + "type": "text", + "content": " based on:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 279, + 287, + 315 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 279, + 287, + 315 + ], + "spans": [ + { + "bbox": [ + 48, + 279, + 287, + 315 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} _ {t - 1} = \\gamma \\left(\\frac {1}{\\sqrt {\\alpha_ {t}}} \\left(\\boldsymbol {x} _ {t} - \\frac {1 - a _ {t}}{\\sqrt {1 - \\bar {\\alpha} _ {t}}} \\epsilon_ {\\theta} \\left(\\boldsymbol {x} _ {t}, \\boldsymbol {c}, r, t\\right)\\right) + \\boldsymbol {\\sigma} _ {t} \\boldsymbol {\\epsilon} + \\mathbf {i}\\right) \\tag {9}", + "image_path": "a92458c5543cd2155ca7c0ae910339529fa6d943ac4977524354d7f7b4cf59c1.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 316, + 266, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 316, + 266, + 328 + ], + "spans": [ + { + "bbox": [ + 47, + 316, + 266, + 328 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 316, + 266, + 328 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 47, + 316, + 266, + 328 + ], + "type": "text", + "content": " denotes a scale factor (" + }, + { + "bbox": [ + 47, + 316, + 266, + 328 + ], + "type": "inline_equation", + "content": "\\gamma = 0.5" + }, + { + "bbox": [ + 47, + 316, + 266, + 328 + ], + "type": "text", + "content": " in this paper)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 338, + 128, + 352 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 338, + 128, + 352 + ], + "spans": [ + { + "bbox": [ + 47, + 338, + 128, + 352 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 357, + 155, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 357, + 155, + 371 + ], + "spans": [ + { + "bbox": [ + 47, + 357, + 155, + 371 + ], + "type": "text", + "content": "5.1. Experiment Setup" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 376, + 287, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 376, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 287, + 495 + ], + "type": "text", + "content": "Dataset. In our experiments, we utilize two public benchmarks (PUGAN [17], PU1K [30]) for evaluation. We adhere to the official training/testing partitioning protocols for these datasets. This uses Poisson disk sampling [45] to generate 24,000 and 69,000 uniform patches for training, respectively. Each patch contains 256 points, while the corresponding ground truth has 1024 points. Meanwhile, 27 and 127 point clouds are used for testing, respectively. The input sparse point clouds consist of 2048 points, and are upsampled to " + }, + { + "bbox": [ + 46, + 376, + 287, + 495 + ], + "type": "inline_equation", + "content": "2048 \\times R" + }, + { + "bbox": [ + 46, + 376, + 287, + 495 + ], + "type": "text", + "content": " points via evaluated methods." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 496, + 287, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 496, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 47, + 496, + 287, + 544 + ], + "type": "text", + "content": "Metrics. Following [9, 43, 44], we employ the Chamfer Distance " + }, + { + "bbox": [ + 47, + 496, + 287, + 544 + ], + "type": "inline_equation", + "content": "(\\mathrm{CD} \\times 10^{-3})" + }, + { + "bbox": [ + 47, + 496, + 287, + 544 + ], + "type": "text", + "content": ", Hausdorff Distance " + }, + { + "bbox": [ + 47, + 496, + 287, + 544 + ], + "type": "inline_equation", + "content": "(\\mathrm{HD} \\times 10^{-3})" + }, + { + "bbox": [ + 47, + 496, + 287, + 544 + ], + "type": "text", + "content": ", and Point-to-Surface Distance " + }, + { + "bbox": [ + 47, + 496, + 287, + 544 + ], + "type": "inline_equation", + "content": "(\\mathrm{P2F} \\times 10^{-3})" + }, + { + "bbox": [ + 47, + 496, + 287, + 544 + ], + "type": "text", + "content": " as evaluation metrics in our experiments." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 551, + 182, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 551, + 182, + 563 + ], + "spans": [ + { + "bbox": [ + 47, + 551, + 182, + 563 + ], + "type": "text", + "content": "5.2. Comparison with SOTA" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 570, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 287, + 712 + ], + "type": "text", + "content": "Results on PUGAN. We first conduct the point cloud upsampling at low upsampling rate " + }, + { + "bbox": [ + 46, + 570, + 287, + 712 + ], + "type": "inline_equation", + "content": "(4\\times)" + }, + { + "bbox": [ + 46, + 570, + 287, + 712 + ], + "type": "text", + "content": " and high upsampling rate " + }, + { + "bbox": [ + 46, + 570, + 287, + 712 + ], + "type": "inline_equation", + "content": "(16\\times)" + }, + { + "bbox": [ + 46, + 570, + 287, + 712 + ], + "type": "text", + "content": " on PUGAN. Tab 1 illustrates the substantial superiority of our method in geometric detail description compared to other methods, as evidenced by significantly reduced CD and HD. Because our method models the gradient of data distribution from dense point clouds, facilitating the direct approximation of geometric details from the ground truth, thereby yielding higher accuracy of our results. Fig 3 further substantiates our viewpoint, and shows that our method produces fewer outliers, aligning with more uniform surfaces, closer to the ground truth." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 72, + 545, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 545, + 109 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 545, + 109 + ], + "type": "text", + "content": "In addition, despite P2F falling slightly behind Grad-PU [9] at " + }, + { + "bbox": [ + 306, + 72, + 545, + 109 + ], + "type": "inline_equation", + "content": "4\\times" + }, + { + "bbox": [ + 306, + 72, + 545, + 109 + ], + "type": "text", + "content": ", the difference is insignificant due to the asymmetry between points and surfaces [9, 17]." 
+ } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 308, + 120, + 545, + 212 + ], + "blocks": [ + { + "bbox": [ + 308, + 120, + 545, + 212 + ], + "lines": [ + { + "bbox": [ + 308, + 120, + 545, + 212 + ], + "spans": [ + { + "bbox": [ + 308, + 120, + 545, + 212 + ], + "type": "table", + "html": "
<table><tr><td>Methods</td><td colspan="3">4×</td><td colspan="3">16×</td></tr>
<tr><td></td><td>CD↓</td><td>HD↓</td><td>P2F↓</td><td>CD↓</td><td>HD↓</td><td>P2F↓</td></tr>
<tr><td>PU-Net [44]</td><td>0.529</td><td>6.805</td><td>4.460</td><td>0.510</td><td>8.206</td><td>6.041</td></tr>
<tr><td>MPU [43]</td><td>0.292</td><td>6.672</td><td>2.822</td><td>0.219</td><td>7.054</td><td>3.085</td></tr>
<tr><td>PU-GAN [30]</td><td>0.282</td><td>5.577</td><td>2.016</td><td>0.207</td><td>6.963</td><td>2.556</td></tr>
<tr><td>Dis-PU [18]</td><td>0.274</td><td>3.696</td><td>1.943</td><td>0.167</td><td>4.923</td><td>2.261</td></tr>
<tr><td>PU-EVA [21]</td><td>0.277</td><td>3.971</td><td>2.524</td><td>0.185</td><td>5.273</td><td>2.972</td></tr>
<tr><td>PU-GCN [30]</td><td>0.268</td><td>3.201</td><td>2.489</td><td>0.161</td><td>4.283</td><td>2.632</td></tr>
<tr><td>NePS [7]</td><td>0.259</td><td>3.648</td><td>1.935</td><td>0.152</td><td>4.910</td><td>2.198</td></tr>
<tr><td>Grad-PU [9]</td><td>0.245</td><td>2.369</td><td>1.893</td><td>0.108</td><td>2.352</td><td>2.127</td></tr>
<tr><td>Ours</td><td>0.131</td><td>1.220</td><td>1.912</td><td>0.082</td><td>1.120</td><td>2.114</td></tr></table>
", + "image_path": "de92d1212254e59d21be41e7f71f0b6532b51777d0ab7f3395ccd533da809c4b.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 249, + 545, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 249, + 545, + 332 + ], + "spans": [ + { + "bbox": [ + 305, + 249, + 545, + 332 + ], + "type": "text", + "content": "Arbitrary Upsampling Rates on PUGAN. Similarly to [9], we perform comparative analyses across different rates on PUGAN. Tab 2 shows that our method steadily outperforms Grad-PU [9] across nearly all metrics. In particular, our method demonstrates a significant performance advantage in terms of CD and HD, further affirming the superiority in learning complex geometric details." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 333, + 545, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 333, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 305, + 333, + 545, + 380 + ], + "type": "text", + "content": "Moreover, we visualize the results at higher upsampling rates " + }, + { + "bbox": [ + 305, + 333, + 545, + 380 + ], + "type": "inline_equation", + "content": "(16\\times, 32\\times, 64\\times," + }, + { + "bbox": [ + 305, + 333, + 545, + 380 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 333, + 545, + 380 + ], + "type": "inline_equation", + "content": "128\\times)" + }, + { + "bbox": [ + 305, + 333, + 545, + 380 + ], + "type": "text", + "content": " in Fig 4. Our results obviously exhibit more complete, uniform, and smooth compared to Grad-PU [9]." + } + ] + } + ], + "index": 18 + }, + { + "type": "table", + "bbox": [ + 308, + 392, + 536, + 455 + ], + "blocks": [ + { + "bbox": [ + 306, + 213, + 545, + 235 + ], + "lines": [ + { + "bbox": [ + 306, + 213, + 545, + 235 + ], + "spans": [ + { + "bbox": [ + 306, + 213, + 545, + 235 + ], + "type": "text", + "content": "Table 1. The results of " + }, + { + "bbox": [ + 306, + 213, + 545, + 235 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 306, + 213, + 545, + 235 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 213, + 545, + 235 + ], + "type": "inline_equation", + "content": "16 \\times" + }, + { + "bbox": [ + 306, + 213, + 545, + 235 + ], + "type": "text", + "content": " on PUGAN. Our method significantly surpasses other methods in terms of CD and HD." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 392, + 536, + 455 + ], + "lines": [ + { + "bbox": [ + 308, + 392, + 536, + 455 + ], + "spans": [ + { + "bbox": [ + 308, + 392, + 536, + 455 + ], + "type": "table", + "html": "
<table><tr><td>Rates</td><td colspan="3">Grad-PU [9]</td><td colspan="3">Ours</td></tr>
<tr><td></td><td>CD↓</td><td>HD↓</td><td>P2F↓</td><td>CD↓</td><td>HD↓</td><td>P2F↓</td></tr>
<tr><td></td><td>0.540</td><td>3.177</td><td>1.775</td><td>0.247</td><td>1.410</td><td>1.812</td></tr>
<tr><td></td><td>0.353</td><td>2.608</td><td>1.654</td><td>0.171</td><td>1.292</td><td>1.785</td></tr>
<tr><td></td><td>0.234</td><td>2.549</td><td>1.836</td><td>0.116</td><td>1.244</td><td>1.794</td></tr>
<tr><td></td><td>0.225</td><td>2.526</td><td>1.981</td><td>0.107</td><td>1.235</td><td>1.980</td></tr>
<tr><td></td><td>0.219</td><td>2.634</td><td>1.940</td><td>0.106</td><td>1.231</td><td>1.952</td></tr></table>
", + "image_path": "d61c465670b6f38236a03243ed057ba6ad3daed68d7bfda5fd5cbfcac427f697.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_body" + } + ], + "index": 19 + }, + { + "type": "table", + "bbox": [ + 308, + 514, + 545, + 567 + ], + "blocks": [ + { + "bbox": [ + 306, + 456, + 545, + 489 + ], + "lines": [ + { + "bbox": [ + 306, + 456, + 545, + 489 + ], + "spans": [ + { + "bbox": [ + 306, + 456, + 545, + 489 + ], + "type": "text", + "content": "Table 2. Grad-PU vs. ours at different rates on PUGAN. Benefiting from the rate modeling, our method still exhibits remarkable performance at different rates." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 514, + 545, + 567 + ], + "lines": [ + { + "bbox": [ + 308, + 514, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 308, + 514, + 545, + 567 + ], + "type": "table", + "html": "
<table><tr><td>Methods</td><td>CD↓</td><td>HD↓</td><td>P2F↓</td></tr>
<tr><td>PU-Net [44]</td><td>1.155</td><td>15.170</td><td>4.834</td></tr>
<tr><td>MPU [43]</td><td>0.935</td><td>13.327</td><td>3.511</td></tr>
<tr><td>PU-GCN [30]</td><td>0.585</td><td>7.577</td><td>2.499</td></tr>
<tr><td>Grad-PU [9]</td><td>0.404</td><td>3.732</td><td>1.474</td></tr>
<tr><td>Ours</td><td>0.217</td><td>2.164</td><td>1.477</td></tr></table>
", + "image_path": "79174611f9e2bf16bd433f503c135cddd0c2bd14f576f6227f90dedaa5f2effa.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "table_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 570, + 545, + 602 + ], + "lines": [ + { + "bbox": [ + 306, + 570, + 545, + 602 + ], + "spans": [ + { + "bbox": [ + 306, + 570, + 545, + 602 + ], + "type": "text", + "content": "Table 3. The results of " + }, + { + "bbox": [ + 306, + 570, + 545, + 602 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 306, + 570, + 545, + 602 + ], + "type": "text", + "content": " on PU1K. We utilize the experimental results from the original paper. Our method outperforms other methods across nearly all metrics." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 616, + 545, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 616, + 545, + 675 + ], + "spans": [ + { + "bbox": [ + 305, + 616, + 545, + 675 + ], + "type": "text", + "content": "Results on PU1K. Furthermore, we also conduct the evaluation at " + }, + { + "bbox": [ + 305, + 616, + 545, + 675 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 305, + 616, + 545, + 675 + ], + "type": "text", + "content": " on more challenging PU1K [30]. As reported in Tab 3, our method continues to demonstrate substantial advantages in terms of CD and HD compared to other methods." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 677, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 712 + ], + "type": "text", + "content": "Result on Real datasets. Additionally, we conduct the evaluation on real indoor (ScanNet [5]) and outdoor (KITTI [8]) scene datasets. Note that all methods are only trained" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "spans": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "type": "text", + "content": "20790" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 69, + 547, + 198 + ], + "blocks": [ + { + "bbox": [ + 50, + 69, + 547, + 198 + ], + "lines": [ + { + "bbox": [ + 50, + 69, + 547, + 198 + ], + "spans": [ + { + "bbox": [ + 50, + 69, + 547, + 198 + ], + "type": "image", + "image_path": "90fff76d4415f2ac586b9b84ce1da06869b125f6214f172f5c983bf01a912df8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 205, + 546, + 228 + ], + "lines": [ + { + "bbox": [ + 46, + 205, + 546, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 205, + 546, + 228 + ], + "type": "text", + "content": "Figure 3. Visualization results at " + }, + { + "bbox": [ + 46, + 205, + 546, + 228 + ], + "type": "inline_equation", + "content": "4\\times" + }, + { + "bbox": [ + 46, + 205, + 546, + 228 + ], + "type": "text", + "content": " on PUGAN. Our result exhibits fewer outliers, and clearly captures geometric details from the ground truth (the holes on the casting)." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 53, + 246, + 282, + 498 + ], + "blocks": [ + { + "bbox": [ + 53, + 246, + 282, + 498 + ], + "lines": [ + { + "bbox": [ + 53, + 246, + 282, + 498 + ], + "spans": [ + { + "bbox": [ + 53, + 246, + 282, + 498 + ], + "type": "image", + "image_path": "064b9ae13cac1aa8005976ecf0ef5d7802f449b4a9f26c572dc317b6f425c604.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 506, + 287, + 540 + ], + "lines": [ + { + "bbox": [ + 46, + 506, + 287, + 540 + ], + "spans": [ + { + "bbox": [ + 46, + 506, + 287, + 540 + ], + "type": "text", + "content": "Figure 4. Grad-PU vs. ours at large rates on PUGAN. Our method consistently generates more uniform and smooth surfaces (these results are achieved using an NVIDIA 3090 GPU)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 559, + 289, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 559, + 289, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 559, + 289, + 693 + ], + "type": "text", + "content": "on PUGAN. Upsampling scene-level point clouds poses greater challenges than upsampling object-level ones, due to the former having more intricate geometric structures. Due to the absence of the ground truth, our analysis is confined to qualitative comparisons. In Fig 5, our method still generates reasonable and smooth surfaces on some complex structures, while other methods exhibit artifacts such as overlap and voids. Simultaneously, Fig 6 illustrates that our results show more complete and fewer outliers. Although Grad-PU [9] also demonstrates good outlier results, it generates a considerable amount of uneven surfaces." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 248, + 477, + 261 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 248, + 477, + 261 + ], + "spans": [ + { + "bbox": [ + 306, + 248, + 477, + 261 + ], + "type": "text", + "content": "5.3. Validation for Noise Robustness" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 267, + 545, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 267, + 545, + 303 + ], + "spans": [ + { + "bbox": [ + 305, + 267, + 545, + 303 + ], + "type": "text", + "content": "Gaussian Noise. To demonstrate the robustness, we perturb the sparse point clouds with Gaussian noise sampled " + }, + { + "bbox": [ + 305, + 267, + 545, + 303 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0,I)" + }, + { + "bbox": [ + 305, + 267, + 545, + 303 + ], + "type": "text", + "content": " added at different noise levels " + }, + { + "bbox": [ + 305, + 267, + 545, + 303 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 305, + 267, + 545, + 303 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 304, + 546, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 304, + 546, + 363 + ], + "spans": [ + { + "bbox": [ + 305, + 304, + 546, + 363 + ], + "type": "text", + "content": "As shown in Tab 4, our method significantly outperforms other methods under multiple level noise perturbations (" + }, + { + "bbox": [ + 305, + 304, + 546, + 363 + ], + "type": "inline_equation", + "content": "\\tau = 0.01" + }, + { + "bbox": [ + 305, + 304, + 546, + 363 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 304, + 546, + 363 + ], + "type": "inline_equation", + "content": "\\tau = 0.02" + }, + { + "bbox": [ + 305, + 304, + 546, + 363 + ], + "type": "text", + "content": "). Specifically, this is because our method models the noise " + }, + { + "bbox": [ + 305, + 304, + 546, + 363 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 305, + 304, + 546, + 363 + ], + "type": "text", + "content": " (the gradient of data distribution) and avoids CD loss during training." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 308, + 373, + 545, + 460 + ], + "blocks": [ + { + "bbox": [ + 308, + 373, + 545, + 460 + ], + "lines": [ + { + "bbox": [ + 308, + 373, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 308, + 373, + 545, + 460 + ], + "type": "table", + "html": "
<table><tr><td>Noise Levels / Methods</td><td colspan="3">τ = 0.01</td><td colspan="3">τ = 0.02</td></tr>
<tr><td></td><td>CD↓</td><td>HD↓</td><td>P2F↓</td><td>CD↓</td><td>HD↓</td><td>P2F↓</td></tr>
<tr><td>PU-Net [44]</td><td>0.628</td><td>8.068</td><td>9.816</td><td>1.078</td><td>10.867</td><td>16.401</td></tr>
<tr><td>MPU [43]</td><td>0.506</td><td>6.978</td><td>9.059</td><td>0.929</td><td>10.820</td><td>15.621</td></tr>
<tr><td>PU-GAN [30]</td><td>0.464</td><td>6.070</td><td>7.498</td><td>0.887</td><td>10.602</td><td>15.088</td></tr>
<tr><td>Dis-PU [18]</td><td>0.419</td><td>5.413</td><td>6.723</td><td>0.818</td><td>9.345</td><td>14.376</td></tr>
<tr><td>PU-EVA [21]</td><td>0.459</td><td>5.377</td><td>7.189</td><td>0.839</td><td>9.325</td><td>14.652</td></tr>
<tr><td>PU-GCN [30]</td><td>0.448</td><td>5.586</td><td>6.989</td><td>0.816</td><td>8.604</td><td>13.798</td></tr>
<tr><td>NePS [7]</td><td>0.425</td><td>5.438</td><td>6.546</td><td>0.798</td><td>9.102</td><td>12.088</td></tr>
<tr><td>Grad-PU [9]</td><td>0.414</td><td>4.145</td><td>6.400</td><td>0.766</td><td>7.336</td><td>11.534</td></tr>
<tr><td>Ours</td><td>0.210</td><td>2.430</td><td>6.070</td><td>0.529</td><td>5.471</td><td>9.742</td></tr></table>
", + "image_path": "b3fbec42fb03ebbc0187bd4e91a55f5b844a62f9faa499db968e8c50e019b1b2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 464, + 545, + 497 + ], + "lines": [ + { + "bbox": [ + 305, + 464, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 305, + 464, + 545, + 497 + ], + "type": "text", + "content": "Table 4. The results of " + }, + { + "bbox": [ + 305, + 464, + 545, + 497 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 305, + 464, + 545, + 497 + ], + "type": "text", + "content": " at low-level Gaussian noise on PUGAN. Our method significantly outperforms other methods in terms of noise robustness." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 510, + 545, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 581 + ], + "type": "text", + "content": "Moreover, we also conduct the evaluation under more challenging noise perturbations. Tab 5 shows that our method exhibits stronger robustness results at higher level noise perturbations (" + }, + { + "bbox": [ + 304, + 510, + 545, + 581 + ], + "type": "inline_equation", + "content": "\\tau = 0.05" + }, + { + "bbox": [ + 304, + 510, + 545, + 581 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 510, + 545, + 581 + ], + "type": "inline_equation", + "content": "\\tau = 0.1" + }, + { + "bbox": [ + 304, + 510, + 545, + 581 + ], + "type": "text", + "content": "). This indicates that our method exhibits a trend of resilience for the noise robustness." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 582, + 545, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 640 + ], + "type": "text", + "content": "Other Noise. Furthermore, we also investigated the performance of our method on uniform noise. Admittedly, while our method still keeps SOTA performance, as shown in Tab 6, the results on uniform noise show significantly lower than that on Gaussian noise." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 642, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 546, + 713 + ], + "type": "text", + "content": "We provide an intuitive explanation. Eq 2 demonstrates that the training objective of DDPM is to fit the gradient of data distribution (modeling the noise " + }, + { + "bbox": [ + 304, + 642, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 304, + 642, + 546, + 713 + ], + "type": "text", + "content": ", named score) [37]. Essentially, DDPM learns the direction of noise generation. 
When the conditions with noise are considered, the disturbance in the direction exhibits relatively small, because the" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 746, + 317, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 746, + 317, + 756 + ], + "spans": [ + { + "bbox": [ + 293, + 746, + 317, + 756 + ], + "type": "text", + "content": "20791" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 71, + 547, + 159 + ], + "blocks": [ + { + "bbox": [ + 48, + 71, + 547, + 159 + ], + "lines": [ + { + "bbox": [ + 48, + 71, + 547, + 159 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 547, + 159 + ], + "type": "image", + "image_path": "1f2b71bfcd4250bb6bd32e18e2c92a351ec9a9ecc902be7c53fa8f8e0a7ddcd2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 166, + 547, + 187 + ], + "lines": [ + { + "bbox": [ + 46, + 166, + 547, + 187 + ], + "spans": [ + { + "bbox": [ + 46, + 166, + 547, + 187 + ], + "type": "text", + "content": "Figure 5. The results of " + }, + { + "bbox": [ + 46, + 166, + 547, + 187 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 46, + 166, + 547, + 187 + ], + "type": "text", + "content": " on KITTI. Our method noticeably generates more reasonable and uniform results on some complex geometric structures." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 209, + 286, + 297 + ], + "blocks": [ + { + "bbox": [ + 50, + 209, + 286, + 297 + ], + "lines": [ + { + "bbox": [ + 50, + 209, + 286, + 297 + ], + "spans": [ + { + "bbox": [ + 50, + 209, + 286, + 297 + ], + "type": "table", + "html": "
<table><tr><td>Noise Levels / Methods</td><td colspan="3">τ = 0.05</td><td colspan="3">τ = 0.1</td></tr>
<tr><td></td><td>CD↓</td><td>HD↓</td><td>P2F↓</td><td>CD↓</td><td>HD↓</td><td>P2F↓</td></tr>
<tr><td>PU-Net [44]</td><td>1.370</td><td>13.729</td><td>23.249</td><td>1.498</td><td>14.193</td><td>23.846</td></tr>
<tr><td>MPU [43]</td><td>1.247</td><td>11.645</td><td>22.189</td><td>1.321</td><td>12.415</td><td>23.841</td></tr>
<tr><td>PU-GAN [30]</td><td>1.124</td><td>9.091</td><td>21.252</td><td>1.271</td><td>10.911</td><td>23.174</td></tr>
<tr><td>Dis-PU [18]</td><td>1.076</td><td>7.921</td><td>20.603</td><td>1.244</td><td>10.913</td><td>22.845</td></tr>
<tr><td>PU-EVA [21]</td><td>1.057</td><td>7.910</td><td>20.044</td><td>1.226</td><td>9.305</td><td>22.296</td></tr>
<tr><td>PU-GCN [30]</td><td>1.263</td><td>9.869</td><td>22.835</td><td>1.456</td><td>11.063</td><td>25.213</td></tr>
<tr><td>NePS [7]</td><td>1.143</td><td>9.645</td><td>18.642</td><td>1.198</td><td>9.874</td><td>20.162</td></tr>
<tr><td>Grad-PU [9]</td><td>0.978</td><td>8.057</td><td>16.927</td><td>1.118</td><td>8.946</td><td>18.845</td></tr>
<tr><td>Ours</td><td>0.618</td><td>5.386</td><td>14.751</td><td>0.853</td><td>6.239</td><td>16.845</td></tr></table>
", + "image_path": "8ea52825783d60da96befc77db6ca76bb7be6a86bbd73a6e6ae667ca59a94cb7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 353, + 287, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 353, + 287, + 425 + ], + "spans": [ + { + "bbox": [ + 46, + 353, + 287, + 425 + ], + "type": "text", + "content": "noise has a similar distribution to " + }, + { + "bbox": [ + 46, + 353, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 46, + 353, + 287, + 425 + ], + "type": "text", + "content": ". Therefore, during inference, our method demonstrates robustness to approximating noise distributions of " + }, + { + "bbox": [ + 46, + 353, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 46, + 353, + 287, + 425 + ], + "type": "text", + "content": " (Gaussian noise), but performs poorly when faced with different ones (the supplementary materials provide more noise experiments to support this conclusion)." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 50, + 435, + 286, + 523 + ], + "blocks": [ + { + "bbox": [ + 46, + 297, + 288, + 331 + ], + "lines": [ + { + "bbox": [ + 46, + 297, + 288, + 331 + ], + "spans": [ + { + "bbox": [ + 46, + 297, + 288, + 331 + ], + "type": "text", + "content": "Table 5. The results of " + }, + { + "bbox": [ + 46, + 297, + 288, + 331 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 46, + 297, + 288, + 331 + ], + "type": "text", + "content": " at high-level Gaussian noise on PUGAN. Compared to other methods, our method demonstrates a more favorable upward trend for robustness to noise." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 435, + 286, + 523 + ], + "lines": [ + { + "bbox": [ + 50, + 435, + 286, + 523 + ], + "spans": [ + { + "bbox": [ + 50, + 435, + 286, + 523 + ], + "type": "table", + "html": "
<table><tr><td>Noise Levels / Methods</td><td colspan="3">τ = 0.05</td><td colspan="3">τ = 0.1</td></tr>
<tr><td></td><td>CD↓</td><td>HD↓</td><td>P2F↓</td><td>CD↓</td><td>HD↓</td><td>P2F↓</td></tr>
<tr><td>PU-Net [44]</td><td>1.490</td><td>14.473</td><td>23.223</td><td>1.725</td><td>15.442</td><td>25.251</td></tr>
<tr><td>MPU [43]</td><td>1.224</td><td>10.842</td><td>20.456</td><td>1.545</td><td>11.645</td><td>23.512</td></tr>
<tr><td>PU-GAN [30]</td><td>1.034</td><td>7.757</td><td>18.617</td><td>1.327</td><td>9.700</td><td>21.321</td></tr>
<tr><td>Dis-PU [18]</td><td>1.006</td><td>6.856</td><td>17.873</td><td>1.314</td><td>7.463</td><td>20.980</td></tr>
<tr><td>PU-EVA [21]</td><td>1.024</td><td>7.534</td><td>18.179</td><td>1.334</td><td>8.056</td><td>21.158</td></tr>
<tr><td>PU-GCN [30]</td><td>1.045</td><td>9.643</td><td>18.899</td><td>1.325</td><td>10.877</td><td>21.633</td></tr>
<tr><td>NePS [7]</td><td>1.048</td><td>7.345</td><td>18.054</td><td>1.321</td><td>9.645</td><td>21.314</td></tr>
<tr><td>Grad-PU [9]</td><td>1.067</td><td>6.634</td><td>17.734</td><td>1.399</td><td>7.215</td><td>21.028</td></tr>
<tr><td>Ours</td><td>0.998</td><td>6.110</td><td>17.558</td><td>1.310</td><td>6.732</td><td>20.564</td></tr></table>
", + "image_path": "da6b882a0f03bb7b860f54b788d5ec60371afdf8d9fb7c960a6ca9990f7040d0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 569, + 230, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 569, + 230, + 582 + ], + "spans": [ + { + "bbox": [ + 47, + 569, + 230, + 582 + ], + "type": "text", + "content": "5.4. Effectiveness in Downstream Task" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 587, + 286, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 587, + 286, + 635 + ], + "spans": [ + { + "bbox": [ + 46, + 587, + 286, + 635 + ], + "type": "text", + "content": "We evaluate the effectiveness of upsampling quality in the downstream task: point cloud classification. Meanwhile, we also conducted experiments on point cloud part segmentation, please refer to the supplementary materials." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 635, + 286, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 635, + 286, + 708 + ], + "spans": [ + { + "bbox": [ + 46, + 635, + 286, + 708 + ], + "type": "text", + "content": "PointNet [28] and PointNet++ [29] are chosen as the downstream task models due to their significant performance and widespread influence in 3D tasks. We follow the official training and testing procedures. Simultaneously, we select ModelNet40 [40] (40 categories) and ShapeNet [1] (16 categories) as the benchmarks for point cloud clas" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 209, + 545, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 209, + 545, + 257 + ], + "spans": [ + { + "bbox": [ + 304, + 209, + 545, + 257 + ], + "type": "text", + "content": "sification. For a fair and effective evaluation, we use only 3D coordinates as the input. Similar to the evaluated strategy on real datasets, all point cloud upsampling methods are only trained on PUGAN." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 257, + 545, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 257, + 545, + 340 + ], + "spans": [ + { + "bbox": [ + 304, + 257, + 545, + 340 + ], + "type": "text", + "content": "For evaluation, we first subsample 256/512 points from test point clouds on ModelNet40/ShapeNet. Subsequently, they are upsampled to 1024/2048 points through evaluation methods. As depicted in Tab 7, our results significantly improve the classification accuracy compared to the low-res point clouds, and consistently outperforms other methods across all metrics." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 308, + 350, + 545, + 451 + ], + "blocks": [ + { + "bbox": [ + 47, + 525, + 286, + 548 + ], + "lines": [ + { + "bbox": [ + 47, + 525, + 286, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 525, + 286, + 548 + ], + "type": "text", + "content": "Table 6. The results of " + }, + { + "bbox": [ + 47, + 525, + 286, + 548 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 47, + 525, + 286, + 548 + ], + "type": "text", + "content": " at high-level uniform noise on PUGAN. Our method outperforms other methods on all metrics." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 350, + 545, + 451 + ], + "lines": [ + { + "bbox": [ + 308, + 350, + 545, + 451 + ], + "spans": [ + { + "bbox": [ + 308, + 350, + 545, + 451 + ], + "type": "table", + "html": "
<table><tr><td>Datasets</td><td colspan="4">ModelNet40 (%)</td><td colspan="4">ShapeNet (%)</td></tr>
<tr><td>Models</td><td colspan="2">PointNet</td><td colspan="2">PointNet++</td><td colspan="2">PointNet</td><td colspan="2">PointNet++</td></tr>
<tr><td>Methods</td><td>IA↑</td><td>CA↑</td><td>IA↑</td><td>CA↑</td><td>IA↑</td><td>CA↑</td><td>IA↑</td><td>CA↑</td></tr>
<tr><td>Low-res</td><td>87.15</td><td>83.12</td><td>88.87</td><td>84.45</td><td>97.61</td><td>95.09</td><td>98.20</td><td>96.11</td></tr>
<tr><td>High-res</td><td>90.74</td><td>87.14</td><td>92.24</td><td>89.91</td><td>98.89</td><td>96.61</td><td>99.27</td><td>98.18</td></tr>
<tr><td>PU-Net [44]</td><td>88.72</td><td>85.25</td><td>88.99</td><td>85.43</td><td>97.99</td><td>95.69</td><td>98.57</td><td>96.35</td></tr>
<tr><td>MPU [43]</td><td>89.04</td><td>85.84</td><td>89.54</td><td>86.51</td><td>98.03</td><td>95.92</td><td>98.94</td><td>96.81</td></tr>
<tr><td>PU-GAN [30]</td><td>89.95</td><td>85.68</td><td>90.45</td><td>87.23</td><td>98.75</td><td>95.70</td><td>90.45</td><td>87.23</td></tr>
<tr><td>Dis-PU [18]</td><td>88.70</td><td>85.34</td><td>89.56</td><td>86.53</td><td>98.80</td><td>96.07</td><td>99.00</td><td>97.15</td></tr>
<tr><td>PU-EVA [21]</td><td>89.27</td><td>85.63</td><td>89.96</td><td>86.86</td><td>98.72</td><td>95.69</td><td>99.07</td><td>97.58</td></tr>
<tr><td>PU-GCN [30]</td><td>89.77</td><td>85.38</td><td>89.45</td><td>86.15</td><td>98.78</td><td>96.06</td><td>99.03</td><td>97.42</td></tr>
<tr><td>NePS [7]</td><td>90.01</td><td>86.15</td><td>90.32</td><td>87.34</td><td>98.94</td><td>96.20</td><td>99.12</td><td>97.94</td></tr>
<tr><td>Grad-PU [9]</td><td>90.05</td><td>86.06</td><td>89.98</td><td>87.49</td><td>98.82</td><td>96.19</td><td>99.10</td><td>97.63</td></tr>
<tr><td>Ours</td><td>90.33</td><td>86.54</td><td>92.14</td><td>89.42</td><td>98.85</td><td>96.58</td><td>99.13</td><td>97.99</td></tr></table>
", + "image_path": "ace120a5cd65541b12e705926704ecd157745c7a2dec3a9cbf0f1e5133d6c6c5.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 453, + 545, + 520 + ], + "lines": [ + { + "bbox": [ + 304, + 453, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 304, + 453, + 545, + 520 + ], + "type": "text", + "content": "Table 7. The results of point cloud classification. \"Low-res\" refers to the point cloud subsampled, while \"High-res\" denotes the original test point cloud. Meanwhile, \"IA\" stands for instance accuracy, and \"CA\" denotes class accuracy. Our results have more reasonable, finer-grained, and closer-to-ground truth geometric structures, thereby achieving more significant classification accuracy." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 543, + 400, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 543, + 400, + 556 + ], + "spans": [ + { + "bbox": [ + 306, + 543, + 400, + 556 + ], + "type": "text", + "content": "5.5. Ablation Study" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 561, + 545, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 561, + 545, + 705 + ], + "spans": [ + { + "bbox": [ + 304, + 561, + 545, + 705 + ], + "type": "text", + "content": "With/Without the dual mapping paradigm. Thanks to the rich and structured data, the conditional networks for text or images can be replaced by powerful pre-trained models [34-36, 41]. However, robust pre-trained backbones are lacking in the 3D field due to scarce data and challenging feature extraction [14, 31, 47]. In this paper, we employ the dual mapping paradigm to augment the capability of perceiving point features for PUDM, ensuring the comprehensive training of the C-Net. To validate this point, we remove the supervision signal from the C-Net to disrupt this pattern. Meanwhile, we also validate the importance of the C-Net by retaining only the N-Net in PUDM." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "spans": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "type": "text", + "content": "20792" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 71, + 547, + 155 + ], + "blocks": [ + { + "bbox": [ + 48, + 71, + 547, + 155 + ], + "lines": [ + { + "bbox": [ + 48, + 71, + 547, + 155 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 547, + 155 + ], + "type": "image", + "image_path": "493648e1692ba9e130de1a46f1159344609e40badeb199ee00599ab89b217701.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 162, + 547, + 185 + ], + "lines": [ + { + "bbox": [ + 46, + 162, + 547, + 185 + ], + "spans": [ + { + "bbox": [ + 46, + 162, + 547, + 185 + ], + "type": "text", + "content": "Figure 6. The results of " + }, + { + "bbox": [ + 46, + 162, + 547, + 185 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 46, + 162, + 547, + 185 + ], + "type": "text", + "content": " on ScanNet. Our results exhibit reduced instances of outliers, concurrently generating more uniform and complete surfaces." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 205, + 287, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 205, + 287, + 266 + ], + "spans": [ + { + "bbox": [ + 46, + 205, + 287, + 266 + ], + "type": "text", + "content": "As reported in Tab 8, disrupting the dual mapping pattern leads to a significant decrease in performance due to the weakened point feature perception ability of the C-Net. Fig 7 visualizes the results of the C-Net generating input sparse points using the dual mapping paradigm." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 266, + 287, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 287, + 313 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 287, + 313 + ], + "type": "text", + "content": "Meanwhile, although removing the C-Net can maintain a single mapping pattern, as demonstrated in prior research [21, 30, 44], sparse point cloud feature extraction plays a pivotal role in PCU." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 50, + 323, + 286, + 362 + ], + "blocks": [ + { + "bbox": [ + 50, + 323, + 286, + 362 + ], + "lines": [ + { + "bbox": [ + 50, + 323, + 286, + 362 + ], + "spans": [ + { + "bbox": [ + 50, + 323, + 286, + 362 + ], + "type": "table", + "html": "
<table><tr><td>Methods</td><td>CD↓</td><td>HD↓</td><td>P2F↓</td></tr>
<tr><td>Without the C-Net</td><td>0.212</td><td>2.015</td><td>2.284</td></tr>
<tr><td>Without the dual mapping</td><td>0.168</td><td>1.498</td><td>2.013</td></tr>
<tr><td>With the dual mapping</td><td>0.131</td><td>1.220</td><td>1.912</td></tr></table>
", + "image_path": "0189e6ae053adc712c8ea52923813d633a74a1cac89c3054bc0b49bdce9b3ab9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 50, + 407, + 285, + 541 + ], + "blocks": [ + { + "bbox": [ + 50, + 407, + 285, + 541 + ], + "lines": [ + { + "bbox": [ + 50, + 407, + 285, + 541 + ], + "spans": [ + { + "bbox": [ + 50, + 407, + 285, + 541 + ], + "type": "image", + "image_path": "0694b37cefe58ad43c6b8079627825b457931431fe8e7c100b2f1bde44e660e3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 548, + 287, + 583 + ], + "lines": [ + { + "bbox": [ + 46, + 548, + 287, + 583 + ], + "spans": [ + { + "bbox": [ + 46, + 548, + 287, + 583 + ], + "type": "text", + "content": "Figure 7. Visualization results of the C-Net generating sparse point clouds on PUGAN. This demonstrates that the C-Net has been effectively trained." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 594, + 287, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 287, + 676 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 287, + 676 + ], + "type": "text", + "content": "With/Without the rate prior. As mentioned in Sec 4.2, we introduce the rate prior into PUDM during training to achieve high-quality generation of point clouds during inference. Tab 9 demonstrates the effectiveness of this approach. Without the rate prior, the overall performance notably decreases, and exhibits significant fluctuations (performing better at " + }, + { + "bbox": [ + 46, + 594, + 287, + 676 + ], + "type": "inline_equation", + "content": "4\\times" + }, + { + "bbox": [ + 46, + 594, + 287, + 676 + ], + "type": "text", + "content": ", but worse at other rates)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Single/Multiple Transfer Module. In this paper, we employ a TM positioned at the bottleneck stage of the U-Net, as its maximum receptive field provides ample con" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 308, + 205, + 545, + 284 + ], + "blocks": [ + { + "bbox": [ + 47, + 363, + 287, + 385 + ], + "lines": [ + { + "bbox": [ + 47, + 363, + 287, + 385 + ], + "spans": [ + { + "bbox": [ + 47, + 363, + 287, + 385 + ], + "type": "text", + "content": "Table 8. Ablation study of the dual mapping paradigm. The dual mapping pattern evidently achieves the best performance." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 205, + 545, + 284 + ], + "lines": [ + { + "bbox": [ + 308, + 205, + 545, + 284 + ], + "spans": [ + { + "bbox": [ + 308, + 205, + 545, + 284 + ], + "type": "table", + "html": "
<table><tr><td>Rates</td><td colspan="3">Without the rate modeling</td><td colspan="3">With the rate modeling</td></tr>
<tr><td></td><td>CD↓</td><td>HD↓</td><td>P2F↓</td><td>CD↓</td><td>HD↓</td><td>P2F↓</td></tr>
<tr><td></td><td>0.295</td><td>1.816</td><td>2.014</td><td>0.247</td><td>1.410</td><td>1.812</td></tr>
<tr><td></td><td>0.224</td><td>1.544</td><td>1.975</td><td>0.171</td><td>1.292</td><td>1.785</td></tr>
<tr><td></td><td>0.158</td><td>1.512</td><td>1.815</td><td>0.131</td><td>1.220</td><td>1.912</td></tr>
<tr><td></td><td>0.166</td><td>1.548</td><td>1.944</td><td>0.116</td><td>1.244</td><td>1.794</td></tr>
<tr><td></td><td>0.151</td><td>1.528</td><td>1.956</td><td>0.107</td><td>1.235</td><td>1.980</td></tr>
<tr><td></td><td>0.144</td><td>1.425</td><td>1.988</td><td>0.106</td><td>1.231</td><td>1.952</td></tr>
<tr><td></td><td>0.139</td><td>1.399</td><td>1.921</td><td>0.104</td><td>1.215</td><td>1.875</td></tr></table>
", + "image_path": "7ac0188c410e2749301becf02d8846423e4b249120431eebd8853da8a073ba5a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 329, + 545, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 329, + 545, + 401 + ], + "spans": [ + { + "bbox": [ + 305, + 329, + 545, + 401 + ], + "type": "text", + "content": "textual information [12, 15]. Meanwhile, we also attempt to place multiple TMs at each stage in U-Net to enable the interaction of multi-scale information [23]. Tab 10 shows that although multiple TMs lead to a slight improvement in terms of CD loss, it is not cost-effective due to the significant increase in computational cost." + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 307, + 412, + 545, + 441 + ], + "blocks": [ + { + "bbox": [ + 306, + 285, + 545, + 308 + ], + "lines": [ + { + "bbox": [ + 306, + 285, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 306, + 285, + 545, + 308 + ], + "type": "text", + "content": "Table 9. Ablation study of the rate prior. Utilizing the rate prior significantly enhances the quality of arbitrary-scale sampling." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 412, + 545, + 441 + ], + "lines": [ + { + "bbox": [ + 307, + 412, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 307, + 412, + 545, + 441 + ], + "type": "table", + "html": "
<table><tr><td>Methods</td><td>CD↓</td><td>HD↓</td><td>P2F↓</td><td>Params↓</td></tr>
<tr><td>Multiple TMs</td><td>0.129</td><td>1.235</td><td>1.953</td><td>28.65M</td></tr>
<tr><td>Single TM</td><td>0.131</td><td>1.220</td><td>1.912</td><td>16.03M</td></tr></table>
", + "image_path": "35d5af10d25e101337c3989a41488c0d1391d6d7a02283f8ef485caa726961aa.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 442, + 545, + 464 + ], + "lines": [ + { + "bbox": [ + 306, + 442, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 306, + 442, + 545, + 464 + ], + "type": "text", + "content": "Table 10. Ablation study of the Transfer Module. Using the single TM strikes a balance between performance and efficiency." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 489, + 378, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 489, + 378, + 502 + ], + "spans": [ + { + "bbox": [ + 306, + 489, + 378, + 502 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 510, + 545, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 652 + ], + "type": "text", + "content": "In this paper, we systematically analyze and identify the potential of DDPM as a promising model for PCU. Meanwhile, we propose PUDM based on conditional DDPM. PUDM enables to directly utilize the dominant features to generate geometric details approximating the ground truth. Additionally, we analyze the limitations of applying DDPM to PCU (the absence of efficient prior knowledge for the conditional network and the fixed-scale object modeling), and propose corresponding solutions (a dual mapping paradigm and the rate modeling). Moreover, we offer a straightforward explanation regarding the robustness to noise for PUDM observed in experiments." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 653, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 545, + 712 + ], + "type": "text", + "content": "Acknowledgments. This work was supported in part by the Jiangsu Geological Bureau ResearchProject under Grant 2023KY11, in part by the National Natural Science Foundation of China under Grant 61871226, and in part by the National Key R&D Program of China (NO.2022ZD0160101)." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 746, + 317, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 746, + 317, + 756 + ], + "spans": [ + { + "bbox": [ + 293, + 746, + 317, + 756 + ], + "type": "text", + "content": "20793" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 706 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. Shapenet: An information-rich 3d model repository. 
arXiv preprint arXiv:1512.03012, 2015. 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 147, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 147, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 147, + 288, + 201 + ], + "type": "text", + "content": "[2] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3075-3084, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 202, + 288, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 288, + 247 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 288, + 247 + ], + "type": "text", + "content": "[3] Christopher Choy, Jaesik Park, and Vladlen Koltun. Fully convolutional geometric features. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8958-8966, 2019. 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 248, + 288, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 248, + 288, + 291 + ], + "spans": [ + { + "bbox": [ + 53, + 248, + 288, + 291 + ], + "type": "text", + "content": "[4] Yaodong Cui, Ren Chen, Wenbo Chu, Long Chen, Daxin Tian, Ying Li, and Dongpu Cao. Deep learning for image and point cloud fusion in autonomous driving: A review. pages 722-739. IEEE, 2021. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 293, + 288, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 293, + 288, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 293, + 288, + 346 + ], + "type": "text", + "content": "[5] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5828-5839, 2017. 1, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 348, + 288, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 348, + 288, + 380 + ], + "spans": [ + { + "bbox": [ + 53, + 348, + 288, + 380 + ], + "type": "text", + "content": "[6] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 382, + 288, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 382, + 288, + 437 + ], + "spans": [ + { + "bbox": [ + 53, + 382, + 288, + 437 + ], + "type": "text", + "content": "[7] Wanquan Feng, Jin Li, Hongrui Cai, Xiaonan Luo, and Juyong Zhang. Neural points: Point cloud representation with neural fields for arbitrary upsampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18633-18642, 2022. 2, 5, 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 438, + 288, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 438, + 288, + 481 + ], + "spans": [ + { + "bbox": [ + 53, + 438, + 288, + 481 + ], + "type": "text", + "content": "[8] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. pages 1231-1237. Sage Publications Sage UK: London, England, 2013. 
1, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 483, + 288, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 483, + 288, + 548 + ], + "spans": [ + { + "bbox": [ + 53, + 483, + 288, + 548 + ], + "type": "text", + "content": "[9] Yun He, Danhang Tang, Yinda Zhang, Xiangyang Xue, and Yanwei Fu. Grad-pu: Arbitrary-scale point cloud upsampling via gradient descent with learned distance functions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5354-5363, 2023. 2, 4, 5, 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 549, + 287, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 549, + 287, + 571 + ], + "spans": [ + { + "bbox": [ + 48, + 549, + 287, + 571 + ], + "type": "text", + "content": "[10] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 572, + 287, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 572, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 48, + 572, + 287, + 605 + ], + "type": "text", + "content": "[11] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 606, + 288, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 606, + 288, + 661 + ], + "spans": [ + { + "bbox": [ + 48, + 606, + 288, + 661 + ], + "type": "text", + "content": "[12] Shengyu Huang, Zan Gojcic, Mikhail Usvyatsov, Andreas Wieser, and Konrad Schindler. Predator: Registration of 3d point clouds with low overlap. In Proceedings of the IEEE/CVF Conference on computer vision and pattern recognition, pages 4267-4276, 2021. 4, 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 662, + 287, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 662, + 287, + 706 + ], + "spans": [ + { + "bbox": [ + 48, + 662, + 287, + 706 + ], + "type": "text", + "content": "[13] Sheng Yu Huang, Hao-Yu Hsu, and Frank Wang. Spovt: Semantic-prototype variational transformer for dense point cloud semantic completion. Advances in Neural Information Processing Systems, 35:33934–33946, 2022. 2" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[14] Xiaoshui Huang, Sheng Li, Wentao Qu, Tong He, Yifan Zuo, and Wanli Ouyang. Frozen clip model is efficient point cloud backbone. arXiv preprint arXiv:2212.04098, 2022. 3, 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "type": "text", + "content": "[15] Xiaoshui Huang, Wentao Qu, Yifan Zuo, Yuming Fang, and Xiaowei Zhao. Imfnet: Interpretable multimodal fusion for point cloud registration. IEEE Robotics and Automation Letters, 7(4):12323-12330, 2022. 
4, 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 150, + 547, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 150, + 547, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 150, + 547, + 194 + ], + "type": "text", + "content": "[16] Jiaxin Li and Gim Hee Lee. Deepi2p: Image-to-point cloud registration via deep classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15960-15969, 2021. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 195, + 545, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 195, + 545, + 248 + ], + "spans": [ + { + "bbox": [ + 307, + 195, + 545, + 248 + ], + "type": "text", + "content": "[17] Ruihui Li, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-gan: a point cloud upsampling adversarial network. In Proceedings of the IEEE/CVF international conference on computer vision, pages 7203–7212, 2019. 1, 2, 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 250, + 545, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 250, + 545, + 294 + ], + "spans": [ + { + "bbox": [ + 307, + 250, + 545, + 294 + ], + "type": "text", + "content": "[18] Ruihui Li, Xianzhi Li, Pheng-Ann Heng, and Chi-Wing Fu. Point cloud upsampling via disentangled refinement. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 344–353, 2021. 1, 2, 5, 6, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 294, + 545, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 294, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 307, + 294, + 545, + 338 + ], + "type": "text", + "content": "[19] Chen-Hsuan Lin, Chen Kong, and Simon Lucey. Learning efficient point cloud generation for dense 3d object reconstruction. In proceedings of the AAAI Conference on Artificial Intelligence, 2018. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 338, + 545, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 338, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 307, + 338, + 545, + 371 + ], + "type": "text", + "content": "[20] Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Pointvoxel cnn for efficient 3d deep learning. Advances in Neural Information Processing Systems, 32, 2019. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 372, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 372, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 372, + 545, + 426 + ], + "type": "text", + "content": "[21] Luqing Luo, Lulu Tang, Wanyi Zhou, Shizheng Wang, and Zhi-Xin Yang. Pu-eva: An edge-vector based approximation solution for flexible-scale point cloud upsampling. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16208-16217, 2021. 1, 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 426, + 545, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 426, + 545, + 470 + ], + "spans": [ + { + "bbox": [ + 307, + 426, + 545, + 470 + ], + "type": "text", + "content": "[22] Shitong Luo and Wei Hu. Diffusion probabilistic models for 3d point cloud generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2837-2845, 2021. 
2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 472, + 545, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 472, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 307, + 472, + 545, + 514 + ], + "type": "text", + "content": "[23] Zhaoyang Lyu, Zhifeng Kong, Xudong Xu, Liang Pan, and Dahua Lin. A conditional point diffusion-refinement paradigm for 3d point cloud completion. arXiv preprint arXiv:2112.03530, 2021. 2, 3, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 515, + 547, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 515, + 547, + 570 + ], + "spans": [ + { + "bbox": [ + 307, + 515, + 547, + 570 + ], + "type": "text", + "content": "[24] Luke Melas-Kyriazi, Christian Rupprecht, and Andrea Vedaldi. Pc2: Projection-conditioned point cloud diffusion for single-image 3d reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12923-12932, 2023. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 571, + 545, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 571, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 307, + 571, + 545, + 624 + ], + "type": "text", + "content": "[25] Liang Pan, Xinyi Chen, Zhongang Cai, Junzhe Zhang, Haiyu Zhao, Shuai Yi, and Ziwei Liu. Variational relational point completion network. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8524-8533, 2021. 4" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 625, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 625, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 307, + 625, + 545, + 667 + ], + "type": "text", + "content": "[26] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 4" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 669, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 669, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 669, + 545, + 712 + ], + "type": "text", + "content": "[27] Anh Viet Phan, Minh Le Nguyen, Yen Lam Hoang Nguyen, and Lam Thu Bui. Dgcnn: A convolutional neural network over large-scale labeled graphs. Neural Networks, 108:533-543, 2018. 2" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "spans": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "type": "text", + "content": "20794" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 710 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 
2, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "type": "text", + "content": "[29] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 2, 4, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 174, + 288, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 174, + 288, + 229 + ], + "spans": [ + { + "bbox": [ + 48, + 174, + 288, + 229 + ], + "type": "text", + "content": "[30] Guocheng Qian, Abdulellah Abualshour, Guohao Li, Ali Thabet, and Bernard Ghanem. Pu-gcn: Point cloud upsampling using graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11683-11692, 2021. 1, 2, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 230, + 288, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 230, + 288, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 230, + 288, + 262 + ], + "type": "text", + "content": "[31] Guocheng Qian, Xingdi Zhang, Abdullah Hamdi, and Bernard Ghanem. Pix4point: Image pretrained transformers for 3d point cloud understanding. 2022. 3, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 263, + 288, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 263, + 288, + 308 + ], + "spans": [ + { + "bbox": [ + 48, + 263, + 288, + 308 + ], + "type": "text", + "content": "[32] Yue Qian, Junhui Hou, Sam Kwong, and Ying He. Pugeonet: A geometry-centric network for 3d point cloud upsampling. In European conference on computer vision, pages 752-769. Springer, 2020. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 308, + 288, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 308, + 288, + 361 + ], + "spans": [ + { + "bbox": [ + 48, + 308, + 288, + 361 + ], + "type": "text", + "content": "[33] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pages 8821-8831. PMLR, 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 364, + 288, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 364, + 288, + 407 + ], + "spans": [ + { + "bbox": [ + 48, + 364, + 288, + 407 + ], + "type": "text", + "content": "[34] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 409, + 288, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 409, + 288, + 464 + ], + "spans": [ + { + "bbox": [ + 48, + 409, + 288, + 464 + ], + "type": "text", + "content": "[35] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 
2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 465, + 288, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 465, + 288, + 529 + ], + "spans": [ + { + "bbox": [ + 48, + 465, + 288, + 529 + ], + "type": "text", + "content": "[36] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 532, + 288, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 532, + 288, + 575 + ], + "spans": [ + { + "bbox": [ + 48, + 532, + 288, + 575 + ], + "type": "text", + "content": "[37] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020. 3, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 576, + 288, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 576, + 288, + 631 + ], + "spans": [ + { + "bbox": [ + 48, + 576, + 288, + 631 + ], + "type": "text", + "content": "[38] Hugues Thomas, Charles R Qi, Jean-Emmanuel Deschaud, Beatrix Marcotegui, François Goulette, and Leonidas J Guibas. Kpconv: Flexible and deformable convolution for point clouds. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6411–6420, 2019. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 632, + 288, + 675 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 632, + 288, + 675 + ], + "spans": [ + { + "bbox": [ + 48, + 632, + 288, + 675 + ], + "type": "text", + "content": "[39] Tong Wu, Liang Pan, Junzhe Zhang, Tai Wang, Ziwei Liu, and Dahua Lin. Balanced chamfer distance as a comprehensive metric for point cloud completion. Advances in Neural Information Processing Systems, 34:29088-29100, 2021. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 677, + 288, + 710 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 677, + 288, + 710 + ], + "spans": [ + { + "bbox": [ + 48, + 677, + 288, + 710 + ], + "type": "text", + "content": "[40] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 587 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 96, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 161 + ], + "type": "text", + "content": "[41] Jiale Xu, Xintao Wang, Weihao Cheng, Yan-Pei Cao, Ying Shan, Xiaohu Qie, and Shenghua Gao. 
Dream3d: Zero-shot text-to-3d synthesis using 3d shape prior and text-to-image diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20908–20918, 2023. 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "text", + "content": "[42] Lei Yang, Yanhong Liu, Jinzhu Peng, and Zize Liang. A novel system for off-line 3d seam extraction and path planning based on point cloud segmentation for arc welding robot. Robotics and Computer-Integrated Manufacturing, 64:101929, 2020. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 219, + 545, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 219, + 545, + 273 + ], + "spans": [ + { + "bbox": [ + 307, + 219, + 545, + 273 + ], + "type": "text", + "content": "[43] Wang Yifan, Shihao Wu, Hui Huang, Daniel Cohen-Or, and Olga Sorkine-Hornung. Patch-based progressive 3d point set upsampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5958–5967, 2019. 1, 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 274, + 545, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 274, + 545, + 328 + ], + "spans": [ + { + "bbox": [ + 308, + 274, + 545, + 328 + ], + "type": "text", + "content": "[44] Lequan Yu, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. Pu-net: Point cloud upsampling network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2790–2799, 2018. 1, 2, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 331, + 545, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 331, + 545, + 363 + ], + "spans": [ + { + "bbox": [ + 308, + 331, + 545, + 363 + ], + "type": "text", + "content": "[45] Cem Yuksel. Sample elimination for generating poisson disk sample sets. In Computer Graphics Forum, pages 25-32. Wiley Online Library, 2015. 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 365, + 545, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 365, + 545, + 407 + ], + "spans": [ + { + "bbox": [ + 308, + 365, + 545, + 407 + ], + "type": "text", + "content": "[46] Dandan Zhang, Weiyong Si, Wen Fan, Yuan Guan, and Chenguang Yang. From teleoperation to autonomous robot-assisted microsurgery: A survey. Machine Intelligence Research, 19(4):288-306, 2022. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 409, + 545, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 409, + 545, + 463 + ], + "spans": [ + { + "bbox": [ + 308, + 409, + 545, + 463 + ], + "type": "text", + "content": "[47] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8552-8562, 2022. 
3, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 464, + 545, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 464, + 545, + 509 + ], + "spans": [ + { + "bbox": [ + 308, + 464, + 545, + 509 + ], + "type": "text", + "content": "[48] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 16259-16268, 2021. 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 510, + 545, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 510, + 545, + 542 + ], + "spans": [ + { + "bbox": [ + 308, + 510, + 545, + 542 + ], + "type": "text", + "content": "[49] Yuchao Zheng, Yujie Li, Shuo Yang, and Huimin Lu. Global-pbnet: A novel point cloud registration for autonomous driving. pages 22312-22319. IEEE, 2022. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 544, + 545, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 544, + 545, + 587 + ], + "spans": [ + { + "bbox": [ + 308, + 544, + 545, + 587 + ], + "type": "text", + "content": "[50] Linqi Zhou, Yilun Du, and Jiajun Wu. 3d shape generation and completion through point-voxel diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5826–5835, 2021. 2" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "spans": [ + { + "bbox": [ + 293, + 746, + 318, + 755 + ], + "type": "text", + "content": "20795" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/2e309a58-2e8d-4563-8890-368854bbd34f_content_list.json b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/2e309a58-2e8d-4563-8890-368854bbd34f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..1b62f1aa36a9425f9642f82f171cd78c3dc14b9b --- /dev/null +++ b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/2e309a58-2e8d-4563-8890-368854bbd34f_content_list.json @@ -0,0 +1,1869 @@ +[ + { + "type": "text", + "text": "A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation", + "text_level": 1, + "bbox": [ + 91, + 130, + 879, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Qucheng Peng, Ce Zheng, Chen Chen", + "bbox": [ + 331, + 204, + 638, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Center for Research in Computer Vision, University of Central Florida", + "bbox": [ + 205, + 220, + 763, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{qucheng.peng,ce.zheng}@ucf.edu,chen.chen@crcv.ucf.edu", + "bbox": [ + 241, + 241, + 727, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 291, + 313, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D human pose data collected in controlled laboratory settings present challenges for pose estimators that generalize across diverse scenarios. To address this, domain generalization is employed. 
Current methodologies in domain generalization for 3D human pose estimation typically utilize adversarial training to generate synthetic poses for training. Nonetheless, these approaches exhibit several limitations. First, the lack of prior information about the target domain complicates the application of suitable augmentation through a single pose augmentor; affecting generalization on target domains. Moreover, adversarial training's discriminator tends to enforce similarity between source and synthesized poses, impeding the exploration of out-of-source distributions. Furthermore, the pose estimator's optimization is not exposed to domain shifts, limiting its overall generalization ability.", + "bbox": [ + 75, + 324, + 472, + 566 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address these limitations, we propose a novel framework featuring two pose augmentors: the weak and the strong augmentors. Our framework employs differential strategies for generation and discrimination processes, facilitating the preservation of knowledge related to source poses and the exploration of out-of-source distributions without prior information about target poses. Besides, we leverage meta-optimization to simulate domain shifts in the optimization process of the pose estimator, thereby improving its generalization ability. Our proposed approach significantly outperforms existing methods, as demonstrated through comprehensive experiments on various benchmark datasets. Our code will be released at https://github.com/davidpengucf/DAF-DG.", + "bbox": [ + 75, + 568, + 472, + 779 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 813, + 209, + 828 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D human pose estimation (HPE) is the process of predicting the 3D coordinates of human joints from images or videos. It serves as the foundation for various applications including person re-identification [29], action recognition", + "bbox": [ + 75, + 839, + 470, + 902 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0150271f669295bceb3aa7fe0418a4dd03c382458b5585d610ad84e6a19a6c08.jpg", + "image_caption": [ + "Figure 1. Comparisons between existing single-augmentor frameworks and our proposed dual-augmentor framework on a toy example. Current single-augmentor methods excel at simulating Target Domain 2 but exhibit limitations in simulating Target Domain 1, closely resembling the source, and Target Domain 3, deviating significantly from the source. In our framework, the weak aug- mentor excels in simulating Target Domain 1, while the strong augmentor effectively imitates both Target Domain 2 and 3." + ], + "image_footnote": [], + "bbox": [ + 500, + 287, + 890, + 477 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "[10, 20, 34], human mesh recovery [44, 45], virtual reality [9, 35]. However, the annotated 3D data are often collected in controlled laboratory environments for convenience, featuring indoor settings and limited actions performed by few individuals. As a result, pose estimators trained on these labeled datasets face challenges in generalizing to varied in-the-wild scenarios. Hence, the notion of domain generalization (DG) is pivotal in incorporating knowledge from labeled (source) data into a pose estimator that could generalize well on unseen (target) data. 
Unlike domain adaptation (DA) which involves the training with target data, DG relies solely on the source data as a reference, without any prior information about the target data.", + "bbox": [ + 496, + 609, + 893, + 808 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Existing DG approaches for 3D HPE [7, 11, 37] conduct substantial augmentations on the source poses to obtain synthesized poses via adversarial training, and incorporate the synthesized poses as complementary to the source poses for HPE model training. However, these methods have several limitations. First, in the context of DG for 3D HPE, there", + "bbox": [ + 496, + 809, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 1, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "2240", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "is a complete lack of information about poses in target domains. If target poses closely resemble the source (as Target Domain 1 in Fig. 1), poses generated by extensive augmentation notably differ from the target, thereby impeding generalization. Conversely, when target poses significantly deviate from the source distributions (as Target Domain 3 in Fig. 1), poses generated via insufficient augmentation may not sufficiently explore out-of-source knowledge to simulate the target. Existing methods only use a single augmentor, making it challenging to simultaneously achieve both objectives. Second, the adversarial training between synthesized and source poses constrains the diversity of generation. Existing methods typically employ the generative adversarial network (GAN) [6] structure, which includes one pose generator responsible for pose generation and one discriminator to assist the pose generator by providing feedback. Specifically, the discriminator enforces similarity between synthesized and source poses, aiming to ensure that the generated poses closely resemble the source poses, which harms the exploration of out-of-source knowledge.", + "bbox": [ + 76, + 90, + 472, + 393 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these limitations, we propose a novel framework featuring two pose augmentors: the weak augmentor and the strong augmentor. The weak augmentor is designed to simulate target poses closely resembling source poses, while the strong augmentor generates target poses that exhibit significant deviations from source distributions. To delineate their characteristics, differential strategies are employed for generation and discrimination processes, as detailed in Sec. 3.3. Notably, our framework alleviates the constraints on strong-augmented poses by traditional adversarial training methods. Instead of enforcing similarity between the source and all the augmented poses, we utilize weak-augmented poses as an intermediary, enabling discrimination between strong- and weak-augmented poses and facilitating discrimination between source and weak-augmented poses. 
To optimize the utilization of synthesized poses, we introduce meta-optimization among source, weak-augmented, and strong-augmented poses, as elaborated in Sec. 3.4. Our training process exposes the pose estimator to domain shifts during the optimization processes, thereby enhancing its adaptability to handle domain shifts during the inference stage. Our contributions can be summarized in three main aspects:", + "bbox": [ + 76, + 397, + 472, + 744 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a novel framework featuring both the weak and strong pose augmentors, which effectively preserves knowledge related to source poses while simultaneously exploring out-of-source distributions through differential strategies for the generation and discrimination processes of the two augmentors.", + "- We introduce meta-optimization to enhance the utilization of synthesized poses. By simulating domain shifts among source, weak-augmented, and strong-augmented poses during the optimization processes, the pose estima" + ], + "bbox": [ + 76, + 750, + 468, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "tor's generalization ability is further improved.", + "bbox": [ + 511, + 90, + 821, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- We conduct comprehensive experiments on several benchmark datasets, and the results demonstrate that our approach significantly outperforms state-of-the-art methods by a considerable margin.", + "bbox": [ + 500, + 106, + 890, + 167 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 180, + 640, + 195 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D Human Pose Estimation. The widely adopted two-stage technique in 3D HPE, as demonstrated in [23, 36, 38, 40, 43, 46], initially employs 2D human pose estimators [17, 26, 27, 33] for 2D pose predictions, followed by lifting these predictions to 3D poses. Among these approaches, [38] integrates graph-structured semantic information to enhance the estimation process, while [23] utilizes dilated temporal convolutional layers for temporal information encoding, and [43] presents a purely transformer-based 3D approach. Moreover, [36] effectively models inter-frame correspondences with a mixed sequence-to-sequence encoder, and recent works such as [40] explore the frequency domain to improve inference efficiency, and [46] employs unified representation learning for 3D human poses.", + "bbox": [ + 496, + 205, + 890, + 417 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Domain Generalization. Current DG methods aim to learn domain-invariant representations and are categorized into three types: domain alignment [21, 41], meta-learning [15, 28], and augmentation strategies [24, 25, 39, 42]. For domain alignment, [41] enhances the conditional invariance of learned features by incorporating an entropy regularization term, leading to improved classifier generalization. [21] iteratively segregates samples into latent domains through clustering. Concerning meta-learning, [15] proposes a model-agnostic training procedure that simulates domain shift during training, whereas [28] applies meta-learning to single-domain generalization. 
Regarding augmentation strategies, [39] introduces a novel regularization term for adversarial data augmentation derived from the information bottleneck principle, while [42] presents a unique style hallucination module to generate style-diversified samples crucial for generalization.", + "bbox": [ + 496, + 417, + 892, + 672 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Differing from the current DG approaches for 3D HPE that focus solely on augmentations, we also incorporate meta-learning-based approaches to enhance generalization. Cross-domain Learning for 3D Human Pose Estimation.", + "bbox": [ + 496, + 674, + 890, + 738 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Cross-domain learning for 3D HPE is categorized into two types: domain generalization [7, 11, 16, 37] and domain adaptation [2, 4, 18]. In domain generalization, training processes exclusively utilizes source data, and the resulting model is directly applied to infer target data. [37] adjusts various geometry factors of human poses through differentiable operations. [11] applies DH Forward Kinematics [1] to drive 3D pose augmentation and obtain diverse poses. [7] incorporates Counterfactual Risk Minimization [30] to achieve unbiased learning. [16] addresses with network designs like the interpolation sub-net and body-parts grouping", + "bbox": [ + 496, + 734, + 890, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2241", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "net. In domain adaptation, labeled source data and unlabeled target data are used simultaneously during the training process. [4] employs generative adversarial network [6] to discriminate between source and target during training. [2] utilizes global position alignment and local pose augmentation to transfer from source to target. [18] employs a multi-hypotheses network along with target-specific source augmentation for the problem.", + "bbox": [ + 75, + 90, + 468, + 210 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this paper, we focus on domain generalization for 3D HPE. In addition to synthesizing novel 3D poses for better generalization, we also simulate domain shifts by using both source and synthesized poses during optimizations.", + "bbox": [ + 76, + 214, + 470, + 275 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methodology", + "text_level": 1, + "bbox": [ + 76, + 292, + 210, + 311 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Preliminaries", + "text_level": 1, + "bbox": [ + 76, + 320, + 217, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2D-to-3D lifting Paradigm of 3D HPE. Current 2D-to-3D lifting paradigm of 3D HPE [23, 36, 38, 43] assumes that $x_{i}^{s} \\in \\mathbb{R}^{J \\times 2}$ represents the 2D coordinates of $J$ keypoints of a sample in the source domain (2D poses as input), and $y_{i}^{s} \\in \\mathbb{R}^{J \\times 3}$ represents the corresponding 3D positions in the camera coordinate system (3D poses as output), we denote the source domain with $N$ samples as $S = \\{(x_{i}^{s}, y_{i}^{s})\\}_{i=1}^{N}$ , encompassing $N$ 2D-3D pairs. Moreover, we define the pose estimator as $\\mathcal{P}: x_{i}^{s} \\mapsto \\hat{y}_{i}^{s}$ , where $\\hat{y}_{i}^{s}$ represents the predicted corresponding 3D pose positions. 
For a fully supervised human pose estimation problem, we aim to achieve an ideal $\\mathcal{P}$ by solving the following optimization objective:", + "bbox": [ + 75, + 345, + 468, + 527 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\mathcal {P}} \\mathbb {E} _ {\\left(x _ {i} ^ {s}, y _ {i} ^ {s}\\right) \\in \\mathcal {S}} \\mathcal {L} _ {M S E} \\left(\\mathcal {P} \\left(x _ {i} ^ {s}\\right), y _ {i} ^ {s}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 547, + 468, + 570 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_{MSE}$ represents the Mean Squared Error (MSE) loss. However, the objective [23, 43] is designed to achieve optimal performance on source poses, rendering it inadequate for addressing the DG problem, as it does not account for the domain gap between source and target domains.", + "bbox": [ + 75, + 580, + 468, + 657 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "DG for 3D HPE. Within the paradigm of 2D-to-3D lifting HPE, our primary goal is to derive an estimator $\\mathcal{P}$ that demonstrates commendable 3D HPE performance specifically within the target domain $T$ . Under this scenario, the target domain $T = \\{(x_j^t,y_j^t)\\}_{j = 1}^M$ with $M$ samples can only be used for inference and is not involved in the training process. However, when utilizing solely the original source domain, the pose estimator cannot learn out-of-source distributions, which is essential for achieving good performance on the target domain. Existing methods [7, 11, 16, 37] tend to conduct augmentation to the original source poses to enhance data diversity, thereby improving the estimator's generalization ability. The augmentor is denoted as $\\mathcal{A}:y_i^s\\mapsto y_i^a$ , while the projection from 3D to 2D via camera parameters (completely known) is defined as $\\mathcal{R}:y_i^a\\mapsto x_i^a$ . Consequently, the min-max optimization objective for domain", + "bbox": [ + 75, + 657, + 470, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "generalization can be defined as follows:", + "bbox": [ + 500, + 90, + 771, + 106 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\min _ {\\mathcal {P}} \\max _ {\\mathcal {A}} \\mathbb {E} _ {\\left(x _ {i} ^ {s}, y _ {i} ^ {s}\\right) \\in \\mathcal {S}} \\left[ \\mathcal {L} _ {M S E} \\left(\\mathcal {P} \\left(x _ {i} ^ {s}\\right), y _ {i} ^ {s}\\right) \\right. \\tag {2} \\\\ + \\mathcal {L} _ {M S E} (\\mathcal {P} (\\mathcal {R} (\\mathcal {A} (y _ {i} ^ {s}))), \\mathcal {A} (y _ {i} ^ {s})) ]. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 118, + 890, + 159 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The objective is a min-max game between the pose augmentor $\\mathcal{A}$ and the pose estimator $\\mathcal{P}$ , encouraging the estimator $\\mathcal{P}$ to learn out-of-source distributions, while conducting augmentations to a significant extent is beneficial to generate more diverse samples, and that is why the loss is minimized with respect to the augmentor $\\mathcal{P}$ and maximized with respect to the augmentor $\\mathcal{A}$ in the optimization.", + "bbox": [ + 496, + 170, + 890, + 276 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. 
Overview of the Proposed Method", + "text_level": 1, + "bbox": [ + 500, + 287, + 797, + 303 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Existing methods [7, 11, 37] often apply intricate augmentations to the original poses in the source domain, relying on the discrimination between augmented poses and source poses simultaneously. However, this kind of approach raises two concerns. First, as this is a DG problem for 3D HPE, any information about the target domain is entirely unknown. If the target domain bears a striking resemblance to the source domain (like Target Domain 1 in Fig. 1), poses generated by extensive augmentation might hinder the pose estimator's inference on it. Conversely, in cases where the target domain significantly diverges from the source distributions (like Target Domain 3 in Fig. 1), poses generated by insufficient augmentation may fail to adequately explore out-of-source knowledge for the pose estimator. Second, when target domain is distant from the source and needs significant augmentations, the adversarial training between source and synthesized poses limits the diversity of generated poses. Specifically, the discriminator enforces similarity between source and synthesized poses, thereby causing the synthesized poses to remain similar to the source poses.", + "bbox": [ + 496, + 310, + 890, + 613 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To tackle these concerns, we propose a novel dual-augmentor framework depicted in Fig. 2. This framework involves two augmentors that generate weak- and strong-augmented poses, enabling the handling of diverse unknown target domains. Additionally, the weak-augmentation module serves as a bridge between strong-augmented and source poses. Specifically, the discrimination between source poses and weak-augmented poses is utilized to update the weak augmentor, while the discrimination between weak- and strong-augmented poses is employed to optimize the strong augmentor. This approach liberates the strong augmentor from heavy reliance on the source domain and enables the exploration of more out-of-source knowledge. Further details regarding the pose augmentation process can be found in Sec. 3.3.", + "bbox": [ + 496, + 613, + 890, + 839 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Having elucidated the methodology for synthesizing poses, the subsequent discourse pivots towards the utilization of these synthesized poses. Previous works [7, 11, 37] overlook the interactions between source poses and aug-", + "bbox": [ + 496, + 839, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "2242", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f00b92fd08cb0341489c9c94db5f4262d93765d824e775da0c537f54469b9e82.jpg", + "image_caption": [ + "Figure 2. Overall framework of our dual-augmentor method. Initially, the original pose undergoes processing through two pose augmentors, resulting in weak- and strong-augmented poses (See Sec. 3.3). The weak augmentor simulates target domains similar to the source domain, while the strong augmentor emulates target domains that deviate significantly from the source distributions. Subsequently, the original pose and the two augmented poses are input to the pose estimator for further meta-optimization (See Sec. 3.4)." 
+ ], + "image_footnote": [], + "bbox": [ + 78, + 88, + 893, + 260 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "mented poses, dealing with the optimizations of pose estimator on them separately. In contrast, we propose a model-agnostic meta optimization approach that enhances the interactions between source poses and the two types of augmented poses to simulate domain shifts in the optimization and leverage domain-invariant knowledge while maintaining the original 2D-to-3D lifting backbone's structure unchanged. Further details concerning the meta optimization process can be found in Sec. 3.4.", + "bbox": [ + 75, + 332, + 472, + 468 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Pose Augmentation", + "text_level": 1, + "bbox": [ + 76, + 479, + 264, + 496 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4cefbeab32a0f16b34b0639ebe8ca41db3974403f048d0dcb3a9b02252fdb16f.jpg", + "image_caption": [ + "Figure 3. The differentiation of the weak and strong generators. Within each pipeline, denoted as \"W-\" for weak ones and \"S-\" for strong ones, there exist four pose states: original (OR), after bone angle operation (BA), after bone length operation (BL) and after rotation and translation operation (RT). For proximate states, similarities are enhanced for both generators. When there is a one-state gap between states, the weak generator continues to enhance similarities, whereas the strong generator enlarges dissimilarities." + ], + "image_footnote": [], + "bbox": [ + 78, + 505, + 472, + 685 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The pose augmentation architecture comprises two pose augmentors: the weak augmentor and the strong augmentor. Each augmentor comprises two components: the generator, responsible for producing diverse synthesized poses to facilitate the training of the pose estimator, and the discriminator, which collaborates with the generator to regu", + "bbox": [ + 75, + 810, + 470, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "late the quality of the generated poses. Our objective is to apply differential strategies to the generation (named differential generation) and discrimination (named differential discrimination) of the two augmentors, enabling them to generate weak- and strong-augmented poses.", + "bbox": [ + 496, + 332, + 893, + 409 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Differential Generation: The generation process, as illustrated in Figure 3, consists of three modules in sequence for each generator pipeline: the Bone Angle Generator, Bone Length Generator, and Rotation and Translation Generator, resulting in four statuses in the pipeline: original (OR), after the bone angle operation (BA), after the bone length operation (BL), and after the rotation and translation operation (RT). Existing approaches such as [16, 37] typically treat the entire generation pipeline in an end-to-end manner and only utilize the $(OR, RT)$ pair. In contrast, our method deals with the generation in a more fine-grained fashion. We group statuses into pairs based on their relations: proximate pairs as $PP = \\{(OR, BA), (BA, BL), (BL, RT)\\}$ , and one-state gap pairs as $OG = \\{(OR, BL), (BA, RT)\\}$ .", + "bbox": [ + 496, + 420, + 893, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To begin, we define the measurement of similarity within a pair. 
Solely relying on conventional absolute position losses, such as the MSE loss, is not adequate in this context for two reasons. First, the three modules within the generator all perform operations on the level of bone vector, not on the joint positions. If one joint undergoes significant position changes after an operation, other joints connected to it will also experience considerable movement, even if the bone vector between them remains stable. In such cases, position-based measurements cannot fully reflect the extent of augmentation based on the bone vector. Second, human poses possess kinematic attributes, and a position-based measurement overlooks the graphical information. Therefore, we introduce the Laplacian weighted similarity measurement. For the human model, it is straightforward to obtain degree matrix $D$ and adjacency matrix $A$ , and the normalized Laplacian matrix can be represented as:", + "bbox": [ + 496, + 643, + 895, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "2243", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nW _ {N L} = I - D ^ {- \\frac {1}{2}} A D ^ {- \\frac {1}{2}}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 103, + 468, + 122 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $I$ is the identity matrix, and $W_{NL}$ is the normalized Laplacian matrix encoding graphical information. Given a pair of statuses $(st_1, st_2)$ (from either PP or OG), the similarity measurement is defined as:", + "bbox": [ + 76, + 127, + 468, + 183 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s i m} \\left(s t _ {1}, s t _ {2}\\right) = \\underbrace {\\| s t _ {1} - s t _ {2} \\|} _ {\\text {M S E S i m i l a r i t y}} + \\underbrace {\\| W _ {N L} s t _ {1} - W _ {N L} s t _ {2} \\|} _ {\\text {L a p l a c i a n W e i g h t e d S i m i l a r i t y}}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 191, + 468, + 224 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To differentiate between the two generators, we apply distinctive strategies. For the weak generator, we enhance similarities for its PP and OG sets to maintain a slight level of augmentation in the synthesized poses, as indicated by:", + "bbox": [ + 76, + 232, + 468, + 292 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {w g} = \\underset {(s t _ {1}, s t _ {2}) \\in P P} {\\mathbb {E}} \\mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}) \\\\ + \\alpha_ {1} \\underset {(s t _ {1}, s t _ {2}) \\in O G} {\\mathbb {E}} \\mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 147, + 303, + 395, + 356 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For the strong generator, we enhance similarities within its PP set to ensure the reasonableness of the synthesized output, while enlarging dissimilarities within its OG sets to maintain a significant level of augmentation, expressed as:", + "bbox": [ + 76, + 366, + 468, + 426 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {s g} = \\underset {(s t _ {1}, s t _ {2}) \\in P P} {\\mathbb {E}} \\mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}) \\\\ - \\alpha_ {2} \\underset {(s t _ {1}, s t _ {2}) \\in O G} {\\mathbb {E}} \\mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}). 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 148, + 436, + 393, + 489 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$\\alpha_{1}$ and $\\alpha_{2}$ are trade-off parameters.", + "bbox": [ + 76, + 501, + 313, + 515 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Differential Discrimination: The discrimination process comprises two min-max games [6, 8]: one between the source pose and the weak-augmented poses, and the other between the weak-augmented pose and the strong-augmented pose. We adopt the WGAN-GP [8] structure here. The discrimination losses regarding the source poses $y^{or}$ , weak-augmented poses $y^{wa}$ , and strong-augmented poses $y^{sa}$ are defined as follows:", + "bbox": [ + 76, + 515, + 468, + 636 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {w d} = \\mathbb {E} \\left[ D _ {w a} \\left(y ^ {o r}\\right) \\right] - \\mathbb {E} \\left[ D _ {w a} \\left(y ^ {w a}\\right) \\right] \\tag {7} \\\\ + \\beta_ {1} \\mathbb {E} (1 - \\| \\nabla_ {\\hat {y} ^ {w a}} D _ {w a} (\\hat {y} ^ {w a}) \\|), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 646, + 468, + 681 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{r l} \\mathcal {L} _ {s d} = & \\mathbb {E} [ D _ {s a} (y ^ {w a}) ] - \\mathbb {E} [ D _ {s a} (y ^ {s a}) ] \\\\ & + \\beta_ {2} \\mathbb {E} (1 - \\| \\nabla_ {\\hat {y} ^ {s a}} D _ {s a} (\\hat {y} ^ {s a}) \\|). \\end{array} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 694, + 468, + 728 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here, $\\mathcal{L}_{wd}$ is the discrimination loss between $y^{or}$ and $y^{wa}$ , used to update the weak augmentor, and $D_{wa}$ is the weak discriminator. $\\mathcal{L}_{sd}$ is the discrimination loss between $y^{wa}$ and $y^{sa}$ , used to update the strong augmentor, and $D_{sa}$ is the strong discriminator. $\\hat{y}^{wa}$ and $\\hat{y}^{sa}$ are built via interpolation, such that $\\hat{y}^{wa} = \\epsilon y^{or} + (1 - \\epsilon)y^{wa}$ and $\\hat{y}^{sa} = \\epsilon y^{wa} + (1 - \\epsilon)y^{sa}$ , where $\\epsilon$ is randomly drawn from $U[0,1]$ . $\\beta_{1}$ and $\\beta_{2}$ are trade-off parameters.", + "bbox": [ + 76, + 734, + 468, + 854 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "By implementing this discrimination process in two min-max games, the weak augmentor is capable of retaining more source information and alleviating adverse effects", + "bbox": [ + 76, + 854, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "stemming from irrationally synthesized poses. Simultaneously, the strong augmentor can overcome a strong dependency on the source distributions, and explore out-of-source distributions more effectively. With diverse synthesized poses to simulate potential target poses, it is beneficial for further domain generalization in pose estimation.", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. 
Meta Optimization", + "text_level": 1, + "bbox": [ + 500, + 188, + 683, + 205 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Algorithm 1 Meta Optimization Pseudo Code", + "text_level": 1, + "bbox": [ + 500, + 212, + 807, + 227 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Input: Original source 2D-3D pose pairs $(x^{or},y^{or})$ ; Weak Generator $G_{wa}$ ; Strong Generator $G_{sa}$", + "bbox": [ + 500, + 232, + 890, + 262 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Init: Pose estimator $\\mathcal{P}_t$ , Learning rates $lr_1$ and $lr_2$ , inner loop iteration $k$ , Hyperparameter $\\gamma$", + "bbox": [ + 500, + 262, + 890, + 292 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Output: Updated pose estimator $\\mathcal{P}_{t + 2}$ after two-step meta optimization", + "bbox": [ + 500, + 292, + 890, + 323 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1: Generate weak-augmented 3D poses $y^{wa} = G_{wa}(y^{or})$", + "2: Project $y^{wa}$ to 2D poses $x^{wa}$ with camera parameters", + "3: // Meta-train on Source data:", + "4: Update $\\mathcal{P}_t' = \\mathcal{P}_t - lr_1\\nabla \\mathcal{L}_{MSE}(\\mathcal{P}_t(x^{or}),y^{or})$", + "5: for $i\\gets 1,\\dots,k$ do" + ], + "bbox": [ + 509, + 323, + 879, + 397 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "6: // Meta-test on Weak-augmented data:", + "7: $\\mathcal{L}_{weak - test} = L_{MSE}(\\mathcal{P}_t'(x^{wa}), y^{wa})$", + "8: end for", + "9: // Meta update on Source and Weak-augmented data:", + "10: $\\mathcal{L}_{t + 1} = \\mathcal{L}_{MSE}(\\mathcal{P}_t(x^{or}),y^{or}) + \\gamma L_{weak - test}$", + "11: $\\mathcal{P}_{t + 1} = \\mathcal{P}_t - lr_2(\\partial \\mathcal{L}_{t + 1} / \\partial \\mathcal{P}_t)$", + "12: Generate strong-augmented 3D poses $y^{sa} = G_{sa}(y^{or})$", + "13: Project $y^{sa}$ to 2D poses $x^{sa}$ with camera parameters", + "14: // Meta-train on Weak-augmented data:", + "15: Update $\\mathcal{P}_{t + 1}^{\\prime} = \\mathcal{P}_{t + 1} - lr_{1}\\nabla \\mathcal{L}_{MSE}(\\mathcal{P}_{t + 1}(x^{wa}),y^{wa})$", + "16: for $i\\gets 1,\\dots,k$ do", + "17: // Meta-test on Strong-augmented data:", + "18: $\\mathcal{L}_{\\text {strong-test }} = L_{MSE}\\left(\\mathcal{P}_{t+1}^{\\prime}\\left(x^{sa}\\right), y^{sa}\\right)$", + "19: end for", + "20: // Meta update on Weak- and Strong-augmented data:", + "21: $\\mathcal{L}_{t + 2} = \\mathcal{L}_{MSE}\\big(\\mathcal{P}_{t + 1}(x^{wa}),y^{wa}\\big) + \\gamma L_{strong - test}$", + "22: $\\mathcal{P}_{t + 2} = \\mathcal{P}_{t + 1} - lr_2(\\partial \\mathcal{L}_{t + 2} / \\partial \\mathcal{P}_{t + 1})$" + ], + "bbox": [ + 504, + 323, + 887, + 655 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For DG problem in 3D HPE, two principal challenges must be addressed. First, there is the issue of synthesizing data, as detailed in Section 3.3. The second challenge revolves around the effective utilization of synthesized data, a facet often overlooked by current methodologies. Existing DG approaches for 3D HPE [11, 37], conduct the optimization of the pose estimator based on source and synthesized data independently. 
Unfortunately, this approach lacks mechanisms for fostering interactions between these two optimization processes, resulting in a deficiency of simulated domain shifts in the optimization trajectory.", + "bbox": [ + 496, + 672, + 890, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In contrast, our proposed model-agnostic strategy incorporates meta-optimization to bolster interactions among source poses, weak-augmented poses, and strong-augmented poses. This process facilitates the learning of", + "bbox": [ + 496, + 840, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "2244", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "domain-invariant knowledge during the update of the pose estimator, as outlined in Algorithm 1. The effectiveness of this approach lies in the fact that the objectives in meta-optimization not only aim to minimize losses on source and synthesized poses but also enhance the alignment of optimization directions during training, thus enhancing generalization significantly.", + "bbox": [ + 75, + 90, + 470, + 196 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The algorithm can be dissected into two parts: Lines 1-11 manage interactions between source poses and weakly-augmented poses, while Lines 12-22 address interactions between weak- and strong-augmented poses. This step-by-step approach is taken due to the substantial domain gap between source poses and strong-augmented poses. Weak-augmented poses serve as a bridge between source poses and strong-augmented poses, alleviating the challenge of directly aligning source and strong-augmented data. By incorporating all three types of poses in the optimization, the pose estimator can effectively generalize across diverse target domains, avoiding overfitting specific pose data types.", + "bbox": [ + 75, + 198, + 473, + 380 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 395, + 209, + 412 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Datasets and Metrics", + "text_level": 1, + "bbox": [ + 76, + 420, + 274, + 434 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this paper, we evaluate our approach using several widely-used 3D human pose benchmarks, including Human3.6M [12], MPI-INF-3DHP [22], and 3DPW [31]. Moreover, following previous works [11, 16, 37], we adopt the 16-keypoint human model with Hip joint as the origin.", + "bbox": [ + 75, + 444, + 468, + 520 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Human3.6M is an indoor dataset comprising 3.6 million frames and consisting of 7 subjects denoted as S1, S5, S6, S7, S8, S9, and S11. For the cross-dataset evaluation in Tab. 1, Tab. 2 and Tab. 5, we use S1, S5, S6, S7, S8 as the source domain. In the cross-scenario evaluation on the entire Human3.6M dataset in Tab. 3, S1, S5, S6, S7, S8 are the source domain, while S9 and S11 are the target domain. Mean Per Joint Position Error (MPJPE) and Procrustes-Aligned Mean Per Joint Position Error (PA-MPJPE) are employed as evaluation metrics. For the cross-scenario evaluation on partial Human3.6M, we follow previous works [4, 11, 16] to define two tasks as shown in Tab. 4. One task uses S1 as the source and S5, S6, S7, S8 as the target, while the other task uses S1, S5 as the source and S6, S7, S8 as the target. 
Both tasks utilize MPJPE as the metric.", + "bbox": [ + 75, + 521, + 468, + 747 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "MPI-INF-3DHP (3DHP) is an in-the-wild dataset, and we utilize only its test set for cross-dataset evaluation, as shown in Tab. 1 and Tab. 5, which consists of approximately 3k frames. The results are presented based on three metrics: Percentage of Correct Keypoints (PCK), Area Under the Curve (AUC), and MPJPE.", + "bbox": [ + 75, + 748, + 468, + 838 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3DPW is another in-the-wild dataset featuring more challenging poses and scenes. We utilize it for cross-dataset evaluation, as shown in Tab. 2. Here PA-MPJPE and MPJPE serve as the evaluation metrics.", + "bbox": [ + 75, + 839, + 468, + 898 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Implementation Details", + "text_level": 1, + "bbox": [ + 498, + 90, + 718, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For all the generators and discriminators, we ensure consistency by employing the same fully-connected layers, aligning with the methodology described in [37]. In the data augmentation process, the learning rate is set to 1e-4 for generators and 2e-4 for discriminators. We set $\\alpha_{1}$ and $\\alpha_{2}$ to 0.50 and 0.35, respectively, while both $\\beta_{1}$ and $\\beta_{2}$ are assigned a value of 4. During the meta optimization process, we utilize a learning rate of 1e-4 for $lr_{1}$ and 5e-4 for $lr_{2}$ . The trade-off parameter $\\gamma$ and the inner loop iteration $k$ are both set to 1.", + "bbox": [ + 496, + 113, + 890, + 263 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Moreover, we employ the Adam optimizer [13] for data augmentation and the AdamW optimizer [19] for meta optimization. Our experiments are conducted with a batch size of 1024 over 60 epochs. We initialize the pose estimator with a warm-up phase lasting two epochs for supervised learning on source data. From the third epoch onwards, data augmentation and meta-optimization begin.", + "bbox": [ + 496, + 265, + 892, + 369 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Quantitative Results", + "text_level": 1, + "bbox": [ + 498, + 378, + 694, + 393 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/8f44de63d5f50eead70d0d472f1860b03734389231b5846447abe1aaabc5e086.jpg", + "table_caption": [ + "Table 1. Cross-dataset evaluation on 3DHP dataset." + ], + "table_footnote": [], + "table_body": "
Method | Venue | DG | PCK ↑ | AUC ↑ | MPJPE ↓
VPose (1-frame) [23] | CVPR'19 | × | 80.9 | 42.5 | 102.3
EvoSkeleton [17] | CVPR'20 | | 81.2 | 46.1 | 99.7
RepNet [32] | CVPR'19 | | 81.8 | 54.8 | 92.5
PoseAug [37] | TPAMI'23 | | 88.6 | 57.3 | 73.0
DH-AUG [11] | ECCV'22 | | 89.5 | 57.9 | 71.2
PoseGU [7] | CVIU'23 | | 86.3 | 57.2 | 75.0
CEE-Net [16] | AAAI'23 | | 89.9 | 58.2 | 69.7
Ours | | | 92.9 | 60.7 | 63.1
", + "bbox": [ + 501, + 424, + 890, + 526 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/ca8e6fdc561a0e8535e215fc3b88171e5719851992c42c3703fbb3b987ab3d1a.jpg", + "table_caption": [ + "Table 2. Cross-dataset evaluation on 3DPW dataset." + ], + "table_footnote": [], + "table_body": "
Method | Venue | DG | PA-MPJPE ↓ | MPJPE ↓
VPose (1-frame) [23] | CVPR'19 | × | 94.6 | 125.7
VIBE [14] | CVPR'20 | | 82.3 | 122.5
PoseAug [37] | TPAMI'23 | | 81.6 | 119.0
DH-AUG [11] | ECCV'22 | | 79.3 | 112.8
PoseGU [7] | CVIU'23 | | 92.3 | -
CEE-Net [16] | AAAI'23 | | 76.8 | -
Ours | | | 73.2 | 106.6
", + "bbox": [ + 501, + 549, + 890, + 646 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/10605c9aee8c252ac91b5935155020ccabfc18850385bf150612cd4a4c9cd9cc.jpg", + "table_caption": [ + "Table 3. Cross-scenario evaluation on Entire Human3.6M dataset. S1,S5,S6,S7,S8 are the source and S9,S11 are the target." + ], + "table_footnote": [], + "table_body": "
Method | Venue | DG | MPJPE ↓ | PA-MPJPE ↓
VPose (1-frame) [23] | CVPR'19 | × | 52.7 | 40.9
EvoSkeleton [17] | CVPR'20 | | 50.9 | 38.0
PoseAug [37] | TPAMI'23 | | 50.2 | 39.1
DH-AUG [11] | ECCV'22 | | 49.8 | 38.3
CEE-Net [16] | AAAI'23 | | 47.3 | 36.8
Ours | | | 44.4 | 34.6
", + "bbox": [ + 501, + 690, + 890, + 775 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Cross-dataset evaluation results. In cross-dataset evaluations, source and target come from different datasets. Following identical paradigms from existing methods [11, 16, 37], we use ground truth 2D keypoints as input, single-frame VPose [23] as the lifting backbone, and Human3.6M as the source dataset. Our method demonstrates notable performance in all metrics, as presented in Tab. 1 and Tab. 2. Notably, our approach outperforms CEE-Net by $3.0\\%$ in", + "bbox": [ + 496, + 779, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "2245", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/69ed370ea71a3814b8e029e508885cc2818fbde7e35f9e1e43c2be4f70f5de50.jpg", + "image_caption": [ + "2D Predictions" + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 148, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f5f923dfd5ef95f1f23cf09a13d974c2d7a2b89a289c2917d8ea3f1995f80b11.jpg", + "image_caption": [ + "Source-only" + ], + "image_footnote": [], + "bbox": [ + 148, + 90, + 199, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f12b9d6f32a368726f6e917db019c899ba069f5e168c5c5db8e2e348a7e5ce29.jpg", + "image_caption": [ + "y PoseAug" + ], + "image_footnote": [], + "bbox": [ + 200, + 90, + 251, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a867fb9336b76f49bdef0eb9f5ca4a98ce74d67ff5d93e77159c51affc0563ee.jpg", + "image_caption": [ + "Aug DH-AUG" + ], + "image_footnote": [], + "bbox": [ + 254, + 90, + 343, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4b796d4bc4f48b9aa7f01a990953389b7021f4efef90a74237f469707e67bcd1.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 344, + 90, + 403, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b5b53cd399848bed410593791d913ddb58cea6fa53e9750ba6f16fbec964d5d8.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 403, + 90, + 475, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8b67c42b1404be43128773873db50b8941e0ca47c9d424a40e31c08fc054edcb.jpg", + "image_caption": [ + "2D Predictions" + ], + "image_footnote": [], + "bbox": [ + 483, + 88, + 557, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/22c84e246d36d9a940072f10243dca940db9f1f00be9fa00018446e74b2b8a20.jpg", + "image_caption": [ + "Source-only" + ], + "image_footnote": [], + "bbox": [ + 557, + 90, + 647, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/729ee476b5eaeb79e8f0e1da7401f939710b23069a4c3826c52b4d218bf50b34.jpg", + "image_caption": [ + "Aug.", + "DH-AUG" + ], + "image_footnote": [], + "bbox": [ + 647, + 90, + 728, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1d3973dd2e03defbb681c3d390c711fc12ae79f2d36671cf16ce39050226e77f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 90, + 756, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1aa113f5f3e1dd54d0f99b0fdc3faa9b290f192a48b343006994985d37504da9.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 754, + 90, + 825, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/49fc8a54aee6614591d8d3927daa65202e3b95baec84945c5bcabcb2a82d3e1b.jpg", + "image_caption": [ + "GT" 
+ ], + "image_footnote": [], + "bbox": [ + 828, + 90, + 892, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/931d87359b051ce269c7c90069438e97dfe52456d9bf7de8e22e003dbe54d4c8.jpg", + "image_caption": [ + "2D Predictions", + "Figure 5. Results on Cross-scenario evaluation. Left is for task S1,S5,S6,S7,S8 $\\rightarrow$ S9,S11, and right is for task S1,S5 $\\rightarrow$ S6,S7,S8." + ], + "image_footnote": [], + "bbox": [ + 78, + 227, + 148, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9a6564c1f032822faa2f16bf41617166d653917e6cbb2ad5d22335fe9bd90e92.jpg", + "image_caption": [ + "Figure 4. Qualitative results on Cross-dataset evaluation. Left is 3DHP dataset, and right is 3DPW dataset.", + "Source-only" + ], + "image_footnote": [], + "bbox": [ + 148, + 228, + 212, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6d44c3cf69ab1ed91effee01703bd7a3e01fd67d63ac9b736b05c007116f522f.jpg", + "image_caption": [ + "PoseAug" + ], + "image_footnote": [], + "bbox": [ + 214, + 228, + 277, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/15bd7399286acf4524b3604d612a55b22f5b1371bea43ed91d58fd6c2ad0a3d0.jpg", + "image_caption": [ + "DH-AUG" + ], + "image_footnote": [], + "bbox": [ + 279, + 228, + 343, + 279 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/33bb676595dd8358d94901149c89c4f6c70f0dfdb0131e37e72c9e87baa66d28.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 344, + 228, + 408, + 279 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a86d6f99b3618f6a7280d3e64c4f6c46e67f2d9b7c8db81fcd43e88c767393fd.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 408, + 228, + 473, + 279 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/358d4cb812d51a63539bbf1cd1fa79b57dc51c312f1f09705d42ad775845f3b2.jpg", + "image_caption": [ + "2D Predictions" + ], + "image_footnote": [], + "bbox": [ + 483, + 228, + 557, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a8ff924ae78809ef520009d05e5aba4bec4bfeb7730d1ee1c64eab2eb123eda5.jpg", + "image_caption": [ + "Source-only" + ], + "image_footnote": [], + "bbox": [ + 558, + 228, + 633, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6d2def2010f88a8f77d2255e89fda3a198eb72c03ee0c328cefb70132fb2411d.jpg", + "image_caption": [ + "PoseAug" + ], + "image_footnote": [], + "bbox": [ + 633, + 228, + 689, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cb116c9311744f80b515f493986436d19d132f590fde21a99a0305eb7c1db38c.jpg", + "image_caption": [ + "DH-AUG" + ], + "image_footnote": [], + "bbox": [ + 691, + 228, + 756, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/87595cde366a5d8aacd989384ae4920bfa2b2d0c6be7a4ffc4d3f96f37c7b884.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 756, + 228, + 821, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c0997b28caa17fd803835075fb7fcf80e79ab248f9dfcb280ee449c981dd6edf.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 823, + 228, + 888, + 280 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/4332e622e1370f17764e0e8c7e6136a4ee0ee69465d7a6050466ba6dff4a87cc.jpg", + "table_caption": [ + "Table 4. Cross-scenario evaluation on Partial Human3.6M dataset. 
For the task \"S1\", S1 is the source and S5, S6, S7, S8 are the target. For the task \"S1+S5\", S1 and S5 are the source, and S6, S7, S8 are the target. MPJPE $(\\downarrow)$ is used for evaluation." + ], + "table_footnote": [], + "table_body": "
Method | Venue | DG | S1 | S1+S5
VPose (1-frame) [23] | CVPR'19 | × | 65.2 | 57.9
EvoSkeleton [17] | CVPR'20 | | 61.5 | 54.6
PoseAug [37] | TPAMI'23 | | 56.7 | 51.3
DH-AUG [11] | ECCV'22 | | 52.2 | 47.0
CEE-Net [16] | AAAI'23 | | 51.9 | 46.7
Ours | | | 50.3 | 45.4
", + "bbox": [ + 78, + 378, + 468, + 478 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/3ab9956c59add66cf4fa963f320ab42834dd7ad211d477a525325a22adaa45e3.jpg", + "table_caption": [ + "Table 5. Cross-dataset evaluation with MPJPE (↓) on 3DHP with varied 2D predictions and 2D-to-3D backbones (1-frame)." + ], + "table_footnote": [], + "table_body": "
Method | DG | DET [5] | CPN [3] | HR [33] | GT
SemGCN [38] | × | 101.9 | 98.7 | 95.6 | 97.4
SemGCN + PoseAug [37] | | 89.9 | 89.3 | 89.1 | 86.1
SemGCN + CEE-generator [16] | | 83.6 | 82.8 | 82.4 | 81.3
SemGCN + DH-AUG [11] | | 79.7 | 76.7 | 73.0 | 71.3
SemGCN + Ours | | 76.5 | 74.1 | 70.7 | 68.9
VPose [23] | × | 92.6 | 89.8 | 85.6 | 86.6
VPose + PoseAug [37] | | 78.3 | 78.4 | 73.2 | 73.0
VPose + CEE-generator [16] | | 75.6 | 75.2 | 71.2 | 71.4
VPose + DH-AUG [11] | | 76.7 | 74.8 | 71.1 | 71.2
VPose + Ours | | 72.4 | 70.9 | 62.4 | 63.1
PoseFormer [43] | × | 91.9 | 89.2 | 84.2 | 85.7
PoseFormer + PoseAug [37] | | 77.7 | 77.5 | 72.1 | 72.3
PoseFormer + CEE-generator [16] | | - | - | - | -
PoseFormer + DH-AUG [11] | | 75.6 | 74.8 | 71.6 | 72.0
PoseFormer + Ours | | 72.2 | 70.5 | 62.8 | 63.4
MixSTE [36] | × | 90.6 | 87.4 | 82.0 | 84.0
MixSTE + PoseAug [37] | | 76.1 | 76.3 | 71.7 | 71.6
MixSTE + CEE-generator [16] | | - | - | - | -
MixSTE + DH-AUG [11] | | 74.8 | 74.4 | 70.9 | 70.7
MixSTE + Ours | | 70.5 | 68.2 | 60.4 | 61.0
", + "bbox": [ + 78, + 520, + 468, + 723 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "PCK and $2.5\\%$ in AUC, and reduces MPJPE by $6.6\\mathrm{mm}$ in the 3DHP task. In the case of 3DPW, our method shows an improvement of $3.6\\mathrm{mm}$ in PA-MPJPE compared to CEE-Net [16]. While CEE-Net [16] and PoseGU [7] do not disclose their codes or report their results on MPJPE, it is evident that our method surpasses DH-AUG [11] by $6.2\\mathrm{mm}$ .", + "bbox": [ + 75, + 733, + 468, + 823 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Cross-scenario evaluation results. In cross-scenario evaluations, source and target come from different subsets of the same dataset. Maintaining consistency with previous works, we utilize ground truth 2D keypoints as input and single-frame VPose [23] as the 2D-to-3D lifting network.", + "bbox": [ + 75, + 825, + 468, + 898 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For the situation of using Entire Human3.6M in Tab. 3, our method demonstrates superior performance compared to CEE-Net [16] with a $2.9\\mathrm{mm}$ reduction in MPJPE and a $2.2\\mathrm{mm}$ improvement in PA-MPJPE. In the case of using partial Human3.6M in Tab. 4, our approach surpasses CEE-Net [16] by $1.6\\mathrm{mm}$ in the S1 task and $1.3\\mathrm{mm}$ in the S1+S5 task based on the MPJPE metric.", + "bbox": [ + 496, + 321, + 890, + 426 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results with varied 2D predictions and backbones. The results presented in Tables 1 to 4 are confined to the usage of ground truth 2D input and single-frame VPose backbone, which may raise concerns about the universality of the proposed method. To address this concern, we assess the performance of our approach with various 2D predictions such as DET [5], CPN [3], HRNet [33], and diverse lifting backbones including SemGCN [38], PoseFormer [43], and MixSTE [36], as displayed in Table 5. In this evaluation, 3DHP serves as the dataset, and MPJPE is the metric used. Notably, all the listed backbones are single-frame versions. As CEE-Net [16] only provides results for its generation part, CEE-generator, and does not offer open-source code, we have included partial results of CEE-generator. From Table 5, it is evident that our method surpasses all the existing methods, demonstrating the robustness of our framework across various settings.", + "bbox": [ + 496, + 428, + 890, + 684 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Qualitative Results", + "text_level": 1, + "bbox": [ + 500, + 695, + 681, + 710 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fig. 4 shows qualitative results on cross-dataset evaluation (3DHP on the left side and 3DPW on the right side), while Fig. 5 displays qualitative results on cross-scenario evaluation (S1,S5,S6,S7,S8 $\\rightarrow$ S9,S11 on the left side and S1,S5 $\\rightarrow$ S6,S7,S8 on the right side). HRNet [33] is applied as the 2D pose estimator and VPose [23] is the 2D-to-3D lifting backbone. We use Source-only, PoseAug [37], DH-AUG [11], Ours, and Ground Truth (GT) for qualitative comparison. Because CEE-Net does not provide source codes or pretrained models, we cannot generate visual examples from it. It is evident that our method outperforms other baselines significantly.", + "bbox": [ + 496, + 719, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "2246", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. 
Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 90, + 230, + 107 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation study on the overall framework. The ablation study is conducted to illustrate the functions of three proposed modules: differential generation (DiffGen) in Sec. 3.3, differential discrimination (DiffDis) in Sec. 3.3, and meta optimization (MetaOpt) in Sec. 3.4. In Tab. 6, we apply ground truth as 2D predictions and VPose as the backbone. The absence of DiffGen leads to a decrease in PCK and AUC by $2.4\\%$ and $2.1\\%$ respectively, accompanied by a $5.9\\mathrm{mm}$ increase in MPJPE on 3DHP, while it increases PA-MPJPE and MPJPE by $3.1\\mathrm{mm}$ and $5.2\\mathrm{mm}$ separately on 3DPW. Similarly, the exclusion of DiffDis results in a decrease of $1.7\\%$ in both PCK and AUC, with a corresponding $4.1\\mathrm{mm}$ increase in MPJPE on 3DHP. As for 3DPW, the removal causes a degradation of $1.9\\mathrm{mm}$ in PA-MPJPE and $3.0\\mathrm{mm}$ in MPJPE. Besides, the removal of MetaOpt leads to a decline in PCK and AUC by $1.2\\%$ and $0.8\\%$ respectively, along with a $2.4\\mathrm{mm}$ increase in MPJPE on 3DHP, and an increase of $1.3\\mathrm{mm}$ and $1.8\\mathrm{mm}$ in PA-MPJPE and MPJPE separately. These results show that each module plays a critical role in obtaining better generalization.", + "bbox": [ + 75, + 113, + 472, + 417 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/f19feb37ace070267065bcea200ca931c970e5ae6c06334aa935135b31396242.jpg", + "table_caption": [ + "Table 6. Overall framework ablation study on 3DHP and 3DPW" + ], + "table_footnote": [], + "table_body": "
 | 3DHP | 3DHP | 3DHP | 3DPW | 3DPW
Method | PCK ↑ | AUC ↑ | MPJPE ↓ | PA-MPJPE ↓ | MPJPE ↓
Ours w/o DiffGen | 90.5 | 58.6 | 69.0 | 76.3 | 111.8
Ours w/o DiffDis | 91.2 | 59.0 | 67.3 | 75.1 | 109.6
Ours w/o MetaOpt | 91.7 | 59.9 | 65.5 | 74.5 | 108.4
Ours | 92.9 | 60.7 | 63.1 | 73.2 | 106.6
", + "bbox": [ + 78, + 441, + 468, + 507 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation study on the generators. There exist two generators in our framework, and each with two pair groups. In this part, we discuss the functions of proximate pairs in weak augmentor (W-PP), proximate pairs in strong aug-. mentor (S-PP), one-state gap pairs in weak augmentor (WOG), and one-state gap pairs in strong augmentor (S-OG).", + "bbox": [ + 75, + 515, + 468, + 608 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/067c6cdadefb488d326227d072de20adb8def617de015df204962b9d903aa48a.jpg", + "table_caption": [ + "Table 7. Generators' ablation study on 3DHP and 3DPW" + ], + "table_footnote": [], + "table_body": "
 | 3DHP | 3DHP | 3DHP | 3DPW | 3DPW
Method | PCK ↑ | AUC ↑ | MPJPE ↓ | PA-MPJPE ↓ | MPJPE ↓
Ours w/o W-PP | 88.3 | 57.5 | 72.6 | 81.7 | 118.8
Ours w/o S-PP | 90.8 | 58.2 | 71.3 | 78.1 | 111.0
Ours w/o W-OG | 92.1 | 59.6 | 65.8 | 74.7 | 108.7
Ours w/o S-OG | 91.4 | 58.9 | 68.2 | 75.4 | 109.5
Ours | 92.9 | 60.7 | 63.1 | 73.2 | 106.6
", + "bbox": [ + 78, + 631, + 468, + 709 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Table 7, excluding W-PP or S-PP leads to a significant decline in PCK by $4.6\\%$ and $2.1\\%$ , and in AUC by $3.2\\%$ and $2.5\\%$ respectively, accompanied by a notable increase of $9.5\\mathrm{mm}$ and $8.2\\mathrm{mm}$ in MPJPE separately on 3DHP. These results emphasize the critical role of maintaining similarity in proximate pairs for both weak and strong augmentors, serving as the fundamental basis for generating effective and reasonable synthesized poses. Moreover, the absence of W-OG leads to a decline in PCK and AUC by $0.8\\%$ and $1.1\\%$ respectively, with a corresponding $1.7\\mathrm{mm}$ increase in the MPJPE on 3DHP. The removal of S-OG results in a decrease in PCK and AUC scores by $1.5\\%$ and $1.8\\%$ respec", + "bbox": [ + 75, + 719, + 472, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "tively, along with a $3.4\\mathrm{mm}$ increase in MPJPE on 3DHP. These results highlight the significance of maintaining differentiation between the weak augmentor and the strong augmentor during the generation process, where enlarging dissimilarity in S-OG is more important in discriminating these two generators.", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation study on the number of augmentors. Comparisons were conducted between our dual-augmentor framework and single-augmentor frameworks. Alongside our proposed framework, two single-augmentor frameworks were considered in the ablation study, utilizing either the weak-augmentor (WA) or the strong-augmentor (SA). The discrimination and meta-optimization processes exclusively involved source poses and one category of synthesized poses. The results, using ground truth as 2D predictions and VPose as the backbone, are presented in Table 8.", + "bbox": [ + 496, + 181, + 892, + 333 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/e376a1c07baf8cfc4af89cb42d582f4c5d592206d0972b59f99b4f2de456145c.jpg", + "table_caption": [ + "Table 8. Ablation study of number of augmentors on 3DHP and 3DPW" + ], + "table_footnote": [], + "table_body": "
 | 3DHP | 3DHP | 3DHP | 3DPW | 3DPW
Method | PCK ↑ | AUC ↑ | MPJPE ↓ | PA-MPJPE ↓ | MPJPE ↓
WA | 87.3 | 56.0 | 74.5 | 80.5 | 117.7
SA | 89.8 | 57.8 | 71.0 | 79.1 | 111.4
Ours | 92.9 | 60.7 | 63.1 | 73.2 | 106.6
", + "bbox": [ + 501, + 373, + 890, + 438 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "From Table 8, it is evident that our proposed framework surpasses both WA and SA significantly, underscoring the superiority of employing two augmentors over a single aug- mentor in addressing DG for 3D HPE. Furthermore, SA outperforms WA, emphasizing the greater significance of exploring out-of-source distributions compared to retaining source-relevant knowledge in cross-dataset tasks.", + "bbox": [ + 496, + 449, + 893, + 555 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 571, + 619, + 587 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose a novel dual-augmentor framework designed to enhance domain generalization in 3D human pose estimation. Our framework addresses the critical aspects of data augmentation and the effective utilization of synthesized data. To achieve this, we implement distinctive strategies for the weak and strong generators, ensuring the preservation of source-specific information while simultaneously exploring out-of-source distributions. Moreover, we incorporate meta-optimization techniques to facilitate enhanced interaction among source data, weak-augmented data, and strong-augmented data, thereby simulating domain shifts in the training of pose estimator and fostering the acquisition of domain-invariant knowledge. Extensive experimentation and comprehensive analysis conducted across multiple datasets demonstrate the superior performance of our proposed approach over existing state-of-the-art methods.", + "bbox": [ + 496, + 597, + 893, + 852 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements This material is based upon work supported by the National Science Foundation under Grant CNS-1910844.", + "bbox": [ + 496, + 854, + 893, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "2247", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Maher Baili, Philippe Wenger, and Damien Chablat. A classification of 3r orthogonal manipulators by the topology of their workspace. In IEEE International Conference on Robotics and Automation, 2004. Proceedings. ICRA'04. 2004, pages 1933-1938. IEEE, 2004. 2", + "[2] Wenhao Chai, Zhongyu Jiang, Jenq-Neng Hwang, and Gaoang Wang. Global adaptation meets local generalization: Unsupervised domain adaptation for 3d human pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14655-14665, 2023. 2, 3", + "[3] Yilun Chen, Zhicheng Wang, Yuxiang Peng, Zhiqiang Zhang, Gang Yu, and Jian Sun. Cascaded pyramid network for multi-person pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7103-7112, 2018. 7", + "[4] Mohsen Gholami, Bastian Wandt, Helge Rhodin, Rabab Ward, and Z Jane Wang. Adaptpose: Cross-dataset adaptation for 3d human pose estimation by learnable motion generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13075-13085, 2022. 2, 3, 6", + "[5] Ross Girshick, Ilija Radosavovic, Georgia Gkioxari, Piotr Dólar, and Kaiming He. Detector. 
https://github.com/facebookresearch/detectron, 2018.7", + "[6] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. Advances in neural information processing systems, 27, 2014. 2, 3, 5", + "[7] Shannan Guan, Haiyan Lu, Linchao Zhu, and Gengfa Fang. Posegu: 3d human pose estimation with novel human pose generator and unbiased learning. Computer Vision and Image Understanding, 233:103715, 2023. 1, 2, 3, 6, 7", + "[8] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. Advances in neural information processing systems, 30, 2017. 5", + "[9] Vladimir Guzov, Aymen Mir, Torsten Sattler, and Gerard Pons-Moll. Human positioning system (hps): 3d human pose estimation and self-localization in large scenes from body-mounted sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4318-4329, 2021. 1", + "[10] Yilei Hua, Wenhan Wu, Ce Zheng, Aidong Lu, Mengyuan Liu, Chen Chen, and Shiqian Wu. Part aware contrastive learning for self-supervised action recognition. In International Joint Conference on Artificial Intelligence, 2023. 1", + "[11] Linzhi Huang, Jiahao Liang, and Weihong Deng. Dh-aug: Dh forward kinematics model driven augmentation for 3d human pose estimation. In European Conference on Computer Vision, pages 436-453. Springer, 2022. 1, 2, 3, 5, 6, 7", + "[12] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environ-" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ments. IEEE transactions on pattern analysis and machine intelligence, 36(7):1325-1339, 2013. 6", + "[13] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6", + "[14] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. Vibe: Video inference for human body pose and shape estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5253-5263, 2020. 6", + "[15] Da Li, Yongxin Yang, Yi-Zhe Song, and Timothy Hospedales. Learning to generalize: Meta-learning for domain generalization. In Proceedings of the AAAI conference on artificial intelligence, 2018. 2", + "[16] Haolun Li and Chi-Man Pun. Cee-net: Complementary end-to-end network for 3d human pose generation and estimation. Proceedings of the AAAI Conference on Artificial Intelligence, 37(1):1305-1313, 2023. 2, 3, 4, 6, 7", + "[17] Shichao Li, Lei Ke, Kevin Pratama, Yu-Wing Tai, Chi-Keung Tang, and Kwang-Ting Cheng. Cascaded deep monocular 3d human pose estimation with evolutionary training data. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6173–6183, 2020. 2, 6, 7", + "[18] Hanbing Liu, Jun-Yan He, Zhi-Qi Cheng, Wangmeng Xiang, Qize Yang, Wenhao Chai, Gaoang Wang, Xu Bao, Bin Luo, Yifeng Geng, et al. Posynda: Multi-hypothesis pose synthesis domain adaptation for robust 3d human pose estimation. In Proceedings of the ACM International Conference on Multimedia, 2023. 2, 3", + "[19] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2018. 6", + "[20] Zhengzhi Lu, He Wang, Ziyi Chang, Guoan Yang, and Hubert P. H. Shum. 
Hard no-box adversarial attack on skeleton-based human action recognition with skeleton-motion-informed gradient. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4597-4606, 2023. 1", + "[21] Toshihiko Matsuura and Tatsuya Harada. Domain generalization using a mixture of multiple latent domains. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 11749-11756, 2020. 2", + "[22] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 2017 international conference on 3D vision (3DV), pages 506-516. IEEE, 2017. 6", + "[23] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 3d human pose estimation in video with temporal convolutions and semi-supervised training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7753-7762, 2019. 2, 3, 6, 7", + "[24] Qucheng Peng. Multi-source and Source-Private Cross-Domain Learning for Visual Recognition. PhD thesis, Purdue University, 2022. 2" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "2248", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[25] Qucheng Peng, Zhengming Ding, Lingjuan Lyu, Lichao Sun, and Chen Chen. Rain: regularization on input and network for black-box domain adaptation. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, pages 4118–4126, 2023. 2", + "[26] Qucheng Peng, Ce Zheng, and Chen Chen. Source-free domain adaptive human pose estimation. In 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pages 4803-4813. IEEE, 2023. 2", + "[27] Ekkasit Pinyoanuntapong, Ayman Ali, Kalvik Jakkala, Pu Wang, Minwoo Lee, Qucheng Peng, Chen Chen, and Zhi Sun. Gaitsada: Self-aligned domain adaptation for mmwave gait recognition. In 2023 IEEE 20th International Conference on Mobile Ad Hoc and Smart Systems (MASS), pages 218-226. IEEE, 2023. 2", + "[28] Fengchun Qiao, Long Zhao, and Xi Peng. Learning to learn single domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12556-12565, 2020. 2", + "[29] Chi Su, Jianing Li, Shiliang Zhang, Junliang Xing, Wen Gao, and Qi Tian. Pose-driven deep convolutional model for person re-identification. In Proceedings of the IEEE international conference on computer vision, pages 3960–3969, 2017. 1", + "[30] Adith Swaminathan and Thorsten Joachims. Counterfactual risk minimization: Learning from logged bandit feedback. In International Conference on Machine Learning, pages 814-823. PMLR, 2015. 2", + "[31] Timo Von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In Proceedings of the European conference on computer vision (ECCV), pages 601-617, 2018. 6", + "[32] Bastian Wandt and Bodo Rosenhahn. Repnet: Weakly supervised training of an adversarial reprojection network for 3d human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7782-7791, 2019. 6", + "[33] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, et al. 
Deep high-resolution representation learning for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 43(10):3349-3364, 2020. 2, 7", + "[34] Hong Yan, Yang Liu, Yushen Wei, Zhen Li, Guanbin Li, and Liang Lin. Skeletonmae: Graph-based masked autoencoder for skeleton sequence pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5606-5618, 2023. 1", + "[35] Hongwei Yi, Chun-Hao P Huang, Shashank Tripathi, Lea Hering, Justus Thies, and Michael J Black. Mime: Human-aware 3d scene generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12965-12976, 2023. 1", + "[36] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Jun-song Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. In Proceedings of" + ], + "bbox": [ + 78, + 90, + 470, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "the IEEE/CVF conference on computer vision and pattern recognition, pages 13232-13242, 2022. 2, 3, 7", + "[37] Jianfeng Zhang, Kehong Gong, Xinchao Wang, and Jiashi Feng. Learning to augment poses for 3d human pose estimation in images and videos. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(8):10012-10026, 2023. 1, 2, 3, 4, 5, 6, 7", + "[38] Long Zhao, Xi Peng, Yu Tian, Mubbasir Kapadia, and Dimitris N Metaxas. Semantic graph convolutional networks for 3d human pose regression. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3425-3435, 2019. 2, 3, 7", + "[39] Long Zhao, Ting Liu, Xi Peng, and Dimitris Metaxas. Maximum-entropy adversarial data augmentation for improved generalization and robustness. Advances in Neural Information Processing Systems, 33:14435–14447, 2020. 2", + "[40] Qitao Zhao, Ce Zheng, Mengyuan Liu, Pichao Wang, and Chen Chen. Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8877-8886, 2023. 2", + "[41] Shanshan Zhao, Mingming Gong, Tongliang Liu, Huan Fu, and Dacheng Tao. Domain generalization via entropy regularization. Advances in Neural Information Processing Systems, 33:16096-16107, 2020. 2", + "[42] Yuyang Zhao, Zhun Zhong, Na Zhao, Nicu Sebe, and Gim Hee Lee. Style-hallucinated dual consistency learning for domain generalized semantic segmentation. In European Conference on Computer Vision, pages 535-552. Springer, 2022. 2", + "[43] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11656-11665, 2021. 2, 3, 7", + "[44] Ce Zheng, Xianpeng Liu, Guo-Jun Qi, and Chen Chen. Potter: Pooling attention transformer for efficient human mesh recovery. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 1", + "[45] Ce Zheng, Matias Mendieta, Taojiannan Yang, Guo-Jun Qi, and Chen Chen. Feater: An efficient network for human reconstruction via feature map-based transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 1", + "[46] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: A unified perspective on learning human motion representations. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 2" + ], + "bbox": [ + 501, + 92, + 893, + 781 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "2249", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/2e309a58-2e8d-4563-8890-368854bbd34f_model.json b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/2e309a58-2e8d-4563-8890-368854bbd34f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e33aa4d1340ea906ac897e6b8ec7bc3b0734ba2f --- /dev/null +++ b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/2e309a58-2e8d-4563-8890-368854bbd34f_model.json @@ -0,0 +1,2827 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.002, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.131, + 0.88, + 0.175 + ], + "angle": 0, + "content": "A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.333, + 0.205, + 0.639, + 0.222 + ], + "angle": 0, + "content": "Qucheng Peng, Ce Zheng, Chen Chen" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.222, + 0.764, + 0.24 + ], + "angle": 0, + "content": "Center for Research in Computer Vision, University of Central Florida" + }, + { + "type": "text", + "bbox": [ + 0.242, + 0.242, + 0.728, + 0.256 + ], + "angle": 0, + "content": "{qucheng.peng,ce.zheng}@ucf.edu,chen.chen@crcv.ucf.edu" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.292, + 0.314, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.325, + 0.473, + 0.568 + ], + "angle": 0, + "content": "3D human pose data collected in controlled laboratory settings present challenges for pose estimators that generalize across diverse scenarios. To address this, domain generalization is employed. Current methodologies in domain generalization for 3D human pose estimation typically utilize adversarial training to generate synthetic poses for training. Nonetheless, these approaches exhibit several limitations. First, the lack of prior information about the target domain complicates the application of suitable augmentation through a single pose augmentor; affecting generalization on target domains. Moreover, adversarial training's discriminator tends to enforce similarity between source and synthesized poses, impeding the exploration of out-of-source distributions. Furthermore, the pose estimator's optimization is not exposed to domain shifts, limiting its overall generalization ability." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.569, + 0.473, + 0.78 + ], + "angle": 0, + "content": "To address these limitations, we propose a novel framework featuring two pose augmentors: the weak and the strong augmentors. 
Our framework employs differential strategies for generation and discrimination processes, facilitating the preservation of knowledge related to source poses and the exploration of out-of-source distributions without prior information about target poses. Besides, we leverage meta-optimization to simulate domain shifts in the optimization process of the pose estimator, thereby improving its generalization ability. Our proposed approach significantly outperforms existing methods, as demonstrated through comprehensive experiments on various benchmark datasets. Our code will be released at https://github.com/davidpengucf/DAF-DG." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.814, + 0.21, + 0.829 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.903 + ], + "angle": 0, + "content": "3D human pose estimation (HPE) is the process of predicting the 3D coordinates of human joints from images or videos. It serves as the foundation for various applications including person re-identification [29], action recognition" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.288, + 0.892, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.48, + 0.895, + 0.592 + ], + "angle": 0, + "content": "Figure 1. Comparisons between existing single-augmentor frameworks and our proposed dual-augmentor framework on a toy example. Current single-augmentor methods excel at simulating Target Domain 2 but exhibit limitations in simulating Target Domain 1, closely resembling the source, and Target Domain 3, deviating significantly from the source. In our framework, the weak aug- mentor excels in simulating Target Domain 1, while the strong augmentor effectively imitates both Target Domain 2 and 3." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.611, + 0.895, + 0.809 + ], + "angle": 0, + "content": "[10, 20, 34], human mesh recovery [44, 45], virtual reality [9, 35]. However, the annotated 3D data are often collected in controlled laboratory environments for convenience, featuring indoor settings and limited actions performed by few individuals. As a result, pose estimators trained on these labeled datasets face challenges in generalizing to varied in-the-wild scenarios. Hence, the notion of domain generalization (DG) is pivotal in incorporating knowledge from labeled (source) data into a pose estimator that could generalize well on unseen (target) data. Unlike domain adaptation (DA) which involves the training with target data, DG relies solely on the source data as a reference, without any prior information about the target data." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Existing DG approaches for 3D HPE [7, 11, 37] conduct substantial augmentations on the source poses to obtain synthesized poses via adversarial training, and incorporate the synthesized poses as complementary to the source poses for HPE model training. However, these methods have several limitations. First, in the context of DG for 3D HPE, there" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2240" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.395 + ], + "angle": 0, + "content": "is a complete lack of information about poses in target domains. If target poses closely resemble the source (as Target Domain 1 in Fig. 
1), poses generated by extensive augmentation notably differ from the target, thereby impeding generalization. Conversely, when target poses significantly deviate from the source distributions (as Target Domain 3 in Fig. 1), poses generated via insufficient augmentation may not sufficiently explore out-of-source knowledge to simulate the target. Existing methods only use a single augmentor, making it challenging to simultaneously achieve both objectives. Second, the adversarial training between synthesized and source poses constrains the diversity of generation. Existing methods typically employ the generative adversarial network (GAN) [6] structure, which includes one pose generator responsible for pose generation and one discriminator to assist the pose generator by providing feedback. Specifically, the discriminator enforces similarity between synthesized and source poses, aiming to ensure that the generated poses closely resemble the source poses, which harms the exploration of out-of-source knowledge." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.398, + 0.473, + 0.746 + ], + "angle": 0, + "content": "To address these limitations, we propose a novel framework featuring two pose augmentors: the weak augmentor and the strong augmentor. The weak augmentor is designed to simulate target poses closely resembling source poses, while the strong augmentor generates target poses that exhibit significant deviations from source distributions. To delineate their characteristics, differential strategies are employed for generation and discrimination processes, as detailed in Sec. 3.3. Notably, our framework alleviates the constraints on strong-augmented poses by traditional adversarial training methods. Instead of enforcing similarity between the source and all the augmented poses, we utilize weak-augmented poses as an intermediary, enabling discrimination between strong- and weak-augmented poses and facilitating discrimination between source and weak-augmented poses. To optimize the utilization of synthesized poses, we introduce meta-optimization among source, weak-augmented, and strong-augmented poses, as elaborated in Sec. 3.4. Our training process exposes the pose estimator to domain shifts during the optimization processes, thereby enhancing its adaptability to handle domain shifts during the inference stage. Our contributions can be summarized in three main aspects:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.75, + 0.47, + 0.84 + ], + "angle": 0, + "content": "- We propose a novel framework featuring both the weak and strong pose augmentors, which effectively preserves knowledge related to source poses while simultaneously exploring out-of-source distributions through differential strategies for the generation and discrimination processes of the two augmentors." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.841, + 0.47, + 0.902 + ], + "angle": 0, + "content": "- We introduce meta-optimization to enhance the utilization of synthesized poses. By simulating domain shifts among source, weak-augmented, and strong-augmented poses during the optimization processes, the pose estima" + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.75, + 0.47, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.823, + 0.106 + ], + "angle": 0, + "content": "tor's generalization ability is further improved." 
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.107, + 0.892, + 0.168 + ], + "angle": 0, + "content": "- We conduct comprehensive experiments on several benchmark datasets, and the results demonstrate that our approach significantly outperforms state-of-the-art methods by a considerable margin." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.181, + 0.642, + 0.196 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.206, + 0.892, + 0.418 + ], + "angle": 0, + "content": "3D Human Pose Estimation. The widely adopted two-stage technique in 3D HPE, as demonstrated in [23, 36, 38, 40, 43, 46], initially employs 2D human pose estimators [17, 26, 27, 33] for 2D pose predictions, followed by lifting these predictions to 3D poses. Among these approaches, [38] integrates graph-structured semantic information to enhance the estimation process, while [23] utilizes dilated temporal convolutional layers for temporal information encoding, and [43] presents a purely transformer-based 3D approach. Moreover, [36] effectively models inter-frame correspondences with a mixed sequence-to-sequence encoder, and recent works such as [40] explore the frequency domain to improve inference efficiency, and [46] employs unified representation learning for 3D human poses." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.418, + 0.893, + 0.674 + ], + "angle": 0, + "content": "Domain Generalization. Current DG methods aim to learn domain-invariant representations and are categorized into three types: domain alignment [21, 41], meta-learning [15, 28], and augmentation strategies [24, 25, 39, 42]. For domain alignment, [41] enhances the conditional invariance of learned features by incorporating an entropy regularization term, leading to improved classifier generalization. [21] iteratively segregates samples into latent domains through clustering. Concerning meta-learning, [15] proposes a model-agnostic training procedure that simulates domain shift during training, whereas [28] applies meta-learning to single-domain generalization. Regarding augmentation strategies, [39] introduces a novel regularization term for adversarial data augmentation derived from the information bottleneck principle, while [42] presents a unique style hallucination module to generate style-diversified samples crucial for generalization." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.739 + ], + "angle": 0, + "content": "Differing from the current DG approaches for 3D HPE that focus solely on augmentations, we also incorporate meta-learning-based approaches to enhance generalization. Cross-domain Learning for 3D Human Pose Estimation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Cross-domain learning for 3D HPE is categorized into two types: domain generalization [7, 11, 16, 37] and domain adaptation [2, 4, 18]. In domain generalization, training processes exclusively utilizes source data, and the resulting model is directly applied to infer target data. [37] adjusts various geometry factors of human poses through differentiable operations. [11] applies DH Forward Kinematics [1] to drive 3D pose augmentation and obtain diverse poses. [7] incorporates Counterfactual Risk Minimization [30] to achieve unbiased learning. 
[16] addresses with network designs like the interpolation sub-net and body-parts grouping" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "2241" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.212 + ], + "angle": 0, + "content": "net. In domain adaptation, labeled source data and unlabeled target data are used simultaneously during the training process. [4] employs generative adversarial network [6] to discriminate between source and target during training. [2] utilizes global position alignment and local pose augmentation to transfer from source to target. [18] employs a multi-hypotheses network along with target-specific source augmentation for the problem." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.215, + 0.471, + 0.276 + ], + "angle": 0, + "content": "In this paper, we focus on domain generalization for 3D HPE. In addition to synthesizing novel 3D poses for better generalization, we also simulate domain shifts by using both source and synthesized poses during optimizations." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.294, + 0.212, + 0.312 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.321, + 0.218, + 0.336 + ], + "angle": 0, + "content": "3.1. Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.347, + 0.47, + 0.528 + ], + "angle": 0, + "content": "2D-to-3D lifting Paradigm of 3D HPE. Current 2D-to-3D lifting paradigm of 3D HPE [23, 36, 38, 43] assumes that \\( x_{i}^{s} \\in \\mathbb{R}^{J \\times 2} \\) represents the 2D coordinates of \\( J \\) keypoints of a sample in the source domain (2D poses as input), and \\( y_{i}^{s} \\in \\mathbb{R}^{J \\times 3} \\) represents the corresponding 3D positions in the camera coordinate system (3D poses as output), we denote the source domain with \\( N \\) samples as \\( S = \\{(x_{i}^{s}, y_{i}^{s})\\}_{i=1}^{N} \\), encompassing \\( N \\) 2D-3D pairs. Moreover, we define the pose estimator as \\( \\mathcal{P}: x_{i}^{s} \\mapsto \\hat{y}_{i}^{s} \\), where \\( \\hat{y}_{i}^{s} \\) represents the predicted corresponding 3D pose positions. For a fully supervised human pose estimation problem, we aim to achieve an ideal \\( \\mathcal{P} \\) by solving the following optimization objective:" + }, + { + "type": "equation", + "bbox": [ + 0.157, + 0.549, + 0.469, + 0.571 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\mathcal {P}} \\mathbb {E} _ {\\left(x _ {i} ^ {s}, y _ {i} ^ {s}\\right) \\in \\mathcal {S}} \\mathcal {L} _ {M S E} \\left(\\mathcal {P} \\left(x _ {i} ^ {s}\\right), y _ {i} ^ {s}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.582, + 0.47, + 0.658 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_{MSE}\\) represents the Mean Squared Error (MSE) loss. However, the objective [23, 43] is designed to achieve optimal performance on source poses, rendering it inadequate for addressing the DG problem, as it does not account for the domain gap between source and target domains." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.659, + 0.471, + 0.902 + ], + "angle": 0, + "content": "DG for 3D HPE. Within the paradigm of 2D-to-3D lifting HPE, our primary goal is to derive an estimator \\(\\mathcal{P}\\) that demonstrates commendable 3D HPE performance specifically within the target domain \\(T\\). 
Under this scenario, the target domain \\(T = \\{(x_j^t,y_j^t)\\}_{j = 1}^M\\) with \\(M\\) samples can only be used for inference and is not involved in the training process. However, when utilizing solely the original source domain, the pose estimator cannot learn out-of-source distributions, which is essential for achieving good performance on the target domain. Existing methods [7, 11, 16, 37] tend to conduct augmentation to the original source poses to enhance data diversity, thereby improving the estimator's generalization ability. The augmentor is denoted as \\(\\mathcal{A}:y_i^s\\mapsto y_i^a\\), while the projection from 3D to 2D via camera parameters (completely known) is defined as \\(\\mathcal{R}:y_i^a\\mapsto x_i^a\\). Consequently, the min-max optimization objective for domain" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.772, + 0.107 + ], + "angle": 0, + "content": "generalization can be defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.556, + 0.119, + 0.892, + 0.16 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\min _ {\\mathcal {P}} \\max _ {\\mathcal {A}} \\mathbb {E} _ {\\left(x _ {i} ^ {s}, y _ {i} ^ {s}\\right) \\in \\mathcal {S}} \\left[ \\mathcal {L} _ {M S E} \\left(\\mathcal {P} \\left(x _ {i} ^ {s}\\right), y _ {i} ^ {s}\\right) \\right. \\tag {2} \\\\ + \\mathcal {L} _ {M S E} (\\mathcal {P} (\\mathcal {R} (\\mathcal {A} (y _ {i} ^ {s}))), \\mathcal {A} (y _ {i} ^ {s})) ]. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.171, + 0.892, + 0.277 + ], + "angle": 0, + "content": "The objective is a min-max game between the pose augmentor \\(\\mathcal{A}\\) and the pose estimator \\(\\mathcal{P}\\), encouraging the estimator \\(\\mathcal{P}\\) to learn out-of-source distributions, while conducting augmentations to a significant extent is beneficial to generate more diverse samples, and that is why the loss is minimized with respect to the augmentor \\(\\mathcal{P}\\) and maximized with respect to the augmentor \\(\\mathcal{A}\\) in the optimization." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.288, + 0.799, + 0.304 + ], + "angle": 0, + "content": "3.2. Overview of the Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.311, + 0.892, + 0.614 + ], + "angle": 0, + "content": "Existing methods [7, 11, 37] often apply intricate augmentations to the original poses in the source domain, relying on the discrimination between augmented poses and source poses simultaneously. However, this kind of approach raises two concerns. First, as this is a DG problem for 3D HPE, any information about the target domain is entirely unknown. If the target domain bears a striking resemblance to the source domain (like Target Domain 1 in Fig. 1), poses generated by extensive augmentation might hinder the pose estimator's inference on it. Conversely, in cases where the target domain significantly diverges from the source distributions (like Target Domain 3 in Fig. 1), poses generated by insufficient augmentation may fail to adequately explore out-of-source knowledge for the pose estimator. Second, when target domain is distant from the source and needs significant augmentations, the adversarial training between source and synthesized poses limits the diversity of generated poses. Specifically, the discriminator enforces similarity between source and synthesized poses, thereby causing the synthesized poses to remain similar to the source poses." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.614, + 0.892, + 0.84 + ], + "angle": 0, + "content": "To tackle these concerns, we propose a novel dual-augmentor framework depicted in Fig. 2. This framework involves two augmentors that generate weak- and strong-augmented poses, enabling the handling of diverse unknown target domains. Additionally, the weak-augmentation module serves as a bridge between strong-augmented and source poses. Specifically, the discrimination between source poses and weak-augmented poses is utilized to update the weak augmentor, while the discrimination between weak- and strong-augmented poses is employed to optimize the strong augmentor. This approach liberates the strong augmentor from heavy reliance on the source domain and enables the exploration of more out-of-source knowledge. Further details regarding the pose augmentation process can be found in Sec. 3.3." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Having elucidated the methodology for synthesizing poses, the subsequent discourse pivots towards the utilization of these synthesized poses. Previous works [7, 11, 37] overlook the interactions between source poses and aug-" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2242" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.08, + 0.089, + 0.895, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.263, + 0.895, + 0.32 + ], + "angle": 0, + "content": "Figure 2. Overall framework of our dual-augmentor method. Initially, the original pose undergoes processing through two pose augmentors, resulting in weak- and strong-augmented poses (See Sec. 3.3). The weak augmentor simulates target domains similar to the source domain, while the strong augmentor emulates target domains that deviate significantly from the source distributions. Subsequently, the original pose and the two augmented poses are input to the pose estimator for further meta-optimization (See Sec. 3.4)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.333, + 0.473, + 0.469 + ], + "angle": 0, + "content": "mented poses, dealing with the optimizations of pose estimator on them separately. In contrast, we propose a model-agnostic meta optimization approach that enhances the interactions between source poses and the two types of augmented poses to simulate domain shifts in the optimization and leverage domain-invariant knowledge while maintaining the original 2D-to-3D lifting backbone's structure unchanged. Further details concerning the meta optimization process can be found in Sec. 3.4." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.481, + 0.265, + 0.497 + ], + "angle": 0, + "content": "3.3. Pose Augmentation" + }, + { + "type": "image", + "bbox": [ + 0.079, + 0.506, + 0.473, + 0.686 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.687, + 0.472, + 0.8 + ], + "angle": 0, + "content": "Figure 3. The differentiation of the weak and strong generators. Within each pipeline, denoted as \"W-\" for weak ones and \"S-\" for strong ones, there exist four pose states: original (OR), after bone angle operation (BA), after bone length operation (BL) and after rotation and translation operation (RT). For proximate states, similarities are enhanced for both generators. 
When there is a one-state gap between states, the weak generator continues to enhance similarities, whereas the strong generator enlarges dissimilarities." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.903 + ], + "angle": 0, + "content": "The pose augmentation architecture comprises two pose augmentors: the weak augmentor and the strong augmentor. Each augmentor comprises two components: the generator, responsible for producing diverse synthesized poses to facilitate the training of the pose estimator, and the discriminator, which collaborates with the generator to regu" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.333, + 0.895, + 0.41 + ], + "angle": 0, + "content": "late the quality of the generated poses. Our objective is to apply differential strategies to the generation (named differential generation) and discrimination (named differential discrimination) of the two augmentors, enabling them to generate weak- and strong-augmented poses." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.421, + 0.895, + 0.634 + ], + "angle": 0, + "content": "Differential Generation: The generation process, as illustrated in Figure 3, consists of three modules in sequence for each generator pipeline: the Bone Angle Generator, Bone Length Generator, and Rotation and Translation Generator, resulting in four statuses in the pipeline: original (OR), after the bone angle operation (BA), after the bone length operation (BL), and after the rotation and translation operation (RT). Existing approaches such as [16, 37] typically treat the entire generation pipeline in an end-to-end manner and only utilize the \\((OR, RT)\\) pair. In contrast, our method deals with the generation in a more fine-grained fashion. We group statuses into pairs based on their relations: proximate pairs as \\(PP = \\{(OR, BA), (BA, BL), (BL, RT)\\}\\), and one-state gap pairs as \\(OG = \\{(OR, BL), (BA, RT)\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.645, + 0.896, + 0.902 + ], + "angle": 0, + "content": "To begin, we define the measurement of similarity within a pair. Solely relying on conventional absolute position losses, such as the MSE loss, is not adequate in this context for two reasons. First, the three modules within the generator all perform operations on the level of bone vector, not on the joint positions. If one joint undergoes significant position changes after an operation, other joints connected to it will also experience considerable movement, even if the bone vector between them remains stable. In such cases, position-based measurements cannot fully reflect the extent of augmentation based on the bone vector. Second, human poses possess kinematic attributes, and a position-based measurement overlooks the graphical information. Therefore, we introduce the Laplacian weighted similarity measurement. 
For the human model, it is straightforward to obtain the degree matrix \(D\) and the adjacency matrix \(A\), and the normalized Laplacian matrix can be represented as:

\[
W_{NL} = I - D^{-\frac{1}{2}} A D^{-\frac{1}{2}}, \tag{3}
\]

where \(I\) is the identity matrix, and \(W_{NL}\) is the normalized Laplacian matrix encoding graphical information. Given a pair of statuses \((st_1, st_2)\) (from either PP or OG), the similarity measurement is defined as:

\[
\mathcal{L}_{sim}(st_1, st_2) = \underbrace{\| st_1 - st_2 \|}_{\text{MSE Similarity}} + \underbrace{\| W_{NL}\, st_1 - W_{NL}\, st_2 \|}_{\text{Laplacian Weighted Similarity}}. \tag{4}
\]

To differentiate between the two generators, we apply distinctive strategies. For the weak generator, we enhance similarities for its PP and OG sets to maintain a slight level of augmentation in the synthesized poses, as indicated by:

\[
\mathcal{L}_{wg} = \mathop{\mathbb{E}}_{(st_1, st_2) \in PP} \mathcal{L}_{sim}(st_1, st_2) + \alpha_1 \mathop{\mathbb{E}}_{(st_1, st_2) \in OG} \mathcal{L}_{sim}(st_1, st_2). \tag{5}
\]

For the strong generator, we enhance similarities within its PP set to ensure the reasonableness of the synthesized output, while enlarging dissimilarities within its OG set to maintain a significant level of augmentation, expressed as:

\[
\mathcal{L}_{sg} = \mathop{\mathbb{E}}_{(st_1, st_2) \in PP} \mathcal{L}_{sim}(st_1, st_2) - \alpha_2 \mathop{\mathbb{E}}_{(st_1, st_2) \in OG} \mathcal{L}_{sim}(st_1, st_2). \tag{6}
\]

\(\alpha_1\) and \(\alpha_2\) are trade-off parameters.

Differential Discrimination: The discrimination process comprises two min-max games [6, 8]: one between the source poses and the weak-augmented poses, and the other between the weak-augmented poses and the strong-augmented poses. We adopt the WGAN-GP [8] structure here.
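Before the discrimination losses are written out, the sketch below illustrates one way Eqs. (3)-(6) could be computed. The parent list for the 16-joint skeleton, the batch shapes, and the reading of \(\|\cdot\|\) as a mean per-joint L2 distance are assumptions made only for this example; \(\alpha_1 = 0.50\) and \(\alpha_2 = 0.35\) follow the values reported in Sec. 4.2.

```python
import torch

# Hypothetical parent indices for a 16-joint, Hip-rooted skeleton (joint 0 is the root).
PARENTS = [-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 8, 10, 11, 8, 13, 14]

def normalized_laplacian(parents):
    # Eq. (3): W_NL = I - D^{-1/2} A D^{-1/2}, built from the kinematic tree.
    j = len(parents)
    A = torch.zeros(j, j)
    for child, parent in enumerate(parents):
        if parent >= 0:
            A[child, parent] = A[parent, child] = 1.0
    d_inv_sqrt = torch.diag(A.sum(dim=1).clamp(min=1e-8).pow(-0.5))
    return torch.eye(j) - d_inv_sqrt @ A @ d_inv_sqrt

W_NL = normalized_laplacian(PARENTS)                      # (16, 16)

def sim_loss(st1, st2):
    # Eq. (4): MSE-style similarity plus the Laplacian-weighted term; poses are (batch, 16, 3).
    mse_term = (st1 - st2).norm(dim=-1).mean()
    lap_term = (W_NL @ st1 - W_NL @ st2).norm(dim=-1).mean()
    return mse_term + lap_term

def weak_gen_loss(pp_pairs, og_pairs, alpha1=0.50):
    # Eq. (5): the weak generator keeps both PP and OG pairs similar.
    pp = sum(sim_loss(a, b) for a, b in pp_pairs) / len(pp_pairs)
    og = sum(sim_loss(a, b) for a, b in og_pairs) / len(og_pairs)
    return pp + alpha1 * og

def strong_gen_loss(pp_pairs, og_pairs, alpha2=0.35):
    # Eq. (6): the strong generator keeps PP pairs similar but pushes OG pairs apart.
    pp = sum(sim_loss(a, b) for a, b in pp_pairs) / len(pp_pairs)
    og = sum(sim_loss(a, b) for a, b in og_pairs) / len(og_pairs)
    return pp - alpha2 * og

# Example with random pose states OR -> BA -> BL -> RT for a batch of 8 poses.
OR, BA, BL, RT = (torch.randn(8, 16, 3) for _ in range(4))
pp_pairs = [(OR, BA), (BA, BL), (BL, RT)]
og_pairs = [(OR, BL), (BA, RT)]
print(weak_gen_loss(pp_pairs, og_pairs), strong_gen_loss(pp_pairs, og_pairs))
```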
The discrimination losses regarding the source poses \\( y^{or} \\), weak-augmented poses \\( y^{wa} \\), and strong-augmented poses \\( y^{sa} \\) are defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.14, + 0.647, + 0.47, + 0.682 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {w d} = \\mathbb {E} \\left[ D _ {w a} \\left(y ^ {o r}\\right) \\right] - \\mathbb {E} \\left[ D _ {w a} \\left(y ^ {w a}\\right) \\right] \\tag {7} \\\\ + \\beta_ {1} \\mathbb {E} (1 - \\| \\nabla_ {\\hat {y} ^ {w a}} D _ {w a} (\\hat {y} ^ {w a}) \\|), \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.145, + 0.695, + 0.47, + 0.729 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{r l} \\mathcal {L} _ {s d} = & \\mathbb {E} [ D _ {s a} (y ^ {w a}) ] - \\mathbb {E} [ D _ {s a} (y ^ {s a}) ] \\\\ & + \\beta_ {2} \\mathbb {E} (1 - \\| \\nabla_ {\\hat {y} ^ {s a}} D _ {s a} (\\hat {y} ^ {s a}) \\|). \\end{array} \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.735, + 0.469, + 0.855 + ], + "angle": 0, + "content": "Here, \\(\\mathcal{L}_{wd}\\) is the discrimination loss between \\(y^{or}\\) and \\(y^{wa}\\), used to update the weak augmentor, and \\(D_{wa}\\) is the weak discriminator. \\(\\mathcal{L}_{sd}\\) is the discrimination loss between \\(y^{wa}\\) and \\(y^{sa}\\), used to update the strong augmentor, and \\(D_{sa}\\) is the strong discriminator. \\(\\hat{y}^{wa}\\) and \\(\\hat{y}^{sa}\\) are built via interpolation, such that \\(\\hat{y}^{wa} = \\epsilon y^{or} + (1 - \\epsilon)y^{wa}\\) and \\(\\hat{y}^{sa} = \\epsilon y^{wa} + (1 - \\epsilon)y^{sa}\\), where \\(\\epsilon\\) is randomly drawn from \\(U[0,1]\\). \\(\\beta_{1}\\) and \\(\\beta_{2}\\) are trade-off parameters." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "By implementing this discrimination process in two min-max games, the weak augmentor is capable of retaining more source information and alleviating adverse effects" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.183 + ], + "angle": 0, + "content": "stemming from irrationally synthesized poses. Simultaneously, the strong augmentor can overcome a strong dependency on the source distributions, and explore out-of-source distributions more effectively. With diverse synthesized poses to simulate potential target poses, it is beneficial for further domain generalization in pose estimation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.189, + 0.684, + 0.206 + ], + "angle": 0, + "content": "3.4. 
Meta Optimization

Algorithm 1 Meta Optimization Pseudo Code

Input: Original source 2D-3D pose pairs \((x^{or}, y^{or})\); Weak Generator \(G_{wa}\); Strong Generator \(G_{sa}\)
Init: Pose estimator \(\mathcal{P}_t\), learning rates \(lr_1\) and \(lr_2\), inner loop iteration \(k\), hyperparameter \(\gamma\)
Output: Updated pose estimator \(\mathcal{P}_{t+2}\) after two-step meta optimization

1: Generate weak-augmented 3D poses \(y^{wa} = G_{wa}(y^{or})\)
2: Project \(y^{wa}\) to 2D poses \(x^{wa}\) with camera parameters
3: // Meta-train on Source data:
4: Update \(\mathcal{P}_t' = \mathcal{P}_t - lr_1 \nabla \mathcal{L}_{MSE}(\mathcal{P}_t(x^{or}), y^{or})\)
5: for \(i \gets 1, \dots, k\) do
6: // Meta-test on Weak-augmented data:
7: \(\mathcal{L}_{weak\text{-}test} = \mathcal{L}_{MSE}(\mathcal{P}_t'(x^{wa}), y^{wa})\)
8: end for
9: // Meta update on Source and Weak-augmented data:
10: \(\mathcal{L}_{t+1} = \mathcal{L}_{MSE}(\mathcal{P}_t(x^{or}), y^{or}) + \gamma \mathcal{L}_{weak\text{-}test}\)
11: \(\mathcal{P}_{t+1} = \mathcal{P}_t - lr_2 (\partial \mathcal{L}_{t+1} / \partial \mathcal{P}_t)\)
12: Generate strong-augmented 3D poses \(y^{sa} = G_{sa}(y^{or})\)
13: Project \(y^{sa}\) to 2D poses \(x^{sa}\) with camera parameters
14: // Meta-train on Weak-augmented data:
15: Update \(\mathcal{P}_{t+1}' = \mathcal{P}_{t+1} - lr_1 \nabla \mathcal{L}_{MSE}(\mathcal{P}_{t+1}(x^{wa}), y^{wa})\)
16: for \(i \gets 1, \dots, k\) do
17: // Meta-test on Strong-augmented data:
18: \(\mathcal{L}_{strong\text{-}test} = \mathcal{L}_{MSE}(\mathcal{P}_{t+1}'(x^{sa}), y^{sa})\)
19: end for
20: // Meta update on Weak- and Strong-augmented data:
21: \(\mathcal{L}_{t+2} = \mathcal{L}_{MSE}(\mathcal{P}_{t+1}(x^{wa}), y^{wa}) + \gamma \mathcal{L}_{strong\text{-}test}\)
22: \(\mathcal{P}_{t+2} = \mathcal{P}_{t+1} - lr_2 (\partial \mathcal{L}_{t+2} / \partial \mathcal{P}_{t+1})\)

For the DG problem in 3D HPE, two principal challenges must be addressed. First, there is the issue of synthesizing data, as detailed in Section 3.3. The second challenge revolves around the effective utilization of synthesized data, a facet often overlooked by current methodologies. Existing DG approaches for 3D HPE [11, 37] conduct the optimization of the pose estimator on source and synthesized data independently. Unfortunately, this approach lacks mechanisms for fostering interactions between these two optimization processes, resulting in a deficiency of simulated domain shifts in the optimization trajectory.

In contrast, our proposed model-agnostic strategy incorporates meta-optimization to bolster interactions among source poses, weak-augmented poses, and strong-augmented poses. This process facilitates the learning of domain-invariant knowledge during the update of the pose estimator, as outlined in Algorithm 1. The effectiveness of this approach lies in the fact that the objectives in meta-optimization not only aim to minimize losses on source and synthesized poses but also enhance the alignment of optimization directions during training, thus improving generalization significantly.

The algorithm can be dissected into two parts: Lines 1-11 manage interactions between source poses and weak-augmented poses, while Lines 12-22 address interactions between weak- and strong-augmented poses.
This step-by-step approach is taken due to the substantial domain gap between source poses and strong-augmented poses. Weak-augmented poses serve as a bridge between source poses and strong-augmented poses, alleviating the challenge of directly aligning source and strong-augmented data. By incorporating all three types of poses in the optimization, the pose estimator can effectively generalize across diverse target domains, avoiding overfitting specific pose data types." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.396, + 0.21, + 0.414 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.421, + 0.276, + 0.435 + ], + "angle": 0, + "content": "4.1. Datasets and Metrics" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.445, + 0.47, + 0.521 + ], + "angle": 0, + "content": "In this paper, we evaluate our approach using several widely-used 3D human pose benchmarks, including Human3.6M [12], MPI-INF-3DHP [22], and 3DPW [31]. Moreover, following previous works [11, 16, 37], we adopt the 16-keypoint human model with Hip joint as the origin." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.522, + 0.47, + 0.748 + ], + "angle": 0, + "content": "Human3.6M is an indoor dataset comprising 3.6 million frames and consisting of 7 subjects denoted as S1, S5, S6, S7, S8, S9, and S11. For the cross-dataset evaluation in Tab. 1, Tab. 2 and Tab. 5, we use S1, S5, S6, S7, S8 as the source domain. In the cross-scenario evaluation on the entire Human3.6M dataset in Tab. 3, S1, S5, S6, S7, S8 are the source domain, while S9 and S11 are the target domain. Mean Per Joint Position Error (MPJPE) and Procrustes-Aligned Mean Per Joint Position Error (PA-MPJPE) are employed as evaluation metrics. For the cross-scenario evaluation on partial Human3.6M, we follow previous works [4, 11, 16] to define two tasks as shown in Tab. 4. One task uses S1 as the source and S5, S6, S7, S8 as the target, while the other task uses S1, S5 as the source and S6, S7, S8 as the target. Both tasks utilize MPJPE as the metric." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.749, + 0.47, + 0.839 + ], + "angle": 0, + "content": "MPI-INF-3DHP (3DHP) is an in-the-wild dataset, and we utilize only its test set for cross-dataset evaluation, as shown in Tab. 1 and Tab. 5, which consists of approximately 3k frames. The results are presented based on three metrics: Percentage of Correct Keypoints (PCK), Area Under the Curve (AUC), and MPJPE." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.9 + ], + "angle": 0, + "content": "3DPW is another in-the-wild dataset featuring more challenging poses and scenes. We utilize it for cross-dataset evaluation, as shown in Tab. 2. Here PA-MPJPE and MPJPE serve as the evaluation metrics." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.091, + 0.719, + 0.108 + ], + "angle": 0, + "content": "4.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.114, + 0.892, + 0.265 + ], + "angle": 0, + "content": "For all the generators and discriminators, we ensure consistency by employing the same fully-connected layers, aligning with the methodology described in [37]. In the data augmentation process, the learning rate is set to 1e-4 for generators and 2e-4 for discriminators. We set \\(\\alpha_{1}\\) and \\(\\alpha_{2}\\) to 0.50 and 0.35, respectively, while both \\(\\beta_{1}\\) and \\(\\beta_{2}\\) are assigned a value of 4. 
During the meta optimization process, we utilize a learning rate of 1e-4 for \\(lr_{1}\\) and 5e-4 for \\(lr_{2}\\). The trade-off parameter \\(\\gamma\\) and the inner loop iteration \\(k\\) are both set to 1." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.266, + 0.893, + 0.371 + ], + "angle": 0, + "content": "Moreover, we employ the Adam optimizer [13] for data augmentation and the AdamW optimizer [19] for meta optimization. Our experiments are conducted with a batch size of 1024 over 60 epochs. We initialize the pose estimator with a warm-up phase lasting two epochs for supervised learning on source data. From the third epoch onwards, data augmentation and meta-optimization begin." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.379, + 0.695, + 0.395 + ], + "angle": 0, + "content": "4.3. Quantitative Results" + }, + { + "type": "table_caption", + "bbox": [ + 0.543, + 0.409, + 0.848, + 0.422 + ], + "angle": 0, + "content": "Table 1. Cross-dataset evaluation on 3DHP dataset." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.425, + 0.892, + 0.527 + ], + "angle": 0, + "content": "
| Method | Venue | DG | PCK ↑ | AUC ↑ | MPJPE ↓ |
| --- | --- | --- | --- | --- | --- |
| VPose (1-frame) [23] | CVPR'19 | × | 80.9 | 42.5 | 102.3 |
| EvoSkeleton [17] | CVPR'20 | | 81.2 | 46.1 | 99.7 |
| RepNet [32] | CVPR'19 | | 81.8 | 54.8 | 92.5 |
| PoseAug [37] | TPAMI'23 | | 88.6 | 57.3 | 73.0 |
| DH-AUG [11] | ECCV'22 | | 89.5 | 57.9 | 71.2 |
| PoseGU [7] | CVIU'23 | | 86.3 | 57.2 | 75.0 |
| CEE-Net [16] | AAAI'23 | | 89.9 | 58.2 | 69.7 |
| Ours | | | 92.9 | 60.7 | 63.1 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.541, + 0.534, + 0.85, + 0.546 + ], + "angle": 0, + "content": "Table 2. Cross-dataset evaluation on 3DPW dataset." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.55, + 0.892, + 0.647 + ], + "angle": 0, + "content": "
| Method | Venue | DG | PA-MPJPE ↓ | MPJPE ↓ |
| --- | --- | --- | --- | --- |
| VPose (1-frame) [23] | CVPR'19 | × | 94.6 | 125.7 |
| VIBE [14] | CVPR'20 | | 82.3 | 122.5 |
| PoseAug [37] | TPAMI'23 | | 81.6 | 119.0 |
| DH-AUG [11] | ECCV'22 | | 79.3 | 112.8 |
| PoseGU [7] | CVIU'23 | | 92.3 | - |
| CEE-Net [16] | AAAI'23 | | 76.8 | - |
| Ours | | | 73.2 | 106.6 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.66, + 0.892, + 0.688 + ], + "angle": 0, + "content": "Table 3. Cross-scenario evaluation on Entire Human3.6M dataset. S1,S5,S6,S7,S8 are the source and S9,S11 are the target." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.691, + 0.892, + 0.776 + ], + "angle": 0, + "content": "
| Method | Venue | DG | MPJPE ↓ | PA-MPJPE ↓ |
| --- | --- | --- | --- | --- |
| VPose (1-frame) [23] | CVPR'19 | × | 52.7 | 40.9 |
| EvoSkeleton [17] | CVPR'20 | | 50.9 | 38.0 |
| PoseAug [37] | TPAMI'23 | | 50.2 | 39.1 |
| DH-AUG [11] | ECCV'22 | | 49.8 | 38.3 |
| CEE-Net [16] | AAAI'23 | | 47.3 | 36.8 |
| Ours | | | 44.4 | 34.6 |
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Cross-dataset evaluation results. In cross-dataset evaluations, source and target come from different datasets. Following identical paradigms from existing methods [11, 16, 37], we use ground truth 2D keypoints as input, single-frame VPose [23] as the lifting backbone, and Human3.6M as the source dataset. Our method demonstrates notable performance in all metrics, as presented in Tab. 1 and Tab. 2. Notably, our approach outperforms CEE-Net by \\(3.0\\%\\) in" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2245" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.149, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.081, + 0.194, + 0.148, + 0.203 + ], + "angle": 0, + "content": "2D Predictions" + }, + { + "type": "image", + "bbox": [ + 0.15, + 0.092, + 0.2, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.154, + 0.195, + 0.208, + 0.203 + ], + "angle": 0, + "content": "Source-only" + }, + { + "type": "image", + "bbox": [ + 0.202, + 0.092, + 0.252, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.203, + 0.195, + 0.268, + 0.203 + ], + "angle": 0, + "content": "y PoseAug" + }, + { + "type": "image", + "bbox": [ + 0.255, + 0.092, + 0.344, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.255, + 0.194, + 0.337, + 0.203 + ], + "angle": 0, + "content": "Aug DH-AUG" + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.092, + 0.404, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.368, + 0.195, + 0.392, + 0.203 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.405, + 0.092, + 0.476, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.439, + 0.195, + 0.455, + 0.203 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.485, + 0.089, + 0.558, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.49, + 0.195, + 0.555, + 0.203 + ], + "angle": 0, + "content": "2D Predictions" + }, + { + "type": "image", + "bbox": [ + 0.558, + 0.092, + 0.648, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.564, + 0.195, + 0.617, + 0.203 + ], + "angle": 0, + "content": "Source-only" + }, + { + "type": "image", + "bbox": [ + 0.648, + 0.092, + 0.73, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.664, + 0.195, + 0.678, + 0.203 + ], + "angle": 0, + "content": "Aug." 
+ }, + { + "type": "image", + "bbox": [ + 0.68, + 0.092, + 0.757, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.7, + 0.195, + 0.744, + 0.203 + ], + "angle": 0, + "content": "DH-AUG" + }, + { + "type": "image", + "bbox": [ + 0.755, + 0.092, + 0.826, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.777, + 0.195, + 0.801, + 0.203 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.83, + 0.092, + 0.893, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.847, + 0.195, + 0.862, + 0.203 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.206, + 0.799, + 0.221 + ], + "angle": 0, + "content": "Figure 4. Qualitative results on Cross-dataset evaluation. Left is 3DHP dataset, and right is 3DPW dataset." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.228, + 0.149, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.282, + 0.148, + 0.291 + ], + "angle": 0, + "content": "2D Predictions" + }, + { + "type": "image", + "bbox": [ + 0.15, + 0.229, + 0.214, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.156, + 0.282, + 0.209, + 0.291 + ], + "angle": 0, + "content": "Source-only" + }, + { + "type": "image", + "bbox": [ + 0.215, + 0.229, + 0.279, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.282, + 0.27, + 0.291 + ], + "angle": 0, + "content": "PoseAug" + }, + { + "type": "image", + "bbox": [ + 0.28, + 0.229, + 0.344, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.282, + 0.335, + 0.291 + ], + "angle": 0, + "content": "DH-AUG" + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.229, + 0.409, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.368, + 0.282, + 0.393, + 0.291 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.41, + 0.229, + 0.474, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.439, + 0.282, + 0.456, + 0.291 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.485, + 0.229, + 0.558, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.49, + 0.282, + 0.556, + 0.291 + ], + "angle": 0, + "content": "2D Predictions" + }, + { + "type": "image", + "bbox": [ + 0.559, + 0.229, + 0.634, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.564, + 0.282, + 0.617, + 0.291 + ], + "angle": 0, + "content": "Source-only" + }, + { + "type": "image", + "bbox": [ + 0.635, + 0.229, + 0.69, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.638, + 0.282, + 0.677, + 0.291 + ], + "angle": 0, + "content": "PoseAug" + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.229, + 0.757, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.702, + 0.282, + 0.746, + 0.291 + ], + "angle": 0, + "content": "DH-AUG" + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.229, + 0.822, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.779, + 0.282, + 0.803, + 0.291 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", 
+ "bbox": [ + 0.824, + 0.229, + 0.89, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.848, + 0.282, + 0.865, + 0.291 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image_caption", + "bbox": [ + 0.099, + 0.294, + 0.87, + 0.308 + ], + "angle": 0, + "content": "Figure 5. Results on Cross-scenario evaluation. Left is for task S1,S5,S6,S7,S8 \\(\\rightarrow\\) S9,S11, and right is for task S1,S5 \\(\\rightarrow\\) S6,S7,S8." + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.32, + 0.468, + 0.376 + ], + "angle": 0, + "content": "Table 4. Cross-scenario evaluation on Partial Human3.6M dataset. For the task \"S1\", S1 is the source and S5, S6, S7, S8 are the target. For the task \"S1+S5\", S1 and S5 are the source, and S6, S7, S8 are the target. MPJPE \\((\\downarrow)\\) is used for evaluation." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.38, + 0.469, + 0.479 + ], + "angle": 0, + "content": "
| Method | Venue | DG | S1 | S1+S5 |
| --- | --- | --- | --- | --- |
| VPose (1-frame) [23] | CVPR'19 | × | 65.2 | 57.9 |
| EvoSkeleton [17] | CVPR'20 | | 61.5 | 54.6 |
| PoseAug [37] | TPAMI'23 | | 56.7 | 51.3 |
| DH-AUG [11] | ECCV'22 | | 52.2 | 47.0 |
| CEE-Net [16] | AAAI'23 | | 51.9 | 46.7 |
| Ours | | | 50.3 | 45.4 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.49, + 0.469, + 0.518 + ], + "angle": 0, + "content": "Table 5. Cross-dataset evaluation with MPJPE (↓) on 3DHP with varied 2D predictions and 2D-to-3D backbones (1-frame)." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.521, + 0.469, + 0.724 + ], + "angle": 0, + "content": "
| Method | DG | DET [5] | CPN [3] | HR [33] | GT |
| --- | --- | --- | --- | --- | --- |
| SemGCN [38] | × | 101.9 | 98.7 | 95.6 | 97.4 |
| SemGCN + PoseAug [37] | | 89.9 | 89.3 | 89.1 | 86.1 |
| SemGCN + CEE-generator [16] | | 83.6 | 82.8 | 82.4 | 81.3 |
| SemGCN + DH-AUG [11] | | 79.7 | 76.7 | 73.0 | 71.3 |
| SemGCN + Ours | | 76.5 | 74.1 | 70.7 | 68.9 |
| VPose [23] | × | 92.6 | 89.8 | 85.6 | 86.6 |
| VPose + PoseAug [37] | | 78.3 | 78.4 | 73.2 | 73.0 |
| VPose + CEE-generator [16] | | 75.6 | 75.2 | 71.2 | 71.4 |
| VPose + DH-AUG [11] | | 76.7 | 74.8 | 71.1 | 71.2 |
| VPose + Ours | | 72.4 | 70.9 | 62.4 | 63.1 |
| PoseFormer [43] | × | 91.9 | 89.2 | 84.2 | 85.7 |
| PoseFormer + PoseAug [37] | | 77.7 | 77.5 | 72.1 | 72.3 |
| PoseFormer + CEE-generator [16] | | - | - | - | - |
| PoseFormer + DH-AUG [11] | | 75.6 | 74.8 | 71.6 | 72.0 |
| PoseFormer + Ours | | 72.2 | 70.5 | 62.8 | 63.4 |
| MixSTE [36] | × | 90.6 | 87.4 | 82.0 | 84.0 |
| MixSTE + PoseAug [37] | | 76.1 | 76.3 | 71.7 | 71.6 |
| MixSTE + CEE-generator [16] | | - | - | - | - |
| MixSTE + DH-AUG [11] | | 74.8 | 74.4 | 70.9 | 70.7 |
| MixSTE + Ours | | 70.5 | 68.2 | 60.4 | 61.0 |
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.734, + 0.469, + 0.824 + ], + "angle": 0, + "content": "PCK and \\(2.5\\%\\) in AUC, and reduces MPJPE by \\(6.6\\mathrm{mm}\\) in the 3DHP task. In the case of 3DPW, our method shows an improvement of \\(3.6\\mathrm{mm}\\) in PA-MPJPE compared to CEE-Net [16]. While CEE-Net [16] and PoseGU [7] do not disclose their codes or report their results on MPJPE, it is evident that our method surpasses DH-AUG [11] by \\(6.2\\mathrm{mm}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.469, + 0.9 + ], + "angle": 0, + "content": "Cross-scenario evaluation results. In cross-scenario evaluations, source and target come from different subsets of the same dataset. Maintaining consistency with previous works, we utilize ground truth 2D keypoints as input and single-frame VPose [23] as the 2D-to-3D lifting network." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.323, + 0.892, + 0.427 + ], + "angle": 0, + "content": "For the situation of using Entire Human3.6M in Tab. 3, our method demonstrates superior performance compared to CEE-Net [16] with a \\(2.9\\mathrm{mm}\\) reduction in MPJPE and a \\(2.2\\mathrm{mm}\\) improvement in PA-MPJPE. In the case of using partial Human3.6M in Tab. 4, our approach surpasses CEE-Net [16] by \\(1.6\\mathrm{mm}\\) in the S1 task and \\(1.3\\mathrm{mm}\\) in the S1+S5 task based on the MPJPE metric." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.429, + 0.892, + 0.685 + ], + "angle": 0, + "content": "Results with varied 2D predictions and backbones. The results presented in Tables 1 to 4 are confined to the usage of ground truth 2D input and single-frame VPose backbone, which may raise concerns about the universality of the proposed method. To address this concern, we assess the performance of our approach with various 2D predictions such as DET [5], CPN [3], HRNet [33], and diverse lifting backbones including SemGCN [38], PoseFormer [43], and MixSTE [36], as displayed in Table 5. In this evaluation, 3DHP serves as the dataset, and MPJPE is the metric used. Notably, all the listed backbones are single-frame versions. As CEE-Net [16] only provides results for its generation part, CEE-generator, and does not offer open-source code, we have included partial results of CEE-generator. From Table 5, it is evident that our method surpasses all the existing methods, demonstrating the robustness of our framework across various settings." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.696, + 0.682, + 0.712 + ], + "angle": 0, + "content": "4.4. Qualitative Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Fig. 4 shows qualitative results on cross-dataset evaluation (3DHP on the left side and 3DPW on the right side), while Fig. 5 displays qualitative results on cross-scenario evaluation (S1,S5,S6,S7,S8 \\(\\rightarrow\\) S9,S11 on the left side and S1,S5 \\(\\rightarrow\\) S6,S7,S8 on the right side). HRNet [33] is applied as the 2D pose estimator and VPose [23] is the 2D-to-3D lifting backbone. We use Source-only, PoseAug [37], DH-AUG [11], Ours, and Ground Truth (GT) for qualitative comparison. Because CEE-Net does not provide source codes or pretrained models, we cannot generate visual examples from it. It is evident that our method outperforms other baselines significantly." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "2246" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.091, + 0.231, + 0.108 + ], + "angle": 0, + "content": "4.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.114, + 0.473, + 0.418 + ], + "angle": 0, + "content": "Ablation study on the overall framework. The ablation study is conducted to illustrate the functions of three proposed modules: differential generation (DiffGen) in Sec. 3.3, differential discrimination (DiffDis) in Sec. 3.3, and meta optimization (MetaOpt) in Sec. 3.4. In Tab. 6, we apply ground truth as 2D predictions and VPose as the backbone. The absence of DiffGen leads to a decrease in PCK and AUC by \\(2.4\\%\\) and \\(2.1\\%\\) respectively, accompanied by a \\(5.9\\mathrm{mm}\\) increase in MPJPE on 3DHP, while it increases PA-MPJPE and MPJPE by \\(3.1\\mathrm{mm}\\) and \\(5.2\\mathrm{mm}\\) separately on 3DPW. Similarly, the exclusion of DiffDis results in a decrease of \\(1.7\\%\\) in both PCK and AUC, with a corresponding \\(4.1\\mathrm{mm}\\) increase in MPJPE on 3DHP. As for 3DPW, the removal causes a degradation of \\(1.9\\mathrm{mm}\\) in PA-MPJPE and \\(3.0\\mathrm{mm}\\) in MPJPE. Besides, the removal of MetaOpt leads to a decline in PCK and AUC by \\(1.2\\%\\) and \\(0.8\\%\\) respectively, along with a \\(2.4\\mathrm{mm}\\) increase in MPJPE on 3DHP, and an increase of \\(1.3\\mathrm{mm}\\) and \\(1.8\\mathrm{mm}\\) in PA-MPJPE and MPJPE separately. These results show that each module plays a critical role in obtaining better generalization." + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.424, + 0.465, + 0.438 + ], + "angle": 0, + "content": "Table 6. Overall framework ablation study on 3DHP and 3DPW" + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.442, + 0.47, + 0.508 + ], + "angle": 0, + "content": "
| Method | 3DHP PCK ↑ | 3DHP AUC ↑ | 3DHP MPJPE ↓ | 3DPW PA-MPJPE ↓ | 3DPW MPJPE ↓ |
| --- | --- | --- | --- | --- | --- |
| Ours w/o DiffGen | 90.5 | 58.6 | 69.0 | 76.3 | 111.8 |
| Ours w/o DiffDis | 91.2 | 59.0 | 67.3 | 75.1 | 109.6 |
| Ours w/o MetaOpt | 91.7 | 59.9 | 65.5 | 74.5 | 108.4 |
| Ours | 92.9 | 60.7 | 63.1 | 73.2 | 106.6 |
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.516, + 0.47, + 0.609 + ], + "angle": 0, + "content": "Ablation study on the generators. There exist two generators in our framework, and each with two pair groups. In this part, we discuss the functions of proximate pairs in weak augmentor (W-PP), proximate pairs in strong aug-. mentor (S-PP), one-state gap pairs in weak augmentor (WOG), and one-state gap pairs in strong augmentor (S-OG)." + }, + { + "type": "table_caption", + "bbox": [ + 0.104, + 0.614, + 0.443, + 0.629 + ], + "angle": 0, + "content": "Table 7. Generators' ablation study on 3DHP and 3DPW" + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.632, + 0.47, + 0.71 + ], + "angle": 0, + "content": "
| Method | 3DHP PCK ↑ | 3DHP AUC ↑ | 3DHP MPJPE ↓ | 3DPW PA-MPJPE ↓ | 3DPW MPJPE ↓ |
| --- | --- | --- | --- | --- | --- |
| Ours w/o W-PP | 88.3 | 57.5 | 72.6 | 81.7 | 118.8 |
| Ours w/o S-PP | 90.8 | 58.2 | 71.3 | 78.1 | 111.0 |
| Ours w/o W-OG | 92.1 | 59.6 | 65.8 | 74.7 | 108.7 |
| Ours w/o S-OG | 91.4 | 58.9 | 68.2 | 75.4 | 109.5 |
| Ours | 92.9 | 60.7 | 63.1 | 73.2 | 106.6 |
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.473, + 0.903 + ], + "angle": 0, + "content": "In Table 7, excluding W-PP or S-PP leads to a significant decline in PCK by \\(4.6\\%\\) and \\(2.1\\%\\), and in AUC by \\(3.2\\%\\) and \\(2.5\\%\\) respectively, accompanied by a notable increase of \\(9.5\\mathrm{mm}\\) and \\(8.2\\mathrm{mm}\\) in MPJPE separately on 3DHP. These results emphasize the critical role of maintaining similarity in proximate pairs for both weak and strong augmentors, serving as the fundamental basis for generating effective and reasonable synthesized poses. Moreover, the absence of W-OG leads to a decline in PCK and AUC by \\(0.8\\%\\) and \\(1.1\\%\\) respectively, with a corresponding \\(1.7\\mathrm{mm}\\) increase in the MPJPE on 3DHP. The removal of S-OG results in a decrease in PCK and AUC scores by \\(1.5\\%\\) and \\(1.8\\%\\) respec" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.182 + ], + "angle": 0, + "content": "tively, along with a \\(3.4\\mathrm{mm}\\) increase in MPJPE on 3DHP. These results highlight the significance of maintaining differentiation between the weak augmentor and the strong augmentor during the generation process, where enlarging dissimilarity in S-OG is more important in discriminating these two generators." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.183, + 0.893, + 0.334 + ], + "angle": 0, + "content": "Ablation study on the number of augmentors. Comparisons were conducted between our dual-augmentor framework and single-augmentor frameworks. Alongside our proposed framework, two single-augmentor frameworks were considered in the ablation study, utilizing either the weak-augmentor (WA) or the strong-augmentor (SA). The discrimination and meta-optimization processes exclusively involved source poses and one category of synthesized poses. The results, using ground truth as 2D predictions and VPose as the backbone, are presented in Table 8." + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.343, + 0.893, + 0.37 + ], + "angle": 0, + "content": "Table 8. Ablation study of number of augmentors on 3DHP and 3DPW" + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.374, + 0.892, + 0.439 + ], + "angle": 0, + "content": "
| Method | 3DHP PCK ↑ | 3DHP AUC ↑ | 3DHP MPJPE ↓ | 3DPW PA-MPJPE ↓ | 3DPW MPJPE ↓ |
| --- | --- | --- | --- | --- | --- |
| WA | 87.3 | 56.0 | 74.5 | 80.5 | 117.7 |
| SA | 89.8 | 57.8 | 71.0 | 79.1 | 111.4 |
| Ours | 92.9 | 60.7 | 63.1 | 73.2 | 106.6 |
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.45, + 0.894, + 0.556 + ], + "angle": 0, + "content": "From Table 8, it is evident that our proposed framework surpasses both WA and SA significantly, underscoring the superiority of employing two augmentors over a single aug- mentor in addressing DG for 3D HPE. Furthermore, SA outperforms WA, emphasizing the greater significance of exploring out-of-source distributions compared to retaining source-relevant knowledge in cross-dataset tasks." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.572, + 0.62, + 0.588 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.895, + 0.853 + ], + "angle": 0, + "content": "In this paper, we propose a novel dual-augmentor framework designed to enhance domain generalization in 3D human pose estimation. Our framework addresses the critical aspects of data augmentation and the effective utilization of synthesized data. To achieve this, we implement distinctive strategies for the weak and strong generators, ensuring the preservation of source-specific information while simultaneously exploring out-of-source distributions. Moreover, we incorporate meta-optimization techniques to facilitate enhanced interaction among source data, weak-augmented data, and strong-augmented data, thereby simulating domain shifts in the training of pose estimator and fostering the acquisition of domain-invariant knowledge. Extensive experimentation and comprehensive analysis conducted across multiple datasets demonstrate the superior performance of our proposed approach over existing state-of-the-art methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Acknowledgements This material is based upon work supported by the National Science Foundation under Grant CNS-1910844." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2247" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.185 + ], + "angle": 0, + "content": "[1] Maher Baili, Philippe Wenger, and Damien Chablat. A classification of 3r orthogonal manipulators by the topology of their workspace. In IEEE International Conference on Robotics and Automation, 2004. Proceedings. ICRA'04. 2004, pages 1933-1938. IEEE, 2004. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.187, + 0.472, + 0.269 + ], + "angle": 0, + "content": "[2] Wenhao Chai, Zhongyu Jiang, Jenq-Neng Hwang, and Gaoang Wang. Global adaptation meets local generalization: Unsupervised domain adaptation for 3d human pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14655-14665, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.271, + 0.472, + 0.342 + ], + "angle": 0, + "content": "[3] Yilun Chen, Zhicheng Wang, Yuxiang Peng, Zhiqiang Zhang, Gang Yu, and Jian Sun. Cascaded pyramid network for multi-person pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7103-7112, 2018. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.343, + 0.472, + 0.426 + ], + "angle": 0, + "content": "[4] Mohsen Gholami, Bastian Wandt, Helge Rhodin, Rabab Ward, and Z Jane Wang. Adaptpose: Cross-dataset adaptation for 3d human pose estimation by learnable motion generation. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13075-13085, 2022. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.429, + 0.47, + 0.47 + ], + "angle": 0, + "content": "[5] Ross Girshick, Ilija Radosavovic, Georgia Gkioxari, Piotr Dólar, and Kaiming He. Detector. https://github.com/facebookresearch/detectron, 2018.7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.472, + 0.47, + 0.528 + ], + "angle": 0, + "content": "[6] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. Advances in neural information processing systems, 27, 2014. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.53, + 0.47, + 0.586 + ], + "angle": 0, + "content": "[7] Shannan Guan, Haiyan Lu, Linchao Zhu, and Gengfa Fang. Posegu: 3d human pose estimation with novel human pose generator and unbiased learning. Computer Vision and Image Understanding, 233:103715, 2023. 1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.588, + 0.472, + 0.643 + ], + "angle": 0, + "content": "[8] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. Advances in neural information processing systems, 30, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.645, + 0.472, + 0.728 + ], + "angle": 0, + "content": "[9] Vladimir Guzov, Aymen Mir, Torsten Sattler, and Gerard Pons-Moll. Human positioning system (hps): 3d human pose estimation and self-localization in large scenes from body-mounted sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4318-4329, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.73, + 0.47, + 0.786 + ], + "angle": 0, + "content": "[10] Yilei Hua, Wenhan Wu, Ce Zheng, Aidong Lu, Mengyuan Liu, Chen Chen, and Shiqian Wu. Part aware contrastive learning for self-supervised action recognition. In International Joint Conference on Artificial Intelligence, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.788, + 0.47, + 0.855 + ], + "angle": 0, + "content": "[11] Linzhi Huang, Jiahao Liang, and Weihong Deng. Dh-aug: Dh forward kinematics model driven augmentation for 3d human pose estimation. In European Conference on Computer Vision, pages 436-453. Springer, 2022. 1, 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.859, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[12] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environ-" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "ments. IEEE transactions on pattern analysis and machine intelligence, 36(7):1325-1339, 2013. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.123, + 0.892, + 0.164 + ], + "angle": 0, + "content": "[13] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.167, + 0.892, + 0.235 + ], + "angle": 0, + "content": "[14] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. Vibe: Video inference for human body pose and shape estimation. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5253-5263, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.239, + 0.892, + 0.295 + ], + "angle": 0, + "content": "[15] Da Li, Yongxin Yang, Yi-Zhe Song, and Timothy Hospedales. Learning to generalize: Meta-learning for domain generalization. In Proceedings of the AAAI conference on artificial intelligence, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.298, + 0.892, + 0.353 + ], + "angle": 0, + "content": "[16] Haolun Li and Chi-Man Pun. Cee-net: Complementary end-to-end network for 3d human pose generation and estimation. Proceedings of the AAAI Conference on Artificial Intelligence, 37(1):1305-1313, 2023. 2, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.355, + 0.892, + 0.436 + ], + "angle": 0, + "content": "[17] Shichao Li, Lei Ke, Kevin Pratama, Yu-Wing Tai, Chi-Keung Tang, and Kwang-Ting Cheng. Cascaded deep monocular 3d human pose estimation with evolutionary training data. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6173–6183, 2020. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.441, + 0.892, + 0.524 + ], + "angle": 0, + "content": "[18] Hanbing Liu, Jun-Yan He, Zhi-Qi Cheng, Wangmeng Xiang, Qize Yang, Wenhao Chai, Gaoang Wang, Xu Bao, Bin Luo, Yifeng Geng, et al. Posynda: Multi-hypothesis pose synthesis domain adaptation for robust 3d human pose estimation. In Proceedings of the ACM International Conference on Multimedia, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.527, + 0.892, + 0.568 + ], + "angle": 0, + "content": "[19] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.571, + 0.892, + 0.654 + ], + "angle": 0, + "content": "[20] Zhengzhi Lu, He Wang, Ziyi Chang, Guoan Yang, and Hubert P. H. Shum. Hard no-box adversarial attack on skeleton-based human action recognition with skeleton-motion-informed gradient. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4597-4606, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.657, + 0.892, + 0.713 + ], + "angle": 0, + "content": "[21] Toshihiko Matsuura and Tatsuya Harada. Domain generalization using a mixture of multiple latent domains. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 11749-11756, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.716, + 0.892, + 0.785 + ], + "angle": 0, + "content": "[22] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 2017 international conference on 3D vision (3DV), pages 506-516. IEEE, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.787, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[23] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 3d human pose estimation in video with temporal convolutions and semi-supervised training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7753-7762, 2019. 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.859, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[24] Qucheng Peng. Multi-source and Source-Private Cross-Domain Learning for Visual Recognition. 
PhD thesis, Purdue University, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "2248" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.162 + ], + "angle": 0, + "content": "[25] Qucheng Peng, Zhengming Ding, Lingjuan Lyu, Lichao Sun, and Chen Chen. Rain: regularization on input and network for black-box domain adaptation. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, pages 4118–4126, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.164, + 0.472, + 0.219 + ], + "angle": 0, + "content": "[26] Qucheng Peng, Ce Zheng, and Chen Chen. Source-free domain adaptive human pose estimation. In 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pages 4803-4813. IEEE, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.221, + 0.472, + 0.303 + ], + "angle": 0, + "content": "[27] Ekkasit Pinyoanuntapong, Ayman Ali, Kalvik Jakkala, Pu Wang, Minwoo Lee, Qucheng Peng, Chen Chen, and Zhi Sun. Gaitsada: Self-aligned domain adaptation for mmwave gait recognition. In 2023 IEEE 20th International Conference on Mobile Ad Hoc and Smart Systems (MASS), pages 218-226. IEEE, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.472, + 0.361 + ], + "angle": 0, + "content": "[28] Fengchun Qiao, Long Zhao, and Xi Peng. Learning to learn single domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12556-12565, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.362, + 0.472, + 0.431 + ], + "angle": 0, + "content": "[29] Chi Su, Jianing Li, Shiliang Zhang, Junliang Xing, Wen Gao, and Qi Tian. Pose-driven deep convolutional model for person re-identification. In Proceedings of the IEEE international conference on computer vision, pages 3960–3969, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.433, + 0.472, + 0.488 + ], + "angle": 0, + "content": "[30] Adith Swaminathan and Thorsten Joachims. Counterfactual risk minimization: Learning from logged bandit feedback. In International Conference on Machine Learning, pages 814-823. PMLR, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.49, + 0.472, + 0.56 + ], + "angle": 0, + "content": "[31] Timo Von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In Proceedings of the European conference on computer vision (ECCV), pages 601-617, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.562, + 0.472, + 0.631 + ], + "angle": 0, + "content": "[32] Bastian Wandt and Bodo Rosenhahn. Repnet: Weakly supervised training of an adversarial reprojection network for 3d human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7782-7791, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.632, + 0.472, + 0.715 + ], + "angle": 0, + "content": "[33] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, et al. Deep high-resolution representation learning for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 43(10):3349-3364, 2020. 
2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.717, + 0.472, + 0.787 + ], + "angle": 0, + "content": "[34] Hong Yan, Yang Liu, Yushen Wei, Zhen Li, Guanbin Li, and Liang Lin. Skeletonmae: Graph-based masked autoencoder for skeleton sequence pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5606-5618, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.472, + 0.858 + ], + "angle": 0, + "content": "[35] Hongwei Yi, Chun-Hao P Huang, Shashank Tripathi, Lea Hering, Justus Thies, and Michael J Black. Mime: Human-aware 3d scene generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12965-12976, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.472, + 0.902 + ], + "angle": 0, + "content": "[36] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Jun-song Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. In Proceedings of" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "the IEEE/CVF conference on computer vision and pattern recognition, pages 13232-13242, 2022. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.894, + 0.19 + ], + "angle": 0, + "content": "[37] Jianfeng Zhang, Kehong Gong, Xinchao Wang, and Jiashi Feng. Learning to augment poses for 3d human pose estimation in images and videos. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(8):10012-10026, 2023. 1, 2, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.894, + 0.261 + ], + "angle": 0, + "content": "[38] Long Zhao, Xi Peng, Yu Tian, Mubbasir Kapadia, and Dimitris N Metaxas. Semantic graph convolutional networks for 3d human pose regression. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3425-3435, 2019. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.262, + 0.894, + 0.318 + ], + "angle": 0, + "content": "[39] Long Zhao, Ting Liu, Xi Peng, and Dimitris Metaxas. Maximum-entropy adversarial data augmentation for improved generalization and robustness. Advances in Neural Information Processing Systems, 33:14435–14447, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.319, + 0.894, + 0.389 + ], + "angle": 0, + "content": "[40] Qitao Zhao, Ce Zheng, Mengyuan Liu, Pichao Wang, and Chen Chen. Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8877-8886, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.39, + 0.892, + 0.444 + ], + "angle": 0, + "content": "[41] Shanshan Zhao, Mingming Gong, Tongliang Liu, Huan Fu, and Dacheng Tao. Domain generalization via entropy regularization. Advances in Neural Information Processing Systems, 33:16096-16107, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.446, + 0.892, + 0.514 + ], + "angle": 0, + "content": "[42] Yuyang Zhao, Zhun Zhong, Na Zhao, Nicu Sebe, and Gim Hee Lee. Style-hallucinated dual consistency learning for domain generalized semantic segmentation. In European Conference on Computer Vision, pages 535-552. Springer, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.516, + 0.892, + 0.585 + ], + "angle": 0, + "content": "[43] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11656-11665, 2021. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.587, + 0.892, + 0.643 + ], + "angle": 0, + "content": "[44] Ce Zheng, Xianpeng Liu, Guo-Jun Qi, and Chen Chen. Potter: Pooling attention transformer for efficient human mesh recovery. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.644, + 0.892, + 0.713 + ], + "angle": 0, + "content": "[45] Ce Zheng, Matias Mendieta, Taojiannan Yang, Guo-Jun Qi, and Chen Chen. Feater: An efficient network for human reconstruction via feature map-based transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.714, + 0.892, + 0.782 + ], + "angle": 0, + "content": "[46] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: A unified perspective on learning human motion representations. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.782 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "2249" + } + ] +] \ No newline at end of file diff --git a/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/2e309a58-2e8d-4563-8890-368854bbd34f_origin.pdf b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/2e309a58-2e8d-4563-8890-368854bbd34f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3a20cbc0639a3ddc2205d0c240ccdadb18475762 --- /dev/null +++ b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/2e309a58-2e8d-4563-8890-368854bbd34f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:473a74407a2308bfdf38261ae37f43e40e37c37b1f0f4b0ea9ce83409ceda832 +size 2771870 diff --git a/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/full.md b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..dfd620a11a5ba9323cbbd559a5a096266852f74b --- /dev/null +++ b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/full.md @@ -0,0 +1,393 @@ +# A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation + +Qucheng Peng, Ce Zheng, Chen Chen + +Center for Research in Computer Vision, University of Central Florida + +{qucheng.peng,ce.zheng}@ucf.edu,chen.chen@crcv.ucf.edu + +# Abstract + +3D human pose data collected in controlled laboratory settings present challenges for pose estimators that generalize across diverse scenarios. To address this, domain generalization is employed. Current methodologies in domain generalization for 3D human pose estimation typically utilize adversarial training to generate synthetic poses for training. Nonetheless, these approaches exhibit several limitations. 
First, the lack of prior information about the target domain complicates the application of suitable augmentation through a single pose augmentor; affecting generalization on target domains. Moreover, adversarial training's discriminator tends to enforce similarity between source and synthesized poses, impeding the exploration of out-of-source distributions. Furthermore, the pose estimator's optimization is not exposed to domain shifts, limiting its overall generalization ability. + +To address these limitations, we propose a novel framework featuring two pose augmentors: the weak and the strong augmentors. Our framework employs differential strategies for generation and discrimination processes, facilitating the preservation of knowledge related to source poses and the exploration of out-of-source distributions without prior information about target poses. Besides, we leverage meta-optimization to simulate domain shifts in the optimization process of the pose estimator, thereby improving its generalization ability. Our proposed approach significantly outperforms existing methods, as demonstrated through comprehensive experiments on various benchmark datasets. Our code will be released at https://github.com/davidpengucf/DAF-DG. + +# 1. Introduction + +3D human pose estimation (HPE) is the process of predicting the 3D coordinates of human joints from images or videos. It serves as the foundation for various applications including person re-identification [29], action recognition + +![](images/0150271f669295bceb3aa7fe0418a4dd03c382458b5585d610ad84e6a19a6c08.jpg) +Figure 1. Comparisons between existing single-augmentor frameworks and our proposed dual-augmentor framework on a toy example. Current single-augmentor methods excel at simulating Target Domain 2 but exhibit limitations in simulating Target Domain 1, closely resembling the source, and Target Domain 3, deviating significantly from the source. In our framework, the weak aug- mentor excels in simulating Target Domain 1, while the strong augmentor effectively imitates both Target Domain 2 and 3. + +[10, 20, 34], human mesh recovery [44, 45], virtual reality [9, 35]. However, the annotated 3D data are often collected in controlled laboratory environments for convenience, featuring indoor settings and limited actions performed by few individuals. As a result, pose estimators trained on these labeled datasets face challenges in generalizing to varied in-the-wild scenarios. Hence, the notion of domain generalization (DG) is pivotal in incorporating knowledge from labeled (source) data into a pose estimator that could generalize well on unseen (target) data. Unlike domain adaptation (DA) which involves the training with target data, DG relies solely on the source data as a reference, without any prior information about the target data. + +Existing DG approaches for 3D HPE [7, 11, 37] conduct substantial augmentations on the source poses to obtain synthesized poses via adversarial training, and incorporate the synthesized poses as complementary to the source poses for HPE model training. However, these methods have several limitations. First, in the context of DG for 3D HPE, there + +is a complete lack of information about poses in target domains. If target poses closely resemble the source (as Target Domain 1 in Fig. 1), poses generated by extensive augmentation notably differ from the target, thereby impeding generalization. Conversely, when target poses significantly deviate from the source distributions (as Target Domain 3 in Fig. 
1), poses generated via insufficient augmentation may not sufficiently explore out-of-source knowledge to simulate the target. Existing methods only use a single augmentor, making it challenging to simultaneously achieve both objectives. Second, the adversarial training between synthesized and source poses constrains the diversity of generation. Existing methods typically employ the generative adversarial network (GAN) [6] structure, which includes one pose generator responsible for pose generation and one discriminator to assist the pose generator by providing feedback. Specifically, the discriminator enforces similarity between synthesized and source poses, aiming to ensure that the generated poses closely resemble the source poses, which harms the exploration of out-of-source knowledge. + +To address these limitations, we propose a novel framework featuring two pose augmentors: the weak augmentor and the strong augmentor. The weak augmentor is designed to simulate target poses closely resembling source poses, while the strong augmentor generates target poses that exhibit significant deviations from source distributions. To delineate their characteristics, differential strategies are employed for generation and discrimination processes, as detailed in Sec. 3.3. Notably, our framework alleviates the constraints on strong-augmented poses by traditional adversarial training methods. Instead of enforcing similarity between the source and all the augmented poses, we utilize weak-augmented poses as an intermediary, enabling discrimination between strong- and weak-augmented poses and facilitating discrimination between source and weak-augmented poses. To optimize the utilization of synthesized poses, we introduce meta-optimization among source, weak-augmented, and strong-augmented poses, as elaborated in Sec. 3.4. Our training process exposes the pose estimator to domain shifts during the optimization processes, thereby enhancing its adaptability to handle domain shifts during the inference stage. Our contributions can be summarized in three main aspects: + +- We propose a novel framework featuring both the weak and strong pose augmentors, which effectively preserves knowledge related to source poses while simultaneously exploring out-of-source distributions through differential strategies for the generation and discrimination processes of the two augmentors. +- We introduce meta-optimization to enhance the utilization of synthesized poses. By simulating domain shifts among source, weak-augmented, and strong-augmented poses during the optimization processes, the pose estima + +tor's generalization ability is further improved. + +- We conduct comprehensive experiments on several benchmark datasets, and the results demonstrate that our approach significantly outperforms state-of-the-art methods by a considerable margin. + +# 2. Related Work + +3D Human Pose Estimation. The widely adopted two-stage technique in 3D HPE, as demonstrated in [23, 36, 38, 40, 43, 46], initially employs 2D human pose estimators [17, 26, 27, 33] for 2D pose predictions, followed by lifting these predictions to 3D poses. Among these approaches, [38] integrates graph-structured semantic information to enhance the estimation process, while [23] utilizes dilated temporal convolutional layers for temporal information encoding, and [43] presents a purely transformer-based 3D approach. 
Moreover, [36] effectively models inter-frame correspondences with a mixed sequence-to-sequence encoder, and recent works such as [40] explore the frequency domain to improve inference efficiency, and [46] employs unified representation learning for 3D human poses. + +Domain Generalization. Current DG methods aim to learn domain-invariant representations and are categorized into three types: domain alignment [21, 41], meta-learning [15, 28], and augmentation strategies [24, 25, 39, 42]. For domain alignment, [41] enhances the conditional invariance of learned features by incorporating an entropy regularization term, leading to improved classifier generalization. [21] iteratively segregates samples into latent domains through clustering. Concerning meta-learning, [15] proposes a model-agnostic training procedure that simulates domain shift during training, whereas [28] applies meta-learning to single-domain generalization. Regarding augmentation strategies, [39] introduces a novel regularization term for adversarial data augmentation derived from the information bottleneck principle, while [42] presents a unique style hallucination module to generate style-diversified samples crucial for generalization. + +Differing from the current DG approaches for 3D HPE that focus solely on augmentations, we also incorporate meta-learning-based approaches to enhance generalization. Cross-domain Learning for 3D Human Pose Estimation. + +Cross-domain learning for 3D HPE is categorized into two types: domain generalization [7, 11, 16, 37] and domain adaptation [2, 4, 18]. In domain generalization, training processes exclusively utilizes source data, and the resulting model is directly applied to infer target data. [37] adjusts various geometry factors of human poses through differentiable operations. [11] applies DH Forward Kinematics [1] to drive 3D pose augmentation and obtain diverse poses. [7] incorporates Counterfactual Risk Minimization [30] to achieve unbiased learning. [16] addresses with network designs like the interpolation sub-net and body-parts grouping + +net. In domain adaptation, labeled source data and unlabeled target data are used simultaneously during the training process. [4] employs generative adversarial network [6] to discriminate between source and target during training. [2] utilizes global position alignment and local pose augmentation to transfer from source to target. [18] employs a multi-hypotheses network along with target-specific source augmentation for the problem. + +In this paper, we focus on domain generalization for 3D HPE. In addition to synthesizing novel 3D poses for better generalization, we also simulate domain shifts by using both source and synthesized poses during optimizations. + +# 3. Methodology + +# 3.1. Preliminaries + +2D-to-3D lifting Paradigm of 3D HPE. Current 2D-to-3D lifting paradigm of 3D HPE [23, 36, 38, 43] assumes that $x_{i}^{s} \in \mathbb{R}^{J \times 2}$ represents the 2D coordinates of $J$ keypoints of a sample in the source domain (2D poses as input), and $y_{i}^{s} \in \mathbb{R}^{J \times 3}$ represents the corresponding 3D positions in the camera coordinate system (3D poses as output), we denote the source domain with $N$ samples as $S = \{(x_{i}^{s}, y_{i}^{s})\}_{i=1}^{N}$ , encompassing $N$ 2D-3D pairs. Moreover, we define the pose estimator as $\mathcal{P}: x_{i}^{s} \mapsto \hat{y}_{i}^{s}$ , where $\hat{y}_{i}^{s}$ represents the predicted corresponding 3D pose positions. 
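To make this notation concrete, the snippet below sketches $\mathcal{P}$ as a plain fully-connected lifting network over the 16-keypoint human model adopted in this work. It is only an illustrative stand-in under stated assumptions: the two-layer MLP and its hidden width are chosen for exposition and are not the single-frame VPose backbone used in our experiments.

```python
import torch
import torch.nn as nn

class LiftingMLP(nn.Module):
    """Toy 2D-to-3D lifting estimator P: (J, 2) keypoints -> (J, 3) joints."""
    def __init__(self, num_joints=16, hidden=1024):
        super().__init__()
        self.num_joints = num_joints
        self.net = nn.Sequential(
            nn.Linear(num_joints * 2, hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, num_joints * 3),
        )

    def forward(self, x2d):
        # x2d: (batch, J, 2) 2D keypoints; returns (batch, J, 3) 3D joints.
        b = x2d.shape[0]
        return self.net(x2d.reshape(b, -1)).reshape(b, self.num_joints, 3)

# Dummy source batch: the supervised signal is the MSE between predicted
# and ground-truth 3D joints, i.e. the objective given below.
estimator = LiftingMLP()
x_src = torch.randn(8, 16, 2)
y_src = torch.randn(8, 16, 3)
loss = nn.functional.mse_loss(estimator(x_src), y_src)
loss.backward()
```

Any 2D-to-3D lifting backbone exposing the same input-output interface can replace this sketch without changing the rest of the formulation.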
For a fully supervised human pose estimation problem, we aim to achieve an ideal $\mathcal{P}$ by solving the following optimization objective: + +$$ +\min _ {\mathcal {P}} \mathbb {E} _ {\left(x _ {i} ^ {s}, y _ {i} ^ {s}\right) \in \mathcal {S}} \mathcal {L} _ {M S E} \left(\mathcal {P} \left(x _ {i} ^ {s}\right), y _ {i} ^ {s}\right), \tag {1} +$$ + +where $\mathcal{L}_{MSE}$ represents the Mean Squared Error (MSE) loss. However, the objective [23, 43] is designed to achieve optimal performance on source poses, rendering it inadequate for addressing the DG problem, as it does not account for the domain gap between source and target domains. + +DG for 3D HPE. Within the paradigm of 2D-to-3D lifting HPE, our primary goal is to derive an estimator $\mathcal{P}$ that demonstrates commendable 3D HPE performance specifically within the target domain $T$ . Under this scenario, the target domain $T = \{(x_j^t,y_j^t)\}_{j = 1}^M$ with $M$ samples can only be used for inference and is not involved in the training process. However, when utilizing solely the original source domain, the pose estimator cannot learn out-of-source distributions, which is essential for achieving good performance on the target domain. Existing methods [7, 11, 16, 37] tend to conduct augmentation to the original source poses to enhance data diversity, thereby improving the estimator's generalization ability. The augmentor is denoted as $\mathcal{A}:y_i^s\mapsto y_i^a$ , while the projection from 3D to 2D via camera parameters (completely known) is defined as $\mathcal{R}:y_i^a\mapsto x_i^a$ . Consequently, the min-max optimization objective for domain + +generalization can be defined as follows: + +$$ +\begin{array}{l} \min _ {\mathcal {P}} \max _ {\mathcal {A}} \mathbb {E} _ {\left(x _ {i} ^ {s}, y _ {i} ^ {s}\right) \in \mathcal {S}} \left[ \mathcal {L} _ {M S E} \left(\mathcal {P} \left(x _ {i} ^ {s}\right), y _ {i} ^ {s}\right) \right. \tag {2} \\ + \mathcal {L} _ {M S E} (\mathcal {P} (\mathcal {R} (\mathcal {A} (y _ {i} ^ {s}))), \mathcal {A} (y _ {i} ^ {s})) ]. \\ \end{array} +$$ + +The objective is a min-max game between the pose augmentor $\mathcal{A}$ and the pose estimator $\mathcal{P}$ , encouraging the estimator $\mathcal{P}$ to learn out-of-source distributions, while conducting augmentations to a significant extent is beneficial to generate more diverse samples, and that is why the loss is minimized with respect to the augmentor $\mathcal{P}$ and maximized with respect to the augmentor $\mathcal{A}$ in the optimization. + +# 3.2. Overview of the Proposed Method + +Existing methods [7, 11, 37] often apply intricate augmentations to the original poses in the source domain, relying on the discrimination between augmented poses and source poses simultaneously. However, this kind of approach raises two concerns. First, as this is a DG problem for 3D HPE, any information about the target domain is entirely unknown. If the target domain bears a striking resemblance to the source domain (like Target Domain 1 in Fig. 1), poses generated by extensive augmentation might hinder the pose estimator's inference on it. Conversely, in cases where the target domain significantly diverges from the source distributions (like Target Domain 3 in Fig. 1), poses generated by insufficient augmentation may fail to adequately explore out-of-source knowledge for the pose estimator. 
Second, when target domain is distant from the source and needs significant augmentations, the adversarial training between source and synthesized poses limits the diversity of generated poses. Specifically, the discriminator enforces similarity between source and synthesized poses, thereby causing the synthesized poses to remain similar to the source poses. + +To tackle these concerns, we propose a novel dual-augmentor framework depicted in Fig. 2. This framework involves two augmentors that generate weak- and strong-augmented poses, enabling the handling of diverse unknown target domains. Additionally, the weak-augmentation module serves as a bridge between strong-augmented and source poses. Specifically, the discrimination between source poses and weak-augmented poses is utilized to update the weak augmentor, while the discrimination between weak- and strong-augmented poses is employed to optimize the strong augmentor. This approach liberates the strong augmentor from heavy reliance on the source domain and enables the exploration of more out-of-source knowledge. Further details regarding the pose augmentation process can be found in Sec. 3.3. + +Having elucidated the methodology for synthesizing poses, the subsequent discourse pivots towards the utilization of these synthesized poses. Previous works [7, 11, 37] overlook the interactions between source poses and aug- + +![](images/f00b92fd08cb0341489c9c94db5f4262d93765d824e775da0c537f54469b9e82.jpg) +Figure 2. Overall framework of our dual-augmentor method. Initially, the original pose undergoes processing through two pose augmentors, resulting in weak- and strong-augmented poses (See Sec. 3.3). The weak augmentor simulates target domains similar to the source domain, while the strong augmentor emulates target domains that deviate significantly from the source distributions. Subsequently, the original pose and the two augmented poses are input to the pose estimator for further meta-optimization (See Sec. 3.4). + +mented poses, dealing with the optimizations of pose estimator on them separately. In contrast, we propose a model-agnostic meta optimization approach that enhances the interactions between source poses and the two types of augmented poses to simulate domain shifts in the optimization and leverage domain-invariant knowledge while maintaining the original 2D-to-3D lifting backbone's structure unchanged. Further details concerning the meta optimization process can be found in Sec. 3.4. + +# 3.3. Pose Augmentation + +![](images/4cefbeab32a0f16b34b0639ebe8ca41db3974403f048d0dcb3a9b02252fdb16f.jpg) +Figure 3. The differentiation of the weak and strong generators. Within each pipeline, denoted as "W-" for weak ones and "S-" for strong ones, there exist four pose states: original (OR), after bone angle operation (BA), after bone length operation (BL) and after rotation and translation operation (RT). For proximate states, similarities are enhanced for both generators. When there is a one-state gap between states, the weak generator continues to enhance similarities, whereas the strong generator enlarges dissimilarities. + +The pose augmentation architecture comprises two pose augmentors: the weak augmentor and the strong augmentor. Each augmentor comprises two components: the generator, responsible for producing diverse synthesized poses to facilitate the training of the pose estimator, and the discriminator, which collaborates with the generator to regu + +late the quality of the generated poses. 
Our objective is to apply differential strategies to the generation (named differential generation) and discrimination (named differential discrimination) of the two augmentors, enabling them to generate weak- and strong-augmented poses. + +Differential Generation: The generation process, as illustrated in Figure 3, consists of three modules in sequence for each generator pipeline: the Bone Angle Generator, Bone Length Generator, and Rotation and Translation Generator, resulting in four statuses in the pipeline: original (OR), after the bone angle operation (BA), after the bone length operation (BL), and after the rotation and translation operation (RT). Existing approaches such as [16, 37] typically treat the entire generation pipeline in an end-to-end manner and only utilize the $(OR, RT)$ pair. In contrast, our method deals with the generation in a more fine-grained fashion. We group statuses into pairs based on their relations: proximate pairs as $PP = \{(OR, BA), (BA, BL), (BL, RT)\}$ , and one-state gap pairs as $OG = \{(OR, BL), (BA, RT)\}$ . + +To begin, we define the measurement of similarity within a pair. Solely relying on conventional absolute position losses, such as the MSE loss, is not adequate in this context for two reasons. First, the three modules within the generator all perform operations on the level of bone vector, not on the joint positions. If one joint undergoes significant position changes after an operation, other joints connected to it will also experience considerable movement, even if the bone vector between them remains stable. In such cases, position-based measurements cannot fully reflect the extent of augmentation based on the bone vector. Second, human poses possess kinematic attributes, and a position-based measurement overlooks the graphical information. Therefore, we introduce the Laplacian weighted similarity measurement. For the human model, it is straightforward to obtain degree matrix $D$ and adjacency matrix $A$ , and the normalized Laplacian matrix can be represented as: + +$$ +W _ {N L} = I - D ^ {- \frac {1}{2}} A D ^ {- \frac {1}{2}}, \tag {3} +$$ + +where $I$ is the identity matrix, and $W_{NL}$ is the normalized Laplacian matrix encoding graphical information. Given a pair of statuses $(st_1, st_2)$ (from either PP or OG), the similarity measurement is defined as: + +$$ +\mathcal {L} _ {s i m} \left(s t _ {1}, s t _ {2}\right) = \underbrace {\| s t _ {1} - s t _ {2} \|} _ {\text {M S E S i m i l a r i t y}} + \underbrace {\| W _ {N L} s t _ {1} - W _ {N L} s t _ {2} \|} _ {\text {L a p l a c i a n W e i g h t e d S i m i l a r i t y}}. \tag {4} +$$ + +To differentiate between the two generators, we apply distinctive strategies. For the weak generator, we enhance similarities for its PP and OG sets to maintain a slight level of augmentation in the synthesized poses, as indicated by: + +$$ +\begin{array}{l} \mathcal {L} _ {w g} = \underset {(s t _ {1}, s t _ {2}) \in P P} {\mathbb {E}} \mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}) \\ + \alpha_ {1} \underset {(s t _ {1}, s t _ {2}) \in O G} {\mathbb {E}} \mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}). 
\\ \end{array} +$$ + +For the strong generator, we enhance similarities within its PP set to ensure the reasonableness of the synthesized output, while enlarging dissimilarities within its OG sets to maintain a significant level of augmentation, expressed as: + +$$ +\begin{array}{l} \mathcal {L} _ {s g} = \underset {(s t _ {1}, s t _ {2}) \in P P} {\mathbb {E}} \mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}) \\ - \alpha_ {2} \underset {(s t _ {1}, s t _ {2}) \in O G} {\mathbb {E}} \mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}). \\ \end{array} +$$ + +$\alpha_{1}$ and $\alpha_{2}$ are trade-off parameters. + +Differential Discrimination: The discrimination process comprises two min-max games [6, 8]: one between the source pose and the weak-augmented poses, and the other between the weak-augmented pose and the strong-augmented pose. We adopt the WGAN-GP [8] structure here. The discrimination losses regarding the source poses $y^{or}$ , weak-augmented poses $y^{wa}$ , and strong-augmented poses $y^{sa}$ are defined as follows: + +$$ +\begin{array}{l} \mathcal {L} _ {w d} = \mathbb {E} \left[ D _ {w a} \left(y ^ {o r}\right) \right] - \mathbb {E} \left[ D _ {w a} \left(y ^ {w a}\right) \right] \tag {7} \\ + \beta_ {1} \mathbb {E} (1 - \| \nabla_ {\hat {y} ^ {w a}} D _ {w a} (\hat {y} ^ {w a}) \|), \\ \end{array} +$$ + +$$ +\begin{array}{r l} \mathcal {L} _ {s d} = & \mathbb {E} [ D _ {s a} (y ^ {w a}) ] - \mathbb {E} [ D _ {s a} (y ^ {s a}) ] \\ & + \beta_ {2} \mathbb {E} (1 - \| \nabla_ {\hat {y} ^ {s a}} D _ {s a} (\hat {y} ^ {s a}) \|). \end{array} \tag {8} +$$ + +Here, $\mathcal{L}_{wd}$ is the discrimination loss between $y^{or}$ and $y^{wa}$ , used to update the weak augmentor, and $D_{wa}$ is the weak discriminator. $\mathcal{L}_{sd}$ is the discrimination loss between $y^{wa}$ and $y^{sa}$ , used to update the strong augmentor, and $D_{sa}$ is the strong discriminator. $\hat{y}^{wa}$ and $\hat{y}^{sa}$ are built via interpolation, such that $\hat{y}^{wa} = \epsilon y^{or} + (1 - \epsilon)y^{wa}$ and $\hat{y}^{sa} = \epsilon y^{wa} + (1 - \epsilon)y^{sa}$ , where $\epsilon$ is randomly drawn from $U[0,1]$ . $\beta_{1}$ and $\beta_{2}$ are trade-off parameters. + +By implementing this discrimination process in two min-max games, the weak augmentor is capable of retaining more source information and alleviating adverse effects + +stemming from irrationally synthesized poses. Simultaneously, the strong augmentor can overcome a strong dependency on the source distributions, and explore out-of-source distributions more effectively. With diverse synthesized poses to simulate potential target poses, it is beneficial for further domain generalization in pose estimation. + +# 3.4. 
Meta Optimization + +# Algorithm 1 Meta Optimization Pseudo Code + +Input: Original source 2D-3D pose pairs $(x^{or},y^{or})$ ; Weak Generator $G_{wa}$ ; Strong Generator $G_{sa}$ + +Init: Pose estimator $\mathcal{P}_t$ , Learning rates $lr_1$ and $lr_2$ , inner loop iteration $k$ , Hyperparameter $\gamma$ + +Output: Updated pose estimator $\mathcal{P}_{t + 2}$ after two-step meta optimization + +1: Generate weak-augmented 3D poses $y^{wa} = G_{wa}(y^{or})$ +2: Project $y^{wa}$ to 2D poses $x^{wa}$ with camera parameters +3: // Meta-train on Source data: +4: Update $\mathcal{P}_t' = \mathcal{P}_t - lr_1\nabla \mathcal{L}_{MSE}(\mathcal{P}_t(x^{or}),y^{or})$ +5: for $i\gets 1,\dots,k$ do + +6: // Meta-test on Weak-augmented data: +7: $\mathcal{L}_{weak - test} = L_{MSE}(\mathcal{P}_t'(x^w)$ +8: end for +9: // Meta update on Source and Weak-augmented data: +10: $\mathcal{L}_{t + 1} = \mathcal{L}_{MSE}(\mathcal{P}_t(x^{or}),y^{or}) + \gamma L_{weak - test}$ +11: $\mathcal{P}_{t + 1} = \mathcal{P}_t - lr_2(\partial \mathcal{L}_{t + 1} / \partial \mathcal{P}_t)$ +12: Generate strong-augmented 3D poses $y^{sa} = G_{sa}(y^{or})$ +13: Project $y^{sa}$ to 2D poses $x^{sa}$ with camera parameters +14: // Meta-train on Weak-augmented data: +15: Update $\mathcal{P}_{t + 1}^{\prime} = \mathcal{P}_{t + 1} - lr_{1}\nabla \mathcal{L}_{MSE}(\mathcal{P}_{t + 1}(x^{wa}),y^{wa})$ +16: for $i\gets 1,\dots,k$ do +17: // Meta-test on Strong-augmented data: +18: $\mathcal{L}_{\text {strong-test }} = L_{MSE}\left(\mathcal{P}_{t+1}^{\prime}\left(x^{sa}\right), y^{sa}\right)$ +19: end for +20: // Meta update on Weak- and Strong-augmented data: +21: $\mathcal{L}_{t + 2} = \mathcal{L}_{MSE}\big(\mathcal{P}_{t + 1}(x^{wa}),y^{wa}\big) + \gamma L_{strong - test}$ +22: $\mathcal{P}_{t + 2} = \mathcal{P}_{t + 1} - lr_2(\partial \mathcal{L}_{t + 2} / \partial \mathcal{P}_{t + 1})$ + +For DG problem in 3D HPE, two principal challenges must be addressed. First, there is the issue of synthesizing data, as detailed in Section 3.3. The second challenge revolves around the effective utilization of synthesized data, a facet often overlooked by current methodologies. Existing DG approaches for 3D HPE [11, 37], conduct the optimization of the pose estimator based on source and synthesized data independently. Unfortunately, this approach lacks mechanisms for fostering interactions between these two optimization processes, resulting in a deficiency of simulated domain shifts in the optimization trajectory. + +In contrast, our proposed model-agnostic strategy incorporates meta-optimization to bolster interactions among source poses, weak-augmented poses, and strong-augmented poses. This process facilitates the learning of + +domain-invariant knowledge during the update of the pose estimator, as outlined in Algorithm 1. The effectiveness of this approach lies in the fact that the objectives in meta-optimization not only aim to minimize losses on source and synthesized poses but also enhance the alignment of optimization directions during training, thus enhancing generalization significantly. + +The algorithm can be dissected into two parts: Lines 1-11 manage interactions between source poses and weakly-augmented poses, while Lines 12-22 address interactions between weak- and strong-augmented poses. This step-by-step approach is taken due to the substantial domain gap between source poses and strong-augmented poses. 
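To make the meta update concrete, the sketch below implements one meta-train / meta-test / meta-update cycle of Algorithm 1 for a generic PyTorch pose estimator. It is a simplified, first-order rendering under stated assumptions: the meta-test gradient is evaluated at an adapted copy of the weights rather than back-propagated through the inner update, and the function and argument names are placeholders, not part of our released implementation.

```python
import copy
import torch
import torch.nn.functional as F

def meta_step(estimator, x_train, y_train, x_test, y_test,
              lr1=1e-4, lr2=5e-4, gamma=1.0):
    """One meta-train / meta-test / meta-update cycle (first-order sketch)."""
    # Meta-train: gradient of the MSE loss on the meta-train split.
    train_loss = F.mse_loss(estimator(x_train), y_train)
    train_grads = torch.autograd.grad(train_loss, list(estimator.parameters()))

    # Inner update: a temporary copy P' = P - lr1 * grad(L_train).
    adapted = copy.deepcopy(estimator)
    with torch.no_grad():
        for p, g in zip(adapted.parameters(), train_grads):
            p.sub_(lr1 * g)

    # Meta-test: evaluate the adapted copy on the meta-test split.
    test_loss = F.mse_loss(adapted(x_test), y_test)
    test_grads = torch.autograd.grad(test_loss, list(adapted.parameters()))

    # Meta update: first-order approximation of the gradient of
    # L_train(P) + gamma * L_test(P'), applied to the original weights.
    with torch.no_grad():
        for p, g_tr, g_te in zip(estimator.parameters(), train_grads, test_grads):
            p.sub_(lr2 * (g_tr + gamma * g_te))
    return train_loss.item(), test_loss.item()
```

Called once with (source, weak-augmented) pose pairs and once with (weak-augmented, strong-augmented) pose pairs, this routine reproduces the two stages of Algorithm 1 in a first-order form.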
Weak-augmented poses serve as a bridge between source poses and strong-augmented poses, alleviating the challenge of directly aligning source and strong-augmented data. By incorporating all three types of poses in the optimization, the pose estimator can effectively generalize across diverse target domains, avoiding overfitting specific pose data types. + +# 4. Experiments + +# 4.1. Datasets and Metrics + +In this paper, we evaluate our approach using several widely-used 3D human pose benchmarks, including Human3.6M [12], MPI-INF-3DHP [22], and 3DPW [31]. Moreover, following previous works [11, 16, 37], we adopt the 16-keypoint human model with Hip joint as the origin. + +Human3.6M is an indoor dataset comprising 3.6 million frames and consisting of 7 subjects denoted as S1, S5, S6, S7, S8, S9, and S11. For the cross-dataset evaluation in Tab. 1, Tab. 2 and Tab. 5, we use S1, S5, S6, S7, S8 as the source domain. In the cross-scenario evaluation on the entire Human3.6M dataset in Tab. 3, S1, S5, S6, S7, S8 are the source domain, while S9 and S11 are the target domain. Mean Per Joint Position Error (MPJPE) and Procrustes-Aligned Mean Per Joint Position Error (PA-MPJPE) are employed as evaluation metrics. For the cross-scenario evaluation on partial Human3.6M, we follow previous works [4, 11, 16] to define two tasks as shown in Tab. 4. One task uses S1 as the source and S5, S6, S7, S8 as the target, while the other task uses S1, S5 as the source and S6, S7, S8 as the target. Both tasks utilize MPJPE as the metric. + +MPI-INF-3DHP (3DHP) is an in-the-wild dataset, and we utilize only its test set for cross-dataset evaluation, as shown in Tab. 1 and Tab. 5, which consists of approximately 3k frames. The results are presented based on three metrics: Percentage of Correct Keypoints (PCK), Area Under the Curve (AUC), and MPJPE. + +3DPW is another in-the-wild dataset featuring more challenging poses and scenes. We utilize it for cross-dataset evaluation, as shown in Tab. 2. Here PA-MPJPE and MPJPE serve as the evaluation metrics. + +# 4.2. Implementation Details + +For all the generators and discriminators, we ensure consistency by employing the same fully-connected layers, aligning with the methodology described in [37]. In the data augmentation process, the learning rate is set to 1e-4 for generators and 2e-4 for discriminators. We set $\alpha_{1}$ and $\alpha_{2}$ to 0.50 and 0.35, respectively, while both $\beta_{1}$ and $\beta_{2}$ are assigned a value of 4. During the meta optimization process, we utilize a learning rate of 1e-4 for $lr_{1}$ and 5e-4 for $lr_{2}$ . The trade-off parameter $\gamma$ and the inner loop iteration $k$ are both set to 1. + +Moreover, we employ the Adam optimizer [13] for data augmentation and the AdamW optimizer [19] for meta optimization. Our experiments are conducted with a batch size of 1024 over 60 epochs. We initialize the pose estimator with a warm-up phase lasting two epochs for supervised learning on source data. From the third epoch onwards, data augmentation and meta-optimization begin. + +# 4.3. Quantitative Results + +Table 1. Cross-dataset evaluation on 3DHP dataset. + +
| Method | Venue | DG | PCK ↑ | AUC ↑ | MPJPE ↓ |
| --- | --- | --- | --- | --- | --- |
| VPose (1-frame) [23] | CVPR'19 | × | 80.9 | 42.5 | 102.3 |
| EvoSkeleton [17] | CVPR'20 |  | 81.2 | 46.1 | 99.7 |
| RepNet [32] | CVPR'19 |  | 81.8 | 54.8 | 92.5 |
| PoseAug [37] | TPAMI'23 | ✓ | 88.6 | 57.3 | 73.0 |
| DH-AUG [11] | ECCV'22 | ✓ | 89.5 | 57.9 | 71.2 |
| PoseGU [7] | CVIU'23 | ✓ | 86.3 | 57.2 | 75.0 |
| CEE-Net [16] | AAAI'23 | ✓ | 89.9 | 58.2 | 69.7 |
| Ours | - | ✓ | 92.9 | 60.7 | 63.1 |
+ +Table 2. Cross-dataset evaluation on 3DPW dataset. + +
| Method | Venue | DG | PA-MPJPE ↓ | MPJPE ↓ |
| --- | --- | --- | --- | --- |
| VPose (1-frame) [23] | CVPR'19 | × | 94.6 | 125.7 |
| VIBE [14] | CVPR'20 |  | 82.3 | 122.5 |
| PoseAug [37] | TPAMI'23 | ✓ | 81.6 | 119.0 |
| DH-AUG [11] | ECCV'22 | ✓ | 79.3 | 112.8 |
| PoseGU [7] | CVIU'23 | ✓ | 92.3 | - |
| CEE-Net [16] | AAAI'23 | ✓ | 76.8 | - |
| Ours | - | ✓ | 73.2 | 106.6 |
+ +Table 3. Cross-scenario evaluation on Entire Human3.6M dataset. S1,S5,S6,S7,S8 are the source and S9,S11 are the target. + +
| Method | Venue | DG | MPJPE ↓ | PA-MPJPE ↓ |
| --- | --- | --- | --- | --- |
| VPose (1-frame) [23] | CVPR'19 | × | 52.7 | 40.9 |
| EvoSkeleton [17] | CVPR'20 |  | 50.9 | 38.0 |
| PoseAug [37] | TPAMI'23 | ✓ | 50.2 | 39.1 |
| DH-AUG [11] | ECCV'22 | ✓ | 49.8 | 38.3 |
| CEE-Net [16] | AAAI'23 | ✓ | 47.3 | 36.8 |
| Ours | - | ✓ | 44.4 | 34.6 |
+ +Cross-dataset evaluation results. In cross-dataset evaluations, source and target come from different datasets. Following identical paradigms from existing methods [11, 16, 37], we use ground truth 2D keypoints as input, single-frame VPose [23] as the lifting backbone, and Human3.6M as the source dataset. Our method demonstrates notable performance in all metrics, as presented in Tab. 1 and Tab. 2. Notably, our approach outperforms CEE-Net by $3.0\%$ in + +![](images/69ed370ea71a3814b8e029e508885cc2818fbde7e35f9e1e43c2be4f70f5de50.jpg) +2D Predictions + +![](images/f5f923dfd5ef95f1f23cf09a13d974c2d7a2b89a289c2917d8ea3f1995f80b11.jpg) +Source-only + +![](images/f12b9d6f32a368726f6e917db019c899ba069f5e168c5c5db8e2e348a7e5ce29.jpg) +y PoseAug + +![](images/a867fb9336b76f49bdef0eb9f5ca4a98ce74d67ff5d93e77159c51affc0563ee.jpg) +Aug DH-AUG + +![](images/4b796d4bc4f48b9aa7f01a990953389b7021f4efef90a74237f469707e67bcd1.jpg) +Ours + +![](images/b5b53cd399848bed410593791d913ddb58cea6fa53e9750ba6f16fbec964d5d8.jpg) +GT + +![](images/8b67c42b1404be43128773873db50b8941e0ca47c9d424a40e31c08fc054edcb.jpg) +2D Predictions + +![](images/22c84e246d36d9a940072f10243dca940db9f1f00be9fa00018446e74b2b8a20.jpg) +Source-only + +![](images/729ee476b5eaeb79e8f0e1da7401f939710b23069a4c3826c52b4d218bf50b34.jpg) +Aug. +DH-AUG + +![](images/1d3973dd2e03defbb681c3d390c711fc12ae79f2d36671cf16ce39050226e77f.jpg) + +![](images/1aa113f5f3e1dd54d0f99b0fdc3faa9b290f192a48b343006994985d37504da9.jpg) +Ours + +![](images/49fc8a54aee6614591d8d3927daa65202e3b95baec84945c5bcabcb2a82d3e1b.jpg) +GT + +![](images/931d87359b051ce269c7c90069438e97dfe52456d9bf7de8e22e003dbe54d4c8.jpg) +2D Predictions +Figure 5. Results on Cross-scenario evaluation. Left is for task S1,S5,S6,S7,S8 $\rightarrow$ S9,S11, and right is for task S1,S5 $\rightarrow$ S6,S7,S8. + +![](images/9a6564c1f032822faa2f16bf41617166d653917e6cbb2ad5d22335fe9bd90e92.jpg) +Figure 4. Qualitative results on Cross-dataset evaluation. Left is 3DHP dataset, and right is 3DPW dataset. +Source-only + +![](images/6d44c3cf69ab1ed91effee01703bd7a3e01fd67d63ac9b736b05c007116f522f.jpg) +PoseAug + +![](images/15bd7399286acf4524b3604d612a55b22f5b1371bea43ed91d58fd6c2ad0a3d0.jpg) +DH-AUG + +![](images/33bb676595dd8358d94901149c89c4f6c70f0dfdb0131e37e72c9e87baa66d28.jpg) +Ours + +![](images/a86d6f99b3618f6a7280d3e64c4f6c46e67f2d9b7c8db81fcd43e88c767393fd.jpg) +GT + +![](images/358d4cb812d51a63539bbf1cd1fa79b57dc51c312f1f09705d42ad775845f3b2.jpg) +2D Predictions + +![](images/a8ff924ae78809ef520009d05e5aba4bec4bfeb7730d1ee1c64eab2eb123eda5.jpg) +Source-only + +![](images/6d2def2010f88a8f77d2255e89fda3a198eb72c03ee0c328cefb70132fb2411d.jpg) +PoseAug + +![](images/cb116c9311744f80b515f493986436d19d132f590fde21a99a0305eb7c1db38c.jpg) +DH-AUG + +![](images/87595cde366a5d8aacd989384ae4920bfa2b2d0c6be7a4ffc4d3f96f37c7b884.jpg) +Ours + +![](images/c0997b28caa17fd803835075fb7fcf80e79ab248f9dfcb280ee449c981dd6edf.jpg) +GT + +Table 4. Cross-scenario evaluation on Partial Human3.6M dataset. For the task "S1", S1 is the source and S5, S6, S7, S8 are the target. For the task "S1+S5", S1 and S5 are the source, and S6, S7, S8 are the target. MPJPE $(\downarrow)$ is used for evaluation. + +
| Method | Venue | DG | S1 | S1+S5 |
| --- | --- | --- | --- | --- |
| VPose (1-frame) [23] | CVPR'19 | × | 65.2 | 57.9 |
| EvoSkeleton [17] | CVPR'20 |  | 61.5 | 54.6 |
| PoseAug [37] | TPAMI'23 | ✓ | 56.7 | 51.3 |
| DH-AUG [11] | ECCV'22 | ✓ | 52.2 | 47.0 |
| CEE-Net [16] | AAAI'23 | ✓ | 51.9 | 46.7 |
| Ours | - | ✓ | 50.3 | 45.4 |
+ +Table 5. Cross-dataset evaluation with MPJPE (↓) on 3DHP with varied 2D predictions and 2D-to-3D backbones (1-frame). + +
| Method | DG | DET [5] | CPN [3] | HR [33] | GT |
| --- | --- | --- | --- | --- | --- |
| SemGCN [38] | × | 101.9 | 98.7 | 95.6 | 97.4 |
| SemGCN + PoseAug [37] | ✓ | 89.9 | 89.3 | 89.1 | 86.1 |
| SemGCN + CEE-generator [16] | ✓ | 83.6 | 82.8 | 82.4 | 81.3 |
| SemGCN + DH-AUG [11] | ✓ | 79.7 | 76.7 | 73.0 | 71.3 |
| SemGCN + Ours | ✓ | 76.5 | 74.1 | 70.7 | 68.9 |
| VPose [23] | × | 92.6 | 89.8 | 85.6 | 86.6 |
| VPose + PoseAug [37] | ✓ | 78.3 | 78.4 | 73.2 | 73.0 |
| VPose + CEE-generator [16] | ✓ | 75.6 | 75.2 | 71.2 | 71.4 |
| VPose + DH-AUG [11] | ✓ | 76.7 | 74.8 | 71.1 | 71.2 |
| VPose + Ours | ✓ | 72.4 | 70.9 | 62.4 | 63.1 |
| PoseFormer [43] | × | 91.9 | 89.2 | 84.2 | 85.7 |
| PoseFormer + PoseAug [37] | ✓ | 77.7 | 77.5 | 72.1 | 72.3 |
| PoseFormer + CEE-generator [16] | ✓ | - | - | - | - |
| PoseFormer + DH-AUG [11] | ✓ | 75.6 | 74.8 | 71.6 | 72.0 |
| PoseFormer + Ours | ✓ | 72.2 | 70.5 | 62.8 | 63.4 |
| MixSTE [36] | × | 90.6 | 87.4 | 82.0 | 84.0 |
| MixSTE + PoseAug [37] | ✓ | 76.1 | 76.3 | 71.7 | 71.6 |
| MixSTE + CEE-generator [16] | ✓ | - | - | - | - |
| MixSTE + DH-AUG [11] | ✓ | 74.8 | 74.4 | 70.9 | 70.7 |
| MixSTE + Ours | ✓ | 70.5 | 68.2 | 60.4 | 61.0 |
+ +PCK and $2.5\%$ in AUC, and reduces MPJPE by $6.6\mathrm{mm}$ in the 3DHP task. In the case of 3DPW, our method shows an improvement of $3.6\mathrm{mm}$ in PA-MPJPE compared to CEE-Net [16]. While CEE-Net [16] and PoseGU [7] do not disclose their codes or report their results on MPJPE, it is evident that our method surpasses DH-AUG [11] by $6.2\mathrm{mm}$ . + +Cross-scenario evaluation results. In cross-scenario evaluations, source and target come from different subsets of the same dataset. Maintaining consistency with previous works, we utilize ground truth 2D keypoints as input and single-frame VPose [23] as the 2D-to-3D lifting network. + +For the situation of using Entire Human3.6M in Tab. 3, our method demonstrates superior performance compared to CEE-Net [16] with a $2.9\mathrm{mm}$ reduction in MPJPE and a $2.2\mathrm{mm}$ improvement in PA-MPJPE. In the case of using partial Human3.6M in Tab. 4, our approach surpasses CEE-Net [16] by $1.6\mathrm{mm}$ in the S1 task and $1.3\mathrm{mm}$ in the S1+S5 task based on the MPJPE metric. + +Results with varied 2D predictions and backbones. The results presented in Tables 1 to 4 are confined to the usage of ground truth 2D input and single-frame VPose backbone, which may raise concerns about the universality of the proposed method. To address this concern, we assess the performance of our approach with various 2D predictions such as DET [5], CPN [3], HRNet [33], and diverse lifting backbones including SemGCN [38], PoseFormer [43], and MixSTE [36], as displayed in Table 5. In this evaluation, 3DHP serves as the dataset, and MPJPE is the metric used. Notably, all the listed backbones are single-frame versions. As CEE-Net [16] only provides results for its generation part, CEE-generator, and does not offer open-source code, we have included partial results of CEE-generator. From Table 5, it is evident that our method surpasses all the existing methods, demonstrating the robustness of our framework across various settings. + +# 4.4. Qualitative Results + +Fig. 4 shows qualitative results on cross-dataset evaluation (3DHP on the left side and 3DPW on the right side), while Fig. 5 displays qualitative results on cross-scenario evaluation (S1,S5,S6,S7,S8 $\rightarrow$ S9,S11 on the left side and S1,S5 $\rightarrow$ S6,S7,S8 on the right side). HRNet [33] is applied as the 2D pose estimator and VPose [23] is the 2D-to-3D lifting backbone. We use Source-only, PoseAug [37], DH-AUG [11], Ours, and Ground Truth (GT) for qualitative comparison. Because CEE-Net does not provide source codes or pretrained models, we cannot generate visual examples from it. It is evident that our method outperforms other baselines significantly. + +# 4.5. Ablation Study + +Ablation study on the overall framework. The ablation study is conducted to illustrate the functions of three proposed modules: differential generation (DiffGen) in Sec. 3.3, differential discrimination (DiffDis) in Sec. 3.3, and meta optimization (MetaOpt) in Sec. 3.4. In Tab. 6, we apply ground truth as 2D predictions and VPose as the backbone. The absence of DiffGen leads to a decrease in PCK and AUC by $2.4\%$ and $2.1\%$ respectively, accompanied by a $5.9\mathrm{mm}$ increase in MPJPE on 3DHP, while it increases PA-MPJPE and MPJPE by $3.1\mathrm{mm}$ and $5.2\mathrm{mm}$ separately on 3DPW. Similarly, the exclusion of DiffDis results in a decrease of $1.7\%$ in both PCK and AUC, with a corresponding $4.1\mathrm{mm}$ increase in MPJPE on 3DHP. 
As for 3DPW, the removal causes a degradation of $1.9\mathrm{mm}$ in PA-MPJPE and $3.0\mathrm{mm}$ in MPJPE. Besides, removing MetaOpt leads to a decline of $1.2\%$ in PCK and $0.8\%$ in AUC, along with a $2.4\mathrm{mm}$ increase in MPJPE on 3DHP, and increases of $1.3\mathrm{mm}$ and $1.8\mathrm{mm}$ in PA-MPJPE and MPJPE on 3DPW, respectively. These results show that each module plays a critical role in obtaining better generalization.

Table 6. Overall framework ablation study on 3DHP and 3DPW.

| Method | PCK ↑ (3DHP) | AUC ↑ (3DHP) | MPJPE ↓ (3DHP) | PA-MPJPE ↓ (3DPW) | MPJPE ↓ (3DPW) |
| --- | --- | --- | --- | --- | --- |
| Ours w/o DiffGen | 90.5 | 58.6 | 69.0 | 76.3 | 111.8 |
| Ours w/o DiffDis | 91.2 | 59.0 | 67.3 | 75.1 | 109.6 |
| Ours w/o MetaOpt | 91.7 | 59.9 | 65.5 | 74.5 | 108.4 |
| Ours | 92.9 | 60.7 | 63.1 | 73.2 | 106.6 |
+ +Ablation study on the generators. There exist two generators in our framework, and each with two pair groups. In this part, we discuss the functions of proximate pairs in weak augmentor (W-PP), proximate pairs in strong aug-. mentor (S-PP), one-state gap pairs in weak augmentor (WOG), and one-state gap pairs in strong augmentor (S-OG). + +Table 7. Generators' ablation study on 3DHP and 3DPW + +
| Method | PCK ↑ (3DHP) | AUC ↑ (3DHP) | MPJPE ↓ (3DHP) | PA-MPJPE ↓ (3DPW) | MPJPE ↓ (3DPW) |
| --- | --- | --- | --- | --- | --- |
| Ours w/o W-PP | 88.3 | 57.5 | 72.6 | 81.7 | 118.8 |
| Ours w/o S-PP | 90.8 | 58.2 | 71.3 | 78.1 | 111.0 |
| Ours w/o W-OG | 92.1 | 59.6 | 65.8 | 74.7 | 108.7 |
| Ours w/o S-OG | 91.4 | 58.9 | 68.2 | 75.4 | 109.5 |
| Ours | 92.9 | 60.7 | 63.1 | 73.2 | 106.6 |
+ +In Table 7, excluding W-PP or S-PP leads to a significant decline in PCK by $4.6\%$ and $2.1\%$ , and in AUC by $3.2\%$ and $2.5\%$ respectively, accompanied by a notable increase of $9.5\mathrm{mm}$ and $8.2\mathrm{mm}$ in MPJPE separately on 3DHP. These results emphasize the critical role of maintaining similarity in proximate pairs for both weak and strong augmentors, serving as the fundamental basis for generating effective and reasonable synthesized poses. Moreover, the absence of W-OG leads to a decline in PCK and AUC by $0.8\%$ and $1.1\%$ respectively, with a corresponding $1.7\mathrm{mm}$ increase in the MPJPE on 3DHP. The removal of S-OG results in a decrease in PCK and AUC scores by $1.5\%$ and $1.8\%$ respec + +tively, along with a $3.4\mathrm{mm}$ increase in MPJPE on 3DHP. These results highlight the significance of maintaining differentiation between the weak augmentor and the strong augmentor during the generation process, where enlarging dissimilarity in S-OG is more important in discriminating these two generators. + +Ablation study on the number of augmentors. Comparisons were conducted between our dual-augmentor framework and single-augmentor frameworks. Alongside our proposed framework, two single-augmentor frameworks were considered in the ablation study, utilizing either the weak-augmentor (WA) or the strong-augmentor (SA). The discrimination and meta-optimization processes exclusively involved source poses and one category of synthesized poses. The results, using ground truth as 2D predictions and VPose as the backbone, are presented in Table 8. + +Table 8. Ablation study of number of augmentors on 3DHP and 3DPW + +
| Method | PCK ↑ (3DHP) | AUC ↑ (3DHP) | MPJPE ↓ (3DHP) | PA-MPJPE ↓ (3DPW) | MPJPE ↓ (3DPW) |
| --- | --- | --- | --- | --- | --- |
| WA | 87.3 | 56.0 | 74.5 | 80.5 | 117.7 |
| SA | 89.8 | 57.8 | 71.0 | 79.1 | 111.4 |
| Ours | 92.9 | 60.7 | 63.1 | 73.2 | 106.6 |
+ +From Table 8, it is evident that our proposed framework surpasses both WA and SA significantly, underscoring the superiority of employing two augmentors over a single aug- mentor in addressing DG for 3D HPE. Furthermore, SA outperforms WA, emphasizing the greater significance of exploring out-of-source distributions compared to retaining source-relevant knowledge in cross-dataset tasks. + +# 5. Conclusion + +In this paper, we propose a novel dual-augmentor framework designed to enhance domain generalization in 3D human pose estimation. Our framework addresses the critical aspects of data augmentation and the effective utilization of synthesized data. To achieve this, we implement distinctive strategies for the weak and strong generators, ensuring the preservation of source-specific information while simultaneously exploring out-of-source distributions. Moreover, we incorporate meta-optimization techniques to facilitate enhanced interaction among source data, weak-augmented data, and strong-augmented data, thereby simulating domain shifts in the training of pose estimator and fostering the acquisition of domain-invariant knowledge. Extensive experimentation and comprehensive analysis conducted across multiple datasets demonstrate the superior performance of our proposed approach over existing state-of-the-art methods. + +Acknowledgements This material is based upon work supported by the National Science Foundation under Grant CNS-1910844. + +# References + +[1] Maher Baili, Philippe Wenger, and Damien Chablat. A classification of 3r orthogonal manipulators by the topology of their workspace. In IEEE International Conference on Robotics and Automation, 2004. Proceedings. ICRA'04. 2004, pages 1933-1938. IEEE, 2004. 2 +[2] Wenhao Chai, Zhongyu Jiang, Jenq-Neng Hwang, and Gaoang Wang. Global adaptation meets local generalization: Unsupervised domain adaptation for 3d human pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14655-14665, 2023. 2, 3 +[3] Yilun Chen, Zhicheng Wang, Yuxiang Peng, Zhiqiang Zhang, Gang Yu, and Jian Sun. Cascaded pyramid network for multi-person pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7103-7112, 2018. 7 +[4] Mohsen Gholami, Bastian Wandt, Helge Rhodin, Rabab Ward, and Z Jane Wang. Adaptpose: Cross-dataset adaptation for 3d human pose estimation by learnable motion generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13075-13085, 2022. 2, 3, 6 +[5] Ross Girshick, Ilija Radosavovic, Georgia Gkioxari, Piotr Dólar, and Kaiming He. Detector. https://github.com/facebookresearch/detectron, 2018.7 +[6] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. Advances in neural information processing systems, 27, 2014. 2, 3, 5 +[7] Shannan Guan, Haiyan Lu, Linchao Zhu, and Gengfa Fang. Posegu: 3d human pose estimation with novel human pose generator and unbiased learning. Computer Vision and Image Understanding, 233:103715, 2023. 1, 2, 3, 6, 7 +[8] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. Advances in neural information processing systems, 30, 2017. 5 +[9] Vladimir Guzov, Aymen Mir, Torsten Sattler, and Gerard Pons-Moll. 
Human positioning system (hps): 3d human pose estimation and self-localization in large scenes from body-mounted sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4318-4329, 2021. 1 +[10] Yilei Hua, Wenhan Wu, Ce Zheng, Aidong Lu, Mengyuan Liu, Chen Chen, and Shiqian Wu. Part aware contrastive learning for self-supervised action recognition. In International Joint Conference on Artificial Intelligence, 2023. 1 +[11] Linzhi Huang, Jiahao Liang, and Weihong Deng. Dh-aug: Dh forward kinematics model driven augmentation for 3d human pose estimation. In European Conference on Computer Vision, pages 436-453. Springer, 2022. 1, 2, 3, 5, 6, 7 +[12] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environ- + +ments. IEEE transactions on pattern analysis and machine intelligence, 36(7):1325-1339, 2013. 6 +[13] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6 +[14] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. Vibe: Video inference for human body pose and shape estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5253-5263, 2020. 6 +[15] Da Li, Yongxin Yang, Yi-Zhe Song, and Timothy Hospedales. Learning to generalize: Meta-learning for domain generalization. In Proceedings of the AAAI conference on artificial intelligence, 2018. 2 +[16] Haolun Li and Chi-Man Pun. Cee-net: Complementary end-to-end network for 3d human pose generation and estimation. Proceedings of the AAAI Conference on Artificial Intelligence, 37(1):1305-1313, 2023. 2, 3, 4, 6, 7 +[17] Shichao Li, Lei Ke, Kevin Pratama, Yu-Wing Tai, Chi-Keung Tang, and Kwang-Ting Cheng. Cascaded deep monocular 3d human pose estimation with evolutionary training data. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6173–6183, 2020. 2, 6, 7 +[18] Hanbing Liu, Jun-Yan He, Zhi-Qi Cheng, Wangmeng Xiang, Qize Yang, Wenhao Chai, Gaoang Wang, Xu Bao, Bin Luo, Yifeng Geng, et al. Posynda: Multi-hypothesis pose synthesis domain adaptation for robust 3d human pose estimation. In Proceedings of the ACM International Conference on Multimedia, 2023. 2, 3 +[19] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2018. 6 +[20] Zhengzhi Lu, He Wang, Ziyi Chang, Guoan Yang, and Hubert P. H. Shum. Hard no-box adversarial attack on skeleton-based human action recognition with skeleton-motion-informed gradient. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4597-4606, 2023. 1 +[21] Toshihiko Matsuura and Tatsuya Harada. Domain generalization using a mixture of multiple latent domains. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 11749-11756, 2020. 2 +[22] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 2017 international conference on 3D vision (3DV), pages 506-516. IEEE, 2017. 6 +[23] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 3d human pose estimation in video with temporal convolutions and semi-supervised training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7753-7762, 2019. 
2, 3, 6, 7 +[24] Qucheng Peng. Multi-source and Source-Private Cross-Domain Learning for Visual Recognition. PhD thesis, Purdue University, 2022. 2 + +[25] Qucheng Peng, Zhengming Ding, Lingjuan Lyu, Lichao Sun, and Chen Chen. Rain: regularization on input and network for black-box domain adaptation. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, pages 4118–4126, 2023. 2 +[26] Qucheng Peng, Ce Zheng, and Chen Chen. Source-free domain adaptive human pose estimation. In 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pages 4803-4813. IEEE, 2023. 2 +[27] Ekkasit Pinyoanuntapong, Ayman Ali, Kalvik Jakkala, Pu Wang, Minwoo Lee, Qucheng Peng, Chen Chen, and Zhi Sun. Gaitsada: Self-aligned domain adaptation for mmwave gait recognition. In 2023 IEEE 20th International Conference on Mobile Ad Hoc and Smart Systems (MASS), pages 218-226. IEEE, 2023. 2 +[28] Fengchun Qiao, Long Zhao, and Xi Peng. Learning to learn single domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12556-12565, 2020. 2 +[29] Chi Su, Jianing Li, Shiliang Zhang, Junliang Xing, Wen Gao, and Qi Tian. Pose-driven deep convolutional model for person re-identification. In Proceedings of the IEEE international conference on computer vision, pages 3960–3969, 2017. 1 +[30] Adith Swaminathan and Thorsten Joachims. Counterfactual risk minimization: Learning from logged bandit feedback. In International Conference on Machine Learning, pages 814-823. PMLR, 2015. 2 +[31] Timo Von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In Proceedings of the European conference on computer vision (ECCV), pages 601-617, 2018. 6 +[32] Bastian Wandt and Bodo Rosenhahn. Repnet: Weakly supervised training of an adversarial reprojection network for 3d human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7782-7791, 2019. 6 +[33] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, et al. Deep high-resolution representation learning for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 43(10):3349-3364, 2020. 2, 7 +[34] Hong Yan, Yang Liu, Yushen Wei, Zhen Li, Guanbin Li, and Liang Lin. Skeletonmae: Graph-based masked autoencoder for skeleton sequence pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5606-5618, 2023. 1 +[35] Hongwei Yi, Chun-Hao P Huang, Shashank Tripathi, Lea Hering, Justus Thies, and Michael J Black. Mime: Human-aware 3d scene generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12965-12976, 2023. 1 +[36] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Jun-song Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. In Proceedings of + +the IEEE/CVF conference on computer vision and pattern recognition, pages 13232-13242, 2022. 2, 3, 7 +[37] Jianfeng Zhang, Kehong Gong, Xinchao Wang, and Jiashi Feng. Learning to augment poses for 3d human pose estimation in images and videos. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(8):10012-10026, 2023. 1, 2, 3, 4, 5, 6, 7 +[38] Long Zhao, Xi Peng, Yu Tian, Mubbasir Kapadia, and Dimitris N Metaxas. 
Semantic graph convolutional networks for 3d human pose regression. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3425-3435, 2019. 2, 3, 7 +[39] Long Zhao, Ting Liu, Xi Peng, and Dimitris Metaxas. Maximum-entropy adversarial data augmentation for improved generalization and robustness. Advances in Neural Information Processing Systems, 33:14435–14447, 2020. 2 +[40] Qitao Zhao, Ce Zheng, Mengyuan Liu, Pichao Wang, and Chen Chen. Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8877-8886, 2023. 2 +[41] Shanshan Zhao, Mingming Gong, Tongliang Liu, Huan Fu, and Dacheng Tao. Domain generalization via entropy regularization. Advances in Neural Information Processing Systems, 33:16096-16107, 2020. 2 +[42] Yuyang Zhao, Zhun Zhong, Na Zhao, Nicu Sebe, and Gim Hee Lee. Style-hallucinated dual consistency learning for domain generalized semantic segmentation. In European Conference on Computer Vision, pages 535-552. Springer, 2022. 2 +[43] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11656-11665, 2021. 2, 3, 7 +[44] Ce Zheng, Xianpeng Liu, Guo-Jun Qi, and Chen Chen. Potter: Pooling attention transformer for efficient human mesh recovery. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 1 +[45] Ce Zheng, Matias Mendieta, Taojiannan Yang, Guo-Jun Qi, and Chen Chen. Feater: An efficient network for human reconstruction via feature map-based transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 1 +[46] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: A unified perspective on learning human motion representations. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 
2 \ No newline at end of file diff --git a/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/images.zip b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..50500126d9b58430e63570a4dda3204a14d107aa --- /dev/null +++ b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08a06770748290406856cea49b391bd6a822c2eec4132f5ee32bc62e7d96ec45 +size 536714 diff --git a/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/layout.json b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..90163a658f952c41d5712bdc57f7c517e173ef2b --- /dev/null +++ b/2024/A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation/layout.json @@ -0,0 +1,11164 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 56, + 103, + 538, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 103, + 538, + 138 + ], + "spans": [ + { + "bbox": [ + 56, + 103, + 538, + 138 + ], + "type": "text", + "content": "A Dual-Augmentor Framework for Domain Generalization in 3D Human Pose Estimation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 203, + 162, + 391, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 162, + 391, + 175 + ], + "spans": [ + { + "bbox": [ + 203, + 162, + 391, + 175 + ], + "type": "text", + "content": "Qucheng Peng, Ce Zheng, Chen Chen" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 126, + 175, + 467, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 175, + 467, + 190 + ], + "spans": [ + { + "bbox": [ + 126, + 175, + 467, + 190 + ], + "type": "text", + "content": "Center for Research in Computer Vision, University of Central Florida" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 148, + 191, + 445, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 191, + 445, + 202 + ], + "spans": [ + { + "bbox": [ + 148, + 191, + 445, + 202 + ], + "type": "text", + "content": "{qucheng.peng,ce.zheng}@ucf.edu,chen.chen@crcv.ucf.edu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "spans": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 257, + 289, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 257, + 289, + 449 + ], + "spans": [ + { + "bbox": [ + 46, + 257, + 289, + 449 + ], + "type": "text", + "content": "3D human pose data collected in controlled laboratory settings present challenges for pose estimators that generalize across diverse scenarios. To address this, domain generalization is employed. Current methodologies in domain generalization for 3D human pose estimation typically utilize adversarial training to generate synthetic poses for training. Nonetheless, these approaches exhibit several limitations. First, the lack of prior information about the target domain complicates the application of suitable augmentation through a single pose augmentor; affecting generalization on target domains. 
Moreover, adversarial training's discriminator tends to enforce similarity between source and synthesized poses, impeding the exploration of out-of-source distributions. Furthermore, the pose estimator's optimization is not exposed to domain shifts, limiting its overall generalization ability." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 450, + 289, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 450, + 289, + 617 + ], + "spans": [ + { + "bbox": [ + 46, + 450, + 289, + 617 + ], + "type": "text", + "content": "To address these limitations, we propose a novel framework featuring two pose augmentors: the weak and the strong augmentors. Our framework employs differential strategies for generation and discrimination processes, facilitating the preservation of knowledge related to source poses and the exploration of out-of-source distributions without prior information about target poses. Besides, we leverage meta-optimization to simulate domain shifts in the optimization process of the pose estimator, thereby improving its generalization ability. Our proposed approach significantly outperforms existing methods, as demonstrated through comprehensive experiments on various benchmark datasets. Our code will be released at https://github.com/davidpengucf/DAF-DG." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 644, + 128, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 644, + 128, + 656 + ], + "spans": [ + { + "bbox": [ + 47, + 644, + 128, + 656 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": "3D human pose estimation (HPE) is the process of predicting the 3D coordinates of human joints from images or videos. It serves as the foundation for various applications including person re-identification [29], action recognition" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 228, + 545, + 378 + ], + "blocks": [ + { + "bbox": [ + 306, + 228, + 545, + 378 + ], + "lines": [ + { + "bbox": [ + 306, + 228, + 545, + 378 + ], + "spans": [ + { + "bbox": [ + 306, + 228, + 545, + 378 + ], + "type": "image", + "image_path": "0150271f669295bceb3aa7fe0418a4dd03c382458b5585d610ad84e6a19a6c08.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 380, + 547, + 468 + ], + "lines": [ + { + "bbox": [ + 305, + 380, + 547, + 468 + ], + "spans": [ + { + "bbox": [ + 305, + 380, + 547, + 468 + ], + "type": "text", + "content": "Figure 1. Comparisons between existing single-augmentor frameworks and our proposed dual-augmentor framework on a toy example. Current single-augmentor methods excel at simulating Target Domain 2 but exhibit limitations in simulating Target Domain 1, closely resembling the source, and Target Domain 3, deviating significantly from the source. In our framework, the weak aug- mentor excels in simulating Target Domain 1, while the strong augmentor effectively imitates both Target Domain 2 and 3." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 483, + 547, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 547, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 547, + 640 + ], + "type": "text", + "content": "[10, 20, 34], human mesh recovery [44, 45], virtual reality [9, 35]. However, the annotated 3D data are often collected in controlled laboratory environments for convenience, featuring indoor settings and limited actions performed by few individuals. As a result, pose estimators trained on these labeled datasets face challenges in generalizing to varied in-the-wild scenarios. Hence, the notion of domain generalization (DG) is pivotal in incorporating knowledge from labeled (source) data into a pose estimator that could generalize well on unseen (target) data. Unlike domain adaptation (DA) which involves the training with target data, DG relies solely on the source data as a reference, without any prior information about the target data." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 641, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 713 + ], + "type": "text", + "content": "Existing DG approaches for 3D HPE [7, 11, 37] conduct substantial augmentations on the source poses to obtain synthesized poses via adversarial training, and incorporate the synthesized poses as complementary to the source poses for HPE model training. However, these methods have several limitations. First, in the context of DG for 3D HPE, there" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 1, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 1, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 1, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2240" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 312 + ], + "type": "text", + "content": "is a complete lack of information about poses in target domains. If target poses closely resemble the source (as Target Domain 1 in Fig. 1), poses generated by extensive augmentation notably differ from the target, thereby impeding generalization. Conversely, when target poses significantly deviate from the source distributions (as Target Domain 3 in Fig. 
1), poses generated via insufficient augmentation may not sufficiently explore out-of-source knowledge to simulate the target. Existing methods only use a single augmentor, making it challenging to simultaneously achieve both objectives. Second, the adversarial training between synthesized and source poses constrains the diversity of generation. Existing methods typically employ the generative adversarial network (GAN) [6] structure, which includes one pose generator responsible for pose generation and one discriminator to assist the pose generator by providing feedback. Specifically, the discriminator enforces similarity between synthesized and source poses, aiming to ensure that the generated poses closely resemble the source poses, which harms the exploration of out-of-source knowledge." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 315, + 289, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 315, + 289, + 590 + ], + "spans": [ + { + "bbox": [ + 47, + 315, + 289, + 590 + ], + "type": "text", + "content": "To address these limitations, we propose a novel framework featuring two pose augmentors: the weak augmentor and the strong augmentor. The weak augmentor is designed to simulate target poses closely resembling source poses, while the strong augmentor generates target poses that exhibit significant deviations from source distributions. To delineate their characteristics, differential strategies are employed for generation and discrimination processes, as detailed in Sec. 3.3. Notably, our framework alleviates the constraints on strong-augmented poses by traditional adversarial training methods. Instead of enforcing similarity between the source and all the augmented poses, we utilize weak-augmented poses as an intermediary, enabling discrimination between strong- and weak-augmented poses and facilitating discrimination between source and weak-augmented poses. To optimize the utilization of synthesized poses, we introduce meta-optimization among source, weak-augmented, and strong-augmented poses, as elaborated in Sec. 3.4. Our training process exposes the pose estimator to domain shifts during the optimization processes, thereby enhancing its adaptability to handle domain shifts during the inference stage. Our contributions can be summarized in three main aspects:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 594, + 287, + 714 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 47, + 594, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 594, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 47, + 594, + 287, + 665 + ], + "type": "text", + "content": "- We propose a novel framework featuring both the weak and strong pose augmentors, which effectively preserves knowledge related to source poses while simultaneously exploring out-of-source distributions through differential strategies for the generation and discrimination processes of the two augmentors." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 666, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 287, + 714 + ], + "type": "text", + "content": "- We introduce meta-optimization to enhance the utilization of synthesized poses. 
By simulating domain shifts among source, weak-augmented, and strong-augmented poses during the optimization processes, the pose estima" + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 72, + 503, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 503, + 83 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 503, + 83 + ], + "type": "text", + "content": "tor's generalization ability is further improved." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 84, + 545, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 84, + 545, + 133 + ], + "spans": [ + { + "bbox": [ + 306, + 84, + 545, + 133 + ], + "type": "text", + "content": "- We conduct comprehensive experiments on several benchmark datasets, and the results demonstrate that our approach significantly outperforms state-of-the-art methods by a considerable margin." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 143, + 392, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 143, + 392, + 155 + ], + "spans": [ + { + "bbox": [ + 306, + 143, + 392, + 155 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 163, + 545, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 163, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 304, + 163, + 545, + 331 + ], + "type": "text", + "content": "3D Human Pose Estimation. The widely adopted two-stage technique in 3D HPE, as demonstrated in [23, 36, 38, 40, 43, 46], initially employs 2D human pose estimators [17, 26, 27, 33] for 2D pose predictions, followed by lifting these predictions to 3D poses. Among these approaches, [38] integrates graph-structured semantic information to enhance the estimation process, while [23] utilizes dilated temporal convolutional layers for temporal information encoding, and [43] presents a purely transformer-based 3D approach. Moreover, [36] effectively models inter-frame correspondences with a mixed sequence-to-sequence encoder, and recent works such as [40] explore the frequency domain to improve inference efficiency, and [46] employs unified representation learning for 3D human poses." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 331, + 546, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 546, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 546, + 533 + ], + "type": "text", + "content": "Domain Generalization. Current DG methods aim to learn domain-invariant representations and are categorized into three types: domain alignment [21, 41], meta-learning [15, 28], and augmentation strategies [24, 25, 39, 42]. For domain alignment, [41] enhances the conditional invariance of learned features by incorporating an entropy regularization term, leading to improved classifier generalization. [21] iteratively segregates samples into latent domains through clustering. Concerning meta-learning, [15] proposes a model-agnostic training procedure that simulates domain shift during training, whereas [28] applies meta-learning to single-domain generalization. Regarding augmentation strategies, [39] introduces a novel regularization term for adversarial data augmentation derived from the information bottleneck principle, while [42] presents a unique style hallucination module to generate style-diversified samples crucial for generalization." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 534, + 545, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 585 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 585 + ], + "type": "text", + "content": "Differing from the current DG approaches for 3D HPE that focus solely on augmentations, we also incorporate meta-learning-based approaches to enhance generalization. Cross-domain Learning for 3D Human Pose Estimation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 582, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 714 + ], + "type": "text", + "content": "Cross-domain learning for 3D HPE is categorized into two types: domain generalization [7, 11, 16, 37] and domain adaptation [2, 4, 18]. In domain generalization, training processes exclusively utilizes source data, and the resulting model is directly applied to infer target data. [37] adjusts various geometry factors of human poses through differentiable operations. [11] applies DH Forward Kinematics [1] to drive 3D pose augmentation and obtain diverse poses. [7] incorporates Counterfactual Risk Minimization [30] to achieve unbiased learning. [16] addresses with network designs like the interpolation sub-net and body-parts grouping" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "2241" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 167 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 167 + ], + "type": "text", + "content": "net. In domain adaptation, labeled source data and unlabeled target data are used simultaneously during the training process. [4] employs generative adversarial network [6] to discriminate between source and target during training. [2] utilizes global position alignment and local pose augmentation to transfer from source to target. [18] employs a multi-hypotheses network along with target-specific source augmentation for the problem." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 170, + 288, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 288, + 218 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 288, + 218 + ], + "type": "text", + "content": "In this paper, we focus on domain generalization for 3D HPE. In addition to synthesizing novel 3D poses for better generalization, we also simulate domain shifts by using both source and synthesized poses during optimizations." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 232, + 129, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 232, + 129, + 247 + ], + "spans": [ + { + "bbox": [ + 47, + 232, + 129, + 247 + ], + "type": "text", + "content": "3. Methodology" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 254, + 133, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 254, + 133, + 266 + ], + "spans": [ + { + "bbox": [ + 47, + 254, + 133, + 266 + ], + "type": "text", + "content": "3.1. 
Preliminaries" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "spans": [ + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "text", + "content": "2D-to-3D lifting Paradigm of 3D HPE. Current 2D-to-3D lifting paradigm of 3D HPE [23, 36, 38, 43] assumes that " + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "inline_equation", + "content": "x_{i}^{s} \\in \\mathbb{R}^{J \\times 2}" + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "text", + "content": " represents the 2D coordinates of " + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "text", + "content": " keypoints of a sample in the source domain (2D poses as input), and " + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "inline_equation", + "content": "y_{i}^{s} \\in \\mathbb{R}^{J \\times 3}" + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "text", + "content": " represents the corresponding 3D positions in the camera coordinate system (3D poses as output), we denote the source domain with " + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "text", + "content": " samples as " + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "inline_equation", + "content": "S = \\{(x_{i}^{s}, y_{i}^{s})\\}_{i=1}^{N}" + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "text", + "content": ", encompassing " + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "text", + "content": " 2D-3D pairs. Moreover, we define the pose estimator as " + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{P}: x_{i}^{s} \\mapsto \\hat{y}_{i}^{s}" + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "inline_equation", + "content": "\\hat{y}_{i}^{s}" + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "text", + "content": " represents the predicted corresponding 3D pose positions. 
For a fully supervised human pose estimation problem, we aim to achieve an ideal " + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 46, + 274, + 287, + 418 + ], + "type": "text", + "content": " by solving the following optimization objective:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 434, + 287, + 452 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 434, + 287, + 452 + ], + "spans": [ + { + "bbox": [ + 96, + 434, + 287, + 452 + ], + "type": "interline_equation", + "content": "\\min _ {\\mathcal {P}} \\mathbb {E} _ {\\left(x _ {i} ^ {s}, y _ {i} ^ {s}\\right) \\in \\mathcal {S}} \\mathcal {L} _ {M S E} \\left(\\mathcal {P} \\left(x _ {i} ^ {s}\\right), y _ {i} ^ {s}\\right), \\tag {1}", + "image_path": "0e2c4778a6c35e588a836b1d0e60c0279d1e165e86408a4fdcb5a0a0214b1e0a.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 460, + 287, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 460, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 460, + 287, + 521 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 460, + 287, + 521 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{MSE}" + }, + { + "bbox": [ + 46, + 460, + 287, + 521 + ], + "type": "text", + "content": " represents the Mean Squared Error (MSE) loss. However, the objective [23, 43] is designed to achieve optimal performance on source poses, rendering it inadequate for addressing the DG problem, as it does not account for the domain gap between source and target domains." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "text", + "content": "DG for 3D HPE. Within the paradigm of 2D-to-3D lifting HPE, our primary goal is to derive an estimator " + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "text", + "content": " that demonstrates commendable 3D HPE performance specifically within the target domain " + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "text", + "content": ". Under this scenario, the target domain " + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "inline_equation", + "content": "T = \\{(x_j^t,y_j^t)\\}_{j = 1}^M" + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "text", + "content": " samples can only be used for inference and is not involved in the training process. However, when utilizing solely the original source domain, the pose estimator cannot learn out-of-source distributions, which is essential for achieving good performance on the target domain. Existing methods [7, 11, 16, 37] tend to conduct augmentation to the original source poses to enhance data diversity, thereby improving the estimator's generalization ability. 
The augmentor is denoted as " + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{A}:y_i^s\\mapsto y_i^a" + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "text", + "content": ", while the projection from 3D to 2D via camera parameters (completely known) is defined as " + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{R}:y_i^a\\mapsto x_i^a" + }, + { + "bbox": [ + 46, + 521, + 288, + 714 + ], + "type": "text", + "content": ". Consequently, the min-max optimization objective for domain" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 72, + 472, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 472, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 472, + 84 + ], + "type": "text", + "content": "generalization can be defined as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 340, + 94, + 545, + 126 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 94, + 545, + 126 + ], + "spans": [ + { + "bbox": [ + 340, + 94, + 545, + 126 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\min _ {\\mathcal {P}} \\max _ {\\mathcal {A}} \\mathbb {E} _ {\\left(x _ {i} ^ {s}, y _ {i} ^ {s}\\right) \\in \\mathcal {S}} \\left[ \\mathcal {L} _ {M S E} \\left(\\mathcal {P} \\left(x _ {i} ^ {s}\\right), y _ {i} ^ {s}\\right) \\right. \\tag {2} \\\\ + \\mathcal {L} _ {M S E} (\\mathcal {P} (\\mathcal {R} (\\mathcal {A} (y _ {i} ^ {s}))), \\mathcal {A} (y _ {i} ^ {s})) ]. \\\\ \\end{array}", + "image_path": "9b3a32150724c5e82074984424f65f23dba1dd5c3865a7c0ed49f7c0c206f0bc.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "spans": [ + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "text", + "content": "The objective is a min-max game between the pose augmentor " + }, + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "text", + "content": " and the pose estimator " + }, + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "text", + "content": ", encouraging the estimator " + }, + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "text", + "content": " to learn out-of-source distributions, while conducting augmentations to a significant extent is beneficial to generate more diverse samples, and that is why the loss is minimized with respect to the augmentor " + }, + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "text", + "content": " and maximized with respect to the augmentor " + }, + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 304, + 135, + 545, + 219 + ], + "type": "text", + "content": " in the optimization." 
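To make the min-max objective of Eq. (2) concrete, below is a minimal PyTorch sketch of the alternating optimization between a 2D-to-3D lifting estimator P and a pose augmentor A. The tiny MLP architectures, the placeholder project_to_2d projection, the joint count J, and the learning rates are illustrative assumptions rather than the paper's implementation; in the actual framework the augmentors edit bone vectors and are further constrained by the generation and discrimination losses of Sec. 3.3.

```python
import torch
import torch.nn as nn

J = 17  # number of joints (assumption)

class Lifter(nn.Module):
    """Toy 2D-to-3D lifting estimator P: (B, J, 2) -> (B, J, 3)."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(J * 2, 256), nn.ReLU(), nn.Linear(256, J * 3))

    def forward(self, x2d):
        return self.net(x2d.flatten(1)).view(-1, J, 3)

class Augmentor(nn.Module):
    """Toy augmentor A that perturbs a 3D pose (the paper's augmentors instead operate on bone vectors)."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(J * 3, 256), nn.ReLU(), nn.Linear(256, J * 3))

    def forward(self, y3d):
        return y3d + 0.1 * self.net(y3d.flatten(1)).view(-1, J, 3)

def project_to_2d(y3d):
    # Placeholder for the projection R with known camera parameters.
    return y3d[..., :2] / (1.0 + y3d[..., 2:].clamp(min=-0.5))

P, A = Lifter(), Augmentor()
opt_P = torch.optim.Adam(P.parameters(), lr=1e-3)
opt_A = torch.optim.Adam(A.parameters(), lr=1e-3)
mse = nn.MSELoss()

def minmax_step(x_src, y_src):
    # Estimator step: minimize the MSE on source poses and on (detached) augmented poses.
    y_aug = A(y_src).detach()
    x_aug = project_to_2d(y_aug)
    loss_P = mse(P(x_src), y_src) + mse(P(x_aug), y_aug)
    opt_P.zero_grad()
    loss_P.backward()
    opt_P.step()

    # Augmentor step: maximize the estimator's error on augmented poses (gradient ascent on the same term).
    y_aug = A(y_src)
    x_aug = project_to_2d(y_aug)
    loss_A = -mse(P(x_aug), y_aug)
    opt_A.zero_grad()
    loss_A.backward()
    opt_A.step()

# Toy usage with random stand-ins for one source mini-batch.
x_src, y_src = torch.randn(8, J, 2), torch.randn(8, J, 3)
minmax_step(x_src, y_src)
```

The estimator step minimizes both MSE terms of Eq. (2) with the augmentor frozen, while the augmentor step ascends the augmented-pose term, which is the min-max game described above.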
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 228, + 488, + 240 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 228, + 488, + 240 + ], + "spans": [ + { + "bbox": [ + 306, + 228, + 488, + 240 + ], + "type": "text", + "content": "3.2. Overview of the Proposed Method" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 246, + 545, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 246, + 545, + 486 + ], + "spans": [ + { + "bbox": [ + 304, + 246, + 545, + 486 + ], + "type": "text", + "content": "Existing methods [7, 11, 37] often apply intricate augmentations to the original poses in the source domain, relying on the discrimination between augmented poses and source poses simultaneously. However, this kind of approach raises two concerns. First, as this is a DG problem for 3D HPE, any information about the target domain is entirely unknown. If the target domain bears a striking resemblance to the source domain (like Target Domain 1 in Fig. 1), poses generated by extensive augmentation might hinder the pose estimator's inference on it. Conversely, in cases where the target domain significantly diverges from the source distributions (like Target Domain 3 in Fig. 1), poses generated by insufficient augmentation may fail to adequately explore out-of-source knowledge for the pose estimator. Second, when target domain is distant from the source and needs significant augmentations, the adversarial training between source and synthesized poses limits the diversity of generated poses. Specifically, the discriminator enforces similarity between source and synthesized poses, thereby causing the synthesized poses to remain similar to the source poses." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 486, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 486, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 486, + 545, + 665 + ], + "type": "text", + "content": "To tackle these concerns, we propose a novel dual-augmentor framework depicted in Fig. 2. This framework involves two augmentors that generate weak- and strong-augmented poses, enabling the handling of diverse unknown target domains. Additionally, the weak-augmentation module serves as a bridge between strong-augmented and source poses. Specifically, the discrimination between source poses and weak-augmented poses is utilized to update the weak augmentor, while the discrimination between weak- and strong-augmented poses is employed to optimize the strong augmentor. This approach liberates the strong augmentor from heavy reliance on the source domain and enables the exploration of more out-of-source knowledge. Further details regarding the pose augmentation process can be found in Sec. 3.3." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 665, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 714 + ], + "type": "text", + "content": "Having elucidated the methodology for synthesizing poses, the subsequent discourse pivots towards the utilization of these synthesized poses. 
Previous works [7, 11, 37] overlook the interactions between source poses and aug-" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2242" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 70, + 547, + 206 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 547, + 206 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 547, + 206 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 547, + 206 + ], + "type": "image", + "image_path": "f00b92fd08cb0341489c9c94db5f4262d93765d824e775da0c537f54469b9e82.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 208, + 547, + 253 + ], + "lines": [ + { + "bbox": [ + 46, + 208, + 547, + 253 + ], + "spans": [ + { + "bbox": [ + 46, + 208, + 547, + 253 + ], + "type": "text", + "content": "Figure 2. Overall framework of our dual-augmentor method. Initially, the original pose undergoes processing through two pose augmentors, resulting in weak- and strong-augmented poses (See Sec. 3.3). The weak augmentor simulates target domains similar to the source domain, while the strong augmentor emulates target domains that deviate significantly from the source distributions. Subsequently, the original pose and the two augmented poses are input to the pose estimator for further meta-optimization (See Sec. 3.4)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 263, + 289, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 263, + 289, + 371 + ], + "spans": [ + { + "bbox": [ + 46, + 263, + 289, + 371 + ], + "type": "text", + "content": "mented poses, dealing with the optimizations of pose estimator on them separately. In contrast, we propose a model-agnostic meta optimization approach that enhances the interactions between source poses and the two types of augmented poses to simulate domain shifts in the optimization and leverage domain-invariant knowledge while maintaining the original 2D-to-3D lifting backbone's structure unchanged. Further details concerning the meta optimization process can be found in Sec. 3.4." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 380, + 162, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 380, + 162, + 393 + ], + "spans": [ + { + "bbox": [ + 47, + 380, + 162, + 393 + ], + "type": "text", + "content": "3.3. Pose Augmentation" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 48, + 400, + 289, + 543 + ], + "blocks": [ + { + "bbox": [ + 48, + 400, + 289, + 543 + ], + "lines": [ + { + "bbox": [ + 48, + 400, + 289, + 543 + ], + "spans": [ + { + "bbox": [ + 48, + 400, + 289, + 543 + ], + "type": "image", + "image_path": "4cefbeab32a0f16b34b0639ebe8ca41db3974403f048d0dcb3a9b02252fdb16f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 544, + 288, + 633 + ], + "lines": [ + { + "bbox": [ + 46, + 544, + 288, + 633 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 288, + 633 + ], + "type": "text", + "content": "Figure 3. The differentiation of the weak and strong generators. 
Within each pipeline, denoted as \"W-\" for weak ones and \"S-\" for strong ones, there exist four pose states: original (OR), after bone angle operation (BA), after bone length operation (BL) and after rotation and translation operation (RT). For proximate states, similarities are enhanced for both generators. When there is a one-state gap between states, the weak generator continues to enhance similarities, whereas the strong generator enlarges dissimilarities." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "content": "The pose augmentation architecture comprises two pose augmentors: the weak augmentor and the strong augmentor. Each augmentor comprises two components: the generator, responsible for producing diverse synthesized poses to facilitate the training of the pose estimator, and the discriminator, which collaborates with the generator to regu" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 263, + 547, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 263, + 547, + 324 + ], + "spans": [ + { + "bbox": [ + 304, + 263, + 547, + 324 + ], + "type": "text", + "content": "late the quality of the generated poses. Our objective is to apply differential strategies to the generation (named differential generation) and discrimination (named differential discrimination) of the two augmentors, enabling them to generate weak- and strong-augmented poses." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 333, + 547, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 333, + 547, + 502 + ], + "spans": [ + { + "bbox": [ + 304, + 333, + 547, + 502 + ], + "type": "text", + "content": "Differential Generation: The generation process, as illustrated in Figure 3, consists of three modules in sequence for each generator pipeline: the Bone Angle Generator, Bone Length Generator, and Rotation and Translation Generator, resulting in four statuses in the pipeline: original (OR), after the bone angle operation (BA), after the bone length operation (BL), and after the rotation and translation operation (RT). Existing approaches such as [16, 37] typically treat the entire generation pipeline in an end-to-end manner and only utilize the " + }, + { + "bbox": [ + 304, + 333, + 547, + 502 + ], + "type": "inline_equation", + "content": "(OR, RT)" + }, + { + "bbox": [ + 304, + 333, + 547, + 502 + ], + "type": "text", + "content": " pair. In contrast, our method deals with the generation in a more fine-grained fashion. We group statuses into pairs based on their relations: proximate pairs as " + }, + { + "bbox": [ + 304, + 333, + 547, + 502 + ], + "type": "inline_equation", + "content": "PP = \\{(OR, BA), (BA, BL), (BL, RT)\\}" + }, + { + "bbox": [ + 304, + 333, + 547, + 502 + ], + "type": "text", + "content": ", and one-state gap pairs as " + }, + { + "bbox": [ + 304, + 333, + 547, + 502 + ], + "type": "inline_equation", + "content": "OG = \\{(OR, BL), (BA, RT)\\}" + }, + { + "bbox": [ + 304, + 333, + 547, + 502 + ], + "type": "text", + "content": "." 
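As a small illustration of the status bookkeeping just described, the sketch below runs one (hypothetical) generator pipeline to obtain the four statuses OR, BA, BL, and RT, and groups them into the proximate (PP) and one-state-gap (OG) pairs; the three module callables are stand-ins whose internals are not specified here.

```python
import torch

def generation_statuses(y_or, bone_angle_gen, bone_length_gen, rot_trans_gen):
    """Run one generator pipeline and collect the four statuses OR -> BA -> BL -> RT.

    The three callables stand in for the Bone Angle, Bone Length, and
    Rotation/Translation modules; their internals are assumptions here.
    """
    y_ba = bone_angle_gen(y_or)
    y_bl = bone_length_gen(y_ba)
    y_rt = rot_trans_gen(y_bl)
    return {"OR": y_or, "BA": y_ba, "BL": y_bl, "RT": y_rt}

def status_pairs(st):
    """Group statuses into proximate pairs (PP) and one-state-gap pairs (OG)."""
    pp = [(st["OR"], st["BA"]), (st["BA"], st["BL"]), (st["BL"], st["RT"])]
    og = [(st["OR"], st["BL"]), (st["BA"], st["RT"])]
    return pp, og

# Toy usage with identity stand-ins for the three modules and a random 17-joint pose.
st = generation_statuses(torch.randn(17, 3), lambda p: p, lambda p: p, lambda p: p)
pp, og = status_pairs(st)
```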
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 510, + 548, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 548, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 548, + 714 + ], + "type": "text", + "content": "To begin, we define the measurement of similarity within a pair. Solely relying on conventional absolute position losses, such as the MSE loss, is not adequate in this context for two reasons. First, the three modules within the generator all perform operations on the level of bone vector, not on the joint positions. If one joint undergoes significant position changes after an operation, other joints connected to it will also experience considerable movement, even if the bone vector between them remains stable. In such cases, position-based measurements cannot fully reflect the extent of augmentation based on the bone vector. Second, human poses possess kinematic attributes, and a position-based measurement overlooks the graphical information. Therefore, we introduce the Laplacian weighted similarity measurement. For the human model, it is straightforward to obtain degree matrix " + }, + { + "bbox": [ + 304, + 510, + 548, + 714 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 510, + 548, + 714 + ], + "type": "text", + "content": " and adjacency matrix " + }, + { + "bbox": [ + 304, + 510, + 548, + 714 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 304, + 510, + 548, + 714 + ], + "type": "text", + "content": ", and the normalized Laplacian matrix can be represented as:" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2243" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 113, + 82, + 287, + 97 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 82, + 287, + 97 + ], + "spans": [ + { + "bbox": [ + 113, + 82, + 287, + 97 + ], + "type": "interline_equation", + "content": "W _ {N L} = I - D ^ {- \\frac {1}{2}} A D ^ {- \\frac {1}{2}}, \\tag {3}", + "image_path": "7f8be61d903b501de758e12bcb9e371e38731678530c5084e9db844ad3027d29.jpg" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 101, + 287, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 101, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 47, + 101, + 287, + 145 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 101, + 287, + 145 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 47, + 101, + 287, + 145 + ], + "type": "text", + "content": " is the identity matrix, and " + }, + { + "bbox": [ + 47, + 101, + 287, + 145 + ], + "type": "inline_equation", + "content": "W_{NL}" + }, + { + "bbox": [ + 47, + 101, + 287, + 145 + ], + "type": "text", + "content": " is the normalized Laplacian matrix encoding graphical information. 
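For a quick numerical check of Eq. (3), the sketch below builds the normalized Laplacian for a toy 5-joint skeleton; the edge list is an arbitrary example, not the joint topology actually used.

```python
import torch

# Hypothetical 5-joint skeleton (a chain with one branch); the real topology is fixed by the dataset.
edges = [(0, 1), (1, 2), (2, 3), (1, 4)]
J = 5

A = torch.zeros(J, J)
for i, j in edges:
    A[i, j] = 1.0
    A[j, i] = 1.0                                   # adjacency matrix A
deg = A.sum(dim=1)                                  # joint degrees (diagonal of D)
D_inv_sqrt = torch.diag(deg.rsqrt())                # D^{-1/2}
W_NL = torch.eye(J) - D_inv_sqrt @ A @ D_inv_sqrt   # Eq. (3): normalized Laplacian
```

Multiplying a (J, 3) pose by W_NL re-expresses each joint relative to its graph neighborhood, which is what the Laplacian-weighted term of the similarity measure defined next compares.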
Given a pair of statuses " + }, + { + "bbox": [ + 47, + 101, + 287, + 145 + ], + "type": "inline_equation", + "content": "(st_1, st_2)" + }, + { + "bbox": [ + 47, + 101, + 287, + 145 + ], + "type": "text", + "content": " (from either PP or OG), the similarity measurement is defined as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 152, + 287, + 178 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 152, + 287, + 178 + ], + "spans": [ + { + "bbox": [ + 55, + 152, + 287, + 178 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s i m} \\left(s t _ {1}, s t _ {2}\\right) = \\underbrace {\\| s t _ {1} - s t _ {2} \\|} _ {\\text {M S E S i m i l a r i t y}} + \\underbrace {\\| W _ {N L} s t _ {1} - W _ {N L} s t _ {2} \\|} _ {\\text {L a p l a c i a n W e i g h t e d S i m i l a r i t y}}. \\tag {4}", + "image_path": "7ae9dc08adb516462a2f066627c5ca15118f9f80c582b422d95da3f61493ef28.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 184, + 287, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 184, + 287, + 232 + ], + "spans": [ + { + "bbox": [ + 47, + 184, + 287, + 232 + ], + "type": "text", + "content": "To differentiate between the two generators, we apply distinctive strategies. For the weak generator, we enhance similarities for its PP and OG sets to maintain a slight level of augmentation in the synthesized poses, as indicated by:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 90, + 240, + 242, + 282 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 240, + 242, + 282 + ], + "spans": [ + { + "bbox": [ + 90, + 240, + 242, + 282 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {w g} = \\underset {(s t _ {1}, s t _ {2}) \\in P P} {\\mathbb {E}} \\mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}) \\\\ + \\alpha_ {1} \\underset {(s t _ {1}, s t _ {2}) \\in O G} {\\mathbb {E}} \\mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}). \\\\ \\end{array}", + "image_path": "f8b2bbde26016ab9391a8907e68ad38fef9db53b087df02d4740ca8e96ffce6c.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 290, + 287, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 290, + 287, + 338 + ], + "spans": [ + { + "bbox": [ + 47, + 290, + 287, + 338 + ], + "type": "text", + "content": "For the strong generator, we enhance similarities within its PP set to ensure the reasonableness of the synthesized output, while enlarging dissimilarities within its OG sets to maintain a significant level of augmentation, expressed as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 91, + 346, + 241, + 388 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 346, + 241, + 388 + ], + "spans": [ + { + "bbox": [ + 91, + 346, + 241, + 388 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {s g} = \\underset {(s t _ {1}, s t _ {2}) \\in P P} {\\mathbb {E}} \\mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}) \\\\ - \\alpha_ {2} \\underset {(s t _ {1}, s t _ {2}) \\in O G} {\\mathbb {E}} \\mathcal {L} _ {s i m} (s t _ {1}, s t _ {2}). 
\\\\ \\end{array}", + "image_path": "c5ec8e0eb32648c72609b9e847828b15def693a55983860b210299859e70e8ed.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 397, + 192, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 397, + 192, + 408 + ], + "spans": [ + { + "bbox": [ + 47, + 397, + 192, + 408 + ], + "type": "inline_equation", + "content": "\\alpha_{1}" + }, + { + "bbox": [ + 47, + 397, + 192, + 408 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 397, + 192, + 408 + ], + "type": "inline_equation", + "content": "\\alpha_{2}" + }, + { + "bbox": [ + 47, + 397, + 192, + 408 + ], + "type": "text", + "content": " are trade-off parameters." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 408, + 287, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 408, + 287, + 504 + ], + "spans": [ + { + "bbox": [ + 47, + 408, + 287, + 504 + ], + "type": "text", + "content": "Differential Discrimination: The discrimination process comprises two min-max games [6, 8]: one between the source pose and the weak-augmented poses, and the other between the weak-augmented pose and the strong-augmented pose. We adopt the WGAN-GP [8] structure here. The discrimination losses regarding the source poses " + }, + { + "bbox": [ + 47, + 408, + 287, + 504 + ], + "type": "inline_equation", + "content": "y^{or}" + }, + { + "bbox": [ + 47, + 408, + 287, + 504 + ], + "type": "text", + "content": ", weak-augmented poses " + }, + { + "bbox": [ + 47, + 408, + 287, + 504 + ], + "type": "inline_equation", + "content": "y^{wa}" + }, + { + "bbox": [ + 47, + 408, + 287, + 504 + ], + "type": "text", + "content": ", and strong-augmented poses " + }, + { + "bbox": [ + 47, + 408, + 287, + 504 + ], + "type": "inline_equation", + "content": "y^{sa}" + }, + { + "bbox": [ + 47, + 408, + 287, + 504 + ], + "type": "text", + "content": " are defined as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 85, + 512, + 287, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 512, + 287, + 540 + ], + "spans": [ + { + "bbox": [ + 85, + 512, + 287, + 540 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {w d} = \\mathbb {E} \\left[ D _ {w a} \\left(y ^ {o r}\\right) \\right] - \\mathbb {E} \\left[ D _ {w a} \\left(y ^ {w a}\\right) \\right] \\tag {7} \\\\ + \\beta_ {1} \\mathbb {E} (1 - \\| \\nabla_ {\\hat {y} ^ {w a}} D _ {w a} (\\hat {y} ^ {w a}) \\|), \\\\ \\end{array}", + "image_path": "71d871455de3e3f22cf25a8f2468fdfb09749df2cf786bc993566527fe9375f1.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 88, + 550, + 287, + 577 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 550, + 287, + 577 + ], + "spans": [ + { + "bbox": [ + 88, + 550, + 287, + 577 + ], + "type": "interline_equation", + "content": "\\begin{array}{r l} \\mathcal {L} _ {s d} = & \\mathbb {E} [ D _ {s a} (y ^ {w a}) ] - \\mathbb {E} [ D _ {s a} (y ^ {s a}) ] \\\\ & + \\beta_ {2} \\mathbb {E} (1 - \\| \\nabla_ {\\hat {y} ^ {s a}} D _ {s a} (\\hat {y} ^ {s a}) \\|). 
\\end{array} \\tag {8}", + "image_path": "06b95256f71a0d33fda92e22a5d9b896d488c72ecd585e6c606ee4ad203ad741.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{wd}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " is the discrimination loss between " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "y^{or}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "y^{wa}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": ", used to update the weak augmentor, and " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "D_{wa}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " is the weak discriminator. " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sd}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " is the discrimination loss between " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "y^{wa}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "y^{sa}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": ", used to update the strong augmentor, and " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "D_{sa}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " is the strong discriminator. " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\hat{y}^{wa}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\hat{y}^{sa}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " are built via interpolation, such that " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\hat{y}^{wa} = \\epsilon y^{or} + (1 - \\epsilon)y^{wa}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\hat{y}^{sa} = \\epsilon y^{wa} + (1 - \\epsilon)y^{sa}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " is randomly drawn from " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "U[0,1]" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": ". 
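Putting the pieces above together, this sketch computes the similarity measure of Eq. (4), the weak and strong generator losses of Eqs. (5)-(6), and the random interpolation used by the gradient-penalty terms of Eqs. (7)-(8). The alpha weights, the 17-joint toy tensors, and the identity W_NL placeholder are assumptions for illustration only.

```python
import torch

def sim_loss(st1, st2, W_NL):
    """Eq. (4): MSE similarity plus Laplacian-weighted similarity between two statuses."""
    return torch.norm(st1 - st2) + torch.norm(W_NL @ st1 - W_NL @ st2)

def weak_generator_loss(pp, og, W_NL, alpha1=0.5):
    """Eq. (5): pull both proximate and one-state-gap pairs together."""
    l_pp = torch.stack([sim_loss(a, b, W_NL) for a, b in pp]).mean()
    l_og = torch.stack([sim_loss(a, b, W_NL) for a, b in og]).mean()
    return l_pp + alpha1 * l_og

def strong_generator_loss(pp, og, W_NL, alpha2=0.5):
    """Eq. (6): keep proximate pairs close while pushing one-state-gap pairs apart."""
    l_pp = torch.stack([sim_loss(a, b, W_NL) for a, b in pp]).mean()
    l_og = torch.stack([sim_loss(a, b, W_NL) for a, b in og]).mean()
    return l_pp - alpha2 * l_og

def interpolate(y_real, y_fake):
    """Random interpolation y_hat used by the gradient-penalty terms in Eqs. (7)-(8)."""
    eps = torch.rand(1)        # epsilon drawn from U[0, 1]
    return eps * y_real + (1.0 - eps) * y_fake

# Toy usage: four random 17-joint statuses standing in for OR, BA, BL, RT of one pose.
OR, BA, BL, RT = (torch.randn(17, 3) for _ in range(4))
W_NL = torch.eye(17)           # placeholder; build it from the skeleton as in the sketch above
pp = [(OR, BA), (BA, BL), (BL, RT)]
og = [(OR, BL), (BA, RT)]
print(weak_generator_loss(pp, og, W_NL), strong_generator_loss(pp, og, W_NL))
```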
" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\beta_{1}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\beta_{2}" + }, + { + "bbox": [ + 47, + 582, + 287, + 677 + ], + "type": "text", + "content": " are trade-off parameters." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": "By implementing this discrimination process in two min-max games, the weak augmentor is capable of retaining more source information and alleviating adverse effects" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "stemming from irrationally synthesized poses. Simultaneously, the strong augmentor can overcome a strong dependency on the source distributions, and explore out-of-source distributions more effectively. With diverse synthesized poses to simulate potential target poses, it is beneficial for further domain generalization in pose estimation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 149, + 418, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 149, + 418, + 163 + ], + "spans": [ + { + "bbox": [ + 306, + 149, + 418, + 163 + ], + "type": "text", + "content": "3.4. Meta Optimization" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 168, + 494, + 180 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 168, + 494, + 180 + ], + "spans": [ + { + "bbox": [ + 306, + 168, + 494, + 180 + ], + "type": "text", + "content": "Algorithm 1 Meta Optimization Pseudo Code" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 184, + 545, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 184, + 545, + 208 + ], + "spans": [ + { + "bbox": [ + 306, + 184, + 545, + 208 + ], + "type": "text", + "content": "Input: Original source 2D-3D pose pairs " + }, + { + "bbox": [ + 306, + 184, + 545, + 208 + ], + "type": "inline_equation", + "content": "(x^{or},y^{or})" + }, + { + "bbox": [ + 306, + 184, + 545, + 208 + ], + "type": "text", + "content": "; Weak Generator " + }, + { + "bbox": [ + 306, + 184, + 545, + 208 + ], + "type": "inline_equation", + "content": "G_{wa}" + }, + { + "bbox": [ + 306, + 184, + 545, + 208 + ], + "type": "text", + "content": "; Strong Generator " + }, + { + "bbox": [ + 306, + 184, + 545, + 208 + ], + "type": "inline_equation", + "content": "G_{sa}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 208, + 545, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 208, + 545, + 232 + ], + "spans": [ + { + "bbox": [ + 306, + 208, + 545, + 232 + ], + "type": "text", + "content": "Init: Pose estimator " + }, + { + "bbox": [ + 306, + 208, + 545, + 232 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_t" + }, + { + "bbox": [ + 306, + 208, + 545, + 232 + ], + "type": "text", + "content": ", Learning rates " + }, + { + "bbox": [ + 306, + 208, + 545, + 232 + ], + "type": "inline_equation", + "content": "lr_1" + }, + { + "bbox": [ + 306, + 
208, + 545, + 232 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 208, + 545, + 232 + ], + "type": "inline_equation", + "content": "lr_2" + }, + { + "bbox": [ + 306, + 208, + 545, + 232 + ], + "type": "text", + "content": ", inner loop iteration " + }, + { + "bbox": [ + 306, + 208, + 545, + 232 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 306, + 208, + 545, + 232 + ], + "type": "text", + "content": ", Hyperparameter " + }, + { + "bbox": [ + 306, + 208, + 545, + 232 + ], + "type": "inline_equation", + "content": "\\gamma" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 232, + 545, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 232, + 545, + 256 + ], + "spans": [ + { + "bbox": [ + 306, + 232, + 545, + 256 + ], + "type": "text", + "content": "Output: Updated pose estimator " + }, + { + "bbox": [ + 306, + 232, + 545, + 256 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{t + 2}" + }, + { + "bbox": [ + 306, + 232, + 545, + 256 + ], + "type": "text", + "content": " after two-step meta optimization" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 256, + 538, + 315 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 312, + 256, + 538, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 256, + 538, + 268 + ], + "spans": [ + { + "bbox": [ + 312, + 256, + 538, + 268 + ], + "type": "text", + "content": "1: Generate weak-augmented 3D poses " + }, + { + "bbox": [ + 312, + 256, + 538, + 268 + ], + "type": "inline_equation", + "content": "y^{wa} = G_{wa}(y^{or})" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 268, + 538, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 268, + 538, + 280 + ], + "spans": [ + { + "bbox": [ + 312, + 268, + 538, + 280 + ], + "type": "text", + "content": "2: Project " + }, + { + "bbox": [ + 312, + 268, + 538, + 280 + ], + "type": "inline_equation", + "content": "y^{wa}" + }, + { + "bbox": [ + 312, + 268, + 538, + 280 + ], + "type": "text", + "content": " to 2D poses " + }, + { + "bbox": [ + 312, + 268, + 538, + 280 + ], + "type": "inline_equation", + "content": "x^{wa}" + }, + { + "bbox": [ + 312, + 268, + 538, + 280 + ], + "type": "text", + "content": " with camera parameters" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 281, + 440, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 281, + 440, + 290 + ], + "spans": [ + { + "bbox": [ + 312, + 281, + 440, + 290 + ], + "type": "text", + "content": "3: // Meta-train on Source data:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 291, + 509, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 291, + 509, + 304 + ], + "spans": [ + { + "bbox": [ + 312, + 291, + 509, + 304 + ], + "type": "text", + "content": "4: Update " + }, + { + "bbox": [ + 312, + 291, + 509, + 304 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_t' = \\mathcal{P}_t - lr_1\\nabla \\mathcal{L}_{MSE}(\\mathcal{P}_t(x^{or}),y^{or})" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 304, + 399, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 304, + 399, + 315 + ], + "spans": [ + { + "bbox": [ + 312, + 304, + 399, + 315 + ], + "type": "text", + "content": "5: for " + }, + { + "bbox": [ + 312, + 304, + 399, + 315 + ], + "type": "inline_equation", + "content": "i\\gets 1,\\dots,k" + }, + { + 
"bbox": [ + 312, + 304, + 399, + 315 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 309, + 256, + 543, + 519 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 312, + 316, + 487, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 316, + 487, + 327 + ], + "spans": [ + { + "bbox": [ + 312, + 316, + 487, + 327 + ], + "type": "text", + "content": "6: // Meta-test on Weak-augmented data:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 327, + 451, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 327, + 451, + 340 + ], + "spans": [ + { + "bbox": [ + 312, + 327, + 451, + 340 + ], + "type": "text", + "content": "7: " + }, + { + "bbox": [ + 312, + 327, + 451, + 340 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{weak - test} = L_{MSE}(\\mathcal{P}_t'(x^w)" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 312, + 341, + 356, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 341, + 356, + 350 + ], + "spans": [ + { + "bbox": [ + 312, + 341, + 356, + 350 + ], + "type": "text", + "content": "8: end for" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 312, + 352, + 536, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 352, + 536, + 363 + ], + "spans": [ + { + "bbox": [ + 312, + 352, + 536, + 363 + ], + "type": "text", + "content": "9: // Meta update on Source and Weak-augmented data:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 309, + 365, + 494, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 365, + 494, + 375 + ], + "spans": [ + { + "bbox": [ + 309, + 365, + 494, + 375 + ], + "type": "text", + "content": "10: " + }, + { + "bbox": [ + 309, + 365, + 494, + 375 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{t + 1} = \\mathcal{L}_{MSE}(\\mathcal{P}_t(x^{or}),y^{or}) + \\gamma L_{weak - test}" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 309, + 376, + 449, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 376, + 449, + 388 + ], + "spans": [ + { + "bbox": [ + 309, + 376, + 449, + 388 + ], + "type": "text", + "content": "11: " + }, + { + "bbox": [ + 309, + 376, + 449, + 388 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{t + 1} = \\mathcal{P}_t - lr_2(\\partial \\mathcal{L}_{t + 1} / \\partial \\mathcal{P}_t)" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 309, + 388, + 537, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 388, + 537, + 399 + ], + "spans": [ + { + "bbox": [ + 309, + 388, + 537, + 399 + ], + "type": "text", + "content": "12: Generate strong-augmented 3D poses " + }, + { + "bbox": [ + 309, + 388, + 537, + 399 + ], + "type": "inline_equation", + "content": "y^{sa} = G_{sa}(y^{or})" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 309, + 400, + 534, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 400, + 534, + 411 + ], + "spans": [ + { + "bbox": [ + 309, + 400, + 534, + 411 + ], + "type": "text", + "content": "13: Project " + }, + { + "bbox": [ + 309, + 400, + 534, + 411 + ], + "type": "inline_equation", + "content": "y^{sa}" + }, + { + "bbox": [ + 309, + 400, + 534, + 411 + ], + "type": "text", + "content": " to 2D poses " + }, + { + "bbox": [ + 309, + 400, + 534, + 411 + ], + "type": "inline_equation", + "content": "x^{sa}" + }, + { + 
"bbox": [ + 309, + 400, + 534, + 411 + ], + "type": "text", + "content": " with camera parameters" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 309, + 412, + 482, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 412, + 482, + 422 + ], + "spans": [ + { + "bbox": [ + 309, + 412, + 482, + 422 + ], + "type": "text", + "content": "14: // Meta-train on Weak-augmented data:" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 309, + 423, + 543, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 423, + 543, + 436 + ], + "spans": [ + { + "bbox": [ + 309, + 423, + 543, + 436 + ], + "type": "text", + "content": "15: Update " + }, + { + "bbox": [ + 309, + 423, + 543, + 436 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{t + 1}^{\\prime} = \\mathcal{P}_{t + 1} - lr_{1}\\nabla \\mathcal{L}_{MSE}(\\mathcal{P}_{t + 1}(x^{wa}),y^{wa})" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 309, + 437, + 398, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 437, + 398, + 446 + ], + "spans": [ + { + "bbox": [ + 309, + 437, + 398, + 446 + ], + "type": "text", + "content": "16: for " + }, + { + "bbox": [ + 309, + 437, + 398, + 446 + ], + "type": "inline_equation", + "content": "i\\gets 1,\\dots,k" + }, + { + "bbox": [ + 309, + 437, + 398, + 446 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 309, + 448, + 492, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 448, + 492, + 459 + ], + "spans": [ + { + "bbox": [ + 309, + 448, + 492, + 459 + ], + "type": "text", + "content": "17: // Meta-test on Strong-augmented data:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 309, + 459, + 495, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 459, + 495, + 472 + ], + "spans": [ + { + "bbox": [ + 309, + 459, + 495, + 472 + ], + "type": "text", + "content": "18: " + }, + { + "bbox": [ + 309, + 459, + 495, + 472 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {strong-test }} = L_{MSE}\\left(\\mathcal{P}_{t+1}^{\\prime}\\left(x^{sa}\\right), y^{sa}\\right)" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 309, + 472, + 356, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 472, + 356, + 481 + ], + "spans": [ + { + "bbox": [ + 309, + 472, + 356, + 481 + ], + "type": "text", + "content": "19: end for" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 309, + 483, + 538, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 483, + 538, + 495 + ], + "spans": [ + { + "bbox": [ + 309, + 483, + 538, + 495 + ], + "type": "text", + "content": "20: // Meta update on Weak- and Strong-augmented data:" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 309, + 495, + 513, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 495, + 513, + 507 + ], + "spans": [ + { + "bbox": [ + 309, + 495, + 513, + 507 + ], + "type": "text", + "content": "21: " + }, + { + "bbox": [ + 309, + 495, + 513, + 507 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{t + 2} = \\mathcal{L}_{MSE}\\big(\\mathcal{P}_{t + 1}(x^{wa}),y^{wa}\\big) + \\gamma L_{strong - test}" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 309, + 508, + 469, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 508, + 469, + 519 + ], + "spans": [ + { + "bbox": [ + 309, + 508, + 469, + 519 + ], + 
"type": "text", + "content": "22: " + }, + { + "bbox": [ + 309, + 508, + 469, + 519 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{t + 2} = \\mathcal{P}_{t + 1} - lr_2(\\partial \\mathcal{L}_{t + 2} / \\partial \\mathcal{P}_{t + 1})" + } + ] + } + ], + "index": 41 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 533, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 545, + 665 + ], + "type": "text", + "content": "For DG problem in 3D HPE, two principal challenges must be addressed. First, there is the issue of synthesizing data, as detailed in Section 3.3. The second challenge revolves around the effective utilization of synthesized data, a facet often overlooked by current methodologies. Existing DG approaches for 3D HPE [11, 37], conduct the optimization of the pose estimator based on source and synthesized data independently. Unfortunately, this approach lacks mechanisms for fostering interactions between these two optimization processes, resulting in a deficiency of simulated domain shifts in the optimization trajectory." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 304, + 666, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 666, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 666, + 545, + 713 + ], + "type": "text", + "content": "In contrast, our proposed model-agnostic strategy incorporates meta-optimization to bolster interactions among source poses, weak-augmented poses, and strong-augmented poses. This process facilitates the learning of" + } + ] + } + ], + "index": 44 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2244" + } + ] + } + ], + "index": 45 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "content": "domain-invariant knowledge during the update of the pose estimator, as outlined in Algorithm 1. The effectiveness of this approach lies in the fact that the objectives in meta-optimization not only aim to minimize losses on source and synthesized poses but also enhance the alignment of optimization directions during training, thus enhancing generalization significantly." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 157, + 290, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 157, + 290, + 301 + ], + "spans": [ + { + "bbox": [ + 46, + 157, + 290, + 301 + ], + "type": "text", + "content": "The algorithm can be dissected into two parts: Lines 1-11 manage interactions between source poses and weakly-augmented poses, while Lines 12-22 address interactions between weak- and strong-augmented poses. This step-by-step approach is taken due to the substantial domain gap between source poses and strong-augmented poses. Weak-augmented poses serve as a bridge between source poses and strong-augmented poses, alleviating the challenge of directly aligning source and strong-augmented data. 
By incorporating all three types of poses in the optimization, the pose estimator can effectively generalize across diverse target domains, avoiding overfitting specific pose data types." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 313, + 128, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 313, + 128, + 327 + ], + "spans": [ + { + "bbox": [ + 47, + 313, + 128, + 327 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 333, + 168, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 333, + 168, + 344 + ], + "spans": [ + { + "bbox": [ + 47, + 333, + 168, + 344 + ], + "type": "text", + "content": "4.1. Datasets and Metrics" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 352, + 287, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 352, + 287, + 412 + ], + "spans": [ + { + "bbox": [ + 46, + 352, + 287, + 412 + ], + "type": "text", + "content": "In this paper, we evaluate our approach using several widely-used 3D human pose benchmarks, including Human3.6M [12], MPI-INF-3DHP [22], and 3DPW [31]. Moreover, following previous works [11, 16, 37], we adopt the 16-keypoint human model with Hip joint as the origin." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 413, + 287, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 413, + 287, + 592 + ], + "spans": [ + { + "bbox": [ + 46, + 413, + 287, + 592 + ], + "type": "text", + "content": "Human3.6M is an indoor dataset comprising 3.6 million frames and consisting of 7 subjects denoted as S1, S5, S6, S7, S8, S9, and S11. For the cross-dataset evaluation in Tab. 1, Tab. 2 and Tab. 5, we use S1, S5, S6, S7, S8 as the source domain. In the cross-scenario evaluation on the entire Human3.6M dataset in Tab. 3, S1, S5, S6, S7, S8 are the source domain, while S9 and S11 are the target domain. Mean Per Joint Position Error (MPJPE) and Procrustes-Aligned Mean Per Joint Position Error (PA-MPJPE) are employed as evaluation metrics. For the cross-scenario evaluation on partial Human3.6M, we follow previous works [4, 11, 16] to define two tasks as shown in Tab. 4. One task uses S1 as the source and S5, S6, S7, S8 as the target, while the other task uses S1, S5 as the source and S6, S7, S8 as the target. Both tasks utilize MPJPE as the metric." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 593, + 287, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 593, + 287, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 593, + 287, + 664 + ], + "type": "text", + "content": "MPI-INF-3DHP (3DHP) is an in-the-wild dataset, and we utilize only its test set for cross-dataset evaluation, as shown in Tab. 1 and Tab. 5, which consists of approximately 3k frames. The results are presented based on three metrics: Percentage of Correct Keypoints (PCK), Area Under the Curve (AUC), and MPJPE." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 665, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 712 + ], + "type": "text", + "content": "3DPW is another in-the-wild dataset featuring more challenging poses and scenes. We utilize it for cross-dataset evaluation, as shown in Tab. 2. Here PA-MPJPE and MPJPE serve as the evaluation metrics." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 72, + 440, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 440, + 85 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 440, + 85 + ], + "type": "text", + "content": "4.2. Implementation Details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "spans": [ + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "text", + "content": "For all the generators and discriminators, we ensure consistency by employing the same fully-connected layers, aligning with the methodology described in [37]. In the data augmentation process, the learning rate is set to 1e-4 for generators and 2e-4 for discriminators. We set " + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\alpha_{1}" + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\alpha_{2}" + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "text", + "content": " to 0.50 and 0.35, respectively, while both " + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\beta_{1}" + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\beta_{2}" + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "text", + "content": " are assigned a value of 4. During the meta optimization process, we utilize a learning rate of 1e-4 for " + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "inline_equation", + "content": "lr_{1}" + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "text", + "content": " and 5e-4 for " + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "inline_equation", + "content": "lr_{2}" + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "text", + "content": ". The trade-off parameter " + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "text", + "content": " and the inner loop iteration " + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 90, + 545, + 209 + ], + "type": "text", + "content": " are both set to 1." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 210, + 546, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 210, + 546, + 293 + ], + "spans": [ + { + "bbox": [ + 304, + 210, + 546, + 293 + ], + "type": "text", + "content": "Moreover, we employ the Adam optimizer [13] for data augmentation and the AdamW optimizer [19] for meta optimization. Our experiments are conducted with a batch size of 1024 over 60 epochs. We initialize the pose estimator with a warm-up phase lasting two epochs for supervised learning on source data. From the third epoch onwards, data augmentation and meta-optimization begin." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 300, + 425, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 300, + 425, + 312 + ], + "spans": [ + { + "bbox": [ + 305, + 300, + 425, + 312 + ], + "type": "text", + "content": "4.3. 
Quantitative Results" + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 307, + 336, + 545, + 417 + ], + "blocks": [ + { + "bbox": [ + 332, + 323, + 518, + 334 + ], + "lines": [ + { + "bbox": [ + 332, + 323, + 518, + 334 + ], + "spans": [ + { + "bbox": [ + 332, + 323, + 518, + 334 + ], + "type": "text", + "content": "Table 1. Cross-dataset evaluation on 3DHP dataset." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 336, + 545, + 417 + ], + "lines": [ + { + "bbox": [ + 307, + 336, + 545, + 417 + ], + "spans": [ + { + "bbox": [ + 307, + 336, + 545, + 417 + ], + "type": "table", + "html": "
MethodVenueDGPCK ↑AUC ↑MPJPE ↓
VPose (1-frame) [23]CVPR'19×80.942.5102.3
EvoSkeleton [17]CVPR'2081.246.199.7
RepNet [32]CVPR'1981.854.892.5
PoseAug [37]TPAMI'2388.657.373.0
DH-AUG [11]ECCV'2289.557.971.2
PoseGU [7]CVIU'2386.357.275.0
CEE-Net [16]AAAI'2389.958.269.7
Ours92.960.763.1
", + "image_path": "8f44de63d5f50eead70d0d472f1860b03734389231b5846447abe1aaabc5e086.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 307, + 435, + 545, + 512 + ], + "blocks": [ + { + "bbox": [ + 331, + 422, + 520, + 432 + ], + "lines": [ + { + "bbox": [ + 331, + 422, + 520, + 432 + ], + "spans": [ + { + "bbox": [ + 331, + 422, + 520, + 432 + ], + "type": "text", + "content": "Table 2. Cross-dataset evaluation on 3DPW dataset." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 435, + 545, + 512 + ], + "lines": [ + { + "bbox": [ + 307, + 435, + 545, + 512 + ], + "spans": [ + { + "bbox": [ + 307, + 435, + 545, + 512 + ], + "type": "table", + "html": "
MethodVenueDGPA-MPJPE ↓MPJPE ↓
VPose (1-frame) [23]CVPR'19×94.6125.7
VIBE [14]CVPR'2082.3122.5
PoseAug [37]TPAMI'2381.6119.0
DH-AUG [11]ECCV'2279.3112.8
PoseGU [7]CVIU'2392.3-
CEE-Net [16]AAAI'2376.8-
Ours73.2106.6
", + "image_path": "ca8e6fdc561a0e8535e215fc3b88171e5719851992c42c3703fbb3b987ab3d1a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 307, + 547, + 545, + 614 + ], + "blocks": [ + { + "bbox": [ + 305, + 522, + 545, + 544 + ], + "lines": [ + { + "bbox": [ + 305, + 522, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 305, + 522, + 545, + 544 + ], + "type": "text", + "content": "Table 3. Cross-scenario evaluation on Entire Human3.6M dataset. S1,S5,S6,S7,S8 are the source and S9,S11 are the target." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 547, + 545, + 614 + ], + "lines": [ + { + "bbox": [ + 307, + 547, + 545, + 614 + ], + "spans": [ + { + "bbox": [ + 307, + 547, + 545, + 614 + ], + "type": "table", + "html": "
MethodVenueDGMPJPE ↓PA-MPJPE ↓
VPose (1-frame) [23]CVPR'19×52.740.9
EvoSkeleton [17]CVPR'2050.938.0
PoseAug [37]TPAMI'2350.239.1
DH-AUG [11]ECCV'2249.838.3
CEE-Net [16]AAAI'2347.336.8
Ours44.434.6
", + "image_path": "10605c9aee8c252ac91b5935155020ccabfc18850385bf150612cd4a4c9cd9cc.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": "Cross-dataset evaluation results. In cross-dataset evaluations, source and target come from different datasets. Following identical paradigms from existing methods [11, 16, 37], we use ground truth 2D keypoints as input, single-frame VPose [23] as the lifting backbone, and Human3.6M as the source dataset. Our method demonstrates notable performance in all metrics, as presented in Tab. 1 and Tab. 2. Notably, our approach outperforms CEE-Net by " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "3.0\\%" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": " in" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2245" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 91, + 152 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 91, + 152 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 91, + 152 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 91, + 152 + ], + "type": "image", + "image_path": "69ed370ea71a3814b8e029e508885cc2818fbde7e35f9e1e43c2be4f70f5de50.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 49, + 153, + 90, + 160 + ], + "lines": [ + { + "bbox": [ + 49, + 153, + 90, + 160 + ], + "spans": [ + { + "bbox": [ + 49, + 153, + 90, + 160 + ], + "type": "text", + "content": "2D Predictions" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 91, + 72, + 122, + 152 + ], + "blocks": [ + { + "bbox": [ + 91, + 72, + 122, + 152 + ], + "lines": [ + { + "bbox": [ + 91, + 72, + 122, + 152 + ], + "spans": [ + { + "bbox": [ + 91, + 72, + 122, + 152 + ], + "type": "image", + "image_path": "f5f923dfd5ef95f1f23cf09a13d974c2d7a2b89a289c2917d8ea3f1995f80b11.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 94, + 154, + 127, + 160 + ], + "lines": [ + { + "bbox": [ + 94, + 154, + 127, + 160 + ], + "spans": [ + { + "bbox": [ + 94, + 154, + 127, + 160 + ], + "type": "text", + "content": "Source-only" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 123, + 72, + 154, + 152 + ], + "blocks": [ + { + "bbox": [ + 123, + 72, + 154, + 152 + ], + "lines": [ + { + "bbox": [ + 123, + 72, + 154, + 152 + ], + "spans": [ + { + "bbox": [ + 123, + 72, + 154, + 152 + ], + "type": "image", + "image_path": "f12b9d6f32a368726f6e917db019c899ba069f5e168c5c5db8e2e348a7e5ce29.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 124, + 154, + 164, + 160 + ], + "lines": [ + { + "bbox": [ + 124, + 154, + 164, + 160 + ], + "spans": [ + { + "bbox": [ + 124, + 154, + 164, + 160 + ], + 
"type": "text", + "content": "y PoseAug" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 156, + 72, + 210, + 152 + ], + "blocks": [ + { + "bbox": [ + 156, + 72, + 210, + 152 + ], + "lines": [ + { + "bbox": [ + 156, + 72, + 210, + 152 + ], + "spans": [ + { + "bbox": [ + 156, + 72, + 210, + 152 + ], + "type": "image", + "image_path": "a867fb9336b76f49bdef0eb9f5ca4a98ce74d67ff5d93e77159c51affc0563ee.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 156, + 153, + 206, + 160 + ], + "lines": [ + { + "bbox": [ + 156, + 153, + 206, + 160 + ], + "spans": [ + { + "bbox": [ + 156, + 153, + 206, + 160 + ], + "type": "text", + "content": "Aug DH-AUG" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 211, + 72, + 247, + 152 + ], + "blocks": [ + { + "bbox": [ + 211, + 72, + 247, + 152 + ], + "lines": [ + { + "bbox": [ + 211, + 72, + 247, + 152 + ], + "spans": [ + { + "bbox": [ + 211, + 72, + 247, + 152 + ], + "type": "image", + "image_path": "4b796d4bc4f48b9aa7f01a990953389b7021f4efef90a74237f469707e67bcd1.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 154, + 239, + 160 + ], + "lines": [ + { + "bbox": [ + 225, + 154, + 239, + 160 + ], + "spans": [ + { + "bbox": [ + 225, + 154, + 239, + 160 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 247, + 72, + 291, + 152 + ], + "blocks": [ + { + "bbox": [ + 247, + 72, + 291, + 152 + ], + "lines": [ + { + "bbox": [ + 247, + 72, + 291, + 152 + ], + "spans": [ + { + "bbox": [ + 247, + 72, + 291, + 152 + ], + "type": "image", + "image_path": "b5b53cd399848bed410593791d913ddb58cea6fa53e9750ba6f16fbec964d5d8.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 268, + 154, + 278, + 160 + ], + "lines": [ + { + "bbox": [ + 268, + 154, + 278, + 160 + ], + "spans": [ + { + "bbox": [ + 268, + 154, + 278, + 160 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 296, + 70, + 341, + 152 + ], + "blocks": [ + { + "bbox": [ + 296, + 70, + 341, + 152 + ], + "lines": [ + { + "bbox": [ + 296, + 70, + 341, + 152 + ], + "spans": [ + { + "bbox": [ + 296, + 70, + 341, + 152 + ], + "type": "image", + "image_path": "8b67c42b1404be43128773873db50b8941e0ca47c9d424a40e31c08fc054edcb.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 299, + 154, + 339, + 160 + ], + "lines": [ + { + "bbox": [ + 299, + 154, + 339, + 160 + ], + "spans": [ + { + "bbox": [ + 299, + 154, + 339, + 160 + ], + "type": "text", + "content": "2D Predictions" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 341, + 72, + 396, + 152 + ], + "blocks": [ + { + "bbox": [ + 341, + 72, + 396, + 152 + ], + "lines": [ + { + "bbox": [ + 341, + 72, + 396, + 152 + ], + "spans": [ + { + "bbox": [ + 341, + 72, + 396, + 152 + ], + "type": "image", + "image_path": "22c84e246d36d9a940072f10243dca940db9f1f00be9fa00018446e74b2b8a20.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 345, + 154, + 377, + 
160 + ], + "lines": [ + { + "bbox": [ + 345, + 154, + 377, + 160 + ], + "spans": [ + { + "bbox": [ + 345, + 154, + 377, + 160 + ], + "type": "text", + "content": "Source-only" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 396, + 72, + 446, + 152 + ], + "blocks": [ + { + "bbox": [ + 396, + 72, + 446, + 152 + ], + "lines": [ + { + "bbox": [ + 396, + 72, + 446, + 152 + ], + "spans": [ + { + "bbox": [ + 396, + 72, + 446, + 152 + ], + "type": "image", + "image_path": "729ee476b5eaeb79e8f0e1da7401f939710b23069a4c3826c52b4d218bf50b34.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 154, + 414, + 160 + ], + "lines": [ + { + "bbox": [ + 406, + 154, + 414, + 160 + ], + "spans": [ + { + "bbox": [ + 406, + 154, + 414, + 160 + ], + "type": "text", + "content": "Aug." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 428, + 154, + 455, + 160 + ], + "lines": [ + { + "bbox": [ + 428, + 154, + 455, + 160 + ], + "spans": [ + { + "bbox": [ + 428, + 154, + 455, + 160 + ], + "type": "text", + "content": "DH-AUG" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 416, + 72, + 463, + 152 + ], + "blocks": [ + { + "bbox": [ + 416, + 72, + 463, + 152 + ], + "lines": [ + { + "bbox": [ + 416, + 72, + 463, + 152 + ], + "spans": [ + { + "bbox": [ + 416, + 72, + 463, + 152 + ], + "type": "image", + "image_path": "1d3973dd2e03defbb681c3d390c711fc12ae79f2d36671cf16ce39050226e77f.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 462, + 72, + 505, + 152 + ], + "blocks": [ + { + "bbox": [ + 462, + 72, + 505, + 152 + ], + "lines": [ + { + "bbox": [ + 462, + 72, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 462, + 72, + 505, + 152 + ], + "type": "image", + "image_path": "1aa113f5f3e1dd54d0f99b0fdc3faa9b290f192a48b343006994985d37504da9.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 475, + 154, + 490, + 160 + ], + "lines": [ + { + "bbox": [ + 475, + 154, + 490, + 160 + ], + "spans": [ + { + "bbox": [ + 475, + 154, + 490, + 160 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 507, + 72, + 546, + 152 + ], + "blocks": [ + { + "bbox": [ + 507, + 72, + 546, + 152 + ], + "lines": [ + { + "bbox": [ + 507, + 72, + 546, + 152 + ], + "spans": [ + { + "bbox": [ + 507, + 72, + 546, + 152 + ], + "type": "image", + "image_path": "49fc8a54aee6614591d8d3927daa65202e3b95baec84945c5bcabcb2a82d3e1b.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 518, + 154, + 527, + 160 + ], + "lines": [ + { + "bbox": [ + 518, + 154, + 527, + 160 + ], + "spans": [ + { + "bbox": [ + 518, + 154, + 527, + 160 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 48, + 180, + 91, + 222 + ], + "blocks": [ + { + "bbox": [ + 48, + 180, + 91, + 222 + ], + "lines": [ + { + "bbox": [ + 48, + 180, + 91, + 222 + ], + "spans": [ + { + "bbox": [ + 48, + 180, + 91, + 222 + ], + "type": "image", + "image_path": 
"931d87359b051ce269c7c90069438e97dfe52456d9bf7de8e22e003dbe54d4c8.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 223, + 90, + 230 + ], + "lines": [ + { + "bbox": [ + 50, + 223, + 90, + 230 + ], + "spans": [ + { + "bbox": [ + 50, + 223, + 90, + 230 + ], + "type": "text", + "content": "2D Predictions" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 60, + 232, + 532, + 243 + ], + "lines": [ + { + "bbox": [ + 60, + 232, + 532, + 243 + ], + "spans": [ + { + "bbox": [ + 60, + 232, + 532, + 243 + ], + "type": "text", + "content": "Figure 5. Results on Cross-scenario evaluation. Left is for task S1,S5,S6,S7,S8 " + }, + { + "bbox": [ + 60, + 232, + 532, + 243 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 60, + 232, + 532, + 243 + ], + "type": "text", + "content": " S9,S11, and right is for task S1,S5 " + }, + { + "bbox": [ + 60, + 232, + 532, + 243 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 60, + 232, + 532, + 243 + ], + "type": "text", + "content": " S6,S7,S8." + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 91, + 181, + 130, + 222 + ], + "blocks": [ + { + "bbox": [ + 104, + 163, + 488, + 175 + ], + "lines": [ + { + "bbox": [ + 104, + 163, + 488, + 175 + ], + "spans": [ + { + "bbox": [ + 104, + 163, + 488, + 175 + ], + "type": "text", + "content": "Figure 4. Qualitative results on Cross-dataset evaluation. Left is 3DHP dataset, and right is 3DPW dataset." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 91, + 181, + 130, + 222 + ], + "lines": [ + { + "bbox": [ + 91, + 181, + 130, + 222 + ], + "spans": [ + { + "bbox": [ + 91, + 181, + 130, + 222 + ], + "type": "image", + "image_path": "9a6564c1f032822faa2f16bf41617166d653917e6cbb2ad5d22335fe9bd90e92.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 95, + 223, + 127, + 230 + ], + "lines": [ + { + "bbox": [ + 95, + 223, + 127, + 230 + ], + "spans": [ + { + "bbox": [ + 95, + 223, + 127, + 230 + ], + "type": "text", + "content": "Source-only" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 131, + 181, + 170, + 222 + ], + "blocks": [ + { + "bbox": [ + 131, + 181, + 170, + 222 + ], + "lines": [ + { + "bbox": [ + 131, + 181, + 170, + 222 + ], + "spans": [ + { + "bbox": [ + 131, + 181, + 170, + 222 + ], + "type": "image", + "image_path": "6d44c3cf69ab1ed91effee01703bd7a3e01fd67d63ac9b736b05c007116f522f.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 223, + 165, + 230 + ], + "lines": [ + { + "bbox": [ + 140, + 223, + 165, + 230 + ], + "spans": [ + { + "bbox": [ + 140, + 223, + 165, + 230 + ], + "type": "text", + "content": "PoseAug" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 171, + 181, + 210, + 221 + ], + "blocks": [ + { + "bbox": [ + 171, + 181, + 210, + 221 + ], + "lines": [ + { + "bbox": [ + 171, + 181, + 210, + 221 + ], + "spans": [ + { + "bbox": [ + 171, + 181, + 210, + 221 + ], + "type": "image", + "image_path": "15bd7399286acf4524b3604d612a55b22f5b1371bea43ed91d58fd6c2ad0a3d0.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + 
{ + "bbox": [ + 179, + 223, + 205, + 230 + ], + "lines": [ + { + "bbox": [ + 179, + 223, + 205, + 230 + ], + "spans": [ + { + "bbox": [ + 179, + 223, + 205, + 230 + ], + "type": "text", + "content": "DH-AUG" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 211, + 181, + 250, + 221 + ], + "blocks": [ + { + "bbox": [ + 211, + 181, + 250, + 221 + ], + "lines": [ + { + "bbox": [ + 211, + 181, + 250, + 221 + ], + "spans": [ + { + "bbox": [ + 211, + 181, + 250, + 221 + ], + "type": "image", + "image_path": "33bb676595dd8358d94901149c89c4f6c70f0dfdb0131e37e72c9e87baa66d28.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 223, + 240, + 230 + ], + "lines": [ + { + "bbox": [ + 225, + 223, + 240, + 230 + ], + "spans": [ + { + "bbox": [ + 225, + 223, + 240, + 230 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 250, + 181, + 290, + 221 + ], + "blocks": [ + { + "bbox": [ + 250, + 181, + 290, + 221 + ], + "lines": [ + { + "bbox": [ + 250, + 181, + 290, + 221 + ], + "spans": [ + { + "bbox": [ + 250, + 181, + 290, + 221 + ], + "type": "image", + "image_path": "a86d6f99b3618f6a7280d3e64c4f6c46e67f2d9b7c8db81fcd43e88c767393fd.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 268, + 223, + 279, + 230 + ], + "lines": [ + { + "bbox": [ + 268, + 223, + 279, + 230 + ], + "spans": [ + { + "bbox": [ + 268, + 223, + 279, + 230 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 296, + 181, + 341, + 222 + ], + "blocks": [ + { + "bbox": [ + 296, + 181, + 341, + 222 + ], + "lines": [ + { + "bbox": [ + 296, + 181, + 341, + 222 + ], + "spans": [ + { + "bbox": [ + 296, + 181, + 341, + 222 + ], + "type": "image", + "image_path": "358d4cb812d51a63539bbf1cd1fa79b57dc51c312f1f09705d42ad775845f3b2.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 299, + 223, + 340, + 230 + ], + "lines": [ + { + "bbox": [ + 299, + 223, + 340, + 230 + ], + "spans": [ + { + "bbox": [ + 299, + 223, + 340, + 230 + ], + "type": "text", + "content": "2D Predictions" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 342, + 181, + 388, + 222 + ], + "blocks": [ + { + "bbox": [ + 342, + 181, + 388, + 222 + ], + "lines": [ + { + "bbox": [ + 342, + 181, + 388, + 222 + ], + "spans": [ + { + "bbox": [ + 342, + 181, + 388, + 222 + ], + "type": "image", + "image_path": "a8ff924ae78809ef520009d05e5aba4bec4bfeb7730d1ee1c64eab2eb123eda5.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 345, + 223, + 377, + 230 + ], + "lines": [ + { + "bbox": [ + 345, + 223, + 377, + 230 + ], + "spans": [ + { + "bbox": [ + 345, + 223, + 377, + 230 + ], + "type": "text", + "content": "Source-only" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 388, + 181, + 422, + 222 + ], + "blocks": [ + { + "bbox": [ + 388, + 181, + 422, + 222 + ], + "lines": [ + { + "bbox": [ + 388, + 181, + 422, + 222 + ], + "spans": [ + { + "bbox": [ + 388, + 181, + 422, + 222 + ], + "type": "image", + 
"image_path": "6d2def2010f88a8f77d2255e89fda3a198eb72c03ee0c328cefb70132fb2411d.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 390, + 223, + 414, + 230 + ], + "lines": [ + { + "bbox": [ + 390, + 223, + 414, + 230 + ], + "spans": [ + { + "bbox": [ + 390, + 223, + 414, + 230 + ], + "type": "text", + "content": "PoseAug" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 423, + 181, + 463, + 222 + ], + "blocks": [ + { + "bbox": [ + 423, + 181, + 463, + 222 + ], + "lines": [ + { + "bbox": [ + 423, + 181, + 463, + 222 + ], + "spans": [ + { + "bbox": [ + 423, + 181, + 463, + 222 + ], + "type": "image", + "image_path": "cb116c9311744f80b515f493986436d19d132f590fde21a99a0305eb7c1db38c.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 429, + 223, + 456, + 230 + ], + "lines": [ + { + "bbox": [ + 429, + 223, + 456, + 230 + ], + "spans": [ + { + "bbox": [ + 429, + 223, + 456, + 230 + ], + "type": "text", + "content": "DH-AUG" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 463, + 181, + 503, + 222 + ], + "blocks": [ + { + "bbox": [ + 463, + 181, + 503, + 222 + ], + "lines": [ + { + "bbox": [ + 463, + 181, + 503, + 222 + ], + "spans": [ + { + "bbox": [ + 463, + 181, + 503, + 222 + ], + "type": "image", + "image_path": "87595cde366a5d8aacd989384ae4920bfa2b2d0c6be7a4ffc4d3f96f37c7b884.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 476, + 223, + 491, + 230 + ], + "lines": [ + { + "bbox": [ + 476, + 223, + 491, + 230 + ], + "spans": [ + { + "bbox": [ + 476, + 223, + 491, + 230 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_caption" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 504, + 181, + 544, + 222 + ], + "blocks": [ + { + "bbox": [ + 504, + 181, + 544, + 222 + ], + "lines": [ + { + "bbox": [ + 504, + 181, + 544, + 222 + ], + "spans": [ + { + "bbox": [ + 504, + 181, + 544, + 222 + ], + "type": "image", + "image_path": "c0997b28caa17fd803835075fb7fcf80e79ab248f9dfcb280ee449c981dd6edf.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 518, + 223, + 529, + 230 + ], + "lines": [ + { + "bbox": [ + 518, + 223, + 529, + 230 + ], + "spans": [ + { + "bbox": [ + 518, + 223, + 529, + 230 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + } + ], + "index": 47 + }, + { + "type": "table", + "bbox": [ + 48, + 300, + 287, + 379 + ], + "blocks": [ + { + "bbox": [ + 47, + 253, + 286, + 297 + ], + "lines": [ + { + "bbox": [ + 47, + 253, + 286, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 253, + 286, + 297 + ], + "type": "text", + "content": "Table 4. Cross-scenario evaluation on Partial Human3.6M dataset. For the task \"S1\", S1 is the source and S5, S6, S7, S8 are the target. For the task \"S1+S5\", S1 and S5 are the source, and S6, S7, S8 are the target. MPJPE " + }, + { + "bbox": [ + 47, + 253, + 286, + 297 + ], + "type": "inline_equation", + "content": "(\\downarrow)" + }, + { + "bbox": [ + 47, + 253, + 286, + 297 + ], + "type": "text", + "content": " is used for evaluation." 
+ } + ] + } + ], + "index": 50, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 300, + 287, + 379 + ], + "lines": [ + { + "bbox": [ + 48, + 300, + 287, + 379 + ], + "spans": [ + { + "bbox": [ + 48, + 300, + 287, + 379 + ], + "type": "table", + "html": "
MethodVenueDGS1S1+S5
VPose (1-frame) [23]CVPR'19×65.257.9
EvoSkeleton [17]CVPR'2061.554.6
PoseAug [37]TPAMI'2356.751.3
DH-AUG [11]ECCV'2252.247.0
CEE-Net [16]AAAI'2351.946.7
Ours50.345.4
", + "image_path": "4332e622e1370f17764e0e8c7e6136a4ee0ee69465d7a6050466ba6dff4a87cc.jpg" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "table_body" + } + ], + "index": 51 + }, + { + "type": "table", + "bbox": [ + 48, + 412, + 287, + 573 + ], + "blocks": [ + { + "bbox": [ + 47, + 388, + 287, + 410 + ], + "lines": [ + { + "bbox": [ + 47, + 388, + 287, + 410 + ], + "spans": [ + { + "bbox": [ + 47, + 388, + 287, + 410 + ], + "type": "text", + "content": "Table 5. Cross-dataset evaluation with MPJPE (↓) on 3DHP with varied 2D predictions and 2D-to-3D backbones (1-frame)." + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 412, + 287, + 573 + ], + "lines": [ + { + "bbox": [ + 48, + 412, + 287, + 573 + ], + "spans": [ + { + "bbox": [ + 48, + 412, + 287, + 573 + ], + "type": "table", + "html": "
MethodDGDET [5]CPN [3]HR [33]GT
SemGCN [38]×101.998.795.697.4
SemGCN + PoseAug [37]89.989.389.186.1
SemGCN + CEE-generator [16]83.682.882.481.3
SemGCN + DH-AUG [11]79.776.773.071.3
SemGCN + Ours76.574.170.768.9
VPose [23]×92.689.885.686.6
VPose + PoseAug [37]78.378.473.273.0
VPose + CEE-generator [16]75.675.271.271.4
VPose + DH-AUG [11]76.774.871.171.2
VPose + Ours72.470.962.463.1
PoseFormer [43]×91.989.284.285.7
PoseFormer + PoseAug [37]77.777.572.172.3
PoseFormer + CEE-generator [16]----
PoseFormer + DH-AUG [11]75.674.871.672.0
PoseFormer + Ours72.270.562.863.4
MixSTE [36]×90.687.482.084.0
MixSTE + PoseAug [37]76.176.371.771.6
MixSTE + CEE-generator [16]----
MixSTE + DH-AUG [11]74.874.470.970.7
MixSTE + Ours70.568.260.461.0
", + "image_path": "3ab9956c59add66cf4fa963f320ab42834dd7ad211d477a525325a22adaa45e3.jpg" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "table_body" + } + ], + "index": 53 + }, + { + "bbox": [ + 46, + 581, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 581, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 581, + 287, + 652 + ], + "type": "text", + "content": "PCK and " + }, + { + "bbox": [ + 46, + 581, + 287, + 652 + ], + "type": "inline_equation", + "content": "2.5\\%" + }, + { + "bbox": [ + 46, + 581, + 287, + 652 + ], + "type": "text", + "content": " in AUC, and reduces MPJPE by " + }, + { + "bbox": [ + 46, + 581, + 287, + 652 + ], + "type": "inline_equation", + "content": "6.6\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 581, + 287, + 652 + ], + "type": "text", + "content": " in the 3DHP task. In the case of 3DPW, our method shows an improvement of " + }, + { + "bbox": [ + 46, + 581, + 287, + 652 + ], + "type": "inline_equation", + "content": "3.6\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 581, + 287, + 652 + ], + "type": "text", + "content": " in PA-MPJPE compared to CEE-Net [16]. While CEE-Net [16] and PoseGU [7] do not disclose their codes or report their results on MPJPE, it is evident that our method surpasses DH-AUG [11] by " + }, + { + "bbox": [ + 46, + 581, + 287, + 652 + ], + "type": "inline_equation", + "content": "6.2\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 581, + 287, + 652 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 46, + 654, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 287, + 712 + ], + "type": "text", + "content": "Cross-scenario evaluation results. In cross-scenario evaluations, source and target come from different subsets of the same dataset. Maintaining consistency with previous works, we utilize ground truth 2D keypoints as input and single-frame VPose [23] as the 2D-to-3D lifting network." + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 304, + 255, + 545, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 255, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 304, + 255, + 545, + 338 + ], + "type": "text", + "content": "For the situation of using Entire Human3.6M in Tab. 3, our method demonstrates superior performance compared to CEE-Net [16] with a " + }, + { + "bbox": [ + 304, + 255, + 545, + 338 + ], + "type": "inline_equation", + "content": "2.9\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 255, + 545, + 338 + ], + "type": "text", + "content": " reduction in MPJPE and a " + }, + { + "bbox": [ + 304, + 255, + 545, + 338 + ], + "type": "inline_equation", + "content": "2.2\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 255, + 545, + 338 + ], + "type": "text", + "content": " improvement in PA-MPJPE. In the case of using partial Human3.6M in Tab. 4, our approach surpasses CEE-Net [16] by " + }, + { + "bbox": [ + 304, + 255, + 545, + 338 + ], + "type": "inline_equation", + "content": "1.6\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 255, + 545, + 338 + ], + "type": "text", + "content": " in the S1 task and " + }, + { + "bbox": [ + 304, + 255, + 545, + 338 + ], + "type": "inline_equation", + "content": "1.3\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 255, + 545, + 338 + ], + "type": "text", + "content": " in the S1+S5 task based on the MPJPE metric." 
+ } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 304, + 339, + 545, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 339, + 545, + 542 + ], + "spans": [ + { + "bbox": [ + 304, + 339, + 545, + 542 + ], + "type": "text", + "content": "Results with varied 2D predictions and backbones. The results presented in Tables 1 to 4 are confined to the usage of ground truth 2D input and single-frame VPose backbone, which may raise concerns about the universality of the proposed method. To address this concern, we assess the performance of our approach with various 2D predictions such as DET [5], CPN [3], HRNet [33], and diverse lifting backbones including SemGCN [38], PoseFormer [43], and MixSTE [36], as displayed in Table 5. In this evaluation, 3DHP serves as the dataset, and MPJPE is the metric used. Notably, all the listed backbones are single-frame versions. As CEE-Net [16] only provides results for its generation part, CEE-generator, and does not offer open-source code, we have included partial results of CEE-generator. From Table 5, it is evident that our method surpasses all the existing methods, demonstrating the robustness of our framework across various settings." + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 306, + 551, + 417, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 551, + 417, + 563 + ], + "spans": [ + { + "bbox": [ + 306, + 551, + 417, + 563 + ], + "type": "text", + "content": "4.4. Qualitative Results" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": "Fig. 4 shows qualitative results on cross-dataset evaluation (3DHP on the left side and 3DPW on the right side), while Fig. 5 displays qualitative results on cross-scenario evaluation (S1,S5,S6,S7,S8 " + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": " S9,S11 on the left side and S1,S5 " + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": " S6,S7,S8 on the right side). HRNet [33] is applied as the 2D pose estimator and VPose [23] is the 2D-to-3D lifting backbone. We use Source-only, PoseAug [37], DH-AUG [11], Ours, and Ground Truth (GT) for qualitative comparison. Because CEE-Net does not provide source codes or pretrained models, we cannot generate visual examples from it. It is evident that our method outperforms other baselines significantly." + } + ] + } + ], + "index": 59 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2246" + } + ] + } + ], + "index": 60 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 141, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 141, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 141, + 85 + ], + "type": "text", + "content": "4.5. 
Ablation Study" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "spans": [ + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": "Ablation study on the overall framework. The ablation study is conducted to illustrate the functions of three proposed modules: differential generation (DiffGen) in Sec. 3.3, differential discrimination (DiffDis) in Sec. 3.3, and meta optimization (MetaOpt) in Sec. 3.4. In Tab. 6, we apply ground truth as 2D predictions and VPose as the backbone. The absence of DiffGen leads to a decrease in PCK and AUC by " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "2.4\\%" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "2.1\\%" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " respectively, accompanied by a " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "5.9\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " increase in MPJPE on 3DHP, while it increases PA-MPJPE and MPJPE by " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "3.1\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "5.2\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " separately on 3DPW. Similarly, the exclusion of DiffDis results in a decrease of " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "1.7\\%" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " in both PCK and AUC, with a corresponding " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "4.1\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " increase in MPJPE on 3DHP. As for 3DPW, the removal causes a degradation of " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "1.9\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " in PA-MPJPE and " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "3.0\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " in MPJPE. 
Besides, the removal of MetaOpt leads to a decline in PCK and AUC by " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "1.2\\%" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "0.8\\%" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " respectively, along with a " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "2.4\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " increase in MPJPE on 3DHP, and an increase of " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "1.3\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "inline_equation", + "content": "1.8\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 90, + 289, + 331 + ], + "type": "text", + "content": " in PA-MPJPE and MPJPE separately. These results show that each module plays a critical role in obtaining better generalization." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 48, + 350, + 287, + 402 + ], + "blocks": [ + { + "bbox": [ + 50, + 335, + 284, + 346 + ], + "lines": [ + { + "bbox": [ + 50, + 335, + 284, + 346 + ], + "spans": [ + { + "bbox": [ + 50, + 335, + 284, + 346 + ], + "type": "text", + "content": "Table 6. Overall framework ablation study on 3DHP and 3DPW" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 350, + 287, + 402 + ], + "lines": [ + { + "bbox": [ + 48, + 350, + 287, + 402 + ], + "spans": [ + { + "bbox": [ + 48, + 350, + 287, + 402 + ], + "type": "table", + "html": "
3DHP3DPW
MethodPCK ↑AUC ↑MPJPE ↓PA-MPJPE ↓MPJPE ↓
Ours w/o DiffGen90.558.669.076.3111.8
Ours w/o DiffDis91.259.067.375.1109.6
Ours w/o MetaOpt91.759.965.574.5108.4
Ours92.960.763.173.2106.6
", + "image_path": "f19feb37ace070267065bcea200ca931c970e5ae6c06334aa935135b31396242.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 408, + 287, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 408, + 287, + 482 + ], + "spans": [ + { + "bbox": [ + 46, + 408, + 287, + 482 + ], + "type": "text", + "content": "Ablation study on the generators. There exist two generators in our framework, and each with two pair groups. In this part, we discuss the functions of proximate pairs in weak augmentor (W-PP), proximate pairs in strong aug-. mentor (S-PP), one-state gap pairs in weak augmentor (WOG), and one-state gap pairs in strong augmentor (S-OG)." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 48, + 500, + 287, + 562 + ], + "blocks": [ + { + "bbox": [ + 63, + 486, + 271, + 498 + ], + "lines": [ + { + "bbox": [ + 63, + 486, + 271, + 498 + ], + "spans": [ + { + "bbox": [ + 63, + 486, + 271, + 498 + ], + "type": "text", + "content": "Table 7. Generators' ablation study on 3DHP and 3DPW" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 500, + 287, + 562 + ], + "lines": [ + { + "bbox": [ + 48, + 500, + 287, + 562 + ], + "spans": [ + { + "bbox": [ + 48, + 500, + 287, + 562 + ], + "type": "table", + "html": "
3DHP3DPW
MethodPCK ↑AUC ↑MPJPE ↓PA-MPJPE ↓MPJPE ↓
Ours w/o W-PP88.357.572.681.7118.8
Ours w/o S-PP90.858.271.378.1111.0
Ours w/o W-OG92.159.665.874.7108.7
Ours w/o S-OG91.458.968.275.4109.5
Ours92.960.763.173.2106.6
", + "image_path": "067c6cdadefb488d326227d072de20adb8def617de015df204962b9d903aa48a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": "In Table 7, excluding W-PP or S-PP leads to a significant decline in PCK by " + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "inline_equation", + "content": "4.6\\%" + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "inline_equation", + "content": "2.1\\%" + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": ", and in AUC by " + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "inline_equation", + "content": "3.2\\%" + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "inline_equation", + "content": "2.5\\%" + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": " respectively, accompanied by a notable increase of " + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "inline_equation", + "content": "9.5\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "inline_equation", + "content": "8.2\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": " in MPJPE separately on 3DHP. These results emphasize the critical role of maintaining similarity in proximate pairs for both weak and strong augmentors, serving as the fundamental basis for generating effective and reasonable synthesized poses. Moreover, the absence of W-OG leads to a decline in PCK and AUC by " + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "inline_equation", + "content": "0.8\\%" + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "inline_equation", + "content": "1.1\\%" + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": " respectively, with a corresponding " + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "inline_equation", + "content": "1.7\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": " increase in the MPJPE on 3DHP. 
The removal of S-OG results in a decrease in PCK and AUC scores by " + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "inline_equation", + "content": "1.5\\%" + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "inline_equation", + "content": "1.8\\%" + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": " respec" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "tively, along with a " + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "inline_equation", + "content": "3.4\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": " increase in MPJPE on 3DHP. These results highlight the significance of maintaining differentiation between the weak augmentor and the strong augmentor during the generation process, where enlarging dissimilarity in S-OG is more important in discriminating these two generators." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 144, + 546, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 144, + 546, + 264 + ], + "spans": [ + { + "bbox": [ + 304, + 144, + 546, + 264 + ], + "type": "text", + "content": "Ablation study on the number of augmentors. Comparisons were conducted between our dual-augmentor framework and single-augmentor frameworks. Alongside our proposed framework, two single-augmentor frameworks were considered in the ablation study, utilizing either the weak-augmentor (WA) or the strong-augmentor (SA). The discrimination and meta-optimization processes exclusively involved source poses and one category of synthesized poses. The results, using ground truth as 2D predictions and VPose as the backbone, are presented in Table 8." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 307, + 296, + 545, + 347 + ], + "blocks": [ + { + "bbox": [ + 306, + 271, + 546, + 293 + ], + "lines": [ + { + "bbox": [ + 306, + 271, + 546, + 293 + ], + "spans": [ + { + "bbox": [ + 306, + 271, + 546, + 293 + ], + "type": "text", + "content": "Table 8. Ablation study of number of augmentors on 3DHP and 3DPW" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 296, + 545, + 347 + ], + "lines": [ + { + "bbox": [ + 307, + 296, + 545, + 347 + ], + "spans": [ + { + "bbox": [ + 307, + 296, + 545, + 347 + ], + "type": "table", + "html": "
<table><tr><td></td><td colspan=\"3\">3DHP</td><td colspan=\"2\">3DPW</td></tr>
<tr><td>Method</td><td>PCK ↑</td><td>AUC ↑</td><td>MPJPE ↓</td><td>PA-MPJPE ↓</td><td>MPJPE ↓</td></tr>
<tr><td>WA</td><td>87.3</td><td>56.0</td><td>74.5</td><td>80.5</td><td>117.7</td></tr>
<tr><td>SA</td><td>89.8</td><td>57.8</td><td>71.0</td><td>79.1</td><td>111.4</td></tr>
<tr><td>Ours</td><td>92.9</td><td>60.7</td><td>63.1</td><td>73.2</td><td>106.6</td></tr></table>
", + "image_path": "e376a1c07baf8cfc4af89cb42d582f4c5d592206d0972b59f99b4f2de456145c.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 356, + 547, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 356, + 547, + 440 + ], + "spans": [ + { + "bbox": [ + 304, + 356, + 547, + 440 + ], + "type": "text", + "content": "From Table 8, it is evident that our proposed framework surpasses both WA and SA significantly, underscoring the superiority of employing two augmentors over a single aug- mentor in addressing DG for 3D HPE. Furthermore, SA outperforms WA, emphasizing the greater significance of exploring out-of-source distributions compared to retaining source-relevant knowledge in cross-dataset tasks." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 453, + 379, + 465 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 453, + 379, + 465 + ], + "spans": [ + { + "bbox": [ + 306, + 453, + 379, + 465 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 473, + 547, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 547, + 675 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 547, + 675 + ], + "type": "text", + "content": "In this paper, we propose a novel dual-augmentor framework designed to enhance domain generalization in 3D human pose estimation. Our framework addresses the critical aspects of data augmentation and the effective utilization of synthesized data. To achieve this, we implement distinctive strategies for the weak and strong generators, ensuring the preservation of source-specific information while simultaneously exploring out-of-source distributions. Moreover, we incorporate meta-optimization techniques to facilitate enhanced interaction among source data, weak-augmented data, and strong-augmented data, thereby simulating domain shifts in the training of pose estimator and fostering the acquisition of domain-invariant knowledge. Extensive experimentation and comprehensive analysis conducted across multiple datasets demonstrate the superior performance of our proposed approach over existing state-of-the-art methods." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "content": "Acknowledgements This material is based upon work supported by the National Science Foundation under Grant CNS-1910844." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2247" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 146 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 146 + ], + "type": "text", + "content": "[1] Maher Baili, Philippe Wenger, and Damien Chablat. A classification of 3r orthogonal manipulators by the topology of their workspace. In IEEE International Conference on Robotics and Automation, 2004. Proceedings. ICRA'04. 2004, pages 1933-1938. IEEE, 2004. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 148, + 288, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 148, + 288, + 213 + ], + "spans": [ + { + "bbox": [ + 53, + 148, + 288, + 213 + ], + "type": "text", + "content": "[2] Wenhao Chai, Zhongyu Jiang, Jenq-Neng Hwang, and Gaoang Wang. Global adaptation meets local generalization: Unsupervised domain adaptation for 3d human pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14655-14665, 2023. 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 214, + 288, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 214, + 288, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 214, + 288, + 270 + ], + "type": "text", + "content": "[3] Yilun Chen, Zhicheng Wang, Yuxiang Peng, Zhiqiang Zhang, Gang Yu, and Jian Sun. Cascaded pyramid network for multi-person pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7103-7112, 2018. 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 271, + 288, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 271, + 288, + 337 + ], + "spans": [ + { + "bbox": [ + 53, + 271, + 288, + 337 + ], + "type": "text", + "content": "[4] Mohsen Gholami, Bastian Wandt, Helge Rhodin, Rabab Ward, and Z Jane Wang. Adaptpose: Cross-dataset adaptation for 3d human pose estimation by learnable motion generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13075-13085, 2022. 2, 3, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 339, + 287, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 339, + 287, + 372 + ], + "spans": [ + { + "bbox": [ + 53, + 339, + 287, + 372 + ], + "type": "text", + "content": "[5] Ross Girshick, Ilija Radosavovic, Georgia Gkioxari, Piotr Dólar, and Kaiming He. Detector. 
https://github.com/facebookresearch/detectron, 2018.7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 373, + 287, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 373, + 287, + 418 + ], + "spans": [ + { + "bbox": [ + 53, + 373, + 287, + 418 + ], + "type": "text", + "content": "[6] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. Advances in neural information processing systems, 27, 2014. 2, 3, 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 419, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 419, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 53, + 419, + 287, + 464 + ], + "type": "text", + "content": "[7] Shannan Guan, Haiyan Lu, Linchao Zhu, and Gengfa Fang. Posegu: 3d human pose estimation with novel human pose generator and unbiased learning. Computer Vision and Image Understanding, 233:103715, 2023. 1, 2, 3, 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 465, + 288, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 465, + 288, + 509 + ], + "spans": [ + { + "bbox": [ + 53, + 465, + 288, + 509 + ], + "type": "text", + "content": "[8] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. Advances in neural information processing systems, 30, 2017. 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 510, + 288, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 510, + 288, + 576 + ], + "spans": [ + { + "bbox": [ + 53, + 510, + 288, + 576 + ], + "type": "text", + "content": "[9] Vladimir Guzov, Aymen Mir, Torsten Sattler, and Gerard Pons-Moll. Human positioning system (hps): 3d human pose estimation and self-localization in large scenes from body-mounted sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4318-4329, 2021. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 578, + 287, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 287, + 622 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 287, + 622 + ], + "type": "text", + "content": "[10] Yilei Hua, Wenhan Wu, Ce Zheng, Aidong Lu, Mengyuan Liu, Chen Chen, and Shiqian Wu. Part aware contrastive learning for self-supervised action recognition. In International Joint Conference on Artificial Intelligence, 2023. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 624, + 287, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 677 + ], + "type": "text", + "content": "[11] Linzhi Huang, Jiahao Liang, and Weihong Deng. Dh-aug: Dh forward kinematics model driven augmentation for 3d human pose estimation. In European Conference on Computer Vision, pages 436-453. Springer, 2022. 1, 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[12] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 
6m: Large scale datasets and predictive methods for 3d human sensing in natural environ-" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "ments. IEEE transactions on pattern analysis and machine intelligence, 36(7):1325-1339, 2013. 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 97, + 545, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 97, + 545, + 129 + ], + "spans": [ + { + "bbox": [ + 307, + 97, + 545, + 129 + ], + "type": "text", + "content": "[13] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 132, + 545, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 132, + 545, + 186 + ], + "spans": [ + { + "bbox": [ + 307, + 132, + 545, + 186 + ], + "type": "text", + "content": "[14] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. Vibe: Video inference for human body pose and shape estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5253-5263, 2020. 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 189, + 545, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 189, + 545, + 233 + ], + "spans": [ + { + "bbox": [ + 307, + 189, + 545, + 233 + ], + "type": "text", + "content": "[15] Da Li, Yongxin Yang, Yi-Zhe Song, and Timothy Hospedales. Learning to generalize: Meta-learning for domain generalization. In Proceedings of the AAAI conference on artificial intelligence, 2018. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 236, + 545, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 236, + 545, + 279 + ], + "spans": [ + { + "bbox": [ + 307, + 236, + 545, + 279 + ], + "type": "text", + "content": "[16] Haolun Li and Chi-Man Pun. Cee-net: Complementary end-to-end network for 3d human pose generation and estimation. Proceedings of the AAAI Conference on Artificial Intelligence, 37(1):1305-1313, 2023. 2, 3, 4, 6, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 281, + 545, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 281, + 545, + 345 + ], + "spans": [ + { + "bbox": [ + 307, + 281, + 545, + 345 + ], + "type": "text", + "content": "[17] Shichao Li, Lei Ke, Kevin Pratama, Yu-Wing Tai, Chi-Keung Tang, and Kwang-Ting Cheng. Cascaded deep monocular 3d human pose estimation with evolutionary training data. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6173–6183, 2020. 2, 6, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 349, + 545, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 349, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 307, + 349, + 545, + 415 + ], + "type": "text", + "content": "[18] Hanbing Liu, Jun-Yan He, Zhi-Qi Cheng, Wangmeng Xiang, Qize Yang, Wenhao Chai, Gaoang Wang, Xu Bao, Bin Luo, Yifeng Geng, et al. Posynda: Multi-hypothesis pose synthesis domain adaptation for robust 3d human pose estimation. 
In Proceedings of the ACM International Conference on Multimedia, 2023. 2, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 417, + 545, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 417, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 307, + 417, + 545, + 449 + ], + "type": "text", + "content": "[19] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2018. 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 452, + 545, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 452, + 545, + 517 + ], + "spans": [ + { + "bbox": [ + 307, + 452, + 545, + 517 + ], + "type": "text", + "content": "[20] Zhengzhi Lu, He Wang, Ziyi Chang, Guoan Yang, and Hubert P. H. Shum. Hard no-box adversarial attack on skeleton-based human action recognition with skeleton-motion-informed gradient. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4597-4606, 2023. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 520, + 545, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 520, + 545, + 564 + ], + "spans": [ + { + "bbox": [ + 307, + 520, + 545, + 564 + ], + "type": "text", + "content": "[21] Toshihiko Matsuura and Tatsuya Harada. Domain generalization using a mixture of multiple latent domains. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 11749-11756, 2020. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 567, + 545, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 567, + 545, + 621 + ], + "spans": [ + { + "bbox": [ + 307, + 567, + 545, + 621 + ], + "type": "text", + "content": "[22] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 2017 international conference on 3D vision (3DV), pages 506-516. IEEE, 2017. 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 623, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 623, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 623, + 545, + 678 + ], + "type": "text", + "content": "[23] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 3d human pose estimation in video with temporal convolutions and semi-supervised training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7753-7762, 2019. 2, 3, 6, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "text", + "content": "[24] Qucheng Peng. Multi-source and Source-Private Cross-Domain Learning for Visual Recognition. PhD thesis, Purdue University, 2022. 
2" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "2248" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 288, + 128 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 288, + 128 + ], + "type": "text", + "content": "[25] Qucheng Peng, Zhengming Ding, Lingjuan Lyu, Lichao Sun, and Chen Chen. Rain: regularization on input and network for black-box domain adaptation. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, pages 4118–4126, 2023. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "type": "text", + "content": "[26] Qucheng Peng, Ce Zheng, and Chen Chen. Source-free domain adaptive human pose estimation. In 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pages 4803-4813. IEEE, 2023. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 175, + 288, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 288, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 288, + 239 + ], + "type": "text", + "content": "[27] Ekkasit Pinyoanuntapong, Ayman Ali, Kalvik Jakkala, Pu Wang, Minwoo Lee, Qucheng Peng, Chen Chen, and Zhi Sun. Gaitsada: Self-aligned domain adaptation for mmwave gait recognition. In 2023 IEEE 20th International Conference on Mobile Ad Hoc and Smart Systems (MASS), pages 218-226. IEEE, 2023. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 241, + 288, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 288, + 285 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 288, + 285 + ], + "type": "text", + "content": "[28] Fengchun Qiao, Long Zhao, and Xi Peng. Learning to learn single domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12556-12565, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 286, + 288, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 286, + 288, + 341 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 288, + 341 + ], + "type": "text", + "content": "[29] Chi Su, Jianing Li, Shiliang Zhang, Junliang Xing, Wen Gao, and Qi Tian. Pose-driven deep convolutional model for person re-identification. In Proceedings of the IEEE international conference on computer vision, pages 3960–3969, 2017. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 342, + 288, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 288, + 386 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 288, + 386 + ], + "type": "text", + "content": "[30] Adith Swaminathan and Thorsten Joachims. Counterfactual risk minimization: Learning from logged bandit feedback. 
In International Conference on Machine Learning, pages 814-823. PMLR, 2015. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 388, + 288, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 288, + 443 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 288, + 443 + ], + "type": "text", + "content": "[31] Timo Von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In Proceedings of the European conference on computer vision (ECCV), pages 601-617, 2018. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 445, + 288, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 445, + 288, + 499 + ], + "spans": [ + { + "bbox": [ + 48, + 445, + 288, + 499 + ], + "type": "text", + "content": "[32] Bastian Wandt and Bodo Rosenhahn. Repnet: Weakly supervised training of an adversarial reprojection network for 3d human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7782-7791, 2019. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 500, + 288, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 500, + 288, + 566 + ], + "spans": [ + { + "bbox": [ + 48, + 500, + 288, + 566 + ], + "type": "text", + "content": "[33] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, et al. Deep high-resolution representation learning for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 43(10):3349-3364, 2020. 2, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 567, + 288, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 288, + 623 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 288, + 623 + ], + "type": "text", + "content": "[34] Hong Yan, Yang Liu, Yushen Wei, Zhen Li, Guanbin Li, and Liang Lin. Skeletonmae: Graph-based masked autoencoder for skeleton sequence pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5606-5618, 2023. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 624, + 288, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 679 + ], + "type": "text", + "content": "[35] Hongwei Yi, Chun-Hao P Huang, Shashank Tripathi, Lea Hering, Justus Thies, and Michael J Black. Mime: Human-aware 3d scene generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12965-12976, 2023. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 680, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 288, + 714 + ], + "type": "text", + "content": "[36] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Jun-song Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. 
In Proceedings of" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 619 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "the IEEE/CVF conference on computer vision and pattern recognition, pages 13232-13242, 2022. 2, 3, 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 96, + 547, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 547, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 547, + 150 + ], + "type": "text", + "content": "[37] Jianfeng Zhang, Kehong Gong, Xinchao Wang, and Jiashi Feng. Learning to augment poses for 3d human pose estimation in images and videos. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(8):10012-10026, 2023. 1, 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 152, + 547, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 547, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 547, + 206 + ], + "type": "text", + "content": "[38] Long Zhao, Xi Peng, Yu Tian, Mubbasir Kapadia, and Dimitris N Metaxas. Semantic graph convolutional networks for 3d human pose regression. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3425-3435, 2019. 2, 3, 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 207, + 547, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 207, + 547, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 207, + 547, + 251 + ], + "type": "text", + "content": "[39] Long Zhao, Ting Liu, Xi Peng, and Dimitris Metaxas. Maximum-entropy adversarial data augmentation for improved generalization and robustness. Advances in Neural Information Processing Systems, 33:14435–14447, 2020. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 252, + 547, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 252, + 547, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 252, + 547, + 308 + ], + "type": "text", + "content": "[40] Qitao Zhao, Ce Zheng, Mengyuan Liu, Pichao Wang, and Chen Chen. Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8877-8886, 2023. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 308, + 545, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 308, + 545, + 351 + ], + "spans": [ + { + "bbox": [ + 307, + 308, + 545, + 351 + ], + "type": "text", + "content": "[41] Shanshan Zhao, Mingming Gong, Tongliang Liu, Huan Fu, and Dacheng Tao. Domain generalization via entropy regularization. Advances in Neural Information Processing Systems, 33:16096-16107, 2020. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 353, + 545, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 353, + 545, + 407 + ], + "spans": [ + { + "bbox": [ + 307, + 353, + 545, + 407 + ], + "type": "text", + "content": "[42] Yuyang Zhao, Zhun Zhong, Na Zhao, Nicu Sebe, and Gim Hee Lee. Style-hallucinated dual consistency learning for domain generalized semantic segmentation. 
In European Conference on Computer Vision, pages 535-552. Springer, 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 408, + 545, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 408, + 545, + 463 + ], + "spans": [ + { + "bbox": [ + 307, + 408, + 545, + 463 + ], + "type": "text", + "content": "[43] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11656-11665, 2021. 2, 3, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 464, + 545, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 464, + 545, + 509 + ], + "spans": [ + { + "bbox": [ + 307, + 464, + 545, + 509 + ], + "type": "text", + "content": "[44] Ce Zheng, Xianpeng Liu, Guo-Jun Qi, and Chen Chen. Potter: Pooling attention transformer for efficient human mesh recovery. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 510, + 545, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 510, + 545, + 564 + ], + "spans": [ + { + "bbox": [ + 307, + 510, + 545, + 564 + ], + "type": "text", + "content": "[45] Ce Zheng, Matias Mendieta, Taojiannan Yang, Guo-Jun Qi, and Chen Chen. Feater: An efficient network for human reconstruction via feature map-based transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 565, + 545, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 565, + 545, + 619 + ], + "spans": [ + { + "bbox": [ + 307, + 565, + 545, + 619 + ], + "type": "text", + "content": "[46] Wentao Zhu, Xiaoxuan Ma, Zhaoyang Liu, Libin Liu, Wayne Wu, and Yizhou Wang. Motionbert: A unified perspective on learning human motion representations. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 
2" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "2249" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/eb377d27-ee40-49e4-9796-048cc8e1c35d_content_list.json b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/eb377d27-ee40-49e4-9796-048cc8e1c35d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..24617cb460f6adb2b5a6dd5e2d495f62ce9e845a --- /dev/null +++ b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/eb377d27-ee40-49e4-9796-048cc8e1c35d_content_list.json @@ -0,0 +1,2124 @@ +[ + { + "type": "text", + "text": "A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution", + "text_level": 1, + "bbox": [ + 81, + 130, + 888, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhixiong Yang1 \nShuanghui Zhang1", + "bbox": [ + 169, + 179, + 334, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jingyuan Xia $^{1,*}$ \nZhen Liu", + "bbox": [ + 344, + 180, + 470, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shengxi Li² \nYaowen Fu¹", + "bbox": [ + 509, + 180, + 612, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xinghua Huang $^{1}$ Yongxiang Liu $^{1}$", + "bbox": [ + 656, + 180, + 795, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ College of Electronic Engineering, National University of Defense Technology, Changsha, China \n $^{2}$ College of Electronic Engineering, Beihang University, Beijing, China \nyzx21@nudt.edu.cn, j.xia10@nudt.edu.cn", + "bbox": [ + 99, + 232, + 870, + 286 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 321, + 313, + 338 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Deep learning-based methods have achieved significant successes on solving the blind super-resolution (BSR) problem. However, most of them request supervised pretraining on labelled datasets. This paper proposes an unsupervised kernel estimation model, named dynamic kernel prior (DKP), to realize an unsupervised and pre-training-free learning-based algorithm for solving the BSR problem. DKP can adaptively learn dynamic kernel priors to realize real-time kernel estimation, and thereby enables superior HR image restoration performances. This is achieved by a Markov chain Monte Carlo sampling process on random kernel distributions. The learned kernel prior is then assigned to optimize a blur kernel estimation network, which entails a network-based Langevin dynamic optimization strategy. These two techniques ensure the accuracy of the kernel estimation. DKP can be easily used to replace the kernel estimation models in the existing methods, such as Double-DIP and FKP-DIP, or be added to the off-the-shelf image restoration model, such as diffusion model. In this paper, we incorporate our DKP model with DIP and diffusion model, referring to DIP-DKP and Diff-DKP, for validations. 
Extensive simulations on Gaussian and motion kernel scenarios demonstrate that the proposed DKP model can significantly improve the kernel estimation with comparable runtime and memory usage, leading to state-of-the-art BSR results. The code is available at https://github.com/XYLGroup/DKP.", + "bbox": [ + 75, + 353, + 473, + 760 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 787, + 207, + 803 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Deep learning provides a new avenue for solving the blind super-resolution (BSR) problem, which aims to reconstruct", + "bbox": [ + 75, + 813, + 468, + 844 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhixiong Yang and Jingyuan Xia contributed equally to this work (*Corresponding author: Jingyuan Xia). This work is supported by National Natural Science Foundation of China, projects 61921001, 62131020, 62322121 and 62171448, and the NSFDYS of Hunan 2022J110067.", + "bbox": [ + 75, + 851, + 468, + 898 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "high-resolution (HR) images from the low-resolution (LR) observations with unknown blur kernels, and is known to be highly non-convex and ill-posed. To alleviate the non-convexity and ill-posedness, most of learning-based BSR methods incorporate image priors via supervised learning based on paired LR-HR samples. However, pre-defined labeled training datasets are expensive, time-consuming, and even not feasible in specific scenarios, such as for high speed targets (e.g., satellites, aircraft) and medical images (e.g., beating heart). Thus, unsupervised learning-based solutions are highly demanded for BSR problem.", + "bbox": [ + 496, + 321, + 893, + 489 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The existing BSR methods can be roughly divided into model-based and learning-based strategies in terms of the priors adopted to provide performance guarantee. Model-based approaches [19, 21, 33, 51] typically adopt hand-designed and explicit constraints as regularizations on image properties, or expert knowledge of the blur kernel. Meanwhile, learning-based BSR methods [12, 16, 20, 27-29, 49, 53, 54, 56] aim to train an end-to-end network with paired LR-HR image samples to leverage data priors for boosting performances. However, these methods highly demand the data and need to undergo throughing pre-training before applications, leading to limited generalization ability towards varying blur kernels. To alleviate this issue, quite a few methods [5, 13, 40, 50, 57, 59] substitute the cumbersome training in advance by a well-trained diffusion model with significantly less fine-tuning samples in an off-the-shelf fashion. On the other side, a slice of works [3, 17, 25, 34, 39, 51] propose to replace the HR image data priors by kernel priors, which are more substantial, economical and efficient to be trained. 
However, both of these advances are underlying the supervised learning scheme with necessity of training on labeled datasets, still hindering the flexibility and generalization ability towards the BSR tasks with different kernels and unknown HR ground truths.", + "bbox": [ + 496, + 489, + 892, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we propose a dynamic kernel prior (DKP) generation model that can be plug-in with the majority of image restoration (IR) models, to solve BSR problem in an", + "bbox": [ + 498, + 854, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "26046", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "unsupervised way. The proposed DKP model consists of two modules: random kernel sampling (RKS) module and prior kernel estimation (PKE) module. In the RKS module, a Markov Chain Monte Carlo (MCMC) sampling strategy on kernel distributions iteratively generates random kernels as kernel priors, which are then assigned to the PKE module. The PKE module is employed to estimate the blur kernel with respect to the kernel prior generated from the RKS module, the observed LR input and estimated HR image from the IR model. The estimated blur kernel is then assigned to an adopted IR model for the HR image restoration. Along with the alternative solving processes, the MCMC process in RKS module converges to a desired kernel distribution with respect to the LR observation and the estimated HR image to guarantee a rational kernel prior. Meanwhile, a network-based Langevin dynamics (NLD) paradigm is proposed to optimize the kernel estimator in our PKE module with respect to the RKS output kernel prior and the data consistency based on the LR image reconstruction error. The RKS module realizes an unsupervised kernel prior learning. The PKE module achieves promising kernel estimation via the NLD update scheme, which further alleviates the non-convexity and ill-posedness in the view of optimization strategy. In this way, the DKP model is capable of providing the plug-and-play kernel estimation without training in advance on paired LR-HR samples, and is flexible to be applied to the existing IR models for solving the BSR problem.", + "bbox": [ + 76, + 90, + 472, + 512 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Two applications are proposed to validate the feasibility and performance of our DKP model: deep image prior (DIP) [44] and diffusion model [14] adopted as the IR model, referring to DIP-DKP and Diff-DKP, respectively. For the DIP-DKP, we simultaneously optimize the parameters of DIP and DKP models from scratch during the alternative solution process. For the Diff-DKP, the adopted diffusion model is off-the-shelf from [14] and is applied as the fixed HR image restorer. The DKP model is optimized from scratch as well. 
Extensive simulation results show that the DIP-DKP achieves comparable performance than the existing methods, while the Diff-DKP achieves the state-of-the-art performance in both of Gaussian and motion kernel scenarios. The main contributions are summarized as follows:", + "bbox": [ + 76, + 516, + 472, + 742 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The RKS module is proposed to generate a rational kernel prior from the MCMC sampling on random kernel distributions. This way, an unsupervised kernel prior learning is achieved to substitute the pre-training phase.", + "- In PKE module, the NLD is proposed to optimize the kernel estimator, ensuring good convergence and concise estimation of the blur kernel from the perspective of optimization strategy.", + "- The proposed DKP model enjoys the ease use on the popular IR models without the necessity of pre-training/re" + ], + "bbox": [ + 76, + 750, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "training towards different scenarios. The two applications, i.e., DIP-DKP and Diff-DKP, validate the state-of-the-art performance and excellent flexibility of our DKP model.", + "bbox": [ + 511, + 90, + 893, + 150 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 164, + 640, + 180 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To alleviate the non-convexity and ill-posedness, early model-based approaches [11, 31, 33, 37] typically construct image priors in explicit formulations, such as the total variation (TV) [36], gradient profile [42], hyper-Laplacian [21] and sparsity [19]. In contrast, learning-based methods [7, 12, 16–18, 20, 28, 29, 45, 49, 53, 56] typically train an end-to-end network on labelled image samples to incorporate data priors. Wang et al. [45] proposed a CNN-based deep network with degradation feature representation module to learn image degradation feature from supervised training on paired LR-HR images. Li et al. [24] proposed a transformer network to learn multi-scale image feature via self-attention mechanisms. To reduce the high training costs of time and data, recent advances [5, 38, 40, 46, 50, 57, 59] are proposed to solve BSR problem by an off-the-shelf diffusion model [14]. Lin et al. [26] proposed to partially fine-tune the parameters of diffusion model with significantly less labeled images. Wang et al. [46] further formulated a diffusion-based BSR algorithm that iteratively solves super-resolution tasks with the given kernel without re-training. Different from the end-to-end models that are trained on paired image samples, recent methods tend to resolve BSR problem via pre-training on kernel datasets [25] or pre-defined kernel priors [51]. An alternative framework between the kernel estimation and image restoration is typically adopted in these methods [3, 10, 12, 39, 44, 58], such as double-deep image prior (Double-DIP) [34]. On the basis of this framework, Liang et al. [25] established a flow-based kernel prior (FKP) network that is pre-trained on labeled kernels to enroll kernel priors while the HR image is estimated by DIP network in an online fashion. Yue et al. [51] proposed a hand-crafted kernel prior model to improve the robustness towards the Gaussian kernel scenario. 
Despite the fact that these methods approximately bring down the data requirements and training costs, the necessity of training in advances or hand-crafted design still limits the flexibility and generalization ability towards the varying kernel scenarios (Gaussian and motion) without ground truths.", + "bbox": [ + 496, + 189, + 893, + 765 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Dynamic Kernel Prior (DKP)", + "text_level": 1, + "bbox": [ + 500, + 776, + 769, + 792 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Problem Formulation. The degradation model of BSR problem is commonly expressed as follows,", + "bbox": [ + 498, + 801, + 890, + 832 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {y} = (\\boldsymbol {x} \\otimes \\boldsymbol {k}) \\downarrow_ {s} + \\boldsymbol {n}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 622, + 844, + 890, + 859 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $y$ denotes the LR image, $\\pmb{x}$ denotes the HR image, $\\otimes$ indicates the convolution operation, $\\downarrow_{s}$ denotes the down", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "26047", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ffb76d946744169a154063112a00aed614d570b2867d0b6b869d24ea7f8d968e.jpg", + "image_caption": [ + "Figure 1. The overview of the RKS module. The MCMC simulation can generate the random kernel $\\pmb{k}_p^t$ from random kernel distributions $\\{\\pmb{k}_r^l\\}_{l=1}^L$ as the kernel prior with respect to the current model parameters $\\pmb{x}^{t-1}, \\pmb{y}$ ." + ], + "image_footnote": [], + "bbox": [ + 116, + 82, + 433, + 231 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "sampling operation with scale factor $s$ , and $k$ denotes the blur kernel. The BSR problem (1) can be formulated as a maximum a posteriori (MAP) problem:", + "bbox": [ + 75, + 306, + 468, + 354 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\max _ {\\boldsymbol {x}, \\boldsymbol {k}} p (\\boldsymbol {y} | \\boldsymbol {x}, \\boldsymbol {k}) p (\\boldsymbol {x}) p (\\boldsymbol {k}), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 361, + 468, + 383 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $p(\\boldsymbol{y}|\\boldsymbol{x},\\boldsymbol{k})$ denotes the likelihood of the observed LR image $\\boldsymbol{y}$ , $p(\\boldsymbol{x})$ and $p(\\boldsymbol{k})$ are the HR image and kernel priors, respectively. Image priors [8, 14, 41, 44, 55] have been well-designed and fully-studied in the past decade. In contrast, researches on kernel priors $p(\\boldsymbol{k})$ are in the ascendant, as kernel samples are less expensive to obtain and the training phase is more efficient [9, 12, 25, 51, 53].", + "bbox": [ + 75, + 393, + 468, + 498 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this paper, we propose the DKP model, which comprises two modules: RKS and PKE. The RKS module is employed to generate rational kernel priors, which are assigned to the PKE module to support the estimation of blur kernel. Let $t = 1,2,\\dots ,T$ denote the alternative iterations among these two modules and the adopted IR model, $\\pmb{k}^t$ and $\\pmb{x}^{t}$ denote the estimated blur kernel and HR image at the $t^{th}$ iteration, respectively. 
The details of DKP model is given below.", + "bbox": [ + 75, + 500, + 468, + 633 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "RKS module. At the $t^{th}$ iteration, the RKS module plays the key role of generating a rational kernel prior $\\pmb{k}_p^t$ from the MCMC simulation. The overview diagram is shown in Fig. 1. Let $p(\\pmb{k}_r|\\pmb{\\Sigma}_r)$ denotes that the random kernel $\\pmb{k}_r$ is conditioned by the latent variable $\\pmb{\\Sigma}_r$ , in which $p(\\pmb{\\Sigma}_r)$ determines the category of blur kernel. Then the distribution of the kernel prior $\\pmb{k}_p^t$ can be formulated as", + "bbox": [ + 75, + 633, + 468, + 743 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\boldsymbol {k} _ {p} ^ {t}\\right) = \\int_ {\\boldsymbol {\\Sigma} _ {r}} p \\left(\\boldsymbol {k} _ {r} \\mid \\boldsymbol {\\Sigma} _ {r}\\right) p \\left(\\boldsymbol {\\Sigma} _ {r}\\right) d \\boldsymbol {\\Sigma} _ {r}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 153, + 750, + 468, + 782 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here, $\\Sigma_r$ is the parameter of kernel (e.g., the variance of Gaussian kernel or the length of motion kernel). It is not easy to sample all the possible $\\Sigma_r$ , and therefore, we convert (3) into the Monte Carlo simulation in the following form:", + "bbox": [ + 75, + 790, + 468, + 863 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\boldsymbol {k} _ {p} ^ {t}\\right) \\approx \\sum_ {l = 1} ^ {L} p \\left(\\boldsymbol {k} _ {r} ^ {l} \\mid \\boldsymbol {\\Sigma} _ {r} ^ {l}\\right) \\boldsymbol {\\Sigma} _ {r} ^ {l}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 863, + 468, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $l$ denotes the index of the Monte Carlo sampling, $\\pmb{\\Sigma}_r^l$ denotes the $l^{th}$ sampled latent variable, $\\pmb{k}_r^l$ is the $l^{th}$ sampled kernel, conditioned on the $\\pmb{\\Sigma}_r^l$ .", + "bbox": [ + 498, + 90, + 890, + 135 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To ensure the rationality of randomly generated kernels towards the BSR problem, as well as the optimization during the iterations, the MCMC simulation is proposed as follows,", + "bbox": [ + 498, + 136, + 890, + 196 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\boldsymbol {k} _ {p} ^ {t} \\mid \\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}\\right) \\approx \\sum_ {l = 1} ^ {L} p \\left(\\boldsymbol {k} _ {r} ^ {l} \\mid \\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}\\right) p \\left(\\boldsymbol {k} _ {r} ^ {l} \\mid \\boldsymbol {\\Sigma} _ {r} ^ {l}\\right) \\boldsymbol {\\Sigma} _ {r} ^ {l}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 208, + 890, + 250 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $p(\\pmb{k}_r^l|\\pmb{x}^{t-1},\\pmb{y})$ denotes the kernel weight $\\omega^l$ , that is conditioned on the observed LR image $\\pmb{y}$ and the estimated HR image $\\pmb{x}^{t-1}$ with respect to the MCMC loss $\\mathcal{L}_{MCMC}$ in the following form", + "bbox": [ + 498, + 261, + 890, + 324 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\omega^ {l} = p \\left(\\boldsymbol {k} _ {r} ^ {l} \\mid \\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}\\right) \\propto \\frac {1}{\\mathcal {L} _ {M C M C} ^ {l}}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 335, + 890, + 368 + ], + "page_idx": 2 + }, 
+ { + "type": "text", + "text": "where", + "bbox": [ + 500, + 378, + 544, + 391 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {M C M C} ^ {l} = \\left\\| \\boldsymbol {y} - \\left(\\boldsymbol {x} ^ {t - 1} \\otimes \\boldsymbol {k} _ {r} ^ {l}\\right) \\downarrow_ {s} \\right\\| _ {F} ^ {2} + \\delta , \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 560, + 404, + 890, + 422 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$\\delta$ is the noise to prevent $\\mathcal{L}_{MCMC}^l = 0$ . In this way, $\\pmb{k}_p^t$ can be formulated as", + "bbox": [ + 498, + 434, + 890, + 463 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {k} _ {p} ^ {t} = \\frac {1}{L} \\sum_ {l = 1} ^ {L} \\omega^ {l} \\boldsymbol {k} _ {r} ^ {l}. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 632, + 463, + 890, + 503 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The obtained $\\pmb{k}_p^t$ is then assigned to the PKE module as a rational kernel prior, which will be introduced next.", + "bbox": [ + 498, + 512, + 890, + 541 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We note that the obtained kernel prior $k_{p}^{t}$ is an expectation of $L$ times sampling according to (4). The number of the sampling times $L$ plays the role of annealing/tempering in MCMC simulations as a hyper-parameter. Details of the tuning on $L$ will be given in Section 5.1.", + "bbox": [ + 498, + 542, + 890, + 618 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "PKE module. In our DKP model, the PKE module is employed to estimate the blur kernel by a lightweight network $\\mathrm{G}_k$ with parameters $\\phi_k$ as follows", + "bbox": [ + 498, + 618, + 890, + 662 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {k} ^ {t} = \\mathbf {G} _ {\\boldsymbol {k}} \\left(\\phi_ {\\boldsymbol {k}} ^ {t}\\right). \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 643, + 675, + 890, + 693 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The network $\\mathbf{G}_k$ takes a fixed noise that is randomly initialized as input, and we neglect it for demonstration convenience as $\\phi_k^t$ are the main variables.", + "bbox": [ + 498, + 705, + 890, + 751 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This kernel estimator $\\mathbf{G}_k$ is optimized in the NLD paradigm with respect to the data-consistency term and kernel prior term, as shown in Fig. 2. The data-consistency term is computed by the LR image reconstruction error, which is given by", + "bbox": [ + 496, + 751, + 890, + 827 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\log p \\left(\\phi_ {\\boldsymbol {k}} ^ {t - 1} \\mid \\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}\\right) = - \\| \\boldsymbol {y} - \\left(\\boldsymbol {x} ^ {t - 1} \\otimes \\mathbf {G} _ {\\boldsymbol {k}} \\left(\\phi_ {\\boldsymbol {k}} ^ {t - 1}\\right)\\right) \\downarrow_ {s} \\| _ {F} ^ {2}. 
\\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 498, + 837, + 890, + 869 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The kernel prior term is computed based on the difference between the network-estimated kernel $\\mathbf{G}_k(\\boldsymbol{\\phi}_k^{t - 1})$ and the", + "bbox": [ + 498, + 869, + 890, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "26048", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c828cc9c92064405ff6c0566c475b9fc2592e3e1b74d7eebe5dd14dd0f6e7b61.jpg", + "image_caption": [ + "Figure 2. The overview of the PKE module. The blur kernel $\\pmb{k}^t$ is estimated by the network $G_{k}$ , whose parameters $\\phi_{k}$ are updated by the kernel prior term from RKS module and data-consistency term, based on the LR image reconstruction error." + ], + "image_footnote": [], + "bbox": [ + 81, + 80, + 464, + 227 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "random-sampled kernel $k_{p}^{t}$ from the RKS module as follows,", + "bbox": [ + 75, + 304, + 468, + 333 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\log p \\left(\\boldsymbol {\\phi} _ {\\boldsymbol {k}} ^ {t - 1} \\mid \\boldsymbol {k} _ {p} ^ {t}\\right) = - \\| \\mathrm {G} _ {\\boldsymbol {k}} \\left(\\boldsymbol {\\phi} _ {\\boldsymbol {k}} ^ {t - 1}\\right) - \\boldsymbol {k} _ {p} ^ {t} \\| _ {F} ^ {2}. \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 340, + 468, + 359 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "By combining (10) and (11), the network parameters $\\phi_{k}^{t - 1}$ can be updated as follows,", + "bbox": [ + 76, + 366, + 467, + 396 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\phi_ {\\boldsymbol {k}} ^ {t} = \\phi_ {\\boldsymbol {k}} ^ {t - 1} + \\frac {\\delta^ {2}}{2} \\frac {\\partial \\log p (\\phi_ {\\boldsymbol {k}} ^ {t - 1} | \\boldsymbol {x} ^ {t - 1} , \\boldsymbol {y})}{\\partial \\phi_ {\\boldsymbol {k}} ^ {t - 1}} \\\\ + \\delta \\frac {\\partial \\log p \\left(\\phi_ {\\boldsymbol {k}} ^ {t - 1} \\mid \\boldsymbol {k} _ {p} ^ {t}\\right)}{\\partial \\phi_ {\\boldsymbol {k}} ^ {t - 1}}, \\tag {12} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 107, + 401, + 468, + 478 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the second term is the data-consistency update, the third term is the additional update based on the random kernel $\\pmb{k}_p^t$ .", + "bbox": [ + 75, + 482, + 468, + 527 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "It has been proved to be effective that the random noise-based disturbance can prevent being trapped into bad local modes for the variable update in Langevin dynamics [2, 32, 48, 51]. More details of Langevin dynamics refer to the supplementary material. At this stage, the random kernel sample from the RKS module can be regarded as the random \"noise\" for the $\\phi_{k}^{t - 1}$ update. Eq. 
(12) can be reformulated as follows,", + "bbox": [ + 75, + 527, + 468, + 647 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\phi_ {k} ^ {t} = \\phi_ {k} ^ {t - 1} + \\frac {\\delta^ {2}}{2} \\frac {\\partial \\log p \\left(\\phi_ {k} ^ {t - 1} \\mid \\boldsymbol {x} ^ {t - 1} , \\boldsymbol {y}\\right)}{\\partial \\phi_ {k} ^ {t - 1}} + \\zeta_ {\\phi_ {k}} ^ {t - 1}, \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 96, + 652, + 468, + 689 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\zeta_{\\phi_k}^{t - 1} = \\frac{\\partial\\log p(\\phi_k^{t - 1}|k_p^t)}{\\partial\\phi_k^{t - 1}}$ denotes the parameters correlated Langevin dynamics disturbance.", + "bbox": [ + 75, + 694, + 467, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The pipeline of our DKP at the $t^{th}$ iteration is given in Algorithm 1. The whole DKP model is implemented in a plug-and-play style, in which training in advance is not required. Besides, the random kernels from the RKS module are self-adaptively sampled through the MCMC simulation, without the need of labeled training data. We should also note that the DKP model only brings negligible runtime and memory cost in applications, as the adopted network $\\mathbf{G}_k$ is typically lightweight. This leads to high flexibility and low computational complexity. These three merits promise our DKP the convenience of being applied to", + "bbox": [ + 75, + 734, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Algorithm 1: The proposed DKP model.", + "text_level": 1, + "bbox": [ + 509, + 94, + 787, + 109 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Given: $x^{t - 1},y$ and $\\phi_{k}^{t - 1}$", + "2 % Random Kernel Sampling (RKS) Module", + "3 Sample random kernels $\\{k_r^l\\}_{l = 1}^L$ via MC.", + "4 for $l\\gets 0,1,\\ldots ,L$ do" + ], + "bbox": [ + 508, + 112, + 754, + 165 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{c c} \\mathbf {5} & \\omega^ {l} = \\frac {1}{\\mathcal {L} _ {M C M C} ^ {l}}, \\mathcal {L} _ {M C M C} ^ {l} = \\| \\boldsymbol {y} - (\\boldsymbol {x} ^ {t - 1} \\otimes \\boldsymbol {k} _ {r} ^ {l}) \\downarrow_ {s} \\| _ {F} ^ {2} + \\delta \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 508, + 165, + 849, + 185 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "6 end", + "bbox": [ + 508, + 185, + 544, + 194 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "7 $\\pmb{k}_p^t = \\frac{1}{L}\\sum_{l = 1}^L\\omega^l\\pmb{k}_r^t$", + "bbox": [ + 508, + 196, + 637, + 212 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "8 % Prior Kernel Estimation (PKE) Module", + "bbox": [ + 508, + 212, + 741, + 223 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\phi_ {k} ^ {t} = \\phi_ {k} ^ {t - 1} + \\frac {\\delta^ {2}}{2} \\frac {\\partial \\log p \\left(\\phi_ {k} ^ {t - 1} | \\boldsymbol {x} ^ {t - 1} , \\boldsymbol {y}\\right)}{\\partial \\phi_ {k} ^ {t - 1}} + \\delta \\frac {\\partial \\log p \\left(\\phi_ {k} ^ {t - 1} | \\boldsymbol {k} _ {p} ^ {t}\\right)}{\\partial \\phi_ {k} ^ {t - 1}}\n$$\n", + "text_format": "latex", + "bbox": [ + 508, + 224, + 856, + 251 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "10 Output: $\\pmb{k}^t = \\mathrm{G}_k(\\phi_k^t)$ .", + "bbox": [ + 506, + 251, + 653, + 263 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": 
"images/33ffb3936f668692c79d7b7e5149db888b139a49aa0a790243c3d47cf877110e.jpg", + "image_caption": [ + "Figure 3. The overview of our DKP-based BSR method." + ], + "image_footnote": [], + "bbox": [ + 504, + 275, + 887, + 386 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the existing image restoration approaches, including the untrained DIP model and off-the-shelf pre-trained diffusion model, which will be detailed in the next section.", + "bbox": [ + 498, + 429, + 890, + 473 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. DKP-based BSR Methods", + "text_level": 1, + "bbox": [ + 500, + 484, + 740, + 501 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Pipeline", + "text_level": 1, + "bbox": [ + 500, + 510, + 598, + 526 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The overview of the proposed DKP-based BSR method is illustrated in Fig. 3. The DKP model (gray box), including RKS module (blue box), PKE module (lilac box), and IR model (red box) alternatively optimize the blur kernel and refine the HR image, respectively. For each iteration, the estimated HR image $\\boldsymbol{x}^{t-1}$ and LR image $\\boldsymbol{y}$ are first fed to RKS module $f_{\\mathrm{RKS}}$ to generate kernel prior", + "bbox": [ + 496, + 532, + 890, + 640 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {k} _ {p} ^ {t} = f _ {\\mathrm {R K S}} \\left(\\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}\\right), \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 619, + 646, + 890, + 666 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\pmb{x}^{t - 1}$ denotes the estimated HR image from the last IR model output. Then, the kernel prior $\\pmb{k}_p^t$ will be assigned to the PKE module $f_{\\mathrm{PKE}}$ , which estimates kernel as follows,", + "bbox": [ + 498, + 670, + 890, + 715 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {k} ^ {t} = f _ {\\mathrm {P K E}} \\left(\\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}, \\boldsymbol {k} _ {p} ^ {t}\\right), \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 722, + 890, + 742 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\pmb{k}^t$ is the estimated kernel at the $t^{th}$ kernel estimation iteration, which will be assigned to the IR model. The $t^{th}$ HR image $\\pmb{x}^t$ can be estimated by the IR model as follows", + "bbox": [ + 498, + 748, + 890, + 792 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} ^ {t} = f _ {\\mathrm {I R}} \\left(\\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}, \\boldsymbol {k} ^ {t}\\right), \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 614, + 800, + 890, + 819 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $f_{\\mathrm{IR}}$ denotes the adopted IR model. In this paper, two representative IR models, DIP [44] and diffusion model [14], are applied to evaluate the DKP-based BSR solutions, referring to DIP-DKP and Diff-DKP, which are introduced in the sequel.", + "bbox": [ + 496, + 825, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "26049", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2: The proposed DIP-DKP." 
+ ], + "code_body": "1 Given: $y,\\phi_x^0,\\phi_{DKP}^0,x^0 = G_x(\\phi_x^0)$ \n2 for $t\\gets 0,1,\\dots,T - 1$ do \n3 $\\%$ DKP-based kernel estimation stage \n4 $\\phi_{DKP}^{t + 1} = \\phi_{DKP}^{t} + \\frac{\\delta^{2}}{2}\\frac{\\partial\\log p(\\phi_{DKP}^{t}|x^{t},y)}{\\partial\\phi_{DKP}^{t}} +\\delta \\frac{\\partial\\log p(\\phi_{DKP}^{t}|k_{p}^{t})}{\\partial\\phi_{DKP}^{t}}$ \n5 $\\pmb {k}^{t + 1} = \\mathrm{G}_{DKP}(\\phi_{DKP}^{t + 1})$ \n6 $\\%$ DIP-based image restoration stage \n7 $\\phi_x^{t + 1} = \\phi_x^t +\\gamma_x^t\\frac{\\partial\\log p(\\phi_x^t|y,k^t)}{\\partial\\phi_x^t}$ \n8 $\\pmb{x}^{t + 1} = \\mathbf{G}_{\\pmb{x}}(\\pmb{\\phi}_{\\pmb{x}}^{t + 1})$ \n9 end \n10 Output: $x^T,k^T$", + "bbox": [ + 81, + 111, + 467, + 282 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. The proposed DIP-DKP", + "text_level": 1, + "bbox": [ + 76, + 311, + 297, + 329 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "DIP-based Image Restoration. DIP [44] is designed for capturing low-level image statistics, and estimates the HR image $\\boldsymbol{x} = \\mathrm{G}_{\\boldsymbol{x}}(\\boldsymbol{z}_{\\boldsymbol{x}}, \\phi_{\\boldsymbol{x}})$ from a fixed random noise input $z_{x}$ (we omit $z_{x}$ in the rest of this paper for notational convenience). A typical formulation of DIP-based BSR methods [25, 34] is given as follows", + "bbox": [ + 75, + 335, + 468, + 428 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{ \\begin{array}{l} \\phi_ {x} ^ {*}, \\phi_ {k} ^ {*} = \\underset {\\phi_ {x}, \\phi_ {k}} {\\arg \\min } \\| \\boldsymbol {y} - \\\\ \\quad \\left(\\mathrm {G} _ {\\boldsymbol {x}} \\left(\\phi_ {\\boldsymbol {x}}\\right) \\otimes \\mathrm {G} _ {\\boldsymbol {k}} \\left(\\phi_ {\\boldsymbol {k}}\\right)\\right) \\downarrow_ {s} \\| _ {F} ^ {2}, \\\\ \\boldsymbol {x} ^ {*} = \\mathrm {G} _ {\\boldsymbol {x}} \\left(\\phi_ {\\boldsymbol {x}} ^ {*}\\right), \\boldsymbol {k} ^ {*} = \\mathrm {G} _ {\\boldsymbol {k}} \\left(\\phi_ {\\boldsymbol {k}} ^ {*}\\right). \\end{array} \\right. \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 438, + 468, + 503 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Double-DIP [34] and FKP-DIP [25] have demonstrated the effectiveness of this formulation on the BSR problem. However, the kernel prior of $\\mathrm{G}_k(\\phi_k^*)$ either adopts an untrained network with limited kernel estimation performance [34], or a pre-trained kernel network, i.e., FKP [25], that requires supervised training in advance. As shall be shown in experiments, pre-trained networks fail to generate reasonable kernel estimations when the kernel categories vary.", + "bbox": [ + 75, + 512, + 468, + 648 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Proposed DIP-DKP. We replace the untrained or pre-trained kernel prior networks in the existing DIP-based alternating framework with the proposed DKP model, which we refer to as DIP-DKP. The objective of our proposed DIP-DKP can be formulated as follows,", + "bbox": [ + 75, + 648, + 468, + 723 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{ \\begin{array}{l} \\phi_ {\\boldsymbol {x}} ^ {*}, \\phi_ {D K P} ^ {*} = \\underset {\\phi_ {\\boldsymbol {x}}, \\phi_ {D K P}} {\\arg \\min } \\| \\boldsymbol {y} - \\left(\\mathrm {G} _ {D K P} \\left(\\phi_ {D K P}\\right) \\otimes \\right. \\\\ \\quad \\left. 
\\mathrm {G} _ {\\boldsymbol {x}} \\left(\\phi_ {\\boldsymbol {x}}\\right)\\right) \\downarrow_ {s} \\| _ {F} ^ {2} + \\| \\mathrm {G} _ {D K P} \\left(\\phi_ {D K P}\\right) - \\boldsymbol {k} _ {p} \\| _ {F} ^ {2}, \\\\ \\boldsymbol {x} ^ {*} = \\mathrm {G} _ {\\boldsymbol {x}} \\left(\\phi_ {\\boldsymbol {x}} ^ {*}\\right), \\boldsymbol {k} ^ {*} = \\mathrm {G} _ {D K P} \\left(\\phi_ {D K P} ^ {*}\\right), \\end{array} \\right. \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 88, + 733, + 468, + 800 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathrm{G}_{DKP}(\\phi_{DKP})$ is the kernel network of the proposed DKP model.", + "bbox": [ + 76, + 810, + 468, + 838 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The overall solution procedure of the proposed DIP-DKP is given in Algorithm 2. At each $t^{th}$ iteration, the kernel $\\pmb{k}^t$ is estimated in the DKP-based kernel estimation stage and then is assigned to the DIP model for HR image", + "bbox": [ + 76, + 839, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 3: The proposed Diff-DKP." + ], + "code_body": "1 Given: $\\pmb{y}$, $\\phi_{DKP}^{T},S_{\\theta}$ and $\\pmb{x}_T\\sim \\mathcal{N}(\\pmb {0},\\pmb {I})$ \n2 for $t\\gets T,T - 1,\\dots ,1$ do \n3 $\\%$ Diffusion-based image restoration process \n4 $\\pmb{x}_{0|t} = \\frac{1}{\\sqrt{\\overline{\\alpha}^t}} (\\pmb{x}_t - \\mathcal{S}_\\theta (\\pmb{x}_t,t)\\sqrt{1 - \\overline{\\alpha}^t})$ \n5 $\\%$ DKP incorporated data consistency refinement \n6 $\\phi_{DKP}^{t - 1} = \\phi_{DKP}^{t} + \\frac{\\delta^{2}}{2}\\frac{\\partial\\log p(\\phi_{DKP}^{t}|\\pmb{x}_{0|t},\\pmb{y})}{\\partial\\phi_{DKP}^{t}} +\\delta \\frac{\\partial\\log p(\\phi_{DKP}^{t}|\\pmb{k}_{p}^{t})}{\\partial\\phi_{DKP}^{t}}$ \n7 $\\pmb{k}^{t - 1} = G_{DKP}(\\phi_{DKP}^{t - 1})$ \n8 $\\hat{\\pmb{x}}_{0|t} = \\pmb{x}_{0|t} + \\gamma_{\\pmb{x}}^{t}\\frac{\\partial\\log p(\\pmb{x}_{0|t}|\\pmb{y},\\pmb{k}^{t - 1})}{\\partial\\pmb{x}_{0|t}}$ \n9 $\\pmb{x}_{t - 1}\\sim p(\\pmb{x}_{t - 1}|\\pmb{x}_t,\\hat{\\pmb{x}}_{0|t})$ \n10 end \n11 Output: $\\pmb{x}_0,\\pmb{k}^0$", + "bbox": [ + 506, + 111, + 888, + 306 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "restoration in the forward propagation. In the back propagation, the parameters of DIP and DKP, i.e., $\\phi_{x}$ and $\\phi_{DKP}$ , are updated while solving the BSR problem via unsupervised inference. With DKP, DIP-DKP realizes an adaptive kernel learning along the convergence trajectory of the BSR objective function, enabling accurate and dynamic kernel estimation. Therefore, without expensive labeled data and long training time in advance, DIP-DKP can estimate the HR image and blur kernel simultaneously in a plug-and-play style.", + "bbox": [ + 496, + 337, + 890, + 488 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Diff-DKP", + "text_level": 1, + "bbox": [ + 500, + 498, + 609, + 513 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Original DDPM Inference Process. The denoising diffusion probabilistic model (DDPM) [14] defines a T-step forward process to add noise to data and a T-step reverse process to restore desired data from the noise. 
When an off-the-shelf DDPM $S_{\\theta}$ is applied to solve the image restoration problem, the reverse process is implemented as an inference process to estimate the high-quality image as follows,", + "bbox": [ + 496, + 522, + 890, + 628 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{ \\begin{array}{l} \\boldsymbol {x} _ {0 | t} = \\frac {1}{\\sqrt {\\bar {\\alpha} ^ {t}}} \\left(\\boldsymbol {x} _ {t} - S _ {\\boldsymbol {\\theta}} \\left(\\boldsymbol {x} _ {t}, t\\right) \\sqrt {1 - \\bar {\\alpha} ^ {t}}\\right), \\\\ \\boldsymbol {x} _ {t - 1} \\sim p \\left(\\boldsymbol {x} _ {t - 1} \\mid \\boldsymbol {x} _ {t}, \\boldsymbol {x} _ {0 | t}\\right), \\end{array} \\right. \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 638, + 890, + 690 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\pmb{x}_{0|t}$ denotes the estimated HR image $\\pmb{x}_0$ at the $t^{th}$ step, and $\\overline{\\alpha}^t$ is a hyper-parameter. To ensure that HR images $\\pmb{x}_0 \\sim q(\\pmb{x})$ can be reconstructed from random noise $\\pmb{x}_T \\sim \\mathcal{N}(\\pmb{0},\\pmb{I})$ , the existing methods typically re-train [38] or fine-tune [50] the DDPM model via supervised learning on LR-HR datasets, or provide the ground-truth kernel [46] to enroll task-specific knowledge for a convergence guarantee. However, the performance of DDPM is unstable, even when trained on a large amount of labeled data.", + "bbox": [ + 496, + 703, + 890, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Proposed Diff-DKP. The instability of DDPM mainly comes from the training process that involves multiple image processing tasks. In this case, the off-the-shelf diffusion model cannot concentrate on the BSR objective, thus leading to", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "26050", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "image distortion and content mismatch. To alleviate this issue, the proposed Diff-DKP incorporates the DKP model to provide task-specific data-consistency knowledge on the basis of the vanilla DDPM reverse iterations. 
Specifically, an external DKP incorporated data consistency refinement of $\\pmb{x}_{0|t}$ is inserted between (21) and (22), given by", + "bbox": [ + 75, + 90, + 472, + 183 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {x}} _ {0 | t} = \\boldsymbol {x} _ {0 | t} + \\gamma_ {\\boldsymbol {x}} ^ {t} \\frac {\\partial \\log p (\\boldsymbol {x} _ {0 | t} | \\boldsymbol {y} , \\boldsymbol {k} ^ {t})}{\\partial \\boldsymbol {x} _ {0 | t}}, \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 148, + 190, + 468, + 224 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\gamma_{\\pmb{x}}^{t}$ is the update step, and", + "bbox": [ + 76, + 233, + 294, + 250 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\log p \\left(\\boldsymbol {x} _ {0 | t} \\mid \\boldsymbol {y}, \\boldsymbol {k} ^ {t}\\right) = - \\| \\boldsymbol {y} - \\left(\\boldsymbol {x} _ {0 | t} \\otimes \\boldsymbol {k} ^ {t}\\right) \\downarrow_ {s} \\| _ {F} ^ {2}, \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 256, + 468, + 273 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "which enables the inference process to converge to the right direction along with the data-consistent solution.", + "bbox": [ + 75, + 281, + 468, + 311 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The overview of the Diff-DKP algorithm is presented in Algorithm 3. Let $t = T, T - 1, \\ldots, 1$ denote the index of the diffusion reverse step. At each step, the diffusion model first estimates the $x_{0|t}$ . Then, the DKP model adaptively generates kernel prior with respect to the latest $x_{0|t}$ , while $x_{0|t}$ is further updated with respect to the data consistency Eq. (24), thus, ensuring the inference process is underlying the BSR objective. It is noteworthy to point out that the parameters of the diffusion model are fixed and only the parameters of lightweight kernel estimator network are optimized in the inference process.", + "bbox": [ + 75, + 311, + 468, + 477 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this way, the off-the-shelf diffusion model plays the role of HR image estimator, while the estimated HR image is further refined by the BSR task specific prior knowledge, referring to Eq. (23). Different from those methods that incorporate prior knowledge of BSR task via supervised re-training/fine-tuning, Diff-DKP behaves a plug-and-play scheme, thus without data demands and training cost before implementation.", + "bbox": [ + 75, + 478, + 468, + 599 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 609, + 209, + 628 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 76, + 635, + 267, + 652 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Data Preparation. Following the widely adopted kernel assumption [25, 35, 45, 51], we conduct the experiments on anisotropic Gaussian kernels and motion kernels, which are shown in Fig. 4. The kernel sizes are set to $(4s + 3)\\times (4s + 3)$ . For the Gaussian kernel, the width ranges are set to $[0.175s, 2.5s]$ , and the rotation angle range is set to $[0,\\pi]$ , with a scale factor $s = 4$ , respectively. For the motion kernel, we adopt random motion kernel generation method proposed by [22], which simulates realistic and complex blur kernels from random trajectories. 
Detailed formulations of Gaussian and motion kernels are given in the supplementary material. We synthesize LR images with random kernels with respect to Eq. (1) for testing data based on five popular public benchmark datasets, including Set5 [4], Set14 [52], BSD100 [30], Urban100 [15] and RealSRSet [23]. We compare these kernels in terms of the peak", + "bbox": [ + 75, + 657, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "signal-to-noise ratio (PSNR), and compare HR images in terms of PSNR and structural similarity (SSIM) [47].", + "bbox": [ + 498, + 90, + 890, + 121 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison Methods. The proposed DIP-DKP and Diff-DKP are compared with existing baselines including: Double-DIP [34], DIP-FKP [25], DASR [45], BSRDM [51], DCLS [29], DARM [58] and DiffBIR [26]. Specifically, Double-DIP tends to provide kernel priors by training an FCN network only with respect to the LR image restoration error. DIP-FKP incorporates the FKP model, which is pre-trained on kernel datasets, as the kernel prior. KernelGAN+ZSSR and DARM are self-supervised and train an internal generative adversarial network (GAN) to estimate the blur kernel. BSRDM formulates elaborate degradation modelling on noise and kernel as handcrafted priors. DASR is a representative end-to-end method that is pre-trained on DIV2K [1] and Flickr2K [43] HR image datasets. DiffBIR is fine-tuned on labeled BSR datasets before being applied to estimate HR images.", + "bbox": [ + 496, + 122, + 892, + 364 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation and Hyper-parameters. The adopted kernel estimation network $\\mathbf{G}_k$ of the PKE module in this paper is a three-layer fully-connected network (FCN). The adopted DIP model follows the original settings in [44], and the diffusion model is the vanilla version [14] that is trained on ImageNet [6]. The number of sampling times in the MCMC simulation $L$ is the only hyper-parameter in the proposed DKP model. The hyper-parameter tuning results are given in Table 1. It is clear that the performance reaches equilibrium around $L \\in [4,8]$ . To balance the efficiency and effectiveness, we set $L = 5$ in this paper.", + "bbox": [ + 496, + 364, + 893, + 532 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Comparison with State-of-the-Arts", + "text_level": 1, + "bbox": [ + 500, + 545, + 803, + 561 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation on Gaussian Kernel Scenario. Quantitative evaluation results on four datasets with scale factor $s = 4$ are presented in the upper half of Table 2. We can see that the proposed DIP-DKP and Diff-DKP achieve the second-best and the best results on all datasets. We note that DIP-DKP only realizes slightly higher performance than the existing state-of-the-art (SotA) methods, while Diff-DKP achieves significantly better performances. This echoes our discussion in Section 4: DIP-DKP is trained entirely from scratch while solving, and the DKP model plays the role of providing a better convergence guarantee. Diff-DKP utilizes the DKP model to guide the well-trained diffusion model with fruitful data priors to converge to the BSR task for better HR image restoration performance. 
In Table 3, we further show that our DKP model achieves the accurate ker-", + "bbox": [ + 496, + 569, + 890, + 795 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e17616ecb84820f3c50d57a76c2eab77ff14b9093bb41a43ffdba1ebef265c94.jpg", + "image_caption": [ + "Figure 4. The visualization of the adopted blur kernels." + ], + "image_footnote": [], + "bbox": [ + 514, + 810, + 694, + 890 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/734751729182f53bed5f8588502ac7611eefe08dd77ef41b8154c2eaf7769b59.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 702, + 811, + 875, + 888 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "26051", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/2b850c0b06e72f0a6709d14b613e416cc47b5809173470775cd7b2293b4ed8e9.jpg", + "table_caption": [ + "Table 1. Average image PSNR performance of the proposed DIP-DKP and Diff-DKP on Set5 [4] on the Gaussian kernel scenario." + ], + "table_footnote": [], + "table_body": "
MethodsL=0L=2L=4L=6L=8L=10L=15
DIP-DKP (Ours)20.9927.1228.4428.5728.5228.2928.03
Diff-DKP (Ours)21.9728.9529.4029.4729.7629.6729.26
", + "bbox": [ + 192, + 109, + 777, + 148 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/e3bdeafbf2e62557ddf775f0bd155afbfa176997d3b2364d66433fbd9b07d19e.jpg", + "table_caption": [ + "Table 2. Average PSNR/SSIM of different methods on public datasets that are synthesized by the random Gaussian/Motion kernels with $s = 4$ . The best and second best results are highlighted in red and blue colors, respectively." + ], + "table_footnote": [], + "table_body": "
MethodKernelSet5 [4]Set14 [52]BSD100 [30]Urban100 [15]
Double-DIP [34]20.99/0.557818.31/0.442618.57/0.381518.15/0.4491
DASR [45]27.37/0.785925.43/0.659125.11/0.612922.88/0.6448
DIP-FKP [25]27.77/0.791425.65/0.676425.15/0.635422.89/0.6327
BSRDM [51]Gaussian kernel scenario27.81/0.802925.35/0.685925.61/0.652622.36/0.6601
DCLS [29]27.50/0.794825.68/0.663925.34/0.616922.92/0.6475
DiffBIR [26]25.15/0.646823.01/0.593523.88/0.558621.94/0.5657
DARM [58]26.25/0.681824.19/0.618724.29/0.589822.14/0.5967
DIP-DKP (Ours)28.03/0.803925.98/0.687825.66/0.653123.24/0.6644
Diff-DKP (Ours)29.44/0.859226.76/0.740026.63/0.705723.92/0.6875
Double-DIP [34]18.92/0.451020.41/0.484719.00/0.375715.42/0.2932
DASR [45]24.21/0.725224.16/0.614522.47/0.583620.24/0.5478
DIP-FKP [25]24.61/0.737124.21/0.622722.80/0.588020.33/0.5572
BSRDM [51]Motion kernel scenario24.01/0.709823.56/0.600922.62/0.579120.40/0.5494
DCLS [29]24.78/0.732324.38/0.621122.74/0.592220.49/0.5534
DiffBIR [26]23.63/0.636723.59/0.604322.35/0.578420.14/0.5347
DARM [58]24.23/0.726923.95/0.629422.48/0.583020.58/0.5595
DIP-DKP (Ours)25.30/0.741724.52/0.643423.02/0.613621.24/0.5667
Diff-DKP (Ours)</td><td>28.74/0.8313</td><td>26.03/0.6719</td><td>24.10/0.6287</td><td>22.26/0.5862</td></tr></table>
", + "bbox": [ + 145, + 191, + 823, + 383 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "nel estimation with higher kernel PSNR.", + "bbox": [ + 76, + 400, + 346, + 415 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation on Motion Kernel Scenario. The lower half of Table 2 shows the simulation results on the motion kernel scenario. The supervised learning methods, i.e., DASR and DiffBIR, are re-trained/fine-tuned on motion-kernel-degraded HR image datasets. DIP-FKP is retrained on the motion kernel dataset. The proposed DIP-DKP and Diff-DKP show significantly better performance on the motion kernel scenario, which validates that the proposed DKP model has good generalization ability towards different kernel categories. Specifically, Diff-DKP presents stable PSNR/SSIM scores when being applied to estimate motion kernels, while the rest suffer a significant performance drop. This indicates that the proposed DKP can be expected to handle kernel-varying tasks.", + "bbox": [ + 75, + 431, + 470, + 643 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Visual Results. The visual results of different methods on synthetic and real-world images are shown in Fig. 5. We can see that: 1) In the case of the Gaussian kernel, all methods are capable of producing satisfactory deblurring results, while our DIP-DKP and Diff-DKP yield better results with more accurate kernel estimation. 2) In the case of the motion kernel, certain distortions can be seen on the estimated kernels: DIP-FKP and BSRDM fail to estimate the motion kernel. Meanwhile, our DIP-DKP and Diff-DKP achieve approximately accurate motion kernel estimation. 3) In the case of real images, both DIP-FKP and BSRDM estimate the Gaussian-like kernels, whereas our DIP-DKP and Diff-DKP tend to estimate the non-Gaussian kernels. This verifies that an adaptive and flexible kernel estimation discipline is learned by our DKP model, which may fit the real-world applications better.", + "bbox": [ + 75, + 657, + 468, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/2a0d53fdf8594459ed1cb9041551becbd578049f0131e8903190deedb58.jpg", + "table_caption": [ + "Table 3. Average PSNR/SSIM of images and PSNR of kernels on Set14 [52] with $s = 4$ . The best and second best results are highlighted in red and blue colors, respectively." + ], + "table_footnote": [], + "table_body": "<table><tr><td>
MethodKernelKernel PSNRImage PSNR/SSIM
DIP-DKP without RKS37.9218.77/0.4227
Diff-DKP without RKS40.9317.33/0.3408
Double-DIP [34]Gaussian50.6218.31/0.4426
DIP-FKP [25]kernel54.4625.65/0.6764
BSRDM [51]scenario55.3825.35/0.6859
DIP-DKP (Ours)56.2025.98/0.6878
Diff-DKP (Ours)56.7626.76/0.7400
DIP-DKP without RKS34.9218.19/0.4223
Diff-DKP without RKS34.7817.65/0.3513
Double-DIP [34]Motion35.5220.41/0.4847
DIP-FKP [25]kernel37.5224.21/0.6227
BSRDM [51]scenario37.8823.56/0.6009
DIP-DKP (Ours)39.3324.52/0.6434
Diff-DKP (Ours)40.3726.03/0.6719
", + "bbox": [ + 509, + 444, + 883, + 609 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3. Ablation Studies", + "text_level": 1, + "bbox": [ + 500, + 632, + 663, + 646 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation study of RKS module. The ablation studies are carried on the MCMC sampling of kernel priors. \"Without RKS\" denotes that the adopted DKP updates the kernel network only by the data-consistency term without the learned kernel prior. In Fig. 6 (left), it can be seen that the estimated kernels without RKS have significant distortion, leading to remarkable PSNR drop of the estimated HR image, while DIP-DKP can estimate Gaussian kernels precisely with respect to the ground truth (with red frame). Fig. 6 (right) shows that the accurate motion kernel estimation no longer exists when the RKS module is absent. It is thus obvious that without the kernel prior learned from the MCMC process, the Diff-DKP fails to converge to a rational motion kernel estimation. The average kernel and image results are shown in Table 3. Without kernel prior learned from the RKS module, the kernel estimation performances of DKP-", + "bbox": [ + 496, + 657, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "26052", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/51381e48c7b82f12ffa64ca0f6ec5f1bf5b357e0570436240c9e19b7bea6feb5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 80, + 225, + 159 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6fe0a4fbb7052d95cf117db8fc3a770d279cfd562b59d9b4159bfa939140f05a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 227, + 80, + 328, + 159 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/93da8cdbaedea7e10231537944da49d9cba747400f7f03f6564760528175ae3a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 80, + 431, + 159 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fd453a0940f59143122da358bb162cc8e87bd01ce860b1da7493f6950ee3f68c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 434, + 80, + 535, + 159 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f5d8382d84cccc79b62efcfb64ee7224e2388c883d7311572724fcab0e44af20.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 537, + 80, + 638, + 159 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1f5ba65f5734f6709d5b9df45412ddb776b1b2ecf2168c4de1a0ac93e89b3e17.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 80, + 741, + 159 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/09a252daf9dc4b8900e2b9de6087a6ff342b922738b361011620a5119fc20b8e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 743, + 80, + 844, + 159 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f3a3cd032b95adefc909fee52f059087439b88452adc7657da88e042eebd6738.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 125, + 161, + 225, + 238 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ce3551ad7534d0ccc01bcc62ffc84f5e89bc1a14126b035e0575fd7c791ca54b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 227, + 161, + 328, + 238 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/52e589b8f5a7a0c6603398b67349f69cc93b1448ecadb586b5eee1d8cc1a16e2.jpg", + "image_caption": [], + "image_footnote": [], + 
"bbox": [ + 331, + 161, + 431, + 238 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/30812206585b82b02566959cc728f1b819eb7e8da33302746a414dc44b8463f3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 434, + 161, + 535, + 238 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/96145f05ad97ff295b0d4ff1f4c08dcc6a2c9ea864dfe2e7a6bb2dfec5c0aaac.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 537, + 161, + 638, + 238 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e9e107ff03f3c40c994c36439711e4d805a8958f16c19221524f8b55e6956fb6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 161, + 741, + 238 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/29d07cf3a2031de882d1c1f0fcc0dae3b2ca02b5858e19a44cb2d9d95d15df70.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 743, + 161, + 844, + 238 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/197ced1e62b01a2b9e9887f25a208d29d992ee2b50a7d3912a9f3005e5eb4a76.jpg", + "image_caption": [ + "$\\mathrm{LR}(\\times 4)$" + ], + "image_footnote": [], + "bbox": [ + 125, + 241, + 225, + 318 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ec6891048cb6675c0de44197b91d426539a2615e80fea325230bcc13a897df73.jpg", + "image_caption": [ + "DASR", + "27.42/27.74/-" + ], + "image_footnote": [], + "bbox": [ + 227, + 241, + 328, + 318 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f04ff3c280e1a78fba329d678d5e36337c5003aadb5313bd385cefca4583cdc2.jpg", + "image_caption": [ + "DRAM", + "26.69/26.84/-" + ], + "image_footnote": [], + "bbox": [ + 331, + 241, + 431, + 318 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d2d71d944caebd8064bbe29f6c79c2982956524cf98e277970c2a3f431a21a18.jpg", + "image_caption": [ + "FKP-DIP", + "27.80/28.95/-" + ], + "image_footnote": [], + "bbox": [ + 434, + 241, + 535, + 318 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4a56c861d1cc9720ff0203c8caa3358c9f0550d2f626e41d21ffe6d577efbc19.jpg", + "image_caption": [ + "BSRDM", + "27.60/29.14/-", + "DIP-DKP(ours)" + ], + "image_footnote": [], + "bbox": [ + 537, + 241, + 638, + 318 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/02d0f327e96b0d90d885714b522364b12286274cb2ed518bcb489ed14a7a5a86.jpg", + "image_caption": [ + "28.07/29.45/-" + ], + "image_footnote": [], + "bbox": [ + 640, + 241, + 741, + 318 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5c6a086412b71b626a213b9aee06a0c66f89a5c22ea99350d6310c5fa404b291.jpg", + "image_caption": [ + "Diff-DKP(ours)", + "29.38/29.89/-" + ], + "image_footnote": [], + "bbox": [ + 743, + 241, + 844, + 318 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4e52cfe17c2e44f9daf2a30e95937eeefb07f88e212e3a0ed62a09e8263c4eae.jpg", + "image_caption": [ + "PSNR (dB)", + "(a) BSD100 \"040\"", + "Figure 6. The intermediate results of DIP-DKP, Diff-DKP and their no RKS module versions over iterations on two test images." + ], + "image_footnote": [], + "bbox": [ + 78, + 375, + 272, + 494 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/df228a524780145f17249603a2e92cd2ed8548707b36487d01ad17ba7aaf7fa7.jpg", + "image_caption": [ + "Figure 5. Visual results of different methods on public datasets for scale factor 4. 
Estimated/ground-truth kernels are shown on the top left.", + "(b) Set14 \"monarch\"" + ], + "image_footnote": [], + "bbox": [ + 272, + 375, + 467, + 494 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/d3a20a9b0e3a30cbceb0ce64d3be2524a080deeb4df58cdd7b8843025d10532b.jpg", + "table_caption": [ + "Table 4. The ablation of PKE module. (Set5, x4, image PSNR)" + ], + "table_footnote": [], + "table_body": "
Layers\\Units10100100010000
113.7523.5728.9328.24
313.6128.9728.4828.35
513.3028.8128.5226.65
713.8628.3028.5427.93
", + "bbox": [ + 83, + 563, + 460, + 614 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "based BSR methods have a significant performance drop, leading to poor image restoration performance as well.", + "bbox": [ + 75, + 625, + 467, + 654 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation study of PKE module. Since PKE essentially estimates blur kernels on the basis of the random kernel priors and LR observations, thus it is indispensable and we conduct ablation study on the different structures of kernel network in PKE module in Table 4. We find that the kernel network performs well when it has 3-7 layers with 100-1000 units in each layer. This indicates that the kernel network has good generalization-ability for the structure without the necessity of elaborately designing the network.", + "bbox": [ + 75, + 655, + 468, + 790 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4. Model Size, Runtime and Memory Usage", + "text_level": 1, + "bbox": [ + 76, + 801, + 426, + 816 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The kernel network of our DKP model has a total of $562K$ parameters (FLOPs: $536K$ ) while Double-DIP and DIP-FKP have $641K$ parameters (FLOPs: $600K$ ) and $143K$ parameters (FLOPs: $178K$ ), respectively. The runtime and memory usage of our DIP-DKP on a GeForce RTX 3090", + "bbox": [ + 75, + 824, + 468, + 898 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "GPU for generating a HR image of size $512 \\times 512$ are about 92 seconds and 11GB memory, which is comparable with the Double-DIP (91 seconds and 11.2GB) and DIP-FKP (90 seconds and 10.6GB). As for Diff-DKP, the $512 \\times 512$ image needs to be divided into four $256 \\times 256$ images for restoration, which costs a total of 60 seconds and 4GB memory. Considering that our DIP-DKP and Diff-DKP are unsupervised and plug-and-play, it is reasonable to say that our methods have moderate computational costs.", + "bbox": [ + 496, + 378, + 890, + 515 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Due to the page limitations, more experimental results are given in the supplementary material.", + "bbox": [ + 498, + 518, + 890, + 547 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 568, + 617, + 583 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose a dynamic kernel prior (DKP) model to solve the BSR problem in an unsupervised and pre-training-free paradigm. DKP realizes the rational kernel prior learning from MCMC sampling on random kernel distributions, providing accurate kernel estimation and thus leading to better HR image restoration. DKP can be easily incorporated with existing image restoration model, such as DIP and diffusion model, by replacing their kernel modeling modules or adding as an external kernel prior generator. When applied to solve the BSR problem, DKP is trained while solving the task with respect to the LR image restoration error, enabling no training necessity and labeled data demands. Extensive experiments on Gaussian and motion kernel scenarios with synthetic LR images and real-world images validate that DKP-based methods improve the kernel estimation accuracy significantly and thus lead to superior BSR results. 
We believe that the concept of using a trainable sampling process to provide adaptive priors will lead to a new direction in solving low-level tasks, aiming to achieve superior performance with modest computational costs in the way of unsupervised inference.", + "bbox": [ + 496, + 595, + 890, + 886 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "26053", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 126-135, 2017. 6", + "[2] Dominique Bakry and Michel Émery. Diffusions hypercontractives. In Séminaire de Probabilités XIX 1983/84: Proceedings, pages 177-206. Springer, 2006. 4", + "[3] Sefi Bell-Kligler, Assaf Shocher, and Michal Irani. Blind super-resolution kernel estimation using an internal-gan. Advances in Neural Information Processing Systems, 32, 2019. 1, 2", + "[4] Marco Bevilacqua, Aline Roumy, Christine Guillemot, and Marie Line Alberi-Morel. Low-complexity single-image super-resolution based on nonnegative neighbor embedding. In British Machine Vision Conference, pages 135-1, 2012. 6, 7", + "[5] Hyungjin Chung, Jeongsol Kim, Michael T McCann, Marc L Klasky, and Jong Chul Ye. Diffusion posterior sampling for general noisy inverse problems. arXiv preprint arXiv:2209.14687, 2022. 1, 2", + "[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009. 6", + "[7] Chao Dong, Chen Change Loy, Kaiming He, and Xiaou Tang. Learning a deep convolutional network for image super-resolution. In European conference on computer vision, pages 184-199. Springer, 2014. 2", + "[8] Yangyi Dong, Xiaoyun Zhang, Zhixin Wang, Ya Zhang, Siheng Chen, and Yanfeng Wang. Unpaired face restoration via learnable cross-quality shift. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 667-675, 2022. 3", + "[9] Netalee Efrat, Daniel Glasner, Alexander Apartsin, Boaz Nadler, and Anat Levin. Accurate blur models vs. image priors in single image super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 2832-2839, 2013. 3", + "[10] Yosef Gandelsman, Assaf Shocher, and Michal Irani. \"double-dip\": Unsupervised image decomposition via coupled deep-image-priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11026-11035, 2019. 2", + "[11] Daniel Glasner, Shai Bagon, and Michal Irani. Superresolution from a single image. In 2009 IEEE 12th international conference on computer vision, pages 349-356. IEEE, 2009. 2", + "[12] Jinjin Gu, Hannan Lu, Wangmeng Zuo, and Chao Dong. Blind super-resolution with iterative kernel correction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1604-1613, 2019. 1, 2, 3", + "[13] Lanqing Guo, Chong Wang, Wenhan Yang, Siyu Huang, Yufei Wang, Hanspeter Pfister, and Bihan Wen. 
Shadowdiffusion: When degradation prior meets diffusion model for" + ], + "bbox": [ + 78, + 114, + 468, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "shadow removal. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14049-14058, 2023. 1", + "[14] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 2, 3, 4, 5, 6", + "[15] Jia-Bin Huang, Abhishek Singh, and Narendra Ahuja. Single image super-resolution from transformed self-exemplars. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5197-5206, 2015. 6, 7", + "[16] Yan Huang, Shang Li, Liang Wang, Tieniu Tan, et al. Unfolding the alternating optimization for blind super resolution. Advances in Neural Information Processing Systems, 33:5632-5643, 2020. 1, 2", + "[17] Meiguang Jin, Stefan Roth, and Paolo Favaro. Normalized blind deconvolution. In Proceedings of the European Conference on Computer Vision (ECCV), pages 668-684, 2018. 1", + "[18] Jiwon Kim, Jung Kwon Lee, and Kyoung Mu Lee. Accurate image super-resolution using very deep convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1646-1654, 2016. 2", + "[19] Kwang In Kim and Younghee Kwon. Single-image superresolution using sparse regression and natural image prior. IEEE transactions on pattern analysis and machine intelligence, 32(6):1127-1133, 2010. 1, 2", + "[20] Soo Ye Kim, Hyeonjun Sim, and Munchurl Kim. Koalanet: Blind super-resolution using kernel-oriented adaptive local adjustment. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10611-10620, 2021. 1, 2", + "[21] Dilip Krishnan and Rob Fergus. Fast image deconvolution using hyper-laplacian priors. Advances in neural information processing systems, 22, 2009. 1, 2", + "[22] Orest Kupyn, Volodymyr Budzan, Mykola Mykhailych, Dmytro Mishkin, and Jií Matas. Deblurgan: Blind motion deblurring using conditional adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8183-8192, 2018. 6", + "[23] Yuelong Li, Mohammad Tofighi, Junyi Geng, Vishal Monga, and Yonina C Eldar. Efficient and interpretable deep blind image deblurring via algorithm unrolling. IEEE Transactions on Computational Imaging, 6:666-681, 2020. 6", + "[24] Yawei Li, Yuchen Fan, Xiaoyu Xiang, Denis Demandolx, Rakesh Ranjan, Radu Timofte, and Luc Van Gool. Efficient and explicit modelling of image hierarchies for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18278-18289, 2023. 2", + "[25] Jingyun Liang, Kai Zhang, Shuhang Gu, Luc Van Gool, and Radu Timofte. Flow-based kernel prior with application to blind super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10601-10610, 2021. 1, 2, 3, 5, 6, 7", + "[26] Xinqi Lin, Jingwen He, Ziyan Chen, Zhaoyang Lyu, Ben Fei, Bo Dai, Wanli Ouyang, Yu Qiao, and Chao Dong. Diffbir: Towards blind image restoration with generative diffusion prior. arXiv preprint arXiv:2308.15070, 2023. 
2, 6, 7" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "26054", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Jie Liu, Wenjie Zhang, Yuting Tang, Jie Tang, and Gangshan Wu. Residual feature aggregation network for image superresolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2359-2368, 2020. 1", + "[28] Zhengxiong Luo, Yan Huang, Shang Li, Liang Wang, and Tieniu Tan. End-to-end alternating optimization for blind super resolution. arXiv preprint arXiv:2105.06878, 2021. 2", + "[29] Ziwei Luo, Haibin Huang, Lei Yu, Youwei Li, Haoqiang Fan, and Shuaicheng Liu. Deep constrained least squares for blind image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17642-17652, 2022. 1, 2, 6, 7", + "[30] David Martin, Charless Fowlkes, Doron Tal, and Jitendra Malik. A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, pages 416-423. IEEE, 2001. 6, 7", + "[31] Tomer Michaeli and Michal Irani. Nonparametric blind super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 945-952, 2013. 2", + "[32] Radford M Neal et al. Mcmc using hamiltonian dynamics. Handbook of markov chain monte carlo, 2(11):2, 2011. 4", + "[33] Daniele Perrone and Paolo Favaro. A clearer picture of total variation blind deconvolution. IEEE transactions on pattern analysis and machine intelligence, 38(6):1041-1055, 2015. 1, 2", + "[34] Dongwei Ren, Kai Zhang, Qilong Wang, Qinghua Hu, and Wangmeng Zuo. Neural blind deconvolution using deep priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3341-3350, 2020. 1, 2, 5, 6, 7", + "[35] Gernot Riegler, Samuel Schulter, Matthias Ruther, and Horst Bischof. Conditioned regression models for non-blind single image super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 522-530, 2015. 6", + "[36] Leonid I Rudin, Stanley Osher, and Emad Fatemi. Nonlinear total variation based noise removal algorithms. Physica D: nonlinear phenomena, 60(1-4):259-268, 1992. 2", + "[37] Marshall F Tappen Bryan C Russell and William T Freeman. Exploiting the sparse derivative prior for super-resolution and image demosaicing. In Proceedings of the Third International Workshop Statistical and Computational Theories of Vision, pages 1-28, 2003. 2", + "[38] Chitwan Sahara, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image super-resolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(4):4713-4726, 2022. 2, 5", + "[39] Assaf Shocher, Nadav Cohen, and Michal Irani. \"zero-shot\" super-resolution using deep internal learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3118-3126, 2018. 1, 2", + "[40] Jiaming Song, Arash Vahdat, Morteza Mardani, and Jan Kautz. Pseudoinverse-guided diffusion models for inverse" + ], + "bbox": [ + 78, + 90, + 470, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "problems. In International Conference on Learning Representations, 2022. 
1, 2", + "[41] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. Advances in neural information processing systems, 32, 2019. 3", + "[42] Jian Sun, Zongben Xu, and Heung-Yeung Shum. Image super-resolution using gradient profile prior. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 2", + "[43] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 114-125, 2017. 6", + "[44] Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. Deep image prior. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 9446-9454, 2018. 2, 3, 4, 5, 6", + "[45] Longguang Wang, Yingqian Wang, Xiaoyu Dong, Qingyu Xu, Jungang Yang, Wei An, and Yulan Guo. Unsupervised degradation representation learning for blind superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10581-10590, 2021. 2, 6, 7", + "[46] Yinhuai Wang, Jiwen Yu, and Jian Zhang. Zero-shot image restoration using denoising diffusion null-space model. arXiv preprint arXiv:2212.00490, 2022. 2, 5", + "[47] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6", + "[48] Max Welling and Yee W Teh. Bayesian learning via stochastic gradient Langevin dynamics. In Proceedings of the 28th international conference on machine learning (ICML-11), pages 681–688, 2011. 4", + "[49] Yu-Syuan Xu, Shou-Yao Roy Tseng, Yu Tseng, Hsien-Kai Kuo, and Yi-Min Tsai. Unified dynamic convolutional network for super-resolution with variational degradations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12496-12505, 2020. 1, 2", + "[50] Xunpeng Yi, Han Xu, Hao Zhang, Linfeng Tang, and Jiayi Ma. Diff-retinex: Rethinking low-light image enhancement with a generative diffusion model. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12302-12311, 2023. 1, 2, 5", + "[51] Zongsheng Yue, Qian Zhao, Jianwen Xie, Lei Zhang, Deyu Meng, and Kwan-Yee K. Wong. Blind image superresolution with elaborate degradation modeling on noise and kernel. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2128-2138, 2022. 1, 2, 3, 4, 6, 7", + "[52] Roman Zeyde, Michael Elad, and Matan Protter. On single image scale-up using sparse-representations. In International conference on curves and surfaces, pages 711-730. Springer, 2010. 6, 7" + ], + "bbox": [ + 501, + 92, + 890, + 901 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "26055", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[53] Kai Zhang, Wangmeng Zuo, and Lei Zhang. Learning a single convolutional super-resolution network for multiple degradations. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3262-3271, 2018. 1, 2, 3", + "[54] Kai Zhang, Luc Van Gool, and Radu Timofte. Deep unfolding network for image super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3217-3226, 2020. 
1", + "[55] Kai Zhang, Yawei Li, Wangmeng Zuo, Lei Zhang, Luc Van Gool, and Radu Timofte. Plug-and-play image restoration with deep denoiser prior. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(10):6360-6376, 2021. 3", + "[56] Yulun Zhang, Kunpeng Li, Kai Li, Lichen Wang, Bineng Zhong, and Yun Fu. Image super-resolution using very deep residual channel attention networks. In Proceedings of the European conference on computer vision (ECCV), pages 286-301, 2018. 1, 2", + "[57] Zixiang Zhao, Haowen Bai, Yuzhhi Zhu, Jiangshe Zhang, Shuang Xu, Yulun Zhang, Kai Zhang, Deyu Meng, Radu Timofte, and Luc Van Gool. Ddfm: denoising diffusion model for multi-modality image fusion. arXiv preprint arXiv:2303.06840, 2023.1, 2", + "[58] Hongyang Zhou, Xiaobin Zhu, Jianqing Zhu, Zheng Han, Shi-Xue Zhang, Jingyan Qin, and Xu-Cheng Yin. Learning correction filter via degradation-adaptive regression for blind single image super-resolution. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12365-12375, 2023. 2, 6, 7", + "[59] Yuanzhi Zhu, Kai Zhang, Jingyun Liang, Jiezhang Cao, Bihan Wen, Radu Timofte, and Luc Van Gool. Denoising diffusion models for plug-and-play image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1219-1229, 2023. 1, 2" + ], + "bbox": [ + 78, + 90, + 470, + 584 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "26056", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/eb377d27-ee40-49e4-9796-048cc8e1c35d_model.json b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/eb377d27-ee40-49e4-9796-048cc8e1c35d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a54afd4b47cd5f2249d6b8f6c7daf4a290708119 --- /dev/null +++ b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/eb377d27-ee40-49e4-9796-048cc8e1c35d_model.json @@ -0,0 +1,2961 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.082, + 0.131, + 0.89, + 0.153 + ], + "angle": 0, + "content": "A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.18, + 0.335, + 0.218 + ], + "angle": 0, + "content": "Zhixiong Yang1 \nShuanghui Zhang1" + }, + { + "type": "text", + "bbox": [ + 0.346, + 0.181, + 0.472, + 0.216 + ], + "angle": 0, + "content": "Jingyuan Xia\\(^{1,*}\\) \nZhen Liu" + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.181, + 0.614, + 0.216 + ], + "angle": 0, + "content": "Shengxi Li² \nYaowen Fu¹" + }, + { + "type": "text", + "bbox": [ + 0.657, + 0.181, + 0.796, + 0.218 + ], + "angle": 0, + "content": "Xinghua Huang\\(^{1}\\) Yongxiang Liu\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.233, + 0.871, + 0.287 + ], + "angle": 0, + "content": "\\(^{1}\\)College of Electronic Engineering, National University of Defense Technology, Changsha, China \n\\(^{2}\\)College of Electronic Engineering, Beihang University, Beijing, China \nyzx21@nudt.edu.cn, j.xia10@nudt.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.322, + 0.314, + 0.339 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.354, + 0.474, + 0.761 + ], + "angle": 0, + "content": "Deep learning-based methods have achieved significant successes on solving the blind super-resolution (BSR) problem. However, most of them request supervised pretraining on labelled datasets. This paper proposes an unsupervised kernel estimation model, named dynamic kernel prior (DKP), to realize an unsupervised and pre-training-free learning-based algorithm for solving the BSR problem. DKP can adaptively learn dynamic kernel priors to realize real-time kernel estimation, and thereby enables superior HR image restoration performances. This is achieved by a Markov chain Monte Carlo sampling process on random kernel distributions. The learned kernel prior is then assigned to optimize a blur kernel estimation network, which entails a network-based Langevin dynamic optimization strategy. These two techniques ensure the accuracy of the kernel estimation. DKP can be easily used to replace the kernel estimation models in the existing methods, such as Double-DIP and FKP-DIP, or be added to the off-the-shelf image restoration model, such as diffusion model. In this paper, we incorporate our DKP model with DIP and diffusion model, referring to DIP-DKP and Diff-DKP, for validations. Extensive simulations on Gaussian and motion kernel scenarios demonstrate that the proposed DKP model can significantly improve the kernel estimation with comparable runtime and memory usage, leading to state-of-the-art BSR results. The code is available at https://github.com/XYLGroup/DKP." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.789, + 0.208, + 0.804 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.814, + 0.47, + 0.845 + ], + "angle": 0, + "content": "Deep learning provides a new avenue for solving the blind super-resolution (BSR) problem, which aims to reconstruct" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.852, + 0.469, + 0.9 + ], + "angle": 0, + "content": "Zhixiong Yang and Jingyuan Xia contributed equally to this work (*Corresponding author: Jingyuan Xia). This work is supported by National Natural Science Foundation of China, projects 61921001, 62131020, 62322121 and 62171448, and the NSFDYS of Hunan 2022J110067." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.323, + 0.895, + 0.49 + ], + "angle": 0, + "content": "high-resolution (HR) images from the low-resolution (LR) observations with unknown blur kernels, and is known to be highly non-convex and ill-posed. To alleviate the non-convexity and ill-posedness, most learning-based BSR methods incorporate image priors via supervised learning based on paired LR-HR samples. However, pre-defined labeled training datasets are expensive, time-consuming, or even infeasible in specific scenarios, such as for high-speed targets (e.g., satellites, aircraft) and medical images (e.g., beating heart). Thus, unsupervised learning-based solutions are highly demanded for the BSR problem." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.491, + 0.893, + 0.853 + ], + "angle": 0, + "content": "The existing BSR methods can be roughly divided into model-based and learning-based strategies in terms of the priors adopted to provide performance guarantees. Model-based approaches [19, 21, 33, 51] typically adopt hand-designed and explicit constraints as regularizations on image properties, or expert knowledge of the blur kernel. Meanwhile, learning-based BSR methods [12, 16, 20, 27-29, 49, 53, 54, 56] aim to train an end-to-end network with paired LR-HR image samples to leverage data priors for boosting performance. However, these methods are highly data-demanding and need to undergo thorough pre-training before application, leading to limited generalization ability towards varying blur kernels. To alleviate this issue, quite a few methods [5, 13, 40, 50, 57, 59] replace the cumbersome training in advance with a well-trained diffusion model that requires significantly fewer fine-tuning samples, used in an off-the-shelf fashion. On the other side, a line of works [3, 17, 25, 34, 39, 51] proposes to replace the HR image data priors with kernel priors, which are more substantial, economical and efficient to train. However, both of these advances still rely on the supervised learning scheme with the necessity of training on labeled datasets, still hindering the flexibility and generalization ability towards the BSR tasks with different kernels and unknown HR ground truths." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.895, + 0.901 + ], + "angle": 0, + "content": "In this paper, we propose a dynamic kernel prior (DKP) generation model that can be plugged into the majority of image restoration (IR) models, to solve the BSR problem in an" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "26046" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.513 + ], + "angle": 0, + "content": "unsupervised way. The proposed DKP model consists of two modules: a random kernel sampling (RKS) module and a prior kernel estimation (PKE) module. In the RKS module, a Markov Chain Monte Carlo (MCMC) sampling strategy on kernel distributions iteratively generates random kernels as kernel priors, which are then assigned to the PKE module. The PKE module is employed to estimate the blur kernel with respect to the kernel prior generated from the RKS module, the observed LR input and estimated HR image from the IR model. The estimated blur kernel is then assigned to an adopted IR model for the HR image restoration. 
Along with the alternative solving processes, the MCMC process in RKS module converges to a desired kernel distribution with respect to the LR observation and the estimated HR image to guarantee a rational kernel prior. Meanwhile, a network-based Langevin dynamics (NLD) paradigm is proposed to optimize the kernel estimator in our PKE module with respect to the RKS output kernel prior and the data consistency based on the LR image reconstruction error. The RKS module realizes an unsupervised kernel prior learning. The PKE module achieves promising kernel estimation via the NLD update scheme, which further alleviates the non-convexity and ill-posedness in the view of optimization strategy. In this way, the DKP model is capable of providing the plug-and-play kernel estimation without training in advance on paired LR-HR samples, and is flexible to be applied to the existing IR models for solving the BSR problem." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.517, + 0.473, + 0.743 + ], + "angle": 0, + "content": "Two applications are proposed to validate the feasibility and performance of our DKP model: deep image prior (DIP) [44] and diffusion model [14] adopted as the IR model, referring to DIP-DKP and Diff-DKP, respectively. For the DIP-DKP, we simultaneously optimize the parameters of DIP and DKP models from scratch during the alternative solution process. For the Diff-DKP, the adopted diffusion model is off-the-shelf from [14] and is applied as the fixed HR image restorer. The DKP model is optimized from scratch as well. Extensive simulation results show that the DIP-DKP achieves comparable performance than the existing methods, while the Diff-DKP achieves the state-of-the-art performance in both of Gaussian and motion kernel scenarios. The main contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.75, + 0.47, + 0.81 + ], + "angle": 0, + "content": "- The RKS module is proposed to generate a rational kernel prior from the MCMC sampling on random kernel distributions. This way, an unsupervised kernel prior learning is achieved to substitute the pre-training phase." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.811, + 0.469, + 0.87 + ], + "angle": 0, + "content": "- In PKE module, the NLD is proposed to optimize the kernel estimator, ensuring good convergence and concise estimation of the blur kernel from the perspective of optimization strategy." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "- The proposed DKP model enjoys the ease use on the popular IR models without the necessity of pre-training/re" + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.75, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.894, + 0.151 + ], + "angle": 0, + "content": "training towards different scenarios. The two applications, i.e., DIP-DKP and Diff-DKP, validate the state-of-the-art performance and excellent flexibility of our DKP model." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.165, + 0.642, + 0.181 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.19, + 0.895, + 0.766 + ], + "angle": 0, + "content": "To alleviate the non-convexity and ill-posedness, early model-based approaches [11, 31, 33, 37] typically construct image priors in explicit formulations, such as the total variation (TV) [36], gradient profile [42], hyper-Laplacian [21] and sparsity [19]. 
In contrast, learning-based methods [7, 12, 16–18, 20, 28, 29, 45, 49, 53, 56] typically train an end-to-end network on labelled image samples to incorporate data priors. Wang et al. [45] proposed a CNN-based deep network with degradation feature representation module to learn image degradation feature from supervised training on paired LR-HR images. Li et al. [24] proposed a transformer network to learn multi-scale image feature via self-attention mechanisms. To reduce the high training costs of time and data, recent advances [5, 38, 40, 46, 50, 57, 59] are proposed to solve BSR problem by an off-the-shelf diffusion model [14]. Lin et al. [26] proposed to partially fine-tune the parameters of diffusion model with significantly less labeled images. Wang et al. [46] further formulated a diffusion-based BSR algorithm that iteratively solves super-resolution tasks with the given kernel without re-training. Different from the end-to-end models that are trained on paired image samples, recent methods tend to resolve BSR problem via pre-training on kernel datasets [25] or pre-defined kernel priors [51]. An alternative framework between the kernel estimation and image restoration is typically adopted in these methods [3, 10, 12, 39, 44, 58], such as double-deep image prior (Double-DIP) [34]. On the basis of this framework, Liang et al. [25] established a flow-based kernel prior (FKP) network that is pre-trained on labeled kernels to enroll kernel priors while the HR image is estimated by DIP network in an online fashion. Yue et al. [51] proposed a hand-crafted kernel prior model to improve the robustness towards the Gaussian kernel scenario. Despite the fact that these methods approximately bring down the data requirements and training costs, the necessity of training in advances or hand-crafted design still limits the flexibility and generalization ability towards the varying kernel scenarios (Gaussian and motion) without ground truths." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.777, + 0.771, + 0.794 + ], + "angle": 0, + "content": "3. Dynamic Kernel Prior (DKP)" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.803, + 0.892, + 0.833 + ], + "angle": 0, + "content": "Problem Formulation. The degradation model of BSR problem is commonly expressed as follows," + }, + { + "type": "equation", + "bbox": [ + 0.624, + 0.845, + 0.892, + 0.86 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {y} = (\\boldsymbol {x} \\otimes \\boldsymbol {k}) \\downarrow_ {s} + \\boldsymbol {n}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "where \\(y\\) denotes the LR image, \\(\\pmb{x}\\) denotes the HR image, \\(\\otimes\\) indicates the convolution operation, \\(\\downarrow_{s}\\) denotes the down" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "26047" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.117, + 0.083, + 0.434, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.235, + 0.47, + 0.292 + ], + "angle": 0, + "content": "Figure 1. The overview of the RKS module. The MCMC simulation can generate the random kernel \\( \\pmb{k}_p^t \\) from random kernel distributions \\( \\{\\pmb{k}_r^l\\}_{l=1}^L \\) as the kernel prior with respect to the current model parameters \\( \\pmb{x}^{t-1}, \\pmb{y} \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.308, + 0.47, + 0.355 + ], + "angle": 0, + "content": "sampling operation with scale factor \\( s \\), and \\( k \\) denotes the blur kernel. The BSR problem (1) can be formulated as a maximum a posteriori (MAP) problem:" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.362, + 0.469, + 0.385 + ], + "angle": 0, + "content": "\\[\n\\max _ {\\boldsymbol {x}, \\boldsymbol {k}} p (\\boldsymbol {y} | \\boldsymbol {x}, \\boldsymbol {k}) p (\\boldsymbol {x}) p (\\boldsymbol {k}), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.394, + 0.469, + 0.499 + ], + "angle": 0, + "content": "where \\(p(\\boldsymbol{y}|\\boldsymbol{x},\\boldsymbol{k})\\) denotes the likelihood of the observed LR image \\(\\boldsymbol{y}\\), \\(p(\\boldsymbol{x})\\) and \\(p(\\boldsymbol{k})\\) are the HR image and kernel priors, respectively. Image priors [8, 14, 41, 44, 55] have been well-designed and fully-studied in the past decade. In contrast, researches on kernel priors \\(p(\\boldsymbol{k})\\) are in the ascendant, as kernel samples are less expensive to obtain and the training phase is more efficient [9, 12, 25, 51, 53]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.5, + 0.469, + 0.634 + ], + "angle": 0, + "content": "In this paper, we propose the DKP model, which comprises two modules: RKS and PKE. The RKS module is employed to generate rational kernel priors, which are assigned to the PKE module to support the estimation of blur kernel. Let \\( t = 1,2,\\dots ,T \\) denote the alternative iterations among these two modules and the adopted IR model, \\( \\pmb{k}^t \\) and \\( \\pmb{x}^{t} \\) denote the estimated blur kernel and HR image at the \\( t^{th} \\) iteration, respectively. The details of DKP model is given below." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.635, + 0.469, + 0.744 + ], + "angle": 0, + "content": "RKS module. At the \\(t^{th}\\) iteration, the RKS module plays the key role of generating a rational kernel prior \\(\\pmb{k}_p^t\\) from the MCMC simulation. The overview diagram is shown in Fig. 1. Let \\(p(\\pmb{k}_r|\\pmb{\\Sigma}_r)\\) denotes that the random kernel \\(\\pmb{k}_r\\) is conditioned by the latent variable \\(\\pmb{\\Sigma}_r\\), in which \\(p(\\pmb{\\Sigma}_r)\\) determines the category of blur kernel. Then the distribution of the kernel prior \\(\\pmb{k}_p^t\\) can be formulated as" + }, + { + "type": "equation", + "bbox": [ + 0.155, + 0.751, + 0.469, + 0.784 + ], + "angle": 0, + "content": "\\[\np \\left(\\boldsymbol {k} _ {p} ^ {t}\\right) = \\int_ {\\boldsymbol {\\Sigma} _ {r}} p \\left(\\boldsymbol {k} _ {r} \\mid \\boldsymbol {\\Sigma} _ {r}\\right) p \\left(\\boldsymbol {\\Sigma} _ {r}\\right) d \\boldsymbol {\\Sigma} _ {r}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.791, + 0.469, + 0.864 + ], + "angle": 0, + "content": "Here, \\(\\Sigma_r\\) is the parameter of kernel (e.g., the variance of Gaussian kernel or the length of motion kernel). 
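
To make the degradation model concrete, below is a minimal NumPy/SciPy sketch of Eq. (1); the helper name `degrade`, the symmetric boundary handling, and the noise level are illustrative assumptions rather than the authors' released implementation.

```python
# Minimal sketch of Eq. (1): y = (x ⊗ k)↓_s + n.
# The function name, boundary handling, and noise level are assumptions.
import numpy as np
from scipy.signal import convolve2d

def degrade(x, k, s=4, noise_std=0.0, rng=None):
    """Blur HR image x (H, W) with kernel k, downsample by factor s, add noise n."""
    rng = rng if rng is not None else np.random.default_rng(0)
    blurred = convolve2d(x, k, mode="same", boundary="symm")  # x ⊗ k
    lr = blurred[::s, ::s]                                    # ↓_s
    return lr + noise_std * rng.standard_normal(lr.shape)     # + n
```

With the scale factor s = 4 used later in the experiments, a 256 x 256 HR image and a (4s+3) x (4s+3) = 19 x 19 kernel would yield a 64 x 64 LR observation.
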
It is not easy to sample all the possible \\(\\Sigma_r\\), and therefore, we convert (3) into the Monte Carlo simulation in the following form:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.864, + 0.469, + 0.904 + ], + "angle": 0, + "content": "\\[\np \\left(\\boldsymbol {k} _ {p} ^ {t}\\right) \\approx \\sum_ {l = 1} ^ {L} p \\left(\\boldsymbol {k} _ {r} ^ {l} \\mid \\boldsymbol {\\Sigma} _ {r} ^ {l}\\right) \\boldsymbol {\\Sigma} _ {r} ^ {l}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.091, + 0.892, + 0.136 + ], + "angle": 0, + "content": "where \\(l\\) denotes the index of the Monte Carlo sampling, \\(\\pmb{\\Sigma}_r^l\\) denotes the \\(l^{th}\\) sampled latent variable, \\(\\pmb{k}_r^l\\) is the \\(l^{th}\\) sampled kernel, conditioned on the \\(\\pmb{\\Sigma}_r^l\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.137, + 0.892, + 0.197 + ], + "angle": 0, + "content": "To ensure the rationality of randomly generated kernels towards the BSR problem, as well as the optimization during the iterations, the MCMC simulation is proposed as follows," + }, + { + "type": "equation", + "bbox": [ + 0.521, + 0.209, + 0.892, + 0.25 + ], + "angle": 0, + "content": "\\[\np \\left(\\boldsymbol {k} _ {p} ^ {t} \\mid \\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}\\right) \\approx \\sum_ {l = 1} ^ {L} p \\left(\\boldsymbol {k} _ {r} ^ {l} \\mid \\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}\\right) p \\left(\\boldsymbol {k} _ {r} ^ {l} \\mid \\boldsymbol {\\Sigma} _ {r} ^ {l}\\right) \\boldsymbol {\\Sigma} _ {r} ^ {l}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.262, + 0.892, + 0.325 + ], + "angle": 0, + "content": "where \\(p(\\pmb{k}_r^l|\\pmb{x}^{t-1},\\pmb{y})\\) denotes the kernel weight \\(\\omega^l\\), that is conditioned on the observed LR image \\(\\pmb{y}\\) and the estimated HR image \\(\\pmb{x}^{t-1}\\) with respect to the MCMC loss \\(\\mathcal{L}_{MCMC}\\) in the following form" + }, + { + "type": "equation", + "bbox": [ + 0.588, + 0.336, + 0.892, + 0.369 + ], + "angle": 0, + "content": "\\[\n\\omega^ {l} = p \\left(\\boldsymbol {k} _ {r} ^ {l} \\mid \\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}\\right) \\propto \\frac {1}{\\mathcal {L} _ {M C M C} ^ {l}}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.379, + 0.545, + 0.392 + ], + "angle": 0, + "content": "where" + }, + { + "type": "equation", + "bbox": [ + 0.561, + 0.405, + 0.892, + 0.424 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {M C M C} ^ {l} = \\left\\| \\boldsymbol {y} - \\left(\\boldsymbol {x} ^ {t - 1} \\otimes \\boldsymbol {k} _ {r} ^ {l}\\right) \\downarrow_ {s} \\right\\| _ {F} ^ {2} + \\delta , \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.435, + 0.892, + 0.464 + ], + "angle": 0, + "content": "\\(\\delta\\) is the noise to prevent \\(\\mathcal{L}_{MCMC}^l = 0\\). In this way, \\(\\pmb{k}_p^t\\) can be formulated as" + }, + { + "type": "equation", + "bbox": [ + 0.633, + 0.464, + 0.891, + 0.504 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {k} _ {p} ^ {t} = \\frac {1}{L} \\sum_ {l = 1} ^ {L} \\omega^ {l} \\boldsymbol {k} _ {r} ^ {l}. \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.513, + 0.892, + 0.542 + ], + "angle": 0, + "content": "The obtained \\( \\pmb{k}_p^t \\) is then assigned to the PKE module as a rational kernel prior, which will be introduced next." 
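
A minimal sketch of one RKS step (Eqs. (4)-(8)) is given below, reusing the `degrade` helper sketched earlier; `sample_random_kernel` stands for a caller-supplied draw from p(k_r | Σ_r) (e.g., a random anisotropic Gaussian) and is an assumption here, while L = 5 follows the setting reported in Section 5.1.

```python
# Sketch of the RKS module: weight L randomly sampled kernels by the inverse of
# the LR reconstruction error (Eqs. (6)-(7)) and average them into k_p (Eq. (8)).
import numpy as np

def rks_kernel_prior(x_prev, y, sample_random_kernel, L=5, s=4, delta=1e-6):
    kernels, weights = [], []
    for _ in range(L):                              # Monte Carlo samples, Eq. (4)
        k_r = sample_random_kernel()
        err = y - degrade(x_prev, k_r, s=s)         # LR reconstruction residual
        loss = float(np.sum(err ** 2)) + delta      # L_MCMC, Eq. (7)
        kernels.append(k_r)
        weights.append(1.0 / loss)                  # ω^l ∝ 1 / L_MCMC, Eq. (6)
    k_p = sum(w * k for w, k in zip(weights, kernels)) / L   # Eq. (8)
    return k_p / k_p.sum()                          # renormalization is an assumption
```
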
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.543, + 0.892, + 0.619 + ], + "angle": 0, + "content": "We note that the obtained kernel prior \\( k_{p}^{t} \\) is an expectation of \\( L \\) times sampling according to (4). The number of the sampling times \\( L \\) plays the role of annealing/tempering in MCMC simulations as a hyper-parameter. Details of the tuning on \\( L \\) will be given in Section 5.1." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.619, + 0.892, + 0.664 + ], + "angle": 0, + "content": "PKE module. In our DKP model, the PKE module is employed to estimate the blur kernel by a lightweight network \\( \\mathrm{G}_k \\) with parameters \\( \\phi_k \\) as follows" + }, + { + "type": "equation", + "bbox": [ + 0.645, + 0.676, + 0.891, + 0.694 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {k} ^ {t} = \\mathbf {G} _ {\\boldsymbol {k}} \\left(\\phi_ {\\boldsymbol {k}} ^ {t}\\right). \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.706, + 0.891, + 0.752 + ], + "angle": 0, + "content": "The network \\( \\mathbf{G}_k \\) takes a fixed noise that is randomly initialized as input, and we neglect it for demonstration convenience as \\( \\phi_k^t \\) are the main variables." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.752, + 0.892, + 0.828 + ], + "angle": 0, + "content": "This kernel estimator \\( \\mathbf{G}_k \\) is optimized in the NLD paradigm with respect to the data-consistency term and kernel prior term, as shown in Fig. 2. The data-consistency term is computed by the LR image reconstruction error, which is given by" + }, + { + "type": "equation", + "bbox": [ + 0.499, + 0.838, + 0.892, + 0.87 + ], + "angle": 0, + "content": "\\[\n\\log p \\left(\\phi_ {\\boldsymbol {k}} ^ {t - 1} \\mid \\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}\\right) = - \\| \\boldsymbol {y} - \\left(\\boldsymbol {x} ^ {t - 1} \\otimes \\mathbf {G} _ {\\boldsymbol {k}} \\left(\\phi_ {\\boldsymbol {k}} ^ {t - 1}\\right)\\right) \\downarrow_ {s} \\| _ {F} ^ {2}. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.903 + ], + "angle": 0, + "content": "The kernel prior term is computed based on the difference between the network-estimated kernel \\(\\mathbf{G}_k(\\boldsymbol{\\phi}_k^{t - 1})\\) and the" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "26048" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.082, + 0.465, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.234, + 0.47, + 0.29 + ], + "angle": 0, + "content": "Figure 2. The overview of the PKE module. The blur kernel \\( \\pmb{k}^t \\) is estimated by the network \\( G_{k} \\), whose parameters \\( \\phi_{k} \\) are updated by the kernel prior term from RKS module and data-consistency term, based on the LR image reconstruction error." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.305, + 0.469, + 0.334 + ], + "angle": 0, + "content": "random-sampled kernel \\( k_{p}^{t} \\) from the RKS module as follows," + }, + { + "type": "equation", + "bbox": [ + 0.134, + 0.341, + 0.469, + 0.361 + ], + "angle": 0, + "content": "\\[\n\\log p \\left(\\boldsymbol {\\phi} _ {\\boldsymbol {k}} ^ {t - 1} \\mid \\boldsymbol {k} _ {p} ^ {t}\\right) = - \\| \\mathrm {G} _ {\\boldsymbol {k}} \\left(\\boldsymbol {\\phi} _ {\\boldsymbol {k}} ^ {t - 1}\\right) - \\boldsymbol {k} _ {p} ^ {t} \\| _ {F} ^ {2}. 
\\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.367, + 0.468, + 0.397 + ], + "angle": 0, + "content": "By combining (10) and (11), the network parameters \\(\\phi_{k}^{t - 1}\\) can be updated as follows," + }, + { + "type": "equation", + "bbox": [ + 0.108, + 0.402, + 0.469, + 0.479 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\phi_ {\\boldsymbol {k}} ^ {t} = \\phi_ {\\boldsymbol {k}} ^ {t - 1} + \\frac {\\delta^ {2}}{2} \\frac {\\partial \\log p (\\phi_ {\\boldsymbol {k}} ^ {t - 1} | \\boldsymbol {x} ^ {t - 1} , \\boldsymbol {y})}{\\partial \\phi_ {\\boldsymbol {k}} ^ {t - 1}} \\\\ + \\delta \\frac {\\partial \\log p \\left(\\phi_ {\\boldsymbol {k}} ^ {t - 1} \\mid \\boldsymbol {k} _ {p} ^ {t}\\right)}{\\partial \\phi_ {\\boldsymbol {k}} ^ {t - 1}}, \\tag {12} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.483, + 0.469, + 0.528 + ], + "angle": 0, + "content": "where the second term is the data-consistency update, the third term is the additional update based on the random kernel \\( \\pmb{k}_p^t \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.528, + 0.469, + 0.648 + ], + "angle": 0, + "content": "It has been proved to be effective that the random noise-based disturbance can prevent being trapped into bad local modes for the variable update in Langevin dynamics [2, 32, 48, 51]. More details of Langevin dynamics refer to the supplementary material. At this stage, the random kernel sample from the RKS module can be regarded as the random \"noise\" for the \\(\\phi_{k}^{t - 1}\\) update. Eq. (12) can be reformulated as follows," + }, + { + "type": "equation", + "bbox": [ + 0.097, + 0.653, + 0.469, + 0.69 + ], + "angle": 0, + "content": "\\[\n\\phi_ {k} ^ {t} = \\phi_ {k} ^ {t - 1} + \\frac {\\delta^ {2}}{2} \\frac {\\partial \\log p \\left(\\phi_ {k} ^ {t - 1} \\mid \\boldsymbol {x} ^ {t - 1} , \\boldsymbol {y}\\right)}{\\partial \\phi_ {k} ^ {t - 1}} + \\zeta_ {\\phi_ {k}} ^ {t - 1}, \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.695, + 0.468, + 0.734 + ], + "angle": 0, + "content": "where \\(\\zeta_{\\phi_k}^{t - 1} = \\frac{\\partial\\log p(\\phi_k^{t - 1}|k_p^t)}{\\partial\\phi_k^{t - 1}}\\) denotes the parameters correlated Langevin dynamics disturbance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.469, + 0.901 + ], + "angle": 0, + "content": "The pipeline of our DKP at the \\(t^{th}\\) iteration is given in Algorithm 1. The whole DKP model is implemented in a plug-and-play style, in which training in advance is not required. Besides, the random kernels from the RKS module are self-adaptively sampled through the MCMC simulation, without the need of labeled training data. We should also note that the DKP model only brings negligible runtime and memory cost in applications, as the adopted network \\(\\mathbf{G}_k\\) is typically lightweight. This leads to high flexibility and low computational complexity. These three merits promise our DKP the convenience of being applied to" + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.095, + 0.788, + 0.11 + ], + "angle": 0, + "content": "Algorithm 1: The proposed DKP model." 
+ }, + { + "type": "text", + "bbox": [ + 0.509, + 0.113, + 0.67, + 0.129 + ], + "angle": 0, + "content": "1 Given: \\(x^{t - 1},y\\) and \\(\\phi_{k}^{t - 1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.129, + 0.755, + 0.141 + ], + "angle": 0, + "content": "2 % Random Kernel Sampling (RKS) Module" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.142, + 0.745, + 0.155 + ], + "angle": 0, + "content": "3 Sample random kernels \\(\\{k_r^l\\}_{l = 1}^L\\) via MC." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.155, + 0.643, + 0.166 + ], + "angle": 0, + "content": "4 for \\(l\\gets 0,1,\\ldots ,L\\) do" + }, + { + "type": "list", + "bbox": [ + 0.509, + 0.113, + 0.755, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.509, + 0.166, + 0.851, + 0.186 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{c c} \\mathbf {5} & \\omega^ {l} = \\frac {1}{\\mathcal {L} _ {M C M C} ^ {l}}, \\mathcal {L} _ {M C M C} ^ {l} = \\| \\boldsymbol {y} - (\\boldsymbol {x} ^ {t - 1} \\otimes \\boldsymbol {k} _ {r} ^ {l}) \\downarrow_ {s} \\| _ {F} ^ {2} + \\delta \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.186, + 0.545, + 0.195 + ], + "angle": 0, + "content": "6 end" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.197, + 0.638, + 0.213 + ], + "angle": 0, + "content": "7 \\(\\pmb{k}_p^t = \\frac{1}{L}\\sum_{l = 1}^L\\omega^l\\pmb{k}_r^t\\)" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.213, + 0.743, + 0.224 + ], + "angle": 0, + "content": "8 % Prior Kernel Estimation (PKE) Module" + }, + { + "type": "equation", + "bbox": [ + 0.509, + 0.225, + 0.857, + 0.252 + ], + "angle": 0, + "content": "\\[\n\\phi_ {k} ^ {t} = \\phi_ {k} ^ {t - 1} + \\frac {\\delta^ {2}}{2} \\frac {\\partial \\log p \\left(\\phi_ {k} ^ {t - 1} | \\boldsymbol {x} ^ {t - 1} , \\boldsymbol {y}\\right)}{\\partial \\phi_ {k} ^ {t - 1}} + \\delta \\frac {\\partial \\log p \\left(\\phi_ {k} ^ {t - 1} | \\boldsymbol {k} _ {p} ^ {t}\\right)}{\\partial \\phi_ {k} ^ {t - 1}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.252, + 0.655, + 0.265 + ], + "angle": 0, + "content": "10 Output: \\( \\pmb{k}^t = \\mathrm{G}_k(\\phi_k^t) \\)." + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.276, + 0.888, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.527, + 0.4, + 0.863, + 0.413 + ], + "angle": 0, + "content": "Figure 3. The overview of our DKP-based BSR method." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.43, + 0.892, + 0.474 + ], + "angle": 0, + "content": "the existing image restoration approaches, including the untrained DIP model and off-the-shelf pre-trained diffusion model, which will be detailed in the next section." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.486, + 0.741, + 0.502 + ], + "angle": 0, + "content": "4. DKP-based BSR Methods" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.511, + 0.599, + 0.527 + ], + "angle": 0, + "content": "4.1. Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.534, + 0.892, + 0.641 + ], + "angle": 0, + "content": "The overview of the proposed DKP-based BSR method is illustrated in Fig. 3. The DKP model (gray box), including RKS module (blue box), PKE module (lilac box), and IR model (red box) alternatively optimize the blur kernel and refine the HR image, respectively. 
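
A PyTorch sketch of the PKE/NLD update in Eqs. (9)-(12) follows. The three-layer FCN matches the description in Section 5.1, but the softmax normalization, the input noise dimension, and the unit-learning-rate gradient step used to mimic Eq. (12) are assumptions, not the released implementation; `degrade_t` is a differentiable counterpart of Eq. (1).

```python
# Sketch of the kernel estimator G_k and one Langevin-style parameter update.
import torch
import torch.nn.functional as F

class KernelFCN(torch.nn.Module):
    """Lightweight kernel generator G_k(phi_k): fixed noise z -> blur kernel."""
    def __init__(self, z_dim=64, ksize=19, width=1000):
        super().__init__()
        self.ksize = ksize
        self.net = torch.nn.Sequential(
            torch.nn.Linear(z_dim, width), torch.nn.ReLU(),
            torch.nn.Linear(width, width), torch.nn.ReLU(),
            torch.nn.Linear(width, ksize * ksize),
        )
    def forward(self, z):
        k = torch.softmax(self.net(z), dim=-1)      # non-negative, sums to one
        return k.view(1, 1, self.ksize, self.ksize)

def degrade_t(x, k, s=4):
    """Differentiable Eq. (1) without noise: x is (1,1,H,W), k is (1,1,kh,kw)."""
    pad = k.shape[-1] // 2
    blurred = F.conv2d(F.pad(x, (pad, pad, pad, pad), mode="replicate"), k)
    return blurred[..., ::s, ::s]

def pke_nld_step(G_k, z_k, x_prev, y, k_p, s=4, delta=0.01):
    """One NLD update of phi_k; the kernel prior k_p acts as the noise term."""
    k_est = G_k(z_k)                                                   # Eq. (9)
    data_term = ((y - degrade_t(x_prev, k_est, s)) ** 2).sum()         # -Eq. (10)
    prior_term = ((k_est - k_p) ** 2).sum()                            # -Eq. (11)
    loss = (delta ** 2 / 2) * data_term + delta * prior_term           # cf. Eq. (12)
    G_k.zero_grad()
    loss.backward()
    with torch.no_grad():
        for p in G_k.parameters():
            p -= p.grad                                                # unit-step SGD
    return G_k(z_k).detach()
```
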
For each iteration, the estimated HR image \\( \\boldsymbol{x}^{t-1} \\) and LR image \\( \\boldsymbol{y} \\) are first fed to RKS module \\( f_{\\mathrm{RKS}} \\) to generate kernel prior" + }, + { + "type": "equation", + "bbox": [ + 0.62, + 0.647, + 0.891, + 0.667 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {k} _ {p} ^ {t} = f _ {\\mathrm {R K S}} \\left(\\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}\\right), \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.671, + 0.892, + 0.717 + ], + "angle": 0, + "content": "where \\(\\pmb{x}^{t - 1}\\) denotes the estimated HR image from the last IR model output. Then, the kernel prior \\(\\pmb{k}_p^t\\) will be assigned to the PKE module \\(f_{\\mathrm{PKE}}\\), which estimates kernel as follows," + }, + { + "type": "equation", + "bbox": [ + 0.609, + 0.723, + 0.891, + 0.743 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {k} ^ {t} = f _ {\\mathrm {P K E}} \\left(\\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}, \\boldsymbol {k} _ {p} ^ {t}\\right), \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.749, + 0.891, + 0.794 + ], + "angle": 0, + "content": "where \\(\\pmb{k}^t\\) is the estimated kernel at the \\(t^{th}\\) kernel estimation iteration, which will be assigned to the IR model. The \\(t^{th}\\) HR image \\(\\pmb{x}^t\\) can be estimated by the IR model as follows" + }, + { + "type": "equation", + "bbox": [ + 0.615, + 0.801, + 0.891, + 0.82 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} ^ {t} = f _ {\\mathrm {I R}} \\left(\\boldsymbol {x} ^ {t - 1}, \\boldsymbol {y}, \\boldsymbol {k} ^ {t}\\right), \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.901 + ], + "angle": 0, + "content": "where \\( f_{\\mathrm{IR}} \\) denotes the adopted IR model. In this paper, two representative IR models, DIP [44] and diffusion model [14], are applied to evaluate the DKP-based BSR solutions, referring to DIP-DKP and Diff-DKP, which are introduced in the sequel." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "26049" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.089, + 0.096, + 0.351, + 0.11 + ], + "angle": 0, + "content": "Algorithm 2: The proposed DIP-DKP." + }, + { + "type": "algorithm", + "bbox": [ + 0.082, + 0.112, + 0.468, + 0.284 + ], + "angle": 0, + "content": "1 Given: \\(y,\\phi_x^0,\\phi_{DKP}^0,x^0 = G_x(\\phi_x^0)\\) \n2 for \\(t\\gets 0,1,\\dots,T - 1\\) do \n3 \\(\\%\\) DKP-based kernel estimation stage \n4 \\(\\phi_{DKP}^{t + 1} = \\phi_{DKP}^{t} + \\frac{\\delta^{2}}{2}\\frac{\\partial\\log p(\\phi_{DKP}^{t}|x^{t},y)}{\\partial\\phi_{DKP}^{t}} +\\delta \\frac{\\partial\\log p(\\phi_{DKP}^{t}|k_{p}^{t})}{\\partial\\phi_{DKP}^{t}}\\) \n5 \\(\\pmb {k}^{t + 1} = \\mathrm{G}_{DKP}(\\phi_{DKP}^{t + 1})\\) \n6 \\(\\%\\) DIP-based image restoration stage \n7 \\(\\phi_x^{t + 1} = \\phi_x^t +\\gamma_x^t\\frac{\\partial\\log p(\\phi_x^t|y,k^t)}{\\partial\\phi_x^t}\\) \n8 \\(\\pmb{x}^{t + 1} = \\mathbf{G}_{\\pmb{x}}(\\pmb{\\phi}_{\\pmb{x}}^{t + 1})\\) \n9 end \n10 Output: \\(x^T,k^T\\)" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.313, + 0.299, + 0.33 + ], + "angle": 0, + "content": "4.2. The proposed DIP-DKP" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.337, + 0.47, + 0.429 + ], + "angle": 0, + "content": "DIP-based Image Restoration. 
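
The alternating pipeline of Eqs. (14)-(16) can be summarized by the following sketch, where `f_rks`, `f_pke`, and `f_ir` are stand-ins for the RKS step, the PKE step, and whichever IR model is adopted (DIP or a diffusion sampler); the exact signatures are assumptions.

```python
# Sketch of the outer alternating loop of the DKP-based BSR method (Fig. 3).
def dkp_bsr(y, x_init, f_rks, f_pke, f_ir, T=1000):
    x, k = x_init, None
    for _ in range(T):
        k_p = f_rks(x, y)          # kernel prior via MCMC sampling, Eq. (14)
        k = f_pke(x, y, k_p)       # NLD-updated kernel estimate,    Eq. (15)
        x = f_ir(x, y, k)          # HR image refinement,            Eq. (16)
    return x, k
```
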
DIP [44] is designed for capturing low-level image statistics, and estimates HR image \\( \\boldsymbol{x} = \\mathrm{G}_{\\boldsymbol{x}}(\\boldsymbol{z}_{\\boldsymbol{x}}, \\phi_{\\boldsymbol{x}}) \\) from a fixed random noise input \\( z_{x} \\) (we omit \\( z_{x} \\) in the rest of this paper for demonstration convenience). A typical formulation of DIP-based BSR methods [25, 34] is given as follows" + }, + { + "type": "equation", + "bbox": [ + 0.106, + 0.439, + 0.469, + 0.504 + ], + "angle": 0, + "content": "\\[\n\\left\\{ \\begin{array}{l} \\phi_ {x} ^ {*}, \\phi_ {k} ^ {*} = \\underset {\\phi_ {x}, \\phi_ {k}} {\\arg \\min } \\| \\boldsymbol {y} - \\\\ \\quad \\left(\\mathrm {G} _ {\\boldsymbol {x}} \\left(\\phi_ {\\boldsymbol {x}}\\right) \\otimes \\mathrm {G} _ {\\boldsymbol {k}} \\left(\\phi_ {\\boldsymbol {k}}\\right)\\right) \\downarrow_ {s} \\| _ {F} ^ {2}, \\\\ \\boldsymbol {x} ^ {*} = \\mathrm {G} _ {\\boldsymbol {x}} \\left(\\phi_ {\\boldsymbol {x}} ^ {*}\\right), \\boldsymbol {k} ^ {*} = \\mathrm {G} _ {\\boldsymbol {k}} \\left(\\phi_ {\\boldsymbol {k}} ^ {*}\\right). \\end{array} \\right. \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.513, + 0.469, + 0.649 + ], + "angle": 0, + "content": "Double-DIP [34] and FKP-DIP [25] have exploited the effectiveness towards the BSR problem. However, the kernel prior of \\( \\mathrm{G}_k(\\phi_k^*) \\) either adopts the untrained network with limited performances on the kernel estimation [34], or pre-trained kernel network, referring to FKP [25], that requests supervised training in advance. As shall be shown in experiments, pre-trained networks do not perform well to generate reasonable kernel estimations when the kernel categories vary." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.65, + 0.469, + 0.724 + ], + "angle": 0, + "content": "Proposed DIP-DKP. We replace the untrained or pretrained networks for kernel priors in the existing DIP-based alternative framework by the proposed DKP model, which we refer to as DIP-DKP. The objective of our proposed DIP-DKP can be formulated as follows," + }, + { + "type": "equation", + "bbox": [ + 0.089, + 0.734, + 0.469, + 0.801 + ], + "angle": 0, + "content": "\\[\n\\left\\{ \\begin{array}{l} \\phi_ {\\boldsymbol {x}} ^ {*}, \\phi_ {D K P} ^ {*} = \\underset {\\phi_ {\\boldsymbol {x}}, \\phi_ {D K P}} {\\arg \\min } \\| \\boldsymbol {y} - \\left(\\mathrm {G} _ {D K P} \\left(\\phi_ {D K P}\\right) \\otimes \\right. \\\\ \\quad \\left. \\mathrm {G} _ {\\boldsymbol {x}} \\left(\\phi_ {\\boldsymbol {x}}\\right)\\right) \\downarrow_ {s} \\| _ {F} ^ {2} + \\| \\mathrm {G} _ {D K P} \\left(\\phi_ {D K P}\\right) - \\boldsymbol {k} _ {p}) \\| _ {F} ^ {2}, \\\\ \\boldsymbol {x} ^ {*} = \\mathrm {G} _ {\\boldsymbol {x}} \\left(\\phi_ {\\boldsymbol {x}} ^ {*}\\right), \\boldsymbol {k} ^ {*} = \\mathrm {G} _ {D K P} \\left(\\phi_ {D K P} ^ {*}\\right), \\end{array} \\right. \\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.811, + 0.469, + 0.839 + ], + "angle": 0, + "content": "where \\(\\mathrm{G}_{DKP}(\\phi_{DKP})\\) is the kernel network of the proposed DKP model." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.84, + 0.469, + 0.901 + ], + "angle": 0, + "content": "The overall solution procedure of the proposed DIP-DKP is given in Algorithm 2. 
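
A sketch of the joint DIP-DKP update implied by Eq. (20) is given below: both the DIP generator and the DKP kernel network are optimized from scratch against the LR observation. Using a single Adam optimizer is a simplification of the two separate update rules in Algorithm 2; `G_x`, `G_dkp`, `z_x`, `z_k`, and `degrade_t` are assumed as in the earlier sketches.

```python
# Sketch of one joint update of phi_x and phi_DKP under the objective of Eq. (20).
import torch

def dip_dkp_step(G_x, G_dkp, z_x, z_k, y, k_p, opt, s=4):
    x_est = G_x(z_x)                                   # HR estimate from DIP
    k_est = G_dkp(z_k)                                 # kernel estimate from DKP
    data_term = ((y - degrade_t(x_est, k_est, s)) ** 2).sum()
    prior_term = ((k_est - k_p) ** 2).sum()            # kernel-prior regularizer
    loss = data_term + prior_term                      # Eq. (20)
    opt.zero_grad(); loss.backward(); opt.step()
    return x_est.detach(), k_est.detach()

# opt = torch.optim.Adam(list(G_x.parameters()) + list(G_dkp.parameters()), lr=1e-3)
```
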
At each \\(t^{th}\\) iteration, the kernel \\(\\pmb{k}^t\\) is estimated in the DKP-based kernel estimation stage and then is assigned to the DIP model for HR image" + }, + { + "type": "code_caption", + "bbox": [ + 0.51, + 0.095, + 0.774, + 0.11 + ], + "angle": 0, + "content": "Algorithm 3: The proposed Diff-DKP." + }, + { + "type": "algorithm", + "bbox": [ + 0.507, + 0.112, + 0.89, + 0.308 + ], + "angle": 0, + "content": "1 Given: \\(\\pmb{y},\\phi_{DKP}^{T},S_{\\theta}\\) and \\(\\pmb{x}_T\\sim \\mathcal{N}(\\pmb {0},\\pmb {I})\\) \n2 for \\(t\\gets T,T - 1,\\dots ,1\\) do \n3 \\(\\%\\) Diffusion-based image restoration process \n4 \\(\\pmb{x}_{0|t} = \\frac{1}{\\sqrt{\\overline{\\alpha}^t}} (\\pmb{x}_t - \\mathcal{S}_\\theta (\\pmb{x}_t,t)\\sqrt{1 - \\overline{\\alpha}^t})\\) \n5 \\(\\%\\) DKP incorporated data consistency refinement \n6 \\(\\phi_{DKP}^{t - 1} = \\phi_{DKP}^{t} + \\frac{\\delta^{2}}{2}\\frac{\\partial\\log p(\\phi_{DKP}^{t}|\\pmb{x}_{0|t},\\pmb{y})}{\\partial\\phi_{DKP}^{t}} +\\delta \\frac{\\partial\\log p(\\phi_{DKP}^{t}|\\pmb{k}_{p}^{t})}{\\partial\\phi_{DKP}^{t}}\\) \n7 \\(\\pmb{k}^{t - 1} = G_{DKP}(\\phi_{DKP}^{t - 1})\\) \n8 \\(\\hat{\\pmb{x}}_{0|t} = \\pmb{x}_{0|t} + \\gamma_{\\pmb{x}}^{t}\\frac{\\partial\\log p(\\pmb{x}_{0|t}|\\pmb{y},\\pmb{k}^{t - 1})}{\\partial\\pmb{x}_{0|t}}\\) \n9 \\(\\pmb{x}_{t - 1}\\sim p(\\pmb{x}_{t - 1}|\\pmb{x}_t,\\hat{\\pmb{x}}_{0|t})\\) \n10 end \n11 Output: \\(\\pmb{x}_0,\\pmb{k}^0\\)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.338, + 0.892, + 0.489 + ], + "angle": 0, + "content": "restoration in the forward propagation. In the back propagation, the parameters of DIP and DKP, i.e., \\(\\phi_{x}\\) and \\(\\phi_{DKP}\\), are updated while solving the BSR problem via an unsupervised inference. With DKP, DIP-DKP realizes an adaptive kernel learning along the convergence trajectory of the BSR objective function, enabling accurate and dynamic kernel estimation. Therefore, without expensive labeled data and long training time in advance, DIP-DKP can estimate HR image and blur kernel simultaneously in a plug-and-play style." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.499, + 0.611, + 0.515 + ], + "angle": 0, + "content": "4.3. Diff-DKP" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.523, + 0.892, + 0.63 + ], + "angle": 0, + "content": "Original DDPM Inference Process. Denoising diffusion probabilistic models (DDPM) [14] define a T-step forward process to add noise to data and a T-step reverse process to restore desired data from the noise. When an off-the-shelf DDPM \\( S_{\\theta} \\) is applied to solve the image restoration problem, the reverse process is implemented as an inference process to estimate the high-quality image as follows," + }, + { + "type": "equation", + "bbox": [ + 0.556, + 0.64, + 0.891, + 0.691 + ], + "angle": 0, + "content": "\\[\n\\left\\{ \\begin{array}{l} \\boldsymbol {x} _ {0 | t} = \\frac {1}{\\sqrt {\\bar {\\alpha} ^ {t}}} \\left(\\boldsymbol {x} _ {t} - S _ {\\boldsymbol {\\theta}} \\left(\\boldsymbol {x} _ {t}, t\\right) \\sqrt {1 - \\bar {\\alpha} ^ {t}}\\right), \\\\ \\boldsymbol {x} _ {t - 1} \\sim p \\left(\\boldsymbol {x} _ {t - 1} \\mid \\boldsymbol {x} _ {t}, \\boldsymbol {x} _ {0 | t}\\right), \\end{array} \\right. 
\\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.704, + 0.892, + 0.84 + ], + "angle": 0, + "content": "where \\( \\pmb{x}_{0|t} \\) denotes the estimated HR image \\( \\pmb{x}_0 \\) at the \\( t^{th} \\) step, and \\( \\overline{\\alpha}^t \\) is the hyper-parameter. To ensure that HR images \\( \\pmb{x}_0 \\sim q(\\pmb{x}) \\) can be reconstructed from random noise \\( \\pmb{x}_T \\sim \\mathcal{N}(\\pmb{0},\\pmb{I}) \\), the existing methods typically re-train [38] or fine-tune [50] the DDPM model via supervised learning on LR-HR datasets, or provide ground truth kernel [46] to enroll task-specific knowledge for convergence guarantee. However, the performance of DDPM is unstable, even when trained by a large number of labeled dataset." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Proposed Diff-DKP. The instability of DDPM mainly comes from the training process that involves multiple image processing tasks. In this case, the off-the-shelf diffusion model cannot concentrate on BSR objective, thus leading to" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "26050" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.184 + ], + "angle": 0, + "content": "image distortion and content mismatch. To alleviate this issue, the proposed Diff-DKP incorporates the DKP model to provide task-specific data-consistency knowledge on the basis of the vanilla DDPM reverse iterations. Specifically, an external DKP incorporated data consistency refinement of \\( \\pmb{x}_{0|t} \\) is inserted between (21) and (22), given by" + }, + { + "type": "equation", + "bbox": [ + 0.15, + 0.191, + 0.47, + 0.226 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {x}} _ {0 | t} = \\boldsymbol {x} _ {0 | t} + \\gamma_ {\\boldsymbol {x}} ^ {t} \\frac {\\partial \\log p (\\boldsymbol {x} _ {0 | t} | \\boldsymbol {y} , \\boldsymbol {k} ^ {t})}{\\partial \\boldsymbol {x} _ {0 | t}}, \\tag {23}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.234, + 0.295, + 0.25 + ], + "angle": 0, + "content": "where \\(\\gamma_{\\pmb{x}}^{t}\\) is the update step, and" + }, + { + "type": "equation", + "bbox": [ + 0.106, + 0.257, + 0.469, + 0.275 + ], + "angle": 0, + "content": "\\[\n\\log p \\left(\\boldsymbol {x} _ {0 | t} \\mid \\boldsymbol {y}, \\boldsymbol {k} ^ {t}\\right) = - \\| \\boldsymbol {y} - \\left(\\boldsymbol {x} _ {0 | t} \\otimes \\boldsymbol {k} ^ {t}\\right) \\downarrow_ {s} \\| _ {F} ^ {2}, \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.282, + 0.469, + 0.312 + ], + "angle": 0, + "content": "which enables the inference process to converge to the right direction along with the data-consistent solution." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.313, + 0.469, + 0.478 + ], + "angle": 0, + "content": "The overview of the Diff-DKP algorithm is presented in Algorithm 3. Let \\( t = T, T - 1, \\ldots, 1 \\) denote the index of the diffusion reverse step. At each step, the diffusion model first estimates the \\( x_{0|t} \\). Then, the DKP model adaptively generates kernel prior with respect to the latest \\( x_{0|t} \\), while \\( x_{0|t} \\) is further updated with respect to the data consistency Eq. (24), thus, ensuring the inference process is underlying the BSR objective. 
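
Below is a sketch of the DKP-incorporated data-consistency refinement of Eqs. (23)-(24), applied to the intermediate DDPM estimate x_{0|t}; obtaining the gradient via autograd and the choice of step size are assumptions, and the diffusion model itself stays frozen as described above.

```python
# Sketch of the refinement inserted between the DDPM denoising step and the
# sampling of x_{t-1}: one gradient step on the LR reconstruction error.
import torch

def diff_dkp_refine(x0t, y, k_t, gamma=1.0, s=4):
    x0t = x0t.detach().requires_grad_(True)
    recon_err = ((y - degrade_t(x0t, k_t, s)) ** 2).sum()   # -log p(x_{0|t}|y,k), Eq. (24)
    grad = torch.autograd.grad(recon_err, x0t)[0]
    return (x0t - gamma * grad).detach()                    # gradient step of Eq. (23)
```
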
It is noteworthy to point out that the parameters of the diffusion model are fixed and only the parameters of the lightweight kernel estimator network are optimized in the inference process." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.479, + 0.47, + 0.6 + ], + "angle": 0, + "content": "In this way, the off-the-shelf diffusion model plays the role of HR image estimator, while the estimated HR image is further refined by the BSR task-specific prior knowledge, referring to Eq. (23). Different from those methods that incorporate prior knowledge of BSR task via supervised re-training/fine-tuning, Diff-DKP behaves in a plug-and-play manner, without data demands or training costs before implementation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.611, + 0.21, + 0.629 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.636, + 0.268, + 0.654 + ], + "angle": 0, + "content": "5.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.659, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Data Preparation. Following the widely adopted kernel assumption [25, 35, 45, 51], we conduct the experiments on anisotropic Gaussian kernels and motion kernels, which are shown in Fig. 4. The kernel sizes are set to \\((4s + 3)\\times (4s + 3)\\). For the Gaussian kernel, the width ranges are set to \\([0.175s, 2.5s]\\), and the rotation angle range is set to \\([0,\\pi]\\), with a scale factor \\(s = 4\\), respectively. For the motion kernel, we adopt the random motion kernel generation method proposed in [22], which simulates realistic and complex blur kernels from random trajectories. Detailed formulations of Gaussian and motion kernels are given in the supplementary material. We synthesize LR images with random kernels with respect to Eq. (1) for testing data based on five popular public benchmark datasets, including Set5 [4], Set14 [52], BSD100 [30], Urban100 [15] and RealSRSet [23]. We compare these kernels in terms of the peak" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.122 + ], + "angle": 0, + "content": "signal to noise ratio (PSNR), and compare HR images in terms of PSNR and structural similarity (SSIM) [47]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.123, + 0.893, + 0.365 + ], + "angle": 0, + "content": "Comparison Methods. The proposed DIP-DKP and Diff-DKP are compared with existing baselines including: Double-DIP [34], DIP-FKP [25], DASR [45], BSRDM [51], DCLS [29], DARM [58] and DiffBSR [26]. Specifically, Double-DIP tends to provide kernel priors by training an FCN network only with respect to the LR image restoration error. DIP-FKP incorporates the FKP model as kernel prior which is pre-trained on kernel datasets. KernelGAN+ZSSR and DARM are self-supervised and train an internal generative adversarial network (GAN) to estimate the blur kernel. BSRDM formulates an elaborate degradation modelling on noise and kernel as handcrafted priors. DASR is a representative end-to-end method that is pre-trained on DIV2K [1] and Flickr2K [43] HR image datasets. DiffBSR is fine-tuned on BSR labeled datasets before being applied to estimate HR images." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.366, + 0.894, + 0.533 + ], + "angle": 0, + "content": "Implementation and Hyper-parameters. The adopted kernel estimation network \\( \\mathbf{G}_k \\) of PKE module in this paper is a three-layer fully-connected network (FCN). 
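
For reference, a sketch of drawing a random anisotropic Gaussian kernel within the ranges stated above (size (4s+3) x (4s+3), axis widths in [0.175s, 2.5s], rotation in [0, pi]) is given below; the exact covariance parameterization is an assumption, and such a sampler could serve as the kernel draw used by the RKS module.

```python
# Sketch of a random anisotropic Gaussian blur kernel under the stated ranges.
import numpy as np

def random_gaussian_kernel(s=4, rng=None):
    rng = rng if rng is not None else np.random.default_rng()
    ksize = 4 * s + 3
    sig1, sig2 = rng.uniform(0.175 * s, 2.5 * s, size=2)     # axis widths
    theta = rng.uniform(0.0, np.pi)                           # rotation angle
    c, v = np.cos(theta), np.sin(theta)
    R = np.array([[c, -v], [v, c]])
    cov = R @ np.diag([sig1 ** 2, sig2 ** 2]) @ R.T           # anisotropic covariance
    grid = np.arange(ksize) - ksize // 2
    xx, yy = np.meshgrid(grid, grid)
    pts = np.stack([xx, yy], axis=-1)                         # (ksize, ksize, 2)
    inv = np.linalg.inv(cov)
    k = np.exp(-0.5 * np.einsum("...i,ij,...j->...", pts, inv, pts))
    return k / k.sum()                                        # normalized blur kernel
```
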
The adopted DIP model follows the original settings in [44], and the diffusion model is the vanilla version [14] that is trained on ImageNet [6]. The number of sampling times in the MCMC simulation \\( L \\) is the only hyper-parameter in the proposed DKP model. The hyper-parameter tuning results are given in Table 1. It is explicit that the performance reaches equilibrium around \\( L \\in [4,8] \\). To balance the efficiency and effectiveness, we set \\( L = 5 \\) in this paper." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.546, + 0.805, + 0.562 + ], + "angle": 0, + "content": "5.2. Comparison with State-of-the-Arts" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.57, + 0.892, + 0.796 + ], + "angle": 0, + "content": "Evaluation on Gaussian Kernel Scenario. Quantitative evaluation results on four datasets with scale factors \\( s = 4 \\) are presented in the upper half part of Table 2. We can see that the proposed DIP-DKP and Diff-DKP achieve the second and the best results on all datasets. We note that DIP-DKP only realizes slightly higher performance than the existing state-of-the-art (SotA) methods, while Diff-DKP achieves significantly better performances. This recalls our demonstrations in Section 4: DIP-DKP is totally trained while solving from scratch, and the DKP model plays the role of providing better convergence guarantee. Diff-DKP utilizes the DKP model to guide the well-trained diffusion model with fruitful data priors to converge to BSR task for better HR image restoration performances. In Table 3, we further show that our DKP model achieves the accurate ker-" + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.811, + 0.695, + 0.891 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.704, + 0.812, + 0.877, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.532, + 0.894, + 0.86, + 0.907 + ], + "angle": 0, + "content": "Figure 4. The visualization of the adopted blur kernels." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "26051" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.101, + 0.089, + 0.868, + 0.104 + ], + "angle": 0, + "content": "Table 1. Average image PSNR performance of the proposed DIP-DKP and Diff-DKP on Set5 [4] on the Gaussian kernel scenario." + }, + { + "type": "table", + "bbox": [ + 0.194, + 0.111, + 0.778, + 0.15 + ], + "angle": 0, + "content": "
MethodsL=0L=2L=4L=6L=8L=10L=15
DIP-DKP (Ours)20.9927.1228.4428.5728.5228.2928.03
Diff-DKP (Ours)21.9728.9529.4029.4729.7629.6729.26
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.158, + 0.89, + 0.187 + ], + "angle": 0, + "content": "Table 2. Average PSNR/SSIM of different methods on public datasets that are synthesized by the random Gaussian/Motion kernels with \\( s = 4 \\). The best and second best results are highlighted in red and blue colors, respectively." + }, + { + "type": "table", + "bbox": [ + 0.147, + 0.193, + 0.825, + 0.385 + ], + "angle": 0, + "content": "
MethodKernelSet5 [4]Set14 [52]BSD100 [30]Urban100 [15]
Double-DIP [34]20.99/0.557818.31/0.442618.57/0.381518.15/0.4491
DASR [45]27.37/0.785925.43/0.659125.11/0.612922.88/0.6448
DIP-FKP [25]27.77/0.791425.65/0.676425.15/0.635422.89/0.6327
BSRDM [51]Gaussian kernel scenario27.81/0.802925.35/0.685925.61/0.652622.36/0.6601
DCLS [29]27.50/0.794825.68/0.663925.34/0.616922.92/0.6475
DiffBIR [26]25.15/0.646823.01/0.593523.88/0.558621.94/0.5657
DARM [58]26.25/0.681824.19/0.618724.29/0.589822.14/0.5967
DIP-DKP (Ours)28.03/0.803925.98/0.687825.66/0.653123.24/0.6644
Diff-DKP (Ours)29.44/0.859226.76/0.740026.63/0.705723.92/0.6875
Double-DIP [34]18.92/0.451020.41/0.484719.00/0.375715.42/0.2932
DASR [45]24.21/0.725224.16/0.614522.47/0.583620.24/0.5478
DIP-FKP [25]24.61/0.737124.21/0.622722.80/0.588020.33/0.5572
BSRDM [51]Motion kernel scenario24.01/0.709823.56/0.600922.62/0.579120.40/0.5494
DCLS [29]24.78/0.732324.38/0.621122.74/0.592220.49/0.5534
DiffBIR [26]23.63/0.636723.59/0.604322.35/0.578420.14/0.5347
DARM [58]24.23/0.726923.95/0.629422.48/0.583020.58/0.5595
DIP-DKP (Ours)25.30/0.741724.52/0.643423.02/0.613621.24/0.5667
Diff-DKP (Ours)28.74/0.831326.03/0.671924.10/0.628722.26/0.5862
" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.401, + 0.347, + 0.416 + ], + "angle": 0, + "content": "nel estimation with higher kernel PSNR." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.432, + 0.471, + 0.645 + ], + "angle": 0, + "content": "Evaluation on Motion Kernel Scenario. The lower half part of Table 2 shows the simulation results on the motion kernel scenario. The supervised learning methods, i.e., DASR and DiffBIR, are re-trained/fine-tuned on motion kernel degraded HR image datasets. DIP-FKP is retrained on the motion kernel dataset. The proposed DIP-DKP and Diff-DKP show significantly better performance on the motion kernel scenario, which validates that the proposed DKP model has good generalization ability towards different kernel categories. Specifically, Diff-DKP presents stable PSNR/SSIM scores when being applied to estimate motion kernels, while the rest suffer significant performance drop. This indicates that the proposed DKP is expected to handle kernel varying tasks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.659, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Visual Results. The visual results of different methods on synthetic and real-world images are shown in Fig. 5. We can see that 1) In the case of Gaussian kernel, all methods are capable of producing satisfactory deblurring results, while our DIP-DKP and Diff-DKP yield better results with more accurate kernel estimation. 2) In the case of motion kernel, certain distortion on the estimated kernel can be seen in FKP-DKP and BSRDM fail to estimate motion kernel. Meanwhile, our DIP-DKP and Diff-DKP achieve approximately accurate motion kernel estimation. 3) In the case of real image, both DIP-FKP and BSRDM estimate the Gaussian-like kernels, whereas our DIP-DKP and Diff-DKP tend to estimate the non-Gaussian kernels. This verifies that an adaptive and flexible kernel estimation discipline is learned by our DKP model, which may fit the real-world applications better." + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.399, + 0.892, + 0.442 + ], + "angle": 0, + "content": "Table 3. Average PSNR/SSIM of images and PSNR of kernels on Set14 [52] with \\( s = 4 \\). The best and second best results are highlighted in red and blue colors, respectively." + }, + { + "type": "table", + "bbox": [ + 0.51, + 0.445, + 0.885, + 0.611 + ], + "angle": 0, + "content": "
MethodKernelKernel PSNRImage PSNR/SSIM
DIP-DKP without RKS37.9218.77/0.4227
Diff-DKP without RKS40.9317.33/0.3408
Double-DIP [34]Gaussian50.6218.31/0.4426
DIP-FKP [25]kernel54.4625.65/0.6764
BSRDM [51]scenario55.3825.35/0.6859
DIP-DKP (Ours)56.2025.98/0.6878
Diff-DKP (Ours)56.7626.76/0.7400
DIP-DKP without RKS34.9218.19/0.4223
Diff-DKP without RKS34.7817.65/0.3513
Double-DIP [34]Motion35.5220.41/0.4847
DIP-FKP [25]kernel37.5224.21/0.6227
BSRDM [51]scenario37.8823.56/0.6009
DIP-DKP (Ours)39.3324.52/0.6434
Diff-DKP (Ours)40.3726.03/0.6719
" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.633, + 0.665, + 0.647 + ], + "angle": 0, + "content": "5.3. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Ablation study of RKS module. The ablation studies are carried on the MCMC sampling of kernel priors. \"Without RKS\" denotes that the adopted DKP updates the kernel network only by the data-consistency term without the learned kernel prior. In Fig. 6 (left), it can be seen that the estimated kernels without RKS have significant distortion, leading to remarkable PSNR drop of the estimated HR image, while DIP-DKP can estimate Gaussian kernels precisely with respect to the ground truth (with red frame). Fig. 6 (right) shows that the accurate motion kernel estimation no longer exists when the RKS module is absent. It is thus obvious that without the kernel prior learned from the MCMC process, the Diff-DKP fails to converge to a rational motion kernel estimation. The average kernel and image results are shown in Table 3. Without kernel prior learned from the RKS module, the kernel estimation performances of DKP-" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "26052" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.125, + 0.081, + 0.227, + 0.16 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.228, + 0.082, + 0.33, + 0.16 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.082, + 0.433, + 0.16 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.435, + 0.082, + 0.536, + 0.16 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.538, + 0.082, + 0.64, + 0.16 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.082, + 0.742, + 0.16 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.744, + 0.082, + 0.846, + 0.16 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.162, + 0.227, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.228, + 0.162, + 0.33, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.162, + 0.433, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.435, + 0.162, + 0.536, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.538, + 0.162, + 0.64, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.162, + 0.743, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.745, + 0.162, + 0.846, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.242, + 0.227, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.228, + 0.242, + 0.33, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.242, + 0.433, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.435, + 0.242, + 0.536, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.538, + 0.242, + 0.64, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.242, + 0.743, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": 
[ + 0.745, + 0.242, + 0.846, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.156, + 0.322, + 0.196, + 0.333 + ], + "angle": 0, + "content": "\\(\\mathrm{LR}(\\times 4)\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.146, + 0.336, + 0.206, + 0.347 + ], + "angle": 0, + "content": "PSNR (dB)" + }, + { + "type": "image_caption", + "bbox": [ + 0.262, + 0.322, + 0.297, + 0.332 + ], + "angle": 0, + "content": "DASR" + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.336, + 0.315, + 0.346 + ], + "angle": 0, + "content": "27.42/27.74/-" + }, + { + "type": "image_caption", + "bbox": [ + 0.363, + 0.322, + 0.403, + 0.332 + ], + "angle": 0, + "content": "DRAM" + }, + { + "type": "image_caption", + "bbox": [ + 0.347, + 0.336, + 0.418, + 0.346 + ], + "angle": 0, + "content": "26.69/26.84/-" + }, + { + "type": "image_caption", + "bbox": [ + 0.462, + 0.322, + 0.51, + 0.332 + ], + "angle": 0, + "content": "FKP-DIP" + }, + { + "type": "image_caption", + "bbox": [ + 0.45, + 0.336, + 0.521, + 0.346 + ], + "angle": 0, + "content": "27.80/28.95/-" + }, + { + "type": "image_caption", + "bbox": [ + 0.566, + 0.322, + 0.611, + 0.332 + ], + "angle": 0, + "content": "BSRDM" + }, + { + "type": "image_caption", + "bbox": [ + 0.553, + 0.336, + 0.624, + 0.346 + ], + "angle": 0, + "content": "27.60/29.14/-" + }, + { + "type": "image_caption", + "bbox": [ + 0.623, + 0.322, + 0.733, + 0.332 + ], + "angle": 0, + "content": "DIP-DKP(ours)" + }, + { + "type": "image_caption", + "bbox": [ + 0.656, + 0.336, + 0.728, + 0.346 + ], + "angle": 0, + "content": "28.07/29.45/-" + }, + { + "type": "image_caption", + "bbox": [ + 0.754, + 0.322, + 0.837, + 0.332 + ], + "angle": 0, + "content": "Diff-DKP(ours)" + }, + { + "type": "image_caption", + "bbox": [ + 0.76, + 0.336, + 0.83, + 0.346 + ], + "angle": 0, + "content": "29.38/29.89/-" + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.351, + 0.891, + 0.365 + ], + "angle": 0, + "content": "Figure 5. Visual results of different methods on public datasets for scale factor 4. Estimated/ground-truth kernels are shown on the top left." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.375, + 0.274, + 0.495 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.128, + 0.496, + 0.24, + 0.508 + ], + "angle": 0, + "content": "(a) BSD100 \"040\"" + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.375, + 0.468, + 0.496 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.321, + 0.496, + 0.441, + 0.507 + ], + "angle": 0, + "content": "(b) Set14 \"monarch\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.51, + 0.468, + 0.537 + ], + "angle": 0, + "content": "Figure 6. The intermediate results of DIP-DKP, Diff-DKP and their no RKS module versions over iterations on two test images." + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.546, + 0.46, + 0.56 + ], + "angle": 0, + "content": "Table 4. The ablation of PKE module. (Set5, x4, image PSNR)" + }, + { + "type": "table", + "bbox": [ + 0.084, + 0.564, + 0.462, + 0.616 + ], + "angle": 0, + "content": "
Layers\\Units10100100010000
113.7523.5728.9328.24
313.6128.9728.4828.35
513.3028.8128.5226.65
713.8628.3028.5427.93
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.626, + 0.468, + 0.655 + ], + "angle": 0, + "content": "based BSR methods have a significant performance drop, leading to poor image restoration performance as well." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.656, + 0.469, + 0.791 + ], + "angle": 0, + "content": "Ablation study of PKE module. Since PKE essentially estimates blur kernels on the basis of the random kernel priors and LR observations, thus it is indispensable and we conduct ablation study on the different structures of kernel network in PKE module in Table 4. We find that the kernel network performs well when it has 3-7 layers with 100-1000 units in each layer. This indicates that the kernel network has good generalization-ability for the structure without the necessity of elaborately designing the network." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.802, + 0.427, + 0.818 + ], + "angle": 0, + "content": "5.4. Model Size, Runtime and Memory Usage" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.469, + 0.9 + ], + "angle": 0, + "content": "The kernel network of our DKP model has a total of \\(562K\\) parameters (FLOPs: \\(536K\\)) while Double-DIP and DIP-FKP have \\(641K\\) parameters (FLOPs: \\(600K\\)) and \\(143K\\) parameters (FLOPs: \\(178K\\)), respectively. The runtime and memory usage of our DIP-DKP on a GeForce RTX 3090" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.38, + 0.892, + 0.516 + ], + "angle": 0, + "content": "GPU for generating a HR image of size \\(512 \\times 512\\) are about 92 seconds and 11GB memory, which is comparable with the Double-DIP (91 seconds and 11.2GB) and DIP-FKP (90 seconds and 10.6GB). As for Diff-DKP, the \\(512 \\times 512\\) image needs to be divided into four \\(256 \\times 256\\) images for restoration, which costs a total of 60 seconds and 4GB memory. Considering that our DIP-DKP and Diff-DKP are unsupervised and plug-and-play, it is reasonable to say that our methods have moderate computational costs." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.519, + 0.892, + 0.549 + ], + "angle": 0, + "content": "Due to the page limitations, more experimental results are given in the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.569, + 0.619, + 0.584 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.596, + 0.892, + 0.887 + ], + "angle": 0, + "content": "In this paper, we propose a dynamic kernel prior (DKP) model to solve the BSR problem in an unsupervised and pre-training-free paradigm. DKP realizes the rational kernel prior learning from MCMC sampling on random kernel distributions, providing accurate kernel estimation and thus leading to better HR image restoration. DKP can be easily incorporated with existing image restoration model, such as DIP and diffusion model, by replacing their kernel modeling modules or adding as an external kernel prior generator. When applied to solve the BSR problem, DKP is trained while solving the task with respect to the LR image restoration error, enabling no training necessity and labeled data demands. Extensive experiments on Gaussian and motion kernel scenarios with synthetic LR images and real-world images validate that DKP-based methods improve the kernel estimation accuracy significantly and thus lead to superior BSR results. 
We believe that the concept of using a trainable sampling process to provide adaptive priors will lead to a new direction in solving low-level tasks, aiming to achieve superior performance with modest computational costs in the way of unsupervised inference." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "26053" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 126-135, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.47, + 0.214 + ], + "angle": 0, + "content": "[2] Dominique Bakry and Michel Émery. Diffusions hypercontractives. In Séminaire de Probabilités XIX 1983/84: Proceedings, pages 177-206. Springer, 2006. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.216, + 0.47, + 0.271 + ], + "angle": 0, + "content": "[3] Sefi Bell-Kligler, Assaf Shocher, and Michal Irani. Blind super-resolution kernel estimation using an internal-gan. Advances in Neural Information Processing Systems, 32, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.274, + 0.47, + 0.342 + ], + "angle": 0, + "content": "[4] Marco Bevilacqua, Aline Roumy, Christine Guillemot, and Marie Line Alberi-Morel. Low-complexity single-image super-resolution based on nonnegative neighbor embedding. In British Machine Vision Conference, pages 135-1, 2012. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.345, + 0.47, + 0.4 + ], + "angle": 0, + "content": "[5] Hyungjin Chung, Jeongsol Kim, Michael T McCann, Marc L Klasky, and Jong Chul Ye. Diffusion posterior sampling for general noisy inverse problems. arXiv preprint arXiv:2209.14687, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.402, + 0.47, + 0.457 + ], + "angle": 0, + "content": "[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.46, + 0.469, + 0.515 + ], + "angle": 0, + "content": "[7] Chao Dong, Chen Change Loy, Kaiming He, and Xiaou Tang. Learning a deep convolutional network for image super-resolution. In European conference on computer vision, pages 184-199. Springer, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.517, + 0.47, + 0.586 + ], + "angle": 0, + "content": "[8] Yangyi Dong, Xiaoyun Zhang, Zhixin Wang, Ya Zhang, Siheng Chen, and Yanfeng Wang. Unpaired face restoration via learnable cross-quality shift. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 667-675, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.589, + 0.47, + 0.657 + ], + "angle": 0, + "content": "[9] Netalee Efrat, Daniel Glasner, Alexander Apartsin, Boaz Nadler, and Anat Levin. Accurate blur models vs. image priors in single image super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 2832-2839, 2013. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.66, + 0.469, + 0.728 + ], + "angle": 0, + "content": "[10] Yosef Gandelsman, Assaf Shocher, and Michal Irani. \"double-dip\": Unsupervised image decomposition via coupled deep-image-priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11026-11035, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.731, + 0.469, + 0.785 + ], + "angle": 0, + "content": "[11] Daniel Glasner, Shai Bagon, and Michal Irani. Superresolution from a single image. In 2009 IEEE 12th international conference on computer vision, pages 349-356. IEEE, 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.469, + 0.855 + ], + "angle": 0, + "content": "[12] Jinjin Gu, Hannan Lu, Wangmeng Zuo, and Chao Dong. Blind super-resolution with iterative kernel correction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1604-1613, 2019. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[13] Lanqing Guo, Chong Wang, Wenhan Yang, Siyu Huang, Yufei Wang, Hanspeter Pfister, and Bihan Wen. Shadowdiffusion: When degradation prior meets diffusion model for" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.133 + ], + "angle": 0, + "content": "shadow removal. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14049-14058, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.135, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[14] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.178, + 0.892, + 0.232 + ], + "angle": 0, + "content": "[15] Jia-Bin Huang, Abhishek Singh, and Narendra Ahuja. Single image super-resolution from transformed self-exemplars. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5197-5206, 2015. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.234, + 0.892, + 0.287 + ], + "angle": 0, + "content": "[16] Yan Huang, Shang Li, Liang Wang, Tieniu Tan, et al. Unfolding the alternating optimization for blind super resolution. Advances in Neural Information Processing Systems, 33:5632-5643, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.289, + 0.892, + 0.342 + ], + "angle": 0, + "content": "[17] Meiguang Jin, Stefan Roth, and Paolo Favaro. Normalized blind deconvolution. In Proceedings of the European Conference on Computer Vision (ECCV), pages 668-684, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.344, + 0.892, + 0.399 + ], + "angle": 0, + "content": "[18] Jiwon Kim, Jung Kwon Lee, and Kyoung Mu Lee. Accurate image super-resolution using very deep convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1646-1654, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.401, + 0.892, + 0.455 + ], + "angle": 0, + "content": "[19] Kwang In Kim and Younghee Kwon. Single-image superresolution using sparse regression and natural image prior. IEEE transactions on pattern analysis and machine intelligence, 32(6):1127-1133, 2010. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.457, + 0.892, + 0.523 + ], + "angle": 0, + "content": "[20] Soo Ye Kim, Hyeonjun Sim, and Munchurl Kim. Koalanet: Blind super-resolution using kernel-oriented adaptive local adjustment. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10611-10620, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.526, + 0.892, + 0.566 + ], + "angle": 0, + "content": "[21] Dilip Krishnan and Rob Fergus. Fast image deconvolution using hyper-laplacian priors. Advances in neural information processing systems, 22, 2009. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.568, + 0.892, + 0.636 + ], + "angle": 0, + "content": "[22] Orest Kupyn, Volodymyr Budzan, Mykola Mykhailych, Dmytro Mishkin, and Jií Matas. Deblurgan: Blind motion deblurring using conditional adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8183-8192, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.638, + 0.892, + 0.692 + ], + "angle": 0, + "content": "[23] Yuelong Li, Mohammad Tofighi, Junyi Geng, Vishal Monga, and Yonina C Eldar. Efficient and interpretable deep blind image deblurring via algorithm unrolling. IEEE Transactions on Computational Imaging, 6:666-681, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.693, + 0.892, + 0.774 + ], + "angle": 0, + "content": "[24] Yawei Li, Yuchen Fan, Xiaoyu Xiang, Denis Demandolx, Rakesh Ranjan, Radu Timofte, and Luc Van Gool. Efficient and explicit modelling of image hierarchies for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18278-18289, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.776, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[25] Jingyun Liang, Kai Zhang, Shuhang Gu, Luc Van Gool, and Radu Timofte. Flow-based kernel prior with application to blind super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10601-10610, 2021. 1, 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.847, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[26] Xinqi Lin, Jingwen He, Ziyan Chen, Zhaoyang Lyu, Ben Fei, Bo Dai, Wanli Ouyang, Yu Qiao, and Chao Dong. Diffbir: Towards blind image restoration with generative diffusion prior. arXiv preprint arXiv:2308.15070, 2023. 2, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "26054" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[27] Jie Liu, Wenjie Zhang, Yuting Tang, Jie Tang, and Gangshan Wu. Residual feature aggregation network for image superresolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2359-2368, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.47, + 0.207 + ], + "angle": 0, + "content": "[28] Zhengxiong Luo, Yan Huang, Shang Li, Liang Wang, and Tieniu Tan. End-to-end alternating optimization for blind super resolution. arXiv preprint arXiv:2105.06878, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.207, + 0.471, + 0.276 + ], + "angle": 0, + "content": "[29] Ziwei Luo, Haibin Huang, Lei Yu, Youwei Li, Haoqiang Fan, and Shuaicheng Liu. 
Deep constrained least squares for blind image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17642-17652, 2022. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.277, + 0.471, + 0.36 + ], + "angle": 0, + "content": "[30] David Martin, Charless Fowlkes, Doron Tal, and Jitendra Malik. A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, pages 416-423. IEEE, 2001. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.361, + 0.471, + 0.404 + ], + "angle": 0, + "content": "[31] Tomer Michaeli and Michal Irani. Nonparametric blind super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 945-952, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.405, + 0.469, + 0.433 + ], + "angle": 0, + "content": "[32] Radford M Neal et al. Mcmc using hamiltonian dynamics. Handbook of markov chain monte carlo, 2(11):2, 2011. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.434, + 0.469, + 0.489 + ], + "angle": 0, + "content": "[33] Daniele Perrone and Paolo Favaro. A clearer picture of total variation blind deconvolution. IEEE transactions on pattern analysis and machine intelligence, 38(6):1041-1055, 2015. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.49, + 0.469, + 0.559 + ], + "angle": 0, + "content": "[34] Dongwei Ren, Kai Zhang, Qilong Wang, Qinghua Hu, and Wangmeng Zuo. Neural blind deconvolution using deep priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3341-3350, 2020. 1, 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.561, + 0.469, + 0.63 + ], + "angle": 0, + "content": "[35] Gernot Riegler, Samuel Schulter, Matthias Ruther, and Horst Bischof. Conditioned regression models for non-blind single image super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 522-530, 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.632, + 0.469, + 0.674 + ], + "angle": 0, + "content": "[36] Leonid I Rudin, Stanley Osher, and Emad Fatemi. Nonlinear total variation based noise removal algorithms. Physica D: nonlinear phenomena, 60(1-4):259-268, 1992. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.675, + 0.469, + 0.744 + ], + "angle": 0, + "content": "[37] Marshall F Tappen Bryan C Russell and William T Freeman. Exploiting the sparse derivative prior for super-resolution and image demosaicing. In Proceedings of the Third International Workshop Statistical and Computational Theories of Vision, pages 1-28, 2003. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.745, + 0.469, + 0.814 + ], + "angle": 0, + "content": "[38] Chitwan Sahara, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image super-resolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(4):4713-4726, 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.469, + 0.873 + ], + "angle": 0, + "content": "[39] Assaf Shocher, Nadav Cohen, and Michal Irani. \"zero-shot\" super-resolution using deep internal learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3118-3126, 2018. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.902 + ], + "angle": 0, + "content": "[40] Jiaming Song, Arash Vahdat, Morteza Mardani, and Jan Kautz. Pseudoinverse-guided diffusion models for inverse" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "problems. In International Conference on Learning Representations, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.123, + 0.892, + 0.166 + ], + "angle": 0, + "content": "[41] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. Advances in neural information processing systems, 32, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.167, + 0.892, + 0.223 + ], + "angle": 0, + "content": "[42] Jian Sun, Zongben Xu, and Heung-Yeung Shum. Image super-resolution using gradient profile prior. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.225, + 0.892, + 0.295 + ], + "angle": 0, + "content": "[43] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 114-125, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.297, + 0.892, + 0.354 + ], + "angle": 0, + "content": "[44] Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. Deep image prior. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 9446-9454, 2018. 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.355, + 0.892, + 0.438 + ], + "angle": 0, + "content": "[45] Longguang Wang, Yingqian Wang, Xiaoyu Dong, Qingyu Xu, Jungang Yang, Wei An, and Yulan Guo. Unsupervised degradation representation learning for blind superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10581-10590, 2021. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.441, + 0.892, + 0.485 + ], + "angle": 0, + "content": "[46] Yinhuai Wang, Jiwen Yu, and Jian Zhang. Zero-shot image restoration using denoising diffusion null-space model. arXiv preprint arXiv:2212.00490, 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.485, + 0.892, + 0.542 + ], + "angle": 0, + "content": "[47] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.543, + 0.892, + 0.6 + ], + "angle": 0, + "content": "[48] Max Welling and Yee W Teh. Bayesian learning via stochastic gradient Langevin dynamics. In Proceedings of the 28th international conference on machine learning (ICML-11), pages 681–688, 2011. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.601, + 0.892, + 0.685 + ], + "angle": 0, + "content": "[49] Yu-Syuan Xu, Shou-Yao Roy Tseng, Yu Tseng, Hsien-Kai Kuo, and Yi-Min Tsai. Unified dynamic convolutional network for super-resolution with variational degradations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12496-12505, 2020. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.687, + 0.892, + 0.758 + ], + "angle": 0, + "content": "[50] Xunpeng Yi, Han Xu, Hao Zhang, Linfeng Tang, and Jiayi Ma. Diff-retinex: Rethinking low-light image enhancement with a generative diffusion model. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12302-12311, 2023. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.759, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[51] Zongsheng Yue, Qian Zhao, Jianwen Xie, Lei Zhang, Deyu Meng, and Kwan-Yee K. Wong. Blind image superresolution with elaborate degradation modeling on noise and kernel. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2128-2138, 2022. 1, 2, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.845, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[52] Roman Zeyde, Michael Elad, and Matan Protter. On single image scale-up using sparse-representations. In International conference on curves and surfaces, pages 711-730. Springer, 2010. 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "26055" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[53] Kai Zhang, Wangmeng Zuo, and Lei Zhang. Learning a single convolutional super-resolution network for multiple degradations. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3262-3271, 2018. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.472, + 0.219 + ], + "angle": 0, + "content": "[54] Kai Zhang, Luc Van Gool, and Radu Timofte. Deep unfolding network for image super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3217-3226, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.22, + 0.471, + 0.288 + ], + "angle": 0, + "content": "[55] Kai Zhang, Yawei Li, Wangmeng Zuo, Lei Zhang, Luc Van Gool, and Radu Timofte. Plug-and-play image restoration with deep denoiser prior. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(10):6360-6376, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.29, + 0.471, + 0.359 + ], + "angle": 0, + "content": "[56] Yulun Zhang, Kunpeng Li, Kai Li, Lichen Wang, Bineng Zhong, and Yun Fu. Image super-resolution using very deep residual channel attention networks. In Proceedings of the European conference on computer vision (ECCV), pages 286-301, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.36, + 0.47, + 0.43 + ], + "angle": 0, + "content": "[57] Zixiang Zhao, Haowen Bai, Yuzhhi Zhu, Jiangshe Zhang, Shuang Xu, Yulun Zhang, Kai Zhang, Deyu Meng, Radu Timofte, and Luc Van Gool. Ddfm: denoising diffusion model for multi-modality image fusion. arXiv preprint arXiv:2303.06840, 2023.1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.431, + 0.47, + 0.514 + ], + "angle": 0, + "content": "[58] Hongyang Zhou, Xiaobin Zhu, Jianqing Zhu, Zheng Han, Shi-Xue Zhang, Jingyan Qin, and Xu-Cheng Yin. Learning correction filter via degradation-adaptive regression for blind single image super-resolution. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12365-12375, 2023. 
2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.515, + 0.47, + 0.585 + ], + "angle": 0, + "content": "[59] Yuanzhi Zhu, Kai Zhang, Jingyun Liang, Jiezhang Cao, Bihan Wen, Radu Timofte, and Luc Van Gool. Denoising diffusion models for plug-and-play image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1219-1229, 2023. 1, 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.585 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "26056" + } + ] +] \ No newline at end of file diff --git a/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/eb377d27-ee40-49e4-9796-048cc8e1c35d_origin.pdf b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/eb377d27-ee40-49e4-9796-048cc8e1c35d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3f046427dfdb7cc0833b5b0bd6670c8dcfbf05c6 --- /dev/null +++ b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/eb377d27-ee40-49e4-9796-048cc8e1c35d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44e69f68dd9fb468d7480c592183148473830298c9ce1250a24417d0fd676818 +size 7019933 diff --git a/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/full.md b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/full.md new file mode 100644 index 0000000000000000000000000000000000000000..818b6659700852087f600b5ff62f34c6357d2fcc --- /dev/null +++ b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/full.md @@ -0,0 +1,478 @@ +# A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution + +Zhixiong Yang1 +Shuanghui Zhang1 + +Jingyuan Xia $^{1,*}$ +Zhen Liu + +Shengxi Li² +Yaowen Fu¹ + +Xinghua Huang $^{1}$ Yongxiang Liu $^{1}$ + +$^{1}$ College of Electronic Engineering, National University of Defense Technology, Changsha, China + $^{2}$ College of Electronic Engineering, Beihang University, Beijing, China +yzx21@nudt.edu.cn, j.xia10@nudt.edu.cn + +# Abstract + +Deep learning-based methods have achieved significant successes on solving the blind super-resolution (BSR) problem. However, most of them request supervised pretraining on labelled datasets. This paper proposes an unsupervised kernel estimation model, named dynamic kernel prior (DKP), to realize an unsupervised and pre-training-free learning-based algorithm for solving the BSR problem. DKP can adaptively learn dynamic kernel priors to realize real-time kernel estimation, and thereby enables superior HR image restoration performances. This is achieved by a Markov chain Monte Carlo sampling process on random kernel distributions. The learned kernel prior is then assigned to optimize a blur kernel estimation network, which entails a network-based Langevin dynamic optimization strategy. These two techniques ensure the accuracy of the kernel estimation. DKP can be easily used to replace the kernel estimation models in the existing methods, such as Double-DIP and FKP-DIP, or be added to the off-the-shelf image restoration model, such as diffusion model. In this paper, we incorporate our DKP model with DIP and diffusion model, referring to DIP-DKP and Diff-DKP, for validations. 
Extensive simulations on Gaussian and motion kernel scenarios demonstrate that the proposed DKP model can significantly improve the kernel estimation with comparable runtime and memory usage, leading to state-of-the-art BSR results. The code is available at https://github.com/XYLGroup/DKP. + +# 1. Introduction + +Deep learning provides a new avenue for solving the blind super-resolution (BSR) problem, which aims to reconstruct + +Zhixiong Yang and Jingyuan Xia contributed equally to this work (*Corresponding author: Jingyuan Xia). This work is supported by National Natural Science Foundation of China, projects 61921001, 62131020, 62322121 and 62171448, and the NSFDYS of Hunan 2022J110067. + +high-resolution (HR) images from the low-resolution (LR) observations with unknown blur kernels, and is known to be highly non-convex and ill-posed. To alleviate the non-convexity and ill-posedness, most of learning-based BSR methods incorporate image priors via supervised learning based on paired LR-HR samples. However, pre-defined labeled training datasets are expensive, time-consuming, and even not feasible in specific scenarios, such as for high speed targets (e.g., satellites, aircraft) and medical images (e.g., beating heart). Thus, unsupervised learning-based solutions are highly demanded for BSR problem. + +The existing BSR methods can be roughly divided into model-based and learning-based strategies in terms of the priors adopted to provide performance guarantee. Model-based approaches [19, 21, 33, 51] typically adopt hand-designed and explicit constraints as regularizations on image properties, or expert knowledge of the blur kernel. Meanwhile, learning-based BSR methods [12, 16, 20, 27-29, 49, 53, 54, 56] aim to train an end-to-end network with paired LR-HR image samples to leverage data priors for boosting performances. However, these methods highly demand the data and need to undergo throughing pre-training before applications, leading to limited generalization ability towards varying blur kernels. To alleviate this issue, quite a few methods [5, 13, 40, 50, 57, 59] substitute the cumbersome training in advance by a well-trained diffusion model with significantly less fine-tuning samples in an off-the-shelf fashion. On the other side, a slice of works [3, 17, 25, 34, 39, 51] propose to replace the HR image data priors by kernel priors, which are more substantial, economical and efficient to be trained. However, both of these advances are underlying the supervised learning scheme with necessity of training on labeled datasets, still hindering the flexibility and generalization ability towards the BSR tasks with different kernels and unknown HR ground truths. + +In this paper, we propose a dynamic kernel prior (DKP) generation model that can be plug-in with the majority of image restoration (IR) models, to solve BSR problem in an + +unsupervised way. The proposed DKP model consists of two modules: random kernel sampling (RKS) module and prior kernel estimation (PKE) module. In the RKS module, a Markov Chain Monte Carlo (MCMC) sampling strategy on kernel distributions iteratively generates random kernels as kernel priors, which are then assigned to the PKE module. The PKE module is employed to estimate the blur kernel with respect to the kernel prior generated from the RKS module, the observed LR input and estimated HR image from the IR model. The estimated blur kernel is then assigned to an adopted IR model for the HR image restoration. 
Along with the alternative solving processes, the MCMC process in RKS module converges to a desired kernel distribution with respect to the LR observation and the estimated HR image to guarantee a rational kernel prior. Meanwhile, a network-based Langevin dynamics (NLD) paradigm is proposed to optimize the kernel estimator in our PKE module with respect to the RKS output kernel prior and the data consistency based on the LR image reconstruction error. The RKS module realizes an unsupervised kernel prior learning. The PKE module achieves promising kernel estimation via the NLD update scheme, which further alleviates the non-convexity and ill-posedness in the view of optimization strategy. In this way, the DKP model is capable of providing the plug-and-play kernel estimation without training in advance on paired LR-HR samples, and is flexible to be applied to the existing IR models for solving the BSR problem. + +Two applications are proposed to validate the feasibility and performance of our DKP model: deep image prior (DIP) [44] and diffusion model [14] adopted as the IR model, referring to DIP-DKP and Diff-DKP, respectively. For the DIP-DKP, we simultaneously optimize the parameters of DIP and DKP models from scratch during the alternative solution process. For the Diff-DKP, the adopted diffusion model is off-the-shelf from [14] and is applied as the fixed HR image restorer. The DKP model is optimized from scratch as well. Extensive simulation results show that the DIP-DKP achieves comparable performance than the existing methods, while the Diff-DKP achieves the state-of-the-art performance in both of Gaussian and motion kernel scenarios. The main contributions are summarized as follows: + +- The RKS module is proposed to generate a rational kernel prior from the MCMC sampling on random kernel distributions. This way, an unsupervised kernel prior learning is achieved to substitute the pre-training phase. +- In PKE module, the NLD is proposed to optimize the kernel estimator, ensuring good convergence and concise estimation of the blur kernel from the perspective of optimization strategy. +- The proposed DKP model enjoys the ease use on the popular IR models without the necessity of pre-training/re + +training towards different scenarios. The two applications, i.e., DIP-DKP and Diff-DKP, validate the state-of-the-art performance and excellent flexibility of our DKP model. + +# 2. Related Work + +To alleviate the non-convexity and ill-posedness, early model-based approaches [11, 31, 33, 37] typically construct image priors in explicit formulations, such as the total variation (TV) [36], gradient profile [42], hyper-Laplacian [21] and sparsity [19]. In contrast, learning-based methods [7, 12, 16–18, 20, 28, 29, 45, 49, 53, 56] typically train an end-to-end network on labelled image samples to incorporate data priors. Wang et al. [45] proposed a CNN-based deep network with degradation feature representation module to learn image degradation feature from supervised training on paired LR-HR images. Li et al. [24] proposed a transformer network to learn multi-scale image feature via self-attention mechanisms. To reduce the high training costs of time and data, recent advances [5, 38, 40, 46, 50, 57, 59] are proposed to solve BSR problem by an off-the-shelf diffusion model [14]. Lin et al. [26] proposed to partially fine-tune the parameters of diffusion model with significantly less labeled images. Wang et al. 
[46] further formulated a diffusion-based BSR algorithm that iteratively solves super-resolution tasks with the given kernel without re-training. Different from the end-to-end models that are trained on paired image samples, recent methods tend to resolve BSR problem via pre-training on kernel datasets [25] or pre-defined kernel priors [51]. An alternative framework between the kernel estimation and image restoration is typically adopted in these methods [3, 10, 12, 39, 44, 58], such as double-deep image prior (Double-DIP) [34]. On the basis of this framework, Liang et al. [25] established a flow-based kernel prior (FKP) network that is pre-trained on labeled kernels to enroll kernel priors while the HR image is estimated by DIP network in an online fashion. Yue et al. [51] proposed a hand-crafted kernel prior model to improve the robustness towards the Gaussian kernel scenario. Despite the fact that these methods approximately bring down the data requirements and training costs, the necessity of training in advances or hand-crafted design still limits the flexibility and generalization ability towards the varying kernel scenarios (Gaussian and motion) without ground truths. + +# 3. Dynamic Kernel Prior (DKP) + +Problem Formulation. The degradation model of BSR problem is commonly expressed as follows, + +$$ +\boldsymbol {y} = (\boldsymbol {x} \otimes \boldsymbol {k}) \downarrow_ {s} + \boldsymbol {n}, \tag {1} +$$ + +where $y$ denotes the LR image, $\pmb{x}$ denotes the HR image, $\otimes$ indicates the convolution operation, $\downarrow_{s}$ denotes the down + +![](images/ffb76d946744169a154063112a00aed614d570b2867d0b6b869d24ea7f8d968e.jpg) +Figure 1. The overview of the RKS module. The MCMC simulation can generate the random kernel $\pmb{k}_p^t$ from random kernel distributions $\{\pmb{k}_r^l\}_{l=1}^L$ as the kernel prior with respect to the current model parameters $\pmb{x}^{t-1}, \pmb{y}$ . + +sampling operation with scale factor $s$ , and $k$ denotes the blur kernel. The BSR problem (1) can be formulated as a maximum a posteriori (MAP) problem: + +$$ +\max _ {\boldsymbol {x}, \boldsymbol {k}} p (\boldsymbol {y} | \boldsymbol {x}, \boldsymbol {k}) p (\boldsymbol {x}) p (\boldsymbol {k}), \tag {2} +$$ + +where $p(\boldsymbol{y}|\boldsymbol{x},\boldsymbol{k})$ denotes the likelihood of the observed LR image $\boldsymbol{y}$ , $p(\boldsymbol{x})$ and $p(\boldsymbol{k})$ are the HR image and kernel priors, respectively. Image priors [8, 14, 41, 44, 55] have been well-designed and fully-studied in the past decade. In contrast, researches on kernel priors $p(\boldsymbol{k})$ are in the ascendant, as kernel samples are less expensive to obtain and the training phase is more efficient [9, 12, 25, 51, 53]. + +In this paper, we propose the DKP model, which comprises two modules: RKS and PKE. The RKS module is employed to generate rational kernel priors, which are assigned to the PKE module to support the estimation of blur kernel. Let $t = 1,2,\dots ,T$ denote the alternative iterations among these two modules and the adopted IR model, $\pmb{k}^t$ and $\pmb{x}^{t}$ denote the estimated blur kernel and HR image at the $t^{th}$ iteration, respectively. The details of DKP model is given below. + +RKS module. At the $t^{th}$ iteration, the RKS module plays the key role of generating a rational kernel prior $\pmb{k}_p^t$ from the MCMC simulation. The overview diagram is shown in Fig. 1. 
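For reference, a minimal sketch of the degradation operator in Eq. (1) is given below (assuming a single-channel NumPy image and SciPy convolution; this is an illustration, not the released implementation), since the same operator reappears in the MCMC loss of Eq. (7) and the data-consistency terms later on.

```python
# Illustrative sketch of Eq. (1): y = (x ⊗ k) ↓_s + n.
# Assumes a single-channel HR image `x` of shape (H, W) and a normalized kernel `k`.
import numpy as np
from scipy.ndimage import convolve


def degrade(x, k, s, noise_std=0.0):
    """Blur x with kernel k, down-sample by scale factor s, add Gaussian noise."""
    blurred = convolve(x, k, mode="reflect")                  # x ⊗ k
    lr = blurred[::s, ::s]                                    # ↓_s : keep every s-th pixel
    if noise_std > 0:
        lr = lr + noise_std * np.random.randn(*lr.shape)      # + n
    return lr
```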
Let $p(\pmb{k}_r|\pmb{\Sigma}_r)$ denotes that the random kernel $\pmb{k}_r$ is conditioned by the latent variable $\pmb{\Sigma}_r$ , in which $p(\pmb{\Sigma}_r)$ determines the category of blur kernel. Then the distribution of the kernel prior $\pmb{k}_p^t$ can be formulated as + +$$ +p \left(\boldsymbol {k} _ {p} ^ {t}\right) = \int_ {\boldsymbol {\Sigma} _ {r}} p \left(\boldsymbol {k} _ {r} \mid \boldsymbol {\Sigma} _ {r}\right) p \left(\boldsymbol {\Sigma} _ {r}\right) d \boldsymbol {\Sigma} _ {r}. \tag {3} +$$ + +Here, $\Sigma_r$ is the parameter of kernel (e.g., the variance of Gaussian kernel or the length of motion kernel). It is not easy to sample all the possible $\Sigma_r$ , and therefore, we convert (3) into the Monte Carlo simulation in the following form: + +$$ +p \left(\boldsymbol {k} _ {p} ^ {t}\right) \approx \sum_ {l = 1} ^ {L} p \left(\boldsymbol {k} _ {r} ^ {l} \mid \boldsymbol {\Sigma} _ {r} ^ {l}\right) \boldsymbol {\Sigma} _ {r} ^ {l}, \tag {4} +$$ + +where $l$ denotes the index of the Monte Carlo sampling, $\pmb{\Sigma}_r^l$ denotes the $l^{th}$ sampled latent variable, $\pmb{k}_r^l$ is the $l^{th}$ sampled kernel, conditioned on the $\pmb{\Sigma}_r^l$ . + +To ensure the rationality of randomly generated kernels towards the BSR problem, as well as the optimization during the iterations, the MCMC simulation is proposed as follows, + +$$ +p \left(\boldsymbol {k} _ {p} ^ {t} \mid \boldsymbol {x} ^ {t - 1}, \boldsymbol {y}\right) \approx \sum_ {l = 1} ^ {L} p \left(\boldsymbol {k} _ {r} ^ {l} \mid \boldsymbol {x} ^ {t - 1}, \boldsymbol {y}\right) p \left(\boldsymbol {k} _ {r} ^ {l} \mid \boldsymbol {\Sigma} _ {r} ^ {l}\right) \boldsymbol {\Sigma} _ {r} ^ {l}, \tag {5} +$$ + +where $p(\pmb{k}_r^l|\pmb{x}^{t-1},\pmb{y})$ denotes the kernel weight $\omega^l$ , that is conditioned on the observed LR image $\pmb{y}$ and the estimated HR image $\pmb{x}^{t-1}$ with respect to the MCMC loss $\mathcal{L}_{MCMC}$ in the following form + +$$ +\omega^ {l} = p \left(\boldsymbol {k} _ {r} ^ {l} \mid \boldsymbol {x} ^ {t - 1}, \boldsymbol {y}\right) \propto \frac {1}{\mathcal {L} _ {M C M C} ^ {l}}, \tag {6} +$$ + +where + +$$ +\mathcal {L} _ {M C M C} ^ {l} = \left\| \boldsymbol {y} - \left(\boldsymbol {x} ^ {t - 1} \otimes \boldsymbol {k} _ {r} ^ {l}\right) \downarrow_ {s} \right\| _ {F} ^ {2} + \delta , \tag {7} +$$ + +$\delta$ is the noise to prevent $\mathcal{L}_{MCMC}^l = 0$ . In this way, $\pmb{k}_p^t$ can be formulated as + +$$ +\boldsymbol {k} _ {p} ^ {t} = \frac {1}{L} \sum_ {l = 1} ^ {L} \omega^ {l} \boldsymbol {k} _ {r} ^ {l}. \tag {8} +$$ + +The obtained $\pmb{k}_p^t$ is then assigned to the PKE module as a rational kernel prior, which will be introduced next. + +We note that the obtained kernel prior $k_{p}^{t}$ is an expectation of $L$ times sampling according to (4). The number of the sampling times $L$ plays the role of annealing/tempering in MCMC simulations as a hyper-parameter. Details of the tuning on $L$ will be given in Section 5.1. + +PKE module. In our DKP model, the PKE module is employed to estimate the blur kernel by a lightweight network $\mathrm{G}_k$ with parameters $\phi_k$ as follows + +$$ +\boldsymbol {k} ^ {t} = \mathbf {G} _ {\boldsymbol {k}} \left(\phi_ {\boldsymbol {k}} ^ {t}\right). \tag {9} +$$ + +The network $\mathbf{G}_k$ takes a fixed noise that is randomly initialized as input, and we neglect it for demonstration convenience as $\phi_k^t$ are the main variables. 
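Before turning to how $\mathrm{G}_k$ is optimized, the sketch below makes the RKS computation of Eqs. (5)-(8) concrete; `sample_random_kernel` is a hypothetical stand-in for drawing $\pmb{k}_r^l$ from the chosen random kernel distribution, and `degrade` refers to the Eq. (1) sketch above. It is an illustration of the sampling logic, not the released code.

```python
import numpy as np


def rks_kernel_prior(x_est, y, s, sample_random_kernel, L=5, delta=1e-6):
    """One RKS step: return the kernel prior k_p^t of Eq. (8)."""
    kernels, weights = [], []
    for _ in range(L):                                    # Monte Carlo samples l = 1..L
        k_r = sample_random_kernel()                      # k_r^l ~ p(k_r | Sigma_r^l)
        residual = y - degrade(x_est, k_r, s)             # y - (x^{t-1} ⊗ k_r^l) ↓_s
        mcmc_loss = np.sum(residual ** 2) + delta         # Eq. (7)
        kernels.append(k_r)
        weights.append(1.0 / mcmc_loss)                   # Eq. (6): omega^l ∝ 1 / loss
    k_p = sum(w * k for w, k in zip(weights, kernels)) / L  # Eq. (8)
    return k_p / k_p.sum()   # renormalizing to unit sum is a common extra convention
```

Kernels that better explain the LR observation receive larger weights, so the averaged prior $\pmb{k}_p^t$ drifts toward plausible kernels as the estimated HR image improves over iterations.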
+ +This kernel estimator $\mathbf{G}_k$ is optimized in the NLD paradigm with respect to the data-consistency term and kernel prior term, as shown in Fig. 2. The data-consistency term is computed by the LR image reconstruction error, which is given by + +$$ +\log p \left(\phi_ {\boldsymbol {k}} ^ {t - 1} \mid \boldsymbol {x} ^ {t - 1}, \boldsymbol {y}\right) = - \| \boldsymbol {y} - \left(\boldsymbol {x} ^ {t - 1} \otimes \mathbf {G} _ {\boldsymbol {k}} \left(\phi_ {\boldsymbol {k}} ^ {t - 1}\right)\right) \downarrow_ {s} \| _ {F} ^ {2}. \tag {10} +$$ + +The kernel prior term is computed based on the difference between the network-estimated kernel $\mathbf{G}_k(\boldsymbol{\phi}_k^{t - 1})$ and the + +![](images/c828cc9c92064405ff6c0566c475b9fc2592e3e1b74d7eebe5dd14dd0f6e7b61.jpg) +Figure 2. The overview of the PKE module. The blur kernel $\pmb{k}^t$ is estimated by the network $G_{k}$ , whose parameters $\phi_{k}$ are updated by the kernel prior term from RKS module and data-consistency term, based on the LR image reconstruction error. + +random-sampled kernel $k_{p}^{t}$ from the RKS module as follows, + +$$ +\log p \left(\boldsymbol {\phi} _ {\boldsymbol {k}} ^ {t - 1} \mid \boldsymbol {k} _ {p} ^ {t}\right) = - \| \mathrm {G} _ {\boldsymbol {k}} \left(\boldsymbol {\phi} _ {\boldsymbol {k}} ^ {t - 1}\right) - \boldsymbol {k} _ {p} ^ {t} \| _ {F} ^ {2}. \tag {11} +$$ + +By combining (10) and (11), the network parameters $\phi_{k}^{t - 1}$ can be updated as follows, + +$$ +\begin{array}{l} \phi_ {\boldsymbol {k}} ^ {t} = \phi_ {\boldsymbol {k}} ^ {t - 1} + \frac {\delta^ {2}}{2} \frac {\partial \log p (\phi_ {\boldsymbol {k}} ^ {t - 1} | \boldsymbol {x} ^ {t - 1} , \boldsymbol {y})}{\partial \phi_ {\boldsymbol {k}} ^ {t - 1}} \\ + \delta \frac {\partial \log p \left(\phi_ {\boldsymbol {k}} ^ {t - 1} \mid \boldsymbol {k} _ {p} ^ {t}\right)}{\partial \phi_ {\boldsymbol {k}} ^ {t - 1}}, \tag {12} \\ \end{array} +$$ + +where the second term is the data-consistency update, the third term is the additional update based on the random kernel $\pmb{k}_p^t$ . + +It has been proved to be effective that the random noise-based disturbance can prevent being trapped into bad local modes for the variable update in Langevin dynamics [2, 32, 48, 51]. More details of Langevin dynamics refer to the supplementary material. At this stage, the random kernel sample from the RKS module can be regarded as the random "noise" for the $\phi_{k}^{t - 1}$ update. Eq. (12) can be reformulated as follows, + +$$ +\phi_ {k} ^ {t} = \phi_ {k} ^ {t - 1} + \frac {\delta^ {2}}{2} \frac {\partial \log p \left(\phi_ {k} ^ {t - 1} \mid \boldsymbol {x} ^ {t - 1} , \boldsymbol {y}\right)}{\partial \phi_ {k} ^ {t - 1}} + \zeta_ {\phi_ {k}} ^ {t - 1}, \tag {13} +$$ + +where $\zeta_{\phi_k}^{t - 1} = \frac{\partial\log p(\phi_k^{t - 1}|k_p^t)}{\partial\phi_k^{t - 1}}$ denotes the parameters correlated Langevin dynamics disturbance. + +The pipeline of our DKP at the $t^{th}$ iteration is given in Algorithm 1. The whole DKP model is implemented in a plug-and-play style, in which training in advance is not required. Besides, the random kernels from the RKS module are self-adaptively sampled through the MCMC simulation, without the need of labeled training data. We should also note that the DKP model only brings negligible runtime and memory cost in applications, as the adopted network $\mathbf{G}_k$ is typically lightweight. This leads to high flexibility and low computational complexity. 
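As a concrete reference for line 9 of Algorithm 1 (the NLD update of Eqs. (12)-(13)), a PyTorch-style sketch of one update step is shown below; the kernel network `G_k`, its fixed noise input `z_k`, and a differentiable `degrade_torch` operator are assumed placeholders rather than the released code.

```python
import torch


def nld_step(G_k, z_k, x_est, y, k_p, s, delta=1e-2):
    """One network-based Langevin dynamics update of the kernel network parameters."""
    G_k.zero_grad()
    k_hat = G_k(z_k)                                                     # k = G_k(phi_k)
    data_logp = -torch.sum((y - degrade_torch(x_est, k_hat, s)) ** 2)    # Eq. (10)
    prior_logp = -torch.sum((k_hat - k_p) ** 2)                          # Eq. (11)
    # Ascend the weighted log-terms of Eq. (12) by minimizing the negated objective.
    objective = -(0.5 * delta ** 2 * data_logp + delta * prior_logp)
    objective.backward()
    with torch.no_grad():
        for p in G_k.parameters():
            if p.grad is not None:
                p -= p.grad      # phi_k += (delta^2/2)·grad(data) + delta·grad(prior)
    return G_k(z_k).detach()     # refreshed kernel estimate k^t
```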
These three merits promise our DKP the convenience of being applied to + +# Algorithm 1: The proposed DKP model. + +1 Given: $x^{t - 1},y$ and $\phi_{k}^{t - 1}$ +2 % Random Kernel Sampling (RKS) Module +3 Sample random kernels $\{k_r^l\}_{l = 1}^L$ via MC. +4 for $l\gets 0,1,\ldots ,L$ do + +$$ +\begin{array}{c c} \mathbf {5} & \omega^ {l} = \frac {1}{\mathcal {L} _ {M C M C} ^ {l}}, \mathcal {L} _ {M C M C} ^ {l} = \| \boldsymbol {y} - (\boldsymbol {x} ^ {t - 1} \otimes \boldsymbol {k} _ {r} ^ {l}) \downarrow_ {s} \| _ {F} ^ {2} + \delta \end{array} +$$ + +6 end + +7 $\pmb{k}_p^t = \frac{1}{L}\sum_{l = 1}^L\omega^l\pmb{k}_r^t$ + +8 % Prior Kernel Estimation (PKE) Module + +$$ +\phi_ {k} ^ {t} = \phi_ {k} ^ {t - 1} + \frac {\delta^ {2}}{2} \frac {\partial \log p \left(\phi_ {k} ^ {t - 1} | \boldsymbol {x} ^ {t - 1} , \boldsymbol {y}\right)}{\partial \phi_ {k} ^ {t - 1}} + \delta \frac {\partial \log p \left(\phi_ {k} ^ {t - 1} | \boldsymbol {k} _ {p} ^ {t}\right)}{\partial \phi_ {k} ^ {t - 1}} +$$ + +10 Output: $\pmb{k}^t = \mathrm{G}_k(\phi_k^t)$ . + +![](images/33ffb3936f668692c79d7b7e5149db888b139a49aa0a790243c3d47cf877110e.jpg) +Figure 3. The overview of our DKP-based BSR method. + +the existing image restoration approaches, including the untrained DIP model and off-the-shelf pre-trained diffusion model, which will be detailed in the next section. + +# 4. DKP-based BSR Methods + +# 4.1. Pipeline + +The overview of the proposed DKP-based BSR method is illustrated in Fig. 3. The DKP model (gray box), including RKS module (blue box), PKE module (lilac box), and IR model (red box) alternatively optimize the blur kernel and refine the HR image, respectively. For each iteration, the estimated HR image $\boldsymbol{x}^{t-1}$ and LR image $\boldsymbol{y}$ are first fed to RKS module $f_{\mathrm{RKS}}$ to generate kernel prior + +$$ +\boldsymbol {k} _ {p} ^ {t} = f _ {\mathrm {R K S}} \left(\boldsymbol {x} ^ {t - 1}, \boldsymbol {y}\right), \tag {14} +$$ + +where $\pmb{x}^{t - 1}$ denotes the estimated HR image from the last IR model output. Then, the kernel prior $\pmb{k}_p^t$ will be assigned to the PKE module $f_{\mathrm{PKE}}$ , which estimates kernel as follows, + +$$ +\boldsymbol {k} ^ {t} = f _ {\mathrm {P K E}} \left(\boldsymbol {x} ^ {t - 1}, \boldsymbol {y}, \boldsymbol {k} _ {p} ^ {t}\right), \tag {15} +$$ + +where $\pmb{k}^t$ is the estimated kernel at the $t^{th}$ kernel estimation iteration, which will be assigned to the IR model. The $t^{th}$ HR image $\pmb{x}^t$ can be estimated by the IR model as follows + +$$ +\boldsymbol {x} ^ {t} = f _ {\mathrm {I R}} \left(\boldsymbol {x} ^ {t - 1}, \boldsymbol {y}, \boldsymbol {k} ^ {t}\right), \tag {16} +$$ + +where $f_{\mathrm{IR}}$ denotes the adopted IR model. In this paper, two representative IR models, DIP [44] and diffusion model [14], are applied to evaluate the DKP-based BSR solutions, referring to DIP-DKP and Diff-DKP, which are introduced in the sequel. + +Algorithm 2: The proposed DIP-DKP. 
+1 Given: $y,\phi_x^0,\phi_{DKP}^0,x^0 = G_x(\phi_x^0)$ +2 for $t\gets 0,I,\dots,T - I$ do +3 $\%$ DKP-based kernel estimation stage +4 $\phi_{DKP}^{t + 1} = \phi_{DKP}^{t} + \frac{\delta^{2}}{2}\frac{\partial\log p(\phi_{DKP}^{t}|x^{t},y)}{\partial\phi_{DKP}^{t}} +\delta \frac{\partial\log p(\phi_{DKP}^{t}|k_{p}^{t})}{\partial\phi_{DKP}^{t}}$ +5 $\pmb {k}^{t + 1} = \mathrm{G}_{DKP}(\phi_{DKP}^{t + 1})$ +6 $\%$ DIP-based image restoration stage +7 $\phi_x^{t + 1} = \phi_x^t +\gamma_x^t\frac{\partial\log p(\phi_x^t|y,k^t)}{\partial\phi_x^t}$ +8 $\pmb{x}^{t + 1} = \mathbf{G}_{\pmb{x}}(\pmb{\phi}_{\pmb{x}}^{t + 1})$ +9 end +10 Output: $x^T,k^T$ + +# 4.2. The proposed DIP-DKP + +DIP-based Image Restoration. DIP [44] is designed for capturing low-level image statistics, and estimates HR image $\boldsymbol{x} = \mathrm{G}_{\boldsymbol{x}}(\boldsymbol{z}_{\boldsymbol{x}}, \phi_{\boldsymbol{x}})$ from a fixed random noise input $z_{x}$ (we omit $z_{x}$ in the rest of this paper for demonstration convenience). A typical formulation of DIP-based BSR methods [25, 34] is given as follows + +$$ +\left\{ \begin{array}{l} \phi_ {x} ^ {*}, \phi_ {k} ^ {*} = \underset {\phi_ {x}, \phi_ {k}} {\arg \min } \| \boldsymbol {y} - \\ \quad \left(\mathrm {G} _ {\boldsymbol {x}} \left(\phi_ {\boldsymbol {x}}\right) \otimes \mathrm {G} _ {\boldsymbol {k}} \left(\phi_ {\boldsymbol {k}}\right)\right) \downarrow_ {s} \| _ {F} ^ {2}, \\ \boldsymbol {x} ^ {*} = \mathrm {G} _ {\boldsymbol {x}} \left(\phi_ {\boldsymbol {x}} ^ {*}\right), \boldsymbol {k} ^ {*} = \mathrm {G} _ {\boldsymbol {k}} \left(\phi_ {\boldsymbol {k}} ^ {*}\right). \end{array} \right. \tag {18} +$$ + +Double-DIP [34] and FKP-DIP [25] have exploited the effectiveness towards the BSR problem. However, the kernel prior of $\mathrm{G}_k(\phi_k^*)$ either adopts the untrained network with limited performances on the kernel estimation [34], or pre-trained kernel network, referring to FKP [25], that requests supervised training in advance. As shall be shown in experiments, pre-trained networks do not perform well to generate reasonable kernel estimations when the kernel categories vary. + +Proposed DIP-DKP. We replace the untrained or pretrained networks for kernel priors in the existing DIP-based alternative framework by the proposed DKP model, which we refer to as DIP-DKP. The objective of our proposed DIP-DKP can be formulated as follows, + +$$ +\left\{ \begin{array}{l} \phi_ {\boldsymbol {x}} ^ {*}, \phi_ {D K P} ^ {*} = \underset {\phi_ {\boldsymbol {x}}, \phi_ {D K P}} {\arg \min } \| \boldsymbol {y} - \left(\mathrm {G} _ {D K P} \left(\phi_ {D K P}\right) \otimes \right. \\ \quad \left. \mathrm {G} _ {\boldsymbol {x}} \left(\phi_ {\boldsymbol {x}}\right)\right) \downarrow_ {s} \| _ {F} ^ {2} + \| \mathrm {G} _ {D K P} \left(\phi_ {D K P}\right) - \boldsymbol {k} _ {p}) \| _ {F} ^ {2}, \\ \boldsymbol {x} ^ {*} = \mathrm {G} _ {\boldsymbol {x}} \left(\phi_ {\boldsymbol {x}} ^ {*}\right), \boldsymbol {k} ^ {*} = \mathrm {G} _ {D K P} \left(\phi_ {D K P} ^ {*}\right), \end{array} \right. \tag {20} +$$ + +where $\mathrm{G}_{DKP}(\phi_{DKP})$ is the kernel network of the proposed DKP model. + +The overall solution procedure of the proposed DIP-DKP is given in Algorithm 2. At each $t^{th}$ iteration, the kernel $\pmb{k}^t$ is estimated in the DKP-based kernel estimation stage and then is assigned to the DIP model for HR image + +Algorithm 3: The proposed Diff-DKP. 
+1 Given: $\pmb{y}$ $\phi_{DKP}^{T},S_{\theta}$ and $\pmb{x}_T\sim \mathcal{N}(\pmb {0},\pmb {I})$ +2 for $t\gets T,T - I,\dots ,1$ do +3 $\%$ Diffusion-based image restoration process +4 $\pmb{x}_{0|t} = \frac{1}{\sqrt{\alpha t}} (\pmb{x}_t - \mathcal{S}_\theta (\pmb{x}_t,t)\sqrt{1 - \overline{\alpha}^t})$ +5 $\%$ DKP incorporated data consistency refinement +6 $\phi_{DKP}^{t - 1} = \phi_{DKP}^{t} + \frac{\delta^{2}}{2}\frac{\partial\log p(\phi_{DKP}^{t}|\pmb{x}_{0|t},\pmb{y})}{\partial\phi_{DKP}^{t}} +\delta \frac{\partial\log p(\phi_{DKP}^{t}|\pmb{k}_{p}^{t})}{\partial\phi_{DKP}^{t}}$ +7 $\pmb{k}^{t - 1} = G_{DKP}(\phi_{DKP}^{t - 1})$ +8 $\hat{\pmb{x}}_{0|t} = \pmb{x}_{0|t} + \gamma_{\pmb{x}}^{t}\frac{\partial\log p(\pmb{x}_{0|t}|\pmb{y},\pmb{k}^{t - 1})}{\partial\pmb{x}_{0|t}}$ +9 $\pmb{x}_{t - 1}\sim p(\pmb{x}_{t - 1}|\pmb{x}_t,\hat{\pmb{x}}_{0|t})$ +10 end +11 Output: $\pmb{x}_0,\pmb{k}^0$ + +restoration in the forward propagation. In the back propagation, the parameters of DIP and DKP, i.e., $\phi_{x}$ and $\phi_{DKP}$ , are updated while solving the BSR problem via an unsupervised inference. With DKP, DIP-DKP realizes an adaptive kernel learning along the convergence trajectory of the BSR objective function, enabling accurate and dynamic kernel estimation. Therefore, without expensive labeled data and long training time in advance, DIP-DKP can estimate HR image and blur kernel simultaneously in a plug-and-play style. + +# 4.3.Diff-DKP + +Original DDPM Inference Process. Denoising diffusion probabilistic models (DDPM) [14] defines a T-step forward process to add noise to data and a T-step reverse process to restore desired data from the noise. When an off-the-shelf DDPM $S_{\theta}$ is applied to solve image restoration problem, the reverse process is implemented as inference process to estimate the high quality image as follows, + +$$ +\left\{ \begin{array}{l} \boldsymbol {x} _ {0 | t} = \frac {1}{\sqrt {\bar {\alpha} ^ {t}}} \left(\boldsymbol {x} _ {t} - S _ {\boldsymbol {\theta}} \left(\boldsymbol {x} _ {t}, t\right) \sqrt {1 - \bar {\alpha} ^ {t}}\right), \\ \boldsymbol {x} _ {t - 1} \sim p \left(\boldsymbol {x} _ {t - 1} \mid \boldsymbol {x} _ {t}, \boldsymbol {x} _ {0 | t}\right), \end{array} \right. \tag {21} +$$ + +where $\pmb{x}_{0|t}$ denotes the estimated HR image $\pmb{x}_0$ at the $t^{th}$ step, and $\overline{\alpha}^t$ is the hyper-parameter. To ensure that HR images $\pmb{x}_0 \sim q(\pmb{x})$ can be reconstructed from random noise $\pmb{x}_T \sim \mathcal{N}(\pmb{0},\pmb{I})$ , the existing methods typically re-train [38] or fine-tune [50] the DDPM model via supervised learning on LR-HR datasets, or provide ground truth kernel [46] to enroll task-specific knowledge for convergence guarantee. However, the performance of DDPM is unstable, even when trained by a large number of labeled dataset. + +Proposed Diff-DKP. The instability of DDPM mainly comes from the training process that involves multiple image processing tasks. In this case, the off-the-shelf diffusion model cannot concentrate on BSR objective, thus leading to + +image distortion and content mismatch. To alleviate this issue, the proposed Diff-DKP incorporates the DKP model to provide task-specific data-consistency knowledge on the basis of the vanilla DDPM reverse iterations. 
Specifically, an external DKP incorporated data consistency refinement of $\pmb{x}_{0|t}$ is inserted between (21) and (22), given by + +$$ +\hat {\boldsymbol {x}} _ {0 | t} = \boldsymbol {x} _ {0 | t} + \gamma_ {\boldsymbol {x}} ^ {t} \frac {\partial \log p (\boldsymbol {x} _ {0 | t} | \boldsymbol {y} , \boldsymbol {k} ^ {t})}{\partial \boldsymbol {x} _ {0 | t}}, \tag {23} +$$ + +where $\gamma_{\pmb{x}}^{t}$ is the update step, and + +$$ +\log p \left(\boldsymbol {x} _ {0 | t} \mid \boldsymbol {y}, \boldsymbol {k} ^ {t}\right) = - \| \boldsymbol {y} - \left(\boldsymbol {x} _ {0 | t} \otimes \boldsymbol {k} ^ {t}\right) \downarrow_ {s} \| _ {F} ^ {2}, \tag {24} +$$ + +which enables the inference process to converge to the right direction along with the data-consistent solution. + +The overview of the Diff-DKP algorithm is presented in Algorithm 3. Let $t = T, T - 1, \ldots, 1$ denote the index of the diffusion reverse step. At each step, the diffusion model first estimates the $x_{0|t}$ . Then, the DKP model adaptively generates kernel prior with respect to the latest $x_{0|t}$ , while $x_{0|t}$ is further updated with respect to the data consistency Eq. (24), thus, ensuring the inference process is underlying the BSR objective. It is noteworthy to point out that the parameters of the diffusion model are fixed and only the parameters of lightweight kernel estimator network are optimized in the inference process. + +In this way, the off-the-shelf diffusion model plays the role of HR image estimator, while the estimated HR image is further refined by the BSR task specific prior knowledge, referring to Eq. (23). Different from those methods that incorporate prior knowledge of BSR task via supervised re-training/fine-tuning, Diff-DKP behaves a plug-and-play scheme, thus without data demands and training cost before implementation. + +# 5. Experiments + +# 5.1. Experimental Setup + +Data Preparation. Following the widely adopted kernel assumption [25, 35, 45, 51], we conduct the experiments on anisotropic Gaussian kernels and motion kernels, which are shown in Fig. 4. The kernel sizes are set to $(4s + 3)\times (4s + 3)$ . For the Gaussian kernel, the width ranges are set to $[0.175s, 2.5s]$ , and the rotation angle range is set to $[0,\pi]$ , with a scale factor $s = 4$ , respectively. For the motion kernel, we adopt random motion kernel generation method proposed by [22], which simulates realistic and complex blur kernels from random trajectories. Detailed formulations of Gaussian and motion kernels are given in the supplementary material. We synthesize LR images with random kernels with respect to Eq. (1) for testing data based on five popular public benchmark datasets, including Set5 [4], Set14 [52], BSD100 [30], Urban100 [15] and RealSRSet [23]. We compare these kernels in terms of the peak + +signal to noise ratio (PSNR), and compare HR images in terms of PSNR and structural similarity (SSIM) [47]. + +Comparison Methods. The proposed DIP-DKP and Diff-DKP are compared with existing baselines including: Double-DIP [34], DIP-FKP [25], DASR [45], BSRDM [51], DCLS [29], DARM [58] and DiffBSR [26]. Specifically, Double-DIP tends to provide kernel priors by training a FCN network only with respect to the LR image restoration error. DIP-FKP incorporates the FKP model as kernel prior which is pre-trained on kernel datasets. KernelGAN+ZSSR and DARM are self-supervised and train an intergenerative adversarial network (GAN) to estimate the blur kernel. 
BSRDM formulates an elaborate degradation modelling on noise and kernel as handcrafted priors. DASR is a representative end-to-end method that is pre-trained on DIV2K [1] and Flickr2K [43] HR image datasets. DiffBSR is fine-tuned on BSR labeled datasets before applied to estimate HR images. + +Implementation and Hyper-parameters. The adopted kernel estimation network $\mathbf{G}_k$ of PKE module in this paper is a three-layer fully-connected network (FCN). The adopted DIP model follows the original settings in [44], and the diffusion model is the vanilla version [14] that is trained on ImageNet [6]. The number of sampling times in the MCMC simulation $L$ is the only hyper-parameter in the proposed DKP model. The hyper-parameter tuning results are given in Table 1. It is explicit that the performance reaches equilibrium around $L \in [4,8]$ . To balance the efficiency and effectiveness, we set $L = 5$ in this paper. + +# 5.2. Comparison with State-of-the-Arts + +Evaluation on Gaussian Kernel Scenario. Quantitative evaluation results on four datasets with scale factors $s = 4$ are presented in the upper half part of Table 2. We can see that the proposed DIP-DKP and Diff-DKP achieve the second and the best results on all datasets. We note that DIP-DKP only realizes slightly higher performance than the existing state-of-the-art (SotA) methods, while Diff-DKP achieves significantly better performances. This recalls our demonstrations in Section 4: DIP-DKP is totally trained while solving from scratch, and the DKP model plays the role of providing better convergence guarantee. Diff-DKP utilizes the DKP model to guide the well-trained diffusion model with fruitful data priors to converge to BSR task for better HR image restoration performances. In Table 3, we further show that our DKP model achieves the accurate ker- + +![](images/e17616ecb84820f3c50d57a76c2eab77ff14b9093bb41a43ffdba1ebef265c94.jpg) +Figure 4. The visualization of the adopted blur kernels. + +![](images/734751729182f53bed5f8588502ac7611eefe08dd77ef41b8154c2eaf7769b59.jpg) + +Table 1. Average image PSNR performance of the proposed DIP-DKP and Diff-DKP on Set5 [4] on the Gaussian kernel scenario. + +
| Methods | L=0 | L=2 | L=4 | L=6 | L=8 | L=10 | L=15 |
| --- | --- | --- | --- | --- | --- | --- | --- |
| DIP-DKP (Ours) | 20.99 | 27.12 | 28.44 | 28.57 | 28.52 | 28.29 | 28.03 |
| Diff-DKP (Ours) | 21.97 | 28.95 | 29.40 | 29.47 | 29.76 | 29.67 | 29.26 |
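Table 1 sweeps the number of MCMC sampling times $L$. As a rough illustration of what $L$ controls, the snippet below mimics an RKS-style kernel-prior step in the spirit of Eqs. (6)-(8) of Section 3: it draws $L$ random Gaussian kernels, weights each by the inverse LR reconstruction error, and averages them into a prior kernel. It reuses the hypothetical `gaussian_kernel` and `degrade` helpers from the previous sketch; the sampling ranges and the normalisation of the weights are our own simplifications, not the paper's exact procedure.

```python
# Hypothetical RKS-style kernel-prior step controlled by the hyper-parameter L.
# Requires the gaussian_kernel() and degrade() helpers sketched in Sec. 5.1 above.
import math
import torch

def rks_kernel_prior(x_est, y, L=5, scale=4, delta=1e-8):
    kernels, weights = [], []
    for _ in range(L):
        sigmas = (0.7 + 9.3 * torch.rand(2)).tolist()     # widths within [0.175s, 2.5s] for s = 4
        theta = float(torch.rand(1)) * math.pi            # rotation within [0, pi]
        k_r = gaussian_kernel(size=19, sigmas=sigmas, theta=theta)
        err = torch.sum((y - degrade(x_est, k_r, scale)) ** 2) + delta   # Eq. (7)-style error
        kernels.append(k_r)
        weights.append(1.0 / err)                         # Eq. (6): weight proportional to 1/error
    w = torch.stack(weights)
    w = w / w.sum()                                       # normalised variant of the Eq. (8) average
    return sum(wi * ki for wi, ki in zip(w, kernels))
```

A larger $L$ averages over more candidate kernels per step, which is consistent with the saturation observed in Table 1 for $L \in [4, 8]$.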
Table 2. Average PSNR/SSIM of different methods on public datasets synthesized with random Gaussian/motion kernels and $s = 4$. The best and second-best results are highlighted in red and blue, respectively.
| Method | Kernel | Set5 [4] | Set14 [52] | BSD100 [30] | Urban100 [15] |
| --- | --- | --- | --- | --- | --- |
| Double-DIP [34] | Gaussian | 20.99/0.5578 | 18.31/0.4426 | 18.57/0.3815 | 18.15/0.4491 |
| DASR [45] | Gaussian | 27.37/0.7859 | 25.43/0.6591 | 25.11/0.6129 | 22.88/0.6448 |
| DIP-FKP [25] | Gaussian | 27.77/0.7914 | 25.65/0.6764 | 25.15/0.6354 | 22.89/0.6327 |
| BSRDM [51] | Gaussian | 27.81/0.8029 | 25.35/0.6859 | 25.61/0.6526 | 22.36/0.6601 |
| DCLS [29] | Gaussian | 27.50/0.7948 | 25.68/0.6639 | 25.34/0.6169 | 22.92/0.6475 |
| DiffBIR [26] | Gaussian | 25.15/0.6468 | 23.01/0.5935 | 23.88/0.5586 | 21.94/0.5657 |
| DARM [58] | Gaussian | 26.25/0.6818 | 24.19/0.6187 | 24.29/0.5898 | 22.14/0.5967 |
| DIP-DKP (Ours) | Gaussian | 28.03/0.8039 | 25.98/0.6878 | 25.66/0.6531 | 23.24/0.6644 |
| Diff-DKP (Ours) | Gaussian | 29.44/0.8592 | 26.76/0.7400 | 26.63/0.7057 | 23.92/0.6875 |
| Double-DIP [34] | Motion | 18.92/0.4510 | 20.41/0.4847 | 19.00/0.3757 | 15.42/0.2932 |
| DASR [45] | Motion | 24.21/0.7252 | 24.16/0.6145 | 22.47/0.5836 | 20.24/0.5478 |
| DIP-FKP [25] | Motion | 24.61/0.7371 | 24.21/0.6227 | 22.80/0.5880 | 20.33/0.5572 |
| BSRDM [51] | Motion | 24.01/0.7098 | 23.56/0.6009 | 22.62/0.5791 | 20.40/0.5494 |
| DCLS [29] | Motion | 24.78/0.7323 | 24.38/0.6211 | 22.74/0.5922 | 20.49/0.5534 |
| DiffBIR [26] | Motion | 23.63/0.6367 | 23.59/0.6043 | 22.35/0.5784 | 20.14/0.5347 |
| DARM [58] | Motion | 24.23/0.7269 | 23.95/0.6294 | 22.48/0.5830 | 20.58/0.5595 |
| DIP-DKP (Ours) | Motion | 25.30/0.7417 | 24.52/0.6434 | 23.02/0.6136 | 21.24/0.5667 |
| Diff-DKP (Ours) | Motion | 28.74/0.8313 | 26.03/0.6719 | 24.10/0.6287 | 22.26/0.5862 |
Evaluation on Motion Kernel Scenario. The lower half of Table 2 shows the simulation results in the motion kernel scenario. The supervised learning methods, i.e., DASR and DiffBIR, are re-trained/fine-tuned on HR image datasets degraded with motion kernels, and DIP-FKP is retrained on the motion kernel dataset. The proposed DIP-DKP and Diff-DKP show significantly better performance in the motion kernel scenario, which validates that the proposed DKP model generalizes well across kernel categories. In particular, Diff-DKP maintains stable PSNR/SSIM scores when applied to motion kernels, while the other methods suffer a significant performance drop. This indicates that the proposed DKP can be expected to handle kernel-varying tasks.

Visual Results. The visual results of different methods on synthetic and real-world images are shown in Fig. 5. We can see that: 1) in the Gaussian kernel case, all methods produce satisfactory deblurring results, while our DIP-DKP and Diff-DKP yield better results with more accurate kernel estimation; 2) in the motion kernel case, clear distortions appear in the kernels estimated by DIP-FKP and BSRDM, which fail to recover the motion kernel, whereas our DIP-DKP and Diff-DKP achieve reasonably accurate motion kernel estimates; 3) on the real image, both DIP-FKP and BSRDM estimate Gaussian-like kernels, whereas our DIP-DKP and Diff-DKP tend to estimate non-Gaussian kernels. This verifies that an adaptive and flexible kernel estimation discipline is learned by our DKP model, which may fit real-world applications better.

Table 3. Average PSNR/SSIM of images and PSNR of kernels on Set14 [52] with $s = 4$. The best and second-best results are highlighted in red and blue, respectively.
| Method | Kernel | Kernel PSNR | Image PSNR/SSIM |
| --- | --- | --- | --- |
| DIP-DKP without RKS | Gaussian | 37.92 | 18.77/0.4227 |
| Diff-DKP without RKS | Gaussian | 40.93 | 17.33/0.3408 |
| Double-DIP [34] | Gaussian | 50.62 | 18.31/0.4426 |
| DIP-FKP [25] | Gaussian | 54.46 | 25.65/0.6764 |
| BSRDM [51] | Gaussian | 55.38 | 25.35/0.6859 |
| DIP-DKP (Ours) | Gaussian | 56.20 | 25.98/0.6878 |
| Diff-DKP (Ours) | Gaussian | 56.76 | 26.76/0.7400 |
| DIP-DKP without RKS | Motion | 34.92 | 18.19/0.4223 |
| Diff-DKP without RKS | Motion | 34.78 | 17.65/0.3513 |
| Double-DIP [34] | Motion | 35.52 | 20.41/0.4847 |
| DIP-FKP [25] | Motion | 37.52 | 24.21/0.6227 |
| BSRDM [51] | Motion | 37.88 | 23.56/0.6009 |
| DIP-DKP (Ours) | Motion | 39.33 | 24.52/0.6434 |
| Diff-DKP (Ours) | Motion | 40.37 | 26.03/0.6719 |
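The "without RKS" rows in Table 3 remove the learned kernel prior and update the kernel network with the data-consistency term alone, as detailed in Sec. 5.3 below. A purely hypothetical sketch of this distinction, loosely following the network-based Langevin dynamics idea described earlier, is given next; the exact loss, prior weighting and step sizes used in the paper may differ, `G_k` is assumed to map a fixed latent `z` to a 2-D kernel, and `degrade` is the helper from the first snippet.

```python
# Hypothetical PKE-style update step (not the paper's exact loss): a noisy,
# SGLD-like gradient step on data consistency, optionally pulling the predicted
# kernel toward the RKS prior k_p. Passing k_p=None mimics the "without RKS" ablation.
import torch

def pke_step(G_k, z, x_est, y, k_p=None, scale=4, lr=1e-3, lambda_prior=1.0, noise_scale=1e-3):
    k_hat = G_k(z)                                             # current kernel estimate (2-D)
    loss = torch.sum((y - degrade(x_est, k_hat, scale)) ** 2)  # data-consistency term
    if k_p is not None:
        loss = loss + lambda_prior * torch.sum((k_hat - k_p) ** 2)   # match the RKS kernel prior
    G_k.zero_grad()
    loss.backward()
    with torch.no_grad():
        for p in G_k.parameters():                             # Langevin-style noisy update
            if p.grad is not None:
                p -= lr * p.grad + noise_scale * torch.randn_like(p)
    return float(loss)
```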
+ +# 5.3. Ablation Studies + +Ablation study of RKS module. The ablation studies are carried on the MCMC sampling of kernel priors. "Without RKS" denotes that the adopted DKP updates the kernel network only by the data-consistency term without the learned kernel prior. In Fig. 6 (left), it can be seen that the estimated kernels without RKS have significant distortion, leading to remarkable PSNR drop of the estimated HR image, while DIP-DKP can estimate Gaussian kernels precisely with respect to the ground truth (with red frame). Fig. 6 (right) shows that the accurate motion kernel estimation no longer exists when the RKS module is absent. It is thus obvious that without the kernel prior learned from the MCMC process, the Diff-DKP fails to converge to a rational motion kernel estimation. The average kernel and image results are shown in Table 3. Without kernel prior learned from the RKS module, the kernel estimation performances of DKP- + +![](images/51381e48c7b82f12ffa64ca0f6ec5f1bf5b357e0570436240c9e19b7bea6feb5.jpg) + +![](images/6fe0a4fbb7052d95cf117db8fc3a770d279cfd562b59d9b4159bfa939140f05a.jpg) + +![](images/93da8cdbaedea7e10231537944da49d9cba747400f7f03f6564760528175ae3a.jpg) + +![](images/fd453a0940f59143122da358bb162cc8e87bd01ce860b1da7493f6950ee3f68c.jpg) + +![](images/f5d8382d84cccc79b62efcfb64ee7224e2388c883d7311572724fcab0e44af20.jpg) + +![](images/1f5ba65f5734f6709d5b9df45412ddb776b1b2ecf2168c4de1a0ac93e89b3e17.jpg) + +![](images/09a252daf9dc4b8900e2b9de6087a6ff342b922738b361011620a5119fc20b8e.jpg) + +![](images/f3a3cd032b95adefc909fee52f059087439b88452adc7657da88e042eebd6738.jpg) + +![](images/ce3551ad7534d0ccc01bcc62ffc84f5e89bc1a14126b035e0575fd7c791ca54b.jpg) + +![](images/52e589b8f5a7a0c6603398b67349f69cc93b1448ecadb586b5eee1d8cc1a16e2.jpg) + +![](images/30812206585b82b02566959cc728f1b819eb7e8da33302746a414dc44b8463f3.jpg) + +![](images/96145f05ad97ff295b0d4ff1f4c08dcc6a2c9ea864dfe2e7a6bb2dfec5c0aaac.jpg) + +![](images/e9e107ff03f3c40c994c36439711e4d805a8958f16c19221524f8b55e6956fb6.jpg) + +![](images/29d07cf3a2031de882d1c1f0fcc0dae3b2ca02b5858e19a44cb2d9d95d15df70.jpg) + +![](images/197ced1e62b01a2b9e9887f25a208d29d992ee2b50a7d3912a9f3005e5eb4a76.jpg) +$\mathrm{LR}(\times 4)$ + +![](images/ec6891048cb6675c0de44197b91d426539a2615e80fea325230bcc13a897df73.jpg) +DASR +27.42/27.74/- + +![](images/f04ff3c280e1a78fba329d678d5e36337c5003aadb5313bd385cefca4583cdc2.jpg) +DRAM +26.69/26.84/- + +![](images/d2d71d944caebd8064bbe29f6c79c2982956524cf98e277970c2a3f431a21a18.jpg) +FKP-DIP +27.80/28.95/- + +![](images/4a56c861d1cc9720ff0203c8caa3358c9f0550d2f626e41d21ffe6d577efbc19.jpg) +BSRDM +27.60/29.14/- +DIP-DKP(ours) + +![](images/02d0f327e96b0d90d885714b522364b12286274cb2ed518bcb489ed14a7a5a86.jpg) +28.07/29.45/- + +![](images/5c6a086412b71b626a213b9aee06a0c66f89a5c22ea99350d6310c5fa404b291.jpg) +Diff-DKP(ours) +29.38/29.89/- + +![](images/4e52cfe17c2e44f9daf2a30e95937eeefb07f88e212e3a0ed62a09e8263c4eae.jpg) +PSNR (dB) +(a) BSD100 "040" +Figure 6. The intermediate results of DIP-DKP, Diff-DKP and their no RKS module versions over iterations on two test images. + +![](images/df228a524780145f17249603a2e92cd2ed8548707b36487d01ad17ba7aaf7fa7.jpg) +Figure 5. Visual results of different methods on public datasets for scale factor 4. Estimated/ground-truth kernels are shown on the top left. +(b) Set14 "monarch" + +Table 4. The ablation of PKE module. (Set5, x4, image PSNR) + +
| Layers \ Units | 10 | 100 | 1000 | 10000 |
| --- | --- | --- | --- | --- |
| 1 | 13.75 | 23.57 | 28.93 | 28.24 |
| 3 | 13.61 | 28.97 | 28.48 | 28.35 |
| 5 | 13.30 | 28.81 | 28.52 | 26.65 |
| 7 | 13.86 | 28.30 | 28.54 | 27.93 |
based BSR methods drop significantly, leading to poor image restoration performance as well.

Ablation study of PKE module. Since PKE estimates the blur kernel on the basis of the random kernel priors and the LR observation, it is indispensable; we therefore conduct an ablation study on different structures of the kernel network in the PKE module in Table 4. We find that the kernel network performs well when it has 3-7 layers with 100-1000 units per layer. This indicates that the kernel network is robust to its structural choices, so the network does not need to be elaborately designed.

# 5.4. Model Size, Runtime and Memory Usage

The kernel network of our DKP model has a total of $562K$ parameters (FLOPs: $536K$), while Double-DIP and DIP-FKP have $641K$ parameters (FLOPs: $600K$) and $143K$ parameters (FLOPs: $178K$), respectively. The runtime and memory usage of our DIP-DKP on a GeForce RTX 3090 GPU for generating an HR image of size $512 \times 512$ are about 92 seconds and 11GB of memory, which is comparable with Double-DIP (91 seconds and 11.2GB) and DIP-FKP (90 seconds and 10.6GB). As for Diff-DKP, the $512 \times 512$ image needs to be divided into four $256 \times 256$ images for restoration, which costs a total of 60 seconds and 4GB of memory. Considering that our DIP-DKP and Diff-DKP are unsupervised and plug-and-play, it is reasonable to say that our methods have moderate computational costs.

Due to page limitations, more experimental results are given in the supplementary material.

# 6. Conclusion

In this paper, we propose a dynamic kernel prior (DKP) model to solve the BSR problem in an unsupervised and pre-training-free paradigm. DKP realizes rational kernel prior learning via MCMC sampling on random kernel distributions, providing accurate kernel estimation and thus leading to better HR image restoration. DKP can be easily incorporated into existing image restoration models, such as DIP and diffusion models, by replacing their kernel modeling modules or by adding it as an external kernel prior generator. When applied to solve the BSR problem, DKP is trained while solving the task with respect to the LR image restoration error, requiring neither pre-training nor labeled data. Extensive experiments on Gaussian and motion kernel scenarios with synthetic LR images and real-world images validate that DKP-based methods improve the kernel estimation accuracy significantly and thus lead to superior BSR results. We believe that the concept of using a trainable sampling process to provide adaptive priors will open a new direction for solving low-level vision tasks, achieving superior performance with modest computational costs via unsupervised inference.

# References

[1] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 126-135, 2017. 6
[2] Dominique Bakry and Michel Émery. Diffusions hypercontractives. In Séminaire de Probabilités XIX 1983/84: Proceedings, pages 177-206. Springer, 2006. 4
[3] Sefi Bell-Kligler, Assaf Shocher, and Michal Irani. Blind super-resolution kernel estimation using an internal-gan. Advances in Neural Information Processing Systems, 32, 2019. 1, 2
[4] Marco Bevilacqua, Aline Roumy, Christine Guillemot, and Marie Line Alberi-Morel. Low-complexity single-image super-resolution based on nonnegative neighbor embedding.
In British Machine Vision Conference, pages 135-1, 2012. 6, 7 +[5] Hyungjin Chung, Jeongsol Kim, Michael T McCann, Marc L Klasky, and Jong Chul Ye. Diffusion posterior sampling for general noisy inverse problems. arXiv preprint arXiv:2209.14687, 2022. 1, 2 +[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009. 6 +[7] Chao Dong, Chen Change Loy, Kaiming He, and Xiaou Tang. Learning a deep convolutional network for image super-resolution. In European conference on computer vision, pages 184-199. Springer, 2014. 2 +[8] Yangyi Dong, Xiaoyun Zhang, Zhixin Wang, Ya Zhang, Siheng Chen, and Yanfeng Wang. Unpaired face restoration via learnable cross-quality shift. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 667-675, 2022. 3 +[9] Netalee Efrat, Daniel Glasner, Alexander Apartsin, Boaz Nadler, and Anat Levin. Accurate blur models vs. image priors in single image super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 2832-2839, 2013. 3 +[10] Yosef Gandelsman, Assaf Shocher, and Michal Irani. "double-dip": Unsupervised image decomposition via coupled deep-image-priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11026-11035, 2019. 2 +[11] Daniel Glasner, Shai Bagon, and Michal Irani. Superresolution from a single image. In 2009 IEEE 12th international conference on computer vision, pages 349-356. IEEE, 2009. 2 +[12] Jinjin Gu, Hannan Lu, Wangmeng Zuo, and Chao Dong. Blind super-resolution with iterative kernel correction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1604-1613, 2019. 1, 2, 3 +[13] Lanqing Guo, Chong Wang, Wenhan Yang, Siyu Huang, Yufei Wang, Hanspeter Pfister, and Bihan Wen. Shadowdiffusion: When degradation prior meets diffusion model for + +shadow removal. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14049-14058, 2023. 1 +[14] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 2, 3, 4, 5, 6 +[15] Jia-Bin Huang, Abhishek Singh, and Narendra Ahuja. Single image super-resolution from transformed self-exemplars. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5197-5206, 2015. 6, 7 +[16] Yan Huang, Shang Li, Liang Wang, Tieniu Tan, et al. Unfolding the alternating optimization for blind super resolution. Advances in Neural Information Processing Systems, 33:5632-5643, 2020. 1, 2 +[17] Meiguang Jin, Stefan Roth, and Paolo Favaro. Normalized blind deconvolution. In Proceedings of the European Conference on Computer Vision (ECCV), pages 668-684, 2018. 1 +[18] Jiwon Kim, Jung Kwon Lee, and Kyoung Mu Lee. Accurate image super-resolution using very deep convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1646-1654, 2016. 2 +[19] Kwang In Kim and Younghee Kwon. Single-image superresolution using sparse regression and natural image prior. IEEE transactions on pattern analysis and machine intelligence, 32(6):1127-1133, 2010. 1, 2 +[20] Soo Ye Kim, Hyeonjun Sim, and Munchurl Kim. Koalanet: Blind super-resolution using kernel-oriented adaptive local adjustment. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10611-10620, 2021. 1, 2 +[21] Dilip Krishnan and Rob Fergus. Fast image deconvolution using hyper-laplacian priors. Advances in neural information processing systems, 22, 2009. 1, 2 +[22] Orest Kupyn, Volodymyr Budzan, Mykola Mykhailych, Dmytro Mishkin, and Jií Matas. Deblurgan: Blind motion deblurring using conditional adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8183-8192, 2018. 6 +[23] Yuelong Li, Mohammad Tofighi, Junyi Geng, Vishal Monga, and Yonina C Eldar. Efficient and interpretable deep blind image deblurring via algorithm unrolling. IEEE Transactions on Computational Imaging, 6:666-681, 2020. 6 +[24] Yawei Li, Yuchen Fan, Xiaoyu Xiang, Denis Demandolx, Rakesh Ranjan, Radu Timofte, and Luc Van Gool. Efficient and explicit modelling of image hierarchies for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18278-18289, 2023. 2 +[25] Jingyun Liang, Kai Zhang, Shuhang Gu, Luc Van Gool, and Radu Timofte. Flow-based kernel prior with application to blind super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10601-10610, 2021. 1, 2, 3, 5, 6, 7 +[26] Xinqi Lin, Jingwen He, Ziyan Chen, Zhaoyang Lyu, Ben Fei, Bo Dai, Wanli Ouyang, Yu Qiao, and Chao Dong. Diffbir: Towards blind image restoration with generative diffusion prior. arXiv preprint arXiv:2308.15070, 2023. 2, 6, 7 + +[27] Jie Liu, Wenjie Zhang, Yuting Tang, Jie Tang, and Gangshan Wu. Residual feature aggregation network for image superresolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2359-2368, 2020. 1 +[28] Zhengxiong Luo, Yan Huang, Shang Li, Liang Wang, and Tieniu Tan. End-to-end alternating optimization for blind super resolution. arXiv preprint arXiv:2105.06878, 2021. 2 +[29] Ziwei Luo, Haibin Huang, Lei Yu, Youwei Li, Haoqiang Fan, and Shuaicheng Liu. Deep constrained least squares for blind image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17642-17652, 2022. 1, 2, 6, 7 +[30] David Martin, Charless Fowlkes, Doron Tal, and Jitendra Malik. A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, pages 416-423. IEEE, 2001. 6, 7 +[31] Tomer Michaeli and Michal Irani. Nonparametric blind super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 945-952, 2013. 2 +[32] Radford M Neal et al. Mcmc using hamiltonian dynamics. Handbook of markov chain monte carlo, 2(11):2, 2011. 4 +[33] Daniele Perrone and Paolo Favaro. A clearer picture of total variation blind deconvolution. IEEE transactions on pattern analysis and machine intelligence, 38(6):1041-1055, 2015. 1, 2 +[34] Dongwei Ren, Kai Zhang, Qilong Wang, Qinghua Hu, and Wangmeng Zuo. Neural blind deconvolution using deep priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3341-3350, 2020. 1, 2, 5, 6, 7 +[35] Gernot Riegler, Samuel Schulter, Matthias Ruther, and Horst Bischof. Conditioned regression models for non-blind single image super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 522-530, 2015. 
6 +[36] Leonid I Rudin, Stanley Osher, and Emad Fatemi. Nonlinear total variation based noise removal algorithms. Physica D: nonlinear phenomena, 60(1-4):259-268, 1992. 2 +[37] Marshall F Tappen Bryan C Russell and William T Freeman. Exploiting the sparse derivative prior for super-resolution and image demosaicing. In Proceedings of the Third International Workshop Statistical and Computational Theories of Vision, pages 1-28, 2003. 2 +[38] Chitwan Sahara, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image super-resolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(4):4713-4726, 2022. 2, 5 +[39] Assaf Shocher, Nadav Cohen, and Michal Irani. "zero-shot" super-resolution using deep internal learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3118-3126, 2018. 1, 2 +[40] Jiaming Song, Arash Vahdat, Morteza Mardani, and Jan Kautz. Pseudoinverse-guided diffusion models for inverse + +problems. In International Conference on Learning Representations, 2022. 1, 2 +[41] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. Advances in neural information processing systems, 32, 2019. 3 +[42] Jian Sun, Zongben Xu, and Heung-Yeung Shum. Image super-resolution using gradient profile prior. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 2 +[43] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 114-125, 2017. 6 +[44] Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. Deep image prior. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 9446-9454, 2018. 2, 3, 4, 5, 6 +[45] Longguang Wang, Yingqian Wang, Xiaoyu Dong, Qingyu Xu, Jungang Yang, Wei An, and Yulan Guo. Unsupervised degradation representation learning for blind superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10581-10590, 2021. 2, 6, 7 +[46] Yinhuai Wang, Jiwen Yu, and Jian Zhang. Zero-shot image restoration using denoising diffusion null-space model. arXiv preprint arXiv:2212.00490, 2022. 2, 5 +[47] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6 +[48] Max Welling and Yee W Teh. Bayesian learning via stochastic gradient Langevin dynamics. In Proceedings of the 28th international conference on machine learning (ICML-11), pages 681–688, 2011. 4 +[49] Yu-Syuan Xu, Shou-Yao Roy Tseng, Yu Tseng, Hsien-Kai Kuo, and Yi-Min Tsai. Unified dynamic convolutional network for super-resolution with variational degradations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12496-12505, 2020. 1, 2 +[50] Xunpeng Yi, Han Xu, Hao Zhang, Linfeng Tang, and Jiayi Ma. Diff-retinex: Rethinking low-light image enhancement with a generative diffusion model. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12302-12311, 2023. 1, 2, 5 +[51] Zongsheng Yue, Qian Zhao, Jianwen Xie, Lei Zhang, Deyu Meng, and Kwan-Yee K. Wong. Blind image superresolution with elaborate degradation modeling on noise and kernel. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2128-2138, 2022. 1, 2, 3, 4, 6, 7 +[52] Roman Zeyde, Michael Elad, and Matan Protter. On single image scale-up using sparse-representations. In International conference on curves and surfaces, pages 711-730. Springer, 2010. 6, 7 + +[53] Kai Zhang, Wangmeng Zuo, and Lei Zhang. Learning a single convolutional super-resolution network for multiple degradations. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3262-3271, 2018. 1, 2, 3 +[54] Kai Zhang, Luc Van Gool, and Radu Timofte. Deep unfolding network for image super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3217-3226, 2020. 1 +[55] Kai Zhang, Yawei Li, Wangmeng Zuo, Lei Zhang, Luc Van Gool, and Radu Timofte. Plug-and-play image restoration with deep denoiser prior. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(10):6360-6376, 2021. 3 +[56] Yulun Zhang, Kunpeng Li, Kai Li, Lichen Wang, Bineng Zhong, and Yun Fu. Image super-resolution using very deep residual channel attention networks. In Proceedings of the European conference on computer vision (ECCV), pages 286-301, 2018. 1, 2 +[57] Zixiang Zhao, Haowen Bai, Yuzhhi Zhu, Jiangshe Zhang, Shuang Xu, Yulun Zhang, Kai Zhang, Deyu Meng, Radu Timofte, and Luc Van Gool. Ddfm: denoising diffusion model for multi-modality image fusion. arXiv preprint arXiv:2303.06840, 2023.1, 2 +[58] Hongyang Zhou, Xiaobin Zhu, Jianqing Zhu, Zheng Han, Shi-Xue Zhang, Jingyan Qin, and Xu-Cheng Yin. Learning correction filter via degradation-adaptive regression for blind single image super-resolution. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12365-12375, 2023. 2, 6, 7 +[59] Yuanzhi Zhu, Kai Zhang, Jingyun Liang, Jiezhang Cao, Bihan Wen, Radu Timofte, and Luc Van Gool. Denoising diffusion models for plug-and-play image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1219-1229, 2023. 
1, 2 \ No newline at end of file diff --git a/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/images.zip b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..eb05e43704fc970e6a552feee7d66de35369d66a --- /dev/null +++ b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdeaf47a97446a96f15e2ad6cab65488ecc5f68eaac575be40b0310da98da8ef +size 639961 diff --git a/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/layout.json b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..76c33b8bdc615f65948cd9d3ff9364f0fd450d5d --- /dev/null +++ b/2024/A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution/layout.json @@ -0,0 +1,12102 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 50, + 103, + 544, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 103, + 544, + 121 + ], + "spans": [ + { + "bbox": [ + 50, + 103, + 544, + 121 + ], + "type": "text", + "content": "A Dynamic Kernel Prior Model for Unsupervised Blind Image Super-Resolution" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 142, + 205, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 142, + 205, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 142, + 205, + 172 + ], + "type": "text", + "content": "Zhixiong Yang1 \nShuanghui Zhang1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 211, + 143, + 288, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 143, + 288, + 171 + ], + "spans": [ + { + "bbox": [ + 211, + 143, + 288, + 171 + ], + "type": "text", + "content": "Jingyuan Xia" + }, + { + "bbox": [ + 211, + 143, + 288, + 171 + ], + "type": "inline_equation", + "content": "^{1,*}" + }, + { + "bbox": [ + 211, + 143, + 288, + 171 + ], + "type": "text", + "content": " \nZhen Liu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 312, + 143, + 375, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 143, + 375, + 171 + ], + "spans": [ + { + "bbox": [ + 312, + 143, + 375, + 171 + ], + "type": "text", + "content": "Shengxi Li² \nYaowen Fu¹" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 402, + 143, + 487, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 143, + 487, + 172 + ], + "spans": [ + { + "bbox": [ + 402, + 143, + 487, + 172 + ], + "type": "text", + "content": "Xinghua Huang" + }, + { + "bbox": [ + 402, + 143, + 487, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 402, + 143, + 487, + 172 + ], + "type": "text", + "content": " Yongxiang Liu" + }, + { + "bbox": [ + 402, + 143, + 487, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 61, + 184, + 533, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 184, + 533, + 227 + ], + "spans": [ + { + "bbox": [ + 61, + 184, + 533, + 227 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 61, + 184, + 533, + 227 + ], + "type": "text", + "content": "College of Electronic Engineering, National University of Defense Technology, Changsha, China \n" + }, + { + "bbox": [ + 
61, + 184, + 533, + 227 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 61, + 184, + 533, + 227 + ], + "type": "text", + "content": "College of Electronic Engineering, Beihang University, Beijing, China \nyzx21@nudt.edu.cn, j.xia10@nudt.edu.cn" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "spans": [ + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 280, + 290, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 280, + 290, + 602 + ], + "spans": [ + { + "bbox": [ + 46, + 280, + 290, + 602 + ], + "type": "text", + "content": "Deep learning-based methods have achieved significant successes on solving the blind super-resolution (BSR) problem. However, most of them request supervised pretraining on labelled datasets. This paper proposes an unsupervised kernel estimation model, named dynamic kernel prior (DKP), to realize an unsupervised and pre-training-free learning-based algorithm for solving the BSR problem. DKP can adaptively learn dynamic kernel priors to realize real-time kernel estimation, and thereby enables superior HR image restoration performances. This is achieved by a Markov chain Monte Carlo sampling process on random kernel distributions. The learned kernel prior is then assigned to optimize a blur kernel estimation network, which entails a network-based Langevin dynamic optimization strategy. These two techniques ensure the accuracy of the kernel estimation. DKP can be easily used to replace the kernel estimation models in the existing methods, such as Double-DIP and FKP-DIP, or be added to the off-the-shelf image restoration model, such as diffusion model. In this paper, we incorporate our DKP model with DIP and diffusion model, referring to DIP-DKP and Diff-DKP, for validations. Extensive simulations on Gaussian and motion kernel scenarios demonstrate that the proposed DKP model can significantly improve the kernel estimation with comparable runtime and memory usage, leading to state-of-the-art BSR results. The code is available at https://github.com/XYLGroup/DKP." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 624, + 127, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 624, + 127, + 636 + ], + "spans": [ + { + "bbox": [ + 47, + 624, + 127, + 636 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 644, + 287, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 644, + 287, + 669 + ], + "spans": [ + { + "bbox": [ + 46, + 644, + 287, + 669 + ], + "type": "text", + "content": "Deep learning provides a new avenue for solving the blind super-resolution (BSR) problem, which aims to reconstruct" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 674, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 674, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 674, + 287, + 712 + ], + "type": "text", + "content": "Zhixiong Yang and Jingyuan Xia contributed equally to this work (*Corresponding author: Jingyuan Xia). This work is supported by National Natural Science Foundation of China, projects 61921001, 62131020, 62322121 and 62171448, and the NSFDYS of Hunan 2022J110067." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 255, + 547, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 255, + 547, + 388 + ], + "spans": [ + { + "bbox": [ + 304, + 255, + 547, + 388 + ], + "type": "text", + "content": "high-resolution (HR) images from the low-resolution (LR) observations with unknown blur kernels, and is known to be highly non-convex and ill-posed. To alleviate the non-convexity and ill-posedness, most of learning-based BSR methods incorporate image priors via supervised learning based on paired LR-HR samples. However, pre-defined labeled training datasets are expensive, time-consuming, and even not feasible in specific scenarios, such as for high speed targets (e.g., satellites, aircraft) and medical images (e.g., beating heart). Thus, unsupervised learning-based solutions are highly demanded for BSR problem." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 388, + 546, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 388, + 546, + 675 + ], + "spans": [ + { + "bbox": [ + 304, + 388, + 546, + 675 + ], + "type": "text", + "content": "The existing BSR methods can be roughly divided into model-based and learning-based strategies in terms of the priors adopted to provide performance guarantee. Model-based approaches [19, 21, 33, 51] typically adopt hand-designed and explicit constraints as regularizations on image properties, or expert knowledge of the blur kernel. Meanwhile, learning-based BSR methods [12, 16, 20, 27-29, 49, 53, 54, 56] aim to train an end-to-end network with paired LR-HR image samples to leverage data priors for boosting performances. However, these methods highly demand the data and need to undergo throughing pre-training before applications, leading to limited generalization ability towards varying blur kernels. To alleviate this issue, quite a few methods [5, 13, 40, 50, 57, 59] substitute the cumbersome training in advance by a well-trained diffusion model with significantly less fine-tuning samples in an off-the-shelf fashion. On the other side, a slice of works [3, 17, 25, 34, 39, 51] propose to replace the HR image data priors by kernel priors, which are more substantial, economical and efficient to be trained. However, both of these advances are underlying the supervised learning scheme with necessity of training on labeled datasets, still hindering the flexibility and generalization ability towards the BSR tasks with different kernels and unknown HR ground truths." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "type": "text", + "content": "In this paper, we propose a dynamic kernel prior (DKP) generation model that can be plug-in with the majority of image restoration (IR) models, to solve BSR problem in an" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "26046" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 406 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 406 + ], + "type": "text", + "content": "unsupervised way. The proposed DKP model consists of two modules: random kernel sampling (RKS) module and prior kernel estimation (PKE) module. In the RKS module, a Markov Chain Monte Carlo (MCMC) sampling strategy on kernel distributions iteratively generates random kernels as kernel priors, which are then assigned to the PKE module. The PKE module is employed to estimate the blur kernel with respect to the kernel prior generated from the RKS module, the observed LR input and estimated HR image from the IR model. The estimated blur kernel is then assigned to an adopted IR model for the HR image restoration. Along with the alternative solving processes, the MCMC process in RKS module converges to a desired kernel distribution with respect to the LR observation and the estimated HR image to guarantee a rational kernel prior. Meanwhile, a network-based Langevin dynamics (NLD) paradigm is proposed to optimize the kernel estimator in our PKE module with respect to the RKS output kernel prior and the data consistency based on the LR image reconstruction error. The RKS module realizes an unsupervised kernel prior learning. The PKE module achieves promising kernel estimation via the NLD update scheme, which further alleviates the non-convexity and ill-posedness in the view of optimization strategy. In this way, the DKP model is capable of providing the plug-and-play kernel estimation without training in advance on paired LR-HR samples, and is flexible to be applied to the existing IR models for solving the BSR problem." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 409, + 289, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 409, + 289, + 588 + ], + "spans": [ + { + "bbox": [ + 47, + 409, + 289, + 588 + ], + "type": "text", + "content": "Two applications are proposed to validate the feasibility and performance of our DKP model: deep image prior (DIP) [44] and diffusion model [14] adopted as the IR model, referring to DIP-DKP and Diff-DKP, respectively. For the DIP-DKP, we simultaneously optimize the parameters of DIP and DKP models from scratch during the alternative solution process. For the Diff-DKP, the adopted diffusion model is off-the-shelf from [14] and is applied as the fixed HR image restorer. The DKP model is optimized from scratch as well. Extensive simulation results show that the DIP-DKP achieves comparable performance than the existing methods, while the Diff-DKP achieves the state-of-the-art performance in both of Gaussian and motion kernel scenarios. The main contributions are summarized as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 594, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 47, + 594, + 287, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 594, + 287, + 641 + ], + "spans": [ + { + "bbox": [ + 47, + 594, + 287, + 641 + ], + "type": "text", + "content": "- The RKS module is proposed to generate a rational kernel prior from the MCMC sampling on random kernel distributions. This way, an unsupervised kernel prior learning is achieved to substitute the pre-training phase." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 642, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 642, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 642, + 287, + 689 + ], + "type": "text", + "content": "- In PKE module, the NLD is proposed to optimize the kernel estimator, ensuring good convergence and concise estimation of the blur kernel from the perspective of optimization strategy." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "- The proposed DKP model enjoys the ease use on the popular IR models without the necessity of pre-training/re" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 72, + 547, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 547, + 119 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 547, + 119 + ], + "type": "text", + "content": "training towards different scenarios. The two applications, i.e., DIP-DKP and Diff-DKP, validate the state-of-the-art performance and excellent flexibility of our DKP model." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 130, + 392, + 143 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 130, + 392, + 143 + ], + "spans": [ + { + "bbox": [ + 306, + 130, + 392, + 143 + ], + "type": "text", + "content": "2. 
Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 150, + 547, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 150, + 547, + 606 + ], + "spans": [ + { + "bbox": [ + 304, + 150, + 547, + 606 + ], + "type": "text", + "content": "To alleviate the non-convexity and ill-posedness, early model-based approaches [11, 31, 33, 37] typically construct image priors in explicit formulations, such as the total variation (TV) [36], gradient profile [42], hyper-Laplacian [21] and sparsity [19]. In contrast, learning-based methods [7, 12, 16–18, 20, 28, 29, 45, 49, 53, 56] typically train an end-to-end network on labelled image samples to incorporate data priors. Wang et al. [45] proposed a CNN-based deep network with degradation feature representation module to learn image degradation feature from supervised training on paired LR-HR images. Li et al. [24] proposed a transformer network to learn multi-scale image feature via self-attention mechanisms. To reduce the high training costs of time and data, recent advances [5, 38, 40, 46, 50, 57, 59] are proposed to solve BSR problem by an off-the-shelf diffusion model [14]. Lin et al. [26] proposed to partially fine-tune the parameters of diffusion model with significantly less labeled images. Wang et al. [46] further formulated a diffusion-based BSR algorithm that iteratively solves super-resolution tasks with the given kernel without re-training. Different from the end-to-end models that are trained on paired image samples, recent methods tend to resolve BSR problem via pre-training on kernel datasets [25] or pre-defined kernel priors [51]. An alternative framework between the kernel estimation and image restoration is typically adopted in these methods [3, 10, 12, 39, 44, 58], such as double-deep image prior (Double-DIP) [34]. On the basis of this framework, Liang et al. [25] established a flow-based kernel prior (FKP) network that is pre-trained on labeled kernels to enroll kernel priors while the HR image is estimated by DIP network in an online fashion. Yue et al. [51] proposed a hand-crafted kernel prior model to improve the robustness towards the Gaussian kernel scenario. Despite the fact that these methods approximately bring down the data requirements and training costs, the necessity of training in advances or hand-crafted design still limits the flexibility and generalization ability towards the varying kernel scenarios (Gaussian and motion) without ground truths." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 615, + 471, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 615, + 471, + 628 + ], + "spans": [ + { + "bbox": [ + 306, + 615, + 471, + 628 + ], + "type": "text", + "content": "3. Dynamic Kernel Prior (DKP)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 635, + 545, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 635, + 545, + 659 + ], + "spans": [ + { + "bbox": [ + 305, + 635, + 545, + 659 + ], + "type": "text", + "content": "Problem Formulation. 
The degradation model of BSR problem is commonly expressed as follows," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 381, + 669, + 545, + 681 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 669, + 545, + 681 + ], + "spans": [ + { + "bbox": [ + 381, + 669, + 545, + 681 + ], + "type": "interline_equation", + "content": "\\boldsymbol {y} = (\\boldsymbol {x} \\otimes \\boldsymbol {k}) \\downarrow_ {s} + \\boldsymbol {n}, \\tag {1}", + "image_path": "0854317068da3587ac2ba58351b321bc144efa5a565b1ad70df303c2eaea80b5.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " denotes the LR image, " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " denotes the HR image, " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\otimes" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " indicates the convolution operation, " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\downarrow_{s}" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " denotes the down" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "26047" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 71, + 65, + 265, + 183 + ], + "blocks": [ + { + "bbox": [ + 71, + 65, + 265, + 183 + ], + "lines": [ + { + "bbox": [ + 71, + 65, + 265, + 183 + ], + "spans": [ + { + "bbox": [ + 71, + 65, + 265, + 183 + ], + "type": "image", + "image_path": "ffb76d946744169a154063112a00aed614d570b2867d0b6b869d24ea7f8d968e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 186, + 287, + 231 + ], + "lines": [ + { + "bbox": [ + 46, + 186, + 287, + 231 + ], + "spans": [ + { + "bbox": [ + 46, + 186, + 287, + 231 + ], + "type": "text", + "content": "Figure 1. The overview of the RKS module. 
The MCMC simulation can generate the random kernel " + }, + { + "bbox": [ + 46, + 186, + 287, + 231 + ], + "type": "inline_equation", + "content": "\\pmb{k}_p^t" + }, + { + "bbox": [ + 46, + 186, + 287, + 231 + ], + "type": "text", + "content": " from random kernel distributions " + }, + { + "bbox": [ + 46, + 186, + 287, + 231 + ], + "type": "inline_equation", + "content": "\\{\\pmb{k}_r^l\\}_{l=1}^L" + }, + { + "bbox": [ + 46, + 186, + 287, + 231 + ], + "type": "text", + "content": " as the kernel prior with respect to the current model parameters " + }, + { + "bbox": [ + 46, + 186, + 287, + 231 + ], + "type": "inline_equation", + "content": "\\pmb{x}^{t-1}, \\pmb{y}" + }, + { + "bbox": [ + 46, + 186, + 287, + 231 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 243, + 287, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 243, + 287, + 281 + ], + "spans": [ + { + "bbox": [ + 46, + 243, + 287, + 281 + ], + "type": "text", + "content": "sampling operation with scale factor " + }, + { + "bbox": [ + 46, + 243, + 287, + 281 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 243, + 287, + 281 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 243, + 287, + 281 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 243, + 287, + 281 + ], + "type": "text", + "content": " denotes the blur kernel. The BSR problem (1) can be formulated as a maximum a posteriori (MAP) problem:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 286, + 287, + 304 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 286, + 287, + 304 + ], + "spans": [ + { + "bbox": [ + 115, + 286, + 287, + 304 + ], + "type": "interline_equation", + "content": "\\max _ {\\boldsymbol {x}, \\boldsymbol {k}} p (\\boldsymbol {y} | \\boldsymbol {x}, \\boldsymbol {k}) p (\\boldsymbol {x}) p (\\boldsymbol {k}), \\tag {2}", + "image_path": "dece4661c15af8393c9693b4c18cbaa7b3aaa57515f758b82ab5ce5b7891a806.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "spans": [ + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "inline_equation", + "content": "p(\\boldsymbol{y}|\\boldsymbol{x},\\boldsymbol{k})" + }, + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "text", + "content": " denotes the likelihood of the observed LR image " + }, + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "inline_equation", + "content": "\\boldsymbol{y}" + }, + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "inline_equation", + "content": "p(\\boldsymbol{x})" + }, + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "inline_equation", + "content": "p(\\boldsymbol{k})" + }, + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "text", + "content": " are the HR image and kernel priors, respectively. Image priors [8, 14, 41, 44, 55] have been well-designed and fully-studied in the past decade. 
In contrast, researches on kernel priors " + }, + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "inline_equation", + "content": "p(\\boldsymbol{k})" + }, + { + "bbox": [ + 46, + 312, + 287, + 395 + ], + "type": "text", + "content": " are in the ascendant, as kernel samples are less expensive to obtain and the training phase is more efficient [9, 12, 25, 51, 53]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 396, + 287, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 396, + 287, + 502 + ], + "spans": [ + { + "bbox": [ + 46, + 396, + 287, + 502 + ], + "type": "text", + "content": "In this paper, we propose the DKP model, which comprises two modules: RKS and PKE. The RKS module is employed to generate rational kernel priors, which are assigned to the PKE module to support the estimation of blur kernel. Let " + }, + { + "bbox": [ + 46, + 396, + 287, + 502 + ], + "type": "inline_equation", + "content": "t = 1,2,\\dots ,T" + }, + { + "bbox": [ + 46, + 396, + 287, + 502 + ], + "type": "text", + "content": " denote the alternative iterations among these two modules and the adopted IR model, " + }, + { + "bbox": [ + 46, + 396, + 287, + 502 + ], + "type": "inline_equation", + "content": "\\pmb{k}^t" + }, + { + "bbox": [ + 46, + 396, + 287, + 502 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 396, + 287, + 502 + ], + "type": "inline_equation", + "content": "\\pmb{x}^{t}" + }, + { + "bbox": [ + 46, + 396, + 287, + 502 + ], + "type": "text", + "content": " denote the estimated blur kernel and HR image at the " + }, + { + "bbox": [ + 46, + 396, + 287, + 502 + ], + "type": "inline_equation", + "content": "t^{th}" + }, + { + "bbox": [ + 46, + 396, + 287, + 502 + ], + "type": "text", + "content": " iteration, respectively. The details of DKP model is given below." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "text", + "content": "RKS module. At the " + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "inline_equation", + "content": "t^{th}" + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "text", + "content": " iteration, the RKS module plays the key role of generating a rational kernel prior " + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\pmb{k}_p^t" + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "text", + "content": " from the MCMC simulation. The overview diagram is shown in Fig. 1. 
Let " + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "inline_equation", + "content": "p(\\pmb{k}_r|\\pmb{\\Sigma}_r)" + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "text", + "content": " denotes that the random kernel " + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\pmb{k}_r" + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "text", + "content": " is conditioned by the latent variable " + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_r" + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "text", + "content": ", in which " + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "inline_equation", + "content": "p(\\pmb{\\Sigma}_r)" + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "text", + "content": " determines the category of blur kernel. Then the distribution of the kernel prior " + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\pmb{k}_p^t" + }, + { + "bbox": [ + 46, + 502, + 287, + 589 + ], + "type": "text", + "content": " can be formulated as" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 94, + 594, + 287, + 620 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 594, + 287, + 620 + ], + "spans": [ + { + "bbox": [ + 94, + 594, + 287, + 620 + ], + "type": "interline_equation", + "content": "p \\left(\\boldsymbol {k} _ {p} ^ {t}\\right) = \\int_ {\\boldsymbol {\\Sigma} _ {r}} p \\left(\\boldsymbol {k} _ {r} \\mid \\boldsymbol {\\Sigma} _ {r}\\right) p \\left(\\boldsymbol {\\Sigma} _ {r}\\right) d \\boldsymbol {\\Sigma} _ {r}. \\tag {3}", + "image_path": "8605b423c791af74dbf5ab26165770a98b79c9581f439f3a4c87f2fc84bcb611.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 626, + 287, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 626, + 287, + 684 + ], + "spans": [ + { + "bbox": [ + 46, + 626, + 287, + 684 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 46, + 626, + 287, + 684 + ], + "type": "inline_equation", + "content": "\\Sigma_r" + }, + { + "bbox": [ + 46, + 626, + 287, + 684 + ], + "type": "text", + "content": " is the parameter of kernel (e.g., the variance of Gaussian kernel or the length of motion kernel). 
It is not easy to sample all the possible " + }, + { + "bbox": [ + 46, + 626, + 287, + 684 + ], + "type": "inline_equation", + "content": "\\Sigma_r" + }, + { + "bbox": [ + 46, + 626, + 287, + 684 + ], + "type": "text", + "content": ", and therefore, we convert (3) into the Monte Carlo simulation in the following form:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 684, + 287, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 684, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 111, + 684, + 287, + 715 + ], + "type": "interline_equation", + "content": "p \\left(\\boldsymbol {k} _ {p} ^ {t}\\right) \\approx \\sum_ {l = 1} ^ {L} p \\left(\\boldsymbol {k} _ {r} ^ {l} \\mid \\boldsymbol {\\Sigma} _ {r} ^ {l}\\right) \\boldsymbol {\\Sigma} _ {r} ^ {l}, \\tag {4}", + "image_path": "bed78f1ba5a185957592e0d7a0d33f3149054ac5eb7dedb6c5c1313745d347f2.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "text", + "content": " denotes the index of the Monte Carlo sampling, " + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_r^l" + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "inline_equation", + "content": "l^{th}" + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "text", + "content": " sampled latent variable, " + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "inline_equation", + "content": "\\pmb{k}_r^l" + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "inline_equation", + "content": "l^{th}" + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "text", + "content": " sampled kernel, conditioned on the " + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_r^l" + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "text", + "content": "." 
To ensure that the randomly generated kernels remain rational for the BSR problem, and to guide the optimization across iterations, the MCMC simulation is formulated as

$$p\left(\boldsymbol{k}_p^t \mid \boldsymbol{x}^{t-1}, \boldsymbol{y}\right) \approx \sum_{l=1}^{L} p\left(\boldsymbol{k}_r^l \mid \boldsymbol{x}^{t-1}, \boldsymbol{y}\right) p\left(\boldsymbol{k}_r^l \mid \boldsymbol{\Sigma}_r^l\right) \boldsymbol{\Sigma}_r^l, \tag{5}$$

where $p(\pmb{k}_r^l|\pmb{x}^{t-1},\pmb{y})$ denotes the kernel weight $\omega^l$, conditioned on the observed LR image $\pmb{y}$ and the estimated HR image $\pmb{x}^{t-1}$ through the MCMC loss $\mathcal{L}_{MCMC}$:

$$\omega^{l} = p\left(\boldsymbol{k}_r^l \mid \boldsymbol{x}^{t-1}, \boldsymbol{y}\right) \propto \frac{1}{\mathcal{L}_{MCMC}^{l}}, \tag{6}$$

where

$$\mathcal{L}_{MCMC}^{l} = \left\| \boldsymbol{y} - \left(\boldsymbol{x}^{t-1} \otimes \boldsymbol{k}_r^l\right) \downarrow_s \right\|_F^2 + \delta, \tag{7}$$

and $\delta$ is a small noise term that prevents $\mathcal{L}_{MCMC}^{l} = 0$. In this way, $\pmb{k}_p^t$ can be formulated as

$$\boldsymbol{k}_p^t = \frac{1}{L} \sum_{l=1}^{L} \omega^{l} \boldsymbol{k}_r^l. \tag{8}$$

The obtained $\pmb{k}_p^t$ is then assigned to the PKE module as a rational kernel prior, which is introduced next.

We note that the obtained kernel prior $\pmb{k}_p^t$ is an expectation over $L$ samples according to (4). The number of sampling times $L$ plays the role of annealing/tempering in MCMC simulations and is a hyper-parameter; details of its tuning are given in Section 5.1.
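To make the weighting of Eqs. (6)-(8) concrete, the following is a minimal PyTorch-style sketch of the RKS step. It assumes hypothetical helpers `sample_random_kernel` (drawing a Gaussian or motion kernel from a randomly chosen $\Sigma_r^l$) and `blur_and_downsample` (the $\otimes$ and $\downarrow_s$ degradation); it illustrates the idea only and is not the authors' released implementation.

```python
import torch

def rks_kernel_prior(x_prev, y, sample_random_kernel, blur_and_downsample,
                     L=5, s=4, delta=1e-6):
    """Random Kernel Sampling (RKS): a sketch of Eqs. (5)-(8).

    x_prev : current HR estimate x^{t-1}, shape (1, C, H, W)
    y      : observed LR image,           shape (1, C, H/s, W/s)
    Returns the kernel prior k_p^t as the weighted mean of L random kernels.
    """
    kernels, weights = [], []
    for _ in range(L):
        k = sample_random_kernel()                   # k_r^l drawn from p(k_r | Sigma_r^l)
        y_hat = blur_and_downsample(x_prev, k, s)    # (x^{t-1} * k_r^l) downsampled by s
        loss = torch.sum((y - y_hat) ** 2) + delta   # L_MCMC^l, Eq. (7)
        kernels.append(k)
        weights.append(1.0 / loss)                   # omega^l, Eq. (6)
    weights = torch.stack(weights)                   # shape (L,)
    kernels = torch.stack(kernels)                   # shape (L, kh, kw)
    # Eq. (8): k_p^t = (1/L) * sum_l omega^l k_r^l
    k_p = (weights.view(-1, 1, 1) * kernels).sum(dim=0) / L
    return k_p
```

Kernels that reconstruct the LR image poorly receive small weights, so the prior concentrates on plausible kernels without any labeled data.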
PKE module. In our DKP model, the PKE module estimates the blur kernel with a lightweight network $\mathrm{G}_k$ with parameters $\phi_k$:

$$\boldsymbol{k}^t = \mathrm{G}_k\left(\phi_k^t\right). \tag{9}$$

The network $\mathrm{G}_k$ takes a fixed, randomly initialized noise as input; we omit this input for notational convenience, since $\phi_k^t$ are the main variables.

This kernel estimator $\mathrm{G}_k$ is optimized in the NLD paradigm with respect to a data-consistency term and a kernel prior term, as shown in Fig. 2. The data-consistency term is computed from the LR image reconstruction error:

$$\log p\left(\phi_k^{t-1} \mid \boldsymbol{x}^{t-1}, \boldsymbol{y}\right) = -\left\| \boldsymbol{y} - \left(\boldsymbol{x}^{t-1} \otimes \mathrm{G}_k\left(\phi_k^{t-1}\right)\right) \downarrow_s \right\|_F^2. \tag{10}$$

Figure 2. The overview of the PKE module. The blur kernel $\pmb{k}^t$ is estimated by the network $\mathrm{G}_k$, whose parameters $\phi_k$ are updated by the kernel prior term from the RKS module and the data-consistency term, based on the LR image reconstruction error.

The kernel prior term is computed from the difference between the network-estimated kernel $\mathrm{G}_k(\phi_k^{t-1})$ and the random-sampled kernel $\pmb{k}_p^t$ from the RKS module:

$$\log p\left(\phi_k^{t-1} \mid \boldsymbol{k}_p^t\right) = -\left\| \mathrm{G}_k\left(\phi_k^{t-1}\right) - \boldsymbol{k}_p^t \right\|_F^2. \tag{11}$$

By combining (10) and (11), the network parameters $\phi_k^{t-1}$ can be updated as follows,

$$\phi_k^t = \phi_k^{t-1} + \frac{\delta^2}{2} \frac{\partial \log p\left(\phi_k^{t-1} \mid \boldsymbol{x}^{t-1}, \boldsymbol{y}\right)}{\partial \phi_k^{t-1}} + \delta \frac{\partial \log p\left(\phi_k^{t-1} \mid \boldsymbol{k}_p^t\right)}{\partial \phi_k^{t-1}}, \tag{12}$$

where the second term is the data-consistency update and the third term is the additional update based on the random kernel $\pmb{k}_p^t$.
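The two-term update of Eq. (12) can be expressed as a single gradient-ascent step with automatic differentiation. The sketch below assumes `G_k` is a small `torch.nn.Module` mapping its fixed noise input `z_k` to a kernel, and reuses the hypothetical `blur_and_downsample` helper; the step sizes follow Eq. (12) but are otherwise illustrative.

```python
import torch

def pke_update(G_k, z_k, x_prev, y, k_p, blur_and_downsample, s=4, delta=0.01):
    """One NLD-style ascent step on the two log-likelihood terms of Eq. (12).

    G_k : lightweight kernel network (parameters phi_k)
    z_k : fixed random noise input of G_k
    k_p : kernel prior from the RKS module, Eq. (8)
    """
    k_est = G_k(z_k)                                                       # Eq. (9)
    log_p_data = -torch.sum((y - blur_and_downsample(x_prev, k_est, s)) ** 2)  # Eq. (10)
    log_p_prior = -torch.sum((k_est - k_p) ** 2)                               # Eq. (11)
    # Ascent direction of Eq. (12): (delta^2/2)*grad(log_p_data) + delta*grad(log_p_prior)
    objective = 0.5 * delta ** 2 * log_p_data + delta * log_p_prior
    G_k.zero_grad()
    objective.backward()
    with torch.no_grad():
        for p in G_k.parameters():
            if p.grad is not None:
                p.add_(p.grad)          # phi_k^t = phi_k^{t-1} + ascent step
    return G_k(z_k).detach()            # kernel estimate k^t after the update
```

The prior-term gradient in this step is exactly the disturbance that is discussed next.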
It has been shown that a random noise-based disturbance prevents the variable update in Langevin dynamics from being trapped in bad local modes [2, 32, 48, 51]; more details on Langevin dynamics are given in the supplementary material. At this stage, the random kernel sample from the RKS module can be regarded as the random "noise" for the $\phi_k^{t-1}$ update, and Eq. (12) can be reformulated as follows,

$$\phi_k^t = \phi_k^{t-1} + \frac{\delta^2}{2} \frac{\partial \log p\left(\phi_k^{t-1} \mid \boldsymbol{x}^{t-1}, \boldsymbol{y}\right)}{\partial \phi_k^{t-1}} + \zeta_{\phi_k}^{t-1}, \tag{13}$$

where $\zeta_{\phi_k}^{t-1} = \frac{\partial \log p(\phi_k^{t-1} \mid \boldsymbol{k}_p^t)}{\partial \phi_k^{t-1}}$ denotes the parameter-correlated Langevin dynamics disturbance.

The pipeline of our DKP at the $t^{th}$ iteration is given in Algorithm 1. The whole DKP model is implemented in a plug-and-play style, in which training in advance is not required. Besides, the random kernels from the RKS module are self-adaptively sampled through the MCMC simulation, without the need for labeled training data. We also note that the DKP model brings only negligible runtime and memory cost in applications, as the adopted network $\mathrm{G}_k$ is typically lightweight. This leads to high flexibility and low computational complexity. These three merits make our DKP convenient to apply to existing image restoration approaches, including the untrained DIP model and an off-the-shelf pre-trained diffusion model, which are detailed in the next section.

Algorithm 1: The proposed DKP model.
1 Given: $\boldsymbol{x}^{t-1}$, $\boldsymbol{y}$ and $\phi_k^{t-1}$
2 % Random Kernel Sampling (RKS) Module
3 Sample random kernels $\{\boldsymbol{k}_r^l\}_{l=1}^L$ via MC.
4 for $l \gets 1, \ldots, L$ do
5   $\omega^l = \frac{1}{\mathcal{L}_{MCMC}^l}$, $\mathcal{L}_{MCMC}^l = \| \boldsymbol{y} - (\boldsymbol{x}^{t-1} \otimes \boldsymbol{k}_r^l) \downarrow_s \|_F^2 + \delta$
6 end
7 $\boldsymbol{k}_p^t = \frac{1}{L} \sum_{l=1}^L \omega^l \boldsymbol{k}_r^l$
8 % Prior Kernel Estimation (PKE) Module
9 $\phi_k^t = \phi_k^{t-1} + \frac{\delta^2}{2} \frac{\partial \log p(\phi_k^{t-1} \mid \boldsymbol{x}^{t-1}, \boldsymbol{y})}{\partial \phi_k^{t-1}} + \delta \frac{\partial \log p(\phi_k^{t-1} \mid \boldsymbol{k}_p^t)}{\partial \phi_k^{t-1}}$
10 Output: $\boldsymbol{k}^t = \mathrm{G}_k(\phi_k^t)$.
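Combining the two sketches above, one iteration of Algorithm 1 reduces to a short driver; the helpers are the same assumed ones as before, so this is again only an illustration of the control flow.

```python
def dkp_iteration(G_k, z_k, x_prev, y, sample_random_kernel, blur_and_downsample, L=5):
    """One DKP step (Algorithm 1): RKS builds the prior, PKE refines the kernel network."""
    # Lines 2-7: Monte Carlo sampling and the MCMC-weighted kernel prior.
    k_p = rks_kernel_prior(x_prev, y, sample_random_kernel, blur_and_downsample, L=L)
    # Lines 8-10: Langevin-style update of phi_k and the resulting kernel estimate k^t.
    k_t = pke_update(G_k, z_k, x_prev, y, k_p, blur_and_downsample)
    return k_t
```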
Figure 3. The overview of our DKP-based BSR method.

4. DKP-based BSR Methods

4.1. Pipeline

The overview of the proposed DKP-based BSR method is illustrated in Fig. 3. The DKP model (gray box), comprising the RKS module (blue box) and the PKE module (lilac box), and the IR model (red box) alternately optimize the blur kernel and refine the HR image, respectively. At each iteration, the estimated HR image $\boldsymbol{x}^{t-1}$ and the LR image $\boldsymbol{y}$ are first fed to the RKS module $f_{\mathrm{RKS}}$ to generate the kernel prior

$$\boldsymbol{k}_p^t = f_{\mathrm{RKS}}\left(\boldsymbol{x}^{t-1}, \boldsymbol{y}\right), \tag{14}$$

where $\pmb{x}^{t-1}$ denotes the estimated HR image from the last IR model output. Then, the kernel prior $\pmb{k}_p^t$ is assigned to the PKE module $f_{\mathrm{PKE}}$, which estimates the kernel as follows,

$$\boldsymbol{k}^t = f_{\mathrm{PKE}}\left(\boldsymbol{x}^{t-1}, \boldsymbol{y}, \boldsymbol{k}_p^t\right), \tag{15}$$

where $\pmb{k}^t$ is the estimated kernel at the $t^{th}$ kernel estimation iteration, which is then assigned to the IR model. The $t^{th}$ HR image $\pmb{x}^t$ is estimated by the IR model as follows,

$$\boldsymbol{x}^t = f_{\mathrm{IR}}\left(\boldsymbol{x}^{t-1}, \boldsymbol{y}, \boldsymbol{k}^t\right), \tag{16}$$

where $f_{\mathrm{IR}}$ denotes the adopted IR model. In this paper, two representative IR models, DIP [44] and a diffusion model [14], are applied to evaluate the DKP-based BSR solutions, referred to as DIP-DKP and Diff-DKP, which are introduced in the sequel.
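Eqs. (14)-(16) amount to a plain alternating loop. The sketch below treats `f_rks`, `f_pke` and `f_ir` as opaque callables (the IR model being DIP or a diffusion sampler), so it only schematizes the control flow under those assumptions.

```python
def dkp_bsr_pipeline(y, x_init, f_rks, f_pke, f_ir, T=1000):
    """Alternating optimization of Eqs. (14)-(16): kernel prior -> kernel -> HR image."""
    x, k = x_init, None
    for t in range(1, T + 1):
        k_p = f_rks(x, y)       # Eq. (14): kernel prior from the RKS module
        k = f_pke(x, y, k_p)    # Eq. (15): kernel estimate from the PKE module
        x = f_ir(x, y, k)       # Eq. (16): HR image refinement by the IR model
    return x, k
```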
Algorithm 2: The proposed DIP-DKP.
1 Given: $\boldsymbol{y}$, $\phi_x^0$, $\phi_{DKP}^0$, $\boldsymbol{x}^0 = \mathrm{G}_x(\phi_x^0)$
2 for $t \gets 0, 1, \ldots, T-1$ do
3   % DKP-based kernel estimation stage
4   $\phi_{DKP}^{t+1} = \phi_{DKP}^{t} + \frac{\delta^2}{2} \frac{\partial \log p(\phi_{DKP}^{t} \mid \boldsymbol{x}^{t}, \boldsymbol{y})}{\partial \phi_{DKP}^{t}} + \delta \frac{\partial \log p(\phi_{DKP}^{t} \mid \boldsymbol{k}_p^{t})}{\partial \phi_{DKP}^{t}}$
5   $\boldsymbol{k}^{t+1} = \mathrm{G}_{DKP}(\phi_{DKP}^{t+1})$
6   % DIP-based image restoration stage
7   $\phi_x^{t+1} = \phi_x^{t} + \gamma_x^{t} \frac{\partial \log p(\phi_x^{t} \mid \boldsymbol{y}, \boldsymbol{k}^{t})}{\partial \phi_x^{t}}$
8   $\boldsymbol{x}^{t+1} = \mathrm{G}_x(\phi_x^{t+1})$
9 end
10 Output: $\boldsymbol{x}^T$, $\boldsymbol{k}^T$

4.2. The proposed DIP-DKP

DIP-based Image Restoration. DIP [44] is designed to capture low-level image statistics and estimates the HR image $\boldsymbol{x} = \mathrm{G}_x(\boldsymbol{z}_x, \phi_x)$ from a fixed random noise input $\boldsymbol{z}_x$ (we omit $\boldsymbol{z}_x$ in the rest of this paper for notational convenience). A typical formulation of DIP-based BSR methods [25, 34] is given as follows,

$$\left\{ \begin{array}{l} \phi_x^*, \phi_k^* = \underset{\phi_x, \phi_k}{\arg\min} \left\| \boldsymbol{y} - \left(\mathrm{G}_x\left(\phi_x\right) \otimes \mathrm{G}_k\left(\phi_k\right)\right) \downarrow_s \right\|_F^2, \\ \boldsymbol{x}^* = \mathrm{G}_x\left(\phi_x^*\right), \quad \boldsymbol{k}^* = \mathrm{G}_k\left(\phi_k^*\right). \end{array} \right. \tag{18}$$

Double-DIP [34] and FKP-DIP [25] have demonstrated the effectiveness of this formulation for the BSR problem. However, the kernel prior $\mathrm{G}_k(\phi_k^*)$ either relies on an untrained network with limited kernel estimation performance [34], or on a pre-trained kernel network, namely FKP [25], which requires supervised training in advance. As shown in the experiments, pre-trained networks fail to generate reasonable kernel estimates when the kernel categories vary.

Proposed DIP-DKP. We replace the untrained or pre-trained kernel prior networks in the existing DIP-based alternating framework with the proposed DKP model, which we refer to as DIP-DKP.
The objective of our proposed DIP-DKP can be formulated as follows,

$$\left\{ \begin{array}{l} \phi_x^*, \phi_{DKP}^* = \underset{\phi_x, \phi_{DKP}}{\arg\min} \left\| \boldsymbol{y} - \left(\mathrm{G}_{DKP}\left(\phi_{DKP}\right) \otimes \mathrm{G}_x\left(\phi_x\right)\right) \downarrow_s \right\|_F^2 + \left\| \mathrm{G}_{DKP}\left(\phi_{DKP}\right) - \boldsymbol{k}_p \right\|_F^2, \\ \boldsymbol{x}^* = \mathrm{G}_x\left(\phi_x^*\right), \quad \boldsymbol{k}^* = \mathrm{G}_{DKP}\left(\phi_{DKP}^*\right), \end{array} \right. \tag{20}$$

where $\mathrm{G}_{DKP}(\phi_{DKP})$ is the kernel network of the proposed DKP model.

The overall solution procedure of the proposed DIP-DKP is given in Algorithm 2. At each $t^{th}$ iteration, the kernel $\pmb{k}^t$ is estimated in the DKP-based kernel estimation stage and then assigned to the DIP model for HR image restoration in the forward propagation. In the back propagation, the parameters of DIP and DKP, i.e., $\phi_x$ and $\phi_{DKP}$, are updated while solving the BSR problem via unsupervised inference. With DKP, DIP-DKP realizes adaptive kernel learning along the convergence trajectory of the BSR objective, enabling accurate and dynamic kernel estimation. Therefore, without expensive labeled data or long training time in advance, DIP-DKP estimates the HR image and blur kernel simultaneously in a plug-and-play style.
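A rough PyTorch-style rendering of Algorithm 2 is given below, reusing the `dkp_iteration` sketch above for the kernel estimation stage and implementing the DIP stage as a manual ascent step on $\log p(\phi_x \mid \boldsymbol{y}, \boldsymbol{k}^t)$, taken here to be the negative LR reconstruction error. `G_x` is the DIP generator with its fixed noise `z_x`, and the step size `gamma_x` is illustrative, not the paper's value.

```python
import torch

def dip_dkp(y, G_x, z_x, G_k, z_k, sample_random_kernel, blur_and_downsample,
            T=1000, gamma_x=0.01, s=4):
    """Sketch of Algorithm 2 (DIP-DKP): alternate DKP kernel steps and DIP image steps."""
    k = None
    for t in range(T):
        # DKP-based kernel estimation stage (lines 3-5), reusing the sketches above.
        x = G_x(z_x).detach()
        k = dkp_iteration(G_k, z_k, x, y, sample_random_kernel, blur_and_downsample)
        # DIP-based image restoration stage (lines 6-8): ascend log p(phi_x | y, k^t).
        x_pred = G_x(z_x)
        log_p = -torch.sum((y - blur_and_downsample(x_pred, k, s)) ** 2)
        G_x.zero_grad()
        log_p.backward()
        with torch.no_grad():
            for p in G_x.parameters():
                if p.grad is not None:
                    p.add_(gamma_x * p.grad)   # phi_x^{t+1} = phi_x^t + gamma_x * grad
    return G_x(z_x).detach(), k                # x^T, k^T
```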
Algorithm 3: The proposed Diff-DKP.
1 Given: $\boldsymbol{y}$, $\phi_{DKP}^T$, $S_\theta$ and $\boldsymbol{x}_T \sim \mathcal{N}(\boldsymbol{0}, \boldsymbol{I})$
2 for $t \gets T, T-1, \ldots, 1$ do
3   % Diffusion-based image restoration process
4   $\boldsymbol{x}_{0|t} = \frac{1}{\sqrt{\bar{\alpha}^t}}\left(\boldsymbol{x}_t - S_\theta(\boldsymbol{x}_t, t)\sqrt{1 - \bar{\alpha}^t}\right)$
5   % DKP-incorporated data consistency refinement
6   $\phi_{DKP}^{t-1} = \phi_{DKP}^{t} + \frac{\delta^2}{2} \frac{\partial \log p(\phi_{DKP}^{t} \mid \boldsymbol{x}_{0|t}, \boldsymbol{y})}{\partial \phi_{DKP}^{t}} + \delta \frac{\partial \log p(\phi_{DKP}^{t} \mid \boldsymbol{k}_p^{t})}{\partial \phi_{DKP}^{t}}$
7   $\boldsymbol{k}^{t-1} = \mathrm{G}_{DKP}(\phi_{DKP}^{t-1})$
8   $\hat{\boldsymbol{x}}_{0|t} = \boldsymbol{x}_{0|t} + \gamma_x^{t} \frac{\partial \log p(\boldsymbol{x}_{0|t} \mid \boldsymbol{y}, \boldsymbol{k}^{t-1})}{\partial \boldsymbol{x}_{0|t}}$
9   $\boldsymbol{x}_{t-1} \sim p(\boldsymbol{x}_{t-1} \mid \boldsymbol{x}_t, \hat{\boldsymbol{x}}_{0|t})$
10 end
11 Output: $\boldsymbol{x}_0$, $\boldsymbol{k}^0$

4.3. Diff-DKP

Original DDPM Inference Process. Denoising diffusion probabilistic models (DDPM) [14] define a $T$-step forward process that adds noise to the data and a $T$-step reverse process that restores the desired data from the noise. When an off-the-shelf DDPM $S_\theta$ is applied to an image restoration problem, the reverse process is implemented as an inference process that estimates the high-quality image as follows,

$$\left\{ \begin{array}{l} \boldsymbol{x}_{0|t} = \frac{1}{\sqrt{\bar{\alpha}^t}}\left(\boldsymbol{x}_t - S_\theta\left(\boldsymbol{x}_t, t\right)\sqrt{1 - \bar{\alpha}^t}\right), \\ \boldsymbol{x}_{t-1} \sim p\left(\boldsymbol{x}_{t-1} \mid \boldsymbol{x}_t, \boldsymbol{x}_{0|t}\right), \end{array} \right. \tag{21}$$

where $\pmb{x}_{0|t}$ denotes the HR image $\pmb{x}_0$ estimated at the $t^{th}$ step, and $\bar{\alpha}^t$ is a hyper-parameter. To ensure that HR images $\pmb{x}_0 \sim q(\pmb{x})$ can be reconstructed from random noise $\pmb{x}_T \sim \mathcal{N}(\pmb{0}, \pmb{I})$, existing methods typically re-train [38] or fine-tune [50] the DDPM via supervised learning on LR-HR datasets, or provide the ground-truth kernel [46] to inject task-specific knowledge and guarantee convergence. However, the performance of DDPM remains unstable even when it is trained on a large amount of labeled data.

Proposed Diff-DKP. The instability of DDPM mainly comes from a training process that involves multiple image processing tasks. In this case, the off-the-shelf diffusion model cannot concentrate on the BSR objective, which leads to image distortion and content mismatch. To alleviate this issue, the proposed Diff-DKP incorporates the DKP model to provide task-specific data-consistency knowledge on top of the vanilla DDPM reverse iterations.
Specifically, an external DKP-incorporated data-consistency refinement of $\pmb{x}_{0|t}$ is inserted between (21) and (22), given by

$$\hat{\boldsymbol{x}}_{0|t} = \boldsymbol{x}_{0|t} + \gamma_x^t \frac{\partial \log p\left(\boldsymbol{x}_{0|t} \mid \boldsymbol{y}, \boldsymbol{k}^t\right)}{\partial \boldsymbol{x}_{0|t}}, \tag{23}$$

where $\gamma_x^t$ is the update step, and

$$\log p\left(\boldsymbol{x}_{0|t} \mid \boldsymbol{y}, \boldsymbol{k}^t\right) = -\left\| \boldsymbol{y} - \left(\boldsymbol{x}_{0|t} \otimes \boldsymbol{k}^t\right) \downarrow_s \right\|_F^2, \tag{24}$$

which steers the inference process toward a data-consistent solution.

The overview of the Diff-DKP algorithm is presented in Algorithm 3. Let $t = T, T-1, \ldots, 1$ denote the index of the diffusion reverse step. At each step, the diffusion model first estimates $\pmb{x}_{0|t}$. Then, the DKP model adaptively generates a kernel prior with respect to the latest $\pmb{x}_{0|t}$, while $\pmb{x}_{0|t}$ is further updated with respect to the data consistency in Eq. (24), ensuring that the inference process follows the BSR objective. It is worth pointing out that the parameters of the diffusion model are fixed; only the parameters of the lightweight kernel estimator network are optimized during inference.

In this way, the off-the-shelf diffusion model plays the role of the HR image estimator, while the estimated HR image is further refined by the BSR task-specific prior knowledge in Eq. (23). Different from methods that incorporate BSR prior knowledge via supervised re-training or fine-tuning, Diff-DKP behaves as a plug-and-play scheme, without data demands or training cost before deployment.
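One reverse step of Algorithm 3 can be sketched as follows. Here `S_theta`, the cumulative schedule `alpha_bar` and the ancestral-sampling helper `posterior_sample` are assumed to come from a frozen off-the-shelf DDPM, the kernel `k_t` is assumed to have just been refreshed by a PKE-style update (lines 6-7), and the refinement of Eqs. (23)-(24) is written with an explicit autograd gradient; this is a hedged sketch rather than the reference code.

```python
import torch

def diff_dkp_step(x_t, t, y, k_t, S_theta, alpha_bar, posterior_sample,
                  blur_and_downsample, gamma_x=1.0, s=4):
    """One reverse step of Algorithm 3: DDPM prediction (Eq. 21) + DKP data-consistency
    refinement (Eqs. 23-24), followed by ancestral sampling of x_{t-1}."""
    a_bar = alpha_bar[t]
    # Eq. (21): predict x_{0|t} from x_t with the frozen diffusion model.
    with torch.no_grad():
        x0_t = (x_t - S_theta(x_t, t) * torch.sqrt(1.0 - a_bar)) / torch.sqrt(a_bar)
    # Eqs. (23)-(24): refine x_{0|t} toward data consistency with the estimated kernel k^t.
    x0_t = x0_t.requires_grad_(True)
    log_p = -torch.sum((y - blur_and_downsample(x0_t, k_t, s)) ** 2)
    grad = torch.autograd.grad(log_p, x0_t)[0]
    x0_hat = (x0_t + gamma_x * grad).detach()
    # Line 9: ancestral sampling x_{t-1} ~ p(x_{t-1} | x_t, x_hat_{0|t}).
    return posterior_sample(x_t, x0_hat, t)
```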
For the motion kernel, we adopt the random motion kernel generation method proposed by [22], which simulates realistic and complex blur kernels from random trajectories. Detailed formulations of Gaussian and motion kernels are given in the supplementary material. We synthesize LR images with random kernels with respect to Eq. (1) as testing data based on five popular public benchmark datasets, including Set5 [4], Set14 [52], BSD100 [30], Urban100 [15] and RealSRSet [23]. We compare these kernels in terms of the peak" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "text", + "content": "signal-to-noise ratio (PSNR), and compare HR images in terms of PSNR and structural similarity (SSIM) [47]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 97, + 546, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 97, + 546, + 289 + ], + "spans": [ + { + "bbox": [ + 304, + 97, + 546, + 289 + ], + "type": "text", + "content": "Comparison Methods. The proposed DIP-DKP and Diff-DKP are compared with existing baselines including: Double-DIP [34], DIP-FKP [25], DASR [45], BSRDM [51], DCLS [29], DARM [58] and DiffBIR [26]. Specifically, Double-DIP tends to provide kernel priors by training an FCN only with respect to the LR image restoration error. DIP-FKP incorporates the FKP model, which is pre-trained on kernel datasets, as a kernel prior. KernelGAN+ZSSR and DARM are self-supervised and train an internal generative adversarial network (GAN) to estimate the blur kernel. BSRDM formulates elaborate degradation modelling of noise and kernel with handcrafted priors. DASR is a representative end-to-end method that is pre-trained on the DIV2K [1] and Flickr2K [43] HR image datasets. DiffBIR is fine-tuned on labeled BSR datasets before being applied to estimate HR images." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 289, + 547, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 289, + 547, + 422 + ], + "spans": [ + { + "bbox": [ + 304, + 289, + 547, + 422 + ], + "type": "text", + "content": "Implementation and Hyper-parameters. The adopted kernel estimation network " + }, + { + "bbox": [ + 304, + 289, + 547, + 422 + ], + "type": "inline_equation", + "content": "\mathbf{G}_k" + }, + { + "bbox": [ + 304, + 289, + 547, + 422 + ], + "type": "text", + "content": " of the PKE module in this paper is a three-layer fully-connected network (FCN). The adopted DIP model follows the original settings in [44], and the diffusion model is the vanilla version [14] that is trained on ImageNet [6]. The number of sampling times in the MCMC simulation " + }, + { + "bbox": [ + 304, + 289, + 547, + 422 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 289, + 547, + 422 + ], + "type": "text", + "content": " is the only hyper-parameter in the proposed DKP model. The hyper-parameter tuning results are given in Table 1. It is clear that the performance reaches equilibrium around " + }, + { + "bbox": [ + 304, + 289, + 547, + 422 + ], + "type": "inline_equation", + "content": "L \in [4,8]" + }, + { + "bbox": [ + 304, + 289, + 547, + 422 + ], + "type": "text", + "content": ". 
To balance the efficiency and effectiveness, we set " + }, + { + "bbox": [ + 304, + 289, + 547, + 422 + ], + "type": "inline_equation", + "content": "L = 5" + }, + { + "bbox": [ + 304, + 289, + 547, + 422 + ], + "type": "text", + "content": " in this paper." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 432, + 492, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 432, + 492, + 445 + ], + "spans": [ + { + "bbox": [ + 306, + 432, + 492, + 445 + ], + "type": "text", + "content": "5.2. Comparison with State-of-the-Arts" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 451, + 545, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 451, + 545, + 630 + ], + "spans": [ + { + "bbox": [ + 304, + 451, + 545, + 630 + ], + "type": "text", + "content": "Evaluation on Gaussian Kernel Scenario. Quantitative evaluation results on four datasets with scale factors " + }, + { + "bbox": [ + 304, + 451, + 545, + 630 + ], + "type": "inline_equation", + "content": "s = 4" + }, + { + "bbox": [ + 304, + 451, + 545, + 630 + ], + "type": "text", + "content": " are presented in the upper half part of Table 2. We can see that the proposed DIP-DKP and Diff-DKP achieve the second and the best results on all datasets. We note that DIP-DKP only realizes slightly higher performance than the existing state-of-the-art (SotA) methods, while Diff-DKP achieves significantly better performances. This recalls our demonstrations in Section 4: DIP-DKP is totally trained while solving from scratch, and the DKP model plays the role of providing better convergence guarantee. Diff-DKP utilizes the DKP model to guide the well-trained diffusion model with fruitful data priors to converge to BSR task for better HR image restoration performances. In Table 3, we further show that our DKP model achieves the accurate ker-" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 315, + 642, + 425, + 705 + ], + "blocks": [ + { + "bbox": [ + 315, + 642, + 425, + 705 + ], + "lines": [ + { + "bbox": [ + 315, + 642, + 425, + 705 + ], + "spans": [ + { + "bbox": [ + 315, + 642, + 425, + 705 + ], + "type": "image", + "image_path": "e17616ecb84820f3c50d57a76c2eab77ff14b9093bb41a43ffdba1ebef265c94.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 325, + 708, + 526, + 718 + ], + "lines": [ + { + "bbox": [ + 325, + 708, + 526, + 718 + ], + "spans": [ + { + "bbox": [ + 325, + 708, + 526, + 718 + ], + "type": "text", + "content": "Figure 4. The visualization of the adopted blur kernels." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 430, + 643, + 536, + 704 + ], + "blocks": [ + { + "bbox": [ + 430, + 643, + 536, + 704 + ], + "lines": [ + { + "bbox": [ + 430, + 643, + 536, + 704 + ], + "spans": [ + { + "bbox": [ + 430, + 643, + 536, + 704 + ], + "type": "image", + "image_path": "734751729182f53bed5f8588502ac7611eefe08dd77ef41b8154c2eaf7769b59.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "26051" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 118, + 87, + 476, + 118 + ], + "blocks": [ + { + "bbox": [ + 61, + 70, + 531, + 82 + ], + "lines": [ + { + "bbox": [ + 61, + 70, + 531, + 82 + ], + "spans": [ + { + "bbox": [ + 61, + 70, + 531, + 82 + ], + "type": "text", + "content": "Table 1. Average image PSNR performance of the proposed DIP-DKP and Diff-DKP on Set5 [4] on the Gaussian kernel scenario." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 118, + 87, + 476, + 118 + ], + "lines": [ + { + "bbox": [ + 118, + 87, + 476, + 118 + ], + "spans": [ + { + "bbox": [ + 118, + 87, + 476, + 118 + ], + "type": "table", + "html": "
MethodsL=0L=2L=4L=6L=8L=10L=15
DIP-DKP (Ours)20.9927.1228.4428.5728.5228.2928.03
Diff-DKP (Ours)21.9728.9529.4029.4729.7629.6729.26
", + "image_path": "2b850c0b06e72f0a6709d14b613e416cc47b5809173470775cd7b2293b4ed8e9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 89, + 152, + 504, + 304 + ], + "blocks": [ + { + "bbox": [ + 46, + 125, + 544, + 148 + ], + "lines": [ + { + "bbox": [ + 46, + 125, + 544, + 148 + ], + "spans": [ + { + "bbox": [ + 46, + 125, + 544, + 148 + ], + "type": "text", + "content": "Table 2. Average PSNR/SSIM of different methods on public datasets that are synthesized by the random Gaussian/Motion kernels with " + }, + { + "bbox": [ + 46, + 125, + 544, + 148 + ], + "type": "inline_equation", + "content": "s = 4" + }, + { + "bbox": [ + 46, + 125, + 544, + 148 + ], + "type": "text", + "content": ". The best and second best results are highlighted in red and blue colors, respectively." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 89, + 152, + 504, + 304 + ], + "lines": [ + { + "bbox": [ + 89, + 152, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 89, + 152, + 504, + 304 + ], + "type": "table", + "html": "
MethodKernelSet5 [4]Set14 [52]BSD100 [30]Urban100 [15]
Double-DIP [34]20.99/0.557818.31/0.442618.57/0.381518.15/0.4491
DASR [45]27.37/0.785925.43/0.659125.11/0.612922.88/0.6448
DIP-FKP [25]27.77/0.791425.65/0.676425.15/0.635422.89/0.6327
BSRDM [51]Gaussian kernel scenario27.81/0.802925.35/0.685925.61/0.652622.36/0.6601
DCLS [29]27.50/0.794825.68/0.663925.34/0.616922.92/0.6475
DiffBIR [26]25.15/0.646823.01/0.593523.88/0.558621.94/0.5657
DARM [58]26.25/0.681824.19/0.618724.29/0.589822.14/0.5967
DIP-DKP (Ours)28.03/0.803925.98/0.687825.66/0.653123.24/0.6644
Diff-DKP (Ours)29.44/0.859226.76/0.740026.63/0.705723.92/0.6875
Double-DIP [34]18.92/0.451020.41/0.484719.00/0.375715.42/0.2932
DASR [45]24.21/0.725224.16/0.614522.47/0.583620.24/0.5478
DIP-FKP [25]24.61/0.737124.21/0.622722.80/0.588020.33/0.5572
BSRDM [51]Motion kernel scenario24.01/0.709823.56/0.600922.62/0.579120.40/0.5494
DCLS [29]24.78/0.732324.38/0.621122.74/0.592220.49/0.5534
DiffBIR [26]23.63/0.636723.59/0.604322.35/0.578420.14/0.5347
DARM [58]24.23/0.726923.95/0.629422.48/0.583020.58/0.5595
DIP-DKP (Ours)25.30/0.741724.52/0.643423.02/0.613621.24/0.5667
Diff-DKP (Ours)28.74/0.831326.03/0.671924.10/0.628722.26/0.5862
", + "image_path": "e3bdeafbf2e62557ddf775f0bd155afbfa176997d3b2364d66433fbd9b07d19e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 317, + 212, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 317, + 212, + 329 + ], + "spans": [ + { + "bbox": [ + 47, + 317, + 212, + 329 + ], + "type": "text", + "content": "nel estimation with higher kernel PSNR." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 342, + 288, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 288, + 510 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 288, + 510 + ], + "type": "text", + "content": "Evaluation on Motion Kernel Scenario. The lower half part of Table 2 shows the simulation results on the motion kernel scenario. The supervised learning methods, i.e., DASR and DiffBIR, are re-trained/fine-tuned on motion kernel degraded HR image datasets. DIP-FKP is retrained on the motion kernel dataset. The proposed DIP-DKP and Diff-DKP show significantly better performance on the motion kernel scenario, which validates that the proposed DKP model has good generalization ability towards different kernel categories. Specifically, Diff-DKP presents stable PSNR/SSIM scores when being applied to estimate motion kernels, while the rest suffer significant performance drop. This indicates that the proposed DKP is expected to handle kernel varying tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 521, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 521, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 521, + 287, + 715 + ], + "type": "text", + "content": "Visual Results. The visual results of different methods on synthetic and real-world images are shown in Fig. 5. We can see that 1) In the case of Gaussian kernel, all methods are capable of producing satisfactory deblurring results, while our DIP-DKP and Diff-DKP yield better results with more accurate kernel estimation. 2) In the case of motion kernel, certain distortion on the estimated kernel can be seen in FKP-DKP and BSRDM fail to estimate motion kernel. Meanwhile, our DIP-DKP and Diff-DKP achieve approximately accurate motion kernel estimation. 3) In the case of real image, both DIP-FKP and BSRDM estimate the Gaussian-like kernels, whereas our DIP-DKP and Diff-DKP tend to estimate the non-Gaussian kernels. This verifies that an adaptive and flexible kernel estimation discipline is learned by our DKP model, which may fit the real-world applications better." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 312, + 352, + 541, + 483 + ], + "blocks": [ + { + "bbox": [ + 305, + 316, + 545, + 350 + ], + "lines": [ + { + "bbox": [ + 305, + 316, + 545, + 350 + ], + "spans": [ + { + "bbox": [ + 305, + 316, + 545, + 350 + ], + "type": "text", + "content": "Table 3. Average PSNR/SSIM of images and PSNR of kernels on Set14 [52] with " + }, + { + "bbox": [ + 305, + 316, + 545, + 350 + ], + "type": "inline_equation", + "content": "s = 4" + }, + { + "bbox": [ + 305, + 316, + 545, + 350 + ], + "type": "text", + "content": ". The best and second best results are highlighted in red and blue colors, respectively." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 312, + 352, + 541, + 483 + ], + "lines": [ + { + "bbox": [ + 312, + 352, + 541, + 483 + ], + "spans": [ + { + "bbox": [ + 312, + 352, + 541, + 483 + ], + "type": "table", + "html": "
MethodKernelKernel PSNRImage PSNR/SSIM
DIP-DKP without RKS37.9218.77/0.4227
Diff-DKP without RKS40.9317.33/0.3408
Double-DIP [34]Gaussian50.6218.31/0.4426
DIP-FKP [25]kernel54.4625.65/0.6764
BSRDM [51]scenario55.3825.35/0.6859
DIP-DKP (Ours)56.2025.98/0.6878
Diff-DKP (Ours)56.7626.76/0.7400
DIP-DKP without RKS34.9218.19/0.4223
Diff-DKP without RKS34.7817.65/0.3513
Double-DIP [34]Motion35.5220.41/0.4847
DIP-FKP [25]kernel37.5224.21/0.6227
BSRDM [51]scenario37.8823.56/0.6009
DIP-DKP (Ours)39.3324.52/0.6434
Diff-DKP (Ours)40.3726.03/0.6719
", + "image_path": "2a0d53fdf8594459ed1cf2f8db9041551becbd578049f0131e8903190deedb58.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 501, + 406, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 501, + 406, + 512 + ], + "spans": [ + { + "bbox": [ + 306, + 501, + 406, + 512 + ], + "type": "text", + "content": "5.3. Ablation Studies" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": "Ablation study of RKS module. The ablation studies are carried on the MCMC sampling of kernel priors. \"Without RKS\" denotes that the adopted DKP updates the kernel network only by the data-consistency term without the learned kernel prior. In Fig. 6 (left), it can be seen that the estimated kernels without RKS have significant distortion, leading to remarkable PSNR drop of the estimated HR image, while DIP-DKP can estimate Gaussian kernels precisely with respect to the ground truth (with red frame). Fig. 6 (right) shows that the accurate motion kernel estimation no longer exists when the RKS module is absent. It is thus obvious that without the kernel prior learned from the MCMC process, the Diff-DKP fails to converge to a rational motion kernel estimation. The average kernel and image results are shown in Table 3. Without kernel prior learned from the RKS module, the kernel estimation performances of DKP-" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "26052" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 64, + 138, + 126 + ], + "blocks": [ + { + "bbox": [ + 76, + 64, + 138, + 126 + ], + "lines": [ + { + "bbox": [ + 76, + 64, + 138, + 126 + ], + "spans": [ + { + "bbox": [ + 76, + 64, + 138, + 126 + ], + "type": "image", + "image_path": "51381e48c7b82f12ffa64ca0f6ec5f1bf5b357e0570436240c9e19b7bea6feb5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 139, + 64, + 201, + 126 + ], + "blocks": [ + { + "bbox": [ + 139, + 64, + 201, + 126 + ], + "lines": [ + { + "bbox": [ + 139, + 64, + 201, + 126 + ], + "spans": [ + { + "bbox": [ + 139, + 64, + 201, + 126 + ], + "type": "image", + "image_path": "6fe0a4fbb7052d95cf117db8fc3a770d279cfd562b59d9b4159bfa939140f05a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 203, + 64, + 264, + 126 + ], + "blocks": [ + { + "bbox": [ + 203, + 64, + 264, + 126 + ], + "lines": [ + { + "bbox": [ + 203, + 64, + 264, + 126 + ], + "spans": [ + { + "bbox": [ + 203, + 64, + 264, + 126 + ], + "type": "image", + "image_path": "93da8cdbaedea7e10231537944da49d9cba747400f7f03f6564760528175ae3a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 266, + 64, + 328, + 126 + ], + "blocks": [ + { + "bbox": [ + 266, + 64, + 328, + 126 + ], + "lines": [ + { + 
"bbox": [ + 266, + 64, + 328, + 126 + ], + "spans": [ + { + "bbox": [ + 266, + 64, + 328, + 126 + ], + "type": "image", + "image_path": "fd453a0940f59143122da358bb162cc8e87bd01ce860b1da7493f6950ee3f68c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 329, + 64, + 391, + 126 + ], + "blocks": [ + { + "bbox": [ + 329, + 64, + 391, + 126 + ], + "lines": [ + { + "bbox": [ + 329, + 64, + 391, + 126 + ], + "spans": [ + { + "bbox": [ + 329, + 64, + 391, + 126 + ], + "type": "image", + "image_path": "f5d8382d84cccc79b62efcfb64ee7224e2388c883d7311572724fcab0e44af20.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 392, + 64, + 454, + 126 + ], + "blocks": [ + { + "bbox": [ + 392, + 64, + 454, + 126 + ], + "lines": [ + { + "bbox": [ + 392, + 64, + 454, + 126 + ], + "spans": [ + { + "bbox": [ + 392, + 64, + 454, + 126 + ], + "type": "image", + "image_path": "1f5ba65f5734f6709d5b9df45412ddb776b1b2ecf2168c4de1a0ac93e89b3e17.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 455, + 64, + 517, + 126 + ], + "blocks": [ + { + "bbox": [ + 455, + 64, + 517, + 126 + ], + "lines": [ + { + "bbox": [ + 455, + 64, + 517, + 126 + ], + "spans": [ + { + "bbox": [ + 455, + 64, + 517, + 126 + ], + "type": "image", + "image_path": "09a252daf9dc4b8900e2b9de6087a6ff342b922738b361011620a5119fc20b8e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 77, + 128, + 138, + 189 + ], + "blocks": [ + { + "bbox": [ + 77, + 128, + 138, + 189 + ], + "lines": [ + { + "bbox": [ + 77, + 128, + 138, + 189 + ], + "spans": [ + { + "bbox": [ + 77, + 128, + 138, + 189 + ], + "type": "image", + "image_path": "f3a3cd032b95adefc909fee52f059087439b88452adc7657da88e042eebd6738.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 139, + 128, + 201, + 189 + ], + "blocks": [ + { + "bbox": [ + 139, + 128, + 201, + 189 + ], + "lines": [ + { + "bbox": [ + 139, + 128, + 201, + 189 + ], + "spans": [ + { + "bbox": [ + 139, + 128, + 201, + 189 + ], + "type": "image", + "image_path": "ce3551ad7534d0ccc01bcc62ffc84f5e89bc1a14126b035e0575fd7c791ca54b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 203, + 128, + 264, + 189 + ], + "blocks": [ + { + "bbox": [ + 203, + 128, + 264, + 189 + ], + "lines": [ + { + "bbox": [ + 203, + 128, + 264, + 189 + ], + "spans": [ + { + "bbox": [ + 203, + 128, + 264, + 189 + ], + "type": "image", + "image_path": "52e589b8f5a7a0c6603398b67349f69cc93b1448ecadb586b5eee1d8cc1a16e2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 266, + 128, + 328, + 189 + ], + "blocks": [ + { + "bbox": [ + 266, + 128, + 328, + 189 + ], + "lines": [ + { + "bbox": [ + 266, + 128, + 328, + 189 + ], + "spans": [ + { + "bbox": [ + 266, + 128, + 328, + 189 + ], + "type": "image", + "image_path": "30812206585b82b02566959cc728f1b819eb7e8da33302746a414dc44b8463f3.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 329, + 128, + 391, + 189 + ], + "blocks": [ + { + "bbox": [ + 
329, + 128, + 391, + 189 + ], + "lines": [ + { + "bbox": [ + 329, + 128, + 391, + 189 + ], + "spans": [ + { + "bbox": [ + 329, + 128, + 391, + 189 + ], + "type": "image", + "image_path": "96145f05ad97ff295b0d4ff1f4c08dcc6a2c9ea864dfe2e7a6bb2dfec5c0aaac.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 392, + 128, + 454, + 189 + ], + "blocks": [ + { + "bbox": [ + 392, + 128, + 454, + 189 + ], + "lines": [ + { + "bbox": [ + 392, + 128, + 454, + 189 + ], + "spans": [ + { + "bbox": [ + 392, + 128, + 454, + 189 + ], + "type": "image", + "image_path": "e9e107ff03f3c40c994c36439711e4d805a8958f16c19221524f8b55e6956fb6.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 455, + 128, + 517, + 189 + ], + "blocks": [ + { + "bbox": [ + 455, + 128, + 517, + 189 + ], + "lines": [ + { + "bbox": [ + 455, + 128, + 517, + 189 + ], + "spans": [ + { + "bbox": [ + 455, + 128, + 517, + 189 + ], + "type": "image", + "image_path": "29d07cf3a2031de882d1c1f0fcc0dae3b2ca02b5858e19a44cb2d9d95d15df70.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 77, + 191, + 138, + 252 + ], + "blocks": [ + { + "bbox": [ + 77, + 191, + 138, + 252 + ], + "lines": [ + { + "bbox": [ + 77, + 191, + 138, + 252 + ], + "spans": [ + { + "bbox": [ + 77, + 191, + 138, + 252 + ], + "type": "image", + "image_path": "197ced1e62b01a2b9e9887f25a208d29d992ee2b50a7d3912a9f3005e5eb4a76.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 95, + 255, + 119, + 263 + ], + "lines": [ + { + "bbox": [ + 95, + 255, + 119, + 263 + ], + "spans": [ + { + "bbox": [ + 95, + 255, + 119, + 263 + ], + "type": "inline_equation", + "content": "\\mathrm{LR}(\\times 4)" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 139, + 191, + 201, + 252 + ], + "blocks": [ + { + "bbox": [ + 139, + 191, + 201, + 252 + ], + "lines": [ + { + "bbox": [ + 139, + 191, + 201, + 252 + ], + "spans": [ + { + "bbox": [ + 139, + 191, + 201, + 252 + ], + "type": "image", + "image_path": "ec6891048cb6675c0de44197b91d426539a2615e80fea325230bcc13a897df73.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 160, + 255, + 181, + 262 + ], + "lines": [ + { + "bbox": [ + 160, + 255, + 181, + 262 + ], + "spans": [ + { + "bbox": [ + 160, + 255, + 181, + 262 + ], + "type": "text", + "content": "DASR" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 149, + 266, + 192, + 274 + ], + "lines": [ + { + "bbox": [ + 149, + 266, + 192, + 274 + ], + "spans": [ + { + "bbox": [ + 149, + 266, + 192, + 274 + ], + "type": "text", + "content": "27.42/27.74/-" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 203, + 191, + 264, + 252 + ], + "blocks": [ + { + "bbox": [ + 203, + 191, + 264, + 252 + ], + "lines": [ + { + "bbox": [ + 203, + 191, + 264, + 252 + ], + "spans": [ + { + "bbox": [ + 203, + 191, + 264, + 252 + ], + "type": "image", + "image_path": "f04ff3c280e1a78fba329d678d5e36337c5003aadb5313bd385cefca4583cdc2.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 222, + 255, + 246, + 262 + ], + "lines": [ + { 
+ "bbox": [ + 222, + 255, + 246, + 262 + ], + "spans": [ + { + "bbox": [ + 222, + 255, + 246, + 262 + ], + "type": "text", + "content": "DRAM" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 212, + 266, + 255, + 274 + ], + "lines": [ + { + "bbox": [ + 212, + 266, + 255, + 274 + ], + "spans": [ + { + "bbox": [ + 212, + 266, + 255, + 274 + ], + "type": "text", + "content": "26.69/26.84/-" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 266, + 191, + 328, + 252 + ], + "blocks": [ + { + "bbox": [ + 266, + 191, + 328, + 252 + ], + "lines": [ + { + "bbox": [ + 266, + 191, + 328, + 252 + ], + "spans": [ + { + "bbox": [ + 266, + 191, + 328, + 252 + ], + "type": "image", + "image_path": "d2d71d944caebd8064bbe29f6c79c2982956524cf98e277970c2a3f431a21a18.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 255, + 312, + 262 + ], + "lines": [ + { + "bbox": [ + 282, + 255, + 312, + 262 + ], + "spans": [ + { + "bbox": [ + 282, + 255, + 312, + 262 + ], + "type": "text", + "content": "FKP-DIP" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 275, + 266, + 318, + 274 + ], + "lines": [ + { + "bbox": [ + 275, + 266, + 318, + 274 + ], + "spans": [ + { + "bbox": [ + 275, + 266, + 318, + 274 + ], + "type": "text", + "content": "27.80/28.95/-" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 329, + 191, + 391, + 252 + ], + "blocks": [ + { + "bbox": [ + 329, + 191, + 391, + 252 + ], + "lines": [ + { + "bbox": [ + 329, + 191, + 391, + 252 + ], + "spans": [ + { + "bbox": [ + 329, + 191, + 391, + 252 + ], + "type": "image", + "image_path": "4a56c861d1cc9720ff0203c8caa3358c9f0550d2f626e41d21ffe6d577efbc19.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 346, + 255, + 373, + 262 + ], + "lines": [ + { + "bbox": [ + 346, + 255, + 373, + 262 + ], + "spans": [ + { + "bbox": [ + 346, + 255, + 373, + 262 + ], + "type": "text", + "content": "BSRDM" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 338, + 266, + 381, + 274 + ], + "lines": [ + { + "bbox": [ + 338, + 266, + 381, + 274 + ], + "spans": [ + { + "bbox": [ + 338, + 266, + 381, + 274 + ], + "type": "text", + "content": "27.60/29.14/-" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 381, + 255, + 448, + 262 + ], + "lines": [ + { + "bbox": [ + 381, + 255, + 448, + 262 + ], + "spans": [ + { + "bbox": [ + 381, + 255, + 448, + 262 + ], + "type": "text", + "content": "DIP-DKP(ours)" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 392, + 191, + 454, + 252 + ], + "blocks": [ + { + "bbox": [ + 392, + 191, + 454, + 252 + ], + "lines": [ + { + "bbox": [ + 392, + 191, + 454, + 252 + ], + "spans": [ + { + "bbox": [ + 392, + 191, + 454, + 252 + ], + "type": "image", + "image_path": "02d0f327e96b0d90d885714b522364b12286274cb2ed518bcb489ed14a7a5a86.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 401, + 266, + 445, + 274 + ], + "lines": [ + { + "bbox": [ + 401, + 266, + 445, + 274 + ], + "spans": [ + { + "bbox": [ + 401, + 266, + 445, + 274 + ], + "type": "text", + "content": "28.07/29.45/-" + } + 
] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 455, + 191, + 517, + 252 + ], + "blocks": [ + { + "bbox": [ + 455, + 191, + 517, + 252 + ], + "lines": [ + { + "bbox": [ + 455, + 191, + 517, + 252 + ], + "spans": [ + { + "bbox": [ + 455, + 191, + 517, + 252 + ], + "type": "image", + "image_path": "5c6a086412b71b626a213b9aee06a0c66f89a5c22ea99350d6310c5fa404b291.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 461, + 255, + 512, + 262 + ], + "lines": [ + { + "bbox": [ + 461, + 255, + 512, + 262 + ], + "spans": [ + { + "bbox": [ + 461, + 255, + 512, + 262 + ], + "type": "text", + "content": "Diff-DKP(ours)" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 465, + 266, + 507, + 274 + ], + "lines": [ + { + "bbox": [ + 465, + 266, + 507, + 274 + ], + "spans": [ + { + "bbox": [ + 465, + 266, + 507, + 274 + ], + "type": "text", + "content": "29.38/29.89/-" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 48, + 297, + 167, + 392 + ], + "blocks": [ + { + "bbox": [ + 89, + 266, + 126, + 274 + ], + "lines": [ + { + "bbox": [ + 89, + 266, + 126, + 274 + ], + "spans": [ + { + "bbox": [ + 89, + 266, + 126, + 274 + ], + "type": "text", + "content": "PSNR (dB)" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 48, + 297, + 167, + 392 + ], + "lines": [ + { + "bbox": [ + 48, + 297, + 167, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 297, + 167, + 392 + ], + "type": "image", + "image_path": "4e52cfe17c2e44f9daf2a30e95937eeefb07f88e212e3a0ed62a09e8263c4eae.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 78, + 392, + 146, + 402 + ], + "lines": [ + { + "bbox": [ + 78, + 392, + 146, + 402 + ], + "spans": [ + { + "bbox": [ + 78, + 392, + 146, + 402 + ], + "type": "text", + "content": "(a) BSD100 \"040\"" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 403, + 286, + 425 + ], + "lines": [ + { + "bbox": [ + 47, + 403, + 286, + 425 + ], + "spans": [ + { + "bbox": [ + 47, + 403, + 286, + 425 + ], + "type": "text", + "content": "Figure 6. The intermediate results of DIP-DKP, Diff-DKP and their no RKS module versions over iterations on two test images." + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 167, + 297, + 286, + 392 + ], + "blocks": [ + { + "bbox": [ + 47, + 277, + 545, + 289 + ], + "lines": [ + { + "bbox": [ + 47, + 277, + 545, + 289 + ], + "spans": [ + { + "bbox": [ + 47, + 277, + 545, + 289 + ], + "type": "text", + "content": "Figure 5. Visual results of different methods on public datasets for scale factor 4. Estimated/ground-truth kernels are shown on the top left." 
+ } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 167, + 297, + 286, + 392 + ], + "lines": [ + { + "bbox": [ + 167, + 297, + 286, + 392 + ], + "spans": [ + { + "bbox": [ + 167, + 297, + 286, + 392 + ], + "type": "image", + "image_path": "df228a524780145f17249603a2e92cd2ed8548707b36487d01ad17ba7aaf7fa7.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 196, + 392, + 269, + 401 + ], + "lines": [ + { + "bbox": [ + 196, + 392, + 269, + 401 + ], + "spans": [ + { + "bbox": [ + 196, + 392, + 269, + 401 + ], + "type": "text", + "content": "(b) Set14 \"monarch\"" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_caption" + } + ], + "index": 38 + }, + { + "type": "table", + "bbox": [ + 51, + 446, + 282, + 487 + ], + "blocks": [ + { + "bbox": [ + 52, + 432, + 281, + 443 + ], + "lines": [ + { + "bbox": [ + 52, + 432, + 281, + 443 + ], + "spans": [ + { + "bbox": [ + 52, + 432, + 281, + 443 + ], + "type": "text", + "content": "Table 4. The ablation of PKE module. (Set5, x4, image PSNR)" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 51, + 446, + 282, + 487 + ], + "lines": [ + { + "bbox": [ + 51, + 446, + 282, + 487 + ], + "spans": [ + { + "bbox": [ + 51, + 446, + 282, + 487 + ], + "type": "table", + "html": "
Layers\\Units10100100010000
113.7523.5728.9328.24
313.6128.9728.4828.35
513.3028.8128.5226.65
713.8628.3028.5427.93
", + "image_path": "d3a20a9b0e3a30cbceb0ce64d3be2524a080deeb4df58cdd7b8843025d10532b.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "table_body" + } + ], + "index": 42 + }, + { + "bbox": [ + 46, + 495, + 286, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 495, + 286, + 518 + ], + "spans": [ + { + "bbox": [ + 46, + 495, + 286, + 518 + ], + "type": "text", + "content": "based BSR methods have a significant performance drop, leading to poor image restoration performance as well." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 46, + 519, + 287, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 519, + 287, + 626 + ], + "spans": [ + { + "bbox": [ + 46, + 519, + 287, + 626 + ], + "type": "text", + "content": "Ablation study of PKE module. Since PKE essentially estimates blur kernels on the basis of the random kernel priors and LR observations, thus it is indispensable and we conduct ablation study on the different structures of kernel network in PKE module in Table 4. We find that the kernel network performs well when it has 3-7 layers with 100-1000 units in each layer. This indicates that the kernel network has good generalization-ability for the structure without the necessity of elaborately designing the network." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 47, + 635, + 261, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 635, + 261, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 635, + 261, + 647 + ], + "type": "text", + "content": "5.4. Model Size, Runtime and Memory Usage" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "content": "The kernel network of our DKP model has a total of " + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "inline_equation", + "content": "562K" + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "content": " parameters (FLOPs: " + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "inline_equation", + "content": "536K" + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "content": ") while Double-DIP and DIP-FKP have " + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "inline_equation", + "content": "641K" + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "content": " parameters (FLOPs: " + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "inline_equation", + "content": "600K" + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "content": ") and " + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "inline_equation", + "content": "143K" + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "content": " parameters (FLOPs: " + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "inline_equation", + "content": "178K" + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "content": "), respectively. 
The runtime and memory usage of our DIP-DKP on a GeForce RTX 3090" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 304, + 300, + 545, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 300, + 545, + 408 + ], + "spans": [ + { + "bbox": [ + 304, + 300, + 545, + 408 + ], + "type": "text", + "content": "GPU for generating a HR image of size " + }, + { + "bbox": [ + 304, + 300, + 545, + 408 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 304, + 300, + 545, + 408 + ], + "type": "text", + "content": " are about 92 seconds and 11GB memory, which is comparable with the Double-DIP (91 seconds and 11.2GB) and DIP-FKP (90 seconds and 10.6GB). As for Diff-DKP, the " + }, + { + "bbox": [ + 304, + 300, + 545, + 408 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 304, + 300, + 545, + 408 + ], + "type": "text", + "content": " image needs to be divided into four " + }, + { + "bbox": [ + 304, + 300, + 545, + 408 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 304, + 300, + 545, + 408 + ], + "type": "text", + "content": " images for restoration, which costs a total of 60 seconds and 4GB memory. Considering that our DIP-DKP and Diff-DKP are unsupervised and plug-and-play, it is reasonable to say that our methods have moderate computational costs." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 305, + 411, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 411, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 305, + 411, + 545, + 434 + ], + "type": "text", + "content": "Due to the page limitations, more experimental results are given in the supplementary material." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 306, + 450, + 378, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 450, + 378, + 462 + ], + "spans": [ + { + "bbox": [ + 306, + 450, + 378, + 462 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 304, + 472, + 545, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 472, + 545, + 702 + ], + "spans": [ + { + "bbox": [ + 304, + 472, + 545, + 702 + ], + "type": "text", + "content": "In this paper, we propose a dynamic kernel prior (DKP) model to solve the BSR problem in an unsupervised and pre-training-free paradigm. DKP realizes the rational kernel prior learning from MCMC sampling on random kernel distributions, providing accurate kernel estimation and thus leading to better HR image restoration. DKP can be easily incorporated with existing image restoration model, such as DIP and diffusion model, by replacing their kernel modeling modules or adding as an external kernel prior generator. When applied to solve the BSR problem, DKP is trained while solving the task with respect to the LR image restoration error, enabling no training necessity and labeled data demands. Extensive experiments on Gaussian and motion kernel scenarios with synthetic LR images and real-world images validate that DKP-based methods improve the kernel estimation accuracy significantly and thus lead to superior BSR results. We believe that the concept of using a trainable sampling process to provide adaptive priors will lead to a new direction in solving low-level tasks, aiming to achieve superior performance with modest computational costs in the way of unsupervised inference." 
+ } + ] + } + ], + "index": 50 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "26053" + } + ] + } + ], + "index": 51 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 126-135, 2017. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 287, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 287, + 169 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 287, + 169 + ], + "type": "text", + "content": "[2] Dominique Bakry and Michel Émery. Diffusions hypercontractives. In Séminaire de Probabilités XIX 1983/84: Proceedings, pages 177-206. Springer, 2006. 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 171, + 287, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 171, + 287, + 214 + ], + "spans": [ + { + "bbox": [ + 53, + 171, + 287, + 214 + ], + "type": "text", + "content": "[3] Sefi Bell-Kligler, Assaf Shocher, and Michal Irani. Blind super-resolution kernel estimation using an internal-gan. Advances in Neural Information Processing Systems, 32, 2019. 1, 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 217, + 287, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 217, + 287, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 217, + 287, + 270 + ], + "type": "text", + "content": "[4] Marco Bevilacqua, Aline Roumy, Christine Guillemot, and Marie Line Alberi-Morel. Low-complexity single-image super-resolution based on nonnegative neighbor embedding. In British Machine Vision Conference, pages 135-1, 2012. 6, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 273, + 287, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 273, + 287, + 316 + ], + "spans": [ + { + "bbox": [ + 53, + 273, + 287, + 316 + ], + "type": "text", + "content": "[5] Hyungjin Chung, Jeongsol Kim, Michael T McCann, Marc L Klasky, and Jong Chul Ye. Diffusion posterior sampling for general noisy inverse problems. arXiv preprint arXiv:2209.14687, 2022. 1, 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 318, + 287, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 318, + 287, + 361 + ], + "spans": [ + { + "bbox": [ + 53, + 318, + 287, + 361 + ], + "type": "text", + "content": "[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. 
In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 364, + 287, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 364, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 53, + 364, + 287, + 407 + ], + "type": "text", + "content": "[7] Chao Dong, Chen Change Loy, Kaiming He, and Xiaou Tang. Learning a deep convolutional network for image super-resolution. In European conference on computer vision, pages 184-199. Springer, 2014. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 409, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 409, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 53, + 409, + 287, + 464 + ], + "type": "text", + "content": "[8] Yangyi Dong, Xiaoyun Zhang, Zhixin Wang, Ya Zhang, Siheng Chen, and Yanfeng Wang. Unpaired face restoration via learnable cross-quality shift. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 667-675, 2022. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 466, + 287, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 466, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 53, + 466, + 287, + 520 + ], + "type": "text", + "content": "[9] Netalee Efrat, Daniel Glasner, Alexander Apartsin, Boaz Nadler, and Anat Levin. Accurate blur models vs. image priors in single image super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 2832-2839, 2013. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 522, + 287, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 522, + 287, + 576 + ], + "spans": [ + { + "bbox": [ + 48, + 522, + 287, + 576 + ], + "type": "text", + "content": "[10] Yosef Gandelsman, Assaf Shocher, and Michal Irani. \"double-dip\": Unsupervised image decomposition via coupled deep-image-priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11026-11035, 2019. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 578, + 287, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 287, + 621 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 287, + 621 + ], + "type": "text", + "content": "[11] Daniel Glasner, Shai Bagon, and Michal Irani. Superresolution from a single image. In 2009 IEEE 12th international conference on computer vision, pages 349-356. IEEE, 2009. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 287, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 677 + ], + "type": "text", + "content": "[12] Jinjin Gu, Hannan Lu, Wangmeng Zuo, and Chao Dong. Blind super-resolution with iterative kernel correction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1604-1613, 2019. 1, 2, 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[13] Lanqing Guo, Chong Wang, Wenhan Yang, Siyu Huang, Yufei Wang, Hanspeter Pfister, and Bihan Wen. 
Shadowdiffusion: When degradation prior meets diffusion model for" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 105 + ], + "type": "text", + "content": "shadow removal. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14049-14058, 2023. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 106, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 106, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 308, + 106, + 545, + 139 + ], + "type": "text", + "content": "[14] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 140, + 545, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 140, + 545, + 183 + ], + "spans": [ + { + "bbox": [ + 308, + 140, + 545, + 183 + ], + "type": "text", + "content": "[15] Jia-Bin Huang, Abhishek Singh, and Narendra Ahuja. Single image super-resolution from transformed self-exemplars. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5197-5206, 2015. 6, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 185, + 545, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 185, + 545, + 227 + ], + "spans": [ + { + "bbox": [ + 308, + 185, + 545, + 227 + ], + "type": "text", + "content": "[16] Yan Huang, Shang Li, Liang Wang, Tieniu Tan, et al. Unfolding the alternating optimization for blind super resolution. Advances in Neural Information Processing Systems, 33:5632-5643, 2020. 1, 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 228, + 545, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 228, + 545, + 270 + ], + "spans": [ + { + "bbox": [ + 308, + 228, + 545, + 270 + ], + "type": "text", + "content": "[17] Meiguang Jin, Stefan Roth, and Paolo Favaro. Normalized blind deconvolution. In Proceedings of the European Conference on Computer Vision (ECCV), pages 668-684, 2018. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 272, + 545, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 272, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 308, + 272, + 545, + 316 + ], + "type": "text", + "content": "[18] Jiwon Kim, Jung Kwon Lee, and Kyoung Mu Lee. Accurate image super-resolution using very deep convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1646-1654, 2016. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 317, + 545, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 317, + 545, + 360 + ], + "spans": [ + { + "bbox": [ + 308, + 317, + 545, + 360 + ], + "type": "text", + "content": "[19] Kwang In Kim and Younghee Kwon. Single-image superresolution using sparse regression and natural image prior. IEEE transactions on pattern analysis and machine intelligence, 32(6):1127-1133, 2010. 
1, 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 361, + 545, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 361, + 545, + 414 + ], + "spans": [ + { + "bbox": [ + 308, + 361, + 545, + 414 + ], + "type": "text", + "content": "[20] Soo Ye Kim, Hyeonjun Sim, and Munchurl Kim. Koalanet: Blind super-resolution using kernel-oriented adaptive local adjustment. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10611-10620, 2021. 1, 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 416, + 545, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 416, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 308, + 416, + 545, + 448 + ], + "type": "text", + "content": "[21] Dilip Krishnan and Rob Fergus. Fast image deconvolution using hyper-laplacian priors. Advances in neural information processing systems, 22, 2009. 1, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 449, + 545, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 449, + 545, + 503 + ], + "spans": [ + { + "bbox": [ + 308, + 449, + 545, + 503 + ], + "type": "text", + "content": "[22] Orest Kupyn, Volodymyr Budzan, Mykola Mykhailych, Dmytro Mishkin, and Jií Matas. Deblurgan: Blind motion deblurring using conditional adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8183-8192, 2018. 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 505, + 545, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 505, + 545, + 548 + ], + "spans": [ + { + "bbox": [ + 308, + 505, + 545, + 548 + ], + "type": "text", + "content": "[23] Yuelong Li, Mohammad Tofighi, Junyi Geng, Vishal Monga, and Yonina C Eldar. Efficient and interpretable deep blind image deblurring via algorithm unrolling. IEEE Transactions on Computational Imaging, 6:666-681, 2020. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 548, + 545, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 548, + 545, + 613 + ], + "spans": [ + { + "bbox": [ + 308, + 548, + 545, + 613 + ], + "type": "text", + "content": "[24] Yawei Li, Yuchen Fan, Xiaoyu Xiang, Denis Demandolx, Rakesh Ranjan, Radu Timofte, and Luc Van Gool. Efficient and explicit modelling of image hierarchies for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18278-18289, 2023. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 614, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 614, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 614, + 545, + 669 + ], + "type": "text", + "content": "[25] Jingyun Liang, Kai Zhang, Shuhang Gu, Luc Van Gool, and Radu Timofte. Flow-based kernel prior with application to blind super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10601-10610, 2021. 1, 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "text", + "content": "[26] Xinqi Lin, Jingwen He, Ziyan Chen, Zhaoyang Lyu, Ben Fei, Bo Dai, Wanli Ouyang, Yu Qiao, and Chao Dong. 
Diffbir: Towards blind image restoration with generative diffusion prior. arXiv preprint arXiv:2308.15070, 2023. 2, 6, 7" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "26054" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[27] Jie Liu, Wenjie Zhang, Yuting Tang, Jie Tang, and Gangshan Wu. Residual feature aggregation network for image superresolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2359-2368, 2020. 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 287, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 287, + 163 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 287, + 163 + ], + "type": "text", + "content": "[28] Zhengxiong Luo, Yan Huang, Shang Li, Liang Wang, and Tieniu Tan. End-to-end alternating optimization for blind super resolution. arXiv preprint arXiv:2105.06878, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 288, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 288, + 218 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 288, + 218 + ], + "type": "text", + "content": "[29] Ziwei Luo, Haibin Huang, Lei Yu, Youwei Li, Haoqiang Fan, and Shuaicheng Liu. Deep constrained least squares for blind image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17642-17652, 2022. 1, 2, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 219, + 288, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 219, + 288, + 285 + ], + "spans": [ + { + "bbox": [ + 48, + 219, + 288, + 285 + ], + "type": "text", + "content": "[30] David Martin, Charless Fowlkes, Doron Tal, and Jitendra Malik. A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, pages 416-423. IEEE, 2001. 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 285, + 288, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 285, + 288, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 285, + 288, + 319 + ], + "type": "text", + "content": "[31] Tomer Michaeli and Michal Irani. Nonparametric blind super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 945-952, 2013. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 320, + 287, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 320, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 48, + 320, + 287, + 342 + ], + "type": "text", + "content": "[32] Radford M Neal et al. Mcmc using hamiltonian dynamics. 
Handbook of markov chain monte carlo, 2(11):2, 2011. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "spans": [ + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "type": "text", + "content": "[33] Daniele Perrone and Paolo Favaro. A clearer picture of total variation blind deconvolution. IEEE transactions on pattern analysis and machine intelligence, 38(6):1041-1055, 2015. 1, 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 388, + 287, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 287, + 442 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 287, + 442 + ], + "type": "text", + "content": "[34] Dongwei Ren, Kai Zhang, Qilong Wang, Qinghua Hu, and Wangmeng Zuo. Neural blind deconvolution using deep priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3341-3350, 2020. 1, 2, 5, 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 444, + 287, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 444, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 48, + 444, + 287, + 498 + ], + "type": "text", + "content": "[35] Gernot Riegler, Samuel Schulter, Matthias Ruther, and Horst Bischof. Conditioned regression models for non-blind single image super-resolution. In Proceedings of the IEEE International Conference on Computer Vision, pages 522-530, 2015. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 500, + 287, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 500, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 48, + 500, + 287, + 533 + ], + "type": "text", + "content": "[36] Leonid I Rudin, Stanley Osher, and Emad Fatemi. Nonlinear total variation based noise removal algorithms. Physica D: nonlinear phenomena, 60(1-4):259-268, 1992. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 534, + 287, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 534, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 48, + 534, + 287, + 589 + ], + "type": "text", + "content": "[37] Marshall F Tappen Bryan C Russell and William T Freeman. Exploiting the sparse derivative prior for super-resolution and image demosaicing. In Proceedings of the Third International Workshop Statistical and Computational Theories of Vision, pages 1-28, 2003. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "type": "text", + "content": "[38] Chitwan Sahara, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image super-resolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(4):4713-4726, 2022. 2, 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 646, + 287, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 287, + 691 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 287, + 691 + ], + "type": "text", + "content": "[39] Assaf Shocher, Nadav Cohen, and Michal Irani. \"zero-shot\" super-resolution using deep internal learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3118-3126, 2018. 
1, 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "text", + "content": "[40] Jiaming Song, Arash Vahdat, Morteza Mardani, and Jan Kautz. Pseudoinverse-guided diffusion models for inverse" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 714 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "problems. In International Conference on Learning Representations, 2022. 1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 97, + 545, + 131 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 97, + 545, + 131 + ], + "spans": [ + { + "bbox": [ + 307, + 97, + 545, + 131 + ], + "type": "text", + "content": "[41] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. Advances in neural information processing systems, 32, 2019. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 132, + 545, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 132, + 545, + 176 + ], + "spans": [ + { + "bbox": [ + 307, + 132, + 545, + 176 + ], + "type": "text", + "content": "[42] Jian Sun, Zongben Xu, and Heung-Yeung Shum. Image super-resolution using gradient profile prior. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 178, + 545, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 178, + 545, + 233 + ], + "spans": [ + { + "bbox": [ + 307, + 178, + 545, + 233 + ], + "type": "text", + "content": "[43] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 114-125, 2017. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 235, + 545, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 235, + 545, + 280 + ], + "spans": [ + { + "bbox": [ + 307, + 235, + 545, + 280 + ], + "type": "text", + "content": "[44] Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. Deep image prior. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 9446-9454, 2018. 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 281, + 545, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 281, + 545, + 346 + ], + "spans": [ + { + "bbox": [ + 307, + 281, + 545, + 346 + ], + "type": "text", + "content": "[45] Longguang Wang, Yingqian Wang, Xiaoyu Dong, Qingyu Xu, Jungang Yang, Wei An, and Yulan Guo. Unsupervised degradation representation learning for blind superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10581-10590, 2021. 
2, 6, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 349, + 545, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 349, + 545, + 384 + ], + "spans": [ + { + "bbox": [ + 307, + 349, + 545, + 384 + ], + "type": "text", + "content": "[46] Yinhuai Wang, Jiwen Yu, and Jian Zhang. Zero-shot image restoration using denoising diffusion null-space model. arXiv preprint arXiv:2212.00490, 2022. 2, 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 384, + 545, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 384, + 545, + 429 + ], + "spans": [ + { + "bbox": [ + 307, + 384, + 545, + 429 + ], + "type": "text", + "content": "[47] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 430, + 545, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 430, + 545, + 475 + ], + "spans": [ + { + "bbox": [ + 307, + 430, + 545, + 475 + ], + "type": "text", + "content": "[48] Max Welling and Yee W Teh. Bayesian learning via stochastic gradient Langevin dynamics. In Proceedings of the 28th international conference on machine learning (ICML-11), pages 681–688, 2011. 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 475, + 545, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 475, + 545, + 542 + ], + "spans": [ + { + "bbox": [ + 307, + 475, + 545, + 542 + ], + "type": "text", + "content": "[49] Yu-Syuan Xu, Shou-Yao Roy Tseng, Yu Tseng, Hsien-Kai Kuo, and Yi-Min Tsai. Unified dynamic convolutional network for super-resolution with variational degradations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12496-12505, 2020. 1, 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 544, + 545, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 544, + 545, + 600 + ], + "spans": [ + { + "bbox": [ + 307, + 544, + 545, + 600 + ], + "type": "text", + "content": "[50] Xunpeng Yi, Han Xu, Hao Zhang, Linfeng Tang, and Jiayi Ma. Diff-retinex: Rethinking low-light image enhancement with a generative diffusion model. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12302-12311, 2023. 1, 2, 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 601, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 601, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 307, + 601, + 545, + 667 + ], + "type": "text", + "content": "[51] Zongsheng Yue, Qian Zhao, Jianwen Xie, Lei Zhang, Deyu Meng, and Kwan-Yee K. Wong. Blind image superresolution with elaborate degradation modeling on noise and kernel. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2128-2138, 2022. 1, 2, 3, 4, 6, 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 669, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 669, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 669, + 545, + 714 + ], + "type": "text", + "content": "[52] Roman Zeyde, Michael Elad, and Matan Protter. On single image scale-up using sparse-representations. In International conference on curves and surfaces, pages 711-730. Springer, 2010. 
6, 7" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "26055" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 463 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[53] Kai Zhang, Wangmeng Zuo, and Lei Zhang. Learning a single convolutional super-resolution network for multiple degradations. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3262-3271, 2018. 1, 2, 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "type": "text", + "content": "[54] Kai Zhang, Luc Van Gool, and Radu Timofte. Deep unfolding network for image super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3217-3226, 2020. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 174, + 288, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 174, + 288, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 174, + 288, + 228 + ], + "type": "text", + "content": "[55] Kai Zhang, Yawei Li, Wangmeng Zuo, Lei Zhang, Luc Van Gool, and Radu Timofte. Plug-and-play image restoration with deep denoiser prior. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(10):6360-6376, 2021. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 229, + 288, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 229, + 288, + 284 + ], + "spans": [ + { + "bbox": [ + 48, + 229, + 288, + 284 + ], + "type": "text", + "content": "[56] Yulun Zhang, Kunpeng Li, Kai Li, Lichen Wang, Bineng Zhong, and Yun Fu. Image super-resolution using very deep residual channel attention networks. In Proceedings of the European conference on computer vision (ECCV), pages 286-301, 2018. 1, 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 285, + 287, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 285, + 287, + 340 + ], + "spans": [ + { + "bbox": [ + 48, + 285, + 287, + 340 + ], + "type": "text", + "content": "[57] Zixiang Zhao, Haowen Bai, Yuzhhi Zhu, Jiangshe Zhang, Shuang Xu, Yulun Zhang, Kai Zhang, Deyu Meng, Radu Timofte, and Luc Van Gool. Ddfm: denoising diffusion model for multi-modality image fusion. arXiv preprint arXiv:2303.06840, 2023.1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 341, + 287, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 341, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 48, + 341, + 287, + 407 + ], + "type": "text", + "content": "[58] Hongyang Zhou, Xiaobin Zhu, Jianqing Zhu, Zheng Han, Shi-Xue Zhang, Jingyan Qin, and Xu-Cheng Yin. Learning correction filter via degradation-adaptive regression for blind single image super-resolution. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12365-12375, 2023. 2, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 407, + 287, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 407, + 287, + 463 + ], + "spans": [ + { + "bbox": [ + 48, + 407, + 287, + 463 + ], + "type": "text", + "content": "[59] Yuanzhi Zhu, Kai Zhang, Jingyun Liang, Jiezhang Cao, Bihan Wen, Radu Timofte, and Luc Van Gool. Denoising diffusion models for plug-and-play image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1219-1229, 2023. 1, 2" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "26056" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A General and Efficient Training for Transformer via Token Expansion/6b319bca-e10d-4650-be77-29bcc4ffd8dd_content_list.json b/2024/A General and Efficient Training for Transformer via Token Expansion/6b319bca-e10d-4650-be77-29bcc4ffd8dd_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..b7c78d7ebe2780decd64a6f2992828e3a498bfa9 --- /dev/null +++ b/2024/A General and Efficient Training for Transformer via Token Expansion/6b319bca-e10d-4650-be77-29bcc4ffd8dd_content_list.json @@ -0,0 +1,1693 @@ +[ + { + "type": "text", + "text": "A General and Efficient Training for Transformer via Token Expansion", + "text_level": 1, + "bbox": [ + 124, + 130, + 844, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wenxuan Huang $^{1*}$ Yunhang Shen $^{2*}$ Jiao Xie $^{3}$ Baochang Zhang $^{4}$ Gaoqi He $^{1}$ Ke Li $^{2}$ Xing Sun $^{2}$ Shaohui Lin $^{1,5\\boxtimes}$", + "bbox": [ + 122, + 178, + 859, + 215 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ School of Computer Science and Technology, East China Normal University, Shanghai, China", + "$^{2}$ Tencent Youtu Lab, China $^{3}$ Xiamen University, China $^{4}$ Beihang University, China" + ], + "bbox": [ + 178, + 217, + 805, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{5}$ Key Laboratory of Advanced Theory and Application in Statistics and Data Science - MOE, China", + "bbox": [ + 163, + 252, + 820, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "osilly0616@gmail.com, shenyunhang01@gmail.com, jiaoxiel990@126.com, bczhang@buaa.edu.cn \ngqhe@cs.ecnu.edu.cn, tristanli.sh@gmail.com, winfred.sun@gmail.com, shaohuilin007@gmail.com", + "bbox": [ + 84, + 272, + 893, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 339, + 313, + 354 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The remarkable performance of Vision Transformers (ViTs) typically requires an extremely large training cost. Existing methods have attempted to accelerate the training of ViTs, yet typically disregard method universality with accuracy dropping. 
Meanwhile, they break the training consistency of the original transformers, including the consistency of hyper-parameters, architecture, and strategy, which prevents them from being widely applied to different Transformer networks. In this paper, we propose a novel token growth scheme Token Expansion (termed ToE) to achieve consistent training acceleration for ViTs. We introduce an \"initialization-expansion-merging\" pipeline to maintain the integrity of the intermediate feature distribution of original transformers, preventing the loss of crucial learnable information in the training process. ToE can not only be seamlessly integrated into the training and fine-tuning process of transformers (e.g., DeiT and LV-ViT), but also effective for efficient training frameworks (e.g., EfficientTrain), without twisting the original training hyperparameters, architecture, and introducing additional training strategies. Extensive experiments demonstrate that ToE achieves about $1.3 \\times$ faster for the training of ViTs in a lossless manner, or even with performance gains over the full-token training baselines. Code is available at https://github.com/Osilly-TokenExpansion.", + "bbox": [ + 75, + 369, + 473, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 777, + 209, + 792 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Transformers have achieved excellent performance in the tasks of natural language processing (NLP) [1-3] and computer vision [4-7]. Despite their great success, modern Transformer models typically require extremely large parameters and computation consumption due to the quadratic com", + "bbox": [ + 75, + 801, + 470, + 878 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/45df7dce1b92becae204babc845a313fd66d8dbe7292778f31f5df230207e99a.jpg", + "table_caption": [ + "Table 1. Training results for DeiT [4] on ImageNet-1K. DeiT does not use the EMA strategy by default. a/b in the column of Top-1 Acc. means without/with EMA strategy using the official GitHub repo. The training time is averagely measured on one/four NVIDIA RTX A6000 GPUs 3 times with a batch size of 1, 024 for DeiT-Tiny/Base, respectively." + ], + "table_footnote": [], + "table_body": "
Model | Method | Training consistency (Hyper / Arch / Strategy) | Top-1 Acc. (%) | Training time (GPU hours)
TinyBaseline [4]---72.254.6h
S2ViTE (600 epoch) [10]××70.1 (-2.1)-
ToMeRDS→[11]×71.7 (-0.5)53.3h
NetworkExpansion6→12 [12]×70.3 (-1.9) / 70.1 (-2.1)43.2h
ToERi=0.5 (Ours)72.6 (+0.4)44.2h
BaseBaseline [4]---81.8292.8h
StackBERT [13]×80.8 (-1.0)231.6h
NetworkExpansion6→12 [12]×81.0 (-0.8) / 81.5 (-0.3)226.8h
ToERi=0.5 (Ours)81.6 (-0.2)231.2h
", + "bbox": [ + 503, + 402, + 885, + 503 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "putational complexity in the self-attention module. For example, ViT-H/14 [8] requires $\\sim 1,000$ FLOPs, which is $250 \\times$ larger than ResNet-50 [9]. The entire training process needs a significant amount of computing resources to reach model convergence, resulting in a substantial computation overhead. To reduce the computational cost of large models, there has been growing research attention on accelerating Transformers for either training or inference.", + "bbox": [ + 496, + 513, + 893, + 635 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Existing Transformer pruning methods [14-22] aim to reduce the inference complexity. Among them, structure pruning [14-17] and token pruning [18-22] focus on reducing the neurons or tokens of Transformers to accelerate the inference. However, these pruning methods require additional training computational cost in each forward-backward iteration to determine which neurons or tokens are important enough to be retained, or the fine-tuning for pruned models. Recently, Transformer quantization [23-26] accelerates the inference via low-bit computation, but they also cannot reduce the training computation cost. Thus, it is challenging for them to effectively accelerate the training of Transformers in practical scenarios, e.g., cloud service.", + "bbox": [ + 496, + 638, + 895, + 835 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To reduce the training computation overhead, recent works [12, 13, 27-29] have proposed structure growth methods. They update a smaller number of model parameters during the early stages of training and gradually increase", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution.", + "bbox": [ + 94, + 886, + 205, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Corresponding author.", + "bbox": [ + 253, + 886, + 383, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "15783", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8378b2f501e358b565ceaaac602632602be08ca09bdf943a4a83a123d5773ebc.jpg", + "image_caption": [ + "Figure 1. The \"initialization-expansion-merging\" pipeline of proposed ToE. We take the 1st training stage $(\\delta = 1)$ , the kept rate $r_1 = 2r_0 = \\frac{2}{3}$ , the repetition step $k = 1$ as example. ToE is only added after the first Transformer block to guide the token selection and usage. During training, steps (1), (2), and (3) are performed for each iteration with the reduction of token numbers. First, seed tokens are selected for token initialization through step (1). Then, the number of tokens is expanded via step (2) for token expansion. Finally, we merge the unselected token set (blue boxes) into the selected one (red boxes) with the close feature distributions in step (3) for token merging. 
During testing, ToE can be safely removed to generate the same Transformer architecture as the original full-token Transformer." + ], + "image_footnote": [], + "bbox": [ + 171, + 85, + 802, + 271 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the number of parameters involved in the updating process as training progresses. However, the existing methods fail to achieve general transformer training acceleration without accuracy dropping (shown in Tab. 1), and they break the training consistency of the original transformers from three perspectives: (1) Hyper-parameter consistency. Existing methods (e.g., SViTE [10]) delicately tune training hyperparameters (e.g., learning rate and epoch number) of the original models, which are sensitive to individual ViTs [4] and require additional trial-and-error costs for different networks. (2) Architecture consistency. Existing methods [10, 11] alter the final model architectures, which may deviate from the user's requirements and potentially necessitates additional hardware/software support to implement real training speedup. For example, ToMe [11] progressively merges similar tokens layer-by-layer to reduce the number of tokens in ViTs during training, which replaces the attention operators with the weighted average attention modules, generating a different model architecture that deviates from the original Transformer. Moreover, it cannot significantly accelerate the practical training due to the unfriendly computation. (3) Strategy consistency. Existing methods [12, 13, 27] may suffer from performance deterioration across different Transformers by adding additional training strategies, such as EMA and reset optimizer states. It means the effectiveness of these strategies is for specific models, which limits the method's universality whether employing them for training. In Tab. 1, the extra EMA strategy in [12] plays different roles to the performance across different models, i.e., the effectiveness for DeiT-base but not for DeiT-tiny. Thus, this begs our rethinkings: How to implement real and friendly training speedup for Transformers while keeping the training consistency and high accuracy?", + "bbox": [ + 76, + 357, + 472, + 854 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To answer the above question, we propose a novel token growth scheme, Token Expansion (termed ToE) to achieve general training acceleration for ViTs, while adhering to", + "bbox": [ + 76, + 854, + 470, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the training consistency of original models. Specifically, we present an \"initialization-expansion-merging\" pipeline (in Fig. 1) to maintain the integrity of the intermediate feature distribution of original transformers, preventing the loss of crucial learnable information during the accelerated training process. Similar to structure growth methods, we initially involve a limited number of tokens to participate in training and gradually grow the token number during training progress, eventually reaching the utilization of the entire token set. Then, a widest feature-distribution token expansion is introduced to make the feature distributions of the selected token set as wide as possible. Additionally, a feature-distribution token merging combines the tokens with close feature distributions to further avoid information loss. 
ToE not only accelerates the training and fine-tuning process of popular Transformers in a lossless manner or even with performance improvement, but also can be integrated into the existing efficient training frameworks (e.g., EfficientTrain [30]) for further performance improvement, without twisting the original training hyper-parameters, architecture, and introducing additional training strategies.", + "bbox": [ + 496, + 357, + 895, + 676 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our main contributions can be summarized as follows:", + "bbox": [ + 517, + 680, + 880, + 695 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose ToE, a novel token growth scheme to accelerate ViTs from the perspective of tokens. ToE is a consistent training acceleration method and can be seamlessly integrated into the training and fine-tuning process of transformers without any modifications to the original training hyper-parameters, architecture, and strategies.", + "- We propose an effective \"initialization-expansion-merging\" framework to avoid the token information loss by maintaining the integrity of the intermediate feature distribution.", + "- Extensive experiments demonstrate that ToE accelerates the training and fine-tuning process of ViTs with a negligible accuracy drop or even surpassing the original full-token counterparts, which outperforms previous SOTA methods." + ], + "bbox": [ + 500, + 700, + 893, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "15784", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 89, + 218, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1. Training Acceleration for Transformers", + "text_level": 1, + "bbox": [ + 76, + 114, + 416, + 131 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As mentioned above, many existing works focus on accelerating the training of transformers from the perspective of structural parameters. These structure methods [10, 12, 13, 27, 31, 32] reduce the number of updated parameters in the training process to save the computational cost. In contrast, the proposed ToE accelerates training from the perspective of reducing token redundancy. In other words, ToE computes a smaller number of tokens but still optimizes all parameters. It avoids potential performance drops in many structure growth methods due to the inconsistent structures of before-and-after models during structure growth and resetting of optimizer state when updating new structural parameters.", + "bbox": [ + 75, + 138, + 472, + 319 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ToMe [11] uses a limited number of tokens to participate in training and progressively merges similar tokens layer-by-layer, which changes the attention operator in inference. ToE also involves merging tokens with close feature distributions by feature-distribution token merging. However, our merging strategy is performed only once at the end of the \"initialization-expansion-merging\" pipeline during training, which prevents the information loss of tokens. 
This ensures that ToE avoids the mismatch between practical and theoretical acceleration caused by excessive merging operations and operator modifications.", + "bbox": [ + 75, + 320, + 472, + 487 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Additionally, several works [30, 33-35] also consider to reduce the data for training. The work in [33] deduplicates training datasets to save computational resources. Unfortunately, it usually introduces additional computational costs and sometimes becomes a bottleneck by using additional time to process datasets during training [36]. PSS [35] uses fewer patches obtained by splitting images during training. EfficientTrain [30] and PL [34] use images of different sizes and additional data augmentation. However, EfficientTrain and PL change the training pipelines that differ from the training of the original model, e.g., hyper-parameters. Moreover, the above methods consider the properties of training data. In contrast, ToE focuses on the crucial learnable information in the intermediate feature space of transformers. Thus, ToE can be integrated into the above methods in a plug-and-play manner to further enhance training efficiency.", + "bbox": [ + 75, + 487, + 472, + 729 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Training Acceleration for CNNs", + "text_level": 1, + "bbox": [ + 76, + 739, + 359, + 757 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Prior efficient training acceleration methods have explored ways to speed up the training of CNN models [37-42]. For example, works in [37, 38] consider pruning gradients to reduce training computation costs. Works in [39, 40] attempt to use quantization technical to achieve training acceleration. Others try to reduce training time either by reducing the number of optimization iterations with a linear decay for the learning rate [41] or skipping easy samples that contribute little to loss reduction [42]. However, these methods may not", + "bbox": [ + 75, + 763, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "be directly applied to Transformers for training acceleration due to the specific architectural differences between transformers and CNNs. Differently, ToE focuses on the training acceleration for Transformers on the token dimension.", + "bbox": [ + 496, + 90, + 893, + 151 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Transformer pruning", + "text_level": 1, + "bbox": [ + 498, + 161, + 702, + 178 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Transformer pruning methods typically reduce parameters or tokens to generate sparse Transformers for fast inference. Structure pruning methods [14-17] attempted to prune the structures of transformers. Token pruning methods [18-22] focused on dynamically determining the importance of input tokens and pruning them during inference.", + "bbox": [ + 496, + 184, + 893, + 275 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The key differences between our method and transformer pruning methods are two-fold. (1) Transformer pruning methods primarily aim to accelerate transformer inference, while our target is for training acceleration. (2) We obtain a dense model after training by token growth, which is entirely consistent with the original model for inference. In contrast, pruning methods generate sparse models after training.", + "bbox": [ + 496, + 276, + 893, + 382 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. 
Method", + "text_level": 1, + "bbox": [ + 498, + 395, + 591, + 411 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Preliminaries and Notations", + "text_level": 1, + "bbox": [ + 498, + 420, + 751, + 436 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a Transformer with $L$ blocks, we denote the sets of input and output tokens for the $l$ -th block as $\\mathcal{S}_{l-1}$ and $\\mathcal{S}_l$ with $l \\in \\{1,2,\\dots,L\\}$ , respectively. The index set of output tokens for the $l$ -th block is defined as $\\mathcal{I} = \\{1,2,\\dots,N_l\\}$ , where $N_l$ is the number of output tokens for the $l$ -th block. We further denote the $i$ -th token of the output tokens for the $l$ -th block as $t_{l,i} \\in \\mathbb{R}^d$ , thus $\\mathcal{S}_l = \\{t_{l,i} | \\forall i \\in \\mathcal{I}\\}$ .", + "bbox": [ + 496, + 443, + 893, + 549 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For the $l$ -th Transformer block, we consider to reduce the output tokens to a specified size $N_{l}^{\\prime} = \\lfloor rN_{l}\\rfloor$ , where $r\\in (0,1]$ is the kept rate of tokens, and $\\lfloor \\cdot \\rfloor$ is a floor function. Further, we define the index set of kept tokens as $\\mathcal{I}' = \\{1,2,\\dots ,N_l'\\}$ and we obtain a subset $S_{l}^{\\prime} = \\{t_{l,i}^{\\prime}|\\forall i\\in \\mathcal{I}^{\\prime}\\}$ of output tokens. When the output tokens of the $l$ -th block are reduced, this results in a corresponding reduction in the quantity of input tokens for blocks beyond the $l$ -th block. Furthermore, the computational complexity of self-attention blocks and MLP layers in Transformers is directly proportional to the number of input tokens. According to the work [43], the computation in the forward and backward propagation of modern neural networks roughly conforms to 1:2. Therefore, the reduction of tokens significantly accelerates the computation in both the forward and backward propagations during training if $r < 1$ . Note that, to reduce the complex search computation for the kept rate of tokens $r$ across all Transformer blocks, we simply and effectively set $r$ to be the same in all blocks that benefit from acceleration.", + "bbox": [ + 496, + 550, + 895, + 837 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Overview of ToE", + "text_level": 1, + "bbox": [ + 498, + 845, + 666, + 861 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As shown in Fig. 1, ToE initially selects a significantly small number of tokens, then progressively grows to the final full-", + "bbox": [ + 496, + 869, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "15785", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "token same as the original Transformer, thereby achieving training acceleration. We divide the origin training process into $N_{g}$ stages on average. We use a limited number of tokens to participate in each training stage and gradually grow the token number along with the training stages. The token growth strategy consists of three steps:", + "bbox": [ + 75, + 90, + 468, + 181 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Initial token selection as the seed tokens. 
we initially select $\\lfloor r_0N_l\\rfloor$ output tokens from the origin token set $S_{l}$ as the seed token set by using Uniform sampling on the index set $\\mathcal{I}$ , where $r_0$ represents the pre-defined initial kept rate, which is default set to less than 0.3 in our experiments unless otherwise specified.", + "(2) Token expansion. In the $\\delta$ -th $(\\delta \\in \\{1, 2, \\dots, N_g\\})$ training stage, we perform $\\delta$ times token expansion to preserve the integrity of the original intermediate feature space. Furthermore, we pre-define the keep rate of the first stage to be $r_1$ . The kept rate of $\\delta$ -th stage $r_\\delta$ is computed as:" + ], + "bbox": [ + 75, + 181, + 470, + 348 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mu_ {\\delta} = \\left\\{ \\begin{array}{l l} r _ {1} - r _ {0}, & \\text {i f} \\delta = 1, \\\\ \\frac {1 - r _ {1}}{N _ {g} - 1}, & \\text {o t h e r w i s e ,} \\end{array} \\right. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 357, + 468, + 393 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nr _ {\\delta} = r _ {\\delta - 1} + \\mu_ {\\delta},\n$$\n", + "text_format": "latex", + "bbox": [ + 183, + 398, + 287, + 411 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mu_{\\delta}$ is the token expansion rate in the $\\delta$ -th training stage and $r_1 = 2 \\cdot r_0 \\in (0,1]$ . After the $\\delta$ times token expansion, we select $\\lfloor r_{\\delta}N_l\\rfloor$ tokens from the full-token set $S_{l}$ . In Sec. 3.3.2, we will introduce the widest feature-distribution token expansion method to select $\\lfloor r_{\\delta}N_l\\rfloor$ tokens, which aims to expand the token distribution space to effectively present full-token feature distribution.", + "bbox": [ + 75, + 420, + 468, + 525 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "(3) Token merging. To further avoid information loss during the training process, we consider merging the unselected tokens into the selected ones in the token expansion process, which retains effective information of the unselected tokens in the merged token set $S_{l}^{\\prime}$ . Inspired by ToMe [11], we merge averagely the tokens that the feature distributions are close as one new token, which is further introduced in Sec. 3.3.3.", + "bbox": [ + 75, + 525, + 468, + 630 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During training, ToE performs steps (1), (2), and (3) on the original full-token set for each training iteration, which reduces the number of tokens involved in training while retaining the effective information from the full-token set.", + "bbox": [ + 75, + 631, + 468, + 691 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Token Expansion", + "text_level": 1, + "bbox": [ + 76, + 700, + 245, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this Section, we introduce the proposed ToE method, including spatial-distribution token initialization, widest feature-distribution token expansion, feature-distribution token merging, and its optimization.", + "bbox": [ + 75, + 724, + 468, + 785 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.1 Spatial-distribution Token Initialization", + "text_level": 1, + "bbox": [ + 76, + 803, + 406, + 818 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For the initialization, we apply a simple strategy to select the initial token set from $S_{l}$ . 
We define the index of the initial token set as:", + "bbox": [ + 75, + 825, + 468, + 869 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {I} ^ {(I)} = \\{i | \\forall i \\bmod \\left\\lfloor \\frac {1}{r _ {0}} \\right\\rfloor = 1 \\wedge \\forall i \\in \\mathcal {I} \\}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 148, + 877, + 468, + 905 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The selected token set and the unselected tokens set can be expressed as $\\mathbb{A} = \\{t_{l,i}|\\forall i\\in \\mathcal{I}^{(I)}\\}$ and $\\mathbb{B} = \\mathcal{S}_l - \\mathbb{A}$ , respectively. This initialization selection strategy is based on spatial distribution. It indicates that we choose one token out of every $\\left\\lfloor \\frac{1}{r_0}\\right\\rfloor$ tokens from the original token set and add it to the initial token set. Our strategy is simple, yet effective, to ensure that the initially selected tokens provide broad spatial coverage across the image patches.", + "bbox": [ + 496, + 90, + 890, + 212 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.2 Widest Feature-distribution Token Expansion", + "text_level": 1, + "bbox": [ + 498, + 231, + 870, + 244 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Previous works [11, 18] show that the intermediate feature space in modern Transformers is overparameterized, such that they prune the full-token Transformers to be sparse ones. Actually, through the above token initialization, we obtain the sparse Transformers. However, the performance drops significantly if we only train on these selected tokens. Thus, we consider to grow the number of tokens, which is expected to preserve the integrity of the original intermediate feature space and avoid the loss of tokens containing valuable information. Inspired by this, we seek to maintain the integrity of the intermediate feature distribution. Intuitively, when the feature distributions of two token sets are sufficiently close, they have similar information that can be used to effectively represent each other. In contrast, given one token whose feature distribution deviates significantly from all other tokens in the token set, it will be difficult to be adequately represented by other tokens, such that we expect to select this token to underscore its importance in the token expansion.", + "bbox": [ + 496, + 253, + 892, + 526 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To this end, we propose the widest feature-distribution token expansion strategy. Specifically, we perform the expanding operation on the selected tokens from the initialized set. For the $\\delta$ -th stage of token expansion, we consider the selected token set $\\mathbb{A} \\in \\mathbb{R}^{|A| \\times d}$ and the unselected token set $\\mathbb{B} \\in \\mathbb{R}^{|B| \\times d}$ as the 2D matrices, where $|\\cdot|$ and $d$ respectively denote the number of tokens and feature dimension, and $|\\mathbb{A}| + |\\mathbb{B}| = N_l$ . We utilize Cosine Distance as the metric to measure the distance between feature distribution of tokens in these two sets (other metrics see Tab. 
9):", + "bbox": [ + 496, + 527, + 892, + 676 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {D} (\\mathbb {B}, \\mathbb {A}) = \\mathbf {1} - \\cos \\langle \\mathbb {B}, \\mathbb {A} \\rangle = \\mathbf {1} - \\frac {\\mathbb {B} \\mathbb {A} ^ {\\mathrm {T}}}{\\| \\mathbb {B} \\| \\cdot \\| \\mathbb {A} \\|}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 686, + 890, + 718 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{1}$ is an all-one matrix. $\\mathcal{D}(\\mathbb{B},\\mathbb{A})\\in \\mathbb{R}^{|\\mathbb{B}|\\times |\\mathbb{A}|}$ represents the pairwise distances between tokens in $\\mathbb{B}$ and $\\mathbb{A}$ .", + "bbox": [ + 496, + 727, + 888, + 758 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We further define the distance between the feature distribution of tokens in $\\mathbb{B}$ and its closest token in $\\mathbb{A}$ as distance $(\\mathbb{B} \\to \\mathbb{A}) \\in \\mathbb{R}^{|\\mathbb{B}|}$ :", + "bbox": [ + 496, + 758, + 890, + 804 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {d i s t a n c e} (\\mathbb {B} \\rightarrow \\mathbb {A}) _ {i} = \\min _ {j} \\left(\\mathcal {D} (\\mathbb {B}, \\mathbb {A}) _ {i, j}\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 570, + 814, + 890, + 830 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $i\\in \\{1,\\dots ,|B|\\}$ and $j\\in \\{1,\\dots ,|A|\\}$ . Eq. 4 indicates that we sample the minimal values of the feature-distribution distance matrix $\\mathcal{D}(\\mathbb{B},\\mathbb{A})$ along the second dimension. Thus, distance $(\\mathbb{B}\\to \\mathbb{A})_i$ measures importance of", + "bbox": [ + 496, + 839, + 893, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "15786", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/5c8a1f33feeb9bdc18742850a7af1f0c08f43d23d6ab144b70f1a527aa45ad2f.jpg", + "image_caption": [ + "Figure 2. Visualization for the feature distribution of token set. We use T-SNE [44] to visualize the output token feature distributions at the first block, the tokens selected by ToE, and the output tokens after the second block. Baselines are DeiT-small trained on ImageNet-1K. ToE preserves the distribution integrity of intermediate features of the original token set across different Transformer blocks while ensuring that feature distributions are as wide as possible." + ], + "image_footnote": [], + "bbox": [ + 166, + 88, + 532, + 242 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/3f2550f8eecf5402dda4f6f3ff26dc3859e9699e4ca2e741516b9454fdc5eaae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 537, + 88, + 803, + 242 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$i$ -th token in $\\mathbb{B}$ . 
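For illustration, the initialization in Eq. (2) and the distance computation in Eqs. (3)-(4) can be written in a few lines of PyTorch. This is a minimal single-sample sketch rather than the authors' released implementation: the function names are ours, and A and B are assumed to be 2D tensors of shape (|A|, d) and (|B|, d) holding the selected and unselected output tokens of the first block.

```python
import torch
import torch.nn.functional as F

def init_seed_tokens(tokens: torch.Tensor, r0: float):
    """Spatial-distribution initialization (Eq. (2)): keep every floor(1/r0)-th token."""
    stride = int(1.0 / r0)
    keep = torch.arange(0, tokens.shape[0], stride, device=tokens.device)
    mask = torch.ones(tokens.shape[0], dtype=torch.bool, device=tokens.device)
    mask[keep] = False
    return tokens[keep], tokens[mask]  # A (selected seed tokens), B (unselected tokens)

def expansion_importance(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
    """Eqs. (3)-(4): for each token in B, the cosine distance to its closest token in A."""
    # Pairwise cosine distances D(B, A) of shape (|B|, |A|), computed as one matrix product.
    D = 1.0 - F.normalize(B, dim=-1) @ F.normalize(A, dim=-1).T
    # distance(B -> A): large values mark tokens that no selected token represents well.
    return D.min(dim=-1).values
```

Because the pairwise distances reduce to a single matrix product over normalized tokens, this bookkeeping is cheap relative to the self-attention computation it helps to reduce.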
At this point, we progressively add the most important token to $\\mathbb{A}$ , which is formulated as:", + "bbox": [ + 76, + 292, + 470, + 323 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {A} = \\mathbb {A} + t ^ {*}, \\quad \\mathbb {B} = \\mathbb {B} - t ^ {*},\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 329, + 359, + 342 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nt ^ {*} = \\left\\{\\mathbb {B} _ {i} | i = \\operatorname {a r g m a x} (d i s t a n c e (\\mathbb {B} \\rightarrow \\mathbb {A})) \\right\\}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 340, + 468, + 357 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $t^*$ is the most important token in $\\mathbb{B}$ . When the feature distribution of one token is far from its closest token, it can be said that the feature distribution of this token deviates significantly from that of all other tokens in the token set. The operation described in Eq. 5 is performed for $\\lfloor \\mu_{\\delta}N_l \\rfloor$ times to select $\\lfloor \\mu_{\\delta}N_l \\rfloor$ tokens from $\\mathbb{B}$ into $\\mathbb{A}$ . The widest feature-distribution token expansion strategy ensures that the feature distributions of the selected token set become as wide as possible, preventing the loss of important tokens. However, as we need to iterate $\\lfloor \\mu_{\\delta}N_l \\rfloor$ times expansion, it results in a considerable consumption of computational resources. Considering the computation parallelization, we modify the expanding operation in Eq. 5 parallelly:", + "bbox": [ + 75, + 363, + 472, + 559 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {A} = \\mathbb {A} + \\mathcal {S} ^ {*}, \\quad \\mathbb {B} = \\mathbb {B} - \\mathcal {S} ^ {*},\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 565, + 362, + 578 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {S} ^ {*} = \\left\\{\\mathbb {B} _ {i} | i \\in \\operatorname {t o p k} _ {\\lfloor \\mu_ {\\delta} \\mathrm {N} _ {1 / \\mathrm {k}} \\rfloor} (d i s t a n c e (\\mathbb {B} \\rightarrow \\mathbb {A})) \\right\\}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 120, + 575, + 468, + 595 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $k$ is the pre-defined repetition step of parallel expanding operation, $S^*$ is a token set consisting of the important tokens in $\\mathbb{B}$ , $\\mathrm{topk}_n$ denotes the top argmax with the number of $n$ tokens. By this way, we only perform $k$ times parallel expanding operation to expand $\\lfloor \\mu_{\\delta} N_l \\rfloor$ tokens, and its computational consumption is negligible with small $k$ .", + "bbox": [ + 75, + 599, + 470, + 691 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.3 Feature-distribution Token Merging", + "text_level": 1, + "bbox": [ + 76, + 708, + 382, + 724 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After token expansion, we aim to retain the effective information of the unselected tokens, such that we merge the unselected tokens that the feature distributions are close to the selected ones. 
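For concreteness, the parallel expansion of Eq. (6) can be sketched as follows before the merging step is formalized. This is a rough illustration under our own naming: expansion_importance is the helper sketched above, num_new stands for the ⌊μ_δ N_l⌋ tokens to be added, and minor rounding details are ignored.

```python
import torch

def expand_tokens(A: torch.Tensor, B: torch.Tensor, num_new: int, k: int = 2):
    """Widest feature-distribution expansion (Eq. (6)): move num_new tokens from B to A
    in k batched top-k steps instead of one-token-at-a-time greedy selection (Eq. (5))."""
    per_step = max(num_new // k, 1)  # roughly floor(mu_delta * N_l / k) tokens per step
    for _ in range(k):
        if B.shape[0] == 0:
            break
        imp = expansion_importance(A, B)                   # distance(B -> A), Eq. (4)
        idx = imp.topk(min(per_step, B.shape[0])).indices  # most poorly-represented tokens
        keep = torch.ones(B.shape[0], dtype=torch.bool, device=B.device)
        keep[idx] = False
        A = torch.cat([A, B[idx]], dim=0)                  # S* joins the selected set
        B = B[keep]                                        # and leaves the unselected set
    return A, B
```

With the default k = 2, only two such batched steps are performed per expansion, which keeps the selection cost negligible.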
The feature-distribution token merging can be formulated as:", + "bbox": [ + 75, + 731, + 470, + 806 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {S} _ {l} ^ {\\prime} = \\left\\{\\operatorname {m e a n} \\left(\\mathbb {A} _ {j}, \\mathcal {S} _ {j} ^ {(M)}\\right) | \\forall j \\in \\{1, 2, \\dots , | \\mathbb {A} | \\} \\right\\}, \\text {w h e r e}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 810, + 429, + 827 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {S} _ {i} ^ {\\left(\\hat {M}\\right)} = \\left\\{\\mathbb {B} _ {i} \\mid \\mathcal {I} _ {i} ^ {\\left(\\hat {M}\\right)} = = j, \\forall i \\in \\{1, 2, \\dots , | \\mathbb {B} | \\} \\right\\}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 825, + 468, + 843 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {I} ^ {(M)} = \\operatorname {a r g m i n} _ {j} (\\mathcal {D} (\\mathbb {B}, \\mathbb {A}) _ {i, j}),\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 842, + 357, + 859 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $S_{l}^{\\prime}\\in \\mathbb{R}^{|\\mathbb{A}|\\times d}$ is the token set merging the closest tokens from $\\mathbb{B}$ to $\\mathbb{A}$ , and mean $(\\mathbb{A}_j,S_j^{(M)})$ indicate that we", + "bbox": [ + 75, + 864, + 470, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "merge $\\mathbb{B}$ into $\\mathbb{A}$ averagely based on the index set $\\mathcal{I}^{(M)}\\in$ $\\mathbb{R}^{\\left|\\mathbb{B}\\right|}$ . Note that every $\\mathbb{B}_i$ participates in the merging to avoid the information dropping for the unselected tokens.", + "bbox": [ + 498, + 292, + 892, + 339 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.4 Optimization of ToE", + "text_level": 1, + "bbox": [ + 498, + 359, + 697, + 375 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our objective loss is the same as the original models, e.g., cross-entropy loss in DeiT. The training details of ToE are presented in Algorithm 1. Note that we only apply ToE to the output tokens of the first transformer block. The detailed analysis is discussed in Sec. 4.4.", + "bbox": [ + 496, + 383, + 893, + 458 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ToE is a plug-and-play acceleration module, which has three following advantages: (1) As shown in Fig. 2, we observed that the selected token set obtained by ToE in the multiple block outputs has a larger average distribution distance via T-SNE [44], compared to that in the original full-token set (see First block vs. After ToE). Moreover, it maintains a feature distribution similar to the original token set. It indicates ToE can preserve the integrity of the intermediate feature distribution of the original token set across different Transformer blocks by reducing the number of tokens. (2) ToE is a parameter-free module, it does not introduce any trainable parameters and utilizes efficient matrix calculations that the computational overhead is negligible, compared to computation-intensive self-attention. (3) The speedup factors (e.g., token kept rate $r_1$ and training stage $N_g$ ) of ToE are independent of the original model's training hyper-parameters. 
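As a concrete illustration of this independence, the per-stage kept rates follow directly from Eq. (1) and depend only on ToE's own factors. The helper below is our own sketch, with r_1 = 2·r_0 as in Sec. 3.2 and example values matching the defaults used later in Sec. 4.1.

```python
def kept_rate_schedule(n_g: int, r1: float):
    """Per-stage kept rates r_delta from Eq. (1), given the number of stages N_g and r_1."""
    r0 = r1 / 2.0                      # Sec. 3.2: r_1 = 2 * r_0
    rates, r = [], r0
    for stage in range(1, n_g + 1):
        mu = (r1 - r0) if stage == 1 else (1.0 - r1) / max(n_g - 1, 1)
        r += mu
        rates.append(r)
    return rates

# e.g., kept_rate_schedule(3, 0.5) -> [0.5, 0.75, 1.0]: half of the tokens in the first
# stage, three quarters in the second, and the full token set in the final stage.
```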
This decoupling allows ToE to be seamlessly integrated into the training process of the original model, obviating the need for any adjustments to the training hyper-parameters.", + "bbox": [ + 496, + 459, + 893, + 746 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 761, + 633, + 777 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 500, + 786, + 705, + 801 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets and baselines. We evaluate our method on ImageNet-1K [45] and CIFAR-10/100 [46]. For baselines, we use two popular ViTs, i.e., DeiT [4] and LV-ViT [5], as the base models to evaluate the proposed ToE on ImageNet-1K. To further evaluate the universality, we integrate ToE into the efficient training framework EfficientTrain [30]. Moreover,", + "bbox": [ + 496, + 810, + 893, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "15787", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: Optimization with ToE" + ], + "code_body": "Input: Input dataset $\\mathcal{X}$ output token number $N_{l}$ total training stage $N_{g}$ kept rate of the first training stage $r_1$ repetition step of the parallel expanding operation $k$ Transformer parameters $\\theta$ maximum iterations $T$ Output: Updated Transformer parameters $\\theta$ for $t\\gets 1$ to $T$ do Sample from $\\mathcal{X}$ to obtain data sample $x$ ,feed-forwarded through the embedding and first $l$ -th transformer blocks to obtain the output token set $S_{l}$ . \n3 $\\% \\% \\%$ Spatial-distribution Token Initialization%% \n4 $r_0\\gets \\frac{1}{2} r_1$ . \n5 Initialize A and B by $r_0,S_l$ via Eq. 2; \n6 $\\% \\% \\%$ Widest Feature-distribution Token Expansion%% \n7Obtain the current training stage $\\delta = \\lceil N_g*t / T\\rceil$ . \n8 for $m\\gets 1$ to $\\delta$ do if $m = 1$ then $\\mu_{m}\\leftarrow r_{1} - r_{0};$ else $\\mu_{m}\\leftarrow \\frac{1 - r_{1}}{N_{g} - 1}$ end for $n\\gets 1$ to k do Update A and B by $\\mu_{m},N_{l},k,$ prior A and prior B via Eq. 6; \nend \nend \n $\\% \\% \\%$ Feature-distribution Token Merging%% \n9 Obtain $S_l^{\\prime}$ by A and B via Eq. 7; \n $S_l^{\\prime}$ feed-forwardsed through the $l + 1$ -th transformer block to final layer and progressively obtain the final prediction y; \n $\\% \\% \\%$ Parameter Updating%% \n2 Use $y$ to compute the loss and obtain the gradient $\\nabla \\theta$ . \n23 Use $\\nabla \\theta$ to update prior $\\theta$ via the optimizer to obtain new $\\theta$ . \nend \nreturn $\\theta$", + "bbox": [ + 81, + 111, + 468, + 539 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "we evaluate the transfer learning ability using pre-trained weights of ToE on DeiT and the performance of accelerating the fine-tuning process with ToE on CIFAR-10/100.", + "bbox": [ + 75, + 556, + 468, + 602 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation metrics. We report Top-1 accuracy, the GPU training time and FLOPs as the evaluation metric. To evaluate the training speed, we report the total GPU hours consumed during the entire training process, as well as the theoretical FLOPs for one forward-backward process. 
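For reference, this accounting can be approximated with a short script; it is our own illustrative sketch rather than the paper's measurement code, using the thop profiler referenced in the footnote for the forward cost and counting the backward pass as twice the forward pass, following [43].

```python
import torch
from thop import profile  # https://github.com/Lyken17/pytorch-OpCounter

def training_gflops_per_iter(model, batch_size: int = 1024, img_size: int = 224) -> float:
    """Rough per-iteration training cost in GFLOPs: profiled forward cost for one image,
    scaled by batch size, with the backward pass counted as 2x the forward pass."""
    x = torch.randn(1, 3, img_size, img_size)
    fwd, _ = profile(model, inputs=(x,), verbose=False)  # forward cost reported by thop
    return 3.0 * fwd * batch_size / 1e9                  # (1 forward + 2 backward) * batch
```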
To avoid the impact of memory access and kernel launching on training time [12], we report the GPU hours on different numbers of GPUs, but with the same GPU numbers to evaluate different training methods. The FLOPs for the forward process are measured using thop1, and for the backward process, we follow [43] and calculate it as twice the FLOPs of the forward process.", + "bbox": [ + 75, + 603, + 470, + 785 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementations. All methods are trained by Pytorch [47]. For DeiT and LV-ViT, all experiments are conducted on four NVIDIA RTX A6000 GPUs $^2$ , while EfficientTrain is trained on eight NVIDIA RTX A6000 GPUs.", + "bbox": [ + 75, + 786, + 472, + 848 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "All hyper-parameters (e.g., learning rate, decay strategy and rate), and training strategies and optimization processes are the same as the original papers unless otherwise specified.", + "bbox": [ + 500, + 90, + 890, + 136 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Growth strategy. In default, we divide the origin training process into $N_{g} = 3$ stages on average. The token kept rate of 1st stage $r_1$ is set to 0.4, 0.5 or 0.6, our method is corresponding to be denoted as ToE $r_1 = 0.4$ , ToE $r_1 = 0.5$ or ToE $r_1 = 0.6$ . Correspondingly, the kept rate of the initial stage $r_0$ is set to 0.2, 0.25 and 0.3. The repetition step of parallel expanding operation $k$ is default set to 2, and we perform ToE on the output tokens of the first block for all models.", + "bbox": [ + 496, + 137, + 893, + 258 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Results on ImageNet-1k", + "text_level": 1, + "bbox": [ + 498, + 270, + 720, + 286 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "DeiT and LV-ViT As shown in Tab. 2, ToE achieves lossless training acceleration with SOTA performance. For example, $\\mathrm{ToE}_{r_1 = 0.5}$ achieves $0.4\\%$ Top-1 accuracy improvement with $1.27\\times$ theoretical and $1.24\\times$ practical faster speed to train DeiT-tiny. For DeiT-small, it achieves $1.3\\times$ training acceleration without accuracy drop. Compared to the SOTA methods, $\\mathrm{ToE}_{r_1 = 0.5}$ outperforms SViTE [10] and NetworkExpansion [12] at least $1\\%$ Top-1 accuracy at the consistent acceleration ratio for training both DeiT-tiny and DeiT-small. Compared to ToMe [11], $\\mathrm{ToE}_{r_1 = 0.5}$ also achieves both higher accuracy and practical training speed. Note that ToMe is able to reduce GFLOPs, but fails to accelerate training due to the usage of unfriendly weighted average attention and layer-wise merging operations. For DeiT-base, $\\mathrm{ToE}_{r_1 = 0.5}$ drops only $0.2\\%$ Top-1 accuracy while saving more than 60 GPU hours in the practical training process, which is comparable to NetworkExpansion with EMA. If we relax the restriction of hyper-parameter consistency (presented in Appendix), $\\mathrm{ToE}_{r_1 = 0.4}^{\\text{Hyper}}$ outperforms NetworkExpansion with $0.2\\%$ accuracy and 24h training time reduction.", + "bbox": [ + 496, + 294, + 893, + 597 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For LV-ViT-T and LV-ViT-S shown in Tab. 3, $\\mathrm{ToE}_{r_1 = 0.4}$ achieves efficient training with $1.2\\times$ acceleration rate, while without accuracy drop or even with accuracy improvement for training LV-ViT-T, compared to baselines. 
Note that the results of $\\mathrm{ToE}_{r_1 = 0.4}$ and NetworkExpansion are reported with EMA, due to the default LV-ViT training with EMA. In addition, $\\mathrm{ToE}_{r_1 = 0.4}$ outperforms NetworkExpansion in both training acceleration and accuracy with 0.5h training time reduction and $0.6\\%$ accuracy for LV-ViT-T, respectively.", + "bbox": [ + 496, + 597, + 890, + 733 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We also present the validation Top-1 accuracy of ToE and NetworkExpansion during training DeiT-tiny and LV-ViT in Fig. 3. As observed, ToE initially reduces token redundancy during training, resulting in some performance drops compared to the baseline. However, in the later stages of training, ToE introduces more tokens for training, gradually reducing the accuracy gap to the baseline. Benefiting from the reduction of token redundancy in the early stages, models trained by ToE with the proposed token expansion and merging achieve higher accuracies, compared to baselines. Compared to NetworkExpansion, our ToE is more stable to", + "bbox": [ + 496, + 734, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "$^{1}$ https://github.com/Lyken17/pytorch-OpCounter/blob/master/thop", + "bbox": [ + 76, + 862, + 429, + 875 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "2Note that the used number of GPUs for training may be different to the evaluation of training speedup for a fair comparison.", + "bbox": [ + 78, + 875, + 468, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "15788", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/2d39401233cbea7c48190a61fa1725657fb58ab43f9beb1271f4d2cd1cb6fe24.jpg", + "table_caption": [ + "Table 2. Performance comparison for DeiT on ImageNet-1K. a/b in the column of Top-1 Acc. means without/with EMA strategy using the official GitHub repo†. The training time is averagely measured on one/two/four NVIDIA RTX A6000 GPUs for DeiT-tiny/small/base 3 times, and the batch size is set to 1, 024 in all following tables and figures." + ], + "table_footnote": [ + "$\\dagger$ https://github.com/huawei-noah/Efficient-Computing/tree/master/TrainingAcceleration/NetworkExpansion" + ], + "table_body": "
ModelMethodConsistencyTop-1 Acc. (%)GFLOPs (per training iter)Training time (total GPU hours)Acceleration (practical rate)
Hyper?Architecture?Strategy?
DeiT-tinyBaseline [4]---72.23.3 × 10354.6h1.00×
(NeurIPS'21) S2ViTE-Tiny (600 epoch) [10]××70.1 (-2.1)2.5 × 103(1.32×)-1.19×
(ICLR'23) ToMeDeTrs→[11]×71.7 (-0.5)2.5 × 103(1.32×)53.3h1.02×
(CVPR'23) NetworkExpansion6→12 [12]×70.3 (-1.9) / 70.1 (-2.1)2.5 × 103(1.32×)43.2h1.26×
ToE r1=0.5 (Ours)72.6 (+0.4)2.6 × 103(1.27×)44.2h1.24×
DeiT-smallBaseline [4]---79.81.3 × 104124.5h1.00×
(ICLR'23) ToMeDeTrs→[11]×79.7 (-0.1)9.8 × 103(1.33×)121.5h1.02×
(CVPR'23) NetworkExpansion6→12 [12]×78.8 (-1.0) / 78.6 (-1.2)9.8 × 103(1.33×)100.3h1.24×
ToE r1=0.5 (Ours)79.8 (+0.0)1.0 × 104(1.30×)102.2h1.22×
DeiT-baseBaseline [4]---81.85.2 × 104292.8h1.00×
(ICML'19) StackBERT [13]×80.8 (-1.0)4.2 × 104(1.24×)231.6h1.26×
(CVPR'23) NetworkExpansion6→12 [12]×81.0 (-0.8) / 81.5 (-0.3)3.9 × 104(1.33×)226.8h1.29×
ToE r1=0.5 (Ours)81.6 (-0.2)4.0 × 104(1.30×)231.2h1.27×
ToE r1=0.4 (Ours)81.4 (-0.4)3.8 × 104(1.37×)225.2h1.30×
ToE Hyper r1=0.5 (Ours)×81.8 (+0.0)3.6 × 104(1.44×)213.2h1.37×
ToE Hyper r1=0.4 (Ours)×81.7 (-0.1)3.3 × 104(1.58×)202.8h1.44×
", + "bbox": [ + 119, + 128, + 849, + 313 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ea951d67bfce9e4cc46a8224f907852fe43ae31bb8941faab564d6557113be52.jpg", + "table_caption": [ + "Table 3. Performance comparison for LV-ViT on ImageNet-1K. $\\ddagger$ indicates that results reproduced by the official GitHub repo. The training time is averagely measured on two/four NVIDIA RTX A6000 GPUs 3 times for LV-ViT-T/S with a fixed batch size of 1, 024." + ], + "table_footnote": [], + "table_body": "
ModelMethodTop-1 Acc. (%)GFLOPs (per training iter)Training time (total GPU hours)
LV-ViT-TBaseline [5]79.18.2 × 103130.5h
(CVPR'23) NetworkExpansion6→12 [12][78.8 (-0.3)]7.1 × 103(1.15×)114.4h (1.14×)
ToE r1=0.4 (Ours)79.4 (+0.3)7.0 × 103(1.17×)113.9h (1.15×)
LV-ViT-SBaseline [5]83.31.9 × 104237.3h
(CVPR'23) NetworkExpansion8→16 [12][82.9 (-0.4)]1.5 × 104(1.27×)195.5h (1.21×)
ToE r1=0.4 (Ours)83.3 (+0.0)1.4 × 104(1.36×)195.3h (1.22×)
LV-ViT-MBaseline [5]84.13.7 × 104368.7h
(CVPR'23) NetworkExpansion10→20 [12]84.0 (-0.1)2.9 × 104(1.28×)292.7h (1.26×)
ToE r1=0.4 (Ours)84.1 (+0.0)2.7 × 104(1.37×)292.5h (1.26×)
", + "bbox": [ + 80, + 380, + 464, + 477 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/afceb7dc322b2645dff5d32c9f5eab1f28bb878b4890ad11e3ea2c4782b89766.jpg", + "table_caption": [ + "Table 4. Performance comparison between EfficientTrain [30] and our combination framework on ImageNet-1K." + ], + "table_footnote": [], + "table_body": "
ModelMethodTop-1 Acc. (%)GFLOPs (per training iter)Training time (total GPU hours)
DeiT-tinyBaseline (EfficientTrain) [30]72.51.3 × 10452.5h
(ICCV'23) EfficientTrain [30]73.3 (+0.8)8.8 × 103(1.48×)36.5h (1.44×)
EfficientTrain + ToE r1=0.6 (Ours)73.5 (+1.0)7.6 × 103(1.71×)32.3h (1.63×)
DeiT-smallBaseline (EfficientTrain) [30]80.35.2 × 104121.3h
(ICCV'23) EfficientTrain [30]80.4 (+0.1)3.4 × 103(1.53×)85.2h (1.42×)
EfficientTrain + ToE r1=0.6 (Ours)80.4 (+0.1)2.9 × 104(1.79×)79.4h (1.53×)
", + "bbox": [ + 80, + 513, + 464, + 588 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "train with consistent accuracy improvement during training, while the accuracy of NetworkExpansion with EMA drops significantly at the intermediate epoch number and then restores due to the inconsistent structures of before-and-after models when structure growing. More validation curves are presented in the Appendix.", + "bbox": [ + 75, + 598, + 468, + 688 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Combination with EfficientTrain [30]. ToE can be seamlessly integrated into the EfficientTrain framework to further improve the performance. We do not modify the pipeline of EfficientTrain and simply apply ToE to the output tokens of the model's first block. The results are summarized in Tab. 4, which effectively evaluates the universality of ToE. The combination of EfficientTrain and ToE achieves higher training speeds to further enhance the training efficiency of EfficientTrain with accuracy improvement.", + "bbox": [ + 75, + 688, + 470, + 825 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Transfer Results on CIFAR-10/100", + "text_level": 1, + "bbox": [ + 76, + 832, + 380, + 847 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "we further explore the transfer learning ability of ToE-pre-trained weights and evaluate whether ToE can be used to accelerate the fine-tuning on CIFAR-10/100. For the fine", + "bbox": [ + 75, + 854, + 472, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e4a076fa3bb61e3910d0c7c855bab5fc0ea079737439b0dcfb9202bcf8c6a1fc.jpg", + "image_caption": [ + "Figure 3. Validation Top-1 accuracy of DeiT-tiny and LV-ViT-T on ImageNet-1k during training with different methods." + ], + "image_footnote": [], + "bbox": [ + 526, + 337, + 692, + 465 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/355923237431d617b81e17a50299e4dadbc1c37b5bb34e2d63dc71c99d681041.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 692, + 337, + 854, + 464 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c63816583d803418e89e5ec0202d9b1029555ff25acfc4b52ccad086ea31f3fe.jpg", + "table_caption": [ + "Table 5. Results for fine-tuning DeiT on CIFAR-10/100." + ], + "table_footnote": [], + "table_body": "
ModelPre-trainingFine-tuningTop-1 Acc. (%)
MethodAccelerationMethodAccelerationCIFAR-10CIFAR-100
DeiT-tinyBaseline [4]1.0xBaseline [4]1.0x98.0786.78
Baseline [4]1.0xToE\\( r_1=0.5 \\)1.3x98.10 (+0.03)86.74 (-0.04)
ToE\\( r_1=0.5 \\)1.3xBaseline [4]1.0x98.19 (+0.12)87.10 (+0.32)
ToE\\( r_1=0.5 \\)1.3xToE\\( r_1=0.5 \\)1.3x98.16 (+0.09)86.91 (+0.13)
DeiT-smallBaseline [4]1.0xBaseline [4]1.0x98.9390.15
Baseline [4]1.0xToE\( r_1=0.5 \)1.3x98.96 (+0.03)90.19 (+0.04)
ToE\\( r_1=0.5 \\)1.3xBaseline [4]1.0x99.03 (+0.10)90.37 (+0.22)
ToE\\( r_1=0.5 \\)1.3xToE\\( r_1=0.5 \\)1.3x98.99 (+0.06)90.26 (+0.11)
", + "bbox": [ + 503, + 521, + 887, + 618 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tuning settings, we follow the settings of the official GitHub repo $^{3}$ . We introduce the training details in the Appendix.", + "bbox": [ + 498, + 627, + 890, + 657 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Tab. 5, pre-training weights by ToE is able to improve the accuracy on CIFAR-10/100 for DeiT-tiny/small, when using the same baseline training for fine-tuning (see the 1st and 3rd rows in both DeiT-tiny and DeiT-small). For example, ToE pre-training outperforms baseline pretraining by $0.32\\%$ accuracy on CIFAR-100, which evaluates the strong transfer ability of ToE. In addition, our ToE is also effective and efficient for fine-tuning (see the 1st and 2nd rows in DeiT-tiny/small). ToE achieves $1.3 \\times$ acceleration for fine-tuning DeiT-tiny with 0.03 accuracy improvement on CIFAR-10. Further, we employ ToE for both pre-training and fine-tuning, which significantly accelerates the training with an accuracy improvement of at least $0.06\\%$ on CIFAR-10 for both DeiT-tiny/small, compared to that using both baselines.", + "bbox": [ + 496, + 659, + 893, + 871 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "3https://github.com/facebookresearch/deit", + "bbox": [ + 500, + 886, + 723, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "15789", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/7293af1047e8703722870c9aa8e40d77269546798b59aa8efc2e5a3830d108b5.jpg", + "table_caption": [ + "Table 6. Ablation studies of different speedup factors for DeiT-tiny on ImageNet-1K. The default $r_0 / r_1$ , $N_g$ and $k$ are set to 1/2, 3 and 2, respectively. All results in this table have almost the same training speeds for 44h training (total GPU hours)." + ], + "table_footnote": [], + "table_body": "
DeiT-tinyFactorsr0/r1=1/3r0/r1=2/3Ng=2Ng=4k=1k=3default
Top-1 Acc. (%)72.372.572.472.572.572.672.6
", + "bbox": [ + 81, + 142, + 460, + 166 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7c753cbd91bc0ab27bba020fc9519b7ee1b1a3c74d7595379e2751383ed54fa0.jpg", + "table_caption": [ + "Table 7. Effect of \"initialization-expansion-merge\" pipeline for DeiT on ImageNet-1K. $\\pm$ indicates we conduct 3 runs to calculate the mean and std." + ], + "table_footnote": [], + "table_body": "
InitializationExpansionMergeTop-1 Acc. (%)
RandomSpatialDeiT-tinyDeiT-small
×72.679.8
×72.3±0.279.7±0.1
××71.279.1
××71.779.6
", + "bbox": [ + 135, + 203, + 406, + 268 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/eed7f08200b8813a7c5f5c2b415a76d88cfadabdc14a319390be3656b802c8a6.jpg", + "table_caption": [ + "Table 8. Results of applying ToE to different early transformer block's output tokens for DeiT-tiny on ImageNet-1K." + ], + "table_footnote": [], + "table_body": "
BlockTop-1 Acc. (%) DeiT-tinyGFLOPs (per training iter)Training time (total GPU hours)
Embedding72.12.51 × 10343.5h
First block72.62.58 × 10344.2h
Second block72.22.65 × 10345.2h
Third block72.12.71 × 10346.9h
", + "bbox": [ + 135, + 304, + 406, + 363 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 373, + 230, + 388 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effect of speedup factors in ToE. As presented in Tab. 6, we verify the sensitivity of the speedup factors mentioned in Sec. 3.3, such as the ratio of $r_0 / r_1$ , training stages $N_g$ and parallel expanding operation $k$ . At almost the same training time, ToE is relatively insensitive to these factors, w.r.t accuracy. It allows ToE to be easily integrated into the different models' training pipeline with minimal factor adjustments.", + "bbox": [ + 75, + 398, + 470, + 503 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We further adjust the keep rate of the first stage $r_1$ to control the training speed, and the relationship between $r_1$ and training speed is illustrated in Fig. 4. We found ToE achieves more than $1.3 \\times$ acceleration on DeiT-tiny without accuracy dropping. Additionally, it also demonstrates that reducing token redundancy in the early stages of training sometimes improves the model performance.", + "bbox": [ + 75, + 505, + 468, + 611 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effect of \"Initialization-expansion-merging\". Tab. 7 provides an analysis of the necessity of each step in the proposed \"initialization-expansion-merging\" pipeline. When we randomly select tokens as the initial token set rather than spatial-distribution token initialization, it leads to performance degradation. Furthermore, removing widest feature-distribution token expansion and feature-distribution token merging from the pipeline significantly decreases the accuracy, e.g., more than $0.9\\%$ and $1.4\\%$ accuracy drops without the merging and expansion for DeiT-tiny, respectively.", + "bbox": [ + 75, + 612, + 470, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Where to apply ToE. Work in [32, 48] demonstrates that class attention tends to be a global pooling as more attention operations are performed, and tokens in early blocks are more similar. This leads to more redundancy in tokens from early blocks. Consequently, applying ToE to the output tokens of early blocks can achieve higher acceleration. As shown in Tab. 8, we default apply ToE to the output tokens of the first block, which achieves the best trade-off between accuracy and training speed, compared to other early blocks.", + "bbox": [ + 75, + 763, + 470, + 901 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2c6c478fd4a34c506bc32a7becb6e5b32586b147ae21b6af3b68f5c61bd31370.jpg", + "image_caption": [ + "Figure 4. Trade-off between acceleration ratio and model performance by setting different $r_1$ ." + ], + "image_footnote": [], + "bbox": [ + 542, + 93, + 846, + 244 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/2ea3f59ae626807578f42186b9633575c86a54248696756c4315e24b0b4335b4.jpg", + "table_caption": [ + "Table 9. Results of different feature-distribution distances in Eq. 3 for DeiT on ImageNet-1K." + ], + "table_footnote": [], + "table_body": "
MeasureTop-1 Acc. (%)
DeiT-tinyDeiT-small
Manhattan Distance69.878.0
Euclidean Distance70.678.4
Cosine Distance72.679.8
", + "bbox": [ + 588, + 315, + 800, + 378 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effect of the feature-distribution distance. We explore the metric that measures the feature-distribution distance between two tokens in Eq. 3. As shown in Tab. 9, we use three different metrics: Manhattan distance, Euclidean distance, and Cosine distance. We observe that Cosine distance achieves the best performance as the distance metric.", + "bbox": [ + 498, + 388, + 893, + 479 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 497, + 617, + 512 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we proposed a novel token growth scheme Token Expansion (ToE) to achieve consistent training acceleration for ViTs. ToE introduce an \"initialization-expansion-merging\" pipeline to maintain the integrity of the intermediate feature distribution of original transformers, preventing the loss of crucial learnable information in the training process. In experiments, ToE can be seamlessly integrated into the training of various transformers and efficient training frameworks in a lossless manner or even accuracy improvement, compared to the entire full-token training. These experimental results of ToE also demonstrate the superior performance gains over the SOTA methods.", + "bbox": [ + 496, + 523, + 893, + 705 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 723, + 666, + 739 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work is supported by the National Natural Science Foundation of China (NO. 62102151), the National Key Research and Development Program of China (No. 2023YFC3306401), Shanghai Sailing Program (21YF1411200), Shanghai Science and Technology Commission (22511104600), CCF-Tencent Rhino-Bird Open Research Fund, the Open Research Fund of Key Laboratory of Advanced Theory and Application in Statistics and Data Science, Ministry of Education (KLATASDS2305), the Fundamental Research Funds for the Central Universities.", + "bbox": [ + 496, + 750, + 893, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "15790", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. NeurIPS, 30, 2017. 1", + "[2] Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL, pages 4171–4186, 2019.", + "[3] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. NeurIPS, 33:1877-1901, 2020. 1", + "[4] Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In ICLR, pages 10347-10357. PMLR, 2021. 1, 2, 5, 7", + "[5] Zi-Hang Jiang, Qibin Hou, Li Yuan, Daquan Zhou, Yujun Shi, Xiaojie Jin, Anran Wang, and Jiashi Feng. All tokens matter: Token labeling for training better vision transformers. 
NeurIPS, 34:18590-18602, 2021. 5, 7", + "[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, pages 213-229. Springer, 2020.", + "[7] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. NeurIPS, 34:12077-12090, 2021. 1", + "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 1", + "[9] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, pages 770-778, 2016. 1", + "[10] Tianlong Chen, Yu Cheng, Zhe Gan, Lu Yuan, Lei Zhang, and Zhangyang Wang. Chasing sparsity in vision transformers: An end-to-end exploration. NeurIPS, 34:19974-19988, 2021. 1, 2, 3, 6, 7", + "[11] Daniel Bolya, Cheng-Yang Fu, Xiaoliang Dai, Peizhao Zhang, Christoph Feichtenhofer, and Judy Hoffman. Token merging: Your vit but faster. In ICLR, 2022. 1, 2, 3, 4, 6, 7", + "[12] Ning Ding, Yehui Tang, Kai Han, Chao Xu, and Yunhe Wang. Network expansion for practical training acceleration. In CVPR, pages 20269-20279, 2023. 1, 2, 3, 6, 7", + "[13] Linyuan Gong, Di He, Zhuohan Li, Tao Qin, Liwei Wang, and Tieyan Liu. Efficient training of bert by progressively stacking. In ICML, pages 2337-2346. PMLR, 2019. 1, 2, 3, 7", + "[14] Huanrui Yang, Hongxu Yin, Maying Shen, Pavlo Molchanov, Hai Li, and Jan Kautz. Global vision transformer pruning with hessian-aware saliency. In CVPR, pages 18547-18557, 2023. 1, 3", + "[15] Fang Yu, Kun Huang, Meng Wang, Yuan Cheng, Wei Chu," + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "and Li Cui. Width & depth pruning for vision transformers. In AAAI, volume 36, pages 3143-3151, 2022.", + "[16] François Lagunas, Ella Charlaix, Victor Sanh, and Alexander M Rush. Block pruning for faster transformers. In EMNLP, pages 10619-10629, 2021.", + "[17] Mengzhou Xia, Zexuan Zhong, and Danqi Chen. Structured pruning learns compact and accurate models. In ACL, pages 1513-1528, 2022. 1, 3", + "[18] Yongming Rao, Wenliang Zhao, Benlin Liu, Jiwen Lu, Jie Zhou, and Cho-Jui Hsieh. Dynamicvit: Efficient vision transformers with dynamic token sparsification. NeurIPS, 34:13937-13949, 2021. 1, 3, 4", + "[19] Lingchen Meng, Hengduo Li, Bor-Chun Chen, Shiyi Lan, Zuxuan Wu, Yu-Gang Jiang, and Ser-Nam Lim. Adavit: Adaptive vision transformers for efficient image recognition. In CVPR, pages 12309-12318, 2022.", + "[20] Mohsen Fayyaz, Soroush Abbasi Koohpayegani, Farnoush Rezaei Jafari, Sunando Sengupta, Hamid Reza Vaezi Joze, Eric Sommerlade, Hamed Pirsiavash, and Jürgen Gall. Adaptive token sampling for efficient vision transformers. In ECCV, pages 396-414. Springer, 2022.", + "[21] Zhenglun Kong, Peiyan Dong, Xiaolong Ma, Xin Meng, Wei Niu, Mengshu Sun, Xuan Shen, Geng Yuan, Bin Ren, Hao Tang, et al. Spvit: Enabling faster vision transformers via latency-aware soft token pruning. In ECCV, pages 620-640. Springer, 2022.", + "[22] Hongxu Yin, Arash Vahdat, Jose M Alvarez, Arun Mallya, Jan Kautz, and Pavlo Molchanov. A-vit: Adaptive tokens for efficient vision transformer. In CVPR, pages 10809-10818, 2022. 
1, 3", + "[23] Sheng Xu, Yanjing Li, Mingbao Lin, Peng Gao, Guodong Guo, Jinhu Lu, and Baochang Zhang. Q-detr: An efficient low-bit quantized detection transformer. In CVPR, pages 3842-3851, 2023. 1", + "[24] Yanjing Li, Sheng Xu, Baochang Zhang, Xianbin Cao, Peng Gao, and Guodong Guo. Q-vit: Accurate and fully quantized low-bit vision transformer. NeurIPS, 35:34451-34463, 2022.", + "[25] Yefei He, Zhenyu Lou, Luoming Zhang, Jing Liu, Weijia Wu, Hong Zhou, and Bohan Zhuang. Bivit: Extremely compressed binary vision transformers. In ICCV, pages 5651-5663, 2023.", + "[26] Phuoc-Hoan Charles Le and Xinlin Li. Binaryvit: Pushing binary vision transformers towards convolutional models. In CVPR, pages 4664-4673, 2023. 1", + "[27] Cheng Chen, Yichun Yin, Lifeng Shang, Xin Jiang, Yujia Qin, Fengyu Wang, Zhi Wang, Xiao Chen, Zhiyuan Liu, and Qun Liu. bert2bert: Towards reusable pretrained language models. In ACL, pages 2134-2148, 2022. 1, 2, 3", + "[28] Xin Yuan, Pedro Savarese, and Michael Maire. Growing efficient deep networks by structured continuous sparsification. In ICLR, 2021.", + "[29] Wei Wen, Feng Yan, Yiran Chen, and Hai Li. Autogrow: Automatic layer growing in deep convolutional networks. In KDD, pages 833-841, 2020. 1", + "[30] Yulin Wang, Yang Yue, Rui Lu, Tianjiao Liu, Zhao Zhong, Shiji Song, and Gao Huang. Efficienttrain: Exploring generalized curriculum learning for training visual backbones. In ICCV, pages 5852-5864, 2023. 2, 3, 5, 7" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "15791", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[31] Changlin Li, Bohan Zhuang, Guangrun Wang, Xiaodan Liang, Xiaojun Chang, and Yi Yang. Automated progressive learning for efficient training of vision transformers. In CVPR, pages 12486-12496, 2022. 3", + "[32] Xuran Pan, Xuan Jin, Yuan He, Shiji Song, Gao Huang, et al. Budgeted training for vision transformer. In ICLR, 2022. 3, 8", + "[33] Katherine Lee, Daphne Ippolito, Andrew Nystrom, Chiyuan Zhang, Douglas Eck, Chris Callison-Burch, and Nicholas Carlini. Deduplicating training data makes language models better. In ACL, pages 8424-8445, 2022. 3", + "[34] Mingxing Tan and Quoc Le. Efficientnetv2: Smaller models and faster training. In ICLR, pages 10096-10106. PMLR, 2021. 3", + "[35] Bradley McDanel and Chi Phuong Huynh. Accelerating vision transformer training via a patch sampling schedule. arXiv preprint arXiv:2208.09520, 2022. 3", + "[36] Li Shen, Yan Sun, Zhiyuan Yu, Liang Ding, Xinmei Tian, and Dacheng Tao. On efficient training of large-scale deep learning models: A literature review. arXiv preprint arXiv:2304.03589, 2023. 3", + "[37] Yuedong Yang, Guihong Li, and Radu Marculescu. Efficient on-device training via gradient filtering. In CVPR, pages 3811-3820, 2023. 3", + "[38] Xucheng Ye, Pengcheng Dai, Junyu Luo, Xin Guo, Yingjie Qi, Jianlei Yang, and Yiran Chen. Accelerating cnn training by pruning activation gradients. In ECCV, pages 322-338. Springer, 2020. 3", + "[39] Yonggan Fu, Haoran You, Yang Zhao, Yue Wang, Chaojian Li, Kailash Gopalakrishnan, Zhangyang Wang, and Yingyan Lin. Fractrain: Fractionally squeezing bit savings both temporally and spatially for efficient dnn training. NeurIPS, 33:12127-12139, 2020. 3", + "[40] Yue Wang, Ziyu Jiang, Xiaohan Chen, Pengfei Xu, Yang Zhao, Yingyan Lin, and Zhangyang Wang. E2-train: Training state-of-the-art cnns with over $80\\%$ energy savings. 
NeurIPS, 32, 2019. 3", + "[41] Mengtian Li, Ersin Yumer, and Deva Ramanan. Budgeted training: Rethinking deep neural network training under resource constraints. In ICLR, 2019. 3", + "[42] Jiong Zhang, Hsiang-Fu Yu, and Inderjit S Dhillon. Autoassist: A framework to accelerate training of deep neural networks. NeurIPS, 32, 2019. 3", + "[43] Marius Hobbhahn and Jaime Sevilla. What's the backward-forward flop ratio for neural networks? https://epochai.org/blog/backward-forward-FLOP-ratio, 2021. Accessed: 2023-9-28. 3, 6", + "[44] Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. JMLR, 9:2579-2605, 2008. 5", + "[45] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, pages 248-255. IEEE, 2009. 5", + "[46] Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 2009. 5", + "[47] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. NeurIPS, 32, 2019. 6" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "ref_text", + "text": "[48] Maithra Raghu, Thomas Unterthiner, Simon Kornblith, Chiyuan Zhang, and Alexey Dosovitskiy. Do vision transformers see like convolutional neural networks? NeurIPS, 34:12116-12128, 2021. 8", + "bbox": [ + 501, + 92, + 893, + 146 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "15792", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A General and Efficient Training for Transformer via Token Expansion/6b319bca-e10d-4650-be77-29bcc4ffd8dd_model.json b/2024/A General and Efficient Training for Transformer via Token Expansion/6b319bca-e10d-4650-be77-29bcc4ffd8dd_model.json new file mode 100644 index 0000000000000000000000000000000000000000..71e081091098529061d1527c73962d1ba875fe54 --- /dev/null +++ b/2024/A General and Efficient Training for Transformer via Token Expansion/6b319bca-e10d-4650-be77-29bcc4ffd8dd_model.json @@ -0,0 +1,2321 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.125, + 0.131, + 0.846, + 0.154 + ], + "angle": 0, + "content": "A General and Efficient Training for Transformer via Token Expansion" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.179, + 0.861, + 0.216 + ], + "angle": 0, + "content": "Wenxuan Huang\\(^{1*}\\) Yunhang Shen\\(^{2*}\\) Jiao Xie\\(^{3}\\) Baochang Zhang\\(^{4}\\) Gaoqi He\\(^{1}\\) Ke Li\\(^{2}\\) Xing Sun\\(^{2}\\) Shaohui Lin\\(^{1,5\\boxtimes}\\)" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.218, + 0.806, + 0.234 + ], + "angle": 0, + "content": "\\(^{1}\\)School of Computer Science and Technology, East China Normal University, Shanghai, China" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.235, + 0.806, + 0.251 + ], + "angle": 0, + "content": "\\(^{2}\\)Tencent Youtu Lab, China \\(^{3}\\)Xiamen University, China \\(^{4}\\)Beihang University, China" + }, + { + "type": "list", + "bbox": [ + 0.179, + 0.218, + 0.806, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.253, + 0.821, + 0.269 + ], + "angle": 0, + "content": "\\(^{5}\\)Key Laboratory of Advanced Theory and Application in Statistics and Data Science - MOE, China" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.273, + 0.894, + 0.305 + ], + "angle": 0, + "content": "osilly0616@gmail.com, shenyunhang01@gmail.com, jiaoxiel990@126.com, bczhang@buaa.edu.cn \ngqhe@cs.ecnu.edu.cn, tristanli.sh@gmail.com, winfred.sun@gmail.com, shaohuilin007@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.34, + 0.314, + 0.356 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.371, + 0.474, + 0.751 + ], + "angle": 0, + "content": "The remarkable performance of Vision Transformers (ViTs) typically requires an extremely large training cost. Existing methods have attempted to accelerate the training of ViTs, yet typically disregard method universality with accuracy dropping. Meanwhile, they break the training consistency of the original transformers, including the consistency of hyper-parameters, architecture, and strategy, which prevents them from being widely applied to different Transformer networks. In this paper, we propose a novel token growth scheme Token Expansion (termed ToE) to achieve consistent training acceleration for ViTs. We introduce an \"initialization-expansion-merging\" pipeline to maintain the integrity of the intermediate feature distribution of original transformers, preventing the loss of crucial learnable information in the training process. ToE can not only be seamlessly integrated into the training and fine-tuning process of transformers (e.g., DeiT and LV-ViT), but also effective for efficient training frameworks (e.g., EfficientTrain), without twisting the original training hyperparameters, architecture, and introducing additional training strategies. Extensive experiments demonstrate that ToE achieves about \\(1.3 \\times\\) faster for the training of ViTs in a lossless manner, or even with performance gains over the full-token training baselines. Code is available at https://github.com/Osilly-TokenExpansion." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.778, + 0.21, + 0.793 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.803, + 0.472, + 0.879 + ], + "angle": 0, + "content": "Transformers have achieved excellent performance in the tasks of natural language processing (NLP) [1-3] and computer vision [4-7]. 
Despite their great success, modern Transformer models typically require extremely large parameters and computation consumption due to the quadratic com" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.341, + 0.894, + 0.402 + ], + "angle": 0, + "content": "Table 1. Training results for DeiT [4] on ImageNet-1K. DeiT does not use the EMA strategy by default. a/b in the column of Top-1 Acc. means without/with EMA strategy using the official GitHub repo. The training time is averagely measured on one/four NVIDIA RTX A6000 GPUs 3 times with a batch size of 1, 024 for DeiT-Tiny/Base, respectively." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.404, + 0.887, + 0.505 + ], + "angle": 0, + "content": "
ModelMethodTraining consistencyTop-1 Acc. (%)Training time (GPU hours)
HyperArchStrategy
TinyBaseline [4]---72.254.6h
S2ViTE (600 epoch) [10]××70.1 (-2.1)-
ToMeRDS→[11]×71.7 (-0.5)53.3h
NetworkExpansion6→12 [12]×70.3 (-1.9) / 70.1 (-2.1)43.2h
ToERi=0.5 (Ours)72.6 (+0.4)44.2h
BaseBaseline [4]---81.8292.8h
StackBERT [13]×80.8 (-1.0)231.6h
NetworkExpansion6→12 [12]×81.0 (-0.8) / 81.5 (-0.3)226.8h
ToERi=0.5 (Ours)81.6 (-0.2)231.2h
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.514, + 0.895, + 0.636 + ], + "angle": 0, + "content": "putational complexity in the self-attention module. For example, ViT-H/14 [8] requires \\(\\sim 1,000\\) FLOPs, which is \\(250 \\times\\) larger than ResNet-50 [9]. The entire training process needs a significant amount of computing resources to reach model convergence, resulting in a substantial computation overhead. To reduce the computational cost of large models, there has been growing research attention on accelerating Transformers for either training or inference." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.64, + 0.896, + 0.837 + ], + "angle": 0, + "content": "Existing Transformer pruning methods [14-22] aim to reduce the inference complexity. Among them, structure pruning [14-17] and token pruning [18-22] focus on reducing the neurons or tokens of Transformers to accelerate the inference. However, these pruning methods require additional training computational cost in each forward-backward iteration to determine which neurons or tokens are important enough to be retained, or the fine-tuning for pruned models. Recently, Transformer quantization [23-26] accelerates the inference via low-bit computation, but they also cannot reduce the training computation cost. Thus, it is challenging for them to effectively accelerate the training of Transformers in practical scenarios, e.g., cloud service." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.895, + 0.903 + ], + "angle": 0, + "content": "To reduce the training computation overhead, recent works [12, 13, 27-29] have proposed structure growth methods. They update a smaller number of model parameters during the early stages of training and gradually increase" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.207, + 0.9 + ], + "angle": 0, + "content": "*Equal contribution." + }, + { + "type": "page_footnote", + "bbox": [ + 0.254, + 0.887, + 0.385, + 0.9 + ], + "angle": 0, + "content": "Corresponding author." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15783" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.173, + 0.087, + 0.803, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.274, + 0.895, + 0.349 + ], + "angle": 0, + "content": "Figure 1. The \"initialization-expansion-merging\" pipeline of proposed ToE. We take the 1st training stage \\((\\delta = 1)\\), the kept rate \\(r_1 = 2r_0 = \\frac{2}{3}\\), the repetition step \\(k = 1\\) as example. ToE is only added after the first Transformer block to guide the token selection and usage. During training, steps (1), (2), and (3) are performed for each iteration with the reduction of token numbers. First, seed tokens are selected for token initialization through step (1). Then, the number of tokens is expanded via step (2) for token expansion. Finally, we merge the unselected token set (blue boxes) into the selected one (red boxes) with the close feature distributions in step (3) for token merging. During testing, ToE can be safely removed to generate the same Transformer architecture as the original full-token Transformer." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.358, + 0.473, + 0.856 + ], + "angle": 0, + "content": "the number of parameters involved in the updating process as training progresses. 
However, the existing methods fail to achieve general transformer training acceleration without accuracy dropping (shown in Tab. 1), and they break the training consistency of the original transformers from three perspectives: (1) Hyper-parameter consistency. Existing methods (e.g., SViTE [10]) delicately tune training hyperparameters (e.g., learning rate and epoch number) of the original models, which are sensitive to individual ViTs [4] and require additional trial-and-error costs for different networks. (2) Architecture consistency. Existing methods [10, 11] alter the final model architectures, which may deviate from the user's requirements and potentially necessitates additional hardware/software support to implement real training speedup. For example, ToMe [11] progressively merges similar tokens layer-by-layer to reduce the number of tokens in ViTs during training, which replaces the attention operators with the weighted average attention modules, generating a different model architecture that deviates from the original Transformer. Moreover, it cannot significantly accelerate the practical training due to the unfriendly computation. (3) Strategy consistency. Existing methods [12, 13, 27] may suffer from performance deterioration across different Transformers by adding additional training strategies, such as EMA and reset optimizer states. It means the effectiveness of these strategies is for specific models, which limits the method's universality whether employing them for training. In Tab. 1, the extra EMA strategy in [12] plays different roles to the performance across different models, i.e., the effectiveness for DeiT-base but not for DeiT-tiny. Thus, this begs our rethinkings: How to implement real and friendly training speedup for Transformers while keeping the training consistency and high accuracy?" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.472, + 0.903 + ], + "angle": 0, + "content": "To answer the above question, we propose a novel token growth scheme, Token Expansion (termed ToE) to achieve general training acceleration for ViTs, while adhering to" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.358, + 0.896, + 0.678 + ], + "angle": 0, + "content": "the training consistency of original models. Specifically, we present an \"initialization-expansion-merging\" pipeline (in Fig. 1) to maintain the integrity of the intermediate feature distribution of original transformers, preventing the loss of crucial learnable information during the accelerated training process. Similar to structure growth methods, we initially involve a limited number of tokens to participate in training and gradually grow the token number during training progress, eventually reaching the utilization of the entire token set. Then, a widest feature-distribution token expansion is introduced to make the feature distributions of the selected token set as wide as possible. Additionally, a feature-distribution token merging combines the tokens with close feature distributions to further avoid information loss. ToE not only accelerates the training and fine-tuning process of popular Transformers in a lossless manner or even with performance improvement, but also can be integrated into the existing efficient training frameworks (e.g., EfficientTrain [30]) for further performance improvement, without twisting the original training hyper-parameters, architecture, and introducing additional training strategies." 
+ }, + { + "type": "text", + "bbox": [ + 0.518, + 0.681, + 0.882, + 0.696 + ], + "angle": 0, + "content": "Our main contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.702, + 0.895, + 0.793 + ], + "angle": 0, + "content": "- We propose ToE, a novel token growth scheme to accelerate ViTs from the perspective of tokens. ToE is a consistent training acceleration method and can be seamlessly integrated into the training and fine-tuning process of transformers without any modifications to the original training hyper-parameters, architecture, and strategies." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.793, + 0.895, + 0.837 + ], + "angle": 0, + "content": "- We propose an effective \"initialization-expansion-merging\" framework to avoid the token information loss by maintaining the integrity of the intermediate feature distribution." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.838, + 0.895, + 0.899 + ], + "angle": 0, + "content": "- Extensive experiments demonstrate that ToE accelerates the training and fine-tuning process of ViTs with a negligible accuracy drop or even surpassing the original full-token counterparts, which outperforms previous SOTA methods." + }, + { + "type": "list", + "bbox": [ + 0.501, + 0.702, + 0.895, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "15784" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.09, + 0.22, + 0.106 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.115, + 0.418, + 0.132 + ], + "angle": 0, + "content": "2.1. Training Acceleration for Transformers" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.139, + 0.473, + 0.32 + ], + "angle": 0, + "content": "As mentioned above, many existing works focus on accelerating the training of transformers from the perspective of structural parameters. These structure methods [10, 12, 13, 27, 31, 32] reduce the number of updated parameters in the training process to save the computational cost. In contrast, the proposed ToE accelerates training from the perspective of reducing token redundancy. In other words, ToE computes a smaller number of tokens but still optimizes all parameters. It avoids potential performance drops in many structure growth methods due to the inconsistent structures of before-and-after models during structure growth and resetting of optimizer state when updating new structural parameters." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.321, + 0.473, + 0.488 + ], + "angle": 0, + "content": "ToMe [11] uses a limited number of tokens to participate in training and progressively merges similar tokens layer-by-layer, which changes the attention operator in inference. ToE also involves merging tokens with close feature distributions by feature-distribution token merging. However, our merging strategy is performed only once at the end of the \"initialization-expansion-merging\" pipeline during training, which prevents the information loss of tokens. This ensures that ToE avoids the mismatch between practical and theoretical acceleration caused by excessive merging operations and operator modifications." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.488, + 0.473, + 0.731 + ], + "angle": 0, + "content": "Additionally, several works [30, 33-35] also consider to reduce the data for training. 
The work in [33] deduplicates training datasets to save computational resources. Unfortunately, it usually introduces additional computational costs and sometimes becomes a bottleneck by using additional time to process datasets during training [36]. PSS [35] uses fewer patches obtained by splitting images during training. EfficientTrain [30] and PL [34] use images of different sizes and additional data augmentation. However, EfficientTrain and PL change the training pipelines that differ from the training of the original model, e.g., hyper-parameters. Moreover, the above methods consider the properties of training data. In contrast, ToE focuses on the crucial learnable information in the intermediate feature space of transformers. Thus, ToE can be integrated into the above methods in a plug-and-play manner to further enhance training efficiency." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.741, + 0.36, + 0.758 + ], + "angle": 0, + "content": "2.2. Training Acceleration for CNNs" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.472, + 0.903 + ], + "angle": 0, + "content": "Prior efficient training acceleration methods have explored ways to speed up the training of CNN models [37-42]. For example, works in [37, 38] consider pruning gradients to reduce training computation costs. Works in [39, 40] attempt to use quantization technical to achieve training acceleration. Others try to reduce training time either by reducing the number of optimization iterations with a linear decay for the learning rate [41] or skipping easy samples that contribute little to loss reduction [42]. However, these methods may not" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.152 + ], + "angle": 0, + "content": "be directly applied to Transformers for training acceleration due to the specific architectural differences between transformers and CNNs. Differently, ToE focuses on the training acceleration for Transformers on the token dimension." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.162, + 0.704, + 0.179 + ], + "angle": 0, + "content": "2.3. Transformer pruning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.185, + 0.895, + 0.276 + ], + "angle": 0, + "content": "Transformer pruning methods typically reduce parameters or tokens to generate sparse Transformers for fast inference. Structure pruning methods [14-17] attempted to prune the structures of transformers. Token pruning methods [18-22] focused on dynamically determining the importance of input tokens and pruning them during inference." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.277, + 0.895, + 0.383 + ], + "angle": 0, + "content": "The key differences between our method and transformer pruning methods are two-fold. (1) Transformer pruning methods primarily aim to accelerate transformer inference, while our target is for training acceleration. (2) We obtain a dense model after training by token growth, which is entirely consistent with the original model for inference. In contrast, pruning methods generate sparse models after training." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.396, + 0.593, + 0.412 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.421, + 0.753, + 0.437 + ], + "angle": 0, + "content": "3.1. 
Preliminaries and Notations" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.444, + 0.895, + 0.55 + ], + "angle": 0, + "content": "Given a Transformer with \\(L\\) blocks, we denote the sets of input and output tokens for the \\(l\\)-th block as \\(\\mathcal{S}_{l-1}\\) and \\(\\mathcal{S}_l\\) with \\(l \\in \\{1,2,\\dots,L\\}\\), respectively. The index set of output tokens for the \\(l\\)-th block is defined as \\(\\mathcal{I} = \\{1,2,\\dots,N_l\\}\\), where \\(N_l\\) is the number of output tokens for the \\(l\\)-th block. We further denote the \\(i\\)-th token of the output tokens for the \\(l\\)-th block as \\(t_{l,i} \\in \\mathbb{R}^d\\), thus \\(\\mathcal{S}_l = \\{t_{l,i} | \\forall i \\in \\mathcal{I}\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.551, + 0.897, + 0.838 + ], + "angle": 0, + "content": "For the \\(l\\)-th Transformer block, we consider to reduce the output tokens to a specified size \\(N_{l}^{\\prime} = \\lfloor rN_{l}\\rfloor\\), where \\(r\\in (0,1]\\) is the kept rate of tokens, and \\(\\lfloor \\cdot \\rfloor\\) is a floor function. Further, we define the index set of kept tokens as \\(\\mathcal{I}' = \\{1,2,\\dots ,N_l'\\}\\) and we obtain a subset \\(S_{l}^{\\prime} = \\{t_{l,i}^{\\prime}|\\forall i\\in \\mathcal{I}^{\\prime}\\}\\) of output tokens. When the output tokens of the \\(l\\)-th block are reduced, this results in a corresponding reduction in the quantity of input tokens for blocks beyond the \\(l\\)-th block. Furthermore, the computational complexity of self-attention blocks and MLP layers in Transformers is directly proportional to the number of input tokens. According to the work [43], the computation in the forward and backward propagation of modern neural networks roughly conforms to 1:2. Therefore, the reduction of tokens significantly accelerates the computation in both the forward and backward propagations during training if \\(r < 1\\). Note that, to reduce the complex search computation for the kept rate of tokens \\(r\\) across all Transformer blocks, we simply and effectively set \\(r\\) to be the same in all blocks that benefit from acceleration." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.847, + 0.667, + 0.862 + ], + "angle": 0, + "content": "3.2. Overview of ToE" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.895, + 0.901 + ], + "angle": 0, + "content": "As shown in Fig. 1, ToE initially selects a significantly small number of tokens, then progressively grows to the final full-" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15785" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.182 + ], + "angle": 0, + "content": "token same as the original Transformer, thereby achieving training acceleration. We divide the origin training process into \\( N_{g} \\) stages on average. We use a limited number of tokens to participate in each training stage and gradually grow the token number along with the training stages. The token growth strategy consists of three steps:" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.182, + 0.47, + 0.272 + ], + "angle": 0, + "content": "(1) Initial token selection as the seed tokens. 
we initially select \\(\\lfloor r_0N_l\\rfloor\\) output tokens from the origin token set \\(S_{l}\\) as the seed token set by using Uniform sampling on the index set \\(\\mathcal{I}\\), where \\(r_0\\) represents the pre-defined initial kept rate, which is default set to less than 0.3 in our experiments unless otherwise specified." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.273, + 0.471, + 0.349 + ], + "angle": 0, + "content": "(2) Token expansion. In the \\(\\delta\\)-th \\((\\delta \\in \\{1, 2, \\dots, N_g\\})\\) training stage, we perform \\(\\delta\\) times token expansion to preserve the integrity of the original intermediate feature space. Furthermore, we pre-define the keep rate of the first stage to be \\(r_1\\). The kept rate of \\(\\delta\\)-th stage \\(r_\\delta\\) is computed as:" + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.182, + 0.471, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.181, + 0.358, + 0.47, + 0.395 + ], + "angle": 0, + "content": "\\[\n\\mu_ {\\delta} = \\left\\{ \\begin{array}{l l} r _ {1} - r _ {0}, & \\text {i f} \\delta = 1, \\\\ \\frac {1 - r _ {1}}{N _ {g} - 1}, & \\text {o t h e r w i s e ,} \\end{array} \\right. \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.184, + 0.399, + 0.289, + 0.412 + ], + "angle": 0, + "content": "\\[\nr _ {\\delta} = r _ {\\delta - 1} + \\mu_ {\\delta},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.421, + 0.47, + 0.526 + ], + "angle": 0, + "content": "where \\(\\mu_{\\delta}\\) is the token expansion rate in the \\(\\delta\\)-th training stage and \\(r_1 = 2 \\cdot r_0 \\in (0,1]\\). After the \\(\\delta\\) times token expansion, we select \\(\\lfloor r_{\\delta}N_l\\rfloor\\) tokens from the full-token set \\(S_{l}\\). In Sec. 3.3.2, we will introduce the widest feature-distribution token expansion method to select \\(\\lfloor r_{\\delta}N_l\\rfloor\\) tokens, which aims to expand the token distribution space to effectively present full-token feature distribution." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.526, + 0.47, + 0.631 + ], + "angle": 0, + "content": "(3) Token merging. To further avoid information loss during the training process, we consider merging the unselected tokens into the selected ones in the token expansion process, which retains effective information of the unselected tokens in the merged token set \\( S_{l}^{\\prime} \\). Inspired by ToMe [11], we merge averagely the tokens that the feature distributions are close as one new token, which is further introduced in Sec. 3.3.3." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.632, + 0.469, + 0.692 + ], + "angle": 0, + "content": "During training, ToE performs steps (1), (2), and (3) on the original full-token set for each training iteration, which reduces the number of tokens involved in training while retaining the effective information from the full-token set." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.702, + 0.246, + 0.717 + ], + "angle": 0, + "content": "3.3. Token Expansion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.725, + 0.47, + 0.786 + ], + "angle": 0, + "content": "In this Section, we introduce the proposed ToE method, including spatial-distribution token initialization, widest feature-distribution token expansion, feature-distribution token merging, and its optimization." 
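To make the schedule above concrete, here is a minimal sketch of ours (not taken from the paper's code; the function name and the 1-indexed iteration counter are assumptions). It follows Eq. 1 with $r_0 = r_1 / 2$ and the stage index $\delta = \lceil N_g \cdot t / T \rceil$.

```python
# Sketch (ours) of the token kept-rate schedule in Eq. 1: training is split
# evenly into N_g stages and the kept rate grows from r_0 = r_1 / 2 up to 1.0.
import math

def kept_rate(t, T, r1=0.5, num_stages=3):
    """Kept rate r_delta at iteration t (1-indexed) out of T total iterations."""
    r0 = 0.5 * r1
    delta = math.ceil(num_stages * t / T)        # current training stage
    r = r0
    for m in range(1, delta + 1):
        mu = (r1 - r0) if m == 1 else (1.0 - r1) / (num_stages - 1)   # Eq. 1
        r += mu                                   # r_delta = r_{delta-1} + mu_delta
    return r

# e.g., r1 = 0.5 and N_g = 3 give kept rates 0.50, 0.75, 1.00 over the three stages.
```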
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.804, + 0.408, + 0.819 + ], + "angle": 0, + "content": "3.3.1 Spatial-distribution Token Initialization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.827, + 0.469, + 0.871 + ], + "angle": 0, + "content": "For the initialization, we apply a simple strategy to select the initial token set from \\( S_{l} \\). We define the index of the initial token set as:" + }, + { + "type": "equation", + "bbox": [ + 0.149, + 0.878, + 0.47, + 0.906 + ], + "angle": 0, + "content": "\\[\n\\mathcal {I} ^ {(I)} = \\{i | \\forall i \\bmod \\left\\lfloor \\frac {1}{r _ {0}} \\right\\rfloor = 1 \\wedge \\forall i \\in \\mathcal {I} \\}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.213 + ], + "angle": 0, + "content": "The selected token set and the unselected tokens set can be expressed as \\(\\mathbb{A} = \\{t_{l,i}|\\forall i\\in \\mathcal{I}^{(I)}\\}\\) and \\(\\mathbb{B} = \\mathcal{S}_l - \\mathbb{A}\\), respectively. This initialization selection strategy is based on spatial distribution. It indicates that we choose one token out of every \\(\\left\\lfloor \\frac{1}{r_0}\\right\\rfloor\\) tokens from the original token set and add it to the initial token set. Our strategy is simple, yet effective, to ensure that the initially selected tokens provide broad spatial coverage across the image patches." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.232, + 0.871, + 0.246 + ], + "angle": 0, + "content": "3.3.2 Widest Feature-distribution Token Expansion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.255, + 0.893, + 0.527 + ], + "angle": 0, + "content": "Previous works [11, 18] show that the intermediate feature space in modern Transformers is overparameterized, such that they prune the full-token Transformers to be sparse ones. Actually, through the above token initialization, we obtain the sparse Transformers. However, the performance drops significantly if we only train on these selected tokens. Thus, we consider to grow the number of tokens, which is expected to preserve the integrity of the original intermediate feature space and avoid the loss of tokens containing valuable information. Inspired by this, we seek to maintain the integrity of the intermediate feature distribution. Intuitively, when the feature distributions of two token sets are sufficiently close, they have similar information that can be used to effectively represent each other. In contrast, given one token whose feature distribution deviates significantly from all other tokens in the token set, it will be difficult to be adequately represented by other tokens, such that we expect to select this token to underscore its importance in the token expansion." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.528, + 0.893, + 0.678 + ], + "angle": 0, + "content": "To this end, we propose the widest feature-distribution token expansion strategy. Specifically, we perform the expanding operation on the selected tokens from the initialized set. For the \\(\\delta\\)-th stage of token expansion, we consider the selected token set \\(\\mathbb{A} \\in \\mathbb{R}^{|A| \\times d}\\) and the unselected token set \\(\\mathbb{B} \\in \\mathbb{R}^{|B| \\times d}\\) as the 2D matrices, where \\(|\\cdot|\\) and \\(d\\) respectively denote the number of tokens and feature dimension, and \\(|\\mathbb{A}| + |\\mathbb{B}| = N_l\\). 
We utilize Cosine Distance as the metric to measure the distance between feature distribution of tokens in these two sets (other metrics see Tab. 9):" + }, + { + "type": "equation", + "bbox": [ + 0.557, + 0.687, + 0.892, + 0.719 + ], + "angle": 0, + "content": "\\[\n\\mathcal {D} (\\mathbb {B}, \\mathbb {A}) = \\mathbf {1} - \\cos \\langle \\mathbb {B}, \\mathbb {A} \\rangle = \\mathbf {1} - \\frac {\\mathbb {B} \\mathbb {A} ^ {\\mathrm {T}}}{\\| \\mathbb {B} \\| \\cdot \\| \\mathbb {A} \\|}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.728, + 0.89, + 0.759 + ], + "angle": 0, + "content": "where \\(\\mathbf{1}\\) is an all-one matrix. \\(\\mathcal{D}(\\mathbb{B},\\mathbb{A})\\in \\mathbb{R}^{|\\mathbb{B}|\\times |\\mathbb{A}|}\\) represents the pairwise distances between tokens in \\(\\mathbb{B}\\) and \\(\\mathbb{A}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.76, + 0.892, + 0.805 + ], + "angle": 0, + "content": "We further define the distance between the feature distribution of tokens in \\(\\mathbb{B}\\) and its closest token in \\(\\mathbb{A}\\) as distance \\((\\mathbb{B} \\to \\mathbb{A}) \\in \\mathbb{R}^{|\\mathbb{B}|}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.571, + 0.815, + 0.892, + 0.831 + ], + "angle": 0, + "content": "\\[\n\\operatorname {d i s t a n c e} (\\mathbb {B} \\rightarrow \\mathbb {A}) _ {i} = \\min _ {j} \\left(\\mathcal {D} (\\mathbb {B}, \\mathbb {A}) _ {i, j}\\right), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.901 + ], + "angle": 0, + "content": "where \\(i\\in \\{1,\\dots ,|B|\\}\\) and \\(j\\in \\{1,\\dots ,|A|\\}\\). Eq. 4 indicates that we sample the minimal values of the feature-distribution distance matrix \\(\\mathcal{D}(\\mathbb{B},\\mathbb{A})\\) along the second dimension. Thus, distance \\((\\mathbb{B}\\to \\mathbb{A})_i\\) measures importance of" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15786" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.167, + 0.089, + 0.533, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.538, + 0.089, + 0.805, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.246, + 0.895, + 0.285 + ], + "angle": 0, + "content": "Figure 2. Visualization for the feature distribution of token set. We use T-SNE [44] to visualize the output token feature distributions at the first block, the tokens selected by ToE, and the output tokens after the second block. Baselines are DeiT-small trained on ImageNet-1K. ToE preserves the distribution integrity of intermediate features of the original token set across different Transformer blocks while ensuring that feature distributions are as wide as possible." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.293, + 0.471, + 0.324 + ], + "angle": 0, + "content": "\\(i\\)-th token in \\(\\mathbb{B}\\). 
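To make Eqs. 3 and 4 concrete, the following is a small sketch written by us (not the authors' implementation); it assumes $\mathbb{A}$ and $\mathbb{B}$ are stored as $(|\mathbb{A}|, d)$ and $(|\mathbb{B}|, d)$ PyTorch tensors.

```python
# Pairwise cosine distance D(B, A) from Eq. 3 and the per-token importance
# distance(B -> A)_i from Eq. 4. The tensor layout is our own assumption.
import torch
import torch.nn.functional as F

def importance(B, A):
    """B: (|B|, d) unselected tokens; A: (|A|, d) selected tokens."""
    D = 1.0 - F.normalize(B, dim=-1) @ F.normalize(A, dim=-1).T   # Eq. 3
    return D.min(dim=1).values                                    # Eq. 4: min over j
```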
At this point, we progressively add the most important token to \\(\\mathbb{A}\\), which is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.186, + 0.33, + 0.36, + 0.343 + ], + "angle": 0, + "content": "\\[\n\\mathbb {A} = \\mathbb {A} + t ^ {*}, \\quad \\mathbb {B} = \\mathbb {B} - t ^ {*},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.139, + 0.341, + 0.47, + 0.358 + ], + "angle": 0, + "content": "\\[\nt ^ {*} = \\left\\{\\mathbb {B} _ {i} | i = \\operatorname {a r g m a x} (d i s t a n c e (\\mathbb {B} \\rightarrow \\mathbb {A})) \\right\\}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.364, + 0.473, + 0.56 + ], + "angle": 0, + "content": "where \\( t^* \\) is the most important token in \\( \\mathbb{B} \\). When the feature distribution of one token is far from its closest token, it can be said that the feature distribution of this token deviates significantly from that of all other tokens in the token set. The operation described in Eq. 5 is performed for \\( \\lfloor \\mu_{\\delta}N_l \\rfloor \\) times to select \\( \\lfloor \\mu_{\\delta}N_l \\rfloor \\) tokens from \\( \\mathbb{B} \\) into \\( \\mathbb{A} \\). The widest feature-distribution token expansion strategy ensures that the feature distributions of the selected token set become as wide as possible, preventing the loss of important tokens. However, as we need to iterate \\( \\lfloor \\mu_{\\delta}N_l \\rfloor \\) times expansion, it results in a considerable consumption of computational resources. Considering the computation parallelization, we modify the expanding operation in Eq. 5 parallelly:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.566, + 0.364, + 0.579 + ], + "angle": 0, + "content": "\\[\n\\mathbb {A} = \\mathbb {A} + \\mathcal {S} ^ {*}, \\quad \\mathbb {B} = \\mathbb {B} - \\mathcal {S} ^ {*},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.122, + 0.577, + 0.47, + 0.597 + ], + "angle": 0, + "content": "\\[\n\\mathcal {S} ^ {*} = \\left\\{\\mathbb {B} _ {i} | i \\in \\operatorname {t o p k} _ {\\lfloor \\mu_ {\\delta} \\mathrm {N} _ {1 / \\mathrm {k}} \\rfloor} (d i s t a n c e (\\mathbb {B} \\rightarrow \\mathbb {A})) \\right\\}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.601, + 0.472, + 0.692 + ], + "angle": 0, + "content": "where \\( k \\) is the pre-defined repetition step of parallel expanding operation, \\( S^* \\) is a token set consisting of the important tokens in \\( \\mathbb{B} \\), \\( \\mathrm{topk}_n \\) denotes the top argmax with the number of \\( n \\) tokens. By this way, we only perform \\( k \\) times parallel expanding operation to expand \\( \\lfloor \\mu_{\\delta} N_l \\rfloor \\) tokens, and its computational consumption is negligible with small \\( k \\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.709, + 0.383, + 0.725 + ], + "angle": 0, + "content": "3.3.3 Feature-distribution Token Merging" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.732, + 0.471, + 0.807 + ], + "angle": 0, + "content": "After token expansion, we aim to retain the effective information of the unselected tokens, such that we merge the unselected tokens that the feature distributions are close to the selected ones. 
The feature-distribution token merging can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.1, + 0.811, + 0.43, + 0.828 + ], + "angle": 0, + "content": "\\[\n\\mathcal {S} _ {l} ^ {\\prime} = \\left\\{\\operatorname {m e a n} \\left(\\mathbb {A} _ {j}, \\mathcal {S} _ {j} ^ {(M)}\\right) | \\forall j \\in \\{1, 2, \\dots , | \\mathbb {A} | \\} \\right\\}, \\text {w h e r e}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.119, + 0.826, + 0.47, + 0.844 + ], + "angle": 0, + "content": "\\[\n\\mathcal {S} _ {i} ^ {\\left(\\hat {M}\\right)} = \\left\\{\\mathbb {B} _ {i} \\mid \\mathcal {I} _ {i} ^ {\\left(\\hat {M}\\right)} = = j, \\forall i \\in \\{1, 2, \\dots , | \\mathbb {B} | \\} \\right\\}, \\tag {7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.843, + 0.358, + 0.86 + ], + "angle": 0, + "content": "\\[\n\\mathcal {I} ^ {(M)} = \\operatorname {a r g m i n} _ {j} (\\mathcal {D} (\\mathbb {B}, \\mathbb {A}) _ {i, j}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.866, + 0.471, + 0.904 + ], + "angle": 0, + "content": "where \\(S_{l}^{\\prime}\\in \\mathbb{R}^{|\\mathbb{A}|\\times d}\\) is the token set merging the closest tokens from \\(\\mathbb{B}\\) to \\(\\mathbb{A}\\), and mean \\((\\mathbb{A}_j,S_j^{(M)})\\) indicate that we" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.293, + 0.893, + 0.34 + ], + "angle": 0, + "content": "merge \\(\\mathbb{B}\\) into \\(\\mathbb{A}\\) averagely based on the index set \\(\\mathcal{I}^{(M)}\\in\\) \\(\\mathbb{R}^{\\left|\\mathbb{B}\\right|}\\). Note that every \\(\\mathbb{B}_i\\) participates in the merging to avoid the information dropping for the unselected tokens." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.36, + 0.698, + 0.375 + ], + "angle": 0, + "content": "3.3.4 Optimization of ToE" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.384, + 0.894, + 0.459 + ], + "angle": 0, + "content": "Our objective loss is the same as the original models, e.g., cross-entropy loss in DeiT. The training details of ToE are presented in Algorithm 1. Note that we only apply ToE to the output tokens of the first transformer block. The detailed analysis is discussed in Sec. 4.4." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.46, + 0.895, + 0.747 + ], + "angle": 0, + "content": "ToE is a plug-and-play acceleration module, which has three following advantages: (1) As shown in Fig. 2, we observed that the selected token set obtained by ToE in the multiple block outputs has a larger average distribution distance via T-SNE [44], compared to that in the original full-token set (see First block vs. After ToE). Moreover, it maintains a feature distribution similar to the original token set. It indicates ToE can preserve the integrity of the intermediate feature distribution of the original token set across different Transformer blocks by reducing the number of tokens. (2) ToE is a parameter-free module, it does not introduce any trainable parameters and utilizes efficient matrix calculations that the computational overhead is negligible, compared to computation-intensive self-attention. (3) The speedup factors (e.g., token kept rate \\( r_1 \\) and training stage \\( N_g \\)) of ToE are independent of the original model's training hyper-parameters. This decoupling allows ToE to be seamlessly integrated into the training process of the original model, obviating the need for any adjustments to the training hyper-parameters." 
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.762, + 0.634, + 0.779 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.787, + 0.707, + 0.803 + ], + "angle": 0, + "content": "4.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Datasets and baselines. We evaluate our method on ImageNet-1K [45] and CIFAR-10/100 [46]. For baselines, we use two popular ViTs, i.e., DeiT [4] and LV-ViT [5], as the base models to evaluate the proposed ToE on ImageNet-1K. To further evaluate the universality, we integrate ToE into the efficient training framework EfficientTrain [30]. Moreover," + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15787" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.088, + 0.095, + 0.335, + 0.11 + ], + "angle": 0, + "content": "Algorithm 1: Optimization with ToE" + }, + { + "type": "algorithm", + "bbox": [ + 0.082, + 0.112, + 0.47, + 0.54 + ], + "angle": 0, + "content": "Input: Input dataset \\(\\mathcal{X}\\) output token number \\(N_{l}\\) total training stage \\(N_{g}\\) kept rate of the first training stage \\(r_1\\) repetition step of the parallel expanding operation \\(k\\) Transformer parameters \\(\\theta\\) maximum iterations \\(T\\) Output: Updated Transformer parameters \\(\\theta\\) for \\(t\\gets 1\\) to \\(T\\) do Sample from \\(\\mathcal{X}\\) to obtain data sample \\(x\\) ,feed-forwarded through the embedding and first \\(l\\) -th transformer blocks to obtain the output token set \\(S_{l}\\) . \n3 \\(\\% \\% \\%\\) Spatial-distribution Token Initialization%% \n4 \\(r_0\\gets \\frac{1}{2} r_1\\) . \n5 Initialize A and B by \\(r_0,S_l\\) via Eq. 2; \n6 \\(\\% \\% \\%\\) Widest Feature-distribution Token Expansion%% \n7Obtain the current training stage \\(\\delta = \\lceil N_g*t / T\\rceil\\) . \n8 for \\(m\\gets 1\\) to \\(\\delta\\) do if \\(m = 1\\) then \\(\\mu_{m}\\leftarrow r_{1} - r_{0};\\) else \\(\\mu_{m}\\leftarrow \\frac{1 - r_{1}}{N_{g} - 1}\\) end for \\(n\\gets 1\\) to k do Update A and B by \\(\\mu_{m},N_{l},k,\\) prior A and prior B via Eq. 6; \nend \nend \n\\(\\% \\% \\%\\) Feature-distribution Token Merging%% \n9 Obtain \\(S_l^{\\prime}\\) by A and B via Eq. 7; \n\\(S_l^{\\prime}\\) feed-forwardsed through the \\(l + 1\\) -th transformer block to final layer and progressively obtain the final prediction y; \n\\(\\% \\% \\%\\) Parameter Updating%% \n2 Use \\(y\\) to compute the loss and obtain the gradient \\(\\nabla \\theta\\) . \n23 Use \\(\\nabla \\theta\\) to update prior \\(\\theta\\) via the optimizer to obtain new \\(\\theta\\) . \nend \nreturn \\(\\theta\\)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.557, + 0.47, + 0.603 + ], + "angle": 0, + "content": "we evaluate the transfer learning ability using pre-trained weights of ToE on DeiT and the performance of accelerating the fine-tuning process with ToE on CIFAR-10/100." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.604, + 0.471, + 0.786 + ], + "angle": 0, + "content": "Evaluation metrics. We report Top-1 accuracy, the GPU training time and FLOPs as the evaluation metric. To evaluate the training speed, we report the total GPU hours consumed during the entire training process, as well as the theoretical FLOPs for one forward-backward process. 
To avoid the impact of memory access and kernel launching on training time [12], we report the GPU hours on different numbers of GPUs, but with the same GPU numbers to evaluate different training methods. The FLOPs for the forward process are measured using thop1, and for the backward process, we follow [43] and calculate it as twice the FLOPs of the forward process." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.787, + 0.473, + 0.849 + ], + "angle": 0, + "content": "Implementations. All methods are trained by Pytorch [47]. For DeiT and LV-ViT, all experiments are conducted on four NVIDIA RTX A6000 GPUs\\(^2\\), while EfficientTrain is trained on eight NVIDIA RTX A6000 GPUs." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.892, + 0.137 + ], + "angle": 0, + "content": "All hyper-parameters (e.g., learning rate, decay strategy and rate), and training strategies and optimization processes are the same as the original papers unless otherwise specified." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.138, + 0.894, + 0.26 + ], + "angle": 0, + "content": "Growth strategy. In default, we divide the origin training process into \\( N_{g} = 3 \\) stages on average. The token kept rate of 1st stage \\( r_1 \\) is set to 0.4, 0.5 or 0.6, our method is corresponding to be denoted as ToE \\( r_1 = 0.4 \\), ToE \\( r_1 = 0.5 \\) or ToE \\( r_1 = 0.6 \\). Correspondingly, the kept rate of the initial stage \\( r_0 \\) is set to 0.2, 0.25 and 0.3. The repetition step of parallel expanding operation \\( k \\) is default set to 2, and we perform ToE on the output tokens of the first block for all models." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.271, + 0.722, + 0.287 + ], + "angle": 0, + "content": "4.2. Results on ImageNet-1k" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.295, + 0.895, + 0.598 + ], + "angle": 0, + "content": "DeiT and LV-ViT As shown in Tab. 2, ToE achieves lossless training acceleration with SOTA performance. For example, \\(\\mathrm{ToE}_{r_1 = 0.5}\\) achieves \\(0.4\\%\\) Top-1 accuracy improvement with \\(1.27\\times\\) theoretical and \\(1.24\\times\\) practical faster speed to train DeiT-tiny. For DeiT-small, it achieves \\(1.3\\times\\) training acceleration without accuracy drop. Compared to the SOTA methods, \\(\\mathrm{ToE}_{r_1 = 0.5}\\) outperforms SViTE [10] and NetworkExpansion [12] at least \\(1\\%\\) Top-1 accuracy at the consistent acceleration ratio for training both DeiT-tiny and DeiT-small. Compared to ToMe [11], \\(\\mathrm{ToE}_{r_1 = 0.5}\\) also achieves both higher accuracy and practical training speed. Note that ToMe is able to reduce GFLOPs, but fails to accelerate training due to the usage of unfriendly weighted average attention and layer-wise merging operations. For DeiT-base, \\(\\mathrm{ToE}_{r_1 = 0.5}\\) drops only \\(0.2\\%\\) Top-1 accuracy while saving more than 60 GPU hours in the practical training process, which is comparable to NetworkExpansion with EMA. If we relax the restriction of hyper-parameter consistency (presented in Appendix), \\(\\mathrm{ToE}_{r_1 = 0.4}^{\\text{Hyper}}\\) outperforms NetworkExpansion with \\(0.2\\%\\) accuracy and 24h training time reduction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.892, + 0.734 + ], + "angle": 0, + "content": "For LV-ViT-T and LV-ViT-S shown in Tab. 
3, \\(\\mathrm{ToE}_{r_1 = 0.4}\\) achieves efficient training with \\(1.2\\times\\) acceleration rate, while without accuracy drop or even with accuracy improvement for training LV-ViT-T, compared to baselines. Note that the results of \\(\\mathrm{ToE}_{r_1 = 0.4}\\) and NetworkExpansion are reported with EMA, due to the default LV-ViT training with EMA. In addition, \\(\\mathrm{ToE}_{r_1 = 0.4}\\) outperforms NetworkExpansion in both training acceleration and accuracy with 0.5h training time reduction and \\(0.6\\%\\) accuracy for LV-ViT-T, respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.895, + 0.901 + ], + "angle": 0, + "content": "We also present the validation Top-1 accuracy of ToE and NetworkExpansion during training DeiT-tiny and LV-ViT in Fig. 3. As observed, ToE initially reduces token redundancy during training, resulting in some performance drops compared to the baseline. However, in the later stages of training, ToE introduces more tokens for training, gradually reducing the accuracy gap to the baseline. Benefiting from the reduction of token redundancy in the early stages, models trained by ToE with the proposed token expansion and merging achieve higher accuracies, compared to baselines. Compared to NetworkExpansion, our ToE is more stable to" + }, + { + "type": "page_footnote", + "bbox": [ + 0.078, + 0.863, + 0.431, + 0.876 + ], + "angle": 0, + "content": "\\(^{1}\\)https://github.com/Lyken17/pytorch-OpCounter/blob/master/thop" + }, + { + "type": "page_footnote", + "bbox": [ + 0.08, + 0.876, + 0.469, + 0.9 + ], + "angle": 0, + "content": "2Note that the used number of GPUs for training may be different to the evaluation of training speedup for a fair comparison." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.863, + 0.469, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15788" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.092, + 0.895, + 0.129 + ], + "angle": 0, + "content": "Table 2. Performance comparison for DeiT on ImageNet-1K. a/b in the column of Top-1 Acc. means without/with EMA strategy using the official GitHub repo†. The training time is averagely measured on one/two/four NVIDIA RTX A6000 GPUs for DeiT-tiny/small/base 3 times, and the batch size is set to 1, 024 in all following tables and figures." + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.13, + 0.85, + 0.314 + ], + "angle": 0, + "content": "
ModelMethodConsistencyTop-1 Acc. (%)GFLOPs (per training iter)Training time (total GPU hours)Acceleration (practical rate)
Hyper?Architecture?Strategy?
DeiT-tinyBaseline [4]---72.23.3 × 10354.6h1.00×
(NeurIPS'21) S2ViTE-Tiny (600 epoch) [10]××70.1 (-2.1)2.5 × 103(1.32×)-1.19×
(ICLR'23) ToMeDeTrs→[11]×71.7 (-0.5)2.5 × 103(1.32×)53.3h1.02×
(CVPR'23) NetworkExpansion6→12 [12]×70.3 (-1.9) / 70.1 (-2.1)2.5 × 103(1.32×)43.2h1.26×
ToE r1=0.5 (Ours)72.6 (+0.4)2.6 × 103(1.27×)44.2h1.24×
DeiT-smallBaseline [4]---79.81.3 × 104124.5h1.00×
(ICLR'23) ToMeDeTrs→[11]×79.7 (-0.1)9.8 × 103(1.33×)121.5h1.02×
(CVPR'23) NetworkExpansion6→12 [12]×78.8 (-1.0) / 78.6 (-1.2)9.8 × 103(1.33×)100.3h1.24×
ToE r1=0.5 (Ours)79.8 (+0.0)1.0 × 104(1.30×)102.2h1.22×
DeiT-baseBaseline [4]---81.85.2 × 104292.8h1.00×
(ICML'19) StackBERT [13]×80.8 (-1.0)4.2 × 104(1.24×)231.6h1.26×
(CVPR'23) NetworkExpansion6→12 [12]×81.0 (-0.8) / 81.5 (-0.3)3.9 × 104(1.33×)226.8h1.29×
ToE r1=0.5 (Ours)81.6 (-0.2)4.0 × 104(1.30×)231.2h1.27×
ToE r1=0.4 (Ours)81.4 (-0.4)3.8 × 104(1.37×)225.2h1.30×
ToE Hyper r1=0.5 (Ours)×81.8 (+0.0)3.6 × 104(1.44×)213.2h1.37×
ToE Hyper r1=0.4 (Ours)×81.7 (-0.1)3.3 × 104(1.58×)202.8h1.44×
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.127, + 0.314, + 0.55, + 0.325 + ], + "angle": 0, + "content": "\\(\\dagger\\) https://github.com/huawei-noah/Efficient-Computing/tree/master/TrainingAcceleration/NetworkExpansion" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.331, + 0.471, + 0.379 + ], + "angle": 0, + "content": "Table 3. Performance comparison for LV-ViT on ImageNet-1K. \\(\\ddagger\\) indicates that results reproduced by the official GitHub repo. The training time is averagely measured on two/four NVIDIA RTX A6000 GPUs 3 times for LV-ViT-T/S with a fixed batch size of 1, 024." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.381, + 0.465, + 0.478 + ], + "angle": 0, + "content": "
<table><tr><th>Model</th><th>Method</th><th>Top-1 Acc. (%)</th><th>GFLOPs (per training iter)</th><th>Training time (total GPU hours)</th></tr>
<tr><td rowspan="3">LV-ViT-T</td><td>Baseline [5]</td><td>79.1</td><td>8.2 × 10<sup>3</sup></td><td>130.5h</td></tr>
<tr><td>(CVPR'23) NetworkExpansion<sub>6→12</sub> [12]</td><td>[78.8 (-0.3)]</td><td>7.1 × 10<sup>3</sup> (1.15×)</td><td>114.4h (1.14×)</td></tr>
<tr><td>ToE <sub>r1=0.4</sub> (Ours)</td><td>79.4 (+0.3)</td><td>7.0 × 10<sup>3</sup> (1.17×)</td><td>113.9h (1.15×)</td></tr>
<tr><td rowspan="3">LV-ViT-S</td><td>Baseline [5]</td><td>83.3</td><td>1.9 × 10<sup>4</sup></td><td>237.3h</td></tr>
<tr><td>(CVPR'23) NetworkExpansion<sub>8→16</sub> [12]</td><td>[82.9 (-0.4)]</td><td>1.5 × 10<sup>4</sup> (1.27×)</td><td>195.5h (1.21×)</td></tr>
<tr><td>ToE <sub>r1=0.4</sub> (Ours)</td><td>83.3 (+0.0)</td><td>1.4 × 10<sup>4</sup> (1.36×)</td><td>195.3h (1.22×)</td></tr>
<tr><td rowspan="3">LV-ViT-M</td><td>Baseline [5]</td><td>84.1</td><td>3.7 × 10<sup>4</sup></td><td>368.7h</td></tr>
<tr><td>(CVPR'23) NetworkExpansion<sub>10→20</sub> [12]</td><td>84.0 (-0.1)</td><td>2.9 × 10<sup>4</sup> (1.28×)</td><td>292.7h (1.26×)</td></tr>
<tr><td>ToE <sub>r1=0.4</sub> (Ours)</td><td>84.1 (+0.0)</td><td>2.7 × 10<sup>4</sup> (1.37×)</td><td>292.5h (1.26×)</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.487, + 0.47, + 0.514 + ], + "angle": 0, + "content": "Table 4. Performance comparison between EfficientTrain [30] and our combination framework on ImageNet-1K." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.515, + 0.465, + 0.589 + ], + "angle": 0, + "content": "
<table><tr><th>Model</th><th>Method</th><th>Top-1 Acc. (%)</th><th>GFLOPs (per training iter)</th><th>Training time (total GPU hours)</th></tr>
<tr><td rowspan="3">DeiT-tiny</td><td>Baseline (EfficientTrain) [30]</td><td>72.5</td><td>1.3 × 10<sup>4</sup></td><td>52.5h</td></tr>
<tr><td>(ICCV'23) EfficientTrain [30]</td><td>73.3 (+0.8)</td><td>8.8 × 10<sup>3</sup> (1.48×)</td><td>36.5h (1.44×)</td></tr>
<tr><td>EfficientTrain + ToE <sub>r1=0.6</sub> (Ours)</td><td>73.5 (+1.0)</td><td>7.6 × 10<sup>3</sup> (1.71×)</td><td>32.3h (1.63×)</td></tr>
<tr><td rowspan="3">DeiT-small</td><td>Baseline (EfficientTrain) [30]</td><td>80.3</td><td>5.2 × 10<sup>4</sup></td><td>121.3h</td></tr>
<tr><td>(ICCV'23) EfficientTrain [30]</td><td>80.4 (+0.1)</td><td>3.4 × 10<sup>4</sup> (1.53×)</td><td>85.2h (1.42×)</td></tr>
<tr><td>EfficientTrain + ToE <sub>r1=0.6</sub> (Ours)</td><td>80.4 (+0.1)</td><td>2.9 × 10<sup>4</sup> (1.79×)</td><td>79.4h (1.53×)</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.47, + 0.689 + ], + "angle": 0, + "content": "train with consistent accuracy improvement during training, while the accuracy of NetworkExpansion with EMA drops significantly at the intermediate epoch number and then restores due to the inconsistent structures of before-and-after models when structure growing. More validation curves are presented in the Appendix." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.471, + 0.826 + ], + "angle": 0, + "content": "Combination with EfficientTrain [30]. ToE can be seamlessly integrated into the EfficientTrain framework to further improve the performance. We do not modify the pipeline of EfficientTrain and simply apply ToE to the output tokens of the model's first block. The results are summarized in Tab. 4, which effectively evaluates the universality of ToE. The combination of EfficientTrain and ToE achieves higher training speeds to further enhance the training efficiency of EfficientTrain with accuracy improvement." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.833, + 0.381, + 0.848 + ], + "angle": 0, + "content": "4.3. Transfer Results on CIFAR-10/100" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.473, + 0.901 + ], + "angle": 0, + "content": "we further explore the transfer learning ability of ToE-pre-trained weights and evaluate whether ToE can be used to accelerate the fine-tuning on CIFAR-10/100. For the fine" + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.338, + 0.693, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.693, + 0.338, + 0.856, + 0.465 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.469, + 0.892, + 0.494 + ], + "angle": 0, + "content": "Figure 3. Validation Top-1 accuracy of DeiT-tiny and LV-ViT-T on ImageNet-1k during training with different methods." + }, + { + "type": "table_caption", + "bbox": [ + 0.547, + 0.508, + 0.844, + 0.52 + ], + "angle": 0, + "content": "Table 5. Results for fine-tuning DeiT on CIFAR-10/100." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.522, + 0.888, + 0.619 + ], + "angle": 0, + "content": "
<table><tr><th rowspan="2">Model</th><th colspan="2">Pre-training</th><th colspan="2">Fine-tuning</th><th colspan="2">Top-1 Acc. (%)</th></tr>
<tr><th>Method</th><th>Acceleration</th><th>Method</th><th>Acceleration</th><th>CIFAR-10</th><th>CIFAR-100</th></tr>
<tr><td rowspan="4">DeiT-tiny</td><td>Baseline [4]</td><td>1.0×</td><td>Baseline [4]</td><td>1.0×</td><td>98.07</td><td>86.78</td></tr>
<tr><td>Baseline [4]</td><td>1.0×</td><td>ToE \(r_1=0.5\)</td><td>1.3×</td><td>98.10 (+0.03)</td><td>86.74 (-0.04)</td></tr>
<tr><td>ToE \(r_1=0.5\)</td><td>1.3×</td><td>Baseline [4]</td><td>1.0×</td><td>98.19 (+0.12)</td><td>87.10 (+0.32)</td></tr>
<tr><td>ToE \(r_1=0.5\)</td><td>1.3×</td><td>ToE \(r_1=0.5\)</td><td>1.3×</td><td>98.16 (+0.09)</td><td>86.91 (+0.13)</td></tr>
<tr><td rowspan="4">DeiT-small</td><td>Baseline [4]</td><td>1.0×</td><td>Baseline [4]</td><td>1.0×</td><td>98.93</td><td>90.15</td></tr>
<tr><td>Baseline [4]</td><td>1.0×</td><td>ToE \(r_1=0.5\)</td><td>1.3×</td><td>98.96 (+0.03)</td><td>90.19 (+0.04)</td></tr>
<tr><td>ToE \(r_1=0.5\)</td><td>1.3×</td><td>Baseline [4]</td><td>1.0×</td><td>99.03 (+0.10)</td><td>90.37 (+0.22)</td></tr>
<tr><td>ToE \(r_1=0.5\)</td><td>1.3×</td><td>ToE \(r_1=0.5\)</td><td>1.3×</td><td>98.99 (+0.06)</td><td>90.26 (+0.11)</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.628, + 0.892, + 0.659 + ], + "angle": 0, + "content": "tuning settings, we follow the settings of the official GitHub repo \\(^{3}\\). We introduce the training details in the Appendix." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.895, + 0.872 + ], + "angle": 0, + "content": "As shown in Tab. 5, pre-training weights by ToE is able to improve the accuracy on CIFAR-10/100 for DeiT-tiny/small, when using the same baseline training for fine-tuning (see the 1st and 3rd rows in both DeiT-tiny and DeiT-small). For example, ToE pre-training outperforms baseline pretraining by \\(0.32\\%\\) accuracy on CIFAR-100, which evaluates the strong transfer ability of ToE. In addition, our ToE is also effective and efficient for fine-tuning (see the 1st and 2nd rows in DeiT-tiny/small). ToE achieves \\(1.3 \\times\\) acceleration for fine-tuning DeiT-tiny with 0.03 accuracy improvement on CIFAR-10. Further, we employ ToE for both pre-training and fine-tuning, which significantly accelerates the training with an accuracy improvement of at least \\(0.06\\%\\) on CIFAR-10 for both DeiT-tiny/small, compared to that using both baselines." + }, + { + "type": "page_footnote", + "bbox": [ + 0.501, + 0.887, + 0.724, + 0.901 + ], + "angle": 0, + "content": "3https://github.com/facebookresearch/deit" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15789" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.092, + 0.472, + 0.141 + ], + "angle": 0, + "content": "Table 6. Ablation studies of different speedup factors for DeiT-tiny on ImageNet-1K. The default \\( r_0 / r_1 \\), \\( N_g \\) and \\( k \\) are set to 1/2, 3 and 2, respectively. All results in this table have almost the same training speeds for 44h training (total GPU hours)." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.143, + 0.462, + 0.167 + ], + "angle": 0, + "content": "
<table><tr><td rowspan="2">DeiT-tiny</td><th>Factors</th><th>r<sub>0</sub>/r<sub>1</sub> = 1/3</th><th>r<sub>0</sub>/r<sub>1</sub> = 2/3</th><th>N<sub>g</sub> = 2</th><th>N<sub>g</sub> = 4</th><th>k = 1</th><th>k = 3</th><th>default</th></tr>
<tr><td>Top-1 Acc. (%)</td><td>72.3</td><td>72.5</td><td>72.4</td><td>72.5</td><td>72.5</td><td>72.6</td><td>72.6</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.177, + 0.469, + 0.202 + ], + "angle": 0, + "content": "Table 7. Effect of \"initialization-expansion-merge\" pipeline for DeiT on ImageNet-1K. \\(\\pm\\) indicates we conduct 3 runs to calculate the mean and std." + }, + { + "type": "table", + "bbox": [ + 0.137, + 0.204, + 0.408, + 0.269 + ], + "angle": 0, + "content": "
InitializationExpansionMergeTop-1 Acc. (%)
RandomSpatialDeiT-tinyDeiT-small
×72.679.8
×72.3±0.279.7±0.1
××71.279.1
××71.779.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.279, + 0.469, + 0.304 + ], + "angle": 0, + "content": "Table 8. Results of applying ToE to different early transformer block's output tokens for DeiT-tiny on ImageNet-1K." + }, + { + "type": "table", + "bbox": [ + 0.137, + 0.305, + 0.408, + 0.364 + ], + "angle": 0, + "content": "
<table><tr><th>Block</th><th>Top-1 Acc. (%) DeiT-tiny</th><th>GFLOPs (per training iter)</th><th>Training time (total GPU hours)</th></tr>
<tr><td>Embedding</td><td>72.1</td><td>2.51 × 10<sup>3</sup></td><td>43.5h</td></tr>
<tr><td>First block</td><td>72.6</td><td>2.58 × 10<sup>3</sup></td><td>44.2h</td></tr>
<tr><td>Second block</td><td>72.2</td><td>2.65 × 10<sup>3</sup></td><td>45.2h</td></tr>
<tr><td>Third block</td><td>72.1</td><td>2.71 × 10<sup>3</sup></td><td>46.9h</td></tr></table>
" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.374, + 0.231, + 0.39 + ], + "angle": 0, + "content": "4.4. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.399, + 0.471, + 0.505 + ], + "angle": 0, + "content": "Effect of speedup factors in ToE. As presented in Tab. 6, we verify the sensitivity of the speedup factors mentioned in Sec. 3.3, such as the ratio of \\( r_0 / r_1 \\), training stages \\( N_g \\) and parallel expanding operation \\( k \\). At almost the same training time, ToE is relatively insensitive to these factors, w.r.t accuracy. It allows ToE to be easily integrated into the different models' training pipeline with minimal factor adjustments." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.506, + 0.47, + 0.612 + ], + "angle": 0, + "content": "We further adjust the keep rate of the first stage \\( r_1 \\) to control the training speed, and the relationship between \\( r_1 \\) and training speed is illustrated in Fig. 4. We found ToE achieves more than \\( 1.3 \\times \\) acceleration on DeiT-tiny without accuracy dropping. Additionally, it also demonstrates that reducing token redundancy in the early stages of training sometimes improves the model performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.613, + 0.471, + 0.765 + ], + "angle": 0, + "content": "Effect of \"Initialization-expansion-merging\". Tab. 7 provides an analysis of the necessity of each step in the proposed \"initialization-expansion-merging\" pipeline. When we randomly select tokens as the initial token set rather than spatial-distribution token initialization, it leads to performance degradation. Furthermore, removing widest feature-distribution token expansion and feature-distribution token merging from the pipeline significantly decreases the accuracy, e.g., more than \\(0.9\\%\\) and \\(1.4\\%\\) accuracy drops without the merging and expansion for DeiT-tiny, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Where to apply ToE. Work in [32, 48] demonstrates that class attention tends to be a global pooling as more attention operations are performed, and tokens in early blocks are more similar. This leads to more redundancy in tokens from early blocks. Consequently, applying ToE to the output tokens of early blocks can achieve higher acceleration. As shown in Tab. 8, we default apply ToE to the output tokens of the first block, which achieves the best trade-off between accuracy and training speed, compared to other early blocks." + }, + { + "type": "image", + "bbox": [ + 0.543, + 0.094, + 0.848, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.251, + 0.892, + 0.276 + ], + "angle": 0, + "content": "Figure 4. Trade-off between acceleration ratio and model performance by setting different \\( r_1 \\)." + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.29, + 0.892, + 0.315 + ], + "angle": 0, + "content": "Table 9. Results of different feature-distribution distances in Eq. 3 for DeiT on ImageNet-1K." + }, + { + "type": "table", + "bbox": [ + 0.589, + 0.316, + 0.801, + 0.38 + ], + "angle": 0, + "content": "
<table><tr><th rowspan="2">Measure</th><th colspan="2">Top-1 Acc. (%)</th></tr>
<tr><th>DeiT-tiny</th><th>DeiT-small</th></tr>
<tr><td>Manhattan Distance</td><td>69.8</td><td>78.0</td></tr>
<tr><td>Euclidean Distance</td><td>70.6</td><td>78.4</td></tr>
<tr><td>Cosine Distance</td><td>72.6</td><td>79.8</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.389, + 0.894, + 0.48 + ], + "angle": 0, + "content": "Effect of the feature-distribution distance. We explore the metric that measures the feature-distribution distance between two tokens in Eq. 3. As shown in Tab. 9, we use three different metrics: Manhattan distance, Euclidean distance, and Cosine distance. We observe that Cosine distance achieves the best performance as the distance metric." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.498, + 0.619, + 0.513 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.524, + 0.895, + 0.706 + ], + "angle": 0, + "content": "In this paper, we proposed a novel token growth scheme Token Expansion (ToE) to achieve consistent training acceleration for ViTs. ToE introduce an \"initialization-expansion-merging\" pipeline to maintain the integrity of the intermediate feature distribution of original transformers, preventing the loss of crucial learnable information in the training process. In experiments, ToE can be seamlessly integrated into the training of various transformers and efficient training frameworks in a lossless manner or even accuracy improvement, compared to the entire full-token training. These experimental results of ToE also demonstrate the superior performance gains over the SOTA methods." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.724, + 0.667, + 0.74 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.895, + 0.901 + ], + "angle": 0, + "content": "This work is supported by the National Natural Science Foundation of China (NO. 62102151), the National Key Research and Development Program of China (No. 2023YFC3306401), Shanghai Sailing Program (21YF1411200), Shanghai Science and Technology Commission (22511104600), CCF-Tencent Rhino-Bird Open Research Fund, the Open Research Fund of Key Laboratory of Advanced Theory and Application in Statistics and Data Science, Ministry of Education (KLATASDS2305), the Fundamental Research Funds for the Central Universities." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15790" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.472, + 0.157 + ], + "angle": 0, + "content": "[1] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. NeurIPS, 30, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.472, + 0.213 + ], + "angle": 0, + "content": "[2] Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL, pages 4171–4186, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.216, + 0.472, + 0.284 + ], + "angle": 0, + "content": "[3] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. NeurIPS, 33:1877-1901, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.286, + 0.472, + 0.354 + ], + "angle": 0, + "content": "[4] Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. 
Training data-efficient image transformers & distillation through attention. In ICLR, pages 10347-10357. PMLR, 2021. 1, 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.357, + 0.472, + 0.412 + ], + "angle": 0, + "content": "[5] Zi-Hang Jiang, Qibin Hou, Li Yuan, Daquan Zhou, Yujun Shi, Xiaojie Jin, Anran Wang, and Jiashi Feng. All tokens matter: Token labeling for training better vision transformers. NeurIPS, 34:18590-18602, 2021. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.415, + 0.472, + 0.47 + ], + "angle": 0, + "content": "[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, pages 213-229. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.472, + 0.472, + 0.526 + ], + "angle": 0, + "content": "[7] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. NeurIPS, 34:12077-12090, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.529, + 0.472, + 0.598 + ], + "angle": 0, + "content": "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.6, + 0.472, + 0.64 + ], + "angle": 0, + "content": "[9] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, pages 770-778, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.643, + 0.472, + 0.697 + ], + "angle": 0, + "content": "[10] Tianlong Chen, Yu Cheng, Zhe Gan, Lu Yuan, Lei Zhang, and Zhangyang Wang. Chasing sparsity in vision transformers: An end-to-end exploration. NeurIPS, 34:19974-19988, 2021. 1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.7, + 0.472, + 0.741 + ], + "angle": 0, + "content": "[11] Daniel Bolya, Cheng-Yang Fu, Xiaoliang Dai, Peizhao Zhang, Christoph Feichtenhofer, and Judy Hoffman. Token merging: Your vit but faster. In ICLR, 2022. 1, 2, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.743, + 0.472, + 0.785 + ], + "angle": 0, + "content": "[12] Ning Ding, Yehui Tang, Kai Han, Chao Xu, and Yunhe Wang. Network expansion for practical training acceleration. In CVPR, pages 20269-20279, 2023. 1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.787, + 0.472, + 0.828 + ], + "angle": 0, + "content": "[13] Linyuan Gong, Di He, Zhuohan Li, Tao Qin, Liwei Wang, and Tieyan Liu. Efficient training of bert by progressively stacking. In ICML, pages 2337-2346. PMLR, 2019. 1, 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.83, + 0.472, + 0.884 + ], + "angle": 0, + "content": "[14] Huanrui Yang, Hongxu Yin, Maying Shen, Pavlo Molchanov, Hai Li, and Jan Kautz. Global vision transformer pruning with hessian-aware saliency. In CVPR, pages 18547-18557, 2023. 
1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.887, + 0.472, + 0.901 + ], + "angle": 0, + "content": "[15] Fang Yu, Kun Huang, Meng Wang, Yuan Cheng, Wei Chu," + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.894, + 0.12 + ], + "angle": 0, + "content": "and Li Cui. Width & depth pruning for vision transformers. In AAAI, volume 36, pages 3143-3151, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.894, + 0.163 + ], + "angle": 0, + "content": "[16] François Lagunas, Ella Charlaix, Victor Sanh, and Alexander M Rush. Block pruning for faster transformers. In EMNLP, pages 10619-10629, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.165, + 0.893, + 0.205 + ], + "angle": 0, + "content": "[17] Mengzhou Xia, Zexuan Zhong, and Danqi Chen. Structured pruning learns compact and accurate models. In ACL, pages 1513-1528, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.208, + 0.894, + 0.261 + ], + "angle": 0, + "content": "[18] Yongming Rao, Wenliang Zhao, Benlin Liu, Jiwen Lu, Jie Zhou, and Cho-Jui Hsieh. Dynamicvit: Efficient vision transformers with dynamic token sparsification. NeurIPS, 34:13937-13949, 2021. 1, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.264, + 0.894, + 0.319 + ], + "angle": 0, + "content": "[19] Lingchen Meng, Hengduo Li, Bor-Chun Chen, Shiyi Lan, Zuxuan Wu, Yu-Gang Jiang, and Ser-Nam Lim. Adavit: Adaptive vision transformers for efficient image recognition. In CVPR, pages 12309-12318, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.321, + 0.893, + 0.39 + ], + "angle": 0, + "content": "[20] Mohsen Fayyaz, Soroush Abbasi Koohpayegani, Farnoush Rezaei Jafari, Sunando Sengupta, Hamid Reza Vaezi Joze, Eric Sommerlade, Hamed Pirsiavash, and Jürgen Gall. Adaptive token sampling for efficient vision transformers. In ECCV, pages 396-414. Springer, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.392, + 0.893, + 0.46 + ], + "angle": 0, + "content": "[21] Zhenglun Kong, Peiyan Dong, Xiaolong Ma, Xin Meng, Wei Niu, Mengshu Sun, Xuan Shen, Geng Yuan, Bin Ren, Hao Tang, et al. Spvit: Enabling faster vision transformers via latency-aware soft token pruning. In ECCV, pages 620-640. Springer, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.462, + 0.893, + 0.516 + ], + "angle": 0, + "content": "[22] Hongxu Yin, Arash Vahdat, Jose M Alvarez, Arun Mallya, Jan Kautz, and Pavlo Molchanov. A-vit: Adaptive tokens for efficient vision transformer. In CVPR, pages 10809-10818, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.519, + 0.893, + 0.572 + ], + "angle": 0, + "content": "[23] Sheng Xu, Yanjing Li, Mingbao Lin, Peng Gao, Guodong Guo, Jinhu Lu, and Baochang Zhang. Q-detr: An efficient low-bit quantized detection transformer. In CVPR, pages 3842-3851, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.575, + 0.893, + 0.616 + ], + "angle": 0, + "content": "[24] Yanjing Li, Sheng Xu, Baochang Zhang, Xianbin Cao, Peng Gao, and Guodong Guo. Q-vit: Accurate and fully quantized low-bit vision transformer. NeurIPS, 35:34451-34463, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.618, + 0.893, + 0.659 + ], + "angle": 0, + "content": "[25] Yefei He, Zhenyu Lou, Luoming Zhang, Jing Liu, Weijia Wu, Hong Zhou, and Bohan Zhuang. Bivit: Extremely compressed binary vision transformers. In ICCV, pages 5651-5663, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.661, + 0.893, + 0.702 + ], + "angle": 0, + "content": "[26] Phuoc-Hoan Charles Le and Xinlin Li. Binaryvit: Pushing binary vision transformers towards convolutional models. In CVPR, pages 4664-4673, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.704, + 0.894, + 0.758 + ], + "angle": 0, + "content": "[27] Cheng Chen, Yichun Yin, Lifeng Shang, Xin Jiang, Yujia Qin, Fengyu Wang, Zhi Wang, Xiao Chen, Zhiyuan Liu, and Qun Liu. bert2bert: Towards reusable pretrained language models. In ACL, pages 2134-2148, 2022. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.76, + 0.894, + 0.8 + ], + "angle": 0, + "content": "[28] Xin Yuan, Pedro Savarese, and Michael Maire. Growing efficient deep networks by structured continuous sparsification. In ICLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.803, + 0.893, + 0.844 + ], + "angle": 0, + "content": "[29] Wei Wen, Feng Yan, Yiran Chen, and Hai Li. Autogrow: Automatic layer growing in deep convolutional networks. In KDD, pages 833-841, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[30] Yulin Wang, Yang Yue, Rui Lu, Tianjiao Liu, Zhao Zhong, Shiji Song, and Gao Huang. Efficienttrain: Exploring generalized curriculum learning for training visual backbones. In ICCV, pages 5852-5864, 2023. 2, 3, 5, 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "15791" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[31] Changlin Li, Bohan Zhuang, Guangrun Wang, Xiaodan Liang, Xiaojun Chang, and Yi Yang. Automated progressive learning for efficient training of vision transformers. In CVPR, pages 12486-12496, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.471, + 0.175 + ], + "angle": 0, + "content": "[32] Xuran Pan, Xuan Jin, Yuan He, Shiji Song, Gao Huang, et al. Budgeted training for vision transformer. In ICLR, 2022. 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.178, + 0.469, + 0.232 + ], + "angle": 0, + "content": "[33] Katherine Lee, Daphne Ippolito, Andrew Nystrom, Chiyuan Zhang, Douglas Eck, Chris Callison-Burch, and Nicholas Carlini. Deduplicating training data makes language models better. In ACL, pages 8424-8445, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.233, + 0.469, + 0.272 + ], + "angle": 0, + "content": "[34] Mingxing Tan and Quoc Le. Efficientnetv2: Smaller models and faster training. In ICLR, pages 10096-10106. PMLR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.274, + 0.469, + 0.315 + ], + "angle": 0, + "content": "[35] Bradley McDanel and Chi Phuong Huynh. Accelerating vision transformer training via a patch sampling schedule. arXiv preprint arXiv:2208.09520, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.317, + 0.469, + 0.37 + ], + "angle": 0, + "content": "[36] Li Shen, Yan Sun, Zhiyuan Yu, Liang Ding, Xinmei Tian, and Dacheng Tao. On efficient training of large-scale deep learning models: A literature review. arXiv preprint arXiv:2304.03589, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.373, + 0.469, + 0.412 + ], + "angle": 0, + "content": "[37] Yuedong Yang, Guihong Li, and Radu Marculescu. 
Efficient on-device training via gradient filtering. In CVPR, pages 3811-3820, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.414, + 0.469, + 0.468 + ], + "angle": 0, + "content": "[38] Xucheng Ye, Pengcheng Dai, Junyu Luo, Xin Guo, Yingjie Qi, Jianlei Yang, and Yiran Chen. Accelerating cnn training by pruning activation gradients. In ECCV, pages 322-338. Springer, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.47, + 0.469, + 0.536 + ], + "angle": 0, + "content": "[39] Yonggan Fu, Haoran You, Yang Zhao, Yue Wang, Chaojian Li, Kailash Gopalakrishnan, Zhangyang Wang, and Yingyan Lin. Fractrain: Fractionally squeezing bit savings both temporally and spatially for efficient dnn training. NeurIPS, 33:12127-12139, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.539, + 0.469, + 0.592 + ], + "angle": 0, + "content": "[40] Yue Wang, Ziyu Jiang, Xiaohan Chen, Pengfei Xu, Yang Zhao, Yingyan Lin, and Zhangyang Wang. E2-train: Training state-of-the-art cnns with over \\(80\\%\\) energy savings. NeurIPS, 32, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.595, + 0.469, + 0.635 + ], + "angle": 0, + "content": "[41] Mengtian Li, Ersin Yumer, and Deva Ramanan. Budgeted training: Rethinking deep neural network training under resource constraints. In ICLR, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.637, + 0.469, + 0.676 + ], + "angle": 0, + "content": "[42] Jiong Zhang, Hsiang-Fu Yu, and Inderjit S Dhillon. Autoassist: A framework to accelerate training of deep neural networks. NeurIPS, 32, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.679, + 0.469, + 0.733 + ], + "angle": 0, + "content": "[43] Marius Hobbhahn and Jaime Sevilla. What's the backward-forward flop ratio for neural networks? https://epochai.org/blog/backward-forward-FLOP-ratio, 2021. Accessed: 2023-9-28. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.735, + 0.469, + 0.761 + ], + "angle": 0, + "content": "[44] Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. JMLR, 9:2579-2605, 2008. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.763, + 0.469, + 0.803 + ], + "angle": 0, + "content": "[45] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, pages 248-255. IEEE, 2009. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.805, + 0.469, + 0.831 + ], + "angle": 0, + "content": "[46] Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 2009. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.833, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[47] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. NeurIPS, 32, 2019. 6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.147 + ], + "angle": 0, + "content": "[48] Maithra Raghu, Thomas Unterthiner, Simon Kornblith, Chiyuan Zhang, and Alexey Dosovitskiy. Do vision transformers see like convolutional neural networks? NeurIPS, 34:12116-12128, 2021. 
8" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15792" + } + ] +] \ No newline at end of file diff --git a/2024/A General and Efficient Training for Transformer via Token Expansion/6b319bca-e10d-4650-be77-29bcc4ffd8dd_origin.pdf b/2024/A General and Efficient Training for Transformer via Token Expansion/6b319bca-e10d-4650-be77-29bcc4ffd8dd_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..73f938b3bae6d49ebdc9a7117c48d973c82e4e0d --- /dev/null +++ b/2024/A General and Efficient Training for Transformer via Token Expansion/6b319bca-e10d-4650-be77-29bcc4ffd8dd_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ee611cc3413912fc1588c45d367df12d6dcd0debe3f77fa7f355f713f799814 +size 10433586 diff --git a/2024/A General and Efficient Training for Transformer via Token Expansion/full.md b/2024/A General and Efficient Training for Transformer via Token Expansion/full.md new file mode 100644 index 0000000000000000000000000000000000000000..afce6e7f082696937fd1329df720be73ef1d4fa1 --- /dev/null +++ b/2024/A General and Efficient Training for Transformer via Token Expansion/full.md @@ -0,0 +1,356 @@ +# A General and Efficient Training for Transformer via Token Expansion + +Wenxuan Huang $^{1*}$ Yunhang Shen $^{2*}$ Jiao Xie $^{3}$ Baochang Zhang $^{4}$ Gaoqi He $^{1}$ Ke Li $^{2}$ Xing Sun $^{2}$ Shaohui Lin $^{1,5\boxtimes}$ + +$^{1}$ School of Computer Science and Technology, East China Normal University, Shanghai, China +$^{2}$ Tencent Youtu Lab, China $^{3}$ Xiamen University, China $^{4}$ Beihang University, China + +$^{5}$ Key Laboratory of Advanced Theory and Application in Statistics and Data Science - MOE, China + +osilly0616@gmail.com, shenyunhang01@gmail.com, jiaoxiel990@126.com, bczhang@buaa.edu.cn +gqhe@cs.ecnu.edu.cn, tristanli.sh@gmail.com, winfred.sun@gmail.com, shaohuilin007@gmail.com + +# Abstract + +The remarkable performance of Vision Transformers (ViTs) typically requires an extremely large training cost. Existing methods have attempted to accelerate the training of ViTs, yet typically disregard method universality with accuracy dropping. Meanwhile, they break the training consistency of the original transformers, including the consistency of hyper-parameters, architecture, and strategy, which prevents them from being widely applied to different Transformer networks. In this paper, we propose a novel token growth scheme Token Expansion (termed ToE) to achieve consistent training acceleration for ViTs. We introduce an "initialization-expansion-merging" pipeline to maintain the integrity of the intermediate feature distribution of original transformers, preventing the loss of crucial learnable information in the training process. ToE can not only be seamlessly integrated into the training and fine-tuning process of transformers (e.g., DeiT and LV-ViT), but also effective for efficient training frameworks (e.g., EfficientTrain), without twisting the original training hyperparameters, architecture, and introducing additional training strategies. Extensive experiments demonstrate that ToE achieves about $1.3 \times$ faster for the training of ViTs in a lossless manner, or even with performance gains over the full-token training baselines. Code is available at https://github.com/Osilly-TokenExpansion. + +# 1. Introduction + +Transformers have achieved excellent performance in the tasks of natural language processing (NLP) [1-3] and computer vision [4-7]. 
Despite their great success, modern Transformer models typically require extremely large parameters and computation consumption due to the quadratic com + +Table 1. Training results for DeiT [4] on ImageNet-1K. DeiT does not use the EMA strategy by default. a/b in the column of Top-1 Acc. means without/with EMA strategy using the official GitHub repo. The training time is averagely measured on one/four NVIDIA RTX A6000 GPUs 3 times with a batch size of 1, 024 for DeiT-Tiny/Base, respectively. + +
ModelMethodTraining consistencyTop-1 Acc. (%)Training time (GPU hours)
HyperArchStrategy
TinyBaseline [4]---72.254.6h
S2ViTE (600 epoch) [10]××70.1 (-2.1)-
ToMeRDS→[11]×71.7 (-0.5)53.3h
NetworkExpansion6→12 [12]×70.3 (-1.9) / 70.1 (-2.1)43.2h
ToERi=0.5 (Ours)72.6 (+0.4)44.2h
BaseBaseline [4]---81.8292.8h
StackBERT [13]×80.8 (-1.0)231.6h
NetworkExpansion6→12 [12]×81.0 (-0.8) / 81.5 (-0.3)226.8h
ToERi=0.5 (Ours)81.6 (-0.2)231.2h
+ +putational complexity in the self-attention module. For example, ViT-H/14 [8] requires $\sim 1,000$ FLOPs, which is $250 \times$ larger than ResNet-50 [9]. The entire training process needs a significant amount of computing resources to reach model convergence, resulting in a substantial computation overhead. To reduce the computational cost of large models, there has been growing research attention on accelerating Transformers for either training or inference. + +Existing Transformer pruning methods [14-22] aim to reduce the inference complexity. Among them, structure pruning [14-17] and token pruning [18-22] focus on reducing the neurons or tokens of Transformers to accelerate the inference. However, these pruning methods require additional training computational cost in each forward-backward iteration to determine which neurons or tokens are important enough to be retained, or the fine-tuning for pruned models. Recently, Transformer quantization [23-26] accelerates the inference via low-bit computation, but they also cannot reduce the training computation cost. Thus, it is challenging for them to effectively accelerate the training of Transformers in practical scenarios, e.g., cloud service. + +To reduce the training computation overhead, recent works [12, 13, 27-29] have proposed structure growth methods. They update a smaller number of model parameters during the early stages of training and gradually increase + +![](images/8378b2f501e358b565ceaaac602632602be08ca09bdf943a4a83a123d5773ebc.jpg) +Figure 1. The "initialization-expansion-merging" pipeline of proposed ToE. We take the 1st training stage $(\delta = 1)$ , the kept rate $r_1 = 2r_0 = \frac{2}{3}$ , the repetition step $k = 1$ as example. ToE is only added after the first Transformer block to guide the token selection and usage. During training, steps (1), (2), and (3) are performed for each iteration with the reduction of token numbers. First, seed tokens are selected for token initialization through step (1). Then, the number of tokens is expanded via step (2) for token expansion. Finally, we merge the unselected token set (blue boxes) into the selected one (red boxes) with the close feature distributions in step (3) for token merging. During testing, ToE can be safely removed to generate the same Transformer architecture as the original full-token Transformer. + +the number of parameters involved in the updating process as training progresses. However, the existing methods fail to achieve general transformer training acceleration without accuracy dropping (shown in Tab. 1), and they break the training consistency of the original transformers from three perspectives: (1) Hyper-parameter consistency. Existing methods (e.g., SViTE [10]) delicately tune training hyperparameters (e.g., learning rate and epoch number) of the original models, which are sensitive to individual ViTs [4] and require additional trial-and-error costs for different networks. (2) Architecture consistency. Existing methods [10, 11] alter the final model architectures, which may deviate from the user's requirements and potentially necessitates additional hardware/software support to implement real training speedup. For example, ToMe [11] progressively merges similar tokens layer-by-layer to reduce the number of tokens in ViTs during training, which replaces the attention operators with the weighted average attention modules, generating a different model architecture that deviates from the original Transformer. 
Moreover, it cannot significantly accelerate the practical training due to the unfriendly computation. (3) Strategy consistency. Existing methods [12, 13, 27] may suffer from performance deterioration across different Transformers by adding additional training strategies, such as EMA and reset optimizer states. It means the effectiveness of these strategies is for specific models, which limits the method's universality whether employing them for training. In Tab. 1, the extra EMA strategy in [12] plays different roles to the performance across different models, i.e., the effectiveness for DeiT-base but not for DeiT-tiny. Thus, this begs our rethinkings: How to implement real and friendly training speedup for Transformers while keeping the training consistency and high accuracy? + +To answer the above question, we propose a novel token growth scheme, Token Expansion (termed ToE) to achieve general training acceleration for ViTs, while adhering to + +the training consistency of original models. Specifically, we present an "initialization-expansion-merging" pipeline (in Fig. 1) to maintain the integrity of the intermediate feature distribution of original transformers, preventing the loss of crucial learnable information during the accelerated training process. Similar to structure growth methods, we initially involve a limited number of tokens to participate in training and gradually grow the token number during training progress, eventually reaching the utilization of the entire token set. Then, a widest feature-distribution token expansion is introduced to make the feature distributions of the selected token set as wide as possible. Additionally, a feature-distribution token merging combines the tokens with close feature distributions to further avoid information loss. ToE not only accelerates the training and fine-tuning process of popular Transformers in a lossless manner or even with performance improvement, but also can be integrated into the existing efficient training frameworks (e.g., EfficientTrain [30]) for further performance improvement, without twisting the original training hyper-parameters, architecture, and introducing additional training strategies. + +Our main contributions can be summarized as follows: + +- We propose ToE, a novel token growth scheme to accelerate ViTs from the perspective of tokens. ToE is a consistent training acceleration method and can be seamlessly integrated into the training and fine-tuning process of transformers without any modifications to the original training hyper-parameters, architecture, and strategies. +- We propose an effective "initialization-expansion-merging" framework to avoid the token information loss by maintaining the integrity of the intermediate feature distribution. +- Extensive experiments demonstrate that ToE accelerates the training and fine-tuning process of ViTs with a negligible accuracy drop or even surpassing the original full-token counterparts, which outperforms previous SOTA methods. + +# 2. Related Work + +# 2.1. Training Acceleration for Transformers + +As mentioned above, many existing works focus on accelerating the training of transformers from the perspective of structural parameters. These structure methods [10, 12, 13, 27, 31, 32] reduce the number of updated parameters in the training process to save the computational cost. In contrast, the proposed ToE accelerates training from the perspective of reducing token redundancy. 
In other words, ToE computes a smaller number of tokens but still optimizes all parameters. This avoids the potential performance drops seen in many structure growth methods, which are caused by the inconsistent structures of the before-and-after models during structure growth and by resetting the optimizer state when new structural parameters are updated.

ToMe [11] uses a limited number of tokens to participate in training and progressively merges similar tokens layer-by-layer, which changes the attention operator at inference. ToE also involves merging tokens with close feature distributions via feature-distribution token merging. However, our merging strategy is performed only once, at the end of the "initialization-expansion-merging" pipeline during training, which prevents token information loss. This ensures that ToE avoids the mismatch between practical and theoretical acceleration caused by excessive merging operations and operator modifications.

Additionally, several works [30, 33-35] consider reducing the data used for training. The work in [33] deduplicates training datasets to save computational resources. Unfortunately, it usually introduces additional computational costs and sometimes becomes a bottleneck by spending additional time processing datasets during training [36]. PSS [35] uses fewer patches obtained by splitting images during training. EfficientTrain [30] and PL [34] use images of different sizes and additional data augmentation. However, EfficientTrain and PL change the training pipeline (e.g., hyper-parameters) so that it differs from the training of the original model. Moreover, the above methods exploit the properties of the training data. In contrast, ToE focuses on the crucial learnable information in the intermediate feature space of transformers. Thus, ToE can be integrated into the above methods in a plug-and-play manner to further enhance training efficiency.

# 2.2. Training Acceleration for CNNs

Prior efficient training acceleration methods have explored ways to speed up the training of CNN models [37-42]. For example, the works in [37, 38] consider pruning gradients to reduce training computation costs. The works in [39, 40] attempt to use quantization techniques to achieve training acceleration. Others try to reduce training time either by reducing the number of optimization iterations with a linear decay of the learning rate [41] or by skipping easy samples that contribute little to loss reduction [42]. However, these methods may not be directly applied to Transformers for training acceleration due to the specific architectural differences between transformers and CNNs. In contrast, ToE focuses on training acceleration for Transformers along the token dimension.

# 2.3. Transformer Pruning

Transformer pruning methods typically reduce parameters or tokens to generate sparse Transformers for fast inference. Structure pruning methods [14-17] prune the structures of transformers, while token pruning methods [18-22] dynamically determine the importance of input tokens and prune them during inference.

The key differences between our method and transformer pruning methods are two-fold. (1) Transformer pruning methods primarily aim to accelerate transformer inference, while our target is training acceleration. (2) We obtain a dense model after training by token growth, which is entirely consistent with the original model for inference. In contrast, pruning methods generate sparse models after training.

# 3. Method
# 3.1. Preliminaries and Notations

Given a Transformer with $L$ blocks, we denote the sets of input and output tokens for the $l$-th block as $\mathcal{S}_{l-1}$ and $\mathcal{S}_l$ with $l \in \{1,2,\dots,L\}$, respectively. The index set of output tokens for the $l$-th block is defined as $\mathcal{I} = \{1,2,\dots,N_l\}$, where $N_l$ is the number of output tokens for the $l$-th block. We further denote the $i$-th output token of the $l$-th block as $t_{l,i} \in \mathbb{R}^d$, thus $\mathcal{S}_l = \{t_{l,i} \mid \forall i \in \mathcal{I}\}$.

For the $l$-th Transformer block, we consider reducing the output tokens to a specified size $N_{l}^{\prime} = \lfloor rN_{l}\rfloor$, where $r\in (0,1]$ is the kept rate of tokens and $\lfloor \cdot \rfloor$ is the floor function. Further, we define the index set of kept tokens as $\mathcal{I}' = \{1,2,\dots ,N_l'\}$ and obtain a subset $\mathcal{S}_{l}^{\prime} = \{t_{l,i}^{\prime} \mid \forall i\in \mathcal{I}^{\prime}\}$ of output tokens. When the output tokens of the $l$-th block are reduced, the number of input tokens for all blocks beyond the $l$-th block is reduced correspondingly. Furthermore, the computational complexity of self-attention blocks and MLP layers in Transformers is directly proportional to the number of input tokens. According to [43], the ratio of forward to backward computation in modern neural networks is roughly 1:2. Therefore, reducing the tokens significantly accelerates the computation in both the forward and backward propagation during training if $r < 1$. Note that, to avoid a complex search for the kept rate $r$ across all Transformer blocks, we simply and effectively set $r$ to be the same in all blocks that benefit from acceleration.

# 3.2. Overview of ToE

As shown in Fig. 1, ToE initially selects a significantly small number of tokens and then progressively grows the token set until it matches the full-token set of the original Transformer, thereby achieving training acceleration. We evenly divide the original training process into $N_{g}$ stages. We use a limited number of tokens in each training stage and gradually grow the token number along with the training stages. The token growth strategy consists of three steps (a minimal schedule sketch is given after this overview):

(1) Initial token selection as the seed tokens. We initially select $\lfloor r_0N_l\rfloor$ output tokens from the original token set $\mathcal{S}_{l}$ as the seed token set by uniform sampling on the index set $\mathcal{I}$, where $r_0$ represents the pre-defined initial kept rate, which is set to less than 0.3 by default in our experiments unless otherwise specified.
(2) Token expansion. In the $\delta$-th $(\delta \in \{1, 2, \dots, N_g\})$ training stage, we perform token expansion $\delta$ times to preserve the integrity of the original intermediate feature space. Furthermore, we pre-define the kept rate of the first stage to be $r_1$. The kept rate of the $\delta$-th stage, $r_\delta$, is computed as:

$$
\mu_{\delta} = \begin{cases} r_{1} - r_{0}, & \text{if } \delta = 1, \\ \dfrac{1 - r_{1}}{N_{g} - 1}, & \text{otherwise}, \end{cases} \tag{1}
$$

$$
r_{\delta} = r_{\delta - 1} + \mu_{\delta},
$$

where $\mu_{\delta}$ is the token expansion rate in the $\delta$-th training stage and $r_1 = 2 \cdot r_0 \in (0,1]$. After these $\delta$ expansions, we select $\lfloor r_{\delta}N_l\rfloor$ tokens from the full-token set $\mathcal{S}_{l}$. In Sec. 3.3.2, we will introduce the widest feature-distribution token expansion method to select these $\lfloor r_{\delta}N_l\rfloor$ tokens, which aims to expand the token distribution space so as to effectively represent the full-token feature distribution.
(3) Token merging. To further avoid information loss during the training process, we merge the unselected tokens into the tokens selected in the token expansion process, which retains the effective information of the unselected tokens in the merged token set $\mathcal{S}_{l}^{\prime}$. Inspired by ToMe [11], we average-merge tokens whose feature distributions are close into one new token, as further introduced in Sec. 3.3.3.

During training, ToE performs steps (1), (2), and (3) on the original full-token set for each training iteration, which reduces the number of tokens involved in training while retaining the effective information of the full-token set.
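To make the growth schedule concrete, the snippet below is a minimal sketch (not the authors' released implementation; the function names are ours) of how the kept rate $r_\delta$ from Eq. 1 and the current stage index can be computed.

```python
import math

def kept_rate(delta: int, r1: float, num_stages: int) -> float:
    """Kept rate r_delta from Eq. 1, assuming r_0 = r_1 / 2 and r_{N_g} = 1."""
    r0 = r1 / 2.0
    r = r0
    for m in range(1, delta + 1):
        mu = (r1 - r0) if m == 1 else (1.0 - r1) / (num_stages - 1)
        r += mu  # r_delta = r_{delta-1} + mu_delta
    return min(r, 1.0)

def current_stage(t: int, total_iters: int, num_stages: int) -> int:
    """Stage index delta = ceil(N_g * t / T), as used in the per-iteration loop."""
    return max(1, math.ceil(num_stages * t / total_iters))

# Example: with N_g = 3 and r_1 = 0.5, the kept rates over the three stages are
# 0.5, 0.75, and 1.0, i.e., the final stage trains on the full token set.
```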
# 3.3. Token Expansion

In this section, we introduce the proposed ToE method, including the spatial-distribution token initialization, the widest feature-distribution token expansion, the feature-distribution token merging, and its optimization.

# 3.3.1 Spatial-distribution Token Initialization

For the initialization, we apply a simple strategy to select the initial token set from $\mathcal{S}_{l}$. We define the index set of the initial tokens as:

$$
\mathcal{I}^{(I)} = \left\{ i \,\middle|\, i \bmod \left\lfloor \frac{1}{r_0} \right\rfloor = 1,\ \forall i \in \mathcal{I} \right\}. \tag{2}
$$

The selected token set and the unselected token set can then be expressed as $\mathbb{A} = \{t_{l,i} \mid \forall i\in \mathcal{I}^{(I)}\}$ and $\mathbb{B} = \mathcal{S}_l - \mathbb{A}$, respectively. This initialization strategy is based on the spatial distribution: we choose one token out of every $\left\lfloor \frac{1}{r_0}\right\rfloor$ tokens from the original token set and add it to the initial token set. Our strategy is simple yet effective, ensuring that the initially selected tokens provide broad spatial coverage across the image patches.
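As a rough illustration (assuming a per-image token tensor and 0-indexed positions rather than the 1-indexed notation of Eq. 2; not the official implementation), the spatial-distribution initialization reduces to strided index sampling:

```python
import torch

def spatial_init(tokens: torch.Tensor, r0: float):
    """Split tokens of shape (N_l, d) into a seed set A and the remaining set B (Eq. 2).

    One token is kept out of every floor(1 / r0) consecutive tokens, giving roughly
    floor(r0 * N_l) uniformly spaced seed tokens.
    """
    stride = int(1.0 / r0)
    idx = torch.arange(tokens.shape[0])
    keep = idx[idx % stride == 0]   # 0-indexed counterpart of "i mod floor(1/r0) = 1"
    drop = idx[idx % stride != 0]
    return tokens[keep], tokens[drop]
```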
# 3.3.2 Widest Feature-distribution Token Expansion

Previous works [11, 18] show that the intermediate feature space in modern Transformers is overparameterized, so they prune full-token Transformers into sparse ones. In fact, through the above token initialization, we already obtain such a sparse Transformer. However, the performance drops significantly if we train only on these selected tokens. Thus, we consider growing the number of tokens, which is expected to preserve the integrity of the original intermediate feature space and avoid losing tokens that contain valuable information. Inspired by this, we seek to maintain the integrity of the intermediate feature distribution. Intuitively, when the feature distributions of two token sets are sufficiently close, they contain similar information and can effectively represent each other. In contrast, given one token whose feature distribution deviates significantly from all other tokens in the token set, it is difficult for the other tokens to adequately represent it, so we expect to select this token to underscore its importance during token expansion.

To this end, we propose the widest feature-distribution token expansion strategy. Specifically, we perform the expanding operation on the selected tokens from the initialized set. For the $\delta$-th stage of token expansion, we consider the selected token set $\mathbb{A} \in \mathbb{R}^{|\mathbb{A}| \times d}$ and the unselected token set $\mathbb{B} \in \mathbb{R}^{|\mathbb{B}| \times d}$ as 2D matrices, where $|\cdot|$ and $d$ denote the number of tokens and the feature dimension, respectively, and $|\mathbb{A}| + |\mathbb{B}| = N_l$. We utilize the Cosine Distance as the metric to measure the distance between the feature distributions of tokens in these two sets (other metrics are compared in Tab. 9):

$$
\mathcal{D}(\mathbb{B}, \mathbb{A}) = \mathbf{1} - \cos \langle \mathbb{B}, \mathbb{A} \rangle = \mathbf{1} - \frac{\mathbb{B}\mathbb{A}^{\mathrm{T}}}{\|\mathbb{B}\| \cdot \|\mathbb{A}\|}, \tag{3}
$$

where $\mathbf{1}$ is an all-one matrix. $\mathcal{D}(\mathbb{B},\mathbb{A})\in \mathbb{R}^{|\mathbb{B}|\times |\mathbb{A}|}$ represents the pairwise distances between tokens in $\mathbb{B}$ and $\mathbb{A}$.

We further define the distance between the feature distribution of each token in $\mathbb{B}$ and its closest token in $\mathbb{A}$ as $\operatorname{distance}(\mathbb{B} \to \mathbb{A}) \in \mathbb{R}^{|\mathbb{B}|}$:

$$
\operatorname{distance}(\mathbb{B} \rightarrow \mathbb{A})_{i} = \min_{j}\left(\mathcal{D}(\mathbb{B}, \mathbb{A})_{i,j}\right), \tag{4}
$$

where $i\in \{1,\dots ,|\mathbb{B}|\}$ and $j\in \{1,\dots ,|\mathbb{A}|\}$. Eq. 4 indicates that we take the minimal values of the feature-distribution distance matrix $\mathcal{D}(\mathbb{B},\mathbb{A})$ along the second dimension. Thus, $\operatorname{distance}(\mathbb{B}\to \mathbb{A})_i$ measures the importance of the $i$-th token in $\mathbb{B}$.

![](images/5c8a1f33feeb9bdc18742850a7af1f0c08f43d23d6ab144b70f1a527aa45ad2f.jpg)
Figure 2. Visualization of the feature distribution of the token set. We use T-SNE [44] to visualize the output token feature distributions at the first block, the tokens selected by ToE, and the output tokens after the second block. The baseline is DeiT-small trained on ImageNet-1K. ToE preserves the distribution integrity of intermediate features of the original token set across different Transformer blocks while ensuring that the feature distributions are as wide as possible.

![](images/3f2550f8eecf5402dda4f6f3ff26dc3859e9699e4ca2e741516b9454fdc5eaae.jpg)

At this point, we progressively add the most important token to $\mathbb{A}$, which is formulated as:

$$
\mathbb{A} = \mathbb{A} + t^{*}, \quad \mathbb{B} = \mathbb{B} - t^{*},
$$

$$
t^{*} = \left\{ \mathbb{B}_{i} \mid i = \operatorname{argmax}\left(\operatorname{distance}(\mathbb{B} \rightarrow \mathbb{A})\right) \right\}, \tag{5}
$$

where $t^*$ is the most important token in $\mathbb{B}$. When the feature distribution of a token is far from its closest token, the feature distribution of this token deviates significantly from that of all other tokens in the token set. The operation described in Eq. 5 is performed $\lfloor \mu_{\delta}N_l \rfloor$ times to move $\lfloor \mu_{\delta}N_l \rfloor$ tokens from $\mathbb{B}$ into $\mathbb{A}$. The widest feature-distribution token expansion strategy ensures that the feature distributions of the selected token set become as wide as possible, preventing the loss of important tokens. However, iterating the expansion $\lfloor \mu_{\delta}N_l \rfloor$ times consumes considerable computational resources. To exploit parallel computation, we modify the expanding operation in Eq. 5 to run in parallel:

$$
\mathbb{A} = \mathbb{A} + \mathcal{S}^{*}, \quad \mathbb{B} = \mathbb{B} - \mathcal{S}^{*},
$$

$$
\mathcal{S}^{*} = \left\{ \mathbb{B}_{i} \mid i \in \operatorname{topk}_{\lfloor \mu_{\delta} N_{l} / k \rfloor}\left(\operatorname{distance}(\mathbb{B} \rightarrow \mathbb{A})\right) \right\}, \tag{6}
$$

where $k$ is the pre-defined repetition step of the parallel expanding operation, $\mathcal{S}^*$ is a token set consisting of the most important tokens in $\mathbb{B}$, and $\operatorname{topk}_n$ denotes selecting the indices of the $n$ largest values. In this way, we only perform $k$ parallel expanding operations to expand $\lfloor \mu_{\delta} N_l \rfloor$ tokens, and the computational consumption is negligible for small $k$.
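The expansion step of Eqs. 3-6 can be sketched in a few lines of PyTorch. This is an illustrative approximation under our own assumptions (per-row cosine distances, an even split of the expansion budget over the $k$ parallel rounds), not the authors' exact implementation.

```python
import torch
import torch.nn.functional as F

def widest_expansion(A: torch.Tensor, B: torch.Tensor, n_expand: int, k: int = 2):
    """Move roughly n_expand tokens from B (unselected) into A (selected) in k parallel rounds.

    A: (|A|, d), B: (|B|, d). Tokens in B that are farthest (in cosine distance)
    from their nearest token in A are treated as the most important (Eqs. 4-6).
    """
    per_round = max(1, n_expand // k)
    for _ in range(k):
        if B.shape[0] == 0:
            break
        # Pairwise cosine distance D(B, A), Eq. 3.
        dist = 1.0 - F.normalize(B, dim=-1) @ F.normalize(A, dim=-1).T
        # Distance to the closest selected token, Eq. 4.
        closest = dist.min(dim=1).values
        # Pick the most isolated tokens in parallel, Eq. 6.
        idx = closest.topk(min(per_round, B.shape[0])).indices
        A = torch.cat([A, B[idx]], dim=0)
        mask = torch.ones(B.shape[0], dtype=torch.bool, device=B.device)
        mask[idx] = False
        B = B[mask]
    return A, B
```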
# 3.3.3 Feature-distribution Token Merging

After token expansion, we aim to retain the effective information of the unselected tokens, so we merge each unselected token into the selected token whose feature distribution is closest to it. The feature-distribution token merging can be formulated as:

$$
\mathcal{S}_{l}^{\prime} = \left\{ \operatorname{mean}\left(\mathbb{A}_{j}, \mathcal{S}_{j}^{(M)}\right) \,\middle|\, \forall j \in \{1, 2, \dots, |\mathbb{A}|\} \right\}, \ \text{where}
$$

$$
\mathcal{S}_{j}^{(M)} = \left\{ \mathbb{B}_{i} \,\middle|\, \mathcal{I}_{i}^{(M)} = j,\ \forall i \in \{1, 2, \dots, |\mathbb{B}|\} \right\}, \tag{7}
$$

$$
\mathcal{I}_{i}^{(M)} = \operatorname{argmin}_{j}\left(\mathcal{D}(\mathbb{B}, \mathbb{A})_{i,j}\right),
$$

where $\mathcal{S}_{l}^{\prime}\in \mathbb{R}^{|\mathbb{A}|\times d}$ is the token set obtained by merging the closest tokens from $\mathbb{B}$ into $\mathbb{A}$, and $\operatorname{mean}(\mathbb{A}_j,\mathcal{S}_j^{(M)})$ indicates that we merge $\mathbb{B}$ into $\mathbb{A}$ by averaging, based on the index set $\mathcal{I}^{(M)}\in \mathbb{R}^{|\mathbb{B}|}$. Note that every $\mathbb{B}_i$ participates in the merging to avoid dropping the information of the unselected tokens.
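For completeness, here is a rough PyTorch-style sketch of the merging step in Eq. 7 (an illustration of the described behavior, not the released code). Each unselected token is assigned to its nearest selected token and the resulting group is averaged; averaging the selected token and its assigned tokens with equal weights is one reading of $\operatorname{mean}(\mathbb{A}_j, \mathcal{S}_j^{(M)})$.

```python
import torch
import torch.nn.functional as F

def feature_merge(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
    """Merge every unselected token in B into its closest selected token in A (Eq. 7).

    A: (|A|, d) selected tokens, B: (|B|, d) unselected tokens.
    Returns the merged token set S_l' of shape (|A|, d).
    """
    dist = 1.0 - F.normalize(B, dim=-1) @ F.normalize(A, dim=-1).T   # Eq. 3
    assign = dist.argmin(dim=1)                                      # I_i^(M) in Eq. 7
    merged = A.clone()
    counts = torch.ones(A.shape[0], device=A.device)
    merged.index_add_(0, assign, B)   # accumulate the B tokens assigned to each A_j
    counts.index_add_(0, assign, torch.ones(B.shape[0], device=B.device))
    return merged / counts.unsqueeze(1)   # average of A_j and its assigned tokens
```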
# 3.3.4 Optimization of ToE

Our objective loss is the same as that of the original models, e.g., the cross-entropy loss in DeiT. The training details of ToE are presented in Algorithm 1. Note that we only apply ToE to the output tokens of the first transformer block; the detailed analysis is discussed in Sec. 4.4.

ToE is a plug-and-play acceleration module with the following three advantages: (1) As shown in Fig. 2, the selected token set obtained by ToE has a larger average distribution distance in the outputs of multiple blocks (visualized via T-SNE [44]) than the original full-token set (see First block vs. After ToE), while maintaining a feature distribution similar to that of the original token set. This indicates that ToE preserves the integrity of the intermediate feature distribution of the original token set across different Transformer blocks while reducing the number of tokens. (2) ToE is a parameter-free module: it does not introduce any trainable parameters and uses efficient matrix calculations whose computational overhead is negligible compared to the computation-intensive self-attention. (3) The speedup factors of ToE (e.g., the token kept rate $r_1$ and the number of training stages $N_g$) are independent of the original model's training hyper-parameters. This decoupling allows ToE to be seamlessly integrated into the training process of the original model, obviating the need for any adjustments to the training hyper-parameters.

# 4. Experiments

# 4.1. Experimental Settings

Datasets and baselines. We evaluate our method on ImageNet-1K [45] and CIFAR-10/100 [46]. For baselines, we use two popular ViTs, i.e., DeiT [4] and LV-ViT [5], as the base models to evaluate the proposed ToE on ImageNet-1K. To further evaluate the universality, we integrate ToE into the efficient training framework EfficientTrain [30]. Moreover, we evaluate the transfer learning ability of ToE pre-trained weights on DeiT and the performance of accelerating the fine-tuning process with ToE on CIFAR-10/100.

Algorithm 1: Optimization with ToE
Input: input dataset $\mathcal{X}$; output token number $N_{l}$; total number of training stages $N_{g}$; kept rate of the first training stage $r_1$; repetition step of the parallel expanding operation $k$; Transformer parameters $\theta$; maximum iterations $T$.
Output: updated Transformer parameters $\theta$.
for $t\gets 1$ to $T$ do
  Sample from $\mathcal{X}$ to obtain a data sample $x$; feed it forward through the embedding and the first $l$ transformer blocks to obtain the output token set $\mathcal{S}_{l}$.
  %% Spatial-distribution Token Initialization %%
  $r_0\gets \frac{1}{2} r_1$.
  Initialize $\mathbb{A}$ and $\mathbb{B}$ from $r_0$ and $\mathcal{S}_l$ via Eq. 2.
  %% Widest Feature-distribution Token Expansion %%
  Obtain the current training stage $\delta = \lceil N_g \cdot t / T\rceil$.
  for $m\gets 1$ to $\delta$ do
    if $m = 1$ then $\mu_{m}\leftarrow r_{1} - r_{0}$ else $\mu_{m}\leftarrow \frac{1 - r_{1}}{N_{g} - 1}$.
    for $n\gets 1$ to $k$ do
      Update $\mathbb{A}$ and $\mathbb{B}$ from $\mu_{m}$, $N_{l}$, $k$, and the prior $\mathbb{A}$ and $\mathbb{B}$ via Eq. 6.
    end
  end
  %% Feature-distribution Token Merging %%
  Obtain $\mathcal{S}_l^{\prime}$ from $\mathbb{A}$ and $\mathbb{B}$ via Eq. 7.
  Feed $\mathcal{S}_l^{\prime}$ forward through the $(l+1)$-th transformer block up to the final layer to obtain the final prediction $y$.
  %% Parameter Updating %%
  Use $y$ to compute the loss and obtain the gradient $\nabla \theta$.
  Use $\nabla \theta$ to update the prior $\theta$ via the optimizer to obtain the new $\theta$.
end
return $\theta$

Evaluation metrics. We report the Top-1 accuracy, the GPU training time, and FLOPs as the evaluation metrics. To evaluate the training speed, we report the total GPU hours consumed during the entire training process, as well as the theoretical FLOPs for one forward-backward pass. To avoid the impact of memory access and kernel launching on training time [12], we measure GPU hours on different numbers of GPUs for different models, but use the same number of GPUs when comparing different training methods. The FLOPs of the forward pass are measured using thop$^{1}$, and for the backward pass we follow [43] and calculate it as twice the FLOPs of the forward pass.

Implementations. All methods are trained with PyTorch [47]. For DeiT and LV-ViT, all experiments are conducted on four NVIDIA RTX A6000 GPUs$^{2}$, while EfficientTrain is trained on eight NVIDIA RTX A6000 GPUs.

All hyper-parameters (e.g., learning rate, decay strategy and rate), training strategies, and optimization processes are the same as in the original papers unless otherwise specified.

Growth strategy. By default, we evenly divide the original training process into $N_{g} = 3$ stages. The token kept rate of the 1st stage $r_1$ is set to 0.4, 0.5, or 0.6, and our method is correspondingly denoted as ToE$_{r_1 = 0.4}$, ToE$_{r_1 = 0.5}$, or ToE$_{r_1 = 0.6}$. Correspondingly, the kept rate of the initial stage $r_0$ is set to 0.2, 0.25, and 0.3.
The repetition step of parallel expanding operation $k$ is default set to 2, and we perform ToE on the output tokens of the first block for all models. + +# 4.2. Results on ImageNet-1k + +DeiT and LV-ViT As shown in Tab. 2, ToE achieves lossless training acceleration with SOTA performance. For example, $\mathrm{ToE}_{r_1 = 0.5}$ achieves $0.4\%$ Top-1 accuracy improvement with $1.27\times$ theoretical and $1.24\times$ practical faster speed to train DeiT-tiny. For DeiT-small, it achieves $1.3\times$ training acceleration without accuracy drop. Compared to the SOTA methods, $\mathrm{ToE}_{r_1 = 0.5}$ outperforms SViTE [10] and NetworkExpansion [12] at least $1\%$ Top-1 accuracy at the consistent acceleration ratio for training both DeiT-tiny and DeiT-small. Compared to ToMe [11], $\mathrm{ToE}_{r_1 = 0.5}$ also achieves both higher accuracy and practical training speed. Note that ToMe is able to reduce GFLOPs, but fails to accelerate training due to the usage of unfriendly weighted average attention and layer-wise merging operations. For DeiT-base, $\mathrm{ToE}_{r_1 = 0.5}$ drops only $0.2\%$ Top-1 accuracy while saving more than 60 GPU hours in the practical training process, which is comparable to NetworkExpansion with EMA. If we relax the restriction of hyper-parameter consistency (presented in Appendix), $\mathrm{ToE}_{r_1 = 0.4}^{\text{Hyper}}$ outperforms NetworkExpansion with $0.2\%$ accuracy and 24h training time reduction. + +For LV-ViT-T and LV-ViT-S shown in Tab. 3, $\mathrm{ToE}_{r_1 = 0.4}$ achieves efficient training with $1.2\times$ acceleration rate, while without accuracy drop or even with accuracy improvement for training LV-ViT-T, compared to baselines. Note that the results of $\mathrm{ToE}_{r_1 = 0.4}$ and NetworkExpansion are reported with EMA, due to the default LV-ViT training with EMA. In addition, $\mathrm{ToE}_{r_1 = 0.4}$ outperforms NetworkExpansion in both training acceleration and accuracy with 0.5h training time reduction and $0.6\%$ accuracy for LV-ViT-T, respectively. + +We also present the validation Top-1 accuracy of ToE and NetworkExpansion during training DeiT-tiny and LV-ViT in Fig. 3. As observed, ToE initially reduces token redundancy during training, resulting in some performance drops compared to the baseline. However, in the later stages of training, ToE introduces more tokens for training, gradually reducing the accuracy gap to the baseline. Benefiting from the reduction of token redundancy in the early stages, models trained by ToE with the proposed token expansion and merging achieve higher accuracies, compared to baselines. Compared to NetworkExpansion, our ToE is more stable to + +Table 2. Performance comparison for DeiT on ImageNet-1K. a/b in the column of Top-1 Acc. means without/with EMA strategy using the official GitHub repo†. The training time is averagely measured on one/two/four NVIDIA RTX A6000 GPUs for DeiT-tiny/small/base 3 times, and the batch size is set to 1, 024 in all following tables and figures. + +
ModelMethodConsistencyTop-1 Acc. (%)GFLOPs (per training iter)Training time (total GPU hours)Acceleration (practical rate)
Hyper?Architecture?Strategy?
DeiT-tinyBaseline [4]---72.23.3 × 10354.6h1.00×
(NeurIPS'21) S2ViTE-Tiny (600 epoch) [10]××70.1 (-2.1)2.5 × 103(1.32×)-1.19×
(ICLR'23) ToMeDeTrs→[11]×71.7 (-0.5)2.5 × 103(1.32×)53.3h1.02×
(CVPR'23) NetworkExpansion6→12 [12]×70.3 (-1.9) / 70.1 (-2.1)2.5 × 103(1.32×)43.2h1.26×
ToE r1=0.5 (Ours)72.6 (+0.4)2.6 × 103(1.27×)44.2h1.24×
DeiT-smallBaseline [4]---79.81.3 × 104124.5h1.00×
(ICLR'23) ToMeDeTrs→[11]×79.7 (-0.1)9.8 × 103(1.33×)121.5h1.02×
(CVPR'23) NetworkExpansion6→12 [12]×78.8 (-1.0) / 78.6 (-1.2)9.8 × 103(1.33×)100.3h1.24×
ToE r1=0.5 (Ours)79.8 (+0.0)1.0 × 104(1.30×)102.2h1.22×
DeiT-baseBaseline [4]---81.85.2 × 104292.8h1.00×
(ICML'19) StackBERT [13]×80.8 (-1.0)4.2 × 104(1.24×)231.6h1.26×
(CVPR'23) NetworkExpansion6→12 [12]×81.0 (-0.8) / 81.5 (-0.3)3.9 × 104(1.33×)226.8h1.29×
ToE r1=0.5 (Ours)81.6 (-0.2)4.0 × 104(1.30×)231.2h1.27×
ToE r1=0.4 (Ours)81.4 (-0.4)3.8 × 104(1.37×)225.2h1.30×
ToE Hyper r1=0.5 (Ours)×81.8 (+0.0)3.6 × 104(1.44×)213.2h1.37×
ToE Hyper r1=0.4 (Ours)×81.7 (-0.1)3.3 × 104(1.58×)202.8h1.44×
+ +$\dagger$ https://github.com/huawei-noah/Efficient-Computing/tree/master/TrainingAcceleration/NetworkExpansion + +Table 3. Performance comparison for LV-ViT on ImageNet-1K. $\ddagger$ indicates that results reproduced by the official GitHub repo. The training time is averagely measured on two/four NVIDIA RTX A6000 GPUs 3 times for LV-ViT-T/S with a fixed batch size of 1, 024. + +
ModelMethodTop-1 Acc. (%)GFLOPs (per training iter)Training time (total GPU hours)
LV-ViT-TBaseline [5]79.18.2 × 103130.5h
(CVPR'23) NetworkExpansion6→12 [12][78.8 (-0.3)]7.1 × 103(1.15×)114.4h (1.14×)
ToERr1=0.4 (Ours)79.4 (+0.3)7.0 × 103(1.17×)113.9h (1.15×)
LV-ViT-SBaseline [5]83.31.9 × 104237.3h
(CVPR'23) NetworkExpansion8→16 [12][82.9 (-0.4)]1.5 × 104(1.27×)195.5h (1.21×)
ToERr1=0.4 (Ours)83.3 (+0.0)1.4 × 104(1.36×)195.3h (1.22×)
LV-ViT-MBaseline [5]84.13.7 × 104368.7h
(CVPR'23) NetworkExpansion10→20 [12]84.0 (-0.1)2.9 × 104(1.28×)292.7h (1.26×)
ToERr1=0.4 (Ours)84.1 (+0.0)2.7 × 104(1.37×)292.5h (1.26×)
+ +Table 4. Performance comparison between EfficientTrain [30] and our combination framework on ImageNet-1K. + +
ModelMethodTop-1 Acc. (%)GFLOPs (per training iter)Training time (total GPU hours)
DeiT-tinyBaseline (EfficientTrain) [30]72.51.3 × 10452.5h
(ICCV'23) EfficientTrain [30]73.3 (+0.8)8.8 × 103(1.48×)36.5h (1.44×)
EfficientTrain + ToE r1=0.6 (Ours)73.5 (+1.0)7.6 × 103(1.71×)32.3h (1.63×)
DeiT-smallBaseline (EfficientTrain) [30]80.35.2 × 104121.3h
(ICCV'23) EfficientTrain [30]80.4 (+0.1)3.4 × 103(1.53×)85.2h (1.42×)
EfficientTrain + ToE r1=0.6 (Ours)80.4 (+0.1)2.9 × 104(1.79×)79.4h (1.53×)
+ +train with consistent accuracy improvement during training, while the accuracy of NetworkExpansion with EMA drops significantly at the intermediate epoch number and then restores due to the inconsistent structures of before-and-after models when structure growing. More validation curves are presented in the Appendix. + +Combination with EfficientTrain [30]. ToE can be seamlessly integrated into the EfficientTrain framework to further improve the performance. We do not modify the pipeline of EfficientTrain and simply apply ToE to the output tokens of the model's first block. The results are summarized in Tab. 4, which effectively evaluates the universality of ToE. The combination of EfficientTrain and ToE achieves higher training speeds to further enhance the training efficiency of EfficientTrain with accuracy improvement. + +# 4.3. Transfer Results on CIFAR-10/100 + +we further explore the transfer learning ability of ToE-pre-trained weights and evaluate whether ToE can be used to accelerate the fine-tuning on CIFAR-10/100. For the fine + +![](images/e4a076fa3bb61e3910d0c7c855bab5fc0ea079737439b0dcfb9202bcf8c6a1fc.jpg) +Figure 3. Validation Top-1 accuracy of DeiT-tiny and LV-ViT-T on ImageNet-1k during training with different methods. + +![](images/355923237431d617b81e17a50299e4dadbc1c37b5bb34e2d63dc71c99d681041.jpg) + +Table 5. Results for fine-tuning DeiT on CIFAR-10/100. + +
ModelPre-trainingFine-tuningTop-1 Acc. (%)
MethodAccelerationMethodAccelerationCIFAR-10CIFAR-100
DeiT-tinyBaseline [4]1.0xBaseline [4]1.0x98.0786.78
Baseline [4]1.0xToE\( r_1=0.5 \)1.3x98.10 (+0.03)86.74 (-0.04)
ToE\( r_1=0.5 \)1.3xBaseline [4]1.0x98.19 (+0.12)87.10 (+0.32)
ToE\( r_1=0.5 \)1.3xToE\( r_1=0.5 \)1.3x98.16 (+0.09)86.91 (+0.13)
DeiT-smallBaseline [4]1.0xBaseline [4]1.0x98.9390.15
Baseline [4]1.3xToE\( r_1=0.5 \)1.3x98.96 (+0.03)90.19 (+0.04)
ToE\( r_1=0.5 \)1.3xBaseline [4]1.0x99.03 (+0.10)90.37 (+0.22)
ToE\( r_1=0.5 \)1.3xToE\( r_1=0.5 \)1.3x98.99 (+0.06)90.26 (+0.11)
+ +tuning settings, we follow the settings of the official GitHub repo $^{3}$ . We introduce the training details in the Appendix. + +As shown in Tab. 5, pre-training weights by ToE is able to improve the accuracy on CIFAR-10/100 for DeiT-tiny/small, when using the same baseline training for fine-tuning (see the 1st and 3rd rows in both DeiT-tiny and DeiT-small). For example, ToE pre-training outperforms baseline pretraining by $0.32\%$ accuracy on CIFAR-100, which evaluates the strong transfer ability of ToE. In addition, our ToE is also effective and efficient for fine-tuning (see the 1st and 2nd rows in DeiT-tiny/small). ToE achieves $1.3 \times$ acceleration for fine-tuning DeiT-tiny with 0.03 accuracy improvement on CIFAR-10. Further, we employ ToE for both pre-training and fine-tuning, which significantly accelerates the training with an accuracy improvement of at least $0.06\%$ on CIFAR-10 for both DeiT-tiny/small, compared to that using both baselines. + +Table 6. Ablation studies of different speedup factors for DeiT-tiny on ImageNet-1K. The default $r_0 / r_1$ , $N_g$ and $k$ are set to 1/2, 3 and 2, respectively. All results in this table have almost the same training speeds for 44h training (total GPU hours). + +
DeiT-tinyFactorsr0/r1=1/3r0/r1=2/3Ng=2Ng=4k=1k=3default
Top-1 Acc. (%)72.372.572.472.572.572.672.6
+ +Table 7. Effect of "initialization-expansion-merge" pipeline for DeiT on ImageNet-1K. $\pm$ indicates we conduct 3 runs to calculate the mean and std. + +
InitializationExpansionMergeTop-1 Acc. (%)
RandomSpatialDeiT-tinyDeiT-small
×72.679.8
×72.3±0.279.7±0.1
××71.279.1
××71.779.6
+ +Table 8. Results of applying ToE to different early transformer block's output tokens for DeiT-tiny on ImageNet-1K. + +
BlockTop-1 Acc. (%) DeiT-tinyGFLOPs (per training iter)Training time (total GPU hours)
Embedding72.12.51 × 10343.5h
First block72.62.58 × 10344.2h
Second block72.22.65 × 10345.2h
Third block72.12.71 × 10346.9h
+ +# 4.4. Ablation Study + +Effect of speedup factors in ToE. As presented in Tab. 6, we verify the sensitivity of the speedup factors mentioned in Sec. 3.3, such as the ratio of $r_0 / r_1$ , training stages $N_g$ and parallel expanding operation $k$ . At almost the same training time, ToE is relatively insensitive to these factors, w.r.t accuracy. It allows ToE to be easily integrated into the different models' training pipeline with minimal factor adjustments. + +We further adjust the keep rate of the first stage $r_1$ to control the training speed, and the relationship between $r_1$ and training speed is illustrated in Fig. 4. We found ToE achieves more than $1.3 \times$ acceleration on DeiT-tiny without accuracy dropping. Additionally, it also demonstrates that reducing token redundancy in the early stages of training sometimes improves the model performance. + +Effect of "Initialization-expansion-merging". Tab. 7 provides an analysis of the necessity of each step in the proposed "initialization-expansion-merging" pipeline. When we randomly select tokens as the initial token set rather than spatial-distribution token initialization, it leads to performance degradation. Furthermore, removing widest feature-distribution token expansion and feature-distribution token merging from the pipeline significantly decreases the accuracy, e.g., more than $0.9\%$ and $1.4\%$ accuracy drops without the merging and expansion for DeiT-tiny, respectively. + +Where to apply ToE. Work in [32, 48] demonstrates that class attention tends to be a global pooling as more attention operations are performed, and tokens in early blocks are more similar. This leads to more redundancy in tokens from early blocks. Consequently, applying ToE to the output tokens of early blocks can achieve higher acceleration. As shown in Tab. 8, we default apply ToE to the output tokens of the first block, which achieves the best trade-off between accuracy and training speed, compared to other early blocks. + +![](images/2c6c478fd4a34c506bc32a7becb6e5b32586b147ae21b6af3b68f5c61bd31370.jpg) +Figure 4. Trade-off between acceleration ratio and model performance by setting different $r_1$ . + +Table 9. Results of different feature-distribution distances in Eq. 3 for DeiT on ImageNet-1K. + +
MeasureTop-1 Acc. (%)
DeiT-tinyDeiT-small
Manhattan Distance69.878.0
Euclidean Distance70.678.4
Cosine Distance72.679.8
+ +Effect of the feature-distribution distance. We explore the metric that measures the feature-distribution distance between two tokens in Eq. 3. As shown in Tab. 9, we use three different metrics: Manhattan distance, Euclidean distance, and Cosine distance. We observe that Cosine distance achieves the best performance as the distance metric. + +# 5. Conclusion + +In this paper, we proposed a novel token growth scheme Token Expansion (ToE) to achieve consistent training acceleration for ViTs. ToE introduce an "initialization-expansion-merging" pipeline to maintain the integrity of the intermediate feature distribution of original transformers, preventing the loss of crucial learnable information in the training process. In experiments, ToE can be seamlessly integrated into the training of various transformers and efficient training frameworks in a lossless manner or even accuracy improvement, compared to the entire full-token training. These experimental results of ToE also demonstrate the superior performance gains over the SOTA methods. + +# Acknowledgements + +This work is supported by the National Natural Science Foundation of China (NO. 62102151), the National Key Research and Development Program of China (No. 2023YFC3306401), Shanghai Sailing Program (21YF1411200), Shanghai Science and Technology Commission (22511104600), CCF-Tencent Rhino-Bird Open Research Fund, the Open Research Fund of Key Laboratory of Advanced Theory and Application in Statistics and Data Science, Ministry of Education (KLATASDS2305), the Fundamental Research Funds for the Central Universities. + +# References + +[1] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. NeurIPS, 30, 2017. 1 +[2] Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL, pages 4171–4186, 2019. +[3] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. NeurIPS, 33:1877-1901, 2020. 1 +[4] Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In ICLR, pages 10347-10357. PMLR, 2021. 1, 2, 5, 7 +[5] Zi-Hang Jiang, Qibin Hou, Li Yuan, Daquan Zhou, Yujun Shi, Xiaojie Jin, Anran Wang, and Jiashi Feng. All tokens matter: Token labeling for training better vision transformers. NeurIPS, 34:18590-18602, 2021. 5, 7 +[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, pages 213-229. Springer, 2020. +[7] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. NeurIPS, 34:12077-12090, 2021. 1 +[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 1 +[9] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, pages 770-778, 2016. 
1 +[10] Tianlong Chen, Yu Cheng, Zhe Gan, Lu Yuan, Lei Zhang, and Zhangyang Wang. Chasing sparsity in vision transformers: An end-to-end exploration. NeurIPS, 34:19974-19988, 2021. 1, 2, 3, 6, 7 +[11] Daniel Bolya, Cheng-Yang Fu, Xiaoliang Dai, Peizhao Zhang, Christoph Feichtenhofer, and Judy Hoffman. Token merging: Your vit but faster. In ICLR, 2022. 1, 2, 3, 4, 6, 7 +[12] Ning Ding, Yehui Tang, Kai Han, Chao Xu, and Yunhe Wang. Network expansion for practical training acceleration. In CVPR, pages 20269-20279, 2023. 1, 2, 3, 6, 7 +[13] Linyuan Gong, Di He, Zhuohan Li, Tao Qin, Liwei Wang, and Tieyan Liu. Efficient training of bert by progressively stacking. In ICML, pages 2337-2346. PMLR, 2019. 1, 2, 3, 7 +[14] Huanrui Yang, Hongxu Yin, Maying Shen, Pavlo Molchanov, Hai Li, and Jan Kautz. Global vision transformer pruning with hessian-aware saliency. In CVPR, pages 18547-18557, 2023. 1, 3 +[15] Fang Yu, Kun Huang, Meng Wang, Yuan Cheng, Wei Chu, + +and Li Cui. Width & depth pruning for vision transformers. In AAAI, volume 36, pages 3143-3151, 2022. +[16] François Lagunas, Ella Charlaix, Victor Sanh, and Alexander M Rush. Block pruning for faster transformers. In EMNLP, pages 10619-10629, 2021. +[17] Mengzhou Xia, Zexuan Zhong, and Danqi Chen. Structured pruning learns compact and accurate models. In ACL, pages 1513-1528, 2022. 1, 3 +[18] Yongming Rao, Wenliang Zhao, Benlin Liu, Jiwen Lu, Jie Zhou, and Cho-Jui Hsieh. Dynamicvit: Efficient vision transformers with dynamic token sparsification. NeurIPS, 34:13937-13949, 2021. 1, 3, 4 +[19] Lingchen Meng, Hengduo Li, Bor-Chun Chen, Shiyi Lan, Zuxuan Wu, Yu-Gang Jiang, and Ser-Nam Lim. Adavit: Adaptive vision transformers for efficient image recognition. In CVPR, pages 12309-12318, 2022. +[20] Mohsen Fayyaz, Soroush Abbasi Koohpayegani, Farnoush Rezaei Jafari, Sunando Sengupta, Hamid Reza Vaezi Joze, Eric Sommerlade, Hamed Pirsiavash, and Jürgen Gall. Adaptive token sampling for efficient vision transformers. In ECCV, pages 396-414. Springer, 2022. +[21] Zhenglun Kong, Peiyan Dong, Xiaolong Ma, Xin Meng, Wei Niu, Mengshu Sun, Xuan Shen, Geng Yuan, Bin Ren, Hao Tang, et al. Spvit: Enabling faster vision transformers via latency-aware soft token pruning. In ECCV, pages 620-640. Springer, 2022. +[22] Hongxu Yin, Arash Vahdat, Jose M Alvarez, Arun Mallya, Jan Kautz, and Pavlo Molchanov. A-vit: Adaptive tokens for efficient vision transformer. In CVPR, pages 10809-10818, 2022. 1, 3 +[23] Sheng Xu, Yanjing Li, Mingbao Lin, Peng Gao, Guodong Guo, Jinhu Lu, and Baochang Zhang. Q-detr: An efficient low-bit quantized detection transformer. In CVPR, pages 3842-3851, 2023. 1 +[24] Yanjing Li, Sheng Xu, Baochang Zhang, Xianbin Cao, Peng Gao, and Guodong Guo. Q-vit: Accurate and fully quantized low-bit vision transformer. NeurIPS, 35:34451-34463, 2022. +[25] Yefei He, Zhenyu Lou, Luoming Zhang, Jing Liu, Weijia Wu, Hong Zhou, and Bohan Zhuang. Bivit: Extremely compressed binary vision transformers. In ICCV, pages 5651-5663, 2023. +[26] Phuoc-Hoan Charles Le and Xinlin Li. Binaryvit: Pushing binary vision transformers towards convolutional models. In CVPR, pages 4664-4673, 2023. 1 +[27] Cheng Chen, Yichun Yin, Lifeng Shang, Xin Jiang, Yujia Qin, Fengyu Wang, Zhi Wang, Xiao Chen, Zhiyuan Liu, and Qun Liu. bert2bert: Towards reusable pretrained language models. In ACL, pages 2134-2148, 2022. 1, 2, 3 +[28] Xin Yuan, Pedro Savarese, and Michael Maire. Growing efficient deep networks by structured continuous sparsification. In ICLR, 2021. 
+[29] Wei Wen, Feng Yan, Yiran Chen, and Hai Li. Autogrow: Automatic layer growing in deep convolutional networks. In KDD, pages 833-841, 2020. 1 +[30] Yulin Wang, Yang Yue, Rui Lu, Tianjiao Liu, Zhao Zhong, Shiji Song, and Gao Huang. Efficienttrain: Exploring generalized curriculum learning for training visual backbones. In ICCV, pages 5852-5864, 2023. 2, 3, 5, 7 + +[31] Changlin Li, Bohan Zhuang, Guangrun Wang, Xiaodan Liang, Xiaojun Chang, and Yi Yang. Automated progressive learning for efficient training of vision transformers. In CVPR, pages 12486-12496, 2022. 3 +[32] Xuran Pan, Xuan Jin, Yuan He, Shiji Song, Gao Huang, et al. Budgeted training for vision transformer. In ICLR, 2022. 3, 8 +[33] Katherine Lee, Daphne Ippolito, Andrew Nystrom, Chiyuan Zhang, Douglas Eck, Chris Callison-Burch, and Nicholas Carlini. Deduplicating training data makes language models better. In ACL, pages 8424-8445, 2022. 3 +[34] Mingxing Tan and Quoc Le. Efficientnetv2: Smaller models and faster training. In ICLR, pages 10096-10106. PMLR, 2021. 3 +[35] Bradley McDanel and Chi Phuong Huynh. Accelerating vision transformer training via a patch sampling schedule. arXiv preprint arXiv:2208.09520, 2022. 3 +[36] Li Shen, Yan Sun, Zhiyuan Yu, Liang Ding, Xinmei Tian, and Dacheng Tao. On efficient training of large-scale deep learning models: A literature review. arXiv preprint arXiv:2304.03589, 2023. 3 +[37] Yuedong Yang, Guihong Li, and Radu Marculescu. Efficient on-device training via gradient filtering. In CVPR, pages 3811-3820, 2023. 3 +[38] Xucheng Ye, Pengcheng Dai, Junyu Luo, Xin Guo, Yingjie Qi, Jianlei Yang, and Yiran Chen. Accelerating cnn training by pruning activation gradients. In ECCV, pages 322-338. Springer, 2020. 3 +[39] Yonggan Fu, Haoran You, Yang Zhao, Yue Wang, Chaojian Li, Kailash Gopalakrishnan, Zhangyang Wang, and Yingyan Lin. Fractrain: Fractionally squeezing bit savings both temporally and spatially for efficient dnn training. NeurIPS, 33:12127-12139, 2020. 3 +[40] Yue Wang, Ziyu Jiang, Xiaohan Chen, Pengfei Xu, Yang Zhao, Yingyan Lin, and Zhangyang Wang. E2-train: Training state-of-the-art cnns with over $80\%$ energy savings. NeurIPS, 32, 2019. 3 +[41] Mengtian Li, Ersin Yumer, and Deva Ramanan. Budgeted training: Rethinking deep neural network training under resource constraints. In ICLR, 2019. 3 +[42] Jiong Zhang, Hsiang-Fu Yu, and Inderjit S Dhillon. Autoassist: A framework to accelerate training of deep neural networks. NeurIPS, 32, 2019. 3 +[43] Marius Hobbhahn and Jaime Sevilla. What's the backward-forward flop ratio for neural networks? https://epochai.org/blog/backward-forward-FLOP-ratio, 2021. Accessed: 2023-9-28. 3, 6 +[44] Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. JMLR, 9:2579-2605, 2008. 5 +[45] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, pages 248-255. IEEE, 2009. 5 +[46] Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 2009. 5 +[47] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. NeurIPS, 32, 2019. 6 + +[48] Maithra Raghu, Thomas Unterthiner, Simon Kornblith, Chiyuan Zhang, and Alexey Dosovitskiy. Do vision transformers see like convolutional neural networks? NeurIPS, 34:12116-12128, 2021. 
8 \ No newline at end of file diff --git a/2024/A General and Efficient Training for Transformer via Token Expansion/images.zip b/2024/A General and Efficient Training for Transformer via Token Expansion/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..4bf662760be8d7c1b2484e7af9329f3f1494aab3 --- /dev/null +++ b/2024/A General and Efficient Training for Transformer via Token Expansion/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf0e0ddb7436ed4dc6c894a4b245f0063ff6449dd3160d618518f98c694d18ce +size 480620 diff --git a/2024/A General and Efficient Training for Transformer via Token Expansion/layout.json b/2024/A General and Efficient Training for Transformer via Token Expansion/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..120256de5ccaacde0271aeac4ccc9460d7bdbc7a --- /dev/null +++ b/2024/A General and Efficient Training for Transformer via Token Expansion/layout.json @@ -0,0 +1,10890 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 76, + 103, + 517, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 103, + 517, + 121 + ], + "spans": [ + { + "bbox": [ + 76, + 103, + 517, + 121 + ], + "type": "text", + "content": "A General and Efficient Training for Transformer via Token Expansion" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "spans": [ + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "text", + "content": "Wenxuan Huang" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "text", + "content": " Yunhang Shen" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "text", + "content": " Jiao Xie" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "text", + "content": " Baochang Zhang" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "text", + "content": " Gaoqi He" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "text", + "content": " Ke Li" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "text", + "content": " Xing Sun" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "text", + "content": " Shaohui Lin" + }, + { + "bbox": [ + 75, + 141, + 526, + 171 + ], + "type": "inline_equation", + "content": "^{1,5\\boxtimes}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 109, + 172, + 493, + 198 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 110, + 172, + 493, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 172, + 493, + 185 + ], + "spans": [ + { + "bbox": [ + 110, + 172, + 493, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 172, 
+ 493, + 185 + ], + "type": "text", + "content": "School of Computer Science and Technology, East China Normal University, Shanghai, China" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 109, + 186, + 493, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 186, + 493, + 198 + ], + "spans": [ + { + "bbox": [ + 109, + 186, + 493, + 198 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 109, + 186, + 493, + 198 + ], + "type": "text", + "content": "Tencent Youtu Lab, China " + }, + { + "bbox": [ + 109, + 186, + 493, + 198 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 109, + 186, + 493, + 198 + ], + "type": "text", + "content": "Xiamen University, China " + }, + { + "bbox": [ + 109, + 186, + 493, + 198 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 109, + 186, + 493, + 198 + ], + "type": "text", + "content": "Beihang University, China" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 100, + 200, + 502, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 200, + 502, + 213 + ], + "spans": [ + { + "bbox": [ + 100, + 200, + 502, + 213 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 100, + 200, + 502, + 213 + ], + "type": "text", + "content": "Key Laboratory of Advanced Theory and Application in Statistics and Data Science - MOE, China" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 216, + 547, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 216, + 547, + 241 + ], + "spans": [ + { + "bbox": [ + 52, + 216, + 547, + 241 + ], + "type": "text", + "content": "osilly0616@gmail.com, shenyunhang01@gmail.com, jiaoxiel990@126.com, bczhang@buaa.edu.cn \ngqhe@cs.ecnu.edu.cn, tristanli.sh@gmail.com, winfred.sun@gmail.com, shaohuilin007@gmail.com" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 143, + 269, + 192, + 281 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 269, + 192, + 281 + ], + "spans": [ + { + "bbox": [ + 143, + 269, + 192, + 281 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 293, + 290, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 293, + 290, + 594 + ], + "spans": [ + { + "bbox": [ + 46, + 293, + 290, + 594 + ], + "type": "text", + "content": "The remarkable performance of Vision Transformers (ViTs) typically requires an extremely large training cost. Existing methods have attempted to accelerate the training of ViTs, yet typically disregard method universality with accuracy dropping. Meanwhile, they break the training consistency of the original transformers, including the consistency of hyper-parameters, architecture, and strategy, which prevents them from being widely applied to different Transformer networks. In this paper, we propose a novel token growth scheme Token Expansion (termed ToE) to achieve consistent training acceleration for ViTs. We introduce an \"initialization-expansion-merging\" pipeline to maintain the integrity of the intermediate feature distribution of original transformers, preventing the loss of crucial learnable information in the training process. 
ToE can not only be seamlessly integrated into the training and fine-tuning process of transformers (e.g., DeiT and LV-ViT), but also effective for efficient training frameworks (e.g., EfficientTrain), without twisting the original training hyperparameters, architecture, and introducing additional training strategies. Extensive experiments demonstrate that ToE achieves about " + }, + { + "bbox": [ + 46, + 293, + 290, + 594 + ], + "type": "inline_equation", + "content": "1.3 \\times" + }, + { + "bbox": [ + 46, + 293, + 290, + 594 + ], + "type": "text", + "content": " faster for the training of ViTs in a lossless manner, or even with performance gains over the full-token training baselines. Code is available at https://github.com/Osilly-TokenExpansion." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 616, + 128, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 616, + 128, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 616, + 128, + 628 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 635, + 288, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 635, + 288, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 635, + 288, + 696 + ], + "type": "text", + "content": "Transformers have achieved excellent performance in the tasks of natural language processing (NLP) [1-3] and computer vision [4-7]. Despite their great success, modern Transformer models typically require extremely large parameters and computation consumption due to the quadratic com" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 308, + 319, + 542, + 399 + ], + "blocks": [ + { + "bbox": [ + 306, + 270, + 547, + 318 + ], + "lines": [ + { + "bbox": [ + 306, + 270, + 547, + 318 + ], + "spans": [ + { + "bbox": [ + 306, + 270, + 547, + 318 + ], + "type": "text", + "content": "Table 1. Training results for DeiT [4] on ImageNet-1K. DeiT does not use the EMA strategy by default. a/b in the column of Top-1 Acc. means without/with EMA strategy using the official GitHub repo. The training time is averagely measured on one/four NVIDIA RTX A6000 GPUs 3 times with a batch size of 1, 024 for DeiT-Tiny/Base, respectively." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 319, + 542, + 399 + ], + "lines": [ + { + "bbox": [ + 308, + 319, + 542, + 399 + ], + "spans": [ + { + "bbox": [ + 308, + 319, + 542, + 399 + ], + "type": "table", + "html": "
ModelMethodTraining consistencyTop-1 Acc. (%)Training time (GPU hours)
HyperArchStrategy
TinyBaseline [4]---72.254.6h
S2ViTE (600 epoch) [10]××70.1 (-2.1)-
ToMeRDS→[11]×71.7 (-0.5)53.3h
NetworkExpansion6→12 [12]×70.3 (-1.9) / 70.1 (-2.1)43.2h
ToERi=0.5 (Ours)72.6 (+0.4)44.2h
BaseBaseline [4]---81.8292.8h
StackBERT [13]×80.8 (-1.0)231.6h
NetworkExpansion6→12 [12]×81.0 (-0.8) / 81.5 (-0.3)226.8h
ToERi=0.5 (Ours)81.6 (-0.2)231.2h
", + "image_path": "45df7dce1b92becae204babc845a313fd66d8dbe7292778f31f5df230207e99a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 407, + 547, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 407, + 547, + 503 + ], + "spans": [ + { + "bbox": [ + 304, + 407, + 547, + 503 + ], + "type": "text", + "content": "putational complexity in the self-attention module. For example, ViT-H/14 [8] requires " + }, + { + "bbox": [ + 304, + 407, + 547, + 503 + ], + "type": "inline_equation", + "content": "\\sim 1,000" + }, + { + "bbox": [ + 304, + 407, + 547, + 503 + ], + "type": "text", + "content": " FLOPs, which is " + }, + { + "bbox": [ + 304, + 407, + 547, + 503 + ], + "type": "inline_equation", + "content": "250 \\times" + }, + { + "bbox": [ + 304, + 407, + 547, + 503 + ], + "type": "text", + "content": " larger than ResNet-50 [9]. The entire training process needs a significant amount of computing resources to reach model convergence, resulting in a substantial computation overhead. To reduce the computational cost of large models, there has been growing research attention on accelerating Transformers for either training or inference." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 506, + 548, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 506, + 548, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 506, + 548, + 662 + ], + "type": "text", + "content": "Existing Transformer pruning methods [14-22] aim to reduce the inference complexity. Among them, structure pruning [14-17] and token pruning [18-22] focus on reducing the neurons or tokens of Transformers to accelerate the inference. However, these pruning methods require additional training computational cost in each forward-backward iteration to determine which neurons or tokens are important enough to be retained, or the fine-tuning for pruned models. Recently, Transformer quantization [23-26] accelerates the inference via low-bit computation, but they also cannot reduce the training computation cost. Thus, it is challenging for them to effectively accelerate the training of Transformers in practical scenarios, e.g., cloud service." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "To reduce the training computation overhead, recent works [12, 13, 27-29] have proposed structure growth methods. They update a smaller number of model parameters during the early stages of training and gradually increase" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "type": "text", + "content": "*Equal contribution." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 155, + 702, + 235, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 702, + 235, + 712 + ], + "spans": [ + { + "bbox": [ + 155, + 702, + 235, + 712 + ], + "type": "text", + "content": "Corresponding author." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15783" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 105, + 68, + 491, + 215 + ], + "blocks": [ + { + "bbox": [ + 105, + 68, + 491, + 215 + ], + "lines": [ + { + "bbox": [ + 105, + 68, + 491, + 215 + ], + "spans": [ + { + "bbox": [ + 105, + 68, + 491, + 215 + ], + "type": "image", + "image_path": "8378b2f501e358b565ceaaac602632602be08ca09bdf943a4a83a123d5773ebc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 217, + 547, + 276 + ], + "lines": [ + { + "bbox": [ + 46, + 217, + 547, + 276 + ], + "spans": [ + { + "bbox": [ + 46, + 217, + 547, + 276 + ], + "type": "text", + "content": "Figure 1. The \"initialization-expansion-merging\" pipeline of proposed ToE. We take the 1st training stage " + }, + { + "bbox": [ + 46, + 217, + 547, + 276 + ], + "type": "inline_equation", + "content": "(\\delta = 1)" + }, + { + "bbox": [ + 46, + 217, + 547, + 276 + ], + "type": "text", + "content": ", the kept rate " + }, + { + "bbox": [ + 46, + 217, + 547, + 276 + ], + "type": "inline_equation", + "content": "r_1 = 2r_0 = \\frac{2}{3}" + }, + { + "bbox": [ + 46, + 217, + 547, + 276 + ], + "type": "text", + "content": ", the repetition step " + }, + { + "bbox": [ + 46, + 217, + 547, + 276 + ], + "type": "inline_equation", + "content": "k = 1" + }, + { + "bbox": [ + 46, + 217, + 547, + 276 + ], + "type": "text", + "content": " as example. ToE is only added after the first Transformer block to guide the token selection and usage. During training, steps (1), (2), and (3) are performed for each iteration with the reduction of token numbers. First, seed tokens are selected for token initialization through step (1). Then, the number of tokens is expanded via step (2) for token expansion. Finally, we merge the unselected token set (blue boxes) into the selected one (red boxes) with the close feature distributions in step (3) for token merging. During testing, ToE can be safely removed to generate the same Transformer architecture as the original full-token Transformer." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 283, + 289, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 283, + 289, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 283, + 289, + 677 + ], + "type": "text", + "content": "the number of parameters involved in the updating process as training progresses. 
However, existing methods fail to achieve general Transformer training acceleration without an accuracy drop (as shown in Tab. 1), and they break the training consistency of the original Transformers from three perspectives. (1) Hyper-parameter consistency. Existing methods (e.g., SViTE [10]) delicately tune the training hyper-parameters (e.g., learning rate and number of epochs) of the original models; these settings are sensitive to individual ViTs [4] and require additional trial-and-error cost for different networks. (2) Architecture consistency. Existing methods [10, 11] alter the final model architecture, which may deviate from the user's requirements and potentially necessitates additional hardware/software support to realize a real training speedup. For example, ToMe [11] progressively merges similar tokens layer-by-layer to reduce the number of tokens in ViTs during training, replacing the attention operators with weighted-average attention modules and thereby producing a model architecture that deviates from the original Transformer. Moreover, it cannot significantly accelerate practical training due to its hardware-unfriendly computation. (3) Strategy consistency. Existing methods [12, 13, 27] may suffer performance deterioration across different Transformers because they introduce additional training strategies, such as EMA and resetting the optimizer state. These strategies are only effective for specific models, which limits the universality of such methods. In Tab. 1, the extra EMA strategy in [12] affects performance differently across models: it helps DeiT-base but not DeiT-tiny. This prompts us to rethink: how can we achieve a real and practical training speedup for Transformers while keeping training consistency and high accuracy?

To answer the above question, we propose a novel token growth scheme, Token Expansion (termed ToE), to achieve general training acceleration for ViTs while adhering to the training consistency of the original models. Specifically, we present an "initialization-expansion-merging" pipeline (Fig. 1) to maintain the integrity of the intermediate feature distribution of the original Transformers, preventing the loss of crucial learnable information during accelerated training. Similar to structure growth methods, we initially involve only a limited number of tokens in training and gradually grow the token number as training progresses, eventually using the entire token set. Then, a widest feature-distribution token expansion is introduced to make the feature distribution of the selected token set as wide as possible. Additionally, a feature-distribution token merging combines tokens with close feature distributions to further avoid information loss.
ToE not only accelerates the training and fine-tuning of popular Transformers losslessly, or even with performance improvement, but can also be integrated into existing efficient training frameworks (e.g., EfficientTrain [30]) for further gains, without modifying the original training hyper-parameters or architecture and without introducing additional training strategies.

Our main contributions can be summarized as follows:

- We propose ToE, a novel token growth scheme that accelerates ViTs from the perspective of tokens. ToE is a consistent training acceleration method that can be seamlessly integrated into the training and fine-tuning of Transformers without any modification to the original training hyper-parameters, architecture, or strategies.
- We propose an effective "initialization-expansion-merging" framework that avoids token information loss by maintaining the integrity of the intermediate feature distribution.
- Extensive experiments demonstrate that ToE accelerates the training and fine-tuning of ViTs with a negligible accuracy drop, or even surpasses the original full-token counterparts, outperforming previous SOTA methods.

2. Related Work
2.1. Training Acceleration for Transformers

As mentioned above, many existing works accelerate Transformer training from the perspective of structural parameters. These structure methods [10, 12, 13, 27, 31, 32] reduce the number of updated parameters during training to save computation. In contrast, the proposed ToE accelerates training from the perspective of reducing token redundancy: it computes a smaller number of tokens but still optimizes all parameters. This avoids the potential performance drop of many structure growth methods caused by inconsistent before-and-after model structures during growth and by resetting optimizer states when new structural parameters are introduced.

ToMe [11] uses a limited number of tokens during training and progressively merges similar tokens layer-by-layer, which changes the attention operator at inference. ToE also merges tokens with close feature distributions via feature-distribution token merging. However, our merging is performed only once, at the end of the "initialization-expansion-merging" pipeline during training, which prevents token information loss. This allows ToE to avoid the mismatch between practical and theoretical acceleration caused by excessive merging operations and operator modifications.

Additionally, several works [30, 33-35] consider reducing the data used for training. The work in [33] deduplicates training datasets to save computational resources. Unfortunately, this usually introduces additional computational costs and can become a bottleneck due to the extra time spent processing datasets during training [36]. PSS [35] trains on fewer patches obtained by splitting images. EfficientTrain [30] and PL [34] use images of different sizes and additional data augmentation. However, EfficientTrain and PL change the training pipeline relative to the original model, e.g., its hyper-parameters. Moreover, the above methods focus on properties of the training data, whereas ToE focuses on the crucial learnable information in the intermediate feature space of Transformers. Thus, ToE can be integrated into the above methods in a plug-and-play manner to further improve training efficiency.
2.2. Training Acceleration for CNNs

Prior efficient training methods have explored ways to speed up the training of CNN models [37-42]. For example, the works in [37, 38] prune gradients to reduce training computation, and the works in [39, 40] use quantization techniques to accelerate training. Others reduce training time either by reducing the number of optimization iterations with a linear decay of the learning rate [41] or by skipping easy samples that contribute little to loss reduction [42]. However, these methods may not be directly applicable to Transformers due to the architectural differences between Transformers and CNNs. In contrast, ToE accelerates Transformer training along the token dimension.

2.3. Transformer Pruning

Transformer pruning methods typically reduce parameters or tokens to generate sparse Transformers for fast inference. Structure pruning methods [14-17] prune the structures of Transformers, while token pruning methods [18-22] dynamically determine the importance of input tokens and prune them during inference.

The key differences between our method and Transformer pruning are two-fold. (1) Transformer pruning primarily aims to accelerate inference, whereas our target is training acceleration. (2) Through token growth, we obtain a dense model after training that is entirely consistent with the original model at inference, whereas pruning methods produce sparse models.

3. Method
3.1. Preliminaries and Notations

Given a Transformer with $L$ blocks, we denote the sets of input and output tokens of the $l$-th block as $\mathcal{S}_{l-1}$ and $\mathcal{S}_l$, with $l \in \{1, 2, \dots, L\}$, respectively. The index set of the output tokens of the $l$-th block is defined as $\mathcal{I} = \{1, 2, \dots, N_l\}$, where $N_l$ is the number of output tokens of the $l$-th block. We further denote the $i$-th output token of the $l$-th block as $t_{l,i} \in \mathbb{R}^d$, so that $\mathcal{S}_l = \{t_{l,i} \mid \forall i \in \mathcal{I}\}$.
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "spans": [ + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": "For the " + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": "-th Transformer block, we consider to reduce the output tokens to a specified size " + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "inline_equation", + "content": "N_{l}^{\\prime} = \\lfloor rN_{l}\\rfloor" + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "inline_equation", + "content": "r\\in (0,1]" + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": " is the kept rate of tokens, and " + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "inline_equation", + "content": "\\lfloor \\cdot \\rfloor" + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": " is a floor function. Further, we define the index set of kept tokens as " + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "inline_equation", + "content": "\\mathcal{I}' = \\{1,2,\\dots ,N_l'\\}" + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": " and we obtain a subset " + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "inline_equation", + "content": "S_{l}^{\\prime} = \\{t_{l,i}^{\\prime}|\\forall i\\in \\mathcal{I}^{\\prime}\\}" + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": " of output tokens. When the output tokens of the " + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": "-th block are reduced, this results in a corresponding reduction in the quantity of input tokens for blocks beyond the " + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": "-th block. Furthermore, the computational complexity of self-attention blocks and MLP layers in Transformers is directly proportional to the number of input tokens. According to the work [43], the computation in the forward and backward propagation of modern neural networks roughly conforms to 1:2. Therefore, the reduction of tokens significantly accelerates the computation in both the forward and backward propagations during training if " + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "inline_equation", + "content": "r < 1" + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": ". Note that, to reduce the complex search computation for the kept rate of tokens " + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": " across all Transformer blocks, we simply and effectively set " + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 436, + 548, + 663 + ], + "type": "text", + "content": " to be the same in all blocks that benefit from acceleration." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 670, + 408, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 670, + 408, + 682 + ], + "spans": [ + { + "bbox": [ + 305, + 670, + 408, + 682 + ], + "type": "text", + "content": "3.2. Overview of ToE" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 689, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 689, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 689, + 547, + 713 + ], + "type": "text", + "content": "As shown in Fig. 1, ToE initially selects a significantly small number of tokens, then progressively grows to the final full-" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15785" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": "token same as the original Transformer, thereby achieving training acceleration. We divide the origin training process into " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "N_{g}" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " stages on average. We use a limited number of tokens to participate in each training stage and gradually grow the token number along with the training stages. The token growth strategy consists of three steps:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 144, + 288, + 276 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 46, + 144, + 287, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 144, + 287, + 215 + ], + "spans": [ + { + "bbox": [ + 46, + 144, + 287, + 215 + ], + "type": "text", + "content": "(1) Initial token selection as the seed tokens. we initially select " + }, + { + "bbox": [ + 46, + 144, + 287, + 215 + ], + "type": "inline_equation", + "content": "\\lfloor r_0N_l\\rfloor" + }, + { + "bbox": [ + 46, + 144, + 287, + 215 + ], + "type": "text", + "content": " output tokens from the origin token set " + }, + { + "bbox": [ + 46, + 144, + 287, + 215 + ], + "type": "inline_equation", + "content": "S_{l}" + }, + { + "bbox": [ + 46, + 144, + 287, + 215 + ], + "type": "text", + "content": " as the seed token set by using Uniform sampling on the index set " + }, + { + "bbox": [ + 46, + 144, + 287, + 215 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 46, + 144, + 287, + 215 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 144, + 287, + 215 + ], + "type": "inline_equation", + "content": "r_0" + }, + { + "bbox": [ + 46, + 144, + 287, + 215 + ], + "type": "text", + "content": " represents the pre-defined initial kept rate, which is default set to less than 0.3 in our experiments unless otherwise specified." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "spans": [ + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "text", + "content": "(2) Token expansion. In the " + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "text", + "content": "-th " + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "inline_equation", + "content": "(\\delta \\in \\{1, 2, \\dots, N_g\\})" + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "text", + "content": " training stage, we perform " + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "text", + "content": " times token expansion to preserve the integrity of the original intermediate feature space. Furthermore, we pre-define the keep rate of the first stage to be " + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "inline_equation", + "content": "r_1" + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "text", + "content": ". The kept rate of " + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "text", + "content": "-th stage " + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "inline_equation", + "content": "r_\\delta" + }, + { + "bbox": [ + 46, + 216, + 288, + 276 + ], + "type": "text", + "content": " is computed as:" + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 110, + 283, + 287, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 283, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 110, + 283, + 287, + 312 + ], + "type": "interline_equation", + "content": "\\mu_ {\\delta} = \\left\\{ \\begin{array}{l l} r _ {1} - r _ {0}, & \\text {i f} \\delta = 1, \\\\ \\frac {1 - r _ {1}}{N _ {g} - 1}, & \\text {o t h e r w i s e ,} \\end{array} \\right. 
\\tag {1}", + "image_path": "d95e58b0c8d40a46edcebee8e45754b9f761b8f491c612679e1446a3a4d469b6.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 112, + 316, + 176, + 326 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 316, + 176, + 326 + ], + "spans": [ + { + "bbox": [ + 112, + 316, + 176, + 326 + ], + "type": "interline_equation", + "content": "r _ {\\delta} = r _ {\\delta - 1} + \\mu_ {\\delta},", + "image_path": "df34849537d565e5bd505332de63d06206ba0db73d628dc7ad7fe36010484748.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "spans": [ + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "inline_equation", + "content": "\\mu_{\\delta}" + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "text", + "content": " is the token expansion rate in the " + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "text", + "content": "-th training stage and " + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "inline_equation", + "content": "r_1 = 2 \\cdot r_0 \\in (0,1]" + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "text", + "content": ". After the " + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "text", + "content": " times token expansion, we select " + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "inline_equation", + "content": "\\lfloor r_{\\delta}N_l\\rfloor" + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "text", + "content": " tokens from the full-token set " + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "inline_equation", + "content": "S_{l}" + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "text", + "content": ". In Sec. 3.3.2, we will introduce the widest feature-distribution token expansion method to select " + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "inline_equation", + "content": "\\lfloor r_{\\delta}N_l\\rfloor" + }, + { + "bbox": [ + 46, + 333, + 287, + 416 + ], + "type": "text", + "content": " tokens, which aims to expand the token distribution space to effectively present full-token feature distribution." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 416, + 287, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 416, + 287, + 499 + ], + "spans": [ + { + "bbox": [ + 46, + 416, + 287, + 499 + ], + "type": "text", + "content": "(3) Token merging. To further avoid information loss during the training process, we consider merging the unselected tokens into the selected ones in the token expansion process, which retains effective information of the unselected tokens in the merged token set " + }, + { + "bbox": [ + 46, + 416, + 287, + 499 + ], + "type": "inline_equation", + "content": "S_{l}^{\\prime}" + }, + { + "bbox": [ + 46, + 416, + 287, + 499 + ], + "type": "text", + "content": ". Inspired by ToMe [11], we merge averagely the tokens that the feature distributions are close as one new token, which is further introduced in Sec. 3.3.3." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 500, + 287, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 500, + 287, + 548 + ], + "spans": [ + { + "bbox": [ + 46, + 500, + 287, + 548 + ], + "type": "text", + "content": "During training, ToE performs steps (1), (2), and (3) on the original full-token set for each training iteration, which reduces the number of tokens involved in training while retaining the effective information from the full-token set." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 555, + 150, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 555, + 150, + 567 + ], + "spans": [ + { + "bbox": [ + 47, + 555, + 150, + 567 + ], + "type": "text", + "content": "3.3. Token Expansion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 574, + 287, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 574, + 287, + 622 + ], + "spans": [ + { + "bbox": [ + 46, + 574, + 287, + 622 + ], + "type": "text", + "content": "In this Section, we introduce the proposed ToE method, including spatial-distribution token initialization, widest feature-distribution token expansion, feature-distribution token merging, and its optimization." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 636, + 249, + 648 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 636, + 249, + 648 + ], + "spans": [ + { + "bbox": [ + 47, + 636, + 249, + 648 + ], + "type": "text", + "content": "3.3.1 Spatial-distribution Token Initialization" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 654, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 287, + 689 + ], + "type": "text", + "content": "For the initialization, we apply a simple strategy to select the initial token set from " + }, + { + "bbox": [ + 46, + 654, + 287, + 689 + ], + "type": "inline_equation", + "content": "S_{l}" + }, + { + "bbox": [ + 46, + 654, + 287, + 689 + ], + "type": "text", + "content": ". We define the index of the initial token set as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 91, + 695, + 287, + 717 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 695, + 287, + 717 + ], + "spans": [ + { + "bbox": [ + 91, + 695, + 287, + 717 + ], + "type": "interline_equation", + "content": "\\mathcal {I} ^ {(I)} = \\{i | \\forall i \\bmod \\left\\lfloor \\frac {1}{r _ {0}} \\right\\rfloor = 1 \\wedge \\forall i \\in \\mathcal {I} \\}. \\tag {2}", + "image_path": "0f00fa081e78694857bb75af61a2825595b62e5e222d1d48b9eb2af395d6178d.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "text", + "content": "The selected token set and the unselected tokens set can be expressed as " + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "inline_equation", + "content": "\\mathbb{A} = \\{t_{l,i}|\\forall i\\in \\mathcal{I}^{(I)}\\}" + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "inline_equation", + "content": "\\mathbb{B} = \\mathcal{S}_l - \\mathbb{A}" + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "text", + "content": ", respectively. 
This initialization strategy is based on spatial distribution: we choose one token out of every $\lfloor \frac{1}{r_0} \rfloor$ tokens of the original token set and add it to the initial token set. The strategy is simple yet effective, and ensures that the initially selected tokens provide broad spatial coverage across the image patches.

3.3.2 Widest Feature-distribution Token Expansion

Previous works [11, 18] show that the intermediate feature space of modern Transformers is overparameterized, which is why they prune full-token Transformers into sparse ones. In fact, the token initialization above already yields such a sparse Transformer. However, performance drops significantly if we train only on these selected tokens. We therefore grow the number of tokens, which is expected to preserve the integrity of the original intermediate feature space and to avoid losing tokens that contain valuable information. Motivated by this, we seek to maintain the integrity of the intermediate feature distribution. Intuitively, when the feature distributions of two token sets are sufficiently close, they carry similar information and can effectively represent each other. Conversely, a token whose feature distribution deviates significantly from all other tokens in the set is difficult to represent by the others, so we select such a token to underscore its importance during token expansion.

To this end, we propose the widest feature-distribution token expansion strategy. Specifically, we perform the expanding operation on the tokens selected by the initialization. For the $\delta$-th stage of token expansion, we regard the selected token set $\mathbb{A} \in \mathbb{R}^{|\mathbb{A}| \times d}$ and the unselected token set $\mathbb{B} \in \mathbb{R}^{|\mathbb{B}| \times d}$ as 2D matrices, where $|\cdot|$ and $d$ denote the number of tokens and the feature dimension, respectively, and $|\mathbb{A}| + |\mathbb{B}| = N_l$. We use the cosine distance as the metric to measure the distance between the feature distributions of tokens in the two sets (for other metrics, see Tab. 9):

$$
\mathcal{D}(\mathbb{B}, \mathbb{A}) = \mathbf{1} - \cos\langle \mathbb{B}, \mathbb{A} \rangle = \mathbf{1} - \frac{\mathbb{B}\mathbb{A}^{\mathrm{T}}}{\|\mathbb{B}\| \cdot \|\mathbb{A}\|}, \tag{3}
$$

where $\mathbf{1}$ is an all-one matrix and $\mathcal{D}(\mathbb{B}, \mathbb{A}) \in \mathbb{R}^{|\mathbb{B}| \times |\mathbb{A}|}$ contains the pairwise distances between tokens in $\mathbb{B}$ and $\mathbb{A}$.
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 601, + 545, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 601, + 545, + 637 + ], + "spans": [ + { + "bbox": [ + 304, + 601, + 545, + 637 + ], + "type": "text", + "content": "We further define the distance between the feature distribution of tokens in " + }, + { + "bbox": [ + 304, + 601, + 545, + 637 + ], + "type": "inline_equation", + "content": "\\mathbb{B}" + }, + { + "bbox": [ + 304, + 601, + 545, + 637 + ], + "type": "text", + "content": " and its closest token in " + }, + { + "bbox": [ + 304, + 601, + 545, + 637 + ], + "type": "inline_equation", + "content": "\\mathbb{A}" + }, + { + "bbox": [ + 304, + 601, + 545, + 637 + ], + "type": "text", + "content": " as distance " + }, + { + "bbox": [ + 304, + 601, + 545, + 637 + ], + "type": "inline_equation", + "content": "(\\mathbb{B} \\to \\mathbb{A}) \\in \\mathbb{R}^{|\\mathbb{B}|}" + }, + { + "bbox": [ + 304, + 601, + 545, + 637 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 349, + 645, + 545, + 658 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 349, + 645, + 545, + 658 + ], + "spans": [ + { + "bbox": [ + 349, + 645, + 545, + 658 + ], + "type": "interline_equation", + "content": "\\operatorname {d i s t a n c e} (\\mathbb {B} \\rightarrow \\mathbb {A}) _ {i} = \\min _ {j} \\left(\\mathcal {D} (\\mathbb {B}, \\mathbb {A}) _ {i, j}\\right), \\tag {4}", + "image_path": "3cfeb5a0076cd6be43abad4cf879dc37b73b37afc9b17b0e8ccb96a307bfcd18.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "inline_equation", + "content": "i\\in \\{1,\\dots ,|B|\\}" + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "inline_equation", + "content": "j\\in \\{1,\\dots ,|A|\\}" + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": ". Eq. 4 indicates that we sample the minimal values of the feature-distribution distance matrix " + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{D}(\\mathbb{B},\\mathbb{A})" + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": " along the second dimension. 
Thus, distance " + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "inline_equation", + "content": "(\\mathbb{B}\\to \\mathbb{A})_i" + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": " measures importance of" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15786" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 102, + 70, + 326, + 192 + ], + "blocks": [ + { + "bbox": [ + 102, + 70, + 326, + 192 + ], + "lines": [ + { + "bbox": [ + 102, + 70, + 326, + 192 + ], + "spans": [ + { + "bbox": [ + 102, + 70, + 326, + 192 + ], + "type": "image", + "image_path": "5c8a1f33feeb9bdc18742850a7af1f0c08f43d23d6ab144b70f1a527aa45ad2f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 194, + 547, + 225 + ], + "lines": [ + { + "bbox": [ + 46, + 194, + 547, + 225 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 547, + 225 + ], + "type": "text", + "content": "Figure 2. Visualization for the feature distribution of token set. We use T-SNE [44] to visualize the output token feature distributions at the first block, the tokens selected by ToE, and the output tokens after the second block. Baselines are DeiT-small trained on ImageNet-1K. ToE preserves the distribution integrity of intermediate features of the original token set across different Transformer blocks while ensuring that feature distributions are as wide as possible." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 329, + 70, + 492, + 192 + ], + "blocks": [ + { + "bbox": [ + 329, + 70, + 492, + 192 + ], + "lines": [ + { + "bbox": [ + 329, + 70, + 492, + 192 + ], + "spans": [ + { + "bbox": [ + 329, + 70, + 492, + 192 + ], + "type": "image", + "image_path": "3f2550f8eecf5402dda4f6f3ff26dc3859e9699e4ca2e741516b9454fdc5eaae.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 232, + 288, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 232, + 288, + 256 + ], + "spans": [ + { + "bbox": [ + 47, + 232, + 288, + 256 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 232, + 288, + 256 + ], + "type": "text", + "content": "-th token in " + }, + { + "bbox": [ + 47, + 232, + 288, + 256 + ], + "type": "inline_equation", + "content": "\\mathbb{B}" + }, + { + "bbox": [ + 47, + 232, + 288, + 256 + ], + "type": "text", + "content": ". 
At this point, we progressively add the most important token to " + }, + { + "bbox": [ + 47, + 232, + 288, + 256 + ], + "type": "inline_equation", + "content": "\\mathbb{A}" + }, + { + "bbox": [ + 47, + 232, + 288, + 256 + ], + "type": "text", + "content": ", which is formulated as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 261, + 220, + 271 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 261, + 220, + 271 + ], + "spans": [ + { + "bbox": [ + 113, + 261, + 220, + 271 + ], + "type": "interline_equation", + "content": "\\mathbb {A} = \\mathbb {A} + t ^ {*}, \\quad \\mathbb {B} = \\mathbb {B} - t ^ {*},", + "image_path": "e311aeb8f1d5a1d78ec65e0233ba285a0931262ea4b4c1512dd8ac3de5e832e2.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 270, + 287, + 283 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 270, + 287, + 283 + ], + "spans": [ + { + "bbox": [ + 85, + 270, + 287, + 283 + ], + "type": "interline_equation", + "content": "t ^ {*} = \\left\\{\\mathbb {B} _ {i} | i = \\operatorname {a r g m a x} (d i s t a n c e (\\mathbb {B} \\rightarrow \\mathbb {A})) \\right\\}, \\tag {5}", + "image_path": "326f2fa66d17762f960dfd6a6b873566907e926fbd8b05288fec0e3d88abbba1.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "spans": [ + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "inline_equation", + "content": "t^*" + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "text", + "content": " is the most important token in " + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "inline_equation", + "content": "\\mathbb{B}" + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "text", + "content": ". When the feature distribution of one token is far from its closest token, it can be said that the feature distribution of this token deviates significantly from that of all other tokens in the token set. The operation described in Eq. 5 is performed for " + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "inline_equation", + "content": "\\lfloor \\mu_{\\delta}N_l \\rfloor" + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "text", + "content": " times to select " + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "inline_equation", + "content": "\\lfloor \\mu_{\\delta}N_l \\rfloor" + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "text", + "content": " tokens from " + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "inline_equation", + "content": "\\mathbb{B}" + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "inline_equation", + "content": "\\mathbb{A}" + }, + { + "bbox": [ + 46, + 288, + 289, + 443 + ], + "type": "text", + "content": ". The widest feature-distribution token expansion strategy ensures that the feature distributions of the selected token set become as wide as possible, preventing the loss of important tokens. 
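A minimal sketch of the sequential expansion of Eqs. (4)-(5), reusing the `cosine_distance` helper sketched above; this is the straightforward (slow) form, before the parallelization discussed next:

```python
import torch

def expand_greedy(A, B, num_new):
    """Move the num_new tokens of B that are farthest from their nearest
    neighbour in A into A, one token per step (Eqs. (4)-(5))."""
    for _ in range(num_new):
        dist_to_A = cosine_distance(B, A).min(dim=1).values   # Eq. (4)
        i = int(torch.argmax(dist_to_A))                       # Eq. (5)
        A = torch.cat([A, B[i:i + 1]], dim=0)
        B = torch.cat([B[:i], B[i + 1:]], dim=0)
    return A, B
```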
However, iterating the expansion $\lfloor \mu_{\delta} N_l \rfloor$ times consumes considerable computational resources. Considering computational parallelization, we modify the expanding operation in Eq. 5 into a parallel form:

$$
\mathbb{A} = \mathbb{A} + \mathcal{S}^{*}, \quad \mathbb{B} = \mathbb{B} - \mathcal{S}^{*}, \qquad \mathcal{S}^{*} = \left\{ \mathbb{B}_i \;\middle|\; i \in \operatorname{topk}_{\lfloor \mu_{\delta} N_l / k \rfloor}\big(\operatorname{distance}(\mathbb{B} \rightarrow \mathbb{A})\big) \right\}, \tag{6}
$$

where $k$ is the pre-defined repetition step of the parallel expanding operation, $\mathcal{S}^{*}$ is the set of important tokens in $\mathbb{B}$, and $\operatorname{topk}_n$ denotes the top-$n$ argmax.
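A minimal sketch of the parallel variant in Eq. (6), again reusing `cosine_distance`: each of the $k$ rounds moves a batch of the currently most distant tokens instead of a single token. Our reading of the Eq. (6) subscript is that each round moves $\lfloor \mu_{\delta} N_l / k \rfloor$ tokens.

```python
import torch

def expand_parallel(A, B, num_new, k=1):
    """Move num_new tokens from B into A in k parallel rounds (Eq. (6))."""
    per_round = max(num_new // k, 1)
    for _ in range(k):
        dist_to_A = cosine_distance(B, A).min(dim=1).values
        top = torch.topk(dist_to_A, min(per_round, B.shape[0])).indices
        mask = torch.zeros(B.shape[0], dtype=torch.bool)
        mask[top] = True
        A = torch.cat([A, B[mask]], dim=0)
        B = B[~mask]
    return A, B
```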
In this way, we perform only $k$ parallel expanding operations to expand by $\lfloor \mu_{\delta} N_l \rfloor$ tokens, and the computational cost is negligible for small $k$.

3.3.3 Feature-distribution Token Merging

After token expansion, we aim to retain the effective information of the unselected tokens; we therefore merge the unselected tokens whose feature distributions are close to the selected ones. The feature-distribution token merging can be formulated as:

$$
\mathcal{S}_l^{\prime} = \left\{ \operatorname{mean}\big(\mathbb{A}_j, \mathcal{S}_j^{(M)}\big) \;\middle|\; \forall j \in \{1, 2, \dots, |\mathbb{A}|\} \right\}, \ \text{where } \mathcal{S}_j^{(M)} = \left\{ \mathbb{B}_i \;\middle|\; \mathcal{I}_i^{(M)} = j, \ \forall i \in \{1, 2, \dots, |\mathbb{B}|\} \right\}, \quad \mathcal{I}_i^{(M)} = \operatorname{argmin}_{j}\, \mathcal{D}(\mathbb{B}, \mathbb{A})_{i,j}, \tag{7}
$$

where $\mathcal{S}_l^{\prime} \in \mathbb{R}^{|\mathbb{A}| \times d}$ is the token set obtained by merging the closest tokens from $\mathbb{B}$ into $\mathbb{A}$, and $\operatorname{mean}(\mathbb{A}_j, \mathcal{S}_j^{(M)})$ indicates that we merge $\mathbb{B}$ into $\mathbb{A}$ by averaging, based on the index set $\mathcal{I}^{(M)} \in \mathbb{R}^{|\mathbb{B}|}$. Note that every $\mathbb{B}_i$ participates in the merging, which avoids information loss for the unselected tokens.

3.3.4 Optimization of ToE

Our objective loss is the same as that of the original models, e.g., the cross-entropy loss in DeiT. The training procedure of ToE is presented in Algorithm 1. Note that we apply ToE only to the output tokens of the first Transformer block; a detailed analysis is given in Sec. 4.4.

ToE is a plug-and-play acceleration module with the following three advantages. (1) As shown in Fig. 2, the selected token set obtained by ToE at multiple block outputs exhibits a larger average distribution distance under T-SNE [44] than the original full-token set (see First block vs. After ToE), while maintaining a feature distribution similar to the original token set. This indicates that ToE preserves the integrity of the intermediate feature distribution of the original token set across different Transformer blocks while reducing the number of tokens. (2) ToE is parameter-free: it introduces no trainable parameters and relies on efficient matrix calculations whose overhead is negligible compared with the computation-intensive self-attention. (3) The speedup factors of ToE (e.g., the token kept rate $r_1$ and the number of training stages $N_g$) are independent of the original model's training hyper-parameters. This decoupling allows ToE to be seamlessly integrated into the original training process, obviating the need for any adjustment of the training hyper-parameters.

4. Experiments

4.1. Experimental Settings

Datasets and baselines. We evaluate our method on ImageNet-1K [45] and CIFAR-10/100 [46]. As baselines, we use two popular ViTs, i.e., DeiT [4] and LV-ViT [5], as the base models to evaluate the proposed ToE on ImageNet-1K. To further evaluate its universality, we integrate ToE into the efficient training framework EfficientTrain [30].
Moreover," + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15787" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 50, + 88, + 287, + 427 + ], + "blocks": [ + { + "bbox": [ + 53, + 75, + 205, + 87 + ], + "lines": [ + { + "bbox": [ + 53, + 75, + 205, + 87 + ], + "spans": [ + { + "bbox": [ + 53, + 75, + 205, + 87 + ], + "type": "text", + "content": "Algorithm 1: Optimization with ToE" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "lines": [ + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "spans": [ + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": "Input: Input dataset " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " output token number " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "N_{l}" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " total training stage " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "N_{g}" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " kept rate of the first training stage " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "r_1" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " repetition step of the parallel expanding operation " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " Transformer parameters " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " maximum iterations " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " Output: Updated Transformer parameters " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "t\\gets 1" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " do Sample from " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " to obtain data sample " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " 
,feed-forwarded through the embedding and first " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " -th transformer blocks to obtain the output token set " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "S_{l}" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " . \n3 " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\% \\% \\%" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " Spatial-distribution Token Initialization%% \n4 " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "r_0\\gets \\frac{1}{2} r_1" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " . \n5 Initialize A and B by " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "r_0,S_l" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " via Eq. 2; \n6 " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\% \\% \\%" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " Widest Feature-distribution Token Expansion%% \n7Obtain the current training stage " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\delta = \\lceil N_g*t / T\\rceil" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " . \n8 for " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "m\\gets 1" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " do if " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "m = 1" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\mu_{m}\\leftarrow r_{1} - r_{0};" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " else " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\mu_{m}\\leftarrow \\frac{1 - r_{1}}{N_{g} - 1}" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " end for " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "n\\gets 1" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " to k do Update A and B by " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\mu_{m},N_{l},k," + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " prior A and prior B via Eq. 
6; \nend \nend \n" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\% \\% \\%" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " Feature-distribution Token Merging%% \n9 Obtain " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "S_l^{\\prime}" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " by A and B via Eq. 7; \n" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "S_l^{\\prime}" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " feed-forwardsed through the " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "l + 1" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " -th transformer block to final layer and progressively obtain the final prediction y; \n" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\% \\% \\%" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " Parameter Updating%% \n2 Use " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " to compute the loss and obtain the gradient " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\nabla \\theta" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " . \n23 Use " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\nabla \\theta" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " to update prior " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " via the optimizer to obtain new " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "text", + "content": " . \nend \nreturn " + }, + { + "bbox": [ + 50, + 88, + 287, + 427 + ], + "type": "inline_equation", + "content": "\\theta" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "algorithm" + }, + { + "bbox": [ + 46, + 441, + 287, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 441, + 287, + 477 + ], + "spans": [ + { + "bbox": [ + 46, + 441, + 287, + 477 + ], + "type": "text", + "content": "we evaluate the transfer learning ability using pre-trained weights of ToE on DeiT and the performance of accelerating the fine-tuning process with ToE on CIFAR-10/100." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 478, + 288, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 478, + 288, + 622 + ], + "spans": [ + { + "bbox": [ + 46, + 478, + 288, + 622 + ], + "type": "text", + "content": "Evaluation metrics. We report Top-1 accuracy, the GPU training time and FLOPs as the evaluation metric. To evaluate the training speed, we report the total GPU hours consumed during the entire training process, as well as the theoretical FLOPs for one forward-backward process. 
To avoid the impact of memory access and kernel launching on training time [12], we report the GPU hours on different numbers of GPUs, but with the same GPU numbers to evaluate different training methods. The FLOPs for the forward process are measured using thop1, and for the backward process, we follow [43] and calculate it as twice the FLOPs of the forward process." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 623, + 289, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 623, + 289, + 672 + ], + "spans": [ + { + "bbox": [ + 46, + 623, + 289, + 672 + ], + "type": "text", + "content": "Implementations. All methods are trained by Pytorch [47]. For DeiT and LV-ViT, all experiments are conducted on four NVIDIA RTX A6000 GPUs" + }, + { + "bbox": [ + 46, + 623, + 289, + 672 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 46, + 623, + 289, + 672 + ], + "type": "text", + "content": ", while EfficientTrain is trained on eight NVIDIA RTX A6000 GPUs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 72, + 545, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 545, + 108 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 545, + 108 + ], + "type": "text", + "content": "All hyper-parameters (e.g., learning rate, decay strategy and rate), and training strategies and optimization processes are the same as the original papers unless otherwise specified." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "content": "Growth strategy. In default, we divide the origin training process into " + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "inline_equation", + "content": "N_{g} = 3" + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "content": " stages on average. The token kept rate of 1st stage " + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "inline_equation", + "content": "r_1" + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "content": " is set to 0.4, 0.5 or 0.6, our method is corresponding to be denoted as ToE " + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "inline_equation", + "content": "r_1 = 0.4" + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "content": ", ToE " + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "inline_equation", + "content": "r_1 = 0.5" + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "content": " or ToE " + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "inline_equation", + "content": "r_1 = 0.6" + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "content": ". Correspondingly, the kept rate of the initial stage " + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "inline_equation", + "content": "r_0" + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "content": " is set to 0.2, 0.25 and 0.3. The repetition step of parallel expanding operation " + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 109, + 547, + 205 + ], + "type": "text", + "content": " is default set to 2, and we perform ToE on the output tokens of the first block for all models." 
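To make the staged growth strategy above concrete, the following is a minimal sketch (not the authors' released code) of how the token kept rate evolves over training, mirroring Algorithm 1: the current stage is delta = ceil(N_g * t / T), the first stage adds r_1 - r_0 on top of the initial kept rate r_0 = r_1 / 2, and every later stage adds (1 - r_1) / (N_g - 1). The function name and signature are illustrative assumptions.

```python
import math

def kept_rate(t: int, T: int, r1: float = 0.5, n_g: int = 3) -> float:
    """Fraction of tokens kept after expansion at iteration t (sketch of Algorithm 1).

    r1 is the kept rate of the first training stage, n_g the number of stages;
    r0 = r1 / 2 is the kept rate used by the spatial-distribution initialization.
    """
    r0 = 0.5 * r1
    delta = max(1, math.ceil(n_g * t / T))      # current training stage, 1..n_g
    rate = r0
    for m in range(1, delta + 1):
        mu = (r1 - r0) if m == 1 else (1.0 - r1) / (n_g - 1)
        rate += mu                              # each stage expands by floor(mu * N_l) tokens
    return min(rate, 1.0)

# With the default r1 = 0.5 and N_g = 3, the schedule keeps 0.5 of the tokens in
# stage 1, 0.75 in stage 2, and all tokens in the final stage, i.e. the last
# stage recovers full-token training.
```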
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 214, + 441, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 214, + 441, + 227 + ], + "spans": [ + { + "bbox": [ + 305, + 214, + 441, + 227 + ], + "type": "text", + "content": "4.2. Results on ImageNet-1k" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "spans": [ + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": "DeiT and LV-ViT As shown in Tab. 2, ToE achieves lossless training acceleration with SOTA performance. For example, " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "\\mathrm{ToE}_{r_1 = 0.5}" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " achieves " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "0.4\\%" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " Top-1 accuracy improvement with " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "1.27\\times" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " theoretical and " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "1.24\\times" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " practical faster speed to train DeiT-tiny. For DeiT-small, it achieves " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "1.3\\times" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " training acceleration without accuracy drop. Compared to the SOTA methods, " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "\\mathrm{ToE}_{r_1 = 0.5}" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " outperforms SViTE [10] and NetworkExpansion [12] at least " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " Top-1 accuracy at the consistent acceleration ratio for training both DeiT-tiny and DeiT-small. Compared to ToMe [11], " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "\\mathrm{ToE}_{r_1 = 0.5}" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " also achieves both higher accuracy and practical training speed. Note that ToMe is able to reduce GFLOPs, but fails to accelerate training due to the usage of unfriendly weighted average attention and layer-wise merging operations. For DeiT-base, " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "\\mathrm{ToE}_{r_1 = 0.5}" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " drops only " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "0.2\\%" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " Top-1 accuracy while saving more than 60 GPU hours in the practical training process, which is comparable to NetworkExpansion with EMA. 
If we relax the restriction of hyper-parameter consistency (presented in Appendix), " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "\\mathrm{ToE}_{r_1 = 0.4}^{\\text{Hyper}}" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " outperforms NetworkExpansion with " + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "inline_equation", + "content": "0.2\\%" + }, + { + "bbox": [ + 304, + 233, + 547, + 473 + ], + "type": "text", + "content": " accuracy and 24h training time reduction." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "text", + "content": "For LV-ViT-T and LV-ViT-S shown in Tab. 3, " + }, + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "inline_equation", + "content": "\\mathrm{ToE}_{r_1 = 0.4}" + }, + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "text", + "content": " achieves efficient training with " + }, + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "inline_equation", + "content": "1.2\\times" + }, + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "text", + "content": " acceleration rate, while without accuracy drop or even with accuracy improvement for training LV-ViT-T, compared to baselines. Note that the results of " + }, + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "inline_equation", + "content": "\\mathrm{ToE}_{r_1 = 0.4}" + }, + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "text", + "content": " and NetworkExpansion are reported with EMA, due to the default LV-ViT training with EMA. In addition, " + }, + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "inline_equation", + "content": "\\mathrm{ToE}_{r_1 = 0.4}" + }, + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "text", + "content": " outperforms NetworkExpansion in both training acceleration and accuracy with 0.5h training time reduction and " + }, + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "inline_equation", + "content": "0.6\\%" + }, + { + "bbox": [ + 304, + 473, + 545, + 581 + ], + "type": "text", + "content": " accuracy for LV-ViT-T, respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "content": "We also present the validation Top-1 accuracy of ToE and NetworkExpansion during training DeiT-tiny and LV-ViT in Fig. 3. As observed, ToE initially reduces token redundancy during training, resulting in some performance drops compared to the baseline. However, in the later stages of training, ToE introduces more tokens for training, gradually reducing the accuracy gap to the baseline. Benefiting from the reduction of token redundancy in the early stages, models trained by ToE with the proposed token expansion and merging achieve higher accuracies, compared to baselines. 
Compared to NetworkExpansion, our ToE is more stable to" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 683, + 263, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 683, + 263, + 693 + ], + "spans": [ + { + "bbox": [ + 47, + 683, + 263, + 693 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 47, + 683, + 263, + 693 + ], + "type": "text", + "content": "https://github.com/Lyken17/pytorch-OpCounter/blob/master/thop" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 693, + 287, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 693, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 693, + 287, + 712 + ], + "type": "text", + "content": "2Note that the used number of GPUs for training may be different to the evaluation of training speedup for a fair comparison." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15788" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 73, + 102, + 520, + 248 + ], + "blocks": [ + { + "bbox": [ + 46, + 72, + 547, + 102 + ], + "lines": [ + { + "bbox": [ + 46, + 72, + 547, + 102 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 547, + 102 + ], + "type": "text", + "content": "Table 2. Performance comparison for DeiT on ImageNet-1K. a/b in the column of Top-1 Acc. means without/with EMA strategy using the official GitHub repo†. The training time is averagely measured on one/two/four NVIDIA RTX A6000 GPUs for DeiT-tiny/small/base 3 times, and the batch size is set to 1, 024 in all following tables and figures." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 73, + 102, + 520, + 248 + ], + "lines": [ + { + "bbox": [ + 73, + 102, + 520, + 248 + ], + "spans": [ + { + "bbox": [ + 73, + 102, + 520, + 248 + ], + "type": "table", + "html": "
ModelMethodConsistencyTop-1 Acc. (%)GFLOPs (per training iter)Training time (total GPU hours)Acceleration (practical rate)
Hyper?Architecture?Strategy?
DeiT-tinyBaseline [4]---72.23.3 × 10354.6h1.00×
(NeurIPS'21) S2ViTE-Tiny (600 epoch) [10]××70.1 (-2.1)2.5 × 103(1.32×)-1.19×
(ICLR'23) ToMeDeTrs→[11]×71.7 (-0.5)2.5 × 103(1.32×)53.3h1.02×
(CVPR'23) NetworkExpansion6→12 [12]×70.3 (-1.9) / 70.1 (-2.1)2.5 × 103(1.32×)43.2h1.26×
ToE r1=0.5 (Ours)72.6 (+0.4)2.6 × 103(1.27×)44.2h1.24×
DeiT-smallBaseline [4]---79.81.3 × 104124.5h1.00×
(ICLR'23) ToMeDeTrs→[11]×79.7 (-0.1)9.8 × 103(1.33×)121.5h1.02×
(CVPR'23) NetworkExpansion6→12 [12]×78.8 (-1.0) / 78.6 (-1.2)9.8 × 103(1.33×)100.3h1.24×
ToE r1=0.5 (Ours)79.8 (+0.0)1.0 × 104(1.30×)102.2h1.22×
DeiT-baseBaseline [4]---81.85.2 × 104292.8h1.00×
(ICML'19) StackBERT [13]×80.8 (-1.0)4.2 × 104(1.24×)231.6h1.26×
(CVPR'23) NetworkExpansion6→12 [12]×81.0 (-0.8) / 81.5 (-0.3)3.9 × 104(1.33×)226.8h1.29×
ToE r1=0.5 (Ours)81.6 (-0.2)4.0 × 104(1.30×)231.2h1.27×
ToE r1=0.4 (Ours)81.4 (-0.4)3.8 × 104(1.37×)225.2h1.30×
ToE Hyper r1=0.5 (Ours)×81.8 (+0.0)3.6 × 104(1.44×)213.2h1.37×
ToE Hyper r1=0.4 (Ours)×81.7 (-0.1)3.3 × 104(1.58×)202.8h1.44×
", + "image_path": "2d39401233cbea7c48190a61fa1725657fb58ab43f9beb1271f4d2cd1cb6fe24.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 77, + 248, + 336, + 257 + ], + "lines": [ + { + "bbox": [ + 77, + 248, + 336, + 257 + ], + "spans": [ + { + "bbox": [ + 77, + 248, + 336, + 257 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 77, + 248, + 336, + 257 + ], + "type": "text", + "content": " https://github.com/huawei-noah/Efficient-Computing/tree/master/TrainingAcceleration/NetworkExpansion" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 49, + 301, + 284, + 378 + ], + "blocks": [ + { + "bbox": [ + 46, + 262, + 288, + 300 + ], + "lines": [ + { + "bbox": [ + 46, + 262, + 288, + 300 + ], + "spans": [ + { + "bbox": [ + 46, + 262, + 288, + 300 + ], + "type": "text", + "content": "Table 3. Performance comparison for LV-ViT on ImageNet-1K. " + }, + { + "bbox": [ + 46, + 262, + 288, + 300 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 46, + 262, + 288, + 300 + ], + "type": "text", + "content": " indicates that results reproduced by the official GitHub repo. The training time is averagely measured on two/four NVIDIA RTX A6000 GPUs 3 times for LV-ViT-T/S with a fixed batch size of 1, 024." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 301, + 284, + 378 + ], + "lines": [ + { + "bbox": [ + 49, + 301, + 284, + 378 + ], + "spans": [ + { + "bbox": [ + 49, + 301, + 284, + 378 + ], + "type": "table", + "html": "
ModelMethodTop-1 Acc. (%)GFLOPs (per training iter)Training time (total GPU hours)
LV-ViT-TBaseline [5]79.18.2 × 103130.5h
(CVPR'23) NetworkExpansion6→12 [12][78.8 (-0.3)]7.1 × 103(1.15×)114.4h (1.14×)
ToE r1=0.4 (Ours)79.4 (+0.3)7.0 × 103(1.17×)113.9h (1.15×)
LV-ViT-SBaseline [5]83.31.9 × 104237.3h
(CVPR'23) NetworkExpansion8→16 [12][82.9 (-0.4)]1.5 × 104(1.27×)195.5h (1.21×)
ToE r1=0.4 (Ours)83.3 (+0.0)1.4 × 104(1.36×)195.3h (1.22×)
LV-ViT-MBaseline [5]84.13.7 × 104368.7h
(CVPR'23) NetworkExpansion10→20 [12]84.0 (-0.1)2.9 × 104(1.28×)292.7h (1.26×)
ToE r1=0.4 (Ours)84.1 (+0.0)2.7 × 104(1.37×)292.5h (1.26×)
", + "image_path": "ea951d67bfce9e4cc46a8224f907852fe43ae31bb8941faab564d6557113be52.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 49, + 407, + 284, + 466 + ], + "blocks": [ + { + "bbox": [ + 47, + 385, + 287, + 407 + ], + "lines": [ + { + "bbox": [ + 47, + 385, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 47, + 385, + 287, + 407 + ], + "type": "text", + "content": "Table 4. Performance comparison between EfficientTrain [30] and our combination framework on ImageNet-1K." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 407, + 284, + 466 + ], + "lines": [ + { + "bbox": [ + 49, + 407, + 284, + 466 + ], + "spans": [ + { + "bbox": [ + 49, + 407, + 284, + 466 + ], + "type": "table", + "html": "
ModelMethodTop-1 Acc. (%)GFLOPs (per training iter)Training time (total GPU hours)
DeiT-tinyBaseline (EfficientTrain) [30]72.51.3 × 10452.5h
(ICCV'23) EfficientTrain [30]73.3 (+0.8)8.8 × 103(1.48×)36.5h (1.44×)
EfficientTrain + ToE r1=0.6 (Ours)73.5 (+1.0)7.6 × 103(1.71×)32.3h (1.63×)
DeiT-smallBaseline (EfficientTrain) [30]80.35.2 × 104121.3h
(ICCV'23) EfficientTrain [30]80.4 (+0.1)3.4 × 104(1.53×)85.2h (1.42×)
EfficientTrain + ToE r1=0.6 (Ours)80.4 (+0.1)2.9 × 104(1.79×)79.4h (1.53×)
", + "image_path": "afceb7dc322b2645dff5d32c9f5eab1f28bb878b4890ad11e3ea2c4782b89766.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 474, + 287, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 545 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 545 + ], + "type": "text", + "content": "train with consistent accuracy improvement during training, while the accuracy of NetworkExpansion with EMA drops significantly at the intermediate epoch number and then restores due to the inconsistent structures of before-and-after models when structure growing. More validation curves are presented in the Appendix." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 545, + 288, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 288, + 654 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 288, + 654 + ], + "type": "text", + "content": "Combination with EfficientTrain [30]. ToE can be seamlessly integrated into the EfficientTrain framework to further improve the performance. We do not modify the pipeline of EfficientTrain and simply apply ToE to the output tokens of the model's first block. The results are summarized in Tab. 4, which effectively evaluates the universality of ToE. The combination of EfficientTrain and ToE achieves higher training speeds to further enhance the training efficiency of EfficientTrain with accuracy improvement." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 659, + 233, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 659, + 233, + 671 + ], + "spans": [ + { + "bbox": [ + 47, + 659, + 233, + 671 + ], + "type": "text", + "content": "4.3. Transfer Results on CIFAR-10/100" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 677, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 289, + 713 + ], + "type": "text", + "content": "we further explore the transfer learning ability of ToE-pre-trained weights and evaluate whether ToE can be used to accelerate the fine-tuning on CIFAR-10/100. For the fine" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 322, + 267, + 424, + 369 + ], + "blocks": [ + { + "bbox": [ + 322, + 267, + 424, + 369 + ], + "lines": [ + { + "bbox": [ + 322, + 267, + 424, + 369 + ], + "spans": [ + { + "bbox": [ + 322, + 267, + 424, + 369 + ], + "type": "image", + "image_path": "e4a076fa3bb61e3910d0c7c855bab5fc0ea079737439b0dcfb9202bcf8c6a1fc.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 371, + 545, + 391 + ], + "lines": [ + { + "bbox": [ + 306, + 371, + 545, + 391 + ], + "spans": [ + { + "bbox": [ + 306, + 371, + 545, + 391 + ], + "type": "text", + "content": "Figure 3. Validation Top-1 accuracy of DeiT-tiny and LV-ViT-T on ImageNet-1k during training with different methods." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 424, + 267, + 523, + 368 + ], + "blocks": [ + { + "bbox": [ + 424, + 267, + 523, + 368 + ], + "lines": [ + { + "bbox": [ + 424, + 267, + 523, + 368 + ], + "spans": [ + { + "bbox": [ + 424, + 267, + 523, + 368 + ], + "type": "image", + "image_path": "355923237431d617b81e17a50299e4dadbc1c37b5bb34e2d63dc71c99d681041.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 308, + 413, + 543, + 490 + ], + "blocks": [ + { + "bbox": [ + 334, + 402, + 516, + 411 + ], + "lines": [ + { + "bbox": [ + 334, + 402, + 516, + 411 + ], + "spans": [ + { + "bbox": [ + 334, + 402, + 516, + 411 + ], + "type": "text", + "content": "Table 5. Results for fine-tuning DeiT on CIFAR-10/100." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 413, + 543, + 490 + ], + "lines": [ + { + "bbox": [ + 308, + 413, + 543, + 490 + ], + "spans": [ + { + "bbox": [ + 308, + 413, + 543, + 490 + ], + "type": "table", + "html": "
ModelPre-trainingFine-tuningTop-1 Acc. (%)
MethodAccelerationMethodAccelerationCIFAR-10CIFAR-100
DeiT-tinyBaseline [4]1.0xBaseline [4]1.0x98.0786.78
Baseline [4]1.0xToE\\( r_1=0.5 \\)1.3x98.10 (+0.03)86.74 (-0.04)
ToE\\( r_1=0.5 \\)1.3xBaseline [4]1.0x98.19 (+0.12)87.10 (+0.32)
ToE\\( r_1=0.5 \\)1.3xToE\\( r_1=0.5 \\)1.3x98.16 (+0.09)86.91 (+0.13)
DeiT-smallBaseline [4]1.0xBaseline [4]1.0x98.9390.15
Baseline [4]1.0xToE\\( r_1=0.5 \\)1.3x98.96 (+0.03)90.19 (+0.04)
ToE\\( r_1=0.5 \\)1.3xBaseline [4]1.0x99.03 (+0.10)90.37 (+0.22)
ToE\\( r_1=0.5 \\)1.3xToE\\( r_1=0.5 \\)1.3x98.99 (+0.06)90.26 (+0.11)
", + "image_path": "c63816583d803418e89e5ec0202d9b1029555ff25acfc4b52ccad086ea31f3fe.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 497, + 545, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 497, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 305, + 497, + 545, + 521 + ], + "type": "text", + "content": "tuning settings, we follow the settings of the official GitHub repo " + }, + { + "bbox": [ + 305, + 497, + 545, + 521 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 305, + 497, + 545, + 521 + ], + "type": "text", + "content": ". We introduce the training details in the Appendix." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 522, + 547, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 547, + 690 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 547, + 690 + ], + "type": "text", + "content": "As shown in Tab. 5, pre-training weights by ToE is able to improve the accuracy on CIFAR-10/100 for DeiT-tiny/small, when using the same baseline training for fine-tuning (see the 1st and 3rd rows in both DeiT-tiny and DeiT-small). For example, ToE pre-training outperforms baseline pretraining by " + }, + { + "bbox": [ + 304, + 522, + 547, + 690 + ], + "type": "inline_equation", + "content": "0.32\\%" + }, + { + "bbox": [ + 304, + 522, + 547, + 690 + ], + "type": "text", + "content": " accuracy on CIFAR-100, which evaluates the strong transfer ability of ToE. In addition, our ToE is also effective and efficient for fine-tuning (see the 1st and 2nd rows in DeiT-tiny/small). ToE achieves " + }, + { + "bbox": [ + 304, + 522, + 547, + 690 + ], + "type": "inline_equation", + "content": "1.3 \\times" + }, + { + "bbox": [ + 304, + 522, + 547, + 690 + ], + "type": "text", + "content": " acceleration for fine-tuning DeiT-tiny with 0.03 accuracy improvement on CIFAR-10. Further, we employ ToE for both pre-training and fine-tuning, which significantly accelerates the training with an accuracy improvement of at least " + }, + { + "bbox": [ + 304, + 522, + 547, + 690 + ], + "type": "inline_equation", + "content": "0.06\\%" + }, + { + "bbox": [ + 304, + 522, + 547, + 690 + ], + "type": "text", + "content": " on CIFAR-10 for both DeiT-tiny/small, compared to that using both baselines." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 702, + 443, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 702, + 443, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 702, + 443, + 713 + ], + "type": "text", + "content": "3https://github.com/facebookresearch/deit" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15789" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 113, + 282, + 132 + ], + "blocks": [ + { + "bbox": [ + 47, + 72, + 288, + 111 + ], + "lines": [ + { + "bbox": [ + 47, + 72, + 288, + 111 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 288, + 111 + ], + "type": "text", + "content": "Table 6. Ablation studies of different speedup factors for DeiT-tiny on ImageNet-1K. 
The default " + }, + { + "bbox": [ + 47, + 72, + 288, + 111 + ], + "type": "inline_equation", + "content": "r_0 / r_1" + }, + { + "bbox": [ + 47, + 72, + 288, + 111 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 288, + 111 + ], + "type": "inline_equation", + "content": "N_g" + }, + { + "bbox": [ + 47, + 72, + 288, + 111 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 288, + 111 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 47, + 72, + 288, + 111 + ], + "type": "text", + "content": " are set to 1/2, 3 and 2, respectively. All results in this table have almost the same training speeds for 44h training (total GPU hours)." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 113, + 282, + 132 + ], + "lines": [ + { + "bbox": [ + 50, + 113, + 282, + 132 + ], + "spans": [ + { + "bbox": [ + 50, + 113, + 282, + 132 + ], + "type": "table", + "html": "
DeiT-tinyFactorsr0/r1=1/3r0/r1=2/3Ng=2Ng=4k=1k=3default
Top-1 Acc. (%)72.372.572.472.572.572.672.6
", + "image_path": "7293af1047e8703722870c9aa8e40d77269546798b59aa8efc2e5a3830d108b5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 83, + 161, + 249, + 213 + ], + "blocks": [ + { + "bbox": [ + 47, + 140, + 287, + 159 + ], + "lines": [ + { + "bbox": [ + 47, + 140, + 287, + 159 + ], + "spans": [ + { + "bbox": [ + 47, + 140, + 287, + 159 + ], + "type": "text", + "content": "Table 7. Effect of \"initialization-expansion-merge\" pipeline for DeiT on ImageNet-1K. " + }, + { + "bbox": [ + 47, + 140, + 287, + 159 + ], + "type": "inline_equation", + "content": "\\pm" + }, + { + "bbox": [ + 47, + 140, + 287, + 159 + ], + "type": "text", + "content": " indicates we conduct 3 runs to calculate the mean and std." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 83, + 161, + 249, + 213 + ], + "lines": [ + { + "bbox": [ + 83, + 161, + 249, + 213 + ], + "spans": [ + { + "bbox": [ + 83, + 161, + 249, + 213 + ], + "type": "table", + "html": "
InitializationExpansionMergeTop-1 Acc. (%)
RandomSpatialDeiT-tinyDeiT-small
×72.679.8
×72.3±0.279.7±0.1
××71.279.1
××71.779.6
", + "image_path": "7c753cbd91bc0ab27bba020fc9519b7ee1b1a3c74d7595379e2751383ed54fa0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 83, + 241, + 249, + 288 + ], + "blocks": [ + { + "bbox": [ + 47, + 220, + 287, + 240 + ], + "lines": [ + { + "bbox": [ + 47, + 220, + 287, + 240 + ], + "spans": [ + { + "bbox": [ + 47, + 220, + 287, + 240 + ], + "type": "text", + "content": "Table 8. Results of applying ToE to different early transformer block's output tokens for DeiT-tiny on ImageNet-1K." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 83, + 241, + 249, + 288 + ], + "lines": [ + { + "bbox": [ + 83, + 241, + 249, + 288 + ], + "spans": [ + { + "bbox": [ + 83, + 241, + 249, + 288 + ], + "type": "table", + "html": "
BlockTop-1 Acc. (%) DeiT-tinyGFLOPs (per training iter)Training time (total GPU hours)
Embedding72.12.51 × 10343.5h
First block72.62.58 × 10344.2h
Second block72.22.65 × 10345.2h
Third block72.12.71 × 10346.9h
", + "image_path": "eed7f08200b8813a7c5f5c2b415a76d88cfadabdc14a319390be3656b802c8a6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 296, + 141, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 296, + 141, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 296, + 141, + 308 + ], + "type": "text", + "content": "4.4. Ablation Study" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 316, + 288, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 316, + 288, + 399 + ], + "spans": [ + { + "bbox": [ + 46, + 316, + 288, + 399 + ], + "type": "text", + "content": "Effect of speedup factors in ToE. As presented in Tab. 6, we verify the sensitivity of the speedup factors mentioned in Sec. 3.3, such as the ratio of " + }, + { + "bbox": [ + 46, + 316, + 288, + 399 + ], + "type": "inline_equation", + "content": "r_0 / r_1" + }, + { + "bbox": [ + 46, + 316, + 288, + 399 + ], + "type": "text", + "content": ", training stages " + }, + { + "bbox": [ + 46, + 316, + 288, + 399 + ], + "type": "inline_equation", + "content": "N_g" + }, + { + "bbox": [ + 46, + 316, + 288, + 399 + ], + "type": "text", + "content": " and parallel expanding operation " + }, + { + "bbox": [ + 46, + 316, + 288, + 399 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 316, + 288, + 399 + ], + "type": "text", + "content": ". At almost the same training time, ToE is relatively insensitive to these factors, w.r.t accuracy. It allows ToE to be easily integrated into the different models' training pipeline with minimal factor adjustments." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 400, + 287, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 400, + 287, + 484 + ], + "spans": [ + { + "bbox": [ + 46, + 400, + 287, + 484 + ], + "type": "text", + "content": "We further adjust the keep rate of the first stage " + }, + { + "bbox": [ + 46, + 400, + 287, + 484 + ], + "type": "inline_equation", + "content": "r_1" + }, + { + "bbox": [ + 46, + 400, + 287, + 484 + ], + "type": "text", + "content": " to control the training speed, and the relationship between " + }, + { + "bbox": [ + 46, + 400, + 287, + 484 + ], + "type": "inline_equation", + "content": "r_1" + }, + { + "bbox": [ + 46, + 400, + 287, + 484 + ], + "type": "text", + "content": " and training speed is illustrated in Fig. 4. We found ToE achieves more than " + }, + { + "bbox": [ + 46, + 400, + 287, + 484 + ], + "type": "inline_equation", + "content": "1.3 \\times" + }, + { + "bbox": [ + 46, + 400, + 287, + 484 + ], + "type": "text", + "content": " acceleration on DeiT-tiny without accuracy dropping. Additionally, it also demonstrates that reducing token redundancy in the early stages of training sometimes improves the model performance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 485, + 288, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 485, + 288, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 485, + 288, + 605 + ], + "type": "text", + "content": "Effect of \"Initialization-expansion-merging\". Tab. 7 provides an analysis of the necessity of each step in the proposed \"initialization-expansion-merging\" pipeline. When we randomly select tokens as the initial token set rather than spatial-distribution token initialization, it leads to performance degradation. 
Furthermore, removing widest feature-distribution token expansion and feature-distribution token merging from the pipeline significantly decreases the accuracy, e.g., more than " + }, + { + "bbox": [ + 46, + 485, + 288, + 605 + ], + "type": "inline_equation", + "content": "0.9\\%" + }, + { + "bbox": [ + 46, + 485, + 288, + 605 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 485, + 288, + 605 + ], + "type": "inline_equation", + "content": "1.4\\%" + }, + { + "bbox": [ + 46, + 485, + 288, + 605 + ], + "type": "text", + "content": " accuracy drops without the merging and expansion for DeiT-tiny, respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": "Where to apply ToE. Work in [32, 48] demonstrates that class attention tends to be a global pooling as more attention operations are performed, and tokens in early blocks are more similar. This leads to more redundancy in tokens from early blocks. Consequently, applying ToE to the output tokens of early blocks can achieve higher acceleration. As shown in Tab. 8, we default apply ToE to the output tokens of the first block, which achieves the best trade-off between accuracy and training speed, compared to other early blocks." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 332, + 74, + 518, + 194 + ], + "blocks": [ + { + "bbox": [ + 332, + 74, + 518, + 194 + ], + "lines": [ + { + "bbox": [ + 332, + 74, + 518, + 194 + ], + "spans": [ + { + "bbox": [ + 332, + 74, + 518, + 194 + ], + "type": "image", + "image_path": "2c6c478fd4a34c506bc32a7becb6e5b32586b147ae21b6af3b68f5c61bd31370.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 198, + 545, + 218 + ], + "lines": [ + { + "bbox": [ + 306, + 198, + 545, + 218 + ], + "spans": [ + { + "bbox": [ + 306, + 198, + 545, + 218 + ], + "type": "text", + "content": "Figure 4. Trade-off between acceleration ratio and model performance by setting different " + }, + { + "bbox": [ + 306, + 198, + 545, + 218 + ], + "type": "inline_equation", + "content": "r_1" + }, + { + "bbox": [ + 306, + 198, + 545, + 218 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 360, + 250, + 490, + 300 + ], + "blocks": [ + { + "bbox": [ + 306, + 229, + 545, + 249 + ], + "lines": [ + { + "bbox": [ + 306, + 229, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 306, + 229, + 545, + 249 + ], + "type": "text", + "content": "Table 9. Results of different feature-distribution distances in Eq. 3 for DeiT on ImageNet-1K." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 360, + 250, + 490, + 300 + ], + "lines": [ + { + "bbox": [ + 360, + 250, + 490, + 300 + ], + "spans": [ + { + "bbox": [ + 360, + 250, + 490, + 300 + ], + "type": "table", + "html": "
MeasureTop-1 Acc. (%)
DeiT-tinyDeiT-small
Manhattan Distance69.878.0
Euclidean Distance70.678.4
Cosine Distance72.679.8
", + "image_path": "2ea3f59ae626807578f42186b9633575c86a54248696756c4315e24b0b4335b4.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 308, + 547, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 308, + 547, + 380 + ], + "spans": [ + { + "bbox": [ + 305, + 308, + 547, + 380 + ], + "type": "text", + "content": "Effect of the feature-distribution distance. We explore the metric that measures the feature-distribution distance between two tokens in Eq. 3. As shown in Tab. 9, we use three different metrics: Manhattan distance, Euclidean distance, and Cosine distance. We observe that Cosine distance achieves the best performance as the distance metric." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 394, + 378, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 394, + 378, + 406 + ], + "spans": [ + { + "bbox": [ + 306, + 394, + 378, + 406 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 415, + 547, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 547, + 559 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 547, + 559 + ], + "type": "text", + "content": "In this paper, we proposed a novel token growth scheme Token Expansion (ToE) to achieve consistent training acceleration for ViTs. ToE introduce an \"initialization-expansion-merging\" pipeline to maintain the integrity of the intermediate feature distribution of original transformers, preventing the loss of crucial learnable information in the training process. In experiments, ToE can be seamlessly integrated into the training of various transformers and efficient training frameworks in a lossless manner or even accuracy improvement, compared to the entire full-token training. These experimental results of ToE also demonstrate the superior performance gains over the SOTA methods." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 573, + 408, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 573, + 408, + 586 + ], + "spans": [ + { + "bbox": [ + 306, + 573, + 408, + 586 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 547, + 713 + ], + "type": "text", + "content": "This work is supported by the National Natural Science Foundation of China (NO. 62102151), the National Key Research and Development Program of China (No. 2023YFC3306401), Shanghai Sailing Program (21YF1411200), Shanghai Science and Technology Commission (22511104600), CCF-Tencent Rhino-Bird Open Research Fund, the Open Research Fund of Key Laboratory of Advanced Theory and Application in Statistics and Data Science, Ministry of Education (KLATASDS2305), the Fundamental Research Funds for the Central Universities." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15790" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 288, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 288, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 288, + 124 + ], + "type": "text", + "content": "[1] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. NeurIPS, 30, 2017. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 288, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 288, + 168 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 288, + 168 + ], + "type": "text", + "content": "[2] Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL, pages 4171–4186, 2019." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 171, + 288, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 171, + 288, + 224 + ], + "spans": [ + { + "bbox": [ + 53, + 171, + 288, + 224 + ], + "type": "text", + "content": "[3] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. NeurIPS, 33:1877-1901, 2020. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 226, + 288, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 226, + 288, + 280 + ], + "spans": [ + { + "bbox": [ + 53, + 226, + 288, + 280 + ], + "type": "text", + "content": "[4] Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In ICLR, pages 10347-10357. PMLR, 2021. 1, 2, 5, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 282, + 288, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 282, + 288, + 326 + ], + "spans": [ + { + "bbox": [ + 53, + 282, + 288, + 326 + ], + "type": "text", + "content": "[5] Zi-Hang Jiang, Qibin Hou, Li Yuan, Daquan Zhou, Yujun Shi, Xiaojie Jin, Anran Wang, and Jiashi Feng. All tokens matter: Token labeling for training better vision transformers. NeurIPS, 34:18590-18602, 2021. 
5, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 328, + 288, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 328, + 288, + 372 + ], + "spans": [ + { + "bbox": [ + 53, + 328, + 288, + 372 + ], + "type": "text", + "content": "[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, pages 213-229. Springer, 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 373, + 288, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 373, + 288, + 416 + ], + "spans": [ + { + "bbox": [ + 53, + 373, + 288, + 416 + ], + "type": "text", + "content": "[7] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. NeurIPS, 34:12077-12090, 2021. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 418, + 288, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 418, + 288, + 473 + ], + "spans": [ + { + "bbox": [ + 53, + 418, + 288, + 473 + ], + "type": "text", + "content": "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 475, + 288, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 475, + 288, + 506 + ], + "spans": [ + { + "bbox": [ + 53, + 475, + 288, + 506 + ], + "type": "text", + "content": "[9] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, pages 770-778, 2016. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 509, + 288, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 509, + 288, + 552 + ], + "spans": [ + { + "bbox": [ + 48, + 509, + 288, + 552 + ], + "type": "text", + "content": "[10] Tianlong Chen, Yu Cheng, Zhe Gan, Lu Yuan, Lei Zhang, and Zhangyang Wang. Chasing sparsity in vision transformers: An end-to-end exploration. NeurIPS, 34:19974-19988, 2021. 1, 2, 3, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 554, + 288, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 554, + 288, + 586 + ], + "spans": [ + { + "bbox": [ + 48, + 554, + 288, + 586 + ], + "type": "text", + "content": "[11] Daniel Bolya, Cheng-Yang Fu, Xiaoliang Dai, Peizhao Zhang, Christoph Feichtenhofer, and Judy Hoffman. Token merging: Your vit but faster. In ICLR, 2022. 1, 2, 3, 4, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 588, + 288, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 588, + 288, + 621 + ], + "spans": [ + { + "bbox": [ + 48, + 588, + 288, + 621 + ], + "type": "text", + "content": "[12] Ning Ding, Yehui Tang, Kai Han, Chao Xu, and Yunhe Wang. Network expansion for practical training acceleration. In CVPR, pages 20269-20279, 2023. 
1, 2, 3, 6, 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 623, + 288, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 623, + 288, + 655 + ], + "spans": [ + { + "bbox": [ + 48, + 623, + 288, + 655 + ], + "type": "text", + "content": "[13] Linyuan Gong, Di He, Zhuohan Li, Tao Qin, Liwei Wang, and Tieyan Liu. Efficient training of bert by progressively stacking. In ICML, pages 2337-2346. PMLR, 2019. 1, 2, 3, 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 657, + 288, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 657, + 288, + 700 + ], + "spans": [ + { + "bbox": [ + 48, + 657, + 288, + 700 + ], + "type": "text", + "content": "[14] Huanrui Yang, Hongxu Yin, Maying Shen, Pavlo Molchanov, Hai Li, and Jan Kautz. Global vision transformer pruning with hessian-aware saliency. In CVPR, pages 18547-18557, 2023. 1, 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 702, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 702, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 702, + 288, + 713 + ], + "type": "text", + "content": "[15] Fang Yu, Kun Huang, Meng Wang, Yuan Cheng, Wei Chu," + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 326, + 73, + 547, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 547, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 547, + 95 + ], + "type": "text", + "content": "and Li Cui. Width & depth pruning for vision transformers. In AAAI, volume 36, pages 3143-3151, 2022." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 96, + 547, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 547, + 129 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 547, + 129 + ], + "type": "text", + "content": "[16] François Lagunas, Ella Charlaix, Victor Sanh, and Alexander M Rush. Block pruning for faster transformers. In EMNLP, pages 10619-10629, 2021." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 130, + 546, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 130, + 546, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 130, + 546, + 162 + ], + "type": "text", + "content": "[17] Mengzhou Xia, Zexuan Zhong, and Danqi Chen. Structured pruning learns compact and accurate models. In ACL, pages 1513-1528, 2022. 1, 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 164, + 547, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 164, + 547, + 206 + ], + "spans": [ + { + "bbox": [ + 308, + 164, + 547, + 206 + ], + "type": "text", + "content": "[18] Yongming Rao, Wenliang Zhao, Benlin Liu, Jiwen Lu, Jie Zhou, and Cho-Jui Hsieh. Dynamicvit: Efficient vision transformers with dynamic token sparsification. NeurIPS, 34:13937-13949, 2021. 1, 3, 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 209, + 547, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 209, + 547, + 252 + ], + "spans": [ + { + "bbox": [ + 308, + 209, + 547, + 252 + ], + "type": "text", + "content": "[19] Lingchen Meng, Hengduo Li, Bor-Chun Chen, Shiyi Lan, Zuxuan Wu, Yu-Gang Jiang, and Ser-Nam Lim. Adavit: Adaptive vision transformers for efficient image recognition. In CVPR, pages 12309-12318, 2022." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 254, + 546, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 254, + 546, + 308 + ], + "spans": [ + { + "bbox": [ + 308, + 254, + 546, + 308 + ], + "type": "text", + "content": "[20] Mohsen Fayyaz, Soroush Abbasi Koohpayegani, Farnoush Rezaei Jafari, Sunando Sengupta, Hamid Reza Vaezi Joze, Eric Sommerlade, Hamed Pirsiavash, and Jürgen Gall. Adaptive token sampling for efficient vision transformers. In ECCV, pages 396-414. Springer, 2022." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 310, + 546, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 310, + 546, + 364 + ], + "spans": [ + { + "bbox": [ + 308, + 310, + 546, + 364 + ], + "type": "text", + "content": "[21] Zhenglun Kong, Peiyan Dong, Xiaolong Ma, Xin Meng, Wei Niu, Mengshu Sun, Xuan Shen, Geng Yuan, Bin Ren, Hao Tang, et al. Spvit: Enabling faster vision transformers via latency-aware soft token pruning. In ECCV, pages 620-640. Springer, 2022." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 365, + 546, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 365, + 546, + 408 + ], + "spans": [ + { + "bbox": [ + 308, + 365, + 546, + 408 + ], + "type": "text", + "content": "[22] Hongxu Yin, Arash Vahdat, Jose M Alvarez, Arun Mallya, Jan Kautz, and Pavlo Molchanov. A-vit: Adaptive tokens for efficient vision transformer. In CVPR, pages 10809-10818, 2022. 1, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 411, + 546, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 411, + 546, + 453 + ], + "spans": [ + { + "bbox": [ + 308, + 411, + 546, + 453 + ], + "type": "text", + "content": "[23] Sheng Xu, Yanjing Li, Mingbao Lin, Peng Gao, Guodong Guo, Jinhu Lu, and Baochang Zhang. Q-detr: An efficient low-bit quantized detection transformer. In CVPR, pages 3842-3851, 2023. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 455, + 546, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 455, + 546, + 487 + ], + "spans": [ + { + "bbox": [ + 308, + 455, + 546, + 487 + ], + "type": "text", + "content": "[24] Yanjing Li, Sheng Xu, Baochang Zhang, Xianbin Cao, Peng Gao, and Guodong Guo. Q-vit: Accurate and fully quantized low-bit vision transformer. NeurIPS, 35:34451-34463, 2022." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 489, + 546, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 489, + 546, + 521 + ], + "spans": [ + { + "bbox": [ + 308, + 489, + 546, + 521 + ], + "type": "text", + "content": "[25] Yefei He, Zhenyu Lou, Luoming Zhang, Jing Liu, Weijia Wu, Hong Zhou, and Bohan Zhuang. Bivit: Extremely compressed binary vision transformers. In ICCV, pages 5651-5663, 2023." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 523, + 546, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 523, + 546, + 555 + ], + "spans": [ + { + "bbox": [ + 308, + 523, + 546, + 555 + ], + "type": "text", + "content": "[26] Phuoc-Hoan Charles Le and Xinlin Li. Binaryvit: Pushing binary vision transformers towards convolutional models. In CVPR, pages 4664-4673, 2023. 
1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 557, + 547, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 557, + 547, + 600 + ], + "spans": [ + { + "bbox": [ + 308, + 557, + 547, + 600 + ], + "type": "text", + "content": "[27] Cheng Chen, Yichun Yin, Lifeng Shang, Xin Jiang, Yujia Qin, Fengyu Wang, Zhi Wang, Xiao Chen, Zhiyuan Liu, and Qun Liu. bert2bert: Towards reusable pretrained language models. In ACL, pages 2134-2148, 2022. 1, 2, 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 601, + 547, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 601, + 547, + 633 + ], + "spans": [ + { + "bbox": [ + 308, + 601, + 547, + 633 + ], + "type": "text", + "content": "[28] Xin Yuan, Pedro Savarese, and Michael Maire. Growing efficient deep networks by structured continuous sparsification. In ICLR, 2021." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 635, + 546, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 546, + 668 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 546, + 668 + ], + "type": "text", + "content": "[29] Wei Wen, Feng Yan, Yiran Chen, and Hai Li. Autogrow: Automatic layer growing in deep convolutional networks. In KDD, pages 833-841, 2020. 1" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 670, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 547, + 713 + ], + "type": "text", + "content": "[30] Yulin Wang, Yang Yue, Rui Lu, Tianjiao Liu, Zhao Zhong, Shiji Song, and Gao Huang. Efficienttrain: Exploring generalized curriculum learning for training visual backbones. In ICCV, pages 5852-5864, 2023. 2, 3, 5, 7" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15791" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[31] Changlin Li, Bohan Zhuang, Guangrun Wang, Xiaodan Liang, Xiaojun Chang, and Yi Yang. Automated progressive learning for efficient training of vision transformers. In CVPR, pages 12486-12496, 2022. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 288, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 288, + 138 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 288, + 138 + ], + "type": "text", + "content": "[32] Xuran Pan, Xuan Jin, Yuan He, Shiji Song, Gao Huang, et al. Budgeted training for vision transformer. In ICLR, 2022. 
3, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 140, + 287, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 140, + 287, + 183 + ], + "spans": [ + { + "bbox": [ + 49, + 140, + 287, + 183 + ], + "type": "text", + "content": "[33] Katherine Lee, Daphne Ippolito, Andrew Nystrom, Chiyuan Zhang, Douglas Eck, Chris Callison-Burch, and Nicholas Carlini. Deduplicating training data makes language models better. In ACL, pages 8424-8445, 2022. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 184, + 287, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 184, + 287, + 215 + ], + "spans": [ + { + "bbox": [ + 49, + 184, + 287, + 215 + ], + "type": "text", + "content": "[34] Mingxing Tan and Quoc Le. Efficientnetv2: Smaller models and faster training. In ICLR, pages 10096-10106. PMLR, 2021. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 217, + 287, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 217, + 287, + 249 + ], + "spans": [ + { + "bbox": [ + 49, + 217, + 287, + 249 + ], + "type": "text", + "content": "[35] Bradley McDanel and Chi Phuong Huynh. Accelerating vision transformer training via a patch sampling schedule. arXiv preprint arXiv:2208.09520, 2022. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 251, + 287, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 251, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 49, + 251, + 287, + 293 + ], + "type": "text", + "content": "[36] Li Shen, Yan Sun, Zhiyuan Yu, Liang Ding, Xinmei Tian, and Dacheng Tao. On efficient training of large-scale deep learning models: A literature review. arXiv preprint arXiv:2304.03589, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 295, + 287, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 295, + 287, + 326 + ], + "spans": [ + { + "bbox": [ + 49, + 295, + 287, + 326 + ], + "type": "text", + "content": "[37] Yuedong Yang, Guihong Li, and Radu Marculescu. Efficient on-device training via gradient filtering. In CVPR, pages 3811-3820, 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 327, + 287, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 327, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 49, + 327, + 287, + 370 + ], + "type": "text", + "content": "[38] Xucheng Ye, Pengcheng Dai, Junyu Luo, Xin Guo, Yingjie Qi, Jianlei Yang, and Yiran Chen. Accelerating cnn training by pruning activation gradients. In ECCV, pages 322-338. Springer, 2020. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 372, + 287, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 372, + 287, + 424 + ], + "spans": [ + { + "bbox": [ + 49, + 372, + 287, + 424 + ], + "type": "text", + "content": "[39] Yonggan Fu, Haoran You, Yang Zhao, Yue Wang, Chaojian Li, Kailash Gopalakrishnan, Zhangyang Wang, and Yingyan Lin. Fractrain: Fractionally squeezing bit savings both temporally and spatially for efficient dnn training. NeurIPS, 33:12127-12139, 2020. 
3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 426, + 287, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 426, + 287, + 468 + ], + "spans": [ + { + "bbox": [ + 49, + 426, + 287, + 468 + ], + "type": "text", + "content": "[40] Yue Wang, Ziyu Jiang, Xiaohan Chen, Pengfei Xu, Yang Zhao, Yingyan Lin, and Zhangyang Wang. E2-train: Training state-of-the-art cnns with over " + }, + { + "bbox": [ + 49, + 426, + 287, + 468 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 49, + 426, + 287, + 468 + ], + "type": "text", + "content": " energy savings. NeurIPS, 32, 2019. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 471, + 287, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 471, + 287, + 502 + ], + "spans": [ + { + "bbox": [ + 49, + 471, + 287, + 502 + ], + "type": "text", + "content": "[41] Mengtian Li, Ersin Yumer, and Deva Ramanan. Budgeted training: Rethinking deep neural network training under resource constraints. In ICLR, 2019. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 504, + 287, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 504, + 287, + 535 + ], + "spans": [ + { + "bbox": [ + 49, + 504, + 287, + 535 + ], + "type": "text", + "content": "[42] Jiong Zhang, Hsiang-Fu Yu, and Inderjit S Dhillon. Autoassist: A framework to accelerate training of deep neural networks. NeurIPS, 32, 2019. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 537, + 287, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 537, + 287, + 580 + ], + "spans": [ + { + "bbox": [ + 49, + 537, + 287, + 580 + ], + "type": "text", + "content": "[43] Marius Hobbhahn and Jaime Sevilla. What's the backward-forward flop ratio for neural networks? https://epochai.org/blog/backward-forward-FLOP-ratio, 2021. Accessed: 2023-9-28. 3, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 582, + 287, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 582, + 287, + 602 + ], + "spans": [ + { + "bbox": [ + 49, + 582, + 287, + 602 + ], + "type": "text", + "content": "[44] Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. JMLR, 9:2579-2605, 2008. 5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 49, + 604, + 287, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 604, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 49, + 604, + 287, + 635 + ], + "type": "text", + "content": "[45] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, pages 248-255. IEEE, 2009. 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 49, + 637, + 287, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 637, + 287, + 658 + ], + "spans": [ + { + "bbox": [ + 49, + 637, + 287, + 658 + ], + "type": "text", + "content": "[46] Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 2009. 
5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 49, + 659, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 659, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 659, + 287, + 712 + ], + "type": "text", + "content": "[47] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. NeurIPS, 32, 2019. 6" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "text", + "content": "[48] Maithra Raghu, Thomas Unterthiner, Simon Kornblith, Chiyuan Zhang, and Alexey Dosovitskiy. Do vision transformers see like convolutional neural networks? NeurIPS, 34:12116-12128, 2021. 8" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "15792" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/25668f69-d21b-4819-83f7-a45db4e2f055_content_list.json b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/25668f69-d21b-4819-83f7-a45db4e2f055_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..c5aa12d7723c23dc3e2235b810bcbeee10fb82ae --- /dev/null +++ b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/25668f69-d21b-4819-83f7-a45db4e2f055_content_list.json @@ -0,0 +1,1605 @@ +[ + { + "type": "text", + "text": "A Generative Approach for Wikipedia-Scale Visual Entity Recognition", + "text_level": 1, + "bbox": [ + 127, + 130, + 841, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mathilde Caron", + "bbox": [ + 209, + 180, + 339, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ahmet Iscen", + "bbox": [ + 362, + 181, + 465, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Alireza Fathi", + "bbox": [ + 491, + 181, + 596, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Cordelia Schmid", + "bbox": [ + 622, + 181, + 758, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Google Research", + "bbox": [ + 413, + 199, + 550, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 251, + 310, + 266 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we address web-scale visual entity recognition, specifically the task of mapping a given query image to one of the 6 million existing entities in Wikipedia. One way of approaching a problem of such scale is using dual-encoder models (e.g. CLIP), where all the entity names and query images are embedded into a unified space, paving the way for an approximate kNN search. Alternatively, it is also possible to re-purpose a captioning model to directly generate the entity names for a given image. 
In contrast, we introduce a novel Generative Entity Recognition (GER) framework, which given an input image learns to auto-regressively decode a semantic and discriminative \"code\" identifying the target entity. Our experiments demonstrate the efficacy of this GER paradigm, showcasing state-of-the-art performance on the challenging OVEN benchmark. GER surpasses strong captioning, dual-encoder, visual matching and hierarchical classification baselines, affirming its advantage in tackling the complexities of web-scale recognition.", + "bbox": [ + 75, + 284, + 473, + 556 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 588, + 207, + 604 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Generative vision-language models such as GPT-4 [30], Flamingo [2] or PALI [5], are becoming increasingly popular for computer vision applications. They show an impressive ability to generate free-form text for describing the contents of an image (captioning), or answering questions based on an image (visual-question answering). Nevertheless, their potential for recognition tasks [12], which usually require a more concise, structured output, remains underexplored. The focus of this paper is to explore their application for the challenging task of web-scale entity recognition. A recent benchmark, Open-domain Visual Entity recognitionN (OVEN) [12], challenges models to associate an image with a Wikipedia entity from a pool of over six million entities. Models must establish a robust association between images across millions of coarse-grained and fine-grained entities, encompassing a wide spectrum of concepts such as animals, buildings, locations, and a multitude of others [12].", + "bbox": [ + 75, + 614, + 468, + 875 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/71441a3ecf82a7a85c4fedd9224c40da2d4e966b6fd20e143ebd532d5d9457b5.jpg", + "image_caption": [ + "Figure 1. We introduce GER, a novel generative paradigm for web-scale visual entity recognition. We create compact semantic codes for each entity, and learn to auto-regressively generate them for a given query image at inference." + ], + "image_footnote": [], + "bbox": [ + 514, + 252, + 754, + 396 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/94363eac2716d72aa979169ee926daf310567333168b0f6f9e50906de07abac2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 763, + 253, + 859, + 395 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Traditionally, the predominant methods employed to address the challenge of visual entity recognition have revolved around either classification or contrastive dual-encoder paradigm like CLIP [32]. While classification offers a straightforward approach, it grapples with limitations when confronted with extensive label spaces such as that of OVEN, resulting in substantial parameter counts and practical engineering complexities. The dual-encoder approach on the other hand, learns a unified image-text feature space, thereby facilitating efficient nearest neighbor searches for recognition. 
Nonetheless, this approach exhibits its own drawbacks: (a) it does not directly optimize for the final recognition task but instead relies on indirect optimization through contrastive loss where a set of negative data has to be subsampled at training time [11, 29, 32], (b) compressing either the image or text into an embedding vector results in loss of information, detrimentally affecting performance for fine-grained recognition [15] and (c) the memory requirements for storing dense representations scale proportionally with the size of the entity set.", + "bbox": [ + 496, + 474, + 892, + 777 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "These challenges of the dual-encoder paradigm have kindled interest in alternative strategies. Notably, in Natural Language Processing (NLP) domain, recent works challenge the dual-encoder approach and use generative models instead for information retrieval [6, 25, 31, 33, 41, 42]. These works represent each element of the corpus by a compact code of integers, and learn an auto-regressive generative model to decode the target code for a given query. This", + "bbox": [ + 496, + 779, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Code: github.com/google-research/scenic/tree/main/scenic/projects/gerald", + "bbox": [ + 76, + 887, + 470, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "17313", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "paradigm promises to overcome some drawbacks of dual-encoders by simplifying the retrieval pipeline such that the training and inference objectives are the same, and directly encoding the corpus within the model's parameters. Also as an alternative to dual encoders, OVEN paper [12] showcases the feasibility of extending a generative image captioning model [5] for visual entity recognition by matching the generated caption to one of the Wikipedia entity texts [34].", + "bbox": [ + 75, + 90, + 472, + 212 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Inspired by these recent explorations, we propose a Generative Entity Recognition (GER) framework (illustrated in Fig. 1) to facilitate end-to-end visual entity recognition by leveraging generative auto-regressive models. Specifically, we represent each Wikipedia entity with a code, i.e. a short sequence of integers. Then, we train models to predict an entity from an input image by auto-regressively generating the code corresponding to the target entity. We find that creating unAmbiguous, Language-based and Discriminative (ALD) entity codes results in the best variant of our GER framework, which we denote by GER-ALD. In fact, while we observe that unstructured \"atomic\" codes work well in some scenarios, they fail when training data or model capacity are limited or more importantly, when the entity set reaches the million scale (see Sec. 4.4.1). Plus, they cannot generalize to new entities. 
In contrast, we find that semantically-structured codes based on language improve upon atomic codes by leveraging generic concepts shared across related entities (see example in Fig. 1 with \"Black colobus\" and \"Black-and-white colobus\" sharing common code tokens). A simple way of creating codes based on language is to directly tokenize [20] the entity name, which is akin to image captioning where the entity name is used as a caption [6, 12]. However, we find that such tokenized entity names contain clutter and noisy information, all the more so when the entity name is long (see Sec. 4.4.2). Our GER-ALD method improves over this simple captioning baseline by decoding only the most discriminative part of the tokenized entity name, i.e. the part which makes the considered entity name the most different compared to all other entities.", + "bbox": [ + 75, + 214, + 472, + 667 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Finally, we also propose an entity-based pre-training to condition the GER models to web-scale entity recognition. Inspired by recent advances in retrieval-based methods [15, 23], we retrieve a subset of images from a large-scale image-text dataset typically used for captioning or contrastive pre-training [5] and re-purpose it by replacing the original text captions with related OVEN entity names. Overall, our experiments demonstrate the efficacy of the proposed GER paradigm: GER-ALD outperforms previously published numbers on OVEN benchmark [12] by $+6.7$ top-1 accuracy, while using $42\\times$ less parameters. In summary, our contributions are as follows:", + "bbox": [ + 75, + 670, + 472, + 851 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- a generative entity recognition framework (GER) to facilitate end-to-end visual entity recognition;", + "- an innovative strategy for encoding Wikipedia enti" + ], + "bbox": [ + 93, + 854, + 467, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ties into unambiguous language-based discriminative (ALD) codes that are highly effective for GER;", + "bbox": [ + 529, + 90, + 890, + 119 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- an entity-based pre-training process without requiring human intervention;", + "- state-of-the-art results in challenging web-scale OVEN entity recognition and on-par performance to traditional classifiers in smaller-scale label-space scenarios." + ], + "bbox": [ + 516, + 122, + 890, + 195 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related work", + "text_level": 1, + "bbox": [ + 500, + 218, + 635, + 233 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Visual entity recognition aims to recognize classes, or entities given visual inputs [35]. Granularity of visual entity recognition tasks varies from every-day generic objects [8, 9], to fine-grained domains, such as birds [44], dogs [17], cars [18], food [4], landmarks [47], faces [50] and natural world species [43]. Some challenges for the visual entity recognition tasks include imbalanced training classes following a long-tailed distribution [24], or noisy training labels [22]. Recent work [12] proposes a new, web-scale dataset for open-domain entity recognition. 
This challenging benchmark contains 6M entity names derived from Wikipedia page titles, including coarse-grained and fine-grained entities, encompassing a wide spectrum of concepts such as animals, buildings, organizations, landmarks, and a multitude of other. The authors show that generative captioning models (i.e. PaLI [5]) outperform dual encoder models for large-scale entity recognition. In this paper, we build upon this observation, and study generative models for accurate and efficient entity recognition.", + "bbox": [ + 496, + 246, + 890, + 532 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Extreme classification tackles entity recognition specifically at a very large scale with a pure classification approach [1, 3, 26]. Typical approaches explore strategies for scaling to the hundred of thousands scale and preliminary results are even shown at million scale [1]. By leveraging generative image-to-text models, we propose a fresh perspective beyond traditional classification methods typically used in the context of large-scale visual entity recognition.", + "bbox": [ + 496, + 535, + 890, + 656 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Generative auto-regressive retrieval methods are increasingly popular in NLP [6, 25, 31, 33, 41, 42]. GENRE retrieves Wikipedia entities by generating their names in an autoregressive fashion. Seminal work DSI [42] shows the benefit of learning to decode compact codes (created either randomly or with hierarchical k-means clustering) associated with each document. Neural Corpus Indexer [46] proposes a specific decoding scheme for generative retrieval and show the benefit of query augmentation by automatically generating training queries for documents to be indexed. TIGER [33] studies generative retrieval in the context of recommender systems. Finally, [31] conducts a systematic study of generative retrieval systems when scaled to millions of document passages. Only very few works explore this family of approaches in computer vision domain, and only in very small-scale and uni-modal scenarios [49].", + "bbox": [ + 496, + 659, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "17314", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4cc96816c58a6c1fb274e3abae0507b3fefa0dc7e816fca1b20527ce03259cbf.jpg", + "image_caption": [ + "(a) ALD: creating unambiguous language-based discriminative entity codes", + "Figure 2. Overview of GER-ALD method. (a) We utilize a text tokenizer to create compact and semantic codes, which represents each entity with short, but discriminative representations. (b) We learn a generative auto-regressive model, which learns to decode the correct code for given query image and text pair." + ], + "image_footnote": [], + "bbox": [ + 86, + 104, + 450, + 286 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 371, + 166, + 386 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our goal is to explore how to adapt generative autoregressive models to the task of visual entity recognition (GER). While previous works have shown preliminary signal that it is possible to repurpose autoregressive models for entity recognition by directly decoding entity names [6, 12], we propose a more effective strategy. An overview of our framework is in Fig. 2.", + "bbox": [ + 75, + 396, + 468, + 503 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Problem definition", + "text_level": 1, + "bbox": [ + 76, + 512, + 256, + 527 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Web-scale visual entity recognition. The Open-domain Visual Entity recognitioN (OVEN) [12] task consists of mapping input visual queries to one of the 6M English Wikipedia entities. More specifically, for a given image query $x_{v}$ and text query $x_{t}$ , the model needs to recognize the corresponding entity $e$ among the set $\\mathcal{E}$ of all possible entities. The purpose of the input text $x_{t}$ is to achieve unambiguous recognition. For example, when several entities are represented in the query image $x_{v}$ , the text query indicates which one needs to be recognized. Each entity $e \\in \\mathcal{E}$ comes with an entity name, denoted by $t_{e}$ , which corresponds to the title of the entity Wikipedia page.", + "bbox": [ + 75, + 536, + 468, + 717 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Representing each entity with a code. In GER, we represent each entity $e$ by a code denoted by $c^e = \\{c_1^e,\\dots,c_L^e\\} \\in [[1,V]]^L$ where $L$ is the length of the code and $V$ is the size of the vocabulary of all integer values that each code token $c_i^e$ can take. This forms up to $V^L$ unique codes. Note that vanilla image classification and captioning baselines can both be cast into this code formulation. In fact, with $L = 1$ and $V = |\\mathcal{E}|$ , the codes are equivalent to the labels used in standard multi-class classification. On the other hand, if each code token value in $[[1,V]]$ maps to a (sub-)word in a pre-defined vocabulary [20], then the codes simply correspond to standard tokenized text used in captioning", + "bbox": [ + 75, + 717, + 470, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "models [19, 39, 45]. In the following paragraphs, we detail GER-ALD, our most effective strategy for building codes $C$ to represent all 6M English Wikipedia entities.", + "bbox": [ + 496, + 90, + 890, + 137 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. GER-ALD: Creating ALD codes for GER", + "text_level": 1, + "bbox": [ + 498, + 148, + 815, + 162 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We design the code set $C$ so that it has three properties which we find are important for effective GER models: i) semantically structured thanks to language, ii) discriminative and compact, and iii) unambiguous. Our algorithm to create such unambiguous, language-based and discriminative codes, called ALD, is illustrated in Fig. 2 (a) and described in pseudo-code in Algorithm 1 of the Appendix.", + "bbox": [ + 496, + 171, + 890, + 277 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Semantic tokens based on language. We find that entity codes $C$ benefit from following a semantic structure, especially in scenarios where memorizing unstructured atomic codes is difficult. We show in Sec. 4.4.1 that using unstructured atomic codes fail when the amount of training data or the model capacity are limited or, of particular interest, when the entity set size increases to the million scale (see Fig. 3). Intuitively, we want entities that are semantically similar to have some overlapping code tokens. 
For example, we wish that entities $e = \\mathrm{Q}521977$ with corresponding name $t_{\\mathrm{Q}521977} =$ \"Black colobus\" and $e = \\mathrm{Q}358813$ with corresponding name $t_{\\mathrm{Q}358813} =$ \"Black-and-white colobos\" to share some code tokens, given that these correspond to two close species.", + "bbox": [ + 496, + 277, + 890, + 488 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A simple yet effective way of having semantic codes is to tokenize the entity names based on text tokenizers [6, 19, 20, 39]. If each of the sub-words in the entity names are mapped to an integer representing this sub-word, then entities Q358813 and Q521977 naturally share code tokens: those representing the phrase \"colobus\". We denote by $\\Phi(.)$ an off-the-shelf text tokenizer with a vocabulary of $V_{\\Phi}$ sub-words such that $\\Phi(t_e) = \\{y_1^e, \\dots, y_{L_e}^e\\} \\in [[1, V_{\\Phi}]]^{L_e}$ where $L_e$ is the length of the tokenized entity name $\\Phi(t_e)$ . In practice we use the same language tokenizer as GIT [45] for $\\Phi(.)$ and have a vocabulary size of $V = V_{\\Phi} = 30522$ . We refer to the baseline of using codes $C$ created by simple tokenization of the entity name as GER-CAPTION (i.e. we treat the entity name as a caption) [6]. We show in the following paragraph how GER-ALD codes differ from such GER-CAPTION codes by making them more compact and discriminative.", + "bbox": [ + 496, + 489, + 892, + 733 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Discriminative and compact codes. Our goal is to build short and highly discriminative codes because they are easier to learn for the model, as validated by our experiments in Sec. 4.4.2. For example, the tokenized entity name $\\Phi(t_{\\mathrm{Q358813}}) = \\Phi(\\text{\"Black-and-white colobus}\")$ counts $L_{\\mathrm{Q358813}} = 8$ tokens, but clearly not all 8 tokens are important to make this entity discriminative compared to all other existing entities. Hence, we choose to represent each entity with the bare minimum, removing all the clutter which is not only non-discriminative but also adds noise. We achieve this by selecting the most discriminative and rarest tokens", + "bbox": [ + 496, + 734, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "17315", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "within the tokenized entity name. Specifically, we compute the frequency $f_{v}$ of each token value $v \\in [1, V]$ in the vocabulary over the entire corpus of tokenized entity names $\\{\\Phi(t_{e})\\}_{e \\in \\mathcal{E}}$ . We have $f_{v} = \\frac{n_{v}}{\\sum_{u=1}^{V} n_{u}}$ where $n_{v}$ is the number of times $v$ appears in $\\{\\Phi(t_{e})\\}_{e \\in \\mathcal{E}}$ . We create an ALD code $c_{e}$ for each entity by keeping only the $(L-1)$ tokens with the lowest frequencies and discarding the other ones. For example for entity Q358813, the 3 tokens with the lowest frequencies are \"col\", \"ob\" and \"white\". Interestingly, these 3 most discriminative tokens appear at the end of the code for GER-CAPTION. By contrast, they appear right at the beginning of the code for GER-ALD and they constitute the only tokens to be decoded by the model, which intuitively explains the improved performance of GER-ALD codes, as analyzed later in Sec. 4.4.2 especially when entities have long names (see Fig. 4). 
Finally an interesting by-product of using short codes is that they are faster to decode (the complexity of decoding is $\\mathcal{O}(L^{2})$ ) and require less memory footprint to store. Unambiguous codes. Note that several entities might share the same least frequent $(L-1)^{\\text{th}}$ tokens. In this case their code are exactly identical up to the $(L-1)^{\\text{th}}$ token. We use the last $L^{\\text{th}}$ token to ensure that each entity has a unique code: we greedily assign the last code token $c_{L}^{e}$ to the next least frequent word of the tokenized entity name until the code $c_{e}$ is different from all existing codes. If this still fails to create a unique code, we assign $c_{L}^{e}$ to a random token value $v'$ so that the resulting code is unique. With code length $L = 4$ , only 0.5% of the entities use a random token value.", + "bbox": [ + 76, + 90, + 472, + 521 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Training", + "text_level": 1, + "bbox": [ + 76, + 530, + 181, + 546 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we describe the model used to decode entity codes from an input image-text pair. Importantly, we also introduce our entity-based pre-training to condition the generative model to the task of entity recognition.", + "bbox": [ + 76, + 553, + 468, + 614 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Auto-regressive generative models. We build upon GIT [45], an auto-regressive image-to-text generative model. The query image-text pair $(x_v, x_t)$ is transformed into a set of $d$ -dimensional embeddings using a visual encoder for $x_v$ and the text tokenizer $\\Phi(.)$ for $x_t$ . The resulting output is represented by $\\mathbf{X}_v \\in \\mathbb{R}^{N_v \\times d}$ (resp. $\\mathbf{X}_t \\in \\mathbb{R}^{N_t \\times d}$ ) for image (resp. text) tokens. We then input $\\mathbf{X}_v$ and $\\mathbf{X}_t$ to a decoder network $g(.)$ whose task is to decode the next code token $c_i^e$ , conditioned on the previous tokens $c_j^e$ . Each code token value $v$ in $\\mathbb{I}[1, V]$ maps to a learnable $d$ -dimensional vector $\\mathbf{Y}_v$ (gathered in the embedding matrix $\\mathbf{Y} \\in \\mathbb{R}^{(V+1) \\times d}$ where $\\mathbf{Y}_0$ corresponds to the \"beginning of code\" token). We train with a language modeling loss:", + "bbox": [ + 76, + 614, + 472, + 813 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} ^ {e} = \\frac {1}{L} \\sum_ {i = 1} ^ {L} \\ell (c _ {i} ^ {e}, g ([ \\mathbf {X} _ {v}; \\mathbf {X} _ {t}; \\mathbf {Y} _ {0}; \\mathbf {Y} _ {c _ {0 < j < i}} ^ {e} ])\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 814, + 413, + 854 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $[;]$ corresponds to the concatenation operation in the first dimension and $\\ell$ is the softmax cross-entropy loss with label-smoothing [27]. We average $\\mathcal{L}^e$ over a mini-batch and", + "bbox": [ + 76, + 854, + 470, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "learn the weights of the visual encoder, decoder $g(.)$ and embedding matrix $\\mathbf{Y}$ through back-propagation. When decoding, we use beam search to obtain the best predicted entity coded. 
We find that we do not need to constrain the beam search to existing codes since more than $99\\%$ of the top-1 predictions are valid codes for converged GER models.", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Entity-based pre-training. Common auto-regressive models such as GIT [45] or PaLI [5] are pre-trained for descriptive captioning. As shown in Tab. 5 and Fig 9 of the Appendix, they generalize poorly to entity recognition. This is because of the task discrepancy between predicting a descriptive caption and predicting an entity name. In order to condition our models better for entity recognition, we propose to collect a significant number of entity-based pretraining images, each associated with a Wikipedia entity instead of a generic caption. However, such an entity-based pretraining dataset does not exist. We create it in an automatic way, without any human supervision.", + "bbox": [ + 496, + 181, + 892, + 364 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To do so, we leverage existing large-scale image-caption datasets [37, 38]: unless specified otherwise we use WebLI [5]. For each Wikipedia entity, we retrieve in WebLI the image-caption pairs that best represent this entity and replace their original captions by this entity name [15, 23]. Specifically, we embed the 6M entity names of OVEN with a semantic text encoder [32] and find the top- $k$ most similar captions in WebLI. We retrieve their corresponding images and replace their original captions by the considered entity name. We ensure that no image is assigned to multiple entities to avoid instability during training. We vary the number of retrieved images $k$ per entity from 2 to 100 to produce pre-training datasets of different sizes: from 11M up to 55M images (see Fig. 6). We denote by Entity-WebLI (resp. Entity-LAION) the resulting dataset used for entity-based pretraining, built from WebLI (resp. LAION [38]). This way of creating pre-training data is akin to the query generation techniques used for generative retrieval in NLP [46]. However, rather than generating a synthetic input, we simply retrieve input images from a large-scale dataset.", + "bbox": [ + 496, + 364, + 893, + 667 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Baselines", + "text_level": 1, + "bbox": [ + 500, + 678, + 607, + 693 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We compare our method to the following different baselines. Hierarchical classification. Solving million-scale entity recognition with classification is unpractical due to the very large number of classes. A workaround is to use hierarchical classifiers. As OVEN does not come with hierarchical labels we obtain a 3-level hierarchy through k-means of the 6M entity names encoded with sentence-T5 [28]. We train a multi-class classifier for each parent node in the hierarchy. To avoid training a huge number of different classification matrices, we learn a generic classifier matrix per level which is modified by learnable small modifiers depending on the path in the hierarchy.", + "bbox": [ + 496, + 702, + 890, + 883 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Dual encoders. 
Another typical workaround to classifica", + "bbox": [ + 500, + 885, + 890, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "17316", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "tion is to rely on deep metric learning approaches [36] such as Noise Contrastive Estimation [11] and its InfoNCE variant [29] as used in popular dual encoder approaches [16, 32]. Dual encoders learn a unified image-text feature space with separate encoders, thereby facilitating efficient nearest neighbor searches for recognition. We use CLIP-L/14 [32]. Visual matching. We also experiment with pure visual matching baselines. We use off-the-shelf CLIP-L/14 visual encoder and Entity-WebLI (55M) dataset as the memory. We use $k = 500$ for nearest neighbor search with majority voting as it obtains the best results on OVEN val set.", + "bbox": [ + 76, + 90, + 468, + 255 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Captioning. We compare to Git-Large [45] or PaLI [5] image-to-text auto-regressive captioning models.", + "bbox": [ + 76, + 257, + 467, + 286 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "GER-baselines: alternative code creation strategies. We compare GER-ALD, i.e. the best variant of GER, with several alternatives. First, GER-ATOMIC refers to using atomic, completely unstructured codes, i.e. each code token $c_{i}^{e}$ is randomly drawn from $[1, V]^{L}$ [42]. Second, we consider two alternatives using semantically structured codes: (i) GER-HKC where we embed the entity names with a pretrained text encoder before applying hierarchical k-means clustering on the resulting embeddings [42] and (ii) GER-CAPTION where we create a code by tokenizing the entity name with $\\Phi(\\cdot)$ [6, 12]. Details on the baselines are in Appendix Sec. 6.4.", + "bbox": [ + 75, + 287, + 468, + 470 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 482, + 209, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we detail our experimental setup, compare our method with state of the art and baselines, and finally present thorough analyses on code creation and pretraining.", + "bbox": [ + 75, + 507, + 468, + 554 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental setting", + "text_level": 1, + "bbox": [ + 76, + 560, + 274, + 575 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "OVEN dataset consists of 6,063,945 different entities [12]. We evaluate the models on the validation and test splits, by reporting the harmonic mean (HM) of top-1 accuracy scores between \"seen\" and \"unseen\" entities. Seen are entities present in the OVEN training set. Unseen entities are a subset of entities among the ones not present in the training set. The models are evaluated on a total of 3192 entities (1721 for seen and 1471 for unseen) for validation and 15888 entities (8355 for seen and 7533 for unseen) for test. We call the entities that the model is evaluated on by \"positive\" entities (i.e. the union of the 3192 validation and 15888 test entities) and all other entities by \"negative\" entities.", + "bbox": [ + 75, + 583, + 467, + 763 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Pretraining and finetuning. Unless specified otherwise, we pretrain our models on the entity-WebLI dataset, which we create considering all 6M entity names as described in Sec. 3.3. 
After this entity-based pretraining, the models are finetuned on OVEN training set which consists only of the \"seen\" entities. All implementation details are in Sec. 6 in Appendix and code is released in the ScENIC library [7].", + "bbox": [ + 75, + 765, + 467, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Preventing data leakage. We remove pretraining images from Entity-WebLI and Entity-LAION with a cosine simi", + "bbox": [ + 75, + 869, + 467, + 900 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/57c187a289560461613a0d4800de171f75af30d841a0cab3e7cf32cce8f33c02.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | #par.(B) | Pretraining dataset | Pretraining #imgs | OVEN test: HM | seen | unseen
Dual encoder approaches
CLIP ViT-L14 | 0.42 | OpenAI | 400M | 5.2 | 5.6 | 4.9
CLIPfusion ViT-L14 | 0.88 | OpenAI | 400M | 8.4 | 33.6 | 4.8
CLIP2CLIP ViT-L14 | 0.86 | OpenAI | 400M | 11.5 | 12.6 | 10.5
Captioning approaches
GiT-Large | 0.40 | WebLI | 100M | 7.0 | 17.6 | 4.3
PaLI-3B | 3 | WebLI | 1B | 9.1 | 19.1 | 6.0
PaLI-17B | 17 | WebLI | 1B | 16.0 | 28.3 | 11.2
", + "bbox": [ + 498, + 88, + 893, + 267 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/37ec99f5509283dc5ae6866bdfe4afac57f2ec6f5462ffb0f673ac82730735b5.jpg", + "table_caption": [ + "Generative entity recognition" + ], + "table_footnote": [], + "table_body": "
GER-ALD‡ (Ours) | 0.40 | Entity-LAION | 41M | 20.9 | 29.1 | 16.3
GER-ALD (Ours) | 0.40 | Entity-WebLI | 55M | 22.7 | 31.5 | 17.7
", + "bbox": [ + 500, + 282, + 890, + 318 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1. Comparison with state-of-the-art approaches on OVEN entity test split. We report the harmonic mean (HM) of the seen and unseen splits (top-1 accuracy) after finetuning on OVEN training set. Numbers are taken from [12] except methods based on GiT-Large which are run by us. We indicate the total number of parameters of each model (\"# par.\") in billion and the pretraining dataset details. $\\ddagger$ : use only publicly available data.", + "bbox": [ + 496, + 323, + 893, + 421 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "larity (with CLIP-L/14 visual features) above 0.95 with any of the OVEN test or val images. We chose a 0.95 conservative threshold by looking at some examples: similarity 0.95 corresponds to conceptually similar images but clearly not duplicates (see Fig. 8 in Appendix).", + "bbox": [ + 496, + 426, + 893, + 502 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Comparison with the state of the art", + "text_level": 1, + "bbox": [ + 498, + 512, + 815, + 527 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Tab. 1, we compare the performance of GER-ALD, our best GER variant, on the OVEN entity benchmark with previously published numbers after finetuning on the OVEN training set. We see that our method outperforms previously proposed approaches by significant margins. Notably, GER-ALD improves over the captioning model PALI-17B by $+6.8$ top-1 HM test accuracy (a relative improvement of $43\\%$ ) while using $42\\times$ less parameters.", + "bbox": [ + 496, + 535, + 890, + 656 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Comparison with baselines", + "text_level": 1, + "bbox": [ + 498, + 665, + 743, + 681 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Tab. 2, we compare GER-ALD with the different baselines described in Sec. 3.4. All baselines use exactly the same pretraining dataset entity-based WebLI (55M) and model architectures of comparable sizes.", + "bbox": [ + 496, + 688, + 890, + 750 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Comparing GER to different paradigms. We see in Tab. 2 that GER outperforms strong captioning, dual-encoder, visual matching and hierarchical classification baselines, affirming its advantage in tackling web-scale visual entity recognition. Our superior performance compared to dual encoders aligns with previous works observing that CLIP struggles for fine-grained recognition [12, 15]. Due to query image and entity name similarities being captured only through a vector dot product, potentially fine-grained interactions are missed. Also, GER offers significant advan", + "bbox": [ + 496, + 750, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "17317", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/5f1a4045442862784949dcc07bec9612cd9ecb6ebc0385496e946c2b61e42b60.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<tr><td rowspan="2">Method</td><td colspan="3">Entity-based pretraining</td><td colspan="3">+ finetuning on seen</td></tr>
<tr><td>HM</td><td>seen</td><td>unseen</td><td>HM</td><td>seen</td><td>unseen</td></tr>
<tr><td>Dual encoders</td><td>9.2</td><td>8.9</td><td>9.4</td><td>16.3</td><td>24.3</td><td>12.3</td></tr>
<tr><td>Visual matching</td><td>16.2</td><td>15.5</td><td>17.1</td><td>16.4</td><td>15.7</td><td>17.2</td></tr>
<tr><td>Captioning</td><td>13.2</td><td>13.1</td><td>13.3</td><td>16.8</td><td>25.9</td><td>12.5</td></tr>
<tr><td>Hierarchical classif.</td><td>14.7</td><td>14.8</td><td>14.6</td><td>21.8</td><td>29.6</td><td>17.2</td></tr>
<tr><td colspan="7">Generative entity recognition</td></tr>
<tr><td>GER-ATOMIC</td><td>15.9</td><td>15.3</td><td>16.7</td><td>20.1</td><td>26.2</td><td>16.3</td></tr>
<tr><td>GER-CAPTION</td><td>14.3</td><td>16.5</td><td>12.6</td><td>20.7</td><td>26.8</td><td>16.9</td></tr>
<tr><td>GER-HKC</td><td>15.8</td><td>15.5</td><td>16.0</td><td>21.0</td><td>25.2</td><td>17.9</td></tr>
<tr><td>GER-ALD</td><td>17.7</td><td>18.3</td><td>17.2</td><td>22.7</td><td>31.5</td><td>17.7</td></tr>
", + "bbox": [ + 76, + 88, + 472, + 284 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Baseline comparisons. All baselines use exactly the same pretraining dataset Entity-WebLI (55M) and architectures of comparable number of parameters ( $\\sim$ 400M). All numbers are obtained with finetuning on seen split after entity-based pretraining. We report the Harmonic Mean of top-1 accuracy on OVEN test.", + "bbox": [ + 76, + 290, + 470, + 359 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "tages over dual encoders: its computational complexity is not a function of entity set size and it does not require to store entity dense embeddings.", + "bbox": [ + 75, + 372, + 468, + 417 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Different GER variants. In Tab. 2, we compare different variants of GER: one variant using unstructured codes (GER-ATOMIC) and three variants using semantically-structured codes: GER-CAPTION, GER-HKC and GER-ALD. We observe that GER-ALD is the best performing variant, both after entity-based pretraining and after finetuning on the OVEN seen entities. Compared to GER-CAPTION, GER-ALD use codes that are more discriminative and compact, which improves the performance particularly for entities with long names (see Sec. 4.4.2). Compared to GER-ATOMIC, GER-ALD codes yield a semantic structure which is crucial for million-scale label-space as shown in Sec. 4.4.1. GER-HKC model also gets strong performance but relies on an off-the-shelf semantic text encoder which makes the approach more complex and costly compared to GER-ALD. GER-HKC is a first step towards learning codes and we hope future works will propose original and better code creation strategies [41].", + "bbox": [ + 75, + 417, + 470, + 675 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Analysis and ablation study", + "text_level": 1, + "bbox": [ + 76, + 683, + 326, + 699 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, unless specified otherwise, we report the accuracy on the OVEN validation set [12] evaluated after pretraining on Entity-WebLI (27M), i.e. no OVEN finetuning.", + "bbox": [ + 75, + 705, + 468, + 753 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4.1 Semantic versus atomic codes", + "text_level": 1, + "bbox": [ + 76, + 760, + 336, + 773 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Fig. 3 (and Appendix Tab. 6), we report the relative improvement of semantically-structured codes (GER-ALD) compared to unstructured codes (GER-ATOMIC). We vary pretraining data size, model capacity and label-space size. A relative improvement of $100\\%$ means that the performance of GER-ALD doubles compared to GER-ATOMIC.", + "bbox": [ + 75, + 779, + 468, + 869 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Limited pretraining data. In Fig. 3 (left), we see that semantic codes outperform atomic codes when the amount of", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fc485a04e1fa39ed6140465682b580b11f57b5d11e34249509069af703d73f1f.jpg", + "image_caption": [ + "Figure 3. Semantic vs atomic codes. We report the relative improvement in $\\%$ of GER-ALD compared to GER-ATOMIC in 3 scenarios: (i) limited pretraining data, (ii) limited model capacity and (iii) massive-scale label-space. Plots share a common experiment shown by which uses a pretraining dataset size of $27M$ , Large model and 6M entity set. The setting reported in Tab. 2 is $\\star$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 506, + 90, + 647, + 215 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/49ddd9ead505f53ad5aa89fa2bf94a3d9e74c2d35943e33683061e45dc5eb76c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 90, + 771, + 215 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a321e732990e2f78376a325c300a9db02fec44f11189a5cbedc7b97934bc8a73.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 774, + 90, + 880, + 215 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "data available for pretraining diminishes. In fact, the results reported in Tab. 2 corresponds to the most favorable scenario for GER-ATOMIC with 55M pretraining datapoints (represented by $\\star$ in Fig. 3). The relative improvement in this case is still of $14\\%$ while it grows to more than $1000\\%$ when the amount of data is reduced by $5\\times$ .", + "bbox": [ + 496, + 310, + 890, + 400 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Limited model capacity. In Fig. 3 (middle), we see that the model struggles to learn unstructured codes when its capacity is reduced. When considering the small version of our model (114M parameters), the performance with atomic codes is very poor: 0.7 top-1 accuracy.", + "bbox": [ + 496, + 401, + 890, + 477 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Web-scale label-space. In Fig. 3 (right), we vary the number of entities for pretraining. The \"positive\" entities (see Sec. 4.1) are always included in the pretraining set and the amount of \"negative\" entities is increased, effectively acting as distractors. First, we see in Fig. 3 (right) that for relatively small-scale label-space $(\\leq 100k)$ , the benefit of having semantic codes versus atomic is small. In this regime we find that the model can memorize all the entities without the need for semantic structure between them. This aligns with the findings of DSI [42]. We evaluate GER further in small label-spaces in Sec. 4.5. However, we see that in million-scale label-space regime, semantic structure becomes important and significantly improves the performance compared to atomic codes: $+26\\%$ relative improvement.", + "bbox": [ + 496, + 477, + 890, + 688 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Overall, we find that GER-ATOMIC fail to learn unstructured codes when the amount of pretraining data or architecture capacity are reduced, or when the label-space increases to million-scale. Unlike GER-ATOMIC, GER-ALD succeed in these scenarios thanks to the semantic structure easing the learning. Next, we analyze how GER-ALD improves over another type of semantic codes: GER-CAPTION codes.", + "bbox": [ + 496, + 688, + 890, + 792 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4.2 ALD versus captioning codes", + "text_level": 1, + "bbox": [ + 500, + 805, + 751, + 820 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We analyze why unambiguous, language-based and discriminative codes (GER-ALD) are more effective for entity recognition than directly decoding the entity name (GER-CAPTION). In Fig. 
5 (left), we report the performance of GER-ALD and GER-CAPTION when varying the length $L$ of", + "bbox": [ + 496, + 825, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "17318", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5e982a64297df540f5e1649720c37e332c6e861af130b3ec3da376952bfc2789.jpg", + "image_caption": [ + "Figure 4. Accuracy per entity name length for GER-ALD versus GER-CAPTION codes. (left): Accuracy averaged per entity name length. (right): Qualitative examples of predictions for long entity names. Code tokens are symbolized between brackets." + ], + "image_footnote": [], + "bbox": [ + 81, + 90, + 256, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/eedacf2d00bdff76ba9f66b7ef25772fc19c9946e84f65608ef91e873ad609a0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 259, + 90, + 467, + 190 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c8771de9c408fdd4a43b14eadb5f4ac86d327ff9e4ecc5d562866f15ec39ff23.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 468, + 90, + 658, + 190 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f3a380580b731f41ddcf38c9dd6375c81f4c893e3b55d9dcd30452b178e646d2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 665, + 90, + 890, + 190 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7a8b000100afffaa3a3ead775c80cbe07506e2dad8956364ae73be324f6c333d.jpg", + "image_caption": [ + "Figure 5. ALD versus captioning codes. (left): Effect of different code lengths for GER-ALD and GER-CAPTION codes. (right): Cumulative distribution function (CDF) of (in green) the position of the least frequent token in the tokenized entity name and of (in pink) the length of tokenized entity name." + ], + "image_footnote": [], + "bbox": [ + 81, + 233, + 272, + 366 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/60dcc266746f7b206eec86d4e9d14c65ebf83741a0337a71bc15e370e27cbc33.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 272, + 233, + 462, + 364 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the codes. Fixing a code length $L$ to a caption corresponds to keeping only the first $L^{\\text{th}}$ tokens of the entity name. In Fig. 5 (right), we report the cumulative distribution functions (CDF) of (i) the position within the tokenized entity name of the least frequent token among the entire corpus (as described in Sec. 3.2) and (ii) the total number of tokens in the tokenized entity name ( $L_{e}$ in the notations of Sec. 3.2)).", + "bbox": [ + 75, + 445, + 468, + 551 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Discriminative tokens versus number of tokens. We observe in Fig. 5 (left) that the performance of GER-CAPTION increases drastically from $L = 2$ to $L = 4$ . At the same time, we see in Fig. 5 (right) that for $L = 4$ , less than half of the entity names are considered in full while more than $80\\%$ of the GER-CAPTION codes contain the least frequent token of the entire tokenized name. This hints that what is important for language-based codes is not to describe the full entity name but to include its most discriminative part. We also observe that the performance of captioning increases only moderately from $L = 4$ to $L = 8$ even though the number of entities considered in full increases drastically from $46.6\\%$ to $100\\%$ . 
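The statistics behind Fig. 5 (right) are simple to recompute. The sketch below uses a toy, already-tokenized corpus (a hypothetical stand-in, not the paper's data) and reports how often the least frequent token of an entity name falls within the first L tokens:

```python
from collections import Counter

# Toy corpus of tokenized entity names (illustrative stand-in only).
tokenized_names = [
    ["black", "colo", "##bus"],
    ["black", "-", "and", "-", "white", "colo", "##bus"],
    ["black", "swan"],
]

# Corpus-wide token frequencies, as in Sec. 3.2.
freq = Counter(tok for name in tokenized_names for tok in name)

def rarest_token_position(tokens):
    """1-indexed position of the least frequent token in a tokenized name."""
    return 1 + min(range(len(tokens)), key=lambda i: freq[tokens[i]])

L = 4
covered = sum(rarest_token_position(toks) <= L for toks in tokenized_names)
print(f"{covered}/{len(tokenized_names)} names have their rarest token "
      f"within the first {L} tokens")
```

On the real tokenized corpus, the paper reports that at L = 4 more than 80% of GER-CAPTION codes already contain the least frequent token of the full entity name.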
This confirms our intuition that decoding all the entity name tokens does not have a major impact on the performance as long as the most discriminative tokens are decoded. Overall, these observations motivate the ALD design of keeping only the most discriminative tokens, which is shown in Fig. 5 to lead to improved performance compared to decoding the full tokenized entity name.", + "bbox": [ + 75, + 551, + 470, + 838 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effect of code length for GER-ALD. We see in Fig. 5 (left) that the performance of GER-ALD is the best for $L = 4$ . With smaller code lengths, we need to resort to random tokens a lot to achieve unique codes (see Sec. 3.2), which deters the", + "bbox": [ + 75, + 839, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/72bf4dc9d70f5830ea073fffbc11e42754c6eb2225f9706b62e98609ed90fd40.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<tr><td>Selection strategy</td><td>HM</td></tr>
<tr><td>Least frequent tokens</td><td>14.4</td></tr>
<tr><td>Most frequent tokens</td><td>12.3</td></tr>
<tr><td>First tokens</td><td>12.0</td></tr>
<tr><td>Random tokens</td><td>11.3</td></tr>
", + "bbox": [ + 500, + 229, + 681, + 316 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/1b86fe8938b6ef96d3317e4f30f599f745382962b0c45607376ff8242227dda6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<tr><td>Tokens order</td><td>HM</td></tr>
<tr><td>Least frequent first</td><td>14.4</td></tr>
<tr><td>Syntax order</td><td>14.4</td></tr>
<tr><td>Random order</td><td>13.0</td></tr>
<tr><td>Least frequent last</td><td>12.7</td></tr>
", + "bbox": [ + 712, + 229, + 879, + 316 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. Ablation study of GER-ALD codes. (left) Word tokens selection. (right) Tokens order. All variants use $L = 4$ . Default is in top rows. Non language-based GER-ATOMIC gets 11.4 top-1.", + "bbox": [ + 498, + 320, + 890, + 362 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "performance. For example at $L = 2$ , more than $10\\%$ of the entities use a random code token while this percentage decreases to $0.5\\%$ at $L = 4$ . We also see that the performance of GER-ALD decreases for code length above $L = 4$ , which hints that only the few most discriminative tokens are important while additional ones clutter the entity code. Interestingly we also observe in Fig. 5 (left) that when considering all the tokens, GER-ALD performance is slightly below that of GER-CAPTION. This might seem surprising since the same amount of information is present in both cases. However we find that when considering all the tokens, it is more difficult for the model to decode tokens ordered by frequencies than tokens ordered syntactically.", + "bbox": [ + 496, + 375, + 890, + 571 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Entities with long entity names. In Fig. 4 (left), we report the accuracy per entity name length for both GER-ALD and GER-CAPTION finetuned models. We see that the longer the entity name, the more GER-ALD improves over captioning. Longer entities tend to have more noise with key information further into the code. We also show in Fig. 4 qualitative examples of entities with long entity names (more in Fig. 12 in Appendix). In the left example, we see that GER-ALD use the token combination [col][ob] to represent the semantic concept of colobus monkey species. The last token is used to efficiently differentiate between sub-species of colobus. This compact and discriminative way of encoding the entity allows GER-ALD to successfully predict this entity whereas GER-CAPTION fails to generate the entity tokenized name.", + "bbox": [ + 496, + 571, + 890, + 784 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4.3 Creating codes with ALD", + "text_level": 1, + "bbox": [ + 500, + 792, + 725, + 806 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Least frequent tokens. In Tab. 3 (left), we validate our choice of selecting the least frequent tokens by evaluating 3 alternatives: random choice, most frequent tokens and first-appearing tokens in tokenized entity name. We see that these alternative strategies hurt the performance significantly. Qualitative examples in Appendix Fig. 11 show that", + "bbox": [ + 496, + 809, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "17319", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/28cbe87488f35f91349ac790f7a382f1275ed50b7595d54ab89b2ad94f98bba8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<tr><td>Dataset</td><td>Codes</td><td>HM</td></tr>
<tr><td>WebLI</td><td>WebLI caption</td><td>1.8</td></tr>
<tr><td>Entity-WebLI (55M)</td><td>WebLI caption</td><td>12.9 (+11.1)</td></tr>
<tr><td>Entity-WebLI (55M)</td><td>Entity name</td><td>14.8 (+1.9)</td></tr>
<tr><td>Entity-WebLI (55M)</td><td>ALD</td><td>17.5 (+2.7)</td></tr>
", + "bbox": [ + 76, + 88, + 460, + 172 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/28887796d8c7d3ad0466ad5434277fcb99be97764d83f27a20f6c6bd45bad6ea.jpg", + "image_caption": [ + "Figure 6. Entity-based pretraining ablation. (left): Validation OVEN accuracy. (right): Examples of original WebLI captions versus corresponding OVEN entity names.", + "Figure 7. Pretraining. We vary the size of the pretraining dataset by changing the amount of retrieved examples from WebLI for each OVEN entity (see Sec. 3.3)." + ], + "image_footnote": [], + "bbox": [ + 81, + 223, + 290, + 342 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "the kept tokens are less semantic and discriminative compared to GER-ALD strategy of keeping the least frequent tokens. Note that all these variants are at least as good as GER-ATOMIC (11.4 top-1) which is not based on language at all.", + "bbox": [ + 75, + 351, + 467, + 411 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Decoding order. In Tab. 3 (right), we vary the order of the first $L - 1$ tokens in GER-ALD codes. Instead of decoding tokens from least to most frequent, we evaluate most to least frequent, syntax order and random order. Note that the selected tokens are the same in all variants, only their order changes. We see that both \"least frequent first\" and \"syntax\" orders achieve the best of performance.", + "bbox": [ + 75, + 411, + 467, + 516 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4.4 Entity-based pretraining", + "text_level": 1, + "bbox": [ + 76, + 529, + 303, + 544 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Entity-based pretraining. In Fig. 6, we analyze why our entity-based pretraining improves over the standard captioning pretraining of PaLI or GiT models. First, we see that our method of selecting WebLI data relevant to OVEN entities drastically improves the performance (+11.1 in Fig. 6 (left)). This is because, by design, we select image-text pairs from WebLI that have captions similar to OVEN entity names. Hence, this data is directly relevant for the OVEN entity recognition benchmark. Second, we see that replacing the original WebLI caption with its corresponding entity name from OVEN leads to superior performance (+1.9). We see in the qualitative examples of Fig. 6 (right) that original captions contain a lot of descriptive information not directly relevant to the entity. Lastly, we confirm that using GER-ALD codes is better (+2.7) than tokenized entity name.", + "bbox": [ + 75, + 553, + 467, + 779 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Dataset size. In Fig. 7, we evaluate the effect of the pretraining dataset size for GER models. We control the dataset size by varying the amount of retrieved examples from WebLI for each of the OVEN entities (see Sec. 3.3). We see in Fig. 7 that GER-ALD, GER-CAPTION and GER-ATOMIC benefit greatly from more data and do not seem to have reached saturation yet. As analyzed in Sec. 4.4.1, GER-ATOMIC fails when the amount of pretraining data decreases.", + "bbox": [ + 75, + 780, + 467, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/e0db313c9f8812c0e7d81e835994124684b7edecb88603562464280e32a22e5c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>ImageNet-LT</td><td>WebVision</td></tr>
<tr><td>Classif. MLP</td><td>74.3</td><td>80.9</td></tr>
<tr><td>GER-ATOMIC L = 2</td><td>80.8</td><td>84.7</td></tr>
<tr><td>GER-ALD L = 2</td><td>80.9</td><td>84.8</td></tr>
<tr><td>GER-ATOMIC L = 1 (~ Classif. MAP)</td><td>81.0</td><td>84.8</td></tr>
<tr><td colspan="3">Previously published numbers</td></tr>
<tr><td>NCR [13]</td><td>-</td><td>76.8</td></tr>
<tr><td>CurrNet [10]</td><td>-</td><td>79.3</td></tr>
<tr><td>PEL [40]</td><td>78.3</td><td>-</td></tr>
<tr><td>MAM [14]†</td><td>82.3</td><td>83.6</td></tr>
", + "bbox": [ + 498, + 88, + 888, + 250 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4. Evaluation of classification models and GER on small-scale label-spaces. $\\dagger$ indicates the use of additional data.", + "bbox": [ + 498, + 252, + 890, + 280 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Link with classification", + "text_level": 1, + "bbox": [ + 500, + 287, + 714, + 301 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "A typical way of tackling visual entity recognition is by training a classifier into the number of entities [35]. This is not a viable solution for web-scale problems such as OVEN where a single fully-connected layer for a 6M classes has an enormous parameter count of 4.6B. In this section, we evaluate GER in cases where learning a classification model is a feasible choice (smaller number of classes). Classification can be cast in our GER framework simply by setting $L = 1$ and $V = |\\mathcal{E}| =$ number of classes (see Sec. 3.1), making it a special case of atomic codes with $L = 1$ . Since the decoder decodes a single token, it is equivalent to a multi-layer Multihead Attention Pooling (MAP) head [21, 48]. In Tab. 4, we consider two challenging classification datasets: long-tailed ImageNet-LT [24] and noisy Webvision [22]. We evaluate GER-{ALD, ATOMIC} and a classification baseline using multi-layer perceptron (MLP) on averaged-pooled patch tokens. Implementation details are in Sec 6.3 in Appendix.", + "bbox": [ + 496, + 311, + 890, + 566 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We see in Tab. 4 that using GER-ATOMIC instead of standard MLP improves significantly the performance of the classification model (74.3 versus 81.0 for ImageNet-LT). We also observe that GER-ATOMIC and GER-ALD have comparable performance in this relatively small label-space regime (1k classes). As a matter of fact, this achieves state-of-the-art accuracy for both datasets (when no additional external data is used). This shows that GER framework not only excels for large-scale scenarios, but also works well in datasets with smaller number of visual entities, making GER a general framework for visual entity recognition.", + "bbox": [ + 496, + 568, + 890, + 734 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 750, + 617, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we propose a novel generative framework for web-scale visual entity recognition. We represent each entity by a compact, discriminative and semantic code that a generative auto-regressive model learns to decode. In future work, we will explore ways of creating better entity codes by leveraging additional information: either from the Wikipedia page such as the description of the entity and its attached image or also by using external tools.", + "bbox": [ + 496, + 773, + 890, + 886 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "17320", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Rahul Agrawal, Archit Gupta, Yashoteja Prabhu, and Manik Varma. Multi-label learning with millions of labels: Recommending advertiser bid phrases for web pages. In Proceedings of the 22nd international conference on World Wide Web, pages 13-24, 2013. 
2", + "[2] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in Neural Information Processing Systems, 35:23716-23736, 2022. 1", + "[3] Samy Bengio, Krzysztof Dembczynski, Thorsten Joachims, Marius Kloft, and Manik Varma. Extreme Classification (Dagstuhl Seminar 18291). Dagstuhl Reports, 2019. 2", + "[4] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In ECCV, 2014. 2", + "[5] Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, et al. Pali: A jointly-scaled multilingual language-image model. *ICLR*, 2023. 1, 2, 4, 5, 11, 12", + "[6] Nicola De Cao, Gautier Izacard, Sebastian Riedel, and Fabio Petroni. Autoregressive entity retrieval. arXiv preprint arXiv:2010.00904, 2020. 1, 2, 3, 5", + "[7] Mostafa Dehghani, Alexey Gritsenko, Anurag Arnab, Matthias Minderer, and Yi Tay. Scenic: A JAX library for computer vision research and beyond. arXiv preprint arXiv:2110.11403, 2021. 5", + "[8] Mark Everingham, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes (voc) challenge. *IJCV*, 88, 2010. 2", + "[9] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In CVPR, 2004. 2", + "[10] Sheng Guo, Weilin Huang, Haozhi Zhang, Chenfan Zhuang, Dengke Dong, Matthew R Scott, and Dinglong Huang. CurriculumNet: Weakly supervised learning from large-scale web images. In ECCV, pages 135-150, 2018. 8", + "[11] Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics. JMLR Workshop and Conference Proceedings, 2010. 1, 5", + "[12] Hexiang Hu, Yi Luan, Yang Chen, Urvashi Khandelwal, Mandar Joshi, Kenton Lee, Kristina Toutanova, and Ming-Wei Chang. Open-domain visual entity recognition: Towards recognizing millions of wikipedia entities. ICCV, 2023. 1, 2, 3, 5, 6", + "[13] Ahmet Iscen, Jack Valmadre, Anurag Arnab, and Cordelia Schmid. Learning with neighbor consistency for noisy labels. In CVPR, 2022. 8", + "[14] Ahmet Iscen, Alireza Fathi, and Cordelia Schmid. Improving image recognition by retrieving from web-scale image-text data. CVPR, 2023. 8" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Ahmet Iscen, Mathilde Caron, Alireza Fathi, and Cordelia Schmid. Retrieval-enhanced contrastive vision-text models. *ICLR*, 2024. 1, 2, 4, 5", + "[16] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 5", + "[17] Aditya Khosla, Nityananda Jayadevaprakash, Bangpeng Yao, and Li Fei-Fei. Novel dataset for fine-grained image categorization. In First Workshop on Fine-Grained Visual Categorization, CVPR, 2011. 2", + "[18] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV, 2013. 2", + "[19] Taku Kudo. 
Subword regularization: Improving neural network translation models with multiple subword candidates. arXiv preprint arXiv:1804.10959, 2018.3", + "[20] Taku Kudo and John Richardson. Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing. arXiv preprint arXiv:1808.06226, 2018. 2, 3, 14", + "[21] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In International conference on machine learning, 2019. 8", + "[22] Wen Li, Limin Wang, Wei Li, Eirikur Agustsson, and Luc Van Gool. Webvision database: Visual learning and understanding from web data. arXiv preprint arXiv:1708.02862, 2017. 2, 8, 11", + "[23] Haotian Liu, Kilho Son, Jianwei Yang, Ce Liu, Jianfeng Gao, Yong Jae Lee, and Chunyuan Li. Learning customized visual models with retrieval-augmented knowledge. In CVPR, 2023. 2, 4", + "[24] Ziwei Liu, Zhongqi Miao, Xiaohang Zhan, Jiayun Wang, Boqing Gong, and Stella X. Yu. Large-scale long-tailed recognition in an open world. In CVPR, 2019. 2, 8, 11", + "[25] Sanket Vaibhav Mehta, Jai Gupta, Yi Tay, Mostafa Dehghani, Vinh Q Tran, Jinfeng Rao, Marc Najork, Emma Strubell, and Donald Metzler. Dsi++: Updating transformer memory with new documents. arXiv preprint arXiv:2212.09744, 2022. 1, 2", + "[26] Anshul Mittal, Kunal Dahiya, Shreya Malani, Janani Ramaswamy, Seba Kuruvilla, Jitendra Ajmera, Keng-hao Chang, Sumeet Agarwal, Purushottam Kar, and Manik Varma. Multi-modal extreme classification. In CVPR, 2022. 2", + "[27] Rafael Müller, Simon Kornblith, and Geoffrey E Hinton. When does label smoothing help? Advances in neural information processing systems, 32, 2019. 4", + "[28] Jianmo Ni, Gustavo Hernández Ábrego, Noah Constant, Ji Ma, Keith B Hall, Daniel Cer, and Yinfei Yang. Sentence-t5: Scalable sentence encoders from pre-trained text-to-text models. arXiv preprint arXiv:2108.08877, 2021. 4, 12", + "[29] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. 1, 5" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "17321", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[30] OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023.1", + "[31] Ronak Pradeep, Kai Hui, Jai Gupta, Adam D Lelkes, Honglei Zhuang, Jimmy Lin, Donald Metzler, and Vinh Q Tran. How does generative retrieval scale to millions of passages? arXiv preprint arXiv:2305.11841, 2023. 1, 2", + "[32] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 4, 5", + "[33] Shashank Rajput, Nikhil Mehta, Anima Singh, Raghunanandan H Keshavan, Trung Vu, Lukasz Heldt, Lichan Hong, Yi Tay, Vinh Q Tran, Jonah Samost, et al. Recommender systems with generative retrieval. arXiv preprint arXiv:2305.05065, 2023. 1, 2", + "[34] Stephen Robertson, Hugo Zaragoza, et al. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 2009. 2", + "[35] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. 
Imagenet large scale visual recognition challenge. International journal of computer vision, 2015. 2, 8", + "[36] Florian Schroff, Dmitry Kalenichenko, and James Philbin. Facenet: A unified embedding for face recognition and clustering. In CVPR, 2015. 5", + "[37] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114, 2021. 4", + "[38] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. arXiv preprint arXiv:2210.08402, 2022. 4", + "[39] Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909, 2015. 3", + "[40] Jiang-Xin Shi, Tong Wei, Zhi Zhou, Xin-Yan Han, Jie-Jing Shao, and Yu-Feng Li. Parameter-efficient long-tailed recognition. arXiv preprint arXiv:2309.10019, 2023. 8", + "[41] Weiwei Sun, Lingyong Yan, Zheng Chen, Shuaiqiang Wang, Haichao Zhu, Pengjie Ren, Zhumin Chen, Dawei Yin, Maarten Rijke, and Zhaochun Ren. Learning to tokenize for generative retrieval. NeurIPS, 2023. 1, 2, 6", + "[42] Yi Tay, Vinh Tran, Mostafa Dehghani, Jianmo Ni, Dara Bahri, Harsh Mehta, Zhen Qin, Kai Hui, Zhe Zhao, Jai Gupta, et al. Transformer memory as a differentiable search index. Advances in Neural Information Processing Systems, 2022. 1, 2, 5, 6", + "[43] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In CVPR, 2018. 2" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[44] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 2", + "[45] Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, and Lijuan Wang. Git: A generative image-to-text transformer for vision and language. arXiv preprint arXiv:2205.14100, 2022. 3, 4, 5, 11, 12", + "[46] Yujing Wang, Yingyan Hou, Haonan Wang, Ziming Miao, Shibin Wu, Qi Chen, Yuqing Xia, Chengmin Chi, Guoshuai Zhao, Zheng Liu, et al. A neural corpus indexer for document retrieval. NeurIPS, 2022. 2, 4", + "[47] Tobias Weyand, Andre Araujo, Bingyi Cao, and Jack Sim. Google landmarks dataset v2-a large-scale benchmark for instance-level recognition and retrieval. In CVPR, 2020. 2", + "[48] Xiaohua Zhai, Alexander Kolesnikov, Neil Houlsby, and Lucas Beyer. Scaling vision transformers. In CVPR, 2022. 8", + "[49] Yidan Zhang, Ting Zhang, Dong Chen, Yujing Wang, Qi Chen, Xing Xie, Hao Sun, Weiwei Deng, Qi Zhang, Fan Yang, et al. Irgen: Generative modeling for image retrieval. arXiv preprint arXiv:2303.10126, 2023. 2", + "[50] Zheng Zhu, Guan Huang, Jiankang Deng, Yun Ye, Junjie Huang, Xinze Chen, Jiagang Zhu, Tian Yang, Dalong Du, Jiwen Lu, et al. Webface260m: A benchmark for million-scale deep face recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 
2" + ], + "bbox": [ + 501, + 92, + 890, + 459 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "17322", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/25668f69-d21b-4819-83f7-a45db4e2f055_model.json b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/25668f69-d21b-4819-83f7-a45db4e2f055_model.json new file mode 100644 index 0000000000000000000000000000000000000000..29a01e18904d2d33732b9b310db4ee6f2031530e --- /dev/null +++ b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/25668f69-d21b-4819-83f7-a45db4e2f055_model.json @@ -0,0 +1,2167 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.129, + 0.131, + 0.842, + 0.154 + ], + "angle": 0, + "content": "A Generative Approach for Wikipedia-Scale Visual Entity Recognition" + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.181, + 0.34, + 0.198 + ], + "angle": 0, + "content": "Mathilde Caron" + }, + { + "type": "text", + "bbox": [ + 0.364, + 0.182, + 0.467, + 0.197 + ], + "angle": 0, + "content": "Ahmet Iscen" + }, + { + "type": "text", + "bbox": [ + 0.493, + 0.182, + 0.598, + 0.197 + ], + "angle": 0, + "content": "Alireza Fathi" + }, + { + "type": "text", + "bbox": [ + 0.624, + 0.182, + 0.759, + 0.197 + ], + "angle": 0, + "content": "Cordelia Schmid" + }, + { + "type": "text", + "bbox": [ + 0.415, + 0.2, + 0.551, + 0.217 + ], + "angle": 0, + "content": "Google Research" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.252, + 0.312, + 0.267 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.285, + 0.474, + 0.557 + ], + "angle": 0, + "content": "In this paper, we address web-scale visual entity recognition, specifically the task of mapping a given query image to one of the 6 million existing entities in Wikipedia. One way of approaching a problem of such scale is using dual-encoder models (e.g. CLIP), where all the entity names and query images are embedded into a unified space, paving the way for an approximate kNN search. Alternatively, it is also possible to re-purpose a captioning model to directly generate the entity names for a given image. In contrast, we introduce a novel Generative Entity Recognition (GER) framework, which given an input image learns to auto-regressively decode a semantic and discriminative \"code\" identifying the target entity. Our experiments demonstrate the efficacy of this GER paradigm, showcasing state-of-the-art performance on the challenging OVEN benchmark. GER surpasses strong captioning, dual-encoder, visual matching and hierarchical classification baselines, affirming its advantage in tackling the complexities of web-scale recognition." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.589, + 0.208, + 0.605 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.616, + 0.47, + 0.875 + ], + "angle": 0, + "content": "Generative vision-language models such as GPT-4 [30], Flamingo [2] or PALI [5], are becoming increasingly popular for computer vision applications. They show an impressive ability to generate free-form text for describing the contents of an image (captioning), or answering questions based on an image (visual-question answering). Nevertheless, their potential for recognition tasks [12], which usually require a more concise, structured output, remains underexplored. The focus of this paper is to explore their application for the challenging task of web-scale entity recognition. A recent benchmark, Open-domain Visual Entity recognitionN (OVEN) [12], challenges models to associate an image with a Wikipedia entity from a pool of over six million entities. Models must establish a robust association between images across millions of coarse-grained and fine-grained entities, encompassing a wide spectrum of concepts such as animals, buildings, locations, and a multitude of others [12]." + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.253, + 0.755, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.764, + 0.254, + 0.861, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.404, + 0.895, + 0.461 + ], + "angle": 0, + "content": "Figure 1. We introduce GER, a novel generative paradigm for web-scale visual entity recognition. We create compact semantic codes for each entity, and learn to auto-regressively generate them for a given query image at inference." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.476, + 0.893, + 0.779 + ], + "angle": 0, + "content": "Traditionally, the predominant methods employed to address the challenge of visual entity recognition have revolved around either classification or contrastive dual-encoder paradigm like CLIP [32]. While classification offers a straightforward approach, it grapples with limitations when confronted with extensive label spaces such as that of OVEN, resulting in substantial parameter counts and practical engineering complexities. The dual-encoder approach on the other hand, learns a unified image-text feature space, thereby facilitating efficient nearest neighbor searches for recognition. Nonetheless, this approach exhibits its own drawbacks: (a) it does not directly optimize for the final recognition task but instead relies on indirect optimization through contrastive loss where a set of negative data has to be subsampled at training time [11, 29, 32], (b) compressing either the image or text into an embedding vector results in loss of information, detrimentally affecting performance for fine-grained recognition [15] and (c) the memory requirements for storing dense representations scale proportionally with the size of the entity set." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.903 + ], + "angle": 0, + "content": "These challenges of the dual-encoder paradigm have kindled interest in alternative strategies. Notably, in Natural Language Processing (NLP) domain, recent works challenge the dual-encoder approach and use generative models instead for information retrieval [6, 25, 31, 33, 41, 42]. These works represent each element of the corpus by a compact code of integers, and learn an auto-regressive generative model to decode the target code for a given query. 
This" + }, + { + "type": "page_footnote", + "bbox": [ + 0.078, + 0.888, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Code: github.com/google-research/scenic/tree/main/scenic/projects/gerald" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17313" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.213 + ], + "angle": 0, + "content": "paradigm promises to overcome some drawbacks of dual-encoders by simplifying the retrieval pipeline such that the training and inference objectives are the same, and directly encoding the corpus within the model's parameters. Also as an alternative to dual encoders, OVEN paper [12] showcases the feasibility of extending a generative image captioning model [5] for visual entity recognition by matching the generated caption to one of the Wikipedia entity texts [34]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.215, + 0.473, + 0.668 + ], + "angle": 0, + "content": "Inspired by these recent explorations, we propose a Generative Entity Recognition (GER) framework (illustrated in Fig. 1) to facilitate end-to-end visual entity recognition by leveraging generative auto-regressive models. Specifically, we represent each Wikipedia entity with a code, i.e. a short sequence of integers. Then, we train models to predict an entity from an input image by auto-regressively generating the code corresponding to the target entity. We find that creating unAmbiguous, Language-based and Discriminative (ALD) entity codes results in the best variant of our GER framework, which we denote by GER-ALD. In fact, while we observe that unstructured \"atomic\" codes work well in some scenarios, they fail when training data or model capacity are limited or more importantly, when the entity set reaches the million scale (see Sec. 4.4.1). Plus, they cannot generalize to new entities. In contrast, we find that semantically-structured codes based on language improve upon atomic codes by leveraging generic concepts shared across related entities (see example in Fig. 1 with \"Black colobus\" and \"Black-and-white colobus\" sharing common code tokens). A simple way of creating codes based on language is to directly tokenize [20] the entity name, which is akin to image captioning where the entity name is used as a caption [6, 12]. However, we find that such tokenized entity names contain clutter and noisy information, all the more so when the entity name is long (see Sec. 4.4.2). Our GER-ALD method improves over this simple captioning baseline by decoding only the most discriminative part of the tokenized entity name, i.e. the part which makes the considered entity name the most different compared to all other entities." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.671, + 0.473, + 0.852 + ], + "angle": 0, + "content": "Finally, we also propose an entity-based pre-training to condition the GER models to web-scale entity recognition. Inspired by recent advances in retrieval-based methods [15, 23], we retrieve a subset of images from a large-scale image-text dataset typically used for captioning or contrastive pre-training [5] and re-purpose it by replacing the original text captions with related OVEN entity names. Overall, our experiments demonstrate the efficacy of the proposed GER paradigm: GER-ALD outperforms previously published numbers on OVEN benchmark [12] by \\(+6.7\\) top-1 accuracy, while using \\(42\\times\\) less parameters. 
In summary, our contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.855, + 0.468, + 0.885 + ], + "angle": 0, + "content": "- a generative entity recognition framework (GER) to facilitate end-to-end visual entity recognition;" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.887, + 0.468, + 0.9 + ], + "angle": 0, + "content": "- an innovative strategy for encoding Wikipedia enti" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.855, + 0.468, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.092, + 0.892, + 0.121 + ], + "angle": 0, + "content": "ties into unambiguous language-based discriminative (ALD) codes that are highly effective for GER;" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.123, + 0.892, + 0.15 + ], + "angle": 0, + "content": "- an entity-based pre-training process without requiring human intervention;" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.152, + 0.892, + 0.196 + ], + "angle": 0, + "content": "- state-of-the-art results in challenging web-scale OVEN entity recognition and on-par performance to traditional classifiers in smaller-scale label-space scenarios." + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.123, + 0.892, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.219, + 0.637, + 0.234 + ], + "angle": 0, + "content": "2. Related work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.247, + 0.892, + 0.534 + ], + "angle": 0, + "content": "Visual entity recognition aims to recognize classes, or entities given visual inputs [35]. Granularity of visual entity recognition tasks varies from every-day generic objects [8, 9], to fine-grained domains, such as birds [44], dogs [17], cars [18], food [4], landmarks [47], faces [50] and natural world species [43]. Some challenges for the visual entity recognition tasks include imbalanced training classes following a long-tailed distribution [24], or noisy training labels [22]. Recent work [12] proposes a new, web-scale dataset for open-domain entity recognition. This challenging benchmark contains 6M entity names derived from Wikipedia page titles, including coarse-grained and fine-grained entities, encompassing a wide spectrum of concepts such as animals, buildings, organizations, landmarks, and a multitude of other. The authors show that generative captioning models (i.e. PaLI [5]) outperform dual encoder models for large-scale entity recognition. In this paper, we build upon this observation, and study generative models for accurate and efficient entity recognition." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.536, + 0.892, + 0.657 + ], + "angle": 0, + "content": "Extreme classification tackles entity recognition specifically at a very large scale with a pure classification approach [1, 3, 26]. Typical approaches explore strategies for scaling to the hundred of thousands scale and preliminary results are even shown at million scale [1]. By leveraging generative image-to-text models, we propose a fresh perspective beyond traditional classification methods typically used in the context of large-scale visual entity recognition." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Generative auto-regressive retrieval methods are increasingly popular in NLP [6, 25, 31, 33, 41, 42]. GENRE retrieves Wikipedia entities by generating their names in an autoregressive fashion. 
Seminal work DSI [42] shows the benefit of learning to decode compact codes (created either randomly or with hierarchical k-means clustering) associated with each document. Neural Corpus Indexer [46] proposes a specific decoding scheme for generative retrieval and show the benefit of query augmentation by automatically generating training queries for documents to be indexed. TIGER [33] studies generative retrieval in the context of recommender systems. Finally, [31] conducts a systematic study of generative retrieval systems when scaled to millions of document passages. Only very few works explore this family of approaches in computer vision domain, and only in very small-scale and uni-modal scenarios [49]." + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "17314" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.086, + 0.092, + 0.44, + 0.104 + ], + "angle": 0, + "content": "(a) ALD: creating unambiguous language-based discriminative entity codes" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.106, + 0.451, + 0.287 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.291, + 0.471, + 0.362 + ], + "angle": 0, + "content": "Figure 2. Overview of GER-ALD method. (a) We utilize a text tokenizer to create compact and semantic codes, which represents each entity with short, but discriminative representations. (b) We learn a generative auto-regressive model, which learns to decode the correct code for given query image and text pair." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.372, + 0.168, + 0.387 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.397, + 0.47, + 0.504 + ], + "angle": 0, + "content": "Our goal is to explore how to adapt generative autoregressive models to the task of visual entity recognition (GER). While previous works have shown preliminary signal that it is possible to repurpose autoregressive models for entity recognition by directly decoding entity names [6, 12], we propose a more effective strategy. An overview of our framework is in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.513, + 0.258, + 0.528 + ], + "angle": 0, + "content": "3.1. Problem definition" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.537, + 0.47, + 0.718 + ], + "angle": 0, + "content": "Web-scale visual entity recognition. The Open-domain Visual Entity recognitioN (OVEN) [12] task consists of mapping input visual queries to one of the 6M English Wikipedia entities. More specifically, for a given image query \\( x_{v} \\) and text query \\( x_{t} \\), the model needs to recognize the corresponding entity \\( e \\) among the set \\( \\mathcal{E} \\) of all possible entities. The purpose of the input text \\( x_{t} \\) is to achieve unambiguous recognition. For example, when several entities are represented in the query image \\( x_{v} \\), the text query indicates which one needs to be recognized. Each entity \\( e \\in \\mathcal{E} \\) comes with an entity name, denoted by \\( t_{e} \\), which corresponds to the title of the entity Wikipedia page." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.718, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Representing each entity with a code. 
In GER, we represent each entity \\( e \\) by a code denoted by \\( c^e = \\{c_1^e,\\dots,c_L^e\\} \\in [[1,V]]^L \\) where \\( L \\) is the length of the code and \\( V \\) is the size of the vocabulary of all integer values that each code token \\( c_i^e \\) can take. This forms up to \\( V^L \\) unique codes. Note that vanilla image classification and captioning baselines can both be cast into this code formulation. In fact, with \\( L = 1 \\) and \\( V = |\\mathcal{E}| \\), the codes are equivalent to the labels used in standard multi-class classification. On the other hand, if each code token value in \\( [[1,V]] \\) maps to a (sub-)word in a pre-defined vocabulary [20], then the codes simply correspond to standard tokenized text used in captioning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.138 + ], + "angle": 0, + "content": "models [19, 39, 45]. In the following paragraphs, we detail GER-ALD, our most effective strategy for building codes \\( C \\) to represent all 6M English Wikipedia entities." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.149, + 0.816, + 0.164 + ], + "angle": 0, + "content": "3.2. GER-ALD: Creating ALD codes for GER" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.172, + 0.892, + 0.278 + ], + "angle": 0, + "content": "We design the code set \\( C \\) so that it has three properties which we find are important for effective GER models: i) semantically structured thanks to language, ii) discriminative and compact, and iii) unambiguous. Our algorithm to create such unambiguous, language-based and discriminative codes, called ALD, is illustrated in Fig. 2 (a) and described in pseudo-code in Algorithm 1 of the Appendix." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.279, + 0.892, + 0.489 + ], + "angle": 0, + "content": "Semantic tokens based on language. We find that entity codes \\( C \\) benefit from following a semantic structure, especially in scenarios where memorizing unstructured atomic codes is difficult. We show in Sec. 4.4.1 that using unstructured atomic codes fail when the amount of training data or the model capacity are limited or, of particular interest, when the entity set size increases to the million scale (see Fig. 3). Intuitively, we want entities that are semantically similar to have some overlapping code tokens. For example, we wish that entities \\( e = \\mathrm{Q}521977 \\) with corresponding name \\( t_{\\mathrm{Q}521977} = \\) \"Black colobus\" and \\( e = \\mathrm{Q}358813 \\) with corresponding name \\( t_{\\mathrm{Q}358813} = \\) \"Black-and-white colobos\" to share some code tokens, given that these correspond to two close species." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.49, + 0.893, + 0.734 + ], + "angle": 0, + "content": "A simple yet effective way of having semantic codes is to tokenize the entity names based on text tokenizers [6, 19, 20, 39]. If each of the sub-words in the entity names are mapped to an integer representing this sub-word, then entities Q358813 and Q521977 naturally share code tokens: those representing the phrase \"colobus\". We denote by \\(\\Phi(.)\\) an off-the-shelf text tokenizer with a vocabulary of \\(V_{\\Phi}\\) sub-words such that \\(\\Phi(t_e) = \\{y_1^e, \\dots, y_{L_e}^e\\} \\in [[1, V_{\\Phi}]]^{L_e}\\) where \\(L_e\\) is the length of the tokenized entity name \\(\\Phi(t_e)\\). In practice we use the same language tokenizer as GIT [45] for \\(\\Phi(.)\\) and have a vocabulary size of \\(V = V_{\\Phi} = 30522\\). 
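A compact sketch of how such entity codes can be constructed is given below; it combines the tokenization just described with the least-frequent-token selection and disambiguation detailed in the following paragraphs. The whitespace tokenizer and the entity names are illustrative stand-ins (the paper uses the GIT tokenizer with vocabulary size 30522), so this is a schematic rather than the authors' implementation:

```python
from collections import Counter

def build_ald_codes(entity_names, tokenize, L=4):
    """Illustrative ALD-style codes: keep the (L-1) least frequent tokens of
    each tokenized entity name, then disambiguate with an extra L-th token."""
    tokenized = {e: tokenize(name) for e, name in entity_names.items()}
    # Token frequencies over the whole corpus of tokenized entity names.
    freq = Counter(tok for toks in tokenized.values() for tok in toks)

    codes, used = {}, set()
    for e, toks in tokenized.items():
        # This entity's distinct tokens, rarest first (stable sort keeps syntax order on ties).
        by_rarity = sorted(dict.fromkeys(toks), key=lambda t: freq[t])
        base = tuple(by_rarity[: L - 1])
        code = base
        # Greedily append the next rarest token until the code is unique.
        for extra in by_rarity[L - 1:]:
            if code not in used:
                break
            code = base + (extra,)
        if code in used:  # last resort: a placeholder "random" token
            code = base + (f"<rand{len(used)}>",)
        used.add(code)
        codes[e] = code
    return codes

# Toy usage with a whitespace tokenizer standing in for the real sub-word tokenizer:
names = {"Q521977": "Black colobus", "Q358813": "Black-and-white colobus"}
print(build_ald_codes(names, tokenize=lambda s: s.lower().replace("-", " ").split(), L=3))
```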
We refer to the baseline of using codes \\(C\\) created by simple tokenization of the entity name as GER-CAPTION (i.e. we treat the entity name as a caption) [6]. We show in the following paragraph how GER-ALD codes differ from such GER-CAPTION codes by making them more compact and discriminative." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Discriminative and compact codes. Our goal is to build short and highly discriminative codes because they are easier to learn for the model, as validated by our experiments in Sec. 4.4.2. For example, the tokenized entity name \\(\\Phi(t_{\\mathrm{Q358813}}) = \\Phi(\\text{\"Black-and-white colobus}\")\\) counts \\(L_{\\mathrm{Q358813}} = 8\\) tokens, but clearly not all 8 tokens are important to make this entity discriminative compared to all other existing entities. Hence, we choose to represent each entity with the bare minimum, removing all the clutter which is not only non-discriminative but also adds noise. We achieve this by selecting the most discriminative and rarest tokens" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17315" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.522 + ], + "angle": 0, + "content": "within the tokenized entity name. Specifically, we compute the frequency \\( f_{v} \\) of each token value \\( v \\in [1, V] \\) in the vocabulary over the entire corpus of tokenized entity names \\( \\{\\Phi(t_{e})\\}_{e \\in \\mathcal{E}} \\). We have \\( f_{v} = \\frac{n_{v}}{\\sum_{u=1}^{V} n_{u}} \\) where \\( n_{v} \\) is the number of times \\( v \\) appears in \\( \\{\\Phi(t_{e})\\}_{e \\in \\mathcal{E}} \\). We create an ALD code \\( c_{e} \\) for each entity by keeping only the \\( (L-1) \\) tokens with the lowest frequencies and discarding the other ones. For example for entity Q358813, the 3 tokens with the lowest frequencies are \"col\", \"ob\" and \"white\". Interestingly, these 3 most discriminative tokens appear at the end of the code for GER-CAPTION. By contrast, they appear right at the beginning of the code for GER-ALD and they constitute the only tokens to be decoded by the model, which intuitively explains the improved performance of GER-ALD codes, as analyzed later in Sec. 4.4.2 especially when entities have long names (see Fig. 4). Finally an interesting by-product of using short codes is that they are faster to decode (the complexity of decoding is \\( \\mathcal{O}(L^{2}) \\)) and require less memory footprint to store. Unambiguous codes. Note that several entities might share the same least frequent \\( (L-1)^{\\text{th}} \\) tokens. In this case their code are exactly identical up to the \\( (L-1)^{\\text{th}} \\) token. We use the last \\( L^{\\text{th}} \\) token to ensure that each entity has a unique code: we greedily assign the last code token \\( c_{L}^{e} \\) to the next least frequent word of the tokenized entity name until the code \\( c_{e} \\) is different from all existing codes. If this still fails to create a unique code, we assign \\( c_{L}^{e} \\) to a random token value \\( v' \\) so that the resulting code is unique. With code length \\( L = 4 \\), only 0.5% of the entities use a random token value." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.531, + 0.182, + 0.547 + ], + "angle": 0, + "content": "3.3. 
Training" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.554, + 0.47, + 0.616 + ], + "angle": 0, + "content": "In this section, we describe the model used to decode entity codes from an input image-text pair. Importantly, we also introduce our entity-based pre-training to condition the generative model to the task of entity recognition." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.616, + 0.473, + 0.814 + ], + "angle": 0, + "content": "Auto-regressive generative models. We build upon GIT [45], an auto-regressive image-to-text generative model. The query image-text pair \\((x_v, x_t)\\) is transformed into a set of \\(d\\)-dimensional embeddings using a visual encoder for \\(x_v\\) and the text tokenizer \\(\\Phi(.)\\) for \\(x_t\\). The resulting output is represented by \\(\\mathbf{X}_v \\in \\mathbb{R}^{N_v \\times d}\\) (resp. \\(\\mathbf{X}_t \\in \\mathbb{R}^{N_t \\times d}\\)) for image (resp. text) tokens. We then input \\(\\mathbf{X}_v\\) and \\(\\mathbf{X}_t\\) to a decoder network \\(g(.)\\) whose task is to decode the next code token \\(c_i^e\\), conditioned on the previous tokens \\(c_j^e\\). Each code token value \\(v\\) in \\(\\mathbb{I}[1, V]\\) maps to a learnable \\(d\\)-dimensional vector \\(\\mathbf{Y}_v\\) (gathered in the embedding matrix \\(\\mathbf{Y} \\in \\mathbb{R}^{(V+1) \\times d}\\) where \\(\\mathbf{Y}_0\\) corresponds to the \"beginning of code\" token). We train with a language modeling loss:" + }, + { + "type": "equation", + "bbox": [ + 0.136, + 0.815, + 0.414, + 0.856 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} ^ {e} = \\frac {1}{L} \\sum_ {i = 1} ^ {L} \\ell (c _ {i} ^ {e}, g ([ \\mathbf {X} _ {v}; \\mathbf {X} _ {t}; \\mathbf {Y} _ {0}; \\mathbf {Y} _ {c _ {0 < j < i}} ^ {e} ])\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.472, + 0.903 + ], + "angle": 0, + "content": "where \\([;]\\) corresponds to the concatenation operation in the first dimension and \\(\\ell\\) is the softmax cross-entropy loss with label-smoothing [27]. We average \\(\\mathcal{L}^e\\) over a mini-batch and" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.182 + ], + "angle": 0, + "content": "learn the weights of the visual encoder, decoder \\( g(.) \\) and embedding matrix \\( \\mathbf{Y} \\) through back-propagation. When decoding, we use beam search to obtain the best predicted entity coded. We find that we do not need to constrain the beam search to existing codes since more than \\( 99\\% \\) of the top-1 predictions are valid codes for converged GER models." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.183, + 0.893, + 0.365 + ], + "angle": 0, + "content": "Entity-based pre-training. Common auto-regressive models such as GIT [45] or PaLI [5] are pre-trained for descriptive captioning. As shown in Tab. 5 and Fig 9 of the Appendix, they generalize poorly to entity recognition. This is because of the task discrepancy between predicting a descriptive caption and predicting an entity name. In order to condition our models better for entity recognition, we propose to collect a significant number of entity-based pretraining images, each associated with a Wikipedia entity instead of a generic caption. However, such an entity-based pretraining dataset does not exist. We create it in an automatic way, without any human supervision." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.365, + 0.895, + 0.668 + ], + "angle": 0, + "content": "To do so, we leverage existing large-scale image-caption datasets [37, 38]: unless specified otherwise we use WebLI [5]. For each Wikipedia entity, we retrieve in WebLI the image-caption pairs that best represent this entity and replace their original captions by this entity name [15, 23]. Specifically, we embed the 6M entity names of OVEN with a semantic text encoder [32] and find the top-\\(k\\) most similar captions in WebLI. We retrieve their corresponding images and replace their original captions by the considered entity name. We ensure that no image is assigned to multiple entities to avoid instability during training. We vary the number of retrieved images \\(k\\) per entity from 2 to 100 to produce pre-training datasets of different sizes: from 11M up to 55M images (see Fig. 6). We denote by Entity-WebLI (resp. Entity-LAION) the resulting dataset used for entity-based pretraining, built from WebLI (resp. LAION [38]). This way of creating pre-training data is akin to the query generation techniques used for generative retrieval in NLP [46]. However, rather than generating a synthetic input, we simply retrieve input images from a large-scale dataset." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.679, + 0.608, + 0.694 + ], + "angle": 0, + "content": "3.4. Baselines" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.703, + 0.892, + 0.885 + ], + "angle": 0, + "content": "We compare our method to the following different baselines. Hierarchical classification. Solving million-scale entity recognition with classification is unpractical due to the very large number of classes. A workaround is to use hierarchical classifiers. As OVEN does not come with hierarchical labels we obtain a 3-level hierarchy through k-means of the 6M entity names encoded with sentence-T5 [28]. We train a multi-class classifier for each parent node in the hierarchy. To avoid training a huge number of different classification matrices, we learn a generic classifier matrix per level which is modified by learnable small modifiers depending on the path in the hierarchy." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.886, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Dual encoders. Another typical workaround to classifica" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "17316" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.256 + ], + "angle": 0, + "content": "tion is to rely on deep metric learning approaches [36] such as Noise Contrastive Estimation [11] and its InfoNCE variant [29] as used in popular dual encoder approaches [16, 32]. Dual encoders learn a unified image-text feature space with separate encoders, thereby facilitating efficient nearest neighbor searches for recognition. We use CLIP-L/14 [32]. Visual matching. We also experiment with pure visual matching baselines. We use off-the-shelf CLIP-L/14 visual encoder and Entity-WebLI (55M) dataset as the memory. We use \\( k = 500 \\) for nearest neighbor search with majority voting as it obtains the best results on OVEN val set." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.258, + 0.468, + 0.287 + ], + "angle": 0, + "content": "Captioning. We compare to Git-Large [45] or PaLI [5] image-to-text auto-regressive captioning models." 
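Returning to the entity-based pre-training of Sec. 3.3, the sketch below illustrates how an Entity-WebLI-style dataset could be assembled: for each entity name, retrieve the most similar captions, take the corresponding images, and relabel them with the entity name while keeping every image assigned to a single entity. The `embed_text` helper, the field names and the brute-force similarity search are illustrative assumptions; at WebLI scale one would use an approximate nearest-neighbour index.

```python
import numpy as np

def build_entity_pretraining_set(entity_names, captions, image_ids, embed_text, k=5):
    """entity_names: list of entity name strings; captions/image_ids: parallel lists
    from an image-caption corpus; embed_text: assumed to return L2-normalised text
    embeddings of shape (n, d)."""
    E = embed_text(entity_names)              # (num_entities, d)
    C = embed_text(captions)                  # (num_captions, d)
    sims = E @ C.T                            # cosine similarities (unit-norm inputs)
    assigned, dataset = set(), []
    for e_idx, name in enumerate(entity_names):
        for c_idx in np.argsort(-sims[e_idx])[:k]:   # k most similar captions
            img = image_ids[c_idx]
            if img in assigned:               # each image is used for one entity only
                continue
            assigned.add(img)
            dataset.append((img, name))       # original caption replaced by the entity name
    return dataset
```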
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.288, + 0.47, + 0.471 + ], + "angle": 0, + "content": "GER-baselines: alternative code creation strategies. We compare GER-ALD, i.e. the best variant of GER, with several alternatives. First, GER-ATOMIC refers to using atomic, completely unstructured codes, i.e. each code token \\( c_{i}^{e} \\) is randomly drawn from \\( [1, V]^{L} \\) [42]. Second, we consider two alternatives using semantically structured codes: (i) GER-HKC where we embed the entity names with a pretrained text encoder before applying hierarchical k-means clustering on the resulting embeddings [42] and (ii) GER-CAPTION where we create a code by tokenizing the entity name with \\( \\Phi(\\cdot) \\) [6, 12]. Details on the baselines are in Appendix Sec. 6.4." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.483, + 0.21, + 0.5 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.508, + 0.47, + 0.555 + ], + "angle": 0, + "content": "In this section, we detail our experimental setup, compare our method with state of the art and baselines, and finally present thorough analyses on code creation and pretraining." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.561, + 0.275, + 0.577 + ], + "angle": 0, + "content": "4.1. Experimental setting" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.584, + 0.468, + 0.765 + ], + "angle": 0, + "content": "OVEN dataset consists of 6,063,945 different entities [12]. We evaluate the models on the validation and test splits, by reporting the harmonic mean (HM) of top-1 accuracy scores between \"seen\" and \"unseen\" entities. Seen are entities present in the OVEN training set. Unseen entities are a subset of entities among the ones not present in the training set. The models are evaluated on a total of 3192 entities (1721 for seen and 1471 for unseen) for validation and 15888 entities (8355 for seen and 7533 for unseen) for test. We call the entities that the model is evaluated on by \"positive\" entities (i.e. the union of the 3192 validation and 15888 test entities) and all other entities by \"negative\" entities." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.766, + 0.468, + 0.871 + ], + "angle": 0, + "content": "Pretraining and finetuning. Unless specified otherwise, we pretrain our models on the entity-WebLI dataset, which we create considering all 6M entity names as described in Sec. 3.3. After this entity-based pretraining, the models are finetuned on OVEN training set which consists only of the \"seen\" entities. All implementation details are in Sec. 6 in Appendix and code is released in the ScENIC library [7]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Preventing data leakage. We remove pretraining images from Entity-WebLI and Entity-LAION with a cosine simi" + }, + { + "type": "table", + "bbox": [ + 0.499, + 0.089, + 0.895, + 0.268 + ], + "angle": 0, + "content": "
Method#par.(B)PretrainingOVEN test
dataset#imgsHMseenunseen
Dual encoder approaches
CLIPViT-L140.42OpenAI400M5.25.64.9
CLIPfusionViT-L140.88OpenAI400M8.433.64.8
CLIP2CLIPViT-L140.86OpenAI400M11.512.610.5
Captioning approaches
GiT-Large0.40WebLI100M7.017.64.3
PaLI-3B3WebLI1B9.119.16.0
PaLI-17B17WebLI1B16.028.311.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.501, + 0.27, + 0.679, + 0.283 + ], + "angle": 0, + "content": "Generative entity recognition" + }, + { + "type": "table", + "bbox": [ + 0.501, + 0.284, + 0.892, + 0.319 + ], + "angle": 0, + "content": "
GER-ALD‡ (Ours)0.40Entity-LAION41M20.929.116.3
GER-ALD (Ours)0.40Entity-WebLI55M22.731.517.7
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.324, + 0.894, + 0.422 + ], + "angle": 0, + "content": "Table 1. Comparison with state-of-the-art approaches on OVEN entity test split. We report the harmonic mean (HM) of the seen and unseen splits (top-1 accuracy) after finetuning on OVEN training set. Numbers are taken from [12] except methods based on GiT-Large which are run by us. We indicate the total number of parameters of each model (\"# par.\") in billion and the pretraining dataset details. \\(\\ddagger\\): use only publicly available data." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.427, + 0.894, + 0.503 + ], + "angle": 0, + "content": "larity (with CLIP-L/14 visual features) above 0.95 with any of the OVEN test or val images. We chose a 0.95 conservative threshold by looking at some examples: similarity 0.95 corresponds to conceptually similar images but clearly not duplicates (see Fig. 8 in Appendix)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.513, + 0.816, + 0.529 + ], + "angle": 0, + "content": "4.2. Comparison with the state of the art" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.536, + 0.892, + 0.657 + ], + "angle": 0, + "content": "In Tab. 1, we compare the performance of GER-ALD, our best GER variant, on the OVEN entity benchmark with previously published numbers after finetuning on the OVEN training set. We see that our method outperforms previously proposed approaches by significant margins. Notably, GER-ALD improves over the captioning model PALI-17B by \\(+6.8\\) top-1 HM test accuracy (a relative improvement of \\(43\\%\\)) while using \\(42\\times\\) less parameters." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.666, + 0.744, + 0.683 + ], + "angle": 0, + "content": "4.3. Comparison with baselines" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.892, + 0.75 + ], + "angle": 0, + "content": "In Tab. 2, we compare GER-ALD with the different baselines described in Sec. 3.4. All baselines use exactly the same pretraining dataset entity-based WebLI (55M) and model architectures of comparable sizes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Comparing GER to different paradigms. We see in Tab. 2 that GER outperforms strong captioning, dual-encoder, visual matching and hierarchical classification baselines, affirming its advantage in tackling web-scale visual entity recognition. Our superior performance compared to dual encoders aligns with previous works observing that CLIP struggles for fine-grained recognition [12, 15]. Due to query image and entity name similarities being captured only through a vector dot product, potentially fine-grained interactions are missed. Also, GER offers significant advan" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17317" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.078, + 0.089, + 0.473, + 0.285 + ], + "angle": 0, + "content": "
MethodEntity-based pretraining+ finetuning on seen
HMseenunseenHMseenunseen
Dual encoders9.28.99.416.324.312.3
Visual matching16.215.517.116.415.717.2
Captioning13.213.113.316.825.912.5
Hierarchical classif.14.714.814.621.829.617.2
Generative entity recognition
GER-ATOMIC15.915.316.720.126.216.3
GER-CAPTION14.316.512.620.726.816.9
GER-HKC15.815.516.021.025.217.9
GER-ALD17.718.317.222.731.517.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.291, + 0.472, + 0.361 + ], + "angle": 0, + "content": "Table 2. Baseline comparisons. All baselines use exactly the same pretraining dataset Entity-WebLI (55M) and architectures of comparable number of parameters (\\(\\sim\\)400M). All numbers are obtained with finetuning on seen split after entity-based pretraining. We report the Harmonic Mean of top-1 accuracy on OVEN test." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.373, + 0.47, + 0.418 + ], + "angle": 0, + "content": "tages over dual encoders: its computational complexity is not a function of entity set size and it does not require to store entity dense embeddings." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.418, + 0.471, + 0.676 + ], + "angle": 0, + "content": "Different GER variants. In Tab. 2, we compare different variants of GER: one variant using unstructured codes (GER-ATOMIC) and three variants using semantically-structured codes: GER-CAPTION, GER-HKC and GER-ALD. We observe that GER-ALD is the best performing variant, both after entity-based pretraining and after finetuning on the OVEN seen entities. Compared to GER-CAPTION, GER-ALD use codes that are more discriminative and compact, which improves the performance particularly for entities with long names (see Sec. 4.4.2). Compared to GER-ATOMIC, GER-ALD codes yield a semantic structure which is crucial for million-scale label-space as shown in Sec. 4.4.1. GER-HKC model also gets strong performance but relies on an off-the-shelf semantic text encoder which makes the approach more complex and costly compared to GER-ALD. GER-HKC is a first step towards learning codes and we hope future works will propose original and better code creation strategies [41]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.684, + 0.327, + 0.7 + ], + "angle": 0, + "content": "4.4. Analysis and ablation study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.707, + 0.47, + 0.754 + ], + "angle": 0, + "content": "In this section, unless specified otherwise, we report the accuracy on the OVEN validation set [12] evaluated after pretraining on Entity-WebLI (27M), i.e. no OVEN finetuning." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.761, + 0.338, + 0.775 + ], + "angle": 0, + "content": "4.4.1 Semantic versus atomic codes" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.469, + 0.87 + ], + "angle": 0, + "content": "In Fig. 3 (and Appendix Tab. 6), we report the relative improvement of semantically-structured codes (GER-ALD) compared to unstructured codes (GER-ATOMIC). We vary pretraining data size, model capacity and label-space size. A relative improvement of \\(100\\%\\) means that the performance of GER-ALD doubles compared to GER-ATOMIC." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Limited pretraining data. In Fig. 3 (left), we see that semantic codes outperform atomic codes when the amount of" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.092, + 0.648, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.652, + 0.092, + 0.772, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.775, + 0.092, + 0.882, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.22, + 0.892, + 0.304 + ], + "angle": 0, + "content": "Figure 3. Semantic vs atomic codes. 
We report the relative improvement in \\(\\%\\) of GER-ALD compared to GER-ATOMIC in 3 scenarios: (i) limited pretraining data, (ii) limited model capacity and (iii) massive-scale label-space. Plots share a common experiment shown by which uses a pretraining dataset size of \\(27M\\), Large model and 6M entity set. The setting reported in Tab. 2 is \\(\\star\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.311, + 0.892, + 0.401 + ], + "angle": 0, + "content": "data available for pretraining diminishes. In fact, the results reported in Tab. 2 corresponds to the most favorable scenario for GER-ATOMIC with 55M pretraining datapoints (represented by \\(\\star\\) in Fig. 3). The relative improvement in this case is still of \\(14\\%\\) while it grows to more than \\(1000\\%\\) when the amount of data is reduced by \\(5\\times\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.402, + 0.892, + 0.478 + ], + "angle": 0, + "content": "Limited model capacity. In Fig. 3 (middle), we see that the model struggles to learn unstructured codes when its capacity is reduced. When considering the small version of our model (114M parameters), the performance with atomic codes is very poor: 0.7 top-1 accuracy." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.478, + 0.892, + 0.689 + ], + "angle": 0, + "content": "Web-scale label-space. In Fig. 3 (right), we vary the number of entities for pretraining. The \"positive\" entities (see Sec. 4.1) are always included in the pretraining set and the amount of \"negative\" entities is increased, effectively acting as distractors. First, we see in Fig. 3 (right) that for relatively small-scale label-space \\((\\leq 100k)\\), the benefit of having semantic codes versus atomic is small. In this regime we find that the model can memorize all the entities without the need for semantic structure between them. This aligns with the findings of DSI [42]. We evaluate GER further in small label-spaces in Sec. 4.5. However, we see that in million-scale label-space regime, semantic structure becomes important and significantly improves the performance compared to atomic codes: \\(+26\\%\\) relative improvement." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.892, + 0.794 + ], + "angle": 0, + "content": "Overall, we find that GER-ATOMIC fail to learn unstructured codes when the amount of pretraining data or architecture capacity are reduced, or when the label-space increases to million-scale. Unlike GER-ATOMIC, GER-ALD succeed in these scenarios thanks to the semantic structure easing the learning. Next, we analyze how GER-ALD improves over another type of semantic codes: GER-CAPTION codes." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.806, + 0.753, + 0.821 + ], + "angle": 0, + "content": "4.4.2 ALD versus captioning codes" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.894, + 0.901 + ], + "angle": 0, + "content": "We analyze why unambiguous, language-based and discriminative codes (GER-ALD) are more effective for entity recognition than directly decoding the entity name (GER-CAPTION). In Fig. 
5 (left), we report the performance of GER-ALD and GER-CAPTION when varying the length \\( L \\) of" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17318" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.092, + 0.258, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.261, + 0.092, + 0.468, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.47, + 0.092, + 0.659, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.092, + 0.892, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.193, + 0.89, + 0.222 + ], + "angle": 0, + "content": "Figure 4. Accuracy per entity name length for GER-ALD versus GER-CAPTION codes. (left): Accuracy averaged per entity name length. (right): Qualitative examples of predictions for long entity names. Code tokens are symbolized between brackets." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.234, + 0.273, + 0.367 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.234, + 0.464, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.369, + 0.47, + 0.439 + ], + "angle": 0, + "content": "Figure 5. ALD versus captioning codes. (left): Effect of different code lengths for GER-ALD and GER-CAPTION codes. (right): Cumulative distribution function (CDF) of (in green) the position of the least frequent token in the tokenized entity name and of (in pink) the length of tokenized entity name." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.446, + 0.47, + 0.552 + ], + "angle": 0, + "content": "the codes. Fixing a code length \\( L \\) to a caption corresponds to keeping only the first \\( L^{\\text{th}} \\) tokens of the entity name. In Fig. 5 (right), we report the cumulative distribution functions (CDF) of (i) the position within the tokenized entity name of the least frequent token among the entire corpus (as described in Sec. 3.2) and (ii) the total number of tokens in the tokenized entity name (\\( L_{e} \\) in the notations of Sec. 3.2))." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.553, + 0.471, + 0.839 + ], + "angle": 0, + "content": "Discriminative tokens versus number of tokens. We observe in Fig. 5 (left) that the performance of GER-CAPTION increases drastically from \\( L = 2 \\) to \\( L = 4 \\). At the same time, we see in Fig. 5 (right) that for \\( L = 4 \\), less than half of the entity names are considered in full while more than \\( 80\\% \\) of the GER-CAPTION codes contain the least frequent token of the entire tokenized name. This hints that what is important for language-based codes is not to describe the full entity name but to include its most discriminative part. We also observe that the performance of captioning increases only moderately from \\( L = 4 \\) to \\( L = 8 \\) even though the number of entities considered in full increases drastically from \\( 46.6\\% \\) to \\( 100\\% \\). This confirms our intuition that decoding all the entity name tokens does not have a major impact on the performance as long as the most discriminative tokens are decoded. Overall, these observations motivate the ALD design of keeping only the most discriminative tokens, which is shown in Fig. 5 to lead to improved performance compared to decoding the full tokenized entity name." 
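For reference, the two statistics behind the CDFs of Fig. 5 (right) can be sketched as follows, reusing the `tokenized` (entity to sub-word tokens) and `freq` (token to corpus frequency) mappings from the ALD sketch of Sec. 3.2; this reuse is an assumption of the illustration.

```python
import numpy as np

lengths, positions = [], []
for toks in tokenized.values():
    lengths.append(len(toks))                                      # L_e
    # 1-based position of the least frequent token within the tokenized name.
    positions.append(1 + int(np.argmin([freq[t] for t in toks])))

def cdf(values, x):
    """Fraction of entities whose value is at most x."""
    return float((np.asarray(values) <= x).mean())

# e.g. cdf(positions, 4) gives the fraction of entities whose GER-CAPTION code of
# length L = 4 already contains the least frequent token of the full name.
```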
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Effect of code length for GER-ALD. We see in Fig. 5 (left) that the performance of GER-ALD is the best for \\( L = 4 \\). With smaller code lengths, we need to resort to random tokens a lot to achieve unique codes (see Sec. 3.2), which deters the" + }, + { + "type": "table", + "bbox": [ + 0.5, + 0.231, + 0.682, + 0.317 + ], + "angle": 0, + "content": "
Selection strategyHM
Least frequent tokens14.4
Most frequent tokens12.3
First tokens12.0
Random tokens11.3
" + }, + { + "type": "table", + "bbox": [ + 0.714, + 0.231, + 0.88, + 0.317 + ], + "angle": 0, + "content": "
Tokens orderHM
Least frequent first14.4
Syntax order14.4
Random order13.0
Least frequent last12.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.321, + 0.892, + 0.363 + ], + "angle": 0, + "content": "Table 3. Ablation study of GER-ALD codes. (left) Word tokens selection. (right) Tokens order. All variants use \\( L = 4 \\). Default is in top rows. Non language-based GER-ATOMIC gets 11.4 top-1." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.376, + 0.892, + 0.572 + ], + "angle": 0, + "content": "performance. For example at \\( L = 2 \\), more than \\( 10\\% \\) of the entities use a random code token while this percentage decreases to \\( 0.5\\% \\) at \\( L = 4 \\). We also see that the performance of GER-ALD decreases for code length above \\( L = 4 \\), which hints that only the few most discriminative tokens are important while additional ones clutter the entity code. Interestingly we also observe in Fig. 5 (left) that when considering all the tokens, GER-ALD performance is slightly below that of GER-CAPTION. This might seem surprising since the same amount of information is present in both cases. However we find that when considering all the tokens, it is more difficult for the model to decode tokens ordered by frequencies than tokens ordered syntactically." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.573, + 0.892, + 0.785 + ], + "angle": 0, + "content": "Entities with long entity names. In Fig. 4 (left), we report the accuracy per entity name length for both GER-ALD and GER-CAPTION finetuned models. We see that the longer the entity name, the more GER-ALD improves over captioning. Longer entities tend to have more noise with key information further into the code. We also show in Fig. 4 qualitative examples of entities with long entity names (more in Fig. 12 in Appendix). In the left example, we see that GER-ALD use the token combination [col][ob] to represent the semantic concept of colobus monkey species. The last token is used to efficiently differentiate between sub-species of colobus. This compact and discriminative way of encoding the entity allows GER-ALD to successfully predict this entity whereas GER-CAPTION fails to generate the entity tokenized name." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.793, + 0.726, + 0.808 + ], + "angle": 0, + "content": "4.4.3 Creating codes with ALD" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Least frequent tokens. In Tab. 3 (left), we validate our choice of selecting the least frequent tokens by evaluating 3 alternatives: random choice, most frequent tokens and first-appearing tokens in tokenized entity name. We see that these alternative strategies hurt the performance significantly. Qualitative examples in Appendix Fig. 11 show that" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17319" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.078, + 0.089, + 0.461, + 0.173 + ], + "angle": 0, + "content": "
DatasetCodesHM
WebLIWebLI caption1.8
Entity-WebLI (55M)WebLI caption12.9 (+11.1)
Entity-WebLI (55M)Entity name14.8 (+1.9)
Entity-WebLI (55M)ALD17.5 (+2.7)
" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.178, + 0.47, + 0.22 + ], + "angle": 0, + "content": "Figure 6. Entity-based pretraining ablation. (left): Validation OVEN accuracy. (right): Examples of original WebLI captions versus corresponding OVEN entity names." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.224, + 0.291, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.228, + 0.455, + 0.326 + ], + "angle": 0, + "content": "Figure 7. Pretraining. We vary the size of the pretraining dataset by changing the amount of retrieved examples from WebLI for each OVEN entity (see Sec. 3.3)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.352, + 0.468, + 0.412 + ], + "angle": 0, + "content": "the kept tokens are less semantic and discriminative compared to GER-ALD strategy of keeping the least frequent tokens. Note that all these variants are at least as good as GER-ATOMIC (11.4 top-1) which is not based on language at all." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.412, + 0.468, + 0.517 + ], + "angle": 0, + "content": "Decoding order. In Tab. 3 (right), we vary the order of the first \\( L - 1 \\) tokens in GER-ALD codes. Instead of decoding tokens from least to most frequent, we evaluate most to least frequent, syntax order and random order. Note that the selected tokens are the same in all variants, only their order changes. We see that both \"least frequent first\" and \"syntax\" orders achieve the best of performance." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.53, + 0.304, + 0.545 + ], + "angle": 0, + "content": "4.4.4 Entity-based pretraining" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.554, + 0.468, + 0.78 + ], + "angle": 0, + "content": "Entity-based pretraining. In Fig. 6, we analyze why our entity-based pretraining improves over the standard captioning pretraining of PaLI or GiT models. First, we see that our method of selecting WebLI data relevant to OVEN entities drastically improves the performance (+11.1 in Fig. 6 (left)). This is because, by design, we select image-text pairs from WebLI that have captions similar to OVEN entity names. Hence, this data is directly relevant for the OVEN entity recognition benchmark. Second, we see that replacing the original WebLI caption with its corresponding entity name from OVEN leads to superior performance (+1.9). We see in the qualitative examples of Fig. 6 (right) that original captions contain a lot of descriptive information not directly relevant to the entity. Lastly, we confirm that using GER-ALD codes is better (+2.7) than tokenized entity name." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Dataset size. In Fig. 7, we evaluate the effect of the pretraining dataset size for GER models. We control the dataset size by varying the amount of retrieved examples from WebLI for each of the OVEN entities (see Sec. 3.3). We see in Fig. 7 that GER-ALD, GER-CAPTION and GER-ATOMIC benefit greatly from more data and do not seem to have reached saturation yet. As analyzed in Sec. 4.4.1, GER-ATOMIC fails when the amount of pretraining data decreases." + }, + { + "type": "table", + "bbox": [ + 0.499, + 0.089, + 0.889, + 0.25 + ], + "angle": 0, + "content": "
MethodImageNet-LTWebVision
Classif. MLP74.380.9
GER-ATOMIC L = 280.884.7
GER-ALD L = 280.984.8
GER-ATOMIC L = 1 (~ Classif. MAP)81.084.8
Previously published numbers
NCR [13]-76.8
CurrNet [10]-79.3
PEL [40]78.3-
MAM [14]†82.383.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.253, + 0.892, + 0.281 + ], + "angle": 0, + "content": "Table 4. Evaluation of classification models and GER on small-scale label-spaces. \\(\\dagger\\) indicates the use of additional data." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.288, + 0.715, + 0.302 + ], + "angle": 0, + "content": "4.5. Link with classification" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.312, + 0.892, + 0.568 + ], + "angle": 0, + "content": "A typical way of tackling visual entity recognition is by training a classifier into the number of entities [35]. This is not a viable solution for web-scale problems such as OVEN where a single fully-connected layer for a 6M classes has an enormous parameter count of 4.6B. In this section, we evaluate GER in cases where learning a classification model is a feasible choice (smaller number of classes). Classification can be cast in our GER framework simply by setting \\( L = 1 \\) and \\( V = |\\mathcal{E}| = \\) number of classes (see Sec. 3.1), making it a special case of atomic codes with \\( L = 1 \\). Since the decoder decodes a single token, it is equivalent to a multi-layer Multihead Attention Pooling (MAP) head [21, 48]. In Tab. 4, we consider two challenging classification datasets: long-tailed ImageNet-LT [24] and noisy Webvision [22]. We evaluate GER-{ALD, ATOMIC} and a classification baseline using multi-layer perceptron (MLP) on averaged-pooled patch tokens. Implementation details are in Sec 6.3 in Appendix." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.569, + 0.892, + 0.735 + ], + "angle": 0, + "content": "We see in Tab. 4 that using GER-ATOMIC instead of standard MLP improves significantly the performance of the classification model (74.3 versus 81.0 for ImageNet-LT). We also observe that GER-ATOMIC and GER-ALD have comparable performance in this relatively small label-space regime (1k classes). As a matter of fact, this achieves state-of-the-art accuracy for both datasets (when no additional external data is used). This shows that GER framework not only excels for large-scale scenarios, but also works well in datasets with smaller number of visual entities, making GER a general framework for visual entity recognition." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.751, + 0.619, + 0.765 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.775, + 0.892, + 0.887 + ], + "angle": 0, + "content": "In this work, we propose a novel generative framework for web-scale visual entity recognition. We represent each entity by a compact, discriminative and semantic code that a generative auto-regressive model learns to decode. In future work, we will explore ways of creating better entity codes by leveraging additional information: either from the Wikipedia page such as the description of the entity and its attached image or also by using external tools." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17320" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Rahul Agrawal, Archit Gupta, Yashoteja Prabhu, and Manik Varma. Multi-label learning with millions of labels: Recommending advertiser bid phrases for web pages. 
In Proceedings of the 22nd international conference on World Wide Web, pages 13-24, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.185, + 0.472, + 0.267 + ], + "angle": 0, + "content": "[2] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in Neural Information Processing Systems, 35:23716-23736, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.269, + 0.47, + 0.311 + ], + "angle": 0, + "content": "[3] Samy Bengio, Krzysztof Dembczynski, Thorsten Joachims, Marius Kloft, and Manik Varma. Extreme Classification (Dagstuhl Seminar 18291). Dagstuhl Reports, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.312, + 0.47, + 0.352 + ], + "angle": 0, + "content": "[4] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In ECCV, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.354, + 0.47, + 0.422 + ], + "angle": 0, + "content": "[5] Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, et al. Pali: A jointly-scaled multilingual language-image model. *ICLR*, 2023. 1, 2, 4, 5, 11, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.424, + 0.47, + 0.465 + ], + "angle": 0, + "content": "[6] Nicola De Cao, Gautier Izacard, Sebastian Riedel, and Fabio Petroni. Autoregressive entity retrieval. arXiv preprint arXiv:2010.00904, 2020. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.467, + 0.47, + 0.521 + ], + "angle": 0, + "content": "[7] Mostafa Dehghani, Alexey Gritsenko, Anurag Arnab, Matthias Minderer, and Yi Tay. Scenic: A JAX library for computer vision research and beyond. arXiv preprint arXiv:2110.11403, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.523, + 0.47, + 0.564 + ], + "angle": 0, + "content": "[8] Mark Everingham, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes (voc) challenge. *IJCV*, 88, 2010. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.565, + 0.47, + 0.619 + ], + "angle": 0, + "content": "[9] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In CVPR, 2004. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.621, + 0.47, + 0.676 + ], + "angle": 0, + "content": "[10] Sheng Guo, Weilin Huang, Haozhi Zhang, Chenfan Zhuang, Dengke Dong, Matthew R Scott, and Dinglong Huang. CurriculumNet: Weakly supervised learning from large-scale web images. In ECCV, pages 135-150, 2018. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.677, + 0.47, + 0.746 + ], + "angle": 0, + "content": "[11] Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics. JMLR Workshop and Conference Proceedings, 2010. 1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.747, + 0.47, + 0.815 + ], + "angle": 0, + "content": "[12] Hexiang Hu, Yi Luan, Yang Chen, Urvashi Khandelwal, Mandar Joshi, Kenton Lee, Kristina Toutanova, and Ming-Wei Chang. 
Open-domain visual entity recognition: Towards recognizing millions of wikipedia entities. ICCV, 2023. 1, 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.817, + 0.47, + 0.858 + ], + "angle": 0, + "content": "[13] Ahmet Iscen, Jack Valmadre, Anurag Arnab, and Cordelia Schmid. Learning with neighbor consistency for noisy labels. In CVPR, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.859, + 0.47, + 0.9 + ], + "angle": 0, + "content": "[14] Ahmet Iscen, Alireza Fathi, and Cordelia Schmid. Improving image recognition by retrieving from web-scale image-text data. CVPR, 2023. 8" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[15] Ahmet Iscen, Mathilde Caron, Alireza Fathi, and Cordelia Schmid. Retrieval-enhanced contrastive vision-text models. *ICLR*, 2024. 1, 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[16] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[17] Aditya Khosla, Nityananda Jayadevaprakash, Bangpeng Yao, and Li Fei-Fei. Novel dataset for fine-grained image categorization. In First Workshop on Fine-Grained Visual Categorization, CVPR, 2011. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.892, + 0.289 + ], + "angle": 0, + "content": "[18] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.291, + 0.892, + 0.333 + ], + "angle": 0, + "content": "[19] Taku Kudo. Subword regularization: Improving neural network translation models with multiple subword candidates. arXiv preprint arXiv:1804.10959, 2018.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.335, + 0.892, + 0.389 + ], + "angle": 0, + "content": "[20] Taku Kudo and John Richardson. Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing. arXiv preprint arXiv:1808.06226, 2018. 2, 3, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.391, + 0.892, + 0.459 + ], + "angle": 0, + "content": "[21] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In International conference on machine learning, 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.461, + 0.892, + 0.517 + ], + "angle": 0, + "content": "[22] Wen Li, Limin Wang, Wei Li, Eirikur Agustsson, and Luc Van Gool. Webvision database: Visual learning and understanding from web data. arXiv preprint arXiv:1708.02862, 2017. 2, 8, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.519, + 0.892, + 0.573 + ], + "angle": 0, + "content": "[23] Haotian Liu, Kilho Son, Jianwei Yang, Ce Liu, Jianfeng Gao, Yong Jae Lee, and Chunyuan Li. Learning customized visual models with retrieval-augmented knowledge. In CVPR, 2023. 
2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.575, + 0.892, + 0.617 + ], + "angle": 0, + "content": "[24] Ziwei Liu, Zhongqi Miao, Xiaohang Zhan, Jiayun Wang, Boqing Gong, and Stella X. Yu. Large-scale long-tailed recognition in an open world. In CVPR, 2019. 2, 8, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.619, + 0.892, + 0.686 + ], + "angle": 0, + "content": "[25] Sanket Vaibhav Mehta, Jai Gupta, Yi Tay, Mostafa Dehghani, Vinh Q Tran, Jinfeng Rao, Marc Najork, Emma Strubell, and Donald Metzler. Dsi++: Updating transformer memory with new documents. arXiv preprint arXiv:2212.09744, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.689, + 0.892, + 0.757 + ], + "angle": 0, + "content": "[26] Anshul Mittal, Kunal Dahiya, Shreya Malani, Janani Ramaswamy, Seba Kuruvilla, Jitendra Ajmera, Keng-hao Chang, Sumeet Agarwal, Purushottam Kar, and Manik Varma. Multi-modal extreme classification. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.759, + 0.892, + 0.801 + ], + "angle": 0, + "content": "[27] Rafael Müller, Simon Kornblith, and Geoffrey E Hinton. When does label smoothing help? Advances in neural information processing systems, 32, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.802, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[28] Jianmo Ni, Gustavo Hernández Ábrego, Noah Constant, Ji Ma, Keith B Hall, Daniel Cer, and Yinfei Yang. Sentence-t5: Scalable sentence encoders from pre-trained text-to-text models. arXiv preprint arXiv:2108.08877, 2021. 4, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.859, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[29] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. 1, 5" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "17321" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.119 + ], + "angle": 0, + "content": "[30] OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023.1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.177 + ], + "angle": 0, + "content": "[31] Ronak Pradeep, Kai Hui, Jai Gupta, Adam D Lelkes, Honglei Zhuang, Jimmy Lin, Donald Metzler, and Vinh Q Tran. How does generative retrieval scale to millions of passages? arXiv preprint arXiv:2305.11841, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.247 + ], + "angle": 0, + "content": "[32] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.25, + 0.469, + 0.318 + ], + "angle": 0, + "content": "[33] Shashank Rajput, Nikhil Mehta, Anima Singh, Raghunanandan H Keshavan, Trung Vu, Lukasz Heldt, Lichan Hong, Yi Tay, Vinh Q Tran, Jonah Samost, et al. Recommender systems with generative retrieval. arXiv preprint arXiv:2305.05065, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.321, + 0.469, + 0.361 + ], + "angle": 0, + "content": "[34] Stephen Robertson, Hugo Zaragoza, et al. The probabilistic relevance framework: Bm25 and beyond. 
Foundations and Trends® in Information Retrieval, 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.363, + 0.469, + 0.432 + ], + "angle": 0, + "content": "[35] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 2015. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.434, + 0.469, + 0.474 + ], + "angle": 0, + "content": "[36] Florian Schroff, Dmitry Kalenichenko, and James Philbin. Facenet: A unified embedding for face recognition and clustering. In CVPR, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.477, + 0.469, + 0.546 + ], + "angle": 0, + "content": "[37] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.548, + 0.469, + 0.629 + ], + "angle": 0, + "content": "[38] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. arXiv preprint arXiv:2210.08402, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.632, + 0.469, + 0.673 + ], + "angle": 0, + "content": "[39] Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.676, + 0.469, + 0.716 + ], + "angle": 0, + "content": "[40] Jiang-Xin Shi, Tong Wei, Zhi Zhou, Xin-Yan Han, Jie-Jing Shao, and Yu-Feng Li. Parameter-efficient long-tailed recognition. arXiv preprint arXiv:2309.10019, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.718, + 0.469, + 0.773 + ], + "angle": 0, + "content": "[41] Weiwei Sun, Lingyong Yan, Zheng Chen, Shuaiqiang Wang, Haichao Zhu, Pengjie Ren, Zhumin Chen, Dawei Yin, Maarten Rijke, and Zhaochun Ren. Learning to tokenize for generative retrieval. NeurIPS, 2023. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.469, + 0.843 + ], + "angle": 0, + "content": "[42] Yi Tay, Vinh Tran, Mostafa Dehghani, Jianmo Ni, Dara Bahri, Harsh Mehta, Zhen Qin, Kai Hui, Zhe Zhao, Jai Gupta, et al. Transformer memory as a differentiable search index. Advances in Neural Information Processing Systems, 2022. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[43] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In CVPR, 2018. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.133 + ], + "angle": 0, + "content": "[44] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[45] Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, and Lijuan Wang. Git: A generative image-to-text transformer for vision and language. arXiv preprint arXiv:2205.14100, 2022. 3, 4, 5, 11, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.206, + 0.892, + 0.261 + ], + "angle": 0, + "content": "[46] Yujing Wang, Yingyan Hou, Haonan Wang, Ziming Miao, Shibin Wu, Qi Chen, Yuqing Xia, Chengmin Chi, Guoshuai Zhao, Zheng Liu, et al. A neural corpus indexer for document retrieval. NeurIPS, 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.263, + 0.892, + 0.303 + ], + "angle": 0, + "content": "[47] Tobias Weyand, Andre Araujo, Bingyi Cao, and Jack Sim. Google landmarks dataset v2-a large-scale benchmark for instance-level recognition and retrieval. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.892, + 0.333 + ], + "angle": 0, + "content": "[48] Xiaohua Zhai, Alexander Kolesnikov, Neil Houlsby, and Lucas Beyer. Scaling vision transformers. In CVPR, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.335, + 0.892, + 0.389 + ], + "angle": 0, + "content": "[49] Yidan Zhang, Ting Zhang, Dong Chen, Yujing Wang, Qi Chen, Xing Xie, Hao Sun, Weiwei Deng, Qi Zhang, Fan Yang, et al. Irgen: Generative modeling for image retrieval. arXiv preprint arXiv:2303.10126, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.391, + 0.892, + 0.46 + ], + "angle": 0, + "content": "[50] Zheng Zhu, Guan Huang, Jiankang Deng, Yun Ye, Junjie Huang, Xinze Chen, Jiagang Zhu, Tian Yang, Dalong Du, Jiwen Lu, et al. Webface260m: A benchmark for million-scale deep face recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 
2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.46 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "17322" + } + ] +] \ No newline at end of file diff --git a/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/25668f69-d21b-4819-83f7-a45db4e2f055_origin.pdf b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/25668f69-d21b-4819-83f7-a45db4e2f055_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..314e0d2f53cb5ddc46078dcd21449e5b5669f289 --- /dev/null +++ b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/25668f69-d21b-4819-83f7-a45db4e2f055_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:378823329bcef56ca8742f81f6106e7f6cdc15a5bacda96d3a6fb7f81d9491c3 +size 613703 diff --git a/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/full.md b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0854238e90d31472fd19f9b7d90e58248c81ae93 --- /dev/null +++ b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/full.md @@ -0,0 +1,297 @@ +# A Generative Approach for Wikipedia-Scale Visual Entity Recognition + +Mathilde Caron + +Ahmet Iscen + +Alireza Fathi + +Cordelia Schmid + +Google Research + +# Abstract + +In this paper, we address web-scale visual entity recognition, specifically the task of mapping a given query image to one of the 6 million existing entities in Wikipedia. One way of approaching a problem of such scale is using dual-encoder models (e.g. CLIP), where all the entity names and query images are embedded into a unified space, paving the way for an approximate kNN search. Alternatively, it is also possible to re-purpose a captioning model to directly generate the entity names for a given image. In contrast, we introduce a novel Generative Entity Recognition (GER) framework, which given an input image learns to auto-regressively decode a semantic and discriminative "code" identifying the target entity. Our experiments demonstrate the efficacy of this GER paradigm, showcasing state-of-the-art performance on the challenging OVEN benchmark. GER surpasses strong captioning, dual-encoder, visual matching and hierarchical classification baselines, affirming its advantage in tackling the complexities of web-scale recognition. + +# 1. Introduction + +Generative vision-language models such as GPT-4 [30], Flamingo [2] or PALI [5], are becoming increasingly popular for computer vision applications. They show an impressive ability to generate free-form text for describing the contents of an image (captioning), or answering questions based on an image (visual-question answering). Nevertheless, their potential for recognition tasks [12], which usually require a more concise, structured output, remains underexplored. The focus of this paper is to explore their application for the challenging task of web-scale entity recognition. A recent benchmark, Open-domain Visual Entity recognitionN (OVEN) [12], challenges models to associate an image with a Wikipedia entity from a pool of over six million entities. 
Models must establish a robust association between images across millions of coarse-grained and fine-grained entities, encompassing a wide spectrum of concepts such as animals, buildings, locations, and a multitude of others [12]. + +![](images/71441a3ecf82a7a85c4fedd9224c40da2d4e966b6fd20e143ebd532d5d9457b5.jpg) +Figure 1. We introduce GER, a novel generative paradigm for web-scale visual entity recognition. We create compact semantic codes for each entity, and learn to auto-regressively generate them for a given query image at inference. + +![](images/94363eac2716d72aa979169ee926daf310567333168b0f6f9e50906de07abac2.jpg) + +Traditionally, the predominant methods employed to address the challenge of visual entity recognition have revolved around either classification or contrastive dual-encoder paradigm like CLIP [32]. While classification offers a straightforward approach, it grapples with limitations when confronted with extensive label spaces such as that of OVEN, resulting in substantial parameter counts and practical engineering complexities. The dual-encoder approach on the other hand, learns a unified image-text feature space, thereby facilitating efficient nearest neighbor searches for recognition. Nonetheless, this approach exhibits its own drawbacks: (a) it does not directly optimize for the final recognition task but instead relies on indirect optimization through contrastive loss where a set of negative data has to be subsampled at training time [11, 29, 32], (b) compressing either the image or text into an embedding vector results in loss of information, detrimentally affecting performance for fine-grained recognition [15] and (c) the memory requirements for storing dense representations scale proportionally with the size of the entity set. + +These challenges of the dual-encoder paradigm have kindled interest in alternative strategies. Notably, in Natural Language Processing (NLP) domain, recent works challenge the dual-encoder approach and use generative models instead for information retrieval [6, 25, 31, 33, 41, 42]. These works represent each element of the corpus by a compact code of integers, and learn an auto-regressive generative model to decode the target code for a given query. This + +paradigm promises to overcome some drawbacks of dual-encoders by simplifying the retrieval pipeline such that the training and inference objectives are the same, and directly encoding the corpus within the model's parameters. Also as an alternative to dual encoders, OVEN paper [12] showcases the feasibility of extending a generative image captioning model [5] for visual entity recognition by matching the generated caption to one of the Wikipedia entity texts [34]. + +Inspired by these recent explorations, we propose a Generative Entity Recognition (GER) framework (illustrated in Fig. 1) to facilitate end-to-end visual entity recognition by leveraging generative auto-regressive models. Specifically, we represent each Wikipedia entity with a code, i.e. a short sequence of integers. Then, we train models to predict an entity from an input image by auto-regressively generating the code corresponding to the target entity. We find that creating unAmbiguous, Language-based and Discriminative (ALD) entity codes results in the best variant of our GER framework, which we denote by GER-ALD. 
In fact, while we observe that unstructured "atomic" codes work well in some scenarios, they fail when training data or model capacity are limited or more importantly, when the entity set reaches the million scale (see Sec. 4.4.1). Plus, they cannot generalize to new entities. In contrast, we find that semantically-structured codes based on language improve upon atomic codes by leveraging generic concepts shared across related entities (see example in Fig. 1 with "Black colobus" and "Black-and-white colobus" sharing common code tokens). A simple way of creating codes based on language is to directly tokenize [20] the entity name, which is akin to image captioning where the entity name is used as a caption [6, 12]. However, we find that such tokenized entity names contain clutter and noisy information, all the more so when the entity name is long (see Sec. 4.4.2). Our GER-ALD method improves over this simple captioning baseline by decoding only the most discriminative part of the tokenized entity name, i.e. the part which makes the considered entity name the most different compared to all other entities. + +Finally, we also propose an entity-based pre-training to condition the GER models to web-scale entity recognition. Inspired by recent advances in retrieval-based methods [15, 23], we retrieve a subset of images from a large-scale image-text dataset typically used for captioning or contrastive pre-training [5] and re-purpose it by replacing the original text captions with related OVEN entity names. Overall, our experiments demonstrate the efficacy of the proposed GER paradigm: GER-ALD outperforms previously published numbers on OVEN benchmark [12] by $+6.7$ top-1 accuracy, while using $42\times$ less parameters. In summary, our contributions are as follows: + +- a generative entity recognition framework (GER) to facilitate end-to-end visual entity recognition; +- an innovative strategy for encoding Wikipedia enti + +ties into unambiguous language-based discriminative (ALD) codes that are highly effective for GER; + +- an entity-based pre-training process without requiring human intervention; +- state-of-the-art results in challenging web-scale OVEN entity recognition and on-par performance to traditional classifiers in smaller-scale label-space scenarios. + +# 2. Related work + +Visual entity recognition aims to recognize classes, or entities given visual inputs [35]. Granularity of visual entity recognition tasks varies from every-day generic objects [8, 9], to fine-grained domains, such as birds [44], dogs [17], cars [18], food [4], landmarks [47], faces [50] and natural world species [43]. Some challenges for the visual entity recognition tasks include imbalanced training classes following a long-tailed distribution [24], or noisy training labels [22]. Recent work [12] proposes a new, web-scale dataset for open-domain entity recognition. This challenging benchmark contains 6M entity names derived from Wikipedia page titles, including coarse-grained and fine-grained entities, encompassing a wide spectrum of concepts such as animals, buildings, organizations, landmarks, and a multitude of other. The authors show that generative captioning models (i.e. PaLI [5]) outperform dual encoder models for large-scale entity recognition. In this paper, we build upon this observation, and study generative models for accurate and efficient entity recognition. + +Extreme classification tackles entity recognition specifically at a very large scale with a pure classification approach [1, 3, 26]. 
Typical approaches explore strategies for scaling to the hundred of thousands scale and preliminary results are even shown at million scale [1]. By leveraging generative image-to-text models, we propose a fresh perspective beyond traditional classification methods typically used in the context of large-scale visual entity recognition. + +Generative auto-regressive retrieval methods are increasingly popular in NLP [6, 25, 31, 33, 41, 42]. GENRE retrieves Wikipedia entities by generating their names in an autoregressive fashion. Seminal work DSI [42] shows the benefit of learning to decode compact codes (created either randomly or with hierarchical k-means clustering) associated with each document. Neural Corpus Indexer [46] proposes a specific decoding scheme for generative retrieval and show the benefit of query augmentation by automatically generating training queries for documents to be indexed. TIGER [33] studies generative retrieval in the context of recommender systems. Finally, [31] conducts a systematic study of generative retrieval systems when scaled to millions of document passages. Only very few works explore this family of approaches in computer vision domain, and only in very small-scale and uni-modal scenarios [49]. + +![](images/4cc96816c58a6c1fb274e3abae0507b3fefa0dc7e816fca1b20527ce03259cbf.jpg) +(a) ALD: creating unambiguous language-based discriminative entity codes +Figure 2. Overview of GER-ALD method. (a) We utilize a text tokenizer to create compact and semantic codes, which represents each entity with short, but discriminative representations. (b) We learn a generative auto-regressive model, which learns to decode the correct code for given query image and text pair. + +# 3. Method + +Our goal is to explore how to adapt generative autoregressive models to the task of visual entity recognition (GER). While previous works have shown preliminary signal that it is possible to repurpose autoregressive models for entity recognition by directly decoding entity names [6, 12], we propose a more effective strategy. An overview of our framework is in Fig. 2. + +# 3.1. Problem definition + +Web-scale visual entity recognition. The Open-domain Visual Entity recognitioN (OVEN) [12] task consists of mapping input visual queries to one of the 6M English Wikipedia entities. More specifically, for a given image query $x_{v}$ and text query $x_{t}$ , the model needs to recognize the corresponding entity $e$ among the set $\mathcal{E}$ of all possible entities. The purpose of the input text $x_{t}$ is to achieve unambiguous recognition. For example, when several entities are represented in the query image $x_{v}$ , the text query indicates which one needs to be recognized. Each entity $e \in \mathcal{E}$ comes with an entity name, denoted by $t_{e}$ , which corresponds to the title of the entity Wikipedia page. + +Representing each entity with a code. In GER, we represent each entity $e$ by a code denoted by $c^e = \{c_1^e,\dots,c_L^e\} \in [[1,V]]^L$ where $L$ is the length of the code and $V$ is the size of the vocabulary of all integer values that each code token $c_i^e$ can take. This forms up to $V^L$ unique codes. Note that vanilla image classification and captioning baselines can both be cast into this code formulation. In fact, with $L = 1$ and $V = |\mathcal{E}|$ , the codes are equivalent to the labels used in standard multi-class classification. 
On the other hand, if each code token value in $[[1,V]]$ maps to a (sub-)word in a pre-defined vocabulary [20], then the codes simply correspond to standard tokenized text used in captioning + +models [19, 39, 45]. In the following paragraphs, we detail GER-ALD, our most effective strategy for building codes $C$ to represent all 6M English Wikipedia entities. + +# 3.2. GER-ALD: Creating ALD codes for GER + +We design the code set $C$ so that it has three properties which we find are important for effective GER models: i) semantically structured thanks to language, ii) discriminative and compact, and iii) unambiguous. Our algorithm to create such unambiguous, language-based and discriminative codes, called ALD, is illustrated in Fig. 2 (a) and described in pseudo-code in Algorithm 1 of the Appendix. + +Semantic tokens based on language. We find that entity codes $C$ benefit from following a semantic structure, especially in scenarios where memorizing unstructured atomic codes is difficult. We show in Sec. 4.4.1 that using unstructured atomic codes fail when the amount of training data or the model capacity are limited or, of particular interest, when the entity set size increases to the million scale (see Fig. 3). Intuitively, we want entities that are semantically similar to have some overlapping code tokens. For example, we wish that entities $e = \mathrm{Q}521977$ with corresponding name $t_{\mathrm{Q}521977} =$ "Black colobus" and $e = \mathrm{Q}358813$ with corresponding name $t_{\mathrm{Q}358813} =$ "Black-and-white colobos" to share some code tokens, given that these correspond to two close species. + +A simple yet effective way of having semantic codes is to tokenize the entity names based on text tokenizers [6, 19, 20, 39]. If each of the sub-words in the entity names are mapped to an integer representing this sub-word, then entities Q358813 and Q521977 naturally share code tokens: those representing the phrase "colobus". We denote by $\Phi(.)$ an off-the-shelf text tokenizer with a vocabulary of $V_{\Phi}$ sub-words such that $\Phi(t_e) = \{y_1^e, \dots, y_{L_e}^e\} \in [[1, V_{\Phi}]]^{L_e}$ where $L_e$ is the length of the tokenized entity name $\Phi(t_e)$ . In practice we use the same language tokenizer as GIT [45] for $\Phi(.)$ and have a vocabulary size of $V = V_{\Phi} = 30522$ . We refer to the baseline of using codes $C$ created by simple tokenization of the entity name as GER-CAPTION (i.e. we treat the entity name as a caption) [6]. We show in the following paragraph how GER-ALD codes differ from such GER-CAPTION codes by making them more compact and discriminative. + +Discriminative and compact codes. Our goal is to build short and highly discriminative codes because they are easier to learn for the model, as validated by our experiments in Sec. 4.4.2. For example, the tokenized entity name $\Phi(t_{\mathrm{Q358813}}) = \Phi(\text{"Black-and-white colobus}")$ counts $L_{\mathrm{Q358813}} = 8$ tokens, but clearly not all 8 tokens are important to make this entity discriminative compared to all other existing entities. Hence, we choose to represent each entity with the bare minimum, removing all the clutter which is not only non-discriminative but also adds noise. We achieve this by selecting the most discriminative and rarest tokens + +within the tokenized entity name. Specifically, we compute the frequency $f_{v}$ of each token value $v \in [1, V]$ in the vocabulary over the entire corpus of tokenized entity names $\{\Phi(t_{e})\}_{e \in \mathcal{E}}$ . 
We have $f_{v} = \frac{n_{v}}{\sum_{u=1}^{V} n_{u}}$ where $n_{v}$ is the number of times $v$ appears in $\{\Phi(t_{e})\}_{e \in \mathcal{E}}$ . We create an ALD code $c_{e}$ for each entity by keeping only the $(L-1)$ tokens with the lowest frequencies and discarding the other ones. For example for entity Q358813, the 3 tokens with the lowest frequencies are "col", "ob" and "white". Interestingly, these 3 most discriminative tokens appear at the end of the code for GER-CAPTION. By contrast, they appear right at the beginning of the code for GER-ALD and they constitute the only tokens to be decoded by the model, which intuitively explains the improved performance of GER-ALD codes, as analyzed later in Sec. 4.4.2 especially when entities have long names (see Fig. 4). Finally an interesting by-product of using short codes is that they are faster to decode (the complexity of decoding is $\mathcal{O}(L^{2})$ ) and require less memory footprint to store. Unambiguous codes. Note that several entities might share the same least frequent $(L-1)^{\text{th}}$ tokens. In this case their code are exactly identical up to the $(L-1)^{\text{th}}$ token. We use the last $L^{\text{th}}$ token to ensure that each entity has a unique code: we greedily assign the last code token $c_{L}^{e}$ to the next least frequent word of the tokenized entity name until the code $c_{e}$ is different from all existing codes. If this still fails to create a unique code, we assign $c_{L}^{e}$ to a random token value $v'$ so that the resulting code is unique. With code length $L = 4$ , only 0.5% of the entities use a random token value. + +# 3.3. Training + +In this section, we describe the model used to decode entity codes from an input image-text pair. Importantly, we also introduce our entity-based pre-training to condition the generative model to the task of entity recognition. + +Auto-regressive generative models. We build upon GIT [45], an auto-regressive image-to-text generative model. The query image-text pair $(x_v, x_t)$ is transformed into a set of $d$ -dimensional embeddings using a visual encoder for $x_v$ and the text tokenizer $\Phi(.)$ for $x_t$ . The resulting output is represented by $\mathbf{X}_v \in \mathbb{R}^{N_v \times d}$ (resp. $\mathbf{X}_t \in \mathbb{R}^{N_t \times d}$ ) for image (resp. text) tokens. We then input $\mathbf{X}_v$ and $\mathbf{X}_t$ to a decoder network $g(.)$ whose task is to decode the next code token $c_i^e$ , conditioned on the previous tokens $c_j^e$ . Each code token value $v$ in $\mathbb{I}[1, V]$ maps to a learnable $d$ -dimensional vector $\mathbf{Y}_v$ (gathered in the embedding matrix $\mathbf{Y} \in \mathbb{R}^{(V+1) \times d}$ where $\mathbf{Y}_0$ corresponds to the "beginning of code" token). We train with a language modeling loss: + +$$ +\mathcal {L} ^ {e} = \frac {1}{L} \sum_ {i = 1} ^ {L} \ell (c _ {i} ^ {e}, g ([ \mathbf {X} _ {v}; \mathbf {X} _ {t}; \mathbf {Y} _ {0}; \mathbf {Y} _ {c _ {0 < j < i}} ^ {e} ]) +$$ + +where $[;]$ corresponds to the concatenation operation in the first dimension and $\ell$ is the softmax cross-entropy loss with label-smoothing [27]. We average $\mathcal{L}^e$ over a mini-batch and + +learn the weights of the visual encoder, decoder $g(.)$ and embedding matrix $\mathbf{Y}$ through back-propagation. When decoding, we use beam search to obtain the best predicted entity coded. We find that we do not need to constrain the beam search to existing codes since more than $99\%$ of the top-1 predictions are valid codes for converged GER models. 
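To make the code-construction procedure of Sec. 3.2 concrete, the snippet below sketches ALD code creation, combining the frequency-based token selection with the greedy disambiguation step described above. It is a minimal illustration, not the released implementation: `tokenize` stands in for the tokenizer $\Phi(.)$, the helper names are hypothetical, and tie-breaking details may differ from the authors' code.

```python
import random
from collections import Counter

def build_ald_codes(entity_names, tokenize, L=4, vocab_size=30522, seed=0):
    """Sketch of ALD code creation (Sec. 3.2).

    entity_names: dict mapping entity id -> entity name (Wikipedia page title).
    tokenize: callable mapping a string to a list of integer token ids (Phi).
    Returns a dict mapping entity id -> tuple of up to L token ids.
    """
    rng = random.Random(seed)
    tokenized = {e: list(tokenize(name)) for e, name in entity_names.items()}

    # Corpus-level frequency n_v of every token value over all tokenized names.
    counts = Counter(tok for toks in tokenized.values() for tok in toks)

    codes, used = {}, set()
    for e, toks in tokenized.items():
        # Distinct tokens of this entity, ordered from least to most frequent.
        by_rarity = sorted(dict.fromkeys(toks), key=lambda v: counts[v])
        prefix = tuple(by_rarity[: L - 1])  # the (L-1) most discriminative tokens

        # Greedily pick the L-th token among the remaining ones until unique.
        code = None
        for extra in by_rarity[L - 1:]:
            if prefix + (extra,) not in used:
                code = prefix + (extra,)
                break
        # Rare fallback (~0.5% of entities at L = 4): use a random token value.
        while code is None or code in used:
            code = prefix + (rng.randrange(vocab_size),)

        used.add(code)
        codes[e] = code
    return codes
```

Ordering the kept tokens from rarest to most frequent matches the default "least frequent first" decoding order ablated in Tab. 3.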
+ +Entity-based pre-training. Common auto-regressive models such as GIT [45] or PaLI [5] are pre-trained for descriptive captioning. As shown in Tab. 5 and Fig 9 of the Appendix, they generalize poorly to entity recognition. This is because of the task discrepancy between predicting a descriptive caption and predicting an entity name. In order to condition our models better for entity recognition, we propose to collect a significant number of entity-based pretraining images, each associated with a Wikipedia entity instead of a generic caption. However, such an entity-based pretraining dataset does not exist. We create it in an automatic way, without any human supervision. + +To do so, we leverage existing large-scale image-caption datasets [37, 38]: unless specified otherwise we use WebLI [5]. For each Wikipedia entity, we retrieve in WebLI the image-caption pairs that best represent this entity and replace their original captions by this entity name [15, 23]. Specifically, we embed the 6M entity names of OVEN with a semantic text encoder [32] and find the top- $k$ most similar captions in WebLI. We retrieve their corresponding images and replace their original captions by the considered entity name. We ensure that no image is assigned to multiple entities to avoid instability during training. We vary the number of retrieved images $k$ per entity from 2 to 100 to produce pre-training datasets of different sizes: from 11M up to 55M images (see Fig. 6). We denote by Entity-WebLI (resp. Entity-LAION) the resulting dataset used for entity-based pretraining, built from WebLI (resp. LAION [38]). This way of creating pre-training data is akin to the query generation techniques used for generative retrieval in NLP [46]. However, rather than generating a synthetic input, we simply retrieve input images from a large-scale dataset. + +# 3.4. Baselines + +We compare our method to the following different baselines. Hierarchical classification. Solving million-scale entity recognition with classification is unpractical due to the very large number of classes. A workaround is to use hierarchical classifiers. As OVEN does not come with hierarchical labels we obtain a 3-level hierarchy through k-means of the 6M entity names encoded with sentence-T5 [28]. We train a multi-class classifier for each parent node in the hierarchy. To avoid training a huge number of different classification matrices, we learn a generic classifier matrix per level which is modified by learnable small modifiers depending on the path in the hierarchy. + +Dual encoders. Another typical workaround to classifica + +tion is to rely on deep metric learning approaches [36] such as Noise Contrastive Estimation [11] and its InfoNCE variant [29] as used in popular dual encoder approaches [16, 32]. Dual encoders learn a unified image-text feature space with separate encoders, thereby facilitating efficient nearest neighbor searches for recognition. We use CLIP-L/14 [32]. Visual matching. We also experiment with pure visual matching baselines. We use off-the-shelf CLIP-L/14 visual encoder and Entity-WebLI (55M) dataset as the memory. We use $k = 500$ for nearest neighbor search with majority voting as it obtains the best results on OVEN val set. + +Captioning. We compare to Git-Large [45] or PaLI [5] image-to-text auto-regressive captioning models. + +GER-baselines: alternative code creation strategies. We compare GER-ALD, i.e. the best variant of GER, with several alternatives. 
First, GER-ATOMIC refers to using atomic, completely unstructured codes, i.e. each code token $c_{i}^{e}$ is randomly drawn from $[1, V]^{L}$ [42]. Second, we consider two alternatives using semantically structured codes: (i) GER-HKC where we embed the entity names with a pretrained text encoder before applying hierarchical k-means clustering on the resulting embeddings [42] and (ii) GER-CAPTION where we create a code by tokenizing the entity name with $\Phi(\cdot)$ [6, 12]. Details on the baselines are in Appendix Sec. 6.4. + +# 4. Experiments + +In this section, we detail our experimental setup, compare our method with state of the art and baselines, and finally present thorough analyses on code creation and pretraining. + +# 4.1. Experimental setting + +OVEN dataset consists of 6,063,945 different entities [12]. We evaluate the models on the validation and test splits, by reporting the harmonic mean (HM) of top-1 accuracy scores between "seen" and "unseen" entities. Seen are entities present in the OVEN training set. Unseen entities are a subset of entities among the ones not present in the training set. The models are evaluated on a total of 3192 entities (1721 for seen and 1471 for unseen) for validation and 15888 entities (8355 for seen and 7533 for unseen) for test. We call the entities that the model is evaluated on by "positive" entities (i.e. the union of the 3192 validation and 15888 test entities) and all other entities by "negative" entities. + +Pretraining and finetuning. Unless specified otherwise, we pretrain our models on the entity-WebLI dataset, which we create considering all 6M entity names as described in Sec. 3.3. After this entity-based pretraining, the models are finetuned on OVEN training set which consists only of the "seen" entities. All implementation details are in Sec. 6 in Appendix and code is released in the ScENIC library [7]. + +Preventing data leakage. We remove pretraining images from Entity-WebLI and Entity-LAION with a cosine simi + +
| Method | # par. (B) | Pretraining dataset | # imgs | OVEN test: HM | seen | unseen |
| --- | --- | --- | --- | --- | --- | --- |
| *Dual encoder approaches* |  |  |  |  |  |  |
| CLIP ViT-L14 | 0.42 | OpenAI | 400M | 5.2 | 5.6 | 4.9 |
| CLIP fusion ViT-L14 | 0.88 | OpenAI | 400M | 8.4 | 33.6 | 4.8 |
| CLIP2CLIP ViT-L14 | 0.86 | OpenAI | 400M | 11.5 | 12.6 | 10.5 |
| *Captioning approaches* |  |  |  |  |  |  |
| GiT-Large | 0.40 | WebLI | 100M | 7.0 | 17.6 | 4.3 |
| PaLI-3B | 3 | WebLI | 1B | 9.1 | 19.1 | 6.0 |
| PaLI-17B | 17 | WebLI | 1B | 16.0 | 28.3 | 11.2 |
| *Generative entity recognition* |  |  |  |  |  |  |
| GER-ALD‡ (Ours) | 0.40 | Entity-LAION | 41M | 20.9 | 29.1 | 16.3 |
| GER-ALD (Ours) | 0.40 | Entity-WebLI | 55M | 22.7 | 31.5 | 17.7 |
+ +Table 1. Comparison with state-of-the-art approaches on OVEN entity test split. We report the harmonic mean (HM) of the seen and unseen splits (top-1 accuracy) after finetuning on OVEN training set. Numbers are taken from [12] except methods based on GiT-Large which are run by us. We indicate the total number of parameters of each model ("# par.") in billion and the pretraining dataset details. $\ddagger$ : use only publicly available data. + +larity (with CLIP-L/14 visual features) above 0.95 with any of the OVEN test or val images. We chose a 0.95 conservative threshold by looking at some examples: similarity 0.95 corresponds to conceptually similar images but clearly not duplicates (see Fig. 8 in Appendix). + +# 4.2. Comparison with the state of the art + +In Tab. 1, we compare the performance of GER-ALD, our best GER variant, on the OVEN entity benchmark with previously published numbers after finetuning on the OVEN training set. We see that our method outperforms previously proposed approaches by significant margins. Notably, GER-ALD improves over the captioning model PALI-17B by $+6.8$ top-1 HM test accuracy (a relative improvement of $43\%$ ) while using $42\times$ less parameters. + +# 4.3. Comparison with baselines + +In Tab. 2, we compare GER-ALD with the different baselines described in Sec. 3.4. All baselines use exactly the same pretraining dataset entity-based WebLI (55M) and model architectures of comparable sizes. + +Comparing GER to different paradigms. We see in Tab. 2 that GER outperforms strong captioning, dual-encoder, visual matching and hierarchical classification baselines, affirming its advantage in tackling web-scale visual entity recognition. Our superior performance compared to dual encoders aligns with previous works observing that CLIP struggles for fine-grained recognition [12, 15]. Due to query image and entity name similarities being captured only through a vector dot product, potentially fine-grained interactions are missed. Also, GER offers significant advan + +
| Method | Entity-based pretraining: HM | seen | unseen | + finetuning on seen: HM | seen | unseen |
| --- | --- | --- | --- | --- | --- | --- |
| Dual encoders | 9.2 | 8.9 | 9.4 | 16.3 | 24.3 | 12.3 |
| Visual matching | 16.2 | 15.5 | 17.1 | 16.4 | 15.7 | 17.2 |
| Captioning | 13.2 | 13.1 | 13.3 | 16.8 | 25.9 | 12.5 |
| Hierarchical classif. | 14.7 | 14.8 | 14.6 | 21.8 | 29.6 | 17.2 |
| *Generative entity recognition* |  |  |  |  |  |  |
| GER-ATOMIC | 15.9 | 15.3 | 16.7 | 20.1 | 26.2 | 16.3 |
| GER-CAPTION | 14.3 | 16.5 | 12.6 | 20.7 | 26.8 | 16.9 |
| GER-HKC | 15.8 | 15.5 | 16.0 | 21.0 | 25.2 | 17.9 |
| GER-ALD | 17.7 | 18.3 | 17.2 | 22.7 | 31.5 | 17.7 |
+ +Table 2. Baseline comparisons. All baselines use exactly the same pretraining dataset Entity-WebLI (55M) and architectures of comparable number of parameters ( $\sim$ 400M). All numbers are obtained with finetuning on seen split after entity-based pretraining. We report the Harmonic Mean of top-1 accuracy on OVEN test. + +tages over dual encoders: its computational complexity is not a function of entity set size and it does not require to store entity dense embeddings. + +Different GER variants. In Tab. 2, we compare different variants of GER: one variant using unstructured codes (GER-ATOMIC) and three variants using semantically-structured codes: GER-CAPTION, GER-HKC and GER-ALD. We observe that GER-ALD is the best performing variant, both after entity-based pretraining and after finetuning on the OVEN seen entities. Compared to GER-CAPTION, GER-ALD use codes that are more discriminative and compact, which improves the performance particularly for entities with long names (see Sec. 4.4.2). Compared to GER-ATOMIC, GER-ALD codes yield a semantic structure which is crucial for million-scale label-space as shown in Sec. 4.4.1. GER-HKC model also gets strong performance but relies on an off-the-shelf semantic text encoder which makes the approach more complex and costly compared to GER-ALD. GER-HKC is a first step towards learning codes and we hope future works will propose original and better code creation strategies [41]. + +# 4.4. Analysis and ablation study + +In this section, unless specified otherwise, we report the accuracy on the OVEN validation set [12] evaluated after pretraining on Entity-WebLI (27M), i.e. no OVEN finetuning. + +# 4.4.1 Semantic versus atomic codes + +In Fig. 3 (and Appendix Tab. 6), we report the relative improvement of semantically-structured codes (GER-ALD) compared to unstructured codes (GER-ATOMIC). We vary pretraining data size, model capacity and label-space size. A relative improvement of $100\%$ means that the performance of GER-ALD doubles compared to GER-ATOMIC. + +Limited pretraining data. In Fig. 3 (left), we see that semantic codes outperform atomic codes when the amount of + +![](images/fc485a04e1fa39ed6140465682b580b11f57b5d11e34249509069af703d73f1f.jpg) +Figure 3. Semantic vs atomic codes. We report the relative improvement in $\%$ of GER-ALD compared to GER-ATOMIC in 3 scenarios: (i) limited pretraining data, (ii) limited model capacity and (iii) massive-scale label-space. Plots share a common experiment shown by which uses a pretraining dataset size of $27M$ , Large model and 6M entity set. The setting reported in Tab. 2 is $\star$ . + +![](images/49ddd9ead505f53ad5aa89fa2bf94a3d9e74c2d35943e33683061e45dc5eb76c.jpg) + +![](images/a321e732990e2f78376a325c300a9db02fec44f11189a5cbedc7b97934bc8a73.jpg) + +data available for pretraining diminishes. In fact, the results reported in Tab. 2 corresponds to the most favorable scenario for GER-ATOMIC with 55M pretraining datapoints (represented by $\star$ in Fig. 3). The relative improvement in this case is still of $14\%$ while it grows to more than $1000\%$ when the amount of data is reduced by $5\times$ . + +Limited model capacity. In Fig. 3 (middle), we see that the model struggles to learn unstructured codes when its capacity is reduced. When considering the small version of our model (114M parameters), the performance with atomic codes is very poor: 0.7 top-1 accuracy. + +Web-scale label-space. In Fig. 3 (right), we vary the number of entities for pretraining. The "positive" entities (see Sec. 
4.1) are always included in the pretraining set and the amount of "negative" entities is increased, effectively acting as distractors. First, we see in Fig. 3 (right) that for relatively small-scale label-space $(\leq 100k)$ , the benefit of having semantic codes versus atomic is small. In this regime we find that the model can memorize all the entities without the need for semantic structure between them. This aligns with the findings of DSI [42]. We evaluate GER further in small label-spaces in Sec. 4.5. However, we see that in million-scale label-space regime, semantic structure becomes important and significantly improves the performance compared to atomic codes: $+26\%$ relative improvement. + +Overall, we find that GER-ATOMIC fail to learn unstructured codes when the amount of pretraining data or architecture capacity are reduced, or when the label-space increases to million-scale. Unlike GER-ATOMIC, GER-ALD succeed in these scenarios thanks to the semantic structure easing the learning. Next, we analyze how GER-ALD improves over another type of semantic codes: GER-CAPTION codes. + +# 4.4.2 ALD versus captioning codes + +We analyze why unambiguous, language-based and discriminative codes (GER-ALD) are more effective for entity recognition than directly decoding the entity name (GER-CAPTION). In Fig. 5 (left), we report the performance of GER-ALD and GER-CAPTION when varying the length $L$ of + +![](images/5e982a64297df540f5e1649720c37e332c6e861af130b3ec3da376952bfc2789.jpg) +Figure 4. Accuracy per entity name length for GER-ALD versus GER-CAPTION codes. (left): Accuracy averaged per entity name length. (right): Qualitative examples of predictions for long entity names. Code tokens are symbolized between brackets. + +![](images/eedacf2d00bdff76ba9f66b7ef25772fc19c9946e84f65608ef91e873ad609a0.jpg) + +![](images/c8771de9c408fdd4a43b14eadb5f4ac86d327ff9e4ecc5d562866f15ec39ff23.jpg) + +![](images/f3a380580b731f41ddcf38c9dd6375c81f4c893e3b55d9dcd30452b178e646d2.jpg) + +![](images/7a8b000100afffaa3a3ead775c80cbe07506e2dad8956364ae73be324f6c333d.jpg) +Figure 5. ALD versus captioning codes. (left): Effect of different code lengths for GER-ALD and GER-CAPTION codes. (right): Cumulative distribution function (CDF) of (in green) the position of the least frequent token in the tokenized entity name and of (in pink) the length of tokenized entity name. + +![](images/60dcc266746f7b206eec86d4e9d14c65ebf83741a0337a71bc15e370e27cbc33.jpg) + +the codes. Fixing a code length $L$ to a caption corresponds to keeping only the first $L^{\text{th}}$ tokens of the entity name. In Fig. 5 (right), we report the cumulative distribution functions (CDF) of (i) the position within the tokenized entity name of the least frequent token among the entire corpus (as described in Sec. 3.2) and (ii) the total number of tokens in the tokenized entity name ( $L_{e}$ in the notations of Sec. 3.2)). + +Discriminative tokens versus number of tokens. We observe in Fig. 5 (left) that the performance of GER-CAPTION increases drastically from $L = 2$ to $L = 4$ . At the same time, we see in Fig. 5 (right) that for $L = 4$ , less than half of the entity names are considered in full while more than $80\%$ of the GER-CAPTION codes contain the least frequent token of the entire tokenized name. This hints that what is important for language-based codes is not to describe the full entity name but to include its most discriminative part. 
We also observe that the performance of captioning increases only moderately from $L = 4$ to $L = 8$ even though the number of entities considered in full increases drastically from $46.6\%$ to $100\%$ . This confirms our intuition that decoding all the entity name tokens does not have a major impact on the performance as long as the most discriminative tokens are decoded. Overall, these observations motivate the ALD design of keeping only the most discriminative tokens, which is shown in Fig. 5 to lead to improved performance compared to decoding the full tokenized entity name. + +Effect of code length for GER-ALD. We see in Fig. 5 (left) that the performance of GER-ALD is the best for $L = 4$ . With smaller code lengths, we need to resort to random tokens a lot to achieve unique codes (see Sec. 3.2), which deters the + +
| Selection strategy | HM |
| --- | --- |
| Least frequent tokens | 14.4 |
| Most frequent tokens | 12.3 |
| First tokens | 12.0 |
| Random tokens | 11.3 |

| Tokens order | HM |
| --- | --- |
| Least frequent first | 14.4 |
| Syntax order | 14.4 |
| Random order | 13.0 |
| Least frequent last | 12.7 |
+ +Table 3. Ablation study of GER-ALD codes. (left) Word tokens selection. (right) Tokens order. All variants use $L = 4$ . Default is in top rows. Non language-based GER-ATOMIC gets 11.4 top-1. + +performance. For example at $L = 2$ , more than $10\%$ of the entities use a random code token while this percentage decreases to $0.5\%$ at $L = 4$ . We also see that the performance of GER-ALD decreases for code length above $L = 4$ , which hints that only the few most discriminative tokens are important while additional ones clutter the entity code. Interestingly we also observe in Fig. 5 (left) that when considering all the tokens, GER-ALD performance is slightly below that of GER-CAPTION. This might seem surprising since the same amount of information is present in both cases. However we find that when considering all the tokens, it is more difficult for the model to decode tokens ordered by frequencies than tokens ordered syntactically. + +Entities with long entity names. In Fig. 4 (left), we report the accuracy per entity name length for both GER-ALD and GER-CAPTION finetuned models. We see that the longer the entity name, the more GER-ALD improves over captioning. Longer entities tend to have more noise with key information further into the code. We also show in Fig. 4 qualitative examples of entities with long entity names (more in Fig. 12 in Appendix). In the left example, we see that GER-ALD use the token combination [col][ob] to represent the semantic concept of colobus monkey species. The last token is used to efficiently differentiate between sub-species of colobus. This compact and discriminative way of encoding the entity allows GER-ALD to successfully predict this entity whereas GER-CAPTION fails to generate the entity tokenized name. + +# 4.4.3 Creating codes with ALD + +Least frequent tokens. In Tab. 3 (left), we validate our choice of selecting the least frequent tokens by evaluating 3 alternatives: random choice, most frequent tokens and first-appearing tokens in tokenized entity name. We see that these alternative strategies hurt the performance significantly. Qualitative examples in Appendix Fig. 11 show that + +
| Dataset | Codes | HM |
| --- | --- | --- |
| WebLI | WebLI caption | 1.8 |
| Entity-WebLI (55M) | WebLI caption | 12.9 (+11.1) |
| Entity-WebLI (55M) | Entity name | 14.8 (+1.9) |
| Entity-WebLI (55M) | ALD | 17.5 (+2.7) |
+ +![](images/28887796d8c7d3ad0466ad5434277fcb99be97764d83f27a20f6c6bd45bad6ea.jpg) +Figure 6. Entity-based pretraining ablation. (left): Validation OVEN accuracy. (right): Examples of original WebLI captions versus corresponding OVEN entity names. +Figure 7. Pretraining. We vary the size of the pretraining dataset by changing the amount of retrieved examples from WebLI for each OVEN entity (see Sec. 3.3). + +the kept tokens are less semantic and discriminative compared to GER-ALD strategy of keeping the least frequent tokens. Note that all these variants are at least as good as GER-ATOMIC (11.4 top-1) which is not based on language at all. + +Decoding order. In Tab. 3 (right), we vary the order of the first $L - 1$ tokens in GER-ALD codes. Instead of decoding tokens from least to most frequent, we evaluate most to least frequent, syntax order and random order. Note that the selected tokens are the same in all variants, only their order changes. We see that both "least frequent first" and "syntax" orders achieve the best of performance. + +# 4.4.4 Entity-based pretraining + +Entity-based pretraining. In Fig. 6, we analyze why our entity-based pretraining improves over the standard captioning pretraining of PaLI or GiT models. First, we see that our method of selecting WebLI data relevant to OVEN entities drastically improves the performance (+11.1 in Fig. 6 (left)). This is because, by design, we select image-text pairs from WebLI that have captions similar to OVEN entity names. Hence, this data is directly relevant for the OVEN entity recognition benchmark. Second, we see that replacing the original WebLI caption with its corresponding entity name from OVEN leads to superior performance (+1.9). We see in the qualitative examples of Fig. 6 (right) that original captions contain a lot of descriptive information not directly relevant to the entity. Lastly, we confirm that using GER-ALD codes is better (+2.7) than tokenized entity name. + +Dataset size. In Fig. 7, we evaluate the effect of the pretraining dataset size for GER models. We control the dataset size by varying the amount of retrieved examples from WebLI for each of the OVEN entities (see Sec. 3.3). We see in Fig. 7 that GER-ALD, GER-CAPTION and GER-ATOMIC benefit greatly from more data and do not seem to have reached saturation yet. As analyzed in Sec. 4.4.1, GER-ATOMIC fails when the amount of pretraining data decreases. + +
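As a companion to this ablation, the following is a minimal sketch of the retrieve-and-relabel construction of the entity-based pretraining set from Sec. 3.3. It assumes entity names and captions have already been embedded by some off-the-shelf text encoder; the function and variable names are illustrative and are not the authors' pipeline, which also runs at a scale where an approximate nearest-neighbor index would replace the dense similarity matrix.

```python
import numpy as np

def build_entity_pretraining_set(entity_emb, caption_emb, image_ids, k=5):
    """Sketch of entity-based pretraining data creation (Sec. 3.3).

    entity_emb:  [num_entities, d] text embeddings of the entity names.
    caption_emb: [num_captions, d] text embeddings of the original captions.
    image_ids:   list of length num_captions giving the image of each caption.
    Returns (image_id, entity_index) pairs where the original caption is
    replaced by the entity name as the training target.
    """
    # Cosine similarities via normalized dot products.
    e = entity_emb / np.linalg.norm(entity_emb, axis=1, keepdims=True)
    c = caption_emb / np.linalg.norm(caption_emb, axis=1, keepdims=True)
    sims = e @ c.T  # [num_entities, num_captions]; use ANN search at real scale

    assigned, dataset = set(), []
    for ent_idx in range(sims.shape[0]):
        # Indices of the k captions most similar to this entity name.
        for cap_idx in np.argsort(-sims[ent_idx])[:k]:
            img = image_ids[cap_idx]
            if img in assigned:  # each image is assigned to at most one entity
                continue
            assigned.add(img)
            dataset.append((img, ent_idx))
    return dataset
```

Varying `k` corresponds to the dataset-size ablation in Fig. 7; the paper retrieves between 2 and 100 images per entity.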
| Method | ImageNet-LT | WebVision |
| --- | --- | --- |
| Classif. MLP | 74.3 | 80.9 |
| GER-ATOMIC L = 2 | 80.8 | 84.7 |
| GER-ALD L = 2 | 80.9 | 84.8 |
| GER-ATOMIC L = 1 (~ Classif. MAP) | 81.0 | 84.8 |
| *Previously published numbers* |  |  |
| NCR [13] | - | 76.8 |
| CurrNet [10] | - | 79.3 |
| PEL [40] | 78.3 | - |
| MAM [14]† | 82.3 | 83.6 |
+ +Table 4. Evaluation of classification models and GER on small-scale label-spaces. $\dagger$ indicates the use of additional data. + +# 4.5. Link with classification + +A typical way of tackling visual entity recognition is by training a classifier into the number of entities [35]. This is not a viable solution for web-scale problems such as OVEN where a single fully-connected layer for a 6M classes has an enormous parameter count of 4.6B. In this section, we evaluate GER in cases where learning a classification model is a feasible choice (smaller number of classes). Classification can be cast in our GER framework simply by setting $L = 1$ and $V = |\mathcal{E}| =$ number of classes (see Sec. 3.1), making it a special case of atomic codes with $L = 1$ . Since the decoder decodes a single token, it is equivalent to a multi-layer Multihead Attention Pooling (MAP) head [21, 48]. In Tab. 4, we consider two challenging classification datasets: long-tailed ImageNet-LT [24] and noisy Webvision [22]. We evaluate GER-{ALD, ATOMIC} and a classification baseline using multi-layer perceptron (MLP) on averaged-pooled patch tokens. Implementation details are in Sec 6.3 in Appendix. + +We see in Tab. 4 that using GER-ATOMIC instead of standard MLP improves significantly the performance of the classification model (74.3 versus 81.0 for ImageNet-LT). We also observe that GER-ATOMIC and GER-ALD have comparable performance in this relatively small label-space regime (1k classes). As a matter of fact, this achieves state-of-the-art accuracy for both datasets (when no additional external data is used). This shows that GER framework not only excels for large-scale scenarios, but also works well in datasets with smaller number of visual entities, making GER a general framework for visual entity recognition. + +# 5. Conclusion + +In this work, we propose a novel generative framework for web-scale visual entity recognition. We represent each entity by a compact, discriminative and semantic code that a generative auto-regressive model learns to decode. In future work, we will explore ways of creating better entity codes by leveraging additional information: either from the Wikipedia page such as the description of the entity and its attached image or also by using external tools. + +# References + +[1] Rahul Agrawal, Archit Gupta, Yashoteja Prabhu, and Manik Varma. Multi-label learning with millions of labels: Recommending advertiser bid phrases for web pages. In Proceedings of the 22nd international conference on World Wide Web, pages 13-24, 2013. 2 +[2] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in Neural Information Processing Systems, 35:23716-23736, 2022. 1 +[3] Samy Bengio, Krzysztof Dembczynski, Thorsten Joachims, Marius Kloft, and Manik Varma. Extreme Classification (Dagstuhl Seminar 18291). Dagstuhl Reports, 2019. 2 +[4] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In ECCV, 2014. 2 +[5] Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, et al. Pali: A jointly-scaled multilingual language-image model. *ICLR*, 2023. 1, 2, 4, 5, 11, 12 +[6] Nicola De Cao, Gautier Izacard, Sebastian Riedel, and Fabio Petroni. Autoregressive entity retrieval. 
arXiv preprint arXiv:2010.00904, 2020. 1, 2, 3, 5 +[7] Mostafa Dehghani, Alexey Gritsenko, Anurag Arnab, Matthias Minderer, and Yi Tay. Scenic: A JAX library for computer vision research and beyond. arXiv preprint arXiv:2110.11403, 2021. 5 +[8] Mark Everingham, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes (voc) challenge. *IJCV*, 88, 2010. 2 +[9] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In CVPR, 2004. 2 +[10] Sheng Guo, Weilin Huang, Haozhi Zhang, Chenfan Zhuang, Dengke Dong, Matthew R Scott, and Dinglong Huang. CurriculumNet: Weakly supervised learning from large-scale web images. In ECCV, pages 135-150, 2018. 8 +[11] Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics. JMLR Workshop and Conference Proceedings, 2010. 1, 5 +[12] Hexiang Hu, Yi Luan, Yang Chen, Urvashi Khandelwal, Mandar Joshi, Kenton Lee, Kristina Toutanova, and Ming-Wei Chang. Open-domain visual entity recognition: Towards recognizing millions of wikipedia entities. ICCV, 2023. 1, 2, 3, 5, 6 +[13] Ahmet Iscen, Jack Valmadre, Anurag Arnab, and Cordelia Schmid. Learning with neighbor consistency for noisy labels. In CVPR, 2022. 8 +[14] Ahmet Iscen, Alireza Fathi, and Cordelia Schmid. Improving image recognition by retrieving from web-scale image-text data. CVPR, 2023. 8 + +[15] Ahmet Iscen, Mathilde Caron, Alireza Fathi, and Cordelia Schmid. Retrieval-enhanced contrastive vision-text models. *ICLR*, 2024. 1, 2, 4, 5 +[16] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 5 +[17] Aditya Khosla, Nityananda Jayadevaprakash, Bangpeng Yao, and Li Fei-Fei. Novel dataset for fine-grained image categorization. In First Workshop on Fine-Grained Visual Categorization, CVPR, 2011. 2 +[18] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV, 2013. 2 +[19] Taku Kudo. Subword regularization: Improving neural network translation models with multiple subword candidates. arXiv preprint arXiv:1804.10959, 2018.3 +[20] Taku Kudo and John Richardson. Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing. arXiv preprint arXiv:1808.06226, 2018. 2, 3, 14 +[21] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In International conference on machine learning, 2019. 8 +[22] Wen Li, Limin Wang, Wei Li, Eirikur Agustsson, and Luc Van Gool. Webvision database: Visual learning and understanding from web data. arXiv preprint arXiv:1708.02862, 2017. 2, 8, 11 +[23] Haotian Liu, Kilho Son, Jianwei Yang, Ce Liu, Jianfeng Gao, Yong Jae Lee, and Chunyuan Li. Learning customized visual models with retrieval-augmented knowledge. In CVPR, 2023. 2, 4 +[24] Ziwei Liu, Zhongqi Miao, Xiaohang Zhan, Jiayun Wang, Boqing Gong, and Stella X. Yu. Large-scale long-tailed recognition in an open world. In CVPR, 2019. 
2, 8, 11 +[25] Sanket Vaibhav Mehta, Jai Gupta, Yi Tay, Mostafa Dehghani, Vinh Q Tran, Jinfeng Rao, Marc Najork, Emma Strubell, and Donald Metzler. Dsi++: Updating transformer memory with new documents. arXiv preprint arXiv:2212.09744, 2022. 1, 2 +[26] Anshul Mittal, Kunal Dahiya, Shreya Malani, Janani Ramaswamy, Seba Kuruvilla, Jitendra Ajmera, Keng-hao Chang, Sumeet Agarwal, Purushottam Kar, and Manik Varma. Multi-modal extreme classification. In CVPR, 2022. 2 +[27] Rafael Müller, Simon Kornblith, and Geoffrey E Hinton. When does label smoothing help? Advances in neural information processing systems, 32, 2019. 4 +[28] Jianmo Ni, Gustavo Hernández Ábrego, Noah Constant, Ji Ma, Keith B Hall, Daniel Cer, and Yinfei Yang. Sentence-t5: Scalable sentence encoders from pre-trained text-to-text models. arXiv preprint arXiv:2108.08877, 2021. 4, 12 +[29] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. 1, 5 + +[30] OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023.1 +[31] Ronak Pradeep, Kai Hui, Jai Gupta, Adam D Lelkes, Honglei Zhuang, Jimmy Lin, Donald Metzler, and Vinh Q Tran. How does generative retrieval scale to millions of passages? arXiv preprint arXiv:2305.11841, 2023. 1, 2 +[32] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 4, 5 +[33] Shashank Rajput, Nikhil Mehta, Anima Singh, Raghunanandan H Keshavan, Trung Vu, Lukasz Heldt, Lichan Hong, Yi Tay, Vinh Q Tran, Jonah Samost, et al. Recommender systems with generative retrieval. arXiv preprint arXiv:2305.05065, 2023. 1, 2 +[34] Stephen Robertson, Hugo Zaragoza, et al. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 2009. 2 +[35] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 2015. 2, 8 +[36] Florian Schroff, Dmitry Kalenichenko, and James Philbin. Facenet: A unified embedding for face recognition and clustering. In CVPR, 2015. 5 +[37] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114, 2021. 4 +[38] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. arXiv preprint arXiv:2210.08402, 2022. 4 +[39] Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909, 2015. 3 +[40] Jiang-Xin Shi, Tong Wei, Zhi Zhou, Xin-Yan Han, Jie-Jing Shao, and Yu-Feng Li. Parameter-efficient long-tailed recognition. arXiv preprint arXiv:2309.10019, 2023. 8 +[41] Weiwei Sun, Lingyong Yan, Zheng Chen, Shuaiqiang Wang, Haichao Zhu, Pengjie Ren, Zhumin Chen, Dawei Yin, Maarten Rijke, and Zhaochun Ren. Learning to tokenize for generative retrieval. NeurIPS, 2023. 
1, 2, 6 +[42] Yi Tay, Vinh Tran, Mostafa Dehghani, Jianmo Ni, Dara Bahri, Harsh Mehta, Zhen Qin, Kai Hui, Zhe Zhao, Jai Gupta, et al. Transformer memory as a differentiable search index. Advances in Neural Information Processing Systems, 2022. 1, 2, 5, 6 +[43] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In CVPR, 2018. 2 + +[44] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 2 +[45] Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, and Lijuan Wang. Git: A generative image-to-text transformer for vision and language. arXiv preprint arXiv:2205.14100, 2022. 3, 4, 5, 11, 12 +[46] Yujing Wang, Yingyan Hou, Haonan Wang, Ziming Miao, Shibin Wu, Qi Chen, Yuqing Xia, Chengmin Chi, Guoshuai Zhao, Zheng Liu, et al. A neural corpus indexer for document retrieval. NeurIPS, 2022. 2, 4 +[47] Tobias Weyand, Andre Araujo, Bingyi Cao, and Jack Sim. Google landmarks dataset v2-a large-scale benchmark for instance-level recognition and retrieval. In CVPR, 2020. 2 +[48] Xiaohua Zhai, Alexander Kolesnikov, Neil Houlsby, and Lucas Beyer. Scaling vision transformers. In CVPR, 2022. 8 +[49] Yidan Zhang, Ting Zhang, Dong Chen, Yujing Wang, Qi Chen, Xing Xie, Hao Sun, Weiwei Deng, Qi Zhang, Fan Yang, et al. Irgen: Generative modeling for image retrieval. arXiv preprint arXiv:2303.10126, 2023. 2 +[50] Zheng Zhu, Guan Huang, Jiankang Deng, Yun Ye, Junjie Huang, Xinze Chen, Jiagang Zhu, Tian Yang, Dalong Du, Jiwen Lu, et al. Webface260m: A benchmark for million-scale deep face recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 
2 \ No newline at end of file diff --git a/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/images.zip b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..e29513e881d8d5a9a57e632a80c092657a6117bb --- /dev/null +++ b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c581234e9acb822f3563b48049c78638d438c566bed2f64368cbca59f708c95 +size 376636 diff --git a/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/layout.json b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..4415160bcbae643457e1440292fd06942b9ded45 --- /dev/null +++ b/2024/A Generative Approach for Wikipedia-Scale Visual Entity Recognition/layout.json @@ -0,0 +1,9145 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 78, + 103, + 515, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 103, + 515, + 121 + ], + "spans": [ + { + "bbox": [ + 78, + 103, + 515, + 121 + ], + "type": "text", + "content": "A Generative Approach for Wikipedia-Scale Visual Entity Recognition" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 128, + 143, + 208, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 143, + 208, + 156 + ], + "spans": [ + { + "bbox": [ + 128, + 143, + 208, + 156 + ], + "type": "text", + "content": "Mathilde Caron" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 222, + 144, + 285, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 144, + 285, + 156 + ], + "spans": [ + { + "bbox": [ + 222, + 144, + 285, + 156 + ], + "type": "text", + "content": "Ahmet Iscen" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 301, + 144, + 365, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 144, + 365, + 156 + ], + "spans": [ + { + "bbox": [ + 301, + 144, + 365, + 156 + ], + "type": "text", + "content": "Alireza Fathi" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 381, + 144, + 464, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 144, + 464, + 156 + ], + "spans": [ + { + "bbox": [ + 381, + 144, + 464, + 156 + ], + "type": "text", + "content": "Cordelia Schmid" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 253, + 158, + 337, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 158, + 337, + 171 + ], + "spans": [ + { + "bbox": [ + 253, + 158, + 337, + 171 + ], + "type": "text", + "content": "Google Research" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 199, + 190, + 211 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 199, + 190, + 211 + ], + "spans": [ + { + "bbox": [ + 143, + 199, + 190, + 211 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 225, + 290, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 225, + 290, + 441 + ], + "spans": [ + { + "bbox": [ + 46, + 225, + 290, + 441 + ], + "type": "text", + "content": "In this paper, we address web-scale visual entity recognition, specifically the task of mapping a given query image to one of the 6 million existing entities in Wikipedia. 
One way of approaching a problem of such scale is using dual-encoder models (e.g. CLIP), where all the entity names and query images are embedded into a unified space, paving the way for an approximate kNN search. Alternatively, it is also possible to re-purpose a captioning model to directly generate the entity names for a given image. In contrast, we introduce a novel Generative Entity Recognition (GER) framework, which given an input image learns to auto-regressively decode a semantic and discriminative \"code\" identifying the target entity. Our experiments demonstrate the efficacy of this GER paradigm, showcasing state-of-the-art performance on the challenging OVEN benchmark. GER surpasses strong captioning, dual-encoder, visual matching and hierarchical classification baselines, affirming its advantage in tackling the complexities of web-scale recognition." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 466, + 127, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 466, + 127, + 479 + ], + "spans": [ + { + "bbox": [ + 47, + 466, + 127, + 479 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 487, + 287, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 487, + 287, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 487, + 287, + 693 + ], + "type": "text", + "content": "Generative vision-language models such as GPT-4 [30], Flamingo [2] or PALI [5], are becoming increasingly popular for computer vision applications. They show an impressive ability to generate free-form text for describing the contents of an image (captioning), or answering questions based on an image (visual-question answering). Nevertheless, their potential for recognition tasks [12], which usually require a more concise, structured output, remains underexplored. The focus of this paper is to explore their application for the challenging task of web-scale entity recognition. A recent benchmark, Open-domain Visual Entity recognitionN (OVEN) [12], challenges models to associate an image with a Wikipedia entity from a pool of over six million entities. Models must establish a robust association between images across millions of coarse-grained and fine-grained entities, encompassing a wide spectrum of concepts such as animals, buildings, locations, and a multitude of others [12]." + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 315, + 200, + 462, + 314 + ], + "blocks": [ + { + "bbox": [ + 315, + 200, + 462, + 314 + ], + "lines": [ + { + "bbox": [ + 315, + 200, + 462, + 314 + ], + "spans": [ + { + "bbox": [ + 315, + 200, + 462, + 314 + ], + "type": "image", + "image_path": "71441a3ecf82a7a85c4fedd9224c40da2d4e966b6fd20e143ebd532d5d9457b5.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 319, + 547, + 365 + ], + "lines": [ + { + "bbox": [ + 305, + 319, + 547, + 365 + ], + "spans": [ + { + "bbox": [ + 305, + 319, + 547, + 365 + ], + "type": "text", + "content": "Figure 1. We introduce GER, a novel generative paradigm for web-scale visual entity recognition. We create compact semantic codes for each entity, and learn to auto-regressively generate them for a given query image at inference." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 467, + 201, + 526, + 313 + ], + "blocks": [ + { + "bbox": [ + 467, + 201, + 526, + 313 + ], + "lines": [ + { + "bbox": [ + 467, + 201, + 526, + 313 + ], + "spans": [ + { + "bbox": [ + 467, + 201, + 526, + 313 + ], + "type": "image", + "image_path": "94363eac2716d72aa979169ee926daf310567333168b0f6f9e50906de07abac2.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 376, + 546, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 376, + 546, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 376, + 546, + 616 + ], + "type": "text", + "content": "Traditionally, the predominant methods employed to address the challenge of visual entity recognition have revolved around either classification or contrastive dual-encoder paradigm like CLIP [32]. While classification offers a straightforward approach, it grapples with limitations when confronted with extensive label spaces such as that of OVEN, resulting in substantial parameter counts and practical engineering complexities. The dual-encoder approach on the other hand, learns a unified image-text feature space, thereby facilitating efficient nearest neighbor searches for recognition. Nonetheless, this approach exhibits its own drawbacks: (a) it does not directly optimize for the final recognition task but instead relies on indirect optimization through contrastive loss where a set of negative data has to be subsampled at training time [11, 29, 32], (b) compressing either the image or text into an embedding vector results in loss of information, detrimentally affecting performance for fine-grained recognition [15] and (c) the memory requirements for storing dense representations scale proportionally with the size of the entity set." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": "These challenges of the dual-encoder paradigm have kindled interest in alternative strategies. Notably, in Natural Language Processing (NLP) domain, recent works challenge the dual-encoder approach and use generative models instead for information retrieval [6, 25, 31, 33, 41, 42]. These works represent each element of the corpus by a compact code of integers, and learn an auto-regressive generative model to decode the target code for a given query. This" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 703, + 288, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 703, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 703, + 288, + 713 + ], + "type": "text", + "content": "Code: github.com/google-research/scenic/tree/main/scenic/projects/gerald" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17313" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "content": "paradigm promises to overcome some drawbacks of dual-encoders by simplifying the retrieval pipeline such that the training and inference objectives are the same, and directly encoding the corpus within the model's parameters. Also as an alternative to dual encoders, OVEN paper [12] showcases the feasibility of extending a generative image captioning model [5] for visual entity recognition by matching the generated caption to one of the Wikipedia entity texts [34]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 170, + 289, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 170, + 289, + 529 + ], + "spans": [ + { + "bbox": [ + 46, + 170, + 289, + 529 + ], + "type": "text", + "content": "Inspired by these recent explorations, we propose a Generative Entity Recognition (GER) framework (illustrated in Fig. 1) to facilitate end-to-end visual entity recognition by leveraging generative auto-regressive models. Specifically, we represent each Wikipedia entity with a code, i.e. a short sequence of integers. Then, we train models to predict an entity from an input image by auto-regressively generating the code corresponding to the target entity. We find that creating unAmbiguous, Language-based and Discriminative (ALD) entity codes results in the best variant of our GER framework, which we denote by GER-ALD. In fact, while we observe that unstructured \"atomic\" codes work well in some scenarios, they fail when training data or model capacity are limited or more importantly, when the entity set reaches the million scale (see Sec. 4.4.1). Plus, they cannot generalize to new entities. In contrast, we find that semantically-structured codes based on language improve upon atomic codes by leveraging generic concepts shared across related entities (see example in Fig. 1 with \"Black colobus\" and \"Black-and-white colobus\" sharing common code tokens). A simple way of creating codes based on language is to directly tokenize [20] the entity name, which is akin to image captioning where the entity name is used as a caption [6, 12]. However, we find that such tokenized entity names contain clutter and noisy information, all the more so when the entity name is long (see Sec. 4.4.2). Our GER-ALD method improves over this simple captioning baseline by decoding only the most discriminative part of the tokenized entity name, i.e. the part which makes the considered entity name the most different compared to all other entities." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 531, + 289, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 531, + 289, + 674 + ], + "spans": [ + { + "bbox": [ + 46, + 531, + 289, + 674 + ], + "type": "text", + "content": "Finally, we also propose an entity-based pre-training to condition the GER models to web-scale entity recognition. Inspired by recent advances in retrieval-based methods [15, 23], we retrieve a subset of images from a large-scale image-text dataset typically used for captioning or contrastive pre-training [5] and re-purpose it by replacing the original text captions with related OVEN entity names. Overall, our experiments demonstrate the efficacy of the proposed GER paradigm: GER-ALD outperforms previously published numbers on OVEN benchmark [12] by " + }, + { + "bbox": [ + 46, + 531, + 289, + 674 + ], + "type": "inline_equation", + "content": "+6.7" + }, + { + "bbox": [ + 46, + 531, + 289, + 674 + ], + "type": "text", + "content": " top-1 accuracy, while using " + }, + { + "bbox": [ + 46, + 531, + 289, + 674 + ], + "type": "inline_equation", + "content": "42\\times" + }, + { + "bbox": [ + 46, + 531, + 289, + 674 + ], + "type": "text", + "content": " less parameters. In summary, our contributions are as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 677, + 286, + 712 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 57, + 677, + 286, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 677, + 286, + 700 + ], + "spans": [ + { + "bbox": [ + 57, + 677, + 286, + 700 + ], + "type": "text", + "content": "- a generative entity recognition framework (GER) to facilitate end-to-end visual entity recognition;" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 702, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 702, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 702, + 286, + 712 + ], + "type": "text", + "content": "- an innovative strategy for encoding Wikipedia enti" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 324, + 72, + 545, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 72, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 324, + 72, + 545, + 95 + ], + "type": "text", + "content": "ties into unambiguous language-based discriminative (ALD) codes that are highly effective for GER;" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 316, + 97, + 545, + 155 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 316, + 97, + 545, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 97, + 545, + 118 + ], + "spans": [ + { + "bbox": [ + 316, + 97, + 545, + 118 + ], + "type": "text", + "content": "- an entity-based pre-training process without requiring human intervention;" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 120, + 545, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 120, + 545, + 155 + ], + "spans": [ + { + "bbox": [ + 316, + 120, + 545, + 155 + ], + "type": "text", + "content": "- state-of-the-art results in challenging web-scale OVEN entity recognition and on-par performance to traditional classifiers in smaller-scale label-space scenarios." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 173, + 389, + 185 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 173, + 389, + 185 + ], + "spans": [ + { + "bbox": [ + 306, + 173, + 389, + 185 + ], + "type": "text", + "content": "2. Related work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 195, + 545, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 195, + 545, + 422 + ], + "spans": [ + { + "bbox": [ + 304, + 195, + 545, + 422 + ], + "type": "text", + "content": "Visual entity recognition aims to recognize classes, or entities given visual inputs [35]. Granularity of visual entity recognition tasks varies from every-day generic objects [8, 9], to fine-grained domains, such as birds [44], dogs [17], cars [18], food [4], landmarks [47], faces [50] and natural world species [43]. Some challenges for the visual entity recognition tasks include imbalanced training classes following a long-tailed distribution [24], or noisy training labels [22]. Recent work [12] proposes a new, web-scale dataset for open-domain entity recognition. This challenging benchmark contains 6M entity names derived from Wikipedia page titles, including coarse-grained and fine-grained entities, encompassing a wide spectrum of concepts such as animals, buildings, organizations, landmarks, and a multitude of other. The authors show that generative captioning models (i.e. PaLI [5]) outperform dual encoder models for large-scale entity recognition. In this paper, we build upon this observation, and study generative models for accurate and efficient entity recognition." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "text", + "content": "Extreme classification tackles entity recognition specifically at a very large scale with a pure classification approach [1, 3, 26]. Typical approaches explore strategies for scaling to the hundred of thousands scale and preliminary results are even shown at million scale [1]. By leveraging generative image-to-text models, we propose a fresh perspective beyond traditional classification methods typically used in the context of large-scale visual entity recognition." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 522, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 545, + 713 + ], + "type": "text", + "content": "Generative auto-regressive retrieval methods are increasingly popular in NLP [6, 25, 31, 33, 41, 42]. GENRE retrieves Wikipedia entities by generating their names in an autoregressive fashion. Seminal work DSI [42] shows the benefit of learning to decode compact codes (created either randomly or with hierarchical k-means clustering) associated with each document. Neural Corpus Indexer [46] proposes a specific decoding scheme for generative retrieval and show the benefit of query augmentation by automatically generating training queries for documents to be indexed. TIGER [33] studies generative retrieval in the context of recommender systems. Finally, [31] conducts a systematic study of generative retrieval systems when scaled to millions of document passages. 
Only very few works explore this family of approaches in computer vision domain, and only in very small-scale and uni-modal scenarios [49]." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "17314" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 83, + 276, + 227 + ], + "blocks": [ + { + "bbox": [ + 52, + 72, + 269, + 82 + ], + "lines": [ + { + "bbox": [ + 52, + 72, + 269, + 82 + ], + "spans": [ + { + "bbox": [ + 52, + 72, + 269, + 82 + ], + "type": "text", + "content": "(a) ALD: creating unambiguous language-based discriminative entity codes" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 53, + 83, + 276, + 227 + ], + "lines": [ + { + "bbox": [ + 53, + 83, + 276, + 227 + ], + "spans": [ + { + "bbox": [ + 53, + 83, + 276, + 227 + ], + "type": "image", + "image_path": "4cc96816c58a6c1fb274e3abae0507b3fefa0dc7e816fca1b20527ce03259cbf.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 230, + 288, + 286 + ], + "lines": [ + { + "bbox": [ + 46, + 230, + 288, + 286 + ], + "spans": [ + { + "bbox": [ + 46, + 230, + 288, + 286 + ], + "type": "text", + "content": "Figure 2. Overview of GER-ALD method. (a) We utilize a text tokenizer to create compact and semantic codes, which represents each entity with short, but discriminative representations. (b) We learn a generative auto-regressive model, which learns to decode the correct code for given query image and text pair." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 294, + 102, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 294, + 102, + 306 + ], + "spans": [ + { + "bbox": [ + 47, + 294, + 102, + 306 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 314, + 287, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 314, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 46, + 314, + 287, + 399 + ], + "type": "text", + "content": "Our goal is to explore how to adapt generative autoregressive models to the task of visual entity recognition (GER). While previous works have shown preliminary signal that it is possible to repurpose autoregressive models for entity recognition by directly decoding entity names [6, 12], we propose a more effective strategy. An overview of our framework is in Fig. 2." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 406, + 157, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 406, + 157, + 418 + ], + "spans": [ + { + "bbox": [ + 47, + 406, + 157, + 418 + ], + "type": "text", + "content": "3.1. Problem definition" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "text", + "content": "Web-scale visual entity recognition. 
The Open-domain Visual Entity recognitioN (OVEN) [12] task consists of mapping input visual queries to one of the 6M English Wikipedia entities. More specifically, for a given image query " + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "inline_equation", + "content": "x_{v}" + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "text", + "content": " and text query " + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "text", + "content": ", the model needs to recognize the corresponding entity " + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "text", + "content": " among the set " + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "text", + "content": " of all possible entities. The purpose of the input text " + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "text", + "content": " is to achieve unambiguous recognition. For example, when several entities are represented in the query image " + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "inline_equation", + "content": "x_{v}" + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "text", + "content": ", the text query indicates which one needs to be recognized. Each entity " + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "inline_equation", + "content": "e \\in \\mathcal{E}" + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "text", + "content": " comes with an entity name, denoted by " + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "inline_equation", + "content": "t_{e}" + }, + { + "bbox": [ + 46, + 425, + 287, + 568 + ], + "type": "text", + "content": ", which corresponds to the title of the entity Wikipedia page." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "text", + "content": "Representing each entity with a code. 
In GER, we represent each entity " + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "text", + "content": " by a code denoted by " + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "inline_equation", + "content": "c^e = \\{c_1^e,\\dots,c_L^e\\} \\in [[1,V]]^L" + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "text", + "content": " is the length of the code and " + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "text", + "content": " is the size of the vocabulary of all integer values that each code token " + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "inline_equation", + "content": "c_i^e" + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "text", + "content": " can take. This forms up to " + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "inline_equation", + "content": "V^L" + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "text", + "content": " unique codes. Note that vanilla image classification and captioning baselines can both be cast into this code formulation. In fact, with " + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "inline_equation", + "content": "L = 1" + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "inline_equation", + "content": "V = |\\mathcal{E}|" + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "text", + "content": ", the codes are equivalent to the labels used in standard multi-class classification. On the other hand, if each code token value in " + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "inline_equation", + "content": "[[1,V]]" + }, + { + "bbox": [ + 46, + 568, + 288, + 714 + ], + "type": "text", + "content": " maps to a (sub-)word in a pre-defined vocabulary [20], then the codes simply correspond to standard tokenized text used in captioning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 109 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 109 + ], + "type": "text", + "content": "models [19, 39, 45]. In the following paragraphs, we detail GER-ALD, our most effective strategy for building codes " + }, + { + "bbox": [ + 304, + 72, + 545, + 109 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 72, + 545, + 109 + ], + "type": "text", + "content": " to represent all 6M English Wikipedia entities." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 118, + 499, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 118, + 499, + 129 + ], + "spans": [ + { + "bbox": [ + 305, + 118, + 499, + 129 + ], + "type": "text", + "content": "3.2. 
GER-ALD: Creating ALD codes for GER" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 136, + 545, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 136, + 545, + 220 + ], + "spans": [ + { + "bbox": [ + 304, + 136, + 545, + 220 + ], + "type": "text", + "content": "We design the code set " + }, + { + "bbox": [ + 304, + 136, + 545, + 220 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 136, + 545, + 220 + ], + "type": "text", + "content": " so that it has three properties which we find are important for effective GER models: i) semantically structured thanks to language, ii) discriminative and compact, and iii) unambiguous. Our algorithm to create such unambiguous, language-based and discriminative codes, called ALD, is illustrated in Fig. 2 (a) and described in pseudo-code in Algorithm 1 of the Appendix." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "text", + "content": "Semantic tokens based on language. We find that entity codes " + }, + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "text", + "content": " benefit from following a semantic structure, especially in scenarios where memorizing unstructured atomic codes is difficult. We show in Sec. 4.4.1 that using unstructured atomic codes fail when the amount of training data or the model capacity are limited or, of particular interest, when the entity set size increases to the million scale (see Fig. 3). Intuitively, we want entities that are semantically similar to have some overlapping code tokens. For example, we wish that entities " + }, + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "inline_equation", + "content": "e = \\mathrm{Q}521977" + }, + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "text", + "content": " with corresponding name " + }, + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{Q}521977} =" + }, + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "text", + "content": " \"Black colobus\" and " + }, + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "inline_equation", + "content": "e = \\mathrm{Q}358813" + }, + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "text", + "content": " with corresponding name " + }, + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{Q}358813} =" + }, + { + "bbox": [ + 304, + 220, + 545, + 387 + ], + "type": "text", + "content": " \"Black-and-white colobos\" to share some code tokens, given that these correspond to two close species." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "text", + "content": "A simple yet effective way of having semantic codes is to tokenize the entity names based on text tokenizers [6, 19, 20, 39]. If each of the sub-words in the entity names are mapped to an integer representing this sub-word, then entities Q358813 and Q521977 naturally share code tokens: those representing the phrase \"colobus\". 
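To make the shared-token idea concrete, here is a small illustration with a toy subword tokenizer. The actual system uses GIT's WordPiece tokenizer (vocabulary of 30,522 sub-words), so the exact splits below are only indicative.

```python
# Toy illustration: semantically related entity names share sub-word tokens.
# A real setup would call the GIT/BERT WordPiece tokenizer instead of this stub.
TOY_VOCAB = {"black": 1, "-": 2, "and": 3, "white": 4, "col": 5, "##ob": 6, "##us": 7}

def toy_tokenize(name):
    # Crude stand-in: lowercase, split on spaces/hyphens, split "colobus" into pieces.
    pieces = []
    for word in name.lower().replace("-", " - ").split():
        if word == "colobus":
            pieces += ["col", "##ob", "##us"]
        else:
            pieces.append(word)
    return [TOY_VOCAB[p] for p in pieces]

codes = {"Black colobus": toy_tokenize("Black colobus"),
         "Black-and-white colobus": toy_tokenize("Black-and-white colobus")}
shared = set(codes["Black colobus"]) & set(codes["Black-and-white colobus"])
print(shared)  # tokens for "black", "col", "##ob", "##us" are shared by both entities
```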
We denote by " + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "inline_equation", + "content": "\\Phi(.)" + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "text", + "content": " an off-the-shelf text tokenizer with a vocabulary of " + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "inline_equation", + "content": "V_{\\Phi}" + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "text", + "content": " sub-words such that " + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "inline_equation", + "content": "\\Phi(t_e) = \\{y_1^e, \\dots, y_{L_e}^e\\} \\in [[1, V_{\\Phi}]]^{L_e}" + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "inline_equation", + "content": "L_e" + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "text", + "content": " is the length of the tokenized entity name " + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "inline_equation", + "content": "\\Phi(t_e)" + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "text", + "content": ". In practice we use the same language tokenizer as GIT [45] for " + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "inline_equation", + "content": "\\Phi(.)" + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "text", + "content": " and have a vocabulary size of " + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "inline_equation", + "content": "V = V_{\\Phi} = 30522" + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "text", + "content": ". We refer to the baseline of using codes " + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 388, + 546, + 581 + ], + "type": "text", + "content": " created by simple tokenization of the entity name as GER-CAPTION (i.e. we treat the entity name as a caption) [6]. We show in the following paragraph how GER-ALD codes differ from such GER-CAPTION codes by making them more compact and discriminative." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": "Discriminative and compact codes. Our goal is to build short and highly discriminative codes because they are easier to learn for the model, as validated by our experiments in Sec. 4.4.2. For example, the tokenized entity name " + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\Phi(t_{\\mathrm{Q358813}}) = \\Phi(\\text{\"Black-and-white colobus}\")" + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": " counts " + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{Q358813}} = 8" + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": " tokens, but clearly not all 8 tokens are important to make this entity discriminative compared to all other existing entities. Hence, we choose to represent each entity with the bare minimum, removing all the clutter which is not only non-discriminative but also adds noise. 
We achieve this by selecting the most discriminative and rarest tokens" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "17315" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": "within the tokenized entity name. Specifically, we compute the frequency " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "f_{v}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " of each token value " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "v \\in [1, V]" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " in the vocabulary over the entire corpus of tokenized entity names " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "\\{\\Phi(t_{e})\\}_{e \\in \\mathcal{E}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": ". We have " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "f_{v} = \\frac{n_{v}}{\\sum_{u=1}^{V} n_{u}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "n_{v}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " is the number of times " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " appears in " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "\\{\\Phi(t_{e})\\}_{e \\in \\mathcal{E}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": ". We create an ALD code " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "c_{e}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " for each entity by keeping only the " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "(L-1)" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " tokens with the lowest frequencies and discarding the other ones. For example for entity Q358813, the 3 tokens with the lowest frequencies are \"col\", \"ob\" and \"white\". Interestingly, these 3 most discriminative tokens appear at the end of the code for GER-CAPTION. By contrast, they appear right at the beginning of the code for GER-ALD and they constitute the only tokens to be decoded by the model, which intuitively explains the improved performance of GER-ALD codes, as analyzed later in Sec. 4.4.2 especially when entities have long names (see Fig. 4). 
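The token-frequency selection just described can be sketched as follows. This is a simplified reading of the procedure (Algorithm 1 in the appendix is the reference): token frequencies are computed over all tokenized entity names and each entity keeps its (L-1) rarest tokens; the final L-th disambiguation token, described in the next paragraph, is omitted here.

```python
# Sketch of ALD token selection: keep the (L-1) rarest tokens of each tokenized
# entity name, ordered from rarest to most frequent. The extra L-th token used
# for disambiguation (see "Unambiguous codes") is left out for brevity.
from collections import Counter

def ald_prefixes(tokenized_names, code_length):
    """tokenized_names: dict entity -> list of integer tokens Phi(t_e)."""
    counts = Counter(tok for toks in tokenized_names.values() for tok in toks)
    total = sum(counts.values())
    freq = {v: n / total for v, n in counts.items()}       # f_v = n_v / sum_u n_u

    prefixes = {}
    for entity, toks in tokenized_names.items():
        # Simplifying assumption: keep one occurrence per token value, sorted by rarity.
        ranked = sorted(set(toks), key=lambda v: freq[v])
        prefixes[entity] = ranked[: code_length - 1]        # the (L-1) most discriminative tokens
    return prefixes
```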
Finally an interesting by-product of using short codes is that they are faster to decode (the complexity of decoding is " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(L^{2})" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": ") and require less memory footprint to store. Unambiguous codes. Note that several entities might share the same least frequent " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "(L-1)^{\\text{th}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " tokens. In this case their code are exactly identical up to the " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "(L-1)^{\\text{th}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " token. We use the last " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "L^{\\text{th}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " token to ensure that each entity has a unique code: we greedily assign the last code token " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "c_{L}^{e}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " to the next least frequent word of the tokenized entity name until the code " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "c_{e}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " is different from all existing codes. If this still fails to create a unique code, we assign " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "c_{L}^{e}" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " to a random token value " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "v'" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": " so that the resulting code is unique. With code length " + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "inline_equation", + "content": "L = 4" + }, + { + "bbox": [ + 47, + 72, + 289, + 413 + ], + "type": "text", + "content": ", only 0.5% of the entities use a random token value." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 420, + 111, + 433 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 420, + 111, + 433 + ], + "spans": [ + { + "bbox": [ + 47, + 420, + 111, + 433 + ], + "type": "text", + "content": "3.3. Training" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 438, + 287, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 438, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 47, + 438, + 287, + 487 + ], + "type": "text", + "content": "In this section, we describe the model used to decode entity codes from an input image-text pair. Importantly, we also introduce our entity-based pre-training to condition the generative model to the task of entity recognition." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "spans": [ + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": "Auto-regressive generative models. 
We build upon GIT [45], an auto-regressive image-to-text generative model. The query image-text pair " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "(x_v, x_t)" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " is transformed into a set of " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": "-dimensional embeddings using a visual encoder for " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "x_v" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " and the text tokenizer " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "\\Phi(.)" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "x_t" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": ". The resulting output is represented by " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_v \\in \\mathbb{R}^{N_v \\times d}" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " (resp. " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_t \\in \\mathbb{R}^{N_t \\times d}" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": ") for image (resp. text) tokens. We then input " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_v" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_t" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " to a decoder network " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "g(.)" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " whose task is to decode the next code token " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "c_i^e" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": ", conditioned on the previous tokens " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "c_j^e" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": ". 
Each code token value " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "\\mathbb{I}[1, V]" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " maps to a learnable " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": "-dimensional vector " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_v" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " (gathered in the embedding matrix " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "\\mathbf{Y} \\in \\mathbb{R}^{(V+1) \\times d}" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_0" + }, + { + "bbox": [ + 47, + 487, + 289, + 644 + ], + "type": "text", + "content": " corresponds to the \"beginning of code\" token). We train with a language modeling loss:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 83, + 645, + 253, + 677 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 645, + 253, + 677 + ], + "spans": [ + { + "bbox": [ + 83, + 645, + 253, + 677 + ], + "type": "interline_equation", + "content": "\\mathcal {L} ^ {e} = \\frac {1}{L} \\sum_ {i = 1} ^ {L} \\ell (c _ {i} ^ {e}, g ([ \\mathbf {X} _ {v}; \\mathbf {X} _ {t}; \\mathbf {Y} _ {0}; \\mathbf {Y} _ {c _ {0 < j < i}} ^ {e} ])", + "image_path": "e17161045428a5c30806fa9aff57314b45654fd246312431e846174f2abad922.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "inline_equation", + "content": "[;]" + }, + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "text", + "content": " corresponds to the concatenation operation in the first dimension and " + }, + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "text", + "content": " is the softmax cross-entropy loss with label-smoothing [27]. 
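For concreteness, the per-entity loss above reduces to an average over the L code positions of a label-smoothed softmax cross-entropy, given the decoder's logits under teacher forcing. The sketch below is a plain-NumPy rendering of that formula (the logits are assumed to come from g(.)); it is illustrative, not the Scenic implementation, and the smoothing convention is one common variant.

```python
import numpy as np

def label_smoothed_ce(logits, targets, smoothing=0.1):
    """Per-entity code loss: mean over L positions of label-smoothed cross-entropy.

    logits:  (L, V+1) decoder outputs under teacher forcing (one row per code position).
    targets: (L,) ground-truth code tokens c_1^e ... c_L^e.
    """
    num_pos, vocab = logits.shape
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # Smoothed target distribution: (1 - eps) on the true token, eps spread uniformly.
    soft = np.full((num_pos, vocab), smoothing / vocab)
    soft[np.arange(num_pos), targets] += 1.0 - smoothing
    return float(-(soft * log_probs).sum(axis=1).mean())
```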
We average " + }, + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^e" + }, + { + "bbox": [ + 47, + 677, + 288, + 715 + ], + "type": "text", + "content": " over a mini-batch and" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "learn the weights of the visual encoder, decoder " + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "inline_equation", + "content": "g(.)" + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": " and embedding matrix " + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}" + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": " through back-propagation. When decoding, we use beam search to obtain the best predicted entity coded. We find that we do not need to constrain the beam search to existing codes since more than " + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": " of the top-1 predictions are valid codes for converged GER models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 144, + 546, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 144, + 546, + 289 + ], + "spans": [ + { + "bbox": [ + 304, + 144, + 546, + 289 + ], + "type": "text", + "content": "Entity-based pre-training. Common auto-regressive models such as GIT [45] or PaLI [5] are pre-trained for descriptive captioning. As shown in Tab. 5 and Fig 9 of the Appendix, they generalize poorly to entity recognition. This is because of the task discrepancy between predicting a descriptive caption and predicting an entity name. In order to condition our models better for entity recognition, we propose to collect a significant number of entity-based pretraining images, each associated with a Wikipedia entity instead of a generic caption. However, such an entity-based pretraining dataset does not exist. We create it in an automatic way, without any human supervision." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 289, + 547, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 289, + 547, + 529 + ], + "spans": [ + { + "bbox": [ + 304, + 289, + 547, + 529 + ], + "type": "text", + "content": "To do so, we leverage existing large-scale image-caption datasets [37, 38]: unless specified otherwise we use WebLI [5]. For each Wikipedia entity, we retrieve in WebLI the image-caption pairs that best represent this entity and replace their original captions by this entity name [15, 23]. Specifically, we embed the 6M entity names of OVEN with a semantic text encoder [32] and find the top-" + }, + { + "bbox": [ + 304, + 289, + 547, + 529 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 289, + 547, + 529 + ], + "type": "text", + "content": " most similar captions in WebLI. We retrieve their corresponding images and replace their original captions by the considered entity name. We ensure that no image is assigned to multiple entities to avoid instability during training. 
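A rough sketch of this entity-based pre-training data construction is given below. It assumes precomputed, L2-normalized text embeddings for the entity names and for the original WebLI/LAION captions (the paper uses a semantic text encoder such as CLIP's); the helper names are hypothetical, and an approximate nearest-neighbour index would replace the brute-force similarity at the 6M-entity scale.

```python
import numpy as np

def build_entity_pretraining_set(entity_names, entity_emb, caption_emb, image_ids, k):
    """Assign to each entity the images of its k most similar captions.

    entity_emb:  (E, d) L2-normalized embeddings of the entity names.
    caption_emb: (N, d) L2-normalized embeddings of the original captions.
    image_ids:   list of N image identifiers aligned with caption_emb.
    Returns (image_id, entity_name) pairs: the original caption is replaced by the
    entity name, and each image is kept for at most one entity.
    """
    assigned = set()
    pairs = []
    for e, name in enumerate(entity_names):
        sims = caption_emb @ entity_emb[e]                 # cosine similarity to all captions
        for idx in np.argsort(-sims)[:k]:
            img = image_ids[int(idx)]
            if img not in assigned:                        # one entity per image
                assigned.add(img)
                pairs.append((img, name))
    return pairs
```

Varying k in this sketch is what yields the differently sized Entity-WebLI and Entity-LAION variants discussed in the following sentences.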
We vary the number of retrieved images " + }, + { + "bbox": [ + 304, + 289, + 547, + 529 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 289, + 547, + 529 + ], + "type": "text", + "content": " per entity from 2 to 100 to produce pre-training datasets of different sizes: from 11M up to 55M images (see Fig. 6). We denote by Entity-WebLI (resp. Entity-LAION) the resulting dataset used for entity-based pretraining, built from WebLI (resp. LAION [38]). This way of creating pre-training data is akin to the query generation techniques used for generative retrieval in NLP [46]. However, rather than generating a synthetic input, we simply retrieve input images from a large-scale dataset." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 537, + 372, + 549 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 537, + 372, + 549 + ], + "spans": [ + { + "bbox": [ + 306, + 537, + 372, + 549 + ], + "type": "text", + "content": "3.4. Baselines" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 556, + 545, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 556, + 545, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 556, + 545, + 700 + ], + "type": "text", + "content": "We compare our method to the following different baselines. Hierarchical classification. Solving million-scale entity recognition with classification is unpractical due to the very large number of classes. A workaround is to use hierarchical classifiers. As OVEN does not come with hierarchical labels we obtain a 3-level hierarchy through k-means of the 6M entity names encoded with sentence-T5 [28]. We train a multi-class classifier for each parent node in the hierarchy. To avoid training a huge number of different classification matrices, we learn a generic classifier matrix per level which is modified by learnable small modifiers depending on the path in the hierarchy." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 701, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 701, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 306, + 701, + 545, + 714 + ], + "type": "text", + "content": "Dual encoders. Another typical workaround to classifica" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17316" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 202 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 202 + ], + "type": "text", + "content": "tion is to rely on deep metric learning approaches [36] such as Noise Contrastive Estimation [11] and its InfoNCE variant [29] as used in popular dual encoder approaches [16, 32]. Dual encoders learn a unified image-text feature space with separate encoders, thereby facilitating efficient nearest neighbor searches for recognition. We use CLIP-L/14 [32]. Visual matching. We also experiment with pure visual matching baselines. We use off-the-shelf CLIP-L/14 visual encoder and Entity-WebLI (55M) dataset as the memory. 
We use " + }, + { + "bbox": [ + 47, + 72, + 287, + 202 + ], + "type": "inline_equation", + "content": "k = 500" + }, + { + "bbox": [ + 47, + 72, + 287, + 202 + ], + "type": "text", + "content": " for nearest neighbor search with majority voting as it obtains the best results on OVEN val set." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 204, + 286, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 204, + 286, + 227 + ], + "spans": [ + { + "bbox": [ + 47, + 204, + 286, + 227 + ], + "type": "text", + "content": "Captioning. We compare to Git-Large [45] or PaLI [5] image-to-text auto-regressive captioning models." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 228, + 287, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 228, + 287, + 373 + ], + "spans": [ + { + "bbox": [ + 46, + 228, + 287, + 373 + ], + "type": "text", + "content": "GER-baselines: alternative code creation strategies. We compare GER-ALD, i.e. the best variant of GER, with several alternatives. First, GER-ATOMIC refers to using atomic, completely unstructured codes, i.e. each code token " + }, + { + "bbox": [ + 46, + 228, + 287, + 373 + ], + "type": "inline_equation", + "content": "c_{i}^{e}" + }, + { + "bbox": [ + 46, + 228, + 287, + 373 + ], + "type": "text", + "content": " is randomly drawn from " + }, + { + "bbox": [ + 46, + 228, + 287, + 373 + ], + "type": "inline_equation", + "content": "[1, V]^{L}" + }, + { + "bbox": [ + 46, + 228, + 287, + 373 + ], + "type": "text", + "content": " [42]. Second, we consider two alternatives using semantically structured codes: (i) GER-HKC where we embed the entity names with a pretrained text encoder before applying hierarchical k-means clustering on the resulting embeddings [42] and (ii) GER-CAPTION where we create a code by tokenizing the entity name with " + }, + { + "bbox": [ + 46, + 228, + 287, + 373 + ], + "type": "inline_equation", + "content": "\\Phi(\\cdot)" + }, + { + "bbox": [ + 46, + 228, + 287, + 373 + ], + "type": "text", + "content": " [6, 12]. Details on the baselines are in Appendix Sec. 6.4." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 382, + 128, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 382, + 128, + 396 + ], + "spans": [ + { + "bbox": [ + 47, + 382, + 128, + 396 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 402, + 287, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 402, + 287, + 439 + ], + "spans": [ + { + "bbox": [ + 46, + 402, + 287, + 439 + ], + "type": "text", + "content": "In this section, we detail our experimental setup, compare our method with state of the art and baselines, and finally present thorough analyses on code creation and pretraining." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 444, + 168, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 444, + 168, + 456 + ], + "spans": [ + { + "bbox": [ + 47, + 444, + 168, + 456 + ], + "type": "text", + "content": "4.1. Experimental setting" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 462, + 286, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 462, + 286, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 462, + 286, + 605 + ], + "type": "text", + "content": "OVEN dataset consists of 6,063,945 different entities [12]. 
We evaluate the models on the validation and test splits, by reporting the harmonic mean (HM) of top-1 accuracy scores between \"seen\" and \"unseen\" entities. Seen are entities present in the OVEN training set. Unseen entities are a subset of entities among the ones not present in the training set. The models are evaluated on a total of 3192 entities (1721 for seen and 1471 for unseen) for validation and 15888 entities (8355 for seen and 7533 for unseen) for test. We call the entities that the model is evaluated on by \"positive\" entities (i.e. the union of the 3192 validation and 15888 test entities) and all other entities by \"negative\" entities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 606, + 286, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 606, + 286, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 606, + 286, + 689 + ], + "type": "text", + "content": "Pretraining and finetuning. Unless specified otherwise, we pretrain our models on the entity-WebLI dataset, which we create considering all 6M entity names as described in Sec. 3.3. After this entity-based pretraining, the models are finetuned on OVEN training set which consists only of the \"seen\" entities. All implementation details are in Sec. 6 in Appendix and code is released in the ScENIC library [7]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 689, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 286, + 713 + ], + "type": "text", + "content": "Preventing data leakage. We remove pretraining images from Entity-WebLI and Entity-LAION with a cosine simi" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 305, + 70, + 547, + 212 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 547, + 212 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 547, + 212 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 547, + 212 + ], + "type": "table", + "html": "
<tr><th rowspan="2">Method</th><th rowspan="2">#par.(B)</th><th colspan="2">Pretraining</th><th colspan="3">OVEN test</th></tr>
<tr><th>dataset</th><th>#imgs</th><th>HM</th><th>seen</th><th>unseen</th></tr>
<tr><td colspan="7">Dual encoder approaches</td></tr>
<tr><td>CLIP ViT-L14</td><td>0.42</td><td>OpenAI</td><td>400M</td><td>5.2</td><td>5.6</td><td>4.9</td></tr>
<tr><td>CLIP Fusion ViT-L14</td><td>0.88</td><td>OpenAI</td><td>400M</td><td>8.4</td><td>33.6</td><td>4.8</td></tr>
<tr><td>CLIP2CLIP ViT-L14</td><td>0.86</td><td>OpenAI</td><td>400M</td><td>11.5</td><td>12.6</td><td>10.5</td></tr>
<tr><td colspan="7">Captioning approaches</td></tr>
<tr><td>GiT-Large</td><td>0.40</td><td>WebLI</td><td>100M</td><td>7.0</td><td>17.6</td><td>4.3</td></tr>
<tr><td>PaLI-3B</td><td>3</td><td>WebLI</td><td>1B</td><td>9.1</td><td>19.1</td><td>6.0</td></tr>
<tr><td>PaLI-17B</td><td>17</td><td>WebLI</td><td>1B</td><td>16.0</td><td>28.3</td><td>11.2</td></tr>
", + "image_path": "57c187a289560461613a0d4800de171f75af30d841a0cab3e7cf32cce8f33c02.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 306, + 224, + 545, + 252 + ], + "blocks": [ + { + "bbox": [ + 306, + 213, + 415, + 224 + ], + "lines": [ + { + "bbox": [ + 306, + 213, + 415, + 224 + ], + "spans": [ + { + "bbox": [ + 306, + 213, + 415, + 224 + ], + "type": "text", + "content": "Generative entity recognition" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 306, + 224, + 545, + 252 + ], + "lines": [ + { + "bbox": [ + 306, + 224, + 545, + 252 + ], + "spans": [ + { + "bbox": [ + 306, + 224, + 545, + 252 + ], + "type": "table", + "html": "
<tr><td>GER-ALD‡ (Ours)</td><td>0.40</td><td>Entity-LAION</td><td>41M</td><td>20.9</td><td>29.1</td><td>16.3</td></tr>
<tr><td>GER-ALD (Ours)</td><td>0.40</td><td>Entity-WebLI</td><td>55M</td><td>22.7</td><td>31.5</td><td>17.7</td></tr>
", + "image_path": "37ec99f5509283dc5ae6866bdfe4afac57f2ec6f5462ffb0f673ac82730735b5.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 256, + 547, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 256, + 547, + 334 + ], + "spans": [ + { + "bbox": [ + 304, + 256, + 547, + 334 + ], + "type": "text", + "content": "Table 1. Comparison with state-of-the-art approaches on OVEN entity test split. We report the harmonic mean (HM) of the seen and unseen splits (top-1 accuracy) after finetuning on OVEN training set. Numbers are taken from [12] except methods based on GiT-Large which are run by us. We indicate the total number of parameters of each model (\"# par.\") in billion and the pretraining dataset details. " + }, + { + "bbox": [ + 304, + 256, + 547, + 334 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 304, + 256, + 547, + 334 + ], + "type": "text", + "content": ": use only publicly available data." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 338, + 547, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 338, + 547, + 398 + ], + "spans": [ + { + "bbox": [ + 304, + 338, + 547, + 398 + ], + "type": "text", + "content": "larity (with CLIP-L/14 visual features) above 0.95 with any of the OVEN test or val images. We chose a 0.95 conservative threshold by looking at some examples: similarity 0.95 corresponds to conceptually similar images but clearly not duplicates (see Fig. 8 in Appendix)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 406, + 499, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 406, + 499, + 418 + ], + "spans": [ + { + "bbox": [ + 305, + 406, + 499, + 418 + ], + "type": "text", + "content": "4.2. Comparison with the state of the art" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "text", + "content": "In Tab. 1, we compare the performance of GER-ALD, our best GER variant, on the OVEN entity benchmark with previously published numbers after finetuning on the OVEN training set. We see that our method outperforms previously proposed approaches by significant margins. Notably, GER-ALD improves over the captioning model PALI-17B by " + }, + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "inline_equation", + "content": "+6.8" + }, + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "text", + "content": " top-1 HM test accuracy (a relative improvement of " + }, + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "inline_equation", + "content": "43\\%" + }, + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "text", + "content": ") while using " + }, + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "inline_equation", + "content": "42\\times" + }, + { + "bbox": [ + 304, + 424, + 545, + 520 + ], + "type": "text", + "content": " less parameters." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 527, + 455, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 527, + 455, + 540 + ], + "spans": [ + { + "bbox": [ + 305, + 527, + 455, + 540 + ], + "type": "text", + "content": "4.3. 
Comparison with baselines" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "text", + "content": "In Tab. 2, we compare GER-ALD with the different baselines described in Sec. 3.4. All baselines use exactly the same pretraining dataset entity-based WebLI (55M) and model architectures of comparable sizes." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": "Comparing GER to different paradigms. We see in Tab. 2 that GER outperforms strong captioning, dual-encoder, visual matching and hierarchical classification baselines, affirming its advantage in tackling web-scale visual entity recognition. Our superior performance compared to dual encoders aligns with previous works observing that CLIP struggles for fine-grained recognition [12, 15]. Due to query image and entity name similarities being captured only through a vector dot product, potentially fine-grained interactions are missed. Also, GER offers significant advan" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17317" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 70, + 289, + 225 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 289, + 225 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 289, + 225 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 289, + 225 + ], + "type": "table", + "html": "
<tr><td rowspan=2>Method</td><td colspan=3>Entity-based pretraining</td><td colspan=3>+ finetuning on seen</td></tr>
<tr><td>HM</td><td>seen</td><td>unseen</td><td>HM</td><td>seen</td><td>unseen</td></tr>
<tr><td>Dual encoders</td><td>9.2</td><td>8.9</td><td>9.4</td><td>16.3</td><td>24.3</td><td>12.3</td></tr>
<tr><td>Visual matching</td><td>16.2</td><td>15.5</td><td>17.1</td><td>16.4</td><td>15.7</td><td>17.2</td></tr>
<tr><td>Captioning</td><td>13.2</td><td>13.1</td><td>13.3</td><td>16.8</td><td>25.9</td><td>12.5</td></tr>
<tr><td>Hierarchical classif.</td><td>14.7</td><td>14.8</td><td>14.6</td><td>21.8</td><td>29.6</td><td>17.2</td></tr>
<tr><td colspan=7>Generative entity recognition</td></tr>
<tr><td>GER-ATOMIC</td><td>15.9</td><td>15.3</td><td>16.7</td><td>20.1</td><td>26.2</td><td>16.3</td></tr>
<tr><td>GER-CAPTION</td><td>14.3</td><td>16.5</td><td>12.6</td><td>20.7</td><td>26.8</td><td>16.9</td></tr>
<tr><td>GER-HKC</td><td>15.8</td><td>15.5</td><td>16.0</td><td>21.0</td><td>25.2</td><td>17.9</td></tr>
<tr><td>GER-ALD</td><td>17.7</td><td>18.3</td><td>17.2</td><td>22.7</td><td>31.5</td><td>17.7</td></tr>
", + "image_path": "5f1a4045442862784949dcc07bec9612cd9ecb6ebc0385496e946c2b61e42b60.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 230, + 288, + 285 + ], + "lines": [ + { + "bbox": [ + 47, + 230, + 288, + 285 + ], + "spans": [ + { + "bbox": [ + 47, + 230, + 288, + 285 + ], + "type": "text", + "content": "Table 2. Baseline comparisons. All baselines use exactly the same pretraining dataset Entity-WebLI (55M) and architectures of comparable number of parameters (" + }, + { + "bbox": [ + 47, + 230, + 288, + 285 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 47, + 230, + 288, + 285 + ], + "type": "text", + "content": "400M). All numbers are obtained with finetuning on seen split after entity-based pretraining. We report the Harmonic Mean of top-1 accuracy on OVEN test." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 295, + 287, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 295, + 287, + 331 + ], + "spans": [ + { + "bbox": [ + 46, + 295, + 287, + 331 + ], + "type": "text", + "content": "tages over dual encoders: its computational complexity is not a function of entity set size and it does not require to store entity dense embeddings." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 331, + 288, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 331, + 288, + 535 + ], + "spans": [ + { + "bbox": [ + 46, + 331, + 288, + 535 + ], + "type": "text", + "content": "Different GER variants. In Tab. 2, we compare different variants of GER: one variant using unstructured codes (GER-ATOMIC) and three variants using semantically-structured codes: GER-CAPTION, GER-HKC and GER-ALD. We observe that GER-ALD is the best performing variant, both after entity-based pretraining and after finetuning on the OVEN seen entities. Compared to GER-CAPTION, GER-ALD use codes that are more discriminative and compact, which improves the performance particularly for entities with long names (see Sec. 4.4.2). Compared to GER-ATOMIC, GER-ALD codes yield a semantic structure which is crucial for million-scale label-space as shown in Sec. 4.4.1. GER-HKC model also gets strong performance but relies on an off-the-shelf semantic text encoder which makes the approach more complex and costly compared to GER-ALD. GER-HKC is a first step towards learning codes and we hope future works will propose original and better code creation strategies [41]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 541, + 200, + 554 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 541, + 200, + 554 + ], + "spans": [ + { + "bbox": [ + 47, + 541, + 200, + 554 + ], + "type": "text", + "content": "4.4. Analysis and ablation study" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 559, + 287, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 559, + 287, + 597 + ], + "spans": [ + { + "bbox": [ + 46, + 559, + 287, + 597 + ], + "type": "text", + "content": "In this section, unless specified otherwise, we report the accuracy on the OVEN validation set [12] evaluated after pretraining on Entity-WebLI (27M), i.e. no OVEN finetuning." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 602, + 206, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 602, + 206, + 613 + ], + "spans": [ + { + "bbox": [ + 47, + 602, + 206, + 613 + ], + "type": "text", + "content": "4.4.1 Semantic versus atomic codes" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 617, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 287, + 689 + ], + "type": "text", + "content": "In Fig. 3 (and Appendix Tab. 6), we report the relative improvement of semantically-structured codes (GER-ALD) compared to unstructured codes (GER-ATOMIC). We vary pretraining data size, model capacity and label-space size. A relative improvement of " + }, + { + "bbox": [ + 46, + 617, + 287, + 689 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 46, + 617, + 287, + 689 + ], + "type": "text", + "content": " means that the performance of GER-ALD doubles compared to GER-ATOMIC." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "Limited pretraining data. In Fig. 3 (left), we see that semantic codes outperform atomic codes when the amount of" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 72, + 396, + 171 + ], + "blocks": [ + { + "bbox": [ + 310, + 72, + 396, + 171 + ], + "lines": [ + { + "bbox": [ + 310, + 72, + 396, + 171 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 396, + 171 + ], + "type": "image", + "image_path": "fc485a04e1fa39ed6140465682b580b11f57b5d11e34249509069af703d73f1f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 174, + 545, + 240 + ], + "lines": [ + { + "bbox": [ + 304, + 174, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 304, + 174, + 545, + 240 + ], + "type": "text", + "content": "Figure 3. Semantic vs atomic codes. We report the relative improvement in " + }, + { + "bbox": [ + 304, + 174, + 545, + 240 + ], + "type": "inline_equation", + "content": "\\%" + }, + { + "bbox": [ + 304, + 174, + 545, + 240 + ], + "type": "text", + "content": " of GER-ALD compared to GER-ATOMIC in 3 scenarios: (i) limited pretraining data, (ii) limited model capacity and (iii) massive-scale label-space. Plots share a common experiment shown by which uses a pretraining dataset size of " + }, + { + "bbox": [ + 304, + 174, + 545, + 240 + ], + "type": "inline_equation", + "content": "27M" + }, + { + "bbox": [ + 304, + 174, + 545, + 240 + ], + "type": "text", + "content": ", Large model and 6M entity set. The setting reported in Tab. 2 is " + }, + { + "bbox": [ + 304, + 174, + 545, + 240 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 304, + 174, + 545, + 240 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 399, + 72, + 472, + 171 + ], + "blocks": [ + { + "bbox": [ + 399, + 72, + 472, + 171 + ], + "lines": [ + { + "bbox": [ + 399, + 72, + 472, + 171 + ], + "spans": [ + { + "bbox": [ + 399, + 72, + 472, + 171 + ], + "type": "image", + "image_path": "49ddd9ead505f53ad5aa89fa2bf94a3d9e74c2d35943e33683061e45dc5eb76c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 474, + 72, + 539, + 171 + ], + "blocks": [ + { + "bbox": [ + 474, + 72, + 539, + 171 + ], + "lines": [ + { + "bbox": [ + 474, + 72, + 539, + 171 + ], + "spans": [ + { + "bbox": [ + 474, + 72, + 539, + 171 + ], + "type": "image", + "image_path": "a321e732990e2f78376a325c300a9db02fec44f11189a5cbedc7b97934bc8a73.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 246, + 545, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 246, + 545, + 317 + ], + "spans": [ + { + "bbox": [ + 304, + 246, + 545, + 317 + ], + "type": "text", + "content": "data available for pretraining diminishes. In fact, the results reported in Tab. 2 corresponds to the most favorable scenario for GER-ATOMIC with 55M pretraining datapoints (represented by " + }, + { + "bbox": [ + 304, + 246, + 545, + 317 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 304, + 246, + 545, + 317 + ], + "type": "text", + "content": " in Fig. 3). The relative improvement in this case is still of " + }, + { + "bbox": [ + 304, + 246, + 545, + 317 + ], + "type": "inline_equation", + "content": "14\\%" + }, + { + "bbox": [ + 304, + 246, + 545, + 317 + ], + "type": "text", + "content": " while it grows to more than " + }, + { + "bbox": [ + 304, + 246, + 545, + 317 + ], + "type": "inline_equation", + "content": "1000\\%" + }, + { + "bbox": [ + 304, + 246, + 545, + 317 + ], + "type": "text", + "content": " when the amount of data is reduced by " + }, + { + "bbox": [ + 304, + 246, + 545, + 317 + ], + "type": "inline_equation", + "content": "5\\times" + }, + { + "bbox": [ + 304, + 246, + 545, + 317 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 318, + 545, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 318, + 545, + 378 + ], + "spans": [ + { + "bbox": [ + 304, + 318, + 545, + 378 + ], + "type": "text", + "content": "Limited model capacity. In Fig. 3 (middle), we see that the model struggles to learn unstructured codes when its capacity is reduced. When considering the small version of our model (114M parameters), the performance with atomic codes is very poor: 0.7 top-1 accuracy." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 378, + 545, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 378, + 545, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 378, + 545, + 545 + ], + "type": "text", + "content": "Web-scale label-space. In Fig. 3 (right), we vary the number of entities for pretraining. The \"positive\" entities (see Sec. 4.1) are always included in the pretraining set and the amount of \"negative\" entities is increased, effectively acting as distractors. First, we see in Fig. 
3 (right) that for relatively small-scale label-space " + }, + { + "bbox": [ + 304, + 378, + 545, + 545 + ], + "type": "inline_equation", + "content": "(\\leq 100k)" + }, + { + "bbox": [ + 304, + 378, + 545, + 545 + ], + "type": "text", + "content": ", the benefit of having semantic codes versus atomic is small. In this regime we find that the model can memorize all the entities without the need for semantic structure between them. This aligns with the findings of DSI [42]. We evaluate GER further in small label-spaces in Sec. 4.5. However, we see that in million-scale label-space regime, semantic structure becomes important and significantly improves the performance compared to atomic codes: " + }, + { + "bbox": [ + 304, + 378, + 545, + 545 + ], + "type": "inline_equation", + "content": "+26\\%" + }, + { + "bbox": [ + 304, + 378, + 545, + 545 + ], + "type": "text", + "content": " relative improvement." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 545, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 545, + 628 + ], + "type": "text", + "content": "Overall, we find that GER-ATOMIC fail to learn unstructured codes when the amount of pretraining data or architecture capacity are reduced, or when the label-space increases to million-scale. Unlike GER-ATOMIC, GER-ALD succeed in these scenarios thanks to the semantic structure easing the learning. Next, we analyze how GER-ALD improves over another type of semantic codes: GER-CAPTION codes." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 638, + 460, + 650 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 638, + 460, + 650 + ], + "spans": [ + { + "bbox": [ + 306, + 638, + 460, + 650 + ], + "type": "text", + "content": "4.4.2 ALD versus captioning codes" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "content": "We analyze why unambiguous, language-based and discriminative codes (GER-ALD) are more effective for entity recognition than directly decoding the entity name (GER-CAPTION). In Fig. 
5 (left), we report the performance of GER-ALD and GER-CAPTION when varying the length " + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "content": " of" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17318" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 72, + 157, + 152 + ], + "blocks": [ + { + "bbox": [ + 50, + 72, + 157, + 152 + ], + "lines": [ + { + "bbox": [ + 50, + 72, + 157, + 152 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 157, + 152 + ], + "type": "image", + "image_path": "5e982a64297df540f5e1649720c37e332c6e861af130b3ec3da376952bfc2789.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 152, + 544, + 175 + ], + "lines": [ + { + "bbox": [ + 46, + 152, + 544, + 175 + ], + "spans": [ + { + "bbox": [ + 46, + 152, + 544, + 175 + ], + "type": "text", + "content": "Figure 4. Accuracy per entity name length for GER-ALD versus GER-CAPTION codes. (left): Accuracy averaged per entity name length. (right): Qualitative examples of predictions for long entity names. Code tokens are symbolized between brackets." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 159, + 72, + 286, + 151 + ], + "blocks": [ + { + "bbox": [ + 159, + 72, + 286, + 151 + ], + "lines": [ + { + "bbox": [ + 159, + 72, + 286, + 151 + ], + "spans": [ + { + "bbox": [ + 159, + 72, + 286, + 151 + ], + "type": "image", + "image_path": "eedacf2d00bdff76ba9f66b7ef25772fc19c9946e84f65608ef91e873ad609a0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 287, + 72, + 403, + 151 + ], + "blocks": [ + { + "bbox": [ + 287, + 72, + 403, + 151 + ], + "lines": [ + { + "bbox": [ + 287, + 72, + 403, + 151 + ], + "spans": [ + { + "bbox": [ + 287, + 72, + 403, + 151 + ], + "type": "image", + "image_path": "c8771de9c408fdd4a43b14eadb5f4ac86d327ff9e4ecc5d562866f15ec39ff23.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 407, + 72, + 545, + 151 + ], + "blocks": [ + { + "bbox": [ + 407, + 72, + 545, + 151 + ], + "lines": [ + { + "bbox": [ + 407, + 72, + 545, + 151 + ], + "spans": [ + { + "bbox": [ + 407, + 72, + 545, + 151 + ], + "type": "image", + "image_path": "f3a380580b731f41ddcf38c9dd6375c81f4c893e3b55d9dcd30452b178e646d2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 50, + 185, + 167, + 290 + ], + "blocks": [ + { + "bbox": [ + 50, + 185, + 167, + 290 + ], + "lines": [ + { + "bbox": [ + 50, + 185, + 167, + 290 + ], + "spans": [ + { + "bbox": [ + 50, + 185, + 167, + 290 + ], + "type": "image", + "image_path": "7a8b000100afffaa3a3ead775c80cbe07506e2dad8956364ae73be324f6c333d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 292, + 287, + 347 + ], + "lines": [ + { + "bbox": [ + 46, + 292, + 287, + 347 + ], + 
"spans": [ + { + "bbox": [ + 46, + 292, + 287, + 347 + ], + "type": "text", + "content": "Figure 5. ALD versus captioning codes. (left): Effect of different code lengths for GER-ALD and GER-CAPTION codes. (right): Cumulative distribution function (CDF) of (in green) the position of the least frequent token in the tokenized entity name and of (in pink) the length of tokenized entity name." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 167, + 185, + 283, + 289 + ], + "blocks": [ + { + "bbox": [ + 167, + 185, + 283, + 289 + ], + "lines": [ + { + "bbox": [ + 167, + 185, + 283, + 289 + ], + "spans": [ + { + "bbox": [ + 167, + 185, + 283, + 289 + ], + "type": "image", + "image_path": "60dcc266746f7b206eec86d4e9d14c65ebf83741a0337a71bc15e370e27cbc33.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 353, + 287, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 353, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 46, + 353, + 287, + 437 + ], + "type": "text", + "content": "the codes. Fixing a code length " + }, + { + "bbox": [ + 46, + 353, + 287, + 437 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 46, + 353, + 287, + 437 + ], + "type": "text", + "content": " to a caption corresponds to keeping only the first " + }, + { + "bbox": [ + 46, + 353, + 287, + 437 + ], + "type": "inline_equation", + "content": "L^{\\text{th}}" + }, + { + "bbox": [ + 46, + 353, + 287, + 437 + ], + "type": "text", + "content": " tokens of the entity name. In Fig. 5 (right), we report the cumulative distribution functions (CDF) of (i) the position within the tokenized entity name of the least frequent token among the entire corpus (as described in Sec. 3.2) and (ii) the total number of tokens in the tokenized entity name (" + }, + { + "bbox": [ + 46, + 353, + 287, + 437 + ], + "type": "inline_equation", + "content": "L_{e}" + }, + { + "bbox": [ + 46, + 353, + 287, + 437 + ], + "type": "text", + "content": " in the notations of Sec. 3.2))." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "text", + "content": "Discriminative tokens versus number of tokens. We observe in Fig. 5 (left) that the performance of GER-CAPTION increases drastically from " + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "inline_equation", + "content": "L = 2" + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "inline_equation", + "content": "L = 4" + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "text", + "content": ". At the same time, we see in Fig. 5 (right) that for " + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "inline_equation", + "content": "L = 4" + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "text", + "content": ", less than half of the entity names are considered in full while more than " + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "text", + "content": " of the GER-CAPTION codes contain the least frequent token of the entire tokenized name. 
This hints that what is important for language-based codes is not to describe the full entity name but to include its most discriminative part. We also observe that the performance of captioning increases only moderately from " + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "inline_equation", + "content": "L = 4" + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "inline_equation", + "content": "L = 8" + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "text", + "content": " even though the number of entities considered in full increases drastically from " + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "inline_equation", + "content": "46.6\\%" + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 46, + 437, + 288, + 664 + ], + "type": "text", + "content": ". This confirms our intuition that decoding all the entity name tokens does not have a major impact on the performance as long as the most discriminative tokens are decoded. Overall, these observations motivate the ALD design of keeping only the most discriminative tokens, which is shown in Fig. 5 to lead to improved performance compared to decoding the full tokenized entity name." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": "Effect of code length for GER-ALD. We see in Fig. 5 (left) that the performance of GER-ALD is the best for " + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "inline_equation", + "content": "L = 4" + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": ". With smaller code lengths, we need to resort to random tokens a lot to achieve unique codes (see Sec. 3.2), which deters the" + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 306, + 182, + 417, + 251 + ], + "blocks": [ + { + "bbox": [ + 306, + 182, + 417, + 251 + ], + "lines": [ + { + "bbox": [ + 306, + 182, + 417, + 251 + ], + "spans": [ + { + "bbox": [ + 306, + 182, + 417, + 251 + ], + "type": "table", + "html": "
<tr><td>Selection strategy</td><td>HM</td></tr>
<tr><td>Least frequent tokens</td><td>14.4</td></tr>
<tr><td>Most frequent tokens</td><td>12.3</td></tr>
<tr><td>First tokens</td><td>12.0</td></tr>
<tr><td>Random tokens</td><td>11.3</td></tr>
", + "image_path": "72bf4dc9d70f5830ea073fffbc11e42754c6eb2225f9706b62e98609ed90fd40.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 436, + 182, + 538, + 251 + ], + "blocks": [ + { + "bbox": [ + 436, + 182, + 538, + 251 + ], + "lines": [ + { + "bbox": [ + 436, + 182, + 538, + 251 + ], + "spans": [ + { + "bbox": [ + 436, + 182, + 538, + 251 + ], + "type": "table", + "html": "
<tr><td>Tokens order</td><td>HM</td></tr>
<tr><td>Least frequent first</td><td>14.4</td></tr>
<tr><td>Syntax order</td><td>14.4</td></tr>
<tr><td>Random order</td><td>13.0</td></tr>
<tr><td>Least frequent last</td><td>12.7</td></tr>
", + "image_path": "1b86fe8938b6ef96d3317e4f30f599f745382962b0c45607376ff8242227dda6.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 254, + 545, + 287 + ], + "lines": [ + { + "bbox": [ + 305, + 254, + 545, + 287 + ], + "spans": [ + { + "bbox": [ + 305, + 254, + 545, + 287 + ], + "type": "text", + "content": "Table 3. Ablation study of GER-ALD codes. (left) Word tokens selection. (right) Tokens order. All variants use " + }, + { + "bbox": [ + 305, + 254, + 545, + 287 + ], + "type": "inline_equation", + "content": "L = 4" + }, + { + "bbox": [ + 305, + 254, + 545, + 287 + ], + "type": "text", + "content": ". Default is in top rows. Non language-based GER-ATOMIC gets 11.4 top-1." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "text", + "content": "performance. For example at " + }, + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "inline_equation", + "content": "L = 2" + }, + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "text", + "content": ", more than " + }, + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "text", + "content": " of the entities use a random code token while this percentage decreases to " + }, + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "inline_equation", + "content": "L = 4" + }, + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "text", + "content": ". We also see that the performance of GER-ALD decreases for code length above " + }, + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "inline_equation", + "content": "L = 4" + }, + { + "bbox": [ + 304, + 297, + 545, + 453 + ], + "type": "text", + "content": ", which hints that only the few most discriminative tokens are important while additional ones clutter the entity code. Interestingly we also observe in Fig. 5 (left) that when considering all the tokens, GER-ALD performance is slightly below that of GER-CAPTION. This might seem surprising since the same amount of information is present in both cases. However we find that when considering all the tokens, it is more difficult for the model to decode tokens ordered by frequencies than tokens ordered syntactically." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 453, + 545, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 453, + 545, + 621 + ], + "spans": [ + { + "bbox": [ + 304, + 453, + 545, + 621 + ], + "type": "text", + "content": "Entities with long entity names. In Fig. 4 (left), we report the accuracy per entity name length for both GER-ALD and GER-CAPTION finetuned models. We see that the longer the entity name, the more GER-ALD improves over captioning. Longer entities tend to have more noise with key information further into the code. We also show in Fig. 4 qualitative examples of entities with long entity names (more in Fig. 12 in Appendix). 
In the left example, we see that GER-ALD use the token combination [col][ob] to represent the semantic concept of colobus monkey species. The last token is used to efficiently differentiate between sub-species of colobus. This compact and discriminative way of encoding the entity allows GER-ALD to successfully predict this entity whereas GER-CAPTION fails to generate the entity tokenized name." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 628, + 444, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 628, + 444, + 639 + ], + "spans": [ + { + "bbox": [ + 306, + 628, + 444, + 639 + ], + "type": "text", + "content": "4.4.3 Creating codes with ALD" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "content": "Least frequent tokens. In Tab. 3 (left), we validate our choice of selecting the least frequent tokens by evaluating 3 alternatives: random choice, most frequent tokens and first-appearing tokens in tokenized entity name. We see that these alternative strategies hurt the performance significantly. Qualitative examples in Appendix Fig. 11 show that" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17319" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 70, + 282, + 137 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 282, + 137 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 282, + 137 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 282, + 137 + ], + "type": "table", + "html": "
<tr><td>Dataset</td><td>Codes</td><td>HM</td></tr>
<tr><td>WebLI</td><td>WebLI caption</td><td>1.8</td></tr>
<tr><td>Entity-WebLI (55M)</td><td>WebLI caption</td><td>12.9 (+11.1)</td></tr>
<tr><td>Entity-WebLI (55M)</td><td>Entity name</td><td>14.8 (+1.9)</td></tr>
<tr><td>Entity-WebLI (55M)</td><td>ALD</td><td>17.5 (+2.7)</td></tr>
", + "image_path": "28cbe87488f35f91349ac790f7a382f1275ed50b7595d54ab89b2ad94f98bba8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 50, + 177, + 178, + 271 + ], + "blocks": [ + { + "bbox": [ + 46, + 140, + 287, + 174 + ], + "lines": [ + { + "bbox": [ + 46, + 140, + 287, + 174 + ], + "spans": [ + { + "bbox": [ + 46, + 140, + 287, + 174 + ], + "type": "text", + "content": "Figure 6. Entity-based pretraining ablation. (left): Validation OVEN accuracy. (right): Examples of original WebLI captions versus corresponding OVEN entity names." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 50, + 177, + 178, + 271 + ], + "lines": [ + { + "bbox": [ + 50, + 177, + 178, + 271 + ], + "spans": [ + { + "bbox": [ + 50, + 177, + 178, + 271 + ], + "type": "image", + "image_path": "28887796d8c7d3ad0466ad5434277fcb99be97764d83f27a20f6c6bd45bad6ea.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 180, + 278, + 258 + ], + "lines": [ + { + "bbox": [ + 179, + 180, + 278, + 258 + ], + "spans": [ + { + "bbox": [ + 179, + 180, + 278, + 258 + ], + "type": "text", + "content": "Figure 7. Pretraining. We vary the size of the pretraining dataset by changing the amount of retrieved examples from WebLI for each OVEN entity (see Sec. 3.3)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 278, + 286, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 278, + 286, + 326 + ], + "spans": [ + { + "bbox": [ + 46, + 278, + 286, + 326 + ], + "type": "text", + "content": "the kept tokens are less semantic and discriminative compared to GER-ALD strategy of keeping the least frequent tokens. Note that all these variants are at least as good as GER-ATOMIC (11.4 top-1) which is not based on language at all." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 326, + 286, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 326, + 286, + 409 + ], + "spans": [ + { + "bbox": [ + 46, + 326, + 286, + 409 + ], + "type": "text", + "content": "Decoding order. In Tab. 3 (right), we vary the order of the first " + }, + { + "bbox": [ + 46, + 326, + 286, + 409 + ], + "type": "inline_equation", + "content": "L - 1" + }, + { + "bbox": [ + 46, + 326, + 286, + 409 + ], + "type": "text", + "content": " tokens in GER-ALD codes. Instead of decoding tokens from least to most frequent, we evaluate most to least frequent, syntax order and random order. Note that the selected tokens are the same in all variants, only their order changes. We see that both \"least frequent first\" and \"syntax\" orders achieve the best of performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 419, + 186, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 419, + 186, + 431 + ], + "spans": [ + { + "bbox": [ + 47, + 419, + 186, + 431 + ], + "type": "text", + "content": "4.4.4 Entity-based pretraining" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 438, + 286, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 438, + 286, + 617 + ], + "spans": [ + { + "bbox": [ + 46, + 438, + 286, + 617 + ], + "type": "text", + "content": "Entity-based pretraining. In Fig. 6, we analyze why our entity-based pretraining improves over the standard captioning pretraining of PaLI or GiT models. 
First, we see that our method of selecting WebLI data relevant to OVEN entities drastically improves the performance (+11.1 in Fig. 6 (left)). This is because, by design, we select image-text pairs from WebLI that have captions similar to OVEN entity names. Hence, this data is directly relevant for the OVEN entity recognition benchmark. Second, we see that replacing the original WebLI caption with its corresponding entity name from OVEN leads to superior performance (+1.9). We see in the qualitative examples of Fig. 6 (right) that original captions contain a lot of descriptive information not directly relevant to the entity. Lastly, we confirm that using GER-ALD codes is better (+2.7) than tokenized entity name." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": "Dataset size. In Fig. 7, we evaluate the effect of the pretraining dataset size for GER models. We control the dataset size by varying the amount of retrieved examples from WebLI for each of the OVEN entities (see Sec. 3.3). We see in Fig. 7 that GER-ALD, GER-CAPTION and GER-ATOMIC benefit greatly from more data and do not seem to have reached saturation yet. As analyzed in Sec. 4.4.1, GER-ATOMIC fails when the amount of pretraining data decreases." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 305, + 70, + 544, + 198 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 544, + 198 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 544, + 198 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 544, + 198 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>ImageNet-LT</td><td>WebVision</td></tr>
<tr><td>Classif. MLP</td><td>74.3</td><td>80.9</td></tr>
<tr><td>GER-ATOMIC L = 2</td><td>80.8</td><td>84.7</td></tr>
<tr><td>GER-ALD L = 2</td><td>80.9</td><td>84.8</td></tr>
<tr><td>GER-ATOMIC L = 1 (~ Classif. MAP)</td><td>81.0</td><td>84.8</td></tr>
<tr><td colspan=3>Previously published numbers</td></tr>
<tr><td>NCR [13]</td><td>-</td><td>76.8</td></tr>
<tr><td>CurrNet [10]</td><td>-</td><td>79.3</td></tr>
<tr><td>PEL [40]</td><td>78.3</td><td>-</td></tr>
<tr><td>MAM [14]†</td><td>82.3</td><td>83.6</td></tr>
", + "image_path": "e0db313c9f8812c0e7d81e835994124684b7edecb88603562464280e32a22e5c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 200, + 545, + 222 + ], + "lines": [ + { + "bbox": [ + 305, + 200, + 545, + 222 + ], + "spans": [ + { + "bbox": [ + 305, + 200, + 545, + 222 + ], + "type": "text", + "content": "Table 4. Evaluation of classification models and GER on small-scale label-spaces. " + }, + { + "bbox": [ + 305, + 200, + 545, + 222 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 305, + 200, + 545, + 222 + ], + "type": "text", + "content": " indicates the use of additional data." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 228, + 437, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 228, + 437, + 239 + ], + "spans": [ + { + "bbox": [ + 306, + 228, + 437, + 239 + ], + "type": "text", + "content": "4.5. Link with classification" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 247, + 545, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 247, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 304, + 247, + 545, + 449 + ], + "type": "text", + "content": "A typical way of tackling visual entity recognition is by training a classifier into the number of entities [35]. This is not a viable solution for web-scale problems such as OVEN where a single fully-connected layer for a 6M classes has an enormous parameter count of 4.6B. In this section, we evaluate GER in cases where learning a classification model is a feasible choice (smaller number of classes). Classification can be cast in our GER framework simply by setting " + }, + { + "bbox": [ + 304, + 247, + 545, + 449 + ], + "type": "inline_equation", + "content": "L = 1" + }, + { + "bbox": [ + 304, + 247, + 545, + 449 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 247, + 545, + 449 + ], + "type": "inline_equation", + "content": "V = |\\mathcal{E}| =" + }, + { + "bbox": [ + 304, + 247, + 545, + 449 + ], + "type": "text", + "content": " number of classes (see Sec. 3.1), making it a special case of atomic codes with " + }, + { + "bbox": [ + 304, + 247, + 545, + 449 + ], + "type": "inline_equation", + "content": "L = 1" + }, + { + "bbox": [ + 304, + 247, + 545, + 449 + ], + "type": "text", + "content": ". Since the decoder decodes a single token, it is equivalent to a multi-layer Multihead Attention Pooling (MAP) head [21, 48]. In Tab. 4, we consider two challenging classification datasets: long-tailed ImageNet-LT [24] and noisy Webvision [22]. We evaluate GER-{ALD, ATOMIC} and a classification baseline using multi-layer perceptron (MLP) on averaged-pooled patch tokens. Implementation details are in Sec 6.3 in Appendix." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 450, + 545, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 450, + 545, + 582 + ], + "spans": [ + { + "bbox": [ + 304, + 450, + 545, + 582 + ], + "type": "text", + "content": "We see in Tab. 4 that using GER-ATOMIC instead of standard MLP improves significantly the performance of the classification model (74.3 versus 81.0 for ImageNet-LT). We also observe that GER-ATOMIC and GER-ALD have comparable performance in this relatively small label-space regime (1k classes). 
As a matter of fact, this achieves state-of-the-art accuracy for both datasets (when no additional external data is used). This shows that GER framework not only excels for large-scale scenarios, but also works well in datasets with smaller number of visual entities, making GER a general framework for visual entity recognition." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 594, + 378, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 594, + 378, + 605 + ], + "spans": [ + { + "bbox": [ + 306, + 594, + 378, + 605 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 613, + 545, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 613, + 545, + 702 + ], + "spans": [ + { + "bbox": [ + 304, + 613, + 545, + 702 + ], + "type": "text", + "content": "In this work, we propose a novel generative framework for web-scale visual entity recognition. We represent each entity by a compact, discriminative and semantic code that a generative auto-regressive model learns to decode. In future work, we will explore ways of creating better entity codes by leveraging additional information: either from the Wikipedia page such as the description of the entity and its attached image or also by using external tools." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17320" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Rahul Agrawal, Archit Gupta, Yashoteja Prabhu, and Manik Varma. Multi-label learning with millions of labels: Recommending advertiser bid phrases for web pages. In Proceedings of the 22nd international conference on World Wide Web, pages 13-24, 2013. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 146, + 288, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 146, + 288, + 211 + ], + "spans": [ + { + "bbox": [ + 53, + 146, + 288, + 211 + ], + "type": "text", + "content": "[2] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in Neural Information Processing Systems, 35:23716-23736, 2022. 
1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 213, + 287, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 213, + 287, + 246 + ], + "spans": [ + { + "bbox": [ + 53, + 213, + 287, + 246 + ], + "type": "text", + "content": "[3] Samy Bengio, Krzysztof Dembczynski, Thorsten Joachims, Marius Kloft, and Manik Varma. Extreme Classification (Dagstuhl Seminar 18291). Dagstuhl Reports, 2019. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 247, + 287, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 247, + 287, + 278 + ], + "spans": [ + { + "bbox": [ + 53, + 247, + 287, + 278 + ], + "type": "text", + "content": "[4] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In ECCV, 2014. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 280, + 287, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 280, + 287, + 334 + ], + "spans": [ + { + "bbox": [ + 53, + 280, + 287, + 334 + ], + "type": "text", + "content": "[5] Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, et al. Pali: A jointly-scaled multilingual language-image model. *ICLR*, 2023. 1, 2, 4, 5, 11, 12" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 335, + 287, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 335, + 287, + 368 + ], + "spans": [ + { + "bbox": [ + 53, + 335, + 287, + 368 + ], + "type": "text", + "content": "[6] Nicola De Cao, Gautier Izacard, Sebastian Riedel, and Fabio Petroni. Autoregressive entity retrieval. arXiv preprint arXiv:2010.00904, 2020. 1, 2, 3, 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 369, + 287, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 369, + 287, + 412 + ], + "spans": [ + { + "bbox": [ + 53, + 369, + 287, + 412 + ], + "type": "text", + "content": "[7] Mostafa Dehghani, Alexey Gritsenko, Anurag Arnab, Matthias Minderer, and Yi Tay. Scenic: A JAX library for computer vision research and beyond. arXiv preprint arXiv:2110.11403, 2021. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 414, + 287, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 414, + 287, + 446 + ], + "spans": [ + { + "bbox": [ + 53, + 414, + 287, + 446 + ], + "type": "text", + "content": "[8] Mark Everingham, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes (voc) challenge. *IJCV*, 88, 2010. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 447, + 287, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 447, + 287, + 490 + ], + "spans": [ + { + "bbox": [ + 53, + 447, + 287, + 490 + ], + "type": "text", + "content": "[9] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In CVPR, 2004. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 491, + 287, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 491, + 287, + 535 + ], + "spans": [ + { + "bbox": [ + 48, + 491, + 287, + 535 + ], + "type": "text", + "content": "[10] Sheng Guo, Weilin Huang, Haozhi Zhang, Chenfan Zhuang, Dengke Dong, Matthew R Scott, and Dinglong Huang. 
CurriculumNet: Weakly supervised learning from large-scale web images. In ECCV, pages 135-150, 2018. 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 536, + 287, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 536, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 48, + 536, + 287, + 590 + ], + "type": "text", + "content": "[11] Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics. JMLR Workshop and Conference Proceedings, 2010. 1, 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 591, + 287, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 591, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 591, + 287, + 645 + ], + "type": "text", + "content": "[12] Hexiang Hu, Yi Luan, Yang Chen, Urvashi Khandelwal, Mandar Joshi, Kenton Lee, Kristina Toutanova, and Ming-Wei Chang. Open-domain visual entity recognition: Towards recognizing millions of wikipedia entities. ICCV, 2023. 1, 2, 3, 5, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 647, + 287, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 679 + ], + "type": "text", + "content": "[13] Ahmet Iscen, Jack Valmadre, Anurag Arnab, and Cordelia Schmid. Learning with neighbor consistency for noisy labels. In CVPR, 2022. 8" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "text", + "content": "[14] Ahmet Iscen, Alireza Fathi, and Cordelia Schmid. Improving image recognition by retrieving from web-scale image-text data. CVPR, 2023. 8" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[15] Ahmet Iscen, Mathilde Caron, Alireza Fathi, and Cordelia Schmid. Retrieval-enhanced contrastive vision-text models. *ICLR*, 2024. 1, 2, 4, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "text", + "content": "[16] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "type": "text", + "content": "[17] Aditya Khosla, Nityananda Jayadevaprakash, Bangpeng Yao, and Li Fei-Fei. Novel dataset for fine-grained image categorization. In First Workshop on Fine-Grained Visual Categorization, CVPR, 2011. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 197, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 545, + 228 + ], + "type": "text", + "content": "[18] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV, 2013. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 230, + 545, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 230, + 545, + 263 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 545, + 263 + ], + "type": "text", + "content": "[19] Taku Kudo. Subword regularization: Improving neural network translation models with multiple subword candidates. arXiv preprint arXiv:1804.10959, 2018.3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 265, + 545, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 265, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 265, + 545, + 308 + ], + "type": "text", + "content": "[20] Taku Kudo and John Richardson. Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing. arXiv preprint arXiv:1808.06226, 2018. 2, 3, 14" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 309, + 545, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 309, + 545, + 363 + ], + "spans": [ + { + "bbox": [ + 307, + 309, + 545, + 363 + ], + "type": "text", + "content": "[21] Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In International conference on machine learning, 2019. 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 365, + 545, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 365, + 545, + 409 + ], + "spans": [ + { + "bbox": [ + 307, + 365, + 545, + 409 + ], + "type": "text", + "content": "[22] Wen Li, Limin Wang, Wei Li, Eirikur Agustsson, and Luc Van Gool. Webvision database: Visual learning and understanding from web data. arXiv preprint arXiv:1708.02862, 2017. 2, 8, 11" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 411, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 411, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 411, + 545, + 453 + ], + "type": "text", + "content": "[23] Haotian Liu, Kilho Son, Jianwei Yang, Ce Liu, Jianfeng Gao, Yong Jae Lee, and Chunyuan Li. Learning customized visual models with retrieval-augmented knowledge. In CVPR, 2023. 2, 4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 455, + 545, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 455, + 545, + 488 + ], + "spans": [ + { + "bbox": [ + 307, + 455, + 545, + 488 + ], + "type": "text", + "content": "[24] Ziwei Liu, Zhongqi Miao, Xiaohang Zhan, Jiayun Wang, Boqing Gong, and Stella X. Yu. Large-scale long-tailed recognition in an open world. In CVPR, 2019. 
2, 8, 11" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 490, + 545, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 490, + 545, + 543 + ], + "spans": [ + { + "bbox": [ + 307, + 490, + 545, + 543 + ], + "type": "text", + "content": "[25] Sanket Vaibhav Mehta, Jai Gupta, Yi Tay, Mostafa Dehghani, Vinh Q Tran, Jinfeng Rao, Marc Najork, Emma Strubell, and Donald Metzler. Dsi++: Updating transformer memory with new documents. arXiv preprint arXiv:2212.09744, 2022. 1, 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 545, + 545, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 545, + 545, + 599 + ], + "spans": [ + { + "bbox": [ + 307, + 545, + 545, + 599 + ], + "type": "text", + "content": "[26] Anshul Mittal, Kunal Dahiya, Shreya Malani, Janani Ramaswamy, Seba Kuruvilla, Jitendra Ajmera, Keng-hao Chang, Sumeet Agarwal, Purushottam Kar, and Manik Varma. Multi-modal extreme classification. In CVPR, 2022. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 601, + 545, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 601, + 545, + 634 + ], + "spans": [ + { + "bbox": [ + 307, + 601, + 545, + 634 + ], + "type": "text", + "content": "[27] Rafael Müller, Simon Kornblith, and Geoffrey E Hinton. When does label smoothing help? Advances in neural information processing systems, 32, 2019. 4" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 635, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 679 + ], + "type": "text", + "content": "[28] Jianmo Ni, Gustavo Hernández Ábrego, Noah Constant, Ji Ma, Keith B Hall, Daniel Cer, and Yinfei Yang. Sentence-t5: Scalable sentence encoders from pre-trained text-to-text models. arXiv preprint arXiv:2108.08877, 2021. 4, 12" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "text", + "content": "[29] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. 1, 5" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17321" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 94 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 94 + ], + "type": "text", + "content": "[30] OpenAI. GPT-4 technical report. 
arXiv preprint arXiv:2303.08774, 2023.1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "text", + "content": "[31] Ronak Pradeep, Kai Hui, Jai Gupta, Adam D Lelkes, Honglei Zhuang, Jimmy Lin, Donald Metzler, and Vinh Q Tran. How does generative retrieval scale to millions of passages? arXiv preprint arXiv:2305.11841, 2023. 1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 195 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 195 + ], + "type": "text", + "content": "[32] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 4, 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 198, + 287, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 198, + 287, + 251 + ], + "spans": [ + { + "bbox": [ + 48, + 198, + 287, + 251 + ], + "type": "text", + "content": "[33] Shashank Rajput, Nikhil Mehta, Anima Singh, Raghunanandan H Keshavan, Trung Vu, Lukasz Heldt, Lichan Hong, Yi Tay, Vinh Q Tran, Jonah Samost, et al. Recommender systems with generative retrieval. arXiv preprint arXiv:2305.05065, 2023. 1, 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 254, + 287, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 254, + 287, + 285 + ], + "spans": [ + { + "bbox": [ + 48, + 254, + 287, + 285 + ], + "type": "text", + "content": "[34] Stephen Robertson, Hugo Zaragoza, et al. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 2009. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 287, + 287, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 287, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 48, + 287, + 287, + 342 + ], + "type": "text", + "content": "[35] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 2015. 2, 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 343, + 287, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 343, + 287, + 375 + ], + "spans": [ + { + "bbox": [ + 48, + 343, + 287, + 375 + ], + "type": "text", + "content": "[36] Florian Schroff, Dmitry Kalenichenko, and James Philbin. Facenet: A unified embedding for face recognition and clustering. In CVPR, 2015. 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 377, + 287, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 377, + 287, + 432 + ], + "spans": [ + { + "bbox": [ + 48, + 377, + 287, + 432 + ], + "type": "text", + "content": "[37] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114, 2021. 
4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 434, + 287, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 434, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 48, + 434, + 287, + 498 + ], + "type": "text", + "content": "[38] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. arXiv preprint arXiv:2210.08402, 2022. 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 500, + 287, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 500, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 48, + 500, + 287, + 533 + ], + "type": "text", + "content": "[39] Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909, 2015. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 535, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 535, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 535, + 287, + 567 + ], + "type": "text", + "content": "[40] Jiang-Xin Shi, Tong Wei, Zhi Zhou, Xin-Yan Han, Jie-Jing Shao, and Yu-Feng Li. Parameter-efficient long-tailed recognition. arXiv preprint arXiv:2309.10019, 2023. 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 568, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 287, + 612 + ], + "type": "text", + "content": "[41] Weiwei Sun, Lingyong Yan, Zheng Chen, Shuaiqiang Wang, Haichao Zhu, Pengjie Ren, Zhumin Chen, Dawei Yin, Maarten Rijke, and Zhaochun Ren. Learning to tokenize for generative retrieval. NeurIPS, 2023. 1, 2, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 613, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 667 + ], + "type": "text", + "content": "[42] Yi Tay, Vinh Tran, Mostafa Dehghani, Jianmo Ni, Dara Bahri, Harsh Mehta, Zhen Qin, Kai Hui, Zhe Zhao, Jai Gupta, et al. Transformer memory as a differentiable search index. Advances in Neural Information Processing Systems, 2022. 1, 2, 5, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "text", + "content": "[43] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In CVPR, 2018. 2" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 364 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "text", + "content": "[44] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 
2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "type": "text", + "content": "[45] Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, and Lijuan Wang. Git: A generative image-to-text transformer for vision and language. arXiv preprint arXiv:2205.14100, 2022. 3, 4, 5, 11, 12" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 163, + 545, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 206 + ], + "type": "text", + "content": "[46] Yujing Wang, Yingyan Hou, Haonan Wang, Ziming Miao, Shibin Wu, Qi Chen, Yuqing Xia, Chengmin Chi, Guoshuai Zhao, Zheng Liu, et al. A neural corpus indexer for document retrieval. NeurIPS, 2022. 2, 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 208, + 545, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 208, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 307, + 208, + 545, + 239 + ], + "type": "text", + "content": "[47] Tobias Weyand, Andre Araujo, Bingyi Cao, and Jack Sim. Google landmarks dataset v2-a large-scale benchmark for instance-level recognition and retrieval. In CVPR, 2020. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 241, + 545, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 545, + 263 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 545, + 263 + ], + "type": "text", + "content": "[48] Xiaohua Zhai, Alexander Kolesnikov, Neil Houlsby, and Lucas Beyer. Scaling vision transformers. In CVPR, 2022. 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 265, + 545, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 265, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 265, + 545, + 308 + ], + "type": "text", + "content": "[49] Yidan Zhang, Ting Zhang, Dong Chen, Yujing Wang, Qi Chen, Xing Xie, Hao Sun, Weiwei Deng, Qi Zhang, Fan Yang, et al. Irgen: Generative modeling for image retrieval. arXiv preprint arXiv:2303.10126, 2023. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 309, + 545, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 309, + 545, + 364 + ], + "spans": [ + { + "bbox": [ + 308, + 309, + 545, + 364 + ], + "type": "text", + "content": "[50] Zheng Zhu, Guan Huang, Jiankang Deng, Yun Ye, Junjie Huang, Xinze Chen, Jiagang Zhu, Tian Yang, Dalong Du, Jiwen Lu, et al. Webface260m: A benchmark for million-scale deep face recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 
2" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17322" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/8eab7491-89d9-4dd1-8ee1-40b9bd851b01_content_list.json b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/8eab7491-89d9-4dd1-8ee1-40b9bd851b01_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..91f76ea6a40d2530af1e99632f0638f9da6a2525 --- /dev/null +++ b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/8eab7491-89d9-4dd1-8ee1-40b9bd851b01_content_list.json @@ -0,0 +1,1356 @@ +[ + { + "type": "text", + "text": "A noisy elephant in the room: Is your out-of-distribution detector robust to label noise?", + "text_level": 1, + "bbox": [ + 199, + 130, + 772, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Galadrielle Humblot-Renaux $^{1}$ Sergio Escalera $^{1,2}$ Thomas B. Moeslund $^{1}$", + "bbox": [ + 187, + 202, + 823, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Visual Analysis and Perception lab, Aalborg University, Denmark", + "bbox": [ + 243, + 220, + 769, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Department of Mathematics and Informatics, University of Barcelona and Computer Vision Center, Spain", + "bbox": [ + 88, + 239, + 924, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "gegeh@create.aau.dk sescalera@ub.edu tbm@create.aau.dk", + "bbox": [ + 236, + 260, + 777, + 272 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 308, + 313, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The ability to detect unfamiliar or unexpected images is essential for safe deployment of computer vision systems. In the context of classification, the task of detecting images outside of a model's training domain is known as out-of-distribution (OOD) detection. While there has been a growing research interest in developing post-hoc OOD detection methods, there has been comparably little discussion around how these methods perform when the underlying classifier is not trained on a clean, carefully curated dataset. In this work, we take a closer look at 20 state-of-the-art OOD detection methods in the (more realistic) scenario where the labels used to train the underlying classifier are unreliable (e.g. crowd-sourced or web-scraped labels). Extensive experiments across different datasets, noise types & levels, architectures and checkpointing strategies provide insights into the effect of class label noise on OOD detection, and show that poor separation between incorrectly classified ID samples vs. OOD samples is an overlooked yet important limitation of existing methods. Code: https://github.com/glhr/ood-labelnoise", + "bbox": [ + 76, + 339, + 473, + 618 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 647, + 209, + 664 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In many real-world applications where deep neural networks are deployed \"in the wild\", it is desirable to have models that not only correctly classify samples drawn from the distribution of labeled data but also flag unexpected inputs as out-of-distribution (OOD). This has motivated the development of a wide range of OOD detection methods and benchmarks for computer vision [47, 66]. In particular, post-hoc OOD detection methods have shown wide appeal: compared to training-based methods, post-hoc OOD detectors can be applied on top of existing image classifiers without the need for re-training, have little to no architecture constraints, do not compromise classification performance, and achieve strong performance in large-scale settings [67].", + "bbox": [ + 76, + 672, + 468, + 869 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Existing OOD benchmarks place significant emphasis on carefully designing the selection of OOD datasets used", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0afe293cd79a0a438a21007b19d3aa6cf5c9549034d9328e62b1bf42b9aafe8c.jpg", + "image_caption": [ + "Figure 1. Can state-of-the-art OOD detectors tell incorrectly classified ID images apart from OOD inputs? Not really. Here we compare their performance across 396 trained classifiers." + ], + "image_footnote": [], + "bbox": [ + 532, + 306, + 859, + 426 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "for evaluation [4, 14, 60, 67]. In contrast, the role of the in-distribution (ID) dataset used for training the underlying classifier is seldom discussed. Among the most popular choices of ID dataset are MNIST, CIFAR10, CIFAR100 and ImageNet [30, 67] - all of which have been carefully curated and reliably annotated. Yet, in practice, the collection of labelled datasets involves a trade-off between acquisition time/cost and annotation quality - human inattention, mis-clicking, limited expertise, crowd-sourcing, automated annotation, and other cost-saving measures inevitably introduce labelling errors [54]. Besides, some images are inherently ambiguous to label even for the most knowledgeable and careful of annotators [28]. Considering how pervasive the problem of label noise is in real-world image classification datasets, its effect on OOD detection is crucial to study.", + "bbox": [ + 496, + 506, + 892, + 733 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address this gap, we systematically analyse the label noise robustness of a wide range of OOD detectors, ranging from the widely adopted Maximum Softmax Probability (MSP) baseline [14, 19], to distance-based methods operating in feature space [32, 57], to more recent, complex methods such as SHE [70] and ASH [10]. In particular:", + "bbox": [ + 496, + 733, + 893, + 824 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. We present the first study of post-hoc OOD detection in the presence of noisy classification labels, examining the performance of 20 state-of-the-art methods under different types and levels of label noise in the training data. 
Our study includes multiple classification architectures", + "bbox": [ + 500, + 824, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "22626", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "and datasets, ranging from the beloved CIFAR10 to the more difficult Clothing1M, and shows that even at a low noise rate, the label noise setting poses an interesting challenge for many methods.", + "bbox": [ + 96, + 90, + 467, + 151 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2. We revisit the notion that OOD detection performance correlates with ID accuracy [14, 60], examining when and why this relation holds. Robustness to inaccurate classification requires that OOD detectors effectively separate mistakes on ID data from OOD samples - yet most existing methods confound the two (Figure 1).", + "3. Our analysis includes key takeaways and recommendations for future evaluation and development of OOD detection methods considering an unreliable label setting." + ], + "bbox": [ + 76, + 152, + 467, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Problem set-up", + "text_level": 1, + "bbox": [ + 76, + 301, + 228, + 318 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we tackle the question: what happens when post-hoc OOD detectors are applied on top of a classifier trained with unreliable labels - a common setting in practice? We introduce the main relevant concepts below.", + "bbox": [ + 75, + 325, + 467, + 386 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Classifier We study OOD detection in the context of supervised image classification, where a discriminative model $h: \\mathcal{X} \\to \\mathcal{Y}$ is trained on a dataset of $N$ labelled examples $D_{train} = \\{(x_i, y_i)\\}_{i=1}^N \\in \\mathcal{X} \\times \\mathcal{Y}$ , where each $x$ is an input image and each $y$ is the corresponding class from the label space $\\mathcal{Y}$ . A common choice would be CIFAR10 [31]. $P_{train}(X, Y)$ defines the underlying training data distribution. The classifier is evaluated on a test set $D_{test}$ drawn from the same distribution $P_{test}(X, Y) = P_{train}(X, Y)$ .", + "bbox": [ + 75, + 387, + 467, + 537 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "OD detector Post-hoc OOD detection equips the trained classifier $h$ with a scoring function $o: X \\to \\mathbb{R}$ aiming to distinguish usual examples drawn from $P_{test}(X)$ (ID samples) and anomalous (OOD) examples drawn from a disjoint, held-out distribution $P_{out}(X)$ . In practice, a collection of auxiliary datasets with minimal semantic overlap (e.g. CIFAR10 → SVHN [39]) is commonly used for evaluation [67]. 
Ideally, the score assigned to ID samples should be consistently lower (or higher) than for OOD samples, such that anomalous inputs can easily be flagged.", + "bbox": [ + 75, + 537, + 467, + 688 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Label noise We consider a noisy label setting, where the classifier $h$ does not have access to the true target values $y_{i}$ during training, but rather learns from a noisy dataset $D_{\\text{noisy}} = \\{(x_i, \\hat{y}_i)\\}_{i=1}^N$ , where the target labels are corrupted: $\\exists i$ such that $\\hat{y}_i \\neq y_i$ . In this work, we consider only closed-set label noise, where $D_{\\text{noisy}} \\in \\mathcal{X} \\times \\mathcal{Y}$ (that is, the noisy labels lie in the same label space as the true labels [46]). The noise level $\\epsilon$ is given by $P(y \\neq \\hat{y})$ , the probability that an observed label is incorrect. Common models for studying and simulating label noise are (we refer to [13] for a detailed taxonomy):", + "bbox": [ + 75, + 688, + 467, + 854 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1. Noisy Completely at Random (NCAR) or uniform label noise: labels are flipped at a constant rate $\\epsilon$ , regardless of class or image.", + "bbox": [ + 76, + 854, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2. Noisy at Random (NAR) or class-conditional label noise: a constant noise rate across all images of the same class, but different classes may have different noise rates.", + "3. Noisy Not at Random (NNAR) or instance-dependent label noise: noisy labels are jointly determined by the true class and the associated image." + ], + "bbox": [ + 500, + 90, + 890, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In practice, real (as opposed to synthetically generated) label noise occurring from an imperfect annotation pipeline follows complex patterns, and is thus best represented by an instance-dependent model: some classes are more likely to be mislabeled than others, and so are some images (e.g. ambiguous or rare samples) [50, 64].", + "bbox": [ + 498, + 181, + 890, + 272 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Related work", + "text_level": 1, + "bbox": [ + 500, + 286, + 635, + 301 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Studying label noise The effect of unreliable labels on supervised learning is a well-studied problem in deep learning [51] and computer vision [1, 28], as errors or inconsistencies are a natural part of label collection in many real applications. Though increased dataset size can help [45], noisy labels degrade classification performance, especially in the later stages of training where over-parameterized models are prone to memorizing them [36, 69]. The precise effects of label noise have been shown to depend on the noise model and distribution [41]. A recent CIFAR classification benchmark suggests that models trained on real, instance-dependent noisy labels are significantly more prone to memorization than those trained on synthetic class-conditional labels with the same overall noise rate [64]. We therefore consider real noisy labels in our benchmark (stemming from human annotation error), which we compare to two sets of synthetic noisy labels (uniform and class-conditional). We also compare the effect of validation-based early stopping vs. 
converging on the training set.", + "bbox": [ + 496, + 311, + 890, + 598 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Effect of label noise on reliability Existing studies of label noise are largely focused on classification accuracy, and few works address the other side of the coin: reliability. We look at reliability from the angle of OOD detection performance - to the best of our knowledge, there is currently no comparable study of OOD detection under a noisy label setting. Most closely related to our work are perhaps the experiments in [42] and the analysis in [40]. [42] evaluates the effect of synthetic uniform label noise on MC-dropout and deep ensembles' uncertainty estimates, showing a significant degradation in OOD detection performance with increasing noise levels - in comparison, we study post-hoc OOD detection (with a wider variety of architectures, datasets, and methods) and consider real noisy datasets. [40] studies label noise robustness in terms of model calibration, showing that early stopping, while beneficial in terms of accuracy, offers no reliability guarantees.", + "bbox": [ + 496, + 598, + 890, + 854 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Benchmarking OOD detection robustness Previous works have investigated the limits of state-of-the-art OOD detection methods in various challenging settings, such as", + "bbox": [ + 498, + 854, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "22627", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "semantic similarity between ID vs. OOD classes [12, 14, 60], fine-grained ID labels [23], large-scale datasets [22] and adversarial attacks [49, 68]. In contrast, we focus on robustness to degraded classification performance on the ID dataset due to noisy labels, which to the best of our knowledge has comparably received little attention.", + "bbox": [ + 75, + 90, + 468, + 181 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Relation between ID classification and OOD detection performance In the standard clean label setting, a strong relationship between ID classification and OOD detection performance has been observed in prior work. [60] studies the relation between closed-set (ID) classification and open-set recognition performance (AUROC), and finds open-set recognition performance to be highly correlated with classification accuracy. [14] observes a similar trend for out-of-distribution detection performance across a large variety of pre-trained deep learning architectures, using the MSP as OOD score. Both works only consider clean training datasets, and a small subset of methods. We study the extent to which this relation holds across a wider range of OOD detection methods and noisy datasets, and provide a very simple explanation for why some methods like MSP reach such a high correlation.", + "bbox": [ + 75, + 181, + 472, + 422 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4. OOD detection methods", + "text_level": 1, + "bbox": [ + 76, + 436, + 303, + 452 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We evaluate 20 post-hoc OOD detection methods from the OpenOOD benchmark [67] - currently the most comprehensive open-source benchmark available. 
Here we present and broadly categorize these methods based on how their scoring function is designed.", + "bbox": [ + 75, + 462, + 468, + 537 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Softmax-based OOD detection revolves around the idea that ID samples are associated with higher-confidence, lower-entropy predictions than OOD samples. The baseline Maximum Softmax Probability (MSP) [19] simply takes the Softmax \"confidence\" of the predicted class as OOD score. While MSP implicitly assumes a Softmax temperature of 1, TempScaling [15] treats the temperature as a hyper-parameter, softening or sharpening the Softmax probabilities (essentially modulating categorical entropy), with the aim of improving calibration. ODIN [35] combines temperature scaling with input perturbation - \"pushing\" the input image a little in the direction that increases the MSP. In contrast, the Generalized ENtropy (GEN) score considers the full predictive distribution and captures how much it deviates from a one-hot distribution.", + "bbox": [ + 75, + 537, + 468, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Logit-based OOD detection bypasses the squashing effect of Softmax normalization. The Maximum Logit Score (MLS) [21] directly takes the logit of the predicted class. In a similar vein, energy-based OOD detection (EBO) was first proposed in [37]: a score is derived by applying the LogSumExp function to the logits - essentially a smooth version of the MLS, with an additional temperature parameter. Several post-hoc methods using an energy score have since followed suit, proposing various modifications to the", + "bbox": [ + 75, + 763, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "network [55] or features [10, 53, 56] before extracting an energy score: REACT [56] clips activations at an upper bound, RankFeat [53] subtracts the rank-1 matrix from activations, DICE [55] applies weight sparsification such that only the strongest contributors remain, and ASH [10] sparsifies activations based on a pruning percentile.", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Distance-based OOD detection aims to capture how much a test sample deviates from the ID dataset. The Mahalanobis distance score (MDS) [32] method fits a Gaussian distribution to the features of each class in the ID dataset; at test-time, the OOD score is taken as the distance to the closest class. The same authors also proposed MDSE Ensemble [32], which computes an MDS score not just from the features extracted before the final layer, but also at earlier points in the network, and aggregates them. Alternatively, the Relative Mahalanobis distance score (RMDS) [44] was proposed as a simple fix to MDS, which additionally fits a class-independent Gaussian to the entire ID dataset to compute a background score which is subtracted from the class-specific MDS score. Among other distance-based methods which rely on class-wise statistics, KLMatching (KLM) [21] takes the smallest KL Divergence between a test sample's Softmax probabilities and the mean Softmax probability vector for each ID class. OpenMax [3] operates in logit space, fitting a class-wise Weibull distribution to the distances of ID samples from the mean logit vector. Simplified Hopfield Energy (SHE) [70] computes the inner product between a test sample's features and the mean ID feature of the predicted class. 
GRAM [48] computes the Gram matrices of intermediate feature representations throughout the network, comparing them with the range of values observed for each class in the ID data. In contrast, deep k-nearest neighbor (KNN) [57] proposes a simple approach with no distributional assumptions - computing its score as the Euclidean distance to the closest samples from the ID set, regardless of class. Lastly, Virtual-logit Matching (VIM) [62] combines a logit energy score with a class-agnostic term capturing how features deviate from a principal subspace defined by the training set.", + "bbox": [ + 496, + 181, + 892, + 679 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Gradient-based OOD detection: GradNorm [24] is the only method in OpenOOD which directly derives its score from the gradient space, claiming that gradient magnitude is higher for ID inputs. The KL divergence between predicted Softmax probabilities and a uniform target is backpropagated to obtain gradients w.r.t the last layer parameters, followed by an $L_{1}$ norm to obtain the magnitude.", + "bbox": [ + 496, + 680, + 890, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 500, + 799, + 632, + 816 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We summarize our experimental set-up below, and refer to the supplementary for further details.", + "bbox": [ + 496, + 825, + 890, + 854 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ID Datasets We select popular image classification datasets from the label noise literature featuring real noisy labels alongside clean reference labels, spanning different", + "bbox": [ + 496, + 854, + 892, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "22628", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/5f6463c2a9402f4959cefa897cbb995bb7253b636962c880f075ad90fe343eb7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ID datasetclasses# images (train/val/test)resolutionnoise rate
[31]CIFAR-100%
CIFAR-10N-Agg1050,000/1,000/9,00032x329.01%
[64]CIFAR-10N-Rand117.23%
CIFAR-10N-Worst40.21%
[31]CIFAR-100-Fine10050,000/1,000/9,00032x320%
[64]CIFAR-100N-Fine40.20%
[31]CIFAR-100-Coarse2050,000/1,000/9,00032x320%
[64]CIFAR-100N-Coarse26.40%
[65]Clothing1M1424,637/7,465/5,395256x2560%
Clothing1M-Noisy38.26%
", + "bbox": [ + 78, + 88, + 470, + 220 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1. Dataset overview. Clean ones are shown in bold. The training set (clean or noisy labels) is used to train the classifier; the validation set (clean labels) is used for early stopping; the test set (clean labels) is used for evaluating classification and OOD detection performance. We always use clean labels for evaluation.", + "bbox": [ + 75, + 229, + 468, + 299 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "input sizes, number of classes, and sources & levels of label noise - see Table 1. The recently released CIFAR-N dataset [64] provides noisy re-annotations of CIFAR-10 and CIFAR-100 collected via crowd-sourcing: each image was annotated by 3 people, and different noisy label sets were created for different label selection methods (majority voting, random selection, or worst label selection). Note that CIFAR-100-Fine and CIFAR-100-Coarse contain the same set of images - only the class definitions and labels differ. Clothing1M [65] is a large-scale dataset collected by scraping shopping websites. Although the raw Clothing1M contains over a million images, we consider the smaller subset of images for which there is both a noisy and clean label.", + "bbox": [ + 75, + 325, + 468, + 521 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Synthetic noise For each real noisy label set, using the corresponding clean labels, we additionally create 2 synthetic counterparts with the same overall noise rate: one following a uniform (NCAR, class-independent) label noise model, and the other following a class-conditional label noise model with the exact same noise transition matrix as the real noise. We name these synthetic variants SU (Synthetic Uniform noise) and SCC (Synthetic Class-Conditional noise) - for example, from CIFAR-10N-Agg we create 2 synthetic versions, SU and SCC.", + "bbox": [ + 75, + 522, + 468, + 672 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ODD Datasets For fair comparison, we use the same selection of OOD datasets for all models - the OOD datasets are therefore chosen such that there is minimal semantic overlap with any of the ID datasets. We include MNIST [9], SVHN [39], Textures [6] as they are commonly used as examples of \"far\"-OOD [67] (very different appearance and semantics than the ID dataset). As examples of more natural images, we also include EuroSAT-RGB [18], Food-101 [5], a sub-set of the Stanford Online Products [52], and a 12-class sub-set of ImageNet. Since some methods require an OOD validation set for hyperparameter tuning, half of these classes are randomly selected and held-out for this purpose. The other 6 ImageNet classes, and the other OOD datasets make up the OOD test set.", + "bbox": [ + 75, + 674, + 468, + 883 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Evaluation metrics We evaluate OOD detectors'abil", + "bbox": [ + 96, + 885, + 467, + 898 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ity to separate ID vs. OOD samples in terms of the Area Under the Receiver Operating Characteristic Curve (AUROC), where images from the ID test set (e.g. CIFAR10 test set) are considered positive samples, and those from the OOD test set (e.g. SVHN test set) as negatives. This is the most commonly reported metric in the literature [25], and we denote it as AUROC $_{\\text{ID vs. OOD}}$ . In addition, unlike previous works, we separately measure the AUROC $_{\\text{correct vs. 
OOD}}$ (and AUROC $_{\\text{incorrect vs. OOD}}$ ), where only correctly (or incorrectly) classified samples from the ID test set are considered - ideally, performance should be high on both metrics.", + "bbox": [ + 496, + 90, + 890, + 256 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Architectures We include 3 architecture families: CNNs, MLPs and transformers. We select lightweight architectures which have shown competitive results when trained on small-scale datasets: ResNet18 [17], MLP-Mixer [58] and Compact Transformers [16]. Following the OpenOOD benchmark [67], we do not adopt any advanced training strategies besides standard data augmentation. For each training dataset, we repeat training with 3 random seeds, and save 2 model checkpoints: an early checkpoint (based on best validation accuracy) and the last checkpoint (after a pre-defined number of epochs has elapsed, allowing for convergence - differs per architecture).", + "bbox": [ + 496, + 257, + 890, + 436 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Bird's eye view To summarize, we train 3 different classifier architectures on 22 datasets (4 clean, 6 with real label noise, 12 with synthetic label noise), with 3 random seeds and 2 checkpoints saved per model - adding up to 396 distinct classifiers. On top of each classifier, 20 different OOD detection methods are applied and evaluated on 7 OOD datasets. Throughout the paper, OOD detection performance is taken as the median across the 7 OOD datasets (see the supplementary for results and a discussion of the median vs. mean OOD detection performance).", + "bbox": [ + 496, + 438, + 890, + 589 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Statistical significance tests When comparing pairs of methods or settings, we use the Almost Stochastic Order (ASO) test [7, 11] as implemented by Ulmer et al. [59]. This statistical test was specifically designed to compare deep learning models, making no distributional assumptions. We apply ASO with a significance level $\\alpha = 0.05$ and report $\\epsilon_{\\mathrm{min}}[\\mathrm{A} > \\mathrm{B}]$ . If $\\epsilon_{\\mathrm{min}}[\\mathrm{A} > \\mathrm{B}] \\geq 0.5$ we cannot claim that method A is better than method B; the smaller $\\epsilon_{\\mathrm{min}}$ , the more confident we can be that method A is superior.", + "bbox": [ + 496, + 589, + 890, + 726 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "6. Analysis", + "text_level": 1, + "bbox": [ + 500, + 739, + 596, + 755 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We explore the effect of label noise on OOD detection, starting with an overall view of performance trends in Section 6.1, then looking at OOD detection in relation to classification performance in Section 6.2, delving into what works (and what doesn't) in Section 6.3, and raising important considerations about how/whether to make use of a clean validation set in Section 6.4. Section 6.5 extends results to a more practical setting. More detailed analyses and additional supporting figures are in the supplementary.", + "bbox": [ + 496, + 763, + 890, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "22629", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/af65bec6ca3f55edae60a57194fc37b906b6a4d212b91a25bc7df68bb7a3793d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
training labelsCIFAR10CIFAR100-CoarseCIFAR100-FineClothing1M
AggRand1WorstcleanNSCCSUcleanNSCCSUcleanNSCCSU
methodcleanNSCCSUNSCCSUNSCCSU
GRAM94.4589.4989.1290.788.8289.1990.5288.688.7387.9882.0780.0582.179.282.9376.3180.2482.6491.0489.0794.7195.37
MDS96.0787.9392.492.9792.3789.2587.4586.7486.4989.280.0778.8982.8480.1280.174.9674.4873.4987.1290.9888.5892.38
VIM95.6589.991.8192.388.7588.684.4986.3187.2388.7584.2976.6180.047881.3775.3173.2473.9488.9983.0987.1790.14
MDSEns92.5783.8983.6283.7981.883.0680.3682.9584.0284.1179.2578.3177.4173.684.8577.4778.4379.8595.3695.4495.7895.69
KNN93.6390.0788.7590.1487.7486.6685.1186.383.7384.1484.1674.480.3975.8283.2975.6776.4871.3585.3285.584.5980.87
RMDS92.9289.3887.9488.1489.0785.7387.0484.0381.9982.3582.1475.9377.3674.8183.287675.7573.4875.8171.4378.2266.66
DICE9083.3384.1886.2488.528186.2182.7979.5879.0782.7977.6875.0170.4382.5276.5173.9268.4184.9675.7286.6982.89
ReAct90.9187.3286.6382.1689.7482.9681.9784.578.4180.1182.7973.0973.6270.3883.7673.5773.7167.5582.5773.180.2276.58
GEN91.8685.9985.4482.0889.8682.8980.8483.7581.8180.5782.6973.2571.4770.9981.3473.473.167.1183.9173.5779.7976.78
EBO91.3184.8785.7381.6289.8881.9377.8883.0481.3877.482.7472.9970.9367.8581.4173.6573.0167.4285.1976.3285.3176.64
SHE89.687.8184.3386.4886.6383.1683.0483.2480.0678.9880.4271.88070.1180.3869.6369.5666.6882.2978.0778.477.73
ODIN91.4787.7186.3182.4889.7982.2180.6884.0982.1380.0981.4273.170.8869.9783.5974.8572.1967.1683.3871.5977.7375.47
MLS91.2684.7685.5981.5788.8182.317883.5482.0180.0182.6672.9270.8569.1981.4573.6472.567.0383.372.7577.7475.54
TempScale91.6785.7685.0482.2585.0778.179.7882.8580.5180.1381.6371.6769.9469.3580.7572.6671.2866.9879.8268.7786.2874.45
ASH88.3384.7582.3781.6682.2976.4772.4885.2778.2675.4882.7871.1973.2168.4382.7474.1370.9567.4881.5374.8976.6375.74
OpenMax90.586.1283.4682.2683.0582.8779.1280.3975.6877.4181.1476.6972.6569.1780.1372.8275.667.6671.7469.2372.5574.36
MSP91.3485.1984.8782.4185.1382.2180.6882.4879.480.0980.5170.4268.8869.9779.8570.9269.6166.9277.5766.0272.4473.89
KLM90.8483.782.1581.6980.1381.8880.6474.7975.9376.8379.3770.269.2467.8179.7875.5269.8967.2577.2665.4965.2262.01
GradNorm86.2279.5577.177.8481.6679.8876.9684.1172.9672.8869.2966.2773.1767.4470.9565.6571.2663.2979.7175.3273.9377.52
RankFeat81.8383.5375.8673.1278.2975.0779.1482.5777.2575.7573.1564.669.5465.4468.7776.0770.2467.6469.1675.8569.9373.37
", + "bbox": [ + 109, + 87, + 859, + 287 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2. Best-case OOD detection performance (AUROCID vs. OOD in %) per method (that is, after selecting the best architecture-seed-checkpoint combination for each training label set). N, SCC, and SU refer to the real and synthetic noisy label sets described in Section 5. The top-3 for each training dataset are highlighted in bold, and the top-1 is underlined. In red are scores $< {75}\\%$ and in orange scores between 75 and 80%. Rows are sorted based on the total performance across columns.", + "bbox": [ + 75, + 296, + 893, + 354 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/896adb955c27ac2a2771da422929e2ef9e25fc6813e3812b4ebfdb9c98d2b316.jpg", + "image_caption": [ + "Figure 2. Distribution of OOD detection performance across methods & models when training the classifier on different label sets." + ], + "image_footnote": [], + "bbox": [ + 76, + 376, + 470, + 465 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "6.1. Where there's noise there's trouble", + "text_level": 1, + "bbox": [ + 75, + 527, + 382, + 542 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Figure 2 gives an overview of OOD detection performance for different training datasets and label noise settings. We see a clear drop in overall OOD detection performance when label noise is introduced in the training dataset, compared to training on a cleanly labelled dataset (green). Even with only $9\\%$ of incorrect CIFAR10 labels (CIFAR-Agg labels sets), the median $\\mathrm{AUROC}_{\\mathrm{IDvs.OOD}}$ across all models drops by over $5\\%$ . In Table 2, for each method we report the best-case OOD detection performance for a given training label set. While most methods are able to reach $80\\%$ $\\mathrm{AUROC}_{\\mathrm{IDvs.OOD}}$ with a classifier trained on clean labels, the number of competitive methods falls with increasing label noise, especially at noise rates $>20\\%$ . GRAM, KNN, MDS, MDSEsemble and VIM are the only methods able to reach $90+\\%$ AUROC on at least one of the noisy datasets. Takeaway: Enter the elephant Label noise in the classifier's training data makes it more difficult for post-hoc OOD detection methods to flag unfamiliar samples at test-time, even in small-scale settings like CIFAR10.", + "bbox": [ + 73, + 551, + 468, + 838 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "6.2. Does accuracy tell the whole story?", + "text_level": 1, + "bbox": [ + 75, + 847, + 382, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The most obvious effect of label noise in the training data is a decrease in classification performance on ID test data. At", + "bbox": [ + 75, + 869, + 470, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "the same time, previous works have remarked a strong relation between classification performance and OOD detection for popular post-hoc methods like MSP [14] and MLS [60]. We dig deeper. When does this relation hold and why?", + "bbox": [ + 496, + 378, + 890, + 439 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For which methods does this relation hold? In Figure 3, we quantify the relationship between ID accuracy and $\\mathrm{AUROC}_{\\mathrm{IDvs.OD}}$ in terms of Spearman correlation $\\rho$ . We find that correlation varies widely across methods, being the strongest for MSP, and is generally weaker for those which operate earlier in the network. 
We also note that for all methods except KNN and RMDS, the label noise setting makes OOD detection performance less predictable - and so does early stopping (cf. Section 6.4). This points to the distribution of ID scores playing an important role in OOD detection performance.", + "bbox": [ + 496, + 439, + 892, + 604 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When it does - why? We provide a simple observation which is lacking in prior work: methods whose OOD detection performance predictably degrades along-", + "bbox": [ + 496, + 606, + 890, + 652 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b1d33226e2a35de3fd1a6f3f6707c35b2922d9aff889460e23b1678bfd92f613.jpg", + "image_caption": [ + "Figure 3. Does OOD detection performance (AUROC $_{\\text{ID vs. OOD}}$ ) correlate with ID classification performance (accuracy)? We measure the rank correlation across different architectures, seeds, checkpoints, and datasets for different label sets. All results shown here are statistically significant ( $p < 0.001$ )." + ], + "image_footnote": [], + "bbox": [ + 500, + 674, + 890, + 816 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "22630", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/45c2dac7ad1409de0c8224e809bab08eefc1309e0476a4fd22144b8ae4b1aabc.jpg", + "image_caption": [ + "Figure 4. Relationship between ID classification performance and OOD detection performance, considering all ID test samples (top) or only incorrectly classified ones (bottom) in the AUROC metric. Each point corresponds to a single model." + ], + "image_footnote": [], + "bbox": [ + 76, + 87, + 470, + 195 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "side classification accuracy are characterized by a high $\\mathrm{AUROC}_{\\mathrm{correct}}$ vs. OOD and a low $\\mathrm{AUROC}_{\\mathrm{incorrect}}$ vs. OOD. On clean, easy datasets like CIFAR10, they exhibit strong OOD detection performance as there are few incorrectly predicted ID samples in the test set (thus the $\\mathrm{AUROC}_{\\mathrm{incorrect}}$ vs. OOD term is negligible in the overall $\\mathrm{AUROC}_{\\mathrm{ID}}$ vs. OOD) - however, when the number of incorrect prediction grows, the low $\\mathrm{AUROC}_{\\mathrm{incorrect}}$ vs. OOD becomes a more significant factor. Importantly and as exemplified by Figure 4, for all methods, $\\mathrm{AUROC}_{\\mathrm{incorrect}}$ vs. OOD is not (or only weakly, $\\rho < 0.2$ ) correlated with classification accuracy. MSP is the most clear-cut example, with a median $\\mathrm{AUROC}_{\\mathrm{incorrect}}$ vs. OOD of around 0.5 across all dataset-architecture-seed-checkpoint combinations (bottom left of Figure 4) - that is, MSP often is no better (or worse) than a random detector at separating ID mistakes and OOD inputs, no matter how accurate the underlying classifier is. The Top-4 methods in Table 2 are the only ones with a median $\\mathrm{AUROC}_{\\mathrm{incorrect}}$ vs. OOD $\\geq 0.6$ - none of the other methods exceed a median $\\mathrm{AUROC}_{\\mathrm{incorrect}}$ vs. OOD of 0.55 - see Figure 1.", + "bbox": [ + 75, + 291, + 468, + 593 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Takeaway: Would your OOD detector be better off as a failure detector? 
Accuracy correlating with OOD detection performance is partly symptomatic of many seemingly effective methods being unable to separate incorrectly classified ID samples from OOD samples - a bottleneck for robustness to imperfect classification. Claims that post-hoc OOD detection can be improved by simply improving the underlying classifier [60] overlook this fundamental issue.", + "bbox": [ + 75, + 595, + 468, + 717 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "It's not just about the noise rate We find that for a fixed noise rate in a given dataset, different types/models of label noise yield comparable classification accuracy ( $\\epsilon_{\\mathrm{min}} \\geq 0.5$ for all pair-wise comparisons), yet have different effects on OOD performance. Indeed, real label noise is better handled than the same level of synthetic by most methods, with SU labels being the most challenging - this trend is clear in Figure 2. Figure 5 shows an example of how different noise types and checkpointing strategies shape the magnitude and spread of logits. Intuitively, when the noise is spread randomly across samples (SU noise model), it is more difficult to learn which kinds of images or classes to be uncer", + "bbox": [ + 75, + 719, + 470, + 901 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/14c178369c4c0362f36de7e815ae9f15ffca1efd88177ac79bc982c814ed471d.jpg", + "image_caption": [ + "Figure 5. Max Logit ID and OOD score statistics across models trained on Clothing1M, for different noise types & checkpointing." + ], + "image_footnote": [], + "bbox": [ + 500, + 87, + 893, + 167 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "tain about, leading to consistently lower-confidence predictions across all ID samples (low median, low spread). Conversely, when label noise is more concentrated for certain classes (SCC) and/or for certain features (real noise), the classifier can learn to be more confident in some parts of the input space than others (higher median, higher spread).", + "bbox": [ + 496, + 231, + 890, + 321 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Takeaway: Faking it is better than ignoring it Uniform (synthetic) label noise in the training data tends to degrade OOD detection more strongly than class-dependant (synthetic) and instance-dependent (real) label noise. We encourage the use of synthetic uniform labels to evaluate the worst-case performance of OOD detectors, as they can be easily generated for any image classification dataset.", + "bbox": [ + 496, + 321, + 890, + 428 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.3. Design features which hurt or help", + "text_level": 1, + "bbox": [ + 498, + 439, + 802, + 455 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Why are the winners the best? In terms of design features, the methods with the strongest performance in a label noise setting have a distance-based scoring function, and take features as input rather than class probabilities. SHE is the only OOD detector satisfying both criteria which doesn't sit at the top of the pile in Table 2 - we attribute its lower performance to two factors: it summarizes the ID dataset only with class-wise means (which may be overly reductive in a label noise setting where variance is larger), and it only considers correctly predicted samples when computing them (which may be small in number if the classifier is inaccurate or the number of classes is high). 
In contrast, GRAM which includes higher-order raw moments to describe ID data statistics is the top-1 method in Table 2. In the comparison of Figure 6, GRAM and MDSEnsemble - the only methods in our benchmark which incorporate features at different depths in the network - stand out as having the \"flattest\" accuracy-AUROC curves, which is especially beneficial when the training dataset is inherently difficult (e.g. CIFAR100 due to fine-grained labels or Clothing1M due to the image complexity and diversity). However, we note that the performance of MDSEnsemble and GRAM is highly architecture-dependent - the best OOD detection performance is achieved with a ResNet18 classifier, while MLPlexer and CCT architectures give sub-par results (often sub-50% ie. even worse than a random detector). Whether this large performance variation is due to the layer types, feature dimensionality or other factors, and whether it can be remedied warrants further investigation.", + "bbox": [ + 496, + 462, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "22631", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/16d051ef1305583d91ed6ff374eb8d3a3950bf775d031a5b80e1ee7fec6dcacf.jpg", + "image_caption": [ + "Figure 6. Relation between the drop in accuracy caused by noisy labels and the resulting drop in OOD detection performance across all 20 methods. Each point corresponds to a single model trained with noisy labels." + ], + "image_footnote": [], + "bbox": [ + 76, + 85, + 890, + 239 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Takeaway: Distance is healthy Out of the 20 post-hoc OOD detectors in our benchmark, distance-based OOD detectors operating in feature space appear the most promising to cope with the problem of unreliable predictions. Intuitively, distance-based methods are more dissociated from the classifier's prediction, and more dependent on the content/appearance of ID images. In contrast, we did not find compelling evidence that methods targeting class logits or class probabilities for OOD detection are better suited for the noisy label setting.", + "bbox": [ + 75, + 305, + 467, + 455 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Are there tricks that work? We consider 3 popular \"tricks\" aiming to better separate ID vs. OOD samples in logit or probability space - temperature scaling, input perturbation and sparsification - and assess their effectiveness in a noisy label setting (excluding cleanly trained models). To isolate the effect of Softmax temperature scaling and input perturbation, we introduce $\\mathrm{ODIN}_{\\mathrm{notemp}}$ (ODIN with temperature $T$ fixed to 1) and $\\mathrm{ODIN}_{\\mathrm{nopert}}$ (perturbation magnitude $m$ set to 0). We find that scaling $T$ by maximizing likelihood on ID validation labels is detrimental ( $\\epsilon_{\\mathrm{min}}[\\mathrm{MSP} > \\mathrm{TempScale}] = 0.15$ ), however picking $T$ based OOD validation detection performance does make a statistically significant (though not practically significant) difference ( $\\epsilon_{\\mathrm{min}}[\\mathrm{ODIN}_{\\mathrm{nopert}} > \\mathrm{MSP}] = 0.05$ ). Input perturbation does not help in a label noise setting: looking at the optimal $m$ selected during $\\mathrm{ODIN}_{\\mathrm{notemp}}$ 's automatic parameter tuning, we observe that as label noise rate increases, the more likely that $m = 0$ is picked (no perturbation). 
As for feature or weight sparsification, we note that REACT and DICE are the most promising logit-based methods in the AUROCincorrect vs. OOD ranking of Figure 1.", + "bbox": [ + 75, + 455, + 468, + 773 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.4. Let's not forget about the validation set", + "text_level": 1, + "bbox": [ + 76, + 786, + 413, + 801 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Picking a model checkpoint While it is well-understood that early stopping is beneficial to classification accuracy when training a classifier with noisy labels [34], we investigate whether this extends to OOD detection performance. We compare the OOD detection performance for the 2 checkpointing strategies, and find that for almost all meth", + "bbox": [ + 75, + 809, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ods, early stopping is beneficial ( $\\epsilon_{\\mathrm{min}}$ [early>last] < 0.5). However, looking at Figure 6, we note that early stopping may increase the rate at which OOD detection performance drops due to label noise for a given drop in accuracy - to an extreme in the case of TempScaling. A closer look at Figure 5 gives some insight into its effect on the logits.", + "bbox": [ + 496, + 305, + 890, + 396 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "What about OOD detector parameter tuning? Many of the methods in our benchmark involve a set-up step where dataset-specific parameters are computed (e.g. statistics for ID samples) and/or a tuning step where hyperparameters are tuned to maximize OOD detection performance on a held-out validation OOD set. The set of (hyper)parameters for each method is outlined in the supplementary. Among these methods, some make use of classification labels during set-up/tuning - e.g. to compute statistics for each class. In a label noise setting, this raises the question of whether to use a clean validation set or the noisy training set for set-up/tuning, or whether this makes a difference. We compare both settings for the 6 methods in our benchmark making use of class labels during set-up: MDS, RMDS, MDSEsemble, GRAM, OpenMax and SHE, with results visualized in the supplementary. For SHE which computes the mean of features for each class during setup, there is no statistically significant difference between using clean validation labels or potentially noisy training labels, although the latter may be better in some cases $(\\epsilon_{\\mathrm{min}}[\\mathrm{SHE}_{\\mathrm{val}} > \\mathrm{SHE}_{\\mathrm{train}}] = 1$ and $\\epsilon_{\\mathrm{min}}[\\mathrm{SHE}_{\\mathrm{train}} > \\mathrm{SHE}_{\\mathrm{val}}] = 0.63)$ . For methods based on the Malahanobis score, using noisy training labels to compute class-wise feature means and tied covariance is better $(\\epsilon_{\\mathrm{min}}[\\mathrm{MDS}_{\\mathrm{train}} > \\mathrm{MDS}_{\\mathrm{val}}] = 0.19$ and $\\epsilon_{\\mathrm{min}}[\\mathrm{RMDS}_{\\mathrm{train}} > \\mathrm{RMDS}_{\\mathrm{val}}] = 0)$ - intuitively, the class-specific statistics are more accurate with more data. Common to these 3 methods is that the OOD score at test-time does not depend on the predicted class (likely to be incorrect in a label noise setting), but is rather based on distance to the closest class in feature space (regardless of what class is predicted). 
OpenMax computes the mean logit per class, only considering correctly predicted samples (labels are used to check correctness) - using a potentially", + "bbox": [ + 496, + 401, + 892, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "22632", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "noisy training set yields consistently better performance $(\\epsilon_{\\mathrm{min}}[\\mathrm{OpenMax}_{\\mathrm{train}} > \\mathrm{OpenMax}_{\\mathrm{val}}] = 0)$ . Lastly, and in contrast to the other methods, GRAM benefits from using clean validation samples rather than a large number of noisy training samples for computing class-specific bounds of feature correlations $(\\epsilon_{\\mathrm{min}}[\\mathrm{GRAM}_{\\mathrm{val}} > \\mathrm{GRAM}_{\\mathrm{train}}] = 0.23)$ . However, the performance gap between the two settings is small.", + "bbox": [ + 75, + 90, + 472, + 196 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Takeaway: Clean isn't always better or possible The use of clean vs. noisy labels during label-based parameter tuning is an important consideration. For distance-based methods which compute class-wise statistics, it appears that quantity often trumps quality, even when over $30\\%$ of training labels are incorrect. This is promising for applications where a clean validation set is not available (e.g. medical imaging where labels are inherently subjective [28]).", + "bbox": [ + 75, + 202, + 472, + 325 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.5. What about a more realistic setting?", + "text_level": 1, + "bbox": [ + 76, + 351, + 390, + 368 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We have thus far studied OOD detection in a simple (but standard [67]) setting where the base classifier is trained from scratch, and where there is strong semantic and covariate shift between ID and OOD images. Yet in practice, pre-training is widely adopted, and distribution shifts may be much more subtle. We therefore extend our study of label noise to fine-grained semantic shift detection with a base classifier that has been pre-trained on ImageNet [8] before being trained on a dataset of interest. We follow the Semantic Shift Benchmark (SSB), where the goal is to detect unknown classes from a known dataset (e.g. held-out bird species from the CUB [61] dataset or held-out aircraft model variants from FGVC-Aircraft [38]). Using SSB splits, we train ResNet50s (pre-trained) on half of the classes from CUB/FGVC-Aircraft (448x448 images), and we evaluate post-hoc OOD detection performance on known classes from the test set (ID) vs. the remaining unseen classes (OOD) split into 3 increasingly difficult sets. Since clean vs. real noisy label pairs are not available, we inject synthetic label noise in the training set (SU noise model) and follow the same evaluation procedure as in previous sections. Fig. 7 summarizes its detrimental effect on fine-grained semantic shift detection across the 20 studied OOD detection methods: increasing label noise and \"difficulty\" of the OOD set act as orthogonal bottle-necks to detection performance. Increased label noise pulls AUROC $_{\\text{ID vs. OOD}}$ and AUROC $_{\\text{incorrect vs. 
OOD}}$ to $50\\%$ .", + "bbox": [ + 75, + 380, + 472, + 789 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Takeaway: Limitations of post-hoc OOD detectors extend beyond toy settings Even in a more realistic setting where the base classifier has first been pre-trained on ImageNet and OOD samples are similar in appearance to the ID dataset, all 20 methods poorly separate incorrectly classified ID samples from OOD samples, and degrade when the classifier has been trained on noisy labels.", + "bbox": [ + 75, + 795, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/890c31feb3aff5fdc415243edfb80ccf250df6d2bdac7ea35fbda2b154a8a0fa.jpg", + "image_caption": [ + "Figure 7. Each boxplot shows the performance distribution across 6 classifiers (3 seeds, 2 checkpoints) $\\times$ 20 post-hoc methods, considering all ID test samples (top) or only incorrectly classified ones (bottom) in the AUROC metric." + ], + "image_footnote": [], + "bbox": [ + 498, + 87, + 893, + 281 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Zooming out", + "text_level": 1, + "bbox": [ + 500, + 369, + 633, + 388 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Study limitations and possible extensions We have focused on post-hoc OOD detection methods due to their pragmatic appeal and to maintain experimental feasibility. Extending this study to training-based OOD detection methods [71] would of course be valuable. Aligning with OOD benchmarks [67], we also trained the base classifiers with a standard discriminative objective. Alternative supervision schemes may also be considered, and the effect of pre-training (and on what?) would be interesting to further analyse in a label noise setting, as it been shown to improve post-hoc OOD detection performance [2, 20, 33]. Lastly, the potential of noisy label removal [29, 43] or noise-robust learning [27, 63] techniques from the label noise literature (designed with classification performance in mind) for improving OOD detection would be a natural next step.", + "bbox": [ + 496, + 396, + 893, + 622 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Conclusion We have explored the intersection between classification label noise and OOD detection, and conducted extensive experiments to extract new insights into the limitations of existing post-hoc methods. Our findings also echo the need to re-think the aims and evaluation of OOD detection in the context of safe deployment [26] (e.g. do we really want to exclude ID misclassifications from detection?). We hope that this work paves the way for future investigations which prioritize the robustness and applicability of OOD detection models in practical, imperfect classification scenarios which account for data uncertainty.", + "bbox": [ + 496, + 623, + 895, + 789 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "8. Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 801, + 687, + 819 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported by the Danish Data Science Academy, which is funded by the Novo Nordisk Foundation (NNF21SA0069429) and VILLUM FONDEN (40516). 
Thanks to the Pioneer Centre for AI (DNRF grant P1).", + "bbox": [ + 498, + 827, + 893, + 888 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "22633", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Gorkem Algan and Ilkay Ulusoy. Image classification with deep learning in the presence of noisy labels: A survey. Knowledge-Based Systems, 215:106771, 2021. 2", + "[2] Anders Johan Andreassen, Yasaman Bahri, Behnam Neyshabur, and Rebecca Roelofs. The evolution of out-of-distribution robustness throughout fine-tuning. Transactions on Machine Learning Research, 2022. 8", + "[3] Abhijit Bendale and Terrance E. Boult. Towards open set deep networks. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1563-1572, 2016. 3", + "[4] Julian Bitterwolf, Maximilian Mueller, and Matthias Hein. In or out? fixing imagenet out-of-distribution detection evaluation. In ICML, 2023. 1", + "[5] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 – mining discriminative components with random forests. In European Conference on Computer Vision, 2014. 4", + "[6] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 3606-3613, 2014. 4", + "[7] Eustasio Del Barrio, Juan A Cuesta-Albertos, and Carlos Matrán. An optimal transportation approach for assessing almost stochastic order. In The Mathematics of the Uncertain, pages 33-44. Springer, 2018. 4", + "[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, 2009. 8", + "[9] Li Deng. The mnist database of handwritten digit images for machine learning research [best of the web]. IEEE Signal Processing Magazine, 29(6):141-142, 2012. 4", + "[10] Andrija Djurisic, Nebojsa Bozanic, Arjun Ashok, and Rosanne Liu. Extremely simple activation shaping for out-of-distribution detection. In The Eleventh International Conference on Learning Representations, 2023. 1, 3", + "[11] Rotem Dror, Segev Shlomov, and Roi Reichart. Deep dominance - how to properly compare deep neural models. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28-August 2, 2019, Volume 1: Long Papers, pages 2773- 2785. Association for Computational Linguistics, 2019. 4", + "[12] Stanislav Fort, Jie Ren, and Balaji Lakshminarayanan. Exploring the limits of out-of-distribution detection. In Advances in Neural Information Processing Systems, 2021. 3", + "[13] Benoit Frenay and Michel Verleysen. Classification in the presence of label noise: A survey. IEEE Transactions on Neural Networks and Learning Systems, 25(5):845-869, 2014. 2", + "[14] Ido Galil, Mohammed Dabbah, and Ran El-Yaniv. A framework for benchmarking class-out-of-distribution detection and its application toImagenet. In The Eleventh International Conference on Learning Representations, 2023. 1, 2, 3, 5" + ], + "bbox": [ + 78, + 114, + 467, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q. Weinberger. 
On calibration of modern neural networks. In Proceedings of the 34th International Conference on Machine Learning, pages 1321-1330. PMLR, 2017. 3", + "[16] Ali Hassani, Steven Walton, Nikhil Shah, Abulikemu Abuduweili, Jiachen Li, and Humphrey Shi. Escaping the big data paradigm with compact transformers. 2021. 4", + "[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4", + "[18] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. Eurosat: A novel dataset and deep learning benchmark for land use and land cover classification. IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, 2019. 4", + "[19] Dan Hendrycks and Kevin Gimpel. A baseline for detecting misclassified and out-of-distribution examples in neural networks. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net, 2017. 1, 3", + "[20] Dan Hendrycks, Kimin Lee, and Mantas Mazeika. Using pre-training can improve model robustness and uncertainty. In Proceedings of the 36th International Conference on Machine Learning, pages 2712-2721. PMLR, 2019. 8", + "[21] Dan Hendrycks, Steven Basart, Mantas Mazeika, Andy Zou, Joseph Kwon, Mohammadreza Mostajabi, Jacob Steinhardt, and Dawn Song. Scaling out-of-distribution detection for real-world settings. In Proceedings of the 39th International Conference on Machine Learning, pages 8759-8773. PMLR, 2022. 3", + "[22] Dan Hendrycks, Steven Basart, Mantas Mazeika, Andy Zou, Joe Kwon, Mohammadreza Mostajabi, Jacob Steinhardt, and Dawn Song. Scaling out-of-distribution detection for real-world settings. ICML, 2022. 3", + "[23] R. Huang and Y. Li. Mos: Towards scaling out-of-distribution detection for large semantic space. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8706-8715, Los Alamitos, CA, USA, 2021. IEEE Computer Society. 3", + "[24] Rui Huang, Andrew Geng, and Yixuan Li. On the importance of gradients for detecting distributional shifts in the wild. In Advances in Neural Information Processing Systems, 2021. 3", + "[25] Galadrielle Humblot-Renaux, Sergio Escalera, and Thomas B. Moeslund. Beyond auroc & co. for evaluating out-of-distribution detection performance. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 3881-3890, 2023. 4", + "[26] Paul F Jaeger et al. A call to reflect on evaluation practices for failure detection in image classification. In ICLR, 2023. 8", + "[27] Lu Jiang, Di Huang, Mason Liu, and Weilong Yang. Beyond synthetic noise: Deep learning on controlled noisy labels. In Proceedings of the 37th International Conference on Machine Learning, pages 4804-4815. PMLR, 2020. 8" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "22634", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Davood Karimi, Haoran Dou, Simon K. Warfield, and Ali Gholipour. Deep learning with noisy labels: Exploring techniques and remedies in medical image analysis. Medical Image Analysis, 65:101759, 2020. 1, 2, 8", + "[29] Taehyeon Kim, Jongwoo Ko, sangwook Cho, JinHwan Choi, and Se-Young Yun. Fine samples for learning with noisy labels. 
In Advances in Neural Information Processing Systems, pages 24137–24149. Curran Associates, Inc., 2021. 8", + "[30] Konstantin Kirchheim, Marco Filax, and Frank Ortmeier. Pytorch-ood: A library for out-of-distribution detection based on pytorch. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 4351-4360, 2022. 1", + "[31] Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. Technical Report 0, University of Toronto, Toronto, Ontario, 2009. 2, 4", + "[32] Kimin Lee, Kibok Lee, Honglak Lee, and Jinwoo Shin. A simple unified framework for detecting out-of-distribution samples and adversarial attacks. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2018. 1, 3", + "[33] Jingyao Li, Pengguang Chen, Zexin He, Shaozuo Yu, Shu Liu, and Jiaya Jia. Rethinking out-of-distribution (ood) detection: Masked image modeling is all you need. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11578-11589, 2023. 8", + "[34] Mingchen Li, Mahdi Soltanolkotabi, and Samet Oymak. Gradient descent with early stopping is provably robust to label noise for overparameterized neural networks. In Proceedings of the Twenty Third International Conference on Artificial Intelligence and Statistics, pages 4313-4324. PMLR, 2020. 7", + "[35] Shiyu Liang, Yixuan Li, and R. Srikant. Enhancing the reliability of out-of-distribution image detection in neural networks. In International Conference on Learning Representations, 2018. 3", + "[36] Sheng Liu, Jonathan Niles-Weed, Narges Razavian, and Carlos Fernandez-Granda. Early-learning regularization prevents memorization of noisy labels. Advances in Neural Information Processing Systems, 33, 2020. 2", + "[37] Weitang Liu, Xiaoyun Wang, John Owens, and Yixuan Li. Energy-based out-of-distribution detection. In Advances in Neural Information Processing Systems, pages 21464-21475. Curran Associates, Inc., 2020. 3", + "[38] S. Maji, J. Kannala, E. Rahtu, M. Blaschko, and A. Vedaldi. Fine-grained visual classification of aircraft. Technical report, 2013. 8", + "[39] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, and Andrew Y. Ng. Reading Digits in Natural Images with Unsupervised Feature Learning. In NIPS Workshop on Deep Learning and Unsupervised Feature Learning 2011, 2011. 2, 4", + "[40] Amanda Olmin and Fredrik Lindsten. Robustness and reliability when training with noisy labels. In Proceedings of The 25th International Conference on Artificial Intelligence and Statistics, pages 922–942. PMLR, 2022. 2", + "[41] Diane Oyen, Michal Kucer, Nick Hengartner, and Har Simrat Singh. Robustness to label noise depends on the shape of" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "the noise distribution. In Advances in Neural Information Processing Systems, 2022. 2", + "[42] Chao Pan, Bo Yuan, Wei Zhou, and Xin Yao. Towards robust uncertainty estimation in the presence of noisy labels. In Artificial Neural Networks and Machine Learning - ICANN 2022, pages 673-684, Cham, 2022. Springer International Publishing. 2", + "[43] Geoff Pleiss, Tianyi Zhang, Ethan Elenberg, and Kilian Q Weinberger. Identifying mislabeled data using the area under the margin ranking. In Advances in Neural Information Processing Systems, pages 17044-17056. Curran Associates, Inc., 2020. 
8", + "[44] Jie Ren, Stanislav Fort, Jeremiah Liu, Abhijit Guha Roy, Shreyas Padhy, and Balaji Lakshminarayanan. A simple fix to mahalanobis distance for improving near-ood detection, 2021. 3", + "[45] David Rolnick, Andreas Veit, Serge Belongie, and Nir Shavit. Deep learning is robust to massive label noise, 2018. 2", + "[46] Ragav Sachdeva, Filipe R. Cordeiro, Vasileios Belagiannis, Ian Reid, and Gustavo Carneiro. Evidentialmix: Learning with combined open-set and closed-set noisy labels. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 3607-3615, 2021. 2", + "[47] Mohammadreza Salehi, Hossein Mirzaei, Dan Hendrycks, Yixuan Li, Mohammad Hossein Rohban, and Mohammad Sabokrou. A unified survey on anomaly, novelty, open-set, and out of-distribution detection: Solutions and future challenges. Transactions on Machine Learning Research, 2022. 1", + "[48] Chandramouli Shama Sastry and Sageev Oore. Detecting out-of-distribution examples with Gram matrices. In Proceedings of the 37th International Conference on Machine Learning, pages 8491-8501. PMLR, 2020. 3", + "[49] Vikash Sehwag, Arjun Nitin Bhagoji, Liwei Song, Chawin Sitawarin, Daniel Cullina, Mung Chiang, and Prateek Mittal. Analyzing the robustness of open-world machine learning. In Proceedings of the 12th ACM Workshop on Artificial Intelligence and Security, page 105–116, New York, NY, USA, 2019. Association for Computing Machinery. 3", + "[50] Hwanjun Song, Minseok Kim, and Jae-Gil Lee. SELFIE: Refurbishing unclean samples for robust deep learning. In ICML, 2019. 2", + "[51] Hwanjun Song, Minseok Kim, Dongmin Park, Yooju Shin, and Jae-Gil Lee. Learning from noisy labels with deep neural networks: A survey. IEEE Transactions on Neural Networks and Learning Systems, pages 1-19, 2022. 2", + "[52] Hyun Oh Song, Yu Xiang, Stefanie Jegelka, and Silvio Savarese. Deep metric learning via lifted structured feature embedding. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4", + "[53] Yue Song, Nicu Sebe, and Wei Wang. Rankfeat: Rank-1 feature removal for out-of-distribution detection. In Advances in Neural Information Processing Systems, 2022. 3", + "[54] Alexander Sorokin and David Forsyth. Utility data annotation with amazon mechanical turk. In 2008 IEEE Computer" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "22635", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Society Conference on Computer Vision and Pattern Recognition Workshops, pages 1-8, 2008. 1", + "[55] Yiyou Sun and Yixuan Li. Dice: Leveraging sparsification for out-of-distribution detection. In Computer Vision – ECCV 2022, pages 691–708, Cham, 2022. Springer Nature Switzerland. 3", + "[56] Yiyou Sun, Chuan Guo, and Yixuan Li. React: Out-of-distribution detection with rectified activations. In Advances in Neural Information Processing Systems, pages 144–157. Curran Associates, Inc., 2021. 3", + "[57] Yiyou Sun, Yifei Ming, Xiaojin Zhu, and Yixuan Li. Out-of-distribution detection with deep nearest neighbors. In Proceedings of the 39th International Conference on Machine Learning, pages 20827-20840. PMLR, 2022. 1, 3", + "[58] Ilya Tolstikhin, Neil Houlsby, Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Thomas Unterthiner, Jessica Yung, Andreas Peter Steiner, Daniel Keysers, Jakob Uszkoreit, Mario Lucic, and Alexey Dosovitskiy. 
MLP-mixer: An allMLP architecture for vision. In Advances in Neural Information Processing Systems, 2021. 4", + "[59] Dennis Ulmer, Christian Hardmeier, and Jes Frellsen. deepsignificance-easy and meaningful statistical significance testing in the age of neural networks. arXiv preprint arXiv:2204.06815, 2022. 4", + "[60] Sagar Vaze, Kai Han, Andrea Vedaldi, and Andrew Zisserman. Open-set recognition: A good closed-set classifier is all you need. In International Conference on Learning Representations, 2022. 1, 2, 3, 5, 6", + "[61] C. Wah, S. Branson, P. Welinder, P. Perona, and S. Belongie. The caltech-ucsd birds-200-2011 dataset. Technical Report CNS-TR-2011-001, California Institute of Technology, 2011. 8", + "[62] H. Wang, Z. Li, L. Feng, and W. Zhang. Vim: Out-of-distribution with virtual-logit matching. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4911-4920, Los Alamitos, CA, USA, 2022. IEEE Computer Society. 3", + "[63] Hongxin Wei, Huiping Zhuang, Renchunzi Xie, Lei Feng, Gang Niu, Bo An, and Yixuan Li. Mitigating memorization of noisy labels by clipping the model prediction. In Proceedings of the 40th International Conference on Machine Learning. JMLR.org, 2023. 8", + "[64] Jiaheng Wei, Zhaowei Zhu, Hao Cheng, Tongliang Liu, Gang Niu, and Yang Liu. Learning with noisy labels revisited: A study using real-world human annotations. In International Conference on Learning Representations, 2022. 2, 4", + "[65] Tong Xiao, Tian Xia, Yi Yang, Chang Huang, and Xiaogang Wang. Learning from massive noisy labeled data for image classification. In 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2691-2699, 2015. 4", + "[66] Jingkang Yang, Kaiyang Zhou, Yixuan Li, and Ziwei Liu. Generalized out-of-distribution detection: A survey. arXiv preprint arXiv:2110.11334, 2021. 1", + "[67] Jingkang Yang, Pengyun Wang, Dejian Zou, Zitang Zhou, Kunyuan Ding, WENXUAN PENG, Haoqi Wang, Guangyao Chen, Bo Li, Yiyou Sun, Xuefeng Du, Kaiyang" + ], + "bbox": [ + 78, + 90, + 468, + 901 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zhou, Wayne Zhang, Dan Hendrycks, Yixuan Li, and Zwei Liu. OpenOOD: Benchmarking generalized out-of-distribution detection. In Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2022. 1, 2, 3, 4, 8", + "[68] Xiaoyong Yuan, Pan He, Qile Zhu, and Xiaolin Li. Adversarial examples: Attacks and defenses for deep learning. IEEE Transactions on Neural Networks and Learning Systems, 30 (9):2805-2824, 2019. 3", + "[69] Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning requires rethinking generalization. In International Conference on Learning Representations, 2017. 2", + "[70] Jinsong Zhang, Qiang Fu, Xu Chen, Lun Du, Zelin Li, Gang Wang, xiaoguang Liu, Shi Han, and Dongmei Zhang. Out-of-distribution detection based on in-distribution data patterns memorization with modern hopfield energy. In The Eleventh International Conference on Learning Representations, 2023. 1, 3", + "[71] Jingyang Zhang, Jingkang Yang, Pengyun Wang, Haoqi Wang, Yueqian Lin, Haoran Zhang, Yiyou Sun, Xuefeng Du, Kaiyang Zhou, Wayne Zhang, Yixuan Li, Ziwei Liu, Yiran Chen, and Hai Li. Openood v1.5: Enhanced benchmark for out-of-distribution detection. 
arXiv preprint arXiv:2306.09301, 2023.8" + ], + "bbox": [ + 501, + 92, + 890, + 441 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "22636", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/8eab7491-89d9-4dd1-8ee1-40b9bd851b01_model.json b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/8eab7491-89d9-4dd1-8ee1-40b9bd851b01_model.json new file mode 100644 index 0000000000000000000000000000000000000000..834e1a6756027d1114708b360b1c0db6ae4adbfe --- /dev/null +++ b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/8eab7491-89d9-4dd1-8ee1-40b9bd851b01_model.json @@ -0,0 +1,2169 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.2, + 0.131, + 0.773, + 0.175 + ], + "angle": 0, + "content": "A noisy elephant in the room: Is your out-of-distribution detector robust to label noise?" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.203, + 0.824, + 0.221 + ], + "angle": 0, + "content": "Galadrielle Humblot-Renaux\\(^{1}\\) Sergio Escalera\\(^{1,2}\\) Thomas B. Moeslund\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.222, + 0.771, + 0.239 + ], + "angle": 0, + "content": "Visual Analysis and Perception lab, Aalborg University, Denmark" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.24, + 0.926, + 0.258 + ], + "angle": 0, + "content": "\\(^{2}\\)Department of Mathematics and Informatics, University of Barcelona and Computer Vision Center, Spain" + }, + { + "type": "text", + "bbox": [ + 0.237, + 0.261, + 0.778, + 0.273 + ], + "angle": 0, + "content": "gegeh@create.aau.dk sescalera@ub.edu tbm@create.aau.dk" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.309, + 0.314, + 0.327 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.34, + 0.474, + 0.619 + ], + "angle": 0, + "content": "The ability to detect unfamiliar or unexpected images is essential for safe deployment of computer vision systems. In the context of classification, the task of detecting images outside of a model's training domain is known as out-of-distribution (OOD) detection. While there has been a growing research interest in developing post-hoc OOD detection methods, there has been comparably little discussion around how these methods perform when the underlying classifier is not trained on a clean, carefully curated dataset. In this work, we take a closer look at 20 state-of-the-art OOD detection methods in the (more realistic) scenario where the labels used to train the underlying classifier are unreliable (e.g. crowd-sourced or web-scraped labels). Extensive experiments across different datasets, noise types & levels, architectures and checkpointing strategies provide insights into the effect of class label noise on OOD detection, and show that poor separation between incorrectly classified ID samples vs. 
OOD samples is an overlooked yet important limitation of existing methods. Code: https://github.com/glhr/ood-labelnoise" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.648, + 0.21, + 0.665 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.674, + 0.47, + 0.87 + ], + "angle": 0, + "content": "In many real-world applications where deep neural networks are deployed \"in the wild\", it is desirable to have models that not only correctly classify samples drawn from the distribution of labeled data but also flag unexpected inputs as out-of-distribution (OOD). This has motivated the development of a wide range of OOD detection methods and benchmarks for computer vision [47, 66]. In particular, post-hoc OOD detection methods have shown wide appeal: compared to training-based methods, post-hoc OOD detectors can be applied on top of existing image classifiers without the need for re-training, have little to no architecture constraints, do not compromise classification performance, and achieve strong performance in large-scale settings [67]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Existing OOD benchmarks place significant emphasis on carefully designing the selection of OOD datasets used" + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.307, + 0.861, + 0.428 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.436, + 0.895, + 0.478 + ], + "angle": 0, + "content": "Figure 1. Can state-of-the-art OOD detectors tell incorrectly classified ID images apart from OOD inputs? Not really. Here we compare their performance across 396 trained classifiers." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.507, + 0.893, + 0.734 + ], + "angle": 0, + "content": "for evaluation [4, 14, 60, 67]. In contrast, the role of the in-distribution (ID) dataset used for training the underlying classifier is seldom discussed. Among the most popular choices of ID dataset are MNIST, CIFAR10, CIFAR100 and ImageNet [30, 67] - all of which have been carefully curated and reliably annotated. Yet, in practice, the collection of labelled datasets involves a trade-off between acquisition time/cost and annotation quality - human inattention, mis-clicking, limited expertise, crowd-sourcing, automated annotation, and other cost-saving measures inevitably introduce labelling errors [54]. Besides, some images are inherently ambiguous to label even for the most knowledgeable and careful of annotators [28]. Considering how pervasive the problem of label noise is in real-world image classification datasets, its effect on OOD detection is crucial to study." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.734, + 0.894, + 0.825 + ], + "angle": 0, + "content": "To address this gap, we systematically analyse the label noise robustness of a wide range of OOD detectors, ranging from the widely adopted Maximum Softmax Probability (MSP) baseline [14, 19], to distance-based methods operating in feature space [32, 57], to more recent, complex methods such as SHE [70] and ASH [10]. In particular:" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.825, + 0.894, + 0.901 + ], + "angle": 0, + "content": "1. We present the first study of post-hoc OOD detection in the presence of noisy classification labels, examining the performance of 20 state-of-the-art methods under different types and levels of label noise in the training data. 
Our study includes multiple classification architectures" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.521, + 0.957 + ], + "angle": 0, + "content": "22626" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.097, + 0.092, + 0.468, + 0.152 + ], + "angle": 0, + "content": "and datasets, ranging from the beloved CIFAR10 to the more difficult Clothing1M, and shows that even at a low noise rate, the label noise setting poses an interesting challenge for many methods." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.153, + 0.468, + 0.243 + ], + "angle": 0, + "content": "2. We revisit the notion that OOD detection performance correlates with ID accuracy [14, 60], examining when and why this relation holds. Robustness to inaccurate classification requires that OOD detectors effectively separate mistakes on ID data from OOD samples - yet most existing methods confound the two (Figure 1)." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.243, + 0.468, + 0.288 + ], + "angle": 0, + "content": "3. Our analysis includes key takeaways and recommendations for future evaluation and development of OOD detection methods considering an unreliable label setting." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.153, + 0.468, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.302, + 0.23, + 0.319 + ], + "angle": 0, + "content": "2. Problem set-up" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.327, + 0.468, + 0.387 + ], + "angle": 0, + "content": "In this work, we tackle the question: what happens when post-hoc OOD detectors are applied on top of a classifier trained with unreliable labels - a common setting in practice? We introduce the main relevant concepts below." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.388, + 0.468, + 0.539 + ], + "angle": 0, + "content": "Classifier We study OOD detection in the context of supervised image classification, where a discriminative model \\( h: \\mathcal{X} \\to \\mathcal{Y} \\) is trained on a dataset of \\( N \\) labelled examples \\( D_{train} = \\{(x_i, y_i)\\}_{i=1}^N \\in \\mathcal{X} \\times \\mathcal{Y} \\), where each \\( x \\) is an input image and each \\( y \\) is the corresponding class from the label space \\( \\mathcal{Y} \\). A common choice would be CIFAR10 [31]. \\( P_{train}(X, Y) \\) defines the underlying training data distribution. The classifier is evaluated on a test set \\( D_{test} \\) drawn from the same distribution \\( P_{test}(X, Y) = P_{train}(X, Y) \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.539, + 0.468, + 0.689 + ], + "angle": 0, + "content": "OD detector Post-hoc OOD detection equips the trained classifier \\( h \\) with a scoring function \\( o: X \\to \\mathbb{R} \\) aiming to distinguish usual examples drawn from \\( P_{test}(X) \\) (ID samples) and anomalous (OOD) examples drawn from a disjoint, held-out distribution \\( P_{out}(X) \\). In practice, a collection of auxiliary datasets with minimal semantic overlap (e.g. CIFAR10 → SVHN [39]) is commonly used for evaluation [67]. Ideally, the score assigned to ID samples should be consistently lower (or higher) than for OOD samples, such that anomalous inputs can easily be flagged." 
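To make this set-up concrete, a post-hoc detector only needs scores computed from the frozen classifier's outputs. Below is a minimal illustrative sketch (not the benchmark code) using the MSP baseline introduced later as the scoring function o; `logits` is assumed to be an (N, |Y|) array produced by the trained classifier h.

```python
import numpy as np

def msp_score(logits):
    """o(x): maximum softmax probability; higher = more ID-like."""
    z = logits - logits.max(axis=1, keepdims=True)    # numerically stable softmax
    probs = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)
    return probs.max(axis=1)

def flag_ood(test_scores, id_val_scores, keep_id=0.95):
    """Flag inputs whose score falls below a threshold chosen so that
    roughly 95% of held-out ID samples are accepted."""
    threshold = np.quantile(np.asarray(id_val_scores), 1.0 - keep_id)
    return np.asarray(test_scores) < threshold         # True = flagged as OOD
```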
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.468, + 0.855 + ], + "angle": 0, + "content": "Label noise We consider a noisy label setting, where the classifier \\( h \\) does not have access to the true target values \\( y_{i} \\) during training, but rather learns from a noisy dataset \\( D_{\\text{noisy}} = \\{(x_i, \\hat{y}_i)\\}_{i=1}^N \\), where the target labels are corrupted: \\( \\exists i \\) such that \\( \\hat{y}_i \\neq y_i \\). In this work, we consider only closed-set label noise, where \\( D_{\\text{noisy}} \\in \\mathcal{X} \\times \\mathcal{Y} \\) (that is, the noisy labels lie in the same label space as the true labels [46]). The noise level \\( \\epsilon \\) is given by \\( P(y \\neq \\hat{y}) \\), the probability that an observed label is incorrect. Common models for studying and simulating label noise are (we refer to [13] for a detailed taxonomy):" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.856, + 0.468, + 0.901 + ], + "angle": 0, + "content": "1. Noisy Completely at Random (NCAR) or uniform label noise: labels are flipped at a constant rate \\(\\epsilon\\), regardless of class or image." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.891, + 0.136 + ], + "angle": 0, + "content": "2. Noisy at Random (NAR) or class-conditional label noise: a constant noise rate across all images of the same class, but different classes may have different noise rates." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.137, + 0.891, + 0.182 + ], + "angle": 0, + "content": "3. Noisy Not at Random (NNAR) or instance-dependent label noise: noisy labels are jointly determined by the true class and the associated image." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.092, + 0.891, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.183, + 0.892, + 0.273 + ], + "angle": 0, + "content": "In practice, real (as opposed to synthetically generated) label noise occurring from an imperfect annotation pipeline follows complex patterns, and is thus best represented by an instance-dependent model: some classes are more likely to be mislabeled than others, and so are some images (e.g. ambiguous or rare samples) [50, 64]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.287, + 0.637, + 0.302 + ], + "angle": 0, + "content": "3. Related work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.312, + 0.892, + 0.599 + ], + "angle": 0, + "content": "Studying label noise The effect of unreliable labels on supervised learning is a well-studied problem in deep learning [51] and computer vision [1, 28], as errors or inconsistencies are a natural part of label collection in many real applications. Though increased dataset size can help [45], noisy labels degrade classification performance, especially in the later stages of training where over-parameterized models are prone to memorizing them [36, 69]. The precise effects of label noise have been shown to depend on the noise model and distribution [41]. A recent CIFAR classification benchmark suggests that models trained on real, instance-dependent noisy labels are significantly more prone to memorization than those trained on synthetic class-conditional labels with the same overall noise rate [64]. We therefore consider real noisy labels in our benchmark (stemming from human annotation error), which we compare to two sets of synthetic noisy labels (uniform and class-conditional). We also compare the effect of validation-based early stopping vs. converging on the training set." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.599, + 0.892, + 0.855 + ], + "angle": 0, + "content": "Effect of label noise on reliability Existing studies of label noise are largely focused on classification accuracy, and few works address the other side of the coin: reliability. We look at reliability from the angle of OOD detection performance - to the best of our knowledge, there is currently no comparable study of OOD detection under a noisy label setting. Most closely related to our work are perhaps the experiments in [42] and the analysis in [40]. [42] evaluates the effect of synthetic uniform label noise on MC-dropout and deep ensembles' uncertainty estimates, showing a significant degradation in OOD detection performance with increasing noise levels - in comparison, we study post-hoc OOD detection (with a wider variety of architectures, datasets, and methods) and consider real noisy datasets. [40] studies label noise robustness in terms of model calibration, showing that early stopping, while beneficial in terms of accuracy, offers no reliability guarantees." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Benchmarking OOD detection robustness Previous works have investigated the limits of state-of-the-art OOD detection methods in various challenging settings, such as" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "22627" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.182 + ], + "angle": 0, + "content": "semantic similarity between ID vs. OOD classes [12, 14, 60], fine-grained ID labels [23], large-scale datasets [22] and adversarial attacks [49, 68]. In contrast, we focus on robustness to degraded classification performance on the ID dataset due to noisy labels, which to the best of our knowledge has comparably received little attention." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.183, + 0.473, + 0.424 + ], + "angle": 0, + "content": "Relation between ID classification and OOD detection performance In the standard clean label setting, a strong relationship between ID classification and OOD detection performance has been observed in prior work. [60] studies the relation between closed-set (ID) classification and open-set recognition performance (AUROC), and finds open-set recognition performance to be highly correlated with classification accuracy. [14] observes a similar trend for out-of-distribution detection performance across a large variety of pre-trained deep learning architectures, using the MSP as OOD score. Both works only consider clean training datasets, and a small subset of methods. We study the extent to which this relation holds across a wider range of OOD detection methods and noisy datasets, and provide a very simple explanation for why some methods like MSP reach such a high correlation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.438, + 0.304, + 0.453 + ], + "angle": 0, + "content": "4. OOD detection methods" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.463, + 0.47, + 0.538 + ], + "angle": 0, + "content": "We evaluate 20 post-hoc OOD detection methods from the OpenOOD benchmark [67] - currently the most comprehensive open-source benchmark available. Here we present and broadly categorize these methods based on how their scoring function is designed." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.539, + 0.47, + 0.765 + ], + "angle": 0, + "content": "Softmax-based OOD detection revolves around the idea that ID samples are associated with higher-confidence, lower-entropy predictions than OOD samples. The baseline Maximum Softmax Probability (MSP) [19] simply takes the Softmax \"confidence\" of the predicted class as OOD score. While MSP implicitly assumes a Softmax temperature of 1, TempScaling [15] treats the temperature as a hyper-parameter, softening or sharpening the Softmax probabilities (essentially modulating categorical entropy), with the aim of improving calibration. ODIN [35] combines temperature scaling with input perturbation - \"pushing\" the input image a little in the direction that increases the MSP. In contrast, the Generalized ENtropy (GEN) score considers the full predictive distribution and captures how much it deviates from a one-hot distribution." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Logit-based OOD detection bypasses the squashing effect of Softmax normalization. The Maximum Logit Score (MLS) [21] directly takes the logit of the predicted class. In a similar vein, energy-based OOD detection (EBO) was first proposed in [37]: a score is derived by applying the LogSumExp function to the logits - essentially a smooth version of the MLS, with an additional temperature parameter. Several post-hoc methods using an energy score have since followed suit, proposing various modifications to the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.182 + ], + "angle": 0, + "content": "network [55] or features [10, 53, 56] before extracting an energy score: REACT [56] clips activations at an upper bound, RankFeat [53] subtracts the rank-1 matrix from activations, DICE [55] applies weight sparsification such that only the strongest contributors remain, and ASH [10] sparsifies activations based on a pruning percentile." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.183, + 0.893, + 0.68 + ], + "angle": 0, + "content": "Distance-based OOD detection aims to capture how much a test sample deviates from the ID dataset. The Mahalanobis distance score (MDS) [32] method fits a Gaussian distribution to the features of each class in the ID dataset; at test-time, the OOD score is taken as the distance to the closest class. The same authors also proposed MDSE Ensemble [32], which computes an MDS score not just from the features extracted before the final layer, but also at earlier points in the network, and aggregates them. Alternatively, the Relative Mahalanobis distance score (RMDS) [44] was proposed as a simple fix to MDS, which additionally fits a class-independent Gaussian to the entire ID dataset to compute a background score which is subtracted from the class-specific MDS score. Among other distance-based methods which rely on class-wise statistics, KLMatching (KLM) [21] takes the smallest KL Divergence between a test sample's Softmax probabilities and the mean Softmax probability vector for each ID class. OpenMax [3] operates in logit space, fitting a class-wise Weibull distribution to the distances of ID samples from the mean logit vector. Simplified Hopfield Energy (SHE) [70] computes the inner product between a test sample's features and the mean ID feature of the predicted class. 
GRAM [48] computes the Gram matrices of intermediate feature representations throughout the network, comparing them with the range of values observed for each class in the ID data. In contrast, deep k-nearest neighbor (KNN) [57] proposes a simple approach with no distributional assumptions - computing its score as the Euclidean distance to the closest samples from the ID set, regardless of class. Lastly, Virtual-logit Matching (VIM) [62] combines a logit energy score with a class-agnostic term capturing how features deviate from a principal subspace defined by the training set." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.681, + 0.892, + 0.788 + ], + "angle": 0, + "content": "Gradient-based OOD detection: GradNorm [24] is the only method in OpenOOD which directly derives its score from the gradient space, claiming that gradient magnitude is higher for ID inputs. The KL divergence between predicted Softmax probabilities and a uniform target is backpropagated to obtain gradients w.r.t the last layer parameters, followed by an \\( L_{1} \\) norm to obtain the magnitude." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.8, + 0.633, + 0.817 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.855 + ], + "angle": 0, + "content": "We summarize our experimental set-up below, and refer to the supplementary for further details." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.893, + 0.901 + ], + "angle": 0, + "content": "ID Datasets We select popular image classification datasets from the label noise literature featuring real noisy labels alongside clean reference labels, spanning different" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "22628" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.079, + 0.089, + 0.472, + 0.221 + ], + "angle": 0, + "content": "
ID dataset | classes | # images (train/val/test) | resolution | noise rate
[31] | CIFAR-10 | 0%
CIFAR-10N-Agg | 10 | 50,000/1,000/9,000 | 32x32 | 9.01%
[64] | CIFAR-10N-Rand1 | 17.23%
CIFAR-10N-Worst | 40.21%
[31] | CIFAR-100-Fine | 100 | 50,000/1,000/9,000 | 32x32 | 0%
[64] | CIFAR-100N-Fine | 40.20%
[31] | CIFAR-100-Coarse | 20 | 50,000/1,000/9,000 | 32x32 | 0%
[64] | CIFAR-100N-Coarse | 26.40%
[65] | Clothing1M | 14 | 24,637/7,465/5,395 | 256x256 | 0%
Clothing1M-Noisy | 38.26%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.231, + 0.47, + 0.3 + ], + "angle": 0, + "content": "Table 1. Dataset overview. Clean ones are shown in bold. The training set (clean or noisy labels) is used to train the classifier; the validation set (clean labels) is used for early stopping; the test set (clean labels) is used for evaluating classification and OOD detection performance. We always use clean labels for evaluation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.327, + 0.47, + 0.522 + ], + "angle": 0, + "content": "input sizes, number of classes, and sources & levels of label noise - see Table 1. The recently released CIFAR-N dataset [64] provides noisy re-annotations of CIFAR-10 and CIFAR-100 collected via crowd-sourcing: each image was annotated by 3 people, and different noisy label sets were created for different label selection methods (majority voting, random selection, or worst label selection). Note that CIFAR-100-Fine and CIFAR-100-Coarse contain the same set of images - only the class definitions and labels differ. Clothing1M [65] is a large-scale dataset collected by scraping shopping websites. Although the raw Clothing1M contains over a million images, we consider the smaller subset of images for which there is both a noisy and clean label." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.523, + 0.47, + 0.674 + ], + "angle": 0, + "content": "Synthetic noise For each real noisy label set, using the corresponding clean labels, we additionally create 2 synthetic counterparts with the same overall noise rate: one following a uniform (NCAR, class-independent) label noise model, and the other following a class-conditional label noise model with the exact same noise transition matrix as the real noise. We name these synthetic variants SU (Synthetic Uniform noise) and SCC (Synthetic Class-Conditional noise) - for example, from CIFAR-10N-Agg we create 2 synthetic versions, SU and SCC." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.47, + 0.885 + ], + "angle": 0, + "content": "ODD Datasets For fair comparison, we use the same selection of OOD datasets for all models - the OOD datasets are therefore chosen such that there is minimal semantic overlap with any of the ID datasets. We include MNIST [9], SVHN [39], Textures [6] as they are commonly used as examples of \"far\"-OOD [67] (very different appearance and semantics than the ID dataset). As examples of more natural images, we also include EuroSAT-RGB [18], Food-101 [5], a sub-set of the Stanford Online Products [52], and a 12-class sub-set of ImageNet. Since some methods require an OOD validation set for hyperparameter tuning, half of these classes are randomly selected and held-out for this purpose. The other 6 ImageNet classes, and the other OOD datasets make up the OOD test set." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.468, + 0.9 + ], + "angle": 0, + "content": "Evaluation metrics We evaluate OOD detectors'abil" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.257 + ], + "angle": 0, + "content": "ity to separate ID vs. OOD samples in terms of the Area Under the Receiver Operating Characteristic Curve (AUROC), where images from the ID test set (e.g. CIFAR10 test set) are considered positive samples, and those from the OOD test set (e.g. SVHN test set) as negatives. This is the most commonly reported metric in the literature [25], and we denote it as AUROC\\(_{\\text{ID vs. OOD}}\\). 
In addition, unlike previous works, we separately measure the AUROC\\(_{\\text{correct vs. OOD}}\\) (and AUROC\\(_{\\text{incorrect vs. OOD}}\\)), where only correctly (or incorrectly) classified samples from the ID test set are considered - ideally, performance should be high on both metrics." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.258, + 0.892, + 0.438 + ], + "angle": 0, + "content": "Architectures We include 3 architecture families: CNNs, MLPs and transformers. We select lightweight architectures which have shown competitive results when trained on small-scale datasets: ResNet18 [17], MLP-Mixer [58] and Compact Transformers [16]. Following the OpenOOD benchmark [67], we do not adopt any advanced training strategies besides standard data augmentation. For each training dataset, we repeat training with 3 random seeds, and save 2 model checkpoints: an early checkpoint (based on best validation accuracy) and the last checkpoint (after a pre-defined number of epochs has elapsed, allowing for convergence - differs per architecture)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.439, + 0.892, + 0.59 + ], + "angle": 0, + "content": "Bird's eye view To summarize, we train 3 different classifier architectures on 22 datasets (4 clean, 6 with real label noise, 12 with synthetic label noise), with 3 random seeds and 2 checkpoints saved per model - adding up to 396 distinct classifiers. On top of each classifier, 20 different OOD detection methods are applied and evaluated on 7 OOD datasets. Throughout the paper, OOD detection performance is taken as the median across the 7 OOD datasets (see the supplementary for results and a discussion of the median vs. mean OOD detection performance)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.59, + 0.892, + 0.727 + ], + "angle": 0, + "content": "Statistical significance tests When comparing pairs of methods or settings, we use the Almost Stochastic Order (ASO) test [7, 11] as implemented by Ulmer et al. [59]. This statistical test was specifically designed to compare deep learning models, making no distributional assumptions. We apply ASO with a significance level \\(\\alpha = 0.05\\) and report \\(\\epsilon_{\\mathrm{min}}[\\mathrm{A} > \\mathrm{B}]\\). If \\(\\epsilon_{\\mathrm{min}}[\\mathrm{A} > \\mathrm{B}] \\geq 0.5\\) we cannot claim that method A is better than method B; the smaller \\(\\epsilon_{\\mathrm{min}}\\), the more confident we can be that method A is superior." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.74, + 0.597, + 0.756 + ], + "angle": 0, + "content": "6. Analysis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.902 + ], + "angle": 0, + "content": "We explore the effect of label noise on OOD detection, starting with an overall view of performance trends in Section 6.1, then looking at OOD detection in relation to classification performance in Section 6.2, delving into what works (and what doesn't) in Section 6.3, and raising important considerations about how/whether to make use of a clean validation set in Section 6.4. Section 6.5 extends results to a more practical setting. More detailed analyses and additional supporting figures are in the supplementary." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22629" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.11, + 0.088, + 0.86, + 0.288 + ], + "angle": 0, + "content": "
training labels | CIFAR10 | CIFAR100-Coarse | CIFAR100-Fine | Clothing1M
Agg | Rand1 | Worst | clean | N | SCC | SU | clean | N | SCC | SU | clean | N | SCC | SU
method | clean | N | SCC | SU | N | SCC | SU | N | SCC | SU
GRAM94.4589.4989.1290.788.8289.1990.5288.688.7387.9882.0780.0582.179.282.9376.3180.2482.6491.0489.0794.7195.37
MDS96.0787.9392.492.9792.3789.2587.4586.7486.4989.280.0778.8982.8480.1280.174.9674.4873.4987.1290.9888.5892.38
VIM95.6589.991.8192.388.7588.684.4986.3187.2388.7584.2976.6180.047881.3775.3173.2473.9488.9983.0987.1790.14
MDSEns92.5783.8983.6283.7981.883.0680.3682.9584.0284.1179.2578.3177.4173.684.8577.4778.4379.8595.3695.4495.7895.69
KNN93.6390.0788.7590.1487.7486.6685.1186.383.7384.1484.1674.480.3975.8283.2975.6776.4871.3585.3285.584.5980.87
RMDS92.9289.3887.9488.1489.0785.7387.0484.0381.9982.3582.1475.9377.3674.8183.287675.7573.4875.8171.4378.2266.66
DICE9083.3384.1886.2488.528186.2182.7979.5879.0782.7977.6875.0170.4382.5276.5173.9268.4184.9675.7286.6982.89
ReAct90.9187.3286.6382.1689.7482.9681.9784.578.4180.1182.7973.0973.6270.3883.7673.5773.7167.5582.5773.180.2276.58
GEN91.8685.9985.4482.0889.8682.8980.8483.7581.8180.5782.6973.2571.4770.9981.3473.473.167.1183.9173.5779.7976.78
EBO91.3184.8785.7381.6289.8881.9377.8883.0481.3877.482.7472.9970.9367.8581.4173.6573.0167.4285.1976.3285.3176.64
SHE89.687.8184.3386.4886.6383.1683.0483.2480.0678.9880.4271.88070.1180.3869.6369.5666.6882.2978.0778.477.73
ODIN91.4787.7186.3182.4889.7982.2180.6884.0982.1380.0981.4273.170.8869.9783.5974.8572.1967.1683.3871.5977.7375.47
MLS91.2684.7685.5981.5788.8182.317883.5482.0180.0182.6672.9270.8569.1981.4573.6472.567.0383.372.7577.7475.54
TempScale91.6785.7685.0482.2585.0778.179.7882.8580.5180.1381.6371.6769.9469.3580.7572.6671.2866.9879.8268.7786.2874.45
ASH88.3384.7582.3781.6682.2976.4772.4885.2778.2675.4882.7871.1973.2168.4382.7474.1370.9567.4881.5374.8976.6375.74
OpenMax90.586.1283.4682.2683.0582.8779.1280.3975.6877.4181.1476.6972.6569.1780.1372.8275.667.6671.7469.2372.5574.36
MSP91.3485.1984.8782.4185.1382.2180.6882.4879.480.0980.5170.4268.8869.9779.8570.9269.6166.9277.5766.0272.4473.89
KLM90.8483.782.1581.6980.1381.8880.6474.7975.9376.8379.3770.269.2467.8179.7875.5269.8967.2577.2665.4965.2262.01
GradNorm86.2279.5577.177.8481.6679.8876.9684.1172.9672.8869.2966.2773.1767.4470.9565.6571.2663.2979.7175.3273.9377.52
RankFeat81.8383.5375.8673.1278.2975.0779.1482.5777.2575.7573.1564.669.5465.4468.7776.0770.2467.6469.1675.8569.9373.37
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.297, + 0.895, + 0.355 + ], + "angle": 0, + "content": "Table 2. Best-case OOD detection performance (AUROCID vs. OOD in %) per method (that is, after selecting the best architecture-seed-checkpoint combination for each training label set). N, SCC, and SU refer to the real and synthetic noisy label sets described in Section 5. The top-3 for each training dataset are highlighted in bold, and the top-1 is underlined. In red are scores \\( < {75}\\% \\) and in orange scores between 75 and 80%. Rows are sorted based on the total performance across columns." + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.377, + 0.472, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.475, + 0.47, + 0.505 + ], + "angle": 0, + "content": "Figure 2. Distribution of OOD detection performance across methods & models when training the classifier on different label sets." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.528, + 0.383, + 0.543 + ], + "angle": 0, + "content": "6.1. Where there's noise there's trouble" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.552, + 0.47, + 0.839 + ], + "angle": 0, + "content": "Figure 2 gives an overview of OOD detection performance for different training datasets and label noise settings. We see a clear drop in overall OOD detection performance when label noise is introduced in the training dataset, compared to training on a cleanly labelled dataset (green). Even with only \\(9\\%\\) of incorrect CIFAR10 labels (CIFAR-Agg labels sets), the median \\(\\mathrm{AUROC}_{\\mathrm{IDvs.OOD}}\\) across all models drops by over \\(5\\%\\). In Table 2, for each method we report the best-case OOD detection performance for a given training label set. While most methods are able to reach \\(80\\%\\) \\(\\mathrm{AUROC}_{\\mathrm{IDvs.OOD}}\\) with a classifier trained on clean labels, the number of competitive methods falls with increasing label noise, especially at noise rates \\(>20\\%\\). GRAM, KNN, MDS, MDSEsemble and VIM are the only methods able to reach \\(90+\\%\\) AUROC on at least one of the noisy datasets. Takeaway: Enter the elephant Label noise in the classifier's training data makes it more difficult for post-hoc OOD detection methods to flag unfamiliar samples at test-time, even in small-scale settings like CIFAR10." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.848, + 0.383, + 0.865 + ], + "angle": 0, + "content": "6.2. Does accuracy tell the whole story?" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.471, + 0.903 + ], + "angle": 0, + "content": "The most obvious effect of label noise in the training data is a decrease in classification performance on ID test data. At" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.38, + 0.892, + 0.44 + ], + "angle": 0, + "content": "the same time, previous works have remarked a strong relation between classification performance and OOD detection for popular post-hoc methods like MSP [14] and MLS [60]. We dig deeper. When does this relation hold and why?" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.44, + 0.893, + 0.606 + ], + "angle": 0, + "content": "For which methods does this relation hold? In Figure 3, we quantify the relationship between ID accuracy and \\(\\mathrm{AUROC}_{\\mathrm{IDvs.OD}}\\) in terms of Spearman correlation \\(\\rho\\). 
We find that correlation varies widely across methods, being the strongest for MSP, and is generally weaker for those which operate earlier in the network. We also note that for all methods except KNN and RMDS, the label noise setting makes OOD detection performance less predictable - and so does early stopping (cf. Section 6.4). This points to the distribution of ID scores playing an important role in OOD detection performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.607, + 0.892, + 0.653 + ], + "angle": 0, + "content": "When it does - why? We provide a simple observation which is lacking in prior work: methods whose OOD detection performance predictably degrades along-" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.675, + 0.892, + 0.817 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.827, + 0.895, + 0.898 + ], + "angle": 0, + "content": "Figure 3. Does OOD detection performance (AUROC\\(_{\\text{ID vs. OOD}}\\)) correlate with ID classification performance (accuracy)? We measure the rank correlation across different architectures, seeds, checkpoints, and datasets for different label sets. All results shown here are statistically significant (\\(p < 0.001\\))." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.946, + 0.521, + 0.957 + ], + "angle": 0, + "content": "22630" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.088, + 0.472, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.204, + 0.471, + 0.261 + ], + "angle": 0, + "content": "Figure 4. Relationship between ID classification performance and OOD detection performance, considering all ID test samples (top) or only incorrectly classified ones (bottom) in the AUROC metric. Each point corresponds to a single model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.292, + 0.47, + 0.594 + ], + "angle": 0, + "content": "side classification accuracy are characterized by a high \\(\\mathrm{AUROC}_{\\mathrm{correct}}\\) vs. OOD and a low \\(\\mathrm{AUROC}_{\\mathrm{incorrect}}\\) vs. OOD. On clean, easy datasets like CIFAR10, they exhibit strong OOD detection performance as there are few incorrectly predicted ID samples in the test set (thus the \\(\\mathrm{AUROC}_{\\mathrm{incorrect}}\\) vs. OOD term is negligible in the overall \\(\\mathrm{AUROC}_{\\mathrm{ID}}\\) vs. OOD) - however, when the number of incorrect prediction grows, the low \\(\\mathrm{AUROC}_{\\mathrm{incorrect}}\\) vs. OOD becomes a more significant factor. Importantly and as exemplified by Figure 4, for all methods, \\(\\mathrm{AUROC}_{\\mathrm{incorrect}}\\) vs. OOD is not (or only weakly, \\(\\rho < 0.2\\)) correlated with classification accuracy. MSP is the most clear-cut example, with a median \\(\\mathrm{AUROC}_{\\mathrm{incorrect}}\\) vs. OOD of around 0.5 across all dataset-architecture-seed-checkpoint combinations (bottom left of Figure 4) - that is, MSP often is no better (or worse) than a random detector at separating ID mistakes and OOD inputs, no matter how accurate the underlying classifier is. The Top-4 methods in Table 2 are the only ones with a median \\(\\mathrm{AUROC}_{\\mathrm{incorrect}}\\) vs. OOD \\(\\geq 0.6\\) - none of the other methods exceed a median \\(\\mathrm{AUROC}_{\\mathrm{incorrect}}\\) vs. OOD of 0.55 - see Figure 1." 
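The decomposed metrics used throughout this analysis (AUROC over all ID test samples vs. only the correctly or only the incorrectly classified ones) can be computed with a small helper along these lines. This is a hypothetical sketch assuming higher scores mean more ID-like, not the benchmark's evaluation code.

```python
import numpy as np
from sklearn.metrics import roc_auc_score

def auroc_variants(id_scores, id_correct, ood_scores):
    """AUROC with ID as the positive class, computed over all ID samples,
    only the correctly classified ones, and only the misclassified ones."""
    id_scores = np.asarray(id_scores)
    id_correct = np.asarray(id_correct, dtype=bool)
    ood_scores = np.asarray(ood_scores)

    def auroc(id_subset):
        labels = np.r_[np.ones(len(id_subset)), np.zeros(len(ood_scores))]
        scores = np.r_[id_subset, ood_scores]
        return roc_auc_score(labels, scores)

    return {
        "ID vs. OOD": auroc(id_scores),
        "correct vs. OOD": auroc(id_scores[id_correct]),
        "incorrect vs. OOD": auroc(id_scores[~id_correct]),
    }
```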
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.596, + 0.47, + 0.718 + ], + "angle": 0, + "content": "Takeaway: Would your OOD detector be better off as a failure detector? Accuracy correlating with OOD detection performance is partly symptomatic of many seemingly effective methods being unable to separate incorrectly classified ID samples from OOD samples - a bottleneck for robustness to imperfect classification. Claims that post-hoc OOD detection can be improved by simply improving the underlying classifier [60] overlook this fundamental issue." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.471, + 0.902 + ], + "angle": 0, + "content": "It's not just about the noise rate We find that for a fixed noise rate in a given dataset, different types/models of label noise yield comparable classification accuracy (\\(\\epsilon_{\\mathrm{min}} \\geq 0.5\\) for all pair-wise comparisons), yet have different effects on OOD performance. Indeed, real label noise is better handled than the same level of synthetic by most methods, with SU labels being the most challenging - this trend is clear in Figure 2. Figure 5 shows an example of how different noise types and checkpointing strategies shape the magnitude and spread of logits. Intuitively, when the noise is spread randomly across samples (SU noise model), it is more difficult to learn which kinds of images or classes to be uncer" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.088, + 0.895, + 0.168 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.175, + 0.892, + 0.204 + ], + "angle": 0, + "content": "Figure 5. Max Logit ID and OOD score statistics across models trained on Clothing1M, for different noise types & checkpointing." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.232, + 0.892, + 0.322 + ], + "angle": 0, + "content": "tain about, leading to consistently lower-confidence predictions across all ID samples (low median, low spread). Conversely, when label noise is more concentrated for certain classes (SCC) and/or for certain features (real noise), the classifier can learn to be more confident in some parts of the input space than others (higher median, higher spread)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.322, + 0.892, + 0.429 + ], + "angle": 0, + "content": "Takeaway: Faking it is better than ignoring it Uniform (synthetic) label noise in the training data tends to degrade OOD detection more strongly than class-dependant (synthetic) and instance-dependent (real) label noise. We encourage the use of synthetic uniform labels to evaluate the worst-case performance of OOD detectors, as they can be easily generated for any image classification dataset." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.44, + 0.803, + 0.456 + ], + "angle": 0, + "content": "6.3. Design features which hurt or help" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.463, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Why are the winners the best? In terms of design features, the methods with the strongest performance in a label noise setting have a distance-based scoring function, and take features as input rather than class probabilities. 
SHE is the only OOD detector satisfying both criteria which doesn't sit at the top of the pile in Table 2 - we attribute its lower performance to two factors: it summarizes the ID dataset only with class-wise means (which may be overly reductive in a label noise setting where variance is larger), and it only considers correctly predicted samples when computing them (which may be small in number if the classifier is inaccurate or the number of classes is high). In contrast, GRAM which includes higher-order raw moments to describe ID data statistics is the top-1 method in Table 2. In the comparison of Figure 6, GRAM and MDSEnsemble - the only methods in our benchmark which incorporate features at different depths in the network - stand out as having the \"flattest\" accuracy-AUROC curves, which is especially beneficial when the training dataset is inherently difficult (e.g. CIFAR100 due to fine-grained labels or Clothing1M due to the image complexity and diversity). However, we note that the performance of MDSEnsemble and GRAM is highly architecture-dependent - the best OOD detection performance is achieved with a ResNet18 classifier, while MLPlexer and CCT architectures give sub-par results (often sub-50% ie. even worse than a random detector). Whether this large performance variation is due to the layer types, feature dimensionality or other factors, and whether it can be remedied warrants further investigation." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "22631" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.087, + 0.892, + 0.241 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.251, + 0.893, + 0.279 + ], + "angle": 0, + "content": "Figure 6. Relation between the drop in accuracy caused by noisy labels and the resulting drop in OOD detection performance across all 20 methods. Each point corresponds to a single model trained with noisy labels." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.306, + 0.468, + 0.456 + ], + "angle": 0, + "content": "Takeaway: Distance is healthy Out of the 20 post-hoc OOD detectors in our benchmark, distance-based OOD detectors operating in feature space appear the most promising to cope with the problem of unreliable predictions. Intuitively, distance-based methods are more dissociated from the classifier's prediction, and more dependent on the content/appearance of ID images. In contrast, we did not find compelling evidence that methods targeting class logits or class probabilities for OOD detection are better suited for the noisy label setting." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.457, + 0.47, + 0.775 + ], + "angle": 0, + "content": "Are there tricks that work? We consider 3 popular \"tricks\" aiming to better separate ID vs. OOD samples in logit or probability space - temperature scaling, input perturbation and sparsification - and assess their effectiveness in a noisy label setting (excluding cleanly trained models). To isolate the effect of Softmax temperature scaling and input perturbation, we introduce \\(\\mathrm{ODIN}_{\\mathrm{notemp}}\\) (ODIN with temperature \\(T\\) fixed to 1) and \\(\\mathrm{ODIN}_{\\mathrm{nopert}}\\) (perturbation magnitude \\(m\\) set to 0). 
We find that scaling \\(T\\) by maximizing likelihood on ID validation labels is detrimental (\\(\\epsilon_{\\mathrm{min}}[\\mathrm{MSP} > \\mathrm{TempScale}] = 0.15\\)), however picking \\(T\\) based OOD validation detection performance does make a statistically significant (though not practically significant) difference (\\(\\epsilon_{\\mathrm{min}}[\\mathrm{ODIN}_{\\mathrm{nopert}} > \\mathrm{MSP}] = 0.05\\)). Input perturbation does not help in a label noise setting: looking at the optimal \\(m\\) selected during \\(\\mathrm{ODIN}_{\\mathrm{notemp}}\\) 's automatic parameter tuning, we observe that as label noise rate increases, the more likely that \\(m = 0\\) is picked (no perturbation). As for feature or weight sparsification, we note that REACT and DICE are the most promising logit-based methods in the AUROCincorrect vs. OOD ranking of Figure 1." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.787, + 0.414, + 0.803 + ], + "angle": 0, + "content": "6.4. Let's not forget about the validation set" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Picking a model checkpoint While it is well-understood that early stopping is beneficial to classification accuracy when training a classifier with noisy labels [34], we investigate whether this extends to OOD detection performance. We compare the OOD detection performance for the 2 checkpointing strategies, and find that for almost all meth" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.306, + 0.892, + 0.397 + ], + "angle": 0, + "content": "ods, early stopping is beneficial (\\(\\epsilon_{\\mathrm{min}}\\) [early>last] < 0.5). However, looking at Figure 6, we note that early stopping may increase the rate at which OOD detection performance drops due to label noise for a given drop in accuracy - to an extreme in the case of TempScaling. A closer look at Figure 5 gives some insight into its effect on the logits." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.402, + 0.893, + 0.901 + ], + "angle": 0, + "content": "What about OOD detector parameter tuning? Many of the methods in our benchmark involve a set-up step where dataset-specific parameters are computed (e.g. statistics for ID samples) and/or a tuning step where hyperparameters are tuned to maximize OOD detection performance on a held-out validation OOD set. The set of (hyper)parameters for each method is outlined in the supplementary. Among these methods, some make use of classification labels during set-up/tuning - e.g. to compute statistics for each class. In a label noise setting, this raises the question of whether to use a clean validation set or the noisy training set for set-up/tuning, or whether this makes a difference. We compare both settings for the 6 methods in our benchmark making use of class labels during set-up: MDS, RMDS, MDSEsemble, GRAM, OpenMax and SHE, with results visualized in the supplementary. For SHE which computes the mean of features for each class during setup, there is no statistically significant difference between using clean validation labels or potentially noisy training labels, although the latter may be better in some cases \\((\\epsilon_{\\mathrm{min}}[\\mathrm{SHE}_{\\mathrm{val}} > \\mathrm{SHE}_{\\mathrm{train}}] = 1\\) and \\(\\epsilon_{\\mathrm{min}}[\\mathrm{SHE}_{\\mathrm{train}} > \\mathrm{SHE}_{\\mathrm{val}}] = 0.63)\\). 
For methods based on the Malahanobis score, using noisy training labels to compute class-wise feature means and tied covariance is better \\((\\epsilon_{\\mathrm{min}}[\\mathrm{MDS}_{\\mathrm{train}} > \\mathrm{MDS}_{\\mathrm{val}}] = 0.19\\) and \\(\\epsilon_{\\mathrm{min}}[\\mathrm{RMDS}_{\\mathrm{train}} > \\mathrm{RMDS}_{\\mathrm{val}}] = 0)\\) - intuitively, the class-specific statistics are more accurate with more data. Common to these 3 methods is that the OOD score at test-time does not depend on the predicted class (likely to be incorrect in a label noise setting), but is rather based on distance to the closest class in feature space (regardless of what class is predicted). OpenMax computes the mean logit per class, only considering correctly predicted samples (labels are used to check correctness) - using a potentially" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "22632" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.198 + ], + "angle": 0, + "content": "noisy training set yields consistently better performance \\((\\epsilon_{\\mathrm{min}}[\\mathrm{OpenMax}_{\\mathrm{train}} > \\mathrm{OpenMax}_{\\mathrm{val}}] = 0)\\). Lastly, and in contrast to the other methods, GRAM benefits from using clean validation samples rather than a large number of noisy training samples for computing class-specific bounds of feature correlations \\((\\epsilon_{\\mathrm{min}}[\\mathrm{GRAM}_{\\mathrm{val}} > \\mathrm{GRAM}_{\\mathrm{train}}] = 0.23)\\). However, the performance gap between the two settings is small." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.203, + 0.473, + 0.326 + ], + "angle": 0, + "content": "Takeaway: Clean isn't always better or possible The use of clean vs. noisy labels during label-based parameter tuning is an important consideration. For distance-based methods which compute class-wise statistics, it appears that quantity often trumps quality, even when over \\(30\\%\\) of training labels are incorrect. This is promising for applications where a clean validation set is not available (e.g. medical imaging where labels are inherently subjective [28])." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.352, + 0.392, + 0.369 + ], + "angle": 0, + "content": "6.5. What about a more realistic setting?" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.381, + 0.473, + 0.79 + ], + "angle": 0, + "content": "We have thus far studied OOD detection in a simple (but standard [67]) setting where the base classifier is trained from scratch, and where there is strong semantic and covariate shift between ID and OOD images. Yet in practice, pre-training is widely adopted, and distribution shifts may be much more subtle. We therefore extend our study of label noise to fine-grained semantic shift detection with a base classifier that has been pre-trained on ImageNet [8] before being trained on a dataset of interest. We follow the Semantic Shift Benchmark (SSB), where the goal is to detect unknown classes from a known dataset (e.g. held-out bird species from the CUB [61] dataset or held-out aircraft model variants from FGVC-Aircraft [38]). Using SSB splits, we train ResNet50s (pre-trained) on half of the classes from CUB/FGVC-Aircraft (448x448 images), and we evaluate post-hoc OOD detection performance on known classes from the test set (ID) vs. the remaining unseen classes (OOD) split into 3 increasingly difficult sets. Since clean vs. 
real noisy label pairs are not available, we inject synthetic label noise in the training set (SU noise model) and follow the same evaluation procedure as in previous sections. Fig. 7 summarizes its detrimental effect on fine-grained semantic shift detection across the 20 studied OOD detection methods: increasing label noise and \"difficulty\" of the OOD set act as orthogonal bottle-necks to detection performance. Increased label noise pulls AUROC\\(_{\\text{ID vs. OOD}}\\) and AUROC\\(_{\\text{incorrect vs. OOD}}\\) to \\(50\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.472, + 0.903 + ], + "angle": 0, + "content": "Takeaway: Limitations of post-hoc OOD detectors extend beyond toy settings Even in a more realistic setting where the base classifier has first been pre-trained on ImageNet and OOD samples are similar in appearance to the ID dataset, all 20 methods poorly separate incorrectly classified ID samples from OOD samples, and degrade when the classifier has been trained on noisy labels." + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.088, + 0.895, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.29, + 0.895, + 0.348 + ], + "angle": 0, + "content": "Figure 7. Each boxplot shows the performance distribution across 6 classifiers (3 seeds, 2 checkpoints) \\(\\times\\) 20 post-hoc methods, considering all ID test samples (top) or only incorrectly classified ones (bottom) in the AUROC metric." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.371, + 0.634, + 0.389 + ], + "angle": 0, + "content": "7. Zooming out" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.397, + 0.895, + 0.623 + ], + "angle": 0, + "content": "Study limitations and possible extensions We have focused on post-hoc OOD detection methods due to their pragmatic appeal and to maintain experimental feasibility. Extending this study to training-based OOD detection methods [71] would of course be valuable. Aligning with OOD benchmarks [67], we also trained the base classifiers with a standard discriminative objective. Alternative supervision schemes may also be considered, and the effect of pre-training (and on what?) would be interesting to further analyse in a label noise setting, as it been shown to improve post-hoc OOD detection performance [2, 20, 33]. Lastly, the potential of noisy label removal [29, 43] or noise-robust learning [27, 63] techniques from the label noise literature (designed with classification performance in mind) for improving OOD detection would be a natural next step." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.624, + 0.896, + 0.79 + ], + "angle": 0, + "content": "Conclusion We have explored the intersection between classification label noise and OOD detection, and conducted extensive experiments to extract new insights into the limitations of existing post-hoc methods. Our findings also echo the need to re-think the aims and evaluation of OOD detection in the context of safe deployment [26] (e.g. do we really want to exclude ID misclassifications from detection?). We hope that this work paves the way for future investigations which prioritize the robustness and applicability of OOD detection models in practical, imperfect classification scenarios which account for data uncertainty." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.803, + 0.688, + 0.82 + ], + "angle": 0, + "content": "8. 
Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.828, + 0.895, + 0.89 + ], + "angle": 0, + "content": "This work was supported by the Danish Data Science Academy, which is funded by the Novo Nordisk Foundation (NNF21SA0069429) and VILLUM FONDEN (40516). Thanks to the Pioneer Centre for AI (DNRF grant P1)." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "22633" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.468, + 0.156 + ], + "angle": 0, + "content": "[1] Gorkem Algan and Ilkay Ulusoy. Image classification with deep learning in the presence of noisy labels: A survey. Knowledge-Based Systems, 215:106771, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.468, + 0.213 + ], + "angle": 0, + "content": "[2] Anders Johan Andreassen, Yasaman Bahri, Behnam Neyshabur, and Rebecca Roelofs. The evolution of out-of-distribution robustness throughout fine-tuning. Transactions on Machine Learning Research, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.217, + 0.468, + 0.27 + ], + "angle": 0, + "content": "[3] Abhijit Bendale and Terrance E. Boult. Towards open set deep networks. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1563-1572, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.273, + 0.468, + 0.314 + ], + "angle": 0, + "content": "[4] Julian Bitterwolf, Maximilian Mueller, and Matthias Hein. In or out? fixing imagenet out-of-distribution detection evaluation. In ICML, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.317, + 0.468, + 0.37 + ], + "angle": 0, + "content": "[5] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 – mining discriminative components with random forests. In European Conference on Computer Vision, 2014. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.374, + 0.468, + 0.429 + ], + "angle": 0, + "content": "[6] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 3606-3613, 2014. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.432, + 0.468, + 0.487 + ], + "angle": 0, + "content": "[7] Eustasio Del Barrio, Juan A Cuesta-Albertos, and Carlos Matrán. An optimal transportation approach for assessing almost stochastic order. In The Mathematics of the Uncertain, pages 33-44. Springer, 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.489, + 0.468, + 0.543 + ], + "angle": 0, + "content": "[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, 2009. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.546, + 0.468, + 0.587 + ], + "angle": 0, + "content": "[9] Li Deng. The mnist database of handwritten digit images for machine learning research [best of the web]. IEEE Signal Processing Magazine, 29(6):141-142, 2012. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.589, + 0.468, + 0.644 + ], + "angle": 0, + "content": "[10] Andrija Djurisic, Nebojsa Bozanic, Arjun Ashok, and Rosanne Liu. Extremely simple activation shaping for out-of-distribution detection. 
In The Eleventh International Conference on Learning Representations, 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.468, + 0.729 + ], + "angle": 0, + "content": "[11] Rotem Dror, Segev Shlomov, and Roi Reichart. Deep dominance - how to properly compare deep neural models. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28-August 2, 2019, Volume 1: Long Papers, pages 2773- 2785. Association for Computational Linguistics, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.732, + 0.468, + 0.772 + ], + "angle": 0, + "content": "[12] Stanislav Fort, Jie Ren, and Balaji Lakshminarayanan. Exploring the limits of out-of-distribution detection. In Advances in Neural Information Processing Systems, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.468, + 0.828 + ], + "angle": 0, + "content": "[13] Benoit Frenay and Michel Verleysen. Classification in the presence of label noise: A survey. IEEE Transactions on Neural Networks and Learning Systems, 25(5):845-869, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.468, + 0.899 + ], + "angle": 0, + "content": "[14] Ido Galil, Mohammed Dabbah, and Ran El-Yaniv. A framework for benchmarking class-out-of-distribution detection and its application toImagenet. In The Eleventh International Conference on Learning Representations, 2023. 1, 2, 3, 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.468, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[15] Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q. Weinberger. On calibration of modern neural networks. In Proceedings of the 34th International Conference on Machine Learning, pages 1321-1330. PMLR, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.151, + 0.892, + 0.191 + ], + "angle": 0, + "content": "[16] Ali Hassani, Steven Walton, Nikhil Shah, Abulikemu Abuduweili, Jiachen Li, and Humphrey Shi. Escaping the big data paradigm with compact transformers. 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.194, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.25, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[18] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. Eurosat: A novel dataset and deep learning benchmark for land use and land cover classification. IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.322, + 0.892, + 0.389 + ], + "angle": 0, + "content": "[19] Dan Hendrycks and Kevin Gimpel. A baseline for detecting misclassified and out-of-distribution examples in neural networks. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net, 2017. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.392, + 0.892, + 0.446 + ], + "angle": 0, + "content": "[20] Dan Hendrycks, Kimin Lee, and Mantas Mazeika. Using pre-training can improve model robustness and uncertainty. 
In Proceedings of the 36th International Conference on Machine Learning, pages 2712-2721. PMLR, 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.449, + 0.892, + 0.529 + ], + "angle": 0, + "content": "[21] Dan Hendrycks, Steven Basart, Mantas Mazeika, Andy Zou, Joseph Kwon, Mohammadreza Mostajabi, Jacob Steinhardt, and Dawn Song. Scaling out-of-distribution detection for real-world settings. In Proceedings of the 39th International Conference on Machine Learning, pages 8759-8773. PMLR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.533, + 0.892, + 0.587 + ], + "angle": 0, + "content": "[22] Dan Hendrycks, Steven Basart, Mantas Mazeika, Andy Zou, Joe Kwon, Mohammadreza Mostajabi, Jacob Steinhardt, and Dawn Song. Scaling out-of-distribution detection for real-world settings. ICML, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.591, + 0.892, + 0.658 + ], + "angle": 0, + "content": "[23] R. Huang and Y. Li. Mos: Towards scaling out-of-distribution detection for large semantic space. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8706-8715, Los Alamitos, CA, USA, 2021. IEEE Computer Society. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.661, + 0.892, + 0.715 + ], + "angle": 0, + "content": "[24] Rui Huang, Andrew Geng, and Yixuan Li. On the importance of gradients for detecting distributional shifts in the wild. In Advances in Neural Information Processing Systems, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.718, + 0.892, + 0.798 + ], + "angle": 0, + "content": "[25] Galadrielle Humblot-Renaux, Sergio Escalera, and Thomas B. Moeslund. Beyond auroc & co. for evaluating out-of-distribution detection performance. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 3881-3890, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.803, + 0.892, + 0.841 + ], + "angle": 0, + "content": "[26] Paul F Jaeger et al. A call to reflect on evaluation practices for failure detection in image classification. In ICLR, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[27] Lu Jiang, Di Huang, Mason Liu, and Weilong Yang. Beyond synthetic noise: Deep learning on controlled noisy labels. In Proceedings of the 37th International Conference on Machine Learning, pages 4804-4815. PMLR, 2020. 8" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22634" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[28] Davood Karimi, Haoran Dou, Simon K. Warfield, and Ali Gholipour. Deep learning with noisy labels: Exploring techniques and remedies in medical image analysis. Medical Image Analysis, 65:101759, 2020. 1, 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.148, + 0.47, + 0.204 + ], + "angle": 0, + "content": "[29] Taehyeon Kim, Jongwoo Ko, sangwook Cho, JinHwan Choi, and Se-Young Yun. Fine samples for learning with noisy labels. In Advances in Neural Information Processing Systems, pages 24137–24149. Curran Associates, Inc., 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.205, + 0.471, + 0.273 + ], + "angle": 0, + "content": "[30] Konstantin Kirchheim, Marco Filax, and Frank Ortmeier. 
Pytorch-ood: A library for out-of-distribution detection based on pytorch. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 4351-4360, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.274, + 0.471, + 0.315 + ], + "angle": 0, + "content": "[31] Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. Technical Report 0, University of Toronto, Toronto, Ontario, 2009. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.316, + 0.471, + 0.383 + ], + "angle": 0, + "content": "[32] Kimin Lee, Kibok Lee, Honglak Lee, and Jinwoo Shin. A simple unified framework for detecting out-of-distribution samples and adversarial attacks. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2018. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.385, + 0.471, + 0.455 + ], + "angle": 0, + "content": "[33] Jingyao Li, Pengguang Chen, Zexin He, Shaozuo Yu, Shu Liu, and Jiaya Jia. Rethinking out-of-distribution (ood) detection: Masked image modeling is all you need. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11578-11589, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.455, + 0.471, + 0.537 + ], + "angle": 0, + "content": "[34] Mingchen Li, Mahdi Soltanolkotabi, and Samet Oymak. Gradient descent with early stopping is provably robust to label noise for overparameterized neural networks. In Proceedings of the Twenty Third International Conference on Artificial Intelligence and Statistics, pages 4313-4324. PMLR, 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.538, + 0.471, + 0.593 + ], + "angle": 0, + "content": "[35] Shiyu Liang, Yixuan Li, and R. Srikant. Enhancing the reliability of out-of-distribution image detection in neural networks. In International Conference on Learning Representations, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.594, + 0.471, + 0.649 + ], + "angle": 0, + "content": "[36] Sheng Liu, Jonathan Niles-Weed, Narges Razavian, and Carlos Fernandez-Granda. Early-learning regularization prevents memorization of noisy labels. Advances in Neural Information Processing Systems, 33, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.65, + 0.471, + 0.704 + ], + "angle": 0, + "content": "[37] Weitang Liu, Xiaoyun Wang, John Owens, and Yixuan Li. Energy-based out-of-distribution detection. In Advances in Neural Information Processing Systems, pages 21464-21475. Curran Associates, Inc., 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.471, + 0.747 + ], + "angle": 0, + "content": "[38] S. Maji, J. Kannala, E. Rahtu, M. Blaschko, and A. Vedaldi. Fine-grained visual classification of aircraft. Technical report, 2013. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.748, + 0.471, + 0.816 + ], + "angle": 0, + "content": "[39] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, and Andrew Y. Ng. Reading Digits in Natural Images with Unsupervised Feature Learning. In NIPS Workshop on Deep Learning and Unsupervised Feature Learning 2011, 2011. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.471, + 0.872 + ], + "angle": 0, + "content": "[40] Amanda Olmin and Fredrik Lindsten. Robustness and reliability when training with noisy labels. In Proceedings of The 25th International Conference on Artificial Intelligence and Statistics, pages 922–942. PMLR, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.471, + 0.901 + ], + "angle": 0, + "content": "[41] Diane Oyen, Michal Kucer, Nick Hengartner, and Har Simrat Singh. Robustness to label noise depends on the shape of" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "the noise distribution. In Advances in Neural Information Processing Systems, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.894, + 0.191 + ], + "angle": 0, + "content": "[42] Chao Pan, Bo Yuan, Wei Zhou, and Xin Yao. Towards robust uncertainty estimation in the presence of noisy labels. In Artificial Neural Networks and Machine Learning - ICANN 2022, pages 673-684, Cham, 2022. Springer International Publishing. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.894, + 0.261 + ], + "angle": 0, + "content": "[43] Geoff Pleiss, Tianyi Zhang, Ethan Elenberg, and Kilian Q Weinberger. Identifying mislabeled data using the area under the margin ranking. In Advances in Neural Information Processing Systems, pages 17044-17056. Curran Associates, Inc., 2020. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.262, + 0.894, + 0.317 + ], + "angle": 0, + "content": "[44] Jie Ren, Stanislav Fort, Jeremiah Liu, Abhijit Guha Roy, Shreyas Padhy, and Balaji Lakshminarayanan. A simple fix to mahalanobis distance for improving near-ood detection, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.319, + 0.894, + 0.36 + ], + "angle": 0, + "content": "[45] David Rolnick, Andreas Veit, Serge Belongie, and Nir Shavit. Deep learning is robust to massive label noise, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.362, + 0.894, + 0.445 + ], + "angle": 0, + "content": "[46] Ragav Sachdeva, Filipe R. Cordeiro, Vasileios Belagiannis, Ian Reid, and Gustavo Carneiro. Evidentialmix: Learning with combined open-set and closed-set noisy labels. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 3607-3615, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.447, + 0.894, + 0.53 + ], + "angle": 0, + "content": "[47] Mohammadreza Salehi, Hossein Mirzaei, Dan Hendrycks, Yixuan Li, Mohammad Hossein Rohban, and Mohammad Sabokrou. A unified survey on anomaly, novelty, open-set, and out of-distribution detection: Solutions and future challenges. Transactions on Machine Learning Research, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.531, + 0.894, + 0.587 + ], + "angle": 0, + "content": "[48] Chandramouli Shama Sastry and Sageev Oore. Detecting out-of-distribution examples with Gram matrices. In Proceedings of the 37th International Conference on Machine Learning, pages 8491-8501. PMLR, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.589, + 0.894, + 0.672 + ], + "angle": 0, + "content": "[49] Vikash Sehwag, Arjun Nitin Bhagoji, Liwei Song, Chawin Sitawarin, Daniel Cullina, Mung Chiang, and Prateek Mittal. Analyzing the robustness of open-world machine learning. In Proceedings of the 12th ACM Workshop on Artificial Intelligence and Security, page 105–116, New York, NY, USA, 2019. Association for Computing Machinery. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.673, + 0.894, + 0.714 + ], + "angle": 0, + "content": "[50] Hwanjun Song, Minseok Kim, and Jae-Gil Lee. 
SELFIE: Refurbishing unclean samples for robust deep learning. In ICML, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.716, + 0.894, + 0.771 + ], + "angle": 0, + "content": "[51] Hwanjun Song, Minseok Kim, Dongmin Park, Yooju Shin, and Jae-Gil Lee. Learning from noisy labels with deep neural networks: A survey. IEEE Transactions on Neural Networks and Learning Systems, pages 1-19, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.772, + 0.894, + 0.828 + ], + "angle": 0, + "content": "[52] Hyun Oh Song, Yu Xiang, Stefanie Jegelka, and Silvio Savarese. Deep metric learning via lifted structured feature embedding. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.829, + 0.894, + 0.872 + ], + "angle": 0, + "content": "[53] Yue Song, Nicu Sebe, and Wei Wang. Rankfeat: Rank-1 feature removal for out-of-distribution detection. In Advances in Neural Information Processing Systems, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[54] Alexander Sorokin and David Forsyth. Utility data annotation with amazon mechanical turk. In 2008 IEEE Computer" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "22635" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "Society Conference on Computer Vision and Pattern Recognition Workshops, pages 1-8, 2008. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[55] Yiyou Sun and Yixuan Li. Dice: Leveraging sparsification for out-of-distribution detection. In Computer Vision – ECCV 2022, pages 691–708, Cham, 2022. Springer Nature Switzerland. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.233 + ], + "angle": 0, + "content": "[56] Yiyou Sun, Chuan Guo, and Yixuan Li. React: Out-of-distribution detection with rectified activations. In Advances in Neural Information Processing Systems, pages 144–157. Curran Associates, Inc., 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.236, + 0.469, + 0.291 + ], + "angle": 0, + "content": "[57] Yiyou Sun, Yifei Ming, Xiaojin Zhu, and Yixuan Li. Out-of-distribution detection with deep nearest neighbors. In Proceedings of the 39th International Conference on Machine Learning, pages 20827-20840. PMLR, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.293, + 0.469, + 0.375 + ], + "angle": 0, + "content": "[58] Ilya Tolstikhin, Neil Houlsby, Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Thomas Unterthiner, Jessica Yung, Andreas Peter Steiner, Daniel Keysers, Jakob Uszkoreit, Mario Lucic, and Alexey Dosovitskiy. MLP-mixer: An allMLP architecture for vision. In Advances in Neural Information Processing Systems, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.377, + 0.469, + 0.432 + ], + "angle": 0, + "content": "[59] Dennis Ulmer, Christian Hardmeier, and Jes Frellsen. deepsignificance-easy and meaningful statistical significance testing in the age of neural networks. arXiv preprint arXiv:2204.06815, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.434, + 0.469, + 0.489 + ], + "angle": 0, + "content": "[60] Sagar Vaze, Kai Han, Andrea Vedaldi, and Andrew Zisserman. 
Open-set recognition: A good closed-set classifier is all you need. In International Conference on Learning Representations, 2022. 1, 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.491, + 0.469, + 0.546 + ], + "angle": 0, + "content": "[61] C. Wah, S. Branson, P. Welinder, P. Perona, and S. Belongie. The caltech-ucsd birds-200-2011 dataset. Technical Report CNS-TR-2011-001, California Institute of Technology, 2011. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.548, + 0.469, + 0.617 + ], + "angle": 0, + "content": "[62] H. Wang, Z. Li, L. Feng, and W. Zhang. Vim: Out-of-distribution with virtual-logit matching. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4911-4920, Los Alamitos, CA, USA, 2022. IEEE Computer Society. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.619, + 0.469, + 0.687 + ], + "angle": 0, + "content": "[63] Hongxin Wei, Huiping Zhuang, Renchunzi Xie, Lei Feng, Gang Niu, Bo An, and Yixuan Li. Mitigating memorization of noisy labels by clipping the model prediction. In Proceedings of the 40th International Conference on Machine Learning. JMLR.org, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.689, + 0.469, + 0.757 + ], + "angle": 0, + "content": "[64] Jiaheng Wei, Zhaowei Zhu, Hao Cheng, Tongliang Liu, Gang Niu, and Yang Liu. Learning with noisy labels revisited: A study using real-world human annotations. In International Conference on Learning Representations, 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.759, + 0.469, + 0.815 + ], + "angle": 0, + "content": "[65] Tong Xiao, Tian Xia, Yi Yang, Chang Huang, and Xiaogang Wang. Learning from massive noisy labeled data for image classification. In 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2691-2699, 2015. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.469, + 0.858 + ], + "angle": 0, + "content": "[66] Jingkang Yang, Kaiyang Zhou, Yixuan Li, and Ziwei Liu. Generalized out-of-distribution detection: A survey. arXiv preprint arXiv:2110.11334, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.469, + 0.902 + ], + "angle": 0, + "content": "[67] Jingkang Yang, Pengyun Wang, Dejian Zou, Zitang Zhou, Kunyuan Ding, WENXUAN PENG, Haoqi Wang, Guangyao Chen, Bo Li, Yiyou Sun, Xuefeng Du, Kaiyang" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.161 + ], + "angle": 0, + "content": "Zhou, Wayne Zhang, Dan Hendrycks, Yixuan Li, and Zwei Liu. OpenOOD: Benchmarking generalized out-of-distribution detection. In Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2022. 1, 2, 3, 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.892, + 0.218 + ], + "angle": 0, + "content": "[68] Xiaoyong Yuan, Pan He, Qile Zhu, and Xiaolin Li. Adversarial examples: Attacks and defenses for deep learning. IEEE Transactions on Neural Networks and Learning Systems, 30 (9):2805-2824, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[69] Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning requires rethinking generalization. In International Conference on Learning Representations, 2017. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.277, + 0.892, + 0.358 + ], + "angle": 0, + "content": "[70] Jinsong Zhang, Qiang Fu, Xu Chen, Lun Du, Zelin Li, Gang Wang, xiaoguang Liu, Shi Han, and Dongmei Zhang. Out-of-distribution detection based on in-distribution data patterns memorization with modern hopfield energy. In The Eleventh International Conference on Learning Representations, 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.361, + 0.892, + 0.442 + ], + "angle": 0, + "content": "[71] Jingyang Zhang, Jingkang Yang, Pengyun Wang, Haoqi Wang, Yueqian Lin, Haoran Zhang, Yiyou Sun, Xuefeng Du, Kaiyang Zhou, Wayne Zhang, Yixuan Li, Ziwei Liu, Yiran Chen, and Hai Li. Openood v1.5: Enhanced benchmark for out-of-distribution detection. arXiv preprint arXiv:2306.09301, 2023.8" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22636" + } + ] +] \ No newline at end of file diff --git a/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/8eab7491-89d9-4dd1-8ee1-40b9bd851b01_origin.pdf b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/8eab7491-89d9-4dd1-8ee1-40b9bd851b01_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..23d3225e20ef3785394a22063e1216d51bd3098b --- /dev/null +++ b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/8eab7491-89d9-4dd1-8ee1-40b9bd851b01_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:945c811e8e1bd168800ac24095e4185d8d8cc1b19b4bb87d3f71af28ee0a59a6 +size 2248008 diff --git a/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/full.md b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/full.md new file mode 100644 index 0000000000000000000000000000000000000000..49d771437260090a561fab932e2be53eb569440e --- /dev/null +++ b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/full.md @@ -0,0 +1,270 @@ +# A noisy elephant in the room: Is your out-of-distribution detector robust to label noise? + +Galadrielle Humblot-Renaux $^{1}$ Sergio Escalera $^{1,2}$ Thomas B. Moeslund $^{1}$ + +Visual Analysis and Perception lab, Aalborg University, Denmark + +$^{2}$ Department of Mathematics and Informatics, University of Barcelona and Computer Vision Center, Spain + +gegeh@create.aau.dk sescalera@ub.edu tbm@create.aau.dk + +# Abstract + +The ability to detect unfamiliar or unexpected images is essential for safe deployment of computer vision systems. In the context of classification, the task of detecting images outside of a model's training domain is known as out-of-distribution (OOD) detection. While there has been a growing research interest in developing post-hoc OOD detection methods, there has been comparably little discussion around how these methods perform when the underlying classifier is not trained on a clean, carefully curated dataset. In this work, we take a closer look at 20 state-of-the-art OOD detection methods in the (more realistic) scenario where the labels used to train the underlying classifier are unreliable (e.g. crowd-sourced or web-scraped labels). 
Extensive experiments across different datasets, noise types & levels, architectures and checkpointing strategies provide insights into the effect of class label noise on OOD detection, and show that poor separation between incorrectly classified ID samples vs. OOD samples is an overlooked yet important limitation of existing methods. Code: https://github.com/glhr/ood-labelnoise + +# 1. Introduction + +In many real-world applications where deep neural networks are deployed "in the wild", it is desirable to have models that not only correctly classify samples drawn from the distribution of labeled data but also flag unexpected inputs as out-of-distribution (OOD). This has motivated the development of a wide range of OOD detection methods and benchmarks for computer vision [47, 66]. In particular, post-hoc OOD detection methods have shown wide appeal: compared to training-based methods, post-hoc OOD detectors can be applied on top of existing image classifiers without the need for re-training, have little to no architecture constraints, do not compromise classification performance, and achieve strong performance in large-scale settings [67]. + +Existing OOD benchmarks place significant emphasis on carefully designing the selection of OOD datasets used + +![](images/0afe293cd79a0a438a21007b19d3aa6cf5c9549034d9328e62b1bf42b9aafe8c.jpg) +Figure 1. Can state-of-the-art OOD detectors tell incorrectly classified ID images apart from OOD inputs? Not really. Here we compare their performance across 396 trained classifiers. + +for evaluation [4, 14, 60, 67]. In contrast, the role of the in-distribution (ID) dataset used for training the underlying classifier is seldom discussed. Among the most popular choices of ID dataset are MNIST, CIFAR10, CIFAR100 and ImageNet [30, 67] - all of which have been carefully curated and reliably annotated. Yet, in practice, the collection of labelled datasets involves a trade-off between acquisition time/cost and annotation quality - human inattention, mis-clicking, limited expertise, crowd-sourcing, automated annotation, and other cost-saving measures inevitably introduce labelling errors [54]. Besides, some images are inherently ambiguous to label even for the most knowledgeable and careful of annotators [28]. Considering how pervasive the problem of label noise is in real-world image classification datasets, its effect on OOD detection is crucial to study. + +To address this gap, we systematically analyse the label noise robustness of a wide range of OOD detectors, ranging from the widely adopted Maximum Softmax Probability (MSP) baseline [14, 19], to distance-based methods operating in feature space [32, 57], to more recent, complex methods such as SHE [70] and ASH [10]. In particular: + +1. We present the first study of post-hoc OOD detection in the presence of noisy classification labels, examining the performance of 20 state-of-the-art methods under different types and levels of label noise in the training data. Our study includes multiple classification architectures + +and datasets, ranging from the beloved CIFAR10 to the more difficult Clothing1M, and shows that even at a low noise rate, the label noise setting poses an interesting challenge for many methods. + +2. We revisit the notion that OOD detection performance correlates with ID accuracy [14, 60], examining when and why this relation holds. 
Robustness to inaccurate classification requires that OOD detectors effectively separate mistakes on ID data from OOD samples - yet most existing methods confound the two (Figure 1).
3. Our analysis includes key takeaways and recommendations for future evaluation and development of OOD detection methods considering an unreliable label setting.

# 2. Problem set-up

In this work, we tackle the question: what happens when post-hoc OOD detectors are applied on top of a classifier trained with unreliable labels - a common setting in practice? We introduce the main relevant concepts below.

Classifier We study OOD detection in the context of supervised image classification, where a discriminative model $h: \mathcal{X} \to \mathcal{Y}$ is trained on a dataset of $N$ labelled examples $D_{train} = \{(x_i, y_i)\}_{i=1}^N \in \mathcal{X} \times \mathcal{Y}$, where each $x$ is an input image and each $y$ is the corresponding class from the label space $\mathcal{Y}$. A common choice would be CIFAR10 [31]. $P_{train}(X, Y)$ defines the underlying training data distribution. The classifier is evaluated on a test set $D_{test}$ drawn from the same distribution $P_{test}(X, Y) = P_{train}(X, Y)$.

OOD detector Post-hoc OOD detection equips the trained classifier $h$ with a scoring function $o: \mathcal{X} \to \mathbb{R}$ aiming to distinguish usual examples drawn from $P_{test}(X)$ (ID samples) from anomalous (OOD) examples drawn from a disjoint, held-out distribution $P_{out}(X)$. In practice, a collection of auxiliary datasets with minimal semantic overlap (e.g. CIFAR10 → SVHN [39]) is commonly used for evaluation [67]. Ideally, the score assigned to ID samples should be consistently lower (or higher) than for OOD samples, such that anomalous inputs can easily be flagged.

Label noise We consider a noisy label setting, where the classifier $h$ does not have access to the true target values $y_{i}$ during training, but rather learns from a noisy dataset $D_{\text{noisy}} = \{(x_i, \hat{y}_i)\}_{i=1}^N$ in which some target labels are corrupted: $\exists i$ such that $\hat{y}_i \neq y_i$. In this work, we consider only closed-set label noise, where $D_{\text{noisy}} \in \mathcal{X} \times \mathcal{Y}$ (that is, the noisy labels lie in the same label space as the true labels [46]). The noise level $\epsilon$ is given by $P(y \neq \hat{y})$, the probability that an observed label is incorrect. Common models for studying and simulating label noise are (we refer to [13] for a detailed taxonomy; a simplified code sketch of the first two models follows this list):

1. Noisy Completely at Random (NCAR) or uniform label noise: labels are flipped at a constant rate $\epsilon$, regardless of class or image.
2. Noisy at Random (NAR) or class-conditional label noise: a constant noise rate across all images of the same class, but different classes may have different noise rates.
3. Noisy Not at Random (NNAR) or instance-dependent label noise: noisy labels are jointly determined by the true class and the associated image.

In practice, real (as opposed to synthetically generated) label noise arising from an imperfect annotation pipeline follows complex patterns, and is thus best represented by an instance-dependent model: some classes are more likely to be mislabeled than others, and so are some images (e.g. ambiguous or rare samples) [50, 64].
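To make the two synthetic noise models concrete, below is a minimal NumPy sketch (ours, for illustration only, not the implementation used in this paper) of how a vector of clean integer labels can be corrupted under the uniform (NCAR) and class-conditional (NAR) models; the function names, the `eps` parameter and the transition-matrix estimation helper are our own assumptions.

```python
import numpy as np

def inject_uniform_noise(y, num_classes, eps, seed=None):
    """NCAR / uniform noise: each label is flipped with probability eps
    to a different class drawn uniformly at random."""
    rng = np.random.default_rng(seed)
    y_noisy = y.copy()
    flip = rng.random(len(y)) < eps
    # an offset in [1, num_classes-1] guarantees the new label differs from the true one
    offsets = rng.integers(1, num_classes, size=flip.sum())
    y_noisy[flip] = (y[flip] + offsets) % num_classes
    return y_noisy

def estimate_transition_matrix(y_clean, y_noisy, num_classes):
    """Row-normalized confusion between clean and observed labels:
    T[c, k] approximates P(observed label = k | true class = c)."""
    T = np.zeros((num_classes, num_classes))
    np.add.at(T, (y_clean, y_noisy), 1)
    return T / T.sum(axis=1, keepdims=True).clip(min=1)

def inject_class_conditional_noise(y, T, seed=None):
    """NAR / class-conditional noise: sample each observed label from the row
    of the transition matrix T corresponding to the true class."""
    rng = np.random.default_rng(seed)
    return np.array([rng.choice(len(T), p=T[c]) for c in y])

# toy usage: 10 classes, 10% uniform noise
y_clean = np.repeat(np.arange(10), 100)
y_su = inject_uniform_noise(y_clean, num_classes=10, eps=0.10, seed=0)
print("observed noise rate:", (y_su != y_clean).mean())
```

Instance-dependent (NNAR) noise cannot be generated this way, since it depends on the image content as well as the class; this is one reason the real, human-annotated noisy label sets studied later are treated separately from the synthetic ones.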
# 3. Related work

Studying label noise The effect of unreliable labels on supervised learning is a well-studied problem in deep learning [51] and computer vision [1, 28], as errors or inconsistencies are a natural part of label collection in many real applications. Though increased dataset size can help [45], noisy labels degrade classification performance, especially in the later stages of training, where over-parameterized models are prone to memorizing them [36, 69]. The precise effects of label noise have been shown to depend on the noise model and distribution [41]. A recent CIFAR classification benchmark suggests that models trained on real, instance-dependent noisy labels are significantly more prone to memorization than those trained on synthetic class-conditional labels with the same overall noise rate [64]. We therefore consider real noisy labels in our benchmark (stemming from human annotation error), which we compare to two sets of synthetic noisy labels (uniform and class-conditional). We also compare the effect of validation-based early stopping vs. converging on the training set.

Effect of label noise on reliability Existing studies of label noise are largely focused on classification accuracy, and few works address the other side of the coin: reliability. We look at reliability from the angle of OOD detection performance - to the best of our knowledge, there is currently no comparable study of OOD detection under a noisy label setting. Most closely related to our work are perhaps the experiments in [42] and the analysis in [40]. [42] evaluates the effect of synthetic uniform label noise on MC-dropout and deep ensembles' uncertainty estimates, showing a significant degradation in OOD detection performance with increasing noise levels - in comparison, we study post-hoc OOD detection (with a wider variety of architectures, datasets, and methods) and consider real noisy datasets. [40] studies label noise robustness in terms of model calibration, showing that early stopping, while beneficial in terms of accuracy, offers no reliability guarantees.

Benchmarking OOD detection robustness Previous works have investigated the limits of state-of-the-art OOD detection methods in various challenging settings, such as semantic similarity between ID vs. OOD classes [12, 14, 60], fine-grained ID labels [23], large-scale datasets [22] and adversarial attacks [49, 68]. In contrast, we focus on robustness to degraded classification performance on the ID dataset due to noisy labels, which to the best of our knowledge has received comparably little attention.

Relation between ID classification and OOD detection performance In the standard clean label setting, a strong relationship between ID classification and OOD detection performance has been observed in prior work. [60] studies the relation between closed-set (ID) classification and open-set recognition performance (AUROC), and finds open-set recognition performance to be highly correlated with classification accuracy. [14] observes a similar trend for out-of-distribution detection performance across a large variety of pre-trained deep learning architectures, using the MSP as OOD score. Both works only consider clean training datasets, and a small subset of methods. We study the extent to which this relation holds across a wider range of OOD detection methods and noisy datasets, and provide a very simple explanation for why some methods like MSP reach such a high correlation.
# 4. OOD detection methods

We evaluate 20 post-hoc OOD detection methods from the OpenOOD benchmark [67] - currently the most comprehensive open-source benchmark available. Here we present and broadly categorize these methods based on how their scoring function is designed.

Softmax-based OOD detection revolves around the idea that ID samples are associated with higher-confidence, lower-entropy predictions than OOD samples. The baseline Maximum Softmax Probability (MSP) [19] simply takes the Softmax "confidence" of the predicted class as OOD score. While MSP implicitly assumes a Softmax temperature of 1, TempScaling [15] treats the temperature as a hyper-parameter, softening or sharpening the Softmax probabilities (essentially modulating categorical entropy), with the aim of improving calibration. ODIN [35] combines temperature scaling with input perturbation - "pushing" the input image a little in the direction that increases the MSP. In contrast, the Generalized ENtropy (GEN) score considers the full predictive distribution and captures how much it deviates from a one-hot distribution.

Logit-based OOD detection bypasses the squashing effect of Softmax normalization. The Maximum Logit Score (MLS) [21] directly takes the logit of the predicted class. In a similar vein, energy-based OOD detection (EBO) was first proposed in [37]: a score is derived by applying the LogSumExp function to the logits - essentially a smooth version of the MLS, with an additional temperature parameter. Several post-hoc methods using an energy score have since followed suit, proposing various modifications to the network [55] or features [10, 53, 56] before extracting an energy score: REACT [56] clips activations at an upper bound, RankFeat [53] subtracts the rank-1 matrix from activations, DICE [55] applies weight sparsification such that only the strongest contributors remain, and ASH [10] sparsifies activations based on a pruning percentile.

Distance-based OOD detection aims to capture how much a test sample deviates from the ID dataset. The Mahalanobis distance score (MDS) [32] method fits a Gaussian distribution to the features of each class in the ID dataset; at test-time, the OOD score is taken as the distance to the closest class. The same authors also proposed MDSEnsemble [32], which computes an MDS score not just from the features extracted before the final layer, but also at earlier points in the network, and aggregates them. Alternatively, the Relative Mahalanobis distance score (RMDS) [44] was proposed as a simple fix to MDS, which additionally fits a class-independent Gaussian to the entire ID dataset to compute a background score which is subtracted from the class-specific MDS score. Among other distance-based methods which rely on class-wise statistics, KLMatching (KLM) [21] takes the smallest KL divergence between a test sample's Softmax probabilities and the mean Softmax probability vector for each ID class. OpenMax [3] operates in logit space, fitting a class-wise Weibull distribution to the distances of ID samples from the mean logit vector. Simplified Hopfield Energy (SHE) [70] computes the inner product between a test sample's features and the mean ID feature of the predicted class. GRAM [48] computes the Gram matrices of intermediate feature representations throughout the network, comparing them with the range of values observed for each class in the ID data. In contrast, deep k-nearest neighbor (KNN) [57] proposes a simple approach with no distributional assumptions - computing its score as the Euclidean distance to the closest samples from the ID set, regardless of class. Lastly, Virtual-logit Matching (VIM) [62] combines a logit energy score with a class-agnostic term capturing how features deviate from a principal subspace defined by the training set.
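To make a few of these score definitions concrete, the following is a minimal NumPy/SciPy sketch (ours, for illustration; the OpenOOD codebase [67] provides the implementations actually benchmarked here) of one representative score per family: MSP, MLS, an energy score, and a Mahalanobis-style distance score built from class-wise feature means and a tied covariance. The function names and the "higher score = more ID" convention are our own assumptions.

```python
import numpy as np
from scipy.special import logsumexp, softmax

def msp_score(logits):
    """Maximum Softmax Probability of the predicted class (higher = more ID)."""
    return softmax(logits, axis=-1).max(axis=-1)

def mls_score(logits):
    """Maximum Logit Score: the raw logit of the predicted class."""
    return logits.max(axis=-1)

def energy_score(logits, T=1.0):
    """Negative energy, i.e. a temperature-scaled LogSumExp of the logits -
    a smooth counterpart of the MLS."""
    return T * logsumexp(logits / T, axis=-1)

def mds_score(feats, train_feats, train_labels):
    """Mahalanobis-style distance score: fit class-wise Gaussians with a shared
    (tied) covariance on ID training features; the score is minus the squared
    distance to the closest class mean."""
    classes = np.unique(train_labels)
    means = np.stack([train_feats[train_labels == c].mean(axis=0) for c in classes])
    centered = train_feats - means[np.searchsorted(classes, train_labels)]
    prec = np.linalg.pinv(centered.T @ centered / len(train_feats))
    diffs = feats[:, None, :] - means[None, :, :]           # (N, C, D)
    d2 = np.einsum("ncd,de,nce->nc", diffs, prec, diffs)    # squared Mahalanobis distances
    return -d2.min(axis=1)
```

All four return a per-sample score where higher means "more in-distribution", so any of them can be plugged directly into the AUROC-based evaluation sketched at the end of Section 5.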
In contrast, deep k-nearest neighbor (KNN) [57] proposes a simple approach with no distributional assumptions - computing its score as the Euclidean distance to the closest samples from the ID set, regardless of class. Lastly, Virtual-logit Matching (VIM) [62] combines a logit energy score with a class-agnostic term capturing how features deviate from a principal subspace defined by the training set. + +Gradient-based OOD detection: GradNorm [24] is the only method in OpenOOD which directly derives its score from the gradient space, claiming that gradient magnitude is higher for ID inputs. The KL divergence between predicted Softmax probabilities and a uniform target is backpropagated to obtain gradients w.r.t the last layer parameters, followed by an $L_{1}$ norm to obtain the magnitude. + +# 5. Experiments + +We summarize our experimental set-up below, and refer to the supplementary for further details. + +ID Datasets We select popular image classification datasets from the label noise literature featuring real noisy labels alongside clean reference labels, spanning different + +
| ID dataset | classes | # images (train/val/test) | resolution | noise rate |
|---|---|---|---|---|
| **CIFAR-10** [31] | 10 | 50,000/1,000/9,000 | 32x32 | 0% |
| CIFAR-10N-Agg [64] | 10 | 50,000/1,000/9,000 | 32x32 | 9.01% |
| CIFAR-10N-Rand1 [64] | 10 | 50,000/1,000/9,000 | 32x32 | 17.23% |
| CIFAR-10N-Worst [64] | 10 | 50,000/1,000/9,000 | 32x32 | 40.21% |
| **CIFAR-100-Fine** [31] | 100 | 50,000/1,000/9,000 | 32x32 | 0% |
| CIFAR-100N-Fine [64] | 100 | 50,000/1,000/9,000 | 32x32 | 40.20% |
| **CIFAR-100-Coarse** [31] | 20 | 50,000/1,000/9,000 | 32x32 | 0% |
| CIFAR-100N-Coarse [64] | 20 | 50,000/1,000/9,000 | 32x32 | 26.40% |
| **Clothing1M** [65] | 14 | 24,637/7,465/5,395 | 256x256 | 0% |
| Clothing1M-Noisy [65] | 14 | 24,637/7,465/5,395 | 256x256 | 38.26% |
Table 1. Dataset overview. Clean datasets are shown in bold. The training set (clean or noisy labels) is used to train the classifier; the validation set (clean labels) is used for early stopping; the test set (clean labels) is used for evaluating classification and OOD detection performance. We always use clean labels for evaluation.

input sizes, number of classes, and sources & levels of label noise - see Table 1. The recently released CIFAR-N dataset [64] provides noisy re-annotations of CIFAR-10 and CIFAR-100 collected via crowd-sourcing: each image was annotated by 3 people, and different noisy label sets were created with different label selection methods (majority voting, random selection, or worst label selection). Note that CIFAR-100-Fine and CIFAR-100-Coarse contain the same set of images - only the class definitions and labels differ. Clothing1M [65] is a large-scale dataset collected by scraping shopping websites. Although the raw Clothing1M contains over a million images, we consider the smaller subset of images for which both a noisy and a clean label are available.

Synthetic noise For each real noisy label set, using the corresponding clean labels, we additionally create 2 synthetic counterparts with the same overall noise rate: one following a uniform (NCAR, class-independent) label noise model, and the other following a class-conditional label noise model with the exact same noise transition matrix as the real noise. We name these synthetic variants SU (Synthetic Uniform noise) and SCC (Synthetic Class-Conditional noise) - for example, from CIFAR-10N-Agg we create 2 synthetic versions, SU and SCC.

OOD Datasets For fair comparison, we use the same selection of OOD datasets for all models - the OOD datasets are therefore chosen such that there is minimal semantic overlap with any of the ID datasets. We include MNIST [9], SVHN [39] and Textures [6], as they are commonly used as examples of "far"-OOD [67] (very different appearance and semantics from the ID datasets). As examples of more natural images, we also include EuroSAT-RGB [18], Food-101 [5], a sub-set of the Stanford Online Products [52], and a 12-class sub-set of ImageNet. Since some methods require an OOD validation set for hyperparameter tuning, half of these ImageNet classes are randomly selected and held out for this purpose. The other 6 ImageNet classes and the remaining OOD datasets make up the OOD test set.

Evaluation metrics We evaluate OOD detectors' ability to separate ID vs. OOD samples in terms of the Area Under the Receiver Operating Characteristic Curve (AUROC), where images from the ID test set (e.g. the CIFAR10 test set) are considered positive samples, and those from the OOD test set (e.g. the SVHN test set) as negatives. This is the most commonly reported metric in the literature [25], and we denote it as AUROC$_{\text{ID vs. OOD}}$. In addition, unlike previous works, we separately measure AUROC$_{\text{correct vs. OOD}}$ (and AUROC$_{\text{incorrect vs. OOD}}$), where only correctly (or incorrectly) classified samples from the ID test set are considered - ideally, performance should be high on both metrics.

Architectures We include 3 architecture families: CNNs, MLPs and transformers. We select lightweight architectures which have shown competitive results when trained on small-scale datasets: ResNet18 [17], MLP-Mixer [58] and Compact Transformers [16]. Following the OpenOOD benchmark [67], we do not adopt any advanced training strategies besides standard data augmentation.
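As a concrete reference for the evaluation metrics above, the snippet below sketches how the three AUROC variants could be computed with scikit-learn. The toy scores and the 80% toy accuracy are our own placeholders (not the benchmark code); the final assertion illustrates that AUROC$_{\text{ID vs. OOD}}$ is exactly the accuracy-weighted combination of the correct and incorrect variants.

```python
import numpy as np
from sklearn.metrics import roc_auc_score

def auroc(id_scores, ood_scores):
    """AUROC with ID samples as positives and OOD samples as negatives (higher score = more ID)."""
    labels = np.concatenate([np.ones(len(id_scores)), np.zeros(len(ood_scores))])
    scores = np.concatenate([id_scores, ood_scores])
    return roc_auc_score(labels, scores)

# Toy OOD scores: confident on correctly classified ID samples, hesitant on ID mistakes.
rng = np.random.default_rng(0)
correct = rng.random(9_000) < 0.8                                        # toy classifier, ~80% ID accuracy
id_scores = np.where(correct, rng.normal(2.0, 1.0, 9_000), rng.normal(0.0, 1.0, 9_000))
ood_scores = rng.normal(0.0, 1.0, 5_000)

auroc_all = auroc(id_scores, ood_scores)                  # AUROC_ID vs. OOD
auroc_cor = auroc(id_scores[correct], ood_scores)         # AUROC_correct vs. OOD
auroc_inc = auroc(id_scores[~correct], ood_scores)        # AUROC_incorrect vs. OOD
acc = correct.mean()
# The overall metric is the accuracy-weighted mix of the two conditional metrics.
assert np.isclose(auroc_all, acc * auroc_cor + (1 - acc) * auroc_inc)
```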
For each training dataset, we repeat training with 3 random seeds, and save 2 model checkpoints: an early checkpoint (based on best validation accuracy) and the last checkpoint (after a pre-defined number of epochs has elapsed, allowing for convergence - differs per architecture). + +Bird's eye view To summarize, we train 3 different classifier architectures on 22 datasets (4 clean, 6 with real label noise, 12 with synthetic label noise), with 3 random seeds and 2 checkpoints saved per model - adding up to 396 distinct classifiers. On top of each classifier, 20 different OOD detection methods are applied and evaluated on 7 OOD datasets. Throughout the paper, OOD detection performance is taken as the median across the 7 OOD datasets (see the supplementary for results and a discussion of the median vs. mean OOD detection performance). + +Statistical significance tests When comparing pairs of methods or settings, we use the Almost Stochastic Order (ASO) test [7, 11] as implemented by Ulmer et al. [59]. This statistical test was specifically designed to compare deep learning models, making no distributional assumptions. We apply ASO with a significance level $\alpha = 0.05$ and report $\epsilon_{\mathrm{min}}[\mathrm{A} > \mathrm{B}]$ . If $\epsilon_{\mathrm{min}}[\mathrm{A} > \mathrm{B}] \geq 0.5$ we cannot claim that method A is better than method B; the smaller $\epsilon_{\mathrm{min}}$ , the more confident we can be that method A is superior. + +# 6. Analysis + +We explore the effect of label noise on OOD detection, starting with an overall view of performance trends in Section 6.1, then looking at OOD detection in relation to classification performance in Section 6.2, delving into what works (and what doesn't) in Section 6.3, and raising important considerations about how/whether to make use of a clean validation set in Section 6.4. Section 6.5 extends results to a more practical setting. More detailed analyses and additional supporting figures are in the supplementary. + +
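For readers who want to apply the same comparison protocol, the snippet below sketches an ASO comparison between two methods using the deepsig package by Ulmer et al. [59]. The score values are placeholders, and the minimal call shown here is an assumption about the package's interface (it accepts further options, e.g. the confidence level), so treat this as a hedged usage sketch rather than our evaluation code.

```python
# pip install deepsig  -- the deep-significance package by Ulmer et al. [59]
from deepsig import aso

# Hypothetical AUROC_ID vs. OOD scores (%) for two OOD detection methods,
# collected across several architecture/seed/checkpoint/dataset combinations.
scores_a = [91.3, 89.8, 90.5, 88.7, 92.1, 90.0]   # method A
scores_b = [88.9, 87.5, 90.1, 86.2, 89.4, 88.0]   # method B

# eps_min close to 0  -> strong evidence that A is better than B;
# eps_min >= 0.5      -> we cannot claim that A is better than B.
eps_min = aso(scores_a, scores_b)
print(f"eps_min[A > B] = {eps_min:.3f}")
```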
Table 2 column layout (one AUROC value per training label set, left to right): method | CIFAR10: clean, Agg (N, SCC, SU), Rand1 (N, SCC, SU), Worst (N, SCC, SU) | CIFAR100-Coarse: clean, N, SCC, SU | CIFAR100-Fine: clean, N, SCC, SU | Clothing1M: clean, N, SCC, SU
GRAM94.4589.4989.1290.788.8289.1990.5288.688.7387.9882.0780.0582.179.282.9376.3180.2482.6491.0489.0794.7195.37
MDS96.0787.9392.492.9792.3789.2587.4586.7486.4989.280.0778.8982.8480.1280.174.9674.4873.4987.1290.9888.5892.38
VIM95.6589.991.8192.388.7588.684.4986.3187.2388.7584.2976.6180.047881.3775.3173.2473.9488.9983.0987.1790.14
MDSEns92.5783.8983.6283.7981.883.0680.3682.9584.0284.1179.2578.3177.4173.684.8577.4778.4379.8595.3695.4495.7895.69
KNN93.6390.0788.7590.1487.7486.6685.1186.383.7384.1484.1674.480.3975.8283.2975.6776.4871.3585.3285.584.5980.87
RMDS92.9289.3887.9488.1489.0785.7387.0484.0381.9982.3582.1475.9377.3674.8183.287675.7573.4875.8171.4378.2266.66
DICE9083.3384.1886.2488.528186.2182.7979.5879.0782.7977.6875.0170.4382.5276.5173.9268.4184.9675.7286.6982.89
ReAct90.9187.3286.6382.1689.7482.9681.9784.578.4180.1182.7973.0973.6270.3883.7673.5773.7167.5582.5773.180.2276.58
GEN91.8685.9985.4482.0889.8682.8980.8483.7581.8180.5782.6973.2571.4770.9981.3473.473.167.1183.9173.5779.7976.78
EBO91.3184.8785.7381.6289.8881.9377.8883.0481.3877.482.7472.9970.9367.8581.4173.6573.0167.4285.1976.3285.3176.64
SHE89.687.8184.3386.4886.6383.1683.0483.2480.0678.9880.4271.88070.1180.3869.6369.5666.6882.2978.0778.477.73
ODIN91.4787.7186.3182.4889.7982.2180.6884.0982.1380.0981.4273.170.8869.9783.5974.8572.1967.1683.3871.5977.7375.47
MLS91.2684.7685.5981.5788.8182.317883.5482.0180.0182.6672.9270.8569.1981.4573.6472.567.0383.372.7577.7475.54
TempScale91.6785.7685.0482.2585.0778.179.7882.8580.5180.1381.6371.6769.9469.3580.7572.6671.2866.9879.8268.7786.2874.45
ASH88.3384.7582.3781.6682.2976.4772.4885.2778.2675.4882.7871.1973.2168.4382.7474.1370.9567.4881.5374.8976.6375.74
OpenMax90.586.1283.4682.2683.0582.8779.1280.3975.6877.4181.1476.6972.6569.1780.1372.8275.667.6671.7469.2372.5574.36
MSP91.3485.1984.8782.4185.1382.2180.6882.4879.480.0980.5170.4268.8869.9779.8570.9269.6166.9277.5766.0272.4473.89
KLM90.8483.782.1581.6980.1381.8880.6474.7975.9376.8379.3770.269.2467.8179.7875.5269.8967.2577.2665.4965.2262.01
GradNorm86.2279.5577.177.8481.6679.8876.9684.1172.9672.8869.2966.2773.1767.4470.9565.6571.2663.2979.7175.3273.9377.52
RankFeat81.8383.5375.8673.1278.2975.0779.1482.5777.2575.7573.1564.669.5465.4468.7776.0770.2467.6469.1675.8569.9373.37
Table 2. Best-case OOD detection performance (AUROC$_{\text{ID vs. OOD}}$ in %) per method (that is, after selecting the best architecture-seed-checkpoint combination for each training label set). N, SCC, and SU refer to the real and synthetic noisy label sets described in Section 5. The top-3 for each training dataset are highlighted in bold, and the top-1 is underlined. In red are scores $< 75\%$ and in orange scores between 75 and 80%. Rows are sorted based on the total performance across columns.

![](images/896adb955c27ac2a2771da422929e2ef9e25fc6813e3812b4ebfdb9c98d2b316.jpg)
Figure 2. Distribution of OOD detection performance across methods & models when training the classifier on different label sets.

# 6.1. Where there's noise there's trouble

Figure 2 gives an overview of OOD detection performance for different training datasets and label noise settings. We see a clear drop in overall OOD detection performance when label noise is introduced in the training dataset, compared to training on a cleanly labelled dataset (green). Even with only $9\%$ of incorrect CIFAR10 labels (the CIFAR-10N-Agg label set), the median AUROC$_{\text{ID vs. OOD}}$ across all models drops by over $5\%$. In Table 2, for each method we report the best-case OOD detection performance for a given training label set. While most methods are able to reach $80\%$ AUROC$_{\text{ID vs. OOD}}$ with a classifier trained on clean labels, the number of competitive methods falls with increasing label noise, especially at noise rates $>20\%$. GRAM, KNN, MDS, MDSEnsemble and VIM are the only methods able to reach $90+\%$ AUROC on at least one of the noisy datasets.

Takeaway: Enter the elephant Label noise in the classifier's training data makes it more difficult for post-hoc OOD detection methods to flag unfamiliar samples at test-time, even in small-scale settings like CIFAR10.

# 6.2. Does accuracy tell the whole story?

The most obvious effect of label noise in the training data is a decrease in classification performance on ID test data. At the same time, previous works have noted a strong relation between classification performance and OOD detection for popular post-hoc methods like MSP [14] and MLS [60]. We dig deeper. When does this relation hold, and why?

For which methods does this relation hold? In Figure 3, we quantify the relationship between ID accuracy and AUROC$_{\text{ID vs. OOD}}$ in terms of Spearman correlation $\rho$. We find that the correlation varies widely across methods: it is strongest for MSP, and generally weaker for methods which operate earlier in the network. We also note that for all methods except KNN and RMDS, the label noise setting makes OOD detection performance less predictable - and so does early stopping (cf. Section 6.4). This points to the distribution of ID scores playing an important role in OOD detection performance.

![](images/b1d33226e2a35de3fd1a6f3f6707c35b2922d9aff889460e23b1678bfd92f613.jpg)
Figure 3. Does OOD detection performance (AUROC$_{\text{ID vs. OOD}}$) correlate with ID classification performance (accuracy)? We measure the rank correlation across different architectures, seeds, checkpoints, and datasets for different label sets. All results shown here are statistically significant ($p < 0.001$).
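The rank correlation reported in Figure 3 is straightforward to compute; the snippet below is a minimal sketch using SciPy's spearmanr, with placeholder accuracy and AUROC arrays standing in for the per-model results.

```python
import numpy as np
from scipy.stats import spearmanr

# Placeholder values: one entry per trained classifier
# (architecture x seed x checkpoint x dataset) for a given label set and method.
id_accuracy  = np.array([0.93, 0.88, 0.76, 0.81, 0.65, 0.59, 0.71, 0.84])
auroc_id_ood = np.array([0.91, 0.87, 0.78, 0.80, 0.66, 0.62, 0.70, 0.83])

rho, p_value = spearmanr(id_accuracy, auroc_id_ood)
print(f"Spearman rho = {rho:.2f} (p = {p_value:.1e})")
```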
![](images/45c2dac7ad1409de0c8224e809bab08eefc1309e0476a4fd22144b8ae4b1aabc.jpg)
Figure 4. Relationship between ID classification performance and OOD detection performance, considering all ID test samples (top) or only incorrectly classified ones (bottom) in the AUROC metric. Each point corresponds to a single model.

When it does - why? We provide a simple observation which is missing from prior work: methods whose OOD detection performance predictably degrades alongside classification accuracy are characterized by a high AUROC$_{\text{correct vs. OOD}}$ and a low AUROC$_{\text{incorrect vs. OOD}}$. On clean, easy datasets like CIFAR10, they exhibit strong OOD detection performance because there are few incorrectly predicted ID samples in the test set, so the AUROC$_{\text{incorrect vs. OOD}}$ term contributes little to the overall AUROC$_{\text{ID vs. OOD}}$ (which is exactly the accuracy-weighted average of the correct and incorrect terms); however, when the number of incorrect predictions grows, the low AUROC$_{\text{incorrect vs. OOD}}$ becomes a more significant factor. Importantly, and as exemplified by Figure 4, for all methods, AUROC$_{\text{incorrect vs. OOD}}$ is not (or only weakly, $\rho < 0.2$) correlated with classification accuracy. MSP is the most clear-cut example, with a median AUROC$_{\text{incorrect vs. OOD}}$ of around 0.5 across all dataset-architecture-seed-checkpoint combinations (bottom left of Figure 4) - that is, MSP is often no better (or even worse) than a random detector at separating ID mistakes from OOD inputs, no matter how accurate the underlying classifier is. The Top-4 methods in Table 2 are the only ones with a median AUROC$_{\text{incorrect vs. OOD}}$ $\geq 0.6$; none of the other methods exceed a median AUROC$_{\text{incorrect vs. OOD}}$ of 0.55 - see Figure 1.

Takeaway: Would your OOD detector be better off as a failure detector? Accuracy correlating with OOD detection performance is partly symptomatic of many seemingly effective methods being unable to separate incorrectly classified ID samples from OOD samples - a bottleneck for robustness to imperfect classification. Claims that post-hoc OOD detection can be improved by simply improving the underlying classifier [60] overlook this fundamental issue.

It's not just about the noise rate We find that for a fixed noise rate in a given dataset, different types/models of label noise yield comparable classification accuracy ($\epsilon_{\mathrm{min}} \geq 0.5$ for all pair-wise comparisons), yet have different effects on OOD detection performance. Indeed, most methods handle real label noise better than synthetic noise at the same rate, with SU labels being the most challenging - this trend is clear in Figure 2. Figure 5 shows an example of how different noise types and checkpointing strategies shape the magnitude and spread of the logits. Intuitively, when the noise is spread randomly across samples (SU noise model), it is more difficult to learn which kinds of images or classes to be uncertain about, leading to consistently lower-confidence predictions across all ID samples (low median, low spread). Conversely, when label noise is more concentrated for certain classes (SCC) and/or certain features (real noise), the classifier can learn to be more confident in some parts of the input space than others (higher median, higher spread).

![](images/14c178369c4c0362f36de7e815ae9f15ffca1efd88177ac79bc982c814ed471d.jpg)
Figure 5. Max Logit ID and OOD score statistics across models trained on Clothing1M, for different noise types & checkpointing.
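The takeaway below recommends synthetic uniform labels as an easy worst-case stress test. As a concrete illustration, the sketch below shows one way the SU and SCC label sets described in Section 5 could be generated from a clean label vector; this is our own simplified code (function names, the toy labels and the 9.01% rate are placeholders), not the paper's implementation.

```python
import numpy as np

def inject_uniform_noise(clean, noise_rate, num_classes, rng):
    """SU: flip a fixed fraction of labels to a different class chosen uniformly at random."""
    noisy = clean.copy()
    flip_idx = rng.choice(len(clean), size=int(round(noise_rate * len(clean))), replace=False)
    for i in flip_idx:
        noisy[i] = rng.choice([c for c in range(num_classes) if c != clean[i]])
    return noisy

def inject_class_conditional_noise(clean, real_noisy, num_classes, rng):
    """SCC: resample each label from the noise transition matrix estimated from a
    real noisy label set (rows: clean class, columns: observed noisy class)."""
    T = np.zeros((num_classes, num_classes))
    for c, n in zip(clean, real_noisy):
        T[c, n] += 1
    T /= T.sum(axis=1, keepdims=True)          # assumes every class appears in `clean`
    return np.array([rng.choice(num_classes, p=T[c]) for c in clean])

rng = np.random.default_rng(0)
clean = rng.integers(0, 10, size=10_000)                 # stand-in for clean CIFAR-10 labels
real = inject_uniform_noise(clean, 0.0901, 10, rng)      # stand-in for a real noisy label set
su = inject_uniform_noise(clean, 0.0901, 10, rng)
scc = inject_class_conditional_noise(clean, real, 10, rng)
print((su != clean).mean(), (scc != clean).mean())       # both close to the target noise rate
```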
Takeaway: Faking it is better than ignoring it Uniform (synthetic) label noise in the training data tends to degrade OOD detection more strongly than class-dependent (synthetic) and instance-dependent (real) label noise. We encourage the use of synthetic uniform labels to evaluate the worst-case performance of OOD detectors, as they can be easily generated for any image classification dataset.

# 6.3. Design features which hurt or help

Why are the winners the best? In terms of design features, the methods with the strongest performance in a label noise setting have a distance-based scoring function and take features as input rather than class probabilities. SHE is the only OOD detector satisfying both criteria which does not sit at the top of the pile in Table 2 - we attribute its lower performance to two factors: it summarizes the ID dataset only with class-wise means (which may be overly reductive in a label noise setting where variance is larger), and it only considers correctly predicted samples when computing them (of which there may be few if the classifier is inaccurate or the number of classes is high). In contrast, GRAM, which includes higher-order raw moments to describe ID data statistics, is the top-1 method in Table 2. In the comparison of Figure 6, GRAM and MDSEnsemble - the only methods in our benchmark which incorporate features at different depths in the network - stand out as having the "flattest" accuracy-AUROC curves, which is especially beneficial when the training dataset is inherently difficult (e.g. CIFAR100 due to fine-grained labels, or Clothing1M due to the image complexity and diversity). However, we note that the performance of MDSEnsemble and GRAM is highly architecture-dependent - the best OOD detection performance is achieved with a ResNet18 classifier, while the MLP-Mixer and CCT architectures give sub-par results (often below 50%, i.e. even worse than a random detector). Whether this large performance variation is due to the layer types, feature dimensionality or other factors, and whether it can be remedied, warrants further investigation.

![](images/16d051ef1305583d91ed6ff374eb8d3a3950bf775d031a5b80e1ee7fec6dcacf.jpg)
Figure 6. Relation between the drop in accuracy caused by noisy labels and the resulting drop in OOD detection performance across all 20 methods. Each point corresponds to a single model trained with noisy labels.

Takeaway: Distance is healthy Out of the 20 post-hoc OOD detectors in our benchmark, distance-based OOD detectors operating in feature space appear the most promising for coping with the problem of unreliable predictions. Intuitively, distance-based methods are more dissociated from the classifier's prediction, and more dependent on the content/appearance of ID images. In contrast, we did not find compelling evidence that methods targeting class logits or class probabilities for OOD detection are better suited for the noisy label setting.
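As an illustration of the kind of detector this takeaway favors, here is a minimal deep-KNN-style score in the spirit of [57]: the negative Euclidean distance from a test feature to its k-th nearest neighbour among the ID training features, using no labels or class probabilities. This is our own simplified sketch (benchmarked implementations typically also L2-normalize the features, which we omit here), and the toy features are placeholders.

```python
import numpy as np

def knn_ood_score(test_feats, train_feats, k=50):
    """Higher score = more ID: negative distance to the k-th nearest ID training feature.
    Class-agnostic - no predicted labels or probabilities are involved."""
    d = np.linalg.norm(test_feats[:, None, :] - train_feats[None, :, :], axis=-1)
    kth_nearest = np.sort(d, axis=1)[:, k - 1]
    return -kth_nearest

# Toy usage with random features standing in for penultimate-layer activations.
rng = np.random.default_rng(0)
train_feats = rng.normal(size=(2000, 128))          # ID training features
id_test = rng.normal(size=(10, 128))                # looks like the training data
ood_test = rng.normal(loc=3.0, size=(10, 128))      # clearly shifted
print(knn_ood_score(id_test, train_feats).mean())   # closer to 0 (small distances)
print(knn_ood_score(ood_test, train_feats).mean())  # much more negative
```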
Are there tricks that work? We consider 3 popular "tricks" aiming to better separate ID vs. OOD samples in logit or probability space - temperature scaling, input perturbation and sparsification - and assess their effectiveness in a noisy label setting (excluding cleanly trained models). To isolate the effect of Softmax temperature scaling and input perturbation, we introduce $\mathrm{ODIN}_{\mathrm{notemp}}$ (ODIN with temperature $T$ fixed to 1) and $\mathrm{ODIN}_{\mathrm{nopert}}$ (perturbation magnitude $m$ set to 0). We find that scaling $T$ by maximizing likelihood on ID validation labels is detrimental ($\epsilon_{\mathrm{min}}[\mathrm{MSP} > \mathrm{TempScale}] = 0.15$); however, picking $T$ based on OOD validation detection performance does make a statistically significant (though not practically significant) difference ($\epsilon_{\mathrm{min}}[\mathrm{ODIN}_{\mathrm{nopert}} > \mathrm{MSP}] = 0.05$). Input perturbation does not help in a label noise setting: looking at the optimal $m$ selected during $\mathrm{ODIN}_{\mathrm{notemp}}$'s automatic parameter tuning, we observe that as the label noise rate increases, $m = 0$ (no perturbation) is increasingly likely to be picked. As for feature or weight sparsification, we note that REACT and DICE are the most promising logit-based methods in the AUROC$_{\text{incorrect vs. OOD}}$ ranking of Figure 1.

# 6.4. Let's not forget about the validation set

Picking a model checkpoint While it is well-understood that early stopping is beneficial to classification accuracy when training a classifier with noisy labels [34], we investigate whether this extends to OOD detection performance. We compare the OOD detection performance for the 2 checkpointing strategies, and find that for almost all methods, early stopping is beneficial ($\epsilon_{\mathrm{min}}$[early > last] < 0.5). However, looking at Figure 6, we note that early stopping may increase the rate at which OOD detection performance drops due to label noise for a given drop in accuracy - to an extreme in the case of TempScaling. A closer look at Figure 5 gives some insight into its effect on the logits.

What about OOD detector parameter tuning? Many of the methods in our benchmark involve a set-up step where dataset-specific parameters are computed (e.g. statistics for ID samples) and/or a tuning step where hyperparameters are tuned to maximize OOD detection performance on a held-out validation OOD set. The set of (hyper)parameters for each method is outlined in the supplementary. Among these methods, some make use of classification labels during set-up/tuning - e.g. to compute statistics for each class. In a label noise setting, this raises the question of whether to use a clean validation set or the noisy training set for set-up/tuning, and whether this makes a difference. We compare both settings for the 6 methods in our benchmark making use of class labels during set-up: MDS, RMDS, MDSEnsemble, GRAM, OpenMax and SHE, with results visualized in the supplementary. For SHE, which computes the mean of features for each class during set-up, there is no statistically significant difference between using clean validation labels or potentially noisy training labels, although the latter may be better in some cases ($\epsilon_{\mathrm{min}}[\mathrm{SHE}_{\mathrm{val}} > \mathrm{SHE}_{\mathrm{train}}] = 1$ and $\epsilon_{\mathrm{min}}[\mathrm{SHE}_{\mathrm{train}} > \mathrm{SHE}_{\mathrm{val}}] = 0.63$). For methods based on the Mahalanobis score, using noisy training labels to compute class-wise feature means and tied covariance is better ($\epsilon_{\mathrm{min}}[\mathrm{MDS}_{\mathrm{train}} > \mathrm{MDS}_{\mathrm{val}}] = 0.19$ and $\epsilon_{\mathrm{min}}[\mathrm{RMDS}_{\mathrm{train}} > \mathrm{RMDS}_{\mathrm{val}}] = 0$) - intuitively, the class-specific statistics are more accurate with more data.
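To make this comparison concrete, the sketch below shows how the class-wise statistics behind an MDS-style score could be fit either on a large, possibly noisy training set or on a small clean validation set; at test time the score is the negative Mahalanobis distance to the closest class mean, regardless of the predicted class. Function names and the toy data are ours - a simplified illustration, not the benchmark implementation.

```python
import numpy as np

def fit_class_stats(feats, labels, num_classes):
    """Class-wise means and a single (tied) covariance, as in the MDS detector [32]."""
    means = np.stack([feats[labels == c].mean(axis=0) for c in range(num_classes)])
    centered = feats - means[labels]
    cov = centered.T @ centered / len(feats)
    return means, np.linalg.pinv(cov)

def mds_score(test_feats, means, precision):
    """Higher = more ID: negative Mahalanobis distance to the closest class mean."""
    diffs = test_feats[:, None, :] - means[None, :, :]            # shape (n, C, d)
    dists = np.einsum("ncd,de,nce->nc", diffs, precision, diffs)  # squared distances
    return -dists.min(axis=1)                                     # closest class, any class

# Fit the same detector twice: many noisy training labels vs. few clean validation labels.
rng = np.random.default_rng(0)
C, d = 10, 64
train_feats, train_noisy = rng.normal(size=(20_000, d)), rng.integers(0, C, 20_000)
val_feats, val_clean = rng.normal(size=(1_000, d)), rng.integers(0, C, 1_000)

stats_train = fit_class_stats(train_feats, train_noisy, C)
stats_val = fit_class_stats(val_feats, val_clean, C)
test_feats = rng.normal(size=(5, d))
print(mds_score(test_feats, *stats_train))
print(mds_score(test_feats, *stats_val))
```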
Common to these 3 methods is that the OOD score at test-time does not depend on the predicted class (likely to be incorrect in a label noise setting), but is rather based on distance to the closest class in feature space (regardless of what class is predicted). OpenMax computes the mean logit per class, only considering correctly predicted samples (labels are used to check correctness) - using a potentially + +noisy training set yields consistently better performance $(\epsilon_{\mathrm{min}}[\mathrm{OpenMax}_{\mathrm{train}} > \mathrm{OpenMax}_{\mathrm{val}}] = 0)$ . Lastly, and in contrast to the other methods, GRAM benefits from using clean validation samples rather than a large number of noisy training samples for computing class-specific bounds of feature correlations $(\epsilon_{\mathrm{min}}[\mathrm{GRAM}_{\mathrm{val}} > \mathrm{GRAM}_{\mathrm{train}}] = 0.23)$ . However, the performance gap between the two settings is small. + +Takeaway: Clean isn't always better or possible The use of clean vs. noisy labels during label-based parameter tuning is an important consideration. For distance-based methods which compute class-wise statistics, it appears that quantity often trumps quality, even when over $30\%$ of training labels are incorrect. This is promising for applications where a clean validation set is not available (e.g. medical imaging where labels are inherently subjective [28]). + +# 6.5. What about a more realistic setting? + +We have thus far studied OOD detection in a simple (but standard [67]) setting where the base classifier is trained from scratch, and where there is strong semantic and covariate shift between ID and OOD images. Yet in practice, pre-training is widely adopted, and distribution shifts may be much more subtle. We therefore extend our study of label noise to fine-grained semantic shift detection with a base classifier that has been pre-trained on ImageNet [8] before being trained on a dataset of interest. We follow the Semantic Shift Benchmark (SSB), where the goal is to detect unknown classes from a known dataset (e.g. held-out bird species from the CUB [61] dataset or held-out aircraft model variants from FGVC-Aircraft [38]). Using SSB splits, we train ResNet50s (pre-trained) on half of the classes from CUB/FGVC-Aircraft (448x448 images), and we evaluate post-hoc OOD detection performance on known classes from the test set (ID) vs. the remaining unseen classes (OOD) split into 3 increasingly difficult sets. Since clean vs. real noisy label pairs are not available, we inject synthetic label noise in the training set (SU noise model) and follow the same evaluation procedure as in previous sections. Fig. 7 summarizes its detrimental effect on fine-grained semantic shift detection across the 20 studied OOD detection methods: increasing label noise and "difficulty" of the OOD set act as orthogonal bottle-necks to detection performance. Increased label noise pulls AUROC $_{\text{ID vs. OOD}}$ and AUROC $_{\text{incorrect vs. OOD}}$ to $50\%$ . + +Takeaway: Limitations of post-hoc OOD detectors extend beyond toy settings Even in a more realistic setting where the base classifier has first been pre-trained on ImageNet and OOD samples are similar in appearance to the ID dataset, all 20 methods poorly separate incorrectly classified ID samples from OOD samples, and degrade when the classifier has been trained on noisy labels. + +![](images/890c31feb3aff5fdc415243edfb80ccf250df6d2bdac7ea35fbda2b154a8a0fa.jpg) +Figure 7. 
Each boxplot shows the performance distribution across 6 classifiers (3 seeds, 2 checkpoints) $\times$ 20 post-hoc methods, considering all ID test samples (top) or only incorrectly classified ones (bottom) in the AUROC metric. + +# 7. Zooming out + +Study limitations and possible extensions We have focused on post-hoc OOD detection methods due to their pragmatic appeal and to maintain experimental feasibility. Extending this study to training-based OOD detection methods [71] would of course be valuable. Aligning with OOD benchmarks [67], we also trained the base classifiers with a standard discriminative objective. Alternative supervision schemes may also be considered, and the effect of pre-training (and on what?) would be interesting to further analyse in a label noise setting, as it been shown to improve post-hoc OOD detection performance [2, 20, 33]. Lastly, the potential of noisy label removal [29, 43] or noise-robust learning [27, 63] techniques from the label noise literature (designed with classification performance in mind) for improving OOD detection would be a natural next step. + +Conclusion We have explored the intersection between classification label noise and OOD detection, and conducted extensive experiments to extract new insights into the limitations of existing post-hoc methods. Our findings also echo the need to re-think the aims and evaluation of OOD detection in the context of safe deployment [26] (e.g. do we really want to exclude ID misclassifications from detection?). We hope that this work paves the way for future investigations which prioritize the robustness and applicability of OOD detection models in practical, imperfect classification scenarios which account for data uncertainty. + +# 8. Acknowledgements + +This work was supported by the Danish Data Science Academy, which is funded by the Novo Nordisk Foundation (NNF21SA0069429) and VILLUM FONDEN (40516). Thanks to the Pioneer Centre for AI (DNRF grant P1). + +# References + +[1] Gorkem Algan and Ilkay Ulusoy. Image classification with deep learning in the presence of noisy labels: A survey. Knowledge-Based Systems, 215:106771, 2021. 2 +[2] Anders Johan Andreassen, Yasaman Bahri, Behnam Neyshabur, and Rebecca Roelofs. The evolution of out-of-distribution robustness throughout fine-tuning. Transactions on Machine Learning Research, 2022. 8 +[3] Abhijit Bendale and Terrance E. Boult. Towards open set deep networks. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1563-1572, 2016. 3 +[4] Julian Bitterwolf, Maximilian Mueller, and Matthias Hein. In or out? fixing imagenet out-of-distribution detection evaluation. In ICML, 2023. 1 +[5] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 – mining discriminative components with random forests. In European Conference on Computer Vision, 2014. 4 +[6] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 3606-3613, 2014. 4 +[7] Eustasio Del Barrio, Juan A Cuesta-Albertos, and Carlos Matrán. An optimal transportation approach for assessing almost stochastic order. In The Mathematics of the Uncertain, pages 33-44. Springer, 2018. 4 +[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, 2009. 8 +[9] Li Deng. 
The mnist database of handwritten digit images for machine learning research [best of the web]. IEEE Signal Processing Magazine, 29(6):141-142, 2012. 4 +[10] Andrija Djurisic, Nebojsa Bozanic, Arjun Ashok, and Rosanne Liu. Extremely simple activation shaping for out-of-distribution detection. In The Eleventh International Conference on Learning Representations, 2023. 1, 3 +[11] Rotem Dror, Segev Shlomov, and Roi Reichart. Deep dominance - how to properly compare deep neural models. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28-August 2, 2019, Volume 1: Long Papers, pages 2773- 2785. Association for Computational Linguistics, 2019. 4 +[12] Stanislav Fort, Jie Ren, and Balaji Lakshminarayanan. Exploring the limits of out-of-distribution detection. In Advances in Neural Information Processing Systems, 2021. 3 +[13] Benoit Frenay and Michel Verleysen. Classification in the presence of label noise: A survey. IEEE Transactions on Neural Networks and Learning Systems, 25(5):845-869, 2014. 2 +[14] Ido Galil, Mohammed Dabbah, and Ran El-Yaniv. A framework for benchmarking class-out-of-distribution detection and its application toImagenet. In The Eleventh International Conference on Learning Representations, 2023. 1, 2, 3, 5 + +[15] Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q. Weinberger. On calibration of modern neural networks. In Proceedings of the 34th International Conference on Machine Learning, pages 1321-1330. PMLR, 2017. 3 +[16] Ali Hassani, Steven Walton, Nikhil Shah, Abulikemu Abuduweili, Jiachen Li, and Humphrey Shi. Escaping the big data paradigm with compact transformers. 2021. 4 +[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4 +[18] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. Eurosat: A novel dataset and deep learning benchmark for land use and land cover classification. IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, 2019. 4 +[19] Dan Hendrycks and Kevin Gimpel. A baseline for detecting misclassified and out-of-distribution examples in neural networks. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net, 2017. 1, 3 +[20] Dan Hendrycks, Kimin Lee, and Mantas Mazeika. Using pre-training can improve model robustness and uncertainty. In Proceedings of the 36th International Conference on Machine Learning, pages 2712-2721. PMLR, 2019. 8 +[21] Dan Hendrycks, Steven Basart, Mantas Mazeika, Andy Zou, Joseph Kwon, Mohammadreza Mostajabi, Jacob Steinhardt, and Dawn Song. Scaling out-of-distribution detection for real-world settings. In Proceedings of the 39th International Conference on Machine Learning, pages 8759-8773. PMLR, 2022. 3 +[22] Dan Hendrycks, Steven Basart, Mantas Mazeika, Andy Zou, Joe Kwon, Mohammadreza Mostajabi, Jacob Steinhardt, and Dawn Song. Scaling out-of-distribution detection for real-world settings. ICML, 2022. 3 +[23] R. Huang and Y. Li. Mos: Towards scaling out-of-distribution detection for large semantic space. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8706-8715, Los Alamitos, CA, USA, 2021. IEEE Computer Society. 3 +[24] Rui Huang, Andrew Geng, and Yixuan Li. On the importance of gradients for detecting distributional shifts in the wild. 
In Advances in Neural Information Processing Systems, 2021. 3 +[25] Galadrielle Humblot-Renaux, Sergio Escalera, and Thomas B. Moeslund. Beyond auroc & co. for evaluating out-of-distribution detection performance. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 3881-3890, 2023. 4 +[26] Paul F Jaeger et al. A call to reflect on evaluation practices for failure detection in image classification. In ICLR, 2023. 8 +[27] Lu Jiang, Di Huang, Mason Liu, and Weilong Yang. Beyond synthetic noise: Deep learning on controlled noisy labels. In Proceedings of the 37th International Conference on Machine Learning, pages 4804-4815. PMLR, 2020. 8 + +[28] Davood Karimi, Haoran Dou, Simon K. Warfield, and Ali Gholipour. Deep learning with noisy labels: Exploring techniques and remedies in medical image analysis. Medical Image Analysis, 65:101759, 2020. 1, 2, 8 +[29] Taehyeon Kim, Jongwoo Ko, sangwook Cho, JinHwan Choi, and Se-Young Yun. Fine samples for learning with noisy labels. In Advances in Neural Information Processing Systems, pages 24137–24149. Curran Associates, Inc., 2021. 8 +[30] Konstantin Kirchheim, Marco Filax, and Frank Ortmeier. Pytorch-ood: A library for out-of-distribution detection based on pytorch. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 4351-4360, 2022. 1 +[31] Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. Technical Report 0, University of Toronto, Toronto, Ontario, 2009. 2, 4 +[32] Kimin Lee, Kibok Lee, Honglak Lee, and Jinwoo Shin. A simple unified framework for detecting out-of-distribution samples and adversarial attacks. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2018. 1, 3 +[33] Jingyao Li, Pengguang Chen, Zexin He, Shaozuo Yu, Shu Liu, and Jiaya Jia. Rethinking out-of-distribution (ood) detection: Masked image modeling is all you need. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11578-11589, 2023. 8 +[34] Mingchen Li, Mahdi Soltanolkotabi, and Samet Oymak. Gradient descent with early stopping is provably robust to label noise for overparameterized neural networks. In Proceedings of the Twenty Third International Conference on Artificial Intelligence and Statistics, pages 4313-4324. PMLR, 2020. 7 +[35] Shiyu Liang, Yixuan Li, and R. Srikant. Enhancing the reliability of out-of-distribution image detection in neural networks. In International Conference on Learning Representations, 2018. 3 +[36] Sheng Liu, Jonathan Niles-Weed, Narges Razavian, and Carlos Fernandez-Granda. Early-learning regularization prevents memorization of noisy labels. Advances in Neural Information Processing Systems, 33, 2020. 2 +[37] Weitang Liu, Xiaoyun Wang, John Owens, and Yixuan Li. Energy-based out-of-distribution detection. In Advances in Neural Information Processing Systems, pages 21464-21475. Curran Associates, Inc., 2020. 3 +[38] S. Maji, J. Kannala, E. Rahtu, M. Blaschko, and A. Vedaldi. Fine-grained visual classification of aircraft. Technical report, 2013. 8 +[39] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, and Andrew Y. Ng. Reading Digits in Natural Images with Unsupervised Feature Learning. In NIPS Workshop on Deep Learning and Unsupervised Feature Learning 2011, 2011. 2, 4 +[40] Amanda Olmin and Fredrik Lindsten. Robustness and reliability when training with noisy labels. 
In Proceedings of The 25th International Conference on Artificial Intelligence and Statistics, pages 922–942. PMLR, 2022. 2 +[41] Diane Oyen, Michal Kucer, Nick Hengartner, and Har Simrat Singh. Robustness to label noise depends on the shape of + +the noise distribution. In Advances in Neural Information Processing Systems, 2022. 2 +[42] Chao Pan, Bo Yuan, Wei Zhou, and Xin Yao. Towards robust uncertainty estimation in the presence of noisy labels. In Artificial Neural Networks and Machine Learning - ICANN 2022, pages 673-684, Cham, 2022. Springer International Publishing. 2 +[43] Geoff Pleiss, Tianyi Zhang, Ethan Elenberg, and Kilian Q Weinberger. Identifying mislabeled data using the area under the margin ranking. In Advances in Neural Information Processing Systems, pages 17044-17056. Curran Associates, Inc., 2020. 8 +[44] Jie Ren, Stanislav Fort, Jeremiah Liu, Abhijit Guha Roy, Shreyas Padhy, and Balaji Lakshminarayanan. A simple fix to mahalanobis distance for improving near-ood detection, 2021. 3 +[45] David Rolnick, Andreas Veit, Serge Belongie, and Nir Shavit. Deep learning is robust to massive label noise, 2018. 2 +[46] Ragav Sachdeva, Filipe R. Cordeiro, Vasileios Belagiannis, Ian Reid, and Gustavo Carneiro. Evidentialmix: Learning with combined open-set and closed-set noisy labels. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 3607-3615, 2021. 2 +[47] Mohammadreza Salehi, Hossein Mirzaei, Dan Hendrycks, Yixuan Li, Mohammad Hossein Rohban, and Mohammad Sabokrou. A unified survey on anomaly, novelty, open-set, and out of-distribution detection: Solutions and future challenges. Transactions on Machine Learning Research, 2022. 1 +[48] Chandramouli Shama Sastry and Sageev Oore. Detecting out-of-distribution examples with Gram matrices. In Proceedings of the 37th International Conference on Machine Learning, pages 8491-8501. PMLR, 2020. 3 +[49] Vikash Sehwag, Arjun Nitin Bhagoji, Liwei Song, Chawin Sitawarin, Daniel Cullina, Mung Chiang, and Prateek Mittal. Analyzing the robustness of open-world machine learning. In Proceedings of the 12th ACM Workshop on Artificial Intelligence and Security, page 105–116, New York, NY, USA, 2019. Association for Computing Machinery. 3 +[50] Hwanjun Song, Minseok Kim, and Jae-Gil Lee. SELFIE: Refurbishing unclean samples for robust deep learning. In ICML, 2019. 2 +[51] Hwanjun Song, Minseok Kim, Dongmin Park, Yooju Shin, and Jae-Gil Lee. Learning from noisy labels with deep neural networks: A survey. IEEE Transactions on Neural Networks and Learning Systems, pages 1-19, 2022. 2 +[52] Hyun Oh Song, Yu Xiang, Stefanie Jegelka, and Silvio Savarese. Deep metric learning via lifted structured feature embedding. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4 +[53] Yue Song, Nicu Sebe, and Wei Wang. Rankfeat: Rank-1 feature removal for out-of-distribution detection. In Advances in Neural Information Processing Systems, 2022. 3 +[54] Alexander Sorokin and David Forsyth. Utility data annotation with amazon mechanical turk. In 2008 IEEE Computer + +Society Conference on Computer Vision and Pattern Recognition Workshops, pages 1-8, 2008. 1 +[55] Yiyou Sun and Yixuan Li. Dice: Leveraging sparsification for out-of-distribution detection. In Computer Vision – ECCV 2022, pages 691–708, Cham, 2022. Springer Nature Switzerland. 3 +[56] Yiyou Sun, Chuan Guo, and Yixuan Li. React: Out-of-distribution detection with rectified activations. 
In Advances in Neural Information Processing Systems, pages 144–157. Curran Associates, Inc., 2021. 3 +[57] Yiyou Sun, Yifei Ming, Xiaojin Zhu, and Yixuan Li. Out-of-distribution detection with deep nearest neighbors. In Proceedings of the 39th International Conference on Machine Learning, pages 20827-20840. PMLR, 2022. 1, 3 +[58] Ilya Tolstikhin, Neil Houlsby, Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Thomas Unterthiner, Jessica Yung, Andreas Peter Steiner, Daniel Keysers, Jakob Uszkoreit, Mario Lucic, and Alexey Dosovitskiy. MLP-mixer: An allMLP architecture for vision. In Advances in Neural Information Processing Systems, 2021. 4 +[59] Dennis Ulmer, Christian Hardmeier, and Jes Frellsen. deepsignificance-easy and meaningful statistical significance testing in the age of neural networks. arXiv preprint arXiv:2204.06815, 2022. 4 +[60] Sagar Vaze, Kai Han, Andrea Vedaldi, and Andrew Zisserman. Open-set recognition: A good closed-set classifier is all you need. In International Conference on Learning Representations, 2022. 1, 2, 3, 5, 6 +[61] C. Wah, S. Branson, P. Welinder, P. Perona, and S. Belongie. The caltech-ucsd birds-200-2011 dataset. Technical Report CNS-TR-2011-001, California Institute of Technology, 2011. 8 +[62] H. Wang, Z. Li, L. Feng, and W. Zhang. Vim: Out-of-distribution with virtual-logit matching. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4911-4920, Los Alamitos, CA, USA, 2022. IEEE Computer Society. 3 +[63] Hongxin Wei, Huiping Zhuang, Renchunzi Xie, Lei Feng, Gang Niu, Bo An, and Yixuan Li. Mitigating memorization of noisy labels by clipping the model prediction. In Proceedings of the 40th International Conference on Machine Learning. JMLR.org, 2023. 8 +[64] Jiaheng Wei, Zhaowei Zhu, Hao Cheng, Tongliang Liu, Gang Niu, and Yang Liu. Learning with noisy labels revisited: A study using real-world human annotations. In International Conference on Learning Representations, 2022. 2, 4 +[65] Tong Xiao, Tian Xia, Yi Yang, Chang Huang, and Xiaogang Wang. Learning from massive noisy labeled data for image classification. In 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2691-2699, 2015. 4 +[66] Jingkang Yang, Kaiyang Zhou, Yixuan Li, and Ziwei Liu. Generalized out-of-distribution detection: A survey. arXiv preprint arXiv:2110.11334, 2021. 1 +[67] Jingkang Yang, Pengyun Wang, Dejian Zou, Zitang Zhou, Kunyuan Ding, WENXUAN PENG, Haoqi Wang, Guangyao Chen, Bo Li, Yiyou Sun, Xuefeng Du, Kaiyang + +Zhou, Wayne Zhang, Dan Hendrycks, Yixuan Li, and Zwei Liu. OpenOOD: Benchmarking generalized out-of-distribution detection. In Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2022. 1, 2, 3, 4, 8 +[68] Xiaoyong Yuan, Pan He, Qile Zhu, and Xiaolin Li. Adversarial examples: Attacks and defenses for deep learning. IEEE Transactions on Neural Networks and Learning Systems, 30 (9):2805-2824, 2019. 3 +[69] Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning requires rethinking generalization. In International Conference on Learning Representations, 2017. 2 +[70] Jinsong Zhang, Qiang Fu, Xu Chen, Lun Du, Zelin Li, Gang Wang, xiaoguang Liu, Shi Han, and Dongmei Zhang. Out-of-distribution detection based on in-distribution data patterns memorization with modern hopfield energy. In The Eleventh International Conference on Learning Representations, 2023. 
1, 3 +[71] Jingyang Zhang, Jingkang Yang, Pengyun Wang, Haoqi Wang, Yueqian Lin, Haoran Zhang, Yiyou Sun, Xuefeng Du, Kaiyang Zhou, Wayne Zhang, Yixuan Li, Ziwei Liu, Yiran Chen, and Hai Li. Openood v1.5: Enhanced benchmark for out-of-distribution detection. arXiv preprint arXiv:2306.09301, 2023.8 \ No newline at end of file diff --git a/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/images.zip b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..3b63ed25876336e771c651f969ac68e0091c6585 --- /dev/null +++ b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20c8fe83074e80c34fd29ea886977a55345557e6c2899bfb0b0966f3a85e7cac +size 449165 diff --git a/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/layout.json b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..255a1d2f04400953c36100614f3259aa0e510c05 --- /dev/null +++ b/2024/A Noisy Elephant in the Room_ Is Your Out-of-Distribution Detector Robust to Label Noise_/layout.json @@ -0,0 +1,8110 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 122, + 103, + 473, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 103, + 473, + 138 + ], + "spans": [ + { + "bbox": [ + 122, + 103, + 473, + 138 + ], + "type": "text", + "content": "A noisy elephant in the room: Is your out-of-distribution detector robust to label noise?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 160, + 504, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 160, + 504, + 175 + ], + "spans": [ + { + "bbox": [ + 115, + 160, + 504, + 175 + ], + "type": "text", + "content": "Galadrielle Humblot-Renaux" + }, + { + "bbox": [ + 115, + 160, + 504, + 175 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 115, + 160, + 504, + 175 + ], + "type": "text", + "content": " Sergio Escalera" + }, + { + "bbox": [ + 115, + 160, + 504, + 175 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 115, + 160, + 504, + 175 + ], + "type": "text", + "content": " Thomas B. 
Moeslund" + }, + { + "bbox": [ + 115, + 160, + 504, + 175 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 149, + 175, + 471, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 175, + 471, + 189 + ], + "spans": [ + { + "bbox": [ + 149, + 175, + 471, + 189 + ], + "type": "text", + "content": "Visual Analysis and Perception lab, Aalborg University, Denmark" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 190, + 566, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 190, + 566, + 204 + ], + "spans": [ + { + "bbox": [ + 54, + 190, + 566, + 204 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 54, + 190, + 566, + 204 + ], + "type": "text", + "content": "Department of Mathematics and Informatics, University of Barcelona and Computer Vision Center, Spain" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 145, + 206, + 476, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 206, + 476, + 216 + ], + "spans": [ + { + "bbox": [ + 145, + 206, + 476, + 216 + ], + "type": "text", + "content": "gegeh@create.aau.dk sescalera@ub.edu tbm@create.aau.dk" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "spans": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 269, + 290, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 269, + 290, + 490 + ], + "spans": [ + { + "bbox": [ + 47, + 269, + 290, + 490 + ], + "type": "text", + "content": "The ability to detect unfamiliar or unexpected images is essential for safe deployment of computer vision systems. In the context of classification, the task of detecting images outside of a model's training domain is known as out-of-distribution (OOD) detection. While there has been a growing research interest in developing post-hoc OOD detection methods, there has been comparably little discussion around how these methods perform when the underlying classifier is not trained on a clean, carefully curated dataset. In this work, we take a closer look at 20 state-of-the-art OOD detection methods in the (more realistic) scenario where the labels used to train the underlying classifier are unreliable (e.g. crowd-sourced or web-scraped labels). Extensive experiments across different datasets, noise types & levels, architectures and checkpointing strategies provide insights into the effect of class label noise on OOD detection, and show that poor separation between incorrectly classified ID samples vs. OOD samples is an overlooked yet important limitation of existing methods. Code: https://github.com/glhr/ood-labelnoise" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 513, + 128, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 513, + 128, + 526 + ], + "spans": [ + { + "bbox": [ + 47, + 513, + 128, + 526 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 533, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 533, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 533, + 287, + 689 + ], + "type": "text", + "content": "In many real-world applications where deep neural networks are deployed \"in the wild\", it is desirable to have models that not only correctly classify samples drawn from the distribution of labeled data but also flag unexpected inputs as out-of-distribution (OOD). This has motivated the development of a wide range of OOD detection methods and benchmarks for computer vision [47, 66]. In particular, post-hoc OOD detection methods have shown wide appeal: compared to training-based methods, post-hoc OOD detectors can be applied on top of existing image classifiers without the need for re-training, have little to no architecture constraints, do not compromise classification performance, and achieve strong performance in large-scale settings [67]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "Existing OOD benchmarks place significant emphasis on carefully designing the selection of OOD datasets used" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 326, + 243, + 526, + 338 + ], + "blocks": [ + { + "bbox": [ + 326, + 243, + 526, + 338 + ], + "lines": [ + { + "bbox": [ + 326, + 243, + 526, + 338 + ], + "spans": [ + { + "bbox": [ + 326, + 243, + 526, + 338 + ], + "type": "image", + "image_path": "0afe293cd79a0a438a21007b19d3aa6cf5c9549034d9328e62b1bf42b9aafe8c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 345, + 547, + 378 + ], + "lines": [ + { + "bbox": [ + 305, + 345, + 547, + 378 + ], + "spans": [ + { + "bbox": [ + 305, + 345, + 547, + 378 + ], + "type": "text", + "content": "Figure 1. Can state-of-the-art OOD detectors tell incorrectly classified ID images apart from OOD inputs? Not really. Here we compare their performance across 396 trained classifiers." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 401, + 546, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 401, + 546, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 401, + 546, + 581 + ], + "type": "text", + "content": "for evaluation [4, 14, 60, 67]. In contrast, the role of the in-distribution (ID) dataset used for training the underlying classifier is seldom discussed. Among the most popular choices of ID dataset are MNIST, CIFAR10, CIFAR100 and ImageNet [30, 67] - all of which have been carefully curated and reliably annotated. Yet, in practice, the collection of labelled datasets involves a trade-off between acquisition time/cost and annotation quality - human inattention, mis-clicking, limited expertise, crowd-sourcing, automated annotation, and other cost-saving measures inevitably introduce labelling errors [54]. Besides, some images are inherently ambiguous to label even for the most knowledgeable and careful of annotators [28]. Considering how pervasive the problem of label noise is in real-world image classification datasets, its effect on OOD detection is crucial to study." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 581, + 547, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 547, + 653 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 547, + 653 + ], + "type": "text", + "content": "To address this gap, we systematically analyse the label noise robustness of a wide range of OOD detectors, ranging from the widely adopted Maximum Softmax Probability (MSP) baseline [14, 19], to distance-based methods operating in feature space [32, 57], to more recent, complex methods such as SHE [70] and ASH [10]. In particular:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 653, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 653, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 653, + 547, + 713 + ], + "type": "text", + "content": "1. We present the first study of post-hoc OOD detection in the presence of noisy classification labels, examining the performance of 20 state-of-the-art methods under different types and levels of label noise in the training data. Our study includes multiple classification architectures" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "22626" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 59, + 72, + 286, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 72, + 286, + 120 + ], + "spans": [ + { + "bbox": [ + 59, + 72, + 286, + 120 + ], + "type": "text", + "content": "and datasets, ranging from the beloved CIFAR10 to the more difficult Clothing1M, and shows that even at a low noise rate, the label noise setting poses an interesting challenge for many methods." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 121, + 286, + 228 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 47, + 121, + 286, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 121, + 286, + 192 + ], + "spans": [ + { + "bbox": [ + 47, + 121, + 286, + 192 + ], + "type": "text", + "content": "2. We revisit the notion that OOD detection performance correlates with ID accuracy [14, 60], examining when and why this relation holds. Robustness to inaccurate classification requires that OOD detectors effectively separate mistakes on ID data from OOD samples - yet most existing methods confound the two (Figure 1)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 192, + 286, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 192, + 286, + 228 + ], + "spans": [ + { + "bbox": [ + 47, + 192, + 286, + 228 + ], + "type": "text", + "content": "3. Our analysis includes key takeaways and recommendations for future evaluation and development of OOD detection methods considering an unreliable label setting." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 239, + 140, + 252 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 140, + 252 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 140, + 252 + ], + "type": "text", + "content": "2. Problem set-up" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 258, + 286, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 258, + 286, + 306 + ], + "spans": [ + { + "bbox": [ + 46, + 258, + 286, + 306 + ], + "type": "text", + "content": "In this work, we tackle the question: what happens when post-hoc OOD detectors are applied on top of a classifier trained with unreliable labels - a common setting in practice? We introduce the main relevant concepts below." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "spans": [ + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "text", + "content": "Classifier We study OOD detection in the context of supervised image classification, where a discriminative model " + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "inline_equation", + "content": "h: \\mathcal{X} \\to \\mathcal{Y}" + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "text", + "content": " is trained on a dataset of " + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "text", + "content": " labelled examples " + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "inline_equation", + "content": "D_{train} = \\{(x_i, y_i)\\}_{i=1}^N \\in \\mathcal{X} \\times \\mathcal{Y}" + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "text", + "content": ", where each " + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "text", + "content": " is an input image and each " + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "text", + "content": " is the corresponding class from the label space " + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "inline_equation", + "content": "\\mathcal{Y}" + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "text", + "content": ". A common choice would be CIFAR10 [31]. " + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "inline_equation", + "content": "P_{train}(X, Y)" + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "text", + "content": " defines the underlying training data distribution. 
The classifier is evaluated on a test set " + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "inline_equation", + "content": "D_{test}" + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "text", + "content": " drawn from the same distribution " + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "inline_equation", + "content": "P_{test}(X, Y) = P_{train}(X, Y)" + }, + { + "bbox": [ + 46, + 307, + 286, + 426 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 426, + 286, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 286, + 545 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 286, + 545 + ], + "type": "text", + "content": "OD detector Post-hoc OOD detection equips the trained classifier " + }, + { + "bbox": [ + 46, + 426, + 286, + 545 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 46, + 426, + 286, + 545 + ], + "type": "text", + "content": " with a scoring function " + }, + { + "bbox": [ + 46, + 426, + 286, + 545 + ], + "type": "inline_equation", + "content": "o: X \\to \\mathbb{R}" + }, + { + "bbox": [ + 46, + 426, + 286, + 545 + ], + "type": "text", + "content": " aiming to distinguish usual examples drawn from " + }, + { + "bbox": [ + 46, + 426, + 286, + 545 + ], + "type": "inline_equation", + "content": "P_{test}(X)" + }, + { + "bbox": [ + 46, + 426, + 286, + 545 + ], + "type": "text", + "content": " (ID samples) and anomalous (OOD) examples drawn from a disjoint, held-out distribution " + }, + { + "bbox": [ + 46, + 426, + 286, + 545 + ], + "type": "inline_equation", + "content": "P_{out}(X)" + }, + { + "bbox": [ + 46, + 426, + 286, + 545 + ], + "type": "text", + "content": ". In practice, a collection of auxiliary datasets with minimal semantic overlap (e.g. CIFAR10 → SVHN [39]) is commonly used for evaluation [67]. Ideally, the score assigned to ID samples should be consistently lower (or higher) than for OOD samples, such that anomalous inputs can easily be flagged." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "text", + "content": "Label noise We consider a noisy label setting, where the classifier " + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "text", + "content": " does not have access to the true target values " + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "inline_equation", + "content": "y_{i}" + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "text", + "content": " during training, but rather learns from a noisy dataset " + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "inline_equation", + "content": "D_{\\text{noisy}} = \\{(x_i, \\hat{y}_i)\\}_{i=1}^N" + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "text", + "content": ", where the target labels are corrupted: " + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "inline_equation", + "content": "\\exists i" + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "inline_equation", + "content": "\\hat{y}_i \\neq y_i" + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "text", + "content": ". 
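To make the set-up concrete, the following is a minimal sketch (our own illustration, not the authors' generation code) of how such a noisy label set can be simulated from clean labels. The uniform and class-conditional variants shown here correspond to the NCAR and NAR noise models described below, and to the synthetic SU/SCC label sets used in Section 5; all names are illustrative.

```python
# Minimal sketch (not the authors' code): simulating closed-set label noise
# by corrupting a clean label vector y with overall noise rate eps.
import numpy as np

def uniform_noise(y, num_classes, eps, rng=None):
    """NCAR / uniform noise: each label is flipped with probability eps,
    uniformly to one of the other classes."""
    rng = np.random.default_rng() if rng is None else rng
    y_noisy = y.copy()
    flip = rng.random(len(y)) < eps
    # draw a replacement class != true class for every flipped sample
    offset = rng.integers(1, num_classes, size=flip.sum())
    y_noisy[flip] = (y[flip] + offset) % num_classes
    return y_noisy

def class_conditional_noise(y, transition, rng=None):
    """NAR / class-conditional noise: row c of `transition` gives
    P(observed label | true label = c); rows must sum to 1."""
    rng = np.random.default_rng() if rng is None else rng
    return np.array([rng.choice(len(row), p=row) for row in transition[y]])

y_clean = np.random.randint(0, 10, size=50_000)              # CIFAR-10-style labels
y_noisy = uniform_noise(y_clean, num_classes=10, eps=0.09)   # e.g. ~9% noise rate
print("observed noise rate:", (y_noisy != y_clean).mean())
```

The class-conditional variant reduces to the uniform one when every off-diagonal entry of the transition matrix equals eps/(C-1).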
In this work, we consider only closed-set label noise, where " + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "inline_equation", + "content": "D_{\\text{noisy}} \\in \\mathcal{X} \\times \\mathcal{Y}" + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "text", + "content": " (that is, the noisy labels lie in the same label space as the true labels [46]). The noise level " + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "text", + "content": " is given by " + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "inline_equation", + "content": "P(y \\neq \\hat{y})" + }, + { + "bbox": [ + 46, + 545, + 286, + 677 + ], + "type": "text", + "content": ", the probability that an observed label is incorrect. Common models for studying and simulating label noise are (we refer to [13] for a detailed taxonomy):" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "type": "text", + "content": "1. Noisy Completely at Random (NCAR) or uniform label noise: labels are flipped at a constant rate " + }, + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "type": "text", + "content": ", regardless of class or image." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 72, + 545, + 144 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 306, + 72, + 545, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 545, + 107 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 545, + 107 + ], + "type": "text", + "content": "2. Noisy at Random (NAR) or class-conditional label noise: a constant noise rate across all images of the same class, but different classes may have different noise rates." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 108, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 108, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 306, + 108, + 545, + 144 + ], + "type": "text", + "content": "3. Noisy Not at Random (NNAR) or instance-dependent label noise: noisy labels are jointly determined by the true class and the associated image." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 305, + 144, + 545, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 144, + 545, + 216 + ], + "spans": [ + { + "bbox": [ + 305, + 144, + 545, + 216 + ], + "type": "text", + "content": "In practice, real (as opposed to synthetically generated) label noise occurring from an imperfect annotation pipeline follows complex patterns, and is thus best represented by an instance-dependent model: some classes are more likely to be mislabeled than others, and so are some images (e.g. ambiguous or rare samples) [50, 64]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 227, + 389, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 227, + 389, + 239 + ], + "spans": [ + { + "bbox": [ + 306, + 227, + 389, + 239 + ], + "type": "text", + "content": "3. 
Related work" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 247, + 545, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 247, + 545, + 474 + ], + "spans": [ + { + "bbox": [ + 304, + 247, + 545, + 474 + ], + "type": "text", + "content": "Studying label noise The effect of unreliable labels on supervised learning is a well-studied problem in deep learning [51] and computer vision [1, 28], as errors or inconsistencies are a natural part of label collection in many real applications. Though increased dataset size can help [45], noisy labels degrade classification performance, especially in the later stages of training where over-parameterized models are prone to memorizing them [36, 69]. The precise effects of label noise have been shown to depend on the noise model and distribution [41]. A recent CIFAR classification benchmark suggests that models trained on real, instance-dependent noisy labels are significantly more prone to memorization than those trained on synthetic class-conditional labels with the same overall noise rate [64]. We therefore consider real noisy labels in our benchmark (stemming from human annotation error), which we compare to two sets of synthetic noisy labels (uniform and class-conditional). We also compare the effect of validation-based early stopping vs. converging on the training set." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 474, + 545, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 474, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 474, + 545, + 677 + ], + "type": "text", + "content": "Effect of label noise on reliability Existing studies of label noise are largely focused on classification accuracy, and few works address the other side of the coin: reliability. We look at reliability from the angle of OOD detection performance - to the best of our knowledge, there is currently no comparable study of OOD detection under a noisy label setting. Most closely related to our work are perhaps the experiments in [42] and the analysis in [40]. [42] evaluates the effect of synthetic uniform label noise on MC-dropout and deep ensembles' uncertainty estimates, showing a significant degradation in OOD detection performance with increasing noise levels - in comparison, we study post-hoc OOD detection (with a wider variety of architectures, datasets, and methods) and consider real noisy datasets. [40] studies label noise robustness in terms of model calibration, showing that early stopping, while beneficial in terms of accuracy, offers no reliability guarantees." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "content": "Benchmarking OOD detection robustness Previous works have investigated the limits of state-of-the-art OOD detection methods in various challenging settings, such as" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22627" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": "semantic similarity between ID vs. OOD classes [12, 14, 60], fine-grained ID labels [23], large-scale datasets [22] and adversarial attacks [49, 68]. In contrast, we focus on robustness to degraded classification performance on the ID dataset due to noisy labels, which to the best of our knowledge has comparably received little attention." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 144, + 289, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 144, + 289, + 335 + ], + "spans": [ + { + "bbox": [ + 46, + 144, + 289, + 335 + ], + "type": "text", + "content": "Relation between ID classification and OOD detection performance In the standard clean label setting, a strong relationship between ID classification and OOD detection performance has been observed in prior work. [60] studies the relation between closed-set (ID) classification and open-set recognition performance (AUROC), and finds open-set recognition performance to be highly correlated with classification accuracy. [14] observes a similar trend for out-of-distribution detection performance across a large variety of pre-trained deep learning architectures, using the MSP as OOD score. Both works only consider clean training datasets, and a small subset of methods. We study the extent to which this relation holds across a wider range of OOD detection methods and noisy datasets, and provide a very simple explanation for why some methods like MSP reach such a high correlation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 346, + 186, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 346, + 186, + 358 + ], + "spans": [ + { + "bbox": [ + 47, + 346, + 186, + 358 + ], + "type": "text", + "content": "4. OOD detection methods" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 366, + 287, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 366, + 287, + 426 + ], + "spans": [ + { + "bbox": [ + 46, + 366, + 287, + 426 + ], + "type": "text", + "content": "We evaluate 20 post-hoc OOD detection methods from the OpenOOD benchmark [67] - currently the most comprehensive open-source benchmark available. Here we present and broadly categorize these methods based on how their scoring function is designed." 
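All of these methods share the same post-hoc recipe: the classifier stays frozen, a scalar score is computed for every input, and a threshold (typically chosen so that, e.g., 95% of ID validation samples are retained) decides what gets flagged as OOD. A minimal sketch of that recipe, with hypothetical names and a generic PyTorch-style interface rather than the OpenOOD API:

```python
# Sketch of the generic post-hoc recipe shared by the methods described below
# (assumed PyTorch interface; `classifier` and `score_fn` are placeholders).
import torch

@torch.no_grad()
def ood_scores(classifier, loader, score_fn):
    """Run a frozen classifier over a loader and return one scalar score per image."""
    classifier.eval()
    scores = []
    for images, _ in loader:
        logits = classifier(images)
        scores.append(score_fn(logits))
    return torch.cat(scores)

def pick_threshold(id_scores, tpr=0.95):
    """Choose the threshold that keeps `tpr` of ID samples; inputs scoring
    below it are then flagged as OOD at test time."""
    return torch.quantile(id_scores, 1.0 - tpr)
```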
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 426, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 287, + 605 + ], + "type": "text", + "content": "Softmax-based OOD detection revolves around the idea that ID samples are associated with higher-confidence, lower-entropy predictions than OOD samples. The baseline Maximum Softmax Probability (MSP) [19] simply takes the Softmax \"confidence\" of the predicted class as OOD score. While MSP implicitly assumes a Softmax temperature of 1, TempScaling [15] treats the temperature as a hyper-parameter, softening or sharpening the Softmax probabilities (essentially modulating categorical entropy), with the aim of improving calibration. ODIN [35] combines temperature scaling with input perturbation - \"pushing\" the input image a little in the direction that increases the MSP. In contrast, the Generalized ENtropy (GEN) score considers the full predictive distribution and captures how much it deviates from a one-hot distribution." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": "Logit-based OOD detection bypasses the squashing effect of Softmax normalization. The Maximum Logit Score (MLS) [21] directly takes the logit of the predicted class. In a similar vein, energy-based OOD detection (EBO) was first proposed in [37]: a score is derived by applying the LogSumExp function to the logits - essentially a smooth version of the MLS, with an additional temperature parameter. Several post-hoc methods using an energy score have since followed suit, proposing various modifications to the" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "network [55] or features [10, 53, 56] before extracting an energy score: REACT [56] clips activations at an upper bound, RankFeat [53] subtracts the rank-1 matrix from activations, DICE [55] applies weight sparsification such that only the strongest contributors remain, and ASH [10] sparsifies activations based on a pruning percentile." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 144, + 546, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 144, + 546, + 538 + ], + "spans": [ + { + "bbox": [ + 304, + 144, + 546, + 538 + ], + "type": "text", + "content": "Distance-based OOD detection aims to capture how much a test sample deviates from the ID dataset. The Mahalanobis distance score (MDS) [32] method fits a Gaussian distribution to the features of each class in the ID dataset; at test-time, the OOD score is taken as the distance to the closest class. The same authors also proposed MDSE Ensemble [32], which computes an MDS score not just from the features extracted before the final layer, but also at earlier points in the network, and aggregates them. Alternatively, the Relative Mahalanobis distance score (RMDS) [44] was proposed as a simple fix to MDS, which additionally fits a class-independent Gaussian to the entire ID dataset to compute a background score which is subtracted from the class-specific MDS score. 
Among other distance-based methods which rely on class-wise statistics, KLMatching (KLM) [21] takes the smallest KL Divergence between a test sample's Softmax probabilities and the mean Softmax probability vector for each ID class. OpenMax [3] operates in logit space, fitting a class-wise Weibull distribution to the distances of ID samples from the mean logit vector. Simplified Hopfield Energy (SHE) [70] computes the inner product between a test sample's features and the mean ID feature of the predicted class. GRAM [48] computes the Gram matrices of intermediate feature representations throughout the network, comparing them with the range of values observed for each class in the ID data. In contrast, deep k-nearest neighbor (KNN) [57] proposes a simple approach with no distributional assumptions - computing its score as the Euclidean distance to the closest samples from the ID set, regardless of class. Lastly, Virtual-logit Matching (VIM) [62] combines a logit energy score with a class-agnostic term capturing how features deviate from a principal subspace defined by the training set." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 539, + 545, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 539, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 304, + 539, + 545, + 624 + ], + "type": "text", + "content": "Gradient-based OOD detection: GradNorm [24] is the only method in OpenOOD which directly derives its score from the gradient space, claiming that gradient magnitude is higher for ID inputs. The KL divergence between predicted Softmax probabilities and a uniform target is backpropagated to obtain gradients w.r.t the last layer parameters, followed by an " + }, + { + "bbox": [ + 304, + 539, + 545, + 624 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 304, + 539, + 545, + 624 + ], + "type": "text", + "content": " norm to obtain the magnitude." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 633, + 387, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 633, + 387, + 647 + ], + "spans": [ + { + "bbox": [ + 306, + 633, + 387, + 647 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 654, + 545, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 677 + ], + "type": "text", + "content": "We summarize our experimental set-up below, and refer to the supplementary for further details." 
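For reference, here are condensed sketches of a few of the scoring functions from Section 4 (MSP, temperature scaling, MLS, the energy score, the Mahalanobis distance score and the KNN score). They follow the convention that higher scores indicate ID, and deliberately simplify the cited methods (e.g. a shared covariance for MDS, no input perturbation for ODIN); they are illustrations, not the reference implementations.

```python
# Simplified sketches of representative scoring functions (our condensed
# versions of the cited methods, not the official implementations).
import torch
import torch.nn.functional as F

def msp_score(logits):                      # MSP [19]
    return F.softmax(logits, dim=-1).max(dim=-1).values

def temp_scaled_score(logits, T=1000.0):    # TempScaling / ODIN-style temperature
    return F.softmax(logits / T, dim=-1).max(dim=-1).values

def max_logit_score(logits):                # MLS [21]
    return logits.max(dim=-1).values

def energy_score(logits, T=1.0):            # EBO [37]: smooth maximum over logits
    return T * torch.logsumexp(logits / T, dim=-1)

def mahalanobis_score(feats, class_means, precision):   # MDS [32], shared covariance
    # distance to the closest class-conditional Gaussian; negated so higher = more ID
    diffs = feats.unsqueeze(1) - class_means.unsqueeze(0)         # (N, C, D)
    d2 = torch.einsum('ncd,de,nce->nc', diffs, precision, diffs)  # squared Mahalanobis
    return -d2.min(dim=1).values

def knn_score(feats, train_feats, k=50):    # KNN [57]: distance to k-th nearest ID feature
    feats = F.normalize(feats, dim=-1)
    train_feats = F.normalize(train_feats, dim=-1)
    dists = torch.cdist(feats, train_feats)
    return -dists.topk(k, dim=-1, largest=False).values[:, -1]
```

The feature-modifying variants (ReAct, DICE, ASH, RankFeat) and GradNorm plug into the same interface by transforming activations, weights or gradients before a score of the kind above is computed.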
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 677, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 546, + 713 + ], + "type": "text", + "content": "ID Datasets We select popular image classification datasets from the label noise literature featuring real noisy labels alongside clean reference labels, spanning different" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "22628" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 70, + 288, + 175 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 288, + 175 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 288, + 175 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 288, + 175 + ], + "type": "table", + "html": "
<table>
<tr><td>ID dataset</td><td>classes</td><td># images (train/val/test)</td><td>resolution</td><td>noise rate</td></tr>
<tr><td><b>CIFAR-10</b> [31]</td><td>10</td><td>50,000/1,000/9,000</td><td>32x32</td><td>0%</td></tr>
<tr><td>CIFAR-10N-Agg [64]</td><td>10</td><td>50,000/1,000/9,000</td><td>32x32</td><td>9.01%</td></tr>
<tr><td>CIFAR-10N-Rand1 [64]</td><td>10</td><td>50,000/1,000/9,000</td><td>32x32</td><td>17.23%</td></tr>
<tr><td>CIFAR-10N-Worst [64]</td><td>10</td><td>50,000/1,000/9,000</td><td>32x32</td><td>40.21%</td></tr>
<tr><td><b>CIFAR-100-Fine</b> [31]</td><td>100</td><td>50,000/1,000/9,000</td><td>32x32</td><td>0%</td></tr>
<tr><td>CIFAR-100N-Fine [64]</td><td>100</td><td>50,000/1,000/9,000</td><td>32x32</td><td>40.20%</td></tr>
<tr><td><b>CIFAR-100-Coarse</b> [31]</td><td>20</td><td>50,000/1,000/9,000</td><td>32x32</td><td>0%</td></tr>
<tr><td>CIFAR-100N-Coarse [64]</td><td>20</td><td>50,000/1,000/9,000</td><td>32x32</td><td>26.40%</td></tr>
<tr><td><b>Clothing1M</b> [65]</td><td>14</td><td>24,637/7,465/5,395</td><td>256x256</td><td>0%</td></tr>
<tr><td>Clothing1M-Noisy [65]</td><td>14</td><td>24,637/7,465/5,395</td><td>256x256</td><td>38.26%</td></tr>
</table>
", + "image_path": "5f6463c2a9402f4959cefa897cbb995bb7253b636962c880f075ad90fe343eb7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 182, + 287, + 237 + ], + "lines": [ + { + "bbox": [ + 46, + 182, + 287, + 237 + ], + "spans": [ + { + "bbox": [ + 46, + 182, + 287, + 237 + ], + "type": "text", + "content": "Table 1. Dataset overview. Clean ones are shown in bold. The training set (clean or noisy labels) is used to train the classifier; the validation set (clean labels) is used for early stopping; the test set (clean labels) is used for evaluating classification and OOD detection performance. We always use clean labels for evaluation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 258, + 287, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 258, + 287, + 413 + ], + "spans": [ + { + "bbox": [ + 46, + 258, + 287, + 413 + ], + "type": "text", + "content": "input sizes, number of classes, and sources & levels of label noise - see Table 1. The recently released CIFAR-N dataset [64] provides noisy re-annotations of CIFAR-10 and CIFAR-100 collected via crowd-sourcing: each image was annotated by 3 people, and different noisy label sets were created for different label selection methods (majority voting, random selection, or worst label selection). Note that CIFAR-100-Fine and CIFAR-100-Coarse contain the same set of images - only the class definitions and labels differ. Clothing1M [65] is a large-scale dataset collected by scraping shopping websites. Although the raw Clothing1M contains over a million images, we consider the smaller subset of images for which there is both a noisy and clean label." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 414, + 287, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 414, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 46, + 414, + 287, + 533 + ], + "type": "text", + "content": "Synthetic noise For each real noisy label set, using the corresponding clean labels, we additionally create 2 synthetic counterparts with the same overall noise rate: one following a uniform (NCAR, class-independent) label noise model, and the other following a class-conditional label noise model with the exact same noise transition matrix as the real noise. We name these synthetic variants SU (Synthetic Uniform noise) and SCC (Synthetic Class-Conditional noise) - for example, from CIFAR-10N-Agg we create 2 synthetic versions, SU and SCC." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 534, + 287, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 287, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 287, + 700 + ], + "type": "text", + "content": "ODD Datasets For fair comparison, we use the same selection of OOD datasets for all models - the OOD datasets are therefore chosen such that there is minimal semantic overlap with any of the ID datasets. We include MNIST [9], SVHN [39], Textures [6] as they are commonly used as examples of \"far\"-OOD [67] (very different appearance and semantics than the ID dataset). As examples of more natural images, we also include EuroSAT-RGB [18], Food-101 [5], a sub-set of the Stanford Online Products [52], and a 12-class sub-set of ImageNet. Since some methods require an OOD validation set for hyperparameter tuning, half of these classes are randomly selected and held-out for this purpose. 
The other 6 ImageNet classes, and the other OOD datasets make up the OOD test set." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 701, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 286, + 712 + ], + "type": "text", + "content": "Evaluation metrics We evaluate OOD detectors'abil" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": "ity to separate ID vs. OOD samples in terms of the Area Under the Receiver Operating Characteristic Curve (AUROC), where images from the ID test set (e.g. CIFAR10 test set) are considered positive samples, and those from the OOD test set (e.g. SVHN test set) as negatives. This is the most commonly reported metric in the literature [25], and we denote it as AUROC" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "_{\\text{ID vs. OOD}}" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": ". In addition, unlike previous works, we separately measure the AUROC" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "_{\\text{correct vs. OOD}}" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": " (and AUROC" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "_{\\text{incorrect vs. OOD}}" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": "), where only correctly (or incorrectly) classified samples from the ID test set are considered - ideally, performance should be high on both metrics." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 204, + 545, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 204, + 545, + 346 + ], + "spans": [ + { + "bbox": [ + 304, + 204, + 545, + 346 + ], + "type": "text", + "content": "Architectures We include 3 architecture families: CNNs, MLPs and transformers. We select lightweight architectures which have shown competitive results when trained on small-scale datasets: ResNet18 [17], MLP-Mixer [58] and Compact Transformers [16]. Following the OpenOOD benchmark [67], we do not adopt any advanced training strategies besides standard data augmentation. For each training dataset, we repeat training with 3 random seeds, and save 2 model checkpoints: an early checkpoint (based on best validation accuracy) and the last checkpoint (after a pre-defined number of epochs has elapsed, allowing for convergence - differs per architecture)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 347, + 545, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 347, + 545, + 467 + ], + "spans": [ + { + "bbox": [ + 304, + 347, + 545, + 467 + ], + "type": "text", + "content": "Bird's eye view To summarize, we train 3 different classifier architectures on 22 datasets (4 clean, 6 with real label noise, 12 with synthetic label noise), with 3 random seeds and 2 checkpoints saved per model - adding up to 396 distinct classifiers. On top of each classifier, 20 different OOD detection methods are applied and evaluated on 7 OOD datasets. 
Throughout the paper, OOD detection performance is taken as the median across the 7 OOD datasets (see the supplementary for results and a discussion of the median vs. mean OOD detection performance)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 467, + 545, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 467, + 545, + 575 + ], + "spans": [ + { + "bbox": [ + 304, + 467, + 545, + 575 + ], + "type": "text", + "content": "Statistical significance tests When comparing pairs of methods or settings, we use the Almost Stochastic Order (ASO) test [7, 11] as implemented by Ulmer et al. [59]. This statistical test was specifically designed to compare deep learning models, making no distributional assumptions. We apply ASO with a significance level " + }, + { + "bbox": [ + 304, + 467, + 545, + 575 + ], + "type": "inline_equation", + "content": "\\alpha = 0.05" + }, + { + "bbox": [ + 304, + 467, + 545, + 575 + ], + "type": "text", + "content": " and report " + }, + { + "bbox": [ + 304, + 467, + 545, + 575 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{min}}[\\mathrm{A} > \\mathrm{B}]" + }, + { + "bbox": [ + 304, + 467, + 545, + 575 + ], + "type": "text", + "content": ". If " + }, + { + "bbox": [ + 304, + 467, + 545, + 575 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{min}}[\\mathrm{A} > \\mathrm{B}] \\geq 0.5" + }, + { + "bbox": [ + 304, + 467, + 545, + 575 + ], + "type": "text", + "content": " we cannot claim that method A is better than method B; the smaller " + }, + { + "bbox": [ + 304, + 467, + 545, + 575 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{min}}" + }, + { + "bbox": [ + 304, + 467, + 545, + 575 + ], + "type": "text", + "content": ", the more confident we can be that method A is superior." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 586, + 365, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 586, + 365, + 598 + ], + "spans": [ + { + "bbox": [ + 306, + 586, + 365, + 598 + ], + "type": "text", + "content": "6. Analysis" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 714 + ], + "type": "text", + "content": "We explore the effect of label noise on OOD detection, starting with an overall view of performance trends in Section 6.1, then looking at OOD detection in relation to classification performance in Section 6.2, delving into what works (and what doesn't) in Section 6.3, and raising important considerations about how/whether to make use of a clean validation set in Section 6.4. Section 6.5 extends results to a more practical setting. More detailed analyses and additional supporting figures are in the supplementary." 
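A sketch of how these metrics can be computed (assumed variable names; ID samples are treated as the positive class, and each metric is summarized by its median over the OOD test datasets, as above). Note that, since every ID test sample is either correctly or incorrectly classified, AUROC_ID vs. OOD decomposes, up to tie handling, as acc · AUROC_correct vs. OOD + (1 − acc) · AUROC_incorrect vs. OOD, where acc is the ID test accuracy; this is useful when reading Section 6.2.

```python
# Sketch of the evaluation protocol (hypothetical naming; inputs are NumPy
# arrays of detector scores, with ID samples as the positive class).
import numpy as np
from sklearn.metrics import roc_auc_score

def auroc(id_scores, ood_scores):
    labels = np.concatenate([np.ones_like(id_scores), np.zeros_like(ood_scores)])
    scores = np.concatenate([id_scores, ood_scores])
    return roc_auc_score(labels, scores)

def evaluate(id_scores, id_correct, ood_score_sets):
    """id_correct: boolean mask of correctly classified ID test samples;
    ood_score_sets: one score array per OOD test dataset."""
    per_dataset = {
        "ID vs. OOD":        [auroc(id_scores, s) for s in ood_score_sets],
        "correct vs. OOD":   [auroc(id_scores[id_correct], s) for s in ood_score_sets],
        "incorrect vs. OOD": [auroc(id_scores[~id_correct], s) for s in ood_score_sets],
    }
    # summarize each metric by its median over the OOD datasets, as in the paper
    return {k: float(np.median(v)) for k, v in per_dataset.items()}
```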
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22629" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 67, + 69, + 526, + 228 + ], + "blocks": [ + { + "bbox": [ + 67, + 69, + 526, + 228 + ], + "lines": [ + { + "bbox": [ + 67, + 69, + 526, + 228 + ], + "spans": [ + { + "bbox": [ + 67, + 69, + 526, + 228 + ], + "type": "table", + "html": "
training labelsCIFAR10CIFAR100-CoarseCIFAR100-FineClothing1M
AggRand1WorstcleanNSCCSUcleanNSCCSUcleanNSCCSU
methodcleanNSCCSUNSCCSUNSCCSU
GRAM94.4589.4989.1290.788.8289.1990.5288.688.7387.9882.0780.0582.179.282.9376.3180.2482.6491.0489.0794.7195.37
MDS96.0787.9392.492.9792.3789.2587.4586.7486.4989.280.0778.8982.8480.1280.174.9674.4873.4987.1290.9888.5892.38
VIM95.6589.991.8192.388.7588.684.4986.3187.2388.7584.2976.6180.047881.3775.3173.2473.9488.9983.0987.1790.14
MDSEns92.5783.8983.6283.7981.883.0680.3682.9584.0284.1179.2578.3177.4173.684.8577.4778.4379.8595.3695.4495.7895.69
KNN93.6390.0788.7590.1487.7486.6685.1186.383.7384.1484.1674.480.3975.8283.2975.6776.4871.3585.3285.584.5980.87
RMDS92.9289.3887.9488.1489.0785.7387.0484.0381.9982.3582.1475.9377.3674.8183.287675.7573.4875.8171.4378.2266.66
DICE9083.3384.1886.2488.528186.2182.7979.5879.0782.7977.6875.0170.4382.5276.5173.9268.4184.9675.7286.6982.89
ReAct90.9187.3286.6382.1689.7482.9681.9784.578.4180.1182.7973.0973.6270.3883.7673.5773.7167.5582.5773.180.2276.58
GEN91.8685.9985.4482.0889.8682.8980.8483.7581.8180.5782.6973.2571.4770.9981.3473.473.167.1183.9173.5779.7976.78
EBO91.3184.8785.7381.6289.8881.9377.8883.0481.3877.482.7472.9970.9367.8581.4173.6573.0167.4285.1976.3285.3176.64
SHE89.687.8184.3386.4886.6383.1683.0483.2480.0678.9880.4271.88070.1180.3869.6369.5666.6882.2978.0778.477.73
ODIN91.4787.7186.3182.4889.7982.2180.6884.0982.1380.0981.4273.170.8869.9783.5974.8572.1967.1683.3871.5977.7375.47
MLS91.2684.7685.5981.5788.8182.317883.5482.0180.0182.6672.9270.8569.1981.4573.6472.567.0383.372.7577.7475.54
TempScale91.6785.7685.0482.2585.0778.179.7882.8580.5180.1381.6371.6769.9469.3580.7572.6671.2866.9879.8268.7786.2874.45
ASH88.3384.7582.3781.6682.2976.4772.4885.2778.2675.4882.7871.1973.2168.4382.7474.1370.9567.4881.5374.8976.6375.74
OpenMax90.586.1283.4682.2683.0582.8779.1280.3975.6877.4181.1476.6972.6569.1780.1372.8275.667.6671.7469.2372.5574.36
MSP91.3485.1984.8782.4185.1382.2180.6882.4879.480.0980.5170.4268.8869.9779.8570.9269.6166.9277.5766.0272.4473.89
KLM90.8483.782.1581.6980.1381.8880.6474.7975.9376.8379.3770.269.2467.8179.7875.5269.8967.2577.2665.4965.2262.01
GradNorm86.2279.5577.177.8481.6679.8876.9684.1172.9672.8869.2966.2773.1767.4470.9565.6571.2663.2979.7175.3273.9377.52
RankFeat81.8383.5375.8673.1278.2975.0779.1482.5777.2575.7573.1564.669.5465.4468.7776.0770.2467.6469.1675.8569.9373.37
", + "image_path": "af65bec6ca3f55edae60a57194fc37b906b6a4d212b91a25bc7df68bb7a3793d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 235, + 547, + 281 + ], + "lines": [ + { + "bbox": [ + 46, + 235, + 547, + 281 + ], + "spans": [ + { + "bbox": [ + 46, + 235, + 547, + 281 + ], + "type": "text", + "content": "Table 2. Best-case OOD detection performance (AUROCID vs. OOD in %) per method (that is, after selecting the best architecture-seed-checkpoint combination for each training label set). N, SCC, and SU refer to the real and synthetic noisy label sets described in Section 5. The top-3 for each training dataset are highlighted in bold, and the top-1 is underlined. In red are scores " + }, + { + "bbox": [ + 46, + 235, + 547, + 281 + ], + "type": "inline_equation", + "content": "< {75}\\%" + }, + { + "bbox": [ + 46, + 235, + 547, + 281 + ], + "type": "text", + "content": " and in orange scores between 75 and 80%. Rows are sorted based on the total performance across columns." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 47, + 298, + 288, + 369 + ], + "blocks": [ + { + "bbox": [ + 47, + 298, + 288, + 369 + ], + "lines": [ + { + "bbox": [ + 47, + 298, + 288, + 369 + ], + "spans": [ + { + "bbox": [ + 47, + 298, + 288, + 369 + ], + "type": "image", + "image_path": "896adb955c27ac2a2771da422929e2ef9e25fc6813e3812b4ebfdb9c98d2b316.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 376, + 287, + 399 + ], + "lines": [ + { + "bbox": [ + 46, + 376, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 287, + 399 + ], + "type": "text", + "content": "Figure 2. Distribution of OOD detection performance across methods & models when training the classifier on different label sets." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 418, + 234, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 418, + 234, + 430 + ], + "spans": [ + { + "bbox": [ + 46, + 418, + 234, + 430 + ], + "type": "text", + "content": "6.1. Where there's noise there's trouble" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "spans": [ + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "text", + "content": "Figure 2 gives an overview of OOD detection performance for different training datasets and label noise settings. We see a clear drop in overall OOD detection performance when label noise is introduced in the training dataset, compared to training on a cleanly labelled dataset (green). Even with only " + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "text", + "content": " of incorrect CIFAR10 labels (CIFAR-Agg labels sets), the median " + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{IDvs.OOD}}" + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "text", + "content": " across all models drops by over " + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "text", + "content": ". 
In Table 2, for each method we report the best-case OOD detection performance for a given training label set. While most methods are able to reach " + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{IDvs.OOD}}" + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "text", + "content": " with a classifier trained on clean labels, the number of competitive methods falls with increasing label noise, especially at noise rates " + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "inline_equation", + "content": ">20\\%" + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "text", + "content": ". GRAM, KNN, MDS, MDSEsemble and VIM are the only methods able to reach " + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "inline_equation", + "content": "90+\\%" + }, + { + "bbox": [ + 45, + 437, + 287, + 664 + ], + "type": "text", + "content": " AUROC on at least one of the noisy datasets. Takeaway: Enter the elephant Label noise in the classifier's training data makes it more difficult for post-hoc OOD detection methods to flag unfamiliar samples at test-time, even in small-scale settings like CIFAR10." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 671, + 234, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 671, + 234, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 671, + 234, + 685 + ], + "type": "text", + "content": "6.2. Does accuracy tell the whole story?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "content": "The most obvious effect of label noise in the training data is a decrease in classification performance on ID test data. At" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 300, + 545, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 300, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 304, + 300, + 545, + 348 + ], + "type": "text", + "content": "the same time, previous works have remarked a strong relation between classification performance and OOD detection for popular post-hoc methods like MSP [14] and MLS [60]. We dig deeper. When does this relation hold and why?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 348, + 546, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 348, + 546, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 348, + 546, + 479 + ], + "type": "text", + "content": "For which methods does this relation hold? In Figure 3, we quantify the relationship between ID accuracy and " + }, + { + "bbox": [ + 304, + 348, + 546, + 479 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{IDvs.OD}}" + }, + { + "bbox": [ + 304, + 348, + 546, + 479 + ], + "type": "text", + "content": " in terms of Spearman correlation " + }, + { + "bbox": [ + 304, + 348, + 546, + 479 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 304, + 348, + 546, + 479 + ], + "type": "text", + "content": ". We find that correlation varies widely across methods, being the strongest for MSP, and is generally weaker for those which operate earlier in the network. 
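The correlation analysis itself is straightforward; a minimal sketch with hypothetical inputs:

```python
# Sketch of the Figure-3 style analysis: rank-correlating ID accuracy with
# AUROC_ID vs. OOD across trained classifiers, for a fixed OOD detector.
from scipy.stats import spearmanr

def accuracy_auroc_correlation(accuracies, aurocs):
    """accuracies / aurocs: one value per trained classifier
    (architecture x seed x checkpoint x training label set)."""
    rho, p_value = spearmanr(accuracies, aurocs)
    return rho, p_value
```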
We also note that for all methods except KNN and RMDS, the label noise setting makes OOD detection performance less predictable - and so does early stopping (cf. Section 6.4). This points to the distribution of ID scores playing an important role in OOD detection performance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 480, + 545, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 480, + 545, + 517 + ], + "spans": [ + { + "bbox": [ + 304, + 480, + 545, + 517 + ], + "type": "text", + "content": "When it does - why? We provide a simple observation which is lacking in prior work: methods whose OOD detection performance predictably degrades along-" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 534, + 545, + 647 + ], + "blocks": [ + { + "bbox": [ + 306, + 534, + 545, + 647 + ], + "lines": [ + { + "bbox": [ + 306, + 534, + 545, + 647 + ], + "spans": [ + { + "bbox": [ + 306, + 534, + 545, + 647 + ], + "type": "image", + "image_path": "b1d33226e2a35de3fd1a6f3f6707c35b2922d9aff889460e23b1678bfd92f613.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 654, + 547, + 711 + ], + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 711 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 711 + ], + "type": "text", + "content": "Figure 3. Does OOD detection performance (AUROC" + }, + { + "bbox": [ + 304, + 654, + 547, + 711 + ], + "type": "inline_equation", + "content": "_{\\text{ID vs. OOD}}" + }, + { + "bbox": [ + 304, + 654, + 547, + 711 + ], + "type": "text", + "content": ") correlate with ID classification performance (accuracy)? We measure the rank correlation across different architectures, seeds, checkpoints, and datasets for different label sets. All results shown here are statistically significant (" + }, + { + "bbox": [ + 304, + 654, + 547, + 711 + ], + "type": "inline_equation", + "content": "p < 0.001" + }, + { + "bbox": [ + 304, + 654, + 547, + 711 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "22630" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 69, + 288, + 155 + ], + "blocks": [ + { + "bbox": [ + 47, + 69, + 288, + 155 + ], + "lines": [ + { + "bbox": [ + 47, + 69, + 288, + 155 + ], + "spans": [ + { + "bbox": [ + 47, + 69, + 288, + 155 + ], + "type": "image", + "image_path": "45c2dac7ad1409de0c8224e809bab08eefc1309e0476a4fd22144b8ae4b1aabc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 161, + 288, + 206 + ], + "lines": [ + { + "bbox": [ + 46, + 161, + 288, + 206 + ], + "spans": [ + { + "bbox": [ + 46, + 161, + 288, + 206 + ], + "type": "text", + "content": "Figure 4. Relationship between ID classification performance and OOD detection performance, considering all ID test samples (top) or only incorrectly classified ones (bottom) in the AUROC metric. Each point corresponds to a single model." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "spans": [ + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": "side classification accuracy are characterized by a high " + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{correct}}" + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": " vs. OOD and a low " + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{incorrect}}" + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": " vs. OOD. On clean, easy datasets like CIFAR10, they exhibit strong OOD detection performance as there are few incorrectly predicted ID samples in the test set (thus the " + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{incorrect}}" + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": " vs. OOD term is negligible in the overall " + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{ID}}" + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": " vs. OOD) - however, when the number of incorrect prediction grows, the low " + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{incorrect}}" + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": " vs. OOD becomes a more significant factor. Importantly and as exemplified by Figure 4, for all methods, " + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{incorrect}}" + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": " vs. OOD is not (or only weakly, " + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\rho < 0.2" + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": ") correlated with classification accuracy. MSP is the most clear-cut example, with a median " + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{incorrect}}" + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": " vs. OOD of around 0.5 across all dataset-architecture-seed-checkpoint combinations (bottom left of Figure 4) - that is, MSP often is no better (or worse) than a random detector at separating ID mistakes and OOD inputs, no matter how accurate the underlying classifier is. The Top-4 methods in Table 2 are the only ones with a median " + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{incorrect}}" + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": " vs. 
OOD " + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\geq 0.6" + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": " - none of the other methods exceed a median " + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\mathrm{AUROC}_{\\mathrm{incorrect}}" + }, + { + "bbox": [ + 46, + 231, + 287, + 470 + ], + "type": "text", + "content": " vs. OOD of 0.55 - see Figure 1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 472, + 287, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 472, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 287, + 568 + ], + "type": "text", + "content": "Takeaway: Would your OOD detector be better off as a failure detector? Accuracy correlating with OOD detection performance is partly symptomatic of many seemingly effective methods being unable to separate incorrectly classified ID samples from OOD samples - a bottleneck for robustness to imperfect classification. Claims that post-hoc OOD detection can be improved by simply improving the underlying classifier [60] overlook this fundamental issue." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": "It's not just about the noise rate We find that for a fixed noise rate in a given dataset, different types/models of label noise yield comparable classification accuracy (" + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{min}} \\geq 0.5" + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": " for all pair-wise comparisons), yet have different effects on OOD performance. Indeed, real label noise is better handled than the same level of synthetic by most methods, with SU labels being the most challenging - this trend is clear in Figure 2. Figure 5 shows an example of how different noise types and checkpointing strategies shape the magnitude and spread of logits. Intuitively, when the noise is spread randomly across samples (SU noise model), it is more difficult to learn which kinds of images or classes to be uncer" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 306, + 69, + 547, + 133 + ], + "blocks": [ + { + "bbox": [ + 306, + 69, + 547, + 133 + ], + "lines": [ + { + "bbox": [ + 306, + 69, + 547, + 133 + ], + "spans": [ + { + "bbox": [ + 306, + 69, + 547, + 133 + ], + "type": "image", + "image_path": "14c178369c4c0362f36de7e815ae9f15ffca1efd88177ac79bc982c814ed471d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 138, + 545, + 161 + ], + "lines": [ + { + "bbox": [ + 305, + 138, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 305, + 138, + 545, + 161 + ], + "type": "text", + "content": "Figure 5. Max Logit ID and OOD score statistics across models trained on Clothing1M, for different noise types & checkpointing." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 183, + 545, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 183, + 545, + 255 + ], + "spans": [ + { + "bbox": [ + 304, + 183, + 545, + 255 + ], + "type": "text", + "content": "tain about, leading to consistently lower-confidence predictions across all ID samples (low median, low spread). Conversely, when label noise is more concentrated for certain classes (SCC) and/or for certain features (real noise), the classifier can learn to be more confident in some parts of the input space than others (higher median, higher spread)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 255, + 545, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 255, + 545, + 339 + ], + "spans": [ + { + "bbox": [ + 304, + 255, + 545, + 339 + ], + "type": "text", + "content": "Takeaway: Faking it is better than ignoring it Uniform (synthetic) label noise in the training data tends to degrade OOD detection more strongly than class-dependant (synthetic) and instance-dependent (real) label noise. We encourage the use of synthetic uniform labels to evaluate the worst-case performance of OOD detectors, as they can be easily generated for any image classification dataset." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 348, + 491, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 348, + 491, + 361 + ], + "spans": [ + { + "bbox": [ + 305, + 348, + 491, + 361 + ], + "type": "text", + "content": "6.3. Design features which hurt or help" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 366, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 545, + 713 + ], + "type": "text", + "content": "Why are the winners the best? In terms of design features, the methods with the strongest performance in a label noise setting have a distance-based scoring function, and take features as input rather than class probabilities. SHE is the only OOD detector satisfying both criteria which doesn't sit at the top of the pile in Table 2 - we attribute its lower performance to two factors: it summarizes the ID dataset only with class-wise means (which may be overly reductive in a label noise setting where variance is larger), and it only considers correctly predicted samples when computing them (which may be small in number if the classifier is inaccurate or the number of classes is high). In contrast, GRAM which includes higher-order raw moments to describe ID data statistics is the top-1 method in Table 2. In the comparison of Figure 6, GRAM and MDSEnsemble - the only methods in our benchmark which incorporate features at different depths in the network - stand out as having the \"flattest\" accuracy-AUROC curves, which is especially beneficial when the training dataset is inherently difficult (e.g. CIFAR100 due to fine-grained labels or Clothing1M due to the image complexity and diversity). However, we note that the performance of MDSEnsemble and GRAM is highly architecture-dependent - the best OOD detection performance is achieved with a ResNet18 classifier, while MLPlexer and CCT architectures give sub-par results (often sub-50% ie. even worse than a random detector). 
Whether this large performance variation is due to the layer types, feature dimensionality or other factors, and whether it can be remedied warrants further investigation." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "22631" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 68, + 545, + 190 + ], + "blocks": [ + { + "bbox": [ + 47, + 68, + 545, + 190 + ], + "lines": [ + { + "bbox": [ + 47, + 68, + 545, + 190 + ], + "spans": [ + { + "bbox": [ + 47, + 68, + 545, + 190 + ], + "type": "image", + "image_path": "16d051ef1305583d91ed6ff374eb8d3a3950bf775d031a5b80e1ee7fec6dcacf.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 198, + 546, + 220 + ], + "lines": [ + { + "bbox": [ + 46, + 198, + 546, + 220 + ], + "spans": [ + { + "bbox": [ + 46, + 198, + 546, + 220 + ], + "type": "text", + "content": "Figure 6. Relation between the drop in accuracy caused by noisy labels and the resulting drop in OOD detection performance across all 20 methods. Each point corresponds to a single model trained with noisy labels." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 242, + 286, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 242, + 286, + 361 + ], + "spans": [ + { + "bbox": [ + 46, + 242, + 286, + 361 + ], + "type": "text", + "content": "Takeaway: Distance is healthy Out of the 20 post-hoc OOD detectors in our benchmark, distance-based OOD detectors operating in feature space appear the most promising to cope with the problem of unreliable predictions. Intuitively, distance-based methods are more dissociated from the classifier's prediction, and more dependent on the content/appearance of ID images. In contrast, we did not find compelling evidence that methods targeting class logits or class probabilities for OOD detection are better suited for the noisy label setting." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": "Are there tricks that work? We consider 3 popular \"tricks\" aiming to better separate ID vs. OOD samples in logit or probability space - temperature scaling, input perturbation and sparsification - and assess their effectiveness in a noisy label setting (excluding cleanly trained models). 
To isolate the effect of Softmax temperature scaling and input perturbation, we introduce " + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathrm{ODIN}_{\\mathrm{notemp}}" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": " (ODIN with temperature " + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": " fixed to 1) and " + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathrm{ODIN}_{\\mathrm{nopert}}" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": " (perturbation magnitude " + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": " set to 0). We find that scaling " + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": " by maximizing likelihood on ID validation labels is detrimental (" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{min}}[\\mathrm{MSP} > \\mathrm{TempScale}] = 0.15" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": "), however picking " + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": " based OOD validation detection performance does make a statistically significant (though not practically significant) difference (" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{min}}[\\mathrm{ODIN}_{\\mathrm{nopert}} > \\mathrm{MSP}] = 0.05" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": "). Input perturbation does not help in a label noise setting: looking at the optimal " + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": " selected during " + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathrm{ODIN}_{\\mathrm{notemp}}" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": " 's automatic parameter tuning, we observe that as label noise rate increases, the more likely that " + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "inline_equation", + "content": "m = 0" + }, + { + "bbox": [ + 46, + 361, + 287, + 613 + ], + "type": "text", + "content": " is picked (no perturbation). As for feature or weight sparsification, we note that REACT and DICE are the most promising logit-based methods in the AUROCincorrect vs. OOD ranking of Figure 1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 623, + 253, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 623, + 253, + 635 + ], + "spans": [ + { + "bbox": [ + 47, + 623, + 253, + 635 + ], + "type": "text", + "content": "6.4. 
Let's not forget about the validation set" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 286, + 713 + ], + "type": "text", + "content": "Picking a model checkpoint While it is well-understood that early stopping is beneficial to classification accuracy when training a classifier with noisy labels [34], we investigate whether this extends to OOD detection performance. We compare the OOD detection performance for the 2 checkpointing strategies, and find that for almost all meth" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 242, + 545, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 242, + 545, + 314 + ], + "spans": [ + { + "bbox": [ + 304, + 242, + 545, + 314 + ], + "type": "text", + "content": "ods, early stopping is beneficial (" + }, + { + "bbox": [ + 304, + 242, + 545, + 314 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{min}}" + }, + { + "bbox": [ + 304, + 242, + 545, + 314 + ], + "type": "text", + "content": " [early>last] < 0.5). However, looking at Figure 6, we note that early stopping may increase the rate at which OOD detection performance drops due to label noise for a given drop in accuracy - to an extreme in the case of TempScaling. A closer look at Figure 5 gives some insight into its effect on the logits." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 318, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 318, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 318, + 546, + 713 + ], + "type": "text", + "content": "What about OOD detector parameter tuning? Many of the methods in our benchmark involve a set-up step where dataset-specific parameters are computed (e.g. statistics for ID samples) and/or a tuning step where hyperparameters are tuned to maximize OOD detection performance on a held-out validation OOD set. The set of (hyper)parameters for each method is outlined in the supplementary. Among these methods, some make use of classification labels during set-up/tuning - e.g. to compute statistics for each class. In a label noise setting, this raises the question of whether to use a clean validation set or the noisy training set for set-up/tuning, or whether this makes a difference. We compare both settings for the 6 methods in our benchmark making use of class labels during set-up: MDS, RMDS, MDSEsemble, GRAM, OpenMax and SHE, with results visualized in the supplementary. For SHE which computes the mean of features for each class during setup, there is no statistically significant difference between using clean validation labels or potentially noisy training labels, although the latter may be better in some cases " + }, + { + "bbox": [ + 304, + 318, + 546, + 713 + ], + "type": "inline_equation", + "content": "(\\epsilon_{\\mathrm{min}}[\\mathrm{SHE}_{\\mathrm{val}} > \\mathrm{SHE}_{\\mathrm{train}}] = 1" + }, + { + "bbox": [ + 304, + 318, + 546, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 318, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{min}}[\\mathrm{SHE}_{\\mathrm{train}} > \\mathrm{SHE}_{\\mathrm{val}}] = 0.63)" + }, + { + "bbox": [ + 304, + 318, + 546, + 713 + ], + "type": "text", + "content": ". 
For methods based on the Malahanobis score, using noisy training labels to compute class-wise feature means and tied covariance is better " + }, + { + "bbox": [ + 304, + 318, + 546, + 713 + ], + "type": "inline_equation", + "content": "(\\epsilon_{\\mathrm{min}}[\\mathrm{MDS}_{\\mathrm{train}} > \\mathrm{MDS}_{\\mathrm{val}}] = 0.19" + }, + { + "bbox": [ + 304, + 318, + 546, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 318, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{min}}[\\mathrm{RMDS}_{\\mathrm{train}} > \\mathrm{RMDS}_{\\mathrm{val}}] = 0)" + }, + { + "bbox": [ + 304, + 318, + 546, + 713 + ], + "type": "text", + "content": " - intuitively, the class-specific statistics are more accurate with more data. Common to these 3 methods is that the OOD score at test-time does not depend on the predicted class (likely to be incorrect in a label noise setting), but is rather based on distance to the closest class in feature space (regardless of what class is predicted). OpenMax computes the mean logit per class, only considering correctly predicted samples (labels are used to check correctness) - using a potentially" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "22632" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "content": "noisy training set yields consistently better performance " + }, + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "(\\epsilon_{\\mathrm{min}}[\\mathrm{OpenMax}_{\\mathrm{train}} > \\mathrm{OpenMax}_{\\mathrm{val}}] = 0)" + }, + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "content": ". Lastly, and in contrast to the other methods, GRAM benefits from using clean validation samples rather than a large number of noisy training samples for computing class-specific bounds of feature correlations " + }, + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "(\\epsilon_{\\mathrm{min}}[\\mathrm{GRAM}_{\\mathrm{val}} > \\mathrm{GRAM}_{\\mathrm{train}}] = 0.23)" + }, + { + "bbox": [ + 46, + 72, + 289, + 156 + ], + "type": "text", + "content": ". However, the performance gap between the two settings is small." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 160, + 289, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 160, + 289, + 258 + ], + "spans": [ + { + "bbox": [ + 46, + 160, + 289, + 258 + ], + "type": "text", + "content": "Takeaway: Clean isn't always better or possible The use of clean vs. noisy labels during label-based parameter tuning is an important consideration. For distance-based methods which compute class-wise statistics, it appears that quantity often trumps quality, even when over " + }, + { + "bbox": [ + 46, + 160, + 289, + 258 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 46, + 160, + 289, + 258 + ], + "type": "text", + "content": " of training labels are incorrect. 
This is promising for applications where a clean validation set is not available (e.g. medical imaging where labels are inherently subjective [28])." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 278, + 239, + 292 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 278, + 239, + 292 + ], + "spans": [ + { + "bbox": [ + 47, + 278, + 239, + 292 + ], + "type": "text", + "content": "6.5. What about a more realistic setting?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 301, + 289, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 301, + 289, + 625 + ], + "spans": [ + { + "bbox": [ + 46, + 301, + 289, + 625 + ], + "type": "text", + "content": "We have thus far studied OOD detection in a simple (but standard [67]) setting where the base classifier is trained from scratch, and where there is strong semantic and covariate shift between ID and OOD images. Yet in practice, pre-training is widely adopted, and distribution shifts may be much more subtle. We therefore extend our study of label noise to fine-grained semantic shift detection with a base classifier that has been pre-trained on ImageNet [8] before being trained on a dataset of interest. We follow the Semantic Shift Benchmark (SSB), where the goal is to detect unknown classes from a known dataset (e.g. held-out bird species from the CUB [61] dataset or held-out aircraft model variants from FGVC-Aircraft [38]). Using SSB splits, we train ResNet50s (pre-trained) on half of the classes from CUB/FGVC-Aircraft (448x448 images), and we evaluate post-hoc OOD detection performance on known classes from the test set (ID) vs. the remaining unseen classes (OOD) split into 3 increasingly difficult sets. Since clean vs. real noisy label pairs are not available, we inject synthetic label noise in the training set (SU noise model) and follow the same evaluation procedure as in previous sections. Fig. 7 summarizes its detrimental effect on fine-grained semantic shift detection across the 20 studied OOD detection methods: increasing label noise and \"difficulty\" of the OOD set act as orthogonal bottle-necks to detection performance. Increased label noise pulls AUROC" + }, + { + "bbox": [ + 46, + 301, + 289, + 625 + ], + "type": "inline_equation", + "content": "_{\\text{ID vs. OOD}}" + }, + { + "bbox": [ + 46, + 301, + 289, + 625 + ], + "type": "text", + "content": " and AUROC" + }, + { + "bbox": [ + 46, + 301, + 289, + 625 + ], + "type": "inline_equation", + "content": "_{\\text{incorrect vs. OOD}}" + }, + { + "bbox": [ + 46, + 301, + 289, + 625 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 301, + 289, + 625 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 46, + 301, + 289, + 625 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 630, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 288, + 715 + ], + "type": "text", + "content": "Takeaway: Limitations of post-hoc OOD detectors extend beyond toy settings Even in a more realistic setting where the base classifier has first been pre-trained on ImageNet and OOD samples are similar in appearance to the ID dataset, all 20 methods poorly separate incorrectly classified ID samples from OOD samples, and degrade when the classifier has been trained on noisy labels." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 305, + 69, + 547, + 223 + ], + "blocks": [ + { + "bbox": [ + 305, + 69, + 547, + 223 + ], + "lines": [ + { + "bbox": [ + 305, + 69, + 547, + 223 + ], + "spans": [ + { + "bbox": [ + 305, + 69, + 547, + 223 + ], + "type": "image", + "image_path": "890c31feb3aff5fdc415243edfb80ccf250df6d2bdac7ea35fbda2b154a8a0fa.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 229, + 547, + 275 + ], + "lines": [ + { + "bbox": [ + 305, + 229, + 547, + 275 + ], + "spans": [ + { + "bbox": [ + 305, + 229, + 547, + 275 + ], + "type": "text", + "content": "Figure 7. Each boxplot shows the performance distribution across 6 classifiers (3 seeds, 2 checkpoints) " + }, + { + "bbox": [ + 305, + 229, + 547, + 275 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 305, + 229, + 547, + 275 + ], + "type": "text", + "content": " 20 post-hoc methods, considering all ID test samples (top) or only incorrectly classified ones (bottom) in the AUROC metric." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 293, + 388, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 293, + 388, + 308 + ], + "spans": [ + { + "bbox": [ + 306, + 293, + 388, + 308 + ], + "type": "text", + "content": "7. Zooming out" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 314, + 547, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 314, + 547, + 493 + ], + "spans": [ + { + "bbox": [ + 304, + 314, + 547, + 493 + ], + "type": "text", + "content": "Study limitations and possible extensions We have focused on post-hoc OOD detection methods due to their pragmatic appeal and to maintain experimental feasibility. Extending this study to training-based OOD detection methods [71] would of course be valuable. Aligning with OOD benchmarks [67], we also trained the base classifiers with a standard discriminative objective. Alternative supervision schemes may also be considered, and the effect of pre-training (and on what?) would be interesting to further analyse in a label noise setting, as it has been shown to improve post-hoc OOD detection performance [2, 20, 33]. Lastly, the potential of noisy label removal [29, 43] or noise-robust learning [27, 63] techniques from the label noise literature (designed with classification performance in mind) for improving OOD detection would be a natural next step." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 494, + 548, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 494, + 548, + 625 + ], + "spans": [ + { + "bbox": [ + 304, + 494, + 548, + 625 + ], + "type": "text", + "content": "Conclusion We have explored the intersection between classification label noise and OOD detection, and conducted extensive experiments to extract new insights into the limitations of existing post-hoc methods. Our findings also echo the need to re-think the aims and evaluation of OOD detection in the context of safe deployment [26] (e.g. do we really want to exclude ID misclassifications from detection?). We hope that this work paves the way for future investigations which prioritize the robustness and applicability of OOD detection models in practical, imperfect classification scenarios which account for data uncertainty."
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 635, + 421, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 635, + 421, + 649 + ], + "spans": [ + { + "bbox": [ + 306, + 635, + 421, + 649 + ], + "type": "text", + "content": "8. Acknowledgements" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 655, + 547, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 655, + 547, + 704 + ], + "spans": [ + { + "bbox": [ + 305, + 655, + 547, + 704 + ], + "type": "text", + "content": "This work was supported by the Danish Data Science Academy, which is funded by the Novo Nordisk Foundation (NNF21SA0069429) and VILLUM FONDEN (40516). Thanks to the Pioneer Centre for AI (DNRF grant P1)." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "22633" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 286, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "text", + "content": "[1] Gorkem Algan and Ilkay Ulusoy. Image classification with deep learning in the presence of noisy labels: A survey. Knowledge-Based Systems, 215:106771, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 286, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 286, + 168 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 286, + 168 + ], + "type": "text", + "content": "[2] Anders Johan Andreassen, Yasaman Bahri, Behnam Neyshabur, and Rebecca Roelofs. The evolution of out-of-distribution robustness throughout fine-tuning. Transactions on Machine Learning Research, 2022. 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 171, + 286, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 171, + 286, + 213 + ], + "spans": [ + { + "bbox": [ + 53, + 171, + 286, + 213 + ], + "type": "text", + "content": "[3] Abhijit Bendale and Terrance E. Boult. Towards open set deep networks. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1563-1572, 2016. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 216, + 286, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 216, + 286, + 248 + ], + "spans": [ + { + "bbox": [ + 53, + 216, + 286, + 248 + ], + "type": "text", + "content": "[4] Julian Bitterwolf, Maximilian Mueller, and Matthias Hein. In or out? fixing imagenet out-of-distribution detection evaluation. In ICML, 2023. 
1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 251, + 286, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 251, + 286, + 293 + ], + "spans": [ + { + "bbox": [ + 53, + 251, + 286, + 293 + ], + "type": "text", + "content": "[5] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 – mining discriminative components with random forests. In European Conference on Computer Vision, 2014. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 296, + 286, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 296, + 286, + 339 + ], + "spans": [ + { + "bbox": [ + 53, + 296, + 286, + 339 + ], + "type": "text", + "content": "[6] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 3606-3613, 2014. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 342, + 286, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 342, + 286, + 385 + ], + "spans": [ + { + "bbox": [ + 53, + 342, + 286, + 385 + ], + "type": "text", + "content": "[7] Eustasio Del Barrio, Juan A Cuesta-Albertos, and Carlos Matrán. An optimal transportation approach for assessing almost stochastic order. In The Mathematics of the Uncertain, pages 33-44. Springer, 2018. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 387, + 286, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 387, + 286, + 430 + ], + "spans": [ + { + "bbox": [ + 53, + 387, + 286, + 430 + ], + "type": "text", + "content": "[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, 2009. 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 432, + 286, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 432, + 286, + 464 + ], + "spans": [ + { + "bbox": [ + 53, + 432, + 286, + 464 + ], + "type": "text", + "content": "[9] Li Deng. The mnist database of handwritten digit images for machine learning research [best of the web]. IEEE Signal Processing Magazine, 29(6):141-142, 2012. 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 466, + 286, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 466, + 286, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 466, + 286, + 510 + ], + "type": "text", + "content": "[10] Andrija Djurisic, Nebojsa Bozanic, Arjun Ashok, and Rosanne Liu. Extremely simple activation shaping for out-of-distribution detection. In The Eleventh International Conference on Learning Representations, 2023. 1, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 512, + 286, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 286, + 577 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 286, + 577 + ], + "type": "text", + "content": "[11] Rotem Dror, Segev Shlomov, and Roi Reichart. Deep dominance - how to properly compare deep neural models. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28-August 2, 2019, Volume 1: Long Papers, pages 2773- 2785. Association for Computational Linguistics, 2019. 
4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 579, + 286, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 579, + 286, + 611 + ], + "spans": [ + { + "bbox": [ + 48, + 579, + 286, + 611 + ], + "type": "text", + "content": "[12] Stanislav Fort, Jie Ren, and Balaji Lakshminarayanan. Exploring the limits of out-of-distribution detection. In Advances in Neural Information Processing Systems, 2021. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 613, + 286, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 286, + 655 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 286, + 655 + ], + "type": "text", + "content": "[13] Benoit Frenay and Michel Verleysen. Classification in the presence of label noise: A survey. IEEE Transactions on Neural Networks and Learning Systems, 25(5):845-869, 2014. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 658, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 286, + 712 + ], + "type": "text", + "content": "[14] Ido Galil, Mohammed Dabbah, and Ran El-Yaniv. A framework for benchmarking class-out-of-distribution detection and its application toImagenet. In The Eleventh International Conference on Learning Representations, 2023. 1, 2, 3, 5" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[15] Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q. Weinberger. On calibration of modern neural networks. In Proceedings of the 34th International Conference on Machine Learning, pages 1321-1330. PMLR, 2017. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 119, + 545, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 119, + 545, + 151 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 545, + 151 + ], + "type": "text", + "content": "[16] Ali Hassani, Steven Walton, Nikhil Shah, Abulikemu Abuduweili, Jiachen Li, and Humphrey Shi. Escaping the big data paradigm with compact transformers. 2021. 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 153, + 545, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 153, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 307, + 153, + 545, + 195 + ], + "type": "text", + "content": "[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 198, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 198, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 198, + 545, + 251 + ], + "type": "text", + "content": "[18] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. Eurosat: A novel dataset and deep learning benchmark for land use and land cover classification. IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, 2019. 
4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 255, + 545, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 255, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 255, + 545, + 308 + ], + "type": "text", + "content": "[19] Dan Hendrycks and Kevin Gimpel. A baseline for detecting misclassified and out-of-distribution examples in neural networks. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net, 2017. 1, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 310, + 545, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 310, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 307, + 310, + 545, + 353 + ], + "type": "text", + "content": "[20] Dan Hendrycks, Kimin Lee, and Mantas Mazeika. Using pre-training can improve model robustness and uncertainty. In Proceedings of the 36th International Conference on Machine Learning, pages 2712-2721. PMLR, 2019. 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 355, + 545, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 355, + 545, + 418 + ], + "spans": [ + { + "bbox": [ + 307, + 355, + 545, + 418 + ], + "type": "text", + "content": "[21] Dan Hendrycks, Steven Basart, Mantas Mazeika, Andy Zou, Joseph Kwon, Mohammadreza Mostajabi, Jacob Steinhardt, and Dawn Song. Scaling out-of-distribution detection for real-world settings. In Proceedings of the 39th International Conference on Machine Learning, pages 8759-8773. PMLR, 2022. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "type": "text", + "content": "[22] Dan Hendrycks, Steven Basart, Mantas Mazeika, Andy Zou, Joe Kwon, Mohammadreza Mostajabi, Jacob Steinhardt, and Dawn Song. Scaling out-of-distribution detection for real-world settings. ICML, 2022. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 468, + 545, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 468, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 307, + 468, + 545, + 521 + ], + "type": "text", + "content": "[23] R. Huang and Y. Li. Mos: Towards scaling out-of-distribution detection for large semantic space. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8706-8715, Los Alamitos, CA, USA, 2021. IEEE Computer Society. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 523, + 545, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 523, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 307, + 523, + 545, + 566 + ], + "type": "text", + "content": "[24] Rui Huang, Andrew Geng, and Yixuan Li. On the importance of gradients for detecting distributional shifts in the wild. In Advances in Neural Information Processing Systems, 2021. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 568, + 545, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 545, + 632 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 545, + 632 + ], + "type": "text", + "content": "[25] Galadrielle Humblot-Renaux, Sergio Escalera, and Thomas B. Moeslund. Beyond auroc & co. for evaluating out-of-distribution detection performance. 
In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 3881-3890, 2023. 4" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 635, + 545, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 666 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 666 + ], + "type": "text", + "content": "[26] Paul F Jaeger et al. A call to reflect on evaluation practices for failure detection in image classification. In ICLR, 2023. 8" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "text", + "content": "[27] Lu Jiang, Di Huang, Mason Liu, and Weilong Yang. Beyond synthetic noise: Deep learning on controlled noisy labels. In Proceedings of the 37th International Conference on Machine Learning, pages 4804-4815. PMLR, 2020. 8" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "22634" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[28] Davood Karimi, Haoran Dou, Simon K. Warfield, and Ali Gholipour. Deep learning with noisy labels: Exploring techniques and remedies in medical image analysis. Medical Image Analysis, 65:101759, 2020. 1, 2, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 117, + 287, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 117, + 287, + 161 + ], + "spans": [ + { + "bbox": [ + 48, + 117, + 287, + 161 + ], + "type": "text", + "content": "[29] Taehyeon Kim, Jongwoo Ko, sangwook Cho, JinHwan Choi, and Se-Young Yun. Fine samples for learning with noisy labels. In Advances in Neural Information Processing Systems, pages 24137–24149. Curran Associates, Inc., 2021. 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 162, + 288, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 162, + 288, + 216 + ], + "spans": [ + { + "bbox": [ + 48, + 162, + 288, + 216 + ], + "type": "text", + "content": "[30] Konstantin Kirchheim, Marco Filax, and Frank Ortmeier. Pytorch-ood: A library for out-of-distribution detection based on pytorch. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 4351-4360, 2022. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 217, + 288, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 217, + 288, + 249 + ], + "spans": [ + { + "bbox": [ + 48, + 217, + 288, + 249 + ], + "type": "text", + "content": "[31] Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. Technical Report 0, University of Toronto, Toronto, Ontario, 2009. 
2, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 250, + 288, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 250, + 288, + 303 + ], + "spans": [ + { + "bbox": [ + 48, + 250, + 288, + 303 + ], + "type": "text", + "content": "[32] Kimin Lee, Kibok Lee, Honglak Lee, and Jinwoo Shin. A simple unified framework for detecting out-of-distribution samples and adversarial attacks. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2018. 1, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 304, + 288, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 304, + 288, + 360 + ], + "spans": [ + { + "bbox": [ + 48, + 304, + 288, + 360 + ], + "type": "text", + "content": "[33] Jingyao Li, Pengguang Chen, Zexin He, Shaozuo Yu, Shu Liu, and Jiaya Jia. Rethinking out-of-distribution (ood) detection: Masked image modeling is all you need. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11578-11589, 2023. 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 360, + 288, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 360, + 288, + 425 + ], + "spans": [ + { + "bbox": [ + 48, + 360, + 288, + 425 + ], + "type": "text", + "content": "[34] Mingchen Li, Mahdi Soltanolkotabi, and Samet Oymak. Gradient descent with early stopping is provably robust to label noise for overparameterized neural networks. In Proceedings of the Twenty Third International Conference on Artificial Intelligence and Statistics, pages 4313-4324. PMLR, 2020. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 426, + 288, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 426, + 288, + 469 + ], + "spans": [ + { + "bbox": [ + 48, + 426, + 288, + 469 + ], + "type": "text", + "content": "[35] Shiyu Liang, Yixuan Li, and R. Srikant. Enhancing the reliability of out-of-distribution image detection in neural networks. In International Conference on Learning Representations, 2018. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 470, + 288, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 470, + 288, + 514 + ], + "spans": [ + { + "bbox": [ + 48, + 470, + 288, + 514 + ], + "type": "text", + "content": "[36] Sheng Liu, Jonathan Niles-Weed, Narges Razavian, and Carlos Fernandez-Granda. Early-learning regularization prevents memorization of noisy labels. Advances in Neural Information Processing Systems, 33, 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 514, + 288, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 288, + 557 + ], + "type": "text", + "content": "[37] Weitang Liu, Xiaoyun Wang, John Owens, and Yixuan Li. Energy-based out-of-distribution detection. In Advances in Neural Information Processing Systems, pages 21464-21475. Curran Associates, Inc., 2020. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 558, + 288, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 288, + 591 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 288, + 591 + ], + "type": "text", + "content": "[38] S. Maji, J. Kannala, E. Rahtu, M. Blaschko, and A. Vedaldi. Fine-grained visual classification of aircraft. Technical report, 2013. 
8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 592, + 288, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 592, + 288, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 592, + 288, + 646 + ], + "type": "text", + "content": "[39] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, and Andrew Y. Ng. Reading Digits in Natural Images with Unsupervised Feature Learning. In NIPS Workshop on Deep Learning and Unsupervised Feature Learning 2011, 2011. 2, 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 288, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 690 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 690 + ], + "type": "text", + "content": "[40] Amanda Olmin and Fredrik Lindsten. Robustness and reliability when training with noisy labels. In Proceedings of The 25th International Conference on Artificial Intelligence and Statistics, pages 922–942. PMLR, 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "type": "text", + "content": "[41] Diane Oyen, Michal Kucer, Nick Hengartner, and Har Simrat Singh. Robustness to label noise depends on the shape of" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "the noise distribution. In Advances in Neural Information Processing Systems, 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 96, + 547, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 547, + 151 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 547, + 151 + ], + "type": "text", + "content": "[42] Chao Pan, Bo Yuan, Wei Zhou, and Xin Yao. Towards robust uncertainty estimation in the presence of noisy labels. In Artificial Neural Networks and Machine Learning - ICANN 2022, pages 673-684, Cham, 2022. Springer International Publishing. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 152, + 547, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 547, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 547, + 206 + ], + "type": "text", + "content": "[43] Geoff Pleiss, Tianyi Zhang, Ethan Elenberg, and Kilian Q Weinberger. Identifying mislabeled data using the area under the margin ranking. In Advances in Neural Information Processing Systems, pages 17044-17056. Curran Associates, Inc., 2020. 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 207, + 547, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 207, + 547, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 207, + 547, + 251 + ], + "type": "text", + "content": "[44] Jie Ren, Stanislav Fort, Jeremiah Liu, Abhijit Guha Roy, Shreyas Padhy, and Balaji Lakshminarayanan. A simple fix to mahalanobis distance for improving near-ood detection, 2021. 
3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 252, + 547, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 252, + 547, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 252, + 547, + 285 + ], + "type": "text", + "content": "[45] David Rolnick, Andreas Veit, Serge Belongie, and Nir Shavit. Deep learning is robust to massive label noise, 2018. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 286, + 547, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 286, + 547, + 352 + ], + "spans": [ + { + "bbox": [ + 307, + 286, + 547, + 352 + ], + "type": "text", + "content": "[46] Ragav Sachdeva, Filipe R. Cordeiro, Vasileios Belagiannis, Ian Reid, and Gustavo Carneiro. Evidentialmix: Learning with combined open-set and closed-set noisy labels. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 3607-3615, 2021. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 354, + 547, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 547, + 419 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 547, + 419 + ], + "type": "text", + "content": "[47] Mohammadreza Salehi, Hossein Mirzaei, Dan Hendrycks, Yixuan Li, Mohammad Hossein Rohban, and Mohammad Sabokrou. A unified survey on anomaly, novelty, open-set, and out of-distribution detection: Solutions and future challenges. Transactions on Machine Learning Research, 2022. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 420, + 547, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 420, + 547, + 464 + ], + "spans": [ + { + "bbox": [ + 307, + 420, + 547, + 464 + ], + "type": "text", + "content": "[48] Chandramouli Shama Sastry and Sageev Oore. Detecting out-of-distribution examples with Gram matrices. In Proceedings of the 37th International Conference on Machine Learning, pages 8491-8501. PMLR, 2020. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 466, + 547, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 466, + 547, + 532 + ], + "spans": [ + { + "bbox": [ + 307, + 466, + 547, + 532 + ], + "type": "text", + "content": "[49] Vikash Sehwag, Arjun Nitin Bhagoji, Liwei Song, Chawin Sitawarin, Daniel Cullina, Mung Chiang, and Prateek Mittal. Analyzing the robustness of open-world machine learning. In Proceedings of the 12th ACM Workshop on Artificial Intelligence and Security, page 105–116, New York, NY, USA, 2019. Association for Computing Machinery. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 533, + 547, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 533, + 547, + 565 + ], + "spans": [ + { + "bbox": [ + 307, + 533, + 547, + 565 + ], + "type": "text", + "content": "[50] Hwanjun Song, Minseok Kim, and Jae-Gil Lee. SELFIE: Refurbishing unclean samples for robust deep learning. In ICML, 2019. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 567, + 547, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 567, + 547, + 610 + ], + "spans": [ + { + "bbox": [ + 307, + 567, + 547, + 610 + ], + "type": "text", + "content": "[51] Hwanjun Song, Minseok Kim, Dongmin Park, Yooju Shin, and Jae-Gil Lee. Learning from noisy labels with deep neural networks: A survey. IEEE Transactions on Neural Networks and Learning Systems, pages 1-19, 2022. 
2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 611, + 547, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 611, + 547, + 655 + ], + "spans": [ + { + "bbox": [ + 307, + 611, + 547, + 655 + ], + "type": "text", + "content": "[52] Hyun Oh Song, Yu Xiang, Stefanie Jegelka, and Silvio Savarese. Deep metric learning via lifted structured feature embedding. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 656, + 547, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 656, + 547, + 690 + ], + "spans": [ + { + "bbox": [ + 307, + 656, + 547, + 690 + ], + "type": "text", + "content": "[53] Yue Song, Nicu Sebe, and Wei Wang. Rankfeat: Rank-1 feature removal for out-of-distribution detection. In Advances in Neural Information Processing Systems, 2022. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "type": "text", + "content": "[54] Alexander Sorokin and David Forsyth. Utility data annotation with amazon mechanical turk. In 2008 IEEE Computer" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "22635" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 714 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "text", + "content": "Society Conference on Computer Vision and Pattern Recognition Workshops, pages 1-8, 2008. 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 139 + ], + "type": "text", + "content": "[55] Yiyou Sun and Yixuan Li. Dice: Leveraging sparsification for out-of-distribution detection. In Computer Vision – ECCV 2022, pages 691–708, Cham, 2022. Springer Nature Switzerland. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 184 + ], + "type": "text", + "content": "[56] Yiyou Sun, Chuan Guo, and Yixuan Li. React: Out-of-distribution detection with rectified activations. In Advances in Neural Information Processing Systems, pages 144–157. Curran Associates, Inc., 2021. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "type": "text", + "content": "[57] Yiyou Sun, Yifei Ming, Xiaojin Zhu, and Yixuan Li. Out-of-distribution detection with deep nearest neighbors. 
In Proceedings of the 39th International Conference on Machine Learning, pages 20827-20840. PMLR, 2022. 1, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 232, + 287, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 232, + 287, + 297 + ], + "spans": [ + { + "bbox": [ + 48, + 232, + 287, + 297 + ], + "type": "text", + "content": "[58] Ilya Tolstikhin, Neil Houlsby, Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Thomas Unterthiner, Jessica Yung, Andreas Peter Steiner, Daniel Keysers, Jakob Uszkoreit, Mario Lucic, and Alexey Dosovitskiy. MLP-mixer: An all-MLP architecture for vision. In Advances in Neural Information Processing Systems, 2021. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 298, + 287, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 298, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 48, + 298, + 287, + 342 + ], + "type": "text", + "content": "[59] Dennis Ulmer, Christian Hardmeier, and Jes Frellsen. deep-significance - easy and meaningful statistical significance testing in the age of neural networks. arXiv preprint arXiv:2204.06815, 2022. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "spans": [ + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "type": "text", + "content": "[60] Sagar Vaze, Kai Han, Andrea Vedaldi, and Andrew Zisserman. Open-set recognition: A good closed-set classifier is all you need. In International Conference on Learning Representations, 2022. 1, 2, 3, 5, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 388, + 287, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 287, + 432 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 287, + 432 + ], + "type": "text", + "content": "[61] C. Wah, S. Branson, P. Welinder, P. Perona, and S. Belongie. The caltech-ucsd birds-200-2011 dataset. Technical Report CNS-TR-2011-001, California Institute of Technology, 2011. 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 434, + 287, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 434, + 287, + 488 + ], + "spans": [ + { + "bbox": [ + 48, + 434, + 287, + 488 + ], + "type": "text", + "content": "[62] H. Wang, Z. Li, L. Feng, and W. Zhang. Vim: Out-of-distribution with virtual-logit matching. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4911-4920, Los Alamitos, CA, USA, 2022. IEEE Computer Society. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "type": "text", + "content": "[63] Hongxin Wei, Huiping Zhuang, Renchunzi Xie, Lei Feng, Gang Niu, Bo An, and Yixuan Li. Mitigating memorization of noisy labels by clipping the model prediction. In Proceedings of the 40th International Conference on Machine Learning. JMLR.org, 2023. 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 545, + 287, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 545, + 287, + 599 + ], + "spans": [ + { + "bbox": [ + 48, + 545, + 287, + 599 + ], + "type": "text", + "content": "[64] Jiaheng Wei, Zhaowei Zhu, Hao Cheng, Tongliang Liu, Gang Niu, and Yang Liu. 
Learning with noisy labels revisited: A study using real-world human annotations. In International Conference on Learning Representations, 2022. 2, 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 601, + 287, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 287, + 645 + ], + "type": "text", + "content": "[65] Tong Xiao, Tian Xia, Yi Yang, Chang Huang, and Xiaogang Wang. Learning from massive noisy labeled data for image classification. In 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2691-2699, 2015. 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 287, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 679 + ], + "type": "text", + "content": "[66] Jingkang Yang, Kaiyang Zhou, Yixuan Li, and Ziwei Liu. Generalized out-of-distribution detection: A survey. arXiv preprint arXiv:2110.11334, 2021. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 681, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 287, + 714 + ], + "type": "text", + "content": "[67] Jingkang Yang, Pengyun Wang, Dejian Zou, Zitang Zhou, Kunyuan Ding, WENXUAN PENG, Haoqi Wang, Guangyao Chen, Bo Li, Yiyou Sun, Xuefeng Du, Kaiyang" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 350 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 127 + ], + "type": "text", + "content": "Zhou, Wayne Zhang, Dan Hendrycks, Yixuan Li, and Zwei Liu. OpenOOD: Benchmarking generalized out-of-distribution detection. In Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2022. 1, 2, 3, 4, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 129, + 545, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 172 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 172 + ], + "type": "text", + "content": "[68] Xiaoyong Yuan, Pan He, Qile Zhu, and Xiaolin Li. Adversarial examples: Attacks and defenses for deep learning. IEEE Transactions on Neural Networks and Learning Systems, 30 (9):2805-2824, 2019. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "text", + "content": "[69] Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning requires rethinking generalization. In International Conference on Learning Representations, 2017. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 219, + 545, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 219, + 545, + 283 + ], + "spans": [ + { + "bbox": [ + 307, + 219, + 545, + 283 + ], + "type": "text", + "content": "[70] Jinsong Zhang, Qiang Fu, Xu Chen, Lun Du, Zelin Li, Gang Wang, xiaoguang Liu, Shi Han, and Dongmei Zhang. 
Out-of-distribution detection based on in-distribution data patterns memorization with modern hopfield energy. In The Eleventh International Conference on Learning Representations, 2023. 1, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 285, + 545, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 285, + 545, + 350 + ], + "spans": [ + { + "bbox": [ + 307, + 285, + 545, + 350 + ], + "type": "text", + "content": "[71] Jingyang Zhang, Jingkang Yang, Pengyun Wang, Haoqi Wang, Yueqian Lin, Haoran Zhang, Yiyou Sun, Xuefeng Du, Kaiyang Zhou, Wayne Zhang, Yixuan Li, Ziwei Liu, Yiran Chen, and Hai Li. Openood v1.5: Enhanced benchmark for out-of-distribution detection. arXiv preprint arXiv:2306.09301, 2023.8" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "22636" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/85746221-1e2b-4579-be8b-1626ff544e58_content_list.json b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/85746221-1e2b-4579-be8b-1626ff544e58_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..b2df4da8c18a09932a3ab6f7596f2663059507b7 --- /dev/null +++ b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/85746221-1e2b-4579-be8b-1626ff544e58_content_list.json @@ -0,0 +1,1957 @@ +[ + { + "type": "text", + "text": "A Pedestrian is Worth One Prompt: Towards Language Guidance Person Re-Identification", + "text_level": 1, + "bbox": [ + 114, + 128, + 854, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zexian Yang $^{1,2}$ Dayan Wu $^{1*}$ Chenming Wu $^{3}$ Zheng Lin $^{1}$ Jingzi Gu $^{1}$ Weiping Wang $^{1,2}$", + "bbox": [ + 101, + 202, + 867, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Institute of Information Engineering, Chinese Academy of Sciences", + "bbox": [ + 214, + 220, + 756, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ School of Cyber Security, University of Chinese Academy of Sciences $^{3}$ Baidu Inc", + "bbox": [ + 151, + 238, + 818, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{yangzexian,wudayan,linzheng,gujingzi,wangweiping}@iie.ac.cn,wuchenming@baidu.com", + "bbox": [ + 127, + 258, + 841, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 308, + 313, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Extensive advancements have been made in person ReID through the mining of semantic information. Nevertheless, existing methods that utilize semantic-parts from a single image modality do not explicitly achieve this goal. Whiteness the impressive capabilities in multimodal understanding of Vision Language Foundation Model CLIP, a recent two-stage CLIP-based method employs automated prompt engineering to obtain specific textual labels for classifying pedestrians. 
However, we note that the predefined soft prompts may be inadequate in expressing the entire visual context and struggle to generalize to unseen classes. This paper presents an end-to-end Prompt-driven Semantic Guidance (PromptSG) framework that harnesses the rich semantics inherent in CLIP. Specifically, we guide the model to attend to regions that are semantically faithful to the prompt. To provide personalized language descriptions for specific individuals, we propose learning pseudo tokens that represent specific visual contexts. This design not only facilitates learning fine-grained attribute information but also can inherently leverage language prompts during inference. Without requiring additional labeling efforts, our PromptSG achieves state-of-the-art by over $10\\%$ on MSMT17 and nearly $5\\%$ on the Market-1501 benchmark. The codes will be available at https://github.com/Yzxian16/PromptSG", + "bbox": [ + 75, + 340, + 473, + 720 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 746, + 209, + 762 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Person Re-Identification (ReID) is a crucial research area in computer vision that focuses on identifying individuals across different camera views or time instances [4, 44, 45, 57], which is a sub-task of image-based retrieval. Features of the same individual, as captured by various cameras, are prone to alterations due to changes in lighting, background, and body posture. Consequently, the effectiveness of a so", + "bbox": [ + 75, + 771, + 468, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1a1c3dd471251170d4f54f010e19bd75ee21d5479288340c29efd66f8c5de453.jpg", + "image_caption": [ + "(a) (b)", + "(c) (d)", + "Include background \"car\"" + ], + "image_footnote": [], + "bbox": [ + 506, + 309, + 633, + 359 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/96297b5662bd50f968dae48a98e7311bdac0c39e8977ba83975fdb8a0132a8a8.jpg", + "image_caption": [ + "(a) (b) (c) (d)", + "Include two pedestrians" + ], + "image_footnote": [], + "bbox": [ + 635, + 311, + 756, + 359 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6ea4db5cdc1b1af9491cbaa55e9a9c46194374daa6a1b31b3ab6b52377e241ff.jpg", + "image_caption": [ + "(a) (b) (c) (d)", + "More detailed attire" + ], + "image_footnote": [], + "bbox": [ + 759, + 311, + 882, + 359 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a948c436517257f617dc23830059e944b466eb12000858482aa2823d99bbc15b.jpg", + "image_caption": [ + "Figure 1. Transformer visualization [2] of attention maps. (a) Original images, (b) CLIP-ReID, (c) Our method w/o inversion, and (d) Our method guided by the composed prompts captures both the exact semantic parts and the external appearance details.", + "Figure 2. The core idea of our method. Our method inverts input images into pseudo-word tokens $S_{*}$ , which are then composed into a textual prompt to describe the specific visual context. The attention map of patch tokens is further controlled by the semantics of the textual prompt." 
+ ], + "image_footnote": [], + "bbox": [ + 504, + 470, + 887, + 621 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "phisticated ReID model fundamentally depends on its capability to learn discriminative features that are impervious to camera-specific variations, thereby enhancing the model's capacity to generalize to previously unseen classes.", + "bbox": [ + 496, + 733, + 890, + 794 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Modern ReID models, constructed upon uni-modal architectures such as the Convolutional Neural Network (CNN) [22] or Vision Transformer (ViT) [14, 19, 35, 40], have made significant advancements within the field. A substantial portion of these solutions focus on the extraction of pertinent regions to rectify misalignment issues. These strategies are dedicated to the extraction of semantic", + "bbox": [ + 496, + 795, + 892, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author.", + "bbox": [ + 94, + 886, + 220, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "17343", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "data, such as the human body structure, primarily facilitated through the integration of identity classification [33, 43] and metric learning [24, 55]. However, it is worth noting that these attention regions generally highlight only specific locally discriminative parts without explicit semantic control. When a distinct mask or skeleton direction is necessitated [27, 31], the need for additional, labor-intensive, and time-consuming manual labeling becomes inevitable.", + "bbox": [ + 75, + 90, + 470, + 212 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Large-scale Vision Language (VL) models, exemplified by Contrastive Language-Image Pre-Training (CLIP) [26], have recently shown remarkable abilities in reasoning across multi-modal data. CLIP model, when provided with text prompts such as 'A photo of a [CLASS]' displays exceptional zero-shot classification performance at the image level. This leads to a question: Can we further direct attention to regions of interest through natural language descriptions, such as 'A photo of a person'? However, due to the resulting visual representation lacking fine-grained information necessary for distinguishing between identities, integrating CLIP straightforwardly into person ReID is non-trivial. Additionally, the query 'A photo of a person' presents a challenge due to the absence of specific descriptors, thereby lacking a personalized prompt for individual identification. The pioneering CLIP-ReID [21] introduces automated prompt engineering on CLIP by incorporating additional ID-wise learnable vectors customized for specific identities. Particularly, CLIP-ReID employs a two-stage training process that first optimizes the learnable vectors with the frozen CLIP model, and then restricts the image encoder with the learned textual descriptions. 
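To make the prompt-composition idea above concrete, the following is a minimal PyTorch sketch, not the authors' released code: an inversion MLP maps a global visual embedding to a pseudo token, the token is spliced into the "A photo of a [s*] person" template, and matched image/text embeddings are pulled together with a symmetric contrastive loss in the spirit of Sec. 4.1. The stand-in encoders, the `InversionMLP` and `compose_prompt` names, and all tensor shapes are illustrative assumptions so the snippet runs without CLIP weights; the paper additionally uses a supervised variant that treats same-identity pairs as positives.

```python
# Minimal sketch of pseudo-token inversion, prompt composition, and symmetric
# contrastive alignment. Frozen CLIP encoders are replaced by random stand-ins
# so the snippet runs on its own; all names and shapes are illustrative.
import torch
import torch.nn as nn
import torch.nn.functional as F


class InversionMLP(nn.Module):
    """Maps a global visual embedding to a pseudo word token s_*."""

    def __init__(self, vis_dim=512, tok_dim=512, hidden=512):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(vis_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, tok_dim), nn.BatchNorm1d(tok_dim),
        )

    def forward(self, v):
        return self.net(v)


def compose_prompt(prefix_tok, pseudo_tok, suffix_tok):
    """Splices the pseudo token into 'A photo of a [s_*] person'."""
    b = pseudo_tok.shape[0]
    prefix = prefix_tok.unsqueeze(0).expand(b, -1, -1)   # tokens before s_*
    suffix = suffix_tok.unsqueeze(0).expand(b, -1, -1)   # tokens after s_*
    return torch.cat([prefix, pseudo_tok.unsqueeze(1), suffix], dim=1)


def symmetric_infonce(img, txt, tau=0.07):
    """Image-to-text plus text-to-image contrastive loss on matched pairs."""
    img = F.normalize(img, dim=-1)
    txt = F.normalize(txt, dim=-1)
    logits = img @ txt.t() / tau
    labels = torch.arange(img.shape[0])
    return F.cross_entropy(logits, labels) + F.cross_entropy(logits.t(), labels)


if __name__ == "__main__":
    torch.manual_seed(0)
    b, vis_dim, tok_dim = 8, 512, 512
    # Stand-ins for frozen CLIP outputs and token embeddings.
    v_global = torch.randn(b, vis_dim)             # global visual embeddings
    prefix = torch.randn(4, tok_dim)               # embeddings for "a photo of a"
    suffix = torch.randn(1, tok_dim)               # embedding for "person"
    text_encoder = nn.Linear(tok_dim, vis_dim)     # stand-in text encoder

    inv = InversionMLP(vis_dim, tok_dim)
    s_star = inv(v_global)                         # one pseudo token per image
    prompt_tokens = compose_prompt(prefix, s_star, suffix)
    # A real text encoder would consume the token sequence; the stand-in
    # simply pools it into one embedding per composed prompt.
    l_p = text_encoder(prompt_tokens.mean(dim=1))

    loss = symmetric_infonce(v_global, l_p)
    print(f"symmetric contrastive loss: {loss.item():.4f}")
```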
However, the disentangled usage, i.e., only the visual embedding is utilized during inference, renders the learned soft prompts ineffective for unseen prompts. As a result, the attention regions potentially do not entirely encompass the body part, and may inadvertently include background elements, such as cars and additional pedestrians captured in the scene, as illustrated in the first two examples in Fig. 1(b). In addition, even though CLIP-ReID adheres to training objectives aimed at vision-language alignment, such predefined soft prompts may not be sufficient to characterize the entire visual context of the specified pedestrian.", + "bbox": [ + 75, + 215, + 472, + 715 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose Prompt-driven Semantic Guidance (PromptSG), that aims to streamline the two-stage pipeline by leveraging the foundational CLIP model effectively and efficiently. As outlined in Fig. 2, our core insight is straightforward: we strive to activate CLIP's cross-modal comprehension using explicit language prompts, and the regions extracted can then be fine-tuned to enhance semantic discriminativeness. Specifically, given a textual prompt, we refine the patch tokens by injecting cross-attention maps, determining which patch attends to the corresponding semantics. Following this rationale, we revisit the fundamental issue that the term 'person' serves as a", + "bbox": [ + 75, + 719, + 470, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "coarse descriptor, lacking personalized language descriptions for individual identities. Beyond semantic information related to the 'person', appearance information is also crucial for identification purposes [5]. While semantic information aids the model in better body part localization, appearance information further refines the focus on an individual's attire. Hence, we employ the textual inversion technique [10], which learns to represent visual context through unique token. We use a lightweight inversion network that maps the image to a pseudo-token. This pseudo-token can then be incorporated into the textual prompt, creating an embedding that closely mirrors the original image. Compared to CLIP-ReID, our solution offers two primary advantages: 1) The textual prompt emphasizes regions in the image via a cross-attention map, capturing the precise semantic part (Fig. 1(c)), and can also be utilized for unseen classes during inference. 2) The model can learn the personal token of the query image in an end-to-end manner, providing more detailed guidance specific to an identity (Fig. 1(d)). Importantly, our proposed method is free, i.e. there is no need to supply additional information, such as masks, bounding boxes, or precise descriptions. We summarize the contribution of this paper as follows.", + "bbox": [ + 496, + 90, + 893, + 438 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Leveraging the exceptional multi-modal reasoning capabilities of CLIP, we propose PromptSG, a novel framework for the person ReID task. 
This approach uniquely utilizes language prompts, providing explicit assistance to the visual encoder in efficiently capturing semantic information.", + "- To create a more personalized description for the individual, we propose learning to represent the specific, more detailed appearance attributes, by employing the inversion network.", + "- Without any additional labelling efforts, PromptSG surpasses previous SOTA method [21] by over $10\\%$ on the MSMT17 dataset. It also exhibits superior performance on the Market-1501 benchmark, surpassing previous SOTA method [46] by nearly $5\\%$ ." + ], + "bbox": [ + 500, + 439, + 890, + 664 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 678, + 640, + 694 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Person Re-identification remains an important yet challenging task due to the subtle inter-class differences. To learn more discriminative representations, a category of CNN-based techniques has primarily concentrated on optimizing the distance metric via metric learning [15, 33, 34, 37, 38]. Recognizing the importance of semantic information, a substantial body of research [3, 23, 31, 43] explores the use of attention mechanisms, which guide the network to extract attention-aware features for body parts. For example, AAnet [36] adopts a unified learning framework that incorporates attribute attention maps through extra attribute labels. Pioneering work TransReID [14] introduces a self-attention-based architecture, Vision Transformer (ViT) [8],", + "bbox": [ + 496, + 704, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "17344", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4c93416d54c5bb2b08a721e9e8ab3ec52a5887b08c7fa360e0d66e84179597f3.jpg", + "image_caption": [ + "Figure 3. Overview of our framework. PromptSG learns pseudo token $S_{*}$ from the specific visual embedding, and the visual encoder learns semantic faithful representations with the guidance of language prompts that occur in the Multimodal Interaction Module." + ], + "image_footnote": [], + "bbox": [ + 84, + 92, + 602, + 377 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/0b046aeeb20e6eebeda8b6ac236d946962bd55fb3fd6170dad0c0c9b4bfc0b05.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 612, + 92, + 883, + 377 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "for advancing ReID tasks. DCAL [56] proposes to implicitly extract the local features through a global-local cross-attention mechanism. However, these methods solely apply attention mechanisms to the visual modality, and the lack of explicit language guidance potentially constrains their performance. The work most relevant to ours, CLIP-ReID [21], is the first to utilize vision-language pre-training model CLIP in ReID task. However, CLIP-ReID fails to leverage the linguistic capability of the text encoder in CLIP during inference, since the ID-specific learnable tokens only influence the seen identities.", + "bbox": [ + 75, + 446, + 468, + 611 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Large-scale vision-language pre-training model connects the image representation with text embedding in a shared embedding space, has demonstrated effectiveness across a wide range of uni-modal and multimodal downstream tasks. 
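As a rough illustration of what such explicit language guidance can look like in code, the snippet below sketches a single prompt-guided cross-attention step: the text embedding acts as the query and the patch tokens as keys and values, so patches that agree with the prompt's semantics receive larger weights (Sec. 4.2 describes the module actually used, which adds further transformer blocks). The `PromptGuidedAttention` name, head count, and tensor shapes are assumptions for exposition, not the released implementation.

```python
# Illustrative sketch of prompt-guided cross-attention: the prompt embedding
# queries the patch tokens, yielding per-patch weights and a prompt-conditioned
# visual summary. Shapes and the module name are assumptions.
import torch
import torch.nn as nn


class PromptGuidedAttention(nn.Module):
    def __init__(self, dim=512, num_heads=8):
        super().__init__()
        # batch_first=True -> inputs are (batch, sequence, dim)
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)

    def forward(self, text_emb, patch_tokens):
        # text_emb: (B, D) prompt embedding used as a single query
        # patch_tokens: (B, M, D) local visual tokens used as keys and values
        query = text_emb.unsqueeze(1)                              # (B, 1, D)
        fused, weights = self.attn(query, patch_tokens, patch_tokens,
                                   need_weights=True)
        # weights (B, 1, M): how strongly each patch matches the prompt
        # fused   (B, 1, D): prompt-conditioned visual summary
        return fused.squeeze(1), weights.squeeze(1)


if __name__ == "__main__":
    torch.manual_seed(0)
    B, M, D = 2, 128, 512            # batch, number of patches, embedding dim
    text_emb = torch.randn(B, D)     # e.g. encoding of "A photo of a s_* person"
    patch_tokens = torch.randn(B, M, D)

    module = PromptGuidedAttention(D)
    fused, weights = module(text_emb, patch_tokens)
    print(fused.shape, weights.shape)   # (2, 512) and (2, 128)
```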
These include classification [6, 48], image captioning [25], and cross-modal retrieval [11, 16, 32, 42, 49]. Foundational VL models, such as CLIP, usually undergo training on extensive image-text pairs with contrastive learning objectives. This foundational pre-training provides the model with strong open-vocabulary classification capabilities. Inherited from prompt learning in NLP [18], CoOp [54] proposes to explore learnable prompt optimization on few-shot classification. Following this soft prompt approach, CLIP-ReID pioneers the adaptation of CLIP for person ReID by classifying images into ID-specific prompts. Differing from CLIP-ReID, which focuses on vision-language alignment, our goal is to exploit rich semantic information from language to explicitly control the weights assigned to each patch or region, and im", + "bbox": [ + 75, + 613, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "prove the two-stage framework by directly inverting images into the language latent space.", + "bbox": [ + 498, + 446, + 890, + 477 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Textual Inversion, originally for personalized text-to-image generation [10], is a learning approach that aims to discover new pseudo-words in the word-embedding space. These pseudo-words are capable of encapsulating both the overall visual content and intricate visual details. Recently, the application of textual inversion has expanded to zero-shot composed image retrieval task [1, 29]. In these studies, a textual inversion network is typically pre-trained using extensive unlabeled image datasets. In this work, we stand out as the first to apply this learning paradigm to person ReID without any additional training data.", + "bbox": [ + 496, + 478, + 892, + 645 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Preliminary", + "text_level": 1, + "bbox": [ + 500, + 661, + 627, + 679 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Contrastive Language-Image Pre-training (CLIP) undergoes pre-training on a large corpus of image-text pairs, aligning visual and linguistic representations within a shared space through the matching of images with their corresponding text descriptions. Specifically, CLIP consists of a visual encoder $\\mathcal{V}(\\cdot)$ and a text encoder $\\mathcal{T}(\\cdot)$ . The visual encoder $\\mathcal{V}(\\cdot)$ takes an image $\\pmb{x} \\in \\mathbb{R}^{H \\times W \\times C}$ as input. The text encoder $\\mathcal{T}(\\cdot)$ takes a tokenized textual description $t \\in \\mathbb{R}^{N \\times D}$ as input, where $N, D$ are the text's length and token feature dimension respectively. The pre-training objective is based on self-supervised contrastive learning, which minimizes cosine distance for matched image-text pairs. For the downstream tasks such as classification, the description of $j$ -th class is typically obtained through the", + "bbox": [ + 496, + 688, + 892, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "17345", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "hand-crafted prompt, e.g., 'A photo of a [CLASS]'. 
Therefore, the probability of image $x$ being classified as class $y$ can be computed as follows:", + "bbox": [ + 75, + 90, + 468, + 135 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {P} (y | \\boldsymbol {x}) = \\frac {\\exp (\\operatorname {s i m} (\\mathcal {V} (\\boldsymbol {x}) , \\mathcal {T} (\\boldsymbol {t} _ {\\boldsymbol {y}})) / \\tau)}{\\sum_ {j = 1} ^ {K} \\exp (\\operatorname {s i m} (\\mathcal {V} (\\boldsymbol {x}), \\mathcal {T} (\\boldsymbol {t} _ {\\boldsymbol {j}})) / \\tau)}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 124, + 157, + 468, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\tau$ denotes the temperature, and $\\mathrm{sim}(a,b) = \\frac{a\\cdot b}{\\|a\\|_2\\|b\\|_2}$ is the cosine similarity.", + "bbox": [ + 75, + 207, + 467, + 239 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A simple approach to applying CLIP to person ReID involves substituting the linear classifier with image-to-text classification. However, given that labels in ReID tasks are solely index-based, there are no specific words to represent different persons. To tackle this challenge, CLIP-ReID crafts the prompt as 'A photo of a $[X_i]_1[X_i]_2[X_i]_3\\ldots[X_i]_M$ person', where $[X_i]_m, m \\in \\{1,\\dots,M\\}$ represents a set of ID-specific learnable tokens for the $i$ -th ID. Nevertheless, CLIP-ReID optimizes ID-specific prompts exclusively bound to training IDs, it overlooks the chance to fully exploit the open-vocabulary capabilities inherent in CLIP.", + "bbox": [ + 75, + 241, + 468, + 406 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Method", + "text_level": 1, + "bbox": [ + 76, + 419, + 168, + 434 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "An overview of our framework is depicted in Fig. 3. Starting with the visual embeddings derived from CLIP's visual encoder, our approach employs an inversion network to learn pseudo tokens that encapsulate the visual context. Following this, an interaction between visual and textual modalities is facilitated in the interaction module, leading to the final re-weighted representations. During the inference phase, we are presented with two options for textual inputs: an efficiency-driven simplified prompt and an accuracy-driven composed prompt. Note that the text encoder is frozen in our entire framework.", + "bbox": [ + 75, + 445, + 468, + 609 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Learning the Personalized ID-Specific Prompt", + "text_level": 1, + "bbox": [ + 76, + 619, + 467, + 637 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As suggested by prior research, the word-embedding space possesses sufficient expressiveness to encapsulate basic image concepts [7]. However, the inherent limitation lies in the pre-defined prompts in CLIP-ReID, which can only capture limited attributes and may not fully encapsulate the visual context. Contrarily, we propose learning the pseudo token by textual inversion technique that aligns with the context of the query image.", + "bbox": [ + 75, + 643, + 468, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Let $f_{\\theta}(\\cdot)$ denote an inversion network parameterized by $\\theta$ , our goal is to invert the global visual embedding $\\mathbf{v}$ from visual space of CLIP, represented as $\\mathbf{v} \\in V$ , into a pseudo token $s_* \\in T_*$ by $f_{\\theta}(\\mathbf{v}) = s_*$ , where $T_*$ indicates the token embedding space. 
Subsequently, this pseudo token can be integrated into natural language sentences. As such, the language prompt for the input image is structured as 'A photo of a $s_*$ person'. It is worth noting that this pseudo-token bears no relationship to an actual word but functions as a", + "bbox": [ + 75, + 763, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "representation in the token embedding space. An input language prompt undergoes a tokenization process, resulting in several tokens. The tokenized prompt, denoted as $t_p$ , can be fed into the text encoder of CLIP to obtain text embedding $l_p = \\mathcal{T}(t_p)$ . To ensure that the learned pseudo-token effectively tells the context of the image, one can follow to the reconstruction objective of textual inversion by the symmetric contrastive loss, which is formulated as follows:", + "bbox": [ + 498, + 90, + 890, + 210 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {i 2 t} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\log \\frac {\\exp (\\sin (\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {p}) / \\tau)}{\\sum_ {i = 1} ^ {N} \\exp (\\sin (\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {i}) / \\tau)}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 542, + 236, + 890, + 277 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {t 2 i} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\log \\frac {\\exp \\left(\\sin \\left(\\boldsymbol {l} _ {n} , \\boldsymbol {v} _ {p}\\right) / \\tau\\right)}{\\sum_ {i = 1} ^ {N} \\exp \\left(\\sin \\left(\\boldsymbol {l} _ {n} , \\boldsymbol {v} _ {i}\\right) / \\tau\\right)}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 542, + 289, + 890, + 330 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this context, $v_{i}$ or $l_{i}$ represents the $i$ -th image/text embedding in a batch. $l_{p}$ is the corresponding prompt embedding for $v_{n}$ and is constructed in a manner analogous to $v_{p}$ .", + "bbox": [ + 498, + 335, + 890, + 380 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The underlying mechanism is grounded in the principle of cycle-consistency, wherein a pseudo token tends to faithfully represent the context of the image only when the text features closely align with corresponding image features. However, the contrastive loss fails to handle cases where images with the same ID are supposed to share the same appearance. Therefore, we aim to encourage the pseudo token to capture visual details exclusive to the same identity. To this end, we exploit the symmetric supervised contrastive loss as follows:", + "bbox": [ + 498, + 381, + 890, + 531 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\operatorname {S u p C o n}} = \\mathcal {L} _ {i 2 t} ^ {\\operatorname {S u p}} + \\mathcal {L} _ {t 2 i} ^ {\\operatorname {S u p}}. 
\\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 614, + 542, + 890, + 561 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {i 2 t} ^ {\\operatorname {S u p}} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sum_ {p ^ {+} \\in P (i)} \\log \\frac {\\exp \\left(\\operatorname {s i m} \\left(\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {\\boldsymbol {p} ^ {+}}\\right) / \\tau\\right)}{\\sum_ {i = 1} ^ {N} \\exp \\left(\\operatorname {s i m} \\left(\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {i}\\right) / \\tau\\right)}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 573, + 890, + 630 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {t 2 i} ^ {\\operatorname {S u p}} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sum_ {p ^ {+} \\in P (i)} \\log \\frac {\\exp \\left(\\sin \\left(l _ {n} , v _ {p ^ {+}}\\right) / \\tau\\right)}{\\sum_ {i = 1} ^ {N} \\exp \\left(\\sin \\left(l _ {n} , v _ {i}\\right) / \\tau\\right)}. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 516, + 630, + 890, + 686 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $P(i)$ represents the positive samples related to $v_{n},l_{n}$", + "bbox": [ + 500, + 686, + 890, + 702 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Prompt-driven Semantic Guidance", + "text_level": 1, + "bbox": [ + 500, + 710, + 803, + 727 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We refine the language prompt by incorporating the pseudo-token that is linked to the identity, enhancing its ability to convey a more specific visual context for the image. Our commitment extends to meticulously directing the image feature through language. At the core of our approach lies the idea of semantic guidance, wherein we explicitly determine which region of the image aligns with the language prompt. Intuitively, image patches corresponding to the semantic \"person\" should inherently have substantial influence to facilitate discrimination. As opposed to the interaction between patches in self-attention layers within a", + "bbox": [ + 498, + 734, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "17346", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "single modality. Based on this observation, we explore a patch-to-prompt interaction that occurs in multi-modality.", + "bbox": [ + 75, + 90, + 468, + 119 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In particular, we employ a language-guided cross-attention module, which uses the textual embedding as query and the patch-wise embedding of the visual encoder as key and value. More formally, given a pair of image and prompt $(\\pmb{x},\\pmb{t_p})$ , we first feed the image $\\pmb{x}$ into the visual encoder, yielding in a sequence of patch embeddings $\\{\\tilde{\\pmb{v}},\\pmb{v}_1,\\dots,\\pmb{v}_M\\}$ . Here, $\\tilde{\\pmb{v}}$ denotes the global visual embedding, while remaining $\\pmb{v}_i,i\\in [1,M]$ belong to the local patch embeddings. In a similar vein, the prompt is fed into the text encoder to derive the text embedding $l_{p}$ . Subsequently, the text embedding is projected onto a query matrix $Q$ and patch embeddings are projected to a key matrix $K$ and a value matrix $V$ , via three different linear-projection layers. 
As such, the patch-to-prompt interaction can be achieved by:", + "bbox": [ + 75, + 122, + 470, + 348 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {A} (Q, K, V) = \\operatorname {S o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {d}}\\right) V. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 359, + 468, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This interaction aggregates the attention map to highlight the regions of high semantic response. Drawing from multimodal fusion methods [9], we incorporate two transformer blocks following the cross-attention layer to derive final representations. Ultimately, we utilize the standard ReID loss, i.e., the triplet loss and identity classification loss [12], to optimize our framework.", + "bbox": [ + 75, + 400, + 468, + 505 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {I D}} = \\frac {1}{K} \\sum_ {j = 1} ^ {K} y _ {j} \\log p _ {j}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 518, + 468, + 559 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {T r i p l e t}} = \\max \\left(d _ {p} - d _ {n} + m, 0\\right), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 574, + 468, + 590 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $p_j$ is the prediction probability for the $j$ -th class, and $m$ denotes the margin.", + "bbox": [ + 76, + 598, + 468, + 628 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Optimization and Inference", + "text_level": 1, + "bbox": [ + 76, + 638, + 326, + 654 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Taining optimization. In summary, the overall objective function for our framework is formulated as:", + "bbox": [ + 76, + 661, + 468, + 691 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {\\text {T r i p l e t}} + \\mathcal {L} _ {\\mathrm {I D}} + \\lambda \\mathcal {L} _ {\\operatorname {S u p C o n}}. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 705, + 468, + 722 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Similar to CLIP-ReID, the final hidden states of the vision transformer, in conjunction the preceding two layer states, are also employed to calculate $\\mathcal{L}_{\\mathrm{Triplet}}$ .", + "bbox": [ + 76, + 734, + 468, + 779 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Improved inference efficiency. Our approach involves the query-specific pseudo token with the textual prompt, which essentially doubles the inference time compared to using only the visual encoder. Fortunately, our empirical findings suggest that providing only 'A photo of a person' as a simplified guideline yields comparable results. In this way, there will be no increase in the inference time caused by the text encoder.", + "bbox": [ + 75, + 780, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/67df69e3285ba264454f25082c666fcfb9e5986246658eb5b1c4d9421764efa7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset | #ID | Images | Cams
Market-1501 | 1,501 | 32,668 | 6
MSMT17 | 4,101 | 126,441 | 15
DukeMTMC | 1,404 | 36,411 | 8
CUHK03-NP | 1,467 | 13,164 | 2
", + "bbox": [ + 547, + 88, + 846, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1. The statistics of dataset in our experiments", + "bbox": [ + 540, + 178, + 849, + 191 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 500, + 215, + 632, + 233 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. Experimental Setting", + "text_level": 1, + "bbox": [ + 500, + 241, + 700, + 258 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets and Evaluation Protocols. To evaluate and compare various methods, four extensive person re-identification datasets Market-1501 [51], MSMT17 [41], DukeMTMC [52] and CUHK03-NP [22] are exploited. Dataset stats are in Tab. 1. In line with conventions in the ReID community [13], two commonly used metrics, i.e., mean Average Precision (mAP) and Rank-1(R-1) accuracy, are used to evaluate the performance.", + "bbox": [ + 496, + 265, + 890, + 385 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation Details. In alignment with prior research, we employ both ResNet-50 and ViT-B/16 Pre-trained from CLIP as our visual encoder and a pre-trained text encoder, i.e., CLIP text Transformer. Our framework additionally features a random-initialized inversion network and a multimodal interaction module. The inversion network is a lightweight model employing a three-layered MLP of 512-dimensional hidden state. A Batch Normalization (BN) layer [17] is placed after the last state of the network. The batch size is configured to 64, encompassing 16 identities with 4 images per identity. All input images are resized to $256 \\times 128$ . We use the Adam optimizer with a learning rate of 5e-6 for the visual encoder, whereas the learning rate for random-initialized modules is set to 5e-5. We find $\\lambda$ in Eq. (10) is not sensitive and performs well across a broad range, thus we consistently set $\\lambda = 0.5$ for all datasets. The model is trained for 60 epochs, with a learning rate decay factor of 0.1 for every 20 epochs. The entire framework is implemented using PyTorch and runs on a single NVIDIA RTX3090 GPU with 24GB VRAM.", + "bbox": [ + 496, + 385, + 892, + 686 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baseline. Most existing approaches are built upon the strong ReID baseline presented in [24]. Specifically, they employ an ImageNet-21k pre-trained CNN model or ViT as the backbone and incorporate ID loss and triplet loss as crucial components. In contrast, our baseline model deviates by leveraging the pre-trained CLIP model and we fine-tune the visual encoder of CLIP by directly applying the two commonly-used losses.", + "bbox": [ + 496, + 686, + 890, + 808 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.2. Comparison with State-of-the-art Methods", + "text_level": 1, + "bbox": [ + 500, + 816, + 864, + 832 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We benchmark PromptSG against the current state-of-the-art, which can generally be divided into three categories: CNN-based, ViT-based, and CLIP-based methods. Tab. 2 summarizes the main results on four widely", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "17347", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/89422447a31d7aa309b229924688c5908f6cdead4892286221a0e5fe05c280d2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Backbone | Method | Reference | Market-1501 | MSMT17 | DukeMTMC | CUHK03-NP
mAP | R-1 | mAP | R-1 | mAP | R-1 | mAP | R-1
ResNet50 | CNN-based method
OSNet [53] | ICCV'19 | 84.9 | 94.8 | 52.9 | 78.7 | 73.5 | 88.6 | - | -
ISP [57] | ECCV'20 | 84.9 | 94.2 | - | - | 75.6 | 86.9 | 74.1 | 76.5
RGA-SC [50] | CVPR'20 | 88.4 | 96.1 | 57.5 | 80.3 | - | - | 77.4 | 81.1
CDNet [20] | CVPR'21 | 86.0 | 95.1 | 54.7 | 78.9 | 76.8 | 88.6 | - | -
CAL [28] | ICCV'21 | 87.0 | 94.5 | 56.2 | 79.5 | 76.4 | 87.6 | - | -
ALDER* [47] | TIP'21 | 88.9 | 95.6 | 59.1 | 82.5 | 78.9 | 89.9 | 78.7 | 81.0
LTReID* [39] | TMM'22 | 86.9 | 94.7 | 58.6 | 81.0 | 80.4 | 90.5 | 80.3 | 82.1
CLIP-based method
Baseline | 88.1 | 94.7 | 60.7 | 82.1 | 79.3 | 88.6 | 77.6 | 79.1
CLIP-ReID [21] | AAAI'23 | 89.8 | 95.7 | 63.0 | 84.4 | 80.7 | 90.0 | 78.2 | 79.4
PromptSG | Ours | 91.8 | 96.6 | 68.5 | 86.0 | 80.4 | 90.2 | 79.8 | 80.5
ViT-B/16 | ViT-based method
TransReID [14] | ICCV'21 | 88.9 | 95.2 | 67.4 | 85.3 | 82.0 | 90.7 | 79.6 | 81.7
DCAL [56] | CVPR'22 | 87.5 | 94.7 | 64.0 | 83.1 | 80.1 | 89.0 | - | -
AAformer [58] | TNNLS'23 | 88.0 | 95.4 | 65.6 | 84.4 | 80.9 | 90.1 | 79.0 | 80.3
PHA [46] | CVPR'23 | 90.2 | 96.1 | 68.9 | 86.1 | - | - | 83.0 | 84.5
CLIP-based method
Baseline | 86.4 | 93.3 | 66.1 | 84.4 | 80.0 | 88.8 | 80.0 | 80.5
CLIP-ReID [21] | AAAI'23 | 89.6 | 95.5 | 73.4 | 88.7 | 82.5 | 90.0 | 81.6 | 80.9
PromptSG | Ours | 94.6 | 97.0 | 87.2 | 92.6 | 81.6 | 91.0 | 83.1 | 85.1
", + "bbox": [ + 120, + 88, + 849, + 439 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Comparison with the state-of-the-art models on Market-1501, MSMT17, DukeMTMC, and CUHK03-NP (labeled) datasets. The superscript star* indicates that the image is resized to a resolution exceeding 256x128. All results are reported without re-ranking. Color Red and blue: the best and second-best results.", + "bbox": [ + 75, + 450, + 890, + 489 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "used person ReID datasets. We observe that our proposed PromptSG attains the best results and sets a new state-of-the-art performance. Remarkably, PromptSG achieves over $10\\%$ improvement on MSMT17 and nearly $5\\%$ on Market-1501, surpassing previous state-of-the-art results.", + "bbox": [ + 76, + 518, + 468, + 594 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Compared with ViT-based method. Pioneering work TransReID [14] sets a strong baseline for the ViT-based method by leveraging the potentials of the transformer. Building upon this groundwork, PHA [46] further enhances the preservation of key high-frequency elements in images. In contrast to existing ViT-based methods that only capture the patch-wise uni-modal information, our PromptSG method demonstrates that the interaction of different modalities can improve the performance of individual modalities.", + "bbox": [ + 75, + 595, + 468, + 731 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Compared with CLIP-based method. Compared with the competing CLIP-based method CLIP-ReID, our PromptSG outperforms it by $5.0\\% / 1.5\\%$ and $13.8\\% / 3.9\\%$ mAP/Rank-1 on Market-1501 and MSMT17 datasets when taking ViT-B/16 as visual backbone. A key distinction between CLIP-ReID and our approach resides in the composition of the query-specific pseudo-token. Our results further underscore that incorporating textual information during the inference process can also enhance performance.", + "bbox": [ + 75, + 733, + 468, + 868 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Compared with CNN-based method. To ensure a fair comparison, we also implement PromptSG with a ResNet-", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "50 backbone. Apart from LTReID [39] that utilize higher resolution images, our method consistently surpasses other methods by a significant margin, especially on the most challenging person ReID dataset, MSMT17. This highlights the robustness and superiority of our approach across various architectures.", + "bbox": [ + 498, + 518, + 890, + 609 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 619, + 653, + 636 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the following, we conduct an ablation study on the essential elements of PromptSG on Market-1501 and MSMT17 datasets, and all the experiments are conducted on the ViT-B/16 backbone.", + "bbox": [ + 498, + 643, + 890, + 702 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Contributions from Different Components. To assess the contribution of various components, we conduct ablation experiments by removing one component at a time. Recall that $\\mathcal{L}_{i2t}^{\\mathrm{sup}}$ and $\\mathcal{L}_{t2i}^{\\mathrm{sup}}$ are the supervised contrastive losses in Eq. (2), Eq. (3) respectively, and MIM denotes the multimodal interaction module. 
Comparing rows b) and c) with a), we see a similar conclusion where the removal of text-to-image or image-to-text contrastive loss leads to a decent improvement on both datasets. Further comparing rows a) and d), we observe that the removal of semantic information leads to a larger decrease than solely removing ID-specific appearance information. Notably, as seen in row a), our full model, PromptSG, utilizes both semantic and appearance", + "bbox": [ + 496, + 704, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "17348", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/5af67ad35b61efc3c384b8df449fe4181f598d45286086d6840a070308d0360e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Components | Market-1501 | MSMT17
\( {\mathcal{L}}_{i2t}^{\text{Sup }} \) | \( {\mathcal{L}}_{t2i}^{\text{Sup }} \) | MIM | mAP | R-1 | mAP | R-1
a) | 94.6 | 97.0 | 87.2 | 92.6
b) | 92.8 | 96.7 | 85.2 | 91.9
c) | 93.0 | 96.7 | 84.5 | 90.2
d) | 89.4 | 95.3 | 71.4 | 87.3
", + "bbox": [ + 78, + 88, + 475, + 200 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/03d70c9b9299e0d8f31a81729ab3617851f0a45af4faa333e32af6c63161a871.jpg", + "table_caption": [ + "Table 3. Ablation study on the effectiveness of each component of PromptSG on Market-1501 and MSMT17." + ], + "table_footnote": [], + "table_body": "
Method | Market-1501 | MSMT17
mAP | R-1 | mAP | R-1
Training w/ composed | 94.6 | 97.0 | 87.2 | 92.6
Training w/o composed | 92.0 | 96.3 | 85.3 | 91.6
", + "bbox": [ + 78, + 253, + 467, + 325 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "language supervision during training, achieving a substantial improvement of over a point. The overall conclusion supports that language guidance, through both semantic and appearance cues, plays a crucial role in improving the performance of our model.", + "bbox": [ + 75, + 386, + 467, + 460 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation Study on the Personalized Prompt. To better understand whether the learned pseudo tokens $s_*$ can provide more granular guidance for learning visual embeddings, we train a strong baseline model, where the textual prompt dose not composed with the $s_*$ during training and testing, but instead relies on the simplified prompt \"A photo of a person\" for semantic guidance. Note that we will not use the symmetric supervised contrastive loss in this case. Results in Tab. 4 imply that composing the $s_*$ has a significant impact on the overall performance. When $s_*$ is removed from the training process, the performance decreases by $1.9\\%$ to $2.6\\%$ in terms of mAP. Although we focus on the uni-modal re-identification task, the above formulation could potentially be applied to multimodal test sets, such as text-to-image person retrieval by composing the image feature with the text to achieve better alignment.", + "bbox": [ + 75, + 462, + 467, + 703 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation Study on the Interaction Module. We analyze the impact of different designs of the interaction module on performance and inference speed, as well as the impact of not using a composed prompt during inference. Notably, personalized prompts are consistently included during the training. As shown in Tab. 5, without an attention module (w/o attention module), the model achieves a baseline performance, with inference speed being dependent solely on the visual encoder. Introducing a single cross-attention layer (+1 cross-layer) shows a notable performance improvement, indicating the positive effect of incorporating a cross-layer design. Notably, performances can be stably improved with more self-attention layers, but at the cost of", + "bbox": [ + 75, + 704, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/a12857893a0353a243f8ffc9ed502e1120a32394fca1979cac526e1b159b5539.jpg", + "table_caption": [ + "Table 4. Ablation of training with or without composing the pseudo token on Market-1501 and MSMT17." + ], + "table_footnote": [], + "table_body": "
Training Model | Inference Model
Visual Encoder | Text Encoder | FPS ↑ | mAP
w/o attention module | ✓ | - | 1x | 89.4
+1 cross-layer | ✓ | - | 0.95x | 91.1
+1 cross & 1 self-layer | ✓ | - | 0.91x | 93.0
+1 cross & 2 self-layer | ✓ | ✓ | 0.48x | 94.6
+1 cross & 2 self-layer | ✓ | - | 0.88x | 94.1
", + "bbox": [ + 504, + 88, + 890, + 212 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/28955852bb7feab265158e6d10dfc0d782433025a22e3cb170dd14f530b858e4.jpg", + "table_caption": [ + "Table 5. The impact of various interaction modules and efficiency comparison with different inference models on Market-1501. Cross and self means cross-attention and self-attention, respectively. FPS denotes the quantity of images processed by the model in one second." + ], + "table_footnote": [], + "table_body": "
Method | #Params | #Params %CLIP | Training Time | Times ↓
(a) Market-1501
CLIP-ReID | 89M | 0.71 | 4689s | 1x
PromptSG | 94M | 0.75 | 2417s | 0.51x
(b) MSMT17
CLIP-ReID | 90M | 0.73 | 12904s | 1x
PromptSG | 94M | 0.75 | 6108s | 0.47x
", + "bbox": [ + 501, + 309, + 883, + 439 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 6. Comparison of training times and the number of parameters on Market-1501 and MSMT17. #Params denotes the number of learnable parameters in the whole framework. All models are evaluated on a single 3090Ti GPU.", + "bbox": [ + 498, + 450, + 890, + 506 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "lower inference efficiency. Furthermore, our analysis illuminates the impact of employing a composed prompt during the inference phase, revealing that when we follow the same procedure as the training stage—composing text with query images—the Frames Per Second (FPS) is only 0.48 times that of the baseline. This is expected as we need to pass through two encoders for each query. However, we empirically discovered that using a fixed prompt “A photo of a person” for all queries may not lead to significant performance degradation, and it does not compromise efficiency. Therefore, one could opt for this version to achieve a more favorable balance between accuracy and efficiency.", + "bbox": [ + 496, + 536, + 890, + 717 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparison of training efficiency. In order to showcase the efficiency of our proposed approach, we carry out a comparative analysis between our one-stage PromptSG and the two-stage CLIP-ReID method, focusing on the number of learnable parameters and training speed. The details of this comparison are provided in Tab. 6. In terms of training parameters, on top of CLIP, CLIP-ReID incorporates an additional of parameters mainly through the ID-wise learnable prompt, our approach primarily extends through a fixed-size mapping network and an interaction module. Despite CLIP-ReID having $2\\% -4\\%$ fewer parameters than ours on two datasets, it may experience continuous growth in parame", + "bbox": [ + 496, + 719, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "17349", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Market-1501", + "text_level": 1, + "bbox": [ + 230, + 101, + 336, + 118 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/feca2dc7394929335e72ea92959f9ce850f61b8a39a62e2475cdcea0fff5b8cd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 96, + 127, + 138, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1cd4adff6d5382cab00a2019c83ca0381f6d7f07b528314fbc9725150a8801c6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 142, + 127, + 183, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/00c244b701a218a38b63a558615d783f1c8834016a2b33238e2554cc96b33df2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 186, + 127, + 228, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2cb5339f172ef720b9556a30778611c5d73a787469af19402ffdfec410bc4be5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 233, + 127, + 274, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c1334c074ae58cb3d4f0cc4fb760727a4bb18b9883f7286098067448a3f9a072.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 289, + 127, + 331, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a1007a0500191772cdc74e6db5416de2aec4cdaf5c675ff8248d8471392e8820.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 127, + 375, + 191 + ], + 
"page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6b1c7045ebecf179f63f880992e152f5aff049bd3216d784c756a6265e317a4a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 380, + 127, + 421, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f2de007f30ae743ab55260cb03dbd29ceabcbb65d63844c3213a1b54db402ec5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 424, + 127, + 467, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0d2acbcffc7f6fad9bffbce05c82cd0b7095771f741c5c970303f9fd8ede82c4.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 96, + 210, + 138, + 276 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/79c51dc5b2ca4d88a713c96a5adc07ae3e2505ddef88e0214ae449712bdb0f4b.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 142, + 210, + 183, + 276 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7409d2d9755a5aaf979d31dff90ab6e7870feca7c7b9782f6f2a7d24b8ab91b5.jpg", + "image_caption": [ + "Figure 4. Transformer visualization of attention maps. (a) Original images, (b) CLIP-ReID, (c) PromptSG without composed training, (d) PromptSG. We see our method is effective in simultaneously focusing on the semantic clues and exploring more discriminative parts." + ], + "image_footnote": [], + "bbox": [ + 187, + 210, + 228, + 276 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(c)", + "bbox": [ + 199, + 279, + 217, + 291 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5db50ad898b6c4f9a76836da6e0597005229978be9362ec8d10ab51a1c2a8324.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 233, + 210, + 274, + 276 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(c)", + "bbox": [ + 199, + 279, + 217, + 291 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8e76daac518e394b4049590603cd54d5e0038848903f0ed80b2a86a7bd8c17b6.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 290, + 210, + 331, + 276 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/40d336de2572c1136ac1c1e4021094010dcfa5070d595e8bbfb285f1018626ed.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 334, + 210, + 375, + 276 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/882132aed170e1a8c2ffc1d5ef388ab2aaf0af67b6b5953bd19f84af6f3b5908.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 379, + 210, + 419, + 276 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/72bc7d80a9782084e02a24ac60b25dfcfe1e372f77d648997a935d6c30620b95.jpg", + "image_caption": [ + "(d)" + ], + "image_footnote": [], + "bbox": [ + 424, + 210, + 467, + 276 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "MSMT17", + "text_level": 1, + "bbox": [ + 651, + 102, + 732, + 117 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9a3ffad7a1f1767d079dd6293716427669b57664fbd58d0cc223d4816d1ec51f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 125, + 544, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5b7a72291d087492b182ce21b78e143313ed41ee68e2fb29ea811ff44d365fc7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 547, + 126, + 589, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/781fc5e97c4d16243dabe1e8961d94aabcad921c3aa5fa3e738101af85dc77d5.jpg", + 
"image_caption": [], + "image_footnote": [], + "bbox": [ + 594, + 127, + 637, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/901b3be8690998e5138d0d00fe5718c681e46f648e4f6979bc84ab6a3e9437e3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 127, + 681, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d0bc48db2e60abe1af9033cdbf65c8b06fc165961652f9859decfe9794fe4c39.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 697, + 127, + 738, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0b1f9b73a77e337d1d4ad083ad965ed33e5e682571a5ee903931a284dc5d94cf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 743, + 127, + 785, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a489345cf73a8435aaf5c1773f5eea7db8e0e3f558fa76b229df34cfa89f29e4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 789, + 127, + 828, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/39844a844c9c01b88108038a341d54bdeabb74547db4f37fd77e338c32019399.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 831, + 127, + 874, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/71171cc05606846363db5be664d3593ff0bbc08712c50c91460d1c0278f69b4f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 210, + 544, + 276 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e8e313c3d97f5e3293d5b9500cf41f28414b4f138014dc5ad8cb557ddc6024a7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 547, + 210, + 589, + 276 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e8bee1ef3891a564e342469ae4ccc45311f5172652038e98eb2a4a638b70edd2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 210, + 635, + 276 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fdbf44fa851a6a7f8ee2411afef4382e461f8255244c71a96cfe241cce4c0e91.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 210, + 681, + 275 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0d10007a16851c423225786130ee858c79ba58e88b23a9305803f0d0e0cbef42.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 697, + 210, + 738, + 275 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/821091c3aaa44faa9fe08b873a766e510c897d256ad5acfb439186ed4f77e69e.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 741, + 210, + 784, + 275 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/06b51ec49d039aeb748d51c61d492bf580c90d40ec911b11a222dcd313abbf32.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 787, + 210, + 828, + 275 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d5fbe033f289713efc8f4787c789748bb48aea4c4d056234d0e7ab9efa1d048b.jpg", + "image_caption": [ + "(d)" + ], + "image_footnote": [], + "bbox": [ + 831, + 210, + 874, + 275 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ters in scenarios with a higher number of classes or evolving dynamics. In contrast, PromptSG demonstrates stronger robustness in the number of parameters and achieves significant faster training speed. 
It can achieve a speedup of approximately 2 times faster during training compared to the two-stage method.", + "bbox": [ + 75, + 375, + 467, + 465 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Qualitative Analysis", + "text_level": 1, + "bbox": [ + 76, + 481, + 269, + 498 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To have an intuitive understanding and validate the effectiveness of our method, we conduct a qualitative analysis where visualization of attention maps is presented in Fig. 4. Specifically, we exhibit examples from Market-1501 and MSMT17 datasets, each with two training images and two gallery images. We carefully selected some challenging examples, including those with complex backgrounds or images depicting multiple individuals. To gain a better insight into the regions of interest attended by the model in zero-shot scenarios, we do not use the common protocol GramCAM [30], as it needs the class-prediction scores and might be considered less suitable for Transformer-type backbones. Following [21], we use the Transformer-interpretability method in [2].", + "bbox": [ + 75, + 507, + 467, + 718 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We compare our (d) PromptSG with (b) CLIP-ReID and (c) PromptSG without image-composed training. It can be seen that our method exhibits significant effectiveness, as it adeptly captures semantic information while also concentrating on more detailed appearance details. For example, in the first row of the Market-1501 dataset, the attention map of CLIP-ReID is susceptible to interference from background elements like \"car\". On the other hand, PromptSG w/o composed training tends to emphasize semantic information related to the 'person,' focusing on the location of the head, arms, and legs. In contrast, our method goes beyond this by also exploring appearance features, such as", + "bbox": [ + 75, + 719, + 467, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "identifying individuals wearing hats or carrying backpacks. Finally, in the first row examples of MSMT17, where additional pedestrians appear in the image, our method excels in effectively filtering out unnecessary pedestrians, while CLIP-ReID fails.", + "bbox": [ + 498, + 375, + 890, + 449 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 465, + 617, + 481 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose PromptSG, a simple yet effective framework that exploits the foundational model CLIP for the person ReID task. We show that language guidance is an effective way to adapt pre-trained multimodal models for the uni-modal retrieval tasks. Through leveraging the aligned multi-modal latent space provided by CLIP, the textual prompt \"A photo of a person\" can naturally address the challenge of the visual encoder in its struggle to capture semantic information. To probe more fine-grained appearance features, we incorporate an inversion network to learn pseudo tokens that describe the image context.", + "bbox": [ + 496, + 491, + 890, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Discussion and Limitation. Despite the considerable potential of language prompt learning in ReID tasks, prompt learning in the vision branch remains a largely untapped area. Fine-tuning the visual encoder for strong supervised performance may lead to poor zero-shot generalization. 
We hope our work can inspire future research on fully unleashing the potential of large foundation models in challenging ReID tasks.", + "bbox": [ + 496, + 657, + 890, + 779 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments This work was supported by the National Key R&D Program of China under Grant 2022YFB3103500, the National Natural Science Foundation of China under Grants 62106258, 62006242 and 62202459, and the China Postdoctoral Science Foundation under Grant 2022M713348 and 2022TQ0363, and Young Elite Scientists Sponsorship Program by CAST (2023QNRC001) and BAST (NO.BYESS2023304).", + "bbox": [ + 496, + 780, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "17350", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Alberto Baldrati, Lorenzo Agnolucci, Marco Bertini, and Alberto Del Bimbo. Zero-shot composed image retrieval with textual inversion. In Proceedings of the IEEE international conference on computer vision (ICCV), 2023. 3", + "[2] Hila Chefer, Shir Gur, and Lior Wolf. Transformer interpretability beyond attention visualization. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2021. 1, 8", + "[3] Binghui Chen, Weihong Deng, and Jiani Hu. Mixed high-order attention network for person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 2", + "[4] Tianlong Chen, Shaojin Ding, Jingyi Xie, Ye Yuan, Wuyang Chen, Yang Yang, Zhou Ren, and Zhangyang Wang. Abdnet: Attentive but diverse person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 1", + "[5] Weihua Chen, Xianzhe Xu, Jian Jia, Hao Luo, Yaohua Wang, Fan Wang, Rong Jin, and Xiuyu Sun. Beyond appearance: a semantic controllable self-supervised learning framework for human-centric visual tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2", + "[6] Xiaohua Chen, Yucan Zhou, Dayan Wu, Chule Yang, Bo Li, Qinghua Hu, and Weiping Wang. Area: adaptive reweighting via effective area for long-tailed classification. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2023. 3", + "[7] Niv Cohen, Rinon Gal, Eli A Meirom, Gal Chechik, and Yuval Atzmon. \"this is my unicorn, fluffy\": Personalizing frozen vision-language representations. In Proceedings of the European conference on computer vision (ECCV), 2022. 4", + "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 2", + "[9] Zi-Yi Dou, Yichong Xu, Zhe Gan, Jianfeng Wang, Shuohang Wang, Lijuan Wang, Chenguang Zhu, Pengchuan Zhang, Lu Yuan, Nanyun Peng, et al. An empirical study of training end-to-end vision-and-language transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 5", + "[10] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit Haim Bermano, Gal Chechik, and Daniel Cohen-or. An image is worth one word: Personalizing text-to-image generation using textual inversion. 
In International Conference on Learning Representations (ICLR), 2022. 2, 3", + "[11] Xiaoshuai Hao, Wanqian Zhang, Dayan Wu, Fei Zhu, and Bo Li. Dual alignment unsupervised domain adaptation for video-text retrieval. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2023. 3" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[12] Lingxiao He, Xingyu Liao, Wu Liu, Xinchen Liu, Peng Cheng, and Tao Mei. Fastreid: A pytorch toolbox for general instance re-identification. arXiv preprint arXiv:2006.02631, 2020.5", + "[13] Lingxiao He, Xingyu Liao, Wu Liu, Xinchen Liu, Peng Cheng, and Tao Mei. Fastreid: A pytorch toolbox for general instance re-identification. In Proceedings of the 31st ACM International Conference on Multimedia (ACM MM), 2023. 5", + "[14] Shuting He, Hao Luo, Pichao Wang, Fan Wang, Hao Li, and Wei Jiang. Transreid: Transformer-based object re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2021. 1, 2, 6", + "[15] Pingting Hong, Dayan Wu, Bo Li, and Weiping Wang. Camera-specific informative data augmentation module for unbalanced person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 2", + "[16] Siteng Huang, Biao Gong, Yulin Pan, Jianwen Jiang, Yiliang Lv, Yuyuan Li, and Donglin Wang. Vop: Text-video co-operative prompt tuning for cross-modal retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3", + "[17] Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning (ICML), 2015. 5", + "[18] Zhengbao Jiang, Frank F Xu, Jun Araki, and Graham Neubig. How can we know what language models know? Transactions of the Association for Computational Linguistics (ACL), 2020. 3", + "[19] Dengjie Li, Siyu Chen, Yujie Zhong, Fan Liang, and Lin Ma. Dip: Learning discriminative implicit parts for person re-identification. arXiv preprint arXiv:2212.13906, 2022. 1", + "[20] Hanjun Li, Gaojie Wu, and Wei-Shi Zheng. Combined depth space based architecture search for person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2021. 6", + "[21] Siyuan Li, Li Sun, and Qingli Li. Clip-reid: exploiting vision-language model for image re-identification without concrete text labels. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2023. 2, 3, 6, 8", + "[22] Wei Li, Rui Zhao, Tong Xiao, and Xiaogang Wang. Deepreid: Deep filter pairing neural network for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2014. 1, 5", + "[23] Wei Li, Xiatian Zhu, and Shaogang Gong. Harmonious attention network for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 2", + "[24] Hao Luo, Youzhi Gu, Xingyu Liao, Shenqi Lai, and Wei Jiang. Bag of tricks and a strong baseline for deep person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops (CVPRW), 2019. 
2, 5" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "17351", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[25] Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. arXiv preprint arXiv:2111.09734, 2021. 3", + "[26] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning (ICML), 2021. 2", + "[27] Haocong Rao and Chunyan Miao. Transg: Transformer-based skeleton graph prototype contrastive learning with structure-trajectory prompted reconstruction for person re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2", + "[28] Yongming Rao, Guangyi Chen, Jiwen Lu, and Jie Zhou. Counterfactual attention learning for fine-grained visual categorization and re-identification. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 6", + "[29] Kuniaki Saito, Kihyuk Sohn, Xiang Zhang, Chun-Liang Li, Chen-Yu Lee, Kate Saenko, and Tomas Pfister. Pic2word: Mapping pictures to words for zero-shot composed image retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3", + "[30] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision (ICCV), 2017. 8", + "[31] Chunfeng Song, Yan Huang, Wanli Ouyang, and Liang Wang. Mask-guided contrastive attention model for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 2", + "[32] Qinghang Su, Dayan Wu, Chenming Wu, Bo Li, and Weiping Wang. From data to optimization: Data-free deep incremental hashing with data disambiguation and adaptive proxies. IEEE Transactions on Circuits and Systems for Video Technology (TCSVT), 2024. 3", + "[33] Yifan Sun, Liang Zheng, Yi Yang, Qi Tian, and Shengjin Wang. Beyond part models: Person retrieval with refined part pooling (and a strong convolutional baseline). In Proceedings of the European conference on computer vision (ECCV), 2018. 2", + "[34] Yifan Sun, Changmao Cheng, Yuhan Zhang, Chi Zhang, Liang Zheng, Zhongdao Wang, and Yichen Wei. Circle loss: A unified perspective of pair similarity optimization. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2020. 2", + "[35] Lei Tan, Pingyang Dai, Rongrong Ji, and Yongjian Wu. Dynamic prototype mask for occluded person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 1", + "[36] Chiat-Pin Tay, Sharmili Roy, and Kim-Hui Yap. Aanet: Attribute attention network for person re-identifications. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 2" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[37] Guanshuo Wang, Yufeng Yuan, Xiong Chen, Jiwei Li, and Xi Zhou. 
Learning discriminative features with multiple granularities for person re-identification. In Proceedings of the 26th ACM international conference on Multimedia (ACM MM), 2018. 2", + "[38] Lin Wang, Wanqian Zhang, Dayan Wu, Fei Zhu, and Bo Li. Attack is the best defense: Towards preemptive-protection person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 2", + "[39] Pingyu Wang, Zhicheng Zhao, Fei Su, and Honying Meng. Ltreid: Factorizable feature generation with independent components for long-tailed person re-identification. IEEE Transactions on Multimedia (TMM), 2022. 6", + "[40] Tao Wang, Hong Liu, Pinhao Song, Tianyu Guo, and Wei Shi. Pose-guided feature disentangling for occluded person re-identification based on transformer. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2022. 1", + "[41] Longhui Wei, Shiliang Zhang, Wen Gao, and Qi Tian. Person transfer gan to bridge domain gap for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 5", + "[42] Dayan Wu, Qi Dai, Jing Liu, Bo Li, and Weiping Wang. Deep incremental hashing network for efficient image retrieval. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 3", + "[43] Wenjie Yang, Houjing Huang, Zhang Zhang, Xiaotang Chen, Kaiqi Huang, and Shu Zhang. Towards rich feature discovery with class activation maps augmentation for person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 2", + "[44] Zexian Yang, Dayan Wu, Wanqian Zhang, Bo Li, and Weiping Wang. Handling label uncertainty for camera incremental person re-identification. In Proceedings of the 31st ACM International Conference on Multimedia (ACM MM), 2023. 1", + "[45] Mang Ye, Jianbing Shen, Gaojie Lin, Tao Xiang, Ling Shao, and Steven CH Hoi. Deep learning for person re-identification: A survey and outlook. IEEE transactions on pattern analysis and machine intelligence (TPAMI), 2021. 1", + "[46] Guiwei Zhang, Yongfei Zhang, Tianyu Zhang, Bo Li, and Shiliang Pu. Pha: Patch-wise high-frequency augmentation for transformer-based person re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 6", + "[47] Quan Zhang, Jianhuang Lai, Zhanxiang Feng, and Xiaohua Xie. Seeing like a human: Asynchronous learning with dynamic progressive refinement for person re-identification. IEEE Transactions on Image Processing (TIP), 2021. 6", + "[48] Renrui Zhang, Wei Zhang, Rongyao Fang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li. Tip-adapter: Training-free adaption of clip for few-shot classification. In European Conference on Computer Vision (ECCV), 2022. 3", + "[49] Wanqian Zhang, Dayan Wu, Yu Zhou, Bo Li, Weiping Wang, and Dan Meng. Binary neural network hashing for image retrieval. In Proceedings of the 44th international ACM SIGIR" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "17352", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "conference on research and development in information retrieval (SIGIR)l, 2021.3", + "[50] Zhizheng Zhang, Cuiling Lan, Wenjun Zeng, Xin Jin, and Zhibo Chen. Relation-aware global attention for person re-identification. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6", + "[51] Liang Zheng, Liyue Shen, Lu Tian, Shengjin Wang, Jingdong Wang, and Qi Tian. Scalable person re-identification: A benchmark. In Proceedings of the IEEE international conference on computer vision (ICCV), 2015. 5", + "[52] Zhedong Zheng, Liang Zheng, and Yi Yang. Unlabeled samples generated by gan improve the person re-identification baseline in vitro. In Proceedings of the IEEE international conference on computer vision (ICCV), 2017. 5", + "[53] Kaiyang Zhou, Yongxin Yang, Andrea Cavallaro, and Tao Xiang. Omni-scale feature learning for person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 6", + "[54] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision (IJCV), 2022. 3", + "[55] Xiao Zhou, Yujie Zhong, Zhen Cheng, Fan Liang, and Lin Ma. Adaptive sparse pairwise loss for object re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2", + "[56] Haowei Zhu, Wenjing Ke, Dong Li, Ji Liu, Lu Tian, and Yi Shan. Dual cross-attention learning for fine-grained visual categorization and object re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3, 6", + "[57] Kuan Zhu, Haiyun Guo, Zhiwei Liu, Ming Tang, and Jinqiao Wang. Identity-guided human semantic parsing for person re-identification. In Proceedings of the European conference on computer vision (ECCV), 2020. 1, 6", + "[58] Kuan Zhu, Haiyun Guo, Shiliang Zhang, Yaowei Wang, Jing Liu, Jinqiao Wang, and Ming Tang. Aaformer: Auto-aligned transformer for person re-identification. IEEE Transactions on Neural Networks and Learning Systems (TNNLS), 2023. 6" + ], + "bbox": [ + 78, + 92, + 468, + 667 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "17353", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/85746221-1e2b-4579-be8b-1626ff544e58_model.json b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/85746221-1e2b-4579-be8b-1626ff544e58_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c1476c55261346b52b61d383167357dbc368c8b2 --- /dev/null +++ b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/85746221-1e2b-4579-be8b-1626ff544e58_model.json @@ -0,0 +1,2686 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.044 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.116, + 0.13, + 0.856, + 0.175 + ], + "angle": 0, + "content": "A Pedestrian is Worth One Prompt: Towards Language Guidance Person Re-Identification" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.203, + 0.868, + 0.222 + ], + "angle": 0, + "content": "Zexian Yang\\(^{1,2}\\) Dayan Wu\\(^{1*}\\) Chenming Wu\\(^{3}\\) Zheng Lin\\(^{1}\\) Jingzi Gu\\(^{1}\\) Weiping Wang\\(^{1,2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.222, + 0.757, + 0.239 + ], + "angle": 0, + "content": "\\(^{1}\\)Institute of Information Engineering, Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.153, + 0.239, + 0.819, + 0.257 + ], + "angle": 0, + "content": "\\(^{2}\\)School of Cyber Security, University of Chinese Academy of Sciences \\(^{3}\\) Baidu Inc" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.26, + 0.842, + 0.275 + ], + "angle": 0, + "content": "{yangzexian,wudayan,linzheng,gujingzi,wangweiping}@iie.ac.cn,wuchenming@baidu.com" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.309, + 0.314, + 0.327 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.342, + 0.474, + 0.721 + ], + "angle": 0, + "content": "Extensive advancements have been made in person ReID through the mining of semantic information. Nevertheless, existing methods that utilize semantic-parts from a single image modality do not explicitly achieve this goal. Whiteness the impressive capabilities in multimodal understanding of Vision Language Foundation Model CLIP, a recent two-stage CLIP-based method employs automated prompt engineering to obtain specific textual labels for classifying pedestrians. However, we note that the predefined soft prompts may be inadequate in expressing the entire visual context and struggle to generalize to unseen classes. This paper presents an end-to-end Prompt-driven Semantic Guidance (PromptSG) framework that harnesses the rich semantics inherent in CLIP. Specifically, we guide the model to attend to regions that are semantically faithful to the prompt. To provide personalized language descriptions for specific individuals, we propose learning pseudo tokens that represent specific visual contexts. This design not only facilitates learning fine-grained attribute information but also can inherently leverage language prompts during inference. Without requiring additional labeling efforts, our PromptSG achieves state-of-the-art by over \\(10\\%\\) on MSMT17 and nearly \\(5\\%\\) on the Market-1501 benchmark. The codes will be available at https://github.com/Yzxian16/PromptSG" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.747, + 0.21, + 0.763 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.772, + 0.47, + 0.879 + ], + "angle": 0, + "content": "Person Re-Identification (ReID) is a crucial research area in computer vision that focuses on identifying individuals across different camera views or time instances [4, 44, 45, 57], which is a sub-task of image-based retrieval. Features of the same individual, as captured by various cameras, are prone to alterations due to changes in lighting, background, and body posture. 
Consequently, the effectiveness of a so" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.31, + 0.634, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.518, + 0.361, + 0.56, + 0.371 + ], + "angle": 0, + "content": "(a) (b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.563, + 0.361, + 0.626, + 0.372 + ], + "angle": 0, + "content": "(c) (d)" + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.312, + 0.757, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.64, + 0.361, + 0.75, + 0.371 + ], + "angle": 0, + "content": "(a) (b) (c) (d)" + }, + { + "type": "image", + "bbox": [ + 0.76, + 0.312, + 0.883, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.767, + 0.361, + 0.88, + 0.372 + ], + "angle": 0, + "content": "(a) (b) (c) (d)" + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.378, + 0.626, + 0.389 + ], + "angle": 0, + "content": "Include background \"car\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.636, + 0.378, + 0.744, + 0.389 + ], + "angle": 0, + "content": "Include two pedestrians" + }, + { + "type": "image_caption", + "bbox": [ + 0.791, + 0.378, + 0.883, + 0.389 + ], + "angle": 0, + "content": "More detailed attire" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.403, + 0.892, + 0.46 + ], + "angle": 0, + "content": "Figure 1. Transformer visualization [2] of attention maps. (a) Original images, (b) CLIP-ReID, (c) Our method w/o inversion, and (d) Our method guided by the composed prompts captures both the exact semantic parts and the external appearance details." + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.471, + 0.888, + 0.622 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.637, + 0.892, + 0.707 + ], + "angle": 0, + "content": "Figure 2. The core idea of our method. Our method inverts input images into pseudo-word tokens \\( S_{*} \\), which are then composed into a textual prompt to describe the specific visual context. The attention map of patch tokens is further controlled by the semantics of the textual prompt." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.734, + 0.892, + 0.795 + ], + "angle": 0, + "content": "phisticated ReID model fundamentally depends on its capability to learn discriminative features that are impervious to camera-specific variations, thereby enhancing the model's capacity to generalize to previously unseen classes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Modern ReID models, constructed upon uni-modal architectures such as the Convolutional Neural Network (CNN) [22] or Vision Transformer (ViT) [14, 19, 35, 40], have made significant advancements within the field. A substantial portion of these solutions focus on the extraction of pertinent regions to rectify misalignment issues. These strategies are dedicated to the extraction of semantic" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.222, + 0.9 + ], + "angle": 0, + "content": "*Corresponding author." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17343" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.213 + ], + "angle": 0, + "content": "data, such as the human body structure, primarily facilitated through the integration of identity classification [33, 43] and metric learning [24, 55]. However, it is worth noting that these attention regions generally highlight only specific locally discriminative parts without explicit semantic control. When a distinct mask or skeleton direction is necessitated [27, 31], the need for additional, labor-intensive, and time-consuming manual labeling becomes inevitable." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.217, + 0.473, + 0.716 + ], + "angle": 0, + "content": "Large-scale Vision Language (VL) models, exemplified by Contrastive Language-Image Pre-Training (CLIP) [26], have recently shown remarkable abilities in reasoning across multi-modal data. CLIP model, when provided with text prompts such as 'A photo of a [CLASS]' displays exceptional zero-shot classification performance at the image level. This leads to a question: Can we further direct attention to regions of interest through natural language descriptions, such as 'A photo of a person'? However, due to the resulting visual representation lacking fine-grained information necessary for distinguishing between identities, integrating CLIP straightforwardly into person ReID is non-trivial. Additionally, the query 'A photo of a person' presents a challenge due to the absence of specific descriptors, thereby lacking a personalized prompt for individual identification. The pioneering CLIP-ReID [21] introduces automated prompt engineering on CLIP by incorporating additional ID-wise learnable vectors customized for specific identities. Particularly, CLIP-ReID employs a two-stage training process that first optimizes the learnable vectors with the frozen CLIP model, and then restricts the image encoder with the learned textual descriptions. However, the disentangled usage, i.e., only the visual embedding is utilized during inference, renders the learned soft prompts ineffective for unseen prompts. As a result, the attention regions potentially do not entirely encompass the body part, and may inadvertently include background elements, such as cars and additional pedestrians captured in the scene, as illustrated in the first two examples in Fig. 1(b). In addition, even though CLIP-ReID adheres to training objectives aimed at vision-language alignment, such predefined soft prompts may not be sufficient to characterize the entire visual context of the specified pedestrian." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.472, + 0.902 + ], + "angle": 0, + "content": "In this paper, we propose Prompt-driven Semantic Guidance (PromptSG), that aims to streamline the two-stage pipeline by leveraging the foundational CLIP model effectively and efficiently. As outlined in Fig. 2, our core insight is straightforward: we strive to activate CLIP's cross-modal comprehension using explicit language prompts, and the regions extracted can then be fine-tuned to enhance semantic discriminativeness. Specifically, given a textual prompt, we refine the patch tokens by injecting cross-attention maps, determining which patch attends to the corresponding semantics. 
Following this rationale, we revisit the fundamental issue that the term 'person' serves as a" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.439 + ], + "angle": 0, + "content": "coarse descriptor, lacking personalized language descriptions for individual identities. Beyond semantic information related to the 'person', appearance information is also crucial for identification purposes [5]. While semantic information aids the model in better body part localization, appearance information further refines the focus on an individual's attire. Hence, we employ the textual inversion technique [10], which learns to represent visual context through unique token. We use a lightweight inversion network that maps the image to a pseudo-token. This pseudo-token can then be incorporated into the textual prompt, creating an embedding that closely mirrors the original image. Compared to CLIP-ReID, our solution offers two primary advantages: 1) The textual prompt emphasizes regions in the image via a cross-attention map, capturing the precise semantic part (Fig. 1(c)), and can also be utilized for unseen classes during inference. 2) The model can learn the personal token of the query image in an end-to-end manner, providing more detailed guidance specific to an identity (Fig. 1(d)). Importantly, our proposed method is free, i.e. there is no need to supply additional information, such as masks, bounding boxes, or precise descriptions. We summarize the contribution of this paper as follows." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.44, + 0.892, + 0.528 + ], + "angle": 0, + "content": "- Leveraging the exceptional multi-modal reasoning capabilities of CLIP, we propose PromptSG, a novel framework for the person ReID task. This approach uniquely utilizes language prompts, providing explicit assistance to the visual encoder in efficiently capturing semantic information." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.53, + 0.892, + 0.588 + ], + "angle": 0, + "content": "- To create a more personalized description for the individual, we propose learning to represent the specific, more detailed appearance attributes, by employing the inversion network." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.59, + 0.892, + 0.665 + ], + "angle": 0, + "content": "- Without any additional labelling efforts, PromptSG surpasses previous SOTA method [21] by over \\(10\\%\\) on the MSMT17 dataset. It also exhibits superior performance on the Market-1501 benchmark, surpassing previous SOTA method [46] by nearly \\(5\\%\\)." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.44, + 0.892, + 0.665 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.679, + 0.642, + 0.695 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Person Re-identification remains an important yet challenging task due to the subtle inter-class differences. To learn more discriminative representations, a category of CNN-based techniques has primarily concentrated on optimizing the distance metric via metric learning [15, 33, 34, 37, 38]. Recognizing the importance of semantic information, a substantial body of research [3, 23, 31, 43] explores the use of attention mechanisms, which guide the network to extract attention-aware features for body parts. For example, AAnet [36] adopts a unified learning framework that incorporates attribute attention maps through extra attribute labels. 
Pioneering work TransReID [14] introduces a self-attention-based architecture, Vision Transformer (ViT) [8]," + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "17344" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.093, + 0.603, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.613, + 0.093, + 0.885, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.392, + 0.893, + 0.422 + ], + "angle": 0, + "content": "Figure 3. Overview of our framework. PromptSG learns pseudo token \\( S_{*} \\) from the specific visual embedding, and the visual encoder learns semantic faithful representations with the guidance of language prompts that occur in the Multimodal Interaction Module." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.447, + 0.47, + 0.612 + ], + "angle": 0, + "content": "for advancing ReID tasks. DCAL [56] proposes to implicitly extract the local features through a global-local cross-attention mechanism. However, these methods solely apply attention mechanisms to the visual modality, and the lack of explicit language guidance potentially constrains their performance. The work most relevant to ours, CLIP-ReID [21], is the first to utilize vision-language pre-training model CLIP in ReID task. However, CLIP-ReID fails to leverage the linguistic capability of the text encoder in CLIP during inference, since the ID-specific learnable tokens only influence the seen identities." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Large-scale vision-language pre-training model connects the image representation with text embedding in a shared embedding space, has demonstrated effectiveness across a wide range of uni-modal and multimodal downstream tasks. These include classification [6, 48], image captioning [25], and cross-modal retrieval [11, 16, 32, 42, 49]. Foundational VL models, such as CLIP, usually undergo training on extensive image-text pairs with contrastive learning objectives. This foundational pre-training provides the model with strong open-vocabulary classification capabilities. Inherited from prompt learning in NLP [18], CoOp [54] proposes to explore learnable prompt optimization on few-shot classification. Following this soft prompt approach, CLIP-ReID pioneers the adaptation of CLIP for person ReID by classifying images into ID-specific prompts. Differing from CLIP-ReID, which focuses on vision-language alignment, our goal is to exploit rich semantic information from language to explicitly control the weights assigned to each patch or region, and im" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.447, + 0.892, + 0.478 + ], + "angle": 0, + "content": "prove the two-stage framework by directly inverting images into the language latent space." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.479, + 0.893, + 0.646 + ], + "angle": 0, + "content": "Textual Inversion, originally for personalized text-to-image generation [10], is a learning approach that aims to discover new pseudo-words in the word-embedding space. These pseudo-words are capable of encapsulating both the overall visual content and intricate visual details. Recently, the application of textual inversion has expanded to zero-shot composed image retrieval task [1, 29]. In these studies, a textual inversion network is typically pre-trained using extensive unlabeled image datasets. 
In this work, we stand out as the first to apply this learning paradigm to person ReID without any additional training data." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.662, + 0.628, + 0.68 + ], + "angle": 0, + "content": "3. Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.893, + 0.903 + ], + "angle": 0, + "content": "Contrastive Language-Image Pre-training (CLIP) undergoes pre-training on a large corpus of image-text pairs, aligning visual and linguistic representations within a shared space through the matching of images with their corresponding text descriptions. Specifically, CLIP consists of a visual encoder \\(\\mathcal{V}(\\cdot)\\) and a text encoder \\(\\mathcal{T}(\\cdot)\\). The visual encoder \\(\\mathcal{V}(\\cdot)\\) takes an image \\(\\pmb{x} \\in \\mathbb{R}^{H \\times W \\times C}\\) as input. The text encoder \\(\\mathcal{T}(\\cdot)\\) takes a tokenized textual description \\(t \\in \\mathbb{R}^{N \\times D}\\) as input, where \\(N, D\\) are the text's length and token feature dimension respectively. The pre-training objective is based on self-supervised contrastive learning, which minimizes cosine distance for matched image-text pairs. For the downstream tasks such as classification, the description of \\(j\\)-th class is typically obtained through the" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17345" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.136 + ], + "angle": 0, + "content": "hand-crafted prompt, e.g., 'A photo of a [CLASS]'. Therefore, the probability of image \\( x \\) being classified as class \\( y \\) can be computed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.125, + 0.159, + 0.469, + 0.197 + ], + "angle": 0, + "content": "\\[\n\\mathcal {P} (y | \\boldsymbol {x}) = \\frac {\\exp (\\operatorname {s i m} (\\mathcal {V} (\\boldsymbol {x}) , \\mathcal {T} (\\boldsymbol {t} _ {\\boldsymbol {y}})) / \\tau)}{\\sum_ {j = 1} ^ {K} \\exp (\\operatorname {s i m} (\\mathcal {V} (\\boldsymbol {x}), \\mathcal {T} (\\boldsymbol {t} _ {\\boldsymbol {j}})) / \\tau)}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.208, + 0.468, + 0.24 + ], + "angle": 0, + "content": "where \\(\\tau\\) denotes the temperature, and \\(\\mathrm{sim}(a,b) = \\frac{a\\cdot b}{\\|a\\|_2\\|b\\|_2}\\) is the cosine similarity." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.242, + 0.469, + 0.407 + ], + "angle": 0, + "content": "A simple approach to applying CLIP to person ReID involves substituting the linear classifier with image-to-text classification. However, given that labels in ReID tasks are solely index-based, there are no specific words to represent different persons. To tackle this challenge, CLIP-ReID crafts the prompt as 'A photo of a \\([X_i]_1[X_i]_2[X_i]_3\\ldots[X_i]_M\\) person', where \\([X_i]_m, m \\in \\{1,\\dots,M\\}\\) represents a set of ID-specific learnable tokens for the \\(i\\)-th ID. Nevertheless, CLIP-ReID optimizes ID-specific prompts exclusively bound to training IDs, it overlooks the chance to fully exploit the open-vocabulary capabilities inherent in CLIP." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.42, + 0.169, + 0.435 + ], + "angle": 0, + "content": "4. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.446, + 0.469, + 0.611 + ], + "angle": 0, + "content": "An overview of our framework is depicted in Fig. 3. 
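As a concrete illustration of Eq. (1) above (CLIP's image-to-text classification probability), the sketch below computes P(y|x) from an image embedding and per-class prompt embeddings. The function name and the temperature value are assumptions for illustration only; the visual and text encoders are treated as black boxes whose outputs are already available.

```python
# Minimal sketch of Eq. (1): softmax over cosine similarities between V(x) and T(t_j).
import torch
import torch.nn.functional as F


def clip_class_probs(image_emb: torch.Tensor,
                     class_text_embs: torch.Tensor,
                     tau: float = 0.01) -> torch.Tensor:
    """image_emb: (D,) = V(x); class_text_embs: (K, D) = T(t_1..t_K); tau is an assumed temperature."""
    img = F.normalize(image_emb, dim=-1)
    txt = F.normalize(class_text_embs, dim=-1)
    sims = txt @ img                      # (K,) cosine similarities sim(V(x), T(t_j))
    return F.softmax(sims / tau, dim=-1)  # P(y | x) over the K candidate prompts
```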
Starting with the visual embeddings derived from CLIP's visual encoder, our approach employs an inversion network to learn pseudo tokens that encapsulate the visual context. Following this, an interaction between visual and textual modalities is facilitated in the interaction module, leading to the final re-weighted representations. During the inference phase, we are presented with two options for textual inputs: an efficiency-driven simplified prompt and an accuracy-driven composed prompt. Note that the text encoder is frozen in our entire framework." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.621, + 0.468, + 0.638 + ], + "angle": 0, + "content": "4.1. Learning the Personalized ID-Specific Prompt" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.645, + 0.469, + 0.765 + ], + "angle": 0, + "content": "As suggested by prior research, the word-embedding space possesses sufficient expressiveness to encapsulate basic image concepts [7]. However, the inherent limitation lies in the pre-defined prompts in CLIP-ReID, which can only capture limited attributes and may not fully encapsulate the visual context. Contrarily, we propose learning the pseudo token by textual inversion technique that aligns with the context of the query image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Let \\( f_{\\theta}(\\cdot) \\) denote an inversion network parameterized by \\( \\theta \\), our goal is to invert the global visual embedding \\( \\mathbf{v} \\) from visual space of CLIP, represented as \\( \\mathbf{v} \\in V \\), into a pseudo token \\( s_* \\in T_* \\) by \\( f_{\\theta}(\\mathbf{v}) = s_* \\), where \\( T_* \\) indicates the token embedding space. Subsequently, this pseudo token can be integrated into natural language sentences. As such, the language prompt for the input image is structured as 'A photo of a \\( s_* \\) person'. It is worth noting that this pseudo-token bears no relationship to an actual word but functions as a" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.212 + ], + "angle": 0, + "content": "representation in the token embedding space. An input language prompt undergoes a tokenization process, resulting in several tokens. The tokenized prompt, denoted as \\( t_p \\), can be fed into the text encoder of CLIP to obtain text embedding \\( l_p = \\mathcal{T}(t_p) \\). To ensure that the learned pseudo-token effectively tells the context of the image, one can follow to the reconstruction objective of textual inversion by the symmetric contrastive loss, which is formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.544, + 0.237, + 0.892, + 0.279 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {i 2 t} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\log \\frac {\\exp (\\sin (\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {p}) / \\tau)}{\\sum_ {i = 1} ^ {N} \\exp (\\sin (\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {i}) / \\tau)}, \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.544, + 0.29, + 0.892, + 0.332 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {t 2 i} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\log \\frac {\\exp \\left(\\sin \\left(\\boldsymbol {l} _ {n} , \\boldsymbol {v} _ {p}\\right) / \\tau\\right)}{\\sum_ {i = 1} ^ {N} \\exp \\left(\\sin \\left(\\boldsymbol {l} _ {n} , \\boldsymbol {v} _ {i}\\right) / \\tau\\right)}. 
\\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.337, + 0.892, + 0.381 + ], + "angle": 0, + "content": "In this context, \\( v_{i} \\) or \\( l_{i} \\) represents the \\( i \\)-th image/text embedding in a batch. \\( l_{p} \\) is the corresponding prompt embedding for \\( v_{n} \\) and is constructed in a manner analogous to \\( v_{p} \\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.382, + 0.892, + 0.532 + ], + "angle": 0, + "content": "The underlying mechanism is grounded in the principle of cycle-consistency, wherein a pseudo token tends to faithfully represent the context of the image only when the text features closely align with corresponding image features. However, the contrastive loss fails to handle cases where images with the same ID are supposed to share the same appearance. Therefore, we aim to encourage the pseudo token to capture visual details exclusive to the same identity. To this end, we exploit the symmetric supervised contrastive loss as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.616, + 0.543, + 0.892, + 0.563 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\operatorname {S u p C o n}} = \\mathcal {L} _ {i 2 t} ^ {\\operatorname {S u p}} + \\mathcal {L} _ {t 2 i} ^ {\\operatorname {S u p}}. \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.574, + 0.892, + 0.631 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {i 2 t} ^ {\\operatorname {S u p}} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sum_ {p ^ {+} \\in P (i)} \\log \\frac {\\exp \\left(\\operatorname {s i m} \\left(\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {\\boldsymbol {p} ^ {+}}\\right) / \\tau\\right)}{\\sum_ {i = 1} ^ {N} \\exp \\left(\\operatorname {s i m} \\left(\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {i}\\right) / \\tau\\right)}, \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.517, + 0.631, + 0.892, + 0.687 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {t 2 i} ^ {\\operatorname {S u p}} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sum_ {p ^ {+} \\in P (i)} \\log \\frac {\\exp \\left(\\sin \\left(l _ {n} , v _ {p ^ {+}}\\right) / \\tau\\right)}{\\sum_ {i = 1} ^ {N} \\exp \\left(\\sin \\left(l _ {n} , v _ {i}\\right) / \\tau\\right)}. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.687, + 0.892, + 0.703 + ], + "angle": 0, + "content": "where \\(P(i)\\) represents the positive samples related to \\(v_{n},l_{n}\\)" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.712, + 0.805, + 0.728 + ], + "angle": 0, + "content": "4.2. Prompt-driven Semantic Guidance" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.735, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We refine the language prompt by incorporating the pseudo-token that is linked to the identity, enhancing its ability to convey a more specific visual context for the image. Our commitment extends to meticulously directing the image feature through language. At the core of our approach lies the idea of semantic guidance, wherein we explicitly determine which region of the image aligns with the language prompt. Intuitively, image patches corresponding to the semantic \"person\" should inherently have substantial influence to facilitate discrimination. 
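To make Sec. 4.1 concrete, a sketch is given below of (i) a lightweight inversion network f_theta that maps the global visual embedding to a pseudo-token S_* (a three-layer MLP with a final BatchNorm, following the description in Sec. 5.1), and (ii) an image-to-text supervised contrastive term in the spirit of Eqs. (5)-(6). Class and function names, hidden sizes, ReLU activations, the temperature, and the negative-log, per-positive averaging convention are assumptions, not taken from the paper or its released code.

```python
# Minimal sketch (assumed details) of the Sec. 4.1 components.
import torch
import torch.nn as nn
import torch.nn.functional as F


class InversionNetwork(nn.Module):
    """f_theta: CLIP global visual embedding -> pseudo word token S_* for 'A photo of a S_* person'."""

    def __init__(self, dim: int = 512, token_dim: int = 512):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(dim, 512), nn.ReLU(),
            nn.Linear(512, 512), nn.ReLU(),
            nn.Linear(512, token_dim),
            nn.BatchNorm1d(token_dim),  # BN after the last state, as described in Sec. 5.1
        )

    def forward(self, v: torch.Tensor) -> torch.Tensor:
        return self.mlp(v)  # (N, token_dim) pseudo-tokens


def sup_contrastive(img_embs, txt_embs, labels, tau: float = 0.07):
    """One direction of the symmetric supervised contrastive loss (cf. Eq. (5))."""
    img = F.normalize(img_embs, dim=-1)
    txt = F.normalize(txt_embs, dim=-1)
    logits = img @ txt.t() / tau                                   # (N, N): sim(v_n, l_i) / tau
    log_prob = logits - torch.logsumexp(logits, dim=1, keepdim=True)
    pos = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()     # P(i): same-identity pairs
    return -(pos * log_prob).sum(1).div(pos.sum(1)).mean()
```

Calling `sup_contrastive` in both directions (image-to-text and text-to-image) and summing the two terms gives L_SupCon as in Eq. (4).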
As opposed to the interaction between patches in self-attention layers within a" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "17346" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "single modality. Based on this observation, we explore a patch-to-prompt interaction that occurs in multi-modality." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.123, + 0.471, + 0.349 + ], + "angle": 0, + "content": "In particular, we employ a language-guided cross-attention module, which uses the textual embedding as query and the patch-wise embedding of the visual encoder as key and value. More formally, given a pair of image and prompt \\((\\pmb{x},\\pmb{t_p})\\), we first feed the image \\(\\pmb{x}\\) into the visual encoder, yielding in a sequence of patch embeddings \\(\\{\\tilde{\\pmb{v}},\\pmb{v}_1,\\dots,\\pmb{v}_M\\}\\). Here, \\(\\tilde{\\pmb{v}}\\) denotes the global visual embedding, while remaining \\(\\pmb{v}_i,i\\in [1,M]\\) belong to the local patch embeddings. In a similar vein, the prompt is fed into the text encoder to derive the text embedding \\(l_{p}\\). Subsequently, the text embedding is projected onto a query matrix \\(Q\\) and patch embeddings are projected to a key matrix \\(K\\) and a value matrix \\(V\\), via three different linear-projection layers. As such, the patch-to-prompt interaction can be achieved by:" + }, + { + "type": "equation", + "bbox": [ + 0.157, + 0.36, + 0.47, + 0.395 + ], + "angle": 0, + "content": "\\[\n\\mathrm {A} (Q, K, V) = \\operatorname {S o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {d}}\\right) V. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.401, + 0.47, + 0.506 + ], + "angle": 0, + "content": "This interaction aggregates the attention map to highlight the regions of high semantic response. Drawing from multimodal fusion methods [9], we incorporate two transformer blocks following the cross-attention layer to derive final representations. Ultimately, we utilize the standard ReID loss, i.e., the triplet loss and identity classification loss [12], to optimize our framework." + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.519, + 0.47, + 0.56 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {I D}} = \\frac {1}{K} \\sum_ {j = 1} ^ {K} y _ {j} \\log p _ {j}, \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.164, + 0.575, + 0.469, + 0.591 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {T r i p l e t}} = \\max \\left(d _ {p} - d _ {n} + m, 0\\right), \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.599, + 0.469, + 0.629 + ], + "angle": 0, + "content": "where \\( p_j \\) is the prediction probability for the \\( j \\)-th class, and \\( m \\) denotes the margin." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.639, + 0.327, + 0.655 + ], + "angle": 0, + "content": "4.3. Optimization and Inference" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.662, + 0.469, + 0.692 + ], + "angle": 0, + "content": "Taining optimization. In summary, the overall objective function for our framework is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.17, + 0.706, + 0.469, + 0.723 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {\\text {T r i p l e t}} + \\mathcal {L} _ {\\mathrm {I D}} + \\lambda \\mathcal {L} _ {\\operatorname {S u p C o n}}. 
\\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.735, + 0.469, + 0.78 + ], + "angle": 0, + "content": "Similar to CLIP-ReID, the final hidden states of the vision transformer, in conjunction the preceding two layer states, are also employed to calculate \\(\\mathcal{L}_{\\mathrm{Triplet}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Improved inference efficiency. Our approach involves the query-specific pseudo token with the textual prompt, which essentially doubles the inference time compared to using only the visual encoder. Fortunately, our empirical findings suggest that providing only 'A photo of a person' as a simplified guideline yields comparable results. In this way, there will be no increase in the inference time caused by the text encoder." + }, + { + "type": "table", + "bbox": [ + 0.548, + 0.089, + 0.848, + 0.169 + ], + "angle": 0, + "content": "
Dataset#IDImagesCams
Market-15011,50132,6686
MSMT174,101126,44115
DukeMTMC1,40436,4118
CUHK03-NP1,46713,1642
" + }, + { + "type": "table_caption", + "bbox": [ + 0.542, + 0.179, + 0.85, + 0.193 + ], + "angle": 0, + "content": "Table 1. The statistics of dataset in our experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.217, + 0.633, + 0.234 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.242, + 0.702, + 0.259 + ], + "angle": 0, + "content": "5.1. Experimental Setting" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.266, + 0.892, + 0.386 + ], + "angle": 0, + "content": "Datasets and Evaluation Protocols. To evaluate and compare various methods, four extensive person re-identification datasets Market-1501 [51], MSMT17 [41], DukeMTMC [52] and CUHK03-NP [22] are exploited. Dataset stats are in Tab. 1. In line with conventions in the ReID community [13], two commonly used metrics, i.e., mean Average Precision (mAP) and Rank-1(R-1) accuracy, are used to evaluate the performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.386, + 0.893, + 0.687 + ], + "angle": 0, + "content": "Implementation Details. In alignment with prior research, we employ both ResNet-50 and ViT-B/16 Pre-trained from CLIP as our visual encoder and a pre-trained text encoder, i.e., CLIP text Transformer. Our framework additionally features a random-initialized inversion network and a multimodal interaction module. The inversion network is a lightweight model employing a three-layered MLP of 512-dimensional hidden state. A Batch Normalization (BN) layer [17] is placed after the last state of the network. The batch size is configured to 64, encompassing 16 identities with 4 images per identity. All input images are resized to \\(256 \\times 128\\). We use the Adam optimizer with a learning rate of 5e-6 for the visual encoder, whereas the learning rate for random-initialized modules is set to 5e-5. We find \\(\\lambda\\) in Eq. (10) is not sensitive and performs well across a broad range, thus we consistently set \\(\\lambda = 0.5\\) for all datasets. The model is trained for 60 epochs, with a learning rate decay factor of 0.1 for every 20 epochs. The entire framework is implemented using PyTorch and runs on a single NVIDIA RTX3090 GPU with 24GB VRAM." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.688, + 0.892, + 0.809 + ], + "angle": 0, + "content": "Baseline. Most existing approaches are built upon the strong ReID baseline presented in [24]. Specifically, they employ an ImageNet-21k pre-trained CNN model or ViT as the backbone and incorporate ID loss and triplet loss as crucial components. In contrast, our baseline model deviates by leveraging the pre-trained CLIP model and we fine-tune the visual encoder of CLIP by directly applying the two commonly-used losses." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.818, + 0.866, + 0.833 + ], + "angle": 0, + "content": "5.2. Comparison with State-of-the-art Methods" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We benchmark PromptSG against the current state-of-the-art, which can generally be divided into three categories: CNN-based, ViT-based, and CLIP-based methods. Tab. 2 summarizes the main results on four widely" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17347" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.122, + 0.089, + 0.85, + 0.44 + ], + "angle": 0, + "content": "
BackboneMethodReferenceMarket-1501MSMT17DukeMTMCCUHK03-NP
mAPR-1mAPR-1mAPR-1mAPR-1
ResNet50CNN-based method
OSNeT [53]ICCV'1984.994.852.978.773.588.6--
ISP [57]ECCV'2084.994.2--75.686.974.176.5
RGA-SC [50]CVPR'2088.496.157.580.3--77.481.1
CDNet [20]CVPR'2186.095.154.778.976.888.6--
CAL [28]ICCV'2187.094.556.279.576.487.6--
ALDER* [47]TIP'2188.995.659.182.578.989.978.781.0
LTRID* [39]TMM'2286.994.758.681.080.490.580.382.1
CLIP-based method
Baseline88.194.760.782.179.388.677.679.1
CLIP-ReID [21]AAAI'2389.895.763.084.480.790.078.279.4
PromptSGOurs91.896.668.586.080.490.279.880.5
ViT-B/16ViT-based method
TransReID [14]ICCV'2188.995.267.485.382.090.779.681.7
DCAL [56]CVPR'2287.594.764.083.180.189.0--
AAformer [58]TNNLS'2388.095.465.684.480.990.179.080.3
PHA [46]CVPR'2390.296.168.986.1--83.084.5
CLIP-based method
Baseline86.493.366.184.480.088.880.080.5
CLIP-ReID [21]AAAI'2389.695.573.488.782.590.081.680.9
PromptSGOurs94.697.087.292.681.691.083.185.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.451, + 0.892, + 0.491 + ], + "angle": 0, + "content": "Table 2. Comparison with the state-of-the-art models on Market-1501, MSMT17, DukeMTMC, and CUHK03-NP (labeled) datasets. The superscript star* indicates that the image is resized to a resolution exceeding 256x128. All results are reported without re-ranking. Color Red and blue: the best and second-best results." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.52, + 0.469, + 0.595 + ], + "angle": 0, + "content": "used person ReID datasets. We observe that our proposed PromptSG attains the best results and sets a new state-of-the-art performance. Remarkably, PromptSG achieves over \\(10\\%\\) improvement on MSMT17 and nearly \\(5\\%\\) on Market-1501, surpassing previous state-of-the-art results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.597, + 0.469, + 0.732 + ], + "angle": 0, + "content": "Compared with ViT-based method. Pioneering work TransReID [14] sets a strong baseline for the ViT-based method by leveraging the potentials of the transformer. Building upon this groundwork, PHA [46] further enhances the preservation of key high-frequency elements in images. In contrast to existing ViT-based methods that only capture the patch-wise uni-modal information, our PromptSG method demonstrates that the interaction of different modalities can improve the performance of individual modalities." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.734, + 0.469, + 0.869 + ], + "angle": 0, + "content": "Compared with CLIP-based method. Compared with the competing CLIP-based method CLIP-ReID, our PromptSG outperforms it by \\(5.0\\% / 1.5\\%\\) and \\(13.8\\% / 3.9\\%\\) mAP/Rank-1 on Market-1501 and MSMT17 datasets when taking ViT-B/16 as visual backbone. A key distinction between CLIP-ReID and our approach resides in the composition of the query-specific pseudo-token. Our results further underscore that incorporating textual information during the inference process can also enhance performance." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Compared with CNN-based method. To ensure a fair comparison, we also implement PromptSG with a ResNet-" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.52, + 0.892, + 0.61 + ], + "angle": 0, + "content": "50 backbone. Apart from LTReID [39] that utilize higher resolution images, our method consistently surpasses other methods by a significant margin, especially on the most challenging person ReID dataset, MSMT17. This highlights the robustness and superiority of our approach across various architectures." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.621, + 0.655, + 0.637 + ], + "angle": 0, + "content": "5.3. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.644, + 0.892, + 0.703 + ], + "angle": 0, + "content": "In the following, we conduct an ablation study on the essential elements of PromptSG on Market-1501 and MSMT17 datasets, and all the experiments are conducted on the ViT-B/16 backbone." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Contributions from Different Components. To assess the contribution of various components, we conduct ablation experiments by removing one component at a time. Recall that \\(\\mathcal{L}_{i2t}^{\\mathrm{sup}}\\) and \\(\\mathcal{L}_{t2i}^{\\mathrm{sup}}\\) are the supervised contrastive losses in Eq. (2), Eq. 
(3) respectively, and MIM denotes the multimodal interaction module. Comparing rows b) and c) with a), we see a similar conclusion where the removal of text-to-image or image-to-text contrastive loss leads to a decent improvement on both datasets. Further comparing rows a) and d), we observe that the removal of semantic information leads to a larger decrease than solely removing ID-specific appearance information. Notably, as seen in row a), our full model, PromptSG, utilizes both semantic and appearance" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "17348" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.08, + 0.089, + 0.477, + 0.201 + ], + "angle": 0, + "content": "
ComponentsMarket-1501MSMT17
\\( {\\mathcal{L}}_{i2t}^{\\text{Sup }} \\)\\( {\\mathcal{L}}_{t2i}^{\\text{Sup }} \\)MIMmAPR-1mAPR-1
a)94.697.087.292.6
b)92.896.785.291.9
c)93.096.784.590.2
d)89.495.371.487.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.078, + 0.212, + 0.471, + 0.24 + ], + "angle": 0, + "content": "Table 3. Ablation study on the effectiveness of each component of PromptSG on Market-1501 and MSMT17." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.255, + 0.468, + 0.327 + ], + "angle": 0, + "content": "
MethodMarket-1501MSMT17
mAPR-1mAPR-1
Training w/ composed94.697.087.292.6
Training w/o composed92.096.385.391.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.338, + 0.468, + 0.366 + ], + "angle": 0, + "content": "Table 4. Ablation of training with or without composing the pseudo token on Market-1501 and MSMT17." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.387, + 0.468, + 0.461 + ], + "angle": 0, + "content": "language supervision during training, achieving a substantial improvement of over a point. The overall conclusion supports that language guidance, through both semantic and appearance cues, plays a crucial role in improving the performance of our model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.463, + 0.468, + 0.704 + ], + "angle": 0, + "content": "Ablation Study on the Personalized Prompt. To better understand whether the learned pseudo tokens \\( s_* \\) can provide more granular guidance for learning visual embeddings, we train a strong baseline model, where the textual prompt dose not composed with the \\( s_* \\) during training and testing, but instead relies on the simplified prompt \"A photo of a person\" for semantic guidance. Note that we will not use the symmetric supervised contrastive loss in this case. Results in Tab. 4 imply that composing the \\( s_* \\) has a significant impact on the overall performance. When \\( s_* \\) is removed from the training process, the performance decreases by \\( 1.9\\% \\) to \\( 2.6\\% \\) in terms of mAP. Although we focus on the uni-modal re-identification task, the above formulation could potentially be applied to multimodal test sets, such as text-to-image person retrieval by composing the image feature with the text to achieve better alignment." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Ablation Study on the Interaction Module. We analyze the impact of different designs of the interaction module on performance and inference speed, as well as the impact of not using a composed prompt during inference. Notably, personalized prompts are consistently included during the training. As shown in Tab. 5, without an attention module (w/o attention module), the model achieves a baseline performance, with inference speed being dependent solely on the visual encoder. Introducing a single cross-attention layer (+1 cross-layer) shows a notable performance improvement, indicating the positive effect of incorporating a cross-layer design. Notably, performances can be stably improved with more self-attention layers, but at the cost of" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.089, + 0.892, + 0.213 + ], + "angle": 0, + "content": "
Training ModelInference Model
Visual EncoderText EncoderFPS ↑mAP
w/o attention module-1x89.4
+1 cross-layer-0.95x91.1
+1 cross & 1 self-layer-0.91x93.0
+1 cross & 2 self-layer0.48x94.6
+1 cross & 2 self-layer-0.88x94.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.223, + 0.892, + 0.293 + ], + "angle": 0, + "content": "Table 5. The impact of various interaction modules and efficiency comparison with different inference models on Market-1501. Cross and self means cross-attention and self-attention, respectively. FPS denotes the quantity of images processed by the model in one second." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.31, + 0.885, + 0.44 + ], + "angle": 0, + "content": "
Method#Params#Params %CLIPTraining Times ↓
(a) Market-1501
CLIP-ReID89M0.714689s1x
PromptSG94M0.752417s0.51x
(b) MSMT17
CLIP-ReID90M0.7312904s1x
PromptSG94M0.756108s0.47x
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.451, + 0.892, + 0.507 + ], + "angle": 0, + "content": "Table 6. Comparison of training times and the number of parameters on Market-1501 and MSMT17. #Params denotes the number of learnable parameters in the whole framework. All models are evaluated on a single 3090Ti GPU." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.537, + 0.892, + 0.718 + ], + "angle": 0, + "content": "lower inference efficiency. Furthermore, our analysis illuminates the impact of employing a composed prompt during the inference phase, revealing that when we follow the same procedure as the training stage—composing text with query images—the Frames Per Second (FPS) is only 0.48 times that of the baseline. This is expected as we need to pass through two encoders for each query. However, we empirically discovered that using a fixed prompt “A photo of a person” for all queries may not lead to significant performance degradation, and it does not compromise efficiency. Therefore, one could opt for this version to achieve a more favorable balance between accuracy and efficiency." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Comparison of training efficiency. In order to showcase the efficiency of our proposed approach, we carry out a comparative analysis between our one-stage PromptSG and the two-stage CLIP-ReID method, focusing on the number of learnable parameters and training speed. The details of this comparison are provided in Tab. 6. In terms of training parameters, on top of CLIP, CLIP-ReID incorporates an additional of parameters mainly through the ID-wise learnable prompt, our approach primarily extends through a fixed-size mapping network and an interaction module. 
Despite CLIP-ReID having \\(2\\% -4\\%\\) fewer parameters than ours on two datasets, it may experience continuous growth in parame" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "17349" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.232, + 0.102, + 0.338, + 0.119 + ], + "angle": 0, + "content": "Market-1501" + }, + { + "type": "image", + "bbox": [ + 0.098, + 0.128, + 0.139, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.143, + 0.128, + 0.184, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.187, + 0.128, + 0.23, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.235, + 0.128, + 0.275, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.29, + 0.128, + 0.332, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.128, + 0.377, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.381, + 0.128, + 0.422, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.426, + 0.128, + 0.468, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.098, + 0.211, + 0.139, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.107, + 0.28, + 0.128, + 0.293 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.143, + 0.211, + 0.184, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.155, + 0.28, + 0.174, + 0.292 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.188, + 0.211, + 0.23, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.2, + 0.28, + 0.218, + 0.292 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.235, + 0.211, + 0.275, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.2, + 0.28, + 0.218, + 0.292 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.291, + 0.212, + 0.332, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.301, + 0.28, + 0.318, + 0.292 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.212, + 0.377, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.28, + 0.365, + 0.292 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.212, + 0.421, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.392, + 0.28, + 0.409, + 0.292 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.426, + 0.212, + 0.468, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.437, + 0.28, + 0.456, + 0.292 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "title", + "bbox": [ + 0.653, + 0.103, + 0.733, + 0.118 + ], + "angle": 0, + "content": "MSMT17" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.126, + 0.545, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.548, + 0.127, + 0.591, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.595, + 0.128, + 0.638, + 0.193 + ], + 
"angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.128, + 0.682, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.128, + 0.739, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.744, + 0.128, + 0.786, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.79, + 0.128, + 0.83, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.833, + 0.128, + 0.875, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.211, + 0.545, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.548, + 0.212, + 0.591, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.212, + 0.636, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.64, + 0.212, + 0.682, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.211, + 0.739, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.743, + 0.212, + 0.785, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.788, + 0.212, + 0.829, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.833, + 0.211, + 0.875, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.706, + 0.279, + 0.727, + 0.292 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image_caption", + "bbox": [ + 0.753, + 0.279, + 0.772, + 0.291 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.799, + 0.279, + 0.817, + 0.291 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.845, + 0.279, + 0.864, + 0.291 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.32, + 0.892, + 0.349 + ], + "angle": 0, + "content": "Figure 4. Transformer visualization of attention maps. (a) Original images, (b) CLIP-ReID, (c) PromptSG without composed training, (d) PromptSG. We see our method is effective in simultaneously focusing on the semantic clues and exploring more discriminative parts." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.375, + 0.468, + 0.466 + ], + "angle": 0, + "content": "ters in scenarios with a higher number of classes or evolving dynamics. In contrast, PromptSG demonstrates stronger robustness in the number of parameters and achieves significant faster training speed. It can achieve a speedup of approximately 2 times faster during training compared to the two-stage method." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.482, + 0.271, + 0.499 + ], + "angle": 0, + "content": "6. Qualitative Analysis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.508, + 0.468, + 0.719 + ], + "angle": 0, + "content": "To have an intuitive understanding and validate the effectiveness of our method, we conduct a qualitative analysis where visualization of attention maps is presented in Fig. 4. Specifically, we exhibit examples from Market-1501 and MSMT17 datasets, each with two training images and two gallery images. We carefully selected some challenging examples, including those with complex backgrounds or images depicting multiple individuals. 
To gain a better insight into the regions of interest attended by the model in zero-shot scenarios, we do not use the common protocol GramCAM [30], as it needs the class-prediction scores and might be considered less suitable for Transformer-type backbones. Following [21], we use the Transformer-interpretability method in [2]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.468, + 0.901 + ], + "angle": 0, + "content": "We compare our (d) PromptSG with (b) CLIP-ReID and (c) PromptSG without image-composed training. It can be seen that our method exhibits significant effectiveness, as it adeptly captures semantic information while also concentrating on more detailed appearance details. For example, in the first row of the Market-1501 dataset, the attention map of CLIP-ReID is susceptible to interference from background elements like \"car\". On the other hand, PromptSG w/o composed training tends to emphasize semantic information related to the 'person,' focusing on the location of the head, arms, and legs. In contrast, our method goes beyond this by also exploring appearance features, such as" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.375, + 0.892, + 0.45 + ], + "angle": 0, + "content": "identifying individuals wearing hats or carrying backpacks. Finally, in the first row examples of MSMT17, where additional pedestrians appear in the image, our method excels in effectively filtering out unnecessary pedestrians, while CLIP-ReID fails." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.466, + 0.619, + 0.482 + ], + "angle": 0, + "content": "7. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.492, + 0.892, + 0.658 + ], + "angle": 0, + "content": "In this paper, we propose PromptSG, a simple yet effective framework that exploits the foundational model CLIP for the person ReID task. We show that language guidance is an effective way to adapt pre-trained multimodal models for the uni-modal retrieval tasks. Through leveraging the aligned multi-modal latent space provided by CLIP, the textual prompt \"A photo of a person\" can naturally address the challenge of the visual encoder in its struggle to capture semantic information. To probe more fine-grained appearance features, we incorporate an inversion network to learn pseudo tokens that describe the image context." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.78 + ], + "angle": 0, + "content": "Discussion and Limitation. Despite the considerable potential of language prompt learning in ReID tasks, prompt learning in the vision branch remains a largely untapped area. Fine-tuning the visual encoder for strong supervised performance may lead to poor zero-shot generalization. We hope our work can inspire future research on fully unleashing the potential of large foundation models in challenging ReID tasks." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.781, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Acknowledgments This work was supported by the National Key R&D Program of China under Grant 2022YFB3103500, the National Natural Science Foundation of China under Grants 62106258, 62006242 and 62202459, and the China Postdoctoral Science Foundation under Grant 2022M713348 and 2022TQ0363, and Young Elite Scientists Sponsorship Program by CAST (2023QNRC001) and BAST (NO.BYESS2023304)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17350" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Alberto Baldrati, Lorenzo Agnolucci, Marco Bertini, and Alberto Del Bimbo. Zero-shot composed image retrieval with textual inversion. In Proceedings of the IEEE international conference on computer vision (ICCV), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.228 + ], + "angle": 0, + "content": "[2] Hila Chefer, Shir Gur, and Lior Wolf. Transformer interpretability beyond attention visualization. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2021. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.231, + 0.471, + 0.286 + ], + "angle": 0, + "content": "[3] Binghui Chen, Weihong Deng, and Jiani Hu. Mixed high-order attention network for person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.289, + 0.471, + 0.357 + ], + "angle": 0, + "content": "[4] Tianlong Chen, Shaojin Ding, Jingyi Xie, Ye Yuan, Wuyang Chen, Yang Yang, Zhou Ren, and Zhangyang Wang. Abdnet: Attentive but diverse person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.36, + 0.472, + 0.444 + ], + "angle": 0, + "content": "[5] Weihua Chen, Xianzhe Xu, Jian Jia, Hao Luo, Yaohua Wang, Fan Wang, Rong Jin, and Xiuyu Sun. Beyond appearance: a semantic controllable self-supervised learning framework for human-centric visual tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.446, + 0.47, + 0.515 + ], + "angle": 0, + "content": "[6] Xiaohua Chen, Yucan Zhou, Dayan Wu, Chule Yang, Bo Li, Qinghua Hu, and Weiping Wang. Area: adaptive reweighting via effective area for long-tailed classification. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.518, + 0.471, + 0.585 + ], + "angle": 0, + "content": "[7] Niv Cohen, Rinon Gal, Eli A Meirom, Gal Chechik, and Yuval Atzmon. \"this is my unicorn, fluffy\": Personalizing frozen vision-language representations. In Proceedings of the European conference on computer vision (ECCV), 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.589, + 0.47, + 0.672 + ], + "angle": 0, + "content": "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.675, + 0.47, + 0.757 + ], + "angle": 0, + "content": "[9] Zi-Yi Dou, Yichong Xu, Zhe Gan, Jianfeng Wang, Shuohang Wang, Lijuan Wang, Chenguang Zhu, Pengchuan Zhang, Lu Yuan, Nanyun Peng, et al. An empirical study of training end-to-end vision-and-language transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.76, + 0.47, + 0.829 + ], + "angle": 0, + "content": "[10] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit Haim Bermano, Gal Chechik, and Daniel Cohen-or. An image is worth one word: Personalizing text-to-image generation using textual inversion. In International Conference on Learning Representations (ICLR), 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.831, + 0.47, + 0.9 + ], + "angle": 0, + "content": "[11] Xiaoshuai Hao, Wanqian Zhang, Dayan Wu, Fei Zhu, and Bo Li. Dual alignment unsupervised domain adaptation for video-text retrieval. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2023. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[12] Lingxiao He, Xingyu Liao, Wu Liu, Xinchen Liu, Peng Cheng, and Tao Mei. Fastreid: A pytorch toolbox for general instance re-identification. arXiv preprint arXiv:2006.02631, 2020.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.151, + 0.894, + 0.219 + ], + "angle": 0, + "content": "[13] Lingxiao He, Xingyu Liao, Wu Liu, Xinchen Liu, Peng Cheng, and Tao Mei. Fastreid: A pytorch toolbox for general instance re-identification. In Proceedings of the 31st ACM International Conference on Multimedia (ACM MM), 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.223, + 0.892, + 0.278 + ], + "angle": 0, + "content": "[14] Shuting He, Hao Luo, Pichao Wang, Fan Wang, Hao Li, and Wei Jiang. Transreid: Transformer-based object re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2021. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.281, + 0.892, + 0.349 + ], + "angle": 0, + "content": "[15] Pingting Hong, Dayan Wu, Bo Li, and Weiping Wang. Camera-specific informative data augmentation module for unbalanced person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.353, + 0.892, + 0.422 + ], + "angle": 0, + "content": "[16] Siteng Huang, Biao Gong, Yulin Pan, Jianwen Jiang, Yiliang Lv, Yuyuan Li, and Donglin Wang. Vop: Text-video co-operative prompt tuning for cross-modal retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.424, + 0.892, + 0.48 + ], + "angle": 0, + "content": "[17] Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning (ICML), 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.483, + 0.892, + 0.537 + ], + "angle": 0, + "content": "[18] Zhengbao Jiang, Frank F Xu, Jun Araki, and Graham Neubig. How can we know what language models know? Transactions of the Association for Computational Linguistics (ACL), 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.541, + 0.892, + 0.583 + ], + "angle": 0, + "content": "[19] Dengjie Li, Siyu Chen, Yujie Zhong, Fan Liang, and Lin Ma. Dip: Learning discriminative implicit parts for person re-identification. arXiv preprint arXiv:2212.13906, 2022. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.585, + 0.892, + 0.641 + ], + "angle": 0, + "content": "[20] Hanjun Li, Gaojie Wu, and Wei-Shi Zheng. Combined depth space based architecture search for person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.643, + 0.892, + 0.699 + ], + "angle": 0, + "content": "[21] Siyuan Li, Li Sun, and Qingli Li. Clip-reid: exploiting vision-language model for image re-identification without concrete text labels. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2023. 2, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.702, + 0.892, + 0.769 + ], + "angle": 0, + "content": "[22] Wei Li, Rui Zhao, Tong Xiao, and Xiaogang Wang. Deepreid: Deep filter pairing neural network for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2014. 1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.773, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[23] Wei Li, Xiatian Zhu, and Shaogang Gong. Harmonious attention network for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[24] Hao Luo, Youzhi Gu, Xingyu Liao, Shenqi Lai, and Wei Jiang. Bag of tricks and a strong baseline for deep person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops (CVPRW), 2019. 2, 5" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "17351" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "[25] Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. arXiv preprint arXiv:2111.09734, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.472, + 0.219 + ], + "angle": 0, + "content": "[26] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning (ICML), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.221, + 0.47, + 0.303 + ], + "angle": 0, + "content": "[27] Haocong Rao and Chunyan Miao. Transg: Transformer-based skeleton graph prototype contrastive learning with structure-trajectory prompted reconstruction for person re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.47, + 0.375 + ], + "angle": 0, + "content": "[28] Yongming Rao, Guangyi Chen, Jiwen Lu, and Jie Zhou. Counterfactual attention learning for fine-grained visual categorization and re-identification. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.377, + 0.469, + 0.446 + ], + "angle": 0, + "content": "[29] Kuniaki Saito, Kihyuk Sohn, Xiang Zhang, Chun-Liang Li, Chen-Yu Lee, Kate Saenko, and Tomas Pfister. 
Pic2word: Mapping pictures to words for zero-shot composed image retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.448, + 0.469, + 0.517 + ], + "angle": 0, + "content": "[30] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision (ICCV), 2017. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.518, + 0.469, + 0.574 + ], + "angle": 0, + "content": "[31] Chunfeng Song, Yan Huang, Wanli Ouyang, and Liang Wang. Mask-guided contrastive attention model for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.576, + 0.469, + 0.645 + ], + "angle": 0, + "content": "[32] Qinghang Su, Dayan Wu, Chenming Wu, Bo Li, and Weiping Wang. From data to optimization: Data-free deep incremental hashing with data disambiguation and adaptive proxies. IEEE Transactions on Circuits and Systems for Video Technology (TCSVT), 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.469, + 0.716 + ], + "angle": 0, + "content": "[33] Yifan Sun, Liang Zheng, Yi Yang, Qi Tian, and Shengjin Wang. Beyond part models: Person retrieval with refined part pooling (and a strong convolutional baseline). In Proceedings of the European conference on computer vision (ECCV), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.718, + 0.469, + 0.787 + ], + "angle": 0, + "content": "[34] Yifan Sun, Changmao Cheng, Yuhan Zhang, Chi Zhang, Liang Zheng, Zhongdao Wang, and Yichen Wei. Circle loss: A unified perspective of pair similarity optimization. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.469, + 0.843 + ], + "angle": 0, + "content": "[35] Lei Tan, Pingyang Dai, Rongrong Ji, and Yongjian Wu. Dynamic prototype mask for occluded person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[36] Chiat-Pin Tay, Sharmili Roy, and Kim-Hui Yap. Aanet: Attribute attention network for person re-identifications. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.162 + ], + "angle": 0, + "content": "[37] Guanshuo Wang, Yufeng Yuan, Xiong Chen, Jiwei Li, and Xi Zhou. Learning discriminative features with multiple granularities for person re-identification. In Proceedings of the 26th ACM international conference on Multimedia (ACM MM), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.893, + 0.232 + ], + "angle": 0, + "content": "[38] Lin Wang, Wanqian Zhang, Dayan Wu, Fei Zhu, and Bo Li. Attack is the best defense: Towards preemptive-protection person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.234, + 0.893, + 0.29 + ], + "angle": 0, + "content": "[39] Pingyu Wang, Zhicheng Zhao, Fei Su, and Honying Meng. Ltreid: Factorizable feature generation with independent components for long-tailed person re-identification. IEEE Transactions on Multimedia (TMM), 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.292, + 0.893, + 0.347 + ], + "angle": 0, + "content": "[40] Tao Wang, Hong Liu, Pinhao Song, Tianyu Guo, and Wei Shi. Pose-guided feature disentangling for occluded person re-identification based on transformer. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.349, + 0.893, + 0.404 + ], + "angle": 0, + "content": "[41] Longhui Wei, Shiliang Zhang, Wen Gao, and Qi Tian. Person transfer gan to bridge domain gap for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.406, + 0.893, + 0.462 + ], + "angle": 0, + "content": "[42] Dayan Wu, Qi Dai, Jing Liu, Bo Li, and Weiping Wang. Deep incremental hashing network for efficient image retrieval. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.463, + 0.893, + 0.532 + ], + "angle": 0, + "content": "[43] Wenjie Yang, Houjing Huang, Zhang Zhang, Xiaotang Chen, Kaiqi Huang, and Shu Zhang. Towards rich feature discovery with class activation maps augmentation for person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.533, + 0.893, + 0.601 + ], + "angle": 0, + "content": "[44] Zexian Yang, Dayan Wu, Wanqian Zhang, Bo Li, and Weiping Wang. Handling label uncertainty for camera incremental person re-identification. In Proceedings of the 31st ACM International Conference on Multimedia (ACM MM), 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.604, + 0.893, + 0.659 + ], + "angle": 0, + "content": "[45] Mang Ye, Jianbing Shen, Gaojie Lin, Tao Xiang, Ling Shao, and Steven CH Hoi. Deep learning for person re-identification: A survey and outlook. IEEE transactions on pattern analysis and machine intelligence (TPAMI), 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.661, + 0.893, + 0.73 + ], + "angle": 0, + "content": "[46] Guiwei Zhang, Yongfei Zhang, Tianyu Zhang, Bo Li, and Shiliang Pu. Pha: Patch-wise high-frequency augmentation for transformer-based person re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.732, + 0.893, + 0.787 + ], + "angle": 0, + "content": "[47] Quan Zhang, Jianhuang Lai, Zhanxiang Feng, and Xiaohua Xie. Seeing like a human: Asynchronous learning with dynamic progressive refinement for person re-identification. IEEE Transactions on Image Processing (TIP), 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.789, + 0.893, + 0.857 + ], + "angle": 0, + "content": "[48] Renrui Zhang, Wei Zhang, Rongyao Fang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li. Tip-adapter: Training-free adaption of clip for few-shot classification. In European Conference on Computer Vision (ECCV), 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.859, + 0.893, + 0.901 + ], + "angle": 0, + "content": "[49] Wanqian Zhang, Dayan Wu, Yu Zhou, Bo Li, Weiping Wang, and Dan Meng. Binary neural network hashing for image retrieval. In Proceedings of the 44th international ACM SIGIR" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "17352" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.093, + 0.468, + 0.12 + ], + "angle": 0, + "content": "conference on research and development in information retrieval (SIGIR)l, 2021.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.189 + ], + "angle": 0, + "content": "[50] Zhizheng Zhang, Cuiling Lan, Wenjun Zeng, Xin Jin, and Zhibo Chen. Relation-aware global attention for person re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.469, + 0.247 + ], + "angle": 0, + "content": "[51] Liang Zheng, Liyue Shen, Lu Tian, Shengjin Wang, Jingdong Wang, and Qi Tian. Scalable person re-identification: A benchmark. In Proceedings of the IEEE international conference on computer vision (ICCV), 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.249, + 0.469, + 0.304 + ], + "angle": 0, + "content": "[52] Zhedong Zheng, Liang Zheng, and Yi Yang. Unlabeled samples generated by gan improve the person re-identification baseline in vitro. In Proceedings of the IEEE international conference on computer vision (ICCV), 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.469, + 0.36 + ], + "angle": 0, + "content": "[53] Kaiyang Zhou, Yongxin Yang, Andrea Cavallaro, and Tao Xiang. Omni-scale feature learning for person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.362, + 0.469, + 0.403 + ], + "angle": 0, + "content": "[54] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision (IJCV), 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.405, + 0.469, + 0.471 + ], + "angle": 0, + "content": "[55] Xiao Zhou, Yujie Zhong, Zhen Cheng, Fan Liang, and Lin Ma. Adaptive sparse pairwise loss for object re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.475, + 0.469, + 0.544 + ], + "angle": 0, + "content": "[56] Haowei Zhu, Wenjing Ke, Dong Li, Ji Liu, Lu Tian, and Yi Shan. Dual cross-attention learning for fine-grained visual categorization and object re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.546, + 0.469, + 0.6 + ], + "angle": 0, + "content": "[57] Kuan Zhu, Haiyun Guo, Zhiwei Liu, Ming Tang, and Jinqiao Wang. Identity-guided human semantic parsing for person re-identification. In Proceedings of the European conference on computer vision (ECCV), 2020. 
1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.602, + 0.469, + 0.668 + ], + "angle": 0, + "content": "[58] Kuan Zhu, Haiyun Guo, Shiliang Zhang, Yaowei Wang, Jing Liu, Jinqiao Wang, and Ming Tang. Aaformer: Auto-aligned transformer for person re-identification. IEEE Transactions on Neural Networks and Learning Systems (TNNLS), 2023. 6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.469, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17353" + } + ] +] \ No newline at end of file diff --git a/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/85746221-1e2b-4579-be8b-1626ff544e58_origin.pdf b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/85746221-1e2b-4579-be8b-1626ff544e58_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e93647e4e51c07cc95e6f88eb53744995ff978fc --- /dev/null +++ b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/85746221-1e2b-4579-be8b-1626ff544e58_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbb881675454edd96da426cf176ee1c802359ecde7c5c55cd85ce1179dbd0c5b +size 2432962 diff --git a/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/full.md b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/full.md new file mode 100644 index 0000000000000000000000000000000000000000..adce95526873326c8281c91bdae285cc4536eb9d --- /dev/null +++ b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/full.md @@ -0,0 +1,386 @@ +# A Pedestrian is Worth One Prompt: Towards Language Guidance Person Re-Identification + +Zexian Yang $^{1,2}$ Dayan Wu $^{1*}$ Chenming Wu $^{3}$ Zheng Lin $^{1}$ Jingzi Gu $^{1}$ Weiping Wang $^{1,2}$ + +$^{1}$ Institute of Information Engineering, Chinese Academy of Sciences + +$^{2}$ School of Cyber Security, University of Chinese Academy of Sciences $^{3}$ Baidu Inc + +{yangzexian,wudayan,linzheng,gujingzi,wangweiping}@iie.ac.cn,wuchenming@baidu.com + +# Abstract + +Extensive advancements have been made in person ReID through the mining of semantic information. Nevertheless, existing methods that utilize semantic-parts from a single image modality do not explicitly achieve this goal. Whiteness the impressive capabilities in multimodal understanding of Vision Language Foundation Model CLIP, a recent two-stage CLIP-based method employs automated prompt engineering to obtain specific textual labels for classifying pedestrians. However, we note that the predefined soft prompts may be inadequate in expressing the entire visual context and struggle to generalize to unseen classes. This paper presents an end-to-end Prompt-driven Semantic Guidance (PromptSG) framework that harnesses the rich semantics inherent in CLIP. Specifically, we guide the model to attend to regions that are semantically faithful to the prompt. To provide personalized language descriptions for specific individuals, we propose learning pseudo tokens that represent specific visual contexts. This design not only facilitates learning fine-grained attribute information but also can inherently leverage language prompts during inference. 
Without requiring additional labeling efforts, our PromptSG surpasses the previous state of the art by over $10\%$ on MSMT17 and by nearly $5\%$ on the Market-1501 benchmark. The code will be available at https://github.com/Yzxian16/PromptSG

# 1. Introduction

Person Re-Identification (ReID) is a crucial research area in computer vision that focuses on identifying individuals across different camera views or time instances [4, 44, 45, 57], which is a sub-task of image-based retrieval. Features of the same individual, as captured by various cameras, are prone to alterations due to changes in lighting, background, and body posture. Consequently, the effectiveness of a sophisticated ReID model fundamentally depends on its capability to learn discriminative features that are impervious to camera-specific variations, thereby enhancing the model's capacity to generalize to previously unseen classes.

![](images/1a1c3dd471251170d4f54f010e19bd75ee21d5479288340c29efd66f8c5de453.jpg)
Include background "car"

![](images/96297b5662bd50f968dae48a98e7311bdac0c39e8977ba83975fdb8a0132a8a8.jpg)
Include two pedestrians

![](images/6ea4db5cdc1b1af9491cbaa55e9a9c46194374deda80bb15b.jpg)
More detailed attire

![](images/a948c436517257f617dc23830059e944b466eb12000858482aa2823d99bbc15b.jpg)

Figure 1. Transformer visualization [2] of attention maps. (a) Original images, (b) CLIP-ReID, (c) Our method w/o inversion, and (d) Our method guided by the composed prompts captures both the exact semantic parts and the external appearance details.

Figure 2. The core idea of our method. Our method inverts input images into pseudo-word tokens $S_{*}$, which are then composed into a textual prompt to describe the specific visual context. The attention map of patch tokens is further controlled by the semantics of the textual prompt.

Modern ReID models, constructed upon uni-modal architectures such as the Convolutional Neural Network (CNN) [22] or Vision Transformer (ViT) [14, 19, 35, 40], have made significant advancements within the field. A substantial portion of these solutions focus on the extraction of pertinent regions to rectify misalignment issues. These strategies are dedicated to the extraction of semantic
Additionally, the query 'A photo of a person' presents a challenge due to the absence of specific descriptors, thereby lacking a personalized prompt for individual identification. The pioneering CLIP-ReID [21] introduces automated prompt engineering on CLIP by incorporating additional ID-wise learnable vectors customized for specific identities. Particularly, CLIP-ReID employs a two-stage training process that first optimizes the learnable vectors with the frozen CLIP model, and then restricts the image encoder with the learned textual descriptions. However, the disentangled usage, i.e., only the visual embedding is utilized during inference, renders the learned soft prompts ineffective for unseen prompts. As a result, the attention regions potentially do not entirely encompass the body part, and may inadvertently include background elements, such as cars and additional pedestrians captured in the scene, as illustrated in the first two examples in Fig. 1(b). In addition, even though CLIP-ReID adheres to training objectives aimed at vision-language alignment, such predefined soft prompts may not be sufficient to characterize the entire visual context of the specified pedestrian. + +In this paper, we propose Prompt-driven Semantic Guidance (PromptSG), that aims to streamline the two-stage pipeline by leveraging the foundational CLIP model effectively and efficiently. As outlined in Fig. 2, our core insight is straightforward: we strive to activate CLIP's cross-modal comprehension using explicit language prompts, and the regions extracted can then be fine-tuned to enhance semantic discriminativeness. Specifically, given a textual prompt, we refine the patch tokens by injecting cross-attention maps, determining which patch attends to the corresponding semantics. Following this rationale, we revisit the fundamental issue that the term 'person' serves as a + +coarse descriptor, lacking personalized language descriptions for individual identities. Beyond semantic information related to the 'person', appearance information is also crucial for identification purposes [5]. While semantic information aids the model in better body part localization, appearance information further refines the focus on an individual's attire. Hence, we employ the textual inversion technique [10], which learns to represent visual context through unique token. We use a lightweight inversion network that maps the image to a pseudo-token. This pseudo-token can then be incorporated into the textual prompt, creating an embedding that closely mirrors the original image. Compared to CLIP-ReID, our solution offers two primary advantages: 1) The textual prompt emphasizes regions in the image via a cross-attention map, capturing the precise semantic part (Fig. 1(c)), and can also be utilized for unseen classes during inference. 2) The model can learn the personal token of the query image in an end-to-end manner, providing more detailed guidance specific to an identity (Fig. 1(d)). Importantly, our proposed method is free, i.e. there is no need to supply additional information, such as masks, bounding boxes, or precise descriptions. We summarize the contribution of this paper as follows. + +- Leveraging the exceptional multi-modal reasoning capabilities of CLIP, we propose PromptSG, a novel framework for the person ReID task. This approach uniquely utilizes language prompts, providing explicit assistance to the visual encoder in efficiently capturing semantic information. 
- To create a more personalized description for the individual, we propose learning to represent the specific, more detailed appearance attributes by employing an inversion network.
- Without any additional labeling efforts, PromptSG surpasses the previous SOTA method [21] by over $10\%$ on the MSMT17 dataset. It also exhibits superior performance on the Market-1501 benchmark, surpassing the previous SOTA method [46] by nearly $5\%$.

# 2. Related Work

Person Re-identification remains an important yet challenging task due to the subtle inter-class differences. To learn more discriminative representations, a category of CNN-based techniques has primarily concentrated on optimizing the distance metric via metric learning [15, 33, 34, 37, 38]. Recognizing the importance of semantic information, a substantial body of research [3, 23, 31, 43] explores the use of attention mechanisms, which guide the network to extract attention-aware features for body parts. For example, AAnet [36] adopts a unified learning framework that incorporates attribute attention maps through extra attribute labels. Pioneering work TransReID [14] introduces a self-attention-based architecture, Vision Transformer (ViT) [8], for advancing ReID tasks. DCAL [56] proposes to implicitly extract the local features through a global-local cross-attention mechanism. However, these methods solely apply attention mechanisms to the visual modality, and the lack of explicit language guidance potentially constrains their performance. The work most relevant to ours, CLIP-ReID [21], is the first to utilize the vision-language pre-training model CLIP in the ReID task. However, CLIP-ReID fails to leverage the linguistic capability of the text encoder in CLIP during inference, since the ID-specific learnable tokens only influence the seen identities.

![](images/4c93416d54c5bb2b08a721e9e8ab3ec52a5887b08c7fa360e0d66e84179597f3.jpg)

Figure 3. Overview of our framework. PromptSG learns pseudo token $S_{*}$ from the specific visual embedding, and the visual encoder learns semantically faithful representations under the guidance of language prompts through the Multimodal Interaction Module.

![](images/0b046aeeb20e6eebeda8b6ac236d946962bd55fb3fd6170dad0c0c9b4bfc0b05.jpg)

Large-scale vision-language pre-training models, which connect image representations with text embeddings in a shared embedding space, have demonstrated effectiveness across a wide range of uni-modal and multimodal downstream tasks. These include classification [6, 48], image captioning [25], and cross-modal retrieval [11, 16, 32, 42, 49]. Foundational VL models, such as CLIP, usually undergo training on extensive image-text pairs with contrastive learning objectives. This foundational pre-training provides the model with strong open-vocabulary classification capabilities. Inherited from prompt learning in NLP [18], CoOp [54] proposes to explore learnable prompt optimization on few-shot classification. Following this soft prompt approach, CLIP-ReID pioneers the adaptation of CLIP for person ReID by classifying images into ID-specific prompts. Differing from CLIP-ReID, which focuses on vision-language alignment, our goal is to exploit rich semantic information from language to explicitly control the weights assigned to each patch or region, and to improve the two-stage framework by directly inverting images into the language latent space.
Textual Inversion, originally proposed for personalized text-to-image generation [10], is a learning approach that aims to discover new pseudo-words in the word-embedding space. These pseudo-words are capable of encapsulating both the overall visual content and intricate visual details. Recently, the application of textual inversion has expanded to the zero-shot composed image retrieval task [1, 29]. In these studies, a textual inversion network is typically pre-trained using extensive unlabeled image datasets. In this work, we stand out as the first to apply this learning paradigm to person ReID without any additional training data.

# 3. Preliminary

Contrastive Language-Image Pre-training (CLIP) undergoes pre-training on a large corpus of image-text pairs, aligning visual and linguistic representations within a shared space through the matching of images with their corresponding text descriptions. Specifically, CLIP consists of a visual encoder $\mathcal{V}(\cdot)$ and a text encoder $\mathcal{T}(\cdot)$. The visual encoder $\mathcal{V}(\cdot)$ takes an image $\pmb{x} \in \mathbb{R}^{H \times W \times C}$ as input. The text encoder $\mathcal{T}(\cdot)$ takes a tokenized textual description $\pmb{t} \in \mathbb{R}^{N \times D}$ as input, where $N, D$ are the text's length and token feature dimension, respectively. The pre-training objective is based on self-supervised contrastive learning, which minimizes the cosine distance for matched image-text pairs. For downstream tasks such as classification, the description of the $j$-th class is typically obtained through a hand-crafted prompt, e.g., 'A photo of a [CLASS]'. Therefore, the probability of image $\pmb{x}$ being classified as class $y$ can be computed as follows:

$$
\mathcal{P}(y \mid \boldsymbol{x}) = \frac{\exp\left(\operatorname{sim}\left(\mathcal{V}(\boldsymbol{x}), \mathcal{T}(\boldsymbol{t}_y)\right) / \tau\right)}{\sum_{j=1}^{K} \exp\left(\operatorname{sim}\left(\mathcal{V}(\boldsymbol{x}), \mathcal{T}(\boldsymbol{t}_j)\right) / \tau\right)}, \tag{1}
$$

where $\tau$ denotes the temperature, and $\operatorname{sim}(a,b) = \frac{a \cdot b}{\|a\|_2 \|b\|_2}$ is the cosine similarity.
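To make the zero-shot classification rule in Eq. (1) concrete, the following is a minimal PyTorch-style sketch. The tensors `image_feat` and `class_text_feats` stand in for the outputs of CLIP's visual and text encoders, and the function name and temperature value are illustrative assumptions rather than part of the original formulation.

```python
import torch
import torch.nn.functional as F

def zero_shot_probs(image_feat: torch.Tensor,
                    class_text_feats: torch.Tensor,
                    tau: float = 0.01) -> torch.Tensor:
    """Eq. (1): softmax over temperature-scaled cosine similarities between
    one image embedding (D,) and K class-prompt embeddings (K, D)."""
    img = F.normalize(image_feat, dim=-1)        # unit-norm image embedding
    txt = F.normalize(class_text_feats, dim=-1)  # unit-norm text embeddings
    logits = img @ txt.t() / tau                 # cosine similarity / temperature, shape (K,)
    return logits.softmax(dim=-1)                # P(y | x) over the K classes

# Example: 3 hand-crafted prompts 'A photo of a [CLASS]' encoded into 512-d features.
probs = zero_shot_probs(torch.randn(512), torch.randn(3, 512))
```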
A simple approach to applying CLIP to person ReID involves substituting the linear classifier with image-to-text classification. However, given that labels in ReID tasks are solely index-based, there are no specific words to represent different persons. To tackle this challenge, CLIP-ReID crafts the prompt as 'A photo of a $[X_i]_1[X_i]_2[X_i]_3\ldots[X_i]_M$ person', where $[X_i]_m, m \in \{1,\dots,M\}$ represents a set of ID-specific learnable tokens for the $i$-th ID. Nevertheless, since CLIP-ReID optimizes ID-specific prompts exclusively bound to training IDs, it overlooks the chance to fully exploit the open-vocabulary capabilities inherent in CLIP.

# 4. Method

An overview of our framework is depicted in Fig. 3. Starting with the visual embeddings derived from CLIP's visual encoder, our approach employs an inversion network to learn pseudo tokens that encapsulate the visual context. Following this, an interaction between visual and textual modalities is facilitated in the interaction module, leading to the final re-weighted representations. During the inference phase, we are presented with two options for textual inputs: an efficiency-driven simplified prompt and an accuracy-driven composed prompt. Note that the text encoder is frozen in our entire framework.

# 4.1. Learning the Personalized ID-Specific Prompt

As suggested by prior research, the word-embedding space possesses sufficient expressiveness to encapsulate basic image concepts [7]. However, the inherent limitation lies in the pre-defined prompts in CLIP-ReID, which can only capture limited attributes and may not fully encapsulate the visual context. In contrast, we propose learning, via the textual inversion technique, a pseudo token that aligns with the context of the query image.

Let $f_{\theta}(\cdot)$ denote an inversion network parameterized by $\theta$. Our goal is to invert the global visual embedding $\mathbf{v} \in V$ from the visual space of CLIP into a pseudo token $s_* \in T_*$ by $f_{\theta}(\mathbf{v}) = s_*$, where $T_*$ indicates the token embedding space. Subsequently, this pseudo token can be integrated into natural language sentences. As such, the language prompt for the input image is structured as 'A photo of a $s_*$ person'. It is worth noting that this pseudo token bears no relationship to an actual word but functions as a representation in the token embedding space. An input language prompt undergoes a tokenization process, resulting in several tokens. The tokenized prompt, denoted as $t_p$, can be fed into the text encoder of CLIP to obtain the text embedding $l_p = \mathcal{T}(t_p)$. To ensure that the learned pseudo token faithfully captures the context of the image, one can follow the reconstruction objective of textual inversion, realized by the symmetric contrastive loss, which is formulated as follows:

$$
\mathcal{L}_{i2t} = \frac{1}{N} \sum_{n=1}^{N} \log \frac{\exp\left(\operatorname{sim}\left(\boldsymbol{v}_n, \boldsymbol{l}_p\right) / \tau\right)}{\sum_{i=1}^{N} \exp\left(\operatorname{sim}\left(\boldsymbol{v}_n, \boldsymbol{l}_i\right) / \tau\right)}, \tag{2}
$$

$$
\mathcal{L}_{t2i} = \frac{1}{N} \sum_{n=1}^{N} \log \frac{\exp\left(\operatorname{sim}\left(\boldsymbol{l}_n, \boldsymbol{v}_p\right) / \tau\right)}{\sum_{i=1}^{N} \exp\left(\operatorname{sim}\left(\boldsymbol{l}_n, \boldsymbol{v}_i\right) / \tau\right)}. \tag{3}
$$

In this context, $\boldsymbol{v}_i$ or $\boldsymbol{l}_i$ represents the $i$-th image/text embedding in a batch, $\boldsymbol{l}_p$ is the corresponding prompt embedding for $\boldsymbol{v}_n$, and $\boldsymbol{v}_p$ is constructed in an analogous manner.

The underlying mechanism is grounded in the principle of cycle-consistency: a pseudo token tends to faithfully represent the context of the image only when the text features closely align with the corresponding image features. However, this contrastive loss does not account for the fact that images with the same ID are supposed to share the same appearance. Therefore, we aim to encourage the pseudo token to capture visual details exclusive to the same identity. To this end, we exploit the symmetric supervised contrastive loss as follows:

$$
\mathcal{L}_{\mathrm{SupCon}} = \mathcal{L}_{i2t}^{\mathrm{Sup}} + \mathcal{L}_{t2i}^{\mathrm{Sup}}, \tag{4}
$$

$$
\mathcal{L}_{i2t}^{\mathrm{Sup}} = \frac{1}{N} \sum_{n=1}^{N} \sum_{p^{+} \in P(i)} \log \frac{\exp\left(\operatorname{sim}\left(\boldsymbol{v}_n, \boldsymbol{l}_{p^{+}}\right) / \tau\right)}{\sum_{i=1}^{N} \exp\left(\operatorname{sim}\left(\boldsymbol{v}_n, \boldsymbol{l}_i\right) / \tau\right)}, \tag{5}
$$

$$
\mathcal{L}_{t2i}^{\mathrm{Sup}} = \frac{1}{N} \sum_{n=1}^{N} \sum_{p^{+} \in P(i)} \log \frac{\exp\left(\operatorname{sim}\left(\boldsymbol{l}_n, \boldsymbol{v}_{p^{+}}\right) / \tau\right)}{\sum_{i=1}^{N} \exp\left(\operatorname{sim}\left(\boldsymbol{l}_n, \boldsymbol{v}_i\right) / \tau\right)}, \tag{6}
$$

where $P(i)$ denotes the set of positive samples associated with $\boldsymbol{v}_n$ and $\boldsymbol{l}_n$.
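As an illustration of Eqs. (4)-(6), here is a minimal PyTorch-style sketch of the symmetric supervised contrastive objective. The function name and batch layout are assumptions for this example, the positive set $P(i)$ is formed from shared identity labels, and the loss is written as a negative log-likelihood, as is standard for contrastive objectives.

```python
import torch
import torch.nn.functional as F

def sup_contrastive(v: torch.Tensor, l: torch.Tensor,
                    pids: torch.Tensor, tau: float = 0.07) -> torch.Tensor:
    """Symmetric supervised contrastive loss over a batch.
    v:    (N, D) image embeddings from the visual encoder
    l:    (N, D) prompt embeddings of 'A photo of a s_* person' from the text encoder
    pids: (N,)   person identity labels defining the positive set P(i)
    """
    v = F.normalize(v, dim=-1)
    l = F.normalize(l, dim=-1)
    sim = v @ l.t() / tau                                    # (N, N) image-to-text similarities
    pos = (pids.unsqueeze(0) == pids.unsqueeze(1)).float()   # positives share the same ID

    log_p_i2t = sim.log_softmax(dim=1)                       # rows: image n vs all prompts
    log_p_t2i = sim.t().log_softmax(dim=1)                   # rows: prompt n vs all images

    # Average negative log-likelihood over each sample's positive set.
    loss_i2t = -(log_p_i2t * pos).sum(1) / pos.sum(1)
    loss_t2i = -(log_p_t2i * pos).sum(1) / pos.sum(1)
    return (loss_i2t + loss_t2i).mean()

# Example usage with a batch of 8 embeddings covering 4 identities.
loss = sup_contrastive(torch.randn(8, 512), torch.randn(8, 512),
                       torch.tensor([0, 0, 1, 1, 2, 2, 3, 3]))
```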
$$
\mathcal{L}_{\mathrm{SupCon}} = \mathcal{L}_{i2t}^{\mathrm{Sup}} + \mathcal{L}_{t2i}^{\mathrm{Sup}}, \tag{4}
$$

$$
\mathcal{L}_{i2t}^{\mathrm{Sup}} = -\frac{1}{N} \sum_{n=1}^{N} \sum_{p^{+} \in P(i)} \log \frac{\exp\left(\operatorname{sim}\left(\boldsymbol{v}_{n}, \boldsymbol{l}_{p^{+}}\right) / \tau\right)}{\sum_{i=1}^{N} \exp\left(\operatorname{sim}\left(\boldsymbol{v}_{n}, \boldsymbol{l}_{i}\right) / \tau\right)}, \tag{5}
$$

$$
\mathcal{L}_{t2i}^{\mathrm{Sup}} = -\frac{1}{N} \sum_{n=1}^{N} \sum_{p^{+} \in P(i)} \log \frac{\exp\left(\operatorname{sim}\left(\boldsymbol{l}_{n}, \boldsymbol{v}_{p^{+}}\right) / \tau\right)}{\sum_{i=1}^{N} \exp\left(\operatorname{sim}\left(\boldsymbol{l}_{n}, \boldsymbol{v}_{i}\right) / \tau\right)}, \tag{6}
$$

where $P(i)$ represents the set of positive samples related to $\boldsymbol{v}_{n}$ and $\boldsymbol{l}_{n}$.

# 4.2. Prompt-driven Semantic Guidance

We refine the language prompt by incorporating the pseudo token linked to the identity, enhancing its ability to convey a more specific visual context for the image. Beyond this, we also seek to explicitly direct the image features through language. At the core of our approach lies the idea of semantic guidance, wherein we explicitly determine which regions of the image align with the language prompt. Intuitively, image patches corresponding to the semantic "person" should inherently have substantial influence to facilitate discrimination. Based on this observation, and in contrast to the patch-to-patch interaction in self-attention layers within a single modality, we explore a patch-to-prompt interaction that operates across modalities.

In particular, we employ a language-guided cross-attention module, which uses the textual embedding as the query and the patch-wise embeddings of the visual encoder as the keys and values. More formally, given a pair of image and prompt $(\pmb{x}, \pmb{t}_p)$, we first feed the image $\pmb{x}$ into the visual encoder, yielding a sequence of patch embeddings $\{\tilde{\pmb{v}}, \pmb{v}_1, \dots, \pmb{v}_M\}$. Here, $\tilde{\pmb{v}}$ denotes the global visual embedding, while the remaining $\pmb{v}_i, i \in [1, M]$ are the local patch embeddings. In a similar vein, the prompt is fed into the text encoder to derive the text embedding $l_{p}$. Subsequently, the text embedding is projected to a query matrix $Q$, and the patch embeddings are projected to a key matrix $K$ and a value matrix $V$, via three different linear-projection layers. As such, the patch-to-prompt interaction can be achieved by:

$$
\mathrm{A}(Q, K, V) = \operatorname{Softmax}\left(\frac{QK^{T}}{\sqrt{d}}\right)V. \tag{7}
$$

This interaction aggregates the attention map to highlight the regions of high semantic response. Drawing from multimodal fusion methods [9], we incorporate two transformer blocks following the cross-attention layer to derive the final representations. Ultimately, we utilize the standard ReID losses, i.e., the triplet loss and the identity classification loss [12], to optimize our framework:

$$
\mathcal{L}_{\mathrm{ID}} = -\frac{1}{K} \sum_{j=1}^{K} y_{j} \log p_{j}, \tag{8}
$$

$$
\mathcal{L}_{\mathrm{Triplet}} = \max\left(d_{p} - d_{n} + m, 0\right), \tag{9}
$$

where $p_j$ is the predicted probability for the $j$-th class, $y_j$ is the ground-truth label, $d_p$ and $d_n$ are the feature distances to the positive and negative samples, and $m$ denotes the margin.
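As a concrete illustration of Eq. (7) and the two transformer blocks that follow the cross-attention layer, a minimal PyTorch-style sketch of the language-guided interaction module is given below. The embedding dimension, number of heads, and layer settings are assumptions for illustration, not the exact configuration.

```python
import torch
import torch.nn as nn

class InteractionModule(nn.Module):
    """Language-guided cross-attention (Eq. 7) followed by two transformer blocks."""
    def __init__(self, dim=512, heads=8):
        super().__init__()
        self.q = nn.Linear(dim, dim)   # projects the text embedding to the query Q
        self.k = nn.Linear(dim, dim)   # projects patch embeddings to keys K
        self.v = nn.Linear(dim, dim)   # projects patch embeddings to values V
        self.scale = dim ** -0.5       # 1 / sqrt(d) in Eq. (7)
        self.blocks = nn.Sequential(
            nn.TransformerEncoderLayer(dim, heads, batch_first=True),
            nn.TransformerEncoderLayer(dim, heads, batch_first=True),
        )

    def forward(self, text_embed, patch_embeds):
        # text_embed: (B, dim) prompt embedding l_p; patch_embeds: (B, M, dim) local patches
        q = self.q(text_embed).unsqueeze(1)                                  # (B, 1, dim) language query
        k, v = self.k(patch_embeds), self.v(patch_embeds)
        attn = torch.softmax(q @ k.transpose(-2, -1) * self.scale, dim=-1)   # patch-to-prompt map
        fused = attn @ v                                                     # semantically weighted patches
        return self.blocks(fused).squeeze(1)                                 # final re-weighted representation
```

Since the text embedding acts as a single query token, the softmax over patches directly yields the patch-to-prompt attention map that highlights person-related regions.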
# 4.3. Optimization and Inference

Training optimization. In summary, the overall objective function for our framework is formulated as:

$$
\mathcal{L} = \mathcal{L}_{\mathrm{Triplet}} + \mathcal{L}_{\mathrm{ID}} + \lambda \mathcal{L}_{\mathrm{SupCon}}. \tag{10}
$$

Similar to CLIP-ReID, the final hidden states of the vision transformer, in conjunction with those of the preceding two layers, are also employed to calculate $\mathcal{L}_{\mathrm{Triplet}}$.

Improved inference efficiency. Our approach composes the query-specific pseudo token into the textual prompt, which essentially doubles the inference time compared to using only the visual encoder. Fortunately, our empirical findings suggest that providing only 'A photo of a person' as a simplified guideline yields comparable results. Since this fixed prompt does not depend on the query, the text encoder then causes no increase in inference time.
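For clarity, the training objective of Eq. (10) can be sketched as below, with the symmetric supervised contrastive term of Eqs. (4)-(6) written out explicitly. Tensor names, the temperature value, and the per-positive averaging are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def sup_con_loss(v, l, labels, tau=0.07):
    """Symmetric supervised contrastive loss (Eqs. 4-6) between image embeddings
    v (N, D) and prompt embeddings l (N, D) that share identity labels (N,)."""
    v = F.normalize(v, dim=-1)
    l = F.normalize(l, dim=-1)
    logits = v @ l.t() / tau                                    # cosine similarity / temperature
    pos = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()  # P(i): same-ID pairs
    log_p_i2t = F.log_softmax(logits, dim=1)                    # image -> text direction
    log_p_t2i = F.log_softmax(logits.t(), dim=1)                # text -> image direction
    i2t = -(log_p_i2t * pos).sum(1) / pos.sum(1)                # averaged over the positive set
    t2i = -(log_p_t2i * pos).sum(1) / pos.sum(1)
    return (i2t + t2i).mean()

def total_loss(triplet, id_ce, v, l, labels, lam=0.5):
    # Eq. (10): L = L_Triplet + L_ID + lambda * L_SupCon
    return triplet + id_ce + lam * sup_con_loss(v, l, labels)
```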
| Dataset | #ID | Images | Cams |
|---|---|---|---|
| Market-1501 | 1,501 | 32,668 | 6 |
| MSMT17 | 4,101 | 126,441 | 15 |
| DukeMTMC | 1,404 | 36,411 | 8 |
| CUHK03-NP | 1,467 | 13,164 | 2 |
Table 1. The statistics of the datasets used in our experiments.

# 5. Experiments

# 5.1. Experimental Setting

Datasets and Evaluation Protocols. To evaluate and compare various methods, four extensive person re-identification datasets, Market-1501 [51], MSMT17 [41], DukeMTMC [52], and CUHK03-NP [22], are exploited. Dataset statistics are given in Tab. 1. In line with conventions in the ReID community [13], two commonly used metrics, i.e., mean Average Precision (mAP) and Rank-1 (R-1) accuracy, are used to evaluate the performance.

Implementation Details. In alignment with prior research, we employ both ResNet-50 and ViT-B/16 visual encoders initialized from CLIP pre-trained weights, together with the pre-trained text encoder, i.e., the CLIP text Transformer. Our framework additionally features a randomly initialized inversion network and a multimodal interaction module. The inversion network is a lightweight three-layer MLP with 512-dimensional hidden states, and a Batch Normalization (BN) layer [17] is placed after its last layer. The batch size is set to 64, comprising 16 identities with 4 images per identity. All input images are resized to $256 \times 128$. We use the Adam optimizer with a learning rate of 5e-6 for the visual encoder, whereas the learning rate for the randomly initialized modules is set to 5e-5. We find that $\lambda$ in Eq. (10) is not sensitive and performs well across a broad range, so we consistently set $\lambda = 0.5$ for all datasets. The model is trained for 60 epochs, with the learning rate decayed by a factor of 0.1 every 20 epochs. The entire framework is implemented in PyTorch and runs on a single NVIDIA RTX 3090 GPU with 24GB VRAM.

Baseline. Most existing approaches are built upon the strong ReID baseline presented in [24]. Specifically, they employ an ImageNet-21k pre-trained CNN or ViT as the backbone and incorporate the ID loss and the triplet loss as crucial components. In contrast, our baseline model deviates by leveraging the pre-trained CLIP model, and we fine-tune the visual encoder of CLIP by directly applying the two commonly used losses.

# 5.2. Comparison with State-of-the-art Methods

We benchmark PromptSG against the current state-of-the-art, which can generally be divided into three categories: CNN-based, ViT-based, and CLIP-based methods. Tab. 2 summarizes the main results on four widely used person ReID datasets.
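The implementation details above can be summarized in a short configuration sketch; the exact MLP layout, activation choices, and helper names below are assumptions for illustration.

```python
import torch
import torch.nn as nn

class InversionNet(nn.Module):
    """Lightweight three-layer MLP with 512-d hidden states that maps a CLIP
    visual embedding to the pseudo token s_*; BatchNorm follows the last layer."""
    def __init__(self, dim=512):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(dim, 512), nn.ReLU(),
            nn.Linear(512, 512), nn.ReLU(),
            nn.Linear(512, dim),
            nn.BatchNorm1d(dim),
        )

    def forward(self, v):        # v: (B, dim) global visual embedding
        return self.mlp(v)       # s_*: (B, dim) pseudo token

def build_optimizer(visual_encoder, new_modules):
    # 5e-6 for the CLIP visual encoder, 5e-5 for the randomly initialized modules;
    # the learning rate is decayed by 0.1 every 20 epochs over 60 epochs in total.
    optimizer = torch.optim.Adam([
        {"params": visual_encoder.parameters(), "lr": 5e-6},
        {"params": new_modules.parameters(), "lr": 5e-5},
    ])
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
    return optimizer, scheduler
```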
| Backbone | Method | Reference | Market-1501 (mAP / R-1) | MSMT17 (mAP / R-1) | DukeMTMC (mAP / R-1) | CUHK03-NP (mAP / R-1) |
|---|---|---|---|---|---|---|
| ResNet-50 | *CNN-based method* | | | | | |
| | OSNet [53] | ICCV'19 | 84.9 / 94.8 | 52.9 / 78.7 | 73.5 / 88.6 | - / - |
| | ISP [57] | ECCV'20 | 84.9 / 94.2 | - / - | 75.6 / 86.9 | 74.1 / 76.5 |
| | RGA-SC [50] | CVPR'20 | 88.4 / 96.1 | 57.5 / 80.3 | - / - | 77.4 / 81.1 |
| | CDNet [20] | CVPR'21 | 86.0 / 95.1 | 54.7 / 78.9 | 76.8 / 88.6 | - / - |
| | CAL [28] | ICCV'21 | 87.0 / 94.5 | 56.2 / 79.5 | 76.4 / 87.6 | - / - |
| | ALDER* [47] | TIP'21 | 88.9 / 95.6 | 59.1 / 82.5 | 78.9 / 89.9 | 78.7 / 81.0 |
| | LTReID* [39] | TMM'22 | 86.9 / 94.7 | 58.6 / 81.0 | 80.4 / 90.5 | 80.3 / 82.1 |
| | *CLIP-based method* | | | | | |
| | Baseline | - | 88.1 / 94.7 | 60.7 / 82.1 | 79.3 / 88.6 | 77.6 / 79.1 |
| | CLIP-ReID [21] | AAAI'23 | 89.8 / 95.7 | 63.0 / 84.4 | 80.7 / 90.0 | 78.2 / 79.4 |
| | PromptSG | Ours | 91.8 / 96.6 | 68.5 / 86.0 | 80.4 / 90.2 | 79.8 / 80.5 |
| ViT-B/16 | *ViT-based method* | | | | | |
| | TransReID [14] | ICCV'21 | 88.9 / 95.2 | 67.4 / 85.3 | 82.0 / 90.7 | 79.6 / 81.7 |
| | DCAL [56] | CVPR'22 | 87.5 / 94.7 | 64.0 / 83.1 | 80.1 / 89.0 | - / - |
| | AAformer [58] | TNNLS'23 | 88.0 / 95.4 | 65.6 / 84.4 | 80.9 / 90.1 | 79.0 / 80.3 |
| | PHA [46] | CVPR'23 | 90.2 / 96.1 | 68.9 / 86.1 | - / - | 83.0 / 84.5 |
| | *CLIP-based method* | | | | | |
| | Baseline | - | 86.4 / 93.3 | 66.1 / 84.4 | 80.0 / 88.8 | 80.0 / 80.5 |
| | CLIP-ReID [21] | AAAI'23 | 89.6 / 95.5 | 73.4 / 88.7 | 82.5 / 90.0 | 81.6 / 80.9 |
| | PromptSG | Ours | 94.6 / 97.0 | 87.2 / 92.6 | 81.6 / 91.0 | 83.1 / 85.1 |
Table 2. Comparison with state-of-the-art models on the Market-1501, MSMT17, DukeMTMC, and CUHK03-NP (labeled) datasets. The superscript star (*) indicates that the image is resized to a resolution exceeding 256x128. All results are reported without re-ranking. Red and blue mark the best and second-best results.

We observe that our proposed PromptSG attains the best results and sets a new state-of-the-art performance. Remarkably, PromptSG achieves an improvement of over $10\%$ on MSMT17 and nearly $5\%$ on Market-1501 over previous state-of-the-art results.

Compared with ViT-based methods. The pioneering work TransReID [14] sets a strong baseline for ViT-based methods by leveraging the potential of the transformer. Building upon this groundwork, PHA [46] further enhances the preservation of key high-frequency components in images. In contrast to existing ViT-based methods that only capture patch-wise uni-modal information, our PromptSG demonstrates that the interaction between different modalities can improve the performance of the individual modalities.

Compared with CLIP-based methods. Compared with the competing CLIP-based method CLIP-ReID, our PromptSG outperforms it by $5.0\% / 1.5\%$ and $13.8\% / 3.9\%$ mAP/Rank-1 on the Market-1501 and MSMT17 datasets when taking ViT-B/16 as the visual backbone. A key distinction between CLIP-ReID and our approach resides in the composition of the query-specific pseudo-token. Our results further underscore that incorporating textual information during the inference process can also enhance performance.

Compared with CNN-based methods. To ensure a fair comparison, we also implement PromptSG with a ResNet-50 backbone. Apart from LTReID [39], which utilizes higher-resolution images, our method consistently surpasses other methods by a significant margin, especially on the most challenging person ReID dataset, MSMT17. This highlights the robustness and superiority of our approach across various architectures.

# 5.3. Ablation Study

In the following, we conduct an ablation study on the essential elements of PromptSG on the Market-1501 and MSMT17 datasets; all experiments are conducted with the ViT-B/16 backbone.

Contributions from Different Components. To assess the contribution of various components, we conduct ablation experiments by removing one component at a time. Recall that $\mathcal{L}_{i2t}^{\mathrm{Sup}}$ and $\mathcal{L}_{t2i}^{\mathrm{Sup}}$ are the supervised contrastive losses in Eq. (5) and Eq. (6), respectively, and MIM denotes the multimodal interaction module. Comparing rows b) and c) with a), we draw a similar conclusion for both directions: removing either the text-to-image or the image-to-text contrastive loss leads to a clear performance drop on both datasets. Further comparing rows a) and d), we observe that removing the semantic information leads to a larger decrease than solely removing the ID-specific appearance information. Notably, as seen in row a), our full model, PromptSG, utilizes both semantic and appearance language supervision during training, achieving a substantial improvement of over one point. The overall conclusion supports that language guidance, through both semantic and appearance cues, plays a crucial role in improving the performance of our model.
| | $\mathcal{L}_{i2t}^{\mathrm{Sup}}$ | $\mathcal{L}_{t2i}^{\mathrm{Sup}}$ | MIM | Market-1501 (mAP / R-1) | MSMT17 (mAP / R-1) |
|---|---|---|---|---|---|
| a) | | | | 94.6 / 97.0 | 87.2 / 92.6 |
| b) | | | | 92.8 / 96.7 | 85.2 / 91.9 |
| c) | | | | 93.0 / 96.7 | 84.5 / 90.2 |
| d) | | | | 89.4 / 95.3 | 71.4 / 87.3 |
+ +Table 3. Ablation study on the effectiveness of each component of PromptSG on Market-1501 and MSMT17. + +
| Method | Market-1501 (mAP / R-1) | MSMT17 (mAP / R-1) |
|---|---|---|
| Training w/ composed | 94.6 / 97.0 | 87.2 / 92.6 |
| Training w/o composed | 92.0 / 96.3 | 85.3 / 91.6 |
Ablation Study on the Personalized Prompt. To better understand whether the learned pseudo tokens $s_*$ can provide more granular guidance for learning visual embeddings, we train a strong baseline model in which the textual prompt is not composed with $s_*$ during training and testing, but instead relies on the simplified prompt "A photo of a person" for semantic guidance. Note that we do not use the symmetric supervised contrastive loss in this case. The results in Tab. 4 imply that composing $s_*$ has a significant impact on the overall performance. When $s_*$ is removed from the training process, the performance decreases by $1.9\%$ to $2.6\%$ in terms of mAP. Although we focus on the uni-modal re-identification task, the above formulation could potentially be applied to multimodal test sets, such as text-to-image person retrieval, by composing the image feature with the text to achieve better alignment.

Table 4. Ablation of training with or without composing the pseudo token on Market-1501 and MSMT17.

Ablation Study on the Interaction Module. We analyze the impact of different designs of the interaction module on performance and inference speed, as well as the impact of not using a composed prompt during inference. Notably, personalized prompts are consistently included during training. As shown in Tab. 5, without an attention module (w/o attention module), the model achieves a baseline performance, with the inference speed depending solely on the visual encoder. Introducing a single cross-attention layer (+1 cross-layer) shows a notable performance improvement, indicating the positive effect of incorporating a cross-layer design. Notably, performance can be stably improved with more self-attention layers, but at the cost of
| Training model | Visual encoder (inference) | Text encoder (inference) | FPS ↑ | mAP |
|---|---|---|---|---|
| w/o attention module | ✓ | - | 1x | 89.4 |
| +1 cross-layer | ✓ | - | 0.95x | 91.1 |
| +1 cross & 1 self-layer | ✓ | - | 0.91x | 93.0 |
| +1 cross & 2 self-layer | ✓ | ✓ | 0.48x | 94.6 |
| +1 cross & 2 self-layer | ✓ | - | 0.88x | 94.1 |
Table 5. The impact of various interaction modules and an efficiency comparison of different inference models on Market-1501. 'Cross' and 'self' denote cross-attention and self-attention, respectively. FPS denotes the number of images processed by the model per second.
Method#Params#Params %CLIPTraining Times ↓
(a) Market-1501
CLIP-ReID89M0.714689s1x
PromptSG94M0.752417s0.51x
(b) MSMT17
CLIP-ReID90M0.7312904s1x
PromptSG94M0.756108s0.47x
+ +Table 6. Comparison of training times and the number of parameters on Market-1501 and MSMT17. #Params denotes the number of learnable parameters in the whole framework. All models are evaluated on a single 3090Ti GPU. + +lower inference efficiency. Furthermore, our analysis illuminates the impact of employing a composed prompt during the inference phase, revealing that when we follow the same procedure as the training stage—composing text with query images—the Frames Per Second (FPS) is only 0.48 times that of the baseline. This is expected as we need to pass through two encoders for each query. However, we empirically discovered that using a fixed prompt “A photo of a person” for all queries may not lead to significant performance degradation, and it does not compromise efficiency. Therefore, one could opt for this version to achieve a more favorable balance between accuracy and efficiency. + +Comparison of training efficiency. In order to showcase the efficiency of our proposed approach, we carry out a comparative analysis between our one-stage PromptSG and the two-stage CLIP-ReID method, focusing on the number of learnable parameters and training speed. The details of this comparison are provided in Tab. 6. In terms of training parameters, on top of CLIP, CLIP-ReID incorporates an additional of parameters mainly through the ID-wise learnable prompt, our approach primarily extends through a fixed-size mapping network and an interaction module. Despite CLIP-ReID having $2\% -4\%$ fewer parameters than ours on two datasets, it may experience continuous growth in parame + +# Market-1501 + +![](images/feca2dc7394929335e72ea92959f9ce850f61b8a39a62e2475cdcea0fff5b8cd.jpg) + +![](images/1cd4adff6d5382cab00a2019c83ca0381f6d7f07b528314fbc9725150a8801c6.jpg) + +![](images/00c244b701a218a38b63a558615d783f1c8834016a2b33238e2554cc96b33df2.jpg) + +![](images/2cb5339f172ef720b9556a30778611c5d73a787469af19402ffdfec410bc4be5.jpg) + +![](images/c1334c074ae58cb3d4f0cc4fb760727a4bb18b9883f7286098067448a3f9a072.jpg) + +![](images/a1007a0500191772cdc74e6db5416de2aec4cdaf5c675ff8248d8471392e8820.jpg) + +![](images/6b1c7045ebecf179f63f880992e152f5aff049bd3216d784c756a6265e317a4a.jpg) + +![](images/f2de007f30ae743ab55260cb03dbd29ceabcbb65d63844c3213a1b54db402ec5.jpg) + +![](images/0d2acbcffc7f6fad9bffbce05c82cd0b7095771f741c5c970303f9fd8ede82c4.jpg) +(a) + +![](images/79c51dc5b2ca4d88a713c96a5adc07ae3e2505ddef88e0214ae449712bdb0f4b.jpg) +(b) + +![](images/7409d2d9755a5aaf979d31dff90ab6e7870feca7c7b9782f6f2a7d24b8ab91b5.jpg) +Figure 4. Transformer visualization of attention maps. (a) Original images, (b) CLIP-ReID, (c) PromptSG without composed training, (d) PromptSG. We see our method is effective in simultaneously focusing on the semantic clues and exploring more discriminative parts. 
+ +(c) + +![](images/5db50ad898b6c4f9a76836da6e0597005229978be9362ec8d10ab51a1c2a8324.jpg) + +(c) + +![](images/8e76daac518e394b4049590603cd54d5e0038848903f0ed80b2a86a7bd8c17b6.jpg) +(a) + +![](images/40d336de2572c1136ac1c1e4021094010dcfa5070d595e8bbfb285f1018626ed.jpg) +(b) + +![](images/882132aed170e1a8c2ffc1d5ef388ab2aaf0af67b6b5953bd19f84af6f3b5908.jpg) +(c) + +![](images/72bc7d80a9782084e02a24ac60b25dfcfe1e372f77d648997a935d6c30620b95.jpg) +(d) + +# MSMT17 + +![](images/9a3ffad7a1f1767d079dd6293716427669b57664fbd58d0cc223d4816d1ec51f.jpg) + +![](images/5b7a72291d087492b182ce21b78e143313ed41ee68e2fb29ea811ff44d365fc7.jpg) + +![](images/781fc5e97c4d16243dabe1e8961d94aabcad921c3aa5fa3e738101af85dc77d5.jpg) + +![](images/901b3be8690998e5138d0d00fe5718c681e46f648e4f6979bc84ab6a3e9437e3.jpg) + +![](images/d0bc48db2e60abe1af9033cdbf65c8b06fc165961652f9859decfe9794fe4c39.jpg) + +![](images/0b1f9b73a77e337d1d4ad083ad965ed33e5e682571a5ee903931a284dc5d94cf.jpg) + +![](images/a489345cf73a8435aaf5c1773f5eea7db8e0e3f558fa76b229df34cfa89f29e4.jpg) + +![](images/39844a844c9c01b88108038a341d54bdeabb74547db4f37fd77e338c32019399.jpg) + +![](images/71171cc05606846363db5be664d3593ff0bbc08712c50c91460d1c0278f69b4f.jpg) + +![](images/e8e313c3d97f5e3293d5b9500cf41f28414b4f138014dc5ad8cb557ddc6024a7.jpg) + +![](images/e8bee1ef3891a564e342469ae4ccc45311f5172652038e98eb2a4a638b70edd2.jpg) + +![](images/fdbf44fa851a6a7f8ee2411afef4382e461f8255244c71a96cfe241cce4c0e91.jpg) + +![](images/0d10007a16851c423225786130ee858c79ba58e88b23a9305803f0d0e0cbef42.jpg) +(a) + +![](images/821091c3aaa44faa9fe08b873a766e510c897d256ad5acfb439186ed4f77e69e.jpg) +(b) + +![](images/06b51ec49d039aeb748d51c61d492bf580c90d40ec911b11a222dcd313abbf32.jpg) +(c) + +![](images/d5fbe033f289713efc8f4787c789748bb48aea4c4d056234d0e7ab9efa1d048b.jpg) +(d) + +ters in scenarios with a higher number of classes or evolving dynamics. In contrast, PromptSG demonstrates stronger robustness in the number of parameters and achieves significant faster training speed. It can achieve a speedup of approximately 2 times faster during training compared to the two-stage method. + +# 6. Qualitative Analysis + +To have an intuitive understanding and validate the effectiveness of our method, we conduct a qualitative analysis where visualization of attention maps is presented in Fig. 4. Specifically, we exhibit examples from Market-1501 and MSMT17 datasets, each with two training images and two gallery images. We carefully selected some challenging examples, including those with complex backgrounds or images depicting multiple individuals. To gain a better insight into the regions of interest attended by the model in zero-shot scenarios, we do not use the common protocol GramCAM [30], as it needs the class-prediction scores and might be considered less suitable for Transformer-type backbones. Following [21], we use the Transformer-interpretability method in [2]. + +We compare our (d) PromptSG with (b) CLIP-ReID and (c) PromptSG without image-composed training. It can be seen that our method exhibits significant effectiveness, as it adeptly captures semantic information while also concentrating on more detailed appearance details. For example, in the first row of the Market-1501 dataset, the attention map of CLIP-ReID is susceptible to interference from background elements like "car". On the other hand, PromptSG w/o composed training tends to emphasize semantic information related to the 'person,' focusing on the location of the head, arms, and legs. 
In contrast, our method goes beyond this by also exploring appearance features, such as + +identifying individuals wearing hats or carrying backpacks. Finally, in the first row examples of MSMT17, where additional pedestrians appear in the image, our method excels in effectively filtering out unnecessary pedestrians, while CLIP-ReID fails. + +# 7. Conclusion + +In this paper, we propose PromptSG, a simple yet effective framework that exploits the foundational model CLIP for the person ReID task. We show that language guidance is an effective way to adapt pre-trained multimodal models for the uni-modal retrieval tasks. Through leveraging the aligned multi-modal latent space provided by CLIP, the textual prompt "A photo of a person" can naturally address the challenge of the visual encoder in its struggle to capture semantic information. To probe more fine-grained appearance features, we incorporate an inversion network to learn pseudo tokens that describe the image context. + +Discussion and Limitation. Despite the considerable potential of language prompt learning in ReID tasks, prompt learning in the vision branch remains a largely untapped area. Fine-tuning the visual encoder for strong supervised performance may lead to poor zero-shot generalization. We hope our work can inspire future research on fully unleashing the potential of large foundation models in challenging ReID tasks. + +Acknowledgments This work was supported by the National Key R&D Program of China under Grant 2022YFB3103500, the National Natural Science Foundation of China under Grants 62106258, 62006242 and 62202459, and the China Postdoctoral Science Foundation under Grant 2022M713348 and 2022TQ0363, and Young Elite Scientists Sponsorship Program by CAST (2023QNRC001) and BAST (NO.BYESS2023304). + +# References + +[1] Alberto Baldrati, Lorenzo Agnolucci, Marco Bertini, and Alberto Del Bimbo. Zero-shot composed image retrieval with textual inversion. In Proceedings of the IEEE international conference on computer vision (ICCV), 2023. 3 +[2] Hila Chefer, Shir Gur, and Lior Wolf. Transformer interpretability beyond attention visualization. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2021. 1, 8 +[3] Binghui Chen, Weihong Deng, and Jiani Hu. Mixed high-order attention network for person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 2 +[4] Tianlong Chen, Shaojin Ding, Jingyi Xie, Ye Yuan, Wuyang Chen, Yang Yang, Zhou Ren, and Zhangyang Wang. Abdnet: Attentive but diverse person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 1 +[5] Weihua Chen, Xianzhe Xu, Jian Jia, Hao Luo, Yaohua Wang, Fan Wang, Rong Jin, and Xiuyu Sun. Beyond appearance: a semantic controllable self-supervised learning framework for human-centric visual tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2 +[6] Xiaohua Chen, Yucan Zhou, Dayan Wu, Chule Yang, Bo Li, Qinghua Hu, and Weiping Wang. Area: adaptive reweighting via effective area for long-tailed classification. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2023. 3 +[7] Niv Cohen, Rinon Gal, Eli A Meirom, Gal Chechik, and Yuval Atzmon. "this is my unicorn, fluffy": Personalizing frozen vision-language representations. In Proceedings of the European conference on computer vision (ECCV), 2022. 
4 +[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 2 +[9] Zi-Yi Dou, Yichong Xu, Zhe Gan, Jianfeng Wang, Shuohang Wang, Lijuan Wang, Chenguang Zhu, Pengchuan Zhang, Lu Yuan, Nanyun Peng, et al. An empirical study of training end-to-end vision-and-language transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 5 +[10] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit Haim Bermano, Gal Chechik, and Daniel Cohen-or. An image is worth one word: Personalizing text-to-image generation using textual inversion. In International Conference on Learning Representations (ICLR), 2022. 2, 3 +[11] Xiaoshuai Hao, Wanqian Zhang, Dayan Wu, Fei Zhu, and Bo Li. Dual alignment unsupervised domain adaptation for video-text retrieval. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2023. 3 + +[12] Lingxiao He, Xingyu Liao, Wu Liu, Xinchen Liu, Peng Cheng, and Tao Mei. Fastreid: A pytorch toolbox for general instance re-identification. arXiv preprint arXiv:2006.02631, 2020.5 +[13] Lingxiao He, Xingyu Liao, Wu Liu, Xinchen Liu, Peng Cheng, and Tao Mei. Fastreid: A pytorch toolbox for general instance re-identification. In Proceedings of the 31st ACM International Conference on Multimedia (ACM MM), 2023. 5 +[14] Shuting He, Hao Luo, Pichao Wang, Fan Wang, Hao Li, and Wei Jiang. Transreid: Transformer-based object re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2021. 1, 2, 6 +[15] Pingting Hong, Dayan Wu, Bo Li, and Weiping Wang. Camera-specific informative data augmentation module for unbalanced person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 2 +[16] Siteng Huang, Biao Gong, Yulin Pan, Jianwen Jiang, Yiliang Lv, Yuyuan Li, and Donglin Wang. Vop: Text-video co-operative prompt tuning for cross-modal retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3 +[17] Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning (ICML), 2015. 5 +[18] Zhengbao Jiang, Frank F Xu, Jun Araki, and Graham Neubig. How can we know what language models know? Transactions of the Association for Computational Linguistics (ACL), 2020. 3 +[19] Dengjie Li, Siyu Chen, Yujie Zhong, Fan Liang, and Lin Ma. Dip: Learning discriminative implicit parts for person re-identification. arXiv preprint arXiv:2212.13906, 2022. 1 +[20] Hanjun Li, Gaojie Wu, and Wei-Shi Zheng. Combined depth space based architecture search for person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2021. 6 +[21] Siyuan Li, Li Sun, and Qingli Li. Clip-reid: exploiting vision-language model for image re-identification without concrete text labels. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2023. 2, 3, 6, 8 +[22] Wei Li, Rui Zhao, Tong Xiao, and Xiaogang Wang. Deepreid: Deep filter pairing neural network for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2014. 
1, 5 +[23] Wei Li, Xiatian Zhu, and Shaogang Gong. Harmonious attention network for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 2 +[24] Hao Luo, Youzhi Gu, Xingyu Liao, Shenqi Lai, and Wei Jiang. Bag of tricks and a strong baseline for deep person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops (CVPRW), 2019. 2, 5 + +[25] Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. arXiv preprint arXiv:2111.09734, 2021. 3 +[26] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning (ICML), 2021. 2 +[27] Haocong Rao and Chunyan Miao. Transg: Transformer-based skeleton graph prototype contrastive learning with structure-trajectory prompted reconstruction for person re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2 +[28] Yongming Rao, Guangyi Chen, Jiwen Lu, and Jie Zhou. Counterfactual attention learning for fine-grained visual categorization and re-identification. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 6 +[29] Kuniaki Saito, Kihyuk Sohn, Xiang Zhang, Chun-Liang Li, Chen-Yu Lee, Kate Saenko, and Tomas Pfister. Pic2word: Mapping pictures to words for zero-shot composed image retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3 +[30] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision (ICCV), 2017. 8 +[31] Chunfeng Song, Yan Huang, Wanli Ouyang, and Liang Wang. Mask-guided contrastive attention model for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 2 +[32] Qinghang Su, Dayan Wu, Chenming Wu, Bo Li, and Weiping Wang. From data to optimization: Data-free deep incremental hashing with data disambiguation and adaptive proxies. IEEE Transactions on Circuits and Systems for Video Technology (TCSVT), 2024. 3 +[33] Yifan Sun, Liang Zheng, Yi Yang, Qi Tian, and Shengjin Wang. Beyond part models: Person retrieval with refined part pooling (and a strong convolutional baseline). In Proceedings of the European conference on computer vision (ECCV), 2018. 2 +[34] Yifan Sun, Changmao Cheng, Yuhan Zhang, Chi Zhang, Liang Zheng, Zhongdao Wang, and Yichen Wei. Circle loss: A unified perspective of pair similarity optimization. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2020. 2 +[35] Lei Tan, Pingyang Dai, Rongrong Ji, and Yongjian Wu. Dynamic prototype mask for occluded person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 1 +[36] Chiat-Pin Tay, Sharmili Roy, and Kim-Hui Yap. Aanet: Attribute attention network for person re-identifications. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 2 + +[37] Guanshuo Wang, Yufeng Yuan, Xiong Chen, Jiwei Li, and Xi Zhou. 
Learning discriminative features with multiple granularities for person re-identification. In Proceedings of the 26th ACM international conference on Multimedia (ACM MM), 2018. 2 +[38] Lin Wang, Wanqian Zhang, Dayan Wu, Fei Zhu, and Bo Li. Attack is the best defense: Towards preemptive-protection person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 2 +[39] Pingyu Wang, Zhicheng Zhao, Fei Su, and Honying Meng. Ltreid: Factorizable feature generation with independent components for long-tailed person re-identification. IEEE Transactions on Multimedia (TMM), 2022. 6 +[40] Tao Wang, Hong Liu, Pinhao Song, Tianyu Guo, and Wei Shi. Pose-guided feature disentangling for occluded person re-identification based on transformer. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2022. 1 +[41] Longhui Wei, Shiliang Zhang, Wen Gao, and Qi Tian. Person transfer gan to bridge domain gap for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 5 +[42] Dayan Wu, Qi Dai, Jing Liu, Bo Li, and Weiping Wang. Deep incremental hashing network for efficient image retrieval. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 3 +[43] Wenjie Yang, Houjing Huang, Zhang Zhang, Xiaotang Chen, Kaiqi Huang, and Shu Zhang. Towards rich feature discovery with class activation maps augmentation for person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 2 +[44] Zexian Yang, Dayan Wu, Wanqian Zhang, Bo Li, and Weiping Wang. Handling label uncertainty for camera incremental person re-identification. In Proceedings of the 31st ACM International Conference on Multimedia (ACM MM), 2023. 1 +[45] Mang Ye, Jianbing Shen, Gaojie Lin, Tao Xiang, Ling Shao, and Steven CH Hoi. Deep learning for person re-identification: A survey and outlook. IEEE transactions on pattern analysis and machine intelligence (TPAMI), 2021. 1 +[46] Guiwei Zhang, Yongfei Zhang, Tianyu Zhang, Bo Li, and Shiliang Pu. Pha: Patch-wise high-frequency augmentation for transformer-based person re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 6 +[47] Quan Zhang, Jianhuang Lai, Zhanxiang Feng, and Xiaohua Xie. Seeing like a human: Asynchronous learning with dynamic progressive refinement for person re-identification. IEEE Transactions on Image Processing (TIP), 2021. 6 +[48] Renrui Zhang, Wei Zhang, Rongyao Fang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li. Tip-adapter: Training-free adaption of clip for few-shot classification. In European Conference on Computer Vision (ECCV), 2022. 3 +[49] Wanqian Zhang, Dayan Wu, Yu Zhou, Bo Li, Weiping Wang, and Dan Meng. Binary neural network hashing for image retrieval. In Proceedings of the 44th international ACM SIGIR + +conference on research and development in information retrieval (SIGIR)l, 2021.3 +[50] Zhizheng Zhang, Cuiling Lan, Wenjun Zeng, Xin Jin, and Zhibo Chen. Relation-aware global attention for person re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6 +[51] Liang Zheng, Liyue Shen, Lu Tian, Shengjin Wang, Jingdong Wang, and Qi Tian. Scalable person re-identification: A benchmark. In Proceedings of the IEEE international conference on computer vision (ICCV), 2015. 
5 +[52] Zhedong Zheng, Liang Zheng, and Yi Yang. Unlabeled samples generated by gan improve the person re-identification baseline in vitro. In Proceedings of the IEEE international conference on computer vision (ICCV), 2017. 5 +[53] Kaiyang Zhou, Yongxin Yang, Andrea Cavallaro, and Tao Xiang. Omni-scale feature learning for person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 6 +[54] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision (IJCV), 2022. 3 +[55] Xiao Zhou, Yujie Zhong, Zhen Cheng, Fan Liang, and Lin Ma. Adaptive sparse pairwise loss for object re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2 +[56] Haowei Zhu, Wenjing Ke, Dong Li, Ji Liu, Lu Tian, and Yi Shan. Dual cross-attention learning for fine-grained visual categorization and object re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3, 6 +[57] Kuan Zhu, Haiyun Guo, Zhiwei Liu, Ming Tang, and Jinqiao Wang. Identity-guided human semantic parsing for person re-identification. In Proceedings of the European conference on computer vision (ECCV), 2020. 1, 6 +[58] Kuan Zhu, Haiyun Guo, Shiliang Zhang, Yaowei Wang, Jing Liu, Jinqiao Wang, and Ming Tang. Aaformer: Auto-aligned transformer for person re-identification. IEEE Transactions on Neural Networks and Learning Systems (TNNLS), 2023. 6 \ No newline at end of file diff --git a/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/images.zip b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..802abee16e8e6c5d2d96a8f1911ddb8e419555ae --- /dev/null +++ b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6d62069064377a3aa3ebeb5c165446d749c961d5173409faf47adfb3deff9c8 +size 554213 diff --git a/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/layout.json b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..9e7bfb3a95531bc5d23d088cc20162e5df1c195c --- /dev/null +++ b/2024/A Pedestrian is Worth One Prompt_ Towards Language Guidance Person Re-Identification/layout.json @@ -0,0 +1,10043 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 70, + 102, + 523, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 102, + 523, + 138 + ], + "spans": [ + { + "bbox": [ + 70, + 102, + 523, + 138 + ], + "type": "text", + "content": "A Pedestrian is Worth One Prompt: Towards Language Guidance Person Re-Identification" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "spans": [ + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "text", + "content": "Zexian Yang" + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "text", + "content": " Dayan Wu" + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + 
"type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "text", + "content": " Chenming Wu" + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "text", + "content": " Zheng Lin" + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "text", + "content": " Jingzi Gu" + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "text", + "content": " Weiping Wang" + }, + { + "bbox": [ + 62, + 160, + 531, + 175 + ], + "type": "inline_equation", + "content": "^{1,2}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 131, + 175, + 463, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 175, + 463, + 189 + ], + "spans": [ + { + "bbox": [ + 131, + 175, + 463, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 131, + 175, + 463, + 189 + ], + "type": "text", + "content": "Institute of Information Engineering, Chinese Academy of Sciences" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 93, + 189, + 501, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 189, + 501, + 203 + ], + "spans": [ + { + "bbox": [ + 93, + 189, + 501, + 203 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 93, + 189, + 501, + 203 + ], + "type": "text", + "content": "School of Cyber Security, University of Chinese Academy of Sciences " + }, + { + "bbox": [ + 93, + 189, + 501, + 203 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 93, + 189, + 501, + 203 + ], + "type": "text", + "content": " Baidu Inc" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 78, + 205, + 515, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 205, + 515, + 217 + ], + "spans": [ + { + "bbox": [ + 78, + 205, + 515, + 217 + ], + "type": "text", + "content": "{yangzexian,wudayan,linzheng,gujingzi,wangweiping}@iie.ac.cn,wuchenming@baidu.com" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "spans": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 270, + 290, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 270, + 290, + 571 + ], + "spans": [ + { + "bbox": [ + 46, + 270, + 290, + 571 + ], + "type": "text", + "content": "Extensive advancements have been made in person ReID through the mining of semantic information. Nevertheless, existing methods that utilize semantic-parts from a single image modality do not explicitly achieve this goal. Whiteness the impressive capabilities in multimodal understanding of Vision Language Foundation Model CLIP, a recent two-stage CLIP-based method employs automated prompt engineering to obtain specific textual labels for classifying pedestrians. However, we note that the predefined soft prompts may be inadequate in expressing the entire visual context and struggle to generalize to unseen classes. 
This paper presents an end-to-end Prompt-driven Semantic Guidance (PromptSG) framework that harnesses the rich semantics inherent in CLIP. Specifically, we guide the model to attend to regions that are semantically faithful to the prompt. To provide personalized language descriptions for specific individuals, we propose learning pseudo tokens that represent specific visual contexts. This design not only facilitates learning fine-grained attribute information but also can inherently leverage language prompts during inference. Without requiring additional labeling efforts, our PromptSG achieves state-of-the-art by over " + }, + { + "bbox": [ + 46, + 270, + 290, + 571 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 46, + 270, + 290, + 571 + ], + "type": "text", + "content": " on MSMT17 and nearly " + }, + { + "bbox": [ + 46, + 270, + 290, + 571 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 46, + 270, + 290, + 571 + ], + "type": "text", + "content": " on the Market-1501 benchmark. The codes will be available at https://github.com/Yzxian16/PromptSG" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 591, + 128, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 591, + 128, + 604 + ], + "spans": [ + { + "bbox": [ + 47, + 591, + 128, + 604 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 611, + 287, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 611, + 287, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 611, + 287, + 696 + ], + "type": "text", + "content": "Person Re-Identification (ReID) is a crucial research area in computer vision that focuses on identifying individuals across different camera views or time instances [4, 44, 45, 57], which is a sub-task of image-based retrieval. Features of the same individual, as captured by various cameras, are prone to alterations due to changes in lighting, background, and body posture. 
Consequently, the effectiveness of a so" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 310, + 245, + 388, + 285 + ], + "blocks": [ + { + "bbox": [ + 310, + 245, + 388, + 285 + ], + "lines": [ + { + "bbox": [ + 310, + 245, + 388, + 285 + ], + "spans": [ + { + "bbox": [ + 310, + 245, + 388, + 285 + ], + "type": "image", + "image_path": "1a1c3dd471251170d4f54f010e19bd75ee21d5479288340c29efd66f8c5de453.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 317, + 285, + 342, + 293 + ], + "lines": [ + { + "bbox": [ + 317, + 285, + 342, + 293 + ], + "spans": [ + { + "bbox": [ + 317, + 285, + 342, + 293 + ], + "type": "text", + "content": "(a) (b)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 344, + 285, + 383, + 294 + ], + "lines": [ + { + "bbox": [ + 344, + 285, + 383, + 294 + ], + "spans": [ + { + "bbox": [ + 344, + 285, + 383, + 294 + ], + "type": "text", + "content": "(c) (d)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 299, + 383, + 308 + ], + "lines": [ + { + "bbox": [ + 310, + 299, + 383, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 299, + 383, + 308 + ], + "type": "text", + "content": "Include background \"car\"" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 389, + 247, + 463, + 285 + ], + "blocks": [ + { + "bbox": [ + 389, + 247, + 463, + 285 + ], + "lines": [ + { + "bbox": [ + 389, + 247, + 463, + 285 + ], + "spans": [ + { + "bbox": [ + 389, + 247, + 463, + 285 + ], + "type": "image", + "image_path": "96297b5662bd50f968dae48a98e7311bdac0c39e8977ba83975fdb8a0132a8a8.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 391, + 285, + 459, + 293 + ], + "lines": [ + { + "bbox": [ + 391, + 285, + 459, + 293 + ], + "spans": [ + { + "bbox": [ + 391, + 285, + 459, + 293 + ], + "type": "text", + "content": "(a) (b) (c) (d)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 389, + 299, + 455, + 308 + ], + "lines": [ + { + "bbox": [ + 389, + 299, + 455, + 308 + ], + "spans": [ + { + "bbox": [ + 389, + 299, + 455, + 308 + ], + "type": "text", + "content": "Include two pedestrians" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 465, + 247, + 540, + 285 + ], + "blocks": [ + { + "bbox": [ + 465, + 247, + 540, + 285 + ], + "lines": [ + { + "bbox": [ + 465, + 247, + 540, + 285 + ], + "spans": [ + { + "bbox": [ + 465, + 247, + 540, + 285 + ], + "type": "image", + "image_path": "6ea4db5cdc1b1af9491cbaa55e9a9c46194374daa6a1b31b3ab6b52377e241ff.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 469, + 285, + 538, + 294 + ], + "lines": [ + { + "bbox": [ + 469, + 285, + 538, + 294 + ], + "spans": [ + { + "bbox": [ + 469, + 285, + 538, + 294 + ], + "type": "text", + "content": "(a) (b) (c) (d)" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 484, + 299, + 540, + 308 + ], + "lines": [ + { + "bbox": [ + 484, + 299, + 540, + 308 + ], + "spans": [ + { + "bbox": [ + 484, + 299, + 540, + 308 + ], + "type": "text", + "content": "More detailed attire" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ 
+ 309, + 373, + 543, + 492 + ], + "blocks": [ + { + "bbox": [ + 305, + 319, + 545, + 364 + ], + "lines": [ + { + "bbox": [ + 305, + 319, + 545, + 364 + ], + "spans": [ + { + "bbox": [ + 305, + 319, + 545, + 364 + ], + "type": "text", + "content": "Figure 1. Transformer visualization [2] of attention maps. (a) Original images, (b) CLIP-ReID, (c) Our method w/o inversion, and (d) Our method guided by the composed prompts captures both the exact semantic parts and the external appearance details." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 309, + 373, + 543, + 492 + ], + "lines": [ + { + "bbox": [ + 309, + 373, + 543, + 492 + ], + "spans": [ + { + "bbox": [ + 309, + 373, + 543, + 492 + ], + "type": "image", + "image_path": "a948c436517257f617dc23830059e944b466eb12000858482aa2823d99bbc15b.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 504, + 545, + 559 + ], + "lines": [ + { + "bbox": [ + 304, + 504, + 545, + 559 + ], + "spans": [ + { + "bbox": [ + 304, + 504, + 545, + 559 + ], + "type": "text", + "content": "Figure 2. The core idea of our method. Our method inverts input images into pseudo-word tokens " + }, + { + "bbox": [ + 304, + 504, + 545, + 559 + ], + "type": "inline_equation", + "content": "S_{*}" + }, + { + "bbox": [ + 304, + 504, + 545, + 559 + ], + "type": "text", + "content": ", which are then composed into a textual prompt to describe the specific visual context. The attention map of patch tokens is further controlled by the semantics of the textual prompt." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 581, + 545, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 545, + 629 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 545, + 629 + ], + "type": "text", + "content": "phisticated ReID model fundamentally depends on its capability to learn discriminative features that are impervious to camera-specific variations, thereby enhancing the model's capacity to generalize to previously unseen classes." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 630, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 546, + 713 + ], + "type": "text", + "content": "Modern ReID models, constructed upon uni-modal architectures such as the Convolutional Neural Network (CNN) [22] or Vision Transformer (ViT) [14, 19, 35, 40], have made significant advancements within the field. A substantial portion of these solutions focus on the extraction of pertinent regions to rectify misalignment issues. These strategies are dedicated to the extraction of semantic" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 135, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 135, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 135, + 712 + ], + "type": "text", + "content": "*Corresponding author." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17343" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "content": "data, such as the human body structure, primarily facilitated through the integration of identity classification [33, 43] and metric learning [24, 55]. However, it is worth noting that these attention regions generally highlight only specific locally discriminative parts without explicit semantic control. When a distinct mask or skeleton direction is necessitated [27, 31], the need for additional, labor-intensive, and time-consuming manual labeling becomes inevitable." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 171, + 289, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 171, + 289, + 567 + ], + "spans": [ + { + "bbox": [ + 46, + 171, + 289, + 567 + ], + "type": "text", + "content": "Large-scale Vision Language (VL) models, exemplified by Contrastive Language-Image Pre-Training (CLIP) [26], have recently shown remarkable abilities in reasoning across multi-modal data. CLIP model, when provided with text prompts such as 'A photo of a [CLASS]' displays exceptional zero-shot classification performance at the image level. This leads to a question: Can we further direct attention to regions of interest through natural language descriptions, such as 'A photo of a person'? However, due to the resulting visual representation lacking fine-grained information necessary for distinguishing between identities, integrating CLIP straightforwardly into person ReID is non-trivial. Additionally, the query 'A photo of a person' presents a challenge due to the absence of specific descriptors, thereby lacking a personalized prompt for individual identification. The pioneering CLIP-ReID [21] introduces automated prompt engineering on CLIP by incorporating additional ID-wise learnable vectors customized for specific identities. Particularly, CLIP-ReID employs a two-stage training process that first optimizes the learnable vectors with the frozen CLIP model, and then restricts the image encoder with the learned textual descriptions. However, the disentangled usage, i.e., only the visual embedding is utilized during inference, renders the learned soft prompts ineffective for unseen prompts. As a result, the attention regions potentially do not entirely encompass the body part, and may inadvertently include background elements, such as cars and additional pedestrians captured in the scene, as illustrated in the first two examples in Fig. 1(b). 
In addition, even though CLIP-ReID adheres to training objectives aimed at vision-language alignment, such predefined soft prompts may not be sufficient to characterize the entire visual context of the specified pedestrian." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": "In this paper, we propose Prompt-driven Semantic Guidance (PromptSG), that aims to streamline the two-stage pipeline by leveraging the foundational CLIP model effectively and efficiently. As outlined in Fig. 2, our core insight is straightforward: we strive to activate CLIP's cross-modal comprehension using explicit language prompts, and the regions extracted can then be fine-tuned to enhance semantic discriminativeness. Specifically, given a textual prompt, we refine the patch tokens by injecting cross-attention maps, determining which patch attends to the corresponding semantics. Following this rationale, we revisit the fundamental issue that the term 'person' serves as a" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 304, + 72, + 547, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 347 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 347 + ], + "type": "text", + "content": "coarse descriptor, lacking personalized language descriptions for individual identities. Beyond semantic information related to the 'person', appearance information is also crucial for identification purposes [5]. While semantic information aids the model in better body part localization, appearance information further refines the focus on an individual's attire. Hence, we employ the textual inversion technique [10], which learns to represent visual context through unique token. We use a lightweight inversion network that maps the image to a pseudo-token. This pseudo-token can then be incorporated into the textual prompt, creating an embedding that closely mirrors the original image. Compared to CLIP-ReID, our solution offers two primary advantages: 1) The textual prompt emphasizes regions in the image via a cross-attention map, capturing the precise semantic part (Fig. 1(c)), and can also be utilized for unseen classes during inference. 2) The model can learn the personal token of the query image in an end-to-end manner, providing more detailed guidance specific to an identity (Fig. 1(d)). Importantly, our proposed method is free, i.e. there is no need to supply additional information, such as masks, bounding boxes, or precise descriptions. We summarize the contribution of this paper as follows." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 306, + 348, + 545, + 526 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 306, + 348, + 545, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 348, + 545, + 418 + ], + "spans": [ + { + "bbox": [ + 306, + 348, + 545, + 418 + ], + "type": "text", + "content": "- Leveraging the exceptional multi-modal reasoning capabilities of CLIP, we propose PromptSG, a novel framework for the person ReID task. This approach uniquely utilizes language prompts, providing explicit assistance to the visual encoder in efficiently capturing semantic information." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 419, + 545, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 419, + 545, + 465 + ], + "spans": [ + { + "bbox": [ + 306, + 419, + 545, + 465 + ], + "type": "text", + "content": "- To create a more personalized description for the individual, we propose learning to represent the specific, more detailed appearance attributes, by employing the inversion network." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 467, + 545, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 467, + 545, + 526 + ], + "spans": [ + { + "bbox": [ + 306, + 467, + 545, + 526 + ], + "type": "text", + "content": "- Without any additional labelling efforts, PromptSG surpasses previous SOTA method [21] by over " + }, + { + "bbox": [ + 306, + 467, + 545, + 526 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 306, + 467, + 545, + 526 + ], + "type": "text", + "content": " on the MSMT17 dataset. It also exhibits superior performance on the Market-1501 benchmark, surpassing previous SOTA method [46] by nearly " + }, + { + "bbox": [ + 306, + 467, + 545, + 526 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 306, + 467, + 545, + 526 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 537, + 392, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 537, + 392, + 550 + ], + "spans": [ + { + "bbox": [ + 306, + 537, + 392, + 550 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 558, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 547, + 715 + ], + "type": "text", + "content": "Person Re-identification remains an important yet challenging task due to the subtle inter-class differences. To learn more discriminative representations, a category of CNN-based techniques has primarily concentrated on optimizing the distance metric via metric learning [15, 33, 34, 37, 38]. Recognizing the importance of semantic information, a substantial body of research [3, 23, 31, 43] explores the use of attention mechanisms, which guide the network to extract attention-aware features for body parts. For example, AAnet [36] adopts a unified learning framework that incorporates attribute attention maps through extra attribute labels. 
Pioneering work TransReID [14] introduces a self-attention-based architecture, Vision Transformer (ViT) [8]," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "17344" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 73, + 369, + 299 + ], + "blocks": [ + { + "bbox": [ + 52, + 73, + 369, + 299 + ], + "lines": [ + { + "bbox": [ + 52, + 73, + 369, + 299 + ], + "spans": [ + { + "bbox": [ + 52, + 73, + 369, + 299 + ], + "type": "image", + "image_path": "4c93416d54c5bb2b08a721e9e8ab3ec52a5887b08c7fa360e0d66e84179597f3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 310, + 546, + 334 + ], + "lines": [ + { + "bbox": [ + 46, + 310, + 546, + 334 + ], + "spans": [ + { + "bbox": [ + 46, + 310, + 546, + 334 + ], + "type": "text", + "content": "Figure 3. Overview of our framework. PromptSG learns pseudo token " + }, + { + "bbox": [ + 46, + 310, + 546, + 334 + ], + "type": "inline_equation", + "content": "S_{*}" + }, + { + "bbox": [ + 46, + 310, + 546, + 334 + ], + "type": "text", + "content": " from the specific visual embedding, and the visual encoder learns semantic faithful representations with the guidance of language prompts that occur in the Multimodal Interaction Module." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 375, + 73, + 541, + 299 + ], + "blocks": [ + { + "bbox": [ + 375, + 73, + 541, + 299 + ], + "lines": [ + { + "bbox": [ + 375, + 73, + 541, + 299 + ], + "spans": [ + { + "bbox": [ + 375, + 73, + 541, + 299 + ], + "type": "image", + "image_path": "0b046aeeb20e6eebeda8b6ac236d946962bd55fb3fd6170dad0c0c9b4bfc0b05.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 354, + 287, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 354, + 287, + 484 + ], + "spans": [ + { + "bbox": [ + 46, + 354, + 287, + 484 + ], + "type": "text", + "content": "for advancing ReID tasks. DCAL [56] proposes to implicitly extract the local features through a global-local cross-attention mechanism. However, these methods solely apply attention mechanisms to the visual modality, and the lack of explicit language guidance potentially constrains their performance. The work most relevant to ours, CLIP-ReID [21], is the first to utilize vision-language pre-training model CLIP in ReID task. However, CLIP-ReID fails to leverage the linguistic capability of the text encoder in CLIP during inference, since the ID-specific learnable tokens only influence the seen identities." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 486, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 288, + 715 + ], + "type": "text", + "content": "Large-scale vision-language pre-training model connects the image representation with text embedding in a shared embedding space, has demonstrated effectiveness across a wide range of uni-modal and multimodal downstream tasks. 
These include classification [6, 48], image captioning [25], and cross-modal retrieval [11, 16, 32, 42, 49]. Foundational VL models, such as CLIP, usually undergo training on extensive image-text pairs with contrastive learning objectives. This foundational pre-training provides the model with strong open-vocabulary classification capabilities. Inherited from prompt learning in NLP [18], CoOp [54] proposes to explore learnable prompt optimization on few-shot classification. Following this soft prompt approach, CLIP-ReID pioneers the adaptation of CLIP for person ReID by classifying images into ID-specific prompts. Differing from CLIP-ReID, which focuses on vision-language alignment, our goal is to exploit rich semantic information from language to explicitly control the weights assigned to each patch or region, and im" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 354, + 545, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 354, + 545, + 378 + ], + "spans": [ + { + "bbox": [ + 305, + 354, + 545, + 378 + ], + "type": "text", + "content": "prove the two-stage framework by directly inverting images into the language latent space." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 379, + 546, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 379, + 546, + 511 + ], + "spans": [ + { + "bbox": [ + 304, + 379, + 546, + 511 + ], + "type": "text", + "content": "Textual Inversion, originally for personalized text-to-image generation [10], is a learning approach that aims to discover new pseudo-words in the word-embedding space. These pseudo-words are capable of encapsulating both the overall visual content and intricate visual details. Recently, the application of textual inversion has expanded to zero-shot composed image retrieval task [1, 29]. In these studies, a textual inversion network is typically pre-trained using extensive unlabeled image datasets. In this work, we stand out as the first to apply this learning paradigm to person ReID without any additional training data." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 524, + 384, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 524, + 384, + 538 + ], + "spans": [ + { + "bbox": [ + 306, + 524, + 384, + 538 + ], + "type": "text", + "content": "3. Preliminary" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "text", + "content": "Contrastive Language-Image Pre-training (CLIP) undergoes pre-training on a large corpus of image-text pairs, aligning visual and linguistic representations within a shared space through the matching of images with their corresponding text descriptions. Specifically, CLIP consists of a visual encoder " + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{V}(\\cdot)" + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "text", + "content": " and a text encoder " + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(\\cdot)" + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "text", + "content": ". 
The visual encoder " + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{V}(\\cdot)" + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "text", + "content": " takes an image " + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathbb{R}^{H \\times W \\times C}" + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "text", + "content": " as input. The text encoder " + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(\\cdot)" + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "text", + "content": " takes a tokenized textual description " + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "inline_equation", + "content": "t \\in \\mathbb{R}^{N \\times D}" + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "text", + "content": " as input, where " + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "inline_equation", + "content": "N, D" + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "text", + "content": " are the text's length and token feature dimension respectively. The pre-training objective is based on self-supervised contrastive learning, which minimizes cosine distance for matched image-text pairs. For the downstream tasks such as classification, the description of " + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 304, + 545, + 546, + 715 + ], + "type": "text", + "content": "-th class is typically obtained through the" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "17345" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "type": "text", + "content": "hand-crafted prompt, e.g., 'A photo of a [CLASS]'. Therefore, the probability of image " + }, + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "type": "text", + "content": " being classified as class " + }, + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "type": "text", + "content": " can be computed as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 125, + 287, + 156 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 125, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 76, + 125, + 287, + 156 + ], + "type": "interline_equation", + "content": "\\mathcal {P} (y | \\boldsymbol {x}) = \\frac {\\exp (\\operatorname {s i m} (\\mathcal {V} (\\boldsymbol {x}) , \\mathcal {T} (\\boldsymbol {t} _ {\\boldsymbol {y}})) / \\tau)}{\\sum_ {j = 1} ^ {K} \\exp (\\operatorname {s i m} (\\mathcal {V} (\\boldsymbol {x}), \\mathcal {T} (\\boldsymbol {t} _ {\\boldsymbol {j}})) / \\tau)}. 
\\tag {1}", + "image_path": "a9509b2c97df52887b9ddaa6beecd96cdeb37767d0bcf3c2646c848ceb1f13eb.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 164, + 286, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 164, + 286, + 190 + ], + "spans": [ + { + "bbox": [ + 46, + 164, + 286, + 190 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 164, + 286, + 190 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 164, + 286, + 190 + ], + "type": "text", + "content": " denotes the temperature, and " + }, + { + "bbox": [ + 46, + 164, + 286, + 190 + ], + "type": "inline_equation", + "content": "\\mathrm{sim}(a,b) = \\frac{a\\cdot b}{\\|a\\|_2\\|b\\|_2}" + }, + { + "bbox": [ + 46, + 164, + 286, + 190 + ], + "type": "text", + "content": " is the cosine similarity." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 191, + 287, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 191, + 287, + 322 + ], + "spans": [ + { + "bbox": [ + 46, + 191, + 287, + 322 + ], + "type": "text", + "content": "A simple approach to applying CLIP to person ReID involves substituting the linear classifier with image-to-text classification. However, given that labels in ReID tasks are solely index-based, there are no specific words to represent different persons. To tackle this challenge, CLIP-ReID crafts the prompt as 'A photo of a " + }, + { + "bbox": [ + 46, + 191, + 287, + 322 + ], + "type": "inline_equation", + "content": "[X_i]_1[X_i]_2[X_i]_3\\ldots[X_i]_M" + }, + { + "bbox": [ + 46, + 191, + 287, + 322 + ], + "type": "text", + "content": " person', where " + }, + { + "bbox": [ + 46, + 191, + 287, + 322 + ], + "type": "inline_equation", + "content": "[X_i]_m, m \\in \\{1,\\dots,M\\}" + }, + { + "bbox": [ + 46, + 191, + 287, + 322 + ], + "type": "text", + "content": " represents a set of ID-specific learnable tokens for the " + }, + { + "bbox": [ + 46, + 191, + 287, + 322 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 191, + 287, + 322 + ], + "type": "text", + "content": "-th ID. Nevertheless, CLIP-ReID optimizes ID-specific prompts exclusively bound to training IDs, it overlooks the chance to fully exploit the open-vocabulary capabilities inherent in CLIP." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 332, + 103, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 332, + 103, + 344 + ], + "spans": [ + { + "bbox": [ + 47, + 332, + 103, + 344 + ], + "type": "text", + "content": "4. Method" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 353, + 287, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 353, + 287, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 353, + 287, + 483 + ], + "type": "text", + "content": "An overview of our framework is depicted in Fig. 3. Starting with the visual embeddings derived from CLIP's visual encoder, our approach employs an inversion network to learn pseudo tokens that encapsulate the visual context. Following this, an interaction between visual and textual modalities is facilitated in the interaction module, leading to the final re-weighted representations. During the inference phase, we are presented with two options for textual inputs: an efficiency-driven simplified prompt and an accuracy-driven composed prompt. Note that the text encoder is frozen in our entire framework." 
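As a concrete illustration of Eq. (1), the following is a minimal PyTorch-style sketch (not the authors' code) of how zero-shot class probabilities are obtained from a frozen CLIP model by matching an image embedding against prompt embeddings; the encoder outputs here are random placeholders standing in for V(x) and T(t_j).

```python
# Minimal sketch of Eq. (1): zero-shot classification with a frozen CLIP model.
# `image_emb` / `prompt_embs` stand in for CLIP's V(x) and T(t_j) outputs.
import torch
import torch.nn.functional as F

def zero_shot_probs(image_emb: torch.Tensor,      # (D,) embedding V(x)
                    prompt_embs: torch.Tensor,    # (K, D) embeddings T(t_j), one per class
                    tau: float = 0.01) -> torch.Tensor:
    """Return P(y | x) over K classes via temperature-scaled cosine similarity."""
    image_emb = F.normalize(image_emb, dim=-1)        # unit-norm so dot product = cosine sim
    prompt_embs = F.normalize(prompt_embs, dim=-1)
    sims = prompt_embs @ image_emb                    # (K,) sim(V(x), T(t_j))
    return torch.softmax(sims / tau, dim=-1)          # softmax over classes, Eq. (1)

# Example with random placeholders standing in for real CLIP embeddings:
probs = zero_shot_probs(torch.randn(512), torch.randn(10, 512))
assert torch.isclose(probs.sum(), torch.tensor(1.0))
```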
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 491, + 286, + 505 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 491, + 286, + 505 + ], + "spans": [ + { + "bbox": [ + 47, + 491, + 286, + 505 + ], + "type": "text", + "content": "4.1. Learning the Personalized ID-Specific Prompt" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 510, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 287, + 605 + ], + "type": "text", + "content": "As suggested by prior research, the word-embedding space possesses sufficient expressiveness to encapsulate basic image concepts [7]. However, the inherent limitation lies in the pre-defined prompts in CLIP-ReID, which can only capture limited attributes and may not fully encapsulate the visual context. Contrarily, we propose learning the pseudo token by textual inversion technique that aligns with the context of the query image." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "f_{\\theta}(\\cdot)" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " denote an inversion network parameterized by " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": ", our goal is to invert the global visual embedding " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " from visual space of CLIP, represented as " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{v} \\in V" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": ", into a pseudo token " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "s_* \\in T_*" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "f_{\\theta}(\\mathbf{v}) = s_*" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "T_*" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " indicates the token embedding space. Subsequently, this pseudo token can be integrated into natural language sentences. As such, the language prompt for the input image is structured as 'A photo of a " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "s_*" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " person'. 
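A hypothetical sketch of the inversion network f_theta described above: it maps CLIP's global visual embedding v to a pseudo-token s_* in the token embedding space, which is then spliced into the prompt 'A photo of a s_* person'. The three-layer MLP with 512-d hidden states and a final BatchNorm follows the implementation details reported later in the paper; the exact dimensions and layer choices here are assumptions.

```python
# Hypothetical sketch of the inversion network f_theta: global visual embedding v -> pseudo-token s_*.
import torch
import torch.nn as nn

class InversionNetwork(nn.Module):
    def __init__(self, visual_dim: int = 512, token_dim: int = 512, hidden_dim: int = 512):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(visual_dim, hidden_dim), nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
            nn.Linear(hidden_dim, token_dim),
            nn.BatchNorm1d(token_dim),          # BN placed after the last state, as described later
        )

    def forward(self, v: torch.Tensor) -> torch.Tensor:
        return self.mlp(v)                      # s_* = f_theta(v)

# One pseudo-token per image in the batch; it replaces a placeholder token in the
# tokenized prompt "A photo of a s_* person" before the frozen text encoder is applied.
f_theta = InversionNetwork()
s_star = f_theta(torch.randn(4, 512))
```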
It is worth noting that this pseudo-token bears no relationship to an actual word but functions as a" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "content": "representation in the token embedding space. An input language prompt undergoes a tokenization process, resulting in several tokens. The tokenized prompt, denoted as " + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "t_p" + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "content": ", can be fed into the text encoder of CLIP to obtain text embedding " + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "l_p = \\mathcal{T}(t_p)" + }, + { + "bbox": [ + 305, + 72, + 545, + 167 + ], + "type": "text", + "content": ". To ensure that the learned pseudo-token effectively tells the context of the image, one can follow to the reconstruction objective of textual inversion by the symmetric contrastive loss, which is formulated as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 332, + 187, + 545, + 220 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 187, + 545, + 220 + ], + "spans": [ + { + "bbox": [ + 332, + 187, + 545, + 220 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {i 2 t} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\log \\frac {\\exp (\\sin (\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {p}) / \\tau)}{\\sum_ {i = 1} ^ {N} \\exp (\\sin (\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {i}) / \\tau)}, \\tag {2}", + "image_path": "2afaebacca3df7f65015a6a17a899dfd108fd8da9ec378c535a2bf49ae865e41.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 332, + 229, + 545, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 229, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 332, + 229, + 545, + 262 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {t 2 i} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\log \\frac {\\exp \\left(\\sin \\left(\\boldsymbol {l} _ {n} , \\boldsymbol {v} _ {p}\\right) / \\tau\\right)}{\\sum_ {i = 1} ^ {N} \\exp \\left(\\sin \\left(\\boldsymbol {l} _ {n} , \\boldsymbol {v} _ {i}\\right) / \\tau\\right)}. \\tag {3}", + "image_path": "d12808cc85cc63627f5f46711d7fde8816d921b947ef32a36694ab9a0735a127.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "spans": [ + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "text", + "content": "In this context, " + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "inline_equation", + "content": "l_{i}" + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "text", + "content": " represents the " + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "text", + "content": "-th image/text embedding in a batch. 
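A compact sketch of the symmetric image-to-text / text-to-image contrastive objective of Eqs. (2)-(3); this is an illustrative re-implementation rather than the authors' code, written in the conventional negated log-softmax (cross-entropy) form of InfoNCE, with a comment on how the supervised variant of Eqs. (4)-(6) extends it to same-ID positives P(i).

```python
# Illustrative sketch of the symmetric contrastive objective of Eqs. (2)-(3).
# v: (N, D) image embeddings; l: (N, D) embeddings of the composed prompts
# "A photo of a s_* person", where l[n] corresponds to image n.
import torch
import torch.nn.functional as F

def symmetric_contrastive_loss(v: torch.Tensor, l: torch.Tensor, tau: float = 0.07) -> torch.Tensor:
    v = F.normalize(v, dim=-1)
    l = F.normalize(l, dim=-1)
    logits = v @ l.t() / tau                            # (N, N) matrix of sim(v_n, l_i) / tau
    targets = torch.arange(v.size(0), device=v.device)  # matching pairs lie on the diagonal
    loss_i2t = F.cross_entropy(logits, targets)         # Eq. (2): image -> matching prompt
    loss_t2i = F.cross_entropy(logits.t(), targets)     # Eq. (3): prompt -> matching image
    return loss_i2t + loss_t2i

# For the supervised variant of Eqs. (4)-(6), every prompt/image sharing the anchor's
# identity is treated as a positive (the set P(i)), e.g. by summing the log-softmax
# over those positive entries instead of only the diagonal one.
loss = symmetric_contrastive_loss(torch.randn(8, 512), torch.randn(8, 512))
```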
" + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "inline_equation", + "content": "l_{p}" + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "text", + "content": " is the corresponding prompt embedding for " + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "inline_equation", + "content": "v_{n}" + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "text", + "content": " and is constructed in a manner analogous to " + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "inline_equation", + "content": "v_{p}" + }, + { + "bbox": [ + 305, + 266, + 545, + 301 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 302, + 545, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 302, + 545, + 421 + ], + "spans": [ + { + "bbox": [ + 305, + 302, + 545, + 421 + ], + "type": "text", + "content": "The underlying mechanism is grounded in the principle of cycle-consistency, wherein a pseudo token tends to faithfully represent the context of the image only when the text features closely align with corresponding image features. However, the contrastive loss fails to handle cases where images with the same ID are supposed to share the same appearance. Therefore, we aim to encourage the pseudo token to capture visual details exclusive to the same identity. To this end, we exploit the symmetric supervised contrastive loss as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 376, + 430, + 545, + 445 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 430, + 545, + 445 + ], + "spans": [ + { + "bbox": [ + 376, + 430, + 545, + 445 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\operatorname {S u p C o n}} = \\mathcal {L} _ {i 2 t} ^ {\\operatorname {S u p}} + \\mathcal {L} _ {t 2 i} ^ {\\operatorname {S u p}}. \\tag {4}", + "image_path": "8fdf9cf790619a6b2a181d3f73daaa04f8c511b09a53efaeab01a4076dbe1857.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 454, + 545, + 499 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 454, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 315, + 454, + 545, + 499 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {i 2 t} ^ {\\operatorname {S u p}} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sum_ {p ^ {+} \\in P (i)} \\log \\frac {\\exp \\left(\\operatorname {s i m} \\left(\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {\\boldsymbol {p} ^ {+}}\\right) / \\tau\\right)}{\\sum_ {i = 1} ^ {N} \\exp \\left(\\operatorname {s i m} \\left(\\boldsymbol {v} _ {n} , \\boldsymbol {l} _ {i}\\right) / \\tau\\right)}, \\tag {5}", + "image_path": "953eb415ce1fe6cbe9439248c0d0fc64d4000808673c772986fd8b669075e0ca.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 499, + 545, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 499, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 316, + 499, + 545, + 544 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {t 2 i} ^ {\\operatorname {S u p}} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sum_ {p ^ {+} \\in P (i)} \\log \\frac {\\exp \\left(\\sin \\left(l _ {n} , v _ {p ^ {+}}\\right) / \\tau\\right)}{\\sum_ {i = 1} ^ {N} \\exp \\left(\\sin \\left(l _ {n} , v _ {i}\\right) / \\tau\\right)}. 
\\tag {6}", + "image_path": "d82f05d3d7004871aab497a109c5bb6147963d177bd69f69d614e85291d13ddd.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 544, + 545, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 544, + 545, + 556 + ], + "spans": [ + { + "bbox": [ + 306, + 544, + 545, + 556 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 544, + 545, + 556 + ], + "type": "inline_equation", + "content": "P(i)" + }, + { + "bbox": [ + 306, + 544, + 545, + 556 + ], + "type": "text", + "content": " represents the positive samples related to " + }, + { + "bbox": [ + 306, + 544, + 545, + 556 + ], + "type": "inline_equation", + "content": "v_{n},l_{n}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 563, + 492, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 563, + 492, + 576 + ], + "spans": [ + { + "bbox": [ + 306, + 563, + 492, + 576 + ], + "type": "text", + "content": "4.2. Prompt-driven Semantic Guidance" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 582, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 582, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 582, + 545, + 713 + ], + "type": "text", + "content": "We refine the language prompt by incorporating the pseudo-token that is linked to the identity, enhancing its ability to convey a more specific visual context for the image. Our commitment extends to meticulously directing the image feature through language. At the core of our approach lies the idea of semantic guidance, wherein we explicitly determine which region of the image aligns with the language prompt. Intuitively, image patches corresponding to the semantic \"person\" should inherently have substantial influence to facilitate discrimination. As opposed to the interaction between patches in self-attention layers within a" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17346" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "content": "single modality. Based on this observation, we explore a patch-to-prompt interaction that occurs in multi-modality." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "spans": [ + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "text", + "content": "In particular, we employ a language-guided cross-attention module, which uses the textual embedding as query and the patch-wise embedding of the visual encoder as key and value. 
More formally, given a pair of image and prompt " + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "inline_equation", + "content": "(\\pmb{x},\\pmb{t_p})" + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "text", + "content": ", we first feed the image " + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "text", + "content": " into the visual encoder, yielding in a sequence of patch embeddings " + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\pmb{v}},\\pmb{v}_1,\\dots,\\pmb{v}_M\\}" + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{v}}" + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "text", + "content": " denotes the global visual embedding, while remaining " + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "inline_equation", + "content": "\\pmb{v}_i,i\\in [1,M]" + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "text", + "content": " belong to the local patch embeddings. In a similar vein, the prompt is fed into the text encoder to derive the text embedding " + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "inline_equation", + "content": "l_{p}" + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "text", + "content": ". Subsequently, the text embedding is projected onto a query matrix " + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "text", + "content": " and patch embeddings are projected to a key matrix " + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "text", + "content": " and a value matrix " + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 46, + 97, + 288, + 276 + ], + "type": "text", + "content": ", via three different linear-projection layers. As such, the patch-to-prompt interaction can be achieved by:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 96, + 285, + 287, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 285, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 96, + 285, + 287, + 312 + ], + "type": "interline_equation", + "content": "\\mathrm {A} (Q, K, V) = \\operatorname {S o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {d}}\\right) V. \\tag {7}", + "image_path": "5c0c37c4fbda0199267466a9b04c5ec24ee6e3553ebd251170e92251ee9927e7.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 317, + 287, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 317, + 287, + 400 + ], + "spans": [ + { + "bbox": [ + 46, + 317, + 287, + 400 + ], + "type": "text", + "content": "This interaction aggregates the attention map to highlight the regions of high semantic response. Drawing from multimodal fusion methods [9], we incorporate two transformer blocks following the cross-attention layer to derive final representations. Ultimately, we utilize the standard ReID loss, i.e., the triplet loss and identity classification loss [12], to optimize our framework." 
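The patch-to-prompt interaction of Eq. (7) can be sketched as below: the prompt embedding acts as the query and the patch embeddings act as keys and values, so the attention map re-weights patches by their semantic response to the prompt, followed by two transformer blocks as stated above. nn.MultiheadAttention and TransformerEncoder are convenient stand-ins here; the paper's actual interaction module may be configured differently.

```python
# Minimal sketch of the language-guided cross-attention of Eq. (7) plus two transformer blocks.
import torch
import torch.nn as nn

class MultimodalInteraction(nn.Module):
    def __init__(self, dim: int = 512, num_heads: int = 8):
        super().__init__()
        # Q is projected from the text embedding, K and V from the patch embeddings.
        self.cross_attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        block = nn.TransformerEncoderLayer(dim, num_heads, dim_feedforward=4 * dim,
                                           batch_first=True)
        self.self_blocks = nn.TransformerEncoder(block, num_layers=2)  # two follow-up blocks

    def forward(self, text_emb: torch.Tensor, patch_embs: torch.Tensor) -> torch.Tensor:
        # text_emb: (B, 1, D) prompt embedding l_p used as query
        # patch_embs: (B, M, D) local patch embeddings used as key and value
        attended, _ = self.cross_attn(query=text_emb, key=patch_embs, value=patch_embs)
        return self.self_blocks(attended)       # final re-weighted representation

module = MultimodalInteraction()
out = module(torch.randn(2, 1, 512), torch.randn(2, 196, 512))   # -> (2, 1, 512)
```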
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 117, + 411, + 287, + 443 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 411, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 117, + 411, + 287, + 443 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {I D}} = \\frac {1}{K} \\sum_ {j = 1} ^ {K} y _ {j} \\log p _ {j}, \\tag {8}", + "image_path": "1e82605839a02067b498f2d6b278d1f8d36a519da46aecc20285fd14bfd513a4.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 100, + 455, + 287, + 468 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 455, + 287, + 468 + ], + "spans": [ + { + "bbox": [ + 100, + 455, + 287, + 468 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {T r i p l e t}} = \\max \\left(d _ {p} - d _ {n} + m, 0\\right), \\tag {9}", + "image_path": "a98d29b59ed9ffe9ddc7f9b09f3fc1f261ae953e9abee62995a33c91afead4b6.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 474, + 287, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 474, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 47, + 474, + 287, + 498 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 474, + 287, + 498 + ], + "type": "inline_equation", + "content": "p_j" + }, + { + "bbox": [ + 47, + 474, + 287, + 498 + ], + "type": "text", + "content": " is the prediction probability for the " + }, + { + "bbox": [ + 47, + 474, + 287, + 498 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 47, + 474, + 287, + 498 + ], + "type": "text", + "content": "-th class, and " + }, + { + "bbox": [ + 47, + 474, + 287, + 498 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 47, + 474, + 287, + 498 + ], + "type": "text", + "content": " denotes the margin." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 506, + 200, + 518 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 506, + 200, + 518 + ], + "spans": [ + { + "bbox": [ + 47, + 506, + 200, + 518 + ], + "type": "text", + "content": "4.3. Optimization and Inference" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 524, + 287, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 524, + 287, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 524, + 287, + 548 + ], + "type": "text", + "content": "Taining optimization. In summary, the overall objective function for our framework is formulated as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 559, + 287, + 572 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 287, + 572 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 287, + 572 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {\\text {T r i p l e t}} + \\mathcal {L} _ {\\mathrm {I D}} + \\lambda \\mathcal {L} _ {\\operatorname {S u p C o n}}. 
\\tag {10}", + "image_path": "e979abbf08c06445cb1ee77658d7cb7438d57b676d59c91589f638448e374096.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 582, + 287, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 582, + 287, + 617 + ], + "spans": [ + { + "bbox": [ + 47, + 582, + 287, + 617 + ], + "type": "text", + "content": "Similar to CLIP-ReID, the final hidden states of the vision transformer, in conjunction the preceding two layer states, are also employed to calculate " + }, + { + "bbox": [ + 47, + 582, + 287, + 617 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{Triplet}}" + }, + { + "bbox": [ + 47, + 582, + 287, + 617 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 618, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 287, + 713 + ], + "type": "text", + "content": "Improved inference efficiency. Our approach involves the query-specific pseudo token with the textual prompt, which essentially doubles the inference time compared to using only the visual encoder. Fortunately, our empirical findings suggest that providing only 'A photo of a person' as a simplified guideline yields comparable results. In this way, there will be no increase in the inference time caused by the text encoder." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 335, + 70, + 518, + 133 + ], + "blocks": [ + { + "bbox": [ + 335, + 70, + 518, + 133 + ], + "lines": [ + { + "bbox": [ + 335, + 70, + 518, + 133 + ], + "spans": [ + { + "bbox": [ + 335, + 70, + 518, + 133 + ], + "type": "table", + "html": "
<tr><td>Dataset</td><td>#ID</td><td>Images</td><td>Cams</td></tr>
<tr><td>Market-1501</td><td>1,501</td><td>32,668</td><td>6</td></tr>
<tr><td>MSMT17</td><td>4,101</td><td>126,441</td><td>15</td></tr>
<tr><td>DukeMTMC</td><td>1,404</td><td>36,411</td><td>8</td></tr>
<tr><td>CUHK03-NP</td><td>1,467</td><td>13,164</td><td>2</td></tr>
", + "image_path": "67df69e3285ba264454f25082c666fcfb9e5986246658eb5b1c4d9421764efa7.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 331, + 141, + 520, + 152 + ], + "lines": [ + { + "bbox": [ + 331, + 141, + 520, + 152 + ], + "spans": [ + { + "bbox": [ + 331, + 141, + 520, + 152 + ], + "type": "text", + "content": "Table 1. The statistics of dataset in our experiments" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 171, + 387, + 185 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 171, + 387, + 185 + ], + "spans": [ + { + "bbox": [ + 306, + 171, + 387, + 185 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 191, + 429, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 191, + 429, + 205 + ], + "spans": [ + { + "bbox": [ + 306, + 191, + 429, + 205 + ], + "type": "text", + "content": "5.1. Experimental Setting" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 210, + 545, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 210, + 545, + 305 + ], + "spans": [ + { + "bbox": [ + 304, + 210, + 545, + 305 + ], + "type": "text", + "content": "Datasets and Evaluation Protocols. To evaluate and compare various methods, four extensive person re-identification datasets Market-1501 [51], MSMT17 [41], DukeMTMC [52] and CUHK03-NP [22] are exploited. Dataset stats are in Tab. 1. In line with conventions in the ReID community [13], two commonly used metrics, i.e., mean Average Precision (mAP) and Rank-1(R-1) accuracy, are used to evaluate the performance." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 305, + 546, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 305, + 546, + 544 + ], + "spans": [ + { + "bbox": [ + 304, + 305, + 546, + 544 + ], + "type": "text", + "content": "Implementation Details. In alignment with prior research, we employ both ResNet-50 and ViT-B/16 Pre-trained from CLIP as our visual encoder and a pre-trained text encoder, i.e., CLIP text Transformer. Our framework additionally features a random-initialized inversion network and a multimodal interaction module. The inversion network is a lightweight model employing a three-layered MLP of 512-dimensional hidden state. A Batch Normalization (BN) layer [17] is placed after the last state of the network. The batch size is configured to 64, encompassing 16 identities with 4 images per identity. All input images are resized to " + }, + { + "bbox": [ + 304, + 305, + 546, + 544 + ], + "type": "inline_equation", + "content": "256 \\times 128" + }, + { + "bbox": [ + 304, + 305, + 546, + 544 + ], + "type": "text", + "content": ". We use the Adam optimizer with a learning rate of 5e-6 for the visual encoder, whereas the learning rate for random-initialized modules is set to 5e-5. We find " + }, + { + "bbox": [ + 304, + 305, + 546, + 544 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 305, + 546, + 544 + ], + "type": "text", + "content": " in Eq. (10) is not sensitive and performs well across a broad range, thus we consistently set " + }, + { + "bbox": [ + 304, + 305, + 546, + 544 + ], + "type": "inline_equation", + "content": "\\lambda = 0.5" + }, + { + "bbox": [ + 304, + 305, + 546, + 544 + ], + "type": "text", + "content": " for all datasets. 
The model is trained for 60 epochs, with a learning rate decay factor of 0.1 for every 20 epochs. The entire framework is implemented using PyTorch and runs on a single NVIDIA RTX3090 GPU with 24GB VRAM." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 544, + 545, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 544, + 545, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 544, + 545, + 640 + ], + "type": "text", + "content": "Baseline. Most existing approaches are built upon the strong ReID baseline presented in [24]. Specifically, they employ an ImageNet-21k pre-trained CNN model or ViT as the backbone and incorporate ID loss and triplet loss as crucial components. In contrast, our baseline model deviates by leveraging the pre-trained CLIP model and we fine-tune the visual encoder of CLIP by directly applying the two commonly-used losses." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 647, + 529, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 647, + 529, + 659 + ], + "spans": [ + { + "bbox": [ + 306, + 647, + 529, + 659 + ], + "type": "text", + "content": "5.2. Comparison with State-of-the-art Methods" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "We benchmark PromptSG against the current state-of-the-art, which can generally be divided into three categories: CNN-based, ViT-based, and CLIP-based methods. Tab. 2 summarizes the main results on four widely" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17347" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 74, + 70, + 520, + 348 + ], + "blocks": [ + { + "bbox": [ + 74, + 70, + 520, + 348 + ], + "lines": [ + { + "bbox": [ + 74, + 70, + 520, + 348 + ], + "spans": [ + { + "bbox": [ + 74, + 70, + 520, + 348 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Backbone</td><td rowspan="2">Method</td><td rowspan="2">Reference</td><td colspan="2">Market-1501</td><td colspan="2">MSMT17</td><td colspan="2">DukeMTMC</td><td colspan="2">CUHK03-NP</td></tr>
<tr><td>mAP</td><td>R-1</td><td>mAP</td><td>R-1</td><td>mAP</td><td>R-1</td><td>mAP</td><td>R-1</td></tr>
<tr><td>ResNet50</td><td colspan="10">CNN-based method</td></tr>
<tr><td>OSNet [53]</td><td>ICCV'19</td><td>84.9</td><td>94.8</td><td>52.9</td><td>78.7</td><td>73.5</td><td>88.6</td><td>-</td><td>-</td></tr>
<tr><td>ISP [57]</td><td>ECCV'20</td><td>84.9</td><td>94.2</td><td>-</td><td>-</td><td>75.6</td><td>86.9</td><td>74.1</td><td>76.5</td></tr>
<tr><td>RGA-SC [50]</td><td>CVPR'20</td><td>88.4</td><td>96.1</td><td>57.5</td><td>80.3</td><td>-</td><td>-</td><td>77.4</td><td>81.1</td></tr>
<tr><td>CDNet [20]</td><td>CVPR'21</td><td>86.0</td><td>95.1</td><td>54.7</td><td>78.9</td><td>76.8</td><td>88.6</td><td>-</td><td>-</td></tr>
<tr><td>CAL [28]</td><td>ICCV'21</td><td>87.0</td><td>94.5</td><td>56.2</td><td>79.5</td><td>76.4</td><td>87.6</td><td>-</td><td>-</td></tr>
<tr><td>ALDER* [47]</td><td>TIP'21</td><td>88.9</td><td>95.6</td><td>59.1</td><td>82.5</td><td>78.9</td><td>89.9</td><td>78.7</td><td>81.0</td></tr>
<tr><td>LTRID* [39]</td><td>TMM'22</td><td>86.9</td><td>94.7</td><td>58.6</td><td>81.0</td><td>80.4</td><td>90.5</td><td>80.3</td><td>82.1</td></tr>
<tr><td colspan="10">CLIP-based method</td></tr>
<tr><td>Baseline</td><td></td><td>88.1</td><td>94.7</td><td>60.7</td><td>82.1</td><td>79.3</td><td>88.6</td><td>77.6</td><td>79.1</td></tr>
<tr><td>CLIP-ReID [21]</td><td>AAAI'23</td><td>89.8</td><td>95.7</td><td>63.0</td><td>84.4</td><td>80.7</td><td>90.0</td><td>78.2</td><td>79.4</td></tr>
<tr><td>PromptSG</td><td>Ours</td><td>91.8</td><td>96.6</td><td>68.5</td><td>86.0</td><td>80.4</td><td>90.2</td><td>79.8</td><td>80.5</td></tr>
<tr><td>ViT-B/16</td><td colspan="10">ViT-based method</td></tr>
<tr><td>TransReID [14]</td><td>ICCV'21</td><td>88.9</td><td>95.2</td><td>67.4</td><td>85.3</td><td>82.0</td><td>90.7</td><td>79.6</td><td>81.7</td></tr>
<tr><td>DCAL [56]</td><td>CVPR'22</td><td>87.5</td><td>94.7</td><td>64.0</td><td>83.1</td><td>80.1</td><td>89.0</td><td>-</td><td>-</td></tr>
<tr><td>AAformer [58]</td><td>TNNLS'23</td><td>88.0</td><td>95.4</td><td>65.6</td><td>84.4</td><td>80.9</td><td>90.1</td><td>79.0</td><td>80.3</td></tr>
<tr><td>PHA [46]</td><td>CVPR'23</td><td>90.2</td><td>96.1</td><td>68.9</td><td>86.1</td><td>-</td><td>-</td><td>83.0</td><td>84.5</td></tr>
<tr><td colspan="10">CLIP-based method</td></tr>
<tr><td>Baseline</td><td></td><td>86.4</td><td>93.3</td><td>66.1</td><td>84.4</td><td>80.0</td><td>88.8</td><td>80.0</td><td>80.5</td></tr>
<tr><td>CLIP-ReID [21]</td><td>AAAI'23</td><td>89.6</td><td>95.5</td><td>73.4</td><td>88.7</td><td>82.5</td><td>90.0</td><td>81.6</td><td>80.9</td></tr>
<tr><td>PromptSG</td><td>Ours</td><td>94.6</td><td>97.0</td><td>87.2</td><td>92.6</td><td>81.6</td><td>91.0</td><td>83.1</td><td>85.1</td></tr>
", + "image_path": "89422447a31d7aa309b229924688c5908f6cdead4892286221a0e5fe05c280d2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 357, + 545, + 388 + ], + "lines": [ + { + "bbox": [ + 46, + 357, + 545, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 357, + 545, + 388 + ], + "type": "text", + "content": "Table 2. Comparison with the state-of-the-art models on Market-1501, MSMT17, DukeMTMC, and CUHK03-NP (labeled) datasets. The superscript star* indicates that the image is resized to a resolution exceeding 256x128. All results are reported without re-ranking. Color Red and blue: the best and second-best results." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 411, + 287, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 411, + 287, + 471 + ], + "spans": [ + { + "bbox": [ + 47, + 411, + 287, + 471 + ], + "type": "text", + "content": "used person ReID datasets. We observe that our proposed PromptSG attains the best results and sets a new state-of-the-art performance. Remarkably, PromptSG achieves over " + }, + { + "bbox": [ + 47, + 411, + 287, + 471 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 47, + 411, + 287, + 471 + ], + "type": "text", + "content": " improvement on MSMT17 and nearly " + }, + { + "bbox": [ + 47, + 411, + 287, + 471 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 47, + 411, + 287, + 471 + ], + "type": "text", + "content": " on Market-1501, surpassing previous state-of-the-art results." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 472, + 287, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 472, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 287, + 579 + ], + "type": "text", + "content": "Compared with ViT-based method. Pioneering work TransReID [14] sets a strong baseline for the ViT-based method by leveraging the potentials of the transformer. Building upon this groundwork, PHA [46] further enhances the preservation of key high-frequency elements in images. In contrast to existing ViT-based methods that only capture the patch-wise uni-modal information, our PromptSG method demonstrates that the interaction of different modalities can improve the performance of individual modalities." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "text", + "content": "Compared with CLIP-based method. Compared with the competing CLIP-based method CLIP-ReID, our PromptSG outperforms it by " + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "inline_equation", + "content": "5.0\\% / 1.5\\%" + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "inline_equation", + "content": "13.8\\% / 3.9\\%" + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "text", + "content": " mAP/Rank-1 on Market-1501 and MSMT17 datasets when taking ViT-B/16 as visual backbone. A key distinction between CLIP-ReID and our approach resides in the composition of the query-specific pseudo-token. Our results further underscore that incorporating textual information during the inference process can also enhance performance." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "Compared with CNN-based method. To ensure a fair comparison, we also implement PromptSG with a ResNet-" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 411, + 545, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 411, + 545, + 483 + ], + "spans": [ + { + "bbox": [ + 305, + 411, + 545, + 483 + ], + "type": "text", + "content": "50 backbone. Apart from LTReID [39] that utilize higher resolution images, our method consistently surpasses other methods by a significant margin, especially on the most challenging person ReID dataset, MSMT17. This highlights the robustness and superiority of our approach across various architectures." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 491, + 400, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 491, + 400, + 504 + ], + "spans": [ + { + "bbox": [ + 306, + 491, + 400, + 504 + ], + "type": "text", + "content": "5.3. Ablation Study" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 510, + 545, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 510, + 545, + 556 + ], + "spans": [ + { + "bbox": [ + 305, + 510, + 545, + 556 + ], + "type": "text", + "content": "In the following, we conduct an ablation study on the essential elements of PromptSG on Market-1501 and MSMT17 datasets, and all the experiments are conducted on the ViT-B/16 backbone." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "text", + "content": "Contributions from Different Components. To assess the contribution of various components, we conduct ablation experiments by removing one component at a time. Recall that " + }, + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{i2t}^{\\mathrm{sup}}" + }, + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{t2i}^{\\mathrm{sup}}" + }, + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "text", + "content": " are the supervised contrastive losses in Eq. (2), Eq. (3) respectively, and MIM denotes the multimodal interaction module. Comparing rows b) and c) with a), we see a similar conclusion where the removal of text-to-image or image-to-text contrastive loss leads to a decent improvement on both datasets. Further comparing rows a) and d), we observe that the removal of semantic information leads to a larger decrease than solely removing ID-specific appearance information. 
Notably, as seen in row a), our full model, PromptSG, utilizes both semantic and appearance" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17348" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 70, + 291, + 159 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 291, + 159 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 291, + 159 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 291, + 159 + ], + "type": "table", + "html": "
<tr><td colspan="3">Components</td><td colspan="2">Market-1501</td><td colspan="2">MSMT17</td></tr>
<tr><td>\( {\mathcal{L}}_{i2t}^{\text{Sup }} \)</td><td>\( {\mathcal{L}}_{t2i}^{\text{Sup }} \)</td><td>MIM</td><td>mAP</td><td>R-1</td><td>mAP</td><td>R-1</td></tr>
<tr><td>a)</td><td></td><td></td><td></td><td>94.6</td><td>97.0</td><td>87.2</td><td>92.6</td></tr>
<tr><td>b)</td><td></td><td></td><td></td><td>92.8</td><td>96.7</td><td>85.2</td><td>91.9</td></tr>
<tr><td>c)</td><td></td><td></td><td></td><td>93.0</td><td>96.7</td><td>84.5</td><td>90.2</td></tr>
<tr><td>d)</td><td></td><td></td><td></td><td>89.4</td><td>95.3</td><td>71.4</td><td>87.3</td></tr>
", + "image_path": "5af67ad35b61efc3c384b8df449fe4181f598d45286086d6840a070308d0360e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 48, + 201, + 286, + 258 + ], + "blocks": [ + { + "bbox": [ + 47, + 167, + 288, + 190 + ], + "lines": [ + { + "bbox": [ + 47, + 167, + 288, + 190 + ], + "spans": [ + { + "bbox": [ + 47, + 167, + 288, + 190 + ], + "type": "text", + "content": "Table 3. Ablation study on the effectiveness of each component of PromptSG on Market-1501 and MSMT17." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 201, + 286, + 258 + ], + "lines": [ + { + "bbox": [ + 48, + 201, + 286, + 258 + ], + "spans": [ + { + "bbox": [ + 48, + 201, + 286, + 258 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Method</td><td colspan="2">Market-1501</td><td colspan="2">MSMT17</td></tr>
<tr><td>mAP</td><td>R-1</td><td>mAP</td><td>R-1</td></tr>
<tr><td>Training w/ composed</td><td>94.6</td><td>97.0</td><td>87.2</td><td>92.6</td></tr>
<tr><td>Training w/o composed</td><td>92.0</td><td>96.3</td><td>85.3</td><td>91.6</td></tr>
", + "image_path": "03d70c9b9299e0d8f31a81729ab3617851f0a45af4faa333e32af6c63161a871.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 306, + 286, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 306, + 286, + 365 + ], + "spans": [ + { + "bbox": [ + 46, + 306, + 286, + 365 + ], + "type": "text", + "content": "language supervision during training, achieving a substantial improvement of over a point. The overall conclusion supports that language guidance, through both semantic and appearance cues, plays a crucial role in improving the performance of our model." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "spans": [ + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "text", + "content": "Ablation Study on the Personalized Prompt. To better understand whether the learned pseudo tokens " + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "inline_equation", + "content": "s_*" + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "text", + "content": " can provide more granular guidance for learning visual embeddings, we train a strong baseline model, where the textual prompt dose not composed with the " + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "inline_equation", + "content": "s_*" + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "text", + "content": " during training and testing, but instead relies on the simplified prompt \"A photo of a person\" for semantic guidance. Note that we will not use the symmetric supervised contrastive loss in this case. Results in Tab. 4 imply that composing the " + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "inline_equation", + "content": "s_*" + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "text", + "content": " has a significant impact on the overall performance. When " + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "inline_equation", + "content": "s_*" + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "text", + "content": " is removed from the training process, the performance decreases by " + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "inline_equation", + "content": "1.9\\%" + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "inline_equation", + "content": "2.6\\%" + }, + { + "bbox": [ + 46, + 366, + 286, + 557 + ], + "type": "text", + "content": " in terms of mAP. Although we focus on the uni-modal re-identification task, the above formulation could potentially be applied to multimodal test sets, such as text-to-image person retrieval by composing the image feature with the text to achieve better alignment." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 558, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 286, + 713 + ], + "type": "text", + "content": "Ablation Study on the Interaction Module. We analyze the impact of different designs of the interaction module on performance and inference speed, as well as the impact of not using a composed prompt during inference. Notably, personalized prompts are consistently included during the training. As shown in Tab. 
5, without an attention module (w/o attention module), the model achieves a baseline performance, with inference speed being dependent solely on the visual encoder. Introducing a single cross-attention layer (+1 cross-layer) shows a notable performance improvement, indicating the positive effect of incorporating a cross-layer design. Notably, performances can be stably improved with more self-attention layers, but at the cost of" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 309, + 70, + 545, + 168 + ], + "blocks": [ + { + "bbox": [ + 47, + 267, + 286, + 289 + ], + "lines": [ + { + "bbox": [ + 47, + 267, + 286, + 289 + ], + "spans": [ + { + "bbox": [ + 47, + 267, + 286, + 289 + ], + "type": "text", + "content": "Table 4. Ablation of training with or without composing the pseudo token on Market-1501 and MSMT17." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 70, + 545, + 168 + ], + "lines": [ + { + "bbox": [ + 309, + 70, + 545, + 168 + ], + "spans": [ + { + "bbox": [ + 309, + 70, + 545, + 168 + ], + "type": "table", + "html": "
Training Model | Inference Model
Visual Encoder | Text Encoder | FPS ↑ | mAP
w/o attention module | - | 1x | 89.4
+1 cross-layer | - | 0.95x | 91.1
+1 cross & 1 self-layer | - | 0.91x | 93.0
+1 cross & 2 self-layer | 0.48x | 94.6
+1 cross & 2 self-layer | - | 0.88x | 94.1
", + "image_path": "a12857893a0353a243f8ffc9ed502e1120a32394fca1979cac526e1b159b5539.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 307, + 245, + 541, + 348 + ], + "blocks": [ + { + "bbox": [ + 305, + 176, + 545, + 232 + ], + "lines": [ + { + "bbox": [ + 305, + 176, + 545, + 232 + ], + "spans": [ + { + "bbox": [ + 305, + 176, + 545, + 232 + ], + "type": "text", + "content": "Table 5. The impact of various interaction modules and efficiency comparison with different inference models on Market-1501. Cross and self means cross-attention and self-attention, respectively. FPS denotes the quantity of images processed by the model in one second." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 245, + 541, + 348 + ], + "lines": [ + { + "bbox": [ + 307, + 245, + 541, + 348 + ], + "spans": [ + { + "bbox": [ + 307, + 245, + 541, + 348 + ], + "type": "table", + "html": "
Method | #Params | #Params %CLIP | Training Times ↓
(a) Market-1501
CLIP-ReID | 89M | 0.71 | 4689s | 1x
PromptSG | 94M | 0.75 | 2417s | 0.51x
(b) MSMT17
CLIP-ReID | 90M | 0.73 | 12904s | 1x
PromptSG | 94M | 0.75 | 6108s | 0.47x
", + "image_path": "28955852bb7feab265158e6d10dfc0d782433025a22e3cb170dd14f530b858e4.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 357, + 545, + 401 + ], + "lines": [ + { + "bbox": [ + 305, + 357, + 545, + 401 + ], + "spans": [ + { + "bbox": [ + 305, + 357, + 545, + 401 + ], + "type": "text", + "content": "Table 6. Comparison of training times and the number of parameters on Market-1501 and MSMT17. #Params denotes the number of learnable parameters in the whole framework. All models are evaluated on a single 3090Ti GPU." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 425, + 545, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 425, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 304, + 425, + 545, + 568 + ], + "type": "text", + "content": "lower inference efficiency. Furthermore, our analysis illuminates the impact of employing a composed prompt during the inference phase, revealing that when we follow the same procedure as the training stage—composing text with query images—the Frames Per Second (FPS) is only 0.48 times that of the baseline. This is expected as we need to pass through two encoders for each query. However, we empirically discovered that using a fixed prompt “A photo of a person” for all queries may not lead to significant performance degradation, and it does not compromise efficiency. Therefore, one could opt for this version to achieve a more favorable balance between accuracy and efficiency." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": "Comparison of training efficiency. In order to showcase the efficiency of our proposed approach, we carry out a comparative analysis between our one-stage PromptSG and the two-stage CLIP-ReID method, focusing on the number of learnable parameters and training speed. The details of this comparison are provided in Tab. 6. In terms of training parameters, on top of CLIP, CLIP-ReID incorporates an additional of parameters mainly through the ID-wise learnable prompt, our approach primarily extends through a fixed-size mapping network and an interaction module. 
Despite CLIP-ReID having " + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "inline_equation", + "content": "2\\% -4\\%" + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": " fewer parameters than ours on two datasets, it may experience continuous growth in parame" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17349" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 141, + 80, + 206, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 80, + 206, + 94 + ], + "spans": [ + { + "bbox": [ + 141, + 80, + 206, + 94 + ], + "type": "text", + "content": "Market-1501" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 101, + 85, + 152 + ], + "blocks": [ + { + "bbox": [ + 59, + 101, + 85, + 152 + ], + "lines": [ + { + "bbox": [ + 59, + 101, + 85, + 152 + ], + "spans": [ + { + "bbox": [ + 59, + 101, + 85, + 152 + ], + "type": "image", + "image_path": "feca2dc7394929335e72ea92959f9ce850f61b8a39a62e2475cdcea0fff5b8cd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 87, + 101, + 112, + 152 + ], + "blocks": [ + { + "bbox": [ + 87, + 101, + 112, + 152 + ], + "lines": [ + { + "bbox": [ + 87, + 101, + 112, + 152 + ], + "spans": [ + { + "bbox": [ + 87, + 101, + 112, + 152 + ], + "type": "image", + "image_path": "1cd4adff6d5382cab00a2019c83ca0381f6d7f07b528314fbc9725150a8801c6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 114, + 101, + 140, + 152 + ], + "blocks": [ + { + "bbox": [ + 114, + 101, + 140, + 152 + ], + "lines": [ + { + "bbox": [ + 114, + 101, + 140, + 152 + ], + "spans": [ + { + "bbox": [ + 114, + 101, + 140, + 152 + ], + "type": "image", + "image_path": "00c244b701a218a38b63a558615d783f1c8834016a2b33238e2554cc96b33df2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 143, + 101, + 168, + 152 + ], + "blocks": [ + { + "bbox": [ + 143, + 101, + 168, + 152 + ], + "lines": [ + { + "bbox": [ + 143, + 101, + 168, + 152 + ], + "spans": [ + { + "bbox": [ + 143, + 101, + 168, + 152 + ], + "type": "image", + "image_path": "2cb5339f172ef720b9556a30778611c5d73a787469af19402ffdfec410bc4be5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 177, + 101, + 203, + 152 + ], + "blocks": [ + { + "bbox": [ + 177, + 101, + 203, + 152 + ], + "lines": [ + { + "bbox": [ + 177, + 101, + 203, + 152 + ], + "spans": [ + { + "bbox": [ + 177, + 101, + 203, + 152 + ], + "type": "image", + "image_path": "c1334c074ae58cb3d4f0cc4fb760727a4bb18b9883f7286098067448a3f9a072.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 205, + 101, + 230, + 152 + ], + "blocks": [ + { + "bbox": [ + 205, + 101, + 230, + 152 + ], + "lines": [ + { + "bbox": [ + 205, + 101, + 230, + 152 + ], + "spans": [ + { + "bbox": [ + 205, + 101, + 230, + 152 + ], + "type": "image", + "image_path": 
"a1007a0500191772cdc74e6db5416de2aec4cdaf5c675ff8248d8471392e8820.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 233, + 101, + 258, + 152 + ], + "blocks": [ + { + "bbox": [ + 233, + 101, + 258, + 152 + ], + "lines": [ + { + "bbox": [ + 233, + 101, + 258, + 152 + ], + "spans": [ + { + "bbox": [ + 233, + 101, + 258, + 152 + ], + "type": "image", + "image_path": "6b1c7045ebecf179f63f880992e152f5aff049bd3216d784c756a6265e317a4a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 260, + 101, + 286, + 152 + ], + "blocks": [ + { + "bbox": [ + 260, + 101, + 286, + 152 + ], + "lines": [ + { + "bbox": [ + 260, + 101, + 286, + 152 + ], + "spans": [ + { + "bbox": [ + 260, + 101, + 286, + 152 + ], + "type": "image", + "image_path": "f2de007f30ae743ab55260cb03dbd29ceabcbb65d63844c3213a1b54db402ec5.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 59, + 167, + 85, + 219 + ], + "blocks": [ + { + "bbox": [ + 59, + 167, + 85, + 219 + ], + "lines": [ + { + "bbox": [ + 59, + 167, + 85, + 219 + ], + "spans": [ + { + "bbox": [ + 59, + 167, + 85, + 219 + ], + "type": "image", + "image_path": "0d2acbcffc7f6fad9bffbce05c82cd0b7095771f741c5c970303f9fd8ede82c4.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 65, + 221, + 78, + 232 + ], + "lines": [ + { + "bbox": [ + 65, + 221, + 78, + 232 + ], + "spans": [ + { + "bbox": [ + 65, + 221, + 78, + 232 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 87, + 167, + 112, + 219 + ], + "blocks": [ + { + "bbox": [ + 87, + 167, + 112, + 219 + ], + "lines": [ + { + "bbox": [ + 87, + 167, + 112, + 219 + ], + "spans": [ + { + "bbox": [ + 87, + 167, + 112, + 219 + ], + "type": "image", + "image_path": "79c51dc5b2ca4d88a713c96a5adc07ae3e2505ddef88e0214ae449712bdb0f4b.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 94, + 221, + 106, + 231 + ], + "lines": [ + { + "bbox": [ + 94, + 221, + 106, + 231 + ], + "spans": [ + { + "bbox": [ + 94, + 221, + 106, + 231 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 115, + 167, + 140, + 219 + ], + "blocks": [ + { + "bbox": [ + 115, + 167, + 140, + 219 + ], + "lines": [ + { + "bbox": [ + 115, + 167, + 140, + 219 + ], + "spans": [ + { + "bbox": [ + 115, + 167, + 140, + 219 + ], + "type": "image", + "image_path": "7409d2d9755a5aaf979d31dff90ab6e7870feca7c7b9782f6f2a7d24b8ab91b5.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 253, + 545, + 276 + ], + "lines": [ + { + "bbox": [ + 46, + 253, + 545, + 276 + ], + "spans": [ + { + "bbox": [ + 46, + 253, + 545, + 276 + ], + "type": "text", + "content": "Figure 4. Transformer visualization of attention maps. (a) Original images, (b) CLIP-ReID, (c) PromptSG without composed training, (d) PromptSG. We see our method is effective in simultaneously focusing on the semantic clues and exploring more discriminative parts." 
+ } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 122, + 221, + 133, + 231 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 221, + 133, + 231 + ], + "spans": [ + { + "bbox": [ + 122, + 221, + 133, + 231 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 14, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 143, + 167, + 168, + 219 + ], + "blocks": [ + { + "bbox": [ + 143, + 167, + 168, + 219 + ], + "lines": [ + { + "bbox": [ + 143, + 167, + 168, + 219 + ], + "spans": [ + { + "bbox": [ + 143, + 167, + 168, + 219 + ], + "type": "image", + "image_path": "5db50ad898b6c4f9a76836da6e0597005229978be9362ec8d10ab51a1c2a8324.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 122, + 221, + 133, + 231 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 221, + 133, + 231 + ], + "spans": [ + { + "bbox": [ + 122, + 221, + 133, + 231 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 16, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 178, + 167, + 203, + 219 + ], + "blocks": [ + { + "bbox": [ + 178, + 167, + 203, + 219 + ], + "lines": [ + { + "bbox": [ + 178, + 167, + 203, + 219 + ], + "spans": [ + { + "bbox": [ + 178, + 167, + 203, + 219 + ], + "type": "image", + "image_path": "8e76daac518e394b4049590603cd54d5e0038848903f0ed80b2a86a7bd8c17b6.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 184, + 221, + 194, + 231 + ], + "lines": [ + { + "bbox": [ + 184, + 221, + 194, + 231 + ], + "spans": [ + { + "bbox": [ + 184, + 221, + 194, + 231 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 205, + 167, + 230, + 219 + ], + "blocks": [ + { + "bbox": [ + 205, + 167, + 230, + 219 + ], + "lines": [ + { + "bbox": [ + 205, + 167, + 230, + 219 + ], + "spans": [ + { + "bbox": [ + 205, + 167, + 230, + 219 + ], + "type": "image", + "image_path": "40d336de2572c1136ac1c1e4021094010dcfa5070d595e8bbfb285f1018626ed.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 221, + 223, + 231 + ], + "lines": [ + { + "bbox": [ + 211, + 221, + 223, + 231 + ], + "spans": [ + { + "bbox": [ + 211, + 221, + 223, + 231 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 232, + 167, + 257, + 219 + ], + "blocks": [ + { + "bbox": [ + 232, + 167, + 257, + 219 + ], + "lines": [ + { + "bbox": [ + 232, + 167, + 257, + 219 + ], + "spans": [ + { + "bbox": [ + 232, + 167, + 257, + 219 + ], + "type": "image", + "image_path": "882132aed170e1a8c2ffc1d5ef388ab2aaf0af67b6b5953bd19f84af6f3b5908.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 221, + 250, + 231 + ], + "lines": [ + { + "bbox": [ + 239, + 221, + 250, + 231 + ], + "spans": [ + { + "bbox": [ + 239, + 221, + 250, + 231 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 260, + 167, + 286, + 219 + ], + "blocks": [ + { + "bbox": [ + 260, + 167, + 286, + 219 + ], + "lines": [ + { + "bbox": [ + 260, + 167, + 286, + 219 + ], + "spans": [ + { + "bbox": [ + 260, + 167, + 
286, + 219 + ], + "type": "image", + "image_path": "72bc7d80a9782084e02a24ac60b25dfcfe1e372f77d648997a935d6c30620b95.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 267, + 221, + 279, + 231 + ], + "lines": [ + { + "bbox": [ + 267, + 221, + 279, + 231 + ], + "spans": [ + { + "bbox": [ + 267, + 221, + 279, + 231 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "bbox": [ + 399, + 81, + 448, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 81, + 448, + 93 + ], + "spans": [ + { + "bbox": [ + 399, + 81, + 448, + 93 + ], + "type": "text", + "content": "MSMT17" + } + ] + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 308, + 99, + 333, + 152 + ], + "blocks": [ + { + "bbox": [ + 308, + 99, + 333, + 152 + ], + "lines": [ + { + "bbox": [ + 308, + 99, + 333, + 152 + ], + "spans": [ + { + "bbox": [ + 308, + 99, + 333, + 152 + ], + "type": "image", + "image_path": "9a3ffad7a1f1767d079dd6293716427669b57664fbd58d0cc223d4816d1ec51f.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 335, + 100, + 361, + 152 + ], + "blocks": [ + { + "bbox": [ + 335, + 100, + 361, + 152 + ], + "lines": [ + { + "bbox": [ + 335, + 100, + 361, + 152 + ], + "spans": [ + { + "bbox": [ + 335, + 100, + 361, + 152 + ], + "type": "image", + "image_path": "5b7a72291d087492b182ce21b78e143313ed41ee68e2fb29ea811ff44d365fc7.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 364, + 101, + 390, + 152 + ], + "blocks": [ + { + "bbox": [ + 364, + 101, + 390, + 152 + ], + "lines": [ + { + "bbox": [ + 364, + 101, + 390, + 152 + ], + "spans": [ + { + "bbox": [ + 364, + 101, + 390, + 152 + ], + "type": "image", + "image_path": "781fc5e97c4d16243dabe1e8961d94aabcad921c3aa5fa3e738101af85dc77d5.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 392, + 101, + 417, + 152 + ], + "blocks": [ + { + "bbox": [ + 392, + 101, + 417, + 152 + ], + "lines": [ + { + "bbox": [ + 392, + 101, + 417, + 152 + ], + "spans": [ + { + "bbox": [ + 392, + 101, + 417, + 152 + ], + "type": "image", + "image_path": "901b3be8690998e5138d0d00fe5718c681e46f648e4f6979bc84ab6a3e9437e3.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 427, + 101, + 452, + 152 + ], + "blocks": [ + { + "bbox": [ + 427, + 101, + 452, + 152 + ], + "lines": [ + { + "bbox": [ + 427, + 101, + 452, + 152 + ], + "spans": [ + { + "bbox": [ + 427, + 101, + 452, + 152 + ], + "type": "image", + "image_path": "d0bc48db2e60abe1af9033cdbf65c8b06fc165961652f9859decfe9794fe4c39.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 455, + 101, + 481, + 152 + ], + "blocks": [ + { + "bbox": [ + 455, + 101, + 481, + 152 + ], + "lines": [ + { + "bbox": [ + 455, + 101, + 481, + 152 + ], + "spans": [ + { + "bbox": [ + 455, + 101, + 481, + 152 + ], + "type": "image", + "image_path": "0b1f9b73a77e337d1d4ad083ad965ed33e5e682571a5ee903931a284dc5d94cf.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 483, + 101, + 507, + 
152 + ], + "blocks": [ + { + "bbox": [ + 483, + 101, + 507, + 152 + ], + "lines": [ + { + "bbox": [ + 483, + 101, + 507, + 152 + ], + "spans": [ + { + "bbox": [ + 483, + 101, + 507, + 152 + ], + "type": "image", + "image_path": "a489345cf73a8435aaf5c1773f5eea7db8e0e3f558fa76b229df34cfa89f29e4.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 509, + 101, + 535, + 152 + ], + "blocks": [ + { + "bbox": [ + 509, + 101, + 535, + 152 + ], + "lines": [ + { + "bbox": [ + 509, + 101, + 535, + 152 + ], + "spans": [ + { + "bbox": [ + 509, + 101, + 535, + 152 + ], + "type": "image", + "image_path": "39844a844c9c01b88108038a341d54bdeabb74547db4f37fd77e338c32019399.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 308, + 167, + 333, + 219 + ], + "blocks": [ + { + "bbox": [ + 308, + 167, + 333, + 219 + ], + "lines": [ + { + "bbox": [ + 308, + 167, + 333, + 219 + ], + "spans": [ + { + "bbox": [ + 308, + 167, + 333, + 219 + ], + "type": "image", + "image_path": "71171cc05606846363db5be664d3593ff0bbc08712c50c91460d1c0278f69b4f.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 335, + 167, + 361, + 219 + ], + "blocks": [ + { + "bbox": [ + 335, + 167, + 361, + 219 + ], + "lines": [ + { + "bbox": [ + 335, + 167, + 361, + 219 + ], + "spans": [ + { + "bbox": [ + 335, + 167, + 361, + 219 + ], + "type": "image", + "image_path": "e8e313c3d97f5e3293d5b9500cf41f28414b4f138014dc5ad8cb557ddc6024a7.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 363, + 167, + 389, + 219 + ], + "blocks": [ + { + "bbox": [ + 363, + 167, + 389, + 219 + ], + "lines": [ + { + "bbox": [ + 363, + 167, + 389, + 219 + ], + "spans": [ + { + "bbox": [ + 363, + 167, + 389, + 219 + ], + "type": "image", + "image_path": "e8bee1ef3891a564e342469ae4ccc45311f5172652038e98eb2a4a638b70edd2.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 391, + 167, + 417, + 218 + ], + "blocks": [ + { + "bbox": [ + 391, + 167, + 417, + 218 + ], + "lines": [ + { + "bbox": [ + 391, + 167, + 417, + 218 + ], + "spans": [ + { + "bbox": [ + 391, + 167, + 417, + 218 + ], + "type": "image", + "image_path": "fdbf44fa851a6a7f8ee2411afef4382e461f8255244c71a96cfe241cce4c0e91.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 427, + 167, + 452, + 218 + ], + "blocks": [ + { + "bbox": [ + 427, + 167, + 452, + 218 + ], + "lines": [ + { + "bbox": [ + 427, + 167, + 452, + 218 + ], + "spans": [ + { + "bbox": [ + 427, + 167, + 452, + 218 + ], + "type": "image", + "image_path": "0d10007a16851c423225786130ee858c79ba58e88b23a9305803f0d0e0cbef42.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 432, + 220, + 444, + 231 + ], + "lines": [ + { + "bbox": [ + 432, + 220, + 444, + 231 + ], + "spans": [ + { + "bbox": [ + 432, + 220, + 444, + 231 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 454, + 167, + 480, + 218 + ], + "blocks": [ + { + "bbox": [ + 454, + 167, + 480, + 218 + ], + "lines": [ + { + 
"bbox": [ + 454, + 167, + 480, + 218 + ], + "spans": [ + { + "bbox": [ + 454, + 167, + 480, + 218 + ], + "type": "image", + "image_path": "821091c3aaa44faa9fe08b873a766e510c897d256ad5acfb439186ed4f77e69e.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 460, + 220, + 472, + 230 + ], + "lines": [ + { + "bbox": [ + 460, + 220, + 472, + 230 + ], + "spans": [ + { + "bbox": [ + 460, + 220, + 472, + 230 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_caption" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 482, + 167, + 507, + 218 + ], + "blocks": [ + { + "bbox": [ + 482, + 167, + 507, + 218 + ], + "lines": [ + { + "bbox": [ + 482, + 167, + 507, + 218 + ], + "spans": [ + { + "bbox": [ + 482, + 167, + 507, + 218 + ], + "type": "image", + "image_path": "06b51ec49d039aeb748d51c61d492bf580c90d40ec911b11a222dcd313abbf32.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 488, + 220, + 500, + 230 + ], + "lines": [ + { + "bbox": [ + 488, + 220, + 500, + 230 + ], + "spans": [ + { + "bbox": [ + 488, + 220, + 500, + 230 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 509, + 167, + 535, + 218 + ], + "blocks": [ + { + "bbox": [ + 509, + 167, + 535, + 218 + ], + "lines": [ + { + "bbox": [ + 509, + 167, + 535, + 218 + ], + "spans": [ + { + "bbox": [ + 509, + 167, + 535, + 218 + ], + "type": "image", + "image_path": "d5fbe033f289713efc8f4787c789748bb48aea4c4d056234d0e7ab9efa1d048b.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 517, + 220, + 528, + 230 + ], + "lines": [ + { + "bbox": [ + 517, + 220, + 528, + 230 + ], + "spans": [ + { + "bbox": [ + 517, + 220, + 528, + 230 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_caption" + } + ], + "index": 41 + }, + { + "bbox": [ + 46, + 297, + 286, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 297, + 286, + 369 + ], + "spans": [ + { + "bbox": [ + 46, + 297, + 286, + 369 + ], + "type": "text", + "content": "ters in scenarios with a higher number of classes or evolving dynamics. In contrast, PromptSG demonstrates stronger robustness in the number of parameters and achieves significant faster training speed. It can achieve a speedup of approximately 2 times faster during training compared to the two-stage method." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 47, + 381, + 165, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 381, + 165, + 395 + ], + "spans": [ + { + "bbox": [ + 47, + 381, + 165, + 395 + ], + "type": "text", + "content": "6. Qualitative Analysis" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 46, + 402, + 286, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 402, + 286, + 569 + ], + "spans": [ + { + "bbox": [ + 46, + 402, + 286, + 569 + ], + "type": "text", + "content": "To have an intuitive understanding and validate the effectiveness of our method, we conduct a qualitative analysis where visualization of attention maps is presented in Fig. 4. Specifically, we exhibit examples from Market-1501 and MSMT17 datasets, each with two training images and two gallery images. 
We carefully selected some challenging examples, including those with complex backgrounds or images depicting multiple individuals. To gain a better insight into the regions of interest attended by the model in zero-shot scenarios, we do not use the common protocol GramCAM [30], as it needs the class-prediction scores and might be considered less suitable for Transformer-type backbones. Following [21], we use the Transformer-interpretability method in [2]." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 46, + 570, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 286, + 713 + ], + "type": "text", + "content": "We compare our (d) PromptSG with (b) CLIP-ReID and (c) PromptSG without image-composed training. It can be seen that our method exhibits significant effectiveness, as it adeptly captures semantic information while also concentrating on more detailed appearance details. For example, in the first row of the Market-1501 dataset, the attention map of CLIP-ReID is susceptible to interference from background elements like \"car\". On the other hand, PromptSG w/o composed training tends to emphasize semantic information related to the 'person,' focusing on the location of the head, arms, and legs. In contrast, our method goes beyond this by also exploring appearance features, such as" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 305, + 297, + 545, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 297, + 545, + 356 + ], + "spans": [ + { + "bbox": [ + 305, + 297, + 545, + 356 + ], + "type": "text", + "content": "identifying individuals wearing hats or carrying backpacks. Finally, in the first row examples of MSMT17, where additional pedestrians appear in the image, our method excels in effectively filtering out unnecessary pedestrians, while CLIP-ReID fails." + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 306, + 369, + 378, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 369, + 378, + 381 + ], + "spans": [ + { + "bbox": [ + 306, + 369, + 378, + 381 + ], + "type": "text", + "content": "7. Conclusion" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 304, + 389, + 545, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 389, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 304, + 389, + 545, + 521 + ], + "type": "text", + "content": "In this paper, we propose PromptSG, a simple yet effective framework that exploits the foundational model CLIP for the person ReID task. We show that language guidance is an effective way to adapt pre-trained multimodal models for the uni-modal retrieval tasks. Through leveraging the aligned multi-modal latent space provided by CLIP, the textual prompt \"A photo of a person\" can naturally address the challenge of the visual encoder in its struggle to capture semantic information. To probe more fine-grained appearance features, we incorporate an inversion network to learn pseudo tokens that describe the image context." + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 304, + 521, + 545, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 617 + ], + "type": "text", + "content": "Discussion and Limitation. 
Despite the considerable potential of language prompt learning in ReID tasks, prompt learning in the vision branch remains a largely untapped area. Fine-tuning the visual encoder for strong supervised performance may lead to poor zero-shot generalization. We hope our work can inspire future research on fully unleashing the potential of large foundation models in challenging ReID tasks." + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgments This work was supported by the National Key R&D Program of China under Grant 2022YFB3103500, the National Natural Science Foundation of China under Grants 62106258, 62006242 and 62202459, and the China Postdoctoral Science Foundation under Grant 2022M713348 and 2022TQ0363, and Young Elite Scientists Sponsorship Program by CAST (2023QNRC001) and BAST (NO.BYESS2023304)." + } + ] + } + ], + "index": 55 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17350" + } + ] + } + ], + "index": 56 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Alberto Baldrati, Lorenzo Agnolucci, Marco Bertini, and Alberto Del Bimbo. Zero-shot composed image retrieval with textual inversion. In Proceedings of the IEEE international conference on computer vision (ICCV), 2023. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 180 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 180 + ], + "type": "text", + "content": "[2] Hila Chefer, Shir Gur, and Lior Wolf. Transformer interpretability beyond attention visualization. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2021. 1, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 182, + 288, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 288, + 226 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 288, + 226 + ], + "type": "text", + "content": "[3] Binghui Chen, Weihong Deng, and Jiani Hu. Mixed high-order attention network for person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 228, + 288, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 228, + 288, + 282 + ], + "spans": [ + { + "bbox": [ + 53, + 228, + 288, + 282 + ], + "type": "text", + "content": "[4] Tianlong Chen, Shaojin Ding, Jingyi Xie, Ye Yuan, Wuyang Chen, Yang Yang, Zhou Ren, and Zhangyang Wang. Abdnet: Attentive but diverse person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 285, + 288, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 285, + 288, + 351 + ], + "spans": [ + { + "bbox": [ + 53, + 285, + 288, + 351 + ], + "type": "text", + "content": "[5] Weihua Chen, Xianzhe Xu, Jian Jia, Hao Luo, Yaohua Wang, Fan Wang, Rong Jin, and Xiuyu Sun. Beyond appearance: a semantic controllable self-supervised learning framework for human-centric visual tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 353, + 287, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 353, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 53, + 353, + 287, + 407 + ], + "type": "text", + "content": "[6] Xiaohua Chen, Yucan Zhou, Dayan Wu, Chule Yang, Bo Li, Qinghua Hu, and Weiping Wang. Area: adaptive reweighting via effective area for long-tailed classification. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 410, + 288, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 410, + 288, + 463 + ], + "spans": [ + { + "bbox": [ + 53, + 410, + 288, + 463 + ], + "type": "text", + "content": "[7] Niv Cohen, Rinon Gal, Eli A Meirom, Gal Chechik, and Yuval Atzmon. \"this is my unicorn, fluffy\": Personalizing frozen vision-language representations. In Proceedings of the European conference on computer vision (ECCV), 2022. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 466, + 287, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 466, + 287, + 532 + ], + "spans": [ + { + "bbox": [ + 53, + 466, + 287, + 532 + ], + "type": "text", + "content": "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 534, + 287, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 534, + 287, + 599 + ], + "spans": [ + { + "bbox": [ + 53, + 534, + 287, + 599 + ], + "type": "text", + "content": "[9] Zi-Yi Dou, Yichong Xu, Zhe Gan, Jianfeng Wang, Shuohang Wang, Lijuan Wang, Chenguang Zhu, Pengchuan Zhang, Lu Yuan, Nanyun Peng, et al. An empirical study of training end-to-end vision-and-language transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 601, + 287, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 287, + 656 + ], + "type": "text", + "content": "[10] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit Haim Bermano, Gal Chechik, and Daniel Cohen-or. An image is worth one word: Personalizing text-to-image generation using textual inversion. In International Conference on Learning Representations (ICLR), 2022. 2, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[11] Xiaoshuai Hao, Wanqian Zhang, Dayan Wu, Fei Zhu, and Bo Li. Dual alignment unsupervised domain adaptation for video-text retrieval. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2023. 3" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[12] Lingxiao He, Xingyu Liao, Wu Liu, Xinchen Liu, Peng Cheng, and Tao Mei. Fastreid: A pytorch toolbox for general instance re-identification. arXiv preprint arXiv:2006.02631, 2020.5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 119, + 547, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 119, + 547, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 547, + 173 + ], + "type": "text", + "content": "[13] Lingxiao He, Xingyu Liao, Wu Liu, Xinchen Liu, Peng Cheng, and Tao Mei. Fastreid: A pytorch toolbox for general instance re-identification. In Proceedings of the 31st ACM International Conference on Multimedia (ACM MM), 2023. 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 176, + 545, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 176, + 545, + 220 + ], + "spans": [ + { + "bbox": [ + 307, + 176, + 545, + 220 + ], + "type": "text", + "content": "[14] Shuting He, Hao Luo, Pichao Wang, Fan Wang, Hao Li, and Wei Jiang. Transreid: Transformer-based object re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2021. 1, 2, 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 222, + 545, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 222, + 545, + 276 + ], + "spans": [ + { + "bbox": [ + 308, + 222, + 545, + 276 + ], + "type": "text", + "content": "[15] Pingting Hong, Dayan Wu, Bo Li, and Weiping Wang. Camera-specific informative data augmentation module for unbalanced person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 279, + 545, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 279, + 545, + 334 + ], + "spans": [ + { + "bbox": [ + 308, + 279, + 545, + 334 + ], + "type": "text", + "content": "[16] Siteng Huang, Biao Gong, Yulin Pan, Jianwen Jiang, Yiliang Lv, Yuyuan Li, and Donglin Wang. 
Vop: Text-video co-operative prompt tuning for cross-modal retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 335, + 545, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 335, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 308, + 335, + 545, + 380 + ], + "type": "text", + "content": "[17] Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning (ICML), 2015. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 382, + 545, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 382, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 308, + 382, + 545, + 425 + ], + "type": "text", + "content": "[18] Zhengbao Jiang, Frank F Xu, Jun Araki, and Graham Neubig. How can we know what language models know? Transactions of the Association for Computational Linguistics (ACL), 2020. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 428, + 545, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 428, + 545, + 461 + ], + "spans": [ + { + "bbox": [ + 308, + 428, + 545, + 461 + ], + "type": "text", + "content": "[19] Dengjie Li, Siyu Chen, Yujie Zhong, Fan Liang, and Lin Ma. Dip: Learning discriminative implicit parts for person re-identification. arXiv preprint arXiv:2212.13906, 2022. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 463, + 545, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 463, + 545, + 507 + ], + "spans": [ + { + "bbox": [ + 308, + 463, + 545, + 507 + ], + "type": "text", + "content": "[20] Hanjun Li, Gaojie Wu, and Wei-Shi Zheng. Combined depth space based architecture search for person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2021. 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 509, + 545, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 509, + 545, + 553 + ], + "spans": [ + { + "bbox": [ + 308, + 509, + 545, + 553 + ], + "type": "text", + "content": "[21] Siyuan Li, Li Sun, and Qingli Li. Clip-reid: exploiting vision-language model for image re-identification without concrete text labels. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2023. 2, 3, 6, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 555, + 545, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 555, + 545, + 609 + ], + "spans": [ + { + "bbox": [ + 308, + 555, + 545, + 609 + ], + "type": "text", + "content": "[22] Wei Li, Rui Zhao, Tong Xiao, and Xiaogang Wang. Deepreid: Deep filter pairing neural network for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2014. 1, 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 612, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 612, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 308, + 612, + 545, + 656 + ], + "type": "text", + "content": "[23] Wei Li, Xiatian Zhu, and Shaogang Gong. Harmonious attention network for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 
2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "text", + "content": "[24] Hao Luo, Youzhi Gu, Xingyu Liao, Shenqi Lai, and Wei Jiang. Bag of tricks and a strong baseline for deep person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops (CVPRW), 2019. 2, 5" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17351" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[25] Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. arXiv preprint arXiv:2111.09734, 2021. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 288, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 288, + 173 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 288, + 173 + ], + "type": "text", + "content": "[26] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning (ICML), 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 175, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 287, + 239 + ], + "type": "text", + "content": "[27] Haocong Rao and Chunyan Miao. Transg: Transformer-based skeleton graph prototype contrastive learning with structure-trajectory prompted reconstruction for person re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 241, + 287, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 287, + 297 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 287, + 297 + ], + "type": "text", + "content": "[28] Yongming Rao, Guangyi Chen, Jiwen Lu, and Jie Zhou. Counterfactual attention learning for fine-grained visual categorization and re-identification. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 298, + 287, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 298, + 287, + 353 + ], + "spans": [ + { + "bbox": [ + 48, + 298, + 287, + 353 + ], + "type": "text", + "content": "[29] Kuniaki Saito, Kihyuk Sohn, Xiang Zhang, Chun-Liang Li, Chen-Yu Lee, Kate Saenko, and Tomas Pfister. 
Pic2word: Mapping pictures to words for zero-shot composed image retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 354, + 287, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 354, + 287, + 409 + ], + "spans": [ + { + "bbox": [ + 48, + 354, + 287, + 409 + ], + "type": "text", + "content": "[30] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision (ICCV), 2017. 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 410, + 287, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 410, + 287, + 454 + ], + "spans": [ + { + "bbox": [ + 48, + 410, + 287, + 454 + ], + "type": "text", + "content": "[31] Chunfeng Song, Yan Huang, Wanli Ouyang, and Liang Wang. Mask-guided contrastive attention model for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "type": "text", + "content": "[32] Qinghang Su, Dayan Wu, Chenming Wu, Bo Li, and Weiping Wang. From data to optimization: Data-free deep incremental hashing with data disambiguation and adaptive proxies. IEEE Transactions on Circuits and Systems for Video Technology (TCSVT), 2024. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "type": "text", + "content": "[33] Yifan Sun, Liang Zheng, Yi Yang, Qi Tian, and Shengjin Wang. Beyond part models: Person retrieval with refined part pooling (and a strong convolutional baseline). In Proceedings of the European conference on computer vision (ECCV), 2018. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 568, + 287, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 287, + 623 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 287, + 623 + ], + "type": "text", + "content": "[34] Yifan Sun, Changmao Cheng, Yuhan Zhang, Chi Zhang, Liang Zheng, Zhongdao Wang, and Yichen Wei. Circle loss: A unified perspective of pair similarity optimization. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 624, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 667 + ], + "type": "text", + "content": "[35] Lei Tan, Pingyang Dai, Rongrong Ji, and Yongjian Wu. Dynamic prototype mask for occluded person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 
1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "text", + "content": "[36] Chiat-Pin Tay, Sharmili Roy, and Kim-Hui Yap. Aanet: Attribute attention network for person re-identifications. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 128 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 128 + ], + "type": "text", + "content": "[37] Guanshuo Wang, Yufeng Yuan, Xiong Chen, Jiwei Li, and Xi Zhou. Learning discriminative features with multiple granularities for person re-identification. In Proceedings of the 26th ACM international conference on Multimedia (ACM MM), 2018. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 129, + 546, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 546, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 546, + 183 + ], + "type": "text", + "content": "[38] Lin Wang, Wanqian Zhang, Dayan Wu, Fei Zhu, and Bo Li. Attack is the best defense: Towards preemptive-protection person re-identification. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), 2022. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 185, + 546, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 185, + 546, + 229 + ], + "spans": [ + { + "bbox": [ + 307, + 185, + 546, + 229 + ], + "type": "text", + "content": "[39] Pingyu Wang, Zhicheng Zhao, Fei Su, and Honying Meng. Ltreid: Factorizable feature generation with independent components for long-tailed person re-identification. IEEE Transactions on Multimedia (TMM), 2022. 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 231, + 546, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 231, + 546, + 274 + ], + "spans": [ + { + "bbox": [ + 308, + 231, + 546, + 274 + ], + "type": "text", + "content": "[40] Tao Wang, Hong Liu, Pinhao Song, Tianyu Guo, and Wei Shi. Pose-guided feature disentangling for occluded person re-identification based on transformer. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2022. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 276, + 546, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 276, + 546, + 319 + ], + "spans": [ + { + "bbox": [ + 308, + 276, + 546, + 319 + ], + "type": "text", + "content": "[41] Longhui Wei, Shiliang Zhang, Wen Gao, and Qi Tian. Person transfer gan to bridge domain gap for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2018. 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 321, + 546, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 321, + 546, + 365 + ], + "spans": [ + { + "bbox": [ + 308, + 321, + 546, + 365 + ], + "type": "text", + "content": "[42] Dayan Wu, Qi Dai, Jing Liu, Bo Li, and Weiping Wang. 
Deep incremental hashing network for efficient image retrieval. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 366, + 546, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 366, + 546, + 421 + ], + "spans": [ + { + "bbox": [ + 308, + 366, + 546, + 421 + ], + "type": "text", + "content": "[43] Wenjie Yang, Houjing Huang, Zhang Zhang, Xiaotang Chen, Kaiqi Huang, and Shu Zhang. Towards rich feature discovery with class activation maps augmentation for person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 422, + 546, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 422, + 546, + 475 + ], + "spans": [ + { + "bbox": [ + 308, + 422, + 546, + 475 + ], + "type": "text", + "content": "[44] Zexian Yang, Dayan Wu, Wanqian Zhang, Bo Li, and Weiping Wang. Handling label uncertainty for camera incremental person re-identification. In Proceedings of the 31st ACM International Conference on Multimedia (ACM MM), 2023. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 478, + 546, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 478, + 546, + 521 + ], + "spans": [ + { + "bbox": [ + 308, + 478, + 546, + 521 + ], + "type": "text", + "content": "[45] Mang Ye, Jianbing Shen, Gaojie Lin, Tao Xiang, Ling Shao, and Steven CH Hoi. Deep learning for person re-identification: A survey and outlook. IEEE transactions on pattern analysis and machine intelligence (TPAMI), 2021. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 523, + 546, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 523, + 546, + 578 + ], + "spans": [ + { + "bbox": [ + 308, + 523, + 546, + 578 + ], + "type": "text", + "content": "[46] Guiwei Zhang, Yongfei Zhang, Tianyu Zhang, Bo Li, and Shiliang Pu. Pha: Patch-wise high-frequency augmentation for transformer-based person re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 579, + 546, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 579, + 546, + 623 + ], + "spans": [ + { + "bbox": [ + 308, + 579, + 546, + 623 + ], + "type": "text", + "content": "[47] Quan Zhang, Jianhuang Lai, Zhanxiang Feng, and Xiaohua Xie. Seeing like a human: Asynchronous learning with dynamic progressive refinement for person re-identification. IEEE Transactions on Image Processing (TIP), 2021. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 624, + 546, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 546, + 678 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 546, + 678 + ], + "type": "text", + "content": "[48] Renrui Zhang, Wei Zhang, Rongyao Fang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li. Tip-adapter: Training-free adaption of clip for few-shot classification. In European Conference on Computer Vision (ECCV), 2022. 
3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 680, + 546, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 680, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 680, + 546, + 713 + ], + "type": "text", + "content": "[49] Wanqian Zhang, Dayan Wu, Yu Zhou, Bo Li, Weiping Wang, and Dan Meng. Binary neural network hashing for image retrieval. In Proceedings of the 44th international ACM SIGIR" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17352" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 529 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "type": "text", + "content": "conference on research and development in information retrieval (SIGIR)l, 2021.3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "text", + "content": "[50] Zhizheng Zhang, Cuiling Lan, Wenjun Zeng, Xin Jin, and Zhibo Chen. Relation-aware global attention for person re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 287, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 287, + 195 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 287, + 195 + ], + "type": "text", + "content": "[51] Liang Zheng, Liyue Shen, Lu Tian, Shengjin Wang, Jingdong Wang, and Qi Tian. Scalable person re-identification: A benchmark. In Proceedings of the IEEE international conference on computer vision (ICCV), 2015. 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 197, + 287, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 197, + 287, + 240 + ], + "spans": [ + { + "bbox": [ + 48, + 197, + 287, + 240 + ], + "type": "text", + "content": "[52] Zhedong Zheng, Liang Zheng, and Yi Yang. Unlabeled samples generated by gan improve the person re-identification baseline in vitro. In Proceedings of the IEEE international conference on computer vision (ICCV), 2017. 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 241, + 287, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 287, + 285 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 287, + 285 + ], + "type": "text", + "content": "[53] Kaiyang Zhou, Yongxin Yang, Andrea Cavallaro, and Tao Xiang. Omni-scale feature learning for person re-identification. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), 2019. 
6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 286, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 286, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 287, + 319 + ], + "type": "text", + "content": "[54] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision (IJCV), 2022. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 320, + 287, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 320, + 287, + 373 + ], + "spans": [ + { + "bbox": [ + 48, + 320, + 287, + 373 + ], + "type": "text", + "content": "[55] Xiao Zhou, Yujie Zhong, Zhen Cheng, Fan Liang, and Lin Ma. Adaptive sparse pairwise loss for object re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 376, + 287, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 376, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 48, + 376, + 287, + 430 + ], + "type": "text", + "content": "[56] Haowei Zhu, Wenjing Ke, Dong Li, Ji Liu, Lu Tian, and Yi Shan. Dual cross-attention learning for fine-grained visual categorization and object re-identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 432, + 287, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 432, + 287, + 475 + ], + "spans": [ + { + "bbox": [ + 48, + 432, + 287, + 475 + ], + "type": "text", + "content": "[57] Kuan Zhu, Haiyun Guo, Zhiwei Liu, Ming Tang, and Jinqiao Wang. Identity-guided human semantic parsing for person re-identification. In Proceedings of the European conference on computer vision (ECCV), 2020. 1, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 476, + 287, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 476, + 287, + 529 + ], + "spans": [ + { + "bbox": [ + 48, + 476, + 287, + 529 + ], + "type": "text", + "content": "[58] Kuan Zhu, Haiyun Guo, Shiliang Zhang, Yaowei Wang, Jing Liu, Jinqiao Wang, and Ming Tang. Aaformer: Auto-aligned transformer for person re-identification. IEEE Transactions on Neural Networks and Learning Systems (TNNLS), 2023. 
6" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17353" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/aeee8578-b512-4a62-8ec3-b06e011ce338_content_list.json b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/aeee8578-b512-4a62-8ec3-b06e011ce338_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4d41da1737af23beade7a7caefe8cfc83a775290 --- /dev/null +++ b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/aeee8578-b512-4a62-8ec3-b06e011ce338_content_list.json @@ -0,0 +1,2001 @@ +[ + { + "type": "text", + "text": "A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction", + "text_level": 1, + "bbox": [ + 200, + 130, + 772, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jin Gong1", + "bbox": [ + 183, + 202, + 267, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Runzhao Yang", + "bbox": [ + 318, + 203, + 444, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Weihang Zhang", + "bbox": [ + 496, + 203, + 630, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jinli Suo1,2,3", + "bbox": [ + 683, + 202, + 782, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Qionghai Dai $^{1,2}$", + "bbox": [ + 419, + 227, + 547, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Department of Automation, Tsinghua University, Beijing, China", + "bbox": [ + 227, + 246, + 741, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Institute of Brain and Cognitive Sciences, Tsinghua University, Beijing, China", + "bbox": [ + 173, + 263, + 795, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ Shanghai Artificial Intelligence Laboratory, Shanghai, China", + "bbox": [ + 241, + 281, + 725, + 299 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 334, + 313, + 349 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "High-end lenses, although offering high-quality images, suffer from both insufficient affordability and bulky design, which hamper their applications in low-budget scenarios or on low-payload platforms. A flexible scheme is to tackle the optical aberration of low-end lenses computationally. However, it is highly demanded but quite challenging to build a general model capable of handling non-stationary aberrations and covering diverse lenses, especially in a blind manner. 
To address this issue, we propose a universal solution by extensively utilizing the physical properties of camera lenses: (i) reducing the complexity of lens aberrations, i.e., lens-specific non-stationary blur, by warping annual-ring-shaped sub-images into rectangular stripes to transform non-uniform degenerations into a uniform one, (ii) building a low-dimensional non-negative orthogonal representation of lens blur kernels to cover diverse lenses; (iii) designing a decoupling network to decompose the input low-quality image into several components degenerated by above kernel bases, and applying corresponding pre-trained deconvolution networks to reverse the degeneration. Benefiting from the proper incorporation of lenses' physical properties and unique network design, the proposed method achieves superb imaging quality, wide applicability for various lenses, high running efficiency, and is totally free of kernel calibration. These advantages bring great potential for scenarios requiring lightweight high-quality photography.", + "bbox": [ + 76, + 366, + 473, + 760 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 782, + 209, + 799 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "High-quality photography is of crucial importance for both high-fidelity visual recording (e.g., filmmaking, sports video capturing) and sophisticated computer vision tasks (e.g., surveillance, auto-piloting). High-end camera systems often employ compound lenses comprising approximately ten or more components constructed from diverse materials to com", + "bbox": [ + 75, + 809, + 470, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "pensate for geometric and photometric aberrations. Such complicated designs are proven to be effective in achieving nice image quality, but come with inherent drawbacks, including high costs, bulkiness, and fragility, making them unsuitable for scenarios with low payload capacity or limited budgets. Consequently, the demand for high-quality photography using lightweight lenses has significantly intensified.", + "bbox": [ + 496, + 335, + 893, + 441 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Considering the optical aberration in image formation and the afterward processing jointly, the workload in optical design can be shifted to the later stage [23, 28], where advanced reconstruction algorithms play a crucial role. One can also utilize the physical properties in the imaging setup to facilitate reconstruction, and researchers have made some primary efforts in this direction [32, 45].", + "bbox": [ + 496, + 443, + 893, + 549 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, grand challenges lie in the variability of optical aberrations across the field of view and diverse lenses. On the one hand, the quality degeneration of a simple lens is intrinsically a convolution with non-uniform blur kernels, and the typical compensation algorithms [43] approximate the globally non-uniform deconvolution with patch-wise uniform deconvolutions, leading to a trade-off between precision and computational efficiency. This trade-off comes up with high inflexibility when adopting data-driven approaches [14, 41], which require learning a large number of models to achieve high-performance results. 
On the other hand, the degradation of different lenses varies significantly, so lens-specific algorithm development or parameter optimizations are required for high reconstruction performance. Furthermore, the calibration of the PSF kernels of camera lenses is quite expertise-demanding[27, 46], and blind compensation is more favorable for users. Overall, computational compensation for lens aberrations holds great promise for achieving lightweight high-quality imaging. However, there is a pressing demand for a general approach to handle spatially varying aberrations of diverse lenses in a blind manner.", + "bbox": [ + 496, + 551, + 895, + 868 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we propose a physics-informed end-to-end solution that (i) capitalizes the characteristics of lens aber-", + "bbox": [ + 500, + 869, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "24861", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/43ac21dc81c98da0b467877caa52ff73559924c9d50ea82bc8a9cf817b333c3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 96, + 84, + 256, + 198 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/81dc18bd5f117798e5669563274de6a1eb43eb5f441b4b3b0fba963ba1548e28.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 96, + 200, + 256, + 316 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d24f4a8cae61e840e5f85b8cd3d16fae3c761af2fe607887c2c06097ccc366a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 263, + 82, + 462, + 198 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/3179b86b6463bb4cfb6a71d472ff0b90f0af7c89ae9fe49c81833d5d893a39e1.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 263, + 200, + 462, + 316 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0927b0af848e60f9e7b9d4a2d4e73bec4f3d783b8bd8d5d859d1845256b191eb.jpg", + "image_caption": [ + "(c)", + "Figure 1. An illustrative example of our lens-aberration compensation approach. (a) A camera equipped with a simple lens of a large field of view but severe optical aberration (WXSJ-H65HD) and a small-sized unmanned aerial vehicle (UAV) carrying the camera for data capture. (b) The input degenerated image (upper) and our reconstruction result (lower). (c) The zoomed-in comparison on the highlighted region (white box) in (b), where the original recording is shown in the bottom left corner and the reconstructed result in the top right corner." + ], + "image_footnote": [], + "bbox": [ + 468, + 82, + 872, + 316 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ration to construct a low-dimensional representation of nonuniform blur kernels of general camera lenses, and (ii) designs a deep neural network resolving the degeneration components in the low-quality input and ensembling a set of pretrained compensators to reverse the degeneration robustly. 
Specifically, we represent an arbitrary local point spread function (PSF) with a set of negative orthogonal bases, pretrain their corresponding deconvolution modules, and then retrieve their degeneration from the low-quality image captured by a low-end lens and apply the pre-trained inversion models accordingly. The proposed approach demonstrates high performance and holds high potential in lightweight photography on low-payload platforms, as shown by the impressive results captured with a small drone equipped with a compact surveillance camera in Fig. 1.", + "bbox": [ + 75, + 412, + 472, + 638 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, we target for general, blind, and end-to-end lens aberration correction, and make the following contributions:", + "bbox": [ + 76, + 655, + 470, + 686 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Proposes a unified framework for lens aberration compensation with high flexibility to diverse aberrations produced by the wide range of camera lenses.", + "2) Builds a general low-dimensional model of lens aberrations based on Orthogonal Non-negative Matrix Factorization, utilizing the physical properties of optical lenses.", + "3) Designs an end-to-end network to divide and conquer the optical aberrations in the above low-dimensional space, enabling fast and blind inversion of diverse lens degeneration and ultimately lightweight high-quality imaging.", + "4) Demonstrates performance comparable to state-of-the-art non-blind lens-specific algorithms, validating its great potential in budget-constrained or low-capacity platforms." + ], + "bbox": [ + 76, + 702, + 470, + 897 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 410, + 640, + 425 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Lens aberration modeling. Generally, imaging lenses are physically rotational symmetric around the optical center, imparting rotation symmetry of the lens aberration [25, 37]. Ignoring the fabrication imperfections, almost all types of lens aberrations, such as spherical aberration, coma aberration, and chromatic aberration [1, 9], form a rotational symmetric pattern.", + "bbox": [ + 498, + 431, + 893, + 537 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Utilizing these unique features of lens aberration, researchers have proposed some methods to simplify the degeneration by diverse lenses. For example, Rahbar et al. [29] adopt the Zernike model [24] to describe optical aberrations and can estimate the Zernike coefficients of a single channel through bicoherence and tricoherence estimation techniques, while Schuler et al. [32] represent the non-uniform aberrations with a set of orthonormal Efficient Filter Flow, which is applicable for most cases without large spherical aberration. Differently, Yue et al. [45] leverage the global rotational symmetry properties of regular lenses to transform non-uniform aberrations into uniform rings using radial splitting and warping techniques. This method capitalizes on the inherent physical properties of the imaging lens and largely simplifies the aberration model, offering inspiration for our approach to explore the unique structures of lens aberrations. 
However, their implementations use conventional optimization by alternatively kernel estimation and deblurring, which is time-consuming and the strong consumption of PSF uniformity within a stretched annular ring harms the performance slightly, which limits the applications demanding real-time and high-quality compensation. In contrast, we design a framework consisting of well-organized sub-networks to address all these issues decently.", + "bbox": [ + 496, + 539, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "24862", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Optical aberration removal. Lens aberration exists widely in optical imaging systems, and computational correction is an essential way to raise imaging quality without increasing hardware budget. One common way to model lens aberration is by convolving the image with spatially varying PSFs and compensation is naturally conducted via deconvolution [38, 44]. Existing correction methods can be broadly categorized into non-blind and blind ones.", + "bbox": [ + 75, + 90, + 470, + 210 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Non-blind correction assumes known PSFs and algorithms are extensively studied [4, 22]. Researchers have proposed different algorithms to estimate kernel PSFs via, e.g., using a combination of defocused and focused images [2], a set of binary random patterns [10], aggregating degenerations in informative nature image patches [11], analyzing the spectral characteristics of the lens system [3, 19, 34]. Further, to cover PSFs of different camera lenses, Shih et al. [33] utilize interpolation methods by fitting a spatial Gaussian model. More recently, Li et al. [20] propose a data-driven deep learning approach explicitly taking the PSF as input and introducing lens-specific priors for high-quality compensation. In sum, non-blind lens aberration compensation techniques have shown promising performance, but they require expertise demanding PSF calibration or robust estimation, which are not friendly for non-expert users and require lens-specific model training.", + "bbox": [ + 75, + 214, + 473, + 473 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Blind methods have gained significant attention due to their convenience for un-trained users and high flexibility to diverse lenses. The typical strategy is to estimate PSFs and conduct compensation sequentially. Among them Rahbar et al. [29] introduce Zernike moments, and Tang & Kutulakos [35] employ the Seidel model to simplify the lens aberration model; while Delbracio et al. [5] propose a robust algorithm based on empirical observations about the distribution of the gradient in clear natural images. Recently, some researchers have adopted the data-driven scheme and developed deep neural networks to compensate for the aberrations, but often focus on specific types of aberrations to ensure good convergence, such as radial lens distortion [31] and chromatic aberrations [7].", + "bbox": [ + 75, + 474, + 470, + 686 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In light of these developments, the blind compensation techniques (either based on conventional optimization or deep neural networks) cannot achieve performance comparable to their non-blind counterpart. Besides, existing methods are incapable of handling diverse lenses and various types of aberrations flexibly [4, 8]. 
In contrast, the proposed work leverages the physical properties of camera lenses and casts the complex aberrations of diverse lenses into a unified low-dimensional space, in which we divide and conquer the degeneration via incorporating geometric priors and a small number of pre-trained modules. Benefiting from the proper use of lenses' physical properties and elegant network design, we can achieve performance comparable to non-blind techniques as well. Such a general end-to-end", + "bbox": [ + 75, + 688, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "blind solution with superb performance holds great potential for high-quality lightweight imaging systems on portable devices or low-capacity mobile platforms.", + "bbox": [ + 496, + 90, + 890, + 136 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Non-uniform deconvolution. Mathematically lens aberration compensation can be described as a non-uniform deconvolution process and shares the same formulation with other tasks such as camera shake, defocus, object motion, etc. Various techniques and approaches have been proposed to address the technical challenges posed by non-uniform blurring [39, 47]. There are three main ways to address the non-uniformity. The most intuitive and widely used way is to assume patch-wise uniform PSFs and conduct deconvolution patch by patch [36, 42]. The deconvolution can be implemented via conventional optimization previously and deep neural networks recently, and usually in a non-blind manner. There are a bunch of algorithms, and we do not list them here. The second solution is to transform the various PSFs into a low dimensional space and remove the blur along each dimension [7, 17, 32]. The third way is to adopt data-driven techniques and fed training data with varying PSFs for high generalization ability [48]. One can also make extensive use of the structure of the PSF patterns to circumvent the high complexity by introducing physical constraints, e.g., transform the spatially varying aberrations in an annular ring into uniform via warping [45], and decompose the spatially varying defocus blur into several uniform ones according to the scene depth [18, 49]. Differently, our approach focuses on lens aberration which is of different features from other degenerations and can utilize the unique properties of lenses for a better design, in a similar way to [45]. In addition, we are dedicated to an end-to-end solution working for diverse lenses without model training.", + "bbox": [ + 496, + 137, + 893, + 574 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Physics-inspired Low Rank Lens Aberration Model", + "text_level": 1, + "bbox": [ + 498, + 585, + 890, + 619 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "There exist various lens aberrations, such as spherical aberration and astigmatism, resulting in various non-uniform quality degenerations. In addition, the aberration models of diverse lenses differ a lot. 
To provide a universal solution for different lenses, it is of crucial importance to reduce the dimension of the PSFs and provide a unified representation, based on which one can design a low-rank model to address the aberrations in a divide-and-conquer manner.", + "bbox": [ + 496, + 628, + 893, + 750 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Considering the rotational-symmetry of optical lenses [15, 16, 40], we divide the lens's field of view into several concentric rings and warp them into rectangular stripes, each of which is of approximately uniform PSFs. This operation can largely decrease the dimension of the lens aberrations [13, 21]. Further, we crop these stripes into patches and apply ONMF to find a set of orthogonal and nonnegative bases to cover the space of the lens PSFs. If learned from a large set of lenses, the bases can represent an arbitrary PSF with high accuracy. The orthogonality can avoid ambiguity", + "bbox": [ + 496, + 750, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "24863", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f9fcfbe181704b0b897fbf0b1141c1eb10eb8a89c1ce6de54eaf3ddd60955158.jpg", + "image_caption": [ + "Figure 2. Illustration of the ONMF-based low-rank lens aberration model. (a) PSF dataset of 20 low-end lenses simulated by Zemax. (b) The PSF lattice consisting of 1840 PSF kernels, generated by dividing the images by each lens into 5 concentric rings, warping into stripes, and further cropping into $13 \\times 13$ -pixel patches. (c) 9 top PSF bases obtained by applying Orthogonal Nonnegative Matrix Factorization onto (b)." + ], + "image_footnote": [], + "bbox": [ + 101, + 89, + 450, + 347 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "during decomposing the complex aberrations and the nonnegativity facilitates compensating aberrations in the same way as handling conventional PSFs.", + "bbox": [ + 75, + 457, + 470, + 501 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To this end, we first collect 20 representative low-end lenses from ZEBASE database and construct a high-dimensional matrix encompassing all their PSFs after annular division (5 rings), ring-to-rectangle warping and cropping. The matrix is then factorized using the ONMF model [30] and yield a set of principal bases. The workflow detailing this procedure as visualized in Fig. 2. So far, we have arrived at a low dimensional representation of the lens aberrations, which can cover the image degeneration of diverse lenses.", + "bbox": [ + 75, + 503, + 470, + 638 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Universal Framework for Lens Aberration Correction", + "text_level": 1, + "bbox": [ + 76, + 648, + 468, + 683 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The above idea of building a low-rank model to compensate the lens aberration in a divide-and-conquer manner is non-trivial for several reasons: the decomposition of aberrations in a blurry input is highly imposed; the deconvolutions need to be conducted on the components which are of different intensity ranges with general natural images; the fusion should also tolerate the imperfections in both decomposition and deconvolution. To overcome these hurdles, we propose to design a jointly trained low-rank deep network that enables flexible optical aberration correction. Specifically, our network comprises three main modules trained in an end-to-end manner. 
The first module conducts ONMF-based decomposition, followed by several identical adaptive deconvolution modules. Finally, we incorporate a synthetic", + "bbox": [ + 75, + 688, + 472, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "network module to further enhance the correction result. Fig. 3 summarizes the workflow of the whole network and reports the architecture of the key modules.", + "bbox": [ + 496, + 90, + 893, + 137 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Decomposing Aberrations Attributed to PSF Bases", + "text_level": 1, + "bbox": [ + 498, + 145, + 890, + 161 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As aforementioned, after applying matrix decomposition to the PSF library, we get a basis set $\\{\\mathbf{B}_i\\}$ , which can compose an arbitrary PSF $\\mathbf{k}$ with corresponding coefficients $\\{\\alpha_i\\}$ , i.e.,", + "bbox": [ + 496, + 167, + 893, + 214 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {k} = \\sum_ {i} \\alpha_ {i} \\cdot \\mathbf {B} _ {i}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 637, + 223, + 892, + 255 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Hence, a recorded degraded patch $\\mathbf{Y}$ can be represented as", + "bbox": [ + 500, + 265, + 888, + 280 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Y} = \\mathbf {X} \\otimes \\left(\\sum_ {i} \\alpha_ {i} \\cdot \\mathbf {B} _ {i}\\right) + \\mathbf {n} = \\sum_ {i} \\left(\\alpha_ {i} \\cdot \\mathbf {X}\\right) \\otimes \\mathbf {B} _ {i} + \\mathbf {n} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 506, + 289, + 892, + 321 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{X}$ is the latent sharp image, $\\mathbf{n}$ is the noise and $\\otimes$ denotes 2D convolution.", + "bbox": [ + 496, + 330, + 890, + 359 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As Fig. 3 shows, suppose we can decompose the blurry $Y$ into components $\\{\\mathbf{Y}_i = (\\alpha_i \\cdot \\mathbf{X}) \\otimes \\mathbf{B}_i\\}$ , we can pretrain deconvolution models to compensate the aberrations caused by $\\{\\mathbf{B}_i\\}$ and estimate $\\mathbf{X}$ by simply estimating the scaling factor $(\\alpha_i)$ . The decomposition is implemented with a deep neural network built on the U-net structure. Here we replace the plain convolution in original U-net with residual blocks for better convergence. Notice that the network is fully convolutional and can be applied to images with an arbitrary size. 
In our experiment, we divide the acquired image into equidistant annual rings with different widths based on the lens's physical properties.", + "bbox": [ + 496, + 361, + 893, + 541 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In order to ensure the decomposition performance of the network, we define the following loss function", + "bbox": [ + 498, + 541, + 890, + 571 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {d e c o m}} = \\left\\| \\mathbf {Y} - \\sum_ {i} \\mathbf {Y} _ {i} \\right\\| _ {2} + \\sum_ {i} \\left(\\mathbf {Y} _ {i} - \\tilde {\\mathbf {Y}} _ {i}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 545, + 580, + 892, + 614 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "with $\\mathbf{Y}_i$ and $\\tilde{\\mathbf{Y}}_i$ denoting the decomposed blurry components and the generated version by Zemax software, equivalent to $\\mathbf{X} \\otimes (\\alpha_i \\cdot \\mathbf{B}_i)$ . Here the first term forces the summation of the decomposed components consistent with the input, and the second term ensures that each retrieved component is equal to the convolution of the sharp image $\\mathbf{X}$ with the corresponding PSF basis.", + "bbox": [ + 496, + 625, + 893, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Adaptive Feature-Domain Wiener Deconvolution", + "text_level": 1, + "bbox": [ + 498, + 742, + 877, + 757 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The ONMF-based decomposition module can extract the blurry components caused by the corresponding PSF bases, so we design matching compensation modules and embed them into our joint framework. However, both the range and pattern of the intensities in the decomposed components differ from natural images, and deconvolving these components in the spatial domain is prone to ringing artifacts and over-smoothness. A recent study [6] demonstrates that high-quality image reconstruction can be achieved by performing", + "bbox": [ + 496, + 763, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "24864", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3b03efbb0403e509535362c964a301a50b462717eee517107f2abb3984017273.jpg", + "image_caption": [ + "Figure 3. The framework of the proposed approach. The whole pipeline comprises preprocessing to build the low-rank aberration model and a divide-and-conquer compensation. The preprocessing involves annular partitioning of images and corresponding lens PSFs, ring-to-rectangle warping, and learning a low-dimensional representation of the PSFs. The compensation consists of three primary modules: (i) Decomposition, a neural network based on Orthogonal Non-Negative Matrix Factorization (ONMF), decomposing the blurry image into components corresponding to the representation bases of lens aberration; (ii) Deconvolution, implemented as a cascaded encoder-decoder network to map the decomposed components into the feature domain and conducts pre-trained Wiener deconvolution sequentially; (iii) Fusion, which aggregates the multiple scaled versions of the latent sharp image from the previous deconvolution modules to get the final output. The whole network is trained in an end-to-end manner." + ], + "image_footnote": [], + "bbox": [ + 98, + 80, + 869, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "deconvolution in the feature domain. 
Thus, we propose an adaptive feature-domain Wiener deconvolution module to recover the lost high frequencies better.", + "bbox": [ + 75, + 508, + 468, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Specifically, for a specific patch $\\mathbf{X}_i = \\alpha_i\\mathbf{X}$ , we reconstruct it from the $i$ -th blurry decomposed component $\\mathbf{Y}_i$", + "bbox": [ + 76, + 551, + 470, + 583 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {X} _ {i} ^ {*} = \\arg \\min \\left\\| \\mathbf {Y} _ {i} - \\mathbf {X} _ {i} \\otimes \\mathbf {B} _ {i} \\right\\|, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 589, + 468, + 606 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{X}_i$ and $\\mathbf{Y}_i$ respectively represent the sharp and blurry image components matching the $i$ -th PSF basis. In implementation, we build a feature-based Wiener adaptive deconvolution network. We denote $f_i$ as a set of learnable linear filters and convolve $\\mathbf{Y}_i$ with them to extract useful features and obtain the relationship among the blurry input, PSF, and the high-quality output in the feature domain. According to the properties of convolution, Eq. 2 turns into", + "bbox": [ + 75, + 613, + 468, + 733 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {i} \\mathbf {Y} _ {i} = \\mathbf {F} _ {i} \\left(\\mathbf {X} _ {i} \\otimes \\mathbf {B} _ {i}\\right) + \\mathbf {F} _ {i} \\mathbf {n}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 168, + 741, + 468, + 757 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where multiplying with $\\mathbf{F}_i$ is equivalent to convolving with $f_{i}$ . Correspondingly, the above optimization in Eq. 4 is equivalent to finding a set of feature-based Wiener deconvolution operators $\\mathbf{G}_i$ (which can be obtained based on the conclusion in [6]) to reverse the aberration by $\\mathbf{B}_i$", + "bbox": [ + 75, + 763, + 468, + 839 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {X} _ {i} ^ {*} = \\arg \\min \\left\\| \\mathbf {G} _ {i} \\mathbf {F} _ {i} \\mathbf {Y} _ {i} - \\mathbf {F} _ {i} \\mathbf {X} _ {i} \\right\\|. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 151, + 847, + 468, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The compensation is implemented as a deep neural network with the network structure and learned in a data-driven", + "bbox": [ + 76, + 869, + 470, + 898 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "manner. During training the loss function is designed in an intuitive manner:", + "bbox": [ + 498, + 508, + 890, + 536 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {d e c o n v}} = \\left\\| \\mathbf {F} _ {i} \\mathbf {X} _ {i} - \\mathbf {G} _ {i} \\mathbf {F} _ {i} \\mathbf {Y} _ {i} \\right\\| _ {2}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 550, + 890, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The network is designed to share the parameters across all scales except for the first encoder block at the first cascade, which helps achieve fast and high-quality deconvolution.", + "bbox": [ + 498, + 579, + 893, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Attention-based Fusion", + "text_level": 1, + "bbox": [ + 500, + 636, + 699, + 648 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "According to Eq. 
2, each deconvolution model can provide an estimation of the latent high-quality image, multiplied by a scaling factor. However, the potential inaccuracy in decomposition and artifacts in deconvolution would harm the quality of the final output. To overcome this challenge, we propose an effective strategy to streamline the reconstruction process. Specifically, we decompose more components from the blurry input and secondly apply the corresponding deconvolution to obtain multiple aberration-compensated versions, then fuse them together via a weight-trainable fusion network to raise robustness.", + "bbox": [ + 496, + 657, + 893, + 823 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The above strategy introduces a substantial increase in running time. To accelerate the training of the deconvolution modules, we adopt a coarse-to-fine strategy, i.e., training a base model and subsequently fine-tuning it, which is largely faster than training all these basis-specific networks from", + "bbox": [ + 496, + 824, + 893, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "24865", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "scratch. Moreover, our investigation reveals that the errors in the decomposition module can harm the successive deconvolution. Consequently, we introduce the decomposition confidence to serve as a valuable indicator of the decomposition accuracy/reliability of the decomposition process, and use it to guide the fusion.", + "bbox": [ + 75, + 90, + 472, + 181 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 194, + 209, + 210 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, after describing the details of model training in Sec. 5.1, we first analyze the advantages of specific designs and key parameter settings in our approach (Subsection 5.2). Then, we demonstrate our superior performance against the state-of-the-art (SOTA) algorithms on synthetic (Subsection 5.3) and real data (Subsection 5.4).", + "bbox": [ + 75, + 218, + 470, + 309 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Implementation Details", + "text_level": 1, + "bbox": [ + 76, + 316, + 294, + 333 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For model training, we gather 300 images from the Flickr2K dataset for model training, ensuring wide applicability for diverse natural scenes. Specifically, we select 20 common commercial lenses and simulate their spatially varying PSF using Zemax software. Then we simulate the lens aberration via convolving 100 high-definition (2K) images from Flickr2K with the generated PSF and apply the successive operations to train the model, including annular decomposition, ring-to-rectangle warping, patch cropping, etc. In total, we actually obtained the sharp-blurry pair and PSF of 9200 $(100 \\times 92)$ patches.", + "bbox": [ + 75, + 340, + 468, + 507 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "During model training, we adopt the Adam optimizer with default parameters. The learning rate is initialized as $10^{-4}$ , which is halved every 100 epochs. PyTorch code and trained models are available on our project page.", + "bbox": [ + 75, + 507, + 470, + 568 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. 
Influences of the Key Parameters/Settings", + "text_level": 1, + "bbox": [ + 76, + 575, + 433, + 590 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Partitioning strategies. By capitalizing the rotational symmetry of camera lenses, we employ annular partitioning to divide the image into a sequence of concentric rings, where patches in each ring share a highly similar PSF after warping into a stripe. This strategy substantially reduces the spatial variance among PSFs across the field of view, consequently reducing the required number of bases for accurate PSF representation. The illustration and performance of the proposed partitioning method are depicted in Fig. 4(a), whereas Fig. 4(b) shows the counterpart of conventional grid partitioning. From the middle row, one can notice the highly consistent PSFs in the same annular ring in (a), in contrast to the large PSF difference among the patches in (b).", + "bbox": [ + 75, + 598, + 468, + 794 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We also compare the final performance of two partitioning strategies on the synthetic dataset. Remarkably, when utilizing an equal number of bases (9 bases), the annular splitting reaches an impressive PSF representation accuracy of 0.93 and performs 30.16dB in the final reconstruction. In contrast, the conventional grid splitting yields a largely lower accuracy of 0.69 and the reconstruction achieves 27.77dB.", + "bbox": [ + 75, + 795, + 470, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/81400036ee7dbdb5b2ce731e3e49a7d2d14cf2566ab21938cd71632068a3685e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 531, + 90, + 686, + 202 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ddbc00530e18d1cd5c96f869264a5d82d8ffaa902af0393b908dc6f331a09829.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 531, + 203, + 684, + 335 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3986b1a085b09317ce490ba9127143a6356dfa996265bf44074b250c4ec0c56b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 90, + 859, + 202 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e628742b9c634d29f8176adc8ffe888c706307c17a6ac2a65ed6b3b03cf9dffd.jpg", + "image_caption": [ + "(b)", + "Figure 4. Comparison between the annular partitioning in our model (a) and conventional grid partitioning (b). Top row: the illustration of partitioning. Middle row: the PSFs within the highlighted regions in the top row, with the left one stretched to a rectangle. Bottom row: the aberration compensation results with the same number of bases (9 in our experiment)." + ], + "image_footnote": [], + "bbox": [ + 705, + 203, + 857, + 335 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Visually, we show an example in the 3rd row of Fig. 4, which shows that annular partitioning exhibits noticeable advantageous enhancement.", + "bbox": [ + 498, + 452, + 893, + 498 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The number of annular rings. From the previous experiment, we came to the conclusion that annular partitioning is a better option. Further, we study the proper setting of the number of concentric rings, which will directly affect the amount of calculation and the precision of PSF representation. Although increasing the number of split rings can handle the radical difference of lens aberration better, it also brings higher computational complexity. 
Hence, we traverse several levels of radical division, i.e., the number of annular rings to find a good balance between precision and efficiency. The results are shown in Table 1, which shows that for usual commercial lenses, the performance improvement becomes marginal when the number grows beyond five. Therefore, we compromised and chose to split into 5 rings in our experiments.", + "bbox": [ + 496, + 502, + 893, + 729 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/a35e3b5447959db413d7f4f0106bdc0769a4fc7cd034a9942028aa2c68016384.jpg", + "table_caption": [ + "Table 1. The performance at varying numbers of rings." + ], + "table_footnote": [], + "table_body": "
# of Rings | Patch Size (pixels) | Data Volume | PSNR (dB) | SSIM
3 | 214×214 | 75 | 29.12 | 0.914
5 | 128×128 | 92 | 30.96 | 0.952
7 | 92×92 | 132 | 30.99 | 0.953
9 | 72×72 | 160 | 30.91 | 0.950
", + "bbox": [ + 504, + 755, + 885, + 816 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The number of bases. We applied ONMF to obtain a low dimensional representation of the lens aberration, i.e., PSF. Intuitively, more bases provide a higher PSF representation accuracy that helps the reconstruction but tends to increase the ill-posedness of the decomposition network and the num", + "bbox": [ + 498, + 824, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "24866", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ber of required deconvolution modules, harming the final performance as well as the running efficiency on the contrary. Therefore, pursuing the optimal number of bases is of crucial importance. As Tab. 2 shows, we test the performance with different numbers of bases and obtain a good balance at 9 bases, with high quality and low computational complexity.", + "bbox": [ + 75, + 90, + 472, + 183 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/388eb160e39ca86001e7d68af41165c66441ed73c05cac7221cac146b7630555.jpg", + "table_caption": [ + "Table 2. The performances using different numbers of bases." + ], + "table_footnote": [], + "table_body": "
# of bases | 5 | 7 | 9 | 11 | 13
SSIM | 0.901 | 0.927 | 0.952 | 0.955 | 0.955
PSNR (dB) | 28.14 | 29.12 | 30.96 | 31.01 | 31.00
Training time | ~9hrs | ~11hrs | ~12hrs | ~16hrs | ~18hrs
", + "bbox": [ + 81, + 210, + 464, + 272 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The applicability to various lenses. After determining the optimal number of bases, we test on 5 low-end commercial lenses (not included in the lens dataset) to verify the generalization ability of our model. By calculating the representation accuracy of the new lenses and the final lens correction result in Fig. 5, we arrive at two conclusions: the reconstruction quality is directly proportional to the accuracy of PSF representation; our approach is widely applicable for diverse lenses.", + "bbox": [ + 75, + 282, + 472, + 417 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c9c709edc2c6fe61cccc7bf127461f8e55cdf1c329774a8ad07594ab13f47106.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 109, + 431, + 436, + 553 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0465a978f5c2a83828c257ea5e2b605db2a461b9406ae64a71122d4b6931d048.jpg", + "image_caption": [ + "Figure 5. Our performance on diverse lenses. (a) The representation precision (horizontal axis) and their performance (vertical axis) on 5 test lenses, in terms of PSNR. Here the scattered box plot is drawn from the compensation results of 20 test images synthesized from the Flickr2K dataset. (b) PSF (left column) and our compensation result (right column, with the bottom-left and top-right insets being input and output respectively) of two lenses with the best and worst performance in (a)." + ], + "image_footnote": [], + "bbox": [ + 112, + 553, + 230, + 705 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bd684930377f643a8041092a9461b64c9ffe6e96317958fd4f0f2642dde83ada.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 243, + 553, + 334, + 705 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bc7811f851e1ddea4467bfaad220ebb13ed6a28ad75be2bdc211de9b3efe7d17.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 553, + 434, + 705 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Balancing both effectiveness and efficiency, we empirically divide the original low-quality images $(1280\\times 1280$ pixels) into 5 concentric rings and select 9 bases (preserving an impressive $96.6\\%$ of the PSF variation in our lens", + "bbox": [ + 75, + 839, + 473, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "database) in the final implementations of our model, based on the above quantitative experiment results.", + "bbox": [ + 496, + 90, + 892, + 122 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3. Performance Comparison", + "text_level": 1, + "bbox": [ + 498, + 130, + 736, + 147 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We compare our approach with SOTA blind (DeblurGANv2 [17], MPRNet [26]) and non-blind (DPIR [12], DWDN [6]) deblurring methods, including both optimization-based and CNN-based algorithms. The results are shown in Tab. 3 and Fig. 6, from which one can observe the following trends: (i) Blind deblurring algorithms generally exhibit good performance in addressing motion blur problems, but in terms of lens aberration correction, our approach performs better than SOTAs and yields notably clearer images with finer details and fewer artifacts. (ii) By employing our low-rank PSF learning model, the PSF can be efficiently characterized, facilitating blind deconvolution to attain performance on par with or even superior non-blind algorithms. 
Overall, we achieve blind lens aberration correction, surpassing the SOTA blind deblurring methods, and perform on par with non-blind approaches.", + "bbox": [ + 496, + 152, + 893, + 393 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/858866131bbebe40f1ab5a4987c6ea7c2b014047bd04b93161882e98f386b68f.jpg", + "table_caption": [ + "Table 3. Quantitative performance comparison with SOTAs" + ], + "table_footnote": [], + "table_body": "
Method | DeblurGANv2 | MPRNet | Eboli's | DPIR | DWDN | Ours
Blind? | ✓ | ✓ | ✓ | × | × | ✓
PSNR (dB) | 23.04 | 28.67 | 29.42 | 31.86 | 31.78 | 30.96
SSIM | 0.726 | 0.927 | 0.934 | 0.962 | 0.960 | 0.952
", + "bbox": [ + 503, + 414, + 888, + 469 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.4. Real experiments", + "text_level": 1, + "bbox": [ + 498, + 483, + 669, + 500 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To test the performance on real data, we use several low-end compact commercial cameras composed of a simple lens to capture low-quality images and computationally raise their visual quality with our model. Besides the results in Fig. 1, we show two more examples in Fig. 7, with the photo of the cameras in the left column, blurry input in the 2nd column, and the final reconstructed high-quality outputs in the rightmost column. We also show the side-by-side zoomed-in comparison of highlighted regions in the 3rd column for better visualization. One can see that the details in both the center and the periphery are recovered decently. Also, the consistently improved quality validates the wide applicability of the proposed approach to various lenses.", + "bbox": [ + 496, + 500, + 893, + 696 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 498, + 709, + 619, + 724 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We have reported a versatile scheme capable of compensating lens aberrations of various lenses in an end-to-end manner and without model retraining or refinement. The universality of our approach stems from two key designs: Firstly, we incorporate the key physical properties inherent in camera lenses, such as rotational symmetry and low-dimensional structure after ring-to-rectangle warping; (ii) we integrate a deep neural network to reverse the aberration in a divide-and-conquer manner, i.e., decompose the low-quality input into basic components corresponding to the low-dimensional compositions of the aberration model, and", + "bbox": [ + 496, + 734, + 895, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "24867", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3d6708e46782f832076713302bd294b0e602ad1a8562a3d3da39d2b6a797f8cd.jpg", + "image_caption": [ + "(a) Blurry input" + ], + "image_footnote": [], + "bbox": [ + 104, + 84, + 235, + 186 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2f4155c4e9d2a5eea28bab7898c49251973670356b63bb3f9f27f0f55d7c892e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 236, + 84, + 326, + 152 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e37504446d61bd82f9dca711fa56e716608e726e387a3c595364f1e951fdfa4c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 238, + 157, + 326, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/025a9c7f5cfac3f1983445bddfe79e45f13c2712953ff95b25d3af5c9544501e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 84, + 418, + 152 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d46f7b7fa4ad932f21b8e884ca7954509695e1320ee0fe72abdca7d0b82c285a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 421, + 84, + 506, + 152 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7dacc45d77cccf420f3e60001db170c9590eb961fc22362905883171701f5c82.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 84, + 594, + 152 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a367d9b369b9ec397ae39913b6fb782152e29741e5059e661bbfc2fef159d33f.jpg", + 
"image_caption": [], + "image_footnote": [], + "bbox": [ + 596, + 84, + 683, + 152 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2194fa63f306ccd56c37c3359752938e92c924bcaa92aaaa488d86f4a77b9f77.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 84, + 771, + 152 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ce58de75d2c27ab2b2dd3c237256a19db03c317b951e0699fa105e444d5ef8a5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 772, + 84, + 859, + 152 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/88ad4ed0a95cca921a94000abb74874044871fc9f657b307ff59e2d5235d81aa.jpg", + "image_caption": [ + "(b) Our output" + ], + "image_footnote": [], + "bbox": [ + 102, + 198, + 235, + 300 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ab191d559b74034c93012e7e746a64be3ed3c99aeb4b9bc5be2411610d39eaf8.jpg", + "image_caption": [ + "(c) Blurry patches" + ], + "image_footnote": [], + "bbox": [ + 238, + 231, + 326, + 300 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7558571b35c189440432710e460498c37c6f49cdac61c450e1128c316421efd2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 157, + 418, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d0abac9e698509ac7d95f522160f3c54a1d087ad91b5fc330639f3af41e6e8e2.jpg", + "image_caption": [ + "(d) DeblurGANv2" + ], + "image_footnote": [], + "bbox": [ + 331, + 232, + 418, + 300 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1b04f8eae0522bc236b30021e7adf2559742e494e16f66bc403bbf3dffb42088.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 421, + 157, + 506, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/52d12fe2ed12f164333671d028facf29a41ddae821c8c01d1e1450d863382b2a.jpg", + "image_caption": [ + "(e) MPRNet" + ], + "image_footnote": [], + "bbox": [ + 421, + 232, + 506, + 300 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d70e5b0408c59ba873b3e2a10d1c4a66709e860e7d9442452f64e3224aff7298.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 157, + 594, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/840978a1a4c8174b2d7334a6c62696daffba4dbf61023d9da030a678c72c4489.jpg", + "image_caption": [ + "(f) DPIR" + ], + "image_footnote": [], + "bbox": [ + 509, + 232, + 594, + 300 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fcf9f27376e8b76cd1b2c694621b5e464bf8c3bade0224f741440ed9ed2dc85f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 596, + 157, + 683, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1877293483ff65f20cc42b99c1c4fbb6a8c9a4723d849003180f3914913ea034.jpg", + "image_caption": [ + "(g)DWDN" + ], + "image_footnote": [], + "bbox": [ + 596, + 232, + 683, + 300 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bed0ee63cec21be53609195cda412e0831b013b339052eaa5c69830bc08d830b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 157, + 771, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/914b407719f43d536ccf7dfea48debf26b9335466508700221f7cfc12b2c1edb.jpg", + "image_caption": [ + "(h) Ours" + ], + "image_footnote": [], + "bbox": [ + 684, + 232, + 771, + 300 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8da7297b7ce2cbc1513af0cf468623e6d3778a0548a6443711932273c860a22e.jpg", + 
"image_caption": [], + "image_footnote": [], + "bbox": [ + 772, + 157, + 859, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/12c0aa09441262812c1214feac0e38373e3627911f256e4d700d9db9b3befe32.jpg", + "image_caption": [ + "(i) Ground truth" + ], + "image_footnote": [], + "bbox": [ + 772, + 232, + 859, + 300 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e532c30da681326f55d30f0ff4a60400416d6ea3bb0ab04e91802f5f863ea227.jpg", + "image_caption": [ + "Figure 6. Performance comparison with SOTA methods. (a) The input blurry image. (b) The result after aberration correction by our model. (c-i) The comparison among the results produced by different SOTA algorithms (d-h), in contrast to the blurry input (c) and ground-truth sharp version (i), with PSNR and SSIM scores presented in Tab. 3. Here we compare three ROIs, which are cropped from different locations to demonstrate the performance on non-uniform lens aberrations. Note that (d-h) are blind compensation results and (f)(g) are non-blind.", + "(a) Experimental lenses", + "Figure 7. Results on real data captured by two compact cameras with a low-end lens. (a) The photo of two lenses, with the smaller one being only around $1\\mathrm{cm}$ , which is highly portable but exhibits significant aberrations. (b)(d) Raw images captured using the cameras in (a) and the results after compensation. (c) The zoomed-in view of the highlighted regions in (b) and (d), distinctly showcases the performance at both regions closer to the center (red boxes) and toward the corners (yellow boxes) of the sensor's field of view." + ], + "image_footnote": [], + "bbox": [ + 109, + 383, + 279, + 672 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4bbd9908935deef34c0b0d16973d38c535ea9d8d71cb53c4176776a3c5a2fc29.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 383, + 470, + 527 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b1ffba0cdd15c38df41ea6b2ba6eba8ddb6953d972dd4c80bad7e9ef83d87fd3.jpg", + "image_caption": [ + "(b) Input" + ], + "image_footnote": [], + "bbox": [ + 284, + 529, + 470, + 672 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/56a50073158cfbf3667c323c885ff91001a0385a06a04097f96e7f6f6cbb2409.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 475, + 383, + 571, + 455 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/21a076746255712c3fc7776d2b74056b485425da292ae51ca2863e94a030c87b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 475, + 457, + 568, + 527 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/855efd7e6db84251c201d5474af50eae3026297c55451fcd3c8944738d931b08.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 475, + 529, + 568, + 599 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bb24b2b2a6ef4c2b04da8352f107285151eef55bc551238924ea4b933dcfb9dc.jpg", + "image_caption": [ + "(c) ROI performance" + ], + "image_footnote": [], + "bbox": [ + 475, + 601, + 568, + 672 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2963efe7968fc4f43438d6c60d5a95e68ef91b86c059e197e07e1c3f24b6c945.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 571, + 383, + 666, + 455 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5654099080b2869d56a544cfcca9a886676c9a522458fcb27e4f1ed284be141a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 571, + 457, + 
665, + 527 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5ff22a150fb68abacd93ab365993a8d6bd61073bd46ed21838f22f9623055409.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 571, + 529, + 665, + 599 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a3d308c3c4bd0f01d0ba1369b7e225cf03bee1c89bd6edf4ca604ce5cc9113e5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 571, + 601, + 665, + 672 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e054d41dd8f0b4bd142c9d7a34a6afe7019feaf1c68153472921f35c221345b7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 668, + 383, + 854, + 527 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/10c339dd1b81f4bea479f0420d2c29e727a7b3fc7e643e67135d3f367ef35309.jpg", + "image_caption": [ + "(d) Output" + ], + "image_footnote": [], + "bbox": [ + 669, + 529, + 854, + 672 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "then adopt pre-trained compensation modules to reconstruct the high-quality image with high robustness. The proposed approach offers high generalization ability to diverse lenses and requires no expertise-demanding calibration. Moreover, we achieve performance comparable to existing methods with careful calibration and lens-specific model training.", + "bbox": [ + 75, + 763, + 468, + 854 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "So far, our experiments assume sufficient exposure and high pixel count, but we acknowledge the potential for future enhancements, such as accounting for more realistic noise", + "bbox": [ + 75, + 854, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "models and addressing other degradations like downsampling. Looking ahead, we envision further advancements of our model, focusing on the development of a lightweight network and on-chip implementation. As computational aberration compensation continues to progress, our method serves as a promising step towards enabling practical and cost-effective optical aberration correction for a wide range of applications.", + "bbox": [ + 496, + 763, + 893, + 885 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "24868", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Samuel Arba-Mosquera, Shwetabh Verma, and Shady T Awwad. Theoretical effect of coma and spherical aberrations translation on refractive error and higher order aberrations. In Photonics, page 116. MDPI, 2020. 2", + "[2] Johannes Brauers, Claude Seiler, and Til Aach. Direct psf estimation using a random noise target. In Digital Photography VI, pages 96-105. SPIE, 2010. 3", + "[3] Ayan Chakrabarti, Todd Zickler, and William T Freeman. Analyzing spatially-varying blur. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2512-2519. IEEE, 2010. 3", + "[4] Jinlin Cui and Wei Huang. Optical aberration correction for simple lenses via sparse representation. Optics Communications, pages 201-213, 2018. 3", + "[5] Maurizio Delbracio, Ignacio Garcia-Dorado, SungJoon Choi, Damien Kelly, and Peyman Milanfar. Polyblur: Removing mild blur by polynomial reburring. IEEE Transactions on Computational Imaging, pages 837-848, 2021. 3", + "[6] Jiangxin Dong, Stefan Roth, and Bernt Schiele. 
DWDN: deep wiener deconvolution network for non-blind image deblurring. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 9960-9976, 2021. 4, 5, 7", + "[7] Thomas Eboli, Jean-Michel Morel, and Gabriele Facciolo. Fast two-step blind optical aberration correction. In European Conference on Computer Vision, pages 693-708. Springer, 2022. 3", + "[8] T Furieri, A Bassi, and S Bonora. Large field of view aberrations correction with deformable lenses and multi conjugate adaptive optics. Journal of Biophotonics, page e202300104, 2023. 3", + "[9] O García-Lievanos and S Vázquez-Montiel. Free system of spherical and coma aberrations by use aspherical and diffractive surfaces. In AIP Conference Proceedings, pages 659-664. American Institute of Physics, 2008. 2", + "[10] Felix Heide, Mushfiqur Rouf, Matthias B Hullin, Bjorn Labitzke, Wolfgang Heidrich, and Andreas Kolb. High-quality computational imaging through simple lenses. ACM Transactions on Graphics, pages 1-14, 2013. 3", + "[11] Michael Hirsch and Bernhard Scholkopf. Self-calibration of optical lenses. In Proceedings of the IEEE International Conference on Computer Vision, pages 612-620, 2015. 3", + "[12] Zhanli Hu, Hengzhi Xue, Qiyang Zhang, Juan Gao, Na Zhang, Sijuan Zou, Yueyang Teng, Xin Liu, Yongfeng Yang, Dong Liang, et al. DPIR-Net: Direct pet image reconstruction based on the wasserstein generative adversarial network. IEEE Transactions on Radiation and Plasma Medical Sciences, pages 35–43, 2020. 7", + "[13] Qi Jiang, Hao Shi, Lei Sun, Shaohua Gao, Kailun Yang, and Kaiwei Wang. Annular computational imaging: Capture clear panoramic images through simple lens. IEEE Transactions on Computational Imaging, 8:1250-1264, 2022. 3", + "[14] Jaihyun Koh, Jangho Lee, and Sungroh Yoon. Single-image deblurring with neural networks: A comparative survey. Computer Vision and Image Understanding, page 103134, 2021. 1" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Amit Kohli, Anastasios Angelopoulos, Sixian You, and Laura Waller. Shift-variant deblurring for rotationally symmetric systems. In Computational Optical Sensing and Imaging, pages CTh5A-4. Optica Publishing Group, 2021. 3", + "[16] Amit Kohli, Anastasios Angelopoulos, Sixian You, Kyrolos Yanny, and Laura Waller. Linear revolution-invariance: Modeling and deblurring spatially-varying imaging systems. arXiv preprint arXiv:2206.08928, 2022. 3", + "[17] Orest Kupyn, Tetiana Martyniuk, Junru Wu, and Zhangyang Wang. Deblurring-v2: Deblurring (orders-of-magnitude) faster and better. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8878-8887, 2019. 3, 7", + "[18] Anat Levin, Rob Fergus, Frédo Durand, and William T Freeman. Image and depth from a conventional camera with a coded aperture. ACM Transactions on Graphics, pages 70-81, 2007. 3", + "[19] Weili Li, Xiaoqing Yin, Yu Liu, and Maojun Zhang. Computational imaging through chromatic aberration corrected simple lenses. Journal of Modern Optics, pages 2211-2220, 2017. 3", + "[20] Xiu Li, Jinli Suo, Weihang Zhang, Xin Yuan, and Qionghai Dai. Universal and flexible optical aberration correction using deep-prior based deconvolution. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2613-2621, 2021. 3", + "[21] Esther YH Lin, Zhecheng Wang, Rebecca Lin, Daniel Miau, Florian Kainz, Jiawen Chen, Xuaner Cecilia Zhang, David B Lindell, and Kiriakos N Kutulakos. 
Learning lens blur fields. arXiv preprint arXiv:2310.11535, 2023. 3", + "[22] Ting Lin, ShiQi Chen, Huajun Feng, Zhihai Xu, Qi Li, and Yueting Chen. Non-blind optical degradation correction via frequency self-adaptive and finetune tactics. Optics Express, pages 23485-23498, 2022. 3", + "[23] Alice Lucas, Michael Iliadis, Rafael Molina, and Aggelos K Katsaggelos. Using deep neural networks for inverse problems in imaging: Beyond analytical methods. IEEE Signal Processing Magazine, pages 20-36, 2018. 1", + "[24] Virendra N Mahajan. Zernike annular polynomials for imaging systems with annular pupils. Journal of the Optical Society of America, pages 75-85, 1981. 2", + "[25] James P McGuire and Russell A Chipman. Polarization aberrations. 1. rotationally symmetric optical systems. Applied optics, pages 5080-5100, 1994. 2", + "[26] Armin Mehri, Parichehr B Ardakani, and Angel D Sappa. MPRNet: Multi-path residual network for lightweight image super resolution. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2704-2713, 2021. 7", + "[27] Ali Mosleh, Paul Green, Emmanuel Onzon, Isabelle Begin, and JM Pierre Langlois. Camera intrinsic blur kernel estimation: A reliable framework. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4961-4968, 2015. 1", + "[28] Gregory Ongie, Ajil Jalal, Christopher A Metzler, Richard G Baraniuk, Alexandros G Dimakis, and Rebecca Willett. Deep learning techniques for inverse problems in imaging. IEEE" + ], + "bbox": [ + 501, + 92, + 893, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "24869", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Journal on Selected Areas in Information Theory, pages 39-56, 2020. 1", + "[29] Kambiz Rahbar and Karim Faez. Blind correction of lens aberration using zernike moments. In IEEE International Conference on Image Processing, pages 861-864. IEEE, 2011. 2, 3", + "[30] Abderrahmane Rahiche and Mohamed Cheriet. Forgery detection in hyperspectral document images using graph orthogonal nonnegative matrix factorization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 662-663, 2020. 4", + "[31] Jiangpeng Rong, Shiyao Huang, Zeyu Shang, and Xianghua Ying. Radial lens distortion correction using convolutional neural networks trained with synthesized images. In *Asian Conference on Computer Vision*, pages 35-49. Springer, 2017. 3", + "[32] Christian J Schuler, Michael Hirsch, Stefan Harmeling, and Bernhard Schölkopf. Blind correction of optical aberrations. In European Conference on Computer Vision, pages 187-200. Springer, 2012. 1, 2, 3", + "[33] Yichang Shih, Brian Guenter, and Neel Joshi. Image enhancement using calibrated lens simulations. In European Conference on Computer Vision, pages 42-56. Springer, 2012. 3", + "[34] Tiancheng Sun, Yifan Peng, and Wolfgang Heidrich. Revisiting cross-channel information transfer for chromatic aberration correction. In Proceedings of the IEEE International Conference on Computer Vision, pages 3248-3256, 2017. 3", + "[35] Huixuan Tang and Kiriakos N Kutulakos. What does an aberrated photo tell us about the lens and the scene? In IEEE International Conference on Computational Photography, pages 1-10. IEEE, 2013. 3", + "[36] Kaiyi Tang, Shuangyang Zhang, Yang Wang, Xiaoming Zhang, Zhenyang Liu, Zhichao Liang, Huafeng Wang, Lingjian Chen, Wufan Chen, and Li Qi. 
Learning spatially variant degradation for unsupervised blind photoacoustic tomography image restoration. Photoacoustics, page 100536, 2023. 3", + "[37] Berge Tatian. Aberration balancing in rotationally symmetric lenses. Journal of the Optical Society of America, pages 1083-1091, 1974. 2", + "[38] Chao Wang, Juan Chen, Hongguang Jia, Baosong Shi, Ruifei Zhu, Qun Wei, Linyao Yu, and Mingda Ge. Parameterized modeling of spatially varying psf for lens aberration and defocus. Journal of the Optical Society of Korea, pages 136-143, 2015. 3", + "[39] Pei Wang, Wei Sun, Qingsen Yan, Axi Niu, Rui Li, Yu Zhu, Jinqiu Sun, and Yanning Zhang. Non-uniform motion deblurring with blurry component divided guidance. Pattern Recognition, page 108082, 2021. 3", + "[40] Esther Whang, David McAllister, Ashwin Reddy, Amit Kohli, and Laura Waller. Seidelnet: an aberration-informed deep learning model for spatially varying deblurring. In AI and Optical Data Sciences IV, pages 276-281. SPIE, 2023. 3", + "[41] Chudan Wu, Yan Wo, Guoqing Han, Zhangyong Wu, and Jiyun Liang. Non-uniform image blind deblurring by two-stage fully convolution network. IET Image Processing, pages 2588-2596, 2020. 1" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[42] Zhenhua Xu, Huasong Chen, and Zhenhua Li. Fast blind deconvolution using a deeper sparse patch-wise maximum gradient prior. Signal Processing: Image Communication, page 116050, 2021. 3", + "[43] Jianchao Yang, John Wright, Thomas Huang, and Yi Ma. Image super-resolution as sparse representation of raw image patches. In IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 1", + "[44] Kyrollos Yanny, Kristina Monakhova, Richard W Shuai, and Laura Waller. Deep learning for fast spatially varying deconvolution. Optica, pages 96-99, 2022. 3", + "[45] Tao Yue, Jinli Suo, Jue Wang, Xun Cao, and Qionghai Dai. Blind optical aberration correction by exploring geometric and visual priors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1684-1692, 2015. 1, 2, 3", + "[46] Dazhi Zhan, Weili Li, Xiaoqing Yin, Caiyun Niu, and Jin Liu. Psf estimation method of simple-lens camera using normal sinh-arcsinh model based on noise image pairs. IEEE Access, pages 49338-49353, 2021. 1", + "[47] Kaihao Zhang, Wenqi Ren, Wenhan Luo, Wei-Sheng Lai, Björn Stenger, Ming-Hsuan Yang, and Hongdong Li. Deep image deblurring: A survey. International Journal of Computer Vision, pages 2103-2130, 2022. 3", + "[48] Zhihong Zhang, Yuxiao Cheng, Jinli Suo, Liheng Bian, and Qionghai Dai. INFWIDE: Image and feature space wiener deconvolution network for non-blind image deblurring in low-light conditions. IEEE Transactions on Image Processing, pages 1390-1402, 2023. 3", + "[49] Changyin Zhou, Stephen Lin, and Shree K Nayar. Coded aperture pairs for depth from defocus and defocus deblurring. International Journal of Computer Vision, pages 53-72, 2011. 
3" + ], + "bbox": [ + 501, + 92, + 893, + 556 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "24870", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/aeee8578-b512-4a62-8ec3-b06e011ce338_model.json b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/aeee8578-b512-4a62-8ec3-b06e011ce338_model.json new file mode 100644 index 0000000000000000000000000000000000000000..fd5548c1dc128ad17123141b7d79a81148967837 --- /dev/null +++ b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/aeee8578-b512-4a62-8ec3-b06e011ce338_model.json @@ -0,0 +1,2673 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.202, + 0.131, + 0.773, + 0.175 + ], + "angle": 0, + "content": "A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.203, + 0.268, + 0.222 + ], + "angle": 0, + "content": "Jin Gong1" + }, + { + "type": "text", + "bbox": [ + 0.319, + 0.204, + 0.445, + 0.222 + ], + "angle": 0, + "content": "Runzhao Yang" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.204, + 0.632, + 0.222 + ], + "angle": 0, + "content": "Weihang Zhang" + }, + { + "type": "text", + "bbox": [ + 0.684, + 0.203, + 0.783, + 0.221 + ], + "angle": 0, + "content": "Jinli Suo1,2,3" + }, + { + "type": "text", + "bbox": [ + 0.421, + 0.228, + 0.548, + 0.246 + ], + "angle": 0, + "content": "Qionghai Dai\\(^{1,2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.247, + 0.743, + 0.265 + ], + "angle": 0, + "content": "\\(^{1}\\)Department of Automation, Tsinghua University, Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.265, + 0.796, + 0.282 + ], + "angle": 0, + "content": "\\(^{2}\\)Institute of Brain and Cognitive Sciences, Tsinghua University, Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.242, + 0.282, + 0.727, + 0.3 + ], + "angle": 0, + "content": "\\(^{3}\\)Shanghai Artificial Intelligence Laboratory, Shanghai, China" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.335, + 0.314, + 0.351 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.367, + 0.474, + 0.761 + ], + "angle": 0, + "content": "High-end lenses, although offering high-quality images, suffer from both insufficient affordability and bulky design, which hamper their applications in low-budget scenarios or on low-payload platforms. A flexible scheme is to tackle the optical aberration of low-end lenses computationally. However, it is highly demanded but quite challenging to build a general model capable of handling non-stationary aberrations and covering diverse lenses, especially in a blind manner. 
To address this issue, we propose a universal solution by extensively utilizing the physical properties of camera lenses: (i) reducing the complexity of lens aberrations, i.e., lens-specific non-stationary blur, by warping annual-ring-shaped sub-images into rectangular stripes to transform non-uniform degenerations into a uniform one, (ii) building a low-dimensional non-negative orthogonal representation of lens blur kernels to cover diverse lenses; (iii) designing a decoupling network to decompose the input low-quality image into several components degenerated by above kernel bases, and applying corresponding pre-trained deconvolution networks to reverse the degeneration. Benefiting from the proper incorporation of lenses' physical properties and unique network design, the proposed method achieves superb imaging quality, wide applicability for various lenses, high running efficiency, and is totally free of kernel calibration. These advantages bring great potential for scenarios requiring lightweight high-quality photography." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.784, + 0.21, + 0.8 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.472, + 0.902 + ], + "angle": 0, + "content": "High-quality photography is of crucial importance for both high-fidelity visual recording (e.g., filmmaking, sports video capturing) and sophisticated computer vision tasks (e.g., surveillance, auto-piloting). High-end camera systems often employ compound lenses comprising approximately ten or more components constructed from diverse materials to com" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.336, + 0.895, + 0.442 + ], + "angle": 0, + "content": "pensate for geometric and photometric aberrations. Such complicated designs are proven to be effective in achieving nice image quality, but come with inherent drawbacks, including high costs, bulkiness, and fragility, making them unsuitable for scenarios with low payload capacity or limited budgets. Consequently, the demand for high-quality photography using lightweight lenses has significantly intensified." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.444, + 0.895, + 0.55 + ], + "angle": 0, + "content": "Considering the optical aberration in image formation and the afterward processing jointly, the workload in optical design can be shifted to the later stage [23, 28], where advanced reconstruction algorithms play a crucial role. One can also utilize the physical properties in the imaging setup to facilitate reconstruction, and researchers have made some primary efforts in this direction [32, 45]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.552, + 0.896, + 0.869 + ], + "angle": 0, + "content": "However, grand challenges lie in the variability of optical aberrations across the field of view and diverse lenses. On the one hand, the quality degeneration of a simple lens is intrinsically a convolution with non-uniform blur kernels, and the typical compensation algorithms [43] approximate the globally non-uniform deconvolution with patch-wise uniform deconvolutions, leading to a trade-off between precision and computational efficiency. This trade-off comes up with high inflexibility when adopting data-driven approaches [14, 41], which require learning a large number of models to achieve high-performance results. 
On the other hand, the degradation of different lenses varies significantly, so lens-specific algorithm development or parameter optimizations are required for high reconstruction performance. Furthermore, the calibration of the PSF kernels of camera lenses is quite expertise-demanding[27, 46], and blind compensation is more favorable for users. Overall, computational compensation for lens aberrations holds great promise for achieving lightweight high-quality imaging. However, there is a pressing demand for a general approach to handle spatially varying aberrations of diverse lenses in a blind manner." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.895, + 0.901 + ], + "angle": 0, + "content": "In this paper, we propose a physics-informed end-to-end solution that (i) capitalizes the characteristics of lens aber-" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "24861" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.098, + 0.085, + 0.258, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.098, + 0.201, + 0.258, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.323, + 0.187, + 0.334 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.264, + 0.083, + 0.464, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.264, + 0.201, + 0.464, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.356, + 0.323, + 0.374, + 0.335 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.469, + 0.083, + 0.874, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.663, + 0.323, + 0.679, + 0.334 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.34, + 0.895, + 0.397 + ], + "angle": 0, + "content": "Figure 1. An illustrative example of our lens-aberration compensation approach. (a) A camera equipped with a simple lens of a large field of view but severe optical aberration (WXSJ-H65HD) and a small-sized unmanned aerial vehicle (UAV) carrying the camera for data capture. (b) The input degenerated image (upper) and our reconstruction result (lower). (c) The zoomed-in comparison on the highlighted region (white box) in (b), where the original recording is shown in the bottom left corner and the reconstructed result in the top right corner." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.413, + 0.473, + 0.64 + ], + "angle": 0, + "content": "ration to construct a low-dimensional representation of nonuniform blur kernels of general camera lenses, and (ii) designs a deep neural network resolving the degeneration components in the low-quality input and ensembling a set of pretrained compensators to reverse the degeneration robustly. Specifically, we represent an arbitrary local point spread function (PSF) with a set of negative orthogonal bases, pretrain their corresponding deconvolution modules, and then retrieve their degeneration from the low-quality image captured by a low-end lens and apply the pre-trained inversion models accordingly. The proposed approach demonstrates high performance and holds high potential in lightweight photography on low-payload platforms, as shown by the impressive results captured with a small drone equipped with a compact surveillance camera in Fig. 1." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.656, + 0.471, + 0.687 + ], + "angle": 0, + "content": "In summary, we target for general, blind, and end-to-end lens aberration correction, and make the following contributions:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.703, + 0.471, + 0.746 + ], + "angle": 0, + "content": "1) Proposes a unified framework for lens aberration compensation with high flexibility to diverse aberrations produced by the wide range of camera lenses." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.748, + 0.472, + 0.792 + ], + "angle": 0, + "content": "2) Builds a general low-dimensional model of lens aberrations based on Orthogonal Non-negative Matrix Factorization, utilizing the physical properties of optical lenses." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.794, + 0.471, + 0.853 + ], + "angle": 0, + "content": "3) Designs an end-to-end network to divide and conquer the optical aberrations in the above low-dimensional space, enabling fast and blind inversion of diverse lens degeneration and ultimately lightweight high-quality imaging." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.854, + 0.471, + 0.898 + ], + "angle": 0, + "content": "4) Demonstrates performance comparable to state-of-the-art non-blind lens-specific algorithms, validating its great potential in budget-constrained or low-capacity platforms." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.703, + 0.472, + 0.898 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.411, + 0.642, + 0.426 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.433, + 0.895, + 0.538 + ], + "angle": 0, + "content": "Lens aberration modeling. Generally, imaging lenses are physically rotational symmetric around the optical center, imparting rotation symmetry of the lens aberration [25, 37]. Ignoring the fabrication imperfections, almost all types of lens aberrations, such as spherical aberration, coma aberration, and chromatic aberration [1, 9], form a rotational symmetric pattern." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.54, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Utilizing these unique features of lens aberration, researchers have proposed some methods to simplify the degeneration by diverse lenses. For example, Rahbar et al. [29] adopt the Zernike model [24] to describe optical aberrations and can estimate the Zernike coefficients of a single channel through bicoherence and tricoherence estimation techniques, while Schuler et al. [32] represent the non-uniform aberrations with a set of orthonormal Efficient Filter Flow, which is applicable for most cases without large spherical aberration. Differently, Yue et al. [45] leverage the global rotational symmetry properties of regular lenses to transform non-uniform aberrations into uniform rings using radial splitting and warping techniques. This method capitalizes on the inherent physical properties of the imaging lens and largely simplifies the aberration model, offering inspiration for our approach to explore the unique structures of lens aberrations. However, their implementations use conventional optimization by alternatively kernel estimation and deblurring, which is time-consuming and the strong consumption of PSF uniformity within a stretched annular ring harms the performance slightly, which limits the applications demanding real-time and high-quality compensation. 
In contrast, we design a framework consisting of well-organized sub-networks to address all these issues decently." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "24862" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.212 + ], + "angle": 0, + "content": "Optical aberration removal. Lens aberration exists widely in optical imaging systems, and computational correction is an essential way to raise imaging quality without increasing hardware budget. One common way to model lens aberration is by convolving the image with spatially varying PSFs and compensation is naturally conducted via deconvolution [38, 44]. Existing correction methods can be broadly categorized into non-blind and blind ones." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.215, + 0.474, + 0.474 + ], + "angle": 0, + "content": "Non-blind correction assumes known PSFs and algorithms are extensively studied [4, 22]. Researchers have proposed different algorithms to estimate kernel PSFs via, e.g., using a combination of defocused and focused images [2], a set of binary random patterns [10], aggregating degenerations in informative nature image patches [11], analyzing the spectral characteristics of the lens system [3, 19, 34]. Further, to cover PSFs of different camera lenses, Shih et al. [33] utilize interpolation methods by fitting a spatial Gaussian model. More recently, Li et al. [20] propose a data-driven deep learning approach explicitly taking the PSF as input and introducing lens-specific priors for high-quality compensation. In sum, non-blind lens aberration compensation techniques have shown promising performance, but they require expertise demanding PSF calibration or robust estimation, which are not friendly for non-expert users and require lens-specific model training." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.476, + 0.471, + 0.688 + ], + "angle": 0, + "content": "Blind methods have gained significant attention due to their convenience for un-trained users and high flexibility to diverse lenses. The typical strategy is to estimate PSFs and conduct compensation sequentially. Among them Rahbar et al. [29] introduce Zernike moments, and Tang & Kutulakos [35] employ the Seidel model to simplify the lens aberration model; while Delbracio et al. [5] propose a robust algorithm based on empirical observations about the distribution of the gradient in clear natural images. Recently, some researchers have adopted the data-driven scheme and developed deep neural networks to compensate for the aberrations, but often focus on specific types of aberrations to ensure good convergence, such as radial lens distortion [31] and chromatic aberrations [7]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.471, + 0.901 + ], + "angle": 0, + "content": "In light of these developments, the blind compensation techniques (either based on conventional optimization or deep neural networks) cannot achieve performance comparable to their non-blind counterpart. Besides, existing methods are incapable of handling diverse lenses and various types of aberrations flexibly [4, 8]. In contrast, the proposed work leverages the physical properties of camera lenses and casts the complex aberrations of diverse lenses into a unified low-dimensional space, in which we divide and conquer the degeneration via incorporating geometric priors and a small number of pre-trained modules. 
Benefiting from the proper use of lenses' physical properties and elegant network design, we can achieve performance comparable to non-blind techniques as well. Such a general end-to-end" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.137 + ], + "angle": 0, + "content": "blind solution with superb performance holds great potential for high-quality lightweight imaging systems on portable devices or low-capacity mobile platforms." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.138, + 0.895, + 0.575 + ], + "angle": 0, + "content": "Non-uniform deconvolution. Mathematically lens aberration compensation can be described as a non-uniform deconvolution process and shares the same formulation with other tasks such as camera shake, defocus, object motion, etc. Various techniques and approaches have been proposed to address the technical challenges posed by non-uniform blurring [39, 47]. There are three main ways to address the non-uniformity. The most intuitive and widely used way is to assume patch-wise uniform PSFs and conduct deconvolution patch by patch [36, 42]. The deconvolution can be implemented via conventional optimization previously and deep neural networks recently, and usually in a non-blind manner. There are a bunch of algorithms, and we do not list them here. The second solution is to transform the various PSFs into a low dimensional space and remove the blur along each dimension [7, 17, 32]. The third way is to adopt data-driven techniques and fed training data with varying PSFs for high generalization ability [48]. One can also make extensive use of the structure of the PSF patterns to circumvent the high complexity by introducing physical constraints, e.g., transform the spatially varying aberrations in an annular ring into uniform via warping [45], and decompose the spatially varying defocus blur into several uniform ones according to the scene depth [18, 49]. Differently, our approach focuses on lens aberration which is of different features from other degenerations and can utilize the unique properties of lenses for a better design, in a similar way to [45]. In addition, we are dedicated to an end-to-end solution working for diverse lenses without model training." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.587, + 0.892, + 0.621 + ], + "angle": 0, + "content": "3. Physics-inspired Low Rank Lens Aberration Model" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.629, + 0.894, + 0.75 + ], + "angle": 0, + "content": "There exist various lens aberrations, such as spherical aberration and astigmatism, resulting in various non-uniform quality degenerations. In addition, the aberration models of diverse lenses differ a lot. To provide a universal solution for different lenses, it is of crucial importance to reduce the dimension of the PSFs and provide a unified representation, based on which one can design a low-rank model to address the aberrations in a divide-and-conquer manner." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Considering the rotational-symmetry of optical lenses [15, 16, 40], we divide the lens's field of view into several concentric rings and warp them into rectangular stripes, each of which is of approximately uniform PSFs. This operation can largely decrease the dimension of the lens aberrations [13, 21]. Further, we crop these stripes into patches and apply ONMF to find a set of orthogonal and nonnegative bases to cover the space of the lens PSFs. 
If learned from a large set of lenses, the bases can represent an arbitrary PSF with high accuracy. The orthogonality can avoid ambiguity" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24863" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.09, + 0.452, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.352, + 0.473, + 0.449 + ], + "angle": 0, + "content": "Figure 2. Illustration of the ONMF-based low-rank lens aberration model. (a) PSF dataset of 20 low-end lenses simulated by Zemax. (b) The PSF lattice consisting of 1840 PSF kernels, generated by dividing the images by each lens into 5 concentric rings, warping into stripes, and further cropping into \\(13 \\times 13\\)-pixel patches. (c) 9 top PSF bases obtained by applying Orthogonal Nonnegative Matrix Factorization onto (b)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.458, + 0.472, + 0.502 + ], + "angle": 0, + "content": "during decomposing the complex aberrations and the nonnegativity facilitates compensating aberrations in the same way as handling conventional PSFs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.504, + 0.472, + 0.64 + ], + "angle": 0, + "content": "To this end, we first collect 20 representative low-end lenses from ZEBASE database and construct a high-dimensional matrix encompassing all their PSFs after annular division (5 rings), ring-to-rectangle warping and cropping. The matrix is then factorized using the ONMF model [30] and yield a set of principal bases. The workflow detailing this procedure as visualized in Fig. 2. So far, we have arrived at a low dimensional representation of the lens aberrations, which can cover the image degeneration of diverse lenses." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.65, + 0.47, + 0.684 + ], + "angle": 0, + "content": "4. Universal Framework for Lens Aberration Correction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.473, + 0.901 + ], + "angle": 0, + "content": "The above idea of building a low-rank model to compensate the lens aberration in a divide-and-conquer manner is non-trivial for several reasons: the decomposition of aberrations in a blurry input is highly imposed; the deconvolutions need to be conducted on the components which are of different intensity ranges with general natural images; the fusion should also tolerate the imperfections in both decomposition and deconvolution. To overcome these hurdles, we propose to design a jointly trained low-rank deep network that enables flexible optical aberration correction. Specifically, our network comprises three main modules trained in an end-to-end manner. The first module conducts ONMF-based decomposition, followed by several identical adaptive deconvolution modules. Finally, we incorporate a synthetic" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.894, + 0.138 + ], + "angle": 0, + "content": "network module to further enhance the correction result. Fig. 3 summarizes the workflow of the whole network and reports the architecture of the key modules." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.146, + 0.892, + 0.162 + ], + "angle": 0, + "content": "4.1. 
Decomposing Aberrations Attributed to PSF Bases" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.169, + 0.895, + 0.215 + ], + "angle": 0, + "content": "As aforementioned, after applying matrix decomposition to the PSF library, we get a basis set \\(\\{\\mathbf{B}_i\\}\\), which can compose an arbitrary PSF \\(\\mathbf{k}\\) with corresponding coefficients \\(\\{\\alpha_i\\}\\), i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.638, + 0.224, + 0.893, + 0.256 + ], + "angle": 0, + "content": "\\[\n\\mathbf {k} = \\sum_ {i} \\alpha_ {i} \\cdot \\mathbf {B} _ {i}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.266, + 0.889, + 0.281 + ], + "angle": 0, + "content": "Hence, a recorded degraded patch \\(\\mathbf{Y}\\) can be represented as" + }, + { + "type": "equation", + "bbox": [ + 0.507, + 0.29, + 0.893, + 0.322 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Y} = \\mathbf {X} \\otimes \\left(\\sum_ {i} \\alpha_ {i} \\cdot \\mathbf {B} _ {i}\\right) + \\mathbf {n} = \\sum_ {i} \\left(\\alpha_ {i} \\cdot \\mathbf {X}\\right) \\otimes \\mathbf {B} _ {i} + \\mathbf {n} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.331, + 0.892, + 0.36 + ], + "angle": 0, + "content": "where \\(\\mathbf{X}\\) is the latent sharp image, \\(\\mathbf{n}\\) is the noise and \\(\\otimes\\) denotes 2D convolution." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.362, + 0.895, + 0.542 + ], + "angle": 0, + "content": "As Fig. 3 shows, suppose we can decompose the blurry \\( Y \\) into components \\( \\{\\mathbf{Y}_i = (\\alpha_i \\cdot \\mathbf{X}) \\otimes \\mathbf{B}_i\\} \\), we can pretrain deconvolution models to compensate the aberrations caused by \\( \\{\\mathbf{B}_i\\} \\) and estimate \\( \\mathbf{X} \\) by simply estimating the scaling factor \\( (\\alpha_i) \\). The decomposition is implemented with a deep neural network built on the U-net structure. Here we replace the plain convolution in original U-net with residual blocks for better convergence. Notice that the network is fully convolutional and can be applied to images with an arbitrary size. In our experiment, we divide the acquired image into equidistant annual rings with different widths based on the lens's physical properties." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.542, + 0.892, + 0.573 + ], + "angle": 0, + "content": "In order to ensure the decomposition performance of the network, we define the following loss function" + }, + { + "type": "equation", + "bbox": [ + 0.547, + 0.582, + 0.893, + 0.615 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {d e c o m}} = \\left\\| \\mathbf {Y} - \\sum_ {i} \\mathbf {Y} _ {i} \\right\\| _ {2} + \\sum_ {i} \\left(\\mathbf {Y} _ {i} - \\tilde {\\mathbf {Y}} _ {i}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.626, + 0.894, + 0.734 + ], + "angle": 0, + "content": "with \\(\\mathbf{Y}_i\\) and \\(\\tilde{\\mathbf{Y}}_i\\) denoting the decomposed blurry components and the generated version by Zemax software, equivalent to \\(\\mathbf{X} \\otimes (\\alpha_i \\cdot \\mathbf{B}_i)\\). Here the first term forces the summation of the decomposed components consistent with the input, and the second term ensures that each retrieved component is equal to the convolution of the sharp image \\(\\mathbf{X}\\) with the corresponding PSF basis." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.743, + 0.878, + 0.758 + ], + "angle": 0, + "content": "4.2. 
Adaptive Feature-Domain Wiener Deconvolution" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.903 + ], + "angle": 0, + "content": "The ONMF-based decomposition module can extract the blurry components caused by the corresponding PSF bases, so we design matching compensation modules and embed them into our joint framework. However, both the range and pattern of the intensities in the decomposed components differ from natural images, and deconvolving these components in the spatial domain is prone to ringing artifacts and over-smoothness. A recent study [6] demonstrates that high-quality image reconstruction can be achieved by performing" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "24864" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.082, + 0.87, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.381, + 0.896, + 0.491 + ], + "angle": 0, + "content": "Figure 3. The framework of the proposed approach. The whole pipeline comprises preprocessing to build the low-rank aberration model and a divide-and-conquer compensation. The preprocessing involves annular partitioning of images and corresponding lens PSFs, ring-to-rectangle warping, and learning a low-dimensional representation of the PSFs. The compensation consists of three primary modules: (i) Decomposition, a neural network based on Orthogonal Non-Negative Matrix Factorization (ONMF), decomposing the blurry image into components corresponding to the representation bases of lens aberration; (ii) Deconvolution, implemented as a cascaded encoder-decoder network to map the decomposed components into the feature domain and conducts pre-trained Wiener deconvolution sequentially; (iii) Fusion, which aggregates the multiple scaled versions of the latent sharp image from the previous deconvolution modules to get the final output. The whole network is trained in an end-to-end manner." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.509, + 0.47, + 0.553 + ], + "angle": 0, + "content": "deconvolution in the feature domain. Thus, we propose an adaptive feature-domain Wiener deconvolution module to recover the lost high frequencies better." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.553, + 0.472, + 0.584 + ], + "angle": 0, + "content": "Specifically, for a specific patch \\(\\mathbf{X}_i = \\alpha_i\\mathbf{X}\\), we reconstruct it from the \\(i\\)-th blurry decomposed component \\(\\mathbf{Y}_i\\)" + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.59, + 0.47, + 0.607 + ], + "angle": 0, + "content": "\\[\n\\mathbf {X} _ {i} ^ {*} = \\arg \\min \\left\\| \\mathbf {Y} _ {i} - \\mathbf {X} _ {i} \\otimes \\mathbf {B} _ {i} \\right\\|, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.47, + 0.734 + ], + "angle": 0, + "content": "where \\(\\mathbf{X}_i\\) and \\(\\mathbf{Y}_i\\) respectively represent the sharp and blurry image components matching the \\(i\\)-th PSF basis. In implementation, we build a feature-based Wiener adaptive deconvolution network. We denote \\(f_i\\) as a set of learnable linear filters and convolve \\(\\mathbf{Y}_i\\) with them to extract useful features and obtain the relationship among the blurry input, PSF, and the high-quality output in the feature domain. According to the properties of convolution, Eq. 
2 turns into" + }, + { + "type": "equation", + "bbox": [ + 0.169, + 0.742, + 0.47, + 0.758 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {i} \\mathbf {Y} _ {i} = \\mathbf {F} _ {i} \\left(\\mathbf {X} _ {i} \\otimes \\mathbf {B} _ {i}\\right) + \\mathbf {F} _ {i} \\mathbf {n}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.84 + ], + "angle": 0, + "content": "where multiplying with \\(\\mathbf{F}_i\\) is equivalent to convolving with \\(f_{i}\\). Correspondingly, the above optimization in Eq. 4 is equivalent to finding a set of feature-based Wiener deconvolution operators \\(\\mathbf{G}_i\\) (which can be obtained based on the conclusion in [6]) to reverse the aberration by \\(\\mathbf{B}_i\\)" + }, + { + "type": "equation", + "bbox": [ + 0.152, + 0.848, + 0.47, + 0.865 + ], + "angle": 0, + "content": "\\[\n\\mathbf {X} _ {i} ^ {*} = \\arg \\min \\left\\| \\mathbf {G} _ {i} \\mathbf {F} _ {i} \\mathbf {Y} _ {i} - \\mathbf {F} _ {i} \\mathbf {X} _ {i} \\right\\|. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.471, + 0.899 + ], + "angle": 0, + "content": "The compensation is implemented as a deep neural network with the network structure and learned in a data-driven" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.509, + 0.892, + 0.537 + ], + "angle": 0, + "content": "manner. During training the loss function is designed in an intuitive manner:" + }, + { + "type": "equation", + "bbox": [ + 0.588, + 0.551, + 0.892, + 0.568 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {d e c o n v}} = \\left\\| \\mathbf {F} _ {i} \\mathbf {X} _ {i} - \\mathbf {G} _ {i} \\mathbf {F} _ {i} \\mathbf {Y} _ {i} \\right\\| _ {2}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.58, + 0.894, + 0.625 + ], + "angle": 0, + "content": "The network is designed to share the parameters across all scales except for the first encoder block at the first cascade, which helps achieve fast and high-quality deconvolution." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.637, + 0.7, + 0.65 + ], + "angle": 0, + "content": "4.3. Attention-based Fusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.894, + 0.824 + ], + "angle": 0, + "content": "According to Eq. 2, each deconvolution model can provide an estimation of the latent high-quality image, multiplied by a scaling factor. However, the potential inaccuracy in decomposition and artifacts in deconvolution would harm the quality of the final output. To overcome this challenge, we propose an effective strategy to streamline the reconstruction process. Specifically, we decompose more components from the blurry input and secondly apply the corresponding deconvolution to obtain multiple aberration-compensated versions, then fuse them together via a weight-trainable fusion network to raise robustness." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.894, + 0.9 + ], + "angle": 0, + "content": "The above strategy introduces a substantial increase in running time. To accelerate the training of the deconvolution modules, we adopt a coarse-to-fine strategy, i.e., training a base model and subsequently fine-tuning it, which is largely faster than training all these basis-specific networks from" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "24865" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.182 + ], + "angle": 0, + "content": "scratch. 
Moreover, our investigation reveals that the errors in the decomposition module can harm the successive deconvolution. Consequently, we introduce the decomposition confidence to serve as a valuable indicator of the decomposition accuracy/reliability of the decomposition process, and use it to guide the fusion." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.195, + 0.21, + 0.212 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.219, + 0.471, + 0.31 + ], + "angle": 0, + "content": "In this section, after describing the details of model training in Sec. 5.1, we first analyze the advantages of specific designs and key parameter settings in our approach (Subsection 5.2). Then, we demonstrate our superior performance against the state-of-the-art (SOTA) algorithms on synthetic (Subsection 5.3) and real data (Subsection 5.4)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.318, + 0.295, + 0.334 + ], + "angle": 0, + "content": "5.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.341, + 0.47, + 0.508 + ], + "angle": 0, + "content": "For model training, we gather 300 images from the Flickr2K dataset for model training, ensuring wide applicability for diverse natural scenes. Specifically, we select 20 common commercial lenses and simulate their spatially varying PSF using Zemax software. Then we simulate the lens aberration via convolving 100 high-definition (2K) images from Flickr2K with the generated PSF and apply the successive operations to train the model, including annular decomposition, ring-to-rectangle warping, patch cropping, etc. In total, we actually obtained the sharp-blurry pair and PSF of 9200 \\((100 \\times 92)\\) patches." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.508, + 0.471, + 0.569 + ], + "angle": 0, + "content": "During model training, we adopt the Adam optimizer with default parameters. The learning rate is initialized as \\(10^{-4}\\), which is halved every 100 epochs. PyTorch code and trained models are available on our project page." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.576, + 0.434, + 0.592 + ], + "angle": 0, + "content": "5.2. Influences of the Key Parameters/Settings" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.47, + 0.795 + ], + "angle": 0, + "content": "Partitioning strategies. By capitalizing the rotational symmetry of camera lenses, we employ annular partitioning to divide the image into a sequence of concentric rings, where patches in each ring share a highly similar PSF after warping into a stripe. This strategy substantially reduces the spatial variance among PSFs across the field of view, consequently reducing the required number of bases for accurate PSF representation. The illustration and performance of the proposed partitioning method are depicted in Fig. 4(a), whereas Fig. 4(b) shows the counterpart of conventional grid partitioning. From the middle row, one can notice the highly consistent PSFs in the same annular ring in (a), in contrast to the large PSF difference among the patches in (b)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.471, + 0.901 + ], + "angle": 0, + "content": "We also compare the final performance of two partitioning strategies on the synthetic dataset. Remarkably, when utilizing an equal number of bases (9 bases), the annular splitting reaches an impressive PSF representation accuracy of 0.93 and performs 30.16dB in the final reconstruction. 
In contrast, the conventional grid splitting yields a largely lower accuracy of 0.69 and the reconstruction achieves 27.77dB." + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.092, + 0.687, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.204, + 0.686, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.602, + 0.337, + 0.617, + 0.346 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.092, + 0.86, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.204, + 0.859, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.777, + 0.337, + 0.792, + 0.346 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.355, + 0.894, + 0.439 + ], + "angle": 0, + "content": "Figure 4. Comparison between the annular partitioning in our model (a) and conventional grid partitioning (b). Top row: the illustration of partitioning. Middle row: the PSFs within the highlighted regions in the top row, with the left one stretched to a rectangle. Bottom row: the aberration compensation results with the same number of bases (9 in our experiment)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.453, + 0.894, + 0.499 + ], + "angle": 0, + "content": "Visually, we show an example in the 3rd row of Fig. 4, which shows that annular partitioning exhibits noticeable advantageous enhancement." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.503, + 0.895, + 0.73 + ], + "angle": 0, + "content": "The number of annular rings. From the previous experiment, we came to the conclusion that annular partitioning is a better option. Further, we study the proper setting of the number of concentric rings, which will directly affect the amount of calculation and the precision of PSF representation. Although increasing the number of split rings can handle the radical difference of lens aberration better, it also brings higher computational complexity. Hence, we traverse several levels of radical division, i.e., the number of annular rings to find a good balance between precision and efficiency. The results are shown in Table 1, which shows that for usual commercial lenses, the performance improvement becomes marginal when the number grows beyond five. Therefore, we compromised and chose to split into 5 rings in our experiments." + }, + { + "type": "table_caption", + "bbox": [ + 0.532, + 0.738, + 0.858, + 0.753 + ], + "angle": 0, + "content": "Table 1. The performance at varying numbers of rings." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.756, + 0.887, + 0.817 + ], + "angle": 0, + "content": "
<table><tr><td># of Rings</td><td>Patch Size (pixels)</td><td>Data Volume</td><td>PSNR (dB)</td><td>SSIM</td></tr>
<tr><td>3</td><td>214×214</td><td>75</td><td>29.12</td><td>0.914</td></tr>
<tr><td>5</td><td>128×128</td><td>92</td><td>30.96</td><td>0.952</td></tr>
<tr><td>7</td><td>92 × 92</td><td>132</td><td>30.99</td><td>0.953</td></tr>
<tr><td>9</td><td>72 × 72</td><td>160</td><td>30.91</td><td>0.950</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.825, + 0.894, + 0.901 + ], + "angle": 0, + "content": "The number of bases. We applied ONMF to obtain a low dimensional representation of the lens aberration, i.e., PSF. Intuitively, more bases provide a higher PSF representation accuracy that helps the reconstruction but tends to increase the ill-posedness of the decomposition network and the num" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "24866" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.184 + ], + "angle": 0, + "content": "ber of required deconvolution modules, harming the final performance as well as the running efficiency on the contrary. Therefore, pursuing the optimal number of bases is of crucial importance. As Tab. 2 shows, we test the performance with different numbers of bases and obtain a good balance at 9 bases, with high quality and low computational complexity." + }, + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.193, + 0.454, + 0.208 + ], + "angle": 0, + "content": "Table 2. The performances using different numbers of bases." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.211, + 0.465, + 0.273 + ], + "angle": 0, + "content": "
<table><tr><td># of bases</td><td>5</td><td>7</td><td>9</td><td>11</td><td>13</td></tr>
<tr><td>SSIM</td><td>0.901</td><td>0.927</td><td>0.952</td><td>0.955</td><td>0.955</td></tr>
<tr><td>PSNR (dB)</td><td>28.14</td><td>29.12</td><td>30.96</td><td>31.01</td><td>31.00</td></tr>
<tr><td>Training time</td><td>~9hrs</td><td>~11hrs</td><td>~12hrs</td><td>~16hrs</td><td>~18hrs</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.283, + 0.473, + 0.419 + ], + "angle": 0, + "content": "The applicability to various lenses. After determining the optimal number of bases, we test on 5 low-end commercial lenses (not included in the lens dataset) to verify the generalization ability of our model. By calculating the representation accuracy of the new lenses and the final lens correction result in Fig. 5, we arrive at two conclusions: the reconstruction quality is directly proportional to the accuracy of PSF representation; our approach is widely applicable for diverse lenses." + }, + { + "type": "image", + "bbox": [ + 0.11, + 0.432, + 0.437, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.113, + 0.554, + 0.231, + 0.707 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.245, + 0.554, + 0.335, + 0.707 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.343, + 0.554, + 0.436, + 0.707 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.719, + 0.473, + 0.831 + ], + "angle": 0, + "content": "Figure 5. Our performance on diverse lenses. (a) The representation precision (horizontal axis) and their performance (vertical axis) on 5 test lenses, in terms of PSNR. Here the scattered box plot is drawn from the compensation results of 20 test images synthesized from the Flickr2K dataset. (b) PSF (left column) and our compensation result (right column, with the bottom-left and top-right insets being input and output respectively) of two lenses with the best and worst performance in (a)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.474, + 0.903 + ], + "angle": 0, + "content": "Balancing both effectiveness and efficiency, we empirically divide the original low-quality images \\((1280\\times 1280\\) pixels) into 5 concentric rings and select 9 bases (preserving an impressive \\(96.6\\%\\) of the PSF variation in our lens" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.893, + 0.123 + ], + "angle": 0, + "content": "database) in the final implementations of our model, based on the above quantitative experiment results." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.131, + 0.738, + 0.148 + ], + "angle": 0, + "content": "5.3. Performance Comparison" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.154, + 0.895, + 0.395 + ], + "angle": 0, + "content": "We compare our approach with SOTA blind (DeblurGANv2 [17], MPRNet [26]) and non-blind (DPIR [12], DWDN [6]) deblurring methods, including both optimization-based and CNN-based algorithms. The results are shown in Tab. 3 and Fig. 6, from which one can observe the following trends: (i) Blind deblurring algorithms generally exhibit good performance in addressing motion blur problems, but in terms of lens aberration correction, our approach performs better than SOTAs and yields notably clearer images with finer details and fewer artifacts. (ii) By employing our low-rank PSF learning model, the PSF can be efficiently characterized, facilitating blind deconvolution to attain performance on par with or even superior non-blind algorithms. Overall, we achieve blind lens aberration correction, surpassing the SOTA blind deblurring methods, and perform on par with non-blind approaches." + }, + { + "type": "table_caption", + "bbox": [ + 0.518, + 0.398, + 0.874, + 0.412 + ], + "angle": 0, + "content": "Table 3. 
Quantitative performance comparison with SOTAs" + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.415, + 0.89, + 0.47 + ], + "angle": 0, + "content": "
DeblurGANv2MPRNetEboli'sDPIRDWDNOurs
Blind?××
PSNR (dB)23.0428.6729.4231.8631.7830.96
SSIM0.7260.9270.9340.9620.9600.952
" + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.484, + 0.671, + 0.5 + ], + "angle": 0, + "content": "5.4. Real experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.5, + 0.895, + 0.698 + ], + "angle": 0, + "content": "To test the performance on real data, we use several low-end compact commercial cameras composed of a simple lens to capture low-quality images and computationally raise their visual quality with our model. Besides the results in Fig. 1, we show two more examples in Fig. 7, with the photo of the cameras in the left column, blurry input in the 2nd column, and the final reconstructed high-quality outputs in the rightmost column. We also show the side-by-side zoomed-in comparison of highlighted regions in the 3rd column for better visualization. One can see that the details in both the center and the periphery are recovered decently. Also, the consistently improved quality validates the wide applicability of the proposed approach to various lenses." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.71, + 0.62, + 0.725 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.897, + 0.903 + ], + "angle": 0, + "content": "We have reported a versatile scheme capable of compensating lens aberrations of various lenses in an end-to-end manner and without model retraining or refinement. The universality of our approach stems from two key designs: Firstly, we incorporate the key physical properties inherent in camera lenses, such as rotational symmetry and low-dimensional structure after ring-to-rectangle warping; (ii) we integrate a deep neural network to reverse the aberration in a divide-and-conquer manner, i.e., decompose the low-quality input into basic components corresponding to the low-dimensional compositions of the aberration model, and" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24867" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.105, + 0.085, + 0.236, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.139, + 0.188, + 0.207, + 0.197 + ], + "angle": 0, + "content": "(a) Blurry input" + }, + { + "type": "image", + "bbox": [ + 0.238, + 0.085, + 0.328, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.24, + 0.159, + 0.328, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.085, + 0.419, + 0.153 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.422, + 0.085, + 0.508, + 0.153 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.085, + 0.596, + 0.153 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.597, + 0.085, + 0.684, + 0.153 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.685, + 0.085, + 0.772, + 0.153 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.774, + 0.085, + 0.861, + 0.153 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.199, + 0.236, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.141, + 0.308, + 0.207, + 0.318 + ], + "angle": 0, + "content": "(b) Our output" + }, + { + "type": "image", + "bbox": [ + 0.24, + 0.232, + 0.328, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.244, + 0.308, + 0.325, + 
0.318 + ], + "angle": 0, + "content": "(c) Blurry patches" + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.159, + 0.419, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.233, + 0.419, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.335, + 0.308, + 0.413, + 0.318 + ], + "angle": 0, + "content": "(d) DeblurGANv2" + }, + { + "type": "image", + "bbox": [ + 0.422, + 0.159, + 0.508, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.422, + 0.233, + 0.508, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.435, + 0.308, + 0.489, + 0.318 + ], + "angle": 0, + "content": "(e) MPRNet" + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.159, + 0.596, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.233, + 0.596, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.53, + 0.308, + 0.568, + 0.318 + ], + "angle": 0, + "content": "(f) DPIR" + }, + { + "type": "image", + "bbox": [ + 0.598, + 0.159, + 0.684, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.598, + 0.233, + 0.684, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.618, + 0.308, + 0.666, + 0.318 + ], + "angle": 0, + "content": "(g)DWDN" + }, + { + "type": "image", + "bbox": [ + 0.686, + 0.159, + 0.772, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.686, + 0.233, + 0.772, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.706, + 0.308, + 0.745, + 0.318 + ], + "angle": 0, + "content": "(h) Ours" + }, + { + "type": "image", + "bbox": [ + 0.774, + 0.159, + 0.861, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.774, + 0.233, + 0.861, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.782, + 0.308, + 0.851, + 0.318 + ], + "angle": 0, + "content": "(i) Ground truth" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.324, + 0.894, + 0.38 + ], + "angle": 0, + "content": "Figure 6. Performance comparison with SOTA methods. (a) The input blurry image. (b) The result after aberration correction by our model. (c-i) The comparison among the results produced by different SOTA algorithms (d-h), in contrast to the blurry input (c) and ground-truth sharp version (i), with PSNR and SSIM scores presented in Tab. 3. Here we compare three ROIs, which are cropped from different locations to demonstrate the performance on non-uniform lens aberrations. Note that (d-h) are blind compensation results and (f)(g) are non-blind." 
+ }, + { + "type": "image", + "bbox": [ + 0.111, + 0.385, + 0.281, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.136, + 0.676, + 0.255, + 0.687 + ], + "angle": 0, + "content": "(a) Experimental lenses" + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.385, + 0.472, + 0.528 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.285, + 0.53, + 0.472, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.357, + 0.676, + 0.402, + 0.688 + ], + "angle": 0, + "content": "(b) Input" + }, + { + "type": "image", + "bbox": [ + 0.476, + 0.385, + 0.572, + 0.456 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.477, + 0.458, + 0.569, + 0.529 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.476, + 0.53, + 0.569, + 0.601 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.476, + 0.602, + 0.569, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.572, + 0.385, + 0.667, + 0.456 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.573, + 0.458, + 0.666, + 0.529 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.573, + 0.53, + 0.666, + 0.601 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.573, + 0.602, + 0.666, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.52, + 0.676, + 0.623, + 0.688 + ], + "angle": 0, + "content": "(c) ROI performance" + }, + { + "type": "image", + "bbox": [ + 0.669, + 0.385, + 0.856, + 0.528 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.67, + 0.53, + 0.855, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.736, + 0.676, + 0.79, + 0.688 + ], + "angle": 0, + "content": "(d) Output" + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.693, + 0.894, + 0.749 + ], + "angle": 0, + "content": "Figure 7. Results on real data captured by two compact cameras with a low-end lens. (a) The photo of two lenses, with the smaller one being only around \\(1\\mathrm{cm}\\), which is highly portable but exhibits significant aberrations. (b)(d) Raw images captured using the cameras in (a) and the results after compensation. (c) The zoomed-in view of the highlighted regions in (b) and (d), distinctly showcases the performance at both regions closer to the center (red boxes) and toward the corners (yellow boxes) of the sensor's field of view." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.855 + ], + "angle": 0, + "content": "then adopt pre-trained compensation modules to reconstruct the high-quality image with high robustness. The proposed approach offers high generalization ability to diverse lenses and requires no expertise-demanding calibration. Moreover, we achieve performance comparable to existing methods with careful calibration and lens-specific model training." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.901 + ], + "angle": 0, + "content": "So far, our experiments assume sufficient exposure and high pixel count, but we acknowledge the potential for future enhancements, such as accounting for more realistic noise" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.886 + ], + "angle": 0, + "content": "models and addressing other degradations like downsampling. 
Looking ahead, we envision further advancements of our model, focusing on the development of a lightweight network and on-chip implementation. As computational aberration compensation continues to progress, our method serves as a promising step towards enabling practical and cost-effective optical aberration correction for a wide range of applications." + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "24868" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Samuel Arba-Mosquera, Shwetabh Verma, and Shady T Awwad. Theoretical effect of coma and spherical aberrations translation on refractive error and higher order aberrations. In Photonics, page 116. MDPI, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.214 + ], + "angle": 0, + "content": "[2] Johannes Brauers, Claude Seiler, and Til Aach. Direct psf estimation using a random noise target. In Digital Photography VI, pages 96-105. SPIE, 2010. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.216, + 0.472, + 0.272 + ], + "angle": 0, + "content": "[3] Ayan Chakrabarti, Todd Zickler, and William T Freeman. Analyzing spatially-varying blur. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2512-2519. IEEE, 2010. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.274, + 0.472, + 0.315 + ], + "angle": 0, + "content": "[4] Jinlin Cui and Wei Huang. Optical aberration correction for simple lenses via sparse representation. Optics Communications, pages 201-213, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.317, + 0.472, + 0.373 + ], + "angle": 0, + "content": "[5] Maurizio Delbracio, Ignacio Garcia-Dorado, SungJoon Choi, Damien Kelly, and Peyman Milanfar. Polyblur: Removing mild blur by polynomial reburring. IEEE Transactions on Computational Imaging, pages 837-848, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.374, + 0.472, + 0.429 + ], + "angle": 0, + "content": "[6] Jiangxin Dong, Stefan Roth, and Bernt Schiele. DWDN: deep wiener deconvolution network for non-blind image deblurring. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 9960-9976, 2021. 4, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.431, + 0.472, + 0.485 + ], + "angle": 0, + "content": "[7] Thomas Eboli, Jean-Michel Morel, and Gabriele Facciolo. Fast two-step blind optical aberration correction. In European Conference on Computer Vision, pages 693-708. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.488, + 0.472, + 0.543 + ], + "angle": 0, + "content": "[8] T Furieri, A Bassi, and S Bonora. Large field of view aberrations correction with deformable lenses and multi conjugate adaptive optics. Journal of Biophotonics, page e202300104, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.546, + 0.472, + 0.601 + ], + "angle": 0, + "content": "[9] O García-Lievanos and S Vázquez-Montiel. Free system of spherical and coma aberrations by use aspherical and diffractive surfaces. In AIP Conference Proceedings, pages 659-664. American Institute of Physics, 2008. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.603, + 0.472, + 0.658 + ], + "angle": 0, + "content": "[10] Felix Heide, Mushfiqur Rouf, Matthias B Hullin, Bjorn Labitzke, Wolfgang Heidrich, and Andreas Kolb. High-quality computational imaging through simple lenses. ACM Transactions on Graphics, pages 1-14, 2013. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.66, + 0.472, + 0.702 + ], + "angle": 0, + "content": "[11] Michael Hirsch and Bernhard Scholkopf. Self-calibration of optical lenses. In Proceedings of the IEEE International Conference on Computer Vision, pages 612-620, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.704, + 0.472, + 0.787 + ], + "angle": 0, + "content": "[12] Zhanli Hu, Hengzhi Xue, Qiyang Zhang, Juan Gao, Na Zhang, Sijuan Zou, Yueyang Teng, Xin Liu, Yongfeng Yang, Dong Liang, et al. DPIR-Net: Direct pet image reconstruction based on the wasserstein generative adversarial network. IEEE Transactions on Radiation and Plasma Medical Sciences, pages 35–43, 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.472, + 0.844 + ], + "angle": 0, + "content": "[13] Qi Jiang, Hao Shi, Lei Sun, Shaohua Gao, Kailun Yang, and Kaiwei Wang. Annular computational imaging: Capture clear panoramic images through simple lens. IEEE Transactions on Computational Imaging, 8:1250-1264, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.472, + 0.9 + ], + "angle": 0, + "content": "[14] Jaihyun Koh, Jangho Lee, and Sungroh Yoon. Single-image deblurring with neural networks: A comparative survey. Computer Vision and Image Understanding, page 103134, 2021. 1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.149 + ], + "angle": 0, + "content": "[15] Amit Kohli, Anastasios Angelopoulos, Sixian You, and Laura Waller. Shift-variant deblurring for rotationally symmetric systems. In Computational Optical Sensing and Imaging, pages CTh5A-4. Optica Publishing Group, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.151, + 0.894, + 0.205 + ], + "angle": 0, + "content": "[16] Amit Kohli, Anastasios Angelopoulos, Sixian You, Kyrolos Yanny, and Laura Waller. Linear revolution-invariance: Modeling and deblurring spatially-varying imaging systems. arXiv preprint arXiv:2206.08928, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.207, + 0.894, + 0.275 + ], + "angle": 0, + "content": "[17] Orest Kupyn, Tetiana Martyniuk, Junru Wu, and Zhangyang Wang. Deblurring-v2: Deblurring (orders-of-magnitude) faster and better. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8878-8887, 2019. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.277, + 0.894, + 0.331 + ], + "angle": 0, + "content": "[18] Anat Levin, Rob Fergus, Frédo Durand, and William T Freeman. Image and depth from a conventional camera with a coded aperture. ACM Transactions on Graphics, pages 70-81, 2007. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.334, + 0.894, + 0.387 + ], + "angle": 0, + "content": "[19] Weili Li, Xiaoqing Yin, Yu Liu, and Maojun Zhang. Computational imaging through chromatic aberration corrected simple lenses. Journal of Modern Optics, pages 2211-2220, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.391, + 0.894, + 0.46 + ], + "angle": 0, + "content": "[20] Xiu Li, Jinli Suo, Weihang Zhang, Xin Yuan, and Qionghai Dai. 
Universal and flexible optical aberration correction using deep-prior based deconvolution. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2613-2621, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.462, + 0.894, + 0.517 + ], + "angle": 0, + "content": "[21] Esther YH Lin, Zhecheng Wang, Rebecca Lin, Daniel Miau, Florian Kainz, Jiawen Chen, Xuaner Cecilia Zhang, David B Lindell, and Kiriakos N Kutulakos. Learning lens blur fields. arXiv preprint arXiv:2310.11535, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.519, + 0.894, + 0.573 + ], + "angle": 0, + "content": "[22] Ting Lin, ShiQi Chen, Huajun Feng, Zhihai Xu, Qi Li, and Yueting Chen. Non-blind optical degradation correction via frequency self-adaptive and finetune tactics. Optics Express, pages 23485-23498, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.576, + 0.894, + 0.631 + ], + "angle": 0, + "content": "[23] Alice Lucas, Michael Iliadis, Rafael Molina, and Aggelos K Katsaggelos. Using deep neural networks for inverse problems in imaging: Beyond analytical methods. IEEE Signal Processing Magazine, pages 20-36, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.633, + 0.894, + 0.674 + ], + "angle": 0, + "content": "[24] Virendra N Mahajan. Zernike annular polynomials for imaging systems with annular pupils. Journal of the Optical Society of America, pages 75-85, 1981. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.676, + 0.894, + 0.716 + ], + "angle": 0, + "content": "[25] James P McGuire and Russell A Chipman. Polarization aberrations. 1. rotationally symmetric optical systems. Applied optics, pages 5080-5100, 1994. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.718, + 0.894, + 0.786 + ], + "angle": 0, + "content": "[26] Armin Mehri, Parichehr B Ardakani, and Angel D Sappa. MPRNet: Multi-path residual network for lightweight image super resolution. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2704-2713, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.789, + 0.894, + 0.856 + ], + "angle": 0, + "content": "[27] Ali Mosleh, Paul Green, Emmanuel Onzon, Isabelle Begin, and JM Pierre Langlois. Camera intrinsic blur kernel estimation: A reliable framework. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4961-4968, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.859, + 0.894, + 0.902 + ], + "angle": 0, + "content": "[28] Gregory Ongie, Ajil Jalal, Christopher A Metzler, Richard G Baraniuk, Alexandros G Dimakis, and Rebecca Willett. Deep learning techniques for inverse problems in imaging. IEEE" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "24869" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.472, + 0.119 + ], + "angle": 0, + "content": "Journal on Selected Areas in Information Theory, pages 39-56, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.121, + 0.472, + 0.175 + ], + "angle": 0, + "content": "[29] Kambiz Rahbar and Karim Faez. Blind correction of lens aberration using zernike moments. In IEEE International Conference on Image Processing, pages 861-864. IEEE, 2011. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.177, + 0.472, + 0.246 + ], + "angle": 0, + "content": "[30] Abderrahmane Rahiche and Mohamed Cheriet. Forgery detection in hyperspectral document images using graph orthogonal nonnegative matrix factorization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 662-663, 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.247, + 0.472, + 0.314 + ], + "angle": 0, + "content": "[31] Jiangpeng Rong, Shiyao Huang, Zeyu Shang, and Xianghua Ying. Radial lens distortion correction using convolutional neural networks trained with synthesized images. In *Asian Conference on Computer Vision*, pages 35-49. Springer, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.316, + 0.472, + 0.371 + ], + "angle": 0, + "content": "[32] Christian J Schuler, Michael Hirsch, Stefan Harmeling, and Bernhard Schölkopf. Blind correction of optical aberrations. In European Conference on Computer Vision, pages 187-200. Springer, 2012. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.372, + 0.472, + 0.426 + ], + "angle": 0, + "content": "[33] Yichang Shih, Brian Guenter, and Neel Joshi. Image enhancement using calibrated lens simulations. In European Conference on Computer Vision, pages 42-56. Springer, 2012. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.428, + 0.472, + 0.483 + ], + "angle": 0, + "content": "[34] Tiancheng Sun, Yifan Peng, and Wolfgang Heidrich. Revisiting cross-channel information transfer for chromatic aberration correction. In Proceedings of the IEEE International Conference on Computer Vision, pages 3248-3256, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.484, + 0.472, + 0.539 + ], + "angle": 0, + "content": "[35] Huixuan Tang and Kiriakos N Kutulakos. What does an aberrated photo tell us about the lens and the scene? In IEEE International Conference on Computational Photography, pages 1-10. IEEE, 2013. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.539, + 0.472, + 0.621 + ], + "angle": 0, + "content": "[36] Kaiyi Tang, Shuangyang Zhang, Yang Wang, Xiaoming Zhang, Zhenyang Liu, Zhichao Liang, Huafeng Wang, Lingjian Chen, Wufan Chen, and Li Qi. Learning spatially variant degradation for unsupervised blind photoacoustic tomography image restoration. Photoacoustics, page 100536, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.622, + 0.472, + 0.663 + ], + "angle": 0, + "content": "[37] Berge Tatian. Aberration balancing in rotationally symmetric lenses. Journal of the Optical Society of America, pages 1083-1091, 1974. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.665, + 0.472, + 0.733 + ], + "angle": 0, + "content": "[38] Chao Wang, Juan Chen, Hongguang Jia, Baosong Shi, Ruifei Zhu, Qun Wei, Linyao Yu, and Mingda Ge. Parameterized modeling of spatially varying psf for lens aberration and defocus. Journal of the Optical Society of Korea, pages 136-143, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.734, + 0.472, + 0.79 + ], + "angle": 0, + "content": "[39] Pei Wang, Wei Sun, Qingsen Yan, Axi Niu, Rui Li, Yu Zhu, Jinqiu Sun, and Yanning Zhang. Non-uniform motion deblurring with blurry component divided guidance. Pattern Recognition, page 108082, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.472, + 0.845 + ], + "angle": 0, + "content": "[40] Esther Whang, David McAllister, Ashwin Reddy, Amit Kohli, and Laura Waller. 
Seidelnet: an aberration-informed deep learning model for spatially varying deblurring. In AI and Optical Data Sciences IV, pages 276-281. SPIE, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.472, + 0.9 + ], + "angle": 0, + "content": "[41] Chudan Wu, Yan Wo, Guoqing Han, Zhangyong Wu, and Jiyun Liang. Non-uniform image blind deblurring by two-stage fully convolution network. IET Image Processing, pages 2588-2596, 2020. 1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.893, + 0.148 + ], + "angle": 0, + "content": "[42] Zhenhua Xu, Huasong Chen, and Zhenhua Li. Fast blind deconvolution using a deeper sparse patch-wise maximum gradient prior. Signal Processing: Image Communication, page 116050, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.894, + 0.205 + ], + "angle": 0, + "content": "[43] Jianchao Yang, John Wright, Thomas Huang, and Yi Ma. Image super-resolution as sparse representation of raw image patches. In IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.206, + 0.894, + 0.248 + ], + "angle": 0, + "content": "[44] Kyrollos Yanny, Kristina Monakhova, Richard W Shuai, and Laura Waller. Deep learning for fast spatially varying deconvolution. Optica, pages 96-99, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.894, + 0.317 + ], + "angle": 0, + "content": "[45] Tao Yue, Jinli Suo, Jue Wang, Xun Cao, and Qionghai Dai. Blind optical aberration correction by exploring geometric and visual priors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1684-1692, 2015. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.319, + 0.894, + 0.374 + ], + "angle": 0, + "content": "[46] Dazhi Zhan, Weili Li, Xiaoqing Yin, Caiyun Niu, and Jin Liu. Psf estimation method of simple-lens camera using normal sinh-arcsinh model based on noise image pairs. IEEE Access, pages 49338-49353, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.375, + 0.894, + 0.432 + ], + "angle": 0, + "content": "[47] Kaihao Zhang, Wenqi Ren, Wenhan Luo, Wei-Sheng Lai, Björn Stenger, Ming-Hsuan Yang, and Hongdong Li. Deep image deblurring: A survey. International Journal of Computer Vision, pages 2103-2130, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.433, + 0.894, + 0.502 + ], + "angle": 0, + "content": "[48] Zhihong Zhang, Yuxiao Cheng, Jinli Suo, Liheng Bian, and Qionghai Dai. INFWIDE: Image and feature space wiener deconvolution network for non-blind image deblurring in low-light conditions. IEEE Transactions on Image Processing, pages 1390-1402, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.503, + 0.894, + 0.557 + ], + "angle": 0, + "content": "[49] Changyin Zhou, Stephen Lin, and Shree K Nayar. Coded aperture pairs for depth from defocus and defocus deblurring. International Journal of Computer Vision, pages 53-72, 2011. 
3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.557 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "24870" + } + ] +] \ No newline at end of file diff --git a/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/aeee8578-b512-4a62-8ec3-b06e011ce338_origin.pdf b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/aeee8578-b512-4a62-8ec3-b06e011ce338_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..07ea99f3fc5b836b67bd798ba79d42616eadaae8 --- /dev/null +++ b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/aeee8578-b512-4a62-8ec3-b06e011ce338_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80c7c5c100f37ca535fd3141c0cd06dce7777833f3f48e48a33d6ca01f1b4b4f +size 2567489 diff --git a/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/full.md b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/full.md new file mode 100644 index 0000000000000000000000000000000000000000..eaeb877fd83f88dcc5a4021ffb8c963e15a4d611 --- /dev/null +++ b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/full.md @@ -0,0 +1,387 @@ +# A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction + +Jin Gong1 + +Runzhao Yang + +Weihang Zhang + +Jinli Suo1,2,3 + +Qionghai Dai $^{1,2}$ + +$^{1}$ Department of Automation, Tsinghua University, Beijing, China + +$^{2}$ Institute of Brain and Cognitive Sciences, Tsinghua University, Beijing, China + +$^{3}$ Shanghai Artificial Intelligence Laboratory, Shanghai, China + +# Abstract + +High-end lenses, although offering high-quality images, suffer from both insufficient affordability and bulky design, which hamper their applications in low-budget scenarios or on low-payload platforms. A flexible scheme is to tackle the optical aberration of low-end lenses computationally. However, it is highly demanded but quite challenging to build a general model capable of handling non-stationary aberrations and covering diverse lenses, especially in a blind manner. To address this issue, we propose a universal solution by extensively utilizing the physical properties of camera lenses: (i) reducing the complexity of lens aberrations, i.e., lens-specific non-stationary blur, by warping annual-ring-shaped sub-images into rectangular stripes to transform non-uniform degenerations into a uniform one, (ii) building a low-dimensional non-negative orthogonal representation of lens blur kernels to cover diverse lenses; (iii) designing a decoupling network to decompose the input low-quality image into several components degenerated by above kernel bases, and applying corresponding pre-trained deconvolution networks to reverse the degeneration. Benefiting from the proper incorporation of lenses' physical properties and unique network design, the proposed method achieves superb imaging quality, wide applicability for various lenses, high running efficiency, and is totally free of kernel calibration. These advantages bring great potential for scenarios requiring lightweight high-quality photography. + +# 1. 
Introduction + +High-quality photography is of crucial importance for both high-fidelity visual recording (e.g., filmmaking, sports video capturing) and sophisticated computer vision tasks (e.g., surveillance, auto-piloting). High-end camera systems often employ compound lenses comprising approximately ten or more components constructed from diverse materials to com + +pensate for geometric and photometric aberrations. Such complicated designs are proven to be effective in achieving nice image quality, but come with inherent drawbacks, including high costs, bulkiness, and fragility, making them unsuitable for scenarios with low payload capacity or limited budgets. Consequently, the demand for high-quality photography using lightweight lenses has significantly intensified. + +Considering the optical aberration in image formation and the afterward processing jointly, the workload in optical design can be shifted to the later stage [23, 28], where advanced reconstruction algorithms play a crucial role. One can also utilize the physical properties in the imaging setup to facilitate reconstruction, and researchers have made some primary efforts in this direction [32, 45]. + +However, grand challenges lie in the variability of optical aberrations across the field of view and diverse lenses. On the one hand, the quality degeneration of a simple lens is intrinsically a convolution with non-uniform blur kernels, and the typical compensation algorithms [43] approximate the globally non-uniform deconvolution with patch-wise uniform deconvolutions, leading to a trade-off between precision and computational efficiency. This trade-off comes up with high inflexibility when adopting data-driven approaches [14, 41], which require learning a large number of models to achieve high-performance results. On the other hand, the degradation of different lenses varies significantly, so lens-specific algorithm development or parameter optimizations are required for high reconstruction performance. Furthermore, the calibration of the PSF kernels of camera lenses is quite expertise-demanding[27, 46], and blind compensation is more favorable for users. Overall, computational compensation for lens aberrations holds great promise for achieving lightweight high-quality imaging. However, there is a pressing demand for a general approach to handle spatially varying aberrations of diverse lenses in a blind manner. + +In this paper, we propose a physics-informed end-to-end solution that (i) capitalizes the characteristics of lens aber- + +![](images/43ac21dc81c98da0b467877caa52ff73559924c9d50ea82bc8a9cf817b333c3b.jpg) + +![](images/81dc18bd5f117798e5669563274de6a1eb43eb5f441b4b3b0fba963ba1548e28.jpg) +(a) + +![](images/d24f4a8cae61e840e5f85b8cd3d16fae3c761af2fe607887c2c06097ccc366a4.jpg) + +![](images/3179b86b6463bb4cfb6a71d472ff0b90f0af7c89ae9fe49c81833d5d893a39e1.jpg) +(b) + +![](images/0927b0af848e60f9e7b9d4a2d4e73bec4f3d783b8bd8d5d859d1845256b191eb.jpg) +(c) +Figure 1. An illustrative example of our lens-aberration compensation approach. (a) A camera equipped with a simple lens of a large field of view but severe optical aberration (WXSJ-H65HD) and a small-sized unmanned aerial vehicle (UAV) carrying the camera for data capture. (b) The input degenerated image (upper) and our reconstruction result (lower). (c) The zoomed-in comparison on the highlighted region (white box) in (b), where the original recording is shown in the bottom left corner and the reconstructed result in the top right corner. 
+ +ration to construct a low-dimensional representation of nonuniform blur kernels of general camera lenses, and (ii) designs a deep neural network resolving the degeneration components in the low-quality input and ensembling a set of pretrained compensators to reverse the degeneration robustly. Specifically, we represent an arbitrary local point spread function (PSF) with a set of negative orthogonal bases, pretrain their corresponding deconvolution modules, and then retrieve their degeneration from the low-quality image captured by a low-end lens and apply the pre-trained inversion models accordingly. The proposed approach demonstrates high performance and holds high potential in lightweight photography on low-payload platforms, as shown by the impressive results captured with a small drone equipped with a compact surveillance camera in Fig. 1. + +In summary, we target for general, blind, and end-to-end lens aberration correction, and make the following contributions: + +1) Proposes a unified framework for lens aberration compensation with high flexibility to diverse aberrations produced by the wide range of camera lenses. +2) Builds a general low-dimensional model of lens aberrations based on Orthogonal Non-negative Matrix Factorization, utilizing the physical properties of optical lenses. +3) Designs an end-to-end network to divide and conquer the optical aberrations in the above low-dimensional space, enabling fast and blind inversion of diverse lens degeneration and ultimately lightweight high-quality imaging. +4) Demonstrates performance comparable to state-of-the-art non-blind lens-specific algorithms, validating its great potential in budget-constrained or low-capacity platforms. + +# 2. Related Work + +Lens aberration modeling. Generally, imaging lenses are physically rotational symmetric around the optical center, imparting rotation symmetry of the lens aberration [25, 37]. Ignoring the fabrication imperfections, almost all types of lens aberrations, such as spherical aberration, coma aberration, and chromatic aberration [1, 9], form a rotational symmetric pattern. + +Utilizing these unique features of lens aberration, researchers have proposed some methods to simplify the degeneration by diverse lenses. For example, Rahbar et al. [29] adopt the Zernike model [24] to describe optical aberrations and can estimate the Zernike coefficients of a single channel through bicoherence and tricoherence estimation techniques, while Schuler et al. [32] represent the non-uniform aberrations with a set of orthonormal Efficient Filter Flow, which is applicable for most cases without large spherical aberration. Differently, Yue et al. [45] leverage the global rotational symmetry properties of regular lenses to transform non-uniform aberrations into uniform rings using radial splitting and warping techniques. This method capitalizes on the inherent physical properties of the imaging lens and largely simplifies the aberration model, offering inspiration for our approach to explore the unique structures of lens aberrations. However, their implementations use conventional optimization by alternatively kernel estimation and deblurring, which is time-consuming and the strong consumption of PSF uniformity within a stretched annular ring harms the performance slightly, which limits the applications demanding real-time and high-quality compensation. In contrast, we design a framework consisting of well-organized sub-networks to address all these issues decently. + +Optical aberration removal. 
Lens aberration exists widely in optical imaging systems, and computational correction is an essential way to raise imaging quality without increasing hardware budget. One common way to model lens aberration is by convolving the image with spatially varying PSFs and compensation is naturally conducted via deconvolution [38, 44]. Existing correction methods can be broadly categorized into non-blind and blind ones. + +Non-blind correction assumes known PSFs and algorithms are extensively studied [4, 22]. Researchers have proposed different algorithms to estimate kernel PSFs via, e.g., using a combination of defocused and focused images [2], a set of binary random patterns [10], aggregating degenerations in informative nature image patches [11], analyzing the spectral characteristics of the lens system [3, 19, 34]. Further, to cover PSFs of different camera lenses, Shih et al. [33] utilize interpolation methods by fitting a spatial Gaussian model. More recently, Li et al. [20] propose a data-driven deep learning approach explicitly taking the PSF as input and introducing lens-specific priors for high-quality compensation. In sum, non-blind lens aberration compensation techniques have shown promising performance, but they require expertise demanding PSF calibration or robust estimation, which are not friendly for non-expert users and require lens-specific model training. + +Blind methods have gained significant attention due to their convenience for un-trained users and high flexibility to diverse lenses. The typical strategy is to estimate PSFs and conduct compensation sequentially. Among them Rahbar et al. [29] introduce Zernike moments, and Tang & Kutulakos [35] employ the Seidel model to simplify the lens aberration model; while Delbracio et al. [5] propose a robust algorithm based on empirical observations about the distribution of the gradient in clear natural images. Recently, some researchers have adopted the data-driven scheme and developed deep neural networks to compensate for the aberrations, but often focus on specific types of aberrations to ensure good convergence, such as radial lens distortion [31] and chromatic aberrations [7]. + +In light of these developments, the blind compensation techniques (either based on conventional optimization or deep neural networks) cannot achieve performance comparable to their non-blind counterpart. Besides, existing methods are incapable of handling diverse lenses and various types of aberrations flexibly [4, 8]. In contrast, the proposed work leverages the physical properties of camera lenses and casts the complex aberrations of diverse lenses into a unified low-dimensional space, in which we divide and conquer the degeneration via incorporating geometric priors and a small number of pre-trained modules. Benefiting from the proper use of lenses' physical properties and elegant network design, we can achieve performance comparable to non-blind techniques as well. Such a general end-to-end + +blind solution with superb performance holds great potential for high-quality lightweight imaging systems on portable devices or low-capacity mobile platforms. + +Non-uniform deconvolution. Mathematically lens aberration compensation can be described as a non-uniform deconvolution process and shares the same formulation with other tasks such as camera shake, defocus, object motion, etc. Various techniques and approaches have been proposed to address the technical challenges posed by non-uniform blurring [39, 47]. 
There are three main ways to address the non-uniformity. The most intuitive and widely used way is to assume patch-wise uniform PSFs and conduct deconvolution patch by patch [36, 42]. The deconvolution can be implemented via conventional optimization previously and deep neural networks recently, and usually in a non-blind manner. There are a bunch of algorithms, and we do not list them here. The second solution is to transform the various PSFs into a low dimensional space and remove the blur along each dimension [7, 17, 32]. The third way is to adopt data-driven techniques and fed training data with varying PSFs for high generalization ability [48]. One can also make extensive use of the structure of the PSF patterns to circumvent the high complexity by introducing physical constraints, e.g., transform the spatially varying aberrations in an annular ring into uniform via warping [45], and decompose the spatially varying defocus blur into several uniform ones according to the scene depth [18, 49]. Differently, our approach focuses on lens aberration which is of different features from other degenerations and can utilize the unique properties of lenses for a better design, in a similar way to [45]. In addition, we are dedicated to an end-to-end solution working for diverse lenses without model training. + +# 3. Physics-inspired Low Rank Lens Aberration Model + +There exist various lens aberrations, such as spherical aberration and astigmatism, resulting in various non-uniform quality degenerations. In addition, the aberration models of diverse lenses differ a lot. To provide a universal solution for different lenses, it is of crucial importance to reduce the dimension of the PSFs and provide a unified representation, based on which one can design a low-rank model to address the aberrations in a divide-and-conquer manner. + +Considering the rotational-symmetry of optical lenses [15, 16, 40], we divide the lens's field of view into several concentric rings and warp them into rectangular stripes, each of which is of approximately uniform PSFs. This operation can largely decrease the dimension of the lens aberrations [13, 21]. Further, we crop these stripes into patches and apply ONMF to find a set of orthogonal and nonnegative bases to cover the space of the lens PSFs. If learned from a large set of lenses, the bases can represent an arbitrary PSF with high accuracy. The orthogonality can avoid ambiguity + +![](images/f9fcfbe181704b0b897fbf0b1141c1eb10eb8a89c1ce6de54eaf3ddd60955158.jpg) +Figure 2. Illustration of the ONMF-based low-rank lens aberration model. (a) PSF dataset of 20 low-end lenses simulated by Zemax. (b) The PSF lattice consisting of 1840 PSF kernels, generated by dividing the images by each lens into 5 concentric rings, warping into stripes, and further cropping into $13 \times 13$ -pixel patches. (c) 9 top PSF bases obtained by applying Orthogonal Nonnegative Matrix Factorization onto (b). + +during decomposing the complex aberrations and the nonnegativity facilitates compensating aberrations in the same way as handling conventional PSFs. + +To this end, we first collect 20 representative low-end lenses from ZEBASE database and construct a high-dimensional matrix encompassing all their PSFs after annular division (5 rings), ring-to-rectangle warping and cropping. The matrix is then factorized using the ONMF model [30] and yield a set of principal bases. The workflow detailing this procedure as visualized in Fig. 2. 
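To make this preprocessing concrete, below is a minimal Python sketch of the Fig. 2 workflow: annular partitioning, ring-to-rectangle warping, patch cropping, and a low-rank non-negative factorization of the pooled PSF patches. The PSF maps, ring count, and patch width here are toy placeholders, the helper names are illustrative, and scikit-learn's plain NMF stands in for the ONMF of [30], so the orthogonality constraint on the bases is not enforced in this sketch.

```python
# Minimal, illustrative sketch of the Fig. 2 preprocessing pipeline:
# annular partitioning -> ring-to-rectangle warping -> patch cropping -> NMF.
import numpy as np
from scipy.ndimage import map_coordinates
from sklearn.decomposition import NMF


def ring_to_rectangle(img, r_in, r_out, n_theta=720, n_r=13):
    """Warp the annulus r_in <= r <= r_out of a square image into a
    rectangular (n_r x n_theta) stripe by bilinear polar resampling."""
    h, w = img.shape
    cy, cx = (h - 1) / 2.0, (w - 1) / 2.0
    theta = np.linspace(0.0, 2.0 * np.pi, n_theta, endpoint=False)
    radius = np.linspace(r_in, r_out, n_r)
    rr, tt = np.meshgrid(radius, theta, indexing="ij")
    ys, xs = cy + rr * np.sin(tt), cx + rr * np.cos(tt)
    return map_coordinates(img, [ys, xs], order=1, mode="nearest")


def psf_patch_matrix(psf_maps, n_rings=5, patch_w=13):
    """Cut each PSF map into concentric rings, warp every ring into a
    stripe, crop the stripe into patch_w-wide patches, and stack all
    patches as rows of a non-negative data matrix."""
    patches = []
    for psf in psf_maps:
        r_max = min(psf.shape) / 2.0
        edges = np.linspace(0.0, r_max, n_rings + 1)
        for r_in, r_out in zip(edges[:-1], edges[1:]):
            stripe = ring_to_rectangle(psf, r_in, r_out, n_r=patch_w)
            for c in range(0, stripe.shape[1] - patch_w + 1, patch_w):
                patches.append(stripe[:, c:c + patch_w].ravel())
    return np.maximum(np.asarray(patches), 0.0)


# Toy stand-ins for the simulated PSF maps of the lens set.
rng = np.random.default_rng(0)
toy_psf_maps = [rng.random((256, 256)) for _ in range(4)]
M = psf_patch_matrix(toy_psf_maps)              # (num_patches, 13 * 13)
nmf = NMF(n_components=9, init="nndsvda", max_iter=500)
coeffs = nmf.fit_transform(M)                   # per-patch coefficients
bases = nmf.components_.reshape(9, 13, 13)      # 9 PSF bases, cf. Fig. 2(c)
```

In the actual model the factorization is ONMF, whose orthogonal and non-negative bases avoid ambiguity in the later decomposition; the plain NMF above only illustrates the data flow.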
So far, we have arrived at a low dimensional representation of the lens aberrations, which can cover the image degeneration of diverse lenses. + +# 4. Universal Framework for Lens Aberration Correction + +The above idea of building a low-rank model to compensate the lens aberration in a divide-and-conquer manner is non-trivial for several reasons: the decomposition of aberrations in a blurry input is highly imposed; the deconvolutions need to be conducted on the components which are of different intensity ranges with general natural images; the fusion should also tolerate the imperfections in both decomposition and deconvolution. To overcome these hurdles, we propose to design a jointly trained low-rank deep network that enables flexible optical aberration correction. Specifically, our network comprises three main modules trained in an end-to-end manner. The first module conducts ONMF-based decomposition, followed by several identical adaptive deconvolution modules. Finally, we incorporate a synthetic + +network module to further enhance the correction result. Fig. 3 summarizes the workflow of the whole network and reports the architecture of the key modules. + +# 4.1. Decomposing Aberrations Attributed to PSF Bases + +As aforementioned, after applying matrix decomposition to the PSF library, we get a basis set $\{\mathbf{B}_i\}$ , which can compose an arbitrary PSF $\mathbf{k}$ with corresponding coefficients $\{\alpha_i\}$ , i.e., + +$$ +\mathbf {k} = \sum_ {i} \alpha_ {i} \cdot \mathbf {B} _ {i}. \tag {1} +$$ + +Hence, a recorded degraded patch $\mathbf{Y}$ can be represented as + +$$ +\mathbf {Y} = \mathbf {X} \otimes \left(\sum_ {i} \alpha_ {i} \cdot \mathbf {B} _ {i}\right) + \mathbf {n} = \sum_ {i} \left(\alpha_ {i} \cdot \mathbf {X}\right) \otimes \mathbf {B} _ {i} + \mathbf {n} \tag {2} +$$ + +where $\mathbf{X}$ is the latent sharp image, $\mathbf{n}$ is the noise and $\otimes$ denotes 2D convolution. + +As Fig. 3 shows, suppose we can decompose the blurry $Y$ into components $\{\mathbf{Y}_i = (\alpha_i \cdot \mathbf{X}) \otimes \mathbf{B}_i\}$ , we can pretrain deconvolution models to compensate the aberrations caused by $\{\mathbf{B}_i\}$ and estimate $\mathbf{X}$ by simply estimating the scaling factor $(\alpha_i)$ . The decomposition is implemented with a deep neural network built on the U-net structure. Here we replace the plain convolution in original U-net with residual blocks for better convergence. Notice that the network is fully convolutional and can be applied to images with an arbitrary size. In our experiment, we divide the acquired image into equidistant annual rings with different widths based on the lens's physical properties. + +In order to ensure the decomposition performance of the network, we define the following loss function + +$$ +\mathcal {L} _ {\mathrm {d e c o m}} = \left\| \mathbf {Y} - \sum_ {i} \mathbf {Y} _ {i} \right\| _ {2} + \sum_ {i} \left(\mathbf {Y} _ {i} - \tilde {\mathbf {Y}} _ {i}\right) \tag {3} +$$ + +with $\mathbf{Y}_i$ and $\tilde{\mathbf{Y}}_i$ denoting the decomposed blurry components and the generated version by Zemax software, equivalent to $\mathbf{X} \otimes (\alpha_i \cdot \mathbf{B}_i)$ . Here the first term forces the summation of the decomposed components consistent with the input, and the second term ensures that each retrieved component is equal to the convolution of the sharp image $\mathbf{X}$ with the corresponding PSF basis. + +# 4.2. 
Adaptive Feature-Domain Wiener Deconvolution + +The ONMF-based decomposition module can extract the blurry components caused by the corresponding PSF bases, so we design matching compensation modules and embed them into our joint framework. However, both the range and pattern of the intensities in the decomposed components differ from natural images, and deconvolving these components in the spatial domain is prone to ringing artifacts and over-smoothness. A recent study [6] demonstrates that high-quality image reconstruction can be achieved by performing + +![](images/3b03efbb0403e509535362c964a301a50b462717eee517107f2abb3984017273.jpg) +Figure 3. The framework of the proposed approach. The whole pipeline comprises preprocessing to build the low-rank aberration model and a divide-and-conquer compensation. The preprocessing involves annular partitioning of images and corresponding lens PSFs, ring-to-rectangle warping, and learning a low-dimensional representation of the PSFs. The compensation consists of three primary modules: (i) Decomposition, a neural network based on Orthogonal Non-Negative Matrix Factorization (ONMF), decomposing the blurry image into components corresponding to the representation bases of lens aberration; (ii) Deconvolution, implemented as a cascaded encoder-decoder network to map the decomposed components into the feature domain and conducts pre-trained Wiener deconvolution sequentially; (iii) Fusion, which aggregates the multiple scaled versions of the latent sharp image from the previous deconvolution modules to get the final output. The whole network is trained in an end-to-end manner. + +deconvolution in the feature domain. Thus, we propose an adaptive feature-domain Wiener deconvolution module to recover the lost high frequencies better. + +Specifically, for a specific patch $\mathbf{X}_i = \alpha_i\mathbf{X}$ , we reconstruct it from the $i$ -th blurry decomposed component $\mathbf{Y}_i$ + +$$ +\mathbf {X} _ {i} ^ {*} = \arg \min \left\| \mathbf {Y} _ {i} - \mathbf {X} _ {i} \otimes \mathbf {B} _ {i} \right\|, \tag {4} +$$ + +where $\mathbf{X}_i$ and $\mathbf{Y}_i$ respectively represent the sharp and blurry image components matching the $i$ -th PSF basis. In implementation, we build a feature-based Wiener adaptive deconvolution network. We denote $f_i$ as a set of learnable linear filters and convolve $\mathbf{Y}_i$ with them to extract useful features and obtain the relationship among the blurry input, PSF, and the high-quality output in the feature domain. According to the properties of convolution, Eq. 2 turns into + +$$ +\mathbf {F} _ {i} \mathbf {Y} _ {i} = \mathbf {F} _ {i} \left(\mathbf {X} _ {i} \otimes \mathbf {B} _ {i}\right) + \mathbf {F} _ {i} \mathbf {n}, \tag {5} +$$ + +where multiplying with $\mathbf{F}_i$ is equivalent to convolving with $f_{i}$ . Correspondingly, the above optimization in Eq. 4 is equivalent to finding a set of feature-based Wiener deconvolution operators $\mathbf{G}_i$ (which can be obtained based on the conclusion in [6]) to reverse the aberration by $\mathbf{B}_i$ + +$$ +\mathbf {X} _ {i} ^ {*} = \arg \min \left\| \mathbf {G} _ {i} \mathbf {F} _ {i} \mathbf {Y} _ {i} - \mathbf {F} _ {i} \mathbf {X} _ {i} \right\|. \tag {6} +$$ + +The compensation is implemented as a deep neural network with the network structure and learned in a data-driven + +manner. 
During training the loss function is designed in an intuitive manner: + +$$ +\mathcal {L} _ {\mathrm {d e c o n v}} = \left\| \mathbf {F} _ {i} \mathbf {X} _ {i} - \mathbf {G} _ {i} \mathbf {F} _ {i} \mathbf {Y} _ {i} \right\| _ {2}. \tag {7} +$$ + +The network is designed to share the parameters across all scales except for the first encoder block at the first cascade, which helps achieve fast and high-quality deconvolution. + +# 4.3. Attention-based Fusion + +According to Eq. 2, each deconvolution model can provide an estimation of the latent high-quality image, multiplied by a scaling factor. However, the potential inaccuracy in decomposition and artifacts in deconvolution would harm the quality of the final output. To overcome this challenge, we propose an effective strategy to streamline the reconstruction process. Specifically, we decompose more components from the blurry input and secondly apply the corresponding deconvolution to obtain multiple aberration-compensated versions, then fuse them together via a weight-trainable fusion network to raise robustness. + +The above strategy introduces a substantial increase in running time. To accelerate the training of the deconvolution modules, we adopt a coarse-to-fine strategy, i.e., training a base model and subsequently fine-tuning it, which is largely faster than training all these basis-specific networks from + +scratch. Moreover, our investigation reveals that the errors in the decomposition module can harm the successive deconvolution. Consequently, we introduce the decomposition confidence to serve as a valuable indicator of the decomposition accuracy/reliability of the decomposition process, and use it to guide the fusion. + +# 5. Experiments + +In this section, after describing the details of model training in Sec. 5.1, we first analyze the advantages of specific designs and key parameter settings in our approach (Subsection 5.2). Then, we demonstrate our superior performance against the state-of-the-art (SOTA) algorithms on synthetic (Subsection 5.3) and real data (Subsection 5.4). + +# 5.1. Implementation Details + +For model training, we gather 300 images from the Flickr2K dataset for model training, ensuring wide applicability for diverse natural scenes. Specifically, we select 20 common commercial lenses and simulate their spatially varying PSF using Zemax software. Then we simulate the lens aberration via convolving 100 high-definition (2K) images from Flickr2K with the generated PSF and apply the successive operations to train the model, including annular decomposition, ring-to-rectangle warping, patch cropping, etc. In total, we actually obtained the sharp-blurry pair and PSF of 9200 $(100 \times 92)$ patches. + +During model training, we adopt the Adam optimizer with default parameters. The learning rate is initialized as $10^{-4}$ , which is halved every 100 epochs. PyTorch code and trained models are available on our project page. + +# 5.2. Influences of the Key Parameters/Settings + +Partitioning strategies. By capitalizing the rotational symmetry of camera lenses, we employ annular partitioning to divide the image into a sequence of concentric rings, where patches in each ring share a highly similar PSF after warping into a stripe. This strategy substantially reduces the spatial variance among PSFs across the field of view, consequently reducing the required number of bases for accurate PSF representation. The illustration and performance of the proposed partitioning method are depicted in Fig. 4(a), whereas Fig. 
4(b) shows the counterpart of conventional grid partitioning. From the middle row, one can notice the highly consistent PSFs in the same annular ring in (a), in contrast to the large PSF difference among the patches in (b). + +We also compare the final performance of two partitioning strategies on the synthetic dataset. Remarkably, when utilizing an equal number of bases (9 bases), the annular splitting reaches an impressive PSF representation accuracy of 0.93 and performs 30.16dB in the final reconstruction. In contrast, the conventional grid splitting yields a largely lower accuracy of 0.69 and the reconstruction achieves 27.77dB. + +![](images/81400036ee7dbdb5b2ce731e3e49a7d2d14cf2566ab21938cd71632068a3685e.jpg) + +![](images/ddbc00530e18d1cd5c96f869264a5d82d8ffaa902af0393b908dc6f331a09829.jpg) +(a) + +![](images/3986b1a085b09317ce490ba9127143a6356dfa996265bf44074b250c4ec0c56b.jpg) + +![](images/e628742b9c634d29f8176adc8ffe888c706307c17a6ac2a65ed6b3b03cf9dffd.jpg) +(b) +Figure 4. Comparison between the annular partitioning in our model (a) and conventional grid partitioning (b). Top row: the illustration of partitioning. Middle row: the PSFs within the highlighted regions in the top row, with the left one stretched to a rectangle. Bottom row: the aberration compensation results with the same number of bases (9 in our experiment). + +Visually, we show an example in the 3rd row of Fig. 4, which shows that annular partitioning exhibits noticeable advantageous enhancement. + +The number of annular rings. From the previous experiment, we came to the conclusion that annular partitioning is a better option. Further, we study the proper setting of the number of concentric rings, which will directly affect the amount of calculation and the precision of PSF representation. Although increasing the number of split rings can handle the radical difference of lens aberration better, it also brings higher computational complexity. Hence, we traverse several levels of radical division, i.e., the number of annular rings to find a good balance between precision and efficiency. The results are shown in Table 1, which shows that for usual commercial lenses, the performance improvement becomes marginal when the number grows beyond five. Therefore, we compromised and chose to split into 5 rings in our experiments. + +Table 1. The performance at varying numbers of rings. + +
![](images/81400036ee7dbdb5b2ce731e3e49a7d2d14cf2566ab21938cd71632068a3685e.jpg) + +![](images/ddbc00530e18d1cd5c96f869264a5d82d8ffaa902af0393b908dc6f331a09829.jpg) +(a) + +![](images/3986b1a085b09317ce490ba9127143a6356dfa996265bf44074b250c4ec0c56b.jpg) + +![](images/e628742b9c634d29f8176adc8ffe888c706307c17a6ac2a65ed6b3b03cf9dffd.jpg) +(b) +Figure 4. Comparison between the annular partitioning in our model (a) and conventional grid partitioning (b). Top row: the illustration of partitioning. Middle row: the PSFs within the highlighted regions in the top row, with the left one stretched to a rectangle. Bottom row: the aberration compensation results with the same number of bases (9 in our experiment). + +Visually, the example in the 3rd row of Fig. 4 shows that annular partitioning yields a noticeably better enhancement. + +The number of annular rings. The previous experiment leads to the conclusion that annular partitioning is the better option. We further study the proper setting of the number of concentric rings, which directly affects the amount of computation and the precision of the PSF representation. Although increasing the number of rings handles the radial variation of the lens aberration better, it also brings higher computational complexity. Hence, we traverse several levels of radial division, i.e., numbers of annular rings, to find a good balance between precision and efficiency. The results in Table 1 show that, for common commercial lenses, the performance improvement becomes marginal when the number grows beyond five. Therefore, we choose to split the field of view into 5 rings in our experiments. + +Table 1. The performance at varying numbers of rings.
| # of Rings | Patch Size (pixels) | Data Volume | PSNR (dB) | SSIM |
| --- | --- | --- | --- | --- |
| 3 | 214×214 | 75 | 29.12 | 0.914 |
| 5 | 128×128 | 92 | 30.96 | 0.952 |
| 7 | 92×92 | 132 | 30.99 | 0.953 |
| 9 | 72×72 | 160 | 30.91 | 0.950 |
The number of bases. We apply ONMF to obtain a low-dimensional representation of the lens aberration, i.e., the PSF. Intuitively, more bases provide a higher PSF representation accuracy, which helps the reconstruction, but they also increase the ill-posedness of the decomposition network and the number of required deconvolution modules, which in turn harms the final performance and the running efficiency. Therefore, pursuing the optimal number of bases is of crucial importance. As Tab. 2 shows, we test the performance with different numbers of bases and obtain a good balance at 9 bases, with high quality and low computational complexity.
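As a concrete illustration of the low-rank PSF model that these bases realize, the snippet below composes a local PSF from 9 basis kernels and per-location coefficients following Eq. 1, and forms the corresponding per-basis blurry components of Eq. 2. The basis values and coefficients are random placeholders rather than the learned ONMF factors.

```python
import numpy as np
from scipy.signal import convolve2d

rng = np.random.default_rng(0)
num_bases, ksize = 9, 13                       # 9 bases and 13x13 kernels, as in our setup
bases = rng.random((num_bases, ksize, ksize))  # placeholders for the non-negative ONMF bases B_i
alphas = rng.random(num_bases)                 # placeholders for the coefficients alpha_i

# Eq. 1: a local PSF is a non-negative combination of the bases, k = sum_i alpha_i * B_i
k = np.tensordot(alphas, bases, axes=1)        # (13, 13)

# Eq. 2: the blurry patch splits into per-basis components Y_i = (alpha_i * X) (*) B_i,
# so each Y_i can be handled by the deconvolution module pre-trained for B_i.
X = rng.random((128, 128))                     # placeholder sharp patch (128x128, 5-ring setting)
Y_components = [convolve2d(alpha * X, B, mode="same") for alpha, B in zip(alphas, bases)]
Y = sum(Y_components)                          # identical to convolving X with k directly
```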
Table 2. The performances using different numbers of bases.
| # of bases | 5 | 7 | 9 | 11 | 13 |
| --- | --- | --- | --- | --- | --- |
| SSIM | 0.901 | 0.927 | 0.952 | 0.955 | 0.955 |
| PSNR (dB) | 28.14 | 29.12 | 30.96 | 31.01 | 31.00 |
| Training time | ~9 hrs | ~11 hrs | ~12 hrs | ~16 hrs | ~18 hrs |
The applicability to various lenses. After determining the optimal number of bases, we test on 5 low-end commercial lenses (not included in the lens dataset) to verify the generalization ability of our model. By calculating the representation accuracy for the new lenses and the final lens correction results in Fig. 5, we arrive at two conclusions: the reconstruction quality is closely tied to the accuracy of the PSF representation, and our approach is widely applicable to diverse lenses. + +![](images/c9c709edc2c6fe61cccc7bf127461f8e55cdf1c329774a8ad07594ab13f47106.jpg) + +![](images/0465a978f5c2a83828c257ea5e2b605db2a461b9406ae64a71122d4b6931d048.jpg) +Figure 5. Our performance on diverse lenses. (a) The representation precision (horizontal axis) and the performance (vertical axis), in terms of PSNR, on 5 test lenses. Here the scattered box plot is drawn from the compensation results of 20 test images synthesized from the Flickr2K dataset. (b) PSF (left column) and our compensation result (right column, with the bottom-left and top-right insets being input and output respectively) of the two lenses with the best and worst performance in (a). + +![](images/bd684930377f643a8041092a9461b64c9ffe6e96317958fd4f0f2642dde83ada.jpg) + +![](images/bc7811f851e1ddea4467bfaad220ebb13ed6a28ad75be2bdc211de9b3efe7d17.jpg) + +Balancing both effectiveness and efficiency, we empirically divide the original low-quality images ($1280 \times 1280$ pixels) into 5 concentric rings and select 9 bases (preserving an impressive $96.6\%$ of the PSF variation in our lens database) in the final implementation of our model, based on the above quantitative experimental results. + +# 5.3. Performance Comparison + +We compare our approach with SOTA blind (DeblurGANv2 [17], MPRNet [26], and Eboli's method [7]) and non-blind (DPIR [12], DWDN [6]) deblurring methods, covering both optimization-based and CNN-based algorithms. The results are shown in Tab. 3 and Fig. 6, from which one can observe the following trends: (i) Blind deblurring algorithms generally exhibit good performance in addressing motion blur, but in terms of lens aberration correction, our approach performs better than these SOTAs and yields notably clearer images with finer details and fewer artifacts. (ii) By employing our low-rank PSF learning model, the PSF can be efficiently characterized, enabling blind deconvolution to attain performance on par with, or even superior to, non-blind algorithms. Overall, we achieve blind lens aberration correction that surpasses the SOTA blind deblurring methods and performs on par with non-blind approaches. + +Table 3. Quantitative performance comparison with SOTAs.
| | DeblurGANv2 | MPRNet | Eboli's | DPIR | DWDN | Ours |
| --- | --- | --- | --- | --- | --- | --- |
| Blind? | ✓ | ✓ | ✓ | × | × | ✓ |
| PSNR (dB) | 23.04 | 28.67 | 29.42 | 31.86 | 31.78 | 30.96 |
| SSIM | 0.726 | 0.927 | 0.934 | 0.962 | 0.960 | 0.952 |
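For reference, the PSNR and SSIM scores in Tab. 3 are standard full-reference metrics; a minimal example of computing them with scikit-image (version 0.19 or later for the `channel_axis` argument) is shown below, using random arrays as stand-ins for a restored image and its ground truth.

```python
import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

rng = np.random.default_rng(0)
restored = rng.random((256, 256, 3))   # stand-in for an aberration-corrected image in [0, 1]
reference = rng.random((256, 256, 3))  # stand-in for the ground-truth sharp image in [0, 1]

psnr = peak_signal_noise_ratio(reference, restored, data_range=1.0)
ssim = structural_similarity(reference, restored, channel_axis=-1, data_range=1.0)
print(f"PSNR: {psnr:.2f} dB, SSIM: {ssim:.3f}")
```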
+ +# 5.4. Real experiments + +To test the performance on real data, we use several low-end compact commercial cameras composed of a simple lens to capture low-quality images and computationally raise their visual quality with our model. Besides the results in Fig. 1, we show two more examples in Fig. 7, with the photo of the cameras in the left column, blurry input in the 2nd column, and the final reconstructed high-quality outputs in the rightmost column. We also show the side-by-side zoomed-in comparison of highlighted regions in the 3rd column for better visualization. One can see that the details in both the center and the periphery are recovered decently. Also, the consistently improved quality validates the wide applicability of the proposed approach to various lenses. + +# 6. Conclusion + +We have reported a versatile scheme capable of compensating lens aberrations of various lenses in an end-to-end manner and without model retraining or refinement. The universality of our approach stems from two key designs: Firstly, we incorporate the key physical properties inherent in camera lenses, such as rotational symmetry and low-dimensional structure after ring-to-rectangle warping; (ii) we integrate a deep neural network to reverse the aberration in a divide-and-conquer manner, i.e., decompose the low-quality input into basic components corresponding to the low-dimensional compositions of the aberration model, and + +![](images/3d6708e46782f832076713302bd294b0e602ad1a8562a3d3da39d2b6a797f8cd.jpg) +(a) Blurry input + +![](images/2f4155c4e9d2a5eea28bab7898c49251973670356b63bb3f9f27f0f55d7c892e.jpg) + +![](images/e37504446d61bd82f9dca711fa56e716608e726e387a3c595364f1e951fdfa4c.jpg) + +![](images/025a9c7f5cfac3f1983445bddfe79e45f13c2712953ff95b25d3af5c9544501e.jpg) + +![](images/d46f7b7fa4ad932f21b8e884ca7954509695e1320ee0fe72abdca7d0b82c285a.jpg) + +![](images/7dacc45d77cccf420f3e60001db170c9590eb961fc22362905883171701f5c82.jpg) + +![](images/a367d9b369b9ec397ae39913b6fb782152e29741e5059e661bbfc2fef159d33f.jpg) + +![](images/2194fa63f306ccd56c37c3359752938e92c924bcaa92aaaa488d86f4a77b9f77.jpg) + +![](images/ce58de75d2c27ab2b2dd3c237256a19db03c317b951e0699fa105e444d5ef8a5.jpg) + +![](images/88ad4ed0a95cca921a94000abb74874044871fc9f657b307ff59e2d5235d81aa.jpg) +(b) Our output + +![](images/ab191d559b74034c93012e7e746a64be3ed3c99aeb4b9bc5be2411610d39eaf8.jpg) +(c) Blurry patches + +![](images/7558571b35c189440432710e460498c37c6f49cdac61c450e1128c316421efd2.jpg) + +![](images/d0abac9e698509ac7d95f522160f3c54a1d087ad91b5fc330639f3af41e6e8e2.jpg) +(d) DeblurGANv2 + +![](images/1b04f8eae0522bc236b30021e7adf2559742e494e16f66bc403bbf3dffb42088.jpg) + +![](images/52d12fe2ed12f164333671d028facf29a41ddae821c8c01d1e1450d863382b2a.jpg) +(e) MPRNet + +![](images/d70e5b0408c59ba873b3e2a10d1c4a66709e860e7d9442452f64e3224aff7298.jpg) + +![](images/840978a1a4c8174b2d7334a6c62696daffba4dbf61023d9da030a678c72c4489.jpg) +(f) DPIR + +![](images/fcf9f27376e8b76cd1b2c694621b5e464bf8c3bade0224f741440ed9ed2dc85f.jpg) + +![](images/1877293483ff65f20cc42b99c1c4fbb6a8c9a4723d849003180f3914913ea034.jpg) +(g)DWDN + +![](images/bed0ee63cec21be53609195cda412e0831b013b339052eaa5c69830bc08d830b.jpg) + +![](images/914b407719f43d536ccf7dfea48debf26b9335466508700221f7cfc12b2c1edb.jpg) +(h) Ours + +![](images/8da7297b7ce2cbc1513af0cf468623e6d3778a0548a6443711932273c860a22e.jpg) + +![](images/12c0aa09441262812c1214feac0e38373e3627911f256e4d700d9db9b3befe32.jpg) +(i) Ground truth + 
+![](images/e532c30da681326f55d30f0ff4a60400416d6ea3bb0ab04e91802f5f863ea227.jpg) +Figure 6. Performance comparison with SOTA methods. (a) The input blurry image. (b) The result after aberration correction by our model. (c-i) The comparison among the results produced by different SOTA algorithms (d-h), in contrast to the blurry input (c) and the ground-truth sharp version (i), with PSNR and SSIM scores presented in Tab. 3. Here we compare three ROIs, cropped from different locations to demonstrate the performance on non-uniform lens aberrations. Note that (d), (e), and (h) are blind compensation results, while (f) and (g) are non-blind. +(a) Experimental lenses +Figure 7. Results on real data captured by two compact cameras with a low-end lens. (a) The photo of the two lenses, with the smaller one being only around $1\mathrm{cm}$, which is highly portable but exhibits significant aberrations. (b)(d) Raw images captured using the cameras in (a) and the results after compensation. (c) The zoomed-in views of the highlighted regions in (b) and (d), distinctly showcasing the performance both at regions closer to the center (red boxes) and toward the corners (yellow boxes) of the sensor's field of view. + +![](images/4bbd9908935deef34c0b0d16973d38c535ea9d8d71cb53c4176776a3c5a2fc29.jpg) + +![](images/b1ffba0cdd15c38df41ea6b2ba6eba8ddb6953d972dd4c80bad7e9ef83d87fd3.jpg) +(b) Input + +![](images/56a50073158cfbf3667c323c885ff91001a0385a06a04097f96e7f6f6cbb2409.jpg) + +![](images/21a076746255712c3fc7776d2b74056b485425da292ae51ca2863e94a030c87b.jpg) + +![](images/855efd7e6db84251c201d5474af50eae3026297c55451fcd3c8944738d931b08.jpg) + +![](images/bb24b2b2a6ef4c2b04da8352f107285151eef55bc551238924ea4b933dcfb9dc.jpg) +(c) ROI performance + +![](images/2963efe7968fc4f43438d6c60d5a95e68ef91b86c059e197e07e1c3f24b6c945.jpg) + +![](images/5654099080b2869d56a544cfcca9a886676c9a522458fcb27e4f1ed284be141a.jpg) + +![](images/5ff22a150fb68abacd93ab365993a8d6bd61073bd46ed21838f22f9623055409.jpg) + +![](images/a3d308c3c4bd0f01d0ba1369b7e225cf03bee1c89bd6edf4ca604ce5cc9113e5.jpg) + +![](images/e054d41dd8f0b4bd142c9d7a34a6afe7019feaf1c68153472921f35c221345b7.jpg) + +![](images/10c339dd1b81f4bea479f0420d2c29e727a7b3fc7e643e67135d3f367ef35309.jpg) +(d) Output + +then adopt pre-trained compensation modules to reconstruct the high-quality image with high robustness. The proposed approach offers high generalization ability to diverse lenses and requires no expertise-demanding calibration. Moreover, we achieve performance comparable to existing methods that rely on careful calibration and lens-specific model training. + +So far, our experiments assume sufficient exposure and high pixel count, but we acknowledge the potential for future enhancements, such as accounting for more realistic noise models and addressing other degradations like downsampling. Looking ahead, we envision further advancements of our model, focusing on the development of a lightweight network and on-chip implementation. As computational aberration compensation continues to progress, our method serves as a promising step towards enabling practical and cost-effective optical aberration correction for a wide range of applications. + +# References + +[1] Samuel Arba-Mosquera, Shwetabh Verma, and Shady T Awwad. Theoretical effect of coma and spherical aberrations translation on refractive error and higher order aberrations. In Photonics, page 116. MDPI, 2020. 2 +[2] Johannes Brauers, Claude Seiler, and Til Aach. Direct PSF estimation using a random noise target.
In Digital Photography VI, pages 96-105. SPIE, 2010. 3 +[3] Ayan Chakrabarti, Todd Zickler, and William T Freeman. Analyzing spatially-varying blur. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2512-2519. IEEE, 2010. 3 +[4] Jinlin Cui and Wei Huang. Optical aberration correction for simple lenses via sparse representation. Optics Communications, pages 201-213, 2018. 3 +[5] Maurizio Delbracio, Ignacio Garcia-Dorado, SungJoon Choi, Damien Kelly, and Peyman Milanfar. Polyblur: Removing mild blur by polynomial reblurring. IEEE Transactions on Computational Imaging, pages 837-848, 2021. 3 +[6] Jiangxin Dong, Stefan Roth, and Bernt Schiele. DWDN: Deep Wiener deconvolution network for non-blind image deblurring. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 9960-9976, 2021. 4, 5, 7 +[7] Thomas Eboli, Jean-Michel Morel, and Gabriele Facciolo. Fast two-step blind optical aberration correction. In European Conference on Computer Vision, pages 693-708. Springer, 2022. 3 +[8] T Furieri, A Bassi, and S Bonora. Large field of view aberrations correction with deformable lenses and multi conjugate adaptive optics. Journal of Biophotonics, page e202300104, 2023. 3 +[9] O García-Lievanos and S Vázquez-Montiel. Free system of spherical and coma aberrations by use aspherical and diffractive surfaces. In AIP Conference Proceedings, pages 659-664. American Institute of Physics, 2008. 2 +[10] Felix Heide, Mushfiqur Rouf, Matthias B Hullin, Bjorn Labitzke, Wolfgang Heidrich, and Andreas Kolb. High-quality computational imaging through simple lenses. ACM Transactions on Graphics, pages 1-14, 2013. 3 +[11] Michael Hirsch and Bernhard Scholkopf. Self-calibration of optical lenses. In Proceedings of the IEEE International Conference on Computer Vision, pages 612-620, 2015. 3 +[12] Zhanli Hu, Hengzhi Xue, Qiyang Zhang, Juan Gao, Na Zhang, Sijuan Zou, Yueyang Teng, Xin Liu, Yongfeng Yang, Dong Liang, et al. DPIR-Net: Direct PET image reconstruction based on the Wasserstein generative adversarial network. IEEE Transactions on Radiation and Plasma Medical Sciences, pages 35-43, 2020. 7 +[13] Qi Jiang, Hao Shi, Lei Sun, Shaohua Gao, Kailun Yang, and Kaiwei Wang. Annular computational imaging: Capture clear panoramic images through simple lens. IEEE Transactions on Computational Imaging, 8:1250-1264, 2022. 3 +[14] Jaihyun Koh, Jangho Lee, and Sungroh Yoon. Single-image deblurring with neural networks: A comparative survey. Computer Vision and Image Understanding, page 103134, 2021. 1 +[15] Amit Kohli, Anastasios Angelopoulos, Sixian You, and Laura Waller. Shift-variant deblurring for rotationally symmetric systems. In Computational Optical Sensing and Imaging, pages CTh5A-4. Optica Publishing Group, 2021. 3 +[16] Amit Kohli, Anastasios Angelopoulos, Sixian You, Kyrolos Yanny, and Laura Waller. Linear revolution-invariance: Modeling and deblurring spatially-varying imaging systems. arXiv preprint arXiv:2206.08928, 2022. 3 +[17] Orest Kupyn, Tetiana Martyniuk, Junru Wu, and Zhangyang Wang. DeblurGAN-v2: Deblurring (orders-of-magnitude) faster and better. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8878-8887, 2019. 3, 7 +[18] Anat Levin, Rob Fergus, Frédo Durand, and William T Freeman. Image and depth from a conventional camera with a coded aperture. ACM Transactions on Graphics, pages 70-81, 2007. 3 +[19] Weili Li, Xiaoqing Yin, Yu Liu, and Maojun Zhang.
Computational imaging through chromatic aberration corrected simple lenses. Journal of Modern Optics, pages 2211-2220, 2017. 3 +[20] Xiu Li, Jinli Suo, Weihang Zhang, Xin Yuan, and Qionghai Dai. Universal and flexible optical aberration correction using deep-prior based deconvolution. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2613-2621, 2021. 3 +[21] Esther YH Lin, Zhecheng Wang, Rebecca Lin, Daniel Miau, Florian Kainz, Jiawen Chen, Xuaner Cecilia Zhang, David B Lindell, and Kiriakos N Kutulakos. Learning lens blur fields. arXiv preprint arXiv:2310.11535, 2023. 3 +[22] Ting Lin, ShiQi Chen, Huajun Feng, Zhihai Xu, Qi Li, and Yueting Chen. Non-blind optical degradation correction via frequency self-adaptive and finetune tactics. Optics Express, pages 23485-23498, 2022. 3 +[23] Alice Lucas, Michael Iliadis, Rafael Molina, and Aggelos K Katsaggelos. Using deep neural networks for inverse problems in imaging: Beyond analytical methods. IEEE Signal Processing Magazine, pages 20-36, 2018. 1 +[24] Virendra N Mahajan. Zernike annular polynomials for imaging systems with annular pupils. Journal of the Optical Society of America, pages 75-85, 1981. 2 +[25] James P McGuire and Russell A Chipman. Polarization aberrations. 1. rotationally symmetric optical systems. Applied optics, pages 5080-5100, 1994. 2 +[26] Armin Mehri, Parichehr B Ardakani, and Angel D Sappa. MPRNet: Multi-path residual network for lightweight image super resolution. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2704-2713, 2021. 7 +[27] Ali Mosleh, Paul Green, Emmanuel Onzon, Isabelle Begin, and JM Pierre Langlois. Camera intrinsic blur kernel estimation: A reliable framework. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4961-4968, 2015. 1 +[28] Gregory Ongie, Ajil Jalal, Christopher A Metzler, Richard G Baraniuk, Alexandros G Dimakis, and Rebecca Willett. Deep learning techniques for inverse problems in imaging. IEEE + +Journal on Selected Areas in Information Theory, pages 39-56, 2020. 1 +[29] Kambiz Rahbar and Karim Faez. Blind correction of lens aberration using zernike moments. In IEEE International Conference on Image Processing, pages 861-864. IEEE, 2011. 2, 3 +[30] Abderrahmane Rahiche and Mohamed Cheriet. Forgery detection in hyperspectral document images using graph orthogonal nonnegative matrix factorization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 662-663, 2020. 4 +[31] Jiangpeng Rong, Shiyao Huang, Zeyu Shang, and Xianghua Ying. Radial lens distortion correction using convolutional neural networks trained with synthesized images. In *Asian Conference on Computer Vision*, pages 35-49. Springer, 2017. 3 +[32] Christian J Schuler, Michael Hirsch, Stefan Harmeling, and Bernhard Schölkopf. Blind correction of optical aberrations. In European Conference on Computer Vision, pages 187-200. Springer, 2012. 1, 2, 3 +[33] Yichang Shih, Brian Guenter, and Neel Joshi. Image enhancement using calibrated lens simulations. In European Conference on Computer Vision, pages 42-56. Springer, 2012. 3 +[34] Tiancheng Sun, Yifan Peng, and Wolfgang Heidrich. Revisiting cross-channel information transfer for chromatic aberration correction. In Proceedings of the IEEE International Conference on Computer Vision, pages 3248-3256, 2017. 3 +[35] Huixuan Tang and Kiriakos N Kutulakos. What does an aberrated photo tell us about the lens and the scene? 
In IEEE International Conference on Computational Photography, pages 1-10. IEEE, 2013. 3 +[36] Kaiyi Tang, Shuangyang Zhang, Yang Wang, Xiaoming Zhang, Zhenyang Liu, Zhichao Liang, Huafeng Wang, Lingjian Chen, Wufan Chen, and Li Qi. Learning spatially variant degradation for unsupervised blind photoacoustic tomography image restoration. Photoacoustics, page 100536, 2023. 3 +[37] Berge Tatian. Aberration balancing in rotationally symmetric lenses. Journal of the Optical Society of America, pages 1083-1091, 1974. 2 +[38] Chao Wang, Juan Chen, Hongguang Jia, Baosong Shi, Ruifei Zhu, Qun Wei, Linyao Yu, and Mingda Ge. Parameterized modeling of spatially varying psf for lens aberration and defocus. Journal of the Optical Society of Korea, pages 136-143, 2015. 3 +[39] Pei Wang, Wei Sun, Qingsen Yan, Axi Niu, Rui Li, Yu Zhu, Jinqiu Sun, and Yanning Zhang. Non-uniform motion deblurring with blurry component divided guidance. Pattern Recognition, page 108082, 2021. 3 +[40] Esther Whang, David McAllister, Ashwin Reddy, Amit Kohli, and Laura Waller. Seidelnet: an aberration-informed deep learning model for spatially varying deblurring. In AI and Optical Data Sciences IV, pages 276-281. SPIE, 2023. 3 +[41] Chudan Wu, Yan Wo, Guoqing Han, Zhangyong Wu, and Jiyun Liang. Non-uniform image blind deblurring by two-stage fully convolution network. IET Image Processing, pages 2588-2596, 2020. 1 + +[42] Zhenhua Xu, Huasong Chen, and Zhenhua Li. Fast blind deconvolution using a deeper sparse patch-wise maximum gradient prior. Signal Processing: Image Communication, page 116050, 2021. 3 +[43] Jianchao Yang, John Wright, Thomas Huang, and Yi Ma. Image super-resolution as sparse representation of raw image patches. In IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 1 +[44] Kyrollos Yanny, Kristina Monakhova, Richard W Shuai, and Laura Waller. Deep learning for fast spatially varying deconvolution. Optica, pages 96-99, 2022. 3 +[45] Tao Yue, Jinli Suo, Jue Wang, Xun Cao, and Qionghai Dai. Blind optical aberration correction by exploring geometric and visual priors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1684-1692, 2015. 1, 2, 3 +[46] Dazhi Zhan, Weili Li, Xiaoqing Yin, Caiyun Niu, and Jin Liu. Psf estimation method of simple-lens camera using normal sinh-arcsinh model based on noise image pairs. IEEE Access, pages 49338-49353, 2021. 1 +[47] Kaihao Zhang, Wenqi Ren, Wenhan Luo, Wei-Sheng Lai, Björn Stenger, Ming-Hsuan Yang, and Hongdong Li. Deep image deblurring: A survey. International Journal of Computer Vision, pages 2103-2130, 2022. 3 +[48] Zhihong Zhang, Yuxiao Cheng, Jinli Suo, Liheng Bian, and Qionghai Dai. INFWIDE: Image and feature space wiener deconvolution network for non-blind image deblurring in low-light conditions. IEEE Transactions on Image Processing, pages 1390-1402, 2023. 3 +[49] Changyin Zhou, Stephen Lin, and Shree K Nayar. Coded aperture pairs for depth from defocus and defocus deblurring. International Journal of Computer Vision, pages 53-72, 2011. 
3 \ No newline at end of file diff --git a/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/images.zip b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..c7e0ba12b5e17a52c33833055f0156b0591021d9 --- /dev/null +++ b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7e1658edffa600e6079515bb9118d1a1cf8d70e5dee6c055d6ae6f0ea935337 +size 779586 diff --git a/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/layout.json b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ac3beecf8999e870b874f6fbcb2c595d43c52392 --- /dev/null +++ b/2024/A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction/layout.json @@ -0,0 +1,9332 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 123, + 103, + 473, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 103, + 473, + 138 + ], + "spans": [ + { + "bbox": [ + 123, + 103, + 473, + 138 + ], + "type": "text", + "content": "A Physics-informed Low-rank Deep Neural Network for Blind and Universal Lens Aberration Correction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 160, + 164, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 160, + 164, + 175 + ], + "spans": [ + { + "bbox": [ + 112, + 160, + 164, + 175 + ], + "type": "text", + "content": "Jin Gong1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 195, + 161, + 272, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 161, + 272, + 175 + ], + "spans": [ + { + "bbox": [ + 195, + 161, + 272, + 175 + ], + "type": "text", + "content": "Runzhao Yang" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 161, + 386, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 161, + 386, + 175 + ], + "spans": [ + { + "bbox": [ + 304, + 161, + 386, + 175 + ], + "type": "text", + "content": "Weihang Zhang" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 418, + 160, + 479, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 418, + 160, + 479, + 175 + ], + "spans": [ + { + "bbox": [ + 418, + 160, + 479, + 175 + ], + "type": "text", + "content": "Jinli Suo1,2,3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 257, + 180, + 335, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 180, + 335, + 194 + ], + "spans": [ + { + "bbox": [ + 257, + 180, + 335, + 194 + ], + "type": "text", + "content": "Qionghai Dai" + }, + { + "bbox": [ + 257, + 180, + 335, + 194 + ], + "type": "inline_equation", + "content": "^{1,2}" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 139, + 195, + 454, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 195, + 454, + 209 + ], + "spans": [ + { + "bbox": [ + 139, + 195, + 454, + 209 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 139, + 195, + 454, + 209 + ], + "type": "text", + "content": "Department of Automation, Tsinghua University, Beijing, China" + } + ] + } + ], + "index": 8 + }, + { + 
"bbox": [ + 106, + 209, + 487, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 209, + 487, + 223 + ], + "spans": [ + { + "bbox": [ + 106, + 209, + 487, + 223 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 106, + 209, + 487, + 223 + ], + "type": "text", + "content": "Institute of Brain and Cognitive Sciences, Tsinghua University, Beijing, China" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 148, + 223, + 444, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 223, + 444, + 237 + ], + "spans": [ + { + "bbox": [ + 148, + 223, + 444, + 237 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 148, + 223, + 444, + 237 + ], + "type": "text", + "content": "Shanghai Artificial Intelligence Laboratory, Shanghai, China" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 143, + 265, + 192, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 265, + 192, + 277 + ], + "spans": [ + { + "bbox": [ + 143, + 265, + 192, + 277 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 290, + 290, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 290, + 290, + 602 + ], + "spans": [ + { + "bbox": [ + 47, + 290, + 290, + 602 + ], + "type": "text", + "content": "High-end lenses, although offering high-quality images, suffer from both insufficient affordability and bulky design, which hamper their applications in low-budget scenarios or on low-payload platforms. A flexible scheme is to tackle the optical aberration of low-end lenses computationally. However, it is highly demanded but quite challenging to build a general model capable of handling non-stationary aberrations and covering diverse lenses, especially in a blind manner. To address this issue, we propose a universal solution by extensively utilizing the physical properties of camera lenses: (i) reducing the complexity of lens aberrations, i.e., lens-specific non-stationary blur, by warping annual-ring-shaped sub-images into rectangular stripes to transform non-uniform degenerations into a uniform one, (ii) building a low-dimensional non-negative orthogonal representation of lens blur kernels to cover diverse lenses; (iii) designing a decoupling network to decompose the input low-quality image into several components degenerated by above kernel bases, and applying corresponding pre-trained deconvolution networks to reverse the degeneration. Benefiting from the proper incorporation of lenses' physical properties and unique network design, the proposed method achieves superb imaging quality, wide applicability for various lenses, high running efficiency, and is totally free of kernel calibration. These advantages bring great potential for scenarios requiring lightweight high-quality photography." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 620, + 128, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 620, + 128, + 633 + ], + "spans": [ + { + "bbox": [ + 47, + 620, + 128, + 633 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": "High-quality photography is of crucial importance for both high-fidelity visual recording (e.g., filmmaking, sports video capturing) and sophisticated computer vision tasks (e.g., surveillance, auto-piloting). High-end camera systems often employ compound lenses comprising approximately ten or more components constructed from diverse materials to com" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 266, + 547, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 266, + 547, + 350 + ], + "spans": [ + { + "bbox": [ + 304, + 266, + 547, + 350 + ], + "type": "text", + "content": "pensate for geometric and photometric aberrations. Such complicated designs are proven to be effective in achieving nice image quality, but come with inherent drawbacks, including high costs, bulkiness, and fragility, making them unsuitable for scenarios with low payload capacity or limited budgets. Consequently, the demand for high-quality photography using lightweight lenses has significantly intensified." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 351, + 547, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 351, + 547, + 435 + ], + "spans": [ + { + "bbox": [ + 304, + 351, + 547, + 435 + ], + "type": "text", + "content": "Considering the optical aberration in image formation and the afterward processing jointly, the workload in optical design can be shifted to the later stage [23, 28], where advanced reconstruction algorithms play a crucial role. One can also utilize the physical properties in the imaging setup to facilitate reconstruction, and researchers have made some primary efforts in this direction [32, 45]." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 437, + 548, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 548, + 688 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 548, + 688 + ], + "type": "text", + "content": "However, grand challenges lie in the variability of optical aberrations across the field of view and diverse lenses. On the one hand, the quality degeneration of a simple lens is intrinsically a convolution with non-uniform blur kernels, and the typical compensation algorithms [43] approximate the globally non-uniform deconvolution with patch-wise uniform deconvolutions, leading to a trade-off between precision and computational efficiency. This trade-off comes up with high inflexibility when adopting data-driven approaches [14, 41], which require learning a large number of models to achieve high-performance results. On the other hand, the degradation of different lenses varies significantly, so lens-specific algorithm development or parameter optimizations are required for high reconstruction performance. Furthermore, the calibration of the PSF kernels of camera lenses is quite expertise-demanding[27, 46], and blind compensation is more favorable for users. Overall, computational compensation for lens aberrations holds great promise for achieving lightweight high-quality imaging. However, there is a pressing demand for a general approach to handle spatially varying aberrations of diverse lenses in a blind manner." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "type": "text", + "content": "In this paper, we propose a physics-informed end-to-end solution that (i) capitalizes the characteristics of lens aber-" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24861" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 67, + 157, + 157 + ], + "blocks": [ + { + "bbox": [ + 59, + 67, + 157, + 157 + ], + "lines": [ + { + "bbox": [ + 59, + 67, + 157, + 157 + ], + "spans": [ + { + "bbox": [ + 59, + 67, + 157, + 157 + ], + "type": "image", + "image_path": "43ac21dc81c98da0b467877caa52ff73559924c9d50ea82bc8a9cf817b333c3b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 159, + 157, + 251 + ], + "blocks": [ + { + "bbox": [ + 59, + 159, + 157, + 251 + ], + "lines": [ + { + "bbox": [ + 59, + 159, + 157, + 251 + ], + "spans": [ + { + "bbox": [ + 59, + 159, + 157, + 251 + ], + "type": "image", + "image_path": "81dc18bd5f117798e5669563274de6a1eb43eb5f441b4b3b0fba963ba1548e28.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 255, + 114, + 264 + ], + "lines": [ + { + "bbox": [ + 104, + 255, + 114, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 114, + 264 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 161, + 65, + 283, + 157 + ], + "blocks": [ + { + "bbox": [ + 161, + 65, + 283, + 157 + ], + "lines": [ + { + "bbox": [ + 161, + 65, + 283, + 157 + ], + "spans": [ + { + "bbox": [ + 161, + 65, + 283, + 157 + ], + "type": "image", + "image_path": "d24f4a8cae61e840e5f85b8cd3d16fae3c761af2fe607887c2c06097ccc366a4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 161, + 159, + 283, + 251 + ], + "blocks": [ + { + "bbox": [ + 161, + 159, + 283, + 251 + ], + "lines": [ + { + "bbox": [ + 161, + 159, + 283, + 251 + ], + "spans": [ + { + "bbox": [ + 161, + 159, + 283, + 251 + ], + "type": "image", + "image_path": 
"3179b86b6463bb4cfb6a71d472ff0b90f0af7c89ae9fe49c81833d5d893a39e1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 217, + 255, + 228, + 265 + ], + "lines": [ + { + "bbox": [ + 217, + 255, + 228, + 265 + ], + "spans": [ + { + "bbox": [ + 217, + 255, + 228, + 265 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 287, + 65, + 534, + 251 + ], + "blocks": [ + { + "bbox": [ + 287, + 65, + 534, + 251 + ], + "lines": [ + { + "bbox": [ + 287, + 65, + 534, + 251 + ], + "spans": [ + { + "bbox": [ + 287, + 65, + 534, + 251 + ], + "type": "image", + "image_path": "0927b0af848e60f9e7b9d4a2d4e73bec4f3d783b8bd8d5d859d1845256b191eb.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 405, + 255, + 415, + 264 + ], + "lines": [ + { + "bbox": [ + 405, + 255, + 415, + 264 + ], + "spans": [ + { + "bbox": [ + 405, + 255, + 415, + 264 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 269, + 547, + 314 + ], + "lines": [ + { + "bbox": [ + 46, + 269, + 547, + 314 + ], + "spans": [ + { + "bbox": [ + 46, + 269, + 547, + 314 + ], + "type": "text", + "content": "Figure 1. An illustrative example of our lens-aberration compensation approach. (a) A camera equipped with a simple lens of a large field of view but severe optical aberration (WXSJ-H65HD) and a small-sized unmanned aerial vehicle (UAV) carrying the camera for data capture. (b) The input degenerated image (upper) and our reconstruction result (lower). (c) The zoomed-in comparison on the highlighted region (white box) in (b), where the original recording is shown in the bottom left corner and the reconstructed result in the top right corner." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 327, + 289, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 327, + 289, + 506 + ], + "spans": [ + { + "bbox": [ + 46, + 327, + 289, + 506 + ], + "type": "text", + "content": "ration to construct a low-dimensional representation of nonuniform blur kernels of general camera lenses, and (ii) designs a deep neural network resolving the degeneration components in the low-quality input and ensembling a set of pretrained compensators to reverse the degeneration robustly. Specifically, we represent an arbitrary local point spread function (PSF) with a set of negative orthogonal bases, pretrain their corresponding deconvolution modules, and then retrieve their degeneration from the low-quality image captured by a low-end lens and apply the pre-trained inversion models accordingly. The proposed approach demonstrates high performance and holds high potential in lightweight photography on low-payload platforms, as shown by the impressive results captured with a small drone equipped with a compact surveillance camera in Fig. 1." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 519, + 288, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 519, + 288, + 544 + ], + "spans": [ + { + "bbox": [ + 47, + 519, + 288, + 544 + ], + "type": "text", + "content": "In summary, we target for general, blind, and end-to-end lens aberration correction, and make the following contributions:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 556, + 288, + 711 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 47, + 556, + 288, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 556, + 288, + 590 + ], + "spans": [ + { + "bbox": [ + 47, + 556, + 288, + 590 + ], + "type": "text", + "content": "1) Proposes a unified framework for lens aberration compensation with high flexibility to diverse aberrations produced by the wide range of camera lenses." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 592, + 288, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 592, + 288, + 627 + ], + "spans": [ + { + "bbox": [ + 47, + 592, + 288, + 627 + ], + "type": "text", + "content": "2) Builds a general low-dimensional model of lens aberrations based on Orthogonal Non-negative Matrix Factorization, utilizing the physical properties of optical lenses." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 628, + 288, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 628, + 288, + 675 + ], + "spans": [ + { + "bbox": [ + 47, + 628, + 288, + 675 + ], + "type": "text", + "content": "3) Designs an end-to-end network to divide and conquer the optical aberrations in the above low-dimensional space, enabling fast and blind inversion of diverse lens degeneration and ultimately lightweight high-quality imaging." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 676, + 288, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 676, + 288, + 711 + ], + "spans": [ + { + "bbox": [ + 47, + 676, + 288, + 711 + ], + "type": "text", + "content": "4) Demonstrates performance comparable to state-of-the-art non-blind lens-specific algorithms, validating its great potential in budget-constrained or low-capacity platforms." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 325, + 392, + 337 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 325, + 392, + 337 + ], + "spans": [ + { + "bbox": [ + 306, + 325, + 392, + 337 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 342, + 547, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 342, + 547, + 426 + ], + "spans": [ + { + "bbox": [ + 305, + 342, + 547, + 426 + ], + "type": "text", + "content": "Lens aberration modeling. Generally, imaging lenses are physically rotational symmetric around the optical center, imparting rotation symmetry of the lens aberration [25, 37]. Ignoring the fabrication imperfections, almost all types of lens aberrations, such as spherical aberration, coma aberration, and chromatic aberration [1, 9], form a rotational symmetric pattern." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 427, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 427, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 427, + 547, + 715 + ], + "type": "text", + "content": "Utilizing these unique features of lens aberration, researchers have proposed some methods to simplify the degeneration by diverse lenses. For example, Rahbar et al. [29] adopt the Zernike model [24] to describe optical aberrations and can estimate the Zernike coefficients of a single channel through bicoherence and tricoherence estimation techniques, while Schuler et al. [32] represent the non-uniform aberrations with a set of orthonormal Efficient Filter Flow, which is applicable for most cases without large spherical aberration. Differently, Yue et al. [45] leverage the global rotational symmetry properties of regular lenses to transform non-uniform aberrations into uniform rings using radial splitting and warping techniques. This method capitalizes on the inherent physical properties of the imaging lens and largely simplifies the aberration model, offering inspiration for our approach to explore the unique structures of lens aberrations. However, their implementations use conventional optimization by alternatively kernel estimation and deblurring, which is time-consuming and the strong consumption of PSF uniformity within a stretched annular ring harms the performance slightly, which limits the applications demanding real-time and high-quality compensation. In contrast, we design a framework consisting of well-organized sub-networks to address all these issues decently." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "24862" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "text", + "content": "Optical aberration removal. Lens aberration exists widely in optical imaging systems, and computational correction is an essential way to raise imaging quality without increasing hardware budget. One common way to model lens aberration is by convolving the image with spatially varying PSFs and compensation is naturally conducted via deconvolution [38, 44]. Existing correction methods can be broadly categorized into non-blind and blind ones." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 170, + 290, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 170, + 290, + 375 + ], + "spans": [ + { + "bbox": [ + 46, + 170, + 290, + 375 + ], + "type": "text", + "content": "Non-blind correction assumes known PSFs and algorithms are extensively studied [4, 22]. Researchers have proposed different algorithms to estimate kernel PSFs via, e.g., using a combination of defocused and focused images [2], a set of binary random patterns [10], aggregating degenerations in informative nature image patches [11], analyzing the spectral characteristics of the lens system [3, 19, 34]. Further, to cover PSFs of different camera lenses, Shih et al. 
[33] utilize interpolation methods by fitting a spatial Gaussian model. More recently, Li et al. [20] propose a data-driven deep learning approach explicitly taking the PSF as input and introducing lens-specific priors for high-quality compensation. In sum, non-blind lens aberration compensation techniques have shown promising performance, but they require expertise demanding PSF calibration or robust estimation, which are not friendly for non-expert users and require lens-specific model training." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 376, + 288, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 376, + 288, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 288, + 544 + ], + "type": "text", + "content": "Blind methods have gained significant attention due to their convenience for un-trained users and high flexibility to diverse lenses. The typical strategy is to estimate PSFs and conduct compensation sequentially. Among them Rahbar et al. [29] introduce Zernike moments, and Tang & Kutulakos [35] employ the Seidel model to simplify the lens aberration model; while Delbracio et al. [5] propose a robust algorithm based on empirical observations about the distribution of the gradient in clear natural images. Recently, some researchers have adopted the data-driven scheme and developed deep neural networks to compensate for the aberrations, but often focus on specific types of aberrations to ensure good convergence, such as radial lens distortion [31] and chromatic aberrations [7]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "type": "text", + "content": "In light of these developments, the blind compensation techniques (either based on conventional optimization or deep neural networks) cannot achieve performance comparable to their non-blind counterpart. Besides, existing methods are incapable of handling diverse lenses and various types of aberrations flexibly [4, 8]. In contrast, the proposed work leverages the physical properties of camera lenses and casts the complex aberrations of diverse lenses into a unified low-dimensional space, in which we divide and conquer the degeneration via incorporating geometric priors and a small number of pre-trained modules. Benefiting from the proper use of lenses' physical properties and elegant network design, we can achieve performance comparable to non-blind techniques as well. Such a general end-to-end" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "content": "blind solution with superb performance holds great potential for high-quality lightweight imaging systems on portable devices or low-capacity mobile platforms." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 109, + 547, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 109, + 547, + 455 + ], + "spans": [ + { + "bbox": [ + 304, + 109, + 547, + 455 + ], + "type": "text", + "content": "Non-uniform deconvolution. Mathematically lens aberration compensation can be described as a non-uniform deconvolution process and shares the same formulation with other tasks such as camera shake, defocus, object motion, etc. 
Various techniques and approaches have been proposed to address the technical challenges posed by non-uniform blurring [39, 47]. There are three main ways to address the non-uniformity. The most intuitive and widely used way is to assume patch-wise uniform PSFs and conduct deconvolution patch by patch [36, 42]. The deconvolution can be implemented via conventional optimization previously and deep neural networks recently, and usually in a non-blind manner. There are a bunch of algorithms, and we do not list them here. The second solution is to transform the various PSFs into a low dimensional space and remove the blur along each dimension [7, 17, 32]. The third way is to adopt data-driven techniques and fed training data with varying PSFs for high generalization ability [48]. One can also make extensive use of the structure of the PSF patterns to circumvent the high complexity by introducing physical constraints, e.g., transform the spatially varying aberrations in an annular ring into uniform via warping [45], and decompose the spatially varying defocus blur into several uniform ones according to the scene depth [18, 49]. Differently, our approach focuses on lens aberration which is of different features from other degenerations and can utilize the unique properties of lenses for a better design, in a similar way to [45]. In addition, we are dedicated to an end-to-end solution working for diverse lenses without model training." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 464, + 545, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 464, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 305, + 464, + 545, + 491 + ], + "type": "text", + "content": "3. Physics-inspired Low Rank Lens Aberration Model" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 498, + 547, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 547, + 594 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 547, + 594 + ], + "type": "text", + "content": "There exist various lens aberrations, such as spherical aberration and astigmatism, resulting in various non-uniform quality degenerations. In addition, the aberration models of diverse lenses differ a lot. To provide a universal solution for different lenses, it is of crucial importance to reduce the dimension of the PSFs and provide a unified representation, based on which one can design a low-rank model to address the aberrations in a divide-and-conquer manner." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 594, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 547, + 715 + ], + "type": "text", + "content": "Considering the rotational-symmetry of optical lenses [15, 16, 40], we divide the lens's field of view into several concentric rings and warp them into rectangular stripes, each of which is of approximately uniform PSFs. This operation can largely decrease the dimension of the lens aberrations [13, 21]. Further, we crop these stripes into patches and apply ONMF to find a set of orthogonal and nonnegative bases to cover the space of the lens PSFs. If learned from a large set of lenses, the bases can represent an arbitrary PSF with high accuracy. 
The orthogonality can avoid ambiguity" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24863" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 71, + 276, + 275 + ], + "blocks": [ + { + "bbox": [ + 62, + 71, + 276, + 275 + ], + "lines": [ + { + "bbox": [ + 62, + 71, + 276, + 275 + ], + "spans": [ + { + "bbox": [ + 62, + 71, + 276, + 275 + ], + "type": "image", + "image_path": "f9fcfbe181704b0b897fbf0b1141c1eb10eb8a89c1ce6de54eaf3ddd60955158.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 278, + 289, + 355 + ], + "lines": [ + { + "bbox": [ + 46, + 278, + 289, + 355 + ], + "spans": [ + { + "bbox": [ + 46, + 278, + 289, + 355 + ], + "type": "text", + "content": "Figure 2. Illustration of the ONMF-based low-rank lens aberration model. (a) PSF dataset of 20 low-end lenses simulated by Zemax. (b) The PSF lattice consisting of 1840 PSF kernels, generated by dividing the images by each lens into 5 concentric rings, warping into stripes, and further cropping into " + }, + { + "bbox": [ + 46, + 278, + 289, + 355 + ], + "type": "inline_equation", + "content": "13 \\times 13" + }, + { + "bbox": [ + 46, + 278, + 289, + 355 + ], + "type": "text", + "content": "-pixel patches. (c) 9 top PSF bases obtained by applying Orthogonal Nonnegative Matrix Factorization onto (b)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 362, + 288, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 362, + 288, + 397 + ], + "spans": [ + { + "bbox": [ + 46, + 362, + 288, + 397 + ], + "type": "text", + "content": "during decomposing the complex aberrations and the nonnegativity facilitates compensating aberrations in the same way as handling conventional PSFs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 399, + 288, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 399, + 288, + 506 + ], + "spans": [ + { + "bbox": [ + 46, + 399, + 288, + 506 + ], + "type": "text", + "content": "To this end, we first collect 20 representative low-end lenses from ZEBASE database and construct a high-dimensional matrix encompassing all their PSFs after annular division (5 rings), ring-to-rectangle warping and cropping. The matrix is then factorized using the ONMF model [30] and yield a set of principal bases. The workflow detailing this procedure as visualized in Fig. 2. So far, we have arrived at a low dimensional representation of the lens aberrations, which can cover the image degeneration of diverse lenses." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 514, + 287, + 541 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 514, + 287, + 541 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 287, + 541 + ], + "type": "text", + "content": "4. 
Universal Framework for Lens Aberration Correction" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 545, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 289, + 713 + ], + "type": "text", + "content": "The above idea of building a low-rank model to compensate the lens aberration in a divide-and-conquer manner is non-trivial for several reasons: the decomposition of aberrations in a blurry input is highly imposed; the deconvolutions need to be conducted on the components which are of different intensity ranges with general natural images; the fusion should also tolerate the imperfections in both decomposition and deconvolution. To overcome these hurdles, we propose to design a jointly trained low-rank deep network that enables flexible optical aberration correction. Specifically, our network comprises three main modules trained in an end-to-end manner. The first module conducts ONMF-based decomposition, followed by several identical adaptive deconvolution modules. Finally, we incorporate a synthetic" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 547, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 109 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 109 + ], + "type": "text", + "content": "network module to further enhance the correction result. Fig. 3 summarizes the workflow of the whole network and reports the architecture of the key modules." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 115, + 545, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 115, + 545, + 128 + ], + "spans": [ + { + "bbox": [ + 305, + 115, + 545, + 128 + ], + "type": "text", + "content": "4.1. Decomposing Aberrations Attributed to PSF Bases" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 133, + 547, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 133, + 547, + 170 + ], + "spans": [ + { + "bbox": [ + 304, + 133, + 547, + 170 + ], + "type": "text", + "content": "As aforementioned, after applying matrix decomposition to the PSF library, we get a basis set " + }, + { + "bbox": [ + 304, + 133, + 547, + 170 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{B}_i\\}" + }, + { + "bbox": [ + 304, + 133, + 547, + 170 + ], + "type": "text", + "content": ", which can compose an arbitrary PSF " + }, + { + "bbox": [ + 304, + 133, + 547, + 170 + ], + "type": "inline_equation", + "content": "\\mathbf{k}" + }, + { + "bbox": [ + 304, + 133, + 547, + 170 + ], + "type": "text", + "content": " with corresponding coefficients " + }, + { + "bbox": [ + 304, + 133, + 547, + 170 + ], + "type": "inline_equation", + "content": "\\{\\alpha_i\\}" + }, + { + "bbox": [ + 304, + 133, + 547, + 170 + ], + "type": "text", + "content": ", i.e.," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 390, + 177, + 546, + 202 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 390, + 177, + 546, + 202 + ], + "spans": [ + { + "bbox": [ + 390, + 177, + 546, + 202 + ], + "type": "interline_equation", + "content": "\\mathbf {k} = \\sum_ {i} \\alpha_ {i} \\cdot \\mathbf {B} _ {i}. 
\\tag {1}", + "image_path": "345f36cc9732893f6d0e2bc18ef3ea1db7ca99733ffaee13633a177d31ad8bd4.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 210, + 544, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 210, + 544, + 222 + ], + "spans": [ + { + "bbox": [ + 306, + 210, + 544, + 222 + ], + "type": "text", + "content": "Hence, a recorded degraded patch " + }, + { + "bbox": [ + 306, + 210, + 544, + 222 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}" + }, + { + "bbox": [ + 306, + 210, + 544, + 222 + ], + "type": "text", + "content": " can be represented as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 229, + 546, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 229, + 546, + 255 + ], + "spans": [ + { + "bbox": [ + 310, + 229, + 546, + 255 + ], + "type": "interline_equation", + "content": "\\mathbf {Y} = \\mathbf {X} \\otimes \\left(\\sum_ {i} \\alpha_ {i} \\cdot \\mathbf {B} _ {i}\\right) + \\mathbf {n} = \\sum_ {i} \\left(\\alpha_ {i} \\cdot \\mathbf {X}\\right) \\otimes \\mathbf {B} _ {i} + \\mathbf {n} \\tag {2}", + "image_path": "8f5ead0fd23140b62b683e6ae43ceabf47dc40e33f4aee13b2129641bcaf4a46.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 262, + 545, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 262, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 304, + 262, + 545, + 285 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 262, + 545, + 285 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 304, + 262, + 545, + 285 + ], + "type": "text", + "content": " is the latent sharp image, " + }, + { + "bbox": [ + 304, + 262, + 545, + 285 + ], + "type": "inline_equation", + "content": "\\mathbf{n}" + }, + { + "bbox": [ + 304, + 262, + 545, + 285 + ], + "type": "text", + "content": " is the noise and " + }, + { + "bbox": [ + 304, + 262, + 545, + 285 + ], + "type": "inline_equation", + "content": "\\otimes" + }, + { + "bbox": [ + 304, + 262, + 545, + 285 + ], + "type": "text", + "content": " denotes 2D convolution." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "spans": [ + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "text", + "content": "As Fig. 
3 shows, suppose we can decompose the blurry " + }, + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "text", + "content": " into components " + }, + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{Y}_i = (\\alpha_i \\cdot \\mathbf{X}) \\otimes \\mathbf{B}_i\\}" + }, + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "text", + "content": ", we can pretrain deconvolution models to compensate the aberrations caused by " + }, + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{B}_i\\}" + }, + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "text", + "content": " and estimate " + }, + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "text", + "content": " by simply estimating the scaling factor " + }, + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "inline_equation", + "content": "(\\alpha_i)" + }, + { + "bbox": [ + 304, + 286, + 547, + 429 + ], + "type": "text", + "content": ". The decomposition is implemented with a deep neural network built on the U-net structure. Here we replace the plain convolution in original U-net with residual blocks for better convergence. Notice that the network is fully convolutional and can be applied to images with an arbitrary size. In our experiment, we divide the acquired image into equidistant annual rings with different widths based on the lens's physical properties." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 429, + 545, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 429, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 305, + 429, + 545, + 453 + ], + "type": "text", + "content": "In order to ensure the decomposition performance of the network, we define the following loss function" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 334, + 460, + 546, + 487 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 460, + 546, + 487 + ], + "spans": [ + { + "bbox": [ + 334, + 460, + 546, + 487 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {d e c o m}} = \\left\\| \\mathbf {Y} - \\sum_ {i} \\mathbf {Y} _ {i} \\right\\| _ {2} + \\sum_ {i} \\left(\\mathbf {Y} _ {i} - \\tilde {\\mathbf {Y}} _ {i}\\right) \\tag {3}", + "image_path": "fab45c2aa53961b253a48a4ec8e82d05134ab9ea7c8a77339b3b9d4a30d8cd68.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 495, + 547, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 495, + 547, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 495, + 547, + 581 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 304, + 495, + 547, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_i" + }, + { + "bbox": [ + 304, + 495, + 547, + 581 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 495, + 547, + 581 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{Y}}_i" + }, + { + "bbox": [ + 304, + 495, + 547, + 581 + ], + "type": "text", + "content": " denoting the decomposed blurry components and the generated version by Zemax software, equivalent to " + }, + { + "bbox": [ + 304, + 495, + 547, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\otimes (\\alpha_i 
\\cdot \\mathbf{B}_i)" + }, + { + "bbox": [ + 304, + 495, + 547, + 581 + ], + "type": "text", + "content": ". Here the first term forces the summation of the decomposed components consistent with the input, and the second term ensures that each retrieved component is equal to the convolution of the sharp image " + }, + { + "bbox": [ + 304, + 495, + 547, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 304, + 495, + 547, + 581 + ], + "type": "text", + "content": " with the corresponding PSF basis." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 588, + 537, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 588, + 537, + 600 + ], + "spans": [ + { + "bbox": [ + 305, + 588, + 537, + 600 + ], + "type": "text", + "content": "4.2. Adaptive Feature-Domain Wiener Deconvolution" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": "The ONMF-based decomposition module can extract the blurry components caused by the corresponding PSF bases, so we design matching compensation modules and embed them into our joint framework. However, both the range and pattern of the intensities in the decomposed components differ from natural images, and deconvolving these components in the spatial domain is prone to ringing artifacts and over-smoothness. A recent study [6] demonstrates that high-quality image reconstruction can be achieved by performing" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "24864" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 64, + 532, + 293 + ], + "blocks": [ + { + "bbox": [ + 60, + 64, + 532, + 293 + ], + "lines": [ + { + "bbox": [ + 60, + 64, + 532, + 293 + ], + "spans": [ + { + "bbox": [ + 60, + 64, + 532, + 293 + ], + "type": "image", + "image_path": "3b03efbb0403e509535362c964a301a50b462717eee517107f2abb3984017273.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 301, + 548, + 388 + ], + "lines": [ + { + "bbox": [ + 46, + 301, + 548, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 301, + 548, + 388 + ], + "type": "text", + "content": "Figure 3. The framework of the proposed approach. The whole pipeline comprises preprocessing to build the low-rank aberration model and a divide-and-conquer compensation. The preprocessing involves annular partitioning of images and corresponding lens PSFs, ring-to-rectangle warping, and learning a low-dimensional representation of the PSFs. 
The compensation consists of three primary modules: (i) Decomposition, a neural network based on Orthogonal Non-Negative Matrix Factorization (ONMF), decomposing the blurry image into components corresponding to the representation bases of lens aberration; (ii) Deconvolution, implemented as a cascaded encoder-decoder network to map the decomposed components into the feature domain and conducts pre-trained Wiener deconvolution sequentially; (iii) Fusion, which aggregates the multiple scaled versions of the latent sharp image from the previous deconvolution modules to get the final output. The whole network is trained in an end-to-end manner." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 403, + 287, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 403, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 46, + 403, + 287, + 437 + ], + "type": "text", + "content": "deconvolution in the feature domain. Thus, we propose an adaptive feature-domain Wiener deconvolution module to recover the lost high frequencies better." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 437, + 288, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 437, + 288, + 462 + ], + "spans": [ + { + "bbox": [ + 47, + 437, + 288, + 462 + ], + "type": "text", + "content": "Specifically, for a specific patch " + }, + { + "bbox": [ + 47, + 437, + 288, + 462 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_i = \\alpha_i\\mathbf{X}" + }, + { + "bbox": [ + 47, + 437, + 288, + 462 + ], + "type": "text", + "content": ", we reconstruct it from the " + }, + { + "bbox": [ + 47, + 437, + 288, + 462 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 437, + 288, + 462 + ], + "type": "text", + "content": "-th blurry decomposed component " + }, + { + "bbox": [ + 47, + 437, + 288, + 462 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_i" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 97, + 467, + 287, + 480 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 467, + 287, + 480 + ], + "spans": [ + { + "bbox": [ + 97, + 467, + 287, + 480 + ], + "type": "interline_equation", + "content": "\\mathbf {X} _ {i} ^ {*} = \\arg \\min \\left\\| \\mathbf {Y} _ {i} - \\mathbf {X} _ {i} \\otimes \\mathbf {B} _ {i} \\right\\|, \\tag {4}", + "image_path": "6087e4b3f62874cdf6c7ff9748d61bc5b9f5c7c63948d40534f70e71587ea34f.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_i" + }, + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_i" + }, + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "text", + "content": " respectively represent the sharp and blurry image components matching the " + }, + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "text", + "content": "-th PSF basis. 
In implementation, we build a feature-based Wiener adaptive deconvolution network. We denote " + }, + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "inline_equation", + "content": "f_i" + }, + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "text", + "content": " as a set of learnable linear filters and convolve " + }, + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_i" + }, + { + "bbox": [ + 46, + 486, + 287, + 581 + ], + "type": "text", + "content": " with them to extract useful features and obtain the relationship among the blurry input, PSF, and the high-quality output in the feature domain. According to the properties of convolution, Eq. 2 turns into" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 103, + 587, + 287, + 600 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 587, + 287, + 600 + ], + "spans": [ + { + "bbox": [ + 103, + 587, + 287, + 600 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {i} \\mathbf {Y} _ {i} = \\mathbf {F} _ {i} \\left(\\mathbf {X} _ {i} \\otimes \\mathbf {B} _ {i}\\right) + \\mathbf {F} _ {i} \\mathbf {n}, \\tag {5}", + "image_path": "503beb20527b30ea87226e6356f6ffd25d890f79906520e55383e130b6a141b2.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 605, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 665 + ], + "type": "text", + "content": "where multiplying with " + }, + { + "bbox": [ + 46, + 605, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_i" + }, + { + "bbox": [ + 46, + 605, + 287, + 665 + ], + "type": "text", + "content": " is equivalent to convolving with " + }, + { + "bbox": [ + 46, + 605, + 287, + 665 + ], + "type": "inline_equation", + "content": "f_{i}" + }, + { + "bbox": [ + 46, + 605, + 287, + 665 + ], + "type": "text", + "content": ". Correspondingly, the above optimization in Eq. 4 is equivalent to finding a set of feature-based Wiener deconvolution operators " + }, + { + "bbox": [ + 46, + 605, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{G}_i" + }, + { + "bbox": [ + 46, + 605, + 287, + 665 + ], + "type": "text", + "content": " (which can be obtained based on the conclusion in [6]) to reverse the aberration by " + }, + { + "bbox": [ + 46, + 605, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_i" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 93, + 671, + 287, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 671, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 93, + 671, + 287, + 685 + ], + "type": "interline_equation", + "content": "\\mathbf {X} _ {i} ^ {*} = \\arg \\min \\left\\| \\mathbf {G} _ {i} \\mathbf {F} _ {i} \\mathbf {Y} _ {i} - \\mathbf {F} _ {i} \\mathbf {X} _ {i} \\right\\|. 
\\tag {6}", + "image_path": "346743162bbeca36a5f323a66b08220184465650742882ce984e0955816eccc1.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 689, + 288, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 288, + 712 + ], + "type": "text", + "content": "The compensation is implemented as a deep neural network with the network structure and learned in a data-driven" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 403, + 545, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 403, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 305, + 403, + 545, + 425 + ], + "type": "text", + "content": "manner. During training the loss function is designed in an intuitive manner:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 359, + 436, + 545, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 436, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 359, + 436, + 545, + 449 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {d e c o n v}} = \\left\\| \\mathbf {F} _ {i} \\mathbf {X} _ {i} - \\mathbf {G} _ {i} \\mathbf {F} _ {i} \\mathbf {Y} _ {i} \\right\\| _ {2}. \\tag {7}", + "image_path": "a597cc4d0f918d675129c324f825134a5dc971513b69cbe0ce4cd38976da1a4c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 459, + 547, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 459, + 547, + 495 + ], + "spans": [ + { + "bbox": [ + 305, + 459, + 547, + 495 + ], + "type": "text", + "content": "The network is designed to share the parameters across all scales except for the first encoder block at the first cascade, which helps achieve fast and high-quality deconvolution." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 504, + 428, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 504, + 428, + 514 + ], + "spans": [ + { + "bbox": [ + 306, + 504, + 428, + 514 + ], + "type": "text", + "content": "4.3. Attention-based Fusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 521, + 547, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 547, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 547, + 652 + ], + "type": "text", + "content": "According to Eq. 2, each deconvolution model can provide an estimation of the latent high-quality image, multiplied by a scaling factor. However, the potential inaccuracy in decomposition and artifacts in deconvolution would harm the quality of the final output. To overcome this challenge, we propose an effective strategy to streamline the reconstruction process. Specifically, we decompose more components from the blurry input and secondly apply the corresponding deconvolution to obtain multiple aberration-compensated versions, then fuse them together via a weight-trainable fusion network to raise robustness." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 653, + 547, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 547, + 712 + ], + "type": "text", + "content": "The above strategy introduces a substantial increase in running time. 
To accelerate the training of the deconvolution modules, we adopt a coarse-to-fine strategy, i.e., training a base model and subsequently fine-tuning it, which is largely faster than training all these basis-specific networks from" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24865" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 144 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 144 + ], + "type": "text", + "content": "scratch. Moreover, our investigation reveals that the errors in the decomposition module can harm the successive deconvolution. Consequently, we introduce the decomposition confidence to serve as a valuable indicator of the decomposition accuracy/reliability of the decomposition process, and use it to guide the fusion." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 154, + 128, + 167 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 154, + 128, + 167 + ], + "spans": [ + { + "bbox": [ + 47, + 154, + 128, + 167 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 173, + 288, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 173, + 288, + 245 + ], + "spans": [ + { + "bbox": [ + 46, + 173, + 288, + 245 + ], + "type": "text", + "content": "In this section, after describing the details of model training in Sec. 5.1, we first analyze the advantages of specific designs and key parameter settings in our approach (Subsection 5.2). Then, we demonstrate our superior performance against the state-of-the-art (SOTA) algorithms on synthetic (Subsection 5.3) and real data (Subsection 5.4)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 251, + 180, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 180, + 264 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 180, + 264 + ], + "type": "text", + "content": "5.1. Implementation Details" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 270, + 287, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 270, + 287, + 402 + ], + "spans": [ + { + "bbox": [ + 46, + 270, + 287, + 402 + ], + "type": "text", + "content": "For model training, we gather 300 images from the Flickr2K dataset for model training, ensuring wide applicability for diverse natural scenes. Specifically, we select 20 common commercial lenses and simulate their spatially varying PSF using Zemax software. Then we simulate the lens aberration via convolving 100 high-definition (2K) images from Flickr2K with the generated PSF and apply the successive operations to train the model, including annular decomposition, ring-to-rectangle warping, patch cropping, etc. In total, we actually obtained the sharp-blurry pair and PSF of 9200 " + }, + { + "bbox": [ + 46, + 270, + 287, + 402 + ], + "type": "inline_equation", + "content": "(100 \\times 92)" + }, + { + "bbox": [ + 46, + 270, + 287, + 402 + ], + "type": "text", + "content": " patches." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 402, + 288, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 402, + 288, + 450 + ], + "spans": [ + { + "bbox": [ + 46, + 402, + 288, + 450 + ], + "type": "text", + "content": "During model training, we adopt the Adam optimizer with default parameters. The learning rate is initialized as " + }, + { + "bbox": [ + 46, + 402, + 288, + 450 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 46, + 402, + 288, + 450 + ], + "type": "text", + "content": ", which is halved every 100 epochs. PyTorch code and trained models are available on our project page." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 456, + 265, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 456, + 265, + 468 + ], + "spans": [ + { + "bbox": [ + 47, + 456, + 265, + 468 + ], + "type": "text", + "content": "5.2. Influences of the Key Parameters/Settings" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 474, + 287, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 629 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 629 + ], + "type": "text", + "content": "Partitioning strategies. By capitalizing the rotational symmetry of camera lenses, we employ annular partitioning to divide the image into a sequence of concentric rings, where patches in each ring share a highly similar PSF after warping into a stripe. This strategy substantially reduces the spatial variance among PSFs across the field of view, consequently reducing the required number of bases for accurate PSF representation. The illustration and performance of the proposed partitioning method are depicted in Fig. 4(a), whereas Fig. 4(b) shows the counterpart of conventional grid partitioning. From the middle row, one can notice the highly consistent PSFs in the same annular ring in (a), in contrast to the large PSF difference among the patches in (b)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 630, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 288, + 713 + ], + "type": "text", + "content": "We also compare the final performance of two partitioning strategies on the synthetic dataset. Remarkably, when utilizing an equal number of bases (9 bases), the annular splitting reaches an impressive PSF representation accuracy of 0.93 and performs 30.16dB in the final reconstruction. In contrast, the conventional grid splitting yields a largely lower accuracy of 0.69 and the reconstruction achieves 27.77dB." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 325, + 72, + 420, + 160 + ], + "blocks": [ + { + "bbox": [ + 325, + 72, + 420, + 160 + ], + "lines": [ + { + "bbox": [ + 325, + 72, + 420, + 160 + ], + "spans": [ + { + "bbox": [ + 325, + 72, + 420, + 160 + ], + "type": "image", + "image_path": "81400036ee7dbdb5b2ce731e3e49a7d2d14cf2566ab21938cd71632068a3685e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 325, + 161, + 419, + 266 + ], + "blocks": [ + { + "bbox": [ + 325, + 161, + 419, + 266 + ], + "lines": [ + { + "bbox": [ + 325, + 161, + 419, + 266 + ], + "spans": [ + { + "bbox": [ + 325, + 161, + 419, + 266 + ], + "type": "image", + "image_path": "ddbc00530e18d1cd5c96f869264a5d82d8ffaa902af0393b908dc6f331a09829.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 368, + 266, + 377, + 274 + ], + "lines": [ + { + "bbox": [ + 368, + 266, + 377, + 274 + ], + "spans": [ + { + "bbox": [ + 368, + 266, + 377, + 274 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 432, + 72, + 526, + 160 + ], + "blocks": [ + { + "bbox": [ + 432, + 72, + 526, + 160 + ], + "lines": [ + { + "bbox": [ + 432, + 72, + 526, + 160 + ], + "spans": [ + { + "bbox": [ + 432, + 72, + 526, + 160 + ], + "type": "image", + "image_path": "3986b1a085b09317ce490ba9127143a6356dfa996265bf44074b250c4ec0c56b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 432, + 161, + 525, + 266 + ], + "blocks": [ + { + "bbox": [ + 432, + 161, + 525, + 266 + ], + "lines": [ + { + "bbox": [ + 432, + 161, + 525, + 266 + ], + "spans": [ + { + "bbox": [ + 432, + 161, + 525, + 266 + ], + "type": "image", + "image_path": "e628742b9c634d29f8176adc8ffe888c706307c17a6ac2a65ed6b3b03cf9dffd.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 475, + 266, + 484, + 274 + ], + "lines": [ + { + "bbox": [ + 475, + 266, + 484, + 274 + ], + "spans": [ + { + "bbox": [ + 475, + 266, + 484, + 274 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 281, + 547, + 347 + ], + "lines": [ + { + "bbox": [ + 305, + 281, + 547, + 347 + ], + "spans": [ + { + "bbox": [ + 305, + 281, + 547, + 347 + ], + "type": "text", + "content": "Figure 4. Comparison between the annular partitioning in our model (a) and conventional grid partitioning (b). Top row: the illustration of partitioning. Middle row: the PSFs within the highlighted regions in the top row, with the left one stretched to a rectangle. Bottom row: the aberration compensation results with the same number of bases (9 in our experiment)." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 358, + 547, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 358, + 547, + 395 + ], + "spans": [ + { + "bbox": [ + 305, + 358, + 547, + 395 + ], + "type": "text", + "content": "Visually, we show an example in the 3rd row of Fig. 4, which shows that annular partitioning exhibits noticeable advantageous enhancement." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 398, + 547, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 398, + 547, + 578 + ], + "spans": [ + { + "bbox": [ + 304, + 398, + 547, + 578 + ], + "type": "text", + "content": "The number of annular rings. From the previous experiment, we came to the conclusion that annular partitioning is a better option. Further, we study the proper setting of the number of concentric rings, which will directly affect the amount of calculation and the precision of PSF representation. Although increasing the number of split rings can handle the radical difference of lens aberration better, it also brings higher computational complexity. Hence, we traverse several levels of radical division, i.e., the number of annular rings to find a good balance between precision and efficiency. The results are shown in Table 1, which shows that for usual commercial lenses, the performance improvement becomes marginal when the number grows beyond five. Therefore, we compromised and chose to split into 5 rings in our experiments." + } + ] + } + ], + "index": 17 + }, + { + "type": "table", + "bbox": [ + 309, + 598, + 542, + 647 + ], + "blocks": [ + { + "bbox": [ + 325, + 584, + 525, + 596 + ], + "lines": [ + { + "bbox": [ + 325, + 584, + 525, + 596 + ], + "spans": [ + { + "bbox": [ + 325, + 584, + 525, + 596 + ], + "type": "text", + "content": "Table 1. The performance at varying numbers of rings." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 598, + 542, + 647 + ], + "lines": [ + { + "bbox": [ + 309, + 598, + 542, + 647 + ], + "spans": [ + { + "bbox": [ + 309, + 598, + 542, + 647 + ], + "type": "table", + "html": "
# of Rings | Patch Size (pixels) | Data Volume | PSNR (dB) | SSIM
3 | 214×214 | 75 | 29.12 | 0.914
5 | 128×128 | 92 | 30.96 | 0.952
7 | 92×92 | 132 | 30.99 | 0.953
9 | 72×72 | 160 | 30.91 | 0.950
", + "image_path": "a35e3b5447959db413d7f4f0106bdc0769a4fc7cd034a9942028aa2c68016384.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 653, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 653, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 653, + 547, + 713 + ], + "type": "text", + "content": "The number of bases. We applied ONMF to obtain a low dimensional representation of the lens aberration, i.e., PSF. Intuitively, more bases provide a higher PSF representation accuracy that helps the reconstruction but tends to increase the ill-posedness of the decomposition network and the num" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "24866" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "content": "ber of required deconvolution modules, harming the final performance as well as the running efficiency on the contrary. Therefore, pursuing the optimal number of bases is of crucial importance. As Tab. 2 shows, we test the performance with different numbers of bases and obtain a good balance at 9 bases, with high quality and low computational complexity." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 167, + 284, + 216 + ], + "blocks": [ + { + "bbox": [ + 55, + 152, + 277, + 164 + ], + "lines": [ + { + "bbox": [ + 55, + 152, + 277, + 164 + ], + "spans": [ + { + "bbox": [ + 55, + 152, + 277, + 164 + ], + "type": "text", + "content": "Table 2. The performances using different numbers of bases." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 167, + 284, + 216 + ], + "lines": [ + { + "bbox": [ + 50, + 167, + 284, + 216 + ], + "spans": [ + { + "bbox": [ + 50, + 167, + 284, + 216 + ], + "type": "table", + "html": "
# of bases | 5 | 7 | 9 | 11 | 13
SSIM | 0.901 | 0.927 | 0.952 | 0.955 | 0.955
PSNR (dB) | 28.14 | 29.12 | 30.96 | 31.01 | 31.00
Training time | ~9 hrs | ~11 hrs | ~12 hrs | ~16 hrs | ~18 hrs
", + "image_path": "388eb160e39ca86001e7d68af41165c66441ed73c05cac7221cac146b7630555.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 224, + 289, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 224, + 289, + 331 + ], + "spans": [ + { + "bbox": [ + 46, + 224, + 289, + 331 + ], + "type": "text", + "content": "The applicability to various lenses. After determining the optimal number of bases, we test on 5 low-end commercial lenses (not included in the lens dataset) to verify the generalization ability of our model. By calculating the representation accuracy of the new lenses and the final lens correction result in Fig. 5, we arrive at two conclusions: the reconstruction quality is directly proportional to the accuracy of PSF representation; our approach is widely applicable for diverse lenses." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 67, + 342, + 267, + 438 + ], + "blocks": [ + { + "bbox": [ + 67, + 342, + 267, + 438 + ], + "lines": [ + { + "bbox": [ + 67, + 342, + 267, + 438 + ], + "spans": [ + { + "bbox": [ + 67, + 342, + 267, + 438 + ], + "type": "image", + "image_path": "c9c709edc2c6fe61cccc7bf127461f8e55cdf1c329774a8ad07594ab13f47106.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 69, + 438, + 141, + 559 + ], + "blocks": [ + { + "bbox": [ + 69, + 438, + 141, + 559 + ], + "lines": [ + { + "bbox": [ + 69, + 438, + 141, + 559 + ], + "spans": [ + { + "bbox": [ + 69, + 438, + 141, + 559 + ], + "type": "image", + "image_path": "0465a978f5c2a83828c257ea5e2b605db2a461b9406ae64a71122d4b6931d048.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 569, + 289, + 658 + ], + "lines": [ + { + "bbox": [ + 46, + 569, + 289, + 658 + ], + "spans": [ + { + "bbox": [ + 46, + 569, + 289, + 658 + ], + "type": "text", + "content": "Figure 5. Our performance on diverse lenses. (a) The representation precision (horizontal axis) and their performance (vertical axis) on 5 test lenses, in terms of PSNR. Here the scattered box plot is drawn from the compensation results of 20 test images synthesized from the Flickr2K dataset. (b) PSF (left column) and our compensation result (right column, with the bottom-left and top-right insets being input and output respectively) of two lenses with the best and worst performance in (a)." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 149, + 438, + 205, + 559 + ], + "blocks": [ + { + "bbox": [ + 149, + 438, + 205, + 559 + ], + "lines": [ + { + "bbox": [ + 149, + 438, + 205, + 559 + ], + "spans": [ + { + "bbox": [ + 149, + 438, + 205, + 559 + ], + "type": "image", + "image_path": "bd684930377f643a8041092a9461b64c9ffe6e96317958fd4f0f2642dde83ada.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 209, + 438, + 266, + 559 + ], + "blocks": [ + { + "bbox": [ + 209, + 438, + 266, + 559 + ], + "lines": [ + { + "bbox": [ + 209, + 438, + 266, + 559 + ], + "spans": [ + { + "bbox": [ + 209, + 438, + 266, + 559 + ], + "type": "image", + "image_path": "bc7811f851e1ddea4467bfaad220ebb13ed6a28ad75be2bdc211de9b3efe7d17.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 665, + 290, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 290, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 290, + 715 + ], + "type": "text", + "content": "Balancing both effectiveness and efficiency, we empirically divide the original low-quality images " + }, + { + "bbox": [ + 46, + 665, + 290, + 715 + ], + "type": "inline_equation", + "content": "(1280\\times 1280" + }, + { + "bbox": [ + 46, + 665, + 290, + 715 + ], + "type": "text", + "content": " pixels) into 5 concentric rings and select 9 bases (preserving an impressive " + }, + { + "bbox": [ + 46, + 665, + 290, + 715 + ], + "type": "inline_equation", + "content": "96.6\\%" + }, + { + "bbox": [ + 46, + 665, + 290, + 715 + ], + "type": "text", + "content": " of the PSF variation in our lens" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 546, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 546, + 97 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 546, + 97 + ], + "type": "text", + "content": "database) in the final implementations of our model, based on the above quantitative experiment results." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 103, + 451, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 103, + 451, + 117 + ], + "spans": [ + { + "bbox": [ + 305, + 103, + 451, + 117 + ], + "type": "text", + "content": "5.3. Performance Comparison" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 121, + 547, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 121, + 547, + 312 + ], + "spans": [ + { + "bbox": [ + 304, + 121, + 547, + 312 + ], + "type": "text", + "content": "We compare our approach with SOTA blind (DeblurGANv2 [17], MPRNet [26]) and non-blind (DPIR [12], DWDN [6]) deblurring methods, including both optimization-based and CNN-based algorithms. The results are shown in Tab. 3 and Fig. 6, from which one can observe the following trends: (i) Blind deblurring algorithms generally exhibit good performance in addressing motion blur problems, but in terms of lens aberration correction, our approach performs better than SOTAs and yields notably clearer images with finer details and fewer artifacts. (ii) By employing our low-rank PSF learning model, the PSF can be efficiently characterized, facilitating blind deconvolution to attain performance on par with or even superior non-blind algorithms. 
Overall, we achieve blind lens aberration correction, surpassing the SOTA blind deblurring methods, and perform on par with non-blind approaches." + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 308, + 328, + 544, + 372 + ], + "blocks": [ + { + "bbox": [ + 317, + 315, + 534, + 326 + ], + "lines": [ + { + "bbox": [ + 317, + 315, + 534, + 326 + ], + "spans": [ + { + "bbox": [ + 317, + 315, + 534, + 326 + ], + "type": "text", + "content": "Table 3. Quantitative performance comparison with SOTAs" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 328, + 544, + 372 + ], + "lines": [ + { + "bbox": [ + 308, + 328, + 544, + 372 + ], + "spans": [ + { + "bbox": [ + 308, + 328, + 544, + 372 + ], + "type": "table", + "html": "
Method | DeblurGANv2 | MPRNet | Eboli's | DPIR | DWDN | Ours
Blind? | ✓ | ✓ | ✓ | × | × | ✓
PSNR (dB) | 23.04 | 28.67 | 29.42 | 31.86 | 31.78 | 30.96
SSIM | 0.726 | 0.927 | 0.934 | 0.962 | 0.960 | 0.952
", + "image_path": "858866131bbebe40f1ab5a4987c6ea7c2b014047bd04b93161882e98f386b68f.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 383, + 410, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 383, + 410, + 396 + ], + "spans": [ + { + "bbox": [ + 305, + 383, + 410, + 396 + ], + "type": "text", + "content": "5.4. Real experiments" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 396, + 547, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 396, + 547, + 552 + ], + "spans": [ + { + "bbox": [ + 304, + 396, + 547, + 552 + ], + "type": "text", + "content": "To test the performance on real data, we use several low-end compact commercial cameras composed of a simple lens to capture low-quality images and computationally raise their visual quality with our model. Besides the results in Fig. 1, we show two more examples in Fig. 7, with the photo of the cameras in the left column, blurry input in the 2nd column, and the final reconstructed high-quality outputs in the rightmost column. We also show the side-by-side zoomed-in comparison of highlighted regions in the 3rd column for better visualization. One can see that the details in both the center and the periphery are recovered decently. Also, the consistently improved quality validates the wide applicability of the proposed approach to various lenses." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 562, + 379, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 562, + 379, + 574 + ], + "spans": [ + { + "bbox": [ + 305, + 562, + 379, + 574 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 582, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 548, + 715 + ], + "type": "text", + "content": "We have reported a versatile scheme capable of compensating lens aberrations of various lenses in an end-to-end manner and without model retraining or refinement. 
The universality of our approach stems from two key designs: Firstly, we incorporate the key physical properties inherent in camera lenses, such as rotational symmetry and low-dimensional structure after ring-to-rectangle warping; (ii) we integrate a deep neural network to reverse the aberration in a divide-and-conquer manner, i.e., decompose the low-quality input into basic components corresponding to the low-dimensional compositions of the aberration model, and" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24867" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 64, + 67, + 144, + 148 + ], + "blocks": [ + { + "bbox": [ + 64, + 67, + 144, + 148 + ], + "lines": [ + { + "bbox": [ + 64, + 67, + 144, + 148 + ], + "spans": [ + { + "bbox": [ + 64, + 67, + 144, + 148 + ], + "type": "image", + "image_path": "3d6708e46782f832076713302bd294b0e602ad1a8562a3d3da39d2b6a797f8cd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 85, + 148, + 126, + 156 + ], + "lines": [ + { + "bbox": [ + 85, + 148, + 126, + 156 + ], + "spans": [ + { + "bbox": [ + 85, + 148, + 126, + 156 + ], + "type": "text", + "content": "(a) Blurry input" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 145, + 67, + 200, + 121 + ], + "blocks": [ + { + "bbox": [ + 145, + 67, + 200, + 121 + ], + "lines": [ + { + "bbox": [ + 145, + 67, + 200, + 121 + ], + "spans": [ + { + "bbox": [ + 145, + 67, + 200, + 121 + ], + "type": "image", + "image_path": "2f4155c4e9d2a5eea28bab7898c49251973670356b63bb3f9f27f0f55d7c892e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 146, + 125, + 200, + 178 + ], + "blocks": [ + { + "bbox": [ + 146, + 125, + 200, + 178 + ], + "lines": [ + { + "bbox": [ + 146, + 125, + 200, + 178 + ], + "spans": [ + { + "bbox": [ + 146, + 125, + 200, + 178 + ], + "type": "image", + "image_path": "e37504446d61bd82f9dca711fa56e716608e726e387a3c595364f1e951fdfa4c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 203, + 67, + 256, + 121 + ], + "blocks": [ + { + "bbox": [ + 203, + 67, + 256, + 121 + ], + "lines": [ + { + "bbox": [ + 203, + 67, + 256, + 121 + ], + "spans": [ + { + "bbox": [ + 203, + 67, + 256, + 121 + ], + "type": "image", + "image_path": "025a9c7f5cfac3f1983445bddfe79e45f13c2712953ff95b25d3af5c9544501e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 258, + 67, + 310, + 121 + ], + "blocks": [ + { + "bbox": [ + 258, + 67, + 310, + 121 + ], + "lines": [ + { + "bbox": [ + 258, + 67, + 310, + 121 + ], + "spans": [ + { + "bbox": [ + 258, + 67, + 310, + 121 + ], + "type": "image", + "image_path": "d46f7b7fa4ad932f21b8e884ca7954509695e1320ee0fe72abdca7d0b82c285a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 312, + 67, + 364, + 121 + ], + "blocks": [ + { + "bbox": [ + 312, + 67, + 364, + 121 
+ ], + "lines": [ + { + "bbox": [ + 312, + 67, + 364, + 121 + ], + "spans": [ + { + "bbox": [ + 312, + 67, + 364, + 121 + ], + "type": "image", + "image_path": "7dacc45d77cccf420f3e60001db170c9590eb961fc22362905883171701f5c82.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 365, + 67, + 418, + 121 + ], + "blocks": [ + { + "bbox": [ + 365, + 67, + 418, + 121 + ], + "lines": [ + { + "bbox": [ + 365, + 67, + 418, + 121 + ], + "spans": [ + { + "bbox": [ + 365, + 67, + 418, + 121 + ], + "type": "image", + "image_path": "a367d9b369b9ec397ae39913b6fb782152e29741e5059e661bbfc2fef159d33f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 419, + 67, + 472, + 121 + ], + "blocks": [ + { + "bbox": [ + 419, + 67, + 472, + 121 + ], + "lines": [ + { + "bbox": [ + 419, + 67, + 472, + 121 + ], + "spans": [ + { + "bbox": [ + 419, + 67, + 472, + 121 + ], + "type": "image", + "image_path": "2194fa63f306ccd56c37c3359752938e92c924bcaa92aaaa488d86f4a77b9f77.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 473, + 67, + 526, + 121 + ], + "blocks": [ + { + "bbox": [ + 473, + 67, + 526, + 121 + ], + "lines": [ + { + "bbox": [ + 473, + 67, + 526, + 121 + ], + "spans": [ + { + "bbox": [ + 473, + 67, + 526, + 121 + ], + "type": "image", + "image_path": "ce58de75d2c27ab2b2dd3c237256a19db03c317b951e0699fa105e444d5ef8a5.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 63, + 157, + 144, + 238 + ], + "blocks": [ + { + "bbox": [ + 63, + 157, + 144, + 238 + ], + "lines": [ + { + "bbox": [ + 63, + 157, + 144, + 238 + ], + "spans": [ + { + "bbox": [ + 63, + 157, + 144, + 238 + ], + "type": "image", + "image_path": "88ad4ed0a95cca921a94000abb74874044871fc9f657b307ff59e2d5235d81aa.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 86, + 243, + 126, + 251 + ], + "lines": [ + { + "bbox": [ + 86, + 243, + 126, + 251 + ], + "spans": [ + { + "bbox": [ + 86, + 243, + 126, + 251 + ], + "type": "text", + "content": "(b) Our output" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 146, + 183, + 200, + 238 + ], + "blocks": [ + { + "bbox": [ + 146, + 183, + 200, + 238 + ], + "lines": [ + { + "bbox": [ + 146, + 183, + 200, + 238 + ], + "spans": [ + { + "bbox": [ + 146, + 183, + 200, + 238 + ], + "type": "image", + "image_path": "ab191d559b74034c93012e7e746a64be3ed3c99aeb4b9bc5be2411610d39eaf8.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 243, + 198, + 251 + ], + "lines": [ + { + "bbox": [ + 149, + 243, + 198, + 251 + ], + "spans": [ + { + "bbox": [ + 149, + 243, + 198, + 251 + ], + "type": "text", + "content": "(c) Blurry patches" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 203, + 125, + 256, + 178 + ], + "blocks": [ + { + "bbox": [ + 203, + 125, + 256, + 178 + ], + "lines": [ + { + "bbox": [ + 203, + 125, + 256, + 178 + ], + "spans": [ + { + "bbox": [ + 203, + 125, + 256, + 178 + ], + "type": "image", + "image_path": "7558571b35c189440432710e460498c37c6f49cdac61c450e1128c316421efd2.jpg" + } + ] + } + ], + "index": 14, 
+ "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 203, + 184, + 256, + 238 + ], + "blocks": [ + { + "bbox": [ + 203, + 184, + 256, + 238 + ], + "lines": [ + { + "bbox": [ + 203, + 184, + 256, + 238 + ], + "spans": [ + { + "bbox": [ + 203, + 184, + 256, + 238 + ], + "type": "image", + "image_path": "d0abac9e698509ac7d95f522160f3c54a1d087ad91b5fc330639f3af41e6e8e2.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 205, + 243, + 252, + 251 + ], + "lines": [ + { + "bbox": [ + 205, + 243, + 252, + 251 + ], + "spans": [ + { + "bbox": [ + 205, + 243, + 252, + 251 + ], + "type": "text", + "content": "(d) DeblurGANv2" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 258, + 125, + 310, + 178 + ], + "blocks": [ + { + "bbox": [ + 258, + 125, + 310, + 178 + ], + "lines": [ + { + "bbox": [ + 258, + 125, + 310, + 178 + ], + "spans": [ + { + "bbox": [ + 258, + 125, + 310, + 178 + ], + "type": "image", + "image_path": "1b04f8eae0522bc236b30021e7adf2559742e494e16f66bc403bbf3dffb42088.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 258, + 184, + 310, + 238 + ], + "blocks": [ + { + "bbox": [ + 258, + 184, + 310, + 238 + ], + "lines": [ + { + "bbox": [ + 258, + 184, + 310, + 238 + ], + "spans": [ + { + "bbox": [ + 258, + 184, + 310, + 238 + ], + "type": "image", + "image_path": "52d12fe2ed12f164333671d028facf29a41ddae821c8c01d1e1450d863382b2a.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 266, + 243, + 299, + 251 + ], + "lines": [ + { + "bbox": [ + 266, + 243, + 299, + 251 + ], + "spans": [ + { + "bbox": [ + 266, + 243, + 299, + 251 + ], + "type": "text", + "content": "(e) MPRNet" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 312, + 125, + 364, + 178 + ], + "blocks": [ + { + "bbox": [ + 312, + 125, + 364, + 178 + ], + "lines": [ + { + "bbox": [ + 312, + 125, + 364, + 178 + ], + "spans": [ + { + "bbox": [ + 312, + 125, + 364, + 178 + ], + "type": "image", + "image_path": "d70e5b0408c59ba873b3e2a10d1c4a66709e860e7d9442452f64e3224aff7298.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 312, + 184, + 364, + 238 + ], + "blocks": [ + { + "bbox": [ + 312, + 184, + 364, + 238 + ], + "lines": [ + { + "bbox": [ + 312, + 184, + 364, + 238 + ], + "spans": [ + { + "bbox": [ + 312, + 184, + 364, + 238 + ], + "type": "image", + "image_path": "840978a1a4c8174b2d7334a6c62696daffba4dbf61023d9da030a678c72c4489.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 243, + 347, + 251 + ], + "lines": [ + { + "bbox": [ + 324, + 243, + 347, + 251 + ], + "spans": [ + { + "bbox": [ + 324, + 243, + 347, + 251 + ], + "type": "text", + "content": "(f) DPIR" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 365, + 125, + 418, + 178 + ], + "blocks": [ + { + "bbox": [ + 365, + 125, + 418, + 178 + ], + "lines": [ + { + "bbox": [ + 365, + 125, + 418, + 178 + ], + "spans": [ + { + "bbox": [ + 365, + 125, + 418, + 178 + ], + "type": "image", + "image_path": 
"fcf9f27376e8b76cd1b2c694621b5e464bf8c3bade0224f741440ed9ed2dc85f.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 365, + 184, + 418, + 238 + ], + "blocks": [ + { + "bbox": [ + 365, + 184, + 418, + 238 + ], + "lines": [ + { + "bbox": [ + 365, + 184, + 418, + 238 + ], + "spans": [ + { + "bbox": [ + 365, + 184, + 418, + 238 + ], + "type": "image", + "image_path": "1877293483ff65f20cc42b99c1c4fbb6a8c9a4723d849003180f3914913ea034.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 378, + 243, + 407, + 251 + ], + "lines": [ + { + "bbox": [ + 378, + 243, + 407, + 251 + ], + "spans": [ + { + "bbox": [ + 378, + 243, + 407, + 251 + ], + "type": "text", + "content": "(g)DWDN" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 419, + 125, + 472, + 178 + ], + "blocks": [ + { + "bbox": [ + 419, + 125, + 472, + 178 + ], + "lines": [ + { + "bbox": [ + 419, + 125, + 472, + 178 + ], + "spans": [ + { + "bbox": [ + 419, + 125, + 472, + 178 + ], + "type": "image", + "image_path": "bed0ee63cec21be53609195cda412e0831b013b339052eaa5c69830bc08d830b.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 419, + 184, + 472, + 238 + ], + "blocks": [ + { + "bbox": [ + 419, + 184, + 472, + 238 + ], + "lines": [ + { + "bbox": [ + 419, + 184, + 472, + 238 + ], + "spans": [ + { + "bbox": [ + 419, + 184, + 472, + 238 + ], + "type": "image", + "image_path": "914b407719f43d536ccf7dfea48debf26b9335466508700221f7cfc12b2c1edb.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 432, + 243, + 455, + 251 + ], + "lines": [ + { + "bbox": [ + 432, + 243, + 455, + 251 + ], + "spans": [ + { + "bbox": [ + 432, + 243, + 455, + 251 + ], + "type": "text", + "content": "(h) Ours" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 473, + 125, + 526, + 178 + ], + "blocks": [ + { + "bbox": [ + 473, + 125, + 526, + 178 + ], + "lines": [ + { + "bbox": [ + 473, + 125, + 526, + 178 + ], + "spans": [ + { + "bbox": [ + 473, + 125, + 526, + 178 + ], + "type": "image", + "image_path": "8da7297b7ce2cbc1513af0cf468623e6d3778a0548a6443711932273c860a22e.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 473, + 184, + 526, + 238 + ], + "blocks": [ + { + "bbox": [ + 473, + 184, + 526, + 238 + ], + "lines": [ + { + "bbox": [ + 473, + 184, + 526, + 238 + ], + "spans": [ + { + "bbox": [ + 473, + 184, + 526, + 238 + ], + "type": "image", + "image_path": "12c0aa09441262812c1214feac0e38373e3627911f256e4d700d9db9b3befe32.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 478, + 243, + 520, + 251 + ], + "lines": [ + { + "bbox": [ + 478, + 243, + 520, + 251 + ], + "spans": [ + { + "bbox": [ + 478, + 243, + 520, + 251 + ], + "type": "text", + "content": "(i) Ground truth" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 67, + 304, + 171, + 533 + ], + "blocks": [ + { + "bbox": [ + 46, + 256, + 547, + 300 + ], + "lines": [ + { + "bbox": [ + 46, + 256, + 547, + 300 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 547, + 300 
+ ], + "type": "text", + "content": "Figure 6. Performance comparison with SOTA methods. (a) The input blurry image. (b) The result after aberration correction by our model. (c-i) The comparison among the results produced by different SOTA algorithms (d-h), in contrast to the blurry input (c) and ground-truth sharp version (i), with PSNR and SSIM scores presented in Tab. 3. Here we compare three ROIs, which are cropped from different locations to demonstrate the performance on non-uniform lens aberrations. Note that (d-h) are blind compensation results and (f)(g) are non-blind." + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 304, + 171, + 533 + ], + "lines": [ + { + "bbox": [ + 67, + 304, + 171, + 533 + ], + "spans": [ + { + "bbox": [ + 67, + 304, + 171, + 533 + ], + "type": "image", + "image_path": "e532c30da681326f55d30f0ff4a60400416d6ea3bb0ab04e91802f5f863ea227.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 83, + 535, + 156, + 544 + ], + "lines": [ + { + "bbox": [ + 83, + 535, + 156, + 544 + ], + "spans": [ + { + "bbox": [ + 83, + 535, + 156, + 544 + ], + "type": "text", + "content": "(a) Experimental lenses" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 45, + 548, + 547, + 593 + ], + "lines": [ + { + "bbox": [ + 45, + 548, + 547, + 593 + ], + "spans": [ + { + "bbox": [ + 45, + 548, + 547, + 593 + ], + "type": "text", + "content": "Figure 7. Results on real data captured by two compact cameras with a low-end lens. (a) The photo of two lenses, with the smaller one being only around " + }, + { + "bbox": [ + 45, + 548, + 547, + 593 + ], + "type": "inline_equation", + "content": "1\\mathrm{cm}" + }, + { + "bbox": [ + 45, + 548, + 547, + 593 + ], + "type": "text", + "content": ", which is highly portable but exhibits significant aberrations. (b)(d) Raw images captured using the cameras in (a) and the results after compensation. (c) The zoomed-in view of the highlighted regions in (b) and (d), distinctly showcases the performance at both regions closer to the center (red boxes) and toward the corners (yellow boxes) of the sensor's field of view." 
+ } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 173, + 304, + 288, + 418 + ], + "blocks": [ + { + "bbox": [ + 173, + 304, + 288, + 418 + ], + "lines": [ + { + "bbox": [ + 173, + 304, + 288, + 418 + ], + "spans": [ + { + "bbox": [ + 173, + 304, + 288, + 418 + ], + "type": "image", + "image_path": "4bbd9908935deef34c0b0d16973d38c535ea9d8d71cb53c4176776a3c5a2fc29.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 174, + 419, + 288, + 533 + ], + "blocks": [ + { + "bbox": [ + 174, + 419, + 288, + 533 + ], + "lines": [ + { + "bbox": [ + 174, + 419, + 288, + 533 + ], + "spans": [ + { + "bbox": [ + 174, + 419, + 288, + 533 + ], + "type": "image", + "image_path": "b1ffba0cdd15c38df41ea6b2ba6eba8ddb6953d972dd4c80bad7e9ef83d87fd3.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 218, + 535, + 246, + 544 + ], + "lines": [ + { + "bbox": [ + 218, + 535, + 246, + 544 + ], + "spans": [ + { + "bbox": [ + 218, + 535, + 246, + 544 + ], + "type": "text", + "content": "(b) Input" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 291, + 304, + 350, + 361 + ], + "blocks": [ + { + "bbox": [ + 291, + 304, + 350, + 361 + ], + "lines": [ + { + "bbox": [ + 291, + 304, + 350, + 361 + ], + "spans": [ + { + "bbox": [ + 291, + 304, + 350, + 361 + ], + "type": "image", + "image_path": "56a50073158cfbf3667c323c885ff91001a0385a06a04097f96e7f6f6cbb2409.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 291, + 362, + 348, + 418 + ], + "blocks": [ + { + "bbox": [ + 291, + 362, + 348, + 418 + ], + "lines": [ + { + "bbox": [ + 291, + 362, + 348, + 418 + ], + "spans": [ + { + "bbox": [ + 291, + 362, + 348, + 418 + ], + "type": "image", + "image_path": "21a076746255712c3fc7776d2b74056b485425da292ae51ca2863e94a030c87b.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 291, + 419, + 348, + 475 + ], + "blocks": [ + { + "bbox": [ + 291, + 419, + 348, + 475 + ], + "lines": [ + { + "bbox": [ + 291, + 419, + 348, + 475 + ], + "spans": [ + { + "bbox": [ + 291, + 419, + 348, + 475 + ], + "type": "image", + "image_path": "855efd7e6db84251c201d5474af50eae3026297c55451fcd3c8944738d931b08.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 291, + 476, + 348, + 533 + ], + "blocks": [ + { + "bbox": [ + 291, + 476, + 348, + 533 + ], + "lines": [ + { + "bbox": [ + 291, + 476, + 348, + 533 + ], + "spans": [ + { + "bbox": [ + 291, + 476, + 348, + 533 + ], + "type": "image", + "image_path": "bb24b2b2a6ef4c2b04da8352f107285151eef55bc551238924ea4b933dcfb9dc.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 318, + 535, + 381, + 544 + ], + "lines": [ + { + "bbox": [ + 318, + 535, + 381, + 544 + ], + "spans": [ + { + "bbox": [ + 318, + 535, + 381, + 544 + ], + "type": "text", + "content": "(c) ROI performance" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_caption" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 350, + 304, + 408, + 361 + ], + "blocks": [ + { + "bbox": [ + 350, + 304, + 408, + 361 + ], + "lines": 
[ + { + "bbox": [ + 350, + 304, + 408, + 361 + ], + "spans": [ + { + "bbox": [ + 350, + 304, + 408, + 361 + ], + "type": "image", + "image_path": "2963efe7968fc4f43438d6c60d5a95e68ef91b86c059e197e07e1c3f24b6c945.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + } + ], + "index": 42 + }, + { + "type": "image", + "bbox": [ + 350, + 362, + 407, + 418 + ], + "blocks": [ + { + "bbox": [ + 350, + 362, + 407, + 418 + ], + "lines": [ + { + "bbox": [ + 350, + 362, + 407, + 418 + ], + "spans": [ + { + "bbox": [ + 350, + 362, + 407, + 418 + ], + "type": "image", + "image_path": "5654099080b2869d56a544cfcca9a886676c9a522458fcb27e4f1ed284be141a.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 350, + 419, + 407, + 475 + ], + "blocks": [ + { + "bbox": [ + 350, + 419, + 407, + 475 + ], + "lines": [ + { + "bbox": [ + 350, + 419, + 407, + 475 + ], + "spans": [ + { + "bbox": [ + 350, + 419, + 407, + 475 + ], + "type": "image", + "image_path": "5ff22a150fb68abacd93ab365993a8d6bd61073bd46ed21838f22f9623055409.jpg" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_body" + } + ], + "index": 44 + }, + { + "type": "image", + "bbox": [ + 350, + 476, + 407, + 533 + ], + "blocks": [ + { + "bbox": [ + 350, + 476, + 407, + 533 + ], + "lines": [ + { + "bbox": [ + 350, + 476, + 407, + 533 + ], + "spans": [ + { + "bbox": [ + 350, + 476, + 407, + 533 + ], + "type": "image", + "image_path": "a3d308c3c4bd0f01d0ba1369b7e225cf03bee1c89bd6edf4ca604ce5cc9113e5.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 409, + 304, + 523, + 418 + ], + "blocks": [ + { + "bbox": [ + 409, + 304, + 523, + 418 + ], + "lines": [ + { + "bbox": [ + 409, + 304, + 523, + 418 + ], + "spans": [ + { + "bbox": [ + 409, + 304, + 523, + 418 + ], + "type": "image", + "image_path": "e054d41dd8f0b4bd142c9d7a34a6afe7019feaf1c68153472921f35c221345b7.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + } + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 410, + 419, + 523, + 533 + ], + "blocks": [ + { + "bbox": [ + 410, + 419, + 523, + 533 + ], + "lines": [ + { + "bbox": [ + 410, + 419, + 523, + 533 + ], + "spans": [ + { + "bbox": [ + 410, + 419, + 523, + 533 + ], + "type": "image", + "image_path": "10c339dd1b81f4bea479f0420d2c29e727a7b3fc7e643e67135d3f367ef35309.jpg" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 450, + 535, + 483, + 544 + ], + "lines": [ + { + "bbox": [ + 450, + 535, + 483, + 544 + ], + "spans": [ + { + "bbox": [ + 450, + 535, + 483, + 544 + ], + "type": "text", + "content": "(d) Output" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_caption" + } + ], + "index": 48 + }, + { + "bbox": [ + 46, + 605, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 677 + ], + "type": "text", + "content": "then adopt pre-trained compensation modules to reconstruct the high-quality image with high robustness. The proposed approach offers high generalization ability to diverse lenses and requires no expertise-demanding calibration. Moreover, we achieve performance comparable to existing methods with careful calibration and lens-specific model training." 
+ } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": "So far, our experiments assume sufficient exposure and high pixel count, but we acknowledge the potential for future enhancements, such as accounting for more realistic noise" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 304, + 605, + 547, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 701 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 701 + ], + "type": "text", + "content": "models and addressing other degradations like downsampling. Looking ahead, we envision further advancements of our model, focusing on the development of a lightweight network and on-chip implementation. As computational aberration compensation continues to progress, our method serves as a promising step towards enabling practical and cost-effective optical aberration correction for a wide range of applications." + } + ] + } + ], + "index": 53 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24868" + } + ] + } + ], + "index": 54 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Samuel Arba-Mosquera, Shwetabh Verma, and Shady T Awwad. Theoretical effect of coma and spherical aberrations translation on refractive error and higher order aberrations. In Photonics, page 116. MDPI, 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 169 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 169 + ], + "type": "text", + "content": "[2] Johannes Brauers, Claude Seiler, and Til Aach. Direct psf estimation using a random noise target. In Digital Photography VI, pages 96-105. SPIE, 2010. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 171, + 288, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 171, + 288, + 215 + ], + "spans": [ + { + "bbox": [ + 53, + 171, + 288, + 215 + ], + "type": "text", + "content": "[3] Ayan Chakrabarti, Todd Zickler, and William T Freeman. Analyzing spatially-varying blur. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2512-2519. IEEE, 2010. 
3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 217, + 288, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 217, + 288, + 249 + ], + "spans": [ + { + "bbox": [ + 53, + 217, + 288, + 249 + ], + "type": "text", + "content": "[4] Jinlin Cui and Wei Huang. Optical aberration correction for simple lenses via sparse representation. Optics Communications, pages 201-213, 2018. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 251, + 288, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 251, + 288, + 295 + ], + "spans": [ + { + "bbox": [ + 53, + 251, + 288, + 295 + ], + "type": "text", + "content": "[5] Maurizio Delbracio, Ignacio Garcia-Dorado, SungJoon Choi, Damien Kelly, and Peyman Milanfar. Polyblur: Removing mild blur by polynomial reburring. IEEE Transactions on Computational Imaging, pages 837-848, 2021. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 296, + 288, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 296, + 288, + 339 + ], + "spans": [ + { + "bbox": [ + 53, + 296, + 288, + 339 + ], + "type": "text", + "content": "[6] Jiangxin Dong, Stefan Roth, and Bernt Schiele. DWDN: deep wiener deconvolution network for non-blind image deblurring. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 9960-9976, 2021. 4, 5, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 341, + 288, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 341, + 288, + 384 + ], + "spans": [ + { + "bbox": [ + 53, + 341, + 288, + 384 + ], + "type": "text", + "content": "[7] Thomas Eboli, Jean-Michel Morel, and Gabriele Facciolo. Fast two-step blind optical aberration correction. In European Conference on Computer Vision, pages 693-708. Springer, 2022. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 386, + 288, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 386, + 288, + 430 + ], + "spans": [ + { + "bbox": [ + 53, + 386, + 288, + 430 + ], + "type": "text", + "content": "[8] T Furieri, A Bassi, and S Bonora. Large field of view aberrations correction with deformable lenses and multi conjugate adaptive optics. Journal of Biophotonics, page e202300104, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 432, + 288, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 432, + 288, + 475 + ], + "spans": [ + { + "bbox": [ + 53, + 432, + 288, + 475 + ], + "type": "text", + "content": "[9] O García-Lievanos and S Vázquez-Montiel. Free system of spherical and coma aberrations by use aspherical and diffractive surfaces. In AIP Conference Proceedings, pages 659-664. American Institute of Physics, 2008. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 477, + 288, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 477, + 288, + 521 + ], + "spans": [ + { + "bbox": [ + 48, + 477, + 288, + 521 + ], + "type": "text", + "content": "[10] Felix Heide, Mushfiqur Rouf, Matthias B Hullin, Bjorn Labitzke, Wolfgang Heidrich, and Andreas Kolb. High-quality computational imaging through simple lenses. ACM Transactions on Graphics, pages 1-14, 2013. 
3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 522, + 288, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 522, + 288, + 555 + ], + "spans": [ + { + "bbox": [ + 48, + 522, + 288, + 555 + ], + "type": "text", + "content": "[11] Michael Hirsch and Bernhard Scholkopf. Self-calibration of optical lenses. In Proceedings of the IEEE International Conference on Computer Vision, pages 612-620, 2015. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 557, + 288, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 557, + 288, + 623 + ], + "spans": [ + { + "bbox": [ + 48, + 557, + 288, + 623 + ], + "type": "text", + "content": "[12] Zhanli Hu, Hengzhi Xue, Qiyang Zhang, Juan Gao, Na Zhang, Sijuan Zou, Yueyang Teng, Xin Liu, Yongfeng Yang, Dong Liang, et al. DPIR-Net: Direct pet image reconstruction based on the wasserstein generative adversarial network. IEEE Transactions on Radiation and Plasma Medical Sciences, pages 35–43, 2020. 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 624, + 288, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 668 + ], + "type": "text", + "content": "[13] Qi Jiang, Hao Shi, Lei Sun, Shaohua Gao, Kailun Yang, and Kaiwei Wang. Annular computational imaging: Capture clear panoramic images through simple lens. IEEE Transactions on Computational Imaging, 8:1250-1264, 2022. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "text", + "content": "[14] Jaihyun Koh, Jangho Lee, and Sungroh Yoon. Single-image deblurring with neural networks: A comparative survey. Computer Vision and Image Understanding, page 103134, 2021. 1" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 714 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 118 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 118 + ], + "type": "text", + "content": "[15] Amit Kohli, Anastasios Angelopoulos, Sixian You, and Laura Waller. Shift-variant deblurring for rotationally symmetric systems. In Computational Optical Sensing and Imaging, pages CTh5A-4. Optica Publishing Group, 2021. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 119, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 119, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 547, + 162 + ], + "type": "text", + "content": "[16] Amit Kohli, Anastasios Angelopoulos, Sixian You, Kyrolos Yanny, and Laura Waller. Linear revolution-invariance: Modeling and deblurring spatially-varying imaging systems. arXiv preprint arXiv:2206.08928, 2022. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 163, + 547, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 163, + 547, + 217 + ], + "spans": [ + { + "bbox": [ + 308, + 163, + 547, + 217 + ], + "type": "text", + "content": "[17] Orest Kupyn, Tetiana Martyniuk, Junru Wu, and Zhangyang Wang. Deblurring-v2: Deblurring (orders-of-magnitude) faster and better. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8878-8887, 2019. 3, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 219, + 547, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 219, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 308, + 219, + 547, + 262 + ], + "type": "text", + "content": "[18] Anat Levin, Rob Fergus, Frédo Durand, and William T Freeman. Image and depth from a conventional camera with a coded aperture. ACM Transactions on Graphics, pages 70-81, 2007. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 264, + 547, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 264, + 547, + 306 + ], + "spans": [ + { + "bbox": [ + 308, + 264, + 547, + 306 + ], + "type": "text", + "content": "[19] Weili Li, Xiaoqing Yin, Yu Liu, and Maojun Zhang. Computational imaging through chromatic aberration corrected simple lenses. Journal of Modern Optics, pages 2211-2220, 2017. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 309, + 547, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 309, + 547, + 364 + ], + "spans": [ + { + "bbox": [ + 308, + 309, + 547, + 364 + ], + "type": "text", + "content": "[20] Xiu Li, Jinli Suo, Weihang Zhang, Xin Yuan, and Qionghai Dai. Universal and flexible optical aberration correction using deep-prior based deconvolution. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2613-2621, 2021. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 365, + 547, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 365, + 547, + 409 + ], + "spans": [ + { + "bbox": [ + 308, + 365, + 547, + 409 + ], + "type": "text", + "content": "[21] Esther YH Lin, Zhecheng Wang, Rebecca Lin, Daniel Miau, Florian Kainz, Jiawen Chen, Xuaner Cecilia Zhang, David B Lindell, and Kiriakos N Kutulakos. Learning lens blur fields. arXiv preprint arXiv:2310.11535, 2023. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 411, + 547, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 411, + 547, + 453 + ], + "spans": [ + { + "bbox": [ + 308, + 411, + 547, + 453 + ], + "type": "text", + "content": "[22] Ting Lin, ShiQi Chen, Huajun Feng, Zhihai Xu, Qi Li, and Yueting Chen. Non-blind optical degradation correction via frequency self-adaptive and finetune tactics. Optics Express, pages 23485-23498, 2022. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 456, + 547, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 456, + 547, + 499 + ], + "spans": [ + { + "bbox": [ + 308, + 456, + 547, + 499 + ], + "type": "text", + "content": "[23] Alice Lucas, Michael Iliadis, Rafael Molina, and Aggelos K Katsaggelos. Using deep neural networks for inverse problems in imaging: Beyond analytical methods. IEEE Signal Processing Magazine, pages 20-36, 2018. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 501, + 547, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 501, + 547, + 533 + ], + "spans": [ + { + "bbox": [ + 308, + 501, + 547, + 533 + ], + "type": "text", + "content": "[24] Virendra N Mahajan. Zernike annular polynomials for imaging systems with annular pupils. Journal of the Optical Society of America, pages 75-85, 1981. 
2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 535, + 547, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 535, + 547, + 567 + ], + "spans": [ + { + "bbox": [ + 308, + 535, + 547, + 567 + ], + "type": "text", + "content": "[25] James P McGuire and Russell A Chipman. Polarization aberrations. 1. rotationally symmetric optical systems. Applied optics, pages 5080-5100, 1994. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 568, + 547, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 568, + 547, + 622 + ], + "spans": [ + { + "bbox": [ + 308, + 568, + 547, + 622 + ], + "type": "text", + "content": "[26] Armin Mehri, Parichehr B Ardakani, and Angel D Sappa. MPRNet: Multi-path residual network for lightweight image super resolution. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2704-2713, 2021. 7" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 624, + 547, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 547, + 677 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 547, + 677 + ], + "type": "text", + "content": "[27] Ali Mosleh, Paul Green, Emmanuel Onzon, Isabelle Begin, and JM Pierre Langlois. Camera intrinsic blur kernel estimation: A reliable framework. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4961-4968, 2015. 1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 680, + 547, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 680, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 308, + 680, + 547, + 714 + ], + "type": "text", + "content": "[28] Gregory Ongie, Ajil Jalal, Christopher A Metzler, Richard G Baraniuk, Alexandros G Dimakis, and Rebecca Willett. Deep learning techniques for inverse problems in imaging. IEEE" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "24869" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 66, + 72, + 288, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 288, + 94 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 288, + 94 + ], + "type": "text", + "content": "Journal on Selected Areas in Information Theory, pages 39-56, 2020. 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 95, + 288, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 95, + 288, + 138 + ], + "spans": [ + { + "bbox": [ + 48, + 95, + 288, + 138 + ], + "type": "text", + "content": "[29] Kambiz Rahbar and Karim Faez. Blind correction of lens aberration using zernike moments. In IEEE International Conference on Image Processing, pages 861-864. IEEE, 2011. 
2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 140, + 288, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 288, + 194 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 288, + 194 + ], + "type": "text", + "content": "[30] Abderrahmane Rahiche and Mohamed Cheriet. Forgery detection in hyperspectral document images using graph orthogonal nonnegative matrix factorization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 662-663, 2020. 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 195, + 288, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 195, + 288, + 248 + ], + "spans": [ + { + "bbox": [ + 48, + 195, + 288, + 248 + ], + "type": "text", + "content": "[31] Jiangpeng Rong, Shiyao Huang, Zeyu Shang, and Xianghua Ying. Radial lens distortion correction using convolutional neural networks trained with synthesized images. In *Asian Conference on Computer Vision*, pages 35-49. Springer, 2017. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 250, + 288, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 250, + 288, + 293 + ], + "spans": [ + { + "bbox": [ + 48, + 250, + 288, + 293 + ], + "type": "text", + "content": "[32] Christian J Schuler, Michael Hirsch, Stefan Harmeling, and Bernhard Schölkopf. Blind correction of optical aberrations. In European Conference on Computer Vision, pages 187-200. Springer, 2012. 1, 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 294, + 288, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 294, + 288, + 337 + ], + "spans": [ + { + "bbox": [ + 48, + 294, + 288, + 337 + ], + "type": "text", + "content": "[33] Yichang Shih, Brian Guenter, and Neel Joshi. Image enhancement using calibrated lens simulations. In European Conference on Computer Vision, pages 42-56. Springer, 2012. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 338, + 288, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 338, + 288, + 382 + ], + "spans": [ + { + "bbox": [ + 48, + 338, + 288, + 382 + ], + "type": "text", + "content": "[34] Tiancheng Sun, Yifan Peng, and Wolfgang Heidrich. Revisiting cross-channel information transfer for chromatic aberration correction. In Proceedings of the IEEE International Conference on Computer Vision, pages 3248-3256, 2017. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 383, + 288, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 383, + 288, + 426 + ], + "spans": [ + { + "bbox": [ + 48, + 383, + 288, + 426 + ], + "type": "text", + "content": "[35] Huixuan Tang and Kiriakos N Kutulakos. What does an aberrated photo tell us about the lens and the scene? In IEEE International Conference on Computational Photography, pages 1-10. IEEE, 2013. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 426, + 288, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 426, + 288, + 491 + ], + "spans": [ + { + "bbox": [ + 48, + 426, + 288, + 491 + ], + "type": "text", + "content": "[36] Kaiyi Tang, Shuangyang Zhang, Yang Wang, Xiaoming Zhang, Zhenyang Liu, Zhichao Liang, Huafeng Wang, Lingjian Chen, Wufan Chen, and Li Qi. Learning spatially variant degradation for unsupervised blind photoacoustic tomography image restoration. Photoacoustics, page 100536, 2023. 
3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 492, + 288, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 492, + 288, + 525 + ], + "spans": [ + { + "bbox": [ + 48, + 492, + 288, + 525 + ], + "type": "text", + "content": "[37] Berge Tatian. Aberration balancing in rotationally symmetric lenses. Journal of the Optical Society of America, pages 1083-1091, 1974. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 526, + 288, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 526, + 288, + 580 + ], + "spans": [ + { + "bbox": [ + 48, + 526, + 288, + 580 + ], + "type": "text", + "content": "[38] Chao Wang, Juan Chen, Hongguang Jia, Baosong Shi, Ruifei Zhu, Qun Wei, Linyao Yu, and Mingda Ge. Parameterized modeling of spatially varying psf for lens aberration and defocus. Journal of the Optical Society of Korea, pages 136-143, 2015. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 581, + 288, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 581, + 288, + 625 + ], + "spans": [ + { + "bbox": [ + 48, + 581, + 288, + 625 + ], + "type": "text", + "content": "[39] Pei Wang, Wei Sun, Qingsen Yan, Axi Niu, Rui Li, Yu Zhu, Jinqiu Sun, and Yanning Zhang. Non-uniform motion deblurring with blurry component divided guidance. Pattern Recognition, page 108082, 2021. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 625, + 288, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 288, + 669 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 288, + 669 + ], + "type": "text", + "content": "[40] Esther Whang, David McAllister, Ashwin Reddy, Amit Kohli, and Laura Waller. Seidelnet: an aberration-informed deep learning model for spatially varying deblurring. In AI and Optical Data Sciences IV, pages 276-281. SPIE, 2023. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "text", + "content": "[41] Chudan Wu, Yan Wo, Guoqing Han, Zhangyong Wu, and Jiyun Liang. Non-uniform image blind deblurring by two-stage fully convolution network. IET Image Processing, pages 2588-2596, 2020. 1" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 441 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 307, + 73, + 546, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 546, + 117 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 546, + 117 + ], + "type": "text", + "content": "[42] Zhenhua Xu, Huasong Chen, and Zhenhua Li. Fast blind deconvolution using a deeper sparse patch-wise maximum gradient prior. Signal Processing: Image Communication, page 116050, 2021. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 118, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 547, + 162 + ], + "type": "text", + "content": "[43] Jianchao Yang, John Wright, Thomas Huang, and Yi Ma. Image super-resolution as sparse representation of raw image patches. In IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 
1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 163, + 547, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 547, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 547, + 196 + ], + "type": "text", + "content": "[44] Kyrollos Yanny, Kristina Monakhova, Richard W Shuai, and Laura Waller. Deep learning for fast spatially varying deconvolution. Optica, pages 96-99, 2022. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 197, + 547, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 547, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 547, + 251 + ], + "type": "text", + "content": "[45] Tao Yue, Jinli Suo, Jue Wang, Xun Cao, and Qionghai Dai. Blind optical aberration correction by exploring geometric and visual priors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1684-1692, 2015. 1, 2, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 252, + 547, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 252, + 547, + 296 + ], + "spans": [ + { + "bbox": [ + 307, + 252, + 547, + 296 + ], + "type": "text", + "content": "[46] Dazhi Zhan, Weili Li, Xiaoqing Yin, Caiyun Niu, and Jin Liu. Psf estimation method of simple-lens camera using normal sinh-arcsinh model based on noise image pairs. IEEE Access, pages 49338-49353, 2021. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 297, + 547, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 297, + 547, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 297, + 547, + 342 + ], + "type": "text", + "content": "[47] Kaihao Zhang, Wenqi Ren, Wenhan Luo, Wei-Sheng Lai, Björn Stenger, Ming-Hsuan Yang, and Hongdong Li. Deep image deblurring: A survey. International Journal of Computer Vision, pages 2103-2130, 2022. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 342, + 547, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 342, + 547, + 397 + ], + "spans": [ + { + "bbox": [ + 307, + 342, + 547, + 397 + ], + "type": "text", + "content": "[48] Zhihong Zhang, Yuxiao Cheng, Jinli Suo, Liheng Bian, and Qionghai Dai. INFWIDE: Image and feature space wiener deconvolution network for non-blind image deblurring in low-light conditions. IEEE Transactions on Image Processing, pages 1390-1402, 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 398, + 547, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 398, + 547, + 441 + ], + "spans": [ + { + "bbox": [ + 307, + 398, + 547, + 441 + ], + "type": "text", + "content": "[49] Changyin Zhou, Stephen Lin, and Shree K Nayar. Coded aperture pairs for depth from defocus and defocus deblurring. International Journal of Computer Vision, pages 53-72, 2011. 
3" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "24870" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/f5f3c319-3887-4d17-8f43-fce0198c0c77_content_list.json b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/f5f3c319-3887-4d17-8f43-fce0198c0c77_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7db52b3c97a6927a155fb85f13c45ef16fc05a50 --- /dev/null +++ b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/f5f3c319-3887-4d17-8f43-fce0198c0c77_content_list.json @@ -0,0 +1,1232 @@ +[ + { + "type": "text", + "text": "A Picture is Worth More Than 77 Text Tokens: Evaluating CLIP-Style Models on Dense Captions", + "text_level": 1, + "bbox": [ + 233, + 130, + 738, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jack Urbanek*† Florian Bordes1,2,3† Pietro Astolfi1 Mary Williamson1 Vasu Sharma1 Adriana Romero-Soriano1,3,4,5", + "bbox": [ + 86, + 200, + 880, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ FAIR, Meta $^{2}$ Mila $^{3}$ Universite de Montreal, $^{4}$ McGill University $^{5}$ Canada CIFAR AI chair", + "bbox": [ + 109, + 253, + 851, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 308, + 313, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Curation methods for massive vision-language datasets trade off between dataset size and quality. However, even the highest quality of available curated captions are far too short to capture the rich visual detail in an image. To show the value of dense and highly-aligned image-text pairs, we collect the Densely Captioned Images (DCI) dataset, containing 7805 natural images human-annotated with mask-aligned descriptions averaging above 1000 words each. With precise and reliable captions associated with specific parts of an image, we can evaluate vision-language models' (VLMs) understanding of image content with a novel task that matches each caption with its corresponding subcrop. As current models are often limited to 77 text tokens, we also introduce a summarized version (sDCI) in which each caption length is limited. We show that modern techniques that make progress on standard benchmarks do not correspond with significant improvement on our sDCI based benchmark. Lastly, we finetune CLIP using sDCI and show significant improvements over the baseline despite a small training set. By releasing the first human annotated dense image captioning dataset, we hope to enable the development of new benchmarks or fine-tuning recipes for the next generation of VLMs to come.", + "bbox": [ + 76, + 340, + 472, + 690 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 717, + 209, + 734 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "State-of-the-art vision-language models (VLMs) are often trained on large scale datasets such as LAION-400M [28], YFCC100M [34], or other undisclosed datasets crawled from the web. These datasets are formed by collecting images from the web and using alt-text (or other local text on the webpage) to create loose image-text pairs. These can then be filtered down trading off on quantity for quality [26, 30]. Still, recent work has demonstrated that throwing these", + "bbox": [ + 75, + 743, + 468, + 864 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "loose captions out entirely in favor of generated captions, with enhanced quality and density, can produce improved results [10]. Other works [1, 20, 21, 38] have demonstrated that it is possible to get CLIP-level performance using a vastly reduced compute, often by throwing away portions of the data resulting in more balance between image and text modalities. However, those approaches rely on automatic pipelines which do not generate reliable and long captions that can capture rich visual details in an image. From this it appears no existing dataset has high-quality image descriptions that are tightly-coupled enough with the image to train for or evaluate a deep alignment between the two domains.", + "bbox": [ + 500, + 310, + 890, + 491 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In the absence of high quality captions to evaluate VLMs, benchmarks such as ARO [42] and VL-Checklist [45] often complement image-caption pairs with hard negatives that are generated by slightly altering the initial (positive) description. Progress on these benchmarks has been rooted in training VLMs with negatives of similar construction to the tests [42] rendering the methodologies ineffective on datasets such as Winoground [35]. Recent works [22] have called the evaluation capacity of many of these benchmarks into question, given how effective language-prior-based methods perform. More specifically, given the unlikelihood of the hard negative captions in these benchmarks, a good text encoder can achieve close to $100\\%$ accuracy without looking at the images. Moreover, Bordes et al. [3] have shown that most improvements observed on ARO or VL-Checklist do not translate on simple synthetic benchmarks for which the negative caption is as likely as the positive one. Since the use of VLMs is significantly increasing, it is crucial to make sure that we have a diverse suite of reliable benchmarks to assess their abilities.", + "bbox": [ + 496, + 492, + 892, + 792 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we introduce the Densely Captioned Images dataset, a collection of 7805 images with dense and mask-aligned descriptions averaging above 1000 words each. One such example is provided in Figure 1, displaying just a subset of the collected text paired with their aligned masks. We demonstrate how to leverage this dataset to evaluate VLMs in two ways after summarizing captions", + "bbox": [ + 496, + 795, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Work done while at Meta", + "bbox": [ + 94, + 875, + 243, + 886 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Equal contribution", + "bbox": [ + 96, + 887, + 209, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "26700", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7f6543bdabddedeba6c92fb7457f95520997ceb0aafab20b4e17c13566ebc735.jpg", + "image_caption": [ + "Figure 1. One example from the Densely Captioned Images dataset. Only part of the submask hierarchy is shown." + ], + "image_footnote": [], + "bbox": [ + 135, + 99, + 848, + 739 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to fit into CLIP's 77 token limit, both with a negatives-based test as well as a novel matching task, referred to as subcrop-caption matching, that requires selecting appropriate captions for different regions of the same image. We evaluate existing baselines, and observe that no models perform well at both concurrently, and improved", + "bbox": [ + 75, + 801, + 472, + 895 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "performance via negatives-based training comes at the cost of decreased performance on subcrop-caption matching. We also run some experiments using the summarized DCI as a fine-tuning dataset to evaluate the effectiveness of these captions for improving a model's performance on other benchmarks, and compare the efficiency per-example", + "bbox": [ + 496, + 801, + 893, + 895 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "26701", + "bbox": [ + 478, + 944, + 517, + 957 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to that from the automated annotation setup in DAC [10]. To summarize, our contributions are:", + "bbox": [ + 76, + 90, + 454, + 119 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We release the Densely Captioned Images (DCI) dataset, which contains dense and mask-aligned captions, alongside an LLM-summarized version (sDCI) containing captions under 77 tokens for use with current VLMs.", + "- We provide a new benchmark for VLMs based on sDCI to evaluate fine-grained vision-language understanding, and show that no existing model can perform well at matching captions from within one image to corresponding subsections of that image.", + "- We show that fine-tuning with high quality image-caption pairs is as good on ARO and VL-Checklist as fine-tuning on at least $10 \\times$ the automatically annotated data, and that even without utilizing explicit negatives these pairs can improve performance on VL-C-Object from $81.17\\%$ to $88.37\\%$ ." + ], + "bbox": [ + 76, + 123, + 467, + 347 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 76, + 366, + 225, + 382 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The massive, loosely-labeled dataset approach that has enabled VLMs like CLIP [27] and powerful successors like BLIP2 [19], Flamingo [2], CM3leon [41], and many others, has been a clear forward step in vision-language modeling. Still recent benchmarks show that models trained in this manner display clear drawbacks in reasoning skills. 
Additional techniques have been proposed and adopted recently to close this gap, discussed below.", + "bbox": [ + 75, + 393, + 467, + 513 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Vision-Language Datasets. Over the last decade, there have been significant dataset collection efforts connecting images and text. Earlier works focused on curating datasets by leveraging human annotations, see e.g., COCO [8], Visual Genome [16], and Flickr30k [40]. The process resulted in high quality annotations, which were however oftentimes limited by the caption content - i.e., relatively short phrases (5.1 to 10.3 words on average) grounded at image level or region level - and the data annotation scale (30k to 130k images). To increase scale, researchers gathered web-crawled data and introduced large scale datasets such as YFCC100M [34], which contains 100M media objects. Yet, crawling the web oftentimes results in little correspondence between image and text pairs. To reduce noise between image and text pairs, efforts such as SBU [24] queried Flickr and filtered the noisy results, obtaining a $\\sim 1\\mathrm{M}$ images. Moreover, Conceptual Captions (CC) [30] crawled a dataset of $\\sim 12\\mathrm{M}$ images and alt-text pairs, and included a protocol to filter noisy text-image pairs, resulting in 3M data points. Relaxing the filtering protocol allows to trade data quality for scale. Crawling alt-text also resulted in relatively short text descriptions with 10.3 words on average, which are most often grounded at image level. Localized Narratives [25] was introduced", + "bbox": [ + 75, + 537, + 467, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "as a dense visual grounding dataset leveraging a multimodal annotation procedure, collecting $\\sim 850\\mathrm{k}$ text-image pairs with 36.5 words/caption on average. **RedCaps** [9] constituted another effort yielding large scale ( $\\sim 12\\mathrm{M}$ ) web-curated data by exploring alternate data sources of high quality data instead of devising complex filtering strategies. Wikipedia-based image-text dataset (WIT) [32] extended dataset creation efforts by gathering a multilingual dataset of text-image-pairs consisting of $\\sim 11.5\\mathrm{M}$ images. LAION-5B [29] further increased the web-crawling efforts by gathering a multilingual dataset of text-image pairs, and filtered the collected data with a pre-trained CLIP [27] model. Following, LAION-CAT [26] reduced noisy examples from LAION-5B by filtering for caption complexity, i.e., captions that do not contain any action, and for text spotting, i.e., images that contain rendered text. **MetaCLIP** [39] has also been released as an open dataset for reproducing CLIP. These very large scale datasets have been successfully used to advance the state-of-the-art of VLMs.", + "bbox": [ + 496, + 90, + 890, + 378 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Vision-Language Evaluation Benchmarks. Several recent advances in visual-language learning have focused on creating comprehensive benchmarks to evaluate model performance in more holistic ways. These benchmarks are instrumental in pushing the envelope of what VLM can understand and process, ensuring they move beyond superficial image-text matching towards genuine understanding of intricate relationships between visual and linguistic elements. 
In particular, VL-CheckList [45] and ARO [42] assess the VLM capabilities beyond average downstream task accuracy, by focusing on a model's ability to understand objects, attributes, order or relations. ARO's extensive scope, uncovers limitations in VLMs such as poor relational understanding and lack of order sensitivity. Winoground [35] tests models for visio-linguistic compositional reasoning by asking VLM to match two images with two captions containing the same set of words but in different orders. This task requires models to discern the meaning conveyed by the order of words, reflecting different visual scenes. Current VLMs perform only marginally better than chance, highlighting a significant gap in compositional reasoning. CREPE (Compositional REpresentation Evaluation) [23] evaluates two aspects of compositionality: systematicity and productivity. Systematicity is measured by the model's ability to represent seen versus unseen atoms and their compositions, while productivity gauges the model's capacity to understand an unbounded set of increasingly complex expressions. Finally, PUG (Photorealistic Unreal Graphics) [3] uses synthetic data to assess the compositional reasoning abilities of VLMs by progressively increasing the complexity of a given generated scene. One issue with these evaluation datasets is their frequent reliance on COCO, either directly as in ARO, or through Visual Genome as in", + "bbox": [ + 496, + 402, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "26702", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "VL-Checklist or CREPE. It is difficult to find an evaluation dataset of sufficient scale without COCO.", + "bbox": [ + 76, + 90, + 468, + 119 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Vision-Language models. Recent VLM advancements have built upon the foundational work of CLIP [27], which leveraged large-scale image-text pairs to jointly pre-train an image encoder and a text encoder to predict which images are paired with which texts in a contrastive learning paradigm. NegCLIP build upon CLIP by leveraging negative captions when training. BLIP (Bootstrapping Language-Image Pre-training) [18] uses a new framework that bootstraps the captions from noisy web data for both understanding and generation tasks. Its successor BLIP-2 [19] further streamlines the process by utilizing off-the-shelf frozen pre-trained image encoders and language models, bridging the modality gap with a lightweight querying mechanism. Clip-rocket [12] improves VLM baselines by showing that applying image and text augmentations makes up for most of the improvement attained by prior VLMs. Flava [31] proposes a foundation VLM model by combining existing VLMs objectives together with auxiliary in-modality losses for the text and vision encoders. X-VLM [43] achieves success with a pretraining method matching sub- portions of the text to regions of the image at multiple granularities. These models introduces improvements over CLIP, focusing on efficiency, adaptability, and reducing the need for extensive labeled datasets, thereby pushing the boundaries of vision-language pre-training. The closest work to our approach is DAC (Densely Aligned Captions) [10], which improves with an automated LLM based pipeline the caption quality and density. 
By showing that DAC-enhanced CLIP models exhibit substantial gains on some benchmarks, this work underscores the critical role that caption quality and density play in the efficacy of VLMs. We build on this insight and explore how to further increase the caption quality and density by relying on human annotators, and analyze how that impacts downstream model performance.", + "bbox": [ + 76, + 138, + 472, + 667 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Dataset Construction", + "text_level": 1, + "bbox": [ + 76, + 679, + 282, + 695 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The Densely Captioned Images dataset, or DCI, consists of 7805 images from SA-1B [15], each with a complete description aiming to capture the full visual detail of what is present in the image. Much of the description is directly aligned to submasks of the image. An example is shown in Figure 1. In the top left we see the full image of a water pump, with an associated description. The italicized section is collected as a standard caption, aiming to summarize the full image in about a sentence, similar to existing caption datasets. The remainder of that first description contains details about the relationship between visible entities in the image, as well as in-depth descriptions of regions that are not described as part of the submasks. All other", + "bbox": [ + 76, + 704, + 472, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "text describing the image is associated with submasks of the image. Each submask has its own free-text label (not pictured) and description, and may also contain further submasks. Here for instance we see submasks for windows and balconies as being contained in the submask capturing three buildings in the background.", + "bbox": [ + 496, + 90, + 893, + 183 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Preparation", + "text_level": 1, + "bbox": [ + 500, + 191, + 629, + 209 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In order to collect the data, we first select images from a random privacy-mitigated subset of SA-1B. We then procedurally extract subregions of each image to annotate, as we found in initial trials that crowdsourcing both regions and descriptions concurrently overcomplicated the task and successful annotation rate. For this process, we turn to the Segment Anything Model (SAM) [15] and adapt their standard method to extract all masks from an image.", + "bbox": [ + 496, + 215, + 893, + 335 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For the extraction process, SAM usually relies on a grid of points across the entire image. In order to increase the possibility of selecting interesting regions worth annotating, we additionally apply a canny filter [4] and select random points within a radius from discovered edges. We then run SAM to detect all masks using both the grid and the near-edge points. Once the masks are returned, we establish a hierarchy of submasks by thresholding the number of overlapping pixels between two masks to determine if one is a submask of the other, or if the two masks should be joined. This helps reduce some of the noise introduced by the automatic masking process, and leaves us with a tree-like structure for the masks. Lastly, we remove any masks that are too small. 
We note that undergoing this process does not result in every detail of each image being selected as a candidate for annotation, and as such instances in the DCI dataset are not expected to have complete submask-aligned coverage of all elements one could recognize in or discuss about an image.", + "bbox": [ + 496, + 337, + 893, + 609 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Collection Process", + "text_level": 1, + "bbox": [ + 500, + 619, + 676, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We use Mephisto [37] to host our task, pay crowdworkers to provide annotations on the dataset, and additionally run qualification steps. Workers that pass our qualifications are eligible to work on the main task which contains 3 stages:", + "bbox": [ + 496, + 643, + 890, + 703 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1. Workers are provided with the whole image, and asked to provide a short description of it. This is considered the standard caption.", + "bbox": [ + 500, + 704, + 890, + 748 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2. Workers are provided with submasks of the image, one at a time starting with the leaves of the mask tree, displaying a SAM-selected region of the image as well as an indicator for where that region comes from. They are generally asked to provide a label and complete description for the pictured region, though are allowed to mark the region as 'uninteresting' and only provide a label, or 'bad' and provide nothing. These options allow us to focus worker time on useful annotations and help capture some of the noise of the automatic selection pro", + "bbox": [ + 500, + 750, + 893, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "26703", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a95679abff74c421fb7258abfea90d8523cd65a8d27b1e1068b9aed4a41dc101.jpg", + "image_caption": [ + "Figure 2. Annotation view for writing description for masks of the image. The masked region appears highlighted for clarity." + ], + "image_footnote": [], + "bbox": [ + 117, + 88, + 851, + 313 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6f66c7649ed1279ac6568d0737850cc9b090d120ab21eb4808d4af70773fa917.jpg", + "image_caption": [ + "Figure 3. Example of a Llama2-generated summary and negative that comprise sDCI. Each image and submask have multiple summarizations and negatives. We also compare the caption quality between DAC [10] and DCI. In contrast to DCI that relies on human annotations, DAC used an automatic pipeline based on LLM for captioning. As we observe in this example, the DAC captions can suffer from hallucinations and miss important elements of the photo. In this work we argue that while improving automatic pipeline is an important research direction, for now the captions proposed are not reliable enough to be used to evaluate models and assess their abilities." + ], + "image_footnote": [], + "bbox": [ + 78, + 359, + 336, + 659 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A black metal water pump in a town square in the middle of a circular stone shape across from several buildings. Black metal water pump in foreground with spigot in front and curved handle with globular end. It is located in the middle of a smaller circle of stones. There is some grass growing in between some of the stones. There is a metal grated drain near the bottom of the pump. 
To its left is a dark brown metal manhole cover and behind it is a grey stone tiled road. The road leads to three buildings across the street with storefronts on the bottom level and different units with balconies and large windows on the second and third level. There are people out front walking by the lower level. There are benches out front. Human annotated", + "bbox": [ + 348, + 366, + 851, + 460 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A black metal water pump stands in a town square, surrounded by a circle of stones, with buildings and people in the background LLM Summary", + "bbox": [ + 348, + 472, + 830, + 496 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A pump stands metal water black in square town, surrounded by stones circle, with background buildings people in.", + "bbox": [ + 348, + 512, + 859, + 536 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "DAC Quality: a fire hydrant in the middle of a city street \nDAC Dense: 1 A fire hydrant can be seen in the center of a street, its metallic spray emanating water vapor from a nozzle as it shoots water into the air. 2 The spray creates a steady waterfall, making it appear as though the hydrant is spraying water from its nozzle the whole time. 3 The spray is relatively smooth, creating a gradual wave that covers a large portion of a city block. \nDAC SAM: a black plastic drain cover, a building, a water fountain, a bicycle, a water fountain, the image shows a circular stone path with a circular stone base, a water pump, the logo of the company, the image shows a black metal handle with a long handle, home bank, a dragon", + "bbox": [ + 346, + 556, + 857, + 650 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "cess. This is shown in Figure 2. For masks that contain submasks, workers are also provided with overlays that show the regions already annotated, and are asked to annotate in terms of what has already been written.", + "bbox": [ + 94, + 773, + 468, + 833 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3. After completing all the submasks, the worker is then shown the complete image again and asked to provide an overall description, paying attention to the relationship between previously annotated regions.", + "bbox": [ + 76, + 834, + 468, + 895 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "An in-depth description of the filtering and quality assurance process can be found in Appendix 8 while the Datasheet [13] is available in Appendix 12. Complete annotation instructions, dataset download links as well as reproducible code are available on our GitHub1. The DCI dataset is released under the CC-BY-NC license.", + "bbox": [ + 500, + 773, + 890, + 864 + ], + "page_idx": 4 + }, + { + "type": "aside_text", + "text": "DCI (Our)", + "bbox": [ + 870, + 443, + 885, + 484 + ], + "page_idx": 4 + }, + { + "type": "aside_text", + "text": "DAC", + "bbox": [ + 870, + 592, + 885, + 613 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "DatasetImgSCapsToks/CapToks/ImgDCI7,8057,8051,282.091,282.09\\( DCI_{sub} \\)96,00796,007199.33199.33sDCI8,01287,26849.21536.00\\( sDCI_{sub} \\)96,007714,63036.60263.01\\( LN_{COCO} \\)142,845142,84549.1149.11\\( LN_{COCO<77} \\)127,456127,45643.7043.70COCO123,287616,76713.5467.74", + "bbox": [ + 503, + 88, + 890, + 227 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Comparison of DCI dataset statistics to other datasets, focusing on average CLIP tokens per image or caption. 
Note the $26\\mathbf{x}$ difference between DCI and the previous longest annotated dataset, Localized Narratives (LN). sub denotes including sub-masks and their descriptions as examples, and sDCI refers to the LLM-summarized version of DCI that fits captions to 77 tokens (Sec. 3.3), while $\\mathrm{LN}_{\\mathrm{COCO}} < 77$ simply drops examples longer than 77 tokens ( $\\sim 10.8\\%$ ).", + "bbox": [ + 496, + 238, + 890, + 349 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Evaluating VLMs with summarized DCI", + "text_level": 1, + "bbox": [ + 498, + 377, + 864, + 393 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Methodology", + "text_level": 1, + "bbox": [ + 500, + 404, + 637, + 420 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Using the 7805 images in the summarized Densely Captioned Images (sDCI) dataset, we construct a few different evaluations. As noted above, the ability to select multiple submasks from the same image and include them in the same batch allows us to create a CLIP-style test, wherein the model can evaluate a full batch of images and captions and score correctly which caption belongs to which image. As we provide models with a crop around the selected masks, we call this Subcrop-Caption Matching (SCM), and we use a batch size of 8. We can run against our LLM-generated negatives as well. Given that LLM-summarization has provided us with multiple captions and negatives per image and submask, we supply the first unless noted otherwise. With this in mind, we construct 6 evaluations as follows:", + "bbox": [ + 496, + 428, + 890, + 638 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "[All SCM]: Group each image with their subcrops, alongside one summarized caption per subcrop. Then use the model to find the most likely caption associated to each subcrop. This test measures the ability of the VLM to distinguish between the different parts that compose an image.[2]", + "bbox": [ + 496, + 638, + 890, + 715 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "[All Neg]: Select one LLM summarized caption and the corresponding LLM-generated negative for each image and subcrop. Score a model on its ability to distinguish between the positive and negative.", + "bbox": [ + 496, + 715, + 890, + 775 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "[All Pick5-SCM]: Use the same setup as All SCM, but rather than using only one caption per subcrop, we use 5 LLM generated captions per subcrop. We score a model as succeeding only when the worst-scoring positive caption", + "bbox": [ + 496, + 775, + 890, + 835 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "2Since we used sDCI to fit current models token length, it is possible that some of the summaries remove the information that make possible to distinguish between the captions. Ideally this test should be performed on the non-summaramized version once VLMs can handle 1000+ tokens.", + "bbox": [ + 498, + 849, + 890, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "26705", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/dfa10fb6dc7a8594a7560277c92a125bd8ac8838cfef67224abb22fec7c1c529.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelAllAll Pick5BaseAll
SCMNegSCMNegNegHard Negs
CLIP Baseline [27]40.06%60.79%11.21%24.06%67.56%41.34%
NegCLIP [42]43.35%56.00%13.22%4.82%76.69%50.84%
BLIP [18]39.13%54.02%10.73%5.51%63.41%53.23%
Flava [31]38.08%47.99%8.01%9.82%11.6%45.59%
X-VLM [43]38.45%53.46%10.96%5.10%44.29%52.42%
DAC\\( _{LLM} \\)[10]37.45%81.71%8.13%37.84%90.56%71.21%
DAC\\( _{SAM} \\)[10]37.90%84.17%6.70%39.94%89.66%73.61%
", + "bbox": [ + 184, + 87, + 787, + 233 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. sDCI test result: We compare existing baselines on our Subcrop-Caption Matching (SCM) and negatives tests. Additional results are available in Table 10 in the Appendix. We note our best model fine-tuned on sDCI from section 5 achieved $64.02\\%$ and $31.60\\%$ on a held-out test of All SCM and All SCM Pick5 respectively, setting an upper bound for model performance.", + "bbox": [ + 75, + 243, + 893, + 287 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "scores higher than the best-scoring caption of any other image in the batch. This test evaluates if the representation space is structured such that captions belonging to a specific image are closest to the target image in the space.", + "bbox": [ + 75, + 311, + 467, + 372 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "[All Pick5-Neg]: Use the same setup as All Neg, but rather than using one caption, we use 5 LLM summarized captions for each image and subcrop. If any of these captions score worse than the negative, the model fails the example.", + "bbox": [ + 75, + 372, + 467, + 433 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "[Base Neg]: Using only the 7805 base images without subcrops, evaluate the model's ability to distinguish between an LLM generated caption and its corresponding LLM-generated negative. Note, this is a strict subset of All Neg, though these captions are on the longer side on average and cover a different distribution.", + "bbox": [ + 75, + 433, + 467, + 523 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "[All Hard-Negs]: Using the same setup as All Neg, but rather than using a single negative, use the negative across all LLM-generated negatives that CLIP scores highest.", + "bbox": [ + 75, + 523, + 467, + 569 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Results", + "text_level": 1, + "bbox": [ + 76, + 584, + 171, + 599 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We compare in Table 2 the sDCI performances given by different state-of-the-art models: CLIP [27], NegCLIP [42], BLIP [18], Flava [31] and X-VLM [43]. Additional experiments on different architectures and pretraining datasets are available in Table 10 (see Appendix). The CLIP baseline starts at $40.12\\%$ on All SCM and $60.63\\%$ on All Neg. The only model to improve over CLIP on SCM tasks is NegCLIP, which follows the fact that the hard image negatives that NegCLIP is trained on provide the most similar task to what we test of any of these models. None of the models trained without an explicit CLIP-loss component outperform CLIP on SCM tasks, but DAC ultimately performs the worst.", + "bbox": [ + 75, + 609, + 467, + 805 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Performance on the Pick5 variations of each task follow the trends of the standard performance. Performance on Base Neg for Flava point to a weakness in comparing longer text examples, given the significant drop from $47.99\\%$ to $11.6\\%$ that is not demonstrated in other models.", + "bbox": [ + 75, + 806, + 467, + 882 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Interestingly, models trained absent of CLIP (BLIP,", + "bbox": [ + 96, + 883, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Flava, X-VLM) experience a far less noticeable drop in performance between All Neg and All Hard Negs. 
This validates that sDCI's CLIP-hard negatives are not simply a higher proportion of 'impossible' negatives, but rather capture some underlying trait about the negatives that CLIP models and their descendants all struggle with.", + "bbox": [ + 496, + 311, + 890, + 402 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "None of the models presented perform well across all of the sDCI test set. Given each of the CLIP-style models have some kind of advantage on this test set due to being trained on some objective that sDCI directly evaluates, we expect that the BLIP, Flava, and X-VLM scores are somewhat representative for existing state-of-the-art models' true performance on this test set.", + "bbox": [ + 496, + 402, + 890, + 508 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5. Using summarized DCI as fine-tuning dataset", + "text_level": 1, + "bbox": [ + 498, + 523, + 890, + 556 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To evaluate the use and difficulty of the sDCI dataset for training, we fine-tune state-of-the-art models with it. In particular, we use a ViT/32B CLIP model in all of our experiments, requiring use of the CLIP-bounded version of our dataset. We split sDCI into 7800 train, 100 validation, 112 test samples for this purpose. We use a training batch size of 32 and a learning rate of $5 \\mathrm{e} - 5$ for all experiments, and run for up to 10 epochs. We train using both standard CLIP loss as well as an additional Negatives loss component, which follows the 'text negative' of NegCLIP [42]. Given the tiny size of our finetuning sets relative to the 400M pretraining images, we use LoRA [14] to reduce the trainable parameters. We train a model with and without negatives loss.", + "bbox": [ + 496, + 566, + 890, + 763 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In order to make good use of the multiple summarized captions we have per image and submask, we randomly select one to be used in each individual epoch. We call this method Pick1. We describe this method and other ablations we attempted in more detail in Appendix 9.", + "bbox": [ + 496, + 763, + 890, + 839 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We follow the experimental setup of DAC [10] by evaluating our sDCI fine-tuned CLIP on the ARO and VL-Checklist benchmarks. We compare to DAC directly as it is the most similar work to ours in attempting to increase", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "26706", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/9c02114c6f81cbc0699ee0bfc9dedf1e6171bdf3c7112dc4448b5941df47955f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelVG-RAROVL-Checklist
VG-ACOCOFLICKRObjectAttributeRelation
sDCIP176.23%67.56%88.58%91.30%80.71%68.69%70.12%
sDCIP1NL057.34%61.98%39.36%44.62%88.37%70.42%61.28%
DACLLM10,00061.53%63.89%46.28±1.5%59.41±1.9%66.90%57.4%56.96%
DACLLM100,00061.0%63.6%48.2%61.42%66.87%57.22%57.18%
DACLLM500,00060.1%63.8%50.2%61.6%66.54%57.39%56.77%
DACLLM3,000,00081.28%73.91%94.47%95.68%87.30%77.27%86.41%
DACSAM3,000,00077.16%70.5%91.22%93.88%88.50%75.83%89.75%
CLIP Baseline [27]59.98%63.18%47.9%60.2%81.17%67.67%61.95%
BLIP2 [19]41.16%71.25%13.57%13.72%84.14%80.12%70.72%
NegCLIP [42]81%71%86%91%81.35%72.24%63.53%
SVLC [11]80.61%73.03%84.73%91.7%85%71.97%68.95%
", + "bbox": [ + 122, + 87, + 848, + 310 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3. sDCI fine-tuned CLIP performance against the ARO and VL-Checklist benchmark. We compare CLIP fine-tuned with sDCI against models fine-tuned using DAC captions. Since the DAC dataset contains 3M images whereas sDCI contains only 7805 images, we performed an ablation of the number of training images used in the DAC dataset. In this instance, $\\mathrm{DAC}_{LLM_{10000}}$ refer to fine-tuning CLIP using only 10,000 images from DAC. We plot the mean across 5 different seeds and display the standard deviation when it is above $1\\%$ accuracy. We observe that training on sDCI lead to significant improvement in comparison to DAC for a comparable number of examples.", + "bbox": [ + 75, + 319, + 890, + 390 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "caption density. As noted in Figure 3, these automatically generated captions are generally noisy. As DAC is using 3M images for fine-tuning, we performed a small ablation on the number of DAC images to use for fine-tuning to be similar to our base image count (10,000 compared to our 8,012), or to our full mask count (100,000 compared to our 99,445).", + "bbox": [ + 75, + 416, + 468, + 506 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.1. Results", + "text_level": 1, + "bbox": [ + 76, + 517, + 171, + 532 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Table 3, we show that, while the DCI Pick1 model trained with negatives loss $(\\mathrm{sDCI}_{P1})$ does not reach the performance of DAC models trained on 3M images, it does improve over the CLIP baseline on most metrics3, and outperforms some baselines trained on more data. $\\mathrm{sDCI}_{P1}$ does however outperform both sample-limited ablations of DAC, suggesting that a small number of highly aligned image to dense text pairs are more effective for training models than larger quantities of more loosely aligned or sparse data. Unsurprisingly, the version trained without negatives loss, $\\mathrm{sDCI}_{P1NL0}$ , does not improve across most benchmarks, and even somewhat degrades when compared to the CLIP baseline.4 Of note however is the significant bump in VL-Object, alongside some improvement to VL-Attribute. Improvements here suggest that the sDCI dataset successfully includes more object, and to a lesser degree attribute, information than the captions in the source dataset for CLIP. It does, however, point to a limitation of using the LLM summarizations and not incorporating mask information, as relational information is sometimes lost.", + "bbox": [ + 75, + 540, + 470, + 843 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion and Future Work", + "text_level": 1, + "bbox": [ + 500, + 414, + 769, + 430 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We introduce the Densely Captioned Images dataset, and display clear use for it as a evaluation benchmark. We also show initial potential for using the dataset for fine-tuning. Given that in order to evaluate today's models on DCI we had to reduce the size of the text to only 77 tokens, DCI should prove to be useful for a longer period of time as models that are able to consume and utilize larger amounts of text context become the norm. We envision that in those cases the full human annotated captions without length reduction would be provided. 
Today's context size limitation also prevented us from fine-tuning existing models on the highly aligned text-image data within DCI, as existing models don't have enough context size to handle the full text, but the dataset isn't nearly large enough to pre-train a new set of models that could use the full text. It could be relevant to treat developing highly aligned text-image datasets in a similar manner to that used in machine translation for low-resource languages, which run into a similar issue with cost and difficulty to collect. This area of work has relied on automated methods such as bitext mining [33] to bootstrap up from an initial set of expertly collected examples, which DCI may already provide the foundation for. Further, we haven't attempted to incorporate the pixel-level masks that the dataset has in any of our experiments, instead opting to use crops around the masks to retain parity with our test set. This dataset is unique for both the extreme density and high degree of alignment present, and in this introductory work we've only scratched the surface of using this information to its fullest extent.", + "bbox": [ + 496, + 484, + 890, + 888 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "3The decreased performance on VL-Object may be explained by our LLM-generated negatives not closely covering the test set negatives. 4The degradation is likely due to the distribution shift and small sample size, given the training objective is the same as CLIP.", + "bbox": [ + 75, + 849, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "26707", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Amro Kamal Mohamed Abbas, Kushal Tirumala, Daniel Simig, Surya Ganguli, and Ari S. Morcos. Semdedup: Data-efficient learning at web-scale through semantic dedduplication. In ICLR 2023 Workshop on Mathematical and Empirical Understanding of Foundation Models, 2023. 1", + "[2] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katie Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andrew Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning, 2022. 3", + "[3] Florian Bordes, Shashank Shekhar, Mark Ibrahim, Diane Bouchacourt, Pascal Vincent, and Ari S. Morcos. Pug: Photorealistic and semantically controllable synthetic data for representation learning. In Advances in Neural Information Processing Systems, 2023. 1, 3", + "[4] John Canny. A computational approach to edge detection. IEEE Transactions on Pattern Analysis and Machine Intelligence, PAMI-8(6):679-698, 1986. 4", + "[5] Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments, 2021. 6", + "[6] Ilias Chalkidis, Xiang Dai, Manos Fergadiotis, Prodromos Malakasiotis, and Desmond Elliott. An exploration of hierarchical attention transformers for efficient long document classification, 2022. 6", + "[7] Qian Chen, Zhen-Hua Ling, and Xiaodan Zhu. 
Enhancing sentence embedding with generalized pooling, 2018. 6", + "[8] Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollar, and C. Lawrence Zitnick. Microsoft coco captions: Data collection and evaluation server, 2015. 3, 6", + "[9] Karan Desai, Gaurav Kaul, Zubin Aysola, and Justin Johnson. Redcaps: web-curated image-text data created by the people, for the people, 2021. 3", + "[10] Sivan Doveh, Assaf Arbelle, Sivan Harary, Roei Herzig, Donghyun Kim, Paola Cascante-bonilla, Amit Alfassy, Rameswar Panda, Raja Giryes, Rogerio Feris, Shimon Ullman, and Leonid Karlinsky. Dense and aligned captions (dac) promote compositional reasoning in v1 models, 2023. 1, 3, 4, 5, 6, 7", + "[11] Sivan Doveh, Assaf Arbelle, Sivan Harary, Rameswar Panda, Roei Herzig, Eli Schwartz, Donghyun Kim, Raja Giryes, Rogerio Feris, Shimon Ullman, and Leonid Karlinsky. Teaching structured vision&language concepts to vision&language models, 2023. 8", + "[12] Enrico Fini, Pietro Astolfi, Adriana Romero-Soriano, Jakob Verbeek, and Michal Drozdzal. Improved baselines for vision-language pre-training. Transactions on Machine Learning Research (TMLR), 2023. 4" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Timnit Gebru, Jamie Morgenstern, Briana Vecchione, Jennifer Wortman Vaughan, Hanna Wallach, Hal Daumé III au2, and Kate Crawford. Datasheets for datasets, 2021. 5, 13", + "[14] Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models, 2021. 7", + "[15] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything, 2023. 4, 13", + "[16] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Fei-Fei Li. Visual genome: Connecting language and vision using crowdsourced dense image annotations, 2016. 3", + "[17] Chunyuan Li, Haotian Liu, Liunian Harold Li, Pengchuan Zhang, Jyoti Aneja, Jianwei Yang, Ping Jin, Houdong Hu, Zicheng Liu, Yong Jae Lee, and Jianfeng Gao. Elevater: A benchmark and toolkit for evaluating language-augmented visual models, 2022. 5", + "[18] Junnan Li, Dongxu Li, Caiming Xiong, and Steven C. H. Hoi. BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. CoRR, abs/2201.12086, 2022. 4, 7", + "[19] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models, 2023. 3, 4, 8", + "[20] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for clip training, 2023. 1", + "[21] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking, 2023. 1", + "[22] Zhiqiu Lin, Xinyue Chen, Deepak Pathak, Pengchuan Zhang, and Deva Ramanan. Visualgptscore: Visio-linguistic reasoning with multimodal generative pre-training scores, 2023. 1, 4", + "[23] Zixian Ma, Jerry Hong, Mustafa Omer Gul, Mona Gandhi, Irena Gao, and Ranjay Krishna. Crepe: Can vision-language foundation models reason compositionally?, 2023. 3", + "[24] Vicente Ordonez, Girish Kulkarni, and Tamara Berg. 
Im2text: Describing images using 1 million captioned photographs. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2011. 3", + "[25] Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives, 2020. 3, 6", + "[26] Filip Radenovic, Abhimanyu Dubey, Abhishek Kadian, Todor Mihaylov, Simon Vandenhende, Yash Patel, Yi Wen, Vignesh Ramanathan, and Dhruv Mahajan. Filtering, distillation, and hard negatives for vision-language pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6967-6977, 2023. 1, 3", + "[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry," + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "26708", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. 3, 4, 7, 8", + "[28] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs, 2021. 1", + "[29] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. Laion-5b: An open large-scale dataset for training next generation image-text models, 2022. 3", + "[30] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, Melbourne, Australia, 2018. Association for Computational Linguistics. 1, 3", + "[31] Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. FLAVA: A foundational language and vision alignment model. In CVPR, 2022. 4, 7", + "[32] Krishna Srinivasan, Karthik Raman, Jiecao Chen, Mike Bendersky, and Marc Najork. Wit: Wikipedia-based image text dataset for multimodal multilingual machine learning. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '21), 2021. 3", + "[33] NLLB Team, Marta R. Costa-jussà, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia González, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. No language left behind: Scaling human-centered machine translation, 2022. 8", + "[34] Bart Thomee, David A. 
Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. YFCC100m. Communications of the ACM, 59(2): 64-73, 2016. 1, 3", + "[35] Tristan Thrush, Ryan Jiang, Max Bartolo, Amanpreet Singh, Adina Williams, Douwe Kiela, and Candace Ross. Winoground: Probing vision and language models for visio-linguistic compositionality, 2022. 1, 3", + "[36] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer," + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and finetuned chat models, 2023. 6", + "[37] Jack Urbanek and Pratik Ringshia. Mephisto: A framework for portable, reproducible, and iterative crowdsourcing, 2023. 4", + "[38] Hu Xu, Saining Xie, Po-Yao Huang, Licheng Yu, Russell Howes, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Cit: Curation in training for effective vision-language data. arXiv preprint arXiv:2301.02241, 2023. 1", + "[39] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data, 2023. 3", + "[40] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78, 2014. 3", + "[41] Lili Yu, Bowen Shi, Ramakanth Pasunuru, Benjamin Muller, Olga Golovneva, Tianlu Wang, Arun Babu, Binh Tang, Brian Karrer, Shelly Sheynin, Candace Ross, Adam Polyak, Russell Howes, Vasu Sharma, Puxin Xu, Hovhannes Tamoyan, Oron Ashual, Uriel Singer, Shang-Wen Li, Susan Zhang, Richard James, Gargi Ghosh, Yaniv Taigman, Maryam Fazel-Zarandi, Asli Celikyilmaz, Luke Zettlemoyer, and Armen Aghajanyan. Scaling autoregressive multi-modal models: Pretraining and instruction tuning, 2023. 3", + "[42] Mert Yuksekgonul, Federico Bianchi, Pratyusha Kalluri, Dan Jurafsky, and James Zou. When and why vision-language models behave like bags-of-words, and what to do about it? In International Conference on Learning Representations, 2023. 1, 3, 7, 8, 4", + "[43] Yan Zeng, Xinsong Zhang, and Hang Li. Multi-grained vision language pre-training: Aligning texts with visual concepts. arXiv preprint arXiv:2111.08276, 2021. 4, 7", + "[44] Hang Zhang, Yeyun Gong, Yelong Shen, Weisheng Li, Jiancheng Lv, Nan Duan, and Weizhu Chen. Poolingformer: Long document modeling with pooling attention, 2022. 
6", + "[45] Tiancheng Zhao, Tianqi Zhang, Mingwei Zhu, Haozhan Shen, Kyusong Lee, Xiaopeng Lu, and Jianwei Yin. Vl-checklist: Evaluating pre-trained vision-language models with objects, attributes and relations, 2023. 1, 3" + ], + "bbox": [ + 501, + 90, + 890, + 891 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "26709", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/f5f3c319-3887-4d17-8f43-fce0198c0c77_model.json b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/f5f3c319-3887-4d17-8f43-fce0198c0c77_model.json new file mode 100644 index 0000000000000000000000000000000000000000..18c7e0d2e69cb983c0acdf50f7108671f667899b --- /dev/null +++ b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/f5f3c319-3887-4d17-8f43-fce0198c0c77_model.json @@ -0,0 +1,1749 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.234, + 0.131, + 0.74, + 0.177 + ], + "angle": 0, + "content": "A Picture is Worth More Than 77 Text Tokens: Evaluating CLIP-Style Models on Dense Captions" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.202, + 0.882, + 0.24 + ], + "angle": 0, + "content": "Jack Urbanek*† Florian Bordes1,2,3† Pietro Astolfi1 Mary Williamson1 Vasu Sharma1 Adriana Romero-Soriano1,3,4,5" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.255, + 0.852, + 0.275 + ], + "angle": 0, + "content": "\\(^{1}\\)FAIR, Meta \\(^{2}\\)Mila \\(^{3}\\)Universite de Montreal, \\(^{4}\\)McGill University \\(^{5}\\)Canada CIFAR AI chair" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.309, + 0.314, + 0.327 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.342, + 0.473, + 0.691 + ], + "angle": 0, + "content": "Curation methods for massive vision-language datasets trade off between dataset size and quality. However, even the highest quality of available curated captions are far too short to capture the rich visual detail in an image. To show the value of dense and highly-aligned image-text pairs, we collect the Densely Captioned Images (DCI) dataset, containing 7805 natural images human-annotated with mask-aligned descriptions averaging above 1000 words each. With precise and reliable captions associated with specific parts of an image, we can evaluate vision-language models' (VLMs) understanding of image content with a novel task that matches each caption with its corresponding subcrop. As current models are often limited to 77 text tokens, we also introduce a summarized version (sDCI) in which each caption length is limited. We show that modern techniques that make progress on standard benchmarks do not correspond with significant improvement on our sDCI based benchmark. Lastly, we finetune CLIP using sDCI and show significant improvements over the baseline despite a small training set. 
By releasing the first human annotated dense image captioning dataset, we hope to enable the development of new benchmarks or fine-tuning recipes for the next generation of VLMs to come." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.718, + 0.21, + 0.735 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.744, + 0.47, + 0.866 + ], + "angle": 0, + "content": "State-of-the-art vision-language models (VLMs) are often trained on large scale datasets such as LAION-400M [28], YFCC100M [34], or other undisclosed datasets crawled from the web. These datasets are formed by collecting images from the web and using alt-text (or other local text on the webpage) to create loose image-text pairs. These can then be filtered down trading off on quantity for quality [26, 30]. Still, recent work has demonstrated that throwing these" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.311, + 0.892, + 0.492 + ], + "angle": 0, + "content": "loose captions out entirely in favor of generated captions, with enhanced quality and density, can produce improved results [10]. Other works [1, 20, 21, 38] have demonstrated that it is possible to get CLIP-level performance using a vastly reduced compute, often by throwing away portions of the data resulting in more balance between image and text modalities. However, those approaches rely on automatic pipelines which do not generate reliable and long captions that can capture rich visual details in an image. From this it appears no existing dataset has high-quality image descriptions that are tightly-coupled enough with the image to train for or evaluate a deep alignment between the two domains." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.493, + 0.893, + 0.794 + ], + "angle": 0, + "content": "In the absence of high quality captions to evaluate VLMs, benchmarks such as ARO [42] and VL-Checklist [45] often complement image-caption pairs with hard negatives that are generated by slightly altering the initial (positive) description. Progress on these benchmarks has been rooted in training VLMs with negatives of similar construction to the tests [42] rendering the methodologies ineffective on datasets such as Winoground [35]. Recent works [22] have called the evaluation capacity of many of these benchmarks into question, given how effective language-prior-based methods perform. More specifically, given the unlikelihood of the hard negative captions in these benchmarks, a good text encoder can achieve close to \\(100\\%\\) accuracy without looking at the images. Moreover, Bordes et al. [3] have shown that most improvements observed on ARO or VL-Checklist do not translate on simple synthetic benchmarks for which the negative caption is as likely as the positive one. Since the use of VLMs is significantly increasing, it is crucial to make sure that we have a diverse suite of reliable benchmarks to assess their abilities." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.895, + 0.903 + ], + "angle": 0, + "content": "In this paper, we introduce the Densely Captioned Images dataset, a collection of 7805 images with dense and mask-aligned descriptions averaging above 1000 words each. One such example is provided in Figure 1, displaying just a subset of the collected text paired with their aligned masks. 
We demonstrate how to leverage this dataset to evaluate VLMs in two ways after summarizing captions" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.875, + 0.245, + 0.887 + ], + "angle": 0, + "content": "* Work done while at Meta" + }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.888, + 0.21, + 0.9 + ], + "angle": 0, + "content": "Equal contribution" + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.875, + 0.245, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "26700" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.137, + 0.101, + 0.849, + 0.74 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.146, + 0.761, + 0.822, + 0.777 + ], + "angle": 0, + "content": "Figure 1. One example from the Densely Captioned Images dataset. Only part of the submask hierarchy is shown." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.802, + 0.473, + 0.896 + ], + "angle": 0, + "content": "to fit into CLIP's 77 token limit, both with a negatives-based test as well as a novel matching task, referred to as subcrop-caption matching, that requires selecting appropriate captions for different regions of the same image. We evaluate existing baselines, and observe that no models perform well at both concurrently, and improved" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.802, + 0.895, + 0.896 + ], + "angle": 0, + "content": "performance via negatives-based training comes at the cost of decreased performance on subcrop-caption matching. We also run some experiments using the summarized DCI as a fine-tuning dataset to evaluate the effectiveness of these captions for improving a model's performance on other benchmarks, and compare the efficiency per-example" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.958 + ], + "angle": 0, + "content": "26701" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.078, + 0.092, + 0.455, + 0.121 + ], + "angle": 0, + "content": "to that from the automated annotation setup in DAC [10]. To summarize, our contributions are:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.124, + 0.468, + 0.182 + ], + "angle": 0, + "content": "- We release the Densely Captioned Images (DCI) dataset, which contains dense and mask-aligned captions, alongside an LLM-summarized version (sDCI) containing captions under 77 tokens for use with current VLMs." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.185, + 0.468, + 0.258 + ], + "angle": 0, + "content": "- We provide a new benchmark for VLMs based on sDCI to evaluate fine-grained vision-language understanding, and show that no existing model can perform well at matching captions from within one image to corresponding subsections of that image." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.26, + 0.468, + 0.348 + ], + "angle": 0, + "content": "- We show that fine-tuning with high quality image-caption pairs is as good on ARO and VL-Checklist as fine-tuning on at least \\(10 \\times\\) the automatically annotated data, and that even without utilizing explicit negatives these pairs can improve performance on VL-C-Object from \\(81.17\\%\\) to \\(88.37\\%\\)." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.124, + 0.468, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.367, + 0.226, + 0.383 + ], + "angle": 0, + "content": "2. 
Related Works" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.394, + 0.468, + 0.514 + ], + "angle": 0, + "content": "The massive, loosely-labeled dataset approach that has enabled VLMs like CLIP [27] and powerful successors like BLIP2 [19], Flamingo [2], CM3leon [41], and many others, has been a clear forward step in vision-language modeling. Still recent benchmarks show that models trained in this manner display clear drawbacks in reasoning skills. Additional techniques have been proposed and adopted recently to close this gap, discussed below." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.539, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Vision-Language Datasets. Over the last decade, there have been significant dataset collection efforts connecting images and text. Earlier works focused on curating datasets by leveraging human annotations, see e.g., COCO [8], Visual Genome [16], and Flickr30k [40]. The process resulted in high quality annotations, which were however oftentimes limited by the caption content - i.e., relatively short phrases (5.1 to 10.3 words on average) grounded at image level or region level - and the data annotation scale (30k to 130k images). To increase scale, researchers gathered web-crawled data and introduced large scale datasets such as YFCC100M [34], which contains 100M media objects. Yet, crawling the web oftentimes results in little correspondence between image and text pairs. To reduce noise between image and text pairs, efforts such as SBU [24] queried Flickr and filtered the noisy results, obtaining a \\(\\sim 1\\mathrm{M}\\) images. Moreover, Conceptual Captions (CC) [30] crawled a dataset of \\(\\sim 12\\mathrm{M}\\) images and alt-text pairs, and included a protocol to filter noisy text-image pairs, resulting in 3M data points. Relaxing the filtering protocol allows to trade data quality for scale. Crawling alt-text also resulted in relatively short text descriptions with 10.3 words on average, which are most often grounded at image level. Localized Narratives [25] was introduced" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.379 + ], + "angle": 0, + "content": "as a dense visual grounding dataset leveraging a multimodal annotation procedure, collecting \\(\\sim 850\\mathrm{k}\\) text-image pairs with 36.5 words/caption on average. **RedCaps** [9] constituted another effort yielding large scale (\\(\\sim 12\\mathrm{M}\\)) web-curated data by exploring alternate data sources of high quality data instead of devising complex filtering strategies. Wikipedia-based image-text dataset (WIT) [32] extended dataset creation efforts by gathering a multilingual dataset of text-image-pairs consisting of \\(\\sim 11.5\\mathrm{M}\\) images. LAION-5B [29] further increased the web-crawling efforts by gathering a multilingual dataset of text-image pairs, and filtered the collected data with a pre-trained CLIP [27] model. Following, LAION-CAT [26] reduced noisy examples from LAION-5B by filtering for caption complexity, i.e., captions that do not contain any action, and for text spotting, i.e., images that contain rendered text. **MetaCLIP** [39] has also been released as an open dataset for reproducing CLIP. These very large scale datasets have been successfully used to advance the state-of-the-art of VLMs." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.403, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Vision-Language Evaluation Benchmarks. 
Several recent advances in visual-language learning have focused on creating comprehensive benchmarks to evaluate model performance in more holistic ways. These benchmarks are instrumental in pushing the envelope of what VLM can understand and process, ensuring they move beyond superficial image-text matching towards genuine understanding of intricate relationships between visual and linguistic elements. In particular, VL-CheckList [45] and ARO [42] assess the VLM capabilities beyond average downstream task accuracy, by focusing on a model's ability to understand objects, attributes, order or relations. ARO's extensive scope, uncovers limitations in VLMs such as poor relational understanding and lack of order sensitivity. Winoground [35] tests models for visio-linguistic compositional reasoning by asking VLM to match two images with two captions containing the same set of words but in different orders. This task requires models to discern the meaning conveyed by the order of words, reflecting different visual scenes. Current VLMs perform only marginally better than chance, highlighting a significant gap in compositional reasoning. CREPE (Compositional REpresentation Evaluation) [23] evaluates two aspects of compositionality: systematicity and productivity. Systematicity is measured by the model's ability to represent seen versus unseen atoms and their compositions, while productivity gauges the model's capacity to understand an unbounded set of increasingly complex expressions. Finally, PUG (Photorealistic Unreal Graphics) [3] uses synthetic data to assess the compositional reasoning abilities of VLMs by progressively increasing the complexity of a given generated scene. One issue with these evaluation datasets is their frequent reliance on COCO, either directly as in ARO, or through Visual Genome as in" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "26702" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "VL-Checklist or CREPE. It is difficult to find an evaluation dataset of sufficient scale without COCO." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.139, + 0.473, + 0.669 + ], + "angle": 0, + "content": "Vision-Language models. Recent VLM advancements have built upon the foundational work of CLIP [27], which leveraged large-scale image-text pairs to jointly pre-train an image encoder and a text encoder to predict which images are paired with which texts in a contrastive learning paradigm. NegCLIP build upon CLIP by leveraging negative captions when training. BLIP (Bootstrapping Language-Image Pre-training) [18] uses a new framework that bootstraps the captions from noisy web data for both understanding and generation tasks. Its successor BLIP-2 [19] further streamlines the process by utilizing off-the-shelf frozen pre-trained image encoders and language models, bridging the modality gap with a lightweight querying mechanism. Clip-rocket [12] improves VLM baselines by showing that applying image and text augmentations makes up for most of the improvement attained by prior VLMs. Flava [31] proposes a foundation VLM model by combining existing VLMs objectives together with auxiliary in-modality losses for the text and vision encoders. X-VLM [43] achieves success with a pretraining method matching sub- portions of the text to regions of the image at multiple granularities. 
These models introduces improvements over CLIP, focusing on efficiency, adaptability, and reducing the need for extensive labeled datasets, thereby pushing the boundaries of vision-language pre-training. The closest work to our approach is DAC (Densely Aligned Captions) [10], which improves with an automated LLM based pipeline the caption quality and density. By showing that DAC-enhanced CLIP models exhibit substantial gains on some benchmarks, this work underscores the critical role that caption quality and density play in the efficacy of VLMs. We build on this insight and explore how to further increase the caption quality and density by relying on human annotators, and analyze how that impacts downstream model performance." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.68, + 0.283, + 0.696 + ], + "angle": 0, + "content": "3. Dataset Construction" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.705, + 0.473, + 0.901 + ], + "angle": 0, + "content": "The Densely Captioned Images dataset, or DCI, consists of 7805 images from SA-1B [15], each with a complete description aiming to capture the full visual detail of what is present in the image. Much of the description is directly aligned to submasks of the image. An example is shown in Figure 1. In the top left we see the full image of a water pump, with an associated description. The italicized section is collected as a standard caption, aiming to summarize the full image in about a sentence, similar to existing caption datasets. The remainder of that first description contains details about the relationship between visible entities in the image, as well as in-depth descriptions of regions that are not described as part of the submasks. All other" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.184 + ], + "angle": 0, + "content": "text describing the image is associated with submasks of the image. Each submask has its own free-text label (not pictured) and description, and may also contain further submasks. Here for instance we see submasks for windows and balconies as being contained in the submask capturing three buildings in the background." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.193, + 0.63, + 0.21 + ], + "angle": 0, + "content": "3.1. Preparation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.217, + 0.895, + 0.337 + ], + "angle": 0, + "content": "In order to collect the data, we first select images from a random privacy-mitigated subset of SA-1B. We then procedurally extract subregions of each image to annotate, as we found in initial trials that crowdsourcing both regions and descriptions concurrently overcomplicated the task and successful annotation rate. For this process, we turn to the Segment Anything Model (SAM) [15] and adapt their standard method to extract all masks from an image." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.338, + 0.895, + 0.61 + ], + "angle": 0, + "content": "For the extraction process, SAM usually relies on a grid of points across the entire image. In order to increase the possibility of selecting interesting regions worth annotating, we additionally apply a canny filter [4] and select random points within a radius from discovered edges. We then run SAM to detect all masks using both the grid and the near-edge points. Once the masks are returned, we establish a hierarchy of submasks by thresholding the number of overlapping pixels between two masks to determine if one is a submask of the other, or if the two masks should be joined. 
This helps reduce some of the noise introduced by the automatic masking process, and leaves us with a tree-like structure for the masks. Lastly, we remove any masks that are too small. We note that undergoing this process does not result in every detail of each image being selected as a candidate for annotation, and as such instances in the DCI dataset are not expected to have complete submask-aligned coverage of all elements one could recognize in or discuss about an image." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.62, + 0.677, + 0.635 + ], + "angle": 0, + "content": "3.2. Collection Process" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.704 + ], + "angle": 0, + "content": "We use Mephisto [37] to host our task, pay crowdworkers to provide annotations on the dataset, and additionally run qualification steps. Workers that pass our qualifications are eligible to work on the main task which contains 3 stages:" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.705, + 0.892, + 0.749 + ], + "angle": 0, + "content": "1. Workers are provided with the whole image, and asked to provide a short description of it. This is considered the standard caption." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.75, + 0.894, + 0.901 + ], + "angle": 0, + "content": "2. Workers are provided with submasks of the image, one at a time starting with the leaves of the mask tree, displaying a SAM-selected region of the image as well as an indicator for where that region comes from. They are generally asked to provide a label and complete description for the pictured region, though are allowed to mark the region as 'uninteresting' and only provide a label, or 'bad' and provide nothing. These options allow us to focus worker time on useful annotations and help capture some of the noise of the automatic selection pro" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "26703" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.089, + 0.852, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.325, + 0.854, + 0.34 + ], + "angle": 0, + "content": "Figure 2. Annotation view for writing description for masks of the image. The masked region appears highlighted for clarity." + }, + { + "type": "image", + "bbox": [ + 0.079, + 0.36, + 0.337, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.349, + 0.367, + 0.852, + 0.462 + ], + "angle": 0, + "content": "A black metal water pump in a town square in the middle of a circular stone shape across from several buildings. Black metal water pump in foreground with spigot in front and curved handle with globular end. It is located in the middle of a smaller circle of stones. There is some grass growing in between some of the stones. There is a metal grated drain near the bottom of the pump. To its left is a dark brown metal manhole cover and behind it is a grey stone tiled road. The road leads to three buildings across the street with storefronts on the bottom level and different units with balconies and large windows on the second and third level. There are people out front walking by the lower level. There are benches out front. 
Human annotated" + }, + { + "type": "text", + "bbox": [ + 0.349, + 0.473, + 0.831, + 0.497 + ], + "angle": 0, + "content": "A black metal water pump stands in a town square, surrounded by a circle of stones, with buildings and people in the background LLM Summary" + }, + { + "type": "text", + "bbox": [ + 0.349, + 0.513, + 0.861, + 0.537 + ], + "angle": 0, + "content": "A pump stands metal water black in square town, surrounded by stones circle, with background buildings people in." + }, + { + "type": "text", + "bbox": [ + 0.348, + 0.557, + 0.858, + 0.651 + ], + "angle": 0, + "content": "DAC Quality: a fire hydrant in the middle of a city street \nDAC Dense: 1 A fire hydrant can be seen in the center of a street, its metallic spray emanating water vapor from a nozzle as it shoots water into the air. 2 The spray creates a steady waterfall, making it appear as though the hydrant is spraying water from its nozzle the whole time. 3 The spray is relatively smooth, creating a gradual wave that covers a large portion of a city block. \nDAC SAM: a black plastic drain cover, a building, a water fountain, a bicycle, a water fountain, the image shows a circular stone path with a circular stone base, a water pump, the logo of the company, the image shows a black metal handle with a long handle, home bank, a dragon" + }, + { + "type": "aside_text", + "bbox": [ + 0.872, + 0.444, + 0.887, + 0.486 + ], + "angle": 90, + "content": "DCI (Our)" + }, + { + "type": "aside_text", + "bbox": [ + 0.872, + 0.593, + 0.886, + 0.614 + ], + "angle": 90, + "content": "DAC" + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.678, + 0.892, + 0.749 + ], + "angle": 0, + "content": "Figure 3. Example of a Llama2-generated summary and negative that comprise sDCI. Each image and submask have multiple summarizations and negatives. We also compare the caption quality between DAC [10] and DCI. In contrast to DCI that relies on human annotations, DAC used an automatic pipeline based on LLM for captioning. As we observe in this example, the DAC captions can suffer from hallucinations and miss important elements of the photo. In this work we argue that while improving automatic pipeline is an important research direction, for now the captions proposed are not reliable enough to be used to evaluate models and assess their abilities." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.775, + 0.469, + 0.834 + ], + "angle": 0, + "content": "cess. This is shown in Figure 2. For masks that contain submasks, workers are also provided with overlays that show the regions already annotated, and are asked to annotate in terms of what has already been written." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.835, + 0.47, + 0.896 + ], + "angle": 0, + "content": "3. After completing all the submasks, the worker is then shown the complete image again and asked to provide an overall description, paying attention to the relationship between previously annotated regions." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.775, + 0.892, + 0.865 + ], + "angle": 0, + "content": "An in-depth description of the filtering and quality assurance process can be found in Appendix 8 while the Datasheet [13] is available in Appendix 12. Complete annotation instructions, dataset download links as well as reproducible code are available on our GitHub1. The DCI dataset is released under the CC-BY-NC license." 
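To make the nested annotation structure described in this section concrete, the short Python sketch below walks a toy version of such a tree, pairing each region's free-text label and description with its crop. The field names, dataclass layout, and example regions are hypothetical illustrations, not the schema of the released DCI files.

from dataclasses import dataclass, field

@dataclass
class Region:
    label: str                         # free-text label for the submask
    description: str                   # human-written description of the region
    bbox: tuple                        # (x0, y0, x1, y1) crop around the mask
    children: list = field(default_factory=list)   # nested submasks

def iter_caption_crops(node, depth=0):
    """Yield (depth, bbox, text) for a region and all of its submasks,
    so each caption can be paired with its own subcrop."""
    yield depth, node.bbox, node.label + ": " + node.description
    for child in node.children:
        yield from iter_caption_crops(child, depth + 1)

if __name__ == "__main__":
    buildings = Region(
        "buildings", "Three buildings with storefronts on the ground level.",
        (300, 40, 620, 360),
        children=[
            Region("window", "A large window on the second floor.", (340, 90, 400, 150)),
            Region("balcony", "A small balcony with a metal railing.", (470, 100, 540, 160)),
        ],
    )
    for depth, bbox, text in iter_caption_crops(buildings):
        print("  " * depth, bbox, text)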
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.518, + 0.888, + 0.833, + 0.9 + ], + "angle": 0, + "content": "DatasetImgSCapsToks/CapToks/ImgDCI7,8057,8051,282.091,282.09\\( DCI_{sub} \\)96,00796,007199.33199.33sDCI8,01287,26849.21536.00\\( sDCI_{sub} \\)96,007714,63036.60263.01\\( LN_{COCO} \\)142,845142,84549.1149.11\\( LN_{COCO<77} \\)127,456127,45643.7043.70COCO123,287616,76713.5467.74" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.239, + 0.892, + 0.35 + ], + "angle": 0, + "content": "Table 1. Comparison of DCI dataset statistics to other datasets, focusing on average CLIP tokens per image or caption. Note the \\(26\\mathbf{x}\\) difference between DCI and the previous longest annotated dataset, Localized Narratives (LN). sub denotes including sub-masks and their descriptions as examples, and sDCI refers to the LLM-summarized version of DCI that fits captions to 77 tokens (Sec. 3.3), while \\(\\mathrm{LN}_{\\mathrm{COCO}} < 77\\) simply drops examples longer than 77 tokens (\\(\\sim 10.8\\%\\))." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.378, + 0.866, + 0.395 + ], + "angle": 0, + "content": "4. Evaluating VLMs with summarized DCI" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.405, + 0.638, + 0.421 + ], + "angle": 0, + "content": "4.1. Methodology" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.429, + 0.892, + 0.64 + ], + "angle": 0, + "content": "Using the 7805 images in the summarized Densely Captioned Images (sDCI) dataset, we construct a few different evaluations. As noted above, the ability to select multiple submasks from the same image and include them in the same batch allows us to create a CLIP-style test, wherein the model can evaluate a full batch of images and captions and score correctly which caption belongs to which image. As we provide models with a crop around the selected masks, we call this Subcrop-Caption Matching (SCM), and we use a batch size of 8. We can run against our LLM-generated negatives as well. Given that LLM-summarization has provided us with multiple captions and negatives per image and submask, we supply the first unless noted otherwise. With this in mind, we construct 6 evaluations as follows:" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.64, + 0.892, + 0.716 + ], + "angle": 0, + "content": "[All SCM]: Group each image with their subcrops, alongside one summarized caption per subcrop. Then use the model to find the most likely caption associated to each subcrop. This test measures the ability of the VLM to distinguish between the different parts that compose an image.[2]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.716, + 0.892, + 0.776 + ], + "angle": 0, + "content": "[All Neg]: Select one LLM summarized caption and the corresponding LLM-generated negative for each image and subcrop. Score a model on its ability to distinguish between the positive and negative." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.776, + 0.892, + 0.837 + ], + "angle": 0, + "content": "[All Pick5-SCM]: Use the same setup as All SCM, but rather than using only one caption per subcrop, we use 5 LLM generated captions per subcrop. We score a model as succeeding only when the worst-scoring positive caption" + }, + { + "type": "page_footnote", + "bbox": [ + 0.499, + 0.851, + 0.892, + 0.9 + ], + "angle": 0, + "content": "2Since we used sDCI to fit current models token length, it is possible that some of the summaries remove the information that make possible to distinguish between the captions. 
Ideally this test should be performed on the non-summarized version once VLMs can handle 1000+ tokens." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "26705" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.185, + 0.088, + 0.788, + 0.234 + ], + "angle": 0, + "content": "
ModelAllAll Pick5BaseAll
SCMNegSCMNegNegHard Negs
CLIP Baseline [27]40.06%60.79%11.21%24.06%67.56%41.34%
NegCLIP [42]43.35%56.00%13.22%4.82%76.69%50.84%
BLIP [18]39.13%54.02%10.73%5.51%63.41%53.23%
Flava [31]38.08%47.99%8.01%9.82%11.6%45.59%
X-VLM [43]38.45%53.46%10.96%5.10%44.29%52.42%
DAC\\( _{LLM} \\)[10]37.45%81.71%8.13%37.84%90.56%71.21%
DAC\\( _{SAM} \\)[10]37.90%84.17%6.70%39.94%89.66%73.61%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.244, + 0.895, + 0.288 + ], + "angle": 0, + "content": "Table 2. sDCI test result: We compare existing baselines on our Subcrop-Caption Matching (SCM) and negatives tests. Additional results are available in Table 10 in the Appendix. We note our best model fine-tuned on sDCI from section 5 achieved \\(64.02\\%\\) and \\(31.60\\%\\) on a held-out test of All SCM and All SCM Pick5 respectively, setting an upper bound for model performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.313, + 0.468, + 0.373 + ], + "angle": 0, + "content": "scores higher than the best-scoring caption of any other image in the batch. This test evaluates if the representation space is structured such that captions belonging to a specific image are closest to the target image in the space." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.373, + 0.468, + 0.434 + ], + "angle": 0, + "content": "[All Pick5-Neg]: Use the same setup as All Neg, but rather than using one caption, we use 5 LLM summarized captions for each image and subcrop. If any of these captions score worse than the negative, the model fails the example." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.434, + 0.468, + 0.524 + ], + "angle": 0, + "content": "[Base Neg]: Using only the 7805 base images without subcrops, evaluate the model's ability to distinguish between an LLM generated caption and its corresponding LLM-generated negative. Note, this is a strict subset of All Neg, though these captions are on the longer side on average and cover a different distribution." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.524, + 0.468, + 0.57 + ], + "angle": 0, + "content": "[All Hard-Negs]: Using the same setup as All Neg, but rather than using a single negative, use the negative across all LLM-generated negatives that CLIP scores highest." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.585, + 0.172, + 0.6 + ], + "angle": 0, + "content": "4.2. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.61, + 0.468, + 0.806 + ], + "angle": 0, + "content": "We compare in Table 2 the sDCI performances given by different state-of-the-art models: CLIP [27], NegCLIP [42], BLIP [18], Flava [31] and X-VLM [43]. Additional experiments on different architectures and pretraining datasets are available in Table 10 (see Appendix). The CLIP baseline starts at \\(40.12\\%\\) on All SCM and \\(60.63\\%\\) on All Neg. The only model to improve over CLIP on SCM tasks is NegCLIP, which follows the fact that the hard image negatives that NegCLIP is trained on provide the most similar task to what we test of any of these models. None of the models trained without an explicit CLIP-loss component outperform CLIP on SCM tasks, but DAC ultimately performs the worst." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.808, + 0.468, + 0.883 + ], + "angle": 0, + "content": "Performance on the Pick5 variations of each task follow the trends of the standard performance. Performance on Base Neg for Flava point to a weakness in comparing longer text examples, given the significant drop from \\(47.99\\%\\) to \\(11.6\\%\\) that is not demonstrated in other models." 
+ }, + { + "type": "text", + "bbox": [ + 0.097, + 0.885, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Interestingly, models trained absent of CLIP (BLIP," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.313, + 0.892, + 0.403 + ], + "angle": 0, + "content": "Flava, X-VLM) experience a far less noticeable drop in performance between All Neg and All Hard Negs. This validates that sDCI's CLIP-hard negatives are not simply a higher proportion of 'impossible' negatives, but rather capture some underlying trait about the negatives that CLIP models and their descendants all struggle with." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.404, + 0.892, + 0.509 + ], + "angle": 0, + "content": "None of the models presented perform well across all of the sDCI test set. Given each of the CLIP-style models have some kind of advantage on this test set due to being trained on some objective that sDCI directly evaluates, we expect that the BLIP, Flava, and X-VLM scores are somewhat representative for existing state-of-the-art models' true performance on this test set." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.524, + 0.892, + 0.558 + ], + "angle": 0, + "content": "5. Using summarized DCI as fine-tuning dataset" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.568, + 0.892, + 0.764 + ], + "angle": 0, + "content": "To evaluate the use and difficulty of the sDCI dataset for training, we fine-tune state-of-the-art models with it. In particular, we use a ViT/32B CLIP model in all of our experiments, requiring use of the CLIP-bounded version of our dataset. We split sDCI into 7800 train, 100 validation, 112 test samples for this purpose. We use a training batch size of 32 and a learning rate of \\(5 \\mathrm{e} - 5\\) for all experiments, and run for up to 10 epochs. We train using both standard CLIP loss as well as an additional Negatives loss component, which follows the 'text negative' of NegCLIP [42]. Given the tiny size of our finetuning sets relative to the 400M pretraining images, we use LoRA [14] to reduce the trainable parameters. We train a model with and without negatives loss." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.84 + ], + "angle": 0, + "content": "In order to make good use of the multiple summarized captions we have per image and submask, we randomly select one to be used in each individual epoch. We call this method Pick1. We describe this method and other ablations we attempted in more detail in Appendix 9." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We follow the experimental setup of DAC [10] by evaluating our sDCI fine-tuned CLIP on the ARO and VL-Checklist benchmarks. We compare to DAC directly as it is the most similar work to ours in attempting to increase" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "26706" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.123, + 0.088, + 0.849, + 0.311 + ], + "angle": 0, + "content": "
ModelVG-RAROVL-Checklist
VG-ACOCOFLICKRObjectAttributeRelation
sDCIP176.23%67.56%88.58%91.30%80.71%68.69%70.12%
sDCIP1NL057.34%61.98%39.36%44.62%88.37%70.42%61.28%
DACLLM10,00061.53%63.89%46.28±1.5%59.41±1.9%66.90%57.4%56.96%
DACLLM100,00061.0%63.6%48.2%61.42%66.87%57.22%57.18%
DACLLM500,00060.1%63.8%50.2%61.6%66.54%57.39%56.77%
DACLLM3,000,00081.28%73.91%94.47%95.68%87.30%77.27%86.41%
DACSAM3,000,00077.16%70.5%91.22%93.88%88.50%75.83%89.75%
CLIP Baseline [27]59.98%63.18%47.9%60.2%81.17%67.67%61.95%
BLIP2 [19]41.16%71.25%13.57%13.72%84.14%80.12%70.72%
NegCLIP [42]81%71%86%91%81.35%72.24%63.53%
SVLC [11]80.61%73.03%84.73%91.7%85%71.97%68.95%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.32, + 0.892, + 0.391 + ], + "angle": 0, + "content": "Table 3. sDCI fine-tuned CLIP performance against the ARO and VL-Checklist benchmark. We compare CLIP fine-tuned with sDCI against models fine-tuned using DAC captions. Since the DAC dataset contains 3M images whereas sDCI contains only 7805 images, we performed an ablation of the number of training images used in the DAC dataset. In this instance, \\(\\mathrm{DAC}_{LLM_{10000}}\\) refer to fine-tuning CLIP using only 10,000 images from DAC. We plot the mean across 5 different seeds and display the standard deviation when it is above \\(1\\%\\) accuracy. We observe that training on sDCI lead to significant improvement in comparison to DAC for a comparable number of examples." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.417, + 0.47, + 0.507 + ], + "angle": 0, + "content": "caption density. As noted in Figure 3, these automatically generated captions are generally noisy. As DAC is using 3M images for fine-tuning, we performed a small ablation on the number of DAC images to use for fine-tuning to be similar to our base image count (10,000 compared to our 8,012), or to our full mask count (100,000 compared to our 99,445)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.518, + 0.172, + 0.533 + ], + "angle": 0, + "content": "5.1. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.541, + 0.471, + 0.844 + ], + "angle": 0, + "content": "In Table 3, we show that, while the DCI Pick1 model trained with negatives loss \\((\\mathrm{sDCI}_{P1})\\) does not reach the performance of DAC models trained on 3M images, it does improve over the CLIP baseline on most metrics3, and outperforms some baselines trained on more data. \\(\\mathrm{sDCI}_{P1}\\) does however outperform both sample-limited ablations of DAC, suggesting that a small number of highly aligned image to dense text pairs are more effective for training models than larger quantities of more loosely aligned or sparse data. Unsurprisingly, the version trained without negatives loss, \\(\\mathrm{sDCI}_{P1NL0}\\), does not improve across most benchmarks, and even somewhat degrades when compared to the CLIP baseline.4 Of note however is the significant bump in VL-Object, alongside some improvement to VL-Attribute. Improvements here suggest that the sDCI dataset successfully includes more object, and to a lesser degree attribute, information than the captions in the source dataset for CLIP. It does, however, point to a limitation of using the LLM summarizations and not incorporating mask information, as relational information is sometimes lost." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.415, + 0.771, + 0.431 + ], + "angle": 0, + "content": "6. Conclusion and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.485, + 0.892, + 0.889 + ], + "angle": 0, + "content": "We introduce the Densely Captioned Images dataset, and display clear use for it as a evaluation benchmark. We also show initial potential for using the dataset for fine-tuning. Given that in order to evaluate today's models on DCI we had to reduce the size of the text to only 77 tokens, DCI should prove to be useful for a longer period of time as models that are able to consume and utilize larger amounts of text context become the norm. We envision that in those cases the full human annotated captions without length reduction would be provided. 
Today's context size limitation also prevented us from fine-tuning existing models on the highly aligned text-image data within DCI, as existing models don't have enough context size to handle the full text, but the dataset isn't nearly large enough to pre-train a new set of models that could use the full text. It could be relevant to treat developing highly aligned text-image datasets in a similar manner to that used in machine translation for low-resource languages, which run into a similar issue with cost and difficulty to collect. This area of work has relied on automated methods such as bitext mining [33] to bootstrap up from an initial set of expertly collected examples, which DCI may already provide the foundation for. Further, we haven't attempted to incorporate the pixel-level masks that the dataset has in any of our experiments, instead opting to use crops around the masks to retain parity with our test set. This dataset is unique for both the extreme density and high degree of alignment present, and in this introductory work we've only scratched the surface of using this information to its fullest extent." + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.85, + 0.47, + 0.901 + ], + "angle": 0, + "content": "3The decreased performance on VL-Object may be explained by our LLM-generated negatives not closely covering the test set negatives. 4The degradation is likely due to the distribution shift and small sample size, given the training objective is the same as CLIP." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "26707" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.185 + ], + "angle": 0, + "content": "[1] Amro Kamal Mohamed Abbas, Kushal Tirumala, Daniel Simig, Surya Ganguli, and Ari S. Morcos. Semdedup: Data-efficient learning at web-scale through semantic dedduplication. In ICLR 2023 Workshop on Mathematical and Empirical Understanding of Foundation Models, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.187, + 0.472, + 0.325 + ], + "angle": 0, + "content": "[2] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katie Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andrew Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.327, + 0.472, + 0.398 + ], + "angle": 0, + "content": "[3] Florian Bordes, Shashank Shekhar, Mark Ibrahim, Diane Bouchacourt, Pascal Vincent, and Ari S. Morcos. Pug: Photorealistic and semantically controllable synthetic data for representation learning. In Advances in Neural Information Processing Systems, 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.4, + 0.472, + 0.44 + ], + "angle": 0, + "content": "[4] John Canny. A computational approach to edge detection. IEEE Transactions on Pattern Analysis and Machine Intelligence, PAMI-8(6):679-698, 1986. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.443, + 0.472, + 0.496 + ], + "angle": 0, + "content": "[5] Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.5, + 0.472, + 0.555 + ], + "angle": 0, + "content": "[6] Ilias Chalkidis, Xiang Dai, Manos Fergadiotis, Prodromos Malakasiotis, and Desmond Elliott. An exploration of hierarchical attention transformers for efficient long document classification, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.557, + 0.472, + 0.586 + ], + "angle": 0, + "content": "[7] Qian Chen, Zhen-Hua Ling, and Xiaodan Zhu. Enhancing sentence embedding with generalized pooling, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.588, + 0.472, + 0.642 + ], + "angle": 0, + "content": "[8] Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollar, and C. Lawrence Zitnick. Microsoft coco captions: Data collection and evaluation server, 2015. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.645, + 0.472, + 0.687 + ], + "angle": 0, + "content": "[9] Karan Desai, Gaurav Kaul, Zubin Aysola, and Justin Johnson. Redcaps: web-curated image-text data created by the people, for the people, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.689, + 0.47, + 0.771 + ], + "angle": 0, + "content": "[10] Sivan Doveh, Assaf Arbelle, Sivan Harary, Roei Herzig, Donghyun Kim, Paola Cascante-bonilla, Amit Alfassy, Rameswar Panda, Raja Giryes, Rogerio Feris, Shimon Ullman, and Leonid Karlinsky. Dense and aligned captions (dac) promote compositional reasoning in v1 models, 2023. 1, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.774, + 0.47, + 0.843 + ], + "angle": 0, + "content": "[11] Sivan Doveh, Assaf Arbelle, Sivan Harary, Rameswar Panda, Roei Herzig, Eli Schwartz, Donghyun Kim, Raja Giryes, Rogerio Feris, Shimon Ullman, and Leonid Karlinsky. Teaching structured vision&language concepts to vision&language models, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[12] Enrico Fini, Pietro Astolfi, Adriana Romero-Soriano, Jakob Verbeek, and Michal Drozdzal. Improved baselines for vision-language pre-training. Transactions on Machine Learning Research (TMLR), 2023. 4" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[13] Timnit Gebru, Jamie Morgenstern, Briana Vecchione, Jennifer Wortman Vaughan, Hanna Wallach, Hal Daumé III au2, and Kate Crawford. Datasheets for datasets, 2021. 5, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.189 + ], + "angle": 0, + "content": "[14] Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.193, + 0.892, + 0.248 + ], + "angle": 0, + "content": "[15] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything, 2023. 
4, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.25, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[16] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Fei-Fei Li. Visual genome: Connecting language and vision using crowdsourced dense image annotations, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.32, + 0.892, + 0.388 + ], + "angle": 0, + "content": "[17] Chunyuan Li, Haotian Liu, Liunian Harold Li, Pengchuan Zhang, Jyoti Aneja, Jianwei Yang, Ping Jin, Houdong Hu, Zicheng Liu, Yong Jae Lee, and Jianfeng Gao. Elevater: A benchmark and toolkit for evaluating language-augmented visual models, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.391, + 0.892, + 0.445 + ], + "angle": 0, + "content": "[18] Junnan Li, Dongxu Li, Caiming Xiong, and Steven C. H. Hoi. BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. CoRR, abs/2201.12086, 2022. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.447, + 0.892, + 0.502 + ], + "angle": 0, + "content": "[19] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models, 2023. 3, 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.504, + 0.892, + 0.532 + ], + "angle": 0, + "content": "[20] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for clip training, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.534, + 0.892, + 0.574 + ], + "angle": 0, + "content": "[21] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.576, + 0.892, + 0.63 + ], + "angle": 0, + "content": "[22] Zhiqiu Lin, Xinyue Chen, Deepak Pathak, Pengchuan Zhang, and Deva Ramanan. Visualgptscore: Visio-linguistic reasoning with multimodal generative pre-training scores, 2023. 1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.633, + 0.892, + 0.674 + ], + "angle": 0, + "content": "[23] Zixian Ma, Jerry Hong, Mustafa Omer Gul, Mona Gandhi, Irena Gao, and Ranjay Krishna. Crepe: Can vision-language foundation models reason compositionally?, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.676, + 0.892, + 0.731 + ], + "angle": 0, + "content": "[24] Vicente Ordonez, Girish Kulkarni, and Tamara Berg. Im2text: Describing images using 1 million captioned photographs. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2011. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.733, + 0.892, + 0.773 + ], + "angle": 0, + "content": "[25] Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives, 2020. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.775, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[26] Filip Radenovic, Abhimanyu Dubey, Abhishek Kadian, Todor Mihaylov, Simon Vandenhende, Yash Patel, Yi Wen, Vignesh Ramanathan, and Dhruv Mahajan. Filtering, distillation, and hard negatives for vision-language pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6967-6977, 2023. 
1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry," + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "26708" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.469, + 0.147 + ], + "angle": 0, + "content": "Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. 3, 4, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.469, + 0.217 + ], + "angle": 0, + "content": "[28] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.218, + 0.469, + 0.315 + ], + "angle": 0, + "content": "[29] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. Laion-5b: An open large-scale dataset for training next generation image-text models, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.316, + 0.469, + 0.412 + ], + "angle": 0, + "content": "[30] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, Melbourne, Australia, 2018. Association for Computational Linguistics. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.413, + 0.469, + 0.468 + ], + "angle": 0, + "content": "[31] Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. FLAVA: A foundational language and vision alignment model. In CVPR, 2022. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.469, + 0.469, + 0.551 + ], + "angle": 0, + "content": "[32] Krishna Srinivasan, Karthik Raman, Jiecao Chen, Mike Bendersky, and Marc Najork. Wit: Wikipedia-based image text dataset for multimodal multilingual machine learning. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '21), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.553, + 0.469, + 0.732 + ], + "angle": 0, + "content": "[33] NLLB Team, Marta R. Costa-jussà, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia González, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. 
No language left behind: Scaling human-centered machine translation, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.733, + 0.469, + 0.788 + ], + "angle": 0, + "content": "[34] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. YFCC100m. Communications of the ACM, 59(2): 64-73, 2016. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.469, + 0.844 + ], + "angle": 0, + "content": "[35] Tristan Thrush, Ryan Jiang, Max Bartolo, Amanpreet Singh, Adina Williams, Douwe Kiela, and Candace Ross. Winoground: Probing vision and language models for visio-linguistic compositionality, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[36] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer," + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.092, + 0.892, + 0.327 + ], + "angle": 0, + "content": "Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and finetuned chat models, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.329, + 0.892, + 0.369 + ], + "angle": 0, + "content": "[37] Jack Urbanek and Pratik Ringshia. Mephisto: A framework for portable, reproducible, and iterative crowdsourcing, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.371, + 0.892, + 0.427 + ], + "angle": 0, + "content": "[38] Hu Xu, Saining Xie, Po-Yao Huang, Licheng Yu, Russell Howes, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Cit: Curation in training for effective vision-language data. arXiv preprint arXiv:2301.02241, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.429, + 0.892, + 0.483 + ], + "angle": 0, + "content": "[39] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.485, + 0.892, + 0.553 + ], + "angle": 0, + "content": "[40] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78, 2014. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.555, + 0.892, + 0.68 + ], + "angle": 0, + "content": "[41] Lili Yu, Bowen Shi, Ramakanth Pasunuru, Benjamin Muller, Olga Golovneva, Tianlu Wang, Arun Babu, Binh Tang, Brian Karrer, Shelly Sheynin, Candace Ross, Adam Polyak, Russell Howes, Vasu Sharma, Puxin Xu, Hovhannes Tamoyan, Oron Ashual, Uriel Singer, Shang-Wen Li, Susan Zhang, Richard James, Gargi Ghosh, Yaniv Taigman, Maryam Fazel-Zarandi, Asli Celikyilmaz, Luke Zettlemoyer, and Armen Aghajanyan. Scaling autoregressive multi-modal models: Pretraining and instruction tuning, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.681, + 0.892, + 0.75 + ], + "angle": 0, + "content": "[42] Mert Yuksekgonul, Federico Bianchi, Pratyusha Kalluri, Dan Jurafsky, and James Zou. When and why vision-language models behave like bags-of-words, and what to do about it? In International Conference on Learning Representations, 2023. 1, 3, 7, 8, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.751, + 0.892, + 0.793 + ], + "angle": 0, + "content": "[43] Yan Zeng, Xinsong Zhang, and Hang Li. Multi-grained vision language pre-training: Aligning texts with visual concepts. arXiv preprint arXiv:2111.08276, 2021. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.794, + 0.892, + 0.836 + ], + "angle": 0, + "content": "[44] Hang Zhang, Yeyun Gong, Yelong Shen, Weisheng Li, Jiancheng Lv, Nan Duan, and Weizhu Chen. Poolingformer: Long document modeling with pooling attention, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.837, + 0.892, + 0.892 + ], + "angle": 0, + "content": "[45] Tiancheng Zhao, Tianqi Zhang, Mingwei Zhu, Haozhan Shen, Kyusong Lee, Xiaopeng Lu, and Jianwei Yin. Vl-checklist: Evaluating pre-trained vision-language models with objects, attributes and relations, 2023. 
1, 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.092, + 0.892, + 0.892 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "26709" + } + ] +] \ No newline at end of file diff --git a/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/f5f3c319-3887-4d17-8f43-fce0198c0c77_origin.pdf b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/f5f3c319-3887-4d17-8f43-fce0198c0c77_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..42cf05f75ef3b25a1650e1709f720b67fa512d21 --- /dev/null +++ b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/f5f3c319-3887-4d17-8f43-fce0198c0c77_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d57c7d275070bbe83576a1407aeb157543515e23686c5d86c2274b3a2b53a092 +size 10051483 diff --git a/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/full.md b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/full.md new file mode 100644 index 0000000000000000000000000000000000000000..6140cd14437442ad76a7081e135924855631e9c9 --- /dev/null +++ b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/full.md @@ -0,0 +1,217 @@ +# A Picture is Worth More Than 77 Text Tokens: Evaluating CLIP-Style Models on Dense Captions + +Jack Urbanek*† Florian Bordes1,2,3† Pietro Astolfi1 Mary Williamson1 Vasu Sharma1 Adriana Romero-Soriano1,3,4,5 + +$^{1}$ FAIR, Meta $^{2}$ Mila $^{3}$ Universite de Montreal, $^{4}$ McGill University $^{5}$ Canada CIFAR AI chair + +# Abstract + +Curation methods for massive vision-language datasets trade off between dataset size and quality. However, even the highest quality of available curated captions are far too short to capture the rich visual detail in an image. To show the value of dense and highly-aligned image-text pairs, we collect the Densely Captioned Images (DCI) dataset, containing 7805 natural images human-annotated with mask-aligned descriptions averaging above 1000 words each. With precise and reliable captions associated with specific parts of an image, we can evaluate vision-language models' (VLMs) understanding of image content with a novel task that matches each caption with its corresponding subcrop. As current models are often limited to 77 text tokens, we also introduce a summarized version (sDCI) in which each caption length is limited. We show that modern techniques that make progress on standard benchmarks do not correspond with significant improvement on our sDCI based benchmark. Lastly, we finetune CLIP using sDCI and show significant improvements over the baseline despite a small training set. By releasing the first human annotated dense image captioning dataset, we hope to enable the development of new benchmarks or fine-tuning recipes for the next generation of VLMs to come. + +# 1. Introduction + +State-of-the-art vision-language models (VLMs) are often trained on large scale datasets such as LAION-400M [28], YFCC100M [34], or other undisclosed datasets crawled from the web. These datasets are formed by collecting images from the web and using alt-text (or other local text on the webpage) to create loose image-text pairs. These can then be filtered down trading off on quantity for quality [26, 30]. 
Still, recent work has demonstrated that throwing these + +loose captions out entirely in favor of generated captions, with enhanced quality and density, can produce improved results [10]. Other works [1, 20, 21, 38] have demonstrated that it is possible to get CLIP-level performance using a vastly reduced compute, often by throwing away portions of the data resulting in more balance between image and text modalities. However, those approaches rely on automatic pipelines which do not generate reliable and long captions that can capture rich visual details in an image. From this it appears no existing dataset has high-quality image descriptions that are tightly-coupled enough with the image to train for or evaluate a deep alignment between the two domains. + +In the absence of high quality captions to evaluate VLMs, benchmarks such as ARO [42] and VL-Checklist [45] often complement image-caption pairs with hard negatives that are generated by slightly altering the initial (positive) description. Progress on these benchmarks has been rooted in training VLMs with negatives of similar construction to the tests [42] rendering the methodologies ineffective on datasets such as Winoground [35]. Recent works [22] have called the evaluation capacity of many of these benchmarks into question, given how effective language-prior-based methods perform. More specifically, given the unlikelihood of the hard negative captions in these benchmarks, a good text encoder can achieve close to $100\%$ accuracy without looking at the images. Moreover, Bordes et al. [3] have shown that most improvements observed on ARO or VL-Checklist do not translate on simple synthetic benchmarks for which the negative caption is as likely as the positive one. Since the use of VLMs is significantly increasing, it is crucial to make sure that we have a diverse suite of reliable benchmarks to assess their abilities. + +In this paper, we introduce the Densely Captioned Images dataset, a collection of 7805 images with dense and mask-aligned descriptions averaging above 1000 words each. One such example is provided in Figure 1, displaying just a subset of the collected text paired with their aligned masks. We demonstrate how to leverage this dataset to evaluate VLMs in two ways after summarizing captions + +![](images/7f6543bdabddedeba6c92fb7457f95520997ceb0aafab20b4e17c13566ebc735.jpg) +Figure 1. One example from the Densely Captioned Images dataset. Only part of the submask hierarchy is shown. + +to fit into CLIP's 77 token limit, both with a negatives-based test as well as a novel matching task, referred to as subcrop-caption matching, that requires selecting appropriate captions for different regions of the same image. We evaluate existing baselines, and observe that no models perform well at both concurrently, and improved + +performance via negatives-based training comes at the cost of decreased performance on subcrop-caption matching. We also run some experiments using the summarized DCI as a fine-tuning dataset to evaluate the effectiveness of these captions for improving a model's performance on other benchmarks, and compare the efficiency per-example + +to that from the automated annotation setup in DAC [10]. To summarize, our contributions are: + +- We release the Densely Captioned Images (DCI) dataset, which contains dense and mask-aligned captions, alongside an LLM-summarized version (sDCI) containing captions under 77 tokens for use with current VLMs. 
+- We provide a new benchmark for VLMs based on sDCI to evaluate fine-grained vision-language understanding, and show that no existing model can perform well at matching captions from within one image to corresponding subsections of that image. +- We show that fine-tuning with high quality image-caption pairs is as good on ARO and VL-Checklist as fine-tuning on at least $10 \times$ the automatically annotated data, and that even without utilizing explicit negatives these pairs can improve performance on VL-C-Object from $81.17\%$ to $88.37\%$ . + +# 2. Related Works + +The massive, loosely-labeled dataset approach that has enabled VLMs like CLIP [27] and powerful successors like BLIP2 [19], Flamingo [2], CM3leon [41], and many others, has been a clear forward step in vision-language modeling. Still recent benchmarks show that models trained in this manner display clear drawbacks in reasoning skills. Additional techniques have been proposed and adopted recently to close this gap, discussed below. + +Vision-Language Datasets. Over the last decade, there have been significant dataset collection efforts connecting images and text. Earlier works focused on curating datasets by leveraging human annotations, see e.g., COCO [8], Visual Genome [16], and Flickr30k [40]. The process resulted in high quality annotations, which were however oftentimes limited by the caption content - i.e., relatively short phrases (5.1 to 10.3 words on average) grounded at image level or region level - and the data annotation scale (30k to 130k images). To increase scale, researchers gathered web-crawled data and introduced large scale datasets such as YFCC100M [34], which contains 100M media objects. Yet, crawling the web oftentimes results in little correspondence between image and text pairs. To reduce noise between image and text pairs, efforts such as SBU [24] queried Flickr and filtered the noisy results, obtaining a $\sim 1\mathrm{M}$ images. Moreover, Conceptual Captions (CC) [30] crawled a dataset of $\sim 12\mathrm{M}$ images and alt-text pairs, and included a protocol to filter noisy text-image pairs, resulting in 3M data points. Relaxing the filtering protocol allows to trade data quality for scale. Crawling alt-text also resulted in relatively short text descriptions with 10.3 words on average, which are most often grounded at image level. Localized Narratives [25] was introduced + +as a dense visual grounding dataset leveraging a multimodal annotation procedure, collecting $\sim 850\mathrm{k}$ text-image pairs with 36.5 words/caption on average. **RedCaps** [9] constituted another effort yielding large scale ( $\sim 12\mathrm{M}$ ) web-curated data by exploring alternate data sources of high quality data instead of devising complex filtering strategies. Wikipedia-based image-text dataset (WIT) [32] extended dataset creation efforts by gathering a multilingual dataset of text-image-pairs consisting of $\sim 11.5\mathrm{M}$ images. LAION-5B [29] further increased the web-crawling efforts by gathering a multilingual dataset of text-image pairs, and filtered the collected data with a pre-trained CLIP [27] model. Following, LAION-CAT [26] reduced noisy examples from LAION-5B by filtering for caption complexity, i.e., captions that do not contain any action, and for text spotting, i.e., images that contain rendered text. **MetaCLIP** [39] has also been released as an open dataset for reproducing CLIP. These very large scale datasets have been successfully used to advance the state-of-the-art of VLMs. 
+ +Vision-Language Evaluation Benchmarks. Several recent advances in visual-language learning have focused on creating comprehensive benchmarks to evaluate model performance in more holistic ways. These benchmarks are instrumental in pushing the envelope of what VLM can understand and process, ensuring they move beyond superficial image-text matching towards genuine understanding of intricate relationships between visual and linguistic elements. In particular, VL-CheckList [45] and ARO [42] assess the VLM capabilities beyond average downstream task accuracy, by focusing on a model's ability to understand objects, attributes, order or relations. ARO's extensive scope, uncovers limitations in VLMs such as poor relational understanding and lack of order sensitivity. Winoground [35] tests models for visio-linguistic compositional reasoning by asking VLM to match two images with two captions containing the same set of words but in different orders. This task requires models to discern the meaning conveyed by the order of words, reflecting different visual scenes. Current VLMs perform only marginally better than chance, highlighting a significant gap in compositional reasoning. CREPE (Compositional REpresentation Evaluation) [23] evaluates two aspects of compositionality: systematicity and productivity. Systematicity is measured by the model's ability to represent seen versus unseen atoms and their compositions, while productivity gauges the model's capacity to understand an unbounded set of increasingly complex expressions. Finally, PUG (Photorealistic Unreal Graphics) [3] uses synthetic data to assess the compositional reasoning abilities of VLMs by progressively increasing the complexity of a given generated scene. One issue with these evaluation datasets is their frequent reliance on COCO, either directly as in ARO, or through Visual Genome as in + +VL-Checklist or CREPE. It is difficult to find an evaluation dataset of sufficient scale without COCO. + +Vision-Language models. Recent VLM advancements have built upon the foundational work of CLIP [27], which leveraged large-scale image-text pairs to jointly pre-train an image encoder and a text encoder to predict which images are paired with which texts in a contrastive learning paradigm. NegCLIP build upon CLIP by leveraging negative captions when training. BLIP (Bootstrapping Language-Image Pre-training) [18] uses a new framework that bootstraps the captions from noisy web data for both understanding and generation tasks. Its successor BLIP-2 [19] further streamlines the process by utilizing off-the-shelf frozen pre-trained image encoders and language models, bridging the modality gap with a lightweight querying mechanism. Clip-rocket [12] improves VLM baselines by showing that applying image and text augmentations makes up for most of the improvement attained by prior VLMs. Flava [31] proposes a foundation VLM model by combining existing VLMs objectives together with auxiliary in-modality losses for the text and vision encoders. X-VLM [43] achieves success with a pretraining method matching sub- portions of the text to regions of the image at multiple granularities. These models introduces improvements over CLIP, focusing on efficiency, adaptability, and reducing the need for extensive labeled datasets, thereby pushing the boundaries of vision-language pre-training. The closest work to our approach is DAC (Densely Aligned Captions) [10], which improves with an automated LLM based pipeline the caption quality and density. 
By showing that DAC-enhanced CLIP models exhibit substantial gains on some benchmarks, this work underscores the critical role that caption quality and density play in the efficacy of VLMs. We build on this insight and explore how to further increase the caption quality and density by relying on human annotators, and analyze how that impacts downstream model performance. + +# 3. Dataset Construction + +The Densely Captioned Images dataset, or DCI, consists of 7805 images from SA-1B [15], each with a complete description aiming to capture the full visual detail of what is present in the image. Much of the description is directly aligned to submasks of the image. An example is shown in Figure 1. In the top left we see the full image of a water pump, with an associated description. The italicized section is collected as a standard caption, aiming to summarize the full image in about a sentence, similar to existing caption datasets. The remainder of that first description contains details about the relationship between visible entities in the image, as well as in-depth descriptions of regions that are not described as part of the submasks. All other + +text describing the image is associated with submasks of the image. Each submask has its own free-text label (not pictured) and description, and may also contain further submasks. Here for instance we see submasks for windows and balconies as being contained in the submask capturing three buildings in the background. + +# 3.1. Preparation + +In order to collect the data, we first select images from a random privacy-mitigated subset of SA-1B. We then procedurally extract subregions of each image to annotate, as we found in initial trials that crowdsourcing both regions and descriptions concurrently overcomplicated the task and successful annotation rate. For this process, we turn to the Segment Anything Model (SAM) [15] and adapt their standard method to extract all masks from an image. + +For the extraction process, SAM usually relies on a grid of points across the entire image. In order to increase the possibility of selecting interesting regions worth annotating, we additionally apply a canny filter [4] and select random points within a radius from discovered edges. We then run SAM to detect all masks using both the grid and the near-edge points. Once the masks are returned, we establish a hierarchy of submasks by thresholding the number of overlapping pixels between two masks to determine if one is a submask of the other, or if the two masks should be joined. This helps reduce some of the noise introduced by the automatic masking process, and leaves us with a tree-like structure for the masks. Lastly, we remove any masks that are too small. We note that undergoing this process does not result in every detail of each image being selected as a candidate for annotation, and as such instances in the DCI dataset are not expected to have complete submask-aligned coverage of all elements one could recognize in or discuss about an image. + +# 3.2. Collection Process + +We use Mephisto [37] to host our task, pay crowdworkers to provide annotations on the dataset, and additionally run qualification steps. Workers that pass our qualifications are eligible to work on the main task which contains 3 stages: + +1. Workers are provided with the whole image, and asked to provide a short description of it. This is considered the standard caption. + +2. 
Workers are provided with submasks of the image, one at a time starting with the leaves of the mask tree, displaying a SAM-selected region of the image as well as an indicator for where that region comes from. They are generally asked to provide a label and complete description for the pictured region, though are allowed to mark the region as 'uninteresting' and only provide a label, or 'bad' and provide nothing. These options allow us to focus worker time on useful annotations and help capture some of the noise of the automatic selection pro + +![](images/a95679abff74c421fb7258abfea90d8523cd65a8d27b1e1068b9aed4a41dc101.jpg) +Figure 2. Annotation view for writing description for masks of the image. The masked region appears highlighted for clarity. + +![](images/6f66c7649ed1279ac6568d0737850cc9b090d120ab21eb4808d4af70773fa917.jpg) +Figure 3. Example of a Llama2-generated summary and negative that comprise sDCI. Each image and submask have multiple summarizations and negatives. We also compare the caption quality between DAC [10] and DCI. In contrast to DCI that relies on human annotations, DAC used an automatic pipeline based on LLM for captioning. As we observe in this example, the DAC captions can suffer from hallucinations and miss important elements of the photo. In this work we argue that while improving automatic pipeline is an important research direction, for now the captions proposed are not reliable enough to be used to evaluate models and assess their abilities. + +A black metal water pump in a town square in the middle of a circular stone shape across from several buildings. Black metal water pump in foreground with spigot in front and curved handle with globular end. It is located in the middle of a smaller circle of stones. There is some grass growing in between some of the stones. There is a metal grated drain near the bottom of the pump. To its left is a dark brown metal manhole cover and behind it is a grey stone tiled road. The road leads to three buildings across the street with storefronts on the bottom level and different units with balconies and large windows on the second and third level. There are people out front walking by the lower level. There are benches out front. Human annotated + +A black metal water pump stands in a town square, surrounded by a circle of stones, with buildings and people in the background LLM Summary + +A pump stands metal water black in square town, surrounded by stones circle, with background buildings people in. + +DAC Quality: a fire hydrant in the middle of a city street +DAC Dense: 1 A fire hydrant can be seen in the center of a street, its metallic spray emanating water vapor from a nozzle as it shoots water into the air. 2 The spray creates a steady waterfall, making it appear as though the hydrant is spraying water from its nozzle the whole time. 3 The spray is relatively smooth, creating a gradual wave that covers a large portion of a city block. +DAC SAM: a black plastic drain cover, a building, a water fountain, a bicycle, a water fountain, the image shows a circular stone path with a circular stone base, a water pump, the logo of the company, the image shows a black metal handle with a long handle, home bank, a dragon + +cess. This is shown in Figure 2. For masks that contain submasks, workers are also provided with overlays that show the regions already annotated, and are asked to annotate in terms of what has already been written. + +3. 
After completing all the submasks, the worker is then shown the complete image again and asked to provide an overall description, paying attention to the relationship between previously annotated regions. + +An in-depth description of the filtering and quality assurance process can be found in Appendix 8 while the Datasheet [13] is available in Appendix 12. Complete annotation instructions, dataset download links as well as reproducible code are available on our GitHub1. The DCI dataset is released under the CC-BY-NC license. + +# 3.3. Fitting DCI into 77 CLIP tokens + +Ultimately, we collected an average of 1111 words (1279 CLIP tokens) per image, with a median of 941 words. This proves problematic for evaluating or fine-tuning CLIP-based VLMs given their maximum text token length of 77. Embedding pooling methods [7] to extend the effective input size for text modeling is an active research area [6, 44], and current work suggests average-pooling embeddings over these longer descriptions would be ineffective. + +One possible approach would be to utilize the subsections of the image while providing the corresponding subcaption, in a manner akin to a multi-modal multi-crop approach [5]. Still, even when considering just the 91,424 submasks, the average token length is nearly 200 per caption. We instead use the longer context capabilities of Llama2 [36] to summarize down the overall information in the image into CLIP-consumable portions. We generate multiple captions for each image and submask, using prompts that attempt to summarize down recursively until the result is in bounds. As this modification to the dataset is generated automatically, the summarizations may have introduced noise, and may not capture all of the detail in the full original captions. Summarizations also occasionally mix references or include context in a submask that isn't the main focus. Still, the summaries are fairly high quality and more dense than those found in other datasets, especially when using more than one distinct summarization per image. We also prompt the LLM to generate negatives from these summaries, achieving a set of particularly hard negatives for CLIP to evaluate. We call this version of the dataset summarized DCI (sDCI). Examples of full caption, LLM summary and LLM negative are included in Figure 3 and contrasted with DAC [10] data. More detail including the prompts used can be found in Appendix 7. + +Ultimately, this fitting step produces a lower bound on the level of vision-language understanding 'resolution' that the overall DCI dataset is capable of evaluating a model for. As newer models arise that are able to handle embedding much larger quantities of text content, it will be possible to make full use of DCI's original annotated captions. + +# 3.4. Statistics + +All-in-all the Densely Captioned Images dataset is far more dense than Localized Narratives on COCO images [25] (later referred to as $LN_{\text{COCO}}$ ) and nearly $100 \times$ more dense than standard COCO captions [8]. After reducing to CLIP-bounded summaries, it still contains more text density than both. Complete details can be found in Table 1. + +Here we see that the multiple-summarization method of sDCI produces fairly similar token per image values to the original dataset while keeping individual captions' token lengths in bounds for CLIP. To get Localized Narratives into the 77 token bound, we simply drop longer examples. + +
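To make the 77-token budget concrete, the sketch below illustrates how a caption can be checked against CLIP's text context window, e.g. to drop over-length examples as is done for \( LN_{COCO<77} \), or to flag a DCI caption as needing LLM summarization (Sec. 3.3). This is an illustrative sketch only, not part of the released DCI code; it assumes the Hugging Face `transformers` package and the public `openai/clip-vit-base-patch32` tokenizer, and the example captions are hypothetical.

```python
from transformers import CLIPTokenizer

# Public CLIP tokenizer; the 77-token limit includes the start- and end-of-text tokens.
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
CLIP_MAX_TOKENS = 77

def clip_token_count(caption: str) -> int:
    # Tokenize without truncation so over-length captions remain visible.
    return len(tokenizer(caption, truncation=False)["input_ids"])

def fits_clip_context(caption: str) -> bool:
    return clip_token_count(caption) <= CLIP_MAX_TOKENS

# Hypothetical captions, used only to illustrate the filtering step.
captions = [
    "A black metal water pump stands in a town square, surrounded by a circle of stones.",
    "A very long, densely annotated description of every visible region of the image. " * 40,
]
kept = [c for c in captions if fits_clip_context(c)]
print(f"Kept {len(kept)} of {len(captions)} captions within {CLIP_MAX_TOKENS} tokens.")
```

Since the full DCI captions average roughly 1,282 CLIP tokens per image (Table 1), simple filtering of this kind is not an option for DCI itself, which is why the recursive LLM summarization of Sec. 3.3 is needed.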
| Dataset | Imgs | Caps | Toks/Cap | Toks/Img |
| --- | --- | --- | --- | --- |
| DCI | 7,805 | 7,805 | 1,282.09 | 1,282.09 |
| \( DCI_{sub} \) | 96,007 | 96,007 | 199.33 | 199.33 |
| sDCI | 8,012 | 87,268 | 49.21 | 536.00 |
| \( sDCI_{sub} \) | 96,007 | 714,630 | 36.60 | 263.01 |
| \( LN_{COCO} \) | 142,845 | 142,845 | 49.11 | 49.11 |
| \( LN_{COCO<77} \) | 127,456 | 127,456 | 43.70 | 43.70 |
| COCO | 123,287 | 616,767 | 13.54 | 67.74 |
Table 1. Comparison of DCI dataset statistics to other datasets, focusing on average CLIP tokens per image or caption. Note the $26\mathbf{x}$ difference between DCI and the previous longest annotated dataset, Localized Narratives (LN). *sub* denotes including sub-masks and their descriptions as examples, and sDCI refers to the LLM-summarized version of DCI that fits captions to 77 tokens (Sec. 3.3), while $\mathrm{LN}_{\mathrm{COCO}<77}$ simply drops examples longer than 77 tokens ($\sim 10.8\%$).

# 4. Evaluating VLMs with summarized DCI

# 4.1. Methodology

Using the 7805 images in the summarized Densely Captioned Images (sDCI) dataset, we construct a few different evaluations. As noted above, the ability to select multiple submasks from the same image and include them in the same batch allows us to create a CLIP-style test, wherein the model scores a full batch of images and captions and must correctly match each caption to its image. As we provide models with a crop around each selected mask, we call this Subcrop-Caption Matching (SCM), and we use a batch size of 8. We can also run against our LLM-generated negatives. Given that LLM summarization has provided us with multiple captions and negatives per image and submask, we supply the first unless noted otherwise. With this in mind, we construct 6 evaluations as follows (a scoring sketch is given after the list):

[All SCM]: Group each image with its subcrops, alongside one summarized caption per subcrop. Then use the model to find the most likely caption associated with each subcrop. This test measures the ability of the VLM to distinguish between the different parts that compose an image.

[All Neg]: Select one LLM-summarized caption and the corresponding LLM-generated negative for each image and subcrop. Score a model on its ability to distinguish between the positive and the negative.

[All Pick5-SCM]: Use the same setup as All SCM, but rather than using only one caption per subcrop, use 5 LLM-generated captions per subcrop. We score a model as succeeding only when the worst-scoring positive caption scores higher than the best-scoring caption of any other image in the batch. This test evaluates whether the representation space is structured such that the captions belonging to a specific image are closest to the target image in the space.

[All Pick5-Neg]: Use the same setup as All Neg, but rather than using one caption, use 5 LLM-summarized captions for each image and subcrop. If any of these captions scores worse than the negative, the model fails the example.

[Base Neg]: Using only the 7805 base images without subcrops, evaluate the model's ability to distinguish between an LLM-generated caption and its corresponding LLM-generated negative. Note that this is a strict subset of All Neg, though these captions are longer on average and cover a different distribution.

[All Hard-Negs]: Use the same setup as All Neg, but rather than using a single negative, use whichever of the LLM-generated negatives CLIP scores highest.
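As a minimal sketch of how one such batch is scored, the snippet below uses the Hugging Face `transformers` CLIP implementation for illustration (the released evaluation code may differ); `subcrops` and `captions` are assumed to be one image's crops and their first summarized captions, aligned by index, and `image`/`positive`/`negative` one example for the negatives test.

```python
import torch
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").eval()
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

@torch.no_grad()
def scm_accuracy(subcrops, captions) -> float:
    """Subcrop-Caption Matching over one batch (e.g. 8 subcrops of one image)."""
    inputs = processor(text=captions, images=subcrops,
                       return_tensors="pt", padding=True, truncation=True)
    logits = model(**inputs).logits_per_image   # [n_crops, n_captions]
    preds = logits.argmax(dim=-1)
    targets = torch.arange(len(subcrops))       # caption i belongs to crop i
    return (preds == targets).float().mean().item()

@torch.no_grad()
def beats_negative(image, positive, negative) -> bool:
    """Negatives test: the positive caption must outscore its LLM negative."""
    inputs = processor(text=[positive, negative], images=image,
                       return_tensors="pt", padding=True, truncation=True)
    logits = model(**inputs).logits_per_image[0]  # scores for [positive, negative]
    return bool(logits[0] > logits[1])
```

The Pick5 and Hard-Negs variants simply repeat this scoring over the five stored captions or over all stored negatives and take the worst case for the model.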
| Model | All SCM | All Neg | All Pick5 SCM | All Pick5 Neg | Base Neg | All Hard Negs |
| --- | --- | --- | --- | --- | --- | --- |
| CLIP Baseline [27] | 40.06% | 60.79% | 11.21% | 24.06% | 67.56% | 41.34% |
| NegCLIP [42] | 43.35% | 56.00% | 13.22% | 4.82% | 76.69% | 50.84% |
| BLIP [18] | 39.13% | 54.02% | 10.73% | 5.51% | 63.41% | 53.23% |
| Flava [31] | 38.08% | 47.99% | 8.01% | 9.82% | 11.6% | 45.59% |
| X-VLM [43] | 38.45% | 53.46% | 10.96% | 5.10% | 44.29% | 52.42% |
| DAC\( _{LLM} \) [10] | 37.45% | 81.71% | 8.13% | 37.84% | 90.56% | 71.21% |
| DAC\( _{SAM} \) [10] | 37.90% | 84.17% | 6.70% | 39.94% | 89.66% | 73.61% |
Table 2. sDCI test results: We compare existing baselines on our Subcrop-Caption Matching (SCM) and negatives tests. Additional results are available in Table 10 in the Appendix. We note that our best model fine-tuned on sDCI from Section 5 achieved $64.02\%$ and $31.60\%$ on a held-out test of All SCM and All Pick5-SCM respectively, setting an upper bound for model performance.

# 4.2. Results

We compare in Table 2 the sDCI performance of different state-of-the-art models: CLIP [27], NegCLIP [42], BLIP [18], Flava [31] and X-VLM [43]. Additional experiments on different architectures and pretraining datasets are available in Table 10 (see Appendix). The CLIP baseline starts at $40.12\%$ on All SCM and $60.63\%$ on All Neg. The only model to improve over CLIP on SCM tasks is NegCLIP, which follows from the fact that the hard image negatives NegCLIP is trained on provide the closest task to ours of any of these models. None of the models trained without an explicit CLIP-loss component outperform CLIP on SCM tasks, but DAC ultimately performs the worst.

Performance on the Pick5 variations of each task follows the trends of the standard performance. Performance on Base Neg for Flava points to a weakness in comparing longer text examples, given the significant drop from $47.99\%$ to $11.6\%$ that is not observed in other models.

Interestingly, models trained without CLIP (BLIP, Flava, X-VLM) experience a far less noticeable drop in performance between All Neg and All Hard Negs. This validates that sDCI's CLIP-hard negatives are not simply a higher proportion of 'impossible' negatives, but rather capture some underlying trait of the negatives that CLIP models and their descendants all struggle with.

None of the models presented perform well across all of the sDCI test set. Given that each of the CLIP-style models has some advantage on this test set, being trained on an objective that sDCI directly evaluates, we expect the BLIP, Flava, and X-VLM scores to be somewhat representative of existing state-of-the-art models' true performance on this test set.

# 5. Using summarized DCI as a fine-tuning dataset

To evaluate the usefulness and difficulty of the sDCI dataset for training, we fine-tune state-of-the-art models with it. In particular, we use a ViT-B/32 CLIP model in all of our experiments, requiring use of the CLIP-bounded version of our dataset. We split sDCI into 7800 train, 100 validation, and 112 test samples for this purpose.
We use a training batch size of 32 and a learning rate of $5\mathrm{e}{-5}$ for all experiments, and run for up to 10 epochs. We train using the standard CLIP loss as well as an additional negatives-loss component, which follows the 'text negative' of NegCLIP [42]. Given the tiny size of our fine-tuning sets relative to the 400M pretraining images, we use LoRA [14] to reduce the number of trainable parameters. We train models both with and without the negatives loss.

In order to make good use of the multiple summarized captions we have per image and submask, we randomly select one to be used in each individual epoch. We call this method Pick1. We describe this method and other ablations we attempted in more detail in Appendix 9.

We follow the experimental setup of DAC [10] by evaluating our sDCI fine-tuned CLIP on the ARO and VL-Checklist benchmarks. We compare to DAC directly as it is the most similar work to ours in attempting to increase caption density. As noted in Figure 3, these automatically generated captions are generally noisy. As DAC uses 3M images for fine-tuning, we performed a small ablation on the number of DAC images used for fine-tuning, matching either our base image count (10,000 compared to our 8,012) or our full mask count (100,000 compared to our 99,445).
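A minimal sketch of the fine-tuning recipe described above, assuming the Hugging Face `transformers` and `peft` libraries (the released training code may differ); the negatives-loss term here is a simplified stand-in for NegCLIP's text-negative objective, and `caption_lists`/`negative_lists` are assumed to hold the multiple stored summaries and LLM-generated negatives per example for the Pick1 sampling.

```python
import random
import torch
import torch.nn.functional as F
from peft import LoraConfig, get_peft_model
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# LoRA on the attention projections keeps trainable parameters small
# relative to CLIP's 400M-image pretraining scale.
model = get_peft_model(model, LoraConfig(r=8, lora_alpha=16,
                                         target_modules=["q_proj", "v_proj"]))
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

def training_step(images, caption_lists, negative_lists, neg_weight=1.0):
    # Pick1: sample one stored summary (and negative) per example this epoch.
    captions = [random.choice(caps) for caps in caption_lists]
    negatives = [random.choice(negs) for negs in negative_lists]

    inputs = processor(text=captions + negatives, images=images,
                       return_tensors="pt", padding=True, truncation=True)
    out = model(**inputs)
    n = len(images)
    logits = out.logits_per_image        # [n images, 2n texts]: positives then negatives
    target = torch.arange(n)

    # Standard symmetric CLIP loss over the positive captions only.
    clip_loss = (F.cross_entropy(logits[:, :n], target)
                 + F.cross_entropy(out.logits_per_text[:n], target)) / 2

    # Simplified text-negative term: each image must prefer its own caption
    # over its own LLM-generated hard negative.
    pos = logits[target, target]
    neg = logits[target, n + target]
    neg_loss = F.cross_entropy(torch.stack([pos, neg], dim=1),
                               torch.zeros(n, dtype=torch.long))

    loss = clip_loss + neg_weight * neg_loss
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()
```

Setting `neg_weight=0` corresponds to the variant trained without the negatives loss.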
| Model | ARO VG-R | ARO VG-A | ARO COCO | ARO FLICKR | VL-C Object | VL-C Attribute | VL-C Relation |
| --- | --- | --- | --- | --- | --- | --- | --- |
| sDCI\( _{P1} \) | 76.23% | 67.56% | 88.58% | 91.30% | 80.71% | 68.69% | 70.12% |
| sDCI\( _{P1NL0} \) | 57.34% | 61.98% | 39.36% | 44.62% | 88.37% | 70.42% | 61.28% |
| DAC\( _{LLM} \) 10,000 | 61.53% | 63.89% | 46.28±1.5% | 59.41±1.9% | 66.90% | 57.4% | 56.96% |
| DAC\( _{LLM} \) 100,000 | 61.0% | 63.6% | 48.2% | 61.42% | 66.87% | 57.22% | 57.18% |
| DAC\( _{LLM} \) 500,000 | 60.1% | 63.8% | 50.2% | 61.6% | 66.54% | 57.39% | 56.77% |
| DAC\( _{LLM} \) 3,000,000 | 81.28% | 73.91% | 94.47% | 95.68% | 87.30% | 77.27% | 86.41% |
| DAC\( _{SAM} \) 3,000,000 | 77.16% | 70.5% | 91.22% | 93.88% | 88.50% | 75.83% | 89.75% |
| CLIP Baseline [27] | 59.98% | 63.18% | 47.9% | 60.2% | 81.17% | 67.67% | 61.95% |
| BLIP2 [19] | 41.16% | 71.25% | 13.57% | 13.72% | 84.14% | 80.12% | 70.72% |
| NegCLIP [42] | 81% | 71% | 86% | 91% | 81.35% | 72.24% | 63.53% |
| SVLC [11] | 80.61% | 73.03% | 84.73% | 91.7% | 85% | 71.97% | 68.95% |
Table 3. sDCI fine-tuned CLIP performance on the ARO and VL-Checklist benchmarks. We compare CLIP fine-tuned with sDCI against models fine-tuned using DAC captions. Since the DAC dataset contains 3M images whereas sDCI contains only 7805 images, we performed an ablation over the number of DAC training images used; for instance, DAC\( _{LLM} \) 10,000 refers to fine-tuning CLIP using only 10,000 images from DAC. We report the mean across 5 different seeds and display the standard deviation when it is above $1\%$ accuracy. We observe that training on sDCI leads to significant improvements over DAC for a comparable number of examples.

# 5.1. Results

In Table 3, we show that, while the DCI Pick1 model trained with the negatives loss ($\mathrm{sDCI}_{P1}$) does not reach the performance of DAC models trained on 3M images, it does improve over the CLIP baseline on most metrics, and outperforms some baselines trained on more data. $\mathrm{sDCI}_{P1}$ does, however, outperform both sample-limited ablations of DAC, suggesting that a small number of highly aligned image and dense-text pairs is more effective for training models than larger quantities of more loosely aligned or sparse data. Unsurprisingly, the version trained without the negatives loss, $\mathrm{sDCI}_{P1NL0}$, does not improve across most benchmarks, and even degrades somewhat compared to the CLIP baseline. Of note, however, is the significant bump in VL-Object, alongside some improvement on VL-Attribute. Improvements here suggest that the sDCI dataset successfully includes more object, and to a lesser degree attribute, information than the captions in CLIP's source dataset. They also point to a limitation of using the LLM summarizations without incorporating mask information, as relational information is sometimes lost.

# 6. Conclusion and Future Work

We introduce the Densely Captioned Images dataset and demonstrate its clear use as an evaluation benchmark. We also show initial potential for using the dataset for fine-tuning. Given that evaluating today's models on DCI required reducing each caption to only 77 tokens, DCI should remain useful for a long time to come, as models that can consume and utilize larger amounts of text context become the norm. We envision that in those cases the full human-annotated captions, without length reduction, would be used. Today's context-size limitation also prevented us from fine-tuning existing models on the full highly aligned text-image data within DCI, and the dataset is not nearly large enough to pre-train a new set of models that could use the full text. It could be relevant to treat the development of highly aligned text-image datasets in a manner similar to machine translation for low-resource languages, which faces a similar issue of cost and difficulty of collection. That area of work has relied on automated methods such as bitext mining [33] to bootstrap up from an initial set of expertly collected examples, and DCI may already provide such a foundation.
Further, we haven't attempted to incorporate the pixel-level masks that the dataset has in any of our experiments, instead opting to use crops around the masks to retain parity with our test set. This dataset is unique for both the extreme density and high degree of alignment present, and in this introductory work we've only scratched the surface of using this information to its fullest extent. + +# References + +[1] Amro Kamal Mohamed Abbas, Kushal Tirumala, Daniel Simig, Surya Ganguli, and Ari S. Morcos. Semdedup: Data-efficient learning at web-scale through semantic dedduplication. In ICLR 2023 Workshop on Mathematical and Empirical Understanding of Foundation Models, 2023. 1 +[2] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katie Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andrew Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning, 2022. 3 +[3] Florian Bordes, Shashank Shekhar, Mark Ibrahim, Diane Bouchacourt, Pascal Vincent, and Ari S. Morcos. Pug: Photorealistic and semantically controllable synthetic data for representation learning. In Advances in Neural Information Processing Systems, 2023. 1, 3 +[4] John Canny. A computational approach to edge detection. IEEE Transactions on Pattern Analysis and Machine Intelligence, PAMI-8(6):679-698, 1986. 4 +[5] Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments, 2021. 6 +[6] Ilias Chalkidis, Xiang Dai, Manos Fergadiotis, Prodromos Malakasiotis, and Desmond Elliott. An exploration of hierarchical attention transformers for efficient long document classification, 2022. 6 +[7] Qian Chen, Zhen-Hua Ling, and Xiaodan Zhu. Enhancing sentence embedding with generalized pooling, 2018. 6 +[8] Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollar, and C. Lawrence Zitnick. Microsoft coco captions: Data collection and evaluation server, 2015. 3, 6 +[9] Karan Desai, Gaurav Kaul, Zubin Aysola, and Justin Johnson. Redcaps: web-curated image-text data created by the people, for the people, 2021. 3 +[10] Sivan Doveh, Assaf Arbelle, Sivan Harary, Roei Herzig, Donghyun Kim, Paola Cascante-bonilla, Amit Alfassy, Rameswar Panda, Raja Giryes, Rogerio Feris, Shimon Ullman, and Leonid Karlinsky. Dense and aligned captions (dac) promote compositional reasoning in v1 models, 2023. 1, 3, 4, 5, 6, 7 +[11] Sivan Doveh, Assaf Arbelle, Sivan Harary, Rameswar Panda, Roei Herzig, Eli Schwartz, Donghyun Kim, Raja Giryes, Rogerio Feris, Shimon Ullman, and Leonid Karlinsky. Teaching structured vision&language concepts to vision&language models, 2023. 8 +[12] Enrico Fini, Pietro Astolfi, Adriana Romero-Soriano, Jakob Verbeek, and Michal Drozdzal. Improved baselines for vision-language pre-training. Transactions on Machine Learning Research (TMLR), 2023. 4 + +[13] Timnit Gebru, Jamie Morgenstern, Briana Vecchione, Jennifer Wortman Vaughan, Hanna Wallach, Hal Daumé III au2, and Kate Crawford. Datasheets for datasets, 2021. 5, 13 +[14] Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models, 2021. 
7 +[15] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything, 2023. 4, 13 +[16] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Fei-Fei Li. Visual genome: Connecting language and vision using crowdsourced dense image annotations, 2016. 3 +[17] Chunyuan Li, Haotian Liu, Liunian Harold Li, Pengchuan Zhang, Jyoti Aneja, Jianwei Yang, Ping Jin, Houdong Hu, Zicheng Liu, Yong Jae Lee, and Jianfeng Gao. Elevater: A benchmark and toolkit for evaluating language-augmented visual models, 2022. 5 +[18] Junnan Li, Dongxu Li, Caiming Xiong, and Steven C. H. Hoi. BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. CoRR, abs/2201.12086, 2022. 4, 7 +[19] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models, 2023. 3, 4, 8 +[20] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for clip training, 2023. 1 +[21] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking, 2023. 1 +[22] Zhiqiu Lin, Xinyue Chen, Deepak Pathak, Pengchuan Zhang, and Deva Ramanan. Visualgptscore: Visio-linguistic reasoning with multimodal generative pre-training scores, 2023. 1, 4 +[23] Zixian Ma, Jerry Hong, Mustafa Omer Gul, Mona Gandhi, Irena Gao, and Ranjay Krishna. Crepe: Can vision-language foundation models reason compositionally?, 2023. 3 +[24] Vicente Ordonez, Girish Kulkarni, and Tamara Berg. Im2text: Describing images using 1 million captioned photographs. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2011. 3 +[25] Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives, 2020. 3, 6 +[26] Filip Radenovic, Abhimanyu Dubey, Abhishek Kadian, Todor Mihaylov, Simon Vandenhende, Yash Patel, Yi Wen, Vignesh Ramanathan, and Dhruv Mahajan. Filtering, distillation, and hard negatives for vision-language pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6967-6977, 2023. 1, 3 +[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, + +Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. 3, 4, 7, 8 +[28] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs, 2021. 1 +[29] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. Laion-5b: An open large-scale dataset for training next generation image-text models, 2022. 3 +[30] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. 
In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, Melbourne, Australia, 2018. Association for Computational Linguistics. 1, 3 +[31] Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. FLAVA: A foundational language and vision alignment model. In CVPR, 2022. 4, 7 +[32] Krishna Srinivasan, Karthik Raman, Jiecao Chen, Mike Bendersky, and Marc Najork. Wit: Wikipedia-based image text dataset for multimodal multilingual machine learning. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '21), 2021. 3 +[33] NLLB Team, Marta R. Costa-jussà, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia González, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. No language left behind: Scaling human-centered machine translation, 2022. 8 +[34] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. YFCC100m. Communications of the ACM, 59(2): 64-73, 2016. 1, 3 +[35] Tristan Thrush, Ryan Jiang, Max Bartolo, Amanpreet Singh, Adina Williams, Douwe Kiela, and Candace Ross. Winoground: Probing vision and language models for visio-linguistic compositionality, 2022. 1, 3 +[36] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, + +Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and finetuned chat models, 2023. 6 +[37] Jack Urbanek and Pratik Ringshia. Mephisto: A framework for portable, reproducible, and iterative crowdsourcing, 2023. 4 +[38] Hu Xu, Saining Xie, Po-Yao Huang, Licheng Yu, Russell Howes, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Cit: Curation in training for effective vision-language data. arXiv preprint arXiv:2301.02241, 2023. 1 +[39] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data, 2023. 3 +[40] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. 
From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78, 2014. 3 +[41] Lili Yu, Bowen Shi, Ramakanth Pasunuru, Benjamin Muller, Olga Golovneva, Tianlu Wang, Arun Babu, Binh Tang, Brian Karrer, Shelly Sheynin, Candace Ross, Adam Polyak, Russell Howes, Vasu Sharma, Puxin Xu, Hovhannes Tamoyan, Oron Ashual, Uriel Singer, Shang-Wen Li, Susan Zhang, Richard James, Gargi Ghosh, Yaniv Taigman, Maryam Fazel-Zarandi, Asli Celikyilmaz, Luke Zettlemoyer, and Armen Aghajanyan. Scaling autoregressive multi-modal models: Pretraining and instruction tuning, 2023. 3 +[42] Mert Yuksekgonul, Federico Bianchi, Pratyusha Kalluri, Dan Jurafsky, and James Zou. When and why vision-language models behave like bags-of-words, and what to do about it? In International Conference on Learning Representations, 2023. 1, 3, 7, 8, 4 +[43] Yan Zeng, Xinsong Zhang, and Hang Li. Multi-grained vision language pre-training: Aligning texts with visual concepts. arXiv preprint arXiv:2111.08276, 2021. 4, 7 +[44] Hang Zhang, Yeyun Gong, Yelong Shen, Weisheng Li, Jiancheng Lv, Nan Duan, and Weizhu Chen. Poolingformer: Long document modeling with pooling attention, 2022. 6 +[45] Tiancheng Zhao, Tianqi Zhang, Mingwei Zhu, Haozhan Shen, Kyusong Lee, Xiaopeng Lu, and Jianwei Yin. Vl-checklist: Evaluating pre-trained vision-language models with objects, attributes and relations, 2023. 1, 3 \ No newline at end of file diff --git a/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/images.zip b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..e71a40f3fcfbe468cf22a2aa841e76ed9689eff3 --- /dev/null +++ b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df8c6ab052ca35c47441d1693e9b94b9bfbaf2e44c2c764923c26931b4d975c0 +size 637147 diff --git a/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/layout.json b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..00141eaa37ad4dd339087b8216d34a1746b73c45 --- /dev/null +++ b/2024/A Picture is Worth More Than 77 Text Tokens_ Evaluating CLIP-Style Models on Dense Captions/layout.json @@ -0,0 +1,5854 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 143, + 103, + 452, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 103, + 452, + 140 + ], + "spans": [ + { + "bbox": [ + 143, + 103, + 452, + 140 + ], + "type": "text", + "content": "A Picture is Worth More Than 77 Text Tokens: Evaluating CLIP-Style Models on Dense Captions" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 159, + 539, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 539, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 539, + 190 + ], + "type": "text", + "content": "Jack Urbanek*† Florian Bordes1,2,3† Pietro Astolfi1 Mary Williamson1 Vasu Sharma1 Adriana Romero-Soriano1,3,4,5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 201, + 521, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 201, + 521, + 217 
+ ], + "spans": [ + { + "bbox": [ + 67, + 201, + 521, + 217 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 67, + 201, + 521, + 217 + ], + "type": "text", + "content": "FAIR, Meta " + }, + { + "bbox": [ + 67, + 201, + 521, + 217 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 67, + 201, + 521, + 217 + ], + "type": "text", + "content": "Mila " + }, + { + "bbox": [ + 67, + 201, + 521, + 217 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 67, + 201, + 521, + 217 + ], + "type": "text", + "content": "Universite de Montreal, " + }, + { + "bbox": [ + 67, + 201, + 521, + 217 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 67, + 201, + 521, + 217 + ], + "type": "text", + "content": "McGill University " + }, + { + "bbox": [ + 67, + 201, + 521, + 217 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 67, + 201, + 521, + 217 + ], + "type": "text", + "content": "Canada CIFAR AI chair" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "spans": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "spans": [ + { + "bbox": [ + 47, + 270, + 289, + 547 + ], + "type": "text", + "content": "Curation methods for massive vision-language datasets trade off between dataset size and quality. However, even the highest quality of available curated captions are far too short to capture the rich visual detail in an image. To show the value of dense and highly-aligned image-text pairs, we collect the Densely Captioned Images (DCI) dataset, containing 7805 natural images human-annotated with mask-aligned descriptions averaging above 1000 words each. With precise and reliable captions associated with specific parts of an image, we can evaluate vision-language models' (VLMs) understanding of image content with a novel task that matches each caption with its corresponding subcrop. As current models are often limited to 77 text tokens, we also introduce a summarized version (sDCI) in which each caption length is limited. We show that modern techniques that make progress on standard benchmarks do not correspond with significant improvement on our sDCI based benchmark. Lastly, we finetune CLIP using sDCI and show significant improvements over the baseline despite a small training set. By releasing the first human annotated dense image captioning dataset, we hope to enable the development of new benchmarks or fine-tuning recipes for the next generation of VLMs to come." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 568, + 128, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 568, + 128, + 582 + ], + "spans": [ + { + "bbox": [ + 47, + 568, + 128, + 582 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 589, + 287, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 589, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 589, + 287, + 685 + ], + "type": "text", + "content": "State-of-the-art vision-language models (VLMs) are often trained on large scale datasets such as LAION-400M [28], YFCC100M [34], or other undisclosed datasets crawled from the web. These datasets are formed by collecting images from the web and using alt-text (or other local text on the webpage) to create loose image-text pairs. These can then be filtered down trading off on quantity for quality [26, 30]. Still, recent work has demonstrated that throwing these" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 246, + 545, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 246, + 545, + 389 + ], + "spans": [ + { + "bbox": [ + 306, + 246, + 545, + 389 + ], + "type": "text", + "content": "loose captions out entirely in favor of generated captions, with enhanced quality and density, can produce improved results [10]. Other works [1, 20, 21, 38] have demonstrated that it is possible to get CLIP-level performance using a vastly reduced compute, often by throwing away portions of the data resulting in more balance between image and text modalities. However, those approaches rely on automatic pipelines which do not generate reliable and long captions that can capture rich visual details in an image. From this it appears no existing dataset has high-quality image descriptions that are tightly-coupled enough with the image to train for or evaluate a deep alignment between the two domains." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 390, + 546, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 390, + 546, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 390, + 546, + 628 + ], + "type": "text", + "content": "In the absence of high quality captions to evaluate VLMs, benchmarks such as ARO [42] and VL-Checklist [45] often complement image-caption pairs with hard negatives that are generated by slightly altering the initial (positive) description. Progress on these benchmarks has been rooted in training VLMs with negatives of similar construction to the tests [42] rendering the methodologies ineffective on datasets such as Winoground [35]. Recent works [22] have called the evaluation capacity of many of these benchmarks into question, given how effective language-prior-based methods perform. More specifically, given the unlikelihood of the hard negative captions in these benchmarks, a good text encoder can achieve close to " + }, + { + "bbox": [ + 304, + 390, + 546, + 628 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 304, + 390, + 546, + 628 + ], + "type": "text", + "content": " accuracy without looking at the images. Moreover, Bordes et al. [3] have shown that most improvements observed on ARO or VL-Checklist do not translate on simple synthetic benchmarks for which the negative caption is as likely as the positive one. Since the use of VLMs is significantly increasing, it is crucial to make sure that we have a diverse suite of reliable benchmarks to assess their abilities." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "content": "In this paper, we introduce the Densely Captioned Images dataset, a collection of 7805 images with dense and mask-aligned descriptions averaging above 1000 words each. One such example is provided in Figure 1, displaying just a subset of the collected text paired with their aligned masks. We demonstrate how to leverage this dataset to evaluate VLMs in two ways after summarizing captions" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 693, + 149, + 702 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 693, + 149, + 702 + ], + "spans": [ + { + "bbox": [ + 58, + 693, + 149, + 702 + ], + "type": "text", + "content": "* Work done while at Meta" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 59, + 703, + 128, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 128, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 128, + 712 + ], + "type": "text", + "content": "Equal contribution" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "26700" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 83, + 79, + 519, + 586 + ], + "blocks": [ + { + "bbox": [ + 83, + 79, + 519, + 586 + ], + "lines": [ + { + "bbox": [ + 83, + 79, + 519, + 586 + ], + "spans": [ + { + "bbox": [ + 83, + 79, + 519, + 586 + ], + "type": "image", + "image_path": "7f6543bdabddedeba6c92fb7457f95520997ceb0aafab20b4e17c13566ebc735.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 89, + 602, + 503, + 615 + ], + "lines": [ + { + "bbox": [ + 89, + 602, + 503, + 615 + ], + "spans": [ + { + "bbox": [ + 89, + 602, + 503, + 615 + ], + "type": "text", + "content": "Figure 1. One example from the Densely Captioned Images dataset. Only part of the submask hierarchy is shown." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 635, + 289, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 635, + 289, + 709 + ], + "spans": [ + { + "bbox": [ + 46, + 635, + 289, + 709 + ], + "type": "text", + "content": "to fit into CLIP's 77 token limit, both with a negatives-based test as well as a novel matching task, referred to as subcrop-caption matching, that requires selecting appropriate captions for different regions of the same image. We evaluate existing baselines, and observe that no models perform well at both concurrently, and improved" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 304, + 635, + 547, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 635, + 547, + 709 + ], + "spans": [ + { + "bbox": [ + 304, + 635, + 547, + 709 + ], + "type": "text", + "content": "performance via negatives-based training comes at the cost of decreased performance on subcrop-caption matching. We also run some experiments using the summarized DCI as a fine-tuning dataset to evaluate the effectiveness of these captions for improving a model's performance on other benchmarks, and compare the efficiency per-example" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "text", + "content": "26701" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 278, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 278, + 95 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 278, + 95 + ], + "type": "text", + "content": "to that from the automated annotation setup in DAC [10]. To summarize, our contributions are:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 98, + 286, + 275 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 47, + 98, + 286, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 98, + 286, + 144 + ], + "spans": [ + { + "bbox": [ + 47, + 98, + 286, + 144 + ], + "type": "text", + "content": "- We release the Densely Captioned Images (DCI) dataset, which contains dense and mask-aligned captions, alongside an LLM-summarized version (sDCI) containing captions under 77 tokens for use with current VLMs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 146, + 286, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 146, + 286, + 204 + ], + "spans": [ + { + "bbox": [ + 47, + 146, + 286, + 204 + ], + "type": "text", + "content": "- We provide a new benchmark for VLMs based on sDCI to evaluate fine-grained vision-language understanding, and show that no existing model can perform well at matching captions from within one image to corresponding subsections of that image." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 205, + 286, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 205, + 286, + 275 + ], + "spans": [ + { + "bbox": [ + 47, + 205, + 286, + 275 + ], + "type": "text", + "content": "- We show that fine-tuning with high quality image-caption pairs is as good on ARO and VL-Checklist as fine-tuning on at least " + }, + { + "bbox": [ + 47, + 205, + 286, + 275 + ], + "type": "inline_equation", + "content": "10 \\times" + }, + { + "bbox": [ + 47, + 205, + 286, + 275 + ], + "type": "text", + "content": " the automatically annotated data, and that even without utilizing explicit negatives these pairs can improve performance on VL-C-Object from " + }, + { + "bbox": [ + 47, + 205, + 286, + 275 + ], + "type": "inline_equation", + "content": "81.17\\%" + }, + { + "bbox": [ + 47, + 205, + 286, + 275 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 47, + 205, + 286, + 275 + ], + "type": "inline_equation", + "content": "88.37\\%" + }, + { + "bbox": [ + 47, + 205, + 286, + 275 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 290, + 138, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 290, + 138, + 303 + ], + "spans": [ + { + "bbox": [ + 47, + 290, + 138, + 303 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 312, + 286, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 312, + 286, + 407 + ], + "spans": [ + { + "bbox": [ + 46, + 312, + 286, + 407 + ], + "type": "text", + "content": "The massive, loosely-labeled dataset approach that has enabled VLMs like CLIP [27] and powerful successors like BLIP2 [19], Flamingo [2], CM3leon [41], and many others, has been a clear forward step in vision-language modeling. Still recent benchmarks show that models trained in this manner display clear drawbacks in reasoning skills. Additional techniques have been proposed and adopted recently to close this gap, discussed below." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 426, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 286, + 713 + ], + "type": "text", + "content": "Vision-Language Datasets. Over the last decade, there have been significant dataset collection efforts connecting images and text. Earlier works focused on curating datasets by leveraging human annotations, see e.g., COCO [8], Visual Genome [16], and Flickr30k [40]. The process resulted in high quality annotations, which were however oftentimes limited by the caption content - i.e., relatively short phrases (5.1 to 10.3 words on average) grounded at image level or region level - and the data annotation scale (30k to 130k images). To increase scale, researchers gathered web-crawled data and introduced large scale datasets such as YFCC100M [34], which contains 100M media objects. Yet, crawling the web oftentimes results in little correspondence between image and text pairs. To reduce noise between image and text pairs, efforts such as SBU [24] queried Flickr and filtered the noisy results, obtaining a " + }, + { + "bbox": [ + 46, + 426, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\sim 1\\mathrm{M}" + }, + { + "bbox": [ + 46, + 426, + 286, + 713 + ], + "type": "text", + "content": " images. 
Moreover, Conceptual Captions (CC) [30] crawled a dataset of " + }, + { + "bbox": [ + 46, + 426, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\sim 12\\mathrm{M}" + }, + { + "bbox": [ + 46, + 426, + 286, + 713 + ], + "type": "text", + "content": " images and alt-text pairs, and included a protocol to filter noisy text-image pairs, resulting in 3M data points. Relaxing the filtering protocol allows to trade data quality for scale. Crawling alt-text also resulted in relatively short text descriptions with 10.3 words on average, which are most often grounded at image level. Localized Narratives [25] was introduced" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 300 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 300 + ], + "type": "text", + "content": "as a dense visual grounding dataset leveraging a multimodal annotation procedure, collecting " + }, + { + "bbox": [ + 304, + 72, + 545, + 300 + ], + "type": "inline_equation", + "content": "\\sim 850\\mathrm{k}" + }, + { + "bbox": [ + 304, + 72, + 545, + 300 + ], + "type": "text", + "content": " text-image pairs with 36.5 words/caption on average. **RedCaps** [9] constituted another effort yielding large scale (" + }, + { + "bbox": [ + 304, + 72, + 545, + 300 + ], + "type": "inline_equation", + "content": "\\sim 12\\mathrm{M}" + }, + { + "bbox": [ + 304, + 72, + 545, + 300 + ], + "type": "text", + "content": ") web-curated data by exploring alternate data sources of high quality data instead of devising complex filtering strategies. Wikipedia-based image-text dataset (WIT) [32] extended dataset creation efforts by gathering a multilingual dataset of text-image-pairs consisting of " + }, + { + "bbox": [ + 304, + 72, + 545, + 300 + ], + "type": "inline_equation", + "content": "\\sim 11.5\\mathrm{M}" + }, + { + "bbox": [ + 304, + 72, + 545, + 300 + ], + "type": "text", + "content": " images. LAION-5B [29] further increased the web-crawling efforts by gathering a multilingual dataset of text-image pairs, and filtered the collected data with a pre-trained CLIP [27] model. Following, LAION-CAT [26] reduced noisy examples from LAION-5B by filtering for caption complexity, i.e., captions that do not contain any action, and for text spotting, i.e., images that contain rendered text. **MetaCLIP** [39] has also been released as an open dataset for reproducing CLIP. These very large scale datasets have been successfully used to advance the state-of-the-art of VLMs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 319, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 319, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 319, + 545, + 713 + ], + "type": "text", + "content": "Vision-Language Evaluation Benchmarks. Several recent advances in visual-language learning have focused on creating comprehensive benchmarks to evaluate model performance in more holistic ways. These benchmarks are instrumental in pushing the envelope of what VLM can understand and process, ensuring they move beyond superficial image-text matching towards genuine understanding of intricate relationships between visual and linguistic elements. In particular, VL-CheckList [45] and ARO [42] assess the VLM capabilities beyond average downstream task accuracy, by focusing on a model's ability to understand objects, attributes, order or relations. 
ARO's extensive scope, uncovers limitations in VLMs such as poor relational understanding and lack of order sensitivity. Winoground [35] tests models for visio-linguistic compositional reasoning by asking VLM to match two images with two captions containing the same set of words but in different orders. This task requires models to discern the meaning conveyed by the order of words, reflecting different visual scenes. Current VLMs perform only marginally better than chance, highlighting a significant gap in compositional reasoning. CREPE (Compositional REpresentation Evaluation) [23] evaluates two aspects of compositionality: systematicity and productivity. Systematicity is measured by the model's ability to represent seen versus unseen atoms and their compositions, while productivity gauges the model's capacity to understand an unbounded set of increasingly complex expressions. Finally, PUG (Photorealistic Unreal Graphics) [3] uses synthetic data to assess the compositional reasoning abilities of VLMs by progressively increasing the complexity of a given generated scene. One issue with these evaluation datasets is their frequent reliance on COCO, either directly as in ARO, or through Visual Genome as in" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "26702" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "type": "text", + "content": "VL-Checklist or CREPE. It is difficult to find an evaluation dataset of sufficient scale without COCO." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 110, + 289, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 110, + 289, + 529 + ], + "spans": [ + { + "bbox": [ + 47, + 110, + 289, + 529 + ], + "type": "text", + "content": "Vision-Language models. Recent VLM advancements have built upon the foundational work of CLIP [27], which leveraged large-scale image-text pairs to jointly pre-train an image encoder and a text encoder to predict which images are paired with which texts in a contrastive learning paradigm. NegCLIP build upon CLIP by leveraging negative captions when training. BLIP (Bootstrapping Language-Image Pre-training) [18] uses a new framework that bootstraps the captions from noisy web data for both understanding and generation tasks. Its successor BLIP-2 [19] further streamlines the process by utilizing off-the-shelf frozen pre-trained image encoders and language models, bridging the modality gap with a lightweight querying mechanism. Clip-rocket [12] improves VLM baselines by showing that applying image and text augmentations makes up for most of the improvement attained by prior VLMs. Flava [31] proposes a foundation VLM model by combining existing VLMs objectives together with auxiliary in-modality losses for the text and vision encoders. X-VLM [43] achieves success with a pretraining method matching sub- portions of the text to regions of the image at multiple granularities. 
These models introduces improvements over CLIP, focusing on efficiency, adaptability, and reducing the need for extensive labeled datasets, thereby pushing the boundaries of vision-language pre-training. The closest work to our approach is DAC (Densely Aligned Captions) [10], which improves with an automated LLM based pipeline the caption quality and density. By showing that DAC-enhanced CLIP models exhibit substantial gains on some benchmarks, this work underscores the critical role that caption quality and density play in the efficacy of VLMs. We build on this insight and explore how to further increase the caption quality and density by relying on human annotators, and analyze how that impacts downstream model performance." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 538, + 173, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 538, + 173, + 551 + ], + "spans": [ + { + "bbox": [ + 47, + 538, + 173, + 551 + ], + "type": "text", + "content": "3. Dataset Construction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 558, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 558, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 558, + 289, + 713 + ], + "type": "text", + "content": "The Densely Captioned Images dataset, or DCI, consists of 7805 images from SA-1B [15], each with a complete description aiming to capture the full visual detail of what is present in the image. Much of the description is directly aligned to submasks of the image. An example is shown in Figure 1. In the top left we see the full image of a water pump, with an associated description. The italicized section is collected as a standard caption, aiming to summarize the full image in about a sentence, similar to existing caption datasets. The remainder of that first description contains details about the relationship between visible entities in the image, as well as in-depth descriptions of regions that are not described as part of the submasks. All other" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 145 + ], + "type": "text", + "content": "text describing the image is associated with submasks of the image. Each submask has its own free-text label (not pictured) and description, and may also contain further submasks. Here for instance we see submasks for windows and balconies as being contained in the submask capturing three buildings in the background." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 152, + 385, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 152, + 385, + 166 + ], + "spans": [ + { + "bbox": [ + 306, + 152, + 385, + 166 + ], + "type": "text", + "content": "3.1. Preparation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 171, + 547, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 171, + 547, + 266 + ], + "spans": [ + { + "bbox": [ + 304, + 171, + 547, + 266 + ], + "type": "text", + "content": "In order to collect the data, we first select images from a random privacy-mitigated subset of SA-1B. We then procedurally extract subregions of each image to annotate, as we found in initial trials that crowdsourcing both regions and descriptions concurrently overcomplicated the task and successful annotation rate. 
For this process, we turn to the Segment Anything Model (SAM) [15] and adapt their standard method to extract all masks from an image." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 267, + 547, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 267, + 547, + 483 + ], + "spans": [ + { + "bbox": [ + 304, + 267, + 547, + 483 + ], + "type": "text", + "content": "For the extraction process, SAM usually relies on a grid of points across the entire image. In order to increase the possibility of selecting interesting regions worth annotating, we additionally apply a canny filter [4] and select random points within a radius from discovered edges. We then run SAM to detect all masks using both the grid and the near-edge points. Once the masks are returned, we establish a hierarchy of submasks by thresholding the number of overlapping pixels between two masks to determine if one is a submask of the other, or if the two masks should be joined. This helps reduce some of the noise introduced by the automatic masking process, and leaves us with a tree-like structure for the masks. Lastly, we remove any masks that are too small. We note that undergoing this process does not result in every detail of each image being selected as a candidate for annotation, and as such instances in the DCI dataset are not expected to have complete submask-aligned coverage of all elements one could recognize in or discuss about an image." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 491, + 414, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 491, + 414, + 502 + ], + "spans": [ + { + "bbox": [ + 306, + 491, + 414, + 502 + ], + "type": "text", + "content": "3.2. Collection Process" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 510, + 545, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 557 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 557 + ], + "type": "text", + "content": "We use Mephisto [37] to host our task, pay crowdworkers to provide annotations on the dataset, and additionally run qualification steps. Workers that pass our qualifications are eligible to work on the main task which contains 3 stages:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 558, + 545, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 558, + 545, + 593 + ], + "spans": [ + { + "bbox": [ + 306, + 558, + 545, + 593 + ], + "type": "text", + "content": "1. Workers are provided with the whole image, and asked to provide a short description of it. This is considered the standard caption." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 594, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 594, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 594, + 547, + 713 + ], + "type": "text", + "content": "2. Workers are provided with submasks of the image, one at a time starting with the leaves of the mask tree, displaying a SAM-selected region of the image as well as an indicator for where that region comes from. They are generally asked to provide a label and complete description for the pictured region, though are allowed to mark the region as 'uninteresting' and only provide a label, or 'bad' and provide nothing. 
These options allow us to focus worker time on useful annotations and help capture some of the noise of the automatic selection pro" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "26703" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 70, + 521, + 248 + ], + "blocks": [ + { + "bbox": [ + 72, + 70, + 521, + 248 + ], + "lines": [ + { + "bbox": [ + 72, + 70, + 521, + 248 + ], + "spans": [ + { + "bbox": [ + 72, + 70, + 521, + 248 + ], + "type": "image", + "image_path": "a95679abff74c421fb7258abfea90d8523cd65a8d27b1e1068b9aed4a41dc101.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 70, + 257, + 522, + 269 + ], + "lines": [ + { + "bbox": [ + 70, + 257, + 522, + 269 + ], + "spans": [ + { + "bbox": [ + 70, + 257, + 522, + 269 + ], + "type": "text", + "content": "Figure 2. Annotation view for writing description for masks of the image. The masked region appears highlighted for clarity." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 48, + 285, + 206, + 522 + ], + "blocks": [ + { + "bbox": [ + 48, + 285, + 206, + 522 + ], + "lines": [ + { + "bbox": [ + 48, + 285, + 206, + 522 + ], + "spans": [ + { + "bbox": [ + 48, + 285, + 206, + 522 + ], + "type": "image", + "image_path": "6f66c7649ed1279ac6568d0737850cc9b090d120ab21eb4808d4af70773fa917.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 536, + 545, + 593 + ], + "lines": [ + { + "bbox": [ + 45, + 536, + 545, + 593 + ], + "spans": [ + { + "bbox": [ + 45, + 536, + 545, + 593 + ], + "type": "text", + "content": "Figure 3. Example of a Llama2-generated summary and negative that comprise sDCI. Each image and submask have multiple summarizations and negatives. We also compare the caption quality between DAC [10] and DCI. In contrast to DCI that relies on human annotations, DAC used an automatic pipeline based on LLM for captioning. As we observe in this example, the DAC captions can suffer from hallucinations and miss important elements of the photo. In this work we argue that while improving automatic pipeline is an important research direction, for now the captions proposed are not reliable enough to be used to evaluate models and assess their abilities." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 213, + 290, + 521, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 290, + 521, + 365 + ], + "spans": [ + { + "bbox": [ + 213, + 290, + 521, + 365 + ], + "type": "text", + "content": "A black metal water pump in a town square in the middle of a circular stone shape across from several buildings. Black metal water pump in foreground with spigot in front and curved handle with globular end. It is located in the middle of a smaller circle of stones. There is some grass growing in between some of the stones. There is a metal grated drain near the bottom of the pump. To its left is a dark brown metal manhole cover and behind it is a grey stone tiled road. 
The road leads to three buildings across the street with storefronts on the bottom level and different units with balconies and large windows on the second and third level. There are people out front walking by the lower level. There are benches out front. Human annotated" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 213, + 374, + 508, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 374, + 508, + 393 + ], + "spans": [ + { + "bbox": [ + 213, + 374, + 508, + 393 + ], + "type": "text", + "content": "A black metal water pump stands in a town square, surrounded by a circle of stones, with buildings and people in the background LLM Summary" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 213, + 406, + 526, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 406, + 526, + 425 + ], + "spans": [ + { + "bbox": [ + 213, + 406, + 526, + 425 + ], + "type": "text", + "content": "A pump stands metal water black in square town, surrounded by stones circle, with background buildings people in." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 212, + 441, + 525, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 441, + 525, + 515 + ], + "spans": [ + { + "bbox": [ + 212, + 441, + 525, + 515 + ], + "type": "text", + "content": "DAC Quality: a fire hydrant in the middle of a city street \nDAC Dense: 1 A fire hydrant can be seen in the center of a street, its metallic spray emanating water vapor from a nozzle as it shoots water into the air. 2 The spray creates a steady waterfall, making it appear as though the hydrant is spraying water from its nozzle the whole time. 3 The spray is relatively smooth, creating a gradual wave that covers a large portion of a city block. \nDAC SAM: a black plastic drain cover, a building, a water fountain, a bicycle, a water fountain, the image shows a circular stone path with a circular stone base, a water pump, the logo of the company, the image shows a black metal handle with a long handle, home bank, a dragon" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 58, + 613, + 287, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 613, + 287, + 660 + ], + "spans": [ + { + "bbox": [ + 58, + 613, + 287, + 660 + ], + "type": "text", + "content": "cess. This is shown in Figure 2. For masks that contain submasks, workers are also provided with overlays that show the regions already annotated, and are asked to annotate in terms of what has already been written." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 661, + 287, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 661, + 287, + 709 + ], + "spans": [ + { + "bbox": [ + 47, + 661, + 287, + 709 + ], + "type": "text", + "content": "3. After completing all the submasks, the worker is then shown the complete image again and asked to provide an overall description, paying attention to the relationship between previously annotated regions." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 613, + 545, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 613, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 306, + 613, + 545, + 685 + ], + "type": "text", + "content": "An in-depth description of the filtering and quality assurance process can be found in Appendix 8 while the Datasheet [13] is available in Appendix 12. 
Complete annotation instructions, dataset download links as well as reproducible code are available on our GitHub1. The DCI dataset is released under the CC-BY-NC license." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 533, + 351, + 542, + 384 + ], + "type": "aside_text", + "angle": 90, + "lines": [ + { + "bbox": [ + 533, + 351, + 542, + 384 + ], + "spans": [ + { + "bbox": [ + 533, + 351, + 542, + 384 + ], + "type": "text", + "content": "DCI (Our)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 533, + 469, + 542, + 486 + ], + "type": "aside_text", + "angle": 90, + "lines": [ + { + "bbox": [ + 533, + 469, + 542, + 486 + ], + "spans": [ + { + "bbox": [ + 533, + 469, + 542, + 486 + ], + "type": "text", + "content": "DAC" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 703, + 509, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 703, + 509, + 712 + ], + "spans": [ + { + "bbox": [ + 317, + 703, + 509, + 712 + ], + "type": "text", + "content": "DatasetImgSCapsToks/CapToks/ImgDCI7,8057,8051,282.091,282.09\\( DCI_{sub} \\)96,00796,007199.33199.33sDCI8,01287,26849.21536.00\\( sDCI_{sub} \\)96,007714,63036.60263.01\\( LN_{COCO} \\)142,845142,84549.1149.11\\( LN_{COCO<77} \\)127,456127,45643.7043.70COCO123,287616,76713.5467.74", + "image_path": "976cbdf6905ceeb7ad05e2624cae57df9fbb559d1980561413d1cc3ddb383ceb.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 189, + 545, + 277 + ], + "lines": [ + { + "bbox": [ + 304, + 189, + 545, + 277 + ], + "spans": [ + { + "bbox": [ + 304, + 189, + 545, + 277 + ], + "type": "text", + "content": "Table 1. Comparison of DCI dataset statistics to other datasets, focusing on average CLIP tokens per image or caption. Note the " + }, + { + "bbox": [ + 304, + 189, + 545, + 277 + ], + "type": "inline_equation", + "content": "26\\mathbf{x}" + }, + { + "bbox": [ + 304, + 189, + 545, + 277 + ], + "type": "text", + "content": " difference between DCI and the previous longest annotated dataset, Localized Narratives (LN). sub denotes including sub-masks and their descriptions as examples, and sDCI refers to the LLM-summarized version of DCI that fits captions to 77 tokens (Sec. 3.3), while " + }, + { + "bbox": [ + 304, + 189, + 545, + 277 + ], + "type": "inline_equation", + "content": "\\mathrm{LN}_{\\mathrm{COCO}} < 77" + }, + { + "bbox": [ + 304, + 189, + 545, + 277 + ], + "type": "text", + "content": " simply drops examples longer than 77 tokens (" + }, + { + "bbox": [ + 304, + 189, + 545, + 277 + ], + "type": "inline_equation", + "content": "\\sim 10.8\\%" + }, + { + "bbox": [ + 304, + 189, + 545, + 277 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 299, + 529, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 299, + 529, + 312 + ], + "spans": [ + { + "bbox": [ + 305, + 299, + 529, + 312 + ], + "type": "text", + "content": "4. Evaluating VLMs with summarized DCI" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 320, + 390, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 320, + 390, + 333 + ], + "spans": [ + { + "bbox": [ + 306, + 320, + 390, + 333 + ], + "type": "text", + "content": "4.1. 
Methodology" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 339, + 545, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 339, + 545, + 506 + ], + "spans": [ + { + "bbox": [ + 304, + 339, + 545, + 506 + ], + "type": "text", + "content": "Using the 7805 images in the summarized Densely Captioned Images (sDCI) dataset, we construct a few different evaluations. As noted above, the ability to select multiple submasks from the same image and include them in the same batch allows us to create a CLIP-style test, wherein the model can evaluate a full batch of images and captions and score correctly which caption belongs to which image. As we provide models with a crop around the selected masks, we call this Subcrop-Caption Matching (SCM), and we use a batch size of 8. We can run against our LLM-generated negatives as well. Given that LLM-summarization has provided us with multiple captions and negatives per image and submask, we supply the first unless noted otherwise. With this in mind, we construct 6 evaluations as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 506, + 545, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 506, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 506, + 545, + 567 + ], + "type": "text", + "content": "[All SCM]: Group each image with their subcrops, alongside one summarized caption per subcrop. Then use the model to find the most likely caption associated to each subcrop. This test measures the ability of the VLM to distinguish between the different parts that compose an image.[2]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 567, + 545, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 567, + 545, + 614 + ], + "spans": [ + { + "bbox": [ + 304, + 567, + 545, + 614 + ], + "type": "text", + "content": "[All Neg]: Select one LLM summarized caption and the corresponding LLM-generated negative for each image and subcrop. Score a model on its ability to distinguish between the positive and negative." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 614, + 545, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 614, + 545, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 614, + 545, + 662 + ], + "type": "text", + "content": "[All Pick5-SCM]: Use the same setup as All SCM, but rather than using only one caption per subcrop, we use 5 LLM generated captions per subcrop. We score a model as succeeding only when the worst-scoring positive caption" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 305, + 673, + 545, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 673, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 673, + 545, + 712 + ], + "type": "text", + "content": "2Since we used sDCI to fit current models token length, it is possible that some of the summaries remove the information that make possible to distinguish between the captions. Ideally this test should be performed on the non-summaramized version once VLMs can handle 1000+ tokens." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "26705" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 113, + 69, + 482, + 185 + ], + "blocks": [ + { + "bbox": [ + 113, + 69, + 482, + 185 + ], + "lines": [ + { + "bbox": [ + 113, + 69, + 482, + 185 + ], + "spans": [ + { + "bbox": [ + 113, + 69, + 482, + 185 + ], + "type": "table", + "html": "
<tr><td rowspan=2>Model</td><td colspan=2>All</td><td colspan=2>All Pick5</td><td>Base</td><td>All</td></tr>
<tr><td>SCM</td><td>Neg</td><td>SCM</td><td>Neg</td><td>Neg</td><td>Hard Negs</td></tr>
<tr><td>CLIP Baseline [27]</td><td>40.06%</td><td>60.79%</td><td>11.21%</td><td>24.06%</td><td>67.56%</td><td>41.34%</td></tr>
<tr><td>NegCLIP [42]</td><td>43.35%</td><td>56.00%</td><td>13.22%</td><td>4.82%</td><td>76.69%</td><td>50.84%</td></tr>
<tr><td>BLIP [18]</td><td>39.13%</td><td>54.02%</td><td>10.73%</td><td>5.51%</td><td>63.41%</td><td>53.23%</td></tr>
<tr><td>Flava [31]</td><td>38.08%</td><td>47.99%</td><td>8.01%</td><td>9.82%</td><td>11.6%</td><td>45.59%</td></tr>
<tr><td>X-VLM [43]</td><td>38.45%</td><td>53.46%</td><td>10.96%</td><td>5.10%</td><td>44.29%</td><td>52.42%</td></tr>
<tr><td>DAC\( _{LLM} \)[10]</td><td>37.45%</td><td>81.71%</td><td>8.13%</td><td>37.84%</td><td>90.56%</td><td>71.21%</td></tr>
<tr><td>DAC\( _{SAM} \)[10]</td><td>37.90%</td><td>84.17%</td><td>6.70%</td><td>39.94%</td><td>89.66%</td><td>73.61%</td></tr>
", + "image_path": "dfa10fb6dc7a8594a7560277c92a125bd8ac8838cfef67224abb22fec7c1c529.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 193, + 547, + 228 + ], + "lines": [ + { + "bbox": [ + 46, + 193, + 547, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 193, + 547, + 228 + ], + "type": "text", + "content": "Table 2. sDCI test result: We compare existing baselines on our Subcrop-Caption Matching (SCM) and negatives tests. Additional results are available in Table 10 in the Appendix. We note our best model fine-tuned on sDCI from section 5 achieved " + }, + { + "bbox": [ + 46, + 193, + 547, + 228 + ], + "type": "inline_equation", + "content": "64.02\\%" + }, + { + "bbox": [ + 46, + 193, + 547, + 228 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 193, + 547, + 228 + ], + "type": "inline_equation", + "content": "31.60\\%" + }, + { + "bbox": [ + 46, + 193, + 547, + 228 + ], + "type": "text", + "content": " on a held-out test of All SCM and All SCM Pick5 respectively, setting an upper bound for model performance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 247, + 286, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 247, + 286, + 295 + ], + "spans": [ + { + "bbox": [ + 46, + 247, + 286, + 295 + ], + "type": "text", + "content": "scores higher than the best-scoring caption of any other image in the batch. This test evaluates if the representation space is structured such that captions belonging to a specific image are closest to the target image in the space." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 295, + 286, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 295, + 286, + 343 + ], + "spans": [ + { + "bbox": [ + 46, + 295, + 286, + 343 + ], + "type": "text", + "content": "[All Pick5-Neg]: Use the same setup as All Neg, but rather than using one caption, we use 5 LLM summarized captions for each image and subcrop. If any of these captions score worse than the negative, the model fails the example." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 343, + 286, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 343, + 286, + 415 + ], + "spans": [ + { + "bbox": [ + 46, + 343, + 286, + 415 + ], + "type": "text", + "content": "[Base Neg]: Using only the 7805 base images without subcrops, evaluate the model's ability to distinguish between an LLM generated caption and its corresponding LLM-generated negative. Note, this is a strict subset of All Neg, though these captions are on the longer side on average and cover a different distribution." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 415, + 286, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 415, + 286, + 451 + ], + "spans": [ + { + "bbox": [ + 46, + 415, + 286, + 451 + ], + "type": "text", + "content": "[All Hard-Negs]: Using the same setup as All Neg, but rather than using a single negative, use the negative across all LLM-generated negatives that CLIP scores highest." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 463, + 105, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 463, + 105, + 475 + ], + "spans": [ + { + "bbox": [ + 47, + 463, + 105, + 475 + ], + "type": "text", + "content": "4.2. 
Results" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 483, + 286, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 483, + 286, + 638 + ], + "spans": [ + { + "bbox": [ + 46, + 483, + 286, + 638 + ], + "type": "text", + "content": "We compare in Table 2 the sDCI performances given by different state-of-the-art models: CLIP [27], NegCLIP [42], BLIP [18], Flava [31] and X-VLM [43]. Additional experiments on different architectures and pretraining datasets are available in Table 10 (see Appendix). The CLIP baseline starts at " + }, + { + "bbox": [ + 46, + 483, + 286, + 638 + ], + "type": "inline_equation", + "content": "40.12\\%" + }, + { + "bbox": [ + 46, + 483, + 286, + 638 + ], + "type": "text", + "content": " on All SCM and " + }, + { + "bbox": [ + 46, + 483, + 286, + 638 + ], + "type": "inline_equation", + "content": "60.63\\%" + }, + { + "bbox": [ + 46, + 483, + 286, + 638 + ], + "type": "text", + "content": " on All Neg. The only model to improve over CLIP on SCM tasks is NegCLIP, which follows the fact that the hard image negatives that NegCLIP is trained on provide the most similar task to what we test of any of these models. None of the models trained without an explicit CLIP-loss component outperform CLIP on SCM tasks, but DAC ultimately performs the worst." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 639, + 286, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 639, + 286, + 699 + ], + "spans": [ + { + "bbox": [ + 46, + 639, + 286, + 699 + ], + "type": "text", + "content": "Performance on the Pick5 variations of each task follow the trends of the standard performance. Performance on Base Neg for Flava point to a weakness in comparing longer text examples, given the significant drop from " + }, + { + "bbox": [ + 46, + 639, + 286, + 699 + ], + "type": "inline_equation", + "content": "47.99\\%" + }, + { + "bbox": [ + 46, + 639, + 286, + 699 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 639, + 286, + 699 + ], + "type": "inline_equation", + "content": "11.6\\%" + }, + { + "bbox": [ + 46, + 639, + 286, + 699 + ], + "type": "text", + "content": " that is not demonstrated in other models." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 59, + 700, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 700, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 59, + 700, + 286, + 713 + ], + "type": "text", + "content": "Interestingly, models trained absent of CLIP (BLIP," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 247, + 545, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 247, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 304, + 247, + 545, + 319 + ], + "type": "text", + "content": "Flava, X-VLM) experience a far less noticeable drop in performance between All Neg and All Hard Negs. This validates that sDCI's CLIP-hard negatives are not simply a higher proportion of 'impossible' negatives, but rather capture some underlying trait about the negatives that CLIP models and their descendants all struggle with." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 319, + 545, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 319, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 304, + 319, + 545, + 403 + ], + "type": "text", + "content": "None of the models presented perform well across all of the sDCI test set. 
Given each of the CLIP-style models have some kind of advantage on this test set due to being trained on some objective that sDCI directly evaluates, we expect that the BLIP, Flava, and X-VLM scores are somewhat representative for existing state-of-the-art models' true performance on this test set." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 415, + 545, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 415, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 305, + 415, + 545, + 441 + ], + "type": "text", + "content": "5. Using summarized DCI as fine-tuning dataset" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 449, + 545, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 545, + 605 + ], + "type": "text", + "content": "To evaluate the use and difficulty of the sDCI dataset for training, we fine-tune state-of-the-art models with it. In particular, we use a ViT/32B CLIP model in all of our experiments, requiring use of the CLIP-bounded version of our dataset. We split sDCI into 7800 train, 100 validation, 112 test samples for this purpose. We use a training batch size of 32 and a learning rate of " + }, + { + "bbox": [ + 304, + 449, + 545, + 605 + ], + "type": "inline_equation", + "content": "5 \\mathrm{e} - 5" + }, + { + "bbox": [ + 304, + 449, + 545, + 605 + ], + "type": "text", + "content": " for all experiments, and run for up to 10 epochs. We train using both standard CLIP loss as well as an additional Negatives loss component, which follows the 'text negative' of NegCLIP [42]. Given the tiny size of our finetuning sets relative to the 400M pretraining images, we use LoRA [14] to reduce the trainable parameters. We train a model with and without negatives loss." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 605, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 665 + ], + "type": "text", + "content": "In order to make good use of the multiple summarized captions we have per image and submask, we randomly select one to be used in each individual epoch. We call this method Pick1. We describe this method and other ablations we attempted in more detail in Appendix 9." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "We follow the experimental setup of DAC [10] by evaluating our sDCI fine-tuned CLIP on the ARO and VL-Checklist benchmarks. 
We compare to DAC directly as it is the most similar work to ours in attempting to increase" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "26706" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 75, + 69, + 519, + 246 + ], + "blocks": [ + { + "bbox": [ + 75, + 69, + 519, + 246 + ], + "lines": [ + { + "bbox": [ + 75, + 69, + 519, + 246 + ], + "spans": [ + { + "bbox": [ + 75, + 69, + 519, + 246 + ], + "type": "table", + "html": "
<tr><td rowspan=2>Model</td><td rowspan=2>VG-R</td><td colspan=3>ARO</td><td colspan=3>VL-Checklist</td></tr>
<tr><td>VG-A</td><td>COCO</td><td>FLICKR</td><td>Object</td><td>Attribute</td><td>Relation</td></tr>
<tr><td>sDCIP1</td><td>76.23%</td><td>67.56%</td><td>88.58%</td><td>91.30%</td><td>80.71%</td><td>68.69%</td><td>70.12%</td></tr>
<tr><td>sDCIP1NL0</td><td>57.34%</td><td>61.98%</td><td>39.36%</td><td>44.62%</td><td>88.37%</td><td>70.42%</td><td>61.28%</td></tr>
<tr><td>DACLLM10,000</td><td>61.53%</td><td>63.89%</td><td>46.28±1.5%</td><td>59.41±1.9%</td><td>66.90%</td><td>57.4%</td><td>56.96%</td></tr>
<tr><td>DACLLM100,000</td><td>61.0%</td><td>63.6%</td><td>48.2%</td><td>61.42%</td><td>66.87%</td><td>57.22%</td><td>57.18%</td></tr>
<tr><td>DACLLM500,000</td><td>60.1%</td><td>63.8%</td><td>50.2%</td><td>61.6%</td><td>66.54%</td><td>57.39%</td><td>56.77%</td></tr>
<tr><td>DACLLM3,000,000</td><td>81.28%</td><td>73.91%</td><td>94.47%</td><td>95.68%</td><td>87.30%</td><td>77.27%</td><td>86.41%</td></tr>
<tr><td>DACSAM3,000,000</td><td>77.16%</td><td>70.5%</td><td>91.22%</td><td>93.88%</td><td>88.50%</td><td>75.83%</td><td>89.75%</td></tr>
<tr><td>CLIP Baseline [27]</td><td>59.98%</td><td>63.18%</td><td>47.9%</td><td>60.2%</td><td>81.17%</td><td>67.67%</td><td>61.95%</td></tr>
<tr><td>BLIP2 [19]</td><td>41.16%</td><td>71.25%</td><td>13.57%</td><td>13.72%</td><td>84.14%</td><td>80.12%</td><td>70.72%</td></tr>
<tr><td>NegCLIP [42]</td><td>81%</td><td>71%</td><td>86%</td><td>91%</td><td>81.35%</td><td>72.24%</td><td>63.53%</td></tr>
<tr><td>SVLC [11]</td><td>80.61%</td><td>73.03%</td><td>84.73%</td><td>91.7%</td><td>85%</td><td>71.97%</td><td>68.95%</td></tr>
", + "image_path": "9c02114c6f81cbc0699ee0bfc9dedf1e6171bdf3c7112dc4448b5941df47955f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 253, + 545, + 309 + ], + "lines": [ + { + "bbox": [ + 46, + 253, + 545, + 309 + ], + "spans": [ + { + "bbox": [ + 46, + 253, + 545, + 309 + ], + "type": "text", + "content": "Table 3. sDCI fine-tuned CLIP performance against the ARO and VL-Checklist benchmark. We compare CLIP fine-tuned with sDCI against models fine-tuned using DAC captions. Since the DAC dataset contains 3M images whereas sDCI contains only 7805 images, we performed an ablation of the number of training images used in the DAC dataset. In this instance, " + }, + { + "bbox": [ + 46, + 253, + 545, + 309 + ], + "type": "inline_equation", + "content": "\\mathrm{DAC}_{LLM_{10000}}" + }, + { + "bbox": [ + 46, + 253, + 545, + 309 + ], + "type": "text", + "content": " refer to fine-tuning CLIP using only 10,000 images from DAC. We plot the mean across 5 different seeds and display the standard deviation when it is above " + }, + { + "bbox": [ + 46, + 253, + 545, + 309 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 46, + 253, + 545, + 309 + ], + "type": "text", + "content": " accuracy. We observe that training on sDCI lead to significant improvement in comparison to DAC for a comparable number of examples." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 330, + 287, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 330, + 287, + 401 + ], + "spans": [ + { + "bbox": [ + 46, + 330, + 287, + 401 + ], + "type": "text", + "content": "caption density. As noted in Figure 3, these automatically generated captions are generally noisy. As DAC is using 3M images for fine-tuning, we performed a small ablation on the number of DAC images to use for fine-tuning to be similar to our base image count (10,000 compared to our 8,012), or to our full mask count (100,000 compared to our 99,445)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 410, + 105, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 410, + 105, + 422 + ], + "spans": [ + { + "bbox": [ + 47, + 410, + 105, + 422 + ], + "type": "text", + "content": "5.1. Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 428, + 288, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 428, + 288, + 668 + ], + "spans": [ + { + "bbox": [ + 46, + 428, + 288, + 668 + ], + "type": "text", + "content": "In Table 3, we show that, while the DCI Pick1 model trained with negatives loss " + }, + { + "bbox": [ + 46, + 428, + 288, + 668 + ], + "type": "inline_equation", + "content": "(\\mathrm{sDCI}_{P1})" + }, + { + "bbox": [ + 46, + 428, + 288, + 668 + ], + "type": "text", + "content": " does not reach the performance of DAC models trained on 3M images, it does improve over the CLIP baseline on most metrics3, and outperforms some baselines trained on more data. " + }, + { + "bbox": [ + 46, + 428, + 288, + 668 + ], + "type": "inline_equation", + "content": "\\mathrm{sDCI}_{P1}" + }, + { + "bbox": [ + 46, + 428, + 288, + 668 + ], + "type": "text", + "content": " does however outperform both sample-limited ablations of DAC, suggesting that a small number of highly aligned image to dense text pairs are more effective for training models than larger quantities of more loosely aligned or sparse data. 
Unsurprisingly, the version trained without negatives loss, " + }, + { + "bbox": [ + 46, + 428, + 288, + 668 + ], + "type": "inline_equation", + "content": "\\mathrm{sDCI}_{P1NL0}" + }, + { + "bbox": [ + 46, + 428, + 288, + 668 + ], + "type": "text", + "content": ", does not improve across most benchmarks, and even somewhat degrades when compared to the CLIP baseline.4 Of note however is the significant bump in VL-Object, alongside some improvement to VL-Attribute. Improvements here suggest that the sDCI dataset successfully includes more object, and to a lesser degree attribute, information than the captions in the source dataset for CLIP. It does, however, point to a limitation of using the LLM summarizations and not incorporating mask information, as relational information is sometimes lost." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 328, + 471, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 328, + 471, + 341 + ], + "spans": [ + { + "bbox": [ + 306, + 328, + 471, + 341 + ], + "type": "text", + "content": "6. Conclusion and Future Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 384, + 545, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 384, + 545, + 704 + ], + "spans": [ + { + "bbox": [ + 304, + 384, + 545, + 704 + ], + "type": "text", + "content": "We introduce the Densely Captioned Images dataset, and display clear use for it as a evaluation benchmark. We also show initial potential for using the dataset for fine-tuning. Given that in order to evaluate today's models on DCI we had to reduce the size of the text to only 77 tokens, DCI should prove to be useful for a longer period of time as models that are able to consume and utilize larger amounts of text context become the norm. We envision that in those cases the full human annotated captions without length reduction would be provided. Today's context size limitation also prevented us from fine-tuning existing models on the highly aligned text-image data within DCI, as existing models don't have enough context size to handle the full text, but the dataset isn't nearly large enough to pre-train a new set of models that could use the full text. It could be relevant to treat developing highly aligned text-image datasets in a similar manner to that used in machine translation for low-resource languages, which run into a similar issue with cost and difficulty to collect. This area of work has relied on automated methods such as bitext mining [33] to bootstrap up from an initial set of expertly collected examples, which DCI may already provide the foundation for. Further, we haven't attempted to incorporate the pixel-level masks that the dataset has in any of our experiments, instead opting to use crops around the masks to retain parity with our test set. This dataset is unique for both the extreme density and high degree of alignment present, and in this introductory work we've only scratched the surface of using this information to its fullest extent." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 673, + 287, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 673, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 673, + 287, + 713 + ], + "type": "text", + "content": "3The decreased performance on VL-Object may be explained by our LLM-generated negatives not closely covering the test set negatives. 
4The degradation is likely due to the distribution shift and small sample size, given the training objective is the same as CLIP." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "26707" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 146 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 146 + ], + "type": "text", + "content": "[1] Amro Kamal Mohamed Abbas, Kushal Tirumala, Daniel Simig, Surya Ganguli, and Ari S. Morcos. Semdedup: Data-efficient learning at web-scale through semantic dedduplication. In ICLR 2023 Workshop on Mathematical and Empirical Understanding of Foundation Models, 2023. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 148, + 288, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 148, + 288, + 257 + ], + "spans": [ + { + "bbox": [ + 53, + 148, + 288, + 257 + ], + "type": "text", + "content": "[2] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katie Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andrew Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning, 2022. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 258, + 288, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 288, + 315 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 288, + 315 + ], + "type": "text", + "content": "[3] Florian Bordes, Shashank Shekhar, Mark Ibrahim, Diane Bouchacourt, Pascal Vincent, and Ari S. Morcos. Pug: Photorealistic and semantically controllable synthetic data for representation learning. In Advances in Neural Information Processing Systems, 2023. 1, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 316, + 288, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 316, + 288, + 348 + ], + "spans": [ + { + "bbox": [ + 53, + 316, + 288, + 348 + ], + "type": "text", + "content": "[4] John Canny. A computational approach to edge detection. IEEE Transactions on Pattern Analysis and Machine Intelligence, PAMI-8(6):679-698, 1986. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 350, + 288, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 350, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 53, + 350, + 288, + 392 + ], + "type": "text", + "content": "[5] Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. 
Unsupervised learning of visual features by contrasting cluster assignments, 2021. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 396, + 288, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 396, + 288, + 439 + ], + "spans": [ + { + "bbox": [ + 53, + 396, + 288, + 439 + ], + "type": "text", + "content": "[6] Ilias Chalkidis, Xiang Dai, Manos Fergadiotis, Prodromos Malakasiotis, and Desmond Elliott. An exploration of hierarchical attention transformers for efficient long document classification, 2022. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 441, + 288, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 441, + 288, + 464 + ], + "spans": [ + { + "bbox": [ + 53, + 441, + 288, + 464 + ], + "type": "text", + "content": "[7] Qian Chen, Zhen-Hua Ling, and Xiaodan Zhu. Enhancing sentence embedding with generalized pooling, 2018. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 465, + 288, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 465, + 288, + 508 + ], + "spans": [ + { + "bbox": [ + 53, + 465, + 288, + 508 + ], + "type": "text", + "content": "[8] Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollar, and C. Lawrence Zitnick. Microsoft coco captions: Data collection and evaluation server, 2015. 3, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 510, + 288, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 510, + 288, + 544 + ], + "spans": [ + { + "bbox": [ + 53, + 510, + 288, + 544 + ], + "type": "text", + "content": "[9] Karan Desai, Gaurav Kaul, Zubin Aysola, and Justin Johnson. Redcaps: web-curated image-text data created by the people, for the people, 2021. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 545, + 287, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 545, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 48, + 545, + 287, + 610 + ], + "type": "text", + "content": "[10] Sivan Doveh, Assaf Arbelle, Sivan Harary, Roei Herzig, Donghyun Kim, Paola Cascante-bonilla, Amit Alfassy, Rameswar Panda, Raja Giryes, Rogerio Feris, Shimon Ullman, and Leonid Karlinsky. Dense and aligned captions (dac) promote compositional reasoning in v1 models, 2023. 1, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 613, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 667 + ], + "type": "text", + "content": "[11] Sivan Doveh, Assaf Arbelle, Sivan Harary, Rameswar Panda, Roei Herzig, Eli Schwartz, Donghyun Kim, Raja Giryes, Rogerio Feris, Shimon Ullman, and Leonid Karlinsky. Teaching structured vision&language concepts to vision&language models, 2023. 8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "text", + "content": "[12] Enrico Fini, Pietro Astolfi, Adriana Romero-Soriano, Jakob Verbeek, and Michal Drozdzal. Improved baselines for vision-language pre-training. Transactions on Machine Learning Research (TMLR), 2023. 
4" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[13] Timnit Gebru, Jamie Morgenstern, Briana Vecchione, Jennifer Wortman Vaughan, Hanna Wallach, Hal Daumé III au2, and Kate Crawford. Datasheets for datasets, 2021. 5, 13" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "type": "text", + "content": "[14] Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models, 2021. 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 152, + 545, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 196 + ], + "type": "text", + "content": "[15] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything, 2023. 4, 13" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 198, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 198, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 198, + 545, + 251 + ], + "type": "text", + "content": "[16] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Fei-Fei Li. Visual genome: Connecting language and vision using crowdsourced dense image annotations, 2016. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 253, + 545, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 253, + 545, + 307 + ], + "spans": [ + { + "bbox": [ + 307, + 253, + 545, + 307 + ], + "type": "text", + "content": "[17] Chunyuan Li, Haotian Liu, Liunian Harold Li, Pengchuan Zhang, Jyoti Aneja, Jianwei Yang, Ping Jin, Houdong Hu, Zicheng Liu, Yong Jae Lee, and Jianfeng Gao. Elevater: A benchmark and toolkit for evaluating language-augmented visual models, 2022. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 309, + 545, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 309, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 307, + 309, + 545, + 352 + ], + "type": "text", + "content": "[18] Junnan Li, Dongxu Li, Caiming Xiong, and Steven C. H. Hoi. BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. CoRR, abs/2201.12086, 2022. 4, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 354, + 545, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 545, + 397 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 545, + 397 + ], + "type": "text", + "content": "[19] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models, 2023. 
3, 4, 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 399, + 545, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 545, + 421 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 545, + 421 + ], + "type": "text", + "content": "[20] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for clip training, 2023. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 422, + 545, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 454 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 454 + ], + "type": "text", + "content": "[21] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking, 2023. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 456, + 545, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 545, + 498 + ], + "type": "text", + "content": "[22] Zhiqiu Lin, Xinyue Chen, Deepak Pathak, Pengchuan Zhang, and Deva Ramanan. Visualgptscore: Visio-linguistic reasoning with multimodal generative pre-training scores, 2023. 1, 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 501, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 501, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 501, + 545, + 533 + ], + "type": "text", + "content": "[23] Zixian Ma, Jerry Hong, Mustafa Omer Gul, Mona Gandhi, Irena Gao, and Ranjay Krishna. Crepe: Can vision-language foundation models reason compositionally?, 2023. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 535, + 545, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 535, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 307, + 535, + 545, + 578 + ], + "type": "text", + "content": "[24] Vicente Ordonez, Girish Kulkarni, and Tamara Berg. Im2text: Describing images using 1 million captioned photographs. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2011. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 580, + 545, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 580, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 307, + 580, + 545, + 612 + ], + "type": "text", + "content": "[25] Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives, 2020. 3, 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 613, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 689 + ], + "type": "text", + "content": "[26] Filip Radenovic, Abhimanyu Dubey, Abhishek Kadian, Todor Mihaylov, Simon Vandenhende, Yash Patel, Yi Wen, Vignesh Ramanathan, and Dhruv Mahajan. Filtering, distillation, and hard negatives for vision-language pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6967-6977, 2023. 
1, 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "text", + "content": "[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry," + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "26708" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "text", + "content": "Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. 3, 4, 7, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 171 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 171 + ], + "type": "text", + "content": "[28] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs, 2021. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 172, + 287, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 172, + 287, + 249 + ], + "spans": [ + { + "bbox": [ + 48, + 172, + 287, + 249 + ], + "type": "text", + "content": "[29] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. Laion-5b: An open large-scale dataset for training next generation image-text models, 2022. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 250, + 287, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 250, + 287, + 326 + ], + "spans": [ + { + "bbox": [ + 48, + 250, + 287, + 326 + ], + "type": "text", + "content": "[30] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, Melbourne, Australia, 2018. Association for Computational Linguistics. 
1, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "type": "text", + "content": "[31] Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. FLAVA: A foundational language and vision alignment model. In CVPR, 2022. 4, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 371, + 287, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 371, + 287, + 436 + ], + "spans": [ + { + "bbox": [ + 48, + 371, + 287, + 436 + ], + "type": "text", + "content": "[32] Krishna Srinivasan, Karthik Raman, Jiecao Chen, Mike Bendersky, and Marc Najork. Wit: Wikipedia-based image text dataset for multimodal multilingual machine learning. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '21), 2021. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 437, + 287, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 437, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 48, + 437, + 287, + 579 + ], + "type": "text", + "content": "[33] NLLB Team, Marta R. Costa-jussà, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia González, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. No language left behind: Scaling human-centered machine translation, 2022. 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 580, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 580, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 580, + 287, + 624 + ], + "type": "text", + "content": "[34] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. YFCC100m. Communications of the ACM, 59(2): 64-73, 2016. 1, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 625, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 287, + 668 + ], + "type": "text", + "content": "[35] Tristan Thrush, Ryan Jiang, Max Bartolo, Amanpreet Singh, Adina Williams, Douwe Kiela, and Candace Ross. Winoground: Probing vision and language models for visio-linguistic compositionality, 2022. 
1, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "text", + "content": "[36] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer," + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 72, + 545, + 706 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 327, + 72, + 545, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 72, + 545, + 258 + ], + "spans": [ + { + "bbox": [ + 327, + 72, + 545, + 258 + ], + "type": "text", + "content": "Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and finetuned chat models, 2023. 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 260, + 545, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 260, + 545, + 292 + ], + "spans": [ + { + "bbox": [ + 307, + 260, + 545, + 292 + ], + "type": "text", + "content": "[37] Jack Urbanek and Pratik Ringshia. Mephisto: A framework for portable, reproducible, and iterative crowdsourcing, 2023. 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 293, + 545, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 293, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 307, + 293, + 545, + 338 + ], + "type": "text", + "content": "[38] Hu Xu, Saining Xie, Po-Yao Huang, Licheng Yu, Russell Howes, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Cit: Curation in training for effective vision-language data. arXiv preprint arXiv:2301.02241, 2023. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 339, + 545, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 339, + 545, + 382 + ], + "spans": [ + { + "bbox": [ + 307, + 339, + 545, + 382 + ], + "type": "text", + "content": "[39] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data, 2023. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 384, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 384, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 307, + 384, + 545, + 437 + ], + "type": "text", + "content": "[40] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. 
From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78, 2014. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 439, + 545, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 439, + 545, + 538 + ], + "spans": [ + { + "bbox": [ + 307, + 439, + 545, + 538 + ], + "type": "text", + "content": "[41] Lili Yu, Bowen Shi, Ramakanth Pasunuru, Benjamin Muller, Olga Golovneva, Tianlu Wang, Arun Babu, Binh Tang, Brian Karrer, Shelly Sheynin, Candace Ross, Adam Polyak, Russell Howes, Vasu Sharma, Puxin Xu, Hovhannes Tamoyan, Oron Ashual, Uriel Singer, Shang-Wen Li, Susan Zhang, Richard James, Gargi Ghosh, Yaniv Taigman, Maryam Fazel-Zarandi, Asli Celikyilmaz, Luke Zettlemoyer, and Armen Aghajanyan. Scaling autoregressive multi-modal models: Pretraining and instruction tuning, 2023. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 539, + 545, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 539, + 545, + 594 + ], + "spans": [ + { + "bbox": [ + 307, + 539, + 545, + 594 + ], + "type": "text", + "content": "[42] Mert Yuksekgonul, Federico Bianchi, Pratyusha Kalluri, Dan Jurafsky, and James Zou. When and why vision-language models behave like bags-of-words, and what to do about it? In International Conference on Learning Representations, 2023. 1, 3, 7, 8, 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 594, + 545, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 594, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 307, + 594, + 545, + 628 + ], + "type": "text", + "content": "[43] Yan Zeng, Xinsong Zhang, and Hang Li. Multi-grained vision language pre-training: Aligning texts with visual concepts. arXiv preprint arXiv:2111.08276, 2021. 4, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 628, + 545, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 628, + 545, + 662 + ], + "spans": [ + { + "bbox": [ + 307, + 628, + 545, + 662 + ], + "type": "text", + "content": "[44] Hang Zhang, Yeyun Gong, Yelong Shen, Weisheng Li, Jiancheng Lv, Nan Duan, and Weizhu Chen. Poolingformer: Long document modeling with pooling attention, 2022. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 662, + 545, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 662, + 545, + 706 + ], + "spans": [ + { + "bbox": [ + 307, + 662, + 545, + 706 + ], + "type": "text", + "content": "[45] Tiancheng Zhao, Tianqi Zhang, Mingwei Zhu, Haozhan Shen, Kyusong Lee, Xiaopeng Lu, and Jianwei Yin. Vl-checklist: Evaluating pre-trained vision-language models with objects, attributes and relations, 2023. 
1, 3" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "26709" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/873ecf7b-8814-4ac3-a70f-20982249ac1d_content_list.json b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/873ecf7b-8814-4ac3-a70f-20982249ac1d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9cc8bccd5eda322cfedb9955160baab710cffd71 --- /dev/null +++ b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/873ecf7b-8814-4ac3-a70f-20982249ac1d_content_list.json @@ -0,0 +1,1528 @@ +[ + { + "type": "text", + "text": "A Recipe for Scaling up Text-to-Video Generation with Text-free Videos", + "text_level": 1, + "bbox": [ + 122, + 130, + 846, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiang Wang $^{1*}$ Shiwei Zhang $^{2\\dagger}$ Hangjie Yuan $^{3}$ Zhiwu Qing $^{1}$ Biao Gong $^{2}$ Yingya Zhang $^{2}$ Yujun Shen $^{4}$ Changxin Gao $^{1}$ Nong Sang $^{1\\dagger}$", + "bbox": [ + 119, + 178, + 843, + 218 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Key Laboratory of Image Processing and Intelligent Control, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology $^{2}$ Alibaba Group $^{3}$ Zhejiang University $^{4}$ Ant Group", + "bbox": [ + 96, + 222, + 872, + 279 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{wxiang,qzw,cgao,nsang}@hust.edu.cn,{zhangjin.zsw,yingya.zyy}@alibaba-inc.com hj.yuan@zju.edu.cn,{a.biao.gong,shenyujun0302}@gmail.com", + "bbox": [ + 220, + 277, + 754, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "\"Close-up of Caucasian mother and baby girl sitting at windowsill and reading book. Young woman educating daughter at home.\"", + "bbox": [ + 222, + 305, + 823, + 316 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d328df4e5ed3fa12e9f8353718952f20550582afdba3ec6f9d53d0bf2d3ba22f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 99, + 316, + 867, + 448 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8afa70d7b19d287c2a4dda7cacecbeaaddc7cab81f17bad8a617543f4bddcc07.jpg", + "image_caption": [ + "Figure 1. Example video results generated by the proposed TF-T2V on text-to-video generation and compositional video synthesis tasks without training on any video-text pairs." + ], + "image_footnote": [], + "bbox": [ + 99, + 449, + 867, + 678 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 724, + 313, + 739 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Diffusion-based text-to-video generation has witnessed impressive progress in the past year yet still falls behind text-to-image generation. One of the key reasons is the limited scale of publicly available data (e.g., 10M video-text pairs in WebVid10M vs. 5B image-text pairs in LAION), considering the high cost of video captioning. 
Instead, it could be far easier to collect unlabeled clips from video platforms like YouTube. Motivated by this, we come up", + "bbox": [ + 75, + 756, + 470, + 878 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "with a novel text-to-video generation framework, termed $TF-T2V$ , which can directly learn with text-free videos. The rationale behind is to separate the process of text decoding from that of temporal modeling. To this end, we employ a content branch and a motion branch, which are jointly optimized with weights shared. Following such a pipeline, we study the effect of doubling the scale of training set (i.e., video-only WebVid10M) with some randomly collected text-free videos and are encouraged to observe the performance improvement (FID from 9.67 to 8.19 and FVD from 484 to 441), demonstrating the scalability of", + "bbox": [ + 500, + 724, + 893, + 892 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation.", + "bbox": [ + 236, + 0, + 810, + 20 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Except for this watermark, it is identical to the accepted version;", + "bbox": [ + 323, + 17, + 722, + 30 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 292, + 31, + 754, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Intern at Alibaba Group. † Corresponding authors.", + "bbox": [ + 99, + 887, + 387, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "6572", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "our approach. We also find that our model could enjoy sustainable performance gain (FID from 8.19 to 7.64 and FVD from 441 to 366) after reintroducing some text labels for training. Finally, we validate the effectiveness and generalizability of our ideology on both native text-to-video generation and compositional video synthesis paradigms. Code and models will be publicly available at here.", + "bbox": [ + 73, + 90, + 470, + 196 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 210, + 209, + 226 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Video generation aims to synthesize realistic videos that possess visually appealing spatial contents and temporally coherent motions. It has witnessed unprecedented progress in recent years with the advent of deep generative techniques [22, 53], especially with the emergence of video diffusion models [4, 34, 40, 54, 60, 67, 78]. Pioneering approaches [28, 33, 67] utilize pure image diffusion models or fine-tuning on a small amount of video-text data to synthesize videos, leading to temporally discontinuous results due to insufficient motion perception [39, 79]. To achieve plausible results, current text-to-video methods like VideoLDM [4] and ModelScopeT2V [54] usually insert temporal blocks into latent 2D-UNet [43] and train the model on expansive video-text datasets, e.g., WebVid10M [2]. To enable more controllable generation, VideoComposer [58] proposes a compositional paradigm that incorporates additional conditions (e.g., depth, sketch, motion vectors, etc.) 
to guide synthesis, allowing customizable creation.", + "bbox": [ + 73, + 234, + 468, + 507 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite this, the progress in text-to-video generation still falls behind text-to-image generation [42, 43]. One of the key reasons is the limited scale of publicly available videotext data, considering the high cost of video captioning [83]. Instead, it could be far easier to collect text-free video clips from media platforms like YouTube. There are some works sharing similar inspiration, Make-A-Video [50] and Gen-1 [12] employ a two-step strategy that first leverages a large ( $\\sim$ 1B parameters) diffusion prior model [42] to convert text embedding into image embedding of CLIP [41] and then enters it into an image-conditioned generator to synthesize videos. However, the separate two-step manner may cause issues such as error accumulation [13], increased model size and latency [42, 69], and does not support text-conditional optimization if extra video-text data is available, leading to sub-optimal results. Moreover, the characteristics of scaling potential on video generation are still under-explored.", + "bbox": [ + 73, + 508, + 468, + 763 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we aim to train a single unified video diffusion model that allows text-guided video generation by exploiting the widely accessible text-free videos and explore its scaling trend. To achieve this, we present a novel two-branch framework named TF-T2V, where a content branch is designed for spatial appearance generation, and a motion branch specializes in temporal dynamics synthesis. More specifically, we utilize the publicly available image-text datasets such as LAION-5B [48] to learn text-guided", + "bbox": [ + 73, + 765, + 468, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "and image-guided spatial appearance generation. In the motion branch, we harness the video-only data to conduct image-conditioned video synthesis, allowing the temporal modules to learn intricate motion patterns without relying on textual annotations. Paired video-text data, if available, can also be incorporated into co-optimization. Furthermore, unlike previous methods that impose training loss on each frame individually, we introduce a temporal coherence loss to explicitly enforce the learning of correlations between adjacent frames, enhancing the continuity of generated videos. In this way, the proposed TF-T2V achieves text-to-video generation by assembling contents and motions with a unified model, overcoming the high cost of video captioning and eliminating the need for complex cascading steps.", + "bbox": [ + 496, + 90, + 890, + 303 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Notably, TF-T2V is a plug-and-play paradigm, which can be integrated into existing text-to-video generation and compositional video synthesis frameworks as shown in Fig. 1. Different from most prior works that rely heavily on video-text data and train models on the widely-used watermarked and low-resolution (around 360P) WebVid10M [2], TF-T2V opens up new possibilities for optimizing with text-free videos or partially paired video-text data, making it more scalable and versatile in widespread scenarios, such as high-definition video generation. 
To study the scaling trend, we double the scale of the training set with some randomly collected text-free videos and are encouraged to observe the performance improvement, with FID from 9.67 to 8.19 and FVD from 484 to 441. Extensive quantitative and qualitative experiments collectively demonstrate the effectiveness and scaling potential of the proposed TF-T2V in terms of synthetic continuity, fidelity, and controllability.", + "bbox": [ + 496, + 304, + 892, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 580, + 640, + 595 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we provide a brief review of relevant literature on text-to-image generation, text-to-video generation, and compositional video synthesis.", + "bbox": [ + 496, + 608, + 890, + 654 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Text-to-image generation. Recently, text-to-image generation has made significant strides with the development of large-scale image-text datasets such as LAION-5B [48], allowing users to create high-resolution and photorealistic images that accurately depict the given natural language descriptions. Previous methods [16, 26, 49] primarily focus on synthesizing images by adopting generative adversarial networks (GANs) to estimate training sample distributions. Distinguished by the promising stability and scalability, diffusion-based generation models have attracted increasing attention [27, 42-45]. Diffusion models utilize iterative steps to gradually refine the generated image, resulting in improved quality and realism. Typically,Imagen [45] and GLIDE [38] explore text-conditional diffusion models and boost sample quality by applying classifier-free guidance [19]. DALL-E 2 [42] first leverages an image prior", + "bbox": [ + 496, + 659, + 892, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "6573", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/aada0f9a862f0c672dc173904d4a8c31e67f4cb754cc05b7c7ea0481fa8840f8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 101, + 90, + 697, + 306 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/792581ee81c6e66e56eee6847243bfbfe763b19931c1d9603acb0bc60e620fc8.jpg", + "image_caption": [ + "Figure 2. Overall pipeline of TF-T2V, which consists of two branches. In the content branch, paired image-text data is leveraged to learn text-conditioned and image-conditioned spatial appearance generation. The motion branch supports the training of motion dynamic synthesis by feeding text-free videos (or partially paired video-text data if available). During the training stage, both branches are optimized jointly. Notably, TF-T2V can be seamlessly integrated into the compositional video synthesis framework by incorporating composable conditions. In inference, TF-T2V enables text-guided video generation by taking text prompts and random noise sequences as input." + ], + "image_footnote": [], + "bbox": [ + 705, + 90, + 867, + 309 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "to bridge multi-modal embedding spaces and then learns a diffusion decoder to synthesize images in the pixel space. Stable Diffusion [43] introduces latent diffusion models that conduct iterative denoising processes at the latent level to save computational costs. 
There are also some works that generate customized and desirable images by incorporating additional spatial control signals [24, 36, 77].", + "bbox": [ + 75, + 390, + 470, + 496 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Text-to-video generation. This task poses additional challenges compared to text-to-image generation due to the temporal dynamics involved in videos. Various early techniques have been proposed to tackle this problem, such as recurrent neural networks combined with GANs [3, 51, 53, 61, 64] or transformer-based autoregressive models [22, 73]. With the subsequent advent of video diffusion models pretrained on large-scale video-text datasets [2, 63, 71], video content creation has demonstrated remarkable advances [1, 4, 6-9, 14, 15, 17, 18, 21, 23, 28, 31-33, 35, 37, 39, 56, 57, 62, 65, 67, 69, 74-76]. Imagen Video [21] learns cascaded pixel-level diffusion models to produce high-resolution videos. Following [42], Make-A-Video [50] introduces a two-step strategy that first maps the input text to image embedding by a large ( $\\sim$ 1B parameters) diffusion prior model and then embeds the resulting embedding into an image-conditional video diffusion model to synthesize videos in pixel space. VideoLDM [4] and ModelScopeT2V [54] extend 2D-UNet into 3D-UNet by injecting temporal layers and operate a latent denoising process to save computational resources. In this paper, we present a single unified framework for text-to-video generation and study the scaling trend by harnessing widely accessible text-free videos.", + "bbox": [ + 75, + 503, + 470, + 851 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Compositional video synthesis. Traditional text-to-video methods solely rely on textual descriptions to control the video generation process, limiting desired fine-grained cus-", + "bbox": [ + 75, + 854, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tomization such as texture, object position, motion patterns, etc. To tackle this constraint and pursue higher controllability, several controllable video synthesis methods [8, 9, 12, 29, 58, 68, 72, 79, 81] have been proposed. These methods utilize additional control signals, such as depth or sketch, to guide the generation of videos. By incorporating extra structured guidance, the generated content can be precisely controlled and customized. Among these approaches, VideoComposer [58] stands out as a pioneering and versatile compositional technique. It integrates multiple conditioning signals including textual, spatial and temporal conditions within a unified framework, offering enhanced controllability, compositionality, and realism in the generated videos. Despite the remarkable quality, these methods still rely on high-quality video-text data to unleash powerful and customizable synthesis. In contrast, our method can be directly merged into existing controllable frameworks to customize videos by exploiting text-free videos.", + "bbox": [ + 496, + 390, + 893, + 662 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 676, + 589, + 691 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We first provide a brief introduction to the preliminaries of the video diffusion model. Then, we will elaborate on the mechanisms of TF-T2V in detail. The overall framework of the proposed TF-T2V is displayed in Fig. 2.", + "bbox": [ + 496, + 702, + 892, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Preliminaries of video diffusion model", + "text_level": 1, + "bbox": [ + 500, + 773, + 828, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Diffusion models involve a forward diffusion process and a reverse iterative denoising stage. The forward process of diffusion models is gradually imposing random noise to clean data $x_0$ in a Markovian chain:", + "bbox": [ + 496, + 796, + 890, + 857 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(x _ {t} \\mid x _ {t - 1}\\right) = \\mathcal {N} \\left(x _ {t}; \\sqrt {1 - \\beta_ {t - 1}} x _ {t - 1}, \\beta_ {t} I\\right), t = 1, \\dots , T \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 868, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "6574", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\beta_{t} \\in (0,1)$ is a noise schedule and $T$ is the total time step. When $T$ is sufficiently large, e.g. $T = 1000$ , the resulting $x_{T}$ is nearly a random Gaussian distribution $\\mathcal{N}(0,I)$ . The role of diffusion model is to denoise $x_{T}$ and learn to iteratively estimate the reversed process:", + "bbox": [ + 75, + 90, + 468, + 167 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\theta} (x _ {t - 1} | x _ {t}) = \\mathcal {N} (x _ {t - 1}; \\mu_ {\\theta} (x _ {t}, t), \\sum_ {\\theta} (x _ {t}, t)) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 119, + 179, + 468, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We usually train a denoising model $\\hat{x}_{\\theta}$ parameterized by $\\theta$ to approximate the original data $x_0$ and optimize the following v-prediction [21, 46] problem:", + "bbox": [ + 76, + 207, + 468, + 253 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {b a s e}} = \\mathbb {E} _ {\\theta} [ \\| v - \\hat {x} _ {\\theta} (x _ {t}, t, c) \\| _ {2} ^ {2} ] \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 263, + 468, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $c$ is conditional information such as textual prompt, and $v$ is the parameterized prediction objective. In representative video diffusion models [4, 54, 58], the denoising model $\\hat{x}_{\\theta}$ is a latent 3D-UNet [4, 54] modified from its 2D version [43] by inserting additional temporal blocks, which is optimized in the latent feature space by applying a variational autoencoder [11], and Eq. (3) is applied on each frame of the input video to train the whole model.", + "bbox": [ + 75, + 294, + 468, + 414 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. TF-T2V", + "text_level": 1, + "bbox": [ + 76, + 424, + 174, + 438 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The objective of TF-T2V is to learn a text-conditioned video diffusion model to create visually appealing and temporally coherent videos with text-free videos or partially paired video-text data. Without loss of generality, we first describe the workflow of our TF-T2V in the scenario where only text-free video is used. With merely text-free videos available for training, it is challenging to guide content creation by textual information since there lacks text-visual correspondence. To tackle this issue, we propose to resort to web-scale and high-quality image-text datasets [47, 48], which are publicly accessible on the Internet. 
However, this raises another question: how can we leverage the image-text data and text-free videos in a unified framework?", + "bbox": [ + 75, + 449, + 468, + 643 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Recalling the network architecture in 3D-UNet, the spatial modules mainly focus on appearance modeling, and the temporal modules primarily aim to operate motion coherence. The intuition is that we can utilize image-text data to learn text-conditioned spatial appearance generation and adopt high-quality text-free videos to guide consistent motion dynamic synthesis. In this way, we can perform text-to-video generation in a single model to synthesize high-quality and consistent videos during the inference stage. Based on this, the proposed TF-T2V consists of two branches: a content branch for spatial appearance generation and a motion branch for motion dynamic synthesis.", + "bbox": [ + 75, + 645, + 468, + 825 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 Spatial appearance generation", + "text_level": 1, + "bbox": [ + 76, + 845, + 341, + 861 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Like previous text-to-image works [43, 77], the content branch of TF-T2V takes a noised image $I_{image} \\in H \\times$", + "bbox": [ + 76, + 869, + 468, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$W \\times C$ as input, where $H$ , $W$ , $C$ are the height, width, and channel dimensions respectively, and employs conditional signals (i.e., text and image embeddings) to offer semantic guidance for content generation. This branch primarily concentrates on optimizing the spatial modules in the video diffusion model and plays a crucial role in determining appealing visual quality. In order to ensure that each condition can also control the created content separately, we randomly drop text or image embeddings with a certain probability during training. The text and image encoders from CLIP [41] are adopted to encode embeddings.", + "bbox": [ + 496, + 90, + 890, + 257 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 Motion dynamic synthesis", + "text_level": 1, + "bbox": [ + 500, + 282, + 733, + 297 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The pursuit of producing highly temporally consistent videos is a unique hallmark of video creation. Recent advancements [4, 54, 57, 58] in the realm of video synthesis usually utilize large-scale video-text datasets such as WebVid10M [2] to achieve coherent video generation. However, acquiring large-scale video-text pairs consumes extensive manpower and time, hindering the scaling up of video diffusion models. To make matters worse, the widely used WebVid10M is a watermarked and low-resolution (around 360P) dataset, resulting in unsatisfactory video creation that cannot meet the high-quality video synthesis requirements.", + "bbox": [ + 496, + 308, + 890, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To mitigate the above issues, we propose to leverage high-quality text-free videos that are easily accessible on video media platforms, e.g., YouTube and TikTok. To fully excavate the abundant motion dynamics within the text-free videos, we train a image-conditioned model. By optimizing this image-to-video generation task, the temporal modules in the video diffusion model can learn to perceive and model diverse motion dynamics. 
Specifically, given a noised video $I_{video} \\in F \\times H \\times W \\times C$ , where $F$ is the temporal length, the motion branch of TF-T2V learns to recover the undisturbed video guided by the image embedding. The image embedding is extracted from the center frame of the original video by applying CLIP's image encoder [41].", + "bbox": [ + 496, + 476, + 890, + 672 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Since large-scale image-text data used for training contains abundant movement intentions [30], TF-T2V can achieve text-to-video generation by assembling spatial appearances involving motion trends and predicted motion dynamics. When extra paired video-text data is available, we conduct both text-to-video and image-to-video generation based on video-text pairs to train TF-T2V and further enhance the perception ability for desirable textual control.", + "bbox": [ + 496, + 672, + 890, + 792 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In addition, we notice that previous works apply the training loss (i.e., Eq. (3)) on each frame of the input video individually without considering temporal correlations between frames, suffering from incoherent appearances and motions. Inspired by the early study [25, 55, 59, 80] finding that the difference between two adjacent frames usually contains motion patterns, e.g., dynamic trajectory, we thus", + "bbox": [ + 496, + 795, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "6575", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/5257ba0c927e3d21e64cfd986d59e872210b0a096b7deb033adb40c815a5fc31.jpg", + "table_caption": [ + "Table 1. Quantitative comparison with state-of-the-art methods for text-to-video task on MSR-VTT in terms of FID, FVD, and CLIPSIM." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Zero-shot</td><td>Parameters</td><td>FID (↓)</td><td>FVD (↓)</td><td>CLIPSIM (↑)</td></tr>
<tr><td>Nüwa [66]</td><td>No</td><td>-</td><td>47.68</td><td>-</td><td>0.2439</td></tr>
<tr><td>CogVideo (Chinese) [22]</td><td>Yes</td><td>15.5B</td><td>24.78</td><td>-</td><td>0.2614</td></tr>
<tr><td>CogVideo (English) [22]</td><td>Yes</td><td>15.5B</td><td>23.59</td><td>1294</td><td>0.2631</td></tr>
<tr><td>MagicVideo [82]</td><td>Yes</td><td>-</td><td>-</td><td>1290</td><td>-</td></tr>
<tr><td>Make-A-Video [50]</td><td>Yes</td><td>9.7B</td><td>13.17</td><td>-</td><td>0.3049</td></tr>
<tr><td>ModelScopeT2V [54]</td><td>Yes</td><td>1.7B</td><td>11.09</td><td>550</td><td>0.2930</td></tr>
<tr><td>VideoComposer [58]</td><td>Yes</td><td>1.9B</td><td>10.77</td><td>580</td><td>0.2932</td></tr>
<tr><td>Latent-Shift [1]</td><td>Yes</td><td>1.5B</td><td>15.23</td><td>-</td><td>0.2773</td></tr>
<tr><td>VideoLDM [4]</td><td>Yes</td><td>4.2B</td><td>-</td><td>-</td><td>0.2929</td></tr>
<tr><td>PYoCo [14]</td><td>Yes</td><td>-</td><td>9.73</td><td>-</td><td>-</td></tr>
<tr><td>TF-T2V (WebVid10M)</td><td>Yes</td><td>1.8B</td><td>9.67</td><td>484</td><td>0.2953</td></tr>
<tr><td>TF-T2V (WebVid10M+Internal10M)</td><td>Yes</td><td>1.8B</td><td>8.19</td><td>441</td><td>0.2991</td></tr>
", + "bbox": [ + 86, + 104, + 880, + 280 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "propose a temporal coherence loss that utilizes the frame difference as an additional supervisory signal:", + "bbox": [ + 75, + 292, + 468, + 324 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {c o h e r e n c e}} = \\mathbb {E} _ {\\theta} [ \\sum_ {j = 1} ^ {F - 1} | | (v _ {j + 1} - v _ {j}) - (o _ {j + 1} - o _ {j}) | | _ {2} ^ {2} ] \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 330, + 468, + 352 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $o_j$ and $v_j$ are the predicted frame and corresponding ground truth. This loss term measures the discrepancy between the predicted frame differences and the ground truth frame differences of the input parameterized video. By minimizing Eq. (4), TF-T2V helps to alleviate frame flickering and ensures that the generated videos exhibit seamless transitions and promising temporal dynamics.", + "bbox": [ + 75, + 359, + 468, + 465 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2.3 Training and inference", + "text_level": 1, + "bbox": [ + 76, + 482, + 290, + 498 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In order to mine the complementary advantages of spatial appearance generation and motion dynamic synthesis, we jointly optimize the entire model in an end-to-end manner. The total loss can be formulated as:", + "bbox": [ + 75, + 506, + 468, + 566 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {b a s e}} + \\lambda \\mathcal {L} _ {\\text {c o h e r e n c e}} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 578, + 468, + 594 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_{base}$ is imposed on video and image together by treating the image as a \"single frame\" video, and $\\lambda$ is a balance coefficient that is set empirically to 0.1.", + "bbox": [ + 75, + 604, + 468, + 648 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After training, we can perform text-guided video generation to synthesize temporally consistent video content that aligns well with the given text prompt. Moreover, TF-T2V is a general framework and can also be inserted into existing compositional video synthesis paradigm [58] by incorporating additional spatial and temporal structural conditions, allowing for customized video creation.", + "bbox": [ + 75, + 648, + 468, + 753 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 767, + 209, + 785 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we present a comprehensive quantitative and qualitative evaluation of the proposed TF-T2V on text-to-video generation and composition video synthesis.", + "bbox": [ + 75, + 792, + 468, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental setup", + "text_level": 1, + "bbox": [ + 76, + 847, + 264, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation details. TF-T2V is built on two typical open-source baselines, i.e., ModelScopeT2V [54] and", + "bbox": [ + 75, + 869, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/fb647ca3b6f107076023e82613719da678dacd72efc7329db221b9c3279115a8.jpg", + "table_caption": [ + "Table 2. 
Human preference results on text-to-video generation." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Text alignment</td><td>Visual quality</td><td>Temporal coherence</td></tr>
<tr><td>ModelScopeT2V [54]</td><td>83.5%</td><td>74.0%</td><td>81.3%</td></tr>
<tr><td>TF-T2V</td><td>86.5%</td><td>87.0%</td><td>92.5%</td></tr>
", + "bbox": [ + 506, + 305, + 879, + 362 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "VideoComposer [58]. DDPM sampler [20] with $T = 1000$ steps is adopted for training, and we employ DDIM [52] with 50 steps for inference. We optimize TF-T2V using AdamW optimizer with a learning rate of 5e-5. For input videos, we sample 16 frames from each video at 4FPS and crop a $448 \\times 256$ region at the center as the basic setting. Note that we can also easily train high-definition video diffusion models by collecting high-quality text-free videos (see examples in the Appendix). LAION-5B [48] is utilized to provide image-text pairs. Unless otherwise stated, we treat WebVid10M, which includes about 10.7M video-text pairs, as a text-free dataset to train TF-T2V and do not use any textual annotations. To study scaling trends, we gathered about 10M high-quality videos without text labels from internal data, termed the Internal10M dataset.", + "bbox": [ + 496, + 375, + 890, + 601 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Metrics. (i) To evaluate text-to-video generation, following previous works [4, 54], we leverage the standard Fréchet Inception Distance (FID), Fréchet Video Distance (FVD), and CLIP Similarity (CLIPSIM) as quantitative evaluation metrics and report results on MSR-VTT dataset [70]. (ii) For controllability evaluation, we leverage depth error, sketch error, and end-point-error (EPE) [10] to verify whether the generated videos obey the control of input conditions. Depth error measures the divergence between the input depth conditions and the eliminated depth of the synthesized video. Similarly, sketch error examines the sketch control. EPE evaluates the flow consistency between the reference video and the generated video. In addition, human evaluation is also introduced to validate our method.", + "bbox": [ + 496, + 606, + 890, + 818 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Evaluation on text-to-video generation", + "text_level": 1, + "bbox": [ + 500, + 830, + 830, + 847 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Tab. 1 displays the comparative quantitative results with existing state-of-the-art methods. We observe that TF-T2V achieves remarkable performance under various metrics.", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "6576", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/fb7d52d8fa8ea8a6059e66f7a6648f27fbc0ee0ab5ebdb07e5f5a7f8aec808d0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 101, + 89, + 521, + 287 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/080155e6c674a561b7bf2c8f0acb899b74590c7a7aadd92bc52dac9f0878e3c4.jpg", + "image_caption": [ + "\"Portrait of smiling young woman outdoors\"" + ], + "image_footnote": [], + "bbox": [ + 531, + 102, + 864, + 287 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/07158c131aa37a10d9155b3328822e13ac461918504a4a1bd8e7e17bcf879c95.jpg", + "image_caption": [ + "Figure 3. Qualitative comparison on text-to-video generation. Three representative open-source text-to-video approaches are compared, including ModelScopeT2V [54], Text2video-Zero [28] and ZeroScope [5]. Please refer to the Appendix for videos and more comparisons." 
+ ], + "image_footnote": [], + "bbox": [ + 99, + 289, + 519, + 484 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/518f579bc505d8932a972d7ec19ea75385874abf3e5d255f6e01b7dfc5eb9a9b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 531, + 289, + 864, + 483 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/1841e8335e9b3555f8d5cc5d21f46eefeedbc1740d14f963dad8802e037c19a0.jpg", + "table_caption": [ + "Table 3. Evaluation of structure control based on depth signals." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Condition</td><td>Depth error (↓)</td></tr>
<tr><td>VideoComposer [58]</td><td>Text</td><td>0.382</td></tr>
<tr><td>VideoComposer [58]</td><td>Text and depth</td><td>0.217</td></tr>
<tr><td>TF-T2V</td><td>Text and depth</td><td>0.209</td></tr>
", + "bbox": [ + 81, + 536, + 457, + 592 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/5389185c465f9660033640805ba99302605e2361b7091b15f3ed3a5c2405e15e.jpg", + "table_caption": [ + "Table 4. Evaluation of structure control based on sketch signals." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Condition</td><td>Sketch error (↓)</td></tr>
<tr><td>VideoComposer [58]</td><td>Text</td><td>0.1854</td></tr>
<tr><td>VideoComposer [58]</td><td>Text and sketch</td><td>0.1161</td></tr>
<tr><td>TF-T2V</td><td>Text and sketch</td><td>0.1146</td></tr>
", + "bbox": [ + 80, + 613, + 460, + 670 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Notably, TF-T2V trained on WebVid10M and Internal10M obtains higher performance than the counterpart on WebVid10M, revealing promising scalable capability. We show the qualitative visualizations in Fig. 3. From the results, we can find that compared with previous methods, TF-T2V obtains impressive video creation in terms of both temporal continuity and visual quality. The human assessment in Tab. 2 also reveals the above observations. The user study is performed on 100 randomly synthesized videos.", + "bbox": [ + 75, + 686, + 468, + 821 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Evaluation on compositional video synthesis", + "text_level": 1, + "bbox": [ + 76, + 832, + 450, + 848 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We compare the controllability of TF-T2V and VideoComposer on 1,000 generated videos in terms of depth control (Tab. 3), sketch control (Tab. 4) and motion control", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/9e20fa0134515430f3a71c1750d175c46e5d97450c2c7abd986cd5c9895b65a6.jpg", + "table_caption": [ + "Table 5. Evaluation of motion control based on motion vectors." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Condition</td><td>EPE (↓)</td></tr>
<tr><td>VideoComposer [58]</td><td>Text</td><td>4.13</td></tr>
<tr><td>VideoComposer [58]</td><td>Text and motion vector</td><td>1.98</td></tr>
<tr><td>TF-T2V</td><td>Text and motion vector</td><td>1.88</td></tr>
", + "bbox": [ + 504, + 536, + 880, + 592 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/5e080d26e7c16aff2ce64ff85c48bd80e8d451b4e53d7a57db74f6c77f6d0956.jpg", + "table_caption": [ + "Table 6. Human evaluations on compositional video synthesis." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Structure alignment</td><td>Visual quality</td><td>Temporal coherence</td></tr>
<tr><td>VideoComposer [58]</td><td>79.0%</td><td>66.0%</td><td>77.5%</td></tr>
<tr><td>TF-T2V</td><td>89.0%</td><td>79.5%</td><td>84.5%</td></tr>
", + "bbox": [ + 504, + 613, + 879, + 670 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(Tab. 5). The above experimental evaluations highlight the effectiveness of TF-T2V by leveraging text-free videos. In Fig. 4 and 5, we show the comparison of TF-T2V and existing methods on compositional video generation. We notice that TF-T2V exhibits high-fidelity and consistent video generation. In addition, we conduct a human evaluation on 100 randomly sampled videos and report the results in Tab. 6. The preference assessment provides further evidence of the superiority of the proposed TF-T2V.", + "bbox": [ + 496, + 686, + 890, + 823 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Ablation study", + "text_level": 1, + "bbox": [ + 500, + 832, + 648, + 848 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Effect of temporal coherence loss. To enhance temporal consistency, we propose a temporal coherence loss. In Tab. 7, we show the effectiveness of the proposed tem", + "bbox": [ + 498, + 854, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6577", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1dbd35f68d3c338efe18cc55eaf235a11a30806683003399d7976da847c27853.jpg", + "image_caption": [ + "Figure 4. Qualitative comparison on compositional depth-to-video generation. The videos are generated by taking textual prompts and structural guidance as conditions. Compared with existing methods, TF-T2V yields more structural compliance and high-fidelity results." + ], + "image_footnote": [], + "bbox": [ + 99, + 88, + 524, + 412 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3735fc2eb1cfb67dfa052700068ce00d8c9aebbfb953465cbb121db0d998a6b0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 531, + 88, + 867, + 412 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a1b38b182a3df35b98ff798c2d3c90b275be717432fd4f6a344f7da990385241.jpg", + "image_caption": [ + "Figure 5. Qualitative comparison on compositional sketch-to-video generation. The videos are generated by taking textual descriptions and structural guidance as conditions. Compared with other methods, TF-T2V produces more realistic and consistent results." + ], + "image_footnote": [], + "bbox": [ + 116, + 441, + 522, + 705 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a1a8b0c884ae1c5e9e28e865d0458d74b75583ff0d6c740b87b09fceff256270.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 531, + 441, + 846, + 705 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "poral coherence loss in terms of frame consistency. The metric results are obtained by calculating the average CLIP similarity of two consecutive frames in 1,000 videos. We further display the qualitative comparative results in Fig. 6 and observe that temporal coherence loss helps to alleviate temporal discontinuity such as color shift.", + "bbox": [ + 75, + 744, + 468, + 835 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. 
Evaluation on semi-supervised setting", + "text_level": 1, + "bbox": [ + 76, + 847, + 403, + 864 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Through the above experiments and observations, we verify that text-free video can help improve the continuity", + "bbox": [ + 76, + 869, + 468, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/de1e66da63f19f858b29938b48e6004c61d15e9f3bd43036876d91712bfc96ad.jpg", + "table_caption": [ + "Table 7. Text-to-video evaluation on frame consistency." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Frame consistency (%) ↑</td></tr>
<tr><td>w/o temporal coherence loss</td><td>89.71</td></tr>
<tr><td>TF-T2V</td><td>91.06</td></tr>
", + "bbox": [ + 506, + 758, + 877, + 801 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "and quality of generated video. As previously stated, TF-T2V also supports the combination of annotated videotext data and text-free videos to train the model, i.e., the semi-supervised manner. The annotated text can provide additional fine-grained motion signals, enhancing the align-", + "bbox": [ + 498, + 825, + 890, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "6578", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9cac116d8a05d0dde3036325f45639c95e1ca48c34eadd6aca83ace489bf563d.jpg", + "image_caption": [ + "Figure 6. Qualitative ablation study. The videos are generated by taking textual descriptions and structural guidance as conditions." + ], + "image_footnote": [], + "bbox": [ + 84, + 87, + 527, + 292 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bb1c6fc511293c8ad61048f34b5192974e2968de7b14734700b312e9963bd4bb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 534, + 88, + 883, + 292 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e4eec659f65d2e744dda08a136d5a7f8dea9a5107245524e889a01243b104334.jpg", + "image_caption": [ + "Figure 7. Qualitative evaluation on text-to-video generation with temporally-correlated text prompts involving the evolution of movement." + ], + "image_footnote": [], + "bbox": [ + 86, + 315, + 883, + 607 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/5d07baf0c9cdb2cf88b5df55e2beb40574d90c63b9e9d8bf089e5e19c1713372.jpg", + "table_caption": [ + "Table 8. Quantitative experiments on text-to-video generation. TF-T2V-Semi means the semi-supervised setting where labeled WebVid10M and text-free Internal10M are adopted." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>FID (↓)</td><td>FVD (↓)</td><td>CLIPSIM (↑)</td></tr>
<tr><td>ModelScopeT2V [54]</td><td>11.09</td><td>550</td><td>0.2930</td></tr>
<tr><td>TF-T2V</td><td>8.19</td><td>441</td><td>0.2991</td></tr>
<tr><td>TF-T2V-Semi</td><td>7.64</td><td>366</td><td>0.3032</td></tr>
", + "bbox": [ + 81, + 676, + 465, + 732 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ment of generated videos and the provided prompts involving desired motion evolution. We show the comparison results in Tab. 8 and find that the semi-supervised manner reaches the best performance, indicating the effectiveness of harnessing text-free videos. Notably, TF-T2V-Semi outperforms ModelScopeT2V trained on labeled WebVid10M, possessing good scalability. Moreover, the qualitative evaluations in Fig. 7 show that existing methods may struggle to synthesize text-aligned consistent videos when textual prompts involve desired temporal evolution. In contrast,", + "bbox": [ + 75, + 750, + 472, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "TF-T2V in the semi-supervised setting exhibits excellent text-video alignment and temporally smooth generation.", + "bbox": [ + 500, + 638, + 890, + 670 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 685, + 619, + 700 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we present a novel and versatile video generation framework named TF-T2V to exploit text-free videos and explore its scaling trend. TF-T2V effectively decomposes video generation into spatial appearance generation and motion dynamic synthesis. A temporal coherence loss is introduced to explicitly constrain the learning of correlations between adjacent frames. Experimental results demonstrate the effectiveness and potential of TF-T2V in terms of fidelity, controllability, and scalability.", + "bbox": [ + 496, + 710, + 893, + 849 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. This work is supported by the National Natural Science Foundation of China under grant U22B2053 and Alibaba Research Intern Program.", + "bbox": [ + 498, + 854, + 893, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "6579", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jie An, Songyang Zhang, Harry Yang, Sonal Gupta, Jia-Bin Huang, Jiebo Luo, and Xi Yin. Latent-shift: Latent diffusion with temporal shift for efficient text-to-video generation. arXiv preprint arXiv:2304.08477, 2023. 3, 5", + "[2] Max Bain, Arsha Nagrani, Gül Varol, and Andrew Zisserman. Frozen in time: A joint video and image encoder for end-to-end retrieval. In ICCV, pages 1728-1738, 2021. 2, 3, 4", + "[3] Yogesh Balaji, Martin Renqiang Min, Bing Bai, Rama Chellappa, and Hans Peter Graf. Conditional GAN with discriminative filter generation for text-to-video synthesis. In IJCAI, page 2, 2019. 3", + "[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In CVPR, pages 22563-22575, 2023. 2, 3, 4, 5", + "[5] Cerspense. Zeroscope: Diffusion-based text-to-video synthesis. https://huggingface.co/cerspense/zeroscope_v2_576w, 2023.6", + "[6] Duygu Ceylan, Chun-Hao P Huang, and Niloy J Mitra. Pix2video: Video editing using image diffusion. In ICCV, pages 23206-23217, 2023. 3", + "[7] Wenhao Chai, Xun Guo, Gaoang Wang, and Yan Lu. Stablevideo: Text-driven consistency-aware diffusion video editing. 
In ICCV, pages 23040-23050, 2023.", + "[8] Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. Motion-conditioned diffusion model for controllable video synthesis. arXiv preprint arXiv:2304.14404, 2023. 3", + "[9] Weifeng Chen, Jie Wu, Pan Xie, Hefeng Wu, Jiashi Li, Xin Xia, Xuefeng Xiao, and Liang Lin. Control-a-video: Controllable text-to-video generation with diffusion models. arXiv preprint arXiv:2305.13840, 2023. 3", + "[10] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. Flownet: Learning optical flow with convolutional networks. In ICCV, pages 2758-2766, 2015. 5", + "[11] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming Transformers for high-resolution image synthesis. In CVPR, pages 12873-12883, 2021. 4", + "[12] Patrick Esser, Johnathan Chiu, Parmida Atighechian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In ICCV, pages 7346-7356, 2023. 2, 3", + "[13] Rafail Fridman, Amit Abecasis, Yoni Kasten, and Tali Dekel. Scenescape: Text-driven consistent scene generation. arXiv preprint arXiv:2302.01133, 2023. 2", + "[14] Songwei Ge, Seungjun Nah, Guilin Liu, Tyler Poon, Andrew Tao, Bryan Catanzaro, David Jacobs, Jia-Bin Huang, Ming-Yu Liu, and Yogesh Balaji. Preserve your own correlation: A noise prior for video diffusion models. In ICCV, pages 22930-22941, 2023. 3, 5", + "[15] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel." + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 3", + "[16] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. NeurIPS, 27, 2014. 2", + "[17] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 3", + "[18] Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. Latent video diffusion models for high-fidelity video generation with arbitrary lengths. arXiv preprint arXiv:2211.13221, 2022. 3", + "[19] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 2", + "[20] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. NeurIPS, 33:6840-6851, 2020. 5", + "[21] Jonathan Ho, William Chan, Chitwan Sahara, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 3, 4", + "[22] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via Transformers. In ICLR, 2023. 2, 3, 5", + "[23] Hanzhuo Huang, Yufan Feng, Cheng Shi, Lan Xu, Jingyi Yu, and Sibei Yang. Free-bloom: Zero-shot text-to-video generator with llm director and ldm animator. arXiv preprint arXiv:2309.14494, 2023. 3", + "[24] Lianghua Huang, Di Chen, Yu Liu, Yujun Shen, Deli Zhao, and Jingren Zhou.Composer: Creative and controllable image synthesis with composable conditions.ICML, 2023. 
3", + "[25] Hueihan Jhuang, Juergen Gall, Silvia Zuffi, Cordelia Schmid, and Michael J Black. Towards understanding action recognition. In ICCV, pages 3192-3199, 2013. 4", + "[26] Minguk Kang, Jun-Yan Zhu, Richard Zhang, Jaesik Park, Eli Shechtman, Sylvain Paris, and Taesung Park. Scaling up GANs for text-to-image synthesis. In CVPR, pages 10124-10134, 2023. 2", + "[27] Bahjat Kawar, Shiran Zada, Oran Lang, Omer Tov, Huiwen Chang, Tali Dekel, Inbar Mosseri, and Michal Irani. Imagic: Text-based real image editing with diffusion models. In CVPR, pages 6007-6017, 2023. 2", + "[28] Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439, 2023. 2, 3, 6", + "[29] Ariel Lapid, Idan Achituve, Lior Bracha, and Ethan Fetaya. Gd-vdm: Generated depth for better diffusion-based video generation. arXiv preprint arXiv:2306.11173, 2023. 3", + "[30] Jiangtong Li, Li Niu, and Liqing Zhang. Action-aware embedding enhancement for image-text retrieval. In AAAI, pages 1323-1331, 2022. 4" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "6580", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[31] Shaoteng Liu, Yuechen Zhang, Wenbo Li, Zhe Lin, and Jiaya Jia. Video-p2p: Video editing with cross-attention control. arXiv preprint arXiv:2303.04761, 2023. 3", + "[32] Haoyu Lu, Guoxing Yang, Nanyi Fei, Yuqi Huo, Zhiwu Lu, Ping Luo, and Mingyu Ding. Vdt: An empirical study on video diffusion with Transformers. arXiv preprint arXiv:2305.13311, 2023.", + "[33] Zhengxiong Luo, Dayou Chen, Yingya Zhang, Yan Huang, Liang Wang, Yujun Shen, Deli Zhao, Jingren Zhou, and Tieniu Tan. Videofusion: Decomposed diffusion models for high-quality video generation. In CVPR, pages 10209-10218, 2023. 2, 3", + "[34] Yifeng Ma, Shiwei Zhang, Jiayu Wang, Xiang Wang, Yingya Zhang, and Zhidong Deng. Dreamtalk: When expressive talking head generation meets diffusion probabilistic models. arXiv preprint arXiv:2312.09767, 2023. 2", + "[35] Eyal Molad, Eliahu Horwitz, Dani Valevski, Alex Rav Acha, Yossi Matias, Yael Pritch, Yaniv Leviathan, and Yedid Hoshen. Dreamix: Video diffusion models are general video editors. arXiv preprint arXiv:2302.01329, 2023. 3", + "[36] Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhonggang Qi, Ying Shan, and Xiaohu Qie. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453, 2023. 3", + "[37] Haomiao Ni, Changhao Shi, Kai Li, Sharon X Huang, and Martin Renqiang Min. Conditional image-to-video generation with latent flow diffusion models. In CVPR, pages 18444-18455, 2023. 3", + "[38] Alexander Quinn Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. In ICML, pages 16784-16804. PMLR, 2022. 2", + "[39] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing attentions for zero-shot text-based video editing. In ICCV, 2023. 2, 3", + "[40] Zhiwu Qing, Shiwei Zhang, Jiayu Wang, Xiang Wang, Yujie Wei, Yingya Zhang, Changxin Gao, and Nong Sang. 
Hierarchical spatio-temporal decoupling for text-to-video generation. arXiv preprint arXiv:2312.04483, 2023. 2", + "[41] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763. PMLR, 2021. 2, 4", + "[42] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 2, 3", + "[43] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, pages 10684-10695, 2022. 2, 3, 4", + "[44] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, pages 22500-22510, 2023." + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[45] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. NeurIPS, 35:36479-36494, 2022. 2", + "[46] Tim Salimans and Jonathan Ho. Progressive distillation for fast sampling of diffusion models. arXiv preprint arXiv:2202.00512, 2022. 4", + "[47] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114, 2021. 4", + "[48] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. NeurIPS, 35:25278-25294, 2022. 2, 4, 5", + "[49] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in GANs. In CVPR, pages 1532-1540, 2021. 2", + "[50] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. ICLR, 2023. 2, 3, 5", + "[51] Ivan Skorokhodov, Sergey Tulyakov, and Mohamed Elhoseiny. StyleGAN-v: A continuous video generator with the price, image quality and perks of StyleGAN2. In CVPR, pages 3626-3636, 2022. 3", + "[52] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In ICLR, 2021. 5", + "[53] Sergey Tulyakov, Ming-Yu Liu, Xiaodong Yang, and Jan Kautz. MocoGAN: Decomposing motion and content for video generation. In CVPR, pages 1526-1535, 2018. 2, 3", + "[54] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 2, 3, 4, 5, 6, 8", + "[55] Limin Wang, Zhan Tong, Bin Ji, and Gangshan Wu. Tdn: Temporal difference networks for efficient action recognition. In CVPR, pages 1895-1904, 2021. 4", + "[56] Wen Wang, Kangyang Xie, Zide Liu, Hao Chen, Yue Cao, Xinlong Wang, and Chunhua Shen. Zero-shot video editing using off-the-shelf image diffusion models. arXiv preprint arXiv:2303.17599, 2023. 
3", + "[57] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 3, 4", + "[58] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. NeurIPS, 2023. 2, 3, 4, 5, 6", + "[59] Xiang Wang, Shiwei Zhang, Zhiwu Qing, Changxin Gao, Yingya Zhang, Deli Zhao, and Nong Sang. Molo: Motion-" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "6581", + "bbox": [ + 482, + 945, + 513, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "augmented long-short contrastive learning for few-shot action recognition. In CVPR, pages 18011-18021, 2023. 4", + "[60] Xiang Wang, Shiwei Zhang, Han Zhang, Yu Liu, Yingya Zhang, Changxin Gao, and Nong Sang. Videolcm: Video latent consistency model. arXiv preprint arXiv:2312.09109, 2023. 2", + "[61] Yaohui Wang, Piotr Bilinski, Francois Bremond, and Antitza Dantcheva. G3an: Disentangling appearance and motion for video generation. In CVPR, pages 5264-5273, 2020. 3", + "[62] Yaohui Wang, Xinyuan Chen, Xin Ma, Shangchen Zhou, Ziqi Huang, Yi Wang, Ceyuan Yang, Yinan He, Jiashuo Yu, Peiqing Yang, et al. Lavie: High-quality video generation with cascaded latent diffusion models. arXiv preprint arXiv:2309.15103, 2023. 3", + "[63] Yi Wang, Yinan He, Yizhuo Li, Kunchang Li, Jiashuo Yu, Xin Ma, Xinyuan Chen, Yaohui Wang, Ping Luo, Ziwei Liu, et al. Intervid: A large-scale video-text dataset for multimodal understanding and generation. arXiv preprint arXiv:2307.06942, 2023. 3", + "[64] Yuhan Wang, Liming Jiang, and Chen Change Loy. Styleinv: A temporal style modulated inversion network for unconditional video generation. In ICCV, pages 22851-22861, 2023. 3", + "[65] Yujie Wei, Shiwei Zhang, Zhiwu Qing, Hangjie Yuan, Zhiheng Liu, Yu Liu, Yingya Zhang, Jingren Zhou, and Hongming Shan. Dreamvideo: Composing your dream videos with customized subject and motion. arXiv preprint arXiv:2312.04433, 2023. 3", + "[66] Chenfei Wu, Jian Liang, Lei Ji, Fan Yang, Yuejian Fang, Daxin Jiang, and Nan Duan. Nüwa: Visual synthesis pretraining for neural visual world creation. In ECCV, pages 720-736. Springer, 2022. 5", + "[67] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. In ICCV, pages 7623-7633, 2023. 2, 3", + "[68] Jinbo Xing, Menghan Xia, Yuxin Liu, Yuechen Zhang, Yong Zhang, Yingqing He, Hanyuan Liu, Haoxin Chen, Xiaodong Cun, Xintao Wang, et al. Make-your-video: Customized video generation using textual and structural guidance. arXiv preprint arXiv:2306.00943, 2023. 3", + "[69] Zhen Xing, Qi Dai, Han Hu, Zuxuan Wu, and Yu-Gang Jiang. Simda: Simple diffusion adapter for efficient video generation. arXiv preprint arXiv:2308.09710, 2023. 2, 3", + "[70] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. In CVPR, pages 5288-5296, 2016. 5", + "[71] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In CVPR, 2022. 
3", + "[72] Shengming Yin, Chenfei Wu, Jian Liang, Jie Shi, Houqiang Li, Gong Ming, and Nan Duan. Dragnuwa: Fine-grained control in video generation by integrating text, image, and trajectory. arXiv preprint arXiv:2308.08089, 2023. 3" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[73] Lijun Yu, Yong Cheng, Kihyuk Sohn, José Lezama, Han Zhang, Huiwen Chang, Alexander G Hauptmann, Ming-Hsuan Yang, Yuan Hao, Irfan Essa, et al. Magvit: Masked generative video Transformer. In CVPR, pages 10459-10469, 2023. 3", + "[74] Sihyun Yu, Kihyuk Sohn, Subin Kim, and Jinwoo Shin. Video probabilistic diffusion models in projected latent space. In CVPR, pages 18456-18466, 2023. 3", + "[75] Hangjie Yuan, Shiwei Zhang, Xiang Wang, Yujie Wei, Tao Feng, Yining Pan, Yingya Zhang, Ziwei Liu, Samuel Albanie, and Dong Ni. Instructvideo: Instructing video diffusion models with human feedback. arXiv preprint arXiv:2312.12490, 2023.", + "[76] David Junhao Zhang, Jay Zhangjie Wu, Jia-Wei Liu, Rui Zhao, Lingmin Ran, Yuchao Gu, Difei Gao, and Mike Zheng Shou. Show-1: Marrying pixel and latent diffusion models for text-to-video generation. arXiv preprint arXiv:2309.15818, 2023. 3", + "[77] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In ICCV, pages 3836-3847, 2023. 3, 4", + "[78] Shiwei Zhang, Jiayu Wang, Yingya Zhang, Kang Zhao, Hangjie Yuan, Zhiwu Qin, Xiang Wang, Deli Zhao, and Jingren Zhou. I2vgen-xl: High-quality image-to-video synthesis via cascaded diffusion models. arXiv preprint arXiv:2311.04145, 2023. 2", + "[79] Yabo Zhang, Yuxiang Wei, Dongsheng Jiang, Xiaopeng Zhang, Wangmeng Zuo, and Qi Tian. Controlvideo: Training-free controllable text-to-video generation. arXiv preprint arXiv:2305.13077, 2023. 2, 3", + "[80] Zhang Zhang and Dacheng Tao. Slow feature analysis for human action recognition. TPAMI, 34(3):436-450, 2012. 4", + "[81] Min Zhao, Rongzhen Wang, Fan Bao, Chongxuan Li, and Jun Zhu. Controlvideo: Adding conditional control for one shot text-to-video editing. arXiv preprint arXiv:2305.17098, 2023. 3", + "[82] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 5", + "[83] Junbao Zhuo, Xingyu Zhao, Shuhui Wang, Huimin Ma, and Qingming Huang. Synthesizing videos from images for image-to-video adaptation. In ACMMM, pages 8294-8303, 2023. 
2" + ], + "bbox": [ + 501, + 92, + 890, + 712 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "6582", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/873ecf7b-8814-4ac3-a70f-20982249ac1d_model.json b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/873ecf7b-8814-4ac3-a70f-20982249ac1d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..cbaf19885627ef195b22686acfa4afcd313a1e63 --- /dev/null +++ b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/873ecf7b-8814-4ac3-a70f-20982249ac1d_model.json @@ -0,0 +1,2455 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.021 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation." + }, + { + "type": "header", + "bbox": [ + 0.325, + 0.018, + 0.723, + 0.031 + ], + "angle": 0, + "content": "Except for this watermark, it is identical to the accepted version;" + }, + { + "type": "header", + "bbox": [ + 0.294, + 0.032, + 0.755, + 0.047 + ], + "angle": 0, + "content": "the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.124, + 0.131, + 0.848, + 0.154 + ], + "angle": 0, + "content": "A Recipe for Scaling up Text-to-Video Generation with Text-free Videos" + }, + { + "type": "text", + "bbox": [ + 0.12, + 0.179, + 0.844, + 0.219 + ], + "angle": 0, + "content": "Xiang Wang\\(^{1*}\\) Shiwei Zhang\\(^{2\\dagger}\\) Hangjie Yuan\\(^{3}\\) Zhiwu Qing\\(^{1}\\) Biao Gong\\(^{2}\\) Yingya Zhang\\(^{2}\\) Yujun Shen\\(^{4}\\) Changxin Gao\\(^{1}\\) Nong Sang\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.223, + 0.874, + 0.28 + ], + "angle": 0, + "content": "\\(^{1}\\)Key Laboratory of Image Processing and Intelligent Control, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology \\(^{2}\\)Alibaba Group \\(^{3}\\)Zhejiang University \\(^{4}\\)Ant Group" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.279, + 0.756, + 0.302 + ], + "angle": 0, + "content": "{wxiang,qzw,cgao,nsang}@hust.edu.cn,{zhangjin.zsw,yingya.zyy}@alibaba-inc.com hj.yuan@zju.edu.cn,{a.biao.gong,shenyujun0302}@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.306, + 0.825, + 0.317 + ], + "angle": 0, + "content": "\"Close-up of Caucasian mother and baby girl sitting at windowsill and reading book. Young woman educating daughter at home.\"" + }, + { + "type": "image", + "bbox": [ + 0.1, + 0.317, + 0.868, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.1, + 0.45, + 0.868, + 0.679 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.679, + 0.893, + 0.706 + ], + "angle": 0, + "content": "Figure 1. Example video results generated by the proposed TF-T2V on text-to-video generation and compositional video synthesis tasks without training on any video-text pairs." 
+ }, + { + "type": "title", + "bbox": [ + 0.235, + 0.725, + 0.314, + 0.74 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.757, + 0.471, + 0.879 + ], + "angle": 0, + "content": "Diffusion-based text-to-video generation has witnessed impressive progress in the past year yet still falls behind text-to-image generation. One of the key reasons is the limited scale of publicly available data (e.g., 10M video-text pairs in WebVid10M vs. 5B image-text pairs in LAION), considering the high cost of video captioning. Instead, it could be far easier to collect unlabeled clips from video platforms like YouTube. Motivated by this, we come up" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.726, + 0.895, + 0.893 + ], + "angle": 0, + "content": "with a novel text-to-video generation framework, termed \\( TF-T2V \\), which can directly learn with text-free videos. The rationale behind is to separate the process of text decoding from that of temporal modeling. To this end, we employ a content branch and a motion branch, which are jointly optimized with weights shared. Following such a pipeline, we study the effect of doubling the scale of training set (i.e., video-only WebVid10M) with some randomly collected text-free videos and are encouraged to observe the performance improvement (FID from 9.67 to 8.19 and FVD from 484 to 441), demonstrating the scalability of" + }, + { + "type": "page_footnote", + "bbox": [ + 0.101, + 0.888, + 0.388, + 0.901 + ], + "angle": 0, + "content": "* Intern at Alibaba Group. † Corresponding authors." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6572" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.075, + 0.092, + 0.472, + 0.198 + ], + "angle": 0, + "content": "our approach. We also find that our model could enjoy sustainable performance gain (FID from 8.19 to 7.64 and FVD from 441 to 366) after reintroducing some text labels for training. Finally, we validate the effectiveness and generalizability of our ideology on both native text-to-video generation and compositional video synthesis paradigms. Code and models will be publicly available at here." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.211, + 0.21, + 0.227 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.236, + 0.47, + 0.508 + ], + "angle": 0, + "content": "Video generation aims to synthesize realistic videos that possess visually appealing spatial contents and temporally coherent motions. It has witnessed unprecedented progress in recent years with the advent of deep generative techniques [22, 53], especially with the emergence of video diffusion models [4, 34, 40, 54, 60, 67, 78]. Pioneering approaches [28, 33, 67] utilize pure image diffusion models or fine-tuning on a small amount of video-text data to synthesize videos, leading to temporally discontinuous results due to insufficient motion perception [39, 79]. To achieve plausible results, current text-to-video methods like VideoLDM [4] and ModelScopeT2V [54] usually insert temporal blocks into latent 2D-UNet [43] and train the model on expansive video-text datasets, e.g., WebVid10M [2]. To enable more controllable generation, VideoComposer [58] proposes a compositional paradigm that incorporates additional conditions (e.g., depth, sketch, motion vectors, etc.) to guide synthesis, allowing customizable creation." 
+ }, + { + "type": "text", + "bbox": [ + 0.075, + 0.509, + 0.47, + 0.765 + ], + "angle": 0, + "content": "Despite this, the progress in text-to-video generation still falls behind text-to-image generation [42, 43]. One of the key reasons is the limited scale of publicly available videotext data, considering the high cost of video captioning [83]. Instead, it could be far easier to collect text-free video clips from media platforms like YouTube. There are some works sharing similar inspiration, Make-A-Video [50] and Gen-1 [12] employ a two-step strategy that first leverages a large (\\(\\sim\\)1B parameters) diffusion prior model [42] to convert text embedding into image embedding of CLIP [41] and then enters it into an image-conditioned generator to synthesize videos. However, the separate two-step manner may cause issues such as error accumulation [13], increased model size and latency [42, 69], and does not support text-conditional optimization if extra video-text data is available, leading to sub-optimal results. Moreover, the characteristics of scaling potential on video generation are still under-explored." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.766, + 0.47, + 0.902 + ], + "angle": 0, + "content": "In this work, we aim to train a single unified video diffusion model that allows text-guided video generation by exploiting the widely accessible text-free videos and explore its scaling trend. To achieve this, we present a novel two-branch framework named TF-T2V, where a content branch is designed for spatial appearance generation, and a motion branch specializes in temporal dynamics synthesis. More specifically, we utilize the publicly available image-text datasets such as LAION-5B [48] to learn text-guided" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.304 + ], + "angle": 0, + "content": "and image-guided spatial appearance generation. In the motion branch, we harness the video-only data to conduct image-conditioned video synthesis, allowing the temporal modules to learn intricate motion patterns without relying on textual annotations. Paired video-text data, if available, can also be incorporated into co-optimization. Furthermore, unlike previous methods that impose training loss on each frame individually, we introduce a temporal coherence loss to explicitly enforce the learning of correlations between adjacent frames, enhancing the continuity of generated videos. In this way, the proposed TF-T2V achieves text-to-video generation by assembling contents and motions with a unified model, overcoming the high cost of video captioning and eliminating the need for complex cascading steps." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.305, + 0.893, + 0.562 + ], + "angle": 0, + "content": "Notably, TF-T2V is a plug-and-play paradigm, which can be integrated into existing text-to-video generation and compositional video synthesis frameworks as shown in Fig. 1. Different from most prior works that rely heavily on video-text data and train models on the widely-used watermarked and low-resolution (around 360P) WebVid10M [2], TF-T2V opens up new possibilities for optimizing with text-free videos or partially paired video-text data, making it more scalable and versatile in widespread scenarios, such as high-definition video generation. To study the scaling trend, we double the scale of the training set with some randomly collected text-free videos and are encouraged to observe the performance improvement, with FID from 9.67 to 8.19 and FVD from 484 to 441. 
Extensive quantitative and qualitative experiments collectively demonstrate the effectiveness and scaling potential of the proposed TF-T2V in terms of synthetic continuity, fidelity, and controllability." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.581, + 0.642, + 0.597 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.609, + 0.892, + 0.655 + ], + "angle": 0, + "content": "In this section, we provide a brief review of relevant literature on text-to-image generation, text-to-video generation, and compositional video synthesis." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.893, + 0.902 + ], + "angle": 0, + "content": "Text-to-image generation. Recently, text-to-image generation has made significant strides with the development of large-scale image-text datasets such as LAION-5B [48], allowing users to create high-resolution and photorealistic images that accurately depict the given natural language descriptions. Previous methods [16, 26, 49] primarily focus on synthesizing images by adopting generative adversarial networks (GANs) to estimate training sample distributions. Distinguished by the promising stability and scalability, diffusion-based generation models have attracted increasing attention [27, 42-45]. Diffusion models utilize iterative steps to gradually refine the generated image, resulting in improved quality and realism. Typically,Imagen [45] and GLIDE [38] explore text-conditional diffusion models and boost sample quality by applying classifier-free guidance [19]. DALL-E 2 [42] first leverages an image prior" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6573" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.092, + 0.699, + 0.308 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.092, + 0.868, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.309, + 0.894, + 0.381 + ], + "angle": 0, + "content": "Figure 2. Overall pipeline of TF-T2V, which consists of two branches. In the content branch, paired image-text data is leveraged to learn text-conditioned and image-conditioned spatial appearance generation. The motion branch supports the training of motion dynamic synthesis by feeding text-free videos (or partially paired video-text data if available). During the training stage, both branches are optimized jointly. Notably, TF-T2V can be seamlessly integrated into the compositional video synthesis framework by incorporating composable conditions. In inference, TF-T2V enables text-guided video generation by taking text prompts and random noise sequences as input." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.391, + 0.471, + 0.497 + ], + "angle": 0, + "content": "to bridge multi-modal embedding spaces and then learns a diffusion decoder to synthesize images in the pixel space. Stable Diffusion [43] introduces latent diffusion models that conduct iterative denoising processes at the latent level to save computational costs. There are also some works that generate customized and desirable images by incorporating additional spatial control signals [24, 36, 77]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.505, + 0.471, + 0.852 + ], + "angle": 0, + "content": "Text-to-video generation. This task poses additional challenges compared to text-to-image generation due to the temporal dynamics involved in videos. 
Various early techniques have been proposed to tackle this problem, such as recurrent neural networks combined with GANs [3, 51, 53, 61, 64] or transformer-based autoregressive models [22, 73]. With the subsequent advent of video diffusion models pretrained on large-scale video-text datasets [2, 63, 71], video content creation has demonstrated remarkable advances [1, 4, 6-9, 14, 15, 17, 18, 21, 23, 28, 31-33, 35, 37, 39, 56, 57, 62, 65, 67, 69, 74-76]. Imagen Video [21] learns cascaded pixel-level diffusion models to produce high-resolution videos. Following [42], Make-A-Video [50] introduces a two-step strategy that first maps the input text to image embedding by a large (\\(\\sim\\)1B parameters) diffusion prior model and then embeds the resulting embedding into an image-conditional video diffusion model to synthesize videos in pixel space. VideoLDM [4] and ModelScopeT2V [54] extend 2D-UNet into 3D-UNet by injecting temporal layers and operate a latent denoising process to save computational resources. In this paper, we present a single unified framework for text-to-video generation and study the scaling trend by harnessing widely accessible text-free videos." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Compositional video synthesis. Traditional text-to-video methods solely rely on textual descriptions to control the video generation process, limiting desired fine-grained cus-" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.391, + 0.895, + 0.664 + ], + "angle": 0, + "content": "tomization such as texture, object position, motion patterns, etc. To tackle this constraint and pursue higher controllability, several controllable video synthesis methods [8, 9, 12, 29, 58, 68, 72, 79, 81] have been proposed. These methods utilize additional control signals, such as depth or sketch, to guide the generation of videos. By incorporating extra structured guidance, the generated content can be precisely controlled and customized. Among these approaches, VideoComposer [58] stands out as a pioneering and versatile compositional technique. It integrates multiple conditioning signals including textual, spatial and temporal conditions within a unified framework, offering enhanced controllability, compositionality, and realism in the generated videos. Despite the remarkable quality, these methods still rely on high-quality video-text data to unleash powerful and customizable synthesis. In contrast, our method can be directly merged into existing controllable frameworks to customize videos by exploiting text-free videos." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.678, + 0.591, + 0.693 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.703, + 0.893, + 0.765 + ], + "angle": 0, + "content": "We first provide a brief introduction to the preliminaries of the video diffusion model. Then, we will elaborate on the mechanisms of TF-T2V in detail. The overall framework of the proposed TF-T2V is displayed in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.774, + 0.829, + 0.789 + ], + "angle": 0, + "content": "3.1. Preliminaries of video diffusion model" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.797, + 0.892, + 0.858 + ], + "angle": 0, + "content": "Diffusion models involve a forward diffusion process and a reverse iterative denoising stage. 
The forward process of diffusion models is gradually imposing random noise to clean data \\( x_0 \\) in a Markovian chain:" + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.869, + 0.892, + 0.901 + ], + "angle": 0, + "content": "\\[\nq \\left(x _ {t} \\mid x _ {t - 1}\\right) = \\mathcal {N} \\left(x _ {t}; \\sqrt {1 - \\beta_ {t - 1}} x _ {t - 1}, \\beta_ {t} I\\right), t = 1, \\dots , T \\tag {1}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6574" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.47, + 0.168 + ], + "angle": 0, + "content": "where \\(\\beta_{t} \\in (0,1)\\) is a noise schedule and \\(T\\) is the total time step. When \\(T\\) is sufficiently large, e.g. \\(T = 1000\\), the resulting \\(x_{T}\\) is nearly a random Gaussian distribution \\(\\mathcal{N}(0,I)\\). The role of diffusion model is to denoise \\(x_{T}\\) and learn to iteratively estimate the reversed process:" + }, + { + "type": "equation", + "bbox": [ + 0.12, + 0.18, + 0.47, + 0.198 + ], + "angle": 0, + "content": "\\[\np _ {\\theta} (x _ {t - 1} | x _ {t}) = \\mathcal {N} (x _ {t - 1}; \\mu_ {\\theta} (x _ {t}, t), \\sum_ {\\theta} (x _ {t}, t)) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.208, + 0.469, + 0.255 + ], + "angle": 0, + "content": "We usually train a denoising model \\(\\hat{x}_{\\theta}\\) parameterized by \\(\\theta\\) to approximate the original data \\(x_0\\) and optimize the following v-prediction [21, 46] problem:" + }, + { + "type": "equation", + "bbox": [ + 0.165, + 0.265, + 0.469, + 0.283 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {b a s e}} = \\mathbb {E} _ {\\theta} [ \\| v - \\hat {x} _ {\\theta} (x _ {t}, t, c) \\| _ {2} ^ {2} ] \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.295, + 0.469, + 0.415 + ], + "angle": 0, + "content": "where \\( c \\) is conditional information such as textual prompt, and \\( v \\) is the parameterized prediction objective. In representative video diffusion models [4, 54, 58], the denoising model \\( \\hat{x}_{\\theta} \\) is a latent 3D-UNet [4, 54] modified from its 2D version [43] by inserting additional temporal blocks, which is optimized in the latent feature space by applying a variational autoencoder [11], and Eq. (3) is applied on each frame of the input video to train the whole model." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.425, + 0.176, + 0.439 + ], + "angle": 0, + "content": "3.2. TF-T2V" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.45, + 0.47, + 0.645 + ], + "angle": 0, + "content": "The objective of TF-T2V is to learn a text-conditioned video diffusion model to create visually appealing and temporally coherent videos with text-free videos or partially paired video-text data. Without loss of generality, we first describe the workflow of our TF-T2V in the scenario where only text-free video is used. With merely text-free videos available for training, it is challenging to guide content creation by textual information since there lacks text-visual correspondence. To tackle this issue, we propose to resort to web-scale and high-quality image-text datasets [47, 48], which are publicly accessible on the Internet. However, this raises another question: how can we leverage the image-text data and text-free videos in a unified framework?" 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.646, + 0.469, + 0.827 + ], + "angle": 0, + "content": "Recalling the network architecture in 3D-UNet, the spatial modules mainly focus on appearance modeling, and the temporal modules primarily aim to operate motion coherence. The intuition is that we can utilize image-text data to learn text-conditioned spatial appearance generation and adopt high-quality text-free videos to guide consistent motion dynamic synthesis. In this way, we can perform text-to-video generation in a single model to synthesize high-quality and consistent videos during the inference stage. Based on this, the proposed TF-T2V consists of two branches: a content branch for spatial appearance generation and a motion branch for motion dynamic synthesis." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.847, + 0.343, + 0.862 + ], + "angle": 0, + "content": "3.2.1 Spatial appearance generation" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.903 + ], + "angle": 0, + "content": "Like previous text-to-image works [43, 77], the content branch of TF-T2V takes a noised image \\( I_{image} \\in H \\times \\)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.091, + 0.892, + 0.258 + ], + "angle": 0, + "content": "\\(W \\times C\\) as input, where \\(H\\), \\(W\\), \\(C\\) are the height, width, and channel dimensions respectively, and employs conditional signals (i.e., text and image embeddings) to offer semantic guidance for content generation. This branch primarily concentrates on optimizing the spatial modules in the video diffusion model and plays a crucial role in determining appealing visual quality. In order to ensure that each condition can also control the created content separately, we randomly drop text or image embeddings with a certain probability during training. The text and image encoders from CLIP [41] are adopted to encode embeddings." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.283, + 0.735, + 0.298 + ], + "angle": 0, + "content": "3.2.2 Motion dynamic synthesis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.309, + 0.892, + 0.475 + ], + "angle": 0, + "content": "The pursuit of producing highly temporally consistent videos is a unique hallmark of video creation. Recent advancements [4, 54, 57, 58] in the realm of video synthesis usually utilize large-scale video-text datasets such as WebVid10M [2] to achieve coherent video generation. However, acquiring large-scale video-text pairs consumes extensive manpower and time, hindering the scaling up of video diffusion models. To make matters worse, the widely used WebVid10M is a watermarked and low-resolution (around 360P) dataset, resulting in unsatisfactory video creation that cannot meet the high-quality video synthesis requirements." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.477, + 0.892, + 0.673 + ], + "angle": 0, + "content": "To mitigate the above issues, we propose to leverage high-quality text-free videos that are easily accessible on video media platforms, e.g., YouTube and TikTok. To fully excavate the abundant motion dynamics within the text-free videos, we train a image-conditioned model. By optimizing this image-to-video generation task, the temporal modules in the video diffusion model can learn to perceive and model diverse motion dynamics. 
Specifically, given a noised video \\( I_{video} \\in F \\times H \\times W \\times C \\), where \\( F \\) is the temporal length, the motion branch of TF-T2V learns to recover the undisturbed video guided by the image embedding. The image embedding is extracted from the center frame of the original video by applying CLIP's image encoder [41]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.673, + 0.892, + 0.794 + ], + "angle": 0, + "content": "Since large-scale image-text data used for training contains abundant movement intentions [30], TF-T2V can achieve text-to-video generation by assembling spatial appearances involving motion trends and predicted motion dynamics. When extra paired video-text data is available, we conduct both text-to-video and image-to-video generation based on video-text pairs to train TF-T2V and further enhance the perception ability for desirable textual control." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In addition, we notice that previous works apply the training loss (i.e., Eq. (3)) on each frame of the input video individually without considering temporal correlations between frames, suffering from incoherent appearances and motions. Inspired by the early study [25, 55, 59, 80] finding that the difference between two adjacent frames usually contains motion patterns, e.g., dynamic trajectory, we thus" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6575" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.892, + 0.103 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison with state-of-the-art methods for text-to-video task on MSR-VTT in terms of FID, FVD, and CLIPSIM." + }, + { + "type": "table", + "bbox": [ + 0.087, + 0.105, + 0.881, + 0.281 + ], + "angle": 0, + "content": "
Method | Zero-shot | Parameters | FID (↓) | FVD (↓) | CLIPSIM (↑)
Nüwa [66] | No | - | 47.68 | - | 0.2439
CogVideo (Chinese) [22] | Yes | 15.5B | 24.78 | - | 0.2614
CogVideo (English) [22] | Yes | 15.5B | 23.59 | 1294 | 0.2631
MagicVideo [82] | Yes | - | - | 1290 | -
Make-A-Video [50] | Yes | 9.7B | 13.17 | - | 0.3049
ModelScopeT2V [54] | Yes | 1.7B | 11.09 | 550 | 0.2930
VideoComposer [58] | Yes | 1.9B | 10.77 | 580 | 0.2932
Latent-Shift [1] | Yes | 1.5B | 15.23 | - | 0.2773
VideoLDM [4] | Yes | 4.2B | - | - | 0.2929
PYoCo [14] | Yes | - | 9.73 | - | -
TF-T2V (WebVid10M) | Yes | 1.8B | 9.67 | 484 | 0.2953
TF-T2V (WebVid10M+Internal10M) | Yes | 1.8B | 8.19 | 441 | 0.2991
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.293, + 0.47, + 0.325 + ], + "angle": 0, + "content": "propose a temporal coherence loss that utilizes the frame difference as an additional supervisory signal:" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.331, + 0.47, + 0.353 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {c o h e r e n c e}} = \\mathbb {E} _ {\\theta} [ \\sum_ {j = 1} ^ {F - 1} | | (v _ {j + 1} - v _ {j}) - (o _ {j + 1} - o _ {j}) | | _ {2} ^ {2} ] \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.36, + 0.47, + 0.466 + ], + "angle": 0, + "content": "where \\( o_j \\) and \\( v_j \\) are the predicted frame and corresponding ground truth. This loss term measures the discrepancy between the predicted frame differences and the ground truth frame differences of the input parameterized video. By minimizing Eq. (4), TF-T2V helps to alleviate frame flickering and ensures that the generated videos exhibit seamless transitions and promising temporal dynamics." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.483, + 0.291, + 0.499 + ], + "angle": 0, + "content": "3.2.3 Training and inference" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.507, + 0.47, + 0.568 + ], + "angle": 0, + "content": "In order to mine the complementary advantages of spatial appearance generation and motion dynamic synthesis, we jointly optimize the entire model in an end-to-end manner. The total loss can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.171, + 0.579, + 0.469, + 0.595 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {b a s e}} + \\lambda \\mathcal {L} _ {\\text {c o h e r e n c e}} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.605, + 0.469, + 0.65 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_{base}\\) is imposed on video and image together by treating the image as a \"single frame\" video, and \\(\\lambda\\) is a balance coefficient that is set empirically to 0.1." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.65, + 0.469, + 0.755 + ], + "angle": 0, + "content": "After training, we can perform text-guided video generation to synthesize temporally consistent video content that aligns well with the given text prompt. Moreover, TF-T2V is a general framework and can also be inserted into existing compositional video synthesis paradigm [58] by incorporating additional spatial and temporal structural conditions, allowing for customized video creation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.768, + 0.21, + 0.786 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.794, + 0.469, + 0.84 + ], + "angle": 0, + "content": "In this section, we present a comprehensive quantitative and qualitative evaluation of the proposed TF-T2V on text-to-video generation and composition video synthesis." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.848, + 0.265, + 0.865 + ], + "angle": 0, + "content": "4.1. Experimental setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Implementation details. TF-T2V is built on two typical open-source baselines, i.e., ModelScopeT2V [54] and" + }, + { + "type": "table_caption", + "bbox": [ + 0.503, + 0.292, + 0.887, + 0.305 + ], + "angle": 0, + "content": "Table 2. Human preference results on text-to-video generation." 
+ }, + { + "type": "table", + "bbox": [ + 0.507, + 0.306, + 0.88, + 0.363 + ], + "angle": 0, + "content": "
Method | Text alignment | Visual quality | Temporal coherence
ModelScopeT2V [54] | 83.5% | 74.0% | 81.3%
TF-T2V | 86.5% | 87.0% | 92.5%
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.376, + 0.892, + 0.602 + ], + "angle": 0, + "content": "VideoComposer [58]. DDPM sampler [20] with \\( T = 1000 \\) steps is adopted for training, and we employ DDIM [52] with 50 steps for inference. We optimize TF-T2V using AdamW optimizer with a learning rate of 5e-5. For input videos, we sample 16 frames from each video at 4FPS and crop a \\( 448 \\times 256 \\) region at the center as the basic setting. Note that we can also easily train high-definition video diffusion models by collecting high-quality text-free videos (see examples in the Appendix). LAION-5B [48] is utilized to provide image-text pairs. Unless otherwise stated, we treat WebVid10M, which includes about 10.7M video-text pairs, as a text-free dataset to train TF-T2V and do not use any textual annotations. To study scaling trends, we gathered about 10M high-quality videos without text labels from internal data, termed the Internal10M dataset." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.607, + 0.892, + 0.819 + ], + "angle": 0, + "content": "Metrics. (i) To evaluate text-to-video generation, following previous works [4, 54], we leverage the standard Fréchet Inception Distance (FID), Fréchet Video Distance (FVD), and CLIP Similarity (CLIPSIM) as quantitative evaluation metrics and report results on MSR-VTT dataset [70]. (ii) For controllability evaluation, we leverage depth error, sketch error, and end-point-error (EPE) [10] to verify whether the generated videos obey the control of input conditions. Depth error measures the divergence between the input depth conditions and the eliminated depth of the synthesized video. Similarly, sketch error examines the sketch control. EPE evaluates the flow consistency between the reference video and the generated video. In addition, human evaluation is also introduced to validate our method." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.831, + 0.848 + ], + "angle": 0, + "content": "4.2. Evaluation on text-to-video generation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Tab. 1 displays the comparative quantitative results with existing state-of-the-art methods. We observe that TF-T2V achieves remarkable performance under various metrics." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6576" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.09, + 0.522, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.591, + 0.092, + 0.807, + 0.102 + ], + "angle": 0, + "content": "\"Portrait of smiling young woman outdoors\"" + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.103, + 0.865, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.101, + 0.29, + 0.52, + 0.485 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.29, + 0.865, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.485, + 0.891, + 0.511 + ], + "angle": 0, + "content": "Figure 3. Qualitative comparison on text-to-video generation. Three representative open-source text-to-video approaches are compared, including ModelScopeT2V [54], Text2video-Zero [28] and ZeroScope [5]. Please refer to the Appendix for videos and more comparisons." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.079, + 0.522, + 0.467, + 0.536 + ], + "angle": 0, + "content": "Table 3. Evaluation of structure control based on depth signals." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.537, + 0.458, + 0.593 + ], + "angle": 0, + "content": "
Method | Condition | Depth error (↓)
VideoComposer [58] | Text | 0.382
VideoComposer [58] | Text and depth | 0.217
TF-T2V | Text and depth | 0.209
" + }, + { + "type": "table_caption", + "bbox": [ + 0.078, + 0.6, + 0.468, + 0.613 + ], + "angle": 0, + "content": "Table 4. Evaluation of structure control based on sketch signals." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.614, + 0.462, + 0.671 + ], + "angle": 0, + "content": "
Method | Condition | Sketch error (↓)
VideoComposer [58] | Text | 0.1854
VideoComposer [58] | Text and sketch | 0.1161
TF-T2V | Text and sketch | 0.1146
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.688, + 0.47, + 0.822 + ], + "angle": 0, + "content": "Notably, TF-T2V trained on WebVid10M and Internal10M obtains higher performance than the counterpart on WebVid10M, revealing promising scalable capability. We show the qualitative visualizations in Fig. 3. From the results, we can find that compared with previous methods, TF-T2V obtains impressive video creation in terms of both temporal continuity and visual quality. The human assessment in Tab. 2 also reveals the above observations. The user study is performed on 100 randomly synthesized videos." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.833, + 0.452, + 0.849 + ], + "angle": 0, + "content": "4.3. Evaluation on compositional video synthesis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "We compare the controllability of TF-T2V and VideoComposer on 1,000 generated videos in terms of depth control (Tab. 3), sketch control (Tab. 4) and motion control" + }, + { + "type": "table_caption", + "bbox": [ + 0.503, + 0.522, + 0.889, + 0.535 + ], + "angle": 0, + "content": "Table 5. Evaluation of motion control based on motion vectors." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.537, + 0.882, + 0.593 + ], + "angle": 0, + "content": "
Method | Condition | EPE (↓)
VideoComposer [58] | Text | 4.13
VideoComposer [58] | Text and motion vector | 1.98
TF-T2V | Text and motion vector | 1.88
" + }, + { + "type": "table_caption", + "bbox": [ + 0.505, + 0.6, + 0.885, + 0.613 + ], + "angle": 0, + "content": "Table 6. Human evaluations on compositional video synthesis." + }, + { + "type": "table", + "bbox": [ + 0.506, + 0.614, + 0.88, + 0.671 + ], + "angle": 0, + "content": "
Method | Structure alignment | Visual quality | Temporal coherence
VideoComposer [58] | 79.0% | 66.0% | 77.5%
TF-T2V | 89.0% | 79.5% | 84.5%
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.688, + 0.892, + 0.824 + ], + "angle": 0, + "content": "(Tab. 5). The above experimental evaluations highlight the effectiveness of TF-T2V by leveraging text-free videos. In Fig. 4 and 5, we show the comparison of TF-T2V and existing methods on compositional video generation. We notice that TF-T2V exhibits high-fidelity and consistent video generation. In addition, we conduct a human evaluation on 100 randomly sampled videos and report the results in Tab. 6. The preference assessment provides further evidence of the superiority of the proposed TF-T2V." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.833, + 0.65, + 0.849 + ], + "angle": 0, + "content": "4.4. Ablation study" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Effect of temporal coherence loss. To enhance temporal consistency, we propose a temporal coherence loss. In Tab. 7, we show the effectiveness of the proposed tem" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6577" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.101, + 0.089, + 0.526, + 0.414 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.089, + 0.868, + 0.414 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.414, + 0.892, + 0.441 + ], + "angle": 0, + "content": "Figure 4. Qualitative comparison on compositional depth-to-video generation. The videos are generated by taking textual prompts and structural guidance as conditions. Compared with existing methods, TF-T2V yields more structural compliance and high-fidelity results." + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.442, + 0.523, + 0.707 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.442, + 0.848, + 0.707 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.707, + 0.89, + 0.734 + ], + "angle": 0, + "content": "Figure 5. Qualitative comparison on compositional sketch-to-video generation. The videos are generated by taking textual descriptions and structural guidance as conditions. Compared with other methods, TF-T2V produces more realistic and consistent results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.746, + 0.47, + 0.837 + ], + "angle": 0, + "content": "poral coherence loss in terms of frame consistency. The metric results are obtained by calculating the average CLIP similarity of two consecutive frames in 1,000 videos. We further display the qualitative comparative results in Fig. 6 and observe that temporal coherence loss helps to alleviate temporal discontinuity such as color shift." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.848, + 0.405, + 0.865 + ], + "angle": 0, + "content": "4.5. Evaluation on semi-supervised setting" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Through the above experiments and observations, we verify that text-free video can help improve the continuity" + }, + { + "type": "table_caption", + "bbox": [ + 0.527, + 0.745, + 0.865, + 0.758 + ], + "angle": 0, + "content": "Table 7. Text-to-video evaluation on frame consistency." + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.759, + 0.878, + 0.803 + ], + "angle": 0, + "content": "
Method | Frame consistency (%) ↑
w/o temporal coherence loss | 89.71
TF-T2V | 91.06
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.826, + 0.892, + 0.903 + ], + "angle": 0, + "content": "and quality of generated video. As previously stated, TF-T2V also supports the combination of annotated videotext data and text-free videos to train the model, i.e., the semi-supervised manner. The annotated text can provide additional fine-grained motion signals, enhancing the align-" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6578" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.088, + 0.528, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.535, + 0.089, + 0.885, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.093, + 0.296, + 0.877, + 0.311 + ], + "angle": 0, + "content": "Figure 6. Qualitative ablation study. The videos are generated by taking textual descriptions and structural guidance as conditions." + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.316, + 0.885, + 0.608 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.609, + 0.892, + 0.624 + ], + "angle": 0, + "content": "Figure 7. Qualitative evaluation on text-to-video generation with temporally-correlated text prompts involving the evolution of movement." + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.637, + 0.47, + 0.678 + ], + "angle": 0, + "content": "Table 8. Quantitative experiments on text-to-video generation. TF-T2V-Semi means the semi-supervised setting where labeled WebVid10M and text-free Internal10M are adopted." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.678, + 0.467, + 0.733 + ], + "angle": 0, + "content": "
Method | FID (↓) | FVD (↓) | CLIPSIM (↑)
ModelScopeT2V [54] | 11.09 | 550 | 0.2930
TF-T2V | 8.19 | 441 | 0.2991
TF-T2V-Semi | 7.64 | 366 | 0.3032
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.473, + 0.903 + ], + "angle": 0, + "content": "ment of generated videos and the provided prompts involving desired motion evolution. We show the comparison results in Tab. 8 and find that the semi-supervised manner reaches the best performance, indicating the effectiveness of harnessing text-free videos. Notably, TF-T2V-Semi outperforms ModelScopeT2V trained on labeled WebVid10M, possessing good scalability. Moreover, the qualitative evaluations in Fig. 7 show that existing methods may struggle to synthesize text-aligned consistent videos when textual prompts involve desired temporal evolution. In contrast," + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.639, + 0.892, + 0.671 + ], + "angle": 0, + "content": "TF-T2V in the semi-supervised setting exhibits excellent text-video alignment and temporally smooth generation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.686, + 0.62, + 0.702 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.712, + 0.894, + 0.85 + ], + "angle": 0, + "content": "In this paper, we present a novel and versatile video generation framework named TF-T2V to exploit text-free videos and explore its scaling trend. TF-T2V effectively decomposes video generation into spatial appearance generation and motion dynamic synthesis. A temporal coherence loss is introduced to explicitly constrain the learning of correlations between adjacent frames. Experimental results demonstrate the effectiveness and potential of TF-T2V in terms of fidelity, controllability, and scalability." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.894, + 0.903 + ], + "angle": 0, + "content": "Acknowledgements. This work is supported by the National Natural Science Foundation of China under grant U22B2053 and Alibaba Research Intern Program." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6579" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Jie An, Songyang Zhang, Harry Yang, Sonal Gupta, Jia-Bin Huang, Jiebo Luo, and Xi Yin. Latent-shift: Latent diffusion with temporal shift for efficient text-to-video generation. arXiv preprint arXiv:2304.08477, 2023. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.226 + ], + "angle": 0, + "content": "[2] Max Bain, Arsha Nagrani, Gül Varol, and Andrew Zisserman. Frozen in time: A joint video and image encoder for end-to-end retrieval. In ICCV, pages 1728-1738, 2021. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.229, + 0.47, + 0.285 + ], + "angle": 0, + "content": "[3] Yogesh Balaji, Martin Renqiang Min, Bing Bai, Rama Chellappa, and Hans Peter Graf. Conditional GAN with discriminative filter generation for text-to-video synthesis. In IJCAI, page 2, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.287, + 0.47, + 0.355 + ], + "angle": 0, + "content": "[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In CVPR, pages 22563-22575, 2023. 2, 3, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.357, + 0.47, + 0.399 + ], + "angle": 0, + "content": "[5] Cerspense. 
Zeroscope: Diffusion-based text-to-video synthesis. https://huggingface.co/cerspense/zeroscope_v2_576w, 2023.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.401, + 0.47, + 0.442 + ], + "angle": 0, + "content": "[6] Duygu Ceylan, Chun-Hao P Huang, and Niloy J Mitra. Pix2video: Video editing using image diffusion. In ICCV, pages 23206-23217, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.444, + 0.47, + 0.486 + ], + "angle": 0, + "content": "[7] Wenhao Chai, Xun Guo, Gaoang Wang, and Yan Lu. Stablevideo: Text-driven consistency-aware diffusion video editing. In ICCV, pages 23040-23050, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.488, + 0.47, + 0.542 + ], + "angle": 0, + "content": "[8] Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. Motion-conditioned diffusion model for controllable video synthesis. arXiv preprint arXiv:2304.14404, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.544, + 0.47, + 0.599 + ], + "angle": 0, + "content": "[9] Weifeng Chen, Jie Wu, Pan Xie, Hefeng Wu, Jiashi Li, Xin Xia, Xuefeng Xiao, and Liang Lin. Control-a-video: Controllable text-to-video generation with diffusion models. arXiv preprint arXiv:2305.13840, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.602, + 0.47, + 0.671 + ], + "angle": 0, + "content": "[10] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. Flownet: Learning optical flow with convolutional networks. In ICCV, pages 2758-2766, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.673, + 0.47, + 0.714 + ], + "angle": 0, + "content": "[11] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming Transformers for high-resolution image synthesis. In CVPR, pages 12873-12883, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.716, + 0.47, + 0.771 + ], + "angle": 0, + "content": "[12] Patrick Esser, Johnathan Chiu, Parmida Atighechian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In ICCV, pages 7346-7356, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.773, + 0.47, + 0.814 + ], + "angle": 0, + "content": "[13] Rafail Fridman, Amit Abecasis, Yoni Kasten, and Tali Dekel. Scenescape: Text-driven consistent scene generation. arXiv preprint arXiv:2302.01133, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.47, + 0.884 + ], + "angle": 0, + "content": "[14] Songwei Ge, Seungjun Nah, Guilin Liu, Tyler Poon, Andrew Tao, Bryan Catanzaro, David Jacobs, Jia-Bin Huang, Ming-Yu Liu, and Yogesh Balaji. Preserve your own correlation: A noise prior for video diffusion models. In ICCV, pages 22930-22941, 2023. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.887, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[15] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[16] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. NeurIPS, 27, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.234 + ], + "angle": 0, + "content": "[17] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.892, + 0.289 + ], + "angle": 0, + "content": "[18] Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. Latent video diffusion models for high-fidelity video generation with arbitrary lengths. arXiv preprint arXiv:2211.13221, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.291, + 0.892, + 0.32 + ], + "angle": 0, + "content": "[19] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.321, + 0.892, + 0.36 + ], + "angle": 0, + "content": "[20] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. NeurIPS, 33:6840-6851, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.363, + 0.892, + 0.433 + ], + "angle": 0, + "content": "[21] Jonathan Ho, William Chan, Chitwan Sahara, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.434, + 0.892, + 0.476 + ], + "angle": 0, + "content": "[22] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via Transformers. In ICLR, 2023. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.477, + 0.892, + 0.532 + ], + "angle": 0, + "content": "[23] Hanzhuo Huang, Yufan Feng, Cheng Shi, Lan Xu, Jingyi Yu, and Sibei Yang. Free-bloom: Zero-shot text-to-video generator with llm director and ldm animator. arXiv preprint arXiv:2309.14494, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.533, + 0.892, + 0.587 + ], + "angle": 0, + "content": "[24] Lianghua Huang, Di Chen, Yu Liu, Yujun Shen, Deli Zhao, and Jingren Zhou.Composer: Creative and controllable image synthesis with composable conditions.ICML, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.59, + 0.892, + 0.631 + ], + "angle": 0, + "content": "[25] Hueihan Jhuang, Juergen Gall, Silvia Zuffi, Cordelia Schmid, and Michael J Black. Towards understanding action recognition. In ICCV, pages 3192-3199, 2013. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.633, + 0.892, + 0.687 + ], + "angle": 0, + "content": "[26] Minguk Kang, Jun-Yan Zhu, Richard Zhang, Jaesik Park, Eli Shechtman, Sylvain Paris, and Taesung Park. Scaling up GANs for text-to-image synthesis. In CVPR, pages 10124-10134, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.689, + 0.892, + 0.745 + ], + "angle": 0, + "content": "[27] Bahjat Kawar, Shiran Zada, Oran Lang, Omer Tov, Huiwen Chang, Tali Dekel, Inbar Mosseri, and Michal Irani. Imagic: Text-based real image editing with diffusion models. In CVPR, pages 6007-6017, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.747, + 0.892, + 0.816 + ], + "angle": 0, + "content": "[28] Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439, 2023. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[29] Ariel Lapid, Idan Achituve, Lior Bracha, and Ethan Fetaya. Gd-vdm: Generated depth for better diffusion-based video generation. arXiv preprint arXiv:2306.11173, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[30] Jiangtong Li, Li Niu, and Liqing Zhang. Action-aware embedding enhancement for image-text retrieval. In AAAI, pages 1323-1331, 2022. 4" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "6580" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.134 + ], + "angle": 0, + "content": "[31] Shaoteng Liu, Yuechen Zhang, Wenbo Li, Zhe Lin, and Jiaya Jia. Video-p2p: Video editing with cross-attention control. arXiv preprint arXiv:2303.04761, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.134, + 0.469, + 0.189 + ], + "angle": 0, + "content": "[32] Haoyu Lu, Guoxing Yang, Nanyi Fei, Yuqi Huo, Zhiwu Lu, Ping Luo, and Mingyu Ding. Vdt: An empirical study on video diffusion with Transformers. arXiv preprint arXiv:2305.13311, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.19, + 0.469, + 0.258 + ], + "angle": 0, + "content": "[33] Zhengxiong Luo, Dayou Chen, Yingya Zhang, Yan Huang, Liang Wang, Yujun Shen, Deli Zhao, Jingren Zhou, and Tieniu Tan. Videofusion: Decomposed diffusion models for high-quality video generation. In CVPR, pages 10209-10218, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.26, + 0.469, + 0.315 + ], + "angle": 0, + "content": "[34] Yifeng Ma, Shiwei Zhang, Jiayu Wang, Xiang Wang, Yingya Zhang, and Zhidong Deng. Dreamtalk: When expressive talking head generation meets diffusion probabilistic models. arXiv preprint arXiv:2312.09767, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.316, + 0.469, + 0.372 + ], + "angle": 0, + "content": "[35] Eyal Molad, Eliahu Horwitz, Dani Valevski, Alex Rav Acha, Yossi Matias, Yael Pritch, Yaniv Leviathan, and Yedid Hoshen. Dreamix: Video diffusion models are general video editors. arXiv preprint arXiv:2302.01329, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.372, + 0.469, + 0.427 + ], + "angle": 0, + "content": "[36] Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhonggang Qi, Ying Shan, and Xiaohu Qie. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.428, + 0.469, + 0.482 + ], + "angle": 0, + "content": "[37] Haomiao Ni, Changhao Shi, Kai Li, Sharon X Huang, and Martin Renqiang Min. Conditional image-to-video generation with latent flow diffusion models. In CVPR, pages 18444-18455, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.484, + 0.469, + 0.552 + ], + "angle": 0, + "content": "[38] Alexander Quinn Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. In ICML, pages 16784-16804. PMLR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.553, + 0.469, + 0.607 + ], + "angle": 0, + "content": "[39] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing attentions for zero-shot text-based video editing. In ICCV, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.608, + 0.469, + 0.664 + ], + "angle": 0, + "content": "[40] Zhiwu Qing, Shiwei Zhang, Jiayu Wang, Xiang Wang, Yujie Wei, Yingya Zhang, Changxin Gao, and Nong Sang. Hierarchical spatio-temporal decoupling for text-to-video generation. arXiv preprint arXiv:2312.04483, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.664, + 0.469, + 0.734 + ], + "angle": 0, + "content": "[41] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763. PMLR, 2021. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.735, + 0.469, + 0.789 + ], + "angle": 0, + "content": "[42] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.469, + 0.844 + ], + "angle": 0, + "content": "[43] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, pages 10684-10695, 2022. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[44] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, pages 22500-22510, 2023." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.162 + ], + "angle": 0, + "content": "[45] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. NeurIPS, 35:36479-36494, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.163, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[46] Tim Salimans and Jonathan Ho. Progressive distillation for fast sampling of diffusion models. arXiv preprint arXiv:2202.00512, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.206, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[47] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114, 2021. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.277, + 0.892, + 0.359 + ], + "angle": 0, + "content": "[48] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. NeurIPS, 35:25278-25294, 2022. 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.361, + 0.892, + 0.402 + ], + "angle": 0, + "content": "[49] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in GANs. In CVPR, pages 1532-1540, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.404, + 0.892, + 0.459 + ], + "angle": 0, + "content": "[50] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. ICLR, 2023. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.46, + 0.892, + 0.516 + ], + "angle": 0, + "content": "[51] Ivan Skorokhodov, Sergey Tulyakov, and Mohamed Elhoseiny. StyleGAN-v: A continuous video generator with the price, image quality and perks of StyleGAN2. In CVPR, pages 3626-3636, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.518, + 0.892, + 0.546 + ], + "angle": 0, + "content": "[52] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In ICLR, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.547, + 0.892, + 0.588 + ], + "angle": 0, + "content": "[53] Sergey Tulyakov, Ming-Yu Liu, Xiaodong Yang, and Jan Kautz. MocoGAN: Decomposing motion and content for video generation. In CVPR, pages 1526-1535, 2018. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.589, + 0.892, + 0.644 + ], + "angle": 0, + "content": "[54] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 2, 3, 4, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.646, + 0.892, + 0.687 + ], + "angle": 0, + "content": "[55] Limin Wang, Zhan Tong, Bin Ji, and Gangshan Wu. Tdn: Temporal difference networks for efficient action recognition. In CVPR, pages 1895-1904, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.689, + 0.892, + 0.744 + ], + "angle": 0, + "content": "[56] Wen Wang, Kangyang Xie, Zide Liu, Hao Chen, Yue Cao, Xinlong Wang, and Chunhua Shen. Zero-shot video editing using off-the-shelf image diffusion models. arXiv preprint arXiv:2303.17599, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.745, + 0.892, + 0.801 + ], + "angle": 0, + "content": "[57] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.802, + 0.892, + 0.871 + ], + "angle": 0, + "content": "[58] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. NeurIPS, 2023. 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[59] Xiang Wang, Shiwei Zhang, Zhiwu Qing, Changxin Gao, Yingya Zhang, Deli Zhao, and Nong Sang. 
Molo: Motion-" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.514, + 0.957 + ], + "angle": 0, + "content": "6581" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "augmented long-short contrastive learning for few-shot action recognition. In CVPR, pages 18011-18021, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.123, + 0.47, + 0.178 + ], + "angle": 0, + "content": "[60] Xiang Wang, Shiwei Zhang, Han Zhang, Yu Liu, Yingya Zhang, Changxin Gao, and Nong Sang. Videolcm: Video latent consistency model. arXiv preprint arXiv:2312.09109, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.181, + 0.47, + 0.223 + ], + "angle": 0, + "content": "[61] Yaohui Wang, Piotr Bilinski, Francois Bremond, and Antitza Dantcheva. G3an: Disentangling appearance and motion for video generation. In CVPR, pages 5264-5273, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.225, + 0.47, + 0.293 + ], + "angle": 0, + "content": "[62] Yaohui Wang, Xinyuan Chen, Xin Ma, Shangchen Zhou, Ziqi Huang, Yi Wang, Ceyuan Yang, Yinan He, Jiashuo Yu, Peiqing Yang, et al. Lavie: High-quality video generation with cascaded latent diffusion models. arXiv preprint arXiv:2309.15103, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.296, + 0.47, + 0.365 + ], + "angle": 0, + "content": "[63] Yi Wang, Yinan He, Yizhuo Li, Kunchang Li, Jiashuo Yu, Xin Ma, Xinyuan Chen, Yaohui Wang, Ping Luo, Ziwei Liu, et al. Intervid: A large-scale video-text dataset for multimodal understanding and generation. arXiv preprint arXiv:2307.06942, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.368, + 0.469, + 0.422 + ], + "angle": 0, + "content": "[64] Yuhan Wang, Liming Jiang, and Chen Change Loy. Styleinv: A temporal style modulated inversion network for unconditional video generation. In ICCV, pages 22851-22861, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.426, + 0.47, + 0.495 + ], + "angle": 0, + "content": "[65] Yujie Wei, Shiwei Zhang, Zhiwu Qing, Hangjie Yuan, Zhiheng Liu, Yu Liu, Yingya Zhang, Jingren Zhou, and Hongming Shan. Dreamvideo: Composing your dream videos with customized subject and motion. arXiv preprint arXiv:2312.04433, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.498, + 0.469, + 0.553 + ], + "angle": 0, + "content": "[66] Chenfei Wu, Jian Liang, Lei Ji, Fan Yang, Yuejian Fang, Daxin Jiang, and Nan Duan. Nüwa: Visual synthesis pretraining for neural visual world creation. In ECCV, pages 720-736. Springer, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.556, + 0.469, + 0.625 + ], + "angle": 0, + "content": "[67] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. In ICCV, pages 7623-7633, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.628, + 0.469, + 0.697 + ], + "angle": 0, + "content": "[68] Jinbo Xing, Menghan Xia, Yuxin Liu, Yuechen Zhang, Yong Zhang, Yingqing He, Hanyuan Liu, Haoxin Chen, Xiaodong Cun, Xintao Wang, et al. Make-your-video: Customized video generation using textual and structural guidance. arXiv preprint arXiv:2306.00943, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.699, + 0.469, + 0.741 + ], + "angle": 0, + "content": "[69] Zhen Xing, Qi Dai, Han Hu, Zuxuan Wu, and Yu-Gang Jiang. Simda: Simple diffusion adapter for efficient video generation. arXiv preprint arXiv:2308.09710, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.743, + 0.469, + 0.785 + ], + "angle": 0, + "content": "[70] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. In CVPR, pages 5288-5296, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.469, + 0.843 + ], + "angle": 0, + "content": "[71] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In CVPR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[72] Shengming Yin, Chenfei Wu, Jian Liang, Jie Shi, Houqiang Li, Gong Ming, and Nan Duan. Dragnuwa: Fine-grained control in video generation by integrating text, image, and trajectory. arXiv preprint arXiv:2308.08089, 2023. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.161 + ], + "angle": 0, + "content": "[73] Lijun Yu, Yong Cheng, Kihyuk Sohn, José Lezama, Han Zhang, Huiwen Chang, Alexander G Hauptmann, Ming-Hsuan Yang, Yuan Hao, Irfan Essa, et al. Magvit: Masked generative video Transformer. In CVPR, pages 10459-10469, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[74] Sihyun Yu, Kihyuk Sohn, Subin Kim, and Jinwoo Shin. Video probabilistic diffusion models in projected latent space. In CVPR, pages 18456-18466, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.207, + 0.892, + 0.274 + ], + "angle": 0, + "content": "[75] Hangjie Yuan, Shiwei Zhang, Xiang Wang, Yujie Wei, Tao Feng, Yining Pan, Yingya Zhang, Ziwei Liu, Samuel Albanie, and Dong Ni. Instructvideo: Instructing video diffusion models with human feedback. arXiv preprint arXiv:2312.12490, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.277, + 0.892, + 0.344 + ], + "angle": 0, + "content": "[76] David Junhao Zhang, Jay Zhangjie Wu, Jia-Wei Liu, Rui Zhao, Lingmin Ran, Yuchao Gu, Difei Gao, and Mike Zheng Shou. Show-1: Marrying pixel and latent diffusion models for text-to-video generation. arXiv preprint arXiv:2309.15818, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.347, + 0.892, + 0.388 + ], + "angle": 0, + "content": "[77] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In ICCV, pages 3836-3847, 2023. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.39, + 0.892, + 0.458 + ], + "angle": 0, + "content": "[78] Shiwei Zhang, Jiayu Wang, Yingya Zhang, Kang Zhao, Hangjie Yuan, Zhiwu Qin, Xiang Wang, Deli Zhao, and Jingren Zhou. I2vgen-xl: High-quality image-to-video synthesis via cascaded diffusion models. arXiv preprint arXiv:2311.04145, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.46, + 0.892, + 0.515 + ], + "angle": 0, + "content": "[79] Yabo Zhang, Yuxiang Wei, Dongsheng Jiang, Xiaopeng Zhang, Wangmeng Zuo, and Qi Tian. Controlvideo: Training-free controllable text-to-video generation. 
arXiv preprint arXiv:2305.13077, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.517, + 0.892, + 0.544 + ], + "angle": 0, + "content": "[80] Zhang Zhang and Dacheng Tao. Slow feature analysis for human action recognition. TPAMI, 34(3):436-450, 2012. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.546, + 0.892, + 0.599 + ], + "angle": 0, + "content": "[81] Min Zhao, Rongzhen Wang, Fan Bao, Chongxuan Li, and Jun Zhu. Controlvideo: Adding conditional control for one shot text-to-video editing. arXiv preprint arXiv:2305.17098, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.602, + 0.892, + 0.657 + ], + "angle": 0, + "content": "[82] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.659, + 0.892, + 0.713 + ], + "angle": 0, + "content": "[83] Junbao Zhuo, Xingyu Zhao, Shuhui Wang, Huimin Ma, and Qingming Huang. Synthesizing videos from images for image-to-video adaptation. In ACMMM, pages 8294-8303, 2023. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6582" + } + ] +] \ No newline at end of file diff --git a/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/873ecf7b-8814-4ac3-a70f-20982249ac1d_origin.pdf b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/873ecf7b-8814-4ac3-a70f-20982249ac1d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2794bd19819b00ec62c5ae834040cf2ffeaf7f53 --- /dev/null +++ b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/873ecf7b-8814-4ac3-a70f-20982249ac1d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c84d7bdaae7ea0b7862d3ad8f5079dbbc97e45062490b7884239534a2bef6de +size 8547679 diff --git a/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/full.md b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/full.md new file mode 100644 index 0000000000000000000000000000000000000000..6f058e7d71331206073238df4d1fec0b6e4777e4 --- /dev/null +++ b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/full.md @@ -0,0 +1,320 @@ +# A Recipe for Scaling up Text-to-Video Generation with Text-free Videos + +Xiang Wang $^{1*}$ Shiwei Zhang $^{2\dagger}$ Hangjie Yuan $^{3}$ Zhiwu Qing $^{1}$ Biao Gong $^{2}$ Yingya Zhang $^{2}$ Yujun Shen $^{4}$ Changxin Gao $^{1}$ Nong Sang $^{1\dagger}$ + +$^{1}$ Key Laboratory of Image Processing and Intelligent Control, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology $^{2}$ Alibaba Group $^{3}$ Zhejiang University $^{4}$ Ant Group + +{wxiang,qzw,cgao,nsang}@hust.edu.cn,{zhangjin.zsw,yingya.zyy}@alibaba-inc.com hj.yuan@zju.edu.cn,{a.biao.gong,shenyujun0302}@gmail.com + +"Close-up of Caucasian mother and baby girl sitting at windowsill and reading book. Young woman educating daughter at home." + +![](images/d328df4e5ed3fa12e9f8353718952f20550582afdba3ec6f9d53d0bf2d3ba22f.jpg) + +![](images/8afa70d7b19d287c2a4dda7cacecbeaaddc7cab81f17bad8a617543f4bddcc07.jpg) +Figure 1. 
Example video results generated by the proposed TF-T2V on text-to-video generation and compositional video synthesis tasks without training on any video-text pairs. + +# Abstract + +Diffusion-based text-to-video generation has witnessed impressive progress in the past year yet still falls behind text-to-image generation. One of the key reasons is the limited scale of publicly available data (e.g., 10M video-text pairs in WebVid10M vs. 5B image-text pairs in LAION), considering the high cost of video captioning. Instead, it could be far easier to collect unlabeled clips from video platforms like YouTube. Motivated by this, we come up + +with a novel text-to-video generation framework, termed $TF-T2V$ , which can directly learn with text-free videos. The rationale behind is to separate the process of text decoding from that of temporal modeling. To this end, we employ a content branch and a motion branch, which are jointly optimized with weights shared. Following such a pipeline, we study the effect of doubling the scale of training set (i.e., video-only WebVid10M) with some randomly collected text-free videos and are encouraged to observe the performance improvement (FID from 9.67 to 8.19 and FVD from 484 to 441), demonstrating the scalability of + +our approach. We also find that our model could enjoy sustainable performance gain (FID from 8.19 to 7.64 and FVD from 441 to 366) after reintroducing some text labels for training. Finally, we validate the effectiveness and generalizability of our ideology on both native text-to-video generation and compositional video synthesis paradigms. Code and models will be publicly available at here. + +# 1. Introduction + +Video generation aims to synthesize realistic videos that possess visually appealing spatial contents and temporally coherent motions. It has witnessed unprecedented progress in recent years with the advent of deep generative techniques [22, 53], especially with the emergence of video diffusion models [4, 34, 40, 54, 60, 67, 78]. Pioneering approaches [28, 33, 67] utilize pure image diffusion models or fine-tuning on a small amount of video-text data to synthesize videos, leading to temporally discontinuous results due to insufficient motion perception [39, 79]. To achieve plausible results, current text-to-video methods like VideoLDM [4] and ModelScopeT2V [54] usually insert temporal blocks into latent 2D-UNet [43] and train the model on expansive video-text datasets, e.g., WebVid10M [2]. To enable more controllable generation, VideoComposer [58] proposes a compositional paradigm that incorporates additional conditions (e.g., depth, sketch, motion vectors, etc.) to guide synthesis, allowing customizable creation. + +Despite this, the progress in text-to-video generation still falls behind text-to-image generation [42, 43]. One of the key reasons is the limited scale of publicly available videotext data, considering the high cost of video captioning [83]. Instead, it could be far easier to collect text-free video clips from media platforms like YouTube. There are some works sharing similar inspiration, Make-A-Video [50] and Gen-1 [12] employ a two-step strategy that first leverages a large ( $\sim$ 1B parameters) diffusion prior model [42] to convert text embedding into image embedding of CLIP [41] and then enters it into an image-conditioned generator to synthesize videos. 
However, the separate two-step manner may cause issues such as error accumulation [13], increased model size and latency [42, 69], and does not support text-conditional optimization if extra video-text data is available, leading to sub-optimal results. Moreover, the characteristics of scaling potential on video generation are still under-explored. + +In this work, we aim to train a single unified video diffusion model that allows text-guided video generation by exploiting the widely accessible text-free videos and explore its scaling trend. To achieve this, we present a novel two-branch framework named TF-T2V, where a content branch is designed for spatial appearance generation, and a motion branch specializes in temporal dynamics synthesis. More specifically, we utilize the publicly available image-text datasets such as LAION-5B [48] to learn text-guided + +and image-guided spatial appearance generation. In the motion branch, we harness the video-only data to conduct image-conditioned video synthesis, allowing the temporal modules to learn intricate motion patterns without relying on textual annotations. Paired video-text data, if available, can also be incorporated into co-optimization. Furthermore, unlike previous methods that impose training loss on each frame individually, we introduce a temporal coherence loss to explicitly enforce the learning of correlations between adjacent frames, enhancing the continuity of generated videos. In this way, the proposed TF-T2V achieves text-to-video generation by assembling contents and motions with a unified model, overcoming the high cost of video captioning and eliminating the need for complex cascading steps. + +Notably, TF-T2V is a plug-and-play paradigm, which can be integrated into existing text-to-video generation and compositional video synthesis frameworks as shown in Fig. 1. Different from most prior works that rely heavily on video-text data and train models on the widely-used watermarked and low-resolution (around 360P) WebVid10M [2], TF-T2V opens up new possibilities for optimizing with text-free videos or partially paired video-text data, making it more scalable and versatile in widespread scenarios, such as high-definition video generation. To study the scaling trend, we double the scale of the training set with some randomly collected text-free videos and are encouraged to observe the performance improvement, with FID from 9.67 to 8.19 and FVD from 484 to 441. Extensive quantitative and qualitative experiments collectively demonstrate the effectiveness and scaling potential of the proposed TF-T2V in terms of synthetic continuity, fidelity, and controllability. + +# 2. Related Work + +In this section, we provide a brief review of relevant literature on text-to-image generation, text-to-video generation, and compositional video synthesis. + +Text-to-image generation. Recently, text-to-image generation has made significant strides with the development of large-scale image-text datasets such as LAION-5B [48], allowing users to create high-resolution and photorealistic images that accurately depict the given natural language descriptions. Previous methods [16, 26, 49] primarily focus on synthesizing images by adopting generative adversarial networks (GANs) to estimate training sample distributions. Distinguished by the promising stability and scalability, diffusion-based generation models have attracted increasing attention [27, 42-45]. 
Diffusion models utilize iterative steps to gradually refine the generated image, resulting in improved quality and realism. Typically,Imagen [45] and GLIDE [38] explore text-conditional diffusion models and boost sample quality by applying classifier-free guidance [19]. DALL-E 2 [42] first leverages an image prior + +![](images/aada0f9a862f0c672dc173904d4a8c31e67f4cb754cc05b7c7ea0481fa8840f8.jpg) + +![](images/792581ee81c6e66e56eee6847243bfbfe763b19931c1d9603acb0bc60e620fc8.jpg) +Figure 2. Overall pipeline of TF-T2V, which consists of two branches. In the content branch, paired image-text data is leveraged to learn text-conditioned and image-conditioned spatial appearance generation. The motion branch supports the training of motion dynamic synthesis by feeding text-free videos (or partially paired video-text data if available). During the training stage, both branches are optimized jointly. Notably, TF-T2V can be seamlessly integrated into the compositional video synthesis framework by incorporating composable conditions. In inference, TF-T2V enables text-guided video generation by taking text prompts and random noise sequences as input. + +to bridge multi-modal embedding spaces and then learns a diffusion decoder to synthesize images in the pixel space. Stable Diffusion [43] introduces latent diffusion models that conduct iterative denoising processes at the latent level to save computational costs. There are also some works that generate customized and desirable images by incorporating additional spatial control signals [24, 36, 77]. + +Text-to-video generation. This task poses additional challenges compared to text-to-image generation due to the temporal dynamics involved in videos. Various early techniques have been proposed to tackle this problem, such as recurrent neural networks combined with GANs [3, 51, 53, 61, 64] or transformer-based autoregressive models [22, 73]. With the subsequent advent of video diffusion models pretrained on large-scale video-text datasets [2, 63, 71], video content creation has demonstrated remarkable advances [1, 4, 6-9, 14, 15, 17, 18, 21, 23, 28, 31-33, 35, 37, 39, 56, 57, 62, 65, 67, 69, 74-76]. Imagen Video [21] learns cascaded pixel-level diffusion models to produce high-resolution videos. Following [42], Make-A-Video [50] introduces a two-step strategy that first maps the input text to image embedding by a large ( $\sim$ 1B parameters) diffusion prior model and then embeds the resulting embedding into an image-conditional video diffusion model to synthesize videos in pixel space. VideoLDM [4] and ModelScopeT2V [54] extend 2D-UNet into 3D-UNet by injecting temporal layers and operate a latent denoising process to save computational resources. In this paper, we present a single unified framework for text-to-video generation and study the scaling trend by harnessing widely accessible text-free videos. + +Compositional video synthesis. Traditional text-to-video methods solely rely on textual descriptions to control the video generation process, limiting desired fine-grained cus- + +tomization such as texture, object position, motion patterns, etc. To tackle this constraint and pursue higher controllability, several controllable video synthesis methods [8, 9, 12, 29, 58, 68, 72, 79, 81] have been proposed. These methods utilize additional control signals, such as depth or sketch, to guide the generation of videos. By incorporating extra structured guidance, the generated content can be precisely controlled and customized. 
Among these approaches, VideoComposer [58] stands out as a pioneering and versatile compositional technique. It integrates multiple conditioning signals including textual, spatial and temporal conditions within a unified framework, offering enhanced controllability, compositionality, and realism in the generated videos. Despite the remarkable quality, these methods still rely on high-quality video-text data to unleash powerful and customizable synthesis. In contrast, our method can be directly merged into existing controllable frameworks to customize videos by exploiting text-free videos. + +# 3. Method + +We first provide a brief introduction to the preliminaries of the video diffusion model. Then, we will elaborate on the mechanisms of TF-T2V in detail. The overall framework of the proposed TF-T2V is displayed in Fig. 2. + +# 3.1. Preliminaries of video diffusion model + +Diffusion models involve a forward diffusion process and a reverse iterative denoising stage. The forward process of diffusion models is gradually imposing random noise to clean data $x_0$ in a Markovian chain: + +$$ +q \left(x _ {t} \mid x _ {t - 1}\right) = \mathcal {N} \left(x _ {t}; \sqrt {1 - \beta_ {t - 1}} x _ {t - 1}, \beta_ {t} I\right), t = 1, \dots , T \tag {1} +$$ + +where $\beta_{t} \in (0,1)$ is a noise schedule and $T$ is the total time step. When $T$ is sufficiently large, e.g. $T = 1000$ , the resulting $x_{T}$ is nearly a random Gaussian distribution $\mathcal{N}(0,I)$ . The role of diffusion model is to denoise $x_{T}$ and learn to iteratively estimate the reversed process: + +$$ +p _ {\theta} (x _ {t - 1} | x _ {t}) = \mathcal {N} (x _ {t - 1}; \mu_ {\theta} (x _ {t}, t), \sum_ {\theta} (x _ {t}, t)) \tag {2} +$$ + +We usually train a denoising model $\hat{x}_{\theta}$ parameterized by $\theta$ to approximate the original data $x_0$ and optimize the following v-prediction [21, 46] problem: + +$$ +\mathcal {L} _ {\text {b a s e}} = \mathbb {E} _ {\theta} [ \| v - \hat {x} _ {\theta} (x _ {t}, t, c) \| _ {2} ^ {2} ] \tag {3} +$$ + +where $c$ is conditional information such as textual prompt, and $v$ is the parameterized prediction objective. In representative video diffusion models [4, 54, 58], the denoising model $\hat{x}_{\theta}$ is a latent 3D-UNet [4, 54] modified from its 2D version [43] by inserting additional temporal blocks, which is optimized in the latent feature space by applying a variational autoencoder [11], and Eq. (3) is applied on each frame of the input video to train the whole model. + +# 3.2. TF-T2V + +The objective of TF-T2V is to learn a text-conditioned video diffusion model to create visually appealing and temporally coherent videos with text-free videos or partially paired video-text data. Without loss of generality, we first describe the workflow of our TF-T2V in the scenario where only text-free video is used. With merely text-free videos available for training, it is challenging to guide content creation by textual information since there lacks text-visual correspondence. To tackle this issue, we propose to resort to web-scale and high-quality image-text datasets [47, 48], which are publicly accessible on the Internet. However, this raises another question: how can we leverage the image-text data and text-free videos in a unified framework? + +Recalling the network architecture in 3D-UNet, the spatial modules mainly focus on appearance modeling, and the temporal modules primarily aim to operate motion coherence. 
The intuition is that we can utilize image-text data to learn text-conditioned spatial appearance generation and adopt high-quality text-free videos to guide consistent motion dynamic synthesis. In this way, we can perform text-to-video generation in a single model to synthesize high-quality and consistent videos during the inference stage. Based on this, the proposed TF-T2V consists of two branches: a content branch for spatial appearance generation and a motion branch for motion dynamic synthesis. + +# 3.2.1 Spatial appearance generation + +Like previous text-to-image works [43, 77], the content branch of TF-T2V takes a noised image $I_{image} \in H \times$ + +$W \times C$ as input, where $H$ , $W$ , $C$ are the height, width, and channel dimensions respectively, and employs conditional signals (i.e., text and image embeddings) to offer semantic guidance for content generation. This branch primarily concentrates on optimizing the spatial modules in the video diffusion model and plays a crucial role in determining appealing visual quality. In order to ensure that each condition can also control the created content separately, we randomly drop text or image embeddings with a certain probability during training. The text and image encoders from CLIP [41] are adopted to encode embeddings. + +# 3.2.2 Motion dynamic synthesis + +The pursuit of producing highly temporally consistent videos is a unique hallmark of video creation. Recent advancements [4, 54, 57, 58] in the realm of video synthesis usually utilize large-scale video-text datasets such as WebVid10M [2] to achieve coherent video generation. However, acquiring large-scale video-text pairs consumes extensive manpower and time, hindering the scaling up of video diffusion models. To make matters worse, the widely used WebVid10M is a watermarked and low-resolution (around 360P) dataset, resulting in unsatisfactory video creation that cannot meet the high-quality video synthesis requirements. + +To mitigate the above issues, we propose to leverage high-quality text-free videos that are easily accessible on video media platforms, e.g., YouTube and TikTok. To fully excavate the abundant motion dynamics within the text-free videos, we train a image-conditioned model. By optimizing this image-to-video generation task, the temporal modules in the video diffusion model can learn to perceive and model diverse motion dynamics. Specifically, given a noised video $I_{video} \in F \times H \times W \times C$ , where $F$ is the temporal length, the motion branch of TF-T2V learns to recover the undisturbed video guided by the image embedding. The image embedding is extracted from the center frame of the original video by applying CLIP's image encoder [41]. + +Since large-scale image-text data used for training contains abundant movement intentions [30], TF-T2V can achieve text-to-video generation by assembling spatial appearances involving motion trends and predicted motion dynamics. When extra paired video-text data is available, we conduct both text-to-video and image-to-video generation based on video-text pairs to train TF-T2V and further enhance the perception ability for desirable textual control. + +In addition, we notice that previous works apply the training loss (i.e., Eq. (3)) on each frame of the input video individually without considering temporal correlations between frames, suffering from incoherent appearances and motions. 
Inspired by the early study [25, 55, 59, 80] finding that the difference between two adjacent frames usually contains motion patterns, e.g., dynamic trajectory, we thus + +Table 1. Quantitative comparison with state-of-the-art methods for text-to-video task on MSR-VTT in terms of FID, FVD, and CLIPSIM. + +
| Method | Zero-shot | Parameters | FID (↓) | FVD (↓) | CLIPSIM (↑) |
| --- | --- | --- | --- | --- | --- |
| Nüwa [66] | No | - | 47.68 | - | 0.2439 |
| CogVideo (Chinese) [22] | Yes | 15.5B | 24.78 | - | 0.2614 |
| CogVideo (English) [22] | Yes | 15.5B | 23.59 | 1294 | 0.2631 |
| MagicVideo [82] | Yes | - | - | 1290 | - |
| Make-A-Video [50] | Yes | 9.7B | 13.17 | - | 0.3049 |
| ModelScopeT2V [54] | Yes | 1.7B | 11.09 | 550 | 0.2930 |
| VideoComposer [58] | Yes | 1.9B | 10.77 | 580 | 0.2932 |
| Latent-Shift [1] | Yes | 1.5B | 15.23 | - | 0.2773 |
| VideoLDM [4] | Yes | 4.2B | - | - | 0.2929 |
| PYoCo [14] | Yes | - | 9.73 | - | - |
| TF-T2V (WebVid10M) | Yes | 1.8B | 9.67 | 484 | 0.2953 |
| TF-T2V (WebVid10M+Internal10M) | Yes | 1.8B | 8.19 | 441 | 0.2991 |
+ +propose a temporal coherence loss that utilizes the frame difference as an additional supervisory signal: + +$$ +\mathcal {L} _ {\text {c o h e r e n c e}} = \mathbb {E} _ {\theta} [ \sum_ {j = 1} ^ {F - 1} | | (v _ {j + 1} - v _ {j}) - (o _ {j + 1} - o _ {j}) | | _ {2} ^ {2} ] \tag {4} +$$ + +where $o_j$ and $v_j$ are the predicted frame and corresponding ground truth. This loss term measures the discrepancy between the predicted frame differences and the ground truth frame differences of the input parameterized video. By minimizing Eq. (4), TF-T2V helps to alleviate frame flickering and ensures that the generated videos exhibit seamless transitions and promising temporal dynamics. + +# 3.2.3 Training and inference + +In order to mine the complementary advantages of spatial appearance generation and motion dynamic synthesis, we jointly optimize the entire model in an end-to-end manner. The total loss can be formulated as: + +$$ +\mathcal {L} _ {\text {t o t a l}} = \mathcal {L} _ {\text {b a s e}} + \lambda \mathcal {L} _ {\text {c o h e r e n c e}} \tag {5} +$$ + +where $\mathcal{L}_{base}$ is imposed on video and image together by treating the image as a "single frame" video, and $\lambda$ is a balance coefficient that is set empirically to 0.1. + +After training, we can perform text-guided video generation to synthesize temporally consistent video content that aligns well with the given text prompt. Moreover, TF-T2V is a general framework and can also be inserted into existing compositional video synthesis paradigm [58] by incorporating additional spatial and temporal structural conditions, allowing for customized video creation. + +# 4. Experiments + +In this section, we present a comprehensive quantitative and qualitative evaluation of the proposed TF-T2V on text-to-video generation and composition video synthesis. + +# 4.1. Experimental setup + +Implementation details. TF-T2V is built on two typical open-source baselines, i.e., ModelScopeT2V [54] and + +Table 2. Human preference results on text-to-video generation. + +
| Method | Text alignment | Visual quality | Temporal coherence |
| --- | --- | --- | --- |
| ModelScopeT2V [54] | 83.5% | 74.0% | 81.3% |
| TF-T2V | 86.5% | 87.0% | 92.5% |
+ +VideoComposer [58]. DDPM sampler [20] with $T = 1000$ steps is adopted for training, and we employ DDIM [52] with 50 steps for inference. We optimize TF-T2V using AdamW optimizer with a learning rate of 5e-5. For input videos, we sample 16 frames from each video at 4FPS and crop a $448 \times 256$ region at the center as the basic setting. Note that we can also easily train high-definition video diffusion models by collecting high-quality text-free videos (see examples in the Appendix). LAION-5B [48] is utilized to provide image-text pairs. Unless otherwise stated, we treat WebVid10M, which includes about 10.7M video-text pairs, as a text-free dataset to train TF-T2V and do not use any textual annotations. To study scaling trends, we gathered about 10M high-quality videos without text labels from internal data, termed the Internal10M dataset. + +Metrics. (i) To evaluate text-to-video generation, following previous works [4, 54], we leverage the standard Fréchet Inception Distance (FID), Fréchet Video Distance (FVD), and CLIP Similarity (CLIPSIM) as quantitative evaluation metrics and report results on MSR-VTT dataset [70]. (ii) For controllability evaluation, we leverage depth error, sketch error, and end-point-error (EPE) [10] to verify whether the generated videos obey the control of input conditions. Depth error measures the divergence between the input depth conditions and the eliminated depth of the synthesized video. Similarly, sketch error examines the sketch control. EPE evaluates the flow consistency between the reference video and the generated video. In addition, human evaluation is also introduced to validate our method. + +# 4.2. Evaluation on text-to-video generation + +Tab. 1 displays the comparative quantitative results with existing state-of-the-art methods. We observe that TF-T2V achieves remarkable performance under various metrics. + +![](images/fb7d52d8fa8ea8a6059e66f7a6648f27fbc0ee0ab5ebdb07e5f5a7f8aec808d0.jpg) + +![](images/080155e6c674a561b7bf2c8f0acb899b74590c7a7aadd92bc52dac9f0878e3c4.jpg) +"Portrait of smiling young woman outdoors" + +![](images/07158c131aa37a10d9155b3328822e13ac461918504a4a1bd8e7e17bcf879c95.jpg) +Figure 3. Qualitative comparison on text-to-video generation. Three representative open-source text-to-video approaches are compared, including ModelScopeT2V [54], Text2video-Zero [28] and ZeroScope [5]. Please refer to the Appendix for videos and more comparisons. + +![](images/518f579bc505d8932a972d7ec19ea75385874abf3e5d255f6e01b7dfc5eb9a9b.jpg) + +Table 3. Evaluation of structure control based on depth signals. + +
| Method | Condition | Depth error (↓) |
| --- | --- | --- |
| VideoComposer [58] | Text | 0.382 |
| VideoComposer [58] | Text and depth | 0.217 |
| TF-T2V | Text and depth | 0.209 |
+ +Table 4. Evaluation of structure control based on sketch signals. + +
| Method | Condition | Sketch error (↓) |
| --- | --- | --- |
| VideoComposer [58] | Text | 0.1854 |
| VideoComposer [58] | Text and sketch | 0.1161 |
| TF-T2V | Text and sketch | 0.1146 |
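
The depth and sketch errors reported in Tables 3 and 4 measure how closely the structure of a generated video follows its conditioning map. As a rough illustration of this kind of metric, the sketch below re-estimates a structure map from each generated frame and compares it with the corresponding condition; the `estimate_map` callable and the min-max normalization are assumptions for illustration, since the paper does not specify the exact estimators or normalization used.

```python
import torch

def structure_error(gen_frames, cond_maps, estimate_map):
    """Illustrative depth/sketch error: mean absolute difference between the
    conditioning structure maps and maps re-estimated from generated frames.

    gen_frames:   float tensor [F, C, H, W], frames of one generated video.
    cond_maps:    float tensor [F, H, W], depth or sketch maps used as conditions.
    estimate_map: callable mapping one frame [C, H, W] -> map [H, W], e.g. a
                  monocular depth estimator (depth error) or an edge detector
                  (sketch error); left abstract here.
    """
    errors = []
    for frame, cond in zip(gen_frames, cond_maps):
        pred = estimate_map(frame)
        # Min-max normalize both maps to [0, 1] before comparing, so the score
        # does not depend on the estimator's output scale (an assumption).
        pred = (pred - pred.min()) / (pred.max() - pred.min() + 1e-8)
        cond = (cond - cond.min()) / (cond.max() - cond.min() + 1e-8)
        errors.append((pred - cond).abs().mean())
    return torch.stack(errors).mean()
```

Plugging a depth estimator into `estimate_map` gives a depth-error-style score, and an edge detector gives a sketch-error-style score; in both cases lower is better, matching the trends in Tables 3 and 4.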
+ +Notably, TF-T2V trained on WebVid10M and Internal10M obtains higher performance than the counterpart on WebVid10M, revealing promising scalable capability. We show the qualitative visualizations in Fig. 3. From the results, we can find that compared with previous methods, TF-T2V obtains impressive video creation in terms of both temporal continuity and visual quality. The human assessment in Tab. 2 also reveals the above observations. The user study is performed on 100 randomly synthesized videos. + +# 4.3. Evaluation on compositional video synthesis + +We compare the controllability of TF-T2V and VideoComposer on 1,000 generated videos in terms of depth control (Tab. 3), sketch control (Tab. 4) and motion control + +Table 5. Evaluation of motion control based on motion vectors. + +
| Method | Condition | EPE (↓) |
| --- | --- | --- |
| VideoComposer [58] | Text | 4.13 |
| VideoComposer [58] | Text and motion vector | 1.98 |
| TF-T2V | Text and motion vector | 1.88 |
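
The end-point error (EPE) in Table 5 follows the standard optical-flow definition from [10]: the average Euclidean distance between two flow fields, here the flow of the reference video and the flow of the generated video. A minimal sketch is given below; how the flows are obtained (e.g., with an off-the-shelf flow estimator) is left outside the function, since the text only states that EPE evaluates flow consistency.

```python
import torch

def end_point_error(flow_ref, flow_gen):
    """End-point error between two optical-flow fields.

    flow_ref, flow_gen: float tensors [F - 1, 2, H, W] holding per-pixel (u, v)
    flow vectors for the reference and the generated video, both estimated with
    the same (unspecified) flow network.
    """
    # L2 distance between the flow vectors at every pixel, averaged over all
    # pixels and all frame pairs.
    diff = flow_ref - flow_gen
    return diff.pow(2).sum(dim=1).sqrt().mean()
```

A lower EPE indicates that the motion of the generated video stays closer to the motion of the reference video, which is exactly what the motion-vector condition is intended to enforce.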
+ +Table 6. Human evaluations on compositional video synthesis. + +
| Method | Structure alignment | Visual quality | Temporal coherence |
| --- | --- | --- | --- |
| VideoComposer [58] | 79.0% | 66.0% | 77.5% |
| TF-T2V | 89.0% | 79.5% | 84.5% |
+ +(Tab. 5). The above experimental evaluations highlight the effectiveness of TF-T2V by leveraging text-free videos. In Fig. 4 and 5, we show the comparison of TF-T2V and existing methods on compositional video generation. We notice that TF-T2V exhibits high-fidelity and consistent video generation. In addition, we conduct a human evaluation on 100 randomly sampled videos and report the results in Tab. 6. The preference assessment provides further evidence of the superiority of the proposed TF-T2V. + +# 4.4. Ablation study + +Effect of temporal coherence loss. To enhance temporal consistency, we propose a temporal coherence loss. In Tab. 7, we show the effectiveness of the proposed tem + +![](images/1dbd35f68d3c338efe18cc55eaf235a11a30806683003399d7976da847c27853.jpg) +Figure 4. Qualitative comparison on compositional depth-to-video generation. The videos are generated by taking textual prompts and structural guidance as conditions. Compared with existing methods, TF-T2V yields more structural compliance and high-fidelity results. + +![](images/3735fc2eb1cfb67dfa052700068ce00d8c9aebbfb953465cbb121db0d998a6b0.jpg) + +![](images/a1b38b182a3df35b98ff798c2d3c90b275be717432fd4f6a344f7da990385241.jpg) +Figure 5. Qualitative comparison on compositional sketch-to-video generation. The videos are generated by taking textual descriptions and structural guidance as conditions. Compared with other methods, TF-T2V produces more realistic and consistent results. + +![](images/a1a8b0c884ae1c5e9e28e865d0458d74b75583ff0d6c740b87b09fceff256270.jpg) + +poral coherence loss in terms of frame consistency. The metric results are obtained by calculating the average CLIP similarity of two consecutive frames in 1,000 videos. We further display the qualitative comparative results in Fig. 6 and observe that temporal coherence loss helps to alleviate temporal discontinuity such as color shift. + +# 4.5. Evaluation on semi-supervised setting + +Through the above experiments and observations, we verify that text-free video can help improve the continuity + +Table 7. Text-to-video evaluation on frame consistency. + +
| Method | Frame consistency (%) ↑ |
| --- | --- |
| w/o temporal coherence loss | 89.71 |
| TF-T2V | 91.06 |
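
The frame consistency reported in Table 7 is described as the average CLIP similarity between two consecutive frames. The sketch below illustrates one way to compute such a score; the frame encoder is passed in as a callable because, while the metric is CLIP-based, the exact checkpoint and preprocessing are not specified in the paper and are therefore assumptions here.

```python
import torch
import torch.nn.functional as F

def frame_consistency(frames, encode_frame):
    """Average cosine similarity between embeddings of adjacent frames.

    frames:       float tensor [F, C, H, W], frames of one generated video.
    encode_frame: callable mapping [F, C, H, W] to embeddings [F, D], e.g. a
                  CLIP image encoder (checkpoint and preprocessing unspecified).
    """
    with torch.no_grad():
        emb = F.normalize(encode_frame(frames), dim=-1)  # unit-length embeddings
        sims = (emb[:-1] * emb[1:]).sum(dim=-1)          # cosine sim of adjacent frame pairs
    return sims.mean()
```

Averaged over a set of generated videos (1,000 in this ablation), a higher score means fewer abrupt appearance changes between adjacent frames, which is the behavior the temporal coherence loss in Eq. (4) is designed to encourage.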
+ +and quality of generated video. As previously stated, TF-T2V also supports the combination of annotated videotext data and text-free videos to train the model, i.e., the semi-supervised manner. The annotated text can provide additional fine-grained motion signals, enhancing the align- + +![](images/9cac116d8a05d0dde3036325f45639c95e1ca48c34eadd6aca83ace489bf563d.jpg) +Figure 6. Qualitative ablation study. The videos are generated by taking textual descriptions and structural guidance as conditions. + +![](images/bb1c6fc511293c8ad61048f34b5192974e2968de7b14734700b312e9963bd4bb.jpg) + +![](images/e4eec659f65d2e744dda08a136d5a7f8dea9a5107245524e889a01243b104334.jpg) +Figure 7. Qualitative evaluation on text-to-video generation with temporally-correlated text prompts involving the evolution of movement. + +Table 8. Quantitative experiments on text-to-video generation. TF-T2V-Semi means the semi-supervised setting where labeled WebVid10M and text-free Internal10M are adopted. + +
| Method | FID (↓) | FVD (↓) | CLIPSIM (↑) |
| --- | --- | --- | --- |
| ModelScopeT2V [54] | 11.09 | 550 | 0.2930 |
| TF-T2V | 8.19 | 441 | 0.2991 |
| TF-T2V-Semi | 7.64 | 366 | 0.3032 |
+ +ment of generated videos and the provided prompts involving desired motion evolution. We show the comparison results in Tab. 8 and find that the semi-supervised manner reaches the best performance, indicating the effectiveness of harnessing text-free videos. Notably, TF-T2V-Semi outperforms ModelScopeT2V trained on labeled WebVid10M, possessing good scalability. Moreover, the qualitative evaluations in Fig. 7 show that existing methods may struggle to synthesize text-aligned consistent videos when textual prompts involve desired temporal evolution. In contrast, + +TF-T2V in the semi-supervised setting exhibits excellent text-video alignment and temporally smooth generation. + +# 5. Conclusion + +In this paper, we present a novel and versatile video generation framework named TF-T2V to exploit text-free videos and explore its scaling trend. TF-T2V effectively decomposes video generation into spatial appearance generation and motion dynamic synthesis. A temporal coherence loss is introduced to explicitly constrain the learning of correlations between adjacent frames. Experimental results demonstrate the effectiveness and potential of TF-T2V in terms of fidelity, controllability, and scalability. + +Acknowledgements. This work is supported by the National Natural Science Foundation of China under grant U22B2053 and Alibaba Research Intern Program. + +# References + +[1] Jie An, Songyang Zhang, Harry Yang, Sonal Gupta, Jia-Bin Huang, Jiebo Luo, and Xi Yin. Latent-shift: Latent diffusion with temporal shift for efficient text-to-video generation. arXiv preprint arXiv:2304.08477, 2023. 3, 5 +[2] Max Bain, Arsha Nagrani, Gül Varol, and Andrew Zisserman. Frozen in time: A joint video and image encoder for end-to-end retrieval. In ICCV, pages 1728-1738, 2021. 2, 3, 4 +[3] Yogesh Balaji, Martin Renqiang Min, Bing Bai, Rama Chellappa, and Hans Peter Graf. Conditional GAN with discriminative filter generation for text-to-video synthesis. In IJCAI, page 2, 2019. 3 +[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In CVPR, pages 22563-22575, 2023. 2, 3, 4, 5 +[5] Cerspense. Zeroscope: Diffusion-based text-to-video synthesis. https://huggingface.co/cerspense/zeroscope_v2_576w, 2023.6 +[6] Duygu Ceylan, Chun-Hao P Huang, and Niloy J Mitra. Pix2video: Video editing using image diffusion. In ICCV, pages 23206-23217, 2023. 3 +[7] Wenhao Chai, Xun Guo, Gaoang Wang, and Yan Lu. Stablevideo: Text-driven consistency-aware diffusion video editing. In ICCV, pages 23040-23050, 2023. +[8] Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. Motion-conditioned diffusion model for controllable video synthesis. arXiv preprint arXiv:2304.14404, 2023. 3 +[9] Weifeng Chen, Jie Wu, Pan Xie, Hefeng Wu, Jiashi Li, Xin Xia, Xuefeng Xiao, and Liang Lin. Control-a-video: Controllable text-to-video generation with diffusion models. arXiv preprint arXiv:2305.13840, 2023. 3 +[10] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. Flownet: Learning optical flow with convolutional networks. In ICCV, pages 2758-2766, 2015. 5 +[11] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming Transformers for high-resolution image synthesis. In CVPR, pages 12873-12883, 2021. 
4 +[12] Patrick Esser, Johnathan Chiu, Parmida Atighechian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In ICCV, pages 7346-7356, 2023. 2, 3 +[13] Rafail Fridman, Amit Abecasis, Yoni Kasten, and Tali Dekel. Scenescape: Text-driven consistent scene generation. arXiv preprint arXiv:2302.01133, 2023. 2 +[14] Songwei Ge, Seungjun Nah, Guilin Liu, Tyler Poon, Andrew Tao, Bryan Catanzaro, David Jacobs, Jia-Bin Huang, Ming-Yu Liu, and Yogesh Balaji. Preserve your own correlation: A noise prior for video diffusion models. In ICCV, pages 22930-22941, 2023. 3, 5 +[15] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. + +Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 3 +[16] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. NeurIPS, 27, 2014. 2 +[17] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 3 +[18] Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. Latent video diffusion models for high-fidelity video generation with arbitrary lengths. arXiv preprint arXiv:2211.13221, 2022. 3 +[19] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 2 +[20] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. NeurIPS, 33:6840-6851, 2020. 5 +[21] Jonathan Ho, William Chan, Chitwan Sahara, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 3, 4 +[22] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via Transformers. In ICLR, 2023. 2, 3, 5 +[23] Hanzhuo Huang, Yufan Feng, Cheng Shi, Lan Xu, Jingyi Yu, and Sibei Yang. Free-bloom: Zero-shot text-to-video generator with llm director and ldm animator. arXiv preprint arXiv:2309.14494, 2023. 3 +[24] Lianghua Huang, Di Chen, Yu Liu, Yujun Shen, Deli Zhao, and Jingren Zhou.Composer: Creative and controllable image synthesis with composable conditions.ICML, 2023. 3 +[25] Hueihan Jhuang, Juergen Gall, Silvia Zuffi, Cordelia Schmid, and Michael J Black. Towards understanding action recognition. In ICCV, pages 3192-3199, 2013. 4 +[26] Minguk Kang, Jun-Yan Zhu, Richard Zhang, Jaesik Park, Eli Shechtman, Sylvain Paris, and Taesung Park. Scaling up GANs for text-to-image synthesis. In CVPR, pages 10124-10134, 2023. 2 +[27] Bahjat Kawar, Shiran Zada, Oran Lang, Omer Tov, Huiwen Chang, Tali Dekel, Inbar Mosseri, and Michal Irani. Imagic: Text-based real image editing with diffusion models. In CVPR, pages 6007-6017, 2023. 2 +[28] Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439, 2023. 2, 3, 6 +[29] Ariel Lapid, Idan Achituve, Lior Bracha, and Ethan Fetaya. Gd-vdm: Generated depth for better diffusion-based video generation. arXiv preprint arXiv:2306.11173, 2023. 3 +[30] Jiangtong Li, Li Niu, and Liqing Zhang. 
Action-aware embedding enhancement for image-text retrieval. In AAAI, pages 1323-1331, 2022. 4 + +[31] Shaoteng Liu, Yuechen Zhang, Wenbo Li, Zhe Lin, and Jiaya Jia. Video-p2p: Video editing with cross-attention control. arXiv preprint arXiv:2303.04761, 2023. 3 +[32] Haoyu Lu, Guoxing Yang, Nanyi Fei, Yuqi Huo, Zhiwu Lu, Ping Luo, and Mingyu Ding. Vdt: An empirical study on video diffusion with Transformers. arXiv preprint arXiv:2305.13311, 2023. +[33] Zhengxiong Luo, Dayou Chen, Yingya Zhang, Yan Huang, Liang Wang, Yujun Shen, Deli Zhao, Jingren Zhou, and Tieniu Tan. Videofusion: Decomposed diffusion models for high-quality video generation. In CVPR, pages 10209-10218, 2023. 2, 3 +[34] Yifeng Ma, Shiwei Zhang, Jiayu Wang, Xiang Wang, Yingya Zhang, and Zhidong Deng. Dreamtalk: When expressive talking head generation meets diffusion probabilistic models. arXiv preprint arXiv:2312.09767, 2023. 2 +[35] Eyal Molad, Eliahu Horwitz, Dani Valevski, Alex Rav Acha, Yossi Matias, Yael Pritch, Yaniv Leviathan, and Yedid Hoshen. Dreamix: Video diffusion models are general video editors. arXiv preprint arXiv:2302.01329, 2023. 3 +[36] Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhonggang Qi, Ying Shan, and Xiaohu Qie. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453, 2023. 3 +[37] Haomiao Ni, Changhao Shi, Kai Li, Sharon X Huang, and Martin Renqiang Min. Conditional image-to-video generation with latent flow diffusion models. In CVPR, pages 18444-18455, 2023. 3 +[38] Alexander Quinn Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. In ICML, pages 16784-16804. PMLR, 2022. 2 +[39] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing attentions for zero-shot text-based video editing. In ICCV, 2023. 2, 3 +[40] Zhiwu Qing, Shiwei Zhang, Jiayu Wang, Xiang Wang, Yujie Wei, Yingya Zhang, Changxin Gao, and Nong Sang. Hierarchical spatio-temporal decoupling for text-to-video generation. arXiv preprint arXiv:2312.04483, 2023. 2 +[41] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763. PMLR, 2021. 2, 4 +[42] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 2, 3 +[43] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, pages 10684-10695, 2022. 2, 3, 4 +[44] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, pages 22500-22510, 2023. + +[45] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. NeurIPS, 35:36479-36494, 2022. 2 +[46] Tim Salimans and Jonathan Ho. Progressive distillation for fast sampling of diffusion models. 
arXiv preprint arXiv:2202.00512, 2022. 4 +[47] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114, 2021. 4 +[48] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. NeurIPS, 35:25278-25294, 2022. 2, 4, 5 +[49] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in GANs. In CVPR, pages 1532-1540, 2021. 2 +[50] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. ICLR, 2023. 2, 3, 5 +[51] Ivan Skorokhodov, Sergey Tulyakov, and Mohamed Elhoseiny. StyleGAN-v: A continuous video generator with the price, image quality and perks of StyleGAN2. In CVPR, pages 3626-3636, 2022. 3 +[52] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In ICLR, 2021. 5 +[53] Sergey Tulyakov, Ming-Yu Liu, Xiaodong Yang, and Jan Kautz. MocoGAN: Decomposing motion and content for video generation. In CVPR, pages 1526-1535, 2018. 2, 3 +[54] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 2, 3, 4, 5, 6, 8 +[55] Limin Wang, Zhan Tong, Bin Ji, and Gangshan Wu. Tdn: Temporal difference networks for efficient action recognition. In CVPR, pages 1895-1904, 2021. 4 +[56] Wen Wang, Kangyang Xie, Zide Liu, Hao Chen, Yue Cao, Xinlong Wang, and Chunhua Shen. Zero-shot video editing using off-the-shelf image diffusion models. arXiv preprint arXiv:2303.17599, 2023. 3 +[57] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 3, 4 +[58] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. NeurIPS, 2023. 2, 3, 4, 5, 6 +[59] Xiang Wang, Shiwei Zhang, Zhiwu Qing, Changxin Gao, Yingya Zhang, Deli Zhao, and Nong Sang. Molo: Motion- + +augmented long-short contrastive learning for few-shot action recognition. In CVPR, pages 18011-18021, 2023. 4 +[60] Xiang Wang, Shiwei Zhang, Han Zhang, Yu Liu, Yingya Zhang, Changxin Gao, and Nong Sang. Videolcm: Video latent consistency model. arXiv preprint arXiv:2312.09109, 2023. 2 +[61] Yaohui Wang, Piotr Bilinski, Francois Bremond, and Antitza Dantcheva. G3an: Disentangling appearance and motion for video generation. In CVPR, pages 5264-5273, 2020. 3 +[62] Yaohui Wang, Xinyuan Chen, Xin Ma, Shangchen Zhou, Ziqi Huang, Yi Wang, Ceyuan Yang, Yinan He, Jiashuo Yu, Peiqing Yang, et al. Lavie: High-quality video generation with cascaded latent diffusion models. arXiv preprint arXiv:2309.15103, 2023. 3 +[63] Yi Wang, Yinan He, Yizhuo Li, Kunchang Li, Jiashuo Yu, Xin Ma, Xinyuan Chen, Yaohui Wang, Ping Luo, Ziwei Liu, et al. Intervid: A large-scale video-text dataset for multimodal understanding and generation. arXiv preprint arXiv:2307.06942, 2023. 3 +[64] Yuhan Wang, Liming Jiang, and Chen Change Loy. 
Styleinv: A temporal style modulated inversion network for unconditional video generation. In ICCV, pages 22851-22861, 2023. 3 +[65] Yujie Wei, Shiwei Zhang, Zhiwu Qing, Hangjie Yuan, Zhiheng Liu, Yu Liu, Yingya Zhang, Jingren Zhou, and Hongming Shan. Dreamvideo: Composing your dream videos with customized subject and motion. arXiv preprint arXiv:2312.04433, 2023. 3 +[66] Chenfei Wu, Jian Liang, Lei Ji, Fan Yang, Yuejian Fang, Daxin Jiang, and Nan Duan. Nüwa: Visual synthesis pretraining for neural visual world creation. In ECCV, pages 720-736. Springer, 2022. 5 +[67] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. In ICCV, pages 7623-7633, 2023. 2, 3 +[68] Jinbo Xing, Menghan Xia, Yuxin Liu, Yuechen Zhang, Yong Zhang, Yingqing He, Hanyuan Liu, Haoxin Chen, Xiaodong Cun, Xintao Wang, et al. Make-your-video: Customized video generation using textual and structural guidance. arXiv preprint arXiv:2306.00943, 2023. 3 +[69] Zhen Xing, Qi Dai, Han Hu, Zuxuan Wu, and Yu-Gang Jiang. Simda: Simple diffusion adapter for efficient video generation. arXiv preprint arXiv:2308.09710, 2023. 2, 3 +[70] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. In CVPR, pages 5288-5296, 2016. 5 +[71] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In CVPR, 2022. 3 +[72] Shengming Yin, Chenfei Wu, Jian Liang, Jie Shi, Houqiang Li, Gong Ming, and Nan Duan. Dragnuwa: Fine-grained control in video generation by integrating text, image, and trajectory. arXiv preprint arXiv:2308.08089, 2023. 3 + +[73] Lijun Yu, Yong Cheng, Kihyuk Sohn, José Lezama, Han Zhang, Huiwen Chang, Alexander G Hauptmann, Ming-Hsuan Yang, Yuan Hao, Irfan Essa, et al. Magvit: Masked generative video Transformer. In CVPR, pages 10459-10469, 2023. 3 +[74] Sihyun Yu, Kihyuk Sohn, Subin Kim, and Jinwoo Shin. Video probabilistic diffusion models in projected latent space. In CVPR, pages 18456-18466, 2023. 3 +[75] Hangjie Yuan, Shiwei Zhang, Xiang Wang, Yujie Wei, Tao Feng, Yining Pan, Yingya Zhang, Ziwei Liu, Samuel Albanie, and Dong Ni. Instructvideo: Instructing video diffusion models with human feedback. arXiv preprint arXiv:2312.12490, 2023. +[76] David Junhao Zhang, Jay Zhangjie Wu, Jia-Wei Liu, Rui Zhao, Lingmin Ran, Yuchao Gu, Difei Gao, and Mike Zheng Shou. Show-1: Marrying pixel and latent diffusion models for text-to-video generation. arXiv preprint arXiv:2309.15818, 2023. 3 +[77] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In ICCV, pages 3836-3847, 2023. 3, 4 +[78] Shiwei Zhang, Jiayu Wang, Yingya Zhang, Kang Zhao, Hangjie Yuan, Zhiwu Qin, Xiang Wang, Deli Zhao, and Jingren Zhou. I2vgen-xl: High-quality image-to-video synthesis via cascaded diffusion models. arXiv preprint arXiv:2311.04145, 2023. 2 +[79] Yabo Zhang, Yuxiang Wei, Dongsheng Jiang, Xiaopeng Zhang, Wangmeng Zuo, and Qi Tian. Controlvideo: Training-free controllable text-to-video generation. arXiv preprint arXiv:2305.13077, 2023. 2, 3 +[80] Zhang Zhang and Dacheng Tao. Slow feature analysis for human action recognition. TPAMI, 34(3):436-450, 2012. 4 +[81] Min Zhao, Rongzhen Wang, Fan Bao, Chongxuan Li, and Jun Zhu. 
Controlvideo: Adding conditional control for one shot text-to-video editing. arXiv preprint arXiv:2305.17098, 2023. 3 +[82] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 5 +[83] Junbao Zhuo, Xingyu Zhao, Shuhui Wang, Huimin Ma, and Qingming Huang. Synthesizing videos from images for image-to-video adaptation. In ACMMM, pages 8294-8303, 2023. 2 \ No newline at end of file diff --git a/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/images.zip b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..4154e5c02dcc79d9686ff09d4883530dec610633 --- /dev/null +++ b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da3cb6b3a6bab89e8a3190206dc6c732df60ae86178122b3fc565b0475d9ca74 +size 1287367 diff --git a/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/layout.json b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..71369a46573d26af74b77e776173ae5875c30715 --- /dev/null +++ b/2024/A Recipe for Scaling up Text-to-Video Generation with Text-free Videos/layout.json @@ -0,0 +1,8415 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 75, + 103, + 518, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 103, + 518, + 121 + ], + "spans": [ + { + "bbox": [ + 75, + 103, + 518, + 121 + ], + "type": "text", + "content": "A Recipe for Scaling up Text-to-Video Generation with Text-free Videos" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "spans": [ + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "text", + "content": "Xiang Wang" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "text", + "content": " Shiwei Zhang" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{2\\dagger}" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "text", + "content": " Hangjie Yuan" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "text", + "content": " Zhiwu Qing" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "text", + "content": " Biao Gong" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "text", + "content": " Yingya Zhang" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "text", + "content": " Yujun Shen" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "text", + "content": " Changxin Gao" + }, + { + "bbox": [ + 73, + 
141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "text", + "content": " Nong Sang" + }, + { + "bbox": [ + 73, + 141, + 516, + 173 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 59, + 176, + 534, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 176, + 534, + 221 + ], + "spans": [ + { + "bbox": [ + 59, + 176, + 534, + 221 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 59, + 176, + 534, + 221 + ], + "type": "text", + "content": "Key Laboratory of Image Processing and Intelligent Control, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology " + }, + { + "bbox": [ + 59, + 176, + 534, + 221 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 59, + 176, + 534, + 221 + ], + "type": "text", + "content": "Alibaba Group " + }, + { + "bbox": [ + 59, + 176, + 534, + 221 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 59, + 176, + 534, + 221 + ], + "type": "text", + "content": "Zhejiang University " + }, + { + "bbox": [ + 59, + 176, + 534, + 221 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 59, + 176, + 534, + 221 + ], + "type": "text", + "content": "Ant Group" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 135, + 220, + 462, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 220, + 462, + 239 + ], + "spans": [ + { + "bbox": [ + 135, + 220, + 462, + 239 + ], + "type": "text", + "content": "{wxiang,qzw,cgao,nsang}@hust.edu.cn,{zhangjin.zsw,yingya.zyy}@alibaba-inc.com hj.yuan@zju.edu.cn,{a.biao.gong,shenyujun0302}@gmail.com" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 136, + 242, + 504, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 242, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 136, + 242, + 504, + 251 + ], + "type": "text", + "content": "\"Close-up of Caucasian mother and baby girl sitting at windowsill and reading book. Young woman educating daughter at home.\"" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 61, + 251, + 531, + 355 + ], + "blocks": [ + { + "bbox": [ + 61, + 251, + 531, + 355 + ], + "lines": [ + { + "bbox": [ + 61, + 251, + 531, + 355 + ], + "spans": [ + { + "bbox": [ + 61, + 251, + 531, + 355 + ], + "type": "image", + "image_path": "d328df4e5ed3fa12e9f8353718952f20550582afdba3ec6f9d53d0bf2d3ba22f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 61, + 356, + 531, + 537 + ], + "blocks": [ + { + "bbox": [ + 61, + 356, + 531, + 537 + ], + "lines": [ + { + "bbox": [ + 61, + 356, + 531, + 537 + ], + "spans": [ + { + "bbox": [ + 61, + 356, + 531, + 537 + ], + "type": "image", + "image_path": "8afa70d7b19d287c2a4dda7cacecbeaaddc7cab81f17bad8a617543f4bddcc07.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 537, + 546, + 559 + ], + "lines": [ + { + "bbox": [ + 46, + 537, + 546, + 559 + ], + "spans": [ + { + "bbox": [ + 46, + 537, + 546, + 559 + ], + "type": "text", + "content": "Figure 1. Example video results generated by the proposed TF-T2V on text-to-video generation and compositional video synthesis tasks without training on any video-text pairs." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 143, + 574, + 192, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 574, + 192, + 586 + ], + "spans": [ + { + "bbox": [ + 143, + 574, + 192, + 586 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 599, + 288, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 599, + 288, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 599, + 288, + 696 + ], + "type": "text", + "content": "Diffusion-based text-to-video generation has witnessed impressive progress in the past year yet still falls behind text-to-image generation. One of the key reasons is the limited scale of publicly available data (e.g., 10M video-text pairs in WebVid10M vs. 5B image-text pairs in LAION), considering the high cost of video captioning. Instead, it could be far easier to collect unlabeled clips from video platforms like YouTube. Motivated by this, we come up" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 574, + 547, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 574, + 547, + 707 + ], + "spans": [ + { + "bbox": [ + 306, + 574, + 547, + 707 + ], + "type": "text", + "content": "with a novel text-to-video generation framework, termed " + }, + { + "bbox": [ + 306, + 574, + 547, + 707 + ], + "type": "inline_equation", + "content": "TF-T2V" + }, + { + "bbox": [ + 306, + 574, + 547, + 707 + ], + "type": "text", + "content": ", which can directly learn with text-free videos. The rationale behind is to separate the process of text decoding from that of temporal modeling. To this end, we employ a content branch and a motion branch, which are jointly optimized with weights shared. Following such a pipeline, we study the effect of doubling the scale of training set (i.e., video-only WebVid10M) with some randomly collected text-free videos and are encouraged to observe the performance improvement (FID from 9.67 to 8.19 and FVD from 484 to 441), demonstrating the scalability of" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 16 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 16 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 16 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 198, + 14, + 442, + 24 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 14, + 442, + 24 + ], + "spans": [ + { + "bbox": [ + 198, + 14, + 442, + 24 + ], + "type": "text", + "content": "Except for this watermark, it is identical to the accepted version;" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 179, + 25, + 462, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 25, + 462, + 37 + ], + "spans": [ + { + "bbox": [ + 179, + 25, + 462, + 37 + ], + "type": "text", + "content": "the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 703, + 237, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 703, + 237, + 713 + ], + "spans": [ + { + "bbox": [ + 61, + 703, + 237, + 713 + ], + "type": "text", + "content": "* Intern at Alibaba Group. † Corresponding authors." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6572" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 72, + 288, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 72, + 288, + 156 + ], + "spans": [ + { + "bbox": [ + 45, + 72, + 288, + 156 + ], + "type": "text", + "content": "our approach. We also find that our model could enjoy sustainable performance gain (FID from 8.19 to 7.64 and FVD from 441 to 366) after reintroducing some text labels for training. Finally, we validate the effectiveness and generalizability of our ideology on both native text-to-video generation and compositional video synthesis paradigms. Code and models will be publicly available at here." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 167, + 128, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 167, + 128, + 179 + ], + "spans": [ + { + "bbox": [ + 47, + 167, + 128, + 179 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 186, + 287, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 186, + 287, + 402 + ], + "spans": [ + { + "bbox": [ + 45, + 186, + 287, + 402 + ], + "type": "text", + "content": "Video generation aims to synthesize realistic videos that possess visually appealing spatial contents and temporally coherent motions. It has witnessed unprecedented progress in recent years with the advent of deep generative techniques [22, 53], especially with the emergence of video diffusion models [4, 34, 40, 54, 60, 67, 78]. Pioneering approaches [28, 33, 67] utilize pure image diffusion models or fine-tuning on a small amount of video-text data to synthesize videos, leading to temporally discontinuous results due to insufficient motion perception [39, 79]. To achieve plausible results, current text-to-video methods like VideoLDM [4] and ModelScopeT2V [54] usually insert temporal blocks into latent 2D-UNet [43] and train the model on expansive video-text datasets, e.g., WebVid10M [2]. To enable more controllable generation, VideoComposer [58] proposes a compositional paradigm that incorporates additional conditions (e.g., depth, sketch, motion vectors, etc.) to guide synthesis, allowing customizable creation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 403, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 403, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 45, + 403, + 287, + 605 + ], + "type": "text", + "content": "Despite this, the progress in text-to-video generation still falls behind text-to-image generation [42, 43]. One of the key reasons is the limited scale of publicly available videotext data, considering the high cost of video captioning [83]. Instead, it could be far easier to collect text-free video clips from media platforms like YouTube. 
There are some works sharing similar inspiration, Make-A-Video [50] and Gen-1 [12] employ a two-step strategy that first leverages a large (" + }, + { + "bbox": [ + 45, + 403, + 287, + 605 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 45, + 403, + 287, + 605 + ], + "type": "text", + "content": "1B parameters) diffusion prior model [42] to convert text embedding into image embedding of CLIP [41] and then enters it into an image-conditioned generator to synthesize videos. However, the separate two-step manner may cause issues such as error accumulation [13], increased model size and latency [42, 69], and does not support text-conditional optimization if extra video-text data is available, leading to sub-optimal results. Moreover, the characteristics of scaling potential on video generation are still under-explored." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 606, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 606, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 45, + 606, + 287, + 714 + ], + "type": "text", + "content": "In this work, we aim to train a single unified video diffusion model that allows text-guided video generation by exploiting the widely accessible text-free videos and explore its scaling trend. To achieve this, we present a novel two-branch framework named TF-T2V, where a content branch is designed for spatial appearance generation, and a motion branch specializes in temporal dynamics synthesis. More specifically, we utilize the publicly available image-text datasets such as LAION-5B [48] to learn text-guided" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": "and image-guided spatial appearance generation. In the motion branch, we harness the video-only data to conduct image-conditioned video synthesis, allowing the temporal modules to learn intricate motion patterns without relying on textual annotations. Paired video-text data, if available, can also be incorporated into co-optimization. Furthermore, unlike previous methods that impose training loss on each frame individually, we introduce a temporal coherence loss to explicitly enforce the learning of correlations between adjacent frames, enhancing the continuity of generated videos. In this way, the proposed TF-T2V achieves text-to-video generation by assembling contents and motions with a unified model, overcoming the high cost of video captioning and eliminating the need for complex cascading steps." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 241, + 546, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 241, + 546, + 445 + ], + "spans": [ + { + "bbox": [ + 304, + 241, + 546, + 445 + ], + "type": "text", + "content": "Notably, TF-T2V is a plug-and-play paradigm, which can be integrated into existing text-to-video generation and compositional video synthesis frameworks as shown in Fig. 1. Different from most prior works that rely heavily on video-text data and train models on the widely-used watermarked and low-resolution (around 360P) WebVid10M [2], TF-T2V opens up new possibilities for optimizing with text-free videos or partially paired video-text data, making it more scalable and versatile in widespread scenarios, such as high-definition video generation. 
To study the scaling trend, we double the scale of the training set with some randomly collected text-free videos and are encouraged to observe the performance improvement, with FID from 9.67 to 8.19 and FVD from 484 to 441. Extensive quantitative and qualitative experiments collectively demonstrate the effectiveness and scaling potential of the proposed TF-T2V in terms of synthetic continuity, fidelity, and controllability." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 460, + 392, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 460, + 392, + 472 + ], + "spans": [ + { + "bbox": [ + 306, + 460, + 392, + 472 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 482, + 545, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 482, + 545, + 518 + ], + "spans": [ + { + "bbox": [ + 304, + 482, + 545, + 518 + ], + "type": "text", + "content": "In this section, we provide a brief review of relevant literature on text-to-image generation, text-to-video generation, and compositional video synthesis." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 522, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 546, + 714 + ], + "type": "text", + "content": "Text-to-image generation. Recently, text-to-image generation has made significant strides with the development of large-scale image-text datasets such as LAION-5B [48], allowing users to create high-resolution and photorealistic images that accurately depict the given natural language descriptions. Previous methods [16, 26, 49] primarily focus on synthesizing images by adopting generative adversarial networks (GANs) to estimate training sample distributions. Distinguished by the promising stability and scalability, diffusion-based generation models have attracted increasing attention [27, 42-45]. Diffusion models utilize iterative steps to gradually refine the generated image, resulting in improved quality and realism. Typically,Imagen [45] and GLIDE [38] explore text-conditional diffusion models and boost sample quality by applying classifier-free guidance [19]. 
DALL-E 2 [42] first leverages an image prior" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6573" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 72, + 427, + 243 + ], + "blocks": [ + { + "bbox": [ + 62, + 72, + 427, + 243 + ], + "lines": [ + { + "bbox": [ + 62, + 72, + 427, + 243 + ], + "spans": [ + { + "bbox": [ + 62, + 72, + 427, + 243 + ], + "type": "image", + "image_path": "aada0f9a862f0c672dc173904d4a8c31e67f4cb754cc05b7c7ea0481fa8840f8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 432, + 72, + 531, + 245 + ], + "blocks": [ + { + "bbox": [ + 432, + 72, + 531, + 245 + ], + "lines": [ + { + "bbox": [ + 432, + 72, + 531, + 245 + ], + "spans": [ + { + "bbox": [ + 432, + 72, + 531, + 245 + ], + "type": "image", + "image_path": "792581ee81c6e66e56eee6847243bfbfe763b19931c1d9603acb0bc60e620fc8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 244, + 547, + 301 + ], + "lines": [ + { + "bbox": [ + 45, + 244, + 547, + 301 + ], + "spans": [ + { + "bbox": [ + 45, + 244, + 547, + 301 + ], + "type": "text", + "content": "Figure 2. Overall pipeline of TF-T2V, which consists of two branches. In the content branch, paired image-text data is leveraged to learn text-conditioned and image-conditioned spatial appearance generation. The motion branch supports the training of motion dynamic synthesis by feeding text-free videos (or partially paired video-text data if available). During the training stage, both branches are optimized jointly. Notably, TF-T2V can be seamlessly integrated into the compositional video synthesis framework by incorporating composable conditions. In inference, TF-T2V enables text-guided video generation by taking text prompts and random noise sequences as input." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 309, + 288, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 309, + 288, + 393 + ], + "spans": [ + { + "bbox": [ + 46, + 309, + 288, + 393 + ], + "type": "text", + "content": "to bridge multi-modal embedding spaces and then learns a diffusion decoder to synthesize images in the pixel space. Stable Diffusion [43] introduces latent diffusion models that conduct iterative denoising processes at the latent level to save computational costs. There are also some works that generate customized and desirable images by incorporating additional spatial control signals [24, 36, 77]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 399, + 288, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 399, + 288, + 674 + ], + "spans": [ + { + "bbox": [ + 46, + 399, + 288, + 674 + ], + "type": "text", + "content": "Text-to-video generation. This task poses additional challenges compared to text-to-image generation due to the temporal dynamics involved in videos. 
Various early techniques have been proposed to tackle this problem, such as recurrent neural networks combined with GANs [3, 51, 53, 61, 64] or transformer-based autoregressive models [22, 73]. With the subsequent advent of video diffusion models pretrained on large-scale video-text datasets [2, 63, 71], video content creation has demonstrated remarkable advances [1, 4, 6-9, 14, 15, 17, 18, 21, 23, 28, 31-33, 35, 37, 39, 56, 57, 62, 65, 67, 69, 74-76]. Imagen Video [21] learns cascaded pixel-level diffusion models to produce high-resolution videos. Following [42], Make-A-Video [50] introduces a two-step strategy that first maps the input text to image embedding by a large (" + }, + { + "bbox": [ + 46, + 399, + 288, + 674 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 46, + 399, + 288, + 674 + ], + "type": "text", + "content": "1B parameters) diffusion prior model and then embeds the resulting embedding into an image-conditional video diffusion model to synthesize videos in pixel space. VideoLDM [4] and ModelScopeT2V [54] extend 2D-UNet into 3D-UNet by injecting temporal layers and operate a latent denoising process to save computational resources. In this paper, we present a single unified framework for text-to-video generation and study the scaling trend by harnessing widely accessible text-free videos." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": "Compositional video synthesis. Traditional text-to-video methods solely rely on textual descriptions to control the video generation process, limiting desired fine-grained cus-" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 309, + 547, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 309, + 547, + 525 + ], + "spans": [ + { + "bbox": [ + 304, + 309, + 547, + 525 + ], + "type": "text", + "content": "tomization such as texture, object position, motion patterns, etc. To tackle this constraint and pursue higher controllability, several controllable video synthesis methods [8, 9, 12, 29, 58, 68, 72, 79, 81] have been proposed. These methods utilize additional control signals, such as depth or sketch, to guide the generation of videos. By incorporating extra structured guidance, the generated content can be precisely controlled and customized. Among these approaches, VideoComposer [58] stands out as a pioneering and versatile compositional technique. It integrates multiple conditioning signals including textual, spatial and temporal conditions within a unified framework, offering enhanced controllability, compositionality, and realism in the generated videos. Despite the remarkable quality, these methods still rely on high-quality video-text data to unleash powerful and customizable synthesis. In contrast, our method can be directly merged into existing controllable frameworks to customize videos by exploiting text-free videos." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 536, + 361, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 536, + 361, + 548 + ], + "spans": [ + { + "bbox": [ + 306, + 536, + 361, + 548 + ], + "type": "text", + "content": "3. 
Method" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 556, + 546, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 556, + 546, + 605 + ], + "spans": [ + { + "bbox": [ + 304, + 556, + 546, + 605 + ], + "type": "text", + "content": "We first provide a brief introduction to the preliminaries of the video diffusion model. Then, we will elaborate on the mechanisms of TF-T2V in detail. The overall framework of the proposed TF-T2V is displayed in Fig. 2." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 613, + 507, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 613, + 507, + 624 + ], + "spans": [ + { + "bbox": [ + 306, + 613, + 507, + 624 + ], + "type": "text", + "content": "3.1. Preliminaries of video diffusion model" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 631, + 545, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 631, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 304, + 631, + 545, + 679 + ], + "type": "text", + "content": "Diffusion models involve a forward diffusion process and a reverse iterative denoising stage. The forward process of diffusion models is gradually imposing random noise to clean data " + }, + { + "bbox": [ + 304, + 631, + 545, + 679 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 304, + 631, + 545, + 679 + ], + "type": "text", + "content": " in a Markovian chain:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 688, + 545, + 713 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 688, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 312, + 688, + 545, + 713 + ], + "type": "interline_equation", + "content": "q \\left(x _ {t} \\mid x _ {t - 1}\\right) = \\mathcal {N} \\left(x _ {t}; \\sqrt {1 - \\beta_ {t - 1}} x _ {t - 1}, \\beta_ {t} I\\right), t = 1, \\dots , T \\tag {1}", + "image_path": "6fefe069e02342e8d889f556cded74b4387acbe90a5b39a1703c3a22b84a9e94.jpg" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6574" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\beta_{t} \\in (0,1)" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " is a noise schedule and " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " is the total time step. When " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " is sufficiently large, e.g. 
" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "T = 1000" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": ", the resulting " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "x_{T}" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " is nearly a random Gaussian distribution " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0,I)" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": ". The role of diffusion model is to denoise " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "x_{T}" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " and learn to iteratively estimate the reversed process:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 73, + 142, + 287, + 156 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 142, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 73, + 142, + 287, + 156 + ], + "type": "interline_equation", + "content": "p _ {\\theta} (x _ {t - 1} | x _ {t}) = \\mathcal {N} (x _ {t - 1}; \\mu_ {\\theta} (x _ {t}, t), \\sum_ {\\theta} (x _ {t}, t)) \\tag {2}", + "image_path": "87da9218eae2645cadbe4088ffb01bd16071178038e9ba122f0a6ac700386e12.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 164, + 287, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 164, + 287, + 201 + ], + "spans": [ + { + "bbox": [ + 47, + 164, + 287, + 201 + ], + "type": "text", + "content": "We usually train a denoising model " + }, + { + "bbox": [ + 47, + 164, + 287, + 201 + ], + "type": "inline_equation", + "content": "\\hat{x}_{\\theta}" + }, + { + "bbox": [ + 47, + 164, + 287, + 201 + ], + "type": "text", + "content": " parameterized by " + }, + { + "bbox": [ + 47, + 164, + 287, + 201 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 47, + 164, + 287, + 201 + ], + "type": "text", + "content": " to approximate the original data " + }, + { + "bbox": [ + 47, + 164, + 287, + 201 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 47, + 164, + 287, + 201 + ], + "type": "text", + "content": " and optimize the following v-prediction [21, 46] problem:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 100, + 209, + 287, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 209, + 287, + 224 + ], + "spans": [ + { + "bbox": [ + 100, + 209, + 287, + 224 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {b a s e}} = \\mathbb {E} _ {\\theta} [ \\| v - \\hat {x} _ {\\theta} (x _ {t}, t, c) \\| _ {2} ^ {2} ] \\tag {3}", + "image_path": "0cfa11e4abfe0f9b12a28afe47effe64a35b7d3be22d836c780b1bdaf768e858.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 233, + 287, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 233, + 287, + 328 + ], + "spans": [ + { + "bbox": [ + 46, + 233, + 287, + 328 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 233, + 287, + 328 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 46, + 233, + 287, + 328 + ], + "type": "text", + "content": " is conditional information such as textual prompt, and " + }, + { + "bbox": [ + 46, + 233, + 287, + 328 + ], + "type": "inline_equation", + 
"content": "v" + }, + { + "bbox": [ + 46, + 233, + 287, + 328 + ], + "type": "text", + "content": " is the parameterized prediction objective. In representative video diffusion models [4, 54, 58], the denoising model " + }, + { + "bbox": [ + 46, + 233, + 287, + 328 + ], + "type": "inline_equation", + "content": "\\hat{x}_{\\theta}" + }, + { + "bbox": [ + 46, + 233, + 287, + 328 + ], + "type": "text", + "content": " is a latent 3D-UNet [4, 54] modified from its 2D version [43] by inserting additional temporal blocks, which is optimized in the latent feature space by applying a variational autoencoder [11], and Eq. (3) is applied on each frame of the input video to train the whole model." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 336, + 107, + 347 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 336, + 107, + 347 + ], + "spans": [ + { + "bbox": [ + 47, + 336, + 107, + 347 + ], + "type": "text", + "content": "3.2. TF-T2V" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 356, + 287, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 356, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 46, + 356, + 287, + 510 + ], + "type": "text", + "content": "The objective of TF-T2V is to learn a text-conditioned video diffusion model to create visually appealing and temporally coherent videos with text-free videos or partially paired video-text data. Without loss of generality, we first describe the workflow of our TF-T2V in the scenario where only text-free video is used. With merely text-free videos available for training, it is challenging to guide content creation by textual information since there lacks text-visual correspondence. To tackle this issue, we propose to resort to web-scale and high-quality image-text datasets [47, 48], which are publicly accessible on the Internet. However, this raises another question: how can we leverage the image-text data and text-free videos in a unified framework?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 511, + 287, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 511, + 287, + 654 + ], + "spans": [ + { + "bbox": [ + 46, + 511, + 287, + 654 + ], + "type": "text", + "content": "Recalling the network architecture in 3D-UNet, the spatial modules mainly focus on appearance modeling, and the temporal modules primarily aim to operate motion coherence. The intuition is that we can utilize image-text data to learn text-conditioned spatial appearance generation and adopt high-quality text-free videos to guide consistent motion dynamic synthesis. In this way, we can perform text-to-video generation in a single model to synthesize high-quality and consistent videos during the inference stage. Based on this, the proposed TF-T2V consists of two branches: a content branch for spatial appearance generation and a motion branch for motion dynamic synthesis." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 670, + 209, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 209, + 682 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 209, + 682 + ], + "type": "text", + "content": "3.2.1 Spatial appearance generation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "content": "Like previous text-to-image works [43, 77], the content branch of TF-T2V takes a noised image " + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "inline_equation", + "content": "I_{image} \\in H \\times" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "W \\times C" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " as input, where " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " are the height, width, and channel dimensions respectively, and employs conditional signals (i.e., text and image embeddings) to offer semantic guidance for content generation. This branch primarily concentrates on optimizing the spatial modules in the video diffusion model and plays a crucial role in determining appealing visual quality. In order to ensure that each condition can also control the created content separately, we randomly drop text or image embeddings with a certain probability during training. The text and image encoders from CLIP [41] are adopted to encode embeddings." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 224, + 449, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 224, + 449, + 236 + ], + "spans": [ + { + "bbox": [ + 306, + 224, + 449, + 236 + ], + "type": "text", + "content": "3.2.2 Motion dynamic synthesis" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 244, + 545, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 244, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 304, + 244, + 545, + 376 + ], + "type": "text", + "content": "The pursuit of producing highly temporally consistent videos is a unique hallmark of video creation. Recent advancements [4, 54, 57, 58] in the realm of video synthesis usually utilize large-scale video-text datasets such as WebVid10M [2] to achieve coherent video generation. However, acquiring large-scale video-text pairs consumes extensive manpower and time, hindering the scaling up of video diffusion models. To make matters worse, the widely used WebVid10M is a watermarked and low-resolution (around 360P) dataset, resulting in unsatisfactory video creation that cannot meet the high-quality video synthesis requirements." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 377, + 545, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 377, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 377, + 545, + 533 + ], + "type": "text", + "content": "To mitigate the above issues, we propose to leverage high-quality text-free videos that are easily accessible on video media platforms, e.g., YouTube and TikTok. To fully excavate the abundant motion dynamics within the text-free videos, we train a image-conditioned model. By optimizing this image-to-video generation task, the temporal modules in the video diffusion model can learn to perceive and model diverse motion dynamics. Specifically, given a noised video " + }, + { + "bbox": [ + 304, + 377, + 545, + 533 + ], + "type": "inline_equation", + "content": "I_{video} \\in F \\times H \\times W \\times C" + }, + { + "bbox": [ + 304, + 377, + 545, + 533 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 377, + 545, + 533 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 377, + 545, + 533 + ], + "type": "text", + "content": " is the temporal length, the motion branch of TF-T2V learns to recover the undisturbed video guided by the image embedding. The image embedding is extracted from the center frame of the original video by applying CLIP's image encoder [41]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 533, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 545, + 628 + ], + "type": "text", + "content": "Since large-scale image-text data used for training contains abundant movement intentions [30], TF-T2V can achieve text-to-video generation by assembling spatial appearances involving motion trends and predicted motion dynamics. When extra paired video-text data is available, we conduct both text-to-video and image-to-video generation based on video-text pairs to train TF-T2V and further enhance the perception ability for desirable textual control." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": "In addition, we notice that previous works apply the training loss (i.e., Eq. (3)) on each frame of the input video individually without considering temporal correlations between frames, suffering from incoherent appearances and motions. Inspired by the early study [25, 55, 59, 80] finding that the difference between two adjacent frames usually contains motion patterns, e.g., dynamic trajectory, we thus" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6575" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 83, + 539, + 222 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 545, + 81 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 545, + 81 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 545, + 81 + ], + "type": "text", + "content": "Table 1. 
Quantitative comparison with state-of-the-art methods for text-to-video task on MSR-VTT in terms of FID, FVD, and CLIPSIM." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 83, + 539, + 222 + ], + "lines": [ + { + "bbox": [ + 53, + 83, + 539, + 222 + ], + "spans": [ + { + "bbox": [ + 53, + 83, + 539, + 222 + ], + "type": "table", + "html": "
MethodZero-shotParametersFID (↓)FVD (↓)CLIPSIM (↑)
Nüwa [66]No-47.68-0.2439
CogVideo (Chinese) [22]Yes15.5B24.78-0.2614
CogVideo (English) [22]Yes15.5B23.5912940.2631
MagicVideo [82]Yes--1290-
Make-A-Video [50]Yes9.7B13.17-0.3049
ModelScopeT2V [54]Yes1.7B11.095500.2930
VideoComposer [58]Yes1.9B10.775800.2932
Latent-Shift [1]Yes1.5B15.23-0.2773
VideoLDM [4]Yes4.2B--0.2929
PYoCo [14]Yes-9.73--
TF-T2V (WebVid10M)Yes1.8B9.674840.2953
TF-T2V (WebVid10M+Internal10M)Yes1.8B8.194410.2991
", + "image_path": "5257ba0c927e3d21e64cfd986d59e872210b0a096b7deb033adb40c815a5fc31.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 232, + 287, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 232, + 287, + 257 + ], + "spans": [ + { + "bbox": [ + 46, + 232, + 287, + 257 + ], + "type": "text", + "content": "propose a temporal coherence loss that utilizes the frame difference as an additional supervisory signal:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 262, + 287, + 279 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 262, + 287, + 279 + ], + "spans": [ + { + "bbox": [ + 52, + 262, + 287, + 279 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {c o h e r e n c e}} = \\mathbb {E} _ {\\theta} [ \\sum_ {j = 1} ^ {F - 1} | | (v _ {j + 1} - v _ {j}) - (o _ {j + 1} - o _ {j}) | | _ {2} ^ {2} ] \\tag {4}", + "image_path": "d6a9ba69c319152f55ec441f16ad4bcbeae91f40270e4b4160ce382249282016.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 285, + 287, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 285, + 287, + 369 + ], + "spans": [ + { + "bbox": [ + 46, + 285, + 287, + 369 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 285, + 287, + 369 + ], + "type": "inline_equation", + "content": "o_j" + }, + { + "bbox": [ + 46, + 285, + 287, + 369 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 285, + 287, + 369 + ], + "type": "inline_equation", + "content": "v_j" + }, + { + "bbox": [ + 46, + 285, + 287, + 369 + ], + "type": "text", + "content": " are the predicted frame and corresponding ground truth. This loss term measures the discrepancy between the predicted frame differences and the ground truth frame differences of the input parameterized video. By minimizing Eq. (4), TF-T2V helps to alleviate frame flickering and ensures that the generated videos exhibit seamless transitions and promising temporal dynamics." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 382, + 178, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 382, + 178, + 395 + ], + "spans": [ + { + "bbox": [ + 47, + 382, + 178, + 395 + ], + "type": "text", + "content": "3.2.3 Training and inference" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 401, + 287, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 401, + 287, + 449 + ], + "spans": [ + { + "bbox": [ + 46, + 401, + 287, + 449 + ], + "type": "text", + "content": "In order to mine the complementary advantages of spatial appearance generation and motion dynamic synthesis, we jointly optimize the entire model in an end-to-end manner. 
The total loss can be formulated as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 458, + 287, + 471 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 458, + 287, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 458, + 287, + 471 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {b a s e}} + \\lambda \\mathcal {L} _ {\\text {c o h e r e n c e}} \\tag {5}", + "image_path": "d7faa9cf19bcc7d4b42d762517f285587b896f6d648ade25bb4f4d7664336638.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 479, + 287, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 479, + 287, + 514 + ], + "spans": [ + { + "bbox": [ + 46, + 479, + 287, + 514 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 479, + 287, + 514 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{base}" + }, + { + "bbox": [ + 46, + 479, + 287, + 514 + ], + "type": "text", + "content": " is imposed on video and image together by treating the image as a \"single frame\" video, and " + }, + { + "bbox": [ + 46, + 479, + 287, + 514 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 479, + 287, + 514 + ], + "type": "text", + "content": " is a balance coefficient that is set empirically to 0.1." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 514, + 287, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 514, + 287, + 597 + ], + "spans": [ + { + "bbox": [ + 46, + 514, + 287, + 597 + ], + "type": "text", + "content": "After training, we can perform text-guided video generation to synthesize temporally consistent video content that aligns well with the given text prompt. Moreover, TF-T2V is a general framework and can also be inserted into existing compositional video synthesis paradigm [58] by incorporating additional spatial and temporal structural conditions, allowing for customized video creation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 608, + 128, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 608, + 128, + 622 + ], + "spans": [ + { + "bbox": [ + 47, + 608, + 128, + 622 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 628, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 628, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 628, + 287, + 665 + ], + "type": "text", + "content": "In this section, we present a comprehensive quantitative and qualitative evaluation of the proposed TF-T2V on text-to-video generation and composition video synthesis." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 671, + 162, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 671, + 162, + 685 + ], + "spans": [ + { + "bbox": [ + 47, + 671, + 162, + 685 + ], + "type": "text", + "content": "4.1. Experimental setup" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "content": "Implementation details. 
TF-T2V is built on two typical open-source baselines, i.e., ModelScopeT2V [54] and" + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 310, + 242, + 538, + 287 + ], + "blocks": [ + { + "bbox": [ + 307, + 231, + 542, + 241 + ], + "lines": [ + { + "bbox": [ + 307, + 231, + 542, + 241 + ], + "spans": [ + { + "bbox": [ + 307, + 231, + 542, + 241 + ], + "type": "text", + "content": "Table 2. Human preference results on text-to-video generation." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 242, + 538, + 287 + ], + "lines": [ + { + "bbox": [ + 310, + 242, + 538, + 287 + ], + "spans": [ + { + "bbox": [ + 310, + 242, + 538, + 287 + ], + "type": "table", + "html": "
MethodText alignmentVisual qualityTemporal coherence
ModelScopeT2V [54]83.5%74.0%81.3%
TF-T2V86.5%87.0%92.5%
", + "image_path": "fb647ca3b6f107076023e82613719da678dacd72efc7329db221b9c3279115a8.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 297, + 545, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 297, + 545, + 476 + ], + "spans": [ + { + "bbox": [ + 304, + 297, + 545, + 476 + ], + "type": "text", + "content": "VideoComposer [58]. DDPM sampler [20] with " + }, + { + "bbox": [ + 304, + 297, + 545, + 476 + ], + "type": "inline_equation", + "content": "T = 1000" + }, + { + "bbox": [ + 304, + 297, + 545, + 476 + ], + "type": "text", + "content": " steps is adopted for training, and we employ DDIM [52] with 50 steps for inference. We optimize TF-T2V using AdamW optimizer with a learning rate of 5e-5. For input videos, we sample 16 frames from each video at 4FPS and crop a " + }, + { + "bbox": [ + 304, + 297, + 545, + 476 + ], + "type": "inline_equation", + "content": "448 \\times 256" + }, + { + "bbox": [ + 304, + 297, + 545, + 476 + ], + "type": "text", + "content": " region at the center as the basic setting. Note that we can also easily train high-definition video diffusion models by collecting high-quality text-free videos (see examples in the Appendix). LAION-5B [48] is utilized to provide image-text pairs. Unless otherwise stated, we treat WebVid10M, which includes about 10.7M video-text pairs, as a text-free dataset to train TF-T2V and do not use any textual annotations. To study scaling trends, we gathered about 10M high-quality videos without text labels from internal data, termed the Internal10M dataset." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 480, + 545, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 480, + 545, + 648 + ], + "spans": [ + { + "bbox": [ + 304, + 480, + 545, + 648 + ], + "type": "text", + "content": "Metrics. (i) To evaluate text-to-video generation, following previous works [4, 54], we leverage the standard Fréchet Inception Distance (FID), Fréchet Video Distance (FVD), and CLIP Similarity (CLIPSIM) as quantitative evaluation metrics and report results on MSR-VTT dataset [70]. (ii) For controllability evaluation, we leverage depth error, sketch error, and end-point-error (EPE) [10] to verify whether the generated videos obey the control of input conditions. Depth error measures the divergence between the input depth conditions and the eliminated depth of the synthesized video. Similarly, sketch error examines the sketch control. EPE evaluates the flow consistency between the reference video and the generated video. In addition, human evaluation is also introduced to validate our method." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 658, + 508, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 508, + 671 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 508, + 671 + ], + "type": "text", + "content": "4.2. Evaluation on text-to-video generation" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "Tab. 1 displays the comparative quantitative results with existing state-of-the-art methods. We observe that TF-T2V achieves remarkable performance under various metrics." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6576" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 71, + 319, + 228 + ], + "blocks": [ + { + "bbox": [ + 62, + 71, + 319, + 228 + ], + "lines": [ + { + "bbox": [ + 62, + 71, + 319, + 228 + ], + "spans": [ + { + "bbox": [ + 62, + 71, + 319, + 228 + ], + "type": "image", + "image_path": "fb7d52d8fa8ea8a6059e66f7a6648f27fbc0ee0ab5ebdb07e5f5a7f8aec808d0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 325, + 81, + 529, + 228 + ], + "blocks": [ + { + "bbox": [ + 361, + 72, + 493, + 80 + ], + "lines": [ + { + "bbox": [ + 361, + 72, + 493, + 80 + ], + "spans": [ + { + "bbox": [ + 361, + 72, + 493, + 80 + ], + "type": "text", + "content": "\"Portrait of smiling young woman outdoors\"" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 325, + 81, + 529, + 228 + ], + "lines": [ + { + "bbox": [ + 325, + 81, + 529, + 228 + ], + "spans": [ + { + "bbox": [ + 325, + 81, + 529, + 228 + ], + "type": "image", + "image_path": "080155e6c674a561b7bf2c8f0acb899b74590c7a7aadd92bc52dac9f0878e3c4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 61, + 229, + 318, + 384 + ], + "blocks": [ + { + "bbox": [ + 61, + 229, + 318, + 384 + ], + "lines": [ + { + "bbox": [ + 61, + 229, + 318, + 384 + ], + "spans": [ + { + "bbox": [ + 61, + 229, + 318, + 384 + ], + "type": "image", + "image_path": "07158c131aa37a10d9155b3328822e13ac461918504a4a1bd8e7e17bcf879c95.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 384, + 545, + 404 + ], + "lines": [ + { + "bbox": [ + 46, + 384, + 545, + 404 + ], + "spans": [ + { + "bbox": [ + 46, + 384, + 545, + 404 + ], + "type": "text", + "content": "Figure 3. Qualitative comparison on text-to-video generation. Three representative open-source text-to-video approaches are compared, including ModelScopeT2V [54], Text2video-Zero [28] and ZeroScope [5]. Please refer to the Appendix for videos and more comparisons." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 325, + 229, + 529, + 383 + ], + "blocks": [ + { + "bbox": [ + 325, + 229, + 529, + 383 + ], + "lines": [ + { + "bbox": [ + 325, + 229, + 529, + 383 + ], + "spans": [ + { + "bbox": [ + 325, + 229, + 529, + 383 + ], + "type": "image", + "image_path": "518f579bc505d8932a972d7ec19ea75385874abf3e5d255f6e01b7dfc5eb9a9b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 50, + 425, + 280, + 469 + ], + "blocks": [ + { + "bbox": [ + 48, + 413, + 285, + 424 + ], + "lines": [ + { + "bbox": [ + 48, + 413, + 285, + 424 + ], + "spans": [ + { + "bbox": [ + 48, + 413, + 285, + 424 + ], + "type": "text", + "content": "Table 3. Evaluation of structure control based on depth signals." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 425, + 280, + 469 + ], + "lines": [ + { + "bbox": [ + 50, + 425, + 280, + 469 + ], + "spans": [ + { + "bbox": [ + 50, + 425, + 280, + 469 + ], + "type": "table", + "html": "
MethodConditionDepth error (↓)
VideoComposer [58]Text0.382
VideoComposer [58]Text and depth0.217
TF-T2VText and depth0.209
", + "image_path": "1841e8335e9b3555f8d5cc5d21f46eefeedbc1740d14f963dad8802e037c19a0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 49, + 486, + 282, + 531 + ], + "blocks": [ + { + "bbox": [ + 47, + 475, + 286, + 485 + ], + "lines": [ + { + "bbox": [ + 47, + 475, + 286, + 485 + ], + "spans": [ + { + "bbox": [ + 47, + 475, + 286, + 485 + ], + "type": "text", + "content": "Table 4. Evaluation of structure control based on sketch signals." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 486, + 282, + 531 + ], + "lines": [ + { + "bbox": [ + 49, + 486, + 282, + 531 + ], + "spans": [ + { + "bbox": [ + 49, + 486, + 282, + 531 + ], + "type": "table", + "html": "
MethodConditionSketch error (↓)
VideoComposer [58]Text0.1854
VideoComposer [58]Text and sketch0.1161
TF-T2VText and sketch0.1146
", + "image_path": "5389185c465f9660033640805ba99302605e2361b7091b15f3ed3a5c2405e15e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 544, + 287, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 651 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 651 + ], + "type": "text", + "content": "Notably, TF-T2V trained on WebVid10M and Internal10M obtains higher performance than the counterpart on WebVid10M, revealing promising scalable capability. We show the qualitative visualizations in Fig. 3. From the results, we can find that compared with previous methods, TF-T2V obtains impressive video creation in terms of both temporal continuity and visual quality. The human assessment in Tab. 2 also reveals the above observations. The user study is performed on 100 randomly synthesized videos." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 659, + 276, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 659, + 276, + 672 + ], + "spans": [ + { + "bbox": [ + 47, + 659, + 276, + 672 + ], + "type": "text", + "content": "4.3. Evaluation on compositional video synthesis" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "We compare the controllability of TF-T2V and VideoComposer on 1,000 generated videos in terms of depth control (Tab. 3), sketch control (Tab. 4) and motion control" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 309, + 425, + 539, + 469 + ], + "blocks": [ + { + "bbox": [ + 307, + 413, + 544, + 423 + ], + "lines": [ + { + "bbox": [ + 307, + 413, + 544, + 423 + ], + "spans": [ + { + "bbox": [ + 307, + 413, + 544, + 423 + ], + "type": "text", + "content": "Table 5. Evaluation of motion control based on motion vectors." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 425, + 539, + 469 + ], + "lines": [ + { + "bbox": [ + 309, + 425, + 539, + 469 + ], + "spans": [ + { + "bbox": [ + 309, + 425, + 539, + 469 + ], + "type": "table", + "html": "
MethodConditionEPE (↓)
VideoComposer [58]Text4.13
VideoComposer [58]Text and motion vector1.98
TF-T2VText and motion vector1.88
", + "image_path": "9e20fa0134515430f3a71c1750d175c46e5d97450c2c7abd986cd5c9895b65a6.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 309, + 486, + 538, + 531 + ], + "blocks": [ + { + "bbox": [ + 309, + 475, + 541, + 485 + ], + "lines": [ + { + "bbox": [ + 309, + 475, + 541, + 485 + ], + "spans": [ + { + "bbox": [ + 309, + 475, + 541, + 485 + ], + "type": "text", + "content": "Table 6. Human evaluations on compositional video synthesis." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 486, + 538, + 531 + ], + "lines": [ + { + "bbox": [ + 309, + 486, + 538, + 531 + ], + "spans": [ + { + "bbox": [ + 309, + 486, + 538, + 531 + ], + "type": "table", + "html": "
MethodStructure alignmentVisual qualityTemporal coherence
VideoComposer [58]79.0%66.0%77.5%
TF-T2V89.0%79.5%84.5%
", + "image_path": "5e080d26e7c16aff2ce64ff85c48bd80e8d451b4e53d7a57db74f6c77f6d0956.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 544, + 545, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 544, + 545, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 544, + 545, + 652 + ], + "type": "text", + "content": "(Tab. 5). The above experimental evaluations highlight the effectiveness of TF-T2V by leveraging text-free videos. In Fig. 4 and 5, we show the comparison of TF-T2V and existing methods on compositional video generation. We notice that TF-T2V exhibits high-fidelity and consistent video generation. In addition, we conduct a human evaluation on 100 randomly sampled videos and report the results in Tab. 6. The preference assessment provides further evidence of the superiority of the proposed TF-T2V." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 659, + 397, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 659, + 397, + 672 + ], + "spans": [ + { + "bbox": [ + 306, + 659, + 397, + 672 + ], + "type": "text", + "content": "4.4. Ablation study" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 677, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 546, + 713 + ], + "type": "text", + "content": "Effect of temporal coherence loss. To enhance temporal consistency, we propose a temporal coherence loss. In Tab. 7, we show the effectiveness of the proposed tem" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6577" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 70, + 321, + 327 + ], + "blocks": [ + { + "bbox": [ + 61, + 70, + 321, + 327 + ], + "lines": [ + { + "bbox": [ + 61, + 70, + 321, + 327 + ], + "spans": [ + { + "bbox": [ + 61, + 70, + 321, + 327 + ], + "type": "image", + "image_path": "1dbd35f68d3c338efe18cc55eaf235a11a30806683003399d7976da847c27853.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 327, + 545, + 349 + ], + "lines": [ + { + "bbox": [ + 47, + 327, + 545, + 349 + ], + "spans": [ + { + "bbox": [ + 47, + 327, + 545, + 349 + ], + "type": "text", + "content": "Figure 4. Qualitative comparison on compositional depth-to-video generation. The videos are generated by taking textual prompts and structural guidance as conditions. Compared with existing methods, TF-T2V yields more structural compliance and high-fidelity results." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 325, + 70, + 531, + 327 + ], + "blocks": [ + { + "bbox": [ + 325, + 70, + 531, + 327 + ], + "lines": [ + { + "bbox": [ + 325, + 70, + 531, + 327 + ], + "spans": [ + { + "bbox": [ + 325, + 70, + 531, + 327 + ], + "type": "image", + "image_path": "3735fc2eb1cfb67dfa052700068ce00d8c9aebbfb953465cbb121db0d998a6b0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 71, + 350, + 320, + 559 + ], + "blocks": [ + { + "bbox": [ + 71, + 350, + 320, + 559 + ], + "lines": [ + { + "bbox": [ + 71, + 350, + 320, + 559 + ], + "spans": [ + { + "bbox": [ + 71, + 350, + 320, + 559 + ], + "type": "image", + "image_path": "a1b38b182a3df35b98ff798c2d3c90b275be717432fd4f6a344f7da990385241.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 559, + 544, + 581 + ], + "lines": [ + { + "bbox": [ + 46, + 559, + 544, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 559, + 544, + 581 + ], + "type": "text", + "content": "Figure 5. Qualitative comparison on compositional sketch-to-video generation. The videos are generated by taking textual descriptions and structural guidance as conditions. Compared with other methods, TF-T2V produces more realistic and consistent results." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 325, + 350, + 518, + 559 + ], + "blocks": [ + { + "bbox": [ + 325, + 350, + 518, + 559 + ], + "lines": [ + { + "bbox": [ + 325, + 350, + 518, + 559 + ], + "spans": [ + { + "bbox": [ + 325, + 350, + 518, + 559 + ], + "type": "image", + "image_path": "a1a8b0c884ae1c5e9e28e865d0458d74b75583ff0d6c740b87b09fceff256270.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 590, + 287, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 590, + 287, + 662 + ], + "spans": [ + { + "bbox": [ + 46, + 590, + 287, + 662 + ], + "type": "text", + "content": "poral coherence loss in terms of frame consistency. The metric results are obtained by calculating the average CLIP similarity of two consecutive frames in 1,000 videos. We further display the qualitative comparative results in Fig. 6 and observe that temporal coherence loss helps to alleviate temporal discontinuity such as color shift." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 671, + 247, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 671, + 247, + 685 + ], + "spans": [ + { + "bbox": [ + 47, + 671, + 247, + 685 + ], + "type": "text", + "content": "4.5. 
Evaluation on semi-supervised setting" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "content": "Through the above experiments and observations, we verify that text-free video can help improve the continuity" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 310, + 601, + 537, + 635 + ], + "blocks": [ + { + "bbox": [ + 322, + 590, + 529, + 600 + ], + "lines": [ + { + "bbox": [ + 322, + 590, + 529, + 600 + ], + "spans": [ + { + "bbox": [ + 322, + 590, + 529, + 600 + ], + "type": "text", + "content": "Table 7. Text-to-video evaluation on frame consistency." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 601, + 537, + 635 + ], + "lines": [ + { + "bbox": [ + 310, + 601, + 537, + 635 + ], + "spans": [ + { + "bbox": [ + 310, + 601, + 537, + 635 + ], + "type": "table", + "html": "
MethodFrame consistency (%) ↑
w/o temporal coherence loss89.71
TF-T2V91.06
", + "image_path": "de1e66da63f19f858b29938b48e6004c61d15e9f3bd43036876d91712bfc96ad.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 654, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 654, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 654, + 545, + 715 + ], + "type": "text", + "content": "and quality of generated video. As previously stated, TF-T2V also supports the combination of annotated videotext data and text-free videos to train the model, i.e., the semi-supervised manner. The annotated text can provide additional fine-grained motion signals, enhancing the align-" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6578" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 69, + 323, + 232 + ], + "blocks": [ + { + "bbox": [ + 52, + 69, + 323, + 232 + ], + "lines": [ + { + "bbox": [ + 52, + 69, + 323, + 232 + ], + "spans": [ + { + "bbox": [ + 52, + 69, + 323, + 232 + ], + "type": "image", + "image_path": "9cac116d8a05d0dde3036325f45639c95e1ca48c34eadd6aca83ace489bf563d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 56, + 234, + 536, + 246 + ], + "lines": [ + { + "bbox": [ + 56, + 234, + 536, + 246 + ], + "spans": [ + { + "bbox": [ + 56, + 234, + 536, + 246 + ], + "type": "text", + "content": "Figure 6. Qualitative ablation study. The videos are generated by taking textual descriptions and structural guidance as conditions." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 327, + 70, + 541, + 232 + ], + "blocks": [ + { + "bbox": [ + 327, + 70, + 541, + 232 + ], + "lines": [ + { + "bbox": [ + 327, + 70, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 327, + 70, + 541, + 232 + ], + "type": "image", + "image_path": "bb1c6fc511293c8ad61048f34b5192974e2968de7b14734700b312e9963bd4bb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 53, + 250, + 541, + 481 + ], + "blocks": [ + { + "bbox": [ + 53, + 250, + 541, + 481 + ], + "lines": [ + { + "bbox": [ + 53, + 250, + 541, + 481 + ], + "spans": [ + { + "bbox": [ + 53, + 250, + 541, + 481 + ], + "type": "image", + "image_path": "e4eec659f65d2e744dda08a136d5a7f8dea9a5107245524e889a01243b104334.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 482, + 545, + 494 + ], + "lines": [ + { + "bbox": [ + 47, + 482, + 545, + 494 + ], + "spans": [ + { + "bbox": [ + 47, + 482, + 545, + 494 + ], + "type": "text", + "content": "Figure 7. Qualitative evaluation on text-to-video generation with temporally-correlated text prompts involving the evolution of movement." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 50, + 536, + 285, + 580 + ], + "blocks": [ + { + "bbox": [ + 47, + 504, + 287, + 536 + ], + "lines": [ + { + "bbox": [ + 47, + 504, + 287, + 536 + ], + "spans": [ + { + "bbox": [ + 47, + 504, + 287, + 536 + ], + "type": "text", + "content": "Table 8. Quantitative experiments on text-to-video generation. TF-T2V-Semi means the semi-supervised setting where labeled WebVid10M and text-free Internal10M are adopted." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 536, + 285, + 580 + ], + "lines": [ + { + "bbox": [ + 50, + 536, + 285, + 580 + ], + "spans": [ + { + "bbox": [ + 50, + 536, + 285, + 580 + ], + "type": "table", + "html": "
MethodFID (↓)FVD (↓)CLIPSIM (↑)
ModelScopeT2V [54]11.095500.2930
TF-T2V8.194410.2991
TF-T2V-Semi7.643660.3032
", + "image_path": "5d07baf0c9cdb2cf88b5df55e2beb40574d90c63b9e9d8bf089e5e19c1713372.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "content": "ment of generated videos and the provided prompts involving desired motion evolution. We show the comparison results in Tab. 8 and find that the semi-supervised manner reaches the best performance, indicating the effectiveness of harnessing text-free videos. Notably, TF-T2V-Semi outperforms ModelScopeT2V trained on labeled WebVid10M, possessing good scalability. Moreover, the qualitative evaluations in Fig. 7 show that existing methods may struggle to synthesize text-aligned consistent videos when textual prompts involve desired temporal evolution. In contrast," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 506, + 545, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 506, + 545, + 531 + ], + "spans": [ + { + "bbox": [ + 306, + 506, + 545, + 531 + ], + "type": "text", + "content": "TF-T2V in the semi-supervised setting exhibits excellent text-video alignment and temporally smooth generation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 543, + 379, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 543, + 379, + 555 + ], + "spans": [ + { + "bbox": [ + 306, + 543, + 379, + 555 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 563, + 547, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 563, + 547, + 673 + ], + "spans": [ + { + "bbox": [ + 304, + 563, + 547, + 673 + ], + "type": "text", + "content": "In this paper, we present a novel and versatile video generation framework named TF-T2V to exploit text-free videos and explore its scaling trend. TF-T2V effectively decomposes video generation into spatial appearance generation and motion dynamic synthesis. A temporal coherence loss is introduced to explicitly constrain the learning of correlations between adjacent frames. Experimental results demonstrate the effectiveness and potential of TF-T2V in terms of fidelity, controllability, and scalability." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "type": "text", + "content": "Acknowledgements. This work is supported by the National Natural Science Foundation of China under grant U22B2053 and Alibaba Research Intern Program." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6579" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Jie An, Songyang Zhang, Harry Yang, Sonal Gupta, Jia-Bin Huang, Jiebo Luo, and Xi Yin. Latent-shift: Latent diffusion with temporal shift for efficient text-to-video generation. arXiv preprint arXiv:2304.08477, 2023. 3, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 178 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 178 + ], + "type": "text", + "content": "[2] Max Bain, Arsha Nagrani, Gül Varol, and Andrew Zisserman. Frozen in time: A joint video and image encoder for end-to-end retrieval. In ICCV, pages 1728-1738, 2021. 2, 3, 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 181, + 287, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 181, + 287, + 225 + ], + "spans": [ + { + "bbox": [ + 53, + 181, + 287, + 225 + ], + "type": "text", + "content": "[3] Yogesh Balaji, Martin Renqiang Min, Bing Bai, Rama Chellappa, and Hans Peter Graf. Conditional GAN with discriminative filter generation for text-to-video synthesis. In IJCAI, page 2, 2019. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 227, + 287, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 227, + 287, + 281 + ], + "spans": [ + { + "bbox": [ + 53, + 227, + 287, + 281 + ], + "type": "text", + "content": "[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In CVPR, pages 22563-22575, 2023. 2, 3, 4, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 282, + 287, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 282, + 287, + 316 + ], + "spans": [ + { + "bbox": [ + 53, + 282, + 287, + 316 + ], + "type": "text", + "content": "[5] Cerspense. Zeroscope: Diffusion-based text-to-video synthesis. https://huggingface.co/cerspense/zeroscope_v2_576w, 2023.6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 317, + 287, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 317, + 287, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 317, + 287, + 350 + ], + "type": "text", + "content": "[6] Duygu Ceylan, Chun-Hao P Huang, and Niloy J Mitra. Pix2video: Video editing using image diffusion. In ICCV, pages 23206-23217, 2023. 
3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 351, + 287, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 351, + 287, + 384 + ], + "spans": [ + { + "bbox": [ + 53, + 351, + 287, + 384 + ], + "type": "text", + "content": "[7] Wenhao Chai, Xun Guo, Gaoang Wang, and Yan Lu. Stablevideo: Text-driven consistency-aware diffusion video editing. In ICCV, pages 23040-23050, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 386, + 287, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 386, + 287, + 429 + ], + "spans": [ + { + "bbox": [ + 53, + 386, + 287, + 429 + ], + "type": "text", + "content": "[8] Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. Motion-conditioned diffusion model for controllable video synthesis. arXiv preprint arXiv:2304.14404, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 430, + 287, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 430, + 287, + 474 + ], + "spans": [ + { + "bbox": [ + 53, + 430, + 287, + 474 + ], + "type": "text", + "content": "[9] Weifeng Chen, Jie Wu, Pan Xie, Hefeng Wu, Jiashi Li, Xin Xia, Xuefeng Xiao, and Liang Lin. Control-a-video: Controllable text-to-video generation with diffusion models. arXiv preprint arXiv:2305.13840, 2023. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 476, + 287, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 476, + 287, + 531 + ], + "spans": [ + { + "bbox": [ + 48, + 476, + 287, + 531 + ], + "type": "text", + "content": "[10] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. Flownet: Learning optical flow with convolutional networks. In ICCV, pages 2758-2766, 2015. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 533, + 287, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 533, + 287, + 565 + ], + "spans": [ + { + "bbox": [ + 48, + 533, + 287, + 565 + ], + "type": "text", + "content": "[11] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming Transformers for high-resolution image synthesis. In CVPR, pages 12873-12883, 2021. 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 567, + 287, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 287, + 610 + ], + "type": "text", + "content": "[12] Patrick Esser, Johnathan Chiu, Parmida Atighechian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In ICCV, pages 7346-7356, 2023. 2, 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 612, + 287, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 612, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 612, + 287, + 644 + ], + "type": "text", + "content": "[13] Rafail Fridman, Amit Abecasis, Yoni Kasten, and Tali Dekel. Scenescape: Text-driven consistent scene generation. arXiv preprint arXiv:2302.01133, 2023. 
2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 646, + 287, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 287, + 700 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 287, + 700 + ], + "type": "text", + "content": "[14] Songwei Ge, Seungjun Nah, Guilin Liu, Tyler Poon, Andrew Tao, Bryan Catanzaro, David Jacobs, Jia-Bin Huang, Ming-Yu Liu, and Yogesh Balaji. Preserve your own correlation: A noise prior for video diffusion models. In ICCV, pages 22930-22941, 2023. 3, 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 702, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 702, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 702, + 287, + 713 + ], + "type": "text", + "content": "[15] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 96, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 139 + ], + "type": "text", + "content": "[16] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. NeurIPS, 27, 2014. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 141, + 545, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 185 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 185 + ], + "type": "text", + "content": "[17] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 186, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 228 + ], + "type": "text", + "content": "[18] Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. Latent video diffusion models for high-fidelity video generation with arbitrary lengths. arXiv preprint arXiv:2211.13221, 2022. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 230, + 545, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 230, + 545, + 253 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 545, + 253 + ], + "type": "text", + "content": "[19] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 254, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 254, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 254, + 545, + 285 + ], + "type": "text", + "content": "[20] Jonathan Ho, Ajay Jain, and Pieter Abbeel. 
Denoising diffusion probabilistic models. NeurIPS, 33:6840-6851, 2020. 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 287, + 545, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 287, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 287, + 545, + 342 + ], + "type": "text", + "content": "[21] Jonathan Ho, William Chan, Chitwan Sahara, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 3, 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 343, + 545, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 343, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 307, + 343, + 545, + 376 + ], + "type": "text", + "content": "[22] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via Transformers. In ICLR, 2023. 2, 3, 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "spans": [ + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "type": "text", + "content": "[23] Hanzhuo Huang, Yufan Feng, Cheng Shi, Lan Xu, Jingyi Yu, and Sibei Yang. Free-bloom: Zero-shot text-to-video generator with llm director and ldm animator. arXiv preprint arXiv:2309.14494, 2023. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "type": "text", + "content": "[24] Lianghua Huang, Di Chen, Yu Liu, Yujun Shen, Deli Zhao, and Jingren Zhou.Composer: Creative and controllable image synthesis with composable conditions.ICML, 2023. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 467, + 545, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 467, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 307, + 467, + 545, + 499 + ], + "type": "text", + "content": "[25] Hueihan Jhuang, Juergen Gall, Silvia Zuffi, Cordelia Schmid, and Michael J Black. Towards understanding action recognition. In ICCV, pages 3192-3199, 2013. 4" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 501, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 501, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 501, + 545, + 544 + ], + "type": "text", + "content": "[26] Minguk Kang, Jun-Yan Zhu, Richard Zhang, Jaesik Park, Eli Shechtman, Sylvain Paris, and Taesung Park. Scaling up GANs for text-to-image synthesis. In CVPR, pages 10124-10134, 2023. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 545, + 545, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 545, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 307, + 545, + 545, + 590 + ], + "type": "text", + "content": "[27] Bahjat Kawar, Shiran Zada, Oran Lang, Omer Tov, Huiwen Chang, Tali Dekel, Inbar Mosseri, and Michal Irani. Imagic: Text-based real image editing with diffusion models. In CVPR, pages 6007-6017, 2023. 
2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 591, + 545, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 591, + 545, + 646 + ], + "spans": [ + { + "bbox": [ + 307, + 591, + 545, + 646 + ], + "type": "text", + "content": "[28] Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439, 2023. 2, 3, 6" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "type": "text", + "content": "[29] Ariel Lapid, Idan Achituve, Lior Bracha, and Ethan Fetaya. Gd-vdm: Generated depth for better diffusion-based video generation. arXiv preprint arXiv:2306.11173, 2023. 3" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "text", + "content": "[30] Jiangtong Li, Li Niu, and Liqing Zhang. Action-aware embedding enhancement for image-text retrieval. In AAAI, pages 1323-1331, 2022. 4" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6580" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[31] Shaoteng Liu, Yuechen Zhang, Wenbo Li, Zhe Lin, and Jiaya Jia. Video-p2p: Video editing with cross-attention control. arXiv preprint arXiv:2303.04761, 2023. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 106, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 106, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 106, + 287, + 149 + ], + "type": "text", + "content": "[32] Haoyu Lu, Guoxing Yang, Nanyi Fei, Yuqi Huo, Zhiwu Lu, Ping Luo, and Mingyu Ding. Vdt: An empirical study on video diffusion with Transformers. arXiv preprint arXiv:2305.13311, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 150, + 287, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 150, + 287, + 204 + ], + "spans": [ + { + "bbox": [ + 48, + 150, + 287, + 204 + ], + "type": "text", + "content": "[33] Zhengxiong Luo, Dayou Chen, Yingya Zhang, Yan Huang, Liang Wang, Yujun Shen, Deli Zhao, Jingren Zhou, and Tieniu Tan. Videofusion: Decomposed diffusion models for high-quality video generation. In CVPR, pages 10209-10218, 2023. 
2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 205, + 287, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 205, + 287, + 249 + ], + "spans": [ + { + "bbox": [ + 48, + 205, + 287, + 249 + ], + "type": "text", + "content": "[34] Yifeng Ma, Shiwei Zhang, Jiayu Wang, Xiang Wang, Yingya Zhang, and Zhidong Deng. Dreamtalk: When expressive talking head generation meets diffusion probabilistic models. arXiv preprint arXiv:2312.09767, 2023. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 250, + 287, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 250, + 287, + 294 + ], + "spans": [ + { + "bbox": [ + 48, + 250, + 287, + 294 + ], + "type": "text", + "content": "[35] Eyal Molad, Eliahu Horwitz, Dani Valevski, Alex Rav Acha, Yossi Matias, Yael Pritch, Yaniv Leviathan, and Yedid Hoshen. Dreamix: Video diffusion models are general video editors. arXiv preprint arXiv:2302.01329, 2023. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 294, + 287, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 294, + 287, + 338 + ], + "spans": [ + { + "bbox": [ + 48, + 294, + 287, + 338 + ], + "type": "text", + "content": "[36] Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhonggang Qi, Ying Shan, and Xiaohu Qie. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 338, + 287, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 338, + 287, + 381 + ], + "spans": [ + { + "bbox": [ + 48, + 338, + 287, + 381 + ], + "type": "text", + "content": "[37] Haomiao Ni, Changhao Shi, Kai Li, Sharon X Huang, and Martin Renqiang Min. Conditional image-to-video generation with latent flow diffusion models. In CVPR, pages 18444-18455, 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 383, + 287, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 383, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 48, + 383, + 287, + 437 + ], + "type": "text", + "content": "[38] Alexander Quinn Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. In ICML, pages 16784-16804. PMLR, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 437, + 287, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 437, + 287, + 480 + ], + "spans": [ + { + "bbox": [ + 48, + 437, + 287, + 480 + ], + "type": "text", + "content": "[39] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing attentions for zero-shot text-based video editing. In ICCV, 2023. 2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 481, + 287, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 481, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 48, + 481, + 287, + 525 + ], + "type": "text", + "content": "[40] Zhiwu Qing, Shiwei Zhang, Jiayu Wang, Xiang Wang, Yujie Wei, Yingya Zhang, Changxin Gao, and Nong Sang. Hierarchical spatio-temporal decoupling for text-to-video generation. arXiv preprint arXiv:2312.04483, 2023. 
2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 525, + 287, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 525, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 48, + 525, + 287, + 581 + ], + "type": "text", + "content": "[41] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763. PMLR, 2021. 2, 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 582, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 582, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 582, + 287, + 624 + ], + "type": "text", + "content": "[42] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 625, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 287, + 668 + ], + "type": "text", + "content": "[43] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, pages 10684-10695, 2022. 2, 3, 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "type": "text", + "content": "[44] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, pages 22500-22510, 2023." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 128 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 128 + ], + "type": "text", + "content": "[45] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. NeurIPS, 35:36479-36494, 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 129, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 161 + ], + "type": "text", + "content": "[46] Tim Salimans and Jonathan Ho. Progressive distillation for fast sampling of diffusion models. arXiv preprint arXiv:2202.00512, 2022. 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "text", + "content": "[47] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. 
Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114, 2021. 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 219, + 545, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 219, + 545, + 284 + ], + "spans": [ + { + "bbox": [ + 307, + 219, + 545, + 284 + ], + "type": "text", + "content": "[48] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. NeurIPS, 35:25278-25294, 2022. 2, 4, 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 285, + 545, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 285, + 545, + 318 + ], + "spans": [ + { + "bbox": [ + 307, + 285, + 545, + 318 + ], + "type": "text", + "content": "[49] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in GANs. In CVPR, pages 1532-1540, 2021. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 319, + 545, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 319, + 545, + 363 + ], + "spans": [ + { + "bbox": [ + 307, + 319, + 545, + 363 + ], + "type": "text", + "content": "[50] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. ICLR, 2023. 2, 3, 5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 364, + 545, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 364, + 545, + 408 + ], + "spans": [ + { + "bbox": [ + 307, + 364, + 545, + 408 + ], + "type": "text", + "content": "[51] Ivan Skorokhodov, Sergey Tulyakov, and Mohamed Elhoseiny. StyleGAN-v: A continuous video generator with the price, image quality and perks of StyleGAN2. In CVPR, pages 3626-3636, 2022. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 410, + 545, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 410, + 545, + 432 + ], + "spans": [ + { + "bbox": [ + 307, + 410, + 545, + 432 + ], + "type": "text", + "content": "[52] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In ICLR, 2021. 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 433, + 545, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 433, + 545, + 465 + ], + "spans": [ + { + "bbox": [ + 307, + 433, + 545, + 465 + ], + "type": "text", + "content": "[53] Sergey Tulyakov, Ming-Yu Liu, Xiaodong Yang, and Jan Kautz. MocoGAN: Decomposing motion and content for video generation. In CVPR, pages 1526-1535, 2018. 2, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 466, + 545, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 466, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 307, + 466, + 545, + 510 + ], + "type": "text", + "content": "[54] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 
2, 3, 4, 5, 6, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 511, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 511, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 511, + 545, + 544 + ], + "type": "text", + "content": "[55] Limin Wang, Zhan Tong, Bin Ji, and Gangshan Wu. Tdn: Temporal difference networks for efficient action recognition. In CVPR, pages 1895-1904, 2021. 4" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "spans": [ + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "type": "text", + "content": "[56] Wen Wang, Kangyang Xie, Zide Liu, Hao Chen, Yue Cao, Xinlong Wang, and Chunhua Shen. Zero-shot video editing using off-the-shelf image diffusion models. arXiv preprint arXiv:2303.17599, 2023. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 590, + 545, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 545, + 634 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 545, + 634 + ], + "type": "text", + "content": "[57] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 3, 4" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "type": "text", + "content": "[58] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. NeurIPS, 2023. 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "text", + "content": "[59] Xiang Wang, Shiwei Zhang, Zhiwu Qing, Changxin Gao, Yingya Zhang, Deli Zhao, and Nong Sang. Molo: Motion-" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "6581" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "text", + "content": "augmented long-short contrastive learning for few-shot action recognition. In CVPR, pages 18011-18021, 2023. 
4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 97, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 97, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 97, + 287, + 140 + ], + "type": "text", + "content": "[60] Xiang Wang, Shiwei Zhang, Han Zhang, Yu Liu, Yingya Zhang, Changxin Gao, and Nong Sang. Videolcm: Video latent consistency model. arXiv preprint arXiv:2312.09109, 2023. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 143, + 287, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 143, + 287, + 176 + ], + "spans": [ + { + "bbox": [ + 48, + 143, + 287, + 176 + ], + "type": "text", + "content": "[61] Yaohui Wang, Piotr Bilinski, Francois Bremond, and Antitza Dantcheva. G3an: Disentangling appearance and motion for video generation. In CVPR, pages 5264-5273, 2020. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 178, + 287, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 178, + 287, + 232 + ], + "spans": [ + { + "bbox": [ + 48, + 178, + 287, + 232 + ], + "type": "text", + "content": "[62] Yaohui Wang, Xinyuan Chen, Xin Ma, Shangchen Zhou, Ziqi Huang, Yi Wang, Ceyuan Yang, Yinan He, Jiashuo Yu, Peiqing Yang, et al. Lavie: High-quality video generation with cascaded latent diffusion models. arXiv preprint arXiv:2309.15103, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 234, + 287, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 234, + 287, + 289 + ], + "spans": [ + { + "bbox": [ + 48, + 234, + 287, + 289 + ], + "type": "text", + "content": "[63] Yi Wang, Yinan He, Yizhuo Li, Kunchang Li, Jiashuo Yu, Xin Ma, Xinyuan Chen, Yaohui Wang, Ping Luo, Ziwei Liu, et al. Intervid: A large-scale video-text dataset for multimodal understanding and generation. arXiv preprint arXiv:2307.06942, 2023. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 291, + 287, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 291, + 287, + 334 + ], + "spans": [ + { + "bbox": [ + 48, + 291, + 287, + 334 + ], + "type": "text", + "content": "[64] Yuhan Wang, Liming Jiang, and Chen Change Loy. Styleinv: A temporal style modulated inversion network for unconditional video generation. In ICCV, pages 22851-22861, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 337, + 287, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 337, + 287, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 337, + 287, + 392 + ], + "type": "text", + "content": "[65] Yujie Wei, Shiwei Zhang, Zhiwu Qing, Hangjie Yuan, Zhiheng Liu, Yu Liu, Yingya Zhang, Jingren Zhou, and Hongming Shan. Dreamvideo: Composing your dream videos with customized subject and motion. arXiv preprint arXiv:2312.04433, 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 394, + 287, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 394, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 48, + 394, + 287, + 437 + ], + "type": "text", + "content": "[66] Chenfei Wu, Jian Liang, Lei Ji, Fan Yang, Yuejian Fang, Daxin Jiang, and Nan Duan. Nüwa: Visual synthesis pretraining for neural visual world creation. In ECCV, pages 720-736. Springer, 2022. 
5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 440, + 287, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 440, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 48, + 440, + 287, + 495 + ], + "type": "text", + "content": "[67] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. In ICCV, pages 7623-7633, 2023. 2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 497, + 287, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 497, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 48, + 497, + 287, + 552 + ], + "type": "text", + "content": "[68] Jinbo Xing, Menghan Xia, Yuxin Liu, Yuechen Zhang, Yong Zhang, Yingqing He, Hanyuan Liu, Haoxin Chen, Xiaodong Cun, Xintao Wang, et al. Make-your-video: Customized video generation using textual and structural guidance. arXiv preprint arXiv:2306.00943, 2023. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 553, + 287, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 553, + 287, + 586 + ], + "spans": [ + { + "bbox": [ + 48, + 553, + 287, + 586 + ], + "type": "text", + "content": "[69] Zhen Xing, Qi Dai, Han Hu, Zuxuan Wu, and Yu-Gang Jiang. Simda: Simple diffusion adapter for efficient video generation. arXiv preprint arXiv:2308.09710, 2023. 2, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 588, + 287, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 588, + 287, + 621 + ], + "spans": [ + { + "bbox": [ + 48, + 588, + 287, + 621 + ], + "type": "text", + "content": "[70] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. In CVPR, pages 5288-5296, 2016. 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 667 + ], + "type": "text", + "content": "[71] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In CVPR, 2022. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "text", + "content": "[72] Shengming Yin, Chenfei Wu, Jian Liang, Jie Shi, Houqiang Li, Gong Ming, and Nan Duan. Dragnuwa: Fine-grained control in video generation by integrating text, image, and trajectory. arXiv preprint arXiv:2308.08089, 2023. 3" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 564 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "text", + "content": "[73] Lijun Yu, Yong Cheng, Kihyuk Sohn, José Lezama, Han Zhang, Huiwen Chang, Alexander G Hauptmann, Ming-Hsuan Yang, Yuan Hao, Irfan Essa, et al. Magvit: Masked generative video Transformer. 
In CVPR, pages 10459-10469, 2023. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 129, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 162 + ], + "type": "text", + "content": "[74] Sihyun Yu, Kihyuk Sohn, Subin Kim, and Jinwoo Shin. Video probabilistic diffusion models in projected latent space. In CVPR, pages 18456-18466, 2023. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "text", + "content": "[75] Hangjie Yuan, Shiwei Zhang, Xiang Wang, Yujie Wei, Tao Feng, Yining Pan, Yingya Zhang, Ziwei Liu, Samuel Albanie, and Dong Ni. Instructvideo: Instructing video diffusion models with human feedback. arXiv preprint arXiv:2312.12490, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 219, + 545, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 219, + 545, + 272 + ], + "spans": [ + { + "bbox": [ + 307, + 219, + 545, + 272 + ], + "type": "text", + "content": "[76] David Junhao Zhang, Jay Zhangjie Wu, Jia-Wei Liu, Rui Zhao, Lingmin Ran, Yuchao Gu, Difei Gao, and Mike Zheng Shou. Show-1: Marrying pixel and latent diffusion models for text-to-video generation. arXiv preprint arXiv:2309.15818, 2023. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 274, + 545, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 274, + 545, + 307 + ], + "spans": [ + { + "bbox": [ + 307, + 274, + 545, + 307 + ], + "type": "text", + "content": "[77] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In ICCV, pages 3836-3847, 2023. 3, 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 308, + 545, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 308, + 545, + 362 + ], + "spans": [ + { + "bbox": [ + 307, + 308, + 545, + 362 + ], + "type": "text", + "content": "[78] Shiwei Zhang, Jiayu Wang, Yingya Zhang, Kang Zhao, Hangjie Yuan, Zhiwu Qin, Xiang Wang, Deli Zhao, and Jingren Zhou. I2vgen-xl: High-quality image-to-video synthesis via cascaded diffusion models. arXiv preprint arXiv:2311.04145, 2023. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 364, + 545, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 364, + 545, + 407 + ], + "spans": [ + { + "bbox": [ + 307, + 364, + 545, + 407 + ], + "type": "text", + "content": "[79] Yabo Zhang, Yuxiang Wei, Dongsheng Jiang, Xiaopeng Zhang, Wangmeng Zuo, and Qi Tian. Controlvideo: Training-free controllable text-to-video generation. arXiv preprint arXiv:2305.13077, 2023. 2, 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 409, + 545, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 409, + 545, + 430 + ], + "spans": [ + { + "bbox": [ + 307, + 409, + 545, + 430 + ], + "type": "text", + "content": "[80] Zhang Zhang and Dacheng Tao. Slow feature analysis for human action recognition. TPAMI, 34(3):436-450, 2012. 
4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 432, + 545, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 432, + 545, + 474 + ], + "spans": [ + { + "bbox": [ + 307, + 432, + 545, + 474 + ], + "type": "text", + "content": "[81] Min Zhao, Rongzhen Wang, Fan Bao, Chongxuan Li, and Jun Zhu. Controlvideo: Adding conditional control for one shot text-to-video editing. arXiv preprint arXiv:2305.17098, 2023. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 476, + 545, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 476, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 307, + 476, + 545, + 520 + ], + "type": "text", + "content": "[82] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 521, + 545, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 521, + 545, + 564 + ], + "spans": [ + { + "bbox": [ + 307, + 521, + 545, + 564 + ], + "type": "text", + "content": "[83] Junbao Zhuo, Xingyu Zhao, Shuhui Wang, Huimin Ma, and Qingming Huang. Synthesizing videos from images for image-to-video adaptation. In ACMMM, pages 8294-8303, 2023. 2" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6582" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/4a280801-3209-4899-b345-f6dbc9c9ec52_content_list.json b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/4a280801-3209-4899-b345-f6dbc9c9ec52_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a66f1f6f427951d9a5357570a166254f2fcbe121 --- /dev/null +++ b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/4a280801-3209-4899-b345-f6dbc9c9ec52_content_list.json @@ -0,0 +1,2465 @@ +[ + { + "type": "text", + "text": "A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint", + "text_level": 1, + "bbox": [ + 86, + 130, + 883, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiaofeng Cong $^{1}$ Jie Gui $^{1*}$ Jing Zhang $^{2}$ Junming Hou $^{1}$ Hao Shen $^{3}$ $^{1}$ Southeast University $^{2}$ University of Sydney $^{3}$ Hefei University of Technology \ncxf_svip@163.com, {guijie, junming_hou}@seu.edu.cn, {jingzhang.cy, haoshenhs}@gmail.com", + "bbox": [ + 112, + 202, + 854, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 291, + 313, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Existing research based on deep learning has extensively explored the problem of daytime image dehazing. However, few studies have considered the characteristics of nighttime hazy scenes. 
There are two distinctions between nighttime and daytime haze. First, there may be multiple active colored light sources with lower illumination intensity in nighttime scenes, which may cause haze, glow and noise with localized, coupled and frequency inconsistent characteristics. Second, due to the domain discrepancy between simulated and real-world data, unrealistic brightness may occur when applying a dehazing model trained on simulated data to real-world data. To address the above two issues, we propose a semi-supervised model for real-world nighttime dehazing. First, the spatial attention and frequency spectrum filtering are implemented as a spatial-frequency domain information interaction module to handle the first issue. Second, a pseudo-label-based retraining strategy and a local window-based brightness loss for semi-supervised training process is designed to suppress haze and glow while achieving realistic brightness. Experiments on public benchmarks validate the effectiveness of the proposed method and its superiority over state-of-the-art methods. The source code and Supplementary Materials are placed in the https://github.com/Xiaofeng-life/SFSNiD.", + "bbox": [ + 75, + 323, + 473, + 686 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 715, + 209, + 729 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Nighttime and daytime images may contain hazy effects, which may cause their quality to be degraded [7, 15, 39, 50]. Therefore, two valuable research fields are proposed, which are daytime single image dehazing (DaSID) [2, 38, 54] and nighttime single image dehazing (NiSID) [14, 19, 31], respectively. Compared with the daytime hazy image, the imaging of the nighttime hazy image is more complex [28, 49]. 
Currently, NiSID is still a challenging problem.", + "bbox": [ + 75, + 739, + 468, + 861 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Existing research on DaSID [20, 25, 27, 34, 43, 48, 52,", + "bbox": [ + 96, + 861, + 468, + 876 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a33d695ef8f9fc31a28d41e5473915cd25a36ad8f9b2a8f3f6bf2613b399764d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 290, + 630, + 353 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2b1d3fc840b83a0715269c12a83f1689a798b9b2464081e3a8992eef4eba2c5f.jpg", + "image_caption": [ + "(a) Hazy" + ], + "image_footnote": [], + "bbox": [ + 504, + 353, + 630, + 414 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1dbb0964724e7b4f24d9cdf92ff3031f16d43c74e8b90295dbf443cf2d6986d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 632, + 290, + 758, + 353 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/537781b48e17bde0102d00a0edb8668f11f205dc415d80779ca0ff0556abec5c.jpg", + "image_caption": [ + "(b) IM-YellowHaze [26]" + ], + "image_footnote": [], + "bbox": [ + 632, + 353, + 756, + 414 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4ab5fd967d08912e272d0bad0f4f6f5564a3b65613c4b370f66d59da9858bed7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 759, + 290, + 887, + 353 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4eb97ee1d084f19f5763913d6a10dd76a3b4dc0d8102011ddc85cf43a7a4e7d7.jpg", + "image_caption": [ + "(c) IM-NightHaze [26]" + ], + "image_footnote": [], + "bbox": [ + 759, + 353, + 887, + 414 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7f7b2a86c660f1d9e4f4a168a2f0a93519799ea7f9397799cbe837b292d5eee0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 431, + 630, + 492 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/da101e16f6745e92d23f32670ec8a941c29b11bca10a37913f01ef50fcdaa2b3.jpg", + "image_caption": [ + "(d) IM-NHR [51]" + ], + "image_footnote": [], + "bbox": [ + 504, + 492, + 630, + 554 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d2f8fd9bc6e0c58035da4fe6674f003084b11b227b4717759e534de6274d269f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 632, + 431, + 758, + 492 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ea4291327498c84a7c199af06e65c2f41c2af6e6db82312b0a4c50433af740ba.jpg", + "image_caption": [ + "(e) GE-UNREAL-NH [31]" + ], + "image_footnote": [], + "bbox": [ + 632, + 492, + 756, + 554 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/43294787656ebc2ebe7ddfbc83fe3e712ab1f3e458b69498201689087745bab9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 759, + 431, + 887, + 492 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4d23e497a790337f1a70700cbfe7d6ee70f99a49c88ba1a00af7120260776ca5.jpg", + "image_caption": [ + "(f) Ours", + "Figure 1. Visualization of real-world dehazed images, where the \"IM\" and \"GE-\" denote the dehazed results obtained by training on imaging model (IM) and game engine (GE) simulated datasets, respectively. The curve figure represents the pixel histogram, where the $x$ and $y$ coordinates represent the pixel values and corresponding numbers, respectively. The $x$ and $y$ coordinates of the bar figure represent the color channel and the corresponding average pixel value, respectively." 
+ ], + "image_footnote": [], + "bbox": [ + 759, + 492, + 887, + 554 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "53] have achieved impressive performance. Various effective DaSID algorithms have been proposed and verified on benchmark daytime datasets [21]. However, these DaSID algorithms are designed for the properties of daytime hazy and haze-free images, without taking into account the characteristics of nighttime hazy and haze-free images.", + "bbox": [ + 496, + 717, + 890, + 808 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Currently, NiSID research is divided into two types, namely non-deep learning-based NiSID and deep learning-based NiSID. On the one hand, the prior hypotheses and statistical laws are explored [50, 51]. The maximum reflectance prior to estimate the varying ambient illumination is proposed by [50]. The illumination estimation, color", + "bbox": [ + 496, + 810, + 892, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author", + "bbox": [ + 94, + 887, + 220, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "2631", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "correction and image prior are integrated by [49]. On the other hand, the deep learning-based architectures are designed for the NiSID task [14, 31]. Liu et al. [31] combine the dark channel and bright channel prior with the Transformer mechanism [32] into an end-to-end training flow. The gradient-adaptive convolution and glow pair synthesis are designed by Jin et al. [14]. Existing learning-based algorithms have achieved remarkable performance on synthetic datasets. However, these methods still lack consideration of the characteristics of nighttime hazy images.", + "bbox": [ + 75, + 90, + 472, + 242 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "During the day, the main source of imaging light is sunlight [7]. The formation of the daytime hazy image can be described by the atmospheric scattering model [7] as", + "bbox": [ + 75, + 242, + 470, + 287 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nI (a) = J (a) t (a) + A (a) (1 - t (a)), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 151, + 296, + 468, + 313 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $I(a)$ , $J(a)$ , $t(a)$ and $A(a)$ denote the hazy image, clear image, transmission map and global atmospheric light, respectively. The $a$ means the pixel location. Meanwhile, a widely used physical model [16, 18] in the NiSID task is", + "bbox": [ + 75, + 321, + 468, + 383 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nI (a) = J (a) t (a) + A (a) (1 - t (a)) + L _ {s} (a) * \\varkappa (a), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 88, + 391, + 468, + 409 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $L_{s}(a)$ and $\\varkappa(a)$ denote the light sources and atmospheric point spread function. As shown in Eq. 1 and Eq. 
2, the main distinction between daytime and nighttime haze imaging is light sources [1, 4, 24, 29, 30, 41, 46], which we consider to be the main source of the difficulty. Specifically, two outstanding issues are considered as follows.", + "bbox": [ + 75, + 417, + 468, + 507 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Localized, Coupled and Frequency Inconsistent: As shown in Figure 1, multiple active light sources may exist simultaneously. Therefore, the distortion of nighttime images, namely the haze that is mainly generated by suspended particles and liquid water droplets, the glow that is mainly produced by active light sources and the noise that is mainly caused by low intensity, is usually localized. Meanwhile, these types of distortions are mixed throughout the image, which is coupled. Furthermore, the haze and glow will cause the loss of high-frequency signals, while the noise belongs to high-frequency disturbance signals [22] that needs to be eliminated. This means that these distortions have inconsistent frequency characteristics. In a word, a challenging issue is how to simultaneously handle distortions with localized, coupled and frequency inconsistent characteristics.", + "bbox": [ + 76, + 508, + 472, + 750 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Unrealistic Brightness Intensity: Nighttime hazy datasets based on real-world images synthesized by imaging model (IM) are difficult to simulate multiple active light sources, while nighttime hazy datasets based on game engine (GE) cannot perfectly reproduce the harmonious brightness of real-world nighttime scenes. As we observed in Figure 1, the dehazed images obtained under IM datasets still suffer from the glow and haze that caused by multiple light sources, but the overall brightness is realistic. The dehazed images obtained under GE", + "bbox": [ + 76, + 750, + 472, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "dataset show less haze and glow, but the scene brightness is unrealistic. In a word, an unsolved problem faced by data-driven algorithms is how to suppress haze and glow while achieving realistic brightness.", + "bbox": [ + 511, + 90, + 893, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Therefore, we propose a semi-supervised dehazing framework that can be used for the real-world NiSID task. Firstly, the local attention [32] is adopted to learn the inductive bias in the spatial domain to suppress local distortions. A frequency spectrum dynamic filtering strategy is designed to handle distortions with inconsistent frequency characteristics. Considering the coupled of these distortions, the spatial and frequency information are integrated as a bidomain interaction module for feature extraction and image reconstruction. Secondly, aiming at suppressing distortions while achieving realistic brightness. The simulation data provided by the game engine is utilized to generate pseudo labels that can suppress haze and glow for retraining process. Then, real-world hazy images are adopted as brightness-realistic signals for the realistic brightness constraint. Overall, the main contributions of this paper are as follows.", + "bbox": [ + 496, + 152, + 893, + 393 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a spatial and frequency domain aware semi-supervised nighttime dehazing network (SFSNiD). SFS-NiD can remove nighttime haze that is accompanied by glow and noise. 
The experimental results on synthetic and real-world datasets show that the proposed method can achieve impressive performance.", + "- We design a spatial and frequency domain information interaction (SFII) module to simultaneously handle the haze, glow and noise with localized, coupled and frequency inconsistent characteristics. The multi-channel amplitude and phase spectrums are dynamically filtered and aggregated. The spatial and frequency domain features are integrated by local attention.", + "- A retraining strategy and a local window-based brightness loss for semi-supervised training process are designed to suppress haze and glow while achieving realistic brightness. The retraining strategy is based on pseudo labels. The hazy image is divided into non-overlapping windows for the calculation of local brightness map to provide realistic brightness supervision." + ], + "bbox": [ + 500, + 395, + 890, + 696 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 714, + 640, + 729 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Daytime Dehazing", + "text_level": 1, + "bbox": [ + 500, + 739, + 678, + 756 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A variety of effective dehazing algorithms for DaSID have been proposed. An ultra-high resolution dehazing method based on bilateral gird is proposed by 4KDehazing [54]. AECRNet [43] introduces the contrastive learning to the dehazing process. The prior information and visual attention mechanism are utilized in DeHamer [9]. DF [38] designs an encoder-decoder architecture which totally based on multi-head self-attention [32]. MITNet [37] combines the mutual information-driven constraint and adaptive triple interaction", + "bbox": [ + 496, + 763, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2632", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4790f658478349dda72da0fc88502959c577c4c4aa39a1319b2417d27580426a.jpg", + "image_caption": [ + "Figure 2. The overall pipeline of the proposed SFSNiD." + ], + "image_footnote": [], + "bbox": [ + 81, + 87, + 890, + 305 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "strategy into a supervised training process. Although these DaSID algorithms have achieve impressive performance, they are not designed for the characteristics of nighttime hazy images, which may cause them to have certain limitations on the NiSID task [31].", + "bbox": [ + 75, + 357, + 470, + 434 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Nighttime Dehazing", + "text_level": 1, + "bbox": [ + 76, + 443, + 267, + 459 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Compared with DaSID, NiSID has received fewer attention. On the one hand, the prior hypotheses and statistical laws are utilized in the non-deep learning-based NiSID methods [50, 51]. A maximum reflectance prior is proposed by MRP [50], which providing a way to estimate the varying ambient illumination. An optimal-scale fusion-based method is designed by OSFD [51], which utilizes a parameter estimation dehazing flow. On the other hand, the data-driven strategies [23, 40] are adopted in the deep learning-based NiSID methods [14, 31, 45]. NightHazeFormer [31] combines the visual transformer and prior knowledge (dark channel and bright channel) into an end-to-end enhancement process. 
GAC [14] utilizes the angular point spread function to reduce the glow effect in nighttime scenes. Yan et al. [45] propose a strategy which decomposes the image into scene texture information and scene structure information. According to recent research, deep learning-based NiSID algorithms can achieve relatively better quantitative performance according to sufficient synthetic data. However, the haze, glow, and noise with localized, coupled and frequency inconsistent characteristics are not fully considered by these deep learning-based NiSID algorithms.", + "bbox": [ + 75, + 465, + 473, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methods", + "text_level": 1, + "bbox": [ + 76, + 814, + 174, + 829 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The hazy domain and haze-free domain are marked as $X$ and $Y$ , respectively. The synthesized hazy and haze-free image datasets are denoted $\\mathcal{D}_X$ and $\\mathcal{D}_Y$ , which contain $N$ images, respectively. The real-world hazy image and haze-", + "bbox": [ + 75, + 839, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "free datasets are denoted as $\\mathcal{R}_X$ and $\\mathcal{R}_Y$ , which include $M$ images, respectively. The convolution operation is denoted as $C_t^k (\\cdot)$ , where the superscript $k$ and subscript $t$ denote the kernel size and stride, respectively. The $\\varpi (\\cdot),\\sigma (\\cdot),\\delta (\\cdot)$ and $sf(\\cdot)$ denote the global average pooling, LeakyReLU, sigmoid and softmax operations, respectively. The input hazy images and predicted dehazed images at three scales are marked $x_{i}^{s}\\in \\mathcal{D}_{X}$ and $p_i^s$ respectively, where $s\\in \\{0,1,2\\}$ and $i$ denotes the $i$ -th example. The size of $x_{i}^{0},x_{i}^{1}$ and $x_{i}^{2}$ are $H\\times W\\times C,\\frac{H}{2}\\times \\frac{W}{2}\\times C$ and $\\frac{H}{4}\\times \\frac{W}{4}\\times C$ , respectively. The $H,W$ and $C$ denote the height, width and number of channels, respectively. The size of $p_i^s$ remains the same as $x_{i}^{s}$ . The network at scale $s$ is denoted as $\\Psi^s (\\cdot)$ .", + "bbox": [ + 496, + 357, + 893, + 555 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Network Structure", + "text_level": 1, + "bbox": [ + 498, + 560, + 683, + 575 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The multi-scale structure [3] of the SFSNiD is shown in Figure 2. Two kinds of modules are included in the proposed network, namely (i) spatial and frequency information interaction (SFII) model, (ii) convolution input (ConvI), convolution output (ConvO), convolution downsampling (ConvD), and convolution upsampling (ConvU). The ConvI projects the image into the feature space, while ConvO does the opposite. ConvD reduces the length and width of the feature map by half, while ConvU does the opposite.", + "bbox": [ + 496, + 583, + 890, + 720 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Spatial and Frequency Information Interaction", + "text_level": 1, + "bbox": [ + 498, + 727, + 890, + 743 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Preliminary. For a feature map $z\\in \\mathbb{R}^{\\widetilde{H}\\times \\widetilde{W}\\times \\widetilde{C}}$ , where $\\widetilde{H}$ $\\widetilde{W}$ and $\\widetilde{C}$ denote the height, width and number of channels, respectively. 
We first project each of its channel $z_{\\widetilde{c}}$ to the frequency domain by the Fourier [10] transformation $\\mathcal{F}$ as", + "bbox": [ + 496, + 750, + 890, + 815 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {F} \\left(z _ {\\widetilde {c}}\\right) (u, v) = \\sum_ {h = 0} ^ {\\widetilde {H} - 1} \\sum_ {w = 0} ^ {\\widetilde {W} - 1} z _ {\\widetilde {c}} (h, w) e ^ {- j 2 \\pi \\left(\\frac {h}{H} u + \\frac {w}{W} v\\right)}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 517, + 821, + 890, + 864 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $(h,w)$ and $(u,v)$ represent the coordinates in the spatial and frequency domain, respectively. The $\\widetilde{c} \\in$", + "bbox": [ + 496, + 869, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "2633", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/e56bd87343b14a8a98a0392a8affc67aa1fa03c389ced00931675553659cf27c.jpg", + "image_caption": [ + "Figure 3. The sub-modules of the proposed SFII." + ], + "image_footnote": [], + "bbox": [ + 84, + 88, + 462, + 252 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a8e6dd8c397caabedf982609fed5a59bd0b4456e3ab6b59affbe254348d996b9.jpg", + "image_caption": [ + "Figure 4. The overall architecture of the proposed SFII." + ], + "image_footnote": [], + "bbox": [ + 84, + 290, + 460, + 416 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\{0,1,\\dots,\\widetilde{C}\\}$ denotes the channel index. Correspondingly, the $\\mathcal{F}^{-1}$ is defined as the inverse Fourier transformation [55]. Then, the real part $\\mathcal{R}(z_{\\widetilde{c}})(u,v)$ and imaginary part $\\mathcal{I}(z_{\\widetilde{c}})(u,v)$ can be obtained by $\\mathcal{F}(z_{\\widetilde{c}})(u,v)$ . The amplitude spectrum $\\mathcal{A}(z_{\\widetilde{c}})(u,v)$ and phase spectrum $\\mathcal{P}(z_{\\widetilde{c}})(u,v)$ of $\\mathcal{F}(z_{\\widetilde{c}})(u,v)$ on the single channel can be obtained by", + "bbox": [ + 76, + 465, + 468, + 556 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {A} \\left(z _ {\\bar {c}}\\right) (u, v) = \\sqrt {\\mathcal {R} ^ {2} \\left(z _ {\\bar {c}}\\right) (u , v) + \\mathcal {I} ^ {2} \\left(z _ {\\bar {c}}\\right) (u , v)}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 114, + 566, + 468, + 585 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {P} \\left(z _ {\\widetilde {c}}\\right) (u, v) = \\arctan \\left[ \\frac {\\mathcal {I} \\left(z _ {\\widetilde {c}}\\right) (u , v)}{\\mathcal {R} \\left(z _ {\\widetilde {c}}\\right) (u , v)} \\right]. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 148, + 604, + 468, + 638 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The full channel amplitude spectrum $\\mathcal{A}(z)(u,v)\\in$ $\\mathbb{R}^{\\widetilde{H}\\times \\widetilde{W}\\times \\widetilde{C}}$ and phase spectrum $\\mathcal{P}(z)(u,v)\\in \\mathbb{R}^{\\widetilde{H}\\times \\widetilde{W}\\times \\widetilde{C}}$ can be obtained by applying the Eq. 3, Eq. 4 and Eq. 5 on each channel of $z$ .", + "bbox": [ + 76, + 641, + 468, + 703 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Frequency Spectrum Dynamic Aggregation (FSDA). The haze, glow and noise with inconsistent frequency characteristics can be processed in the frequency domain by dynamic spectrum filter. 
The amplitude spectrum and phase spectrum of different channels are aggregated by the pointwise convolution as", + "bbox": [ + 76, + 704, + 468, + 792 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {S} ^ {*} (z) (u, v) = \\sigma \\left(C _ {1} ^ {1} (\\mathcal {S} (z) (u, v))\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 153, + 804, + 468, + 821 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $S(z)(u,v)\\in \\{\\mathcal{A}(z)(u,v),\\mathcal{P}(z)(u,v)\\}$ . To perform channel aggregation of spectral information, the channel weight [12] map $\\mathcal{W}$ are calculated as", + "bbox": [ + 76, + 829, + 468, + 875 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {W} (z) (u, v) = \\delta \\left(C _ {1} ^ {1} \\left(\\sigma \\left(C _ {1} ^ {1} \\left(\\varpi \\left(S ^ {*} (z) (u, v)\\right)\\right)\\right)\\right)\\right), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 102, + 883, + 468, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{W}(z)(u,v)\\in \\mathbb{R}^{1\\times 1\\times \\widetilde{C}}$ . Then the channel weight map is applied to the frequency spectrum as", + "bbox": [ + 498, + 89, + 890, + 122 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\dot {S} (z) (u, v) = C _ {1} ^ {1} (\\mathcal {W} (z) (u, v) \\cdot \\mathcal {S} ^ {*} (z) (u, v)), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 545, + 132, + 890, + 148 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the spectrum filter (SF) of $\\dot{S} (z)(u,v)$ is shown in Figure 3-(a). The filtering operation is performed by the residual connection, the filtered component is obtained by", + "bbox": [ + 498, + 159, + 890, + 205 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {\\mathcal {S}} (z) (u, v) = \\dot {\\mathcal {S}} (z) (u, v) + \\mathcal {S} (z) (u, v). \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 214, + 890, + 234 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The filtered $\\widetilde{\\mathcal{A}}(z)(u,v)$ and $\\widetilde{\\mathcal{P}}(z)(u,v)$ can be obtained based on the processing flow from $S(z)(u,v)$ to $\\widetilde{S}(z)(u,v)$ . Then, the real and imaginary parts are obtained by", + "bbox": [ + 498, + 244, + 890, + 292 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {\\mathcal {R}} (z) (u, v) = \\widetilde {\\mathcal {A}} (z) (u, v) \\cdot \\cos \\widetilde {\\mathcal {P}} (z) (u, v), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 540, + 301, + 890, + 321 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {\\mathcal {I}} (z) (u, v) = \\widetilde {\\mathcal {A}} (z) (u, v) \\cdot \\sin \\widetilde {\\mathcal {P}} (z) (u, v). 
\\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 344, + 890, + 363 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After dynamic parameter learning in the frequency domain, we remap the feature map to the spatial domain as", + "bbox": [ + 498, + 369, + 890, + 398 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nz _ {f} = \\mathcal {F} ^ {- 1} (\\widetilde {\\mathcal {R}} (z) (u, v), \\widetilde {\\mathcal {I}} (z) (u, v)), \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 575, + 412, + 890, + 431 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $z_{f} \\in \\mathbb{R}^{\\widetilde{H} \\times \\widetilde{W} \\times \\widetilde{C}}$ . The Fourier transformation and inverse Fourier transformation can be implemented using DFT and IDFT algorithms [6, 11, 56]. Here, we define the calculation from Eq. 3 to Eq. 12 as frequency spectrum dynamic aggregation (FSDA), which represent the processing flow from $z$ to $z_{f}$ that is shown in Figure 3-(b). For convenience, the FSDA is denoted as $\\mathcal{FS}(\\cdot)$ .", + "bbox": [ + 496, + 436, + 890, + 545 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Frequency Domain Projection (FDP). To deal with distortions in the frequency domain, we first introduce frequency domain interactions before computing local inductive bias. For the input feature map $z \\in \\mathbb{R}^{\\widetilde{H} \\times \\widetilde{W} \\times \\widetilde{C}}$ , it is processed by the layer normalization operation ( $LN(\\cdot)$ ) [32] to obtain the normalized feature $z_{l} = LN(z)$ . Then, the normalized feature $z_{l}$ is projected into $Q_{f}$ (query), $K_{f}$ (key) and $V_{f}$ (value) by the projection in the frequency domain as", + "bbox": [ + 496, + 545, + 890, + 669 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nQ _ {f} = \\mathcal {F S} _ {Q} (z _ {l}), K _ {f} = \\mathcal {F S} _ {K} (z _ {l}), V _ {f} = \\mathcal {F S} _ {V} (z _ {l}), \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 513, + 679, + 890, + 696 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the $\\mathcal{FS}_Q(\\cdot),\\mathcal{FS}_K(\\cdot)$ and $\\mathcal{FS}_V(\\cdot)$ denote three independent projection operations with learnable parameters, respectively. The generation process of the $Q_{f},K_{f}$ and $V_{f}$ is denoted as the frequency domain projection (FDP).", + "bbox": [ + 496, + 705, + 890, + 767 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Bidomain Local Perception (BLP). After obtaining the features $Q_{f}$ , $K_{f}$ and $V_{f}$ which consider the information in frequency domain, we perform spatial domain learning on the features from a local perspective. The self-attention [32] with local perception (LP) that is shown in Figure 3-(c) is computed within $8 \\times 8$ non-overlapping windows as", + "bbox": [ + 496, + 768, + 890, + 859 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {A T} \\left(Q _ {f}, K _ {f}, V _ {f}\\right) = s f \\left(\\frac {Q _ {f} \\otimes K _ {f} ^ {T}}{\\sqrt {d}} + B\\right) \\otimes V _ {f}, \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 521, + 868, + 890, + 904 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "2634", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $d$ and $B$ denote the dimensionality and position bias, respectively. The $\\otimes$ denotes the matrix multiplication (MatMul). 
Information is transferred by the residual connection", + "bbox": [ + 76, + 90, + 468, + 137 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nz ^ {*} = \\mathcal {A T} \\left(Q _ {f}, K _ {f}, V _ {f}\\right) + z, \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 152, + 468, + 170 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the calculation from $z$ to $z^{*}$ is marked as bidomain local perception (BLP), which is shown in Figure 4-(a).", + "bbox": [ + 76, + 178, + 468, + 208 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Bidomain Nonlinear Mapping (BNM). The computation of window attention does not provide nonlinear representation capabilities. Therefore, we use the frequency and spatial domain interaction module to learn nonlinear mapping. The FSDA is used to provide the frequency domain information. Besides, a residual block which consists of $C_1^3 (\\sigma (C_1^3 (\\cdot)))$ is used to provide the spatial interaction. The immediate feature $z^{*}$ is fed into the frequency nonlinear mapping branch and spatial nonlinear mapping branch, as", + "bbox": [ + 76, + 209, + 468, + 345 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nz _ {f n} = \\mathcal {F S} _ {A} \\left(z ^ {*}\\right), \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 361, + 468, + 378 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nz _ {s n} = C _ {1} ^ {3} \\left(\\sigma \\left(C _ {1} ^ {3} \\left(z ^ {*}\\right)\\right)\\right), \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 402, + 468, + 421 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the subscript $A$ in $\\mathcal{FS}_A(\\cdot)$ means the frequency interaction performed after the attention operation. Then frequency domain and spatial domain features are fused as the final nonlinear mapping output by", + "bbox": [ + 76, + 428, + 468, + 491 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {z} = C _ {1} ^ {3} \\left(\\left[ z _ {f n}, z _ {s n} + z ^ {*} \\right]\\right) + z ^ {*}, \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 168, + 501, + 468, + 520 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the $[\\cdot, \\cdot]$ denotes the channel concatenation. The calculation from $z^*$ to $\\widetilde{z}$ is marked as the bidomain nonlinear mapping (BNM), which is shown in Figure 4-(b).", + "bbox": [ + 76, + 532, + 468, + 577 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Spatial and Frequency Information Interaction (SFII). As shown in Figure 4, the calculation process from $z$ to $\\widetilde{z}$ is called spatial and frequency information interaction (SFII). The proposed SFII aggregates spatial domain information and frequency domain information from a local perspective.", + "bbox": [ + 76, + 578, + 468, + 654 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Spatial and Frequency Loss", + "text_level": 1, + "bbox": [ + 76, + 665, + 326, + 681 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The supervised loss consists of two parts, namely the pixel-by-pixel loss in geometric space and the frequency domain loss obtained by Fourier transform [3]. 
By sampling $x_{i}^{s} \\in \\mathcal{D}_{X}$ and $y_{i}^{s} \\in \\mathcal{D}_{Y}$ , the losses calculated at three scales are", + "bbox": [ + 76, + 689, + 468, + 750 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {G} = \\sum_ {s = 0} ^ {2} \\lambda_ {g} \\cdot \\sum_ {i = 0} ^ {N - 1} | | \\Psi^ {s} \\left(x _ {i} ^ {s}\\right) - y _ {i} ^ {s} | | _ {1}, \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 147, + 762, + 468, + 804 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {F} = \\sum_ {s = 0} ^ {2} \\lambda_ {f} \\cdot \\sum_ {i = 0} ^ {N - 1} \\left| \\left| \\mathcal {F} \\left(\\Psi^ {s} \\left(x _ {i} ^ {s}\\right)\\right) - \\mathcal {F} \\left(y _ {i} ^ {s}\\right) \\right| \\right| _ {1}, \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 107, + 832, + 468, + 873 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda_{g}$ and $\\lambda_{f}$ denote weight factors.", + "bbox": [ + 76, + 885, + 339, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Retraining and Realistic Brightness Loss", + "text_level": 1, + "bbox": [ + 500, + 90, + 848, + 106 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Pseudo-label Fusion Retraining. There are inherent domain discrepancy between synthetic hazy images and real-world hazy images. Therefore, we adopt a retraining strategy which utilizes peso labels. Pseudo labels $\\mathcal{R}_Y^P$ are obtained based on the model trained on synthetic datasets. We put the original synthetic dataset $\\{\\mathcal{D}_X,\\mathcal{D}_Y\\}$ and the pseudo-labeled dataset $\\{\\mathcal{R}_X,\\mathcal{R}_Y^P\\}$ into the network simultaneously for retraining. Supervised losses Eq. 19 and Eq. 20 are used in the retraining process at three scales.", + "bbox": [ + 498, + 114, + 890, + 250 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Prior Brightness Constraint. We conduct a quantitative statistics on the brightness of nighttime hazy and clear images provided by [14]. The brightness intensity corresponding to $x_{i}^{0} \\in \\mathcal{R}_{X}$ and $y_{i}^{0} \\in \\mathcal{R}_{Y}$ are $\\mu (x_i^0)$ and $\\mu (y_i^0)$ , respectively, where $\\mu (\\cdot)$ denote the average pixel value across three channels. We randomly select $M = \\frac{M}{2}$ images from the dataset multiple times, and we get", + "bbox": [ + 498, + 251, + 890, + 357 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i = 0} ^ {\\hat {M} - 1} \\mu \\left(y _ {i} ^ {0}\\right) < \\sum_ {i = 0} ^ {\\hat {M} - 1} \\mu \\left(x _ {i} ^ {0}\\right). \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 611, + 371, + 890, + 414 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Therefore, we assume the brightness of the dehazed image $p_i^s$ should be lower than that of the $x_i^s$ . This assumption is consistent with the imaging model Eq. 2.", + "bbox": [ + 498, + 428, + 890, + 472 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Local Brightness Map (LBM). We divide the image into non-overlapping local windows. The width and height of each square window is denoted as $\\gamma^s$ , where $s \\in \\{0,1,2\\}$ . 
The value in $\\underline{\\mathrm{local~brightness}}$ map (LBM) $\\varphi_{x_i^s}$ that corresponding to $x_i^s$ is obtained by", + "bbox": [ + 498, + 474, + 890, + 550 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\varphi_ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w}) = \\frac {1}{3 (\\gamma^ {s}) ^ {2}} \\sum_ {c = 0} ^ {2} \\sum_ {h = \\hat {h} \\cdot \\gamma^ {s}} ^ {(\\hat {h} + 1) \\cdot \\gamma^ {s}} \\sum_ {w = \\hat {w} \\cdot \\gamma^ {s}} ^ {(\\hat {w} + 1) \\cdot \\gamma^ {s}} x _ {i} ^ {s} (h, w, c), \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 564, + 888, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $(\\hat{h},\\hat{w})$ and $(h,w)$ denote the pixel index of $\\varphi_{x_i^s}$ and $x_{i}^{s}$ , respectively. Meanwhile, the local brightness map $\\varphi_{p_i^s}$ corresponding to $p_i^s$ is defined in the same way. As shown in Figure 2-(c), the locations with high brightness may be active light sources or objects close to the light source, while the locations with low brightness may be objects and backgrounds far away from the light source.", + "bbox": [ + 498, + 625, + 890, + 731 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Realistic Brightness Loss. The brightness of hazy images is approximately globally realistic, so it can be used to supervise the brightness of dehazed images. As we observed in Eq. 21, the brightness of the dehazed image should be lower than that of the hazy image. Meanwhile, in order to ensure the relative numerical relationship between areas with high brightness and low brightness before and after dehazing, we use a power function with monotonically increasing properties to process the $\\varphi_{x_i^s}(\\hat{h},\\hat{w})$ , as", + "bbox": [ + 498, + 732, + 890, + 869 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {\\varphi} _ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w}) = \\left(\\varphi_ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w})\\right) ^ {\\kappa}, \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 602, + 883, + 890, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "2635", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/40ddf7cd4c54a9cf836c96025155f65351a206a6a894ae6a19bf88ba1ab297c6.jpg", + "image_caption": [ + "(a) Hazy" + ], + "image_footnote": [], + "bbox": [ + 83, + 87, + 197, + 143 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/83755efa09e8f7af77f754f621b55d111c8c321ef3b2190616e60f5e05c34749.jpg", + "image_caption": [ + "(b) MRP" + ], + "image_footnote": [], + "bbox": [ + 199, + 88, + 313, + 143 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/13482730aff8388d55b14453daaafca013fb237da171bad07d8fdeb66b3ae61a.jpg", + "image_caption": [ + "(c) OSFD" + ], + "image_footnote": [], + "bbox": [ + 313, + 88, + 426, + 143 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a409ab44e08a21ecfd2a339aaed8175cb808bf4b2daae7715984762e47d86ac9.jpg", + "image_caption": [ + "(d) GD" + ], + "image_footnote": [], + "bbox": [ + 428, + 88, + 542, + 143 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c1f76453a20f05f5f43ed1b0ac2d75da9dffab37917caa9f4bf331e0e437ff1f.jpg", + "image_caption": [ + "(e) MSBDN" + ], + "image_footnote": [], + "bbox": [ + 542, + 88, + 656, + 143 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": 
"images/f8c73b47bb3ec9bcaef2918b8912874d8f7a51461a7fc7e686e9be25677e448e.jpg", + "image_caption": [ + "(f) 4KDehazing" + ], + "image_footnote": [], + "bbox": [ + 656, + 88, + 771, + 143 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b632aa6f0474872ef75965211d96e0a544b100aa6ebc1d9c0a4f3e4bdf7383a6.jpg", + "image_caption": [ + "(g) AECRNet" + ], + "image_footnote": [], + "bbox": [ + 771, + 88, + 885, + 143 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/249dfd533549490ba31093efaa3840ad4591499dea329f7b686588a9af10a2ef.jpg", + "image_caption": [ + "(h) DeHamer" + ], + "image_footnote": [], + "bbox": [ + 83, + 160, + 197, + 217 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/99d2751b8712dcf5829b1a521e15470b1abd9f553fc12e655f7ec70d11cbb498.jpg", + "image_caption": [ + "(i) FSDGN" + ], + "image_footnote": [], + "bbox": [ + 199, + 160, + 313, + 217 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/72346426a5ce827757bce67b5bcc7cf2461a2a1df98eaae746ddd10d785c2d54.jpg", + "image_caption": [ + "(j) DF" + ], + "image_footnote": [], + "bbox": [ + 313, + 160, + 426, + 217 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3fe97d65e7bcd8f02a1493a13dd40d05a933c2d8fd19a8079e9380bf0dc63681.jpg", + "image_caption": [ + "(k) MITNet" + ], + "image_footnote": [], + "bbox": [ + 428, + 160, + 542, + 217 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e4e25aa749d0c8a0281cd2212413468edc5a58b9ceef5b8211ba847a2006134b.jpg", + "image_caption": [ + "(1)Fourmer", + "Figure 5. Visual results on synthetic dataset [31]." + ], + "image_footnote": [], + "bbox": [ + 542, + 160, + 656, + 217 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/75c14a44593a3000f035b679ff4ab46f6e0bdcbaef14f03b7c2d0f01f6317f05.jpg", + "image_caption": [ + "(m) Ours" + ], + "image_footnote": [], + "bbox": [ + 656, + 160, + 771, + 217 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/85e6d1c03e6b7ba772bb2086a1a2fa2fbc43074248332799df22787835fae17d.jpg", + "image_caption": [ + "(n) Label" + ], + "image_footnote": [], + "bbox": [ + 771, + 160, + 885, + 217 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/006d236ae7e24ad9cc359d0dd4b7769931fdeeb51ce8f05180ce5f8ae8f46dc2.jpg", + "table_caption": [ + "Table 1. Quantitative results on datasets that generated by imaging model." + ], + "table_footnote": [], + "table_body": "
<table><tr><td rowspan="2">Methods</td><td colspan="2">NHR</td><td colspan="2">NHM</td><td colspan="2">NHCL</td><td colspan="2">NHCM</td><td colspan="2">NHCD</td><td colspan="2">NightHaze</td><td colspan="2">YellowHaze</td></tr>
<tr><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td></tr>
<tr><td>MRP (CVPR 2017)</td><td>0.776</td><td>19.848</td><td>0.666</td><td>15.993</td><td>0.747</td><td>22.497</td><td>0.693</td><td>20.494</td><td>0.624</td><td>17.651</td><td>0.295</td><td>12.138</td><td>0.249</td><td>13.473</td></tr>
<tr><td>GD (ICCV 2019)</td><td>0.969</td><td>30.107</td><td>0.861</td><td>20.689</td><td>0.973</td><td>36.506</td><td>0.958</td><td>34.448</td><td>0.932</td><td>31.509</td><td>0.832</td><td>25.324</td><td>0.915</td><td>27.410</td></tr>
<tr><td>OSFD (ACMMM 2020)</td><td>0.808</td><td>21.028</td><td>0.722</td><td>18.491</td><td>0.786</td><td>22.329</td><td>0.739</td><td>20.929</td><td>0.672</td><td>18.501</td><td>0.304</td><td>13.387</td><td>0.259</td><td>14.775</td></tr>
<tr><td>MSBDN (CVPR 2020)</td><td>0.970</td><td>31.335</td><td>0.818</td><td>20.514</td><td>0.965</td><td>35.963</td><td>0.938</td><td>32.848</td><td>0.903</td><td>30.475</td><td>0.950</td><td>33.156</td><td>0.921</td><td>29.834</td></tr>
<tr><td>4KDehazing (CVPR 2021)</td><td>0.950</td><td>28.613</td><td>0.830</td><td>20.429</td><td>0.967</td><td>35.006</td><td>0.958</td><td>35.162</td><td>0.912</td><td>30.048</td><td>0.850</td><td>26.562</td><td>0.861</td><td>25.835</td></tr>
<tr><td>AECRNet (CVPR 2021)</td><td>0.915</td><td>24.864</td><td>0.817</td><td>19.420</td><td>0.951</td><td>33.183</td><td>0.943</td><td>33.498</td><td>0.890</td><td>28.742</td><td>0.946</td><td>32.344</td><td>0.937</td><td>29.417</td></tr>
<tr><td>DeHamer (CVPR 2022)</td><td>0.966</td><td>31.017</td><td>0.823</td><td>23.095</td><td>0.966</td><td>36.038</td><td>0.944</td><td>33.908</td><td>0.915</td><td>31.389</td><td>0.954</td><td>33.432</td><td>0.931</td><td>30.334</td></tr>
<tr><td>FSDGN (ECCV 2022)</td><td>0.975</td><td>32.072</td><td>0.874</td><td>21.415</td><td>0.972</td><td>36.432</td><td>0.952</td><td>33.723</td><td>0.922</td><td>31.559</td><td>0.948</td><td>33.521</td><td>0.955</td><td>33.062</td></tr>
<tr><td>DF (TIP 2023)</td><td>0.969</td><td>31.644</td><td>0.896</td><td>23.207</td><td>0.975</td><td>37.383</td><td>0.960</td><td>35.038</td><td>0.934</td><td>32.079</td><td>0.931</td><td>31.489</td><td>0.948</td><td>32.244</td></tr>
<tr><td>MITNet (ACMMM 2023)</td><td>0.974</td><td>31.969</td><td>0.859</td><td>20.884</td><td>0.969</td><td>35.794</td><td>0.945</td><td>32.849</td><td>0.916</td><td>30.628</td><td>0.946</td><td>34.114</td><td>0.932</td><td>31.186</td></tr>
<tr><td>Fourmer (ICML 2023)</td><td>0.969</td><td>31.660</td><td>0.862</td><td>21.423</td><td>0.963</td><td>35.714</td><td>0.943</td><td>33.201</td><td>0.928</td><td>32.103</td><td>0.949</td><td>33.419</td><td>0.958</td><td>31.978</td></tr>
<tr><td>Ours</td><td>0.978</td><td>33.180</td><td>0.905</td><td>23.705</td><td>0.979</td><td>38.146</td><td>0.968</td><td>36.146</td><td>0.951</td><td>34.001</td><td>0.968</td><td>35.527</td><td>0.965</td><td>32.981</td></tr></table>
", + "bbox": [ + 83, + 296, + 888, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\kappa \\geq 1$ is the brightness intensity coefficient. The realistic brightness constraint within one single window is", + "bbox": [ + 75, + 494, + 468, + 525 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {B} ^ {p _ {i} ^ {s}} (\\hat {h}, \\hat {w}) = \\left(\\varphi_ {p _ {i} ^ {s}} (\\hat {h}, \\hat {w}) - \\xi \\cdot \\widetilde {\\varphi} _ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w})\\right) ^ {2}, \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 114, + 532, + 468, + 555 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\xi$ is a hyperparameter. The realistic brightness loss calculated over all windows is", + "bbox": [ + 75, + 561, + 468, + 590 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {B} = \\sum_ {s = 0} ^ {2} \\frac {\\lambda_ {b}}{\\hat {N} \\hat {W} ^ {s} \\hat {H} ^ {s}} \\cdot \\sum_ {i = 0} ^ {\\hat {N} - 1} \\sum_ {\\hat {h} = 0} ^ {\\hat {H} ^ {s} - 1} \\sum_ {\\hat {w} = 0} ^ {\\hat {W} ^ {s} - 1} \\mathcal {L} _ {B} ^ {p _ {i} ^ {s}} (\\hat {h}, \\hat {w}), \\tag {25}\n$$\n", + "text_format": "latex", + "bbox": [ + 93, + 598, + 468, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\hat{W}^s = W^s /\\gamma^s$ $\\hat{H}^s = H^s /\\gamma^s$ . And $\\hat{N} = N + M$ The $\\lambda_{b}$ denotes the weights of scale loss of $\\mathcal{L}_B$", + "bbox": [ + 75, + 652, + 468, + 684 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.5. Total Loss", + "text_level": 1, + "bbox": [ + 76, + 691, + 192, + 705 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The overall loss is a combination of supervised and semi-supervised losses, which is", + "bbox": [ + 75, + 715, + 468, + 744 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {G} + \\alpha \\mathcal {L} _ {F} + \\beta \\mathcal {L} _ {B}, \\tag {26}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 755, + 468, + 771 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\alpha$ and $\\beta$ are the weights of the frequency domain loss and the realistic brightness loss, respectively.", + "bbox": [ + 75, + 780, + 468, + 810 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 821, + 209, + 838 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experiment Setting", + "text_level": 1, + "bbox": [ + 76, + 847, + 263, + 863 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets. To comprehensively compare the performance of different algorithms, we conducted experiments on both", + "bbox": [ + 75, + 869, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "synthetic and real-world datasets. The synthetic datasets include GTA5 [45], UNREAL-NH [31], {NHR, NHM, HNCL, NHCM, NHCD} [51] and {NightHaze, YellowHaze} [26]. The real-world nighttime haze (RWNH) is provided by [14]. Since the brightness level of the ground-truth label in the UNREAL-NH is close to daytime, we adjust the brightness of the hazy image and corresponding label to the level of the nighttime low-light image by the Gamma correction [33] for the evaluation of the RWNH.", + "bbox": [ + 496, + 494, + 890, + 630 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison Methods and Evaluation Metrics. MRP [50], GD [27], OSFD [51], MSBDN [5]. 
4KDehazing [54], AECRNet [43], DeHamer [9], FSDGN [47], DF [38], MIT-Net [37] and Fourmer [55] are used as comparisons. PSNR [22, 35, 36] and SSIM [8, 42] are used to evaluate the performance on labeled datasets. BRISQUE [44] and MUSIQ [13, 17] are computed to evaluate the performance on unlabeled dataset. The $\\uparrow$ represents a larger value, a higher quality, while $\\downarrow$ represents a larger value, a lower quality.", + "bbox": [ + 496, + 637, + 892, + 773 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation Details. The batch size is chosen as 4. The image size is set to $256 \\times 256 \\times 3$ . The learning rate is initialized to 0.0001 and linearly decays by a factor of 0.95 every 10 epochs. The Adam $(\\beta_{1} = 0.9, \\beta_{2} = 0.999)$ is used. The $\\lambda_{g}$ , $\\lambda_{f}$ and $\\lambda_{b}$ are all set to 1. The $\\alpha$ and $\\beta$ are set to 0.1 and 20, respectively. The window size $\\gamma^{s}$ are set to 16, 8 and 4, where $s \\in \\{0,1,2\\}$ , respectively. The coefficient $\\xi$ and $\\kappa$ is set to 1 and 1.3, respectively. The", + "bbox": [ + 496, + 779, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "2636", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f2c71a783412da48ae45b925130b1675a92c9e14d4285ef737d4a3c227783c51.jpg", + "image_caption": [ + "(a) Hazy" + ], + "image_footnote": [], + "bbox": [ + 84, + 87, + 218, + 198 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2f13d28929ef75be03f1aa5c80c7a4869326bd52ad7d684bcd25ce15c800ea85.jpg", + "image_caption": [ + "(b)MRP" + ], + "image_footnote": [], + "bbox": [ + 218, + 88, + 352, + 198 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2ab4e850fa9e989735bcf9f1fe5f822509134e1bc047c77246cfdcce13fdfdb2.jpg", + "image_caption": [ + "(c) OSFD" + ], + "image_footnote": [], + "bbox": [ + 352, + 88, + 483, + 198 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2fa293e1f28c4bf50eb2bf511896c6e2c8cab6f722bce710588fdf47f8fe8408.jpg", + "image_caption": [ + "(d) GD" + ], + "image_footnote": [], + "bbox": [ + 483, + 88, + 616, + 198 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9eb01e2043a460f4405d414fc44ccee117da18e9e7643e37519fc26f4633fb3b.jpg", + "image_caption": [ + "(e) MSBDN" + ], + "image_footnote": [], + "bbox": [ + 617, + 88, + 750, + 198 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f4d5c9877d1d4a150d75137ad8161fb663c2a2fda42bc09f5cdb96333d7be9d0.jpg", + "image_caption": [ + "(f) 4KDehazing" + ], + "image_footnote": [], + "bbox": [ + 751, + 88, + 883, + 198 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/778aaff2b30e27c198a8c5c67c11be01014258f51949cce6e5c6d53e2cd349ed.jpg", + "image_caption": [ + "(g) AECRNet" + ], + "image_footnote": [], + "bbox": [ + 84, + 215, + 218, + 325 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0b2756e365a887d592be74b006513c110e5c8ea63f25d49727ff914d8870f115.jpg", + "image_caption": [ + "(h) DeHamer" + ], + "image_footnote": [], + "bbox": [ + 220, + 215, + 351, + 325 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0af63a08186521c8359dc840ed855cb6dc5cb4d0ebb0733de438c60e3b3644e3.jpg", + "image_caption": [ + "(i) DF" + ], + "image_footnote": [], + "bbox": [ + 352, + 215, + 483, + 325 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": 
"images/31292a61a2749d2f8a56a49504c082a91ebce385117ffe9a5e53e0900ca8dd93.jpg", + "image_caption": [ + "(j) MITNet" + ], + "image_footnote": [], + "bbox": [ + 483, + 215, + 617, + 325 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/840917b6f0fc0d28140d03c1a3e4f187d717aac7ec90ba6ab7c125ae24146d67.jpg", + "image_caption": [ + "(k)Fourmer" + ], + "image_footnote": [], + "bbox": [ + 617, + 215, + 750, + 325 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/caf5d5ebbc15f975794703dad70d9332388490a1a391d78b5f3c3d1bf6537b19.jpg", + "image_caption": [ + "(1) Ours" + ], + "image_footnote": [], + "bbox": [ + 751, + 215, + 883, + 325 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6bc70cb929104cc92ee2896d58959002809ddc649bf32052be11050893323cef.jpg", + "image_caption": [ + "(a) Hazy" + ], + "image_footnote": [], + "bbox": [ + 81, + 380, + 161, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/84c6aea0def8a97bf688a610d6cf069567a8af2fad072c9a2ed4f33f79514a8a.jpg", + "image_caption": [ + "(b) $\\kappa = 1.0$" + ], + "image_footnote": [], + "bbox": [ + 163, + 380, + 241, + 479 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9838b44ebf7f5bf29b3c3fb7d70f36c4690d6e262554a59050d5b186ebf0697b.jpg", + "image_caption": [ + "Figure 6. Visual results on real-world hazy images [14].", + "(c) $\\kappa = 1.3$", + "Figure 7. Dehazed images obtained under different $\\kappa$ ." + ], + "image_footnote": [], + "bbox": [ + 243, + 380, + 323, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/05abaae28072af4244ebc728c9ca242e78b5e7b4d19778cb71e777692f30eefc.jpg", + "image_caption": [ + "(d) $\\kappa = 1.5$" + ], + "image_footnote": [], + "bbox": [ + 323, + 380, + 403, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e8dd696bfb955802e39a5f44efc691f9c137e7050fab57bf4b4a00deee0efb61.jpg", + "image_caption": [ + "(e) $\\kappa = 1.8$" + ], + "image_footnote": [], + "bbox": [ + 405, + 380, + 483, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/efac5d22c673dce2c9a5640d53dd4bc4c10d65fcd541d5a7b30348a62bf6d6b0.jpg", + "image_caption": [ + "(f) $\\kappa = 2.0$" + ], + "image_footnote": [], + "bbox": [ + 485, + 380, + 563, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6c9911d2e59e8ab292cc26a6d0f918ae830e3a7460aeacde4a51555570b96084.jpg", + "image_caption": [ + "(g) $\\kappa = 2.3$" + ], + "image_footnote": [], + "bbox": [ + 566, + 380, + 643, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c66e4d4caa57dcf3a0cf2ffee880925265670971b44d42202931588516ad1eda.jpg", + "image_caption": [ + "(h) $\\kappa = 2.5$" + ], + "image_footnote": [], + "bbox": [ + 645, + 380, + 725, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bf7b5296406ec0c2ab3087ef043f8275bace726ec821cc0ae52a37d4c43ac645.jpg", + "image_caption": [ + "(i) $\\kappa = 2.8$" + ], + "image_footnote": [], + "bbox": [ + 725, + 380, + 807, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/11ed9bb178b2069315f5b6a79735fefd08ea869723a88559a62259317c15f8ee.jpg", + "image_caption": [ + "(j) $\\kappa = 3.0$" + ], + "image_footnote": [], + "bbox": [ + 808, + 380, + 888, + 477 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "proposed model is implemented by PyTorch and trained on the single NVIDIA RTX 4090 platform.", + "bbox": [ + 75, + 542, + 468, + 573 + ], + "page_idx": 6 + 
}, + { + "type": "text", + "text": "4.2. Comparison with State-of-the-art Algorithms", + "text_level": 1, + "bbox": [ + 76, + 585, + 460, + 603 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation on Synthetic Datasets. Table 1 and Table 2 show the quantitative dehazing results obtained by state-of-the-art methods. Figure 5 shows the corresponding visual results. The quantitative and visual results demonstrate that the proposed methods achieve an overall better performance than state-of-the-art algorithms.", + "bbox": [ + 75, + 609, + 468, + 700 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation on Real-world Datasets. Table 2 shows the evaluation results of real-world dehazed images. It is worth pointing out that existing research [7] proposes that the reliability of no-reference metrics in the dehazing task is lower than that of full-reference metrics. Figure 6 shows that the details of the dehazed results obtained by our method are visually better. Meanwhile, the brightness of the dehazed images obtained by most comparison algorithms is obvious unrealistic, while the brightness of the dehazed images obtained by our algorithm is approximately globally realistic.", + "bbox": [ + 75, + 703, + 468, + 853 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Overall Evaluation. According to the quantitative and visual results on synthetic and real-world datasets, the proposed SFSNiD achieves overall better performance. More", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "results are placed at Supplementary Materials.", + "bbox": [ + 500, + 542, + 805, + 559 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Ablation Study and Discussions", + "text_level": 1, + "bbox": [ + 500, + 571, + 777, + 588 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Spatial and Frequency Information Interaction. The spatial and frequency information interaction (SFII) modules and naive convolution module are used in the proposed SFSNiD. In order to prove the usefulness of the FDP, LP and BNM that contained in the SFII, ablation experiments for different sub-blocks are performed. The ablation experiment on the proposed SFII includes (i) removing the FDP, (ii) removing the LP, (iii) removing the frequency domain processing in BNM, and (iv) removing the spatial domain process in BNM. These four settings are denoted $R1$ , $R2$ , $R3$ and $R4$ , respectively. Table 3 shows the ablation results under different settings on the UNREAL-NH [31]. The quantitative results demonstrate that the FDP, LP and BNM all have a positive effect on the dehazing performance. Since we must control the size of the paper, visualizations of the amplitude and phase spectrums are placed in Supplementary Materials.", + "bbox": [ + 496, + 597, + 890, + 853 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Hierarchical Training and Frequency Domain Loss. The training process of the proposed SFSNiD takes a hierarchical strategy by using differ scales $s \\in \\{0,1,2\\}$ . Two", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "2637", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/27074d0e13feb110124c553ca04fb3512589ee57513ea2e5611a9ab135ad8862.jpg", + "table_caption": [ + "Table 2. Quantitative results on datasets generated by game engine (GTA5 and UNREAL-NH) and the real-world dataset (RWNH)." 
+ ], + "table_footnote": [], + "table_body": "
Methods | GTA5 SSIM↑ | GTA5 PSNR↑ | UNREAL-NH SSIM↑ | UNREAL-NH PSNR↑ | RWNH BRISQUE↓ | RWNH MUSIQ↑
MRP | 0.662 | 19.460 | 0.467 | 10.039 | 19.418 | 41.194
GD | 0.900 | 30.090 | 0.767 | 21.202 | 31.359 | 33.433
OSFD | 0.711 | 21.461 | 0.443 | 9.169 | 20.860 | 41.779
MSBDN | 0.909 | 32.029 | 0.827 | 25.680 | 38.910 | 29.968
4KDehazing | 0.903 | 30.314 | 0.774 | 23.087 | 34.965 | 33.536
AECRNet | 0.888 | 26.846 | 0.731 | 21.566 | 27.084 | 37.034
DeHamer | 0.928 | 32.597 | 0.740 | 22.441 | 42.269 | 26.788
FSDGN | 0.923 | 32.642 | 0.702 | 21.736 | 32.216 | 35.200
DF | 0.918 | 32.856 | 0.770 | 23.017 | 33.678 | 31.663
MITNet | 0.899 | 31.118 | 0.766 | 21.860 | 35.404 | 31.768
Fourmer | 0.917 | 31.926 | 0.772 | 22.799 | 35.850 | 31.367
Ours | 0.935 | 33.708 | 0.862 | 25.907 | 30.975 | 32.120
", + "bbox": [ + 78, + 127, + 470, + 303 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5abd6dd2f674234b94412ccb616c6d25a2431d7ccf30fdec562ee30824fe8c74.jpg", + "image_caption": [ + "(a) Hazy" + ], + "image_footnote": [], + "bbox": [ + 81, + 316, + 174, + 405 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d5b9c2bafd8259ea81c2f970185c9a7295c9aafebfa0b93fa8162ad2a0b2f155.jpg", + "image_caption": [ + "(b) Pseudo Label", + "Figure 8. Visual results under different training strategies." + ], + "image_footnote": [], + "bbox": [ + 178, + 318, + 272, + 406 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1f3fa71fcd49295d08e3ad014b56977adedbfb27397aaffff308f520a0d38012.jpg", + "image_caption": [ + "(c) Retraining" + ], + "image_footnote": [], + "bbox": [ + 274, + 318, + 370, + 406 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e09788f7f60b4aaf8ff5dff2b50bf82ff2a789d55878016703283298cce0e536.jpg", + "image_caption": [ + "(d) Retraining +LB" + ], + "image_footnote": [], + "bbox": [ + 370, + 318, + 467, + 406 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ablation studies are adopted, which are denoted as (i) $S1$ : $s \\in \\{0\\}$ , and (ii) $S2$ : $s \\in \\{0, 1\\}$ . Meanwhile, in our experimental setup, the spatial domain loss $L_{G}$ and the frequency domain loss $L_{F}$ are applied simultaneously. To verify the effectiveness of frequency domain loss, the setting when $L_{F}$ is not used is denoted as $S3$ ( $s \\in \\{0, 1, 2\\}$ ). Table 4 shows the ablation results under the three different settings. The quantitative results demonstrate two main conclusions. First, the hierarchical training strategy can improve the dehazing performance. Second, the loss in the frequency domain is crucial as it improves the SSIM from 0.816 to 0.862.", + "bbox": [ + 76, + 474, + 468, + 640 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Retraining Strategy and Realistic Brightness Loss. To verify the effectiveness of the retraining strategy and the realistic brightness loss $\\mathcal{L}_B$ , the visual effects are shown in Figure 8. As shown in Figure 8-(b), the texture of the pseudo-labels is blurred due to the domain discrepancy between the synthetic and real-world data. The dehazed images obtained after retraining has unrealistic brightness as shown in Figure 8-(c). It can be seen that the best effect occurs when the retraining strategy and $\\mathcal{L}_B$ are used simultaneously as shown in Figure 8-(d). The BRISQUE $(\\downarrow)$ and MUSIQ $(\\uparrow)$ obtained for the three settings (b), (c) and (d) in Figure 8 are $\\{33.316, 30.432\\}$ , $\\{34.210, 32.373\\}$ and $\\{30.975, 32.120\\}$ , respectively. Taking a comprehensive look at the visual and quantitative evaluation results, our proposed strategy is effective.", + "bbox": [ + 76, + 642, + 468, + 868 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Brightness intensity coefficient $\\kappa$ in $\\mathcal{L}_B$ . In order to demonstrate the effectiveness of $\\kappa$ on the real-world dehaz-", + "bbox": [ + 76, + 869, + 468, + 898 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fb642f7783d1d9a3e6e71a1cc8cc831c973f3d84cfa3a4471f79aa7487b8d403.jpg", + "image_caption": [ + "Figure 9. The average pixel value obtained under different $\\kappa$ . The horizontal dashed line represents the average pixel value of real-world nighttime clear images [14]." 
+ ], + "image_footnote": [], + "bbox": [ + 509, + 89, + 880, + 196 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/35d5ed958ed05aee57c320eddea553e56a578f3957310eb59e31cbcb72187577.jpg", + "table_caption": [ + "Table 3. Ablation study on the SFII." + ], + "table_footnote": [], + "table_body": "
Settings | R1 | R2 | R3 | R4 | Ours
SSIM | 0.848 | 0.858 | 0.851 | 0.845 | 0.862
PSNR | 25.353 | 25.808 | 25.642 | 24.301 | 25.907
", + "bbox": [ + 504, + 290, + 888, + 332 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/81a08b0e6c6d014bc1665fa4b2a38743be64642954a2ae486688570072beec46.jpg", + "table_caption": [ + "Table 4. Ablation study on the scale loss and frequency loss." + ], + "table_footnote": [], + "table_body": "
Settings | S1 | S2 | S3 | Ours
SSIM | 0.854 | 0.851 | 0.816 | 0.862
PSNR | 25.601 | 25.134 | 24.464 | 25.907
", + "bbox": [ + 509, + 369, + 883, + 410 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ing task, we manually set $\\kappa$ to different values. The dehazed images and average pixel value when $\\kappa$ takes different values are shown in Figure 7 and Figure 9, respectively. There are two conclusions that can be drawn. First, as $\\kappa$ increases, the brightness of the dehazed image continues to decrease, which proves that $\\kappa$ can control the brightness of the dehazed image. Second, when $\\kappa$ equals 1.3, the average pixel value (0.225) of dehazed images is close to the average pixel value real-world nighttime clear images (0.217) [14]. Therefore, we set $\\kappa$ to 1.3 as the final setting.", + "bbox": [ + 496, + 435, + 890, + 585 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 599, + 617, + 616 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, a semi-supervised nighttime image dehazing baseline SFSNiD is proposed for real-world nighttime dehazing. A spatial and frequency domain information interaction module is proposed to handle the haze, glow, and noise with localized, coupled and frequency inconsistent characteristics. A retraining strategy and a local window-based brightness loss for semi-supervised training process are designed to suppress haze and glow while achieving realistic brightness. Experiments on public benchmarks validate the effectiveness of the proposed method and its superiority over state-of-the-art methods.", + "bbox": [ + 496, + 625, + 890, + 790 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgment. This work was supported in part by the grant of the National Science Foundation of China under Grant 62172090; Start-up Research Fund of Southeast University under Grant RF1028623097; CAAI-Huawei MindSpore Open Fund. We thank the Big Data Computing Center of Southeast University for providing the facility support on the numerical calculations in this paper.", + "bbox": [ + 496, + 791, + 890, + 897 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "2638", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Cosmin Ancuti, Codruta O Ancuti, Christophe De Vleeschouwer, and Alan C Bovik. Day and night-time dehazing by local airlight estimation. IEEE Transactions on Image Processing, 29:6264-6275, 2020. 2", + "[2] Xiaofeng Cong, Jie Gui, Kai-Chao Miao, Jun Zhang, Bing Wang, and Peng Chen. Discrete haze level dehazing network. In ACM International Conference on Multimedia, pages 1828-1836, 2020. 1", + "[3] Yuning Cui, Yi Tao, Zhenshan Bing, Wenqi Ren, Xinwei Gao, Xiaochun Cao, Kai Huang, and Alois Knoll. Selective frequency network for image restoration. In The Eleventh International Conference on Learning Representations, 2022. 3, 5", + "[4] Yuekun Dai, Chongyi Li, Shangchen Zhou, Ruicheng Feng, and Chen Change Loy. Flare7k: A phenomenological nighttime flare removal dataset. Advances in Neural Information Processing Systems, 35:3926-3937, 2022. 2", + "[5] Hang Dong, Jinshan Pan, Lei Xiang, Zhe Hu, Xinyi Zhang, Fei Wang, and Ming-Hsuan Yang. Multi-scale boosted dehazing network with dense feature fusion. In IEEE Conference on Computer Vision and Pattern Recognition, pages 2157-2167, 2020. 
6", + "[6] Matteo Frigo and Steven G Johnson. Fftw: An adaptive software architecture for the fft. In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 1381-1384, 1998. 4", + "[7] Jie Gui, Xiaofeng Cong, Yuan Cao, Wenqi Ren, Jun Zhang, Jing Zhang, Jiuxin Cao, and Dacheng Tao. A comprehensive survey and taxonomy on single image dehazing based on deep learning. ACM Computing Surveys, 2023. 1, 2, 7", + "[8] Chunle Guo, Chongyi Li, Jichang Guo, Chen Change Loy, Junhui Hou, Sam Kwong, and Runmin Cong. Zero-reference deep curve estimation for low-light image enhancement. In IEEE Conference on Computer Vision and Pattern Recognition, pages 1780-1789, 2020. 6", + "[9] Chun-Le Guo, Qixin Yan, Saeed Anwar, Runmin Cong, Wenqi Ren, and Chongyi Li. Image dehazing transformer with transmission-aware 3d position embedding. In IEEE Conference on Computer Vision and Pattern Recognition, pages 5812-5820, 2022. 2, 6", + "[10] Xin Guo, Xueyang Fu, Man Zhou, Zhen Huang, Jialun Peng, and Zheng-Jun Zha. Exploring fourier prior for single image rain removal. In International Joint Conferences on Artificial Intelligence, pages 935–941, 2022. 3", + "[11] Junming Hou, Qi Cao, Ran Ran, Che Liu, Junling Li, and Liang-jian Deng. Bidomain modeling paradigm for pan-sharpening. In ACM International Conference on Multimedia, pages 347-357, 2023. 4", + "[12] Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation networks. In IEEE Conference on Computer Vision and Pattern Recognition, pages 7132-7141, 2018. 4", + "[13] Shirui Huang, Keyan Wang, Huan Liu, Jun Chen, and Yun-song Li. Contrastive semi-supervised learning for underwater image restoration via reliable bank. In IEEE Conference on Computer Vision and Pattern Recognition, pages 18145-18155, 2023. 6" + ], + "bbox": [ + 78, + 114, + 468, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Yeying Jin, Beibei Lin, Wending Yan, Wei Ye, Yuan Yuan, and Robby T Tan. Enhancing visibility in nighttime haze images using guided apsf and gradient adaptive convolution. In ACM International Conference on Multimedia, 2023. 1, 2, 3, 5, 6, 7, 8", + "[15] Mingye Ju, Can Ding, Charles A Guo, Wenqi Ren, and Dacheng Tao. Idrlp: Image dehazing using region line prior. IEEE Transactions on Image Processing, 30:9043-9057, 2021. 1", + "[16] Mingye Ju, Can Ding, Wenqi Ren, Yi Yang, Dengyin Zhang, and Y Jay Guo. Ide: Image dehazing and exposure using an enhanced atmospheric scattering model. IEEE Transactions on Image Processing, 30:2180-2192, 2021. 2", + "[17] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In IEEE International Conference on Computer Vision, pages 5148-5157, 2021. 6", + "[18] Beomhyuk Koo and Gyeonghwan Kim. Nighttime haze removal with glow decomposition using gan. In Pattern Recognition: 5th Asian Conference, pages 807-820, 2020. 2", + "[19] Shiba Kuanar, Dwarikanath Mahapatra, Monalisa Bilas, and KR Rao. Multi-path dilated convolution network for haze and glow removal in nighttime images. The Visual Computer, pages 1-14, 2022. 1", + "[20] Boyi Li, Xiulian Peng, Zhangyang Wang, Jizheng Xu, and Dan Feng. Aod-net: All-in-one dehazing network. In IEEE International Conference on Computer Vision, pages 4770-4778, 2017. 1", + "[21] Boyi Li, Wenqi Ren, Dengpan Fu, Dacheng Tao, Dan Feng, Wenjun Zeng, and Zhangyang Wang. Benchmarking single-image dehazing and beyond. IEEE Transactions on Image Processing, 28(1):492-505, 2018. 
1", + "[22] Chongyi Li, Chun-Le Guo, Man Zhou, Zhexin Liang, Shangchen Zhou, Ruicheng Feng, and Chen Change Loy. Embedding fourier for ultra-high-definition low-light image enhancement. arXiv preprint arXiv:2302.11831, 2023. 2, 6", + "[23] Kun Li, Dan Guo, and Meng Wang. Proposal-free video grounding with contextual pyramid network. In AAAI Conference on Artificial Intelligence, pages 1902-1910, 2021. 3", + "[24] Yu Li, Robby T Tan, and Michael S Brown. Nighttime haze removal with glow and multiple light colors. In IEEE International Conference on Computer Vision, pages 226-234, 2015. 2", + "[25] Yudong Liang, Bin Wang, Wangmeng Zuo, Jiaying Liu, and Wenqi Ren. Self-supervised learning and adaptation for single image dehazing. In International Joint Conference on Artificial Intelligence, pages 1-15, 2022. 1", + "[26] Yinghong Liao, Zhuo Su, Xiangguo Liang, and Bin Qiu. Hdp-net: Haze density prediction network for nighttime de-hazing. In Pacific Rim Conference on Multimedia, pages 469-480, 2018. 1, 6", + "[27] Xiaohong Liu, Yongrui Ma, Zhihao Shi, and Jun Chen. Griddehazenet: Attention-based multi-scale network for image dehazing. In IEEE International Conference on Computer Vision, pages 7314-7323, 2019. 1, 6", + "[28] Yun Liu, Anzhi Wang, Hao Zhou, and Pengfei Jia. Single nighttime image dehazing based on image decomposition. Signal Processing, 183:107986, 2021. 1" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "2639", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[29] Yun Liu, Zhongsheng Yan, Jinge Tan, and Yuche Li. Multipurpose oriented single nighttime image haze removal based on unified variational retina model. IEEE Transactions on Circuits and Systems for Video Technology, 33(4):1643-1657, 2022. 2", + "[30] Yun Liu, Zhongsheng Yan, Aimin Wu, and Tian Ye. Night-time image dehazing based on variational decomposition model. In IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 640-649, 2022. 2", + "[31] Yun Liu, Zhongsheng Yan, Sixiang Chen, Tian Ye, Wenqi Ren, and Erkang Chen. Nighthazeformer: Single nighttime haze removal using prior query transformer. In ACM International Conference on Multimedia, 2023. 1, 2, 3, 6, 7", + "[32] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In IEEE International Conference on Computer Vision, pages 10012-10022, 2021. 2, 4", + "[33] Wenqi Ren, Sifei Liu, Lin Ma, Qianqian Xu, Xiangyu Xu, Xiaochun Cao, Junping Du, and Ming-Hsuan Yang. Low-light image enhancement via a deep hybrid network. IEEE Transactions on Image Processing, 28(9):4364-4375, 2019. 6", + "[34] Wenqi Ren, Jinshan Pan, Hua Zhang, Xiaochun Cao, and Ming-Hsuan Yang. Single image dehazing via multi-scale convolutional neural networks with holistic edges. International Journal of Computer Vision, 128:240-259, 2020. 1", + "[35] Yuanjie Shao, Lerenhan Li, Wenqi Ren, Changxin Gao, and Nong Sang. Domain adaptation for image dehazing. In IEEE Conference on Computer Vision and Pattern Recognition, pages 2808-2817, 2020. 6", + "[36] Hao Shen, Zhong-Qiu Zhao, and Wandi Zhang. Adaptive dynamic filtering network for image denoising. In AAAI Conference on Artificial Intelligence, pages 2227-2235, 2023. 6", + "[37] Hao Shen, Zhong-Qiu Zhao, Yulun Zhang, and Zhao Zhang. 
Mutual information-driven triple interaction network for efficient image dehazing. In ACM International Conference on Multimedia, pages 7-16, 2023. 2, 6", + "[38] Yuda Song, Zhuqing He, Hui Qian, and Xin Du. Vision transformers for single image dehazing. IEEE TIP, 32:1927-1941, 2023. 1, 2, 6", + "[39] Shangquan Sun, Wenqi Ren, and Tao Wang. Rethinking image restoration for object detection. Advances in Neural Information Processing Systems, 35:4461-4474, 2022. 1", + "[40] Fei Wang, Dan Guo, and Kun Li. Eulermormer: Robust eulerian motion magnification via dynamic filtering within transformer. arXiv preprint arXiv:2312.04152, 2023. 3", + "[41] Wenhui Wang, Anna Wang, and Chen Liu. Variational single nighttime image haze removal with a gray haze-line prior. IEEE Transactions on Image Processing, 31:1349-1363, 2022. 2", + "[42] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 6", + "[43] Haiyan Wu, Yanyun Qu, Shaohui Lin, Jian Zhou, Ruizhi Qiao, Zhizhong Zhang, Yuan Xie, and Lizhuang Ma. Contrastive learning for compact single image dehazing. In IEEE" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Conference on Computer Vision and Pattern Recognition, pages 10551-10560, 2021. 1, 2, 6", + "[44] Rui-Qi Wu, Zheng-Peng Duan, Chun-Le Guo, Zhi Chai, and Chongyi Li. Ridcp: Revitalizing real image dehazing via high-quality codebook priors. In IEEE Conference on Computer Vision and Pattern Recognition, pages 22282-22291, 2023. 6", + "[45] Wending Yan, Robby T Tan, and Dengxin Dai. Night-time defogging using high-low frequency decomposition and grayscale-color networks. In European Conference on Computer Vision, pages 473-488, 2020. 3, 6", + "[46] Minmin Yang, Jianchang Liu, and Zhengguo Li. Superpixel-based single nighttime image haze removal. IEEE Transactions on Multimedia, 20(11):3008-3018, 2018. 2", + "[47] Hu Yu, Naishan Zheng, Man Zhou, Jie Huang, Zeyu Xiao, and Feng Zhao. Frequency and spatial dual guidance for image dehazing. In European Conference on Computer Vision, pages 181-198, 2022. 6", + "[48] Jing Zhang and Dacheng Tao. Famed-net: A fast and accurate multi-scale end-to-end dehazing network. IEEE Transactions on Image Processing, 29:72-84, 2019. 1", + "[49] Jing Zhang, Yang Cao, and Zengfu Wang. Nighttime haze removal based on a new imaging model. In IEEE International Conference on Image Processing, pages 4557-4561, 2014. 1, 2", + "[50] Jing Zhang, Yang Cao, Shuai Fang, Yu Kang, and Chang Wen Chen. Fast haze removal for nighttime image using maximum reflectance prior. In IEEE Conference on Computer Vision and Pattern Recognition, pages 7418-7426, 2017. 1, 3, 6", + "[51] Jing Zhang, Yang Cao, Zheng-Jun Zha, and Dacheng Tao. Nighttime dehazing with a synthetic benchmark. In ACM International Conference on Multimedia, pages 2355-2363, 2020. 1, 3, 6", + "[52] Jingang Zhang, Wenqi Ren, Shengdong Zhang, He Zhang, Yunfeng Nie, Zhe Xue, and Xiaochun Cao. Hierarchical density-aware dehazing network. IEEE Transactions on Cybernetics, 52(10):11187-11199, 2021. 1", + "[53] Shengdong Zhang, Wenqi Ren, Xin Tan, Zhi-Jie Wang, Yong Liu, Jingang Zhang, Xiaoqin Zhang, and Xiaochun Cao. Semantic-aware dehazing network with adaptive feature fusion. IEEE Transactions on Cybernetics, 53(1):454-467, 2021. 
1", + "[54] Zhuoran Zheng, Wenqi Ren, Xiaochun Cao, Xiaobin Hu, Tao Wang, Fenglong Song, and Xiuyi Jia. Ultra-high-definition image dehazing via multi-guided bilateral learning. In IEEE Conference on Computer Vision and Pattern Recognition, pages 16180-16189, 2021. 1, 2, 6", + "[55] Man Zhou, Jie Huang, Chun-Le Guo, and Chongyi Li. FOurmer: an efficient global modeling paradigm for image restoration. In International Conference on Machine Learning, pages 42589-42601, 2023. 4, 6", + "[56] Man Zhou, Keyu Yan, Xueyang Fu, Aiping Liu, and Chengjun Xie. Pan-guided band-aware multi-spectral feature enhancement for pan-sharpening. IEEE Transactions on Computational Imaging, 9:238-249, 2023. 4" + ], + "bbox": [ + 501, + 92, + 890, + 883 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "2640", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/4a280801-3209-4899-b345-f6dbc9c9ec52_model.json b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/4a280801-3209-4899-b345-f6dbc9c9ec52_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a3814b5d097a3adf3850caf69b78c9c6e56ce241 --- /dev/null +++ b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/4a280801-3209-4899-b345-f6dbc9c9ec52_model.json @@ -0,0 +1,3443 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.044 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.131, + 0.885, + 0.177 + ], + "angle": 0, + "content": "A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.203, + 0.856, + 0.258 + ], + "angle": 0, + "content": "Xiaofeng Cong\\(^{1}\\) Jie Gui\\(^{1*}\\) Jing Zhang\\(^{2}\\) Junming Hou\\(^{1}\\) Hao Shen\\(^{3}\\) \n\\(^{1}\\)Southeast University \\(^{2}\\)University of Sydney \\(^{3}\\)Hefei University of Technology \ncxf_svip@163.com, {guijie, junming_hou}@seu.edu.cn, {jingzhang.cy, haoshenhs}@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.292, + 0.314, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.474, + 0.687 + ], + "angle": 0, + "content": "Existing research based on deep learning has extensively explored the problem of daytime image dehazing. However, few studies have considered the characteristics of nighttime hazy scenes. There are two distinctions between nighttime and daytime haze. First, there may be multiple active colored light sources with lower illumination intensity in nighttime scenes, which may cause haze, glow and noise with localized, coupled and frequency inconsistent characteristics. Second, due to the domain discrepancy between simulated and real-world data, unrealistic brightness may occur when applying a dehazing model trained on simulated data to real-world data. 
To address the above two issues, we propose a semi-supervised model for real-world nighttime dehazing. First, the spatial attention and frequency spectrum filtering are implemented as a spatial-frequency domain information interaction module to handle the first issue. Second, a pseudo-label-based retraining strategy and a local window-based brightness loss for semi-supervised training process is designed to suppress haze and glow while achieving realistic brightness. Experiments on public benchmarks validate the effectiveness of the proposed method and its superiority over state-of-the-art methods. The source code and Supplementary Materials are placed in the https://github.com/Xiaofeng-life/SFSNiD." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.716, + 0.21, + 0.731 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.741, + 0.47, + 0.862 + ], + "angle": 0, + "content": "Nighttime and daytime images may contain hazy effects, which may cause their quality to be degraded [7, 15, 39, 50]. Therefore, two valuable research fields are proposed, which are daytime single image dehazing (DaSID) [2, 38, 54] and nighttime single image dehazing (NiSID) [14, 19, 31], respectively. Compared with the daytime hazy image, the imaging of the nighttime hazy image is more complex [28, 49]. Currently, NiSID is still a challenging problem." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.862, + 0.47, + 0.877 + ], + "angle": 0, + "content": "Existing research on DaSID [20, 25, 27, 34, 43, 48, 52," + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.291, + 0.632, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.354, + 0.631, + 0.415 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.541, + 0.417, + 0.585, + 0.428 + ], + "angle": 0, + "content": "(a) Hazy" + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.291, + 0.759, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.354, + 0.758, + 0.415 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.639, + 0.417, + 0.752, + 0.427 + ], + "angle": 0, + "content": "(b) IM-YellowHaze [26]" + }, + { + "type": "image", + "bbox": [ + 0.761, + 0.291, + 0.888, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.761, + 0.354, + 0.888, + 0.415 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.774, + 0.417, + 0.881, + 0.428 + ], + "angle": 0, + "content": "(c) IM-NightHaze [26]" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.432, + 0.631, + 0.493 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.493, + 0.631, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.528, + 0.557, + 0.611, + 0.568 + ], + "angle": 0, + "content": "(d) IM-NHR [51]" + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.432, + 0.759, + 0.493 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.493, + 0.758, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.638, + 0.557, + 0.762, + 0.568 + ], + "angle": 0, + "content": "(e) GE-UNREAL-NH [31]" + }, + { + "type": "image", + "bbox": [ + 0.761, + 0.432, + 0.888, + 0.493 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.761, + 0.493, + 0.888, + 0.555 + ], + 
"angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.798, + 0.557, + 0.837, + 0.567 + ], + "angle": 0, + "content": "(f) Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.578, + 0.892, + 0.69 + ], + "angle": 0, + "content": "Figure 1. Visualization of real-world dehazed images, where the \"IM\" and \"GE-\" denote the dehazed results obtained by training on imaging model (IM) and game engine (GE) simulated datasets, respectively. The curve figure represents the pixel histogram, where the \\( x \\) and \\( y \\) coordinates represent the pixel values and corresponding numbers, respectively. The \\( x \\) and \\( y \\) coordinates of the bar figure represent the color channel and the corresponding average pixel value, respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.718, + 0.892, + 0.809 + ], + "angle": 0, + "content": "53] have achieved impressive performance. Various effective DaSID algorithms have been proposed and verified on benchmark daytime datasets [21]. However, these DaSID algorithms are designed for the properties of daytime hazy and haze-free images, without taking into account the characteristics of nighttime hazy and haze-free images." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Currently, NiSID research is divided into two types, namely non-deep learning-based NiSID and deep learning-based NiSID. On the one hand, the prior hypotheses and statistical laws are explored [50, 51]. The maximum reflectance prior to estimate the varying ambient illumination is proposed by [50]. The illumination estimation, color" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.888, + 0.221, + 0.9 + ], + "angle": 0, + "content": "*Corresponding author" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "2631" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.243 + ], + "angle": 0, + "content": "correction and image prior are integrated by [49]. On the other hand, the deep learning-based architectures are designed for the NiSID task [14, 31]. Liu et al. [31] combine the dark channel and bright channel prior with the Transformer mechanism [32] into an end-to-end training flow. The gradient-adaptive convolution and glow pair synthesis are designed by Jin et al. [14]. Existing learning-based algorithms have achieved remarkable performance on synthetic datasets. However, these methods still lack consideration of the characteristics of nighttime hazy images." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.243, + 0.472, + 0.289 + ], + "angle": 0, + "content": "During the day, the main source of imaging light is sunlight [7]. The formation of the daytime hazy image can be described by the atmospheric scattering model [7] as" + }, + { + "type": "equation", + "bbox": [ + 0.152, + 0.297, + 0.47, + 0.314 + ], + "angle": 0, + "content": "\\[\nI (a) = J (a) t (a) + A (a) (1 - t (a)), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.322, + 0.47, + 0.385 + ], + "angle": 0, + "content": "where \\( I(a) \\), \\( J(a) \\), \\( t(a) \\) and \\( A(a) \\) denote the hazy image, clear image, transmission map and global atmospheric light, respectively. The \\( a \\) means the pixel location. 
Meanwhile, a widely used physical model [16, 18] in the NiSID task is" + }, + { + "type": "equation", + "bbox": [ + 0.089, + 0.392, + 0.47, + 0.41 + ], + "angle": 0, + "content": "\\[\nI (a) = J (a) t (a) + A (a) (1 - t (a)) + L _ {s} (a) * \\varkappa (a), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.418, + 0.47, + 0.508 + ], + "angle": 0, + "content": "where \\( L_{s}(a) \\) and \\( \\varkappa(a) \\) denote the light sources and atmospheric point spread function. As shown in Eq. 1 and Eq. 2, the main distinction between daytime and nighttime haze imaging is light sources [1, 4, 24, 29, 30, 41, 46], which we consider to be the main source of the difficulty. Specifically, two outstanding issues are considered as follows." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.509, + 0.473, + 0.75 + ], + "angle": 0, + "content": "- Localized, Coupled and Frequency Inconsistent: As shown in Figure 1, multiple active light sources may exist simultaneously. Therefore, the distortion of nighttime images, namely the haze that is mainly generated by suspended particles and liquid water droplets, the glow that is mainly produced by active light sources and the noise that is mainly caused by low intensity, is usually localized. Meanwhile, these types of distortions are mixed throughout the image, which is coupled. Furthermore, the haze and glow will cause the loss of high-frequency signals, while the noise belongs to high-frequency disturbance signals [22] that needs to be eliminated. This means that these distortions have inconsistent frequency characteristics. In a word, a challenging issue is how to simultaneously handle distortions with localized, coupled and frequency inconsistent characteristics." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.75, + 0.473, + 0.902 + ], + "angle": 0, + "content": "- Unrealistic Brightness Intensity: Nighttime hazy datasets based on real-world images synthesized by imaging model (IM) are difficult to simulate multiple active light sources, while nighttime hazy datasets based on game engine (GE) cannot perfectly reproduce the harmonious brightness of real-world nighttime scenes. As we observed in Figure 1, the dehazed images obtained under IM datasets still suffer from the glow and haze that caused by multiple light sources, but the overall brightness is realistic. The dehazed images obtained under GE" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.894, + 0.152 + ], + "angle": 0, + "content": "dataset show less haze and glow, but the scene brightness is unrealistic. In a word, an unsolved problem faced by data-driven algorithms is how to suppress haze and glow while achieving realistic brightness." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.153, + 0.895, + 0.395 + ], + "angle": 0, + "content": "Therefore, we propose a semi-supervised dehazing framework that can be used for the real-world NiSID task. Firstly, the local attention [32] is adopted to learn the inductive bias in the spatial domain to suppress local distortions. A frequency spectrum dynamic filtering strategy is designed to handle distortions with inconsistent frequency characteristics. Considering the coupled of these distortions, the spatial and frequency information are integrated as a bidomain interaction module for feature extraction and image reconstruction. Secondly, aiming at suppressing distortions while achieving realistic brightness. 
The simulation data provided by the game engine is utilized to generate pseudo labels that can suppress haze and glow for the retraining process. Then, real-world hazy images are adopted as brightness-realistic signals for the realistic brightness constraint. Overall, the main contributions of this paper are as follows." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.396, + 0.892, + 0.486 + ], + "angle": 0, + "content": "- We propose a spatial and frequency domain aware semi-supervised nighttime dehazing network (SFSNiD). SFSNiD can remove nighttime haze that is accompanied by glow and noise. The experimental results on synthetic and real-world datasets show that the proposed method can achieve impressive performance." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.486, + 0.892, + 0.591 + ], + "angle": 0, + "content": "- We design a spatial and frequency domain information interaction (SFII) module to simultaneously handle the haze, glow and noise with localized, coupled and frequency inconsistent characteristics. The multi-channel amplitude and phase spectrums are dynamically filtered and aggregated. The spatial and frequency domain features are integrated by local attention." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.592, + 0.892, + 0.698 + ], + "angle": 0, + "content": "- A retraining strategy and a local window-based brightness loss for the semi-supervised training process are designed to suppress haze and glow while achieving realistic brightness. The retraining strategy is based on pseudo labels. The hazy image is divided into non-overlapping windows for the calculation of the local brightness map to provide realistic brightness supervision." + }, + { + "type": "list", + "bbox": [ + 0.501, + 0.396, + 0.892, + 0.698 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.715, + 0.642, + 0.73 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.741, + 0.679, + 0.757 + ], + "angle": 0, + "content": "2.1. Daytime Dehazing" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.894, + 0.903 + ], + "angle": 0, + "content": "A variety of effective dehazing algorithms for DaSID have been proposed. An ultra-high resolution dehazing method based on a bilateral grid is proposed by 4KDehazing [54]. AECRNet [43] introduces contrastive learning into the dehazing process. The prior information and visual attention mechanism are utilized in DeHamer [9]. DF [38] designs an encoder-decoder architecture which is entirely based on multi-head self-attention [32]. MITNet [37] combines the mutual information-driven constraint and adaptive triple interaction" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2632" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.088, + 0.891, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.318, + 0.317, + 0.651, + 0.332 + ], + "angle": 0, + "content": "Figure 2. The overall pipeline of the proposed SFSNiD." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.358, + 0.471, + 0.435 + ], + "angle": 0, + "content": "strategy into a supervised training process. Although these DaSID algorithms have achieved impressive performance, they are not designed for the characteristics of nighttime hazy images, which may cause them to have certain limitations on the NiSID task [31]."
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.444, + 0.269, + 0.46 + ], + "angle": 0, + "content": "2.2. Nighttime Dehazing" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.467, + 0.474, + 0.803 + ], + "angle": 0, + "content": "Compared with DaSID, NiSID has received fewer attention. On the one hand, the prior hypotheses and statistical laws are utilized in the non-deep learning-based NiSID methods [50, 51]. A maximum reflectance prior is proposed by MRP [50], which providing a way to estimate the varying ambient illumination. An optimal-scale fusion-based method is designed by OSFD [51], which utilizes a parameter estimation dehazing flow. On the other hand, the data-driven strategies [23, 40] are adopted in the deep learning-based NiSID methods [14, 31, 45]. NightHazeFormer [31] combines the visual transformer and prior knowledge (dark channel and bright channel) into an end-to-end enhancement process. GAC [14] utilizes the angular point spread function to reduce the glow effect in nighttime scenes. Yan et al. [45] propose a strategy which decomposes the image into scene texture information and scene structure information. According to recent research, deep learning-based NiSID algorithms can achieve relatively better quantitative performance according to sufficient synthetic data. However, the haze, glow, and noise with localized, coupled and frequency inconsistent characteristics are not fully considered by these deep learning-based NiSID algorithms." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.815, + 0.176, + 0.83 + ], + "angle": 0, + "content": "3. Methods" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.903 + ], + "angle": 0, + "content": "The hazy domain and haze-free domain are marked as \\( X \\) and \\( Y \\), respectively. The synthesized hazy and haze-free image datasets are denoted \\( \\mathcal{D}_X \\) and \\( \\mathcal{D}_Y \\), which contain \\( N \\) images, respectively. The real-world hazy image and haze-" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.358, + 0.895, + 0.556 + ], + "angle": 0, + "content": "free datasets are denoted as \\(\\mathcal{R}_X\\) and \\(\\mathcal{R}_Y\\), which include \\(M\\) images, respectively. The convolution operation is denoted as \\(C_t^k (\\cdot)\\), where the superscript \\(k\\) and subscript \\(t\\) denote the kernel size and stride, respectively. The \\(\\varpi (\\cdot),\\sigma (\\cdot),\\delta (\\cdot)\\) and \\(sf(\\cdot)\\) denote the global average pooling, LeakyReLU, sigmoid and softmax operations, respectively. The input hazy images and predicted dehazed images at three scales are marked \\(x_{i}^{s}\\in \\mathcal{D}_{X}\\) and \\(p_i^s\\) respectively, where \\(s\\in \\{0,1,2\\}\\) and \\(i\\) denotes the \\(i\\)-th example. The size of \\(x_{i}^{0},x_{i}^{1}\\) and \\(x_{i}^{2}\\) are \\(H\\times W\\times C,\\frac{H}{2}\\times \\frac{W}{2}\\times C\\) and \\(\\frac{H}{4}\\times \\frac{W}{4}\\times C\\), respectively. The \\(H,W\\) and \\(C\\) denote the height, width and number of channels, respectively. The size of \\(p_i^s\\) remains the same as \\(x_{i}^{s}\\). The network at scale \\(s\\) is denoted as \\(\\Psi^s (\\cdot)\\)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.561, + 0.684, + 0.576 + ], + "angle": 0, + "content": "3.1. Network Structure" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.584, + 0.892, + 0.721 + ], + "angle": 0, + "content": "The multi-scale structure [3] of the SFSNiD is shown in Figure 2. 
Two kinds of modules are included in the proposed network, namely (i) spatial and frequency information interaction (SFII) model, (ii) convolution input (ConvI), convolution output (ConvO), convolution downsampling (ConvD), and convolution upsampling (ConvU). The ConvI projects the image into the feature space, while ConvO does the opposite. ConvD reduces the length and width of the feature map by half, while ConvU does the opposite." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.728, + 0.892, + 0.744 + ], + "angle": 0, + "content": "3.2. Spatial and Frequency Information Interaction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.816 + ], + "angle": 0, + "content": "Preliminary. For a feature map \\(z\\in \\mathbb{R}^{\\widetilde{H}\\times \\widetilde{W}\\times \\widetilde{C}}\\) , where \\(\\widetilde{H}\\) \\(\\widetilde{W}\\) and \\(\\widetilde{C}\\) denote the height, width and number of channels, respectively. We first project each of its channel \\(z_{\\widetilde{c}}\\) to the frequency domain by the Fourier [10] transformation \\(\\mathcal{F}\\) as" + }, + { + "type": "equation", + "bbox": [ + 0.519, + 0.822, + 0.892, + 0.865 + ], + "angle": 0, + "content": "\\[\n\\mathcal {F} \\left(z _ {\\widetilde {c}}\\right) (u, v) = \\sum_ {h = 0} ^ {\\widetilde {H} - 1} \\sum_ {w = 0} ^ {\\widetilde {W} - 1} z _ {\\widetilde {c}} (h, w) e ^ {- j 2 \\pi \\left(\\frac {h}{H} u + \\frac {w}{W} v\\right)}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "where \\((h,w)\\) and \\((u,v)\\) represent the coordinates in the spatial and frequency domain, respectively. The \\(\\widetilde{c} \\in\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2633" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.089, + 0.464, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.263, + 0.419, + 0.277 + ], + "angle": 0, + "content": "Figure 3. The sub-modules of the proposed SFII." + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.291, + 0.462, + 0.417 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.107, + 0.429, + 0.438, + 0.443 + ], + "angle": 0, + "content": "Figure 4. The overall architecture of the proposed SFII." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.466, + 0.47, + 0.558 + ], + "angle": 0, + "content": "\\(\\{0,1,\\dots,\\widetilde{C}\\}\\) denotes the channel index. Correspondingly, the \\(\\mathcal{F}^{-1}\\) is defined as the inverse Fourier transformation [55]. Then, the real part \\(\\mathcal{R}(z_{\\widetilde{c}})(u,v)\\) and imaginary part \\(\\mathcal{I}(z_{\\widetilde{c}})(u,v)\\) can be obtained by \\(\\mathcal{F}(z_{\\widetilde{c}})(u,v)\\). 
The amplitude spectrum \\(\\mathcal{A}(z_{\\widetilde{c}})(u,v)\\) and phase spectrum \\(\\mathcal{P}(z_{\\widetilde{c}})(u,v)\\) of \\(\\mathcal{F}(z_{\\widetilde{c}})(u,v)\\) on the single channel can be obtained by" + }, + { + "type": "equation", + "bbox": [ + 0.116, + 0.568, + 0.469, + 0.587 + ], + "angle": 0, + "content": "\\[\n\\mathcal {A} \\left(z _ {\\bar {c}}\\right) (u, v) = \\sqrt {\\mathcal {R} ^ {2} \\left(z _ {\\bar {c}}\\right) (u , v) + \\mathcal {I} ^ {2} \\left(z _ {\\bar {c}}\\right) (u , v)}, \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.15, + 0.605, + 0.469, + 0.639 + ], + "angle": 0, + "content": "\\[\n\\mathcal {P} \\left(z _ {\\widetilde {c}}\\right) (u, v) = \\arctan \\left[ \\frac {\\mathcal {I} \\left(z _ {\\widetilde {c}}\\right) (u , v)}{\\mathcal {R} \\left(z _ {\\widetilde {c}}\\right) (u , v)} \\right]. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.642, + 0.469, + 0.704 + ], + "angle": 0, + "content": "The full channel amplitude spectrum \\(\\mathcal{A}(z)(u,v)\\in\\) \\(\\mathbb{R}^{\\widetilde{H}\\times \\widetilde{W}\\times \\widetilde{C}}\\) and phase spectrum \\(\\mathcal{P}(z)(u,v)\\in \\mathbb{R}^{\\widetilde{H}\\times \\widetilde{W}\\times \\widetilde{C}}\\) can be obtained by applying the Eq. 3, Eq. 4 and Eq. 5 on each channel of \\(z\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.705, + 0.469, + 0.794 + ], + "angle": 0, + "content": "Frequency Spectrum Dynamic Aggregation (FSDA). The haze, glow and noise with inconsistent frequency characteristics can be processed in the frequency domain by dynamic spectrum filter. The amplitude spectrum and phase spectrum of different channels are aggregated by the pointwise convolution as" + }, + { + "type": "equation", + "bbox": [ + 0.155, + 0.805, + 0.469, + 0.822 + ], + "angle": 0, + "content": "\\[\n\\mathcal {S} ^ {*} (z) (u, v) = \\sigma \\left(C _ {1} ^ {1} (\\mathcal {S} (z) (u, v))\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.83, + 0.469, + 0.876 + ], + "angle": 0, + "content": "where \\(S(z)(u,v)\\in \\{\\mathcal{A}(z)(u,v),\\mathcal{P}(z)(u,v)\\}\\). To perform channel aggregation of spectral information, the channel weight [12] map \\(\\mathcal{W}\\) are calculated as" + }, + { + "type": "equation", + "bbox": [ + 0.104, + 0.884, + 0.469, + 0.903 + ], + "angle": 0, + "content": "\\[\n\\mathcal {W} (z) (u, v) = \\delta \\left(C _ {1} ^ {1} \\left(\\sigma \\left(C _ {1} ^ {1} \\left(\\varpi \\left(S ^ {*} (z) (u, v)\\right)\\right)\\right)\\right)\\right), \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.09, + 0.892, + 0.123 + ], + "angle": 0, + "content": "where \\(\\mathcal{W}(z)(u,v)\\in \\mathbb{R}^{1\\times 1\\times \\widetilde{C}}\\). Then the channel weight map is applied to the frequency spectrum as" + }, + { + "type": "equation", + "bbox": [ + 0.546, + 0.133, + 0.892, + 0.15 + ], + "angle": 0, + "content": "\\[\n\\dot {S} (z) (u, v) = C _ {1} ^ {1} (\\mathcal {W} (z) (u, v) \\cdot \\mathcal {S} ^ {*} (z) (u, v)), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.16, + 0.892, + 0.207 + ], + "angle": 0, + "content": "where the spectrum filter (SF) of \\(\\dot{S} (z)(u,v)\\) is shown in Figure 3-(a). 
The filtering operation is performed by the residual connection, the filtered component is obtained by" + }, + { + "type": "equation", + "bbox": [ + 0.565, + 0.215, + 0.892, + 0.235 + ], + "angle": 0, + "content": "\\[\n\\widetilde {\\mathcal {S}} (z) (u, v) = \\dot {\\mathcal {S}} (z) (u, v) + \\mathcal {S} (z) (u, v). \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.245, + 0.892, + 0.294 + ], + "angle": 0, + "content": "The filtered \\(\\widetilde{\\mathcal{A}}(z)(u,v)\\) and \\(\\widetilde{\\mathcal{P}}(z)(u,v)\\) can be obtained based on the processing flow from \\(S(z)(u,v)\\) to \\(\\widetilde{S}(z)(u,v)\\). Then, the real and imaginary parts are obtained by" + }, + { + "type": "equation", + "bbox": [ + 0.541, + 0.302, + 0.892, + 0.322 + ], + "angle": 0, + "content": "\\[\n\\widetilde {\\mathcal {R}} (z) (u, v) = \\widetilde {\\mathcal {A}} (z) (u, v) \\cdot \\cos \\widetilde {\\mathcal {P}} (z) (u, v), \\tag {10}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.556, + 0.345, + 0.892, + 0.364 + ], + "angle": 0, + "content": "\\[\n\\widetilde {\\mathcal {I}} (z) (u, v) = \\widetilde {\\mathcal {A}} (z) (u, v) \\cdot \\sin \\widetilde {\\mathcal {P}} (z) (u, v). \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.37, + 0.892, + 0.4 + ], + "angle": 0, + "content": "After dynamic parameter learning in the frequency domain, we remap the feature map to the spatial domain as" + }, + { + "type": "equation", + "bbox": [ + 0.576, + 0.413, + 0.891, + 0.432 + ], + "angle": 0, + "content": "\\[\nz _ {f} = \\mathcal {F} ^ {- 1} (\\widetilde {\\mathcal {R}} (z) (u, v), \\widetilde {\\mathcal {I}} (z) (u, v)), \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.438, + 0.892, + 0.546 + ], + "angle": 0, + "content": "where \\(z_{f} \\in \\mathbb{R}^{\\widetilde{H} \\times \\widetilde{W} \\times \\widetilde{C}}\\). The Fourier transformation and inverse Fourier transformation can be implemented using DFT and IDFT algorithms [6, 11, 56]. Here, we define the calculation from Eq. 3 to Eq. 12 as frequency spectrum dynamic aggregation (FSDA), which represent the processing flow from \\(z\\) to \\(z_{f}\\) that is shown in Figure 3-(b). For convenience, the FSDA is denoted as \\(\\mathcal{FS}(\\cdot)\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.546, + 0.892, + 0.67 + ], + "angle": 0, + "content": "Frequency Domain Projection (FDP). To deal with distortions in the frequency domain, we first introduce frequency domain interactions before computing local inductive bias. For the input feature map \\( z \\in \\mathbb{R}^{\\widetilde{H} \\times \\widetilde{W} \\times \\widetilde{C}} \\), it is processed by the layer normalization operation (\\( LN(\\cdot) \\)) [32] to obtain the normalized feature \\( z_{l} = LN(z) \\). Then, the normalized feature \\( z_{l} \\) is projected into \\( Q_{f} \\) (query), \\( K_{f} \\) (key) and \\( V_{f} \\) (value) by the projection in the frequency domain as" + }, + { + "type": "equation", + "bbox": [ + 0.514, + 0.68, + 0.891, + 0.698 + ], + "angle": 0, + "content": "\\[\nQ _ {f} = \\mathcal {F S} _ {Q} (z _ {l}), K _ {f} = \\mathcal {F S} _ {K} (z _ {l}), V _ {f} = \\mathcal {F S} _ {V} (z _ {l}), \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.707, + 0.891, + 0.768 + ], + "angle": 0, + "content": "where the \\(\\mathcal{FS}_Q(\\cdot),\\mathcal{FS}_K(\\cdot)\\) and \\(\\mathcal{FS}_V(\\cdot)\\) denote three independent projection operations with learnable parameters, respectively. 
The generation process of the \\(Q_{f},K_{f}\\) and \\(V_{f}\\) is denoted as the frequency domain projection (FDP)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.769, + 0.892, + 0.86 + ], + "angle": 0, + "content": "Bidomain Local Perception (BLP). After obtaining the features \\( Q_{f} \\), \\( K_{f} \\) and \\( V_{f} \\) which consider the information in frequency domain, we perform spatial domain learning on the features from a local perspective. The self-attention [32] with local perception (LP) that is shown in Figure 3-(c) is computed within \\( 8 \\times 8 \\) non-overlapping windows as" + }, + { + "type": "equation", + "bbox": [ + 0.522, + 0.869, + 0.891, + 0.905 + ], + "angle": 0, + "content": "\\[\n\\mathcal {A T} \\left(Q _ {f}, K _ {f}, V _ {f}\\right) = s f \\left(\\frac {Q _ {f} \\otimes K _ {f} ^ {T}}{\\sqrt {d}} + B\\right) \\otimes V _ {f}, \\tag {14}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2634" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.138 + ], + "angle": 0, + "content": "where \\(d\\) and \\(B\\) denote the dimensionality and position bias, respectively. The \\(\\otimes\\) denotes the matrix multiplication (MatMul). Information is transferred by the residual connection" + }, + { + "type": "equation", + "bbox": [ + 0.178, + 0.154, + 0.469, + 0.171 + ], + "angle": 0, + "content": "\\[\nz ^ {*} = \\mathcal {A T} \\left(Q _ {f}, K _ {f}, V _ {f}\\right) + z, \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.179, + 0.469, + 0.209 + ], + "angle": 0, + "content": "where the calculation from \\(z\\) to \\(z^{*}\\) is marked as bidomain local perception (BLP), which is shown in Figure 4-(a)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.21, + 0.47, + 0.346 + ], + "angle": 0, + "content": "Bidomain Nonlinear Mapping (BNM). The computation of window attention does not provide nonlinear representation capabilities. Therefore, we use the frequency and spatial domain interaction module to learn nonlinear mapping. The FSDA is used to provide the frequency domain information. Besides, a residual block which consists of \\( C_1^3 (\\sigma (C_1^3 (\\cdot))) \\) is used to provide the spatial interaction. The immediate feature \\( z^{*} \\) is fed into the frequency nonlinear mapping branch and spatial nonlinear mapping branch, as" + }, + { + "type": "equation", + "bbox": [ + 0.214, + 0.362, + 0.469, + 0.38 + ], + "angle": 0, + "content": "\\[\nz _ {f n} = \\mathcal {F S} _ {A} \\left(z ^ {*}\\right), \\tag {16}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.195, + 0.403, + 0.469, + 0.422 + ], + "angle": 0, + "content": "\\[\nz _ {s n} = C _ {1} ^ {3} \\left(\\sigma \\left(C _ {1} ^ {3} \\left(z ^ {*}\\right)\\right)\\right), \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.429, + 0.469, + 0.492 + ], + "angle": 0, + "content": "where the subscript \\(A\\) in \\(\\mathcal{FS}_A(\\cdot)\\) means the frequency interaction performed after the attention operation. 
Then the frequency domain and spatial domain features are fused as the final nonlinear mapping output by" + }, + { + "type": "equation", + "bbox": [ + 0.169, + 0.502, + 0.469, + 0.521 + ], + "angle": 0, + "content": "\[\n\widetilde{z} = C_{1}^{3}\left(\left[ z_{fn}, z_{sn} + z^{*} \right]\right) + z^{*}, \tag{18}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.533, + 0.469, + 0.578 + ], + "angle": 0, + "content": "where the \([\cdot, \cdot]\) denotes the channel concatenation. The calculation from \(z^*\) to \(\widetilde{z}\) is marked as the bidomain nonlinear mapping (BNM), which is shown in Figure 4-(b)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.579, + 0.469, + 0.655 + ], + "angle": 0, + "content": "Spatial and Frequency Information Interaction (SFII). As shown in Figure 4, the calculation process from \( z \) to \( \widetilde{z} \) is called spatial and frequency information interaction (SFII). The proposed SFII aggregates spatial domain information and frequency domain information from a local perspective." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.666, + 0.327, + 0.682 + ], + "angle": 0, + "content": "3.3. Spatial and Frequency Loss" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.69, + 0.469, + 0.751 + ], + "angle": 0, + "content": "The supervised loss consists of two parts, namely the pixel-by-pixel loss in geometric space and the frequency domain loss obtained by the Fourier transform [3]. By sampling \( x_{i}^{s} \in \mathcal{D}_{X} \) and \( y_{i}^{s} \in \mathcal{D}_{Y} \), the losses calculated at three scales are" + }, + { + "type": "equation", + "bbox": [ + 0.148, + 0.763, + 0.469, + 0.805 + ], + "angle": 0, + "content": "\[\n\mathcal{L}_{G} = \sum_{s = 0}^{2} \lambda_{g} \cdot \sum_{i = 0}^{N - 1} \left\| \Psi^{s}\left(x_{i}^{s}\right) - y_{i}^{s} \right\|_{1}, \tag{19}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.109, + 0.833, + 0.469, + 0.874 + ], + "angle": 0, + "content": "\[\n\mathcal{L}_{F} = \sum_{s = 0}^{2} \lambda_{f} \cdot \sum_{i = 0}^{N - 1} \left\| \mathcal{F}\left(\Psi^{s}\left(x_{i}^{s}\right)\right) - \mathcal{F}\left(y_{i}^{s}\right) \right\|_{1}, \tag{20}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.886, + 0.341, + 0.902 + ], + "angle": 0, + "content": "where \(\lambda_{g}\) and \(\lambda_{f}\) denote weight factors." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.849, + 0.107 + ], + "angle": 0, + "content": "3.4. Retraining and Realistic Brightness Loss" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.115, + 0.892, + 0.251 + ], + "angle": 0, + "content": "Pseudo-label Fusion Retraining. There is an inherent domain discrepancy between synthetic hazy images and real-world hazy images. Therefore, we adopt a retraining strategy which utilizes pseudo labels. Pseudo labels \(\mathcal{R}_Y^P\) are obtained based on the model trained on synthetic datasets. We put the original synthetic dataset \(\{\mathcal{D}_X,\mathcal{D}_Y\}\) and the pseudo-labeled dataset \(\{\mathcal{R}_X,\mathcal{R}_Y^P\}\) into the network simultaneously for retraining. Supervised losses Eq. 19 and Eq. 20 are used in the retraining process at three scales." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.252, + 0.892, + 0.358 + ], + "angle": 0, + "content": "Prior Brightness Constraint. 
We conduct quantitative statistics on the brightness of nighttime hazy and clear images provided by [14]. The brightness intensities corresponding to \\( x_{i}^{0} \\in \\mathcal{R}_{X} \\) and \\( y_{i}^{0} \\in \\mathcal{R}_{Y} \\) are \\( \\mu (x_i^0) \\) and \\( \\mu (y_i^0) \\), respectively, where \\( \\mu (\\cdot) \\) denotes the average pixel value across three channels. We randomly select \\( \\hat{M} = \\frac{M}{2} \\) images from the dataset multiple times, and we get" + }, + { + "type": "equation", + "bbox": [ + 0.612, + 0.372, + 0.891, + 0.415 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i = 0} ^ {\\hat {M} - 1} \\mu \\left(y _ {i} ^ {0}\\right) < \\sum_ {i = 0} ^ {\\hat {M} - 1} \\mu \\left(x _ {i} ^ {0}\\right). \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.429, + 0.891, + 0.473 + ], + "angle": 0, + "content": "Therefore, we assume the brightness of the dehazed image \\( p_i^s \\) should be lower than that of the \\( x_i^s \\). This assumption is consistent with the imaging model Eq. 2." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.475, + 0.892, + 0.551 + ], + "angle": 0, + "content": "Local Brightness Map (LBM). We divide the image into non-overlapping local windows. The width and height of each square window are denoted as \\(\\gamma^s\\), where \\(s \\in \\{0,1,2\\}\\). The value in the local brightness map (LBM) \\(\\varphi_{x_i^s}\\) corresponding to \\(x_i^s\\) is obtained by" + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.565, + 0.89, + 0.625 + ], + "angle": 0, + "content": "\\[\n\\varphi_ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w}) = \\frac {1}{3 (\\gamma^ {s}) ^ {2}} \\sum_ {c = 0} ^ {2} \\sum_ {h = \\hat {h} \\cdot \\gamma^ {s}} ^ {(\\hat {h} + 1) \\cdot \\gamma^ {s}} \\sum_ {w = \\hat {w} \\cdot \\gamma^ {s}} ^ {(\\hat {w} + 1) \\cdot \\gamma^ {s}} x _ {i} ^ {s} (h, w, c), \\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.625, + 0.892, + 0.732 + ], + "angle": 0, + "content": "where \\((\\hat{h},\\hat{w})\\) and \\((h,w)\\) denote the pixel indices of \\(\\varphi_{x_i^s}\\) and \\(x_{i}^{s}\\), respectively. Meanwhile, the local brightness map \\(\\varphi_{p_i^s}\\) corresponding to \\(p_i^s\\) is defined in the same way. As shown in Figure 2-(c), the locations with high brightness may be active light sources or objects close to the light source, while the locations with low brightness may be objects and backgrounds far away from the light source." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.733, + 0.892, + 0.871 + ], + "angle": 0, + "content": "Realistic Brightness Loss. The brightness of hazy images is approximately globally realistic, so it can be used to supervise the brightness of dehazed images. As we observed in Eq. 21, the brightness of the dehazed image should be lower than that of the hazy image. 
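As a rough illustration of Eq. 22, the local brightness map can be obtained with channel-averaged mean pooling over non-overlapping windows. The snippet below is a minimal sketch under that assumption (it is not the authors' code), and the window size 16 corresponds to the s = 0 setting reported in Section 4.1.

```python
import torch
import torch.nn.functional as F

def local_brightness_map(img, gamma):
    # img: (B, 3, H, W) in [0, 1]; gamma: window size gamma^s at scale s
    # Eq. 22: average over the 3 channels and over each non-overlapping gamma x gamma window
    return F.avg_pool2d(img.mean(dim=1, keepdim=True), kernel_size=gamma, stride=gamma)

hazy = torch.rand(1, 3, 256, 256)              # toy hazy image x_i^0
lbm = local_brightness_map(hazy, gamma=16)     # (1, 1, 16, 16): one brightness value per window
```

Applying the same routine to the dehazed prediction gives the corresponding map used by the brightness constraint introduced next.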
Meanwhile, in order to ensure the relative numerical relationship between areas with high brightness and low brightness before and after dehazing, we use a power function with monotonically increasing properties to process the \\(\\varphi_{x_i^s}(\\hat{h},\\hat{w})\\) , as" + }, + { + "type": "equation", + "bbox": [ + 0.603, + 0.884, + 0.891, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\widetilde {\\varphi} _ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w}) = \\left(\\varphi_ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w})\\right) ^ {\\kappa}, \\tag {23}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.515, + 0.957 + ], + "angle": 0, + "content": "2635" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.088, + 0.199, + 0.145 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.12, + 0.147, + 0.168, + 0.159 + ], + "angle": 0, + "content": "(a) Hazy" + }, + { + "type": "image", + "bbox": [ + 0.2, + 0.089, + 0.314, + 0.145 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.232, + 0.147, + 0.28, + 0.159 + ], + "angle": 0, + "content": "(b) MRP" + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.089, + 0.428, + 0.145 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.345, + 0.147, + 0.398, + 0.158 + ], + "angle": 0, + "content": "(c) OSFD" + }, + { + "type": "image", + "bbox": [ + 0.429, + 0.089, + 0.543, + 0.145 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.462, + 0.147, + 0.502, + 0.159 + ], + "angle": 0, + "content": "(d) GD" + }, + { + "type": "image", + "bbox": [ + 0.543, + 0.089, + 0.657, + 0.145 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.567, + 0.147, + 0.633, + 0.159 + ], + "angle": 0, + "content": "(e) MSBDN" + }, + { + "type": "image", + "bbox": [ + 0.658, + 0.089, + 0.772, + 0.145 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.674, + 0.147, + 0.759, + 0.159 + ], + "angle": 0, + "content": "(f) 4KDehazing" + }, + { + "type": "image", + "bbox": [ + 0.772, + 0.089, + 0.887, + 0.145 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.795, + 0.147, + 0.87, + 0.159 + ], + "angle": 0, + "content": "(g) AECRNet" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.161, + 0.199, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.106, + 0.22, + 0.179, + 0.232 + ], + "angle": 0, + "content": "(h) DeHamer" + }, + { + "type": "image", + "bbox": [ + 0.2, + 0.161, + 0.314, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.224, + 0.22, + 0.284, + 0.232 + ], + "angle": 0, + "content": "(i) FSDGN" + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.161, + 0.428, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.349, + 0.22, + 0.383, + 0.232 + ], + "angle": 0, + "content": "(j) DF" + }, + { + "type": "image", + "bbox": [ + 0.429, + 0.161, + 0.543, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.452, + 0.22, + 0.516, + 0.232 + ], + "angle": 0, + "content": "(k) MITNet" + }, + { + "type": "image", + "bbox": [ + 0.543, + 0.161, + 0.657, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.571, + 0.22, + 0.633, + 0.232 + ], + "angle": 0, + "content": "(1)Fourmer" + }, + { + "type": 
"image", + "bbox": [ + 0.658, + 0.161, + 0.772, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.689, + 0.22, + 0.738, + 0.232 + ], + "angle": 0, + "content": "(m) Ours" + }, + { + "type": "image", + "bbox": [ + 0.772, + 0.161, + 0.887, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.803, + 0.22, + 0.853, + 0.232 + ], + "angle": 0, + "content": "(n) Label" + }, + { + "type": "image_caption", + "bbox": [ + 0.338, + 0.243, + 0.631, + 0.257 + ], + "angle": 0, + "content": "Figure 5. Visual results on synthetic dataset [31]." + }, + { + "type": "table_caption", + "bbox": [ + 0.265, + 0.272, + 0.704, + 0.286 + ], + "angle": 0, + "content": "Table 1. Quantitative results on datasets that generated by imaging model." + }, + { + "type": "table", + "bbox": [ + 0.084, + 0.297, + 0.889, + 0.472 + ], + "angle": 0, + "content": "
<table>
<tr><td rowspan="2">Methods</td><td colspan="2">NHR</td><td colspan="2">NHM</td><td colspan="2">NHCL</td><td colspan="2">NHCM</td><td colspan="2">NHCD</td><td colspan="2">NightHaze</td><td colspan="2">YellowHaze</td></tr>
<tr><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td></tr>
<tr><td>MRP (CVPR 2017)</td><td>0.776</td><td>19.848</td><td>0.666</td><td>15.993</td><td>0.747</td><td>22.497</td><td>0.693</td><td>20.494</td><td>0.624</td><td>17.651</td><td>0.295</td><td>12.138</td><td>0.249</td><td>13.473</td></tr>
<tr><td>GD (ICCV 2019)</td><td>0.969</td><td>30.107</td><td>0.861</td><td>20.689</td><td>0.973</td><td>36.506</td><td>0.958</td><td>34.448</td><td>0.932</td><td>31.509</td><td>0.832</td><td>25.324</td><td>0.915</td><td>27.410</td></tr>
<tr><td>OSFD (ACMMM 2020)</td><td>0.808</td><td>21.028</td><td>0.722</td><td>18.491</td><td>0.786</td><td>22.329</td><td>0.739</td><td>20.929</td><td>0.672</td><td>18.501</td><td>0.304</td><td>13.387</td><td>0.259</td><td>14.775</td></tr>
<tr><td>MSBDN (CVPR 2020)</td><td>0.970</td><td>31.335</td><td>0.818</td><td>20.514</td><td>0.965</td><td>35.963</td><td>0.938</td><td>32.848</td><td>0.903</td><td>30.475</td><td>0.950</td><td>33.156</td><td>0.921</td><td>29.834</td></tr>
<tr><td>4KDehazing (CVPR 2021)</td><td>0.950</td><td>28.613</td><td>0.830</td><td>20.429</td><td>0.967</td><td>35.006</td><td>0.958</td><td>35.162</td><td>0.912</td><td>30.048</td><td>0.850</td><td>26.562</td><td>0.861</td><td>25.835</td></tr>
<tr><td>AECRNet (CVPR 2021)</td><td>0.915</td><td>24.864</td><td>0.817</td><td>19.420</td><td>0.951</td><td>33.183</td><td>0.943</td><td>33.498</td><td>0.890</td><td>28.742</td><td>0.946</td><td>32.344</td><td>0.937</td><td>29.417</td></tr>
<tr><td>DeHamer (CVPR 2022)</td><td>0.966</td><td>31.017</td><td>0.823</td><td>23.095</td><td>0.966</td><td>36.038</td><td>0.944</td><td>33.908</td><td>0.915</td><td>31.389</td><td>0.954</td><td>33.432</td><td>0.931</td><td>30.334</td></tr>
<tr><td>FSDGN (ECCV 2022)</td><td>0.975</td><td>32.072</td><td>0.874</td><td>21.415</td><td>0.972</td><td>36.432</td><td>0.952</td><td>33.723</td><td>0.922</td><td>31.559</td><td>0.948</td><td>33.521</td><td>0.955</td><td>33.062</td></tr>
<tr><td>DF (TIP 2023)</td><td>0.969</td><td>31.644</td><td>0.896</td><td>23.207</td><td>0.975</td><td>37.383</td><td>0.960</td><td>35.038</td><td>0.934</td><td>32.079</td><td>0.931</td><td>31.489</td><td>0.948</td><td>32.244</td></tr>
<tr><td>MITNet (ACMMM 2023)</td><td>0.974</td><td>31.969</td><td>0.859</td><td>20.884</td><td>0.969</td><td>35.794</td><td>0.945</td><td>32.849</td><td>0.916</td><td>30.628</td><td>0.946</td><td>34.114</td><td>0.932</td><td>31.186</td></tr>
<tr><td>Fourmer (ICML 2023)</td><td>0.969</td><td>31.660</td><td>0.862</td><td>21.423</td><td>0.963</td><td>35.714</td><td>0.943</td><td>33.201</td><td>0.928</td><td>32.103</td><td>0.949</td><td>33.419</td><td>0.958</td><td>31.978</td></tr>
<tr><td>Ours</td><td>0.978</td><td>33.180</td><td>0.905</td><td>23.705</td><td>0.979</td><td>38.146</td><td>0.968</td><td>36.146</td><td>0.951</td><td>34.001</td><td>0.968</td><td>35.527</td><td>0.965</td><td>32.981</td></tr>
</table>
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.496, + 0.469, + 0.526 + ], + "angle": 0, + "content": "where \\(\\kappa \\geq 1\\) is the brightness intensity coefficient. The realistic brightness constraint within one single window is" + }, + { + "type": "equation", + "bbox": [ + 0.115, + 0.534, + 0.469, + 0.556 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {B} ^ {p _ {i} ^ {s}} (\\hat {h}, \\hat {w}) = \\left(\\varphi_ {p _ {i} ^ {s}} (\\hat {h}, \\hat {w}) - \\xi \\cdot \\widetilde {\\varphi} _ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w})\\right) ^ {2}, \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.563, + 0.469, + 0.592 + ], + "angle": 0, + "content": "where \\(\\xi\\) is a hyperparameter. The realistic brightness loss calculated over all windows is" + }, + { + "type": "equation", + "bbox": [ + 0.094, + 0.599, + 0.47, + 0.645 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {B} = \\sum_ {s = 0} ^ {2} \\frac {\\lambda_ {b}}{\\hat {N} \\hat {W} ^ {s} \\hat {H} ^ {s}} \\cdot \\sum_ {i = 0} ^ {\\hat {N} - 1} \\sum_ {\\hat {h} = 0} ^ {\\hat {H} ^ {s} - 1} \\sum_ {\\hat {w} = 0} ^ {\\hat {W} ^ {s} - 1} \\mathcal {L} _ {B} ^ {p _ {i} ^ {s}} (\\hat {h}, \\hat {w}), \\tag {25}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.653, + 0.469, + 0.685 + ], + "angle": 0, + "content": "where \\(\\hat{W}^s = W^s /\\gamma^s\\) \\(\\hat{H}^s = H^s /\\gamma^s\\) . And \\(\\hat{N} = N + M\\) The \\(\\lambda_{b}\\) denotes the weights of scale loss of \\(\\mathcal{L}_B\\)" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.693, + 0.194, + 0.707 + ], + "angle": 0, + "content": "3.5. Total Loss" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.716, + 0.469, + 0.746 + ], + "angle": 0, + "content": "The overall loss is a combination of supervised and semi-supervised losses, which is" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.756, + 0.469, + 0.772 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {G} + \\alpha \\mathcal {L} _ {F} + \\beta \\mathcal {L} _ {B}, \\tag {26}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.469, + 0.811 + ], + "angle": 0, + "content": "where \\(\\alpha\\) and \\(\\beta\\) are the weights of the frequency domain loss and the realistic brightness loss, respectively." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.823, + 0.21, + 0.839 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.848, + 0.264, + 0.864 + ], + "angle": 0, + "content": "4.1. Experiment Setting" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Datasets. To comprehensively compare the performance of different algorithms, we conducted experiments on both" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.496, + 0.892, + 0.631 + ], + "angle": 0, + "content": "synthetic and real-world datasets. The synthetic datasets include GTA5 [45], UNREAL-NH [31], {NHR, NHM, HNCL, NHCM, NHCD} [51] and {NightHaze, YellowHaze} [26]. The real-world nighttime haze (RWNH) is provided by [14]. Since the brightness level of the ground-truth label in the UNREAL-NH is close to daytime, we adjust the brightness of the hazy image and corresponding label to the level of the nighttime low-light image by the Gamma correction [33] for the evaluation of the RWNH." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.638, + 0.893, + 0.774 + ], + "angle": 0, + "content": "Comparison Methods and Evaluation Metrics. 
MRP [50], GD [27], OSFD [51], MSBDN [5]. 4KDehazing [54], AECRNet [43], DeHamer [9], FSDGN [47], DF [38], MIT-Net [37] and Fourmer [55] are used as comparisons. PSNR [22, 35, 36] and SSIM [8, 42] are used to evaluate the performance on labeled datasets. BRISQUE [44] and MUSIQ [13, 17] are computed to evaluate the performance on unlabeled dataset. The \\(\\uparrow\\) represents a larger value, a higher quality, while \\(\\downarrow\\) represents a larger value, a lower quality." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Implementation Details. The batch size is chosen as 4. The image size is set to \\(256 \\times 256 \\times 3\\). The learning rate is initialized to 0.0001 and linearly decays by a factor of 0.95 every 10 epochs. The Adam \\((\\beta_{1} = 0.9, \\beta_{2} = 0.999)\\) is used. The \\(\\lambda_{g}\\), \\(\\lambda_{f}\\) and \\(\\lambda_{b}\\) are all set to 1. The \\(\\alpha\\) and \\(\\beta\\) are set to 0.1 and 20, respectively. The window size \\(\\gamma^{s}\\) are set to 16, 8 and 4, where \\(s \\in \\{0,1,2\\}\\), respectively. The coefficient \\(\\xi\\) and \\(\\kappa\\) is set to 1 and 1.3, respectively. The" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "2636" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.088, + 0.219, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.129, + 0.202, + 0.177, + 0.213 + ], + "angle": 0, + "content": "(a) Hazy" + }, + { + "type": "image", + "bbox": [ + 0.22, + 0.089, + 0.353, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.265, + 0.202, + 0.314, + 0.213 + ], + "angle": 0, + "content": "(b)MRP" + }, + { + "type": "image", + "bbox": [ + 0.354, + 0.089, + 0.485, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.392, + 0.202, + 0.445, + 0.213 + ], + "angle": 0, + "content": "(c) OSFD" + }, + { + "type": "image", + "bbox": [ + 0.485, + 0.089, + 0.617, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.532, + 0.202, + 0.572, + 0.213 + ], + "angle": 0, + "content": "(d) GD" + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.089, + 0.751, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.65, + 0.202, + 0.715, + 0.213 + ], + "angle": 0, + "content": "(e) MSBDN" + }, + { + "type": "image", + "bbox": [ + 0.752, + 0.089, + 0.885, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.777, + 0.202, + 0.86, + 0.213 + ], + "angle": 0, + "content": "(f) 4KDehazing" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.216, + 0.219, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.33, + 0.185, + 0.342 + ], + "angle": 0, + "content": "(g) AECRNet" + }, + { + "type": "image", + "bbox": [ + 0.221, + 0.216, + 0.352, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.33, + 0.317, + 0.341 + ], + "angle": 0, + "content": "(h) DeHamer" + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.216, + 0.485, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.405, + 0.33, + 0.439, + 0.341 + ], + "angle": 0, + "content": "(i) DF" + }, + { + "type": "image", + "bbox": [ + 0.485, + 0.216, + 0.618, + 0.327 
+ ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.522, + 0.33, + 0.582, + 0.342 + ], + "angle": 0, + "content": "(j) MITNet" + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.216, + 0.751, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.656, + 0.33, + 0.721, + 0.341 + ], + "angle": 0, + "content": "(k)Fourmer" + }, + { + "type": "image", + "bbox": [ + 0.752, + 0.216, + 0.885, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.795, + 0.33, + 0.838, + 0.341 + ], + "angle": 0, + "content": "(1) Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.318, + 0.353, + 0.651, + 0.367 + ], + "angle": 0, + "content": "Figure 6. Visual results on real-world hazy images [14]." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.381, + 0.162, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.102, + 0.48, + 0.149, + 0.492 + ], + "angle": 0, + "content": "(a) Hazy" + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.381, + 0.243, + 0.48 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.168, + 0.48, + 0.233, + 0.492 + ], + "angle": 0, + "content": "(b) \\(\\kappa = 1.0\\)" + }, + { + "type": "image", + "bbox": [ + 0.245, + 0.381, + 0.324, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.249, + 0.48, + 0.312, + 0.492 + ], + "angle": 0, + "content": "(c) \\(\\kappa = 1.3\\)" + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.381, + 0.404, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.331, + 0.48, + 0.395, + 0.492 + ], + "angle": 0, + "content": "(d) \\(\\kappa = 1.5\\)" + }, + { + "type": "image", + "bbox": [ + 0.406, + 0.381, + 0.485, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.413, + 0.48, + 0.477, + 0.492 + ], + "angle": 0, + "content": "(e) \\(\\kappa = 1.8\\)" + }, + { + "type": "image", + "bbox": [ + 0.486, + 0.381, + 0.565, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.496, + 0.48, + 0.558, + 0.492 + ], + "angle": 0, + "content": "(f) \\(\\kappa = 2.0\\)" + }, + { + "type": "image", + "bbox": [ + 0.567, + 0.381, + 0.645, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.575, + 0.48, + 0.639, + 0.492 + ], + "angle": 0, + "content": "(g) \\(\\kappa = 2.3\\)" + }, + { + "type": "image", + "bbox": [ + 0.647, + 0.381, + 0.726, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.657, + 0.48, + 0.721, + 0.492 + ], + "angle": 0, + "content": "(h) \\(\\kappa = 2.5\\)" + }, + { + "type": "image", + "bbox": [ + 0.727, + 0.381, + 0.808, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.738, + 0.48, + 0.8, + 0.492 + ], + "angle": 0, + "content": "(i) \\(\\kappa = 2.8\\)" + }, + { + "type": "image", + "bbox": [ + 0.81, + 0.381, + 0.889, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.816, + 0.48, + 0.878, + 0.492 + ], + "angle": 0, + "content": "(j) \\(\\kappa = 3.0\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.325, + 0.503, + 0.644, + 0.517 + ], + "angle": 0, + "content": "Figure 7. Dehazed images obtained under different \\(\\kappa\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.544, + 0.47, + 0.574 + ], + "angle": 0, + "content": "proposed model is implemented in PyTorch and trained on a single NVIDIA RTX 4090 platform." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.587, + 0.462, + 0.604 + ], + "angle": 0, + "content": "4.2. Comparison with State-of-the-art Algorithms" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.611, + 0.47, + 0.702 + ], + "angle": 0, + "content": "Evaluation on Synthetic Datasets. Table 1 and Table 2 show the quantitative dehazing results obtained by state-of-the-art methods. Figure 5 shows the corresponding visual results. The quantitative and visual results demonstrate that the proposed method achieves overall better performance than state-of-the-art algorithms." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.704, + 0.47, + 0.854 + ], + "angle": 0, + "content": "Evaluation on Real-world Datasets. Table 2 shows the evaluation results of real-world dehazed images. It is worth pointing out that existing research [7] proposes that the reliability of no-reference metrics in the dehazing task is lower than that of full-reference metrics. Figure 6 shows that the details of the dehazed results obtained by our method are visually better. Meanwhile, the brightness of the dehazed images obtained by most comparison algorithms is obviously unrealistic, while the brightness of the dehazed images obtained by our algorithm is approximately globally realistic." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Overall Evaluation. According to the quantitative and visual results on synthetic and real-world datasets, the proposed SFSNiD achieves overall better performance. More" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.544, + 0.807, + 0.56 + ], + "angle": 0, + "content": "results are provided in the Supplementary Materials." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.572, + 0.779, + 0.589 + ], + "angle": 0, + "content": "4.3. Ablation Study and Discussions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.892, + 0.854 + ], + "angle": 0, + "content": "Spatial and Frequency Information Interaction. The spatial and frequency information interaction (SFII) modules and naive convolution module are used in the proposed SFSNiD. In order to prove the usefulness of the FDP, LP and BNM contained in the SFII, ablation experiments for different sub-blocks are performed. The ablation experiment on the proposed SFII includes (i) removing the FDP, (ii) removing the LP, (iii) removing the frequency domain processing in BNM, and (iv) removing the spatial domain processing in BNM. These four settings are denoted \\( R1 \\), \\( R2 \\), \\( R3 \\) and \\( R4 \\), respectively. Table 3 shows the ablation results under different settings on the UNREAL-NH [31]. The quantitative results demonstrate that the FDP, LP and BNM all have a positive effect on the dehazing performance. Due to space limitations, visualizations of the amplitude and phase spectrums are placed in the Supplementary Materials." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Hierarchical Training and Frequency Domain Loss. The training process of the proposed SFSNiD takes a hierarchical strategy by using different scales \\( s \\in \\{0,1,2\\} \\). 
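Since the following paragraphs ablate the frequency domain loss, a minimal single-scale sketch of Eq. 19 and Eq. 20 may help; it assumes the L1 norm is taken on the complex FFT difference with a simple mean reduction, which is our illustrative choice rather than the paper's exact implementation.

```python
import torch

def spatial_l1(pred, target):
    # single-scale counterpart of Eq. 19: ||Psi^s(x_i^s) - y_i^s||_1
    return torch.mean(torch.abs(pred - target))

def frequency_l1(pred, target):
    # single-scale counterpart of Eq. 20: ||F(Psi^s(x_i^s)) - F(y_i^s)||_1
    return torch.mean(torch.abs(torch.fft.fft2(pred) - torch.fft.fft2(target)))

pred = torch.rand(4, 3, 256, 256)              # toy dehazed prediction
target = torch.rand(4, 3, 256, 256)            # toy clear label
loss = spatial_l1(pred, target) + 0.1 * frequency_l1(pred, target)   # alpha = 0.1 as in Sec. 4.1
```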
Two" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2637" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.078, + 0.09, + 0.47, + 0.119 + ], + "angle": 0, + "content": "Table 2. Quantitative results on datasets generated by game engine (GTA5 and UNREAL-NH) and the real-world dataset (RWNH)." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.128, + 0.472, + 0.304 + ], + "angle": 0, + "content": "
<table>
<tr><td rowspan="2">Methods</td><td colspan="2">GTA5</td><td colspan="2">UNREAL-NH</td><td colspan="2">RWNH</td></tr>
<tr><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>BRISQUE↓</td><td>MUSIQ↑</td></tr>
<tr><td>MRP</td><td>0.662</td><td>19.460</td><td>0.467</td><td>10.039</td><td>19.418</td><td>41.194</td></tr>
<tr><td>GD</td><td>0.900</td><td>30.090</td><td>0.767</td><td>21.202</td><td>31.359</td><td>33.433</td></tr>
<tr><td>OSFD</td><td>0.711</td><td>21.461</td><td>0.443</td><td>9.169</td><td>20.860</td><td>41.779</td></tr>
<tr><td>MSBDN</td><td>0.909</td><td>32.029</td><td>0.827</td><td>25.680</td><td>38.910</td><td>29.968</td></tr>
<tr><td>4KDehazing</td><td>0.903</td><td>30.314</td><td>0.774</td><td>23.087</td><td>34.965</td><td>33.536</td></tr>
<tr><td>AECRNet</td><td>0.888</td><td>26.846</td><td>0.731</td><td>21.566</td><td>27.084</td><td>37.034</td></tr>
<tr><td>DeHamer</td><td>0.928</td><td>32.597</td><td>0.740</td><td>22.441</td><td>42.269</td><td>26.788</td></tr>
<tr><td>FSDGN</td><td>0.923</td><td>32.642</td><td>0.702</td><td>21.736</td><td>32.216</td><td>35.200</td></tr>
<tr><td>DF</td><td>0.918</td><td>32.856</td><td>0.770</td><td>23.017</td><td>33.678</td><td>31.663</td></tr>
<tr><td>MITNet</td><td>0.899</td><td>31.118</td><td>0.766</td><td>21.860</td><td>35.404</td><td>31.768</td></tr>
<tr><td>Fourmer</td><td>0.917</td><td>31.926</td><td>0.772</td><td>22.799</td><td>35.850</td><td>31.367</td></tr>
<tr><td>Ours</td><td>0.935</td><td>33.708</td><td>0.862</td><td>25.907</td><td>30.975</td><td>32.120</td></tr>
</table>
" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.318, + 0.176, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.105, + 0.409, + 0.153, + 0.421 + ], + "angle": 0, + "content": "(a) Hazy" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.319, + 0.273, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.177, + 0.409, + 0.267, + 0.421 + ], + "angle": 0, + "content": "(b) Pseudo Label" + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.319, + 0.371, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.283, + 0.409, + 0.359, + 0.421 + ], + "angle": 0, + "content": "(c) Retraining" + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.319, + 0.468, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.369, + 0.409, + 0.475, + 0.422 + ], + "angle": 0, + "content": "(d) Retraining +LB" + }, + { + "type": "image_caption", + "bbox": [ + 0.101, + 0.433, + 0.445, + 0.447 + ], + "angle": 0, + "content": "Figure 8. Visual results under different training strategies." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.476, + 0.47, + 0.641 + ], + "angle": 0, + "content": "ablation studies are adopted, which are denoted as (i) \\(S1\\): \\(s \\in \\{0\\}\\), and (ii) \\(S2\\): \\(s \\in \\{0, 1\\}\\). Meanwhile, in our experimental setup, the spatial domain loss \\(L_{G}\\) and the frequency domain loss \\(L_{F}\\) are applied simultaneously. To verify the effectiveness of frequency domain loss, the setting when \\(L_{F}\\) is not used is denoted as \\(S3\\) (\\(s \\in \\{0, 1, 2\\}\\)). Table 4 shows the ablation results under the three different settings. The quantitative results demonstrate two main conclusions. First, the hierarchical training strategy can improve the dehazing performance. Second, the loss in the frequency domain is crucial as it improves the SSIM from 0.816 to 0.862." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.643, + 0.47, + 0.869 + ], + "angle": 0, + "content": "Retraining Strategy and Realistic Brightness Loss. To verify the effectiveness of the retraining strategy and the realistic brightness loss \\(\\mathcal{L}_B\\), the visual effects are shown in Figure 8. As shown in Figure 8-(b), the texture of the pseudo-labels is blurred due to the domain discrepancy between the synthetic and real-world data. The dehazed images obtained after retraining has unrealistic brightness as shown in Figure 8-(c). It can be seen that the best effect occurs when the retraining strategy and \\(\\mathcal{L}_B\\) are used simultaneously as shown in Figure 8-(d). The BRISQUE \\((\\downarrow)\\) and MUSIQ \\((\\uparrow)\\) obtained for the three settings (b), (c) and (d) in Figure 8 are \\(\\{33.316, 30.432\\}\\), \\(\\{34.210, 32.373\\}\\) and \\(\\{30.975, 32.120\\}\\), respectively. Taking a comprehensive look at the visual and quantitative evaluation results, our proposed strategy is effective." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.9 + ], + "angle": 0, + "content": "Brightness intensity coefficient \\(\\kappa\\) in \\(\\mathcal{L}_B\\). In order to demonstrate the effectiveness of \\(\\kappa\\) on the real-world dehaz-" + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.09, + 0.882, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.209, + 0.892, + 0.252 + ], + "angle": 0, + "content": "Figure 9. 
The average pixel value obtained under different \\(\\kappa\\). The horizontal dashed line represents the average pixel value of real-world nighttime clear images [14]." + }, + { + "type": "table_caption", + "bbox": [ + 0.588, + 0.267, + 0.804, + 0.28 + ], + "angle": 0, + "content": "Table 3. Ablation study on the SFII." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.291, + 0.889, + 0.333 + ], + "angle": 0, + "content": "
<table>
<tr><td>Settings</td><td>R1</td><td>R2</td><td>R3</td><td>R4</td><td>Ours</td></tr>
<tr><td>SSIM</td><td>0.848</td><td>0.858</td><td>0.851</td><td>0.845</td><td>0.862</td></tr>
<tr><td>PSNR</td><td>25.353</td><td>25.808</td><td>25.642</td><td>24.301</td><td>25.907</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.516, + 0.345, + 0.875, + 0.36 + ], + "angle": 0, + "content": "Table 4. Ablation study on the scale loss and frequency loss." + }, + { + "type": "table", + "bbox": [ + 0.51, + 0.37, + 0.884, + 0.411 + ], + "angle": 0, + "content": "
<table>
<tr><td>Settings</td><td>S1</td><td>S2</td><td>S3</td><td>Ours</td></tr>
<tr><td>SSIM</td><td>0.854</td><td>0.851</td><td>0.816</td><td>0.862</td></tr>
<tr><td>PSNR</td><td>25.601</td><td>25.134</td><td>24.464</td><td>25.907</td></tr>
</table>
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.436, + 0.892, + 0.587 + ], + "angle": 0, + "content": "ing task, we manually set \\(\\kappa\\) to different values. The dehazed images and average pixel value when \\(\\kappa\\) takes different values are shown in Figure 7 and Figure 9, respectively. There are two conclusions that can be drawn. First, as \\(\\kappa\\) increases, the brightness of the dehazed image continues to decrease, which proves that \\(\\kappa\\) can control the brightness of the dehazed image. Second, when \\(\\kappa\\) equals 1.3, the average pixel value (0.225) of dehazed images is close to the average pixel value real-world nighttime clear images (0.217) [14]. Therefore, we set \\(\\kappa\\) to 1.3 as the final setting." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.601, + 0.619, + 0.617 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.626, + 0.892, + 0.791 + ], + "angle": 0, + "content": "In this paper, a semi-supervised nighttime image dehazing baseline SFSNiD is proposed for real-world nighttime dehazing. A spatial and frequency domain information interaction module is proposed to handle the haze, glow, and noise with localized, coupled and frequency inconsistent characteristics. A retraining strategy and a local window-based brightness loss for semi-supervised training process are designed to suppress haze and glow while achieving realistic brightness. Experiments on public benchmarks validate the effectiveness of the proposed method and its superiority over state-of-the-art methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.792, + 0.892, + 0.898 + ], + "angle": 0, + "content": "Acknowledgment. This work was supported in part by the grant of the National Science Foundation of China under Grant 62172090; Start-up Research Fund of Southeast University under Grant RF1028623097; CAAI-Huawei MindSpore Open Fund. We thank the Big Data Computing Center of Southeast University for providing the facility support on the numerical calculations in this paper." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2638" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.17 + ], + "angle": 0, + "content": "[1] Cosmin Ancuti, Codruta O Ancuti, Christophe De Vleeschouwer, and Alan C Bovik. Day and night-time dehazing by local airlight estimation. IEEE Transactions on Image Processing, 29:6264-6275, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.47, + 0.227 + ], + "angle": 0, + "content": "[2] Xiaofeng Cong, Jie Gui, Kai-Chao Miao, Jun Zhang, Bing Wang, and Peng Chen. Discrete haze level dehazing network. In ACM International Conference on Multimedia, pages 1828-1836, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.228, + 0.47, + 0.295 + ], + "angle": 0, + "content": "[3] Yuning Cui, Yi Tao, Zhenshan Bing, Wenqi Ren, Xinwei Gao, Xiaochun Cao, Kai Huang, and Alois Knoll. Selective frequency network for image restoration. In The Eleventh International Conference on Learning Representations, 2022. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.298, + 0.47, + 0.352 + ], + "angle": 0, + "content": "[4] Yuekun Dai, Chongyi Li, Shangchen Zhou, Ruicheng Feng, and Chen Change Loy. Flare7k: A phenomenological nighttime flare removal dataset. 
Advances in Neural Information Processing Systems, 35:3926-3937, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.354, + 0.47, + 0.422 + ], + "angle": 0, + "content": "[5] Hang Dong, Jinshan Pan, Lei Xiang, Zhe Hu, Xinyi Zhang, Fei Wang, and Ming-Hsuan Yang. Multi-scale boosted dehazing network with dense feature fusion. In IEEE Conference on Computer Vision and Pattern Recognition, pages 2157-2167, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.424, + 0.47, + 0.478 + ], + "angle": 0, + "content": "[6] Matteo Frigo and Steven G Johnson. Fftw: An adaptive software architecture for the fft. In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 1381-1384, 1998. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.481, + 0.47, + 0.536 + ], + "angle": 0, + "content": "[7] Jie Gui, Xiaofeng Cong, Yuan Cao, Wenqi Ren, Jun Zhang, Jing Zhang, Jiuxin Cao, and Dacheng Tao. A comprehensive survey and taxonomy on single image dehazing based on deep learning. ACM Computing Surveys, 2023. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.537, + 0.47, + 0.606 + ], + "angle": 0, + "content": "[8] Chunle Guo, Chongyi Li, Jichang Guo, Chen Change Loy, Junhui Hou, Sam Kwong, and Runmin Cong. Zero-reference deep curve estimation for low-light image enhancement. In IEEE Conference on Computer Vision and Pattern Recognition, pages 1780-1789, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.607, + 0.47, + 0.675 + ], + "angle": 0, + "content": "[9] Chun-Le Guo, Qixin Yan, Saeed Anwar, Runmin Cong, Wenqi Ren, and Chongyi Li. Image dehazing transformer with transmission-aware 3d position embedding. In IEEE Conference on Computer Vision and Pattern Recognition, pages 5812-5820, 2022. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.677, + 0.469, + 0.732 + ], + "angle": 0, + "content": "[10] Xin Guo, Xueyang Fu, Man Zhou, Zhen Huang, Jialun Peng, and Zheng-Jun Zha. Exploring fourier prior for single image rain removal. In International Joint Conferences on Artificial Intelligence, pages 935–941, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.733, + 0.469, + 0.788 + ], + "angle": 0, + "content": "[11] Junming Hou, Qi Cao, Ran Ran, Che Liu, Junling Li, and Liang-jian Deng. Bidomain modeling paradigm for pan-sharpening. In ACM International Conference on Multimedia, pages 347-357, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.469, + 0.83 + ], + "angle": 0, + "content": "[12] Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation networks. In IEEE Conference on Computer Vision and Pattern Recognition, pages 7132-7141, 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[13] Shirui Huang, Keyan Wang, Huan Liu, Jun Chen, and Yun-song Li. Contrastive semi-supervised learning for underwater image restoration via reliable bank. In IEEE Conference on Computer Vision and Pattern Recognition, pages 18145-18155, 2023. 6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.16 + ], + "angle": 0, + "content": "[14] Yeying Jin, Beibei Lin, Wending Yan, Wei Ye, Yuan Yuan, and Robby T Tan. Enhancing visibility in nighttime haze images using guided apsf and gradient adaptive convolution. In ACM International Conference on Multimedia, 2023. 
1, 2, 3, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.163, + 0.892, + 0.216 + ], + "angle": 0, + "content": "[15] Mingye Ju, Can Ding, Charles A Guo, Wenqi Ren, and Dacheng Tao. Idrlp: Image dehazing using region line prior. IEEE Transactions on Image Processing, 30:9043-9057, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.218, + 0.892, + 0.273 + ], + "angle": 0, + "content": "[16] Mingye Ju, Can Ding, Wenqi Ren, Yi Yang, Dengyin Zhang, and Y Jay Guo. Ide: Image dehazing and exposure using an enhanced atmospheric scattering model. IEEE Transactions on Image Processing, 30:2180-2192, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.274, + 0.892, + 0.327 + ], + "angle": 0, + "content": "[17] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In IEEE International Conference on Computer Vision, pages 5148-5157, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.329, + 0.892, + 0.371 + ], + "angle": 0, + "content": "[18] Beomhyuk Koo and Gyeonghwan Kim. Nighttime haze removal with glow decomposition using gan. In Pattern Recognition: 5th Asian Conference, pages 807-820, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.372, + 0.892, + 0.426 + ], + "angle": 0, + "content": "[19] Shiba Kuanar, Dwarikanath Mahapatra, Monalisa Bilas, and KR Rao. Multi-path dilated convolution network for haze and glow removal in nighttime images. The Visual Computer, pages 1-14, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.428, + 0.892, + 0.481 + ], + "angle": 0, + "content": "[20] Boyi Li, Xiulian Peng, Zhangyang Wang, Jizheng Xu, and Dan Feng. Aod-net: All-in-one dehazing network. In IEEE International Conference on Computer Vision, pages 4770-4778, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.483, + 0.892, + 0.537 + ], + "angle": 0, + "content": "[21] Boyi Li, Wenqi Ren, Dengpan Fu, Dacheng Tao, Dan Feng, Wenjun Zeng, and Zhangyang Wang. Benchmarking single-image dehazing and beyond. IEEE Transactions on Image Processing, 28(1):492-505, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.539, + 0.892, + 0.593 + ], + "angle": 0, + "content": "[22] Chongyi Li, Chun-Le Guo, Man Zhou, Zhexin Liang, Shangchen Zhou, Ruicheng Feng, and Chen Change Loy. Embedding fourier for ultra-high-definition low-light image enhancement. arXiv preprint arXiv:2302.11831, 2023. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.595, + 0.892, + 0.636 + ], + "angle": 0, + "content": "[23] Kun Li, Dan Guo, and Meng Wang. Proposal-free video grounding with contextual pyramid network. In AAAI Conference on Artificial Intelligence, pages 1902-1910, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.637, + 0.892, + 0.69 + ], + "angle": 0, + "content": "[24] Yu Li, Robby T Tan, and Michael S Brown. Nighttime haze removal with glow and multiple light colors. In IEEE International Conference on Computer Vision, pages 226-234, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.692, + 0.892, + 0.747 + ], + "angle": 0, + "content": "[25] Yudong Liang, Bin Wang, Wangmeng Zuo, Jiaying Liu, and Wenqi Ren. Self-supervised learning and adaptation for single image dehazing. In International Joint Conference on Artificial Intelligence, pages 1-15, 2022. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.748, + 0.892, + 0.801 + ], + "angle": 0, + "content": "[26] Yinghong Liao, Zhuo Su, Xiangguo Liang, and Bin Qiu. Hdp-net: Haze density prediction network for nighttime de-hazing. In Pacific Rim Conference on Multimedia, pages 469-480, 2018. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.803, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[27] Xiaohong Liu, Yongrui Ma, Zhihao Shi, and Jun Chen. Griddehazenet: Attention-based multi-scale network for image dehazing. In IEEE International Conference on Computer Vision, pages 7314-7323, 2019. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[28] Yun Liu, Anzhi Wang, Hao Zhou, and Pengfei Jia. Single nighttime image dehazing based on image decomposition. Signal Processing, 183:107986, 2021. 1" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "2639" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[29] Yun Liu, Zhongsheng Yan, Jinge Tan, and Yuche Li. Multipurpose oriented single nighttime image haze removal based on unified variational retina model. IEEE Transactions on Circuits and Systems for Video Technology, 33(4):1643-1657, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.162, + 0.47, + 0.218 + ], + "angle": 0, + "content": "[30] Yun Liu, Zhongsheng Yan, Aimin Wu, and Tian Ye. Night-time image dehazing based on variational decomposition model. In IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 640-649, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.218, + 0.469, + 0.273 + ], + "angle": 0, + "content": "[31] Yun Liu, Zhongsheng Yan, Sixiang Chen, Tian Ye, Wenqi Ren, and Erkang Chen. Nighthazeformer: Single nighttime haze removal using prior query transformer. In ACM International Conference on Multimedia, 2023. 1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.274, + 0.469, + 0.342 + ], + "angle": 0, + "content": "[32] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In IEEE International Conference on Computer Vision, pages 10012-10022, 2021. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.343, + 0.469, + 0.411 + ], + "angle": 0, + "content": "[33] Wenqi Ren, Sifei Liu, Lin Ma, Qianqian Xu, Xiangyu Xu, Xiaochun Cao, Junping Du, and Ming-Hsuan Yang. Low-light image enhancement via a deep hybrid network. IEEE Transactions on Image Processing, 28(9):4364-4375, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.412, + 0.469, + 0.468 + ], + "angle": 0, + "content": "[34] Wenqi Ren, Jinshan Pan, Hua Zhang, Xiaochun Cao, and Ming-Hsuan Yang. Single image dehazing via multi-scale convolutional neural networks with holistic edges. International Journal of Computer Vision, 128:240-259, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.469, + 0.469, + 0.523 + ], + "angle": 0, + "content": "[35] Yuanjie Shao, Lerenhan Li, Wenqi Ren, Changxin Gao, and Nong Sang. Domain adaptation for image dehazing. In IEEE Conference on Computer Vision and Pattern Recognition, pages 2808-2817, 2020. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.524, + 0.469, + 0.566 + ], + "angle": 0, + "content": "[36] Hao Shen, Zhong-Qiu Zhao, and Wandi Zhang. Adaptive dynamic filtering network for image denoising. In AAAI Conference on Artificial Intelligence, pages 2227-2235, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.567, + 0.469, + 0.621 + ], + "angle": 0, + "content": "[37] Hao Shen, Zhong-Qiu Zhao, Yulun Zhang, and Zhao Zhang. Mutual information-driven triple interaction network for efficient image dehazing. In ACM International Conference on Multimedia, pages 7-16, 2023. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.622, + 0.469, + 0.662 + ], + "angle": 0, + "content": "[38] Yuda Song, Zhuqing He, Hui Qian, and Xin Du. Vision transformers for single image dehazing. IEEE TIP, 32:1927-1941, 2023. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.663, + 0.469, + 0.705 + ], + "angle": 0, + "content": "[39] Shangquan Sun, Wenqi Ren, and Tao Wang. Rethinking image restoration for object detection. Advances in Neural Information Processing Systems, 35:4461-4474, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.706, + 0.469, + 0.748 + ], + "angle": 0, + "content": "[40] Fei Wang, Dan Guo, and Kun Li. Eulermormer: Robust eulerian motion magnification via dynamic filtering within transformer. arXiv preprint arXiv:2312.04152, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.748, + 0.469, + 0.802 + ], + "angle": 0, + "content": "[41] Wenhui Wang, Anna Wang, and Chen Liu. Variational single nighttime image haze removal with a gray haze-line prior. IEEE Transactions on Image Processing, 31:1349-1363, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.469, + 0.858 + ], + "angle": 0, + "content": "[42] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[43] Haiyan Wu, Yanyun Qu, Shaohui Lin, Jian Zhou, Ruizhi Qiao, Zhizhong Zhang, Yuan Xie, and Lizhuang Ma. Contrastive learning for compact single image dehazing. In IEEE" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "Conference on Computer Vision and Pattern Recognition, pages 10551-10560, 2021. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.121, + 0.892, + 0.189 + ], + "angle": 0, + "content": "[44] Rui-Qi Wu, Zheng-Peng Duan, Chun-Le Guo, Zhi Chai, and Chongyi Li. Ridcp: Revitalizing real image dehazing via high-quality codebook priors. In IEEE Conference on Computer Vision and Pattern Recognition, pages 22282-22291, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[45] Wending Yan, Robby T Tan, and Dengxin Dai. Night-time defogging using high-low frequency decomposition and grayscale-color networks. In European Conference on Computer Vision, pages 473-488, 2020. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.892, + 0.289 + ], + "angle": 0, + "content": "[46] Minmin Yang, Jianchang Liu, and Zhengguo Li. Superpixel-based single nighttime image haze removal. 
IEEE Transactions on Multimedia, 20(11):3008-3018, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.291, + 0.892, + 0.346 + ], + "angle": 0, + "content": "[47] Hu Yu, Naishan Zheng, Man Zhou, Jie Huang, Zeyu Xiao, and Feng Zhao. Frequency and spatial dual guidance for image dehazing. In European Conference on Computer Vision, pages 181-198, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.348, + 0.892, + 0.389 + ], + "angle": 0, + "content": "[48] Jing Zhang and Dacheng Tao. Famed-net: A fast and accurate multi-scale end-to-end dehazing network. IEEE Transactions on Image Processing, 29:72-84, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.391, + 0.892, + 0.445 + ], + "angle": 0, + "content": "[49] Jing Zhang, Yang Cao, and Zengfu Wang. Nighttime haze removal based on a new imaging model. In IEEE International Conference on Image Processing, pages 4557-4561, 2014. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.447, + 0.892, + 0.516 + ], + "angle": 0, + "content": "[50] Jing Zhang, Yang Cao, Shuai Fang, Yu Kang, and Chang Wen Chen. Fast haze removal for nighttime image using maximum reflectance prior. In IEEE Conference on Computer Vision and Pattern Recognition, pages 7418-7426, 2017. 1, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.518, + 0.892, + 0.572 + ], + "angle": 0, + "content": "[51] Jing Zhang, Yang Cao, Zheng-Jun Zha, and Dacheng Tao. Nighttime dehazing with a synthetic benchmark. In ACM International Conference on Multimedia, pages 2355-2363, 2020. 1, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.575, + 0.892, + 0.629 + ], + "angle": 0, + "content": "[52] Jingang Zhang, Wenqi Ren, Shengdong Zhang, He Zhang, Yunfeng Nie, Zhe Xue, and Xiaochun Cao. Hierarchical density-aware dehazing network. IEEE Transactions on Cybernetics, 52(10):11187-11199, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.631, + 0.892, + 0.699 + ], + "angle": 0, + "content": "[53] Shengdong Zhang, Wenqi Ren, Xin Tan, Zhi-Jie Wang, Yong Liu, Jingang Zhang, Xiaoqin Zhang, and Xiaochun Cao. Semantic-aware dehazing network with adaptive feature fusion. IEEE Transactions on Cybernetics, 53(1):454-467, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.701, + 0.892, + 0.77 + ], + "angle": 0, + "content": "[54] Zhuoran Zheng, Wenqi Ren, Xiaochun Cao, Xiaobin Hu, Tao Wang, Fenglong Song, and Xiuyi Jia. Ultra-high-definition image dehazing via multi-guided bilateral learning. In IEEE Conference on Computer Vision and Pattern Recognition, pages 16180-16189, 2021. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.772, + 0.892, + 0.827 + ], + "angle": 0, + "content": "[55] Man Zhou, Jie Huang, Chun-Le Guo, and Chongyi Li. FOurmer: an efficient global modeling paradigm for image restoration. In International Conference on Machine Learning, pages 42589-42601, 2023. 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.829, + 0.892, + 0.884 + ], + "angle": 0, + "content": "[56] Man Zhou, Keyu Yan, Xueyang Fu, Aiping Liu, and Chengjun Xie. Pan-guided band-aware multi-spectral feature enhancement for pan-sharpening. IEEE Transactions on Computational Imaging, 9:238-249, 2023. 
4" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.884 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.956 + ], + "angle": 0, + "content": "2640" + } + ] +] \ No newline at end of file diff --git a/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/4a280801-3209-4899-b345-f6dbc9c9ec52_origin.pdf b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/4a280801-3209-4899-b345-f6dbc9c9ec52_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f3e3cc7813e41ce2cdfc018b2915db44e680ad55 --- /dev/null +++ b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/4a280801-3209-4899-b345-f6dbc9c9ec52_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1cf5f40d458f02583ffb3181dc23d56149b360ac20368287288e9549bc1782b +size 3856272 diff --git a/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/full.md b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/full.md new file mode 100644 index 0000000000000000000000000000000000000000..4fb359d6f78bb95621dcf824f305e9af2425a946 --- /dev/null +++ b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/full.md @@ -0,0 +1,528 @@ +# A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint + +Xiaofeng Cong $^{1}$ Jie Gui $^{1*}$ Jing Zhang $^{2}$ Junming Hou $^{1}$ Hao Shen $^{3}$ $^{1}$ Southeast University $^{2}$ University of Sydney $^{3}$ Hefei University of Technology +cxf_svip@163.com, {guijie, junming_hou}@seu.edu.cn, {jingzhang.cy, haoshenhs}@gmail.com + +# Abstract + +Existing research based on deep learning has extensively explored the problem of daytime image dehazing. However, few studies have considered the characteristics of nighttime hazy scenes. There are two distinctions between nighttime and daytime haze. First, there may be multiple active colored light sources with lower illumination intensity in nighttime scenes, which may cause haze, glow and noise with localized, coupled and frequency inconsistent characteristics. Second, due to the domain discrepancy between simulated and real-world data, unrealistic brightness may occur when applying a dehazing model trained on simulated data to real-world data. To address the above two issues, we propose a semi-supervised model for real-world nighttime dehazing. First, the spatial attention and frequency spectrum filtering are implemented as a spatial-frequency domain information interaction module to handle the first issue. Second, a pseudo-label-based retraining strategy and a local window-based brightness loss for semi-supervised training process is designed to suppress haze and glow while achieving realistic brightness. Experiments on public benchmarks validate the effectiveness of the proposed method and its superiority over state-of-the-art methods. The source code and Supplementary Materials are placed in the https://github.com/Xiaofeng-life/SFSNiD. + +# 1. Introduction + +Nighttime and daytime images may contain hazy effects, which may cause their quality to be degraded [7, 15, 39, 50]. 
Therefore, two valuable research fields are proposed, which are daytime single image dehazing (DaSID) [2, 38, 54] and nighttime single image dehazing (NiSID) [14, 19, 31], respectively. Compared with the daytime hazy image, the imaging of the nighttime hazy image is more complex [28, 49]. Currently, NiSID is still a challenging problem. + +Existing research on DaSID [20, 25, 27, 34, 43, 48, 52, + +![](images/a33d695ef8f9fc31a28d41e5473915cd25a36ad8f9b2a8f3f6bf2613b399764d.jpg) + +![](images/2b1d3fc840b83a0715269c12a83f1689a798b9b2464081e3a8992eef4eba2c5f.jpg) +(a) Hazy + +![](images/1dbb0964724e7b4f24d9cdf92ff3031f16d43c74e8b90295dbf443cf2d6986d8.jpg) + +![](images/537781b48e17bde0102d00a0edb8668f11f205dc415d80779ca0ff0556abec5c.jpg) +(b) IM-YellowHaze [26] + +![](images/4ab5fd967d08912e272d0bad0f4f6f5564a3b65613c4b370f66d59da9858bed7.jpg) + +![](images/4eb97ee1d084f19f5763913d6a10dd76a3b4dc0d8102011ddc85cf43a7a4e7d7.jpg) +(c) IM-NightHaze [26] + +![](images/7f7b2a86c660f1d9e4f4a168a2f0a93519799ea7f9397799cbe837b292d5eee0.jpg) + +![](images/da101e16f6745e92d23f32670ec8a941c29b11bca10a37913f01ef50fcdaa2b3.jpg) +(d) IM-NHR [51] + +![](images/d2f8fd9bc6e0c58035da4fe6674f003084b11b227b4717759e534de6274d269f.jpg) + +![](images/ea4291327498c84a7c199af06e65c2f41c2af6e6db82312b0a4c50433af740ba.jpg) +(e) GE-UNREAL-NH [31] + +![](images/43294787656ebc2ebe7ddfbc83fe3e712ab1f3e458b69498201689087745bab9.jpg) + +![](images/4d23e497a790337f1a70700cbfe7d6ee70f99a49c88ba1a00af7120260776ca5.jpg) +(f) Ours +Figure 1. Visualization of real-world dehazed images, where the "IM" and "GE-" denote the dehazed results obtained by training on imaging model (IM) and game engine (GE) simulated datasets, respectively. The curve figure represents the pixel histogram, where the $x$ and $y$ coordinates represent the pixel values and corresponding numbers, respectively. The $x$ and $y$ coordinates of the bar figure represent the color channel and the corresponding average pixel value, respectively. + +53] have achieved impressive performance. Various effective DaSID algorithms have been proposed and verified on benchmark daytime datasets [21]. However, these DaSID algorithms are designed for the properties of daytime hazy and haze-free images, without taking into account the characteristics of nighttime hazy and haze-free images. + +Currently, NiSID research is divided into two types, namely non-deep learning-based NiSID and deep learning-based NiSID. On the one hand, the prior hypotheses and statistical laws are explored [50, 51]. The maximum reflectance prior to estimate the varying ambient illumination is proposed by [50]. The illumination estimation, color + +correction and image prior are integrated by [49]. On the other hand, the deep learning-based architectures are designed for the NiSID task [14, 31]. Liu et al. [31] combine the dark channel and bright channel prior with the Transformer mechanism [32] into an end-to-end training flow. The gradient-adaptive convolution and glow pair synthesis are designed by Jin et al. [14]. Existing learning-based algorithms have achieved remarkable performance on synthetic datasets. However, these methods still lack consideration of the characteristics of nighttime hazy images. + +During the day, the main source of imaging light is sunlight [7]. 
The formation of the daytime hazy image can be described by the atmospheric scattering model [7] as + +$$ +I (a) = J (a) t (a) + A (a) (1 - t (a)), \tag {1} +$$ + +where $I(a)$ , $J(a)$ , $t(a)$ and $A(a)$ denote the hazy image, clear image, transmission map and global atmospheric light, respectively. The $a$ means the pixel location. Meanwhile, a widely used physical model [16, 18] in the NiSID task is + +$$ +I (a) = J (a) t (a) + A (a) (1 - t (a)) + L _ {s} (a) * \varkappa (a), \tag {2} +$$ + +where $L_{s}(a)$ and $\varkappa(a)$ denote the light sources and atmospheric point spread function. As shown in Eq. 1 and Eq. 2, the main distinction between daytime and nighttime haze imaging is light sources [1, 4, 24, 29, 30, 41, 46], which we consider to be the main source of the difficulty. Specifically, two outstanding issues are considered as follows. + +- Localized, Coupled and Frequency Inconsistent: As shown in Figure 1, multiple active light sources may exist simultaneously. Therefore, the distortion of nighttime images, namely the haze that is mainly generated by suspended particles and liquid water droplets, the glow that is mainly produced by active light sources and the noise that is mainly caused by low intensity, is usually localized. Meanwhile, these types of distortions are mixed throughout the image, which is coupled. Furthermore, the haze and glow will cause the loss of high-frequency signals, while the noise belongs to high-frequency disturbance signals [22] that needs to be eliminated. This means that these distortions have inconsistent frequency characteristics. In a word, a challenging issue is how to simultaneously handle distortions with localized, coupled and frequency inconsistent characteristics. + +- Unrealistic Brightness Intensity: Nighttime hazy datasets based on real-world images synthesized by imaging model (IM) are difficult to simulate multiple active light sources, while nighttime hazy datasets based on game engine (GE) cannot perfectly reproduce the harmonious brightness of real-world nighttime scenes. As we observed in Figure 1, the dehazed images obtained under IM datasets still suffer from the glow and haze that caused by multiple light sources, but the overall brightness is realistic. The dehazed images obtained under GE + +dataset show less haze and glow, but the scene brightness is unrealistic. In a word, an unsolved problem faced by data-driven algorithms is how to suppress haze and glow while achieving realistic brightness. + +Therefore, we propose a semi-supervised dehazing framework that can be used for the real-world NiSID task. Firstly, the local attention [32] is adopted to learn the inductive bias in the spatial domain to suppress local distortions. A frequency spectrum dynamic filtering strategy is designed to handle distortions with inconsistent frequency characteristics. Considering the coupled of these distortions, the spatial and frequency information are integrated as a bidomain interaction module for feature extraction and image reconstruction. Secondly, aiming at suppressing distortions while achieving realistic brightness. The simulation data provided by the game engine is utilized to generate pseudo labels that can suppress haze and glow for retraining process. Then, real-world hazy images are adopted as brightness-realistic signals for the realistic brightness constraint. Overall, the main contributions of this paper are as follows. 
Therefore, we propose a semi-supervised dehazing framework that can be applied to the real-world NiSID task. Firstly, local attention [32] is adopted to learn the inductive bias in the spatial domain and suppress local distortions, and a frequency spectrum dynamic filtering strategy is designed to handle distortions with inconsistent frequency characteristics. Considering the coupling of these distortions, the spatial and frequency information are integrated in a bidomain interaction module for feature extraction and image reconstruction. Secondly, aiming at suppressing distortions while achieving realistic brightness, the simulation data provided by the game engine is utilized to generate pseudo labels that can suppress haze and glow for the retraining process, and real-world hazy images are adopted as brightness-realistic signals for the realistic brightness constraint. Overall, the main contributions of this paper are as follows.

- We propose a spatial and frequency domain aware semi-supervised nighttime dehazing network (SFSNiD). SFSNiD can remove nighttime haze that is accompanied by glow and noise. The experimental results on synthetic and real-world datasets show that the proposed method achieves impressive performance.
- We design a spatial and frequency domain information interaction (SFII) module to simultaneously handle the haze, glow and noise with localized, coupled and frequency inconsistent characteristics. The multi-channel amplitude and phase spectrums are dynamically filtered and aggregated, and the spatial and frequency domain features are integrated by local attention.
- A retraining strategy and a local window-based brightness loss are designed for the semi-supervised training process to suppress haze and glow while achieving realistic brightness. The retraining strategy is based on pseudo labels, and the hazy image is divided into non-overlapping windows to calculate a local brightness map that provides realistic brightness supervision.

# 2. Related Work

# 2.1. Daytime Dehazing

A variety of effective dehazing algorithms for DaSID have been proposed. An ultra-high-resolution dehazing method based on a bilateral grid is proposed by 4KDehazing [54]. AECRNet [43] introduces contrastive learning into the dehazing process. Prior information and a visual attention mechanism are utilized in DeHamer [9]. DF [38] designs an encoder-decoder architecture that is entirely based on multi-head self-attention [32]. MITNet [37] combines a mutual information-driven constraint and an adaptive triple interaction

![](images/4790f658478349dda72da0fc88502959c577c4c4aa39a1319b2417d27580426a.jpg)
Figure 2. The overall pipeline of the proposed SFSNiD.

strategy into a supervised training process. Although these DaSID algorithms have achieved impressive performance, they are not designed for the characteristics of nighttime hazy images, which may cause certain limitations on the NiSID task [31].

# 2.2. Nighttime Dehazing

Compared with DaSID, NiSID has received less attention. On the one hand, prior hypotheses and statistical laws are utilized in the non-deep learning-based NiSID methods [50, 51]. A maximum reflectance prior is proposed by MRP [50], which provides a way to estimate the varying ambient illumination. An optimal-scale fusion-based method is designed by OSFD [51], which utilizes a parameter estimation dehazing flow. On the other hand, data-driven strategies [23, 40] are adopted in the deep learning-based NiSID methods [14, 31, 45]. NightHazeFormer [31] combines the vision transformer and prior knowledge (dark channel and bright channel) into an end-to-end enhancement process. GAC [14] utilizes the angular point spread function to reduce the glow effect in nighttime scenes. Yan et al. [45] propose a strategy that decomposes the image into scene texture information and scene structure information. According to recent research, deep learning-based NiSID algorithms can achieve relatively better quantitative performance given sufficient synthetic data. However, the haze, glow and noise with localized, coupled and frequency inconsistent characteristics are not fully considered by these deep learning-based NiSID algorithms.

# 3. Methods

The hazy domain and haze-free domain are marked as $X$ and $Y$, respectively. The synthesized hazy and haze-free image datasets are denoted as $\mathcal{D}_X$ and $\mathcal{D}_Y$, which contain $N$ images each. The real-world hazy and haze-free datasets are denoted as $\mathcal{R}_X$ and $\mathcal{R}_Y$, which include $M$ images each. The convolution operation is denoted as $C_t^k(\cdot)$, where the superscript $k$ and the subscript $t$ denote the kernel size and stride, respectively. The $\varpi(\cdot)$, $\sigma(\cdot)$, $\delta(\cdot)$ and $sf(\cdot)$ denote the global average pooling, LeakyReLU, sigmoid and softmax operations, respectively. The input hazy images and predicted dehazed images at the three scales are marked as $x_i^s \in \mathcal{D}_X$ and $p_i^s$, respectively, where $s \in \{0, 1, 2\}$ and $i$ denotes the $i$-th example. The sizes of $x_i^0$, $x_i^1$ and $x_i^2$ are $H \times W \times C$, $\frac{H}{2} \times \frac{W}{2} \times C$ and $\frac{H}{4} \times \frac{W}{4} \times C$, respectively, where $H$, $W$ and $C$ denote the height, width and number of channels. The size of $p_i^s$ remains the same as that of $x_i^s$. The network at scale $s$ is denoted as $\Psi^s(\cdot)$.
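As a quick shape check for this notation, the sketch below builds the three input scales from a full-resolution hazy image. Bilinear downsampling and the `Identity` placeholders standing in for the scale networks $\Psi^{s}$ are assumptions for illustration; the paper does not specify how the lower-resolution inputs are produced.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def build_scales(x0):
    """Return (x^0, x^1, x^2): the input at full, half and quarter resolution."""
    x1 = F.interpolate(x0, scale_factor=0.5, mode="bilinear", align_corners=False)
    x2 = F.interpolate(x0, scale_factor=0.25, mode="bilinear", align_corners=False)
    return x0, x1, x2

if __name__ == "__main__":
    x0 = torch.rand(1, 3, 256, 256)              # hazy input, H x W x C = 256 x 256 x 3
    psi = [nn.Identity() for _ in range(3)]      # placeholders for Psi^0, Psi^1, Psi^2
    for s, xs in enumerate(build_scales(x0)):
        ps = psi[s](xs)                          # prediction p^s keeps the size of x^s
        print(s, tuple(xs.shape), tuple(ps.shape))
```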
# 3.1. Network Structure

The multi-scale structure [3] of the SFSNiD is shown in Figure 2. Two kinds of modules are included in the proposed network, namely (i) the spatial and frequency information interaction (SFII) module and (ii) the convolution input (ConvI), convolution output (ConvO), convolution downsampling (ConvD) and convolution upsampling (ConvU) modules. ConvI projects the image into the feature space, while ConvO does the opposite. ConvD halves the height and width of the feature map, while ConvU does the opposite.

# 3.2. Spatial and Frequency Information Interaction

![](images/e56bd87343b14a8a98a0392a8affc67aa1fa03c389ced00931675553659cf27c.jpg)
Figure 3. The sub-modules of the proposed SFII.

![](images/a8e6dd8c397caabedf982609fed5a59bd0b4456e3ab6b59affbe254348d996b9.jpg)
Figure 4. The overall architecture of the proposed SFII.

Preliminary. Consider a feature map $z \in \mathbb{R}^{\widetilde{H} \times \widetilde{W} \times \widetilde{C}}$, where $\widetilde{H}$, $\widetilde{W}$ and $\widetilde{C}$ denote the height, width and number of channels, respectively. We first project each of its channels $z_{\widetilde{c}}$ to the frequency domain by the Fourier transformation [10] $\mathcal{F}$ as

$$
\mathcal{F}\left(z_{\widetilde{c}}\right)(u, v) = \sum_{h=0}^{\widetilde{H}-1} \sum_{w=0}^{\widetilde{W}-1} z_{\widetilde{c}}(h, w) e^{-j 2 \pi\left(\frac{h}{\widetilde{H}} u + \frac{w}{\widetilde{W}} v\right)}, \tag{3}
$$

where $(h, w)$ and $(u, v)$ represent the coordinates in the spatial and frequency domain, respectively, and $\widetilde{c} \in \{0, 1, \dots, \widetilde{C} - 1\}$ denotes the channel index. Correspondingly, $\mathcal{F}^{-1}$ is defined as the inverse Fourier transformation [55]. Then, the real part $\mathcal{R}(z_{\widetilde{c}})(u, v)$ and the imaginary part $\mathcal{I}(z_{\widetilde{c}})(u, v)$ can be obtained from $\mathcal{F}(z_{\widetilde{c}})(u, v)$. The amplitude spectrum $\mathcal{A}(z_{\widetilde{c}})(u, v)$ and phase spectrum $\mathcal{P}(z_{\widetilde{c}})(u, v)$ of $\mathcal{F}(z_{\widetilde{c}})(u, v)$ on a single channel can be obtained by

$$
\mathcal{A}\left(z_{\widetilde{c}}\right)(u, v) = \sqrt{\mathcal{R}^{2}\left(z_{\widetilde{c}}\right)(u, v) + \mathcal{I}^{2}\left(z_{\widetilde{c}}\right)(u, v)}, \tag{4}
$$

$$
\mathcal{P}\left(z_{\widetilde{c}}\right)(u, v) = \arctan\left[\frac{\mathcal{I}\left(z_{\widetilde{c}}\right)(u, v)}{\mathcal{R}\left(z_{\widetilde{c}}\right)(u, v)}\right]. \tag{5}
$$
The full-channel amplitude spectrum $\mathcal{A}(z)(u, v) \in \mathbb{R}^{\widetilde{H} \times \widetilde{W} \times \widetilde{C}}$ and phase spectrum $\mathcal{P}(z)(u, v) \in \mathbb{R}^{\widetilde{H} \times \widetilde{W} \times \widetilde{C}}$ can be obtained by applying Eq. 3, Eq. 4 and Eq. 5 to each channel of $z$.

Frequency Spectrum Dynamic Aggregation (FSDA). The haze, glow and noise with inconsistent frequency characteristics can be processed in the frequency domain by a dynamic spectrum filter. The amplitude spectrum and phase spectrum of the different channels are aggregated by a pointwise convolution as

$$
\mathcal{S}^{*}(z)(u, v) = \sigma\left(C_{1}^{1}\left(\mathcal{S}(z)(u, v)\right)\right), \tag{6}
$$

where $\mathcal{S}(z)(u, v) \in \{\mathcal{A}(z)(u, v), \mathcal{P}(z)(u, v)\}$. To perform channel aggregation of the spectral information, the channel weight map [12] $\mathcal{W}$ is calculated as

$$
\mathcal{W}(z)(u, v) = \delta\left(C_{1}^{1}\left(\sigma\left(C_{1}^{1}\left(\varpi\left(\mathcal{S}^{*}(z)(u, v)\right)\right)\right)\right)\right), \tag{7}
$$

where $\mathcal{W}(z)(u, v) \in \mathbb{R}^{1 \times 1 \times \widetilde{C}}$. Then, the channel weight map is applied to the frequency spectrum as

$$
\dot{\mathcal{S}}(z)(u, v) = C_{1}^{1}\left(\mathcal{W}(z)(u, v) \cdot \mathcal{S}^{*}(z)(u, v)\right), \tag{8}
$$

where the spectrum filter (SF) that produces $\dot{\mathcal{S}}(z)(u, v)$ is shown in Figure 3-(a). The filtering operation is completed by a residual connection, and the filtered component is obtained by

$$
\widetilde{\mathcal{S}}(z)(u, v) = \dot{\mathcal{S}}(z)(u, v) + \mathcal{S}(z)(u, v). \tag{9}
$$

The filtered $\widetilde{\mathcal{A}}(z)(u, v)$ and $\widetilde{\mathcal{P}}(z)(u, v)$ can be obtained by applying the processing flow from $\mathcal{S}(z)(u, v)$ to $\widetilde{\mathcal{S}}(z)(u, v)$ to both spectra. Then, the real and imaginary parts are obtained by

$$
\widetilde{\mathcal{R}}(z)(u, v) = \widetilde{\mathcal{A}}(z)(u, v) \cdot \cos \widetilde{\mathcal{P}}(z)(u, v), \tag{10}
$$

$$
\widetilde{\mathcal{I}}(z)(u, v) = \widetilde{\mathcal{A}}(z)(u, v) \cdot \sin \widetilde{\mathcal{P}}(z)(u, v). \tag{11}
$$

After this dynamic parameter learning in the frequency domain, we map the feature back to the spatial domain as

$$
z_{f} = \mathcal{F}^{-1}\left(\widetilde{\mathcal{R}}(z)(u, v), \widetilde{\mathcal{I}}(z)(u, v)\right), \tag{12}
$$

where $z_{f} \in \mathbb{R}^{\widetilde{H} \times \widetilde{W} \times \widetilde{C}}$. The Fourier transformation and its inverse can be implemented with the DFT and IDFT algorithms [6, 11, 56]. We define the calculation from Eq. 3 to Eq. 12 as frequency spectrum dynamic aggregation (FSDA), which represents the processing flow from $z$ to $z_{f}$ shown in Figure 3-(b). For convenience, the FSDA is denoted as $\mathcal{FS}(\cdot)$.
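As a concrete reference, the FSDA flow of Eq. 3 to Eq. 12 could be sketched in PyTorch roughly as follows. The $1 \times 1$ convolution layout, the LeakyReLU slope and the orthonormal FFT normalization are illustrative assumptions rather than the actual SFSNiD implementation.

```python
import torch
import torch.nn as nn

class SpectrumFilter(nn.Module):
    """Dynamic filtering of one spectrum (Eq. 6 - Eq. 9): pointwise convolution,
    squeeze-and-excitation style channel weights, and a residual connection."""
    def __init__(self, channels):
        super().__init__()
        self.proj = nn.Sequential(nn.Conv2d(channels, channels, 1), nn.LeakyReLU(0.1))
        self.weight = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),                         # global average pooling
            nn.Conv2d(channels, channels, 1), nn.LeakyReLU(0.1),
            nn.Conv2d(channels, channels, 1), nn.Sigmoid())
        self.out = nn.Conv2d(channels, channels, 1)

    def forward(self, s):
        s_star = self.proj(s)                                # Eq. 6
        w = self.weight(s_star)                              # Eq. 7
        s_dot = self.out(w * s_star)                         # Eq. 8
        return s_dot + s                                     # Eq. 9

class FSDA(nn.Module):
    """Frequency spectrum dynamic aggregation (Eq. 3 - Eq. 12), sketched."""
    def __init__(self, channels):
        super().__init__()
        self.amp_filter = SpectrumFilter(channels)
        self.pha_filter = SpectrumFilter(channels)

    def forward(self, z):                                    # z: (B, C, H, W)
        spec = torch.fft.fft2(z, norm="ortho")               # Eq. 3, applied per channel
        amp, pha = torch.abs(spec), torch.angle(spec)        # Eq. 4 and Eq. 5
        amp, pha = self.amp_filter(amp), self.pha_filter(pha)
        real = amp * torch.cos(pha)                          # Eq. 10
        imag = amp * torch.sin(pha)                          # Eq. 11
        z_f = torch.fft.ifft2(torch.complex(real, imag), norm="ortho")  # Eq. 12
        return z_f.real                                      # back to the spatial domain

if __name__ == "__main__":
    print(FSDA(32)(torch.rand(2, 32, 64, 64)).shape)         # torch.Size([2, 32, 64, 64])
```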
Frequency Domain Projection (FDP). To deal with distortions in the frequency domain, we first introduce frequency domain interactions before computing the local inductive bias. The input feature map $z \in \mathbb{R}^{\widetilde{H} \times \widetilde{W} \times \widetilde{C}}$ is processed by a layer normalization operation ($LN(\cdot)$) [32] to obtain the normalized feature $z_{l} = LN(z)$. Then, the normalized feature $z_{l}$ is projected into $Q_{f}$ (query), $K_{f}$ (key) and $V_{f}$ (value) by projections in the frequency domain as

$$
Q_{f} = \mathcal{FS}_{Q}(z_{l}), \quad K_{f} = \mathcal{FS}_{K}(z_{l}), \quad V_{f} = \mathcal{FS}_{V}(z_{l}), \tag{13}
$$

where $\mathcal{FS}_{Q}(\cdot)$, $\mathcal{FS}_{K}(\cdot)$ and $\mathcal{FS}_{V}(\cdot)$ denote three independent projection operations with learnable parameters. The generation process of $Q_{f}$, $K_{f}$ and $V_{f}$ is denoted as the frequency domain projection (FDP).

Bidomain Local Perception (BLP). After obtaining the features $Q_{f}$, $K_{f}$ and $V_{f}$, which already incorporate frequency domain information, we perform spatial domain learning on the features from a local perspective. The self-attention [32] with local perception (LP), shown in Figure 3-(c), is computed within $8 \times 8$ non-overlapping windows as

$$
\mathcal{AT}\left(Q_{f}, K_{f}, V_{f}\right) = sf\left(\frac{Q_{f} \otimes K_{f}^{T}}{\sqrt{d}} + B\right) \otimes V_{f}, \tag{14}
$$

where $d$ and $B$ denote the dimensionality and the position bias, respectively, and $\otimes$ denotes matrix multiplication (MatMul). Information is transferred by the residual connection

$$
z^{*} = \mathcal{AT}\left(Q_{f}, K_{f}, V_{f}\right) + z, \tag{15}
$$

where the calculation from $z$ to $z^{*}$ is marked as bidomain local perception (BLP), which is shown in Figure 4-(a).

Bidomain Nonlinear Mapping (BNM). The computation of window attention does not provide nonlinear representation capabilities. Therefore, we use a frequency and spatial domain interaction module to learn a nonlinear mapping. The FSDA is used to provide the frequency domain information, and a residual block consisting of $C_{1}^{3}(\sigma(C_{1}^{3}(\cdot)))$ is used to provide the spatial interaction. The intermediate feature $z^{*}$ is fed into the frequency nonlinear mapping branch and the spatial nonlinear mapping branch as

$$
z_{fn} = \mathcal{FS}_{A}\left(z^{*}\right), \tag{16}
$$

$$
z_{sn} = C_{1}^{3}\left(\sigma\left(C_{1}^{3}\left(z^{*}\right)\right)\right), \tag{17}
$$

where the subscript $A$ in $\mathcal{FS}_{A}(\cdot)$ indicates that this frequency interaction is performed after the attention operation. Then, the frequency domain and spatial domain features are fused into the final nonlinear mapping output by

$$
\widetilde{z} = C_{1}^{3}\left(\left[z_{fn}, z_{sn} + z^{*}\right]\right) + z^{*}, \tag{18}
$$

where $[\cdot, \cdot]$ denotes channel concatenation. The calculation from $z^{*}$ to $\widetilde{z}$ is marked as the bidomain nonlinear mapping (BNM), which is shown in Figure 4-(b).

Spatial and Frequency Information Interaction (SFII). As shown in Figure 4, the overall calculation process from $z$ to $\widetilde{z}$ is called spatial and frequency information interaction (SFII). The proposed SFII aggregates spatial domain information and frequency domain information from a local perspective.
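The window attention of Eq. 14 and the residual connection of Eq. 15 used inside the BLP can be sketched as follows. The single attention head, the directly learned $64 \times 64$ bias table (instead of a relative-position index) and feeding the same tensor as query, key and value (rather than the FSDA-based projections of Eq. 13) are simplifying assumptions for illustration.

```python
import torch
import torch.nn as nn

class WindowAttention(nn.Module):
    """Self-attention within 8 x 8 non-overlapping windows (Eq. 14), single head."""
    def __init__(self, channels, window=8):
        super().__init__()
        self.window = window
        self.scale = channels ** -0.5                        # 1 / sqrt(d)
        self.bias = nn.Parameter(torch.zeros(window * window, window * window))

    def _partition(self, x):                                 # (B, C, H, W) -> (B*nW, win^2, C)
        B, C, H, W = x.shape
        w = self.window
        x = x.view(B, C, H // w, w, W // w, w)
        return x.permute(0, 2, 4, 3, 5, 1).reshape(-1, w * w, C)

    def _merge(self, x, B, C, H, W):                         # inverse of _partition
        w = self.window
        x = x.view(B, H // w, W // w, w, w, C)
        return x.permute(0, 5, 1, 3, 2, 4).reshape(B, C, H, W)

    def forward(self, q, k, v):                              # each (B, C, H, W), H and W divisible by 8
        B, C, H, W = q.shape
        qw, kw, vw = self._partition(q), self._partition(k), self._partition(v)
        attn = torch.softmax(qw @ kw.transpose(-2, -1) * self.scale + self.bias, dim=-1)
        return self._merge(attn @ vw, B, C, H, W)            # Eq. 14

if __name__ == "__main__":
    z = torch.rand(2, 32, 64, 64)
    z_star = WindowAttention(32)(z, z, z) + z                # residual connection of Eq. 15
    print(z_star.shape)                                      # torch.Size([2, 32, 64, 64])
```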
# 3.3. Spatial and Frequency Loss

The supervised loss consists of two parts, namely a pixel-by-pixel loss in the geometric space and a frequency domain loss obtained by the Fourier transform [3]. By sampling $x_{i}^{s} \in \mathcal{D}_{X}$ and $y_{i}^{s} \in \mathcal{D}_{Y}$, the losses calculated at the three scales are

$$
\mathcal{L}_{G} = \sum_{s=0}^{2} \lambda_{g} \cdot \sum_{i=0}^{N-1} \left\| \Psi^{s}\left(x_{i}^{s}\right) - y_{i}^{s} \right\|_{1}, \tag{19}
$$

$$
\mathcal{L}_{F} = \sum_{s=0}^{2} \lambda_{f} \cdot \sum_{i=0}^{N-1} \left\| \mathcal{F}\left(\Psi^{s}\left(x_{i}^{s}\right)\right) - \mathcal{F}\left(y_{i}^{s}\right) \right\|_{1}, \tag{20}
$$

where $\lambda_{g}$ and $\lambda_{f}$ denote weight factors.

# 3.4. Retraining and Realistic Brightness Loss

Pseudo-label Fusion Retraining. There is an inherent domain discrepancy between synthetic hazy images and real-world hazy images. Therefore, we adopt a retraining strategy that utilizes pseudo labels. Pseudo labels $\mathcal{R}_{Y}^{P}$ are obtained from the model trained on the synthetic datasets. We feed the original synthetic dataset $\{\mathcal{D}_{X}, \mathcal{D}_{Y}\}$ and the pseudo-labeled dataset $\{\mathcal{R}_{X}, \mathcal{R}_{Y}^{P}\}$ into the network simultaneously for retraining. The supervised losses of Eq. 19 and Eq. 20 are used in the retraining process at the three scales.

Prior Brightness Constraint. We conduct quantitative statistics on the brightness of the nighttime hazy and clear images provided by [14]. The brightness intensities corresponding to $x_{i}^{0} \in \mathcal{R}_{X}$ and $y_{i}^{0} \in \mathcal{R}_{Y}$ are $\mu(x_{i}^{0})$ and $\mu(y_{i}^{0})$, respectively, where $\mu(\cdot)$ denotes the average pixel value across the three channels. We randomly select $\hat{M} = \frac{M}{2}$ images from the dataset multiple times, and we observe

$$
\sum_{i=0}^{\hat{M}-1} \mu\left(y_{i}^{0}\right) < \sum_{i=0}^{\hat{M}-1} \mu\left(x_{i}^{0}\right). \tag{21}
$$

Therefore, we assume that the brightness of the dehazed image $p_{i}^{s}$ should be lower than that of $x_{i}^{s}$. This assumption is consistent with the imaging model in Eq. 2.

Local Brightness Map (LBM). We divide the image into non-overlapping local windows. The width and height of each square window are denoted as $\gamma^{s}$, where $s \in \{0, 1, 2\}$. The value in the local brightness map (LBM) $\varphi_{x_{i}^{s}}$ corresponding to $x_{i}^{s}$ is obtained by

$$
\varphi_{x_{i}^{s}}(\hat{h}, \hat{w}) = \frac{1}{3(\gamma^{s})^{2}} \sum_{c=0}^{2} \sum_{h=\hat{h} \cdot \gamma^{s}}^{(\hat{h}+1) \cdot \gamma^{s}} \sum_{w=\hat{w} \cdot \gamma^{s}}^{(\hat{w}+1) \cdot \gamma^{s}} x_{i}^{s}(h, w, c), \tag{22}
$$

where $(\hat{h}, \hat{w})$ and $(h, w)$ denote the pixel indices of $\varphi_{x_{i}^{s}}$ and $x_{i}^{s}$, respectively. The local brightness map $\varphi_{p_{i}^{s}}$ corresponding to $p_{i}^{s}$ is defined in the same way. As shown in Figure 2-(c), the locations with high brightness may be active light sources or objects close to a light source, while the locations with low brightness may be objects and backgrounds far away from the light sources.

Realistic Brightness Loss. The brightness of hazy images is approximately globally realistic, so it can be used to supervise the brightness of dehazed images. As observed in Eq. 21, the brightness of the dehazed image should be lower than that of the hazy image.
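Before the power-function reweighting and the loss itself are given, two of the quantities defined above can be sketched directly: the frequency-domain loss term of Eq. 20 and the local brightness map of Eq. 22. Taking the L1 norm of the complex spectrum difference as its magnitude, averaging instead of summing, and the example window size are assumptions for illustration.

```python
import torch
import torch.nn.functional as F

def frequency_l1_loss(pred, target):
    """Single-scale frequency-domain term of Eq. 20: ||F(pred) - F(target)||_1."""
    diff = torch.fft.fft2(pred, norm="ortho") - torch.fft.fft2(target, norm="ortho")
    return diff.abs().mean()

def local_brightness_map(x, window):
    """Local brightness map of Eq. 22: the average pixel value over the three color
    channels inside each non-overlapping window of side length gamma^s = window."""
    return F.avg_pool2d(x.mean(dim=1, keepdim=True), kernel_size=window, stride=window)

if __name__ == "__main__":
    hazy = torch.rand(1, 3, 256, 256)
    dehazed = torch.rand(1, 3, 256, 256)
    print(frequency_l1_loss(dehazed, hazy).item())
    lbm = local_brightness_map(hazy, window=16)              # 16-pixel windows at full resolution
    print(lbm.shape)                                          # torch.Size([1, 1, 16, 16])
```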
Meanwhile, in order to ensure the relative numerical relationship between areas with high brightness and low brightness before and after dehazing, we use a power function with monotonically increasing properties to process the $\varphi_{x_i^s}(\hat{h},\hat{w})$ , as + +$$ +\widetilde {\varphi} _ {x _ {i} ^ {s}} (\hat {h}, \hat {w}) = \left(\varphi_ {x _ {i} ^ {s}} (\hat {h}, \hat {w})\right) ^ {\kappa}, \tag {23} +$$ + +![](images/40ddf7cd4c54a9cf836c96025155f65351a206a6a894ae6a19bf88ba1ab297c6.jpg) +(a) Hazy + +![](images/83755efa09e8f7af77f754f621b55d111c8c321ef3b2190616e60f5e05c34749.jpg) +(b) MRP + +![](images/13482730aff8388d55b14453daaafca013fb237da171bad07d8fdeb66b3ae61a.jpg) +(c) OSFD + +![](images/a409ab44e08a21ecfd2a339aaed8175cb808bf4b2daae7715984762e47d86ac9.jpg) +(d) GD + +![](images/c1f76453a20f05f5f43ed1b0ac2d75da9dffab37917caa9f4bf331e0e437ff1f.jpg) +(e) MSBDN + +![](images/f8c73b47bb3ec9bcaef2918b8912874d8f7a51461a7fc7e686e9be25677e448e.jpg) +(f) 4KDehazing + +![](images/b632aa6f0474872ef75965211d96e0a544b100aa6ebc1d9c0a4f3e4bdf7383a6.jpg) +(g) AECRNet + +![](images/249dfd533549490ba31093efaa3840ad4591499dea329f7b686588a9af10a2ef.jpg) +(h) DeHamer + +![](images/99d2751b8712dcf5829b1a521e15470b1abd9f553fc12e655f7ec70d11cbb498.jpg) +(i) FSDGN + +![](images/72346426a5ce827757bce67b5bcc7cf2461a2a1df98eaae746ddd10d785c2d54.jpg) +(j) DF + +![](images/3fe97d65e7bcd8f02a1493a13dd40d05a933c2d8fd19a8079e9380bf0dc63681.jpg) +(k) MITNet + +![](images/e4e25aa749d0c8a0281cd2212413468edc5a58b9ceef5b8211ba847a2006134b.jpg) +(1)Fourmer +Figure 5. Visual results on synthetic dataset [31]. + +![](images/75c14a44593a3000f035b679ff4ab46f6e0bdcbaef14f03b7c2d0f01f6317f05.jpg) +(m) Ours + +![](images/85e6d1c03e6b7ba772bb2086a1a2fa2fbc43074248332799df22787835fae17d.jpg) +(n) Label + +Table 1. Quantitative results on datasets that generated by imaging model. + +
| Methods | NHR (SSIM↑ / PSNR↑) | NHM (SSIM↑ / PSNR↑) | NHCL (SSIM↑ / PSNR↑) | NHCM (SSIM↑ / PSNR↑) | NHCD (SSIM↑ / PSNR↑) | NightHaze (SSIM↑ / PSNR↑) | YellowHaze (SSIM↑ / PSNR↑) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| MRP (CVPR 2017) | 0.776 / 19.848 | 0.666 / 15.993 | 0.747 / 22.497 | 0.693 / 20.494 | 0.624 / 17.651 | 0.295 / 12.138 | 0.249 / 13.473 |
| GD (ICCV 2019) | 0.969 / 30.107 | 0.861 / 20.689 | 0.973 / 36.506 | 0.958 / 34.448 | 0.932 / 31.509 | 0.832 / 25.324 | 0.915 / 27.410 |
| OSFD (ACMMM 2020) | 0.808 / 21.028 | 0.722 / 18.491 | 0.786 / 22.329 | 0.739 / 20.929 | 0.672 / 18.501 | 0.304 / 13.387 | 0.259 / 14.775 |
| MSBDN (CVPR 2020) | 0.970 / 31.335 | 0.818 / 20.514 | 0.965 / 35.963 | 0.938 / 32.848 | 0.903 / 30.475 | 0.950 / 33.156 | 0.921 / 29.834 |
| 4KDehazing (CVPR 2021) | 0.950 / 28.613 | 0.830 / 20.429 | 0.967 / 35.006 | 0.958 / 35.162 | 0.912 / 30.048 | 0.850 / 26.562 | 0.861 / 25.835 |
| AECRNet (CVPR 2021) | 0.915 / 24.864 | 0.817 / 19.420 | 0.951 / 33.183 | 0.943 / 33.498 | 0.890 / 28.742 | 0.946 / 32.344 | 0.937 / 29.417 |
| DeHamer (CVPR 2022) | 0.966 / 31.017 | 0.823 / 23.095 | 0.966 / 36.038 | 0.944 / 33.908 | 0.915 / 31.389 | 0.954 / 33.432 | 0.931 / 30.334 |
| FSDGN (ECCV 2022) | 0.975 / 32.072 | 0.874 / 21.415 | 0.972 / 36.432 | 0.952 / 33.723 | 0.922 / 31.559 | 0.948 / 33.521 | 0.955 / 33.062 |
| DF (TIP 2023) | 0.969 / 31.644 | 0.896 / 23.207 | 0.975 / 37.383 | 0.960 / 35.038 | 0.934 / 32.079 | 0.931 / 31.489 | 0.948 / 32.244 |
| MITNet (ACMMM 2023) | 0.974 / 31.969 | 0.859 / 20.884 | 0.969 / 35.794 | 0.945 / 32.849 | 0.916 / 30.628 | 0.946 / 34.114 | 0.932 / 31.186 |
| Fourmer (ICML 2023) | 0.969 / 31.660 | 0.862 / 21.423 | 0.963 / 35.714 | 0.943 / 33.201 | 0.928 / 32.103 | 0.949 / 33.419 | 0.958 / 31.978 |
| Ours | 0.978 / 33.180 | 0.905 / 23.705 | 0.979 / 38.146 | 0.968 / 36.146 | 0.951 / 34.001 | 0.968 / 35.527 | 0.965 / 32.981 |

where $\kappa \geq 1$ is the brightness intensity coefficient. The realistic brightness constraint within a single window is

$$
\mathcal{L}_{B}^{p_{i}^{s}}(\hat{h}, \hat{w}) = \left(\varphi_{p_{i}^{s}}(\hat{h}, \hat{w}) - \xi \cdot \widetilde{\varphi}_{x_{i}^{s}}(\hat{h}, \hat{w})\right)^{2}, \tag{24}
$$

where $\xi$ is a hyperparameter. The realistic brightness loss calculated over all windows is

$$
\mathcal{L}_{B} = \sum_{s=0}^{2} \frac{\lambda_{b}}{\hat{N} \hat{W}^{s} \hat{H}^{s}} \cdot \sum_{i=0}^{\hat{N}-1} \sum_{\hat{h}=0}^{\hat{H}^{s}-1} \sum_{\hat{w}=0}^{\hat{W}^{s}-1} \mathcal{L}_{B}^{p_{i}^{s}}(\hat{h}, \hat{w}), \tag{25}
$$

where $\hat{W}^{s} = W^{s} / \gamma^{s}$, $\hat{H}^{s} = H^{s} / \gamma^{s}$ and $\hat{N} = N + M$. The $\lambda_{b}$ denotes the scale-wise weight of $\mathcal{L}_{B}$.

# 3.5. Total Loss

The overall loss is a combination of the supervised and semi-supervised losses,

$$
\mathcal{L} = \mathcal{L}_{G} + \alpha \mathcal{L}_{F} + \beta \mathcal{L}_{B}, \tag{26}
$$

where $\alpha$ and $\beta$ are the weights of the frequency domain loss and the realistic brightness loss, respectively.

# 4. Experiments

# 4.1. Experiment Setting

Datasets. To comprehensively compare the performance of different algorithms, we conducted experiments on both synthetic and real-world datasets. The synthetic datasets include GTA5 [45], UNREAL-NH [31], {NHR, NHM, NHCL, NHCM, NHCD} [51] and {NightHaze, YellowHaze} [26]. The real-world nighttime haze dataset (RWNH) is provided by [14]. Since the brightness level of the ground-truth labels in UNREAL-NH is close to daytime, we adjust the brightness of the hazy images and the corresponding labels to the level of nighttime low-light images by Gamma correction [33] for the evaluation on RWNH.

Comparison Methods and Evaluation Metrics. MRP [50], GD [27], OSFD [51], MSBDN [5], 4KDehazing [54], AECRNet [43], DeHamer [9], FSDGN [47], DF [38], MITNet [37] and Fourmer [55] are used as comparisons. PSNR [22, 35, 36] and SSIM [8, 42] are used to evaluate the performance on the labeled datasets. BRISQUE [44] and MUSIQ [13, 17] are computed to evaluate the performance on the unlabeled dataset. For metrics marked with $\uparrow$, a larger value indicates higher quality, while for metrics marked with $\downarrow$, a larger value indicates lower quality.

Implementation Details. The batch size is set to 4. The image size is set to $256 \times 256 \times 3$. The learning rate is initialized to 0.0001 and decays by a factor of 0.95 every 10 epochs. The Adam optimizer ($\beta_{1} = 0.9$, $\beta_{2} = 0.999$) is used. The $\lambda_{g}$, $\lambda_{f}$ and $\lambda_{b}$ are all set to 1. The $\alpha$ and $\beta$ are set to 0.1 and 20, respectively. The window sizes $\gamma^{s}$ are set to 16, 8 and 4 for $s \in \{0, 1, 2\}$, respectively. The coefficients $\xi$ and $\kappa$ are set to 1 and 1.3, respectively. The
The + +![](images/f2c71a783412da48ae45b925130b1675a92c9e14d4285ef737d4a3c227783c51.jpg) +(a) Hazy + +![](images/2f13d28929ef75be03f1aa5c80c7a4869326bd52ad7d684bcd25ce15c800ea85.jpg) +(b)MRP + +![](images/2ab4e850fa9e989735bcf9f1fe5f822509134e1bc047c77246cfdcce13fdfdb2.jpg) +(c) OSFD + +![](images/2fa293e1f28c4bf50eb2bf511896c6e2c8cab6f722bce710588fdf47f8fe8408.jpg) +(d) GD + +![](images/9eb01e2043a460f4405d414fc44ccee117da18e9e7643e37519fc26f4633fb3b.jpg) +(e) MSBDN + +![](images/f4d5c9877d1d4a150d75137ad8161fb663c2a2fda42bc09f5cdb96333d7be9d0.jpg) +(f) 4KDehazing + +![](images/778aaff2b30e27c198a8c5c67c11be01014258f51949cce6e5c6d53e2cd349ed.jpg) +(g) AECRNet + +![](images/0b2756e365a887d592be74b006513c110e5c8ea63f25d49727ff914d8870f115.jpg) +(h) DeHamer + +![](images/0af63a08186521c8359dc840ed855cb6dc5cb4d0ebb0733de438c60e3b3644e3.jpg) +(i) DF + +![](images/31292a61a2749d2f8a56a49504c082a91ebce385117ffe9a5e53e0900ca8dd93.jpg) +(j) MITNet + +![](images/840917b6f0fc0d28140d03c1a3e4f187d717aac7ec90ba6ab7c125ae24146d67.jpg) +(k)Fourmer + +![](images/caf5d5ebbc15f975794703dad70d9332388490a1a391d78b5f3c3d1bf6537b19.jpg) +(1) Ours + +![](images/6bc70cb929104cc92ee2896d58959002809ddc649bf32052be11050893323cef.jpg) +(a) Hazy + +![](images/84c6aea0def8a97bf688a610d6cf069567a8af2fad072c9a2ed4f33f79514a8a.jpg) +(b) $\kappa = 1.0$ + +![](images/9838b44ebf7f5bf29b3c3fb7d70f36c4690d6e262554a59050d5b186ebf0697b.jpg) +Figure 6. Visual results on real-world hazy images [14]. +(c) $\kappa = 1.3$ +Figure 7. Dehazed images obtained under different $\kappa$ . + +![](images/05abaae28072af4244ebc728c9ca242e78b5e7b4d19778cb71e777692f30eefc.jpg) +(d) $\kappa = 1.5$ + +![](images/e8dd696bfb955802e39a5f44efc691f9c137e7050fab57bf4b4a00deee0efb61.jpg) +(e) $\kappa = 1.8$ + +![](images/efac5d22c673dce2c9a5640d53dd4bc4c10d65fcd541d5a7b30348a62bf6d6b0.jpg) +(f) $\kappa = 2.0$ + +![](images/6c9911d2e59e8ab292cc26a6d0f918ae830e3a7460aeacde4a51555570b96084.jpg) +(g) $\kappa = 2.3$ + +![](images/c66e4d4caa57dcf3a0cf2ffee880925265670971b44d42202931588516ad1eda.jpg) +(h) $\kappa = 2.5$ + +![](images/bf7b5296406ec0c2ab3087ef043f8275bace726ec821cc0ae52a37d4c43ac645.jpg) +(i) $\kappa = 2.8$ + +![](images/11ed9bb178b2069315f5b6a79735fefd08ea869723a88559a62259317c15f8ee.jpg) +(j) $\kappa = 3.0$ + +proposed model is implemented by PyTorch and trained on the single NVIDIA RTX 4090 platform. + +# 4.2. Comparison with State-of-the-art Algorithms + +Evaluation on Synthetic Datasets. Table 1 and Table 2 show the quantitative dehazing results obtained by state-of-the-art methods. Figure 5 shows the corresponding visual results. The quantitative and visual results demonstrate that the proposed methods achieve an overall better performance than state-of-the-art algorithms. + +Evaluation on Real-world Datasets. Table 2 shows the evaluation results of real-world dehazed images. It is worth pointing out that existing research [7] proposes that the reliability of no-reference metrics in the dehazing task is lower than that of full-reference metrics. Figure 6 shows that the details of the dehazed results obtained by our method are visually better. Meanwhile, the brightness of the dehazed images obtained by most comparison algorithms is obvious unrealistic, while the brightness of the dehazed images obtained by our algorithm is approximately globally realistic. + +Overall Evaluation. According to the quantitative and visual results on synthetic and real-world datasets, the proposed SFSNiD achieves overall better performance. 
More results are placed in the Supplementary Materials.

# 4.3. Ablation Study and Discussions

Spatial and Frequency Information Interaction. The spatial and frequency information interaction (SFII) modules and naive convolution modules are used in the proposed SFSNiD. In order to prove the usefulness of the FDP, LP and BNM contained in the SFII, ablation experiments on the different sub-blocks are performed. The ablation experiments on the proposed SFII include (i) removing the FDP, (ii) removing the LP, (iii) removing the frequency domain processing in the BNM, and (iv) removing the spatial domain processing in the BNM. These four settings are denoted as $R1$, $R2$, $R3$ and $R4$, respectively. Table 3 shows the ablation results under the different settings on UNREAL-NH [31]. The quantitative results demonstrate that the FDP, LP and BNM all have a positive effect on the dehazing performance. Due to the page limit, visualizations of the amplitude and phase spectrums are placed in the Supplementary Materials.

Hierarchical Training and Frequency Domain Loss. The training process of the proposed SFSNiD adopts a hierarchical strategy using the different scales $s \in \{0, 1, 2\}$. Two

Table 2. Quantitative results on datasets generated by the game engine (GTA5 and UNREAL-NH) and the real-world dataset (RWNH).
| Methods | GTA5 (SSIM↑ / PSNR↑) | UNREAL-NH (SSIM↑ / PSNR↑) | RWNH (BRISQUE↓ / MUSIQ↑) |
| --- | --- | --- | --- |
| MRP | 0.662 / 19.460 | 0.467 / 10.039 | 19.418 / 41.194 |
| GD | 0.900 / 30.090 | 0.767 / 21.202 | 31.359 / 33.433 |
| OSFD | 0.711 / 21.461 | 0.443 / 9.169 | 20.860 / 41.779 |
| MSBDN | 0.909 / 32.029 | 0.827 / 25.680 | 38.910 / 29.968 |
| 4KDehazing | 0.903 / 30.314 | 0.774 / 23.087 | 34.965 / 33.536 |
| AECRNet | 0.888 / 26.846 | 0.731 / 21.566 | 27.084 / 37.034 |
| DeHamer | 0.928 / 32.597 | 0.740 / 22.441 | 42.269 / 26.788 |
| FSDGN | 0.923 / 32.642 | 0.702 / 21.736 | 32.216 / 35.200 |
| DF | 0.918 / 32.856 | 0.770 / 23.017 | 33.678 / 31.663 |
| MITNet | 0.899 / 31.118 | 0.766 / 21.860 | 35.404 / 31.768 |
| Fourmer | 0.917 / 31.926 | 0.772 / 22.799 | 35.850 / 31.367 |
| Ours | 0.935 / 33.708 | 0.862 / 25.907 | 30.975 / 32.120 |
+ +![](images/5abd6dd2f674234b94412ccb616c6d25a2431d7ccf30fdec562ee30824fe8c74.jpg) +(a) Hazy + +![](images/d5b9c2bafd8259ea81c2f970185c9a7295c9aafebfa0b93fa8162ad2a0b2f155.jpg) +(b) Pseudo Label +Figure 8. Visual results under different training strategies. + +![](images/1f3fa71fcd49295d08e3ad014b56977adedbfb27397aaffff308f520a0d38012.jpg) +(c) Retraining + +![](images/e09788f7f60b4aaf8ff5dff2b50bf82ff2a789d55878016703283298cce0e536.jpg) +(d) Retraining +LB + +ablation studies are adopted, which are denoted as (i) $S1$ : $s \in \{0\}$ , and (ii) $S2$ : $s \in \{0, 1\}$ . Meanwhile, in our experimental setup, the spatial domain loss $L_{G}$ and the frequency domain loss $L_{F}$ are applied simultaneously. To verify the effectiveness of frequency domain loss, the setting when $L_{F}$ is not used is denoted as $S3$ ( $s \in \{0, 1, 2\}$ ). Table 4 shows the ablation results under the three different settings. The quantitative results demonstrate two main conclusions. First, the hierarchical training strategy can improve the dehazing performance. Second, the loss in the frequency domain is crucial as it improves the SSIM from 0.816 to 0.862. + +Retraining Strategy and Realistic Brightness Loss. To verify the effectiveness of the retraining strategy and the realistic brightness loss $\mathcal{L}_B$ , the visual effects are shown in Figure 8. As shown in Figure 8-(b), the texture of the pseudo-labels is blurred due to the domain discrepancy between the synthetic and real-world data. The dehazed images obtained after retraining has unrealistic brightness as shown in Figure 8-(c). It can be seen that the best effect occurs when the retraining strategy and $\mathcal{L}_B$ are used simultaneously as shown in Figure 8-(d). The BRISQUE $(\downarrow)$ and MUSIQ $(\uparrow)$ obtained for the three settings (b), (c) and (d) in Figure 8 are $\{33.316, 30.432\}$ , $\{34.210, 32.373\}$ and $\{30.975, 32.120\}$ , respectively. Taking a comprehensive look at the visual and quantitative evaluation results, our proposed strategy is effective. + +Brightness intensity coefficient $\kappa$ in $\mathcal{L}_B$ . In order to demonstrate the effectiveness of $\kappa$ on the real-world dehaz- + +![](images/fb642f7783d1d9a3e6e71a1cc8cc831c973f3d84cfa3a4471f79aa7487b8d403.jpg) +Figure 9. The average pixel value obtained under different $\kappa$ . The horizontal dashed line represents the average pixel value of real-world nighttime clear images [14]. + +Table 3. Ablation study on the SFII. + +
| Settings | R1 | R2 | R3 | R4 | Ours |
| --- | --- | --- | --- | --- | --- |
| SSIM | 0.848 | 0.858 | 0.851 | 0.845 | 0.862 |
| PSNR | 25.353 | 25.808 | 25.642 | 24.301 | 25.907 |
+ +Table 4. Ablation study on the scale loss and frequency loss. + +
| Settings | S1 | S2 | S3 | Ours |
| --- | --- | --- | --- | --- |
| SSIM | 0.854 | 0.851 | 0.816 | 0.862 |
| PSNR | 25.601 | 25.134 | 24.464 | 25.907 |
+ +ing task, we manually set $\kappa$ to different values. The dehazed images and average pixel value when $\kappa$ takes different values are shown in Figure 7 and Figure 9, respectively. There are two conclusions that can be drawn. First, as $\kappa$ increases, the brightness of the dehazed image continues to decrease, which proves that $\kappa$ can control the brightness of the dehazed image. Second, when $\kappa$ equals 1.3, the average pixel value (0.225) of dehazed images is close to the average pixel value real-world nighttime clear images (0.217) [14]. Therefore, we set $\kappa$ to 1.3 as the final setting. + +# 5. Conclusion + +In this paper, a semi-supervised nighttime image dehazing baseline SFSNiD is proposed for real-world nighttime dehazing. A spatial and frequency domain information interaction module is proposed to handle the haze, glow, and noise with localized, coupled and frequency inconsistent characteristics. A retraining strategy and a local window-based brightness loss for semi-supervised training process are designed to suppress haze and glow while achieving realistic brightness. Experiments on public benchmarks validate the effectiveness of the proposed method and its superiority over state-of-the-art methods. + +Acknowledgment. This work was supported in part by the grant of the National Science Foundation of China under Grant 62172090; Start-up Research Fund of Southeast University under Grant RF1028623097; CAAI-Huawei MindSpore Open Fund. We thank the Big Data Computing Center of Southeast University for providing the facility support on the numerical calculations in this paper. + +# References + +[1] Cosmin Ancuti, Codruta O Ancuti, Christophe De Vleeschouwer, and Alan C Bovik. Day and night-time dehazing by local airlight estimation. IEEE Transactions on Image Processing, 29:6264-6275, 2020. 2 +[2] Xiaofeng Cong, Jie Gui, Kai-Chao Miao, Jun Zhang, Bing Wang, and Peng Chen. Discrete haze level dehazing network. In ACM International Conference on Multimedia, pages 1828-1836, 2020. 1 +[3] Yuning Cui, Yi Tao, Zhenshan Bing, Wenqi Ren, Xinwei Gao, Xiaochun Cao, Kai Huang, and Alois Knoll. Selective frequency network for image restoration. In The Eleventh International Conference on Learning Representations, 2022. 3, 5 +[4] Yuekun Dai, Chongyi Li, Shangchen Zhou, Ruicheng Feng, and Chen Change Loy. Flare7k: A phenomenological nighttime flare removal dataset. Advances in Neural Information Processing Systems, 35:3926-3937, 2022. 2 +[5] Hang Dong, Jinshan Pan, Lei Xiang, Zhe Hu, Xinyi Zhang, Fei Wang, and Ming-Hsuan Yang. Multi-scale boosted dehazing network with dense feature fusion. In IEEE Conference on Computer Vision and Pattern Recognition, pages 2157-2167, 2020. 6 +[6] Matteo Frigo and Steven G Johnson. Fftw: An adaptive software architecture for the fft. In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 1381-1384, 1998. 4 +[7] Jie Gui, Xiaofeng Cong, Yuan Cao, Wenqi Ren, Jun Zhang, Jing Zhang, Jiuxin Cao, and Dacheng Tao. A comprehensive survey and taxonomy on single image dehazing based on deep learning. ACM Computing Surveys, 2023. 1, 2, 7 +[8] Chunle Guo, Chongyi Li, Jichang Guo, Chen Change Loy, Junhui Hou, Sam Kwong, and Runmin Cong. Zero-reference deep curve estimation for low-light image enhancement. In IEEE Conference on Computer Vision and Pattern Recognition, pages 1780-1789, 2020. 6 +[9] Chun-Le Guo, Qixin Yan, Saeed Anwar, Runmin Cong, Wenqi Ren, and Chongyi Li. 
Image dehazing transformer with transmission-aware 3d position embedding. In IEEE Conference on Computer Vision and Pattern Recognition, pages 5812-5820, 2022. 2, 6 +[10] Xin Guo, Xueyang Fu, Man Zhou, Zhen Huang, Jialun Peng, and Zheng-Jun Zha. Exploring fourier prior for single image rain removal. In International Joint Conferences on Artificial Intelligence, pages 935–941, 2022. 3 +[11] Junming Hou, Qi Cao, Ran Ran, Che Liu, Junling Li, and Liang-jian Deng. Bidomain modeling paradigm for pan-sharpening. In ACM International Conference on Multimedia, pages 347-357, 2023. 4 +[12] Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation networks. In IEEE Conference on Computer Vision and Pattern Recognition, pages 7132-7141, 2018. 4 +[13] Shirui Huang, Keyan Wang, Huan Liu, Jun Chen, and Yun-song Li. Contrastive semi-supervised learning for underwater image restoration via reliable bank. In IEEE Conference on Computer Vision and Pattern Recognition, pages 18145-18155, 2023. 6 + +[14] Yeying Jin, Beibei Lin, Wending Yan, Wei Ye, Yuan Yuan, and Robby T Tan. Enhancing visibility in nighttime haze images using guided apsf and gradient adaptive convolution. In ACM International Conference on Multimedia, 2023. 1, 2, 3, 5, 6, 7, 8 +[15] Mingye Ju, Can Ding, Charles A Guo, Wenqi Ren, and Dacheng Tao. Idrlp: Image dehazing using region line prior. IEEE Transactions on Image Processing, 30:9043-9057, 2021. 1 +[16] Mingye Ju, Can Ding, Wenqi Ren, Yi Yang, Dengyin Zhang, and Y Jay Guo. Ide: Image dehazing and exposure using an enhanced atmospheric scattering model. IEEE Transactions on Image Processing, 30:2180-2192, 2021. 2 +[17] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In IEEE International Conference on Computer Vision, pages 5148-5157, 2021. 6 +[18] Beomhyuk Koo and Gyeonghwan Kim. Nighttime haze removal with glow decomposition using gan. In Pattern Recognition: 5th Asian Conference, pages 807-820, 2020. 2 +[19] Shiba Kuanar, Dwarikanath Mahapatra, Monalisa Bilas, and KR Rao. Multi-path dilated convolution network for haze and glow removal in nighttime images. The Visual Computer, pages 1-14, 2022. 1 +[20] Boyi Li, Xiulian Peng, Zhangyang Wang, Jizheng Xu, and Dan Feng. Aod-net: All-in-one dehazing network. In IEEE International Conference on Computer Vision, pages 4770-4778, 2017. 1 +[21] Boyi Li, Wenqi Ren, Dengpan Fu, Dacheng Tao, Dan Feng, Wenjun Zeng, and Zhangyang Wang. Benchmarking single-image dehazing and beyond. IEEE Transactions on Image Processing, 28(1):492-505, 2018. 1 +[22] Chongyi Li, Chun-Le Guo, Man Zhou, Zhexin Liang, Shangchen Zhou, Ruicheng Feng, and Chen Change Loy. Embedding fourier for ultra-high-definition low-light image enhancement. arXiv preprint arXiv:2302.11831, 2023. 2, 6 +[23] Kun Li, Dan Guo, and Meng Wang. Proposal-free video grounding with contextual pyramid network. In AAAI Conference on Artificial Intelligence, pages 1902-1910, 2021. 3 +[24] Yu Li, Robby T Tan, and Michael S Brown. Nighttime haze removal with glow and multiple light colors. In IEEE International Conference on Computer Vision, pages 226-234, 2015. 2 +[25] Yudong Liang, Bin Wang, Wangmeng Zuo, Jiaying Liu, and Wenqi Ren. Self-supervised learning and adaptation for single image dehazing. In International Joint Conference on Artificial Intelligence, pages 1-15, 2022. 1 +[26] Yinghong Liao, Zhuo Su, Xiangguo Liang, and Bin Qiu. Hdp-net: Haze density prediction network for nighttime de-hazing. 
In Pacific Rim Conference on Multimedia, pages 469-480, 2018. 1, 6 +[27] Xiaohong Liu, Yongrui Ma, Zhihao Shi, and Jun Chen. Griddehazenet: Attention-based multi-scale network for image dehazing. In IEEE International Conference on Computer Vision, pages 7314-7323, 2019. 1, 6 +[28] Yun Liu, Anzhi Wang, Hao Zhou, and Pengfei Jia. Single nighttime image dehazing based on image decomposition. Signal Processing, 183:107986, 2021. 1 + +[29] Yun Liu, Zhongsheng Yan, Jinge Tan, and Yuche Li. Multipurpose oriented single nighttime image haze removal based on unified variational retina model. IEEE Transactions on Circuits and Systems for Video Technology, 33(4):1643-1657, 2022. 2 +[30] Yun Liu, Zhongsheng Yan, Aimin Wu, and Tian Ye. Night-time image dehazing based on variational decomposition model. In IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 640-649, 2022. 2 +[31] Yun Liu, Zhongsheng Yan, Sixiang Chen, Tian Ye, Wenqi Ren, and Erkang Chen. Nighthazeformer: Single nighttime haze removal using prior query transformer. In ACM International Conference on Multimedia, 2023. 1, 2, 3, 6, 7 +[32] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In IEEE International Conference on Computer Vision, pages 10012-10022, 2021. 2, 4 +[33] Wenqi Ren, Sifei Liu, Lin Ma, Qianqian Xu, Xiangyu Xu, Xiaochun Cao, Junping Du, and Ming-Hsuan Yang. Low-light image enhancement via a deep hybrid network. IEEE Transactions on Image Processing, 28(9):4364-4375, 2019. 6 +[34] Wenqi Ren, Jinshan Pan, Hua Zhang, Xiaochun Cao, and Ming-Hsuan Yang. Single image dehazing via multi-scale convolutional neural networks with holistic edges. International Journal of Computer Vision, 128:240-259, 2020. 1 +[35] Yuanjie Shao, Lerenhan Li, Wenqi Ren, Changxin Gao, and Nong Sang. Domain adaptation for image dehazing. In IEEE Conference on Computer Vision and Pattern Recognition, pages 2808-2817, 2020. 6 +[36] Hao Shen, Zhong-Qiu Zhao, and Wandi Zhang. Adaptive dynamic filtering network for image denoising. In AAAI Conference on Artificial Intelligence, pages 2227-2235, 2023. 6 +[37] Hao Shen, Zhong-Qiu Zhao, Yulun Zhang, and Zhao Zhang. Mutual information-driven triple interaction network for efficient image dehazing. In ACM International Conference on Multimedia, pages 7-16, 2023. 2, 6 +[38] Yuda Song, Zhuqing He, Hui Qian, and Xin Du. Vision transformers for single image dehazing. IEEE TIP, 32:1927-1941, 2023. 1, 2, 6 +[39] Shangquan Sun, Wenqi Ren, and Tao Wang. Rethinking image restoration for object detection. Advances in Neural Information Processing Systems, 35:4461-4474, 2022. 1 +[40] Fei Wang, Dan Guo, and Kun Li. Eulermormer: Robust eulerian motion magnification via dynamic filtering within transformer. arXiv preprint arXiv:2312.04152, 2023. 3 +[41] Wenhui Wang, Anna Wang, and Chen Liu. Variational single nighttime image haze removal with a gray haze-line prior. IEEE Transactions on Image Processing, 31:1349-1363, 2022. 2 +[42] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 6 +[43] Haiyan Wu, Yanyun Qu, Shaohui Lin, Jian Zhou, Ruizhi Qiao, Zhizhong Zhang, Yuan Xie, and Lizhuang Ma. Contrastive learning for compact single image dehazing. In IEEE + +Conference on Computer Vision and Pattern Recognition, pages 10551-10560, 2021. 
1, 2, 6 +[44] Rui-Qi Wu, Zheng-Peng Duan, Chun-Le Guo, Zhi Chai, and Chongyi Li. Ridcp: Revitalizing real image dehazing via high-quality codebook priors. In IEEE Conference on Computer Vision and Pattern Recognition, pages 22282-22291, 2023. 6 +[45] Wending Yan, Robby T Tan, and Dengxin Dai. Night-time defogging using high-low frequency decomposition and grayscale-color networks. In European Conference on Computer Vision, pages 473-488, 2020. 3, 6 +[46] Minmin Yang, Jianchang Liu, and Zhengguo Li. Superpixel-based single nighttime image haze removal. IEEE Transactions on Multimedia, 20(11):3008-3018, 2018. 2 +[47] Hu Yu, Naishan Zheng, Man Zhou, Jie Huang, Zeyu Xiao, and Feng Zhao. Frequency and spatial dual guidance for image dehazing. In European Conference on Computer Vision, pages 181-198, 2022. 6 +[48] Jing Zhang and Dacheng Tao. Famed-net: A fast and accurate multi-scale end-to-end dehazing network. IEEE Transactions on Image Processing, 29:72-84, 2019. 1 +[49] Jing Zhang, Yang Cao, and Zengfu Wang. Nighttime haze removal based on a new imaging model. In IEEE International Conference on Image Processing, pages 4557-4561, 2014. 1, 2 +[50] Jing Zhang, Yang Cao, Shuai Fang, Yu Kang, and Chang Wen Chen. Fast haze removal for nighttime image using maximum reflectance prior. In IEEE Conference on Computer Vision and Pattern Recognition, pages 7418-7426, 2017. 1, 3, 6 +[51] Jing Zhang, Yang Cao, Zheng-Jun Zha, and Dacheng Tao. Nighttime dehazing with a synthetic benchmark. In ACM International Conference on Multimedia, pages 2355-2363, 2020. 1, 3, 6 +[52] Jingang Zhang, Wenqi Ren, Shengdong Zhang, He Zhang, Yunfeng Nie, Zhe Xue, and Xiaochun Cao. Hierarchical density-aware dehazing network. IEEE Transactions on Cybernetics, 52(10):11187-11199, 2021. 1 +[53] Shengdong Zhang, Wenqi Ren, Xin Tan, Zhi-Jie Wang, Yong Liu, Jingang Zhang, Xiaoqin Zhang, and Xiaochun Cao. Semantic-aware dehazing network with adaptive feature fusion. IEEE Transactions on Cybernetics, 53(1):454-467, 2021. 1 +[54] Zhuoran Zheng, Wenqi Ren, Xiaochun Cao, Xiaobin Hu, Tao Wang, Fenglong Song, and Xiuyi Jia. Ultra-high-definition image dehazing via multi-guided bilateral learning. In IEEE Conference on Computer Vision and Pattern Recognition, pages 16180-16189, 2021. 1, 2, 6 +[55] Man Zhou, Jie Huang, Chun-Le Guo, and Chongyi Li. FOurmer: an efficient global modeling paradigm for image restoration. In International Conference on Machine Learning, pages 42589-42601, 2023. 4, 6 +[56] Man Zhou, Keyu Yan, Xueyang Fu, Aiping Liu, and Chengjun Xie. Pan-guided band-aware multi-spectral feature enhancement for pan-sharpening. IEEE Transactions on Computational Imaging, 9:238-249, 2023. 
4 \ No newline at end of file diff --git a/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/images.zip b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..3e948787554e95bd9f745dd298fb6dd87c805bb5 --- /dev/null +++ b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3e53f385c44c4be2aea2a47cbc2c8ba85fc99413d519377be849ec5f86815c4 +size 899738 diff --git a/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/layout.json b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..10c4f43d0bf2a47cccc957cf46c4aae13549891a --- /dev/null +++ b/2024/A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint/layout.json @@ -0,0 +1,14854 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 53, + 103, + 541, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 103, + 541, + 140 + ], + "spans": [ + { + "bbox": [ + 53, + 103, + 541, + 140 + ], + "type": "text", + "content": "A Semi-supervised Nighttime Dehazing Baseline with Spatial-Frequency Aware and Realistic Brightness Constraint" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "spans": [ + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "text", + "content": "Xiaofeng Cong" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "text", + "content": " Jie Gui" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "text", + "content": " Jing Zhang" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "text", + "content": " Junming Hou" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "text", + "content": " Hao Shen" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "text", + "content": "Southeast University " + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "text", + "content": "University of Sydney " + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 69, + 160, + 523, + 204 + ], + "type": "text", + "content": "Hefei University of Technology \ncxf_svip@163.com, {guijie, junming_hou}@seu.edu.cn, {jingzhang.cy, haoshenhs}@gmail.com" + } + ] + } + ], + 
"index": 3 + }, + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "spans": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 256, + 290, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 256, + 290, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 290, + 544 + ], + "type": "text", + "content": "Existing research based on deep learning has extensively explored the problem of daytime image dehazing. However, few studies have considered the characteristics of nighttime hazy scenes. There are two distinctions between nighttime and daytime haze. First, there may be multiple active colored light sources with lower illumination intensity in nighttime scenes, which may cause haze, glow and noise with localized, coupled and frequency inconsistent characteristics. Second, due to the domain discrepancy between simulated and real-world data, unrealistic brightness may occur when applying a dehazing model trained on simulated data to real-world data. To address the above two issues, we propose a semi-supervised model for real-world nighttime dehazing. First, the spatial attention and frequency spectrum filtering are implemented as a spatial-frequency domain information interaction module to handle the first issue. Second, a pseudo-label-based retraining strategy and a local window-based brightness loss for semi-supervised training process is designed to suppress haze and glow while achieving realistic brightness. Experiments on public benchmarks validate the effectiveness of the proposed method and its superiority over state-of-the-art methods. The source code and Supplementary Materials are placed in the https://github.com/Xiaofeng-life/SFSNiD." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 567, + 128, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 567, + 128, + 578 + ], + "spans": [ + { + "bbox": [ + 47, + 567, + 128, + 578 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 586, + 287, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 586, + 287, + 682 + ], + "spans": [ + { + "bbox": [ + 46, + 586, + 287, + 682 + ], + "type": "text", + "content": "Nighttime and daytime images may contain hazy effects, which may cause their quality to be degraded [7, 15, 39, 50]. Therefore, two valuable research fields are proposed, which are daytime single image dehazing (DaSID) [2, 38, 54] and nighttime single image dehazing (NiSID) [14, 19, 31], respectively. Compared with the daytime hazy image, the imaging of the nighttime hazy image is more complex [28, 49]. Currently, NiSID is still a challenging problem." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 59, + 682, + 287, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 682, + 287, + 694 + ], + "spans": [ + { + "bbox": [ + 59, + 682, + 287, + 694 + ], + "type": "text", + "content": "Existing research on DaSID [20, 25, 27, 34, 43, 48, 52," + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 309, + 230, + 386, + 280 + ], + "blocks": [ + { + "bbox": [ + 309, + 230, + 386, + 280 + ], + "lines": [ + { + "bbox": [ + 309, + 230, + 386, + 280 + ], + "spans": [ + { + "bbox": [ + 309, + 230, + 386, + 280 + ], + "type": "image", + "image_path": "a33d695ef8f9fc31a28d41e5473915cd25a36ad8f9b2a8f3f6bf2613b399764d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 280, + 386, + 328 + ], + "blocks": [ + { + "bbox": [ + 309, + 280, + 386, + 328 + ], + "lines": [ + { + "bbox": [ + 309, + 280, + 386, + 328 + ], + "spans": [ + { + "bbox": [ + 309, + 280, + 386, + 328 + ], + "type": "image", + "image_path": "2b1d3fc840b83a0715269c12a83f1689a798b9b2464081e3a8992eef4eba2c5f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 330, + 358, + 338 + ], + "lines": [ + { + "bbox": [ + 331, + 330, + 358, + 338 + ], + "spans": [ + { + "bbox": [ + 331, + 330, + 358, + 338 + ], + "type": "text", + "content": "(a) Hazy" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 387, + 230, + 464, + 280 + ], + "blocks": [ + { + "bbox": [ + 387, + 230, + 464, + 280 + ], + "lines": [ + { + "bbox": [ + 387, + 230, + 464, + 280 + ], + "spans": [ + { + "bbox": [ + 387, + 230, + 464, + 280 + ], + "type": "image", + "image_path": "1dbb0964724e7b4f24d9cdf92ff3031f16d43c74e8b90295dbf443cf2d6986d8.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 387, + 280, + 463, + 328 + ], + "blocks": [ + { + "bbox": [ + 387, + 280, + 463, + 328 + ], + "lines": [ + { + "bbox": [ + 387, + 280, + 463, + 328 + ], + "spans": [ + { + "bbox": [ + 387, + 280, + 463, + 328 + ], + "type": "image", + "image_path": "537781b48e17bde0102d00a0edb8668f11f205dc415d80779ca0ff0556abec5c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 391, + 330, + 460, + 338 + ], + "lines": [ + { + "bbox": [ + 391, + 330, + 460, + 338 + ], + "spans": [ + { + "bbox": [ + 391, + 330, + 460, + 338 + ], + "type": "text", + "content": "(b) IM-YellowHaze [26]" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 465, + 230, + 543, + 280 + ], + "blocks": [ + { + "bbox": [ + 465, + 230, + 543, + 280 + ], + "lines": [ + { + "bbox": [ + 465, + 230, + 543, + 280 + ], + "spans": [ + { + "bbox": [ + 465, + 230, + 543, + 280 + ], + "type": "image", + "image_path": "4ab5fd967d08912e272d0bad0f4f6f5564a3b65613c4b370f66d59da9858bed7.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 465, + 280, + 543, + 328 + ], + "blocks": [ + { + "bbox": [ + 465, + 280, + 543, + 328 + ], + "lines": [ + { + "bbox": [ + 465, + 280, + 543, + 328 + ], + "spans": [ + { + "bbox": [ + 465, + 280, + 543, + 328 + ], + "type": "image", + "image_path": 
"4eb97ee1d084f19f5763913d6a10dd76a3b4dc0d8102011ddc85cf43a7a4e7d7.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 473, + 330, + 539, + 338 + ], + "lines": [ + { + "bbox": [ + 473, + 330, + 539, + 338 + ], + "spans": [ + { + "bbox": [ + 473, + 330, + 539, + 338 + ], + "type": "text", + "content": "(c) IM-NightHaze [26]" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 309, + 342, + 386, + 390 + ], + "blocks": [ + { + "bbox": [ + 309, + 342, + 386, + 390 + ], + "lines": [ + { + "bbox": [ + 309, + 342, + 386, + 390 + ], + "spans": [ + { + "bbox": [ + 309, + 342, + 386, + 390 + ], + "type": "image", + "image_path": "7f7b2a86c660f1d9e4f4a168a2f0a93519799ea7f9397799cbe837b292d5eee0.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 309, + 390, + 386, + 439 + ], + "blocks": [ + { + "bbox": [ + 309, + 390, + 386, + 439 + ], + "lines": [ + { + "bbox": [ + 309, + 390, + 386, + 439 + ], + "spans": [ + { + "bbox": [ + 309, + 390, + 386, + 439 + ], + "type": "image", + "image_path": "da101e16f6745e92d23f32670ec8a941c29b11bca10a37913f01ef50fcdaa2b3.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 323, + 441, + 373, + 449 + ], + "lines": [ + { + "bbox": [ + 323, + 441, + 373, + 449 + ], + "spans": [ + { + "bbox": [ + 323, + 441, + 373, + 449 + ], + "type": "text", + "content": "(d) IM-NHR [51]" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 387, + 342, + 464, + 390 + ], + "blocks": [ + { + "bbox": [ + 387, + 342, + 464, + 390 + ], + "lines": [ + { + "bbox": [ + 387, + 342, + 464, + 390 + ], + "spans": [ + { + "bbox": [ + 387, + 342, + 464, + 390 + ], + "type": "image", + "image_path": "d2f8fd9bc6e0c58035da4fe6674f003084b11b227b4717759e534de6274d269f.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 387, + 390, + 463, + 439 + ], + "blocks": [ + { + "bbox": [ + 387, + 390, + 463, + 439 + ], + "lines": [ + { + "bbox": [ + 387, + 390, + 463, + 439 + ], + "spans": [ + { + "bbox": [ + 387, + 390, + 463, + 439 + ], + "type": "image", + "image_path": "ea4291327498c84a7c199af06e65c2f41c2af6e6db82312b0a4c50433af740ba.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 390, + 441, + 466, + 449 + ], + "lines": [ + { + "bbox": [ + 390, + 441, + 466, + 449 + ], + "spans": [ + { + "bbox": [ + 390, + 441, + 466, + 449 + ], + "type": "text", + "content": "(e) GE-UNREAL-NH [31]" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 465, + 342, + 543, + 390 + ], + "blocks": [ + { + "bbox": [ + 465, + 342, + 543, + 390 + ], + "lines": [ + { + "bbox": [ + 465, + 342, + 543, + 390 + ], + "spans": [ + { + "bbox": [ + 465, + 342, + 543, + 390 + ], + "type": "image", + "image_path": "43294787656ebc2ebe7ddfbc83fe3e712ab1f3e458b69498201689087745bab9.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 465, + 390, + 543, + 439 + ], + "blocks": [ + { + "bbox": [ + 465, + 390, + 543, + 439 + ], + "lines": [ + { + "bbox": [ + 465, + 390, + 543, + 439 + ], + "spans": [ + { + "bbox": 
[ + 465, + 390, + 543, + 439 + ], + "type": "image", + "image_path": "4d23e497a790337f1a70700cbfe7d6ee70f99a49c88ba1a00af7120260776ca5.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 488, + 441, + 512, + 449 + ], + "lines": [ + { + "bbox": [ + 488, + 441, + 512, + 449 + ], + "spans": [ + { + "bbox": [ + 488, + 441, + 512, + 449 + ], + "type": "text", + "content": "(f) Ours" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 457, + 545, + 546 + ], + "lines": [ + { + "bbox": [ + 304, + 457, + 545, + 546 + ], + "spans": [ + { + "bbox": [ + 304, + 457, + 545, + 546 + ], + "type": "text", + "content": "Figure 1. Visualization of real-world dehazed images, where the \"IM\" and \"GE-\" denote the dehazed results obtained by training on imaging model (IM) and game engine (GE) simulated datasets, respectively. The curve figure represents the pixel histogram, where the " + }, + { + "bbox": [ + 304, + 457, + 545, + 546 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 457, + 545, + 546 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 457, + 545, + 546 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 304, + 457, + 545, + 546 + ], + "type": "text", + "content": " coordinates represent the pixel values and corresponding numbers, respectively. The " + }, + { + "bbox": [ + 304, + 457, + 545, + 546 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 457, + 545, + 546 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 457, + 545, + 546 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 304, + 457, + 545, + 546 + ], + "type": "text", + "content": " coordinates of the bar figure represent the color channel and the corresponding average pixel value, respectively." + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 568, + 545, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 568, + 545, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 568, + 545, + 640 + ], + "type": "text", + "content": "53] have achieved impressive performance. Various effective DaSID algorithms have been proposed and verified on benchmark daytime datasets [21]. However, these DaSID algorithms are designed for the properties of daytime hazy and haze-free images, without taking into account the characteristics of nighttime hazy and haze-free images." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 304, + 642, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 546, + 713 + ], + "type": "text", + "content": "Currently, NiSID research is divided into two types, namely non-deep learning-based NiSID and deep learning-based NiSID. On the one hand, the prior hypotheses and statistical laws are explored [50, 51]. The maximum reflectance prior to estimate the varying ambient illumination is proposed by [50]. 
The illumination estimation, color" + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "type": "text", + "content": "*Corresponding author" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "2631" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 192 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 192 + ], + "type": "text", + "content": "correction and image prior are integrated by [49]. On the other hand, the deep learning-based architectures are designed for the NiSID task [14, 31]. Liu et al. [31] combine the dark channel and bright channel prior with the Transformer mechanism [32] into an end-to-end training flow. The gradient-adaptive convolution and glow pair synthesis are designed by Jin et al. [14]. Existing learning-based algorithms have achieved remarkable performance on synthetic datasets. However, these methods still lack consideration of the characteristics of nighttime hazy images." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 192, + 288, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 192, + 288, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 192, + 288, + 228 + ], + "type": "text", + "content": "During the day, the main source of imaging light is sunlight [7]. 
The formation of the daytime hazy image can be described by the atmospheric scattering model [7] as" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 93, + 235, + 287, + 248 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 235, + 287, + 248 + ], + "spans": [ + { + "bbox": [ + 93, + 235, + 287, + 248 + ], + "type": "interline_equation", + "content": "I (a) = J (a) t (a) + A (a) (1 - t (a)), \\tag {1}", + "image_path": "524f24e179b5320e62b8a57b31a82d36d215191948441d6b19fc96d26765a680.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "inline_equation", + "content": "I(a)" + }, + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "inline_equation", + "content": "J(a)" + }, + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "inline_equation", + "content": "t(a)" + }, + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "inline_equation", + "content": "A(a)" + }, + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "text", + "content": " denote the hazy image, clear image, transmission map and global atmospheric light, respectively. The " + }, + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 46, + 255, + 287, + 304 + ], + "type": "text", + "content": " means the pixel location. Meanwhile, a widely used physical model [16, 18] in the NiSID task is" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 310, + 287, + 324 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 310, + 287, + 324 + ], + "spans": [ + { + "bbox": [ + 54, + 310, + 287, + 324 + ], + "type": "interline_equation", + "content": "I (a) = J (a) t (a) + A (a) (1 - t (a)) + L _ {s} (a) * \\varkappa (a), \\tag {2}", + "image_path": "94728527086e9b9cceb98df8780cddbe06c7cee6f6c669583ff5db3fa8716a10.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 331, + 287, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 331, + 287, + 402 + ], + "spans": [ + { + "bbox": [ + 46, + 331, + 287, + 402 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 331, + 287, + 402 + ], + "type": "inline_equation", + "content": "L_{s}(a)" + }, + { + "bbox": [ + 46, + 331, + 287, + 402 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 331, + 287, + 402 + ], + "type": "inline_equation", + "content": "\\varkappa(a)" + }, + { + "bbox": [ + 46, + 331, + 287, + 402 + ], + "type": "text", + "content": " denote the light sources and atmospheric point spread function. As shown in Eq. 1 and Eq. 2, the main distinction between daytime and nighttime haze imaging is light sources [1, 4, 24, 29, 30, 41, 46], which we consider to be the main source of the difficulty. Specifically, two outstanding issues are considered as follows." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 403, + 289, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 403, + 289, + 594 + ], + "spans": [ + { + "bbox": [ + 47, + 403, + 289, + 594 + ], + "type": "text", + "content": "- Localized, Coupled and Frequency Inconsistent: As shown in Figure 1, multiple active light sources may exist simultaneously. Therefore, the distortion of nighttime images, namely the haze that is mainly generated by suspended particles and liquid water droplets, the glow that is mainly produced by active light sources and the noise that is mainly caused by low intensity, is usually localized. Meanwhile, these types of distortions are mixed throughout the image, which is coupled. Furthermore, the haze and glow will cause the loss of high-frequency signals, while the noise belongs to high-frequency disturbance signals [22] that needs to be eliminated. This means that these distortions have inconsistent frequency characteristics. In a word, a challenging issue is how to simultaneously handle distortions with localized, coupled and frequency inconsistent characteristics." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 594, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 594, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 594, + 289, + 714 + ], + "type": "text", + "content": "- Unrealistic Brightness Intensity: Nighttime hazy datasets based on real-world images synthesized by imaging model (IM) are difficult to simulate multiple active light sources, while nighttime hazy datasets based on game engine (GE) cannot perfectly reproduce the harmonious brightness of real-world nighttime scenes. As we observed in Figure 1, the dehazed images obtained under IM datasets still suffer from the glow and haze that caused by multiple light sources, but the overall brightness is realistic. The dehazed images obtained under GE" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 547, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 547, + 120 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 547, + 120 + ], + "type": "text", + "content": "dataset show less haze and glow, but the scene brightness is unrealistic. In a word, an unsolved problem faced by data-driven algorithms is how to suppress haze and glow while achieving realistic brightness." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 121, + 547, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 121, + 547, + 312 + ], + "spans": [ + { + "bbox": [ + 304, + 121, + 547, + 312 + ], + "type": "text", + "content": "Therefore, we propose a semi-supervised dehazing framework that can be used for the real-world NiSID task. Firstly, the local attention [32] is adopted to learn the inductive bias in the spatial domain to suppress local distortions. A frequency spectrum dynamic filtering strategy is designed to handle distortions with inconsistent frequency characteristics. Considering the coupled of these distortions, the spatial and frequency information are integrated as a bidomain interaction module for feature extraction and image reconstruction. Secondly, aiming at suppressing distortions while achieving realistic brightness. The simulation data provided by the game engine is utilized to generate pseudo labels that can suppress haze and glow for retraining process. 
Then, real-world hazy images are adopted as brightness-realistic signals for the realistic brightness constraint. Overall, the main contributions of this paper are as follows." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 313, + 545, + 552 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 306, + 313, + 545, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 313, + 545, + 384 + ], + "spans": [ + { + "bbox": [ + 306, + 313, + 545, + 384 + ], + "type": "text", + "content": "- We propose a spatial and frequency domain aware semi-supervised nighttime dehazing network (SFSNiD). SFS-NiD can remove nighttime haze that is accompanied by glow and noise. The experimental results on synthetic and real-world datasets show that the proposed method can achieve impressive performance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 384, + 545, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 384, + 545, + 468 + ], + "spans": [ + { + "bbox": [ + 306, + 384, + 545, + 468 + ], + "type": "text", + "content": "- We design a spatial and frequency domain information interaction (SFII) module to simultaneously handle the haze, glow and noise with localized, coupled and frequency inconsistent characteristics. The multi-channel amplitude and phase spectrums are dynamically filtered and aggregated. The spatial and frequency domain features are integrated by local attention." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 468, + 545, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 468, + 545, + 552 + ], + "spans": [ + { + "bbox": [ + 306, + 468, + 545, + 552 + ], + "type": "text", + "content": "- A retraining strategy and a local window-based brightness loss for semi-supervised training process are designed to suppress haze and glow while achieving realistic brightness. The retraining strategy is based on pseudo labels. The hazy image is divided into non-overlapping windows for the calculation of local brightness map to provide realistic brightness supervision." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 566, + 392, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 566, + 392, + 578 + ], + "spans": [ + { + "bbox": [ + 306, + 566, + 392, + 578 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 586, + 415, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 586, + 415, + 599 + ], + "spans": [ + { + "bbox": [ + 306, + 586, + 415, + 599 + ], + "type": "text", + "content": "2.1. Daytime Dehazing" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": "A variety of effective dehazing algorithms for DaSID have been proposed. An ultra-high resolution dehazing method based on bilateral gird is proposed by 4KDehazing [54]. AECRNet [43] introduces the contrastive learning to the dehazing process. The prior information and visual attention mechanism are utilized in DeHamer [9]. DF [38] designs an encoder-decoder architecture which totally based on multi-head self-attention [32]. 
MITNet [37] combines the mutual information-driven constraint and adaptive triple interaction" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2632" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 69, + 545, + 242 + ], + "blocks": [ + { + "bbox": [ + 50, + 69, + 545, + 242 + ], + "lines": [ + { + "bbox": [ + 50, + 69, + 545, + 242 + ], + "spans": [ + { + "bbox": [ + 50, + 69, + 545, + 242 + ], + "type": "image", + "image_path": "4790f658478349dda72da0fc88502959c577c4c4aa39a1319b2417d27580426a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 194, + 251, + 398, + 262 + ], + "lines": [ + { + "bbox": [ + 194, + 251, + 398, + 262 + ], + "spans": [ + { + "bbox": [ + 194, + 251, + 398, + 262 + ], + "type": "text", + "content": "Figure 2. The overall pipeline of the proposed SFSNiD." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 283, + 288, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 283, + 288, + 344 + ], + "spans": [ + { + "bbox": [ + 46, + 283, + 288, + 344 + ], + "type": "text", + "content": "strategy into a supervised training process. Although these DaSID algorithms have achieve impressive performance, they are not designed for the characteristics of nighttime hazy images, which may cause them to have certain limitations on the NiSID task [31]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 351, + 164, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 351, + 164, + 364 + ], + "spans": [ + { + "bbox": [ + 47, + 351, + 164, + 364 + ], + "type": "text", + "content": "2.2. Nighttime Dehazing" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 369, + 290, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 369, + 290, + 635 + ], + "spans": [ + { + "bbox": [ + 46, + 369, + 290, + 635 + ], + "type": "text", + "content": "Compared with DaSID, NiSID has received fewer attention. On the one hand, the prior hypotheses and statistical laws are utilized in the non-deep learning-based NiSID methods [50, 51]. A maximum reflectance prior is proposed by MRP [50], which providing a way to estimate the varying ambient illumination. An optimal-scale fusion-based method is designed by OSFD [51], which utilizes a parameter estimation dehazing flow. On the other hand, the data-driven strategies [23, 40] are adopted in the deep learning-based NiSID methods [14, 31, 45]. NightHazeFormer [31] combines the visual transformer and prior knowledge (dark channel and bright channel) into an end-to-end enhancement process. GAC [14] utilizes the angular point spread function to reduce the glow effect in nighttime scenes. Yan et al. [45] propose a strategy which decomposes the image into scene texture information and scene structure information. According to recent research, deep learning-based NiSID algorithms can achieve relatively better quantitative performance according to sufficient synthetic data. 
However, the haze, glow, and noise with localized, coupled and frequency inconsistent characteristics are not fully considered by these deep learning-based NiSID algorithms." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 645, + 107, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 645, + 107, + 657 + ], + "spans": [ + { + "bbox": [ + 47, + 645, + 107, + 657 + ], + "type": "text", + "content": "3. Methods" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": "The hazy domain and haze-free domain are marked as " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": ", respectively. The synthesized hazy and haze-free image datasets are denoted " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_X" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_Y" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": ", which contain " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": " images, respectively. The real-world hazy image and haze-" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "spans": [ + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": "free datasets are denoted as " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_X" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_Y" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": ", which include " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " images, respectively. The convolution operation is denoted as " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "C_t^k (\\cdot)" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": ", where the superscript " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " and subscript " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " denote the kernel size and stride, respectively. 
The " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "\\varpi (\\cdot),\\sigma (\\cdot),\\delta (\\cdot)" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "sf(\\cdot)" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " denote the global average pooling, LeakyReLU, sigmoid and softmax operations, respectively. The input hazy images and predicted dehazed images at three scales are marked " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "x_{i}^{s}\\in \\mathcal{D}_{X}" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "p_i^s" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " respectively, where " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "s\\in \\{0,1,2\\}" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": "-th example. The size of " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "x_{i}^{0},x_{i}^{1}" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "x_{i}^{2}" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "H\\times W\\times C,\\frac{H}{2}\\times \\frac{W}{2}\\times C" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "\\frac{H}{4}\\times \\frac{W}{4}\\times C" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": ", respectively. The " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "H,W" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " denote the height, width and number of channels, respectively. The size of " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "p_i^s" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " remains the same as " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "x_{i}^{s}" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": ". 
The network at scale " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": " is denoted as " + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "inline_equation", + "content": "\\Psi^s (\\cdot)" + }, + { + "bbox": [ + 304, + 283, + 547, + 440 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 444, + 418, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 444, + 418, + 456 + ], + "spans": [ + { + "bbox": [ + 305, + 444, + 418, + 456 + ], + "type": "text", + "content": "3.1. Network Structure" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 462, + 545, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 462, + 545, + 571 + ], + "spans": [ + { + "bbox": [ + 304, + 462, + 545, + 571 + ], + "type": "text", + "content": "The multi-scale structure [3] of the SFSNiD is shown in Figure 2. Two kinds of modules are included in the proposed network, namely (i) spatial and frequency information interaction (SFII) model, (ii) convolution input (ConvI), convolution output (ConvO), convolution downsampling (ConvD), and convolution upsampling (ConvU). The ConvI projects the image into the feature space, while ConvO does the opposite. ConvD reduces the length and width of the feature map by half, while ConvU does the opposite." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 576, + 545, + 589 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 576, + 545, + 589 + ], + "spans": [ + { + "bbox": [ + 305, + 576, + 545, + 589 + ], + "type": "text", + "content": "3.2. Spatial and Frequency Information Interaction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "text", + "content": "Preliminary. For a feature map " + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "inline_equation", + "content": "z\\in \\mathbb{R}^{\\widetilde{H}\\times \\widetilde{W}\\times \\widetilde{C}}" + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "text", + "content": " , where " + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "inline_equation", + "content": "\\widetilde{H}" + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "inline_equation", + "content": "\\widetilde{W}" + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "inline_equation", + "content": "\\widetilde{C}" + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "text", + "content": " denote the height, width and number of channels, respectively. 
We first project each of its channel " + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "inline_equation", + "content": "z_{\\widetilde{c}}" + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "text", + "content": " to the frequency domain by the Fourier [10] transformation " + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 304, + 594, + 545, + 646 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 651, + 545, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 651, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 317, + 651, + 545, + 685 + ], + "type": "interline_equation", + "content": "\\mathcal {F} \\left(z _ {\\widetilde {c}}\\right) (u, v) = \\sum_ {h = 0} ^ {\\widetilde {H} - 1} \\sum_ {w = 0} ^ {\\widetilde {W} - 1} z _ {\\widetilde {c}} (h, w) e ^ {- j 2 \\pi \\left(\\frac {h}{H} u + \\frac {w}{W} v\\right)}, \\tag {3}", + "image_path": "d7b9c30b824914fdf114257921b82c69a5f7ccce70588ad880b3e6759bc13827.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "(h,w)" + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "(u,v)" + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": " represent the coordinates in the spatial and frequency domain, respectively. The " + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\widetilde{c} \\in" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2633" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 70, + 283, + 200 + ], + "blocks": [ + { + "bbox": [ + 52, + 70, + 283, + 200 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 283, + 200 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 283, + 200 + ], + "type": "image", + "image_path": "e56bd87343b14a8a98a0392a8affc67aa1fa03c389ced00931675553659cf27c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 208, + 256, + 219 + ], + "lines": [ + { + "bbox": [ + 77, + 208, + 256, + 219 + ], + "spans": [ + { + "bbox": [ + 77, + 208, + 256, + 219 + ], + "type": "text", + "content": "Figure 3. The sub-modules of the proposed SFII." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 52, + 230, + 282, + 330 + ], + "blocks": [ + { + "bbox": [ + 52, + 230, + 282, + 330 + ], + "lines": [ + { + "bbox": [ + 52, + 230, + 282, + 330 + ], + "spans": [ + { + "bbox": [ + 52, + 230, + 282, + 330 + ], + "type": "image", + "image_path": "a8e6dd8c397caabedf982609fed5a59bd0b4456e3ab6b59affbe254348d996b9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 65, + 339, + 268, + 350 + ], + "lines": [ + { + "bbox": [ + 65, + 339, + 268, + 350 + ], + "spans": [ + { + "bbox": [ + 65, + 339, + 268, + 350 + ], + "type": "text", + "content": "Figure 4. The overall architecture of the proposed SFII." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "inline_equation", + "content": "\\{0,1,\\dots,\\widetilde{C}\\}" + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "text", + "content": " denotes the channel index. Correspondingly, the " + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^{-1}" + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "text", + "content": " is defined as the inverse Fourier transformation [55]. Then, the real part " + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{R}(z_{\\widetilde{c}})(u,v)" + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "text", + "content": " and imaginary part " + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{I}(z_{\\widetilde{c}})(u,v)" + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "text", + "content": " can be obtained by " + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{F}(z_{\\widetilde{c}})(u,v)" + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "text", + "content": ". 
The amplitude spectrum " + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(z_{\\widetilde{c}})(u,v)" + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "text", + "content": " and phase spectrum " + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{P}(z_{\\widetilde{c}})(u,v)" + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{F}(z_{\\widetilde{c}})(u,v)" + }, + { + "bbox": [ + 47, + 369, + 287, + 441 + ], + "type": "text", + "content": " on the single channel can be obtained by" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 449, + 287, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 449, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 70, + 449, + 287, + 464 + ], + "type": "interline_equation", + "content": "\\mathcal {A} \\left(z _ {\\bar {c}}\\right) (u, v) = \\sqrt {\\mathcal {R} ^ {2} \\left(z _ {\\bar {c}}\\right) (u , v) + \\mathcal {I} ^ {2} \\left(z _ {\\bar {c}}\\right) (u , v)}, \\tag {4}", + "image_path": "5dff4ecd08b8c2ff7cdad95d00a3b4dca2b095bd5b49b2ad3d0b2697eabe5c36.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 91, + 479, + 287, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 479, + 287, + 506 + ], + "spans": [ + { + "bbox": [ + 91, + 479, + 287, + 506 + ], + "type": "interline_equation", + "content": "\\mathcal {P} \\left(z _ {\\widetilde {c}}\\right) (u, v) = \\arctan \\left[ \\frac {\\mathcal {I} \\left(z _ {\\widetilde {c}}\\right) (u , v)}{\\mathcal {R} \\left(z _ {\\widetilde {c}}\\right) (u , v)} \\right]. \\tag {5}", + "image_path": "d2f0c0463471cbb9b19a29e07d9f173745f3a0962b2153ffea291ff722b3226a.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 508, + 287, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 508, + 287, + 557 + ], + "spans": [ + { + "bbox": [ + 47, + 508, + 287, + 557 + ], + "type": "text", + "content": "The full channel amplitude spectrum " + }, + { + "bbox": [ + 47, + 508, + 287, + 557 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(z)(u,v)\\in" + }, + { + "bbox": [ + 47, + 508, + 287, + 557 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{\\widetilde{H}\\times \\widetilde{W}\\times \\widetilde{C}}" + }, + { + "bbox": [ + 47, + 508, + 287, + 557 + ], + "type": "text", + "content": " and phase spectrum " + }, + { + "bbox": [ + 47, + 508, + 287, + 557 + ], + "type": "inline_equation", + "content": "\\mathcal{P}(z)(u,v)\\in \\mathbb{R}^{\\widetilde{H}\\times \\widetilde{W}\\times \\widetilde{C}}" + }, + { + "bbox": [ + 47, + 508, + 287, + 557 + ], + "type": "text", + "content": " can be obtained by applying the Eq. 3, Eq. 4 and Eq. 5 on each channel of " + }, + { + "bbox": [ + 47, + 508, + 287, + 557 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 47, + 508, + 287, + 557 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 558, + 287, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 558, + 287, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 558, + 287, + 628 + ], + "type": "text", + "content": "Frequency Spectrum Dynamic Aggregation (FSDA). 
The haze, glow and noise with inconsistent frequency characteristics can be processed in the frequency domain by dynamic spectrum filter. The amplitude spectrum and phase spectrum of different channels are aggregated by the pointwise convolution as" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 94, + 637, + 287, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 637, + 287, + 651 + ], + "spans": [ + { + "bbox": [ + 94, + 637, + 287, + 651 + ], + "type": "interline_equation", + "content": "\\mathcal {S} ^ {*} (z) (u, v) = \\sigma \\left(C _ {1} ^ {1} (\\mathcal {S} (z) (u, v))\\right), \\tag {6}", + "image_path": "deddff4dbc73b73af5f7a6425eb8119f8cb91394a84452c708e9f6d2a96b4121.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 657, + 287, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 657, + 287, + 693 + ], + "spans": [ + { + "bbox": [ + 47, + 657, + 287, + 693 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 657, + 287, + 693 + ], + "type": "inline_equation", + "content": "S(z)(u,v)\\in \\{\\mathcal{A}(z)(u,v),\\mathcal{P}(z)(u,v)\\}" + }, + { + "bbox": [ + 47, + 657, + 287, + 693 + ], + "type": "text", + "content": ". To perform channel aggregation of spectral information, the channel weight [12] map " + }, + { + "bbox": [ + 47, + 657, + 287, + 693 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 47, + 657, + 287, + 693 + ], + "type": "text", + "content": " are calculated as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 63, + 700, + 287, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 700, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 63, + 700, + 287, + 715 + ], + "type": "interline_equation", + "content": "\\mathcal {W} (z) (u, v) = \\delta \\left(C _ {1} ^ {1} \\left(\\sigma \\left(C _ {1} ^ {1} \\left(\\varpi \\left(S ^ {*} (z) (u, v)\\right)\\right)\\right)\\right)\\right), \\tag {7}", + "image_path": "fcd7684657e4f0eca53348a2737c8b8b89aa2f34559b478db3ac926e6180f4ac.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 71, + 545, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 71, + 545, + 97 + ], + "spans": [ + { + "bbox": [ + 305, + 71, + 545, + 97 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 71, + 545, + 97 + ], + "type": "inline_equation", + "content": "\\mathcal{W}(z)(u,v)\\in \\mathbb{R}^{1\\times 1\\times \\widetilde{C}}" + }, + { + "bbox": [ + 305, + 71, + 545, + 97 + ], + "type": "text", + "content": ". 
Then the channel weight map is applied to the frequency spectrum as" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 334, + 105, + 545, + 118 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 105, + 545, + 118 + ], + "spans": [ + { + "bbox": [ + 334, + 105, + 545, + 118 + ], + "type": "interline_equation", + "content": "\\dot {S} (z) (u, v) = C _ {1} ^ {1} (\\mathcal {W} (z) (u, v) \\cdot \\mathcal {S} ^ {*} (z) (u, v)), \\tag {8}", + "image_path": "f94801adb5ec31bcfdfbbf641ad16b289436512cbefdadf787c2c3e52d18ab89.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 126, + 545, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 126, + 545, + 163 + ], + "spans": [ + { + "bbox": [ + 305, + 126, + 545, + 163 + ], + "type": "text", + "content": "where the spectrum filter (SF) of " + }, + { + "bbox": [ + 305, + 126, + 545, + 163 + ], + "type": "inline_equation", + "content": "\\dot{S} (z)(u,v)" + }, + { + "bbox": [ + 305, + 126, + 545, + 163 + ], + "type": "text", + "content": " is shown in Figure 3-(a). The filtering operation is performed by the residual connection, the filtered component is obtained by" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 345, + 170, + 545, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 170, + 545, + 186 + ], + "spans": [ + { + "bbox": [ + 345, + 170, + 545, + 186 + ], + "type": "interline_equation", + "content": "\\widetilde {\\mathcal {S}} (z) (u, v) = \\dot {\\mathcal {S}} (z) (u, v) + \\mathcal {S} (z) (u, v). \\tag {9}", + "image_path": "733b1b5ceb72cfc94a89a5232ddfd79dd6211edf9175e50674adc4e54f2145ae.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 194, + 545, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 194, + 545, + 232 + ], + "spans": [ + { + "bbox": [ + 305, + 194, + 545, + 232 + ], + "type": "text", + "content": "The filtered " + }, + { + "bbox": [ + 305, + 194, + 545, + 232 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathcal{A}}(z)(u,v)" + }, + { + "bbox": [ + 305, + 194, + 545, + 232 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 194, + 545, + 232 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathcal{P}}(z)(u,v)" + }, + { + "bbox": [ + 305, + 194, + 545, + 232 + ], + "type": "text", + "content": " can be obtained based on the processing flow from " + }, + { + "bbox": [ + 305, + 194, + 545, + 232 + ], + "type": "inline_equation", + "content": "S(z)(u,v)" + }, + { + "bbox": [ + 305, + 194, + 545, + 232 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 305, + 194, + 545, + 232 + ], + "type": "inline_equation", + "content": "\\widetilde{S}(z)(u,v)" + }, + { + "bbox": [ + 305, + 194, + 545, + 232 + ], + "type": "text", + "content": ". 
Then, the real and imaginary parts are obtained by" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 331, + 239, + 545, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 239, + 545, + 255 + ], + "spans": [ + { + "bbox": [ + 331, + 239, + 545, + 255 + ], + "type": "interline_equation", + "content": "\\widetilde {\\mathcal {R}} (z) (u, v) = \\widetilde {\\mathcal {A}} (z) (u, v) \\cdot \\cos \\widetilde {\\mathcal {P}} (z) (u, v), \\tag {10}", + "image_path": "ade6649994958fc618aa058b797575b3cd2b51347b59a31e06217400086c0502.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 340, + 273, + 545, + 288 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 273, + 545, + 288 + ], + "spans": [ + { + "bbox": [ + 340, + 273, + 545, + 288 + ], + "type": "interline_equation", + "content": "\\widetilde {\\mathcal {I}} (z) (u, v) = \\widetilde {\\mathcal {A}} (z) (u, v) \\cdot \\sin \\widetilde {\\mathcal {P}} (z) (u, v). \\tag {11}", + "image_path": "9f7596eba3dad647bd7051f47551c79f8ebe762d83040611d5c1c7073212d7ce.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 293, + 545, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 293, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 305, + 293, + 545, + 316 + ], + "type": "text", + "content": "After dynamic parameter learning in the frequency domain, we remap the feature map to the spatial domain as" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 352, + 327, + 545, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 352, + 327, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 352, + 327, + 545, + 342 + ], + "type": "interline_equation", + "content": "z _ {f} = \\mathcal {F} ^ {- 1} (\\widetilde {\\mathcal {R}} (z) (u, v), \\widetilde {\\mathcal {I}} (z) (u, v)), \\tag {12}", + "image_path": "9600fb63d4d85a0ab456bddc9a897845e317ea2ac6e25292a828dc49096a2c43.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 346, + 545, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 346, + 545, + 432 + ], + "spans": [ + { + "bbox": [ + 304, + 346, + 545, + 432 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 346, + 545, + 432 + ], + "type": "inline_equation", + "content": "z_{f} \\in \\mathbb{R}^{\\widetilde{H} \\times \\widetilde{W} \\times \\widetilde{C}}" + }, + { + "bbox": [ + 304, + 346, + 545, + 432 + ], + "type": "text", + "content": ". The Fourier transformation and inverse Fourier transformation can be implemented using DFT and IDFT algorithms [6, 11, 56]. Here, we define the calculation from Eq. 3 to Eq. 12 as frequency spectrum dynamic aggregation (FSDA), which represent the processing flow from " + }, + { + "bbox": [ + 304, + 346, + 545, + 432 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 304, + 346, + 545, + 432 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 346, + 545, + 432 + ], + "type": "inline_equation", + "content": "z_{f}" + }, + { + "bbox": [ + 304, + 346, + 545, + 432 + ], + "type": "text", + "content": " that is shown in Figure 3-(b). For convenience, the FSDA is denoted as " + }, + { + "bbox": [ + 304, + 346, + 545, + 432 + ], + "type": "inline_equation", + "content": "\\mathcal{FS}(\\cdot)" + }, + { + "bbox": [ + 304, + 346, + 545, + 432 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "spans": [ + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "text", + "content": "Frequency Domain Projection (FDP). To deal with distortions in the frequency domain, we first introduce frequency domain interactions before computing local inductive bias. For the input feature map " + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "inline_equation", + "content": "z \\in \\mathbb{R}^{\\widetilde{H} \\times \\widetilde{W} \\times \\widetilde{C}}" + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "text", + "content": ", it is processed by the layer normalization operation (" + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "inline_equation", + "content": "LN(\\cdot)" + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "text", + "content": ") [32] to obtain the normalized feature " + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "inline_equation", + "content": "z_{l} = LN(z)" + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "text", + "content": ". Then, the normalized feature " + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "inline_equation", + "content": "z_{l}" + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "text", + "content": " is projected into " + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "inline_equation", + "content": "Q_{f}" + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "text", + "content": " (query), " + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "inline_equation", + "content": "K_{f}" + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "text", + "content": " (key) and " + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "inline_equation", + "content": "V_{f}" + }, + { + "bbox": [ + 304, + 432, + 545, + 530 + ], + "type": "text", + "content": " (value) by the projection in the frequency domain as" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 538, + 545, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 538, + 545, + 552 + ], + "spans": [ + { + "bbox": [ + 314, + 538, + 545, + 552 + ], + "type": "interline_equation", + "content": "Q _ {f} = \\mathcal {F S} _ {Q} (z _ {l}), K _ {f} = \\mathcal {F S} _ {K} (z _ {l}), V _ {f} = \\mathcal {F S} _ {V} (z _ {l}), \\tag {13}", + "image_path": "907f77a750d1dd99fcd10ccf6da0ebcbab324cdc4126c82380d3ed7cb61b81b1.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 559, + 545, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 559, + 545, + 608 + ], + "spans": [ + { + "bbox": [ + 304, + 559, + 545, + 608 + ], + "type": "text", + "content": "where the " + }, + { + "bbox": [ + 304, + 559, + 545, + 608 + ], + "type": "inline_equation", + "content": "\\mathcal{FS}_Q(\\cdot),\\mathcal{FS}_K(\\cdot)" + }, + { + "bbox": [ + 304, + 559, + 545, + 608 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 559, + 545, + 608 + ], + "type": "inline_equation", + "content": "\\mathcal{FS}_V(\\cdot)" + }, + { + "bbox": [ + 304, + 559, + 545, + 608 + ], + "type": "text", + "content": " denote three independent projection operations with learnable parameters, respectively. 
The generation process of the " + }, + { + "bbox": [ + 304, + 559, + 545, + 608 + ], + "type": "inline_equation", + "content": "Q_{f},K_{f}" + }, + { + "bbox": [ + 304, + 559, + 545, + 608 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 559, + 545, + 608 + ], + "type": "inline_equation", + "content": "V_{f}" + }, + { + "bbox": [ + 304, + 559, + 545, + 608 + ], + "type": "text", + "content": " is denoted as the frequency domain projection (FDP)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 609, + 545, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 609, + 545, + 681 + ], + "spans": [ + { + "bbox": [ + 304, + 609, + 545, + 681 + ], + "type": "text", + "content": "Bidomain Local Perception (BLP). After obtaining the features " + }, + { + "bbox": [ + 304, + 609, + 545, + 681 + ], + "type": "inline_equation", + "content": "Q_{f}" + }, + { + "bbox": [ + 304, + 609, + 545, + 681 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 609, + 545, + 681 + ], + "type": "inline_equation", + "content": "K_{f}" + }, + { + "bbox": [ + 304, + 609, + 545, + 681 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 609, + 545, + 681 + ], + "type": "inline_equation", + "content": "V_{f}" + }, + { + "bbox": [ + 304, + 609, + 545, + 681 + ], + "type": "text", + "content": " which consider the information in frequency domain, we perform spatial domain learning on the features from a local perspective. The self-attention [32] with local perception (LP) that is shown in Figure 3-(c) is computed within " + }, + { + "bbox": [ + 304, + 609, + 545, + 681 + ], + "type": "inline_equation", + "content": "8 \\times 8" + }, + { + "bbox": [ + 304, + 609, + 545, + 681 + ], + "type": "text", + "content": " non-overlapping windows as" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 319, + 688, + 545, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 688, + 545, + 716 + ], + "spans": [ + { + "bbox": [ + 319, + 688, + 545, + 716 + ], + "type": "interline_equation", + "content": "\\mathcal {A T} \\left(Q _ {f}, K _ {f}, V _ {f}\\right) = s f \\left(\\frac {Q _ {f} \\otimes K _ {f} ^ {T}}{\\sqrt {d}} + B\\right) \\otimes V _ {f}, \\tag {14}", + "image_path": "35ad94e436612bba30633a3498696eba7c541759bb9d69e087e6196fabdd7ca2.jpg" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2634" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": " denote the dimensionality and position bias, respectively. 
The " + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "inline_equation", + "content": "\\otimes" + }, + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": " denotes the matrix multiplication (MatMul). Information is transferred by the residual connection" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 108, + 121, + 287, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 121, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 108, + 121, + 287, + 135 + ], + "type": "interline_equation", + "content": "z ^ {*} = \\mathcal {A T} \\left(Q _ {f}, K _ {f}, V _ {f}\\right) + z, \\tag {15}", + "image_path": "3abe7f3c39b9e50208fe09186eade0a3024e33c13883dd0fed658a5a98b19762.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 141, + 287, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 141, + 287, + 165 + ], + "spans": [ + { + "bbox": [ + 47, + 141, + 287, + 165 + ], + "type": "text", + "content": "where the calculation from " + }, + { + "bbox": [ + 47, + 141, + 287, + 165 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 47, + 141, + 287, + 165 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 47, + 141, + 287, + 165 + ], + "type": "inline_equation", + "content": "z^{*}" + }, + { + "bbox": [ + 47, + 141, + 287, + 165 + ], + "type": "text", + "content": " is marked as bidomain local perception (BLP), which is shown in Figure 4-(a)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 166, + 287, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 166, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 47, + 166, + 287, + 274 + ], + "type": "text", + "content": "Bidomain Nonlinear Mapping (BNM). The computation of window attention does not provide nonlinear representation capabilities. Therefore, we use the frequency and spatial domain interaction module to learn nonlinear mapping. The FSDA is used to provide the frequency domain information. Besides, a residual block which consists of " + }, + { + "bbox": [ + 47, + 166, + 287, + 274 + ], + "type": "inline_equation", + "content": "C_1^3 (\\sigma (C_1^3 (\\cdot)))" + }, + { + "bbox": [ + 47, + 166, + 287, + 274 + ], + "type": "text", + "content": " is used to provide the spatial interaction. 
The immediate feature " + }, + { + "bbox": [ + 47, + 166, + 287, + 274 + ], + "type": "inline_equation", + "content": "z^{*}" + }, + { + "bbox": [ + 47, + 166, + 287, + 274 + ], + "type": "text", + "content": " is fed into the frequency nonlinear mapping branch and spatial nonlinear mapping branch, as" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 286, + 287, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 286, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 130, + 286, + 287, + 300 + ], + "type": "interline_equation", + "content": "z _ {f n} = \\mathcal {F S} _ {A} \\left(z ^ {*}\\right), \\tag {16}", + "image_path": "eca74edc4017b084606296c93e49fbed5be54ec24b936cf2788565fd9adbfabd.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 119, + 319, + 287, + 334 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 319, + 287, + 334 + ], + "spans": [ + { + "bbox": [ + 119, + 319, + 287, + 334 + ], + "type": "interline_equation", + "content": "z _ {s n} = C _ {1} ^ {3} \\left(\\sigma \\left(C _ {1} ^ {3} \\left(z ^ {*}\\right)\\right)\\right), \\tag {17}", + "image_path": "ae2330cbf354cd43629b5848510b0901de5ff83164d0f1b98895f6879e22453b.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 339, + 287, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 339, + 287, + 389 + ], + "spans": [ + { + "bbox": [ + 47, + 339, + 287, + 389 + ], + "type": "text", + "content": "where the subscript " + }, + { + "bbox": [ + 47, + 339, + 287, + 389 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 47, + 339, + 287, + 389 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 47, + 339, + 287, + 389 + ], + "type": "inline_equation", + "content": "\\mathcal{FS}_A(\\cdot)" + }, + { + "bbox": [ + 47, + 339, + 287, + 389 + ], + "type": "text", + "content": " means the frequency interaction performed after the attention operation. Then frequency domain and spatial domain features are fused as the final nonlinear mapping output by" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 103, + 397, + 287, + 412 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 397, + 287, + 412 + ], + "spans": [ + { + "bbox": [ + 103, + 397, + 287, + 412 + ], + "type": "interline_equation", + "content": "\\widetilde {z} = C _ {1} ^ {3} \\left(\\left[ z _ {f n}, z _ {s n} + z ^ {*} \\right]\\right) + z ^ {*}, \\tag {18}", + "image_path": "6d71f45586285003315d8afc657e390f8d14d6bc3238860c80ad7189f6f691f7.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 422, + 287, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 422, + 287, + 457 + ], + "spans": [ + { + "bbox": [ + 47, + 422, + 287, + 457 + ], + "type": "text", + "content": "where the " + }, + { + "bbox": [ + 47, + 422, + 287, + 457 + ], + "type": "inline_equation", + "content": "[\\cdot, \\cdot]" + }, + { + "bbox": [ + 47, + 422, + 287, + 457 + ], + "type": "text", + "content": " denotes the channel concatenation. 
The calculation from " + }, + { + "bbox": [ + 47, + 422, + 287, + 457 + ], + "type": "inline_equation", + "content": "z^*" + }, + { + "bbox": [ + 47, + 422, + 287, + 457 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 47, + 422, + 287, + 457 + ], + "type": "inline_equation", + "content": "\\widetilde{z}" + }, + { + "bbox": [ + 47, + 422, + 287, + 457 + ], + "type": "text", + "content": " is marked as the bidomain nonlinear mapping (BNM), which is shown in Figure 4-(b)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 458, + 287, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 458, + 287, + 518 + ], + "spans": [ + { + "bbox": [ + 47, + 458, + 287, + 518 + ], + "type": "text", + "content": "Spatial and Frequency Information Interaction (SFII). As shown in Figure 4, the calculation process from " + }, + { + "bbox": [ + 47, + 458, + 287, + 518 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 47, + 458, + 287, + 518 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 47, + 458, + 287, + 518 + ], + "type": "inline_equation", + "content": "\\widetilde{z}" + }, + { + "bbox": [ + 47, + 458, + 287, + 518 + ], + "type": "text", + "content": " is called spatial and frequency information interaction (SFII). The proposed SFII aggregates spatial domain information and frequency domain information from a local perspective." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 527, + 200, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 200, + 540 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 200, + 540 + ], + "type": "text", + "content": "3.3. Spatial and Frequency Loss" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 546, + 287, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 546, + 287, + 594 + ], + "spans": [ + { + "bbox": [ + 47, + 546, + 287, + 594 + ], + "type": "text", + "content": "The supervised loss consists of two parts, namely the pixel-by-pixel loss in geometric space and the frequency domain loss obtained by Fourier transform [3]. 
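The two supervision terms defined in Eqs. (19) and (20) below are plain L1 distances computed per scale, once on pixels and once on the Fourier spectra. A minimal PyTorch sketch, assuming the network returns a list of three outputs (one per scale) and using mean reduction; the reduction and the complex-magnitude comparison are implementation assumptions, not statements from the paper:

```python
import torch

def bidomain_supervised_loss(preds, targets, lambda_g=1.0, lambda_f=1.0):
    """preds / targets: lists of three tensors (N, 3, H_s, W_s), one per scale s in {0, 1, 2}."""
    loss_g = preds[0].new_zeros(())
    loss_f = preds[0].new_zeros(())
    for pred, gt in zip(preds, targets):
        # Eq. (19): pixel-wise L1 loss in the spatial (geometric) domain.
        loss_g = loss_g + lambda_g * (pred - gt).abs().mean()
        # Eq. (20): L1 loss between the Fourier transforms of prediction and target.
        fp = torch.fft.fft2(pred, norm="ortho")
        fg = torch.fft.fft2(gt, norm="ortho")
        loss_f = loss_f + lambda_f * (fp - fg).abs().mean()
    return loss_g, loss_f
```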
By sampling " + }, + { + "bbox": [ + 47, + 546, + 287, + 594 + ], + "type": "inline_equation", + "content": "x_{i}^{s} \\in \\mathcal{D}_{X}" + }, + { + "bbox": [ + 47, + 546, + 287, + 594 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 546, + 287, + 594 + ], + "type": "inline_equation", + "content": "y_{i}^{s} \\in \\mathcal{D}_{Y}" + }, + { + "bbox": [ + 47, + 546, + 287, + 594 + ], + "type": "text", + "content": ", the losses calculated at three scales are" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 90, + 604, + 287, + 637 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 604, + 287, + 637 + ], + "spans": [ + { + "bbox": [ + 90, + 604, + 287, + 637 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {G} = \\sum_ {s = 0} ^ {2} \\lambda_ {g} \\cdot \\sum_ {i = 0} ^ {N - 1} | | \\Psi^ {s} \\left(x _ {i} ^ {s}\\right) - y _ {i} ^ {s} | | _ {1}, \\tag {19}", + "image_path": "fac90f433bf14a40873e4021f0d25c4f4b1534830e520a5d5e808ef2b2097586.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 66, + 659, + 287, + 692 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 659, + 287, + 692 + ], + "spans": [ + { + "bbox": [ + 66, + 659, + 287, + 692 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {F} = \\sum_ {s = 0} ^ {2} \\lambda_ {f} \\cdot \\sum_ {i = 0} ^ {N - 1} \\left| \\left| \\mathcal {F} \\left(\\Psi^ {s} \\left(x _ {i} ^ {s}\\right)\\right) - \\mathcal {F} \\left(y _ {i} ^ {s}\\right) \\right| \\right| _ {1}, \\tag {20}", + "image_path": "7d4543b00112ded613bd09bb56be1c89faed91c0e0d16fb473c0c7a874fe7e2c.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 701, + 208, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 208, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 208, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 701, + 208, + 714 + ], + "type": "inline_equation", + "content": "\\lambda_{g}" + }, + { + "bbox": [ + 47, + 701, + 208, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 701, + 208, + 714 + ], + "type": "inline_equation", + "content": "\\lambda_{f}" + }, + { + "bbox": [ + 47, + 701, + 208, + 714 + ], + "type": "text", + "content": " denote weight factors." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 72, + 519, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 519, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 519, + 84 + ], + "type": "text", + "content": "3.4. Retraining and Realistic Brightness Loss" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 91, + 545, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 91, + 545, + 198 + ], + "spans": [ + { + "bbox": [ + 305, + 91, + 545, + 198 + ], + "type": "text", + "content": "Pseudo-label Fusion Retraining. There are inherent domain discrepancy between synthetic hazy images and real-world hazy images. Therefore, we adopt a retraining strategy which utilizes peso labels. Pseudo labels " + }, + { + "bbox": [ + 305, + 91, + 545, + 198 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_Y^P" + }, + { + "bbox": [ + 305, + 91, + 545, + 198 + ], + "type": "text", + "content": " are obtained based on the model trained on synthetic datasets. 
We put the original synthetic dataset " + }, + { + "bbox": [ + 305, + 91, + 545, + 198 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{D}_X,\\mathcal{D}_Y\\}" + }, + { + "bbox": [ + 305, + 91, + 545, + 198 + ], + "type": "text", + "content": " and the pseudo-labeled dataset " + }, + { + "bbox": [ + 305, + 91, + 545, + 198 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{R}_X,\\mathcal{R}_Y^P\\}" + }, + { + "bbox": [ + 305, + 91, + 545, + 198 + ], + "type": "text", + "content": " into the network simultaneously for retraining. Supervised losses Eq. 19 and Eq. 20 are used in the retraining process at three scales." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "spans": [ + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "text", + "content": "Prior Brightness Constraint. We conduct a quantitative statistics on the brightness of nighttime hazy and clear images provided by [14]. The brightness intensity corresponding to " + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "inline_equation", + "content": "x_{i}^{0} \\in \\mathcal{R}_{X}" + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "inline_equation", + "content": "y_{i}^{0} \\in \\mathcal{R}_{Y}" + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "inline_equation", + "content": "\\mu (x_i^0)" + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "inline_equation", + "content": "\\mu (y_i^0)" + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "text", + "content": ", respectively, where " + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "inline_equation", + "content": "\\mu (\\cdot)" + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "text", + "content": " denote the average pixel value across three channels. We randomly select " + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "inline_equation", + "content": "M = \\frac{M}{2}" + }, + { + "bbox": [ + 305, + 199, + 545, + 283 + ], + "type": "text", + "content": " images from the dataset multiple times, and we get" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 374, + 294, + 545, + 328 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 294, + 545, + 328 + ], + "spans": [ + { + "bbox": [ + 374, + 294, + 545, + 328 + ], + "type": "interline_equation", + "content": "\\sum_ {i = 0} ^ {\\hat {M} - 1} \\mu \\left(y _ {i} ^ {0}\\right) < \\sum_ {i = 0} ^ {\\hat {M} - 1} \\mu \\left(x _ {i} ^ {0}\\right). 
\\tag {21}", + "image_path": "d531492a30d2a059691679c2a2464c4ff9419f9561c01d0a4bcc6d676fe2f83b.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 339, + 545, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 339, + 545, + 374 + ], + "spans": [ + { + "bbox": [ + 305, + 339, + 545, + 374 + ], + "type": "text", + "content": "Therefore, we assume the brightness of the dehazed image " + }, + { + "bbox": [ + 305, + 339, + 545, + 374 + ], + "type": "inline_equation", + "content": "p_i^s" + }, + { + "bbox": [ + 305, + 339, + 545, + 374 + ], + "type": "text", + "content": " should be lower than that of the " + }, + { + "bbox": [ + 305, + 339, + 545, + 374 + ], + "type": "inline_equation", + "content": "x_i^s" + }, + { + "bbox": [ + 305, + 339, + 545, + 374 + ], + "type": "text", + "content": ". This assumption is consistent with the imaging model Eq. 2." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "spans": [ + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "text", + "content": "Local Brightness Map (LBM). We divide the image into non-overlapping local windows. The width and height of each square window is denoted as " + }, + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "inline_equation", + "content": "\\gamma^s" + }, + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "inline_equation", + "content": "s \\in \\{0,1,2\\}" + }, + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "text", + "content": ". The value in " + }, + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "inline_equation", + "content": "\\underline{\\mathrm{local~brightness}}" + }, + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "text", + "content": " map (LBM) " + }, + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "inline_equation", + "content": "\\varphi_{x_i^s}" + }, + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "text", + "content": " that corresponding to " + }, + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "inline_equation", + "content": "x_i^s" + }, + { + "bbox": [ + 305, + 376, + 545, + 436 + ], + "type": "text", + "content": " is obtained by" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 447, + 544, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 447, + 544, + 495 + ], + "spans": [ + { + "bbox": [ + 315, + 447, + 544, + 495 + ], + "type": "interline_equation", + "content": "\\varphi_ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w}) = \\frac {1}{3 (\\gamma^ {s}) ^ {2}} \\sum_ {c = 0} ^ {2} \\sum_ {h = \\hat {h} \\cdot \\gamma^ {s}} ^ {(\\hat {h} + 1) \\cdot \\gamma^ {s}} \\sum_ {w = \\hat {w} \\cdot \\gamma^ {s}} ^ {(\\hat {w} + 1) \\cdot \\gamma^ {s}} x _ {i} ^ {s} (h, w, c), \\tag {22}", + "image_path": "0b61fcca6c388b8fa1c517add3b9d211cfd716f1d12e47e1da4e300b9de3b214.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "spans": [ + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "inline_equation", + "content": "(\\hat{h},\\hat{w})" + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": 
"text", + "content": " and " + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "inline_equation", + "content": "(h,w)" + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "text", + "content": " denote the pixel index of " + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\varphi_{x_i^s}" + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "inline_equation", + "content": "x_{i}^{s}" + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "text", + "content": ", respectively. Meanwhile, the local brightness map " + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\varphi_{p_i^s}" + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "text", + "content": " corresponding to " + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "inline_equation", + "content": "p_i^s" + }, + { + "bbox": [ + 305, + 495, + 545, + 579 + ], + "type": "text", + "content": " is defined in the same way. As shown in Figure 2-(c), the locations with high brightness may be active light sources or objects close to the light source, while the locations with low brightness may be objects and backgrounds far away from the light source." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 580, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 580, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 305, + 580, + 545, + 689 + ], + "type": "text", + "content": "Realistic Brightness Loss. The brightness of hazy images is approximately globally realistic, so it can be used to supervise the brightness of dehazed images. As we observed in Eq. 21, the brightness of the dehazed image should be lower than that of the hazy image. 
Meanwhile, in order to ensure the relative numerical relationship between areas with high brightness and low brightness before and after dehazing, we use a power function with monotonically increasing properties to process the " + }, + { + "bbox": [ + 305, + 580, + 545, + 689 + ], + "type": "inline_equation", + "content": "\\varphi_{x_i^s}(\\hat{h},\\hat{w})" + }, + { + "bbox": [ + 305, + 580, + 545, + 689 + ], + "type": "text", + "content": " , as" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 369, + 700, + 545, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 700, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 369, + 700, + 545, + 715 + ], + "type": "interline_equation", + "content": "\\widetilde {\\varphi} _ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w}) = \\left(\\varphi_ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w})\\right) ^ {\\kappa}, \\tag {23}", + "image_path": "720b4709658e3434f12ce79d3776f9b8a569ec456ab982b1f6862539816f12c3.jpg" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2635" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 69, + 121, + 114 + ], + "blocks": [ + { + "bbox": [ + 51, + 69, + 121, + 114 + ], + "lines": [ + { + "bbox": [ + 51, + 69, + 121, + 114 + ], + "spans": [ + { + "bbox": [ + 51, + 69, + 121, + 114 + ], + "type": "image", + "image_path": "40ddf7cd4c54a9cf836c96025155f65351a206a6a894ae6a19bf88ba1ab297c6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 73, + 116, + 102, + 125 + ], + "lines": [ + { + "bbox": [ + 73, + 116, + 102, + 125 + ], + "spans": [ + { + "bbox": [ + 73, + 116, + 102, + 125 + ], + "type": "text", + "content": "(a) Hazy" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 122, + 70, + 192, + 114 + ], + "blocks": [ + { + "bbox": [ + 122, + 70, + 192, + 114 + ], + "lines": [ + { + "bbox": [ + 122, + 70, + 192, + 114 + ], + "spans": [ + { + "bbox": [ + 122, + 70, + 192, + 114 + ], + "type": "image", + "image_path": "83755efa09e8f7af77f754f621b55d111c8c321ef3b2190616e60f5e05c34749.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 141, + 116, + 171, + 125 + ], + "lines": [ + { + "bbox": [ + 141, + 116, + 171, + 125 + ], + "spans": [ + { + "bbox": [ + 141, + 116, + 171, + 125 + ], + "type": "text", + "content": "(b) MRP" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 192, + 70, + 261, + 114 + ], + "blocks": [ + { + "bbox": [ + 192, + 70, + 261, + 114 + ], + "lines": [ + { + "bbox": [ + 192, + 70, + 261, + 114 + ], + "spans": [ + { + "bbox": [ + 192, + 70, + 261, + 114 + ], + "type": "image", + "image_path": "13482730aff8388d55b14453daaafca013fb237da171bad07d8fdeb66b3ae61a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 116, + 243, + 125 + ], + "lines": [ + { + "bbox": [ + 211, + 116, + 243, + 125 + ], + "spans": [ + { + "bbox": [ + 211, + 116, + 243, + 125 + ], + "type": "text", + "content": "(c) OSFD" + } + ] + } + ], + "index": 
5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 262, + 70, + 332, + 114 + ], + "blocks": [ + { + "bbox": [ + 262, + 70, + 332, + 114 + ], + "lines": [ + { + "bbox": [ + 262, + 70, + 332, + 114 + ], + "spans": [ + { + "bbox": [ + 262, + 70, + 332, + 114 + ], + "type": "image", + "image_path": "a409ab44e08a21ecfd2a339aaed8175cb808bf4b2daae7715984762e47d86ac9.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 116, + 307, + 125 + ], + "lines": [ + { + "bbox": [ + 282, + 116, + 307, + 125 + ], + "spans": [ + { + "bbox": [ + 282, + 116, + 307, + 125 + ], + "type": "text", + "content": "(d) GD" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 332, + 70, + 402, + 114 + ], + "blocks": [ + { + "bbox": [ + 332, + 70, + 402, + 114 + ], + "lines": [ + { + "bbox": [ + 332, + 70, + 402, + 114 + ], + "spans": [ + { + "bbox": [ + 332, + 70, + 402, + 114 + ], + "type": "image", + "image_path": "c1f76453a20f05f5f43ed1b0ac2d75da9dffab37917caa9f4bf331e0e437ff1f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 347, + 116, + 387, + 125 + ], + "lines": [ + { + "bbox": [ + 347, + 116, + 387, + 125 + ], + "spans": [ + { + "bbox": [ + 347, + 116, + 387, + 125 + ], + "type": "text", + "content": "(e) MSBDN" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 402, + 70, + 472, + 114 + ], + "blocks": [ + { + "bbox": [ + 402, + 70, + 472, + 114 + ], + "lines": [ + { + "bbox": [ + 402, + 70, + 472, + 114 + ], + "spans": [ + { + "bbox": [ + 402, + 70, + 472, + 114 + ], + "type": "image", + "image_path": "f8c73b47bb3ec9bcaef2918b8912874d8f7a51461a7fc7e686e9be25677e448e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 412, + 116, + 464, + 125 + ], + "lines": [ + { + "bbox": [ + 412, + 116, + 464, + 125 + ], + "spans": [ + { + "bbox": [ + 412, + 116, + 464, + 125 + ], + "type": "text", + "content": "(f) 4KDehazing" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 472, + 70, + 542, + 114 + ], + "blocks": [ + { + "bbox": [ + 472, + 70, + 542, + 114 + ], + "lines": [ + { + "bbox": [ + 472, + 70, + 542, + 114 + ], + "spans": [ + { + "bbox": [ + 472, + 70, + 542, + 114 + ], + "type": "image", + "image_path": "b632aa6f0474872ef75965211d96e0a544b100aa6ebc1d9c0a4f3e4bdf7383a6.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 486, + 116, + 532, + 125 + ], + "lines": [ + { + "bbox": [ + 486, + 116, + 532, + 125 + ], + "spans": [ + { + "bbox": [ + 486, + 116, + 532, + 125 + ], + "type": "text", + "content": "(g) AECRNet" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 51, + 127, + 121, + 172 + ], + "blocks": [ + { + "bbox": [ + 51, + 127, + 121, + 172 + ], + "lines": [ + { + "bbox": [ + 51, + 127, + 121, + 172 + ], + "spans": [ + { + "bbox": [ + 51, + 127, + 121, + 172 + ], + "type": "image", + "image_path": "249dfd533549490ba31093efaa3840ad4591499dea329f7b686588a9af10a2ef.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 64, + 174, + 109, + 183 + ], + "lines": [ + { + "bbox": [ + 64, + 174, + 109, + 
183 + ], + "spans": [ + { + "bbox": [ + 64, + 174, + 109, + 183 + ], + "type": "text", + "content": "(h) DeHamer" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 122, + 127, + 192, + 172 + ], + "blocks": [ + { + "bbox": [ + 122, + 127, + 192, + 172 + ], + "lines": [ + { + "bbox": [ + 122, + 127, + 192, + 172 + ], + "spans": [ + { + "bbox": [ + 122, + 127, + 192, + 172 + ], + "type": "image", + "image_path": "99d2751b8712dcf5829b1a521e15470b1abd9f553fc12e655f7ec70d11cbb498.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 174, + 173, + 183 + ], + "lines": [ + { + "bbox": [ + 137, + 174, + 173, + 183 + ], + "spans": [ + { + "bbox": [ + 137, + 174, + 173, + 183 + ], + "type": "text", + "content": "(i) FSDGN" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 192, + 127, + 261, + 172 + ], + "blocks": [ + { + "bbox": [ + 192, + 127, + 261, + 172 + ], + "lines": [ + { + "bbox": [ + 192, + 127, + 261, + 172 + ], + "spans": [ + { + "bbox": [ + 192, + 127, + 261, + 172 + ], + "type": "image", + "image_path": "72346426a5ce827757bce67b5bcc7cf2461a2a1df98eaae746ddd10d785c2d54.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 213, + 174, + 234, + 183 + ], + "lines": [ + { + "bbox": [ + 213, + 174, + 234, + 183 + ], + "spans": [ + { + "bbox": [ + 213, + 174, + 234, + 183 + ], + "type": "text", + "content": "(j) DF" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 262, + 127, + 332, + 172 + ], + "blocks": [ + { + "bbox": [ + 262, + 127, + 332, + 172 + ], + "lines": [ + { + "bbox": [ + 262, + 127, + 332, + 172 + ], + "spans": [ + { + "bbox": [ + 262, + 127, + 332, + 172 + ], + "type": "image", + "image_path": "3fe97d65e7bcd8f02a1493a13dd40d05a933c2d8fd19a8079e9380bf0dc63681.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 276, + 174, + 315, + 183 + ], + "lines": [ + { + "bbox": [ + 276, + 174, + 315, + 183 + ], + "spans": [ + { + "bbox": [ + 276, + 174, + 315, + 183 + ], + "type": "text", + "content": "(k) MITNet" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 332, + 127, + 402, + 172 + ], + "blocks": [ + { + "bbox": [ + 332, + 127, + 402, + 172 + ], + "lines": [ + { + "bbox": [ + 332, + 127, + 402, + 172 + ], + "spans": [ + { + "bbox": [ + 332, + 127, + 402, + 172 + ], + "type": "image", + "image_path": "e4e25aa749d0c8a0281cd2212413468edc5a58b9ceef5b8211ba847a2006134b.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 349, + 174, + 387, + 183 + ], + "lines": [ + { + "bbox": [ + 349, + 174, + 387, + 183 + ], + "spans": [ + { + "bbox": [ + 349, + 174, + 387, + 183 + ], + "type": "text", + "content": "(1)Fourmer" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 206, + 192, + 386, + 203 + ], + "lines": [ + { + "bbox": [ + 206, + 192, + 386, + 203 + ], + "spans": [ + { + "bbox": [ + 206, + 192, + 386, + 203 + ], + "type": "text", + "content": "Figure 5. Visual results on synthetic dataset [31]." 
+ } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 402, + 127, + 472, + 172 + ], + "blocks": [ + { + "bbox": [ + 402, + 127, + 472, + 172 + ], + "lines": [ + { + "bbox": [ + 402, + 127, + 472, + 172 + ], + "spans": [ + { + "bbox": [ + 402, + 127, + 472, + 172 + ], + "type": "image", + "image_path": "75c14a44593a3000f035b679ff4ab46f6e0bdcbaef14f03b7c2d0f01f6317f05.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 421, + 174, + 451, + 183 + ], + "lines": [ + { + "bbox": [ + 421, + 174, + 451, + 183 + ], + "spans": [ + { + "bbox": [ + 421, + 174, + 451, + 183 + ], + "type": "text", + "content": "(m) Ours" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 472, + 127, + 542, + 172 + ], + "blocks": [ + { + "bbox": [ + 472, + 127, + 542, + 172 + ], + "lines": [ + { + "bbox": [ + 472, + 127, + 542, + 172 + ], + "spans": [ + { + "bbox": [ + 472, + 127, + 542, + 172 + ], + "type": "image", + "image_path": "85e6d1c03e6b7ba772bb2086a1a2fa2fbc43074248332799df22787835fae17d.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 491, + 174, + 522, + 183 + ], + "lines": [ + { + "bbox": [ + 491, + 174, + 522, + 183 + ], + "spans": [ + { + "bbox": [ + 491, + 174, + 522, + 183 + ], + "type": "text", + "content": "(n) Label" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "table", + "bbox": [ + 51, + 235, + 544, + 373 + ], + "blocks": [ + { + "bbox": [ + 162, + 215, + 430, + 226 + ], + "lines": [ + { + "bbox": [ + 162, + 215, + 430, + 226 + ], + "spans": [ + { + "bbox": [ + 162, + 215, + 430, + 226 + ], + "type": "text", + "content": "Table 1. Quantitative results on datasets that generated by imaging model." + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 51, + 235, + 544, + 373 + ], + "lines": [ + { + "bbox": [ + 51, + 235, + 544, + 373 + ], + "spans": [ + { + "bbox": [ + 51, + 235, + 544, + 373 + ], + "type": "table", + "html": "
<table>
<tr><th rowspan="2">Methods</th><th colspan="2">NHR</th><th colspan="2">NHM</th><th colspan="2">NHCL</th><th colspan="2">NHCM</th><th colspan="2">NHCD</th><th colspan="2">NightHaze</th><th colspan="2">YellowHaze</th></tr>
<tr><th>SSIM↑</th><th>PSNR↑</th><th>SSIM↑</th><th>PSNR↑</th><th>SSIM↑</th><th>PSNR↑</th><th>SSIM↑</th><th>PSNR↑</th><th>SSIM↑</th><th>PSNR↑</th><th>SSIM↑</th><th>PSNR↑</th><th>SSIM↑</th><th>PSNR↑</th></tr>
<tr><td>MRP (CVPR 2017)</td><td>0.776</td><td>19.848</td><td>0.666</td><td>15.993</td><td>0.747</td><td>22.497</td><td>0.693</td><td>20.494</td><td>0.624</td><td>17.651</td><td>0.295</td><td>12.138</td><td>0.249</td><td>13.473</td></tr>
<tr><td>GD (ICCV 2019)</td><td>0.969</td><td>30.107</td><td>0.861</td><td>20.689</td><td>0.973</td><td>36.506</td><td>0.958</td><td>34.448</td><td>0.932</td><td>31.509</td><td>0.832</td><td>25.324</td><td>0.915</td><td>27.410</td></tr>
<tr><td>OSFD (ACMMM 2020)</td><td>0.808</td><td>21.028</td><td>0.722</td><td>18.491</td><td>0.786</td><td>22.329</td><td>0.739</td><td>20.929</td><td>0.672</td><td>18.501</td><td>0.304</td><td>13.387</td><td>0.259</td><td>14.775</td></tr>
<tr><td>MSBDN (CVPR 2020)</td><td>0.970</td><td>31.335</td><td>0.818</td><td>20.514</td><td>0.965</td><td>35.963</td><td>0.938</td><td>32.848</td><td>0.903</td><td>30.475</td><td>0.950</td><td>33.156</td><td>0.921</td><td>29.834</td></tr>
<tr><td>4KDehazing (CVPR 2021)</td><td>0.950</td><td>28.613</td><td>0.830</td><td>20.429</td><td>0.967</td><td>35.006</td><td>0.958</td><td>35.162</td><td>0.912</td><td>30.048</td><td>0.850</td><td>26.562</td><td>0.861</td><td>25.835</td></tr>
<tr><td>AECRNet (CVPR 2021)</td><td>0.915</td><td>24.864</td><td>0.817</td><td>19.420</td><td>0.951</td><td>33.183</td><td>0.943</td><td>33.498</td><td>0.890</td><td>28.742</td><td>0.946</td><td>32.344</td><td>0.937</td><td>29.417</td></tr>
<tr><td>DeHamer (CVPR 2022)</td><td>0.966</td><td>31.017</td><td>0.823</td><td>23.095</td><td>0.966</td><td>36.038</td><td>0.944</td><td>33.908</td><td>0.915</td><td>31.389</td><td>0.954</td><td>33.432</td><td>0.931</td><td>30.334</td></tr>
<tr><td>FSDGN (ECCV 2022)</td><td>0.975</td><td>32.072</td><td>0.874</td><td>21.415</td><td>0.972</td><td>36.432</td><td>0.952</td><td>33.723</td><td>0.922</td><td>31.559</td><td>0.948</td><td>33.521</td><td>0.955</td><td>33.062</td></tr>
<tr><td>DF (TIP 2023)</td><td>0.969</td><td>31.644</td><td>0.896</td><td>23.207</td><td>0.975</td><td>37.383</td><td>0.960</td><td>35.038</td><td>0.934</td><td>32.079</td><td>0.931</td><td>31.489</td><td>0.948</td><td>32.244</td></tr>
<tr><td>MITNet (ACMMM 2023)</td><td>0.974</td><td>31.969</td><td>0.859</td><td>20.884</td><td>0.969</td><td>35.794</td><td>0.945</td><td>32.849</td><td>0.916</td><td>30.628</td><td>0.946</td><td>34.114</td><td>0.932</td><td>31.186</td></tr>
<tr><td>Fourmer (ICML 2023)</td><td>0.969</td><td>31.660</td><td>0.862</td><td>21.423</td><td>0.963</td><td>35.714</td><td>0.943</td><td>33.201</td><td>0.928</td><td>32.103</td><td>0.949</td><td>33.419</td><td>0.958</td><td>31.978</td></tr>
<tr><td>Ours</td><td>0.978</td><td>33.180</td><td>0.905</td><td>23.705</td><td>0.979</td><td>38.146</td><td>0.968</td><td>36.146</td><td>0.951</td><td>34.001</td><td>0.968</td><td>35.527</td><td>0.965</td><td>32.981</td></tr>
</table>
", + "image_path": "006d236ae7e24ad9cc359d0dd4b7769931fdeeb51ce8f05180ce5f8ae8f46dc2.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "table_body" + } + ], + "index": 30 + }, + { + "bbox": [ + 46, + 392, + 287, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 392, + 287, + 416 + ], + "spans": [ + { + "bbox": [ + 46, + 392, + 287, + 416 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 392, + 287, + 416 + ], + "type": "inline_equation", + "content": "\\kappa \\geq 1" + }, + { + "bbox": [ + 46, + 392, + 287, + 416 + ], + "type": "text", + "content": " is the brightness intensity coefficient. The realistic brightness constraint within one single window is" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 70, + 422, + 287, + 440 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 422, + 287, + 440 + ], + "spans": [ + { + "bbox": [ + 70, + 422, + 287, + 440 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {B} ^ {p _ {i} ^ {s}} (\\hat {h}, \\hat {w}) = \\left(\\varphi_ {p _ {i} ^ {s}} (\\hat {h}, \\hat {w}) - \\xi \\cdot \\widetilde {\\varphi} _ {x _ {i} ^ {s}} (\\hat {h}, \\hat {w})\\right) ^ {2}, \\tag {24}", + "image_path": "ec8f7e81f378c53442a8f33f9c0d2143e536da06fdbbc257384668d9436feb2d.jpg" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 46, + 445, + 287, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 445, + 287, + 468 + ], + "spans": [ + { + "bbox": [ + 46, + 445, + 287, + 468 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 445, + 287, + 468 + ], + "type": "inline_equation", + "content": "\\xi" + }, + { + "bbox": [ + 46, + 445, + 287, + 468 + ], + "type": "text", + "content": " is a hyperparameter. The realistic brightness loss calculated over all windows is" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 57, + 474, + 287, + 510 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 474, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 57, + 474, + 287, + 510 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {B} = \\sum_ {s = 0} ^ {2} \\frac {\\lambda_ {b}}{\\hat {N} \\hat {W} ^ {s} \\hat {H} ^ {s}} \\cdot \\sum_ {i = 0} ^ {\\hat {N} - 1} \\sum_ {\\hat {h} = 0} ^ {\\hat {H} ^ {s} - 1} \\sum_ {\\hat {w} = 0} ^ {\\hat {W} ^ {s} - 1} \\mathcal {L} _ {B} ^ {p _ {i} ^ {s}} (\\hat {h}, \\hat {w}), \\tag {25}", + "image_path": "e1f9f37672cddb87d28aff491c06c9828c0868a8462ee16100b541f9c188f718.jpg" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 46, + 517, + 287, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 517, + 287, + 542 + ], + "spans": [ + { + "bbox": [ + 46, + 517, + 287, + 542 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 517, + 287, + 542 + ], + "type": "inline_equation", + "content": "\\hat{W}^s = W^s /\\gamma^s" + }, + { + "bbox": [ + 46, + 517, + 287, + 542 + ], + "type": "inline_equation", + "content": "\\hat{H}^s = H^s /\\gamma^s" + }, + { + "bbox": [ + 46, + 517, + 287, + 542 + ], + "type": "text", + "content": " . 
And " + }, + { + "bbox": [ + 46, + 517, + 287, + 542 + ], + "type": "inline_equation", + "content": "\\hat{N} = N + M" + }, + { + "bbox": [ + 46, + 517, + 287, + 542 + ], + "type": "text", + "content": " The " + }, + { + "bbox": [ + 46, + 517, + 287, + 542 + ], + "type": "inline_equation", + "content": "\\lambda_{b}" + }, + { + "bbox": [ + 46, + 517, + 287, + 542 + ], + "type": "text", + "content": " denotes the weights of scale loss of " + }, + { + "bbox": [ + 46, + 517, + 287, + 542 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_B" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 47, + 548, + 118, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 548, + 118, + 559 + ], + "spans": [ + { + "bbox": [ + 47, + 548, + 118, + 559 + ], + "type": "text", + "content": "3.5. Total Loss" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 46, + 567, + 287, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 567, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 46, + 567, + 287, + 590 + ], + "type": "text", + "content": "The overall loss is a combination of supervised and semi-supervised losses, which is" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 115, + 598, + 287, + 611 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 598, + 287, + 611 + ], + "spans": [ + { + "bbox": [ + 115, + 598, + 287, + 611 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {G} + \\alpha \\mathcal {L} _ {F} + \\beta \\mathcal {L} _ {B}, \\tag {26}", + "image_path": "15b0bfbd17841d0f2907619726c47ab776a8902bae0eabcac4b7aaa9d56ac43a.jpg" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 46, + 618, + 287, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 287, + 642 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 287, + 642 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 618, + 287, + 642 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 618, + 287, + 642 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 618, + 287, + 642 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 46, + 618, + 287, + 642 + ], + "type": "text", + "content": " are the weights of the frequency domain loss and the realistic brightness loss, respectively." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 47, + 651, + 128, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 651, + 128, + 664 + ], + "spans": [ + { + "bbox": [ + 47, + 651, + 128, + 664 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 47, + 671, + 161, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 671, + 161, + 684 + ], + "spans": [ + { + "bbox": [ + 47, + 671, + 161, + 684 + ], + "type": "text", + "content": "4.1. Experiment Setting" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "content": "Datasets. 
To comprehensively compare the performance of different algorithms, we conducted experiments on both" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 304, + 392, + 545, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 392, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 304, + 392, + 545, + 499 + ], + "type": "text", + "content": "synthetic and real-world datasets. The synthetic datasets include GTA5 [45], UNREAL-NH [31], {NHR, NHM, HNCL, NHCM, NHCD} [51] and {NightHaze, YellowHaze} [26]. The real-world nighttime haze (RWNH) is provided by [14]. Since the brightness level of the ground-truth label in the UNREAL-NH is close to daytime, we adjust the brightness of the hazy image and corresponding label to the level of the nighttime low-light image by the Gamma correction [33] for the evaluation of the RWNH." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 304, + 505, + 546, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 505, + 546, + 613 + ], + "spans": [ + { + "bbox": [ + 304, + 505, + 546, + 613 + ], + "type": "text", + "content": "Comparison Methods and Evaluation Metrics. MRP [50], GD [27], OSFD [51], MSBDN [5]. 4KDehazing [54], AECRNet [43], DeHamer [9], FSDGN [47], DF [38], MIT-Net [37] and Fourmer [55] are used as comparisons. PSNR [22, 35, 36] and SSIM [8, 42] are used to evaluate the performance on labeled datasets. BRISQUE [44] and MUSIQ [13, 17] are computed to evaluate the performance on unlabeled dataset. The " + }, + { + "bbox": [ + 304, + 505, + 546, + 613 + ], + "type": "inline_equation", + "content": "\\uparrow" + }, + { + "bbox": [ + 304, + 505, + 546, + 613 + ], + "type": "text", + "content": " represents a larger value, a higher quality, while " + }, + { + "bbox": [ + 304, + 505, + 546, + 613 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 304, + 505, + 546, + 613 + ], + "type": "text", + "content": " represents a larger value, a lower quality." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": "Implementation Details. The batch size is chosen as 4. The image size is set to " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "256 \\times 256 \\times 3" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": ". The learning rate is initialized to 0.0001 and linearly decays by a factor of 0.95 every 10 epochs. The Adam " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "(\\beta_{1} = 0.9, \\beta_{2} = 0.999)" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": " is used. The " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\lambda_{g}" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\lambda_{f}" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\lambda_{b}" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": " are all set to 1. 
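For concreteness, a minimal optimizer setup consistent with the training details listed here (Adam with beta1 = 0.9 and beta2 = 0.999, initial learning rate 1e-4, decayed by a factor of 0.95 every 10 epochs). The placeholder model and the use of StepLR to express the decay schedule are assumptions, not part of the paper:

```python
import torch
import torch.nn as nn

model = nn.Conv2d(3, 3, 3, padding=1)  # placeholder standing in for the SFSNiD network
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999))
# Decay the learning rate by a factor of 0.95 every 10 epochs.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.95)
```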
The " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": " are set to 0.1 and 20, respectively. The window size " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\gamma^{s}" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": " are set to 16, 8 and 4, where " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "s \\in \\{0,1,2\\}" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": ", respectively. The coefficient " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\xi" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": " is set to 1 and 1.3, respectively. The" + } + ] + } + ], + "index": 45 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2636" + } + ] + } + ], + "index": 46 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 69, + 134, + 157 + ], + "blocks": [ + { + "bbox": [ + 52, + 69, + 134, + 157 + ], + "lines": [ + { + "bbox": [ + 52, + 69, + 134, + 157 + ], + "spans": [ + { + "bbox": [ + 52, + 69, + 134, + 157 + ], + "type": "image", + "image_path": "f2c71a783412da48ae45b925130b1675a92c9e14d4285ef737d4a3c227783c51.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 78, + 159, + 108, + 168 + ], + "lines": [ + { + "bbox": [ + 78, + 159, + 108, + 168 + ], + "spans": [ + { + "bbox": [ + 78, + 159, + 108, + 168 + ], + "type": "text", + "content": "(a) Hazy" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 134, + 70, + 216, + 157 + ], + "blocks": [ + { + "bbox": [ + 134, + 70, + 216, + 157 + ], + "lines": [ + { + "bbox": [ + 134, + 70, + 216, + 157 + ], + "spans": [ + { + "bbox": [ + 134, + 70, + 216, + 157 + ], + "type": "image", + "image_path": "2f13d28929ef75be03f1aa5c80c7a4869326bd52ad7d684bcd25ce15c800ea85.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 162, + 159, + 192, + 168 + ], + "lines": [ + { + "bbox": [ + 162, + 159, + 192, + 168 + ], + "spans": [ + { + "bbox": [ + 162, + 159, + 192, + 168 + ], + "type": "text", + "content": "(b)MRP" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 216, + 70, + 296, + 157 + ], + "blocks": [ + { + "bbox": [ + 216, + 70, + 296, + 157 + ], + "lines": [ + { + "bbox": [ + 216, + 70, + 296, + 157 + ], + "spans": [ + { + "bbox": [ + 216, + 70, + 296, + 157 + ], + "type": "image", + "image_path": 
"2ab4e850fa9e989735bcf9f1fe5f822509134e1bc047c77246cfdcce13fdfdb2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 159, + 272, + 168 + ], + "lines": [ + { + "bbox": [ + 239, + 159, + 272, + 168 + ], + "spans": [ + { + "bbox": [ + 239, + 159, + 272, + 168 + ], + "type": "text", + "content": "(c) OSFD" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 296, + 70, + 377, + 157 + ], + "blocks": [ + { + "bbox": [ + 296, + 70, + 377, + 157 + ], + "lines": [ + { + "bbox": [ + 296, + 70, + 377, + 157 + ], + "spans": [ + { + "bbox": [ + 296, + 70, + 377, + 157 + ], + "type": "image", + "image_path": "2fa293e1f28c4bf50eb2bf511896c6e2c8cab6f722bce710588fdf47f8fe8408.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 325, + 159, + 350, + 168 + ], + "lines": [ + { + "bbox": [ + 325, + 159, + 350, + 168 + ], + "spans": [ + { + "bbox": [ + 325, + 159, + 350, + 168 + ], + "type": "text", + "content": "(d) GD" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 378, + 70, + 459, + 157 + ], + "blocks": [ + { + "bbox": [ + 378, + 70, + 459, + 157 + ], + "lines": [ + { + "bbox": [ + 378, + 70, + 459, + 157 + ], + "spans": [ + { + "bbox": [ + 378, + 70, + 459, + 157 + ], + "type": "image", + "image_path": "9eb01e2043a460f4405d414fc44ccee117da18e9e7643e37519fc26f4633fb3b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 397, + 159, + 437, + 168 + ], + "lines": [ + { + "bbox": [ + 397, + 159, + 437, + 168 + ], + "spans": [ + { + "bbox": [ + 397, + 159, + 437, + 168 + ], + "type": "text", + "content": "(e) MSBDN" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 460, + 70, + 541, + 157 + ], + "blocks": [ + { + "bbox": [ + 460, + 70, + 541, + 157 + ], + "lines": [ + { + "bbox": [ + 460, + 70, + 541, + 157 + ], + "spans": [ + { + "bbox": [ + 460, + 70, + 541, + 157 + ], + "type": "image", + "image_path": "f4d5c9877d1d4a150d75137ad8161fb663c2a2fda42bc09f5cdb96333d7be9d0.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 475, + 159, + 526, + 168 + ], + "lines": [ + { + "bbox": [ + 475, + 159, + 526, + 168 + ], + "spans": [ + { + "bbox": [ + 475, + 159, + 526, + 168 + ], + "type": "text", + "content": "(f) 4KDehazing" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 52, + 171, + 134, + 258 + ], + "blocks": [ + { + "bbox": [ + 52, + 171, + 134, + 258 + ], + "lines": [ + { + "bbox": [ + 52, + 171, + 134, + 258 + ], + "spans": [ + { + "bbox": [ + 52, + 171, + 134, + 258 + ], + "type": "image", + "image_path": "778aaff2b30e27c198a8c5c67c11be01014258f51949cce6e5c6d53e2cd349ed.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 261, + 113, + 270 + ], + "lines": [ + { + "bbox": [ + 67, + 261, + 113, + 270 + ], + "spans": [ + { + "bbox": [ + 67, + 261, + 113, + 270 + ], + "type": "text", + "content": "(g) AECRNet" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 135, + 171, + 215, + 258 + ], + "blocks": [ + { + "bbox": [ + 135, + 171, + 215, + 258 + ], + "lines": [ + { 
+ "bbox": [ + 135, + 171, + 215, + 258 + ], + "spans": [ + { + "bbox": [ + 135, + 171, + 215, + 258 + ], + "type": "image", + "image_path": "0b2756e365a887d592be74b006513c110e5c8ea63f25d49727ff914d8870f115.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 261, + 194, + 270 + ], + "lines": [ + { + "bbox": [ + 149, + 261, + 194, + 270 + ], + "spans": [ + { + "bbox": [ + 149, + 261, + 194, + 270 + ], + "type": "text", + "content": "(h) DeHamer" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 216, + 171, + 296, + 258 + ], + "blocks": [ + { + "bbox": [ + 216, + 171, + 296, + 258 + ], + "lines": [ + { + "bbox": [ + 216, + 171, + 296, + 258 + ], + "spans": [ + { + "bbox": [ + 216, + 171, + 296, + 258 + ], + "type": "image", + "image_path": "0af63a08186521c8359dc840ed855cb6dc5cb4d0ebb0733de438c60e3b3644e3.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 247, + 261, + 268, + 270 + ], + "lines": [ + { + "bbox": [ + 247, + 261, + 268, + 270 + ], + "spans": [ + { + "bbox": [ + 247, + 261, + 268, + 270 + ], + "type": "text", + "content": "(i) DF" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 296, + 171, + 378, + 258 + ], + "blocks": [ + { + "bbox": [ + 296, + 171, + 378, + 258 + ], + "lines": [ + { + "bbox": [ + 296, + 171, + 378, + 258 + ], + "spans": [ + { + "bbox": [ + 296, + 171, + 378, + 258 + ], + "type": "image", + "image_path": "31292a61a2749d2f8a56a49504c082a91ebce385117ffe9a5e53e0900ca8dd93.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 319, + 261, + 356, + 270 + ], + "lines": [ + { + "bbox": [ + 319, + 261, + 356, + 270 + ], + "spans": [ + { + "bbox": [ + 319, + 261, + 356, + 270 + ], + "type": "text", + "content": "(j) MITNet" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 378, + 171, + 459, + 258 + ], + "blocks": [ + { + "bbox": [ + 378, + 171, + 459, + 258 + ], + "lines": [ + { + "bbox": [ + 378, + 171, + 459, + 258 + ], + "spans": [ + { + "bbox": [ + 378, + 171, + 459, + 258 + ], + "type": "image", + "image_path": "840917b6f0fc0d28140d03c1a3e4f187d717aac7ec90ba6ab7c125ae24146d67.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 401, + 261, + 441, + 270 + ], + "lines": [ + { + "bbox": [ + 401, + 261, + 441, + 270 + ], + "spans": [ + { + "bbox": [ + 401, + 261, + 441, + 270 + ], + "type": "text", + "content": "(k)Fourmer" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 460, + 171, + 541, + 258 + ], + "blocks": [ + { + "bbox": [ + 460, + 171, + 541, + 258 + ], + "lines": [ + { + "bbox": [ + 460, + 171, + 541, + 258 + ], + "spans": [ + { + "bbox": [ + 460, + 171, + 541, + 258 + ], + "type": "image", + "image_path": "caf5d5ebbc15f975794703dad70d9332388490a1a391d78b5f3c3d1bf6537b19.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 486, + 261, + 512, + 270 + ], + "lines": [ + { + "bbox": [ + 486, + 261, + 512, + 270 + ], + "spans": [ + { + "bbox": [ + 486, + 261, + 512, + 270 + ], + "type": "text", + "content": "(1) Ours" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], 
+ "index": 22 + }, + { + "type": "image", + "bbox": [ + 50, + 301, + 99, + 378 + ], + "blocks": [ + { + "bbox": [ + 50, + 301, + 99, + 378 + ], + "lines": [ + { + "bbox": [ + 50, + 301, + 99, + 378 + ], + "spans": [ + { + "bbox": [ + 50, + 301, + 99, + 378 + ], + "type": "image", + "image_path": "6bc70cb929104cc92ee2896d58959002809ddc649bf32052be11050893323cef.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 62, + 380, + 91, + 389 + ], + "lines": [ + { + "bbox": [ + 62, + 380, + 91, + 389 + ], + "spans": [ + { + "bbox": [ + 62, + 380, + 91, + 389 + ], + "type": "text", + "content": "(a) Hazy" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 100, + 301, + 148, + 380 + ], + "blocks": [ + { + "bbox": [ + 100, + 301, + 148, + 380 + ], + "lines": [ + { + "bbox": [ + 100, + 301, + 148, + 380 + ], + "spans": [ + { + "bbox": [ + 100, + 301, + 148, + 380 + ], + "type": "image", + "image_path": "84c6aea0def8a97bf688a610d6cf069567a8af2fad072c9a2ed4f33f79514a8a.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 102, + 380, + 142, + 389 + ], + "lines": [ + { + "bbox": [ + 102, + 380, + 142, + 389 + ], + "spans": [ + { + "bbox": [ + 102, + 380, + 142, + 389 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 102, + 380, + 142, + 389 + ], + "type": "inline_equation", + "content": "\\kappa = 1.0" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 149, + 301, + 198, + 378 + ], + "blocks": [ + { + "bbox": [ + 194, + 279, + 398, + 290 + ], + "lines": [ + { + "bbox": [ + 194, + 279, + 398, + 290 + ], + "spans": [ + { + "bbox": [ + 194, + 279, + 398, + 290 + ], + "type": "text", + "content": "Figure 6. Visual results on real-world hazy images [14]." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 149, + 301, + 198, + 378 + ], + "lines": [ + { + "bbox": [ + 149, + 301, + 198, + 378 + ], + "spans": [ + { + "bbox": [ + 149, + 301, + 198, + 378 + ], + "type": "image", + "image_path": "9838b44ebf7f5bf29b3c3fb7d70f36c4690d6e262554a59050d5b186ebf0697b.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 152, + 380, + 190, + 389 + ], + "lines": [ + { + "bbox": [ + 152, + 380, + 190, + 389 + ], + "spans": [ + { + "bbox": [ + 152, + 380, + 190, + 389 + ], + "type": "text", + "content": "(c) " + }, + { + "bbox": [ + 152, + 380, + 190, + 389 + ], + "type": "inline_equation", + "content": "\\kappa = 1.3" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 198, + 398, + 394, + 409 + ], + "lines": [ + { + "bbox": [ + 198, + 398, + 394, + 409 + ], + "spans": [ + { + "bbox": [ + 198, + 398, + 394, + 409 + ], + "type": "text", + "content": "Figure 7. Dehazed images obtained under different " + }, + { + "bbox": [ + 198, + 398, + 394, + 409 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 198, + 398, + 394, + 409 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 198, + 301, + 247, + 378 + ], + "blocks": [ + { + "bbox": [ + 198, + 301, + 247, + 378 + ], + "lines": [ + { + "bbox": [ + 198, + 301, + 247, + 378 + ], + "spans": [ + { + "bbox": [ + 198, + 301, + 247, + 378 + ], + "type": "image", + "image_path": "05abaae28072af4244ebc728c9ca242e78b5e7b4d19778cb71e777692f30eefc.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 202, + 380, + 241, + 389 + ], + "lines": [ + { + "bbox": [ + 202, + 380, + 241, + 389 + ], + "spans": [ + { + "bbox": [ + 202, + 380, + 241, + 389 + ], + "type": "text", + "content": "(d) " + }, + { + "bbox": [ + 202, + 380, + 241, + 389 + ], + "type": "inline_equation", + "content": "\\kappa = 1.5" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 248, + 301, + 296, + 378 + ], + "blocks": [ + { + "bbox": [ + 248, + 301, + 296, + 378 + ], + "lines": [ + { + "bbox": [ + 248, + 301, + 296, + 378 + ], + "spans": [ + { + "bbox": [ + 248, + 301, + 296, + 378 + ], + "type": "image", + "image_path": "e8dd696bfb955802e39a5f44efc691f9c137e7050fab57bf4b4a00deee0efb61.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 252, + 380, + 291, + 389 + ], + "lines": [ + { + "bbox": [ + 252, + 380, + 291, + 389 + ], + "spans": [ + { + "bbox": [ + 252, + 380, + 291, + 389 + ], + "type": "text", + "content": "(e) " + }, + { + "bbox": [ + 252, + 380, + 291, + 389 + ], + "type": "inline_equation", + "content": "\\kappa = 1.8" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 297, + 301, + 345, + 378 + ], + "blocks": [ + { + "bbox": [ + 297, + 301, + 345, + 378 + ], + "lines": [ + { + "bbox": [ + 297, + 301, + 345, + 378 + ], + "spans": [ + { + "bbox": [ + 297, + 301, + 345, + 378 + ], + "type": "image", + "image_path": "efac5d22c673dce2c9a5640d53dd4bc4c10d65fcd541d5a7b30348a62bf6d6b0.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 303, + 380, + 341, + 389 + ], + "lines": [ + { + "bbox": [ + 303, + 380, + 341, + 389 + ], + "spans": [ + { + "bbox": [ + 303, + 380, + 341, + 389 + ], + "type": "text", + "content": "(f) " + }, + { + "bbox": [ + 303, + 380, + 341, + 389 + ], + "type": "inline_equation", + "content": "\\kappa = 2.0" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 347, + 301, + 394, + 378 + ], + "blocks": [ + { + "bbox": [ + 347, + 301, + 394, + 378 + ], + "lines": [ + { + "bbox": [ + 347, + 301, + 394, + 378 + ], + "spans": [ + { + "bbox": [ + 347, + 301, + 394, + 378 + ], + "type": "image", + "image_path": "6c9911d2e59e8ab292cc26a6d0f918ae830e3a7460aeacde4a51555570b96084.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 351, + 380, + 391, + 389 + ], + "lines": [ + { + "bbox": [ + 351, + 380, + 391, + 389 + ], + "spans": [ + { + "bbox": [ + 351, + 380, + 391, + 389 + ], + "type": "text", + "content": "(g) " + }, + { + "bbox": [ + 351, + 380, + 391, + 389 + ], + "type": "inline_equation", + "content": "\\kappa = 2.3" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 395, + 301, + 
444, + 378 + ], + "blocks": [ + { + "bbox": [ + 395, + 301, + 444, + 378 + ], + "lines": [ + { + "bbox": [ + 395, + 301, + 444, + 378 + ], + "spans": [ + { + "bbox": [ + 395, + 301, + 444, + 378 + ], + "type": "image", + "image_path": "c66e4d4caa57dcf3a0cf2ffee880925265670971b44d42202931588516ad1eda.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 402, + 380, + 441, + 389 + ], + "lines": [ + { + "bbox": [ + 402, + 380, + 441, + 389 + ], + "spans": [ + { + "bbox": [ + 402, + 380, + 441, + 389 + ], + "type": "text", + "content": "(h) " + }, + { + "bbox": [ + 402, + 380, + 441, + 389 + ], + "type": "inline_equation", + "content": "\\kappa = 2.5" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 444, + 301, + 494, + 378 + ], + "blocks": [ + { + "bbox": [ + 444, + 301, + 494, + 378 + ], + "lines": [ + { + "bbox": [ + 444, + 301, + 494, + 378 + ], + "spans": [ + { + "bbox": [ + 444, + 301, + 494, + 378 + ], + "type": "image", + "image_path": "bf7b5296406ec0c2ab3087ef043f8275bace726ec821cc0ae52a37d4c43ac645.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 451, + 380, + 489, + 389 + ], + "lines": [ + { + "bbox": [ + 451, + 380, + 489, + 389 + ], + "spans": [ + { + "bbox": [ + 451, + 380, + 489, + 389 + ], + "type": "text", + "content": "(i) " + }, + { + "bbox": [ + 451, + 380, + 489, + 389 + ], + "type": "inline_equation", + "content": "\\kappa = 2.8" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 495, + 301, + 544, + 378 + ], + "blocks": [ + { + "bbox": [ + 495, + 301, + 544, + 378 + ], + "lines": [ + { + "bbox": [ + 495, + 301, + 544, + 378 + ], + "spans": [ + { + "bbox": [ + 495, + 301, + 544, + 378 + ], + "type": "image", + "image_path": "11ed9bb178b2069315f5b6a79735fefd08ea869723a88559a62259317c15f8ee.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 499, + 380, + 537, + 389 + ], + "lines": [ + { + "bbox": [ + 499, + 380, + 537, + 389 + ], + "spans": [ + { + "bbox": [ + 499, + 380, + 537, + 389 + ], + "type": "text", + "content": "(j) " + }, + { + "bbox": [ + 499, + 380, + 537, + 389 + ], + "type": "inline_equation", + "content": "\\kappa = 3.0" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 43 + }, + { + "bbox": [ + 46, + 430, + 287, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 430, + 287, + 454 + ], + "spans": [ + { + "bbox": [ + 46, + 430, + 287, + 454 + ], + "type": "text", + "content": "proposed model is implemented by PyTorch and trained on the single NVIDIA RTX 4090 platform." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 47, + 464, + 282, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 464, + 282, + 478 + ], + "spans": [ + { + "bbox": [ + 47, + 464, + 282, + 478 + ], + "type": "text", + "content": "4.2. Comparison with State-of-the-art Algorithms" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "text", + "content": "Evaluation on Synthetic Datasets. Table 1 and Table 2 show the quantitative dehazing results obtained by state-of-the-art methods. 
Figure 5 shows the corresponding visual results. The quantitative and visual results demonstrate that the proposed methods achieve an overall better performance than state-of-the-art algorithms." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 46, + 557, + 287, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 557, + 287, + 676 + ], + "spans": [ + { + "bbox": [ + 46, + 557, + 287, + 676 + ], + "type": "text", + "content": "Evaluation on Real-world Datasets. Table 2 shows the evaluation results of real-world dehazed images. It is worth pointing out that existing research [7] proposes that the reliability of no-reference metrics in the dehazing task is lower than that of full-reference metrics. Figure 6 shows that the details of the dehazed results obtained by our method are visually better. Meanwhile, the brightness of the dehazed images obtained by most comparison algorithms is obvious unrealistic, while the brightness of the dehazed images obtained by our algorithm is approximately globally realistic." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Overall Evaluation. According to the quantitative and visual results on synthetic and real-world datasets, the proposed SFSNiD achieves overall better performance. More" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 306, + 430, + 493, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 430, + 493, + 443 + ], + "spans": [ + { + "bbox": [ + 306, + 430, + 493, + 443 + ], + "type": "text", + "content": "results are placed at Supplementary Materials." + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 306, + 453, + 476, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 453, + 476, + 466 + ], + "spans": [ + { + "bbox": [ + 306, + 453, + 476, + 466 + ], + "type": "text", + "content": "4.3. Ablation Study and Discussions" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 304, + 473, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 545, + 676 + ], + "type": "text", + "content": "Spatial and Frequency Information Interaction. The spatial and frequency information interaction (SFII) modules and naive convolution module are used in the proposed SFSNiD. In order to prove the usefulness of the FDP, LP and BNM that contained in the SFII, ablation experiments for different sub-blocks are performed. The ablation experiment on the proposed SFII includes (i) removing the FDP, (ii) removing the LP, (iii) removing the frequency domain processing in BNM, and (iv) removing the spatial domain process in BNM. 
These four settings are denoted " + }, + { + "bbox": [ + 304, + 473, + 545, + 676 + ], + "type": "inline_equation", + "content": "R1" + }, + { + "bbox": [ + 304, + 473, + 545, + 676 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 473, + 545, + 676 + ], + "type": "inline_equation", + "content": "R2" + }, + { + "bbox": [ + 304, + 473, + 545, + 676 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 473, + 545, + 676 + ], + "type": "inline_equation", + "content": "R3" + }, + { + "bbox": [ + 304, + 473, + 545, + 676 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 473, + 545, + 676 + ], + "type": "inline_equation", + "content": "R4" + }, + { + "bbox": [ + 304, + 473, + 545, + 676 + ], + "type": "text", + "content": ", respectively. Table 3 shows the ablation results under different settings on the UNREAL-NH [31]. The quantitative results demonstrate that the FDP, LP and BNM all have a positive effect on the dehazing performance. Since we must control the size of the paper, visualizations of the amplitude and phase spectrums are placed in Supplementary Materials." + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "Hierarchical Training and Frequency Domain Loss. The training process of the proposed SFSNiD takes a hierarchical strategy by using differ scales " + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "inline_equation", + "content": "s \\in \\{0,1,2\\}" + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": ". Two" + } + ] + } + ], + "index": 54 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2637" + } + ] + } + ], + "index": 55 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 101, + 288, + 240 + ], + "blocks": [ + { + "bbox": [ + 47, + 71, + 287, + 94 + ], + "lines": [ + { + "bbox": [ + 47, + 71, + 287, + 94 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 287, + 94 + ], + "type": "text", + "content": "Table 2. Quantitative results on datasets generated by game engine (GTA5 and UNREAL-NH) and the real-world dataset (RWNH)." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 101, + 288, + 240 + ], + "lines": [ + { + "bbox": [ + 48, + 101, + 288, + 240 + ], + "spans": [ + { + "bbox": [ + 48, + 101, + 288, + 240 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Methods</td><td colspan="2">GTA5</td><td colspan="2">UNREAL-NH</td><td colspan="2">RWNH</td></tr>
<tr><td>SSIM↑</td><td>PSNR↑</td><td>SSIM↑</td><td>PSNR↑</td><td>BRISQUE ↓</td><td>MUSIQ ↑</td></tr>
<tr><td>MRP</td><td>0.662</td><td>19.460</td><td>0.467</td><td>10.039</td><td>19.418</td><td>41.194</td></tr>
<tr><td>GD</td><td>0.900</td><td>30.090</td><td>0.767</td><td>21.202</td><td>31.359</td><td>33.433</td></tr>
<tr><td>OSFD</td><td>0.711</td><td>21.461</td><td>0.443</td><td>9.169</td><td>20.860</td><td>41.779</td></tr>
<tr><td>MSBDN</td><td>0.909</td><td>32.029</td><td>0.827</td><td>25.680</td><td>38.910</td><td>29.968</td></tr>
<tr><td>4KDehazing</td><td>0.903</td><td>30.314</td><td>0.774</td><td>23.087</td><td>34.965</td><td>33.536</td></tr>
<tr><td>AECRNet</td><td>0.888</td><td>26.846</td><td>0.731</td><td>21.566</td><td>27.084</td><td>37.034</td></tr>
<tr><td>DeHamer</td><td>0.928</td><td>32.597</td><td>0.740</td><td>22.441</td><td>42.269</td><td>26.788</td></tr>
<tr><td>FSDGN</td><td>0.923</td><td>32.642</td><td>0.702</td><td>21.736</td><td>32.216</td><td>35.200</td></tr>
<tr><td>DF</td><td>0.918</td><td>32.856</td><td>0.770</td><td>23.017</td><td>33.678</td><td>31.663</td></tr>
<tr><td>MITNet</td><td>0.899</td><td>31.118</td><td>0.766</td><td>21.860</td><td>35.404</td><td>31.768</td></tr>
<tr><td>Fourmer</td><td>0.917</td><td>31.926</td><td>0.772</td><td>22.799</td><td>35.850</td><td>31.367</td></tr>
<tr><td>Ours</td><td>0.935</td><td>33.708</td><td>0.862</td><td>25.907</td><td>30.975</td><td>32.120</td></tr></table>
", + "image_path": "27074d0e13feb110124c553ca04fb3512589ee57513ea2e5611a9ab135ad8862.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 50, + 251, + 107, + 321 + ], + "blocks": [ + { + "bbox": [ + 50, + 251, + 107, + 321 + ], + "lines": [ + { + "bbox": [ + 50, + 251, + 107, + 321 + ], + "spans": [ + { + "bbox": [ + 50, + 251, + 107, + 321 + ], + "type": "image", + "image_path": "5abd6dd2f674234b94412ccb616c6d25a2431d7ccf30fdec562ee30824fe8c74.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 64, + 323, + 93, + 333 + ], + "lines": [ + { + "bbox": [ + 64, + 323, + 93, + 333 + ], + "spans": [ + { + "bbox": [ + 64, + 323, + 93, + 333 + ], + "type": "text", + "content": "(a) Hazy" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 252, + 167, + 322 + ], + "blocks": [ + { + "bbox": [ + 109, + 252, + 167, + 322 + ], + "lines": [ + { + "bbox": [ + 109, + 252, + 167, + 322 + ], + "spans": [ + { + "bbox": [ + 109, + 252, + 167, + 322 + ], + "type": "image", + "image_path": "d5b9c2bafd8259ea81c2f970185c9a7295c9aafebfa0b93fa8162ad2a0b2f155.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 108, + 323, + 163, + 333 + ], + "lines": [ + { + "bbox": [ + 108, + 323, + 163, + 333 + ], + "spans": [ + { + "bbox": [ + 108, + 323, + 163, + 333 + ], + "type": "text", + "content": "(b) Pseudo Label" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 61, + 342, + 272, + 354 + ], + "lines": [ + { + "bbox": [ + 61, + 342, + 272, + 354 + ], + "spans": [ + { + "bbox": [ + 61, + 342, + 272, + 354 + ], + "type": "text", + "content": "Figure 8. Visual results under different training strategies." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 168, + 252, + 227, + 322 + ], + "blocks": [ + { + "bbox": [ + 168, + 252, + 227, + 322 + ], + "lines": [ + { + "bbox": [ + 168, + 252, + 227, + 322 + ], + "spans": [ + { + "bbox": [ + 168, + 252, + 227, + 322 + ], + "type": "image", + "image_path": "1f3fa71fcd49295d08e3ad014b56977adedbfb27397aaffff308f520a0d38012.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 173, + 323, + 219, + 333 + ], + "lines": [ + { + "bbox": [ + 173, + 323, + 219, + 333 + ], + "spans": [ + { + "bbox": [ + 173, + 323, + 219, + 333 + ], + "type": "text", + "content": "(c) Retraining" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 227, + 252, + 286, + 322 + ], + "blocks": [ + { + "bbox": [ + 227, + 252, + 286, + 322 + ], + "lines": [ + { + "bbox": [ + 227, + 252, + 286, + 322 + ], + "spans": [ + { + "bbox": [ + 227, + 252, + 286, + 322 + ], + "type": "image", + "image_path": "e09788f7f60b4aaf8ff5dff2b50bf82ff2a789d55878016703283298cce0e536.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 323, + 290, + 334 + ], + "lines": [ + { + "bbox": [ + 225, + 323, + 290, + 334 + ], + "spans": [ + { + "bbox": [ + 225, + 323, + 290, + 334 + ], + "type": "text", + "content": "(d) Retraining +LB" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "spans": [ + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "text", + "content": "ablation studies are adopted, which are denoted as (i) " + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "inline_equation", + "content": "S1" + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "inline_equation", + "content": "s \\in \\{0\\}" + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "text", + "content": ", and (ii) " + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "inline_equation", + "content": "S2" + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "inline_equation", + "content": "s \\in \\{0, 1\\}" + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "text", + "content": ". Meanwhile, in our experimental setup, the spatial domain loss " + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "inline_equation", + "content": "L_{G}" + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "text", + "content": " and the frequency domain loss " + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "inline_equation", + "content": "L_{F}" + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "text", + "content": " are applied simultaneously. 
To verify the effectiveness of frequency domain loss, the setting when " + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "inline_equation", + "content": "L_{F}" + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "text", + "content": " is not used is denoted as " + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "inline_equation", + "content": "S3" + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "inline_equation", + "content": "s \\in \\{0, 1, 2\\}" + }, + { + "bbox": [ + 47, + 376, + 287, + 507 + ], + "type": "text", + "content": "). Table 4 shows the ablation results under the three different settings. The quantitative results demonstrate two main conclusions. First, the hierarchical training strategy can improve the dehazing performance. Second, the loss in the frequency domain is crucial as it improves the SSIM from 0.816 to 0.862." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "spans": [ + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "text", + "content": "Retraining Strategy and Realistic Brightness Loss. To verify the effectiveness of the retraining strategy and the realistic brightness loss " + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_B" + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "text", + "content": ", the visual effects are shown in Figure 8. As shown in Figure 8-(b), the texture of the pseudo-labels is blurred due to the domain discrepancy between the synthetic and real-world data. The dehazed images obtained after retraining has unrealistic brightness as shown in Figure 8-(c). It can be seen that the best effect occurs when the retraining strategy and " + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_B" + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "text", + "content": " are used simultaneously as shown in Figure 8-(d). The BRISQUE " + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "inline_equation", + "content": "(\\downarrow)" + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "text", + "content": " and MUSIQ " + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "inline_equation", + "content": "(\\uparrow)" + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "text", + "content": " obtained for the three settings (b), (c) and (d) in Figure 8 are " + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\{33.316, 30.432\\}" + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\{34.210, 32.373\\}" + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\{30.975, 32.120\\}" + }, + { + "bbox": [ + 47, + 509, + 287, + 688 + ], + "type": "text", + "content": ", respectively. Taking a comprehensive look at the visual and quantitative evaluation results, our proposed strategy is effective." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "type": "text", + "content": "Brightness intensity coefficient " + }, + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_B" + }, + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "type": "text", + "content": ". In order to demonstrate the effectiveness of " + }, + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "type": "text", + "content": " on the real-world dehaz-" + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 312, + 71, + 539, + 156 + ], + "blocks": [ + { + "bbox": [ + 312, + 71, + 539, + 156 + ], + "lines": [ + { + "bbox": [ + 312, + 71, + 539, + 156 + ], + "spans": [ + { + "bbox": [ + 312, + 71, + 539, + 156 + ], + "type": "image", + "image_path": "fb642f7783d1d9a3e6e71a1cc8cc831c973f3d84cfa3a4471f79aa7487b8d403.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 165, + 545, + 199 + ], + "lines": [ + { + "bbox": [ + 306, + 165, + 545, + 199 + ], + "spans": [ + { + "bbox": [ + 306, + 165, + 545, + 199 + ], + "type": "text", + "content": "Figure 9. The average pixel value obtained under different " + }, + { + "bbox": [ + 306, + 165, + 545, + 199 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 306, + 165, + 545, + 199 + ], + "type": "text", + "content": ". The horizontal dashed line represents the average pixel value of real-world nighttime clear images [14]." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 309, + 230, + 544, + 263 + ], + "blocks": [ + { + "bbox": [ + 359, + 211, + 492, + 221 + ], + "lines": [ + { + "bbox": [ + 359, + 211, + 492, + 221 + ], + "spans": [ + { + "bbox": [ + 359, + 211, + 492, + 221 + ], + "type": "text", + "content": "Table 3. Ablation study on the SFII." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 230, + 544, + 263 + ], + "lines": [ + { + "bbox": [ + 309, + 230, + 544, + 263 + ], + "spans": [ + { + "bbox": [ + 309, + 230, + 544, + 263 + ], + "type": "table", + "html": "
<table><tr><td>Settings</td><td>R1</td><td>R2</td><td>R3</td><td>R4</td><td>Ours</td></tr>
<tr><td>SSIM</td><td>0.848</td><td>0.858</td><td>0.851</td><td>0.845</td><td>0.862</td></tr>
<tr><td>PSNR</td><td>25.353</td><td>25.808</td><td>25.642</td><td>24.301</td><td>25.907</td></tr></table>
", + "image_path": "35d5ed958ed05aee57c320eddea553e56a578f3957310eb59e31cbcb72187577.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "type": "table", + "bbox": [ + 312, + 293, + 541, + 325 + ], + "blocks": [ + { + "bbox": [ + 315, + 273, + 535, + 285 + ], + "lines": [ + { + "bbox": [ + 315, + 273, + 535, + 285 + ], + "spans": [ + { + "bbox": [ + 315, + 273, + 535, + 285 + ], + "type": "text", + "content": "Table 4. Ablation study on the scale loss and frequency loss." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 312, + 293, + 541, + 325 + ], + "lines": [ + { + "bbox": [ + 312, + 293, + 541, + 325 + ], + "spans": [ + { + "bbox": [ + 312, + 293, + 541, + 325 + ], + "type": "table", + "html": "
<table><tr><td>Settings</td><td>S1</td><td>S2</td><td>S3</td><td>Ours</td></tr>
<tr><td>SSIM</td><td>0.854</td><td>0.851</td><td>0.816</td><td>0.862</td></tr>
<tr><td>PSNR</td><td>25.601</td><td>25.134</td><td>24.464</td><td>25.907</td></tr></table>
", + "image_path": "81a08b0e6c6d014bc1665fa4b2a38743be64642954a2ae486688570072beec46.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "text", + "content": "ing task, we manually set " + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "text", + "content": " to different values. The dehazed images and average pixel value when " + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "text", + "content": " takes different values are shown in Figure 7 and Figure 9, respectively. There are two conclusions that can be drawn. First, as " + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "text", + "content": " increases, the brightness of the dehazed image continues to decrease, which proves that " + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "text", + "content": " can control the brightness of the dehazed image. Second, when " + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "text", + "content": " equals 1.3, the average pixel value (0.225) of dehazed images is close to the average pixel value real-world nighttime clear images (0.217) [14]. Therefore, we set " + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "text", + "content": " to 1.3 as the final setting." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 475, + 378, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 475, + 378, + 488 + ], + "spans": [ + { + "bbox": [ + 306, + 475, + 378, + 488 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 495, + 545, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 495, + 545, + 626 + ], + "spans": [ + { + "bbox": [ + 304, + 495, + 545, + 626 + ], + "type": "text", + "content": "In this paper, a semi-supervised nighttime image dehazing baseline SFSNiD is proposed for real-world nighttime dehazing. A spatial and frequency domain information interaction module is proposed to handle the haze, glow, and noise with localized, coupled and frequency inconsistent characteristics. A retraining strategy and a local window-based brightness loss for semi-supervised training process are designed to suppress haze and glow while achieving realistic brightness. Experiments on public benchmarks validate the effectiveness of the proposed method and its superiority over state-of-the-art methods." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 627, + 545, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 627, + 545, + 711 + ], + "spans": [ + { + "bbox": [ + 304, + 627, + 545, + 711 + ], + "type": "text", + "content": "Acknowledgment. This work was supported in part by the grant of the National Science Foundation of China under Grant 62172090; Start-up Research Fund of Southeast University under Grant RF1028623097; CAAI-Huawei MindSpore Open Fund. We thank the Big Data Computing Center of Southeast University for providing the facility support on the numerical calculations in this paper." + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2638" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "text", + "content": "[1] Cosmin Ancuti, Codruta O Ancuti, Christophe De Vleeschouwer, and Alan C Bovik. Day and night-time dehazing by local airlight estimation. IEEE Transactions on Image Processing, 29:6264-6275, 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 135, + 287, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 287, + 179 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 287, + 179 + ], + "type": "text", + "content": "[2] Xiaofeng Cong, Jie Gui, Kai-Chao Miao, Jun Zhang, Bing Wang, and Peng Chen. Discrete haze level dehazing network. In ACM International Conference on Multimedia, pages 1828-1836, 2020. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 180, + 287, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 180, + 287, + 233 + ], + "spans": [ + { + "bbox": [ + 53, + 180, + 287, + 233 + ], + "type": "text", + "content": "[3] Yuning Cui, Yi Tao, Zhenshan Bing, Wenqi Ren, Xinwei Gao, Xiaochun Cao, Kai Huang, and Alois Knoll. Selective frequency network for image restoration. In The Eleventh International Conference on Learning Representations, 2022. 3, 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 236, + 287, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 287, + 278 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 287, + 278 + ], + "type": "text", + "content": "[4] Yuekun Dai, Chongyi Li, Shangchen Zhou, Ruicheng Feng, and Chen Change Loy. Flare7k: A phenomenological nighttime flare removal dataset. Advances in Neural Information Processing Systems, 35:3926-3937, 2022. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 280, + 287, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 280, + 287, + 334 + ], + "spans": [ + { + "bbox": [ + 53, + 280, + 287, + 334 + ], + "type": "text", + "content": "[5] Hang Dong, Jinshan Pan, Lei Xiang, Zhe Hu, Xinyi Zhang, Fei Wang, and Ming-Hsuan Yang. Multi-scale boosted dehazing network with dense feature fusion. In IEEE Conference on Computer Vision and Pattern Recognition, pages 2157-2167, 2020. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 335, + 287, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 335, + 287, + 378 + ], + "spans": [ + { + "bbox": [ + 53, + 335, + 287, + 378 + ], + "type": "text", + "content": "[6] Matteo Frigo and Steven G Johnson. Fftw: An adaptive software architecture for the fft. In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 1381-1384, 1998. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 380, + 287, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 380, + 287, + 424 + ], + "spans": [ + { + "bbox": [ + 53, + 380, + 287, + 424 + ], + "type": "text", + "content": "[7] Jie Gui, Xiaofeng Cong, Yuan Cao, Wenqi Ren, Jun Zhang, Jing Zhang, Jiuxin Cao, and Dacheng Tao. A comprehensive survey and taxonomy on single image dehazing based on deep learning. ACM Computing Surveys, 2023. 1, 2, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 425, + 287, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 425, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 53, + 425, + 287, + 479 + ], + "type": "text", + "content": "[8] Chunle Guo, Chongyi Li, Jichang Guo, Chen Change Loy, Junhui Hou, Sam Kwong, and Runmin Cong. Zero-reference deep curve estimation for low-light image enhancement. In IEEE Conference on Computer Vision and Pattern Recognition, pages 1780-1789, 2020. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 480, + 287, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 480, + 287, + 534 + ], + "spans": [ + { + "bbox": [ + 53, + 480, + 287, + 534 + ], + "type": "text", + "content": "[9] Chun-Le Guo, Qixin Yan, Saeed Anwar, Runmin Cong, Wenqi Ren, and Chongyi Li. Image dehazing transformer with transmission-aware 3d position embedding. In IEEE Conference on Computer Vision and Pattern Recognition, pages 5812-5820, 2022. 2, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 536, + 287, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 536, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 48, + 536, + 287, + 579 + ], + "type": "text", + "content": "[10] Xin Guo, Xueyang Fu, Man Zhou, Zhen Huang, Jialun Peng, and Zheng-Jun Zha. Exploring fourier prior for single image rain removal. In International Joint Conferences on Artificial Intelligence, pages 935–941, 2022. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 580, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 580, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 580, + 287, + 624 + ], + "type": "text", + "content": "[11] Junming Hou, Qi Cao, Ran Ran, Che Liu, Junling Li, and Liang-jian Deng. Bidomain modeling paradigm for pan-sharpening. In ACM International Conference on Multimedia, pages 347-357, 2023. 
4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 287, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 657 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 657 + ], + "type": "text", + "content": "[12] Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation networks. In IEEE Conference on Computer Vision and Pattern Recognition, pages 7132-7141, 2018. 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[13] Shirui Huang, Keyan Wang, Huan Liu, Jun Chen, and Yun-song Li. Contrastive semi-supervised learning for underwater image restoration via reliable bank. In IEEE Conference on Computer Vision and Pattern Recognition, pages 18145-18155, 2023. 6" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "type": "text", + "content": "[14] Yeying Jin, Beibei Lin, Wending Yan, Wei Ye, Yuan Yuan, and Robby T Tan. Enhancing visibility in nighttime haze images using guided apsf and gradient adaptive convolution. In ACM International Conference on Multimedia, 2023. 1, 2, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 129, + 545, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 171 + ], + "type": "text", + "content": "[15] Mingye Ju, Can Ding, Charles A Guo, Wenqi Ren, and Dacheng Tao. Idrlp: Image dehazing using region line prior. IEEE Transactions on Image Processing, 30:9043-9057, 2021. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 172, + 545, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 172, + 545, + 216 + ], + "spans": [ + { + "bbox": [ + 307, + 172, + 545, + 216 + ], + "type": "text", + "content": "[16] Mingye Ju, Can Ding, Wenqi Ren, Yi Yang, Dengyin Zhang, and Y Jay Guo. Ide: Image dehazing and exposure using an enhanced atmospheric scattering model. IEEE Transactions on Image Processing, 30:2180-2192, 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 217, + 545, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 217, + 545, + 258 + ], + "spans": [ + { + "bbox": [ + 307, + 217, + 545, + 258 + ], + "type": "text", + "content": "[17] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In IEEE International Conference on Computer Vision, pages 5148-5157, 2021. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 260, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 260, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 307, + 260, + 545, + 293 + ], + "type": "text", + "content": "[18] Beomhyuk Koo and Gyeonghwan Kim. Nighttime haze removal with glow decomposition using gan. In Pattern Recognition: 5th Asian Conference, pages 807-820, 2020. 
2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 294, + 545, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 294, + 545, + 337 + ], + "spans": [ + { + "bbox": [ + 307, + 294, + 545, + 337 + ], + "type": "text", + "content": "[19] Shiba Kuanar, Dwarikanath Mahapatra, Monalisa Bilas, and KR Rao. Multi-path dilated convolution network for haze and glow removal in nighttime images. The Visual Computer, pages 1-14, 2022. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 338, + 545, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 338, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 307, + 338, + 545, + 380 + ], + "type": "text", + "content": "[20] Boyi Li, Xiulian Peng, Zhangyang Wang, Jizheng Xu, and Dan Feng. Aod-net: All-in-one dehazing network. In IEEE International Conference on Computer Vision, pages 4770-4778, 2017. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 382, + 545, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 382, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 307, + 382, + 545, + 425 + ], + "type": "text", + "content": "[21] Boyi Li, Wenqi Ren, Dengpan Fu, Dacheng Tao, Dan Feng, Wenjun Zeng, and Zhangyang Wang. Benchmarking single-image dehazing and beyond. IEEE Transactions on Image Processing, 28(1):492-505, 2018. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 426, + 545, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 426, + 545, + 469 + ], + "spans": [ + { + "bbox": [ + 307, + 426, + 545, + 469 + ], + "type": "text", + "content": "[22] Chongyi Li, Chun-Le Guo, Man Zhou, Zhexin Liang, Shangchen Zhou, Ruicheng Feng, and Chen Change Loy. Embedding fourier for ultra-high-definition low-light image enhancement. arXiv preprint arXiv:2302.11831, 2023. 2, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 471, + 545, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 471, + 545, + 503 + ], + "spans": [ + { + "bbox": [ + 307, + 471, + 545, + 503 + ], + "type": "text", + "content": "[23] Kun Li, Dan Guo, and Meng Wang. Proposal-free video grounding with contextual pyramid network. In AAAI Conference on Artificial Intelligence, pages 1902-1910, 2021. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 504, + 545, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 504, + 545, + 546 + ], + "spans": [ + { + "bbox": [ + 307, + 504, + 545, + 546 + ], + "type": "text", + "content": "[24] Yu Li, Robby T Tan, and Michael S Brown. Nighttime haze removal with glow and multiple light colors. In IEEE International Conference on Computer Vision, pages 226-234, 2015. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 548, + 545, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 548, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 307, + 548, + 545, + 591 + ], + "type": "text", + "content": "[25] Yudong Liang, Bin Wang, Wangmeng Zuo, Jiaying Liu, and Wenqi Ren. Self-supervised learning and adaptation for single image dehazing. In International Joint Conference on Artificial Intelligence, pages 1-15, 2022. 
1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 592, + 545, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 592, + 545, + 634 + ], + "spans": [ + { + "bbox": [ + 307, + 592, + 545, + 634 + ], + "type": "text", + "content": "[26] Yinghong Liao, Zhuo Su, Xiangguo Liang, and Bin Qiu. Hdp-net: Haze density prediction network for nighttime de-hazing. In Pacific Rim Conference on Multimedia, pages 469-480, 2018. 1, 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 635, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 679 + ], + "type": "text", + "content": "[27] Xiaohong Liu, Yongrui Ma, Zhihao Shi, and Jun Chen. Griddehazenet: Attention-based multi-scale network for image dehazing. In IEEE International Conference on Computer Vision, pages 7314-7323, 2019. 1, 6" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 681, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 545, + 712 + ], + "type": "text", + "content": "[28] Yun Liu, Anzhi Wang, Hao Zhou, and Pengfei Jia. Single nighttime image dehazing based on image decomposition. Signal Processing, 183:107986, 2021. 1" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2639" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[29] Yun Liu, Zhongsheng Yan, Jinge Tan, and Yuche Li. Multipurpose oriented single nighttime image haze removal based on unified variational retina model. IEEE Transactions on Circuits and Systems for Video Technology, 33(4):1643-1657, 2022. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 128, + 287, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 128, + 287, + 172 + ], + "spans": [ + { + "bbox": [ + 48, + 128, + 287, + 172 + ], + "type": "text", + "content": "[30] Yun Liu, Zhongsheng Yan, Aimin Wu, and Tian Ye. Night-time image dehazing based on variational decomposition model. In IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 640-649, 2022. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 172, + 287, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 172, + 287, + 216 + ], + "spans": [ + { + "bbox": [ + 48, + 172, + 287, + 216 + ], + "type": "text", + "content": "[31] Yun Liu, Zhongsheng Yan, Sixiang Chen, Tian Ye, Wenqi Ren, and Erkang Chen. Nighthazeformer: Single nighttime haze removal using prior query transformer. In ACM International Conference on Multimedia, 2023. 
1, 2, 3, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 217, + 287, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 217, + 287, + 270 + ], + "spans": [ + { + "bbox": [ + 48, + 217, + 287, + 270 + ], + "type": "text", + "content": "[32] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In IEEE International Conference on Computer Vision, pages 10012-10022, 2021. 2, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 271, + 287, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 271, + 287, + 325 + ], + "spans": [ + { + "bbox": [ + 48, + 271, + 287, + 325 + ], + "type": "text", + "content": "[33] Wenqi Ren, Sifei Liu, Lin Ma, Qianqian Xu, Xiangyu Xu, Xiaochun Cao, Junping Du, and Ming-Hsuan Yang. Low-light image enhancement via a deep hybrid network. IEEE Transactions on Image Processing, 28(9):4364-4375, 2019. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 326, + 287, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 326, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 48, + 326, + 287, + 370 + ], + "type": "text", + "content": "[34] Wenqi Ren, Jinshan Pan, Hua Zhang, Xiaochun Cao, and Ming-Hsuan Yang. Single image dehazing via multi-scale convolutional neural networks with holistic edges. International Journal of Computer Vision, 128:240-259, 2020. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 371, + 287, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 371, + 287, + 414 + ], + "spans": [ + { + "bbox": [ + 48, + 371, + 287, + 414 + ], + "type": "text", + "content": "[35] Yuanjie Shao, Lerenhan Li, Wenqi Ren, Changxin Gao, and Nong Sang. Domain adaptation for image dehazing. In IEEE Conference on Computer Vision and Pattern Recognition, pages 2808-2817, 2020. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 415, + 287, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 415, + 287, + 448 + ], + "spans": [ + { + "bbox": [ + 48, + 415, + 287, + 448 + ], + "type": "text", + "content": "[36] Hao Shen, Zhong-Qiu Zhao, and Wandi Zhang. Adaptive dynamic filtering network for image denoising. In AAAI Conference on Artificial Intelligence, pages 2227-2235, 2023. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 449, + 287, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 287, + 491 + ], + "type": "text", + "content": "[37] Hao Shen, Zhong-Qiu Zhao, Yulun Zhang, and Zhao Zhang. Mutual information-driven triple interaction network for efficient image dehazing. In ACM International Conference on Multimedia, pages 7-16, 2023. 2, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 492, + 287, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 492, + 287, + 524 + ], + "spans": [ + { + "bbox": [ + 48, + 492, + 287, + 524 + ], + "type": "text", + "content": "[38] Yuda Song, Zhuqing He, Hui Qian, and Xin Du. Vision transformers for single image dehazing. IEEE TIP, 32:1927-1941, 2023. 
1, 2, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 525, + 287, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 525, + 287, + 558 + ], + "spans": [ + { + "bbox": [ + 48, + 525, + 287, + 558 + ], + "type": "text", + "content": "[39] Shangquan Sun, Wenqi Ren, and Tao Wang. Rethinking image restoration for object detection. Advances in Neural Information Processing Systems, 35:4461-4474, 2022. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 559, + 287, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 287, + 592 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 287, + 592 + ], + "type": "text", + "content": "[40] Fei Wang, Dan Guo, and Kun Li. Eulermormer: Robust eulerian motion magnification via dynamic filtering within transformer. arXiv preprint arXiv:2312.04152, 2023. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 592, + 287, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 592, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 48, + 592, + 287, + 635 + ], + "type": "text", + "content": "[41] Wenhui Wang, Anna Wang, and Chen Liu. Variational single nighttime image haze removal with a gray haze-line prior. IEEE Transactions on Image Processing, 31:1349-1363, 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 635, + 287, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 679 + ], + "type": "text", + "content": "[42] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[43] Haiyan Wu, Yanyun Qu, Shaohui Lin, Jian Zhou, Ruizhi Qiao, Zhizhong Zhang, Yuan Xie, and Lizhuang Ma. Contrastive learning for compact single image dehazing. In IEEE" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 700 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "Conference on Computer Vision and Pattern Recognition, pages 10551-10560, 2021. 1, 2, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 95, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 95, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 95, + 545, + 149 + ], + "type": "text", + "content": "[44] Rui-Qi Wu, Zheng-Peng Duan, Chun-Le Guo, Zhi Chai, and Chongyi Li. Ridcp: Revitalizing real image dehazing via high-quality codebook priors. In IEEE Conference on Computer Vision and Pattern Recognition, pages 22282-22291, 2023. 
6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "type": "text", + "content": "[45] Wending Yan, Robby T Tan, and Dengxin Dai. Night-time defogging using high-low frequency decomposition and grayscale-color networks. In European Conference on Computer Vision, pages 473-488, 2020. 3, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 197, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 545, + 228 + ], + "type": "text", + "content": "[46] Minmin Yang, Jianchang Liu, and Zhengguo Li. Superpixel-based single nighttime image haze removal. IEEE Transactions on Multimedia, 20(11):3008-3018, 2018. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 230, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 230, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 545, + 274 + ], + "type": "text", + "content": "[47] Hu Yu, Naishan Zheng, Man Zhou, Jie Huang, Zeyu Xiao, and Feng Zhao. Frequency and spatial dual guidance for image dehazing. In European Conference on Computer Vision, pages 181-198, 2022. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 275, + 545, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 275, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 275, + 545, + 308 + ], + "type": "text", + "content": "[48] Jing Zhang and Dacheng Tao. Famed-net: A fast and accurate multi-scale end-to-end dehazing network. IEEE Transactions on Image Processing, 29:72-84, 2019. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 309, + 545, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 309, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 307, + 309, + 545, + 352 + ], + "type": "text", + "content": "[49] Jing Zhang, Yang Cao, and Zengfu Wang. Nighttime haze removal based on a new imaging model. In IEEE International Conference on Image Processing, pages 4557-4561, 2014. 1, 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 354, + 545, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 545, + 408 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 545, + 408 + ], + "type": "text", + "content": "[50] Jing Zhang, Yang Cao, Shuai Fang, Yu Kang, and Chang Wen Chen. Fast haze removal for nighttime image using maximum reflectance prior. In IEEE Conference on Computer Vision and Pattern Recognition, pages 7418-7426, 2017. 1, 3, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 410, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 410, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 410, + 545, + 453 + ], + "type": "text", + "content": "[51] Jing Zhang, Yang Cao, Zheng-Jun Zha, and Dacheng Tao. Nighttime dehazing with a synthetic benchmark. In ACM International Conference on Multimedia, pages 2355-2363, 2020. 
1, 3, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 455, + 545, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 455, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 307, + 455, + 545, + 498 + ], + "type": "text", + "content": "[52] Jingang Zhang, Wenqi Ren, Shengdong Zhang, He Zhang, Yunfeng Nie, Zhe Xue, and Xiaochun Cao. Hierarchical density-aware dehazing network. IEEE Transactions on Cybernetics, 52(10):11187-11199, 2021. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 499, + 545, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 499, + 545, + 553 + ], + "spans": [ + { + "bbox": [ + 307, + 499, + 545, + 553 + ], + "type": "text", + "content": "[53] Shengdong Zhang, Wenqi Ren, Xin Tan, Zhi-Jie Wang, Yong Liu, Jingang Zhang, Xiaoqin Zhang, and Xiaochun Cao. Semantic-aware dehazing network with adaptive feature fusion. IEEE Transactions on Cybernetics, 53(1):454-467, 2021. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 555, + 545, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 555, + 545, + 609 + ], + "spans": [ + { + "bbox": [ + 307, + 555, + 545, + 609 + ], + "type": "text", + "content": "[54] Zhuoran Zheng, Wenqi Ren, Xiaochun Cao, Xiaobin Hu, Tao Wang, Fenglong Song, and Xiuyi Jia. Ultra-high-definition image dehazing via multi-guided bilateral learning. In IEEE Conference on Computer Vision and Pattern Recognition, pages 16180-16189, 2021. 1, 2, 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 611, + 545, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 611, + 545, + 654 + ], + "spans": [ + { + "bbox": [ + 307, + 611, + 545, + 654 + ], + "type": "text", + "content": "[55] Man Zhou, Jie Huang, Chun-Le Guo, and Chongyi Li. FOurmer: an efficient global modeling paradigm for image restoration. In International Conference on Machine Learning, pages 42589-42601, 2023. 4, 6" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 656, + 545, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 656, + 545, + 700 + ], + "spans": [ + { + "bbox": [ + 307, + 656, + 545, + 700 + ], + "type": "text", + "content": "[56] Man Zhou, Keyu Yan, Xueyang Fu, Aiping Liu, and Chengjun Xie. Pan-guided band-aware multi-spectral feature enhancement for pan-sharpening. IEEE Transactions on Computational Imaging, 9:238-249, 2023. 
4" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "2640" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/edb8cbac-0e71-45cd-8cdb-45284f946ab7_content_list.json b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/edb8cbac-0e71-45cd-8cdb-45284f946ab7_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..28b66c674f063b5fe32dc3dd8a707b8ec84d2143 --- /dev/null +++ b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/edb8cbac-0e71-45cd-8cdb-45284f946ab7_content_list.json @@ -0,0 +1,1592 @@ +[ + { + "type": "text", + "text": "A Simple Baseline for Efficient Hand Mesh Reconstruction", + "text_level": 1, + "bbox": [ + 187, + 130, + 781, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhishan Zhou*, Shihao Zhou*, Zhi Lv, Minqiang Zou, Yao Tang, Jiajun Liang† \nJiiov Technology", + "bbox": [ + 166, + 179, + 802, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{zhishan.zhou, shihao.zhou, zhi.lv, minqiang.zou, yao.tang, jiajun.liang}@jiiov.com", + "bbox": [ + 114, + 219, + 854, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "http://simplehand.github.io", + "bbox": [ + 372, + 252, + 589, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 264, + 304, + 341, + 320 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hand mesh reconstruction has attracted considerable attention in recent years, with various approaches and techniques being proposed. Some of these methods incorporate complex components and designs, which, while effective, may complicate the model and hinder efficiency. In this paper, we decompose the mesh decoder into token generator and mesh regressor. Through extensive ablation experiments, we found that the token generator should select discriminating and representative points, while the mesh regressor needs to upsample sparse keypoints into dense meshes in multiple stages. Given these functionalities, we can achieve high performance with minimal computational resources. Based on this observation, we propose a simple yet effective baseline that outperforms state-of-the-art methods by a large margin, while maintaining real-time efficiency. Our method outperforms existing solutions, achieving state-of-the-art (SOTA) results across multiple datasets. On the FreiHAND dataset, our approach produced a PA-MPJPE of 5.8mm and a PA-MPVPE of 6.1mm. Similarly, on the DexYCB dataset, we observed a PA-MPJPE of 5.5mm and a PA-MPVPE of 5.5mm. As for performance speed, our method reached up to 33 frames per second (fps) when using HRNet and up to 70 fps when employing FastViT-MA36. Code will be made available.", + "bbox": [ + 76, + 335, + 473, + 698 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 728, + 209, + 744 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The field of hand mesh reconstruction has seen rapid advancements, with various types of mesh decoders being proposed. Despite their commendable performance, these methods often suffer from high system complexity, involving unnecessary components that may hinder efficiency. To facilitate a clear discussion, we decompose the mesh decoder into two primary components: the token generator and the mesh regressor.", + "bbox": [ + 75, + 753, + 468, + 876 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3ee37e25ed7c7f5afd0b7a6936538d9ed086bbbeea4e3a8ac57f87270d61a929.jpg", + "image_caption": [ + "Figure 1. Trade-off between Accuracy and Inference Speed. Our technique surpasses non-real-time methods $(\\leq 40$ fps) in both speed and precision. Compared to real-time methods $(\\geq 70$ fps), it offers a substantial boost in accuracy while preserving comparable speeds. For fair comparison, all speed evaluations were conducted on a 2080ti GPU with a batch size of one." + ], + "image_footnote": [], + "bbox": [ + 500, + 311, + 898, + 511 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The token generator serves a crucial role by integrating prior information with image features to extract task-specific features. For instance, FastMETRO [5] employs a strategy to predict weak-perspective camera parameters, which aggregates image features. MobRecon [4] develops a stacked encoding network to obtain gradually refined encoding features, and applies a technique known as pose pooling to suppress features that are unrelated to joint landmarks. PointHMR [7] on the other hand, proposes to use features sampled at positions of vertices projected from 3D to 2D spaces as intermediate guidance. These approaches collectively provide informative and discriminating features that enhance the overall performance of the system.", + "bbox": [ + 496, + 640, + 893, + 837 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The mesh regressor, the second component, decodes the tokenized features obtained from the token generator into mesh predictions. FastMETRO [5] takes a set of learnable joint tokens and vertex tokens as input and masks", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equally contribution. † Corresponding author.", + "bbox": [ + 106, + 886, + 362, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1367", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "self-attention of non-adjacent vertices according to the topology of the triangle mesh during training. MobRecon [4] employs a strategy of 2D-to-3D lifting and Pose-to-vertex lifting to gradually approximate meshes. MeshGraphormer [11] uses a coarse template mesh for positional encoding and then applies a linear Multi-Layer Perceptron (MLP) to sample the coarse mesh up to the original resolution. 
These methods aim to alleviate training difficulties due to heterogeneous modalities.", + "bbox": [ + 75, + 90, + 472, + 227 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Through investigation on existing methods, we found a interesting phenomenon that, although some methods shares same performance, they differ in specific failure cases. Namely, methods with coarse sampling strategy lack perceptual ability for fine-grained gestures such as pinch. Methods with limited upsample layers struggles in generating reasonable hand shapes. This observation prompts us to question: How different structures make effect on mesh decoder? By answering the question, we can streamline the process, eliminating excessive computation and complex components, to complete mesh prediction in a simple and efficient way. To design concise experiments, we start from the simplest structure for the aforementioned two modules, then gradually add and optimize the most commonly used components abstracted from the state-of-the-art (SOTA) methods.", + "bbox": [ + 75, + 228, + 472, + 470 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Through extensive ablation experiments, we discovered that the important structure of token generator is to sample discriminating and representative points, while the important structure of mesh generator is to upsample sparse keypoints into dense meshes. For implicitly, in the following paper, we define each of these structure as core structure. In the model design process, provided that the core structure's functionality is fulfilled, high performance can be achieved with minimal computational resources.", + "bbox": [ + 75, + 470, + 472, + 607 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Based on these observations, we propose a simple baseline that surpasses the SOTA methods by a significant margin and is computationally efficient. Referring to Figure 1, our proposed technique delivers state-of-the-art performance on various datasets. On the FreiHAND [28] dataset, it recorded a PA-MPJPE of $5.8\\mathrm{mm}$ and PA-MPVPE of $6.1\\mathrm{mm}$ . When tested on the DexYCB [1] dataset, these metrics were further refined to a PA-MPJPE of $5.5\\mathrm{mm}$ and a PA-MPVPE of $5.5\\mathrm{mm}$ . Our method is also advantaged in efficiency, achieving 33 frames per second (fps) on HRNet[23] and an impressive 70 fps on FastViTMA36 [21]. Our contributions can be summarized as follows:", + "bbox": [ + 75, + 608, + 472, + 806 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. We abstract existing methods into token generator and mesh regressor modules, and reveal the core structures of these two modules respectively.", + "2. Based on these core structures, we developed a streamlined, real-time hand mesh regression module that excels in both efficiency and accuracy." + ], + "bbox": [ + 76, + 809, + 468, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Our method has achieved PA-MPJPE of $5.7\\mathrm{mm}$ and PA-MPVPE of $6.0\\mathrm{mm}$ on FreiHAND, and achieved SOTA results on multiple datasets, demonstrating its effectiveness and generalizability.", + "bbox": [ + 500, + 90, + 890, + 152 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. 
Related Work", + "text_level": 1, + "bbox": [ + 500, + 164, + 640, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we briefly review existing methods of hand mesh reconstruction which usually include two main components: a token generator and a mesh regressor. Token generator processes the backbone image feature and generates tokens fed to the decoder. Mesh regressor decodes the input tokens into 3D mesh directly or parametric hand model coefficient.", + "bbox": [ + 496, + 190, + 893, + 297 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Hand Mesh Reconstruction", + "text_level": 1, + "bbox": [ + 500, + 303, + 746, + 319 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Estimating the 3D hand mesh from a single image has been widely researched. [27] proposes an end-to-end framework to recover hand mesh from a monocular RGB image. They use the 2D heatmap as input tokens and fully convolutional and fully connected layers to regress the MANO [17] parameters.", + "bbox": [ + 496, + 325, + 890, + 416 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Transformer [22] has shown powerful performance in language and vision tasks which could model long range relation among input tokens. MetaFormer [26] argues that the general architecture of transformers instead of the specific token mixer is the key player. They replace the self-attention module with a simple spatial pooling operator and achieve competitive performance with fewer parameters and less computation. METRO [11] extracts a single global image feature with a convolutional neural network and performs position encoding by repeatedly concatenating the image feature with 3D coordinates of a mesh template. A multi-layer transformer encoder with progressive dimensionality reduction regresses the 3D coordinates of mesh vertices with these input tokens. Due to the constraints of memory and computation, the transformer only processes a coarse mesh by sub-sampling twice with a sampling algorithm [16], and Multi-Layer Perceptrons (MLPs) are then used to upsample the coarse mesh to the original mesh.", + "bbox": [ + 496, + 417, + 892, + 702 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Graph convolutional neural network(GCNN) [9] is good at modeling the local interaction between neighbor vertices, thus it is very appropriate for mesh reconstruction. Pose2Mesh [6] designs a cascaded architecture to regress 3D mesh vertices from 2D pose directly using GCNN. MeshGraphormer [11] combines the ability of transformer and GCNN presenting a graph-convolution-reinforced transformer to model both local and global interactions.", + "bbox": [ + 496, + 704, + 893, + 839 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Instead of extracting a global feature from the input image, pointHMR [7] argues that sampling features guided by vertex-relevant points could better utilize the correspondence between encoded features and spatial positions. 
They", + "bbox": [ + 496, + 839, + 893, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "1368", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6a3720ecd9fe43273f61ce6d6529d5125c1f798c85e7f5a280401577a18d3ca2.jpg", + "image_caption": [ + "A" + ], + "image_footnote": [], + "bbox": [ + 76, + 85, + 261, + 141 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6d35ca7d1d67629082f9ac3149aebeabbaae6697081343addc14ff7b7c145b08.jpg", + "image_caption": [ + "B" + ], + "image_footnote": [], + "bbox": [ + 295, + 87, + 475, + 141 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1607683d64c667f87876a60767c40cc27dbe78cf9ce6feb39a211de33c49f849.jpg", + "image_caption": [ + "C", + "Image Feature", + "Convolutions" + ], + "image_footnote": [], + "bbox": [ + 504, + 87, + 767, + 161 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f9c6a8993b853348f669b0126f42922d74f944f22459f59dd032e35c64de2986.jpg", + "image_caption": [ + "D" + ], + "image_footnote": [], + "bbox": [ + 78, + 199, + 254, + 277 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/b488f650baf99856152306e24e45ca446d0354ef0e2de6103a86430e7531656b.jpg", + "image_caption": [ + "E" + ], + "image_footnote": [], + "bbox": [ + 297, + 200, + 475, + 277 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/b5023cd8a8b274462102be0957e2ea11211cafea6c3895238bccf03e059cc838.jpg", + "image_caption": [ + "F", + "Figure 2. An illustration demonstrates various designs of token generators. The grids colored in red represent the sampled points. a) Global feature; b) Grid sampling; c) Keypoint-guided sampling on the original feature map; d) Keypoint-guided sampling with $4\\mathrm{x}$ upsampling, resulting in an enhanced feature; e) Keypoint-guided sampling with $4\\mathrm{x}$ upsampling, where the feature is further improved by convolution; f) Coarse-mesh-guided point sampling with $4\\mathrm{x}$ upsampling." + ], + "image_footnote": [], + "bbox": [ + 504, + 200, + 681, + 277 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/90895b0a416b190f4fd1725853a12cb7b05cf084bb787e60a120cdb1938d299f.jpg", + "image_caption": [ + "Coarse Mesh Predictions" + ], + "image_footnote": [], + "bbox": [ + 700, + 178, + 883, + 199 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "conduct feature sampling by element-wise multiplication of image feature and 2D heatmap trained by projection of 3D mesh vertices. These sampled features are then fed into the transformer encoder with progressive attention mask as the form of vertex token. The progressively decreased local connection range realized by constraining the attention mask encourage the model to consider the local relationship between neighbor vertices. They also use linear projection to reduce the dimension of the encoded token and upsampling algorithm [16] to expand the sparse vertices into original dense vertices.", + "bbox": [ + 75, + 401, + 472, + 566 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2.Lightweight Networks", + "text_level": 1, + "bbox": [ + 76, + 578, + 284, + 594 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To achieve real-time hand mesh reconstruction, many lightweight networks have been studied for years. 
FastViT [21] is a hybrid vision transformer architecture which obtains state-of-the-art latency-accuracy tradeoff by structural reparameterization and train-time overparametrization techniques. MobRecon [4] designs multiple complicated modules to improve efficiency, including a stacked 2D encoding structure, a map-based position regression 2D-to-3D block and a graph operator based on spiral sampling [10]. FastMETRO [5] identifies the performance bottleneck of encoder-based transformers is caused by token design. They propose an encoder-decoder architecture to disentangle the interaction among input tokens which reduces the parameter.", + "bbox": [ + 75, + 602, + 468, + 815 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 829, + 166, + 844 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In our research, we dissected the existing methods into two key components: a token generator and a mesh regressor. However, defining the optimal core structure for each of", + "bbox": [ + 75, + 854, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "these modules remains a challenging task. For each module, we start with a fundamental, intuitive structure, and then progressively incorporate the most commonly used components, which we have abstracted from state-of-the-art (SOTA) methods.", + "bbox": [ + 496, + 401, + 892, + 476 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given that these two modules, the token generator and the mesh regressor, operate in tandem, it's important to keep one constant when analysing the other. In practical terms, we first conduct experiments on the mesh regressor while keeping the token generator, as depicted in Figure 2-B, constant. Then, we apply the mesh regressor configuration that demonstrated the best performance to the token generator in subsequent experiments.", + "bbox": [ + 496, + 478, + 892, + 599 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Token Generator", + "text_level": 1, + "bbox": [ + 500, + 612, + 668, + 627 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a single image of dimensions $\\{H,W\\}$ , our model utilizes a backbone to extract image features $X_{b}^{\\in \\frac{H}{32} \\times \\frac{W}{32} \\times C}$ . The token generator $T$ takes $X_{b}$ as input and produces tokenized mesh feature $X_{m}^{\\in N \\times C}$ , where $N$ denotes the number of sampled points. Thus, we can express this as $X_{m} = T(X_{b})$ .", + "bbox": [ + 496, + 636, + 890, + 733 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Starting with the simplest implementation, we apply a single spatial pooling (Figure 2-A). This approach establishes a surprisingly competitive baseline, comparable to the Fastmetro [5]. Changing spatial pooling to point sample (Figure 2-B) improves the performance. To further improve the quality of the feature, we follow the MobRecon [4] model and conduct keypoint-guided point sampling (Figure 2-C). However, this modification did not yield any noticeable improvement.", + "bbox": [ + 496, + 733, + 892, + 869 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Upon visual inspection, it appears that a $7 \\times 7$ resolution is not sufficiently discriminating. 
Consequently, we apply", + "bbox": [ + 498, + 869, + 890, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "1369", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "deconvolution on $X_{b}$ to sample the feature map to $14 \\times 14$ then $28 \\times 28$ (Figure 2-D), respectively. This approach results in progressive improvement, but it does not work for $8 \\times$ deconvolution or larger.", + "bbox": [ + 76, + 90, + 468, + 151 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Models such as MobRecon [4] and PointHMR [7] report improvements by enhancing features, for example, using a FPN-like structure or stacked blocks. In our study, we tested different $4 \\times$ upsample schemes, including double $2 \\times$ upsampling, directly $4 \\times$ upsampling, and adding more convolution layers during the upsampling process (Figure 2-E). Although these schemes vary in computational complexity, their performance remains consistent.", + "bbox": [ + 75, + 152, + 467, + 272 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We also tested the coarse mesh sampling method proposed by FastMETRO [5]. This method (Figure 2-F) generates denser points compared to keypoint-guided sampling but does not offer any significant advantages. Detailed results are shown in table 5. These experiments suggest that keypoint-guided point sampling at an appropriate resolution is a crucial structure for the token generator. As such, feature enhancement and exhaustive point sampling are not as necessary as initially thought.", + "bbox": [ + 75, + 273, + 467, + 411 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Mesh Regressor", + "text_level": 1, + "bbox": [ + 76, + 422, + 235, + 438 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The mesh regressor $R$ takes the tokenized mesh feature $X_{m}^{\\in N\\times C}$ as input and outputs predicted meshes Figure 3. [5] [4] adopts a multi-stage approximation approach and proposes various methods to formulate the topology relationship between joints and mesh. Finding their intersection components, we construct a cascading upsampling mesh regressor $R$ using a series of decoder layers:", + "bbox": [ + 76, + 446, + 467, + 551 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nR = H _ {k} H _ {k - 1} \\dots H _ {0} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 568, + 467, + 583 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Each decoder layer $H_{k}$ takes the calculated tokens $T_{k}$ as input, then subsequently processes these using a dimension reduce layer, metaformer, and upsample layer:", + "bbox": [ + 75, + 598, + 467, + 643 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nH _ {k} \\left(X _ {k}\\right) = U _ {k} \\left(M F _ {k} \\left(P _ {k} \\left(X _ {k}\\right)\\right)\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 165, + 657, + 467, + 672 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $U_{k}$ , $P_{k}$ denotes the $k_{th}$ upsample layer and dimension reduce layer respectively, each composed of a single-layer MLP. The upsample layer increases token numbers, while the dimension reduce layer modifies channel shapes. 
$MF_{k}$ denotes the $k_{th}$ metaformer block, $T_{k}$ is the $k_{th}$ output token where $X_{k + 1} = H_{k}(X_{k})$ , $X_0 = X_m$ .", + "bbox": [ + 76, + 686, + 467, + 777 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Let $d_{k}$ be the output dimension of $U_{k}$ and token numbers of $MF_{k}$ , $n_{k}, c_{k}$ and $m_{k}$ are the block number, tokenmixer and block dimensions for $MF_{k}$ . We start with the first layer $MF_{0}$ to demonstrate its operation. For the $N \\times C$ shaped tensor $T_{0}$ , $P_{0}$ projects it to $N \\times c$ , which is then processed by $MF_{0}$ and outputs a tensor of the same shape. Subsequently, $U_{k}$ upsamples it to $d \\times c$ . The following decoder layers repeat this procedure to output $X_{k}$ .", + "bbox": [ + 75, + 780, + 467, + 900 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/20bc45f111a15b135439c8fb106e664e46cf845dd2b2e73932e6dacaf2d9fa1f.jpg", + "image_caption": [ + "Figure 3. Architecture of decoder layer in mesh regressor. It is composed of sequentially connected dimension reduce layer, metaformer block and upsample layer." + ], + "image_footnote": [], + "bbox": [ + 504, + 90, + 885, + 238 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We began from a baseline where $\\{k = 1, n = \\{1\\}, d = \\{778\\}, m = \\{\\text{identity}\\}\\}$ , which yielded competitive performance despite its simplicity. We then increase flops by enlarge $n$ but observe no improvement. Inspired by [4], We sequentially add blocks with an increasing value of $d$ . When $k \\leq 3$ , Significant performance improvements are observed. However, as $k$ continues to increase beyond this point, no further gains are detected. Moreover, Modifying the token mixer from ide to attn also beneficial. However, for fixed $d$ , simply increasing $n$ did not improve performance. According to our experimental findings, the core function of each decoder layer is to incrementally elevate the number of tokens from an initial quantity of 21 up to 778. Additional strategies like augmenting computational workload or altering intricate specifics of the network appear to have minimal impact. In our best practice, parameters were set to $\\{k = 3, n = \\{1, 1, 1\\}, d = \\{21, 84, 336\\}, m = \\{\\text{attn}, \\text{attn}, \\text{attn}\\}\\}$ .", + "bbox": [ + 498, + 321, + 890, + 594 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Existing hand joints and mesh topology modulation approaches stand out due to their ability to incorporate spatial information. However, their heuristic design is heavily reliant on hyperparameters and can be labor-intensive. Recognizing these strengths, we propose a novel method that modulates spatial relations without requiring manual design or additional computational resources. We achieve this by introducing learnable position embedding parameters $emb_{k}$ to each output tensor $X_{k}$ where", + "bbox": [ + 498, + 595, + 890, + 731 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nX _ {k} = X _ {k} + e m b _ {k} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 632, + 748, + 890, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Framework Design", + "text_level": 1, + "bbox": [ + 500, + 785, + 683, + 801 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As discussed above, the image feature extracted by the backbone is sequentially processed by both the token generator and the mesh regressor. 
The overall framework can be simply computed by $R(T(X_b))$ .", + "bbox": [ + 498, + 809, + 890, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The core structures form the basis of the overall structure, which is depicted in Figure 4. Given an input image of", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "1370", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/8edb852e0115beeab0c0bc7e7aeb837e5e357b5ae47ed067234abb1996615d81.jpg", + "image_caption": [ + "Figure 4. Overview of our architecture. The architecture of our model proceeds as follows: Firstly, the image feature $X_{b}$ is extracted via a backbone network. These features are then passed to our token generator module, responsible for predicting 2D keypoints and performing point sampling on the upsampled feature map, thus generating joint tokens. Next, these joint tokens are input into our mesh regressor module, which carries out the mesh prediction to get the final coordinates." + ], + "image_footnote": [], + "bbox": [ + 81, + 89, + 880, + 224 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "size $\\{H, W\\}$ , we conduct point sampling guided by the predicted 21 keypoints at a resolution of $H/8$ , $W/8$ . For image classification style backbones like Fast-ViT, we apply a 4x upsample deconvolution to its final layer. However, for segmentation style backbones like HRNet, we directly use the feature on the corresponding resolution. In the mesh regressor, we apply position encoding before each MetaFormer block. Although this is not regarded as a core structure, it serves as a beneficial addition.", + "bbox": [ + 75, + 316, + 473, + 454 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Loss Functions", + "text_level": 1, + "bbox": [ + 76, + 463, + 230, + 478 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The method proposed in this paper is trained with supervision for vertices, 3D joints, and 2D joints. In our implementation, both the 2D joints, denoted as $J_{2d}$ , and the vertices, denoted as $V_{3d}$ , are directly predicted by the model's output. The 3D joints, represented as $J_{3d}'$ , are calculated using the equation $J_{3d} = J \\times V_{3d}$ , where $J$ signifies the regression matrix. All of these components utilize the L1 loss to compute the discrepancy between the ground truth and the predictions. 
The losses for the vertex, 3D joint, and 2D joint, denoted as $L_{vert}$ , $L_{J_{3d}}$ , and $L_{J_{2d}}$ , are respectively formulated as follows:", + "bbox": [ + 75, + 486, + 472, + 652 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {J _ {3 d}} = \\frac {1}{M _ {J _ {3 d}}} \\| J _ {3 d} - J _ {3 d} ^ {\\prime} \\| _ {1} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 178, + 662, + 468, + 696 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {J _ {2 d}} = \\frac {1}{M _ {J _ {2 d}}} \\| J _ {2 d} - J _ {2 d} ^ {\\prime} \\| _ {1} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 178, + 705, + 468, + 738 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {v e r t} = \\frac {1}{M _ {V _ {3 d}}} \\| V _ {3 d} - V _ {3 d} ^ {\\prime} \\| _ {1} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 747, + 468, + 779 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here, $J_{3d} \\in R^{M \\times 3}$ represents all the ground truth points, and the symbols annotated with primes denote the predicted values. The overall loss function is defined as:", + "bbox": [ + 76, + 785, + 470, + 832 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL = w _ {3 d} L _ {J _ {3 d}} + w _ {2 d} L _ {J _ {2 d}} + w _ {v e r t} L _ {v e r t} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 847, + 468, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given that the primary objective of this study is mesh prediction, 2D keypoints only affect point sampling and", + "bbox": [ + 76, + 869, + 472, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "thus do not need to be highly accurate, we have accordingly adjusted the coefficients $w_{3d}$ , $w_{2d}$ , and $w_{vert}$ to 10, 1, and 10, respectively.", + "bbox": [ + 498, + 316, + 893, + 363 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 498, + 376, + 633, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 401, + 718, + 417 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our network is implemented based on Pytorch [15]. We use HRNet64[23] and FastViT-MA36 [21] as our backbones, with their initial weights pre-trained on ImageNet. We use the AdamW [8] optimizer to train our network, with a total of 100 epochs. The learning rate is initially set to 5e-4, and then adjusted to 5e-5 after 50 epochs. We train the network with eight RTX2080Ti GPUs, with a batch size of 32 per GPU. It costs 7 hours training with FastViT-MA36 backbone and 11 hours with HRNet. The features of intermediate layers are directly fed to the Token Generator without extra upsampling layer when the backbone is HRNetw64. The Mesh Regressor has three Encoder Layers, with the corresponding input token numbers being [21, 84, 336], output token numbers being [84, 336, 778], and feature dimensions being [256, 128, 64] respectively. We adopt Attention as the default token mixer, as its performance is slightly better.", + "bbox": [ + 496, + 425, + 893, + 667 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Datasets", + "text_level": 1, + "bbox": [ + 500, + 676, + 602, + 691 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our primary experiments and analyses are conducted on the FreiHAND [28] dataset. 
In order to validate the generalization of our method, we also do experiments on the large-scale 3D hand-object dataset, DexYCB [1]. The FreiHAND dataset contains 130,240 training samples and 3,960 testing samples. DexYCB contains 406,888 training samples and 78,768 testing samples.", + "bbox": [ + 496, + 700, + 893, + 808 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Evaluation Metrics", + "text_level": 1, + "bbox": [ + 500, + 816, + 684, + 830 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To evaluate the accuracy of 3D Hand Mesh Reconstruction methods, we adopt five metrics: Procrustes-aligned mean per joint position error (PA-MPJPE), Procrustes-aligned mean per vertex position error (PA-MPVPE), mean per", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "1371", + "bbox": [ + 483, + 944, + 513, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/5bc7a39c8b8b93a6e512d7ecfcf55dcac12498215009216129c03c51e79f79bd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | Backbone | PA-MPJPE ↓ | PA-MPVPE ↓ | F@05 ↑ | F@15 ↑ | FPS
I2L-MeshNet [13] | ResNet50 | 7.4 | 7.6 | 0.681 | 0.973 | 72
CMR [3] | ResNet50 | 6.9 | 7.0 | 0.715 | 0.977 | -
I2UV [2] | ResNet50 | 7.2 | 7.4 | 0.682 | 0.973 | -
Tang et al. [20] | ResNet50 | 6.7 | 6.7 | 0.724 | 0.981 | 47
MobRecon [4] | DenseStack | 6.9 | 7.2 | 0.694 | 0.979 | 80
METRO [27] | HRNet | 6.7 | 6.8 | 0.717 | 0.981 | 27
MeshGraphormer [11] | HRNet | 6.3 | 6.5 | 0.738 | 0.983 | 24
FastMETRO [5] | HRNet | 6.5 | 7.1 | 0.687 | 0.983 | 28
Deformer [25] | HRNet | 6.2 | 6.4 | 0.743 | 0.984 | -
PointHMR [7] | HRNet | 6.1 | 6.6 | 0.720 | 0.984 | -
FastViT [21] | FastViT-MA36 | 6.6 | 6.7 | 0.722 | 0.981 | 84
Ours | HRNet | 5.8 | 6.1 | 0.766 | 0.986 | 33
Ours | FastViT-MA36 | 5.7 | 6.0 | 0.772 | 0.986 | 70
", + "bbox": [ + 163, + 87, + 807, + 290 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "joint position error (MPJPE), mean per vertex position error (MPVPE), and F-Score. PA-MPJPE and PA-MPVPE refer to the MPJPE and MPVPE after aligning the predicted hand results with the Ground Truth using Procrustes alignment, respectively. These two metrics do not consider the impact of global rotation and scale.", + "bbox": [ + 75, + 354, + 470, + 446 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Results", + "text_level": 1, + "bbox": [ + 76, + 457, + 171, + 472 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison with previous methods To validate our proposed modules., we adopted HRNet and FastViT-MA36 as backbones for non-real-time and real-time methods respectively, following established models [27] [11] [5] [21]. For fair comparison, we provide performance metrics without Test-Time Augmentation (TTA) and FPS without TensorRT optimization. Table 1 shows that our method, despite being slightly slower than FastViT, improves PA-MPJPE by $0.9\\mathrm{mm}$ . Compared to transformer-based methods, our approach demonstrates superior speed and performance, while only requiring $10\\%$ of parameters, as shown in Table 2.", + "bbox": [ + 75, + 481, + 468, + 648 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The qualitative comparison results are shown in the figure 5. Compared to previous methods, our method produces more accurate hand reconstruction results.", + "bbox": [ + 75, + 648, + 468, + 691 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/b745dfb06061538698f666f3c7deba060c90faa4769c618efc261b94865d0150.jpg", + "table_caption": [ + "Table 1. Results on the FreiHAND dataset. Our results are shown in bold. “-” indicates not reported. Our results surpass all existing methods in terms of accuracy metrics." + ], + "table_footnote": [], + "table_body": "
Method | #Params | PA-MPJPE ↓ | PA-MPVPE ↓
METRO [27] | 102M | 6.7 | 6.8
MeshGraphormer [11] | 98M | 6.3 | 6.5
FastMETRO [5] | 25M | 6.5 | 7.1
Ours | 1.9M | 5.8 | 6.1
", + "bbox": [ + 81, + 704, + 465, + 770 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation on DexYCB We employed the large-scale hand-object dataset DexYCB to validate our method's ef", + "bbox": [ + 75, + 869, + 468, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "fectiveness and generalizability. As shown in Table 3, our model outperforms existing single-image input methods on all metrics. Significantly, we surpassed previous benchmarks by $1.5\\mathrm{mm}$ and $0.8\\mathrm{mm}$ on the MPJPE and MPVPE measures respectively, thereby setting new standards and demonstrating our method's broad applicability.", + "bbox": [ + 496, + 354, + 893, + 446 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/4c59ef976a867e9b9a219538399ea3cfe27df634ed2e44ef2dcd7931fc3c8246.jpg", + "table_caption": [ + "Table 2. Comparison of transformer-based approaches. #Params refer to the network parameters that are not included within the backbone structure of the model. Our approach not only surpasses existing benchmarks in key metrics but also achieves a parameter reduction of one to two orders of magnitude." + ], + "table_footnote": [], + "table_body": "
Method | PA-MPJPE ↓ | PA-MPVPE ↓ | MPJPE ↓ | MPVPE ↓
METRO [27] | 7.0 | - | - | -
Spurr et al. [19] | 6.8 | - | - | -
Liu et al. [12] | 6.6 | - | - | -
HandOccNet [14] | 5.8 | 5.5 | 14.0 | 13.1
MobRecon [4] | 6.4 | 5.6 | 14.2 | 13.1
H2ONet [24] | 5.7 | 5.5 | 14.0 | 13.0
Ours | 5.5 | 5.5 | 12.4 | 12.1
", + "bbox": [ + 504, + 459, + 888, + 551 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3. Results on DexYCB. Our method shows advantages on Procrustes-Aligned metrics and surpassed the previous methods by a large margin on non-Procrustes-Aligned metrics.", + "bbox": [ + 496, + 561, + 893, + 604 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.5. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 633, + 653, + 648 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To thoroughly validate the various parameter combinations, a large number of ablation experiments were conducted. For efficiency, all ablation experiments were implemented on smaller models (e.g., Hiera-tiny). After identifying the optimal parameter combination, it is then applied to the standard models to facilitate a fair comparison with existing methods.", + "bbox": [ + 496, + 657, + 890, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The state-of-the-art backbone, Hiera-Tiny [18], is utilized in our study as a strong baseline. We conduct a series of ablation experiments on the FreiHAND dataset with the aim of examining the efficacy of the structure we propose.", + "bbox": [ + 496, + 763, + 890, + 824 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Effectiveness of Our Token Generator and Mesh Regressor. In order to evaluate the effectiveness of our Token Generator and Mesh Regressor, we initially set up a standard baseline model. This model's Token Generator is constructed based on global features, while its Mesh Regres", + "bbox": [ + 496, + 825, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "1372", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f44ac2e201fb60a95408b2b9992d075ef5af700aff0b82debf429c81e83c67a5.jpg", + "image_caption": [ + "Figure 5. Qualitative comparison between our method and other state-of-the-art approaches." + ], + "image_footnote": [], + "bbox": [ + 76, + 87, + 194, + 443 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b7830c556ed21659605f4aeeea3f861caaccc100d64b367eb9e91a2f246b9101.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 217, + 103, + 803, + 445 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "sor is designed as a Multilayer Perceptron (MLP). We subsequently substitute these components with our proposed structures individually. The results of these experiments, detailed in Table 4, confirm that both modules, when incorporated in place of the original structures, contribute positively towards enhancing overall performance. When implemented together, these modifications lead to even further improvements.", + "bbox": [ + 75, + 494, + 470, + 617 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/e6e50004c6bf9f7707d01cf7b49747bef9c10a4d74b31ada6341000a2cdd145c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | PA-MPJPE ↓ | PA-MPVPE ↓
Simple Baseline | 6.9 | 7.2
+ mesh regressor | 6.5 | 6.8
+ token generator | 6.6 | 7.1
+ both | 6.2 | 6.5
", + "bbox": [ + 120, + 627, + 426, + 698 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Analysing Core Structure of Token Generator. As shown in Table 5, performance using only global features is competitive. The grid sampling and point sampling on a $7 \\times 7$ feature map show similar efficiencies. Increasing the resolution of the feature map to $28 \\times 28$ through a single four-fold deconvolution improves performance. However, further optimization is not achieved by replacing single four-fold deconvolution with two layers of two-fold de", + "bbox": [ + 75, + 779, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "convolutions or adding more convolutions. Similarly, no improvement is observed when changing from point sampling to coarse mesh sampling. Qualitative comparison of different point sampling strategies is shown in Fig. 6", + "bbox": [ + 496, + 494, + 893, + 556 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/3271717fe35a8371ada85ecfb247df677e12679cd443861d7a5ed025e1b794f1.jpg", + "table_caption": [ + "Table 4. Ablation study of our proposed modules. Each of these methods brings about enhancements when utilized individually. However, when these strategies are integrated, they yield an even more substantial improvement." + ], + "table_footnote": [], + "table_body": "
Sample Method | Resolution | PA-MPJPE ↓ | PA-MPVPE ↓
Global | 1x1 | 6.5 | 6.8
Grid | 7x7 | 6.3 | 6.6
Point | 7x7 | 6.3 | 6.6
Point | 14x14 | 6.3 | 6.6
Point | 28x28 | 6.2 | 6.5
Point | 28x28 enhanced | 6.2 | 6.5
Coarse mesh | 28x28 | 6.2 | 6.5
", + "bbox": [ + 504, + 566, + 888, + 670 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. Ablation study of Our Token generator A point sample at a resolution of $28 \\times 28$ achieves optimal efficiency. Contrarily, increasing the number of sampled points or incorporating additional convolutional layers do not lead to any further improvements.", + "bbox": [ + 496, + 678, + 893, + 737 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Analysing the Core Structure of the Mesh Regressor. As shown in Table 6, for a single encoder layer, adding an extra encoder layer with a larger token number sharply increases performance by $0.3\\mathrm{mm}$ . The optimal setting consists of three encoder layers, with token numbers progressively multiplied by 4. As the layer number increases further, the marginal benefit becomes inconsequential and sometimes even decreases. Furthermore, as shown in Table 7, given a fixed set of token numbers, increasing computational complexity produces negligible differences in either", + "bbox": [ + 496, + 750, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "1373", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "block numbers or block dimensions in the encoder layer. A middle-sized block dimensions setting is optimal. Qualitative comparison of different upsample layers is shown in Figure 7.", + "bbox": [ + 75, + 90, + 468, + 150 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/81fe7f6fd230018d7df438811a6ac21757f507379fb72755e352783974cdc767.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Layer Nums | Token Nums | PA-MPJPE ↓ | PA-MPVPE ↓
1 | [21] | 6.6 | 7.1
2 | [21, 256] | 6.3 | 6.6
2 | [21, 384] | 6.3 | 6.6
3 | [21, 256, 384] | 6.2 | 6.5
3 | [21, 84, 336] | 6.2 | 6.5
4 | [21, 128, 256, 384] | 6.2 | 6.5
4 | [21, 63, 126, 252] | 6.3 | 6.6
", + "bbox": [ + 81, + 167, + 465, + 271 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/e0b74e5b96ff46414b13f7aee3b1e05d090281628d549a75e06607f4e4158b54.jpg", + "table_caption": [ + "Table 6. The Number of Upsampling Layers and Corresponding Token Numbers in Encoding Layers. Three encoding layers yield optimal efficiency." + ], + "table_footnote": [], + "table_body": "
Dimensions | PA-MPJPE ↓ | PA-MPVPE ↓
64, 32, 16 | 6.5 | 6.9
128, 64, 32 | 6.3 | 6.6
256, 128, 64 | 6.2 | 6.5
512, 256, 12 | 6.2 | 6.5
1024, 512, 256 | 6.4 | 6.7
Block Nums | PA-MPJPE ↓ | PA-MPVPE ↓
1, 1, 1 | 6.2 | 6.5
2, 2, 2 | 6.2 | 6.6
3, 3, 3 | 6.3 | 6.6
", + "bbox": [ + 120, + 357, + 426, + 500 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Position Encoding and Attention Mixer. We utilized position encoding layer and attention mixer during ablation experiments because they are intuitively helpful. Based on our SOTA result, we remove position encoding layers to observe a slightly $0.1\\mathrm{mm}$ degrade. Similar thing happens when we substitute attention mixer to identity mixer, see Tab. 8.", + "bbox": [ + 75, + 559, + 468, + 662 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/083ab6fa4d9992fed4df85fd01569bc32117942c73fdf10865a886fd078075e0.jpg", + "table_caption": [ + "Table 7. Dimensions and block nums. Single layer blocks with middle sized dimensions are optimal." + ], + "table_footnote": [], + "table_body": "
Method | PA-MPJPE ↓ | PA-MPVPE ↓
hiera-tiny sota | 6.2 | 6.5
Identity mixer | 6.3 | 6.6
w/o position emb | 6.3 | 6.6
", + "bbox": [ + 120, + 676, + 426, + 734 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 8. Token mixer and position embedding. When substitute attention mixer to identity mixer, or remove position encoding layer, the performance dropped slightly by $0.1\\mathrm{mm}$ .", + "bbox": [ + 75, + 744, + 468, + 787 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limits and Failure cases. As mentioned earlier, our work is dedicated to summarizing and abstracting from existing work. Since no targeted optimization was performed, some failure cases present in previous work remains challenging. These cases are concentrated in scenes with self-occlusion and object occlusion, see Fig. 8.", + "bbox": [ + 75, + 810, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7b772d9a2614a343087179c363d7891d4ede3c368a87e7cd9d24fd6f7887265c.jpg", + "image_caption": [ + "Figure 6. Qualitative Comparison of Different Point Sampling Strategies. The global/coarse feature fails in scenarios with detailed finger interactions, where upsampled feature works well." + ], + "image_footnote": [], + "bbox": [ + 500, + 89, + 870, + 209 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/cae26c4b995200f60e2eb3c7a96e17ee4903d8ba1ca56f7e89cd8c14d4972b56.jpg", + "image_caption": [ + "Figure 7. Qualitative Comparison of the Number of Layers of Mesh Decoder. When constrained to one, the reconstructed mesh tends to corrupt into unnatural shapes. Performance improves as the number of layers increases." + ], + "image_footnote": [], + "bbox": [ + 501, + 282, + 870, + 401 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/18513b34b4a664abb9520fb38878b2eb1f5417ff247a00fd2c1646e9eaeb2307.jpg", + "image_caption": [ + "Figure 8. Typical Failure Cases. Failure cases are concentrated in scenes with self-occlusion and object occlusion. Some are difficult to discern due to the small area of exposure, while others present ambiguities caused by the occlusion." + ], + "image_footnote": [], + "bbox": [ + 501, + 489, + 859, + 588 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion and Future Work", + "text_level": 1, + "bbox": [ + 500, + 686, + 767, + 702 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We observed shared advantages and disadvantages of typical structures. Based on these observations, we introduce the concept of the core structure. Through experiments, we revealed that a framework with the core structure could achieve high performance with limited computational load. We evaluated our approach quantitatively and qualitatively to demonstrate its effectiveness.", + "bbox": [ + 496, + 715, + 890, + 820 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "However, our method is explicitly designed to reconstruct single hand gestures. Other scenarios, such as extreme lighting, occlusion, interactions, or out-of-distribution cases, showed no improvement over existing methods. 
Such cases require specifically designed methods.", + "bbox": [ + 496, + 825, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "1374", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Yu-Wei Chao, Wei Yang, Yu Xiang, Pavlo Molchanov, Ankur Handa, Jonathan Tremblay, Yashraj S Narang, Karl Van Wyk, Umar Iqbal, Stan Birchfield, et al. Dexycb: A benchmark for capturing hand grasping of objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9044–9053, 2021. 2, 5", + "[2] Ping Chen, Yujin Chen, Dong Yang, Fangyin Wu, Qin Li, Qingpei Xia, and Yong Tan. I2uv-handnet: Image-to-uv prediction network for accurate and high-fidelity 3d hand mesh modeling. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12929–12938, 2021. 6", + "[3] Xingyu Chen, Yufeng Liu, Chongyang Ma, Jianlong Chang, Huayan Wang, Tian Chen, Xiaoyan Guo, Pengfei Wan, and Wen Zheng. Camera-space hand mesh recovery via semantic aggregation and adaptive 2d-1d registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13274–13283, 2021. 6", + "[4] Xingyu Chen, Yufeng Liu, Yajiao Dong, Xiong Zhang, Chongyang Ma, Yanmin Xiong, Yuan Zhang, and Xiaoyan Guo. Mobrecon: Mobile-friendly hand mesh reconstruction from monocular image. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 2, 3, 4, 6, 7", + "[5] Junhyeong Cho, Kim Youwang, and Tae-Hyun Oh. Crossattention of disentangled modalities for 3d human mesh recovery with transformers. 1, 3, 4, 6", + "[6] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2Mesh: Graph Convolutional Network for 3D Human Pose and Mesh Recovery from a 2D Human Pose, page 769-787. 2020. 2", + "[7] Jeonghwan Kim, Mi-Gyeong Gwon, Hyunwoo Park, Hyukmin Kwon, Gi-Mun Um, and Wonjun Kim. Sampling is matter: Point-guided 3d human mesh reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12880-12889, 2023. 1, 2, 4, 6", + "[8] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5", + "[9] Thomas Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv: Learning, arXiv: Learning, 2016. 2", + "[10] Isaak Lim, Alexander Dielen, Marcel Campen, and Leif Kobbelt. A Simple Approach to Intrinsic Correspondence Learning on Unstructured 3D Meshes, page 349-362. 2019. 3", + "[11] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 2, 6, 7", + "[12] Shaowei Liu, Hanwen Jiang, Jiarui Xu, Sifei Liu, and Xiaolong Wang. Semi-supervised 3d hand-object poses estimation with interactions in time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14687-14697, 2021. 6" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Gyeongsik Moon and Kyoung Mu Lee. I2l-meshnet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. 
In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VII 16, pages 752-768. Springer, 2020. 6", + "[14] JoonKyu Park, Yeonguk Oh, Gyeongsik Moon, Hongsuk Choi, and Kyoung Mu Lee. Handoccnet: Occlusion-robust 3d hand mesh estimation network. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1496–1505, 2022. 6", + "[15] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017. 5", + "[16] Anurag Ranjan, Timo Bolkart, Soubhik Sanyal, and Michael J. Black. Generating 3D faces using Convolutional Mesh Autoencoders, page 725-741. 2018. 2, 3", + "[17] Javier Romero, Dimitrios Tzionas, and Michael J. Black. Embodied hands: modeling and capturing hands and bodies together. ACM Transactions on Graphics, page 1-17, 2017. 2", + "[18] Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan, Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed, Judy Hoffman, et al. Hiera: A hierarchical vision transformer without the bells-and-whistles. arXiv preprint arXiv:2306.00989, 2023. 6", + "[19] Adrian Spurr, Umar Iqbal, Pavlo Molchanov, Otmar Hilliges, and Jan Kautz. Weakly supervised 3d hand pose estimation via biomechanical constraints. In European conference on computer vision, pages 211-228. Springer, 2020. 6", + "[20] Xiao Tang, Tianyu Wang, and Chi-Wing Fu. Towards accurate alignment in real-time 3d hand-mesh reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11698-11707, 2021. 6", + "[21] PavanKumarAnasosalu Vasu, James Gabriel, Jeff Zhu, Oncel Tuzel, and Anurag Ranjan. Fastvit: A fast hybrid vision transformer using structural reparameterization. 2023. 2, 3, 5, 6", + "[22] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, AidanN. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Neural Information Processing Systems, Neural Information Processing Systems, 2017. 2", + "[23] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, et al. Deep high-resolution representation learning for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 43(10):3349-3364, 2020. 2, 5", + "[24] Hao Xu, Tianyu Wang, Xiao Tang, and Chi-Wing Fu. H2onet: Hand-occlusion-and-orientation-aware network for real-time 3d hand mesh reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17048–17058, 2023. 6", + "[25] Yusuke Yoshiyasu. Deformable mesh transformer for 3d human mesh recovery. In Proceedings of the IEEE/CVF Con" + ], + "bbox": [ + 501, + 92, + 893, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "1375", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ference on Computer Vision and Pattern Recognition, pages 17006-17015, 2023. 6", + "[26] Weihao Yu, Mi Luo, Pan Zhou, Chenyang Si, Yichen Zhou, Xinchao Wang, Jiashi Feng, and Shuicheng Yan. Metaformer is actually what you need for vision. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[27] Xiong Zhang, Qiang Li, Hong Mo, Wenbo Zhang, and Wen Zheng. End-to-end hand mesh recovery from a monocular rgb image. 
In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 2, 6, 7", + "[28] Christian Zimmermann, Duygu Ceylan, Jimei Yang, Bryan Russell, Max Argus, and Thomas Brox. Freihand: A dataset for markerless capture of hand pose and shape from single rgb images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 813-822, 2019. 2, 5" + ], + "bbox": [ + 78, + 92, + 468, + 316 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "1376", + "bbox": [ + 483, + 945, + 514, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/edb8cbac-0e71-45cd-8cdb-45284f946ab7_model.json b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/edb8cbac-0e71-45cd-8cdb-45284f946ab7_model.json new file mode 100644 index 0000000000000000000000000000000000000000..cf411c09e15ba5f7f8ca41fc8308bfe2192bc4be --- /dev/null +++ b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/edb8cbac-0e71-45cd-8cdb-45284f946ab7_model.json @@ -0,0 +1,2035 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.131, + 0.782, + 0.153 + ], + "angle": 0, + "content": "A Simple Baseline for Efficient Hand Mesh Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.167, + 0.18, + 0.803, + 0.218 + ], + "angle": 0, + "content": "Zhishan Zhou*, Shihao Zhou*, Zhi Lv, Minqiang Zou, Yao Tang, Jiajun Liang† \nJiiov Technology" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.22, + 0.856, + 0.235 + ], + "angle": 0, + "content": "{zhishan.zhou, shihao.zhou, zhi.lv, minqiang.zou, yao.tang, jiajun.liang}@jiiov.com" + }, + { + "type": "text", + "bbox": [ + 0.374, + 0.253, + 0.591, + 0.27 + ], + "angle": 0, + "content": "http://simplehand.github.io" + }, + { + "type": "title", + "bbox": [ + 0.265, + 0.305, + 0.343, + 0.321 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.336, + 0.474, + 0.699 + ], + "angle": 0, + "content": "Hand mesh reconstruction has attracted considerable attention in recent years, with various approaches and techniques being proposed. Some of these methods incorporate complex components and designs, which, while effective, may complicate the model and hinder efficiency. In this paper, we decompose the mesh decoder into token generator and mesh regressor. Through extensive ablation experiments, we found that the token generator should select discriminating and representative points, while the mesh regressor needs to upsample sparse keypoints into dense meshes in multiple stages. Given these functionalities, we can achieve high performance with minimal computational resources. Based on this observation, we propose a simple yet effective baseline that outperforms state-of-the-art methods by a large margin, while maintaining real-time efficiency. Our method outperforms existing solutions, achieving state-of-the-art (SOTA) results across multiple datasets. On the FreiHAND dataset, our approach produced a PA-MPJPE of 5.8mm and a PA-MPVPE of 6.1mm. 
Similarly, on the DexYCB dataset, we observed a PA-MPJPE of 5.5mm and a PA-MPVPE of 5.5mm. As for performance speed, our method reached up to 33 frames per second (fps) when using HRNet and up to 70 fps when employing FastViT-MA36. Code will be made available." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.729, + 0.21, + 0.745 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.755, + 0.47, + 0.877 + ], + "angle": 0, + "content": "The field of hand mesh reconstruction has seen rapid advancements, with various types of mesh decoders being proposed. Despite their commendable performance, these methods often suffer from high system complexity, involving unnecessary components that may hinder efficiency. To facilitate a clear discussion, we decompose the mesh decoder into two primary components: the token generator and the mesh regressor." + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.312, + 0.9, + 0.512 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.524, + 0.895, + 0.608 + ], + "angle": 0, + "content": "Figure 1. Trade-off between Accuracy and Inference Speed. Our technique surpasses non-real-time methods \\((\\leq 40\\) fps) in both speed and precision. Compared to real-time methods \\((\\geq 70\\) fps), it offers a substantial boost in accuracy while preserving comparable speeds. For fair comparison, all speed evaluations were conducted on a 2080ti GPU with a batch size of one." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.641, + 0.895, + 0.838 + ], + "angle": 0, + "content": "The token generator serves a crucial role by integrating prior information with image features to extract task-specific features. For instance, FastMETRO [5] employs a strategy to predict weak-perspective camera parameters, which aggregates image features. MobRecon [4] develops a stacked encoding network to obtain gradually refined encoding features, and applies a technique known as pose pooling to suppress features that are unrelated to joint landmarks. PointHMR [7] on the other hand, proposes to use features sampled at positions of vertices projected from 3D to 2D spaces as intermediate guidance. These approaches collectively provide informative and discriminating features that enhance the overall performance of the system." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.895, + 0.903 + ], + "angle": 0, + "content": "The mesh regressor, the second component, decodes the tokenized features obtained from the token generator into mesh predictions. FastMETRO [5] takes a set of learnable joint tokens and vertex tokens as input and masks" + }, + { + "type": "page_footnote", + "bbox": [ + 0.107, + 0.887, + 0.363, + 0.901 + ], + "angle": 0, + "content": "* Equally contribution. † Corresponding author." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1367" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.228 + ], + "angle": 0, + "content": "self-attention of non-adjacent vertices according to the topology of the triangle mesh during training. MobRecon [4] employs a strategy of 2D-to-3D lifting and Pose-to-vertex lifting to gradually approximate meshes. MeshGraphormer [11] uses a coarse template mesh for positional encoding and then applies a linear Multi-Layer Perceptron (MLP) to sample the coarse mesh up to the original resolution. 
These methods aim to alleviate training difficulties due to heterogeneous modalities." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.229, + 0.473, + 0.472 + ], + "angle": 0, + "content": "Through investigation on existing methods, we found a interesting phenomenon that, although some methods shares same performance, they differ in specific failure cases. Namely, methods with coarse sampling strategy lack perceptual ability for fine-grained gestures such as pinch. Methods with limited upsample layers struggles in generating reasonable hand shapes. This observation prompts us to question: How different structures make effect on mesh decoder? By answering the question, we can streamline the process, eliminating excessive computation and complex components, to complete mesh prediction in a simple and efficient way. To design concise experiments, we start from the simplest structure for the aforementioned two modules, then gradually add and optimize the most commonly used components abstracted from the state-of-the-art (SOTA) methods." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.472, + 0.473, + 0.608 + ], + "angle": 0, + "content": "Through extensive ablation experiments, we discovered that the important structure of token generator is to sample discriminating and representative points, while the important structure of mesh generator is to upsample sparse keypoints into dense meshes. For implicitly, in the following paper, we define each of these structure as core structure. In the model design process, provided that the core structure's functionality is fulfilled, high performance can be achieved with minimal computational resources." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.609, + 0.473, + 0.807 + ], + "angle": 0, + "content": "Based on these observations, we propose a simple baseline that surpasses the SOTA methods by a significant margin and is computationally efficient. Referring to Figure 1, our proposed technique delivers state-of-the-art performance on various datasets. On the FreiHAND [28] dataset, it recorded a PA-MPJPE of \\(5.8\\mathrm{mm}\\) and PA-MPVPE of \\(6.1\\mathrm{mm}\\). When tested on the DexYCB [1] dataset, these metrics were further refined to a PA-MPJPE of \\(5.5\\mathrm{mm}\\) and a PA-MPVPE of \\(5.5\\mathrm{mm}\\). Our method is also advantaged in efficiency, achieving 33 frames per second (fps) on HRNet[23] and an impressive 70 fps on FastViTMA36 [21]. Our contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.81, + 0.47, + 0.856 + ], + "angle": 0, + "content": "1. We abstract existing methods into token generator and mesh regressor modules, and reveal the core structures of these two modules respectively." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.856, + 0.47, + 0.902 + ], + "angle": 0, + "content": "2. Based on these core structures, we developed a streamlined, real-time hand mesh regression module that excels in both efficiency and accuracy." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.81, + 0.47, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.892, + 0.153 + ], + "angle": 0, + "content": "3. Our method has achieved PA-MPJPE of \\(5.7\\mathrm{mm}\\) and PA-MPVPE of \\(6.0\\mathrm{mm}\\) on FreiHAND, and achieved SOTA results on multiple datasets, demonstrating its effectiveness and generalizability." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.165, + 0.642, + 0.182 + ], + "angle": 0, + "content": "2. 
Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.191, + 0.895, + 0.298 + ], + "angle": 0, + "content": "In this section, we briefly review existing methods of hand mesh reconstruction which usually include two main components: a token generator and a mesh regressor. Token generator processes the backbone image feature and generates tokens fed to the decoder. Mesh regressor decodes the input tokens into 3D mesh directly or parametric hand model coefficient." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.304, + 0.747, + 0.32 + ], + "angle": 0, + "content": "2.1. Hand Mesh Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.327, + 0.892, + 0.417 + ], + "angle": 0, + "content": "Estimating the 3D hand mesh from a single image has been widely researched. [27] proposes an end-to-end framework to recover hand mesh from a monocular RGB image. They use the 2D heatmap as input tokens and fully convolutional and fully connected layers to regress the MANO [17] parameters." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.418, + 0.893, + 0.703 + ], + "angle": 0, + "content": "Transformer [22] has shown powerful performance in language and vision tasks which could model long range relation among input tokens. MetaFormer [26] argues that the general architecture of transformers instead of the specific token mixer is the key player. They replace the self-attention module with a simple spatial pooling operator and achieve competitive performance with fewer parameters and less computation. METRO [11] extracts a single global image feature with a convolutional neural network and performs position encoding by repeatedly concatenating the image feature with 3D coordinates of a mesh template. A multi-layer transformer encoder with progressive dimensionality reduction regresses the 3D coordinates of mesh vertices with these input tokens. Due to the constraints of memory and computation, the transformer only processes a coarse mesh by sub-sampling twice with a sampling algorithm [16], and Multi-Layer Perceptrons (MLPs) are then used to upsample the coarse mesh to the original mesh." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.895, + 0.84 + ], + "angle": 0, + "content": "Graph convolutional neural network(GCNN) [9] is good at modeling the local interaction between neighbor vertices, thus it is very appropriate for mesh reconstruction. Pose2Mesh [6] designs a cascaded architecture to regress 3D mesh vertices from 2D pose directly using GCNN. MeshGraphormer [11] combines the ability of transformer and GCNN presenting a graph-convolution-reinforced transformer to model both local and global interactions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.895, + 0.902 + ], + "angle": 0, + "content": "Instead of extracting a global feature from the input image, pointHMR [7] argues that sampling features guided by vertex-relevant points could better utilize the correspondence between encoded features and spatial positions. 
They" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1368" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.086, + 0.262, + 0.142 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.155, + 0.165, + 0.171, + 0.177 + ], + "angle": 0, + "content": "A" + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.088, + 0.477, + 0.142 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.379, + 0.165, + 0.394, + 0.177 + ], + "angle": 0, + "content": "B" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.088, + 0.768, + 0.162 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.587, + 0.165, + 0.602, + 0.176 + ], + "angle": 0, + "content": "C" + }, + { + "type": "image_caption", + "bbox": [ + 0.782, + 0.101, + 0.882, + 0.116 + ], + "angle": 0, + "content": "Image Feature" + }, + { + "type": "image_caption", + "bbox": [ + 0.787, + 0.142, + 0.877, + 0.155 + ], + "angle": 0, + "content": "Convolutions" + }, + { + "type": "image", + "bbox": [ + 0.079, + 0.2, + 0.256, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.156, + 0.289, + 0.171, + 0.301 + ], + "angle": 0, + "content": "D" + }, + { + "type": "image", + "bbox": [ + 0.299, + 0.202, + 0.477, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.379, + 0.289, + 0.393, + 0.301 + ], + "angle": 0, + "content": "E" + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.202, + 0.682, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.588, + 0.289, + 0.601, + 0.301 + ], + "angle": 0, + "content": "F" + }, + { + "type": "image", + "bbox": [ + 0.701, + 0.179, + 0.884, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.788, + 0.214, + 0.877, + 0.24 + ], + "angle": 0, + "content": "Coarse Mesh Predictions" + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.319, + 0.895, + 0.378 + ], + "angle": 0, + "content": "Figure 2. An illustration demonstrates various designs of token generators. The grids colored in red represent the sampled points. a) Global feature; b) Grid sampling; c) Keypoint-guided sampling on the original feature map; d) Keypoint-guided sampling with \\(4\\mathrm{x}\\) upsampling, resulting in an enhanced feature; e) Keypoint-guided sampling with \\(4\\mathrm{x}\\) upsampling, where the feature is further improved by convolution; f) Coarse-mesh-guided point sampling with \\(4\\mathrm{x}\\) upsampling." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.402, + 0.473, + 0.568 + ], + "angle": 0, + "content": "conduct feature sampling by element-wise multiplication of image feature and 2D heatmap trained by projection of 3D mesh vertices. These sampled features are then fed into the transformer encoder with progressive attention mask as the form of vertex token. The progressively decreased local connection range realized by constraining the attention mask encourage the model to consider the local relationship between neighbor vertices. They also use linear projection to reduce the dimension of the encoded token and upsampling algorithm [16] to expand the sparse vertices into original dense vertices." 
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.579, + 0.285, + 0.595 + ], + "angle": 0, + "content": "2.2.Lightweight Networks" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.603, + 0.47, + 0.816 + ], + "angle": 0, + "content": "To achieve real-time hand mesh reconstruction, many lightweight networks have been studied for years. FastViT [21] is a hybrid vision transformer architecture which obtains state-of-the-art latency-accuracy tradeoff by structural reparameterization and train-time overparametrization techniques. MobRecon [4] designs multiple complicated modules to improve efficiency, including a stacked 2D encoding structure, a map-based position regression 2D-to-3D block and a graph operator based on spiral sampling [10]. FastMETRO [5] identifies the performance bottleneck of encoder-based transformers is caused by token design. They propose an encoder-decoder architecture to disentangle the interaction among input tokens which reduces the parameter." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.83, + 0.168, + 0.845 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.901 + ], + "angle": 0, + "content": "In our research, we dissected the existing methods into two key components: a token generator and a mesh regressor. However, defining the optimal core structure for each of" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.402, + 0.893, + 0.477 + ], + "angle": 0, + "content": "these modules remains a challenging task. For each module, we start with a fundamental, intuitive structure, and then progressively incorporate the most commonly used components, which we have abstracted from state-of-the-art (SOTA) methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.479, + 0.893, + 0.6 + ], + "angle": 0, + "content": "Given that these two modules, the token generator and the mesh regressor, operate in tandem, it's important to keep one constant when analysing the other. In practical terms, we first conduct experiments on the mesh regressor while keeping the token generator, as depicted in Figure 2-B, constant. Then, we apply the mesh regressor configuration that demonstrated the best performance to the token generator in subsequent experiments." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.613, + 0.669, + 0.628 + ], + "angle": 0, + "content": "3.1. Token Generator" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.637, + 0.892, + 0.734 + ], + "angle": 0, + "content": "Given a single image of dimensions \\(\\{H,W\\}\\), our model utilizes a backbone to extract image features \\(X_{b}^{\\in \\frac{H}{32} \\times \\frac{W}{32} \\times C}\\). The token generator \\(T\\) takes \\(X_{b}\\) as input and produces tokenized mesh feature \\(X_{m}^{\\in N \\times C}\\), where \\(N\\) denotes the number of sampled points. Thus, we can express this as \\(X_{m} = T(X_{b})\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.734, + 0.893, + 0.87 + ], + "angle": 0, + "content": "Starting with the simplest implementation, we apply a single spatial pooling (Figure 2-A). This approach establishes a surprisingly competitive baseline, comparable to the Fastmetro [5]. Changing spatial pooling to point sample (Figure 2-B) improves the performance. To further improve the quality of the feature, we follow the MobRecon [4] model and conduct keypoint-guided point sampling (Figure 2-C). However, this modification did not yield any noticeable improvement." 
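To make the core structure of the token generator concrete, the sketch below combines the keypoint-guided point sampling just discussed with the 4x upsampling to a 28x28 feature map that the paper ultimately adopts (see Sec. 3.3). The soft-argmax keypoint head, module names, and channel sizes are illustrative assumptions rather than the released implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class KeypointTokenGenerator(nn.Module):
    """Sketch of a token generator: predict 21 2D keypoints on an upsampled
    feature map, then sample one feature vector (token) per keypoint."""

    def __init__(self, channels: int = 256, num_joints: int = 21):
        super().__init__()
        # 4x upsampling of the 7x7 backbone map to 28x28 via two deconvolutions.
        self.upsample = nn.Sequential(
            nn.ConvTranspose2d(channels, channels, 2, stride=2), nn.ReLU(),
            nn.ConvTranspose2d(channels, channels, 2, stride=2), nn.ReLU(),
        )
        self.heatmap_head = nn.Conv2d(channels, num_joints, kernel_size=1)

    def forward(self, x_b: torch.Tensor):
        # x_b: (B, C, 7, 7) backbone feature.
        feat = self.upsample(x_b)                           # (B, C, 28, 28)
        heatmaps = self.heatmap_head(feat)                  # (B, J, 28, 28)
        B, J, H, W = heatmaps.shape
        prob = heatmaps.flatten(2).softmax(-1).view(B, J, H, W)
        # Soft-argmax: expected (x, y) keypoint positions in normalized [-1, 1] coords.
        ys = torch.linspace(-1, 1, H, device=x_b.device)
        xs = torch.linspace(-1, 1, W, device=x_b.device)
        grid_y, grid_x = torch.meshgrid(ys, xs, indexing="ij")
        kp_x = (prob * grid_x).sum(dim=(2, 3))
        kp_y = (prob * grid_y).sum(dim=(2, 3))
        keypoints = torch.stack([kp_x, kp_y], dim=-1)       # (B, J, 2)
        # Point sampling: one feature vector per predicted keypoint location.
        tokens = F.grid_sample(feat, keypoints.unsqueeze(2), align_corners=True)
        return tokens.squeeze(-1).transpose(1, 2), keypoints  # (B, J, C), (B, J, 2)


if __name__ == "__main__":
    gen = KeypointTokenGenerator()
    tokens, kps = gen(torch.randn(2, 256, 7, 7))
    print(tokens.shape, kps.shape)  # torch.Size([2, 21, 256]) torch.Size([2, 21, 2])
```

The sampled tokens play the role of X_m = T(X_b) and are handed directly to the mesh regressor.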
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Upon visual inspection, it appears that a \\(7 \\times 7\\) resolution is not sufficiently discriminating. Consequently, we apply" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1369" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "deconvolution on \\( X_{b} \\) to sample the feature map to \\( 14 \\times 14 \\) then \\( 28 \\times 28 \\) (Figure 2-D), respectively. This approach results in progressive improvement, but it does not work for \\( 8 \\times \\) deconvolution or larger." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.153, + 0.468, + 0.273 + ], + "angle": 0, + "content": "Models such as MobRecon [4] and PointHMR [7] report improvements by enhancing features, for example, using a FPN-like structure or stacked blocks. In our study, we tested different \\(4 \\times\\) upsample schemes, including double \\(2 \\times\\) upsampling, directly \\(4 \\times\\) upsampling, and adding more convolution layers during the upsampling process (Figure 2-E). Although these schemes vary in computational complexity, their performance remains consistent." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.275, + 0.468, + 0.412 + ], + "angle": 0, + "content": "We also tested the coarse mesh sampling method proposed by FastMETRO [5]. This method (Figure 2-F) generates denser points compared to keypoint-guided sampling but does not offer any significant advantages. Detailed results are shown in table 5. These experiments suggest that keypoint-guided point sampling at an appropriate resolution is a crucial structure for the token generator. As such, feature enhancement and exhaustive point sampling are not as necessary as initially thought." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.424, + 0.236, + 0.439 + ], + "angle": 0, + "content": "3.2. Mesh Regressor" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.448, + 0.468, + 0.553 + ], + "angle": 0, + "content": "The mesh regressor \\( R \\) takes the tokenized mesh feature \\( X_{m}^{\\in N\\times C} \\) as input and outputs predicted meshes Figure 3. [5] [4] adopts a multi-stage approximation approach and proposes various methods to formulate the topology relationship between joints and mesh. Finding their intersection components, we construct a cascading upsampling mesh regressor \\( R \\) using a series of decoder layers:" + }, + { + "type": "equation", + "bbox": [ + 0.207, + 0.569, + 0.468, + 0.584 + ], + "angle": 0, + "content": "\\[\nR = H _ {k} H _ {k - 1} \\dots H _ {0} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.468, + 0.644 + ], + "angle": 0, + "content": "Each decoder layer \\( H_{k} \\) takes the calculated tokens \\( T_{k} \\) as input, then subsequently processes these using a dimension reduce layer, metaformer, and upsample layer:" + }, + { + "type": "equation", + "bbox": [ + 0.166, + 0.658, + 0.468, + 0.674 + ], + "angle": 0, + "content": "\\[\nH _ {k} \\left(X _ {k}\\right) = U _ {k} \\left(M F _ {k} \\left(P _ {k} \\left(X _ {k}\\right)\\right)\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.688, + 0.468, + 0.779 + ], + "angle": 0, + "content": "where \\(U_{k}\\), \\(P_{k}\\) denotes the \\(k_{th}\\) upsample layer and dimension reduce layer respectively, each composed of a single-layer MLP. 
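The decoder layer of Eq. (2) can be sketched in a few lines of PyTorch. The example below uses a standard self-attention block as the MetaFormer token mixer, single-layer MLPs for P_k and U_k, the 21 -> 84 -> 336 -> 778 token schedule with feature dimensions 256/128/64 reported in the implementation details (Sec. 4.1), and a per-stage learnable position embedding; the final linear projection to 3D coordinates and all names are assumptions added for completeness, not the authors' code.

```python
import torch
import torch.nn as nn

class DecoderLayer(nn.Module):
    """Sketch of one decoder layer H_k(x) = U_k(MF_k(P_k(x))): a single-layer MLP that
    reduces the channel dimension (P_k), an attention block standing in for the
    MetaFormer MF_k, and a single-layer MLP over the token axis as the upsampler U_k."""

    def __init__(self, in_dim, dim, in_tokens, out_tokens, num_heads=4):
        super().__init__()
        self.reduce = nn.Linear(in_dim, dim)                      # P_k
        self.mixer = nn.TransformerEncoderLayer(                  # MF_k (attention mixer)
            d_model=dim, nhead=num_heads, dim_feedforward=2 * dim, batch_first=True)
        self.upsample = nn.Linear(in_tokens, out_tokens)          # U_k (token-count lift)
        self.pos_emb = nn.Parameter(torch.zeros(1, out_tokens, dim))  # learnable emb_k

    def forward(self, x):
        x = self.reduce(x)                                     # (B, N_in, dim)
        x = self.mixer(x)                                      # (B, N_in, dim)
        x = self.upsample(x.transpose(1, 2)).transpose(1, 2)   # (B, N_out, dim)
        return x + self.pos_emb                                # add position embedding


class MeshRegressor(nn.Module):
    """Cascade of decoder layers lifting 21 joint tokens to 778 mesh vertices."""

    def __init__(self, in_dim=256):
        super().__init__()
        tokens = [21, 84, 336, 778]     # token schedule reported in Sec. 4.1
        dims = [256, 128, 64]           # per-layer feature dimensions
        layers, prev_dim = [], in_dim
        for k in range(3):
            layers.append(DecoderLayer(prev_dim, dims[k], tokens[k], tokens[k + 1]))
            prev_dim = dims[k]
        self.layers = nn.Sequential(*layers)
        self.to_xyz = nn.Linear(dims[-1], 3)  # assumed head mapping tokens to 3D vertices

    def forward(self, joint_tokens):
        # joint_tokens: (B, 21, in_dim) from the token generator.
        return self.to_xyz(self.layers(joint_tokens))  # (B, 778, 3)


if __name__ == "__main__":
    regressor = MeshRegressor()
    verts = regressor(torch.randn(2, 21, 256))
    print(verts.shape)  # torch.Size([2, 778, 3])
```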
The upsample layer increases token numbers, while the dimension reduce layer modifies channel shapes. \\(MF_{k}\\) denotes the \\(k_{th}\\) metaformer block, \\(T_{k}\\) is the \\(k_{th}\\) output token where \\(X_{k + 1} = H_{k}(X_{k})\\), \\(X_0 = X_m\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Let \\( d_{k} \\) be the output dimension of \\( U_{k} \\) and token numbers of \\( MF_{k} \\), \\( n_{k}, c_{k} \\) and \\( m_{k} \\) are the block number, tokenmixer and block dimensions for \\( MF_{k} \\). We start with the first layer \\( MF_{0} \\) to demonstrate its operation. For the \\( N \\times C \\) shaped tensor \\( T_{0} \\), \\( P_{0} \\) projects it to \\( N \\times c \\), which is then processed by \\( MF_{0} \\) and outputs a tensor of the same shape. Subsequently, \\( U_{k} \\) upsamples it to \\( d \\times c \\). The following decoder layers repeat this procedure to output \\( X_{k} \\)." + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.091, + 0.887, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.253, + 0.892, + 0.295 + ], + "angle": 0, + "content": "Figure 3. Architecture of decoder layer in mesh regressor. It is composed of sequentially connected dimension reduce layer, metaformer block and upsample layer." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.323, + 0.892, + 0.595 + ], + "angle": 0, + "content": "We began from a baseline where \\(\\{k = 1, n = \\{1\\}, d = \\{778\\}, m = \\{\\text{identity}\\}\\}\\), which yielded competitive performance despite its simplicity. We then increase flops by enlarge \\(n\\) but observe no improvement. Inspired by [4], We sequentially add blocks with an increasing value of \\(d\\). When \\(k \\leq 3\\), Significant performance improvements are observed. However, as \\(k\\) continues to increase beyond this point, no further gains are detected. Moreover, Modifying the token mixer from ide to attn also beneficial. However, for fixed \\(d\\), simply increasing \\(n\\) did not improve performance. According to our experimental findings, the core function of each decoder layer is to incrementally elevate the number of tokens from an initial quantity of 21 up to 778. Additional strategies like augmenting computational workload or altering intricate specifics of the network appear to have minimal impact. In our best practice, parameters were set to \\(\\{k = 3, n = \\{1, 1, 1\\}, d = \\{21, 84, 336\\}, m = \\{\\text{attn}, \\text{attn}, \\text{attn}\\}\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.596, + 0.891, + 0.732 + ], + "angle": 0, + "content": "Existing hand joints and mesh topology modulation approaches stand out due to their ability to incorporate spatial information. However, their heuristic design is heavily reliant on hyperparameters and can be labor-intensive. Recognizing these strengths, we propose a novel method that modulates spatial relations without requiring manual design or additional computational resources. We achieve this by introducing learnable position embedding parameters \\(emb_{k}\\) to each output tensor \\(X_{k}\\) where" + }, + { + "type": "equation", + "bbox": [ + 0.633, + 0.749, + 0.891, + 0.765 + ], + "angle": 0, + "content": "\\[\nX _ {k} = X _ {k} + e m b _ {k} \\tag {3}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.786, + 0.684, + 0.802 + ], + "angle": 0, + "content": "3.3. 
Framework Design" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.81, + 0.891, + 0.87 + ], + "angle": 0, + "content": "As discussed above, the image feature extracted by the backbone is sequentially processed by both the token generator and the mesh regressor. The overall framework can be simply computed by \\( R(T(X_b)) \\)." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "The core structures form the basis of the overall structure, which is depicted in Figure 4. Given an input image of" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "1370" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.09, + 0.881, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.235, + 0.895, + 0.293 + ], + "angle": 0, + "content": "Figure 4. Overview of our architecture. The architecture of our model proceeds as follows: Firstly, the image feature \\( X_{b} \\) is extracted via a backbone network. These features are then passed to our token generator module, responsible for predicting 2D keypoints and performing point sampling on the upsampled feature map, thus generating joint tokens. Next, these joint tokens are input into our mesh regressor module, which carries out the mesh prediction to get the final coordinates." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.317, + 0.475, + 0.455 + ], + "angle": 0, + "content": "size \\(\\{H, W\\}\\), we conduct point sampling guided by the predicted 21 keypoints at a resolution of \\(H/8\\), \\(W/8\\). For image classification style backbones like Fast-ViT, we apply a 4x upsample deconvolution to its final layer. However, for segmentation style backbones like HRNet, we directly use the feature on the corresponding resolution. In the mesh regressor, we apply position encoding before each MetaFormer block. Although this is not regarded as a core structure, it serves as a beneficial addition." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.464, + 0.232, + 0.479 + ], + "angle": 0, + "content": "3.4. Loss Functions" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.487, + 0.473, + 0.653 + ], + "angle": 0, + "content": "The method proposed in this paper is trained with supervision for vertices, 3D joints, and 2D joints. In our implementation, both the 2D joints, denoted as \\( J_{2d} \\), and the vertices, denoted as \\( V_{3d} \\), are directly predicted by the model's output. The 3D joints, represented as \\( J_{3d}' \\), are calculated using the equation \\( J_{3d} = J \\times V_{3d} \\), where \\( J \\) signifies the regression matrix. All of these components utilize the L1 loss to compute the discrepancy between the ground truth and the predictions. 
The losses for the vertex, 3D joint, and 2D joint, denoted as \\( L_{vert} \\), \\( L_{J_{3d}} \\), and \\( L_{J_{2d}} \\), are respectively formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.179, + 0.663, + 0.47, + 0.697 + ], + "angle": 0, + "content": "\\[\nL _ {J _ {3 d}} = \\frac {1}{M _ {J _ {3 d}}} \\| J _ {3 d} - J _ {3 d} ^ {\\prime} \\| _ {1} \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.179, + 0.706, + 0.47, + 0.739 + ], + "angle": 0, + "content": "\\[\nL _ {J _ {2 d}} = \\frac {1}{M _ {J _ {2 d}}} \\| J _ {2 d} - J _ {2 d} ^ {\\prime} \\| _ {1} \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.176, + 0.748, + 0.47, + 0.78 + ], + "angle": 0, + "content": "\\[\nL _ {v e r t} = \\frac {1}{M _ {V _ {3 d}}} \\| V _ {3 d} - V _ {3 d} ^ {\\prime} \\| _ {1} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.786, + 0.471, + 0.833 + ], + "angle": 0, + "content": "Here, \\( J_{3d} \\in R^{M \\times 3} \\) represents all the ground truth points, and the symbols annotated with primes denote the predicted values. The overall loss function is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.14, + 0.848, + 0.47, + 0.865 + ], + "angle": 0, + "content": "\\[\nL = w _ {3 d} L _ {J _ {3 d}} + w _ {2 d} L _ {J _ {2 d}} + w _ {v e r t} L _ {v e r t} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Given that the primary objective of this study is mesh prediction, 2D keypoints only affect point sampling and" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.317, + 0.895, + 0.364 + ], + "angle": 0, + "content": "thus do not need to be highly accurate, we have accordingly adjusted the coefficients \\( w_{3d} \\), \\( w_{2d} \\), and \\( w_{vert} \\) to 10, 1, and 10, respectively." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.377, + 0.634, + 0.395 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.402, + 0.719, + 0.419 + ], + "angle": 0, + "content": "4.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.426, + 0.895, + 0.669 + ], + "angle": 0, + "content": "Our network is implemented based on Pytorch [15]. We use HRNet64[23] and FastViT-MA36 [21] as our backbones, with their initial weights pre-trained on ImageNet. We use the AdamW [8] optimizer to train our network, with a total of 100 epochs. The learning rate is initially set to 5e-4, and then adjusted to 5e-5 after 50 epochs. We train the network with eight RTX2080Ti GPUs, with a batch size of 32 per GPU. It costs 7 hours training with FastViT-MA36 backbone and 11 hours with HRNet. The features of intermediate layers are directly fed to the Token Generator without extra upsampling layer when the backbone is HRNetw64. The Mesh Regressor has three Encoder Layers, with the corresponding input token numbers being [21, 84, 336], output token numbers being [84, 336, 778], and feature dimensions being [256, 128, 64] respectively. We adopt Attention as the default token mixer, as its performance is slightly better." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.678, + 0.603, + 0.693 + ], + "angle": 0, + "content": "4.2. Datasets" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.702, + 0.894, + 0.809 + ], + "angle": 0, + "content": "Our primary experiments and analyses are conducted on the FreiHAND [28] dataset. 
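(As a brief aside before the dataset details, the supervision of Sec. 3.4 can be summarized in a short sketch: L1 losses on the vertices, on 3D joints regressed from the predicted vertices through the regression matrix J, and on 2D joints, combined with the weights 10, 1, and 10. The tensor shapes and the random placeholder for J below are illustrative assumptions, not the training code.)

```python
import torch

def mesh_losses(pred_verts, pred_j2d, gt_verts, gt_j3d, gt_j2d, J,
                w_3d=10.0, w_2d=1.0, w_vert=10.0):
    """Sketch of the overall objective: L1 on vertices, on 3D joints obtained from the
    predicted vertices via the joint-regressor J, and on 2D joints (Eqs. 4-7)."""
    pred_j3d = torch.matmul(J, pred_verts)           # (B, 21, 3) = (21x778) @ (B, 778, 3)
    l_vert = (pred_verts - gt_verts).abs().mean()    # vertex term
    l_j3d = (pred_j3d - gt_j3d).abs().mean()         # 3D joint term
    l_j2d = (pred_j2d - gt_j2d).abs().mean()         # 2D joint term
    return w_3d * l_j3d + w_2d * l_j2d + w_vert * l_vert


if __name__ == "__main__":
    B = 2
    J = torch.rand(21, 778)                          # placeholder joint-regressor matrix
    loss = mesh_losses(torch.rand(B, 778, 3), torch.rand(B, 21, 2),
                       torch.rand(B, 778, 3), torch.rand(B, 21, 3),
                       torch.rand(B, 21, 2), J)
    print(loss.item())
```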
In order to validate the generalization of our method, we also do experiments on the large-scale 3D hand-object dataset, DexYCB [1]. The FreiHAND dataset contains 130,240 training samples and 3,960 testing samples. DexYCB contains 406,888 training samples and 78,768 testing samples." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.817, + 0.686, + 0.832 + ], + "angle": 0, + "content": "4.3. Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.895, + 0.903 + ], + "angle": 0, + "content": "To evaluate the accuracy of 3D Hand Mesh Reconstruction methods, we adopt five metrics: Procrustes-aligned mean per joint position error (PA-MPJPE), Procrustes-aligned mean per vertex position error (PA-MPVPE), mean per" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "1371" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.164, + 0.088, + 0.808, + 0.291 + ], + "angle": 0, + "content": "
MethodBackbonePA-MPJPE ↓PA-MPVPE ↓F@05 ↑F@15 ↑FPS
I2L-MeshNet [13]ResNet507.47.60.6810.97372
CMR [3]ResNet506.97.00.7150.977-
I2UV [2]ResNet507.27.40.6820.973-
Tang et al. [20]ResNet506.76.70.7240.98147
MobRecon [4]DenseStack6.97.20.6940.97980
METRO [27]HRNet6.76.80.7170.98127
MeshGraphomer [11]HRNet6.36.50.7380.98324
FastMETRO [5]HRNet6.57.10.6870.98328
Deformer [25]HRNet6.26.40.7430.984-
PointHMR [7]HRNet6.16.60.7200.984-
FastViT [21]FastViT-MA366.66.70.7220.98184
OursHRNet5.86.10.7660.98633
OursFastViT-MA365.76.00.7720.98670
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.301, + 0.895, + 0.331 + ], + "angle": 0, + "content": "Table 1. Results on the FreiHAND dataset. Our results are shown in bold. “-” indicates not reported. Our results surpass all existing methods in terms of accuracy metrics." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.356, + 0.472, + 0.448 + ], + "angle": 0, + "content": "joint position error (MPJPE), mean per vertex position error (MPVPE), and F-Score. PA-MPJPE and PA-MPVPE refer to the MPJPE and MPVPE after aligning the predicted hand results with the Ground Truth using Procrustes alignment, respectively. These two metrics do not consider the impact of global rotation and scale." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.458, + 0.172, + 0.473 + ], + "angle": 0, + "content": "4.4. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.482, + 0.47, + 0.649 + ], + "angle": 0, + "content": "Comparison with previous methods To validate our proposed modules., we adopted HRNet and FastViT-MA36 as backbones for non-real-time and real-time methods respectively, following established models [27] [11] [5] [21]. For fair comparison, we provide performance metrics without Test-Time Augmentation (TTA) and FPS without TensorRT optimization. Table 1 shows that our method, despite being slightly slower than FastViT, improves PA-MPJPE by \\(0.9\\mathrm{mm}\\). Compared to transformer-based methods, our approach demonstrates superior speed and performance, while only requiring \\(10\\%\\) of parameters, as shown in Table 2." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.649, + 0.47, + 0.693 + ], + "angle": 0, + "content": "The qualitative comparison results are shown in the figure 5. Compared to previous methods, our method produces more accurate hand reconstruction results." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.705, + 0.466, + 0.771 + ], + "angle": 0, + "content": "
Method#ParamsPA-MPJPE ↓PA-MPVPE ↓
METRO [27]102M6.76.8
MeshGraphomer [11]98M6.36.5
FastMETRO [5]25M6.57.1
Ours1.9M5.86.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.781, + 0.47, + 0.852 + ], + "angle": 0, + "content": "Table 2. Comparison of transformer-based approaches. #Params refer to the network parameters that are not included within the backbone structure of the model. Our approach not only surpasses existing benchmarks in key metrics but also achieves a parameter reduction of one to two orders of magnitude." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Evaluation on DexYCB We employed the large-scale hand-object dataset DexYCB to validate our method's ef" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.356, + 0.895, + 0.448 + ], + "angle": 0, + "content": "fectiveness and generalizability. As shown in Table 3, our model outperforms existing single-image input methods on all metrics. Significantly, we surpassed previous benchmarks by \\(1.5\\mathrm{mm}\\) and \\(0.8\\mathrm{mm}\\) on the MPJPE and MPVPE measures respectively, thereby setting new standards and demonstrating our method's broad applicability." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.46, + 0.89, + 0.553 + ], + "angle": 0, + "content": "
MethodPA-MPJPE ↓PA-MPVPE ↓MPJPE ↓MPVPE ↓
METRO [27]7.0---
Spurr et al. [19]6.8---
Liu et al. [12]6.6---
HandOccNet [14]5.85.514.013.1
MobRecon [4]6.45.614.213.1
H2ONet [24]5.75.514.013.0
Ours5.55.512.412.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.563, + 0.895, + 0.606 + ], + "angle": 0, + "content": "Table 3. Results on DexYCB. Our method shows advantages on Procrustes-Aligned metrics and surpassed the previous methods by a large margin on non-Procrustes-Aligned metrics." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.634, + 0.655, + 0.65 + ], + "angle": 0, + "content": "4.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.658, + 0.892, + 0.763 + ], + "angle": 0, + "content": "To thoroughly validate the various parameter combinations, a large number of ablation experiments were conducted. For efficiency, all ablation experiments were implemented on smaller models (e.g., Hiera-tiny). After identifying the optimal parameter combination, it is then applied to the standard models to facilitate a fair comparison with existing methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.825 + ], + "angle": 0, + "content": "The state-of-the-art backbone, Hiera-Tiny [18], is utilized in our study as a strong baseline. We conduct a series of ablation experiments on the FreiHAND dataset with the aim of examining the efficacy of the structure we propose." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Effectiveness of Our Token Generator and Mesh Regressor. In order to evaluate the effectiveness of our Token Generator and Mesh Regressor, we initially set up a standard baseline model. This model's Token Generator is constructed based on global features, while its Mesh Regres" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1372" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.088, + 0.195, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.104, + 0.805, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.211, + 0.455, + 0.757, + 0.471 + ], + "angle": 0, + "content": "Figure 5. Qualitative comparison between our method and other state-of-the-art approaches." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.496, + 0.471, + 0.618 + ], + "angle": 0, + "content": "sor is designed as a Multilayer Perceptron (MLP). We subsequently substitute these components with our proposed structures individually. The results of these experiments, detailed in Table 4, confirm that both modules, when incorporated in place of the original structures, contribute positively towards enhancing overall performance. When implemented together, these modifications lead to even further improvements." + }, + { + "type": "table", + "bbox": [ + 0.121, + 0.628, + 0.428, + 0.699 + ], + "angle": 0, + "content": "
MethodPA-MPJPE ↓PA-MPVPE ↓
Simple Baseline6.97.2
+ mesh regressor6.56.8
+ token generator6.67.1
+ both6.26.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.708, + 0.471, + 0.765 + ], + "angle": 0, + "content": "Table 4. Ablation study of our proposed modules. Each of these methods brings about enhancements when utilized individually. However, when these strategies are integrated, they yield an even more substantial improvement." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Analysing Core Structure of Token Generator. As shown in Table 5, performance using only global features is competitive. The grid sampling and point sampling on a \\(7 \\times 7\\) feature map show similar efficiencies. Increasing the resolution of the feature map to \\(28 \\times 28\\) through a single four-fold deconvolution improves performance. However, further optimization is not achieved by replacing single four-fold deconvolution with two layers of two-fold de" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.496, + 0.895, + 0.558 + ], + "angle": 0, + "content": "convolutions or adding more convolutions. Similarly, no improvement is observed when changing from point sampling to coarse mesh sampling. Qualitative comparison of different point sampling strategies is shown in Fig. 6" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.567, + 0.89, + 0.671 + ], + "angle": 0, + "content": "
Sample MethodResolutionPA-MPJPE ↓PA-MPVPE ↓
Global1x16.56.8
Grid7x76.36.6
Point7x76.36.6
Point14x146.36.6
Point28x286.26.5
Point28 x 28 enhanced6.26.5
Coarse mesh28x286.26.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.679, + 0.895, + 0.738 + ], + "angle": 0, + "content": "Table 5. Ablation study of Our Token generator A point sample at a resolution of \\(28 \\times 28\\) achieves optimal efficiency. Contrarily, increasing the number of sampled points or incorporating additional convolutional layers do not lead to any further improvements." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Analysing the Core Structure of the Mesh Regressor. As shown in Table 6, for a single encoder layer, adding an extra encoder layer with a larger token number sharply increases performance by \\(0.3\\mathrm{mm}\\). The optimal setting consists of three encoder layers, with token numbers progressively multiplied by 4. As the layer number increases further, the marginal benefit becomes inconsequential and sometimes even decreases. Furthermore, as shown in Table 7, given a fixed set of token numbers, increasing computational complexity produces negligible differences in either" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1373" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.151 + ], + "angle": 0, + "content": "block numbers or block dimensions in the encoder layer. A middle-sized block dimensions setting is optimal. Qualitative comparison of different upsample layers is shown in Figure 7." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.168, + 0.466, + 0.272 + ], + "angle": 0, + "content": "
Layer NumsToken NumsPA-MPJPE ↓PA-MPVPE ↓
1[21]6.67.1
2[21, 256]6.36.6
2[21, 384]6.36.6
3[21, 256, 384]6.26.5
3[21, 84, 336]6.26.5
4[21, 128, 256, 384]6.26.5
4[21, 63, 126, 252]6.36.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.282, + 0.469, + 0.325 + ], + "angle": 0, + "content": "Table 6. The Number of Upsampling Layers and Corresponding Token Numbers in Encoding Layers. Three encoding layers yield optimal efficiency." + }, + { + "type": "table", + "bbox": [ + 0.122, + 0.358, + 0.428, + 0.501 + ], + "angle": 0, + "content": "
DimensionsPA-MPJPE ↓PA-MPVPE ↓
64, 32, 166.56.9
128, 64, 326.36.6
256, 128, 646.26.5
512, 256, 126.26.5
1024, 512, 2566.46.7
Block NumsPA-MPJPE ↓PA-MPVPE ↓
1, 1, 16.26.5
2, 2, 26.26.6
3, 3, 36.36.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.511, + 0.469, + 0.539 + ], + "angle": 0, + "content": "Table 7. Dimensions and block nums. Single layer blocks with middle sized dimensions are optimal." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.56, + 0.469, + 0.663 + ], + "angle": 0, + "content": "Position Encoding and Attention Mixer. We utilized position encoding layer and attention mixer during ablation experiments because they are intuitively helpful. Based on our SOTA result, we remove position encoding layers to observe a slightly \\(0.1\\mathrm{mm}\\) degrade. Similar thing happens when we substitute attention mixer to identity mixer, see Tab. 8." + }, + { + "type": "table", + "bbox": [ + 0.121, + 0.678, + 0.428, + 0.735 + ], + "angle": 0, + "content": "
MethodPA-MPJPE ↓PA-MPVPE ↓
hiera-tiny sota6.26.5
Identity mixer6.36.6
w/o position emb6.36.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.746, + 0.469, + 0.788 + ], + "angle": 0, + "content": "Table 8. Token mixer and position embedding. When substitute attention mixer to identity mixer, or remove position encoding layer, the performance dropped slightly by \\(0.1\\mathrm{mm}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Limits and Failure cases. As mentioned earlier, our work is dedicated to summarizing and abstracting from existing work. Since no targeted optimization was performed, some failure cases present in previous work remains challenging. These cases are concentrated in scenes with self-occlusion and object occlusion, see Fig. 8." + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.09, + 0.872, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.22, + 0.892, + 0.262 + ], + "angle": 0, + "content": "Figure 6. Qualitative Comparison of Different Point Sampling Strategies. The global/coarse feature fails in scenarios with detailed finger interactions, where upsampled feature works well." + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.283, + 0.871, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.414, + 0.892, + 0.469 + ], + "angle": 0, + "content": "Figure 7. Qualitative Comparison of the Number of Layers of Mesh Decoder. When constrained to one, the reconstructed mesh tends to corrupt into unnatural shapes. Performance improves as the number of layers increases." + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.49, + 0.861, + 0.589 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.6, + 0.892, + 0.656 + ], + "angle": 0, + "content": "Figure 8. Typical Failure Cases. Failure cases are concentrated in scenes with self-occlusion and object occlusion. Some are difficult to discern due to the small area of exposure, while others present ambiguities caused by the occlusion." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.688, + 0.769, + 0.703 + ], + "angle": 0, + "content": "5. Conclusion and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.716, + 0.892, + 0.821 + ], + "angle": 0, + "content": "We observed shared advantages and disadvantages of typical structures. Based on these observations, we introduce the concept of the core structure. Through experiments, we revealed that a framework with the core structure could achieve high performance with limited computational load. We evaluated our approach quantitatively and qualitatively to demonstrate its effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.901 + ], + "angle": 0, + "content": "However, our method is explicitly designed to reconstruct single hand gestures. Other scenarios, such as extreme lighting, occlusion, interactions, or out-of-distribution cases, showed no improvement over existing methods. Such cases require specifically designed methods." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1374" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.199 + ], + "angle": 0, + "content": "[1] Yu-Wei Chao, Wei Yang, Yu Xiang, Pavlo Molchanov, Ankur Handa, Jonathan Tremblay, Yashraj S Narang, Karl Van Wyk, Umar Iqbal, Stan Birchfield, et al. Dexycb: A benchmark for capturing hand grasping of objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9044–9053, 2021. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.201, + 0.472, + 0.284 + ], + "angle": 0, + "content": "[2] Ping Chen, Yujin Chen, Dong Yang, Fangyin Wu, Qin Li, Qingpei Xia, and Yong Tan. I2uv-handnet: Image-to-uv prediction network for accurate and high-fidelity 3d hand mesh modeling. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12929–12938, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.286, + 0.472, + 0.369 + ], + "angle": 0, + "content": "[3] Xingyu Chen, Yufeng Liu, Chongyang Ma, Jianlong Chang, Huayan Wang, Tian Chen, Xiaoyan Guo, Pengfei Wan, and Wen Zheng. Camera-space hand mesh recovery via semantic aggregation and adaptive 2d-1d registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13274–13283, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.371, + 0.472, + 0.453 + ], + "angle": 0, + "content": "[4] Xingyu Chen, Yufeng Liu, Yajiao Dong, Xiong Zhang, Chongyang Ma, Yanmin Xiong, Yuan Zhang, and Xiaoyan Guo. Mobrecon: Mobile-friendly hand mesh reconstruction from monocular image. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 2, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.457, + 0.472, + 0.498 + ], + "angle": 0, + "content": "[5] Junhyeong Cho, Kim Youwang, and Tae-Hyun Oh. Crossattention of disentangled modalities for 3d human mesh recovery with transformers. 1, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.5, + 0.472, + 0.555 + ], + "angle": 0, + "content": "[6] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2Mesh: Graph Convolutional Network for 3D Human Pose and Mesh Recovery from a 2D Human Pose, page 769-787. 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.557, + 0.472, + 0.64 + ], + "angle": 0, + "content": "[7] Jeonghwan Kim, Mi-Gyeong Gwon, Hyunwoo Park, Hyukmin Kwon, Gi-Mun Um, and Wonjun Kim. Sampling is matter: Point-guided 3d human mesh reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12880-12889, 2023. 1, 2, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.643, + 0.472, + 0.684 + ], + "angle": 0, + "content": "[8] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.686, + 0.472, + 0.728 + ], + "angle": 0, + "content": "[9] Thomas Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv: Learning, arXiv: Learning, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.73, + 0.472, + 0.785 + ], + "angle": 0, + "content": "[10] Isaak Lim, Alexander Dielen, Marcel Campen, and Leif Kobbelt. 
A Simple Approach to Intrinsic Correspondence Learning on Unstructured 3D Meshes, page 349-362. 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.788, + 0.472, + 0.83 + ], + "angle": 0, + "content": "[11] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.832, + 0.472, + 0.902 + ], + "angle": 0, + "content": "[12] Shaowei Liu, Hanwen Jiang, Jiarui Xu, Sifei Liu, and Xiaolong Wang. Semi-supervised 3d hand-object poses estimation with interactions in time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14687-14697, 2021. 6" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.176 + ], + "angle": 0, + "content": "[13] Gyeongsik Moon and Kyoung Mu Lee. I2l-meshnet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VII 16, pages 752-768. Springer, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.178, + 0.894, + 0.247 + ], + "angle": 0, + "content": "[14] JoonKyu Park, Yeonguk Oh, Gyeongsik Moon, Hongsuk Choi, and Kyoung Mu Lee. Handoccnet: Occlusion-robust 3d hand mesh estimation network. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1496–1505, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.893, + 0.303 + ], + "angle": 0, + "content": "[15] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.893, + 0.347 + ], + "angle": 0, + "content": "[16] Anurag Ranjan, Timo Bolkart, Soubhik Sanyal, and Michael J. Black. Generating 3D faces using Convolutional Mesh Autoencoders, page 725-741. 2018. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.349, + 0.893, + 0.403 + ], + "angle": 0, + "content": "[17] Javier Romero, Dimitrios Tzionas, and Michael J. Black. Embodied hands: modeling and capturing hands and bodies together. ACM Transactions on Graphics, page 1-17, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.405, + 0.893, + 0.475 + ], + "angle": 0, + "content": "[18] Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan, Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed, Judy Hoffman, et al. Hiera: A hierarchical vision transformer without the bells-and-whistles. arXiv preprint arXiv:2306.00989, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.476, + 0.893, + 0.532 + ], + "angle": 0, + "content": "[19] Adrian Spurr, Umar Iqbal, Pavlo Molchanov, Otmar Hilliges, and Jan Kautz. Weakly supervised 3d hand pose estimation via biomechanical constraints. In European conference on computer vision, pages 211-228. Springer, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.533, + 0.893, + 0.589 + ], + "angle": 0, + "content": "[20] Xiao Tang, Tianyu Wang, and Chi-Wing Fu. Towards accurate alignment in real-time 3d hand-mesh reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11698-11707, 2021. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.59, + 0.893, + 0.645 + ], + "angle": 0, + "content": "[21] PavanKumarAnasosalu Vasu, James Gabriel, Jeff Zhu, Oncel Tuzel, and Anurag Ranjan. Fastvit: A fast hybrid vision transformer using structural reparameterization. 2023. 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.647, + 0.893, + 0.716 + ], + "angle": 0, + "content": "[22] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, AidanN. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Neural Information Processing Systems, Neural Information Processing Systems, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.718, + 0.893, + 0.8 + ], + "angle": 0, + "content": "[23] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, et al. Deep high-resolution representation learning for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 43(10):3349-3364, 2020. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.802, + 0.894, + 0.872 + ], + "angle": 0, + "content": "[24] Hao Xu, Tianyu Wang, Xiao Tang, and Chi-Wing Fu. H2onet: Hand-occlusion-and-orientation-aware network for real-time 3d hand mesh reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17048–17058, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.893, + 0.902 + ], + "angle": 0, + "content": "[25] Yusuke Yoshiyasu. Deformable mesh transformer for 3d human mesh recovery. In Proceedings of the IEEE/CVF Con" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1375" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.093, + 0.468, + 0.119 + ], + "angle": 0, + "content": "ference on Computer Vision and Pattern Recognition, pages 17006-17015, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.189 + ], + "angle": 0, + "content": "[26] Weihao Yu, Mi Luo, Pan Zhou, Chenyang Si, Yichen Zhou, Xinchao Wang, Jiashi Feng, and Shuicheng Yan. Metaformer is actually what you need for vision. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.469, + 0.247 + ], + "angle": 0, + "content": "[27] Xiong Zhang, Qiang Li, Hong Mo, Wenbo Zhang, and Wen Zheng. End-to-end hand mesh recovery from a monocular rgb image. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.25, + 0.469, + 0.318 + ], + "angle": 0, + "content": "[28] Christian Zimmermann, Duygu Ceylan, Jimei Yang, Bryan Russell, Max Argus, and Thomas Brox. Freihand: A dataset for markerless capture of hand pose and shape from single rgb images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 813-822, 2019. 
2, 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.469, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "1376" + } + ] +] \ No newline at end of file diff --git a/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/edb8cbac-0e71-45cd-8cdb-45284f946ab7_origin.pdf b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/edb8cbac-0e71-45cd-8cdb-45284f946ab7_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..97c44cad9fc7290b471c00646d1081ae593cde40 --- /dev/null +++ b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/edb8cbac-0e71-45cd-8cdb-45284f946ab7_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f15743c6307029256ba3e3dade2b150f3458409b65eafc7151bd953eaf18af4 +size 4818176 diff --git a/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/full.md b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/full.md new file mode 100644 index 0000000000000000000000000000000000000000..2a3c612293c3a8b19a44e469a6f717e0b6f5f51f --- /dev/null +++ b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/full.md @@ -0,0 +1,302 @@ +# A Simple Baseline for Efficient Hand Mesh Reconstruction + +Zhishan Zhou*, Shihao Zhou*, Zhi Lv, Minqiang Zou, Yao Tang, Jiajun Liang† +Jiiov Technology + +{zhishan.zhou, shihao.zhou, zhi.lv, minqiang.zou, yao.tang, jiajun.liang}@jiiov.com + +http://simplehand.github.io + +# Abstract + +Hand mesh reconstruction has attracted considerable attention in recent years, with various approaches and techniques being proposed. Some of these methods incorporate complex components and designs, which, while effective, may complicate the model and hinder efficiency. In this paper, we decompose the mesh decoder into token generator and mesh regressor. Through extensive ablation experiments, we found that the token generator should select discriminating and representative points, while the mesh regressor needs to upsample sparse keypoints into dense meshes in multiple stages. Given these functionalities, we can achieve high performance with minimal computational resources. Based on this observation, we propose a simple yet effective baseline that outperforms state-of-the-art methods by a large margin, while maintaining real-time efficiency. Our method outperforms existing solutions, achieving state-of-the-art (SOTA) results across multiple datasets. On the FreiHAND dataset, our approach produced a PA-MPJPE of 5.8mm and a PA-MPVPE of 6.1mm. Similarly, on the DexYCB dataset, we observed a PA-MPJPE of 5.5mm and a PA-MPVPE of 5.5mm. As for performance speed, our method reached up to 33 frames per second (fps) when using HRNet and up to 70 fps when employing FastViT-MA36. Code will be made available. + +# 1. Introduction + +The field of hand mesh reconstruction has seen rapid advancements, with various types of mesh decoders being proposed. Despite their commendable performance, these methods often suffer from high system complexity, involving unnecessary components that may hinder efficiency. To facilitate a clear discussion, we decompose the mesh decoder into two primary components: the token generator and the mesh regressor. + +![](images/3ee37e25ed7c7f5afd0b7a6936538d9ed086bbbeea4e3a8ac57f87270d61a929.jpg) +Figure 1. Trade-off between Accuracy and Inference Speed. Our technique surpasses non-real-time methods $(\leq 40$ fps) in both speed and precision. 
Compared to real-time methods $(\geq 70$ fps), it offers a substantial boost in accuracy while preserving comparable speeds. For fair comparison, all speed evaluations were conducted on a 2080ti GPU with a batch size of one. + +The token generator serves a crucial role by integrating prior information with image features to extract task-specific features. For instance, FastMETRO [5] employs a strategy to predict weak-perspective camera parameters, which aggregates image features. MobRecon [4] develops a stacked encoding network to obtain gradually refined encoding features, and applies a technique known as pose pooling to suppress features that are unrelated to joint landmarks. PointHMR [7] on the other hand, proposes to use features sampled at positions of vertices projected from 3D to 2D spaces as intermediate guidance. These approaches collectively provide informative and discriminating features that enhance the overall performance of the system. + +The mesh regressor, the second component, decodes the tokenized features obtained from the token generator into mesh predictions. FastMETRO [5] takes a set of learnable joint tokens and vertex tokens as input and masks + +self-attention of non-adjacent vertices according to the topology of the triangle mesh during training. MobRecon [4] employs a strategy of 2D-to-3D lifting and Pose-to-vertex lifting to gradually approximate meshes. MeshGraphormer [11] uses a coarse template mesh for positional encoding and then applies a linear Multi-Layer Perceptron (MLP) to sample the coarse mesh up to the original resolution. These methods aim to alleviate training difficulties due to heterogeneous modalities. + +Through investigation on existing methods, we found a interesting phenomenon that, although some methods shares same performance, they differ in specific failure cases. Namely, methods with coarse sampling strategy lack perceptual ability for fine-grained gestures such as pinch. Methods with limited upsample layers struggles in generating reasonable hand shapes. This observation prompts us to question: How different structures make effect on mesh decoder? By answering the question, we can streamline the process, eliminating excessive computation and complex components, to complete mesh prediction in a simple and efficient way. To design concise experiments, we start from the simplest structure for the aforementioned two modules, then gradually add and optimize the most commonly used components abstracted from the state-of-the-art (SOTA) methods. + +Through extensive ablation experiments, we discovered that the important structure of token generator is to sample discriminating and representative points, while the important structure of mesh generator is to upsample sparse keypoints into dense meshes. For implicitly, in the following paper, we define each of these structure as core structure. In the model design process, provided that the core structure's functionality is fulfilled, high performance can be achieved with minimal computational resources. + +Based on these observations, we propose a simple baseline that surpasses the SOTA methods by a significant margin and is computationally efficient. Referring to Figure 1, our proposed technique delivers state-of-the-art performance on various datasets. On the FreiHAND [28] dataset, it recorded a PA-MPJPE of $5.8\mathrm{mm}$ and PA-MPVPE of $6.1\mathrm{mm}$ . 
When tested on the DexYCB [1] dataset, these metrics were further refined to a PA-MPJPE of $5.5\mathrm{mm}$ and a PA-MPVPE of $5.5\mathrm{mm}$ . Our method is also advantaged in efficiency, achieving 33 frames per second (fps) on HRNet[23] and an impressive 70 fps on FastViTMA36 [21]. Our contributions can be summarized as follows: + +1. We abstract existing methods into token generator and mesh regressor modules, and reveal the core structures of these two modules respectively. +2. Based on these core structures, we developed a streamlined, real-time hand mesh regression module that excels in both efficiency and accuracy. + +3. Our method has achieved PA-MPJPE of $5.7\mathrm{mm}$ and PA-MPVPE of $6.0\mathrm{mm}$ on FreiHAND, and achieved SOTA results on multiple datasets, demonstrating its effectiveness and generalizability. + +# 2. Related Work + +In this section, we briefly review existing methods of hand mesh reconstruction which usually include two main components: a token generator and a mesh regressor. Token generator processes the backbone image feature and generates tokens fed to the decoder. Mesh regressor decodes the input tokens into 3D mesh directly or parametric hand model coefficient. + +# 2.1. Hand Mesh Reconstruction + +Estimating the 3D hand mesh from a single image has been widely researched. [27] proposes an end-to-end framework to recover hand mesh from a monocular RGB image. They use the 2D heatmap as input tokens and fully convolutional and fully connected layers to regress the MANO [17] parameters. + +Transformer [22] has shown powerful performance in language and vision tasks which could model long range relation among input tokens. MetaFormer [26] argues that the general architecture of transformers instead of the specific token mixer is the key player. They replace the self-attention module with a simple spatial pooling operator and achieve competitive performance with fewer parameters and less computation. METRO [11] extracts a single global image feature with a convolutional neural network and performs position encoding by repeatedly concatenating the image feature with 3D coordinates of a mesh template. A multi-layer transformer encoder with progressive dimensionality reduction regresses the 3D coordinates of mesh vertices with these input tokens. Due to the constraints of memory and computation, the transformer only processes a coarse mesh by sub-sampling twice with a sampling algorithm [16], and Multi-Layer Perceptrons (MLPs) are then used to upsample the coarse mesh to the original mesh. + +Graph convolutional neural network(GCNN) [9] is good at modeling the local interaction between neighbor vertices, thus it is very appropriate for mesh reconstruction. Pose2Mesh [6] designs a cascaded architecture to regress 3D mesh vertices from 2D pose directly using GCNN. MeshGraphormer [11] combines the ability of transformer and GCNN presenting a graph-convolution-reinforced transformer to model both local and global interactions. + +Instead of extracting a global feature from the input image, pointHMR [7] argues that sampling features guided by vertex-relevant points could better utilize the correspondence between encoded features and spatial positions. 
![](images/6a3720ecd9fe43273f61ce6d6529d5125c1f798c85e7f5a280401577a18d3ca2.jpg) +A + +![](images/6d35ca7d1d67629082f9ac3149aebeabbaae6697081343addc14ff7b7c145b08.jpg) +B + +![](images/1607683d64c667f87876a60767c40cc27dbe78cf9ce6feb39a211de33c49f849.jpg) +C +Image Feature +Convolutions + +![](images/f9c6a8993b853348f669b0126f42922d74f944f22459f59dd032e35c64de2986.jpg) +D + +![](images/b488f650baf99856152306e24e45ca446d0354ef0e2de6103a86430e7531656b.jpg) +E + +![](images/b5023cd8a8b274462102be0957e2ea11211cafea6c3895238bccf03e059cc838.jpg) +F +Figure 2. Illustration of various token generator designs. The grids colored in red represent the sampled points. a) Global feature; b) Grid sampling; c) Keypoint-guided sampling on the original feature map; d) Keypoint-guided sampling with $4\mathrm{x}$ upsampling, resulting in an enhanced feature; e) Keypoint-guided sampling with $4\mathrm{x}$ upsampling, where the feature is further improved by convolution; f) Coarse-mesh-guided point sampling with $4\mathrm{x}$ upsampling. + +![](images/90895b0a416b190f4fd1725853a12cb7b05cf084bb787e60a120cdb1938d299f.jpg) +Coarse Mesh Predictions + +They conduct feature sampling by element-wise multiplication of the image feature and a 2D heatmap trained by projecting the 3D mesh vertices. These sampled features are then fed into a transformer encoder with a progressive attention mask in the form of vertex tokens. The progressively decreased local connection range, realized by constraining the attention mask, encourages the model to consider the local relationship between neighboring vertices. They also use a linear projection to reduce the dimension of the encoded tokens and an upsampling algorithm [16] to expand the sparse vertices into the original dense vertices. + +# 2.2. Lightweight Networks + +To achieve real-time hand mesh reconstruction, many lightweight networks have been studied over the years. FastViT [21] is a hybrid vision transformer architecture which obtains a state-of-the-art latency-accuracy trade-off through structural reparameterization and train-time overparametrization techniques. MobRecon [4] designs multiple complicated modules to improve efficiency, including a stacked 2D encoding structure, a map-based position regression 2D-to-3D block, and a graph operator based on spiral sampling [10]. FastMETRO [5] identifies that the performance bottleneck of encoder-based transformers is caused by the token design, and proposes an encoder-decoder architecture that disentangles the interaction among input tokens, which reduces the number of parameters. + +# 3. Method + +In our research, we dissected the existing methods into two key components: a token generator and a mesh regressor. However, defining the optimal core structure for each of these modules remains a challenging task. For each module, we start with a fundamental, intuitive structure, and then progressively incorporate the most commonly used components, which we have abstracted from state-of-the-art (SOTA) methods. + +Given that these two modules, the token generator and the mesh regressor, operate in tandem, it is important to keep one constant when analysing the other. In practical terms, we first conduct experiments on the mesh regressor while keeping the token generator, as depicted in Figure 2-B, constant. Then, we apply the mesh regressor configuration that demonstrated the best performance to the token generator in subsequent experiments. + +# 3.1. Token Generator
Given a single image of dimensions $\{H,W\}$, our model utilizes a backbone to extract image features $X_{b} \in \mathbb{R}^{\frac{H}{32} \times \frac{W}{32} \times C}$. The token generator $T$ takes $X_{b}$ as input and produces the tokenized mesh feature $X_{m} \in \mathbb{R}^{N \times C}$, where $N$ denotes the number of sampled points. Thus, we can express this as $X_{m} = T(X_{b})$. + +Starting with the simplest implementation, we apply a single spatial pooling (Figure 2-A). This approach establishes a surprisingly competitive baseline, comparable to FastMETRO [5]. Changing spatial pooling to point sampling (Figure 2-B) improves the performance. To further improve the quality of the feature, we follow the MobRecon [4] model and conduct keypoint-guided point sampling (Figure 2-C). However, this modification did not yield any noticeable improvement. + +Upon visual inspection, it appears that a $7 \times 7$ resolution is not sufficiently discriminating. Consequently, we apply deconvolution on $X_{b}$ to upsample the feature map to $14 \times 14$ and then $28 \times 28$ (Figure 2-D), respectively. This approach results in progressive improvement, but the gains do not continue for $8 \times$ deconvolution or larger. + +Models such as MobRecon [4] and PointHMR [7] report improvements from enhancing features, for example, using an FPN-like structure or stacked blocks. In our study, we tested different $4 \times$ upsampling schemes, including two successive $2 \times$ upsamplings, a direct $4 \times$ upsampling, and adding more convolution layers during the upsampling process (Figure 2-E). Although these schemes vary in computational complexity, their performance remains consistent. + +We also tested the coarse mesh sampling method proposed by FastMETRO [5]. This method (Figure 2-F) generates denser points compared to keypoint-guided sampling but does not offer any significant advantages. Detailed results are shown in Table 5. These experiments suggest that keypoint-guided point sampling at an appropriate resolution is the crucial structure for the token generator; feature enhancement and exhaustive point sampling are not as necessary as initially thought.
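To make the keypoint-guided sampling of Figure 2-D concrete, a minimal PyTorch-style sketch is given below. The channel widths, the two-step $4\times$ deconvolution, and the simple regression-style keypoint head are illustrative assumptions rather than the released implementation; the paper only fixes the core structure, i.e., upsampling the $1/32$ backbone feature to $1/8$ resolution and sampling it at the 21 predicted 2D keypoints.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class TokenGenerator(nn.Module):
    """Keypoint-guided point sampling in the spirit of Figure 2-D."""

    def __init__(self, in_ch=768, mid_ch=256, num_joints=21):
        super().__init__()
        # Two stride-2 deconvolutions: 1/32 -> 1/16 -> 1/8 resolution (4x in total).
        self.upsample = nn.Sequential(
            nn.ConvTranspose2d(in_ch, mid_ch, kernel_size=4, stride=2, padding=1),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(mid_ch, mid_ch, kernel_size=4, stride=2, padding=1),
            nn.ReLU(inplace=True),
        )
        # Deliberately simple head regressing 21 normalised (x, y) joints in [-1, 1];
        # the actual keypoint branch may differ (e.g. a heatmap head).
        self.joint_head = nn.Sequential(
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(in_ch, num_joints * 2), nn.Tanh(),
        )
        self.num_joints = num_joints

    def forward(self, x_b):                        # x_b: (B, in_ch, H/32, W/32)
        feat = self.upsample(x_b)                  # (B, mid_ch, H/8, W/8)
        joints_2d = self.joint_head(x_b)           # (B, 2 * num_joints), in [-1, 1]
        grid = joints_2d.view(-1, 1, self.num_joints, 2)
        tokens = F.grid_sample(feat, grid, align_corners=False)  # (B, mid_ch, 1, 21)
        return tokens.squeeze(2).transpose(1, 2), joints_2d      # (B, 21, mid_ch)
```

In practice the predicted 2D joints are also supervised by $L_{J_{2d}}$ (Sec. 3.4), so the sampling locations stay meaningful during training.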
# 3.2. Mesh Regressor + +The mesh regressor $R$ takes the tokenized mesh feature $X_{m} \in \mathbb{R}^{N \times C}$ as input and outputs the predicted mesh, as shown in Figure 3. FastMETRO [5] and MobRecon [4] adopt multi-stage approximation approaches and propose various ways to formulate the topological relationship between joints and mesh. Extracting their common components, we construct a cascaded upsampling mesh regressor $R$ using a series of decoder layers: + +$$ R = H_{k} H_{k-1} \dots H_{0} \tag{1} $$ + +Each decoder layer $H_{k}$ takes the tokens $X_{k}$ as input and processes them with a dimension reduction layer, a MetaFormer block, and an upsampling layer: + +$$ H_{k}\left(X_{k}\right) = U_{k}\left(MF_{k}\left(P_{k}\left(X_{k}\right)\right)\right) \tag{2} $$ + +where $U_{k}$ and $P_{k}$ denote the $k_{th}$ upsampling layer and dimension reduction layer respectively, each composed of a single-layer MLP. The upsampling layer increases the number of tokens, while the dimension reduction layer changes the channel dimension. $MF_{k}$ denotes the $k_{th}$ MetaFormer block, and the output tokens satisfy $X_{k+1} = H_{k}(X_{k})$ with $X_{0} = X_{m}$. + +Let $d_{k}$ denote the output token number of $U_{k}$ and the token number of $MF_{k}$, and let $n_{k}$, $c_{k}$ and $m_{k}$ denote the block number, block dimension and token mixer of $MF_{k}$, respectively. We start with the first layer to demonstrate its operation. For the $N \times C$ shaped input $X_{0}$, $P_{0}$ projects it to $N \times c_{0}$, which is then processed by $MF_{0}$ and output with the same shape. Subsequently, $U_{0}$ upsamples it to $d_{0} \times c_{0}$. The following decoder layers repeat this procedure to output $X_{k}$. + +![](images/20bc45f111a15b135439c8fb106e664e46cf845dd2b2e73932e6dacaf2d9fa1f.jpg) +Figure 3. Architecture of the decoder layer in the mesh regressor. It is composed of a sequentially connected dimension reduction layer, MetaFormer block, and upsampling layer. + +We begin from a baseline where $\{k = 1, n = \{1\}, d = \{778\}, m = \{\text{identity}\}\}$, which yields competitive performance despite its simplicity. We then increase FLOPs by enlarging $n$ but observe no improvement. Inspired by [4], we sequentially add decoder layers with increasing values of $d$. When $k \leq 3$, significant performance improvements are observed; however, as $k$ continues to increase beyond this point, no further gains are detected. Moreover, changing the token mixer from identity to attention is also beneficial, whereas for a fixed $d$, simply increasing $n$ does not improve performance. According to our experimental findings, the core function of each decoder layer is to incrementally raise the number of tokens from an initial quantity of 21 up to 778; additional strategies such as increasing the computational workload or altering intricate details of the network appear to have minimal impact. In our best practice, the parameters were set to $\{k = 3, n = \{1, 1, 1\}, d = \{21, 84, 336\}, m = \{\text{attn}, \text{attn}, \text{attn}\}\}$. + +Existing hand-joint and mesh-topology modulation approaches stand out due to their ability to incorporate spatial information; however, their heuristic design is heavily reliant on hyperparameters and can be labor-intensive. Recognizing this trade-off, we propose a method that modulates spatial relations without requiring manual design or additional computational resources. We achieve this by adding learnable position embedding parameters $emb_{k}$ to each output tensor $X_{k}$: + +$$ X_{k} = X_{k} + emb_{k} \tag{3} $$
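The decoder layer of Eq. (2) and the position embedding of Eq. (3) can be sketched as follows. The token and channel schedule follows the best practice reported above (21 → 84 → 336 → 778 tokens with dimensions 256/128/64), while the block internals (pre-norm layout, feed-forward ratio, head count) are assumptions made only for illustration.

```python
import torch
import torch.nn as nn


class DecoderLayer(nn.Module):
    """One H_k = U_k(MF_k(P_k(x))) step from Eq. (2), with the learnable
    position embedding of Eq. (3) added to the output."""

    def __init__(self, in_tokens, out_tokens, in_dim, out_dim, heads=4):
        super().__init__()
        self.reduce = nn.Linear(in_dim, out_dim)          # P_k: dimension reduction
        self.norm1 = nn.LayerNorm(out_dim)
        self.norm2 = nn.LayerNorm(out_dim)
        self.attn = nn.MultiheadAttention(out_dim, heads, batch_first=True)
        self.mlp = nn.Sequential(
            nn.Linear(out_dim, out_dim * 4), nn.GELU(), nn.Linear(out_dim * 4, out_dim))
        self.upsample = nn.Linear(in_tokens, out_tokens)  # U_k: acts on the token axis
        self.pos_emb = nn.Parameter(torch.zeros(1, out_tokens, out_dim))  # emb_k

    def forward(self, x):                                 # x: (B, in_tokens, in_dim)
        x = self.reduce(x)
        h = self.norm1(x)
        x = x + self.attn(h, h, h, need_weights=False)[0] # MF_k with an attention mixer
        x = x + self.mlp(self.norm2(x))
        x = self.upsample(x.transpose(1, 2)).transpose(1, 2)
        return x + self.pos_emb                           # Eq. (3)


class MeshRegressor(nn.Module):
    """Cascade R = H_2 H_1 H_0 mapping 21 joint tokens to 778 vertices."""

    def __init__(self, token_dim=256):
        super().__init__()
        self.layers = nn.Sequential(
            DecoderLayer(21, 84, token_dim, 256),
            DecoderLayer(84, 336, 256, 128),
            DecoderLayer(336, 778, 128, 64),
        )
        self.to_xyz = nn.Linear(64, 3)                    # per-vertex 3D coordinates

    def forward(self, tokens):                            # tokens: (B, 21, token_dim)
        return self.to_xyz(self.layers(tokens))           # (B, 778, 3)
```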
# 3.3. Framework Design + +As discussed above, the image feature extracted by the backbone is sequentially processed by the token generator and the mesh regressor. The overall framework can be simply expressed as $R(T(X_b))$. + +The core structures form the basis of the overall architecture, which is depicted in Figure 4. Given an input image of size $\{H, W\}$, we conduct point sampling guided by the predicted 21 keypoints at a resolution of $H/8 \times W/8$. For image-classification-style backbones like FastViT, we apply a $4\times$ upsampling deconvolution to the final layer; for segmentation-style backbones like HRNet, we directly use the feature at the corresponding resolution. In the mesh regressor, we apply position encoding before each MetaFormer block. Although this is not regarded as a core structure, it serves as a beneficial addition. + +![](images/8edb852e0115beeab0c0bc7e7aeb837e5e357b5ae47ed067234abb1996615d81.jpg) +Figure 4. Overview of our architecture. The model proceeds as follows: first, the image feature $X_{b}$ is extracted via a backbone network. These features are then passed to our token generator module, which is responsible for predicting 2D keypoints and performing point sampling on the upsampled feature map, thus generating joint tokens. Next, these joint tokens are input into our mesh regressor module, which carries out the mesh prediction to obtain the final coordinates. + +# 3.4. Loss Functions + +The method proposed in this paper is trained with supervision on vertices, 3D joints, and 2D joints. In our implementation, the 2D joints $J_{2d}'$ and the vertices $V_{3d}'$ are predicted directly by the model, while the predicted 3D joints $J_{3d}'$ are calculated as $J_{3d}' = J \times V_{3d}'$, where $J$ denotes the joint regression matrix. All of these components use the L1 loss to measure the discrepancy between the ground truth and the predictions. The losses for the vertices, 3D joints, and 2D joints, denoted as $L_{vert}$, $L_{J_{3d}}$, and $L_{J_{2d}}$, are formulated as follows: + +$$ L_{J_{3d}} = \frac{1}{M_{J_{3d}}} \left\| J_{3d} - J_{3d}^{\prime} \right\|_{1} \tag{4} $$ + +$$ L_{J_{2d}} = \frac{1}{M_{J_{2d}}} \left\| J_{2d} - J_{2d}^{\prime} \right\|_{1} \tag{5} $$ + +$$ L_{vert} = \frac{1}{M_{V_{3d}}} \left\| V_{3d} - V_{3d}^{\prime} \right\|_{1} \tag{6} $$ + +Here, $J_{3d} \in \mathbb{R}^{M \times 3}$ represents the ground-truth points, and symbols annotated with primes denote the predicted values. The overall loss function is defined as: + +$$ L = w_{3d} L_{J_{3d}} + w_{2d} L_{J_{2d}} + w_{vert} L_{vert} \tag{7} $$ + +Since the primary objective of this study is mesh prediction, and the 2D keypoints only affect point sampling and thus do not need to be highly accurate, we set the coefficients $w_{3d}$, $w_{2d}$, and $w_{vert}$ to 10, 1, and 10, respectively. + +# 4. Experiments + +# 4.1. Implementation Details + +Our network is implemented in PyTorch [15]. We use HRNet-W64 [23] and FastViT-MA36 [21] as our backbones, with their initial weights pre-trained on ImageNet. We use the AdamW [8] optimizer and train for a total of 100 epochs. The learning rate is initially set to 5e-4 and reduced to 5e-5 after 50 epochs. We train the network on eight RTX 2080Ti GPUs with a batch size of 32 per GPU. Training takes 7 hours with the FastViT-MA36 backbone and 11 hours with HRNet. When the backbone is HRNet-W64, the features of intermediate layers are fed directly to the token generator without an extra upsampling layer. The mesh regressor has three encoder layers, with input token numbers [21, 84, 336], output token numbers [84, 336, 778], and feature dimensions [256, 128, 64], respectively. We adopt attention as the default token mixer, as its performance is slightly better. + +# 4.2. Datasets + +Our primary experiments and analyses are conducted on the FreiHAND [28] dataset. To validate the generalization of our method, we also conduct experiments on the large-scale 3D hand-object dataset DexYCB [1]. FreiHAND contains 130,240 training samples and 3,960 testing samples; DexYCB contains 406,888 training samples and 78,768 testing samples. + +# 4.3. Evaluation Metrics + +To evaluate the accuracy of 3D hand mesh reconstruction methods, we adopt five metrics: Procrustes-aligned mean per joint position error (PA-MPJPE), Procrustes-aligned mean per vertex position error (PA-MPVPE), mean per joint position error (MPJPE), mean per vertex position error (MPVPE), and F-Score.
| Method | Backbone | PA-MPJPE ↓ | PA-MPVPE ↓ | F@05 ↑ | F@15 ↑ | FPS |
| --- | --- | --- | --- | --- | --- | --- |
| I2L-MeshNet [13] | ResNet50 | 7.4 | 7.6 | 0.681 | 0.973 | 72 |
| CMR [3] | ResNet50 | 6.9 | 7.0 | 0.715 | 0.977 | - |
| I2UV [2] | ResNet50 | 7.2 | 7.4 | 0.682 | 0.973 | - |
| Tang et al. [20] | ResNet50 | 6.7 | 6.7 | 0.724 | 0.981 | 47 |
| MobRecon [4] | DenseStack | 6.9 | 7.2 | 0.694 | 0.979 | 80 |
| METRO [27] | HRNet | 6.7 | 6.8 | 0.717 | 0.981 | 27 |
| MeshGraphomer [11] | HRNet | 6.3 | 6.5 | 0.738 | 0.983 | 24 |
| FastMETRO [5] | HRNet | 6.5 | 7.1 | 0.687 | 0.983 | 28 |
| Deformer [25] | HRNet | 6.2 | 6.4 | 0.743 | 0.984 | - |
| PointHMR [7] | HRNet | 6.1 | 6.6 | 0.720 | 0.984 | - |
| FastViT [21] | FastViT-MA36 | 6.6 | 6.7 | 0.722 | 0.981 | 84 |
| **Ours** | HRNet | **5.8** | **6.1** | **0.766** | **0.986** | 33 |
| **Ours** | FastViT-MA36 | **5.7** | **6.0** | **0.772** | **0.986** | 70 |
+ +

PA-MPJPE and PA-MPVPE refer to the MPJPE and MPVPE after aligning the predicted hand with the ground truth using Procrustes alignment. These two metrics do not consider the impact of global rotation and scale. + +# 4.4. Results + +Comparison with previous methods. To validate our proposed modules, we adopted HRNet and FastViT-MA36 as backbones for the non-real-time and real-time settings respectively, following established models [27] [11] [5] [21]. For fair comparison, we report performance metrics without Test-Time Augmentation (TTA) and FPS without TensorRT optimization. Table 1 shows that our method, despite being slightly slower than FastViT, improves PA-MPJPE by $0.9\mathrm{mm}$. Compared to transformer-based methods, our approach demonstrates superior speed and performance while requiring only $10\%$ of the parameters, as shown in Table 2. + +The qualitative comparison results are shown in Figure 5. Compared to previous methods, our method produces more accurate hand reconstructions. + +Table 1. Results on the FreiHAND dataset. Our results are shown in bold. “-” indicates not reported. Our results surpass all existing methods in terms of accuracy metrics.
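For reference, the Procrustes-aligned metrics can be computed roughly as in the NumPy sketch below: a similarity alignment (rotation, isotropic scale, and translation) is estimated between prediction and ground truth before the per-joint error is averaged. The exact evaluation scripts used by the FreiHAND and DexYCB benchmarks may differ in minor details such as unit handling.

```python
import numpy as np


def pa_mpjpe(pred, gt):
    """Procrustes-aligned mean per joint position error.

    pred, gt: (J, 3) arrays of predicted and ground-truth 3D joints.
    """
    mu_p, mu_g = pred.mean(0), gt.mean(0)
    p, g = pred - mu_p, gt - mu_g                 # centre both point sets
    u, s, vt = np.linalg.svd(g.T @ p)             # SVD of the cross-covariance
    r = u @ vt                                    # applied below as p @ r.T
    if np.linalg.det(r) < 0:                      # avoid an improper reflection
        u[:, -1] *= -1
        s[-1] *= -1
        r = u @ vt
    scale = s.sum() / (p ** 2).sum()              # optimal isotropic scale
    aligned = scale * p @ r.T + mu_g              # prediction mapped onto the GT frame
    return float(np.linalg.norm(aligned - gt, axis=1).mean())
```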
| Method | #Params | PA-MPJPE ↓ | PA-MPVPE ↓ |
| --- | --- | --- | --- |
| METRO [27] | 102M | 6.7 | 6.8 |
| MeshGraphomer [11] | 98M | 6.3 | 6.5 |
| FastMETRO [5] | 25M | 6.5 | 7.1 |
| **Ours** | 1.9M | **5.8** | **6.1** |
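The #Params column in Table 2 counts only the parameters outside the backbone, and the FPS numbers follow the protocol stated above (a single 2080Ti, batch size one, no TensorRT). A hedged sketch of how such numbers can be obtained is shown below; the `backbone` attribute name is hypothetical.

```python
import time
import torch


def count_decoder_params(model, backbone_attr="backbone"):
    """Parameters outside the backbone, as reported in Table 2 (#Params)."""
    backbone = getattr(model, backbone_attr)
    total = sum(p.numel() for p in model.parameters())
    return total - sum(p.numel() for p in backbone.parameters())


@torch.no_grad()
def measure_fps(model, size=224, warmup=20, iters=200, device="cuda"):
    """Single-image latency on GPU, batch size one, no TensorRT."""
    model.eval().to(device)
    x = torch.randn(1, 3, size, size, device=device)
    for _ in range(warmup):                 # warm up kernels before timing
        model(x)
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(iters):
        model(x)
    torch.cuda.synchronize()
    return iters / (time.perf_counter() - start)
```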
+ +

Evaluation on DexYCB. We employed the large-scale hand-object dataset DexYCB to validate our method's effectiveness and generalizability.
| Method | PA-MPJPE ↓ | PA-MPVPE ↓ | MPJPE ↓ | MPVPE ↓ |
| --- | --- | --- | --- | --- |
| METRO [27] | 7.0 | - | - | - |
| Spurr et al. [19] | 6.8 | - | - | - |
| Liu et al. [12] | 6.6 | - | - | - |
| HandOccNet [14] | 5.8 | 5.5 | 14.0 | 13.1 |
| MobRecon [4] | 6.4 | 5.6 | 14.2 | 13.1 |
| H2ONet [24] | 5.7 | 5.5 | 14.0 | 13.0 |
| **Ours** | **5.5** | **5.5** | **12.4** | **12.1** |
+ +

As shown in Table 3, our model outperforms existing single-image-input methods on all metrics. Significantly, we surpass the previous best results by $1.5\mathrm{mm}$ and $0.8\mathrm{mm}$ on the MPJPE and MPVPE measures respectively, thereby setting a new standard and demonstrating our method's broad applicability. + +Table 2. Comparison of transformer-based approaches. #Params refers to the network parameters that are not part of the backbone. Our approach not only surpasses existing benchmarks in key metrics but also achieves a parameter reduction of one to two orders of magnitude. + +Table 3. Results on DexYCB. Our method shows advantages on Procrustes-aligned metrics and surpasses previous methods by a large margin on non-Procrustes-aligned metrics. + +# 4.5. Ablation Study + +To thoroughly validate the various parameter combinations, a large number of ablation experiments were conducted. For efficiency, all ablation experiments were run on smaller models (e.g., Hiera-Tiny). After identifying the optimal parameter combination, it is applied to the standard models to facilitate a fair comparison with existing methods. + +The state-of-the-art backbone Hiera-Tiny [18] is utilized in our study as a strong baseline. We conduct a series of ablation experiments on the FreiHAND dataset to examine the efficacy of the proposed structures. + +Effectiveness of Our Token Generator and Mesh Regressor. To evaluate the effectiveness of our token generator and mesh regressor, we first set up a simple baseline model whose token generator is built on global features and whose mesh regressor is a Multilayer Perceptron (MLP). We then substitute these components with our proposed structures individually. The results, detailed in Table 4, confirm that each module, when used in place of the original structure, contributes positively to overall performance; when used together, these modifications lead to further improvements. + +![](images/f44ac2e201fb60a95408b2b9992d075ef5af700aff0b82debf429c81e83c67a5.jpg) +Figure 5. Qualitative comparison between our method and other state-of-the-art approaches. + +![](images/b7830c556ed21659605f4aeeea3f861caaccc100d64b367eb9e91a2f246b9101.jpg)
| Method | PA-MPJPE ↓ | PA-MPVPE ↓ |
| --- | --- | --- |
| Simple Baseline | 6.9 | 7.2 |
| + mesh regressor | 6.5 | 6.8 |
| + token generator | 6.6 | 7.1 |
| + both | 6.2 | 6.5 |
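The Simple Baseline row of Table 4 corresponds to the ablation starting point described above: a token generator built on a single global feature (Figure 2-A) and a plain MLP mesh regressor. A minimal sketch with hypothetical layer widths is given below.

```python
import torch.nn as nn


class GlobalTokenGenerator(nn.Module):
    """Figure 2-A style token generator: one global feature, no point sampling."""

    def __init__(self, in_ch=768, dim=256):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.proj = nn.Linear(in_ch, dim)

    def forward(self, x_b):                       # x_b: (B, in_ch, h, w)
        g = self.pool(x_b).flatten(1)             # (B, in_ch)
        return self.proj(g).unsqueeze(1)          # a single token: (B, 1, dim)


class MLPMeshRegressor(nn.Module):
    """Plain MLP head used as the ablation starting point."""

    def __init__(self, dim=256, hidden=1024, n_verts=778):
        super().__init__()
        self.n_verts = n_verts
        self.mlp = nn.Sequential(
            nn.Linear(dim, hidden), nn.ReLU(inplace=True),
            nn.Linear(hidden, n_verts * 3))

    def forward(self, tokens):                    # tokens: (B, 1, dim)
        return self.mlp(tokens.squeeze(1)).view(-1, self.n_verts, 3)
```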
+ +

Analysing the Core Structure of the Token Generator. As shown in Table 5, performance using only global features is already competitive. Grid sampling and point sampling on a $7 \times 7$ feature map show similar accuracy. Increasing the resolution of the feature map to $28 \times 28$ through a single four-fold deconvolution improves performance. However, no further gain is achieved by replacing the single four-fold deconvolution with two two-fold deconvolutions or by adding more convolutions. Similarly, no improvement is observed when changing from point sampling to coarse mesh sampling. A qualitative comparison of different point sampling strategies is shown in Fig. 6. + +Table 4. Ablation study of our proposed modules. Each module brings an improvement when used individually; when integrated, they yield an even more substantial improvement.
| Sample Method | Resolution | PA-MPJPE ↓ | PA-MPVPE ↓ |
| --- | --- | --- | --- |
| Global | 1x1 | 6.5 | 6.8 |
| Grid | 7x7 | 6.3 | 6.6 |
| Point | 7x7 | 6.3 | 6.6 |
| Point | 14x14 | 6.3 | 6.6 |
| Point | 28x28 | 6.2 | 6.5 |
| Point | 28x28 enhanced | 6.2 | 6.5 |
| Coarse mesh | 28x28 | 6.2 | 6.5 |
+ +

Table 5. Ablation study of our token generator. Point sampling at a resolution of $28 \times 28$ achieves the best accuracy. Increasing the number of sampled points or incorporating additional convolutional layers does not lead to any further improvement. + +Analysing the Core Structure of the Mesh Regressor. As shown in Table 6, starting from a single encoder layer, adding an extra encoder layer with a larger token number sharply improves accuracy by $0.3\mathrm{mm}$. The optimal setting consists of three encoder layers, with token numbers progressively multiplied by 4. As the layer number increases further, the marginal benefit becomes inconsequential and sometimes even negative. Furthermore, as shown in Table 7, for a fixed set of token numbers, increasing the computational complexity of the encoder layer, through either the block number or the block dimension, produces negligible differences; a middle-sized block dimension setting is optimal. A qualitative comparison of different numbers of upsampling layers is shown in Figure 7. + +
| Layer Nums | Token Nums | PA-MPJPE ↓ | PA-MPVPE ↓ |
| --- | --- | --- | --- |
| 1 | [21] | 6.6 | 7.1 |
| 2 | [21, 256] | 6.3 | 6.6 |
| 2 | [21, 384] | 6.3 | 6.6 |
| 3 | [21, 256, 384] | 6.2 | 6.5 |
| 3 | [21, 84, 336] | 6.2 | 6.5 |
| 4 | [21, 128, 256, 384] | 6.2 | 6.5 |
| 4 | [21, 63, 126, 252] | 6.3 | 6.6 |
+ +Table 6. The Number of Upsampling Layers and Corresponding Token Numbers in Encoding Layers. Three encoding layers yield optimal efficiency. + +
| Dimensions | PA-MPJPE ↓ | PA-MPVPE ↓ |
| --- | --- | --- |
| 64, 32, 16 | 6.5 | 6.9 |
| 128, 64, 32 | 6.3 | 6.6 |
| 256, 128, 64 | 6.2 | 6.5 |
| 512, 256, 12 | 6.2 | 6.5 |
| 1024, 512, 256 | 6.4 | 6.7 |

| Block Nums | PA-MPJPE ↓ | PA-MPVPE ↓ |
| --- | --- | --- |
| 1, 1, 1 | 6.2 | 6.5 |
| 2, 2, 2 | 6.2 | 6.6 |
| 3, 3, 3 | 6.3 | 6.6 |
+ +

Position Encoding and Attention Mixer. We used a position encoding layer and an attention mixer throughout the ablation experiments because they are intuitively helpful. Starting from our best result, removing the position encoding layers degrades accuracy slightly, by $0.1\mathrm{mm}$. A similar drop occurs when we substitute the attention mixer with an identity mixer; see Tab. 8. + +

Table 7. Dimensions and block numbers. Single-layer blocks with middle-sized dimensions are optimal. + +
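The identity-versus-attention comparison in Table 8 (below) only swaps the token mixer inside each MetaFormer block, as sketched here; the normalization and channel-MLP details are illustrative assumptions.

```python
import torch.nn as nn


class MetaFormerBlock(nn.Module):
    """MetaFormer-style block whose token mixer can be swapped between the
    'attn' and 'identity' variants compared in Table 8."""

    def __init__(self, dim, mixer="attn", heads=4, mlp_ratio=4):
        super().__init__()
        assert mixer in ("attn", "identity")
        self.mixer = mixer
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True) if mixer == "attn" else None
        self.mlp = nn.Sequential(
            nn.Linear(dim, dim * mlp_ratio), nn.GELU(), nn.Linear(dim * mlp_ratio, dim))

    def forward(self, x):                          # x: (B, N, dim)
        if self.mixer == "attn":
            h = self.norm1(x)
            x = x + self.attn(h, h, h, need_weights=False)[0]
        # the 'identity' mixer performs no token mixing at all
        x = x + self.mlp(self.norm2(x))
        return x
```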
| Method | PA-MPJPE ↓ | PA-MPVPE ↓ |
| --- | --- | --- |
| hiera-tiny sota | 6.2 | 6.5 |
| Identity mixer | 6.3 | 6.6 |
| w/o position emb | 6.3 | 6.6 |
+ +

Table 8. Token mixer and position embedding. Substituting the attention mixer with an identity mixer, or removing the position encoding layer, drops performance slightly, by $0.1\mathrm{mm}$. + +Limits and Failure Cases. As mentioned earlier, our work is dedicated to summarizing and abstracting from existing work. Since no targeted optimization was performed, some failure cases present in previous work remain challenging. These cases are concentrated in scenes with self-occlusion and object occlusion; see Fig. 8. + +![](images/7b772d9a2614a343087179c363d7891d4ede3c368a87e7cd9d24fd6f7887265c.jpg) +Figure 6. Qualitative comparison of different point sampling strategies. The global/coarse feature fails in scenarios with detailed finger interactions, where the upsampled feature works well. + +![](images/cae26c4b995200f60e2eb3c7a96e17ee4903d8ba1ca56f7e89cd8c14d4972b56.jpg) +Figure 7. Qualitative comparison of the number of layers of the mesh decoder. When constrained to one layer, the reconstructed mesh tends to collapse into unnatural shapes. Performance improves as the number of layers increases. + +![](images/18513b34b4a664abb9520fb38878b2eb1f5417ff247a00fd2c1646e9eaeb2307.jpg) +Figure 8. Typical failure cases. Failure cases are concentrated in scenes with self-occlusion and object occlusion. Some are difficult to discern due to the small area of exposure, while others present ambiguities caused by the occlusion. + +# 5. Conclusion and Future Work + +We observed shared advantages and disadvantages of typical structures. Based on these observations, we introduced the concept of the core structure. Through experiments, we showed that a framework built around the core structures can achieve high performance with a limited computational load. We evaluated our approach quantitatively and qualitatively to demonstrate its effectiveness. + +However, our method is explicitly designed to reconstruct single-hand gestures. Other scenarios, such as extreme lighting, occlusion, interactions, or out-of-distribution cases, showed no improvement over existing methods. Such cases require specifically designed methods. + +# References + +[1] Yu-Wei Chao, Wei Yang, Yu Xiang, Pavlo Molchanov, Ankur Handa, Jonathan Tremblay, Yashraj S Narang, Karl Van Wyk, Umar Iqbal, Stan Birchfield, et al. Dexycb: A benchmark for capturing hand grasping of objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9044–9053, 2021. 2, 5 +[2] Ping Chen, Yujin Chen, Dong Yang, Fangyin Wu, Qin Li, Qingpei Xia, and Yong Tan. I2uv-handnet: Image-to-uv prediction network for accurate and high-fidelity 3d hand mesh modeling. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12929–12938, 2021. 6 +[3] Xingyu Chen, Yufeng Liu, Chongyang Ma, Jianlong Chang, Huayan Wang, Tian Chen, Xiaoyan Guo, Pengfei Wan, and Wen Zheng. Camera-space hand mesh recovery via semantic aggregation and adaptive 2d-1d registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13274–13283, 2021. 6 +[4] Xingyu Chen, Yufeng Liu, Yajiao Dong, Xiong Zhang, Chongyang Ma, Yanmin Xiong, Yuan Zhang, and Xiaoyan Guo. Mobrecon: Mobile-friendly hand mesh reconstruction from monocular image. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 2, 3, 4, 6, 7 +[5] Junhyeong Cho, Kim Youwang, and Tae-Hyun Oh. Cross-attention of disentangled modalities for 3d human mesh recovery with transformers.
1, 3, 4, 6 +[6] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2Mesh: Graph Convolutional Network for 3D Human Pose and Mesh Recovery from a 2D Human Pose, page 769-787. 2020. 2 +[7] Jeonghwan Kim, Mi-Gyeong Gwon, Hyunwoo Park, Hyukmin Kwon, Gi-Mun Um, and Wonjun Kim. Sampling is matter: Point-guided 3d human mesh reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12880-12889, 2023. 1, 2, 4, 6 +[8] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5 +[9] Thomas Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv: Learning, arXiv: Learning, 2016. 2 +[10] Isaak Lim, Alexander Dielen, Marcel Campen, and Leif Kobbelt. A Simple Approach to Intrinsic Correspondence Learning on Unstructured 3D Meshes, page 349-362. 2019. 3 +[11] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 2, 6, 7 +[12] Shaowei Liu, Hanwen Jiang, Jiarui Xu, Sifei Liu, and Xiaolong Wang. Semi-supervised 3d hand-object poses estimation with interactions in time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14687-14697, 2021. 6 + +[13] Gyeongsik Moon and Kyoung Mu Lee. I2l-meshnet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VII 16, pages 752-768. Springer, 2020. 6 +[14] JoonKyu Park, Yeonguk Oh, Gyeongsik Moon, Hongsuk Choi, and Kyoung Mu Lee. Handoccnet: Occlusion-robust 3d hand mesh estimation network. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1496–1505, 2022. 6 +[15] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017. 5 +[16] Anurag Ranjan, Timo Bolkart, Soubhik Sanyal, and Michael J. Black. Generating 3D faces using Convolutional Mesh Autoencoders, page 725-741. 2018. 2, 3 +[17] Javier Romero, Dimitrios Tzionas, and Michael J. Black. Embodied hands: modeling and capturing hands and bodies together. ACM Transactions on Graphics, page 1-17, 2017. 2 +[18] Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan, Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed, Judy Hoffman, et al. Hiera: A hierarchical vision transformer without the bells-and-whistles. arXiv preprint arXiv:2306.00989, 2023. 6 +[19] Adrian Spurr, Umar Iqbal, Pavlo Molchanov, Otmar Hilliges, and Jan Kautz. Weakly supervised 3d hand pose estimation via biomechanical constraints. In European conference on computer vision, pages 211-228. Springer, 2020. 6 +[20] Xiao Tang, Tianyu Wang, and Chi-Wing Fu. Towards accurate alignment in real-time 3d hand-mesh reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11698-11707, 2021. 6 +[21] PavanKumarAnasosalu Vasu, James Gabriel, Jeff Zhu, Oncel Tuzel, and Anurag Ranjan. Fastvit: A fast hybrid vision transformer using structural reparameterization. 2023. 2, 3, 5, 6 +[22] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, AidanN. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Neural Information Processing Systems, Neural Information Processing Systems, 2017. 
2 +[23] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, et al. Deep high-resolution representation learning for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 43(10):3349-3364, 2020. 2, 5 +[24] Hao Xu, Tianyu Wang, Xiao Tang, and Chi-Wing Fu. H2onet: Hand-occlusion-and-orientation-aware network for real-time 3d hand mesh reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17048–17058, 2023. 6 +[25] Yusuke Yoshiyasu. Deformable mesh transformer for 3d human mesh recovery. In Proceedings of the IEEE/CVF Con + +ference on Computer Vision and Pattern Recognition, pages 17006-17015, 2023. 6 +[26] Weihao Yu, Mi Luo, Pan Zhou, Chenyang Si, Yichen Zhou, Xinchao Wang, Jiashi Feng, and Shuicheng Yan. Metaformer is actually what you need for vision. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[27] Xiong Zhang, Qiang Li, Hong Mo, Wenbo Zhang, and Wen Zheng. End-to-end hand mesh recovery from a monocular rgb image. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 2, 6, 7 +[28] Christian Zimmermann, Duygu Ceylan, Jimei Yang, Bryan Russell, Max Argus, and Thomas Brox. Freihand: A dataset for markerless capture of hand pose and shape from single rgb images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 813-822, 2019. 2, 5 \ No newline at end of file diff --git a/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/images.zip b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..7a2d420fef2fd8abb7b8235cb072d4264e46bf4b --- /dev/null +++ b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c123fb5f82158ce6aea7ea3162365f7d524c086cedf4f4895b070a550b618d10 +size 450187 diff --git a/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/layout.json b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0394b254943488f45a2443d8e738b9e2e8e2072c --- /dev/null +++ b/2024/A Simple Baseline for Efficient Hand Mesh Reconstruction/layout.json @@ -0,0 +1,8210 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 115, + 103, + 478, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 103, + 478, + 121 + ], + "spans": [ + { + "bbox": [ + 115, + 103, + 478, + 121 + ], + "type": "text", + "content": "A Simple Baseline for Efficient Hand Mesh Reconstruction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 102, + 142, + 491, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 142, + 491, + 172 + ], + "spans": [ + { + "bbox": [ + 102, + 142, + 491, + 172 + ], + "type": "text", + "content": "Zhishan Zhou*, Shihao Zhou*, Zhi Lv, Minqiang Zou, Yao Tang, Jiajun Liang† \nJiiov Technology" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 174, + 523, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 174, + 523, + 186 + ], + "spans": [ + { + "bbox": [ + 70, + 174, + 523, + 186 + ], + "type": "text", + "content": "{zhishan.zhou, shihao.zhou, zhi.lv, minqiang.zou, yao.tang, jiajun.liang}@jiiov.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 228, + 200, + 361, + 
213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 200, + 361, + 213 + ], + "spans": [ + { + "bbox": [ + 228, + 200, + 361, + 213 + ], + "type": "text", + "content": "http://simplehand.github.io" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 162, + 241, + 209, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 241, + 209, + 254 + ], + "spans": [ + { + "bbox": [ + 162, + 241, + 209, + 254 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 266, + 290, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 266, + 290, + 553 + ], + "spans": [ + { + "bbox": [ + 47, + 266, + 290, + 553 + ], + "type": "text", + "content": "Hand mesh reconstruction has attracted considerable attention in recent years, with various approaches and techniques being proposed. Some of these methods incorporate complex components and designs, which, while effective, may complicate the model and hinder efficiency. In this paper, we decompose the mesh decoder into token generator and mesh regressor. Through extensive ablation experiments, we found that the token generator should select discriminating and representative points, while the mesh regressor needs to upsample sparse keypoints into dense meshes in multiple stages. Given these functionalities, we can achieve high performance with minimal computational resources. Based on this observation, we propose a simple yet effective baseline that outperforms state-of-the-art methods by a large margin, while maintaining real-time efficiency. Our method outperforms existing solutions, achieving state-of-the-art (SOTA) results across multiple datasets. On the FreiHAND dataset, our approach produced a PA-MPJPE of 5.8mm and a PA-MPVPE of 6.1mm. Similarly, on the DexYCB dataset, we observed a PA-MPJPE of 5.5mm and a PA-MPVPE of 5.5mm. As for performance speed, our method reached up to 33 frames per second (fps) when using HRNet and up to 70 fps when employing FastViT-MA36. Code will be made available." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 577, + 128, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 577, + 128, + 590 + ], + "spans": [ + { + "bbox": [ + 47, + 577, + 128, + 590 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 597, + 287, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 597, + 287, + 694 + ], + "spans": [ + { + "bbox": [ + 46, + 597, + 287, + 694 + ], + "type": "text", + "content": "The field of hand mesh reconstruction has seen rapid advancements, with various types of mesh decoders being proposed. Despite their commendable performance, these methods often suffer from high system complexity, involving unnecessary components that may hinder efficiency. To facilitate a clear discussion, we decompose the mesh decoder into two primary components: the token generator and the mesh regressor." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 306, + 247, + 550, + 405 + ], + "blocks": [ + { + "bbox": [ + 306, + 247, + 550, + 405 + ], + "lines": [ + { + "bbox": [ + 306, + 247, + 550, + 405 + ], + "spans": [ + { + "bbox": [ + 306, + 247, + 550, + 405 + ], + "type": "image", + "image_path": "3ee37e25ed7c7f5afd0b7a6936538d9ed086bbbeea4e3a8ac57f87270d61a929.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 415, + 547, + 481 + ], + "lines": [ + { + "bbox": [ + 304, + 415, + 547, + 481 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 547, + 481 + ], + "type": "text", + "content": "Figure 1. Trade-off between Accuracy and Inference Speed. Our technique surpasses non-real-time methods " + }, + { + "bbox": [ + 304, + 415, + 547, + 481 + ], + "type": "inline_equation", + "content": "(\\leq 40" + }, + { + "bbox": [ + 304, + 415, + 547, + 481 + ], + "type": "text", + "content": " fps) in both speed and precision. Compared to real-time methods " + }, + { + "bbox": [ + 304, + 415, + 547, + 481 + ], + "type": "inline_equation", + "content": "(\\geq 70" + }, + { + "bbox": [ + 304, + 415, + 547, + 481 + ], + "type": "text", + "content": " fps), it offers a substantial boost in accuracy while preserving comparable speeds. For fair comparison, all speed evaluations were conducted on a 2080ti GPU with a batch size of one." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 507, + 547, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 507, + 547, + 663 + ], + "spans": [ + { + "bbox": [ + 304, + 507, + 547, + 663 + ], + "type": "text", + "content": "The token generator serves a crucial role by integrating prior information with image features to extract task-specific features. For instance, FastMETRO [5] employs a strategy to predict weak-perspective camera parameters, which aggregates image features. MobRecon [4] develops a stacked encoding network to obtain gradually refined encoding features, and applies a technique known as pose pooling to suppress features that are unrelated to joint landmarks. PointHMR [7] on the other hand, proposes to use features sampled at positions of vertices projected from 3D to 2D spaces as intermediate guidance. These approaches collectively provide informative and discriminating features that enhance the overall performance of the system." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "The mesh regressor, the second component, decodes the tokenized features obtained from the token generator into mesh predictions. 
FastMETRO [5] takes a set of learnable joint tokens and vertex tokens as input and masks" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 65, + 702, + 222, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 702, + 222, + 713 + ], + "spans": [ + { + "bbox": [ + 65, + 702, + 222, + 713 + ], + "type": "text", + "content": "* Equally contribution. † Corresponding author." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "1367" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": "self-attention of non-adjacent vertices according to the topology of the triangle mesh during training. MobRecon [4] employs a strategy of 2D-to-3D lifting and Pose-to-vertex lifting to gradually approximate meshes. MeshGraphormer [11] uses a coarse template mesh for positional encoding and then applies a linear Multi-Layer Perceptron (MLP) to sample the coarse mesh up to the original resolution. These methods aim to alleviate training difficulties due to heterogeneous modalities." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 181, + 289, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 181, + 289, + 373 + ], + "spans": [ + { + "bbox": [ + 46, + 181, + 289, + 373 + ], + "type": "text", + "content": "Through investigation on existing methods, we found a interesting phenomenon that, although some methods shares same performance, they differ in specific failure cases. Namely, methods with coarse sampling strategy lack perceptual ability for fine-grained gestures such as pinch. Methods with limited upsample layers struggles in generating reasonable hand shapes. This observation prompts us to question: How different structures make effect on mesh decoder? By answering the question, we can streamline the process, eliminating excessive computation and complex components, to complete mesh prediction in a simple and efficient way. To design concise experiments, we start from the simplest structure for the aforementioned two modules, then gradually add and optimize the most commonly used components abstracted from the state-of-the-art (SOTA) methods." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 373, + 289, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 373, + 289, + 481 + ], + "spans": [ + { + "bbox": [ + 46, + 373, + 289, + 481 + ], + "type": "text", + "content": "Through extensive ablation experiments, we discovered that the important structure of token generator is to sample discriminating and representative points, while the important structure of mesh generator is to upsample sparse keypoints into dense meshes. For implicitly, in the following paper, we define each of these structure as core structure. In the model design process, provided that the core structure's functionality is fulfilled, high performance can be achieved with minimal computational resources." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 482, + 289, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 482, + 289, + 639 + ], + "spans": [ + { + "bbox": [ + 46, + 482, + 289, + 639 + ], + "type": "text", + "content": "Based on these observations, we propose a simple baseline that surpasses the SOTA methods by a significant margin and is computationally efficient. Referring to Figure 1, our proposed technique delivers state-of-the-art performance on various datasets. On the FreiHAND [28] dataset, it recorded a PA-MPJPE of " + }, + { + "bbox": [ + 46, + 482, + 289, + 639 + ], + "type": "inline_equation", + "content": "5.8\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 482, + 289, + 639 + ], + "type": "text", + "content": " and PA-MPVPE of " + }, + { + "bbox": [ + 46, + 482, + 289, + 639 + ], + "type": "inline_equation", + "content": "6.1\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 482, + 289, + 639 + ], + "type": "text", + "content": ". When tested on the DexYCB [1] dataset, these metrics were further refined to a PA-MPJPE of " + }, + { + "bbox": [ + 46, + 482, + 289, + 639 + ], + "type": "inline_equation", + "content": "5.5\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 482, + 289, + 639 + ], + "type": "text", + "content": " and a PA-MPVPE of " + }, + { + "bbox": [ + 46, + 482, + 289, + 639 + ], + "type": "inline_equation", + "content": "5.5\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 482, + 289, + 639 + ], + "type": "text", + "content": ". Our method is also advantaged in efficiency, achieving 33 frames per second (fps) on HRNet[23] and an impressive 70 fps on FastViTMA36 [21]. Our contributions can be summarized as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 641, + 287, + 714 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 47, + 641, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 641, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 641, + 287, + 677 + ], + "type": "text", + "content": "1. We abstract existing methods into token generator and mesh regressor modules, and reveal the core structures of these two modules respectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 714 + ], + "type": "text", + "content": "2. Based on these core structures, we developed a streamlined, real-time hand mesh regression module that excels in both efficiency and accuracy." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 72, + 545, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 545, + 121 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 545, + 121 + ], + "type": "text", + "content": "3. Our method has achieved PA-MPJPE of " + }, + { + "bbox": [ + 306, + 72, + 545, + 121 + ], + "type": "inline_equation", + "content": "5.7\\mathrm{mm}" + }, + { + "bbox": [ + 306, + 72, + 545, + 121 + ], + "type": "text", + "content": " and PA-MPVPE of " + }, + { + "bbox": [ + 306, + 72, + 545, + 121 + ], + "type": "inline_equation", + "content": "6.0\\mathrm{mm}" + }, + { + "bbox": [ + 306, + 72, + 545, + 121 + ], + "type": "text", + "content": " on FreiHAND, and achieved SOTA results on multiple datasets, demonstrating its effectiveness and generalizability." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 130, + 392, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 130, + 392, + 144 + ], + "spans": [ + { + "bbox": [ + 306, + 130, + 392, + 144 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 151, + 547, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 151, + 547, + 236 + ], + "spans": [ + { + "bbox": [ + 304, + 151, + 547, + 236 + ], + "type": "text", + "content": "In this section, we briefly review existing methods of hand mesh reconstruction which usually include two main components: a token generator and a mesh regressor. Token generator processes the backbone image feature and generates tokens fed to the decoder. Mesh regressor decodes the input tokens into 3D mesh directly or parametric hand model coefficient." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 240, + 457, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 240, + 457, + 253 + ], + "spans": [ + { + "bbox": [ + 306, + 240, + 457, + 253 + ], + "type": "text", + "content": "2.1. Hand Mesh Reconstruction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 258, + 545, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 258, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 304, + 258, + 545, + 330 + ], + "type": "text", + "content": "Estimating the 3D hand mesh from a single image has been widely researched. [27] proposes an end-to-end framework to recover hand mesh from a monocular RGB image. They use the 2D heatmap as input tokens and fully convolutional and fully connected layers to regress the MANO [17] parameters." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 331, + 546, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 546, + 556 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 546, + 556 + ], + "type": "text", + "content": "Transformer [22] has shown powerful performance in language and vision tasks which could model long range relation among input tokens. MetaFormer [26] argues that the general architecture of transformers instead of the specific token mixer is the key player. They replace the self-attention module with a simple spatial pooling operator and achieve competitive performance with fewer parameters and less computation. METRO [11] extracts a single global image feature with a convolutional neural network and performs position encoding by repeatedly concatenating the image feature with 3D coordinates of a mesh template. 
A multi-layer transformer encoder with progressive dimensionality reduction regresses the 3D coordinates of mesh vertices with these input tokens. Due to the constraints of memory and computation, the transformer only processes a coarse mesh by sub-sampling twice with a sampling algorithm [16], and Multi-Layer Perceptrons (MLPs) are then used to upsample the coarse mesh to the original mesh." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 558, + 547, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 547, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 547, + 665 + ], + "type": "text", + "content": "Graph convolutional neural network(GCNN) [9] is good at modeling the local interaction between neighbor vertices, thus it is very appropriate for mesh reconstruction. Pose2Mesh [6] designs a cascaded architecture to regress 3D mesh vertices from 2D pose directly using GCNN. MeshGraphormer [11] combines the ability of transformer and GCNN presenting a graph-convolution-reinforced transformer to model both local and global interactions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "content": "Instead of extracting a global feature from the input image, pointHMR [7] argues that sampling features guided by vertex-relevant points could better utilize the correspondence between encoded features and spatial positions. They" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1368" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 68, + 160, + 112 + ], + "blocks": [ + { + "bbox": [ + 47, + 68, + 160, + 112 + ], + "lines": [ + { + "bbox": [ + 47, + 68, + 160, + 112 + ], + "spans": [ + { + "bbox": [ + 47, + 68, + 160, + 112 + ], + "type": "image", + "image_path": "6a3720ecd9fe43273f61ce6d6529d5125c1f798c85e7f5a280401577a18d3ca2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 94, + 130, + 104, + 140 + ], + "lines": [ + { + "bbox": [ + 94, + 130, + 104, + 140 + ], + "spans": [ + { + "bbox": [ + 94, + 130, + 104, + 140 + ], + "type": "text", + "content": "A" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 181, + 69, + 291, + 112 + ], + "blocks": [ + { + "bbox": [ + 181, + 69, + 291, + 112 + ], + "lines": [ + { + "bbox": [ + 181, + 69, + 291, + 112 + ], + "spans": [ + { + "bbox": [ + 181, + 69, + 291, + 112 + ], + "type": "image", + "image_path": "6d35ca7d1d67629082f9ac3149aebeabbaae6697081343addc14ff7b7c145b08.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 231, + 130, + 241, + 140 + ], + "lines": [ + { + "bbox": [ + 231, + 130, + 241, + 140 + ], + "spans": [ + { + "bbox": [ + 231, + 130, + 241, + 140 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 309, + 69, + 470, + 128 + ], 
+ "blocks": [ + { + "bbox": [ + 309, + 69, + 470, + 128 + ], + "lines": [ + { + "bbox": [ + 309, + 69, + 470, + 128 + ], + "spans": [ + { + "bbox": [ + 309, + 69, + 470, + 128 + ], + "type": "image", + "image_path": "1607683d64c667f87876a60767c40cc27dbe78cf9ce6feb39a211de33c49f849.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 130, + 368, + 139 + ], + "lines": [ + { + "bbox": [ + 359, + 130, + 368, + 139 + ], + "spans": [ + { + "bbox": [ + 359, + 130, + 368, + 139 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 478, + 79, + 539, + 91 + ], + "lines": [ + { + "bbox": [ + 478, + 79, + 539, + 91 + ], + "spans": [ + { + "bbox": [ + 478, + 79, + 539, + 91 + ], + "type": "text", + "content": "Image Feature" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 481, + 112, + 536, + 122 + ], + "lines": [ + { + "bbox": [ + 481, + 112, + 536, + 122 + ], + "spans": [ + { + "bbox": [ + 481, + 112, + 536, + 122 + ], + "type": "text", + "content": "Convolutions" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 48, + 158, + 156, + 220 + ], + "blocks": [ + { + "bbox": [ + 48, + 158, + 156, + 220 + ], + "lines": [ + { + "bbox": [ + 48, + 158, + 156, + 220 + ], + "spans": [ + { + "bbox": [ + 48, + 158, + 156, + 220 + ], + "type": "image", + "image_path": "f9c6a8993b853348f669b0126f42922d74f944f22459f59dd032e35c64de2986.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 95, + 228, + 104, + 238 + ], + "lines": [ + { + "bbox": [ + 95, + 228, + 104, + 238 + ], + "spans": [ + { + "bbox": [ + 95, + 228, + 104, + 238 + ], + "type": "text", + "content": "D" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 182, + 159, + 291, + 220 + ], + "blocks": [ + { + "bbox": [ + 182, + 159, + 291, + 220 + ], + "lines": [ + { + "bbox": [ + 182, + 159, + 291, + 220 + ], + "spans": [ + { + "bbox": [ + 182, + 159, + 291, + 220 + ], + "type": "image", + "image_path": "b488f650baf99856152306e24e45ca446d0354ef0e2de6103a86430e7531656b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 231, + 228, + 240, + 238 + ], + "lines": [ + { + "bbox": [ + 231, + 228, + 240, + 238 + ], + "spans": [ + { + "bbox": [ + 231, + 228, + 240, + 238 + ], + "type": "text", + "content": "E" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 309, + 159, + 417, + 220 + ], + "blocks": [ + { + "bbox": [ + 309, + 159, + 417, + 220 + ], + "lines": [ + { + "bbox": [ + 309, + 159, + 417, + 220 + ], + "spans": [ + { + "bbox": [ + 309, + 159, + 417, + 220 + ], + "type": "image", + "image_path": "b5023cd8a8b274462102be0957e2ea11211cafea6c3895238bccf03e059cc838.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 228, + 367, + 238 + ], + "lines": [ + { + "bbox": [ + 359, + 228, + 367, + 238 + ], + "spans": [ + { + "bbox": [ + 359, + 228, + 367, + 238 + ], + "type": "text", + "content": "F" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 45, + 252, + 547, + 299 + ], + "lines": [ + { + "bbox": [ + 45, + 252, + 547, + 299 + ], + "spans": [ + { + "bbox": [ + 
45, + 252, + 547, + 299 + ], + "type": "text", + "content": "Figure 2. An illustration demonstrates various designs of token generators. The grids colored in red represent the sampled points. a) Global feature; b) Grid sampling; c) Keypoint-guided sampling on the original feature map; d) Keypoint-guided sampling with " + }, + { + "bbox": [ + 45, + 252, + 547, + 299 + ], + "type": "inline_equation", + "content": "4\\mathrm{x}" + }, + { + "bbox": [ + 45, + 252, + 547, + 299 + ], + "type": "text", + "content": " upsampling, resulting in an enhanced feature; e) Keypoint-guided sampling with " + }, + { + "bbox": [ + 45, + 252, + 547, + 299 + ], + "type": "inline_equation", + "content": "4\\mathrm{x}" + }, + { + "bbox": [ + 45, + 252, + 547, + 299 + ], + "type": "text", + "content": " upsampling, where the feature is further improved by convolution; f) Coarse-mesh-guided point sampling with " + }, + { + "bbox": [ + 45, + 252, + 547, + 299 + ], + "type": "inline_equation", + "content": "4\\mathrm{x}" + }, + { + "bbox": [ + 45, + 252, + 547, + 299 + ], + "type": "text", + "content": " upsampling." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 429, + 141, + 541, + 158 + ], + "blocks": [ + { + "bbox": [ + 429, + 141, + 541, + 158 + ], + "lines": [ + { + "bbox": [ + 429, + 141, + 541, + 158 + ], + "spans": [ + { + "bbox": [ + 429, + 141, + 541, + 158 + ], + "type": "image", + "image_path": "90895b0a416b190f4fd1725853a12cb7b05cf084bb787e60a120cdb1938d299f.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 482, + 169, + 536, + 190 + ], + "lines": [ + { + "bbox": [ + 482, + 169, + 536, + 190 + ], + "spans": [ + { + "bbox": [ + 482, + 169, + 536, + 190 + ], + "type": "text", + "content": "Coarse Mesh Predictions" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 318, + 289, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 318, + 289, + 449 + ], + "spans": [ + { + "bbox": [ + 46, + 318, + 289, + 449 + ], + "type": "text", + "content": "conduct feature sampling by element-wise multiplication of image feature and 2D heatmap trained by projection of 3D mesh vertices. These sampled features are then fed into the transformer encoder with progressive attention mask as the form of vertex token. The progressively decreased local connection range realized by constraining the attention mask encourage the model to consider the local relationship between neighbor vertices. They also use linear projection to reduce the dimension of the encoded token and upsampling algorithm [16] to expand the sparse vertices into original dense vertices." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 458, + 174, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 458, + 174, + 471 + ], + "spans": [ + { + "bbox": [ + 47, + 458, + 174, + 471 + ], + "type": "text", + "content": "2.2.Lightweight Networks" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 46, + 477, + 287, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 477, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 46, + 477, + 287, + 646 + ], + "type": "text", + "content": "To achieve real-time hand mesh reconstruction, many lightweight networks have been studied for years. 
FastViT [21] is a hybrid vision transformer architecture which obtains state-of-the-art latency-accuracy tradeoff by structural reparameterization and train-time overparametrization techniques. MobRecon [4] designs multiple complicated modules to improve efficiency, including a stacked 2D encoding structure, a map-based position regression 2D-to-3D block and a graph operator based on spiral sampling [10]. FastMETRO [5] identifies the performance bottleneck of encoder-based transformers is caused by token design. They propose an encoder-decoder architecture to disentangle the interaction among input tokens which reduces the parameter." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 657, + 102, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 657, + 102, + 669 + ], + "spans": [ + { + "bbox": [ + 47, + 657, + 102, + 669 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": "In our research, we dissected the existing methods into two key components: a token generator and a mesh regressor. However, defining the optimal core structure for each of" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 318, + 546, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 318, + 546, + 377 + ], + "spans": [ + { + "bbox": [ + 304, + 318, + 546, + 377 + ], + "type": "text", + "content": "these modules remains a challenging task. For each module, we start with a fundamental, intuitive structure, and then progressively incorporate the most commonly used components, which we have abstracted from state-of-the-art (SOTA) methods." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 379, + 546, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 379, + 546, + 475 + ], + "spans": [ + { + "bbox": [ + 304, + 379, + 546, + 475 + ], + "type": "text", + "content": "Given that these two modules, the token generator and the mesh regressor, operate in tandem, it's important to keep one constant when analysing the other. In practical terms, we first conduct experiments on the mesh regressor while keeping the token generator, as depicted in Figure 2-B, constant. Then, we apply the mesh regressor configuration that demonstrated the best performance to the token generator in subsequent experiments." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 485, + 409, + 497 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 485, + 409, + 497 + ], + "spans": [ + { + "bbox": [ + 306, + 485, + 409, + 497 + ], + "type": "text", + "content": "3.1. 
Token Generator" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "text", + "content": "Given a single image of dimensions " + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "inline_equation", + "content": "\\{H,W\\}" + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "text", + "content": ", our model utilizes a backbone to extract image features " + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "inline_equation", + "content": "X_{b}^{\\in \\frac{H}{32} \\times \\frac{W}{32} \\times C}" + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "text", + "content": ". The token generator " + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "text", + "content": " takes " + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "inline_equation", + "content": "X_{b}" + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "text", + "content": " as input and produces tokenized mesh feature " + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "inline_equation", + "content": "X_{m}^{\\in N \\times C}" + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "text", + "content": " denotes the number of sampled points. Thus, we can express this as " + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "inline_equation", + "content": "X_{m} = T(X_{b})" + }, + { + "bbox": [ + 304, + 504, + 545, + 581 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 581, + 546, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 546, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 546, + 689 + ], + "type": "text", + "content": "Starting with the simplest implementation, we apply a single spatial pooling (Figure 2-A). This approach establishes a surprisingly competitive baseline, comparable to the Fastmetro [5]. Changing spatial pooling to point sample (Figure 2-B) improves the performance. To further improve the quality of the feature, we follow the MobRecon [4] model and conduct keypoint-guided point sampling (Figure 2-C). However, this modification did not yield any noticeable improvement." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 305, + 689, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 715 + ], + "type": "text", + "content": "Upon visual inspection, it appears that a " + }, + { + "bbox": [ + 305, + 689, + 545, + 715 + ], + "type": "inline_equation", + "content": "7 \\times 7" + }, + { + "bbox": [ + 305, + 689, + 545, + 715 + ], + "type": "text", + "content": " resolution is not sufficiently discriminating. 
Consequently, we apply" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1369" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": "deconvolution on " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "X_{b}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " to sample the feature map to " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "14 \\times 14" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "28 \\times 28" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " (Figure 2-D), respectively. This approach results in progressive improvement, but it does not work for " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "8 \\times" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " deconvolution or larger." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 121, + 286, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 121, + 286, + 216 + ], + "spans": [ + { + "bbox": [ + 46, + 121, + 286, + 216 + ], + "type": "text", + "content": "Models such as MobRecon [4] and PointHMR [7] report improvements by enhancing features, for example, using a FPN-like structure or stacked blocks. In our study, we tested different " + }, + { + "bbox": [ + 46, + 121, + 286, + 216 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 46, + 121, + 286, + 216 + ], + "type": "text", + "content": " upsample schemes, including double " + }, + { + "bbox": [ + 46, + 121, + 286, + 216 + ], + "type": "inline_equation", + "content": "2 \\times" + }, + { + "bbox": [ + 46, + 121, + 286, + 216 + ], + "type": "text", + "content": " upsampling, directly " + }, + { + "bbox": [ + 46, + 121, + 286, + 216 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 46, + 121, + 286, + 216 + ], + "type": "text", + "content": " upsampling, and adding more convolution layers during the upsampling process (Figure 2-E). Although these schemes vary in computational complexity, their performance remains consistent." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 217, + 286, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 217, + 286, + 326 + ], + "spans": [ + { + "bbox": [ + 46, + 217, + 286, + 326 + ], + "type": "text", + "content": "We also tested the coarse mesh sampling method proposed by FastMETRO [5]. This method (Figure 2-F) generates denser points compared to keypoint-guided sampling but does not offer any significant advantages. Detailed results are shown in table 5. These experiments suggest that keypoint-guided point sampling at an appropriate resolution is a crucial structure for the token generator. 
As such, feature enhancement and exhaustive point sampling are not as necessary as initially thought." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 335, + 144, + 347 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 335, + 144, + 347 + ], + "spans": [ + { + "bbox": [ + 47, + 335, + 144, + 347 + ], + "type": "text", + "content": "3.2. Mesh Regressor" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 354, + 286, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 286, + 437 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 286, + 437 + ], + "type": "text", + "content": "The mesh regressor " + }, + { + "bbox": [ + 47, + 354, + 286, + 437 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 47, + 354, + 286, + 437 + ], + "type": "text", + "content": " takes the tokenized mesh feature " + }, + { + "bbox": [ + 47, + 354, + 286, + 437 + ], + "type": "inline_equation", + "content": "X_{m}^{\\in N\\times C}" + }, + { + "bbox": [ + 47, + 354, + 286, + 437 + ], + "type": "text", + "content": " as input and outputs predicted meshes Figure 3. [5] [4] adopts a multi-stage approximation approach and proposes various methods to formulate the topology relationship between joints and mesh. Finding their intersection components, we construct a cascading upsampling mesh regressor " + }, + { + "bbox": [ + 47, + 354, + 286, + 437 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 47, + 354, + 286, + 437 + ], + "type": "text", + "content": " using a series of decoder layers:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 126, + 450, + 286, + 462 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 450, + 286, + 462 + ], + "spans": [ + { + "bbox": [ + 126, + 450, + 286, + 462 + ], + "type": "interline_equation", + "content": "R = H _ {k} H _ {k - 1} \\dots H _ {0} \\tag {1}", + "image_path": "86405d61812bfcdb744bb1104ea2bb3a969aa427d6aefc5aeee81c2e6857b500.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 474, + 286, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 286, + 510 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 286, + 510 + ], + "type": "text", + "content": "Each decoder layer " + }, + { + "bbox": [ + 46, + 474, + 286, + 510 + ], + "type": "inline_equation", + "content": "H_{k}" + }, + { + "bbox": [ + 46, + 474, + 286, + 510 + ], + "type": "text", + "content": " takes the calculated tokens " + }, + { + "bbox": [ + 46, + 474, + 286, + 510 + ], + "type": "inline_equation", + "content": "T_{k}" + }, + { + "bbox": [ + 46, + 474, + 286, + 510 + ], + "type": "text", + "content": " as input, then subsequently processes these using a dimension reduce layer, metaformer, and upsample layer:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 101, + 521, + 286, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 521, + 286, + 533 + ], + "spans": [ + { + "bbox": [ + 101, + 521, + 286, + 533 + ], + "type": "interline_equation", + "content": "H _ {k} \\left(X _ {k}\\right) = U _ {k} \\left(M F _ {k} \\left(P _ {k} \\left(X _ {k}\\right)\\right)\\right) \\tag {2}", + "image_path": "053c2328af3634163fe567bab103a41c5e4f6fc30fc2b2ffbbf829ec91d07627.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "spans": [ + { + "bbox": [ + 
47, + 544, + 286, + 616 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "inline_equation", + "content": "U_{k}" + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "inline_equation", + "content": "P_{k}" + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "inline_equation", + "content": "k_{th}" + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "text", + "content": " upsample layer and dimension reduce layer respectively, each composed of a single-layer MLP. The upsample layer increases token numbers, while the dimension reduce layer modifies channel shapes. " + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "inline_equation", + "content": "MF_{k}" + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "inline_equation", + "content": "k_{th}" + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "text", + "content": " metaformer block, " + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "inline_equation", + "content": "T_{k}" + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "inline_equation", + "content": "k_{th}" + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "text", + "content": " output token where " + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "inline_equation", + "content": "X_{k + 1} = H_{k}(X_{k})" + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "inline_equation", + "content": "X_0 = X_m" + }, + { + "bbox": [ + 47, + 544, + 286, + 616 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": " be the output dimension of " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "U_{k}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": " and token numbers of " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "MF_{k}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "n_{k}, c_{k}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "m_{k}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": " are the block number, tokenmixer and block dimensions for " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "MF_{k}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": ". We start with the first layer " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "MF_{0}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": " to demonstrate its operation. For the " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "N \\times C" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": " shaped tensor " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "T_{0}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "P_{0}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": " projects it to " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "N \\times c" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": ", which is then processed by " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "MF_{0}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": " and outputs a tensor of the same shape. Subsequently, " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "U_{k}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": " upsamples it to " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "d \\times c" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": ". The following decoder layers repeat this procedure to output " + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "inline_equation", + "content": "X_{k}" + }, + { + "bbox": [ + 46, + 618, + 286, + 713 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 72, + 542, + 189 + ], + "blocks": [ + { + "bbox": [ + 309, + 72, + 542, + 189 + ], + "lines": [ + { + "bbox": [ + 309, + 72, + 542, + 189 + ], + "spans": [ + { + "bbox": [ + 309, + 72, + 542, + 189 + ], + "type": "image", + "image_path": "20bc45f111a15b135439c8fb106e664e46cf845dd2b2e73932e6dacaf2d9fa1f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 200, + 545, + 233 + ], + "lines": [ + { + "bbox": [ + 305, + 200, + 545, + 233 + ], + "spans": [ + { + "bbox": [ + 305, + 200, + 545, + 233 + ], + "type": "text", + "content": "Figure 3. Architecture of decoder layer in mesh regressor. It is composed of sequentially connected dimension reduce layer, metaformer block and upsample layer." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "spans": [ + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "text", + "content": "We began from a baseline where " + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "inline_equation", + "content": "\\{k = 1, n = \\{1\\}, d = \\{778\\}, m = \\{\\text{identity}\\}\\}" + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "text", + "content": ", which yielded competitive performance despite its simplicity. We then increase flops by enlarge " + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "text", + "content": " but observe no improvement. Inspired by [4], We sequentially add blocks with an increasing value of " + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "text", + "content": ". When " + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "inline_equation", + "content": "k \\leq 3" + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "text", + "content": ", Significant performance improvements are observed. However, as " + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "text", + "content": " continues to increase beyond this point, no further gains are detected. Moreover, Modifying the token mixer from ide to attn also beneficial. However, for fixed " + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "text", + "content": ", simply increasing " + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "text", + "content": " did not improve performance. According to our experimental findings, the core function of each decoder layer is to incrementally elevate the number of tokens from an initial quantity of 21 up to 778. Additional strategies like augmenting computational workload or altering intricate specifics of the network appear to have minimal impact. 
In our best practice, parameters were set to " + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "inline_equation", + "content": "\\{k = 3, n = \\{1, 1, 1\\}, d = \\{21, 84, 336\\}, m = \\{\\text{attn}, \\text{attn}, \\text{attn}\\}\\}" + }, + { + "bbox": [ + 305, + 255, + 545, + 471 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 472, + 545, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 472, + 545, + 579 + ], + "spans": [ + { + "bbox": [ + 305, + 472, + 545, + 579 + ], + "type": "text", + "content": "Existing hand joints and mesh topology modulation approaches stand out due to their ability to incorporate spatial information. However, their heuristic design is heavily reliant on hyperparameters and can be labor-intensive. Recognizing these strengths, we propose a novel method that modulates spatial relations without requiring manual design or additional computational resources. We achieve this by introducing learnable position embedding parameters " + }, + { + "bbox": [ + 305, + 472, + 545, + 579 + ], + "type": "inline_equation", + "content": "emb_{k}" + }, + { + "bbox": [ + 305, + 472, + 545, + 579 + ], + "type": "text", + "content": " to each output tensor " + }, + { + "bbox": [ + 305, + 472, + 545, + 579 + ], + "type": "inline_equation", + "content": "X_{k}" + }, + { + "bbox": [ + 305, + 472, + 545, + 579 + ], + "type": "text", + "content": " where" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 387, + 593, + 545, + 605 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 387, + 593, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 387, + 593, + 545, + 605 + ], + "type": "interline_equation", + "content": "X _ {k} = X _ {k} + e m b _ {k} \\tag {3}", + "image_path": "e018436f8dad4a4b8b3bc4e5c1c63529f1777fa87836ded6e7a5c1c9bfdb7b62.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 622, + 418, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 622, + 418, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 622, + 418, + 635 + ], + "type": "text", + "content": "3.3. Framework Design" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 641, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 641, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 305, + 641, + 545, + 689 + ], + "type": "text", + "content": "As discussed above, the image feature extracted by the backbone is sequentially processed by both the token generator and the mesh regressor. The overall framework can be simply computed by " + }, + { + "bbox": [ + 305, + 641, + 545, + 689 + ], + "type": "inline_equation", + "content": "R(T(X_b))" + }, + { + "bbox": [ + 305, + 641, + 545, + 689 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "The core structures form the basis of the overall structure, which is depicted in Figure 4. 
Given an input image of" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1370" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 71, + 539, + 178 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 539, + 178 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 539, + 178 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 539, + 178 + ], + "type": "image", + "image_path": "8edb852e0115beeab0c0bc7e7aeb837e5e357b5ae47ed067234abb1996615d81.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 186, + 547, + 232 + ], + "lines": [ + { + "bbox": [ + 46, + 186, + 547, + 232 + ], + "spans": [ + { + "bbox": [ + 46, + 186, + 547, + 232 + ], + "type": "text", + "content": "Figure 4. Overview of our architecture. The architecture of our model proceeds as follows: Firstly, the image feature " + }, + { + "bbox": [ + 46, + 186, + 547, + 232 + ], + "type": "inline_equation", + "content": "X_{b}" + }, + { + "bbox": [ + 46, + 186, + 547, + 232 + ], + "type": "text", + "content": " is extracted via a backbone network. These features are then passed to our token generator module, responsible for predicting 2D keypoints and performing point sampling on the upsampled feature map, thus generating joint tokens. Next, these joint tokens are input into our mesh regressor module, which carries out the mesh prediction to get the final coordinates." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 251, + 290, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 251, + 290, + 360 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 290, + 360 + ], + "type": "text", + "content": "size " + }, + { + "bbox": [ + 46, + 251, + 290, + 360 + ], + "type": "inline_equation", + "content": "\\{H, W\\}" + }, + { + "bbox": [ + 46, + 251, + 290, + 360 + ], + "type": "text", + "content": ", we conduct point sampling guided by the predicted 21 keypoints at a resolution of " + }, + { + "bbox": [ + 46, + 251, + 290, + 360 + ], + "type": "inline_equation", + "content": "H/8" + }, + { + "bbox": [ + 46, + 251, + 290, + 360 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 251, + 290, + 360 + ], + "type": "inline_equation", + "content": "W/8" + }, + { + "bbox": [ + 46, + 251, + 290, + 360 + ], + "type": "text", + "content": ". For image classification style backbones like Fast-ViT, we apply a 4x upsample deconvolution to its final layer. However, for segmentation style backbones like HRNet, we directly use the feature on the corresponding resolution. In the mesh regressor, we apply position encoding before each MetaFormer block. Although this is not regarded as a core structure, it serves as a beneficial addition." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 367, + 141, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 367, + 141, + 379 + ], + "spans": [ + { + "bbox": [ + 47, + 367, + 141, + 379 + ], + "type": "text", + "content": "3.4. 
Loss Functions" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "spans": [ + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "text", + "content": "The method proposed in this paper is trained with supervision for vertices, 3D joints, and 2D joints. In our implementation, both the 2D joints, denoted as " + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "inline_equation", + "content": "J_{2d}" + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "text", + "content": ", and the vertices, denoted as " + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "inline_equation", + "content": "V_{3d}" + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "text", + "content": ", are directly predicted by the model's output. The 3D joints, represented as " + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "inline_equation", + "content": "J_{3d}'" + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "text", + "content": ", are calculated using the equation " + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "inline_equation", + "content": "J_{3d} = J \\times V_{3d}" + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "text", + "content": " signifies the regression matrix. All of these components utilize the L1 loss to compute the discrepancy between the ground truth and the predictions. The losses for the vertex, 3D joint, and 2D joint, denoted as " + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "inline_equation", + "content": "L_{vert}" + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "inline_equation", + "content": "L_{J_{3d}}" + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "inline_equation", + "content": "L_{J_{2d}}" + }, + { + "bbox": [ + 46, + 385, + 289, + 517 + ], + "type": "text", + "content": ", are respectively formulated as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 109, + 525, + 287, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 525, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 109, + 525, + 287, + 552 + ], + "type": "interline_equation", + "content": "L _ {J _ {3 d}} = \\frac {1}{M _ {J _ {3 d}}} \\| J _ {3 d} - J _ {3 d} ^ {\\prime} \\| _ {1} \\tag {4}", + "image_path": "226d593051886a8f41138c8b4cde22e606ecaf90639a46635b4c479f32df75cb.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 109, + 559, + 287, + 585 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 559, + 287, + 585 + ], + "spans": [ + { + "bbox": [ + 109, + 559, + 287, + 585 + ], + "type": "interline_equation", + "content": "L _ {J _ {2 d}} = \\frac {1}{M _ {J _ {2 d}}} \\| J _ {2 d} - J _ {2 d} ^ {\\prime} \\| _ {1} \\tag {5}", + "image_path": "b5a3927ffc8284450e377fa3acc445c61de1990eb593b892a5072b3b5a1750dc.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 592, + 287, + 617 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 592, + 287, + 
617 + ], + "spans": [ + { + "bbox": [ + 107, + 592, + 287, + 617 + ], + "type": "interline_equation", + "content": "L _ {v e r t} = \\frac {1}{M _ {V _ {3 d}}} \\| V _ {3 d} - V _ {3 d} ^ {\\prime} \\| _ {1} \\tag {6}", + "image_path": "48ddbba2955f23ec01d3757de055dd86ba466e053e7ed730ec0635604003aa7c.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 622, + 288, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 622, + 288, + 659 + ], + "spans": [ + { + "bbox": [ + 47, + 622, + 288, + 659 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 47, + 622, + 288, + 659 + ], + "type": "inline_equation", + "content": "J_{3d} \\in R^{M \\times 3}" + }, + { + "bbox": [ + 47, + 622, + 288, + 659 + ], + "type": "text", + "content": " represents all the ground truth points, and the symbols annotated with primes denote the predicted values. The overall loss function is defined as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 85, + 671, + 287, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 671, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 85, + 671, + 287, + 685 + ], + "type": "interline_equation", + "content": "L = w _ {3 d} L _ {J _ {3 d}} + w _ {2 d} L _ {J _ {2 d}} + w _ {v e r t} L _ {v e r t} \\tag {7}", + "image_path": "a01a2a2a856bc82c0f9f500b19cfbc21acbd7da7346600268362a64b01bb2d7a.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 689, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 289, + 715 + ], + "type": "text", + "content": "Given that the primary objective of this study is mesh prediction, 2D keypoints only affect point sampling and" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 251, + 547, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 251, + 547, + 288 + ], + "spans": [ + { + "bbox": [ + 305, + 251, + 547, + 288 + ], + "type": "text", + "content": "thus do not need to be highly accurate, we have accordingly adjusted the coefficients " + }, + { + "bbox": [ + 305, + 251, + 547, + 288 + ], + "type": "inline_equation", + "content": "w_{3d}" + }, + { + "bbox": [ + 305, + 251, + 547, + 288 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 251, + 547, + 288 + ], + "type": "inline_equation", + "content": "w_{2d}" + }, + { + "bbox": [ + 305, + 251, + 547, + 288 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 305, + 251, + 547, + 288 + ], + "type": "inline_equation", + "content": "w_{vert}" + }, + { + "bbox": [ + 305, + 251, + 547, + 288 + ], + "type": "text", + "content": " to 10, 1, and 10, respectively." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 298, + 388, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 298, + 388, + 312 + ], + "spans": [ + { + "bbox": [ + 305, + 298, + 388, + 312 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 318, + 440, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 318, + 440, + 331 + ], + "spans": [ + { + "bbox": [ + 306, + 318, + 440, + 331 + ], + "type": "text", + "content": "4.1. 
Implementation Details" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 337, + 547, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 337, + 547, + 529 + ], + "spans": [ + { + "bbox": [ + 304, + 337, + 547, + 529 + ], + "type": "text", + "content": "Our network is implemented based on Pytorch [15]. We use HRNet64[23] and FastViT-MA36 [21] as our backbones, with their initial weights pre-trained on ImageNet. We use the AdamW [8] optimizer to train our network, with a total of 100 epochs. The learning rate is initially set to 5e-4, and then adjusted to 5e-5 after 50 epochs. We train the network with eight RTX2080Ti GPUs, with a batch size of 32 per GPU. It costs 7 hours training with FastViT-MA36 backbone and 11 hours with HRNet. The features of intermediate layers are directly fed to the Token Generator without extra upsampling layer when the backbone is HRNetw64. The Mesh Regressor has three Encoder Layers, with the corresponding input token numbers being [21, 84, 336], output token numbers being [84, 336, 778], and feature dimensions being [256, 128, 64] respectively. We adopt Attention as the default token mixer, as its performance is slightly better." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 536, + 369, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 536, + 369, + 548 + ], + "spans": [ + { + "bbox": [ + 306, + 536, + 369, + 548 + ], + "type": "text", + "content": "4.2. Datasets" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 555, + 547, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 555, + 547, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 555, + 547, + 640 + ], + "type": "text", + "content": "Our primary experiments and analyses are conducted on the FreiHAND [28] dataset. In order to validate the generalization of our method, we also do experiments on the large-scale 3D hand-object dataset, DexYCB [1]. The FreiHAND dataset contains 130,240 training samples and 3,960 testing samples. DexYCB contains 406,888 training samples and 78,768 testing samples." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 647, + 419, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 647, + 419, + 658 + ], + "spans": [ + { + "bbox": [ + 306, + 647, + 419, + 658 + ], + "type": "text", + "content": "4.3. 
Evaluation Metrics" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "To evaluate the accuracy of 3D Hand Mesh Reconstruction methods, we adopt five metrics: Procrustes-aligned mean per joint position error (PA-MPJPE), Procrustes-aligned mean per vertex position error (PA-MPVPE), mean per" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "type": "text", + "content": "1371" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 100, + 69, + 494, + 230 + ], + "blocks": [ + { + "bbox": [ + 100, + 69, + 494, + 230 + ], + "lines": [ + { + "bbox": [ + 100, + 69, + 494, + 230 + ], + "spans": [ + { + "bbox": [ + 100, + 69, + 494, + 230 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Backbone</td><td>PA-MPJPE ↓</td><td>PA-MPVPE ↓</td><td>F@05 ↑</td><td>F@15 ↑</td><td>FPS</td></tr>
<tr><td>I2L-MeshNet [13]</td><td>ResNet50</td><td>7.4</td><td>7.6</td><td>0.681</td><td>0.973</td><td>72</td></tr>
<tr><td>CMR [3]</td><td>ResNet50</td><td>6.9</td><td>7.0</td><td>0.715</td><td>0.977</td><td>-</td></tr>
<tr><td>I2UV [2]</td><td>ResNet50</td><td>7.2</td><td>7.4</td><td>0.682</td><td>0.973</td><td>-</td></tr>
<tr><td>Tang et al. [20]</td><td>ResNet50</td><td>6.7</td><td>6.7</td><td>0.724</td><td>0.981</td><td>47</td></tr>
<tr><td>MobRecon [4]</td><td>DenseStack</td><td>6.9</td><td>7.2</td><td>0.694</td><td>0.979</td><td>80</td></tr>
<tr><td>METRO [27]</td><td>HRNet</td><td>6.7</td><td>6.8</td><td>0.717</td><td>0.981</td><td>27</td></tr>
<tr><td>MeshGraphomer [11]</td><td>HRNet</td><td>6.3</td><td>6.5</td><td>0.738</td><td>0.983</td><td>24</td></tr>
<tr><td>FastMETRO [5]</td><td>HRNet</td><td>6.5</td><td>7.1</td><td>0.687</td><td>0.983</td><td>28</td></tr>
<tr><td>Deformer [25]</td><td>HRNet</td><td>6.2</td><td>6.4</td><td>0.743</td><td>0.984</td><td>-</td></tr>
<tr><td>PointHMR [7]</td><td>HRNet</td><td>6.1</td><td>6.6</td><td>0.720</td><td>0.984</td><td>-</td></tr>
<tr><td>FastViT [21]</td><td>FastViT-MA36</td><td>6.6</td><td>6.7</td><td>0.722</td><td>0.981</td><td>84</td></tr>
<tr><td>Ours</td><td>HRNet</td><td>5.8</td><td>6.1</td><td>0.766</td><td>0.986</td><td>33</td></tr>
<tr><td>Ours</td><td>FastViT-MA36</td><td>5.7</td><td>6.0</td><td>0.772</td><td>0.986</td><td>70</td></tr></table>
", + "image_path": "5bc7a39c8b8b93a6e512d7ecfcf55dcac12498215009216129c03c51e79f79bd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 281, + 288, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 281, + 288, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 281, + 288, + 354 + ], + "type": "text", + "content": "joint position error (MPJPE), mean per vertex position error (MPVPE), and F-Score. PA-MPJPE and PA-MPVPE refer to the MPJPE and MPVPE after aligning the predicted hand results with the Ground Truth using Procrustes alignment, respectively. These two metrics do not consider the impact of global rotation and scale." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 362, + 105, + 374 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 362, + 105, + 374 + ], + "spans": [ + { + "bbox": [ + 47, + 362, + 105, + 374 + ], + "type": "text", + "content": "4.4. Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 381, + 287, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 381, + 287, + 514 + ], + "spans": [ + { + "bbox": [ + 46, + 381, + 287, + 514 + ], + "type": "text", + "content": "Comparison with previous methods To validate our proposed modules., we adopted HRNet and FastViT-MA36 as backbones for non-real-time and real-time methods respectively, following established models [27] [11] [5] [21]. For fair comparison, we provide performance metrics without Test-Time Augmentation (TTA) and FPS without TensorRT optimization. Table 1 shows that our method, despite being slightly slower than FastViT, improves PA-MPJPE by " + }, + { + "bbox": [ + 46, + 381, + 287, + 514 + ], + "type": "inline_equation", + "content": "0.9\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 381, + 287, + 514 + ], + "type": "text", + "content": ". Compared to transformer-based methods, our approach demonstrates superior speed and performance, while only requiring " + }, + { + "bbox": [ + 46, + 381, + 287, + 514 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 46, + 381, + 287, + 514 + ], + "type": "text", + "content": " of parameters, as shown in Table 2." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 514, + 287, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 514, + 287, + 548 + ], + "spans": [ + { + "bbox": [ + 46, + 514, + 287, + 548 + ], + "type": "text", + "content": "The qualitative comparison results are shown in the figure 5. Compared to previous methods, our method produces more accurate hand reconstruction results." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 50, + 558, + 285, + 610 + ], + "blocks": [ + { + "bbox": [ + 46, + 238, + 547, + 262 + ], + "lines": [ + { + "bbox": [ + 46, + 238, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 46, + 238, + 547, + 262 + ], + "type": "text", + "content": "Table 1. Results on the FreiHAND dataset. Our results are shown in bold. “-” indicates not reported. Our results surpass all existing methods in terms of accuracy metrics." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 558, + 285, + 610 + ], + "lines": [ + { + "bbox": [ + 50, + 558, + 285, + 610 + ], + "spans": [ + { + "bbox": [ + 50, + 558, + 285, + 610 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>#Params</td><td>PA-MPJPE ↓</td><td>PA-MPVPE ↓</td></tr>
<tr><td>METRO [27]</td><td>102M</td><td>6.7</td><td>6.8</td></tr>
<tr><td>MeshGraphomer [11]</td><td>98M</td><td>6.3</td><td>6.5</td></tr>
<tr><td>FastMETRO [5]</td><td>25M</td><td>6.5</td><td>7.1</td></tr>
<tr><td>Ours</td><td>1.9M</td><td>5.8</td><td>6.1</td></tr></table>
", + "image_path": "b745dfb06061538698f666f3c7deba060c90faa4769c618efc261b94865d0150.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 689, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 715 + ], + "type": "text", + "content": "Evaluation on DexYCB We employed the large-scale hand-object dataset DexYCB to validate our method's ef" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 281, + 547, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 281, + 547, + 354 + ], + "spans": [ + { + "bbox": [ + 304, + 281, + 547, + 354 + ], + "type": "text", + "content": "fectiveness and generalizability. As shown in Table 3, our model outperforms existing single-image input methods on all metrics. Significantly, we surpassed previous benchmarks by " + }, + { + "bbox": [ + 304, + 281, + 547, + 354 + ], + "type": "inline_equation", + "content": "1.5\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 281, + 547, + 354 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 281, + 547, + 354 + ], + "type": "inline_equation", + "content": "0.8\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 281, + 547, + 354 + ], + "type": "text", + "content": " on the MPJPE and MPVPE measures respectively, thereby setting new standards and demonstrating our method's broad applicability." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 309, + 364, + 544, + 437 + ], + "blocks": [ + { + "bbox": [ + 46, + 618, + 287, + 674 + ], + "lines": [ + { + "bbox": [ + 46, + 618, + 287, + 674 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 287, + 674 + ], + "type": "text", + "content": "Table 2. Comparison of transformer-based approaches. #Params refer to the network parameters that are not included within the backbone structure of the model. Our approach not only surpasses existing benchmarks in key metrics but also achieves a parameter reduction of one to two orders of magnitude." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 364, + 544, + 437 + ], + "lines": [ + { + "bbox": [ + 309, + 364, + 544, + 437 + ], + "spans": [ + { + "bbox": [ + 309, + 364, + 544, + 437 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>PA-MPJPE ↓</td><td>PA-MPVPE ↓</td><td>MPJPE ↓</td><td>MPVPE ↓</td></tr>
<tr><td>METRO [27]</td><td>7.0</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Spurr et al. [19]</td><td>6.8</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Liu et al. [12]</td><td>6.6</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>HandOccNet [14]</td><td>5.8</td><td>5.5</td><td>14.0</td><td>13.1</td></tr>
<tr><td>MobRecon [4]</td><td>6.4</td><td>5.6</td><td>14.2</td><td>13.1</td></tr>
<tr><td>H2ONet [24]</td><td>5.7</td><td>5.5</td><td>14.0</td><td>13.0</td></tr>
<tr><td>Ours</td><td>5.5</td><td>5.5</td><td>12.4</td><td>12.1</td></tr></table>
", + "image_path": "4c59ef976a867e9b9a219538399ea3cfe27df634ed2e44ef2dcd7931fc3c8246.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 445, + 547, + 479 + ], + "lines": [ + { + "bbox": [ + 304, + 445, + 547, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 445, + 547, + 479 + ], + "type": "text", + "content": "Table 3. Results on DexYCB. Our method shows advantages on Procrustes-Aligned metrics and surpassed the previous methods by a large margin on non-Procrustes-Aligned metrics." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 502, + 400, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 502, + 400, + 514 + ], + "spans": [ + { + "bbox": [ + 306, + 502, + 400, + 514 + ], + "type": "text", + "content": "4.5. Ablation Study" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 521, + 545, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 604 + ], + "type": "text", + "content": "To thoroughly validate the various parameter combinations, a large number of ablation experiments were conducted. For efficiency, all ablation experiments were implemented on smaller models (e.g., Hiera-tiny). After identifying the optimal parameter combination, it is then applied to the standard models to facilitate a fair comparison with existing methods." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 605, + 545, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 653 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 653 + ], + "type": "text", + "content": "The state-of-the-art backbone, Hiera-Tiny [18], is utilized in our study as a strong baseline. We conduct a series of ablation experiments on the FreiHAND dataset with the aim of examining the efficacy of the structure we propose." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": "Effectiveness of Our Token Generator and Mesh Regressor. In order to evaluate the effectiveness of our Token Generator and Mesh Regressor, we initially set up a standard baseline model. 
This model's Token Generator is constructed based on global features, while its Mesh Regres" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "1372" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 69, + 119, + 351 + ], + "blocks": [ + { + "bbox": [ + 47, + 69, + 119, + 351 + ], + "lines": [ + { + "bbox": [ + 47, + 69, + 119, + 351 + ], + "spans": [ + { + "bbox": [ + 47, + 69, + 119, + 351 + ], + "type": "image", + "image_path": "f44ac2e201fb60a95408b2b9992d075ef5af700aff0b82debf429c81e83c67a5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 129, + 360, + 463, + 373 + ], + "lines": [ + { + "bbox": [ + 129, + 360, + 463, + 373 + ], + "spans": [ + { + "bbox": [ + 129, + 360, + 463, + 373 + ], + "type": "text", + "content": "Figure 5. Qualitative comparison between our method and other state-of-the-art approaches." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 133, + 82, + 492, + 353 + ], + "blocks": [ + { + "bbox": [ + 133, + 82, + 492, + 353 + ], + "lines": [ + { + "bbox": [ + 133, + 82, + 492, + 353 + ], + "spans": [ + { + "bbox": [ + 133, + 82, + 492, + 353 + ], + "type": "image", + "image_path": "b7830c556ed21659605f4aeeea3f861caaccc100d64b367eb9e91a2f246b9101.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 392, + 288, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 392, + 288, + 489 + ], + "spans": [ + { + "bbox": [ + 46, + 392, + 288, + 489 + ], + "type": "text", + "content": "sor is designed as a Multilayer Perceptron (MLP). We subsequently substitute these components with our proposed structures individually. The results of these experiments, detailed in Table 4, confirm that both modules, when incorporated in place of the original structures, contribute positively towards enhancing overall performance. When implemented together, these modifications lead to even further improvements." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 74, + 497, + 261, + 553 + ], + "blocks": [ + { + "bbox": [ + 74, + 497, + 261, + 553 + ], + "lines": [ + { + "bbox": [ + 74, + 497, + 261, + 553 + ], + "spans": [ + { + "bbox": [ + 74, + 497, + 261, + 553 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>PA-MPJPE ↓</td><td>PA-MPVPE ↓</td></tr>
<tr><td>Simple Baseline</td><td>6.9</td><td>7.2</td></tr>
<tr><td>+ mesh regressor</td><td>6.5</td><td>6.8</td></tr>
<tr><td>+ token generator</td><td>6.6</td><td>7.1</td></tr>
<tr><td>+ both</td><td>6.2</td><td>6.5</td></tr></table>
", + "image_path": "e6e50004c6bf9f7707d01cf7b49747bef9c10a4d74b31ada6341000a2cdd145c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": "Analysing Core Structure of Token Generator. As shown in Table 5, performance using only global features is competitive. The grid sampling and point sampling on a " + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "inline_equation", + "content": "7 \\times 7" + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": " feature map show similar efficiencies. Increasing the resolution of the feature map to " + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "inline_equation", + "content": "28 \\times 28" + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": " through a single four-fold deconvolution improves performance. However, further optimization is not achieved by replacing single four-fold deconvolution with two layers of two-fold de" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 392, + 547, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 392, + 547, + 441 + ], + "spans": [ + { + "bbox": [ + 304, + 392, + 547, + 441 + ], + "type": "text", + "content": "convolutions or adding more convolutions. Similarly, no improvement is observed when changing from point sampling to coarse mesh sampling. Qualitative comparison of different point sampling strategies is shown in Fig. 6" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 309, + 449, + 544, + 531 + ], + "blocks": [ + { + "bbox": [ + 46, + 560, + 288, + 605 + ], + "lines": [ + { + "bbox": [ + 46, + 560, + 288, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 560, + 288, + 605 + ], + "type": "text", + "content": "Table 4. Ablation study of our proposed modules. Each of these methods brings about enhancements when utilized individually. However, when these strategies are integrated, they yield an even more substantial improvement." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 449, + 544, + 531 + ], + "lines": [ + { + "bbox": [ + 309, + 449, + 544, + 531 + ], + "spans": [ + { + "bbox": [ + 309, + 449, + 544, + 531 + ], + "type": "table", + "html": "
<table><tr><td>Sample Method</td><td>Resolution</td><td>PA-MPJPE ↓</td><td>PA-MPVPE ↓</td></tr>
<tr><td>Global</td><td>1x1</td><td>6.5</td><td>6.8</td></tr>
<tr><td>Grid</td><td>7x7</td><td>6.3</td><td>6.6</td></tr>
<tr><td>Point</td><td>7x7</td><td>6.3</td><td>6.6</td></tr>
<tr><td>Point</td><td>14x14</td><td>6.3</td><td>6.6</td></tr>
<tr><td>Point</td><td>28x28</td><td>6.2</td><td>6.5</td></tr>
<tr><td>Point</td><td>28x28 enhanced</td><td>6.2</td><td>6.5</td></tr>
<tr><td>Coarse mesh</td><td>28x28</td><td>6.2</td><td>6.5</td></tr></table>
", + "image_path": "3271717fe35a8371ada85ecfb247df677e12679cd443861d7a5ed025e1b794f1.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 537, + 547, + 584 + ], + "lines": [ + { + "bbox": [ + 304, + 537, + 547, + 584 + ], + "spans": [ + { + "bbox": [ + 304, + 537, + 547, + 584 + ], + "type": "text", + "content": "Table 5. Ablation study of Our Token generator A point sample at a resolution of " + }, + { + "bbox": [ + 304, + 537, + 547, + 584 + ], + "type": "inline_equation", + "content": "28 \\times 28" + }, + { + "bbox": [ + 304, + 537, + 547, + 584 + ], + "type": "text", + "content": " achieves optimal efficiency. Contrarily, increasing the number of sampled points or incorporating additional convolutional layers do not lead to any further improvements." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 594, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 547, + 715 + ], + "type": "text", + "content": "Analysing the Core Structure of the Mesh Regressor. As shown in Table 6, for a single encoder layer, adding an extra encoder layer with a larger token number sharply increases performance by " + }, + { + "bbox": [ + 304, + 594, + 547, + 715 + ], + "type": "inline_equation", + "content": "0.3\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 594, + 547, + 715 + ], + "type": "text", + "content": ". The optimal setting consists of three encoder layers, with token numbers progressively multiplied by 4. As the layer number increases further, the marginal benefit becomes inconsequential and sometimes even decreases. Furthermore, as shown in Table 7, given a fixed set of token numbers, increasing computational complexity produces negligible differences in either" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "1373" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 119 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 119 + ], + "type": "text", + "content": "block numbers or block dimensions in the encoder layer. A middle-sized block dimensions setting is optimal. Qualitative comparison of different upsample layers is shown in Figure 7." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 133, + 285, + 215 + ], + "blocks": [ + { + "bbox": [ + 50, + 133, + 285, + 215 + ], + "lines": [ + { + "bbox": [ + 50, + 133, + 285, + 215 + ], + "spans": [ + { + "bbox": [ + 50, + 133, + 285, + 215 + ], + "type": "table", + "html": "
Layer NumsToken NumsPA-MPJPE ↓PA-MPVPE ↓
1[21]6.67.1
2[21, 256]6.36.6
2[21, 384]6.36.6
3[21, 256, 384]6.26.5
3[21, 84, 336]6.26.5
4[21, 128, 256, 384]6.26.5
4[21, 63, 126, 252]6.36.6
", + "image_path": "81fe7f6fd230018d7df438811a6ac21757f507379fb72755e352783974cdc767.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 74, + 283, + 261, + 396 + ], + "blocks": [ + { + "bbox": [ + 46, + 223, + 287, + 257 + ], + "lines": [ + { + "bbox": [ + 46, + 223, + 287, + 257 + ], + "spans": [ + { + "bbox": [ + 46, + 223, + 287, + 257 + ], + "type": "text", + "content": "Table 6. The Number of Upsampling Layers and Corresponding Token Numbers in Encoding Layers. Three encoding layers yield optimal efficiency." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 74, + 283, + 261, + 396 + ], + "lines": [ + { + "bbox": [ + 74, + 283, + 261, + 396 + ], + "spans": [ + { + "bbox": [ + 74, + 283, + 261, + 396 + ], + "type": "table", + "html": "
DimensionsPA-MPJPE ↓PA-MPVPE ↓
64, 32, 166.56.9
128, 64, 326.36.6
256, 128, 646.26.5
512, 256, 126.26.5
1024, 512, 2566.46.7
Block NumsPA-MPJPE ↓PA-MPVPE ↓
1, 1, 16.26.5
2, 2, 26.26.6
3, 3, 36.36.6
", + "image_path": "e0b74e5b96ff46414b13f7aee3b1e05d090281628d549a75e06607f4e4158b54.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 443, + 287, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 443, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 46, + 443, + 287, + 525 + ], + "type": "text", + "content": "Position Encoding and Attention Mixer. We utilized position encoding layer and attention mixer during ablation experiments because they are intuitively helpful. Based on our SOTA result, we remove position encoding layers to observe a slightly " + }, + { + "bbox": [ + 46, + 443, + 287, + 525 + ], + "type": "inline_equation", + "content": "0.1\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 443, + 287, + 525 + ], + "type": "text", + "content": " degrade. Similar thing happens when we substitute attention mixer to identity mixer, see Tab. 8." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 74, + 536, + 261, + 582 + ], + "blocks": [ + { + "bbox": [ + 46, + 404, + 287, + 426 + ], + "lines": [ + { + "bbox": [ + 46, + 404, + 287, + 426 + ], + "spans": [ + { + "bbox": [ + 46, + 404, + 287, + 426 + ], + "type": "text", + "content": "Table 7. Dimensions and block nums. Single layer blocks with middle sized dimensions are optimal." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 74, + 536, + 261, + 582 + ], + "lines": [ + { + "bbox": [ + 74, + 536, + 261, + 582 + ], + "spans": [ + { + "bbox": [ + 74, + 536, + 261, + 582 + ], + "type": "table", + "html": "
MethodPA-MPJPE ↓PA-MPVPE ↓
hiera-tiny sota6.26.5
Identity mixer6.36.6
w/o position emb6.36.6
", + "image_path": "083ab6fa4d9992fed4df85fd01569bc32117942c73fdf10865a886fd078075e0.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 590, + 287, + 624 + ], + "lines": [ + { + "bbox": [ + 46, + 590, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 46, + 590, + 287, + 624 + ], + "type": "text", + "content": "Table 8. Token mixer and position embedding. When substitute attention mixer to identity mixer, or remove position encoding layer, the performance dropped slightly by " + }, + { + "bbox": [ + 46, + 590, + 287, + 624 + ], + "type": "inline_equation", + "content": "0.1\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 590, + 287, + 624 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 642, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 287, + 713 + ], + "type": "text", + "content": "Limits and Failure cases. As mentioned earlier, our work is dedicated to summarizing and abstracting from existing work. Since no targeted optimization was performed, some failure cases present in previous work remains challenging. These cases are concentrated in scenes with self-occlusion and object occlusion, see Fig. 8." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 306, + 71, + 533, + 166 + ], + "blocks": [ + { + "bbox": [ + 306, + 71, + 533, + 166 + ], + "lines": [ + { + "bbox": [ + 306, + 71, + 533, + 166 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 533, + 166 + ], + "type": "image", + "image_path": "7b772d9a2614a343087179c363d7891d4ede3c368a87e7cd9d24fd6f7887265c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 174, + 545, + 207 + ], + "lines": [ + { + "bbox": [ + 305, + 174, + 545, + 207 + ], + "spans": [ + { + "bbox": [ + 305, + 174, + 545, + 207 + ], + "type": "text", + "content": "Figure 6. Qualitative Comparison of Different Point Sampling Strategies. The global/coarse feature fails in scenarios with detailed finger interactions, where upsampled feature works well." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 307, + 224, + 533, + 318 + ], + "blocks": [ + { + "bbox": [ + 307, + 224, + 533, + 318 + ], + "lines": [ + { + "bbox": [ + 307, + 224, + 533, + 318 + ], + "spans": [ + { + "bbox": [ + 307, + 224, + 533, + 318 + ], + "type": "image", + "image_path": "cae26c4b995200f60e2eb3c7a96e17ee4903d8ba1ca56f7e89cd8c14d4972b56.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 327, + 545, + 371 + ], + "lines": [ + { + "bbox": [ + 305, + 327, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 305, + 327, + 545, + 371 + ], + "type": "text", + "content": "Figure 7. Qualitative Comparison of the Number of Layers of Mesh Decoder. When constrained to one, the reconstructed mesh tends to corrupt into unnatural shapes. Performance improves as the number of layers increases." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 307, + 388, + 526, + 466 + ], + "blocks": [ + { + "bbox": [ + 307, + 388, + 526, + 466 + ], + "lines": [ + { + "bbox": [ + 307, + 388, + 526, + 466 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 526, + 466 + ], + "type": "image", + "image_path": "18513b34b4a664abb9520fb38878b2eb1f5417ff247a00fd2c1646e9eaeb2307.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 475, + 545, + 519 + ], + "lines": [ + { + "bbox": [ + 305, + 475, + 545, + 519 + ], + "spans": [ + { + "bbox": [ + 305, + 475, + 545, + 519 + ], + "type": "text", + "content": "Figure 8. Typical Failure Cases. Failure cases are concentrated in scenes with self-occlusion and object occlusion. Some are difficult to discern due to the small area of exposure, while others present ambiguities caused by the occlusion." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 544, + 470, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 544, + 470, + 556 + ], + "spans": [ + { + "bbox": [ + 306, + 544, + 470, + 556 + ], + "type": "text", + "content": "5. Conclusion and Future Work" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 567, + 545, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 567, + 545, + 650 + ], + "spans": [ + { + "bbox": [ + 304, + 567, + 545, + 650 + ], + "type": "text", + "content": "We observed shared advantages and disadvantages of typical structures. Based on these observations, we introduce the concept of the core structure. Through experiments, we revealed that a framework with the core structure could achieve high performance with limited computational load. We evaluated our approach quantitatively and qualitatively to demonstrate its effectiveness." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": "However, our method is explicitly designed to reconstruct single hand gestures. Other scenarios, such as extreme lighting, occlusion, interactions, or out-of-distribution cases, showed no improvement over existing methods. Such cases require specifically designed methods." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1374" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 157 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 157 + ], + "type": "text", + "content": "[1] Yu-Wei Chao, Wei Yang, Yu Xiang, Pavlo Molchanov, Ankur Handa, Jonathan Tremblay, Yashraj S Narang, Karl Van Wyk, Umar Iqbal, Stan Birchfield, et al. Dexycb: A benchmark for capturing hand grasping of objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9044–9053, 2021. 2, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 159, + 288, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 288, + 224 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 288, + 224 + ], + "type": "text", + "content": "[2] Ping Chen, Yujin Chen, Dong Yang, Fangyin Wu, Qin Li, Qingpei Xia, and Yong Tan. I2uv-handnet: Image-to-uv prediction network for accurate and high-fidelity 3d hand mesh modeling. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12929–12938, 2021. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 226, + 288, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 226, + 288, + 292 + ], + "spans": [ + { + "bbox": [ + 53, + 226, + 288, + 292 + ], + "type": "text", + "content": "[3] Xingyu Chen, Yufeng Liu, Chongyang Ma, Jianlong Chang, Huayan Wang, Tian Chen, Xiaoyan Guo, Pengfei Wan, and Wen Zheng. Camera-space hand mesh recovery via semantic aggregation and adaptive 2d-1d registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13274–13283, 2021. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 293, + 288, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 293, + 288, + 358 + ], + "spans": [ + { + "bbox": [ + 53, + 293, + 288, + 358 + ], + "type": "text", + "content": "[4] Xingyu Chen, Yufeng Liu, Yajiao Dong, Xiong Zhang, Chongyang Ma, Yanmin Xiong, Yuan Zhang, and Xiaoyan Guo. Mobrecon: Mobile-friendly hand mesh reconstruction from monocular image. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 2, 3, 4, 6, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 361, + 288, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 361, + 288, + 394 + ], + "spans": [ + { + "bbox": [ + 53, + 361, + 288, + 394 + ], + "type": "text", + "content": "[5] Junhyeong Cho, Kim Youwang, and Tae-Hyun Oh. Crossattention of disentangled modalities for 3d human mesh recovery with transformers. 
1, 3, 4, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 396, + 288, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 396, + 288, + 439 + ], + "spans": [ + { + "bbox": [ + 53, + 396, + 288, + 439 + ], + "type": "text", + "content": "[6] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2Mesh: Graph Convolutional Network for 3D Human Pose and Mesh Recovery from a 2D Human Pose, page 769-787. 2020. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 441, + 288, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 441, + 288, + 506 + ], + "spans": [ + { + "bbox": [ + 53, + 441, + 288, + 506 + ], + "type": "text", + "content": "[7] Jeonghwan Kim, Mi-Gyeong Gwon, Hyunwoo Park, Hyukmin Kwon, Gi-Mun Um, and Wonjun Kim. Sampling is matter: Point-guided 3d human mesh reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12880-12889, 2023. 1, 2, 4, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 509, + 288, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 509, + 288, + 541 + ], + "spans": [ + { + "bbox": [ + 53, + 509, + 288, + 541 + ], + "type": "text", + "content": "[8] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 543, + 288, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 543, + 288, + 576 + ], + "spans": [ + { + "bbox": [ + 53, + 543, + 288, + 576 + ], + "type": "text", + "content": "[9] Thomas Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv: Learning, arXiv: Learning, 2016. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 578, + 288, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 288, + 621 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 288, + 621 + ], + "type": "text", + "content": "[10] Isaak Lim, Alexander Dielen, Marcel Campen, and Leif Kobbelt. A Simple Approach to Intrinsic Correspondence Learning on Unstructured 3D Meshes, page 349-362. 2019. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 624, + 288, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 657 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 657 + ], + "type": "text", + "content": "[11] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 2, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 658, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 288, + 714 + ], + "type": "text", + "content": "[12] Shaowei Liu, Hanwen Jiang, Jiarui Xu, Sifei Liu, and Xiaolong Wang. Semi-supervised 3d hand-object poses estimation with interactions in time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14687-14697, 2021. 
6" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 714 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 139 + ], + "type": "text", + "content": "[13] Gyeongsik Moon and Kyoung Mu Lee. I2l-meshnet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VII 16, pages 752-768. Springer, 2020. 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 140, + 547, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 140, + 547, + 195 + ], + "spans": [ + { + "bbox": [ + 307, + 140, + 547, + 195 + ], + "type": "text", + "content": "[14] JoonKyu Park, Yeonguk Oh, Gyeongsik Moon, Hongsuk Choi, and Kyoung Mu Lee. Handoccnet: Occlusion-robust 3d hand mesh estimation network. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1496–1505, 2022. 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 197, + 546, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 546, + 239 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 546, + 239 + ], + "type": "text", + "content": "[15] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 241, + 546, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 546, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 546, + 274 + ], + "type": "text", + "content": "[16] Anurag Ranjan, Timo Bolkart, Soubhik Sanyal, and Michael J. Black. Generating 3D faces using Convolutional Mesh Autoencoders, page 725-741. 2018. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 276, + 546, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 276, + 546, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 276, + 546, + 319 + ], + "type": "text", + "content": "[17] Javier Romero, Dimitrios Tzionas, and Michael J. Black. Embodied hands: modeling and capturing hands and bodies together. ACM Transactions on Graphics, page 1-17, 2017. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 320, + 546, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 320, + 546, + 376 + ], + "spans": [ + { + "bbox": [ + 307, + 320, + 546, + 376 + ], + "type": "text", + "content": "[18] Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan, Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed, Judy Hoffman, et al. Hiera: A hierarchical vision transformer without the bells-and-whistles. arXiv preprint arXiv:2306.00989, 2023. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 376, + 546, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 376, + 546, + 421 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 546, + 421 + ], + "type": "text", + "content": "[19] Adrian Spurr, Umar Iqbal, Pavlo Molchanov, Otmar Hilliges, and Jan Kautz. 
Weakly supervised 3d hand pose estimation via biomechanical constraints. In European conference on computer vision, pages 211-228. Springer, 2020. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 422, + 546, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 546, + 466 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 546, + 466 + ], + "type": "text", + "content": "[20] Xiao Tang, Tianyu Wang, and Chi-Wing Fu. Towards accurate alignment in real-time 3d hand-mesh reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11698-11707, 2021. 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 467, + 546, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 467, + 546, + 510 + ], + "spans": [ + { + "bbox": [ + 307, + 467, + 546, + 510 + ], + "type": "text", + "content": "[21] PavanKumarAnasosalu Vasu, James Gabriel, Jeff Zhu, Oncel Tuzel, and Anurag Ranjan. Fastvit: A fast hybrid vision transformer using structural reparameterization. 2023. 2, 3, 5, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 512, + 546, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 512, + 546, + 567 + ], + "spans": [ + { + "bbox": [ + 307, + 512, + 546, + 567 + ], + "type": "text", + "content": "[22] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, AidanN. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Neural Information Processing Systems, Neural Information Processing Systems, 2017. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 568, + 546, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 546, + 633 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 546, + 633 + ], + "type": "text", + "content": "[23] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, et al. Deep high-resolution representation learning for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 43(10):3349-3364, 2020. 2, 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 635, + 547, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 547, + 690 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 547, + 690 + ], + "type": "text", + "content": "[24] Hao Xu, Tianyu Wang, Xiao Tang, and Chi-Wing Fu. H2onet: Hand-occlusion-and-orientation-aware network for real-time 3d hand mesh reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17048–17058, 2023. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 691, + 546, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 546, + 714 + ], + "type": "text", + "content": "[25] Yusuke Yoshiyasu. Deformable mesh transformer for 3d human mesh recovery. 
In Proceedings of the IEEE/CVF Con" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1375" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 251 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 66, + 73, + 286, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 73, + 286, + 94 + ], + "spans": [ + { + "bbox": [ + 66, + 73, + 286, + 94 + ], + "type": "text", + "content": "ference on Computer Vision and Pattern Recognition, pages 17006-17015, 2023. 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "text", + "content": "[26] Weihao Yu, Mi Luo, Pan Zhou, Chenyang Si, Yichen Zhou, Xinchao Wang, Jiashi Feng, and Shuicheng Yan. Metaformer is actually what you need for vision. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 287, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 287, + 195 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 287, + 195 + ], + "type": "text", + "content": "[27] Xiong Zhang, Qiang Li, Hong Mo, Wenbo Zhang, and Wen Zheng. End-to-end hand mesh recovery from a monocular rgb image. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 2, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 198, + 287, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 198, + 287, + 251 + ], + "spans": [ + { + "bbox": [ + 48, + 198, + 287, + 251 + ], + "type": "text", + "content": "[28] Christian Zimmermann, Duygu Ceylan, Jimei Yang, Bryan Russell, Max Argus, and Thomas Brox. Freihand: A dataset for markerless capture of hand pose and shape from single rgb images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 813-822, 2019. 
2, 5" + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "text", + "content": "1376" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/e81a3abe-11ba-459a-b183-aa765dce41a0_content_list.json b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/e81a3abe-11ba-459a-b183-aa765dce41a0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..75e92b54112b671cb8cc3d3e3f8e13705b9a1a00 --- /dev/null +++ b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/e81a3abe-11ba-459a-b183-aa765dce41a0_content_list.json @@ -0,0 +1,1467 @@ +[ + { + "type": "text", + "text": "A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames", + "text_level": 1, + "bbox": [ + 137, + 128, + 833, + 175 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0de35df57d2e48914ff510fed54683dc42a97c7b0b08752eb8756a87480cc145.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 119, + 202, + 851, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 325, + 313, + 342 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Understanding long, real-world videos requires modeling of long-range visual dependencies. To this end, we explore video-first architectures, building on the common paradigm of transferring large-scale, image-text models to video via shallow temporal fusion. However, we expose two limitations to the approach: (1) decreased spatial capabilities, likely due to poor video-language alignment in standard video datasets, and (2) higher memory consumption, bottlenecking the number of frames that can be processed. To mitigate the memory bottleneck, we systematically analyze the memory/accuracy trade-off of various efficient methods: factorized attention, parameter-efficient image-to-video adaptation, input masking, and multi-resolution patchification. Surprisingly, simply masking large portions of the video (up to $75\\%$ ) during contrastive pre-training proves to be one of the most robust ways to scale encoders to videos up to 4.3 minutes at 1 FPS. Our simple approach for training long video-to-text models, which scales to 1B parameters, does not add new architectural complexity and is able to outperform the popular paradigm of using much larger LLMs as an information aggregator over segment-based information on benchmarks with long-range temporal dependencies (YouCook2, EgoSchema).", + "bbox": [ + 75, + 358, + 473, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 733, + 209, + 748 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Long-video understanding requires modeling of the temporal dynamics and long-range visual dependencies of real-world scenes [63, 64]. However, capturing long-range visual content is challenging, even when equipped with large language models. 
In this paper, we overcome hardware memory limitations and demonstrate how to extend video encoders to directly process minutes-long visual content using language grounding, and simple, established techniques", + "bbox": [ + 75, + 758, + 468, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c16b68480e1203f0e5e223f6db6f111050b2918be435b981a17ae7372761d112.jpg", + "image_caption": [ + "Figure 1. Two main training steps: (1) training a video encoder via Noise Contrastive Estimation and (2) using this frozen video encoder with a pre-trained, frozen LM and visual adapter layers for video-to-text generation (e.g., video summarization and Q/A)." + ], + "image_footnote": [], + "bbox": [ + 521, + 325, + 867, + 563 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "without additional architectural complexity [24, 64]. We focus on long videos through the lens of language, assessing our models on the widely applicable tasks of visual summarization and question-answering.", + "bbox": [ + 496, + 637, + 893, + 698 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent work on vision-language models have yielded impressive results, predominantly focusing on understanding images or short clips of 16 frames or less [1, 13, 30, 72, 73]. This work recycles strong pre-trained image encoders, performs late temporal fusion [1, 71, 73], and employs mostly-frozen, powerful LLMs. The lack of video-first encoders, equipped with early temporal aggregation, may handicap the ability to process complex visual dependencies, and this is usually reflected in prior work's focus on short video benchmarks ( $< 30$ seconds) in which sixteen frames are sufficient for competitive performance [4, 29].", + "bbox": [ + 496, + 700, + 893, + 867 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work, we systematically explore video-first models starting from a standard image-language recipe using", + "bbox": [ + 498, + 869, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution.", + "bbox": [ + 94, + 887, + 205, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "14386", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "two-step training and large pre-trained LMs (Figure 1; [1]). This baseline enables us to start from a demonstrably scalable, simpler-to-tune, widely-used recipe that performs competitively [15, 30]. Through our analysis, we are able to scale this method in a memory-efficient manner to longer sequences of frames, up to 4.3 minutes of video at 1 FPS.", + "bbox": [ + 75, + 90, + 467, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We first explore video-first models on short-video benchmarks (MSR-VTT [67], VATEX [60], YouCook2 [81], ActivityNet [28]) and compare against the SoTA VideoCoCa model [71]. We show that simple joint space-time attention significantly improves performance over frame-level encodings on benchmarks with rich temporal dependencies (YouCook2, VATEX). 
Overall, our models are able to reach VideoCoCa performance, while requiring fewer parameters and lower frame resolution.", + "bbox": [ + 75, + 184, + 467, + 319 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This performance gain incurs extra compute and memory costs that grow quadratically with the video length. To address this, we provide one of the first systematic analyses of the memory/accuracy pareto-front of popular memory-efficient methods; this includes factorized attention, parameter-efficient image-to-video adaptation, input masking, and multi-resolution patchification. Through this analysis, we find that among all these options, simple token masking (up to $75\\%$ ) during contrastive pre-training incurs only a $1\\%$ Recall@1 drop on zero-shot text-video retrieval, and no drop in zero-shot video captioning. At the same time, such high masking offers 2-3x memory savings and allows us to generalize to longer video contexts. The alternatives we explore (e.g., efficient backbone architectures, more sophisticated TubeViT-style patchification [49]), do not maintain the same robustness against noisy video inputs and present a $25\\%$ relative decrease in performance for text-video retrieval on challenging benchmarks (YouCook2, VATEX). Finally, although parameter-efficient methods [21, 22] fail to adapt image encoders to video-first models without suffering performance drops, we find that they can adapt video models trained on short contexts (e.g., 16 second videos) to longer temporal horizons.", + "bbox": [ + 75, + 323, + 467, + 671 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Based on the above learnings, we extend our best performing short-video encoder to longer contexts of 256 frames (4.3 minutes at 1 FPS). We use the full-length videos of HowTo100M [42] accompanied by LLM-generated summaries based on the ASR to further contrastively train our LONGViVIT while masking $75\\%$ of the input video tokens and freezing most parameters of the encoder. LONGViVIT-to-text ( $\\sim$ 1B parameters) is able to outperform modular methods that use LLM assistance and PALI-3 [9] for frame captioning on temporally rich benchmarks (YouCook2, EgoSchema). Even modular methods that consider frame selection (SeViLA [74]) or an oracle segmentation of the video for localizing and captioning key events (on YouCook2) cannot reach LONGViVIT's performance. An interesting byproduct of our work is that we can glean", + "bbox": [ + 75, + 674, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "which video-language benchmarks have strong temporal dependencies, and thus are suitable for testing long video models; we find that papers often use benchmarks in which short video or even blind models perform well [5, 41, 67]. In short, we provide the following contributions:", + "bbox": [ + 498, + 90, + 890, + 166 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We explore the memory/accuracy pareto-frontier of video-first vision-language models, and systematically evaluate many architectural, data, and training alternatives. 
In the end, we identify a simple recipe that enables scaling to 4.3 minutes at 1 FPS, many times longer than comparable video-language models [1, 71].", + "- We identify short and long video benchmarks with substantial temporal dependencies, for which we demonstrate that the traditional image-first, late-temporal fusion recipe is convincingly weaker than a video-first approach.", + "- Finally, we compare our long video models to a variety of strong baselines and show competitive performance with far fewer parameters; this includes baselines that use LLM-based aggregation over visual captions, and we quantitatively evaluate this common approach for the first time on standard video benchmarks." + ], + "bbox": [ + 500, + 167, + 890, + 407 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 419, + 640, + 434 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We base our recipes on [1, 30], which provide a strong two-step video-language recipe that leverages pre-trained LLMs and works at scale. Similar work at smaller scale has additionally included captioning losses [32, 76], more contrastive losses [10, 38, 43, 66], masking/masked autoencoding [15, 16, 18, 19, 33, 35, 40, 55], and combinations thereof [13, 23, 54, 58, 61, 72, 73, 75, 82]. This work focuses on image-text modeling and extends to $< 30$ seconds via image-to-video transfer, selective fine-tuning, or temporal fusion of frame encodings [1, 71, 73].", + "bbox": [ + 496, + 445, + 890, + 595 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A volume of work focuses on video-first learning. This includes some of the very early work in image-to-video kernel inflation [6, 53, 56], transformer-based video architectures [2, 3, 37], image-to-video parameter-efficient adaptation [7, 36, 46], and multiple spatiotemporal resolutions along different network paths [14, 39, 68, 70]. These have still only been demonstrated on short videos, so other works have broached the challenge of temporal scalability: [24, 51, 64] propose alternative encoders, and [27, 48, 59] propose more exotic attention mechanisms. TubeViT [49] proposes multi-granularity patchification. We systematically dissect what works and scales among some of these alternatives, electing options that enable us to re-use strong pre-trained models and use standard, more easily-tuned architectures.", + "bbox": [ + 496, + 598, + 890, + 808 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Specifically in video-to-text generation, approaches that handle longer videos are very limited and mostly target images or short videos [15, 31, 61]. A dominant approach is to summarize frames and aggregate information via LLMs [31, 34, 62, 77]. To the best of our knowledge, we are the first to attempt to train large-scale video", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "14387", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to-text models on longer sequences of frames and directly test them against LLM-assisted modular methods on challenging temporal benchmarks [41, 81].", + "bbox": [ + 76, + 90, + 468, + 137 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. 
The Video-to-Text Architecture", + "text_level": 1, + "bbox": [ + 76, + 148, + 362, + 165 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We base our approach on the successful two-step recipe that combines pre-trained vision and language models [e.g., 1, 30, 72, 73] as shown in Figure 1: (1) we first pre-train a vision encoder, and then (2) fuse the frozen vision representations into a pre-trained, frozen LM.", + "bbox": [ + 76, + 174, + 467, + 250 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Video-Language Contrastive Pre-training", + "text_level": 1, + "bbox": [ + 76, + 258, + 434, + 275 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Following common practice [1, 30], we use a dual vision-language architecture with a Noise Contrastive Estimation (NCE) loss [17, 45, 65] to pre-train our vision encoder, similar to CLIP [50], ALIGN [26] and VideoCLIP [66]. Both encoders are transformers [57]: a BERT-medium (77M) or base (117M) language encoder and ViT-Base (86M parameters) or Large (307M parameters) vision encoder. On the language side, caption representations are computed by averaging across the corresponding token representations. On the vision side, video frames are patched into a sequence of visual tokens, fed into a vision encoder, and then average pooled to produce a final video representation.", + "bbox": [ + 75, + 281, + 467, + 462 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Most prior larger-scale video-language models use pretrained image encoders and patchify frames individually via 2D convolutions [e.g., 1, 66, 71]. Instead, we create spatiotemporal tubelets via 3D convolutions as done in recent vision-only models [2, 49, 55]. Using 3D tubelets instead of flat patches has the dual advantage of higher input compression and more explicit temporal contextualization; our early experiments yielded improved performance. The tubelet embedding sequence is then flattened, added to learnable positional embeddings, and fed into the vision encoder. The vision encoder uses spatio-temporal attention as in ViViT [2]: Joint space-time attention does not add any new parameters to vanilla image ViT [12], facilitating transfer between image and video models.", + "bbox": [ + 75, + 463, + 467, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Training a large-scale transformer-based video encoder can be challenging because self-attention across thousands of visual tokens is both compute and memory intensive. Memory bottlenecks a model in two ways: (1) limiting the number of frames, and (2) limiting the contrastive batch size during training, negatively impacting performance. To address (2), we use a pre-trained image encoder trained with large batch sizes, and further tune it on videos, instead of jointly training from scratch on images and videos. For initializing the 3D convolution, we repeat the pre-trained weights across the temporal dimension similarly to [2] (see Appendix A). During video-language pre-training, we maintain different embedding paths for images vs. videos: images are embedded with the original 2D convolution and videos with a separate 3D convolution (no weight sharing).", + "bbox": [ + 75, + 674, + 467, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. 
Video-to-Text Tuning", + "text_level": 1, + "bbox": [ + 500, + 90, + 700, + 107 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We follow prior work [e.g., 1, 72, 73] by plugging the frozen pre-trained vision encoder into a frozen pre-trained LM. We first temporally mean pool the video representations to keep a fixed number of tokens independently of the number of frames and next use a randomly initialized Perceiver-resampler [25] to project the representations to the LM embedding space (Appendix A). We add new randomly initialized cross-attention layers at each layer of the LM to ground generation on the visual content. We train the new layers and Perceiver resampler with a standard auto-regressive video captioning loss: $-\\log p(w_t|w_{<t}; \\mathcal{V})$, where $w_t$ is the $t^{th}$ caption token, and $\\mathcal{V}$ is the video representation.", + "bbox": [ + 496, + 113, + 890, + 295 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4. Memory-Efficient Encoder Design Space", + "text_level": 1, + "bbox": [ + 498, + 308, + 864, + 325 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Device memory is a key bottleneck for video training with joint space-time attention. To overcome this, we explore four broad categories of solutions: (1) efficient attention, (2) parameter-efficient image-to-video adaptation, (3) input token masking, and (4) multi-resolution patchification.", + "bbox": [ + 496, + 334, + 890, + 409 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Attention mechanism. Factorized attention [2, 3] separates the temporal and spatial dimensions over which self-attention is applied, reducing both memory and computational costs. However, this modification introduces a new temporal block within each transformer layer, making initialization and model tuning more challenging. In contrast to [2], which initializes the new blocks with zeroes, we find that we achieve the best performance when initializing the temporal blocks with the same self-attention weights of ViT. However, we add a gating mechanism which acts as a residual connection between the self-attention blocks: $h = h + \\tanh(\\alpha) h_{temporal}$. Here, $\\alpha$ is a trainable parameter initialized to zero, which helps maintain the capabilities of the original ViT during training.", + "2. Parameter-efficient adaptation. We explore using parameter-efficient methods from NLP [8] to adapt image encoders to video, while only tuning a small percentage of model parameters. Most prior work adapts image-based models by freezing an image backbone and adding late, trainable temporal-fusion layers [10, 71, 78]. In contrast, we explore ways to use pre-trained image encoders and adapt them to video-first architectures [7, 36, 46]. Inspired by the success of parameter-efficient adaptation in NLP [79], we consider using MLP Adapters [21] and LoRA [22] (details in Appendix A). We also explore tuning only temporal self-attention blocks [7], effectively as adapter layers, in factorized attention. In all variants, we still tune the video-specific 3D patch convolution.", + "3. Token masking. Most existing work samples videos at a fixed frames per second (FPS) rate [e.g., 1, 2, 55, 74]. 
However, semantics required for many video-language tasks vary slowly in the temporal dimension [80] and videos" + ], + "bbox": [ + 496, + 412, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "14388", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "present high degree of redundancy between consecutive frames [55]. We explore ways to sparsely sample the video input to reduce the number of input visual tokens. Specifically, we test random masking of input tubelet embeddings. Since consecutive frames are largely redundant, the same semantic signals could potentially be extracted even with high masking rates. For example, [55] masks up to $95\\%$ of the input video to reach optimal performance on the task of video-masked autoencoding. We demonstrate similar results in a video-language setting.", + "bbox": [ + 75, + 90, + 472, + 241 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Multi-resolution patchification. Finally, we test a simple approach to reduce redundancy in videos via more coarse-grained patchification in the temporal or spatial dimension, as commonly done in multiple-view video models [14, 39, 70]. However, this decreases frame resolution, and may lose fine-grained information. As a result, we also experiment with TubeViT [49] variant that combines flat patches and tubelets of different granularity to mitigate information loss. Following [49], we use four different convolution kernels that can encode either coarse-grained temporal or spatial information; details are in Appendix A.", + "bbox": [ + 75, + 241, + 473, + 409 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "5. Datasets and Benchmarks", + "text_level": 1, + "bbox": [ + 76, + 414, + 321, + 429 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For contrastive pre-training, we use: (1) 27M video-text pairs (VTP) as described in [1], (2) HowTo100M [42] (HT100M; 100M instructional YouTube clips aligned with ASR using their timestamps, called HowTo100M Clips), and (3) VideoCC3M [44] (3M video-text pairs based on Conceptual Captions [52]). Unfortunately, we find the text-video alignment in VideoCC3M to be of poor quality; instead, we use a modified variant with generated pseudolabeled captions of every video by PALI [9] (see Appendices B, C). To pre-train with longer videos, we use a long version of HowTo100M (referred to as HowTo100M Summary) consisting of (1) the full-length videos with an average duration of 6.5 minutes and (2) their textual summaries generated by automatically cleaning and summarizing the ASR transcripts using an LLM [20]. We also include the image datasets of [1]. For video-to-text tuning, we use the same mixture of datasets but exclude HowTo100M Clips, since the noisy video-text alignments hurt performance.", + "bbox": [ + 75, + 439, + 472, + 710 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We report text-video retrieval and captioning results on short video benchmarks, with average video length $\\leq 30$ seconds: MSR-VTT [67], YouCook2 [81], ActivityNet Captions [28], and VATEX [60]. To evaluate performance on longer videos, we consider video summarization on full-length versions of YouCook2 and ActivityNet Captions, with a video duration of up to 5 minutes, and multiple-choice video question answering (QA) on EgoSchema [41].", + "bbox": [ + 75, + 710, + 470, + 833 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "6. 
Experimental Results", + "text_level": 1, + "bbox": [ + 76, + 844, + 284, + 861 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In Section 6.1, we describe our results evaluating alternatives in memory-efficient video encoder design; options de", + "bbox": [ + 75, + 869, + 470, + 902 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/b0955a00718fc111bb34de8fea5bd7ac3220f220ae216dcf619b52630c09d0b7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MSR-VTTVATEXYC2AN
T2VV2TT2VV2TT2VV2TT2VV2T
Joint ST-ViViT39.638.123.826.312.313.66.76.4
Factorized ST-ViViT40.236.925.325.411.612.76.67.4
Avg Frame-level39.334.824.825.09.17.96.87.1
Att-pool Frame-level38.437.521.926.19.08.96.16.2
", + "bbox": [ + 498, + 88, + 893, + 183 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1. Text-video retrieval results (\\% Recall@1) when considering different visual backbones.", + "bbox": [ + 498, + 184, + 890, + 213 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "scribed in Section 4. For this analysis, we use ViT-B/BERT-medium, with training details in Appendix B and ablations on experimental design in Appendix C.", + "bbox": [ + 496, + 229, + 890, + 275 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In Section 6.2, we combine our most competitive design choices from 6.1 and test our models on short and long video understanding benchmarks. We scale our best model variants to ViT-L/BERT-base with a 400M (or 1B) language decoder. We test our short video models on text-video retrieval and video captioning, and our long video models on video summarization and QA on 256-frame videos.", + "bbox": [ + 496, + 276, + 890, + 381 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In Section 6.3, we share our experience working across short and long video benchmarks [5, 11, 41, 60, 67], offering insights about which ones yield robust temporal signal.", + "bbox": [ + 496, + 382, + 890, + 429 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "6.1. Exploration of Memory-Efficient Designs", + "text_level": 1, + "bbox": [ + 498, + 438, + 852, + 455 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We explore memory-efficient methods to train video-first encoders as described in Section 4. We first consider short video inputs of 16 frames at 1 FPS and report peak train-time memory consumption vs. performance on text-video retrieval on short video benchmarks [5]. Then, we test whether our main findings hold for longer inputs (128+ frames) on video summarization on full-length YouCook2.", + "bbox": [ + 496, + 460, + 890, + 568 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Base architectures. We explore the memory/accuracy trade-off of different visual backbones in Table 1: ViViT with joint space-time attention (i.e., Joint ST-ViViT), ViViT with factorized attention (i.e., Factorized ST-ViViT) [2], and frame-level (ViT-based) image encodings with average or attentional pooling ('att-pool') [1, 71]. Different methods perform similarly, especially on MSR-VTT and ActivityNet (AN). Interestingly, attentional pooling on top of frame-level encodings does not improve performance. ViViT with either joint or factorized attention performs best and presents higher gains for YouCook2 (YC2), the more temporally challenging benchmark [6.3]. In contrast to prior work [e.g., 10, 71] which tests frozen image-to-video transfer and claims joint attention to be inferior, we find it to be competitive in this fully fine-tuned setting.", + "bbox": [ + 496, + 571, + 890, + 799 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Architectures and token masking. We now test robustness of backbones when masking part of the input tubelets (0-75%). 
We report Recall@1 on text-to-video retrieval for YouCook2 and VATEX1 per backbone for different masking", + "bbox": [ + 496, + 803, + 890, + 866 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "1We do not observe significant sensitivity to input masking for MSR-VTT and ActivityNet Captions across all configurations (Section 6.3).", + "bbox": [ + 500, + 875, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "14389", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4b8beaa9ce43e207c59263995739190a120d568c668804090ce659197e1107db.jpg", + "image_caption": [ + "Figure 2. Trade-offs between performance (% text-to-video Recall@1; y axis) and train-time memory consumption (x axis) for different backbones (joint space-time (JST), factorized space-time (FST), and drame-level encodings) with random input masking (0% up to 75%) or parameter-efficient methods for training (Adapters, LoRA, factorized temporal (FST) adaptation; lower opacity)." + ], + "image_footnote": [], + "bbox": [ + 89, + 88, + 467, + 241 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/cc97d257a8490c1fb860941daaa993b99524732efdf2c8551c3fcf6b7514a227.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 498, + 88, + 870, + 241 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4939a3e4c1ce6e4aad89a2afa88035c47845e56683f38f3e4e88c966c1b14783.jpg", + "image_caption": [ + "Figure 3. Difference (\\%) in memory consumption for different model scales: (ViT-B vs ViT-L). We also report performance drop of efficient methods presented in Figure 2 in comparison with the vanilla approach (i.e., no input masking and full fine-tuning) at different model scales to test whether behavior is similar." + ], + "image_footnote": [], + "bbox": [ + 117, + 319, + 428, + 476 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ratios in Figure 2. Joint space-time attention (JST) is robust against noise from masking up to $75\\%$ during pre-training. The same does not hold for frame-level encodings and factorized attention (FST), where performance drops consistently as we increase masking. We conclude that JST can better handle noisy inputs and use it in further exploration.", + "bbox": [ + 75, + 571, + 468, + 662 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Parameter-efficient adaptation. We next report performance of parameter-efficient image-to-video adaptation in Figure 2. We consider (1) JST with (a) MLP Adapters at every layer of the encoder, (b) LoRA with rank decomposition matrices in the self-attention and feed-forward transformer blocks, and (2) factorized temporal adaptation where we tune the temporal self-attention. No adaptation method can reach the memory savings provided by high input masking, since we tune parameters depthwise and gradient computation still requires backpropagation through the model. At the same time, we see significant performance drop, suggesting that adaptation of spatial-only models to the temporal dimension cannot be sufficiently addressed in semifrozen fashion. Comparing parameter-efficient methods, we find MLP Adapters to be more competitive than LoRA,", + "bbox": [ + 75, + 674, + 470, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "which is now canonical for LLMs. 
We hypothesize that LoRA is successful for tuning very small portions of the network and performing \"easier\" in-modality transfer.", + "bbox": [ + 496, + 320, + 890, + 366 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Adaptation at scale. We next scale from ViT-B/86M to ViT-L/307M in Figure 3 and test whether observations hold with different model scales. We present the $\\%$ memory increase from base to large (left bar set) and $\\%$ performance decrease of each method at each scale $^2$ . Joint ST exhibits a similar memory pattern to frame-level, while leading to smaller accuracy drops, whereas factorized ST presents significant memory overhead with model scale due to the extra temporal parameters. For this reason, we exclude factorized ST from further experimentation. Finally, parameter-efficient methods are unable to achieve competitive performance at both model scales, although their memory requirements scale better with model size.", + "bbox": [ + 496, + 368, + 892, + 564 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multi-resolution patchification. Given the outsized memory impact of input token count in Figure 4, we additionally analyze: (1) coarse-grained patchification in the temporal (convolution over 4 instead of 2 frames) and/or spatial (convolution over $32 \\times 32$ instead of $16 \\times 16$ pixel spaces) dimension, and (2) the TubeViT [49] approach of multiple tube kernels of different spatiotemporal size and strides. For all benchmarks, masking the input at high ratios while maintaining a fine granularity of tubelets decreases performance significantly less than other input processing methods. Temporal coarse-grained patchification negatively affects benchmarks with richer temporal dependencies (i.e., YouCook2, VATEX) more than spatial. The opposite trend holds for datasets depending on spatial understanding (i.e., MSR-VTT, ActivityNet Captions3). TubeViT acts as the middle ground between the two by employing multiple kernels, with some performance degradation across all benchmarks. However, it is not able to alleviate the negative effects caused by considering coarser", + "bbox": [ + 496, + 566, + 892, + 854 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "2Performance drop for factorized ST is omitted since the variant without masking leads to out of memory issues.", + "bbox": [ + 500, + 862, + 890, + 887 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "3Omitted from Figure 4 but follows same patterns as MSR-VTT.", + "bbox": [ + 517, + 887, + 859, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "14390", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/331c8ae41358d767e8f44c4f3567c4ee44f654154ab72356ba8d75a1e5a32bbb.jpg", + "image_caption": [ + "Figure 4. Trade-offs between performance (text-to-video Recall@1; y axis) and memory consumption (x axis) for input sampling methods: (1) high input masking ratios (0% to 75%) with joint space-time attention, (2) coarse-grained temporal (Coarse temp) and/or spatial (Coarse space) patchification with a fixed kernel and TubeViT which samples parts of the video with multiple 3D kernels of different granularity." 
+ ], + "image_footnote": [], + "bbox": [ + 71, + 98, + 344, + 208 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/599b7d18991dac70929f8f2d35e1e90bda51c44b55b313d2b17b5560a7e142fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 99, + 620, + 208 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/863c4270f95741d222f2cc1287dfa29362209ec49fa3c97641bb96b4ee1a6fd3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 101, + 897, + 208 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/bf04ac44859cff76f2c83a72cce116e5002366b55efb0b0ab827a6c34c07f1a8.jpg", + "image_caption": [ + "Figure 5. Scaling memory-efficient methods to more frames (i.e., 128 frames) for ViViT-B and variants. We measure performance for video-to-text summarization on the full-length YouCook2 videos via Rouge-L (color-coded) while keeping track of memory consumption during short-to-long video contrastive tuning ( $x$ -axis) and video-to-text tuning ( $y$ -axis)." + ], + "image_footnote": [], + "bbox": [ + 117, + 258, + 851, + 377 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "grained information and presents higher memory requirements due to the multiple convolutions. Overall, we find that high masking with Joint ST and small tubelets yields the strongest memory/performance curves.", + "bbox": [ + 75, + 453, + 470, + 513 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Scaling to longer videos. We now test the best methods from Figure 4 on 128 input frames (32.7k visual tokens). We select methods that are within a memory budget (red vertical lines) and would fit on a 16GB device when expanded to long videos (128+ frames). We contrastively fine-tune [3.1] our best performing video model (i.e., Joint ST referred to as SHORTVIVIT) on sequences of 128 frames on HowTo100M Summary [5], as detailed in Appendix B. We refer to this model as LONGVIVIT. Finally, we fine-tune LONGVIVIT for text generation (Section 3.2) on the full-length YouCook2, and report Rouge-L in Figure 5, measuring memory consumption during both long-context contrastive ( $x$ -axis) and video-to-text ( $y$ -axis) tuning.", + "bbox": [ + 75, + 525, + 468, + 720 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Validating our previous results, IMAGEViT (frame-level encodings) trained on longer videos with $75\\%$ masking significantly under-performs video-first models (10 R-L drop). SHORTViVIT without further HT100M Summary training performs better than IMAGEViT, but cannot match models adapted to longer videos. LONGViVIT improves performance by 1.8 Rouge-L points over SHORTViVIT. Comparing input masking with coarser-grained patchification provides similar insights to the previous paragraph.", + "bbox": [ + 75, + 722, + 470, + 859 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Finally, we test MLP Adapters [21] for tuning SHORT-VIViT to longer videos and observe no performance drop in comparison with full fine-tuning. This provides further evidence that parameter-efficient methods can be used for \"easier transfers\" but not temporal adaptation of spatial-only models. One downside of MLP Adapters is that it increases parameter count during video-to-text tuning (y-axis in Figure 5). Thus, we also experiment with contrastively tuning only the last four layers of the model. With this, we observe a further $3\\mathrm{x}$ decrease in memory, since we tune the network widthwise and excise early layer gradient computation. 
At the same time, there is no memory increase for video-to-text and no performance degradation. We conclude that this combination (high input masking and tuning the last layers) is an effective setting for longer video adaptation. Given the observed robustness to masking, to further decrease video-to-text memory, we also mask $30\\%$ of the input video during training and inference without observing any drop in summarization performance (see Appendix C).", + "bbox": [ + 496, + 453, + 890, + 741 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.2. Main Results", + "text_level": 1, + "bbox": [ + 500, + 757, + 638, + 772 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Short video benchmarks. We present our main results on short video benchmarks in Table 2. We use ViT-L with BERT-base for contrastive pre-training (Section 3.1) and a 400M frozen LM for video-to-text tuning (Section 3.2). Our entire video-to-text model accounts for $\\sim 900\\mathrm{M}$ parameters, although we additionally test scaling the frozen LM to 1B parameters ( $\\sim 1.5\\mathrm{B}$ total count). We report Recall@1 for zero-shot text-video retrieval and CIDEr for zero-shot and", + "bbox": [ + 496, + 779, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "4We start from IMAGEViT trained on short videos with no masking.", + "bbox": [ + 93, + 875, + 457, + 886 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "5Using the same fine-grained SHORTVIViT model for initialization.", + "bbox": [ + 93, + 886, + 457, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "14391", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/f58afae7b89fe101fd7e45df83b05ef58675a234cd2cf537a1ef7bea38dd256b.jpg", + "table_caption": [], + "table_footnote": [ + "Table 2. We present three model variants: IMAGEViT-L, that uses frame-level encodings with a late temporal fusion trained on images and videos, SHORTViT-L, our best performing video-first model with joint space-time attention, and Efficient SHORTViT-L (EffSHORTViT-L) where we apply $75\\%$ train-time masking for 3x memory savings. We also report performance for SoTA image-first models: VideoCoCa-L and Flamingo-3B, although they are bigger and not directly comparable. We report Recall@1 for zero-shot text-to-video (T2V) and video-to-text (V2T) retrieval, and CIDEr for zero-shot and fine-tuned (FT) captioning when considering a 400M (C1) or 1B (C2) frozen LM for generation. ActivityNet retrieval results marked with $*$ are not directly comparable, as these models uniformly sample frames, whereas we use the first frames of the long video with a fixed FPS of 1 to match experimental settings across benchmarks." + ], + "table_body": "
MSR-VTT | Zero-shot | FT | VATEX | Zero-shot | FT | YouCook2 | Zero-shot | FT | ActivityNet | Zero-shot | FT
T2V/V2T | C1/C2 | C1 | T2V/V2T | C1/C2 | C1 | T2V/V2T | C1/C2 | C1 | T2V/V2T | C1/C2 | C1
IMAGEViT-L | 30.9/41.6 | 24.6/25.1 | 63.6 | 36.2/42.9 | 37.9/39.4 | 61.1 | 18.2/16.8 | 14.5/16.5 | 95.9 | 20.6/18.2 | 16.3/17.7 | 41.1
SHORTViT-L | 31.9/38.9 | 32.7/32.9 | 63.1 | 37.8/42.8 | 43.6/43.0 | 67.5 | 20.4/20.5 | 21.0/22.1 | 131.9 | 21.3/18.9 | 25.2/26.1 | 44.8
EffSHORTViT-L | 29.9/38.3 | 33.8/33.9 | 63.8 | 34.4/42.7 | 41.3/42.7 | 64.7 | 20.5/20.3 | 21.1/21.7 | 127.1 | 20.1/17.7 | 27.0/26.5 | 41.1
VideoCoCa-L [71] | 33.3/- | 24.3 | - | - | - | - | 18.9/- | 20.7 | - | 31.5*/- | 17.4 | -
VideoCoCa-2.1B | 34.3/64.7 | 27.1 | 73.2 | 53.2/73.6 | 22.8 | 77.8 | 20.3/- | 34.3 | 128.0 | 34.5*/33.0* | 19.3 | 39.3
Flamingo-3B [1] | - | - | - | - | 40.1 | - | - | 55.8 | - | - | - | -
", + "bbox": [ + 78, + 88, + 893, + 224 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "fine-tuned video captioning. We consider three model variants: frame-level encodings IMAGEViT, SHORTViVIT, and SHORTViVIT with $75\\%$ masking that uses 2-3x less memory (referred to as Efficient SHORTViVIT). We also report results for VideoCoCa [71] and Flamingo [1] $^6$ .", + "bbox": [ + 75, + 340, + 468, + 416 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our results remain consistent with our earlier observations. Contextualizing only intra-frame dependencies coupled with late temporal fusion (IMAGEVIT) leads to inferior performance for retrieval and captioning on benchmarks with richer temporal dependencies (YouCook2, VATEX) but performs better on retrieval on MSR-VTT which relies on spatial understanding. Video-first architectures further tuned on video datasets (substantially noisier than curated image ones) improve temporal capabilities at the expense of spatial. For Efficient SHORTVIVIT, we find that masking $75\\%$ of the input video causes a performance drop: an average of $1\\%$ absolute difference on zero-shot retrieval and no significant difference on zero-shot captioning across all benchmarks. The efficient model still performs similarly or better than IMAGEVIT, especially on captioning and temporally rich benchmarks (e.g., YouCook2, VATEX), while consuming significantly less memory. Finally, when scaling the frozen LM component from 400M to 1B $(\\mathrm{C}1\\rightarrow \\mathrm{C}2)$ for zero-shot video-to-text generation, we observe moderate improvements across benchmarks and variants.", + "bbox": [ + 75, + 417, + 470, + 718 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We compare our results against large image-based models with SoTA performance on video benchmarks (second block of Table 2). Although results are not directly comparable due to different experimental settings, we are competitive and achieve even better results for temporally rich benchmarks (i.e., YouCook2) on text-video retrieval for models of similar parameter count7. Moreover, our models", + "bbox": [ + 75, + 719, + 468, + 825 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "significantly outperform VideoCoCa on most video captioning benchmarks even when considering their much larger versions in the zero-shot setting. Finally, when fine-tuning our video-to-text models with the 400M LM, we are again able to match and surpass the performance of the larger VideoCoCa-2.1B in two out of four benchmarks.", + "bbox": [ + 500, + 340, + 890, + 431 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Long video understanding. We further tune LONG-VIViT-L on 256-frame HT100M Summary videos and evaluate zero-shot/fine-tuned summarization (YouCook2, ActivityNet) and QA (EgoSchema released subset); this is shown in Table 3. We additionally report results of LONG-VIViT on Perception Test [47] in Appendix D, where videos are short but can benefit from higher FPS.", + "bbox": [ + 496, + 435, + 890, + 541 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We consider two families of models. 1. Models that take as input 256 frames (first block of Table 3): IMAGEVIT and SHORTVIVIT pre-trained on 16-frame clips, and LONG-VIVIT further trained on 256-frame clips. 2. 
Modular approaches from prior work (second block of Table 3): (a) SeViLA Localization [74] for localizing important frames in the long video given a textual query which are then fed into SHORTVIVIT for performing the task $^8$ , and (b) the popular paradigm of captioning video segments or frames and using an LLM to aggregate information and form coherent summaries or answer questions [31, 34, 77]. We try the latter approach with IMAGEVIT and SHORTVIVIT, generating captions over 16-second video segments and then feeding the captions to the September 2023 release of Bard, a much larger LLM than the ones used in previous results. We caption clips using uniform video segmentation (every 16 seconds) or an oracle segmentation when available (i.e., we consider ground-truth start and end timestamps for different events within ActivityNet and YouCook2 videos). We", + "bbox": [ + 496, + 541, + 892, + 828 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "6Models are not directly comparable due to different pre-training datasets, model sizes, training regimes, and input resolution. For instance, [71] fully fine-tune the LM and report results for $576 \\times 576$ frame resolution instead of $256 \\times 256$ .", + "bbox": [ + 75, + 838, + 468, + 886 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "Video-text retrieval results on ActivityNet Captions are not comparable", + "bbox": [ + 94, + 887, + 468, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "ble since we are only considering the first 16 seconds of the video, whereas [71] uniformly sample frames from the entire video ( $\\sim$ 180 seconds).", + "bbox": [ + 500, + 839, + 890, + 863 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "8We select 16 frames using the pre-trained localizer provided by [74]. For video summarization, we use synthetic summaries of the video generated by PALI+Bard as the textual query for retrieving frames.", + "bbox": [ + 500, + 863, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "14392", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/05ea390e836d41525f310e7f618be08d5a9be2a4b651a77858c6d736c1c79c8d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Zero-shot | Fine-tuned
AN | YC2 | ES | AN | YC2
Inference with 256 frames
IMAGEViT | 14.4 | 4.6 | 40.8 | 23.8 | 29.4
SHORTViVIT | 15.4 | 7.0 | 47.9 | 24.3 | 29.5
LONGViVIT | 15.2 | 20.3 | 56.8 | 24.0 | 30.6
Modular approaches with 16-frame video models
SeViLA-to-SHORTViVIT | 16.2 | 4.2 | 49.6 | 24.4 | 28.3
IMAGEViT-to-Bard | 18.1 | 15.8 | 35.0 | 22.9 | 19.1
+ oracle segments | 16.3 | 16.2 | - | 22.7 | 22.1
SHORTViVIT-to-Bard | 19.3 | 18.1 | 42.0 | 22.7 | 20.8
+ oracle segments | 18.3 | 18.2 | - | 22.7 | 24.7
PALI [9] 5B-to-Bard | 22.0 | 19.9 | 44.8 | - | -
Blind Bard | - | - | 27.0 | - | -
SoTA [69] | - | - | - | 36.9 | 34.6
", + "bbox": [ + 101, + 88, + 444, + 313 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3. Results on long video-to-text benchmarks. We report Rouge-L for zero-shot and fine-tuned video summarization on ActivityNet Captions (AN) and YouCook2 (YC2) and zero-shot accuracy (\\%) for multiple choice QA on EgoSchema (ES).", + "bbox": [ + 75, + 316, + 468, + 372 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "also test substituting our small video models with PALI-3 (5B parameters) for frame captioning9. Finally, we reference the SoTA fine-tuned performance on ActivityNet and YouCook2, when using specialized models with precomputed features by multiple networks, object detectors, and domain-specific vocabulary [69].", + "bbox": [ + 75, + 378, + 468, + 469 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Looking through Table 3, we find that on ActivityNet, which contains less temporal dependencies [6.3], modular approaches via frame selection or LLM-based aggregation of information (second block) perform well. Frame captioning via PALI combined with the power of LLMs is enough for the task in a zero-shot setting. For fine-tuned models, feeding either the long input or selected frames into SHORTVIVIT perform better than utilizing Bard. On ActivityNet, we see no benefit from training further on longer videos.", + "bbox": [ + 75, + 469, + 468, + 604 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In contrast, we find that short video and modular models are insufficient for addressing video tasks with longer-range temporal dependencies (YouCook2, EgoSchema). Adapting SHORTVIVIT to longer contexts (LONGVIVIT) significantly improves performance and achieves the best scores across all comparison approaches. Using Bard as an information aggregator over individual clip captions cannot achieve competitive performance, even when considering an oracle video segmentation for YouCook2 (Lines 3 and 5 in the second block of Table 3). Surprisingly, even using a much larger and more powerful image-based model (PALI) cannot reach LONGVIVIT on YouCook2 and EgoSchema. Interestingly, selecting 16 key frames and feeding them into SHORTVIVIT also outperforms Bard-based methods on EgoSchema and fine-tuned YouCook2. This suggests there can be temporal dependencies in long videos that cannot be resolved even with an optimal event segmentation for the video, or be aggregated by LLMs given inprecise visual", + "bbox": [ + 75, + 604, + 468, + 876 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4442e7c74a3c600d257d792481338f515eed7d5f0aacafe8e67b64eaf8e60eea.jpg", + "image_caption": [ + "Figure 6. Performance difference $(\\%)$ per benchmark when we remove (1) video or (2) image data from the training mixture." + ], + "image_footnote": [], + "bbox": [ + 531, + 89, + 857, + 189 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "information. On such benchmarks, LONGVIVIT demonstrates strong performance even without LLM assistance.", + "bbox": [ + 500, + 234, + 890, + 265 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.3. Brief Notes on Video Evaluations", + "text_level": 1, + "bbox": [ + 500, + 277, + 790, + 292 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We briefly describe some of our findings on video evaluations. Firstly, we find that blind Bard is able to achieve SoTA results on the full set of EgoSchema (no visual input; $33.9\\%$ accuracy vs. $32.1\\%$ for the best model in [41]). 
Adding visual information from PALI into Bard increases performance to just $39.2\\%$ . However, on EgoSchema's released subset, performance of blind Bard is $27\\%$ , which is much lower than PALI-to-Bard $(44.8\\%)$ , suggesting that the subset contains questions that rely more on visual grounding than pure language reasoning, so we report numbers on the subset in Table 3 and on the full set in Appendix ??", + "bbox": [ + 498, + 301, + 890, + 468 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 6 details a simple ablation across other video benchmarks to quantify temporal richness. We test removing either video or image data from the training mix and measure the effect on performance (video-to-text Recall@1). We see a dramatic performance drop when removing video data for YouCook2 and VATEX (up to $75\\%$ ). ActivityNet and MSRVTT suffer more from the absence of image data, whereas non-video training influences performance in lesser degree (as little as $18\\%$ for MSR-VTT). We believe there's room for more fine-grained, temporal-focused video-language benchmarks in the community.", + "bbox": [ + 498, + 469, + 892, + 635 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 652, + 625, + 667 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In short, we systematically analyze memory-efficient methods to scale video-first architectures to longer sequences of frames and demonstrate that just masking high percentages of the video $(\\leq 75\\%)$ yields competitive results on long video-language tasks. Such masking shows a very small performance drop on short videos, provides 2-3x memory savings and allows scaling up to 4.3 minutes at 1 FPS (LONGVIVIT) when freezing part of the short video network in our two-stage training. LONGVIVIT outperforms modular approaches with LLM assistance on video summarization and QA on benchmarks with richer temporal dependencies (YouCook2, EgoSchema). We overall demonstrate that encoding longer-range visual dependencies can make a difference in downstream performance and corrects mistakes that LLMs are unable to rectify.", + "bbox": [ + 498, + 676, + 890, + 886 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "9We consider captions of key frames per 8 seconds of video.", + "bbox": [ + 94, + 886, + 413, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "14393", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in Neural Information Processing Systems, 35:23716-23736, 2022. 1, 2, 3, 4, 7", + "[2] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. Vivit: A video vision transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6836-6846, 2021. 2, 3, 4", + "[3] Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In ICML, page 4, 2021. 2, 3", + "[4] Shyamal Buch, Cristóbal Eyzaguirre, Adrien Gaidon, Jiajun Wu, Li Fei-Fei, and Juan Carlos Niebles. 
Revisiting the \"video\" in video-language understanding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2917-2927, 2022. 1", + "[5] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 961-970, 2015. 2, 4", + "[6] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 2", + "[7] Dongsheng Chen, Chaofan Tao, Lu Hou, Lifeng Shang, Xin Jiang, and Qun Liu. Litevl: Efficient video-language learning with enhanced spatial-temporal modeling. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 7985-7997, 2022. 2, 3", + "[8] Jiao Chen, Aston Zhang, Xingjian Shi, Mu Li, Alex Smola, and Diyi Yang. Parameter-efficient fine-tuning design spaces. arXiv preprint arXiv:2301.01821, 2023. 3", + "[9] Xi Chen, Xiao Wang, Lucas Beyer, Alexander Kolesnikov, Jialin Wu, Paul Voigtlaender, Basil Mustafa, Sebastian Goodman, Ibrahim Alabdulmohsin, Piotr Padlewski, et al. Pali-3 vision language models: Smaller, faster, stronger. arXiv preprint arXiv:2310.09199, 2023. 2, 4, 8", + "[10] Feng Cheng, Xizi Wang, Jie Lei, David Crandall, Mohit Bansal, and Gedas Bertasius. Vindlu: A recipe for effective video-and-language pretraining. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10739-10750, 2023. 2, 3, 4", + "[11] Pradipto Das, Chenliang Xu, Richard F Doell, and Jason J Corso. A thousand frames in just a few words: Linguual description of videos through latent topics and sparse object stitching. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2634-2641, 2013. 4", + "[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Trans-" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "formers for image recognition at scale. In International Conference on Learning Representations, 2020. 3", + "[13] Danny Driess, Fei Xia, Mehdi SM Sajjadi, Corey Lynch, Aakanksha Chowdhery, Brian Ichter, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, et al. Palm-: An embodied multimodal language model. arXiv preprint arXiv:2303.03378, 2023. 1, 2", + "[14] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6202-6211, 2019. 2, 4", + "[15] Tsu-Jui Fu, Linjie Li, Zhe Gan, Kevin Lin, William Yang Wang, Lijuan Wang, and Zicheng Liu. Violet: End-to-end video-language transformers with masked visual-token modeling. arXiv preprint arXiv:2111.12681, 2021. 2", + "[16] Tsu-Jui Fu, Linjie Li, Zhe Gan, Kevin Lin, William Yang Wang, Lijuan Wang, and Zicheng Liu. An empirical study of end-to-end video-language transformers with masked visual modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22898-22909, 2023. 2", + "[17] Michael Gutmann and Aapo Hyvarinen. 
Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pages 297–304. JMLR Workshop and Conference Proceedings, 2010. 3", + "[18] Tengda Han, Weidi Xie, and Andrew Zisserman. Turbo training with token dropout. arXiv preprint arXiv:2210.04889, 2022. 2", + "[19] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Doll'ar, and Ross B Girshick. Masked autoencoders are scalable vision learners. 2022 IEEE. In CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15979-15988, 2021. 2", + "[20] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. Training compute-optimal large language models. arXiv preprint arXiv:2203.15556, 2022. 4", + "[21] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 2, 3, 6", + "[22] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. In International Conference on Learning Representations, 2021. 2, 3", + "[23] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Qiang Liu, et al. Language is not all you need: Aligning perception with language models. arXiv preprint arXiv:2302.14045, 2023. 2", + "[24] Md Mohaiminul Islam and Gedas Bertasius. Long movie clip classification with state-space video models. In European Conference on Computer Vision, pages 87-104. Springer, 2022. 1, 2" + ], + "bbox": [ + 503, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "14394", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[25] Andrew Jaegle, Felix Gimeno, Andy Brock, Oriol Vinyals, Andrew Zisserman, and Joao Carreira. Perceiver: General perception with iterative attention. In International conference on machine learning, pages 4651-4664. PMLR, 2021. 3", + "[26] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning, pages 4904-4916. PMLR, 2021. 3", + "[27] Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. arXiv preprint arXiv:2001.04451, 2020. 2", + "[28] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017. 2, 4", + "[29] Jie Lei, Tamara L Berg, and Mohit Bansal. Revealing single frame bias for video-and-language learning. arXiv preprint arXiv:2206.03428, 2022. 1", + "[30] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023. 1, 2, 3", + "[31] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. 
Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023. 2, 7", + "[32] Linjie Li, Zhe Gan, Kevin Lin, Chung-Ching Lin, Zicheng Liu, Ce Liu, and Lijuan Wang. Lavender: Unifying videolanguage understanding as masked language modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23119-23129, 2023. 2", + "[33] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23390-23400, 2023. 2", + "[34] Kevin Lin, Faisal Ahmed, Linjie Li, Chung-Ching Lin, Ehsan Azarnasab, Zhengyuan Yang, Jianfeng Wang, Lin Liang, Zicheng Liu, Yumao Lu, Ce Liu, and Lijuan Wang. Mm-vid: Advancing video understanding with gpt-4v(ision), 2023. 2, 7", + "[35] Yuanze Lin, Chen Wei, Huiyu Wang, Alan Yuille, and Cihang Xie. Smaug: Sparse masked autoencoder for efficient video-language pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2459-2469, 2023. 2", + "[36] Ruyang Liu, Jingjia Huang, Ge Li, Jiashi Feng, Xinglong Wu, and Thomas H Li. Revisiting temporal modeling for clip-based image-to-video knowledge transferring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6555-6564, 2023. 2, 3", + "[37] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3202-3211, 2022. 2" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[38] Huaishao Luo, Lei Ji, Ming Zhong, Yang Chen, Wen Lei, Nan Duan, and Tianrui Li. Clip4clip: An empirical study of clip for end to end video clip retrieval and captioning. Neurocomputing, 508:293-304, 2022. 2", + "[39] Chuofan Ma, Qiushan Guo, Yi Jiang, Ping Luo, Zehuan Yuan, and Xiaojuan Qi. Rethinking resolution in the context of efficient video recognition. Advances in Neural Information Processing Systems, 35:37865-37877, 2022. 2, 4", + "[40] Yue Ma, Tianyu Yang, Yin Shan, and Xiu Li. Simvtp: Simple video text pre-training with masked autoencoders. arXiv preprint arXiv:2212.03490, 2022. 2", + "[41] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. arXiv preprint arXiv:2308.09126, 2023. 2, 3, 4, 8", + "[42] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2630-2640, 2019. 2, 4", + "[43] Antoine Miech, Jean-Baptiste Alayrac, Lucas Smaira, Ivan Laptev, Josef Sivic, and Andrew Zisserman. End-to-end learning of visual representations from uncurated instructional videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9879-9889, 2020. 2", + "[44] Arsha Nagrani, Paul Hongsuck Seo, Bryan Seybold, Anja Hauth, Santiago Manen, Chen Sun, and Cordelia Schmid. Learning audio-video modalities from image captions. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XIV, pages 407-426. Springer, 2022. 
4", + "[45] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. 3", + "[46] Junting Pan, Ziyi Lin, Xiatian Zhu, Jing Shao, and Hongsheng Li. St-adapter: Parameter-efficient image-to-video transfer learning. Advances in Neural Information Processing Systems, 35:26462-26477, 2022. 2, 3", + "[47] Viorica Pătrăucean, Lucas Smaira, Ankush Gupta, Adrià Recasens Continente, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Joseph Heyward, Mateusz Malinowski, Yi Yang, et al. Perception test: A diagnostic benchmark for multimodal video models. arXiv preprint arXiv:2305.13786, 2023. 7", + "[48] Bowen Peng, Jeffrey Quesnelle, Honglu Fan, and Enrico Shippole. Yarn: Efficient context window extension of large language models. arXiv preprint arXiv:2309.00071, 2023. 2", + "[49] AJ Piergiovanni, Weicheng Kuo, and Anelia Angelova. Rethinking video vits: Sparse video tubes for joint image and video learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2214-2224, 2023. 2, 3, 4, 5", + "[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "14395", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 3", + "[51] Michael S Ryoo, Keerthana Gopalakrishnan, Kumara Kahapatitiya, Ted Xiao, Kanishka Rao, Austin Stone, Yao Lu, Julian Ibarz, and Anurag Arnab. Token turing machines. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19070-19081, 2023. 2", + "[52] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, 2018. 4", + "[53] Karen Simonyan and Andrew Zisserman. Two-stream convolutional networks for action recognition in videos. Advances in neural information processing systems, 27, 2014. 2", + "[54] Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15638-15650, 2022. 2", + "[55] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. In Advances in Neural Information Processing Systems, 2022. 2, 3, 4", + "[56] Du Tran, Heng Wang, Lorenzo Torresani, Jamie Ray, Yann LeCun, and Manohar Paluri. A closer look at spatiotemporal convolutions for action recognition. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 6450-6459, 2018. 2", + "[57] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 
3", + "[58] Junke Wang, Dongdong Chen, Zuxuan Wu, Chong Luo, Luowei Zhou, Yucheng Zhao, Yujia Xie, Ce Liu, Yu-Gang Jiang, and Lu Yuan. Omnivl: One foundation model for image-language and video-language tasks. Advances in neural information processing systems, 35:5696-5710, 2022. 2", + "[59] Sinong Wang, Belinda Z Li, Madian Khabsa, Han Fang, and Hao Ma. Linformer: Self-attention with linear complexity. arXiv preprint arXiv:2006.04768, 2020. 2", + "[60] Xin Wang, Jiawei Wu, Junkun Chen, Lei Li, Yuan-Fang Wang, and William Yang Wang. Vatex: A large-scale, high-quality multilingual dataset for video-and-language research. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4581-4591, 2019. 2, 4", + "[61] Yi Wang, Kunchang Li, Yizhuo Li, Yinan He, Bingkun Huang, Zhiyu Zhao, Hongjie Zhang, Jilan Xu, Yi Liu, Zun Wang, et al. Internvideo: General video foundation models via generative and discriminative learning. arXiv preprint arXiv:2212.03191, 2022. 2", + "[62] Zhenhailong Wang, Manling Li, Ruochen Xu, Luowei Zhou, Jie Lei, Xudong Lin, Shuohang Wang, Ziyi Yang, Chenguang Zhu, Derek Hoiem, et al. Language models with" + ], + "bbox": [ + 78, + 90, + 470, + 901 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "image descriptors are strong few-shot video-language learners. Advances in Neural Information Processing Systems, 35: 8483-8497, 2022. 2", + "[63] Chao-Yuan Wu and Philipp Krahenbuhl. Towards long-form video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1884-1894, 2021. 1", + "[64] Chao-Yuan Wu, Yanghao Li, Karttikeya Mangalam, Haoqi Fan, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Memvit: Memory-augmented multiscale vision transformer for efficient long-term video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13587-13597, 2022. 1, 2", + "[65] Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3733-3742, 2018. 3", + "[66] Hu Xu, Gargi Ghosh, Po-Yao Huang, Dmytro Okhonko, Armen Aghajanyan, Florian Metze, Luke Zettlemoyer, and Christoph Feichtenhofer. Videoclip: Contrastive pre-training for zero-shot video-text understanding. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 6787-6800, 2021. 2, 3", + "[67] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5288-5296, 2016. 2, 4", + "[68] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5036-5045, 2022. 2", + "[69] Kashu Yamazaki, Khoa Vo, Quang Sang Truong, Bhiksha Raj, and Ngan Le. Vlint: visual-linguistic transformer-in-transformer for coherent video paragraph captioning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 3081–3090, 2023. 8", + "[70] Shen Yan, Xuehan Xiong, Anurag Arnab, Zhichao Lu, Mi Zhang, Chen Sun, and Cordelia Schmid. Multiview transformers for video recognition. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3333-3343, 2022. 2, 4", + "[71] Shen Yan, Tao Zhu, Zirui Wang, Yuan Cao, Mi Zhang, Soham Ghosh, Yonghui Wu, and Jiahui Yu. Video-text modeling with zero-shot transfer from contrastive captioners. arXiv preprint arXiv:2212.04979, 2022. 1, 2, 3, 4, 7", + "[72] Antoine Yang, Antoine Miech, Josef Sivic, Ivan Laptev, and Cordelia Schmid. Zero-shot video question answering via frozen bidirectional language models. Advances in Neural Information Processing Systems, 35:124-141, 2022. 1, 2, 3", + "[73] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 1, 2, 3" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "14396", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[74] Shoubin Yu, Jaemin Cho, Prateek Yadav, and Mohit Bansal. Self-chained image-language model for video localization and question answering. arXiv preprint arXiv:2305.06988, 2023. 2, 3, 7", + "[75] Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, et al. Florence: A new foundation model for computer vision. arXiv preprint arXiv:2111.11432, 2021. 2", + "[76] Rowan Zellers, Ximing Lu, Jack Hessel, Youngjae Yu, Jae Sung Park, Jize Cao, Ali Farhadi, and Yejin Choi. Merlot: Multimodal neural script knowledge models. Advances in Neural Information Processing Systems, 34:23634-23651, 2021. 2", + "[77] Andy Zeng, Maria Attarian, Krzysztof Marcin Choromanski, Adrian Wong, Stefan Welker, Federico Tombari, Aveek Purohit, Michael S Ryoo, Vikas Sindhwani, Johnny Lee, et al. Socratic models: Composing zero-shot multimodal reasoning with language. In The Eleventh International Conference on Learning Representations, 2022. 2, 7", + "[78] Bowen Zhang, Xiaojie Jin, Weibo Gong, Kai Xu, Zhao Zhang, Peng Wang, Xiaohui Shen, and Jiashi Feng. Multimodal video adapter for parameter efficient video text retrieval. arXiv preprint arXiv:2301.07868, 2023. 3", + "[79] Qingru Zhang, Minshuo Chen, Alexander Bukharin, Pengcheng He, Yu Cheng, Weizhu Chen, and Tuo Zhao. Adaptive budget allocation for parameter-efficient finetuning. arXiv preprint arXiv:2303.10512, 2023. 3", + "[80] Zhang Zhang and Dacheng Tao. Slow feature analysis for human action recognition. IEEE transactions on pattern analysis and machine intelligence, 34(3):436-450, 2012. 3", + "[81] Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. In Proceedings of the AAAI Conference on Artificial Intelligence, 2018. 2, 3, 4", + "[82] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 
2" + ], + "bbox": [ + 78, + 90, + 468, + 641 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "14397", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/e81a3abe-11ba-459a-b183-aa765dce41a0_model.json b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/e81a3abe-11ba-459a-b183-aa765dce41a0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c47c25d2ff8eac997ecf9ca3965aa63c3dac81ba --- /dev/null +++ b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/e81a3abe-11ba-459a-b183-aa765dce41a0_model.json @@ -0,0 +1,2446 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.138, + 0.13, + 0.834, + 0.176 + ], + "angle": 0, + "content": "A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames" + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.203, + 0.852, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.327, + 0.314, + 0.343 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.359, + 0.474, + 0.707 + ], + "angle": 0, + "content": "Understanding long, real-world videos requires modeling of long-range visual dependencies. To this end, we explore video-first architectures, building on the common paradigm of transferring large-scale, image-text models to video via shallow temporal fusion. However, we expose two limitations to the approach: (1) decreased spatial capabilities, likely due to poor video-language alignment in standard video datasets, and (2) higher memory consumption, bottlenecking the number of frames that can be processed. To mitigate the memory bottleneck, we systematically analyze the memory/accuracy trade-off of various efficient methods: factorized attention, parameter-efficient image-to-video adaptation, input masking, and multi-resolution patchification. Surprisingly, simply masking large portions of the video (up to \\(75\\%\\)) during contrastive pre-training proves to be one of the most robust ways to scale encoders to videos up to 4.3 minutes at 1 FPS. Our simple approach for training long video-to-text models, which scales to 1B parameters, does not add new architectural complexity and is able to outperform the popular paradigm of using much larger LLMs as an information aggregator over segment-based information on benchmarks with long-range temporal dependencies (YouCook2, EgoSchema)." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.734, + 0.21, + 0.749 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.759, + 0.47, + 0.88 + ], + "angle": 0, + "content": "Long-video understanding requires modeling of the temporal dynamics and long-range visual dependencies of real-world scenes [63, 64]. However, capturing long-range visual content is challenging, even when equipped with large language models. 
In this paper, we overcome hardware memory limitations and demonstrate how to extend video encoders to directly process minutes-long visual content using language grounding, and simple, established techniques" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.327, + 0.868, + 0.564 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.569, + 0.895, + 0.626 + ], + "angle": 0, + "content": "Figure 1. Two main training steps: (1) training a video encoder via Noise Contrastive Estimation and (2) using this frozen video encoder with a pre-trained, frozen LM and visual adapter layers for video-to-text generation (e.g., video summarization and Q/A)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.638, + 0.895, + 0.699 + ], + "angle": 0, + "content": "without additional architectural complexity [24, 64]. We focus on long videos through the lens of language, assessing our models on the widely applicable tasks of visual summarization and question-answering." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.702, + 0.895, + 0.868 + ], + "angle": 0, + "content": "Recent work on vision-language models have yielded impressive results, predominantly focusing on understanding images or short clips of 16 frames or less [1, 13, 30, 72, 73]. This work recycles strong pre-trained image encoders, performs late temporal fusion [1, 71, 73], and employs mostly-frozen, powerful LLMs. The lack of video-first encoders, equipped with early temporal aggregation, may handicap the ability to process complex visual dependencies, and this is usually reflected in prior work's focus on short video benchmarks (\\(< 30\\) seconds) in which sixteen frames are sufficient for competitive performance [4, 29]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.895, + 0.902 + ], + "angle": 0, + "content": "In this work, we systematically explore video-first models starting from a standard image-language recipe using" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.888, + 0.206, + 0.9 + ], + "angle": 0, + "content": "*Equal contribution." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "14386" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.468, + 0.182 + ], + "angle": 0, + "content": "two-step training and large pre-trained LMs (Figure 1; [1]). This baseline enables us to start from a demonstrably scalable, simpler-to-tune, widely-used recipe that performs competitively [15, 30]. Through our analysis, we are able to scale this method in a memory-efficient manner to longer sequences of frames, up to 4.3 minutes of video at 1 FPS." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.185, + 0.468, + 0.32 + ], + "angle": 0, + "content": "We first explore video-first models on short-video benchmarks (MSR-VTT [67], VATEX [60], YouCook2 [81], ActivityNet [28]) and compare against the SoTA VideoCoCa model [71]. We show that simple joint space-time attention significantly improves performance over frame-level encodings on benchmarks with rich temporal dependencies (YouCook2, VATEX). Overall, our models are able to reach VideoCoCa performance, while requiring fewer parameters and lower frame resolution." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.468, + 0.672 + ], + "angle": 0, + "content": "This performance gain incurs extra compute and memory costs that grow quadratically with the video length. 
To address this, we provide one of the first systematic analyses of the memory/accuracy pareto-front of popular memory-efficient methods; this includes factorized attention, parameter-efficient image-to-video adaptation, input masking, and multi-resolution patchification. Through this analysis, we find that among all these options, simple token masking (up to \\(75\\%\\)) during contrastive pre-training incurs only a \\(1\\%\\) Recall@1 drop on zero-shot text-video retrieval, and no drop in zero-shot video captioning. At the same time, such high masking offers 2-3x memory savings and allows us to generalize to longer video contexts. The alternatives we explore (e.g., efficient backbone architectures, more sophisticated TubeViT-style patchification [49]), do not maintain the same robustness against noisy video inputs and present a \\(25\\%\\) relative decrease in performance for text-video retrieval on challenging benchmarks (YouCook2, VATEX). Finally, although parameter-efficient methods [21, 22] fail to adapt image encoders to video-first models without suffering performance drops, we find that they can adapt video models trained on short contexts (e.g., 16 second videos) to longer temporal horizons." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Based on the above learnings, we extend our best performing short-video encoder to longer contexts of 256 frames (4.3 minutes at 1 FPS). We use the full-length videos of HowTo100M [42] accompanied by LLM-generated summaries based on the ASR to further contrastively train our LONGViVIT while masking \\(75\\%\\) of the input video tokens and freezing most parameters of the encoder. LONGViVIT-to-text (\\(\\sim\\)1B parameters) is able to outperform modular methods that use LLM assistance and PALI-3 [9] for frame captioning on temporally rich benchmarks (YouCook2, EgoSchema). Even modular methods that consider frame selection (SeViLA [74]) or an oracle segmentation of the video for localizing and captioning key events (on YouCook2) cannot reach LONGViVIT's performance. An interesting byproduct of our work is that we can glean" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.167 + ], + "angle": 0, + "content": "which video-language benchmarks have strong temporal dependencies, and thus are suitable for testing long video models; we find that papers often use benchmarks in which short video or even blind models perform well [5, 41, 67]. In short, we provide the following contributions:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.169, + 0.892, + 0.259 + ], + "angle": 0, + "content": "- We explore the memory/accuracy pareto-frontier of video-first vision-language models, and systematically evaluate many architectural, data, and training alternatives. In the end, we identify a simple recipe that enables scaling to 4.3 minutes at 1 FPS, many times longer than comparable video-language models [1, 71]." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.26, + 0.892, + 0.319 + ], + "angle": 0, + "content": "- We identify short and long video benchmarks with substantial temporal dependencies, for which we demonstrate that the traditional image-first, late-temporal fusion recipe is convincingly weaker than a video-first approach." 
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.321, + 0.892, + 0.409 + ], + "angle": 0, + "content": "- Finally, we compare our long video models to a variety of strong baselines and show competitive performance with far fewer parameters; this includes baselines that use LLM-based aggregation over visual captions, and we quantitatively evaluate this common approach for the first time on standard video benchmarks." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.169, + 0.892, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.42, + 0.642, + 0.435 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.446, + 0.892, + 0.597 + ], + "angle": 0, + "content": "We base our recipes on [1, 30], which provide a strong two-step video-language recipe that leverages pre-trained LLMs and works at scale. Similar work at smaller scale has additionally included captioning losses [32, 76], more contrastive losses [10, 38, 43, 66], masking/masked autoencoding [15, 16, 18, 19, 33, 35, 40, 55], and combinations thereof [13, 23, 54, 58, 61, 72, 73, 75, 82]. This work focuses on image-text modeling and extends to \\(< 30\\) seconds via image-to-video transfer, selective fine-tuning, or temporal fusion of frame encodings [1, 71, 73]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.599, + 0.892, + 0.809 + ], + "angle": 0, + "content": "A volume of work focuses on video-first learning. This includes some of the very early work in image-to-video kernel inflation [6, 53, 56], transformer-based video architectures [2, 3, 37], image-to-video parameter-efficient adaptation [7, 36, 46], and multiple spatiotemporal resolutions along different network paths [14, 39, 68, 70]. These have still only been demonstrated on short videos, so other works have broached the challenge of temporal scalability: [24, 51, 64] propose alternative encoders, and [27, 48, 59] propose more exotic attention mechanisms. TubeViT [49] proposes multi-granularity patchification. We systematically dissect what works and scales among some of these alternatives, electing options that enable us to re-use strong pre-trained models and use standard, more easily-tuned architectures." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Specifically in video-to-text generation, approaches that handle longer videos are very limited and mostly target images or short videos [15, 31, 61]. A dominant approach is to summarize frames and aggregate information via LLMs [31, 34, 62, 77]. To the best of our knowledge, we are the first to attempt to train large-scale video" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "14387" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.138 + ], + "angle": 0, + "content": "to-text models on longer sequences of frames and directly test them against LLM-assisted modular methods on challenging temporal benchmarks [41, 81]." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.149, + 0.364, + 0.166 + ], + "angle": 0, + "content": "3. 
The Video-to-Text Architecture" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.175, + 0.468, + 0.251 + ], + "angle": 0, + "content": "We base our approach on the successful two-step recipe that combines pre-trained vision and language models [e.g., 1, 30, 72, 73] as shown in Figure 1: (1) we first pre-train a vision encoder, and then (2) fuse the frozen vision representations into a pre-trained, frozen LM." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.259, + 0.436, + 0.276 + ], + "angle": 0, + "content": "3.1. Video-Language Contrastive Pre-training" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.282, + 0.468, + 0.463 + ], + "angle": 0, + "content": "Following common practice [1, 30], we use a dual vision-language architecture with a Noise Contrastive Estimation (NCE) loss [17, 45, 65] to pre-train our vision encoder, similar to CLIP [50], ALIGN [26] and VideoCLIP [66]. Both encoders are transformers [57]: a BERT-medium (77M) or base (117M) language encoder and ViT-Base (86M parameters) or Large (307M parameters) vision encoder. On the language side, caption representations are computed by averaging across the corresponding token representations. On the vision side, video frames are patched into a sequence of visual tokens, fed into a vision encoder, and then average pooled to produce a final video representation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.464, + 0.468, + 0.674 + ], + "angle": 0, + "content": "Most prior larger-scale video-language models use pretrained image encoders and patchify frames individually via 2D convolutions [e.g., 1, 66, 71]. Instead, we create spatiotemporal tubelets via 3D convolutions as done in recent vision-only models [2, 49, 55]. Using 3D tubelets instead of flat patches has the dual advantage of higher input compression and more explicit temporal contextualization; our early experiments yielded improved performance. The tubelet embedding sequence is then flattened, added to learnable positional embeddings, and fed into the vision encoder. The vision encoder uses spatio-temporal attention as in ViViT [2]: Joint space-time attention does not add any new parameters to vanilla image ViT [12], facilitating transfer between image and video models." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Training a large-scale transformer-based video encoder can be challenging because self-attention across thousands of visual tokens is both compute and memory intensive. Memory bottlenecks a model in two ways: (1) limiting the number of frames, and (2) limiting the contrastive batch size during training, negatively impacting performance. To address (2), we use a pre-trained image encoder trained with large batch sizes, and further tune it on videos, instead of jointly training from scratch on images and videos. For initializing the 3D convolution, we repeat the pre-trained weights across the temporal dimension similarly to [2] (see Appendix A). During video-language pre-training, we maintain different embedding paths for images vs. videos: images are embedded with the original 2D convolution and videos with a separate 3D convolution (no weight sharing)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.702, + 0.108 + ], + "angle": 0, + "content": "3.2. 
Video-to-Text Tuning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.114, + 0.892, + 0.296 + ], + "angle": 0, + "content": "We follow prior work [e.g., 1, 72, 73] by plugging the frozen pre-trained vision encoder into a frozen pre-trained LM. We first temporally mean pool the video representations to keep a fixed number of tokens independently of the number of frames and next use a randomly initialized Perceiver-resampler [25] to project the representations to the LM embedding space (Appendix A). We add new randomly initialized cross-attention layers at each layer of the LM to ground generation on the visual content. We train the new layers and Perceiver resampler with a standard auto-regressive video captioning loss: \\(-\\log p(w_t|w < t; \\mathcal{V})\\), where \\(w_t\\) is its \\(t^{th}\\) token, and \\(\\mathcal{V}\\) is the video representation." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.309, + 0.866, + 0.327 + ], + "angle": 0, + "content": "4. Memory-Efficient Encoder Design Space" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.335, + 0.892, + 0.41 + ], + "angle": 0, + "content": "Device memory is a key bottleneck for video training with joint space-time attention. To overcome this, we explore four broad categories of solutions: (1) efficient attention, (2) parameter-efficient image-to-video adaptation, (3) input token masking, and (4) multi-resolution patchification." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.413, + 0.892, + 0.625 + ], + "angle": 0, + "content": "1. Attention mechanism. Factorized attention [2, 3] separates the temporal and spatial dimensions over which self-attention is applied, reducing both memory and computational costs. However, this modification introduces a new temporal block within each transformer layer making initialization and model tuning more challenging. In contrast to [2], that initializes the new blocks with zeroes, we find that we achieve best performance when initializing the temporal blocks with the same self-attention weights of ViT. However, we add a gating mechanism which acts as a residual connection between the self-attention blocks: \\( h = h + \\tanh(\\alpha) h_{temporal} \\). Here, \\( \\alpha \\) is a trainable parameter initialized to zero, that helps maintain the capabilities of the original ViT during training." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.627, + 0.892, + 0.838 + ], + "angle": 0, + "content": "2. Parameter-efficient adaptation. We explore using parameter-efficient methods from NLP [8] to adapt image encoders to video, while only tuning a small percentage of model parameters. Most prior work adapts image-based models by freezing an image backbone and adding late, trainable temporal-fusion layers [10, 71, 78]. In contrast, we explore ways to use pre-trained image encoders and adapt them to video-first architectures [7, 36, 46]. Inspired by the success of parameter-efficient adaptation in NLP [79], we consider using MLP Adapters [21] and LoRA [22] (details in Appendix A). We also explore tuning only temporal self-attention blocks [7], effectively as adapter layers, in factorized attention. In all variants, we still tune the video-specific 3D patch convolution." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "3. Token masking. Most existing work samples videos at a fixed frames per second (FPS) rate [e.g., 1, 2, 55, 74]. 
However, semantics required for many video-language tasks vary slowly in the temporal dimension [80] and videos" + }, + { + "type": "list", + "bbox": [ + 0.498, + 0.413, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "14388" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.242 + ], + "angle": 0, + "content": "present high degree of redundancy between consecutive frames [55]. We explore ways to sparsely sample the video input to reduce the number of input visual tokens. Specifically, we test random masking of input tubelet embeddings. Since consecutive frames are largely redundant, the same semantic signals could potentially be extracted even with high masking rates. For example, [55] masks up to \\(95\\%\\) of the input video to reach optimal performance on the task of video-masked autoencoding. We demonstrate similar results in a video-language setting." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.242, + 0.474, + 0.41 + ], + "angle": 0, + "content": "4. Multi-resolution patchification. Finally, we test a simple approach to reduce redundancy in videos via more coarse-grained patchification in the temporal or spatial dimension, as commonly done in multiple-view video models [14, 39, 70]. However, this decreases frame resolution, and may lose fine-grained information. As a result, we also experiment with TubeViT [49] variant that combines flat patches and tubelets of different granularity to mitigate information loss. Following [49], we use four different convolution kernels that can encode either coarse-grained temporal or spatial information; details are in Appendix A." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.415, + 0.322, + 0.43 + ], + "angle": 0, + "content": "5. Datasets and Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.44, + 0.473, + 0.712 + ], + "angle": 0, + "content": "For contrastive pre-training, we use: (1) 27M video-text pairs (VTP) as described in [1], (2) HowTo100M [42] (HT100M; 100M instructional YouTube clips aligned with ASR using their timestamps, called HowTo100M Clips), and (3) VideoCC3M [44] (3M video-text pairs based on Conceptual Captions [52]). Unfortunately, we find the text-video alignment in VideoCC3M to be of poor quality; instead, we use a modified variant with generated pseudolabeled captions of every video by PALI [9] (see Appendices B, C). To pre-train with longer videos, we use a long version of HowTo100M (referred to as HowTo100M Summary) consisting of (1) the full-length videos with an average duration of 6.5 minutes and (2) their textual summaries generated by automatically cleaning and summarizing the ASR transcripts using an LLM [20]. We also include the image datasets of [1]. For video-to-text tuning, we use the same mixture of datasets but exclude HowTo100M Clips, since the noisy video-text alignments hurt performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.712, + 0.472, + 0.834 + ], + "angle": 0, + "content": "We report text-video retrieval and captioning results on short video benchmarks, with average video length \\(\\leq 30\\) seconds: MSR-VTT [67], YouCook2 [81], ActivityNet Captions [28], and VATEX [60]. To evaluate performance on longer videos, we consider video summarization on full-length versions of YouCook2 and ActivityNet Captions, with a video duration of up to 5 minutes, and multiple-choice video question answering (QA) on EgoSchema [41]." 
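To make the random token masking described in Section 4 concrete, below is a minimal sketch of dropping a fraction of flattened tubelet embeddings before they enter the vision encoder. This is not the authors' implementation; the function name, tensor shapes, and use of PyTorch are illustrative assumptions.

```python
import torch

def random_tubelet_masking(tokens: torch.Tensor, mask_ratio: float) -> torch.Tensor:
    """Keep a random subset of tubelet embeddings (illustrative sketch).

    tokens: (batch, num_tokens, dim) flattened tubelet embeddings
    mask_ratio: fraction of tokens to drop, e.g. 0.75
    Returns the kept tokens with shape (batch, num_kept, dim).
    """
    batch, num_tokens, dim = tokens.shape
    num_kept = max(1, int(num_tokens * (1.0 - mask_ratio)))
    # Sample an independent random permutation per example; keep the first num_kept indices.
    noise = torch.rand(batch, num_tokens, device=tokens.device)
    keep_idx = noise.argsort(dim=1)[:, :num_kept]
    return torch.gather(tokens, 1, keep_idx.unsqueeze(-1).expand(-1, -1, dim))

# Example (hypothetical sizes): a clip patchified into 4096 tubelets, masked at 75%, keeps 1024 tokens.
x = torch.randn(2, 4096, 768)
print(random_tubelet_masking(x, mask_ratio=0.75).shape)  # torch.Size([2, 1024, 768])
```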
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.845, + 0.285, + 0.862 + ], + "angle": 0, + "content": "6. Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.472, + 0.903 + ], + "angle": 0, + "content": "In Section 6.1, we describe our results evaluating alternatives in memory-efficient video encoder design; options de" + }, + { + "type": "table", + "bbox": [ + 0.499, + 0.089, + 0.895, + 0.184 + ], + "angle": 0, + "content": "
|  | MSR-VTT |  | VATEX |  | YC2 |  | AN |  |
|  | T2V | V2T | T2V | V2T | T2V | V2T | T2V | V2T |
| Joint ST-ViViT | 39.6 | 38.1 | 23.8 | 26.3 | 12.3 | 13.6 | 6.7 | 6.4 |
| Factorized ST-ViViT | 40.2 | 36.9 | 25.3 | 25.4 | 11.6 | 12.7 | 6.6 | 7.4 |
| Avg Frame-level | 39.3 | 34.8 | 24.8 | 25.0 | 9.1 | 7.9 | 6.8 | 7.1 |
| Att-pool Frame-level | 38.4 | 37.5 | 21.9 | 26.1 | 9.0 | 8.9 | 6.1 | 6.2 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.185, + 0.892, + 0.214 + ], + "angle": 0, + "content": "Table 1. Text-video retrieval results (\\% Recall@1) when considering different visual backbones." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.231, + 0.892, + 0.276 + ], + "angle": 0, + "content": "scribed in Section 4. For this analysis, we use ViT-B/BERT-medium, with training details in Appendix B and ablations on experimental design in Appendix C." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.277, + 0.892, + 0.382 + ], + "angle": 0, + "content": "In Section 6.2, we combine our most competitive design choices from 6.1 and test our models on short and long video understanding benchmarks. We scale our best model variants to ViT-L/BERT-base with a 400M (or 1B) language decoder. We test our short video models on text-video retrieval and video captioning, and our long video models on video summarization and QA on 256-frame videos." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.383, + 0.892, + 0.43 + ], + "angle": 0, + "content": "In Section 6.3, we share our experience working across short and long video benchmarks [5, 11, 41, 60, 67], offering insights about which ones yield robust temporal signal." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.439, + 0.854, + 0.456 + ], + "angle": 0, + "content": "6.1. Exploration of Memory-Efficient Designs" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.462, + 0.892, + 0.569 + ], + "angle": 0, + "content": "We explore memory-efficient methods to train video-first encoders as described in Section 4. We first consider short video inputs of 16 frames at 1 FPS and report peak train-time memory consumption vs. performance on text-video retrieval on short video benchmarks [5]. Then, we test whether our main findings hold for longer inputs (128+ frames) on video summarization on full-length YouCook2." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.573, + 0.892, + 0.8 + ], + "angle": 0, + "content": "Base architectures. We explore the memory/accuracy trade-off of different visual backbones in Table 1: ViViT with joint space-time attention (i.e., Joint ST-ViViT), ViViT with factorized attention (i.e., Factorized ST-ViViT) [2], and frame-level (ViT-based) image encodings with average or attentional pooling ('att-pool') [1, 71]. Different methods perform similarly, especially on MSR-VTT and ActivityNet (AN). Interestingly, attentional pooling on top of frame-level encodings does not improve performance. ViViT with either joint or factorized attention performs best and presents higher gains for YouCook2 (YC2), the more temporally challenging benchmark [6.3]. In contrast to prior work [e.g., 10, 71] which tests frozen image-to-video transfer and claims joint attention to be inferior, we find it to be competitive in this fully fine-tuned setting." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.804, + 0.892, + 0.867 + ], + "angle": 0, + "content": "Architectures and token masking. We now test robustness of backbones when masking part of the input tubelets (0-75%). We report Recall@1 on text-to-video retrieval for YouCook2 and VATEX1 per backbone for different masking" + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.875, + 0.892, + 0.901 + ], + "angle": 0, + "content": "1We do not observe significant sensitivity to input masking for MSR-VTT and ActivityNet Captions across all configurations (Section 6.3)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "14389" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.089, + 0.468, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.089, + 0.872, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.253, + 0.895, + 0.296 + ], + "angle": 0, + "content": "Figure 2. Trade-offs between performance (% text-to-video Recall@1; y axis) and train-time memory consumption (x axis) for different backbones (joint space-time (JST), factorized space-time (FST), and drame-level encodings) with random input masking (0% up to 75%) or parameter-efficient methods for training (Adapters, LoRA, factorized temporal (FST) adaptation; lower opacity)." + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.32, + 0.429, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.482, + 0.472, + 0.553 + ], + "angle": 0, + "content": "Figure 3. Difference (\\%) in memory consumption for different model scales: (ViT-B vs ViT-L). We also report performance drop of efficient methods presented in Figure 2 in comparison with the vanilla approach (i.e., no input masking and full fine-tuning) at different model scales to test whether behavior is similar." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.573, + 0.47, + 0.664 + ], + "angle": 0, + "content": "ratios in Figure 2. Joint space-time attention (JST) is robust against noise from masking up to \\(75\\%\\) during pre-training. The same does not hold for frame-level encodings and factorized attention (FST), where performance drops consistently as we increase masking. We conclude that JST can better handle noisy inputs and use it in further exploration." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Parameter-efficient adaptation. We next report performance of parameter-efficient image-to-video adaptation in Figure 2. We consider (1) JST with (a) MLP Adapters at every layer of the encoder, (b) LoRA with rank decomposition matrices in the self-attention and feed-forward transformer blocks, and (2) factorized temporal adaptation where we tune the temporal self-attention. No adaptation method can reach the memory savings provided by high input masking, since we tune parameters depthwise and gradient computation still requires backpropagation through the model. At the same time, we see significant performance drop, suggesting that adaptation of spatial-only models to the temporal dimension cannot be sufficiently addressed in semifrozen fashion. Comparing parameter-efficient methods, we find MLP Adapters to be more competitive than LoRA," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.321, + 0.892, + 0.367 + ], + "angle": 0, + "content": "which is now canonical for LLMs. We hypothesize that LoRA is successful for tuning very small portions of the network and performing \"easier\" in-modality transfer." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.369, + 0.893, + 0.565 + ], + "angle": 0, + "content": "Adaptation at scale. We next scale from ViT-B/86M to ViT-L/307M in Figure 3 and test whether observations hold with different model scales. We present the \\(\\%\\) memory increase from base to large (left bar set) and \\(\\%\\) performance decrease of each method at each scale\\(^2\\). 
Joint ST exhibits a similar memory pattern to frame-level, while leading to smaller accuracy drops, whereas factorized ST presents significant memory overhead with model scale due to the extra temporal parameters. For this reason, we exclude factorized ST from further experimentation. Finally, parameter-efficient methods are unable to achieve competitive performance at both model scales, although their memory requirements scale better with model size." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.568, + 0.893, + 0.855 + ], + "angle": 0, + "content": "Multi-resolution patchification. Given the outsized memory impact of input token count in Figure 4, we additionally analyze: (1) coarse-grained patchification in the temporal (convolution over 4 instead of 2 frames) and/or spatial (convolution over \\(32 \\times 32\\) instead of \\(16 \\times 16\\) pixel spaces) dimension, and (2) the TubeViT [49] approach of multiple tube kernels of different spatiotemporal size and strides. For all benchmarks, masking the input at high ratios while maintaining a fine granularity of tubelets decreases performance significantly less than other input processing methods. Temporal coarse-grained patchification negatively affects benchmarks with richer temporal dependencies (i.e., YouCook2, VATEX) more than spatial. The opposite trend holds for datasets depending on spatial understanding (i.e., MSR-VTT, ActivityNet Captions3). TubeViT acts as the middle ground between the two by employing multiple kernels, with some performance degradation across all benchmarks. However, it is not able to alleviate the negative effects caused by considering coarser" + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.863, + 0.892, + 0.888 + ], + "angle": 0, + "content": "2Performance drop for factorized ST is omitted since the variant without masking leads to out of memory issues." + }, + { + "type": "page_footnote", + "bbox": [ + 0.518, + 0.888, + 0.86, + 0.901 + ], + "angle": 0, + "content": "3Omitted from Figure 4 but follows same patterns as MSR-VTT." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.863, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "14390" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.073, + 0.099, + 0.346, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.101, + 0.621, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.102, + 0.898, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.212, + 0.895, + 0.256 + ], + "angle": 0, + "content": "Figure 4. Trade-offs between performance (text-to-video Recall@1; y axis) and memory consumption (x axis) for input sampling methods: (1) high input masking ratios (0% to 75%) with joint space-time attention, (2) coarse-grained temporal (Coarse temp) and/or spatial (Coarse space) patchification with a fixed kernel and TubeViT which samples parts of the video with multiple 3D kernels of different granularity." + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.259, + 0.852, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.386, + 0.895, + 0.43 + ], + "angle": 0, + "content": "Figure 5. Scaling memory-efficient methods to more frames (i.e., 128 frames) for ViViT-B and variants. 
We measure performance for video-to-text summarization on the full-length YouCook2 videos via Rouge-L (color-coded) while keeping track of memory consumption during short-to-long video contrastive tuning (\\(x\\)-axis) and video-to-text tuning (\\(y\\)-axis)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.454, + 0.471, + 0.515 + ], + "angle": 0, + "content": "grained information and presents higher memory requirements due to the multiple convolutions. Overall, we find that high masking with Joint ST and small tubelets yields the strongest memory/performance curves." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.526, + 0.47, + 0.721 + ], + "angle": 0, + "content": "Scaling to longer videos. We now test the best methods from Figure 4 on 128 input frames (32.7k visual tokens). We select methods that are within a memory budget (red vertical lines) and would fit on a 16GB device when expanded to long videos (128+ frames). We contrastively fine-tune [3.1] our best performing video model (i.e., Joint ST referred to as SHORTVIVIT) on sequences of 128 frames on HowTo100M Summary [5], as detailed in Appendix B. We refer to this model as LONGVIVIT. Finally, we fine-tune LONGVIVIT for text generation (Section 3.2) on the full-length YouCook2, and report Rouge-L in Figure 5, measuring memory consumption during both long-context contrastive (\\(x\\)-axis) and video-to-text (\\(y\\)-axis) tuning." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.723, + 0.471, + 0.86 + ], + "angle": 0, + "content": "Validating our previous results, IMAGEViT (frame-level encodings) trained on longer videos with \\(75\\%\\) masking significantly under-performs video-first models (10 R-L drop). SHORTViVIT without further HT100M Summary training performs better than IMAGEViT, but cannot match models adapted to longer videos. LONGViVIT improves performance by 1.8 Rouge-L points over SHORTViVIT. Comparing input masking with coarser-grained patchification provides similar insights to the previous paragraph." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.454, + 0.892, + 0.742 + ], + "angle": 0, + "content": "Finally, we test MLP Adapters [21] for tuning SHORT-VIViT to longer videos and observe no performance drop in comparison with full fine-tuning. This provides further evidence that parameter-efficient methods can be used for \"easier transfers\" but not temporal adaptation of spatial-only models. One downside of MLP Adapters is that it increases parameter count during video-to-text tuning (y-axis in Figure 5). Thus, we also experiment with contrastively tuning only the last four layers of the model. With this, we observe a further \\(3\\mathrm{x}\\) decrease in memory, since we tune the network widthwise and excise early layer gradient computation. At the same time, there is no memory increase for video-to-text and no performance degradation. We conclude that this combination (high input masking and tuning the last layers) is an effective setting for longer video adaptation. Given the observed robustness to masking, to further decrease video-to-text memory, we also mask \\(30\\%\\) of the input video during training and inference without observing any drop in summarization performance (see Appendix C)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.758, + 0.64, + 0.773 + ], + "angle": 0, + "content": "6.2. Main Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Short video benchmarks. 
We present our main results on short video benchmarks in Table 2. We use ViT-L with BERT-base for contrastive pre-training (Section 3.1) and a 400M frozen LM for video-to-text tuning (Section 3.2). Our entire video-to-text model accounts for \\(\\sim 900\\mathrm{M}\\) parameters, although we additionally test scaling the frozen LM to 1B parameters (\\(\\sim 1.5\\mathrm{B}\\) total count). We report Recall@1 for zero-shot text-video retrieval and CIDEr for zero-shot and" + }, + { + "type": "page_footnote", + "bbox": [ + 0.094, + 0.875, + 0.458, + 0.887 + ], + "angle": 0, + "content": "4We start from IMAGEViT trained on short videos with no masking." + }, + { + "type": "page_footnote", + "bbox": [ + 0.094, + 0.887, + 0.458, + 0.9 + ], + "angle": 0, + "content": "5Using the same fine-grained SHORTVIViT model for initialization." + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.875, + 0.458, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "14391" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.079, + 0.089, + 0.894, + 0.226 + ], + "angle": 0, + "content": "
|  | MSR-VTT Zero-shot |  | FT | VATEX Zero-shot |  | FT | YouCook2 Zero-shot |  | FT | ActivityNet Zero-shot |  | FT |
|  | T2V/V2T | C1/C2 | C1 | T2V/V2T | C1/C2 | C1 | T2V/V2T | C1/C2 | C1 | T2V/V2T | C1/C2 | C1 |
| IMAGEViT-L | 30.9/41.6 | 24.6/25.1 | 63.6 | 36.2/42.9 | 37.9/39.4 | 61.1 | 18.2/16.8 | 14.5/16.5 | 95.9 | 20.6/18.2 | 16.3/17.7 | 41.1 |
| SHORTViT-L | 31.9/38.9 | 32.7/32.9 | 63.1 | 37.8/42.8 | 43.6/43.0 | 67.5 | 20.4/20.5 | 21.0/22.1 | 131.9 | 21.3/18.9 | 25.2/26.1 | 44.8 |
| EffSHORTViT-L | 29.9/38.3 | 33.8/33.9 | 63.8 | 34.4/42.7 | 41.3/42.7 | 64.7 | 20.5/20.3 | 21.1/21.7 | 127.1 | 20.1/17.7 | 27.0/26.5 | 41.1 |
| VideoCoCa-L [71] | 33.3/- | 24.3 | - | - | - | - | 18.9/- | 20.7 | - | 31.5*/- | 17.4 | - |
| VideoCoCa-2.1B | 34.3/64.7 | 27.1 | 73.2 | 53.2/73.6 | 22.8 | 77.8 | 20.3/- | 34.3 | 128.0 | 34.5*/33.0* | 19.3 | 39.3 |
| Flamingo-3B [1] | - | - | - | - | 40.1 | - | - | 55.8 | - | - | - | - |
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.076, + 0.227, + 0.895, + 0.326 + ], + "angle": 0, + "content": "Table 2. We present three model variants: IMAGEViT-L, that uses frame-level encodings with a late temporal fusion trained on images and videos, SHORTViT-L, our best performing video-first model with joint space-time attention, and Efficient SHORTViT-L (EffSHORTViT-L) where we apply \\(75\\%\\) train-time masking for 3x memory savings. We also report performance for SoTA image-first models: VideoCoCa-L and Flamingo-3B, although they are bigger and not directly comparable. We report Recall@1 for zero-shot text-to-video (T2V) and video-to-text (V2T) retrieval, and CIDEr for zero-shot and fine-tuned (FT) captioning when considering a 400M (C1) or 1B (C2) frozen LM for generation. ActivityNet retrieval results marked with \\(*\\) are not directly comparable, as these models uniformly sample frames, whereas we use the first frames of the long video with a fixed FPS of 1 to match experimental settings across benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.341, + 0.47, + 0.417 + ], + "angle": 0, + "content": "fine-tuned video captioning. We consider three model variants: frame-level encodings IMAGEViT, SHORTViVIT, and SHORTViVIT with \\(75\\%\\) masking that uses 2-3x less memory (referred to as Efficient SHORTViVIT). We also report results for VideoCoCa [71] and Flamingo [1]\\(^6\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.418, + 0.471, + 0.719 + ], + "angle": 0, + "content": "Our results remain consistent with our earlier observations. Contextualizing only intra-frame dependencies coupled with late temporal fusion (IMAGEVIT) leads to inferior performance for retrieval and captioning on benchmarks with richer temporal dependencies (YouCook2, VATEX) but performs better on retrieval on MSR-VTT which relies on spatial understanding. Video-first architectures further tuned on video datasets (substantially noisier than curated image ones) improve temporal capabilities at the expense of spatial. For Efficient SHORTVIVIT, we find that masking \\(75\\%\\) of the input video causes a performance drop: an average of \\(1\\%\\) absolute difference on zero-shot retrieval and no significant difference on zero-shot captioning across all benchmarks. The efficient model still performs similarly or better than IMAGEVIT, especially on captioning and temporally rich benchmarks (e.g., YouCook2, VATEX), while consuming significantly less memory. Finally, when scaling the frozen LM component from 400M to 1B \\((\\mathrm{C}1\\rightarrow \\mathrm{C}2)\\) for zero-shot video-to-text generation, we observe moderate improvements across benchmarks and variants." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.47, + 0.827 + ], + "angle": 0, + "content": "We compare our results against large image-based models with SoTA performance on video benchmarks (second block of Table 2). Although results are not directly comparable due to different experimental settings, we are competitive and achieve even better results for temporally rich benchmarks (i.e., YouCook2) on text-video retrieval for models of similar parameter count7. Moreover, our models" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.341, + 0.892, + 0.432 + ], + "angle": 0, + "content": "significantly outperform VideoCoCa on most video captioning benchmarks even when considering their much larger versions in the zero-shot setting. 
Finally, when fine-tuning our video-to-text models with the 400M LM, we are again able to match and surpass the performance of the larger VideoCoCa-2.1B in two out of four benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.436, + 0.892, + 0.542 + ], + "angle": 0, + "content": "Long video understanding. We further tune LONG-VIViT-L on 256-frame HT100M Summary videos and evaluate zero-shot/fine-tuned summarization (YouCook2, ActivityNet) and QA (EgoSchema released subset); this is shown in Table 3. We additionally report results of LONG-VIViT on Perception Test [47] in Appendix D, where videos are short but can benefit from higher FPS." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.542, + 0.893, + 0.829 + ], + "angle": 0, + "content": "We consider two families of models. 1. Models that take as input 256 frames (first block of Table 3): IMAGEVIT and SHORTVIVIT pre-trained on 16-frame clips, and LONG-VIVIT further trained on 256-frame clips. 2. Modular approaches from prior work (second block of Table 3): (a) SeViLA Localization [74] for localizing important frames in the long video given a textual query which are then fed into SHORTVIVIT for performing the task\\(^8\\), and (b) the popular paradigm of captioning video segments or frames and using an LLM to aggregate information and form coherent summaries or answer questions [31, 34, 77]. We try the latter approach with IMAGEVIT and SHORTVIVIT, generating captions over 16-second video segments and then feeding the captions to the September 2023 release of Bard, a much larger LLM than the ones used in previous results. We caption clips using uniform video segmentation (every 16 seconds) or an oracle segmentation when available (i.e., we consider ground-truth start and end timestamps for different events within ActivityNet and YouCook2 videos). We" + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.839, + 0.47, + 0.887 + ], + "angle": 0, + "content": "6Models are not directly comparable due to different pre-training datasets, model sizes, training regimes, and input resolution. For instance, [71] fully fine-tune the LM and report results for \\(576 \\times 576\\) frame resolution instead of \\(256 \\times 256\\)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.888, + 0.469, + 0.9 + ], + "angle": 0, + "content": "Video-text retrieval results on ActivityNet Captions are not comparable" + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.839, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.84, + 0.892, + 0.864 + ], + "angle": 0, + "content": "ble since we are only considering the first 16 seconds of the video, whereas [71] uniformly sample frames from the entire video (\\(\\sim\\)180 seconds)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.864, + 0.892, + 0.901 + ], + "angle": 0, + "content": "8We select 16 frames using the pre-trained localizer provided by [74]. For video summarization, we use synthetic summaries of the video generated by PALI+Bard as the textual query for retrieving frames." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "14392" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.102, + 0.089, + 0.446, + 0.314 + ], + "angle": 0, + "content": "
|  | Zero-shot |  |  | Fine-tuned |  |
|  | AN | YC2 | ES | AN | YC2 |
| Inference with 256 frames |  |  |  |  |  |
| IMAGEViT | 14.4 | 4.6 | 40.8 | 23.8 | 29.4 |
| SHORTViVIT | 15.4 | 7.0 | 47.9 | 24.3 | 29.5 |
| LONGViVIT | 15.2 | 20.3 | 56.8 | 24.0 | 30.6 |
| Modular approaches with 16-frame video models |  |  |  |  |  |
| SeViLA-to-SHORTViVIT | 16.2 | 4.2 | 49.6 | 24.4 | 28.3 |
| IMAGEViT-to-Bard | 18.1 | 15.8 | 35.0 | 22.9 | 19.1 |
| + oracle segments | 16.3 | 16.2 | - | 22.7 | 22.1 |
| SHORTViVIT-to-Bard | 19.3 | 18.1 | 42.0 | 22.7 | 20.8 |
| + oracle segments | 18.3 | 18.2 | - | 22.7 | 24.7 |
| PALI [9] 5B-to-Bard | 22.0 | 19.9 | 44.8 | - | - |
| Blind Bard | - | - | 27.0 | - | - |
| SoTA [69] | - | - | - | 36.9 | 34.6 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.317, + 0.47, + 0.373 + ], + "angle": 0, + "content": "Table 3. Results on long video-to-text benchmarks. We report Rouge-L for zero-shot and fine-tuned video summarization on ActivityNet Captions (AN) and YouCook2 (YC2) and zero-shot accuracy (\\%) for multiple choice QA on EgoSchema (ES)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.379, + 0.469, + 0.47 + ], + "angle": 0, + "content": "also test substituting our small video models with PALI-3 (5B parameters) for frame captioning9. Finally, we reference the SoTA fine-tuned performance on ActivityNet and YouCook2, when using specialized models with precomputed features by multiple networks, object detectors, and domain-specific vocabulary [69]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.47, + 0.469, + 0.606 + ], + "angle": 0, + "content": "Looking through Table 3, we find that on ActivityNet, which contains less temporal dependencies [6.3], modular approaches via frame selection or LLM-based aggregation of information (second block) perform well. Frame captioning via PALI combined with the power of LLMs is enough for the task in a zero-shot setting. For fine-tuned models, feeding either the long input or selected frames into SHORTVIVIT perform better than utilizing Bard. On ActivityNet, we see no benefit from training further on longer videos." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.606, + 0.469, + 0.877 + ], + "angle": 0, + "content": "In contrast, we find that short video and modular models are insufficient for addressing video tasks with longer-range temporal dependencies (YouCook2, EgoSchema). Adapting SHORTVIVIT to longer contexts (LONGVIVIT) significantly improves performance and achieves the best scores across all comparison approaches. Using Bard as an information aggregator over individual clip captions cannot achieve competitive performance, even when considering an oracle video segmentation for YouCook2 (Lines 3 and 5 in the second block of Table 3). Surprisingly, even using a much larger and more powerful image-based model (PALI) cannot reach LONGVIVIT on YouCook2 and EgoSchema. Interestingly, selecting 16 key frames and feeding them into SHORTVIVIT also outperforms Bard-based methods on EgoSchema and fine-tuned YouCook2. This suggests there can be temporal dependencies in long videos that cannot be resolved even with an optimal event segmentation for the video, or be aggregated by LLMs given inprecise visual" + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.09, + 0.858, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.197, + 0.892, + 0.225 + ], + "angle": 0, + "content": "Figure 6. Performance difference \\((\\%)\\) per benchmark when we remove (1) video or (2) image data from the training mixture." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.235, + 0.892, + 0.266 + ], + "angle": 0, + "content": "information. On such benchmarks, LONGVIVIT demonstrates strong performance even without LLM assistance." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.279, + 0.791, + 0.294 + ], + "angle": 0, + "content": "6.3. Brief Notes on Video Evaluations" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.303, + 0.892, + 0.469 + ], + "angle": 0, + "content": "We briefly describe some of our findings on video evaluations. Firstly, we find that blind Bard is able to achieve SoTA results on the full set of EgoSchema (no visual input; \\(33.9\\%\\) accuracy vs. 
\\(32.1\\%\\) for the best model in [41]). Adding visual information from PALI into Bard increases performance to just \\(39.2\\%\\). However, on EgoSchema's released subset, performance of blind Bard is \\(27\\%\\), which is much lower than PALI-to-Bard \\((44.8\\%)\\), suggesting that the subset contains questions that rely more on visual grounding than pure language reasoning, so we report numbers on the subset in Table 3 and on the full set in Appendix ??" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.47, + 0.893, + 0.636 + ], + "angle": 0, + "content": "Figure 6 details a simple ablation across other video benchmarks to quantify temporal richness. We test removing either video or image data from the training mix and measure the effect on performance (video-to-text Recall@1). We see a dramatic performance drop when removing video data for YouCook2 and VATEX (up to \\(75\\%\\)). ActivityNet and MSRVTT suffer more from the absence of image data, whereas non-video training influences performance in lesser degree (as little as \\(18\\%\\) for MSR-VTT). We believe there's room for more fine-grained, temporal-focused video-language benchmarks in the community." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.653, + 0.627, + 0.669 + ], + "angle": 0, + "content": "7. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.678, + 0.892, + 0.887 + ], + "angle": 0, + "content": "In short, we systematically analyze memory-efficient methods to scale video-first architectures to longer sequences of frames and demonstrate that just masking high percentages of the video \\((\\leq 75\\%)\\) yields competitive results on long video-language tasks. Such masking shows a very small performance drop on short videos, provides 2-3x memory savings and allows scaling up to 4.3 minutes at 1 FPS (LONGVIVIT) when freezing part of the short video network in our two-stage training. LONGVIVIT outperforms modular approaches with LLM assistance on video summarization and QA on benchmarks with richer temporal dependencies (YouCook2, EgoSchema). We overall demonstrate that encoding longer-range visual dependencies can make a difference in downstream performance and corrects mistakes that LLMs are unable to rectify." + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.415, + 0.9 + ], + "angle": 0, + "content": "9We consider captions of key frames per 8 seconds of video." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "14393" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.198 + ], + "angle": 0, + "content": "[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in Neural Information Processing Systems, 35:23716-23736, 2022. 1, 2, 3, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.2, + 0.472, + 0.268 + ], + "angle": 0, + "content": "[2] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. Vivit: A video vision transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6836-6846, 2021. 
2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.269, + 0.47, + 0.311 + ], + "angle": 0, + "content": "[3] Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In ICML, page 4, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.312, + 0.472, + 0.382 + ], + "angle": 0, + "content": "[4] Shyamal Buch, Cristóbal Eyzaguirre, Adrien Gaidon, Jiajun Wu, Li Fei-Fei, and Juan Carlos Niebles. Revisiting the \"video\" in video-language understanding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2917-2927, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.382, + 0.47, + 0.452 + ], + "angle": 0, + "content": "[5] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 961-970, 2015. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.453, + 0.47, + 0.508 + ], + "angle": 0, + "content": "[6] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.509, + 0.47, + 0.579 + ], + "angle": 0, + "content": "[7] Dongsheng Chen, Chaofan Tao, Lu Hou, Lifeng Shang, Xin Jiang, and Qun Liu. Litevl: Efficient video-language learning with enhanced spatial-temporal modeling. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 7985-7997, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.579, + 0.47, + 0.621 + ], + "angle": 0, + "content": "[8] Jiao Chen, Aston Zhang, Xingjian Shi, Mu Li, Alex Smola, and Diyi Yang. Parameter-efficient fine-tuning design spaces. arXiv preprint arXiv:2301.01821, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.621, + 0.47, + 0.69 + ], + "angle": 0, + "content": "[9] Xi Chen, Xiao Wang, Lucas Beyer, Alexander Kolesnikov, Jialin Wu, Paul Voigtlaender, Basil Mustafa, Sebastian Goodman, Ibrahim Alabdulmohsin, Piotr Padlewski, et al. Pali-3 vision language models: Smaller, faster, stronger. arXiv preprint arXiv:2310.09199, 2023. 2, 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.691, + 0.472, + 0.761 + ], + "angle": 0, + "content": "[10] Feng Cheng, Xizi Wang, Jie Lei, David Crandall, Mohit Bansal, and Gedas Bertasius. Vindlu: A recipe for effective video-and-language pretraining. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10739-10750, 2023. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.472, + 0.843 + ], + "angle": 0, + "content": "[11] Pradipto Das, Chenliang Xu, Richard F Doell, and Jason J Corso. A thousand frames in just a few words: Linguual description of videos through latent topics and sparse object stitching. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2634-2641, 2013. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.472, + 0.902 + ], + "angle": 0, + "content": "[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 
An image is worth 16x16 words: Trans-" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "formers for image recognition at scale. In International Conference on Learning Representations, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.121, + 0.894, + 0.189 + ], + "angle": 0, + "content": "[13] Danny Driess, Fei Xia, Mehdi SM Sajjadi, Corey Lynch, Aakanksha Chowdhery, Brian Ichter, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, et al. Palm-: An embodied multimodal language model. arXiv preprint arXiv:2303.03378, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.19, + 0.892, + 0.246 + ], + "angle": 0, + "content": "[14] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6202-6211, 2019. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.246, + 0.892, + 0.302 + ], + "angle": 0, + "content": "[15] Tsu-Jui Fu, Linjie Li, Zhe Gan, Kevin Lin, William Yang Wang, Lijuan Wang, and Zicheng Liu. Violet: End-to-end video-language transformers with masked visual-token modeling. arXiv preprint arXiv:2111.12681, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.302, + 0.892, + 0.385 + ], + "angle": 0, + "content": "[16] Tsu-Jui Fu, Linjie Li, Zhe Gan, Kevin Lin, William Yang Wang, Lijuan Wang, and Zicheng Liu. An empirical study of end-to-end video-language transformers with masked visual modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22898-22909, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.386, + 0.892, + 0.469 + ], + "angle": 0, + "content": "[17] Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pages 297–304. JMLR Workshop and Conference Proceedings, 2010. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.469, + 0.892, + 0.51 + ], + "angle": 0, + "content": "[18] Tengda Han, Weidi Xie, and Andrew Zisserman. Turbo training with token dropout. arXiv preprint arXiv:2210.04889, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.511, + 0.892, + 0.58 + ], + "angle": 0, + "content": "[19] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Doll'ar, and Ross B Girshick. Masked autoencoders are scalable vision learners. 2022 IEEE. In CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15979-15988, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.58, + 0.892, + 0.65 + ], + "angle": 0, + "content": "[20] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. Training compute-optimal large language models. arXiv preprint arXiv:2203.15556, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.65, + 0.892, + 0.72 + ], + "angle": 0, + "content": "[21] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. 
In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.72, + 0.892, + 0.776 + ], + "angle": 0, + "content": "[22] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. In International Conference on Learning Representations, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.776, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[23] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Qiang Liu, et al. Language is not all you need: Aligning perception with language models. arXiv preprint arXiv:2302.14045, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.845, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[24] Md Mohaiminul Islam and Gedas Bertasius. Long movie clip classification with state-space video models. In European Conference on Computer Vision, pages 87-104. Springer, 2022. 1, 2" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "14394" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.16 + ], + "angle": 0, + "content": "[25] Andrew Jaegle, Felix Gimeno, Andy Brock, Oriol Vinyals, Andrew Zisserman, and Joao Carreira. Perceiver: General perception with iterative attention. In International conference on machine learning, pages 4651-4664. PMLR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.162, + 0.472, + 0.244 + ], + "angle": 0, + "content": "[26] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning, pages 4904-4916. PMLR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.246, + 0.471, + 0.286 + ], + "angle": 0, + "content": "[27] Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. arXiv preprint arXiv:2001.04451, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.288, + 0.471, + 0.343 + ], + "angle": 0, + "content": "[28] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.344, + 0.471, + 0.384 + ], + "angle": 0, + "content": "[29] Jie Lei, Tamara L Berg, and Mohit Bansal. Revealing single frame bias for video-and-language learning. arXiv preprint arXiv:2206.03428, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.385, + 0.471, + 0.441 + ], + "angle": 0, + "content": "[30] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.442, + 0.471, + 0.496 + ], + "angle": 0, + "content": "[31] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023. 
2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.498, + 0.471, + 0.566 + ], + "angle": 0, + "content": "[32] Linjie Li, Zhe Gan, Kevin Lin, Chung-Ching Lin, Zicheng Liu, Ce Liu, and Lijuan Wang. Lavender: Unifying videolanguage understanding as masked language modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23119-23129, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.567, + 0.471, + 0.635 + ], + "angle": 0, + "content": "[33] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23390-23400, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.637, + 0.471, + 0.704 + ], + "angle": 0, + "content": "[34] Kevin Lin, Faisal Ahmed, Linjie Li, Chung-Ching Lin, Ehsan Azarnasab, Zhengyuan Yang, Jianfeng Wang, Lin Liang, Zicheng Liu, Yumao Lu, Ce Liu, and Lijuan Wang. Mm-vid: Advancing video understanding with gpt-4v(ision), 2023. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.706, + 0.471, + 0.775 + ], + "angle": 0, + "content": "[35] Yuanze Lin, Chen Wei, Huiyu Wang, Alan Yuille, and Cihang Xie. Smaug: Sparse masked autoencoder for efficient video-language pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2459-2469, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.471, + 0.845 + ], + "angle": 0, + "content": "[36] Ruyang Liu, Jingjia Huang, Ge Li, Jiashi Feng, Xinglong Wu, and Thomas H Li. Revisiting temporal modeling for clip-based image-to-video knowledge transferring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6555-6564, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.471, + 0.901 + ], + "angle": 0, + "content": "[37] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3202-3211, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[38] Huaishao Luo, Lei Ji, Ming Zhong, Yang Chen, Wen Lei, Nan Duan, and Tianrui Li. Clip4clip: An empirical study of clip for end to end video clip retrieval and captioning. Neurocomputing, 508:293-304, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[39] Chuofan Ma, Qiushan Guo, Yi Jiang, Ping Luo, Zehuan Yuan, and Xiaojuan Qi. Rethinking resolution in the context of efficient video recognition. Advances in Neural Information Processing Systems, 35:37865-37877, 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.207, + 0.892, + 0.248 + ], + "angle": 0, + "content": "[40] Yue Ma, Tianyu Yang, Yin Shan, and Xiu Li. Simvtp: Simple video text pre-training with masked autoencoders. arXiv preprint arXiv:2212.03490, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.25, + 0.892, + 0.305 + ], + "angle": 0, + "content": "[41] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. arXiv preprint arXiv:2308.09126, 2023. 
2, 3, 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.307, + 0.892, + 0.39 + ], + "angle": 0, + "content": "[42] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2630-2640, 2019. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.392, + 0.892, + 0.473 + ], + "angle": 0, + "content": "[43] Antoine Miech, Jean-Baptiste Alayrac, Lucas Smaira, Ivan Laptev, Josef Sivic, and Andrew Zisserman. End-to-end learning of visual representations from uncurated instructional videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9879-9889, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.476, + 0.892, + 0.559 + ], + "angle": 0, + "content": "[44] Arsha Nagrani, Paul Hongsuck Seo, Bryan Seybold, Anja Hauth, Santiago Manen, Chen Sun, and Cordelia Schmid. Learning audio-video modalities from image captions. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XIV, pages 407-426. Springer, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.561, + 0.892, + 0.602 + ], + "angle": 0, + "content": "[45] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.604, + 0.892, + 0.659 + ], + "angle": 0, + "content": "[46] Junting Pan, Ziyi Lin, Xiatian Zhu, Jing Shao, and Hongsheng Li. St-adapter: Parameter-efficient image-to-video transfer learning. Advances in Neural Information Processing Systems, 35:26462-26477, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.661, + 0.892, + 0.743 + ], + "angle": 0, + "content": "[47] Viorica Pătrăucean, Lucas Smaira, Ankush Gupta, Adrià Recasens Continente, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Joseph Heyward, Mateusz Malinowski, Yi Yang, et al. Perception test: A diagnostic benchmark for multimodal video models. arXiv preprint arXiv:2305.13786, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.745, + 0.892, + 0.787 + ], + "angle": 0, + "content": "[48] Bowen Peng, Jeffrey Quesnelle, Honglu Fan, and Enrico Shippole. Yarn: Efficient context window extension of large language models. arXiv preprint arXiv:2309.00071, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.789, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[49] AJ Piergiovanni, Weicheng Kuo, and Anelia Angelova. Rethinking video vits: Sparse video tubes for joint image and video learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2214-2224, 2023. 2, 3, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.859, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 
Learning" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "14395" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.47, + 0.133 + ], + "angle": 0, + "content": "transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.134, + 0.47, + 0.205 + ], + "angle": 0, + "content": "[51] Michael S Ryoo, Keerthana Gopalakrishnan, Kumara Kahapatitiya, Ted Xiao, Kanishka Rao, Austin Stone, Yao Lu, Julian Ibarz, and Anurag Arnab. Token turing machines. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19070-19081, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.205, + 0.471, + 0.287 + ], + "angle": 0, + "content": "[52] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.288, + 0.471, + 0.342 + ], + "angle": 0, + "content": "[53] Karen Simonyan and Andrew Zisserman. Two-stream convolutional networks for action recognition in videos. Advances in neural information processing systems, 27, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.344, + 0.471, + 0.426 + ], + "angle": 0, + "content": "[54] Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15638-15650, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.427, + 0.471, + 0.482 + ], + "angle": 0, + "content": "[55] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. In Advances in Neural Information Processing Systems, 2022. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.484, + 0.471, + 0.552 + ], + "angle": 0, + "content": "[56] Du Tran, Heng Wang, Lorenzo Torresani, Jamie Ray, Yann LeCun, and Manohar Paluri. A closer look at spatiotemporal convolutions for action recognition. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 6450-6459, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.553, + 0.471, + 0.608 + ], + "angle": 0, + "content": "[57] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.609, + 0.471, + 0.678 + ], + "angle": 0, + "content": "[58] Junke Wang, Dongdong Chen, Zuxuan Wu, Chong Luo, Luowei Zhou, Yucheng Zhao, Yujia Xie, Ce Liu, Yu-Gang Jiang, and Lu Yuan. Omnivl: One foundation model for image-language and video-language tasks. Advances in neural information processing systems, 35:5696-5710, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.679, + 0.471, + 0.719 + ], + "angle": 0, + "content": "[59] Sinong Wang, Belinda Z Li, Madian Khabsa, Han Fang, and Hao Ma. Linformer: Self-attention with linear complexity. arXiv preprint arXiv:2006.04768, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.72, + 0.471, + 0.789 + ], + "angle": 0, + "content": "[60] Xin Wang, Jiawei Wu, Junkun Chen, Lei Li, Yuan-Fang Wang, and William Yang Wang. Vatex: A large-scale, high-quality multilingual dataset for video-and-language research. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4581-4591, 2019. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.471, + 0.858 + ], + "angle": 0, + "content": "[61] Yi Wang, Kunchang Li, Yizhuo Li, Yinan He, Bingkun Huang, Zhiyu Zhao, Hongjie Zhang, Jilan Xu, Yi Liu, Zun Wang, et al. Internvideo: General video foundation models via generative and discriminative learning. arXiv preprint arXiv:2212.03191, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.471, + 0.902 + ], + "angle": 0, + "content": "[62] Zhenhailong Wang, Manling Li, Ruochen Xu, Luowei Zhou, Jie Lei, Xudong Lin, Shuohang Wang, Ziyi Yang, Chenguang Zhu, Derek Hoiem, et al. Language models with" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "image descriptors are strong few-shot video-language learners. Advances in Neural Information Processing Systems, 35: 8483-8497, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[63] Chao-Yuan Wu and Philipp Krahenbuhl. Towards long-form video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1884-1894, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.193, + 0.895, + 0.277 + ], + "angle": 0, + "content": "[64] Chao-Yuan Wu, Yanghao Li, Karttikeya Mangalam, Haoqi Fan, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Memvit: Memory-augmented multiscale vision transformer for efficient long-term video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13587-13597, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.346 + ], + "angle": 0, + "content": "[65] Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3733-3742, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.349, + 0.895, + 0.433 + ], + "angle": 0, + "content": "[66] Hu Xu, Gargi Ghosh, Po-Yao Huang, Dmytro Okhonko, Armen Aghajanyan, Florian Metze, Luke Zettlemoyer, and Christoph Feichtenhofer. Videoclip: Contrastive pre-training for zero-shot video-text understanding. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 6787-6800, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.434, + 0.892, + 0.49 + ], + "angle": 0, + "content": "[67] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5288-5296, 2016. 
2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.491, + 0.892, + 0.574 + ], + "angle": 0, + "content": "[68] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5036-5045, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.576, + 0.892, + 0.645 + ], + "angle": 0, + "content": "[69] Kashu Yamazaki, Khoa Vo, Quang Sang Truong, Bhiksha Raj, and Ngan Le. Vlint: visual-linguistic transformer-in-transformer for coherent video paragraph captioning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 3081–3090, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.647, + 0.892, + 0.716 + ], + "angle": 0, + "content": "[70] Shen Yan, Xuehan Xiong, Anurag Arnab, Zhichao Lu, Mi Zhang, Chen Sun, and Cordelia Schmid. Multiview transformers for video recognition. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3333-3343, 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.718, + 0.892, + 0.773 + ], + "angle": 0, + "content": "[71] Shen Yan, Tao Zhu, Zirui Wang, Yuan Cao, Mi Zhang, Soham Ghosh, Yonghui Wu, and Jiahui Yu. Video-text modeling with zero-shot transfer from contrastive captioners. arXiv preprint arXiv:2212.04979, 2022. 1, 2, 3, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.775, + 0.892, + 0.83 + ], + "angle": 0, + "content": "[72] Antoine Yang, Antoine Miech, Josef Sivic, Ivan Laptev, and Cordelia Schmid. Zero-shot video question answering via frozen bidirectional language models. Advances in Neural Information Processing Systems, 35:124-141, 2022. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.832, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[73] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 1, 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.895, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "14396" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.146 + ], + "angle": 0, + "content": "[74] Shoubin Yu, Jaemin Cho, Prateek Yadav, and Mohit Bansal. Self-chained image-language model for video localization and question answering. arXiv preprint arXiv:2305.06988, 2023. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.47, + 0.218 + ], + "angle": 0, + "content": "[75] Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, et al. Florence: A new foundation model for computer vision. arXiv preprint arXiv:2111.11432, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.22, + 0.469, + 0.288 + ], + "angle": 0, + "content": "[76] Rowan Zellers, Ximing Lu, Jack Hessel, Youngjae Yu, Jae Sung Park, Jize Cao, Ali Farhadi, and Yejin Choi. Merlot: Multimodal neural script knowledge models. Advances in Neural Information Processing Systems, 34:23634-23651, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.291, + 0.469, + 0.373 + ], + "angle": 0, + "content": "[77] Andy Zeng, Maria Attarian, Krzysztof Marcin Choromanski, Adrian Wong, Stefan Welker, Federico Tombari, Aveek Purohit, Michael S Ryoo, Vikas Sindhwani, Johnny Lee, et al. Socratic models: Composing zero-shot multimodal reasoning with language. In The Eleventh International Conference on Learning Representations, 2022. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.375, + 0.469, + 0.43 + ], + "angle": 0, + "content": "[78] Bowen Zhang, Xiaojie Jin, Weibo Gong, Kai Xu, Zhao Zhang, Peng Wang, Xiaohui Shen, and Jiashi Feng. Multimodal video adapter for parameter efficient video text retrieval. arXiv preprint arXiv:2301.07868, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.432, + 0.469, + 0.487 + ], + "angle": 0, + "content": "[79] Qingru Zhang, Minshuo Chen, Alexander Bukharin, Pengcheng He, Yu Cheng, Weizhu Chen, and Tuo Zhao. Adaptive budget allocation for parameter-efficient finetuning. arXiv preprint arXiv:2303.10512, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.488, + 0.469, + 0.53 + ], + "angle": 0, + "content": "[80] Zhang Zhang and Dacheng Tao. Slow feature analysis for human action recognition. IEEE transactions on pattern analysis and machine intelligence, 34(3):436-450, 2012. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.531, + 0.469, + 0.586 + ], + "angle": 0, + "content": "[81] Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. In Proceedings of the AAAI Conference on Artificial Intelligence, 2018. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.588, + 0.469, + 0.642 + ], + "angle": 0, + "content": "[82] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 
2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "14397" + } + ] +] \ No newline at end of file diff --git a/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/e81a3abe-11ba-459a-b183-aa765dce41a0_origin.pdf b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/e81a3abe-11ba-459a-b183-aa765dce41a0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..13d58170655b18b18db31d40cbce43d73a7eaa1d --- /dev/null +++ b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/e81a3abe-11ba-459a-b183-aa765dce41a0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfb83af3c76577faf6780096d402be8a6a0e884504cf213ec3048b117fcbc2b0 +size 823642 diff --git a/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/full.md b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/full.md new file mode 100644 index 0000000000000000000000000000000000000000..776c2791d9357ec39f3066f155d656a43d26c87e --- /dev/null +++ b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/full.md @@ -0,0 +1,275 @@ +# A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames + +![](images/0de35df57d2e48914ff510fed54683dc42a97c7b0b08752eb8756a87480cc145.jpg) + +# Abstract + +Understanding long, real-world videos requires modeling of long-range visual dependencies. To this end, we explore video-first architectures, building on the common paradigm of transferring large-scale, image-text models to video via shallow temporal fusion. However, we expose two limitations to the approach: (1) decreased spatial capabilities, likely due to poor video-language alignment in standard video datasets, and (2) higher memory consumption, bottlenecking the number of frames that can be processed. To mitigate the memory bottleneck, we systematically analyze the memory/accuracy trade-off of various efficient methods: factorized attention, parameter-efficient image-to-video adaptation, input masking, and multi-resolution patchification. Surprisingly, simply masking large portions of the video (up to $75\%$ ) during contrastive pre-training proves to be one of the most robust ways to scale encoders to videos up to 4.3 minutes at 1 FPS. Our simple approach for training long video-to-text models, which scales to 1B parameters, does not add new architectural complexity and is able to outperform the popular paradigm of using much larger LLMs as an information aggregator over segment-based information on benchmarks with long-range temporal dependencies (YouCook2, EgoSchema). + +# 1. Introduction + +Long-video understanding requires modeling of the temporal dynamics and long-range visual dependencies of real-world scenes [63, 64]. However, capturing long-range visual content is challenging, even when equipped with large language models. In this paper, we overcome hardware memory limitations and demonstrate how to extend video encoders to directly process minutes-long visual content using language grounding, and simple, established techniques + +![](images/c16b68480e1203f0e5e223f6db6f111050b2918be435b981a17ae7372761d112.jpg) +Figure 1. 
Two main training steps: (1) training a video encoder via Noise Contrastive Estimation and (2) using this frozen video encoder with a pre-trained, frozen LM and visual adapter layers for video-to-text generation (e.g., video summarization and Q/A). + +without additional architectural complexity [24, 64]. We focus on long videos through the lens of language, assessing our models on the widely applicable tasks of visual summarization and question-answering. + +Recent work on vision-language models have yielded impressive results, predominantly focusing on understanding images or short clips of 16 frames or less [1, 13, 30, 72, 73]. This work recycles strong pre-trained image encoders, performs late temporal fusion [1, 71, 73], and employs mostly-frozen, powerful LLMs. The lack of video-first encoders, equipped with early temporal aggregation, may handicap the ability to process complex visual dependencies, and this is usually reflected in prior work's focus on short video benchmarks ( $< 30$ seconds) in which sixteen frames are sufficient for competitive performance [4, 29]. + +In this work, we systematically explore video-first models starting from a standard image-language recipe using + +two-step training and large pre-trained LMs (Figure 1; [1]). This baseline enables us to start from a demonstrably scalable, simpler-to-tune, widely-used recipe that performs competitively [15, 30]. Through our analysis, we are able to scale this method in a memory-efficient manner to longer sequences of frames, up to 4.3 minutes of video at 1 FPS. + +We first explore video-first models on short-video benchmarks (MSR-VTT [67], VATEX [60], YouCook2 [81], ActivityNet [28]) and compare against the SoTA VideoCoCa model [71]. We show that simple joint space-time attention significantly improves performance over frame-level encodings on benchmarks with rich temporal dependencies (YouCook2, VATEX). Overall, our models are able to reach VideoCoCa performance, while requiring fewer parameters and lower frame resolution. + +This performance gain incurs extra compute and memory costs that grow quadratically with the video length. To address this, we provide one of the first systematic analyses of the memory/accuracy pareto-front of popular memory-efficient methods; this includes factorized attention, parameter-efficient image-to-video adaptation, input masking, and multi-resolution patchification. Through this analysis, we find that among all these options, simple token masking (up to $75\%$ ) during contrastive pre-training incurs only a $1\%$ Recall@1 drop on zero-shot text-video retrieval, and no drop in zero-shot video captioning. At the same time, such high masking offers 2-3x memory savings and allows us to generalize to longer video contexts. The alternatives we explore (e.g., efficient backbone architectures, more sophisticated TubeViT-style patchification [49]), do not maintain the same robustness against noisy video inputs and present a $25\%$ relative decrease in performance for text-video retrieval on challenging benchmarks (YouCook2, VATEX). Finally, although parameter-efficient methods [21, 22] fail to adapt image encoders to video-first models without suffering performance drops, we find that they can adapt video models trained on short contexts (e.g., 16 second videos) to longer temporal horizons. + +Based on the above learnings, we extend our best performing short-video encoder to longer contexts of 256 frames (4.3 minutes at 1 FPS). 
We use the full-length videos of HowTo100M [42] accompanied by LLM-generated summaries based on the ASR to further contrastively train our LONGViVIT while masking $75\%$ of the input video tokens and freezing most parameters of the encoder. LONGViVIT-to-text ( $\sim$ 1B parameters) is able to outperform modular methods that use LLM assistance and PALI-3 [9] for frame captioning on temporally rich benchmarks (YouCook2, EgoSchema). Even modular methods that consider frame selection (SeViLA [74]) or an oracle segmentation of the video for localizing and captioning key events (on YouCook2) cannot reach LONGViVIT's performance. An interesting byproduct of our work is that we can glean + +which video-language benchmarks have strong temporal dependencies, and thus are suitable for testing long video models; we find that papers often use benchmarks in which short video or even blind models perform well [5, 41, 67]. In short, we provide the following contributions: + +- We explore the memory/accuracy pareto-frontier of video-first vision-language models, and systematically evaluate many architectural, data, and training alternatives. In the end, we identify a simple recipe that enables scaling to 4.3 minutes at 1 FPS, many times longer than comparable video-language models [1, 71]. +- We identify short and long video benchmarks with substantial temporal dependencies, for which we demonstrate that the traditional image-first, late-temporal fusion recipe is convincingly weaker than a video-first approach. +- Finally, we compare our long video models to a variety of strong baselines and show competitive performance with far fewer parameters; this includes baselines that use LLM-based aggregation over visual captions, and we quantitatively evaluate this common approach for the first time on standard video benchmarks. + +# 2. Related Work + +We base our recipes on [1, 30], which provide a strong two-step video-language recipe that leverages pre-trained LLMs and works at scale. Similar work at smaller scale has additionally included captioning losses [32, 76], more contrastive losses [10, 38, 43, 66], masking/masked autoencoding [15, 16, 18, 19, 33, 35, 40, 55], and combinations thereof [13, 23, 54, 58, 61, 72, 73, 75, 82]. This work focuses on image-text modeling and extends to $< 30$ seconds via image-to-video transfer, selective fine-tuning, or temporal fusion of frame encodings [1, 71, 73]. + +A volume of work focuses on video-first learning. This includes some of the very early work in image-to-video kernel inflation [6, 53, 56], transformer-based video architectures [2, 3, 37], image-to-video parameter-efficient adaptation [7, 36, 46], and multiple spatiotemporal resolutions along different network paths [14, 39, 68, 70]. These have still only been demonstrated on short videos, so other works have broached the challenge of temporal scalability: [24, 51, 64] propose alternative encoders, and [27, 48, 59] propose more exotic attention mechanisms. TubeViT [49] proposes multi-granularity patchification. We systematically dissect what works and scales among some of these alternatives, electing options that enable us to re-use strong pre-trained models and use standard, more easily-tuned architectures. + +Specifically in video-to-text generation, approaches that handle longer videos are very limited and mostly target images or short videos [15, 31, 61]. A dominant approach is to summarize frames and aggregate information via LLMs [31, 34, 62, 77]. 
To the best of our knowledge, we are the first to attempt to train large-scale video + +to-text models on longer sequences of frames and directly test them against LLM-assisted modular methods on challenging temporal benchmarks [41, 81]. + +# 3. The Video-to-Text Architecture + +We base our approach on the successful two-step recipe that combines pre-trained vision and language models [e.g., 1, 30, 72, 73] as shown in Figure 1: (1) we first pre-train a vision encoder, and then (2) fuse the frozen vision representations into a pre-trained, frozen LM. + +# 3.1. Video-Language Contrastive Pre-training + +Following common practice [1, 30], we use a dual vision-language architecture with a Noise Contrastive Estimation (NCE) loss [17, 45, 65] to pre-train our vision encoder, similar to CLIP [50], ALIGN [26] and VideoCLIP [66]. Both encoders are transformers [57]: a BERT-medium (77M) or base (117M) language encoder and ViT-Base (86M parameters) or Large (307M parameters) vision encoder. On the language side, caption representations are computed by averaging across the corresponding token representations. On the vision side, video frames are patched into a sequence of visual tokens, fed into a vision encoder, and then average pooled to produce a final video representation. + +Most prior larger-scale video-language models use pretrained image encoders and patchify frames individually via 2D convolutions [e.g., 1, 66, 71]. Instead, we create spatiotemporal tubelets via 3D convolutions as done in recent vision-only models [2, 49, 55]. Using 3D tubelets instead of flat patches has the dual advantage of higher input compression and more explicit temporal contextualization; our early experiments yielded improved performance. The tubelet embedding sequence is then flattened, added to learnable positional embeddings, and fed into the vision encoder. The vision encoder uses spatio-temporal attention as in ViViT [2]: Joint space-time attention does not add any new parameters to vanilla image ViT [12], facilitating transfer between image and video models. + +Training a large-scale transformer-based video encoder can be challenging because self-attention across thousands of visual tokens is both compute and memory intensive. Memory bottlenecks a model in two ways: (1) limiting the number of frames, and (2) limiting the contrastive batch size during training, negatively impacting performance. To address (2), we use a pre-trained image encoder trained with large batch sizes, and further tune it on videos, instead of jointly training from scratch on images and videos. For initializing the 3D convolution, we repeat the pre-trained weights across the temporal dimension similarly to [2] (see Appendix A). During video-language pre-training, we maintain different embedding paths for images vs. videos: images are embedded with the original 2D convolution and videos with a separate 3D convolution (no weight sharing). + +# 3.2. Video-to-Text Tuning + +We follow prior work [e.g., 1, 72, 73] by plugging the frozen pre-trained vision encoder into a frozen pre-trained LM. We first temporally mean pool the video representations to keep a fixed number of tokens independently of the number of frames and next use a randomly initialized Perceiver-resampler [25] to project the representations to the LM embedding space (Appendix A). We add new randomly initialized cross-attention layers at each layer of the LM to ground generation on the visual content. 
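To make the wiring concrete, the sketch below traces this fusion path in PyTorch-style code. The latent count, the single-block resampler, and the tanh-gated residual on the cross-attention are illustrative assumptions rather than the exact implementation.

```python
# Minimal sketch of the fusion path described above. Latent count, single-block
# resampler, and the tanh gate are illustrative assumptions, not the paper's code.
import torch
import torch.nn as nn


class PerceiverResampler(nn.Module):
    """Compress a variable number of (frozen) video tokens into a fixed set of latents."""

    def __init__(self, dim: int, num_latents: int = 64, num_heads: int = 8):
        super().__init__()
        self.latents = nn.Parameter(torch.randn(num_latents, dim) * 0.02)
        self.cross_attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        self.ff = nn.Sequential(
            nn.LayerNorm(dim), nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim)
        )

    def forward(self, video_tokens: torch.Tensor) -> torch.Tensor:
        # video_tokens: (batch, num_video_tokens, dim)
        queries = self.latents.expand(video_tokens.size(0), -1, -1)
        latents, _ = self.cross_attn(queries, video_tokens, video_tokens)
        return latents + self.ff(latents)


class GatedCrossAttention(nn.Module):
    """Trainable cross-attention inserted into the frozen LM; the tanh gate starts at
    zero so the LM initially ignores the visual stream (a Flamingo-style assumption)."""

    def __init__(self, dim: int, num_heads: int = 8):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        self.gate = nn.Parameter(torch.zeros(1))

    def forward(self, text_hidden: torch.Tensor, visual_latents: torch.Tensor) -> torch.Tensor:
        attended, _ = self.attn(text_hidden, visual_latents, visual_latents)
        return text_hidden + torch.tanh(self.gate) * attended


# Toy shapes: encoder tokens (batch, frames, tokens_per_frame, dim) are mean-pooled
# over time, resampled to a fixed number of latents, then read by the LM layers.
B, T, N, D = 2, 16, 196, 768
video_tokens = torch.randn(B, T, N, D)          # output of the frozen video encoder
pooled = video_tokens.mean(dim=1)               # temporal mean pooling -> (B, N, D)
latents = PerceiverResampler(D)(pooled)         # (B, 64, D)
text_hidden = torch.randn(B, 32, D)             # hidden states inside the frozen LM
fused = GatedCrossAttention(D)(text_hidden, latents)
print(fused.shape)                              # torch.Size([2, 32, 768])
```

Only the resampler and the newly added cross-attention layers would receive gradients in such a setup; the video encoder and the LM remain frozen.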
We train the new layers and the Perceiver resampler with a standard auto-regressive video captioning loss, $-\log p(w_t \mid w_{<t}; \mathcal{V})$, where $w_t$ is the $t^{\text{th}}$ token of the caption and $\mathcal{V}$ is the video representation.

# 4. Memory-Efficient Encoder Design Space

Device memory is a key bottleneck for video training with joint space-time attention. To overcome this, we explore four broad categories of solutions: (1) efficient attention, (2) parameter-efficient image-to-video adaptation, (3) input token masking, and (4) multi-resolution patchification.

1. Attention mechanism. Factorized attention [2, 3] separates the temporal and spatial dimensions over which self-attention is applied, reducing both memory and computational costs. However, this modification introduces a new temporal block within each transformer layer, making initialization and model tuning more challenging. In contrast to [2], which initializes the new blocks with zeroes, we achieve the best performance when initializing the temporal blocks with the same self-attention weights as the ViT. We additionally add a gating mechanism that acts as a residual connection between the self-attention blocks: $h \leftarrow h + \tanh(\alpha)\, h_{\text{temporal}}$. Here, $\alpha$ is a trainable parameter initialized to zero, which helps maintain the capabilities of the original ViT during training.

2. Parameter-efficient adaptation. We explore using parameter-efficient methods from NLP [8] to adapt image encoders to video while only tuning a small percentage of model parameters. Most prior work adapts image-based models by freezing an image backbone and adding late, trainable temporal-fusion layers [10, 71, 78]. In contrast, we explore ways to use pre-trained image encoders and adapt them to video-first architectures [7, 36, 46]. Inspired by the success of parameter-efficient adaptation in NLP [79], we consider MLP Adapters [21] and LoRA [22] (details in Appendix A). We also explore tuning only the temporal self-attention blocks [7], effectively as adapter layers, in factorized attention. In all variants, we still tune the video-specific 3D patch convolution.

3. Token masking. Most existing work samples videos at a fixed frames-per-second (FPS) rate [e.g., 1, 2, 55, 74]. However, the semantics required for many video-language tasks vary slowly in the temporal dimension [80], and videos present a high degree of redundancy between consecutive frames [55]. We explore ways to sparsely sample the video input to reduce the number of input visual tokens. Specifically, we test random masking of input tubelet embeddings. Since consecutive frames are largely redundant, the same semantic signals could potentially be extracted even with high masking rates. For example, [55] masks up to $95\%$ of the input video to reach optimal performance on the task of video-masked autoencoding. We demonstrate similar results in a video-language setting.

4. Multi-resolution patchification. Finally, we test a simple approach to reduce redundancy in videos via more coarse-grained patchification in the temporal or spatial dimension, as commonly done in multiple-view video models [14, 39, 70]. However, this decreases frame resolution and may lose fine-grained information. As a result, we also experiment with a TubeViT [49] variant that combines flat patches and tubelets of different granularity to mitigate information loss. 
Following [49], we use four different convolution kernels that can encode either coarse-grained temporal or spatial information; details are in Appendix A. + +# 5. Datasets and Benchmarks + +For contrastive pre-training, we use: (1) 27M video-text pairs (VTP) as described in [1], (2) HowTo100M [42] (HT100M; 100M instructional YouTube clips aligned with ASR using their timestamps, called HowTo100M Clips), and (3) VideoCC3M [44] (3M video-text pairs based on Conceptual Captions [52]). Unfortunately, we find the text-video alignment in VideoCC3M to be of poor quality; instead, we use a modified variant with generated pseudolabeled captions of every video by PALI [9] (see Appendices B, C). To pre-train with longer videos, we use a long version of HowTo100M (referred to as HowTo100M Summary) consisting of (1) the full-length videos with an average duration of 6.5 minutes and (2) their textual summaries generated by automatically cleaning and summarizing the ASR transcripts using an LLM [20]. We also include the image datasets of [1]. For video-to-text tuning, we use the same mixture of datasets but exclude HowTo100M Clips, since the noisy video-text alignments hurt performance. + +We report text-video retrieval and captioning results on short video benchmarks, with average video length $\leq 30$ seconds: MSR-VTT [67], YouCook2 [81], ActivityNet Captions [28], and VATEX [60]. To evaluate performance on longer videos, we consider video summarization on full-length versions of YouCook2 and ActivityNet Captions, with a video duration of up to 5 minutes, and multiple-choice video question answering (QA) on EgoSchema [41]. + +# 6. Experimental Results + +In Section 6.1, we describe our results evaluating alternatives in memory-efficient video encoder design; options de + +
| Backbone | MSR-VTT T2V | MSR-VTT V2T | VATEX T2V | VATEX V2T | YC2 T2V | YC2 V2T | AN T2V | AN V2T |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Joint ST-ViViT | 39.6 | 38.1 | 23.8 | 26.3 | 12.3 | 13.6 | 6.7 | 6.4 |
| Factorized ST-ViViT | 40.2 | 36.9 | 25.3 | 25.4 | 11.6 | 12.7 | 6.6 | 7.4 |
| Avg Frame-level | 39.3 | 34.8 | 24.8 | 25.0 | 9.1 | 7.9 | 6.8 | 7.1 |
| Att-pool Frame-level | 38.4 | 37.5 | 21.9 | 26.1 | 9.0 | 8.9 | 6.1 | 6.2 |
+ +Table 1. Text-video retrieval results (\% Recall@1) when considering different visual backbones. + +scribed in Section 4. For this analysis, we use ViT-B/BERT-medium, with training details in Appendix B and ablations on experimental design in Appendix C. + +In Section 6.2, we combine our most competitive design choices from 6.1 and test our models on short and long video understanding benchmarks. We scale our best model variants to ViT-L/BERT-base with a 400M (or 1B) language decoder. We test our short video models on text-video retrieval and video captioning, and our long video models on video summarization and QA on 256-frame videos. + +In Section 6.3, we share our experience working across short and long video benchmarks [5, 11, 41, 60, 67], offering insights about which ones yield robust temporal signal. + +# 6.1. Exploration of Memory-Efficient Designs + +We explore memory-efficient methods to train video-first encoders as described in Section 4. We first consider short video inputs of 16 frames at 1 FPS and report peak train-time memory consumption vs. performance on text-video retrieval on short video benchmarks [5]. Then, we test whether our main findings hold for longer inputs (128+ frames) on video summarization on full-length YouCook2. + +Base architectures. We explore the memory/accuracy trade-off of different visual backbones in Table 1: ViViT with joint space-time attention (i.e., Joint ST-ViViT), ViViT with factorized attention (i.e., Factorized ST-ViViT) [2], and frame-level (ViT-based) image encodings with average or attentional pooling ('att-pool') [1, 71]. Different methods perform similarly, especially on MSR-VTT and ActivityNet (AN). Interestingly, attentional pooling on top of frame-level encodings does not improve performance. ViViT with either joint or factorized attention performs best and presents higher gains for YouCook2 (YC2), the more temporally challenging benchmark [6.3]. In contrast to prior work [e.g., 10, 71] which tests frozen image-to-video transfer and claims joint attention to be inferior, we find it to be competitive in this fully fine-tuned setting. + +Architectures and token masking. We now test robustness of backbones when masking part of the input tubelets (0-75%). We report Recall@1 on text-to-video retrieval for YouCook2 and VATEX1 per backbone for different masking + +![](images/4b8beaa9ce43e207c59263995739190a120d568c668804090ce659197e1107db.jpg) +Figure 2. Trade-offs between performance (% text-to-video Recall@1; y axis) and train-time memory consumption (x axis) for different backbones (joint space-time (JST), factorized space-time (FST), and drame-level encodings) with random input masking (0% up to 75%) or parameter-efficient methods for training (Adapters, LoRA, factorized temporal (FST) adaptation; lower opacity). + +![](images/cc97d257a8490c1fb860941daaa993b99524732efdf2c8551c3fcf6b7514a227.jpg) + +![](images/4939a3e4c1ce6e4aad89a2afa88035c47845e56683f38f3e4e88c966c1b14783.jpg) +Figure 3. Difference (\%) in memory consumption for different model scales: (ViT-B vs ViT-L). We also report performance drop of efficient methods presented in Figure 2 in comparison with the vanilla approach (i.e., no input masking and full fine-tuning) at different model scales to test whether behavior is similar. + +ratios in Figure 2. Joint space-time attention (JST) is robust against noise from masking up to $75\%$ during pre-training. 
The same does not hold for frame-level encodings and factorized attention (FST), where performance drops consistently as we increase masking. We conclude that JST can better handle noisy inputs and use it in further exploration. + +Parameter-efficient adaptation. We next report performance of parameter-efficient image-to-video adaptation in Figure 2. We consider (1) JST with (a) MLP Adapters at every layer of the encoder, (b) LoRA with rank decomposition matrices in the self-attention and feed-forward transformer blocks, and (2) factorized temporal adaptation where we tune the temporal self-attention. No adaptation method can reach the memory savings provided by high input masking, since we tune parameters depthwise and gradient computation still requires backpropagation through the model. At the same time, we see significant performance drop, suggesting that adaptation of spatial-only models to the temporal dimension cannot be sufficiently addressed in semifrozen fashion. Comparing parameter-efficient methods, we find MLP Adapters to be more competitive than LoRA, + +which is now canonical for LLMs. We hypothesize that LoRA is successful for tuning very small portions of the network and performing "easier" in-modality transfer. + +Adaptation at scale. We next scale from ViT-B/86M to ViT-L/307M in Figure 3 and test whether observations hold with different model scales. We present the $\%$ memory increase from base to large (left bar set) and $\%$ performance decrease of each method at each scale $^2$ . Joint ST exhibits a similar memory pattern to frame-level, while leading to smaller accuracy drops, whereas factorized ST presents significant memory overhead with model scale due to the extra temporal parameters. For this reason, we exclude factorized ST from further experimentation. Finally, parameter-efficient methods are unable to achieve competitive performance at both model scales, although their memory requirements scale better with model size. + +Multi-resolution patchification. Given the outsized memory impact of input token count in Figure 4, we additionally analyze: (1) coarse-grained patchification in the temporal (convolution over 4 instead of 2 frames) and/or spatial (convolution over $32 \times 32$ instead of $16 \times 16$ pixel spaces) dimension, and (2) the TubeViT [49] approach of multiple tube kernels of different spatiotemporal size and strides. For all benchmarks, masking the input at high ratios while maintaining a fine granularity of tubelets decreases performance significantly less than other input processing methods. Temporal coarse-grained patchification negatively affects benchmarks with richer temporal dependencies (i.e., YouCook2, VATEX) more than spatial. The opposite trend holds for datasets depending on spatial understanding (i.e., MSR-VTT, ActivityNet Captions3). TubeViT acts as the middle ground between the two by employing multiple kernels, with some performance degradation across all benchmarks. However, it is not able to alleviate the negative effects caused by considering coarser + +![](images/331c8ae41358d767e8f44c4f3567c4ee44f654154ab72356ba8d75a1e5a32bbb.jpg) +Figure 4. 
Trade-offs between performance (text-to-video Recall@1; y axis) and memory consumption (x axis) for input sampling methods: (1) high input masking ratios (0% to 75%) with joint space-time attention, (2) coarse-grained temporal (Coarse temp) and/or spatial (Coarse space) patchification with a fixed kernel and TubeViT which samples parts of the video with multiple 3D kernels of different granularity. + +![](images/599b7d18991dac70929f8f2d35e1e90bda51c44b55b313d2b17b5560a7e142fb.jpg) + +![](images/863c4270f95741d222f2cc1287dfa29362209ec49fa3c97641bb96b4ee1a6fd3.jpg) + +![](images/bf04ac44859cff76f2c83a72cce116e5002366b55efb0b0ab827a6c34c07f1a8.jpg) +Figure 5. Scaling memory-efficient methods to more frames (i.e., 128 frames) for ViViT-B and variants. We measure performance for video-to-text summarization on the full-length YouCook2 videos via Rouge-L (color-coded) while keeping track of memory consumption during short-to-long video contrastive tuning ( $x$ -axis) and video-to-text tuning ( $y$ -axis). + +grained information and presents higher memory requirements due to the multiple convolutions. Overall, we find that high masking with Joint ST and small tubelets yields the strongest memory/performance curves. + +Scaling to longer videos. We now test the best methods from Figure 4 on 128 input frames (32.7k visual tokens). We select methods that are within a memory budget (red vertical lines) and would fit on a 16GB device when expanded to long videos (128+ frames). We contrastively fine-tune [3.1] our best performing video model (i.e., Joint ST referred to as SHORTVIVIT) on sequences of 128 frames on HowTo100M Summary [5], as detailed in Appendix B. We refer to this model as LONGVIVIT. Finally, we fine-tune LONGVIVIT for text generation (Section 3.2) on the full-length YouCook2, and report Rouge-L in Figure 5, measuring memory consumption during both long-context contrastive ( $x$ -axis) and video-to-text ( $y$ -axis) tuning. + +Validating our previous results, IMAGEViT (frame-level encodings) trained on longer videos with $75\%$ masking significantly under-performs video-first models (10 R-L drop). SHORTViVIT without further HT100M Summary training performs better than IMAGEViT, but cannot match models adapted to longer videos. LONGViVIT improves performance by 1.8 Rouge-L points over SHORTViVIT. Comparing input masking with coarser-grained patchification provides similar insights to the previous paragraph. + +Finally, we test MLP Adapters [21] for tuning SHORT-VIViT to longer videos and observe no performance drop in comparison with full fine-tuning. This provides further evidence that parameter-efficient methods can be used for "easier transfers" but not temporal adaptation of spatial-only models. One downside of MLP Adapters is that it increases parameter count during video-to-text tuning (y-axis in Figure 5). Thus, we also experiment with contrastively tuning only the last four layers of the model. With this, we observe a further $3\mathrm{x}$ decrease in memory, since we tune the network widthwise and excise early layer gradient computation. At the same time, there is no memory increase for video-to-text and no performance degradation. We conclude that this combination (high input masking and tuning the last layers) is an effective setting for longer video adaptation. 
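Since random masking is the main memory lever in this comparison, the sketch below makes the mechanism and the resulting token counts concrete; the function and the per-example permutation strategy are illustrative choices, not the exact training implementation.

```python
# Illustrative sketch of random input-token masking: a per-example random subset
# of the flattened tubelet embeddings is kept before they enter the joint
# space-time transformer, shrinking the sequence length that drives the
# quadratic attention memory cost.
import torch


def random_token_masking(tokens: torch.Tensor, mask_ratio: float = 0.75) -> torch.Tensor:
    """tokens: (batch, num_tokens, dim); returns only the visible (kept) tokens."""
    batch, num_tokens, dim = tokens.shape
    num_keep = max(1, int(num_tokens * (1.0 - mask_ratio)))
    # Independent random permutation per example; keep the first `num_keep` positions.
    keep_idx = torch.rand(batch, num_tokens).argsort(dim=1)[:, :num_keep]
    return torch.gather(tokens, 1, keep_idx.unsqueeze(-1).expand(-1, -1, dim))


# Example: 128 input frames correspond to ~32.7k visual tokens (see above);
# keeping 25% of them leaves 8,192 tokens. The token width of 768 is assumed.
tubelets = torch.randn(1, 32768, 768)
visible = random_token_masking(tubelets, mask_ratio=0.75)
print(visible.shape)   # torch.Size([1, 8192, 768])
```

At this masking level the attention sequence length shrinks by a factor of four, consistent with the 2-3x end-to-end memory savings reported earlier for this ratio.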
Given the observed robustness to masking, to further decrease video-to-text memory, we also mask $30\%$ of the input video during training and inference without observing any drop in summarization performance (see Appendix C). + +# 6.2. Main Results + +Short video benchmarks. We present our main results on short video benchmarks in Table 2. We use ViT-L with BERT-base for contrastive pre-training (Section 3.1) and a 400M frozen LM for video-to-text tuning (Section 3.2). Our entire video-to-text model accounts for $\sim 900\mathrm{M}$ parameters, although we additionally test scaling the frozen LM to 1B parameters ( $\sim 1.5\mathrm{B}$ total count). We report Recall@1 for zero-shot text-video retrieval and CIDEr for zero-shot and + +
| Model | MSR-VTT ZS R@1 (T2V/V2T) | MSR-VTT ZS CIDEr (C1/C2) | MSR-VTT FT CIDEr (C1) | VATEX ZS R@1 (T2V/V2T) | VATEX ZS CIDEr (C1/C2) | VATEX FT CIDEr (C1) | YouCook2 ZS R@1 (T2V/V2T) | YouCook2 ZS CIDEr (C1/C2) | YouCook2 FT CIDEr (C1) | ActivityNet ZS R@1 (T2V/V2T) | ActivityNet ZS CIDEr (C1/C2) | ActivityNet FT CIDEr (C1) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| IMAGEViT-L | 30.9/41.6 | 24.6/25.1 | 63.6 | 36.2/42.9 | 37.9/39.4 | 61.1 | 18.2/16.8 | 14.5/16.5 | 95.9 | 20.6/18.2 | 16.3/17.7 | 41.1 |
| SHORTViT-L | 31.9/38.9 | 32.7/32.9 | 63.1 | 37.8/42.8 | 43.6/43.0 | 67.5 | 20.4/20.5 | 21.0/22.1 | 131.9 | 21.3/18.9 | 25.2/26.1 | 44.8 |
| EffSHORTViT-L | 29.9/38.3 | 33.8/33.9 | 63.8 | 34.4/42.7 | 41.3/42.7 | 64.7 | 20.5/20.3 | 21.1/21.7 | 127.1 | 20.1/17.7 | 27.0/26.5 | 41.1 |
| VideoCoCa-L [71] | 33.3/- | 24.3 | - | - | - | - | 18.9/- | 20.7 | - | 31.5*/- | 17.4 | - |
| VideoCoCa-2.1B | 34.3/64.7 | 27.1 | 73.2 | 53.2/73.6 | 22.8 | 77.8 | 20.3/- | 34.3 | 128.0 | 34.5*/33.0* | 19.3 | 39.3 |
| Flamingo-3B [1] | - | - | - | - | 40.1 | - | - | 55.8 | - | - | - | - |
+ +Table 2. We present three model variants: IMAGEViT-L, that uses frame-level encodings with a late temporal fusion trained on images and videos, SHORTViT-L, our best performing video-first model with joint space-time attention, and Efficient SHORTViT-L (EffSHORTViT-L) where we apply $75\%$ train-time masking for 3x memory savings. We also report performance for SoTA image-first models: VideoCoCa-L and Flamingo-3B, although they are bigger and not directly comparable. We report Recall@1 for zero-shot text-to-video (T2V) and video-to-text (V2T) retrieval, and CIDEr for zero-shot and fine-tuned (FT) captioning when considering a 400M (C1) or 1B (C2) frozen LM for generation. ActivityNet retrieval results marked with $*$ are not directly comparable, as these models uniformly sample frames, whereas we use the first frames of the long video with a fixed FPS of 1 to match experimental settings across benchmarks. + +fine-tuned video captioning. We consider three model variants: frame-level encodings IMAGEViT, SHORTViVIT, and SHORTViVIT with $75\%$ masking that uses 2-3x less memory (referred to as Efficient SHORTViVIT). We also report results for VideoCoCa [71] and Flamingo [1] $^6$ . + +Our results remain consistent with our earlier observations. Contextualizing only intra-frame dependencies coupled with late temporal fusion (IMAGEVIT) leads to inferior performance for retrieval and captioning on benchmarks with richer temporal dependencies (YouCook2, VATEX) but performs better on retrieval on MSR-VTT which relies on spatial understanding. Video-first architectures further tuned on video datasets (substantially noisier than curated image ones) improve temporal capabilities at the expense of spatial. For Efficient SHORTVIVIT, we find that masking $75\%$ of the input video causes a performance drop: an average of $1\%$ absolute difference on zero-shot retrieval and no significant difference on zero-shot captioning across all benchmarks. The efficient model still performs similarly or better than IMAGEVIT, especially on captioning and temporally rich benchmarks (e.g., YouCook2, VATEX), while consuming significantly less memory. Finally, when scaling the frozen LM component from 400M to 1B $(\mathrm{C}1\rightarrow \mathrm{C}2)$ for zero-shot video-to-text generation, we observe moderate improvements across benchmarks and variants. + +We compare our results against large image-based models with SoTA performance on video benchmarks (second block of Table 2). Although results are not directly comparable due to different experimental settings, we are competitive and achieve even better results for temporally rich benchmarks (i.e., YouCook2) on text-video retrieval for models of similar parameter count7. Moreover, our models + +significantly outperform VideoCoCa on most video captioning benchmarks even when considering their much larger versions in the zero-shot setting. Finally, when fine-tuning our video-to-text models with the 400M LM, we are again able to match and surpass the performance of the larger VideoCoCa-2.1B in two out of four benchmarks. + +Long video understanding. We further tune LONG-VIViT-L on 256-frame HT100M Summary videos and evaluate zero-shot/fine-tuned summarization (YouCook2, ActivityNet) and QA (EgoSchema released subset); this is shown in Table 3. We additionally report results of LONG-VIViT on Perception Test [47] in Appendix D, where videos are short but can benefit from higher FPS. + +We consider two families of models. 1. 
Models that take as input 256 frames (first block of Table 3): IMAGEVIT and SHORTVIVIT pre-trained on 16-frame clips, and LONG-VIVIT further trained on 256-frame clips. 2. Modular approaches from prior work (second block of Table 3): (a) SeViLA Localization [74] for localizing important frames in the long video given a textual query which are then fed into SHORTVIVIT for performing the task $^8$ , and (b) the popular paradigm of captioning video segments or frames and using an LLM to aggregate information and form coherent summaries or answer questions [31, 34, 77]. We try the latter approach with IMAGEVIT and SHORTVIVIT, generating captions over 16-second video segments and then feeding the captions to the September 2023 release of Bard, a much larger LLM than the ones used in previous results. We caption clips using uniform video segmentation (every 16 seconds) or an oracle segmentation when available (i.e., we consider ground-truth start and end timestamps for different events within ActivityNet and YouCook2 videos). We + +
| Model | AN (zero-shot) | YC2 (zero-shot) | ES (zero-shot) | AN (fine-tuned) | YC2 (fine-tuned) |
| --- | --- | --- | --- | --- | --- |
| *Inference with 256 frames* | | | | | |
| IMAGEViT | 14.4 | 4.6 | 40.8 | 23.8 | 29.4 |
| SHORTViVIT | 15.4 | 7.0 | 47.9 | 24.3 | 29.5 |
| LONGViVIT | 15.2 | 20.3 | 56.8 | 24.0 | 30.6 |
| *Modular approaches with 16-frame video models* | | | | | |
| SeViLA-to-SHORTViVIT | 16.2 | 4.2 | 49.6 | 24.4 | 28.3 |
| IMAGEViT-to-Bard | 18.1 | 15.8 | 35.0 | 22.9 | 19.1 |
| + oracle segments | 16.3 | 16.2 | - | 22.7 | 22.1 |
| SHORTViVIT-to-Bard | 19.3 | 18.1 | 42.0 | 22.7 | 20.8 |
| + oracle segments | 18.3 | 18.2 | - | 22.7 | 24.7 |
| PALI [9] 5B-to-Bard | 22.0 | 19.9 | 44.8 | - | - |
| Blind Bard | - | - | 27.0 | - | - |
| SoTA [69] | - | - | - | 36.9 | 34.6 |
+ +Table 3. Results on long video-to-text benchmarks. We report Rouge-L for zero-shot and fine-tuned video summarization on ActivityNet Captions (AN) and YouCook2 (YC2) and zero-shot accuracy (\%) for multiple choice QA on EgoSchema (ES). + +also test substituting our small video models with PALI-3 (5B parameters) for frame captioning9. Finally, we reference the SoTA fine-tuned performance on ActivityNet and YouCook2, when using specialized models with precomputed features by multiple networks, object detectors, and domain-specific vocabulary [69]. + +Looking through Table 3, we find that on ActivityNet, which contains less temporal dependencies [6.3], modular approaches via frame selection or LLM-based aggregation of information (second block) perform well. Frame captioning via PALI combined with the power of LLMs is enough for the task in a zero-shot setting. For fine-tuned models, feeding either the long input or selected frames into SHORTVIVIT perform better than utilizing Bard. On ActivityNet, we see no benefit from training further on longer videos. + +In contrast, we find that short video and modular models are insufficient for addressing video tasks with longer-range temporal dependencies (YouCook2, EgoSchema). Adapting SHORTVIVIT to longer contexts (LONGVIVIT) significantly improves performance and achieves the best scores across all comparison approaches. Using Bard as an information aggregator over individual clip captions cannot achieve competitive performance, even when considering an oracle video segmentation for YouCook2 (Lines 3 and 5 in the second block of Table 3). Surprisingly, even using a much larger and more powerful image-based model (PALI) cannot reach LONGVIVIT on YouCook2 and EgoSchema. Interestingly, selecting 16 key frames and feeding them into SHORTVIVIT also outperforms Bard-based methods on EgoSchema and fine-tuned YouCook2. This suggests there can be temporal dependencies in long videos that cannot be resolved even with an optimal event segmentation for the video, or be aggregated by LLMs given inprecise visual + +![](images/4442e7c74a3c600d257d792481338f515eed7d5f0aacafe8e67b64eaf8e60eea.jpg) +Figure 6. Performance difference $(\%)$ per benchmark when we remove (1) video or (2) image data from the training mixture. + +information. On such benchmarks, LONGVIVIT demonstrates strong performance even without LLM assistance. + +# 6.3. Brief Notes on Video Evaluations + +We briefly describe some of our findings on video evaluations. Firstly, we find that blind Bard is able to achieve SoTA results on the full set of EgoSchema (no visual input; $33.9\%$ accuracy vs. $32.1\%$ for the best model in [41]). Adding visual information from PALI into Bard increases performance to just $39.2\%$ . However, on EgoSchema's released subset, performance of blind Bard is $27\%$ , which is much lower than PALI-to-Bard $(44.8\%)$ , suggesting that the subset contains questions that rely more on visual grounding than pure language reasoning, so we report numbers on the subset in Table 3 and on the full set in Appendix ?? + +Figure 6 details a simple ablation across other video benchmarks to quantify temporal richness. We test removing either video or image data from the training mix and measure the effect on performance (video-to-text Recall@1). We see a dramatic performance drop when removing video data for YouCook2 and VATEX (up to $75\%$ ). 
ActivityNet and MSRVTT suffer more from the absence of image data, whereas non-video training influences performance in lesser degree (as little as $18\%$ for MSR-VTT). We believe there's room for more fine-grained, temporal-focused video-language benchmarks in the community. + +# 7. Conclusions + +In short, we systematically analyze memory-efficient methods to scale video-first architectures to longer sequences of frames and demonstrate that just masking high percentages of the video $(\leq 75\%)$ yields competitive results on long video-language tasks. Such masking shows a very small performance drop on short videos, provides 2-3x memory savings and allows scaling up to 4.3 minutes at 1 FPS (LONGVIVIT) when freezing part of the short video network in our two-stage training. LONGVIVIT outperforms modular approaches with LLM assistance on video summarization and QA on benchmarks with richer temporal dependencies (YouCook2, EgoSchema). We overall demonstrate that encoding longer-range visual dependencies can make a difference in downstream performance and corrects mistakes that LLMs are unable to rectify. + +# References + +[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in Neural Information Processing Systems, 35:23716-23736, 2022. 1, 2, 3, 4, 7 +[2] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. Vivit: A video vision transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6836-6846, 2021. 2, 3, 4 +[3] Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In ICML, page 4, 2021. 2, 3 +[4] Shyamal Buch, Cristóbal Eyzaguirre, Adrien Gaidon, Jiajun Wu, Li Fei-Fei, and Juan Carlos Niebles. Revisiting the "video" in video-language understanding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2917-2927, 2022. 1 +[5] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 961-970, 2015. 2, 4 +[6] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 2 +[7] Dongsheng Chen, Chaofan Tao, Lu Hou, Lifeng Shang, Xin Jiang, and Qun Liu. Litevl: Efficient video-language learning with enhanced spatial-temporal modeling. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 7985-7997, 2022. 2, 3 +[8] Jiao Chen, Aston Zhang, Xingjian Shi, Mu Li, Alex Smola, and Diyi Yang. Parameter-efficient fine-tuning design spaces. arXiv preprint arXiv:2301.01821, 2023. 3 +[9] Xi Chen, Xiao Wang, Lucas Beyer, Alexander Kolesnikov, Jialin Wu, Paul Voigtlaender, Basil Mustafa, Sebastian Goodman, Ibrahim Alabdulmohsin, Piotr Padlewski, et al. Pali-3 vision language models: Smaller, faster, stronger. arXiv preprint arXiv:2310.09199, 2023. 2, 4, 8 +[10] Feng Cheng, Xizi Wang, Jie Lei, David Crandall, Mohit Bansal, and Gedas Bertasius. Vindlu: A recipe for effective video-and-language pretraining. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10739-10750, 2023. 2, 3, 4 +[11] Pradipto Das, Chenliang Xu, Richard F Doell, and Jason J Corso. A thousand frames in just a few words: Linguual description of videos through latent topics and sparse object stitching. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2634-2641, 2013. 4 +[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Trans- + +formers for image recognition at scale. In International Conference on Learning Representations, 2020. 3 +[13] Danny Driess, Fei Xia, Mehdi SM Sajjadi, Corey Lynch, Aakanksha Chowdhery, Brian Ichter, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, et al. Palm-: An embodied multimodal language model. arXiv preprint arXiv:2303.03378, 2023. 1, 2 +[14] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6202-6211, 2019. 2, 4 +[15] Tsu-Jui Fu, Linjie Li, Zhe Gan, Kevin Lin, William Yang Wang, Lijuan Wang, and Zicheng Liu. Violet: End-to-end video-language transformers with masked visual-token modeling. arXiv preprint arXiv:2111.12681, 2021. 2 +[16] Tsu-Jui Fu, Linjie Li, Zhe Gan, Kevin Lin, William Yang Wang, Lijuan Wang, and Zicheng Liu. An empirical study of end-to-end video-language transformers with masked visual modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22898-22909, 2023. 2 +[17] Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pages 297–304. JMLR Workshop and Conference Proceedings, 2010. 3 +[18] Tengda Han, Weidi Xie, and Andrew Zisserman. Turbo training with token dropout. arXiv preprint arXiv:2210.04889, 2022. 2 +[19] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Doll'ar, and Ross B Girshick. Masked autoencoders are scalable vision learners. 2022 IEEE. In CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15979-15988, 2021. 2 +[20] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. Training compute-optimal large language models. arXiv preprint arXiv:2203.15556, 2022. 4 +[21] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 2, 3, 6 +[22] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. In International Conference on Learning Representations, 2021. 2, 3 +[23] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Qiang Liu, et al. Language is not all you need: Aligning perception with language models. arXiv preprint arXiv:2302.14045, 2023. 2 +[24] Md Mohaiminul Islam and Gedas Bertasius. 
Long movie clip classification with state-space video models. In European Conference on Computer Vision, pages 87-104. Springer, 2022. 1, 2 + +[25] Andrew Jaegle, Felix Gimeno, Andy Brock, Oriol Vinyals, Andrew Zisserman, and Joao Carreira. Perceiver: General perception with iterative attention. In International conference on machine learning, pages 4651-4664. PMLR, 2021. 3 +[26] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning, pages 4904-4916. PMLR, 2021. 3 +[27] Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. arXiv preprint arXiv:2001.04451, 2020. 2 +[28] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017. 2, 4 +[29] Jie Lei, Tamara L Berg, and Mohit Bansal. Revealing single frame bias for video-and-language learning. arXiv preprint arXiv:2206.03428, 2022. 1 +[30] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023. 1, 2, 3 +[31] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023. 2, 7 +[32] Linjie Li, Zhe Gan, Kevin Lin, Chung-Ching Lin, Zicheng Liu, Ce Liu, and Lijuan Wang. Lavender: Unifying videolanguage understanding as masked language modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23119-23129, 2023. 2 +[33] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23390-23400, 2023. 2 +[34] Kevin Lin, Faisal Ahmed, Linjie Li, Chung-Ching Lin, Ehsan Azarnasab, Zhengyuan Yang, Jianfeng Wang, Lin Liang, Zicheng Liu, Yumao Lu, Ce Liu, and Lijuan Wang. Mm-vid: Advancing video understanding with gpt-4v(ision), 2023. 2, 7 +[35] Yuanze Lin, Chen Wei, Huiyu Wang, Alan Yuille, and Cihang Xie. Smaug: Sparse masked autoencoder for efficient video-language pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2459-2469, 2023. 2 +[36] Ruyang Liu, Jingjia Huang, Ge Li, Jiashi Feng, Xinglong Wu, and Thomas H Li. Revisiting temporal modeling for clip-based image-to-video knowledge transferring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6555-6564, 2023. 2, 3 +[37] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3202-3211, 2022. 2 + +[38] Huaishao Luo, Lei Ji, Ming Zhong, Yang Chen, Wen Lei, Nan Duan, and Tianrui Li. Clip4clip: An empirical study of clip for end to end video clip retrieval and captioning. Neurocomputing, 508:293-304, 2022. 2 +[39] Chuofan Ma, Qiushan Guo, Yi Jiang, Ping Luo, Zehuan Yuan, and Xiaojuan Qi. Rethinking resolution in the context of efficient video recognition. Advances in Neural Information Processing Systems, 35:37865-37877, 2022. 
2, 4 +[40] Yue Ma, Tianyu Yang, Ying Shan, and Xiu Li. Simvtp: Simple video text pre-training with masked autoencoders. arXiv preprint arXiv:2212.03490, 2022. 2 +[41] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. arXiv preprint arXiv:2308.09126, 2023. 2, 3, 4, 8 +[42] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2630-2640, 2019. 2, 4 +[43] Antoine Miech, Jean-Baptiste Alayrac, Lucas Smaira, Ivan Laptev, Josef Sivic, and Andrew Zisserman. End-to-end learning of visual representations from uncurated instructional videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9879-9889, 2020. 2 +[44] Arsha Nagrani, Paul Hongsuck Seo, Bryan Seybold, Anja Hauth, Santiago Manen, Chen Sun, and Cordelia Schmid. Learning audio-video modalities from image captions. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XIV, pages 407-426. Springer, 2022. 4 +[45] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. 3 +[46] Junting Pan, Ziyi Lin, Xiatian Zhu, Jing Shao, and Hongsheng Li. St-adapter: Parameter-efficient image-to-video transfer learning. Advances in Neural Information Processing Systems, 35:26462-26477, 2022. 2, 3 +[47] Viorica Pătrăucean, Lucas Smaira, Ankush Gupta, Adrià Recasens Continente, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Joseph Heyward, Mateusz Malinowski, Yi Yang, et al. Perception test: A diagnostic benchmark for multimodal video models. arXiv preprint arXiv:2305.13786, 2023. 7 +[48] Bowen Peng, Jeffrey Quesnelle, Honglu Fan, and Enrico Shippole. Yarn: Efficient context window extension of large language models. arXiv preprint arXiv:2309.00071, 2023. 2 +[49] AJ Piergiovanni, Weicheng Kuo, and Anelia Angelova. Rethinking video vits: Sparse video tubes for joint image and video learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2214-2224, 2023. 2, 3, 4, 5 +[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 3 +[51] Michael S Ryoo, Keerthana Gopalakrishnan, Kumara Kahatapitiya, Ted Xiao, Kanishka Rao, Austin Stone, Yao Lu, Julian Ibarz, and Anurag Arnab. Token turing machines. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19070-19081, 2023. 2 +[52] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, 2018. 4 +[53] Karen Simonyan and Andrew Zisserman. Two-stream convolutional networks for action recognition in videos. Advances in neural information processing systems, 27, 2014.
2 +[54] Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15638-15650, 2022. 2 +[55] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. In Advances in Neural Information Processing Systems, 2022. 2, 3, 4 +[56] Du Tran, Heng Wang, Lorenzo Torresani, Jamie Ray, Yann LeCun, and Manohar Paluri. A closer look at spatiotemporal convolutions for action recognition. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 6450-6459, 2018. 2 +[57] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 3 +[58] Junke Wang, Dongdong Chen, Zuxuan Wu, Chong Luo, Luowei Zhou, Yucheng Zhao, Yujia Xie, Ce Liu, Yu-Gang Jiang, and Lu Yuan. Omnivl: One foundation model for image-language and video-language tasks. Advances in neural information processing systems, 35:5696-5710, 2022. 2 +[59] Sinong Wang, Belinda Z Li, Madian Khabsa, Han Fang, and Hao Ma. Linformer: Self-attention with linear complexity. arXiv preprint arXiv:2006.04768, 2020. 2 +[60] Xin Wang, Jiawei Wu, Junkun Chen, Lei Li, Yuan-Fang Wang, and William Yang Wang. Vatex: A large-scale, high-quality multilingual dataset for video-and-language research. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4581-4591, 2019. 2, 4 +[61] Yi Wang, Kunchang Li, Yizhuo Li, Yinan He, Bingkun Huang, Zhiyu Zhao, Hongjie Zhang, Jilan Xu, Yi Liu, Zun Wang, et al. Internvideo: General video foundation models via generative and discriminative learning. arXiv preprint arXiv:2212.03191, 2022. 2 +[62] Zhenhailong Wang, Manling Li, Ruochen Xu, Luowei Zhou, Jie Lei, Xudong Lin, Shuohang Wang, Ziyi Yang, Chenguang Zhu, Derek Hoiem, et al. Language models with + +image descriptors are strong few-shot video-language learners. Advances in Neural Information Processing Systems, 35: 8483-8497, 2022. 2 +[63] Chao-Yuan Wu and Philipp Krahenbuhl. Towards long-form video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1884-1894, 2021. 1 +[64] Chao-Yuan Wu, Yanghao Li, Karttikeya Mangalam, Haoqi Fan, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Memvit: Memory-augmented multiscale vision transformer for efficient long-term video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13587-13597, 2022. 1, 2 +[65] Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3733-3742, 2018. 3 +[66] Hu Xu, Gargi Ghosh, Po-Yao Huang, Dmytro Okhonko, Armen Aghajanyan, Florian Metze, Luke Zettlemoyer, and Christoph Feichtenhofer. Videoclip: Contrastive pre-training for zero-shot video-text understanding. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 6787-6800, 2021. 2, 3 +[67] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5288-5296, 2016. 2, 4 +[68] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5036-5045, 2022. 2 +[69] Kashu Yamazaki, Khoa Vo, Quang Sang Truong, Bhiksha Raj, and Ngan Le. Vltint: Visual-linguistic transformer-in-transformer for coherent video paragraph captioning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 3081-3090, 2023. 8 +[70] Shen Yan, Xuehan Xiong, Anurag Arnab, Zhichao Lu, Mi Zhang, Chen Sun, and Cordelia Schmid. Multiview transformers for video recognition. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3333-3343, 2022. 2, 4 +[71] Shen Yan, Tao Zhu, Zirui Wang, Yuan Cao, Mi Zhang, Soham Ghosh, Yonghui Wu, and Jiahui Yu. Video-text modeling with zero-shot transfer from contrastive captioners. arXiv preprint arXiv:2212.04979, 2022. 1, 2, 3, 4, 7 +[72] Antoine Yang, Antoine Miech, Josef Sivic, Ivan Laptev, and Cordelia Schmid. Zero-shot video question answering via frozen bidirectional language models. Advances in Neural Information Processing Systems, 35:124-141, 2022. 1, 2, 3 +[73] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 1, 2, 3 + +[74] Shoubin Yu, Jaemin Cho, Prateek Yadav, and Mohit Bansal. Self-chained image-language model for video localization and question answering. arXiv preprint arXiv:2305.06988, 2023. 2, 3, 7 +[75] Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, et al. Florence: A new foundation model for computer vision. arXiv preprint arXiv:2111.11432, 2021. 2 +[76] Rowan Zellers, Ximing Lu, Jack Hessel, Youngjae Yu, Jae Sung Park, Jize Cao, Ali Farhadi, and Yejin Choi. Merlot: Multimodal neural script knowledge models. Advances in Neural Information Processing Systems, 34:23634-23651, 2021. 2 +[77] Andy Zeng, Maria Attarian, Krzysztof Marcin Choromanski, Adrian Wong, Stefan Welker, Federico Tombari, Aveek Purohit, Michael S Ryoo, Vikas Sindhwani, Johnny Lee, et al. Socratic models: Composing zero-shot multimodal reasoning with language. In The Eleventh International Conference on Learning Representations, 2022. 2, 7 +[78] Bowen Zhang, Xiaojie Jin, Weibo Gong, Kai Xu, Zhao Zhang, Peng Wang, Xiaohui Shen, and Jiashi Feng. Multimodal video adapter for parameter efficient video text retrieval. arXiv preprint arXiv:2301.07868, 2023. 3 +[79] Qingru Zhang, Minshuo Chen, Alexander Bukharin, Pengcheng He, Yu Cheng, Weizhu Chen, and Tuo Zhao. Adaptive budget allocation for parameter-efficient finetuning. arXiv preprint arXiv:2303.10512, 2023. 3 +[80] Zhang Zhang and Dacheng Tao. Slow feature analysis for human action recognition. IEEE transactions on pattern analysis and machine intelligence, 34(3):436-450, 2012. 3 +[81] Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. In Proceedings of the AAAI Conference on Artificial Intelligence, 2018. 2, 3, 4 +[82] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny.
Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 2 \ No newline at end of file diff --git a/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/images.zip b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..ed8b8f572edcfebd36104fcd522fcec9503f6579 --- /dev/null +++ b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93ef582a3c3d9732780af0b47aa7861b1ec8660cb6511cb1fa00f522d08eaf96 +size 405183 diff --git a/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/layout.json b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f8a26a82033fce91f372de27d9e683b1eb7bd59c --- /dev/null +++ b/2024/A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames/layout.json @@ -0,0 +1,8274 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 84, + 102, + 510, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 102, + 510, + 139 + ], + "spans": [ + { + "bbox": [ + 84, + 102, + 510, + 139 + ], + "type": "text", + "content": "A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 73, + 160, + 521, + 232 + ], + "blocks": [ + { + "bbox": [ + 73, + 160, + 521, + 232 + ], + "lines": [ + { + "bbox": [ + 73, + 160, + 521, + 232 + ], + "spans": [ + { + "bbox": [ + 73, + 160, + 521, + 232 + ], + "type": "image", + "image_path": "0de35df57d2e48914ff510fed54683dc42a97c7b0b08752eb8756a87480cc145.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 258, + 192, + 271 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 258, + 192, + 271 + ], + "spans": [ + { + "bbox": [ + 143, + 258, + 192, + 271 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 284, + 290, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 284, + 290, + 559 + ], + "spans": [ + { + "bbox": [ + 46, + 284, + 290, + 559 + ], + "type": "text", + "content": "Understanding long, real-world videos requires modeling of long-range visual dependencies. To this end, we explore video-first architectures, building on the common paradigm of transferring large-scale, image-text models to video via shallow temporal fusion. However, we expose two limitations to the approach: (1) decreased spatial capabilities, likely due to poor video-language alignment in standard video datasets, and (2) higher memory consumption, bottlenecking the number of frames that can be processed. To mitigate the memory bottleneck, we systematically analyze the memory/accuracy trade-off of various efficient methods: factorized attention, parameter-efficient image-to-video adaptation, input masking, and multi-resolution patchification. 
Surprisingly, simply masking large portions of the video (up to " + }, + { + "bbox": [ + 46, + 284, + 290, + 559 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 46, + 284, + 290, + 559 + ], + "type": "text", + "content": ") during contrastive pre-training proves to be one of the most robust ways to scale encoders to videos up to 4.3 minutes at 1 FPS. Our simple approach for training long video-to-text models, which scales to 1B parameters, does not add new architectural complexity and is able to outperform the popular paradigm of using much larger LLMs as an information aggregator over segment-based information on benchmarks with long-range temporal dependencies (YouCook2, EgoSchema)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 581, + 128, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 581, + 128, + 593 + ], + "spans": [ + { + "bbox": [ + 47, + 581, + 128, + 593 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 601, + 287, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 601, + 287, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 601, + 287, + 696 + ], + "type": "text", + "content": "Long-video understanding requires modeling of the temporal dynamics and long-range visual dependencies of real-world scenes [63, 64]. However, capturing long-range visual content is challenging, even when equipped with large language models. In this paper, we overcome hardware memory limitations and demonstrate how to extend video encoders to directly process minutes-long visual content using language grounding, and simple, established techniques" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 319, + 258, + 531, + 446 + ], + "blocks": [ + { + "bbox": [ + 319, + 258, + 531, + 446 + ], + "lines": [ + { + "bbox": [ + 319, + 258, + 531, + 446 + ], + "spans": [ + { + "bbox": [ + 319, + 258, + 531, + 446 + ], + "type": "image", + "image_path": "c16b68480e1203f0e5e223f6db6f111050b2918be435b981a17ae7372761d112.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 450, + 547, + 495 + ], + "lines": [ + { + "bbox": [ + 304, + 450, + 547, + 495 + ], + "spans": [ + { + "bbox": [ + 304, + 450, + 547, + 495 + ], + "type": "text", + "content": "Figure 1. Two main training steps: (1) training a video encoder via Noise Contrastive Estimation and (2) using this frozen video encoder with a pre-trained, frozen LM and visual adapter layers for video-to-text generation (e.g., video summarization and Q/A)." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 505, + 547, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 505, + 547, + 553 + ], + "spans": [ + { + "bbox": [ + 304, + 505, + 547, + 553 + ], + "type": "text", + "content": "without additional architectural complexity [24, 64]. We focus on long videos through the lens of language, assessing our models on the widely applicable tasks of visual summarization and question-answering." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 555, + 547, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 555, + 547, + 687 + ], + "spans": [ + { + "bbox": [ + 304, + 555, + 547, + 687 + ], + "type": "text", + "content": "Recent work on vision-language models have yielded impressive results, predominantly focusing on understanding images or short clips of 16 frames or less [1, 13, 30, 72, 73]. This work recycles strong pre-trained image encoders, performs late temporal fusion [1, 71, 73], and employs mostly-frozen, powerful LLMs. The lack of video-first encoders, equipped with early temporal aggregation, may handicap the ability to process complex visual dependencies, and this is usually reflected in prior work's focus on short video benchmarks (" + }, + { + "bbox": [ + 304, + 555, + 547, + 687 + ], + "type": "inline_equation", + "content": "< 30" + }, + { + "bbox": [ + 304, + 555, + 547, + 687 + ], + "type": "text", + "content": " seconds) in which sixteen frames are sufficient for competitive performance [4, 29]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "text", + "content": "In this work, we systematically explore video-first models starting from a standard image-language recipe using" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 703, + 126, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 703, + 126, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 703, + 126, + 712 + ], + "type": "text", + "content": "*Equal contribution." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "14386" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 286, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 286, + 144 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 286, + 144 + ], + "type": "text", + "content": "two-step training and large pre-trained LMs (Figure 1; [1]). This baseline enables us to start from a demonstrably scalable, simpler-to-tune, widely-used recipe that performs competitively [15, 30]. Through our analysis, we are able to scale this method in a memory-efficient manner to longer sequences of frames, up to 4.3 minutes of video at 1 FPS." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 146, + 286, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 146, + 286, + 253 + ], + "spans": [ + { + "bbox": [ + 46, + 146, + 286, + 253 + ], + "type": "text", + "content": "We first explore video-first models on short-video benchmarks (MSR-VTT [67], VATEX [60], YouCook2 [81], ActivityNet [28]) and compare against the SoTA VideoCoCa model [71]. We show that simple joint space-time attention significantly improves performance over frame-level encodings on benchmarks with rich temporal dependencies (YouCook2, VATEX). Overall, our models are able to reach VideoCoCa performance, while requiring fewer parameters and lower frame resolution." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 256, + 286, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 256, + 286, + 532 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 286, + 532 + ], + "type": "text", + "content": "This performance gain incurs extra compute and memory costs that grow quadratically with the video length. To address this, we provide one of the first systematic analyses of the memory/accuracy pareto-front of popular memory-efficient methods; this includes factorized attention, parameter-efficient image-to-video adaptation, input masking, and multi-resolution patchification. Through this analysis, we find that among all these options, simple token masking (up to " + }, + { + "bbox": [ + 46, + 256, + 286, + 532 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 46, + 256, + 286, + 532 + ], + "type": "text", + "content": ") during contrastive pre-training incurs only a " + }, + { + "bbox": [ + 46, + 256, + 286, + 532 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 46, + 256, + 286, + 532 + ], + "type": "text", + "content": " Recall@1 drop on zero-shot text-video retrieval, and no drop in zero-shot video captioning. At the same time, such high masking offers 2-3x memory savings and allows us to generalize to longer video contexts. The alternatives we explore (e.g., efficient backbone architectures, more sophisticated TubeViT-style patchification [49]), do not maintain the same robustness against noisy video inputs and present a " + }, + { + "bbox": [ + 46, + 256, + 286, + 532 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 46, + 256, + 286, + 532 + ], + "type": "text", + "content": " relative decrease in performance for text-video retrieval on challenging benchmarks (YouCook2, VATEX). Finally, although parameter-efficient methods [21, 22] fail to adapt image encoders to video-first models without suffering performance drops, we find that they can adapt video models trained on short contexts (e.g., 16 second videos) to longer temporal horizons." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 534, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 286, + 713 + ], + "type": "text", + "content": "Based on the above learnings, we extend our best performing short-video encoder to longer contexts of 256 frames (4.3 minutes at 1 FPS). 
We use the full-length videos of HowTo100M [42] accompanied by LLM-generated summaries based on the ASR to further contrastively train our LONGViVIT while masking " + }, + { + "bbox": [ + 46, + 534, + 286, + 713 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 46, + 534, + 286, + 713 + ], + "type": "text", + "content": " of the input video tokens and freezing most parameters of the encoder. LONGViVIT-to-text (" + }, + { + "bbox": [ + 46, + 534, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 46, + 534, + 286, + 713 + ], + "type": "text", + "content": "1B parameters) is able to outperform modular methods that use LLM assistance and PALI-3 [9] for frame captioning on temporally rich benchmarks (YouCook2, EgoSchema). Even modular methods that consider frame selection (SeViLA [74]) or an oracle segmentation of the video for localizing and captioning key events (on YouCook2) cannot reach LONGViVIT's performance. An interesting byproduct of our work is that we can glean" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 305, + 72, + 545, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 132 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 132 + ], + "type": "text", + "content": "which video-language benchmarks have strong temporal dependencies, and thus are suitable for testing long video models; we find that papers often use benchmarks in which short video or even blind models perform well [5, 41, 67]. In short, we provide the following contributions:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 133, + 545, + 323 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 306, + 133, + 545, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 133, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 306, + 133, + 545, + 205 + ], + "type": "text", + "content": "- We explore the memory/accuracy pareto-frontier of video-first vision-language models, and systematically evaluate many architectural, data, and training alternatives. In the end, we identify a simple recipe that enables scaling to 4.3 minutes at 1 FPS, many times longer than comparable video-language models [1, 71]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 205, + 545, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 205, + 545, + 252 + ], + "spans": [ + { + "bbox": [ + 306, + 205, + 545, + 252 + ], + "type": "text", + "content": "- We identify short and long video benchmarks with substantial temporal dependencies, for which we demonstrate that the traditional image-first, late-temporal fusion recipe is convincingly weaker than a video-first approach." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 254, + 545, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 254, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 306, + 254, + 545, + 323 + ], + "type": "text", + "content": "- Finally, we compare our long video models to a variety of strong baselines and show competitive performance with far fewer parameters; this includes baselines that use LLM-based aggregation over visual captions, and we quantitatively evaluate this common approach for the first time on standard video benchmarks." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 332, + 392, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 332, + 392, + 344 + ], + "spans": [ + { + "bbox": [ + 306, + 332, + 392, + 344 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 353, + 545, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 353, + 545, + 472 + ], + "spans": [ + { + "bbox": [ + 304, + 353, + 545, + 472 + ], + "type": "text", + "content": "We base our recipes on [1, 30], which provide a strong two-step video-language recipe that leverages pre-trained LLMs and works at scale. Similar work at smaller scale has additionally included captioning losses [32, 76], more contrastive losses [10, 38, 43, 66], masking/masked autoencoding [15, 16, 18, 19, 33, 35, 40, 55], and combinations thereof [13, 23, 54, 58, 61, 72, 73, 75, 82]. This work focuses on image-text modeling and extends to " + }, + { + "bbox": [ + 304, + 353, + 545, + 472 + ], + "type": "inline_equation", + "content": "< 30" + }, + { + "bbox": [ + 304, + 353, + 545, + 472 + ], + "type": "text", + "content": " seconds via image-to-video transfer, selective fine-tuning, or temporal fusion of frame encodings [1, 71, 73]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 474, + 545, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 474, + 545, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 474, + 545, + 640 + ], + "type": "text", + "content": "A volume of work focuses on video-first learning. This includes some of the very early work in image-to-video kernel inflation [6, 53, 56], transformer-based video architectures [2, 3, 37], image-to-video parameter-efficient adaptation [7, 36, 46], and multiple spatiotemporal resolutions along different network paths [14, 39, 68, 70]. These have still only been demonstrated on short videos, so other works have broached the challenge of temporal scalability: [24, 51, 64] propose alternative encoders, and [27, 48, 59] propose more exotic attention mechanisms. TubeViT [49] proposes multi-granularity patchification. We systematically dissect what works and scales among some of these alternatives, electing options that enable us to re-use strong pre-trained models and use standard, more easily-tuned architectures." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "Specifically in video-to-text generation, approaches that handle longer videos are very limited and mostly target images or short videos [15, 31, 61]. A dominant approach is to summarize frames and aggregate information via LLMs [31, 34, 62, 77]. 
To the best of our knowledge, we are the first to attempt to train large-scale video" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14387" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": "to-text models on longer sequences of frames and directly test them against LLM-assisted modular methods on challenging temporal benchmarks [41, 81]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 118, + 222, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 118, + 222, + 131 + ], + "spans": [ + { + "bbox": [ + 47, + 118, + 222, + 131 + ], + "type": "text", + "content": "3. The Video-to-Text Architecture" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 138, + 286, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 138, + 286, + 198 + ], + "spans": [ + { + "bbox": [ + 47, + 138, + 286, + 198 + ], + "type": "text", + "content": "We base our approach on the successful two-step recipe that combines pre-trained vision and language models [e.g., 1, 30, 72, 73] as shown in Figure 1: (1) we first pre-train a vision encoder, and then (2) fuse the frozen vision representations into a pre-trained, frozen LM." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 205, + 266, + 218 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 205, + 266, + 218 + ], + "spans": [ + { + "bbox": [ + 47, + 205, + 266, + 218 + ], + "type": "text", + "content": "3.1. Video-Language Contrastive Pre-training" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 223, + 286, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 223, + 286, + 366 + ], + "spans": [ + { + "bbox": [ + 46, + 223, + 286, + 366 + ], + "type": "text", + "content": "Following common practice [1, 30], we use a dual vision-language architecture with a Noise Contrastive Estimation (NCE) loss [17, 45, 65] to pre-train our vision encoder, similar to CLIP [50], ALIGN [26] and VideoCLIP [66]. Both encoders are transformers [57]: a BERT-medium (77M) or base (117M) language encoder and ViT-Base (86M parameters) or Large (307M parameters) vision encoder. On the language side, caption representations are computed by averaging across the corresponding token representations. On the vision side, video frames are patched into a sequence of visual tokens, fed into a vision encoder, and then average pooled to produce a final video representation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 367, + 286, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 367, + 286, + 533 + ], + "spans": [ + { + "bbox": [ + 46, + 367, + 286, + 533 + ], + "type": "text", + "content": "Most prior larger-scale video-language models use pretrained image encoders and patchify frames individually via 2D convolutions [e.g., 1, 66, 71]. Instead, we create spatiotemporal tubelets via 3D convolutions as done in recent vision-only models [2, 49, 55]. 
Using 3D tubelets instead of flat patches has the dual advantage of higher input compression and more explicit temporal contextualization; our early experiments yielded improved performance. The tubelet embedding sequence is then flattened, added to learnable positional embeddings, and fed into the vision encoder. The vision encoder uses spatio-temporal attention as in ViViT [2]: Joint space-time attention does not add any new parameters to vanilla image ViT [12], facilitating transfer between image and video models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 534, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 286, + 713 + ], + "type": "text", + "content": "Training a large-scale transformer-based video encoder can be challenging because self-attention across thousands of visual tokens is both compute and memory intensive. Memory bottlenecks a model in two ways: (1) limiting the number of frames, and (2) limiting the contrastive batch size during training, negatively impacting performance. To address (2), we use a pre-trained image encoder trained with large batch sizes, and further tune it on videos, instead of jointly training from scratch on images and videos. For initializing the 3D convolution, we repeat the pre-trained weights across the temporal dimension similarly to [2] (see Appendix A). During video-language pre-training, we maintain different embedding paths for images vs. videos: images are embedded with the original 2D convolution and videos with a separate 3D convolution (no weight sharing)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 72, + 429, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 429, + 85 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 429, + 85 + ], + "type": "text", + "content": "3.2. Video-to-Text Tuning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 90, + 545, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 90, + 545, + 234 + ], + "spans": [ + { + "bbox": [ + 304, + 90, + 545, + 234 + ], + "type": "text", + "content": "We follow prior work [e.g., 1, 72, 73] by plugging the frozen pre-trained vision encoder into a frozen pre-trained LM. We first temporally mean pool the video representations to keep a fixed number of tokens independently of the number of frames and next use a randomly initialized Perceiver-resampler [25] to project the representations to the LM embedding space (Appendix A). We add new randomly initialized cross-attention layers at each layer of the LM to ground generation on the visual content. 
We train the new layers and Perceiver resampler with a standard auto-regressive video captioning loss: " + }, + { + "bbox": [ + 304, + 90, + 545, + 234 + ], + "type": "inline_equation", + "content": "-\\log p(w_t|w < t; \\mathcal{V})" + }, + { + "bbox": [ + 304, + 90, + 545, + 234 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 90, + 545, + 234 + ], + "type": "inline_equation", + "content": "w_t" + }, + { + "bbox": [ + 304, + 90, + 545, + 234 + ], + "type": "text", + "content": " is its " + }, + { + "bbox": [ + 304, + 90, + 545, + 234 + ], + "type": "inline_equation", + "content": "t^{th}" + }, + { + "bbox": [ + 304, + 90, + 545, + 234 + ], + "type": "text", + "content": " token, and " + }, + { + "bbox": [ + 304, + 90, + 545, + 234 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 304, + 90, + 545, + 234 + ], + "type": "text", + "content": " is the video representation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 244, + 529, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 244, + 529, + 258 + ], + "spans": [ + { + "bbox": [ + 305, + 244, + 529, + 258 + ], + "type": "text", + "content": "4. Memory-Efficient Encoder Design Space" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 265, + 545, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 265, + 545, + 324 + ], + "spans": [ + { + "bbox": [ + 304, + 265, + 545, + 324 + ], + "type": "text", + "content": "Device memory is a key bottleneck for video training with joint space-time attention. To overcome this, we explore four broad categories of solutions: (1) efficient attention, (2) parameter-efficient image-to-video adaptation, (3) input token masking, and (4) multi-resolution patchification." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 327, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 304, + 327, + 545, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 327, + 545, + 495 + ], + "spans": [ + { + "bbox": [ + 304, + 327, + 545, + 495 + ], + "type": "text", + "content": "1. Attention mechanism. Factorized attention [2, 3] separates the temporal and spatial dimensions over which self-attention is applied, reducing both memory and computational costs. However, this modification introduces a new temporal block within each transformer layer making initialization and model tuning more challenging. In contrast to [2], that initializes the new blocks with zeroes, we find that we achieve best performance when initializing the temporal blocks with the same self-attention weights of ViT. However, we add a gating mechanism which acts as a residual connection between the self-attention blocks: " + }, + { + "bbox": [ + 304, + 327, + 545, + 495 + ], + "type": "inline_equation", + "content": "h = h + \\tanh(\\alpha) h_{temporal}" + }, + { + "bbox": [ + 304, + 327, + 545, + 495 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 304, + 327, + 545, + 495 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 327, + 545, + 495 + ], + "type": "text", + "content": " is a trainable parameter initialized to zero, that helps maintain the capabilities of the original ViT during training." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 496, + 545, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 496, + 545, + 663 + ], + "spans": [ + { + "bbox": [ + 304, + 496, + 545, + 663 + ], + "type": "text", + "content": "2. Parameter-efficient adaptation. We explore using parameter-efficient methods from NLP [8] to adapt image encoders to video, while only tuning a small percentage of model parameters. Most prior work adapts image-based models by freezing an image backbone and adding late, trainable temporal-fusion layers [10, 71, 78]. In contrast, we explore ways to use pre-trained image encoders and adapt them to video-first architectures [7, 36, 46]. Inspired by the success of parameter-efficient adaptation in NLP [79], we consider using MLP Adapters [21] and LoRA [22] (details in Appendix A). We also explore tuning only temporal self-attention blocks [7], effectively as adapter layers, in factorized attention. In all variants, we still tune the video-specific 3D patch convolution." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "3. Token masking. Most existing work samples videos at a fixed frames per second (FPS) rate [e.g., 1, 2, 55, 74]. However, semantics required for many video-language tasks vary slowly in the temporal dimension [80] and videos" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14388" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "content": "present high degree of redundancy between consecutive frames [55]. We explore ways to sparsely sample the video input to reduce the number of input visual tokens. Specifically, we test random masking of input tubelet embeddings. Since consecutive frames are largely redundant, the same semantic signals could potentially be extracted even with high masking rates. For example, [55] masks up to " + }, + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "content": " of the input video to reach optimal performance on the task of video-masked autoencoding. We demonstrate similar results in a video-language setting." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 191, + 290, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 191, + 290, + 324 + ], + "spans": [ + { + "bbox": [ + 46, + 191, + 290, + 324 + ], + "type": "text", + "content": "4. Multi-resolution patchification. Finally, we test a simple approach to reduce redundancy in videos via more coarse-grained patchification in the temporal or spatial dimension, as commonly done in multiple-view video models [14, 39, 70]. However, this decreases frame resolution, and may lose fine-grained information. 
As a result, we also experiment with TubeViT [49] variant that combines flat patches and tubelets of different granularity to mitigate information loss. Following [49], we use four different convolution kernels that can encode either coarse-grained temporal or spatial information; details are in Appendix A." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 328, + 197, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 328, + 197, + 340 + ], + "spans": [ + { + "bbox": [ + 47, + 328, + 197, + 340 + ], + "type": "text", + "content": "5. Datasets and Benchmarks" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 348, + 289, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 348, + 289, + 563 + ], + "spans": [ + { + "bbox": [ + 46, + 348, + 289, + 563 + ], + "type": "text", + "content": "For contrastive pre-training, we use: (1) 27M video-text pairs (VTP) as described in [1], (2) HowTo100M [42] (HT100M; 100M instructional YouTube clips aligned with ASR using their timestamps, called HowTo100M Clips), and (3) VideoCC3M [44] (3M video-text pairs based on Conceptual Captions [52]). Unfortunately, we find the text-video alignment in VideoCC3M to be of poor quality; instead, we use a modified variant with generated pseudolabeled captions of every video by PALI [9] (see Appendices B, C). To pre-train with longer videos, we use a long version of HowTo100M (referred to as HowTo100M Summary) consisting of (1) the full-length videos with an average duration of 6.5 minutes and (2) their textual summaries generated by automatically cleaning and summarizing the ASR transcripts using an LLM [20]. We also include the image datasets of [1]. For video-to-text tuning, we use the same mixture of datasets but exclude HowTo100M Clips, since the noisy video-text alignments hurt performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 563, + 288, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 563, + 288, + 660 + ], + "spans": [ + { + "bbox": [ + 46, + 563, + 288, + 660 + ], + "type": "text", + "content": "We report text-video retrieval and captioning results on short video benchmarks, with average video length " + }, + { + "bbox": [ + 46, + 563, + 288, + 660 + ], + "type": "inline_equation", + "content": "\\leq 30" + }, + { + "bbox": [ + 46, + 563, + 288, + 660 + ], + "type": "text", + "content": " seconds: MSR-VTT [67], YouCook2 [81], ActivityNet Captions [28], and VATEX [60]. To evaluate performance on longer videos, we consider video summarization on full-length versions of YouCook2 and ActivityNet Captions, with a video duration of up to 5 minutes, and multiple-choice video question answering (QA) on EgoSchema [41]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 669, + 174, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 174, + 682 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 174, + 682 + ], + "type": "text", + "content": "6. 
Experimental Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "content": "In Section 6.1, we describe our results evaluating alternatives in memory-efficient video encoder design; options de" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 305, + 70, + 547, + 145 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 547, + 145 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 547, + 145 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 547, + 145 + ], + "type": "table", + "html": "
MSR-VTTVATEXYC2AN
T2VV2TT2VV2TT2VV2TT2VV2T
Joint ST-ViViT39.638.123.826.312.313.66.76.4
Factorized ST-ViViT40.236.925.325.411.612.76.67.4
Avg Frame-level39.334.824.825.09.17.96.87.1
Att-pool Frame-level38.437.521.926.19.08.96.16.2
", + "image_path": "b0955a00718fc111bb34de8fea5bd7ac3220f220ae216dcf619b52630c09d0b7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 146, + 545, + 169 + ], + "lines": [ + { + "bbox": [ + 305, + 146, + 545, + 169 + ], + "spans": [ + { + "bbox": [ + 305, + 146, + 545, + 169 + ], + "type": "text", + "content": "Table 1. Text-video retrieval results (\\% Recall@1) when considering different visual backbones." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 182, + 545, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 182, + 545, + 218 + ], + "spans": [ + { + "bbox": [ + 304, + 182, + 545, + 218 + ], + "type": "text", + "content": "scribed in Section 4. For this analysis, we use ViT-B/BERT-medium, with training details in Appendix B and ablations on experimental design in Appendix C." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 219, + 545, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 219, + 545, + 302 + ], + "spans": [ + { + "bbox": [ + 304, + 219, + 545, + 302 + ], + "type": "text", + "content": "In Section 6.2, we combine our most competitive design choices from 6.1 and test our models on short and long video understanding benchmarks. We scale our best model variants to ViT-L/BERT-base with a 400M (or 1B) language decoder. We test our short video models on text-video retrieval and video captioning, and our long video models on video summarization and QA on 256-frame videos." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 303, + 545, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 303, + 545, + 340 + ], + "spans": [ + { + "bbox": [ + 304, + 303, + 545, + 340 + ], + "type": "text", + "content": "In Section 6.3, we share our experience working across short and long video benchmarks [5, 11, 41, 60, 67], offering insights about which ones yield robust temporal signal." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 347, + 522, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 347, + 522, + 361 + ], + "spans": [ + { + "bbox": [ + 305, + 347, + 522, + 361 + ], + "type": "text", + "content": "6.1. Exploration of Memory-Efficient Designs" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 365, + 545, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 365, + 545, + 450 + ], + "spans": [ + { + "bbox": [ + 304, + 365, + 545, + 450 + ], + "type": "text", + "content": "We explore memory-efficient methods to train video-first encoders as described in Section 4. We first consider short video inputs of 16 frames at 1 FPS and report peak train-time memory consumption vs. performance on text-video retrieval on short video benchmarks [5]. Then, we test whether our main findings hold for longer inputs (128+ frames) on video summarization on full-length YouCook2." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 453, + 545, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 453, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 304, + 453, + 545, + 633 + ], + "type": "text", + "content": "Base architectures. 
We explore the memory/accuracy trade-off of different visual backbones in Table 1: ViViT with joint space-time attention (i.e., Joint ST-ViViT), ViViT with factorized attention (i.e., Factorized ST-ViViT) [2], and frame-level (ViT-based) image encodings with average or attentional pooling ('att-pool') [1, 71]. Different methods perform similarly, especially on MSR-VTT and ActivityNet (AN). Interestingly, attentional pooling on top of frame-level encodings does not improve performance. ViViT with either joint or factorized attention performs best and presents higher gains for YouCook2 (YC2), the more temporally challenging benchmark [6.3]. In contrast to prior work [e.g., 10, 71] which tests frozen image-to-video transfer and claims joint attention to be inferior, we find it to be competitive in this fully fine-tuned setting." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 636, + 545, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 636, + 545, + 686 + ], + "spans": [ + { + "bbox": [ + 304, + 636, + 545, + 686 + ], + "type": "text", + "content": "Architectures and token masking. We now test robustness of backbones when masking part of the input tubelets (0-75%). We report Recall@1 on text-to-video retrieval for YouCook2 and VATEX1 per backbone for different masking" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "text", + "content": "1We do not observe significant sensitivity to input masking for MSR-VTT and ActivityNet Captions across all configurations (Section 6.3)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "14389" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 70, + 286, + 191 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 286, + 191 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 286, + 191 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 286, + 191 + ], + "type": "image", + "image_path": "4b8beaa9ce43e207c59263995739190a120d568c668804090ce659197e1107db.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 200, + 547, + 234 + ], + "lines": [ + { + "bbox": [ + 46, + 200, + 547, + 234 + ], + "spans": [ + { + "bbox": [ + 46, + 200, + 547, + 234 + ], + "type": "text", + "content": "Figure 2. Trade-offs between performance (% text-to-video Recall@1; y axis) and train-time memory consumption (x axis) for different backbones (joint space-time (JST), factorized space-time (FST), and drame-level encodings) with random input masking (0% up to 75%) or parameter-efficient methods for training (Adapters, LoRA, factorized temporal (FST) adaptation; lower opacity)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 305, + 70, + 533, + 191 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 533, + 191 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 533, + 191 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 533, + 191 + ], + "type": "image", + "image_path": "cc97d257a8490c1fb860941daaa993b99524732efdf2c8551c3fcf6b7514a227.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 72, + 253, + 262, + 377 + ], + "blocks": [ + { + "bbox": [ + 72, + 253, + 262, + 377 + ], + "lines": [ + { + "bbox": [ + 72, + 253, + 262, + 377 + ], + "spans": [ + { + "bbox": [ + 72, + 253, + 262, + 377 + ], + "type": "image", + "image_path": "4939a3e4c1ce6e4aad89a2afa88035c47845e56683f38f3e4e88c966c1b14783.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 381, + 288, + 437 + ], + "lines": [ + { + "bbox": [ + 46, + 381, + 288, + 437 + ], + "spans": [ + { + "bbox": [ + 46, + 381, + 288, + 437 + ], + "type": "text", + "content": "Figure 3. Difference (\\%) in memory consumption for different model scales: (ViT-B vs ViT-L). We also report performance drop of efficient methods presented in Figure 2 in comparison with the vanilla approach (i.e., no input masking and full fine-tuning) at different model scales to test whether behavior is similar." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 453, + 287, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 453, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 46, + 453, + 287, + 525 + ], + "type": "text", + "content": "ratios in Figure 2. Joint space-time attention (JST) is robust against noise from masking up to " + }, + { + "bbox": [ + 46, + 453, + 287, + 525 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 46, + 453, + 287, + 525 + ], + "type": "text", + "content": " during pre-training. The same does not hold for frame-level encodings and factorized attention (FST), where performance drops consistently as we increase masking. We conclude that JST can better handle noisy inputs and use it in further exploration." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 534, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 288, + 714 + ], + "type": "text", + "content": "Parameter-efficient adaptation. We next report performance of parameter-efficient image-to-video adaptation in Figure 2. We consider (1) JST with (a) MLP Adapters at every layer of the encoder, (b) LoRA with rank decomposition matrices in the self-attention and feed-forward transformer blocks, and (2) factorized temporal adaptation where we tune the temporal self-attention. No adaptation method can reach the memory savings provided by high input masking, since we tune parameters depthwise and gradient computation still requires backpropagation through the model. At the same time, we see significant performance drop, suggesting that adaptation of spatial-only models to the temporal dimension cannot be sufficiently addressed in semifrozen fashion. 
Comparing parameter-efficient methods, we find MLP Adapters to be more competitive than LoRA," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 254, + 545, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 254, + 545, + 290 + ], + "spans": [ + { + "bbox": [ + 304, + 254, + 545, + 290 + ], + "type": "text", + "content": "which is now canonical for LLMs. We hypothesize that LoRA is successful for tuning very small portions of the network and performing \"easier\" in-modality transfer." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 292, + 546, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 292, + 546, + 447 + ], + "spans": [ + { + "bbox": [ + 304, + 292, + 546, + 447 + ], + "type": "text", + "content": "Adaptation at scale. We next scale from ViT-B/86M to ViT-L/307M in Figure 3 and test whether observations hold with different model scales. We present the " + }, + { + "bbox": [ + 304, + 292, + 546, + 447 + ], + "type": "inline_equation", + "content": "\\%" + }, + { + "bbox": [ + 304, + 292, + 546, + 447 + ], + "type": "text", + "content": " memory increase from base to large (left bar set) and " + }, + { + "bbox": [ + 304, + 292, + 546, + 447 + ], + "type": "inline_equation", + "content": "\\%" + }, + { + "bbox": [ + 304, + 292, + 546, + 447 + ], + "type": "text", + "content": " performance decrease of each method at each scale" + }, + { + "bbox": [ + 304, + 292, + 546, + 447 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 304, + 292, + 546, + 447 + ], + "type": "text", + "content": ". Joint ST exhibits a similar memory pattern to frame-level, while leading to smaller accuracy drops, whereas factorized ST presents significant memory overhead with model scale due to the extra temporal parameters. For this reason, we exclude factorized ST from further experimentation. Finally, parameter-efficient methods are unable to achieve competitive performance at both model scales, although their memory requirements scale better with model size." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 449, + 546, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 546, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 546, + 677 + ], + "type": "text", + "content": "Multi-resolution patchification. Given the outsized memory impact of input token count in Figure 4, we additionally analyze: (1) coarse-grained patchification in the temporal (convolution over 4 instead of 2 frames) and/or spatial (convolution over " + }, + { + "bbox": [ + 304, + 449, + 546, + 677 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 304, + 449, + 546, + 677 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 304, + 449, + 546, + 677 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 304, + 449, + 546, + 677 + ], + "type": "text", + "content": " pixel spaces) dimension, and (2) the TubeViT [49] approach of multiple tube kernels of different spatiotemporal size and strides. For all benchmarks, masking the input at high ratios while maintaining a fine granularity of tubelets decreases performance significantly less than other input processing methods. Temporal coarse-grained patchification negatively affects benchmarks with richer temporal dependencies (i.e., YouCook2, VATEX) more than spatial. 
The opposite trend holds for datasets depending on spatial understanding (i.e., MSR-VTT, ActivityNet Captions3). TubeViT acts as the middle ground between the two by employing multiple kernels, with some performance degradation across all benchmarks. However, it is not able to alleviate the negative effects caused by considering coarser" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 683, + 545, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 683, + 545, + 703 + ], + "spans": [ + { + "bbox": [ + 306, + 683, + 545, + 703 + ], + "type": "text", + "content": "2Performance drop for factorized ST is omitted since the variant without masking leads to out of memory issues." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 703, + 526, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 703, + 526, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 703, + 526, + 713 + ], + "type": "text", + "content": "3Omitted from Figure 4 but follows same patterns as MSR-VTT." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "14390" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 44, + 78, + 211, + 165 + ], + "blocks": [ + { + "bbox": [ + 44, + 78, + 211, + 165 + ], + "lines": [ + { + "bbox": [ + 44, + 78, + 211, + 165 + ], + "spans": [ + { + "bbox": [ + 44, + 78, + 211, + 165 + ], + "type": "image", + "image_path": "331c8ae41358d767e8f44c4f3567c4ee44f654154ab72356ba8d75a1e5a32bbb.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 167, + 547, + 202 + ], + "lines": [ + { + "bbox": [ + 46, + 167, + 547, + 202 + ], + "spans": [ + { + "bbox": [ + 46, + 167, + 547, + 202 + ], + "type": "text", + "content": "Figure 4. Trade-offs between performance (text-to-video Recall@1; y axis) and memory consumption (x axis) for input sampling methods: (1) high input masking ratios (0% to 75%) with joint space-time attention, (2) coarse-grained temporal (Coarse temp) and/or spatial (Coarse space) patchification with a fixed kernel and TubeViT which samples parts of the video with multiple 3D kernels of different granularity." 
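To make the token-count arithmetic behind these patchification variants concrete, here is a small sketch using a plain 3D convolution as the tubelet embedder; the kernel sizes follow the 2-/4-frame and 16x16/32x32 settings mentioned above, but the code itself is our own illustration (randomly initialized, not the paper's embedder).

```python
import torch
import torch.nn as nn

def tubelet_embed(video: torch.Tensor, t: int, p: int, dim: int = 768):
    """Embed a video into tubelet tokens with a non-overlapping 3D convolution.

    video: (batch, channels, frames, height, width); t/p are the temporal and
    spatial patch sizes (kernel == stride).
    """
    proj = nn.Conv3d(3, dim, kernel_size=(t, p, p), stride=(t, p, p))
    tokens = proj(video)                        # (b, dim, frames/t, H/p, W/p)
    return tokens.flatten(2).transpose(1, 2)    # (b, num_tokens, dim)

video = torch.randn(1, 3, 16, 256, 256)         # 16 frames at 256x256
fine     = tubelet_embed(video, t=2, p=16)      # 8 * 16 * 16 = 2048 tokens
coarse_t = tubelet_embed(video, t=4, p=16)      # 4 * 16 * 16 = 1024 tokens
coarse_s = tubelet_embed(video, t=2, p=32)      # 8 *  8 *  8 =  512 tokens
print(fine.shape, coarse_t.shape, coarse_s.shape)
```

Coarser kernels shrink the token count, and hence memory, at the cost of the temporal or spatial detail discussed above; TubeViT instead mixes several such kernels of different granularity.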
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 214, + 79, + 380, + 165 + ], + "blocks": [ + { + "bbox": [ + 214, + 79, + 380, + 165 + ], + "lines": [ + { + "bbox": [ + 214, + 79, + 380, + 165 + ], + "spans": [ + { + "bbox": [ + 214, + 79, + 380, + 165 + ], + "type": "image", + "image_path": "599b7d18991dac70929f8f2d35e1e90bda51c44b55b313d2b17b5560a7e142fb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 389, + 80, + 549, + 165 + ], + "blocks": [ + { + "bbox": [ + 389, + 80, + 549, + 165 + ], + "lines": [ + { + "bbox": [ + 389, + 80, + 549, + 165 + ], + "spans": [ + { + "bbox": [ + 389, + 80, + 549, + 165 + ], + "type": "image", + "image_path": "863c4270f95741d222f2cc1287dfa29362209ec49fa3c97641bb96b4ee1a6fd3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 72, + 205, + 521, + 299 + ], + "blocks": [ + { + "bbox": [ + 72, + 205, + 521, + 299 + ], + "lines": [ + { + "bbox": [ + 72, + 205, + 521, + 299 + ], + "spans": [ + { + "bbox": [ + 72, + 205, + 521, + 299 + ], + "type": "image", + "image_path": "bf04ac44859cff76f2c83a72cce116e5002366b55efb0b0ab827a6c34c07f1a8.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 305, + 547, + 340 + ], + "lines": [ + { + "bbox": [ + 46, + 305, + 547, + 340 + ], + "spans": [ + { + "bbox": [ + 46, + 305, + 547, + 340 + ], + "type": "text", + "content": "Figure 5. Scaling memory-efficient methods to more frames (i.e., 128 frames) for ViViT-B and variants. We measure performance for video-to-text summarization on the full-length YouCook2 videos via Rouge-L (color-coded) while keeping track of memory consumption during short-to-long video contrastive tuning (" + }, + { + "bbox": [ + 46, + 305, + 547, + 340 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 305, + 547, + 340 + ], + "type": "text", + "content": "-axis) and video-to-text tuning (" + }, + { + "bbox": [ + 46, + 305, + 547, + 340 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 305, + 547, + 340 + ], + "type": "text", + "content": "-axis)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 359, + 288, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 359, + 288, + 407 + ], + "spans": [ + { + "bbox": [ + 46, + 359, + 288, + 407 + ], + "type": "text", + "content": "grained information and presents higher memory requirements due to the multiple convolutions. Overall, we find that high masking with Joint ST and small tubelets yields the strongest memory/performance curves." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 416, + 287, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 416, + 287, + 571 + ], + "spans": [ + { + "bbox": [ + 46, + 416, + 287, + 571 + ], + "type": "text", + "content": "Scaling to longer videos. We now test the best methods from Figure 4 on 128 input frames (32.7k visual tokens). We select methods that are within a memory budget (red vertical lines) and would fit on a 16GB device when expanded to long videos (128+ frames). 
We contrastively fine-tune [3.1] our best performing video model (i.e., Joint ST referred to as SHORTVIVIT) on sequences of 128 frames on HowTo100M Summary [5], as detailed in Appendix B. We refer to this model as LONGVIVIT. Finally, we fine-tune LONGVIVIT for text generation (Section 3.2) on the full-length YouCook2, and report Rouge-L in Figure 5, measuring memory consumption during both long-context contrastive (" + }, + { + "bbox": [ + 46, + 416, + 287, + 571 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 416, + 287, + 571 + ], + "type": "text", + "content": "-axis) and video-to-text (" + }, + { + "bbox": [ + 46, + 416, + 287, + 571 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 416, + 287, + 571 + ], + "type": "text", + "content": "-axis) tuning." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 572, + 288, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 572, + 288, + 681 + ], + "spans": [ + { + "bbox": [ + 46, + 572, + 288, + 681 + ], + "type": "text", + "content": "Validating our previous results, IMAGEViT (frame-level encodings) trained on longer videos with " + }, + { + "bbox": [ + 46, + 572, + 288, + 681 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 46, + 572, + 288, + 681 + ], + "type": "text", + "content": " masking significantly under-performs video-first models (10 R-L drop). SHORTViVIT without further HT100M Summary training performs better than IMAGEViT, but cannot match models adapted to longer videos. LONGViVIT improves performance by 1.8 Rouge-L points over SHORTViVIT. Comparing input masking with coarser-grained patchification provides similar insights to the previous paragraph." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 359, + 545, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 359, + 545, + 587 + ], + "spans": [ + { + "bbox": [ + 304, + 359, + 545, + 587 + ], + "type": "text", + "content": "Finally, we test MLP Adapters [21] for tuning SHORT-VIViT to longer videos and observe no performance drop in comparison with full fine-tuning. This provides further evidence that parameter-efficient methods can be used for \"easier transfers\" but not temporal adaptation of spatial-only models. One downside of MLP Adapters is that it increases parameter count during video-to-text tuning (y-axis in Figure 5). Thus, we also experiment with contrastively tuning only the last four layers of the model. With this, we observe a further " + }, + { + "bbox": [ + 304, + 359, + 545, + 587 + ], + "type": "inline_equation", + "content": "3\\mathrm{x}" + }, + { + "bbox": [ + 304, + 359, + 545, + 587 + ], + "type": "text", + "content": " decrease in memory, since we tune the network widthwise and excise early layer gradient computation. At the same time, there is no memory increase for video-to-text and no performance degradation. We conclude that this combination (high input masking and tuning the last layers) is an effective setting for longer video adaptation. Given the observed robustness to masking, to further decrease video-to-text memory, we also mask " + }, + { + "bbox": [ + 304, + 359, + 545, + 587 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 304, + 359, + 545, + 587 + ], + "type": "text", + "content": " of the input video during training and inference without observing any drop in summarization performance (see Appendix C)." 
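A minimal sketch of the long-video recipe described here, assuming a generic transformer encoder in place of SHORTVIVIT and a standard symmetric InfoNCE objective; the "last four blocks" cutoff and 75% masking follow the text, while the layer count, optimizer, and pooling are illustrative assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

dim, n_layers, n_trainable = 768, 12, 4

# Toy stand-in for the short-video encoder: a stack of transformer blocks.
encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model=dim, nhead=12, batch_first=True),
    num_layers=n_layers,
)

# Freeze everything, then unfreeze only the last blocks ("widthwise" tuning),
# so the earlier layers need no gradient computation at all.
for p in encoder.parameters():
    p.requires_grad_(False)
for block in encoder.layers[-n_trainable:]:
    for p in block.parameters():
        p.requires_grad_(True)
optimizer = torch.optim.Adam(
    [p for p in encoder.parameters() if p.requires_grad], lr=1e-5)

# One masked long-video contrastive step. (The paper's long setting is 128
# frames / ~32.7k tokens; a small tensor is used here so the sketch runs.)
tokens = torch.randn(2, 1024, dim)
keep = torch.randperm(tokens.shape[1])[: tokens.shape[1] // 4]   # mask 75%
video_emb = F.normalize(encoder(tokens[:, keep]).mean(dim=1), dim=-1)
text_emb = F.normalize(torch.randn(2, dim), dim=-1)   # placeholder text-tower output

# Symmetric InfoNCE-style loss over paired video/text embeddings.
logits = video_emb @ text_emb.t() / 0.07
labels = torch.arange(2)
loss = (F.cross_entropy(logits, labels) + F.cross_entropy(logits.t(), labels)) / 2
loss.backward()          # gradients reach only the unfrozen last blocks
optimizer.step()
print(float(loss))
```

Stopping gradient computation before the frozen lower blocks is what yields the additional memory reduction reported above, on top of the savings from input masking.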
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 600, + 391, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 600, + 391, + 612 + ], + "spans": [ + { + "bbox": [ + 306, + 600, + 391, + 612 + ], + "type": "text", + "content": "6.2. Main Results" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 617, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 713 + ], + "type": "text", + "content": "Short video benchmarks. We present our main results on short video benchmarks in Table 2. We use ViT-L with BERT-base for contrastive pre-training (Section 3.1) and a 400M frozen LM for video-to-text tuning (Section 3.2). Our entire video-to-text model accounts for " + }, + { + "bbox": [ + 304, + 617, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\sim 900\\mathrm{M}" + }, + { + "bbox": [ + 304, + 617, + 547, + 713 + ], + "type": "text", + "content": " parameters, although we additionally test scaling the frozen LM to 1B parameters (" + }, + { + "bbox": [ + 304, + 617, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\sim 1.5\\mathrm{B}" + }, + { + "bbox": [ + 304, + 617, + 547, + 713 + ], + "type": "text", + "content": " total count). We report Recall@1 for zero-shot text-video retrieval and CIDEr for zero-shot and" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 57, + 693, + 280, + 702 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 693, + 280, + 702 + ], + "spans": [ + { + "bbox": [ + 57, + 693, + 280, + 702 + ], + "type": "text", + "content": "4We start from IMAGEViT trained on short videos with no masking." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 702, + 280, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 702, + 280, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 702, + 280, + 712 + ], + "type": "text", + "content": "5Using the same fine-grained SHORTVIViT model for initialization." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14391" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 70, + 547, + 178 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 547, + 178 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 547, + 178 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 547, + 178 + ], + "type": "table", + "html": "
Model | MSR-VTT Zero-shot T2V/V2T | MSR-VTT Zero-shot C1/C2 | MSR-VTT FT C1 | VATEX Zero-shot T2V/V2T | VATEX Zero-shot C1/C2 | VATEX FT C1 | YouCook2 Zero-shot T2V/V2T | YouCook2 Zero-shot C1/C2 | YouCook2 FT C1 | ActivityNet Zero-shot T2V/V2T | ActivityNet Zero-shot C1/C2 | ActivityNet FT C1
IMAGEViT-L | 30.9/41.6 | 24.6/25.1 | 63.6 | 36.2/42.9 | 37.9/39.4 | 61.1 | 18.2/16.8 | 14.5/16.5 | 95.9 | 20.6/18.2 | 16.3/17.7 | 41.1
SHORTViT-L | 31.9/38.9 | 32.7/32.9 | 63.1 | 37.8/42.8 | 43.6/43.0 | 67.5 | 20.4/20.5 | 21.0/22.1 | 131.9 | 21.3/18.9 | 25.2/26.1 | 44.8
EffSHORTViT-L | 29.9/38.3 | 33.8/33.9 | 63.8 | 34.4/42.7 | 41.3/42.7 | 64.7 | 20.5/20.3 | 21.1/21.7 | 127.1 | 20.1/17.7 | 27.0/26.5 | 41.1
VideoCoCa-L [71] | 33.3/- | 24.3 | - | - | - | - | 18.9/- | 20.7 | - | 31.5*/- | 17.4 | -
VideoCoCa-2.1B | 34.3/64.7 | 27.1 | 73.2 | 53.2/73.6 | 22.8 | 77.8 | 20.3/- | 34.3 | 128.0 | 34.5*/33.0* | 19.3 | 39.3
Flamingo-3B [1] | - | - | - | - | 40.1 | - | - | 55.8 | - | - | - | -
", + "image_path": "f58afae7b89fe101fd7e45df83b05ef58675a234cd2cf537a1ef7bea38dd256b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 46, + 179, + 547, + 258 + ], + "lines": [ + { + "bbox": [ + 46, + 179, + 547, + 258 + ], + "spans": [ + { + "bbox": [ + 46, + 179, + 547, + 258 + ], + "type": "text", + "content": "Table 2. We present three model variants: IMAGEViT-L, that uses frame-level encodings with a late temporal fusion trained on images and videos, SHORTViT-L, our best performing video-first model with joint space-time attention, and Efficient SHORTViT-L (EffSHORTViT-L) where we apply " + }, + { + "bbox": [ + 46, + 179, + 547, + 258 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 46, + 179, + 547, + 258 + ], + "type": "text", + "content": " train-time masking for 3x memory savings. We also report performance for SoTA image-first models: VideoCoCa-L and Flamingo-3B, although they are bigger and not directly comparable. We report Recall@1 for zero-shot text-to-video (T2V) and video-to-text (V2T) retrieval, and CIDEr for zero-shot and fine-tuned (FT) captioning when considering a 400M (C1) or 1B (C2) frozen LM for generation. ActivityNet retrieval results marked with " + }, + { + "bbox": [ + 46, + 179, + 547, + 258 + ], + "type": "inline_equation", + "content": "*" + }, + { + "bbox": [ + 46, + 179, + 547, + 258 + ], + "type": "text", + "content": " are not directly comparable, as these models uniformly sample frames, whereas we use the first frames of the long video with a fixed FPS of 1 to match experimental settings across benchmarks." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 270, + 287, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 270, + 287, + 330 + ], + "spans": [ + { + "bbox": [ + 46, + 270, + 287, + 330 + ], + "type": "text", + "content": "fine-tuned video captioning. We consider three model variants: frame-level encodings IMAGEViT, SHORTViVIT, and SHORTViVIT with " + }, + { + "bbox": [ + 46, + 270, + 287, + 330 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 46, + 270, + 287, + 330 + ], + "type": "text", + "content": " masking that uses 2-3x less memory (referred to as Efficient SHORTViVIT). We also report results for VideoCoCa [71] and Flamingo [1]" + }, + { + "bbox": [ + 46, + 270, + 287, + 330 + ], + "type": "inline_equation", + "content": "^6" + }, + { + "bbox": [ + 46, + 270, + 287, + 330 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 331, + 288, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 331, + 288, + 569 + ], + "spans": [ + { + "bbox": [ + 46, + 331, + 288, + 569 + ], + "type": "text", + "content": "Our results remain consistent with our earlier observations. Contextualizing only intra-frame dependencies coupled with late temporal fusion (IMAGEVIT) leads to inferior performance for retrieval and captioning on benchmarks with richer temporal dependencies (YouCook2, VATEX) but performs better on retrieval on MSR-VTT which relies on spatial understanding. Video-first architectures further tuned on video datasets (substantially noisier than curated image ones) improve temporal capabilities at the expense of spatial. 
For Efficient SHORTVIVIT, we find that masking " + }, + { + "bbox": [ + 46, + 331, + 288, + 569 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 46, + 331, + 288, + 569 + ], + "type": "text", + "content": " of the input video causes a performance drop: an average of " + }, + { + "bbox": [ + 46, + 331, + 288, + 569 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 46, + 331, + 288, + 569 + ], + "type": "text", + "content": " absolute difference on zero-shot retrieval and no significant difference on zero-shot captioning across all benchmarks. The efficient model still performs similarly or better than IMAGEVIT, especially on captioning and temporally rich benchmarks (e.g., YouCook2, VATEX), while consuming significantly less memory. Finally, when scaling the frozen LM component from 400M to 1B " + }, + { + "bbox": [ + 46, + 331, + 288, + 569 + ], + "type": "inline_equation", + "content": "(\\mathrm{C}1\\rightarrow \\mathrm{C}2)" + }, + { + "bbox": [ + 46, + 331, + 288, + 569 + ], + "type": "text", + "content": " for zero-shot video-to-text generation, we observe moderate improvements across benchmarks and variants." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 570, + 287, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 287, + 654 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 287, + 654 + ], + "type": "text", + "content": "We compare our results against large image-based models with SoTA performance on video benchmarks (second block of Table 2). Although results are not directly comparable due to different experimental settings, we are competitive and achieve even better results for temporally rich benchmarks (i.e., YouCook2) on text-video retrieval for models of similar parameter count7. Moreover, our models" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 270, + 545, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 270, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 306, + 270, + 545, + 342 + ], + "type": "text", + "content": "significantly outperform VideoCoCa on most video captioning benchmarks even when considering their much larger versions in the zero-shot setting. Finally, when fine-tuning our video-to-text models with the 400M LM, we are again able to match and surpass the performance of the larger VideoCoCa-2.1B in two out of four benchmarks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 345, + 545, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 345, + 545, + 429 + ], + "spans": [ + { + "bbox": [ + 304, + 345, + 545, + 429 + ], + "type": "text", + "content": "Long video understanding. We further tune LONG-VIViT-L on 256-frame HT100M Summary videos and evaluate zero-shot/fine-tuned summarization (YouCook2, ActivityNet) and QA (EgoSchema released subset); this is shown in Table 3. We additionally report results of LONG-VIViT on Perception Test [47] in Appendix D, where videos are short but can benefit from higher FPS." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 429, + 546, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 429, + 546, + 656 + ], + "spans": [ + { + "bbox": [ + 304, + 429, + 546, + 656 + ], + "type": "text", + "content": "We consider two families of models. 1. 
Models that take as input 256 frames (first block of Table 3): IMAGEVIT and SHORTVIVIT pre-trained on 16-frame clips, and LONG-VIVIT further trained on 256-frame clips. 2. Modular approaches from prior work (second block of Table 3): (a) SeViLA Localization [74] for localizing important frames in the long video given a textual query which are then fed into SHORTVIVIT for performing the task" + }, + { + "bbox": [ + 304, + 429, + 546, + 656 + ], + "type": "inline_equation", + "content": "^8" + }, + { + "bbox": [ + 304, + 429, + 546, + 656 + ], + "type": "text", + "content": ", and (b) the popular paradigm of captioning video segments or frames and using an LLM to aggregate information and form coherent summaries or answer questions [31, 34, 77]. We try the latter approach with IMAGEVIT and SHORTVIVIT, generating captions over 16-second video segments and then feeding the captions to the September 2023 release of Bard, a much larger LLM than the ones used in previous results. We caption clips using uniform video segmentation (every 16 seconds) or an oracle segmentation when available (i.e., we consider ground-truth start and end timestamps for different events within ActivityNet and YouCook2 videos). We" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 664, + 287, + 702 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 664, + 287, + 702 + ], + "spans": [ + { + "bbox": [ + 46, + 664, + 287, + 702 + ], + "type": "text", + "content": "6Models are not directly comparable due to different pre-training datasets, model sizes, training regimes, and input resolution. For instance, [71] fully fine-tune the LM and report results for " + }, + { + "bbox": [ + 46, + 664, + 287, + 702 + ], + "type": "inline_equation", + "content": "576 \\times 576" + }, + { + "bbox": [ + 46, + 664, + 287, + 702 + ], + "type": "text", + "content": " frame resolution instead of " + }, + { + "bbox": [ + 46, + 664, + 287, + 702 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 46, + 664, + 287, + 702 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 58, + 703, + 287, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 703, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 703, + 287, + 712 + ], + "type": "text", + "content": "Video-text retrieval results on ActivityNet Captions are not comparable" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 665, + 545, + 684 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 665, + 545, + 684 + ], + "spans": [ + { + "bbox": [ + 306, + 665, + 545, + 684 + ], + "type": "text", + "content": "ble since we are only considering the first 16 seconds of the video, whereas [71] uniformly sample frames from the entire video (" + }, + { + "bbox": [ + 306, + 665, + 545, + 684 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 306, + 665, + 545, + 684 + ], + "type": "text", + "content": "180 seconds)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 684, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 684, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 684, + 545, + 713 + ], + "type": "text", + "content": "8We select 16 frames using the pre-trained localizer provided by [74]. 
For video summarization, we use synthetic summaries of the video generated by PALI+Bard as the textual query for retrieving frames." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "14392" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 62, + 70, + 272, + 248 + ], + "blocks": [ + { + "bbox": [ + 62, + 70, + 272, + 248 + ], + "lines": [ + { + "bbox": [ + 62, + 70, + 272, + 248 + ], + "spans": [ + { + "bbox": [ + 62, + 70, + 272, + 248 + ], + "type": "table", + "html": "
Model | Zero-shot AN | Zero-shot YC2 | Zero-shot ES | Fine-tuned AN | Fine-tuned YC2
Inference with 256 frames
IMAGEViT | 14.4 | 4.6 | 40.8 | 23.8 | 29.4
SHORTViVIT | 15.4 | 7.0 | 47.9 | 24.3 | 29.5
LONGViVIT | 15.2 | 20.3 | 56.8 | 24.0 | 30.6
Modular approaches with 16-frame video models
SeViLA-to-SHORTViVIT | 16.2 | 4.2 | 49.6 | 24.4 | 28.3
IMAGEViT-to-Bard | 18.1 | 15.8 | 35.0 | 22.9 | 19.1
+ oracle segments | 16.3 | 16.2 | - | 22.7 | 22.1
SHORTViVIT-to-Bard | 19.3 | 18.1 | 42.0 | 22.7 | 20.8
+ oracle segments | 18.3 | 18.2 | - | 22.7 | 24.7
PALI [9] 5B-to-Bard | 22.0 | 19.9 | 44.8 | - | -
Blind Bard | - | - | 27.0 | - | -
SoTA [69] | - | - | - | 36.9 | 34.6
", + "image_path": "05ea390e836d41525f310e7f618be08d5a9be2a4b651a77858c6d736c1c79c8d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 251, + 287, + 295 + ], + "lines": [ + { + "bbox": [ + 46, + 251, + 287, + 295 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 287, + 295 + ], + "type": "text", + "content": "Table 3. Results on long video-to-text benchmarks. We report Rouge-L for zero-shot and fine-tuned video summarization on ActivityNet Captions (AN) and YouCook2 (YC2) and zero-shot accuracy (\\%) for multiple choice QA on EgoSchema (ES)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 300, + 287, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 300, + 287, + 372 + ], + "spans": [ + { + "bbox": [ + 46, + 300, + 287, + 372 + ], + "type": "text", + "content": "also test substituting our small video models with PALI-3 (5B parameters) for frame captioning9. Finally, we reference the SoTA fine-tuned performance on ActivityNet and YouCook2, when using specialized models with precomputed features by multiple networks, object detectors, and domain-specific vocabulary [69]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 372, + 287, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 372, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 46, + 372, + 287, + 479 + ], + "type": "text", + "content": "Looking through Table 3, we find that on ActivityNet, which contains less temporal dependencies [6.3], modular approaches via frame selection or LLM-based aggregation of information (second block) perform well. Frame captioning via PALI combined with the power of LLMs is enough for the task in a zero-shot setting. For fine-tuned models, feeding either the long input or selected frames into SHORTVIVIT perform better than utilizing Bard. On ActivityNet, we see no benefit from training further on longer videos." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 479, + 287, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 479, + 287, + 694 + ], + "spans": [ + { + "bbox": [ + 46, + 479, + 287, + 694 + ], + "type": "text", + "content": "In contrast, we find that short video and modular models are insufficient for addressing video tasks with longer-range temporal dependencies (YouCook2, EgoSchema). Adapting SHORTVIVIT to longer contexts (LONGVIVIT) significantly improves performance and achieves the best scores across all comparison approaches. Using Bard as an information aggregator over individual clip captions cannot achieve competitive performance, even when considering an oracle video segmentation for YouCook2 (Lines 3 and 5 in the second block of Table 3). Surprisingly, even using a much larger and more powerful image-based model (PALI) cannot reach LONGVIVIT on YouCook2 and EgoSchema. Interestingly, selecting 16 key frames and feeding them into SHORTVIVIT also outperforms Bard-based methods on EgoSchema and fine-tuned YouCook2. 
This suggests there can be temporal dependencies in long videos that cannot be resolved even with an optimal event segmentation for the video, or be aggregated by LLMs given inprecise visual" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 325, + 71, + 525, + 150 + ], + "blocks": [ + { + "bbox": [ + 325, + 71, + 525, + 150 + ], + "lines": [ + { + "bbox": [ + 325, + 71, + 525, + 150 + ], + "spans": [ + { + "bbox": [ + 325, + 71, + 525, + 150 + ], + "type": "image", + "image_path": "4442e7c74a3c600d257d792481338f515eed7d5f0aacafe8e67b64eaf8e60eea.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 156, + 545, + 178 + ], + "lines": [ + { + "bbox": [ + 306, + 156, + 545, + 178 + ], + "spans": [ + { + "bbox": [ + 306, + 156, + 545, + 178 + ], + "type": "text", + "content": "Figure 6. Performance difference " + }, + { + "bbox": [ + 306, + 156, + 545, + 178 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 306, + 156, + 545, + 178 + ], + "type": "text", + "content": " per benchmark when we remove (1) video or (2) image data from the training mixture." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 186, + 545, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 186, + 545, + 210 + ], + "spans": [ + { + "bbox": [ + 306, + 186, + 545, + 210 + ], + "type": "text", + "content": "information. On such benchmarks, LONGVIVIT demonstrates strong performance even without LLM assistance." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 220, + 484, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 220, + 484, + 232 + ], + "spans": [ + { + "bbox": [ + 306, + 220, + 484, + 232 + ], + "type": "text", + "content": "6.3. Brief Notes on Video Evaluations" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "text", + "content": "We briefly describe some of our findings on video evaluations. Firstly, we find that blind Bard is able to achieve SoTA results on the full set of EgoSchema (no visual input; " + }, + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "inline_equation", + "content": "33.9\\%" + }, + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "text", + "content": " accuracy vs. " + }, + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "inline_equation", + "content": "32.1\\%" + }, + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "text", + "content": " for the best model in [41]). Adding visual information from PALI into Bard increases performance to just " + }, + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "inline_equation", + "content": "39.2\\%" + }, + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "text", + "content": ". 
However, on EgoSchema's released subset, performance of blind Bard is " + }, + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "inline_equation", + "content": "27\\%" + }, + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "text", + "content": ", which is much lower than PALI-to-Bard " + }, + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "inline_equation", + "content": "(44.8\\%)" + }, + { + "bbox": [ + 305, + 239, + 545, + 371 + ], + "type": "text", + "content": ", suggesting that the subset contains questions that rely more on visual grounding than pure language reasoning, so we report numbers on the subset in Table 3 and on the full set in Appendix ??" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 372, + 546, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 372, + 546, + 503 + ], + "spans": [ + { + "bbox": [ + 305, + 372, + 546, + 503 + ], + "type": "text", + "content": "Figure 6 details a simple ablation across other video benchmarks to quantify temporal richness. We test removing either video or image data from the training mix and measure the effect on performance (video-to-text Recall@1). We see a dramatic performance drop when removing video data for YouCook2 and VATEX (up to " + }, + { + "bbox": [ + 305, + 372, + 546, + 503 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 305, + 372, + 546, + 503 + ], + "type": "text", + "content": "). ActivityNet and MSRVTT suffer more from the absence of image data, whereas non-video training influences performance in lesser degree (as little as " + }, + { + "bbox": [ + 305, + 372, + 546, + 503 + ], + "type": "inline_equation", + "content": "18\\%" + }, + { + "bbox": [ + 305, + 372, + 546, + 503 + ], + "type": "text", + "content": " for MSR-VTT). We believe there's room for more fine-grained, temporal-focused video-language benchmarks in the community." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 517, + 383, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 517, + 383, + 529 + ], + "spans": [ + { + "bbox": [ + 306, + 517, + 383, + 529 + ], + "type": "text", + "content": "7. Conclusions" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 536, + 545, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 536, + 545, + 702 + ], + "spans": [ + { + "bbox": [ + 305, + 536, + 545, + 702 + ], + "type": "text", + "content": "In short, we systematically analyze memory-efficient methods to scale video-first architectures to longer sequences of frames and demonstrate that just masking high percentages of the video " + }, + { + "bbox": [ + 305, + 536, + 545, + 702 + ], + "type": "inline_equation", + "content": "(\\leq 75\\%)" + }, + { + "bbox": [ + 305, + 536, + 545, + 702 + ], + "type": "text", + "content": " yields competitive results on long video-language tasks. Such masking shows a very small performance drop on short videos, provides 2-3x memory savings and allows scaling up to 4.3 minutes at 1 FPS (LONGVIVIT) when freezing part of the short video network in our two-stage training. LONGVIVIT outperforms modular approaches with LLM assistance on video summarization and QA on benchmarks with richer temporal dependencies (YouCook2, EgoSchema). We overall demonstrate that encoding longer-range visual dependencies can make a difference in downstream performance and corrects mistakes that LLMs are unable to rectify." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 58, + 702, + 253, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 253, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 253, + 712 + ], + "type": "text", + "content": "9We consider captions of key frames per 8 seconds of video." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14393" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "text", + "content": "[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in Neural Information Processing Systems, 35:23716-23736, 2022. 1, 2, 3, 4, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 158, + 288, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 158, + 288, + 212 + ], + "spans": [ + { + "bbox": [ + 53, + 158, + 288, + 212 + ], + "type": "text", + "content": "[2] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. Vivit: A video vision transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6836-6846, 2021. 2, 3, 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 213, + 287, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 213, + 287, + 246 + ], + "spans": [ + { + "bbox": [ + 53, + 213, + 287, + 246 + ], + "type": "text", + "content": "[3] Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In ICML, page 4, 2021. 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 247, + 288, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 247, + 288, + 302 + ], + "spans": [ + { + "bbox": [ + 53, + 247, + 288, + 302 + ], + "type": "text", + "content": "[4] Shyamal Buch, Cristóbal Eyzaguirre, Adrien Gaidon, Jiajun Wu, Li Fei-Fei, and Juan Carlos Niebles. Revisiting the \"video\" in video-language understanding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2917-2927, 2022. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 302, + 287, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 302, + 287, + 357 + ], + "spans": [ + { + "bbox": [ + 53, + 302, + 287, + 357 + ], + "type": "text", + "content": "[5] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. 
Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 961-970, 2015. 2, 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 358, + 287, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 358, + 287, + 402 + ], + "spans": [ + { + "bbox": [ + 53, + 358, + 287, + 402 + ], + "type": "text", + "content": "[6] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 403, + 287, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 403, + 287, + 458 + ], + "spans": [ + { + "bbox": [ + 53, + 403, + 287, + 458 + ], + "type": "text", + "content": "[7] Dongsheng Chen, Chaofan Tao, Lu Hou, Lifeng Shang, Xin Jiang, and Qun Liu. Litevl: Efficient video-language learning with enhanced spatial-temporal modeling. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 7985-7997, 2022. 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 458, + 287, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 458, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 53, + 458, + 287, + 491 + ], + "type": "text", + "content": "[8] Jiao Chen, Aston Zhang, Xingjian Shi, Mu Li, Alex Smola, and Diyi Yang. Parameter-efficient fine-tuning design spaces. arXiv preprint arXiv:2301.01821, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 491, + 287, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 491, + 287, + 546 + ], + "spans": [ + { + "bbox": [ + 53, + 491, + 287, + 546 + ], + "type": "text", + "content": "[9] Xi Chen, Xiao Wang, Lucas Beyer, Alexander Kolesnikov, Jialin Wu, Paul Voigtlaender, Basil Mustafa, Sebastian Goodman, Ibrahim Alabdulmohsin, Piotr Padlewski, et al. Pali-3 vision language models: Smaller, faster, stronger. arXiv preprint arXiv:2310.09199, 2023. 2, 4, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 547, + 288, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 547, + 288, + 602 + ], + "spans": [ + { + "bbox": [ + 48, + 547, + 288, + 602 + ], + "type": "text", + "content": "[10] Feng Cheng, Xizi Wang, Jie Lei, David Crandall, Mohit Bansal, and Gedas Bertasius. Vindlu: A recipe for effective video-and-language pretraining. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10739-10750, 2023. 2, 3, 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 602, + 288, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 288, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 288, + 667 + ], + "type": "text", + "content": "[11] Pradipto Das, Chenliang Xu, Richard F Doell, and Jason J Corso. A thousand frames in just a few words: Linguual description of videos through latent topics and sparse object stitching. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2634-2641, 2013. 
4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 669, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 288, + 714 + ], + "type": "text", + "content": "[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Trans-" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "formers for image recognition at scale. In International Conference on Learning Representations, 2020. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 95, + 547, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 95, + 547, + 149 + ], + "spans": [ + { + "bbox": [ + 308, + 95, + 547, + 149 + ], + "type": "text", + "content": "[13] Danny Driess, Fei Xia, Mehdi SM Sajjadi, Corey Lynch, Aakanksha Chowdhery, Brian Ichter, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, et al. Palm-: An embodied multimodal language model. arXiv preprint arXiv:2303.03378, 2023. 1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 150, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 150, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 308, + 150, + 545, + 194 + ], + "type": "text", + "content": "[14] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6202-6211, 2019. 2, 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 194, + 545, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 194, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 308, + 194, + 545, + 239 + ], + "type": "text", + "content": "[15] Tsu-Jui Fu, Linjie Li, Zhe Gan, Kevin Lin, William Yang Wang, Lijuan Wang, and Zicheng Liu. Violet: End-to-end video-language transformers with masked visual-token modeling. arXiv preprint arXiv:2111.12681, 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 239, + 545, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 239, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 308, + 239, + 545, + 304 + ], + "type": "text", + "content": "[16] Tsu-Jui Fu, Linjie Li, Zhe Gan, Kevin Lin, William Yang Wang, Lijuan Wang, and Zicheng Liu. An empirical study of end-to-end video-language transformers with masked visual modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22898-22909, 2023. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 305, + 545, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 305, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 308, + 305, + 545, + 371 + ], + "type": "text", + "content": "[17] Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. 
In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pages 297–304. JMLR Workshop and Conference Proceedings, 2010. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 371, + 545, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 371, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 371, + 545, + 403 + ], + "type": "text", + "content": "[18] Tengda Han, Weidi Xie, and Andrew Zisserman. Turbo training with token dropout. arXiv preprint arXiv:2210.04889, 2022. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 404, + 545, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 404, + 545, + 459 + ], + "spans": [ + { + "bbox": [ + 308, + 404, + 545, + 459 + ], + "type": "text", + "content": "[19] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Doll'ar, and Ross B Girshick. Masked autoencoders are scalable vision learners. 2022 IEEE. In CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15979-15988, 2021. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 459, + 545, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 459, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 308, + 459, + 545, + 514 + ], + "type": "text", + "content": "[20] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. Training compute-optimal large language models. arXiv preprint arXiv:2203.15556, 2022. 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 514, + 545, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 514, + 545, + 570 + ], + "spans": [ + { + "bbox": [ + 308, + 514, + 545, + 570 + ], + "type": "text", + "content": "[21] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 2, 3, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 570, + 545, + 614 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 570, + 545, + 614 + ], + "spans": [ + { + "bbox": [ + 308, + 570, + 545, + 614 + ], + "type": "text", + "content": "[22] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. In International Conference on Learning Representations, 2021. 2, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 614, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 614, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 614, + 545, + 669 + ], + "type": "text", + "content": "[23] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Qiang Liu, et al. Language is not all you need: Aligning perception with language models. arXiv preprint arXiv:2302.14045, 2023. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 669, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 669, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 669, + 545, + 713 + ], + "type": "text", + "content": "[24] Md Mohaiminul Islam and Gedas Bertasius. 
Long movie clip classification with state-space video models. In European Conference on Computer Vision, pages 87-104. Springer, 2022. 1, 2" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "14394" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 126 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 126 + ], + "type": "text", + "content": "[25] Andrew Jaegle, Felix Gimeno, Andy Brock, Oriol Vinyals, Andrew Zisserman, and Joao Carreira. Perceiver: General perception with iterative attention. In International conference on machine learning, pages 4651-4664. PMLR, 2021. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 128, + 288, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 128, + 288, + 193 + ], + "spans": [ + { + "bbox": [ + 48, + 128, + 288, + 193 + ], + "type": "text", + "content": "[26] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning, pages 4904-4916. PMLR, 2021. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 194, + 288, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 194, + 288, + 226 + ], + "spans": [ + { + "bbox": [ + 48, + 194, + 288, + 226 + ], + "type": "text", + "content": "[27] Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. arXiv preprint arXiv:2001.04451, 2020. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 228, + 288, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 228, + 288, + 271 + ], + "spans": [ + { + "bbox": [ + 48, + 228, + 288, + 271 + ], + "type": "text", + "content": "[28] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017. 2, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 272, + 288, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 272, + 288, + 304 + ], + "spans": [ + { + "bbox": [ + 48, + 272, + 288, + 304 + ], + "type": "text", + "content": "[29] Jie Lei, Tamara L Berg, and Mohit Bansal. Revealing single frame bias for video-and-language learning. arXiv preprint arXiv:2206.03428, 2022. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 304, + 288, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 304, + 288, + 349 + ], + "spans": [ + { + "bbox": [ + 48, + 304, + 288, + 349 + ], + "type": "text", + "content": "[30] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023. 
1, 2, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 350, + 288, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 350, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 350, + 288, + 392 + ], + "type": "text", + "content": "[31] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023. 2, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 394, + 288, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 394, + 288, + 448 + ], + "spans": [ + { + "bbox": [ + 48, + 394, + 288, + 448 + ], + "type": "text", + "content": "[32] Linjie Li, Zhe Gan, Kevin Lin, Chung-Ching Lin, Zicheng Liu, Ce Liu, and Lijuan Wang. Lavender: Unifying videolanguage understanding as masked language modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23119-23129, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 449, + 288, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 288, + 502 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 288, + 502 + ], + "type": "text", + "content": "[33] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23390-23400, 2023. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 504, + 288, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 504, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 504, + 288, + 557 + ], + "type": "text", + "content": "[34] Kevin Lin, Faisal Ahmed, Linjie Li, Chung-Ching Lin, Ehsan Azarnasab, Zhengyuan Yang, Jianfeng Wang, Lin Liang, Zicheng Liu, Yumao Lu, Ce Liu, and Lijuan Wang. Mm-vid: Advancing video understanding with gpt-4v(ision), 2023. 2, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 559, + 288, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 288, + 613 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 288, + 613 + ], + "type": "text", + "content": "[35] Yuanze Lin, Chen Wei, Huiyu Wang, Alan Yuille, and Cihang Xie. Smaug: Sparse masked autoencoder for efficient video-language pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2459-2469, 2023. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 614, + 288, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 288, + 669 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 288, + 669 + ], + "type": "text", + "content": "[36] Ruyang Liu, Jingjia Huang, Ge Li, Jiashi Feng, Xinglong Wu, and Thomas H Li. Revisiting temporal modeling for clip-based image-to-video knowledge transferring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6555-6564, 2023. 2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "type": "text", + "content": "[37] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3202-3211, 2022. 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[38] Huaishao Luo, Lei Ji, Ming Zhong, Yang Chen, Wen Lei, Nan Duan, and Tianrui Li. Clip4clip: An empirical study of clip for end to end video clip retrieval and captioning. Neurocomputing, 508:293-304, 2022. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 118, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 162 + ], + "type": "text", + "content": "[39] Chuofan Ma, Qiushan Guo, Yi Jiang, Ping Luo, Zehuan Yuan, and Xiaojuan Qi. Rethinking resolution in the context of efficient video recognition. Advances in Neural Information Processing Systems, 35:37865-37877, 2022. 2, 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 163, + 545, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 196 + ], + "type": "text", + "content": "[40] Yue Ma, Tianyu Yang, Yin Shan, and Xiu Li. Simvtp: Simple video text pre-training with masked autoencoders. arXiv preprint arXiv:2212.03490, 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 198, + 545, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 198, + 545, + 241 + ], + "spans": [ + { + "bbox": [ + 307, + 198, + 545, + 241 + ], + "type": "text", + "content": "[41] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. arXiv preprint arXiv:2308.09126, 2023. 2, 3, 4, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 243, + 545, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 243, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 243, + 545, + 308 + ], + "type": "text", + "content": "[42] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2630-2640, 2019. 2, 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 310, + 545, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 310, + 545, + 374 + ], + "spans": [ + { + "bbox": [ + 307, + 310, + 545, + 374 + ], + "type": "text", + "content": "[43] Antoine Miech, Jean-Baptiste Alayrac, Lucas Smaira, Ivan Laptev, Josef Sivic, and Andrew Zisserman. End-to-end learning of visual representations from uncurated instructional videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9879-9889, 2020. 
2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 376, + 545, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 376, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 545, + 442 + ], + "type": "text", + "content": "[44] Arsha Nagrani, Paul Hongsuck Seo, Bryan Seybold, Anja Hauth, Santiago Manen, Chen Sun, and Cordelia Schmid. Learning audio-video modalities from image captions. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XIV, pages 407-426. Springer, 2022. 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 444, + 545, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 444, + 545, + 476 + ], + "spans": [ + { + "bbox": [ + 307, + 444, + 545, + 476 + ], + "type": "text", + "content": "[45] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 478, + 545, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 478, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 307, + 478, + 545, + 521 + ], + "type": "text", + "content": "[46] Junting Pan, Ziyi Lin, Xiatian Zhu, Jing Shao, and Hongsheng Li. St-adapter: Parameter-efficient image-to-video transfer learning. Advances in Neural Information Processing Systems, 35:26462-26477, 2022. 2, 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 523, + 545, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 523, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 307, + 523, + 545, + 588 + ], + "type": "text", + "content": "[47] Viorica Pătrăucean, Lucas Smaira, Ankush Gupta, Adrià Recasens Continente, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Joseph Heyward, Mateusz Malinowski, Yi Yang, et al. Perception test: A diagnostic benchmark for multimodal video models. arXiv preprint arXiv:2305.13786, 2023. 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 590, + 545, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 545, + 623 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 545, + 623 + ], + "type": "text", + "content": "[48] Bowen Peng, Jeffrey Quesnelle, Honglu Fan, and Enrico Shippole. Yarn: Efficient context window extension of large language models. arXiv preprint arXiv:2309.00071, 2023. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 624, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 545, + 678 + ], + "type": "text", + "content": "[49] AJ Piergiovanni, Weicheng Kuo, and Anelia Angelova. Rethinking video vits: Sparse video tubes for joint image and video learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2214-2224, 2023. 2, 3, 4, 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "text", + "content": "[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 
Learning" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "14395" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 105 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 105 + ], + "type": "text", + "content": "transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 106, + 287, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 106, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 106, + 287, + 162 + ], + "type": "text", + "content": "[51] Michael S Ryoo, Keerthana Gopalakrishnan, Kumara Kahapatitiya, Ted Xiao, Kanishka Rao, Austin Stone, Yao Lu, Julian Ibarz, and Anurag Arnab. Token turing machines. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19070-19081, 2023. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 162, + 288, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 162, + 288, + 227 + ], + "spans": [ + { + "bbox": [ + 48, + 162, + 288, + 227 + ], + "type": "text", + "content": "[52] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, 2018. 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 228, + 288, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 228, + 288, + 270 + ], + "spans": [ + { + "bbox": [ + 48, + 228, + 288, + 270 + ], + "type": "text", + "content": "[53] Karen Simonyan and Andrew Zisserman. Two-stream convolutional networks for action recognition in videos. Advances in neural information processing systems, 27, 2014. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 272, + 288, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 272, + 288, + 337 + ], + "spans": [ + { + "bbox": [ + 48, + 272, + 288, + 337 + ], + "type": "text", + "content": "[54] Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15638-15650, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 338, + 288, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 338, + 288, + 381 + ], + "spans": [ + { + "bbox": [ + 48, + 338, + 288, + 381 + ], + "type": "text", + "content": "[55] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. 
In Advances in Neural Information Processing Systems, 2022. 2, 3, 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 383, + 288, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 383, + 288, + 437 + ], + "spans": [ + { + "bbox": [ + 48, + 383, + 288, + 437 + ], + "type": "text", + "content": "[56] Du Tran, Heng Wang, Lorenzo Torresani, Jamie Ray, Yann LeCun, and Manohar Paluri. A closer look at spatiotemporal convolutions for action recognition. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 6450-6459, 2018. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 437, + 288, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 437, + 288, + 481 + ], + "spans": [ + { + "bbox": [ + 48, + 437, + 288, + 481 + ], + "type": "text", + "content": "[57] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 482, + 288, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 482, + 288, + 536 + ], + "spans": [ + { + "bbox": [ + 48, + 482, + 288, + 536 + ], + "type": "text", + "content": "[58] Junke Wang, Dongdong Chen, Zuxuan Wu, Chong Luo, Luowei Zhou, Yucheng Zhao, Yujia Xie, Ce Liu, Yu-Gang Jiang, and Lu Yuan. Omnivl: One foundation model for image-language and video-language tasks. Advances in neural information processing systems, 35:5696-5710, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 537, + 288, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 537, + 288, + 569 + ], + "spans": [ + { + "bbox": [ + 48, + 537, + 288, + 569 + ], + "type": "text", + "content": "[59] Sinong Wang, Belinda Z Li, Madian Khabsa, Han Fang, and Hao Ma. Linformer: Self-attention with linear complexity. arXiv preprint arXiv:2006.04768, 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 570, + 288, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 570, + 288, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 570, + 288, + 624 + ], + "type": "text", + "content": "[60] Xin Wang, Jiawei Wu, Junkun Chen, Lei Li, Yuan-Fang Wang, and William Yang Wang. Vatex: A large-scale, high-quality multilingual dataset for video-and-language research. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4581-4591, 2019. 2, 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 625, + 288, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 288, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 288, + 679 + ], + "type": "text", + "content": "[61] Yi Wang, Kunchang Li, Yizhuo Li, Yinan He, Bingkun Huang, Zhiyu Zhao, Hongjie Zhang, Jilan Xu, Yi Liu, Zun Wang, et al. Internvideo: General video foundation models via generative and discriminative learning. arXiv preprint arXiv:2212.03191, 2022. 
2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 681, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 288, + 714 + ], + "type": "text", + "content": "[62] Zhenhailong Wang, Manling Li, Ruochen Xu, Luowei Zhou, Jie Lei, Xudong Lin, Shuohang Wang, Ziyi Yang, Chenguang Zhu, Derek Hoiem, et al. Language models with" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "image descriptors are strong few-shot video-language learners. Advances in Neural Information Processing Systems, 35: 8483-8497, 2022. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "text", + "content": "[63] Chao-Yuan Wu and Philipp Krahenbuhl. Towards long-form video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1884-1894, 2021. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 152, + 547, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 547, + 219 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 547, + 219 + ], + "type": "text", + "content": "[64] Chao-Yuan Wu, Yanghao Li, Karttikeya Mangalam, Haoqi Fan, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Memvit: Memory-augmented multiscale vision transformer for efficient long-term video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13587-13597, 2022. 1, 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 220, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 274 + ], + "type": "text", + "content": "[65] Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3733-3742, 2018. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 276, + 547, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 276, + 547, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 276, + 547, + 342 + ], + "type": "text", + "content": "[66] Hu Xu, Gargi Ghosh, Po-Yao Huang, Dmytro Okhonko, Armen Aghajanyan, Florian Metze, Luke Zettlemoyer, and Christoph Feichtenhofer. Videoclip: Contrastive pre-training for zero-shot video-text understanding. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 6787-6800, 2021. 2, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 343, + 545, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 343, + 545, + 388 + ], + "spans": [ + { + "bbox": [ + 307, + 343, + 545, + 388 + ], + "type": "text", + "content": "[67] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. 
Msr-vtt: A large video description dataset for bridging video and language. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5288-5296, 2016. 2, 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 388, + 545, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 454 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 454 + ], + "type": "text", + "content": "[68] Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5036-5045, 2022. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 456, + 545, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 545, + 510 + ], + "type": "text", + "content": "[69] Kashu Yamazaki, Khoa Vo, Quang Sang Truong, Bhiksha Raj, and Ngan Le. Vlint: visual-linguistic transformer-in-transformer for coherent video paragraph captioning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 3081–3090, 2023. 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 512, + 545, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 512, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 307, + 512, + 545, + 567 + ], + "type": "text", + "content": "[70] Shen Yan, Xuehan Xiong, Anurag Arnab, Zhichao Lu, Mi Zhang, Chen Sun, and Cordelia Schmid. Multiview transformers for video recognition. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3333-3343, 2022. 2, 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 568, + 545, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 545, + 612 + ], + "type": "text", + "content": "[71] Shen Yan, Tao Zhu, Zirui Wang, Yuan Cao, Mi Zhang, Soham Ghosh, Yonghui Wu, and Jiahui Yu. Video-text modeling with zero-shot transfer from contrastive captioners. arXiv preprint arXiv:2212.04979, 2022. 1, 2, 3, 4, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 613, + 545, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 657 + ], + "type": "text", + "content": "[72] Antoine Yang, Antoine Miech, Josef Sivic, Ivan Laptev, and Cordelia Schmid. Zero-shot video question answering via frozen bidirectional language models. Advances in Neural Information Processing Systems, 35:124-141, 2022. 1, 2, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 658, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 713 + ], + "type": "text", + "content": "[73] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 
1, 2, 3" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "14396" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 508 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "text", + "content": "[74] Shoubin Yu, Jaemin Cho, Prateek Yadav, and Mohit Bansal. Self-chained image-language model for video localization and question answering. arXiv preprint arXiv:2305.06988, 2023. 2, 3, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 287, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 287, + 172 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 287, + 172 + ], + "type": "text", + "content": "[75] Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, et al. Florence: A new foundation model for computer vision. arXiv preprint arXiv:2111.11432, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 174, + 287, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 174, + 287, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 174, + 287, + 228 + ], + "type": "text", + "content": "[76] Rowan Zellers, Ximing Lu, Jack Hessel, Youngjae Yu, Jae Sung Park, Jize Cao, Ali Farhadi, and Yejin Choi. Merlot: Multimodal neural script knowledge models. Advances in Neural Information Processing Systems, 34:23634-23651, 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 230, + 287, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 230, + 287, + 295 + ], + "spans": [ + { + "bbox": [ + 48, + 230, + 287, + 295 + ], + "type": "text", + "content": "[77] Andy Zeng, Maria Attarian, Krzysztof Marcin Choromanski, Adrian Wong, Stefan Welker, Federico Tombari, Aveek Purohit, Michael S Ryoo, Vikas Sindhwani, Johnny Lee, et al. Socratic models: Composing zero-shot multimodal reasoning with language. In The Eleventh International Conference on Learning Representations, 2022. 2, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 297, + 287, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 297, + 287, + 340 + ], + "spans": [ + { + "bbox": [ + 48, + 297, + 287, + 340 + ], + "type": "text", + "content": "[78] Bowen Zhang, Xiaojie Jin, Weibo Gong, Kai Xu, Zhao Zhang, Peng Wang, Xiaohui Shen, and Jiashi Feng. Multimodal video adapter for parameter efficient video text retrieval. arXiv preprint arXiv:2301.07868, 2023. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 342, + 287, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 287, + 385 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 287, + 385 + ], + "type": "text", + "content": "[79] Qingru Zhang, Minshuo Chen, Alexander Bukharin, Pengcheng He, Yu Cheng, Weizhu Chen, and Tuo Zhao. Adaptive budget allocation for parameter-efficient finetuning. 
arXiv preprint arXiv:2303.10512, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 386, + 287, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 386, + 287, + 419 + ], + "spans": [ + { + "bbox": [ + 48, + 386, + 287, + 419 + ], + "type": "text", + "content": "[80] Zhang Zhang and Dacheng Tao. Slow feature analysis for human action recognition. IEEE transactions on pattern analysis and machine intelligence, 34(3):436-450, 2012. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 420, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 420, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 48, + 420, + 287, + 464 + ], + "type": "text", + "content": "[81] Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. In Proceedings of the AAAI Conference on Artificial Intelligence, 2018. 2, 3, 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 465, + 287, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 465, + 287, + 508 + ], + "spans": [ + { + "bbox": [ + 48, + 465, + 287, + 508 + ], + "type": "text", + "content": "[82] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 2" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14397" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/b9db7707-a86d-4d4c-b962-58bd8f08eecd_content_list.json b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/b9db7707-a86d-4d4c-b962-58bd8f08eecd_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7848e4d251b45a891674f0801a71d526babd6319 --- /dev/null +++ b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/b9db7707-a86d-4d4c-b962-58bd8f08eecd_content_list.json @@ -0,0 +1,1702 @@ +[ + { + "type": "text", + "text": "A Simple Recipe for Language-guided Domain Generalized Segmentation", + "text_level": 1, + "bbox": [ + 112, + 130, + 854, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mohammad Fahes1", + "bbox": [ + 91, + 178, + 250, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tuan-Hung $\\mathrm{V_u^{1,2}}$", + "bbox": [ + 271, + 179, + 408, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Inria", + "bbox": [ + 323, + 198, + 377, + 214 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Andrei Bursuc $^{1,2}$", + "bbox": [ + 431, + 180, + 566, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2 Valeo.ai", + "bbox": [ + 436, + 198, + 516, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Patrick Pérez3", + "bbox": [ + 583, + 180, + 702, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3 Kyutai", + "bbox": [ + 576, + 198, + 643, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Raoul de 
Charette1", + "bbox": [ + 723, + 180, + 872, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://astravision.github.io/FAMix", + "bbox": [ + 272, + 227, + 700, + 243 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 266, + 313, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Generalization to new domains not seen during training is one of the long-standing challenges in deploying neural networks in real-world applications. Existing generalization techniques either necessitate external images for augmentation, and/or aim at learning invariant representations by imposing various alignment constraints. Large-scale pretraining has recently shown promising generalization capabilities, along with the potential of binding different modalities. For instance, the advent of vision-language models like CLIP has opened the doorway for vision models to exploit the textual modality. In this paper, we introduce a simple framework for generalizing semantic segmentation networks by employing language as the source of randomization. Our recipe comprises three key ingredients: (i) the preservation of the intrinsic CLIP robustness through minimal fine-tuning, (ii) language-driven local style augmentation, and (iii) randomization by locally mixing the source and augmented styles during training. Extensive experiments report state-of-the-art results on various generalization benchmarks. Code is accessible on the project page1.", + "bbox": [ + 75, + 299, + 473, + 602 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 648, + 209, + 664 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A prominent challenge associated with deep neural networks is their constrained capacity to generalize when confronted with shifts in data distribution. This limitation is rooted in the assumption of data being independent and identically distributed, a presumption that frequently proves unrealistic in real-world scenarios. For instance, in safety-critical applications like autonomous driving, it is imperative for a segmentation model to exhibit resilient generalization capabilities when dealing with alterations in lighting, variations in weather conditions, and shifts in geographic location, among other considerations.", + "bbox": [ + 75, + 675, + 468, + 840 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address this challenge, domain adaptation [13, 18, 33, 34, 47, 48] has emerged; its core principle revolves around", + "bbox": [ + 76, + 842, + 468, + 873 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "aligning the distributions of both the source and target domains. However, DA hinges on having access to target data, which may not always be available. Even when accessible, this data might not encompass the full spectrum of distributions encountered in diverse real-world scenarios. Domain generalization [31, 32, 49, 52, 62, 63] overcomes this limitation by enhancing the robustness of models to arbitrary and previously unseen domains.", + "bbox": [ + 500, + 267, + 890, + 388 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The training of segmentation networks is often backed by large-scale pretraining as initialization for the feature representation. Until now, to the best of our knowledge, domain generalization for semantic segmentation (DGSS) networks [7, 19, 23, 24, 29, 36, 37, 51, 53, 58] are pretrained with ImageNet [9]. 
The underlying concept is to transfer the representations from the upstream task of classification to the downstream task of segmentation.", + "bbox": [ + 496, + 391, + 892, + 513 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lately, contrastive language image pretraining (CLIP) [22, 39, 55, 56] has demonstrated that transferable visual representations could be learned from the sole supervision of loose natural language descriptions at very large scale. Subsequently, a plethora of applications have been proposed using CLIP [39], including zero-shot semantic segmentation [30, 59], image editing [27], transfer learning [10, 40], open-vocabulary object detection [16], few-shot learning [64, 65] etc. A recent line of research proposes fine-tuning techniques to preserve the robustness of CLIP under distribution shift [15, 26, 45, 50], but they are limited to classification.", + "bbox": [ + 496, + 515, + 893, + 696 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we aim at answering the following question: How to leverage CLIP pretraining for enhanced domain generalization for semantic segmentation? The motivation for rethinking DGSS with CLIP is twofold. On one hand, distribution robustness is a notable characteristic of CLIP [12]. On the other hand, the language modality offers an extra source of information compared to unimodal pretrained models.", + "bbox": [ + 496, + 700, + 893, + 821 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A direct comparison of training two segmentation models under identical conditions but with different pretraining, i.e. ImageNet vs. CLIP, shows that CLIP pretraining does not yield promising results. Indeed, Tab. 1 shows that fine-tuning CLIP-initialized network performs worse than", + "bbox": [ + 496, + 825, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1https://astra-vision.github.io/FAMix", + "bbox": [ + 94, + 886, + 387, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "23428", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/b4e04f344950334779b3d7b3db056ea2afe6a5fef4790b8ab892f14fcd288078.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
PretrainingCBMSANASARAFMean
ImageNet29.0432.1734.2629.874.3622.3828.3426.7625.90
CLIP16.8116.3117.8027.102.958.5814.3513.6114.69
", + "bbox": [ + 81, + 89, + 467, + 137 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table 1. Comparison of ImageNet and CLIP pretraining for out-of-distribution semantic segmentation. The network is DeepLabv3+ with ResNet-50 as backbone. The models are trained on GTAV and the performance (mIoU %) is reported on Cityscapes (C), BDD-100K (B), Mapillary (M), Synthia (S), and ACDC Night (AN), Snow (AS), Rain (AR) and Fog (AF).", + "bbox": [ + 76, + 146, + 468, + 229 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "its ImageNet counterpart on out-of-distribution (OOD) data. This raises doubts about the suitability of CLIP pretraining for DGSS and indicates that it is more prone to overfitting the source distribution at the expense of degrading its original distributional robustness properties. Note that both models converge and achieve similar results on in-domain data. More details are provided in Appendix A.", + "bbox": [ + 76, + 255, + 468, + 361 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This paper shows that we can prevent such behavior with a simple recipe involving minimal fine-tuning, language-driven style augmentation, and mixing. Our approach is coined FAMix, for Freeze, Augment and Mix.", + "bbox": [ + 76, + 366, + 468, + 425 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "It was recently argued that fine-tuning might distort the pretrained representations and negatively affect OOD generalization [26]. To maintain the integrity of the representation, one extreme approach is to entirely freeze the backbone. However, this can undermine representation adaptability and lead to subpar OOD generalization. As a middle-ground strategy balancing adaptation and feature preservation, we suggest minimal fine-tuning of the backbone, where a substantial portion remains frozen, and only the final layers undergo fine-tuning.", + "bbox": [ + 75, + 431, + 468, + 582 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For generalization, we show that rethinking MixStyle [62] leads to significant performance gains. As illustrated in Fig. 1, we mix the statistics of the original source features with augmented statistics mined using language. This helps explore styles beyond the source distribution at training time without using additional image.", + "bbox": [ + 76, + 587, + 468, + 676 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We summarize our contributions as follows:", + "bbox": [ + 96, + 681, + 387, + 695 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a simple framework for DGSS based on minimal fine-tuning of the backbone and language-driven style augmentation. To the best of our knowledge, we are the first to study DGSS with CLIP pretraining.", + "- We propose language-driven class-wise local style augmentation. We mine class-specific local statistics using prompts that express random styles and names of patchwise dominant classes. During training, randomization is performed through patch-wise style mixing of the source and mined styles.", + "- We conduct careful ablations to show the effectiveness of FAMix. Our framework outperforms state-of-the-art approaches in single and multi-source DGSS settings." + ], + "bbox": [ + 76, + 702, + 467, + 897 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b9af8f4cf04702bc00671f6450080cc0d1804d090acfc233174df61ecbd8914a.jpg", + "image_caption": [ + "Figure 1. Mixing strategies. 
(Left) MixStyle [62] consists of a linear mixing between the feature statistics of the source domain(s) S samples. (Right) We apply an augmentation $\\mathcal{A}(.)$ on the source domain statistics, then perform linear mixing between original and augmented statistics. Intuitively, this enlarges the support of the training distribution by leveraging statistics beyond the source domain(s), as well as discovering intermediate domains. $\\mathcal{A}(.)$ could be a language-driven or Gaussian noise augmentation, and we show that the former leads to better generalization results." + ], + "image_footnote": [], + "bbox": [ + 514, + 88, + 880, + 186 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related works", + "text_level": 1, + "bbox": [ + 500, + 362, + 643, + 376 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Domain generalization (DG). The goal of DG is to train, from a single or multiple source domains, models that perform well under arbitrary domain shifts. The DG literature spans a broad range of approaches, including adversarial learning [32, 57], meta-learning [4, 38], data augmentation [60-62] and domain-invariant representation learning [1, 3, 7, 25]. We refer the reader to [49, 63] for comprehensive surveys on DG.", + "bbox": [ + 498, + 388, + 890, + 508 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Domain generalization with CLIP. CLIP [39] exhibits a remarkable distributional robustness [12]. Nevertheless, fine-tuning comes at the expense of sacrificing generalization. Kumar et al. [26] observe that full fine-tuning can distort the pretrained representation, and propose a two-stage strategy, consisting of training a linear probe with a frozen feature extractor, then fine-tuning both. Wortman et al. [50] propose assembling the weights of zero-shot and fine-tuned models. Goyal et al. [15] show that preserving the pretraining paradigm (i.e. contrastive learning) during the adaptation to the downstream task improves both in-domain (ID) and OOD performance without multi-step fine-tuning or weight assembling. CLIPood [45] introduces margin metric softmax training objective and Beta moving average for optimization to handle both open-class and open-domain at test time. On the other hand, distributional robustness could be improved by training a small amount of parameters on top of a frozen CLIP backbone in a teacher-student manner [21, 28]. Other works show that specialized prompt assembling and/or image assembling strategies [2, 14] coupled with label augmentation using the WordNet hierarchy improve robustness in classification.", + "bbox": [ + 496, + 516, + 890, + 848 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Domain Generalized Semantic Segmentation. DGSS methods could be categorized into three main groups: normalization methods, domain randomization (DR) and in", + "bbox": [ + 498, + 854, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "23429", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6dca2118158c16ccc62ceca3ae190802f7c87077359af3f3e1fee053af4e6ef2.jpg", + "image_caption": [ + "Figure 2. Overall process of FAMix. FAMix consists of two steps. (Left) Local style mining consists of dividing the low-level feature activations into patches, which are used for style mining using Prompt-driven Instance Normalization (PIN) [10]. 
Specifically, for each patch, the dominant class is queried from the ground truth, and the mined style is added to corresponding class-specific style bank. (Right) Training the segmentation network is performed with minimal fine-tuning of the backbone. At each iteration, the low-level feature activations are viewed as grids of patches. For each patch, the dominant class is queried using the ground truth, then a style is sampled from the corresponding style bank. Style randomization is performed by normalizing each patch in the grid by its statistics, and transferring the new style which is a mixing between the original style and the sampled one. The network is trained using only a cross-entropy loss." + ], + "image_footnote": [], + "bbox": [ + 98, + 89, + 867, + 265 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "variant representation learning. Normalization methods aim at removing style contribution from the representation. For instance, IBN-Net [36] shows that Instance Normalization (IN) makes the representation invariant to variations in the scene appearance (e.g., change of colors, illumination, etc.), and that combining IN and batch normalization (BN) helps the synthetic-to-real generalization. SAN & SAW [37] proposes semantic-aware feature normalization and whitening, while RobustNet [7] proposes an instance selective whitening loss, where only feature covariances that are sensitive to photometric transformations are whitened. DR aims instead at diversifying the data during training. Some methods use additional data for DR. For example, WildNet [29] uses ImageNet [9] data for content and style extension learning, while TLDR [24] proposes learning texture from random style images. Other methods like SiamDoGe [51] perform DR solely by data augmentation, using a Siamese [6] structure. Finally in the invariant representation learning group, SPC-Net [19] builds a representation space based on style and semantic projection and clustering, and SHADE [58] regularizes the training with a style consistency loss and a retrospection consistency loss.", + "bbox": [ + 75, + 398, + 472, + 733 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 752, + 166, + 767 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "FAMix proposes an effective recipe for DGSS through the blending of simple ingredients. It consists of two stages (see Fig. 2): (i) Local style mining from language (Sec. 3.2); (ii) Training of a segmentation network with minimal fin-tuning and local style mixing (Sec. 3.3). In Fig. 2 and in the following, CLIP-I1 denotes the stem layers and Layer1 of CLIP image encoder, CLIP-I2 the remaining layers excluding the attention pooling, and CLIP-T the text encoder.", + "bbox": [ + 75, + 779, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We start with some preliminary background knowledge, introducing AdaIN and PIN which are essential to our work.", + "bbox": [ + 498, + 398, + 888, + 429 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Preliminaries", + "text_level": 1, + "bbox": [ + 500, + 438, + 638, + 453 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Adaptive Instance Normalization (AdaIN). 
For a feature map $\\mathbf{f} \\in \\mathbb{R}^{h \\times w \\times c}$ , AdaIN [20] shows that the channel-wise mean $\\boldsymbol{\\mu} \\in \\mathbb{R}^c$ and standard deviation $\\sigma \\in \\mathbb{R}^c$ capture information about the style of the input image, allowing style transfer between images. Hence, stylizing a source feature $\\mathbf{f}_s$ with an arbitrary target style $(\\mu(\\mathbf{f}_t), \\sigma(\\mathbf{f}_t))$ reads:", + "bbox": [ + 498, + 462, + 890, + 553 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {A d a I N} \\left(\\mathbf {f} _ {\\mathrm {s}}, \\mathbf {f} _ {\\mathrm {t}}\\right) = \\sigma \\left(\\mathbf {f} _ {\\mathrm {t}}\\right) \\left(\\frac {\\mathbf {f} _ {\\mathrm {s}} - \\mu \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}{\\sigma \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}\\right) + \\mu \\left(\\mathbf {f} _ {\\mathrm {t}}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 557, + 559, + 890, + 589 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "with $\\mu (\\cdot)$ and $\\sigma (\\cdot)$ the mean and standard deviation of input feature; multiplications and additions being element-wise.", + "bbox": [ + 498, + 597, + 888, + 627 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Prompt-driven Instance Normalization (PIN). PIN was introduced for prompt-driven zero-shot domain adaptation in PØDA [10]. It replaces the target style $(\\mu(\\mathbf{f}_{\\mathrm{t}}), \\sigma(\\mathbf{f}_{\\mathrm{t}}))$ in AdaIN (1) with two estimizable variables $(\\mu, \\sigma)$ guided by a single prompt in natural language. The rationale is to leverage a frozen CLIP [39] to mine visual styles from the prompt representation in the shared space. Given a prompt $P$ and a feature map $\\mathbf{f}_{\\mathrm{s}}$ , PIN reads as:", + "bbox": [ + 498, + 630, + 890, + 751 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {P I N} _ {(P)} \\left(\\mathbf {f} _ {\\mathrm {s}}\\right) = \\sigma \\left(\\frac {\\mathbf {f} _ {\\mathrm {s}} - \\mu \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}{\\sigma \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}\\right) + \\boldsymbol {\\mu}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 758, + 890, + 789 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mu$ and $\\sigma$ are optimized using gradient descent, such that the cosine distance between the visual feature representation and the prompt representation is minimized.", + "bbox": [ + 496, + 795, + 890, + 839 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Different from PØDA which mines styles globally with a predetermined prompt describing the target domain, we make use of PIN to mine class-specific styles using local patches of the features, leveraging random style prompts.", + "bbox": [ + 496, + 839, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "23430", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Further, we show the effectiveness of incorporating the class name in the prompt for better style mining.", + "bbox": [ + 76, + 90, + 470, + 121 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Local Style Mining", + "text_level": 1, + "bbox": [ + 76, + 128, + 261, + 147 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our approach is to leverage PIN to mine class-specific style banks that used for feature augmentation when training FAMix. 
Given a set of cropped images $\\mathcal{I}_{\\mathrm{s}}$ , we encode them using CLIP-I1 to get a set of low-level features $\\mathcal{F}_{\\mathrm{s}}$ . Each batch $b$ of features $\\mathbf{f}_{\\mathrm{s}} \\in \\mathcal{F}_{\\mathrm{s}}$ is cropped into $m$ patches, resulting in $b \\times m$ patches $\\mathbf{f}_p$ , associated ground-truth annotation $\\mathbf{y}_p$ , of size $h / \\sqrt{m} \\times w / \\sqrt{m} \\times c$ .", + "bbox": [ + 75, + 152, + 468, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We aim at populating $K$ style banks, $K$ being the total number of classes. For a feature patch $\\mathbf{f}_p$ , we compute the dominant class from the corresponding label patch $\\mathbf{y}_p$ , and get its name $t_p$ from the predefined classes in the training dataset. Given a set of prompts describing random styles $\\mathcal{R}$ , the target prompt $P_p$ is formed by concatenating a randomly sampled style prompt $r$ from $\\mathcal{R}$ and $t_p$ (e.g., retro futurism style building). We show in the experiments (Sec. 4.4) that our method is not very sensitive to the prompt design, yet our prompt construction works best.", + "bbox": [ + 75, + 258, + 470, + 410 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The idea is to mine proxy domains and explore intermediate ones in a class-aware manner (as detailed in Sec. 3.3), which makes our work fundamentally different from [10], that steers features towards a particular target style and corresponding domain, and better suited to generalization.", + "bbox": [ + 75, + 410, + 468, + 484 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To handle the class imbalance problem, we simply select one feature patch $\\mathbf{f}_p$ per class among the total $b\\times m$ patches, as shown in Fig. 2. Consequently, we apply PIN (2) to optimize the local styles to match the representations of their corresponding prompts, and use the mined styles to populate the corresponding style banks. The complete procedure is outlined in Algorithm 1.", + "bbox": [ + 75, + 484, + 470, + 590 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The resulting style banks $\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\}$ are used for domain randomization during training.", + "bbox": [ + 76, + 590, + 470, + 622 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Training FAMix", + "text_level": 1, + "bbox": [ + 76, + 630, + 238, + 647 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Style randomization. During training, randomly cropped images $\\mathcal{I}_{\\mathrm{s}}$ are encoded into $\\mathbf{f}_{\\mathrm{s}}$ using CLIP-11. Each batch of feature maps $\\mathbf{f}_{\\mathrm{s}}$ is viewed as a grid of $m$ patches, without cropping them. For each patch $\\mathbf{f}_{\\mathrm{s}}^{(ij)}$ within the grid, the dominant class $c_{p}^{(ij)}$ is queried using the corresponding ground truth patch $\\mathbf{y}_{\\mathrm{s}}^{(ij)}$ , and a style is randomly sampled from the corresponding mined bank $\\mathcal{T}(c_p^{(ij)})$ . We then apply patch-wise convex combination (i.e., style mixing) of the original style of the patch and the mined style. 
Specifically, for an arbitrary patch $\\mathbf{f}_{\\mathrm{s}}^{(ij)}$ , our local style mixing reads:", + "bbox": [ + 75, + 652, + 468, + 820 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mu_ {m i x} \\leftarrow (1 - \\alpha) \\mu \\left(\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)}\\right) + \\alpha \\boldsymbol {\\mu} ^ {(i j)} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 160, + 830, + 468, + 849 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma_ {m i x} \\leftarrow (1 - \\alpha) \\sigma \\left(\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)}\\right) + \\alpha \\boldsymbol {\\sigma} ^ {(i j)}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 852, + 468, + 871 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "with $(\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})\\in \\mathcal{T}^{(c_p^{(ij)})}$ and $\\alpha \\in [0,1]^c$", + "bbox": [ + 76, + 881, + 377, + 902 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: Local Style Mining." + ], + "code_body": "Input: Set $\\mathcal{F}_{\\mathrm{s}}$ of source features batches. Label set $\\mathcal{V}_{\\mathrm{s}}$ in $\\mathcal{D}_{s}$ Set of random prompts $\\mathcal{R}$ and class names $\\mathcal{C}$ \nParam: Number of patches $m$ Number of classes $K$ Output: $K$ sets $\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\}$ of class-wise augmented statistics. \n1 $\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\} \\gets \\emptyset$ \n2 foreach $(\\mathbf{f}_s\\in \\mathcal{F}_s,\\mathbf{y}_s\\in \\mathcal{Y}_s)$ do \n3 $\\{\\mathbf{y}_p\\} \\leftarrow$ crop-patch(y,s,m) \n4 $\\{c_p\\} ,\\{P_p\\} ,\\{f_p\\} \\leftarrow \\emptyset$ \n5 foreach $\\mathbf{y}_p\\in \\{\\mathbf{y}_p\\}$ do \n6 $c_{p}\\gets$ get-dominant-class(yp) if $c_{p}$ not in $\\{c_p\\}$ then \n8 $\\{c_p\\} \\leftarrow c_p$ \n9 $\\{P_p\\} \\leftarrow$ concat(sample(R),get-name(cp)) \n10 $\\{f_p\\} \\leftarrow f_p$ \n11 end \n12 end \n13 $\\mu^{(c_p)},\\sigma^{(c_p)},\\mathbf{f}_p'\\gets \\mathrm{PIN}_{(P_p)}(\\mathbf{f}_p)$ \n14 $\\mathcal{T}^{(c_p)}\\gets \\mathcal{T}^{(c_p)}\\cup \\{(\\boldsymbol{\\mu}^{(c_p)},\\boldsymbol{\\sigma}^{(c_p)})\\}$ \n15 end", + "bbox": [ + 503, + 111, + 898, + 426 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2: Training FAMix." + ], + "code_body": "Input: Set $\\mathcal{F}_{\\mathrm{s}}$ of source features batches. Label set $\\mathcal{V}_{\\mathrm{s}}$ in $\\mathcal{D}_{\\mathrm{s}}$ $K$ sets $\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\}$ of class-wise augmented statistics. \nParam: Number of patches m. 
\nforeach $(\\mathbf{f}_s\\in \\mathcal{F}_s,\\mathbf{y}_s\\in \\mathcal{Y}_s)$ do \n $\\alpha \\sim \\mathrm{Beta}(0.1,0.1)$ \nfor $(i,j)\\in [1,\\sqrt{m} ]\\times [1,\\sqrt{m} ]$ do $c_{p}^{(ij)}\\gets \\mathrm{get - dominant - class}(\\mathbf{y}_{s}^{(ij)})$ $\\mu^{(ij)},\\sigma^{(ij)}\\gets \\mathrm{sample}(\\mathcal{T}^{(c_p^{(ij)})})$ $\\mu_{mix}\\gets (1 - \\alpha).\\mu (\\mathbf{f}_s^{(ij)}) + \\alpha .\\mu^{(ij)}$ $\\sigma_{mix}\\gets (1 - \\alpha).\\sigma (\\mathbf{f}_s^{(ij)}) + \\alpha .\\sigma^{(ij)}$ $\\mathbf{f}_{\\mathrm{s}}^{(ij)}\\gets \\mathrm{AdaIN}(\\mathbf{f}_{\\mathrm{s}}^{(ij)},\\mu_{mix},\\sigma_{mix})$ \nend \n $\\tilde{\\mathbf{y}}_{\\mathrm{s}}\\gets \\mathrm{CLIP - I2}(\\mathbf{f}_{\\mathrm{s}})$ \nLoss $=$ cross-entropy $(\\tilde{\\mathbf{y}}_{s},\\mathbf{y}_{s})$ \nend", + "bbox": [ + 501, + 472, + 898, + 729 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Fig. 1, our style mixing strategy differs from [62] which applies a linear interpolation between styles extracted from the images of a limited set of source domain(s) assumed to be available for training. Here, we view the mined styles as variations of multiple proxy target domains defined by the prompts. Training is conducted over all the paths in the feature space between the source and proxy domains without requiring any additional image during training other than the one from source.", + "bbox": [ + 496, + 763, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "23431", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/549c3483b09c37d57964f807531ef4d42d53cc49031eea6252f012ebb3b26a6c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methodarch.CBMSANASARAFMean
RobustNet [7]36.5835.2040.3328.306.3229.9733.0232.5630.29
SAN & SAW [37]39.7537.3441.8630.79-----
Pin the memory [23]41.0034.6037.4027.083.845.515.897.2720.32
SHADE [58]44.6539.2843.3428.418.1830.3835.4436.8733.32
SiamDoGe [51]42.9637.5440.6428.3410.6030.7135.8436.4532.89
DPCL [53]RN5044.8740.2146.74------
SPC-Net [19]44.1040.4645.51------
NP [11]40.6235.5638.9227.65-----
WildNet* [29]44.6238.4246.0931.348.2730.2936.3235.3933.84
TLDR* [24]46.5142.5846.1830.5713.1336.0238.8940.5836.81
FAMix (ours)48.1545.6152.1134.2314.9637.0938.6640.2538.88
SAN & SAW [37]45.3341.1840.7731.84-----
\\( SHADE^† \\)[58]46.6643.6645.5031.587.5832.4836.9036.6935.13
WildNet* [29]RN10145.7941.7347.0832.51-----
TLDR* [24]47.5844.8848.8033.14-----
FAMix (ours)49.4746.4051.9736.7219.8941.3840.9142.1541.11
", + "bbox": [ + 78, + 89, + 470, + 277 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2. Single-source DGSS trained on G. Performance (mIoU %) of FAMix compared to other DGSS methods trained on G and evaluated on C, S, M, S, A for ResNet-50 ('RN50') and ResNet-101 ('RN101') backbone architecture ('arch'). * indicates the use of extra-data. † indicates the use of the full data for training. We emphasize best and second best results.", + "bbox": [ + 75, + 285, + 468, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Style transfer is applied through AdaIN (1). Only the standard cross-entropy loss between the ground truth $\\mathbf{y}_{\\mathrm{s}}$ and the prediction $\\tilde{\\mathbf{y}}_{\\mathrm{s}}$ is applied for training the network. Algorithm 2 shows the training steps of FAMix.", + "bbox": [ + 75, + 386, + 468, + 446 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Minimal fine-tuning. During training, we fine-tune only the last few layers of the backbone. Subsequently, we examine various alternatives and show that the minimal extent of fine-tuning is the crucial factor in witnessing the effectiveness of our local style mixing strategy.", + "bbox": [ + 75, + 452, + 468, + 527 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Previous works [11, 36, 62] suggest that shallow feature statistics capture style information while deeper features encode semantic content. Consequently, some DGSS methods focus on learning style-agnostic representations [7, 36, 37], but this can compromise the expressiveness of the representation and suppress content information. In contrast, our intuition is to retain these identified traits by introducing variability to the shallow features through augmentation and mixing. Simultaneously, we guide the network to learn invariant high-level representations by training the final layers of the backbone with a label-preserving assumption, using a standard cross-entropy loss.", + "bbox": [ + 75, + 527, + 470, + 710 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 724, + 209, + 742 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental setup", + "text_level": 1, + "bbox": [ + 76, + 750, + 264, + 767 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Synthetic datasets. GTAV [41] and SYNTHIA [42] are used as synthetic datasets. GTAV consists of 24966 images split into 12403 images for training, 6382 for validation and 6181 for testing. SYNTHIA consists of 9400 images: 6580 for training and 2820 for validation. GTAV and SYNTHIA are denoted by $\\mathsf{G}$ and $\\mathsf{S}$ , respectively.", + "bbox": [ + 75, + 773, + 468, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Real datasets. Cityscapes [8], BDD-100K [54], and Mapillary [35] contain 2975, 7000, and 18000 images for train-", + "bbox": [ + 75, + 869, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ing and 500, 1000, and 2000 images for validation, respectively. ACDC [44] is a dataset of driving scenes in adverse conditions: night, snow, rain and fog with respectively 106, 100, 100 and 100 images in the validation sets. C, B, and M denote Cityscapes, BDD-100K and Mapillary, respectively; AN, AS, AR and AF denote night, snow, rain and fog subsets of ACDC, respectively.", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation details. 
Following previous works [7, 19, 23, 24, 29, 37, 51, 53, 58], we adopt DeepLabv $3+$ [5] as segmentation model. ResNet-50 and ResNet-101 [17], initialized with CLIP pretrained weights, are used in our experiments as backbones. Specifically, we remove the attention pooling layer and add a randomly initialized decoder head. The output stride is 16. Single-source and multisource models are trained respectively for $40K$ and $60K$ iterations with a batch size of 8. The training images are cropped to $768 \\times 768$ . Stochastic Gradient Descent (SGD) with a momentum of 0.9 and weight decay of $10^{-4}$ is used as optimizer. Polynomial decay with a power of 0.9 is used, with an initial learning rate of $10^{-1}$ for the classifier and $10^{-2}$ for the backbone. We use color jittering and horizontal flip as data augmentation. Label smoothing regularization [46] is adopted. For style mining, Layer1 features are divided into 9 patches. Each patch is resized to $56 \\times 56$ , corresponding to the dimensions of Layer1 features for an input image of size $224 \\times 224$ (i.e. the input dimension of CLIP). We use ImageNet templates for each prompt.", + "bbox": [ + 496, + 199, + 892, + 501 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation metric. We evaluate our models on the validation sets of the unseen target domains with mean Intersection over Union (mIoU%) of the 19 shared semantic classes. For each experiment, we report the average of three runs.", + "bbox": [ + 496, + 505, + 890, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Comparison with DGSS methods", + "text_level": 1, + "bbox": [ + 498, + 573, + 790, + 589 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Single-source DGSS. We compare FAMix with state-of-the-art DGSS methods under the single-source setting.", + "bbox": [ + 496, + 597, + 890, + 627 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training on GTAV (G) as source, Tab. 2 reports models trained with either ResNet-50 or ResNet-101 backbones. The unseen target datasets are C, B, M, S, and the four subsets of A. Tab. 2 shows that our method significantly outperforms all the baselines on all the datasets for both backbones. We note that WildNet [29] and TLDR [24] use extra-data, while SHADE [58] uses the full G dataset (24,966 images) for training with ResNet-101. Class-wise performances are reported in Appendix B.", + "bbox": [ + 496, + 630, + 890, + 766 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training on Cityscapes (C) as source, Tab. 3 reports performance with ResNet-50 backbone. The unseen target datasets are B, M, G, and S. The table shows that our method outperforms the baseline in average, and is competitive to SOTA on G and M.", + "bbox": [ + 496, + 770, + 890, + 844 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multi-source DGSS. We also show the effectiveness of FAMix in the multi-source setting, training on $G + S$ and", + "bbox": [ + 496, + 848, + 890, + 878 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "2https://github.com/openai/CLIP/", + "bbox": [ + 514, + 886, + 769, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "23432", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/88d3c19ee7fc48c226b43975247f85a4ab40ad5b0c885e6fd7ff84f084493544.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | B | M | G | S | Mean
RobustNet [7] | 50.73 | 58.64 | 45.00 | 26.20 | 45.14
Pin the memory [23] | 46.78 | 55.10 | - | - | -
SiamDoGe [51] | 51.53 | 59.00 | 45.08 | 26.67 | 45.57
WildNet* [29] | 50.94 | 58.79 | 47.01 | 27.95 | 46.17
DPCL [53] | 52.29 | - | 46.00 | 26.60 | -
FAMix (ours) | 54.07 | 58.72 | 45.12 | 32.67 | 47.65
", + "bbox": [ + 83, + 88, + 464, + 195 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/11cfc289ef28356b4e518fbec7433a77e23e0a0d96a44636df411f07df20899d.jpg", + "table_caption": [ + "Table 3. Single-source DGSS trained on C. Performance (mIoU %) of FAMix compared to other DGSS methods trained on C and evaluated on B, M, G and S for ResNet-50 backbone. * indicates the use of extra-data. We emphasize best and second best results." + ], + "table_footnote": [], + "table_body": "
Method | C | B | M | Mean
RobustNet [7] | 37.69 | 34.09 | 38.49 | 36.76
Pin the memory [23] | 44.51 | 38.07 | 42.70 | 41.76
SHADE [58] | 47.43 | 40.30 | 47.60 | 45.11
SPC-Net [19] | 46.36 | 43.18 | 48.23 | 45.92
TLDR* [24] | 48.83 | 42.58 | 47.80 | 46.40
FAMix (ours) | 49.41 | 45.51 | 51.61 | 48.84
", + "bbox": [ + 83, + 275, + 464, + 378 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "evaluating on C, B and M. The results reported in Tab. 4 for ResNet-50 backbone outperform state-of-the-art.", + "bbox": [ + 75, + 472, + 468, + 502 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Qualitative results. We visually compare the segmentation results with Pin the memory [23], SHADE [58] and WildNet [29] in Fig. 3. FAMix clearly outperforms other DGSS methods on \"stuff\" (e.g., road and sky) and \"things\" (e.g., bicycle and bus) classes.", + "bbox": [ + 75, + 506, + 468, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Decoder-Probing Fine-Tuning (DP-FT)", + "text_level": 1, + "bbox": [ + 76, + 589, + 411, + 606 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Kumar et al. [26] show that standard fine-tuning may distort the pretrained feature representation, leading to degraded OOD performances for classification. Consequently, they propose a two-step training strategy: (1) Training a linear probe (LP) on top of the frozen backbone features, (2) Finetuning (FT) both the linear probe and the backbone. Inspired by it, Saito et al. [43] apply the same strategy for object detection, which is referred to as Decoder-probing Fine-tuning (DP-FT). They observe that DP-FT improves over DP depending on the architecture. We hypothesize that the effect is also dependent on the pretraining paradigm and the downstream task. As observed in Tab. 1, CLIP might remarkably overfit the source domain when finetuned. In Tab. 5, we compare fine-tuning (FT), decoder-probing (DP) and DP-FT. DP brings improvements over FT since it completely preserves the pretrained representation. Yet, DP major drawback lies in its limitation to adapt features for the downstream task, resulting in suboptimal results. Surprisingly, DP-FT largely falls behind DP, meaning", + "bbox": [ + 75, + 613, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/23379019fd09e40f46d41f0c34d579bedcc41d5edf3536696469cda13103be59.jpg", + "table_caption": [ + "Table 4. Multi-source DGSS. Performance (mIoU %) of FAMix compared to other DGSS methods trained on $\\mathrm{G} + \\mathrm{S}$ and evaluated on C, B, M for ResNet-50 backbone. * indicates the use of extra-data. We emphasize best and second best results." + ], + "table_footnote": [], + "table_body": "
Method | C | B | M | S | AN | AS | AR | AF | Mean
FT | 16.81 | 16.31 | 17.80 | 27.10 | 2.95 | 8.58 | 14.35 | 13.61 | 14.69
DP | 34.13 | 37.67 | 42.21 | 29.10 | 10.71 | 26.26 | 29.47 | 30.40 | 29.99
DP-FT | 25.62 | 21.71 | 26.39 | 31.45 | 4.22 | 18.26 | 20.07 | 20.85 | 21.07
FAMix (ours) | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88
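A minimal sketch of the decoder-probing and fine-tuning schedules compared above (FT, DP, DP-FT), assuming a generic PyTorch segmentation model that exposes hypothetical `backbone` and `decoder` submodules; the loader, step counts and learning rates are placeholders, not the paper's actual training configuration.

```python
# Illustrative sketch of FT / DP / DP-FT schedules (hypothetical names, not the paper's code).
import itertools
import torch
import torch.nn.functional as F

def run(model, loader, params, steps, lr):
    opt = torch.optim.SGD(params, lr=lr, momentum=0.9, weight_decay=1e-4)
    for images, labels in itertools.islice(iter(loader), steps):
        loss = F.cross_entropy(model(images), labels, ignore_index=255)
        opt.zero_grad(); loss.backward(); opt.step()

def dp_ft(model, loader, warmup_steps, total_steps):
    # Decoder probing: freeze the CLIP-initialized backbone, train the decoder only.
    for p in model.backbone.parameters():
        p.requires_grad_(False)
    run(model, loader, model.decoder.parameters(), warmup_steps, lr=1e-1)
    # Fine-tuning: unfreeze the backbone and continue training everything at a lower rate.
    for p in model.backbone.parameters():
        p.requires_grad_(True)
    run(model, loader, model.parameters(), total_steps - warmup_steps, lr=1e-2)
```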
", + "bbox": [ + 504, + 88, + 888, + 157 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/7dcc1802c79c27dc643967a30478f1ca791a9c243bc7273009fdbc2294db888a.jpg", + "table_caption": [ + "Table 5. FAMix vs. DP-FT. Performance (mIoU%) of FAMix compared to Fine-tuning (FT), Decoder-probing (DP) and Decoder-probing Fine-tuning (DP-FT). We use here ResNet-50, trained on G. We emphasize best and second best results." + ], + "table_footnote": [], + "table_body": "
Freeze | Augment | Mix | C | B | M | S | AN | AS | AR | AF | Mean
XXX | 16.81 | 16.31 | 17.80 | 27.10 | 2.95 | 8.58 | 14.35 | 13.61 | 14.69
XX | 22.48 | 26.05 | 24.15 | 25.40 | 4.83 | 17.61 | 22.86 | 19.75 | 20.39
XX | 20.07 | 21.24 | 22.91 | 26.52 | 1.28 | 14.99 | 22.09 | 20.51 | 18.70
X | 27.53 | 26.59 | 26.27 | 26.91 | 4.90 | 18.91 | 25.60 | 22.14 | 22.36
XX | 37.83 | 38.88 | 44.24 | 31.93 | 12.41 | 29.59 | 31.56 | 33.05 | 32.44
X | 36.65 | 35.73 | 37.32 | 30.44 | 14.72 | 34.65 | 34.91 | 38.98 | 32.93
X | 43.43 | 43.79 | 48.19 | 33.70 | 11.32 | 35.55 | 36.15 | 38.19 | 36.29
48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88
", + "bbox": [ + 504, + 239, + 888, + 344 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 6. Ablation of FAMix components. Performance (mIoU %) after removing one or more components of FAMix.", + "bbox": [ + 498, + 354, + 890, + 383 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "that the learned features over-specialize to the source domain distribution even with a \"decoder warm-up\".", + "bbox": [ + 498, + 411, + 888, + 441 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The results advocate for the need of specific strategies to preserve CLIP robustness for semantic segmentation. This need emerges from the additional gap between pretraining (i.e. aligning object-level and language representations) and fine-tuning (i.e. supervised pixel classification).", + "bbox": [ + 498, + 441, + 890, + 517 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Ablation studies", + "text_level": 1, + "bbox": [ + 500, + 527, + 660, + 542 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct all the ablations on a ResNet-50 backbone with GTAV (G) as source dataset.", + "bbox": [ + 498, + 551, + 888, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Removing ingredients from the recipe. FAMix is based on minimal fine-tuning of the backbone (i.e., Freeze), style augmentation and mixing. We show in Tab. 6 that the best generalization results are only obtained when combining the three ingredients. Specifically, when the backbone is fine-tuned (i.e., Freeze $\\mathcal{X}$ ), the performances are largely harmed. When minimal fine-tuning is performed (i.e., Freeze $\\sqrt{\\cdot}$ ), we argue that the augmentations are too strong to be applied without style mixing; the latter brings both effects of domain interpolation and use of the original statistics. Subsequently, when style mixing is not applied (i.e. Freeze $\\sqrt{\\cdot}$ , Augment $\\sqrt{\\cdot}$ , Mix $\\mathcal{X}$ ), the use of mined styles brings mostly no improvement on OOD segmentation compared to training without augmentation (i.e. Freeze $\\sqrt{\\cdot}$ , Augment $\\mathcal{X}$ , Mix $\\mathcal{X}$ ). Note that for Freeze $\\sqrt{\\cdot}$ , Augment $\\sqrt{\\cdot}$ , Mix $\\mathcal{X}$ , the line 8 in Algorithm 2 becomes:", + "bbox": [ + 496, + 585, + 890, + 828 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)} \\leftarrow \\operatorname {A d a I N} \\left(\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)}, \\boldsymbol {\\mu} ^ {(i j)}, \\boldsymbol {\\sigma} ^ {(i j)}\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 578, + 839, + 888, + 858 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our style mixing is different from MixStyle [62] for being applied: (1) patch-wise and (2) between original styles of", + "bbox": [ + 498, + 869, + 890, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "23433", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/915966bc1c7d1def4c2fc10800f731623e4ee3cef57d0b6be4b8c2528ef3cb3a.jpg", + "image_caption": [ + "Figure 3. Qualitative results. Columns 1-2: Image and ground truth (GT), Columns 3-4-5: DGSS methods results, Column 6: Our results. The models are trained on G with ResNet-50 backbone." 
+ ], + "image_footnote": [], + "bbox": [ + 114, + 85, + 857, + 281 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/fc1c31dcfe7077facd6a9267834b32cbc0b5f40e65d8613ca7ded5ff06692014.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
RCP | RSP | CN | C | B | M | S | AN | AS | AR | AF | Mean
45.99 | 43.71 | 50.48 | 34.75 | 15.22 | 35.09 | 34.92 | 38.17 | 37.29
46.10 | 44.24 | 48.90 | 33.62 | 13.39 | 35.99 | 36.68 | 39.86 | 37.35
45.64 | 44.59 | 49.13 | 33.64 | 15.33 | 37.32 | 35.98 | 38.85 | 37.56
47.83 | 44.83 | 50.38 | 34.27 | 14.43 | 37.07 | 37.07 | 38.76 | 38.08
48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88
", + "bbox": [ + 78, + 332, + 467, + 412 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 7. Ablation on the prompt construction. Performance (mIoU %) for different prompt constructions. RCP, RSP and CN refer to , and , respectively.", + "bbox": [ + 75, + 422, + 467, + 477 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the source data and augmented versions of them. Note that the case (Freeze $\\checkmark$ , Augment $\\times$ , Mix $\\checkmark$ ) could be seen as a variant of MixStyle, yet applied locally and class-wise. Our complete recipe is proved to be significantly more effective with a boost of $\\approx +6$ mean mIoU w.r.t. the baseline of training without augmentation and mixing.", + "bbox": [ + 75, + 493, + 467, + 583 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Prompt construction. Tab. 7 reports results when ablating the prompt construction. In FAMix, the final prompt is derived by concatenating and ; removing either of those leads to inferior results. Interestingly, replacing the style prompt by random characters - e.g. \"ioscjspa\" - does not significantly degrade the performance. In certain aspects, using random prompts still induces a randomization effect within the FAMix framework. However, meaningful prompts still consistently lead to the best results.", + "bbox": [ + 75, + 587, + 467, + 737 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Number of style prompts. FAMix uses a set $\\mathcal{R}$ of random style prompts which are concatenated with the class names; $\\mathcal{R}$ is formed by querying ChatGPT using . The output prompts are provided in Appendix C. Fig. 4a shows that the size of $\\mathcal{R}$ has a marginal impact on FAMix performance. Yet, the mIoU scores on C, B, M and AR are higher for $|\\mathcal{R}| = 20$ compared to $|\\mathcal{R}| = 1$ and almost equal for the other datasets.", + "bbox": [ + 75, + 742, + 467, + 877 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d684a08d693b9a6ddee52bf8e7b6971de283ae9beba9f066560c916100c1a0a3.jpg", + "image_caption": [ + "(a) Number of prompts" + ], + "image_footnote": [], + "bbox": [ + 501, + 333, + 678, + 434 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3b6ff42e4890093dff93fb5a8030dd8680c1dabd71307c93f9bc12bee73f7eb9.jpg", + "image_caption": [ + "(b) Effect of layer freezing", + "Figure 4. Ablation of prompt set and freezing strategy. (a) Performance $(\\mathrm{mIoU}\\%)$ on test datasets w.r.t. the number of random style prompts in $\\mathcal{R}$ . (b) Effect of freezing layers reporting on x-axis the last frozen layer. For example, 'L3' means freezing L1, L2 and L3. 'L4' indicates that the Layer4 is partially frozen." + ], + "image_footnote": [], + "bbox": [ + 679, + 333, + 888, + 433 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The low sensitivity of the performance to the size of $\\mathcal{R}$ could be explained by two factors. First, mining even from a single prompt results in different style variations as the optimization starts from different anchor points in the latent space, as argued in [10]. Second, mixing style between the source and the mined proxy domains is the crucial factor making the network explore intermediate domains during training. This does not contradict the effect of our prompt construction which leads to the best results (Tab. 7).", + "bbox": [ + 496, + 553, + 890, + 689 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Local vs. global style mining. 
To highlight the effect of our class-wise local style mining, we perform an ablation replacing it with global style mining. Specifically, the same set of are used, though being concatenated with as a global description instead of local class name. Intuitively, local style mining and mixing induces richer style variations and more contrast among patches. The results in Tab. 8 show the effectiveness of our local style mining and mixing strategy, bringing about 3 mIoU improvement on $G \\to C$ .", + "bbox": [ + 496, + 695, + 890, + 845 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "What to mix? Let $S = \\bigcup_{k=1}^{K} S^{(k)}$ and $\\mathcal{T} = \\bigcup_{k=1}^{K} \\mathcal{T}^{(k)}$ the sets of class-wise source and augmented features, respectively. In FAMix training, for an arbitrary patch $\\mathbf{f}_s^{(ij)}$ ,", + "bbox": [ + 496, + 849, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "3https://chat.openai.com/", + "bbox": [ + 94, + 886, + 290, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "23434", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/0c20ebb9033d39cf7ee2413a7cbeb7863a88dcdeca9be044368a3be00d775951.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Style mining | C | B | M | S | AN | AS | AR | AF | Mean
“street view” | 45.51 | 45.12 | 50.40 | 33.65 | 14.59 | 36.92 | 37.38 | 40.53 | 38.01
“urban scene” | 46.59 | 45.38 | 51.33 | 33.67 | 14.42 | 35.96 | 37.30 | 40.52 | 38.15
global w/ “roadscape” | 45.49 | 45.55 | 50.63 | 33.66 | 14.77 | 36.75 | 37.07 | 40.33 | 38.03
“commute snapshot” | 45.39 | 45.08 | 50.50 | 33.68 | 13.65 | 36.63 | 37.93 | 40.92 | 37.97
“driving” | 45.06 | 44.98 | 50.67 | 33.36 | 14.84 | 35.11 | 36.21 | 39.52 | 37.47
local | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88
", + "bbox": [ + 81, + 89, + 464, + 162 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/31be26bfeb86e91093370b56a37430e6ab816e12d51f63a58aaa34cd9776b79c.jpg", + "table_caption": [ + "Table 8. Ablation on style mining. Global style mining consists of mining one style per feature map, using + as prompt." + ], + "table_footnote": [], + "table_body": "
Style mining | C | B | M | S | AN | AS | AR | AF | Mean
S | 43.43 | 43.79 | 48.19 | 33.70 | 11.32 | 35.55 | 36.15 | 38.19 | 36.29
S∪T | 44.76 | 45.59 | 50.78 | 34.05 | 13.67 | 36.92 | 37.18 | 38.13 | 37.64
T (ours) | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88
", + "bbox": [ + 81, + 232, + 464, + 290 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "style mixing is performed between the original source statistics and statistics sampled from the augmented set (i.e., $(\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})\\in \\mathcal{T}^{(c_p^{(ij)})}$ , see (3) and (4)). In class-wise vanilla MixStyle, $(\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})\\in S^{(c_p^{(ij)})}$ . In Tab. 9, we show that sampling $(\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})$ from $S^{(c_p^{(ij)})}\\cup T^{(c_p^{(ij)})}$ does not lead to better generalization, despite sampling from a set with twice the cardinality. This supports our mixing strategy visualized in Fig. 1. Intuitively, sampling from $S\\cup T$ could be viewed as applying either MixStyle or our mixing with a probability $p = 0.5$ .", + "bbox": [ + 75, + 357, + 468, + 516 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Minimal fine-tuning. We argue for minimal fine-tuning as a compromise between pretrained feature preservation and adaptation. Fig. 4b shows an increasing OOD generalization trend with more freezing. Interestingly, only fine-tuning the last layers of the last convolutional block (where the dilation is applied) achieves the best results. When training on Cityscapes, we observed that freezing all the layers except Layer4 achieves the best results.", + "bbox": [ + 75, + 535, + 467, + 655 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Does FAMix require language?", + "text_level": 1, + "bbox": [ + 76, + 665, + 349, + 681 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Inspired by the observation that target statistics deviate around the source ones in real cases [11], we conduct an experiment where we replace language-driven style mining by noise perturbation. The same procedure of FAMix is kept: (i) Features are divided into patches, perturbed with noise and then saved into a style bank based on the dominant class; (ii) During training, patch-wise style mixing of original and perturbed styles is performed.", + "bbox": [ + 75, + 688, + 467, + 809 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Different from Fan et al. [11], who perform a perturbation on the feature statistics using a normal distribution with pre-defined parameters, we experiment perturbation with different magnitudes of noise controlled by the signal-to-noise ratio (SNR). Consider the mean of a patch $\\mu \\in \\mathbb{R}^c$ as a signal, the goal is to perturb it with some noise", + "bbox": [ + 75, + 810, + 467, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/9dba61af06c8fa1990b7b843371ba0af51ef45c607bf2ae5ad74d93bca809956.jpg", + "table_caption": [ + "Table 9. Ablation on the sets used for mixing. The styles $(\\mu, \\sigma)$ used in (3) and (4) are sampled either from $S$ or $S \\cup \\mathcal{T}$ or $\\mathcal{T}$ ." + ], + "table_footnote": [], + "table_body": "
SNR | C | B | M | S | AN | AS | AR | AF | Mean
Baseline | 37.83 | 38.88 | 44.24 | 31.93 | 12.41 | 29.59 | 31.56 | 33.05 | 32.44
5 | 28.78 | 29.24 | 30.32 | 21.67 | 12.60 | 24.00 | 25.95 | 25.87 | 24.80
10 | 40.09 | 39.50 | 43.45 | 29.09 | 13.36 | 33.47 | 33.11 | 36.17 | 33.53
15 | 45.02 | 44.16 | 48.63 | 32.96 | 14.55 | 36.09 | 35.99 | 40.96 | 37.30
20 | 45.52 | 44.29 | 49.26 | 33.45 | 12.40 | 35.96 | 36.52 | 38.60 | 37.00
25 | 44.82 | 44.26 | 48.54 | 33.30 | 11.38 | 34.51 | 35.46 | 37.61 | 36.24
30 | 43.07 | 43.80 | 48.31 | 33.47 | 12.33 | 35.05 | 35.58 | 38.10 | 36.21
∞ | 43.43 | 43.79 | 48.19 | 33.70 | 11.32 | 35.55 | 36.15 | 38.19 | 36.29
MixStyle [62] | 40.97 | 42.04 | 48.36 | 33.15 | 13.14 | 31.26 | 34.94 | 38.12 | 35.25
Prompts | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88
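A small sketch of the SNR-controlled perturbation used to produce the noise rows above, under the standard convention SNR_dB = 20 log10(‖μ‖/‖n_μ‖); this is an illustrative reimplementation of the procedure described in this section, not the released code, and the same call applies to the patch standard deviation σ.

```python
# Sketch: perturb one patch statistic (mean or std, shape (c,)) at a target SNR in dB.
import torch

def perturb_at_snr(mu: torch.Tensor, snr_db: float) -> torch.Tensor:
    n = torch.randn_like(mu)                                     # n ~ N(0, I)
    n_mu = 10 ** (-snr_db / 20.0) * (mu.norm() / n.norm()) * n   # so ||mu|| / ||n_mu|| = 10**(snr_db/20)
    return mu + n_mu                                             # perturbed statistic, stored in the class style bank
```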
", + "bbox": [ + 504, + 89, + 888, + 239 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 10. Noise vs prompt-driven augmentation. The prompt-driven augmentation in FAMix is replaced by random noise with different levels defined by SNR. We also include vanilla MixStyle. The prompt-driven strategy is superior.", + "bbox": [ + 498, + 256, + 890, + 311 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "$n_{\\mu} \\in \\mathbb{R}^{c}$ . The $\\mathrm{SNR}_{\\mathrm{dB}}$ between $\\| \\mu \\|$ and $\\| n_{\\mu}\\|$ is defined as $\\mathrm{SNR}_{\\mathrm{dB}} = 20\\log_{10}\\left(\\frac{\\|\\mu\\| / \\|n_{\\mu}\\|}{\\|\\mu\\|}\\right)$ . Given $\\mu$ , $\\mathrm{SNR}_{\\mathrm{dB}}$ , and $n \\sim \\mathcal{N}(0,I)$ , where $I \\in \\mathbb{R}^{c \\times c}$ is the identity matrix, the noise is computed as $n_{\\mu} = 10^{\\frac{-\\mathrm{SNR}}{20}}\\frac{\\|\\mu\\|}{\\|n\\|} n$ . We add $\\mu + n_{\\mu}$ to the style bank corresponding to the dominant class in the patch. The same applies to $\\sigma \\in \\mathbb{R}^{c}$ . The results of training for different noise levels are in Tab. 10. Using language as source of randomization outperforms any noise level. The baseline corresponds to the case where no augmentation nor mixing are performed (See Tab. 6, Freeze $\\checkmark$ , Augment $\\pmb{x}$ , Mix $\\pmb{x}$ ). $\\mathrm{SNR} = \\infty$ could be seen as a variant of MixStyle, applied class-wise to patches (See Tab. 6, Freeze $\\checkmark$ , Augment $\\pmb{x}$ , Mix $\\checkmark$ ). The vanilla MixStyle gets inferior results.", + "bbox": [ + 498, + 348, + 890, + 547 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Besides lower OOD performance, one more disadvantage of noise augmentation compared to our language-driven augmentation is the need to select a value for the SNR, for which the optimal value might vary depending on the target domain encountered at the test time.", + "bbox": [ + 498, + 551, + 890, + 627 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 655, + 617, + 670 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We presented FAMix, a simple recipe for domain generalized semantic segmentation with CLIP pretraining. We proposed to locally mix the styles of source features with their augmented counterparts obtained using language prompts. Combined with minimal fine-tuning, FAMix significantly outperforms the state-of-the-art approaches. Extensive experiments showcase the effectiveness of our framework. We hope that FAMix will serve as a strong baseline in future works, exploring the potential of leveraging large-scale vision-language models for perception tasks.", + "bbox": [ + 496, + 685, + 890, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgment. This work was partially funded by French project SIGHT (ANR-20-CE23-0016) and was supported by ELSA - European Lighthouse on Secure and Safe AI funded by the European Union under grant agreement No. 101070617. 
It was performed using HPC resources from GENCI-IDRIS (Grant AD011014477).", + "bbox": [ + 498, + 839, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "23435", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Kartik Ahuja, Ethan Caballero, Dinghuai Zhang, Jean-Christophe Gagnon-Audet, Yoshua Bengio, Ioannis Mitliagkas, and Irina Rish. Invariance principle meets information bottleneck for out-of-distribution generalization. In NeurIPS, 2021. 2", + "[2] James Urquhart Allingham, Jie Ren, Michael W Dusenberry, Xiuye Gu, Yin Cui, Dustin Tran, Jeremiah Zhe Liu, and Balaji Lakshminarayanan. A simple zero-shot prompt weighting technique to improve prompt ensembling in text-image models. In ICML, 2023. 2", + "[3] Martin Arjovsky, Léon Bottou, Ishaan Gulrajani, and David Lopez-Paz. Invariant risk minimization. arXiv preprint arXiv:1907.02893, 2019. 2", + "[4] Yogesh Balaji, Swami Sankaranarayanan, and Rama Chellappa. Metareg: Towards domain generalization using metaregularization. In NeurIPS, 2018. 2", + "[5] Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-decoder with atrous separable convolution for semantic image segmentation. In ECCV, 2018. 5", + "[6] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In CVPR, 2021. 3", + "[7] Sungha Choi, Sanghun Jung, Huiwon Yun, Joanne T Kim, Seungryong Kim, and Jaegul Choo. Robustnet: Improving domain generalization in urban-scene segmentation via instance selective whitening. In CVPR, 2021. 1, 2, 3, 5, 6", + "[8] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In CVPR, 2016. 5", + "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, 2009. 1, 3", + "[10] Mohammad Fahes, Tuan-Hung Vu, Andrei Bursuc, Patrick Pérez, and Raoul de Charette. Poda: Prompt-driven zero-shot domain adaptation. In ICCV, 2023. 1, 3, 4, 7", + "[11] Qi Fan, Mattia Segu, Yu-Wing Tai, Fisher Yu, Chi-Keung Tang, Bernt Schiele, and Dengxin Dai. Towards robust object detection invariant to real-world domain shifts. In ICLR, 2023. 5, 8", + "[12] Alex Fang, Gabriel Ilharco, Mitchell Wortsman, Yuhao Wan, Vaishaal Shankar, Achal Dave, and Ludwig Schmidt. Data determines distributional robustness in contrastive language image pre-training (clip). In ICML, 2022. 1, 2", + "[13] Yaroslav Ganin, Evgeniya Ustinova, Hana Ajakan, Pascal Germain, Hugo Larochelle, François Laviolette, Mario Marchand, and Victor Lempitsky. Domain-adversarial training of neural networks. JMLR, 2016. 1", + "[14] Yunhao Ge, Jie Ren, Andrew Gallagher, Yuxiao Wang, Ming-Hsuan Yang, Hartwig Adam, Laurent Itti, Balaji Lakshminarayanan, and Jiaping Zhao. Improving zero-shot generalization and robustness of multi-modal models. In CVPR, 2023. 2", + "[15] Sachin Goyal, Ananya Kumar, Sankalp Garg, Zico Kolter, and Aditi Raghunathan. Finetune like you pretrain: Im-" + ], + "bbox": [ + 78, + 114, + 470, + 902 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "proved finetuning of zero-shot vision models. In CVPR, 2023. 
1, 2", + "[16] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2022. 1", + "[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 5", + "[18] Judy Hoffman, Eric Tzeng, Taesung Park, Jun-Yan Zhu, Phillip Isola, Kate Saenko, Alexei Efros, and Trevor Darrell. Cycada: Cycle-consistent adversarial domain adaptation. In ICML, 2018. 1", + "[19] Wei Huang, Chang Chen, Yong Li, Jiacheng Li, Cheng Li, Fenglong Song, Youliang Yan, and Zhiwei Xiong. Style projected clustering for domain generalized semantic segmentation. In CVPR, 2023. 1, 3, 5, 6", + "[20] Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In ICCV, 2017. 3", + "[21] Nishant Jain, Harkirat Behl, Yogesh Singh Rawat, and Vibhav Vineet. Efficiently robustify pre-trained models. In ICCV, 2023. 2", + "[22] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 1", + "[23] Jin Kim, Jiyoung Lee, Jungin Park, Dongbo Min, and Kwanghoon Sohn. Pin the memory: Learning to generalize semantic segmentation. In CVPR, 2022. 1, 5, 6, 7", + "[24] Sunghwan Kim, Dae-hwan Kim, and Hoseong Kim. Texture learning domain randomization for domain generalized segmentation. In ICCV, 2023. 1, 3, 5, 6", + "[25] David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, and Aaron Courville. Out-of-distribution generalization via risk extrapolation (rex). In ICML, 2021. 2", + "[26] Ananya Kumar, Aditi Raghunathan, Robbie Matthew Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. In ICLR, 2022. 1, 2, 6", + "[27] Gihyun Kwon and Jong Chul Ye. Clipstyler: Image style transfer with a single text condition. In CVPR, 2022. 1", + "[28] Clement Laroudie, Andrei Bursuc, Mai Lan Ha, and Gianni Franchi. Improving clip robustness with knowledge distillation and self-training. arXiv preprint arXiv:2309.10361, 2023. 2", + "[29] Suhyeon Lee, Hongje Seong, Seongwon Lee, and Euntai Kim. Wildnet: Learning domain generalized semantic segmentation from the wild. In CVPR, 2022. 1, 3, 5, 6, 7", + "[30] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and Rene Ranftl. Language-driven semantic segmentation. In ICLR, 2022. 1", + "[31] Haoliang Li, Sinno Jialin Pan, Shiqi Wang, and Alex C Kot. Domain generalization with adversarial feature learning. In CVPR, 2018. 1", + "[32] Ya Li, Xinmei Tian, Mingming Gong, Yajing Liu, Tongliang Liu, Kun Zhang, and Dacheng Tao. Deep domain generaliza" + ], + "bbox": [ + 501, + 92, + 890, + 902 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "23436", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "tion via conditional invariant adversarial networks. In ECCV, 2018. 1, 2", + "[33] Yunsheng Li, Lu Yuan, and Nuno Vasconcelos. Bidirectional learning for domain adaptation of semantic segmentation. In CVPR, 2019. 1", + "[34] Mingsheng Long, Zhangjie Cao, Jianmin Wang, and Michael I Jordan. Conditional adversarial domain adaptation. In NeurIPS, 2018. 1", + "[35] Gerhard Neuhold, Tobias Ollmann, Samuel Rota Bulo, and Peter Kontschieder. 
The mapillary vistas dataset for semantic understanding of street scenes. In ICCV, 2017. 5", + "[36] Xingang Pan, Ping Luo, Jianping Shi, and Xiaou Tang. Two at once: Enhancing learning and generalization capacities via ibn-net. In ECCV, 2018. 1, 3, 5", + "[37] Duo Peng, Yinjie Lei, Munawar Hayat, Yulan Guo, and Wen Li. Semantic-aware domain generalized segmentation. In CVPR, 2022. 1, 3, 5", + "[38] Fengchun Qiao, Long Zhao, and Xi Peng. Learning to learn single domain generalization. In CVPR, 2020. 2", + "[39] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 2, 3", + "[40] Yongming Rao, Wenliang Zhao, Guangyi Chen, Yansong Tang, Zheng Zhu, Guan Huang, Jie Zhou, and Jiwen Lu. Denseclip: Language-guided dense prediction with context-aware prompting. In CVPR, 2022. 1", + "[41] Stephan R Richter, Vibhav Vineet, Stefan Roth, and Vladlen Koltun. Playing for data: Ground truth from computer games. In ECCV, 2016. 5", + "[42] German Ros, Laura Sellart, Joanna Materzynska, David Vazquez, and Antonio M Lopez. The synthia dataset: A large collection of synthetic images for semantic segmentation of urban scenes. In CVPR, 2016. 5", + "[43] Kuniaki Saito, Donghyun Kim, Piotr Teterwak, Rogerio Feris, and Kate Saenko. Mind the backbone: Minimizing backbone distortion for robust object detection. arXiv preprint arXiv:2303.14744, 2023. 6", + "[44] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. ACDC: The adverse conditions dataset with correspondences for semantic driving scene understanding. In ICCV, 2021. 5", + "[45] Yang Shu, Xingzhuo Guo, Jialong Wu, Ximei Wang, Jianmin Wang, and Mingsheng Long. Clipood: Generalizing clip to out-of-distributions. In ICML, 2023. 1, 2", + "[46] Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In CVPR, 2016. 5", + "[47] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In CVPR, 2017. 1", + "[48] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In CVPR, 2019. 1", + "[49] Jindong Wang, Cuiling Lan, Chang Liu, Yidong Ouyang, Tao Qin, Wang Lu, Yiqiang Chen, Wenjun Zeng, and Philip" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yu. Generalizing to unseen domains: A survey on domain generalization. T-KDE, 2022. 1, 2", + "[50] Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, et al. Robust fine-tuning of zero-shot models. In CVPR, 2022. 1, 2", + "[51] Zhenyao Wu, Xinyi Wu, Xiaoping Zhang, Lili Ju, and Song Wang. Siamdoge: Domain generalizable semantic segmentation using siamese network. In ECCV, 2022. 1, 3, 5, 6", + "[52] Qinwei Xu, Ruipeng Zhang, Ya Zhang, Yanfeng Wang, and Qi Tian. A fourier-based framework for domain generalization. In CVPR, 2021. 1", + "[53] Liwei Yang, Xiang Gu, and Jian Sun. Generalized semantic segmentation by self-supervised source domain projection and multi-level contrastive learning. In AAAI, 2023. 
1, 5, 6", + "[54] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darryll. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In CVPR, 2020. 5", + "[55] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. LiT: Zero-shot transfer with locked-image text tuning. In CVPR, 2022. 1", + "[56] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023. 1", + "[57] Shanshan Zhao, Mingming Gong, Tongliang Liu, Huan Fu, and Dacheng Tao. Domain generalization via entropy regularization. In NeurIPS, 2020. 2", + "[58] Yuyang Zhao, Zhun Zhong, Na Zhao, Nicu Sebe, and Gim Hee Lee. Style-hallucinated dual consistency learning for domain generalized semantic segmentation. In ECCV, 2022. 1, 3, 5, 6, 7", + "[59] Chong Zhou, Chen Change Loy, and Bo Dai. Extract free dense labels from clip. In ECCV, 2022. 1", + "[60] Kaiyang Zhou, Yongxin Yang, Timothy Hospedales, and Tao Xiang. Deep domain-adversarial image generation for domain generalisation. In AAAI, 2020. 2", + "[61] Kaiyang Zhou, Yongxin Yang, Timothy Hospedales, and Tao Xiang. Learning to generate novel domains for domain generalization. In ECCV, 2020.", + "[62] Kaiyang Zhou, Yongxin Yang, Yu Qiao, and Tao Xiang. Domain generalization with mixstyle. In ICLR, 2021. 1, 2, 4, 5, 6, 8", + "[63] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. TPAMI, 2022. 1, 2", + "[64] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Zwei Liu. Conditional prompt learning for vision-language models. In CVPR, 2022. 1", + "[65] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 2022. 1" + ], + "bbox": [ + 501, + 92, + 890, + 871 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "23437", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/b9db7707-a86d-4d4c-b962-58bd8f08eecd_model.json b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/b9db7707-a86d-4d4c-b962-58bd8f08eecd_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f581acfd7f718f68825163bc966d2322c6b8bec9 --- /dev/null +++ b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/b9db7707-a86d-4d4c-b962-58bd8f08eecd_model.json @@ -0,0 +1,2475 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.131, + 0.856, + 0.153 + ], + "angle": 0, + "content": "A Simple Recipe for Language-guided Domain Generalized Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.179, + 0.25, + 0.198 + ], + "angle": 0, + "content": "Mohammad Fahes1" + }, + { + "type": "text", + "bbox": [ + 0.272, + 0.18, + 0.41, + 0.199 + ], + "angle": 0, + "content": "Tuan-Hung \\(\\mathrm{V_u^{1,2}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.325, + 0.199, + 0.378, + 0.215 + ], + "angle": 0, + "content": "1 Inria" + }, + { + "type": "text", + "bbox": [ + 0.432, + 0.181, + 0.567, + 0.198 + ], + "angle": 0, + "content": "Andrei Bursuc\\(^{1,2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.437, + 0.199, + 0.517, + 0.216 + ], + "angle": 0, + "content": "2 Valeo.ai" + }, + { + "type": "text", + "bbox": [ + 0.584, + 0.181, + 0.703, + 0.198 + ], + "angle": 0, + "content": "Patrick Pérez3" + }, + { + "type": "text", + "bbox": [ + 0.578, + 0.199, + 0.645, + 0.217 + ], + "angle": 0, + "content": "3 Kyutai" + }, + { + "type": "text", + "bbox": [ + 0.724, + 0.181, + 0.874, + 0.198 + ], + "angle": 0, + "content": "Raoul de Charette1" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.228, + 0.701, + 0.244 + ], + "angle": 0, + "content": "https://astravision.github.io/FAMix" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.267, + 0.314, + 0.283 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.3, + 0.474, + 0.603 + ], + "angle": 0, + "content": "Generalization to new domains not seen during training is one of the long-standing challenges in deploying neural networks in real-world applications. Existing generalization techniques either necessitate external images for augmentation, and/or aim at learning invariant representations by imposing various alignment constraints. Large-scale pretraining has recently shown promising generalization capabilities, along with the potential of binding different modalities. For instance, the advent of vision-language models like CLIP has opened the doorway for vision models to exploit the textual modality. In this paper, we introduce a simple framework for generalizing semantic segmentation networks by employing language as the source of randomization. Our recipe comprises three key ingredients: (i) the preservation of the intrinsic CLIP robustness through minimal fine-tuning, (ii) language-driven local style augmentation, and (iii) randomization by locally mixing the source and augmented styles during training. Extensive experiments report state-of-the-art results on various generalization benchmarks. Code is accessible on the project page1." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.649, + 0.21, + 0.665 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.676, + 0.47, + 0.841 + ], + "angle": 0, + "content": "A prominent challenge associated with deep neural networks is their constrained capacity to generalize when confronted with shifts in data distribution. This limitation is rooted in the assumption of data being independent and identically distributed, a presumption that frequently proves unrealistic in real-world scenarios. 
For instance, in safety-critical applications like autonomous driving, it is imperative for a segmentation model to exhibit resilient generalization capabilities when dealing with alterations in lighting, variations in weather conditions, and shifts in geographic location, among other considerations." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.843, + 0.47, + 0.874 + ], + "angle": 0, + "content": "To address this challenge, domain adaptation [13, 18, 33, 34, 47, 48] has emerged; its core principle revolves around" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.268, + 0.892, + 0.389 + ], + "angle": 0, + "content": "aligning the distributions of both the source and target domains. However, DA hinges on having access to target data, which may not always be available. Even when accessible, this data might not encompass the full spectrum of distributions encountered in diverse real-world scenarios. Domain generalization [31, 32, 49, 52, 62, 63] overcomes this limitation by enhancing the robustness of models to arbitrary and previously unseen domains." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.392, + 0.893, + 0.514 + ], + "angle": 0, + "content": "The training of segmentation networks is often backed by large-scale pretraining as initialization for the feature representation. Until now, to the best of our knowledge, domain generalization for semantic segmentation (DGSS) networks [7, 19, 23, 24, 29, 36, 37, 51, 53, 58] are pretrained with ImageNet [9]. The underlying concept is to transfer the representations from the upstream task of classification to the downstream task of segmentation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.516, + 0.895, + 0.698 + ], + "angle": 0, + "content": "Lately, contrastive language image pretraining (CLIP) [22, 39, 55, 56] has demonstrated that transferable visual representations could be learned from the sole supervision of loose natural language descriptions at very large scale. Subsequently, a plethora of applications have been proposed using CLIP [39], including zero-shot semantic segmentation [30, 59], image editing [27], transfer learning [10, 40], open-vocabulary object detection [16], few-shot learning [64, 65] etc. A recent line of research proposes fine-tuning techniques to preserve the robustness of CLIP under distribution shift [15, 26, 45, 50], but they are limited to classification." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.701, + 0.895, + 0.822 + ], + "angle": 0, + "content": "In this paper, we aim at answering the following question: How to leverage CLIP pretraining for enhanced domain generalization for semantic segmentation? The motivation for rethinking DGSS with CLIP is twofold. On one hand, distribution robustness is a notable characteristic of CLIP [12]. On the other hand, the language modality offers an extra source of information compared to unimodal pretrained models." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.895, + 0.901 + ], + "angle": 0, + "content": "A direct comparison of training two segmentation models under identical conditions but with different pretraining, i.e. ImageNet vs. CLIP, shows that CLIP pretraining does not yield promising results. Indeed, Tab. 
1 shows that fine-tuning CLIP-initialized network performs worse than" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.388, + 0.9 + ], + "angle": 0, + "content": "1https://astra-vision.github.io/FAMix" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "23428" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.09, + 0.468, + 0.138 + ], + "angle": 0, + "content": "
Pretraining | C | B | M | S | AN | AS | AR | AF | Mean
ImageNet | 29.04 | 32.17 | 34.26 | 29.87 | 4.36 | 22.38 | 28.34 | 26.76 | 25.90
CLIP | 16.81 | 16.31 | 17.80 | 27.10 | 2.95 | 8.58 | 14.35 | 13.61 | 14.69
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.147, + 0.47, + 0.231 + ], + "angle": 0, + "content": "Table 1. Comparison of ImageNet and CLIP pretraining for out-of-distribution semantic segmentation. The network is DeepLabv3+ with ResNet-50 as backbone. The models are trained on GTAV and the performance (mIoU %) is reported on Cityscapes (C), BDD-100K (B), Mapillary (M), Synthia (S), and ACDC Night (AN), Snow (AS), Rain (AR) and Fog (AF)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.256, + 0.469, + 0.362 + ], + "angle": 0, + "content": "its ImageNet counterpart on out-of-distribution (OOD) data. This raises doubts about the suitability of CLIP pretraining for DGSS and indicates that it is more prone to overfitting the source distribution at the expense of degrading its original distributional robustness properties. Note that both models converge and achieve similar results on in-domain data. More details are provided in Appendix A." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.367, + 0.469, + 0.426 + ], + "angle": 0, + "content": "This paper shows that we can prevent such behavior with a simple recipe involving minimal fine-tuning, language-driven style augmentation, and mixing. Our approach is coined FAMix, for Freeze, Augment and Mix." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.432, + 0.469, + 0.583 + ], + "angle": 0, + "content": "It was recently argued that fine-tuning might distort the pretrained representations and negatively affect OOD generalization [26]. To maintain the integrity of the representation, one extreme approach is to entirely freeze the backbone. However, this can undermine representation adaptability and lead to subpar OOD generalization. As a middle-ground strategy balancing adaptation and feature preservation, we suggest minimal fine-tuning of the backbone, where a substantial portion remains frozen, and only the final layers undergo fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.588, + 0.469, + 0.678 + ], + "angle": 0, + "content": "For generalization, we show that rethinking MixStyle [62] leads to significant performance gains. As illustrated in Fig. 1, we mix the statistics of the original source features with augmented statistics mined using language. This helps explore styles beyond the source distribution at training time without using additional image." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.682, + 0.388, + 0.696 + ], + "angle": 0, + "content": "We summarize our contributions as follows:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.703, + 0.468, + 0.761 + ], + "angle": 0, + "content": "- We propose a simple framework for DGSS based on minimal fine-tuning of the backbone and language-driven style augmentation. To the best of our knowledge, we are the first to study DGSS with CLIP pretraining." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.763, + 0.468, + 0.852 + ], + "angle": 0, + "content": "- We propose language-driven class-wise local style augmentation. We mine class-specific local statistics using prompts that express random styles and names of patchwise dominant classes. During training, randomization is performed through patch-wise style mixing of the source and mined styles." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.853, + 0.468, + 0.898 + ], + "angle": 0, + "content": "- We conduct careful ablations to show the effectiveness of FAMix. Our framework outperforms state-of-the-art approaches in single and multi-source DGSS settings." 
+ }, + { + "type": "list", + "bbox": [ + 0.078, + 0.703, + 0.468, + 0.898 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.089, + 0.882, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.209, + 0.892, + 0.334 + ], + "angle": 0, + "content": "Figure 1. Mixing strategies. (Left) MixStyle [62] consists of a linear mixing between the feature statistics of the source domain(s) S samples. (Right) We apply an augmentation \\(\\mathcal{A}(.)\\) on the source domain statistics, then perform linear mixing between original and augmented statistics. Intuitively, this enlarges the support of the training distribution by leveraging statistics beyond the source domain(s), as well as discovering intermediate domains. \\(\\mathcal{A}(.)\\) could be a language-driven or Gaussian noise augmentation, and we show that the former leads to better generalization results." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.363, + 0.644, + 0.377 + ], + "angle": 0, + "content": "2. Related works" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.389, + 0.891, + 0.51 + ], + "angle": 0, + "content": "Domain generalization (DG). The goal of DG is to train, from a single or multiple source domains, models that perform well under arbitrary domain shifts. The DG literature spans a broad range of approaches, including adversarial learning [32, 57], meta-learning [4, 38], data augmentation [60-62] and domain-invariant representation learning [1, 3, 7, 25]. We refer the reader to [49, 63] for comprehensive surveys on DG." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.517, + 0.892, + 0.849 + ], + "angle": 0, + "content": "Domain generalization with CLIP. CLIP [39] exhibits a remarkable distributional robustness [12]. Nevertheless, fine-tuning comes at the expense of sacrificing generalization. Kumar et al. [26] observe that full fine-tuning can distort the pretrained representation, and propose a two-stage strategy, consisting of training a linear probe with a frozen feature extractor, then fine-tuning both. Wortman et al. [50] propose assembling the weights of zero-shot and fine-tuned models. Goyal et al. [15] show that preserving the pretraining paradigm (i.e. contrastive learning) during the adaptation to the downstream task improves both in-domain (ID) and OOD performance without multi-step fine-tuning or weight assembling. CLIPood [45] introduces margin metric softmax training objective and Beta moving average for optimization to handle both open-class and open-domain at test time. On the other hand, distributional robustness could be improved by training a small amount of parameters on top of a frozen CLIP backbone in a teacher-student manner [21, 28]. Other works show that specialized prompt assembling and/or image assembling strategies [2, 14] coupled with label augmentation using the WordNet hierarchy improve robustness in classification." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Domain Generalized Semantic Segmentation. 
DGSS methods could be categorized into three main groups: normalization methods, domain randomization (DR) and in" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "23429" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.09, + 0.868, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.283, + 0.892, + 0.382 + ], + "angle": 0, + "content": "Figure 2. Overall process of FAMix. FAMix consists of two steps. (Left) Local style mining consists of dividing the low-level feature activations into patches, which are used for style mining using Prompt-driven Instance Normalization (PIN) [10]. Specifically, for each patch, the dominant class is queried from the ground truth, and the mined style is added to corresponding class-specific style bank. (Right) Training the segmentation network is performed with minimal fine-tuning of the backbone. At each iteration, the low-level feature activations are viewed as grids of patches. For each patch, the dominant class is queried using the ground truth, then a style is sampled from the corresponding style bank. Style randomization is performed by normalizing each patch in the grid by its statistics, and transferring the new style which is a mixing between the original style and the sampled one. The network is trained using only a cross-entropy loss." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.4, + 0.473, + 0.734 + ], + "angle": 0, + "content": "variant representation learning. Normalization methods aim at removing style contribution from the representation. For instance, IBN-Net [36] shows that Instance Normalization (IN) makes the representation invariant to variations in the scene appearance (e.g., change of colors, illumination, etc.), and that combining IN and batch normalization (BN) helps the synthetic-to-real generalization. SAN & SAW [37] proposes semantic-aware feature normalization and whitening, while RobustNet [7] proposes an instance selective whitening loss, where only feature covariances that are sensitive to photometric transformations are whitened. DR aims instead at diversifying the data during training. Some methods use additional data for DR. For example, WildNet [29] uses ImageNet [9] data for content and style extension learning, while TLDR [24] proposes learning texture from random style images. Other methods like SiamDoGe [51] perform DR solely by data augmentation, using a Siamese [6] structure. Finally in the invariant representation learning group, SPC-Net [19] builds a representation space based on style and semantic projection and clustering, and SHADE [58] regularizes the training with a style consistency loss and a retrospection consistency loss." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.753, + 0.168, + 0.768 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.47, + 0.901 + ], + "angle": 0, + "content": "FAMix proposes an effective recipe for DGSS through the blending of simple ingredients. It consists of two stages (see Fig. 2): (i) Local style mining from language (Sec. 3.2); (ii) Training of a segmentation network with minimal fin-tuning and local style mixing (Sec. 3.3). In Fig. 2 and in the following, CLIP-I1 denotes the stem layers and Layer1 of CLIP image encoder, CLIP-I2 the remaining layers excluding the attention pooling, and CLIP-T the text encoder." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.4, + 0.89, + 0.43 + ], + "angle": 0, + "content": "We start with some preliminary background knowledge, introducing AdaIN and PIN which are essential to our work." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.439, + 0.64, + 0.454 + ], + "angle": 0, + "content": "3.1. Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.463, + 0.892, + 0.554 + ], + "angle": 0, + "content": "Adaptive Instance Normalization (AdaIN). For a feature map \\(\\mathbf{f} \\in \\mathbb{R}^{h \\times w \\times c}\\), AdaIN [20] shows that the channel-wise mean \\(\\boldsymbol{\\mu} \\in \\mathbb{R}^c\\) and standard deviation \\(\\sigma \\in \\mathbb{R}^c\\) capture information about the style of the input image, allowing style transfer between images. Hence, stylizing a source feature \\(\\mathbf{f}_s\\) with an arbitrary target style \\((\\mu(\\mathbf{f}_t), \\sigma(\\mathbf{f}_t))\\) reads:" + }, + { + "type": "equation", + "bbox": [ + 0.558, + 0.56, + 0.892, + 0.59 + ], + "angle": 0, + "content": "\\[\n\\operatorname {A d a I N} \\left(\\mathbf {f} _ {\\mathrm {s}}, \\mathbf {f} _ {\\mathrm {t}}\\right) = \\sigma \\left(\\mathbf {f} _ {\\mathrm {t}}\\right) \\left(\\frac {\\mathbf {f} _ {\\mathrm {s}} - \\mu \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}{\\sigma \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}\\right) + \\mu \\left(\\mathbf {f} _ {\\mathrm {t}}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.598, + 0.89, + 0.628 + ], + "angle": 0, + "content": "with \\(\\mu (\\cdot)\\) and \\(\\sigma (\\cdot)\\) the mean and standard deviation of input feature; multiplications and additions being element-wise." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.631, + 0.892, + 0.752 + ], + "angle": 0, + "content": "Prompt-driven Instance Normalization (PIN). PIN was introduced for prompt-driven zero-shot domain adaptation in PØDA [10]. It replaces the target style \\((\\mu(\\mathbf{f}_{\\mathrm{t}}), \\sigma(\\mathbf{f}_{\\mathrm{t}}))\\) in AdaIN (1) with two estimizable variables \\((\\mu, \\sigma)\\) guided by a single prompt in natural language. The rationale is to leverage a frozen CLIP [39] to mine visual styles from the prompt representation in the shared space. Given a prompt \\(P\\) and a feature map \\(\\mathbf{f}_{\\mathrm{s}}\\), PIN reads as:" + }, + { + "type": "equation", + "bbox": [ + 0.586, + 0.759, + 0.892, + 0.79 + ], + "angle": 0, + "content": "\\[\n\\mathrm {P I N} _ {(P)} \\left(\\mathbf {f} _ {\\mathrm {s}}\\right) = \\sigma \\left(\\frac {\\mathbf {f} _ {\\mathrm {s}} - \\mu \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}{\\sigma \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}\\right) + \\boldsymbol {\\mu}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.891, + 0.84 + ], + "angle": 0, + "content": "where \\(\\mu\\) and \\(\\sigma\\) are optimized using gradient descent, such that the cosine distance between the visual feature representation and the prompt representation is minimized." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Different from PØDA which mines styles globally with a predetermined prompt describing the target domain, we make use of PIN to mine class-specific styles using local patches of the features, leveraging random style prompts." 
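A rough sketch of PIN-style mining as described in (2): the style variables are initialized from the feature statistics and optimized so that the stylized feature, pushed through the remaining frozen CLIP image layers, moves towards the prompt embedding. Here `clip_i2` and `prompt_emb` are assumed helpers (a frozen CLIP image-encoder tail returning an embedding, and the L2-normalized CLIP text embedding of the prompt); this is a sketch under those assumptions, not the PØDA or FAMix code.

```python
# Sketch of Prompt-driven Instance Normalization (PIN); clip_i2 / prompt_emb are assumed helpers.
import torch

def pin(f_s, prompt_emb, clip_i2, steps=100, lr=1.0):
    mu0 = f_s.mean(dim=(2, 3), keepdim=True)
    sigma0 = f_s.std(dim=(2, 3), keepdim=True) + 1e-6
    mu = mu0.clone().requires_grad_(True)        # optimizable style mean
    sigma = sigma0.clone().requires_grad_(True)  # optimizable style std
    opt = torch.optim.SGD([mu, sigma], lr=lr)
    for _ in range(steps):
        f_styl = sigma * (f_s - mu0) / sigma0 + mu             # Eq. (2)
        emb = clip_i2(f_styl)                                  # frozen CLIP image-encoder tail
        emb = emb / emb.norm(dim=-1, keepdim=True)
        loss = (1 - (emb * prompt_emb).sum(dim=-1)).mean()     # cosine distance to the prompt
        opt.zero_grad(); loss.backward(); opt.step()
    return mu.detach(), sigma.detach()
```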
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23430" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.471, + 0.122 + ], + "angle": 0, + "content": "Further, we show the effectiveness of incorporating the class name in the prompt for better style mining." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.13, + 0.262, + 0.148 + ], + "angle": 0, + "content": "3.2. Local Style Mining" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.154, + 0.47, + 0.26 + ], + "angle": 0, + "content": "Our approach is to leverage PIN to mine class-specific style banks that used for feature augmentation when training FAMix. Given a set of cropped images \\(\\mathcal{I}_{\\mathrm{s}}\\), we encode them using CLIP-I1 to get a set of low-level features \\(\\mathcal{F}_{\\mathrm{s}}\\). Each batch \\(b\\) of features \\(\\mathbf{f}_{\\mathrm{s}} \\in \\mathcal{F}_{\\mathrm{s}}\\) is cropped into \\(m\\) patches, resulting in \\(b \\times m\\) patches \\(\\mathbf{f}_p\\), associated ground-truth annotation \\(\\mathbf{y}_p\\), of size \\(h / \\sqrt{m} \\times w / \\sqrt{m} \\times c\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.26, + 0.471, + 0.411 + ], + "angle": 0, + "content": "We aim at populating \\(K\\) style banks, \\(K\\) being the total number of classes. For a feature patch \\(\\mathbf{f}_p\\), we compute the dominant class from the corresponding label patch \\(\\mathbf{y}_p\\), and get its name \\(t_p\\) from the predefined classes in the training dataset. Given a set of prompts describing random styles \\(\\mathcal{R}\\), the target prompt \\(P_p\\) is formed by concatenating a randomly sampled style prompt \\(r\\) from \\(\\mathcal{R}\\) and \\(t_p\\) (e.g., retro futurism style building). We show in the experiments (Sec. 4.4) that our method is not very sensitive to the prompt design, yet our prompt construction works best." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.411, + 0.47, + 0.486 + ], + "angle": 0, + "content": "The idea is to mine proxy domains and explore intermediate ones in a class-aware manner (as detailed in Sec. 3.3), which makes our work fundamentally different from [10], that steers features towards a particular target style and corresponding domain, and better suited to generalization." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.486, + 0.471, + 0.591 + ], + "angle": 0, + "content": "To handle the class imbalance problem, we simply select one feature patch \\(\\mathbf{f}_p\\) per class among the total \\(b\\times m\\) patches, as shown in Fig. 2. Consequently, we apply PIN (2) to optimize the local styles to match the representations of their corresponding prompts, and use the mined styles to populate the corresponding style banks. The complete procedure is outlined in Algorithm 1." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.591, + 0.471, + 0.623 + ], + "angle": 0, + "content": "The resulting style banks \\(\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\}\\) are used for domain randomization during training." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.631, + 0.24, + 0.648 + ], + "angle": 0, + "content": "3.3. Training FAMix" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.654, + 0.47, + 0.821 + ], + "angle": 0, + "content": "Style randomization. During training, randomly cropped images \\(\\mathcal{I}_{\\mathrm{s}}\\) are encoded into \\(\\mathbf{f}_{\\mathrm{s}}\\) using CLIP-11. 
Each batch of feature maps \\(\\mathbf{f}_{\\mathrm{s}}\\) is viewed as a grid of \\(m\\) patches, without cropping them. For each patch \\(\\mathbf{f}_{\\mathrm{s}}^{(ij)}\\) within the grid, the dominant class \\(c_{p}^{(ij)}\\) is queried using the corresponding ground truth patch \\(\\mathbf{y}_{\\mathrm{s}}^{(ij)}\\), and a style is randomly sampled from the corresponding mined bank \\(\\mathcal{T}(c_p^{(ij)})\\). We then apply patch-wise convex combination (i.e., style mixing) of the original style of the patch and the mined style. Specifically, for an arbitrary patch \\(\\mathbf{f}_{\\mathrm{s}}^{(ij)}\\), our local style mixing reads:" + }, + { + "type": "equation", + "bbox": [ + 0.161, + 0.831, + 0.469, + 0.851 + ], + "angle": 0, + "content": "\\[\n\\mu_ {m i x} \\leftarrow (1 - \\alpha) \\mu \\left(\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)}\\right) + \\alpha \\boldsymbol {\\mu} ^ {(i j)} \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.159, + 0.853, + 0.469, + 0.872 + ], + "angle": 0, + "content": "\\[\n\\sigma_ {m i x} \\leftarrow (1 - \\alpha) \\sigma \\left(\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)}\\right) + \\alpha \\boldsymbol {\\sigma} ^ {(i j)}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.882, + 0.378, + 0.903 + ], + "angle": 0, + "content": "with \\((\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})\\in \\mathcal{T}^{(c_p^{(ij)})}\\) and \\(\\alpha \\in [0,1]^c\\)" + }, + { + "type": "code_caption", + "bbox": [ + 0.511, + 0.095, + 0.741, + 0.11 + ], + "angle": 0, + "content": "Algorithm 1: Local Style Mining." + }, + { + "type": "algorithm", + "bbox": [ + 0.504, + 0.112, + 0.9, + 0.427 + ], + "angle": 0, + "content": "Input: Set \\(\\mathcal{F}_{\\mathrm{s}}\\) of source features batches. Label set \\(\\mathcal{V}_{\\mathrm{s}}\\) in \\(\\mathcal{D}_{s}\\) Set of random prompts \\(\\mathcal{R}\\) and class names \\(\\mathcal{C}\\) \nParam: Number of patches \\(m\\) Number of classes \\(K\\) Output: \\(K\\) sets \\(\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\}\\) of class-wise augmented statistics. \n1 \\(\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\} \\gets \\emptyset\\) \n2 foreach \\((\\mathbf{f}_s\\in \\mathcal{F}_s,\\mathbf{y}_s\\in \\mathcal{Y}_s)\\) do \n3 \\(\\{\\mathbf{y}_p\\} \\leftarrow\\) crop-patch(y,s,m) \n4 \\(\\{c_p\\} ,\\{P_p\\} ,\\{f_p\\} \\leftarrow \\emptyset\\) \n5 foreach \\(\\mathbf{y}_p\\in \\{\\mathbf{y}_p\\}\\) do \n6 \\(c_{p}\\gets\\) get-dominant-class(yp) if \\(c_{p}\\) not in \\(\\{c_p\\}\\) then \n8 \\(\\{c_p\\} \\leftarrow c_p\\) \n9 \\(\\{P_p\\} \\leftarrow\\) concat(sample(R),get-name(cp)) \n10 \\(\\{f_p\\} \\leftarrow f_p\\) \n11 end \n12 end \n13 \\(\\mu^{(c_p)},\\sigma^{(c_p)},\\mathbf{f}_p'\\gets \\mathrm{PIN}_{(P_p)}(\\mathbf{f}_p)\\) \n14 \\(\\mathcal{T}^{(c_p)}\\gets \\mathcal{T}^{(c_p)}\\cup \\{(\\boldsymbol{\\mu}^{(c_p)},\\boldsymbol{\\sigma}^{(c_p)})\\}\\) \n15 end" + }, + { + "type": "code_caption", + "bbox": [ + 0.512, + 0.456, + 0.724, + 0.471 + ], + "angle": 0, + "content": "Algorithm 2: Training FAMix." + }, + { + "type": "algorithm", + "bbox": [ + 0.503, + 0.473, + 0.899, + 0.731 + ], + "angle": 0, + "content": "Input: Set \\(\\mathcal{F}_{\\mathrm{s}}\\) of source features batches. Label set \\(\\mathcal{V}_{\\mathrm{s}}\\) in \\(\\mathcal{D}_{\\mathrm{s}}\\) \\(K\\) sets \\(\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\}\\) of class-wise augmented statistics. \nParam: Number of patches m. 
\nforeach \\((\\mathbf{f}_s\\in \\mathcal{F}_s,\\mathbf{y}_s\\in \\mathcal{Y}_s)\\) do \n\\(\\alpha \\sim \\mathrm{Beta}(0.1,0.1)\\) \nfor \\((i,j)\\in [1,\\sqrt{m} ]\\times [1,\\sqrt{m} ]\\) do \\(c_{p}^{(ij)}\\gets \\mathrm{get - dominant - class}(\\mathbf{y}_{s}^{(ij)})\\) \\(\\mu^{(ij)},\\sigma^{(ij)}\\gets \\mathrm{sample}(\\mathcal{T}^{(c_p^{(ij)})})\\) \\(\\mu_{mix}\\gets (1 - \\alpha).\\mu (\\mathbf{f}_s^{(ij)}) + \\alpha .\\mu^{(ij)}\\) \\(\\sigma_{mix}\\gets (1 - \\alpha).\\sigma (\\mathbf{f}_s^{(ij)}) + \\alpha .\\sigma^{(ij)}\\) \\(\\mathbf{f}_{\\mathrm{s}}^{(ij)}\\gets \\mathrm{AdaIN}(\\mathbf{f}_{\\mathrm{s}}^{(ij)},\\mu_{mix},\\sigma_{mix})\\) \nend \n\\(\\tilde{\\mathbf{y}}_{\\mathrm{s}}\\gets \\mathrm{CLIP - I2}(\\mathbf{f}_{\\mathrm{s}})\\) \nLoss \\(=\\) cross-entropy \\((\\tilde{\\mathbf{y}}_{s},\\mathbf{y}_{s})\\) \nend" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.894, + 0.903 + ], + "angle": 0, + "content": "As shown in Fig. 1, our style mixing strategy differs from [62] which applies a linear interpolation between styles extracted from the images of a limited set of source domain(s) assumed to be available for training. Here, we view the mined styles as variations of multiple proxy target domains defined by the prompts. Training is conducted over all the paths in the feature space between the source and proxy domains without requiring any additional image during training other than the one from source." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "23431" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.079, + 0.09, + 0.472, + 0.278 + ], + "angle": 0, + "content": "
Methodarch.CBMSANASARAFMean
RobustNet [7]RN5036.5835.2040.3328.306.3229.9733.0232.5630.29
SAN & SAW [37]39.7537.3441.8630.79-----
Pin the memory [23]41.0034.6037.4027.083.845.515.897.2720.32
SHADE [58]44.6539.2843.3428.418.1830.3835.4436.8733.32
SiamDoGe [51]42.9637.5440.6428.3410.6030.7135.8436.4532.89
DPCL [53]44.8740.2146.74------
SPC-Net [19]44.1040.4645.51------
NP [11]40.6235.5638.9227.65-----
WildNet* [29]44.6238.4246.0931.348.2730.2936.3235.3933.84
TLDR* [24]46.5142.5846.1830.5713.1336.0238.8940.5836.81
FAMix (ours)48.1545.6152.1134.2314.9637.0938.6640.2538.88
SAN & SAW [37]RN10145.3341.1840.7731.84-----
\( SHADE^† \)[58]46.6643.6645.5031.587.5832.4836.9036.6935.13
WildNet* [29]45.7941.7347.0832.51-----
TLDR* [24]47.5844.8848.8033.14-----
FAMix (ours)49.4746.4051.9736.7219.8941.3840.9142.1541.11
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.286, + 0.47, + 0.37 + ], + "angle": 0, + "content": "Table 2. Single-source DGSS trained on G. Performance (mIoU %) of FAMix compared to other DGSS methods trained on G and evaluated on C, S, M, S, A for ResNet-50 ('RN50') and ResNet-101 ('RN101') backbone architecture ('arch'). * indicates the use of extra-data. † indicates the use of the full data for training. We emphasize best and second best results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.387, + 0.47, + 0.448 + ], + "angle": 0, + "content": "Style transfer is applied through AdaIN (1). Only the standard cross-entropy loss between the ground truth \\(\\mathbf{y}_{\\mathrm{s}}\\) and the prediction \\(\\tilde{\\mathbf{y}}_{\\mathrm{s}}\\) is applied for training the network. Algorithm 2 shows the training steps of FAMix." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.453, + 0.47, + 0.528 + ], + "angle": 0, + "content": "Minimal fine-tuning. During training, we fine-tune only the last few layers of the backbone. Subsequently, we examine various alternatives and show that the minimal extent of fine-tuning is the crucial factor in witnessing the effectiveness of our local style mixing strategy." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.529, + 0.471, + 0.711 + ], + "angle": 0, + "content": "Previous works [11, 36, 62] suggest that shallow feature statistics capture style information while deeper features encode semantic content. Consequently, some DGSS methods focus on learning style-agnostic representations [7, 36, 37], but this can compromise the expressiveness of the representation and suppress content information. In contrast, our intuition is to retain these identified traits by introducing variability to the shallow features through augmentation and mixing. Simultaneously, we guide the network to learn invariant high-level representations by training the final layers of the backbone with a label-preserving assumption, using a standard cross-entropy loss." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.726, + 0.21, + 0.743 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.751, + 0.265, + 0.768 + ], + "angle": 0, + "content": "4.1. Experimental setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.775, + 0.47, + 0.866 + ], + "angle": 0, + "content": "Synthetic datasets. GTAV [41] and SYNTHIA [42] are used as synthetic datasets. GTAV consists of 24966 images split into 12403 images for training, 6382 for validation and 6181 for testing. SYNTHIA consists of 9400 images: 6580 for training and 2820 for validation. GTAV and SYNTHIA are denoted by \\( \\mathsf{G} \\) and \\( \\mathsf{S} \\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Real datasets. Cityscapes [8], BDD-100K [54], and Mapillary [35] contain 2975, 7000, and 18000 images for train-" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.198 + ], + "angle": 0, + "content": "ing and 500, 1000, and 2000 images for validation, respectively. ACDC [44] is a dataset of driving scenes in adverse conditions: night, snow, rain and fog with respectively 106, 100, 100 and 100 images in the validation sets. C, B, and M denote Cityscapes, BDD-100K and Mapillary, respectively; AN, AS, AR and AF denote night, snow, rain and fog subsets of ACDC, respectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.2, + 0.893, + 0.502 + ], + "angle": 0, + "content": "Implementation details. Following previous works [7, 19, 23, 24, 29, 37, 51, 53, 58], we adopt DeepLabv \\(3+\\) [5] as segmentation model. ResNet-50 and ResNet-101 [17], initialized with CLIP pretrained weights, are used in our experiments as backbones. Specifically, we remove the attention pooling layer and add a randomly initialized decoder head. The output stride is 16. Single-source and multisource models are trained respectively for \\(40K\\) and \\(60K\\) iterations with a batch size of 8. The training images are cropped to \\(768 \\times 768\\). Stochastic Gradient Descent (SGD) with a momentum of 0.9 and weight decay of \\(10^{-4}\\) is used as optimizer. Polynomial decay with a power of 0.9 is used, with an initial learning rate of \\(10^{-1}\\) for the classifier and \\(10^{-2}\\) for the backbone. We use color jittering and horizontal flip as data augmentation. Label smoothing regularization [46] is adopted. For style mining, Layer1 features are divided into 9 patches. Each patch is resized to \\(56 \\times 56\\), corresponding to the dimensions of Layer1 features for an input image of size \\(224 \\times 224\\) (i.e. the input dimension of CLIP). We use ImageNet templates for each prompt." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.506, + 0.892, + 0.567 + ], + "angle": 0, + "content": "Evaluation metric. We evaluate our models on the validation sets of the unseen target domains with mean Intersection over Union (mIoU%) of the 19 shared semantic classes. For each experiment, we report the average of three runs." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.574, + 0.792, + 0.59 + ], + "angle": 0, + "content": "4.2. Comparison with DGSS methods" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.891, + 0.628 + ], + "angle": 0, + "content": "Single-source DGSS. We compare FAMix with state-of-the-art DGSS methods under the single-source setting." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.631, + 0.892, + 0.767 + ], + "angle": 0, + "content": "Training on GTAV (G) as source, Tab. 2 reports models trained with either ResNet-50 or ResNet-101 backbones. The unseen target datasets are C, B, M, S, and the four subsets of A. Tab. 2 shows that our method significantly outperforms all the baselines on all the datasets for both backbones. We note that WildNet [29] and TLDR [24] use extra-data, while SHADE [58] uses the full G dataset (24,966 images) for training with ResNet-101. Class-wise performances are reported in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.771, + 0.892, + 0.845 + ], + "angle": 0, + "content": "Training on Cityscapes (C) as source, Tab. 3 reports performance with ResNet-50 backbone. The unseen target datasets are B, M, G, and S. The table shows that our method outperforms the baseline in average, and is competitive to SOTA on G and M." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.849, + 0.892, + 0.88 + ], + "angle": 0, + "content": "Multi-source DGSS. 
We also show the effectiveness of FAMix in the multi-source setting, training on \\( G + S \\) and" + }, + { + "type": "page_footnote", + "bbox": [ + 0.516, + 0.887, + 0.771, + 0.9 + ], + "angle": 0, + "content": "2https://github.com/openai/CLIP/" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "23432" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.084, + 0.089, + 0.465, + 0.196 + ], + "angle": 0, + "content": "
MethodBMGSMean
RobustNet [7]50.7358.6445.0026.2045.14
Pin the memory [23]46.7855.10---
SiamDoGe [51]51.5359.0045.0826.6745.57
WildNet* [29]50.9458.7947.0127.9546.17
DPCL [53]52.29-46.0026.60-
FAMix (ours)54.0758.7245.1232.6747.65
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.206, + 0.47, + 0.263 + ], + "angle": 0, + "content": "Table 3. Single-source DGSS trained on C. Performance (mIoU %) of FAMix compared to other DGSS methods trained on C and evaluated on B, M, G and S for ResNet-50 backbone. * indicates the use of extra-data. We emphasize best and second best results." + }, + { + "type": "table", + "bbox": [ + 0.084, + 0.276, + 0.465, + 0.38 + ], + "angle": 0, + "content": "
MethodCBMMean
RobustNet [7]37.6934.0938.4936.76
Pin the memory [23]44.5138.0742.7041.76
SHADE [58]47.4340.3047.6045.11
SPC-Net [19]46.3643.1848.2345.92
TLDR* [24]48.8342.5847.8046.40
FAMix (ours)49.4145.5151.6148.84
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.391, + 0.469, + 0.448 + ], + "angle": 0, + "content": "Table 4. Multi-source DGSS. Performance (mIoU %) of FAMix compared to other DGSS methods trained on \\( \\mathrm{G} + \\mathrm{S} \\) and evaluated on C, B, M for ResNet-50 backbone. * indicates the use of extra-data. We emphasize best and second best results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.473, + 0.469, + 0.503 + ], + "angle": 0, + "content": "evaluating on C, B and M. The results reported in Tab. 4 for ResNet-50 backbone outperform state-of-the-art." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.507, + 0.469, + 0.582 + ], + "angle": 0, + "content": "Qualitative results. We visually compare the segmentation results with Pin the memory [23], SHADE [58] and WildNet [29] in Fig. 3. FAMix clearly outperforms other DGSS methods on \"stuff\" (e.g., road and sky) and \"things\" (e.g., bicycle and bus) classes." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.59, + 0.412, + 0.607 + ], + "angle": 0, + "content": "4.3. Decoder-Probing Fine-Tuning (DP-FT)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Kumar et al. [26] show that standard fine-tuning may distort the pretrained feature representation, leading to degraded OOD performances for classification. Consequently, they propose a two-step training strategy: (1) Training a linear probe (LP) on top of the frozen backbone features, (2) Finetuning (FT) both the linear probe and the backbone. Inspired by it, Saito et al. [43] apply the same strategy for object detection, which is referred to as Decoder-probing Fine-tuning (DP-FT). They observe that DP-FT improves over DP depending on the architecture. We hypothesize that the effect is also dependent on the pretraining paradigm and the downstream task. As observed in Tab. 1, CLIP might remarkably overfit the source domain when finetuned. In Tab. 5, we compare fine-tuning (FT), decoder-probing (DP) and DP-FT. DP brings improvements over FT since it completely preserves the pretrained representation. Yet, DP major drawback lies in its limitation to adapt features for the downstream task, resulting in suboptimal results. Surprisingly, DP-FT largely falls behind DP, meaning" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.089, + 0.89, + 0.158 + ], + "angle": 0, + "content": "
MethodCBMSANASARAFMean
FT16.8116.3117.8027.102.958.5814.3513.6114.69
DP34.1337.6742.2129.1010.7126.2629.4730.4029.99
DP-FT25.6221.7126.3931.454.2218.2620.0720.8521.07
FAMix (ours)48.1545.6152.1134.2314.9637.0938.6640.2538.88
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.17, + 0.892, + 0.226 + ], + "angle": 0, + "content": "Table 5. FAMix vs. DP-FT. Performance (mIoU%) of FAMix compared to Fine-tuning (FT), Decoder-probing (DP) and Decoder-probing Fine-tuning (DP-FT). We use here ResNet-50, trained on G. We emphasize best and second best results." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.241, + 0.89, + 0.345 + ], + "angle": 0, + "content": "
FreezeAugmentMixCBMSANASARAFMean
XXX16.8116.3117.8027.102.958.5814.3513.6114.69
XX22.4826.0524.1525.404.8317.6122.8619.7520.39
XX20.0721.2422.9126.521.2814.9922.0920.5118.70
X27.5326.5926.2726.914.9018.9125.6022.1422.36
XX37.8338.8844.2431.9312.4129.5931.5633.0532.44
X36.6535.7337.3230.4414.7234.6534.9138.9832.93
X43.4343.7948.1933.7011.3235.5536.1538.1936.29
48.1545.6152.1134.2314.9637.0938.6640.2538.88
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.356, + 0.892, + 0.385 + ], + "angle": 0, + "content": "Table 6. Ablation of FAMix components. Performance (mIoU %) after removing one or more components of FAMix." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.412, + 0.89, + 0.442 + ], + "angle": 0, + "content": "that the learned features over-specialize to the source domain distribution even with a \"decoder warm-up\"." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.443, + 0.892, + 0.518 + ], + "angle": 0, + "content": "The results advocate for the need of specific strategies to preserve CLIP robustness for semantic segmentation. This need emerges from the additional gap between pretraining (i.e. aligning object-level and language representations) and fine-tuning (i.e. supervised pixel classification)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.529, + 0.661, + 0.543 + ], + "angle": 0, + "content": "4.4. Ablation studies" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.553, + 0.89, + 0.582 + ], + "angle": 0, + "content": "We conduct all the ablations on a ResNet-50 backbone with GTAV (G) as source dataset." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.587, + 0.892, + 0.829 + ], + "angle": 0, + "content": "Removing ingredients from the recipe. FAMix is based on minimal fine-tuning of the backbone (i.e., Freeze), style augmentation and mixing. We show in Tab. 6 that the best generalization results are only obtained when combining the three ingredients. Specifically, when the backbone is fine-tuned (i.e., Freeze \\(\\mathcal{X}\\)), the performances are largely harmed. When minimal fine-tuning is performed (i.e., Freeze \\(\\sqrt{\\cdot}\\)), we argue that the augmentations are too strong to be applied without style mixing; the latter brings both effects of domain interpolation and use of the original statistics. Subsequently, when style mixing is not applied (i.e. Freeze \\(\\sqrt{\\cdot}\\), Augment \\(\\sqrt{\\cdot}\\), Mix \\(\\mathcal{X}\\)), the use of mined styles brings mostly no improvement on OOD segmentation compared to training without augmentation (i.e. Freeze \\(\\sqrt{\\cdot}\\), Augment \\(\\mathcal{X}\\), Mix \\(\\mathcal{X}\\)). Note that for Freeze \\(\\sqrt{\\cdot}\\), Augment \\(\\sqrt{\\cdot}\\), Mix \\(\\mathcal{X}\\), the line 8 in Algorithm 2 becomes:" + }, + { + "type": "equation", + "bbox": [ + 0.579, + 0.84, + 0.89, + 0.859 + ], + "angle": 0, + "content": "\\[\n\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)} \\leftarrow \\operatorname {A d a I N} \\left(\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)}, \\boldsymbol {\\mu} ^ {(i j)}, \\boldsymbol {\\sigma} ^ {(i j)}\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Our style mixing is different from MixStyle [62] for being applied: (1) patch-wise and (2) between original styles of" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "23433" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.115, + 0.086, + 0.858, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.292, + 0.89, + 0.319 + ], + "angle": 0, + "content": "Figure 3. Qualitative results. Columns 1-2: Image and ground truth (GT), Columns 3-4-5: DGSS methods results, Column 6: Our results. The models are trained on G with ResNet-50 backbone." 
+ }, + { + "type": "table", + "bbox": [ + 0.079, + 0.333, + 0.468, + 0.414 + ], + "angle": 0, + "content": "
RCPRSPCNCBMSANASARAFMean
45.9943.7150.4834.7515.2235.0934.9238.1737.29
46.1044.2448.9033.6213.3935.9936.6839.8637.35
45.6444.5949.1333.6415.3337.3235.9838.8537.56
47.8344.8350.3834.2714.4337.0737.0738.7638.08
48.1545.6152.1134.2314.9637.0938.6640.2538.88
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.423, + 0.468, + 0.478 + ], + "angle": 0, + "content": "Table 7. Ablation on the prompt construction. Performance (mIoU %) for different prompt constructions. RCP, RSP and CN refer to , and , respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.494, + 0.468, + 0.584 + ], + "angle": 0, + "content": "the source data and augmented versions of them. Note that the case (Freeze \\(\\checkmark\\), Augment \\(\\times\\), Mix \\(\\checkmark\\)) could be seen as a variant of MixStyle, yet applied locally and class-wise. Our complete recipe is proved to be significantly more effective with a boost of \\(\\approx +6\\) mean mIoU w.r.t. the baseline of training without augmentation and mixing." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.588, + 0.468, + 0.738 + ], + "angle": 0, + "content": "Prompt construction. Tab. 7 reports results when ablating the prompt construction. In FAMix, the final prompt is derived by concatenating and ; removing either of those leads to inferior results. Interestingly, replacing the style prompt by random characters - e.g. \"ioscjspa\" - does not significantly degrade the performance. In certain aspects, using random prompts still induces a randomization effect within the FAMix framework. However, meaningful prompts still consistently lead to the best results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.743, + 0.468, + 0.878 + ], + "angle": 0, + "content": "Number of style prompts. FAMix uses a set \\(\\mathcal{R}\\) of random style prompts which are concatenated with the class names; \\(\\mathcal{R}\\) is formed by querying ChatGPT using . The output prompts are provided in Appendix C. Fig. 4a shows that the size of \\(\\mathcal{R}\\) has a marginal impact on FAMix performance. Yet, the mIoU scores on C, B, M and AR are higher for \\(|\\mathcal{R}| = 20\\) compared to \\(|\\mathcal{R}| = 1\\) and almost equal for the other datasets." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.334, + 0.679, + 0.435 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.531, + 0.438, + 0.653, + 0.451 + ], + "angle": 0, + "content": "(a) Number of prompts" + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.334, + 0.889, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.715, + 0.438, + 0.856, + 0.451 + ], + "angle": 0, + "content": "(b) Effect of layer freezing" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.467, + 0.89, + 0.536 + ], + "angle": 0, + "content": "Figure 4. Ablation of prompt set and freezing strategy. (a) Performance \\((\\mathrm{mIoU}\\%)\\) on test datasets w.r.t. the number of random style prompts in \\(\\mathcal{R}\\). (b) Effect of freezing layers reporting on x-axis the last frozen layer. For example, 'L3' means freezing L1, L2 and L3. 'L4' indicates that the Layer4 is partially frozen." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.554, + 0.892, + 0.69 + ], + "angle": 0, + "content": "The low sensitivity of the performance to the size of \\(\\mathcal{R}\\) could be explained by two factors. First, mining even from a single prompt results in different style variations as the optimization starts from different anchor points in the latent space, as argued in [10]. Second, mixing style between the source and the mined proxy domains is the crucial factor making the network explore intermediate domains during training. 
This does not contradict the effect of our prompt construction which leads to the best results (Tab. 7)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.696, + 0.892, + 0.846 + ], + "angle": 0, + "content": "Local vs. global style mining. To highlight the effect of our class-wise local style mining, we perform an ablation replacing it with global style mining. Specifically, the same set of are used, though being concatenated with as a global description instead of local class name. Intuitively, local style mining and mixing induces richer style variations and more contrast among patches. The results in Tab. 8 show the effectiveness of our local style mining and mixing strategy, bringing about 3 mIoU improvement on \\( G \\to C \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.851, + 0.891, + 0.901 + ], + "angle": 0, + "content": "What to mix? Let \\(S = \\bigcup_{k=1}^{K} S^{(k)}\\) and \\(\\mathcal{T} = \\bigcup_{k=1}^{K} \\mathcal{T}^{(k)}\\) the sets of class-wise source and augmented features, respectively. In FAMix training, for an arbitrary patch \\(\\mathbf{f}_s^{(ij)}\\)," + }, + { + "type": "page_footnote", + "bbox": [ + 0.095, + 0.887, + 0.292, + 0.9 + ], + "angle": 0, + "content": "3https://chat.openai.com/" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "23434" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.09, + 0.465, + 0.164 + ], + "angle": 0, + "content": "
Style miningCBMSANASARAFMean
“street view”45.5145.1250.4033.6514.5936.9237.3840.5338.01
“urban scene”46.5945.3851.3333.6714.4235.9637.3040.5238.15
global w/ “roadscape”45.4945.5550.6333.6614.7736.7537.0740.3338.03
“commute snapshot”45.3945.0850.5033.6813.6536.6337.9340.9237.97
“driving”45.0644.9850.6733.3614.8435.1136.2139.5237.47
local48.1545.6152.1134.2314.9637.0938.6640.2538.88
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.177, + 0.468, + 0.218 + ], + "angle": 0, + "content": "Table 8. Ablation on style mining. Global style mining consists of mining one style per feature map, using + as prompt." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.233, + 0.465, + 0.291 + ], + "angle": 0, + "content": "
Style miningCBMSANASARAFMean
S43.4343.7948.1933.7011.3235.5536.1538.1936.29
S∪T44.7645.5950.7834.0513.6736.9237.1838.1337.64
T (ours)48.1545.6152.1134.2314.9637.0938.6640.2538.88
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.303, + 0.468, + 0.331 + ], + "angle": 0, + "content": "Table 9. Ablation on the sets used for mixing. The styles \\((\\mu, \\sigma)\\) used in (3) and (4) are sampled either from \\(S\\) or \\(S \\cup \\mathcal{T}\\) or \\(\\mathcal{T}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.358, + 0.469, + 0.517 + ], + "angle": 0, + "content": "style mixing is performed between the original source statistics and statistics sampled from the augmented set (i.e., \\((\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})\\in \\mathcal{T}^{(c_p^{(ij)})}\\), see (3) and (4)). In class-wise vanilla MixStyle, \\((\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})\\in S^{(c_p^{(ij)})}\\). In Tab. 9, we show that sampling \\((\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})\\) from \\(S^{(c_p^{(ij)})}\\cup T^{(c_p^{(ij)})}\\) does not lead to better generalization, despite sampling from a set with twice the cardinality. This supports our mixing strategy visualized in Fig. 1. Intuitively, sampling from \\(S\\cup T\\) could be viewed as applying either MixStyle or our mixing with a probability \\(p = 0.5\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.536, + 0.468, + 0.656 + ], + "angle": 0, + "content": "Minimal fine-tuning. We argue for minimal fine-tuning as a compromise between pretrained feature preservation and adaptation. Fig. 4b shows an increasing OOD generalization trend with more freezing. Interestingly, only fine-tuning the last layers of the last convolutional block (where the dilation is applied) achieves the best results. When training on Cityscapes, we observed that freezing all the layers except Layer4 achieves the best results." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.666, + 0.35, + 0.683 + ], + "angle": 0, + "content": "4.5. Does FAMix require language?" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.468, + 0.81 + ], + "angle": 0, + "content": "Inspired by the observation that target statistics deviate around the source ones in real cases [11], we conduct an experiment where we replace language-driven style mining by noise perturbation. The same procedure of FAMix is kept: (i) Features are divided into patches, perturbed with noise and then saved into a style bank based on the dominant class; (ii) During training, patch-wise style mixing of original and perturbed styles is performed." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Different from Fan et al. [11], who perform a perturbation on the feature statistics using a normal distribution with pre-defined parameters, we experiment perturbation with different magnitudes of noise controlled by the signal-to-noise ratio (SNR). Consider the mean of a patch \\(\\mu \\in \\mathbb{R}^c\\) as a signal, the goal is to perturb it with some noise" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.09, + 0.889, + 0.24 + ], + "angle": 0, + "content": "
SNRCBMSANASARAFMean
Baseline37.8338.8844.2431.9312.4129.5931.5633.0532.44
528.7829.2430.3221.6712.6024.0025.9525.8724.80
1040.0939.5043.4529.0913.3633.4733.1136.1733.53
1545.0244.1648.6332.9614.5536.0935.9940.9637.30
2045.5244.2949.2633.4512.4035.9636.5238.6037.00
2544.8244.2648.5433.3011.3834.5135.4637.6136.24
3043.0743.8048.3133.4712.3335.0535.5838.1036.21
43.4343.7948.1933.7011.3235.5536.1538.1936.29
MixStyle [62]40.9742.0448.3633.1513.1431.2634.9438.1235.25
Prompts48.1545.6152.1134.2314.9637.0938.6640.2538.88
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.257, + 0.892, + 0.313 + ], + "angle": 0, + "content": "Table 10. Noise vs prompt-driven augmentation. The prompt-driven augmentation in FAMix is replaced by random noise with different levels defined by SNR. We also include vanilla MixStyle. The prompt-driven strategy is superior." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.349, + 0.892, + 0.548 + ], + "angle": 0, + "content": "\\(n_{\\mu} \\in \\mathbb{R}^{c}\\). The \\(\\mathrm{SNR}_{\\mathrm{dB}}\\) between \\(\\| \\mu \\|\\) and \\(\\| n_{\\mu}\\|\\) is defined as \\(\\mathrm{SNR}_{\\mathrm{dB}} = 20\\log_{10}\\left(\\frac{\\|\\mu\\| / \\|n_{\\mu}\\|}{\\|\\mu\\|}\\right)\\). Given \\(\\mu\\), \\(\\mathrm{SNR}_{\\mathrm{dB}}\\), and \\(n \\sim \\mathcal{N}(0,I)\\), where \\(I \\in \\mathbb{R}^{c \\times c}\\) is the identity matrix, the noise is computed as \\(n_{\\mu} = 10^{\\frac{-\\mathrm{SNR}}{20}}\\frac{\\|\\mu\\|}{\\|n\\|} n\\). We add \\(\\mu + n_{\\mu}\\) to the style bank corresponding to the dominant class in the patch. The same applies to \\(\\sigma \\in \\mathbb{R}^{c}\\). The results of training for different noise levels are in Tab. 10. Using language as source of randomization outperforms any noise level. The baseline corresponds to the case where no augmentation nor mixing are performed (See Tab. 6, Freeze \\(\\checkmark\\), Augment \\(\\pmb{x}\\), Mix \\(\\pmb{x}\\)). \\(\\mathrm{SNR} = \\infty\\) could be seen as a variant of MixStyle, applied class-wise to patches (See Tab. 6, Freeze \\(\\checkmark\\), Augment \\(\\pmb{x}\\), Mix \\(\\checkmark\\)). The vanilla MixStyle gets inferior results." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.553, + 0.892, + 0.628 + ], + "angle": 0, + "content": "Besides lower OOD performance, one more disadvantage of noise augmentation compared to our language-driven augmentation is the need to select a value for the SNR, for which the optimal value might vary depending on the target domain encountered at the test time." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.656, + 0.619, + 0.671 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.686, + 0.892, + 0.837 + ], + "angle": 0, + "content": "We presented FAMix, a simple recipe for domain generalized semantic segmentation with CLIP pretraining. We proposed to locally mix the styles of source features with their augmented counterparts obtained using language prompts. Combined with minimal fine-tuning, FAMix significantly outperforms the state-of-the-art approaches. Extensive experiments showcase the effectiveness of our framework. We hope that FAMix will serve as a strong baseline in future works, exploring the potential of leveraging large-scale vision-language models for perception tasks." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.84, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Acknowledgment. This work was partially funded by French project SIGHT (ANR-20-CE23-0016) and was supported by ELSA - European Lighthouse on Secure and Safe AI funded by the European Union under grant agreement No. 101070617. It was performed using HPC resources from GENCI-IDRIS (Grant AD011014477)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "23435" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Kartik Ahuja, Ethan Caballero, Dinghuai Zhang, Jean-Christophe Gagnon-Audet, Yoshua Bengio, Ioannis Mitliagkas, and Irina Rish. Invariance principle meets information bottleneck for out-of-distribution generalization. In NeurIPS, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.185, + 0.472, + 0.254 + ], + "angle": 0, + "content": "[2] James Urquhart Allingham, Jie Ren, Michael W Dusenberry, Xiuye Gu, Yin Cui, Dustin Tran, Jeremiah Zhe Liu, and Balaji Lakshminarayanan. A simple zero-shot prompt weighting technique to improve prompt ensembling in text-image models. In ICML, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.256, + 0.471, + 0.297 + ], + "angle": 0, + "content": "[3] Martin Arjovsky, Léon Bottou, Ishaan Gulrajani, and David Lopez-Paz. Invariant risk minimization. arXiv preprint arXiv:1907.02893, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.298, + 0.471, + 0.339 + ], + "angle": 0, + "content": "[4] Yogesh Balaji, Swami Sankaranarayanan, and Rama Chellappa. Metareg: Towards domain generalization using metaregularization. In NeurIPS, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.34, + 0.471, + 0.395 + ], + "angle": 0, + "content": "[5] Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-decoder with atrous separable convolution for semantic image segmentation. In ECCV, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.397, + 0.471, + 0.424 + ], + "angle": 0, + "content": "[6] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In CVPR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.425, + 0.471, + 0.48 + ], + "angle": 0, + "content": "[7] Sungha Choi, Sanghun Jung, Huiwon Yun, Joanne T Kim, Seungryong Kim, and Jaegul Choo. Robustnet: Improving domain generalization in urban-scene segmentation via instance selective whitening. In CVPR, 2021. 1, 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.481, + 0.471, + 0.548 + ], + "angle": 0, + "content": "[8] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In CVPR, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.55, + 0.471, + 0.592 + ], + "angle": 0, + "content": "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, 2009. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.593, + 0.471, + 0.635 + ], + "angle": 0, + "content": "[10] Mohammad Fahes, Tuan-Hung Vu, Andrei Bursuc, Patrick Pérez, and Raoul de Charette. Poda: Prompt-driven zero-shot domain adaptation. In ICCV, 2023. 1, 3, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.635, + 0.471, + 0.69 + ], + "angle": 0, + "content": "[11] Qi Fan, Mattia Segu, Yu-Wing Tai, Fisher Yu, Chi-Keung Tang, Bernt Schiele, and Dengxin Dai. Towards robust object detection invariant to real-world domain shifts. In ICLR, 2023. 
5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.691, + 0.471, + 0.747 + ], + "angle": 0, + "content": "[12] Alex Fang, Gabriel Ilharco, Mitchell Wortsman, Yuhao Wan, Vaishaal Shankar, Achal Dave, and Ludwig Schmidt. Data determines distributional robustness in contrastive language image pre-training (clip). In ICML, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.747, + 0.471, + 0.803 + ], + "angle": 0, + "content": "[13] Yaroslav Ganin, Evgeniya Ustinova, Hana Ajakan, Pascal Germain, Hugo Larochelle, François Laviolette, Mario Marchand, and Victor Lempitsky. Domain-adversarial training of neural networks. JMLR, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.804, + 0.471, + 0.871 + ], + "angle": 0, + "content": "[14] Yunhao Ge, Jie Ren, Andrew Gallagher, Yuxiao Wang, Ming-Hsuan Yang, Hartwig Adam, Laurent Itti, Balaji Lakshminarayanan, and Jiaping Zhao. Improving zero-shot generalization and robustness of multi-modal models. In CVPR, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.872, + 0.471, + 0.903 + ], + "angle": 0, + "content": "[15] Sachin Goyal, Ananya Kumar, Sankalp Garg, Zico Kolter, and Aditi Raghunathan. Finetune like you pretrain: Im-" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "proved finetuning of zero-shot vision models. In CVPR, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.121, + 0.892, + 0.164 + ], + "angle": 0, + "content": "[16] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.165, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.207, + 0.892, + 0.262 + ], + "angle": 0, + "content": "[18] Judy Hoffman, Eric Tzeng, Taesung Park, Jun-Yan Zhu, Phillip Isola, Kate Saenko, Alexei Efros, and Trevor Darrell. Cycada: Cycle-consistent adversarial domain adaptation. In ICML, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.263, + 0.892, + 0.319 + ], + "angle": 0, + "content": "[19] Wei Huang, Chang Chen, Yong Li, Jiacheng Li, Cheng Li, Fenglong Song, Youliang Yan, and Zhiwei Xiong. Style projected clustering for domain generalized semantic segmentation. In CVPR, 2023. 1, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.32, + 0.892, + 0.36 + ], + "angle": 0, + "content": "[20] Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In ICCV, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.362, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[21] Nishant Jain, Harkirat Behl, Yogesh Singh Rawat, and Vibhav Vineet. Efficiently robustify pre-trained models. In ICCV, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.405, + 0.892, + 0.462 + ], + "angle": 0, + "content": "[22] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.462, + 0.892, + 0.504 + ], + "angle": 0, + "content": "[23] Jin Kim, Jiyoung Lee, Jungin Park, Dongbo Min, and Kwanghoon Sohn. Pin the memory: Learning to generalize semantic segmentation. In CVPR, 2022. 1, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.505, + 0.892, + 0.546 + ], + "angle": 0, + "content": "[24] Sunghwan Kim, Dae-hwan Kim, and Hoseong Kim. Texture learning domain randomization for domain generalized segmentation. In ICCV, 2023. 1, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.547, + 0.892, + 0.603 + ], + "angle": 0, + "content": "[25] David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, and Aaron Courville. Out-of-distribution generalization via risk extrapolation (rex). In ICML, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.604, + 0.892, + 0.658 + ], + "angle": 0, + "content": "[26] Ananya Kumar, Aditi Raghunathan, Robbie Matthew Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. In ICLR, 2022. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.66, + 0.892, + 0.688 + ], + "angle": 0, + "content": "[27] Gihyun Kwon and Jong Chul Ye. Clipstyler: Image style transfer with a single text condition. In CVPR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.689, + 0.892, + 0.744 + ], + "angle": 0, + "content": "[28] Clement Laroudie, Andrei Bursuc, Mai Lan Ha, and Gianni Franchi. Improving clip robustness with knowledge distillation and self-training. arXiv preprint arXiv:2309.10361, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.745, + 0.892, + 0.787 + ], + "angle": 0, + "content": "[29] Suhyeon Lee, Hongje Seong, Seongwon Lee, and Euntai Kim. Wildnet: Learning domain generalized semantic segmentation from the wild. In CVPR, 2022. 1, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.788, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[30] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and Rene Ranftl. Language-driven semantic segmentation. In ICLR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.831, + 0.892, + 0.872 + ], + "angle": 0, + "content": "[31] Haoliang Li, Sinno Jialin Pan, Shiqi Wang, and Alex C Kot. Domain generalization with adversarial feature learning. In CVPR, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.903 + ], + "angle": 0, + "content": "[32] Ya Li, Xinmei Tian, Mingming Gong, Yajing Liu, Tongliang Liu, Kun Zhang, and Dacheng Tao. Deep domain generaliza" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "23436" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "tion via conditional invariant adversarial networks. In ECCV, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.162 + ], + "angle": 0, + "content": "[33] Yunsheng Li, Lu Yuan, and Nuno Vasconcelos. Bidirectional learning for domain adaptation of semantic segmentation. In CVPR, 2019. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.165, + 0.469, + 0.205 + ], + "angle": 0, + "content": "[34] Mingsheng Long, Zhangjie Cao, Jianmin Wang, and Michael I Jordan. Conditional adversarial domain adaptation. In NeurIPS, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.208, + 0.469, + 0.248 + ], + "angle": 0, + "content": "[35] Gerhard Neuhold, Tobias Ollmann, Samuel Rota Bulo, and Peter Kontschieder. The mapillary vistas dataset for semantic understanding of street scenes. In ICCV, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.251, + 0.469, + 0.29 + ], + "angle": 0, + "content": "[36] Xingang Pan, Ping Luo, Jianping Shi, and Xiaou Tang. Two at once: Enhancing learning and generalization capacities via ibn-net. In ECCV, 2018. 1, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.293, + 0.469, + 0.332 + ], + "angle": 0, + "content": "[37] Duo Peng, Yinjie Lei, Munawar Hayat, Yulan Guo, and Wen Li. Semantic-aware domain generalized segmentation. In CVPR, 2022. 1, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.335, + 0.469, + 0.362 + ], + "angle": 0, + "content": "[38] Fengchun Qiao, Long Zhao, and Xi Peng. Learning to learn single domain generalization. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.365, + 0.469, + 0.432 + ], + "angle": 0, + "content": "[39] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.434, + 0.469, + 0.489 + ], + "angle": 0, + "content": "[40] Yongming Rao, Wenliang Zhao, Guangyi Chen, Yansong Tang, Zheng Zhu, Guan Huang, Jie Zhou, and Jiwen Lu. Denseclip: Language-guided dense prediction with context-aware prompting. In CVPR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.491, + 0.469, + 0.531 + ], + "angle": 0, + "content": "[41] Stephan R Richter, Vibhav Vineet, Stefan Roth, and Vladlen Koltun. Playing for data: Ground truth from computer games. In ECCV, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.534, + 0.469, + 0.587 + ], + "angle": 0, + "content": "[42] German Ros, Laura Sellart, Joanna Materzynska, David Vazquez, and Antonio M Lopez. The synthia dataset: A large collection of synthetic images for semantic segmentation of urban scenes. In CVPR, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.591, + 0.469, + 0.644 + ], + "angle": 0, + "content": "[43] Kuniaki Saito, Donghyun Kim, Piotr Teterwak, Rogerio Feris, and Kate Saenko. Mind the backbone: Minimizing backbone distortion for robust object detection. arXiv preprint arXiv:2303.14744, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.469, + 0.687 + ], + "angle": 0, + "content": "[44] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. ACDC: The adverse conditions dataset with correspondences for semantic driving scene understanding. In ICCV, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.689, + 0.469, + 0.73 + ], + "angle": 0, + "content": "[45] Yang Shu, Xingzhuo Guo, Jialong Wu, Ximei Wang, Jianmin Wang, and Mingsheng Long. Clipood: Generalizing clip to out-of-distributions. In ICML, 2023. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.733, + 0.469, + 0.772 + ], + "angle": 0, + "content": "[46] Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In CVPR, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.469, + 0.814 + ], + "angle": 0, + "content": "[47] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In CVPR, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.469, + 0.87 + ], + "angle": 0, + "content": "[48] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In CVPR, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.874, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[49] Jindong Wang, Cuiling Lan, Chang Liu, Yidong Ouyang, Tao Qin, Wang Lu, Yiqiang Chen, Wenjun Zeng, and Philip" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "Yu. Generalizing to unseen domains: A survey on domain generalization. T-KDE, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.189 + ], + "angle": 0, + "content": "[50] Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, et al. Robust fine-tuning of zero-shot models. In CVPR, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.193, + 0.892, + 0.233 + ], + "angle": 0, + "content": "[51] Zhenyao Wu, Xinyi Wu, Xiaoping Zhang, Lili Ju, and Song Wang. Siamdoge: Domain generalizable semantic segmentation using siamese network. In ECCV, 2022. 1, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.236, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[52] Qinwei Xu, Ruipeng Zhang, Ya Zhang, Yanfeng Wang, and Qi Tian. A fourier-based framework for domain generalization. In CVPR, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.331 + ], + "angle": 0, + "content": "[53] Liwei Yang, Xiang Gu, and Jian Sun. Generalized semantic segmentation by self-supervised source domain projection and multi-level contrastive learning. In AAAI, 2023. 1, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.335, + 0.892, + 0.388 + ], + "angle": 0, + "content": "[54] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darryll. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In CVPR, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.392, + 0.892, + 0.445 + ], + "angle": 0, + "content": "[55] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. LiT: Zero-shot transfer with locked-image text tuning. In CVPR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.448, + 0.892, + 0.488 + ], + "angle": 0, + "content": "[56] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.491, + 0.892, + 0.531 + ], + "angle": 0, + "content": "[57] Shanshan Zhao, Mingming Gong, Tongliang Liu, Huan Fu, and Dacheng Tao. Domain generalization via entropy regularization. In NeurIPS, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.534, + 0.892, + 0.587 + ], + "angle": 0, + "content": "[58] Yuyang Zhao, Zhun Zhong, Na Zhao, Nicu Sebe, and Gim Hee Lee. Style-hallucinated dual consistency learning for domain generalized semantic segmentation. In ECCV, 2022. 1, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.591, + 0.892, + 0.617 + ], + "angle": 0, + "content": "[59] Chong Zhou, Chen Change Loy, and Bo Dai. Extract free dense labels from clip. In ECCV, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.62, + 0.892, + 0.659 + ], + "angle": 0, + "content": "[60] Kaiyang Zhou, Yongxin Yang, Timothy Hospedales, and Tao Xiang. Deep domain-adversarial image generation for domain generalisation. In AAAI, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.662, + 0.892, + 0.702 + ], + "angle": 0, + "content": "[61] Kaiyang Zhou, Yongxin Yang, Timothy Hospedales, and Tao Xiang. Learning to generate novel domains for domain generalization. In ECCV, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.706, + 0.892, + 0.745 + ], + "angle": 0, + "content": "[62] Kaiyang Zhou, Yongxin Yang, Yu Qiao, and Tao Xiang. Domain generalization with mixstyle. In ICLR, 2021. 1, 2, 4, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.748, + 0.892, + 0.787 + ], + "angle": 0, + "content": "[63] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. TPAMI, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.79, + 0.892, + 0.83 + ], + "angle": 0, + "content": "[64] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Zwei Liu. Conditional prompt learning for vision-language models. In CVPR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.833, + 0.892, + 0.872 + ], + "angle": 0, + "content": "[65] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 2022. 
1" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.872 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23437" + } + ] +] \ No newline at end of file diff --git a/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/b9db7707-a86d-4d4c-b962-58bd8f08eecd_origin.pdf b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/b9db7707-a86d-4d4c-b962-58bd8f08eecd_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c6b3f8e2f3ac8f8852d83b30adf54cb848d94995 --- /dev/null +++ b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/b9db7707-a86d-4d4c-b962-58bd8f08eecd_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:173f23f6b70ac801b3b940db7896be8cf99618fef99770952b9ffa9812ef9dad +size 10029942 diff --git a/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/full.md b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e9287f617b9bfaf910a5c9bbf2f19a379639c621 --- /dev/null +++ b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/full.md @@ -0,0 +1,364 @@ +# A Simple Recipe for Language-guided Domain Generalized Segmentation + +Mohammad Fahes1 + +Tuan-Hung $\mathrm{V_u^{1,2}}$ + +1 Inria + +Andrei Bursuc $^{1,2}$ + +2 Valeo.ai + +Patrick Pérez3 + +3 Kyutai + +Raoul de Charette1 + +https://astravision.github.io/FAMix + +# Abstract + +Generalization to new domains not seen during training is one of the long-standing challenges in deploying neural networks in real-world applications. Existing generalization techniques either necessitate external images for augmentation, and/or aim at learning invariant representations by imposing various alignment constraints. Large-scale pretraining has recently shown promising generalization capabilities, along with the potential of binding different modalities. For instance, the advent of vision-language models like CLIP has opened the doorway for vision models to exploit the textual modality. In this paper, we introduce a simple framework for generalizing semantic segmentation networks by employing language as the source of randomization. Our recipe comprises three key ingredients: (i) the preservation of the intrinsic CLIP robustness through minimal fine-tuning, (ii) language-driven local style augmentation, and (iii) randomization by locally mixing the source and augmented styles during training. Extensive experiments report state-of-the-art results on various generalization benchmarks. Code is accessible on the project page1. + +# 1. Introduction + +A prominent challenge associated with deep neural networks is their constrained capacity to generalize when confronted with shifts in data distribution. This limitation is rooted in the assumption of data being independent and identically distributed, a presumption that frequently proves unrealistic in real-world scenarios. For instance, in safety-critical applications like autonomous driving, it is imperative for a segmentation model to exhibit resilient generalization capabilities when dealing with alterations in lighting, variations in weather conditions, and shifts in geographic location, among other considerations. 
+ +To address this challenge, domain adaptation [13, 18, 33, 34, 47, 48] has emerged; its core principle revolves around + +aligning the distributions of both the source and target domains. However, DA hinges on having access to target data, which may not always be available. Even when accessible, this data might not encompass the full spectrum of distributions encountered in diverse real-world scenarios. Domain generalization [31, 32, 49, 52, 62, 63] overcomes this limitation by enhancing the robustness of models to arbitrary and previously unseen domains. + +The training of segmentation networks is often backed by large-scale pretraining as initialization for the feature representation. Until now, to the best of our knowledge, domain generalization for semantic segmentation (DGSS) networks [7, 19, 23, 24, 29, 36, 37, 51, 53, 58] are pretrained with ImageNet [9]. The underlying concept is to transfer the representations from the upstream task of classification to the downstream task of segmentation. + +Lately, contrastive language image pretraining (CLIP) [22, 39, 55, 56] has demonstrated that transferable visual representations could be learned from the sole supervision of loose natural language descriptions at very large scale. Subsequently, a plethora of applications have been proposed using CLIP [39], including zero-shot semantic segmentation [30, 59], image editing [27], transfer learning [10, 40], open-vocabulary object detection [16], few-shot learning [64, 65] etc. A recent line of research proposes fine-tuning techniques to preserve the robustness of CLIP under distribution shift [15, 26, 45, 50], but they are limited to classification. + +In this paper, we aim at answering the following question: How to leverage CLIP pretraining for enhanced domain generalization for semantic segmentation? The motivation for rethinking DGSS with CLIP is twofold. On one hand, distribution robustness is a notable characteristic of CLIP [12]. On the other hand, the language modality offers an extra source of information compared to unimodal pretrained models. + +A direct comparison of training two segmentation models under identical conditions but with different pretraining, i.e. ImageNet vs. CLIP, shows that CLIP pretraining does not yield promising results. Indeed, Tab. 1 shows that fine-tuning CLIP-initialized network performs worse than + +
| Pretraining | C | B | M | S | AN | AS | AR | AF | Mean |
|---|---|---|---|---|---|---|---|---|---|
| ImageNet | 29.04 | 32.17 | 34.26 | 29.87 | 4.36 | 22.38 | 28.34 | 26.76 | 25.90 |
| CLIP | 16.81 | 16.31 | 17.80 | 27.10 | 2.95 | 8.58 | 14.35 | 13.61 | 14.69 |
+ +Table 1. Comparison of ImageNet and CLIP pretraining for out-of-distribution semantic segmentation. The network is DeepLabv3+ with ResNet-50 as backbone. The models are trained on GTAV and the performance (mIoU %) is reported on Cityscapes (C), BDD-100K (B), Mapillary (M), Synthia (S), and ACDC Night (AN), Snow (AS), Rain (AR) and Fog (AF). + +its ImageNet counterpart on out-of-distribution (OOD) data. This raises doubts about the suitability of CLIP pretraining for DGSS and indicates that it is more prone to overfitting the source distribution at the expense of degrading its original distributional robustness properties. Note that both models converge and achieve similar results on in-domain data. More details are provided in Appendix A. + +This paper shows that we can prevent such behavior with a simple recipe involving minimal fine-tuning, language-driven style augmentation, and mixing. Our approach is coined FAMix, for Freeze, Augment and Mix. + +It was recently argued that fine-tuning might distort the pretrained representations and negatively affect OOD generalization [26]. To maintain the integrity of the representation, one extreme approach is to entirely freeze the backbone. However, this can undermine representation adaptability and lead to subpar OOD generalization. As a middle-ground strategy balancing adaptation and feature preservation, we suggest minimal fine-tuning of the backbone, where a substantial portion remains frozen, and only the final layers undergo fine-tuning. + +For generalization, we show that rethinking MixStyle [62] leads to significant performance gains. As illustrated in Fig. 1, we mix the statistics of the original source features with augmented statistics mined using language. This helps explore styles beyond the source distribution at training time without using additional image. + +We summarize our contributions as follows: + +- We propose a simple framework for DGSS based on minimal fine-tuning of the backbone and language-driven style augmentation. To the best of our knowledge, we are the first to study DGSS with CLIP pretraining. +- We propose language-driven class-wise local style augmentation. We mine class-specific local statistics using prompts that express random styles and names of patchwise dominant classes. During training, randomization is performed through patch-wise style mixing of the source and mined styles. +- We conduct careful ablations to show the effectiveness of FAMix. Our framework outperforms state-of-the-art approaches in single and multi-source DGSS settings. + +![](images/b9af8f4cf04702bc00671f6450080cc0d1804d090acfc233174df61ecbd8914a.jpg) +Figure 1. Mixing strategies. (Left) MixStyle [62] consists of a linear mixing between the feature statistics of the source domain(s) S samples. (Right) We apply an augmentation $\mathcal{A}(.)$ on the source domain statistics, then perform linear mixing between original and augmented statistics. Intuitively, this enlarges the support of the training distribution by leveraging statistics beyond the source domain(s), as well as discovering intermediate domains. $\mathcal{A}(.)$ could be a language-driven or Gaussian noise augmentation, and we show that the former leads to better generalization results. + +# 2. Related works + +Domain generalization (DG). The goal of DG is to train, from a single or multiple source domains, models that perform well under arbitrary domain shifts. 
The DG literature spans a broad range of approaches, including adversarial learning [32, 57], meta-learning [4, 38], data augmentation [60-62] and domain-invariant representation learning [1, 3, 7, 25]. We refer the reader to [49, 63] for comprehensive surveys on DG. + +Domain generalization with CLIP. CLIP [39] exhibits a remarkable distributional robustness [12]. Nevertheless, fine-tuning comes at the expense of sacrificing generalization. Kumar et al. [26] observe that full fine-tuning can distort the pretrained representation, and propose a two-stage strategy, consisting of training a linear probe with a frozen feature extractor, then fine-tuning both. Wortman et al. [50] propose assembling the weights of zero-shot and fine-tuned models. Goyal et al. [15] show that preserving the pretraining paradigm (i.e. contrastive learning) during the adaptation to the downstream task improves both in-domain (ID) and OOD performance without multi-step fine-tuning or weight assembling. CLIPood [45] introduces margin metric softmax training objective and Beta moving average for optimization to handle both open-class and open-domain at test time. On the other hand, distributional robustness could be improved by training a small amount of parameters on top of a frozen CLIP backbone in a teacher-student manner [21, 28]. Other works show that specialized prompt assembling and/or image assembling strategies [2, 14] coupled with label augmentation using the WordNet hierarchy improve robustness in classification. + +Domain Generalized Semantic Segmentation. DGSS methods could be categorized into three main groups: normalization methods, domain randomization (DR) and in + +![](images/6dca2118158c16ccc62ceca3ae190802f7c87077359af3f3e1fee053af4e6ef2.jpg) +Figure 2. Overall process of FAMix. FAMix consists of two steps. (Left) Local style mining consists of dividing the low-level feature activations into patches, which are used for style mining using Prompt-driven Instance Normalization (PIN) [10]. Specifically, for each patch, the dominant class is queried from the ground truth, and the mined style is added to corresponding class-specific style bank. (Right) Training the segmentation network is performed with minimal fine-tuning of the backbone. At each iteration, the low-level feature activations are viewed as grids of patches. For each patch, the dominant class is queried using the ground truth, then a style is sampled from the corresponding style bank. Style randomization is performed by normalizing each patch in the grid by its statistics, and transferring the new style which is a mixing between the original style and the sampled one. The network is trained using only a cross-entropy loss. + +variant representation learning. Normalization methods aim at removing style contribution from the representation. For instance, IBN-Net [36] shows that Instance Normalization (IN) makes the representation invariant to variations in the scene appearance (e.g., change of colors, illumination, etc.), and that combining IN and batch normalization (BN) helps the synthetic-to-real generalization. SAN & SAW [37] proposes semantic-aware feature normalization and whitening, while RobustNet [7] proposes an instance selective whitening loss, where only feature covariances that are sensitive to photometric transformations are whitened. DR aims instead at diversifying the data during training. Some methods use additional data for DR. 
For example, WildNet [29] uses ImageNet [9] data for content and style extension learning, while TLDR [24] proposes learning texture from random style images. Other methods like SiamDoGe [51] perform DR solely by data augmentation, using a Siamese [6] structure. Finally in the invariant representation learning group, SPC-Net [19] builds a representation space based on style and semantic projection and clustering, and SHADE [58] regularizes the training with a style consistency loss and a retrospection consistency loss. + +# 3. Method + +FAMix proposes an effective recipe for DGSS through the blending of simple ingredients. It consists of two stages (see Fig. 2): (i) Local style mining from language (Sec. 3.2); (ii) Training of a segmentation network with minimal fin-tuning and local style mixing (Sec. 3.3). In Fig. 2 and in the following, CLIP-I1 denotes the stem layers and Layer1 of CLIP image encoder, CLIP-I2 the remaining layers excluding the attention pooling, and CLIP-T the text encoder. + +We start with some preliminary background knowledge, introducing AdaIN and PIN which are essential to our work. + +# 3.1. Preliminaries + +Adaptive Instance Normalization (AdaIN). For a feature map $\mathbf{f} \in \mathbb{R}^{h \times w \times c}$ , AdaIN [20] shows that the channel-wise mean $\boldsymbol{\mu} \in \mathbb{R}^c$ and standard deviation $\sigma \in \mathbb{R}^c$ capture information about the style of the input image, allowing style transfer between images. Hence, stylizing a source feature $\mathbf{f}_s$ with an arbitrary target style $(\mu(\mathbf{f}_t), \sigma(\mathbf{f}_t))$ reads: + +$$ +\operatorname {A d a I N} \left(\mathbf {f} _ {\mathrm {s}}, \mathbf {f} _ {\mathrm {t}}\right) = \sigma \left(\mathbf {f} _ {\mathrm {t}}\right) \left(\frac {\mathbf {f} _ {\mathrm {s}} - \mu \left(\mathbf {f} _ {\mathrm {s}}\right)}{\sigma \left(\mathbf {f} _ {\mathrm {s}}\right)}\right) + \mu \left(\mathbf {f} _ {\mathrm {t}}\right), \tag {1} +$$ + +with $\mu (\cdot)$ and $\sigma (\cdot)$ the mean and standard deviation of input feature; multiplications and additions being element-wise. + +Prompt-driven Instance Normalization (PIN). PIN was introduced for prompt-driven zero-shot domain adaptation in PØDA [10]. It replaces the target style $(\mu(\mathbf{f}_{\mathrm{t}}), \sigma(\mathbf{f}_{\mathrm{t}}))$ in AdaIN (1) with two estimizable variables $(\mu, \sigma)$ guided by a single prompt in natural language. The rationale is to leverage a frozen CLIP [39] to mine visual styles from the prompt representation in the shared space. Given a prompt $P$ and a feature map $\mathbf{f}_{\mathrm{s}}$ , PIN reads as: + +$$ +\mathrm {P I N} _ {(P)} \left(\mathbf {f} _ {\mathrm {s}}\right) = \sigma \left(\frac {\mathbf {f} _ {\mathrm {s}} - \mu \left(\mathbf {f} _ {\mathrm {s}}\right)}{\sigma \left(\mathbf {f} _ {\mathrm {s}}\right)}\right) + \boldsymbol {\mu}, \tag {2} +$$ + +where $\mu$ and $\sigma$ are optimized using gradient descent, such that the cosine distance between the visual feature representation and the prompt representation is minimized. + +Different from PØDA which mines styles globally with a predetermined prompt describing the target domain, we make use of PIN to mine class-specific styles using local patches of the features, leveraging random style prompts. + +Further, we show the effectiveness of incorporating the class name in the prompt for better style mining. + +# 3.2. 
Local Style Mining + +Our approach is to leverage PIN to mine class-specific style banks that used for feature augmentation when training FAMix. Given a set of cropped images $\mathcal{I}_{\mathrm{s}}$ , we encode them using CLIP-I1 to get a set of low-level features $\mathcal{F}_{\mathrm{s}}$ . Each batch $b$ of features $\mathbf{f}_{\mathrm{s}} \in \mathcal{F}_{\mathrm{s}}$ is cropped into $m$ patches, resulting in $b \times m$ patches $\mathbf{f}_p$ , associated ground-truth annotation $\mathbf{y}_p$ , of size $h / \sqrt{m} \times w / \sqrt{m} \times c$ . + +We aim at populating $K$ style banks, $K$ being the total number of classes. For a feature patch $\mathbf{f}_p$ , we compute the dominant class from the corresponding label patch $\mathbf{y}_p$ , and get its name $t_p$ from the predefined classes in the training dataset. Given a set of prompts describing random styles $\mathcal{R}$ , the target prompt $P_p$ is formed by concatenating a randomly sampled style prompt $r$ from $\mathcal{R}$ and $t_p$ (e.g., retro futurism style building). We show in the experiments (Sec. 4.4) that our method is not very sensitive to the prompt design, yet our prompt construction works best. + +The idea is to mine proxy domains and explore intermediate ones in a class-aware manner (as detailed in Sec. 3.3), which makes our work fundamentally different from [10], that steers features towards a particular target style and corresponding domain, and better suited to generalization. + +To handle the class imbalance problem, we simply select one feature patch $\mathbf{f}_p$ per class among the total $b\times m$ patches, as shown in Fig. 2. Consequently, we apply PIN (2) to optimize the local styles to match the representations of their corresponding prompts, and use the mined styles to populate the corresponding style banks. The complete procedure is outlined in Algorithm 1. + +The resulting style banks $\{\mathcal{T}^{(1)},\dots ,\mathcal{T}^{(K)}\}$ are used for domain randomization during training. + +# 3.3. Training FAMix + +Style randomization. During training, randomly cropped images $\mathcal{I}_{\mathrm{s}}$ are encoded into $\mathbf{f}_{\mathrm{s}}$ using CLIP-11. Each batch of feature maps $\mathbf{f}_{\mathrm{s}}$ is viewed as a grid of $m$ patches, without cropping them. For each patch $\mathbf{f}_{\mathrm{s}}^{(ij)}$ within the grid, the dominant class $c_{p}^{(ij)}$ is queried using the corresponding ground truth patch $\mathbf{y}_{\mathrm{s}}^{(ij)}$ , and a style is randomly sampled from the corresponding mined bank $\mathcal{T}(c_p^{(ij)})$ . We then apply patch-wise convex combination (i.e., style mixing) of the original style of the patch and the mined style. Specifically, for an arbitrary patch $\mathbf{f}_{\mathrm{s}}^{(ij)}$ , our local style mixing reads: + +$$ +\mu_ {m i x} \leftarrow (1 - \alpha) \mu \left(\mathbf {f} _ {\mathrm {s}} ^ {(i j)}\right) + \alpha \boldsymbol {\mu} ^ {(i j)} \tag {3} +$$ + +$$ +\sigma_ {m i x} \leftarrow (1 - \alpha) \sigma \left(\mathbf {f} _ {\mathrm {s}} ^ {(i j)}\right) + \alpha \boldsymbol {\sigma} ^ {(i j)}, \tag {4} +$$ + +with $(\pmb{\mu}^{(ij)},\pmb{\sigma}^{(ij)})\in \mathcal{T}^{(c_p^{(ij)})}$ and $\alpha \in [0,1]^c$ + +Algorithm 1: Local Style Mining. +Input: Set $\mathcal{F}_{\mathrm{s}}$ of source features batches. 
Label set $\mathcal{V}_{\mathrm{s}}$ in $\mathcal{D}_{s}$ Set of random prompts $\mathcal{R}$ and class names $\mathcal{C}$ +Param: Number of patches $m$ Number of classes $K$ Output: $K$ sets $\{\mathcal{T}^{(1)},\dots ,\mathcal{T}^{(K)}\}$ of class-wise augmented statistics. +1 $\{\mathcal{T}^{(1)},\dots ,\mathcal{T}^{(K)}\} \gets \emptyset$ +2 foreach $(\mathbf{f}_s\in \mathcal{F}_s,\mathbf{y}_s\in \mathcal{Y}_s)$ do +3 $\{\mathbf{y}_p\} \leftarrow$ crop-patch(y,s,m) +4 $\{c_p\} ,\{P_p\} ,\{f_p\} \leftarrow \emptyset$ +5 foreach $\mathbf{y}_p\in \{\mathbf{y}_p\}$ do +6 $c_{p}\gets$ get-dominant-class(yp) if $c_{p}$ not in $\{c_p\}$ then +8 $\{c_p\} \leftarrow c_p$ +9 $\{P_p\} \leftarrow$ concat(sample(R),get-name(cp)) +10 $\{f_p\} \leftarrow f_p$ +11 end +12 end +13 $\mu^{(c_p)},\sigma^{(c_p)},\mathbf{f}_p'\gets \mathrm{PIN}_{(P_p)}(\mathbf{f}_p)$ +14 $\mathcal{T}^{(c_p)}\gets \mathcal{T}^{(c_p)}\cup \{(\boldsymbol{\mu}^{(c_p)},\boldsymbol{\sigma}^{(c_p)})\}$ +15 end + +Algorithm 2: Training FAMix. +Input: Set $\mathcal{F}_{\mathrm{s}}$ of source features batches. Label set $\mathcal{V}_{\mathrm{s}}$ in $\mathcal{D}_{\mathrm{s}}$ $K$ sets $\{\mathcal{T}^{(1)},\dots ,\mathcal{T}^{(K)}\}$ of class-wise augmented statistics. +Param: Number of patches m. +foreach $(\mathbf{f}_s\in \mathcal{F}_s,\mathbf{y}_s\in \mathcal{Y}_s)$ do + $\alpha \sim \mathrm{Beta}(0.1,0.1)$ +for $(i,j)\in [1,\sqrt{m} ]\times [1,\sqrt{m} ]$ do $c_{p}^{(ij)}\gets \mathrm{get - dominant - class}(\mathbf{y}_{s}^{(ij)})$ $\mu^{(ij)},\sigma^{(ij)}\gets \mathrm{sample}(\mathcal{T}^{(c_p^{(ij)})})$ $\mu_{mix}\gets (1 - \alpha).\mu (\mathbf{f}_s^{(ij)}) + \alpha .\mu^{(ij)}$ $\sigma_{mix}\gets (1 - \alpha).\sigma (\mathbf{f}_s^{(ij)}) + \alpha .\sigma^{(ij)}$ $\mathbf{f}_{\mathrm{s}}^{(ij)}\gets \mathrm{AdaIN}(\mathbf{f}_{\mathrm{s}}^{(ij)},\mu_{mix},\sigma_{mix})$ +end + $\tilde{\mathbf{y}}_{\mathrm{s}}\gets \mathrm{CLIP - I2}(\mathbf{f}_{\mathrm{s}})$ +Loss $=$ cross-entropy $(\tilde{\mathbf{y}}_{s},\mathbf{y}_{s})$ +end + +As shown in Fig. 1, our style mixing strategy differs from [62] which applies a linear interpolation between styles extracted from the images of a limited set of source domain(s) assumed to be available for training. Here, we view the mined styles as variations of multiple proxy target domains defined by the prompts. Training is conducted over all the paths in the feature space between the source and proxy domains without requiring any additional image during training other than the one from source. + +
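As an illustration of this step, the snippet below sketches the patch-wise style randomization of Algorithm 2, i.e. the convex combination of Eqs. (3)-(4) followed by the AdaIN transfer of Eq. (1). It is a minimal PyTorch sketch rather than the released implementation: the tensor shapes, the `style_banks` container and the helper names are our own assumptions, and the channel-wise `alpha` follows the Beta(0.1, 0.1) sampling of Algorithm 2.

```python
import random
import torch


def local_style_mixing(f_s, y_s, style_banks, grid=3, eps=1e-6):
    """Patch-wise style randomization (Algorithm 2, Eqs. 1, 3, 4).

    f_s         : low-level features, shape (B, C, H, W).
    y_s         : ground-truth labels resized to (B, H, W) at the feature resolution.
    style_banks : dict {class_id: [(mu, sigma), ...]} with tensors of shape (C,),
                  assumed to come from the local style mining stage (Algorithm 1).
    grid        : sqrt(m), i.e. a grid x grid decomposition of the feature map.
    """
    B, C, H, W = f_s.shape
    ph, pw = H // grid, W // grid
    # Channel-wise mixing coefficient, sampled once per batch as in Algorithm 2.
    alpha = torch.distributions.Beta(0.1, 0.1).sample((C,)).to(f_s.device)

    out = f_s.clone()
    for b in range(B):
        for i in range(grid):
            for j in range(grid):
                patch = f_s[b, :, i * ph:(i + 1) * ph, j * pw:(j + 1) * pw]
                labels = y_s[b, i * ph:(i + 1) * ph, j * pw:(j + 1) * pw]
                cls = int(labels.flatten().mode().values)  # dominant class of the patch
                if not style_banks.get(cls):
                    continue  # no mined style for this class: keep the original style
                mu_t, sigma_t = random.choice(style_banks[cls])
                mu_s = patch.mean(dim=(1, 2))
                sigma_s = patch.std(dim=(1, 2)) + eps
                # Eqs. (3)-(4): convex combination of source and mined statistics.
                mu_mix = (1 - alpha) * mu_s + alpha * mu_t
                sigma_mix = (1 - alpha) * sigma_s + alpha * sigma_t
                # Eq. (1): AdaIN transfer of the mixed style onto the patch.
                normalized = (patch - mu_s[:, None, None]) / sigma_s[:, None, None]
                out[b, :, i * ph:(i + 1) * ph, j * pw:(j + 1) * pw] = \
                    sigma_mix[:, None, None] * normalized + mu_mix[:, None, None]
    return out
```

In FAMix this operation is applied to the low-level (Layer1) activations produced by the frozen part of the backbone, and only the last layers are fine-tuned with the standard cross-entropy loss.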
| Method | arch. | C | B | M | S | AN | AS | AR | AF | Mean |
|---|---|---|---|---|---|---|---|---|---|---|
| RobustNet [7] | RN50 | 36.58 | 35.20 | 40.33 | 28.30 | 6.32 | 29.97 | 33.02 | 32.56 | 30.29 |
| SAN & SAW [37] | RN50 | 39.75 | 37.34 | 41.86 | 30.79 | - | - | - | - | - |
| Pin the memory [23] | RN50 | 41.00 | 34.60 | 37.40 | 27.08 | 3.84 | 5.51 | 5.89 | 7.27 | 20.32 |
| SHADE [58] | RN50 | 44.65 | 39.28 | 43.34 | 28.41 | 8.18 | 30.38 | 35.44 | 36.87 | 33.32 |
| SiamDoGe [51] | RN50 | 42.96 | 37.54 | 40.64 | 28.34 | 10.60 | 30.71 | 35.84 | 36.45 | 32.89 |
| DPCL [53] | RN50 | 44.87 | 40.21 | 46.74 | - | - | - | - | - | - |
| SPC-Net [19] | RN50 | 44.10 | 40.46 | 45.51 | - | - | - | - | - | - |
| NP [11] | RN50 | 40.62 | 35.56 | 38.92 | 27.65 | - | - | - | - | - |
| WildNet* [29] | RN50 | 44.62 | 38.42 | 46.09 | 31.34 | 8.27 | 30.29 | 36.32 | 35.39 | 33.84 |
| TLDR* [24] | RN50 | 46.51 | 42.58 | 46.18 | 30.57 | 13.13 | 36.02 | 38.89 | 40.58 | 36.81 |
| FAMix (ours) | RN50 | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88 |
| SAN & SAW [37] | RN101 | 45.33 | 41.18 | 40.77 | 31.84 | - | - | - | - | - |
| SHADE† [58] | RN101 | 46.66 | 43.66 | 45.50 | 31.58 | 7.58 | 32.48 | 36.90 | 36.69 | 35.13 |
| WildNet* [29] | RN101 | 45.79 | 41.73 | 47.08 | 32.51 | - | - | - | - | - |
| TLDR* [24] | RN101 | 47.58 | 44.88 | 48.80 | 33.14 | - | - | - | - | - |
| FAMix (ours) | RN101 | 49.47 | 46.40 | 51.97 | 36.72 | 19.89 | 41.38 | 40.91 | 42.15 | 41.11 |
+ +Table 2. Single-source DGSS trained on G. Performance (mIoU %) of FAMix compared to other DGSS methods trained on G and evaluated on C, S, M, S, A for ResNet-50 ('RN50') and ResNet-101 ('RN101') backbone architecture ('arch'). * indicates the use of extra-data. † indicates the use of the full data for training. We emphasize best and second best results. + +Style transfer is applied through AdaIN (1). Only the standard cross-entropy loss between the ground truth $\mathbf{y}_{\mathrm{s}}$ and the prediction $\tilde{\mathbf{y}}_{\mathrm{s}}$ is applied for training the network. Algorithm 2 shows the training steps of FAMix. + +Minimal fine-tuning. During training, we fine-tune only the last few layers of the backbone. Subsequently, we examine various alternatives and show that the minimal extent of fine-tuning is the crucial factor in witnessing the effectiveness of our local style mixing strategy. + +Previous works [11, 36, 62] suggest that shallow feature statistics capture style information while deeper features encode semantic content. Consequently, some DGSS methods focus on learning style-agnostic representations [7, 36, 37], but this can compromise the expressiveness of the representation and suppress content information. In contrast, our intuition is to retain these identified traits by introducing variability to the shallow features through augmentation and mixing. Simultaneously, we guide the network to learn invariant high-level representations by training the final layers of the backbone with a label-preserving assumption, using a standard cross-entropy loss. + +# 4. Experiments + +# 4.1. Experimental setup + +Synthetic datasets. GTAV [41] and SYNTHIA [42] are used as synthetic datasets. GTAV consists of 24966 images split into 12403 images for training, 6382 for validation and 6181 for testing. SYNTHIA consists of 9400 images: 6580 for training and 2820 for validation. GTAV and SYNTHIA are denoted by $\mathsf{G}$ and $\mathsf{S}$ , respectively. + +Real datasets. Cityscapes [8], BDD-100K [54], and Mapillary [35] contain 2975, 7000, and 18000 images for train- + +ing and 500, 1000, and 2000 images for validation, respectively. ACDC [44] is a dataset of driving scenes in adverse conditions: night, snow, rain and fog with respectively 106, 100, 100 and 100 images in the validation sets. C, B, and M denote Cityscapes, BDD-100K and Mapillary, respectively; AN, AS, AR and AF denote night, snow, rain and fog subsets of ACDC, respectively. + +Implementation details. Following previous works [7, 19, 23, 24, 29, 37, 51, 53, 58], we adopt DeepLabv $3+$ [5] as segmentation model. ResNet-50 and ResNet-101 [17], initialized with CLIP pretrained weights, are used in our experiments as backbones. Specifically, we remove the attention pooling layer and add a randomly initialized decoder head. The output stride is 16. Single-source and multisource models are trained respectively for $40K$ and $60K$ iterations with a batch size of 8. The training images are cropped to $768 \times 768$ . Stochastic Gradient Descent (SGD) with a momentum of 0.9 and weight decay of $10^{-4}$ is used as optimizer. Polynomial decay with a power of 0.9 is used, with an initial learning rate of $10^{-1}$ for the classifier and $10^{-2}$ for the backbone. We use color jittering and horizontal flip as data augmentation. Label smoothing regularization [46] is adopted. For style mining, Layer1 features are divided into 9 patches. 
Each patch is resized to $56 \times 56$ , corresponding to the dimensions of Layer1 features for an input image of size $224 \times 224$ (i.e. the input dimension of CLIP). We use ImageNet templates for each prompt. + +Evaluation metric. We evaluate our models on the validation sets of the unseen target domains with mean Intersection over Union (mIoU%) of the 19 shared semantic classes. For each experiment, we report the average of three runs. + +# 4.2. Comparison with DGSS methods + +Single-source DGSS. We compare FAMix with state-of-the-art DGSS methods under the single-source setting. + +Training on GTAV (G) as source, Tab. 2 reports models trained with either ResNet-50 or ResNet-101 backbones. The unseen target datasets are C, B, M, S, and the four subsets of A. Tab. 2 shows that our method significantly outperforms all the baselines on all the datasets for both backbones. We note that WildNet [29] and TLDR [24] use extra-data, while SHADE [58] uses the full G dataset (24,966 images) for training with ResNet-101. Class-wise performances are reported in Appendix B. + +Training on Cityscapes (C) as source, Tab. 3 reports performance with ResNet-50 backbone. The unseen target datasets are B, M, G, and S. The table shows that our method outperforms the baseline in average, and is competitive to SOTA on G and M. + +Multi-source DGSS. We also show the effectiveness of FAMix in the multi-source setting, training on $G + S$ and + +
| Method | B | M | G | S | Mean |
|---|---|---|---|---|---|
| RobustNet [7] | 50.73 | 58.64 | 45.00 | 26.20 | 45.14 |
| Pin the memory [23] | 46.78 | 55.10 | - | - | - |
| SiamDoGe [51] | 51.53 | 59.00 | 45.08 | 26.67 | 45.57 |
| WildNet* [29] | 50.94 | 58.79 | 47.01 | 27.95 | 46.17 |
| DPCL [53] | 52.29 | - | 46.00 | 26.60 | - |
| FAMix (ours) | 54.07 | 58.72 | 45.12 | 32.67 | 47.65 |
+ +Table 3. Single-source DGSS trained on C. Performance (mIoU %) of FAMix compared to other DGSS methods trained on C and evaluated on B, M, G and S for ResNet-50 backbone. * indicates the use of extra-data. We emphasize best and second best results. + +
| Method | C | B | M | Mean |
|---|---|---|---|---|
| RobustNet [7] | 37.69 | 34.09 | 38.49 | 36.76 |
| Pin the memory [23] | 44.51 | 38.07 | 42.70 | 41.76 |
| SHADE [58] | 47.43 | 40.30 | 47.60 | 45.11 |
| SPC-Net [19] | 46.36 | 43.18 | 48.23 | 45.92 |
| TLDR* [24] | 48.83 | 42.58 | 47.80 | 46.40 |
| FAMix (ours) | 49.41 | 45.51 | 51.61 | 48.84 |
+ +evaluating on C, B and M. The results reported in Tab. 4 for ResNet-50 backbone outperform state-of-the-art. + +Qualitative results. We visually compare the segmentation results with Pin the memory [23], SHADE [58] and WildNet [29] in Fig. 3. FAMix clearly outperforms other DGSS methods on "stuff" (e.g., road and sky) and "things" (e.g., bicycle and bus) classes. + +# 4.3. Decoder-Probing Fine-Tuning (DP-FT) + +Kumar et al. [26] show that standard fine-tuning may distort the pretrained feature representation, leading to degraded OOD performances for classification. Consequently, they propose a two-step training strategy: (1) Training a linear probe (LP) on top of the frozen backbone features, (2) Finetuning (FT) both the linear probe and the backbone. Inspired by it, Saito et al. [43] apply the same strategy for object detection, which is referred to as Decoder-probing Fine-tuning (DP-FT). They observe that DP-FT improves over DP depending on the architecture. We hypothesize that the effect is also dependent on the pretraining paradigm and the downstream task. As observed in Tab. 1, CLIP might remarkably overfit the source domain when finetuned. In Tab. 5, we compare fine-tuning (FT), decoder-probing (DP) and DP-FT. DP brings improvements over FT since it completely preserves the pretrained representation. Yet, DP major drawback lies in its limitation to adapt features for the downstream task, resulting in suboptimal results. Surprisingly, DP-FT largely falls behind DP, meaning + +Table 4. Multi-source DGSS. Performance (mIoU %) of FAMix compared to other DGSS methods trained on $\mathrm{G} + \mathrm{S}$ and evaluated on C, B, M for ResNet-50 backbone. * indicates the use of extra-data. We emphasize best and second best results. + +
| Method | C | B | M | S | AN | AS | AR | AF | Mean |
|---|---|---|---|---|---|---|---|---|---|
| FT | 16.81 | 16.31 | 17.80 | 27.10 | 2.95 | 8.58 | 14.35 | 13.61 | 14.69 |
| DP | 34.13 | 37.67 | 42.21 | 29.10 | 10.71 | 26.26 | 29.47 | 30.40 | 29.99 |
| DP-FT | 25.62 | 21.71 | 26.39 | 31.45 | 4.22 | 18.26 | 20.07 | 20.85 | 21.07 |
| FAMix (ours) | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88 |
+ +Table 5. FAMix vs. DP-FT. Performance (mIoU%) of FAMix compared to Fine-tuning (FT), Decoder-probing (DP) and Decoder-probing Fine-tuning (DP-FT). We use here ResNet-50, trained on G. We emphasize best and second best results. + +
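To make the compared schedules concrete, the sketch below summarizes the decoder-probing then fine-tuning (DP-FT) procedure of Tab. 5 as a two-stage loop. It is an illustrative sketch only: `model.backbone`, `model.decoder`, the data loader and the learning-rate defaults are placeholders, not the exact settings used for the numbers in the table.

```python
import torch


def dp_ft(model, loader, criterion, probe_iters, ft_iters, lr_probe=1e-1, lr_ft=1e-2):
    """Decoder-probing (DP) followed by full fine-tuning (FT).

    Stage 1 trains only the randomly initialized decoder on top of the frozen
    CLIP backbone; stage 2 unfreezes the backbone and fine-tunes everything.
    """
    def run(params, iters, lr):
        opt = torch.optim.SGD(params, lr=lr, momentum=0.9, weight_decay=1e-4)
        data = iter(loader)
        for _ in range(iters):
            try:
                x, y = next(data)
            except StopIteration:
                data = iter(loader)
                x, y = next(data)
            opt.zero_grad()
            criterion(model(x), y).backward()
            opt.step()

    # Stage 1: decoder probing, backbone kept frozen.
    for p in model.backbone.parameters():
        p.requires_grad_(False)
    run(model.decoder.parameters(), probe_iters, lr_probe)

    # Stage 2: joint fine-tuning of backbone and decoder.
    for p in model.backbone.parameters():
        p.requires_grad_(True)
    run(model.parameters(), ft_iters, lr_ft)
```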
| Freeze | Augment | Mix | C | B | M | S | AN | AS | AR | AF | Mean |
|---|---|---|---|---|---|---|---|---|---|---|---|
| ✗ | ✗ | ✗ | 16.81 | 16.31 | 17.80 | 27.10 | 2.95 | 8.58 | 14.35 | 13.61 | 14.69 |
| ✗ | ✓ | ✗ | 22.48 | 26.05 | 24.15 | 25.40 | 4.83 | 17.61 | 22.86 | 19.75 | 20.39 |
| ✗ | ✗ | ✓ | 20.07 | 21.24 | 22.91 | 26.52 | 1.28 | 14.99 | 22.09 | 20.51 | 18.70 |
| ✗ | ✓ | ✓ | 27.53 | 26.59 | 26.27 | 26.91 | 4.90 | 18.91 | 25.60 | 22.14 | 22.36 |
| ✓ | ✗ | ✗ | 37.83 | 38.88 | 44.24 | 31.93 | 12.41 | 29.59 | 31.56 | 33.05 | 32.44 |
| ✓ | ✓ | ✗ | 36.65 | 35.73 | 37.32 | 30.44 | 14.72 | 34.65 | 34.91 | 38.98 | 32.93 |
| ✓ | ✗ | ✓ | 43.43 | 43.79 | 48.19 | 33.70 | 11.32 | 35.55 | 36.15 | 38.19 | 36.29 |
| ✓ | ✓ | ✓ | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88 |
+ +Table 6. Ablation of FAMix components. Performance (mIoU %) after removing one or more components of FAMix. + +that the learned features over-specialize to the source domain distribution even with a "decoder warm-up". + +The results advocate for the need of specific strategies to preserve CLIP robustness for semantic segmentation. This need emerges from the additional gap between pretraining (i.e. aligning object-level and language representations) and fine-tuning (i.e. supervised pixel classification). + +# 4.4. Ablation studies + +We conduct all the ablations on a ResNet-50 backbone with GTAV (G) as source dataset. + +Removing ingredients from the recipe. FAMix is based on minimal fine-tuning of the backbone (i.e., Freeze), style augmentation and mixing. We show in Tab. 6 that the best generalization results are only obtained when combining the three ingredients. Specifically, when the backbone is fine-tuned (i.e., Freeze $\mathcal{X}$ ), the performances are largely harmed. When minimal fine-tuning is performed (i.e., Freeze $\sqrt{\cdot}$ ), we argue that the augmentations are too strong to be applied without style mixing; the latter brings both effects of domain interpolation and use of the original statistics. Subsequently, when style mixing is not applied (i.e. Freeze $\sqrt{\cdot}$ , Augment $\sqrt{\cdot}$ , Mix $\mathcal{X}$ ), the use of mined styles brings mostly no improvement on OOD segmentation compared to training without augmentation (i.e. Freeze $\sqrt{\cdot}$ , Augment $\mathcal{X}$ , Mix $\mathcal{X}$ ). Note that for Freeze $\sqrt{\cdot}$ , Augment $\sqrt{\cdot}$ , Mix $\mathcal{X}$ , the line 8 in Algorithm 2 becomes: + +$$ +\mathbf {f} _ {\mathrm {s}} ^ {(i j)} \leftarrow \operatorname {A d a I N} \left(\mathbf {f} _ {\mathrm {s}} ^ {(i j)}, \boldsymbol {\mu} ^ {(i j)}, \boldsymbol {\sigma} ^ {(i j)}\right) \tag {5} +$$ + +Our style mixing is different from MixStyle [62] for being applied: (1) patch-wise and (2) between original styles of + +![](images/915966bc1c7d1def4c2fc10800f731623e4ee3cef57d0b6be4b8c2528ef3cb3a.jpg) +Figure 3. Qualitative results. Columns 1-2: Image and ground truth (GT), Columns 3-4-5: DGSS methods results, Column 6: Our results. The models are trained on G with ResNet-50 backbone. + +
| RCP | RSP | CN | C | B | M | S | AN | AS | AR | AF | Mean |
|---|---|---|---|---|---|---|---|---|---|---|---|
|  |  |  | 45.99 | 43.71 | 50.48 | 34.75 | 15.22 | 35.09 | 34.92 | 38.17 | 37.29 |
|  |  |  | 46.10 | 44.24 | 48.90 | 33.62 | 13.39 | 35.99 | 36.68 | 39.86 | 37.35 |
|  |  |  | 45.64 | 44.59 | 49.13 | 33.64 | 15.33 | 37.32 | 35.98 | 38.85 | 37.56 |
|  |  |  | 47.83 | 44.83 | 50.38 | 34.27 | 14.43 | 37.07 | 37.07 | 38.76 | 38.08 |
|  |  |  | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88 |
+ +Table 7. Ablation on the prompt construction. Performance (mIoU %) for different prompt constructions. RCP, RSP and CN refer to , and , respectively. + +the source data and augmented versions of them. Note that the case (Freeze $\checkmark$ , Augment $\times$ , Mix $\checkmark$ ) could be seen as a variant of MixStyle, yet applied locally and class-wise. Our complete recipe is proved to be significantly more effective with a boost of $\approx +6$ mean mIoU w.r.t. the baseline of training without augmentation and mixing. + +Prompt construction. Tab. 7 reports results when ablating the prompt construction. In FAMix, the final prompt is derived by concatenating and ; removing either of those leads to inferior results. Interestingly, replacing the style prompt by random characters - e.g. "ioscjspa" - does not significantly degrade the performance. In certain aspects, using random prompts still induces a randomization effect within the FAMix framework. However, meaningful prompts still consistently lead to the best results. + +Number of style prompts. FAMix uses a set $\mathcal{R}$ of random style prompts which are concatenated with the class names; $\mathcal{R}$ is formed by querying ChatGPT using . The output prompts are provided in Appendix C. Fig. 4a shows that the size of $\mathcal{R}$ has a marginal impact on FAMix performance. Yet, the mIoU scores on C, B, M and AR are higher for $|\mathcal{R}| = 20$ compared to $|\mathcal{R}| = 1$ and almost equal for the other datasets. + +![](images/d684a08d693b9a6ddee52bf8e7b6971de283ae9beba9f066560c916100c1a0a3.jpg) +(a) Number of prompts + +![](images/3b6ff42e4890093dff93fb5a8030dd8680c1dabd71307c93f9bc12bee73f7eb9.jpg) +(b) Effect of layer freezing +Figure 4. Ablation of prompt set and freezing strategy. (a) Performance $(\mathrm{mIoU}\%)$ on test datasets w.r.t. the number of random style prompts in $\mathcal{R}$ . (b) Effect of freezing layers reporting on x-axis the last frozen layer. For example, 'L3' means freezing L1, L2 and L3. 'L4' indicates that the Layer4 is partially frozen. + +The low sensitivity of the performance to the size of $\mathcal{R}$ could be explained by two factors. First, mining even from a single prompt results in different style variations as the optimization starts from different anchor points in the latent space, as argued in [10]. Second, mixing style between the source and the mined proxy domains is the crucial factor making the network explore intermediate domains during training. This does not contradict the effect of our prompt construction which leads to the best results (Tab. 7). + +Local vs. global style mining. To highlight the effect of our class-wise local style mining, we perform an ablation replacing it with global style mining. Specifically, the same set of are used, though being concatenated with as a global description instead of local class name. Intuitively, local style mining and mixing induces richer style variations and more contrast among patches. The results in Tab. 8 show the effectiveness of our local style mining and mixing strategy, bringing about 3 mIoU improvement on $G \to C$ . + +What to mix? Let $S = \bigcup_{k=1}^{K} S^{(k)}$ and $\mathcal{T} = \bigcup_{k=1}^{K} \mathcal{T}^{(k)}$ the sets of class-wise source and augmented features, respectively. In FAMix training, for an arbitrary patch $\mathbf{f}_s^{(ij)}$ , + +
| Style mining | C | B | M | S | AN | AS | AR | AF | Mean |
|---|---|---|---|---|---|---|---|---|---|
| global w/ “street view” | 45.51 | 45.12 | 50.40 | 33.65 | 14.59 | 36.92 | 37.38 | 40.53 | 38.01 |
| global w/ “urban scene” | 46.59 | 45.38 | 51.33 | 33.67 | 14.42 | 35.96 | 37.30 | 40.52 | 38.15 |
| global w/ “roadscape” | 45.49 | 45.55 | 50.63 | 33.66 | 14.77 | 36.75 | 37.07 | 40.33 | 38.03 |
| global w/ “commute snapshot” | 45.39 | 45.08 | 50.50 | 33.68 | 13.65 | 36.63 | 37.93 | 40.92 | 37.97 |
| global w/ “driving” | 45.06 | 44.98 | 50.67 | 33.36 | 14.84 | 35.11 | 36.21 | 39.52 | 37.47 |
| local | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88 |
+ +Table 8. Ablation on style mining. Global style mining consists of mining one style per feature map, using + as prompt. + +
| Style mining | C | B | M | S | AN | AS | AR | AF | Mean |
|---|---|---|---|---|---|---|---|---|---|
| S | 43.43 | 43.79 | 48.19 | 33.70 | 11.32 | 35.55 | 36.15 | 38.19 | 36.29 |
| S∪T | 44.76 | 45.59 | 50.78 | 34.05 | 13.67 | 36.92 | 37.18 | 38.13 | 37.64 |
| T (ours) | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88 |
+ +style mixing is performed between the original source statistics and statistics sampled from the augmented set (i.e., $(\pmb{\mu}^{(ij)},\pmb{\sigma}^{(ij)})\in \mathcal{T}^{(c_p^{(ij)})}$ , see (3) and (4)). In class-wise vanilla MixStyle, $(\pmb{\mu}^{(ij)},\pmb{\sigma}^{(ij)})\in S^{(c_p^{(ij)})}$ . In Tab. 9, we show that sampling $(\pmb{\mu}^{(ij)},\pmb{\sigma}^{(ij)})$ from $S^{(c_p^{(ij)})}\cup T^{(c_p^{(ij)})}$ does not lead to better generalization, despite sampling from a set with twice the cardinality. This supports our mixing strategy visualized in Fig. 1. Intuitively, sampling from $S\cup T$ could be viewed as applying either MixStyle or our mixing with a probability $p = 0.5$ . + +Minimal fine-tuning. We argue for minimal fine-tuning as a compromise between pretrained feature preservation and adaptation. Fig. 4b shows an increasing OOD generalization trend with more freezing. Interestingly, only fine-tuning the last layers of the last convolutional block (where the dilation is applied) achieves the best results. When training on Cityscapes, we observed that freezing all the layers except Layer4 achieves the best results. + +# 4.5. Does FAMix require language? + +Inspired by the observation that target statistics deviate around the source ones in real cases [11], we conduct an experiment where we replace language-driven style mining by noise perturbation. The same procedure of FAMix is kept: (i) Features are divided into patches, perturbed with noise and then saved into a style bank based on the dominant class; (ii) During training, patch-wise style mixing of original and perturbed styles is performed. + +Different from Fan et al. [11], who perform a perturbation on the feature statistics using a normal distribution with pre-defined parameters, we experiment perturbation with different magnitudes of noise controlled by the signal-to-noise ratio (SNR). Consider the mean of a patch $\mu \in \mathbb{R}^c$ as a signal, the goal is to perturb it with some noise + +Table 9. Ablation on the sets used for mixing. The styles $(\mu, \sigma)$ used in (3) and (4) are sampled either from $S$ or $S \cup \mathcal{T}$ or $\mathcal{T}$ . + +
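For the noise-based variant reported in Tab. 10 below, the perturbation of a channel-wise statistic at a target SNR (in dB) can be sketched as follows; the function name and shapes are our own, and only the scaling rule follows the SNR definition given in this section.

```python
import torch


def perturb_at_snr(stat, snr_db):
    """Add Gaussian noise to a channel-wise statistic (mean or std, shape (C,))
    so that 20 * log10(||stat|| / ||noise||) equals the requested SNR in dB."""
    n = torch.randn_like(stat)                                  # n ~ N(0, I)
    n_scaled = 10 ** (-snr_db / 20) * (stat.norm() / n.norm()) * n
    return stat + n_scaled
```

The perturbed statistics populate the same class-wise style banks and are mixed exactly as in Algorithm 2, so only the mining step changes.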
| SNR (dB) | C | B | M | S | AN | AS | AR | AF | Mean |
|---|---|---|---|---|---|---|---|---|---|
| Baseline | 37.83 | 38.88 | 44.24 | 31.93 | 12.41 | 29.59 | 31.56 | 33.05 | 32.44 |
| 5 | 28.78 | 29.24 | 30.32 | 21.67 | 12.60 | 24.00 | 25.95 | 25.87 | 24.80 |
| 10 | 40.09 | 39.50 | 43.45 | 29.09 | 13.36 | 33.47 | 33.11 | 36.17 | 33.53 |
| 15 | 45.02 | 44.16 | 48.63 | 32.96 | 14.55 | 36.09 | 35.99 | 40.96 | 37.30 |
| 20 | 45.52 | 44.29 | 49.26 | 33.45 | 12.40 | 35.96 | 36.52 | 38.60 | 37.00 |
| 25 | 44.82 | 44.26 | 48.54 | 33.30 | 11.38 | 34.51 | 35.46 | 37.61 | 36.24 |
| 30 | 43.07 | 43.80 | 48.31 | 33.47 | 12.33 | 35.05 | 35.58 | 38.10 | 36.21 |
| ∞ | 43.43 | 43.79 | 48.19 | 33.70 | 11.32 | 35.55 | 36.15 | 38.19 | 36.29 |
| MixStyle [62] | 40.97 | 42.04 | 48.36 | 33.15 | 13.14 | 31.26 | 34.94 | 38.12 | 35.25 |
| Prompts | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88 |
+ +Table 10. Noise vs prompt-driven augmentation. The prompt-driven augmentation in FAMix is replaced by random noise with different levels defined by SNR. We also include vanilla MixStyle. The prompt-driven strategy is superior. + +$n_{\mu} \in \mathbb{R}^{c}$ . The $\mathrm{SNR}_{\mathrm{dB}}$ between $\| \mu \|$ and $\| n_{\mu}\|$ is defined as $\mathrm{SNR}_{\mathrm{dB}} = 20\log_{10}\left(\frac{\|\mu\| / \|n_{\mu}\|}{\|\mu\|}\right)$ . Given $\mu$ , $\mathrm{SNR}_{\mathrm{dB}}$ , and $n \sim \mathcal{N}(0,I)$ , where $I \in \mathbb{R}^{c \times c}$ is the identity matrix, the noise is computed as $n_{\mu} = 10^{\frac{-\mathrm{SNR}}{20}}\frac{\|\mu\|}{\|n\|} n$ . We add $\mu + n_{\mu}$ to the style bank corresponding to the dominant class in the patch. The same applies to $\sigma \in \mathbb{R}^{c}$ . The results of training for different noise levels are in Tab. 10. Using language as source of randomization outperforms any noise level. The baseline corresponds to the case where no augmentation nor mixing are performed (See Tab. 6, Freeze $\checkmark$ , Augment $\pmb{x}$ , Mix $\pmb{x}$ ). $\mathrm{SNR} = \infty$ could be seen as a variant of MixStyle, applied class-wise to patches (See Tab. 6, Freeze $\checkmark$ , Augment $\pmb{x}$ , Mix $\checkmark$ ). The vanilla MixStyle gets inferior results. + +Besides lower OOD performance, one more disadvantage of noise augmentation compared to our language-driven augmentation is the need to select a value for the SNR, for which the optimal value might vary depending on the target domain encountered at the test time. + +# 5. Conclusion + +We presented FAMix, a simple recipe for domain generalized semantic segmentation with CLIP pretraining. We proposed to locally mix the styles of source features with their augmented counterparts obtained using language prompts. Combined with minimal fine-tuning, FAMix significantly outperforms the state-of-the-art approaches. Extensive experiments showcase the effectiveness of our framework. We hope that FAMix will serve as a strong baseline in future works, exploring the potential of leveraging large-scale vision-language models for perception tasks. + +Acknowledgment. This work was partially funded by French project SIGHT (ANR-20-CE23-0016) and was supported by ELSA - European Lighthouse on Secure and Safe AI funded by the European Union under grant agreement No. 101070617. It was performed using HPC resources from GENCI-IDRIS (Grant AD011014477). + +# References + +[1] Kartik Ahuja, Ethan Caballero, Dinghuai Zhang, Jean-Christophe Gagnon-Audet, Yoshua Bengio, Ioannis Mitliagkas, and Irina Rish. Invariance principle meets information bottleneck for out-of-distribution generalization. In NeurIPS, 2021. 2 +[2] James Urquhart Allingham, Jie Ren, Michael W Dusenberry, Xiuye Gu, Yin Cui, Dustin Tran, Jeremiah Zhe Liu, and Balaji Lakshminarayanan. A simple zero-shot prompt weighting technique to improve prompt ensembling in text-image models. In ICML, 2023. 2 +[3] Martin Arjovsky, Léon Bottou, Ishaan Gulrajani, and David Lopez-Paz. Invariant risk minimization. arXiv preprint arXiv:1907.02893, 2019. 2 +[4] Yogesh Balaji, Swami Sankaranarayanan, and Rama Chellappa. Metareg: Towards domain generalization using metaregularization. In NeurIPS, 2018. 2 +[5] Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-decoder with atrous separable convolution for semantic image segmentation. In ECCV, 2018. 5 +[6] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. 
In CVPR, 2021. 3 +[7] Sungha Choi, Sanghun Jung, Huiwon Yun, Joanne T Kim, Seungryong Kim, and Jaegul Choo. Robustnet: Improving domain generalization in urban-scene segmentation via instance selective whitening. In CVPR, 2021. 1, 2, 3, 5, 6 +[8] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In CVPR, 2016. 5 +[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, 2009. 1, 3 +[10] Mohammad Fahes, Tuan-Hung Vu, Andrei Bursuc, Patrick Pérez, and Raoul de Charette. Poda: Prompt-driven zero-shot domain adaptation. In ICCV, 2023. 1, 3, 4, 7 +[11] Qi Fan, Mattia Segu, Yu-Wing Tai, Fisher Yu, Chi-Keung Tang, Bernt Schiele, and Dengxin Dai. Towards robust object detection invariant to real-world domain shifts. In ICLR, 2023. 5, 8 +[12] Alex Fang, Gabriel Ilharco, Mitchell Wortsman, Yuhao Wan, Vaishaal Shankar, Achal Dave, and Ludwig Schmidt. Data determines distributional robustness in contrastive language image pre-training (clip). In ICML, 2022. 1, 2 +[13] Yaroslav Ganin, Evgeniya Ustinova, Hana Ajakan, Pascal Germain, Hugo Larochelle, François Laviolette, Mario Marchand, and Victor Lempitsky. Domain-adversarial training of neural networks. JMLR, 2016. 1 +[14] Yunhao Ge, Jie Ren, Andrew Gallagher, Yuxiao Wang, Ming-Hsuan Yang, Hartwig Adam, Laurent Itti, Balaji Lakshminarayanan, and Jiaping Zhao. Improving zero-shot generalization and robustness of multi-modal models. In CVPR, 2023. 2 +[15] Sachin Goyal, Ananya Kumar, Sankalp Garg, Zico Kolter, and Aditi Raghunathan. Finetune like you pretrain: Im- + +proved finetuning of zero-shot vision models. In CVPR, 2023. 1, 2 +[16] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2022. 1 +[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 5 +[18] Judy Hoffman, Eric Tzeng, Taesung Park, Jun-Yan Zhu, Phillip Isola, Kate Saenko, Alexei Efros, and Trevor Darrell. Cycada: Cycle-consistent adversarial domain adaptation. In ICML, 2018. 1 +[19] Wei Huang, Chang Chen, Yong Li, Jiacheng Li, Cheng Li, Fenglong Song, Youliang Yan, and Zhiwei Xiong. Style projected clustering for domain generalized semantic segmentation. In CVPR, 2023. 1, 3, 5, 6 +[20] Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In ICCV, 2017. 3 +[21] Nishant Jain, Harkirat Behl, Yogesh Singh Rawat, and Vibhav Vineet. Efficiently robustify pre-trained models. In ICCV, 2023. 2 +[22] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 1 +[23] Jin Kim, Jiyoung Lee, Jungin Park, Dongbo Min, and Kwanghoon Sohn. Pin the memory: Learning to generalize semantic segmentation. In CVPR, 2022. 1, 5, 6, 7 +[24] Sunghwan Kim, Dae-hwan Kim, and Hoseong Kim. Texture learning domain randomization for domain generalized segmentation. In ICCV, 2023. 1, 3, 5, 6 +[25] David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, and Aaron Courville. Out-of-distribution generalization via risk extrapolation (rex). In ICML, 2021. 
2 +[26] Ananya Kumar, Aditi Raghunathan, Robbie Matthew Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. In ICLR, 2022. 1, 2, 6 +[27] Gihyun Kwon and Jong Chul Ye. Clipstyler: Image style transfer with a single text condition. In CVPR, 2022. 1 +[28] Clement Laroudie, Andrei Bursuc, Mai Lan Ha, and Gianni Franchi. Improving clip robustness with knowledge distillation and self-training. arXiv preprint arXiv:2309.10361, 2023. 2 +[29] Suhyeon Lee, Hongje Seong, Seongwon Lee, and Euntai Kim. Wildnet: Learning domain generalized semantic segmentation from the wild. In CVPR, 2022. 1, 3, 5, 6, 7 +[30] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and Rene Ranftl. Language-driven semantic segmentation. In ICLR, 2022. 1 +[31] Haoliang Li, Sinno Jialin Pan, Shiqi Wang, and Alex C Kot. Domain generalization with adversarial feature learning. In CVPR, 2018. 1 +[32] Ya Li, Xinmei Tian, Mingming Gong, Yajing Liu, Tongliang Liu, Kun Zhang, and Dacheng Tao. Deep domain generaliza + +tion via conditional invariant adversarial networks. In ECCV, 2018. 1, 2 +[33] Yunsheng Li, Lu Yuan, and Nuno Vasconcelos. Bidirectional learning for domain adaptation of semantic segmentation. In CVPR, 2019. 1 +[34] Mingsheng Long, Zhangjie Cao, Jianmin Wang, and Michael I Jordan. Conditional adversarial domain adaptation. In NeurIPS, 2018. 1 +[35] Gerhard Neuhold, Tobias Ollmann, Samuel Rota Bulo, and Peter Kontschieder. The mapillary vistas dataset for semantic understanding of street scenes. In ICCV, 2017. 5 +[36] Xingang Pan, Ping Luo, Jianping Shi, and Xiaou Tang. Two at once: Enhancing learning and generalization capacities via ibn-net. In ECCV, 2018. 1, 3, 5 +[37] Duo Peng, Yinjie Lei, Munawar Hayat, Yulan Guo, and Wen Li. Semantic-aware domain generalized segmentation. In CVPR, 2022. 1, 3, 5 +[38] Fengchun Qiao, Long Zhao, and Xi Peng. Learning to learn single domain generalization. In CVPR, 2020. 2 +[39] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 2, 3 +[40] Yongming Rao, Wenliang Zhao, Guangyi Chen, Yansong Tang, Zheng Zhu, Guan Huang, Jie Zhou, and Jiwen Lu. Denseclip: Language-guided dense prediction with context-aware prompting. In CVPR, 2022. 1 +[41] Stephan R Richter, Vibhav Vineet, Stefan Roth, and Vladlen Koltun. Playing for data: Ground truth from computer games. In ECCV, 2016. 5 +[42] German Ros, Laura Sellart, Joanna Materzynska, David Vazquez, and Antonio M Lopez. The synthia dataset: A large collection of synthetic images for semantic segmentation of urban scenes. In CVPR, 2016. 5 +[43] Kuniaki Saito, Donghyun Kim, Piotr Teterwak, Rogerio Feris, and Kate Saenko. Mind the backbone: Minimizing backbone distortion for robust object detection. arXiv preprint arXiv:2303.14744, 2023. 6 +[44] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. ACDC: The adverse conditions dataset with correspondences for semantic driving scene understanding. In ICCV, 2021. 5 +[45] Yang Shu, Xingzhuo Guo, Jialong Wu, Ximei Wang, Jianmin Wang, and Mingsheng Long. Clipood: Generalizing clip to out-of-distributions. In ICML, 2023. 1, 2 +[46] Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In CVPR, 2016. 
5 +[47] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In CVPR, 2017. 1 +[48] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In CVPR, 2019. 1 +[49] Jindong Wang, Cuiling Lan, Chang Liu, Yidong Ouyang, Tao Qin, Wang Lu, Yiqiang Chen, Wenjun Zeng, and Philip + +Yu. Generalizing to unseen domains: A survey on domain generalization. T-KDE, 2022. 1, 2 +[50] Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, et al. Robust fine-tuning of zero-shot models. In CVPR, 2022. 1, 2 +[51] Zhenyao Wu, Xinyi Wu, Xiaoping Zhang, Lili Ju, and Song Wang. Siamdoge: Domain generalizable semantic segmentation using siamese network. In ECCV, 2022. 1, 3, 5, 6 +[52] Qinwei Xu, Ruipeng Zhang, Ya Zhang, Yanfeng Wang, and Qi Tian. A fourier-based framework for domain generalization. In CVPR, 2021. 1 +[53] Liwei Yang, Xiang Gu, and Jian Sun. Generalized semantic segmentation by self-supervised source domain projection and multi-level contrastive learning. In AAAI, 2023. 1, 5, 6 +[54] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darryll. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In CVPR, 2020. 5 +[55] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. LiT: Zero-shot transfer with locked-image text tuning. In CVPR, 2022. 1 +[56] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023. 1 +[57] Shanshan Zhao, Mingming Gong, Tongliang Liu, Huan Fu, and Dacheng Tao. Domain generalization via entropy regularization. In NeurIPS, 2020. 2 +[58] Yuyang Zhao, Zhun Zhong, Na Zhao, Nicu Sebe, and Gim Hee Lee. Style-hallucinated dual consistency learning for domain generalized semantic segmentation. In ECCV, 2022. 1, 3, 5, 6, 7 +[59] Chong Zhou, Chen Change Loy, and Bo Dai. Extract free dense labels from clip. In ECCV, 2022. 1 +[60] Kaiyang Zhou, Yongxin Yang, Timothy Hospedales, and Tao Xiang. Deep domain-adversarial image generation for domain generalisation. In AAAI, 2020. 2 +[61] Kaiyang Zhou, Yongxin Yang, Timothy Hospedales, and Tao Xiang. Learning to generate novel domains for domain generalization. In ECCV, 2020. +[62] Kaiyang Zhou, Yongxin Yang, Yu Qiao, and Tao Xiang. Domain generalization with mixstyle. In ICLR, 2021. 1, 2, 4, 5, 6, 8 +[63] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. TPAMI, 2022. 1, 2 +[64] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Zwei Liu. Conditional prompt learning for vision-language models. In CVPR, 2022. 1 +[65] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 2022. 
1 \ No newline at end of file diff --git a/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/images.zip b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..f43a3e253bec9031c74c1785b7194db8b37c8493 --- /dev/null +++ b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cb23b51aaadabb88d457d3aa82bcec131a644fa8a0d3f0b080521794f5d149e +size 543089 diff --git a/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/layout.json b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3a0ab3961cd0ff0d6e51e670ced58086a00bbd26 --- /dev/null +++ b/2024/A Simple Recipe for Language-guided Domain Generalized Segmentation/layout.json @@ -0,0 +1,10711 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 69, + 103, + 523, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 103, + 523, + 121 + ], + "spans": [ + { + "bbox": [ + 69, + 103, + 523, + 121 + ], + "type": "text", + "content": "A Simple Recipe for Language-guided Domain Generalized Segmentation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 141, + 153, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 141, + 153, + 156 + ], + "spans": [ + { + "bbox": [ + 56, + 141, + 153, + 156 + ], + "type": "text", + "content": "Mohammad Fahes1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 166, + 142, + 250, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 142, + 250, + 157 + ], + "spans": [ + { + "bbox": [ + 166, + 142, + 250, + 157 + ], + "type": "text", + "content": "Tuan-Hung " + }, + { + "bbox": [ + 166, + 142, + 250, + 157 + ], + "type": "inline_equation", + "content": "\\mathrm{V_u^{1,2}}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 198, + 157, + 231, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 157, + 231, + 170 + ], + "spans": [ + { + "bbox": [ + 198, + 157, + 231, + 170 + ], + "type": "text", + "content": "1 Inria" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 264, + 143, + 347, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 143, + 347, + 156 + ], + "spans": [ + { + "bbox": [ + 264, + 143, + 347, + 156 + ], + "type": "text", + "content": "Andrei Bursuc" + }, + { + "bbox": [ + 264, + 143, + 347, + 156 + ], + "type": "inline_equation", + "content": "^{1,2}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 267, + 157, + 316, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 157, + 316, + 171 + ], + "spans": [ + { + "bbox": [ + 267, + 157, + 316, + 171 + ], + "type": "text", + "content": "2 Valeo.ai" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 357, + 143, + 430, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 143, + 430, + 156 + ], + "spans": [ + { + "bbox": [ + 357, + 143, + 430, + 156 + ], + "type": "text", + "content": "Patrick Pérez3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 353, + 157, + 394, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 157, + 394, + 171 + ], + "spans": [ + { + "bbox": [ + 353, + 157, + 394, + 171 + ], + "type": "text", + "content": "3 Kyutai" + } + ] + } + ], + 
"index": 9 + }, + { + "bbox": [ + 443, + 143, + 534, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 443, + 143, + 534, + 156 + ], + "spans": [ + { + "bbox": [ + 443, + 143, + 534, + 156 + ], + "type": "text", + "content": "Raoul de Charette1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 167, + 180, + 429, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 180, + 429, + 193 + ], + "spans": [ + { + "bbox": [ + 167, + 180, + 429, + 193 + ], + "type": "text", + "content": "https://astravision.github.io/FAMix" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 143, + 211, + 192, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 211, + 192, + 224 + ], + "spans": [ + { + "bbox": [ + 143, + 211, + 192, + 224 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 237, + 290, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 237, + 290, + 477 + ], + "spans": [ + { + "bbox": [ + 46, + 237, + 290, + 477 + ], + "type": "text", + "content": "Generalization to new domains not seen during training is one of the long-standing challenges in deploying neural networks in real-world applications. Existing generalization techniques either necessitate external images for augmentation, and/or aim at learning invariant representations by imposing various alignment constraints. Large-scale pretraining has recently shown promising generalization capabilities, along with the potential of binding different modalities. For instance, the advent of vision-language models like CLIP has opened the doorway for vision models to exploit the textual modality. In this paper, we introduce a simple framework for generalizing semantic segmentation networks by employing language as the source of randomization. Our recipe comprises three key ingredients: (i) the preservation of the intrinsic CLIP robustness through minimal fine-tuning, (ii) language-driven local style augmentation, and (iii) randomization by locally mixing the source and augmented styles during training. Extensive experiments report state-of-the-art results on various generalization benchmarks. Code is accessible on the project page1." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 514, + 128, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 514, + 128, + 526 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 128, + 526 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 535, + 287, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 535, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 46, + 535, + 287, + 666 + ], + "type": "text", + "content": "A prominent challenge associated with deep neural networks is their constrained capacity to generalize when confronted with shifts in data distribution. This limitation is rooted in the assumption of data being independent and identically distributed, a presumption that frequently proves unrealistic in real-world scenarios. For instance, in safety-critical applications like autonomous driving, it is imperative for a segmentation model to exhibit resilient generalization capabilities when dealing with alterations in lighting, variations in weather conditions, and shifts in geographic location, among other considerations." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 667, + 287, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 667, + 287, + 692 + ], + "spans": [ + { + "bbox": [ + 47, + 667, + 287, + 692 + ], + "type": "text", + "content": "To address this challenge, domain adaptation [13, 18, 33, 34, 47, 48] has emerged; its core principle revolves around" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 212, + 545, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 212, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 306, + 212, + 545, + 308 + ], + "type": "text", + "content": "aligning the distributions of both the source and target domains. However, DA hinges on having access to target data, which may not always be available. Even when accessible, this data might not encompass the full spectrum of distributions encountered in diverse real-world scenarios. Domain generalization [31, 32, 49, 52, 62, 63] overcomes this limitation by enhancing the robustness of models to arbitrary and previously unseen domains." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 310, + 546, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 310, + 546, + 407 + ], + "spans": [ + { + "bbox": [ + 304, + 310, + 546, + 407 + ], + "type": "text", + "content": "The training of segmentation networks is often backed by large-scale pretraining as initialization for the feature representation. Until now, to the best of our knowledge, domain generalization for semantic segmentation (DGSS) networks [7, 19, 23, 24, 29, 36, 37, 51, 53, 58] are pretrained with ImageNet [9]. The underlying concept is to transfer the representations from the upstream task of classification to the downstream task of segmentation." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 408, + 547, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 408, + 547, + 552 + ], + "spans": [ + { + "bbox": [ + 304, + 408, + 547, + 552 + ], + "type": "text", + "content": "Lately, contrastive language image pretraining (CLIP) [22, 39, 55, 56] has demonstrated that transferable visual representations could be learned from the sole supervision of loose natural language descriptions at very large scale. Subsequently, a plethora of applications have been proposed using CLIP [39], including zero-shot semantic segmentation [30, 59], image editing [27], transfer learning [10, 40], open-vocabulary object detection [16], few-shot learning [64, 65] etc. A recent line of research proposes fine-tuning techniques to preserve the robustness of CLIP under distribution shift [15, 26, 45, 50], but they are limited to classification." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 555, + 547, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 555, + 547, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 555, + 547, + 651 + ], + "type": "text", + "content": "In this paper, we aim at answering the following question: How to leverage CLIP pretraining for enhanced domain generalization for semantic segmentation? The motivation for rethinking DGSS with CLIP is twofold. On one hand, distribution robustness is a notable characteristic of CLIP [12]. On the other hand, the language modality offers an extra source of information compared to unimodal pretrained models." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "content": "A direct comparison of training two segmentation models under identical conditions but with different pretraining, i.e. ImageNet vs. CLIP, shows that CLIP pretraining does not yield promising results. Indeed, Tab. 1 shows that fine-tuning CLIP-initialized network performs worse than" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 237, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 237, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 237, + 712 + ], + "type": "text", + "content": "1https://astra-vision.github.io/FAMix" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "23428" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 71, + 286, + 109 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 286, + 109 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 286, + 109 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 286, + 109 + ], + "type": "table", + "html": "
<table><tr><td>Pretraining</td><td>C</td><td>B</td><td>M</td><td>S</td><td>AN</td><td>AS</td><td>AR</td><td>AF</td><td>Mean</td></tr>
<tr><td>ImageNet</td><td>29.04</td><td>32.17</td><td>34.26</td><td>29.87</td><td>4.36</td><td>22.38</td><td>28.34</td><td>26.76</td><td>25.90</td></tr>
<tr><td>CLIP</td><td>16.81</td><td>16.31</td><td>17.80</td><td>27.10</td><td>2.95</td><td>8.58</td><td>14.35</td><td>13.61</td><td>14.69</td></tr></table>
", + "image_path": "b4e04f344950334779b3d7b3db056ea2afe6a5fef4790b8ab892f14fcd288078.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 116, + 287, + 182 + ], + "lines": [ + { + "bbox": [ + 47, + 116, + 287, + 182 + ], + "spans": [ + { + "bbox": [ + 47, + 116, + 287, + 182 + ], + "type": "text", + "content": "Table 1. Comparison of ImageNet and CLIP pretraining for out-of-distribution semantic segmentation. The network is DeepLabv3+ with ResNet-50 as backbone. The models are trained on GTAV and the performance (mIoU %) is reported on Cityscapes (C), BDD-100K (B), Mapillary (M), Synthia (S), and ACDC Night (AN), Snow (AS), Rain (AR) and Fog (AF)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 202, + 287, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 202, + 287, + 286 + ], + "spans": [ + { + "bbox": [ + 47, + 202, + 287, + 286 + ], + "type": "text", + "content": "its ImageNet counterpart on out-of-distribution (OOD) data. This raises doubts about the suitability of CLIP pretraining for DGSS and indicates that it is more prone to overfitting the source distribution at the expense of degrading its original distributional robustness properties. Note that both models converge and achieve similar results on in-domain data. More details are provided in Appendix A." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 290, + 287, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 290, + 287, + 337 + ], + "spans": [ + { + "bbox": [ + 47, + 290, + 287, + 337 + ], + "type": "text", + "content": "This paper shows that we can prevent such behavior with a simple recipe involving minimal fine-tuning, language-driven style augmentation, and mixing. Our approach is coined FAMix, for Freeze, Augment and Mix." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 342, + 287, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 287, + 461 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 287, + 461 + ], + "type": "text", + "content": "It was recently argued that fine-tuning might distort the pretrained representations and negatively affect OOD generalization [26]. To maintain the integrity of the representation, one extreme approach is to entirely freeze the backbone. However, this can undermine representation adaptability and lead to subpar OOD generalization. As a middle-ground strategy balancing adaptation and feature preservation, we suggest minimal fine-tuning of the backbone, where a substantial portion remains frozen, and only the final layers undergo fine-tuning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 465, + 287, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 465, + 287, + 536 + ], + "spans": [ + { + "bbox": [ + 47, + 465, + 287, + 536 + ], + "type": "text", + "content": "For generalization, we show that rethinking MixStyle [62] leads to significant performance gains. As illustrated in Fig. 1, we mix the statistics of the original source features with augmented statistics mined using language. This helps explore styles beyond the source distribution at training time without using additional image." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 59, + 540, + 237, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 540, + 237, + 551 + ], + "spans": [ + { + "bbox": [ + 59, + 540, + 237, + 551 + ], + "type": "text", + "content": "We summarize our contributions as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 556, + 286, + 711 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 47, + 556, + 286, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 556, + 286, + 602 + ], + "spans": [ + { + "bbox": [ + 47, + 556, + 286, + 602 + ], + "type": "text", + "content": "- We propose a simple framework for DGSS based on minimal fine-tuning of the backbone and language-driven style augmentation. To the best of our knowledge, we are the first to study DGSS with CLIP pretraining." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 604, + 286, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 604, + 286, + 674 + ], + "spans": [ + { + "bbox": [ + 47, + 604, + 286, + 674 + ], + "type": "text", + "content": "- We propose language-driven class-wise local style augmentation. We mine class-specific local statistics using prompts that express random styles and names of patchwise dominant classes. During training, randomization is performed through patch-wise style mixing of the source and mined styles." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 675, + 286, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 675, + 286, + 711 + ], + "spans": [ + { + "bbox": [ + 47, + 675, + 286, + 711 + ], + "type": "text", + "content": "- We conduct careful ablations to show the effectiveness of FAMix. Our framework outperforms state-of-the-art approaches in single and multi-source DGSS settings." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 315, + 70, + 539, + 148 + ], + "blocks": [ + { + "bbox": [ + 315, + 70, + 539, + 148 + ], + "lines": [ + { + "bbox": [ + 315, + 70, + 539, + 148 + ], + "spans": [ + { + "bbox": [ + 315, + 70, + 539, + 148 + ], + "type": "image", + "image_path": "b9af8f4cf04702bc00671f6450080cc0d1804d090acfc233174df61ecbd8914a.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 165, + 545, + 264 + ], + "lines": [ + { + "bbox": [ + 305, + 165, + 545, + 264 + ], + "spans": [ + { + "bbox": [ + 305, + 165, + 545, + 264 + ], + "type": "text", + "content": "Figure 1. Mixing strategies. (Left) MixStyle [62] consists of a linear mixing between the feature statistics of the source domain(s) S samples. (Right) We apply an augmentation " + }, + { + "bbox": [ + 305, + 165, + 545, + 264 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(.)" + }, + { + "bbox": [ + 305, + 165, + 545, + 264 + ], + "type": "text", + "content": " on the source domain statistics, then perform linear mixing between original and augmented statistics. Intuitively, this enlarges the support of the training distribution by leveraging statistics beyond the source domain(s), as well as discovering intermediate domains. 
" + }, + { + "bbox": [ + 305, + 165, + 545, + 264 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(.)" + }, + { + "bbox": [ + 305, + 165, + 545, + 264 + ], + "type": "text", + "content": " could be a language-driven or Gaussian noise augmentation, and we show that the former leads to better generalization results." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 287, + 394, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 287, + 394, + 298 + ], + "spans": [ + { + "bbox": [ + 306, + 287, + 394, + 298 + ], + "type": "text", + "content": "2. Related works" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 308, + 545, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 308, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 305, + 308, + 545, + 403 + ], + "type": "text", + "content": "Domain generalization (DG). The goal of DG is to train, from a single or multiple source domains, models that perform well under arbitrary domain shifts. The DG literature spans a broad range of approaches, including adversarial learning [32, 57], meta-learning [4, 38], data augmentation [60-62] and domain-invariant representation learning [1, 3, 7, 25]. We refer the reader to [49, 63] for comprehensive surveys on DG." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 409, + 545, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 409, + 545, + 672 + ], + "spans": [ + { + "bbox": [ + 304, + 409, + 545, + 672 + ], + "type": "text", + "content": "Domain generalization with CLIP. CLIP [39] exhibits a remarkable distributional robustness [12]. Nevertheless, fine-tuning comes at the expense of sacrificing generalization. Kumar et al. [26] observe that full fine-tuning can distort the pretrained representation, and propose a two-stage strategy, consisting of training a linear probe with a frozen feature extractor, then fine-tuning both. Wortman et al. [50] propose assembling the weights of zero-shot and fine-tuned models. Goyal et al. [15] show that preserving the pretraining paradigm (i.e. contrastive learning) during the adaptation to the downstream task improves both in-domain (ID) and OOD performance without multi-step fine-tuning or weight assembling. CLIPood [45] introduces margin metric softmax training objective and Beta moving average for optimization to handle both open-class and open-domain at test time. On the other hand, distributional robustness could be improved by training a small amount of parameters on top of a frozen CLIP backbone in a teacher-student manner [21, 28]. Other works show that specialized prompt assembling and/or image assembling strategies [2, 14] coupled with label augmentation using the WordNet hierarchy improve robustness in classification." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "content": "Domain Generalized Semantic Segmentation. 
DGSS methods could be categorized into three main groups: normalization methods, domain randomization (DR) and in" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23429" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 71, + 531, + 210 + ], + "blocks": [ + { + "bbox": [ + 60, + 71, + 531, + 210 + ], + "lines": [ + { + "bbox": [ + 60, + 71, + 531, + 210 + ], + "spans": [ + { + "bbox": [ + 60, + 71, + 531, + 210 + ], + "type": "image", + "image_path": "6dca2118158c16ccc62ceca3ae190802f7c87077359af3f3e1fee053af4e6ef2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 224, + 545, + 302 + ], + "lines": [ + { + "bbox": [ + 46, + 224, + 545, + 302 + ], + "spans": [ + { + "bbox": [ + 46, + 224, + 545, + 302 + ], + "type": "text", + "content": "Figure 2. Overall process of FAMix. FAMix consists of two steps. (Left) Local style mining consists of dividing the low-level feature activations into patches, which are used for style mining using Prompt-driven Instance Normalization (PIN) [10]. Specifically, for each patch, the dominant class is queried from the ground truth, and the mined style is added to corresponding class-specific style bank. (Right) Training the segmentation network is performed with minimal fine-tuning of the backbone. At each iteration, the low-level feature activations are viewed as grids of patches. For each patch, the dominant class is queried using the ground truth, then a style is sampled from the corresponding style bank. Style randomization is performed by normalizing each patch in the grid by its statistics, and transferring the new style which is a mixing between the original style and the sampled one. The network is trained using only a cross-entropy loss." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 316, + 289, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 316, + 289, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 316, + 289, + 581 + ], + "type": "text", + "content": "variant representation learning. Normalization methods aim at removing style contribution from the representation. For instance, IBN-Net [36] shows that Instance Normalization (IN) makes the representation invariant to variations in the scene appearance (e.g., change of colors, illumination, etc.), and that combining IN and batch normalization (BN) helps the synthetic-to-real generalization. SAN & SAW [37] proposes semantic-aware feature normalization and whitening, while RobustNet [7] proposes an instance selective whitening loss, where only feature covariances that are sensitive to photometric transformations are whitened. DR aims instead at diversifying the data during training. Some methods use additional data for DR. For example, WildNet [29] uses ImageNet [9] data for content and style extension learning, while TLDR [24] proposes learning texture from random style images. Other methods like SiamDoGe [51] perform DR solely by data augmentation, using a Siamese [6] structure. 
Finally in the invariant representation learning group, SPC-Net [19] builds a representation space based on style and semantic projection and clustering, and SHADE [58] regularizes the training with a style consistency loss and a retrospection consistency loss." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 596, + 102, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 596, + 102, + 608 + ], + "spans": [ + { + "bbox": [ + 47, + 596, + 102, + 608 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 617, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 287, + 713 + ], + "type": "text", + "content": "FAMix proposes an effective recipe for DGSS through the blending of simple ingredients. It consists of two stages (see Fig. 2): (i) Local style mining from language (Sec. 3.2); (ii) Training of a segmentation network with minimal fin-tuning and local style mixing (Sec. 3.3). In Fig. 2 and in the following, CLIP-I1 denotes the stem layers and Layer1 of CLIP image encoder, CLIP-I2 the remaining layers excluding the attention pooling, and CLIP-T the text encoder." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 316, + 544, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 316, + 544, + 340 + ], + "spans": [ + { + "bbox": [ + 305, + 316, + 544, + 340 + ], + "type": "text", + "content": "We start with some preliminary background knowledge, introducing AdaIN and PIN which are essential to our work." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 347, + 391, + 359 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 347, + 391, + 359 + ], + "spans": [ + { + "bbox": [ + 306, + 347, + 391, + 359 + ], + "type": "text", + "content": "3.1. Preliminaries" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "spans": [ + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "text", + "content": "Adaptive Instance Normalization (AdaIN). For a feature map " + }, + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "inline_equation", + "content": "\\mathbf{f} \\in \\mathbb{R}^{h \\times w \\times c}" + }, + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "text", + "content": ", AdaIN [20] shows that the channel-wise mean " + }, + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\mu} \\in \\mathbb{R}^c" + }, + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "text", + "content": " and standard deviation " + }, + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "inline_equation", + "content": "\\sigma \\in \\mathbb{R}^c" + }, + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "text", + "content": " capture information about the style of the input image, allowing style transfer between images. 
Hence, stylizing a source feature " + }, + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_s" + }, + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "text", + "content": " with an arbitrary target style " + }, + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "inline_equation", + "content": "(\\mu(\\mathbf{f}_t), \\sigma(\\mathbf{f}_t))" + }, + { + "bbox": [ + 305, + 366, + 545, + 438 + ], + "type": "text", + "content": " reads:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 341, + 443, + 545, + 467 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 443, + 545, + 467 + ], + "spans": [ + { + "bbox": [ + 341, + 443, + 545, + 467 + ], + "type": "interline_equation", + "content": "\\operatorname {A d a I N} \\left(\\mathbf {f} _ {\\mathrm {s}}, \\mathbf {f} _ {\\mathrm {t}}\\right) = \\sigma \\left(\\mathbf {f} _ {\\mathrm {t}}\\right) \\left(\\frac {\\mathbf {f} _ {\\mathrm {s}} - \\mu \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}{\\sigma \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}\\right) + \\mu \\left(\\mathbf {f} _ {\\mathrm {t}}\\right), \\tag {1}", + "image_path": "4ee681382012cf8bd6f8bd0e245aa094e26c3aee8f57b9ca439772458a3c277b.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 473, + 544, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 473, + 544, + 497 + ], + "spans": [ + { + "bbox": [ + 305, + 473, + 544, + 497 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 305, + 473, + 544, + 497 + ], + "type": "inline_equation", + "content": "\\mu (\\cdot)" + }, + { + "bbox": [ + 305, + 473, + 544, + 497 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 473, + 544, + 497 + ], + "type": "inline_equation", + "content": "\\sigma (\\cdot)" + }, + { + "bbox": [ + 305, + 473, + 544, + 497 + ], + "type": "text", + "content": " the mean and standard deviation of input feature; multiplications and additions being element-wise." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 499, + 545, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 499, + 545, + 595 + ], + "spans": [ + { + "bbox": [ + 305, + 499, + 545, + 595 + ], + "type": "text", + "content": "Prompt-driven Instance Normalization (PIN). PIN was introduced for prompt-driven zero-shot domain adaptation in PØDA [10]. It replaces the target style " + }, + { + "bbox": [ + 305, + 499, + 545, + 595 + ], + "type": "inline_equation", + "content": "(\\mu(\\mathbf{f}_{\\mathrm{t}}), \\sigma(\\mathbf{f}_{\\mathrm{t}}))" + }, + { + "bbox": [ + 305, + 499, + 545, + 595 + ], + "type": "text", + "content": " in AdaIN (1) with two estimizable variables " + }, + { + "bbox": [ + 305, + 499, + 545, + 595 + ], + "type": "inline_equation", + "content": "(\\mu, \\sigma)" + }, + { + "bbox": [ + 305, + 499, + 545, + 595 + ], + "type": "text", + "content": " guided by a single prompt in natural language. The rationale is to leverage a frozen CLIP [39] to mine visual styles from the prompt representation in the shared space. 
Given a prompt " + }, + { + "bbox": [ + 305, + 499, + 545, + 595 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 305, + 499, + 545, + 595 + ], + "type": "text", + "content": " and a feature map " + }, + { + "bbox": [ + 305, + 499, + 545, + 595 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{\\mathrm{s}}" + }, + { + "bbox": [ + 305, + 499, + 545, + 595 + ], + "type": "text", + "content": ", PIN reads as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 358, + 601, + 545, + 625 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 601, + 545, + 625 + ], + "spans": [ + { + "bbox": [ + 358, + 601, + 545, + 625 + ], + "type": "interline_equation", + "content": "\\mathrm {P I N} _ {(P)} \\left(\\mathbf {f} _ {\\mathrm {s}}\\right) = \\sigma \\left(\\frac {\\mathbf {f} _ {\\mathrm {s}} - \\mu \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}{\\sigma \\left(\\mathbf {f} _ {\\mathrm {s}}\\right)}\\right) + \\boldsymbol {\\mu}, \\tag {2}", + "image_path": "7a2e1cd25b06cdfbede41649267e31c529230b782b9e5f01686db958ccea59c4.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 630, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 665 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 630, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 304, + 630, + 545, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 630, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 304, + 630, + 545, + 665 + ], + "type": "text", + "content": " are optimized using gradient descent, such that the cosine distance between the visual feature representation and the prompt representation is minimized." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 665, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 714 + ], + "type": "text", + "content": "Different from PØDA which mines styles globally with a predetermined prompt describing the target domain, we make use of PIN to mine class-specific styles using local patches of the features, leveraging random style prompts." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23430" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 288, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 288, + 96 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 288, + 96 + ], + "type": "text", + "content": "Further, we show the effectiveness of incorporating the class name in the prompt for better style mining." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 102, + 160, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 102, + 160, + 117 + ], + "spans": [ + { + "bbox": [ + 47, + 102, + 160, + 117 + ], + "type": "text", + "content": "3.2. 
Local Style Mining" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "spans": [ + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "text", + "content": "Our approach is to leverage PIN to mine class-specific style banks that used for feature augmentation when training FAMix. Given a set of cropped images " + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "text", + "content": ", we encode them using CLIP-I1 to get a set of low-level features " + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "text", + "content": ". Each batch " + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "text", + "content": " of features " + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{\\mathrm{s}} \\in \\mathcal{F}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "text", + "content": " is cropped into " + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "text", + "content": " patches, resulting in " + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "inline_equation", + "content": "b \\times m" + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "text", + "content": " patches " + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_p" + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "text", + "content": ", associated ground-truth annotation " + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_p" + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "text", + "content": ", of size " + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "inline_equation", + "content": "h / \\sqrt{m} \\times w / \\sqrt{m} \\times c" + }, + { + "bbox": [ + 46, + 121, + 287, + 205 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "spans": [ + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "content": "We aim at populating " + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "content": " style banks, " + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "content": " being the total number of classes. 
For a feature patch " + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_p" + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "content": ", we compute the dominant class from the corresponding label patch " + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_p" + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "content": ", and get its name " + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "inline_equation", + "content": "t_p" + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "content": " from the predefined classes in the training dataset. Given a set of prompts describing random styles " + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "content": ", the target prompt " + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "inline_equation", + "content": "P_p" + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "content": " is formed by concatenating a randomly sampled style prompt " + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "inline_equation", + "content": "t_p" + }, + { + "bbox": [ + 46, + 205, + 288, + 325 + ], + "type": "text", + "content": " (e.g., retro futurism style building). We show in the experiments (Sec. 4.4) that our method is not very sensitive to the prompt design, yet our prompt construction works best." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 325, + 287, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 325, + 287, + 384 + ], + "spans": [ + { + "bbox": [ + 46, + 325, + 287, + 384 + ], + "type": "text", + "content": "The idea is to mine proxy domains and explore intermediate ones in a class-aware manner (as detailed in Sec. 3.3), which makes our work fundamentally different from [10], that steers features towards a particular target style and corresponding domain, and better suited to generalization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 384, + 288, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 384, + 288, + 468 + ], + "spans": [ + { + "bbox": [ + 46, + 384, + 288, + 468 + ], + "type": "text", + "content": "To handle the class imbalance problem, we simply select one feature patch " + }, + { + "bbox": [ + 46, + 384, + 288, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_p" + }, + { + "bbox": [ + 46, + 384, + 288, + 468 + ], + "type": "text", + "content": " per class among the total " + }, + { + "bbox": [ + 46, + 384, + 288, + 468 + ], + "type": "inline_equation", + "content": "b\\times m" + }, + { + "bbox": [ + 46, + 384, + 288, + 468 + ], + "type": "text", + "content": " patches, as shown in Fig. 2. Consequently, we apply PIN (2) to optimize the local styles to match the representations of their corresponding prompts, and use the mined styles to populate the corresponding style banks. 
The complete procedure is outlined in Algorithm 1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 468, + 288, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 468, + 288, + 493 + ], + "spans": [ + { + "bbox": [ + 47, + 468, + 288, + 493 + ], + "type": "text", + "content": "The resulting style banks " + }, + { + "bbox": [ + 47, + 468, + 288, + 493 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\}" + }, + { + "bbox": [ + 47, + 468, + 288, + 493 + ], + "type": "text", + "content": " are used for domain randomization during training." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 499, + 146, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 499, + 146, + 513 + ], + "spans": [ + { + "bbox": [ + 47, + 499, + 146, + 513 + ], + "type": "text", + "content": "3.3. Training FAMix" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "spans": [ + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "text", + "content": "Style randomization. During training, randomly cropped images " + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "text", + "content": " are encoded into " + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "text", + "content": " using CLIP-11. Each batch of feature maps " + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "text", + "content": " is viewed as a grid of " + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "text", + "content": " patches, without cropping them. For each patch " + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{\\mathrm{s}}^{(ij)}" + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "text", + "content": " within the grid, the dominant class " + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "inline_equation", + "content": "c_{p}^{(ij)}" + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "text", + "content": " is queried using the corresponding ground truth patch " + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{\\mathrm{s}}^{(ij)}" + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "text", + "content": ", and a style is randomly sampled from the corresponding mined bank " + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(c_p^{(ij)})" + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "text", + "content": ". We then apply patch-wise convex combination (i.e., style mixing) of the original style of the patch and the mined style. 
Specifically, for an arbitrary patch " + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{\\mathrm{s}}^{(ij)}" + }, + { + "bbox": [ + 46, + 517, + 287, + 650 + ], + "type": "text", + "content": ", our local style mixing reads:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 98, + 658, + 287, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 658, + 287, + 673 + ], + "spans": [ + { + "bbox": [ + 98, + 658, + 287, + 673 + ], + "type": "interline_equation", + "content": "\\mu_ {m i x} \\leftarrow (1 - \\alpha) \\mu \\left(\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)}\\right) + \\alpha \\boldsymbol {\\mu} ^ {(i j)} \\tag {3}", + "image_path": "b3fe3b0dd67030ec59324c8c67bbcd1416c07964925a1307300b630cceba6a52.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 97, + 675, + 287, + 690 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 675, + 287, + 690 + ], + "spans": [ + { + "bbox": [ + 97, + 675, + 287, + 690 + ], + "type": "interline_equation", + "content": "\\sigma_ {m i x} \\leftarrow (1 - \\alpha) \\sigma \\left(\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)}\\right) + \\alpha \\boldsymbol {\\sigma} ^ {(i j)}, \\tag {4}", + "image_path": "73224baeab094be0ca7d63732d0cecd6c5c85677ace78df22ce2c5cf02500d97.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 698, + 231, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 698, + 231, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 698, + 231, + 715 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 47, + 698, + 231, + 715 + ], + "type": "inline_equation", + "content": "(\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})\\in \\mathcal{T}^{(c_p^{(ij)})}" + }, + { + "bbox": [ + 47, + 698, + 231, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 698, + 231, + 715 + ], + "type": "inline_equation", + "content": "\\alpha \\in [0,1]^c" + } + ] + } + ], + "index": 11 + }, + { + "type": "code", + "bbox": [ + 308, + 88, + 550, + 338 + ], + "blocks": [ + { + "bbox": [ + 312, + 75, + 453, + 87 + ], + "lines": [ + { + "bbox": [ + 312, + 75, + 453, + 87 + ], + "spans": [ + { + "bbox": [ + 312, + 75, + 453, + 87 + ], + "type": "text", + "content": "Algorithm 1: Local Style Mining." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "lines": [ + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "spans": [ + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": "Input: Set " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{s}}" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " of source features batches. 
Label set " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\mathcal{V}_{\\mathrm{s}}" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{s}" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " Set of random prompts " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " and class names " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " \nParam: Number of patches " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " Number of classes " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " Output: " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " sets " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\}" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " of class-wise augmented statistics. \n1 " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\} \\gets \\emptyset" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " \n2 foreach " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "(\\mathbf{f}_s\\in \\mathcal{F}_s,\\mathbf{y}_s\\in \\mathcal{Y}_s)" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " do \n3 " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{y}_p\\} \\leftarrow" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " crop-patch(y,s,m) \n4 " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\{c_p\\} ,\\{P_p\\} ,\\{f_p\\} \\leftarrow \\emptyset" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " \n5 foreach " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_p\\in \\{\\mathbf{y}_p\\}" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " do \n6 " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "c_{p}\\gets" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " get-dominant-class(yp) if " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "c_{p}" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " not in " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\{c_p\\}" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + 
], + "type": "text", + "content": " then \n8 " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\{c_p\\} \\leftarrow c_p" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " \n9 " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\{P_p\\} \\leftarrow" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " concat(sample(R),get-name(cp)) \n10 " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\{f_p\\} \\leftarrow f_p" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " \n11 end \n12 end \n13 " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\mu^{(c_p)},\\sigma^{(c_p)},\\mathbf{f}_p'\\gets \\mathrm{PIN}_{(P_p)}(\\mathbf{f}_p)" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " \n14 " + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{(c_p)}\\gets \\mathcal{T}^{(c_p)}\\cup \\{(\\boldsymbol{\\mu}^{(c_p)},\\boldsymbol{\\sigma}^{(c_p)})\\}" + }, + { + "bbox": [ + 308, + 88, + 550, + 338 + ], + "type": "text", + "content": " \n15 end" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "code_body" + } + ], + "index": 13, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 307, + 374, + 550, + 578 + ], + "blocks": [ + { + "bbox": [ + 313, + 361, + 443, + 373 + ], + "lines": [ + { + "bbox": [ + 313, + 361, + 443, + 373 + ], + "spans": [ + { + "bbox": [ + 313, + 361, + 443, + 373 + ], + "type": "text", + "content": "Algorithm 2: Training FAMix." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "lines": [ + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "spans": [ + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": "Input: Set " + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{s}}" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": " of source features batches. Label set " + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "\\mathcal{V}_{\\mathrm{s}}" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{s}}" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": " sets " + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{T}^{(1)},\\dots ,\\mathcal{T}^{(K)}\\}" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": " of class-wise augmented statistics. \nParam: Number of patches m. 
\nforeach " + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "(\\mathbf{f}_s\\in \\mathcal{F}_s,\\mathbf{y}_s\\in \\mathcal{Y}_s)" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "\\alpha \\sim \\mathrm{Beta}(0.1,0.1)" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": " \nfor " + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "(i,j)\\in [1,\\sqrt{m} ]\\times [1,\\sqrt{m} ]" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": " do " + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "c_{p}^{(ij)}\\gets \\mathrm{get - dominant - class}(\\mathbf{y}_{s}^{(ij)})" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "\\mu^{(ij)},\\sigma^{(ij)}\\gets \\mathrm{sample}(\\mathcal{T}^{(c_p^{(ij)})})" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "\\mu_{mix}\\gets (1 - \\alpha).\\mu (\\mathbf{f}_s^{(ij)}) + \\alpha .\\mu^{(ij)}" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "\\sigma_{mix}\\gets (1 - \\alpha).\\sigma (\\mathbf{f}_s^{(ij)}) + \\alpha .\\sigma^{(ij)}" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{\\mathrm{s}}^{(ij)}\\gets \\mathrm{AdaIN}(\\mathbf{f}_{\\mathrm{s}}^{(ij)},\\mu_{mix},\\sigma_{mix})" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": " \nend \n" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{y}}_{\\mathrm{s}}\\gets \\mathrm{CLIP - I2}(\\mathbf{f}_{\\mathrm{s}})" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": " \nLoss " + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": " cross-entropy " + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "inline_equation", + "content": "(\\tilde{\\mathbf{y}}_{s},\\mathbf{y}_{s})" + }, + { + "bbox": [ + 307, + 374, + 550, + 578 + ], + "type": "text", + "content": " \nend" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "code_body" + } + ], + "index": 15, + "sub_type": "algorithm" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": "As shown in Fig. 1, our style mixing strategy differs from [62] which applies a linear interpolation between styles extracted from the images of a limited set of source domain(s) assumed to be available for training. Here, we view the mined styles as variations of multiple proxy target domains defined by the prompts. Training is conducted over all the paths in the feature space between the source and proxy domains without requiring any additional image during training other than the one from source." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23431" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 71, + 288, + 220 + ], + "blocks": [ + { + "bbox": [ + 48, + 71, + 288, + 220 + ], + "lines": [ + { + "bbox": [ + 48, + 71, + 288, + 220 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 288, + 220 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>arch.</td><td>C</td><td>B</td><td>M</td><td>S</td><td>AN</td><td>AS</td><td>AR</td><td>AF</td><td>Mean</td></tr>
<tr><td>RobustNet [7]</td><td rowspan="11">RN50</td><td>36.58</td><td>35.20</td><td>40.33</td><td>28.30</td><td>6.32</td><td>29.97</td><td>33.02</td><td>32.56</td><td>30.29</td></tr>
<tr><td>SAN & SAW [37]</td><td>39.75</td><td>37.34</td><td>41.86</td><td>30.79</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Pin the memory [23]</td><td>41.00</td><td>34.60</td><td>37.40</td><td>27.08</td><td>3.84</td><td>5.51</td><td>5.89</td><td>7.27</td><td>20.32</td></tr>
<tr><td>SHADE [58]</td><td>44.65</td><td>39.28</td><td>43.34</td><td>28.41</td><td>8.18</td><td>30.38</td><td>35.44</td><td>36.87</td><td>33.32</td></tr>
<tr><td>SiamDoGe [51]</td><td>42.96</td><td>37.54</td><td>40.64</td><td>28.34</td><td>10.60</td><td>30.71</td><td>35.84</td><td>36.45</td><td>32.89</td></tr>
<tr><td>DPCL [53]</td><td>44.87</td><td>40.21</td><td>46.74</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>SPC-Net [19]</td><td>44.10</td><td>40.46</td><td>45.51</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>NP [11]</td><td>40.62</td><td>35.56</td><td>38.92</td><td>27.65</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>WildNet* [29]</td><td>44.62</td><td>38.42</td><td>46.09</td><td>31.34</td><td>8.27</td><td>30.29</td><td>36.32</td><td>35.39</td><td>33.84</td></tr>
<tr><td>TLDR* [24]</td><td>46.51</td><td>42.58</td><td>46.18</td><td>30.57</td><td>13.13</td><td>36.02</td><td>38.89</td><td>40.58</td><td>36.81</td></tr>
<tr><td>FAMix (ours)</td><td>48.15</td><td>45.61</td><td>52.11</td><td>34.23</td><td>14.96</td><td>37.09</td><td>38.66</td><td>40.25</td><td>38.88</td></tr>
<tr><td>SAN & SAW [37]</td><td rowspan="5">RN101</td><td>45.33</td><td>41.18</td><td>40.77</td><td>31.84</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>SHADE† [58]</td><td>46.66</td><td>43.66</td><td>45.50</td><td>31.58</td><td>7.58</td><td>32.48</td><td>36.90</td><td>36.69</td><td>35.13</td></tr>
<tr><td>WildNet* [29]</td><td>45.79</td><td>41.73</td><td>47.08</td><td>32.51</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>TLDR* [24]</td><td>47.58</td><td>44.88</td><td>48.80</td><td>33.14</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>FAMix (ours)</td><td>49.47</td><td>46.40</td><td>51.97</td><td>36.72</td><td>19.89</td><td>41.38</td><td>40.91</td><td>42.15</td><td>41.11</td></tr></table>
", + "image_path": "549c3483b09c37d57964f807531ef4d42d53cc49031eea6252f012ebb3b26a6c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 226, + 287, + 293 + ], + "lines": [ + { + "bbox": [ + 46, + 226, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 46, + 226, + 287, + 293 + ], + "type": "text", + "content": "Table 2. Single-source DGSS trained on G. Performance (mIoU %) of FAMix compared to other DGSS methods trained on G and evaluated on C, S, M, S, A for ResNet-50 ('RN50') and ResNet-101 ('RN101') backbone architecture ('arch'). * indicates the use of extra-data. † indicates the use of the full data for training. We emphasize best and second best results." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "type": "text", + "content": "Style transfer is applied through AdaIN (1). Only the standard cross-entropy loss between the ground truth " + }, + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "type": "text", + "content": " and the prediction " + }, + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{y}}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "type": "text", + "content": " is applied for training the network. Algorithm 2 shows the training steps of FAMix." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 358, + 287, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 358, + 287, + 418 + ], + "spans": [ + { + "bbox": [ + 46, + 358, + 287, + 418 + ], + "type": "text", + "content": "Minimal fine-tuning. During training, we fine-tune only the last few layers of the backbone. Subsequently, we examine various alternatives and show that the minimal extent of fine-tuning is the crucial factor in witnessing the effectiveness of our local style mixing strategy." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 418, + 288, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 418, + 288, + 563 + ], + "spans": [ + { + "bbox": [ + 46, + 418, + 288, + 563 + ], + "type": "text", + "content": "Previous works [11, 36, 62] suggest that shallow feature statistics capture style information while deeper features encode semantic content. Consequently, some DGSS methods focus on learning style-agnostic representations [7, 36, 37], but this can compromise the expressiveness of the representation and suppress content information. In contrast, our intuition is to retain these identified traits by introducing variability to the shallow features through augmentation and mixing. Simultaneously, we guide the network to learn invariant high-level representations by training the final layers of the backbone with a label-preserving assumption, using a standard cross-entropy loss." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 574, + 128, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 574, + 128, + 588 + ], + "spans": [ + { + "bbox": [ + 47, + 574, + 128, + 588 + ], + "type": "text", + "content": "4. 
Experiments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 594, + 162, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 594, + 162, + 608 + ], + "spans": [ + { + "bbox": [ + 47, + 594, + 162, + 608 + ], + "type": "text", + "content": "4.1. Experimental setup" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 613, + 287, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 613, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 613, + 287, + 685 + ], + "type": "text", + "content": "Synthetic datasets. GTAV [41] and SYNTHIA [42] are used as synthetic datasets. GTAV consists of 24966 images split into 12403 images for training, 6382 for validation and 6181 for testing. SYNTHIA consists of 9400 images: 6580 for training and 2820 for validation. GTAV and SYNTHIA are denoted by " + }, + { + "bbox": [ + 46, + 613, + 287, + 685 + ], + "type": "inline_equation", + "content": "\\mathsf{G}" + }, + { + "bbox": [ + 46, + 613, + 287, + 685 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 613, + 287, + 685 + ], + "type": "inline_equation", + "content": "\\mathsf{S}" + }, + { + "bbox": [ + 46, + 613, + 287, + 685 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "content": "Real datasets. Cityscapes [8], BDD-100K [54], and Mapillary [35] contain 2975, 7000, and 18000 images for train-" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "ing and 500, 1000, and 2000 images for validation, respectively. ACDC [44] is a dataset of driving scenes in adverse conditions: night, snow, rain and fog with respectively 106, 100, 100 and 100 images in the validation sets. C, B, and M denote Cityscapes, BDD-100K and Mapillary, respectively; AN, AS, AR and AF denote night, snow, rain and fog subsets of ACDC, respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "spans": [ + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "text", + "content": "Implementation details. Following previous works [7, 19, 23, 24, 29, 37, 51, 53, 58], we adopt DeepLabv " + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "inline_equation", + "content": "3+" + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "text", + "content": " [5] as segmentation model. ResNet-50 and ResNet-101 [17], initialized with CLIP pretrained weights, are used in our experiments as backbones. Specifically, we remove the attention pooling layer and add a randomly initialized decoder head. The output stride is 16. 
Single-source and multisource models are trained respectively for " + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "inline_equation", + "content": "40K" + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "inline_equation", + "content": "60K" + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "text", + "content": " iterations with a batch size of 8. The training images are cropped to " + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "inline_equation", + "content": "768 \\times 768" + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "text", + "content": ". Stochastic Gradient Descent (SGD) with a momentum of 0.9 and weight decay of " + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "text", + "content": " is used as optimizer. Polynomial decay with a power of 0.9 is used, with an initial learning rate of " + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "inline_equation", + "content": "10^{-1}" + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "text", + "content": " for the classifier and " + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "inline_equation", + "content": "10^{-2}" + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "text", + "content": " for the backbone. We use color jittering and horizontal flip as data augmentation. Label smoothing regularization [46] is adopted. For style mining, Layer1 features are divided into 9 patches. Each patch is resized to " + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "inline_equation", + "content": "56 \\times 56" + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "text", + "content": ", corresponding to the dimensions of Layer1 features for an input image of size " + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 304, + 158, + 546, + 397 + ], + "type": "text", + "content": " (i.e. the input dimension of CLIP). We use ImageNet templates for each prompt." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 400, + 545, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 400, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 304, + 400, + 545, + 449 + ], + "type": "text", + "content": "Evaluation metric. We evaluate our models on the validation sets of the unseen target domains with mean Intersection over Union (mIoU%) of the 19 shared semantic classes. For each experiment, we report the average of three runs." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 454, + 484, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 454, + 484, + 467 + ], + "spans": [ + { + "bbox": [ + 305, + 454, + 484, + 467 + ], + "type": "text", + "content": "4.2. Comparison with DGSS methods" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 473, + 545, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 545, + 497 + ], + "type": "text", + "content": "Single-source DGSS. We compare FAMix with state-of-the-art DGSS methods under the single-source setting." 
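The evaluation metric described above is the mean Intersection over Union over the 19 shared semantic classes, averaged over three runs. As a point of reference, here is a minimal NumPy sketch of how per-class IoU and mIoU are typically computed from a confusion matrix; the function names and the toy label maps are illustrative and are not taken from the FAMix code.

```python
import numpy as np

def confusion_matrix(pred, gt, num_classes=19, ignore_index=255):
    """Accumulate a (num_classes x num_classes) confusion matrix.

    pred, gt: integer label maps of identical shape.
    Pixels whose ground truth equals ignore_index are skipped.
    """
    mask = gt != ignore_index
    idx = num_classes * gt[mask].astype(np.int64) + pred[mask].astype(np.int64)
    return np.bincount(idx, minlength=num_classes ** 2).reshape(num_classes, num_classes)

def mean_iou(conf):
    """Per-class IoU = TP / (TP + FP + FN); mIoU is their mean over valid classes."""
    tp = np.diag(conf).astype(np.float64)
    fp = conf.sum(axis=0) - tp
    fn = conf.sum(axis=1) - tp
    denom = tp + fp + fn
    iou = np.where(denom > 0, tp / np.maximum(denom, 1), np.nan)
    return iou, np.nanmean(iou) * 100.0  # mIoU in %

# toy usage with random label maps
gt = np.random.randint(0, 19, size=(512, 512))
pred = np.random.randint(0, 19, size=(512, 512))
iou, miou = mean_iou(confusion_matrix(pred, gt))
print(f"mIoU: {miou:.2f}%")
```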
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 499, + 545, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 499, + 545, + 607 + ], + "spans": [ + { + "bbox": [ + 304, + 499, + 545, + 607 + ], + "type": "text", + "content": "Training on GTAV (G) as source, Tab. 2 reports models trained with either ResNet-50 or ResNet-101 backbones. The unseen target datasets are C, B, M, S, and the four subsets of A. Tab. 2 shows that our method significantly outperforms all the baselines on all the datasets for both backbones. We note that WildNet [29] and TLDR [24] use extra-data, while SHADE [58] uses the full G dataset (24,966 images) for training with ResNet-101. Class-wise performances are reported in Appendix B." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 610, + 545, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 610, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 304, + 610, + 545, + 669 + ], + "type": "text", + "content": "Training on Cityscapes (C) as source, Tab. 3 reports performance with ResNet-50 backbone. The unseen target datasets are B, M, G, and S. The table shows that our method outperforms the baseline in average, and is competitive to SOTA on G and M." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 672, + 545, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 672, + 545, + 696 + ], + "spans": [ + { + "bbox": [ + 304, + 672, + 545, + 696 + ], + "type": "text", + "content": "Multi-source DGSS. We also show the effectiveness of FAMix in the multi-source setting, training on " + }, + { + "bbox": [ + 304, + 672, + 545, + 696 + ], + "type": "inline_equation", + "content": "G + S" + }, + { + "bbox": [ + 304, + 672, + 545, + 696 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 315, + 702, + 471, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 702, + 471, + 712 + ], + "spans": [ + { + "bbox": [ + 315, + 702, + 471, + 712 + ], + "type": "text", + "content": "2https://github.com/openai/CLIP/" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "23432" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 51, + 70, + 284, + 155 + ], + "blocks": [ + { + "bbox": [ + 51, + 70, + 284, + 155 + ], + "lines": [ + { + "bbox": [ + 51, + 70, + 284, + 155 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 284, + 155 + ], + "type": "table", + "html": "
Method | B | M | G | S | Mean
RobustNet [7] | 50.73 | 58.64 | 45.00 | 26.20 | 45.14
Pin the memory [23] | 46.78 | 55.10 | - | - | -
SiamDoGe [51] | 51.53 | 59.00 | 45.08 | 26.67 | 45.57
WildNet* [29] | 50.94 | 58.79 | 47.01 | 27.95 | 46.17
DPCL [53] | 52.29 | - | 46.00 | 26.60 | -
FAMix (ours) | 54.07 | 58.72 | 45.12 | 32.67 | 47.65
", + "image_path": "88d3c19ee7fc48c226b43975247f85a4ab40ad5b0c885e6fd7ff84f084493544.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 51, + 218, + 284, + 300 + ], + "blocks": [ + { + "bbox": [ + 47, + 163, + 287, + 208 + ], + "lines": [ + { + "bbox": [ + 47, + 163, + 287, + 208 + ], + "spans": [ + { + "bbox": [ + 47, + 163, + 287, + 208 + ], + "type": "text", + "content": "Table 3. Single-source DGSS trained on C. Performance (mIoU %) of FAMix compared to other DGSS methods trained on C and evaluated on B, M, G and S for ResNet-50 backbone. * indicates the use of extra-data. We emphasize best and second best results." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 51, + 218, + 284, + 300 + ], + "lines": [ + { + "bbox": [ + 51, + 218, + 284, + 300 + ], + "spans": [ + { + "bbox": [ + 51, + 218, + 284, + 300 + ], + "type": "table", + "html": "
Method | C | B | M | Mean
RobustNet [7] | 37.69 | 34.09 | 38.49 | 36.76
Pin the memory [23] | 44.51 | 38.07 | 42.70 | 41.76
SHADE [58] | 47.43 | 40.30 | 47.60 | 45.11
SPC-Net [19] | 46.36 | 43.18 | 48.23 | 45.92
TLDR* [24] | 48.83 | 42.58 | 47.80 | 46.40
FAMix (ours) | 49.41 | 45.51 | 51.61 | 48.84
", + "image_path": "11cfc289ef28356b4e518fbec7433a77e23e0a0d96a44636df411f07df20899d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 374, + 287, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 374, + 287, + 398 + ], + "spans": [ + { + "bbox": [ + 46, + 374, + 287, + 398 + ], + "type": "text", + "content": "evaluating on C, B and M. The results reported in Tab. 4 for ResNet-50 backbone outperform state-of-the-art." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 401, + 287, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 401, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 401, + 287, + 460 + ], + "type": "text", + "content": "Qualitative results. We visually compare the segmentation results with Pin the memory [23], SHADE [58] and WildNet [29] in Fig. 3. FAMix clearly outperforms other DGSS methods on \"stuff\" (e.g., road and sky) and \"things\" (e.g., bicycle and bus) classes." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 467, + 252, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 467, + 252, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 467, + 252, + 480 + ], + "type": "text", + "content": "4.3. Decoder-Probing Fine-Tuning (DP-FT)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 486, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 287, + 713 + ], + "type": "text", + "content": "Kumar et al. [26] show that standard fine-tuning may distort the pretrained feature representation, leading to degraded OOD performances for classification. Consequently, they propose a two-step training strategy: (1) Training a linear probe (LP) on top of the frozen backbone features, (2) Finetuning (FT) both the linear probe and the backbone. Inspired by it, Saito et al. [43] apply the same strategy for object detection, which is referred to as Decoder-probing Fine-tuning (DP-FT). They observe that DP-FT improves over DP depending on the architecture. We hypothesize that the effect is also dependent on the pretraining paradigm and the downstream task. As observed in Tab. 1, CLIP might remarkably overfit the source domain when finetuned. In Tab. 5, we compare fine-tuning (FT), decoder-probing (DP) and DP-FT. DP brings improvements over FT since it completely preserves the pretrained representation. Yet, DP major drawback lies in its limitation to adapt features for the downstream task, resulting in suboptimal results. Surprisingly, DP-FT largely falls behind DP, meaning" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 309, + 70, + 544, + 125 + ], + "blocks": [ + { + "bbox": [ + 46, + 309, + 287, + 354 + ], + "lines": [ + { + "bbox": [ + 46, + 309, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 309, + 287, + 354 + ], + "type": "text", + "content": "Table 4. Multi-source DGSS. Performance (mIoU %) of FAMix compared to other DGSS methods trained on " + }, + { + "bbox": [ + 46, + 309, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\mathrm{G} + \\mathrm{S}" + }, + { + "bbox": [ + 46, + 309, + 287, + 354 + ], + "type": "text", + "content": " and evaluated on C, B, M for ResNet-50 backbone. * indicates the use of extra-data. We emphasize best and second best results." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 70, + 544, + 125 + ], + "lines": [ + { + "bbox": [ + 309, + 70, + 544, + 125 + ], + "spans": [ + { + "bbox": [ + 309, + 70, + 544, + 125 + ], + "type": "table", + "html": "
Method | C | B | M | S | AN | AS | AR | AF | Mean
FT | 16.81 | 16.31 | 17.80 | 27.10 | 2.95 | 8.58 | 14.35 | 13.61 | 14.69
DP | 34.13 | 37.67 | 42.21 | 29.10 | 10.71 | 26.26 | 29.47 | 30.40 | 29.99
DP-FT | 25.62 | 21.71 | 26.39 | 31.45 | 4.22 | 18.26 | 20.07 | 20.85 | 21.07
FAMix (ours) | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88
", + "image_path": "23379019fd09e40f46d41f0c34d579bedcc41d5edf3536696469cda13103be59.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 309, + 190, + 544, + 273 + ], + "blocks": [ + { + "bbox": [ + 305, + 134, + 545, + 178 + ], + "lines": [ + { + "bbox": [ + 305, + 134, + 545, + 178 + ], + "spans": [ + { + "bbox": [ + 305, + 134, + 545, + 178 + ], + "type": "text", + "content": "Table 5. FAMix vs. DP-FT. Performance (mIoU%) of FAMix compared to Fine-tuning (FT), Decoder-probing (DP) and Decoder-probing Fine-tuning (DP-FT). We use here ResNet-50, trained on G. We emphasize best and second best results." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 190, + 544, + 273 + ], + "lines": [ + { + "bbox": [ + 309, + 190, + 544, + 273 + ], + "spans": [ + { + "bbox": [ + 309, + 190, + 544, + 273 + ], + "type": "table", + "html": "
FreezeAugmentMixCBMSANASARAFMean
XXX16.8116.3117.8027.102.958.5814.3513.6114.69
XX22.4826.0524.1525.404.8317.6122.8619.7520.39
XX20.0721.2422.9126.521.2814.9922.0920.5118.70
X27.5326.5926.2726.914.9018.9125.6022.1422.36
XX37.8338.8844.2431.9312.4129.5931.5633.0532.44
X36.6535.7337.3230.4414.7234.6534.9138.9832.93
X43.4343.7948.1933.7011.3235.5536.1538.1936.29
48.1545.6152.1134.2314.9637.0938.6640.2538.88
", + "image_path": "7dcc1802c79c27dc643967a30478f1ca791a9c243bc7273009fdbc2294db888a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 281, + 545, + 304 + ], + "lines": [ + { + "bbox": [ + 305, + 281, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 305, + 281, + 545, + 304 + ], + "type": "text", + "content": "Table 6. Ablation of FAMix components. Performance (mIoU %) after removing one or more components of FAMix." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 326, + 544, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 326, + 544, + 350 + ], + "spans": [ + { + "bbox": [ + 305, + 326, + 544, + 350 + ], + "type": "text", + "content": "that the learned features over-specialize to the source domain distribution even with a \"decoder warm-up\"." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 350, + 545, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 350, + 545, + 410 + ], + "spans": [ + { + "bbox": [ + 305, + 350, + 545, + 410 + ], + "type": "text", + "content": "The results advocate for the need of specific strategies to preserve CLIP robustness for semantic segmentation. This need emerges from the additional gap between pretraining (i.e. aligning object-level and language representations) and fine-tuning (i.e. supervised pixel classification)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 418, + 404, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 418, + 404, + 430 + ], + "spans": [ + { + "bbox": [ + 306, + 418, + 404, + 430 + ], + "type": "text", + "content": "4.4. Ablation studies" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 437, + 544, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 437, + 544, + 460 + ], + "spans": [ + { + "bbox": [ + 305, + 437, + 544, + 460 + ], + "type": "text", + "content": "We conduct all the ablations on a ResNet-50 backbone with GTAV (G) as source dataset." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": "Removing ingredients from the recipe. FAMix is based on minimal fine-tuning of the backbone (i.e., Freeze), style augmentation and mixing. We show in Tab. 6 that the best generalization results are only obtained when combining the three ingredients. Specifically, when the backbone is fine-tuned (i.e., Freeze " + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": "), the performances are largely harmed. When minimal fine-tuning is performed (i.e., Freeze " + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\sqrt{\\cdot}" + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": "), we argue that the augmentations are too strong to be applied without style mixing; the latter brings both effects of domain interpolation and use of the original statistics. Subsequently, when style mixing is not applied (i.e. 
Freeze " + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\sqrt{\\cdot}" + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": ", Augment " + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\sqrt{\\cdot}" + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": ", Mix " + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": "), the use of mined styles brings mostly no improvement on OOD segmentation compared to training without augmentation (i.e. Freeze " + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\sqrt{\\cdot}" + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": ", Augment " + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": ", Mix " + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": "). Note that for Freeze " + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\sqrt{\\cdot}" + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": ", Augment " + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\sqrt{\\cdot}" + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": ", Mix " + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 304, + 464, + 545, + 656 + ], + "type": "text", + "content": ", the line 8 in Algorithm 2 becomes:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 354, + 665, + 544, + 680 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 354, + 665, + 544, + 680 + ], + "spans": [ + { + "bbox": [ + 354, + 665, + 544, + 680 + ], + "type": "interline_equation", + "content": "\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)} \\leftarrow \\operatorname {A d a I N} \\left(\\mathbf {f} _ {\\mathrm {s}} ^ {(i j)}, \\boldsymbol {\\mu} ^ {(i j)}, \\boldsymbol {\\sigma} ^ {(i j)}\\right) \\tag {5}", + "image_path": "bdd2fb6b5bbf2c58996e6a9106aaeac8e7990da4639c9877a882de3808a0eed3.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "content": "Our style mixing is different from MixStyle [62] for being applied: (1) patch-wise and (2) between original styles of" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23433" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 68, + 525, + 223 + ], + "blocks": [ + { + "bbox": [ + 70, + 68, 
+ 525, + 223 + ], + "lines": [ + { + "bbox": [ + 70, + 68, + 525, + 223 + ], + "spans": [ + { + "bbox": [ + 70, + 68, + 525, + 223 + ], + "type": "image", + "image_path": "915966bc1c7d1def4c2fc10800f731623e4ee3cef57d0b6be4b8c2528ef3cb3a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 231, + 544, + 252 + ], + "lines": [ + { + "bbox": [ + 46, + 231, + 544, + 252 + ], + "spans": [ + { + "bbox": [ + 46, + 231, + 544, + 252 + ], + "type": "text", + "content": "Figure 3. Qualitative results. Columns 1-2: Image and ground truth (GT), Columns 3-4-5: DGSS methods results, Column 6: Our results. The models are trained on G with ResNet-50 backbone." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 48, + 263, + 286, + 327 + ], + "blocks": [ + { + "bbox": [ + 48, + 263, + 286, + 327 + ], + "lines": [ + { + "bbox": [ + 48, + 263, + 286, + 327 + ], + "spans": [ + { + "bbox": [ + 48, + 263, + 286, + 327 + ], + "type": "table", + "html": "
RCP | RSP | CN | C | B | M | S | AN | AS | AR | AF | Mean
45.99 | 43.71 | 50.48 | 34.75 | 15.22 | 35.09 | 34.92 | 38.17 | 37.29
46.10 | 44.24 | 48.90 | 33.62 | 13.39 | 35.99 | 36.68 | 39.86 | 37.35
45.64 | 44.59 | 49.13 | 33.64 | 15.33 | 37.32 | 35.98 | 38.85 | 37.56
47.83 | 44.83 | 50.38 | 34.27 | 14.43 | 37.07 | 37.07 | 38.76 | 38.08
48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88
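For context on the prompt-construction ablation above (Tab. 7), the prompts are turned into text embeddings with the public CLIP API by concatenating a style description with a class name and averaging over prompt templates. The sketch below is a hedged illustration only: the template list is truncated and the example style/class strings are placeholders, not the prompts actually mined by FAMix.

```python
import torch
import clip  # pip install git+https://github.com/openai/CLIP.git

# Truncated illustration of the ImageNet prompt templates.
TEMPLATES = ["a photo of a {}.", "a bad photo of a {}.", "a photo of the {}."]

@torch.no_grad()
def encode_style_class_prompt(model, style, class_name, device):
    """Embed '<style> <class name>' under every template and average the unit-norm features."""
    texts = [t.format(f"{style} {class_name}") for t in TEMPLATES]
    tokens = clip.tokenize(texts).to(device)
    feats = model.encode_text(tokens).float()
    feats = feats / feats.norm(dim=-1, keepdim=True)
    return feats.mean(dim=0)

device = "cuda" if torch.cuda.is_available() else "cpu"
model, _ = clip.load("RN50", device=device)
target = encode_style_class_prompt(model, "foggy", "road", device)  # placeholder strings
print(target.shape)  # torch.Size([1024]) for the RN50 text encoder
```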
", + "image_path": "fc1c31dcfe7077facd6a9267834b32cbc0b5f40e65d8613ca7ded5ff06692014.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 335, + 286, + 378 + ], + "lines": [ + { + "bbox": [ + 46, + 335, + 286, + 378 + ], + "spans": [ + { + "bbox": [ + 46, + 335, + 286, + 378 + ], + "type": "text", + "content": "Table 7. Ablation on the prompt construction. Performance (mIoU %) for different prompt constructions. RCP, RSP and CN refer to , and , respectively." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 391, + 286, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 391, + 286, + 462 + ], + "spans": [ + { + "bbox": [ + 46, + 391, + 286, + 462 + ], + "type": "text", + "content": "the source data and augmented versions of them. Note that the case (Freeze " + }, + { + "bbox": [ + 46, + 391, + 286, + 462 + ], + "type": "inline_equation", + "content": "\\checkmark" + }, + { + "bbox": [ + 46, + 391, + 286, + 462 + ], + "type": "text", + "content": ", Augment " + }, + { + "bbox": [ + 46, + 391, + 286, + 462 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 46, + 391, + 286, + 462 + ], + "type": "text", + "content": ", Mix " + }, + { + "bbox": [ + 46, + 391, + 286, + 462 + ], + "type": "inline_equation", + "content": "\\checkmark" + }, + { + "bbox": [ + 46, + 391, + 286, + 462 + ], + "type": "text", + "content": ") could be seen as a variant of MixStyle, yet applied locally and class-wise. Our complete recipe is proved to be significantly more effective with a boost of " + }, + { + "bbox": [ + 46, + 391, + 286, + 462 + ], + "type": "inline_equation", + "content": "\\approx +6" + }, + { + "bbox": [ + 46, + 391, + 286, + 462 + ], + "type": "text", + "content": " mean mIoU w.r.t. the baseline of training without augmentation and mixing." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 465, + 286, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 465, + 286, + 584 + ], + "spans": [ + { + "bbox": [ + 46, + 465, + 286, + 584 + ], + "type": "text", + "content": "Prompt construction. Tab. 7 reports results when ablating the prompt construction. In FAMix, the final prompt is derived by concatenating and ; removing either of those leads to inferior results. Interestingly, replacing the style prompt by random characters - e.g. \"ioscjspa\" - does not significantly degrade the performance. In certain aspects, using random prompts still induces a randomization effect within the FAMix framework. However, meaningful prompts still consistently lead to the best results." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "spans": [ + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "text", + "content": "Number of style prompts. FAMix uses a set " + }, + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "text", + "content": " of random style prompts which are concatenated with the class names; " + }, + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "text", + "content": " is formed by querying ChatGPT using . 
The output prompts are provided in Appendix C. Fig. 4a shows that the size of " + }, + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "text", + "content": " has a marginal impact on FAMix performance. Yet, the mIoU scores on C, B, M and AR are higher for " + }, + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "inline_equation", + "content": "|\\mathcal{R}| = 20" + }, + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "text", + "content": " compared to " + }, + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "inline_equation", + "content": "|\\mathcal{R}| = 1" + }, + { + "bbox": [ + 46, + 588, + 286, + 695 + ], + "type": "text", + "content": " and almost equal for the other datasets." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 264, + 415, + 344 + ], + "blocks": [ + { + "bbox": [ + 307, + 264, + 415, + 344 + ], + "lines": [ + { + "bbox": [ + 307, + 264, + 415, + 344 + ], + "spans": [ + { + "bbox": [ + 307, + 264, + 415, + 344 + ], + "type": "image", + "image_path": "d684a08d693b9a6ddee52bf8e7b6971de283ae9beba9f066560c916100c1a0a3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 346, + 399, + 357 + ], + "lines": [ + { + "bbox": [ + 324, + 346, + 399, + 357 + ], + "spans": [ + { + "bbox": [ + 324, + 346, + 399, + 357 + ], + "type": "text", + "content": "(a) Number of prompts" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 416, + 264, + 544, + 343 + ], + "blocks": [ + { + "bbox": [ + 416, + 264, + 544, + 343 + ], + "lines": [ + { + "bbox": [ + 416, + 264, + 544, + 343 + ], + "spans": [ + { + "bbox": [ + 416, + 264, + 544, + 343 + ], + "type": "image", + "image_path": "3b6ff42e4890093dff93fb5a8030dd8680c1dabd71307c93f9bc12bee73f7eb9.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 437, + 346, + 523, + 357 + ], + "lines": [ + { + "bbox": [ + 437, + 346, + 523, + 357 + ], + "spans": [ + { + "bbox": [ + 437, + 346, + 523, + 357 + ], + "type": "text", + "content": "(b) Effect of layer freezing" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 369, + 544, + 424 + ], + "lines": [ + { + "bbox": [ + 305, + 369, + 544, + 424 + ], + "spans": [ + { + "bbox": [ + 305, + 369, + 544, + 424 + ], + "type": "text", + "content": "Figure 4. Ablation of prompt set and freezing strategy. (a) Performance " + }, + { + "bbox": [ + 305, + 369, + 544, + 424 + ], + "type": "inline_equation", + "content": "(\\mathrm{mIoU}\\%)" + }, + { + "bbox": [ + 305, + 369, + 544, + 424 + ], + "type": "text", + "content": " on test datasets w.r.t. the number of random style prompts in " + }, + { + "bbox": [ + 305, + 369, + 544, + 424 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 305, + 369, + 544, + 424 + ], + "type": "text", + "content": ". (b) Effect of freezing layers reporting on x-axis the last frozen layer. For example, 'L3' means freezing L1, L2 and L3. 'L4' indicates that the Layer4 is partially frozen." 
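The freezing ablation of Fig. 4b ('L3' = freeze Layer1-3 and fine-tune only Layer4) can be expressed in a few lines of PyTorch. The sketch below uses torchvision's ResNet-50 as a stand-in for the CLIP-pretrained backbone and is only meant to illustrate the minimal fine-tuning idea, not the exact FAMix code.

```python
import torchvision

def freeze_up_to(backbone, last_frozen="layer3"):
    """Freeze the stem and every residual stage up to `last_frozen` (inclusive).

    'layer3' mirrors the 'L3' setting of Fig. 4b: Layer1-3 frozen, Layer4 fine-tuned.
    """
    stages = ["conv1", "bn1", "layer1", "layer2", "layer3", "layer4"]
    cutoff = stages.index(last_frozen)
    for name, module in backbone.named_children():
        if name in stages and stages.index(name) <= cutoff:
            for p in module.parameters():
                p.requires_grad_(False)

backbone = torchvision.models.resnet50()  # stand-in for the CLIP ResNet-50
freeze_up_to(backbone, "layer3")
print(sorted({n.split(".")[0] for n, p in backbone.named_parameters() if p.requires_grad}))
# ['fc', 'layer4'] -> only Layer4 (plus the unused classification head) stays trainable
```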
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 438, + 545, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 438, + 545, + 546 + ], + "spans": [ + { + "bbox": [ + 304, + 438, + 545, + 546 + ], + "type": "text", + "content": "The low sensitivity of the performance to the size of " + }, + { + "bbox": [ + 304, + 438, + 545, + 546 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 304, + 438, + 545, + 546 + ], + "type": "text", + "content": " could be explained by two factors. First, mining even from a single prompt results in different style variations as the optimization starts from different anchor points in the latent space, as argued in [10]. Second, mixing style between the source and the mined proxy domains is the crucial factor making the network explore intermediate domains during training. This does not contradict the effect of our prompt construction which leads to the best results (Tab. 7)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 551, + 545, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 551, + 545, + 670 + ], + "spans": [ + { + "bbox": [ + 304, + 551, + 545, + 670 + ], + "type": "text", + "content": "Local vs. global style mining. To highlight the effect of our class-wise local style mining, we perform an ablation replacing it with global style mining. Specifically, the same set of are used, though being concatenated with as a global description instead of local class name. Intuitively, local style mining and mixing induces richer style variations and more contrast among patches. The results in Tab. 8 show the effectiveness of our local style mining and mixing strategy, bringing about 3 mIoU improvement on " + }, + { + "bbox": [ + 304, + 551, + 545, + 670 + ], + "type": "inline_equation", + "content": "G \\to C" + }, + { + "bbox": [ + 304, + 551, + 545, + 670 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 673, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 673, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 673, + 545, + 713 + ], + "type": "text", + "content": "What to mix? Let " + }, + { + "bbox": [ + 304, + 673, + 545, + 713 + ], + "type": "inline_equation", + "content": "S = \\bigcup_{k=1}^{K} S^{(k)}" + }, + { + "bbox": [ + 304, + 673, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 673, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = \\bigcup_{k=1}^{K} \\mathcal{T}^{(k)}" + }, + { + "bbox": [ + 304, + 673, + 545, + 713 + ], + "type": "text", + "content": " the sets of class-wise source and augmented features, respectively. 
In FAMix training, for an arbitrary patch " + }, + { + "bbox": [ + 304, + 673, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_s^{(ij)}" + }, + { + "bbox": [ + 304, + 673, + 545, + 713 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 58, + 702, + 178, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 178, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 178, + 712 + ], + "type": "text", + "content": "3https://chat.openai.com/" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "23434" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 71, + 284, + 129 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 284, + 129 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 284, + 129 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 284, + 129 + ], + "type": "table", + "html": "
Style mining | C | B | M | S | AN | AS | AR | AF | Mean
“street view” | 45.51 | 45.12 | 50.40 | 33.65 | 14.59 | 36.92 | 37.38 | 40.53 | 38.01
“urban scene” | 46.59 | 45.38 | 51.33 | 33.67 | 14.42 | 35.96 | 37.30 | 40.52 | 38.15
global w/ “roadscape” | 45.49 | 45.55 | 50.63 | 33.66 | 14.77 | 36.75 | 37.07 | 40.33 | 38.03
“commute snapshot” | 45.39 | 45.08 | 50.50 | 33.68 | 13.65 | 36.63 | 37.93 | 40.92 | 37.97
“driving” | 45.06 | 44.98 | 50.67 | 33.36 | 14.84 | 35.11 | 36.21 | 39.52 | 37.47
local | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88
", + "image_path": "0c20ebb9033d39cf7ee2413a7cbeb7863a88dcdeca9be044368a3be00d775951.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 184, + 284, + 230 + ], + "blocks": [ + { + "bbox": [ + 47, + 140, + 286, + 172 + ], + "lines": [ + { + "bbox": [ + 47, + 140, + 286, + 172 + ], + "spans": [ + { + "bbox": [ + 47, + 140, + 286, + 172 + ], + "type": "text", + "content": "Table 8. Ablation on style mining. Global style mining consists of mining one style per feature map, using + as prompt." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 184, + 284, + 230 + ], + "lines": [ + { + "bbox": [ + 50, + 184, + 284, + 230 + ], + "spans": [ + { + "bbox": [ + 50, + 184, + 284, + 230 + ], + "type": "table", + "html": "
Style mining | C | B | M | S | AN | AS | AR | AF | Mean
S | 43.43 | 43.79 | 48.19 | 33.70 | 11.32 | 35.55 | 36.15 | 38.19 | 36.29
S∪T | 44.76 | 45.59 | 50.78 | 34.05 | 13.67 | 36.92 | 37.18 | 38.13 | 37.64
T (ours) | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88
", + "image_path": "31be26bfeb86e91093370b56a37430e6ab816e12d51f63a58aaa34cd9776b79c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "spans": [ + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "text", + "content": "style mixing is performed between the original source statistics and statistics sampled from the augmented set (i.e., " + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "inline_equation", + "content": "(\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})\\in \\mathcal{T}^{(c_p^{(ij)})}" + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "text", + "content": ", see (3) and (4)). In class-wise vanilla MixStyle, " + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "inline_equation", + "content": "(\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})\\in S^{(c_p^{(ij)})}" + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "text", + "content": ". In Tab. 9, we show that sampling " + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "inline_equation", + "content": "(\\pmb{\\mu}^{(ij)},\\pmb{\\sigma}^{(ij)})" + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "inline_equation", + "content": "S^{(c_p^{(ij)})}\\cup T^{(c_p^{(ij)})}" + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "text", + "content": " does not lead to better generalization, despite sampling from a set with twice the cardinality. This supports our mixing strategy visualized in Fig. 1. Intuitively, sampling from " + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "inline_equation", + "content": "S\\cup T" + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "text", + "content": " could be viewed as applying either MixStyle or our mixing with a probability " + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "inline_equation", + "content": "p = 0.5" + }, + { + "bbox": [ + 46, + 283, + 287, + 409 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 424, + 286, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 424, + 286, + 519 + ], + "spans": [ + { + "bbox": [ + 46, + 424, + 286, + 519 + ], + "type": "text", + "content": "Minimal fine-tuning. We argue for minimal fine-tuning as a compromise between pretrained feature preservation and adaptation. Fig. 4b shows an increasing OOD generalization trend with more freezing. Interestingly, only fine-tuning the last layers of the last convolutional block (where the dilation is applied) achieves the best results. When training on Cityscapes, we observed that freezing all the layers except Layer4 achieves the best results." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 527, + 214, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 214, + 540 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 214, + 540 + ], + "type": "text", + "content": "4.5. Does FAMix require language?" 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 545, + 286, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 286, + 641 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 286, + 641 + ], + "type": "text", + "content": "Inspired by the observation that target statistics deviate around the source ones in real cases [11], we conduct an experiment where we replace language-driven style mining by noise perturbation. The same procedure of FAMix is kept: (i) Features are divided into patches, perturbed with noise and then saved into a style bank based on the dominant class; (ii) During training, patch-wise style mixing of original and perturbed styles is performed." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 642, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 286, + 713 + ], + "type": "text", + "content": "Different from Fan et al. [11], who perform a perturbation on the feature statistics using a normal distribution with pre-defined parameters, we experiment perturbation with different magnitudes of noise controlled by the signal-to-noise ratio (SNR). Consider the mean of a patch " + }, + { + "bbox": [ + 46, + 642, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\mu \\in \\mathbb{R}^c" + }, + { + "bbox": [ + 46, + 642, + 286, + 713 + ], + "type": "text", + "content": " as a signal, the goal is to perturb it with some noise" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 309, + 71, + 544, + 190 + ], + "blocks": [ + { + "bbox": [ + 47, + 239, + 286, + 262 + ], + "lines": [ + { + "bbox": [ + 47, + 239, + 286, + 262 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 286, + 262 + ], + "type": "text", + "content": "Table 9. Ablation on the sets used for mixing. The styles " + }, + { + "bbox": [ + 47, + 239, + 286, + 262 + ], + "type": "inline_equation", + "content": "(\\mu, \\sigma)" + }, + { + "bbox": [ + 47, + 239, + 286, + 262 + ], + "type": "text", + "content": " used in (3) and (4) are sampled either from " + }, + { + "bbox": [ + 47, + 239, + 286, + 262 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 47, + 239, + 286, + 262 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 47, + 239, + 286, + 262 + ], + "type": "inline_equation", + "content": "S \\cup \\mathcal{T}" + }, + { + "bbox": [ + 47, + 239, + 286, + 262 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 47, + 239, + 286, + 262 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 47, + 239, + 286, + 262 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 71, + 544, + 190 + ], + "lines": [ + { + "bbox": [ + 309, + 71, + 544, + 190 + ], + "spans": [ + { + "bbox": [ + 309, + 71, + 544, + 190 + ], + "type": "table", + "html": "
SNR | C | B | M | S | AN | AS | AR | AF | Mean
Baseline | 37.83 | 38.88 | 44.24 | 31.93 | 12.41 | 29.59 | 31.56 | 33.05 | 32.44
5 | 28.78 | 29.24 | 30.32 | 21.67 | 12.60 | 24.00 | 25.95 | 25.87 | 24.80
10 | 40.09 | 39.50 | 43.45 | 29.09 | 13.36 | 33.47 | 33.11 | 36.17 | 33.53
15 | 45.02 | 44.16 | 48.63 | 32.96 | 14.55 | 36.09 | 35.99 | 40.96 | 37.30
20 | 45.52 | 44.29 | 49.26 | 33.45 | 12.40 | 35.96 | 36.52 | 38.60 | 37.00
25 | 44.82 | 44.26 | 48.54 | 33.30 | 11.38 | 34.51 | 35.46 | 37.61 | 36.24
30 | 43.07 | 43.80 | 48.31 | 33.47 | 12.33 | 35.05 | 35.58 | 38.10 | 36.21
∞ | 43.43 | 43.79 | 48.19 | 33.70 | 11.32 | 35.55 | 36.15 | 38.19 | 36.29
MixStyle [62] | 40.97 | 42.04 | 48.36 | 33.15 | 13.14 | 31.26 | 34.94 | 38.12 | 35.25
Prompts | 48.15 | 45.61 | 52.11 | 34.23 | 14.96 | 37.09 | 38.66 | 40.25 | 38.88
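The SNR-controlled perturbation ablated in the table above (and defined in the paragraph that follows it) rescales Gaussian noise so that 20·log10(‖μ‖/‖n_μ‖) equals the target SNR before adding it to the patch statistics. A small NumPy sketch with illustrative names, not taken from the paper's code:

```python
import numpy as np

def perturb_stat(stat, snr_db, rng=np.random.default_rng()):
    """Add Gaussian noise to a channel-wise statistic (mean or std) at a target SNR in dB.

    The noise is rescaled so that 20 * log10(||stat|| / ||noise||) = snr_db.
    """
    n = rng.standard_normal(stat.shape)
    n = 10.0 ** (-snr_db / 20.0) * (np.linalg.norm(stat) / np.linalg.norm(n)) * n
    return stat + n

mu = np.random.rand(256)             # channel-wise mean of one patch (illustrative)
mu_noisy = perturb_stat(mu, snr_db=15)
achieved = 20 * np.log10(np.linalg.norm(mu) / np.linalg.norm(mu_noisy - mu))
print(f"achieved SNR: {achieved:.1f} dB")  # ~15.0
```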
", + "image_path": "9dba61af06c8fa1990b7b843371ba0af51ef45c607bf2ae5ad74d93bca809956.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 203, + 545, + 247 + ], + "lines": [ + { + "bbox": [ + 305, + 203, + 545, + 247 + ], + "spans": [ + { + "bbox": [ + 305, + 203, + 545, + 247 + ], + "type": "text", + "content": "Table 10. Noise vs prompt-driven augmentation. The prompt-driven augmentation in FAMix is replaced by random noise with different levels defined by SNR. We also include vanilla MixStyle. The prompt-driven strategy is superior." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "n_{\\mu} \\in \\mathbb{R}^{c}" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": ". The " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mathrm{SNR}_{\\mathrm{dB}}" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": " between " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\| \\mu \\|" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\| n_{\\mu}\\|" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": " is defined as " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mathrm{SNR}_{\\mathrm{dB}} = 20\\log_{10}\\left(\\frac{\\|\\mu\\| / \\|n_{\\mu}\\|}{\\|\\mu\\|}\\right)" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": ". Given " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mathrm{SNR}_{\\mathrm{dB}}" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "n \\sim \\mathcal{N}(0,I)" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "I \\in \\mathbb{R}^{c \\times c}" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": " is the identity matrix, the noise is computed as " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "n_{\\mu} = 10^{\\frac{-\\mathrm{SNR}}{20}}\\frac{\\|\\mu\\|}{\\|n\\|} n" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": ". We add " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mu + n_{\\mu}" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": " to the style bank corresponding to the dominant class in the patch. 
The same applies to " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\sigma \\in \\mathbb{R}^{c}" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": ". The results of training for different noise levels are in Tab. 10. Using language as source of randomization outperforms any noise level. The baseline corresponds to the case where no augmentation nor mixing are performed (See Tab. 6, Freeze " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\checkmark" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": ", Augment " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": ", Mix " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": "). " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mathrm{SNR} = \\infty" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": " could be seen as a variant of MixStyle, applied class-wise to patches (See Tab. 6, Freeze " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\checkmark" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": ", Augment " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": ", Mix " + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\checkmark" + }, + { + "bbox": [ + 305, + 276, + 545, + 434 + ], + "type": "text", + "content": "). The vanilla MixStyle gets inferior results." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 437, + 545, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 437, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 305, + 437, + 545, + 497 + ], + "type": "text", + "content": "Besides lower OOD performance, one more disadvantage of noise augmentation compared to our language-driven augmentation is the need to select a value for the SNR, for which the optimal value might vary depending on the target domain encountered at the test time." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 519, + 378, + 531 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 519, + 378, + 531 + ], + "spans": [ + { + "bbox": [ + 306, + 519, + 378, + 531 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 543, + 545, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 543, + 545, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 543, + 545, + 662 + ], + "type": "text", + "content": "We presented FAMix, a simple recipe for domain generalized semantic segmentation with CLIP pretraining. We proposed to locally mix the styles of source features with their augmented counterparts obtained using language prompts. Combined with minimal fine-tuning, FAMix significantly outperforms the state-of-the-art approaches. Extensive experiments showcase the effectiveness of our framework. 
We hope that FAMix will serve as a strong baseline in future works, exploring the potential of leveraging large-scale vision-language models for perception tasks." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 665, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 665, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 665, + 545, + 712 + ], + "type": "text", + "content": "Acknowledgment. This work was partially funded by French project SIGHT (ANR-20-CE23-0016) and was supported by ELSA - European Lighthouse on Secure and Safe AI funded by the European Union under grant agreement No. 101070617. It was performed using HPC resources from GENCI-IDRIS (Grant AD011014477)." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23435" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 715 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Kartik Ahuja, Ethan Caballero, Dinghuai Zhang, Jean-Christophe Gagnon-Audet, Yoshua Bengio, Ioannis Mitliagkas, and Irina Rish. Invariance principle meets information bottleneck for out-of-distribution generalization. In NeurIPS, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "type": "text", + "content": "[2] James Urquhart Allingham, Jie Ren, Michael W Dusenberry, Xiuye Gu, Yin Cui, Dustin Tran, Jeremiah Zhe Liu, and Balaji Lakshminarayanan. A simple zero-shot prompt weighting technique to improve prompt ensembling in text-image models. In ICML, 2023. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "type": "text", + "content": "[3] Martin Arjovsky, Léon Bottou, Ishaan Gulrajani, and David Lopez-Paz. Invariant risk minimization. arXiv preprint arXiv:1907.02893, 2019. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 236, + 288, + 268 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 288, + 268 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 288, + 268 + ], + "type": "text", + "content": "[4] Yogesh Balaji, Swami Sankaranarayanan, and Rama Chellappa. Metareg: Towards domain generalization using metaregularization. In NeurIPS, 2018. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 269, + 288, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 269, + 288, + 312 + ], + "spans": [ + { + "bbox": [ + 53, + 269, + 288, + 312 + ], + "type": "text", + "content": "[5] Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-decoder with atrous separable convolution for semantic image segmentation. In ECCV, 2018. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 314, + 288, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 314, + 288, + 335 + ], + "spans": [ + { + "bbox": [ + 53, + 314, + 288, + 335 + ], + "type": "text", + "content": "[6] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In CVPR, 2021. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 336, + 288, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 336, + 288, + 380 + ], + "spans": [ + { + "bbox": [ + 53, + 336, + 288, + 380 + ], + "type": "text", + "content": "[7] Sungha Choi, Sanghun Jung, Huiwon Yun, Joanne T Kim, Seungryong Kim, and Jaegul Choo. Robustnet: Improving domain generalization in urban-scene segmentation via instance selective whitening. In CVPR, 2021. 1, 2, 3, 5, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 380, + 288, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 380, + 288, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 380, + 288, + 434 + ], + "type": "text", + "content": "[8] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In CVPR, 2016. 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 435, + 288, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 435, + 288, + 468 + ], + "spans": [ + { + "bbox": [ + 53, + 435, + 288, + 468 + ], + "type": "text", + "content": "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, 2009. 1, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 469, + 288, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 469, + 288, + 502 + ], + "spans": [ + { + "bbox": [ + 48, + 469, + 288, + 502 + ], + "type": "text", + "content": "[10] Mohammad Fahes, Tuan-Hung Vu, Andrei Bursuc, Patrick Pérez, and Raoul de Charette. Poda: Prompt-driven zero-shot domain adaptation. In ICCV, 2023. 1, 3, 4, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 502, + 288, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 502, + 288, + 546 + ], + "spans": [ + { + "bbox": [ + 48, + 502, + 288, + 546 + ], + "type": "text", + "content": "[11] Qi Fan, Mattia Segu, Yu-Wing Tai, Fisher Yu, Chi-Keung Tang, Bernt Schiele, and Dengxin Dai. Towards robust object detection invariant to real-world domain shifts. In ICLR, 2023. 5, 8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 547, + 288, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 547, + 288, + 591 + ], + "spans": [ + { + "bbox": [ + 48, + 547, + 288, + 591 + ], + "type": "text", + "content": "[12] Alex Fang, Gabriel Ilharco, Mitchell Wortsman, Yuhao Wan, Vaishaal Shankar, Achal Dave, and Ludwig Schmidt. 
Data determines distributional robustness in contrastive language image pre-training (clip). In ICML, 2022. 1, 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 591, + 288, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 591, + 288, + 635 + ], + "spans": [ + { + "bbox": [ + 48, + 591, + 288, + 635 + ], + "type": "text", + "content": "[13] Yaroslav Ganin, Evgeniya Ustinova, Hana Ajakan, Pascal Germain, Hugo Larochelle, François Laviolette, Mario Marchand, and Victor Lempitsky. Domain-adversarial training of neural networks. JMLR, 2016. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 636, + 288, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 636, + 288, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 636, + 288, + 689 + ], + "type": "text", + "content": "[14] Yunhao Ge, Jie Ren, Andrew Gallagher, Yuxiao Wang, Ming-Hsuan Yang, Hartwig Adam, Laurent Itti, Balaji Lakshminarayanan, and Jiaping Zhao. Improving zero-shot generalization and robustness of multi-modal models. In CVPR, 2023. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 690, + 288, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 690, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 48, + 690, + 288, + 715 + ], + "type": "text", + "content": "[15] Sachin Goyal, Ananya Kumar, Sankalp Garg, Zico Kolter, and Aditi Raghunathan. Finetune like you pretrain: Im-" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 715 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "proved finetuning of zero-shot vision models. In CVPR, 2023. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 95, + 545, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 95, + 545, + 129 + ], + "spans": [ + { + "bbox": [ + 307, + 95, + 545, + 129 + ], + "type": "text", + "content": "[16] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2022. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 130, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 130, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 130, + 545, + 162 + ], + "type": "text", + "content": "[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 163, + 545, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 207 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 207 + ], + "type": "text", + "content": "[18] Judy Hoffman, Eric Tzeng, Taesung Park, Jun-Yan Zhu, Phillip Isola, Kate Saenko, Alexei Efros, and Trevor Darrell. Cycada: Cycle-consistent adversarial domain adaptation. In ICML, 2018. 
1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 208, + 545, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 208, + 545, + 252 + ], + "spans": [ + { + "bbox": [ + 307, + 208, + 545, + 252 + ], + "type": "text", + "content": "[19] Wei Huang, Chang Chen, Yong Li, Jiacheng Li, Cheng Li, Fenglong Song, Youliang Yan, and Zhiwei Xiong. Style projected clustering for domain generalized semantic segmentation. In CVPR, 2023. 1, 3, 5, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 253, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 253, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 253, + 545, + 285 + ], + "type": "text", + "content": "[20] Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In ICCV, 2017. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 286, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 286, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 286, + 545, + 319 + ], + "type": "text", + "content": "[21] Nishant Jain, Harkirat Behl, Yogesh Singh Rawat, and Vibhav Vineet. Efficiently robustify pre-trained models. In ICCV, 2023. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 320, + 545, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 320, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 307, + 320, + 545, + 365 + ], + "type": "text", + "content": "[22] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 365, + 545, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 365, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 307, + 365, + 545, + 399 + ], + "type": "text", + "content": "[23] Jin Kim, Jiyoung Lee, Jungin Park, Dongbo Min, and Kwanghoon Sohn. Pin the memory: Learning to generalize semantic segmentation. In CVPR, 2022. 1, 5, 6, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 399, + 545, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 545, + 432 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 545, + 432 + ], + "type": "text", + "content": "[24] Sunghwan Kim, Dae-hwan Kim, and Hoseong Kim. Texture learning domain randomization for domain generalized segmentation. In ICCV, 2023. 1, 3, 5, 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 433, + 545, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 433, + 545, + 477 + ], + "spans": [ + { + "bbox": [ + 307, + 433, + 545, + 477 + ], + "type": "text", + "content": "[25] David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, and Aaron Courville. Out-of-distribution generalization via risk extrapolation (rex). In ICML, 2021. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 478, + 545, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 478, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 307, + 478, + 545, + 521 + ], + "type": "text", + "content": "[26] Ananya Kumar, Aditi Raghunathan, Robbie Matthew Jones, Tengyu Ma, and Percy Liang. 
Fine-tuning can distort pretrained features and underperform out-of-distribution. In ICLR, 2022. 1, 2, 6" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 522, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 522, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 522, + 545, + 544 + ], + "type": "text", + "content": "[27] Gihyun Kwon and Jong Chul Ye. Clipstyler: Image style transfer with a single text condition. In CVPR, 2022. 1" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "spans": [ + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "type": "text", + "content": "[28] Clement Laroudie, Andrei Bursuc, Mai Lan Ha, and Gianni Franchi. Improving clip robustness with knowledge distillation and self-training. arXiv preprint arXiv:2309.10361, 2023. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 590, + 545, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 545, + 623 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 545, + 623 + ], + "type": "text", + "content": "[29] Suhyeon Lee, Hongje Seong, Seongwon Lee, and Euntai Kim. Wildnet: Learning domain generalized semantic segmentation from the wild. In CVPR, 2022. 1, 3, 5, 6, 7" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 624, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 545, + 656 + ], + "type": "text", + "content": "[30] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and Rene Ranftl. Language-driven semantic segmentation. In ICLR, 2022. 1" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 658, + 545, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 690 + ], + "type": "text", + "content": "[31] Haoliang Li, Sinno Jialin Pan, Shiqi Wang, and Alex C Kot. Domain generalization with adversarial feature learning. In CVPR, 2018. 1" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 691, + 545, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 715 + ], + "type": "text", + "content": "[32] Ya Li, Xinmei Tian, Mingming Gong, Yajing Liu, Tongliang Liu, Kun Zhang, and Dacheng Tao. Deep domain generaliza" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "23436" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "text", + "content": "tion via conditional invariant adversarial networks. In ECCV, 2018. 
1, 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 128 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 128 + ], + "type": "text", + "content": "[33] Yunsheng Li, Lu Yuan, and Nuno Vasconcelos. Bidirectional learning for domain adaptation of semantic segmentation. In CVPR, 2019. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 130, + 287, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 130, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 130, + 287, + 162 + ], + "type": "text", + "content": "[34] Mingsheng Long, Zhangjie Cao, Jianmin Wang, and Michael I Jordan. Conditional adversarial domain adaptation. In NeurIPS, 2018. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 164, + 287, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 164, + 287, + 196 + ], + "spans": [ + { + "bbox": [ + 48, + 164, + 287, + 196 + ], + "type": "text", + "content": "[35] Gerhard Neuhold, Tobias Ollmann, Samuel Rota Bulo, and Peter Kontschieder. The mapillary vistas dataset for semantic understanding of street scenes. In ICCV, 2017. 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 198, + 287, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 198, + 287, + 229 + ], + "spans": [ + { + "bbox": [ + 48, + 198, + 287, + 229 + ], + "type": "text", + "content": "[36] Xingang Pan, Ping Luo, Jianping Shi, and Xiaou Tang. Two at once: Enhancing learning and generalization capacities via ibn-net. In ECCV, 2018. 1, 3, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 232, + 287, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 232, + 287, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 232, + 287, + 262 + ], + "type": "text", + "content": "[37] Duo Peng, Yinjie Lei, Munawar Hayat, Yulan Guo, and Wen Li. Semantic-aware domain generalized segmentation. In CVPR, 2022. 1, 3, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 265, + 287, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 265, + 287, + 286 + ], + "spans": [ + { + "bbox": [ + 48, + 265, + 287, + 286 + ], + "type": "text", + "content": "[38] Fengchun Qiao, Long Zhao, and Xi Peng. Learning to learn single domain generalization. In CVPR, 2020. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 289, + 287, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 289, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 48, + 289, + 287, + 342 + ], + "type": "text", + "content": "[39] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "spans": [ + { + "bbox": [ + 48, + 343, + 287, + 387 + ], + "type": "text", + "content": "[40] Yongming Rao, Wenliang Zhao, Guangyi Chen, Yansong Tang, Zheng Zhu, Guan Huang, Jie Zhou, and Jiwen Lu. Denseclip: Language-guided dense prediction with context-aware prompting. In CVPR, 2022. 
1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 388, + 287, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 287, + 420 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 287, + 420 + ], + "type": "text", + "content": "[41] Stephan R Richter, Vibhav Vineet, Stefan Roth, and Vladlen Koltun. Playing for data: Ground truth from computer games. In ECCV, 2016. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 422, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 422, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 48, + 422, + 287, + 464 + ], + "type": "text", + "content": "[42] German Ros, Laura Sellart, Joanna Materzynska, David Vazquez, and Antonio M Lopez. The synthia dataset: A large collection of synthetic images for semantic segmentation of urban scenes. In CVPR, 2016. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 468, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 468, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 468, + 287, + 510 + ], + "type": "text", + "content": "[43] Kuniaki Saito, Donghyun Kim, Piotr Teterwak, Rogerio Feris, and Kate Saenko. Mind the backbone: Minimizing backbone distortion for robust object detection. arXiv preprint arXiv:2303.14744, 2023. 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 512, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 287, + 544 + ], + "type": "text", + "content": "[44] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. ACDC: The adverse conditions dataset with correspondences for semantic driving scene understanding. In ICCV, 2021. 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 545, + 287, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 545, + 287, + 578 + ], + "spans": [ + { + "bbox": [ + 48, + 545, + 287, + 578 + ], + "type": "text", + "content": "[45] Yang Shu, Xingzhuo Guo, Jialong Wu, Ximei Wang, Jianmin Wang, and Mingsheng Long. Clipood: Generalizing clip to out-of-distributions. In ICML, 2023. 1, 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 580, + 287, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 580, + 287, + 611 + ], + "spans": [ + { + "bbox": [ + 48, + 580, + 287, + 611 + ], + "type": "text", + "content": "[46] Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In CVPR, 2016. 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 613, + 287, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 644 + ], + "type": "text", + "content": "[47] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In CVPR, 2017. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "text", + "content": "[48] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In CVPR, 2019. 
1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "type": "text", + "content": "[49] Jindong Wang, Cuiling Lan, Chang Liu, Yidong Ouyang, Tao Qin, Wang Lu, Yiqiang Chen, Wenjun Zeng, and Philip" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 690 + ], + "type": "list", + "angle": 0, + "index": 36, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "Yu. Generalizing to unseen domains: A survey on domain generalization. T-KDE, 2022. 1, 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 96, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 149 + ], + "type": "text", + "content": "[50] Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, et al. Robust fine-tuning of zero-shot models. In CVPR, 2022. 1, 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "type": "text", + "content": "[51] Zhenyao Wu, Xinyi Wu, Xiaoping Zhang, Lili Ju, and Song Wang. Siamdoge: Domain generalizable semantic segmentation using siamese network. In ECCV, 2022. 1, 3, 5, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 186, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 217 + ], + "type": "text", + "content": "[52] Qinwei Xu, Ruipeng Zhang, Ya Zhang, Yanfeng Wang, and Qi Tian. A fourier-based framework for domain generalization. In CVPR, 2021. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 220, + 545, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 262 + ], + "type": "text", + "content": "[53] Liwei Yang, Xiang Gu, and Jian Sun. Generalized semantic segmentation by self-supervised source domain projection and multi-level contrastive learning. In AAAI, 2023. 1, 5, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 265, + 545, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 265, + 545, + 307 + ], + "spans": [ + { + "bbox": [ + 307, + 265, + 545, + 307 + ], + "type": "text", + "content": "[54] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darryll. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In CVPR, 2020. 
5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 310, + 545, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 310, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 307, + 310, + 545, + 352 + ], + "type": "text", + "content": "[55] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. LiT: Zero-shot transfer with locked-image text tuning. In CVPR, 2022. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 354, + 545, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 545, + 386 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 545, + 386 + ], + "type": "text", + "content": "[56] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 388, + 545, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 420 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 420 + ], + "type": "text", + "content": "[57] Shanshan Zhao, Mingming Gong, Tongliang Liu, Huan Fu, and Dacheng Tao. Domain generalization via entropy regularization. In NeurIPS, 2020. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "type": "text", + "content": "[58] Yuyang Zhao, Zhun Zhong, Na Zhao, Nicu Sebe, and Gim Hee Lee. Style-hallucinated dual consistency learning for domain generalized semantic segmentation. In ECCV, 2022. 1, 3, 5, 6, 7" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 468, + 545, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 468, + 545, + 488 + ], + "spans": [ + { + "bbox": [ + 307, + 468, + 545, + 488 + ], + "type": "text", + "content": "[59] Chong Zhou, Chen Change Loy, and Bo Dai. Extract free dense labels from clip. In ECCV, 2022. 1" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 491, + 545, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 491, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 307, + 491, + 545, + 521 + ], + "type": "text", + "content": "[60] Kaiyang Zhou, Yongxin Yang, Timothy Hospedales, and Tao Xiang. Deep domain-adversarial image generation for domain generalisation. In AAAI, 2020. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 524, + 545, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 524, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 307, + 524, + 545, + 555 + ], + "type": "text", + "content": "[61] Kaiyang Zhou, Yongxin Yang, Timothy Hospedales, and Tao Xiang. Learning to generate novel domains for domain generalization. In ECCV, 2020." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 559, + 545, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 559, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 307, + 559, + 545, + 590 + ], + "type": "text", + "content": "[62] Kaiyang Zhou, Yongxin Yang, Yu Qiao, and Tao Xiang. Domain generalization with mixstyle. In ICLR, 2021. 
1, 2, 4, 5, 6, 8" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 592, + 545, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 592, + 545, + 623 + ], + "spans": [ + { + "bbox": [ + 307, + 592, + 545, + 623 + ], + "type": "text", + "content": "[63] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. TPAMI, 2022. 1, 2" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 625, + 545, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 625, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 307, + 625, + 545, + 657 + ], + "type": "text", + "content": "[64] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Zwei Liu. Conditional prompt learning for vision-language models. In CVPR, 2022. 1" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 307, + 659, + 545, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 659, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 307, + 659, + 545, + 690 + ], + "type": "text", + "content": "[65] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 2022. 1" + } + ] + } + ], + "index": 35 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "23437" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/1bfdaa4b-618c-45a6-9de6-a1e062fefbcd_content_list.json b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/1bfdaa4b-618c-45a6-9de6-a1e062fefbcd_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4552c444970378fadb757e7025a508726c5fd4f0 --- /dev/null +++ b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/1bfdaa4b-618c-45a6-9de6-a1e062fefbcd_content_list.json @@ -0,0 +1,1626 @@ +[ + { + "type": "text", + "text": "A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization", + "text_level": 1, + "bbox": [ + 250, + 130, + 718, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hongwei Ren*, Jiadong Zhu*, Yue Zhou, Haotian Fu, Yulong Huang, Bojun Cheng † \nThe Hong Kong University of Science and Technology(Guangzhou)", + "bbox": [ + 161, + 203, + 828, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{hren066, jzhu484, yzhou833, hfu373, yhuang496}@connect.hkust-gz.edu.cn, bocheng@hkust-gz.edu.cn", + "bbox": [ + 86, + 241, + 898, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 291, + 313, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Event cameras exhibit remarkable attributes such as high dynamic range, asynchronicity, and low latency, making them highly suitable for vision tasks that involve high-speed motion in challenging lighting conditions. These cameras implicitly capture movement and depth information in events, making them appealing sensors for Camera Pose Relocalization (CPR) tasks. 
Nevertheless, existing CPR networks based on events neglect the pivotal fine-grained temporal information in events, resulting in unsatisfactory performance. Moreover, the energy-efficient features are further compromised by the use of excessively complex models, hindering efficient deployment on edge devices. In this paper, we introduce PEPNet, a simple and effective point-based network designed to regress six degrees of freedom (6-DOFs) event camera poses. We rethink the relationship between the event camera and CPR tasks, leveraging the raw Point Cloud directly as network input to harness the high-temporal resolution and inherent sparsity of events. PEPNet is adept at abstracting the spatial and implicit temporal features through hierarchical structure and explicit temporal features by Attentive Bidirectional Long Short-Term Memory (A-Bi-LSTM). By employing a carefully crafted lightweight design, PEPNet delivers state-of-the-art (SOTA) performance on both indoor and outdoor datasets with meager computational resources. Specifically, PEPNet attains a significant $38\\%$ and $33\\%$ performance improvement on the random split IJRR and M3ED datasets, respectively. Moreover, the lightweight design version PEPNet™ accomplishes results comparable to the SOTA while employing a mere $0.5\\%$ of the parameters.", + "bbox": [ + 76, + 323, + 472, + 790 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 820, + 209, + 835 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Event camera is a type of bio-inspired vision sensor that responds to local changes in illumination exceeding a pre", + "bbox": [ + 76, + 845, + 468, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/242f5762ec36755ca7783afef0d2fe43bb2ea949b09337431b84fb987e107094.jpg", + "image_caption": [ + "Figure 1. The average results using the random split method benchmarked on the CPR dataset [23]. The vertical axis represents the combined rotational and translational errors $(\\mathrm{m} + \\mathrm{rad})$ . PEPNet is the first point-based CPR network for event cameras." + ], + "image_footnote": [], + "bbox": [ + 568, + 289, + 823, + 425 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "defined threshold [17]. Differing from conventional frame-based cameras, event cameras independently and asynchronously produce pixel-level events. Notably, event cameras boast an exceptional triad: high dynamic range, low latency, and ultra-high temporal resolution. This unique combination empowers superior performance under challenging light conditions, adeptly capturing the swift scene and rapid motion changes in near-microsecond precision [27]. Additionally, event cameras boast remarkably low power consumption positioning them as a popular choice for many power-constrained devices. Camera Pose Relocalization (CPR) is an emerging application in power-constrained devices and has gained significant attention. It aims to train several scene-specific neural networks to accurately relocalize the camera pose within the original scene used for training. 
It is extensively employed in numerous applications, including Virtual Reality (VR), Augmented Reality (AR), and robotics [35], all of which are deployed on battery-powered devices and are power-constrained.", + "bbox": [ + 496, + 520, + 892, + 808 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CPR tasks using event cameras significantly diverge from their conventional CPR counterpart that employs frame-based cameras, primarily due to the inherent dissimilarity in data output mechanisms between these two camera types. Furthermore, events inherently encompass information regarding object motion and depth changes", + "bbox": [ + 496, + 810, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*equal contribution. †corresponding author.", + "bbox": [ + 94, + 886, + 328, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "18112", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "across precise temporal and spatial dimensions attributes of paramount significance within the domain of CPR tasks [8, 31]. Regrettably, existing event-based CPR networks often derive from the conventional camera network paradigms and inadequately address the unique attributes of event data. More specifically, events are transformed into various representations such as event images [26], time surfaces [18], and other representations[18], leading to the loss of their fine-grained temporal information. Furthermore, most event-based methods tend to overlook the computational load of the network, only prioritizing elevated accuracy, which contradicts the fundamental design principles of event cameras [9].", + "bbox": [ + 75, + 90, + 472, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A suitable and faithful data representation is crucial for event cloud processing. Point Cloud is a collection of 3D points $(x, y, z)$ that represents the shape and surface of an object or environment commonly used in lidar and depth cameras [10]. The distance $(z)$ is of great meaning to the tasks. As for event camera, by treating each event's temporal information as the third dimension, event inputs $(x, y, t)$ can be regarded as points and aggregated into a pseudo-Point Cloud [28, 29, 32-34, 40]. However, given that the $t$ dimension of Event Cloud is not strictly equivalent to the spatial dimensions $(x, y, z)$ , direct transplantation of the Point Cloud network has not yet exhibited a satisfactory performance advantage in processing event data [32, 40].", + "bbox": [ + 75, + 291, + 472, + 488 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this study, we introduce PEPNet, the first point-based end-to-end CPR network designed to harness the attributes of event cameras. A comparison of our performance and method to other frame-based methods is illustrated Fig. 1 and Fig. 2, respectively. 
Moreover, diverging from other point-based approaches in event data processing [32, 40], PEPNet demonstrates careful attention to detail by systematically assessing the difference between Event Cloud and Point Cloud in its design approach. This approach enables a more precise extraction of spatio-temporal features and facilitates solutions for a spectrum of event-based tasks. Our main contributions are as follows: First, in the preprocessing stage, PEPNet directly processes the raw data obtained from the event cameras, meticulously preserving the fine-grained temporal coordinate and the order inherent in the event data. Second, PEPNet proficiently captures spatial features and implicit temporal features through its hierarchical structure with temporal aggregation. Subsequently, the explicit temporal feature is processed by the A-BiLSTM, thanks to the preservation of the input sequence in previous stages. As such, this architecture is tailored to accommodate the high temporal resolution and sparse characteristics inherent in event cameras. Thirdly, by restricting ourselves to minimal hardware resources and deliberately avoiding heavy computational modules, PEPNet not only attains SOTA results on IJRR [23] and M3ED [4] dataset but also features a lightweight design that can be executed", + "bbox": [ + 75, + 493, + 472, + 901 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e27644a896423cce95a5f5900f5d8f6106a29cc8b919f2003ad654c7e13a782b.jpg", + "image_caption": [ + "Figure 2. Two different event-based processing methods, frame-based and point-based." + ], + "image_footnote": [], + "bbox": [ + 568, + 88, + 826, + 205 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "in real-time. We hope such an approach could potentially democratize computer vision technology by making it accessible to a wider range of devices and applications in the community of edge computing.", + "bbox": [ + 498, + 271, + 890, + 333 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 347, + 640, + 363 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Frame-based CPR Learning Methods", + "text_level": 1, + "bbox": [ + 500, + 372, + 823, + 388 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Deep learning, crucial for vision tasks like classification and object detection [16], has seen advancements such as PoseNet's innovative transfer learning [14]. Utilizing VGG, ResNet [11, 36], LSTM, and customized loss functions [25, 39, 41], researchers enhanced this approach. Auxiliary Learning methods further improved performance [19, 30, 38], although overfitting remains a challenge. Hybrid pose-based methods, combining learning with traditional pipelines [1, 15], offer promise. DSAC series, for instance, achieve high pose estimation accuracy [2, 3], but come with increased computational costs and latency, especially for edge devices.", + "bbox": [ + 498, + 397, + 890, + 579 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Event-based CPR Learning Methods", + "text_level": 1, + "bbox": [ + 500, + 589, + 818, + 606 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Event-based CPR methods often derive from the frame-based CPR network. SP-LSTM [26] employed the stacked spatial LSTM networks to process event images, facilitating a real-time pose estimator. 
To address the inherent noise in event images, [12] proposed a network structure combining denoise networks, convolutional neural networks, and LSTM, achieving good performance under complex working conditions. In contrast to the aforementioned methods, a novel representation named Reversed Window Entropy Image (RWEI) [18] is introduced, which is based on the widely used event surface [22] and serves as the input to an attention-based DSAC* pipeline [2] to achieve SOTA results. However, the computationally demanding architecture involving representation transformation and hybrid pipeline poses challenges for real-time execution. Additionally, all existing methods ignore the fine-grained temporal feature of the event cameras, and accumulate events into frames for processing, resulting in unsatisfactory performance.", + "bbox": [ + 496, + 613, + 890, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "18113", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.3. Point Cloud Network", + "text_level": 1, + "bbox": [ + 76, + 90, + 276, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Point-based methodologies have transformed the direct processing of Point Cloud, with PointNet [28] as a standout example. Taking a step beyond, PointNet++ [29] introduced a Set Abstraction module. While it initially employed a straightforward MLP in the feature extractor, recent advancements have seen the development of more sophisticated feature extractors to enhance Point Cloud processing [5, 21, 42, 44]. When extending these techniques to Event Cloud, Wang et al. [40] addressed the temporal information processing challenge while maintaining representation in both the x and y axes, enabling gesture recognition using PointNet++. Further enhancements came with PAT [43], which incorporated self-attention and Gumbel subset sampling, leading to improved performance in recognition tasks. However, existing point-based models still fall short in performance compared to frame-based methods. This phenomenon can be attributed to the distinctively different characteristics of Point Cloud and Event Cloud. Event Cloud contradicts the permutation and transformation invariance present in Point Cloud due to its temporal nature. Additionally, the Point Cloud network is not equipped to extract explicit temporal features.", + "bbox": [ + 75, + 114, + 472, + 448 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. PEPNet", + "text_level": 1, + "bbox": [ + 76, + 463, + 171, + 478 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "PEPNet pipeline consists of four essential modules: (1) a preprocessing module for the original Event Cloud, (2) a hierarchical Point Cloud feature extraction structure, (3) an Attentive Bi-directional LSTM, and (4) a 6-DOFs pose regressor, as illustrated in Fig. 3. In the following sections, we will provide detailed descriptions and formulations for each module.", + "bbox": [ + 75, + 488, + 468, + 594 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Event Cloud", + "text_level": 1, + "bbox": [ + 76, + 606, + 210, + 621 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To preserve the fine-grained temporal information and original data distribution attributes from the Event Cloud, the 2D-spatial and 1D-temporal event information is constructed into a three-dimensional representation to be processed in Point Cloud. 
Event Cloud consists of time-series data capturing spatial intensity changes of images in chronological order, and an individual event is denoted as $e_k = (x_k, y_k, t_k, p_k)$ , where $k$ is the index representing the $k_{th}$ element in the sequence. Consequently, the set of events within a single sequence $(\\mathcal{E})$ in the dataset can be expressed as:", + "bbox": [ + 75, + 631, + 468, + 796 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {E} = \\left\\{e _ {k} = \\left(x _ {k}, y _ {k}, t _ {k}, p _ {k}\\right) \\mid k = 1, \\dots , n \\right\\} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 129, + 799, + 468, + 816 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For a given pose in the dataset, the ground truth resolution is limited to $5ms$ , while the event resolution is $1\\mu s$ . Therefore, it is necessary to acquire the events that transpire within the time period we call it sliding window corresponding to the poses, which will serve as the input for the model,", + "bbox": [ + 75, + 825, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "as depicted by the following equation:", + "bbox": [ + 500, + 90, + 756, + 107 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nP _ {i} = \\left\\{e _ {j \\rightarrow l} \\mid t _ {l} - t _ {j} = R \\right\\} \\quad i = 1, \\dots , M \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 550, + 119, + 890, + 136 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The symbol $R$ represents the time interval of the sliding window, where $j$ and $l$ denote the start and end event index of the sequence, respectively. The variable $M$ represents the number of sliding windows into which the sequence of events $\\mathcal{E}$ is divided. Before being fed into the neural network, $P_{i}$ also needs to undergo sampling and normalization. Sampling is to unify the number of points $N$ as network inputs. We set $N = 1024$ in PEPNet. Additionally, as the spatial coordinates are normalized by the camera's resolution $w$ and $h$ . The normalization process is described by the following equation:", + "bbox": [ + 496, + 148, + 893, + 316 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nP N _ {i} = \\left(\\frac {X _ {i}}{w}, \\frac {Y _ {i}}{h}, \\frac {T _ {i} - t _ {j}}{t _ {l} - t _ {j}}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 325, + 890, + 359 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nX _ {i}, Y _ {i}, T _ {i} = \\left\\{x _ {j}, \\dots , x _ {l} \\right\\}, \\left\\{y _ {j}, \\dots , y _ {l} \\right\\}, \\left\\{t _ {j}, \\dots , t _ {l} \\right\\} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 372, + 890, + 388 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The $X, Y$ is divided by the resolution of the event camera. To normalize $T$ , we subtract the smallest timestamp $t_j$ of the window and divide it by the time difference $t_l - t_j$ , where $t_l$ represents the largest timestamp within the window. After pre-processing, Event Cloud is converted into the pseudo-Point Cloud, which comprises explicit spatial information $(x, y)$ and implicit temporal information $t$ .", + "bbox": [ + 496, + 396, + 892, + 503 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. 
Hierarchy Structure", + "text_level": 1, + "bbox": [ + 500, + 513, + 692, + 529 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The hierarchy structure is the backbone for processing the pseudo-3D Point Cloud and is composed of four primary modules: grouping and sampling, standardization, feature extractor, and aggregation, as described in the following subsection. To efficiently extract deeper explicit spatial and implicit temporal features, the hierarchical structure is tailored and differs from conventional hierarchical structure in a few ways: First, we no longer force permutation invariance as usually done in mainstream point-based methods [21, 28], as the motion information is inherently related to the sequential order of events. Instead, we keep the sequence of all events strictly in the same order as they are generated to preserve the temporal information to be used in the next stage. Second, we replace MaxPooling in aggregation and deploy temporal aggregation which leverages the attention mechanism with softmax, which improves the effective assimilation of temporal information into the resultant feature vectors.", + "bbox": [ + 496, + 537, + 890, + 809 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2.1 Grouping and Sampling", + "text_level": 1, + "bbox": [ + 500, + 830, + 723, + 845 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Aligned with the frame-based design concept, our focus is to capture both local and global information. Local information is acquired by leveraging Farthest Point Sampling", + "bbox": [ + 496, + 854, + 892, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "18114", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c27d39af9633d3daf5f2f2ecd2e7491f1cab468601934972cd464355a32a6eb6.jpg", + "image_caption": [ + "Figure 3. PEPNet overall architecture (the time resolution of $t_1, t_2, \\ldots, t_n$ is $1\\mu s$ ). The input Event Cloud undergoes direct handling through a sliding window, sampling, and normalization, eliminating the need for any format conversion. Sequentially, the input passes through $S_{num}$ hierarchy structures for spatial feature abstraction and extraction. It further traverses a bidirectional LSTM for temporal feature extraction, culminating in a regressor responsible for 6-DOFs camera pose relocalization." + ], + "image_footnote": [], + "bbox": [ + 86, + 85, + 872, + 305 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "(FPS) and K-Nearest Neighbors (KNN), while global information is obtained through a dedicated aggregation module.", + "bbox": [ + 76, + 378, + 470, + 409 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP S _ {i} = F P S \\left(P N _ {i}\\right) \\quad P G _ {i} = K N N \\left(P N _ {i}, P S _ {i}\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 419, + 468, + 436 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The input dimension $PN_{i}$ is $[N,3 + D]$ , and the centroid dimension $PS_{i}$ is $[N^{\\prime},3 + D]$ and the group dimension $PG_{i}$ is $[N^{\\prime},K,3 + 2*D]$ . $K$ represents the nearest $K$ points of the center point (centroid), $D$ is the feature dimension of the points of the current stage, and 3 is the most original $(X,Y,T)$ coordinate value. 
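To make the preprocessing and grouping steps above concrete, the following is a minimal NumPy sketch of the per-window normalization (Eqs. 3-4) and the FPS/KNN grouping of Eq. 5. The function names, the seeding of FPS with the earliest event, and the example sizes (512 centroids, a 240x180 sensor) are illustrative assumptions, not PEPNet's released implementation.

```python
import numpy as np

def normalize_window(events, w, h):
    """Scale one sliding window of raw events (x, y, t) into the unit cube (cf. Eqs. 3-4)."""
    x, y, t = (events[:, i].astype(float) for i in range(3))
    t0, t1 = t.min(), t.max()
    return np.stack([x / w, y / h, (t - t0) / max(t1 - t0, 1e-9)], axis=1)

def farthest_point_sampling(points, n_centroids):
    """Greedy farthest point sampling over (x, y, t); returns centroid indices."""
    chosen = np.zeros(n_centroids, dtype=np.int64)
    dist = np.full(points.shape[0], np.inf)
    idx = 0                                    # assumption: seed with the earliest event
    for i in range(n_centroids):
        chosen[i] = idx
        dist = np.minimum(dist, np.sum((points - points[idx]) ** 2, axis=1))
        idx = int(np.argmax(dist))
    return np.sort(chosen)                     # index order equals timestamp order

def group_knn(points, centroid_idx, k):
    """Gather the k nearest neighbours of each centroid, kept in timestamp order (Eq. 5)."""
    centroids = points[centroid_idx]                                    # [N', 3]
    d = np.sum((points[None, :, :] - centroids[:, None, :]) ** 2, -1)   # [N', N]
    knn = np.sort(np.argsort(d, axis=1)[:, :k], axis=1)                 # sorted indices -> time order
    return points[knn]                                                  # [N', k, 3]

# Example with assumed stage sizes: 1024 sampled events -> 512 centroids, k = 24 neighbours.
# pn = normalize_window(window_events, w=240, h=180)   # e.g. a 240x180 event sensor
# groups = group_knn(pn, farthest_point_sampling(pn, 512), k=24)
```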
Importantly, it should be noted that the ordering of all points in the grouping and sampling process strictly adheres to the timestamp $(T)$ , and the dimension $2*D$ of the points in the group is the result of being concatenated to the centroid.", + "bbox": [ + 76, + 446, + 472, + 598 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 Standardization", + "text_level": 1, + "bbox": [ + 76, + 616, + 243, + 630 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Next, each group undergoes a standardization process to ensure consistent variability between points within the group, as illustrated in this formula:", + "bbox": [ + 75, + 638, + 468, + 685 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} P G S _ {i} = \\frac {P G _ {i} - P S _ {i}}{S t d \\left(P G _ {i}\\right)} \\quad S t d \\left(P G _ {i}\\right) = \\sqrt {\\frac {\\sum_ {j = 0} ^ {3 n - 1} \\left(g _ {j} - \\bar {g}\\right) ^ {2}}{3 n - 1}} (6) \\\\ g = \\left[ x _ {0}, y _ {0}, t _ {0}, \\dots , x _ {n}, y _ {n}, t _ {n} \\right] (7) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 693, + 468, + 762 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Where $PG_{i}$ and $PS_{i}$ are the subsets of $PG$ and $PS$ , $Std$ is the standard deviation, the dimension of $Std(PG)$ is $M$ which is consistent with the number of sliding windows, and $g$ is the set of coordinates of all points in the $PG_{i}$ .", + "bbox": [ + 75, + 767, + 470, + 829 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.3 Feature extractor", + "text_level": 1, + "bbox": [ + 76, + 845, + 253, + 859 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Following the standardization of $PG$ by dividing the variance by the subtracted mean, the feature extraction is per", + "bbox": [ + 75, + 869, + 470, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "formed using a Multi-Layer Perceptron (MLP) with a residual connection. This process encompasses two steps: local feature extraction and global feature extraction. The feature extractor with a bottleneck can be mathematically represented as:", + "bbox": [ + 496, + 378, + 890, + 453 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nI (x) = f (\\mathrm {B N} (\\mathrm {M L P} _ {1} (x))) \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 467, + 890, + 484 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nO (x) = \\operatorname {B N} \\left(\\operatorname {M L P} _ {2} (x)\\right) \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 487, + 890, + 503 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nE x t (x) = f (x + O (I (x))) \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 506, + 890, + 522 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "BN represents batch normalization layer, while $f$ signifies the nonlinear activation function. Both local feature extraction and global feature extraction maintain identical input and output dimensions. The dimension increase occurs solely when combining the feature dimension $D$ of the current point with the feature dimension $D$ of the centroid during grouping, resulting in a final dimension of $2 * D$ . The feature extractor takes an input dimension of $[B, N, K, D]$ , and following local feature extraction, the dimension remains $[B, N, K, D]$ , $B$ represents batch size. 
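As an illustration of the residual bottleneck extractor in Eqs. (8)-(10), here is a minimal PyTorch sketch applied point-wise to the grouped features; the bottleneck ratio, the ReLU choice for the nonlinearity f, and the module name are assumptions rather than the paper's exact configuration.

```python
import torch
import torch.nn as nn

class ResidualExtractor(nn.Module):
    """Ext(x) = f(x + BN(MLP2(f(BN(MLP1(x)))))), applied point-wise (cf. Eqs. 8-10)."""

    def __init__(self, dim, bottleneck_ratio=2):
        super().__init__()
        hidden = dim // bottleneck_ratio          # assumed bottleneck width
        self.mlp1, self.bn1 = nn.Linear(dim, hidden), nn.BatchNorm1d(hidden)
        self.mlp2, self.bn2 = nn.Linear(hidden, dim), nn.BatchNorm1d(dim)
        self.act = nn.ReLU(inplace=True)          # the paper only names a nonlinearity f

    def forward(self, x):                         # x: [B, N, K, D] (local) or [B, N, D] (global)
        shape = x.shape
        flat = x.reshape(-1, shape[-1])           # BatchNorm1d operates on [*, D]
        inner = self.act(self.bn1(self.mlp1(flat)))         # Eq. (8)
        out = self.bn2(self.mlp2(inner))                    # Eq. (9)
        return self.act(flat + out).reshape(shape)          # Eq. (10): residual, same dim in and out
```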
We adopt the attention mechanism for aggregation, yielding an aggregated feature dimension of $[B, N, D]$ . Subsequently, the aggregated feature map is then processed through the global feature extractor, completing the feature extraction for the current stage.", + "bbox": [ + 496, + 535, + 892, + 762 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.4 Temporal Aggregation", + "text_level": 1, + "bbox": [ + 498, + 785, + 712, + 800 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Conventional Point Cloud methods favor MaxPooling operations for feature aggregation because it is efficient in extracting the feature from one point among a group of points and discarding the rest. However, MaxPooling involves extracting only the maximum value along each dimension of the temporal axis. It is robust to noise perturbation but", + "bbox": [ + 496, + 809, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "18115", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "also ignores the temporal nuances embedded within the features. Conversely, the integration of attention mechanisms enhances the preservation of those nuanced and useful temporal attributes by aggregating features along the temporal axis through the attention value. To provide a more comprehensive exposition, we employ a direct attention mechanism within the $K$ temporal dimensions to effectively aggregate features as shown in Fig. 3. This mechanism enables the explicit integration of temporal attributes, capitalizing on the inherent strict ordering of the $K$ points. The ensuing formula succinctly elucidates the essence of this attention mechanism:", + "bbox": [ + 75, + 90, + 472, + 271 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nF _ {\\text {l o c a l}} = \\operatorname {E x t} (x) = \\left(F _ {t 1}, F _ {t 2}, \\dots , F _ {t k}\\right) \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 277, + 468, + 294 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nA = \\operatorname {S o f t M a x} \\left(\\operatorname {M L P} \\left(F _ {\\text {l o c a l}}\\right)\\right) = \\left(a _ {t 1}, a _ {t 2}, \\dots , a _ {t k}\\right) \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 300, + 467, + 316 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nF _ {\\text {a g g r e}} = A \\cdot F _ {\\text {l o c a l}} = F _ {t 1} \\cdot a _ {t 1} + F _ {t 2} \\cdot a _ {t 2} + \\dots + F _ {t k} \\cdot a _ {t k} \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 319, + 467, + 335 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Upon the application of the local feature extractor, the ensuing features are denoted as $F_{\\mathrm{local}}$ , and $F_{tk}$ mean the extracted feature of $k_{th}$ point in a group. The attention mechanism comprises an MLP layer with an input layer dimension of $D$ and an output $a_{tk}$ dimension of 1, along with softmax layers. Subsequently, the attention mechanism computes attention values, represented as $A$ . These attention values are then multiplied with the original features through batch matrix multiplication, resulting in the aggregated feature $F_{\\mathrm{aggre}}$ .", + "bbox": [ + 75, + 338, + 468, + 489 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. 
A-Bi-LSTM", + "text_level": 1, + "bbox": [ + 76, + 494, + 207, + 508 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The temporal features extracted through the hierarchical structure are independent and parallel, lacking recurrent mechanisms within the network. This distinctive attribute, referred to as 'implicit', contrasts with the conventional treatment of temporal information as an indexed process. Consequently, implicit temporal features inadequately capture the interrelations among events along the timeline, whereas explicit temporal features assume a pivotal role in facilitating the CPR task. To explicitly capture temporal patterns, we introduce the LSTM network, which has been proven effective in learning temporal dependencies. For optimal network performance, controlled feature dimensionality, and comprehensive capture of bidirectional relationships in pose context, we adopt a bidirectional LSTM network with a lightweight design. The regressor attentively focuses on the output of Bi-LSTM at each timestep and is more inclined towards the start and end features as demonstrated in Fig. 6. The integration of bidirectional connections into the recurrent neural network (RNN) is succinctly presented through the following equation:", + "bbox": [ + 75, + 518, + 472, + 835 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {h} _ {t} = f \\left(\\mathbf {W} _ {h} \\cdot \\mathbf {x} _ {t} + \\mathbf {U} _ {h} \\cdot \\mathbf {h} _ {t - 1} + \\mathbf {b} _ {h}\\right) \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 842, + 468, + 858 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {h} _ {t} ^ {\\prime} = f \\left(\\mathbf {W} _ {h} ^ {\\prime} \\cdot \\mathbf {x} _ {t} + \\mathbf {U} _ {h} ^ {\\prime} \\cdot \\mathbf {h} _ {t + 1} ^ {\\prime} + \\mathbf {b} _ {h} ^ {\\prime}\\right) \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 142, + 861, + 467, + 878 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {y} _ {t} = \\mathbf {V} \\cdot \\mathbf {h} _ {t} + \\mathbf {b} _ {y} \\quad \\mathbf {y} _ {t} ^ {\\prime} = \\mathbf {V} ^ {\\prime} \\cdot \\mathbf {h} _ {t} ^ {\\prime} + \\mathbf {b} _ {y} ^ {\\prime} \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 142, + 880, + 467, + 897 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$\\mathbf{x}_t$ represents the feature vector at the $t$ -th time step of the input sequence, while $\\mathbf{h}_{t-1}$ and $\\mathbf{h}_{t+1}'$ correspond to the hidden states of the forward and backward RNN units, respectively, from the previous time step. The matrices $\\mathbf{W}_h$ , $\\mathbf{U}_h$ , and $\\mathbf{b}_h$ denote the weight matrix and bias vector of the forward RNN unit, while $\\mathbf{V}$ and $\\mathbf{b}_y$ represent the weight matrix and bias vector of its output layer. Similarly, $\\mathbf{W}_h'$ , $\\mathbf{U}_h'$ , and $\\mathbf{b}_h'$ are associated with the weight matrix and bias vector of the backward RNN unit, and $\\mathbf{V}'$ and $\\mathbf{b}_y'$ pertain to the weight matrix and bias vector of its output layer. The activation function, denoted as $f(\\cdot)$ , can be chosen as sigmoid or tanh or other functions. 
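The attentive aggregation of Eqs. (11)-(13) can be sketched as below: a single linear scoring layer followed by a softmax over the K time-ordered neighbours, whose weights form the weighted sum F_aggre. The module name and dimensions are illustrative assumptions; the same weighted-sum pattern is what the following equations apply to the Bi-LSTM outputs.

```python
import torch
import torch.nn as nn

class TemporalAggregation(nn.Module):
    """Softmax-weighted sum over the K chronologically ordered points of a group (Eqs. 11-13)."""

    def __init__(self, dim):
        super().__init__()
        self.score = nn.Linear(dim, 1)            # MLP with scalar output per point (Eq. 12)

    def forward(self, f_local):                   # f_local: [B, N, K, D]
        a = torch.softmax(self.score(f_local), dim=2)       # attention weights over the K axis
        return (a * f_local).sum(dim=2)                     # F_aggre: [B, N, D] (Eq. 13)

# Usage sketch: replaces MaxPooling, i.e. f_local.max(dim=2).values, in the hierarchy.
# The explicit temporal branch can then be a standard bidirectional LSTM, e.g.
# nn.LSTM(input_size=d, hidden_size=d // 2, batch_first=True, bidirectional=True)
# (the halved hidden size is an assumption), whose per-step outputs are pooled with
# the same attention pattern described next (Eqs. 17-19).
```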
The final output $Y_a$ is aggregated at each moment using the attention mechanism, and $\\oplus$ means concat operation.", + "bbox": [ + 496, + 90, + 893, + 303 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nY _ {t} = y _ {t} \\oplus y _ {t} ^ {\\prime} \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 669, + 316, + 890, + 333 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nA = \\operatorname {S o f t M a x} \\left(\\operatorname {M L P} \\left(Y _ {t}\\right)\\right) \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 612, + 335, + 890, + 351 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nY _ {a} = A \\cdot Y _ {t} \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 668, + 354, + 890, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Loss Function", + "text_level": 1, + "bbox": [ + 500, + 386, + 643, + 400 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A fully connected layer with a hidden layer is employed to address the final 6-DOFs pose regression task. The displacement vector of the regression is denoted as $\\hat{p}$ representing the magnitude and direction of movement, while the rotational Euler angles are denoted as $\\hat{q}$ indicating the rotational orientation in three-dimensional space.", + "bbox": [ + 496, + 410, + 892, + 501 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {L o s s} = \\alpha | | \\hat {p} - p | | _ {2} + \\beta | | \\hat {q} - q | | _ {2} + \\lambda \\sum_ {i = 0} ^ {n} w _ {i} ^ {2} \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 526, + 515, + 890, + 534 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$p$ and $q$ represent the ground truth obtained from the dataset, while $\\alpha$ , $\\beta$ , and $\\lambda$ serve as weight proportion coefficients. In order to tackle the prominent concern of overfitting, especially in the end-to-end setting, we incorporate the L2 regularization into the loss function. This regularization, implemented as the second paradigm for the network weights $w$ , effectively mitigates overfitting.", + "bbox": [ + 496, + 546, + 893, + 652 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Overall Architecture", + "text_level": 1, + "bbox": [ + 500, + 665, + 694, + 680 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Next, we will present the PEPNet pipeline in pseudo-code, utilizing the previously defined variables and formulas as described in Algorithm 1.", + "bbox": [ + 496, + 690, + 890, + 736 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 500, + 753, + 624, + 768 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we present an extensive and in-depth analysis of PEPNet's performance on both indoor and outdoor datasets, encompassing evaluations based on rotational and translational mean squared error (MSE), model parameters, floating-point operations (FLOPs), and inference time. PEPNet's training and testing are performed on a server furnished with an AMD Ryzen 7950X CPU, an RTX GeForce 4090 GPU, and 32GB of memory.", + "bbox": [ + 496, + 779, + 893, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "18116", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/87cc3789c1bcb4f5e467c8fd27e6367cb42f56cced06a19032593cae04147682.jpg", + "image_caption": [ + "Figure 4. 
Event-based CPR Dataset visualization." + ], + "image_footnote": [], + "bbox": [ + 75, + 92, + 903, + 256 + ], + "page_idx": 5 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 PEPNet pipeline" + ], + "code_body": "Input: Raw event stream $\\mathcal{E}$ \nParameters: $N_{p} = 1024,R = 1e + 3,S_{\\mathrm{num}} = 3,K = 24$ \nOutput: 6-DOFs pose $(\\hat{p},\\hat{q})$ \n1: Preprocessing \n2: for $j$ in len(ε) do \n3: $P_{i}$ .append $(e_{j\\rightarrow l})$ .. $j = l$ ; where $t_l - t_j = R$ \n4: if $(len(P_i) > N_p)$ .. $i = i + 1$ \n5: end for \n6: $PN =$ Normalize(Sampling $(P))$ \n7: Hierarchy structure \n8: for stage in range $(S_{\\mathrm{num}})$ do \n9: Grouping and Sampling $(PN)$ \n10: Get $PGS\\in [B,N_{\\mathrm{stage}},K,2*D_{\\mathrm{stage}-1}]$ \n11: Local Extractor $(PGS)$ \n12: Get $F_{\\mathrm{local}}\\in [B,N_{\\mathrm{stage}},K,D_{\\mathrm{stage}}]$ \n13: Attentive Aggregate $(F_{\\mathrm{local}})$ \n14: Get $F_{\\mathrm{aggre}}\\in [B,N_{\\mathrm{stage}},D_{\\mathrm{stage}}]$ \n15: Global Extractor $(F_{\\mathrm{aggre}})$ \n16: Get $PN = F_{\\mathrm{global}}\\in [B,N_{\\mathrm{stage}},D_{\\mathrm{stage}}]$ \n17: end for \n18: A-Bi-LSTM \n19: Forward Get $y_{t}\\in [B,N_{3},DS_{\\mathrm{num}} / 2]$ \n20: Reverse Get $y_t^\\prime \\in [B,N_3,D_{S_{\\mathrm{num}}} / 2]$ \n21: Attention Get $Y_{a}\\in [B,D_{S_{\\mathrm{num}}}]$ \n22: Regressor \n23: Get 6-DOFs pose $(\\hat{p},\\hat{q})$", + "bbox": [ + 76, + 325, + 486, + 723 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Dataset", + "text_level": 1, + "bbox": [ + 76, + 753, + 171, + 768 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We employ the widely evaluated event-based CPR dataset IJRR [23] and M3ED [4], encompassing both indoor and outdoor scenes. Two distinct methods to partition the CPR dataset [26] have been benchmarked: random split and novel split. In the random split approach, the dataset is randomly selected $70\\%$ of all sequences for training and allocated the remaining sequences for testing. On the other hand, in the novel split, we divide the data chronologically,", + "bbox": [ + 75, + 779, + 472, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "using the initial $70\\%$ of sequences for training and the subsequent $30\\%$ for testing.", + "bbox": [ + 498, + 308, + 890, + 339 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Baseline", + "text_level": 1, + "bbox": [ + 500, + 349, + 601, + 364 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We perform a thorough evaluation of our proposed method by comparing it with SOTA event-based approaches, namely CNN-LSTM [37] and AECRN [18]. Moreover, we present results derived from other well-established computer vision methods, including PoseNet[14], Bayesian PoseNet [13], Pairwise-CNN [15], LSTM-Pose [39], and SP-LSTM[26].", + "bbox": [ + 496, + 373, + 892, + 479 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. IJRR Dataset Results", + "text_level": 1, + "bbox": [ + 500, + 489, + 702, + 503 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.1 Random Split Results", + "text_level": 1, + "bbox": [ + 500, + 513, + 705, + 529 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Based on the findings presented in Tab. 1, it is apparent that PEPNet surpasses other models concerning both rotation and translation errors across all sequences. 
Notably, PEPNet achieves these impressive results despite utilizing significantly fewer model parameters and FLOPs compared to the frame-based approach. Moreover, PEPNet not only exhibits a remarkable $38\\%$ improvement in the average error compared to the SOTA CNN-LSTM method but also attains superior results across nearly all sequences. In addressing the more intricate and challenging hdr_poster sequences, while the frame-based approach relies on a denoising network to yield improved results [12], PEPNet excels by achieving remarkable performance without any additional processing. This observation strongly implies that PEPNet's Point Cloud approach exhibits greater robustness compared to the frame-based method, highlighting its inherent superiority in handling complex scenarios.", + "bbox": [ + 496, + 537, + 890, + 794 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Furthermore, we introduce an alternative variant, PEPNet $_{tinyy}$ , which integrates a lighter model architecture while preserving relatively strong performance. As depicted in Fig. 3, PEPNet consists of three stages, and the model's size is contingent upon the dimensionality of MLPs at each stage. The dimensions for the standard structure are [64, 128, 256], whereas those for the tiny structure are [16,", + "bbox": [ + 496, + 795, + 890, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "18117", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/bc56e0911fd7a879a1a5cb8c7aa1d63886680b3997df7462167acd85f96cd008.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
NetworkPoseNetBayesian PoseNetPairwise-CNNLSTM-PoseSP-LSTMCNN-LSTMPEPNetPEPNettiny
Parameter12.43M22.35M22.34M16.05M135.25M12.63M0.774M0.064M
FLOPs1.584G3.679G7.359G1.822G15.623G1.998G0.459G0.033G
shapesRotation0.109m,7.388°0.142m,9.557°0.095m,6.332°0.032m,4.439°0.025m,2.256°0.012m,1.652°0.005m,1.372°0.006m,1.592°
boxTranslation0.193m,6.977°0.190m,6.636°0.178m,6.153°0.083m,6.215°0.036m,2.195°0.013m,0.873°0.017m,0.845°0.031m,1.516°
shapesTranslation0.238m,6.001°0.264m,6.235°0.201m,5.146°0.056m,5.018°0.035m,2.117°0.020m,1.471°0.011m,0.582°0.013m,0.769°
dynamic_6dof0.297m,9.332°0.296m,8.963°0.245m,5.962°0.097m,6.732°0.031m,2.047°0.016m,1.662°0.015m,1.045°0.018m,1.144°
hdr_poster0.282m,8.513°0.290m,8.710°0.232m,7.234°0.108m,6.186°0.051m,3.354°0.033m,2.421°0.016m,0.991°0.028m,1.863°
posterTranslation0.266m,6.516°0.264m,5.459°0.211m,6.439°0.079m,5.734°0.036m,2.074°0.020m,1.468°0.012m,0.588°0.019m,0.953°
Average0.231m,7.455°0.241m,7.593°0.194m,6.211°0.076m,5.721°0.036m,2.341°0.019m,1.591°0.013m,0.904°0.019m,1.306°
", + "bbox": [ + 81, + 87, + 911, + 227 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1. IJRR random split results. The table presents the median error for each sequence, as well as the average error across the six sequences. It also presents the number of parameters and FLOPs for each model. Bold indicates the most advanced result, while underline signifies the second-best result.", + "bbox": [ + 75, + 237, + 893, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/592b7ac577719929ef85ee94af2b04b4fe037688f1d81538cd3ab38249865a88.jpg", + "image_caption": [ + "Figure 5. Error distribution of event-based CPR results achieved by PEPNet using a random split. (a) Translation errors. (b) Rotation errors." + ], + "image_footnote": [], + "bbox": [ + 116, + 303, + 431, + 415 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "32, 64]. As indicated in Tab. 1, even with a mere $0.5\\%$ of the CNN-LSTM's parameter, $\\mathrm{PEPNet}_{tiny}$ achieves comparable and even slightly superior results. This remarkable outcome emphasizes the superiority of leveraging event cloud data processing directly.", + "bbox": [ + 75, + 494, + 468, + 570 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.2 Error Distribution", + "text_level": 1, + "bbox": [ + 76, + 590, + 261, + 604 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fig. 5 illustrates the error distribution of PEPNet across six distinct sequences using the random split method, specifically: shape rotation, box translation, shape translation, dynamic 6-dof, hdr poster, and poster translation. To enhance clarity, the top and bottom boundaries of the box represent the first and third quartiles, respectively, indicating the inter-quartile range (IQR). The median is denoted by the band within the box. It is observed that the IQR of the translation error approximately locates between $0.004\\mathrm{m}$ and $0.024\\mathrm{m}$ , while the orientation error ranges from $0.4^{\\circ}$ to $1.9^{\\circ}$ .", + "bbox": [ + 75, + 614, + 468, + 765 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.3 Novel Split Results", + "text_level": 1, + "bbox": [ + 76, + 786, + 263, + 801 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To assess the model's robustness, we adopt the novel split as an evaluation criterion, as shown in Tab. 2. During the training process, we observe a more pronounced overfitting phenomenon in PEPNet compared to the random split. We attribute this observation to the disparities in data distributions between the trainset and the testset, as well as the lim", + "bbox": [ + 75, + 810, + 470, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ited data size. Contrary to the methods we compared, PEP-Net does not necessitate pre-trained weights. For instance, SP-LSTM relies on pre-trained VGG19 weights from ImageNet, while AECRN requires synthetic heuristic depth and an extensive pretraining process.", + "bbox": [ + 496, + 306, + 890, + 382 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To address overfitting, PEPNet employs conventional methods that yield consistent and comparable results with the SOTA on three shape sequences that are displayed in the network column of Tab. 2. It is essential to note that AE-CRN adopts a hybrid approach, combining neural network regression for scene coordinates with derivable RANSAC for pose estimation. 
Moreover, this method incurs significant time consumption, with even the SOTA DSAC* algorithm taking nearly 30ms, excluding additional time for data format conversion. This time constraint presents compatibility challenges with the low-latency nature of event cameras. In contrast, PEPNet can execute on a server in just 6.7ms, with the main time-consuming module being grouping and sampling. Furthermore, with potential field programmable gate array (FPGA) or application-specific integrated chip (ASIC) support for these operations[6, 20], PEPNet's performance can be further accelerated.", + "bbox": [ + 496, + 382, + 892, + 638 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. M3ED Dataset Results", + "text_level": 1, + "bbox": [ + 498, + 648, + 710, + 662 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We selected three robots (Car, Falcon, and Spot) to extend the application scope of PEPNet across five sequences in an outdoor night setting, as illustrated in the Tab. 3. Due to its much higher resolution than IJRR, we performed downsampling processing and more number of points (1024 to 2048), and other experimental configurations are consistent with the IJRR dataset with random split. The results demonstrate the superior performance of PEPNet even in more challenging outdoor environments.", + "bbox": [ + 496, + 671, + 890, + 806 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Attention Visualization", + "text_level": 1, + "bbox": [ + 500, + 816, + 714, + 830 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Fig. 6, We observe that the attention scores exhibit larger at both the beginning and end. We tentatively infer that the model focuses more on the difference in features between the start and the end for CPR, which is also", + "bbox": [ + 496, + 840, + 890, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "18118", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/93188618a3a61773516235e79195bd9e5c3ac26dbd48ebafa0755c011866e445.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
NetworkPoseNetBayesian PoseNetPairwise-CNNLSTM-PoseSP-LSTMDSAC*AECRNPEPNet
shapes Rotation0.201m,12.499°0.164m,12.188°0.187m,10.426°0.061m,7.625°0.045m,5.017°0.029m,2.3°0.025m,2.0°0.016m,1.745°
shapes Translation0.198m,6.696°0.213m,7.441°0.225m,11.627°0.108m,8.468°0.072m,4.496°0.038m,2.2°0.029m,1.7°0.026m,1.659°
shapes_6dof0.320m,13.733°0.326m,13.296°0.314m,13.245°0.096m,8.973°0.078m,5.524°0.054m,3.1°0.052m,3.0°0.045m,2.984°
Average0.240m,11.067°0.234m,10.975°0.242m,11.766°0.088m,8.355°0.065m,5.012°0.040m,2.53°0.035m,2.23°0.029m,2.13°
Inference time5ms6ms12ms9.49ms4.79ms30ms30ms6.7ms
", + "bbox": [ + 99, + 88, + 872, + 170 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3863ad134b66fc40ed82dc9e903642b5b6c26ebd236dbca005001a12bece884b.jpg", + "table_caption": [ + "Table 2. IJRR novel split results. Referred to as Tab. 1, showcases identical information. To assess the model's runtime, we conduct tests on a server platform, specifically focusing on the average time required for inference on a single sample." + ], + "table_footnote": [], + "table_body": "
M3EDPoseNetLSTM-PoseCNN-LSTMPEPNet
INPUTEvent FrameEvent FrameEvent framePoint Cloud
Falcon_Night_High_Beans0.181m,2.221°0.112m,0.946°0.107m,1.435°0.082m,0.575°
Car_Night_Pen_S_Loop1.618m,8.126°0.667m,4.914°0.773m,3.005°0.577m,1.319°
Spot_Night_Pen_Loop1.735m,5.502°0.761m,7.898°0.401m,1.771°0.468m,1.062°
Car_Pen_S_Loop_darker1.841m,4.575°0.751m,3.738°0.598m,2.772°0.385m,1.01°
Spot_Plaza_Light1.372m,9.564°0.565m,5.221°0.273m,2.001°0.348m,1.234°
Avergae1.349m,5.998°0.571m,4.543°0.43m,2.197°0.372m,1.04°
", + "bbox": [ + 81, + 231, + 485, + 323 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/309405956b32f5b3a782423314bcccc6797ff1847efd7e8bc35240e496f57965.jpg", + "table_caption": [ + "Table 3. Outdoor extension on M3ED dataset with random split." + ], + "table_footnote": [], + "table_body": "
ConditionHSLSTMBi-LSTMAggregationTranslationRotationT+R
1Max0.015m0.884°3.04
2Temporal0.014m0.786°2.77
3Max0.014m0.833°2.85
4Temporal0.012m0.603°2.25
5Max0.014m0.813°2.82
6Temporal0.011m0.582°2.12
", + "bbox": [ + 81, + 359, + 468, + 446 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0d7401f1ccd5efb782c94f401a63faae94e0db35c05e1b0198b14a0f92312caa.jpg", + "image_caption": [ + "Figure 6. Visualization of the attention values in the time domain. 128 points in chronological order on the horizontal axis and the attention values of the corresponding point on the vertical axis." + ], + "image_footnote": [], + "bbox": [ + 114, + 500, + 421, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "seen in the geometry approach [7, 24].", + "bbox": [ + 76, + 717, + 333, + 732 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.6. Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 741, + 230, + 758 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Key Module Ablation: In order to validate the efficacy of key modules, we conducted an ablation experiment focusing on three primary components: hierarchy structure, Bi-LSTM, and temporal aggregation. These experiments are designed to evaluate rotation and translation errors on the shape translation sequence with the random split. The combined error $(\\mathrm{T} + \\mathrm{R})$ is measured after processing. Our experimental setup comprises four distinct conditions, as illustrated in Tab. 4. Condition 1 represents the sole utility of the system.", + "bbox": [ + 75, + 763, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/8725570befbce6faf8c855012b241a89b3d3b57683239e015bb3604fdf9141e8.jpg", + "table_caption": [ + "Table 4. Abalation Study for three key modules. $\\mathrm{T} + \\mathrm{R} =$ Translation + Rotation $\\cdot \\pi /{180}\\left( {\\mathrm{\\;m} + \\mathrm{{rad}}}\\right)$" + ], + "table_footnote": [], + "table_body": "
Scenceα = 0.5, β = 0.5α = 0.25, β = 0.75α = 0.75, β = 0.25
shape Translation0.0302m,1.684°,5.960.0359m,1.72°,6.590.0303m,2.056°,6.62
shapeRotation0.0143m,2.888°,6.470.0159m,2.68°,6.270.014m,3.36°,7.26
dynamic_6 dof0.0542m,2.799°,10.30.0611m,2.488°,10.50.0516m,3.251°,10.8
", + "bbox": [ + 511, + 231, + 879, + 280 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 5. Abalation Study for loss function's coefficient.", + "bbox": [ + 529, + 289, + 859, + 303 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ization of the hierarchy structure (HS), while Condition 2 combines the ordinary LSTM. Condition 3 incorporates the bidirectional LSTM, and Condition 4 integrates the attention mechanism for feature aggregation. The ablation experiments reveal significant insights. Experiments 1 and 3 demonstrate that augmenting LSTM enhances the extraction of explicit temporal features. Moreover, experiments 3 and 5 reveal the effectiveness of the bidirectional LSTM in extracting motion information. Additionally, experiments 5 and 6 confirm the notable impact of attention in feature aggregation, resulting in a substantial reduction in error rates.", + "bbox": [ + 496, + 332, + 890, + 498 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Loss ablation: We incorporated the experiment involving scaling coefficients of the loss function in Tab. 5. This experiment utilized a tiny version of PEPNet, trained for 100 epochs, and the outcome is MSE in translation, rotation, and $\\mathrm{T} + \\mathrm{R}$ . Across three distinct motion scenarios (translation, rotation, and 6dof) varied coefficient ratios induced deviations in the obtained results. For example, in shape rotation, increasing the weight on rotation makes the results better.", + "bbox": [ + 496, + 500, + 892, + 633 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 652, + 619, + 669 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we introduce an end-to-end CPR network that operates directly on raw event clouds without frame-based preprocessing. PEPNet boasts an impressively lightweight framework that adeptly extracts spatial and temporal features, leading to SOTA performance. Diverging from frame-based approaches, our method prioritizes preserving the inherent distribution of the event cloud, capitalizing on its sparse nature to achieve extraordinary capabilities for ultra-low-power applications.", + "bbox": [ + 496, + 680, + 890, + 815 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgment. This work was supported in part by the Young Scientists Fund of the National Natural Science Foundation of China (Grant 62305278), as well as the Hong Kong University of Science and Technology (Guangzhou) Joint Funding Program under Grant 2023A03J0154 and 2024A03J0618.", + "bbox": [ + 496, + 816, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "18119", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 121, + 173, + 136 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Vassileios Balntas, Shuda Li, and Victor Prisacariu. Relocnet: Continuous metric learning relocalisation using neural nets. In Proceedings of the European Conference on Computer Vision (ECCV), pages 751-767, 2018. 2", + "[2] Eric Brachmann and Carsten Rother. Visual camera relocalization from rgb and rgb-d images using dsac. IEEE transactions on pattern analysis and machine intelligence, 44(9):5847-5865, 2021. 2", + "[3] Eric Brachmann, Alexander Krull, Sebastian Nowozin, Jamie Shotton, Frank Michel, Stefan Gumhold, and Carsten Rother. 
Dsac-differentiable ransac for camera localization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6684-6692, 2017. 2", + "[4] Kenneth Chaney, Fernando Cladora, Ziyun Wang, Anthony Bisulco, M Ani Hsieh, Christopher Korpela, Vijay Kumar, Camillo J Taylor, and Kostas Daniilidis. M3ed: Multi-robot, multi-sensor, multi-environment event dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4015–4022, 2023. 2, 6", + "[5] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020. 3", + "[6] Haotian Fu, Yulong Huang, Tingran Chen, Chenyi Fu, Hongwei Ren, Yue Zhou, Shouzhong Peng, Zhirui Zong, Biao Pan, and Bojun Cheng. Ds-cim: A 40nm asynchronous dual-spike driven, mram compute-in-memory macro for spiking neural network. IEEE Transactions on Circuits and Systems I: Regular Papers, 2024. 7", + "[7] Guillermo Gallego, Christian Forster, Elias Mueggler, and Davide Scaramuzza. Event-based camera pose tracking using a generative event model. arXiv preprint arXiv:1510.01972, 2015. 8", + "[8] Guillermo Gallego, Jon EA Lund, Elias Mueggler, Henri Rebecq, Tobi Delbruck, and Davide Scaramuzza. Event-based, 6-dof camera tracking from photometric depth maps. IEEE transactions on pattern analysis and machine intelligence, 40(10):2402-2412, 2017. 2", + "[9] Guillermo Gallego, Tobi Delbrück, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew J Davison, Jörg Conradt, Kostas Daniilidis, et al. Event-based vision: A survey. IEEE transactions on pattern analysis and machine intelligence, 44(1):154-180, 2020. 2", + "[10] Yulan Guo, Hanyun Wang, Qingyong Hu, Hao Liu, Li Liu, and Mohammed Bennamoun. Deep learning for 3d point clouds: A survey. IEEE transactions on pattern analysis and machine intelligence, 43(12):4338-4364, 2020. 2", + "[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 2" + ], + "bbox": [ + 78, + 147, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[12] Yifan Jin, Lei Yu, Guangqiang Li, and Shumin Fei. A 6-dofs event-based camera relocalization system by cnn-lstm and image denoising. Expert Systems with Applications, 170: 114535, 2021. 2, 6", + "[13] Alex Kendall and Roberto Cipolla. Modelling uncertainty in deep learning for camera relocalization. In 2016 IEEE international conference on Robotics and Automation (ICRA), pages 4762-4769. IEEE, 2016. 6", + "[14] Alex Kendall, Matthew Grimes, and Roberto Cipolla. Posenet: A convolutional network for real-time 6-dof camera relocalization. In Proceedings of the IEEE international conference on computer vision, pages 2938-2946, 2015. 2, 6", + "[15] Zakaria Laskar, Iaroslav Melekhov, Surya Kalia, and Juho Kannala. Camera relocalization by computing pairwise relative poses using convolutional neural network. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 929-938, 2017. 2, 6", + "[16] Yann LeCun, Yoshua Bengio, and Geoffrey Hinton. Deep learning. nature, 521(7553):436-444, 2015. 
2", + "[17] Patrick Lichtsteiner, Christoph Posch, and Tobi Delbruck. A $128 \\times 128$ 120 db $15\\mu s$ latency asynchronous temporal contrast vision sensor. IEEE journal of solid-state circuits, 43 (2):566-576, 2008. 1", + "[18] Hu Lin, Meng Li, Qianchen Xia, Yifeng Fei, Baocai Yin, and Xin Yang. 6-dof pose relocalization for event cameras with entropy frame and attention networks. In The 18th ACM SIGGRAPH International Conference on Virtual-Reality Continuum and its Applications in Industry, pages 1–8, 2022. 2, 6", + "[19] Yimin Lin, Zhaoxiang Liu, Jianfeng Huang, Chaopeng Wang, Guoguang Du, Jinqiang Bai, and Shiguo Lian. Deep global-relative networks for end-to-end 6-dof visual localization and odometry. In PRICAI 2019: Trends in Artificial Intelligence: 16th Pacific Rim International Conference on Artificial Intelligence, Cuvu, Yanuca Island, Fiji, August 26–30, 2019, Proceedings, Part II, pages 454–467. Springer, 2019. 2", + "[20] Haobo Liu, Zhengyang Qian, Wei Wu, Hongwei Ren, Zhiwei Liu, and Leibin Ni. Afpr-cim: An analog-domain floating-point rram-based compute-in-memory architecture with dynamic range adaptive fp-adc. arXiv preprint arXiv:2402.13798, 2024. 7", + "[21] Xu Ma, Can Qin, Haoxuan You, Haoxi Ran, and Yun Fu. Rethinking network design and local geometry in point cloud: A simple residual mlp framework. In International Conference on Learning Representations, 2021. 3", + "[22] Anton Mitrokhin, Zhiyuan Hua, Cornelia Fermuller, and Yiannis Aloimonos. Learning visual motion segmentation using event surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14414-14423, 2020. 2", + "[23] Elias Mueggler, Henri Rebecq, Guillermo Gallego, Tobi Delbruck, and Davide Scaramuzza. The event-camera dataset and simulator: Event-based data for pose estimation, visual odometry, and slam. The International Journal of Robotics Research, 36(2):142-149, 2017. 1, 2, 6", + "[24] Elias Mueggler, Guillermo Gallego, Henri Rebecq, and Davide Scaramuzza. Continuous-time visual-inertial odometry" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "18120", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "for event cameras. IEEE Transactions on Robotics, 34(6): 1425-1440, 2018. 8", + "[25] Tayyab Naseer and Wolfram Burgard. Deep regression for monocular camera-based 6-dof global localization in outdoor environments. In 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 1525-1530. IEEE, 2017. 2", + "[26] Anh Nguyen, Thanh-Toan Do, Darwin G Caldwell, and Nikos G Tsagarakis. Real-time 6dof pose relocalization for event cameras with stacked spatial LSTM networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 2, 6", + "[27] Christoph Posch, Daniel Matolin, and Rainer Wohlgenannt. A qvga 143 db dynamic range frame-free pwm image sensor with lossless pixel-level video compression and time-domain cds. IEEE Journal of Solid-State Circuits, 46(1):259-275, 2010. 1", + "[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 2, 3", + "[29] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. 
Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 2, 3", + "[30] Noha Radwan, Abhinav Valada, and Wolfram Burgard. Vlocnet++: Deep multitask learning for semantic visual localization and odometry. IEEE Robotics and Automation Letters, 3(4):4407-4414, 2018. 2", + "[31] Henri Rebecq, Daniel Gehrig, and Davide Scaramuzza. Esim: an open event camera simulator. In Conference on robot learning, pages 969-982. PMLR, 2018. 2", + "[32] Hongwei Ren, Yue Zhou, Haotian Fu, Yulong Huang, Renjing Xu, and Bojun Cheng. Ttpoint: A tensorized point cloud network for lightweight action recognition with event cameras. arXiv preprint arXiv:2308.09993, 2023. 2", + "[33] Hongwei Ren, Yue Zhou, Yulong Huang, Haotian Fu, Xiaopeng Lin, Jie Song, and Bojun Cheng. Spikepoint: An efficient point-based spiking neural network for event cameras action recognition. arXiv preprint arXiv:2310.07189, 2023.", + "[34] Yusuke Sekikawa, Kosuke Hara, and Hideo Saito. Eventnet: Asynchronous recursive event processing. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3887-3896, 2019. 2", + "[35] Yoli Shavit and Ron Ferens. Introduction to camera pose estimation with deep learning. arXiv preprint arXiv:1907.05272, 2019. 1", + "[36] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 2", + "[37] Ahmed Tabia, Fabien Bonardi, and Samia Bouchafa. Deep learning for pose estimation from event camera. In 2022 International Conference on Digital Image Computing: Techniques and Applications (DICTA), pages 1-7. IEEE, 2022. 6" + ], + "bbox": [ + 78, + 92, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[38] Abhinav Valada, Noha Radwan, and Wolfram Burgard. Deep auxiliary learning for visual localization and odometry. In 2018 IEEE international conference on robotics and automation (ICRA), pages 6939-6946. IEEE, 2018. 2", + "[39] Florian Walch, Caner Hazirbas, Laura Leal-Taixe, Torsten Sattler, Sebastian Hilsenbeck, and Daniel Cremers. Image-based localization using lstms for structured feature correlation. In Proceedings of the IEEE International Conference on Computer Vision, pages 627-637, 2017. 2, 6", + "[40] Qinyi Wang, Yexin Zhang, Junsong Yuan, and Yilong Lu. Space-time event clouds for gesture recognition: From rgb cameras to event cameras. In 2019 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 1826-1835. IEEE, 2019. 2, 3", + "[41] Jian Wu, Liwei Ma, and Xiaolin Hu. Delving deeper into convolutional neural networks for camera relocalization. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 5644-5651. IEEE, 2017. 2", + "[42] Wenxuan Wu, Zhongang Qi, and Li Fuxin. Pointconv: Deep convolutional networks on 3d point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9621-9630, 2019. 3", + "[43] Jiancheng Yang, Qiang Zhang, Bingbing Ni, Linguuo Li, Jinxian Liu, Mengdie Zhou, and Qi Tian. Modeling point clouds with self-attention and gumbel subset sampling. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3323–3332, 2019. 3", + "[44] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16259-16268, 2021. 3" + ], + "bbox": [ + 501, + 92, + 890, + 529 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "18121", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/1bfdaa4b-618c-45a6-9de6-a1e062fefbcd_model.json b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/1bfdaa4b-618c-45a6-9de6-a1e062fefbcd_model.json new file mode 100644 index 0000000000000000000000000000000000000000..60adcaec57b8ca93a6f47b24e8e23468a6a0a4c9 --- /dev/null +++ b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/1bfdaa4b-618c-45a6-9de6-a1e062fefbcd_model.json @@ -0,0 +1,2101 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.251, + 0.131, + 0.72, + 0.175 + ], + "angle": 0, + "content": "A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization" + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.204, + 0.829, + 0.24 + ], + "angle": 0, + "content": "Hongwei Ren*, Jiadong Zhu*, Yue Zhou, Haotian Fu, Yulong Huang, Bojun Cheng † \nThe Hong Kong University of Science and Technology(Guangzhou)" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.242, + 0.9, + 0.258 + ], + "angle": 0, + "content": "{hren066, jzhu484, yzhou833, hfu373, yhuang496}@connect.hkust-gz.edu.cn, bocheng@hkust-gz.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.292, + 0.314, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.324, + 0.473, + 0.791 + ], + "angle": 0, + "content": "Event cameras exhibit remarkable attributes such as high dynamic range, asynchronicity, and low latency, making them highly suitable for vision tasks that involve high-speed motion in challenging lighting conditions. These cameras implicitly capture movement and depth information in events, making them appealing sensors for Camera Pose Relocalization (CPR) tasks. Nevertheless, existing CPR networks based on events neglect the pivotal fine-grained temporal information in events, resulting in unsatisfactory performance. Moreover, the energy-efficient features are further compromised by the use of excessively complex models, hindering efficient deployment on edge devices. In this paper, we introduce PEPNet, a simple and effective point-based network designed to regress six degrees of freedom (6-DOFs) event camera poses. We rethink the relationship between the event camera and CPR tasks, leveraging the raw Point Cloud directly as network input to harness the high-temporal resolution and inherent sparsity of events. PEPNet is adept at abstracting the spatial and implicit temporal features through hierarchical structure and explicit temporal features by Attentive Bidirectional Long Short-Term Memory (A-Bi-LSTM). 
By employing a carefully crafted lightweight design, PEPNet delivers state-of-the-art (SOTA) performance on both indoor and outdoor datasets with meager computational resources. Specifically, PEPNet attains a significant \\(38\\%\\) and \\(33\\%\\) performance improvement on the random split IJRR and M3ED datasets, respectively. Moreover, the lightweight design version PEPNet™ accomplishes results comparable to the SOTA while employing a mere \\(0.5\\%\\) of the parameters." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.821, + 0.21, + 0.837 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.847, + 0.47, + 0.879 + ], + "angle": 0, + "content": "Event camera is a type of bio-inspired vision sensor that responds to local changes in illumination exceeding a pre" + }, + { + "type": "image", + "bbox": [ + 0.569, + 0.29, + 0.825, + 0.426 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.436, + 0.895, + 0.493 + ], + "angle": 0, + "content": "Figure 1. The average results using the random split method benchmarked on the CPR dataset [23]. The vertical axis represents the combined rotational and translational errors \\((\\mathrm{m} + \\mathrm{rad})\\). PEPNet is the first point-based CPR network for event cameras." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.521, + 0.893, + 0.809 + ], + "angle": 0, + "content": "defined threshold [17]. Differing from conventional frame-based cameras, event cameras independently and asynchronously produce pixel-level events. Notably, event cameras boast an exceptional triad: high dynamic range, low latency, and ultra-high temporal resolution. This unique combination empowers superior performance under challenging light conditions, adeptly capturing the swift scene and rapid motion changes in near-microsecond precision [27]. Additionally, event cameras boast remarkably low power consumption positioning them as a popular choice for many power-constrained devices. Camera Pose Relocalization (CPR) is an emerging application in power-constrained devices and has gained significant attention. It aims to train several scene-specific neural networks to accurately relocalize the camera pose within the original scene used for training. It is extensively employed in numerous applications, including Virtual Reality (VR), Augmented Reality (AR), and robotics [35], all of which are deployed on battery-powered devices and are power-constrained." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.895, + 0.903 + ], + "angle": 0, + "content": "CPR tasks using event cameras significantly diverge from their conventional CPR counterpart that employs frame-based cameras, primarily due to the inherent dissimilarity in data output mechanisms between these two camera types. Furthermore, events inherently encompass information regarding object motion and depth changes" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.329, + 0.901 + ], + "angle": 0, + "content": "*equal contribution. †corresponding author." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "18112" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.288 + ], + "angle": 0, + "content": "across precise temporal and spatial dimensions attributes of paramount significance within the domain of CPR tasks [8, 31]. 
Regrettably, existing event-based CPR networks often derive from the conventional camera network paradigms and inadequately address the unique attributes of event data. More specifically, events are transformed into various representations such as event images [26], time surfaces [18], and other representations[18], leading to the loss of their fine-grained temporal information. Furthermore, most event-based methods tend to overlook the computational load of the network, only prioritizing elevated accuracy, which contradicts the fundamental design principles of event cameras [9]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.292, + 0.473, + 0.489 + ], + "angle": 0, + "content": "A suitable and faithful data representation is crucial for event cloud processing. Point Cloud is a collection of 3D points \\((x, y, z)\\) that represents the shape and surface of an object or environment commonly used in lidar and depth cameras [10]. The distance \\((z)\\) is of great meaning to the tasks. As for event camera, by treating each event's temporal information as the third dimension, event inputs \\((x, y, t)\\) can be regarded as points and aggregated into a pseudo-Point Cloud [28, 29, 32-34, 40]. However, given that the \\(t\\) dimension of Event Cloud is not strictly equivalent to the spatial dimensions \\((x, y, z)\\), direct transplantation of the Point Cloud network has not yet exhibited a satisfactory performance advantage in processing event data [32, 40]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.494, + 0.473, + 0.902 + ], + "angle": 0, + "content": "In this study, we introduce PEPNet, the first point-based end-to-end CPR network designed to harness the attributes of event cameras. A comparison of our performance and method to other frame-based methods is illustrated Fig. 1 and Fig. 2, respectively. Moreover, diverging from other point-based approaches in event data processing [32, 40], PEPNet demonstrates careful attention to detail by systematically assessing the difference between Event Cloud and Point Cloud in its design approach. This approach enables a more precise extraction of spatio-temporal features and facilitates solutions for a spectrum of event-based tasks. Our main contributions are as follows: First, in the preprocessing stage, PEPNet directly processes the raw data obtained from the event cameras, meticulously preserving the fine-grained temporal coordinate and the order inherent in the event data. Second, PEPNet proficiently captures spatial features and implicit temporal features through its hierarchical structure with temporal aggregation. Subsequently, the explicit temporal feature is processed by the A-BiLSTM, thanks to the preservation of the input sequence in previous stages. As such, this architecture is tailored to accommodate the high temporal resolution and sparse characteristics inherent in event cameras. Thirdly, by restricting ourselves to minimal hardware resources and deliberately avoiding heavy computational modules, PEPNet not only attains SOTA results on IJRR [23] and M3ED [4] dataset but also features a lightweight design that can be executed" + }, + { + "type": "image", + "bbox": [ + 0.57, + 0.089, + 0.828, + 0.206 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.216, + 0.892, + 0.245 + ], + "angle": 0, + "content": "Figure 2. Two different event-based processing methods, frame-based and point-based." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.272, + 0.892, + 0.334 + ], + "angle": 0, + "content": "in real-time. We hope such an approach could potentially democratize computer vision technology by making it accessible to a wider range of devices and applications in the community of edge computing." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.348, + 0.642, + 0.364 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.373, + 0.825, + 0.39 + ], + "angle": 0, + "content": "2.1. Frame-based CPR Learning Methods" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.398, + 0.892, + 0.58 + ], + "angle": 0, + "content": "Deep learning, crucial for vision tasks like classification and object detection [16], has seen advancements such as PoseNet's innovative transfer learning [14]. Utilizing VGG, ResNet [11, 36], LSTM, and customized loss functions [25, 39, 41], researchers enhanced this approach. Auxiliary Learning methods further improved performance [19, 30, 38], although overfitting remains a challenge. Hybrid pose-based methods, combining learning with traditional pipelines [1, 15], offer promise. DSAC series, for instance, achieve high pose estimation accuracy [2, 3], but come with increased computational costs and latency, especially for edge devices." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.59, + 0.819, + 0.607 + ], + "angle": 0, + "content": "2.2. Event-based CPR Learning Methods" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.614, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Event-based CPR methods often derive from the frame-based CPR network. SP-LSTM [26] employed the stacked spatial LSTM networks to process event images, facilitating a real-time pose estimator. To address the inherent noise in event images, [12] proposed a network structure combining denoise networks, convolutional neural networks, and LSTM, achieving good performance under complex working conditions. In contrast to the aforementioned methods, a novel representation named Reversed Window Entropy Image (RWEI) [18] is introduced, which is based on the widely used event surface [22] and serves as the input to an attention-based DSAC* pipeline [2] to achieve SOTA results. However, the computationally demanding architecture involving representation transformation and hybrid pipeline poses challenges for real-time execution. Additionally, all existing methods ignore the fine-grained temporal feature of the event cameras, and accumulate events into frames for processing, resulting in unsatisfactory performance." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18113" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.091, + 0.277, + 0.106 + ], + "angle": 0, + "content": "2.3. Point Cloud Network" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.115, + 0.473, + 0.449 + ], + "angle": 0, + "content": "Point-based methodologies have transformed the direct processing of Point Cloud, with PointNet [28] as a standout example. Taking a step beyond, PointNet++ [29] introduced a Set Abstraction module. While it initially employed a straightforward MLP in the feature extractor, recent advancements have seen the development of more sophisticated feature extractors to enhance Point Cloud processing [5, 21, 42, 44]. When extending these techniques to Event Cloud, Wang et al. 
[40] addressed the temporal information processing challenge while maintaining representation in both the x and y axes, enabling gesture recognition using PointNet++. Further enhancements came with PAT [43], which incorporated self-attention and Gumbel subset sampling, leading to improved performance in recognition tasks. However, existing point-based models still fall short in performance compared to frame-based methods. This phenomenon can be attributed to the distinctively different characteristics of Point Cloud and Event Cloud. Event Cloud contradicts the permutation and transformation invariance present in Point Cloud due to its temporal nature. Additionally, the Point Cloud network is not equipped to extract explicit temporal features." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.464, + 0.172, + 0.479 + ], + "angle": 0, + "content": "3. PEPNet" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.489, + 0.47, + 0.595 + ], + "angle": 0, + "content": "PEPNet pipeline consists of four essential modules: (1) a preprocessing module for the original Event Cloud, (2) a hierarchical Point Cloud feature extraction structure, (3) an Attentive Bi-directional LSTM, and (4) a 6-DOFs pose regressor, as illustrated in Fig. 3. In the following sections, we will provide detailed descriptions and formulations for each module." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.607, + 0.212, + 0.622 + ], + "angle": 0, + "content": "3.1. Event Cloud" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.632, + 0.47, + 0.797 + ], + "angle": 0, + "content": "To preserve the fine-grained temporal information and original data distribution attributes from the Event Cloud, the 2D-spatial and 1D-temporal event information is constructed into a three-dimensional representation to be processed in Point Cloud. Event Cloud consists of time-series data capturing spatial intensity changes of images in chronological order, and an individual event is denoted as \\( e_k = (x_k, y_k, t_k, p_k) \\), where \\( k \\) is the index representing the \\( k_{th} \\) element in the sequence. Consequently, the set of events within a single sequence \\( (\\mathcal{E}) \\) in the dataset can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.13, + 0.8, + 0.469, + 0.817 + ], + "angle": 0, + "content": "\\[\n\\mathcal {E} = \\left\\{e _ {k} = \\left(x _ {k}, y _ {k}, t _ {k}, p _ {k}\\right) \\mid k = 1, \\dots , n \\right\\} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.47, + 0.902 + ], + "angle": 0, + "content": "For a given pose in the dataset, the ground truth resolution is limited to \\(5ms\\), while the event resolution is \\(1\\mu s\\). Therefore, it is necessary to acquire the events that transpire within the time period we call it sliding window corresponding to the poses, which will serve as the input for the model," + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.757, + 0.108 + ], + "angle": 0, + "content": "as depicted by the following equation:" + }, + { + "type": "equation", + "bbox": [ + 0.552, + 0.12, + 0.892, + 0.137 + ], + "angle": 0, + "content": "\\[\nP _ {i} = \\left\\{e _ {j \\rightarrow l} \\mid t _ {l} - t _ {j} = R \\right\\} \\quad i = 1, \\dots , M \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.149, + 0.894, + 0.317 + ], + "angle": 0, + "content": "The symbol \\( R \\) represents the time interval of the sliding window, where \\( j \\) and \\( l \\) denote the start and end event index of the sequence, respectively. 
The variable \\( M \\) represents the number of sliding windows into which the sequence of events \\( \\mathcal{E} \\) is divided. Before being fed into the neural network, \\( P_{i} \\) also needs to undergo sampling and normalization. Sampling is to unify the number of points \\( N \\) as network inputs. We set \\( N = 1024 \\) in PEPNet. Additionally, as the spatial coordinates are normalized by the camera's resolution \\( w \\) and \\( h \\). The normalization process is described by the following equation:" + }, + { + "type": "equation", + "bbox": [ + 0.606, + 0.326, + 0.892, + 0.36 + ], + "angle": 0, + "content": "\\[\nP N _ {i} = \\left(\\frac {X _ {i}}{w}, \\frac {Y _ {i}}{h}, \\frac {T _ {i} - t _ {j}}{t _ {l} - t _ {j}}\\right) \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.515, + 0.373, + 0.892, + 0.39 + ], + "angle": 0, + "content": "\\[\nX _ {i}, Y _ {i}, T _ {i} = \\left\\{x _ {j}, \\dots , x _ {l} \\right\\}, \\left\\{y _ {j}, \\dots , y _ {l} \\right\\}, \\left\\{t _ {j}, \\dots , t _ {l} \\right\\} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.397, + 0.893, + 0.504 + ], + "angle": 0, + "content": "The \\(X, Y\\) is divided by the resolution of the event camera. To normalize \\(T\\), we subtract the smallest timestamp \\(t_j\\) of the window and divide it by the time difference \\(t_l - t_j\\), where \\(t_l\\) represents the largest timestamp within the window. After pre-processing, Event Cloud is converted into the pseudo-Point Cloud, which comprises explicit spatial information \\((x, y)\\) and implicit temporal information \\(t\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.514, + 0.694, + 0.53 + ], + "angle": 0, + "content": "3.2. Hierarchy Structure" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.538, + 0.892, + 0.81 + ], + "angle": 0, + "content": "The hierarchy structure is the backbone for processing the pseudo-3D Point Cloud and is composed of four primary modules: grouping and sampling, standardization, feature extractor, and aggregation, as described in the following subsection. To efficiently extract deeper explicit spatial and implicit temporal features, the hierarchical structure is tailored and differs from conventional hierarchical structure in a few ways: First, we no longer force permutation invariance as usually done in mainstream point-based methods [21, 28], as the motion information is inherently related to the sequential order of events. Instead, we keep the sequence of all events strictly in the same order as they are generated to preserve the temporal information to be used in the next stage. Second, we replace MaxPooling in aggregation and deploy temporal aggregation which leverages the attention mechanism with softmax, which improves the effective assimilation of temporal information into the resultant feature vectors." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.831, + 0.724, + 0.847 + ], + "angle": 0, + "content": "3.2.1 Grouping and Sampling" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.893, + 0.902 + ], + "angle": 0, + "content": "Aligned with the frame-based design concept, our focus is to capture both local and global information. 
Local information is acquired by leveraging Farthest Point Sampling" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "18114" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.087, + 0.873, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.315, + 0.895, + 0.371 + ], + "angle": 0, + "content": "Figure 3. PEPNet overall architecture (the time resolution of \\( t_1, t_2, \\ldots, t_n \\) is \\( 1\\mu s \\)). The input Event Cloud undergoes direct handling through a sliding window, sampling, and normalization, eliminating the need for any format conversion. Sequentially, the input passes through \\( S_{num} \\) hierarchy structures for spatial feature abstraction and extraction. It further traverses a bidirectional LSTM for temporal feature extraction, culminating in a regressor responsible for 6-DOFs camera pose relocalization." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.379, + 0.471, + 0.41 + ], + "angle": 0, + "content": "(FPS) and K-Nearest Neighbors (KNN), while global information is obtained through a dedicated aggregation module." + }, + { + "type": "equation", + "bbox": [ + 0.1, + 0.42, + 0.47, + 0.438 + ], + "angle": 0, + "content": "\\[\nP S _ {i} = F P S \\left(P N _ {i}\\right) \\quad P G _ {i} = K N N \\left(P N _ {i}, P S _ {i}\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.447, + 0.473, + 0.599 + ], + "angle": 0, + "content": "The input dimension \\(PN_{i}\\) is \\([N,3 + D]\\), and the centroid dimension \\(PS_{i}\\) is \\([N^{\\prime},3 + D]\\) and the group dimension \\(PG_{i}\\) is \\([N^{\\prime},K,3 + 2*D]\\). \\(K\\) represents the nearest \\(K\\) points of the center point (centroid), \\(D\\) is the feature dimension of the points of the current stage, and 3 is the most original \\((X,Y,T)\\) coordinate value. Importantly, it should be noted that the ordering of all points in the grouping and sampling process strictly adheres to the timestamp \\((T)\\), and the dimension \\(2*D\\) of the points in the group is the result of being concatenated to the centroid." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.617, + 0.245, + 0.631 + ], + "angle": 0, + "content": "3.2.2 Standardization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.64, + 0.47, + 0.686 + ], + "angle": 0, + "content": "Next, each group undergoes a standardization process to ensure consistent variability between points within the group, as illustrated in this formula:" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.694, + 0.47, + 0.763 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} P G S _ {i} = \\frac {P G _ {i} - P S _ {i}}{S t d \\left(P G _ {i}\\right)} \\quad S t d \\left(P G _ {i}\\right) = \\sqrt {\\frac {\\sum_ {j = 0} ^ {3 n - 1} \\left(g _ {j} - \\bar {g}\\right) ^ {2}}{3 n - 1}} (6) \\\\ g = \\left[ x _ {0}, y _ {0}, t _ {0}, \\dots , x _ {n}, y _ {n}, t _ {n} \\right] (7) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.768, + 0.471, + 0.83 + ], + "angle": 0, + "content": "Where \\( PG_{i} \\) and \\( PS_{i} \\) are the subsets of \\( PG \\) and \\( PS \\), \\( Std \\) is the standard deviation, the dimension of \\( Std(PG) \\) is \\( M \\) which is consistent with the number of sliding windows, and \\( g \\) is the set of coordinates of all points in the \\( PG_{i} \\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.847, + 0.254, + 0.861 + ], + "angle": 0, + "content": "3.2.3 Feature extractor" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Following the standardization of \\( PG \\) by dividing the variance by the subtracted mean, the feature extraction is per" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.379, + 0.892, + 0.454 + ], + "angle": 0, + "content": "formed using a Multi-Layer Perceptron (MLP) with a residual connection. This process encompasses two steps: local feature extraction and global feature extraction. The feature extractor with a bottleneck can be mathematically represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.605, + 0.468, + 0.892, + 0.485 + ], + "angle": 0, + "content": "\\[\nI (x) = f (\\mathrm {B N} (\\mathrm {M L P} _ {1} (x))) \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.619, + 0.488, + 0.891, + 0.504 + ], + "angle": 0, + "content": "\\[\nO (x) = \\operatorname {B N} \\left(\\operatorname {M L P} _ {2} (x)\\right) \\tag {9}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.605, + 0.507, + 0.892, + 0.523 + ], + "angle": 0, + "content": "\\[\nE x t (x) = f (x + O (I (x))) \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.536, + 0.893, + 0.763 + ], + "angle": 0, + "content": "BN represents batch normalization layer, while \\( f \\) signifies the nonlinear activation function. Both local feature extraction and global feature extraction maintain identical input and output dimensions. The dimension increase occurs solely when combining the feature dimension \\( D \\) of the current point with the feature dimension \\( D \\) of the centroid during grouping, resulting in a final dimension of \\( 2 * D \\). The feature extractor takes an input dimension of \\( [B, N, K, D] \\), and following local feature extraction, the dimension remains \\( [B, N, K, D] \\), \\( B \\) represents batch size. We adopt the attention mechanism for aggregation, yielding an aggregated feature dimension of \\( [B, N, D] \\). Subsequently, the aggregated feature map is then processed through the global feature extractor, completing the feature extraction for the current stage." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.786, + 0.713, + 0.801 + ], + "angle": 0, + "content": "3.2.4 Temporal Aggregation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Conventional Point Cloud methods favor MaxPooling operations for feature aggregation because it is efficient in extracting the feature from one point among a group of points and discarding the rest. However, MaxPooling involves extracting only the maximum value along each dimension of the temporal axis. It is robust to noise perturbation but" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18115" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.272 + ], + "angle": 0, + "content": "also ignores the temporal nuances embedded within the features. Conversely, the integration of attention mechanisms enhances the preservation of those nuanced and useful temporal attributes by aggregating features along the temporal axis through the attention value. To provide a more comprehensive exposition, we employ a direct attention mechanism within the \\(K\\) temporal dimensions to effectively aggregate features as shown in Fig. 3. 
This mechanism enables the explicit integration of temporal attributes, capitalizing on the inherent strict ordering of the \\(K\\) points. The ensuing formula succinctly elucidates the essence of this attention mechanism:" + }, + { + "type": "equation", + "bbox": [ + 0.145, + 0.278, + 0.47, + 0.295 + ], + "angle": 0, + "content": "\\[\nF _ {\\text {l o c a l}} = \\operatorname {E x t} (x) = \\left(F _ {t 1}, F _ {t 2}, \\dots , F _ {t k}\\right) \\tag {11}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.096, + 0.301, + 0.468, + 0.317 + ], + "angle": 0, + "content": "\\[\nA = \\operatorname {S o f t M a x} \\left(\\operatorname {M L P} \\left(F _ {\\text {l o c a l}}\\right)\\right) = \\left(a _ {t 1}, a _ {t 2}, \\dots , a _ {t k}\\right) \\tag {12}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.086, + 0.32, + 0.468, + 0.336 + ], + "angle": 0, + "content": "\\[\nF _ {\\text {a g g r e}} = A \\cdot F _ {\\text {l o c a l}} = F _ {t 1} \\cdot a _ {t 1} + F _ {t 2} \\cdot a _ {t 2} + \\dots + F _ {t k} \\cdot a _ {t k} \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.339, + 0.47, + 0.491 + ], + "angle": 0, + "content": "Upon the application of the local feature extractor, the ensuing features are denoted as \\( F_{\\mathrm{local}} \\), and \\( F_{tk} \\) mean the extracted feature of \\( k_{th} \\) point in a group. The attention mechanism comprises an MLP layer with an input layer dimension of \\( D \\) and an output \\( a_{tk} \\) dimension of 1, along with softmax layers. Subsequently, the attention mechanism computes attention values, represented as \\( A \\). These attention values are then multiplied with the original features through batch matrix multiplication, resulting in the aggregated feature \\( F_{\\mathrm{aggre}} \\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.496, + 0.208, + 0.51 + ], + "angle": 0, + "content": "3.3. A-Bi-LSTM" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.52, + 0.473, + 0.836 + ], + "angle": 0, + "content": "The temporal features extracted through the hierarchical structure are independent and parallel, lacking recurrent mechanisms within the network. This distinctive attribute, referred to as 'implicit', contrasts with the conventional treatment of temporal information as an indexed process. Consequently, implicit temporal features inadequately capture the interrelations among events along the timeline, whereas explicit temporal features assume a pivotal role in facilitating the CPR task. To explicitly capture temporal patterns, we introduce the LSTM network, which has been proven effective in learning temporal dependencies. For optimal network performance, controlled feature dimensionality, and comprehensive capture of bidirectional relationships in pose context, we adopt a bidirectional LSTM network with a lightweight design. The regressor attentively focuses on the output of Bi-LSTM at each timestep and is more inclined towards the start and end features as demonstrated in Fig. 6. 
The integration of bidirectional connections into the recurrent neural network (RNN) is succinctly presented through the following equation:" + }, + { + "type": "equation", + "bbox": [ + 0.14, + 0.843, + 0.47, + 0.859 + ], + "angle": 0, + "content": "\\[\n\\mathbf {h} _ {t} = f \\left(\\mathbf {W} _ {h} \\cdot \\mathbf {x} _ {t} + \\mathbf {U} _ {h} \\cdot \\mathbf {h} _ {t - 1} + \\mathbf {b} _ {h}\\right) \\tag {14}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.143, + 0.862, + 0.468, + 0.879 + ], + "angle": 0, + "content": "\\[\n\\mathbf {h} _ {t} ^ {\\prime} = f \\left(\\mathbf {W} _ {h} ^ {\\prime} \\cdot \\mathbf {x} _ {t} + \\mathbf {U} _ {h} ^ {\\prime} \\cdot \\mathbf {h} _ {t + 1} ^ {\\prime} + \\mathbf {b} _ {h} ^ {\\prime}\\right) \\tag {15}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.143, + 0.881, + 0.468, + 0.898 + ], + "angle": 0, + "content": "\\[\n\\mathbf {y} _ {t} = \\mathbf {V} \\cdot \\mathbf {h} _ {t} + \\mathbf {b} _ {y} \\quad \\mathbf {y} _ {t} ^ {\\prime} = \\mathbf {V} ^ {\\prime} \\cdot \\mathbf {h} _ {t} ^ {\\prime} + \\mathbf {b} _ {y} ^ {\\prime} \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.304 + ], + "angle": 0, + "content": "\\(\\mathbf{x}_t\\) represents the feature vector at the \\(t\\)-th time step of the input sequence, while \\(\\mathbf{h}_{t-1}\\) and \\(\\mathbf{h}_{t+1}'\\) correspond to the hidden states of the forward and backward RNN units, respectively, from the previous time step. The matrices \\(\\mathbf{W}_h\\), \\(\\mathbf{U}_h\\), and \\(\\mathbf{b}_h\\) denote the weight matrix and bias vector of the forward RNN unit, while \\(\\mathbf{V}\\) and \\(\\mathbf{b}_y\\) represent the weight matrix and bias vector of its output layer. Similarly, \\(\\mathbf{W}_h'\\), \\(\\mathbf{U}_h'\\), and \\(\\mathbf{b}_h'\\) are associated with the weight matrix and bias vector of the backward RNN unit, and \\(\\mathbf{V}'\\) and \\(\\mathbf{b}_y'\\) pertain to the weight matrix and bias vector of its output layer. The activation function, denoted as \\(f(\\cdot)\\), can be chosen as sigmoid or tanh or other functions. The final output \\(Y_a\\) is aggregated at each moment using the attention mechanism, and \\(\\oplus\\) means concat operation." + }, + { + "type": "equation", + "bbox": [ + 0.67, + 0.318, + 0.891, + 0.334 + ], + "angle": 0, + "content": "\\[\nY _ {t} = y _ {t} \\oplus y _ {t} ^ {\\prime} \\tag {17}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.613, + 0.337, + 0.891, + 0.352 + ], + "angle": 0, + "content": "\\[\nA = \\operatorname {S o f t M a x} \\left(\\operatorname {M L P} \\left(Y _ {t}\\right)\\right) \\tag {18}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.669, + 0.356, + 0.891, + 0.371 + ], + "angle": 0, + "content": "\\[\nY _ {a} = A \\cdot Y _ {t} \\tag {19}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.387, + 0.645, + 0.401 + ], + "angle": 0, + "content": "3.4. Loss Function" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.411, + 0.893, + 0.502 + ], + "angle": 0, + "content": "A fully connected layer with a hidden layer is employed to address the final 6-DOFs pose regression task. The displacement vector of the regression is denoted as \\(\\hat{p}\\) representing the magnitude and direction of movement, while the rotational Euler angles are denoted as \\(\\hat{q}\\) indicating the rotational orientation in three-dimensional space." 
+ }, + { + "type": "equation", + "bbox": [ + 0.527, + 0.516, + 0.892, + 0.535 + ], + "angle": 0, + "content": "\\[\n\\operatorname {L o s s} = \\alpha | | \\hat {p} - p | | _ {2} + \\beta | | \\hat {q} - q | | _ {2} + \\lambda \\sum_ {i = 0} ^ {n} w _ {i} ^ {2} \\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.547, + 0.894, + 0.654 + ], + "angle": 0, + "content": "\\(p\\) and \\(q\\) represent the ground truth obtained from the dataset, while \\(\\alpha\\), \\(\\beta\\), and \\(\\lambda\\) serve as weight proportion coefficients. In order to tackle the prominent concern of overfitting, especially in the end-to-end setting, we incorporate the L2 regularization into the loss function. This regularization, implemented as the second paradigm for the network weights \\(w\\), effectively mitigates overfitting." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.666, + 0.696, + 0.681 + ], + "angle": 0, + "content": "3.5. Overall Architecture" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.691, + 0.892, + 0.737 + ], + "angle": 0, + "content": "Next, we will present the PEPNet pipeline in pseudo-code, utilizing the previously defined variables and formulas as described in Algorithm 1." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.754, + 0.625, + 0.77 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.894, + 0.902 + ], + "angle": 0, + "content": "In this section, we present an extensive and in-depth analysis of PEPNet's performance on both indoor and outdoor datasets, encompassing evaluations based on rotational and translational mean squared error (MSE), model parameters, floating-point operations (FLOPs), and inference time. PEPNet's training and testing are performed on a server furnished with an AMD Ryzen 7950X CPU, an RTX GeForce 4090 GPU, and 32GB of memory." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "18116" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.076, + 0.093, + 0.905, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.336, + 0.268, + 0.633, + 0.283 + ], + "angle": 0, + "content": "Figure 4. Event-based CPR Dataset visualization." + }, + { + "type": "code_caption", + "bbox": [ + 0.078, + 0.309, + 0.28, + 0.325 + ], + "angle": 0, + "content": "Algorithm 1 PEPNet pipeline" + }, + { + "type": "algorithm", + "bbox": [ + 0.078, + 0.327, + 0.487, + 0.724 + ], + "angle": 0, + "content": "Input: Raw event stream \\(\\mathcal{E}\\) \nParameters: \\(N_{p} = 1024,R = 1e + 3,S_{\\mathrm{num}} = 3,K = 24\\) \nOutput: 6-DOFs pose \\((\\hat{p},\\hat{q})\\) \n1: Preprocessing \n2: for \\(j\\) in len(ε) do \n3: \\(P_{i}\\) .append \\((e_{j\\rightarrow l})\\) .. \\(j = l\\) ; where \\(t_l - t_j = R\\) \n4: if \\((len(P_i) > N_p)\\) .. 
\\(i = i + 1\\) \n5: end for \n6: \\(PN =\\) Normalize(Sampling \\((P))\\) \n7: Hierarchy structure \n8: for stage in range \\((S_{\\mathrm{num}})\\) do \n9: Grouping and Sampling \\((PN)\\) \n10: Get \\(PGS\\in [B,N_{\\mathrm{stage}},K,2*D_{\\mathrm{stage}-1}]\\) \n11: Local Extractor \\((PGS)\\) \n12: Get \\(F_{\\mathrm{local}}\\in [B,N_{\\mathrm{stage}},K,D_{\\mathrm{stage}}]\\) \n13: Attentive Aggregate \\((F_{\\mathrm{local}})\\) \n14: Get \\(F_{\\mathrm{aggre}}\\in [B,N_{\\mathrm{stage}},D_{\\mathrm{stage}}]\\) \n15: Global Extractor \\((F_{\\mathrm{aggre}})\\) \n16: Get \\(PN = F_{\\mathrm{global}}\\in [B,N_{\\mathrm{stage}},D_{\\mathrm{stage}}]\\) \n17: end for \n18: A-Bi-LSTM \n19: Forward Get \\(y_{t}\\in [B,N_{3},DS_{\\mathrm{num}} / 2]\\) \n20: Reverse Get \\(y_t^\\prime \\in [B,N_3,D_{S_{\\mathrm{num}}} / 2]\\) \n21: Attention Get \\(Y_{a}\\in [B,D_{S_{\\mathrm{num}}}]\\) \n22: Regressor \n23: Get 6-DOFs pose \\((\\hat{p},\\hat{q})\\)" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.755, + 0.173, + 0.77 + ], + "angle": 0, + "content": "4.1. Dataset" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.473, + 0.903 + ], + "angle": 0, + "content": "We employ the widely evaluated event-based CPR dataset IJRR [23] and M3ED [4], encompassing both indoor and outdoor scenes. Two distinct methods to partition the CPR dataset [26] have been benchmarked: random split and novel split. In the random split approach, the dataset is randomly selected \\(70\\%\\) of all sequences for training and allocated the remaining sequences for testing. On the other hand, in the novel split, we divide the data chronologically," + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.309, + 0.892, + 0.34 + ], + "angle": 0, + "content": "using the initial \\(70\\%\\) of sequences for training and the subsequent \\(30\\%\\) for testing." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.35, + 0.602, + 0.365 + ], + "angle": 0, + "content": "4.2. Baseline" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.374, + 0.893, + 0.48 + ], + "angle": 0, + "content": "We perform a thorough evaluation of our proposed method by comparing it with SOTA event-based approaches, namely CNN-LSTM [37] and AECRN [18]. Moreover, we present results derived from other well-established computer vision methods, including PoseNet[14], Bayesian PoseNet [13], Pairwise-CNN [15], LSTM-Pose [39], and SP-LSTM[26]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.49, + 0.704, + 0.505 + ], + "angle": 0, + "content": "4.3. IJRR Dataset Results" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.514, + 0.706, + 0.53 + ], + "angle": 0, + "content": "4.3.1 Random Split Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.538, + 0.892, + 0.795 + ], + "angle": 0, + "content": "Based on the findings presented in Tab. 1, it is apparent that PEPNet surpasses other models concerning both rotation and translation errors across all sequences. Notably, PEPNet achieves these impressive results despite utilizing significantly fewer model parameters and FLOPs compared to the frame-based approach. Moreover, PEPNet not only exhibits a remarkable \\(38\\%\\) improvement in the average error compared to the SOTA CNN-LSTM method but also attains superior results across nearly all sequences. In addressing the more intricate and challenging hdr_poster sequences, while the frame-based approach relies on a denoising network to yield improved results [12], PEPNet excels by achieving remarkable performance without any additional processing. 
This observation strongly implies that PEPNet's Point Cloud approach exhibits greater robustness compared to the frame-based method, highlighting its inherent superiority in handling complex scenarios." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Furthermore, we introduce an alternative variant, PEPNet\\(_{tinyy}\\), which integrates a lighter model architecture while preserving relatively strong performance. As depicted in Fig. 3, PEPNet consists of three stages, and the model's size is contingent upon the dimensionality of MLPs at each stage. The dimensions for the standard structure are [64, 128, 256], whereas those for the tiny structure are [16," + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18117" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.088, + 0.912, + 0.228 + ], + "angle": 0, + "content": "
| Network | PoseNet | Bayesian PoseNet | Pairwise-CNN | LSTM-Pose | SP-LSTM | CNN-LSTM | PEPNet | PEPNet_tiny |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Parameter | 12.43M | 22.35M | 22.34M | 16.05M | 135.25M | 12.63M | 0.774M | 0.064M |
| FLOPs | 1.584G | 3.679G | 7.359G | 1.822G | 15.623G | 1.998G | 0.459G | 0.033G |
| shapes Rotation | 0.109m,7.388° | 0.142m,9.557° | 0.095m,6.332° | 0.032m,4.439° | 0.025m,2.256° | 0.012m,1.652° | 0.005m,1.372° | 0.006m,1.592° |
| box Translation | 0.193m,6.977° | 0.190m,6.636° | 0.178m,6.153° | 0.083m,6.215° | 0.036m,2.195° | 0.013m,0.873° | 0.017m,0.845° | 0.031m,1.516° |
| shapes Translation | 0.238m,6.001° | 0.264m,6.235° | 0.201m,5.146° | 0.056m,5.018° | 0.035m,2.117° | 0.020m,1.471° | 0.011m,0.582° | 0.013m,0.769° |
| dynamic_6dof | 0.297m,9.332° | 0.296m,8.963° | 0.245m,5.962° | 0.097m,6.732° | 0.031m,2.047° | 0.016m,1.662° | 0.015m,1.045° | 0.018m,1.144° |
| hdr_poster | 0.282m,8.513° | 0.290m,8.710° | 0.232m,7.234° | 0.108m,6.186° | 0.051m,3.354° | 0.033m,2.421° | 0.016m,0.991° | 0.028m,1.863° |
| poster Translation | 0.266m,6.516° | 0.264m,5.459° | 0.211m,6.439° | 0.079m,5.734° | 0.036m,2.074° | 0.020m,1.468° | 0.012m,0.588° | 0.019m,0.953° |
| Average | 0.231m,7.455° | 0.241m,7.593° | 0.194m,6.211° | 0.076m,5.721° | 0.036m,2.341° | 0.019m,1.591° | 0.013m,0.904° | 0.019m,1.306° |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.238, + 0.895, + 0.281 + ], + "angle": 0, + "content": "Table 1. IJRR random split results. The table presents the median error for each sequence, as well as the average error across the six sequences. It also presents the number of parameters and FLOPs for each model. Bold indicates the most advanced result, while underline signifies the second-best result." + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.304, + 0.432, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.426, + 0.471, + 0.468 + ], + "angle": 0, + "content": "Figure 5. Error distribution of event-based CPR results achieved by PEPNet using a random split. (a) Translation errors. (b) Rotation errors." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.495, + 0.47, + 0.571 + ], + "angle": 0, + "content": "32, 64]. As indicated in Tab. 1, even with a mere \\(0.5\\%\\) of the CNN-LSTM's parameter, \\(\\mathrm{PEPNet}_{tiny}\\) achieves comparable and even slightly superior results. This remarkable outcome emphasizes the superiority of leveraging event cloud data processing directly." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.591, + 0.262, + 0.605 + ], + "angle": 0, + "content": "4.3.2 Error Distribution" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.615, + 0.47, + 0.766 + ], + "angle": 0, + "content": "Fig. 5 illustrates the error distribution of PEPNet across six distinct sequences using the random split method, specifically: shape rotation, box translation, shape translation, dynamic 6-dof, hdr poster, and poster translation. To enhance clarity, the top and bottom boundaries of the box represent the first and third quartiles, respectively, indicating the inter-quartile range (IQR). The median is denoted by the band within the box. It is observed that the IQR of the translation error approximately locates between \\(0.004\\mathrm{m}\\) and \\(0.024\\mathrm{m}\\), while the orientation error ranges from \\(0.4^{\\circ}\\) to \\(1.9^{\\circ}\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.787, + 0.264, + 0.802 + ], + "angle": 0, + "content": "4.3.3 Novel Split Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.901 + ], + "angle": 0, + "content": "To assess the model's robustness, we adopt the novel split as an evaluation criterion, as shown in Tab. 2. During the training process, we observe a more pronounced overfitting phenomenon in PEPNet compared to the random split. We attribute this observation to the disparities in data distributions between the trainset and the testset, as well as the lim" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.307, + 0.892, + 0.383 + ], + "angle": 0, + "content": "ited data size. Contrary to the methods we compared, PEP-Net does not necessitate pre-trained weights. For instance, SP-LSTM relies on pre-trained VGG19 weights from ImageNet, while AECRN requires synthetic heuristic depth and an extensive pretraining process." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.383, + 0.893, + 0.639 + ], + "angle": 0, + "content": "To address overfitting, PEPNet employs conventional methods that yield consistent and comparable results with the SOTA on three shape sequences that are displayed in the network column of Tab. 2. It is essential to note that AE-CRN adopts a hybrid approach, combining neural network regression for scene coordinates with derivable RANSAC for pose estimation. 
Moreover, this method incurs significant time consumption, with even the SOTA DSAC* algorithm taking nearly 30ms, excluding additional time for data format conversion. This time constraint presents compatibility challenges with the low-latency nature of event cameras. In contrast, PEPNet can execute on a server in just 6.7ms, with the main time-consuming module being grouping and sampling. Furthermore, with potential field programmable gate array (FPGA) or application-specific integrated chip (ASIC) support for these operations[6, 20], PEPNet's performance can be further accelerated." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.649, + 0.712, + 0.663 + ], + "angle": 0, + "content": "4.4. M3ED Dataset Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.672, + 0.892, + 0.808 + ], + "angle": 0, + "content": "We selected three robots (Car, Falcon, and Spot) to extend the application scope of PEPNet across five sequences in an outdoor night setting, as illustrated in the Tab. 3. Due to its much higher resolution than IJRR, we performed downsampling processing and more number of points (1024 to 2048), and other experimental configurations are consistent with the IJRR dataset with random split. The results demonstrate the superior performance of PEPNet even in more challenging outdoor environments." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.817, + 0.715, + 0.832 + ], + "angle": 0, + "content": "4.5. Attention Visualization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.841, + 0.892, + 0.9 + ], + "angle": 0, + "content": "As shown in Fig. 6, We observe that the attention scores exhibit larger at both the beginning and end. We tentatively infer that the model focuses more on the difference in features between the start and the end for CPR, which is also" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "18118" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.101, + 0.089, + 0.874, + 0.171 + ], + "angle": 0, + "content": "
| Network | PoseNet | Bayesian PoseNet | Pairwise-CNN | LSTM-Pose | SP-LSTM | DSAC* | AECRN | PEPNet |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| shapes Rotation | 0.201m,12.499° | 0.164m,12.188° | 0.187m,10.426° | 0.061m,7.625° | 0.045m,5.017° | 0.029m,2.3° | 0.025m,2.0° | 0.016m,1.745° |
| shapes Translation | 0.198m,6.696° | 0.213m,7.441° | 0.225m,11.627° | 0.108m,8.468° | 0.072m,4.496° | 0.038m,2.2° | 0.029m,1.7° | 0.026m,1.659° |
| shapes_6dof | 0.320m,13.733° | 0.326m,13.296° | 0.314m,13.245° | 0.096m,8.973° | 0.078m,5.524° | 0.054m,3.1° | 0.052m,3.0° | 0.045m,2.984° |
| Average | 0.240m,11.067° | 0.234m,10.975° | 0.242m,11.766° | 0.088m,8.355° | 0.065m,5.012° | 0.040m,2.53° | 0.035m,2.23° | 0.029m,2.13° |
| Inference time | 5ms | 6ms | 12ms | 9.49ms | 4.79ms | 30ms | 30ms | 6.7ms |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.18, + 0.893, + 0.208 + ], + "angle": 0, + "content": "Table 2. IJRR novel split results. Referred to as Tab. 1, showcases identical information. To assess the model's runtime, we conduct tests on a server platform, specifically focusing on the average time required for inference on a single sample." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.232, + 0.486, + 0.324 + ], + "angle": 0, + "content": "
| M3ED | PoseNet | LSTM-Pose | CNN-LSTM | PEPNet |
| --- | --- | --- | --- | --- |
| INPUT | Event Frame | Event Frame | Event Frame | Point Cloud |
| Falcon_Night_High_Beans | 0.181m,2.221° | 0.112m,0.946° | 0.107m,1.435° | 0.082m,0.575° |
| Car_Night_Pen_S_Loop | 1.618m,8.126° | 0.667m,4.914° | 0.773m,3.005° | 0.577m,1.319° |
| Spot_Night_Pen_Loop | 1.735m,5.502° | 0.761m,7.898° | 0.401m,1.771° | 0.468m,1.062° |
| Car_Pen_S_Loop_darker | 1.841m,4.575° | 0.751m,3.738° | 0.598m,2.772° | 0.385m,1.01° |
| Spot_Plaza_Light | 1.372m,9.564° | 0.565m,5.221° | 0.273m,2.001° | 0.348m,1.234° |
| Average | 1.349m,5.998° | 0.571m,4.543° | 0.43m,2.197° | 0.372m,1.04° |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.334, + 0.465, + 0.348 + ], + "angle": 0, + "content": "Table 3. Outdoor extension on M3ED dataset with random split." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.361, + 0.47, + 0.448 + ], + "angle": 0, + "content": "
| Condition | HS | LSTM | Bi-LSTM | Aggregation | Translation | Rotation | T+R |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 1 | ✓ |  |  | Max | 0.015m | 0.884° | 3.04 |
| 2 | ✓ |  |  | Temporal | 0.014m | 0.786° | 2.77 |
| 3 | ✓ | ✓ |  | Max | 0.014m | 0.833° | 2.85 |
| 4 | ✓ | ✓ |  | Temporal | 0.012m | 0.603° | 2.25 |
| 5 | ✓ |  | ✓ | Max | 0.014m | 0.813° | 2.82 |
| 6 | ✓ |  | ✓ | Temporal | 0.011m | 0.582° | 2.12 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.458, + 0.47, + 0.487 + ], + "angle": 0, + "content": "Table 4. Abalation Study for three key modules. \\( \\mathrm{T} + \\mathrm{R} = \\) Translation + Rotation \\( \\cdot \\pi /{180}\\left( {\\mathrm{\\;m} + \\mathrm{{rad}}}\\right) \\)" + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.5, + 0.422, + 0.64 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.649, + 0.47, + 0.692 + ], + "angle": 0, + "content": "Figure 6. Visualization of the attention values in the time domain. 128 points in chronological order on the horizontal axis and the attention values of the corresponding point on the vertical axis." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.718, + 0.334, + 0.733 + ], + "angle": 0, + "content": "seen in the geometry approach [7, 24]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.742, + 0.231, + 0.759 + ], + "angle": 0, + "content": "4.6. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Key Module Ablation: In order to validate the efficacy of key modules, we conducted an ablation experiment focusing on three primary components: hierarchy structure, Bi-LSTM, and temporal aggregation. These experiments are designed to evaluate rotation and translation errors on the shape translation sequence with the random split. The combined error \\((\\mathrm{T} + \\mathrm{R})\\) is measured after processing. Our experimental setup comprises four distinct conditions, as illustrated in Tab. 4. Condition 1 represents the sole utility of the system." + }, + { + "type": "table", + "bbox": [ + 0.513, + 0.232, + 0.88, + 0.281 + ], + "angle": 0, + "content": "
| Scene | α = 0.5, β = 0.5 | α = 0.25, β = 0.75 | α = 0.75, β = 0.25 |
| --- | --- | --- | --- |
| shape Translation | 0.0302m,1.684°,5.96 | 0.0359m,1.72°,6.59 | 0.0303m,2.056°,6.62 |
| shape Rotation | 0.0143m,2.888°,6.47 | 0.0159m,2.68°,6.27 | 0.014m,3.36°,7.26 |
| dynamic_6dof | 0.0542m,2.799°,10.3 | 0.0611m,2.488°,10.5 | 0.0516m,3.251°,10.8 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.53, + 0.29, + 0.861, + 0.304 + ], + "angle": 0, + "content": "Table 5. Abalation Study for loss function's coefficient." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.333, + 0.892, + 0.499 + ], + "angle": 0, + "content": "ization of the hierarchy structure (HS), while Condition 2 combines the ordinary LSTM. Condition 3 incorporates the bidirectional LSTM, and Condition 4 integrates the attention mechanism for feature aggregation. The ablation experiments reveal significant insights. Experiments 1 and 3 demonstrate that augmenting LSTM enhances the extraction of explicit temporal features. Moreover, experiments 3 and 5 reveal the effectiveness of the bidirectional LSTM in extracting motion information. Additionally, experiments 5 and 6 confirm the notable impact of attention in feature aggregation, resulting in a substantial reduction in error rates." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.501, + 0.893, + 0.635 + ], + "angle": 0, + "content": "Loss ablation: We incorporated the experiment involving scaling coefficients of the loss function in Tab. 5. This experiment utilized a tiny version of PEPNet, trained for 100 epochs, and the outcome is MSE in translation, rotation, and \\(\\mathrm{T} + \\mathrm{R}\\). Across three distinct motion scenarios (translation, rotation, and 6dof) varied coefficient ratios induced deviations in the obtained results. For example, in shape rotation, increasing the weight on rotation makes the results better." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.654, + 0.62, + 0.67 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.681, + 0.892, + 0.816 + ], + "angle": 0, + "content": "In this paper, we introduce an end-to-end CPR network that operates directly on raw event clouds without frame-based preprocessing. PEPNet boasts an impressively lightweight framework that adeptly extracts spatial and temporal features, leading to SOTA performance. Diverging from frame-based approaches, our method prioritizes preserving the inherent distribution of the event cloud, capitalizing on its sparse nature to achieve extraordinary capabilities for ultra-low-power applications." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.817, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Acknowledgment. This work was supported in part by the Young Scientists Fund of the National Natural Science Foundation of China (Grant 62305278), as well as the Hong Kong University of Science and Technology (Guangzhou) Joint Funding Program under Grant 2023A03J0154 and 2024A03J0618." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "18119" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.122, + 0.174, + 0.137 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.148, + 0.47, + 0.204 + ], + "angle": 0, + "content": "[1] Vassileios Balntas, Shuda Li, and Victor Prisacariu. Relocnet: Continuous metric learning relocalisation using neural nets. In Proceedings of the European Conference on Computer Vision (ECCV), pages 751-767, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.206, + 0.472, + 0.26 + ], + "angle": 0, + "content": "[2] Eric Brachmann and Carsten Rother. Visual camera relocalization from rgb and rgb-d images using dsac. IEEE transactions on pattern analysis and machine intelligence, 44(9):5847-5865, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.263, + 0.47, + 0.332 + ], + "angle": 0, + "content": "[3] Eric Brachmann, Alexander Krull, Sebastian Nowozin, Jamie Shotton, Frank Michel, Stefan Gumhold, and Carsten Rother. Dsac-differentiable ransac for camera localization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6684-6692, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.334, + 0.47, + 0.417 + ], + "angle": 0, + "content": "[4] Kenneth Chaney, Fernando Cladora, Ziyun Wang, Anthony Bisulco, M Ani Hsieh, Christopher Korpela, Vijay Kumar, Camillo J Taylor, and Kostas Daniilidis. M3ed: Multi-robot, multi-sensor, multi-environment event dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4015–4022, 2023. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.419, + 0.47, + 0.502 + ], + "angle": 0, + "content": "[5] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.504, + 0.47, + 0.587 + ], + "angle": 0, + "content": "[6] Haotian Fu, Yulong Huang, Tingran Chen, Chenyi Fu, Hongwei Ren, Yue Zhou, Shouzhong Peng, Zhirui Zong, Biao Pan, and Bojun Cheng. Ds-cim: A 40nm asynchronous dual-spike driven, mram compute-in-memory macro for spiking neural network. IEEE Transactions on Circuits and Systems I: Regular Papers, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.589, + 0.47, + 0.643 + ], + "angle": 0, + "content": "[7] Guillermo Gallego, Christian Forster, Elias Mueggler, and Davide Scaramuzza. Event-based camera pose tracking using a generative event model. arXiv preprint arXiv:1510.01972, 2015. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.646, + 0.47, + 0.714 + ], + "angle": 0, + "content": "[8] Guillermo Gallego, Jon EA Lund, Elias Mueggler, Henri Rebecq, Tobi Delbruck, and Davide Scaramuzza. Event-based, 6-dof camera tracking from photometric depth maps. IEEE transactions on pattern analysis and machine intelligence, 40(10):2402-2412, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.717, + 0.47, + 0.786 + ], + "angle": 0, + "content": "[9] Guillermo Gallego, Tobi Delbrück, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew J Davison, Jörg Conradt, Kostas Daniilidis, et al. Event-based vision: A survey. IEEE transactions on pattern analysis and machine intelligence, 44(1):154-180, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.47, + 0.843 + ], + "angle": 0, + "content": "[10] Yulan Guo, Hanyun Wang, Qingyong Hu, Hao Liu, Li Liu, and Mohammed Bennamoun. Deep learning for 3d point clouds: A survey. IEEE transactions on pattern analysis and machine intelligence, 43(12):4338-4364, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 
2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.148, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[12] Yifan Jin, Lei Yu, Guangqiang Li, and Shumin Fei. A 6-dofs event-based camera relocalization system by cnn-lstm and image denoising. Expert Systems with Applications, 170: 114535, 2021. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[13] Alex Kendall and Roberto Cipolla. Modelling uncertainty in deep learning for camera relocalization. In 2016 IEEE international conference on Robotics and Automation (ICRA), pages 4762-4769. IEEE, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.205, + 0.892, + 0.272 + ], + "angle": 0, + "content": "[14] Alex Kendall, Matthew Grimes, and Roberto Cipolla. Posenet: A convolutional network for real-time 6-dof camera relocalization. In Proceedings of the IEEE international conference on computer vision, pages 2938-2946, 2015. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.274, + 0.892, + 0.343 + ], + "angle": 0, + "content": "[15] Zakaria Laskar, Iaroslav Melekhov, Surya Kalia, and Juho Kannala. Camera relocalization by computing pairwise relative poses using convolutional neural network. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 929-938, 2017. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.345, + 0.892, + 0.371 + ], + "angle": 0, + "content": "[16] Yann LeCun, Yoshua Bengio, and Geoffrey Hinton. Deep learning. nature, 521(7553):436-444, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.373, + 0.892, + 0.426 + ], + "angle": 0, + "content": "[17] Patrick Lichtsteiner, Christoph Posch, and Tobi Delbruck. A \\(128 \\times 128\\) 120 db \\(15\\mu s\\) latency asynchronous temporal contrast vision sensor. IEEE journal of solid-state circuits, 43 (2):566-576, 2008. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.428, + 0.892, + 0.497 + ], + "angle": 0, + "content": "[18] Hu Lin, Meng Li, Qianchen Xia, Yifeng Fei, Baocai Yin, and Xin Yang. 6-dof pose relocalization for event cameras with entropy frame and attention networks. In The 18th ACM SIGGRAPH International Conference on Virtual-Reality Continuum and its Applications in Industry, pages 1–8, 2022. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.498, + 0.892, + 0.607 + ], + "angle": 0, + "content": "[19] Yimin Lin, Zhaoxiang Liu, Jianfeng Huang, Chaopeng Wang, Guoguang Du, Jinqiang Bai, and Shiguo Lian. Deep global-relative networks for end-to-end 6-dof visual localization and odometry. In PRICAI 2019: Trends in Artificial Intelligence: 16th Pacific Rim International Conference on Artificial Intelligence, Cuvu, Yanuca Island, Fiji, August 26–30, 2019, Proceedings, Part II, pages 454–467. Springer, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.609, + 0.892, + 0.676 + ], + "angle": 0, + "content": "[20] Haobo Liu, Zhengyang Qian, Wei Wu, Hongwei Ren, Zhiwei Liu, and Leibin Ni. Afpr-cim: An analog-domain floating-point rram-based compute-in-memory architecture with dynamic range adaptive fp-adc. arXiv preprint arXiv:2402.13798, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.892, + 0.734 + ], + "angle": 0, + "content": "[21] Xu Ma, Can Qin, Haoxuan You, Haoxi Ran, and Yun Fu. 
Rethinking network design and local geometry in point cloud: A simple residual mlp framework. In International Conference on Learning Representations, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.735, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[22] Anton Mitrokhin, Zhiyuan Hua, Cornelia Fermuller, and Yiannis Aloimonos. Learning visual motion segmentation using event surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14414-14423, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.804, + 0.892, + 0.872 + ], + "angle": 0, + "content": "[23] Elias Mueggler, Henri Rebecq, Guillermo Gallego, Tobi Delbruck, and Davide Scaramuzza. The event-camera dataset and simulator: Event-based data for pose estimation, visual odometry, and slam. The International Journal of Robotics Research, 36(2):142-149, 2017. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.874, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[24] Elias Mueggler, Guillermo Gallego, Henri Rebecq, and Davide Scaramuzza. Continuous-time visual-inertial odometry" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "18120" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.093, + 0.468, + 0.12 + ], + "angle": 0, + "content": "for event cameras. IEEE Transactions on Robotics, 34(6): 1425-1440, 2018. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.19 + ], + "angle": 0, + "content": "[25] Tayyab Naseer and Wolfram Burgard. Deep regression for monocular camera-based 6-dof global localization in outdoor environments. In 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 1525-1530. IEEE, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.471, + 0.262 + ], + "angle": 0, + "content": "[26] Anh Nguyen, Thanh-Toan Do, Darwin G Caldwell, and Nikos G Tsagarakis. Real-time 6dof pose relocalization for event cameras with stacked spatial LSTM networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.264, + 0.47, + 0.332 + ], + "angle": 0, + "content": "[27] Christoph Posch, Daniel Matolin, and Rainer Wohlgenannt. A qvga 143 db dynamic range frame-free pwm image sensor with lossless pixel-level video compression and time-domain cds. IEEE Journal of Solid-State Circuits, 46(1):259-275, 2010. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.334, + 0.47, + 0.403 + ], + "angle": 0, + "content": "[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.405, + 0.469, + 0.461 + ], + "angle": 0, + "content": "[29] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.462, + 0.469, + 0.516 + ], + "angle": 0, + "content": "[30] Noha Radwan, Abhinav Valada, and Wolfram Burgard. 
Vlocnet++: Deep multitask learning for semantic visual localization and odometry. IEEE Robotics and Automation Letters, 3(4):4407-4414, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.518, + 0.469, + 0.56 + ], + "angle": 0, + "content": "[31] Henri Rebecq, Daniel Gehrig, and Davide Scaramuzza. Esim: an open event camera simulator. In Conference on robot learning, pages 969-982. PMLR, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.562, + 0.469, + 0.617 + ], + "angle": 0, + "content": "[32] Hongwei Ren, Yue Zhou, Haotian Fu, Yulong Huang, Renjing Xu, and Bojun Cheng. Ttpoint: A tensorized point cloud network for lightweight action recognition with event cameras. arXiv preprint arXiv:2308.09993, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.619, + 0.469, + 0.686 + ], + "angle": 0, + "content": "[33] Hongwei Ren, Yue Zhou, Yulong Huang, Haotian Fu, Xiaopeng Lin, Jie Song, and Bojun Cheng. Spikepoint: An efficient point-based spiking neural network for event cameras action recognition. arXiv preprint arXiv:2310.07189, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.689, + 0.469, + 0.744 + ], + "angle": 0, + "content": "[34] Yusuke Sekikawa, Kosuke Hara, and Hideo Saito. Eventnet: Asynchronous recursive event processing. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3887-3896, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.469, + 0.787 + ], + "angle": 0, + "content": "[35] Yoli Shavit and Ron Ferens. Introduction to camera pose estimation with deep learning. arXiv preprint arXiv:1907.05272, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.469, + 0.83 + ], + "angle": 0, + "content": "[36] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[37] Ahmed Tabia, Fabien Bonardi, and Samia Bouchafa. Deep learning for pose estimation from event camera. In 2022 International Conference on Digital Image Computing: Techniques and Applications (DICTA), pages 1-7. IEEE, 2022. 6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.471, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[38] Abhinav Valada, Noha Radwan, and Wolfram Burgard. Deep auxiliary learning for visual localization and odometry. In 2018 IEEE international conference on robotics and automation (ICRA), pages 6939-6946. IEEE, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[39] Florian Walch, Caner Hazirbas, Laura Leal-Taixe, Torsten Sattler, Sebastian Hilsenbeck, and Daniel Cremers. Image-based localization using lstms for structured feature correlation. In Proceedings of the IEEE International Conference on Computer Vision, pages 627-637, 2017. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.288 + ], + "angle": 0, + "content": "[40] Qinyi Wang, Yexin Zhang, Junsong Yuan, and Yilong Lu. Space-time event clouds for gesture recognition: From rgb cameras to event cameras. In 2019 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 1826-1835. IEEE, 2019. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.291, + 0.892, + 0.345 + ], + "angle": 0, + "content": "[41] Jian Wu, Liwei Ma, and Xiaolin Hu. Delving deeper into convolutional neural networks for camera relocalization. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 5644-5651. IEEE, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.347, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[42] Wenxuan Wu, Zhongang Qi, and Li Fuxin. Pointconv: Deep convolutional networks on 3d point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9621-9630, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.405, + 0.892, + 0.473 + ], + "angle": 0, + "content": "[43] Jiancheng Yang, Qiang Zhang, Bingbing Ni, Linguuo Li, Jinxian Liu, Mengdie Zhou, and Qi Tian. Modeling point clouds with self-attention and gumbel subset sampling. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3323–3332, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.475, + 0.892, + 0.53 + ], + "angle": 0, + "content": "[44] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16259-16268, 2021. 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.53 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "18121" + } + ] +] \ No newline at end of file diff --git a/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/1bfdaa4b-618c-45a6-9de6-a1e062fefbcd_origin.pdf b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/1bfdaa4b-618c-45a6-9de6-a1e062fefbcd_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1968bb62127e4b20647e432eabf01dca79684656 --- /dev/null +++ b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/1bfdaa4b-618c-45a6-9de6-a1e062fefbcd_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dec0106f887e21b408d677060cca008abb6d7d25b9b1e599ce6a49261258b0c8 +size 6802305 diff --git a/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/full.md b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/full.md new file mode 100644 index 0000000000000000000000000000000000000000..79fec488863a5d64481816722ffe482a8bf52212 --- /dev/null +++ b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/full.md @@ -0,0 +1,359 @@ +# A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization + +Hongwei Ren*, Jiadong Zhu*, Yue Zhou, Haotian Fu, Yulong Huang, Bojun Cheng † +The Hong Kong University of Science and Technology(Guangzhou) + +{hren066, jzhu484, yzhou833, hfu373, yhuang496}@connect.hkust-gz.edu.cn, bocheng@hkust-gz.edu.cn + +# Abstract + +Event cameras exhibit remarkable attributes such as high dynamic range, asynchronicity, and low latency, making them highly suitable for vision tasks that involve high-speed motion in challenging lighting conditions. These cameras implicitly capture movement and depth information in events, making them appealing sensors for Camera Pose Relocalization (CPR) tasks. 
Nevertheless, existing CPR networks based on events neglect the pivotal fine-grained temporal information in events, resulting in unsatisfactory performance. Moreover, the energy-efficient features are further compromised by the use of excessively complex models, hindering efficient deployment on edge devices. In this paper, we introduce PEPNet, a simple and effective point-based network designed to regress six degrees of freedom (6-DOFs) event camera poses. We rethink the relationship between the event camera and CPR tasks, leveraging the raw Point Cloud directly as network input to harness the high-temporal resolution and inherent sparsity of events. PEPNet is adept at abstracting the spatial and implicit temporal features through hierarchical structure and explicit temporal features by Attentive Bidirectional Long Short-Term Memory (A-Bi-LSTM). By employing a carefully crafted lightweight design, PEPNet delivers state-of-the-art (SOTA) performance on both indoor and outdoor datasets with meager computational resources. Specifically, PEPNet attains a significant $38\%$ and $33\%$ performance improvement on the random split IJRR and M3ED datasets, respectively. Moreover, the lightweight design version PEPNet™ accomplishes results comparable to the SOTA while employing a mere $0.5\%$ of the parameters. + +# 1. Introduction + +Event camera is a type of bio-inspired vision sensor that responds to local changes in illumination exceeding a pre + +![](images/242f5762ec36755ca7783afef0d2fe43bb2ea949b09337431b84fb987e107094.jpg) +Figure 1. The average results using the random split method benchmarked on the CPR dataset [23]. The vertical axis represents the combined rotational and translational errors $(\mathrm{m} + \mathrm{rad})$ . PEPNet is the first point-based CPR network for event cameras. + +defined threshold [17]. Differing from conventional frame-based cameras, event cameras independently and asynchronously produce pixel-level events. Notably, event cameras boast an exceptional triad: high dynamic range, low latency, and ultra-high temporal resolution. This unique combination empowers superior performance under challenging light conditions, adeptly capturing the swift scene and rapid motion changes in near-microsecond precision [27]. Additionally, event cameras boast remarkably low power consumption positioning them as a popular choice for many power-constrained devices. Camera Pose Relocalization (CPR) is an emerging application in power-constrained devices and has gained significant attention. It aims to train several scene-specific neural networks to accurately relocalize the camera pose within the original scene used for training. It is extensively employed in numerous applications, including Virtual Reality (VR), Augmented Reality (AR), and robotics [35], all of which are deployed on battery-powered devices and are power-constrained. + +CPR tasks using event cameras significantly diverge from their conventional CPR counterpart that employs frame-based cameras, primarily due to the inherent dissimilarity in data output mechanisms between these two camera types. Furthermore, events inherently encompass information regarding object motion and depth changes + +across precise temporal and spatial dimensions attributes of paramount significance within the domain of CPR tasks [8, 31]. Regrettably, existing event-based CPR networks often derive from the conventional camera network paradigms and inadequately address the unique attributes of event data. 
More specifically, events are transformed into various representations such as event images [26], time surfaces [18], and other representations[18], leading to the loss of their fine-grained temporal information. Furthermore, most event-based methods tend to overlook the computational load of the network, only prioritizing elevated accuracy, which contradicts the fundamental design principles of event cameras [9]. + +A suitable and faithful data representation is crucial for event cloud processing. Point Cloud is a collection of 3D points $(x, y, z)$ that represents the shape and surface of an object or environment commonly used in lidar and depth cameras [10]. The distance $(z)$ is of great meaning to the tasks. As for event camera, by treating each event's temporal information as the third dimension, event inputs $(x, y, t)$ can be regarded as points and aggregated into a pseudo-Point Cloud [28, 29, 32-34, 40]. However, given that the $t$ dimension of Event Cloud is not strictly equivalent to the spatial dimensions $(x, y, z)$ , direct transplantation of the Point Cloud network has not yet exhibited a satisfactory performance advantage in processing event data [32, 40]. + +In this study, we introduce PEPNet, the first point-based end-to-end CPR network designed to harness the attributes of event cameras. A comparison of our performance and method to other frame-based methods is illustrated Fig. 1 and Fig. 2, respectively. Moreover, diverging from other point-based approaches in event data processing [32, 40], PEPNet demonstrates careful attention to detail by systematically assessing the difference between Event Cloud and Point Cloud in its design approach. This approach enables a more precise extraction of spatio-temporal features and facilitates solutions for a spectrum of event-based tasks. Our main contributions are as follows: First, in the preprocessing stage, PEPNet directly processes the raw data obtained from the event cameras, meticulously preserving the fine-grained temporal coordinate and the order inherent in the event data. Second, PEPNet proficiently captures spatial features and implicit temporal features through its hierarchical structure with temporal aggregation. Subsequently, the explicit temporal feature is processed by the A-BiLSTM, thanks to the preservation of the input sequence in previous stages. As such, this architecture is tailored to accommodate the high temporal resolution and sparse characteristics inherent in event cameras. Thirdly, by restricting ourselves to minimal hardware resources and deliberately avoiding heavy computational modules, PEPNet not only attains SOTA results on IJRR [23] and M3ED [4] dataset but also features a lightweight design that can be executed + +![](images/e27644a896423cce95a5f5900f5d8f6106a29cc8b919f2003ad654c7e13a782b.jpg) +Figure 2. Two different event-based processing methods, frame-based and point-based. + +in real-time. We hope such an approach could potentially democratize computer vision technology by making it accessible to a wider range of devices and applications in the community of edge computing. + +# 2. Related Work + +# 2.1. Frame-based CPR Learning Methods + +Deep learning, crucial for vision tasks like classification and object detection [16], has seen advancements such as PoseNet's innovative transfer learning [14]. Utilizing VGG, ResNet [11, 36], LSTM, and customized loss functions [25, 39, 41], researchers enhanced this approach. 
Auxiliary Learning methods further improved performance [19, 30, 38], although overfitting remains a challenge. Hybrid pose-based methods, combining learning with traditional pipelines [1, 15], offer promise. DSAC series, for instance, achieve high pose estimation accuracy [2, 3], but come with increased computational costs and latency, especially for edge devices. + +# 2.2. Event-based CPR Learning Methods + +Event-based CPR methods often derive from the frame-based CPR network. SP-LSTM [26] employed the stacked spatial LSTM networks to process event images, facilitating a real-time pose estimator. To address the inherent noise in event images, [12] proposed a network structure combining denoise networks, convolutional neural networks, and LSTM, achieving good performance under complex working conditions. In contrast to the aforementioned methods, a novel representation named Reversed Window Entropy Image (RWEI) [18] is introduced, which is based on the widely used event surface [22] and serves as the input to an attention-based DSAC* pipeline [2] to achieve SOTA results. However, the computationally demanding architecture involving representation transformation and hybrid pipeline poses challenges for real-time execution. Additionally, all existing methods ignore the fine-grained temporal feature of the event cameras, and accumulate events into frames for processing, resulting in unsatisfactory performance. + +# 2.3. Point Cloud Network + +Point-based methodologies have transformed the direct processing of Point Cloud, with PointNet [28] as a standout example. Taking a step beyond, PointNet++ [29] introduced a Set Abstraction module. While it initially employed a straightforward MLP in the feature extractor, recent advancements have seen the development of more sophisticated feature extractors to enhance Point Cloud processing [5, 21, 42, 44]. When extending these techniques to Event Cloud, Wang et al. [40] addressed the temporal information processing challenge while maintaining representation in both the x and y axes, enabling gesture recognition using PointNet++. Further enhancements came with PAT [43], which incorporated self-attention and Gumbel subset sampling, leading to improved performance in recognition tasks. However, existing point-based models still fall short in performance compared to frame-based methods. This phenomenon can be attributed to the distinctively different characteristics of Point Cloud and Event Cloud. Event Cloud contradicts the permutation and transformation invariance present in Point Cloud due to its temporal nature. Additionally, the Point Cloud network is not equipped to extract explicit temporal features. + +# 3. PEPNet + +PEPNet pipeline consists of four essential modules: (1) a preprocessing module for the original Event Cloud, (2) a hierarchical Point Cloud feature extraction structure, (3) an Attentive Bi-directional LSTM, and (4) a 6-DOFs pose regressor, as illustrated in Fig. 3. In the following sections, we will provide detailed descriptions and formulations for each module. + +# 3.1. Event Cloud + +To preserve the fine-grained temporal information and original data distribution attributes from the Event Cloud, the 2D-spatial and 1D-temporal event information is constructed into a three-dimensional representation to be processed in Point Cloud. 
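For concreteness, the sliding-window slicing, fixed-size sampling, and normalization formalized in the remainder of this subsection can be sketched in a few lines of NumPy (an illustrative sketch only: the array layout, the placeholder sensor resolution, and the random-sampling fallback are assumptions rather than the released implementation):

```python
import numpy as np

def make_pseudo_point_cloud(events, t_start, R, n_points=1024, w=240, h=180):
    """Slice one sliding window of raw events (x, y, t, p) and normalize it.

    `events` is assumed to be an (M, 4) array sorted by timestamp; w and h are the
    sensor resolution (240x180 is only a placeholder value)."""
    t = events[:, 2]
    window = events[(t >= t_start) & (t < t_start + R)]         # events inside one window
    # Keep a fixed number of points while preserving chronological order.
    idx = np.sort(np.random.choice(len(window), n_points,
                                   replace=len(window) < n_points))
    window = window[idx]
    x = window[:, 0] / w                                         # normalize X by sensor width
    y = window[:, 1] / h                                         # normalize Y by sensor height
    tn = (window[:, 2] - window[0, 2]) / (window[-1, 2] - window[0, 2])  # map T to [0, 1]
    return np.stack([x, y, tn], axis=1)                          # pseudo-Point Cloud of shape (N, 3)
```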
Event Cloud consists of time-series data capturing spatial intensity changes of images in chronological order, and an individual event is denoted as $e_k = (x_k, y_k, t_k, p_k)$ , where $k$ is the index representing the $k_{th}$ element in the sequence. Consequently, the set of events within a single sequence $(\mathcal{E})$ in the dataset can be expressed as: + +$$ +\mathcal {E} = \left\{e _ {k} = \left(x _ {k}, y _ {k}, t _ {k}, p _ {k}\right) \mid k = 1, \dots , n \right\} \tag {1} +$$ + +For a given pose in the dataset, the ground truth resolution is limited to $5ms$ , while the event resolution is $1\mu s$ . Therefore, it is necessary to acquire the events that transpire within the time period we call it sliding window corresponding to the poses, which will serve as the input for the model, + +as depicted by the following equation: + +$$ +P _ {i} = \left\{e _ {j \rightarrow l} \mid t _ {l} - t _ {j} = R \right\} \quad i = 1, \dots , M \tag {2} +$$ + +The symbol $R$ represents the time interval of the sliding window, where $j$ and $l$ denote the start and end event index of the sequence, respectively. The variable $M$ represents the number of sliding windows into which the sequence of events $\mathcal{E}$ is divided. Before being fed into the neural network, $P_{i}$ also needs to undergo sampling and normalization. Sampling is to unify the number of points $N$ as network inputs. We set $N = 1024$ in PEPNet. Additionally, as the spatial coordinates are normalized by the camera's resolution $w$ and $h$ . The normalization process is described by the following equation: + +$$ +P N _ {i} = \left(\frac {X _ {i}}{w}, \frac {Y _ {i}}{h}, \frac {T _ {i} - t _ {j}}{t _ {l} - t _ {j}}\right) \tag {3} +$$ + +$$ +X _ {i}, Y _ {i}, T _ {i} = \left\{x _ {j}, \dots , x _ {l} \right\}, \left\{y _ {j}, \dots , y _ {l} \right\}, \left\{t _ {j}, \dots , t _ {l} \right\} \tag {4} +$$ + +The $X, Y$ is divided by the resolution of the event camera. To normalize $T$ , we subtract the smallest timestamp $t_j$ of the window and divide it by the time difference $t_l - t_j$ , where $t_l$ represents the largest timestamp within the window. After pre-processing, Event Cloud is converted into the pseudo-Point Cloud, which comprises explicit spatial information $(x, y)$ and implicit temporal information $t$ . + +# 3.2. Hierarchy Structure + +The hierarchy structure is the backbone for processing the pseudo-3D Point Cloud and is composed of four primary modules: grouping and sampling, standardization, feature extractor, and aggregation, as described in the following subsection. To efficiently extract deeper explicit spatial and implicit temporal features, the hierarchical structure is tailored and differs from conventional hierarchical structure in a few ways: First, we no longer force permutation invariance as usually done in mainstream point-based methods [21, 28], as the motion information is inherently related to the sequential order of events. Instead, we keep the sequence of all events strictly in the same order as they are generated to preserve the temporal information to be used in the next stage. Second, we replace MaxPooling in aggregation and deploy temporal aggregation which leverages the attention mechanism with softmax, which improves the effective assimilation of temporal information into the resultant feature vectors. + +# 3.2.1 Grouping and Sampling + +Aligned with the frame-based design concept, our focus is to capture both local and global information. 
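A compact NumPy sketch of the farthest point sampling and K-nearest-neighbour grouping described next is given below (illustrative reference implementations; in practice these operations would typically run as optimized GPU kernels):

```python
import numpy as np

def farthest_point_sampling(points, n_samples):
    """Greedily pick n_samples centroids that are maximally spread in (x, y, t) space."""
    idx = np.zeros(n_samples, dtype=np.int64)        # start from the first (earliest) event
    dist = np.full(len(points), np.inf)
    for i in range(1, n_samples):
        dist = np.minimum(dist, np.linalg.norm(points - points[idx[i - 1]], axis=1))
        idx[i] = int(dist.argmax())
    return idx

def knn_grouping(points, centroid_idx, k):
    """For each centroid, gather the indices of its k nearest neighbours."""
    centroids = points[centroid_idx]                                          # (N', 3)
    d = np.linalg.norm(points[None, :, :] - centroids[:, None, :], axis=-1)   # (N', N)
    # Sorting the selected indices keeps every group in chronological order,
    # since the input points are stored by timestamp.
    return np.sort(np.argsort(d, axis=1)[:, :k], axis=1)                      # (N', k)
```

Keeping each group in chronological order matters here, because the temporal aggregation and the A-Bi-LSTM introduced later rely on the strict timestamp ordering of events.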
![](images/c27d39af9633d3daf5f2f2ecd2e7491f1cab468601934972cd464355a32a6eb6.jpg)
Figure 3. PEPNet overall architecture (the time resolution of $t_1, t_2, \ldots, t_n$ is $1\mu s$). The input Event Cloud undergoes direct handling through a sliding window, sampling, and normalization, eliminating the need for any format conversion. Sequentially, the input passes through $S_{num}$ hierarchy structures for spatial feature abstraction and extraction. It further traverses a bidirectional LSTM for temporal feature extraction, culminating in a regressor responsible for 6-DOFs camera pose relocalization.

Local information is acquired by leveraging Farthest Point Sampling (FPS) and K-Nearest Neighbors (KNN), while global information is obtained through a dedicated aggregation module.

$$
PS_{i} = FPS\left(PN_{i}\right) \quad PG_{i} = KNN\left(PN_{i}, PS_{i}\right) \tag{5}
$$

The input dimension of $PN_{i}$ is $[N, 3 + D]$, the centroid dimension of $PS_{i}$ is $[N^{\prime}, 3 + D]$, and the group dimension of $PG_{i}$ is $[N^{\prime}, K, 3 + 2*D]$. $K$ represents the number of nearest neighbors of each center point (centroid), $D$ is the feature dimension of the points at the current stage, and 3 corresponds to the original $(X, Y, T)$ coordinates. Importantly, the ordering of all points in the grouping and sampling process strictly adheres to the timestamp $(T)$, and the $2*D$ feature dimension of the points in a group results from concatenating each point's features with those of its centroid.

# 3.2.2 Standardization

Next, each group undergoes a standardization process to ensure consistent variability between points within the group, as illustrated in the following formulas:

$$
PGS_{i} = \frac{PG_{i} - PS_{i}}{Std\left(PG_{i}\right)} \quad Std\left(PG_{i}\right) = \sqrt{\frac{\sum_{j = 0}^{3n - 1}\left(g_{j} - \bar{g}\right)^{2}}{3n - 1}} \tag{6}
$$

$$
g = \left[x_{0}, y_{0}, t_{0}, \dots, x_{n}, y_{n}, t_{n}\right] \tag{7}
$$

where $PG_{i}$ and $PS_{i}$ are the subsets of $PG$ and $PS$, $Std$ is the standard deviation, the dimension of $Std(PG)$ is $M$, consistent with the number of sliding windows, and $g$ is the set of coordinates of all points in $PG_{i}$.

# 3.2.3 Feature extractor

After standardizing $PG$ by subtracting the centroid and dividing by the standard deviation, feature extraction is performed using a Multi-Layer Perceptron (MLP) with a residual connection. This process encompasses two steps: local feature extraction and global feature extraction. The feature extractor with a bottleneck can be mathematically represented as:

$$
I(x) = f(\mathrm{BN}(\mathrm{MLP}_{1}(x))) \tag{8}
$$

$$
O(x) = \mathrm{BN}\left(\mathrm{MLP}_{2}(x)\right) \tag{9}
$$

$$
Ext(x) = f(x + O(I(x))) \tag{10}
$$

BN denotes a batch normalization layer, while $f$ signifies the nonlinear activation function. Both local feature extraction and global feature extraction maintain identical input and output dimensions; the dimension increases solely when the feature dimension $D$ of the current point is combined with the feature dimension $D$ of the centroid during grouping, resulting in a final dimension of $2*D$. The feature extractor takes an input of dimension $[B, N, K, D]$, and following local feature extraction the dimension remains $[B, N, K, D]$, where $B$ represents the batch size.
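As a rough PyTorch sketch of the bottleneck extractor in Eqs. 8-10 (the hidden width, ReLU activation, and tensor sizes below are illustrative assumptions, not the paper's exact configuration):

```python
import torch
import torch.nn as nn

class ResidualExtractor(nn.Module):
    """Bottleneck MLP block of Eqs. 8-10: Ext(x) = f(x + O(I(x))).

    Operates point-wise on features of shape [B, N, K, D] (or [B, N, D])."""
    def __init__(self, d_in, d_hidden):
        super().__init__()
        self.mlp1 = nn.Linear(d_in, d_hidden)
        self.bn1 = nn.BatchNorm1d(d_hidden)
        self.mlp2 = nn.Linear(d_hidden, d_in)
        self.bn2 = nn.BatchNorm1d(d_in)
        self.act = nn.ReLU()

    def _bn(self, bn, x):
        # BatchNorm1d expects [rows, channels]; flatten leading dims, normalize, restore.
        shape = x.shape
        return bn(x.reshape(-1, shape[-1])).reshape(shape)

    def forward(self, x):
        i = self.act(self._bn(self.bn1, self.mlp1(x)))   # I(x), Eq. 8
        o = self._bn(self.bn2, self.mlp2(i))             # O(I(x)), Eq. 9
        return self.act(x + o)                           # Ext(x), Eq. 10

feat = torch.randn(2, 128, 24, 64)                 # [B, N, K, D]
print(ResidualExtractor(64, 32)(feat).shape)       # torch.Size([2, 128, 24, 64])
```

Because the block keeps its input and output dimensions identical, the same structure can serve as both the local and the global extractor.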
We adopt the attention mechanism for aggregation, yielding an aggregated feature of dimension $[B, N, D]$. The aggregated feature map is then processed by the global feature extractor, completing the feature extraction for the current stage.

# 3.2.4 Temporal Aggregation

Conventional Point Cloud methods favor MaxPooling for feature aggregation because it efficiently extracts the feature of one point among a group of points and discards the rest. However, MaxPooling keeps only the maximum value of each feature dimension along the temporal axis; it is robust to noise perturbation but also ignores the temporal nuances embedded within the features. Conversely, integrating an attention mechanism preserves these nuanced and useful temporal attributes by aggregating features along the temporal axis according to the attention values. Specifically, we employ a direct attention mechanism along the $K$ temporal dimension to aggregate features, as shown in Fig. 3. This mechanism enables the explicit integration of temporal attributes, capitalizing on the inherent strict ordering of the $K$ points. The following formulas summarize this attention mechanism:

$$
F_{\text{local}} = \operatorname{Ext}(x) = \left(F_{t1}, F_{t2}, \dots, F_{tk}\right) \tag{11}
$$

$$
A = \operatorname{SoftMax}\left(\operatorname{MLP}\left(F_{\text{local}}\right)\right) = \left(a_{t1}, a_{t2}, \dots, a_{tk}\right) \tag{12}
$$

$$
F_{\text{aggre}} = A \cdot F_{\text{local}} = F_{t1} \cdot a_{t1} + F_{t2} \cdot a_{t2} + \dots + F_{tk} \cdot a_{tk} \tag{13}
$$

After the local feature extractor is applied, the resulting features are denoted as $F_{\mathrm{local}}$, and $F_{tk}$ denotes the extracted feature of the $k_{th}$ point in a group. The attention mechanism comprises an MLP layer with input dimension $D$ and output dimension 1 (producing $a_{tk}$), followed by a softmax layer. The attention values, represented as $A$, are then multiplied with the original features through batch matrix multiplication, resulting in the aggregated feature $F_{\mathrm{aggre}}$.

# 3.3. A-Bi-LSTM

The temporal features extracted through the hierarchical structure are independent and parallel, lacking any recurrent mechanism within the network. This distinctive attribute, referred to as 'implicit', contrasts with the conventional treatment of temporal information as an indexed process. Consequently, implicit temporal features inadequately capture the interrelations among events along the timeline, whereas explicit temporal features play a pivotal role in the CPR task. To explicitly capture temporal patterns, we introduce the LSTM network, which has proven effective in learning temporal dependencies. For good network performance, controlled feature dimensionality, and comprehensive capture of bidirectional relationships in the pose context, we adopt a bidirectional LSTM network with a lightweight design. The regressor attentively focuses on the output of the Bi-LSTM at each timestep and is more inclined towards the start and end features, as demonstrated in Fig. 6.
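Referring back to the temporal aggregation of Sec. 3.2.4, Eqs. 11-13 reduce to a softmax-weighted sum over the $K$ chronologically ordered neighbors. A minimal sketch, assuming a single-layer scoring MLP and illustrative tensor sizes:

```python
import torch
import torch.nn as nn

class TemporalAggregation(nn.Module):
    """Attention over the K temporally ordered neighbors (Eqs. 11-13), replacing MaxPooling."""
    def __init__(self, d_feat):
        super().__init__()
        self.score = nn.Linear(d_feat, 1)   # MLP: D -> 1 attention logit per point

    def forward(self, f_local):
        # f_local: [B, N, K, D], with the K points kept in chronological order
        a = torch.softmax(self.score(f_local), dim=2)   # [B, N, K, 1], Eq. 12
        return (a * f_local).sum(dim=2)                 # [B, N, D],   Eq. 13

f_local = torch.randn(2, 128, 24, 64)
print(TemporalAggregation(64)(f_local).shape)   # torch.Size([2, 128, 64])
```

Unlike MaxPooling, every one of the $K$ ordered points contributes to the aggregated feature in proportion to its attention weight.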
The integration of bidirectional connections into the recurrent neural network (RNN) is succinctly presented through the following equations:

$$
\mathbf{h}_{t} = f\left(\mathbf{W}_{h} \cdot \mathbf{x}_{t} + \mathbf{U}_{h} \cdot \mathbf{h}_{t-1} + \mathbf{b}_{h}\right) \tag{14}
$$

$$
\mathbf{h}_{t}^{\prime} = f\left(\mathbf{W}_{h}^{\prime} \cdot \mathbf{x}_{t} + \mathbf{U}_{h}^{\prime} \cdot \mathbf{h}_{t+1}^{\prime} + \mathbf{b}_{h}^{\prime}\right) \tag{15}
$$

$$
\mathbf{y}_{t} = \mathbf{V} \cdot \mathbf{h}_{t} + \mathbf{b}_{y} \quad \mathbf{y}_{t}^{\prime} = \mathbf{V}^{\prime} \cdot \mathbf{h}_{t}^{\prime} + \mathbf{b}_{y}^{\prime} \tag{16}
$$

$\mathbf{x}_t$ represents the feature vector at the $t$-th time step of the input sequence, while $\mathbf{h}_{t-1}$ and $\mathbf{h}_{t+1}'$ correspond to the hidden states of the forward and backward RNN units from the previous and subsequent time steps, respectively. $\mathbf{W}_h$, $\mathbf{U}_h$, and $\mathbf{b}_h$ denote the weight matrices and bias vector of the forward RNN unit, while $\mathbf{V}$ and $\mathbf{b}_y$ represent the weight matrix and bias vector of its output layer. Similarly, $\mathbf{W}_h'$, $\mathbf{U}_h'$, and $\mathbf{b}_h'$ are the weight matrices and bias vector of the backward RNN unit, and $\mathbf{V}'$ and $\mathbf{b}_y'$ are the weight matrix and bias vector of its output layer. The activation function $f(\cdot)$ can be chosen as sigmoid, tanh, or another function. The final output $Y_a$ is aggregated over all timesteps using the attention mechanism, where $\oplus$ denotes the concatenation operation.

$$
Y_{t} = y_{t} \oplus y_{t}^{\prime} \tag{17}
$$

$$
A = \operatorname{SoftMax}\left(\operatorname{MLP}\left(Y_{t}\right)\right) \tag{18}
$$

$$
Y_{a} = A \cdot Y_{t} \tag{19}
$$

# 3.4. Loss Function

A fully connected layer with one hidden layer is employed for the final 6-DOFs pose regression task. The regressed displacement vector is denoted as $\hat{p}$, representing the magnitude and direction of movement, while the rotational Euler angles are denoted as $\hat{q}$, indicating the rotational orientation in three-dimensional space.

$$
\operatorname{Loss} = \alpha ||\hat{p} - p||_{2} + \beta ||\hat{q} - q||_{2} + \lambda \sum_{i = 0}^{n} w_{i}^{2} \tag{20}
$$

$p$ and $q$ represent the ground truth obtained from the dataset, while $\alpha$, $\beta$, and $\lambda$ serve as weight proportion coefficients. To tackle the prominent concern of overfitting, especially in the end-to-end setting, we incorporate L2 regularization into the loss function. This regularization, implemented as the squared L2 norm of the network weights $w$, effectively mitigates overfitting.

# 3.5. Overall Architecture

Next, we present the PEPNet pipeline in pseudo-code, using the previously defined variables and formulas, as described in Algorithm 1.

# 4. Experiment

In this section, we present an extensive and in-depth analysis of PEPNet's performance on both indoor and outdoor datasets, covering rotational and translational mean squared error (MSE), model parameters, floating-point operations (FLOPs), and inference time. PEPNet's training and testing are performed on a server equipped with an AMD Ryzen 7950X CPU, a GeForce RTX 4090 GPU, and 32GB of memory.
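For reference, the A-Bi-LSTM readout of Sec. 3.3 (Eqs. 14-19) can be sketched as follows; the hidden sizes are illustrative assumptions and this is not the authors' released code:

```python
import torch
import torch.nn as nn

class ABiLSTM(nn.Module):
    """Attentive bidirectional LSTM readout: per-step forward and backward outputs are
    concatenated (Eq. 17) and pooled with a learned softmax attention (Eqs. 18-19)."""
    def __init__(self, d_in, d_hidden):
        super().__init__()
        self.lstm = nn.LSTM(d_in, d_hidden, batch_first=True, bidirectional=True)
        self.score = nn.Linear(2 * d_hidden, 1)

    def forward(self, x):
        # x: [B, T, d_in] -- per-point features kept in chronological order
        y, _ = self.lstm(x)                          # [B, T, 2*d_hidden], Eq. 17
        a = torch.softmax(self.score(y), dim=1)      # [B, T, 1],          Eq. 18
        return (a * y).sum(dim=1)                    # [B, 2*d_hidden],    Eq. 19

seq = torch.randn(2, 128, 256)                       # [B, N_3, D]
print(ABiLSTM(256, 128)(seq).shape)                  # torch.Size([2, 256])
```

The softmax over timesteps realizes Eqs. 18-19 and, as Fig. 6 later shows, tends to emphasize the start and end of the window.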
![](images/87cc3789c1bcb4f5e467c8fd27e6367cb42f56cced06a19032593cae04147682.jpg)
Figure 4. Event-based CPR dataset visualization.

Algorithm 1 PEPNet pipeline
Input: Raw event stream $\mathcal{E}$
Parameters: $N_{p} = 1024$, $R = 1e3$, $S_{\mathrm{num}} = 3$, $K = 24$
Output: 6-DOFs pose $(\hat{p},\hat{q})$
1: Preprocessing
2: for $j$ in $\mathrm{len}(\mathcal{E})$ do
3: $P_{i}$.append($e_{j\rightarrow l}$); $j = l$, where $t_l - t_j = R$
4: if $\mathrm{len}(P_i) > N_p$: $i = i + 1$
5: end for
6: $PN =$ Normalize(Sampling($P$))
7: Hierarchy structure
8: for stage in range($S_{\mathrm{num}}$) do
9: Grouping and Sampling($PN$)
10: Get $PGS\in [B,N_{\mathrm{stage}},K,2*D_{\mathrm{stage}-1}]$
11: Local Extractor($PGS$)
12: Get $F_{\mathrm{local}}\in [B,N_{\mathrm{stage}},K,D_{\mathrm{stage}}]$
13: Attentive Aggregate($F_{\mathrm{local}}$)
14: Get $F_{\mathrm{aggre}}\in [B,N_{\mathrm{stage}},D_{\mathrm{stage}}]$
15: Global Extractor($F_{\mathrm{aggre}}$)
16: Get $PN = F_{\mathrm{global}}\in [B,N_{\mathrm{stage}},D_{\mathrm{stage}}]$
17: end for
18: A-Bi-LSTM
19: Forward: Get $y_{t}\in [B,N_{3},D_{S_{\mathrm{num}}}/2]$
20: Reverse: Get $y_t^\prime \in [B,N_{3},D_{S_{\mathrm{num}}}/2]$
21: Attention: Get $Y_{a}\in [B,D_{S_{\mathrm{num}}}]$
22: Regressor
23: Get 6-DOFs pose $(\hat{p},\hat{q})$

# 4.1. Dataset

We employ the widely evaluated event-based CPR datasets IJRR [23] and M3ED [4], encompassing both indoor and outdoor scenes. Two distinct methods of partitioning the CPR dataset [26] have been benchmarked: the random split and the novel split. In the random split, $70\%$ of all sequences are randomly selected for training and the remaining sequences are allocated for testing. In the novel split, we divide the data chronologically, using the initial $70\%$ of sequences for training and the subsequent $30\%$ for testing.

# 4.2. Baseline

We perform a thorough evaluation of our proposed method by comparing it with SOTA event-based approaches, namely CNN-LSTM [37] and AECRN [18]. Moreover, we present results derived from other well-established computer vision methods, including PoseNet [14], Bayesian PoseNet [13], Pairwise-CNN [15], LSTM-Pose [39], and SP-LSTM [26].

# 4.3. IJRR Dataset Results

# 4.3.1 Random Split Results

Based on the findings presented in Tab. 1, it is apparent that PEPNet surpasses the other models in both rotation and translation errors across all sequences. Notably, PEPNet achieves these results despite using significantly fewer model parameters and FLOPs than the frame-based approaches. Moreover, PEPNet not only exhibits a remarkable $38\%$ improvement in average error over the SOTA CNN-LSTM method but also attains superior results on nearly all sequences. On the more intricate and challenging hdr_poster sequence, the frame-based approach relies on a denoising network to yield improved results [12], whereas PEPNet achieves remarkable performance without any additional processing. This observation strongly implies that PEPNet's Point Cloud approach is more robust than the frame-based method, highlighting its inherent superiority in handling complex scenarios.

Furthermore, we introduce an alternative variant, PEPNet$_{tiny}$, which adopts a lighter model architecture while preserving relatively strong performance. As depicted in Fig.
3, PEPNet consists of three stages, and the model's size is contingent upon the dimensionality of the MLPs at each stage. The dimensions for the standard structure are [64, 128, 256], whereas those for the tiny structure are [16, 32, 64].

| Network | PoseNet | Bayesian PoseNet | Pairwise-CNN | LSTM-Pose | SP-LSTM | CNN-LSTM | PEPNet | PEPNet$_{tiny}$ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Parameter | 12.43M | 22.35M | 22.34M | 16.05M | 135.25M | 12.63M | 0.774M | 0.064M |
| FLOPs | 1.584G | 3.679G | 7.359G | 1.822G | 15.623G | 1.998G | 0.459G | 0.033G |
| shapes_rotation | 0.109m, 7.388° | 0.142m, 9.557° | 0.095m, 6.332° | 0.032m, 4.439° | 0.025m, 2.256° | 0.012m, 1.652° | 0.005m, 1.372° | 0.006m, 1.592° |
| box_translation | 0.193m, 6.977° | 0.190m, 6.636° | 0.178m, 6.153° | 0.083m, 6.215° | 0.036m, 2.195° | 0.013m, 0.873° | 0.017m, 0.845° | 0.031m, 1.516° |
| shapes_translation | 0.238m, 6.001° | 0.264m, 6.235° | 0.201m, 5.146° | 0.056m, 5.018° | 0.035m, 2.117° | 0.020m, 1.471° | 0.011m, 0.582° | 0.013m, 0.769° |
| dynamic_6dof | 0.297m, 9.332° | 0.296m, 8.963° | 0.245m, 5.962° | 0.097m, 6.732° | 0.031m, 2.047° | 0.016m, 1.662° | 0.015m, 1.045° | 0.018m, 1.144° |
| hdr_poster | 0.282m, 8.513° | 0.290m, 8.710° | 0.232m, 7.234° | 0.108m, 6.186° | 0.051m, 3.354° | 0.033m, 2.421° | 0.016m, 0.991° | 0.028m, 1.863° |
| poster_translation | 0.266m, 6.516° | 0.264m, 5.459° | 0.211m, 6.439° | 0.079m, 5.734° | 0.036m, 2.074° | 0.020m, 1.468° | 0.012m, 0.588° | 0.019m, 0.953° |
| Average | 0.231m, 7.455° | 0.241m, 7.593° | 0.194m, 6.211° | 0.076m, 5.721° | 0.036m, 2.341° | 0.019m, 1.591° | 0.013m, 0.904° | 0.019m, 1.306° |

Table 1. IJRR random split results. The table presents the median error for each sequence, as well as the average error across the six sequences. It also presents the number of parameters and FLOPs for each model. Bold indicates the best result, while underline signifies the second-best result.

![](images/592b7ac577719929ef85ee94af2b04b4fe037688f1d81538cd3ab38249865a88.jpg)
Figure 5. Error distribution of event-based CPR results achieved by PEPNet using the random split. (a) Translation errors. (b) Rotation errors.

As indicated in Tab. 1, even with a mere $0.5\%$ of the CNN-LSTM's parameters, $\mathrm{PEPNet}_{tiny}$ achieves comparable and even slightly superior results. This remarkable outcome emphasizes the superiority of processing event cloud data directly.

# 4.3.2 Error Distribution

Fig. 5 illustrates the error distribution of PEPNet across six distinct sequences using the random split method, specifically: shapes_rotation, box_translation, shapes_translation, dynamic_6dof, hdr_poster, and poster_translation. For clarity, the top and bottom boundaries of each box represent the first and third quartiles, respectively, indicating the inter-quartile range (IQR), and the median is denoted by the band within the box. The IQR of the translation error lies approximately between $0.004\mathrm{m}$ and $0.024\mathrm{m}$, while the orientation error ranges from $0.4^{\circ}$ to $1.9^{\circ}$.

# 4.3.3 Novel Split Results

To assess the model's robustness, we adopt the novel split as an evaluation criterion, as shown in Tab. 2. During training, we observe a more pronounced overfitting phenomenon than with the random split. We attribute this to the disparities in data distribution between the training set and the test set, as well as the limited data size. Contrary to the methods we compare against, PEPNet does not necessitate pre-trained weights. For instance, SP-LSTM relies on pre-trained VGG19 weights from ImageNet, while AECRN requires synthetic heuristic depth and an extensive pretraining process.

To address overfitting, PEPNet employs conventional methods that yield consistent results comparable with the SOTA on the three shape sequences displayed in Tab. 2. It is essential to note that AECRN adopts a hybrid approach, combining neural network regression of scene coordinates with differentiable RANSAC for pose estimation. Moreover, this method incurs significant time consumption, with even the SOTA DSAC* algorithm taking nearly 30ms, excluding additional time for data format conversion. This time constraint presents compatibility challenges with the low-latency nature of event cameras. In contrast, PEPNet executes on a server in just 6.7ms, with the main time-consuming module being grouping and sampling. Furthermore, with potential field programmable gate array (FPGA) or application-specific integrated circuit (ASIC) support for these operations [6, 20], PEPNet's performance can be further accelerated.

# 4.4. M3ED Dataset Results

We selected three robots (Car, Falcon, and Spot) to extend the application scope of PEPNet across five sequences in an outdoor night setting, as illustrated in Tab. 3. Because M3ED has a much higher resolution than IJRR, we performed downsampling and increased the number of points (from 1024 to 2048); the other experimental configurations are consistent with those of the IJRR dataset with the random split.
The results demonstrate the superior performance of PEPNet even in more challenging outdoor environments.

# 4.5. Attention Visualization

As shown in Fig. 6, we observe that the attention scores are larger at both the beginning and the end. We tentatively infer that the model focuses more on the difference in features between the start and the end for CPR, which is also seen in the geometry approach [7, 24].

| Network | PoseNet | Bayesian PoseNet | Pairwise-CNN | LSTM-Pose | SP-LSTM | DSAC* | AECRN | PEPNet |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| shapes_rotation | 0.201m, 12.499° | 0.164m, 12.188° | 0.187m, 10.426° | 0.061m, 7.625° | 0.045m, 5.017° | 0.029m, 2.3° | 0.025m, 2.0° | 0.016m, 1.745° |
| shapes_translation | 0.198m, 6.696° | 0.213m, 7.441° | 0.225m, 11.627° | 0.108m, 8.468° | 0.072m, 4.496° | 0.038m, 2.2° | 0.029m, 1.7° | 0.026m, 1.659° |
| shapes_6dof | 0.320m, 13.733° | 0.326m, 13.296° | 0.314m, 13.245° | 0.096m, 8.973° | 0.078m, 5.524° | 0.054m, 3.1° | 0.052m, 3.0° | 0.045m, 2.984° |
| Average | 0.240m, 11.067° | 0.234m, 10.975° | 0.242m, 11.766° | 0.088m, 8.355° | 0.065m, 5.012° | 0.040m, 2.53° | 0.035m, 2.23° | 0.029m, 2.13° |
| Inference time | 5ms | 6ms | 12ms | 9.49ms | 4.79ms | 30ms | 30ms | 6.7ms |

Table 2. IJRR novel split results. The table reports the same information as Tab. 1. To assess the model's runtime, we conduct tests on a server platform, specifically measuring the average time required for inference on a single sample.

| M3ED | PoseNet | LSTM-Pose | CNN-LSTM | PEPNet |
| --- | --- | --- | --- | --- |
| INPUT | Event Frame | Event Frame | Event Frame | Point Cloud |
| Falcon_Night_High_Beans | 0.181m, 2.221° | 0.112m, 0.946° | 0.107m, 1.435° | 0.082m, 0.575° |
| Car_Night_Pen_S_Loop | 1.618m, 8.126° | 0.667m, 4.914° | 0.773m, 3.005° | 0.577m, 1.319° |
| Spot_Night_Pen_Loop | 1.735m, 5.502° | 0.761m, 7.898° | 0.401m, 1.771° | 0.468m, 1.062° |
| Car_Pen_S_Loop_darker | 1.841m, 4.575° | 0.751m, 3.738° | 0.598m, 2.772° | 0.385m, 1.01° |
| Spot_Plaza_Light | 1.372m, 9.564° | 0.565m, 5.221° | 0.273m, 2.001° | 0.348m, 1.234° |
| Average | 1.349m, 5.998° | 0.571m, 4.543° | 0.43m, 2.197° | 0.372m, 1.04° |

Table 3. Outdoor extension on the M3ED dataset with the random split.

| Condition | HS | LSTM | Bi-LSTM | Aggregation | Translation | Rotation | T+R |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 1 | | | | Max | 0.015m | 0.884° | 3.04 |
| 2 | | | | Temporal | 0.014m | 0.786° | 2.77 |
| 3 | | | | Max | 0.014m | 0.833° | 2.85 |
| 4 | | | | Temporal | 0.012m | 0.603° | 2.25 |
| 5 | | | | Max | 0.014m | 0.813° | 2.82 |
| 6 | | | | Temporal | 0.011m | 0.582° | 2.12 |

Table 4. Ablation study for the three key modules. $\mathrm{T} + \mathrm{R} = \text{Translation} + \text{Rotation} \cdot \pi / 180 \;(\mathrm{m} + \mathrm{rad})$.

![](images/0d7401f1ccd5efb782c94f401a63faae94e0db35c05e1b0198b14a0f92312caa.jpg)
Figure 6. Visualization of the attention values in the time domain. The horizontal axis shows 128 points in chronological order, and the vertical axis shows the attention value of each corresponding point.

# 4.6. Ablation Study

Key Module Ablation: To validate the efficacy of the key modules, we conducted an ablation experiment focusing on three primary components: the hierarchy structure, the Bi-LSTM, and temporal aggregation. These experiments evaluate rotation and translation errors on the shapes_translation sequence with the random split, and the combined error $(\mathrm{T} + \mathrm{R})$ is measured after processing. Our experimental setup comprises the conditions illustrated in Tab. 4.


| Scene | α = 0.5, β = 0.5 | α = 0.25, β = 0.75 | α = 0.75, β = 0.25 |
| --- | --- | --- | --- |
| shapes_translation | 0.0302m, 1.684°, 5.96 | 0.0359m, 1.72°, 6.59 | 0.0303m, 2.056°, 6.62 |
| shapes_rotation | 0.0143m, 2.888°, 6.47 | 0.0159m, 2.68°, 6.27 | 0.014m, 3.36°, 7.26 |
| dynamic_6dof | 0.0542m, 2.799°, 10.3 | 0.0611m, 2.488°, 10.5 | 0.0516m, 3.251°, 10.8 |

Table 5. Ablation study for the loss function's coefficients.

Condition 1 represents the sole utilization of the hierarchy structure (HS), while Condition 2 combines the ordinary LSTM, Condition 3 incorporates the bidirectional LSTM, and Condition 4 integrates the attention mechanism for feature aggregation. The ablation experiments reveal significant insights. Experiments 1 and 3 demonstrate that augmenting LSTM enhances the extraction of explicit temporal features. Moreover, experiments 3 and 5 reveal the effectiveness of the bidirectional LSTM in extracting motion information. Additionally, experiments 5 and 6 confirm the notable impact of attention in feature aggregation, resulting in a substantial reduction in error rates.

Loss ablation: We report the experiment on the scaling coefficients of the loss function in Tab. 5. This experiment used the tiny version of PEPNet, trained for 100 epochs, and the outcomes are the MSE in translation, rotation, and $\mathrm{T} + \mathrm{R}$. Across three distinct motion scenarios (translation, rotation, and 6dof), varied coefficient ratios induced deviations in the obtained results. For example, in shapes_rotation, increasing the weight on rotation improves the results.

# 5. Conclusion

In this paper, we introduce an end-to-end CPR network that operates directly on raw event clouds without frame-based preprocessing. PEPNet boasts an impressively lightweight framework that adeptly extracts spatial and temporal features, leading to SOTA performance. Diverging from frame-based approaches, our method prioritizes preserving the inherent distribution of the event cloud, capitalizing on its sparse nature to achieve extraordinary capabilities for ultra-low-power applications.

Acknowledgment. This work was supported in part by the Young Scientists Fund of the National Natural Science Foundation of China (Grant 62305278), as well as the Hong Kong University of Science and Technology (Guangzhou) Joint Funding Program under Grants 2023A03J0154 and 2024A03J0618.

# References

[1] Vassileios Balntas, Shuda Li, and Victor Prisacariu. Relocnet: Continuous metric learning relocalisation using neural nets. In Proceedings of the European Conference on Computer Vision (ECCV), pages 751-767, 2018. 2
[2] Eric Brachmann and Carsten Rother. Visual camera relocalization from rgb and rgb-d images using dsac. IEEE transactions on pattern analysis and machine intelligence, 44(9):5847-5865, 2021. 2
[3] Eric Brachmann, Alexander Krull, Sebastian Nowozin, Jamie Shotton, Frank Michel, Stefan Gumhold, and Carsten Rother. Dsac-differentiable ransac for camera localization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6684-6692, 2017. 2
[4] Kenneth Chaney, Fernando Cladora, Ziyun Wang, Anthony Bisulco, M Ani Hsieh, Christopher Korpela, Vijay Kumar, Camillo J Taylor, and Kostas Daniilidis. M3ed: Multi-robot, multi-sensor, multi-environment event dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4015-4022, 2023. 2, 6
[5] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020. 3
[6] Haotian Fu, Yulong Huang, Tingran Chen, Chenyi Fu, Hongwei Ren, Yue Zhou, Shouzhong Peng, Zhirui Zong, Biao Pan, and Bojun Cheng.
Ds-cim: A 40nm asynchronous dual-spike driven, mram compute-in-memory macro for spiking neural network. IEEE Transactions on Circuits and Systems I: Regular Papers, 2024. 7 +[7] Guillermo Gallego, Christian Forster, Elias Mueggler, and Davide Scaramuzza. Event-based camera pose tracking using a generative event model. arXiv preprint arXiv:1510.01972, 2015. 8 +[8] Guillermo Gallego, Jon EA Lund, Elias Mueggler, Henri Rebecq, Tobi Delbruck, and Davide Scaramuzza. Event-based, 6-dof camera tracking from photometric depth maps. IEEE transactions on pattern analysis and machine intelligence, 40(10):2402-2412, 2017. 2 +[9] Guillermo Gallego, Tobi Delbrück, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew J Davison, Jörg Conradt, Kostas Daniilidis, et al. Event-based vision: A survey. IEEE transactions on pattern analysis and machine intelligence, 44(1):154-180, 2020. 2 +[10] Yulan Guo, Hanyun Wang, Qingyong Hu, Hao Liu, Li Liu, and Mohammed Bennamoun. Deep learning for 3d point clouds: A survey. IEEE transactions on pattern analysis and machine intelligence, 43(12):4338-4364, 2020. 2 +[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 2 + +[12] Yifan Jin, Lei Yu, Guangqiang Li, and Shumin Fei. A 6-dofs event-based camera relocalization system by cnn-lstm and image denoising. Expert Systems with Applications, 170: 114535, 2021. 2, 6 +[13] Alex Kendall and Roberto Cipolla. Modelling uncertainty in deep learning for camera relocalization. In 2016 IEEE international conference on Robotics and Automation (ICRA), pages 4762-4769. IEEE, 2016. 6 +[14] Alex Kendall, Matthew Grimes, and Roberto Cipolla. Posenet: A convolutional network for real-time 6-dof camera relocalization. In Proceedings of the IEEE international conference on computer vision, pages 2938-2946, 2015. 2, 6 +[15] Zakaria Laskar, Iaroslav Melekhov, Surya Kalia, and Juho Kannala. Camera relocalization by computing pairwise relative poses using convolutional neural network. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 929-938, 2017. 2, 6 +[16] Yann LeCun, Yoshua Bengio, and Geoffrey Hinton. Deep learning. nature, 521(7553):436-444, 2015. 2 +[17] Patrick Lichtsteiner, Christoph Posch, and Tobi Delbruck. A $128 \times 128$ 120 db $15\mu s$ latency asynchronous temporal contrast vision sensor. IEEE journal of solid-state circuits, 43 (2):566-576, 2008. 1 +[18] Hu Lin, Meng Li, Qianchen Xia, Yifeng Fei, Baocai Yin, and Xin Yang. 6-dof pose relocalization for event cameras with entropy frame and attention networks. In The 18th ACM SIGGRAPH International Conference on Virtual-Reality Continuum and its Applications in Industry, pages 1–8, 2022. 2, 6 +[19] Yimin Lin, Zhaoxiang Liu, Jianfeng Huang, Chaopeng Wang, Guoguang Du, Jinqiang Bai, and Shiguo Lian. Deep global-relative networks for end-to-end 6-dof visual localization and odometry. In PRICAI 2019: Trends in Artificial Intelligence: 16th Pacific Rim International Conference on Artificial Intelligence, Cuvu, Yanuca Island, Fiji, August 26–30, 2019, Proceedings, Part II, pages 454–467. Springer, 2019. 2 +[20] Haobo Liu, Zhengyang Qian, Wei Wu, Hongwei Ren, Zhiwei Liu, and Leibin Ni. Afpr-cim: An analog-domain floating-point rram-based compute-in-memory architecture with dynamic range adaptive fp-adc. arXiv preprint arXiv:2402.13798, 2024. 
7 +[21] Xu Ma, Can Qin, Haoxuan You, Haoxi Ran, and Yun Fu. Rethinking network design and local geometry in point cloud: A simple residual mlp framework. In International Conference on Learning Representations, 2021. 3 +[22] Anton Mitrokhin, Zhiyuan Hua, Cornelia Fermuller, and Yiannis Aloimonos. Learning visual motion segmentation using event surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14414-14423, 2020. 2 +[23] Elias Mueggler, Henri Rebecq, Guillermo Gallego, Tobi Delbruck, and Davide Scaramuzza. The event-camera dataset and simulator: Event-based data for pose estimation, visual odometry, and slam. The International Journal of Robotics Research, 36(2):142-149, 2017. 1, 2, 6 +[24] Elias Mueggler, Guillermo Gallego, Henri Rebecq, and Davide Scaramuzza. Continuous-time visual-inertial odometry + +for event cameras. IEEE Transactions on Robotics, 34(6): 1425-1440, 2018. 8 +[25] Tayyab Naseer and Wolfram Burgard. Deep regression for monocular camera-based 6-dof global localization in outdoor environments. In 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 1525-1530. IEEE, 2017. 2 +[26] Anh Nguyen, Thanh-Toan Do, Darwin G Caldwell, and Nikos G Tsagarakis. Real-time 6dof pose relocalization for event cameras with stacked spatial LSTM networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 2, 6 +[27] Christoph Posch, Daniel Matolin, and Rainer Wohlgenannt. A qvga 143 db dynamic range frame-free pwm image sensor with lossless pixel-level video compression and time-domain cds. IEEE Journal of Solid-State Circuits, 46(1):259-275, 2010. 1 +[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 2, 3 +[29] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 2, 3 +[30] Noha Radwan, Abhinav Valada, and Wolfram Burgard. Vlocnet++: Deep multitask learning for semantic visual localization and odometry. IEEE Robotics and Automation Letters, 3(4):4407-4414, 2018. 2 +[31] Henri Rebecq, Daniel Gehrig, and Davide Scaramuzza. Esim: an open event camera simulator. In Conference on robot learning, pages 969-982. PMLR, 2018. 2 +[32] Hongwei Ren, Yue Zhou, Haotian Fu, Yulong Huang, Renjing Xu, and Bojun Cheng. Ttpoint: A tensorized point cloud network for lightweight action recognition with event cameras. arXiv preprint arXiv:2308.09993, 2023. 2 +[33] Hongwei Ren, Yue Zhou, Yulong Huang, Haotian Fu, Xiaopeng Lin, Jie Song, and Bojun Cheng. Spikepoint: An efficient point-based spiking neural network for event cameras action recognition. arXiv preprint arXiv:2310.07189, 2023. +[34] Yusuke Sekikawa, Kosuke Hara, and Hideo Saito. Eventnet: Asynchronous recursive event processing. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3887-3896, 2019. 2 +[35] Yoli Shavit and Ron Ferens. Introduction to camera pose estimation with deep learning. arXiv preprint arXiv:1907.05272, 2019. 1 +[36] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 2 +[37] Ahmed Tabia, Fabien Bonardi, and Samia Bouchafa. 
Deep learning for pose estimation from event camera. In 2022 International Conference on Digital Image Computing: Techniques and Applications (DICTA), pages 1-7. IEEE, 2022. 6 + +[38] Abhinav Valada, Noha Radwan, and Wolfram Burgard. Deep auxiliary learning for visual localization and odometry. In 2018 IEEE international conference on robotics and automation (ICRA), pages 6939-6946. IEEE, 2018. 2 +[39] Florian Walch, Caner Hazirbas, Laura Leal-Taixe, Torsten Sattler, Sebastian Hilsenbeck, and Daniel Cremers. Image-based localization using lstms for structured feature correlation. In Proceedings of the IEEE International Conference on Computer Vision, pages 627-637, 2017. 2, 6 +[40] Qinyi Wang, Yexin Zhang, Junsong Yuan, and Yilong Lu. Space-time event clouds for gesture recognition: From rgb cameras to event cameras. In 2019 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 1826-1835. IEEE, 2019. 2, 3 +[41] Jian Wu, Liwei Ma, and Xiaolin Hu. Delving deeper into convolutional neural networks for camera relocalization. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 5644-5651. IEEE, 2017. 2 +[42] Wenxuan Wu, Zhongang Qi, and Li Fuxin. Pointconv: Deep convolutional networks on 3d point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9621-9630, 2019. 3 +[43] Jiancheng Yang, Qiang Zhang, Bingbing Ni, Linguuo Li, Jinxian Liu, Mengdie Zhou, and Qi Tian. Modeling point clouds with self-attention and gumbel subset sampling. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3323–3332, 2019. 3 +[44] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16259-16268, 2021. 
3 \ No newline at end of file diff --git a/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/images.zip b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..5af8f61022f5c333d73626b5cef8602366e56837 --- /dev/null +++ b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9cfd732c903d0e015f390ae9a3bf3c638ac2918b4ec495dd8e9d15edb83feda +size 591657 diff --git a/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/layout.json b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e07ab847f237cc4641096745b227ef72eab3cbe3 --- /dev/null +++ b/2024/A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization/layout.json @@ -0,0 +1,9226 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 153, + 103, + 440, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 103, + 440, + 138 + ], + "spans": [ + { + "bbox": [ + 153, + 103, + 440, + 138 + ], + "type": "text", + "content": "A Simple and Effective Point-based Network for Event Camera 6-DOFs Pose Relocalization" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 99, + 161, + 507, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 161, + 507, + 190 + ], + "spans": [ + { + "bbox": [ + 99, + 161, + 507, + 190 + ], + "type": "text", + "content": "Hongwei Ren*, Jiadong Zhu*, Yue Zhou, Haotian Fu, Yulong Huang, Bojun Cheng † \nThe Hong Kong University of Science and Technology(Guangzhou)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 191, + 550, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 191, + 550, + 204 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 550, + 204 + ], + "type": "text", + "content": "{hren066, jzhu484, yzhou833, hfu373, yhuang496}@connect.hkust-gz.edu.cn, bocheng@hkust-gz.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "spans": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 256, + 289, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 256, + 289, + 626 + ], + "spans": [ + { + "bbox": [ + 47, + 256, + 289, + 626 + ], + "type": "text", + "content": "Event cameras exhibit remarkable attributes such as high dynamic range, asynchronicity, and low latency, making them highly suitable for vision tasks that involve high-speed motion in challenging lighting conditions. These cameras implicitly capture movement and depth information in events, making them appealing sensors for Camera Pose Relocalization (CPR) tasks. Nevertheless, existing CPR networks based on events neglect the pivotal fine-grained temporal information in events, resulting in unsatisfactory performance. Moreover, the energy-efficient features are further compromised by the use of excessively complex models, hindering efficient deployment on edge devices. 
In this paper, we introduce PEPNet, a simple and effective point-based network designed to regress six degrees of freedom (6-DOFs) event camera poses. We rethink the relationship between the event camera and CPR tasks, leveraging the raw Point Cloud directly as network input to harness the high-temporal resolution and inherent sparsity of events. PEPNet is adept at abstracting the spatial and implicit temporal features through hierarchical structure and explicit temporal features by Attentive Bidirectional Long Short-Term Memory (A-Bi-LSTM). By employing a carefully crafted lightweight design, PEPNet delivers state-of-the-art (SOTA) performance on both indoor and outdoor datasets with meager computational resources. Specifically, PEPNet attains a significant " + }, + { + "bbox": [ + 47, + 256, + 289, + 626 + ], + "type": "inline_equation", + "content": "38\\%" + }, + { + "bbox": [ + 47, + 256, + 289, + 626 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 256, + 289, + 626 + ], + "type": "inline_equation", + "content": "33\\%" + }, + { + "bbox": [ + 47, + 256, + 289, + 626 + ], + "type": "text", + "content": " performance improvement on the random split IJRR and M3ED datasets, respectively. Moreover, the lightweight design version PEPNet™ accomplishes results comparable to the SOTA while employing a mere " + }, + { + "bbox": [ + 47, + 256, + 289, + 626 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 47, + 256, + 289, + 626 + ], + "type": "text", + "content": " of the parameters." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 650, + 128, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 650, + 128, + 662 + ], + "spans": [ + { + "bbox": [ + 47, + 650, + 128, + 662 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 670, + 287, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 287, + 696 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 287, + 696 + ], + "type": "text", + "content": "Event camera is a type of bio-inspired vision sensor that responds to local changes in illumination exceeding a pre" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 348, + 229, + 504, + 337 + ], + "blocks": [ + { + "bbox": [ + 348, + 229, + 504, + 337 + ], + "lines": [ + { + "bbox": [ + 348, + 229, + 504, + 337 + ], + "spans": [ + { + "bbox": [ + 348, + 229, + 504, + 337 + ], + "type": "image", + "image_path": "242f5762ec36755ca7783afef0d2fe43bb2ea949b09337431b84fb987e107094.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 345, + 547, + 390 + ], + "lines": [ + { + "bbox": [ + 305, + 345, + 547, + 390 + ], + "spans": [ + { + "bbox": [ + 305, + 345, + 547, + 390 + ], + "type": "text", + "content": "Figure 1. The average results using the random split method benchmarked on the CPR dataset [23]. The vertical axis represents the combined rotational and translational errors " + }, + { + "bbox": [ + 305, + 345, + 547, + 390 + ], + "type": "inline_equation", + "content": "(\\mathrm{m} + \\mathrm{rad})" + }, + { + "bbox": [ + 305, + 345, + 547, + 390 + ], + "type": "text", + "content": ". PEPNet is the first point-based CPR network for event cameras." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 412, + 546, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 412, + 546, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 412, + 546, + 640 + ], + "type": "text", + "content": "defined threshold [17]. Differing from conventional frame-based cameras, event cameras independently and asynchronously produce pixel-level events. Notably, event cameras boast an exceptional triad: high dynamic range, low latency, and ultra-high temporal resolution. This unique combination empowers superior performance under challenging light conditions, adeptly capturing the swift scene and rapid motion changes in near-microsecond precision [27]. Additionally, event cameras boast remarkably low power consumption positioning them as a popular choice for many power-constrained devices. Camera Pose Relocalization (CPR) is an emerging application in power-constrained devices and has gained significant attention. It aims to train several scene-specific neural networks to accurately relocalize the camera pose within the original scene used for training. It is extensively employed in numerous applications, including Virtual Reality (VR), Augmented Reality (AR), and robotics [35], all of which are deployed on battery-powered devices and are power-constrained." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 547, + 715 + ], + "type": "text", + "content": "CPR tasks using event cameras significantly diverge from their conventional CPR counterpart that employs frame-based cameras, primarily due to the inherent dissimilarity in data output mechanisms between these two camera types. Furthermore, events inherently encompass information regarding object motion and depth changes" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 201, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 201, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 201, + 713 + ], + "type": "text", + "content": "*equal contribution. †corresponding author." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "18112" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": "across precise temporal and spatial dimensions attributes of paramount significance within the domain of CPR tasks [8, 31]. Regrettably, existing event-based CPR networks often derive from the conventional camera network paradigms and inadequately address the unique attributes of event data. More specifically, events are transformed into various representations such as event images [26], time surfaces [18], and other representations[18], leading to the loss of their fine-grained temporal information. Furthermore, most event-based methods tend to overlook the computational load of the network, only prioritizing elevated accuracy, which contradicts the fundamental design principles of event cameras [9]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "spans": [ + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "text", + "content": "A suitable and faithful data representation is crucial for event cloud processing. Point Cloud is a collection of 3D points " + }, + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "inline_equation", + "content": "(x, y, z)" + }, + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "text", + "content": " that represents the shape and surface of an object or environment commonly used in lidar and depth cameras [10]. The distance " + }, + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "inline_equation", + "content": "(z)" + }, + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "text", + "content": " is of great meaning to the tasks. As for event camera, by treating each event's temporal information as the third dimension, event inputs " + }, + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "inline_equation", + "content": "(x, y, t)" + }, + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "text", + "content": " can be regarded as points and aggregated into a pseudo-Point Cloud [28, 29, 32-34, 40]. However, given that the " + }, + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "text", + "content": " dimension of Event Cloud is not strictly equivalent to the spatial dimensions " + }, + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "inline_equation", + "content": "(x, y, z)" + }, + { + "bbox": [ + 46, + 231, + 289, + 387 + ], + "type": "text", + "content": ", direct transplantation of the Point Cloud network has not yet exhibited a satisfactory performance advantage in processing event data [32, 40]." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 391, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 391, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 391, + 289, + 714 + ], + "type": "text", + "content": "In this study, we introduce PEPNet, the first point-based end-to-end CPR network designed to harness the attributes of event cameras. A comparison of our performance and method to other frame-based methods is illustrated Fig. 1 and Fig. 2, respectively. Moreover, diverging from other point-based approaches in event data processing [32, 40], PEPNet demonstrates careful attention to detail by systematically assessing the difference between Event Cloud and Point Cloud in its design approach. This approach enables a more precise extraction of spatio-temporal features and facilitates solutions for a spectrum of event-based tasks. Our main contributions are as follows: First, in the preprocessing stage, PEPNet directly processes the raw data obtained from the event cameras, meticulously preserving the fine-grained temporal coordinate and the order inherent in the event data. Second, PEPNet proficiently captures spatial features and implicit temporal features through its hierarchical structure with temporal aggregation. Subsequently, the explicit temporal feature is processed by the A-BiLSTM, thanks to the preservation of the input sequence in previous stages. As such, this architecture is tailored to accommodate the high temporal resolution and sparse characteristics inherent in event cameras. Thirdly, by restricting ourselves to minimal hardware resources and deliberately avoiding heavy computational modules, PEPNet not only attains SOTA results on IJRR [23] and M3ED [4] dataset but also features a lightweight design that can be executed" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 348, + 70, + 506, + 163 + ], + "blocks": [ + { + "bbox": [ + 348, + 70, + 506, + 163 + ], + "lines": [ + { + "bbox": [ + 348, + 70, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 348, + 70, + 506, + 163 + ], + "type": "image", + "image_path": "e27644a896423cce95a5f5900f5d8f6106a29cc8b919f2003ad654c7e13a782b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 171, + 545, + 194 + ], + "lines": [ + { + "bbox": [ + 305, + 171, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 305, + 171, + 545, + 194 + ], + "type": "text", + "content": "Figure 2. Two different event-based processing methods, frame-based and point-based." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 305, + 215, + 545, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 215, + 545, + 264 + ], + "spans": [ + { + "bbox": [ + 305, + 215, + 545, + 264 + ], + "type": "text", + "content": "in real-time. We hope such an approach could potentially democratize computer vision technology by making it accessible to a wider range of devices and applications in the community of edge computing." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 275, + 392, + 288 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 275, + 392, + 288 + ], + "spans": [ + { + "bbox": [ + 306, + 275, + 392, + 288 + ], + "type": "text", + "content": "2. 
Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 295, + 504, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 295, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 306, + 295, + 504, + 308 + ], + "type": "text", + "content": "2.1. Frame-based CPR Learning Methods" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 315, + 545, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 315, + 545, + 459 + ], + "spans": [ + { + "bbox": [ + 305, + 315, + 545, + 459 + ], + "type": "text", + "content": "Deep learning, crucial for vision tasks like classification and object detection [16], has seen advancements such as PoseNet's innovative transfer learning [14]. Utilizing VGG, ResNet [11, 36], LSTM, and customized loss functions [25, 39, 41], researchers enhanced this approach. Auxiliary Learning methods further improved performance [19, 30, 38], although overfitting remains a challenge. Hybrid pose-based methods, combining learning with traditional pipelines [1, 15], offer promise. DSAC series, for instance, achieve high pose estimation accuracy [2, 3], but come with increased computational costs and latency, especially for edge devices." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 467, + 501, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 467, + 501, + 480 + ], + "spans": [ + { + "bbox": [ + 306, + 467, + 501, + 480 + ], + "type": "text", + "content": "2.2. Event-based CPR Learning Methods" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 486, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 486, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 486, + 545, + 712 + ], + "type": "text", + "content": "Event-based CPR methods often derive from the frame-based CPR network. SP-LSTM [26] employed the stacked spatial LSTM networks to process event images, facilitating a real-time pose estimator. To address the inherent noise in event images, [12] proposed a network structure combining denoise networks, convolutional neural networks, and LSTM, achieving good performance under complex working conditions. In contrast to the aforementioned methods, a novel representation named Reversed Window Entropy Image (RWEI) [18] is introduced, which is based on the widely used event surface [22] and serves as the input to an attention-based DSAC* pipeline [2] to achieve SOTA results. However, the computationally demanding architecture involving representation transformation and hybrid pipeline poses challenges for real-time execution. Additionally, all existing methods ignore the fine-grained temporal feature of the event cameras, and accumulate events into frames for processing, resulting in unsatisfactory performance." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18113" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 169, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 169, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 169, + 83 + ], + "type": "text", + "content": "2.3. 
Point Cloud Network" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 91, + 289, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 91, + 289, + 355 + ], + "spans": [ + { + "bbox": [ + 46, + 91, + 289, + 355 + ], + "type": "text", + "content": "Point-based methodologies have transformed the direct processing of Point Cloud, with PointNet [28] as a standout example. Taking a step beyond, PointNet++ [29] introduced a Set Abstraction module. While it initially employed a straightforward MLP in the feature extractor, recent advancements have seen the development of more sophisticated feature extractors to enhance Point Cloud processing [5, 21, 42, 44]. When extending these techniques to Event Cloud, Wang et al. [40] addressed the temporal information processing challenge while maintaining representation in both the x and y axes, enabling gesture recognition using PointNet++. Further enhancements came with PAT [43], which incorporated self-attention and Gumbel subset sampling, leading to improved performance in recognition tasks. However, existing point-based models still fall short in performance compared to frame-based methods. This phenomenon can be attributed to the distinctively different characteristics of Point Cloud and Event Cloud. Event Cloud contradicts the permutation and transformation invariance present in Point Cloud due to its temporal nature. Additionally, the Point Cloud network is not equipped to extract explicit temporal features." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 367, + 105, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 367, + 105, + 379 + ], + "spans": [ + { + "bbox": [ + 47, + 367, + 105, + 379 + ], + "type": "text", + "content": "3. PEPNet" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 387, + 287, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 387, + 287, + 471 + ], + "spans": [ + { + "bbox": [ + 46, + 387, + 287, + 471 + ], + "type": "text", + "content": "PEPNet pipeline consists of four essential modules: (1) a preprocessing module for the original Event Cloud, (2) a hierarchical Point Cloud feature extraction structure, (3) an Attentive Bi-directional LSTM, and (4) a 6-DOFs pose regressor, as illustrated in Fig. 3. In the following sections, we will provide detailed descriptions and formulations for each module." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 480, + 129, + 492 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 480, + 129, + 492 + ], + "spans": [ + { + "bbox": [ + 47, + 480, + 129, + 492 + ], + "type": "text", + "content": "3.1. Event Cloud" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 500, + 287, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 500, + 287, + 631 + ], + "spans": [ + { + "bbox": [ + 46, + 500, + 287, + 631 + ], + "type": "text", + "content": "To preserve the fine-grained temporal information and original data distribution attributes from the Event Cloud, the 2D-spatial and 1D-temporal event information is constructed into a three-dimensional representation to be processed in Point Cloud. 
Event Cloud consists of time-series data capturing spatial intensity changes of images in chronological order, and an individual event is denoted as " + }, + { + "bbox": [ + 46, + 500, + 287, + 631 + ], + "type": "inline_equation", + "content": "e_k = (x_k, y_k, t_k, p_k)" + }, + { + "bbox": [ + 46, + 500, + 287, + 631 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 500, + 287, + 631 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 500, + 287, + 631 + ], + "type": "text", + "content": " is the index representing the " + }, + { + "bbox": [ + 46, + 500, + 287, + 631 + ], + "type": "inline_equation", + "content": "k_{th}" + }, + { + "bbox": [ + 46, + 500, + 287, + 631 + ], + "type": "text", + "content": " element in the sequence. Consequently, the set of events within a single sequence " + }, + { + "bbox": [ + 46, + 500, + 287, + 631 + ], + "type": "inline_equation", + "content": "(\\mathcal{E})" + }, + { + "bbox": [ + 46, + 500, + 287, + 631 + ], + "type": "text", + "content": " in the dataset can be expressed as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 79, + 633, + 287, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 633, + 287, + 647 + ], + "spans": [ + { + "bbox": [ + 79, + 633, + 287, + 647 + ], + "type": "interline_equation", + "content": "\\mathcal {E} = \\left\\{e _ {k} = \\left(x _ {k}, y _ {k}, t _ {k}, p _ {k}\\right) \\mid k = 1, \\dots , n \\right\\} \\tag {1}", + "image_path": "d1969db11a15c6994d5e7107665ddbb3eb5920c72cec1d4d79513d126f0c223d.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 654, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 287, + 714 + ], + "type": "text", + "content": "For a given pose in the dataset, the ground truth resolution is limited to " + }, + { + "bbox": [ + 46, + 654, + 287, + 714 + ], + "type": "inline_equation", + "content": "5ms" + }, + { + "bbox": [ + 46, + 654, + 287, + 714 + ], + "type": "text", + "content": ", while the event resolution is " + }, + { + "bbox": [ + 46, + 654, + 287, + 714 + ], + "type": "inline_equation", + "content": "1\\mu s" + }, + { + "bbox": [ + 46, + 654, + 287, + 714 + ], + "type": "text", + "content": ". 
Therefore, it is necessary to acquire the events that transpire within the time period, which we call the sliding window, corresponding to each pose; these events serve as the input to the model," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 72, + 463, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 463, + 85 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 463, + 85 + ], + "type": "text", + "content": "as depicted by the following equation:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 337, + 95, + 545, + 108 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 95, + 545, + 108 + ], + "spans": [ + { + "bbox": [ + 337, + 95, + 545, + 108 + ], + "type": "interline_equation", + "content": "P _ {i} = \\left\\{e _ {j \\rightarrow l} \\mid t _ {l} - t _ {j} = R \\right\\} \\quad i = 1, \\dots , M \\tag {2}", + "image_path": "6d717ad4c463ce8eefa165ce45fe73fe2c5114e9bb0efa0a7f5355d55a1f42f2.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "spans": [ + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "content": "The symbol " + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "content": " represents the time interval of the sliding window, where " + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "content": " denote the start and end event index of the sequence, respectively. The variable " + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "content": " represents the number of sliding windows into which the sequence of events " + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "content": " is divided. Before being fed into the neural network, " + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "inline_equation", + "content": "P_{i}" + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "content": " also needs to undergo sampling and normalization. Sampling unifies the number of points " + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "content": " used as the network input. We set " + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "inline_equation", + "content": "N = 1024" + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "content": " in PEPNet. 
Additionally, as the spatial coordinates are normalized by the camera's resolution " + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 304, + 118, + 547, + 251 + ], + "type": "text", + "content": ". The normalization process is described by the following equation:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 370, + 258, + 545, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 258, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 370, + 258, + 545, + 285 + ], + "type": "interline_equation", + "content": "P N _ {i} = \\left(\\frac {X _ {i}}{w}, \\frac {Y _ {i}}{h}, \\frac {T _ {i} - t _ {j}}{t _ {l} - t _ {j}}\\right) \\tag {3}", + "image_path": "2403a1de221dc1908cd878b73979aac7c0dab883f05575a89a8f16a208cb8dc7.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 295, + 545, + 308 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 295, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 315, + 295, + 545, + 308 + ], + "type": "interline_equation", + "content": "X _ {i}, Y _ {i}, T _ {i} = \\left\\{x _ {j}, \\dots , x _ {l} \\right\\}, \\left\\{y _ {j}, \\dots , y _ {l} \\right\\}, \\left\\{t _ {j}, \\dots , t _ {l} \\right\\} \\tag {4}", + "image_path": "5c613132621c0fb11f714fc203bf7d1f8c5e86442a4c60c682ad6b0711fea17b.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "spans": [ + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "text", + "content": "The " + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "inline_equation", + "content": "X, Y" + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "text", + "content": " is divided by the resolution of the event camera. To normalize " + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "text", + "content": ", we subtract the smallest timestamp " + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "inline_equation", + "content": "t_j" + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "text", + "content": " of the window and divide it by the time difference " + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "inline_equation", + "content": "t_l - t_j" + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "inline_equation", + "content": "t_l" + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "text", + "content": " represents the largest timestamp within the window. 
After pre-processing, Event Cloud is converted into the pseudo-Point Cloud, which comprises explicit spatial information " + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "inline_equation", + "content": "(x, y)" + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "text", + "content": " and implicit temporal information " + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 314, + 546, + 399 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 407, + 424, + 419 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 407, + 424, + 419 + ], + "spans": [ + { + "bbox": [ + 306, + 407, + 424, + 419 + ], + "type": "text", + "content": "3.2. Hierarchy Structure" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 426, + 545, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 426, + 545, + 641 + ], + "spans": [ + { + "bbox": [ + 304, + 426, + 545, + 641 + ], + "type": "text", + "content": "The hierarchy structure is the backbone for processing the pseudo-3D Point Cloud and is composed of four primary modules: grouping and sampling, standardization, feature extractor, and aggregation, as described in the following subsection. To efficiently extract deeper explicit spatial and implicit temporal features, the hierarchical structure is tailored and differs from conventional hierarchical structure in a few ways: First, we no longer force permutation invariance as usually done in mainstream point-based methods [21, 28], as the motion information is inherently related to the sequential order of events. Instead, we keep the sequence of all events strictly in the same order as they are generated to preserve the temporal information to be used in the next stage. Second, we replace MaxPooling in aggregation and deploy temporal aggregation which leverages the attention mechanism with softmax, which improves the effective assimilation of temporal information into the resultant feature vectors." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 658, + 443, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 443, + 670 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 443, + 670 + ], + "type": "text", + "content": "3.2.1 Grouping and Sampling" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "text", + "content": "Aligned with the frame-based design concept, our focus is to capture both local and global information. 
Local information is acquired by leveraging Farthest Point Sampling" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "18114" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 68, + 534, + 242 + ], + "blocks": [ + { + "bbox": [ + 53, + 68, + 534, + 242 + ], + "lines": [ + { + "bbox": [ + 53, + 68, + 534, + 242 + ], + "spans": [ + { + "bbox": [ + 53, + 68, + 534, + 242 + ], + "type": "image", + "image_path": "c27d39af9633d3daf5f2f2ecd2e7491f1cab468601934972cd464355a32a6eb6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "lines": [ + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "spans": [ + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "type": "text", + "content": "Figure 3. PEPNet overall architecture (the time resolution of " + }, + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "type": "inline_equation", + "content": "t_1, t_2, \\ldots, t_n" + }, + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "type": "inline_equation", + "content": "1\\mu s" + }, + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "type": "text", + "content": "). The input Event Cloud undergoes direct handling through a sliding window, sampling, and normalization, eliminating the need for any format conversion. Sequentially, the input passes through " + }, + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "type": "inline_equation", + "content": "S_{num}" + }, + { + "bbox": [ + 46, + 249, + 547, + 293 + ], + "type": "text", + "content": " hierarchy structures for spatial feature abstraction and extraction. It further traverses a bidirectional LSTM for temporal feature extraction, culminating in a regressor responsible for 6-DOFs camera pose relocalization." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 300, + 288, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 300, + 288, + 324 + ], + "spans": [ + { + "bbox": [ + 47, + 300, + 288, + 324 + ], + "type": "text", + "content": "(FPS) and K-Nearest Neighbors (KNN), while global information is obtained through a dedicated aggregation module." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 332, + 287, + 346 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 332, + 287, + 346 + ], + "spans": [ + { + "bbox": [ + 61, + 332, + 287, + 346 + ], + "type": "interline_equation", + "content": "P S _ {i} = F P S \\left(P N _ {i}\\right) \\quad P G _ {i} = K N N \\left(P N _ {i}, P S _ {i}\\right) \\tag {5}", + "image_path": "a7c8dcd41c04af7f6e0f2cd93b8e43222fbea1f2275d2a297faa3de7bd6bc51e.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": "The input dimension " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "PN_{i}" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "[N,3 + D]" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": ", and the centroid dimension " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "PS_{i}" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "[N^{\\prime},3 + D]" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": " and the group dimension " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "PG_{i}" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "[N^{\\prime},K,3 + 2*D]" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": " represents the nearest " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": " points of the center point (centroid), " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": " is the feature dimension of the points of the current stage, and 3 is the most original " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "(X,Y,T)" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": " coordinate value. Importantly, it should be noted that the ordering of all points in the grouping and sampling process strictly adheres to the timestamp " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "(T)" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": ", and the dimension " + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "inline_equation", + "content": "2*D" + }, + { + "bbox": [ + 47, + 354, + 289, + 474 + ], + "type": "text", + "content": " of the points in the group is the result of being concatenated to the centroid." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 488, + 149, + 499 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 488, + 149, + 499 + ], + "spans": [ + { + "bbox": [ + 47, + 488, + 149, + 499 + ], + "type": "text", + "content": "3.2.2 Standardization" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 506, + 287, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 506, + 287, + 543 + ], + "spans": [ + { + "bbox": [ + 46, + 506, + 287, + 543 + ], + "type": "text", + "content": "Next, each group undergoes a standardization process to ensure consistent variability between points within the group, as illustrated in this formula:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 549, + 287, + 604 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 549, + 287, + 604 + ], + "spans": [ + { + "bbox": [ + 47, + 549, + 287, + 604 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} P G S _ {i} = \\frac {P G _ {i} - P S _ {i}}{S t d \\left(P G _ {i}\\right)} \\quad S t d \\left(P G _ {i}\\right) = \\sqrt {\\frac {\\sum_ {j = 0} ^ {3 n - 1} \\left(g _ {j} - \\bar {g}\\right) ^ {2}}{3 n - 1}} (6) \\\\ g = \\left[ x _ {0}, y _ {0}, t _ {0}, \\dots , x _ {n}, y _ {n}, t _ {n} \\right] (7) \\\\ \\end{array}", + "image_path": "c5c77520373315bb9f86bc9c9fa3e87a77bea3c0f99e9a8ae3cfd282894b766e.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "spans": [ + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "text", + "content": "Where " + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "inline_equation", + "content": "PG_{i}" + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "inline_equation", + "content": "PS_{i}" + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "text", + "content": " are the subsets of " + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "inline_equation", + "content": "PG" + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "inline_equation", + "content": "PS" + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "inline_equation", + "content": "Std" + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "text", + "content": " is the standard deviation, the dimension of " + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "inline_equation", + "content": "Std(PG)" + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "text", + "content": " which is consistent with the number of sliding windows, and " + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "text", + "content": " is the set of coordinates of all points in the " + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + "type": "inline_equation", + "content": "PG_{i}" + }, + { + "bbox": [ + 46, + 608, + 288, + 657 + ], + 
"type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 670, + 155, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 155, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 155, + 681 + ], + "type": "text", + "content": "3.2.3 Feature extractor" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "content": "Following the standardization of " + }, + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "inline_equation", + "content": "PG" + }, + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "content": " by dividing the variance by the subtracted mean, the feature extraction is per" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 300, + 545, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 300, + 545, + 359 + ], + "spans": [ + { + "bbox": [ + 304, + 300, + 545, + 359 + ], + "type": "text", + "content": "formed using a Multi-Layer Perceptron (MLP) with a residual connection. This process encompasses two steps: local feature extraction and global feature extraction. The feature extractor with a bottleneck can be mathematically represented as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 370, + 370, + 545, + 384 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 370, + 545, + 384 + ], + "spans": [ + { + "bbox": [ + 370, + 370, + 545, + 384 + ], + "type": "interline_equation", + "content": "I (x) = f (\\mathrm {B N} (\\mathrm {M L P} _ {1} (x))) \\tag {8}", + "image_path": "6e7c4fe93f48857f9b3f97c3bf47c0ebdc471a028ed92f9ff2abc004a6ed3c68.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 378, + 386, + 545, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 386, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 378, + 386, + 545, + 399 + ], + "type": "interline_equation", + "content": "O (x) = \\operatorname {B N} \\left(\\operatorname {M L P} _ {2} (x)\\right) \\tag {9}", + "image_path": "82b0d1f6dfc75429da09261037c3ce3fd0094ba6d06998c8d9ceeded185659d7.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 370, + 401, + 545, + 414 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 401, + 545, + 414 + ], + "spans": [ + { + "bbox": [ + 370, + 401, + 545, + 414 + ], + "type": "interline_equation", + "content": "E x t (x) = f (x + O (I (x))) \\tag {10}", + "image_path": "23c15dc8e5d211a6698c15f97e8a3c10d2602388da41206db0e264cac6b46309.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "text", + "content": "BN represents batch normalization layer, while " + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "text", + "content": " signifies the nonlinear activation function. Both local feature extraction and global feature extraction maintain identical input and output dimensions. 
The dimension increase occurs solely when combining the feature dimension " + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "text", + "content": " of the current point with the feature dimension " + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "text", + "content": " of the centroid during grouping, resulting in a final dimension of " + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "inline_equation", + "content": "2 * D" + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "text", + "content": ". The feature extractor takes an input dimension of " + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "inline_equation", + "content": "[B, N, K, D]" + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "text", + "content": ", and following local feature extraction, the dimension remains " + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "inline_equation", + "content": "[B, N, K, D]" + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "text", + "content": " represents batch size. We adopt the attention mechanism for aggregation, yielding an aggregated feature dimension of " + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "inline_equation", + "content": "[B, N, D]" + }, + { + "bbox": [ + 304, + 424, + 546, + 604 + ], + "type": "text", + "content": ". Subsequently, the aggregated feature map is then processed through the global feature extractor, completing the feature extraction for the current stage." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 622, + 436, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 622, + 436, + 634 + ], + "spans": [ + { + "bbox": [ + 305, + 622, + 436, + 634 + ], + "type": "text", + "content": "3.2.4 Temporal Aggregation" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 545, + 713 + ], + "type": "text", + "content": "Conventional Point Cloud methods favor MaxPooling operations for feature aggregation because it is efficient in extracting the feature from one point among a group of points and discarding the rest. However, MaxPooling involves extracting only the maximum value along each dimension of the temporal axis. 
It is robust to noise perturbation but" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18115" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "text", + "content": "also ignores the temporal nuances embedded within the features. Conversely, the integration of attention mechanisms enhances the preservation of those nuanced and useful temporal attributes by aggregating features along the temporal axis through the attention value. To provide a more comprehensive exposition, we employ a direct attention mechanism within the " + }, + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "text", + "content": " temporal dimensions to effectively aggregate features as shown in Fig. 3. This mechanism enables the explicit integration of temporal attributes, capitalizing on the inherent strict ordering of the " + }, + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "text", + "content": " points. The ensuing formula succinctly elucidates the essence of this attention mechanism:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 88, + 220, + 287, + 233 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 220, + 287, + 233 + ], + "spans": [ + { + "bbox": [ + 88, + 220, + 287, + 233 + ], + "type": "interline_equation", + "content": "F _ {\\text {l o c a l}} = \\operatorname {E x t} (x) = \\left(F _ {t 1}, F _ {t 2}, \\dots , F _ {t k}\\right) \\tag {11}", + "image_path": "daa79adf011e4bb73c6591728cc5af1a9a8137acf1c4609b374447ff6301e297.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 238, + 286, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 238, + 286, + 251 + ], + "spans": [ + { + "bbox": [ + 58, + 238, + 286, + 251 + ], + "type": "interline_equation", + "content": "A = \\operatorname {S o f t M a x} \\left(\\operatorname {M L P} \\left(F _ {\\text {l o c a l}}\\right)\\right) = \\left(a _ {t 1}, a _ {t 2}, \\dots , a _ {t k}\\right) \\tag {12}", + "image_path": "42338b5b99e7367aed7969f77bf8376135bb364f3c1f7b0a4fc017ad1085ca14.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 253, + 286, + 266 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 253, + 286, + 266 + ], + "spans": [ + { + "bbox": [ + 52, + 253, + 286, + 266 + ], + "type": "interline_equation", + "content": "F _ {\\text {a g g r e}} = A \\cdot F _ {\\text {l o c a l}} = F _ {t 1} \\cdot a _ {t 1} + F _ {t 2} \\cdot a _ {t 2} + \\dots + F _ {t k} \\cdot a _ {t k} \\tag {13}", + "image_path": "620c99b250b3199715cba9a2493711b4d7cd51825001929336a8e482b6d0e32a.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 268, 
+ 287, + 388 + ], + "type": "text", + "content": "Upon the application of the local feature extractor, the ensuing features are denoted as " + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{local}}" + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "inline_equation", + "content": "F_{tk}" + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "text", + "content": " mean the extracted feature of " + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "inline_equation", + "content": "k_{th}" + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "text", + "content": " point in a group. The attention mechanism comprises an MLP layer with an input layer dimension of " + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "text", + "content": " and an output " + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "inline_equation", + "content": "a_{tk}" + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "text", + "content": " dimension of 1, along with softmax layers. Subsequently, the attention mechanism computes attention values, represented as " + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "text", + "content": ". These attention values are then multiplied with the original features through batch matrix multiplication, resulting in the aggregated feature " + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{aggre}}" + }, + { + "bbox": [ + 46, + 268, + 287, + 388 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 392, + 127, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 392, + 127, + 403 + ], + "spans": [ + { + "bbox": [ + 47, + 392, + 127, + 403 + ], + "type": "text", + "content": "3.3. A-Bi-LSTM" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 411, + 289, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 411, + 289, + 662 + ], + "spans": [ + { + "bbox": [ + 46, + 411, + 289, + 662 + ], + "type": "text", + "content": "The temporal features extracted through the hierarchical structure are independent and parallel, lacking recurrent mechanisms within the network. This distinctive attribute, referred to as 'implicit', contrasts with the conventional treatment of temporal information as an indexed process. Consequently, implicit temporal features inadequately capture the interrelations among events along the timeline, whereas explicit temporal features assume a pivotal role in facilitating the CPR task. To explicitly capture temporal patterns, we introduce the LSTM network, which has been proven effective in learning temporal dependencies. For optimal network performance, controlled feature dimensionality, and comprehensive capture of bidirectional relationships in pose context, we adopt a bidirectional LSTM network with a lightweight design. The regressor attentively focuses on the output of Bi-LSTM at each timestep and is more inclined towards the start and end features as demonstrated in Fig. 6. 
The integration of bidirectional connections into the recurrent neural network (RNN) is succinctly presented through the following equation:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 85, + 667, + 287, + 680 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 667, + 287, + 680 + ], + "spans": [ + { + "bbox": [ + 85, + 667, + 287, + 680 + ], + "type": "interline_equation", + "content": "\\mathbf {h} _ {t} = f \\left(\\mathbf {W} _ {h} \\cdot \\mathbf {x} _ {t} + \\mathbf {U} _ {h} \\cdot \\mathbf {h} _ {t - 1} + \\mathbf {b} _ {h}\\right) \\tag {14}", + "image_path": "b46414ba1abace9e4413d3daf30bbce89b364a3b25df67eed00daba5b453cb65.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 87, + 682, + 286, + 696 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 682, + 286, + 696 + ], + "spans": [ + { + "bbox": [ + 87, + 682, + 286, + 696 + ], + "type": "interline_equation", + "content": "\\mathbf {h} _ {t} ^ {\\prime} = f \\left(\\mathbf {W} _ {h} ^ {\\prime} \\cdot \\mathbf {x} _ {t} + \\mathbf {U} _ {h} ^ {\\prime} \\cdot \\mathbf {h} _ {t + 1} ^ {\\prime} + \\mathbf {b} _ {h} ^ {\\prime}\\right) \\tag {15}", + "image_path": "d1ac7c9a247ee74c18467eab1732a3578b3b55965cf3d348b2823fac47bb5706.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 87, + 697, + 286, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 697, + 286, + 711 + ], + "spans": [ + { + "bbox": [ + 87, + 697, + 286, + 711 + ], + "type": "interline_equation", + "content": "\\mathbf {y} _ {t} = \\mathbf {V} \\cdot \\mathbf {h} _ {t} + \\mathbf {b} _ {y} \\quad \\mathbf {y} _ {t} ^ {\\prime} = \\mathbf {V} ^ {\\prime} \\cdot \\mathbf {h} _ {t} ^ {\\prime} + \\mathbf {b} _ {y} ^ {\\prime} \\tag {16}", + "image_path": "d570a79da98acdfea5294dcb22cbc07ce66a53390d6ef7ce97a68b57bfa135e1.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": " represents the feature vector at the " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": "-th time step of the input sequence, while " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_{t-1}" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_{t+1}'" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": " correspond to the hidden states of the forward and backward RNN units, respectively, from the previous time step. 
The matrices " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_h" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{U}_h" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{b}_h" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": " denote the weight matrix and bias vector of the forward RNN unit, while " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{b}_y" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": " represent the weight matrix and bias vector of its output layer. Similarly, " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_h'" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{U}_h'" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{b}_h'" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": " are associated with the weight matrix and bias vector of the backward RNN unit, and " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{V}'" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{b}_y'" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": " pertain to the weight matrix and bias vector of its output layer. The activation function, denoted as " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "f(\\cdot)" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": ", can be chosen as sigmoid or tanh or other functions. The final output " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "Y_a" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": " is aggregated at each moment using the attention mechanism, and " + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 304, + 72, + 547, + 240 + ], + "type": "text", + "content": " means concat operation." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 410, + 251, + 545, + 264 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 251, + 545, + 264 + ], + "spans": [ + { + "bbox": [ + 410, + 251, + 545, + 264 + ], + "type": "interline_equation", + "content": "Y _ {t} = y _ {t} \\oplus y _ {t} ^ {\\prime} \\tag {17}", + "image_path": "ff0530efc43379a72d2f119010669dfb667c8f5eae1dfa6955238c7cac38fd49.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 375, + 266, + 545, + 278 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 266, + 545, + 278 + ], + "spans": [ + { + "bbox": [ + 375, + 266, + 545, + 278 + ], + "type": "interline_equation", + "content": "A = \\operatorname {S o f t M a x} \\left(\\operatorname {M L P} \\left(Y _ {t}\\right)\\right) \\tag {18}", + "image_path": "b8bbec8a5c29b3504f64d11a46a0ac8e560c6f96de777d45f1786fdf21527bb1.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 409, + 281, + 545, + 293 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 409, + 281, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 409, + 281, + 545, + 293 + ], + "type": "interline_equation", + "content": "Y _ {a} = A \\cdot Y _ {t} \\tag {19}", + "image_path": "680862586eb11785f0124e5db729e4693f5449693273454b242b6de687560c0a.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 306, + 394, + 317 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 306, + 394, + 317 + ], + "spans": [ + { + "bbox": [ + 306, + 306, + 394, + 317 + ], + "type": "text", + "content": "3.4. Loss Function" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 325, + 546, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 325, + 546, + 397 + ], + "spans": [ + { + "bbox": [ + 304, + 325, + 546, + 397 + ], + "type": "text", + "content": "A fully connected layer with a hidden layer is employed to address the final 6-DOFs pose regression task. The displacement vector of the regression is denoted as " + }, + { + "bbox": [ + 304, + 325, + 546, + 397 + ], + "type": "inline_equation", + "content": "\\hat{p}" + }, + { + "bbox": [ + 304, + 325, + 546, + 397 + ], + "type": "text", + "content": " representing the magnitude and direction of movement, while the rotational Euler angles are denoted as " + }, + { + "bbox": [ + 304, + 325, + 546, + 397 + ], + "type": "inline_equation", + "content": "\\hat{q}" + }, + { + "bbox": [ + 304, + 325, + 546, + 397 + ], + "type": "text", + "content": " indicating the rotational orientation in three-dimensional space." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 322, + 408, + 545, + 423 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 408, + 545, + 423 + ], + "spans": [ + { + "bbox": [ + 322, + 408, + 545, + 423 + ], + "type": "interline_equation", + "content": "\\operatorname {L o s s} = \\alpha | | \\hat {p} - p | | _ {2} + \\beta | | \\hat {q} - q | | _ {2} + \\lambda \\sum_ {i = 0} ^ {n} w _ {i} ^ {2} \\tag {20}", + "image_path": "3e30f71eec5145115ebdc53fa63e9ecc6fd72605325c652deedc636aa295b990.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "spans": [ + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "text", + "content": " represent the ground truth obtained from the dataset, while " + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "text", + "content": " serve as weight proportion coefficients. In order to tackle the prominent concern of overfitting, especially in the end-to-end setting, we incorporate L2 regularization into the loss function. This regularization, implemented as the squared L2 norm of the network weights " + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 433, + 547, + 517 + ], + "type": "text", + "content": ", effectively mitigates overfitting." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 527, + 425, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 527, + 425, + 539 + ], + "spans": [ + { + "bbox": [ + 306, + 527, + 425, + 539 + ], + "type": "text", + "content": "3.5. Overall Architecture" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 547, + 545, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 547, + 545, + 583 + ], + "spans": [ + { + "bbox": [ + 304, + 547, + 545, + 583 + ], + "type": "text", + "content": "Next, we will present the PEPNet pipeline in pseudo-code, utilizing the previously defined variables and formulas as described in Algorithm 1." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 597, + 382, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 597, + 382, + 609 + ], + "spans": [ + { + "bbox": [ + 306, + 597, + 382, + 609 + ], + "type": "text", + "content": "4. 
Experiment" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": "In this section, we present an extensive and in-depth analysis of PEPNet's performance on both indoor and outdoor datasets, encompassing evaluations based on rotational and translational mean squared error (MSE), model parameters, floating-point operations (FLOPs), and inference time. PEPNet's training and testing are performed on a server furnished with an AMD Ryzen 7950X CPU, an RTX GeForce 4090 GPU, and 32GB of memory." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "18116" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 46, + 73, + 553, + 203 + ], + "blocks": [ + { + "bbox": [ + 46, + 73, + 553, + 203 + ], + "lines": [ + { + "bbox": [ + 46, + 73, + 553, + 203 + ], + "spans": [ + { + "bbox": [ + 46, + 73, + 553, + 203 + ], + "type": "image", + "image_path": "87cc3789c1bcb4f5e467c8fd27e6367cb42f56cced06a19032593cae04147682.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 205, + 212, + 387, + 224 + ], + "lines": [ + { + "bbox": [ + 205, + 212, + 387, + 224 + ], + "spans": [ + { + "bbox": [ + 205, + 212, + 387, + 224 + ], + "type": "text", + "content": "Figure 4. Event-based CPR Dataset visualization." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "code", + "bbox": [ + 47, + 258, + 298, + 573 + ], + "blocks": [ + { + "bbox": [ + 47, + 244, + 171, + 257 + ], + "lines": [ + { + "bbox": [ + 47, + 244, + 171, + 257 + ], + "spans": [ + { + "bbox": [ + 47, + 244, + 171, + 257 + ], + "type": "text", + "content": "Algorithm 1 PEPNet pipeline" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "lines": [ + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "spans": [ + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": "Input: Raw event stream " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \nParameters: " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "N_{p} = 1024,R = 1e + 3,S_{\\mathrm{num}} = 3,K = 24" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \nOutput: 6-DOFs pose " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "(\\hat{p},\\hat{q})" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n1: Preprocessing \n2: for " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " in len(ε) do \n3: " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "P_{i}" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " .append " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "(e_{j\\rightarrow l})" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " .. " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "j = l" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " ; where " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "t_l - t_j = R" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n4: if " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "(len(P_i) > N_p)" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " .. 
" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "i = i + 1" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n5: end for \n6: " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "PN =" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " Normalize(Sampling " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "(P))" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n7: Hierarchy structure \n8: for stage in range " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "(S_{\\mathrm{num}})" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " do \n9: Grouping and Sampling " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "(PN)" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n10: Get " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "PGS\\in [B,N_{\\mathrm{stage}},K,2*D_{\\mathrm{stage}-1}]" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n11: Local Extractor " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "(PGS)" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n12: Get " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{local}}\\in [B,N_{\\mathrm{stage}},K,D_{\\mathrm{stage}}]" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n13: Attentive Aggregate " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "(F_{\\mathrm{local}})" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n14: Get " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{aggre}}\\in [B,N_{\\mathrm{stage}},D_{\\mathrm{stage}}]" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n15: Global Extractor " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "(F_{\\mathrm{aggre}})" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n16: Get " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "PN = F_{\\mathrm{global}}\\in [B,N_{\\mathrm{stage}},D_{\\mathrm{stage}}]" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n17: end for \n18: A-Bi-LSTM \n19: Forward Get " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "y_{t}\\in [B,N_{3},DS_{\\mathrm{num}} / 2]" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n20: Reverse Get " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "y_t^\\prime \\in [B,N_3,D_{S_{\\mathrm{num}}} / 2]" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " \n21: Attention Get " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "Y_{a}\\in [B,D_{S_{\\mathrm{num}}}]" + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "text", + "content": " 
\n22: Regressor \n23: Get 6-DOFs pose " + }, + { + "bbox": [ + 47, + 258, + 298, + 573 + ], + "type": "inline_equation", + "content": "(\\hat{p},\\hat{q})" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "algorithm" + }, + { + "bbox": [ + 47, + 597, + 105, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 105, + 609 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 105, + 609 + ], + "type": "text", + "content": "4.1. Dataset" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 617, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 289, + 715 + ], + "type": "text", + "content": "We employ the widely evaluated event-based CPR dataset IJRR [23] and M3ED [4], encompassing both indoor and outdoor scenes. Two distinct methods to partition the CPR dataset [26] have been benchmarked: random split and novel split. In the random split approach, the dataset is randomly selected " + }, + { + "bbox": [ + 46, + 617, + 289, + 715 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 46, + 617, + 289, + 715 + ], + "type": "text", + "content": " of all sequences for training and allocated the remaining sequences for testing. On the other hand, in the novel split, we divide the data chronologically," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 244, + 545, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 244, + 545, + 269 + ], + "spans": [ + { + "bbox": [ + 305, + 244, + 545, + 269 + ], + "type": "text", + "content": "using the initial " + }, + { + "bbox": [ + 305, + 244, + 545, + 269 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 305, + 244, + 545, + 269 + ], + "type": "text", + "content": " of sequences for training and the subsequent " + }, + { + "bbox": [ + 305, + 244, + 545, + 269 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 305, + 244, + 545, + 269 + ], + "type": "text", + "content": " for testing." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 277, + 368, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 277, + 368, + 289 + ], + "spans": [ + { + "bbox": [ + 306, + 277, + 368, + 289 + ], + "type": "text", + "content": "4.2. Baseline" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 296, + 546, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 296, + 546, + 380 + ], + "spans": [ + { + "bbox": [ + 304, + 296, + 546, + 380 + ], + "type": "text", + "content": "We perform a thorough evaluation of our proposed method by comparing it with SOTA event-based approaches, namely CNN-LSTM [37] and AECRN [18]. Moreover, we present results derived from other well-established computer vision methods, including PoseNet[14], Bayesian PoseNet [13], Pairwise-CNN [15], LSTM-Pose [39], and SP-LSTM[26]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 388, + 430, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 388, + 430, + 399 + ], + "spans": [ + { + "bbox": [ + 306, + 388, + 430, + 399 + ], + "type": "text", + "content": "4.3. 
IJRR Dataset Results" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 407, + 432, + 419 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 407, + 432, + 419 + ], + "spans": [ + { + "bbox": [ + 306, + 407, + 432, + 419 + ], + "type": "text", + "content": "4.3.1 Random Split Results" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 426, + 545, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 426, + 545, + 629 + ], + "spans": [ + { + "bbox": [ + 304, + 426, + 545, + 629 + ], + "type": "text", + "content": "Based on the findings presented in Tab. 1, it is apparent that PEPNet surpasses other models concerning both rotation and translation errors across all sequences. Notably, PEPNet achieves these impressive results despite utilizing significantly fewer model parameters and FLOPs compared to the frame-based approach. Moreover, PEPNet not only exhibits a remarkable " + }, + { + "bbox": [ + 304, + 426, + 545, + 629 + ], + "type": "inline_equation", + "content": "38\\%" + }, + { + "bbox": [ + 304, + 426, + 545, + 629 + ], + "type": "text", + "content": " improvement in the average error compared to the SOTA CNN-LSTM method but also attains superior results across nearly all sequences. In addressing the more intricate and challenging hdr_poster sequences, while the frame-based approach relies on a denoising network to yield improved results [12], PEPNet excels by achieving remarkable performance without any additional processing. This observation strongly implies that PEPNet's Point Cloud approach exhibits greater robustness compared to the frame-based method, highlighting its inherent superiority in handling complex scenarios." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 630, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 714 + ], + "type": "text", + "content": "Furthermore, we introduce an alternative variant, PEPNet" + }, + { + "bbox": [ + 304, + 630, + 545, + 714 + ], + "type": "inline_equation", + "content": "_{tinyy}" + }, + { + "bbox": [ + 304, + 630, + 545, + 714 + ], + "type": "text", + "content": ", which integrates a lighter model architecture while preserving relatively strong performance. As depicted in Fig. 3, PEPNet consists of three stages, and the model's size is contingent upon the dimensionality of MLPs at each stage. The dimensions for the standard structure are [64, 128, 256], whereas those for the tiny structure are [16," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18117" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 69, + 558, + 180 + ], + "blocks": [ + { + "bbox": [ + 50, + 69, + 558, + 180 + ], + "lines": [ + { + "bbox": [ + 50, + 69, + 558, + 180 + ], + "spans": [ + { + "bbox": [ + 50, + 69, + 558, + 180 + ], + "type": "table", + "html": "
NetworkPoseNetBayesian PoseNetPairwise-CNNLSTM-PoseSP-LSTMCNN-LSTMPEPNetPEPNettiny
Parameter12.43M22.35M22.34M16.05M135.25M12.63M0.774M0.064M
FLOPs1.584G3.679G7.359G1.822G15.623G1.998G0.459G0.033G
shapes Rotation0.109m,7.388°0.142m,9.557°0.095m,6.332°0.032m,4.439°0.025m,2.256°0.012m,1.652°0.005m,1.372°0.006m,1.592°
box Translation0.193m,6.977°0.190m,6.636°0.178m,6.153°0.083m,6.215°0.036m,2.195°0.013m,0.873°0.017m,0.845°0.031m,1.516°
shapes Translation0.238m,6.001°0.264m,6.235°0.201m,5.146°0.056m,5.018°0.035m,2.117°0.020m,1.471°0.011m,0.582°0.013m,0.769°
dynamic_6dof0.297m,9.332°0.296m,8.963°0.245m,5.962°0.097m,6.732°0.031m,2.047°0.016m,1.662°0.015m,1.045°0.018m,1.144°
hdr_poster0.282m,8.513°0.290m,8.710°0.232m,7.234°0.108m,6.186°0.051m,3.354°0.033m,2.421°0.016m,0.991°0.028m,1.863°
poster Translation0.266m,6.516°0.264m,5.459°0.211m,6.439°0.079m,5.734°0.036m,2.074°0.020m,1.468°0.012m,0.588°0.019m,0.953°
Average0.231m,7.455°0.241m,7.593°0.194m,6.211°0.076m,5.721°0.036m,2.341°0.019m,1.591°0.013m,0.904°0.019m,1.306°
", + "image_path": "bc56e0911fd7a879a1a5cb8c7aa1d63886680b3997df7462167acd85f96cd008.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 188, + 547, + 222 + ], + "lines": [ + { + "bbox": [ + 46, + 188, + 547, + 222 + ], + "spans": [ + { + "bbox": [ + 46, + 188, + 547, + 222 + ], + "type": "text", + "content": "Table 1. IJRR random split results. The table presents the median error for each sequence, as well as the average error across the six sequences. It also presents the number of parameters and FLOPs for each model. Bold indicates the most advanced result, while underline signifies the second-best result." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 71, + 240, + 264, + 329 + ], + "blocks": [ + { + "bbox": [ + 71, + 240, + 264, + 329 + ], + "lines": [ + { + "bbox": [ + 71, + 240, + 264, + 329 + ], + "spans": [ + { + "bbox": [ + 71, + 240, + 264, + 329 + ], + "type": "image", + "image_path": "592b7ac577719929ef85ee94af2b04b4fe037688f1d81538cd3ab38249865a88.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 337, + 288, + 370 + ], + "lines": [ + { + "bbox": [ + 46, + 337, + 288, + 370 + ], + "spans": [ + { + "bbox": [ + 46, + 337, + 288, + 370 + ], + "type": "text", + "content": "Figure 5. Error distribution of event-based CPR results achieved by PEPNet using a random split. (a) Translation errors. (b) Rotation errors." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 392, + 287, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 392, + 287, + 452 + ], + "spans": [ + { + "bbox": [ + 46, + 392, + 287, + 452 + ], + "type": "text", + "content": "32, 64]. As indicated in Tab. 1, even with a mere " + }, + { + "bbox": [ + 46, + 392, + 287, + 452 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 46, + 392, + 287, + 452 + ], + "type": "text", + "content": " of the CNN-LSTM's parameter, " + }, + { + "bbox": [ + 46, + 392, + 287, + 452 + ], + "type": "inline_equation", + "content": "\\mathrm{PEPNet}_{tiny}" + }, + { + "bbox": [ + 46, + 392, + 287, + 452 + ], + "type": "text", + "content": " achieves comparable and even slightly superior results. This remarkable outcome emphasizes the superiority of leveraging event cloud data processing directly." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 468, + 160, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 468, + 160, + 479 + ], + "spans": [ + { + "bbox": [ + 47, + 468, + 160, + 479 + ], + "type": "text", + "content": "4.3.2 Error Distribution" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 487, + 287, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 487, + 287, + 606 + ], + "spans": [ + { + "bbox": [ + 46, + 487, + 287, + 606 + ], + "type": "text", + "content": "Fig. 5 illustrates the error distribution of PEPNet across six distinct sequences using the random split method, specifically: shape rotation, box translation, shape translation, dynamic 6-dof, hdr poster, and poster translation. To enhance clarity, the top and bottom boundaries of the box represent the first and third quartiles, respectively, indicating the inter-quartile range (IQR). The median is denoted by the band within the box. 
It is observed that the IQR of the translation error approximately locates between " + }, + { + "bbox": [ + 46, + 487, + 287, + 606 + ], + "type": "inline_equation", + "content": "0.004\\mathrm{m}" + }, + { + "bbox": [ + 46, + 487, + 287, + 606 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 487, + 287, + 606 + ], + "type": "inline_equation", + "content": "0.024\\mathrm{m}" + }, + { + "bbox": [ + 46, + 487, + 287, + 606 + ], + "type": "text", + "content": ", while the orientation error ranges from " + }, + { + "bbox": [ + 46, + 487, + 287, + 606 + ], + "type": "inline_equation", + "content": "0.4^{\\circ}" + }, + { + "bbox": [ + 46, + 487, + 287, + 606 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 487, + 287, + 606 + ], + "type": "inline_equation", + "content": "1.9^{\\circ}" + }, + { + "bbox": [ + 46, + 487, + 287, + 606 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 623, + 161, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 623, + 161, + 635 + ], + "spans": [ + { + "bbox": [ + 47, + 623, + 161, + 635 + ], + "type": "text", + "content": "4.3.3 Novel Split Results" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 642, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 713 + ], + "type": "text", + "content": "To assess the model's robustness, we adopt the novel split as an evaluation criterion, as shown in Tab. 2. During the training process, we observe a more pronounced overfitting phenomenon in PEPNet compared to the random split. We attribute this observation to the disparities in data distributions between the trainset and the testset, as well as the lim" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 243, + 545, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 243, + 545, + 303 + ], + "spans": [ + { + "bbox": [ + 304, + 243, + 545, + 303 + ], + "type": "text", + "content": "ited data size. Contrary to the methods we compared, PEP-Net does not necessitate pre-trained weights. For instance, SP-LSTM relies on pre-trained VGG19 weights from ImageNet, while AECRN requires synthetic heuristic depth and an extensive pretraining process." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 303, + 546, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 303, + 546, + 506 + ], + "spans": [ + { + "bbox": [ + 304, + 303, + 546, + 506 + ], + "type": "text", + "content": "To address overfitting, PEPNet employs conventional methods that yield consistent and comparable results with the SOTA on three shape sequences that are displayed in the network column of Tab. 2. It is essential to note that AE-CRN adopts a hybrid approach, combining neural network regression for scene coordinates with derivable RANSAC for pose estimation. Moreover, this method incurs significant time consumption, with even the SOTA DSAC* algorithm taking nearly 30ms, excluding additional time for data format conversion. This time constraint presents compatibility challenges with the low-latency nature of event cameras. In contrast, PEPNet can execute on a server in just 6.7ms, with the main time-consuming module being grouping and sampling. 
Furthermore, with potential field programmable gate array (FPGA) or application-specific integrated chip (ASIC) support for these operations[6, 20], PEPNet's performance can be further accelerated." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 514, + 435, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 514, + 435, + 525 + ], + "spans": [ + { + "bbox": [ + 305, + 514, + 435, + 525 + ], + "type": "text", + "content": "4.4. M3ED Dataset Results" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 532, + 545, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 532, + 545, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 532, + 545, + 639 + ], + "type": "text", + "content": "We selected three robots (Car, Falcon, and Spot) to extend the application scope of PEPNet across five sequences in an outdoor night setting, as illustrated in the Tab. 3. Due to its much higher resolution than IJRR, we performed downsampling processing and more number of points (1024 to 2048), and other experimental configurations are consistent with the IJRR dataset with random split. The results demonstrate the superior performance of PEPNet even in more challenging outdoor environments." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 647, + 437, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 647, + 437, + 658 + ], + "spans": [ + { + "bbox": [ + 306, + 647, + 437, + 658 + ], + "type": "text", + "content": "4.5. Attention Visualization" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 666, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 666, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 666, + 545, + 712 + ], + "type": "text", + "content": "As shown in Fig. 6, We observe that the attention scores exhibit larger at both the beginning and end. We tentatively infer that the model focuses more on the difference in features between the start and the end for CPR, which is also" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18118" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 61, + 70, + 534, + 135 + ], + "blocks": [ + { + "bbox": [ + 61, + 70, + 534, + 135 + ], + "lines": [ + { + "bbox": [ + 61, + 70, + 534, + 135 + ], + "spans": [ + { + "bbox": [ + 61, + 70, + 534, + 135 + ], + "type": "table", + "html": "
NetworkPoseNetBayesian PoseNetPairwise-CNNLSTM-PoseSP-LSTMDSAC*AECRNPEPNet
shapes Rotation0.201m,12.499°0.164m,12.188°0.187m,10.426°0.061m,7.625°0.045m,5.017°0.029m,2.3°0.025m,2.0°0.016m,1.745°
shapes Translation0.198m,6.696°0.213m,7.441°0.225m,11.627°0.108m,8.468°0.072m,4.496°0.038m,2.2°0.029m,1.7°0.026m,1.659°
shapes_6dof0.320m,13.733°0.326m,13.296°0.314m,13.245°0.096m,8.973°0.078m,5.524°0.054m,3.1°0.052m,3.0°0.045m,2.984°
Average0.240m,11.067°0.234m,10.975°0.242m,11.766°0.088m,8.355°0.065m,5.012°0.040m,2.53°0.035m,2.23°0.029m,2.13°
Inference time5ms6ms12ms9.49ms4.79ms30ms30ms6.7ms
", + "image_path": "93188618a3a61773516235e79195bd9e5c3ac26dbd48ebafa0755c011866e445.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 183, + 297, + 256 + ], + "blocks": [ + { + "bbox": [ + 46, + 142, + 546, + 164 + ], + "lines": [ + { + "bbox": [ + 46, + 142, + 546, + 164 + ], + "spans": [ + { + "bbox": [ + 46, + 142, + 546, + 164 + ], + "type": "text", + "content": "Table 2. IJRR novel split results. Referred to as Tab. 1, showcases identical information. To assess the model's runtime, we conduct tests on a server platform, specifically focusing on the average time required for inference on a single sample." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 183, + 297, + 256 + ], + "lines": [ + { + "bbox": [ + 50, + 183, + 297, + 256 + ], + "spans": [ + { + "bbox": [ + 50, + 183, + 297, + 256 + ], + "type": "table", + "html": "
M3EDPoseNetLSTM-PoseCNN-LSTMPEPNet
INPUTEvent FrameEvent FrameEvent framePoint Cloud
Falcon_Night_High_Beams0.181m,2.221°0.112m,0.946°0.107m,1.435°0.082m,0.575°
Car_Night_Pen_S_Loop1.618m,8.126°0.667m,4.914°0.773m,3.005°0.577m,1.319°
Spot_Night_Pen_Loop1.735m,5.502°0.761m,7.898°0.401m,1.771°0.468m,1.062°
Car_Pen_S_Loop_darker1.841m,4.575°0.751m,3.738°0.598m,2.772°0.385m,1.01°
Spot_Plaza_Light1.372m,9.564°0.565m,5.221°0.273m,2.001°0.348m,1.234°
Average1.349m,5.998°0.571m,4.543°0.43m,2.197°0.372m,1.04°
", + "image_path": "3863ad134b66fc40ed82dc9e903642b5b6c26ebd236dbca005001a12bece884b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 50, + 285, + 287, + 354 + ], + "blocks": [ + { + "bbox": [ + 50, + 264, + 284, + 275 + ], + "lines": [ + { + "bbox": [ + 50, + 264, + 284, + 275 + ], + "spans": [ + { + "bbox": [ + 50, + 264, + 284, + 275 + ], + "type": "text", + "content": "Table 3. Outdoor extension on M3ED dataset with random split." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 285, + 287, + 354 + ], + "lines": [ + { + "bbox": [ + 50, + 285, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 50, + 285, + 287, + 354 + ], + "type": "table", + "html": "
ConditionHSLSTMBi-LSTMAggregationTranslationRotationT+R
1Max0.015m0.884°3.04
2Temporal0.014m0.786°2.77
3Max0.014m0.833°2.85
4Temporal0.012m0.603°2.25
5Max0.014m0.813°2.82
6Temporal0.011m0.582°2.12
", + "image_path": "309405956b32f5b3a782423314bcccc6797ff1847efd7e8bc35240e496f57965.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 70, + 396, + 258, + 506 + ], + "blocks": [ + { + "bbox": [ + 70, + 396, + 258, + 506 + ], + "lines": [ + { + "bbox": [ + 70, + 396, + 258, + 506 + ], + "spans": [ + { + "bbox": [ + 70, + 396, + 258, + 506 + ], + "type": "image", + "image_path": "0d7401f1ccd5efb782c94f401a63faae94e0db35c05e1b0198b14a0f92312caa.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 514, + 287, + 548 + ], + "lines": [ + { + "bbox": [ + 46, + 514, + 287, + 548 + ], + "spans": [ + { + "bbox": [ + 46, + 514, + 287, + 548 + ], + "type": "text", + "content": "Figure 6. Visualization of the attention values in the time domain. 128 points in chronological order on the horizontal axis and the attention values of the corresponding point on the vertical axis." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 568, + 204, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 568, + 204, + 580 + ], + "spans": [ + { + "bbox": [ + 47, + 568, + 204, + 580 + ], + "type": "text", + "content": "seen in the geometry approach [7, 24]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 587, + 141, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 587, + 141, + 601 + ], + "spans": [ + { + "bbox": [ + 47, + 587, + 141, + 601 + ], + "type": "text", + "content": "4.6. Ablation Study" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 605, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 288, + 713 + ], + "type": "text", + "content": "Key Module Ablation: In order to validate the efficacy of key modules, we conducted an ablation experiment focusing on three primary components: hierarchy structure, Bi-LSTM, and temporal aggregation. These experiments are designed to evaluate rotation and translation errors on the shape translation sequence with the random split. The combined error " + }, + { + "bbox": [ + 46, + 605, + 288, + 713 + ], + "type": "inline_equation", + "content": "(\\mathrm{T} + \\mathrm{R})" + }, + { + "bbox": [ + 46, + 605, + 288, + 713 + ], + "type": "text", + "content": " is measured after processing. Our experimental setup comprises four distinct conditions, as illustrated in Tab. 4. Condition 1 represents the sole utility of the system." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 313, + 183, + 538, + 222 + ], + "blocks": [ + { + "bbox": [ + 46, + 362, + 287, + 385 + ], + "lines": [ + { + "bbox": [ + 46, + 362, + 287, + 385 + ], + "spans": [ + { + "bbox": [ + 46, + 362, + 287, + 385 + ], + "type": "text", + "content": "Table 4. Abalation Study for three key modules. 
" + }, + { + "bbox": [ + 46, + 362, + 287, + 385 + ], + "type": "inline_equation", + "content": "\\mathrm{T} + \\mathrm{R} =" + }, + { + "bbox": [ + 46, + 362, + 287, + 385 + ], + "type": "text", + "content": " Translation + Rotation " + }, + { + "bbox": [ + 46, + 362, + 287, + 385 + ], + "type": "inline_equation", + "content": "\\cdot \\pi /{180}\\left( {\\mathrm{\\;m} + \\mathrm{{rad}}}\\right)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 313, + 183, + 538, + 222 + ], + "lines": [ + { + "bbox": [ + 313, + 183, + 538, + 222 + ], + "spans": [ + { + "bbox": [ + 313, + 183, + 538, + 222 + ], + "type": "table", + "html": "
Sceneα = 0.5, β = 0.5α = 0.25, β = 0.75α = 0.75, β = 0.25
shape Translation0.0302m,1.684°,5.960.0359m,1.72°,6.590.0303m,2.056°,6.62
shape Rotation0.0143m,2.888°,6.470.0159m,2.68°,6.270.014m,3.36°,7.26
dynamic_6dof0.0542m,2.799°,10.30.0611m,2.488°,10.50.0516m,3.251°,10.8
", + "image_path": "8725570befbce6faf8c855012b241a89b3d3b57683239e015bb3604fdf9141e8.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 324, + 229, + 526, + 240 + ], + "lines": [ + { + "bbox": [ + 324, + 229, + 526, + 240 + ], + "spans": [ + { + "bbox": [ + 324, + 229, + 526, + 240 + ], + "type": "text", + "content": "Table 5. Abalation Study for loss function's coefficient." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 263, + 545, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 263, + 545, + 395 + ], + "spans": [ + { + "bbox": [ + 304, + 263, + 545, + 395 + ], + "type": "text", + "content": "ization of the hierarchy structure (HS), while Condition 2 combines the ordinary LSTM. Condition 3 incorporates the bidirectional LSTM, and Condition 4 integrates the attention mechanism for feature aggregation. The ablation experiments reveal significant insights. Experiments 1 and 3 demonstrate that augmenting LSTM enhances the extraction of explicit temporal features. Moreover, experiments 3 and 5 reveal the effectiveness of the bidirectional LSTM in extracting motion information. Additionally, experiments 5 and 6 confirm the notable impact of attention in feature aggregation, resulting in a substantial reduction in error rates." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 396, + 546, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 396, + 546, + 502 + ], + "spans": [ + { + "bbox": [ + 304, + 396, + 546, + 502 + ], + "type": "text", + "content": "Loss ablation: We incorporated the experiment involving scaling coefficients of the loss function in Tab. 5. This experiment utilized a tiny version of PEPNet, trained for 100 epochs, and the outcome is MSE in translation, rotation, and " + }, + { + "bbox": [ + 304, + 396, + 546, + 502 + ], + "type": "inline_equation", + "content": "\\mathrm{T} + \\mathrm{R}" + }, + { + "bbox": [ + 304, + 396, + 546, + 502 + ], + "type": "text", + "content": ". Across three distinct motion scenarios (translation, rotation, and 6dof) varied coefficient ratios induced deviations in the obtained results. For example, in shape rotation, increasing the weight on rotation makes the results better." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 517, + 379, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 517, + 379, + 530 + ], + "spans": [ + { + "bbox": [ + 306, + 517, + 379, + 530 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 539, + 545, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 539, + 545, + 646 + ], + "spans": [ + { + "bbox": [ + 304, + 539, + 545, + 646 + ], + "type": "text", + "content": "In this paper, we introduce an end-to-end CPR network that operates directly on raw event clouds without frame-based preprocessing. PEPNet boasts an impressively lightweight framework that adeptly extracts spatial and temporal features, leading to SOTA performance. Diverging from frame-based approaches, our method prioritizes preserving the inherent distribution of the event cloud, capitalizing on its sparse nature to achieve extraordinary capabilities for ultra-low-power applications." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 647, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 647, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 647, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgment. This work was supported in part by the Young Scientists Fund of the National Natural Science Foundation of China (Grant 62305278), as well as the Hong Kong University of Science and Technology (Guangzhou) Joint Funding Program under Grant 2023A03J0154 and 2024A03J0618." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "18119" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 96, + 106, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 106, + 108 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 106, + 108 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 117, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 117, + 287, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 117, + 287, + 161 + ], + "spans": [ + { + "bbox": [ + 53, + 117, + 287, + 161 + ], + "type": "text", + "content": "[1] Vassileios Balntas, Shuda Li, and Victor Prisacariu. Relocnet: Continuous metric learning relocalisation using neural nets. In Proceedings of the European Conference on Computer Vision (ECCV), pages 751-767, 2018. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 163, + 288, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 163, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 53, + 163, + 288, + 205 + ], + "type": "text", + "content": "[2] Eric Brachmann and Carsten Rother. Visual camera relocalization from rgb and rgb-d images using dsac. IEEE transactions on pattern analysis and machine intelligence, 44(9):5847-5865, 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 208, + 287, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 208, + 287, + 262 + ], + "spans": [ + { + "bbox": [ + 53, + 208, + 287, + 262 + ], + "type": "text", + "content": "[3] Eric Brachmann, Alexander Krull, Sebastian Nowozin, Jamie Shotton, Frank Michel, Stefan Gumhold, and Carsten Rother. Dsac-differentiable ransac for camera localization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6684-6692, 2017. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 264, + 287, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 264, + 287, + 330 + ], + "spans": [ + { + "bbox": [ + 53, + 264, + 287, + 330 + ], + "type": "text", + "content": "[4] Kenneth Chaney, Fernando Cladora, Ziyun Wang, Anthony Bisulco, M Ani Hsieh, Christopher Korpela, Vijay Kumar, Camillo J Taylor, and Kostas Daniilidis. M3ed: Multi-robot, multi-sensor, multi-environment event dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4015–4022, 2023. 
2, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 331, + 287, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 331, + 287, + 397 + ], + "spans": [ + { + "bbox": [ + 53, + 331, + 287, + 397 + ], + "type": "text", + "content": "[5] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 399, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 399, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 53, + 399, + 287, + 464 + ], + "type": "text", + "content": "[6] Haotian Fu, Yulong Huang, Tingran Chen, Chenyi Fu, Hongwei Ren, Yue Zhou, Shouzhong Peng, Zhirui Zong, Biao Pan, and Bojun Cheng. Ds-cim: A 40nm asynchronous dual-spike driven, mram compute-in-memory macro for spiking neural network. IEEE Transactions on Circuits and Systems I: Regular Papers, 2024. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 466, + 287, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 466, + 287, + 509 + ], + "spans": [ + { + "bbox": [ + 53, + 466, + 287, + 509 + ], + "type": "text", + "content": "[7] Guillermo Gallego, Christian Forster, Elias Mueggler, and Davide Scaramuzza. Event-based camera pose tracking using a generative event model. arXiv preprint arXiv:1510.01972, 2015. 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 511, + 287, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 511, + 287, + 565 + ], + "spans": [ + { + "bbox": [ + 53, + 511, + 287, + 565 + ], + "type": "text", + "content": "[8] Guillermo Gallego, Jon EA Lund, Elias Mueggler, Henri Rebecq, Tobi Delbruck, and Davide Scaramuzza. Event-based, 6-dof camera tracking from photometric depth maps. IEEE transactions on pattern analysis and machine intelligence, 40(10):2402-2412, 2017. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 567, + 287, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 567, + 287, + 622 + ], + "spans": [ + { + "bbox": [ + 53, + 567, + 287, + 622 + ], + "type": "text", + "content": "[9] Guillermo Gallego, Tobi Delbrück, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew J Davison, Jörg Conradt, Kostas Daniilidis, et al. Event-based vision: A survey. IEEE transactions on pattern analysis and machine intelligence, 44(1):154-180, 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 624, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 667 + ], + "type": "text", + "content": "[10] Yulan Guo, Hanyun Wang, Qingyong Hu, Hao Liu, Li Liu, and Mohammed Bennamoun. Deep learning for 3d point clouds: A survey. IEEE transactions on pattern analysis and machine intelligence, 43(12):4338-4364, 2020. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "text", + "content": "[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 
Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 2" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[12] Yifan Jin, Lei Yu, Guangqiang Li, and Shumin Fei. A 6-dofs event-based camera relocalization system by cnn-lstm and image denoising. Expert Systems with Applications, 170: 114535, 2021. 2, 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "type": "text", + "content": "[13] Alex Kendall and Roberto Cipolla. Modelling uncertainty in deep learning for camera relocalization. In 2016 IEEE international conference on Robotics and Automation (ICRA), pages 4762-4769. IEEE, 2016. 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 162, + 545, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 162, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 308, + 162, + 545, + 215 + ], + "type": "text", + "content": "[14] Alex Kendall, Matthew Grimes, and Roberto Cipolla. Posenet: A convolutional network for real-time 6-dof camera relocalization. In Proceedings of the IEEE international conference on computer vision, pages 2938-2946, 2015. 2, 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 217, + 545, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 217, + 545, + 271 + ], + "spans": [ + { + "bbox": [ + 308, + 217, + 545, + 271 + ], + "type": "text", + "content": "[15] Zakaria Laskar, Iaroslav Melekhov, Surya Kalia, and Juho Kannala. Camera relocalization by computing pairwise relative poses using convolutional neural network. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 929-938, 2017. 2, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 273, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 273, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 308, + 273, + 545, + 293 + ], + "type": "text", + "content": "[16] Yann LeCun, Yoshua Bengio, and Geoffrey Hinton. Deep learning. nature, 521(7553):436-444, 2015. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 295, + 545, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 295, + 545, + 337 + ], + "spans": [ + { + "bbox": [ + 308, + 295, + 545, + 337 + ], + "type": "text", + "content": "[17] Patrick Lichtsteiner, Christoph Posch, and Tobi Delbruck. A " + }, + { + "bbox": [ + 308, + 295, + 545, + 337 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 308, + 295, + 545, + 337 + ], + "type": "text", + "content": " 120 db " + }, + { + "bbox": [ + 308, + 295, + 545, + 337 + ], + "type": "inline_equation", + "content": "15\\mu s" + }, + { + "bbox": [ + 308, + 295, + 545, + 337 + ], + "type": "text", + "content": " latency asynchronous temporal contrast vision sensor. IEEE journal of solid-state circuits, 43 (2):566-576, 2008. 
1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 338, + 545, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 338, + 545, + 393 + ], + "spans": [ + { + "bbox": [ + 308, + 338, + 545, + 393 + ], + "type": "text", + "content": "[18] Hu Lin, Meng Li, Qianchen Xia, Yifeng Fei, Baocai Yin, and Xin Yang. 6-dof pose relocalization for event cameras with entropy frame and attention networks. In The 18th ACM SIGGRAPH International Conference on Virtual-Reality Continuum and its Applications in Industry, pages 1–8, 2022. 2, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 394, + 545, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 394, + 545, + 480 + ], + "spans": [ + { + "bbox": [ + 308, + 394, + 545, + 480 + ], + "type": "text", + "content": "[19] Yimin Lin, Zhaoxiang Liu, Jianfeng Huang, Chaopeng Wang, Guoguang Du, Jinqiang Bai, and Shiguo Lian. Deep global-relative networks for end-to-end 6-dof visual localization and odometry. In PRICAI 2019: Trends in Artificial Intelligence: 16th Pacific Rim International Conference on Artificial Intelligence, Cuvu, Yanuca Island, Fiji, August 26–30, 2019, Proceedings, Part II, pages 454–467. Springer, 2019. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 482, + 545, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 482, + 545, + 535 + ], + "spans": [ + { + "bbox": [ + 308, + 482, + 545, + 535 + ], + "type": "text", + "content": "[20] Haobo Liu, Zhengyang Qian, Wei Wu, Hongwei Ren, Zhiwei Liu, and Leibin Ni. Afpr-cim: An analog-domain floating-point rram-based compute-in-memory architecture with dynamic range adaptive fp-adc. arXiv preprint arXiv:2402.13798, 2024. 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 537, + 545, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 545, + 581 + ], + "type": "text", + "content": "[21] Xu Ma, Can Qin, Haoxuan You, Haoxi Ran, and Yun Fu. Rethinking network design and local geometry in point cloud: A simple residual mlp framework. In International Conference on Learning Representations, 2021. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "type": "text", + "content": "[22] Anton Mitrokhin, Zhiyuan Hua, Cornelia Fermuller, and Yiannis Aloimonos. Learning visual motion segmentation using event surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14414-14423, 2020. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 636, + 545, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 636, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 308, + 636, + 545, + 690 + ], + "type": "text", + "content": "[23] Elias Mueggler, Henri Rebecq, Guillermo Gallego, Tobi Delbruck, and Davide Scaramuzza. The event-camera dataset and simulator: Event-based data for pose estimation, visual odometry, and slam. The International Journal of Robotics Research, 36(2):142-149, 2017. 
1, 2, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 692, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 692, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 692, + 545, + 713 + ], + "type": "text", + "content": "[24] Elias Mueggler, Guillermo Gallego, Henri Rebecq, and Davide Scaramuzza. Continuous-time visual-inertial odometry" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "18120" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "type": "text", + "content": "for event cameras. IEEE Transactions on Robotics, 34(6): 1425-1440, 2018. 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 150 + ], + "type": "text", + "content": "[25] Tayyab Naseer and Wolfram Burgard. Deep regression for monocular camera-based 6-dof global localization in outdoor environments. In 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 1525-1530. IEEE, 2017. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 288, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 288, + 207 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 288, + 207 + ], + "type": "text", + "content": "[26] Anh Nguyen, Thanh-Toan Do, Darwin G Caldwell, and Nikos G Tsagarakis. Real-time 6dof pose relocalization for event cameras with stacked spatial LSTM networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 2, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 209, + 287, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 209, + 287, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 209, + 287, + 262 + ], + "type": "text", + "content": "[27] Christoph Posch, Daniel Matolin, and Rainer Wohlgenannt. A qvga 143 db dynamic range frame-free pwm image sensor with lossless pixel-level video compression and time-domain cds. IEEE Journal of Solid-State Circuits, 46(1):259-275, 2010. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 264, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 264, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 264, + 287, + 319 + ], + "type": "text", + "content": "[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 
2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 320, + 287, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 320, + 287, + 365 + ], + "spans": [ + { + "bbox": [ + 48, + 320, + 287, + 365 + ], + "type": "text", + "content": "[29] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 2, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 365, + 287, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 287, + 408 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 287, + 408 + ], + "type": "text", + "content": "[30] Noha Radwan, Abhinav Valada, and Wolfram Burgard. Vlocnet++: Deep multitask learning for semantic visual localization and odometry. IEEE Robotics and Automation Letters, 3(4):4407-4414, 2018. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 410, + 287, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 410, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 48, + 410, + 287, + 443 + ], + "type": "text", + "content": "[31] Henri Rebecq, Daniel Gehrig, and Davide Scaramuzza. Esim: an open event camera simulator. In Conference on robot learning, pages 969-982. PMLR, 2018. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 445, + 287, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 445, + 287, + 488 + ], + "spans": [ + { + "bbox": [ + 48, + 445, + 287, + 488 + ], + "type": "text", + "content": "[32] Hongwei Ren, Yue Zhou, Haotian Fu, Yulong Huang, Renjing Xu, and Bojun Cheng. Ttpoint: A tensorized point cloud network for lightweight action recognition with event cameras. arXiv preprint arXiv:2308.09993, 2023. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 490, + 287, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 490, + 287, + 543 + ], + "spans": [ + { + "bbox": [ + 48, + 490, + 287, + 543 + ], + "type": "text", + "content": "[33] Hongwei Ren, Yue Zhou, Yulong Huang, Haotian Fu, Xiaopeng Lin, Jie Song, and Bojun Cheng. Spikepoint: An efficient point-based spiking neural network for event cameras action recognition. arXiv preprint arXiv:2310.07189, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 545, + 287, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 545, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 48, + 545, + 287, + 589 + ], + "type": "text", + "content": "[34] Yusuke Sekikawa, Kosuke Hara, and Hideo Saito. Eventnet: Asynchronous recursive event processing. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3887-3896, 2019. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 590, + 287, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 623 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 623 + ], + "type": "text", + "content": "[35] Yoli Shavit and Ron Ferens. Introduction to camera pose estimation with deep learning. arXiv preprint arXiv:1907.05272, 2019. 
1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 287, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 657 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 657 + ], + "type": "text", + "content": "[36] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[37] Ahmed Tabia, Fabien Bonardi, and Samia Bouchafa. Deep learning for pose estimation from event camera. In 2022 International Conference on Digital Image Computing: Techniques and Applications (DICTA), pages 1-7. IEEE, 2022. 6" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 419 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[38] Abhinav Valada, Noha Radwan, and Wolfram Burgard. Deep auxiliary learning for visual localization and odometry. In 2018 IEEE international conference on robotics and automation (ICRA), pages 6939-6946. IEEE, 2018. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 173 + ], + "type": "text", + "content": "[39] Florian Walch, Caner Hazirbas, Laura Leal-Taixe, Torsten Sattler, Sebastian Hilsenbeck, and Daniel Cremers. Image-based localization using lstms for structured feature correlation. In Proceedings of the IEEE International Conference on Computer Vision, pages 627-637, 2017. 2, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 175, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 228 + ], + "type": "text", + "content": "[40] Qinyi Wang, Yexin Zhang, Junsong Yuan, and Yilong Lu. Space-time event clouds for gesture recognition: From rgb cameras to event cameras. In 2019 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 1826-1835. IEEE, 2019. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 230, + 545, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 230, + 545, + 273 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 545, + 273 + ], + "type": "text", + "content": "[41] Jian Wu, Liwei Ma, and Xiaolin Hu. Delving deeper into convolutional neural networks for camera relocalization. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 5644-5651. IEEE, 2017. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 274, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 274, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 274, + 545, + 319 + ], + "type": "text", + "content": "[42] Wenxuan Wu, Zhongang Qi, and Li Fuxin. Pointconv: Deep convolutional networks on 3d point clouds. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9621-9630, 2019. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 320, + 545, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 320, + 545, + 374 + ], + "spans": [ + { + "bbox": [ + 307, + 320, + 545, + 374 + ], + "type": "text", + "content": "[43] Jiancheng Yang, Qiang Zhang, Bingbing Ni, Linguuo Li, Jinxian Liu, Mengdie Zhou, and Qi Tian. Modeling point clouds with self-attention and gumbel subset sampling. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3323–3332, 2019. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 376, + 545, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 376, + 545, + 419 + ], + "spans": [ + { + "bbox": [ + 308, + 376, + 545, + 419 + ], + "type": "text", + "content": "[44] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16259-16268, 2021. 3" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "18121" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/de2cb66c-4154-4b26-aa15-91e83e19d783_content_list.json b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/de2cb66c-4154-4b26-aa15-91e83e19d783_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..52c1304f31f149441ccf4bdc8564945726c33852 --- /dev/null +++ b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/de2cb66c-4154-4b26-aa15-91e83e19d783_content_list.json @@ -0,0 +1,1530 @@ +[ + { + "type": "text", + "text": "A Stealthy Wrongdoer: Feature-Oriented Reconstruction Attack against Split Learning", + "text_level": 1, + "bbox": [ + 158, + 130, + 810, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiaoyang Xu $^{1}$ Mengda Yang $^{1}$ Wenzhe Yi $^{1}$ Ziang Li $^{1}$ Juan Wang $^{1*}$ Hongxin Hu $^{2}$ Yong Zhuang $^{1}$ Yaxin Liu $^{1}$", + "bbox": [ + 104, + 200, + 862, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Key Laboratory of Aerospace Information Security and Trusted Computing, Ministry of Education, School of Cyber Science and Engineering, Wuhan University", + "bbox": [ + 88, + 239, + 880, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Department of Computer Science and Engineering, University at Buffalo, SUNY", + "bbox": [ + 158, + 273, + 808, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{xiaoyangx, mengday, wenzhey, ziangli, yong.zhuang, yaxin.liu}@whu.edu.cn jwang@whu.edu.cn, hongxinh@buffalo.edu", + "bbox": [ + 161, + 294, + 805, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 361, + 313, + 378 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Split Learning (SL) is a 
distributed learning framework renowned for its privacy-preserving features and minimal computational requirements. Previous research consistently highlights the potential privacy breaches in SL systems by server adversaries reconstructing training data. However, these studies often rely on strong assumptions or compromise system utility to enhance attack performance. This paper introduces a new semi-honest Data Reconstruction Attack on SL, named Feature-Oriented Reconstruction Attack (FORA). In contrast to prior works, FORA relies on limited prior knowledge, specifically that the server utilizes auxiliary samples from the public without knowing any client's private information. This allows FORA to conduct the attack stealthily and achieve robust performance. The key vulnerability exploited by FORA is the revelation of the model representation preference in the smashed data output by victim client. FORA constructs a substitute client through feature-level transfer learning, aiming to closely mimic the victim client's representation preference. Leveraging this substitute client, the server trains the attack model to effectively reconstruct private data. Extensive experiments showcase FORA's superior performance compared to state-of-the-art methods. Furthermore, the paper systematically evaluates the proposed method's applicability across diverse settings and advanced defense strategies.", + "bbox": [ + 76, + 393, + 472, + 773 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 803, + 209, + 819 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Deep Neural Networks (DNN) have gained widespread usage in computer vision due to their excellent learning ability and expressive power. Split Learning (SL) [2, 11, 16, 32, 38,", + "bbox": [ + 76, + 829, + 468, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "42, 44] emerged as a distributed collaborative framework that enables clients to cooperate with a server to perform learning task. In SL, the complete DNN model is divided into two parts, which are deployed on the client and server respectively. For a normal training process in SL, the client performs the computational process locally and communicates with the server solely based on intermediate features (referred to as smashed data) and their corresponding gradients. In this case, the server does not have access to any private information (raw data, parameters, architecture) about the client. Therefore, SL is considered effective in protecting the privacy of clients.", + "bbox": [ + 500, + 362, + 890, + 545 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, recent works [6, 10, 19, 31, 36] have shown that there are still privacy risks associated with SL. It is possible for the server to steal private information about the client according to auxiliary knowledge. One particular concern is the Data Reconstruction Attack (DRA) [6, 10, 31], where a server attempts to recover the training data of a client in SL systems. Depending on whether the server affects the normal process of SL, we can categorize adversaries into malicious and semi-honest attackers. Malicious servers such as FSHA [31] can manipulate the SL training process to conduct more effective attack. However, the latest findings [5, 8] show that FSHA's mischief is easily detected by the client, leading to the termination of SL training protocol For semi-honest attackers, e.g. 
PCAT [10] and UnSplit [6], their superior camouflage makes them less likely to be detected. But current semi-honest attackers often rely overly on assumptions that favor their performances. For example, UnSplit requires knowledge of the client's architecture and is only applicable to simple networks or datasets. As for PCAT, it unduly depends on the availability of partial private data to assist in training the pseudo-client. These assumptions contradict the basic principle of SL, which is to ensure that the client's knowledge", + "bbox": [ + 496, + 553, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author.", + "bbox": [ + 94, + 886, + 220, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "12130", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "remains hidden from the server. In summary, we find previous attacks lack consideration of the intrinsic security of SL and the plausibility of their attack hypothesis, which limits the effectiveness and threat of their approach in real-world SL systems scenarios.", + "bbox": [ + 75, + 90, + 470, + 167 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we introduce a novel DRA toward more realistic and more challenging scenarios, where the server cannot access private data or the structures and parameters of the client model. Our scheme stems from new insights into potential privacy breaches in SL. We discover a fundamental phenomenon that the client model has its own representation preference, which can be reflected through the output smashed data. More importantly, this unique information can indicate the feature extraction behavior of the client. Based on this new insight, we propose a semi-honest privacy threat, namely Feature-Oriented Reconstruction Attack (FORA). A server adversary could establish a substitute client by narrowing the reference distance with the real client, which allows the substitute model to mimic the behavior of the target model at a finer granularity. To efficiently measure the preference distance of different representations, we introduce domain Discriminator network [9, 14] and Multi-Kernel Maximum Mean Discrepancy (MK-MMD) [15, 29]. These techniques are widely used in domain adaptation [45], enabling us to project various representation preferences into a shared space for comparison. With a well-trained substitute client, the server can successfully recover the private data by constructing an inverse network.", + "bbox": [ + 75, + 169, + 472, + 531 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We conduct our evaluation on two benchmark datasets and corresponding networks against different model partitioning strategies. The experimental results indicate that the proposed method significantly outperforms baseline attacks. Taking the reconstructed images of CelebA at layer 2 as an example, UnSplit, PCAT and FORA achieve effects of 8.70, 12.05, and 17.11 on the PSNR [20]. 
This demonstrates that FORA has significantly outperformed by $1.97\\mathrm{x}$ and $1.42\\mathrm{x}$ compared to the other two attacks. Although FSHA can achieve attack performance similar to ours, its malicious attack process can be promptly halted through monitoring mechanisms [8], resulting in poor reconstructions. Furthermore, we investigate the potential influences on FORA, including different public knowledge conditions and existing defense strategies, to validate the robustness of FORA.", + "bbox": [ + 75, + 534, + 472, + 773 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The main contributions of this paper can be summarized as follows:", + "bbox": [ + 76, + 777, + 470, + 808 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- We propose a novel attack, named Feature-Oriented Reconstruction Attack (FORA). As far as we know, FORA is the first work enabling a semi-honest server to perform powerful DRA in more realistic and challenging SL systems. In such scenarios, the server has no prior knowledge of the client model or access to raw data.", + "bbox": [ + 75, + 810, + 470, + 901 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/f6bf651513d456665468a4daae80b5b42c851a484cae83889e7ec689272883a5.jpg", + "image_caption": [ + "Figure 1. Architecture of two-part split learning." + ], + "image_footnote": [], + "bbox": [ + 524, + 90, + 870, + 171 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We have uncovered an inherent vulnerability in SL, where the server can exploit rich information in the smashed data to steal client representation preference, thereby building a substitute client for better reconstruction.", + "- We conduct comprehensive experiments with various adversarial knowledge against different benchmark datasets and models. The results demonstrate that FORA can achieve state-of-the-art attack performance compared with baselines and exhibits notable robustness across different settings." + ], + "bbox": [ + 496, + 224, + 890, + 377 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Background and Related Work", + "text_level": 1, + "bbox": [ + 500, + 391, + 785, + 407 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Split Learning (SL). SL [2, 16, 32, 38, 42] is an emerging distributed learning paradigm for resource-limited scenarios, which can split the neural network model into both client-side and server-side. As shown in Fig. 1, the client performs forward propagation and transmits the smashed data to the server, which then uses the computed loss for backward propagation and sends the gradients of the smashed data back to the client. Both the client and server will update their weights after receiving the gradients. It is generally believed that SL provides a secure and efficient training protocol by allowing the client to retain a portion of the model and training data locally while offloading most of the computing overhead to the server [2, 16, 32, 42]. However, recent studies [6, 7, 10, 23, 31] have highlighted vulnerabilities in SL, where the server can exploit the latter part of the model to carry out privacy attacks.", + "bbox": [ + 496, + 417, + 890, + 657 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Data Reconstruction Attack (DRA) on SL. DRA [19, 27, 35, 48] is one of the most powerful privacy attacks that aim to steal the input data by the model's intermediate features. 
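The smashed-data exchange just described can be sketched in a few lines. The following is a generic two-party split-learning training step in PyTorch, not the paper's implementation; `client`, `server`, the two optimizers, and `criterion` are placeholder objects for the client model, the server model, their optimizers, and the task loss.

```python
import torch

def sl_training_step(client, server, opt_client, opt_server, x, y, criterion):
    # Client side: forward pass up to the cut layer produces the smashed data.
    smashed = client(x)

    # Only the tensor values cross the client/server boundary, not the client's graph.
    smashed_srv = smashed.detach().requires_grad_(True)

    # Server side: finish the forward pass, compute the loss, update the server half.
    loss = criterion(server(smashed_srv), y)
    opt_server.zero_grad()
    loss.backward()
    opt_server.step()

    # The server returns d(loss)/d(smashed data); the client resumes backpropagation locally.
    opt_client.zero_grad()
    smashed.backward(smashed_srv.grad)
    opt_client.step()
    return loss.item()
```

Note that the server only ever sees `smashed_srv` and whatever it needs to compute the loss; the raw inputs and the client's parameters stay on the client, which is exactly the boundary the attacks discussed below try to cross.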
In SL, the server can utilize the smashed data output by the client to reconstruct the training data [6, 10, 31]. One notable attack is known as FSHA [31], where a malicious attacker utilizes the elaborated loss to alter the feature space of the victim client for reconstructing private data. In UnSplit [6], the semi-honest server attempts to reconstruct the training data and client's parameters simultaneously by utilizing the smashed data. Specifically, UnSplit optimizes parameters and inputs sequentially by minimizing the outputs between the clone client and the target client. To the best of our knowledge, PCAT [10] represents the most advanced attack under the semi-honest assumption. PCAT leverages the knowledge embedded in various stages of the server models", + "bbox": [ + 496, + 659, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "12131", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d5e835309a9162567d4315273cdb244bf158eb3b68e49d594462f326311a9f2c.jpg", + "image_caption": [ + "(a) Original" + ], + "image_footnote": [], + "bbox": [ + 78, + 88, + 166, + 155 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1ae30e6820f48215640529140ee0d38c9e61840d9c6c9f9bd188d677fee27913.jpg", + "image_caption": [ + "(b) Model 1" + ], + "image_footnote": [], + "bbox": [ + 181, + 88, + 266, + 154 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/bea9c5acd0057794372126887ef9a442e835ce3c0fe1b3bd8176c144add6ff24.jpg", + "image_caption": [ + "(c) Model 2", + "Figure 2. Input image and behavior visualization by Grad-CAM [33]. All the models are trained in CelebA with the task of smiling classification. The figure displays the original images and the representation preferences of three models trained under the same hyperparameter settings but with different random seeds." + ], + "image_footnote": [], + "bbox": [ + 281, + 88, + 366, + 154 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/fb14206aa49cb0de784e118d4771ab9d576820e83a6bd4b4943f1d753c191055.jpg", + "image_caption": [ + "(d) Model 3" + ], + "image_footnote": [], + "bbox": [ + 383, + 88, + 468, + 154 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "to steal private data by constructing a pseudo-client. Unlike previous work, SFA [30] focuses on reconstructing samples during the inference stage rather than the training samples.", + "bbox": [ + 75, + 277, + 468, + 321 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Although existing works claim that their attacks pose significant privacy threats to SL, they disregard the plausibility of their threat model. For FSHA, the server reconstructs the raw data while at the cost of destroying the client's utility. While FSHA assumes that the client is entirely free of any awareness of being maliciously disrupted, recent research [5, 8] indicates that such a malicious server can be easily detected by the client, leading to a halt in the SL. UnSplit needs the knowledge of the client's structure and is not suitable for complex networks and datasets due to the infinite searching space of input data and model parameters. As for PCAT, it requires the adversary to have access to a portion of the private dataset. This is an unreasonable assumption that violates the original intention of SL since one of the distinctive characteristics of SL is the ability to train models without sharing the raw data [42]. 
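As a rough illustration of the alternating optimization attributed to UnSplit above (refining a guessed input and a clone of the client so that the clone's output matches the observed smashed data), consider the toy loop below. It is a sketch of the general idea only, not the authors' implementation; `clone`, the step counts, and the learning rates are placeholders.

```python
import torch
import torch.nn.functional as F

def unsplit_style_recovery(clone, z_target, x_shape, rounds=200, inner=10):
    """Alternately optimize a guessed input x_hat and the clone's weights so that
    clone(x_hat) approaches the smashed data z_target observed at the server."""
    x_hat = torch.rand(x_shape, requires_grad=True)
    opt_x = torch.optim.Adam([x_hat], lr=1e-2)
    opt_w = torch.optim.Adam(clone.parameters(), lr=1e-3)
    for _ in range(rounds):
        for _ in range(inner):                 # input step
            opt_x.zero_grad()
            F.mse_loss(clone(x_hat), z_target).backward()
            opt_x.step()
        for _ in range(inner):                 # model step
            opt_w.zero_grad()
            F.mse_loss(clone(x_hat), z_target).backward()
            opt_w.step()
    return x_hat.detach()
```

The joint search over inputs and parameters is what blows up on deeper networks, which matches the observation above that UnSplit is only practical for simple models and datasets.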
As a result, how to explore DRA under more realistic assumptions in SL remains an open question.", + "bbox": [ + 75, + 323, + 468, + 595 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Domain Adaptation. Domain adaptation [9, 12, 15, 29, 40, 41, 46] is a technique that seeks to enhance the generalization of a model by transferring knowledge acquired from a source domain to a distinct yet related target domain. The core idea of domain adaptation is to map data from different domains into the same space for comparison. Here, we apply two popular methods: the domain Discriminator network [9, 41, 46] and the Multi-Kernel Maximum Mean Discrepancy (MK-MMD) function [15, 29, 46] to compare the feature spaces of different models.", + "bbox": [ + 75, + 595, + 468, + 746 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 761, + 166, + 776 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Threat Model", + "text_level": 1, + "bbox": [ + 76, + 785, + 218, + 800 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Without loss of generality, given a two-party SL protocol, the SL model $F$ is partitioned to a server model $F_{s}$ and a client model $F_{c}$ . The server aims to stealthily recover the private training data of the client through the smashed data $Z$ output by $F_{c}$ .", + "bbox": [ + 75, + 809, + 468, + 883 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We assume that the server adversary is a semi-honest en", + "bbox": [ + 96, + 885, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tity, ensuring that the training process is indistinguishable from ordinary training during attack. Furthermore, we posit that the server adversary must adhere to the foundational principle of the SL — she lacks any means of accessing client-sensitive information. Specifically, the server does not require knowledge of the structure or hyperparameters of $F_{c}$ and is devoid of access to the client's private training dataset $D_{priv}$ . The sole piece of public knowledge available to the server pertains to the auxiliary dataset $D_{aux}$ , sourced from the same domain as the private samples. It's important to note that the distribution of $D_{aux}$ typically differs from that of $D_{priv}$ . Compared to the threat model of previous works, this assumption is more reasonable and realistic.", + "bbox": [ + 496, + 90, + 890, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Motivation", + "text_level": 1, + "bbox": [ + 500, + 297, + 620, + 311 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Current DRAs rely overly on constructing inverse networks from input-output pairs obtained by querying the target model. However, this approach is impractical for SL because the server only has access to the client's outputs and is not qualified to query. A potential solution is to build a substitute client to mimic the target client, thus enabling the training of the inverse network. However, the variability of the substitute client's behavior affects the generalization of the inverse network to the target client, leading to the failure of the reconstruction, especially without the knowledge of the client model structure and private data distribution.", + "bbox": [ + 496, + 321, + 890, + 487 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As illustrated in Fig. 
2, we employ Grad-CAM [33] to visualize the attention of intermediate features generated by different clients. From Fig. 2 (a)-(d), it can be noticed that even for models trained under the same setup, there still exists evident differences between their image processing attention. This phenomenon suggests that the smashed data output by the client reflects its distinctive feature extraction behavior, which we define as representation preferences. Our general assumption is that narrowing the gap between the substitute client and the target client in terms of intermediate features can make the representation preferences of the two models more similar, which ensures that the inverse network trained by the substitute client perfectly maps the target smashed data back to the private raw data.", + "bbox": [ + 496, + 488, + 890, + 700 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Feature-Oriented Reconstruction Attack", + "text_level": 1, + "bbox": [ + 500, + 710, + 846, + 724 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Inspired by the differences in model representation preferences, we propose a novel data reconstruction attack against SL, called Feature-Oriented Reconstruction Attack (FORA). In order to mount FORA, the adversary needs to contrive a way to obtain the representation preferences of the $F_{c}$ . To address this problem, we utilize domain adaptation techniques [9, 15, 29] to project different preference representations into the same space. Specifically, the adversary conducts feature-level transfer learning by exploiting the $Z_{c}$ collected in each training iteration and then obtains a substitute model that mimics well the feature extraction be", + "bbox": [ + 496, + 734, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "12132", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/fdb59d6d1256216406721cda90d25ba234660dad3dfe8f60a179ae56d16f4c76.jpg", + "image_caption": [ + "a) Substitute Model Construction" + ], + "image_footnote": [], + "bbox": [ + 133, + 89, + 491, + 268 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/979d596657903a8d50335e0fc936c272c2a31f2ed595604514504eb6a08a5742.jpg", + "image_caption": [ + "c) Private Data Reconstruction", + "Figure 3. Attack pipeline of Feature-Oriented Reconstruction Attack (FORA) against SL. (a) shows the substitute model training phase. The attacker constructs a substitute model $\\hat{F}_c$ using $\\mathcal{L}_{DISC}$ and $\\mathcal{L}_{MK-MMD}$ to mimic the behavior of the client model $F_c$ . (b) means training an inverse network $f_c^{-1}$ using public data $X_{aux}$ . (c) represents the final attack phase using the attack model to reconstruct training data from snapshot $Z_{snap}$ of target smashed data." + ], + "image_footnote": [], + "bbox": [ + 513, + 88, + 841, + 266 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "havior of the $F_{c}$ . Through this approach, the adversary can smoothly construct an attack model (inverse mapping network) to recover the private samples. The detailed pipeline of FORA is shown in Fig. 3. It consists of three phases: substitute model construction, attack model training, and private data reconstruction.", + "bbox": [ + 75, + 382, + 468, + 472 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Substitute Model Construction. Before SL training commences, the server initializes a substitute client, denoted by $\\hat{F}_c$ . 
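The attention maps in Fig. 2 come from Grad-CAM [33]; for readers who want to reproduce that style of visualization, a minimal hook-based routine is sketched below. This is a generic Grad-CAM implementation assuming a PyTorch classifier and a chosen convolutional layer, not the exact script behind the figure.

```python
import torch
import torch.nn.functional as F

def grad_cam(model, conv_layer, x, class_idx):
    """Weight conv_layer's activations by the gradient of the target logit,
    sum over channels, and normalize to [0, 1]."""
    store = {}
    h1 = conv_layer.register_forward_hook(lambda m, i, o: store.update(act=o))
    h2 = conv_layer.register_full_backward_hook(lambda m, gi, go: store.update(grad=go[0]))
    logits = model(x)
    model.zero_grad()
    logits[:, class_idx].sum().backward()
    h1.remove()
    h2.remove()
    weights = store["grad"].mean(dim=(2, 3), keepdim=True)    # channel importance
    cam = F.relu((weights * store["act"]).sum(dim=1))         # (N, H, W)
    return cam / (cam.amax(dim=(1, 2), keepdim=True) + 1e-8)  # upsample before overlaying
```

Running this for several independently trained client models on the same image is enough to expose the kind of representation-preference differences the figure illustrates.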
The $\\hat{F}_c$ will be trained locally at the server in parallel with the victim's $F_c$ , and such process will take place throughout the entire SL collaboration. In each training iteration, the client will send smashed data of the current batch to the server for completing the subsequent computations. Concurrently, the server will use the collected smashed data to perform training on the $\\hat{F}_c$ . For this purpose, the server introduces the Discriminator module and the MK-MMD module to extract the representation preferences. We define its training objective as:", + "bbox": [ + 75, + 473, + 470, + 655 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\hat {F} _ {c}} \\mathcal {L} _ {D I S C} + \\mathcal {L} _ {M K - M M D}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 667, + 468, + 694 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_{DISC}$ is the Discriminator module constraining $Z_{aux} = \\hat{F}_c(X_{aux})$ and $Z_{priv} = F_c(X_{priv})$ to be indistinguishable, while $\\mathcal{L}_{MK - MMD}$ is the MK-MMD module making $Z_{aux}$ as close as possible to $Z_{priv}$ in shared space.", + "bbox": [ + 76, + 705, + 468, + 766 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The Discriminator [3, 9, 13] $D$ is also a network that needs to be trained synchronously and is tasked with efficiently distinguishing the generated features between $F_{c}$ and $\\hat{F}_{c}$ , maximizing probabilities of the former and minimizing probabilities of the latter [31]. Therefore, the parameters of $D$ will be updated to minimize the following loss function:", + "bbox": [ + 75, + 766, + 470, + 872 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {D} = \\log \\left(1 - \\mathcal {D} \\big (F _ {c} (X _ {p r i v}) \\big) + \\log \\mathcal {D} \\big (\\hat {F} _ {c} (X _ {a u x}) \\big)\\right). \\quad (2)\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 883, + 468, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After each local training step of $D$ , the server utilizes $D$ to instruct substitute client's representation preference to be consistent with that of the victim client. Specifically, an adversarial loss is constructed as the following:", + "bbox": [ + 498, + 382, + 892, + 444 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {D I S C} = \\log (1 - D (\\hat {F} _ {c} (X _ {a u x}))). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 576, + 452, + 890, + 470 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The MK-MMD module [15, 29] is designed to align two sets of generated features into a shared space using kernel functions and compute their difference, where a smaller difference signifies closer representation preferences. 
Then, for the substitute client, the objective extends beyond maximizing the probabilities output by the $D$ , it also seeks to minimize the MK-MMD loss function, namely:", + "bbox": [ + 496, + 479, + 892, + 585 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {M K - M M D} = \\left\\| \\phi \\left(\\hat {F} _ {c} \\left(X _ {a u x}\\right)\\right) - \\phi \\left(F _ {c} \\left(X _ {p r i v}\\right)\\right) \\right\\| _ {\\mathcal {H}}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 513, + 594, + 890, + 636 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{ \\begin{array}{c} \\phi = \\sum_ {j = 1} ^ {m} \\beta_ {j} k _ {j}, \\\\ \\sum_ {j = 1} ^ {m} \\beta_ {j} = 1, \\beta_ {j} \\geq 0, \\forall j, \\end{array} \\right. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 609, + 638, + 890, + 724 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $k$ is a single kernel function, $\\phi$ denotes a set of kernel functions that project different smashed data into Reproducing Kernel Hilbert Space $\\mathcal{H}$ , $\\beta$ is the weight coefficient corresponding to the single kernel function.", + "bbox": [ + 496, + 734, + 892, + 795 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Attack Model Training. At the end of the training of SL, the server can obtain a substitute client with a feature extraction behavior extremely similar to that of the victim client. Moreover, its feature space is known to the adversary, who can recover the original input from the smashed data by applying an inverse network (denoted as $f_{c}^{-1}$ ). Following previous DRAs [19, 35], we adopt the $f_{c}^{-1}$ consist", + "bbox": [ + 496, + 795, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "12133", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ing of a set of Transposed Convolution layers and Tanh activations as our attack model. The server can leverage the auxiliary dataset to train the attack model by minimizing the mean square error between $f_{c}^{-1}(\\hat{F}_{c}(X_{aux}))$ and $X_{aux}$ as follows:", + "bbox": [ + 75, + 90, + 468, + 165 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {f _ {c} ^ {- 1}} = \\left\\| f _ {c} ^ {- 1} \\left(\\hat {F} _ {c} \\left(X _ {a u x}\\right)\\right) - X _ {a u x} \\right\\| _ {2} ^ {2}. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 145, + 176, + 468, + 198 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Private Data Reconstruction. The server keeps a snapshot $Z_{\\text{snap}} = F_c(X_{\\text{priv}})$ of all smashed data output by the target client under the final training iteration for reconstruction. Since the substitute client is able to mimic the target client's representation preferences well, the server can subtly use $f_c^{-1}$ to perform the attack by mapping the target smashed data directly into the private raw data space, namely:", + "bbox": [ + 75, + 208, + 468, + 328 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nX _ {p r i v} ^ {*} = f _ {c} ^ {- 1} \\left(Z _ {s n a p}\\right). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 329, + 468, + 347 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here, $X_{priv}^{*}$ are the reconstructed private training samples.", + "bbox": [ + 76, + 353, + 464, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. 
Experiments", + "text_level": 1, + "bbox": [ + 76, + 383, + 209, + 400 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 76, + 407, + 266, + 425 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. In our experiments, we rely on CIFAR-10 [26] and CelebA [28] to validate the attacks, due to their dominance in the research on SL [6, 10, 31]. They will be used as private data for the client's target training tasks. According to Sec. 3.1, we assume that the server adversary has access to a set of auxiliary samples that are distinct from the client's private data. Therefore, we choose CINIC-10 [4] and FFHQ [24] as the adversary's auxiliary dataset, respectively. We exclude images in CINIC-10 that overlapped with CIFAR-10, and randomly select 5,000 samples and 10,000 samples from the preprocessed CINIC-10 and FFHQ as the final auxiliary data. Appendix A.1 provides the detailed information for different datasets.", + "bbox": [ + 75, + 431, + 468, + 626 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Models. We consider two popular types of neural network architectures, including MobileNet [21] and ResNet-18 [17], as target models for the classification tasks of CIFRA-10 and CelebA, respectively. We set various split points for different target models to show our attack performance. Since the server is entirely unaware of the client's model structure from Sec. 3.1, we use VGG blocks [34] (consisting of a sequence of Convolutional, BatchNorm, ReLU, and MaxPool layers) to construct substitute models. In addition, the adversary's substitute models adaptively depend on the size of the intermediate features output by the client. All the architecture information and splitting schemes used in this paper are reported in Appendix A.2.", + "bbox": [ + 75, + 628, + 468, + 824 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Metrics. In addition to analyzing the qualitative results of attack performances visually, we chose three quantitative metrics to evaluate the quality of the reconstructed images: Structural Similarity (SSIM) [47], Peak Signal-to-Noise Ratio (PSNR) [20], and Learned Perceptual Image", + "bbox": [ + 75, + 825, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/836bb92b9f1c7a3cfd9825cb75d5f249da0af7787e60ecd1fafb4bdc52d54786.jpg", + "image_caption": [ + "(a) Detection Score" + ], + "image_footnote": [], + "bbox": [ + 509, + 89, + 722, + 213 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e65cfc024f9d9f27e345480615bc19d12409b3e354862a61dae73b7dfa2ccd6c.jpg", + "image_caption": [ + "(b) Reconstruction Results", + "Figure 4. Attack performance comparison of FSHA [31] and FORA on CIFAR-10 with layer 2. (a) shows the detection score of two attacks detected by GS. (b) represents the reconstruction results of two attacks, and FSHA-GS is the reconstructed images when detected by GS." + ], + "image_footnote": [], + "bbox": [ + 733, + 87, + 883, + 214 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Patch Similarity (LPIPS) [49]. We also use Cosine Similarity and Mean Square Error to measure the similarity between the substitute client and the target client in feature space.", + "bbox": [ + 496, + 342, + 890, + 402 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Attack Baselines. We mainly compare our approach with three representative existing methods, which are FSHA [31], UnSplit [6], and PCAT [10]. 
For the malicious attack FSHA, we use sophisticated detection mechanism to jointly evaluate the attack's effectiveness. For the semi-honest attack UnSplit, we make it consistent with our experimental settings to ensure fairness. PCAT requires an understanding of the learning task while relying on a subset of the private training data to build the pseudo-client, and in order to comply with this assumption, we set the proportion of the CIFAR-10 private dataset to be $5\\%$ (the maximal threshold suggested by the original paper), and for more complex CelebA dataset, we extend the proportion to be $10\\%$ .", + "bbox": [ + 496, + 405, + 892, + 602 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Comparison with Malicious Attack", + "text_level": 1, + "bbox": [ + 500, + 618, + 805, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Since FSHA severely undermines the utility of the target client, recent work has proposed the Gradients Scrutinizer (GS) [8] to defend against such hijacking attacks by detecting the gradients returned from the server to the client. The GS will perform a similarity computation on the gradients, and if the calculated value is lower than a set threshold, it will be considered as a potential attack, resulting in the training of SL being immediately suspended. More details about GS can be found in Appendix C.1. We can observe from Fig. 4 that the reconstruction results of FORA are almost the same as those of FSHA in the unprotected SL system. Although FSHA performs well in capturing fine graphical details, it also leads to noticeable color shifts in some reconstruction results. Moreover, since FSHA drastically tampers with the updated gradient returned to the client model, it is easily detected by GS, leading to the failure of reconstruction.", + "bbox": [ + 496, + 643, + 890, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "12134", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/2ba9f9548dbd84d43d058b967ae6a199cb78c325e996f787d1308eca388d5de6.jpg", + "table_caption": [ + "Table 1. Data reconstruction results of UnSplit, PCAT, and FORA on CIFAR-10 and CelebA in different splitting settings." + ], + "table_footnote": [], + "table_body": "
Split PointUnSplitCIFAR-10 PCATFORACelebA PCATFORA
Ground Truth
layer 1
layer 2
layer 3
layer 4
", + "bbox": [ + 81, + 114, + 888, + 300 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/c88701109558b0800b3df6794b6ffb57bb9e55cfd854e31b84a1b0a77efaf200.jpg", + "table_caption": [ + "Table 2. SSIM, PSNR, and LPIPS of the reconstructed images on CIFAR-10 among three attacks." + ], + "table_footnote": [], + "table_body": "
Split PointSSIM↑PSNR↑LPIPS↓
UnSplitPCATFORAUnSplitPCATFORAUnSplitPCATFORA
layer 10.1710.8530.92611.0322.1025.870.6770.2190.120
layer 20.1010.6420.83010.4817.2922.190.6890.4320.252
layer 30.1040.2910.62211.1413.1818.930.7410.6150.381
layer 40.1080.1210.0308.6211.0810.450.7220.6760.628
", + "bbox": [ + 81, + 364, + 467, + 421 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Comparison with Semi-Honest Attacks", + "text_level": 1, + "bbox": [ + 76, + 450, + 413, + 467 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Reconstruction Performance. We show in detail the reconstruction results for UnSplit, PCAT, and our proposed FORA on all split points for both datasets. As depicted in Tab. 1, compared to other attacks, the images reconstructed by FORA exhibit a significant improvement visually. Due to the vast search space and inefficient optimization approach, UnSplit almost fails to recover training data in both datasets, even at layer 1. Although PCAT can reconstruct training samples in the shallow settings of the CIFAR-10 dataset, such as layer 1 and layer 2, the reconstruction quality is still lower than that of FORA. For the more complex CelebA dataset, PCAT struggles to produce quality reconstructions. Tab. 2 and Tab. 3 provides the quantitative results of the attacks. Except for the anomaly at the layer 4 split point of CIFAR-10, where FORA slightly underperforms PCAT in terms of SSIM and PSNR metric, FORA is superior to both methods in all other settings, especially in terms of the LPIPS metric, which is considered to be more aligned with human perception. Notably, even though PCAT has access to a subset of the private data, while FORA only obtains samples with different distributions, FORA substantially surpasses PCAT for reconstruction. This further emphasizes the robust privacy threat our approach poses to SL. More reconstructed images are presented in Appendix B.1.", + "bbox": [ + 75, + 476, + 468, + 838 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Feature Similarity. As shown in Tab. 4, we measure the feature distance between the proxy clients built by UnSplit, PCAT, and FORA and the target client at layer 2. The results show that the substitute clients trained by our method", + "bbox": [ + 75, + 839, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/f3f2abea8553bf01eacd7597f8275305bea6a827bd292adc2416586f0712896e.jpg", + "table_caption": [ + "Table 3. SSIM, PSNR and LPIPS of the reconstructed images on CelebA among three attacks." + ], + "table_footnote": [], + "table_body": "
Split PointSSIM↑PSNR↑LPIPS↓
UnSplitPCATFORAUnSplitPCATFORAUnSplitPCATFORA
layer 10.1370.3330.4859.2613.4517.720.8040.6340.320
layer 20.1700.3160.4768.7012.0517.110.7470.6530.381
layer 30.1560.1640.19110.6611.6314.190.7930.7310.509
layer 40.0840.0920.1927.9410.6013.000.8040.7380.621
", + "bbox": [ + 503, + 364, + 888, + 421 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/d494332e5272a043c25fcb5e07e620bd904acadbed53c2f404ab7373fb889fc2.jpg", + "table_caption": [ + "Table 4. Feature similarity measured by Mean Square Error and Cosine Similarity on CIFAR-10 and CelebA at layer 2." + ], + "table_footnote": [], + "table_body": "
MethodCIFAR-10CelebA
UnSplitPCATFORAUnSplitPCATFORA
Mean Square Error↓1.0410.5280.27450.7731.3530.753
Cosine Similarity↑0.2000.5920.8100.3330.4800.778
", + "bbox": [ + 503, + 476, + 888, + 522 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "exhibit more similar representation preferences to the target client. The basic optimization approach of UnSplit makes it difficult to regularize the feature space of the proxy client. As for PCAT, it simply makes the smashed data generated by the pseudo model more favorable to the server model but fails to mimic the behavior of the client model. In contrast, FORA can impose stronger constraints in the feature space, which directly contributes to successful reconstruction.", + "bbox": [ + 496, + 549, + 890, + 670 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Effect of Auxiliary Dataset", + "text_level": 1, + "bbox": [ + 498, + 680, + 740, + 696 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Next, we analyze the effect of several important factors regarding the auxiliary dataset on attack performance. We first explore the impact of the fitting level of substitute models by varying the size of the auxiliary data. Then, we discuss the impact of the presence of a more significant distribution shift, i.e., the absence of some categories, between the auxiliary and target samples. Finally, we relax the major assumption about the adversary, namely that the server has access to the similarly distributed auxiliary dataset. We set the split point at layer 2 for ablation, and the full experimental results are provided in Appendix B.2.", + "bbox": [ + 496, + 703, + 890, + 869 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Auxiliary Set Size. As shown in Fig. 5, when we reduce the size of the auxiliary dataset to half of the previous", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "12135", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/701ddc8906a6caf8a08f949f056e6942ca427b20a20dbba3f861c22ee36bd4ff.jpg", + "image_caption": [ + "Figure 5. Effects of varying auxiliary data size on FORA performed on CIFAR-10 and CelebA at layer 2." + ], + "image_footnote": [], + "bbox": [ + 68, + 85, + 467, + 204 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "one, the attack performance of FORA remains almost unchanged. When we further reduce the number of auxiliary samples to $20\\%$ , the quality of the reconstructed images decreases slightly but still preserves the full outline and most of the details. In that case, the percentage of the public auxiliary dataset is very small compared to the huge private training set (50,000 for CIFAR-10 and 162770 for CelebA), only $2\\%$ and $1.2\\%$ , respectively. This implies that even with a rather limited auxiliary dataset, FORA is still able to effectively reconstruct the client's training samples.", + "bbox": [ + 75, + 273, + 467, + 422 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/8900c65da9484302d93788fc7566785d1853587a0eba187f8b99d4d485f611d1.jpg", + "table_caption": [ + "Table 5. Effect of absence of categories on FORA performed on CIFAR-10 at layer 2." + ], + "table_footnote": [], + "table_body": "
Absent CategoriesSSIM↑PSNR↑LPIPS↓
Living0.76820.440.300
Non-living0.73218.430.395
", + "bbox": [ + 81, + 478, + 465, + 565 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Absence of Categories. It is likely that the adversary's public auxiliary data misses some semantic classes of the private data distribution. To model this situation, we create two special auxiliary datasets for CIFAR-10, one containing \"Living\" items (birds, cats, etc.), and the other containing \"Non-living\" items (airplanes, cars, etc.), both with 5,000 randomly sampled samples from CINIC-10. As presented in Tab. 5, even if a class is absent from the auxiliary dataset, FORA can still reconstruct samples of that class. In fact, FORA focuses on stealing the mapping relationship between client inputs and smashed data and therefore does not require class alignment. We observe that the absence of the \"Non-living\" category leads to a moderate degradation in the reconstruction results. We believe that the reason behind this phenomenon is that the greater variation of classes within the \"Non-living\" category helps to increase the generalization level of the substitute client, which in turn facilitates improved attack performance.", + "bbox": [ + 75, + 582, + 467, + 852 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Distribution Shift. Here we further analyze the impact of the auxiliary dataset distribution on FORA. In contrast to our default experimental setup, we selected 5000", + "bbox": [ + 75, + 854, + 467, + 898 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 6. Effects of auxiliary dataset distribution shift on FORA performed on CIFAR-10 and CelebA at layer 2. \"Different\" represents auxiliary data sampled from CINIC-10, and FFHQ respectively, and \"Same\" means auxiliary dataset come from their original test set.", + "bbox": [ + 498, + 88, + 890, + 157 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/edd4b73216dcfe8709721d132ebbce09a545e6a1ebccf03eecbfa909d0fe2e77.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset SizeCIFAR-10CelebA
DifferentSameDifferentSame
SSIM↑0.8300.8320.4760.777
PSNR↑22.1922.7817.1121.55
LPIPS↓0.2520.2070.3810.264
", + "bbox": [ + 544, + 169, + 843, + 239 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "and 10000 images from the original testing sets of CIFAR-10 and CelebA, respectively, as the auxiliary datasets with the same distribution. As shown in Tab. 6, a more similar distribution can facilitate substitute clients stealing the representation preference, resulting in better reconstruction performance. We observe that the attack results on the facial dataset are more vulnerable to the data distribution shift compared to the object dataset. One possible reason is that tasks related to facial datasets are more sensitive to variations in sampling methods and alignment conditions across different datasets. For object datasets, due to substantial distribution variation between different categories of themselves, e.g. ranging from animals to vehicles, which contributes to their robustness in handling distribution shifts.", + "bbox": [ + 496, + 271, + 890, + 482 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Effect of Substitute Client Structure", + "text_level": 1, + "bbox": [ + 500, + 503, + 812, + 518 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "After validating the impact of the auxiliary dataset, here we are interested in the impact of substitute client architectures on FORA. We chose three different model structures as attack variants: the VGG block [34], the ResNet block [18], and the DenseNet block [22]. As can be seen in Fig. 6, the SSIM and LPIPS quantization results for the reconstructed images remain similar. This indicates that the extracted representation preferences on the basis of MK-MDD and Discriminator are close to that of the target client, despite the fact that the substitute clients use different architectures. Additional results are shown in Appendix B.3.", + "bbox": [ + 496, + 531, + 890, + 696 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1da3abbe82a698155e555fcc9f607cf556db9aec0699ed6a4138cc6533ebfd61.jpg", + "image_caption": [ + "(a) SSIM↑" + ], + "image_footnote": [], + "bbox": [ + 509, + 727, + 694, + 838 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ac4bad8af3fc1c0c6fcc83a17a528bb70236a7b2e279079f5ae816bce37794a7.jpg", + "image_caption": [ + "(b) LPIPS↓", + "Figure 6. Effect for FORA with varying substitute model architectures on both datasets at layer 2." + ], + "image_footnote": [], + "bbox": [ + 696, + 728, + 880, + 838 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "12136", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.6. Counter Defense Techniques", + "text_level": 1, + "bbox": [ + 76, + 90, + 331, + 107 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "There have been a number of defenses aimed at perturbing the smashed data claiming that they can reduce the risk of privacy leakage in SL to a certain extent. We select three well-known defense techniques, i.e., distance correlation minimization [37, 43, 44], differential privacy [1], and noise obfuscation [39], to evaluate the effectiveness of FORA. Tab. 7 shows the limited impact of these defenses on FORA. See Appendix C.1 for more details on defense techniques. See Appendix C.2 for more defense results and discussions about possible adaptive defenses.", + "bbox": [ + 75, + 114, + 468, + 263 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Distance Correlation Minimization (DCOR). 
DCOR can uncorrelate irrelevant and sensitive features from the smashed data associated with the target client, which results in a lack of detailed expression of the input data in the representation preferences learned by the substitute client, especially in colors. However, FORA retains the ability to reconstruct the structural details of the private image.", + "bbox": [ + 75, + 266, + 468, + 371 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Differential Privacy (DP). DP protects training data privacy by adding carefully crafted Laplace noise to the gradients. However, the effectiveness of DP against FORA is very limited under all privacy budgets. When the test accuracy of the model is reduced by nearly $10\\%$ (the functionality is severely damaged), the SSIM of the reconstructed samples still reaches about $75\\%$ of the original. This tradeoff between classification accuracy and defense strength makes DP not feasible for practical applications of SL.", + "bbox": [ + 75, + 372, + 468, + 508 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Noise Obfuscation (NO). NO is a direct defense to destroy the mapping relationship between smashed and input data. We observe that on the one hand, the noise of a small scale enhances the generalization level of the SL model to maintain or even improve the classification accuracy, on the other hand raising the noise scale helps to introduce deviations to the features extracted from the target client, making it more difficult to learn the representations and reconstruct the data for FORA.", + "bbox": [ + 75, + 508, + 468, + 643 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Discussion and Conclusion", + "text_level": 1, + "bbox": [ + 76, + 660, + 323, + 676 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we first discuss the potential improvement and scalability of FORA, then we summarize this work. We also show limitation and future work in Appendix D.", + "bbox": [ + 75, + 686, + 468, + 733 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Improvement using Generative Adversarial Networks. Li et al. [27] propose a novel StyleGAN-based reconstruction attack against split inference, and their research focus is orthogonal to our contribution. Therefore, the reconstruction task in FORA can be further optimized using pre-trained StyleGAN [25]. As shown in Fig. 7, the well-trained substitute client in FORA combined with StyleGAN optimization can provide additional improvements in reconstruction performance.", + "bbox": [ + 75, + 734, + 468, + 868 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Attack on Label-Protected SL. Another popular setup for SL requires the client to keep the labels locally [42], but", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/379b51ce8496100593363a9674f8bb353c358774a2dcff3a6e567cff62f441fd.jpg", + "table_caption": [ + "Table 7. Effect of utility and FORA performance against three defense techniques on CIFAR-10 at layer 2." + ], + "table_footnote": [], + "table_body": "
Defense HyperparamTest Acc (%)SSIM↑PSNR↑LPIPS↓
0 (w/o defense)71.250.83022.190.252
DCOR (α)
0.270.910.69217.910.360
0.570.060.62815.990.441
0.869.720.56315.400.471
DP (ε)
+∞69.680.82322.360.225
10063.050.71120.360.394
1061.930.62118.030.487
NO (σ)
1.074.390.64017.290.367
2.073.140.58316.290.444
5.070.620.39414.350.550
", + "bbox": [ + 503, + 127, + 888, + 431 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/238c6feaaeab283549e787aaf5c94c2dbf6042f312a0d23b6bd54a3758866b99.jpg", + "image_caption": [ + "Figure 7. Reconstructed CelebA images of FORA and FORA-G, FOAR-G represents FORA combined with StyleGAN." + ], + "image_footnote": [], + "bbox": [ + 500, + 446, + 883, + 545 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "this case does not have any influence on the implementation and performance of FORA. Since FORA is only related to the smashed data output from the target client, it does not depend on the server model as well as the training task.", + "bbox": [ + 496, + 612, + 890, + 672 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Conclusion. In this work, we propose a novel data reconstruction attack against SL, named Feature-Oriented Reconstruction Attack (FORA). Unlike all previous attack schemes, FORA enables a semi-honest server to secretly reconstruct the client's private training data with very little prior knowledge. Thanks to our new perspective of extracting representation preferences from smashed data, the server can contemporaneously train a substitute client that approximates the target client's behavior to conduct the attack. Our extensive experiments in various settings demonstrate the state-of-the-art performance of FORA. Due to its stealth and effectiveness, it poses a real privacy threat to SL. We hope our work can inspire future efforts to explore it in more practical SL, and we are eager to draw attention to more robust defense techniques.", + "bbox": [ + 496, + 674, + 892, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "12137", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Martin Abadi, Andy Chu, Ian Goodfellow, H Brendan McMahan, Ilya Mironov, Kunal Talwar, and Li Zhang. Deep learning with differential privacy. In Proceedings of the 2016 ACM SIGSAC conference on computer and communications security, pages 308-318, 2016. 8", + "[2] Sharif Abuadbba, Kyuyeon Kim, Minki Kim, Chandra Thapa, Seyit A Camtepe, Yansong Gao, Hyoungshick Kim, and Surya Nepal. Can we use split learning on 1d cnn models for privacy preserving training? In Proceedings of the 15th ACM Asia Conference on Computer and Communications Security, pages 305-318, 2020. 1, 2", + "[3] Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In International conference on machine learning, pages 214-223. PMLR, 2017. 4", + "[4] Luke N Darlow, Elliot J Crowley, Antreas Antoniou, and Amos J Storkey. Cinic-10 is not imagenet or cifar-10. arXiv preprint arXiv:1810.03505, 2018. 5", + "[5] Ege Erdoğan, Alptekin Kupçü, and A Ercument Cicek. Splitguard: Detecting and mitigating training-hijacking attacks in split learning. In Proceedings of the 21st Workshop on Privacy in the Electronic Society, pages 125-137, 2022. 1, 3", + "[6] Ege Erdoğan, Alptekin Kupçü, and A Ercümen Çiçek. Unsplit: Data-oblivious model inversion, model stealing, and label inference attacks against split learning. In Proceedings of the 21st Workshop on Privacy in the Electronic Society, pages 115-124, 2022. 1, 2, 5", + "[7] Chong Fu, Xuhong Zhang, Shouling Ji, Jinyin Chen, Jingzheng Wu, Shanqing Guo, Jun Zhou, Alex X Liu, and Ting Wang. 
Label inference attacks against vertical federated learning. In 31st USENIX Security Symposium (USENIX Security 22), pages 1397-1414, 2022. 2", + "[8] Jiayun Fu, Xiaojing Ma, Bin B. Zhu, Pingyi Hu, Ruixin Zhao, Yaru Jia, Peng Xu, Hai Jin, , and Dongmei Zhang. Focusing on pinocchio's nose: A gradients scrutinizer to thwart split-learning hijacking attacks using intrinsic attributes. In 30th Annual Network and Distributed System Security Symposium, NDSS 2023, San Diego, California, USA, February 27-March 3, 2023. The Internet Society, 2023. 1, 2, 3, 5", + "[9] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In International conference on machine learning, pages 1180-1189. PMLR, 2015. 2, 3, 4", + "[10] Xinben Gao and Lan Zhang. PCAT: Functionality and data stealing from split learning by Pseudo-Client attack. In 32nd USENIX Security Symposium (USENIX Security 23), pages 5271–5288, Anaheim, CA, 2023. USENIX Association. 1, 2, 5", + "[11] Yansong Gao, Minki Kim, Sharif Abuadbba, Yeonjae Kim, Chandra Thapa, Kyuyeon Kim, Seyit A Camtepe, Hyoungshick Kim, and Surya Nepal. End-to-end evaluation of federated learning and split learning for internet of things. arXiv preprint arXiv:2003.13376, 2020. 1", + "[12] Muhammad Ghifary, W Bastiaan Kleijn, and Mengjie Zhang. Domain adaptive neural networks for object recognition. In PRICAI 2014: Trends in Artificial Intelligence:" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "13th Pacific Rim International Conference on Artificial Intelligence, Gold Coast, QLD, Australia, December 1-5, 2014. Proceedings 13, pages 898-904. Springer, 2014. 3", + "[13] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2014. 4", + "[14] Ian J Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Proceedings of the 27th International Conference on Neural Information Processing Systems-Volume 2, pages 2672–2680, 2014. 2", + "[15] Arthur Gretton, Dino Sejdinovic, Heiko Strathmann, Sivaraman Balakrishnan, Massimiliano Pontil, Kenji Fukumizu, and Bharath K Sriperumbudur. Optimal kernel choice for large-scale two-sample tests. Advances in neural information processing systems, 25, 2012. 2, 3, 4", + "[16] Otkrist Gupta and Ramesh Raskar. Distributed learning of deep neural network over multiple agents. Journal of Network and Computer Applications, 116:1-8, 2018. 1, 2", + "[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5", + "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 7", + "[19] Zecheng He, Tianwei Zhang, and Ruby B Lee. Model inversion attacks against collaborative inference. In Proceedings of the 35th Annual Computer Security Applications Conference, pages 148-162, 2019. 1, 2, 4", + "[20] Alain Hore and Djemel Ziou. Image quality metrics: Psnr vs. ssm. In 2010 20th international conference on pattern recognition, pages 2366-2369. IEEE, 2010. 
2, 5", + "[21] Andrew G Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861, 2017. 5", + "[22] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4700-4708, 2017. 7", + "[23] Sanjay Kariyappa and Moinuddin K Qureshi. Exploit: Extracting private labels in split learning. In 2023 IEEE Conference on Secure and Trustworthy Machine Learning (SaTML), pages 165-175. IEEE, 2023. 2", + "[24] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 5", + "[25] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 8" + ], + "bbox": [ + 503, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "12138", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[26] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5", + "[27] Ziang Li, Mengda Yang, Yaxin Liu, Juan Wang, Hongxin Hu, Wenzhe Yi, and Xiaoyang Xu. GAN you see me? enhanced data reconstruction attacks against split inference. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 2, 8", + "[28] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of the IEEE international conference on computer vision, pages 3730-3738, 2015. 5", + "[29] Mingsheng Long, Yue Cao, Jianmin Wang, and Michael Jordan. Learning transferable features with deep adaptation networks. In International conference on machine learning, pages 97-105. PMLR, 2015. 2, 3, 4", + "[30] Sida Luo, Fangchao Yu, Lina Wang, Bo Zeng, Zhi Pang, and Kai Zhao. Feature sniffer: A stealthy inference attacks framework on split learning. In International Conference on Artificial Neural Networks, pages 66-77. Springer, 2023. 3", + "[31] Dario Pasquini, Giuseppe Ateniese, and Massimo Bernaschi. Unleashing the tiger: Inference attacks on split learning. In Proceedings of the 2021 ACM SIGSAC Conference on Computer and Communications Security, pages 2113-2129, 2021. 1, 2, 4, 5", + "[32] Maarten G Poirot, Praneeth Vepakomma, Ken Chang, Jayashree Kalpathy-Cramer, Rajiv Gupta, and Ramesh Raskar. Split learning for collaborative deep learning in healthcare. arXiv preprint arXiv:1912.12115, 2019. 1, 2", + "[33] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision, pages 618-626, 2017. 3", + "[34] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 5, 7", + "[35] Abhishek Singh, Ayush Chopra, Ethan Garza, Emily Zhang, Praneeth Vepakomma, Vivek Sharma, and Ramesh Raskar. 
Disco: Dynamic and invariant sensitive channel obfuscation for deep neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12125-12135, 2021. 2, 4", + "[36] Congzheng Song and Vitaly Shmatikov. Overlearning reveals sensitive attributes. arXiv preprint arXiv:1905.11742, 2019. 1", + "[37] Gábor J Székely, Maria L Rizzo, and Nail K Bakirov. Measuring and testing dependence by correlation of distances. 2007. 8", + "[38] Chandra Thapa, Pathum Chamikara Mahawaga Arachchige, Seyit Camtepe, and Lichao Sun. Splitfed: When federated learning meets split learning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 8485-8493, 2022. 1, 2", + "[39] Tom Titcombe, Adam J Hall, Pavlos Papadopoulos, and Daniele Romanini. Practical defences against model inversion attacks for split neural networks. arXiv preprint arXiv:2104.05743, 2021. 8" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[40] Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014. 3", + "[41] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7167-7176, 2017. 3", + "[42] Praneeth Vepakomma, Otkrist Gupta, Tristan Swedish, and Ramesh Raskar. Split learning for health: Distributed deep learning without sharing raw patient data. arXiv preprint arXiv:1812.00564, 2018. 1, 2, 3, 8", + "[43] Praneeth Vepakomma, Otkrist Gupta, Abhimanyu Dubey, and Ramesh Raskar. Reducing leakage in distributed deep learning for sensitive health data. arXiv preprint arXiv:1812.00564, 2, 2019. 8", + "[44] Praneeth Vepakomma, Abhishek Singh, Otkrist Gupta, and Ramesh Raskar. Nopeek: Information leakage reduction to share activations in distributed deep learning. In 2020 International Conference on Data Mining Workshops (ICDMW), pages 933–942. IEEE, 2020. 1, 8", + "[45] Mei Wang and Weihong Deng. Deep visual domain adaptation: A survey. Neurocomput., 312(C):135-153, 2018. 2", + "[46] Mei Wang and Weihong Deng. Deep visual domain adaptation: A survey. Neurocomputing, 312:135-153, 2018. 3", + "[47] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 5", + "[48] Mengda Yang, Ziang Li, Juan Wang, Hongxin Hu, Ao Ren, Xiaoyang Xu, and Wenzhe Yi. Measuring data reconstruction defenses in collaborative inference systems. Advances in Neural Information Processing Systems, 35:12855-12867, 2022. 2", + "[49] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 
5" + ], + "bbox": [ + 501, + 92, + 890, + 628 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "12139", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/de2cb66c-4154-4b26-aa15-91e83e19d783_model.json b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/de2cb66c-4154-4b26-aa15-91e83e19d783_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4caead3fce591c34cc0f962bfb05f34a03637ac2 --- /dev/null +++ b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/de2cb66c-4154-4b26-aa15-91e83e19d783_model.json @@ -0,0 +1,2200 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.16, + 0.131, + 0.812, + 0.177 + ], + "angle": 0, + "content": "A Stealthy Wrongdoer: Feature-Oriented Reconstruction Attack against Split Learning" + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.202, + 0.864, + 0.239 + ], + "angle": 0, + "content": "Xiaoyang Xu\\(^{1}\\) Mengda Yang\\(^{1}\\) Wenzhe Yi\\(^{1}\\) Ziang Li\\(^{1}\\) Juan Wang\\(^{1*}\\) Hongxin Hu\\(^{2}\\) Yong Zhuang\\(^{1}\\) Yaxin Liu\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.24, + 0.882, + 0.275 + ], + "angle": 0, + "content": "1 Key Laboratory of Aerospace Information Security and Trusted Computing, Ministry of Education, School of Cyber Science and Engineering, Wuhan University" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.275, + 0.81, + 0.293 + ], + "angle": 0, + "content": "\\(^{2}\\) Department of Computer Science and Engineering, University at Buffalo, SUNY" + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.295, + 0.807, + 0.327 + ], + "angle": 0, + "content": "{xiaoyangx, mengday, wenzhey, ziangli, yong.zhuang, yaxin.liu}@whu.edu.cn jwang@whu.edu.cn, hongxinh@buffalo.edu" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.362, + 0.314, + 0.379 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.395, + 0.473, + 0.775 + ], + "angle": 0, + "content": "Split Learning (SL) is a distributed learning framework renowned for its privacy-preserving features and minimal computational requirements. Previous research consistently highlights the potential privacy breaches in SL systems by server adversaries reconstructing training data. However, these studies often rely on strong assumptions or compromise system utility to enhance attack performance. This paper introduces a new semi-honest Data Reconstruction Attack on SL, named Feature-Oriented Reconstruction Attack (FORA). In contrast to prior works, FORA relies on limited prior knowledge, specifically that the server utilizes auxiliary samples from the public without knowing any client's private information. This allows FORA to conduct the attack stealthily and achieve robust performance. The key vulnerability exploited by FORA is the revelation of the model representation preference in the smashed data output by victim client. 
FORA constructs a substitute client through feature-level transfer learning, aiming to closely mimic the victim client's representation preference. Leveraging this substitute client, the server trains the attack model to effectively reconstruct private data. Extensive experiments showcase FORA's superior performance compared to state-of-the-art methods. Furthermore, the paper systematically evaluates the proposed method's applicability across diverse settings and advanced defense strategies." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.804, + 0.21, + 0.82 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.83, + 0.47, + 0.876 + ], + "angle": 0, + "content": "Deep Neural Networks (DNN) have gained widespread usage in computer vision due to their excellent learning ability and expressive power. Split Learning (SL) [2, 11, 16, 32, 38," + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.363, + 0.892, + 0.546 + ], + "angle": 0, + "content": "42, 44] emerged as a distributed collaborative framework that enables clients to cooperate with a server to perform learning task. In SL, the complete DNN model is divided into two parts, which are deployed on the client and server respectively. For a normal training process in SL, the client performs the computational process locally and communicates with the server solely based on intermediate features (referred to as smashed data) and their corresponding gradients. In this case, the server does not have access to any private information (raw data, parameters, architecture) about the client. Therefore, SL is considered effective in protecting the privacy of clients." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.554, + 0.895, + 0.903 + ], + "angle": 0, + "content": "However, recent works [6, 10, 19, 31, 36] have shown that there are still privacy risks associated with SL. It is possible for the server to steal private information about the client according to auxiliary knowledge. One particular concern is the Data Reconstruction Attack (DRA) [6, 10, 31], where a server attempts to recover the training data of a client in SL systems. Depending on whether the server affects the normal process of SL, we can categorize adversaries into malicious and semi-honest attackers. Malicious servers such as FSHA [31] can manipulate the SL training process to conduct more effective attack. However, the latest findings [5, 8] show that FSHA's mischief is easily detected by the client, leading to the termination of SL training protocol For semi-honest attackers, e.g. PCAT [10] and UnSplit [6], their superior camouflage makes them less likely to be detected. But current semi-honest attackers often rely overly on assumptions that favor their performances. For example, UnSplit requires knowledge of the client's architecture and is only applicable to simple networks or datasets. As for PCAT, it unduly depends on the availability of partial private data to assist in training the pseudo-client. These assumptions contradict the basic principle of SL, which is to ensure that the client's knowledge" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.222, + 0.9 + ], + "angle": 0, + "content": "*Corresponding author." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "12130" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.168 + ], + "angle": 0, + "content": "remains hidden from the server. 
In summary, we find previous attacks lack consideration of the intrinsic security of SL and the plausibility of their attack hypothesis, which limits the effectiveness and threat of their approach in real-world SL systems scenarios." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.17, + 0.473, + 0.532 + ], + "angle": 0, + "content": "In this work, we introduce a novel DRA toward more realistic and more challenging scenarios, where the server cannot access private data or the structures and parameters of the client model. Our scheme stems from new insights into potential privacy breaches in SL. We discover a fundamental phenomenon that the client model has its own representation preference, which can be reflected through the output smashed data. More importantly, this unique information can indicate the feature extraction behavior of the client. Based on this new insight, we propose a semi-honest privacy threat, namely Feature-Oriented Reconstruction Attack (FORA). A server adversary could establish a substitute client by narrowing the reference distance with the real client, which allows the substitute model to mimic the behavior of the target model at a finer granularity. To efficiently measure the preference distance of different representations, we introduce domain Discriminator network [9, 14] and Multi-Kernel Maximum Mean Discrepancy (MK-MMD) [15, 29]. These techniques are widely used in domain adaptation [45], enabling us to project various representation preferences into a shared space for comparison. With a well-trained substitute client, the server can successfully recover the private data by constructing an inverse network." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.535, + 0.473, + 0.775 + ], + "angle": 0, + "content": "We conduct our evaluation on two benchmark datasets and corresponding networks against different model partitioning strategies. The experimental results indicate that the proposed method significantly outperforms baseline attacks. Taking the reconstructed images of CelebA at layer 2 as an example, UnSplit, PCAT and FORA achieve effects of 8.70, 12.05, and 17.11 on the PSNR [20]. This demonstrates that FORA has significantly outperformed by \\(1.97\\mathrm{x}\\) and \\(1.42\\mathrm{x}\\) compared to the other two attacks. Although FSHA can achieve attack performance similar to ours, its malicious attack process can be promptly halted through monitoring mechanisms [8], resulting in poor reconstructions. Furthermore, we investigate the potential influences on FORA, including different public knowledge conditions and existing defense strategies, to validate the robustness of FORA." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.778, + 0.471, + 0.809 + ], + "angle": 0, + "content": "The main contributions of this paper can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.472, + 0.902 + ], + "angle": 0, + "content": "- We propose a novel attack, named Feature-Oriented Reconstruction Attack (FORA). As far as we know, FORA is the first work enabling a semi-honest server to perform powerful DRA in more realistic and challenging SL systems. In such scenarios, the server has no prior knowledge of the client model or access to raw data." + }, + { + "type": "image", + "bbox": [ + 0.526, + 0.092, + 0.872, + 0.172 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.184, + 0.842, + 0.199 + ], + "angle": 0, + "content": "Figure 1. Architecture of two-part split learning." 
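To make the two-part protocol in Fig. 1 concrete, here is a minimal PyTorch-style sketch of a single split-learning iteration: only the smashed data and its gradient cross the client/server boundary. The module names, placeholder architectures, and optimizer settings are illustrative assumptions, not the configurations used in the paper.

```python
import torch
import torch.nn as nn

# Placeholder two-part split; real architectures and the split point are chosen per experiment.
client_net = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU())   # F_c, kept on the client
server_net = nn.Sequential(nn.Flatten(), nn.Linear(16 * 32 * 32, 10))   # F_s, kept on the server
opt_client = torch.optim.SGD(client_net.parameters(), lr=0.01)
opt_server = torch.optim.SGD(server_net.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()

def sl_training_step(x_priv, y):
    """One SL iteration: the server never sees x_priv, only the smashed data."""
    opt_client.zero_grad()
    opt_server.zero_grad()
    smashed = client_net(x_priv)                          # client-side forward pass
    smashed_recv = smashed.detach().requires_grad_(True)  # what the server actually receives
    loss = criterion(server_net(smashed_recv), y)         # server-side forward pass and loss
    loss.backward()                                       # server-side backward pass
    opt_server.step()
    smashed.backward(smashed_recv.grad)                   # client resumes backprop from the returned gradient
    opt_client.step()
    return loss.item()

# Random tensors stand in for a private batch; shapes follow the placeholder modules above.
print(sl_training_step(torch.randn(8, 3, 32, 32), torch.randint(0, 10, (8,))))
```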
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.226, + 0.892, + 0.286 + ], + "angle": 0, + "content": "- We have uncovered an inherent vulnerability in SL, where the server can exploit rich information in the smashed data to steal client representation preference, thereby building a substitute client for better reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.287, + 0.892, + 0.378 + ], + "angle": 0, + "content": "- We conduct comprehensive experiments with various adversarial knowledge against different benchmark datasets and models. The results demonstrate that FORA can achieve state-of-the-art attack performance compared with baselines and exhibits notable robustness across different settings." + }, + { + "type": "list", + "bbox": [ + 0.498, + 0.226, + 0.892, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.392, + 0.787, + 0.409 + ], + "angle": 0, + "content": "2. Background and Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.418, + 0.892, + 0.659 + ], + "angle": 0, + "content": "Split Learning (SL). SL [2, 16, 32, 38, 42] is an emerging distributed learning paradigm for resource-limited scenarios, which can split the neural network model into both client-side and server-side. As shown in Fig. 1, the client performs forward propagation and transmits the smashed data to the server, which then uses the computed loss for backward propagation and sends the gradients of the smashed data back to the client. Both the client and server will update their weights after receiving the gradients. It is generally believed that SL provides a secure and efficient training protocol by allowing the client to retain a portion of the model and training data locally while offloading most of the computing overhead to the server [2, 16, 32, 42]. However, recent studies [6, 7, 10, 23, 31] have highlighted vulnerabilities in SL, where the server can exploit the latter part of the model to carry out privacy attacks." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Data Reconstruction Attack (DRA) on SL. DRA [19, 27, 35, 48] is one of the most powerful privacy attacks that aim to steal the input data by the model's intermediate features. In SL, the server can utilize the smashed data output by the client to reconstruct the training data [6, 10, 31]. One notable attack is known as FSHA [31], where a malicious attacker utilizes the elaborated loss to alter the feature space of the victim client for reconstructing private data. In UnSplit [6], the semi-honest server attempts to reconstruct the training data and client's parameters simultaneously by utilizing the smashed data. Specifically, UnSplit optimizes parameters and inputs sequentially by minimizing the outputs between the clone client and the target client. To the best of our knowledge, PCAT [10] represents the most advanced attack under the semi-honest assumption. 
PCAT leverages the knowledge embedded in various stages of the server models" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "12131" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.08, + 0.089, + 0.167, + 0.156 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.157, + 0.154, + 0.169 + ], + "angle": 0, + "content": "(a) Original" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.089, + 0.267, + 0.155 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.192, + 0.157, + 0.255, + 0.169 + ], + "angle": 0, + "content": "(b) Model 1" + }, + { + "type": "image", + "bbox": [ + 0.282, + 0.089, + 0.367, + 0.155 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.157, + 0.357, + 0.169 + ], + "angle": 0, + "content": "(c) Model 2" + }, + { + "type": "image", + "bbox": [ + 0.384, + 0.089, + 0.469, + 0.155 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.394, + 0.157, + 0.458, + 0.169 + ], + "angle": 0, + "content": "(d) Model 3" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.182, + 0.47, + 0.251 + ], + "angle": 0, + "content": "Figure 2. Input image and behavior visualization by Grad-CAM [33]. All the models are trained in CelebA with the task of smiling classification. The figure displays the original images and the representation preferences of three models trained under the same hyperparameter settings but with different random seeds." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.279, + 0.469, + 0.323 + ], + "angle": 0, + "content": "to steal private data by constructing a pseudo-client. Unlike previous work, SFA [30] focuses on reconstructing samples during the inference stage rather than the training samples." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.47, + 0.596 + ], + "angle": 0, + "content": "Although existing works claim that their attacks pose significant privacy threats to SL, they disregard the plausibility of their threat model. For FSHA, the server reconstructs the raw data while at the cost of destroying the client's utility. While FSHA assumes that the client is entirely free of any awareness of being maliciously disrupted, recent research [5, 8] indicates that such a malicious server can be easily detected by the client, leading to a halt in the SL. UnSplit needs the knowledge of the client's structure and is not suitable for complex networks and datasets due to the infinite searching space of input data and model parameters. As for PCAT, it requires the adversary to have access to a portion of the private dataset. This is an unreasonable assumption that violates the original intention of SL since one of the distinctive characteristics of SL is the ability to train models without sharing the raw data [42]. As a result, how to explore DRA under more realistic assumptions in SL remains an open question." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.596, + 0.47, + 0.747 + ], + "angle": 0, + "content": "Domain Adaptation. Domain adaptation [9, 12, 15, 29, 40, 41, 46] is a technique that seeks to enhance the generalization of a model by transferring knowledge acquired from a source domain to a distinct yet related target domain. The core idea of domain adaptation is to map data from different domains into the same space for comparison. 
Here, we apply two popular methods: the domain Discriminator network [9, 41, 46] and the Multi-Kernel Maximum Mean Discrepancy (MK-MMD) function [15, 29, 46] to compare the feature spaces of different models." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.762, + 0.168, + 0.777 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.786, + 0.22, + 0.801 + ], + "angle": 0, + "content": "3.1. Threat Model" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.469, + 0.885 + ], + "angle": 0, + "content": "Without loss of generality, given a two-party SL protocol, the SL model \\( F \\) is partitioned to a server model \\( F_{s} \\) and a client model \\( F_{c} \\). The server aims to stealthily recover the private training data of the client through the smashed data \\( Z \\) output by \\( F_{c} \\)." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.469, + 0.901 + ], + "angle": 0, + "content": "We assume that the server adversary is a semi-honest en" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.288 + ], + "angle": 0, + "content": "tity, ensuring that the training process is indistinguishable from ordinary training during attack. Furthermore, we posit that the server adversary must adhere to the foundational principle of the SL — she lacks any means of accessing client-sensitive information. Specifically, the server does not require knowledge of the structure or hyperparameters of \\( F_{c} \\) and is devoid of access to the client's private training dataset \\( D_{priv} \\). The sole piece of public knowledge available to the server pertains to the auxiliary dataset \\( D_{aux} \\), sourced from the same domain as the private samples. It's important to note that the distribution of \\( D_{aux} \\) typically differs from that of \\( D_{priv} \\). Compared to the threat model of previous works, this assumption is more reasonable and realistic." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.299, + 0.621, + 0.313 + ], + "angle": 0, + "content": "3.2. Motivation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.323, + 0.892, + 0.488 + ], + "angle": 0, + "content": "Current DRAs rely overly on constructing inverse networks from input-output pairs obtained by querying the target model. However, this approach is impractical for SL because the server only has access to the client's outputs and is not qualified to query. A potential solution is to build a substitute client to mimic the target client, thus enabling the training of the inverse network. However, the variability of the substitute client's behavior affects the generalization of the inverse network to the target client, leading to the failure of the reconstruction, especially without the knowledge of the client model structure and private data distribution." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.489, + 0.892, + 0.701 + ], + "angle": 0, + "content": "As illustrated in Fig. 2, we employ Grad-CAM [33] to visualize the attention of intermediate features generated by different clients. From Fig. 2 (a)-(d), it can be noticed that even for models trained under the same setup, there still exists evident differences between their image processing attention. This phenomenon suggests that the smashed data output by the client reflects its distinctive feature extraction behavior, which we define as representation preferences. 
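The representation-preference comparison above relies on Grad-CAM [33]. A generic hand-rolled sketch using forward and backward hooks is given below; the model, target layer, and class index are placeholders, and this is not the exact script behind Fig. 2.

```python
import torch
import torch.nn.functional as F

def grad_cam(model, target_layer, x, class_idx):
    """Coarse Grad-CAM heatmap: where does `model` attend when scoring class `class_idx`?"""
    acts, grads = {}, {}
    fwd = target_layer.register_forward_hook(lambda m, i, o: acts.update(a=o))
    bwd = target_layer.register_full_backward_hook(lambda m, gi, go: grads.update(g=go[0]))
    model.zero_grad()
    model(x)[0, class_idx].backward()                           # backprop the class score only
    fwd.remove()
    bwd.remove()
    weights = grads["g"].mean(dim=(2, 3), keepdim=True)         # global-average-pooled gradients
    cam = F.relu((weights * acts["a"]).sum(dim=1, keepdim=True))
    cam = F.interpolate(cam, size=x.shape[-2:], mode="bilinear", align_corners=False)
    return (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)   # normalised to [0, 1]

# Usage sketch: two clients trained with different seeds typically highlight different regions.
# heatmap_a = grad_cam(model_a, model_a.layer4, image, class_idx=1)
# heatmap_b = grad_cam(model_b, model_b.layer4, image, class_idx=1)
```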
Our general assumption is that narrowing the gap between the substitute client and the target client in terms of intermediate features can make the representation preferences of the two models more similar, which ensures that the inverse network trained by the substitute client perfectly maps the target smashed data back to the private raw data." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.711, + 0.848, + 0.726 + ], + "angle": 0, + "content": "3.3. Feature-Oriented Reconstruction Attack" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Inspired by the differences in model representation preferences, we propose a novel data reconstruction attack against SL, called Feature-Oriented Reconstruction Attack (FORA). In order to mount FORA, the adversary needs to contrive a way to obtain the representation preferences of the \\( F_{c} \\). To address this problem, we utilize domain adaptation techniques [9, 15, 29] to project different preference representations into the same space. Specifically, the adversary conducts feature-level transfer learning by exploiting the \\( Z_{c} \\) collected in each training iteration and then obtains a substitute model that mimics well the feature extraction be" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12132" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.134, + 0.09, + 0.492, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.276, + 0.42, + 0.289 + ], + "angle": 0, + "content": "a) Substitute Model Construction" + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.089, + 0.843, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.581, + 0.276, + 0.761, + 0.289 + ], + "angle": 0, + "content": "c) Private Data Reconstruction" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.301, + 0.894, + 0.358 + ], + "angle": 0, + "content": "Figure 3. Attack pipeline of Feature-Oriented Reconstruction Attack (FORA) against SL. (a) shows the substitute model training phase. The attacker constructs a substitute model \\(\\hat{F}_c\\) using \\(\\mathcal{L}_{DISC}\\) and \\(\\mathcal{L}_{MK-MMD}\\) to mimic the behavior of the client model \\(F_c\\). (b) means training an inverse network \\(f_c^{-1}\\) using public data \\(X_{aux}\\). (c) represents the final attack phase using the attack model to reconstruct training data from snapshot \\(Z_{snap}\\) of target smashed data." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.383, + 0.47, + 0.473 + ], + "angle": 0, + "content": "havior of the \\( F_{c} \\). Through this approach, the adversary can smoothly construct an attack model (inverse mapping network) to recover the private samples. The detailed pipeline of FORA is shown in Fig. 3. It consists of three phases: substitute model construction, attack model training, and private data reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.474, + 0.471, + 0.656 + ], + "angle": 0, + "content": "Substitute Model Construction. Before SL training commences, the server initializes a substitute client, denoted by \\(\\hat{F}_c\\). The \\(\\hat{F}_c\\) will be trained locally at the server in parallel with the victim's \\(F_c\\), and such process will take place throughout the entire SL collaboration. 
In each training iteration, the client will send smashed data of the current batch to the server for completing the subsequent computations. Concurrently, the server will use the collected smashed data to perform training on the \\(\\hat{F}_c\\). For this purpose, the server introduces the Discriminator module and the MK-MMD module to extract the representation preferences. We define its training objective as:" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.669, + 0.47, + 0.695 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\hat {F} _ {c}} \\mathcal {L} _ {D I S C} + \\mathcal {L} _ {M K - M M D}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.706, + 0.47, + 0.767 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_{DISC}\\) is the Discriminator module constraining \\(Z_{aux} = \\hat{F}_c(X_{aux})\\) and \\(Z_{priv} = F_c(X_{priv})\\) to be indistinguishable, while \\(\\mathcal{L}_{MK - MMD}\\) is the MK-MMD module making \\(Z_{aux}\\) as close as possible to \\(Z_{priv}\\) in shared space." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.767, + 0.471, + 0.873 + ], + "angle": 0, + "content": "The Discriminator [3, 9, 13] \\( D \\) is also a network that needs to be trained synchronously and is tasked with efficiently distinguishing the generated features between \\( F_{c} \\) and \\( \\hat{F}_{c} \\), maximizing probabilities of the former and minimizing probabilities of the latter [31]. Therefore, the parameters of \\( D \\) will be updated to minimize the following loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.092, + 0.884, + 0.47, + 0.903 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {D} = \\log \\left(1 - \\mathcal {D} \\big (F _ {c} (X _ {p r i v}) \\big) + \\log \\mathcal {D} \\big (\\hat {F} _ {c} (X _ {a u x}) \\big)\\right). \\quad (2)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.383, + 0.893, + 0.445 + ], + "angle": 0, + "content": "After each local training step of \\( D \\), the server utilizes \\( D \\) to instruct substitute client's representation preference to be consistent with that of the victim client. Specifically, an adversarial loss is constructed as the following:" + }, + { + "type": "equation", + "bbox": [ + 0.577, + 0.453, + 0.892, + 0.471 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {D I S C} = \\log (1 - D (\\hat {F} _ {c} (X _ {a u x}))). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.481, + 0.893, + 0.587 + ], + "angle": 0, + "content": "The MK-MMD module [15, 29] is designed to align two sets of generated features into a shared space using kernel functions and compute their difference, where a smaller difference signifies closer representation preferences. Then, for the substitute client, the objective extends beyond maximizing the probabilities output by the \\( D \\), it also seeks to minimize the MK-MMD loss function, namely:" + }, + { + "type": "equation", + "bbox": [ + 0.514, + 0.595, + 0.892, + 0.637 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {M K - M M D} = \\left\\| \\phi \\left(\\hat {F} _ {c} \\left(X _ {a u x}\\right)\\right) - \\phi \\left(F _ {c} \\left(X _ {p r i v}\\right)\\right) \\right\\| _ {\\mathcal {H}}, \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.61, + 0.64, + 0.892, + 0.726 + ], + "angle": 0, + "content": "\\[\n\\left\\{ \\begin{array}{c} \\phi = \\sum_ {j = 1} ^ {m} \\beta_ {j} k _ {j}, \\\\ \\sum_ {j = 1} ^ {m} \\beta_ {j} = 1, \\beta_ {j} \\geq 0, \\forall j, \\end{array} \\right. 
\\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.893, + 0.796 + ], + "angle": 0, + "content": "where \\( k \\) is a single kernel function, \\( \\phi \\) denotes a set of kernel functions that project different smashed data into Reproducing Kernel Hilbert Space \\( \\mathcal{H} \\), \\( \\beta \\) is the weight coefficient corresponding to the single kernel function." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.894, + 0.903 + ], + "angle": 0, + "content": "Attack Model Training. At the end of the training of SL, the server can obtain a substitute client with a feature extraction behavior extremely similar to that of the victim client. Moreover, its feature space is known to the adversary, who can recover the original input from the smashed data by applying an inverse network (denoted as \\( f_{c}^{-1} \\)). Following previous DRAs [19, 35], we adopt the \\( f_{c}^{-1} \\) consist" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12133" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.166 + ], + "angle": 0, + "content": "ing of a set of Transposed Convolution layers and Tanh activations as our attack model. The server can leverage the auxiliary dataset to train the attack model by minimizing the mean square error between \\( f_{c}^{-1}(\\hat{F}_{c}(X_{aux})) \\) and \\( X_{aux} \\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.147, + 0.178, + 0.47, + 0.199 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {f _ {c} ^ {- 1}} = \\left\\| f _ {c} ^ {- 1} \\left(\\hat {F} _ {c} \\left(X _ {a u x}\\right)\\right) - X _ {a u x} \\right\\| _ {2} ^ {2}. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.209, + 0.469, + 0.329 + ], + "angle": 0, + "content": "Private Data Reconstruction. The server keeps a snapshot \\( Z_{\\text{snap}} = F_c(X_{\\text{priv}}) \\) of all smashed data output by the target client under the final training iteration for reconstruction. Since the substitute client is able to mimic the target client's representation preferences well, the server can subtly use \\( f_c^{-1} \\) to perform the attack by mapping the target smashed data directly into the private raw data space, namely:" + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.33, + 0.469, + 0.348 + ], + "angle": 0, + "content": "\\[\nX _ {p r i v} ^ {*} = f _ {c} ^ {- 1} \\left(Z _ {s n a p}\\right). \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.354, + 0.465, + 0.371 + ], + "angle": 0, + "content": "Here, \\(X_{priv}^{*}\\) are the reconstructed private training samples." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.384, + 0.21, + 0.401 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.409, + 0.267, + 0.426 + ], + "angle": 0, + "content": "4.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.432, + 0.469, + 0.627 + ], + "angle": 0, + "content": "Datasets. In our experiments, we rely on CIFAR-10 [26] and CelebA [28] to validate the attacks, due to their dominance in the research on SL [6, 10, 31]. They will be used as private data for the client's target training tasks. According to Sec. 3.1, we assume that the server adversary has access to a set of auxiliary samples that are distinct from the client's private data. Therefore, we choose CINIC-10 [4] and FFHQ [24] as the adversary's auxiliary dataset, respectively. 
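Read operationally, Eqs. (1)-(5) amount to the following PyTorch-style sketch of one substitute-client update. The discriminator architecture, Gaussian kernel bandwidths, equal kernel weights for the beta_j, learning rates, and tensor shapes are assumptions for illustration and may differ from the paper's exact configuration.

```python
import torch
import torch.nn as nn

def mk_mmd(z_a, z_b, sigmas=(1.0, 2.0, 4.0, 8.0)):
    """MK-MMD (Eqs. 4-5) with an equal-weight mixture of Gaussian kernels (assumed beta_j)."""
    z_a, z_b = z_a.flatten(1), z_b.flatten(1)
    x = torch.cat([z_a, z_b], dim=0)
    gram = x @ x.t()
    sq = gram.diag().unsqueeze(0)
    d2 = (sq + sq.t() - 2 * gram).clamp(min=0)                      # pairwise squared distances
    k = sum(torch.exp(-d2 / (2 * s ** 2)) for s in sigmas) / len(sigmas)
    n = z_a.size(0)
    mmd2 = k[:n, :n].mean() + k[n:, n:].mean() - 2 * k[:n, n:].mean()
    return mmd2.clamp(min=1e-8).sqrt()

# Placeholder substitute client and discriminator; shapes must match the observed smashed data.
substitute = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU())           # substitute F_c
disc = nn.Sequential(nn.Flatten(), nn.Linear(16 * 32 * 32, 1), nn.Sigmoid())    # discriminator D
opt_sub = torch.optim.Adam(substitute.parameters(), lr=1e-4)
opt_disc = torch.optim.Adam(disc.parameters(), lr=1e-4)
eps = 1e-6

def substitute_step(z_priv, x_aux):
    """One FORA substitute update given intercepted smashed data z_priv = F_c(X_priv)."""
    z_aux = substitute(x_aux)
    # Discriminator update (Eq. 2): score target smashed data high, substitute features low.
    loss_disc = (torch.log(1 - disc(z_priv) + eps) + torch.log(disc(z_aux.detach()) + eps)).mean()
    opt_disc.zero_grad()
    loss_disc.backward()
    opt_disc.step()
    # Substitute update (Eqs. 1, 3, 4): fool the discriminator while shrinking the MK-MMD gap.
    loss_sub = torch.log(1 - disc(z_aux) + eps).mean() + mk_mmd(z_aux, z_priv)
    opt_sub.zero_grad()
    loss_sub.backward()
    opt_sub.step()
    return loss_disc.item(), loss_sub.item()

# Usage sketch: each SL iteration pairs the intercepted smashed batch with an auxiliary batch.
# substitute_step(z_priv=torch.randn(8, 16, 32, 32), x_aux=torch.randn(8, 3, 32, 32))
```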
We exclude images in CINIC-10 that overlapped with CIFAR-10, and randomly select 5,000 samples and 10,000 samples from the preprocessed CINIC-10 and FFHQ as the final auxiliary data. Appendix A.1 provides the detailed information for different datasets." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.63, + 0.469, + 0.825 + ], + "angle": 0, + "content": "Models. We consider two popular types of neural network architectures, including MobileNet [21] and ResNet-18 [17], as target models for the classification tasks of CIFRA-10 and CelebA, respectively. We set various split points for different target models to show our attack performance. Since the server is entirely unaware of the client's model structure from Sec. 3.1, we use VGG blocks [34] (consisting of a sequence of Convolutional, BatchNorm, ReLU, and MaxPool layers) to construct substitute models. In addition, the adversary's substitute models adaptively depend on the size of the intermediate features output by the client. All the architecture information and splitting schemes used in this paper are reported in Appendix A.2." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Metrics. In addition to analyzing the qualitative results of attack performances visually, we chose three quantitative metrics to evaluate the quality of the reconstructed images: Structural Similarity (SSIM) [47], Peak Signal-to-Noise Ratio (PSNR) [20], and Learned Perceptual Image" + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.09, + 0.723, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.562, + 0.218, + 0.667, + 0.23 + ], + "angle": 0, + "content": "(a) Detection Score" + }, + { + "type": "image", + "bbox": [ + 0.735, + 0.088, + 0.884, + 0.215 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.738, + 0.218, + 0.88, + 0.23 + ], + "angle": 0, + "content": "(b) Reconstruction Results" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.243, + 0.892, + 0.312 + ], + "angle": 0, + "content": "Figure 4. Attack performance comparison of FSHA [31] and FORA on CIFAR-10 with layer 2. (a) shows the detection score of two attacks detected by GS. (b) represents the reconstruction results of two attacks, and FSHA-GS is the reconstructed images when detected by GS." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.343, + 0.892, + 0.403 + ], + "angle": 0, + "content": "Patch Similarity (LPIPS) [49]. We also use Cosine Similarity and Mean Square Error to measure the similarity between the substitute client and the target client in feature space." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.406, + 0.893, + 0.603 + ], + "angle": 0, + "content": "Attack Baselines. We mainly compare our approach with three representative existing methods, which are FSHA [31], UnSplit [6], and PCAT [10]. For the malicious attack FSHA, we use sophisticated detection mechanism to jointly evaluate the attack's effectiveness. For the semi-honest attack UnSplit, we make it consistent with our experimental settings to ensure fairness. PCAT requires an understanding of the learning task while relying on a subset of the private training data to build the pseudo-client, and in order to comply with this assumption, we set the proportion of the CIFAR-10 private dataset to be \\(5\\%\\) (the maximal threshold suggested by the original paper), and for more complex CelebA dataset, we extend the proportion to be \\(10\\%\\)." 
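For the attack-model stage (Eqs. 6-7), the sketch below trains an inverse network of transposed convolutions with a final Tanh on the auxiliary data through the frozen substitute client, then applies it to the snapshot of target smashed data. Channel counts, strides, and the training loop are illustrative assumptions; the real inverse network must mirror the smashed-data shape at the chosen split point.

```python
import torch
import torch.nn as nn

# Illustrative inverse network: transposed convolutions followed by Tanh.
inverse_net = nn.Sequential(
    nn.ConvTranspose2d(16, 32, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.ConvTranspose2d(32, 3, kernel_size=3, padding=1),
    nn.Tanh(),                                   # images assumed normalised to [-1, 1]
)
opt_inv = torch.optim.Adam(inverse_net.parameters(), lr=1e-3)
mse = nn.MSELoss()

def train_attack_model(substitute, aux_loader, epochs=10):
    """Eq. (6): minimise the MSE between reconstructed and real auxiliary images."""
    substitute.eval()
    for _ in range(epochs):
        for x_aux in aux_loader:
            with torch.no_grad():
                z_aux = substitute(x_aux)        # substitute client stays frozen in this phase
            loss = mse(inverse_net(z_aux), x_aux)
            opt_inv.zero_grad()
            loss.backward()
            opt_inv.step()

def reconstruct(z_snap):
    """Eq. (7): map the snapshot of target smashed data straight back to image space."""
    with torch.no_grad():
        return inverse_net(z_snap)
```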
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.619, + 0.806, + 0.635 + ], + "angle": 0, + "content": "4.2. Comparison with Malicious Attack" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Since FSHA severely undermines the utility of the target client, recent work has proposed the Gradients Scrutinizer (GS) [8] to defend against such hijacking attacks by detecting the gradients returned from the server to the client. The GS will perform a similarity computation on the gradients, and if the calculated value is lower than a set threshold, it will be considered as a potential attack, resulting in the training of SL being immediately suspended. More details about GS can be found in Appendix C.1. We can observe from Fig. 4 that the reconstruction results of FORA are almost the same as those of FSHA in the unprotected SL system. Although FSHA performs well in capturing fine graphical details, it also leads to noticeable color shifts in some reconstruction results. Moreover, since FSHA drastically tampers with the updated gradient returned to the client model, it is easily detected by GS, leading to the failure of reconstruction." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12134" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.124, + 0.089, + 0.846, + 0.105 + ], + "angle": 0, + "content": "Table 1. Data reconstruction results of UnSplit, PCAT, and FORA on CIFAR-10 and CelebA in different splitting settings." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.115, + 0.889, + 0.301 + ], + "angle": 0, + "content": "
Split Point | UnSplit | CIFAR-10 PCAT | FORA | CelebA PCAT | FORA
Ground Truth | (qualitative reconstruction images)
layer 1 | (qualitative reconstruction images)
layer 2 | (qualitative reconstruction images)
layer 3 | (qualitative reconstruction images)
layer 4 | (qualitative reconstruction images)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.325, + 0.47, + 0.354 + ], + "angle": 0, + "content": "Table 2. SSIM, PSNR, and LPIPS of the reconstructed images on CIFAR-10 among three attacks." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.365, + 0.468, + 0.422 + ], + "angle": 0, + "content": "
Split Point | SSIM↑ | PSNR↑ | LPIPS↓
 | UnSplit | PCAT | FORA | UnSplit | PCAT | FORA | UnSplit | PCAT | FORA
layer 1 | 0.171 | 0.853 | 0.926 | 11.03 | 22.10 | 25.87 | 0.677 | 0.219 | 0.120
layer 2 | 0.101 | 0.642 | 0.830 | 10.48 | 17.29 | 22.19 | 0.689 | 0.432 | 0.252
layer 3 | 0.104 | 0.291 | 0.622 | 11.14 | 13.18 | 18.93 | 0.741 | 0.615 | 0.381
layer 4 | 0.108 | 0.121 | 0.030 | 8.62 | 11.08 | 10.45 | 0.722 | 0.676 | 0.628
" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.451, + 0.414, + 0.468 + ], + "angle": 0, + "content": "4.3. Comparison with Semi-Honest Attacks" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.477, + 0.47, + 0.839 + ], + "angle": 0, + "content": "Reconstruction Performance. We show in detail the reconstruction results for UnSplit, PCAT, and our proposed FORA on all split points for both datasets. As depicted in Tab. 1, compared to other attacks, the images reconstructed by FORA exhibit a significant improvement visually. Due to the vast search space and inefficient optimization approach, UnSplit almost fails to recover training data in both datasets, even at layer 1. Although PCAT can reconstruct training samples in the shallow settings of the CIFAR-10 dataset, such as layer 1 and layer 2, the reconstruction quality is still lower than that of FORA. For the more complex CelebA dataset, PCAT struggles to produce quality reconstructions. Tab. 2 and Tab. 3 provides the quantitative results of the attacks. Except for the anomaly at the layer 4 split point of CIFAR-10, where FORA slightly underperforms PCAT in terms of SSIM and PSNR metric, FORA is superior to both methods in all other settings, especially in terms of the LPIPS metric, which is considered to be more aligned with human perception. Notably, even though PCAT has access to a subset of the private data, while FORA only obtains samples with different distributions, FORA substantially surpasses PCAT for reconstruction. This further emphasizes the robust privacy threat our approach poses to SL. More reconstructed images are presented in Appendix B.1." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Feature Similarity. As shown in Tab. 4, we measure the feature distance between the proxy clients built by UnSplit, PCAT, and FORA and the target client at layer 2. The results show that the substitute clients trained by our method" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.325, + 0.892, + 0.354 + ], + "angle": 0, + "content": "Table 3. SSIM, PSNR and LPIPS of the reconstructed images on CelebA among three attacks." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.365, + 0.89, + 0.422 + ], + "angle": 0, + "content": "
Split Point | SSIM↑ | PSNR↑ | LPIPS↓
 | UnSplit | PCAT | FORA | UnSplit | PCAT | FORA | UnSplit | PCAT | FORA
layer 1 | 0.137 | 0.333 | 0.485 | 9.26 | 13.45 | 17.72 | 0.804 | 0.634 | 0.320
layer 2 | 0.170 | 0.316 | 0.476 | 8.70 | 12.05 | 17.11 | 0.747 | 0.653 | 0.381
layer 3 | 0.156 | 0.164 | 0.191 | 10.66 | 11.63 | 14.19 | 0.793 | 0.731 | 0.509
layer 4 | 0.084 | 0.092 | 0.192 | 7.94 | 10.60 | 13.00 | 0.804 | 0.738 | 0.621
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.437, + 0.892, + 0.465 + ], + "angle": 0, + "content": "Table 4. Feature similarity measured by Mean Square Error and Cosine Similarity on CIFAR-10 and CelebA at layer 2." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.477, + 0.889, + 0.523 + ], + "angle": 0, + "content": "
Method | CIFAR-10 | CelebA
 | UnSplit | PCAT | FORA | UnSplit | PCAT | FORA
Mean Square Error↓ | 1.041 | 0.528 | 0.274 | 50.773 | 1.353 | 0.753
Cosine Similarity↑ | 0.200 | 0.592 | 0.810 | 0.333 | 0.480 | 0.778
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.55, + 0.892, + 0.671 + ], + "angle": 0, + "content": "exhibit more similar representation preferences to the target client. The basic optimization approach of UnSplit makes it difficult to regularize the feature space of the proxy client. As for PCAT, it simply makes the smashed data generated by the pseudo model more favorable to the server model but fails to mimic the behavior of the client model. In contrast, FORA can impose stronger constraints in the feature space, which directly contributes to successful reconstruction." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.681, + 0.741, + 0.697 + ], + "angle": 0, + "content": "4.4. Effect of Auxiliary Dataset" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.704, + 0.892, + 0.87 + ], + "angle": 0, + "content": "Next, we analyze the effect of several important factors regarding the auxiliary dataset on attack performance. We first explore the impact of the fitting level of substitute models by varying the size of the auxiliary data. Then, we discuss the impact of the presence of a more significant distribution shift, i.e., the absence of some categories, between the auxiliary and target samples. Finally, we relax the major assumption about the adversary, namely that the server has access to the similarly distributed auxiliary dataset. We set the split point at layer 2 for ablation, and the full experimental results are provided in Appendix B.2." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Auxiliary Set Size. As shown in Fig. 5, when we reduce the size of the auxiliary dataset to half of the previous" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12135" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.07, + 0.087, + 0.468, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.217, + 0.468, + 0.245 + ], + "angle": 0, + "content": "Figure 5. Effects of varying auxiliary data size on FORA performed on CIFAR-10 and CelebA at layer 2." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.274, + 0.468, + 0.424 + ], + "angle": 0, + "content": "one, the attack performance of FORA remains almost unchanged. When we further reduce the number of auxiliary samples to \\(20\\%\\), the quality of the reconstructed images decreases slightly but still preserves the full outline and most of the details. In that case, the percentage of the public auxiliary dataset is very small compared to the huge private training set (50,000 for CIFAR-10 and 162770 for CelebA), only \\(2\\%\\) and \\(1.2\\%\\), respectively. This implies that even with a rather limited auxiliary dataset, FORA is still able to effectively reconstruct the client's training samples." + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.44, + 0.468, + 0.467 + ], + "angle": 0, + "content": "Table 5. Effect of absence of categories on FORA performed on CIFAR-10 at layer 2." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.479, + 0.466, + 0.566 + ], + "angle": 0, + "content": "
Absent Categories | SSIM↑ | PSNR↑ | LPIPS↓
Living | 0.768 | 20.44 | 0.300
Non-living | 0.732 | 18.43 | 0.395
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.583, + 0.468, + 0.853 + ], + "angle": 0, + "content": "Absence of Categories. It is likely that the adversary's public auxiliary data misses some semantic classes of the private data distribution. To model this situation, we create two special auxiliary datasets for CIFAR-10, one containing \"Living\" items (birds, cats, etc.), and the other containing \"Non-living\" items (airplanes, cars, etc.), both with 5,000 randomly sampled samples from CINIC-10. As presented in Tab. 5, even if a class is absent from the auxiliary dataset, FORA can still reconstruct samples of that class. In fact, FORA focuses on stealing the mapping relationship between client inputs and smashed data and therefore does not require class alignment. We observe that the absence of the \"Non-living\" category leads to a moderate degradation in the reconstruction results. We believe that the reason behind this phenomenon is that the greater variation of classes within the \"Non-living\" category helps to increase the generalization level of the substitute client, which in turn facilitates improved attack performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.468, + 0.9 + ], + "angle": 0, + "content": "Distribution Shift. Here we further analyze the impact of the auxiliary dataset distribution on FORA. In contrast to our default experimental setup, we selected 5000" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.089, + 0.892, + 0.158 + ], + "angle": 0, + "content": "Table 6. Effects of auxiliary dataset distribution shift on FORA performed on CIFAR-10 and CelebA at layer 2. \"Different\" represents auxiliary data sampled from CINIC-10, and FFHQ respectively, and \"Same\" means auxiliary dataset come from their original test set." + }, + { + "type": "table", + "bbox": [ + 0.545, + 0.17, + 0.844, + 0.24 + ], + "angle": 0, + "content": "
Dataset Size | CIFAR-10 | CelebA
 | Different | Same | Different | Same
SSIM↑ | 0.830 | 0.832 | 0.476 | 0.777
PSNR↑ | 22.19 | 22.78 | 17.11 | 21.55
LPIPS↓ | 0.252 | 0.207 | 0.381 | 0.264
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.272, + 0.892, + 0.483 + ], + "angle": 0, + "content": "and 10000 images from the original testing sets of CIFAR-10 and CelebA, respectively, as the auxiliary datasets with the same distribution. As shown in Tab. 6, a more similar distribution can facilitate substitute clients stealing the representation preference, resulting in better reconstruction performance. We observe that the attack results on the facial dataset are more vulnerable to the data distribution shift compared to the object dataset. One possible reason is that tasks related to facial datasets are more sensitive to variations in sampling methods and alignment conditions across different datasets. For object datasets, due to substantial distribution variation between different categories of themselves, e.g. ranging from animals to vehicles, which contributes to their robustness in handling distribution shifts." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.505, + 0.813, + 0.52 + ], + "angle": 0, + "content": "4.5. Effect of Substitute Client Structure" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.532, + 0.892, + 0.698 + ], + "angle": 0, + "content": "After validating the impact of the auxiliary dataset, here we are interested in the impact of substitute client architectures on FORA. We chose three different model structures as attack variants: the VGG block [34], the ResNet block [18], and the DenseNet block [22]. As can be seen in Fig. 6, the SSIM and LPIPS quantization results for the reconstructed images remain similar. This indicates that the extracted representation preferences on the basis of MK-MDD and Discriminator are close to that of the target client, despite the fact that the substitute clients use different architectures. Additional results are shown in Appendix B.3." + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.728, + 0.695, + 0.839 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.575, + 0.844, + 0.632, + 0.857 + ], + "angle": 0, + "content": "(a) SSIM↑" + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.729, + 0.882, + 0.839 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.758, + 0.844, + 0.821, + 0.857 + ], + "angle": 0, + "content": "(b) LPIPS↓" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.869, + 0.891, + 0.897 + ], + "angle": 0, + "content": "Figure 6. Effect for FORA with varying substitute model architectures on both datasets at layer 2." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12136" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.091, + 0.333, + 0.108 + ], + "angle": 0, + "content": "4.6. Counter Defense Techniques" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.115, + 0.47, + 0.265 + ], + "angle": 0, + "content": "There have been a number of defenses aimed at perturbing the smashed data claiming that they can reduce the risk of privacy leakage in SL to a certain extent. We select three well-known defense techniques, i.e., distance correlation minimization [37, 43, 44], differential privacy [1], and noise obfuscation [39], to evaluate the effectiveness of FORA. Tab. 7 shows the limited impact of these defenses on FORA. See Appendix C.1 for more details on defense techniques. See Appendix C.2 for more defense results and discussions about possible adaptive defenses." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.267, + 0.469, + 0.372 + ], + "angle": 0, + "content": "Distance Correlation Minimization (DCOR). DCOR can uncorrelate irrelevant and sensitive features from the smashed data associated with the target client, which results in a lack of detailed expression of the input data in the representation preferences learned by the substitute client, especially in colors. However, FORA retains the ability to reconstruct the structural details of the private image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.373, + 0.469, + 0.509 + ], + "angle": 0, + "content": "Differential Privacy (DP). DP protects training data privacy by adding carefully crafted Laplace noise to the gradients. However, the effectiveness of DP against FORA is very limited under all privacy budgets. When the test accuracy of the model is reduced by nearly \\(10\\%\\) (the functionality is severely damaged), the SSIM of the reconstructed samples still reaches about \\(75\\%\\) of the original. This tradeoff between classification accuracy and defense strength makes DP not feasible for practical applications of SL." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.51, + 0.469, + 0.644 + ], + "angle": 0, + "content": "Noise Obfuscation (NO). NO is a direct defense to destroy the mapping relationship between smashed and input data. We observe that on the one hand, the noise of a small scale enhances the generalization level of the SL model to maintain or even improve the classification accuracy, on the other hand raising the noise scale helps to introduce deviations to the features extracted from the target client, making it more difficult to learn the representations and reconstruct the data for FORA." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.661, + 0.325, + 0.677 + ], + "angle": 0, + "content": "5. Discussion and Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.688, + 0.469, + 0.734 + ], + "angle": 0, + "content": "In this section, we first discuss the potential improvement and scalability of FORA, then we summarize this work. We also show limitation and future work in Appendix D." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.469, + 0.869 + ], + "angle": 0, + "content": "Improvement using Generative Adversarial Networks. Li et al. [27] propose a novel StyleGAN-based reconstruction attack against split inference, and their research focus is orthogonal to our contribution. Therefore, the reconstruction task in FORA can be further optimized using pre-trained StyleGAN [25]. As shown in Fig. 7, the well-trained substitute client in FORA combined with StyleGAN optimization can provide additional improvements in reconstruction performance." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Attack on Label-Protected SL. Another popular setup for SL requires the client to keep the labels locally [42], but" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.09, + 0.892, + 0.117 + ], + "angle": 0, + "content": "Table 7. Effect of utility and FORA performance against three defense techniques on CIFAR-10 at layer 2." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.128, + 0.89, + 0.433 + ], + "angle": 0, + "content": "
Defense Hyperparam | Test Acc (%) | SSIM↑ | PSNR↑ | LPIPS↓
0 (w/o defense) | 71.25 | 0.830 | 22.19 | 0.252
DCOR (α)
0.2 | 70.91 | 0.692 | 17.91 | 0.360
0.5 | 70.06 | 0.628 | 15.99 | 0.441
0.8 | 69.72 | 0.563 | 15.40 | 0.471
DP (ε)
+∞ | 69.68 | 0.823 | 22.36 | 0.225
100 | 63.05 | 0.711 | 20.36 | 0.394
10 | 61.93 | 0.621 | 18.03 | 0.487
NO (σ)
1.0 | 74.39 | 0.640 | 17.29 | 0.367
2.0 | 73.14 | 0.583 | 16.29 | 0.444
5.0 | 70.62 | 0.394 | 14.35 | 0.550
" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.447, + 0.885, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.556, + 0.892, + 0.585 + ], + "angle": 0, + "content": "Figure 7. Reconstructed CelebA images of FORA and FORA-G, FOAR-G represents FORA combined with StyleGAN." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.613, + 0.892, + 0.674 + ], + "angle": 0, + "content": "this case does not have any influence on the implementation and performance of FORA. Since FORA is only related to the smashed data output from the target client, it does not depend on the server model as well as the training task." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Conclusion. In this work, we propose a novel data reconstruction attack against SL, named Feature-Oriented Reconstruction Attack (FORA). Unlike all previous attack schemes, FORA enables a semi-honest server to secretly reconstruct the client's private training data with very little prior knowledge. Thanks to our new perspective of extracting representation preferences from smashed data, the server can contemporaneously train a substitute client that approximates the target client's behavior to conduct the attack. Our extensive experiments in various settings demonstrate the state-of-the-art performance of FORA. Due to its stealth and effectiveness, it poses a real privacy threat to SL. We hope our work can inspire future efforts to explore it in more practical SL, and we are eager to draw attention to more robust defense techniques." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12137" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Martin Abadi, Andy Chu, Ian Goodfellow, H Brendan McMahan, Ilya Mironov, Kunal Talwar, and Li Zhang. Deep learning with differential privacy. In Proceedings of the 2016 ACM SIGSAC conference on computer and communications security, pages 308-318, 2016. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.186, + 0.472, + 0.269 + ], + "angle": 0, + "content": "[2] Sharif Abuadbba, Kyuyeon Kim, Minki Kim, Chandra Thapa, Seyit A Camtepe, Yansong Gao, Hyoungshick Kim, and Surya Nepal. Can we use split learning on 1d cnn models for privacy preserving training? In Proceedings of the 15th ACM Asia Conference on Computer and Communications Security, pages 305-318, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.271, + 0.47, + 0.324 + ], + "angle": 0, + "content": "[3] Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In International conference on machine learning, pages 214-223. PMLR, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.326, + 0.47, + 0.368 + ], + "angle": 0, + "content": "[4] Luke N Darlow, Elliot J Crowley, Antreas Antoniou, and Amos J Storkey. Cinic-10 is not imagenet or cifar-10. arXiv preprint arXiv:1810.03505, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.369, + 0.47, + 0.424 + ], + "angle": 0, + "content": "[5] Ege Erdoğan, Alptekin Kupçü, and A Ercument Cicek. Splitguard: Detecting and mitigating training-hijacking attacks in split learning. 
In Proceedings of the 21st Workshop on Privacy in the Electronic Society, pages 125-137, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.425, + 0.47, + 0.493 + ], + "angle": 0, + "content": "[6] Ege Erdoğan, Alptekin Kupçü, and A Ercümen Çiçek. Unsplit: Data-oblivious model inversion, model stealing, and label inference attacks against split learning. In Proceedings of the 21st Workshop on Privacy in the Electronic Society, pages 115-124, 2022. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.495, + 0.47, + 0.564 + ], + "angle": 0, + "content": "[7] Chong Fu, Xuhong Zhang, Shouling Ji, Jinyin Chen, Jingzheng Wu, Shanqing Guo, Jun Zhou, Alex X Liu, and Ting Wang. Label inference attacks against vertical federated learning. In 31st USENIX Security Symposium (USENIX Security 22), pages 1397-1414, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.565, + 0.47, + 0.661 + ], + "angle": 0, + "content": "[8] Jiayun Fu, Xiaojing Ma, Bin B. Zhu, Pingyi Hu, Ruixin Zhao, Yaru Jia, Peng Xu, Hai Jin, , and Dongmei Zhang. Focusing on pinocchio's nose: A gradients scrutinizer to thwart split-learning hijacking attacks using intrinsic attributes. In 30th Annual Network and Distributed System Security Symposium, NDSS 2023, San Diego, California, USA, February 27-March 3, 2023. The Internet Society, 2023. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.663, + 0.47, + 0.717 + ], + "angle": 0, + "content": "[9] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In International conference on machine learning, pages 1180-1189. PMLR, 2015. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.719, + 0.47, + 0.787 + ], + "angle": 0, + "content": "[10] Xinben Gao and Lan Zhang. PCAT: Functionality and data stealing from split learning by Pseudo-Client attack. In 32nd USENIX Security Symposium (USENIX Security 23), pages 5271–5288, Anaheim, CA, 2023. USENIX Association. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.789, + 0.47, + 0.858 + ], + "angle": 0, + "content": "[11] Yansong Gao, Minki Kim, Sharif Abuadbba, Yeonjae Kim, Chandra Thapa, Kyuyeon Kim, Seyit A Camtepe, Hyoungshick Kim, and Surya Nepal. End-to-end evaluation of federated learning and split learning for internet of things. arXiv preprint arXiv:2003.13376, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.859, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[12] Muhammad Ghifary, W Bastiaan Kleijn, and Mengjie Zhang. Domain adaptive neural networks for object recognition. In PRICAI 2014: Trends in Artificial Intelligence:" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "13th Pacific Rim International Conference on Artificial Intelligence, Gold Coast, QLD, Australia, December 1-5, 2014. Proceedings 13, pages 898-904. Springer, 2014. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.892, + 0.203 + ], + "angle": 0, + "content": "[13] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2014. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.205, + 0.892, + 0.274 + ], + "angle": 0, + "content": "[14] Ian J Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Proceedings of the 27th International Conference on Neural Information Processing Systems-Volume 2, pages 2672–2680, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.275, + 0.892, + 0.343 + ], + "angle": 0, + "content": "[15] Arthur Gretton, Dino Sejdinovic, Heiko Strathmann, Sivaraman Balakrishnan, Massimiliano Pontil, Kenji Fukumizu, and Bharath K Sriperumbudur. Optimal kernel choice for large-scale two-sample tests. Advances in neural information processing systems, 25, 2012. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.344, + 0.892, + 0.385 + ], + "angle": 0, + "content": "[16] Otkrist Gupta and Ramesh Raskar. Distributed learning of deep neural network over multiple agents. Journal of Network and Computer Applications, 116:1-8, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.386, + 0.892, + 0.441 + ], + "angle": 0, + "content": "[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.442, + 0.892, + 0.496 + ], + "angle": 0, + "content": "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.497, + 0.892, + 0.552 + ], + "angle": 0, + "content": "[19] Zecheng He, Tianwei Zhang, and Ruby B Lee. Model inversion attacks against collaborative inference. In Proceedings of the 35th Annual Computer Security Applications Conference, pages 148-162, 2019. 1, 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.553, + 0.892, + 0.594 + ], + "angle": 0, + "content": "[20] Alain Hore and Djemel Ziou. Image quality metrics: Psnr vs. ssm. In 2010 20th international conference on pattern recognition, pages 2366-2369. IEEE, 2010. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.595, + 0.892, + 0.663 + ], + "angle": 0, + "content": "[21] Andrew G Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.664, + 0.892, + 0.719 + ], + "angle": 0, + "content": "[22] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4700-4708, 2017. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.72, + 0.892, + 0.775 + ], + "angle": 0, + "content": "[23] Sanjay Kariyappa and Moinuddin K Qureshi. Exploit: Extracting private labels in split learning. In 2023 IEEE Conference on Secure and Trustworthy Machine Learning (SaTML), pages 165-175. IEEE, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.776, + 0.892, + 0.831 + ], + "angle": 0, + "content": "[24] Tero Karras, Samuli Laine, and Timo Aila. 
A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[25] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 8" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12138" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "[26] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.121, + 0.469, + 0.189 + ], + "angle": 0, + "content": "[27] Ziang Li, Mengda Yang, Yaxin Liu, Juan Wang, Hongxin Hu, Wenzhe Yi, and Xiaoyang Xu. GAN you see me? enhanced data reconstruction attacks against split inference. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.19, + 0.47, + 0.245 + ], + "angle": 0, + "content": "[28] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of the IEEE international conference on computer vision, pages 3730-3738, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.246, + 0.469, + 0.302 + ], + "angle": 0, + "content": "[29] Mingsheng Long, Yue Cao, Jianmin Wang, and Michael Jordan. Learning transferable features with deep adaptation networks. In International conference on machine learning, pages 97-105. PMLR, 2015. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.302, + 0.469, + 0.357 + ], + "angle": 0, + "content": "[30] Sida Luo, Fangchao Yu, Lina Wang, Bo Zeng, Zhi Pang, and Kai Zhao. Feature sniffer: A stealthy inference attacks framework on split learning. In International Conference on Artificial Neural Networks, pages 66-77. Springer, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.359, + 0.469, + 0.426 + ], + "angle": 0, + "content": "[31] Dario Pasquini, Giuseppe Ateniese, and Massimo Bernaschi. Unleashing the tiger: Inference attacks on split learning. In Proceedings of the 2021 ACM SIGSAC Conference on Computer and Communications Security, pages 2113-2129, 2021. 1, 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.428, + 0.469, + 0.482 + ], + "angle": 0, + "content": "[32] Maarten G Poirot, Praneeth Vepakomma, Ken Chang, Jayashree Kalpathy-Cramer, Rajiv Gupta, and Ramesh Raskar. Split learning for collaborative deep learning in healthcare. arXiv preprint arXiv:1912.12115, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.484, + 0.469, + 0.564 + ], + "angle": 0, + "content": "[33] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision, pages 618-626, 2017. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.567, + 0.469, + 0.607 + ], + "angle": 0, + "content": "[34] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.609, + 0.469, + 0.691 + ], + "angle": 0, + "content": "[35] Abhishek Singh, Ayush Chopra, Ethan Garza, Emily Zhang, Praneeth Vepakomma, Vivek Sharma, and Ramesh Raskar. Disco: Dynamic and invariant sensitive channel obfuscation for deep neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12125-12135, 2021. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.692, + 0.469, + 0.732 + ], + "angle": 0, + "content": "[36] Congzheng Song and Vitaly Shmatikov. Overlearning reveals sensitive attributes. arXiv preprint arXiv:1905.11742, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.734, + 0.469, + 0.773 + ], + "angle": 0, + "content": "[37] Gábor J Székely, Maria L Rizzo, and Nail K Bakirov. Measuring and testing dependence by correlation of distances. 2007. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.469, + 0.843 + ], + "angle": 0, + "content": "[38] Chandra Thapa, Pathum Chamikara Mahawaga Arachchige, Seyit Camtepe, and Lichao Sun. Splitfed: When federated learning meets split learning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 8485-8493, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[39] Tom Titcombe, Adam J Hall, Pavlos Papadopoulos, and Daniele Romanini. Practical defences against model inversion attacks for split neural networks. arXiv preprint arXiv:2104.05743, 2021. 8" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[40] Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[41] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7167-7176, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[42] Praneeth Vepakomma, Otkrist Gupta, Tristan Swedish, and Ramesh Raskar. Split learning for health: Distributed deep learning without sharing raw patient data. arXiv preprint arXiv:1812.00564, 2018. 1, 2, 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.892, + 0.303 + ], + "angle": 0, + "content": "[43] Praneeth Vepakomma, Otkrist Gupta, Abhimanyu Dubey, and Ramesh Raskar. Reducing leakage in distributed deep learning for sensitive health data. arXiv preprint arXiv:1812.00564, 2, 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.892, + 0.374 + ], + "angle": 0, + "content": "[44] Praneeth Vepakomma, Abhishek Singh, Otkrist Gupta, and Ramesh Raskar. Nopeek: Information leakage reduction to share activations in distributed deep learning. In 2020 International Conference on Data Mining Workshops (ICDMW), pages 933–942. IEEE, 2020. 
1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.376, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[45] Mei Wang and Weihong Deng. Deep visual domain adaptation: A survey. Neurocomput., 312(C):135-153, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.405, + 0.892, + 0.432 + ], + "angle": 0, + "content": "[46] Mei Wang and Weihong Deng. Deep visual domain adaptation: A survey. Neurocomputing, 312:135-153, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.434, + 0.892, + 0.488 + ], + "angle": 0, + "content": "[47] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.49, + 0.892, + 0.558 + ], + "angle": 0, + "content": "[48] Mengda Yang, Ziang Li, Juan Wang, Hongxin Hu, Ao Ren, Xiaoyang Xu, and Wenzhe Yi. Measuring data reconstruction defenses in collaborative inference systems. Advances in Neural Information Processing Systems, 35:12855-12867, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.56, + 0.892, + 0.63 + ], + "angle": 0, + "content": "[49] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 5" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12139" + } + ] +] \ No newline at end of file diff --git a/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/de2cb66c-4154-4b26-aa15-91e83e19d783_origin.pdf b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/de2cb66c-4154-4b26-aa15-91e83e19d783_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6940ed9c3e5eedd295c72135b02845f53d96a016 --- /dev/null +++ b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/de2cb66c-4154-4b26-aa15-91e83e19d783_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bc0bbb5918c2106cf308946caf0eac99e80b2b4864699630b0dee03ea29d886 +size 1785523 diff --git a/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/full.md b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8d8f095f46386f4f58fd7d8f450fed2970c60654 --- /dev/null +++ b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/full.md @@ -0,0 +1,309 @@ +# A Stealthy Wrongdoer: Feature-Oriented Reconstruction Attack against Split Learning + +Xiaoyang Xu $^{1}$ Mengda Yang $^{1}$ Wenzhe Yi $^{1}$ Ziang Li $^{1}$ Juan Wang $^{1*}$ Hongxin Hu $^{2}$ Yong Zhuang $^{1}$ Yaxin Liu $^{1}$ + +1 Key Laboratory of Aerospace Information Security and Trusted Computing, Ministry of Education, School of Cyber Science and Engineering, Wuhan University + +$^{2}$ Department of Computer Science and Engineering, University at Buffalo, SUNY + +{xiaoyangx, mengday, wenzhey, ziangli, yong.zhuang, yaxin.liu}@whu.edu.cn jwang@whu.edu.cn, hongxinh@buffalo.edu + +# Abstract + +Split Learning (SL) is a 
distributed learning framework renowned for its privacy-preserving features and minimal computational requirements. Previous research consistently highlights the potential privacy breaches in SL systems by server adversaries reconstructing training data. However, these studies often rely on strong assumptions or compromise system utility to enhance attack performance. This paper introduces a new semi-honest Data Reconstruction Attack on SL, named Feature-Oriented Reconstruction Attack (FORA). In contrast to prior works, FORA relies on limited prior knowledge, specifically that the server utilizes auxiliary samples from the public without knowing any client's private information. This allows FORA to conduct the attack stealthily and achieve robust performance. The key vulnerability exploited by FORA is the revelation of the model representation preference in the smashed data output by victim client. FORA constructs a substitute client through feature-level transfer learning, aiming to closely mimic the victim client's representation preference. Leveraging this substitute client, the server trains the attack model to effectively reconstruct private data. Extensive experiments showcase FORA's superior performance compared to state-of-the-art methods. Furthermore, the paper systematically evaluates the proposed method's applicability across diverse settings and advanced defense strategies. + +# 1. Introduction + +Deep Neural Networks (DNN) have gained widespread usage in computer vision due to their excellent learning ability and expressive power. Split Learning (SL) [2, 11, 16, 32, 38, + +42, 44] emerged as a distributed collaborative framework that enables clients to cooperate with a server to perform learning task. In SL, the complete DNN model is divided into two parts, which are deployed on the client and server respectively. For a normal training process in SL, the client performs the computational process locally and communicates with the server solely based on intermediate features (referred to as smashed data) and their corresponding gradients. In this case, the server does not have access to any private information (raw data, parameters, architecture) about the client. Therefore, SL is considered effective in protecting the privacy of clients. + +However, recent works [6, 10, 19, 31, 36] have shown that there are still privacy risks associated with SL. It is possible for the server to steal private information about the client according to auxiliary knowledge. One particular concern is the Data Reconstruction Attack (DRA) [6, 10, 31], where a server attempts to recover the training data of a client in SL systems. Depending on whether the server affects the normal process of SL, we can categorize adversaries into malicious and semi-honest attackers. Malicious servers such as FSHA [31] can manipulate the SL training process to conduct more effective attack. However, the latest findings [5, 8] show that FSHA's mischief is easily detected by the client, leading to the termination of SL training protocol For semi-honest attackers, e.g. PCAT [10] and UnSplit [6], their superior camouflage makes them less likely to be detected. But current semi-honest attackers often rely overly on assumptions that favor their performances. For example, UnSplit requires knowledge of the client's architecture and is only applicable to simple networks or datasets. As for PCAT, it unduly depends on the availability of partial private data to assist in training the pseudo-client. 
These assumptions contradict the basic principle of SL, which is to ensure that the client's knowledge + +remains hidden from the server. In summary, we find previous attacks lack consideration of the intrinsic security of SL and the plausibility of their attack hypothesis, which limits the effectiveness and threat of their approach in real-world SL systems scenarios. + +In this work, we introduce a novel DRA toward more realistic and more challenging scenarios, where the server cannot access private data or the structures and parameters of the client model. Our scheme stems from new insights into potential privacy breaches in SL. We discover a fundamental phenomenon that the client model has its own representation preference, which can be reflected through the output smashed data. More importantly, this unique information can indicate the feature extraction behavior of the client. Based on this new insight, we propose a semi-honest privacy threat, namely Feature-Oriented Reconstruction Attack (FORA). A server adversary could establish a substitute client by narrowing the reference distance with the real client, which allows the substitute model to mimic the behavior of the target model at a finer granularity. To efficiently measure the preference distance of different representations, we introduce domain Discriminator network [9, 14] and Multi-Kernel Maximum Mean Discrepancy (MK-MMD) [15, 29]. These techniques are widely used in domain adaptation [45], enabling us to project various representation preferences into a shared space for comparison. With a well-trained substitute client, the server can successfully recover the private data by constructing an inverse network. + +We conduct our evaluation on two benchmark datasets and corresponding networks against different model partitioning strategies. The experimental results indicate that the proposed method significantly outperforms baseline attacks. Taking the reconstructed images of CelebA at layer 2 as an example, UnSplit, PCAT and FORA achieve effects of 8.70, 12.05, and 17.11 on the PSNR [20]. This demonstrates that FORA has significantly outperformed by $1.97\mathrm{x}$ and $1.42\mathrm{x}$ compared to the other two attacks. Although FSHA can achieve attack performance similar to ours, its malicious attack process can be promptly halted through monitoring mechanisms [8], resulting in poor reconstructions. Furthermore, we investigate the potential influences on FORA, including different public knowledge conditions and existing defense strategies, to validate the robustness of FORA. + +The main contributions of this paper can be summarized as follows: + +- We propose a novel attack, named Feature-Oriented Reconstruction Attack (FORA). As far as we know, FORA is the first work enabling a semi-honest server to perform powerful DRA in more realistic and challenging SL systems. In such scenarios, the server has no prior knowledge of the client model or access to raw data. + +![](images/f6bf651513d456665468a4daae80b5b42c851a484cae83889e7ec689272883a5.jpg) +Figure 1. Architecture of two-part split learning. + +- We have uncovered an inherent vulnerability in SL, where the server can exploit rich information in the smashed data to steal client representation preference, thereby building a substitute client for better reconstruction. +- We conduct comprehensive experiments with various adversarial knowledge against different benchmark datasets and models. 
The results demonstrate that FORA can achieve state-of-the-art attack performance compared with baselines and exhibits notable robustness across different settings. + +# 2. Background and Related Work + +Split Learning (SL). SL [2, 16, 32, 38, 42] is an emerging distributed learning paradigm for resource-limited scenarios, which can split the neural network model into both client-side and server-side. As shown in Fig. 1, the client performs forward propagation and transmits the smashed data to the server, which then uses the computed loss for backward propagation and sends the gradients of the smashed data back to the client. Both the client and server will update their weights after receiving the gradients. It is generally believed that SL provides a secure and efficient training protocol by allowing the client to retain a portion of the model and training data locally while offloading most of the computing overhead to the server [2, 16, 32, 42]. However, recent studies [6, 7, 10, 23, 31] have highlighted vulnerabilities in SL, where the server can exploit the latter part of the model to carry out privacy attacks. + +Data Reconstruction Attack (DRA) on SL. DRA [19, 27, 35, 48] is one of the most powerful privacy attacks that aim to steal the input data by the model's intermediate features. In SL, the server can utilize the smashed data output by the client to reconstruct the training data [6, 10, 31]. One notable attack is known as FSHA [31], where a malicious attacker utilizes the elaborated loss to alter the feature space of the victim client for reconstructing private data. In UnSplit [6], the semi-honest server attempts to reconstruct the training data and client's parameters simultaneously by utilizing the smashed data. Specifically, UnSplit optimizes parameters and inputs sequentially by minimizing the outputs between the clone client and the target client. To the best of our knowledge, PCAT [10] represents the most advanced attack under the semi-honest assumption. PCAT leverages the knowledge embedded in various stages of the server models + +![](images/d5e835309a9162567d4315273cdb244bf158eb3b68e49d594462f326311a9f2c.jpg) +(a) Original + +![](images/1ae30e6820f48215640529140ee0d38c9e61840d9c6c9f9bd188d677fee27913.jpg) +(b) Model 1 + +![](images/bea9c5acd0057794372126887ef9a442e835ce3c0fe1b3bd8176c144add6ff24.jpg) +(c) Model 2 +Figure 2. Input image and behavior visualization by Grad-CAM [33]. All the models are trained in CelebA with the task of smiling classification. The figure displays the original images and the representation preferences of three models trained under the same hyperparameter settings but with different random seeds. + +![](images/fb14206aa49cb0de784e118d4771ab9d576820e83a6bd4b4943f1d753c191055.jpg) +(d) Model 3 + +to steal private data by constructing a pseudo-client. Unlike previous work, SFA [30] focuses on reconstructing samples during the inference stage rather than the training samples. + +Although existing works claim that their attacks pose significant privacy threats to SL, they disregard the plausibility of their threat model. For FSHA, the server reconstructs the raw data while at the cost of destroying the client's utility. While FSHA assumes that the client is entirely free of any awareness of being maliciously disrupted, recent research [5, 8] indicates that such a malicious server can be easily detected by the client, leading to a halt in the SL. 
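Whether malicious or semi-honest, all of the attackers above operate inside the ordinary exchange of Fig. 1: the client sends smashed data, and the server returns the gradient of that smashed data. For reference, the sketch below shows one such vanilla SL training step in PyTorch-style code; the module and optimizer names are illustrative placeholders rather than any specific implementation from the works discussed.

```python
import torch
import torch.nn.functional as F

def split_learning_step(client_model, server_model, x, y, client_opt, server_opt):
    """One vanilla SL iteration: the server only ever sees `sent` (the smashed
    data) and returns its gradient; raw data and client weights stay local."""
    # Client side: forward through the front part of the network.
    smashed = client_model(x)
    sent = smashed.detach().requires_grad_(True)   # what actually crosses the wire

    # Server side: finish the forward pass, back-propagate, update server weights.
    server_opt.zero_grad()
    loss = F.cross_entropy(server_model(sent), y)
    loss.backward()                                # fills sent.grad and server grads
    server_opt.step()

    # Client side: continue backprop with the gradient returned by the server.
    client_opt.zero_grad()
    smashed.backward(sent.grad)
    client_opt.step()
    return loss.item()
```

The attacks compared in this paper differ only in what the server additionally does with the received smashed data and, for malicious attackers such as FSHA, with the gradient it sends back.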
UnSplit needs the knowledge of the client's structure and is not suitable for complex networks and datasets due to the infinite searching space of input data and model parameters. As for PCAT, it requires the adversary to have access to a portion of the private dataset. This is an unreasonable assumption that violates the original intention of SL since one of the distinctive characteristics of SL is the ability to train models without sharing the raw data [42]. As a result, how to explore DRA under more realistic assumptions in SL remains an open question. + +Domain Adaptation. Domain adaptation [9, 12, 15, 29, 40, 41, 46] is a technique that seeks to enhance the generalization of a model by transferring knowledge acquired from a source domain to a distinct yet related target domain. The core idea of domain adaptation is to map data from different domains into the same space for comparison. Here, we apply two popular methods: the domain Discriminator network [9, 41, 46] and the Multi-Kernel Maximum Mean Discrepancy (MK-MMD) function [15, 29, 46] to compare the feature spaces of different models. + +# 3. Method + +# 3.1. Threat Model + +Without loss of generality, given a two-party SL protocol, the SL model $F$ is partitioned to a server model $F_{s}$ and a client model $F_{c}$ . The server aims to stealthily recover the private training data of the client through the smashed data $Z$ output by $F_{c}$ . + +We assume that the server adversary is a semi-honest en + +tity, ensuring that the training process is indistinguishable from ordinary training during attack. Furthermore, we posit that the server adversary must adhere to the foundational principle of the SL — she lacks any means of accessing client-sensitive information. Specifically, the server does not require knowledge of the structure or hyperparameters of $F_{c}$ and is devoid of access to the client's private training dataset $D_{priv}$ . The sole piece of public knowledge available to the server pertains to the auxiliary dataset $D_{aux}$ , sourced from the same domain as the private samples. It's important to note that the distribution of $D_{aux}$ typically differs from that of $D_{priv}$ . Compared to the threat model of previous works, this assumption is more reasonable and realistic. + +# 3.2. Motivation + +Current DRAs rely overly on constructing inverse networks from input-output pairs obtained by querying the target model. However, this approach is impractical for SL because the server only has access to the client's outputs and is not qualified to query. A potential solution is to build a substitute client to mimic the target client, thus enabling the training of the inverse network. However, the variability of the substitute client's behavior affects the generalization of the inverse network to the target client, leading to the failure of the reconstruction, especially without the knowledge of the client model structure and private data distribution. + +As illustrated in Fig. 2, we employ Grad-CAM [33] to visualize the attention of intermediate features generated by different clients. From Fig. 2 (a)-(d), it can be noticed that even for models trained under the same setup, there still exists evident differences between their image processing attention. This phenomenon suggests that the smashed data output by the client reflects its distinctive feature extraction behavior, which we define as representation preferences. 
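Attention maps such as those in Fig. 2 can be obtained with the standard Grad-CAM recipe: weight the activations of a chosen convolutional layer by the spatially averaged gradients of the target score. The sketch below is a simplified, illustrative re-implementation (the model, target layer, and class index are placeholders), not the exact visualization code behind Fig. 2.

```python
import torch
import torch.nn.functional as F

def grad_cam(model, image, target_layer, class_idx):
    """Return a normalized Grad-CAM heatmap of `image` (C, H, W) for `class_idx`."""
    acts, grads = {}, {}
    h1 = target_layer.register_forward_hook(lambda m, i, o: acts.update(v=o.detach()))
    h2 = target_layer.register_full_backward_hook(lambda m, gi, go: grads.update(v=go[0].detach()))

    model.zero_grad()
    logits = model(image.unsqueeze(0))
    logits[0, class_idx].backward()
    h1.remove(); h2.remove()

    weights = grads["v"].mean(dim=(2, 3), keepdim=True)          # GAP of gradients
    cam = F.relu((weights * acts["v"]).sum(dim=1, keepdim=True))  # weighted activations
    cam = F.interpolate(cam, size=image.shape[-2:], mode="bilinear", align_corners=False)
    cam = cam.squeeze()
    return (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)
```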
Our general assumption is that narrowing the gap between the substitute client and the target client in terms of intermediate features can make the representation preferences of the two models more similar, which ensures that the inverse network trained by the substitute client perfectly maps the target smashed data back to the private raw data. + +# 3.3. Feature-Oriented Reconstruction Attack + +Inspired by the differences in model representation preferences, we propose a novel data reconstruction attack against SL, called Feature-Oriented Reconstruction Attack (FORA). In order to mount FORA, the adversary needs to contrive a way to obtain the representation preferences of the $F_{c}$ . To address this problem, we utilize domain adaptation techniques [9, 15, 29] to project different preference representations into the same space. Specifically, the adversary conducts feature-level transfer learning by exploiting the $Z_{c}$ collected in each training iteration and then obtains a substitute model that mimics well the feature extraction be + +![](images/fdb59d6d1256216406721cda90d25ba234660dad3dfe8f60a179ae56d16f4c76.jpg) +a) Substitute Model Construction + +![](images/979d596657903a8d50335e0fc936c272c2a31f2ed595604514504eb6a08a5742.jpg) +c) Private Data Reconstruction +Figure 3. Attack pipeline of Feature-Oriented Reconstruction Attack (FORA) against SL. (a) shows the substitute model training phase. The attacker constructs a substitute model $\hat{F}_c$ using $\mathcal{L}_{DISC}$ and $\mathcal{L}_{MK-MMD}$ to mimic the behavior of the client model $F_c$ . (b) means training an inverse network $f_c^{-1}$ using public data $X_{aux}$ . (c) represents the final attack phase using the attack model to reconstruct training data from snapshot $Z_{snap}$ of target smashed data. + +havior of the $F_{c}$ . Through this approach, the adversary can smoothly construct an attack model (inverse mapping network) to recover the private samples. The detailed pipeline of FORA is shown in Fig. 3. It consists of three phases: substitute model construction, attack model training, and private data reconstruction. + +Substitute Model Construction. Before SL training commences, the server initializes a substitute client, denoted by $\hat{F}_c$ . The $\hat{F}_c$ will be trained locally at the server in parallel with the victim's $F_c$ , and such process will take place throughout the entire SL collaboration. In each training iteration, the client will send smashed data of the current batch to the server for completing the subsequent computations. Concurrently, the server will use the collected smashed data to perform training on the $\hat{F}_c$ . For this purpose, the server introduces the Discriminator module and the MK-MMD module to extract the representation preferences. We define its training objective as: + +$$ +\min _ {\hat {F} _ {c}} \mathcal {L} _ {D I S C} + \mathcal {L} _ {M K - M M D}, \tag {1} +$$ + +where $\mathcal{L}_{DISC}$ is the Discriminator module constraining $Z_{aux} = \hat{F}_c(X_{aux})$ and $Z_{priv} = F_c(X_{priv})$ to be indistinguishable, while $\mathcal{L}_{MK - MMD}$ is the MK-MMD module making $Z_{aux}$ as close as possible to $Z_{priv}$ in shared space. + +The Discriminator [3, 9, 13] $D$ is also a network that needs to be trained synchronously and is tasked with efficiently distinguishing the generated features between $F_{c}$ and $\hat{F}_{c}$ , maximizing probabilities of the former and minimizing probabilities of the latter [31]. 
Therefore, the parameters of $D$ will be updated to minimize the following loss function: + +$$ +\mathcal {L} _ {D} = \log \left(1 - \mathcal {D} \big (F _ {c} (X _ {p r i v}) \big) + \log \mathcal {D} \big (\hat {F} _ {c} (X _ {a u x}) \big)\right). \quad (2) +$$ + +After each local training step of $D$ , the server utilizes $D$ to instruct substitute client's representation preference to be consistent with that of the victim client. Specifically, an adversarial loss is constructed as the following: + +$$ +\mathcal {L} _ {D I S C} = \log (1 - D (\hat {F} _ {c} (X _ {a u x}))). \tag {3} +$$ + +The MK-MMD module [15, 29] is designed to align two sets of generated features into a shared space using kernel functions and compute their difference, where a smaller difference signifies closer representation preferences. Then, for the substitute client, the objective extends beyond maximizing the probabilities output by the $D$ , it also seeks to minimize the MK-MMD loss function, namely: + +$$ +\mathcal {L} _ {M K - M M D} = \left\| \phi \left(\hat {F} _ {c} \left(X _ {a u x}\right)\right) - \phi \left(F _ {c} \left(X _ {p r i v}\right)\right) \right\| _ {\mathcal {H}}, \tag {4} +$$ + +$$ +\left\{ \begin{array}{c} \phi = \sum_ {j = 1} ^ {m} \beta_ {j} k _ {j}, \\ \sum_ {j = 1} ^ {m} \beta_ {j} = 1, \beta_ {j} \geq 0, \forall j, \end{array} \right. \tag {5} +$$ + +where $k$ is a single kernel function, $\phi$ denotes a set of kernel functions that project different smashed data into Reproducing Kernel Hilbert Space $\mathcal{H}$ , $\beta$ is the weight coefficient corresponding to the single kernel function. + +Attack Model Training. At the end of the training of SL, the server can obtain a substitute client with a feature extraction behavior extremely similar to that of the victim client. Moreover, its feature space is known to the adversary, who can recover the original input from the smashed data by applying an inverse network (denoted as $f_{c}^{-1}$ ). Following previous DRAs [19, 35], we adopt the $f_{c}^{-1}$ consist + +ing of a set of Transposed Convolution layers and Tanh activations as our attack model. The server can leverage the auxiliary dataset to train the attack model by minimizing the mean square error between $f_{c}^{-1}(\hat{F}_{c}(X_{aux}))$ and $X_{aux}$ as follows: + +$$ +\mathcal {L} _ {f _ {c} ^ {- 1}} = \left\| f _ {c} ^ {- 1} \left(\hat {F} _ {c} \left(X _ {a u x}\right)\right) - X _ {a u x} \right\| _ {2} ^ {2}. \tag {6} +$$ + +Private Data Reconstruction. The server keeps a snapshot $Z_{\text{snap}} = F_c(X_{\text{priv}})$ of all smashed data output by the target client under the final training iteration for reconstruction. Since the substitute client is able to mimic the target client's representation preferences well, the server can subtly use $f_c^{-1}$ to perform the attack by mapping the target smashed data directly into the private raw data space, namely: + +$$ +X _ {p r i v} ^ {*} = f _ {c} ^ {- 1} \left(Z _ {s n a p}\right). \tag {7} +$$ + +Here, $X_{priv}^{*}$ are the reconstructed private training samples. + +# 4. Experiments + +# 4.1. Experimental Setup + +Datasets. In our experiments, we rely on CIFAR-10 [26] and CelebA [28] to validate the attacks, due to their dominance in the research on SL [6, 10, 31]. They will be used as private data for the client's target training tasks. According to Sec. 3.1, we assume that the server adversary has access to a set of auxiliary samples that are distinct from the client's private data. 
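Before turning to the concrete datasets, it helps to make the procedure of Sec. 3.3 concrete. Putting Eqs. (1)-(5) together, one server-side update of the substitute client can be sketched as below. This is a hedged illustration rather than the paper's exact implementation: the discriminator (assumed to end in a sigmoid), the optimizers, and the equal-weight Gaussian multi-kernel MMD estimator are all assumptions made for the example.

```python
import torch

def mk_mmd(a, b, sigmas=(1.0, 2.0, 4.0, 8.0)):
    """Biased multi-kernel MMD^2 estimate with a mixture of RBF kernels
    (equal weights beta_j = 1/m). `a`, `b` are flattened feature batches."""
    x = torch.cat([a, b], dim=0)
    d2 = torch.cdist(x, x).pow(2)
    k = sum(torch.exp(-d2 / (2 * s ** 2)) for s in sigmas) / len(sigmas)
    n = a.size(0)
    return k[:n, :n].mean() + k[n:, n:].mean() - 2 * k[:n, n:].mean()

def substitute_update(sub_client, disc, x_aux, z_priv, opt_sub, opt_disc):
    """One illustrative FORA-style server-side iteration; `z_priv` is the
    smashed data received from the victim client for the current batch."""
    z_aux = sub_client(x_aux)

    # Discriminator step (Eq. 2): score victim features high, substitute features low.
    opt_disc.zero_grad()
    d_loss = torch.log(1 - disc(z_priv)).mean() + torch.log(disc(z_aux.detach())).mean()
    d_loss.backward()
    opt_disc.step()

    # Substitute step (Eqs. 1, 3, 4): fool the discriminator and align feature spaces.
    opt_sub.zero_grad()
    adv = torch.log(1 - disc(z_aux)).mean()
    mmd = mk_mmd(z_aux.flatten(1), z_priv.flatten(1))
    (adv + mmd).backward()
    opt_sub.step()
```

In an actual attack this update runs once per SL iteration, in parallel with the normal server-side training, so the substitute client tracks the victim client as it evolves.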
Therefore, we choose CINIC-10 [4] and FFHQ [24] as the adversary's auxiliary dataset, respectively. We exclude images in CINIC-10 that overlapped with CIFAR-10, and randomly select 5,000 samples and 10,000 samples from the preprocessed CINIC-10 and FFHQ as the final auxiliary data. Appendix A.1 provides the detailed information for different datasets. + +Models. We consider two popular types of neural network architectures, including MobileNet [21] and ResNet-18 [17], as target models for the classification tasks of CIFRA-10 and CelebA, respectively. We set various split points for different target models to show our attack performance. Since the server is entirely unaware of the client's model structure from Sec. 3.1, we use VGG blocks [34] (consisting of a sequence of Convolutional, BatchNorm, ReLU, and MaxPool layers) to construct substitute models. In addition, the adversary's substitute models adaptively depend on the size of the intermediate features output by the client. All the architecture information and splitting schemes used in this paper are reported in Appendix A.2. + +Metrics. In addition to analyzing the qualitative results of attack performances visually, we chose three quantitative metrics to evaluate the quality of the reconstructed images: Structural Similarity (SSIM) [47], Peak Signal-to-Noise Ratio (PSNR) [20], and Learned Perceptual Image + +![](images/836bb92b9f1c7a3cfd9825cb75d5f249da0af7787e60ecd1fafb4bdc52d54786.jpg) +(a) Detection Score + +![](images/e65cfc024f9d9f27e345480615bc19d12409b3e354862a61dae73b7dfa2ccd6c.jpg) +(b) Reconstruction Results +Figure 4. Attack performance comparison of FSHA [31] and FORA on CIFAR-10 with layer 2. (a) shows the detection score of two attacks detected by GS. (b) represents the reconstruction results of two attacks, and FSHA-GS is the reconstructed images when detected by GS. + +Patch Similarity (LPIPS) [49]. We also use Cosine Similarity and Mean Square Error to measure the similarity between the substitute client and the target client in feature space. + +Attack Baselines. We mainly compare our approach with three representative existing methods, which are FSHA [31], UnSplit [6], and PCAT [10]. For the malicious attack FSHA, we use sophisticated detection mechanism to jointly evaluate the attack's effectiveness. For the semi-honest attack UnSplit, we make it consistent with our experimental settings to ensure fairness. PCAT requires an understanding of the learning task while relying on a subset of the private training data to build the pseudo-client, and in order to comply with this assumption, we set the proportion of the CIFAR-10 private dataset to be $5\%$ (the maximal threshold suggested by the original paper), and for more complex CelebA dataset, we extend the proportion to be $10\%$ . + +# 4.2. Comparison with Malicious Attack + +Since FSHA severely undermines the utility of the target client, recent work has proposed the Gradients Scrutinizer (GS) [8] to defend against such hijacking attacks by detecting the gradients returned from the server to the client. The GS will perform a similarity computation on the gradients, and if the calculated value is lower than a set threshold, it will be considered as a potential attack, resulting in the training of SL being immediately suspended. More details about GS can be found in Appendix C.1. We can observe from Fig. 4 that the reconstruction results of FORA are almost the same as those of FSHA in the unprotected SL system. 
Although FSHA performs well in capturing fine graphical details, it also leads to noticeable color shifts in some reconstruction results. Moreover, since FSHA drastically tampers with the updated gradient returned to the client model, it is easily detected by GS, leading to the failure of reconstruction. + +Table 1. Data reconstruction results of UnSplit, PCAT, and FORA on CIFAR-10 and CelebA in different splitting settings. + +
*(Table 1 is a qualitative image grid: one row for the ground-truth images and one row per split point (layer 1-4), showing reconstructions by UnSplit, PCAT, and FORA on CIFAR-10 and CelebA. The images themselves are not reproduced here.)*
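Throughout these tables, "layer $k$" denotes the depth of the split point, i.e., roughly how many front-end blocks of the target network the client keeps before the cut; the exact per-architecture split configurations are listed in Appendix A.2. The snippet below is only a generic illustration of such a partition, not the specific splits used in the experiments.

```python
import torch.nn as nn
from torchvision.models import resnet18

def split_model(model: nn.Module, split_point: int):
    """Partition a network's top-level blocks into a client part and a server
    part; `split_point` is the number of blocks kept on the client."""
    blocks = list(model.children())
    client = nn.Sequential(*blocks[:split_point])
    server = nn.Sequential(*blocks[split_point:-1], nn.Flatten(), blocks[-1])
    return client, server

# Example: a client that keeps the first four ResNet-18 blocks (illustrative only).
client_part, server_part = split_model(resnet18(num_classes=10), split_point=4)
```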
+ +Table 2. SSIM, PSNR, and LPIPS of the reconstructed images on CIFAR-10 among three attacks. + +
| Split Point | SSIM↑ UnSplit | SSIM↑ PCAT | SSIM↑ FORA | PSNR↑ UnSplit | PSNR↑ PCAT | PSNR↑ FORA | LPIPS↓ UnSplit | LPIPS↓ PCAT | LPIPS↓ FORA |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| layer 1 | 0.171 | 0.853 | 0.926 | 11.03 | 22.10 | 25.87 | 0.677 | 0.219 | 0.120 |
| layer 2 | 0.101 | 0.642 | 0.830 | 10.48 | 17.29 | 22.19 | 0.689 | 0.432 | 0.252 |
| layer 3 | 0.104 | 0.291 | 0.622 | 11.14 | 13.18 | 18.93 | 0.741 | 0.615 | 0.381 |
| layer 4 | 0.108 | 0.121 | 0.030 | 8.62 | 11.08 | 10.45 | 0.722 | 0.676 | 0.628 |
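The SSIM, PSNR, and LPIPS values in these tables follow the metric definitions cited in Sec. 4.1 [20, 47, 49]. As a point of reference, for images scaled to $[0, 1]$ PSNR reduces to the few lines below; this is an illustrative computation, not the exact evaluation script (SSIM and LPIPS additionally require windowed statistics and the pretrained perceptual network of [49], respectively).

```python
import torch

def psnr(x_rec: torch.Tensor, x_true: torch.Tensor, max_val: float = 1.0) -> float:
    """Peak Signal-to-Noise Ratio in dB for images with values in [0, max_val]."""
    mse = torch.mean((x_rec - x_true) ** 2)
    return (10 * torch.log10(max_val ** 2 / mse)).item()
```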
+ +# 4.3. Comparison with Semi-Honest Attacks + +Reconstruction Performance. We show in detail the reconstruction results for UnSplit, PCAT, and our proposed FORA on all split points for both datasets. As depicted in Tab. 1, compared to other attacks, the images reconstructed by FORA exhibit a significant improvement visually. Due to the vast search space and inefficient optimization approach, UnSplit almost fails to recover training data in both datasets, even at layer 1. Although PCAT can reconstruct training samples in the shallow settings of the CIFAR-10 dataset, such as layer 1 and layer 2, the reconstruction quality is still lower than that of FORA. For the more complex CelebA dataset, PCAT struggles to produce quality reconstructions. Tab. 2 and Tab. 3 provides the quantitative results of the attacks. Except for the anomaly at the layer 4 split point of CIFAR-10, where FORA slightly underperforms PCAT in terms of SSIM and PSNR metric, FORA is superior to both methods in all other settings, especially in terms of the LPIPS metric, which is considered to be more aligned with human perception. Notably, even though PCAT has access to a subset of the private data, while FORA only obtains samples with different distributions, FORA substantially surpasses PCAT for reconstruction. This further emphasizes the robust privacy threat our approach poses to SL. More reconstructed images are presented in Appendix B.1. + +Feature Similarity. As shown in Tab. 4, we measure the feature distance between the proxy clients built by UnSplit, PCAT, and FORA and the target client at layer 2. The results show that the substitute clients trained by our method + +Table 3. SSIM, PSNR and LPIPS of the reconstructed images on CelebA among three attacks. + +
| Split Point | SSIM↑ UnSplit | SSIM↑ PCAT | SSIM↑ FORA | PSNR↑ UnSplit | PSNR↑ PCAT | PSNR↑ FORA | LPIPS↓ UnSplit | LPIPS↓ PCAT | LPIPS↓ FORA |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| layer 1 | 0.137 | 0.333 | 0.485 | 9.26 | 13.45 | 17.72 | 0.804 | 0.634 | 0.320 |
| layer 2 | 0.170 | 0.316 | 0.476 | 8.70 | 12.05 | 17.11 | 0.747 | 0.653 | 0.381 |
| layer 3 | 0.156 | 0.164 | 0.191 | 10.66 | 11.63 | 14.19 | 0.793 | 0.731 | 0.509 |
| layer 4 | 0.084 | 0.092 | 0.192 | 7.94 | 10.60 | 13.00 | 0.804 | 0.738 | 0.621 |
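Tab. 4 below quantifies how close each proxy/substitute client is to the victim client in feature space. A minimal sketch of such an evaluation-time measurement is given here; feeding the same batch to both clients and flattening the smashed data per sample (shapes assumed to match) is an assumption of this illustration rather than the paper's stated protocol.

```python
import torch
import torch.nn.functional as F

@torch.no_grad()
def feature_similarity(victim_client, sub_client, x_batch):
    """Mean squared error and mean cosine similarity between the two clients'
    smashed data on the same inputs (per sample, after flattening)."""
    z_vic = victim_client(x_batch).flatten(1)   # (B, D)
    z_sub = sub_client(x_batch).flatten(1)
    mse = F.mse_loss(z_sub, z_vic).item()
    cos = F.cosine_similarity(z_sub, z_vic, dim=1).mean().item()
    return mse, cos
```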
+ +Table 4. Feature similarity measured by Mean Square Error and Cosine Similarity on CIFAR-10 and CelebA at layer 2. + +
| Method | CIFAR-10 UnSplit | CIFAR-10 PCAT | CIFAR-10 FORA | CelebA UnSplit | CelebA PCAT | CelebA FORA |
| --- | --- | --- | --- | --- | --- | --- |
| Mean Square Error↓ | 1.041 | 0.528 | 0.274 | 50.773 | 1.353 | 0.753 |
| Cosine Similarity↑ | 0.200 | 0.592 | 0.810 | 0.333 | 0.480 | 0.778 |
+ +exhibit more similar representation preferences to the target client. The basic optimization approach of UnSplit makes it difficult to regularize the feature space of the proxy client. As for PCAT, it simply makes the smashed data generated by the pseudo model more favorable to the server model but fails to mimic the behavior of the client model. In contrast, FORA can impose stronger constraints in the feature space, which directly contributes to successful reconstruction. + +# 4.4. Effect of Auxiliary Dataset + +Next, we analyze the effect of several important factors regarding the auxiliary dataset on attack performance. We first explore the impact of the fitting level of substitute models by varying the size of the auxiliary data. Then, we discuss the impact of the presence of a more significant distribution shift, i.e., the absence of some categories, between the auxiliary and target samples. Finally, we relax the major assumption about the adversary, namely that the server has access to the similarly distributed auxiliary dataset. We set the split point at layer 2 for ablation, and the full experimental results are provided in Appendix B.2. + +Auxiliary Set Size. As shown in Fig. 5, when we reduce the size of the auxiliary dataset to half of the previous + +![](images/701ddc8906a6caf8a08f949f056e6942ca427b20a20dbba3f861c22ee36bd4ff.jpg) +Figure 5. Effects of varying auxiliary data size on FORA performed on CIFAR-10 and CelebA at layer 2. + +one, the attack performance of FORA remains almost unchanged. When we further reduce the number of auxiliary samples to $20\%$ , the quality of the reconstructed images decreases slightly but still preserves the full outline and most of the details. In that case, the percentage of the public auxiliary dataset is very small compared to the huge private training set (50,000 for CIFAR-10 and 162770 for CelebA), only $2\%$ and $1.2\%$ , respectively. This implies that even with a rather limited auxiliary dataset, FORA is still able to effectively reconstruct the client's training samples. + +Table 5. Effect of absence of categories on FORA performed on CIFAR-10 at layer 2. + +
| Absent Categories | SSIM↑ | PSNR↑ | LPIPS↓ |
| --- | --- | --- | --- |
| Living | 0.768 | 20.44 | 0.300 |
| Non-living | 0.732 | 18.43 | 0.395 |
+ +Absence of Categories. It is likely that the adversary's public auxiliary data misses some semantic classes of the private data distribution. To model this situation, we create two special auxiliary datasets for CIFAR-10, one containing "Living" items (birds, cats, etc.), and the other containing "Non-living" items (airplanes, cars, etc.), both with 5,000 randomly sampled samples from CINIC-10. As presented in Tab. 5, even if a class is absent from the auxiliary dataset, FORA can still reconstruct samples of that class. In fact, FORA focuses on stealing the mapping relationship between client inputs and smashed data and therefore does not require class alignment. We observe that the absence of the "Non-living" category leads to a moderate degradation in the reconstruction results. We believe that the reason behind this phenomenon is that the greater variation of classes within the "Non-living" category helps to increase the generalization level of the substitute client, which in turn facilitates improved attack performance. + +Distribution Shift. Here we further analyze the impact of the auxiliary dataset distribution on FORA. In contrast to our default experimental setup, we selected 5000 + +Table 6. Effects of auxiliary dataset distribution shift on FORA performed on CIFAR-10 and CelebA at layer 2. "Different" represents auxiliary data sampled from CINIC-10, and FFHQ respectively, and "Same" means auxiliary dataset come from their original test set. + +
| Metric | CIFAR-10 Different | CIFAR-10 Same | CelebA Different | CelebA Same |
| --- | --- | --- | --- | --- |
| SSIM↑ | 0.830 | 0.832 | 0.476 | 0.777 |
| PSNR↑ | 22.19 | 22.78 | 17.11 | 21.55 |
| LPIPS↓ | 0.252 | 0.207 | 0.381 | 0.264 |
+ +and 10000 images from the original testing sets of CIFAR-10 and CelebA, respectively, as the auxiliary datasets with the same distribution. As shown in Tab. 6, a more similar distribution can facilitate substitute clients stealing the representation preference, resulting in better reconstruction performance. We observe that the attack results on the facial dataset are more vulnerable to the data distribution shift compared to the object dataset. One possible reason is that tasks related to facial datasets are more sensitive to variations in sampling methods and alignment conditions across different datasets. For object datasets, due to substantial distribution variation between different categories of themselves, e.g. ranging from animals to vehicles, which contributes to their robustness in handling distribution shifts. + +# 4.5. Effect of Substitute Client Structure + +After validating the impact of the auxiliary dataset, here we are interested in the impact of substitute client architectures on FORA. We chose three different model structures as attack variants: the VGG block [34], the ResNet block [18], and the DenseNet block [22]. As can be seen in Fig. 6, the SSIM and LPIPS quantization results for the reconstructed images remain similar. This indicates that the extracted representation preferences on the basis of MK-MDD and Discriminator are close to that of the target client, despite the fact that the substitute clients use different architectures. Additional results are shown in Appendix B.3. + +![](images/1da3abbe82a698155e555fcc9f607cf556db9aec0699ed6a4138cc6533ebfd61.jpg) +(a) SSIM↑ + +![](images/ac4bad8af3fc1c0c6fcc83a17a528bb70236a7b2e279079f5ae816bce37794a7.jpg) +(b) LPIPS↓ +Figure 6. Effect for FORA with varying substitute model architectures on both datasets at layer 2. + +# 4.6. Counter Defense Techniques + +There have been a number of defenses aimed at perturbing the smashed data claiming that they can reduce the risk of privacy leakage in SL to a certain extent. We select three well-known defense techniques, i.e., distance correlation minimization [37, 43, 44], differential privacy [1], and noise obfuscation [39], to evaluate the effectiveness of FORA. Tab. 7 shows the limited impact of these defenses on FORA. See Appendix C.1 for more details on defense techniques. See Appendix C.2 for more defense results and discussions about possible adaptive defenses. + +Distance Correlation Minimization (DCOR). DCOR can uncorrelate irrelevant and sensitive features from the smashed data associated with the target client, which results in a lack of detailed expression of the input data in the representation preferences learned by the substitute client, especially in colors. However, FORA retains the ability to reconstruct the structural details of the private image. + +Differential Privacy (DP). DP protects training data privacy by adding carefully crafted Laplace noise to the gradients. However, the effectiveness of DP against FORA is very limited under all privacy budgets. When the test accuracy of the model is reduced by nearly $10\%$ (the functionality is severely damaged), the SSIM of the reconstructed samples still reaches about $75\%$ of the original. This tradeoff between classification accuracy and defense strength makes DP not feasible for practical applications of SL. + +Noise Obfuscation (NO). NO is a direct defense to destroy the mapping relationship between smashed and input data. 
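As a concrete illustration of this style of defense, the client can perturb the smashed data before it leaves the device. The sketch below follows the spirit of the additive-noise defense of [39], with the scale $\sigma$ as the only knob; it is an assumption-laden example, not the evaluated implementation.

```python
import torch

def obfuscate_smashed(smashed: torch.Tensor, sigma: float) -> torch.Tensor:
    """Add zero-mean Laplace noise of scale `sigma` to the smashed data before
    transmission; larger sigma means stronger obfuscation but noisier training."""
    noise = torch.distributions.Laplace(0.0, sigma).sample(smashed.shape)
    return smashed + noise.to(smashed.device)
```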
We observe that on the one hand, the noise of a small scale enhances the generalization level of the SL model to maintain or even improve the classification accuracy, on the other hand raising the noise scale helps to introduce deviations to the features extracted from the target client, making it more difficult to learn the representations and reconstruct the data for FORA. + +# 5. Discussion and Conclusion + +In this section, we first discuss the potential improvement and scalability of FORA, then we summarize this work. We also show limitation and future work in Appendix D. + +Improvement using Generative Adversarial Networks. Li et al. [27] propose a novel StyleGAN-based reconstruction attack against split inference, and their research focus is orthogonal to our contribution. Therefore, the reconstruction task in FORA can be further optimized using pre-trained StyleGAN [25]. As shown in Fig. 7, the well-trained substitute client in FORA combined with StyleGAN optimization can provide additional improvements in reconstruction performance. + +Attack on Label-Protected SL. Another popular setup for SL requires the client to keep the labels locally [42], but + +Table 7. Effect of utility and FORA performance against three defense techniques on CIFAR-10 at layer 2. + +
| Defense (Hyperparam) | Test Acc (%) | SSIM↑ | PSNR↑ | LPIPS↓ |
| --- | --- | --- | --- | --- |
| 0 (w/o defense) | 71.25 | 0.830 | 22.19 | 0.252 |
| DCOR (α = 0.2) | 70.91 | 0.692 | 17.91 | 0.360 |
| DCOR (α = 0.5) | 70.06 | 0.628 | 15.99 | 0.441 |
| DCOR (α = 0.8) | 69.72 | 0.563 | 15.40 | 0.471 |
| DP (ε = +∞) | 69.68 | 0.823 | 22.36 | 0.225 |
| DP (ε = 100) | 63.05 | 0.711 | 20.36 | 0.394 |
| DP (ε = 10) | 61.93 | 0.621 | 18.03 | 0.487 |
| NO (σ = 1.0) | 74.39 | 0.640 | 17.29 | 0.367 |
| NO (σ = 2.0) | 73.14 | 0.583 | 16.29 | 0.444 |
| NO (σ = 5.0) | 70.62 | 0.394 | 14.35 | 0.550 |
+ +![](images/238c6feaaeab283549e787aaf5c94c2dbf6042f312a0d23b6bd54a3758866b99.jpg) +Figure 7. Reconstructed CelebA images of FORA and FORA-G, FOAR-G represents FORA combined with StyleGAN. + +this case does not have any influence on the implementation and performance of FORA. Since FORA is only related to the smashed data output from the target client, it does not depend on the server model as well as the training task. + +Conclusion. In this work, we propose a novel data reconstruction attack against SL, named Feature-Oriented Reconstruction Attack (FORA). Unlike all previous attack schemes, FORA enables a semi-honest server to secretly reconstruct the client's private training data with very little prior knowledge. Thanks to our new perspective of extracting representation preferences from smashed data, the server can contemporaneously train a substitute client that approximates the target client's behavior to conduct the attack. Our extensive experiments in various settings demonstrate the state-of-the-art performance of FORA. Due to its stealth and effectiveness, it poses a real privacy threat to SL. We hope our work can inspire future efforts to explore it in more practical SL, and we are eager to draw attention to more robust defense techniques. + +# References + +[1] Martin Abadi, Andy Chu, Ian Goodfellow, H Brendan McMahan, Ilya Mironov, Kunal Talwar, and Li Zhang. Deep learning with differential privacy. In Proceedings of the 2016 ACM SIGSAC conference on computer and communications security, pages 308-318, 2016. 8 +[2] Sharif Abuadbba, Kyuyeon Kim, Minki Kim, Chandra Thapa, Seyit A Camtepe, Yansong Gao, Hyoungshick Kim, and Surya Nepal. Can we use split learning on 1d cnn models for privacy preserving training? In Proceedings of the 15th ACM Asia Conference on Computer and Communications Security, pages 305-318, 2020. 1, 2 +[3] Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In International conference on machine learning, pages 214-223. PMLR, 2017. 4 +[4] Luke N Darlow, Elliot J Crowley, Antreas Antoniou, and Amos J Storkey. Cinic-10 is not imagenet or cifar-10. arXiv preprint arXiv:1810.03505, 2018. 5 +[5] Ege Erdoğan, Alptekin Kupçü, and A Ercument Cicek. Splitguard: Detecting and mitigating training-hijacking attacks in split learning. In Proceedings of the 21st Workshop on Privacy in the Electronic Society, pages 125-137, 2022. 1, 3 +[6] Ege Erdoğan, Alptekin Kupçü, and A Ercümen Çiçek. Unsplit: Data-oblivious model inversion, model stealing, and label inference attacks against split learning. In Proceedings of the 21st Workshop on Privacy in the Electronic Society, pages 115-124, 2022. 1, 2, 5 +[7] Chong Fu, Xuhong Zhang, Shouling Ji, Jinyin Chen, Jingzheng Wu, Shanqing Guo, Jun Zhou, Alex X Liu, and Ting Wang. Label inference attacks against vertical federated learning. In 31st USENIX Security Symposium (USENIX Security 22), pages 1397-1414, 2022. 2 +[8] Jiayun Fu, Xiaojing Ma, Bin B. Zhu, Pingyi Hu, Ruixin Zhao, Yaru Jia, Peng Xu, Hai Jin, , and Dongmei Zhang. Focusing on pinocchio's nose: A gradients scrutinizer to thwart split-learning hijacking attacks using intrinsic attributes. In 30th Annual Network and Distributed System Security Symposium, NDSS 2023, San Diego, California, USA, February 27-March 3, 2023. The Internet Society, 2023. 1, 2, 3, 5 +[9] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In International conference on machine learning, pages 1180-1189. PMLR, 2015. 
2, 3, 4 +[10] Xinben Gao and Lan Zhang. PCAT: Functionality and data stealing from split learning by Pseudo-Client attack. In 32nd USENIX Security Symposium (USENIX Security 23), pages 5271–5288, Anaheim, CA, 2023. USENIX Association. 1, 2, 5 +[11] Yansong Gao, Minki Kim, Sharif Abuadbba, Yeonjae Kim, Chandra Thapa, Kyuyeon Kim, Seyit A Camtepe, Hyoungshick Kim, and Surya Nepal. End-to-end evaluation of federated learning and split learning for internet of things. arXiv preprint arXiv:2003.13376, 2020. 1 +[12] Muhammad Ghifary, W Bastiaan Kleijn, and Mengjie Zhang. Domain adaptive neural networks for object recognition. In PRICAI 2014: Trends in Artificial Intelligence: + +13th Pacific Rim International Conference on Artificial Intelligence, Gold Coast, QLD, Australia, December 1-5, 2014. Proceedings 13, pages 898-904. Springer, 2014. 3 +[13] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2014. 4 +[14] Ian J Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Proceedings of the 27th International Conference on Neural Information Processing Systems-Volume 2, pages 2672–2680, 2014. 2 +[15] Arthur Gretton, Dino Sejdinovic, Heiko Strathmann, Sivaraman Balakrishnan, Massimiliano Pontil, Kenji Fukumizu, and Bharath K Sriperumbudur. Optimal kernel choice for large-scale two-sample tests. Advances in neural information processing systems, 25, 2012. 2, 3, 4 +[16] Otkrist Gupta and Ramesh Raskar. Distributed learning of deep neural network over multiple agents. Journal of Network and Computer Applications, 116:1-8, 2018. 1, 2 +[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5 +[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 7 +[19] Zecheng He, Tianwei Zhang, and Ruby B Lee. Model inversion attacks against collaborative inference. In Proceedings of the 35th Annual Computer Security Applications Conference, pages 148-162, 2019. 1, 2, 4 +[20] Alain Hore and Djemel Ziou. Image quality metrics: Psnr vs. ssm. In 2010 20th international conference on pattern recognition, pages 2366-2369. IEEE, 2010. 2, 5 +[21] Andrew G Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861, 2017. 5 +[22] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4700-4708, 2017. 7 +[23] Sanjay Kariyappa and Moinuddin K Qureshi. Exploit: Extracting private labels in split learning. In 2023 IEEE Conference on Secure and Trustworthy Machine Learning (SaTML), pages 165-175. IEEE, 2023. 2 +[24] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 5 +[25] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 8 + +[26] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5 +[27] Ziang Li, Mengda Yang, Yaxin Liu, Juan Wang, Hongxin Hu, Wenzhe Yi, and Xiaoyang Xu. GAN you see me? enhanced data reconstruction attacks against split inference. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 2, 8 +[28] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of the IEEE international conference on computer vision, pages 3730-3738, 2015. 5 +[29] Mingsheng Long, Yue Cao, Jianmin Wang, and Michael Jordan. Learning transferable features with deep adaptation networks. In International conference on machine learning, pages 97-105. PMLR, 2015. 2, 3, 4 +[30] Sida Luo, Fangchao Yu, Lina Wang, Bo Zeng, Zhi Pang, and Kai Zhao. Feature sniffer: A stealthy inference attacks framework on split learning. In International Conference on Artificial Neural Networks, pages 66-77. Springer, 2023. 3 +[31] Dario Pasquini, Giuseppe Ateniese, and Massimo Bernaschi. Unleashing the tiger: Inference attacks on split learning. In Proceedings of the 2021 ACM SIGSAC Conference on Computer and Communications Security, pages 2113-2129, 2021. 1, 2, 4, 5 +[32] Maarten G Poirot, Praneeth Vepakomma, Ken Chang, Jayashree Kalpathy-Cramer, Rajiv Gupta, and Ramesh Raskar. Split learning for collaborative deep learning in healthcare. arXiv preprint arXiv:1912.12115, 2019. 1, 2 +[33] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision, pages 618-626, 2017. 3 +[34] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 5, 7 +[35] Abhishek Singh, Ayush Chopra, Ethan Garza, Emily Zhang, Praneeth Vepakomma, Vivek Sharma, and Ramesh Raskar. Disco: Dynamic and invariant sensitive channel obfuscation for deep neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12125-12135, 2021. 2, 4 +[36] Congzheng Song and Vitaly Shmatikov. Overlearning reveals sensitive attributes. arXiv preprint arXiv:1905.11742, 2019. 1 +[37] Gábor J Székely, Maria L Rizzo, and Nail K Bakirov. Measuring and testing dependence by correlation of distances. 2007. 8 +[38] Chandra Thapa, Pathum Chamikara Mahawaga Arachchige, Seyit Camtepe, and Lichao Sun. Splitfed: When federated learning meets split learning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 8485-8493, 2022. 1, 2 +[39] Tom Titcombe, Adam J Hall, Pavlos Papadopoulos, and Daniele Romanini. Practical defences against model inversion attacks for split neural networks. arXiv preprint arXiv:2104.05743, 2021. 8 + +[40] Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014. 3 +[41] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. 
Adversarial discriminative domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7167-7176, 2017. 3 +[42] Praneeth Vepakomma, Otkrist Gupta, Tristan Swedish, and Ramesh Raskar. Split learning for health: Distributed deep learning without sharing raw patient data. arXiv preprint arXiv:1812.00564, 2018. 1, 2, 3, 8 +[43] Praneeth Vepakomma, Otkrist Gupta, Abhimanyu Dubey, and Ramesh Raskar. Reducing leakage in distributed deep learning for sensitive health data. arXiv preprint arXiv:1812.00564, 2, 2019. 8 +[44] Praneeth Vepakomma, Abhishek Singh, Otkrist Gupta, and Ramesh Raskar. Nopeek: Information leakage reduction to share activations in distributed deep learning. In 2020 International Conference on Data Mining Workshops (ICDMW), pages 933–942. IEEE, 2020. 1, 8 +[45] Mei Wang and Weihong Deng. Deep visual domain adaptation: A survey. Neurocomput., 312(C):135-153, 2018. 2 +[46] Mei Wang and Weihong Deng. Deep visual domain adaptation: A survey. Neurocomputing, 312:135-153, 2018. 3 +[47] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 5 +[48] Mengda Yang, Ziang Li, Juan Wang, Hongxin Hu, Ao Ren, Xiaoyang Xu, and Wenzhe Yi. Measuring data reconstruction defenses in collaborative inference systems. Advances in Neural Information Processing Systems, 35:12855-12867, 2022. 2 +[49] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 5 \ No newline at end of file diff --git a/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/images.zip b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..b8c6b2ed423820197314034bf1d963a0fa8980b3 --- /dev/null +++ b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00a80dad750e7b67de78ad74a17bc757e9be371bdff1fab574696abf64915485 +size 484434 diff --git a/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/layout.json b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..9b254ca634b3a0bb64ff1f0517507d5f8ed0b539 --- /dev/null +++ b/2024/A Stealthy Wrongdoer_ Feature-Oriented Reconstruction Attack against Split Learning/layout.json @@ -0,0 +1,8109 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 97, + 103, + 496, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 103, + 496, + 140 + ], + "spans": [ + { + "bbox": [ + 97, + 103, + 496, + 140 + ], + "type": "text", + "content": "A Stealthy Wrongdoer: Feature-Oriented Reconstruction Attack against Split Learning" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "spans": [ + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "text", + "content": "Xiaoyang Xu" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": 
"inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "text", + "content": " Mengda Yang" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "text", + "content": " Wenzhe Yi" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "text", + "content": " Ziang Li" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "text", + "content": " Juan Wang" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "text", + "content": " Hongxin Hu" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "text", + "content": " Yong Zhuang" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "text", + "content": " Yaxin Liu" + }, + { + "bbox": [ + 64, + 159, + 528, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 190, + 539, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 190, + 539, + 217 + ], + "spans": [ + { + "bbox": [ + 54, + 190, + 539, + 217 + ], + "type": "text", + "content": "1 Key Laboratory of Aerospace Information Security and Trusted Computing, Ministry of Education, School of Cyber Science and Engineering, Wuhan University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 97, + 217, + 495, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 217, + 495, + 232 + ], + "spans": [ + { + "bbox": [ + 97, + 217, + 495, + 232 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 97, + 217, + 495, + 232 + ], + "type": "text", + "content": " Department of Computer Science and Engineering, University at Buffalo, SUNY" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 99, + 233, + 493, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 233, + 493, + 258 + ], + "spans": [ + { + "bbox": [ + 99, + 233, + 493, + 258 + ], + "type": "text", + "content": "{xiaoyangx, mengday, wenzhey, ziangli, yong.zhuang, yaxin.liu}@whu.edu.cn jwang@whu.edu.cn, hongxinh@buffalo.edu" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 286, + 192, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 286, + 192, + 300 + ], + "spans": [ + { + "bbox": [ + 143, + 286, + 192, + 300 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 312, + 289, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 312, + 289, + 613 + ], + "spans": [ + { + "bbox": [ + 47, + 312, + 289, + 613 + ], + "type": "text", + "content": "Split Learning (SL) is a distributed learning framework renowned for its privacy-preserving features and minimal computational requirements. Previous research consistently highlights the potential privacy breaches in SL systems by server adversaries reconstructing training data. 
However, these studies often rely on strong assumptions or compromise system utility to enhance attack performance. This paper introduces a new semi-honest Data Reconstruction Attack on SL, named Feature-Oriented Reconstruction Attack (FORA). In contrast to prior works, FORA relies on limited prior knowledge, specifically that the server utilizes auxiliary samples from the public without knowing any client's private information. This allows FORA to conduct the attack stealthily and achieve robust performance. The key vulnerability exploited by FORA is the revelation of the model representation preference in the smashed data output by victim client. FORA constructs a substitute client through feature-level transfer learning, aiming to closely mimic the victim client's representation preference. Leveraging this substitute client, the server trains the attack model to effectively reconstruct private data. Extensive experiments showcase FORA's superior performance compared to state-of-the-art methods. Furthermore, the paper systematically evaluates the proposed method's applicability across diverse settings and advanced defense strategies." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 636, + 128, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 636, + 128, + 649 + ], + "spans": [ + { + "bbox": [ + 47, + 636, + 128, + 649 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 657, + 287, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 657, + 287, + 693 + ], + "spans": [ + { + "bbox": [ + 47, + 657, + 287, + 693 + ], + "type": "text", + "content": "Deep Neural Networks (DNN) have gained widespread usage in computer vision due to their excellent learning ability and expressive power. Split Learning (SL) [2, 11, 16, 32, 38," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 287, + 545, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 287, + 545, + 432 + ], + "spans": [ + { + "bbox": [ + 306, + 287, + 545, + 432 + ], + "type": "text", + "content": "42, 44] emerged as a distributed collaborative framework that enables clients to cooperate with a server to perform learning task. In SL, the complete DNN model is divided into two parts, which are deployed on the client and server respectively. For a normal training process in SL, the client performs the computational process locally and communicates with the server solely based on intermediate features (referred to as smashed data) and their corresponding gradients. In this case, the server does not have access to any private information (raw data, parameters, architecture) about the client. Therefore, SL is considered effective in protecting the privacy of clients." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 438, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 438, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 438, + 547, + 715 + ], + "type": "text", + "content": "However, recent works [6, 10, 19, 31, 36] have shown that there are still privacy risks associated with SL. It is possible for the server to steal private information about the client according to auxiliary knowledge. One particular concern is the Data Reconstruction Attack (DRA) [6, 10, 31], where a server attempts to recover the training data of a client in SL systems. 
Depending on whether the server affects the normal process of SL, we can categorize adversaries into malicious and semi-honest attackers. Malicious servers such as FSHA [31] can manipulate the SL training process to conduct more effective attack. However, the latest findings [5, 8] show that FSHA's mischief is easily detected by the client, leading to the termination of SL training protocol For semi-honest attackers, e.g. PCAT [10] and UnSplit [6], their superior camouflage makes them less likely to be detected. But current semi-honest attackers often rely overly on assumptions that favor their performances. For example, UnSplit requires knowledge of the client's architecture and is only applicable to simple networks or datasets. As for PCAT, it unduly depends on the availability of partial private data to assist in training the pseudo-client. These assumptions contradict the basic principle of SL, which is to ensure that the client's knowledge" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 135, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 135, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 135, + 712 + ], + "type": "text", + "content": "*Corresponding author." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "12130" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 133 + ], + "type": "text", + "content": "remains hidden from the server. In summary, we find previous attacks lack consideration of the intrinsic security of SL and the plausibility of their attack hypothesis, which limits the effectiveness and threat of their approach in real-world SL systems scenarios." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 134, + 289, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 134, + 289, + 421 + ], + "spans": [ + { + "bbox": [ + 46, + 134, + 289, + 421 + ], + "type": "text", + "content": "In this work, we introduce a novel DRA toward more realistic and more challenging scenarios, where the server cannot access private data or the structures and parameters of the client model. Our scheme stems from new insights into potential privacy breaches in SL. 
We discover a fundamental phenomenon that the client model has its own representation preference, which can be reflected through the output smashed data. More importantly, this unique information can indicate the feature extraction behavior of the client. Based on this new insight, we propose a semi-honest privacy threat, namely Feature-Oriented Reconstruction Attack (FORA). A server adversary could establish a substitute client by narrowing the reference distance with the real client, which allows the substitute model to mimic the behavior of the target model at a finer granularity. To efficiently measure the preference distance of different representations, we introduce domain Discriminator network [9, 14] and Multi-Kernel Maximum Mean Discrepancy (MK-MMD) [15, 29]. These techniques are widely used in domain adaptation [45], enabling us to project various representation preferences into a shared space for comparison. With a well-trained substitute client, the server can successfully recover the private data by constructing an inverse network." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 423, + 289, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 423, + 289, + 613 + ], + "spans": [ + { + "bbox": [ + 46, + 423, + 289, + 613 + ], + "type": "text", + "content": "We conduct our evaluation on two benchmark datasets and corresponding networks against different model partitioning strategies. The experimental results indicate that the proposed method significantly outperforms baseline attacks. Taking the reconstructed images of CelebA at layer 2 as an example, UnSplit, PCAT and FORA achieve effects of 8.70, 12.05, and 17.11 on the PSNR [20]. This demonstrates that FORA has significantly outperformed by " + }, + { + "bbox": [ + 46, + 423, + 289, + 613 + ], + "type": "inline_equation", + "content": "1.97\\mathrm{x}" + }, + { + "bbox": [ + 46, + 423, + 289, + 613 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 423, + 289, + 613 + ], + "type": "inline_equation", + "content": "1.42\\mathrm{x}" + }, + { + "bbox": [ + 46, + 423, + 289, + 613 + ], + "type": "text", + "content": " compared to the other two attacks. Although FSHA can achieve attack performance similar to ours, its malicious attack process can be promptly halted through monitoring mechanisms [8], resulting in poor reconstructions. Furthermore, we investigate the potential influences on FORA, including different public knowledge conditions and existing defense strategies, to validate the robustness of FORA." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 616, + 288, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 616, + 288, + 640 + ], + "spans": [ + { + "bbox": [ + 47, + 616, + 288, + 640 + ], + "type": "text", + "content": "The main contributions of this paper can be summarized as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "text", + "content": "- We propose a novel attack, named Feature-Oriented Reconstruction Attack (FORA). As far as we know, FORA is the first work enabling a semi-honest server to perform powerful DRA in more realistic and challenging SL systems. In such scenarios, the server has no prior knowledge of the client model or access to raw data." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 321, + 72, + 533, + 136 + ], + "blocks": [ + { + "bbox": [ + 321, + 72, + 533, + 136 + ], + "lines": [ + { + "bbox": [ + 321, + 72, + 533, + 136 + ], + "spans": [ + { + "bbox": [ + 321, + 72, + 533, + 136 + ], + "type": "image", + "image_path": "f6bf651513d456665468a4daae80b5b42c851a484cae83889e7ec689272883a5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 336, + 145, + 515, + 157 + ], + "lines": [ + { + "bbox": [ + 336, + 145, + 515, + 157 + ], + "spans": [ + { + "bbox": [ + 336, + 145, + 515, + 157 + ], + "type": "text", + "content": "Figure 1. Architecture of two-part split learning." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 178, + 545, + 299 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 304, + 178, + 545, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 178, + 545, + 226 + ], + "spans": [ + { + "bbox": [ + 304, + 178, + 545, + 226 + ], + "type": "text", + "content": "- We have uncovered an inherent vulnerability in SL, where the server can exploit rich information in the smashed data to steal client representation preference, thereby building a substitute client for better reconstruction." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 227, + 545, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 227, + 545, + 299 + ], + "spans": [ + { + "bbox": [ + 304, + 227, + 545, + 299 + ], + "type": "text", + "content": "- We conduct comprehensive experiments with various adversarial knowledge against different benchmark datasets and models. The results demonstrate that FORA can achieve state-of-the-art attack performance compared with baselines and exhibits notable robustness across different settings." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 310, + 481, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 310, + 481, + 323 + ], + "spans": [ + { + "bbox": [ + 306, + 310, + 481, + 323 + ], + "type": "text", + "content": "2. Background and Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 331, + 545, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 545, + 521 + ], + "type": "text", + "content": "Split Learning (SL). SL [2, 16, 32, 38, 42] is an emerging distributed learning paradigm for resource-limited scenarios, which can split the neural network model into both client-side and server-side. As shown in Fig. 1, the client performs forward propagation and transmits the smashed data to the server, which then uses the computed loss for backward propagation and sends the gradients of the smashed data back to the client. Both the client and server will update their weights after receiving the gradients. It is generally believed that SL provides a secure and efficient training protocol by allowing the client to retain a portion of the model and training data locally while offloading most of the computing overhead to the server [2, 16, 32, 42]. However, recent studies [6, 7, 10, 23, 31] have highlighted vulnerabilities in SL, where the server can exploit the latter part of the model to carry out privacy attacks." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 522, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 545, + 713 + ], + "type": "text", + "content": "Data Reconstruction Attack (DRA) on SL. DRA [19, 27, 35, 48] is one of the most powerful privacy attacks that aim to steal the input data by the model's intermediate features. In SL, the server can utilize the smashed data output by the client to reconstruct the training data [6, 10, 31]. One notable attack is known as FSHA [31], where a malicious attacker utilizes the elaborated loss to alter the feature space of the victim client for reconstructing private data. In UnSplit [6], the semi-honest server attempts to reconstruct the training data and client's parameters simultaneously by utilizing the smashed data. Specifically, UnSplit optimizes parameters and inputs sequentially by minimizing the outputs between the clone client and the target client. To the best of our knowledge, PCAT [10] represents the most advanced attack under the semi-honest assumption. PCAT leverages the knowledge embedded in various stages of the server models" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12131" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 70, + 102, + 123 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 102, + 123 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 102, + 123 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 102, + 123 + ], + "type": "image", + "image_path": "d5e835309a9162567d4315273cdb244bf158eb3b68e49d594462f326311a9f2c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 124, + 94, + 133 + ], + "lines": [ + { + "bbox": [ + 55, + 124, + 94, + 133 + ], + "spans": [ + { + "bbox": [ + 55, + 124, + 94, + 133 + ], + "type": "text", + "content": "(a) Original" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 111, + 70, + 163, + 122 + ], + "blocks": [ + { + "bbox": [ + 111, + 70, + 163, + 122 + ], + "lines": [ + { + "bbox": [ + 111, + 70, + 163, + 122 + ], + "spans": [ + { + "bbox": [ + 111, + 70, + 163, + 122 + ], + "type": "image", + "image_path": "1ae30e6820f48215640529140ee0d38c9e61840d9c6c9f9bd188d677fee27913.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 117, + 124, + 156, + 133 + ], + "lines": [ + { + "bbox": [ + 117, + 124, + 156, + 133 + ], + "spans": [ + { + "bbox": [ + 117, + 124, + 156, + 133 + ], + "type": "text", + "content": "(b) Model 1" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 172, + 70, + 224, + 122 + ], + "blocks": [ + { + "bbox": [ + 172, + 70, + 224, + 122 + ], + "lines": [ + { + "bbox": [ + 172, + 70, + 224, + 122 + ], + "spans": [ + { + "bbox": [ + 172, + 70, + 224, + 122 + ], + "type": "image", + "image_path": "bea9c5acd0057794372126887ef9a442e835ce3c0fe1b3bd8176c144add6ff24.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + 
"bbox": [ + 179, + 124, + 218, + 133 + ], + "lines": [ + { + "bbox": [ + 179, + 124, + 218, + 133 + ], + "spans": [ + { + "bbox": [ + 179, + 124, + 218, + 133 + ], + "type": "text", + "content": "(c) Model 2" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 144, + 287, + 198 + ], + "lines": [ + { + "bbox": [ + 46, + 144, + 287, + 198 + ], + "spans": [ + { + "bbox": [ + 46, + 144, + 287, + 198 + ], + "type": "text", + "content": "Figure 2. Input image and behavior visualization by Grad-CAM [33]. All the models are trained in CelebA with the task of smiling classification. The figure displays the original images and the representation preferences of three models trained under the same hyperparameter settings but with different random seeds." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 235, + 70, + 287, + 122 + ], + "blocks": [ + { + "bbox": [ + 235, + 70, + 287, + 122 + ], + "lines": [ + { + "bbox": [ + 235, + 70, + 287, + 122 + ], + "spans": [ + { + "bbox": [ + 235, + 70, + 287, + 122 + ], + "type": "image", + "image_path": "fb14206aa49cb0de784e118d4771ab9d576820e83a6bd4b4943f1d753c191055.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 241, + 124, + 280, + 133 + ], + "lines": [ + { + "bbox": [ + 241, + 124, + 280, + 133 + ], + "spans": [ + { + "bbox": [ + 241, + 124, + 280, + 133 + ], + "type": "text", + "content": "(d) Model 3" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 220, + 287, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 220, + 287, + 255 + ], + "spans": [ + { + "bbox": [ + 46, + 220, + 287, + 255 + ], + "type": "text", + "content": "to steal private data by constructing a pseudo-client. Unlike previous work, SFA [30] focuses on reconstructing samples during the inference stage rather than the training samples." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 256, + 287, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 256, + 287, + 472 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 287, + 472 + ], + "type": "text", + "content": "Although existing works claim that their attacks pose significant privacy threats to SL, they disregard the plausibility of their threat model. For FSHA, the server reconstructs the raw data while at the cost of destroying the client's utility. While FSHA assumes that the client is entirely free of any awareness of being maliciously disrupted, recent research [5, 8] indicates that such a malicious server can be easily detected by the client, leading to a halt in the SL. UnSplit needs the knowledge of the client's structure and is not suitable for complex networks and datasets due to the infinite searching space of input data and model parameters. As for PCAT, it requires the adversary to have access to a portion of the private dataset. This is an unreasonable assumption that violates the original intention of SL since one of the distinctive characteristics of SL is the ability to train models without sharing the raw data [42]. As a result, how to explore DRA under more realistic assumptions in SL remains an open question." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 472, + 287, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 472, + 287, + 591 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 287, + 591 + ], + "type": "text", + "content": "Domain Adaptation. Domain adaptation [9, 12, 15, 29, 40, 41, 46] is a technique that seeks to enhance the generalization of a model by transferring knowledge acquired from a source domain to a distinct yet related target domain. The core idea of domain adaptation is to map data from different domains into the same space for comparison. Here, we apply two popular methods: the domain Discriminator network [9, 41, 46] and the Multi-Kernel Maximum Mean Discrepancy (MK-MMD) function [15, 29, 46] to compare the feature spaces of different models." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 603, + 102, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 603, + 102, + 615 + ], + "spans": [ + { + "bbox": [ + 47, + 603, + 102, + 615 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 622, + 134, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 622, + 134, + 634 + ], + "spans": [ + { + "bbox": [ + 47, + 622, + 134, + 634 + ], + "type": "text", + "content": "3.1. Threat Model" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "text", + "content": "Without loss of generality, given a two-party SL protocol, the SL model " + }, + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "text", + "content": " is partitioned to a server model " + }, + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "inline_equation", + "content": "F_{s}" + }, + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "text", + "content": " and a client model " + }, + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "inline_equation", + "content": "F_{c}" + }, + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "text", + "content": ". The server aims to stealthily recover the private training data of the client through the smashed data " + }, + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "text", + "content": " output by " + }, + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "inline_equation", + "content": "F_{c}" + }, + { + "bbox": [ + 46, + 641, + 287, + 700 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 59, + 701, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 287, + 713 + ], + "type": "text", + "content": "We assume that the server adversary is a semi-honest en" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "text", + "content": "tity, ensuring that the training process is indistinguishable from ordinary training during attack. 
Furthermore, we posit that the server adversary must adhere to the foundational principle of the SL — she lacks any means of accessing client-sensitive information. Specifically, the server does not require knowledge of the structure or hyperparameters of " + }, + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "inline_equation", + "content": "F_{c}" + }, + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "text", + "content": " and is devoid of access to the client's private training dataset " + }, + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "inline_equation", + "content": "D_{priv}" + }, + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "text", + "content": ". The sole piece of public knowledge available to the server pertains to the auxiliary dataset " + }, + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "inline_equation", + "content": "D_{aux}" + }, + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "text", + "content": ", sourced from the same domain as the private samples. It's important to note that the distribution of " + }, + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "inline_equation", + "content": "D_{aux}" + }, + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "text", + "content": " typically differs from that of " + }, + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "inline_equation", + "content": "D_{priv}" + }, + { + "bbox": [ + 304, + 72, + 545, + 228 + ], + "type": "text", + "content": ". Compared to the threat model of previous works, this assumption is more reasonable and realistic." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 236, + 380, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 236, + 380, + 247 + ], + "spans": [ + { + "bbox": [ + 306, + 236, + 380, + 247 + ], + "type": "text", + "content": "3.2. Motivation" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 255, + 545, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 255, + 545, + 386 + ], + "spans": [ + { + "bbox": [ + 304, + 255, + 545, + 386 + ], + "type": "text", + "content": "Current DRAs rely overly on constructing inverse networks from input-output pairs obtained by querying the target model. However, this approach is impractical for SL because the server only has access to the client's outputs and is not qualified to query. A potential solution is to build a substitute client to mimic the target client, thus enabling the training of the inverse network. However, the variability of the substitute client's behavior affects the generalization of the inverse network to the target client, leading to the failure of the reconstruction, especially without the knowledge of the client model structure and private data distribution." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 387, + 545, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 387, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 304, + 387, + 545, + 555 + ], + "type": "text", + "content": "As illustrated in Fig. 2, we employ Grad-CAM [33] to visualize the attention of intermediate features generated by different clients. From Fig. 2 (a)-(d), it can be noticed that even for models trained under the same setup, there still exists evident differences between their image processing attention. 
This phenomenon suggests that the smashed data output by the client reflects its distinctive feature extraction behavior, which we define as representation preferences. Our general assumption is that narrowing the gap between the substitute client and the target client in terms of intermediate features can make the representation preferences of the two models more similar, which ensures that the inverse network trained by the substitute client perfectly maps the target smashed data back to the private raw data." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 563, + 518, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 563, + 518, + 574 + ], + "spans": [ + { + "bbox": [ + 306, + 563, + 518, + 574 + ], + "type": "text", + "content": "3.3. Feature-Oriented Reconstruction Attack" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": "Inspired by the differences in model representation preferences, we propose a novel data reconstruction attack against SL, called Feature-Oriented Reconstruction Attack (FORA). In order to mount FORA, the adversary needs to contrive a way to obtain the representation preferences of the " + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "inline_equation", + "content": "F_{c}" + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": ". To address this problem, we utilize domain adaptation techniques [9, 15, 29] to project different preference representations into the same space. Specifically, the adversary conducts feature-level transfer learning by exploiting the " + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "inline_equation", + "content": "Z_{c}" + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": " collected in each training iteration and then obtains a substitute model that mimics well the feature extraction be" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12132" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 82, + 71, + 301, + 213 + ], + "blocks": [ + { + "bbox": [ + 82, + 71, + 301, + 213 + ], + "lines": [ + { + "bbox": [ + 82, + 71, + 301, + 213 + ], + "spans": [ + { + "bbox": [ + 82, + 71, + 301, + 213 + ], + "type": "image", + "image_path": "fdb59d6d1256216406721cda90d25ba234660dad3dfe8f60a179ae56d16f4c76.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 218, + 257, + 228 + ], + "lines": [ + { + "bbox": [ + 137, + 218, + 257, + 228 + ], + "spans": [ + { + "bbox": [ + 137, + 218, + 257, + 228 + ], + "type": "text", + "content": "a) Substitute Model Construction" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 314, + 70, + 515, + 211 + ], + "blocks": [ + { + "bbox": [ + 314, + 70, + 515, + 211 + ], + "lines": [ + { + "bbox": [ + 314, + 70, + 515, + 211 + ], + "spans": [ + { + "bbox": [ + 314, + 70, + 515, + 211 + 
], + "type": "image", + "image_path": "979d596657903a8d50335e0fc936c272c2a31f2ed595604514504eb6a08a5742.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 355, + 218, + 465, + 228 + ], + "lines": [ + { + "bbox": [ + 355, + 218, + 465, + 228 + ], + "spans": [ + { + "bbox": [ + 355, + 218, + 465, + 228 + ], + "type": "text", + "content": "c) Private Data Reconstruction" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "lines": [ + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "spans": [ + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "text", + "content": "Figure 3. Attack pipeline of Feature-Oriented Reconstruction Attack (FORA) against SL. (a) shows the substitute model training phase. The attacker constructs a substitute model " + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "inline_equation", + "content": "\\hat{F}_c" + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "text", + "content": " using " + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{DISC}" + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{MK-MMD}" + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "text", + "content": " to mimic the behavior of the client model " + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "inline_equation", + "content": "F_c" + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "text", + "content": ". (b) means training an inverse network " + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "inline_equation", + "content": "f_c^{-1}" + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "text", + "content": " using public data " + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "inline_equation", + "content": "X_{aux}" + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "text", + "content": ". (c) represents the final attack phase using the attack model to reconstruct training data from snapshot " + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "inline_equation", + "content": "Z_{snap}" + }, + { + "bbox": [ + 46, + 238, + 547, + 283 + ], + "type": "text", + "content": " of target smashed data." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 303, + 287, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 303, + 287, + 374 + ], + "spans": [ + { + "bbox": [ + 46, + 303, + 287, + 374 + ], + "type": "text", + "content": "havior of the " + }, + { + "bbox": [ + 46, + 303, + 287, + 374 + ], + "type": "inline_equation", + "content": "F_{c}" + }, + { + "bbox": [ + 46, + 303, + 287, + 374 + ], + "type": "text", + "content": ". Through this approach, the adversary can smoothly construct an attack model (inverse mapping network) to recover the private samples. The detailed pipeline of FORA is shown in Fig. 3. It consists of three phases: substitute model construction, attack model training, and private data reconstruction." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 375, + 288, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 375, + 288, + 519 + ], + "spans": [ + { + "bbox": [ + 46, + 375, + 288, + 519 + ], + "type": "text", + "content": "Substitute Model Construction. Before SL training commences, the server initializes a substitute client, denoted by " + }, + { + "bbox": [ + 46, + 375, + 288, + 519 + ], + "type": "inline_equation", + "content": "\\hat{F}_c" + }, + { + "bbox": [ + 46, + 375, + 288, + 519 + ], + "type": "text", + "content": ". The " + }, + { + "bbox": [ + 46, + 375, + 288, + 519 + ], + "type": "inline_equation", + "content": "\\hat{F}_c" + }, + { + "bbox": [ + 46, + 375, + 288, + 519 + ], + "type": "text", + "content": " will be trained locally at the server in parallel with the victim's " + }, + { + "bbox": [ + 46, + 375, + 288, + 519 + ], + "type": "inline_equation", + "content": "F_c" + }, + { + "bbox": [ + 46, + 375, + 288, + 519 + ], + "type": "text", + "content": ", and such process will take place throughout the entire SL collaboration. In each training iteration, the client will send smashed data of the current batch to the server for completing the subsequent computations. Concurrently, the server will use the collected smashed data to perform training on the " + }, + { + "bbox": [ + 46, + 375, + 288, + 519 + ], + "type": "inline_equation", + "content": "\\hat{F}_c" + }, + { + "bbox": [ + 46, + 375, + 288, + 519 + ], + "type": "text", + "content": ". For this purpose, the server introduces the Discriminator module and the MK-MMD module to extract the representation preferences. We define its training objective as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 529, + 287, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 529, + 287, + 550 + ], + "spans": [ + { + "bbox": [ + 106, + 529, + 287, + 550 + ], + "type": "interline_equation", + "content": "\\min _ {\\hat {F} _ {c}} \\mathcal {L} _ {D I S C} + \\mathcal {L} _ {M K - M M D}, \\tag {1}", + "image_path": "4a0f2a6b9401bb25bff683c9974959c9ecd712bff97765152af628ce60267b34.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "spans": [ + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{DISC}" + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "text", + "content": " is the Discriminator module constraining " + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "inline_equation", + "content": "Z_{aux} = \\hat{F}_c(X_{aux})" + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "inline_equation", + "content": "Z_{priv} = F_c(X_{priv})" + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "text", + "content": " to be indistinguishable, while " + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{MK - MMD}" + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "text", + "content": " is the MK-MMD module making " + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "inline_equation", + "content": "Z_{aux}" + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": 
"text", + "content": " as close as possible to " + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "inline_equation", + "content": "Z_{priv}" + }, + { + "bbox": [ + 47, + 559, + 287, + 607 + ], + "type": "text", + "content": " in shared space." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 607, + 288, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 607, + 288, + 691 + ], + "spans": [ + { + "bbox": [ + 46, + 607, + 288, + 691 + ], + "type": "text", + "content": "The Discriminator [3, 9, 13] " + }, + { + "bbox": [ + 46, + 607, + 288, + 691 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 46, + 607, + 288, + 691 + ], + "type": "text", + "content": " is also a network that needs to be trained synchronously and is tasked with efficiently distinguishing the generated features between " + }, + { + "bbox": [ + 46, + 607, + 288, + 691 + ], + "type": "inline_equation", + "content": "F_{c}" + }, + { + "bbox": [ + 46, + 607, + 288, + 691 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 607, + 288, + 691 + ], + "type": "inline_equation", + "content": "\\hat{F}_{c}" + }, + { + "bbox": [ + 46, + 607, + 288, + 691 + ], + "type": "text", + "content": ", maximizing probabilities of the former and minimizing probabilities of the latter [31]. Therefore, the parameters of " + }, + { + "bbox": [ + 46, + 607, + 288, + 691 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 46, + 607, + 288, + 691 + ], + "type": "text", + "content": " will be updated to minimize the following loss function:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 700, + 287, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 700, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 56, + 700, + 287, + 715 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {D} = \\log \\left(1 - \\mathcal {D} \\big (F _ {c} (X _ {p r i v}) \\big) + \\log \\mathcal {D} \\big (\\hat {F} _ {c} (X _ {a u x}) \\big)\\right). \\quad (2)", + "image_path": "c119c549280ee877b13bbd9980b5012aefe9e7d7cac312838bd1caf1d2d13e7e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 303, + 546, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 303, + 546, + 352 + ], + "spans": [ + { + "bbox": [ + 305, + 303, + 546, + 352 + ], + "type": "text", + "content": "After each local training step of " + }, + { + "bbox": [ + 305, + 303, + 546, + 352 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 305, + 303, + 546, + 352 + ], + "type": "text", + "content": ", the server utilizes " + }, + { + "bbox": [ + 305, + 303, + 546, + 352 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 305, + 303, + 546, + 352 + ], + "type": "text", + "content": " to instruct substitute client's representation preference to be consistent with that of the victim client. Specifically, an adversarial loss is constructed as the following:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 353, + 358, + 545, + 373 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 358, + 545, + 373 + ], + "spans": [ + { + "bbox": [ + 353, + 358, + 545, + 373 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {D I S C} = \\log (1 - D (\\hat {F} _ {c} (X _ {a u x}))). 
\\tag {3}", + "image_path": "c239d55ca448953d41ccecd2bffed8185a9acf459b21c4951a0ee4b3be6ad75f.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 380, + 546, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 380, + 546, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 380, + 546, + 464 + ], + "type": "text", + "content": "The MK-MMD module [15, 29] is designed to align two sets of generated features into a shared space using kernel functions and compute their difference, where a smaller difference signifies closer representation preferences. Then, for the substitute client, the objective extends beyond maximizing the probabilities output by the " + }, + { + "bbox": [ + 304, + 380, + 546, + 464 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 380, + 546, + 464 + ], + "type": "text", + "content": ", it also seeks to minimize the MK-MMD loss function, namely:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 471, + 545, + 504 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 471, + 545, + 504 + ], + "spans": [ + { + "bbox": [ + 314, + 471, + 545, + 504 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {M K - M M D} = \\left\\| \\phi \\left(\\hat {F} _ {c} \\left(X _ {a u x}\\right)\\right) - \\phi \\left(F _ {c} \\left(X _ {p r i v}\\right)\\right) \\right\\| _ {\\mathcal {H}}, \\tag {4}", + "image_path": "9074a555b6fdf8d40c8357c76a379ee1fac6ff51da91f36fc5b4c021660e4bc5.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 373, + 506, + 545, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 506, + 545, + 574 + ], + "spans": [ + { + "bbox": [ + 373, + 506, + 545, + 574 + ], + "type": "interline_equation", + "content": "\\left\\{ \\begin{array}{c} \\phi = \\sum_ {j = 1} ^ {m} \\beta_ {j} k _ {j}, \\\\ \\sum_ {j = 1} ^ {m} \\beta_ {j} = 1, \\beta_ {j} \\geq 0, \\forall j, \\end{array} \\right. \\tag {5}", + "image_path": "4ef601f6693e96cd2d915be186a1f6e90719955d9ed55e573d1aa358b10460fa.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 582, + 546, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 546, + 630 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 546, + 630 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 582, + 546, + 630 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 582, + 546, + 630 + ], + "type": "text", + "content": " is a single kernel function, " + }, + { + "bbox": [ + 304, + 582, + 546, + 630 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 304, + 582, + 546, + 630 + ], + "type": "text", + "content": " denotes a set of kernel functions that project different smashed data into Reproducing Kernel Hilbert Space " + }, + { + "bbox": [ + 304, + 582, + 546, + 630 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 304, + 582, + 546, + 630 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 582, + 546, + 630 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 582, + 546, + 630 + ], + "type": "text", + "content": " is the weight coefficient corresponding to the single kernel function." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "content": "Attack Model Training. At the end of the training of SL, the server can obtain a substitute client with a feature extraction behavior extremely similar to that of the victim client. Moreover, its feature space is known to the adversary, who can recover the original input from the smashed data by applying an inverse network (denoted as " + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "inline_equation", + "content": "f_{c}^{-1}" + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "content": "). Following previous DRAs [19, 35], we adopt the " + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "inline_equation", + "content": "f_{c}^{-1}" + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "content": " consist" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "12133" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 131 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 131 + ], + "type": "text", + "content": "ing of a set of Transposed Convolution layers and Tanh activations as our attack model. The server can leverage the auxiliary dataset to train the attack model by minimizing the mean square error between " + }, + { + "bbox": [ + 46, + 72, + 287, + 131 + ], + "type": "inline_equation", + "content": "f_{c}^{-1}(\\hat{F}_{c}(X_{aux}))" + }, + { + "bbox": [ + 46, + 72, + 287, + 131 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 72, + 287, + 131 + ], + "type": "inline_equation", + "content": "X_{aux}" + }, + { + "bbox": [ + 46, + 72, + 287, + 131 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 89, + 140, + 287, + 157 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 140, + 287, + 157 + ], + "spans": [ + { + "bbox": [ + 89, + 140, + 287, + 157 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {f _ {c} ^ {- 1}} = \\left\\| f _ {c} ^ {- 1} \\left(\\hat {F} _ {c} \\left(X _ {a u x}\\right)\\right) - X _ {a u x} \\right\\| _ {2} ^ {2}. \\tag {6}", + "image_path": "7aac2688113100bfeb2e7cee28f42b269ebc2c269acbe3382698b872e96f63c4.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 165, + 287, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 165, + 287, + 260 + ], + "spans": [ + { + "bbox": [ + 46, + 165, + 287, + 260 + ], + "type": "text", + "content": "Private Data Reconstruction. The server keeps a snapshot " + }, + { + "bbox": [ + 46, + 165, + 287, + 260 + ], + "type": "inline_equation", + "content": "Z_{\\text{snap}} = F_c(X_{\\text{priv}})" + }, + { + "bbox": [ + 46, + 165, + 287, + 260 + ], + "type": "text", + "content": " of all smashed data output by the target client under the final training iteration for reconstruction. 
Since the substitute client is able to mimic the target client's representation preferences well, the server can subtly use " + }, + { + "bbox": [ + 46, + 165, + 287, + 260 + ], + "type": "inline_equation", + "content": "f_c^{-1}" + }, + { + "bbox": [ + 46, + 165, + 287, + 260 + ], + "type": "text", + "content": " to perform the attack by mapping the target smashed data directly into the private raw data space, namely:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 261, + 287, + 275 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 261, + 287, + 275 + ], + "spans": [ + { + "bbox": [ + 121, + 261, + 287, + 275 + ], + "type": "interline_equation", + "content": "X _ {p r i v} ^ {*} = f _ {c} ^ {- 1} \\left(Z _ {s n a p}\\right). \\tag {7}", + "image_path": "5619195d83222db027dd3000d5a2ae4f425172743d57237fc8cf1fd7b9dc4d6c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 280, + 284, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 280, + 284, + 293 + ], + "spans": [ + { + "bbox": [ + 47, + 280, + 284, + 293 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 47, + 280, + 284, + 293 + ], + "type": "inline_equation", + "content": "X_{priv}^{*}" + }, + { + "bbox": [ + 47, + 280, + 284, + 293 + ], + "type": "text", + "content": " are the reconstructed private training samples." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 304, + 128, + 317 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 304, + 128, + 317 + ], + "spans": [ + { + "bbox": [ + 47, + 304, + 128, + 317 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 323, + 163, + 337 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 323, + 163, + 337 + ], + "spans": [ + { + "bbox": [ + 47, + 323, + 163, + 337 + ], + "type": "text", + "content": "4.1. Experimental Setup" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 342, + 287, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 287, + 496 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 287, + 496 + ], + "type": "text", + "content": "Datasets. In our experiments, we rely on CIFAR-10 [26] and CelebA [28] to validate the attacks, due to their dominance in the research on SL [6, 10, 31]. They will be used as private data for the client's target training tasks. According to Sec. 3.1, we assume that the server adversary has access to a set of auxiliary samples that are distinct from the client's private data. Therefore, we choose CINIC-10 [4] and FFHQ [24] as the adversary's auxiliary dataset, respectively. We exclude images in CINIC-10 that overlapped with CIFAR-10, and randomly select 5,000 samples and 10,000 samples from the preprocessed CINIC-10 and FFHQ as the final auxiliary data. Appendix A.1 provides the detailed information for different datasets." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 498, + 287, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 287, + 653 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 287, + 653 + ], + "type": "text", + "content": "Models. We consider two popular types of neural network architectures, including MobileNet [21] and ResNet-18 [17], as target models for the classification tasks of CIFRA-10 and CelebA, respectively. We set various split points for different target models to show our attack performance. 
Since the server is entirely unaware of the client's model structure from Sec. 3.1, we use VGG blocks [34] (consisting of a sequence of Convolutional, BatchNorm, ReLU, and MaxPool layers) to construct substitute models. In addition, the adversary's substitute models adaptively depend on the size of the intermediate features output by the client. All the architecture information and splitting schemes used in this paper are reported in Appendix A.2." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "type": "text", + "content": "Metrics. In addition to analyzing the qualitative results of attack performances visually, we chose three quantitative metrics to evaluate the quality of the reconstructed images: Structural Similarity (SSIM) [47], Peak Signal-to-Noise Ratio (PSNR) [20], and Learned Perceptual Image" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 312, + 71, + 442, + 169 + ], + "blocks": [ + { + "bbox": [ + 312, + 71, + 442, + 169 + ], + "lines": [ + { + "bbox": [ + 312, + 71, + 442, + 169 + ], + "spans": [ + { + "bbox": [ + 312, + 71, + 442, + 169 + ], + "type": "image", + "image_path": "836bb92b9f1c7a3cfd9825cb75d5f249da0af7787e60ecd1fafb4bdc52d54786.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 343, + 172, + 408, + 182 + ], + "lines": [ + { + "bbox": [ + 343, + 172, + 408, + 182 + ], + "spans": [ + { + "bbox": [ + 343, + 172, + 408, + 182 + ], + "type": "text", + "content": "(a) Detection Score" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 449, + 69, + 541, + 170 + ], + "blocks": [ + { + "bbox": [ + 449, + 69, + 541, + 170 + ], + "lines": [ + { + "bbox": [ + 449, + 69, + 541, + 170 + ], + "spans": [ + { + "bbox": [ + 449, + 69, + 541, + 170 + ], + "type": "image", + "image_path": "e65cfc024f9d9f27e345480615bc19d12409b3e354862a61dae73b7dfa2ccd6c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 451, + 172, + 538, + 182 + ], + "lines": [ + { + "bbox": [ + 451, + 172, + 538, + 182 + ], + "spans": [ + { + "bbox": [ + 451, + 172, + 538, + 182 + ], + "type": "text", + "content": "(b) Reconstruction Results" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 192, + 545, + 247 + ], + "lines": [ + { + "bbox": [ + 304, + 192, + 545, + 247 + ], + "spans": [ + { + "bbox": [ + 304, + 192, + 545, + 247 + ], + "type": "text", + "content": "Figure 4. Attack performance comparison of FSHA [31] and FORA on CIFAR-10 with layer 2. (a) shows the detection score of two attacks detected by GS. (b) represents the reconstruction results of two attacks, and FSHA-GS is the reconstructed images when detected by GS." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 271, + 545, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 271, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 304, + 271, + 545, + 319 + ], + "type": "text", + "content": "Patch Similarity (LPIPS) [49]. We also use Cosine Similarity and Mean Square Error to measure the similarity between the substitute client and the target client in feature space." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 321, + 546, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 321, + 546, + 477 + ], + "spans": [ + { + "bbox": [ + 304, + 321, + 546, + 477 + ], + "type": "text", + "content": "Attack Baselines. We mainly compare our approach with three representative existing methods, which are FSHA [31], UnSplit [6], and PCAT [10]. For the malicious attack FSHA, we use sophisticated detection mechanism to jointly evaluate the attack's effectiveness. For the semi-honest attack UnSplit, we make it consistent with our experimental settings to ensure fairness. PCAT requires an understanding of the learning task while relying on a subset of the private training data to build the pseudo-client, and in order to comply with this assumption, we set the proportion of the CIFAR-10 private dataset to be " + }, + { + "bbox": [ + 304, + 321, + 546, + 477 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 304, + 321, + 546, + 477 + ], + "type": "text", + "content": " (the maximal threshold suggested by the original paper), and for more complex CelebA dataset, we extend the proportion to be " + }, + { + "bbox": [ + 304, + 321, + 546, + 477 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 304, + 321, + 546, + 477 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 490, + 493, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 490, + 493, + 502 + ], + "spans": [ + { + "bbox": [ + 306, + 490, + 493, + 502 + ], + "type": "text", + "content": "4.2. Comparison with Malicious Attack" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 510, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 712 + ], + "type": "text", + "content": "Since FSHA severely undermines the utility of the target client, recent work has proposed the Gradients Scrutinizer (GS) [8] to defend against such hijacking attacks by detecting the gradients returned from the server to the client. The GS will perform a similarity computation on the gradients, and if the calculated value is lower than a set threshold, it will be considered as a potential attack, resulting in the training of SL being immediately suspended. More details about GS can be found in Appendix C.1. We can observe from Fig. 4 that the reconstruction results of FORA are almost the same as those of FSHA in the unprotected SL system. Although FSHA performs well in capturing fine graphical details, it also leads to noticeable color shifts in some reconstruction results. Moreover, since FSHA drastically tampers with the updated gradient returned to the client model, it is easily detected by GS, leading to the failure of reconstruction." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12134" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 91, + 544, + 238 + ], + "blocks": [ + { + "bbox": [ + 75, + 70, + 517, + 83 + ], + "lines": [ + { + "bbox": [ + 75, + 70, + 517, + 83 + ], + "spans": [ + { + "bbox": [ + 75, + 70, + 517, + 83 + ], + "type": "text", + "content": "Table 1. Data reconstruction results of UnSplit, PCAT, and FORA on CIFAR-10 and CelebA in different splitting settings." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 91, + 544, + 238 + ], + "lines": [ + { + "bbox": [ + 50, + 91, + 544, + 238 + ], + "spans": [ + { + "bbox": [ + 50, + 91, + 544, + 238 + ], + "type": "table", + "html": "
Split PointUnSplitCIFAR-10 PCATFORACelebA PCATFORA
Ground Truth
layer 1
layer 2
layer 3
layer 4
", + "image_path": "2ba9f9548dbd84d43d058b967ae6a199cb78c325e996f787d1308eca388d5de6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 50, + 289, + 286, + 334 + ], + "blocks": [ + { + "bbox": [ + 47, + 257, + 287, + 280 + ], + "lines": [ + { + "bbox": [ + 47, + 257, + 287, + 280 + ], + "spans": [ + { + "bbox": [ + 47, + 257, + 287, + 280 + ], + "type": "text", + "content": "Table 2. SSIM, PSNR, and LPIPS of the reconstructed images on CIFAR-10 among three attacks." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 289, + 286, + 334 + ], + "lines": [ + { + "bbox": [ + 50, + 289, + 286, + 334 + ], + "spans": [ + { + "bbox": [ + 50, + 289, + 286, + 334 + ], + "type": "table", + "html": "
Split PointSSIM†PSNR†LPIPS↓
UnSplitPCATFORAUnSplitPCATFORAUnSplitPCATFORA
layer 10.1710.8530.92611.0322.1025.870.6770.2190.120
layer 20.1010.6420.83010.4817.2922.190.6890.4320.252
layer 30.1040.2910.62211.1413.1818.930.7410.6150.381
layer 40.1080.1210.0308.6211.0810.450.7220.6760.628
", + "image_path": "c88701109558b0800b3df6794b6ffb57bb9e55cfd854e31b84a1b0a77efaf200.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 357, + 253, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 357, + 253, + 370 + ], + "spans": [ + { + "bbox": [ + 47, + 357, + 253, + 370 + ], + "type": "text", + "content": "4.3. Comparison with Semi-Honest Attacks" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 377, + 287, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 377, + 287, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 377, + 287, + 664 + ], + "type": "text", + "content": "Reconstruction Performance. We show in detail the reconstruction results for UnSplit, PCAT, and our proposed FORA on all split points for both datasets. As depicted in Tab. 1, compared to other attacks, the images reconstructed by FORA exhibit a significant improvement visually. Due to the vast search space and inefficient optimization approach, UnSplit almost fails to recover training data in both datasets, even at layer 1. Although PCAT can reconstruct training samples in the shallow settings of the CIFAR-10 dataset, such as layer 1 and layer 2, the reconstruction quality is still lower than that of FORA. For the more complex CelebA dataset, PCAT struggles to produce quality reconstructions. Tab. 2 and Tab. 3 provides the quantitative results of the attacks. Except for the anomaly at the layer 4 split point of CIFAR-10, where FORA slightly underperforms PCAT in terms of SSIM and PSNR metric, FORA is superior to both methods in all other settings, especially in terms of the LPIPS metric, which is considered to be more aligned with human perception. Notably, even though PCAT has access to a subset of the private data, while FORA only obtains samples with different distributions, FORA substantially surpasses PCAT for reconstruction. This further emphasizes the robust privacy threat our approach poses to SL. More reconstructed images are presented in Appendix B.1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": "Feature Similarity. As shown in Tab. 4, we measure the feature distance between the proxy clients built by UnSplit, PCAT, and FORA and the target client at layer 2. The results show that the substitute clients trained by our method" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 308, + 289, + 544, + 334 + ], + "blocks": [ + { + "bbox": [ + 305, + 257, + 545, + 280 + ], + "lines": [ + { + "bbox": [ + 305, + 257, + 545, + 280 + ], + "spans": [ + { + "bbox": [ + 305, + 257, + 545, + 280 + ], + "type": "text", + "content": "Table 3. SSIM, PSNR and LPIPS of the reconstructed images on CelebA among three attacks." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 289, + 544, + 334 + ], + "lines": [ + { + "bbox": [ + 308, + 289, + 544, + 334 + ], + "spans": [ + { + "bbox": [ + 308, + 289, + 544, + 334 + ], + "type": "table", + "html": "
Split PointSSIM↑PSNR↑LPIPS↓
UnSplitPCATFORAUnSplitPCATFORAUnSplitPCATFORA
layer 10.1370.3330.4859.2613.4517.720.8040.6340.320
layer 20.1700.3160.4768.7012.0517.110.7470.6530.381
layer 30.1560.1640.19110.6611.6314.190.7930.7310.509
layer 40.0840.0920.1927.9410.6013.000.8040.7380.621
", + "image_path": "f3f2abea8553bf01eacd7597f8275305bea6a827bd292adc2416586f0712896e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 308, + 377, + 544, + 414 + ], + "blocks": [ + { + "bbox": [ + 305, + 346, + 545, + 368 + ], + "lines": [ + { + "bbox": [ + 305, + 346, + 545, + 368 + ], + "spans": [ + { + "bbox": [ + 305, + 346, + 545, + 368 + ], + "type": "text", + "content": "Table 4. Feature similarity measured by Mean Square Error and Cosine Similarity on CIFAR-10 and CelebA at layer 2." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 377, + 544, + 414 + ], + "lines": [ + { + "bbox": [ + 308, + 377, + 544, + 414 + ], + "spans": [ + { + "bbox": [ + 308, + 377, + 544, + 414 + ], + "type": "table", + "html": "
MethodCIFAR-10CelebA
UnSplitPCATFORAUnSplitPCATFORA
Mean Square Error↓1.0410.5280.27450.7731.3530.753
Cosine Similarity↑0.2000.5920.8100.3330.4800.778
", + "image_path": "d494332e5272a043c25fcb5e07e620bd904acadbed53c2f404ab7373fb889fc2.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 435, + 545, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 435, + 545, + 531 + ], + "spans": [ + { + "bbox": [ + 304, + 435, + 545, + 531 + ], + "type": "text", + "content": "exhibit more similar representation preferences to the target client. The basic optimization approach of UnSplit makes it difficult to regularize the feature space of the proxy client. As for PCAT, it simply makes the smashed data generated by the pseudo model more favorable to the server model but fails to mimic the behavior of the client model. In contrast, FORA can impose stronger constraints in the feature space, which directly contributes to successful reconstruction." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 539, + 453, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 539, + 453, + 552 + ], + "spans": [ + { + "bbox": [ + 305, + 539, + 453, + 552 + ], + "type": "text", + "content": "4.4. Effect of Auxiliary Dataset" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 557, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 545, + 689 + ], + "type": "text", + "content": "Next, we analyze the effect of several important factors regarding the auxiliary dataset on attack performance. We first explore the impact of the fitting level of substitute models by varying the size of the auxiliary data. Then, we discuss the impact of the presence of a more significant distribution shift, i.e., the absence of some categories, between the auxiliary and target samples. Finally, we relax the major assumption about the adversary, namely that the server has access to the similarly distributed auxiliary dataset. We set the split point at layer 2 for ablation, and the full experimental results are provided in Appendix B.2." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "Auxiliary Set Size. As shown in Fig. 
5, when we reduce the size of the auxiliary dataset to half of the previous" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12135" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 42, + 68, + 286, + 162 + ], + "blocks": [ + { + "bbox": [ + 42, + 68, + 286, + 162 + ], + "lines": [ + { + "bbox": [ + 42, + 68, + 286, + 162 + ], + "spans": [ + { + "bbox": [ + 42, + 68, + 286, + 162 + ], + "type": "image", + "image_path": "701ddc8906a6caf8a08f949f056e6942ca427b20a20dbba3f861c22ee36bd4ff.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 171, + 286, + 194 + ], + "lines": [ + { + "bbox": [ + 46, + 171, + 286, + 194 + ], + "spans": [ + { + "bbox": [ + 46, + 171, + 286, + 194 + ], + "type": "text", + "content": "Figure 5. Effects of varying auxiliary data size on FORA performed on CIFAR-10 and CelebA at layer 2." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 217, + 286, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 217, + 286, + 335 + ], + "spans": [ + { + "bbox": [ + 46, + 217, + 286, + 335 + ], + "type": "text", + "content": "one, the attack performance of FORA remains almost unchanged. When we further reduce the number of auxiliary samples to " + }, + { + "bbox": [ + 46, + 217, + 286, + 335 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 46, + 217, + 286, + 335 + ], + "type": "text", + "content": ", the quality of the reconstructed images decreases slightly but still preserves the full outline and most of the details. In that case, the percentage of the public auxiliary dataset is very small compared to the huge private training set (50,000 for CIFAR-10 and 162770 for CelebA), only " + }, + { + "bbox": [ + 46, + 217, + 286, + 335 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 46, + 217, + 286, + 335 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 217, + 286, + 335 + ], + "type": "inline_equation", + "content": "1.2\\%" + }, + { + "bbox": [ + 46, + 217, + 286, + 335 + ], + "type": "text", + "content": ", respectively. This implies that even with a rather limited auxiliary dataset, FORA is still able to effectively reconstruct the client's training samples." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 50, + 379, + 285, + 448 + ], + "blocks": [ + { + "bbox": [ + 47, + 348, + 286, + 369 + ], + "lines": [ + { + "bbox": [ + 47, + 348, + 286, + 369 + ], + "spans": [ + { + "bbox": [ + 47, + 348, + 286, + 369 + ], + "type": "text", + "content": "Table 5. Effect of absence of categories on FORA performed on CIFAR-10 at layer 2." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 379, + 285, + 448 + ], + "lines": [ + { + "bbox": [ + 50, + 379, + 285, + 448 + ], + "spans": [ + { + "bbox": [ + 50, + 379, + 285, + 448 + ], + "type": "table", + "html": "
Absent CategoriesSSIM↑PSNR↑LPIPS↓
Living0.76820.440.300
Non-living0.73218.430.395
", + "image_path": "8900c65da9484302d93788fc7566785d1853587a0eba187f8b99d4d485f611d1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 461, + 286, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 461, + 286, + 675 + ], + "spans": [ + { + "bbox": [ + 46, + 461, + 286, + 675 + ], + "type": "text", + "content": "Absence of Categories. It is likely that the adversary's public auxiliary data misses some semantic classes of the private data distribution. To model this situation, we create two special auxiliary datasets for CIFAR-10, one containing \"Living\" items (birds, cats, etc.), and the other containing \"Non-living\" items (airplanes, cars, etc.), both with 5,000 randomly sampled samples from CINIC-10. As presented in Tab. 5, even if a class is absent from the auxiliary dataset, FORA can still reconstruct samples of that class. In fact, FORA focuses on stealing the mapping relationship between client inputs and smashed data and therefore does not require class alignment. We observe that the absence of the \"Non-living\" category leads to a moderate degradation in the reconstruction results. We believe that the reason behind this phenomenon is that the greater variation of classes within the \"Non-living\" category helps to increase the generalization level of the substitute client, which in turn facilitates improved attack performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 677, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 286, + 712 + ], + "type": "text", + "content": "Distribution Shift. Here we further analyze the impact of the auxiliary dataset distribution on FORA. In contrast to our default experimental setup, we selected 5000" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 70, + 545, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 70, + 545, + 125 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 545, + 125 + ], + "type": "text", + "content": "Table 6. Effects of auxiliary dataset distribution shift on FORA performed on CIFAR-10 and CelebA at layer 2. \"Different\" represents auxiliary data sampled from CINIC-10, and FFHQ respectively, and \"Same\" means auxiliary dataset come from their original test set." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 333, + 134, + 516, + 190 + ], + "blocks": [ + { + "bbox": [ + 333, + 134, + 516, + 190 + ], + "lines": [ + { + "bbox": [ + 333, + 134, + 516, + 190 + ], + "spans": [ + { + "bbox": [ + 333, + 134, + 516, + 190 + ], + "type": "table", + "html": "
Dataset SizeCIFAR-10CelebA
DifferentSameDifferentSame
SSIM↑0.8300.8320.4760.777
PSNR↑22.1922.7817.1121.55
LPIPS↓0.2520.2070.3810.264
", + "image_path": "edd4b73216dcfe8709721d132ebbce09a545e6a1ebccf03eecbfa909d0fe2e77.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 215, + 545, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 215, + 545, + 382 + ], + "spans": [ + { + "bbox": [ + 304, + 215, + 545, + 382 + ], + "type": "text", + "content": "and 10000 images from the original testing sets of CIFAR-10 and CelebA, respectively, as the auxiliary datasets with the same distribution. As shown in Tab. 6, a more similar distribution can facilitate substitute clients stealing the representation preference, resulting in better reconstruction performance. We observe that the attack results on the facial dataset are more vulnerable to the data distribution shift compared to the object dataset. One possible reason is that tasks related to facial datasets are more sensitive to variations in sampling methods and alignment conditions across different datasets. For object datasets, due to substantial distribution variation between different categories of themselves, e.g. ranging from animals to vehicles, which contributes to their robustness in handling distribution shifts." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 399, + 497, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 399, + 497, + 411 + ], + "spans": [ + { + "bbox": [ + 306, + 399, + 497, + 411 + ], + "type": "text", + "content": "4.5. Effect of Substitute Client Structure" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 421, + 545, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 421, + 545, + 552 + ], + "spans": [ + { + "bbox": [ + 304, + 421, + 545, + 552 + ], + "type": "text", + "content": "After validating the impact of the auxiliary dataset, here we are interested in the impact of substitute client architectures on FORA. We chose three different model structures as attack variants: the VGG block [34], the ResNet block [18], and the DenseNet block [22]. As can be seen in Fig. 6, the SSIM and LPIPS quantization results for the reconstructed images remain similar. This indicates that the extracted representation preferences on the basis of MK-MDD and Discriminator are close to that of the target client, despite the fact that the substitute clients use different architectures. Additional results are shown in Appendix B.3." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 312, + 576, + 425, + 664 + ], + "blocks": [ + { + "bbox": [ + 312, + 576, + 425, + 664 + ], + "lines": [ + { + "bbox": [ + 312, + 576, + 425, + 664 + ], + "spans": [ + { + "bbox": [ + 312, + 576, + 425, + 664 + ], + "type": "image", + "image_path": "1da3abbe82a698155e555fcc9f607cf556db9aec0699ed6a4138cc6533ebfd61.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 351, + 668, + 386, + 678 + ], + "lines": [ + { + "bbox": [ + 351, + 668, + 386, + 678 + ], + "spans": [ + { + "bbox": [ + 351, + 668, + 386, + 678 + ], + "type": "text", + "content": "(a) SSIM↑" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 426, + 577, + 539, + 664 + ], + "blocks": [ + { + "bbox": [ + 426, + 577, + 539, + 664 + ], + "lines": [ + { + "bbox": [ + 426, + 577, + 539, + 664 + ], + "spans": [ + { + "bbox": [ + 426, + 577, + 539, + 664 + ], + "type": "image", + "image_path": "ac4bad8af3fc1c0c6fcc83a17a528bb70236a7b2e279079f5ae816bce37794a7.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 463, + 668, + 502, + 678 + ], + "lines": [ + { + "bbox": [ + 463, + 668, + 502, + 678 + ], + "spans": [ + { + "bbox": [ + 463, + 668, + 502, + 678 + ], + "type": "text", + "content": "(b) LPIPS↓" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 688, + 545, + 710 + ], + "lines": [ + { + "bbox": [ + 305, + 688, + 545, + 710 + ], + "spans": [ + { + "bbox": [ + 305, + 688, + 545, + 710 + ], + "type": "text", + "content": "Figure 6. Effect for FORA with varying substitute model architectures on both datasets at layer 2." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12136" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 203, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 203, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 203, + 85 + ], + "type": "text", + "content": "4.6. Counter Defense Techniques" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 91, + 287, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 91, + 287, + 209 + ], + "spans": [ + { + "bbox": [ + 46, + 91, + 287, + 209 + ], + "type": "text", + "content": "There have been a number of defenses aimed at perturbing the smashed data claiming that they can reduce the risk of privacy leakage in SL to a certain extent. We select three well-known defense techniques, i.e., distance correlation minimization [37, 43, 44], differential privacy [1], and noise obfuscation [39], to evaluate the effectiveness of FORA. Tab. 7 shows the limited impact of these defenses on FORA. See Appendix C.1 for more details on defense techniques. See Appendix C.2 for more defense results and discussions about possible adaptive defenses." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 211, + 287, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 211, + 287, + 294 + ], + "spans": [ + { + "bbox": [ + 46, + 211, + 287, + 294 + ], + "type": "text", + "content": "Distance Correlation Minimization (DCOR). DCOR can uncorrelate irrelevant and sensitive features from the smashed data associated with the target client, which results in a lack of detailed expression of the input data in the representation preferences learned by the substitute client, especially in colors. However, FORA retains the ability to reconstruct the structural details of the private image." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "text", + "content": "Differential Privacy (DP). DP protects training data privacy by adding carefully crafted Laplace noise to the gradients. However, the effectiveness of DP against FORA is very limited under all privacy budgets. When the test accuracy of the model is reduced by nearly " + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "text", + "content": " (the functionality is severely damaged), the SSIM of the reconstructed samples still reaches about " + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 46, + 295, + 287, + 403 + ], + "type": "text", + "content": " of the original. This tradeoff between classification accuracy and defense strength makes DP not feasible for practical applications of SL." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 403, + 287, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 403, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 46, + 403, + 287, + 510 + ], + "type": "text", + "content": "Noise Obfuscation (NO). NO is a direct defense to destroy the mapping relationship between smashed and input data. We observe that on the one hand, the noise of a small scale enhances the generalization level of the SL model to maintain or even improve the classification accuracy, on the other hand raising the noise scale helps to introduce deviations to the features extracted from the target client, making it more difficult to learn the representations and reconstruct the data for FORA." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 523, + 198, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 523, + 198, + 536 + ], + "spans": [ + { + "bbox": [ + 47, + 523, + 198, + 536 + ], + "type": "text", + "content": "5. Discussion and Conclusion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 544, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 581 + ], + "type": "text", + "content": "In this section, we first discuss the potential improvement and scalability of FORA, then we summarize this work. We also show limitation and future work in Appendix D." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "text", + "content": "Improvement using Generative Adversarial Networks. Li et al. [27] propose a novel StyleGAN-based reconstruction attack against split inference, and their research focus is orthogonal to our contribution. Therefore, the reconstruction task in FORA can be further optimized using pre-trained StyleGAN [25]. As shown in Fig. 7, the well-trained substitute client in FORA combined with StyleGAN optimization can provide additional improvements in reconstruction performance." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "Attack on Label-Protected SL. Another popular setup for SL requires the client to keep the labels locally [42], but" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 308, + 101, + 544, + 342 + ], + "blocks": [ + { + "bbox": [ + 305, + 71, + 545, + 92 + ], + "lines": [ + { + "bbox": [ + 305, + 71, + 545, + 92 + ], + "spans": [ + { + "bbox": [ + 305, + 71, + 545, + 92 + ], + "type": "text", + "content": "Table 7. Effect of utility and FORA performance against three defense techniques on CIFAR-10 at layer 2." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 101, + 544, + 342 + ], + "lines": [ + { + "bbox": [ + 308, + 101, + 544, + 342 + ], + "spans": [ + { + "bbox": [ + 308, + 101, + 544, + 342 + ], + "type": "table", + "html": "
Defense HyperparamTest Acc (%)SSIM↑PSNR↑LPIPS↓
0 (w/o defense)71.250.83022.190.252
DCOR (α)
0.270.910.69217.910.360
0.570.060.62815.990.441
0.869.720.56315.400.471
DP (ε)
+∞69.680.82322.360.225
10063.050.71120.360.394
1061.930.62118.030.487
NO (σ)
1.074.390.64017.290.367
2.073.140.58316.290.444
5.070.620.39414.350.550
", + "image_path": "379b51ce8496100593363a9674f8bb353c358774a2dcff3a6e567cff62f441fd.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 354, + 541, + 432 + ], + "blocks": [ + { + "bbox": [ + 306, + 354, + 541, + 432 + ], + "lines": [ + { + "bbox": [ + 306, + 354, + 541, + 432 + ], + "spans": [ + { + "bbox": [ + 306, + 354, + 541, + 432 + ], + "type": "image", + "image_path": "238c6feaaeab283549e787aaf5c94c2dbf6042f312a0d23b6bd54a3758866b99.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 440, + 545, + 463 + ], + "lines": [ + { + "bbox": [ + 305, + 440, + 545, + 463 + ], + "spans": [ + { + "bbox": [ + 305, + 440, + 545, + 463 + ], + "type": "text", + "content": "Figure 7. Reconstructed CelebA images of FORA and FORA-G, FOAR-G represents FORA combined with StyleGAN." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 485, + 545, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 485, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 485, + 545, + 533 + ], + "type": "text", + "content": "this case does not have any influence on the implementation and performance of FORA. Since FORA is only related to the smashed data output from the target client, it does not depend on the server model as well as the training task." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 534, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 546, + 713 + ], + "type": "text", + "content": "Conclusion. In this work, we propose a novel data reconstruction attack against SL, named Feature-Oriented Reconstruction Attack (FORA). Unlike all previous attack schemes, FORA enables a semi-honest server to secretly reconstruct the client's private training data with very little prior knowledge. Thanks to our new perspective of extracting representation preferences from smashed data, the server can contemporaneously train a substitute client that approximates the target client's behavior to conduct the attack. Our extensive experiments in various settings demonstrate the state-of-the-art performance of FORA. Due to its stealth and effectiveness, it poses a real privacy threat to SL. We hope our work can inspire future efforts to explore it in more practical SL, and we are eager to draw attention to more robust defense techniques." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12137" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Martin Abadi, Andy Chu, Ian Goodfellow, H Brendan McMahan, Ilya Mironov, Kunal Talwar, and Li Zhang. Deep learning with differential privacy. In Proceedings of the 2016 ACM SIGSAC conference on computer and communications security, pages 308-318, 2016. 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 147, + 288, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 147, + 288, + 213 + ], + "spans": [ + { + "bbox": [ + 53, + 147, + 288, + 213 + ], + "type": "text", + "content": "[2] Sharif Abuadbba, Kyuyeon Kim, Minki Kim, Chandra Thapa, Seyit A Camtepe, Yansong Gao, Hyoungshick Kim, and Surya Nepal. Can we use split learning on 1d cnn models for privacy preserving training? In Proceedings of the 15th ACM Asia Conference on Computer and Communications Security, pages 305-318, 2020. 1, 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 214, + 287, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 214, + 287, + 256 + ], + "spans": [ + { + "bbox": [ + 53, + 214, + 287, + 256 + ], + "type": "text", + "content": "[3] Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In International conference on machine learning, pages 214-223. PMLR, 2017. 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 258, + 287, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 287, + 291 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 287, + 291 + ], + "type": "text", + "content": "[4] Luke N Darlow, Elliot J Crowley, Antreas Antoniou, and Amos J Storkey. Cinic-10 is not imagenet or cifar-10. arXiv preprint arXiv:1810.03505, 2018. 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 292, + 287, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 292, + 287, + 335 + ], + "spans": [ + { + "bbox": [ + 53, + 292, + 287, + 335 + ], + "type": "text", + "content": "[5] Ege Erdoğan, Alptekin Kupçü, and A Ercument Cicek. Splitguard: Detecting and mitigating training-hijacking attacks in split learning. In Proceedings of the 21st Workshop on Privacy in the Electronic Society, pages 125-137, 2022. 
1, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 336, + 287, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 336, + 287, + 390 + ], + "spans": [ + { + "bbox": [ + 53, + 336, + 287, + 390 + ], + "type": "text", + "content": "[6] Ege Erdoğan, Alptekin Kupçü, and A Ercümen Çiçek. Unsplit: Data-oblivious model inversion, model stealing, and label inference attacks against split learning. In Proceedings of the 21st Workshop on Privacy in the Electronic Society, pages 115-124, 2022. 1, 2, 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 392, + 287, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 392, + 287, + 446 + ], + "spans": [ + { + "bbox": [ + 53, + 392, + 287, + 446 + ], + "type": "text", + "content": "[7] Chong Fu, Xuhong Zhang, Shouling Ji, Jinyin Chen, Jingzheng Wu, Shanqing Guo, Jun Zhou, Alex X Liu, and Ting Wang. Label inference attacks against vertical federated learning. In 31st USENIX Security Symposium (USENIX Security 22), pages 1397-1414, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 447, + 287, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 447, + 287, + 523 + ], + "spans": [ + { + "bbox": [ + 53, + 447, + 287, + 523 + ], + "type": "text", + "content": "[8] Jiayun Fu, Xiaojing Ma, Bin B. Zhu, Pingyi Hu, Ruixin Zhao, Yaru Jia, Peng Xu, Hai Jin, , and Dongmei Zhang. Focusing on pinocchio's nose: A gradients scrutinizer to thwart split-learning hijacking attacks using intrinsic attributes. In 30th Annual Network and Distributed System Security Symposium, NDSS 2023, San Diego, California, USA, February 27-March 3, 2023. The Internet Society, 2023. 1, 2, 3, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 525, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 525, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 53, + 525, + 287, + 567 + ], + "type": "text", + "content": "[9] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In International conference on machine learning, pages 1180-1189. PMLR, 2015. 2, 3, 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 569, + 287, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 569, + 287, + 623 + ], + "spans": [ + { + "bbox": [ + 48, + 569, + 287, + 623 + ], + "type": "text", + "content": "[10] Xinben Gao and Lan Zhang. PCAT: Functionality and data stealing from split learning by Pseudo-Client attack. In 32nd USENIX Security Symposium (USENIX Security 23), pages 5271–5288, Anaheim, CA, 2023. USENIX Association. 1, 2, 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 624, + 287, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 679 + ], + "type": "text", + "content": "[11] Yansong Gao, Minki Kim, Sharif Abuadbba, Yeonjae Kim, Chandra Thapa, Kyuyeon Kim, Seyit A Camtepe, Hyoungshick Kim, and Surya Nepal. End-to-end evaluation of federated learning and split learning for internet of things. arXiv preprint arXiv:2003.13376, 2020. 
1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[12] Muhammad Ghifary, W Bastiaan Kleijn, and Mengjie Zhang. Domain adaptive neural networks for object recognition. In PRICAI 2014: Trends in Artificial Intelligence:" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "13th Pacific Rim International Conference on Artificial Intelligence, Gold Coast, QLD, Australia, December 1-5, 2014. Proceedings 13, pages 898-904. Springer, 2014. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 107, + 545, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 545, + 160 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 545, + 160 + ], + "type": "text", + "content": "[13] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2014. 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 162, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 162, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 308, + 162, + 545, + 217 + ], + "type": "text", + "content": "[14] Ian J Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Proceedings of the 27th International Conference on Neural Information Processing Systems-Volume 2, pages 2672–2680, 2014. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 217, + 545, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 217, + 545, + 271 + ], + "spans": [ + { + "bbox": [ + 308, + 217, + 545, + 271 + ], + "type": "text", + "content": "[15] Arthur Gretton, Dino Sejdinovic, Heiko Strathmann, Sivaraman Balakrishnan, Massimiliano Pontil, Kenji Fukumizu, and Bharath K Sriperumbudur. Optimal kernel choice for large-scale two-sample tests. Advances in neural information processing systems, 25, 2012. 2, 3, 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 272, + 545, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 272, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 308, + 272, + 545, + 304 + ], + "type": "text", + "content": "[16] Otkrist Gupta and Ramesh Raskar. Distributed learning of deep neural network over multiple agents. Journal of Network and Computer Applications, 116:1-8, 2018. 1, 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 305, + 545, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 305, + 545, + 349 + ], + "spans": [ + { + "bbox": [ + 308, + 305, + 545, + 349 + ], + "type": "text", + "content": "[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "type": "text", + "content": "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "type": "text", + "content": "[19] Zecheng He, Tianwei Zhang, and Ruby B Lee. Model inversion attacks against collaborative inference. In Proceedings of the 35th Annual Computer Security Applications Conference, pages 148-162, 2019. 1, 2, 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 437, + 545, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 437, + 545, + 470 + ], + "spans": [ + { + "bbox": [ + 308, + 437, + 545, + 470 + ], + "type": "text", + "content": "[20] Alain Hore and Djemel Ziou. Image quality metrics: Psnr vs. ssm. In 2010 20th international conference on pattern recognition, pages 2366-2369. IEEE, 2010. 2, 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 471, + 545, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 471, + 545, + 525 + ], + "spans": [ + { + "bbox": [ + 308, + 471, + 545, + 525 + ], + "type": "text", + "content": "[21] Andrew G Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861, 2017. 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 525, + 545, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 525, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 308, + 525, + 545, + 569 + ], + "type": "text", + "content": "[22] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4700-4708, 2017. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 570, + 545, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 570, + 545, + 613 + ], + "spans": [ + { + "bbox": [ + 308, + 570, + 545, + 613 + ], + "type": "text", + "content": "[23] Sanjay Kariyappa and Moinuddin K Qureshi. Exploit: Extracting private labels in split learning. In 2023 IEEE Conference on Secure and Trustworthy Machine Learning (SaTML), pages 165-175. IEEE, 2023. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 614, + 545, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 614, + 545, + 658 + ], + "spans": [ + { + "bbox": [ + 308, + 614, + 545, + 658 + ], + "type": "text", + "content": "[24] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 
5" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 658, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 547, + 713 + ], + "type": "text", + "content": "[25] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 8" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "12138" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 95 + ], + "type": "text", + "content": "[26] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 95, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 95, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 95, + 287, + 149 + ], + "type": "text", + "content": "[27] Ziang Li, Mengda Yang, Yaxin Liu, Juan Wang, Hongxin Hu, Wenzhe Yi, and Xiaoyang Xu. GAN you see me? enhanced data reconstruction attacks against split inference. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 2, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 150, + 287, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 150, + 287, + 194 + ], + "spans": [ + { + "bbox": [ + 48, + 150, + 287, + 194 + ], + "type": "text", + "content": "[28] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of the IEEE international conference on computer vision, pages 3730-3738, 2015. 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 194, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 194, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 194, + 287, + 239 + ], + "type": "text", + "content": "[29] Mingsheng Long, Yue Cao, Jianmin Wang, and Michael Jordan. Learning transferable features with deep adaptation networks. In International conference on machine learning, pages 97-105. PMLR, 2015. 2, 3, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "spans": [ + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "type": "text", + "content": "[30] Sida Luo, Fangchao Yu, Lina Wang, Bo Zeng, Zhi Pang, and Kai Zhao. Feature sniffer: A stealthy inference attacks framework on split learning. In International Conference on Artificial Neural Networks, pages 66-77. Springer, 2023. 
3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 284, + 287, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 284, + 287, + 337 + ], + "spans": [ + { + "bbox": [ + 48, + 284, + 287, + 337 + ], + "type": "text", + "content": "[31] Dario Pasquini, Giuseppe Ateniese, and Massimo Bernaschi. Unleashing the tiger: Inference attacks on split learning. In Proceedings of the 2021 ACM SIGSAC Conference on Computer and Communications Security, pages 2113-2129, 2021. 1, 2, 4, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 338, + 287, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 338, + 287, + 381 + ], + "spans": [ + { + "bbox": [ + 48, + 338, + 287, + 381 + ], + "type": "text", + "content": "[32] Maarten G Poirot, Praneeth Vepakomma, Ken Chang, Jayashree Kalpathy-Cramer, Rajiv Gupta, and Ramesh Raskar. Split learning for collaborative deep learning in healthcare. arXiv preprint arXiv:1912.12115, 2019. 1, 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 383, + 287, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 383, + 287, + 446 + ], + "spans": [ + { + "bbox": [ + 48, + 383, + 287, + 446 + ], + "type": "text", + "content": "[33] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision, pages 618-626, 2017. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 449, + 287, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 287, + 480 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 287, + 480 + ], + "type": "text", + "content": "[34] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 5, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 482, + 287, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 482, + 287, + 547 + ], + "spans": [ + { + "bbox": [ + 48, + 482, + 287, + 547 + ], + "type": "text", + "content": "[35] Abhishek Singh, Ayush Chopra, Ethan Garza, Emily Zhang, Praneeth Vepakomma, Vivek Sharma, and Ramesh Raskar. Disco: Dynamic and invariant sensitive channel obfuscation for deep neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12125-12135, 2021. 2, 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 548, + 287, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 548, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 48, + 548, + 287, + 579 + ], + "type": "text", + "content": "[36] Congzheng Song and Vitaly Shmatikov. Overlearning reveals sensitive attributes. arXiv preprint arXiv:1905.11742, 2019. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 581, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 581, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 581, + 287, + 612 + ], + "type": "text", + "content": "[37] Gábor J Székely, Maria L Rizzo, and Nail K Bakirov. Measuring and testing dependence by correlation of distances. 2007. 
8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 614, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 287, + 667 + ], + "type": "text", + "content": "[38] Chandra Thapa, Pathum Chamikara Mahawaga Arachchige, Seyit Camtepe, and Lichao Sun. Splitfed: When federated learning meets split learning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 8485-8493, 2022. 1, 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "text", + "content": "[39] Tom Titcombe, Adam J Hall, Pavlos Papadopoulos, and Daniele Romanini. Practical defences against model inversion attacks for split neural networks. arXiv preprint arXiv:2104.05743, 2021. 8" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 498 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[40] Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "text", + "content": "[41] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7167-7176, 2017. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "type": "text", + "content": "[42] Praneeth Vepakomma, Otkrist Gupta, Tristan Swedish, and Ramesh Raskar. Split learning for health: Distributed deep learning without sharing raw patient data. arXiv preprint arXiv:1812.00564, 2018. 1, 2, 3, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 197, + 545, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 545, + 239 + ], + "type": "text", + "content": "[43] Praneeth Vepakomma, Otkrist Gupta, Abhimanyu Dubey, and Ramesh Raskar. Reducing leakage in distributed deep learning for sensitive health data. arXiv preprint arXiv:1812.00564, 2, 2019. 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "type": "text", + "content": "[44] Praneeth Vepakomma, Abhishek Singh, Otkrist Gupta, and Ramesh Raskar. Nopeek: Information leakage reduction to share activations in distributed deep learning. In 2020 International Conference on Data Mining Workshops (ICDMW), pages 933–942. IEEE, 2020. 
1, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 297, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 297, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 297, + 545, + 319 + ], + "type": "text", + "content": "[45] Mei Wang and Weihong Deng. Deep visual domain adaptation: A survey. Neurocomput., 312(C):135-153, 2018. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 320, + 545, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 320, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 320, + 545, + 342 + ], + "type": "text", + "content": "[46] Mei Wang and Weihong Deng. Deep visual domain adaptation: A survey. Neurocomputing, 312:135-153, 2018. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 343, + 545, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 343, + 545, + 386 + ], + "spans": [ + { + "bbox": [ + 307, + 343, + 545, + 386 + ], + "type": "text", + "content": "[47] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 388, + 545, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 441 + ], + "type": "text", + "content": "[48] Mengda Yang, Ziang Li, Juan Wang, Hongxin Hu, Ao Ren, Xiaoyang Xu, and Wenzhe Yi. Measuring data reconstruction defenses in collaborative inference systems. Advances in Neural Information Processing Systems, 35:12855-12867, 2022. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 443, + 545, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 443, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 307, + 443, + 545, + 498 + ], + "type": "text", + "content": "[49] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 
5" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "12139" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/6897fec6-4bb7-4167-a28e-16a34134af6a_content_list.json b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/6897fec6-4bb7-4167-a28e-16a34134af6a_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..397cd5377c86da0a52dcd48622fc570f8788e9ea --- /dev/null +++ b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/6897fec6-4bb7-4167-a28e-16a34134af6a_content_list.json @@ -0,0 +1,1446 @@ +[ + { + "type": "text", + "text": "A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition", + "text_level": 1, + "bbox": [ + 106, + 128, + 867, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yusheng Dai†, Hang Chen†, Jun Du†*, Ruoyu Wang†, Shihao Chen†, Haotian Wang†, Chin-Hui Lee‡ \n† University of Science and Technology of China, Hefei, China \n‡ Georgia Institute of Technology, Atlanta, America \njundu@ustc.edu.cn", + "bbox": [ + 89, + 214, + 875, + 290 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 318, + 313, + 334 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Advanced Audio-Visual Speech Recognition (AVSR) systems have been observed to be sensitive to missing video frames, performing even worse than single-modality models. While applying the common dropout techniques to the video modality enhances robustness to missing frames, it simultaneously results in a performance loss when dealing with complete data input. In this study, we delve into this contrasting phenomenon through the lens of modality bias and uncover that an excessive modality bias towards the audio modality induced by dropout constitutes the fundamental cause. Next, we present the Modality Bias Hypothesis (MBH) to systematically describe the relationship between the modality bias and the robustness against missing modality in multimodal systems. Building on these findings, we propose a novel Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) framework to reduce over-reliance on the audio modality, maintaining performance and robustness simultaneously. Finally, to address an entirely missing modality, we adopt adapters to dynamically switch decision strategies. The effectiveness of our proposed approach is evaluated through comprehensive experiments on the MISP2021 and MISP2022 datasets. Our code is available at https://github.com/dalision/ModalBiasAVSR.", + "bbox": [ + 76, + 351, + 473, + 714 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 731, + 209, + 748 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Audio-Visual Speech Recognition (AVSR) is a multimodal application inspired by human speech perception. It outperforms single-modality models by incorporating noise-invariant complementary information from visual cues, especially in noisy environments. Driven by increasingly large open-source datasets and models [1-4], AVSR has achieved significant advancements across various bench", + "bbox": [ + 75, + 757, + 468, + 864 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "marks with a simple end-to-end design [5, 6].", + "bbox": [ + 500, + 320, + 802, + 335 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent research on AVSR focuses on more challenging real-life scenarios. Techniques such as reinforcement learning [7] and carefully designed fusion architecture [8-10] are used to accommodate varying noise levels and overlapping speech. Self-supervised learning [11] and automatic labeling techniques [12] are applied facing insufficient audiovisual pairs. Meanwhile, various synchronization modules have been developed for audio-visual alignment.[13-15]. However, restricted to the open-source datasets [1, 2, 16], most studies often assume that each video is recorded in relatively high quality, without blurring, corruption, or loss. Moreover, there is growing evidence to suggest that current advanced AVSR systems are highly susceptible to perturbations in video modality [17, 18], resulting in significant performance degradation even perform worse than single-modality models [19, 20].", + "bbox": [ + 496, + 337, + 892, + 578 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Missing video modality is a crucial and common problem for AVSR applied in real-life scenarios [1, 17, 19, 20]. It arises from various causes, including losses induced by network latency or hardware limitations, as well as errors in lip movement tracking due to occlusion and side-face. Most researchers utilize dropout techniques on video training data to improve robustness against missing modalities [19-23]. It has been demonstrated to effectively mitigate the out-of-distribution (OOD) issue and alleviate performance degradation without additional inference consumption or complex modules. However, it leads to new challenges on real-life scenarios with low-quality input. In our early experiments on MISP datasets [24, 25], a contradictory phenomenon could be observed in Figure 1: while applying the dropout strategy to video training data enhance the robustness against missing video modality, it also leads to performance degradation when dealing with complete data input.", + "bbox": [ + 496, + 580, + 893, + 838 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{1}$ Distinguished from the classic dropout that randomly deactivates nodes during neural network training, dropout in this paper specifically refers to a data augmentation technique that partially or entirely replaces original video frames with padding frames.", + "bbox": [ + 500, + 849, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author. This work was supported by the National Natural Science Foundation of China under Grant No. 62171427.", + "bbox": [ + 75, + 875, + 468, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "27445", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "On the other hand, all AVSR systems consistently lag behind unimodal ASR when facing completely missing video.", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We attempt to analyze the reasons behind the above-mentioned phenomenon from the perspective of modality bias. Existing multimodal applications can be categorized into two types: (1) modality-balanced systems, in which each modality contributes relatively equally to the model decision, such as Multimodal Emotion Recognition (MER) [26] and Hate Speech Detection (HSD) [27]; (2) modality-biased systems that over-relies on certain modality that contains more task-related information. AVSR is a typical modality-biased system dominated by audio. Therefore, an intuitive insight suggests that although dropout on the video modality could address the OOD problem between the training and inference stages, it may exacerbate the modality bias on audio, subsequently demonstrating robustness towards missing video input.", + "bbox": [ + 75, + 123, + 467, + 349 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we first verify this intuitive hypothesis in Section 2 by quantitatively analyzing the differences between AVSR and unimodal automatic speech recognition (ASR). The results uncover that the modality bias essentially represents a shift from a multimodal to a unimodal distribution on audio modality in latent representation space. Next in Section 3, we extend our findings to more general multimodal applications and propose the Modality Bias Hypothesis (MBH) to systematically describe the relationship between modality bias and robustness to missing modality. In Sections 4 and 5, we are committed to achieving two objectives: improving the robustness of AVSR without degradation with complete input, and ensuring that AVSR consistently outperforms ASR when faced with severe or complete video missing. To this end, we present Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD), in which the robust student model leverages hidden knowledge extracted by a relatively unbiased teacher model to prevent the distribution of task-relevant representations from transferring into a unimodal distribution. The method is observed to enhance missing robustness through the learning of complementary information from the other modality and utilizing context information from adjacent frames. 
For video severely or entirely missing situations, adapters are adopted to the modality-specific branch to dynamically switch decision bias dominated by modality-specific representations. The key contributions can be summarized as follows:", + "bbox": [ + 75, + 352, + 467, + 773 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We investigate dropout-induced modality bias and uncover that it fundamentally manifests as a shift from a multimodal to a unimodal distribution of audio modality in the hidden representation subspace as detailed in Section 2.", + "- We propose using the Modality Bias Hypothesis (MBH) to systematically describe the decision-making process influenced by modal bias in a multimodal system, as well as the relationship between modal bias and modality" + ], + "bbox": [ + 75, + 777, + 467, + 901 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6a0267eae5b2c781ec1afd17d26ab22fc95c8dbb2c438d4226566d8f42616102.jpg", + "image_caption": [ + "Figure 1. CER (in %) degradation curves of AVSR trained with different dropout rates on video frames. Compared with the baseline AVSR without dropout (in red), other AVSR systems perform better with missing input but worse with complete data input. As the training dropout rate increases, the CER curve of AVSR gradually converges to that of ASR (dotted line)." + ], + "image_footnote": [], + "bbox": [ + 513, + 90, + 875, + 237 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/10d69b3367aba09ae0b39b2e4ecfa79114bfddc6aecf2be02a65bf1238c9ed9d.jpg", + "image_caption": [ + "Figure 2. Two groups of similarity analysis between ASR and AVSR transcriptions. In both groups, an increase in the similarity of recognition transcriptions is observed as the training dropout rate increases. The similarity is measured by relative CER (in $\\%$ ), where the ASR transcription replaces the ground truth." + ], + "image_footnote": [], + "bbox": [ + 504, + 343, + 880, + 450 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/545f39a42791b5bb0533696dab1c6818d4580dc726561b65356f3f6773430bdf.jpg", + "image_caption": [ + "Figure 3. Similarity matrices of intermediate representations between ASR and different AVSR settings. As training dropout rates increase, the diagonal lines become brighter, indicating closer proximity between the multimodal and the unimodal distributions of the latent decisive subspace in AVSR." + ], + "image_footnote": [], + "bbox": [ + 501, + 542, + 888, + 664 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "missing robustness as detailed in Section 3.", + "bbox": [ + 500, + 747, + 790, + 761 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) to enhance robustness against missing video and avoid performance degradation with complete input. For entirely missing modalities, adapters are adopted to dynamically switch decision bias to the specific modality as detailed in Section 5.", + "- We achieve top AVSR performances on MISP2021 and MISP2022 datasets while maintaining robustness against missing video frames as detailed in Section 7." 
+ ], + "bbox": [ + 496, + 763, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "27446", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5c5202081d9db7ac9cea6d81d4169310a37b96165cb383e093b282056bce0291.jpg", + "image_caption": [ + "Figure 4. An illustration of the Modality Bias Hypothesis (MBH). In the left subplot, the task-relevant component (shaded part) of the latent representations consists of $Z^{sa}$ , $Z^{sv}$ and $Z^g$ , representing audio-specific, visual-specific decision features and modality-general decisive features respectively. The corresponding proportions are denoted by $\\alpha$ , $\\beta$ , and $\\gamma$ . The right subplot shows a dynamic process of decisive bias with an increasing training dropout rate. Dropout leads to a consistent modality bias on audio, regardless of the extent of the missing." + ], + "image_footnote": [], + "bbox": [ + 117, + 89, + 851, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. Dropout-Induced Modality Bias", + "text_level": 1, + "bbox": [ + 76, + 356, + 372, + 372 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We investigate the contradictory phenomenon 1 by examining the character error rate (CER) across five Mandarin AVSR systems varying training dropout rates (from 0.0 to 0.7) and testing video missing rates (from 0.0 to 1.0). As shown in Figure 1, two trends are observed: (1) in terms of absolute CER, the model trained with a higher dropout rate deteriorate more on no-missing complete multimodal data and slightly missing video frames, but it performs better on severely and entirely missing video frames; and (2) in term relative performance, the CER degradation curve of the AVSR model trained with a higher dropout rate tends to converge to the unimodal ASR recognition curve. We further ensure whether the similarity of performance degradation curves directly corresponds to the recognition transcription similarity of ASR and AVSR in Figure 2. As we expected, an increase in training dropout rate leads to higher transcription similarity between AVSR and ASR across different test settings.", + "bbox": [ + 75, + 383, + 473, + 655 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To understand this, we investigate the discrepancy in decisive patterns of ASR and each AVSR. We aim to quantify the divergence between latent decision distributions of these models by measuring the distance of intermediate representation samples. Through random sampling of complete audio-visual data batches, we generate intermediate layer representations using the encoder of ASR or AVSR trained at different dropout rates. Figure 3 illustrates cosine distance-based similarity matrices for the intermediate representations between ASR and different AVSR configurations. The diagonal elements in each subplot represent the similarity between intermediate representations from the same inputs. Notably, with an increase training dropout rate, these diagonal lines brighten, signifying a rise in intermediate representation similarity. 
This suggests closer proximity of the AVSR multimodal distribution in the la", + "bbox": [ + 75, + 659, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tent decisive subspace to the unimodal distribution of ASR.", + "bbox": [ + 498, + 357, + 890, + 372 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Through the aforementioned three experiments, we have discovered that increasing the training dropout rate on video data leads to increased similarity between AVSR and ASR in the performance degradation curves, recognition results, and intermediate representation subspace distribution. The findings reveal the significant impact of dropout in introducing effectively perturbs the distribution of multimodal training data. It leads to a shift from multimodal joint distribution to unimodal distribution, resulting in a decision bias towards audio during the decision-making process, as reflected in the output similarity of ASR. We refer to this phenomenon induced by dropout as dropout-induced modality bias. Although dropout-induced bias enhances the robustness of missing video data to some extent, we emphasize that it contradicts the primary design of AVSR as a robust application in noisy environments with supplementary visual cues. The introduction of artificial noise (padding frames) in video data induces the model to converge toward trivial solutions, leading to an excessive dependence on the audio modality. This over-reliance, in turn, leads to a degradation in performance when presented with complete multimodal input in a noisy environment.", + "bbox": [ + 496, + 372, + 892, + 705 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Modality Bias Hypothesis (MBH)", + "text_level": 1, + "bbox": [ + 498, + 718, + 805, + 736 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we propose the Modality Bias Hypothesis (MBH) based on the Modality Bias Venn diagram (MBVD) to systematically describe the relationship between modality bias and robustness to missing modality.", + "bbox": [ + 496, + 743, + 893, + 804 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Modality Bias Venn Diagram As shown in Figure 4 on the left, the MBVD depicts the components of the latent decisive feature of multimodal systems in the form of a Venn Diagram. It is a variant of the Modality Venn Diagram (MVD) employed in multimodal knowledge distillation [28]. Without loss of generality, we take AVSR as an", + "bbox": [ + 496, + 809, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "27447", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "example and define $\\mathcal{X}^a$ , $\\mathcal{X}^v$ , and $\\mathcal{V}$ as the original feature space of audio, video and label space, respectively. The decisive feature $z$ , commonly a form of intermediate layer representation, consists of two modality components $z^a$ (blue circles) and $z^v$ (green circle). We denote $I(\\cdot)$ as mutual information and $I(\\cdot|\\cdot)$ as conditional mutual information. The task-relevant decisive feature $z^u$ ( $I(z,y)$ ) is depicted by the shaded region and can be further divided into three components. $z^g$ ( $I(z^a,z^v,y)$ ) represents modality-general decisive features, while $z^{sa}$ ( $I(z^u,z^a|z^g)$ ) and $z^{sv}$ ( $I(z^u,z^v|z^g)$ ) represent modality-specific decisive features. We denote their proportions in $z^u$ as $\\alpha$ , $\\beta$ , and $\\gamma$ , respectively. 
These features collectively contribute to determining the final task output $\\hat{y}$ . For AVSR, a higher $\\alpha$ represents a greater decision bias of the model on the audio modality, focusing more on speech than lip movements. A larger $\\gamma$ indicates a model's inclination towards modality synergy by maximizing the mutual information between modalities for decision-making, as in some modality-balanced models [26, 27]. Furthermore, $z^u$ is generated by the original features $x^a,x^v$ as $g(x^a,x^v;\\phi)$ , where $g(\\phi)$ can be seen as a neural network-based transfer such as an encoder with parameters $\\phi$ . Therefore, the decision process of the multimodal system can be decomposed into two steps, following the Bayesian process: the MBVD hidden decisive feature generation step and the decision step:", + "bbox": [ + 76, + 90, + 472, + 484 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP \\left(y \\mid x ^ {a}, x ^ {v}\\right) = P \\left(y \\mid z ^ {\\mu}\\right) P \\left(z ^ {\\mu} \\mid x ^ {a}, x ^ {v}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 127, + 493, + 468, + 511 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Modality Bias Hypothesis Based on MBVD, we give a systematic description of the relationship between modality bias and robustness to missing modality in the view of MBH. As shown in Figure 4 on the right, by applying dropout with different rates $k_{i} \\in [0,1]$ on video training data, the original video feature space $\\mathcal{X}^v$ can be split into a series of subsets $\\{\\mathcal{X}_{k_1}^v, \\mathcal{X}_{k_2}^v, \\dots, \\mathcal{X}_{k_n}^v\\}$ . The samples from space $\\mathcal{X}^a \\times \\mathcal{X}_{k_i}^v$ are denoted as dyads $(x^a, x_{k_i}^v)$ . Compared to the model trained on complete multimodal datas $(x^a, x_{0.0}^v)$ , the model trained on data pairs $(x^a, x_\\theta^v)$ with a video dropout rate $\\theta_{train} \\in (0.0,1.0)$ exhibits a greater decision bias on audio modality with larger $\\alpha$ , smaller $\\beta$ , and $\\gamma$ . As $\\theta$ approaches 1.0, the task-relevant decisive feature $z_u$ becomes steadily dominated by the audio-specific decisive feature $z_a$ , resulting in a transformation from a bimodal distribution in the latent representation subspace to a unimodal one. The decision pattern of the multimodal model shifts from $p(y|z_u)$ to $p(y|z_a)$ .", + "bbox": [ + 76, + 521, + 468, + 794 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During the inference stage, these multimodal models display different modality biases. For the model trained on complete multimodal data or dropout on audio with a larger $\\gamma$ , they tend to search general information shared among modalities. This hypothesis effectively explains the observed experimental phenomena in previous studies. For modality-biased models, such as Multimodal Senti", + "bbox": [ + 75, + 795, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ment Analysis (MSA) [22] dominated by text, Multimodal Speech Enhancement (MSE) [29] dominated by audio, as well as AVSR dominated by audio [21, 23, 30], it has been observed that applying dropout on the primary modality helps alleviate modality bias and brings about slight improvements when dealing with complete input. On the other hand, the AVSR model with larger $\\alpha$ and smaller $\\gamma$ values tends to focus more on speech and neglect complementary information from lip movements. 
When dealing with partially or completely missing video data, the model with larger $\\alpha$ shows its robustness, which aligns well with the aforementioned experimental observations.", + "bbox": [ + 496, + 90, + 893, + 272 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD)", + "text_level": 1, + "bbox": [ + 498, + 282, + 893, + 319 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For the robustness training of modality-bias systems, it is crucial to avoid dropout-induced modality bias on the primary modality. Dropout indeed alleviates the OOD problem to some extent but encourages multimodal models to pursue trivial solutions at the same time. Ideal robust multimodal models are expected to achieve two goals: (1) learn to extract mutual information across modalities rather than relying on a certain modality when facing complete paired input, and (2) learn to complement information from the other modality and utilize context information from adjacent frames. To prevent excessive modality bias caused by dropout, we propose a novel Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) framework to constrain the distribution of the multimodal feature space during the robustness training phase.", + "bbox": [ + 496, + 325, + 893, + 551 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Unlike traditional knowledge distillation methods, firstly, the teacher model is trained on the complete multimodal data pairs, while the student model is trained on missing video data. The teacher model is relatively unbiased with a higher proportion of modality-general decisive features $z^g$ in the MBVD space. During the training process of the student model, the teacher model serves as an anchor point, preventing the student model from shifting towards a unimodal distribution on the audio modality. Note that the difference between teacher and student models in our method is modality bias varies, rather than size, architecture as in common KD methods [31-34]. Additionally, distillation occurs at the hidden layer rather than the logistic outputs, aiming to minimize the distances between decision distribution samples of the teacher and student models and further constrain the intermediate representation subspace distribution of the student model. In practice, we take the knowledge from the intermediate representation of the cross-modal encoder layers.", + "bbox": [ + 496, + 553, + 893, + 839 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Here, we adopt the symbol definitions from Section 3 and provide a formal description of MDA-KD. 
For a naturally modal-biased multimodal system, the data samples from original feature space $\\mathcal{X}^a\\times \\mathcal{X}_{k_i}^v\\times \\mathcal{Y}$ can be de", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "27448", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2347c63917e0f5e204c25d112e973d1dbe14d5be0e8c0cc6666d68101e21c453.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 93, + 166, + 109 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/2a6b94d043398449cf5959309e9f5beba5e85c745f34a5952de8bc2ab581c9e9.jpg", + "image_caption": [ + "#" + ], + "image_footnote": [], + "bbox": [ + 125, + 109, + 166, + 119 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Module with adapter", + "bbox": [ + 171, + 97, + 253, + 106 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Cross-attention module", + "bbox": [ + 174, + 109, + 263, + 118 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Element-wise summation", + "bbox": [ + 174, + 122, + 269, + 130 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/46eff9de648e3d26883a52fc7df7cc6cc55883095d0a243925718fe9ae73fb23.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 133, + 163, + 142 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Data flow when activating adapters", + "bbox": [ + 171, + 133, + 305, + 143 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f4dd81b5bbe68a5613e660802a40ddd40a1d85187e579743fd61f0ebdd1f90b0.jpg", + "image_caption": [ + "Teacher AVSR Model (Complete data input)" + ], + "image_footnote": [], + "bbox": [ + 127, + 167, + 359, + 325 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5787d642549b4f3a2ef5c4d4e97105f287a027a219b92484233511a129808ad6.jpg", + "image_caption": [ + "Audiovisual Speech Recognition Network Architecture" + ], + "image_footnote": [], + "bbox": [ + 379, + 107, + 844, + 273 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1bb738a5a29126ea7c344cb6c593778f81c6c8b0dd7d625608b41c88a2a330b9.jpg", + "image_caption": [ + "Lip Video Frames", + "Figure 5. Overall framework of the proposed AVSR system. We address challenging real-world scenarios involving missing video frames and noisy speech with an overlap rate exceeding $40\\%$ during both the training and testing stages. In MDA-KD, latent knowledge is sampled from the latent distribution of the teacher model with complete data input. This latent knowledge serves as an anchor point to prevent dropout-induced modality bias during the robustness training of the student network. For entirely missing video input, the MS-Adapter is activated to enable a dynamic decision switch." + ], + "image_footnote": [], + "bbox": [ + 383, + 290, + 586, + 332 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/810312020fab927186c92f5ac20058a5aad783bc615ad0562d567a0e6a688c7b.jpg", + "image_caption": [ + "Noisy Audio Waveform" + ], + "image_footnote": [], + "bbox": [ + 596, + 290, + 841, + 330 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "noted as triples $(x^{a}, x_{k_{i}}^{v}, y)$ . For simplicity, we denote $x_{0.0}^{v}$ as $x^{v}$ . 
The teacher model $T e(\\phi)$ is first trained on complete multimodal data $(x^{a}, x^{v}, y)$ model with parameters $\\phi$ , and the model's decision process can be formulated as $P_{t e}(y \\mid x^{a}, x^{v})$ in a Bayesian decision problem. We assume that the teacher model is a neural network $g(\\phi)$ and it is trained by minimizing the following loss function, a form of multitask learning.", + "bbox": [ + 76, + 422, + 468, + 542 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nT e (\\phi) = \\min _ {\\phi} \\mathcal {L} _ {\\mathrm {M L T}} (g (x ^ {a}, x ^ {v}; \\phi), y), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 558, + 468, + 580 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {M L T}} \\left(x ^ {a}, x ^ {v}; \\phi\\right) = \\lambda \\log P _ {\\mathrm {C T C}} \\left(y \\mid x ^ {a}, x ^ {v}\\right) \\tag {3} \\\\ + (1 - \\lambda) \\log P _ {\\mathrm {A t t}} \\left(y _ {i} \\mid x ^ {a}, x ^ {v}\\right), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 96, + 583, + 468, + 619 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the tunable parameter $\\lambda \\in [0,1]$ is used to balance the sequence-level Connectionist Temporal Classification (CTC) loss and the frame-wise Cross Entropy (CE) loss, which serve as the standard end-to-end ASR training objectives. During the training of the student model, the dropout strategy is applied to the secondary modality $v$ , while the teacher model is frozen with complete multimodal data as input. It is important to note that the student and teacher models have the same network architecture. From the perspective of MBVD, the whole decision process of the multimodal model can be divided into hidden feature generation step and decision step.", + "bbox": [ + 75, + 623, + 468, + 804 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nP _ {s t} \\left(y \\mid x ^ {a}, x _ {k _ {i}} ^ {v}\\right) = P _ {s t} \\left(y \\mid z ^ {\\mu}\\right) P _ {s t} \\left(z ^ {\\mu} \\mid x ^ {a}, x _ {k _ {i}} ^ {v}\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 93, + 825, + 468, + 844 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nP _ {t e} \\left(y \\mid x ^ {a}, x ^ {v}\\right) = P _ {t e} \\left(y \\mid z ^ {\\mu}\\right) P _ {t e} \\left(z ^ {\\mu} \\mid x ^ {a}, x ^ {v}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 101, + 849, + 468, + 866 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $z^{\\mu} \\in \\mathbb{R}^{d\\mu}$ represents the combined representation of modality-specific decisive features $z^{sa} \\in \\mathbb{R}^{da}$ , $z^{sv} \\in$", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$\\mathbb{R}^{dv}$ , and modality-general decisive features $z^g \\in \\mathbb{R}^{dg}$ . The tuple $(z^{sa}, z^{sv}, z^g)$ represents a sample drawn from the MBVD hidden features space, denoted as $\\mathcal{Z}^{sa} \\times \\mathcal{Z}^{sv} \\times \\mathcal{Z}^g$ .", + "bbox": [ + 498, + 422, + 890, + 467 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "initialized on the parameter of the teacher model, we introduce an additional loss term to constrain the dynamic process of the student model's MBVD feature distribution in robust training. 
The distance between batch samples from the student and the teacher model is used to approximate the difference of distribution, which serves as a form of frame-level knowledge distillation.", + "bbox": [ + 496, + 468, + 890, + 573 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {K D}} \\left(x ^ {a}, x ^ {v}, x _ {k} ^ {v}; \\phi_ {t e}, \\phi_ {s t}\\right) = \\mathrm {K L} \\left(S _ {t e}, S _ {s t}\\right), \\\\ S _ {t e} = \\sigma_ {T} \\left(\\operatorname {S a m p l e} \\left(P _ {t e} \\left(z ^ {\\mu} \\mid x ^ {a}, x ^ {v}\\right)\\right)\\right), \\tag {6} \\\\ S _ {s t} = \\sigma_ {T} \\left(\\operatorname {S a m p l e} \\left(P _ {s t} \\left(z ^ {\\mu} \\mid x ^ {a}, x _ {k _ {i}} ^ {v}\\right)\\right)\\right), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 550, + 580, + 890, + 637 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\sigma_T(x)$ denotes the SoftMax function with temperature $T$ and Sample represents the sample function. This distribution approximation serves two main purposes. Firstly, during training, when the student network encounters a missing modality feature $x_{k_i}^v$ , the convergence of the student's decisive feature $z^u = g(x^a,x_{k_i}^v;\\phi_{st})$ towards the teacher's decisive feature $z^{u} = g(x^{a},x^{v};\\phi_{te})$ encourages the utilization of contextual information from $x_{k_i}^v$ . Additionally, with the dual cross-attention design, the process complements the information extracted from $x^a$ , effectively addressing the condition of missing frames and promoting out-of-distribution generality. On the other hand, the KD loss is used to minimize the distance between the distributions of the teacher and student models, preventing the student model from converging to trivial solutions. Subsequently, we train the student model jointly with a weighted sum of the standard training loss and distillation loss:", + "bbox": [ + 496, + 643, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "27449", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {M L T}} \\left(x ^ {a}, x ^ {v}, x _ {k} ^ {v}; \\phi_ {t e}, \\phi_ {s t}\\right) = \\beta \\mathcal {L} _ {\\mathrm {K D}} \\left(x ^ {a}, x ^ {v}, x _ {k} ^ {v}; \\phi_ {t e}, \\phi_ {s t}\\right) \\\\ + (1 - \\beta) \\mathcal {L} _ {\\mathrm {M L T}} \\left(x ^ {a}, x _ {k} ^ {v}; \\phi_ {s t}\\right). \\tag {7} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 119, + 467, + 154 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Modality-Specific Adapter (MS-Adapter)", + "text_level": 1, + "bbox": [ + 76, + 166, + 444, + 183 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As illustrated in Figure 4 on the right, when facing severely or entirely missing video data, we consider it unreliable to continue employing a synergistic decision-making strategy like MDA-KD with relatively high values of $\\gamma$ and $\\beta$ . Padding frames lack sufficient contextual information and may introduce noise. Therefore, in such scenarios, a dynamic switch in decision strategy from $P(y|z^u)$ to $P(y|z^a)$ is necessary as a complement to MDA-KD. In view of the success of adapters applied in foundation model fine-tuning [35-38], we attempt to extend it to address the modality missing issue in multimodal models. 
For clarity, we refer to this extension as Modality-Specific Adapter (MS-Adapter). Specifically, LORA [39] is adopted to self-attention layers in the audio branch, marked with a dashed box in Figure 5. These adapters perform residual-style feature blending with the original pre-trained features. The residual weight could be represented as low-rank matrices $\\Delta W \\in \\mathbb{R}^{d \\times d}$ , and it could be decomposed into a pair of fan-in and fan-out linear layers with weights $A \\in \\mathbb{R}^{r \\times d}$ and $B \\in \\mathbb{R}^{d \\times r}$ ( $r \\ll d$ ). The reparametrization operation can be formulated below.", + "bbox": [ + 75, + 191, + 472, + 494 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nH _ {o} = H _ {i} \\left(W _ {0} + \\Delta W\\right) = H _ {i} \\left(W _ {0} + B A\\right) \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 503, + 468, + 520 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "By activating the MS-Adapter, we can dynamically switch the decision-making pattern by activating the adapters. We highlight two advantages of the MS-Adapter. First, a substantial amount of unpaired unimodal training data and data augmentation techniques could be used in the training process of the adapters. Second, the adapter training process provides an opportunity to modify the computation pathway. As illustrated in Figure 5 with dashed arrows, in both training and inference stage with audio-only input, the computation flow of the video branch will be directly cut off, and the modality fusion cross-attention module will be skipped to reduce computational costs.", + "bbox": [ + 75, + 530, + 468, + 712 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6. Experiment Settings", + "text_level": 1, + "bbox": [ + 76, + 724, + 272, + 742 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dataset We conduct our experiments on MISP2021 [24] and MISP2022 [40]. These two open-source datasets present a large-scale audio-visual corpus recorded in real-life home TV scenarios with multiple groups of speakers chatting simultaneously. Multiple microphone arrays and cameras are used to collect far/middle/near-field audio and far/middle-field video. Compared to the carefully recorded videos in LRS2 [1] and LRS3 [2] from BBC interviews and TED talks, MISP datasets offer static shooting perspectives with diverse resolutions, including naturally blurred", + "bbox": [ + 75, + 750, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "and obstructed frames. The videos are accompanied by various background noises and high speech overlap rates (42% in training set and 49% in test set). Compared oracle segment-level AVSR task in MISP201, MISP2022 presents a more challenging task of session-level AVSR without oracle speaker diarization results. To avoid limitations associated with noise simulation, all experiments are evaluated exclusively on far-field data, which aligns well with common in-car, office meeting, or smart home scenarios.", + "bbox": [ + 496, + 90, + 890, + 227 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation Detail We strictly adhere to the approaches outlined in [18] for model training and network architectures. We initialize the AVSR model with two pretrained unimodal models and fine-tune it in an end-to-end manner. As shown in Figure 5, the AVSR model is a dual-branch network where $N = 3$ , $M = 9$ and $K = 6$ . 
For the loss function in Equation 3, we set $\\lambda$ to 0.7 and CTC loss consists of the same weighted intermediate CTC [41] losses in 3, 6, 9, 12 layers. In Equation 4, we use 0.1 for $\\beta$ . We follow [18] to establish two baselines A0 and AV0 trained on complete modality data with dropout techniques. AV0 is fine-tuned based on A0 and a pre-trained ResNet-18 encoder with a 3D-CNN head. 3", + "bbox": [ + 496, + 233, + 892, + 429 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dropout Settings Similar to [19], we evaluate the robustness to missing video modality with various dropout methods and rates: Segment Dropout, Utterance Dropout, and Interval Dropout. Testing involves dropout rates from 0.0 to 1.0 in 0.25 intervals. Results from the three dropout methods are averaged at each rate to obtain overall dropout results. When conducting ablation studies, segments with naturally missing video frames (17%) are excluded from the test set, ensuring a consistent and controlled video missing rate. In our method, during training, A certain proportion of sample is assigned a random dropout method from the above three methods and an extra one from [21] with an optimized dropout rate. In both training and testing stages, we pad the missing video frame pixels with zeros instead of using interpolation or repetition methods. We conduct a hyper-parameter search over the training dropout rate and found that 0.5 is optimal for our method (when $D_{p}rod$ is 0.5). This rate implies that half of the video frames in a selected sample are padded with zeros. 3", + "bbox": [ + 496, + 436, + 892, + 724 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "7. Experiments and Result Analysis", + "text_level": 1, + "bbox": [ + 500, + 738, + 802, + 755 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "7.1. Overall Comparison of Experiment Settings", + "text_level": 1, + "bbox": [ + 500, + 762, + 872, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Table 1, we conduct key parameter analysis and abolation study of the proposed methods on the MISP2022 dataset with oracle speaker diarization results. We first explore the impact of dropout probability in training videos. In contrast to AV1, AV2 introduces half of the complete data pairs. As a result, it mitigates dropout-induced modality", + "bbox": [ + 496, + 786, + 890, + 878 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "3 More details can be found in Appendix.", + "bbox": [ + 514, + 886, + 735, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "27450", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/9bdfa766bc6e6748e4f369844e88dee81d23a798fe6169eb019a78d35befc6fe.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td rowspan="2">Model</td><td colspan="5">Training settings</td><td colspan="5">Test dropout rate</td></tr>
<tr><td>Dropout</td><td>Dprob</td><td>Init.</td><td>MDA-KD</td><td>MS-Adapter</td><td>0.00</td><td>0.25</td><td>0.50</td><td>0.75</td><td>1.00</td></tr>
<tr><td>A0</td><td>X</td><td>0.0</td><td>Random</td><td>X</td><td>X</td><td>25.13</td><td>25.13</td><td>25.13</td><td>25.13</td><td>25.13</td></tr>
<tr><td>AV0</td><td>X</td><td>0.0</td><td>A0</td><td>X</td><td>X</td><td>21.14</td><td>23.77</td><td>25.57</td><td>25.87</td><td>26.65</td></tr>
<tr><td>AV1</td><td>✓</td><td>1.0</td><td>A0</td><td>X</td><td>X</td><td>23.26</td><td>23.68</td><td>24.27</td><td>24.95</td><td>25.91</td></tr>
<tr><td>AV2</td><td>✓</td><td>0.5</td><td>A0</td><td>X</td><td>X</td><td>21.72</td><td>22.56</td><td>23.37</td><td>24.46</td><td>25.64</td></tr>
<tr><td>AV3</td><td>✓</td><td>0.5</td><td>AV0</td><td>X</td><td>X</td><td>21.53</td><td>22.47</td><td>23.65</td><td>24.55</td><td>25.90</td></tr>
<tr><td>AV4</td><td>✓</td><td>0.5</td><td>AV0</td><td>✓</td><td>X</td><td>21.38</td><td>22.18</td><td>23.20</td><td>24.40</td><td>25.70</td></tr>
<tr><td>AV5</td><td>✓</td><td>0.5</td><td>AV0</td><td>✓</td><td>X</td><td>21.11</td><td>21.77</td><td>22.78</td><td>24.02</td><td>25.45</td></tr>
<tr><td>AV6</td><td>✓</td><td>0.5</td><td>AV0</td><td>✓</td><td>✓</td><td>21.11</td><td>21.77</td><td>22.78</td><td>24.02</td><td>24.94</td></tr></table>
", + "bbox": [ + 106, + 89, + 864, + 250 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/acf157ca44a6174a1f56b56dfdf6347f5a1d635c5482aca6e15bd997c8421118.jpg", + "table_caption": [ + "Table 1. An overall comparison in CER (%) of different system configurations. Different from the dropout rate, $D_{prob}$ represents the proportion of data with missing frames in the training set. Init. refers to the network initialization method." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Insert part</td><td>Rank</td><td>DA</td><td>Params(MB)</td><td>CER(%)</td></tr>
<tr><td>Encoder</td><td>32</td><td>×</td><td>4.50</td><td>25.35</td></tr>
<tr><td>Encoder</td><td>32</td><td>✓</td><td>4.50</td><td>25.08</td></tr>
<tr><td>En&Decoder</td><td>32</td><td>✓</td><td>9.00</td><td>25.20</td></tr>
<tr><td>Encoder</td><td>64</td><td>✓</td><td>9.00</td><td>25.08</td></tr>
<tr><td>En&Decoder</td><td>64</td><td>✓</td><td>18.00</td><td>25.05</td></tr>
<tr><td>Encoder</td><td>128</td><td>✓</td><td>18.00</td><td>25.01</td></tr>
<tr><td>En&Decoder</td><td>128</td><td>✓</td><td>36.00</td><td>24.94</td></tr></table>
", + "bbox": [ + 78, + 289, + 478, + 421 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "bias to some extent, since a higher proportion of complete data tend to encourages the model to learn general information across modalities. This finding aligns with previous research [19], highlighting the superiority of utterance dropout over random frame dropout (the former means a larger $D_{prob}$ ). Next, AV3 is trained based on AV0, which means the subsequent optimized processing starts from a relatively stable convergence state with complete input. In the robust training stage, the balanced state tends to be disrupted when trained on incomplete modality pairs, searching for a new optimization coverage range. However, when trained on complete data pairs, the scenario is reversed. Thus, while AV3 outperforms AV2 with low test missing rates, it lags behind when facing severe video absence, illustrating a tug-of-war dynamic without clear guidance.", + "bbox": [ + 75, + 460, + 468, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Next, we validate the effectiveness of MDA-KD. Compared with AV3, AV5 demonstrates superior performance for both complete and missing video modality inputs. AV4 successfully achieves our goal of enhancing robustness without any performance degradation on complete input (21.11% vs. 21.14%). This implies that the teacher model AV0 provides an explicitly optimized target in robustness training. It effectively constrains the distribution shift to the audio modality, preventing excessive modality bias caused by dropout. Furthermore, in AV4, we restrict the flow of audio data into the video branch within the dual cross-attention module. Consequently, a performance drop is observed across all test suites, highlighting the effectiveness of MDA-KD in leveraging the dual cross-attention mod", + "bbox": [ + 75, + 688, + 470, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/a80268e4e499d91018298121427ea2c65bb3f92c9098d88e3fed3a8632c9d956.jpg", + "table_caption": [ + "Table 2. Performance analysis of MS-Adapter. DA means data augmentation, including speed perturbation and utterance concat." + ], + "table_footnote": [], + "table_body": "
<table><tr><td rowspan="2">Method</td><td colspan="5">Test dropout rate</td></tr>
<tr><td>0.00</td><td>0.25</td><td>0.50</td><td>0.75</td><td>1.00</td></tr>
<tr><td>Cascade Utt [19]</td><td>22.54</td><td>23.89</td><td>25.23</td><td>26.05</td><td>28.15</td></tr>
<tr><td>AV Dropout Utt [21]</td><td>22.00</td><td>23.37</td><td>25.35</td><td>26.21</td><td>26.78</td></tr>
<tr><td>Dropout Utt [20]</td><td>22.08</td><td>23.21</td><td>24.56</td><td>25.08</td><td>25.46</td></tr>
<tr><td>Ours</td><td>21.11</td><td>21.77</td><td>22.78</td><td>24.02</td><td>24.94</td></tr></table>
", + "bbox": [ + 504, + 289, + 888, + 378 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. A $\\mathrm{{CER}}\\left( \\% \\right)$ comparison with other dropout methods.", + "bbox": [ + 511, + 382, + 877, + 397 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ule to extract modality-general information from audio for complementing missing information. Subsequently, we integrate MS-Adapters into the audio branch in AV6 based on AV5. Consequently, the performance with audio-only input improves to a $24.94\\%$ CER, surpassing A0 for the first time $(24.94\\%$ vs. $25.13\\%)$ . These results show the effectiveness of MS-Adapters by dynamically switching to the decision patterns on audio modality with audio-only input.", + "bbox": [ + 496, + 404, + 890, + 526 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "7.2. Validation of MS-Adapter", + "text_level": 1, + "bbox": [ + 498, + 537, + 736, + 555 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We further explore three key factors in MS-Adapter adaptation: data augmentation, insert part and rank dimension. In Table 2, we observe a decrease in CER from $25.45\\%$ (AV4) to $25.35\\%$ , and it further improves to $25.08\\%$ with data augmentation doubling audio training data. These results suggest that the adapter adaptation effectively enhances the robustness of AVSR with completely missing video, requiring only an additional 4.50MB in parameters. It provides an opportunity to apply data augmentation that is effective for unimodal model training and to use extra unpaired data. Next, increasing the ranks and the quantity of adapters results in further performance gains at the expense of a larger parameter. The best performance, achieving $24.94\\%$ , is shown in the bottom row and attained with the adapter inserted in both encoder and decoder blocks.", + "bbox": [ + 496, + 561, + 890, + 787 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "7.3. Comparisons with Other Dropout Techniques", + "text_level": 1, + "bbox": [ + 498, + 800, + 885, + 816 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Table 3, we compare our proposed framework with three widely used dropout techniques [19-21]. Cascade Utt employs a separable cascade structure, where an AV model is superimposed on an audio-only model. Inputs are then routed through either the audio-only path or", + "bbox": [ + 496, + 824, + 890, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "27451", + "bbox": [ + 478, + 944, + 517, + 957 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/52b50756d210d0fe6c1d4e081bf92307e11000f8ea459cb0e2a7ca24ed771e5f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td rowspan="2">Benchmark</td><td rowspan="2">System</td><td colspan="2">Training Data</td><td rowspan="2">Backbone</td><td rowspan="2">Obj. Function</td><td rowspan="2">CER / cpCER(%)</td></tr>
<tr><td>A</td><td>V</td></tr>
<tr><td rowspan="4">MISP2021</td><td>SJTU [42]</td><td>300 hours</td><td>LRW-1000</td><td>Conformer</td><td>ED + SE</td><td>34.02</td></tr>
<tr><td>NIO [43]</td><td>3300 hours</td><td>LRW-1000 [4]</td><td>Transformer</td><td>ED</td><td>25.07</td></tr>
<tr><td>USTC [18]</td><td>500 hours</td><td>w/o extra data</td><td>Conformer</td><td>ED</td><td>24.58</td></tr>
<tr><td>Ours</td><td>1000 hours</td><td>w/o extra data</td><td>Conformer</td><td>ED + InterCTC</td><td>21.53</td></tr>
<tr><td rowspan="4">MISP2022</td><td>NIO [44]</td><td>3300 hours</td><td>LRW-1000</td><td>Conformer</td><td>ED</td><td>29.58</td></tr>
<tr><td>XMU [45]</td><td>2100 hours</td><td>LRW-1000</td><td>Conformer</td><td>ED + InterCTC</td><td>31.88</td></tr>
<tr><td>NPU [46]</td><td>1300 hours</td><td>w/o extra data</td><td>E-Branchformer</td><td>ED + InterCTC</td><td>29.13</td></tr>
<tr><td>Ours</td><td>1000 hours</td><td>w/o extra data</td><td>Conformer</td><td>ED + InterCTC</td><td>28.06</td></tr></table>
", + "bbox": [ + 84, + 88, + 885, + 251 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4. A Comparison of the state-of-the-art systems. InterCTC refers to Intermediate CTC loss [41], the ED loss is formulated in Equation (3) and SE represents the mean square error loss. We use evaluate the performance using the concatenated minimum-permutation character error rate (cpCER) [47] metric for the session-level AVSR task.", + "bbox": [ + 75, + 253, + 893, + 299 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "the AV path with a probability of $p_1$ . AV Dropout Utt randomly drops either the entire video or the entire audio segments with a probability of $p_2$ . Dropout Utt exclusively drops the video segments with a probability of $p_3$ . We adopt the optimal dropout settings from [19], where $p_1 = 0.25$ , $p_2 = 0.25$ , and $p_3 = 0.5$ . For Cascade Utt, we follow [19] to build the network and maintain comparable parameters numbers. As a result, our proposed methods outperforms the other three techniques in all test suites and does not cause performance degradation.", + "bbox": [ + 75, + 305, + 472, + 455 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7.4. Comparisons with State-of-the-art Systems", + "text_level": 1, + "bbox": [ + 76, + 464, + 444, + 481 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Finally, we compare our system with the state-of-the-art systems on the MISP2021 and MISP2022 challenges[18, 42-45, 48] as shown in Table 4. With Recognizer Output Voting Error Reduction (ROVER) [49], we rescore the output transcripts of A0, AV0, and A6 mentioned in Table 1. In the MISP2021 utterance-level AVSR challenge with oracle speaker diarization results, our system outperforms the previous SOTA system by achieving an absolute CER reduction of $3.05\\%$ from $24.58\\%$ to $21.53\\%$ . Our top-performing system, AV6, attains a CER of $22.13\\%$ . Moving to the MISP2022 session-level AVSR challenge, we build our diarization system closely adhering to [50]. We secure a ROVER cpCER score of $28.06\\%$ and obtain the best system score with a cpCER of $28.55\\%$ . When oracle segmentations are utilized, our system achieves a ROVER CER score of $21.80\\%$ and the best model score of $21.53\\%$ in CER.", + "bbox": [ + 75, + 487, + 472, + 729 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "8. Related Works", + "text_level": 1, + "bbox": [ + 76, + 739, + 227, + 753 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Modality Missing in Multimodal Learning The prevalent issue of missing modalities in multimodal applications has prompted research that specifically targets severe modality absences. Generative models [51, 52] and meta-learning predict missing modalities using available or few-shot paired samples. Balanced models utilize joint multimodal representations [53-55]. Models addressing modality bias employ data augmentation methods like modality dropout [19, 22] to tackle out-of-distribution challenges.", + "bbox": [ + 75, + 763, + 468, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For AVSR, we prioritize efficiency and opt for dropout due to its plug-and-play nature and lightweight implementation. 
More discussion could be found in Appendix.", + "bbox": [ + 496, + 305, + 890, + 349 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Video Modality Robustness in AVSR To enhance performance on low-resolution videos, visual extractors are commonly pre-trained on relatively high-quality videos with isolated words [5] or acoustic pseudo-labeling classification tasks [18]. Addressing situations involving corruption, Hong et al. [17] have designed an explicit scoring module to identify reliable streams and effectively manage input scenarios. Regarding the issue of missing video frames, most researchers have applied dropout techniques to enhance missing robustness [19-23]. In classical dropout methods, frame level dropout is utilized in [23] and utterance-level dropout is applied in AV-Hubert [21]. As a recent work focusing on this issue, Chang et al. [19] unify test suites of missing videos. However, the proposed binary evaluation metric overly emphasizes relative robustness trends, neglecting absolute performance. Compared to the methods mentioned earlier, we explore the problem of missing video frames from the perspective of modality bias. Leveraging classical techniques and simple designs, our approach achieves both performance and robustness without introducing additional inference time. It adapts to various scenarios of frame absence through a unified model.", + "bbox": [ + 496, + 353, + 892, + 685 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "9. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 695, + 619, + 709 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we discover and analyze the essence of dropout-induced modality bias. Based on these findings, we proposed MBH to provide a systematic description of the relationship between modality bias and missing robustness in multimodal systems. Consequently, we propose a new multimodal distribution approximation with knowledge distillation approach to deal with missing video frames for AVSR. Furthermore, we apply adapters to handle videos with both severe and complete missing rates. For future work, we intend to validate our findings in this study across a wide range of multimodal applications beyond AVSR.", + "bbox": [ + 496, + 719, + 893, + 886 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "27452", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Joon Son Chung, Andrew Senior, Oriol Vinyals, and Andrew Zisserman. Lip reading sentences in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6447-6456, 2017. 1, 6", + "[2] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. LRS3-TED: a large-scale dataset for visual speech recognition. arXiv preprint arXiv:1809.00496, 2018. 1, 6", + "[3] Hang Chen, Jun Du, Yusheng Dai, Chin Hui Lee, Sabato Marco Siniscalchi, Shinji Watanabe, Odette Scharenborg, Jingdong Chen, Bao Cai Yin, and Jia Pan. Audio-visual speech recognition in misp2021 challenge: Dataset release and deep analysis. In Proceedings of the Annual Conference of the International Speech Communication Association, IN-TERSPEECH, volume 2022, pages 1766–1770, 2022. 3", + "[4] Shuang Yang, Yuanhang Zhang, Dalu Feng, Mingmin Yang, Chenhao Wang, Jingyun Xiao, Keyu Long, Shiguang Shan, and Xilin Chen. 
Lrw-1000: A naturally-distributed large-scale benchmark for lip reading in the wild. In 2019 14th IEEE international conference on automatic face & gesture recognition (FG 2019), pages 1-8. IEEE, 2019. 1, 8", + "[5] Pingchuan Ma, Stavros Petridis, and Maja Pantic. End-to-end audio-visual speech recognition with conformers. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 7613-7617. IEEE, 2021. 1, 8", + "[6] Xichen Pan, Peiyu Chen, Yichen Gong, Helong Zhou, Xinbing Wang, and Zhouhan Lin. Leveraging unimodal self-supervised learning for multimodal audio-visual speech recognition, 2022. 1", + "[7] Chen Chen, Yuchen Hu, Qiang Zhang, Heqing Zou, Beier Zhu, and Eng Siong Chng. Leveraging modality-specific representations for audio-visual speech recognition via reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pages 12607–12615, 2023. 1", + "[8] Bo Xu, Cheng Lu, Yandong Guo, and Jacob Wang. Discriminative multi-modality speech recognition. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, pages 14433-14442, 2020. 1", + "[9] Jianwei Yu, Shi-Xiong Zhang, Jian Wu, Shahram Ghorbani, Bo Wu, Shiyin Kang, Shansong Liu, Xunying Liu, Helen Meng, and Dong Yu. Audio-visual recognition of overlapped speech for the lrs2 dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6984-6988. IEEE, 2020.", + "[10] Joanna Hong, Minsu Kim, Daehun Yoo, and Yong Man Ro. Visual context-driven audio feature enhancement for robust end-to-end audio-visual speech recognition. arXiv preprint arXiv:2207.06020, 2022. 1", + "[11] Alexandros Haliassos, Pingchuan Ma, Rodrigo Mira, Stavros Petridis, and Maja Pantic. Jointly learning visual and auditory speech representations from raw data. arXiv preprint arXiv:2212.06246, 2022. 1", + "[12] Pingchuan Ma, Alexandros Haliassos, Adriana Fernandez-Lopez, Honglie Chen, Stavros Petridis, and Maja Pantic." + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Auto-AVSR: Audio-visual speech recognition with automatic labels. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2023. 1", + "[13] George Sterpu, Christian Saam, and Naomi Harte. Attention-based audio-visual fusion for robust automatic speech recognition. In Proceedings of the 20th ACM International conference on Multimodal Interaction, pages 111–115, 2018. 1", + "[14] George Sterpu, Christian Saam, and Naomi Harte. How to teach DNNs to pay attention to the visual modality in speech recognition. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 28:1052-1064, 2020.", + "[15] Yuchen Hu, Ruizhe Li, Chen Chen, Heqing Zou, Qiushi Zhu, and Eng Siong Chng. Cross-Modal Global Interaction and Local Alignment for Audio-Visual Speech Recognition. arXiv preprint arXiv:2305.09212, 2023. 1", + "[16] Triantafyllos Afouras, Joon Son Chung, Andrew Senior, Oriol Vinyals, and Andrew Zisserman. Deep audio-visual speech recognition. IEEE transactions on pattern analysis and machine intelligence, 44(12):8717-8727, 2018. 1", + "[17] Joanna Hong, Minsu Kim, Jeongsoo Choi, and Yong Man Ro. Watch or Listen: Robust Audio-Visual Speech Recognition with Visual Corruption Modeling and Reliability Scoring. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18783–18794, 2023. 1, 8", + "[18] Yusheng Dai, Hang Chen, Jun Du, Xiaofei Ding, Ning Ding, Feijun Jiang, and Chin-Hui Lee. Improving Audio-Visual Speech Recognition by Lip-Subword Correlation Based Visual Pre-training and Cross-Modal Fusion Encoder. In 2023 IEEE International Conference on Multimedia and Expo (ICME), pages 2627–2632. IEEE, 2023. 1, 6, 8, 3", + "[19] Oscar Chang, Otavio de Pinho Forin Braga, Hank Liao, Dmitriy Dima Serdyuk, and Olivier Siohan. On robustness to missing video for audiovisual speech recognition. Transactions on Machine Learning Research (TMLR), 2022. 1, 6, 7, 8, 3, 4", + "[20] Takaki Makino, Hank Liao, Yannis Assael, Brendan Shillingford, Basilio Garcia, Otavio Braga, and Olivier Siohan. Recurrent neural network transducer for audio-visual speech recognition. In 2019 IEEE automatic speech recognition and understanding workshop (ASRU), pages 905–912. IEEE, 2019. 1, 7, 4", + "[21] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. Learning audio-visual speech representation by masked multimodal cluster prediction, 2022. 4, 6, 7, 8", + "[22] Devamanyu Hazarika, Yingting Li, Bo Cheng, Shuai Zhao, Roger Zimmermann, and Soujanya Poria. Analyzing modality robustness in multimodal sentiment analysis, 2022. 4, 8", + "[23] Shiliang Zhang, Ming Lei, Bin Ma, and Lei Xie. Robust audio-visual speech recognition using bimodal DFSMN with multi-condition training and dropout regularization. In ICASSP 2019-2019 IEEE international conference on acoustics, speech and signal processing (ICASSP), pages 6570-6574. IEEE, 2019. 1, 4, 8", + "[24] Hang Chen, Hengshun Zhou, Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Marco Siniscalchi," + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "27453", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Odette Scharenborg, Di-Yuan Liu, Bao-Cai Yin, Jia Pan, Jian-Qing Gao, and Cong Liu. The First Multimodal Information Based Speech Processing (Misp) Challenge: Data, Tasks, Baselines And Results. In ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 9266-9270, 2022. 1, 6", + "[25] Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Marco Siniscalchi, and Odette Scharenborg. Multimodal Information Based Speech Processing (MISP) Challenge 2022. https://mispchallenge.github.io/mispchallenge2022/, 2022. Accessed: 2023-06-26.1", + "[26] Jinming Zhao, Ruichen Li, and Qin Jin. Missing modality imagination network for emotion recognition with uncertain missing modalities. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2608-2618, 2021. 2, 4", + "[27] Mengmeng Ma, Jian Ren, Long Zhao, Davide Testuggine, and Xi Peng. Are multimodal transformers robust to missing modality? In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18177-18186, 2022. 2, 4", + "[28] Zihui Xue, Zhengqi Gao, Sucheng Ren, and Hang Zhao. The modality focusing hypothesis: Towards understanding cross-modal knowledge distillation, 2022. 3, 1", + "[29] Hang Chen, Jun Du, Yu Hu, Li-Rong Dai, Bao-Cai Yin, and Chin-Hui Lee. 
Correlating subword articulation with lip shapes for embedding aware audio-visual speech enhancement. Neural Networks, 143:171–182, 2021. 4", + "[30] Pan Zhou, Wenwen Yang, Wei Chen, Yanfeng Wang, and Jia Jia. Modality attention for end-to-end audio-visual speech recognition. In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6565-6569. IEEE, 2019. 4", + "[31] Xianing Chen, Qiong Cao, Yujie Zhong, Jing Zhang, Shenghua Gao, and Dacheng Tao. Dearkd: data-efficient early knowledge distillation for vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12052-12062, 2022. 4", + "[32] Francisco Rivera Valverde, Juana Valeria Hurtado, and Abhinav Valada. There is more than meets the eye: Self-supervised multi-object detection and tracking with sound by distilling multimodal knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11612-11621, 2021.", + "[33] Zihui Xue, Sucheng Ren, Zhengqi Gao, and Hang Zhao. Multimodal knowledge expansion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 854-863, 2021.", + "[34] Baoyun Peng, Xiao Jin, Jiaheng Liu, Dongsheng Li, Yichao Wu, Yu Liu, Shunfeng Zhou, and Zhaoning Zhang. Correlation congruence for knowledge distillation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5007-5016, 2019. 4", + "[35] Renrui Zhang, Rongyao Fang, Wei Zhang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li." + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tip-adapter: Training-free clip-adapter for better vision-language modeling. arXiv preprint arXiv:2111.03930, 2021.6", + "[36] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019.", + "[37] Sylvestre-Alvise Rebuffi, Hakan Bilen, and Andrea Vedaldi. Learning multiple visual domains with residual adapters. Advances in neural information processing systems, 30, 2017.", + "[38] Peng Gao, Shijie Geng, Renrui Zhang, Teli Ma, Rongyao Fang, Yongfeng Zhang, Hongsheng Li, and Yu Qiao. Clip-adapter: Better vision-language models with feature adapters. International Journal of Computer Vision, pages 1–15, 2023. 6", + "[39] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models, 2021. 6", + "[40] Zhe Wang, Shilong Wu, Hang Chen, Mao-Kui He, Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Siniscalchi, Odette Scharenborg, et al. The multimodal information based speech processing (misp) 2022 challenge: Audio-visual diarization and recognition. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2023. 6", + "[41] Jaesong Lee and Shinji Watanabe. Intermediate loss regularization for ctc-based speech recognition. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6224-6228. IEEE, 2021. 6, 8", + "[42] Wei Wang, Xun Gong, Yifei Wu, Zhikai Zhou, Chenda Li, Wangyou Zhang, Bing Han, and Yanmin Qian. 
The sjtu system for multimodal information based speech processing challenge 2021. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 9261-9265. IEEE, 2022. 8", + "[43] Gaopeng Xu, Song Yang, Wei Li, et al. Channel-Wise AV-Fusion Attention for Multi-Channel Audio-Visual Speech Recognition. In Proc. ICASSP 2022, pages 9251–9255. IEEE, 2022. 8", + "[44] Sang Wang Gaopeng Xu, Xianliang Wang et al. The NIO system for audio-visual diarization and recognition in MISP challenge 2022. https://mispchallenge.github.io/mispchallenge2022/papers/task2/Track2_NIO.pdf, 2022.8", + "[45] Tao Li, Haodong Zhou, Jie Wang, Qingyang Hong, and Lin Li. The XMU System for Audio-Visual Diarization and Recognition in MISP Challenge 2022. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8", + "[46] He Wang, Pengcheng Guo, Pan Zhou, and Lei Xie. Mlcaavsr: Multi-layer cross attention fusion based audio-visual speech recognition. arXiv preprint arXiv:2401.03424, 2024. 8", + "[47] Shinji Watanabe, Michael Mandel, Jon Barker, Emmanuel Vincent, Ashish Arora, Xuankai Chang, Sanjeev Khudan-" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "27454", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "pur, Vimal Manohar, Daniel Povey, Desh Raj, et al. Chime-6 challenge: Tackling multispeaker speech recognition for unsegmented recordings. arXiv preprint arXiv:2004.09249, 2020.8", + "[48] Pengcheng Guo, He Wang, Bingshen Mu, Ao Zhang, and Peikun Chen. The NPU-ASLP System for Audio-Visual Speech Recognition in MISP 2022 Challenge. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8", + "[49] Jonathan G Fiscus. A post-processing system to yield reduced word error rates: Recognizer output voting error reduction (ROVER). In Proc. asrU 1997, pages 347-354. IEEE, 1997. 8", + "[50] Ming Cheng, Haoxu Wang, Ziteng Wang, Qiang Fu, and Ming Li. The whu-alibaba audio-visual speaker diarization system for the misp 2022 challenge. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8", + "[51] Qiuling Suo, Weida Zhong, Fenglong Ma, Ye Yuan, Jing Gao, and Aidong Zhang. Metric Learning on Healthcare Data with Incomplete Modalities. In IJCAI, volume 3534, page 3540, 2019. 8, 4", + "[52] Lei Cai, Zhengyang Wang, Hongyang Gao, Dinggang Shen, and Shuiwang Ji. Deep adversarial learning for multimodality missing data completion. In Proceedings of the 24th ACM SIGKDD international conference on knowledge discovery & data mining, pages 1158-1166, 2018. 8, 4", + "[53] Zilong Wang, Zhaohong Wan, and Xiaojun Wan. Transmodality: An end2end fusion method with transformer for multimodal sentiment analysis. In Proceedings of The Web Conference 2020, pages 2514-2520, 2020. 8, 4", + "[54] Hai Pham, Paul Pu Liang, Thomas Manzini, Louis-Philippe Morency, and Barnabás Póczos. Found in translation: Learning robust joint representations by cyclic translations between modalities. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 6892–6899, 2019. 4", + "[55] Jiale Li, Hang Dai, Hao Han, and Yong Ding. Mseg3d: Multi-modal 3d semantic segmentation for autonomous driving. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21694-21704, 2023. 8", + "[56] A Varga, HJM Steeneken, et al. Noisex-92: A database and an experiment to study the effect of additive noise on speech recognition systems. Speech Commun, 12(3):247–253, 1993. 1", + "[57] Lukas Drude, Jahn Heymann, Christoph Boeddeker, and Reinhold Haeb-Umbach. Nara-wpe: A python package for weighted prediction error dereverberation in numpy and tensorflow for online and offline processing. In Speech Communication; 13th ITG-Symposium, pages 1-5. VDE, 2018. 1", + "[58] Christoph Boeddecker, Jens Heitkaemper, Joerg Schmalenstroeer, et al. Front-end processing for the CHiME-5 dinner party scenario. In Proc. CHiME 2018, pages 35–40, 2018. 1, 3" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[59] Desh Raj, Daniel Povey, and Sanjeev Khudanpur. GPU-accelerated guided source separation for meeting transcription, 2022. 3", + "[60] Mengmeng Ma, Jian Ren, Long Zhao, Sergey Tulyakov, Cathy Wu, and Xi Peng. Smil: Multimodal learning with severely missing modality. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 2302-2310, 2021. 4", + "[61] Itai Gat, Idan Schwartz, Alexander Schwing, and Tamir Hazan. Removing bias in multi-modal classifiers: Regularization by maximizing functional entropies. Advances in Neural Information Processing Systems, 33:3197-3208, 2020. 4", + "[62] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017. 4", + "[63] Yangyang Guo, Liqiang Nie, Harry Cheng, Zhiyong Cheng, Mohan Kankanhalli, and Alberto Del Bimbo. On modality bias recognition and reduction. ACM Transactions on Multimedia Computing, Communications and Applications, 19(3):1-22, 2023. 4" + ], + "bbox": [ + 501, + 92, + 890, + 428 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "27455", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/6897fec6-4bb7-4167-a28e-16a34134af6a_model.json b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/6897fec6-4bb7-4167-a28e-16a34134af6a_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c872a82d10a09e6c6c24d9358063dbdc066d27e3 --- /dev/null +++ b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/6897fec6-4bb7-4167-a28e-16a34134af6a_model.json @@ -0,0 +1,2213 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.107, + 0.13, + 0.868, + 0.177 + ], + "angle": 0, + "content": "A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.215, + 0.877, + 0.291 + ], + "angle": 0, + "content": "Yusheng Dai†, Hang Chen†, Jun Du†*, Ruoyu Wang†, Shihao Chen†, Haotian Wang†, Chin-Hui Lee‡ \n† University of Science and Technology of China, Hefei, China \n‡ Georgia Institute of Technology, Atlanta, America \njundu@ustc.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.319, + 0.314, + 0.335 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.352, + 0.474, + 0.715 + ], + "angle": 0, + "content": "Advanced Audio-Visual Speech Recognition (AVSR) systems have been observed to be sensitive to missing video frames, performing even worse than single-modality models. While applying the common dropout techniques to the video modality enhances robustness to missing frames, it simultaneously results in a performance loss when dealing with complete data input. In this study, we delve into this contrasting phenomenon through the lens of modality bias and uncover that an excessive modality bias towards the audio modality induced by dropout constitutes the fundamental cause. Next, we present the Modality Bias Hypothesis (MBH) to systematically describe the relationship between the modality bias and the robustness against missing modality in multimodal systems. Building on these findings, we propose a novel Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) framework to reduce over-reliance on the audio modality, maintaining performance and robustness simultaneously. Finally, to address an entirely missing modality, we adopt adapters to dynamically switch decision strategies. The effectiveness of our proposed approach is evaluated through comprehensive experiments on the MISP2021 and MISP2022 datasets. Our code is available at https://github.com/dalision/ModalBiasAVSR." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.732, + 0.21, + 0.749 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.758, + 0.47, + 0.866 + ], + "angle": 0, + "content": "Audio-Visual Speech Recognition (AVSR) is a multimodal application inspired by human speech perception. It outperforms single-modality models by incorporating noise-invariant complementary information from visual cues, especially in noisy environments. Driven by increasingly large open-source datasets and models [1-4], AVSR has achieved significant advancements across various bench" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.321, + 0.803, + 0.337 + ], + "angle": 0, + "content": "marks with a simple end-to-end design [5, 6]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.338, + 0.893, + 0.579 + ], + "angle": 0, + "content": "Recent research on AVSR focuses on more challenging real-life scenarios. Techniques such as reinforcement learning [7] and carefully designed fusion architecture [8-10] are used to accommodate varying noise levels and overlapping speech. Self-supervised learning [11] and automatic labeling techniques [12] are applied facing insufficient audiovisual pairs. Meanwhile, various synchronization modules have been developed for audio-visual alignment.[13-15]. 
However, restricted to the open-source datasets [1, 2, 16], most studies often assume that each video is recorded in relatively high quality, without blurring, corruption, or loss. Moreover, there is growing evidence to suggest that current advanced AVSR systems are highly susceptible to perturbations in video modality [17, 18], resulting in significant performance degradation even perform worse than single-modality models [19, 20]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.581, + 0.895, + 0.839 + ], + "angle": 0, + "content": "Missing video modality is a crucial and common problem for AVSR applied in real-life scenarios [1, 17, 19, 20]. It arises from various causes, including losses induced by network latency or hardware limitations, as well as errors in lip movement tracking due to occlusion and side-face. Most researchers utilize dropout techniques on video training data to improve robustness against missing modalities [19-23]. It has been demonstrated to effectively mitigate the out-of-distribution (OOD) issue and alleviate performance degradation without additional inference consumption or complex modules. However, it leads to new challenges on real-life scenarios with low-quality input. In our early experiments on MISP datasets [24, 25], a contradictory phenomenon could be observed in Figure 1: while applying the dropout strategy to video training data enhance the robustness against missing video modality, it also leads to performance degradation when dealing with complete data input." + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.851, + 0.894, + 0.901 + ], + "angle": 0, + "content": "\\(^{1}\\)Distinguished from the classic dropout that randomly deactivates nodes during neural network training, dropout in this paper specifically refers to a data augmentation technique that partially or entirely replaces original video frames with padding frames." + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.875, + 0.47, + 0.9 + ], + "angle": 0, + "content": "*Corresponding author. This work was supported by the National Natural Science Foundation of China under Grant No. 62171427." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "27445" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "On the other hand, all AVSR systems consistently lag behind unimodal ASR when facing completely missing video." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.124, + 0.468, + 0.35 + ], + "angle": 0, + "content": "We attempt to analyze the reasons behind the above-mentioned phenomenon from the perspective of modality bias. Existing multimodal applications can be categorized into two types: (1) modality-balanced systems, in which each modality contributes relatively equally to the model decision, such as Multimodal Emotion Recognition (MER) [26] and Hate Speech Detection (HSD) [27]; (2) modality-biased systems that over-relies on certain modality that contains more task-related information. AVSR is a typical modality-biased system dominated by audio. Therefore, an intuitive insight suggests that although dropout on the video modality could address the OOD problem between the training and inference stages, it may exacerbate the modality bias on audio, subsequently demonstrating robustness towards missing video input." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.353, + 0.468, + 0.775 + ], + "angle": 0, + "content": "In this paper, we first verify this intuitive hypothesis in Section 2 by quantitatively analyzing the differences between AVSR and unimodal automatic speech recognition (ASR). The results uncover that the modality bias essentially represents a shift from a multimodal to a unimodal distribution on audio modality in latent representation space. Next in Section 3, we extend our findings to more general multimodal applications and propose the Modality Bias Hypothesis (MBH) to systematically describe the relationship between modality bias and robustness to missing modality. In Sections 4 and 5, we are committed to achieving two objectives: improving the robustness of AVSR without degradation with complete input, and ensuring that AVSR consistently outperforms ASR when faced with severe or complete video missing. To this end, we present Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD), in which the robust student model leverages hidden knowledge extracted by a relatively unbiased teacher model to prevent the distribution of task-relevant representations from transferring into a unimodal distribution. The method is observed to enhance missing robustness through the learning of complementary information from the other modality and utilizing context information from adjacent frames. For video severely or entirely missing situations, adapters are adopted to the modality-specific branch to dynamically switch decision bias dominated by modality-specific representations. The key contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.778, + 0.468, + 0.839 + ], + "angle": 0, + "content": "- We investigate dropout-induced modality bias and uncover that it fundamentally manifests as a shift from a multimodal to a unimodal distribution of audio modality in the hidden representation subspace as detailed in Section 2." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.468, + 0.902 + ], + "angle": 0, + "content": "- We propose using the Modality Bias Hypothesis (MBH) to systematically describe the decision-making process influenced by modal bias in a multimodal system, as well as the relationship between modal bias and modality" + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.778, + 0.468, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.092, + 0.877, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.244, + 0.892, + 0.328 + ], + "angle": 0, + "content": "Figure 1. CER (in %) degradation curves of AVSR trained with different dropout rates on video frames. Compared with the baseline AVSR without dropout (in red), other AVSR systems perform better with missing input but worse with complete data input. As the training dropout rate increases, the CER curve of AVSR gradually converges to that of ASR (dotted line)." + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.344, + 0.882, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.458, + 0.892, + 0.528 + ], + "angle": 0, + "content": "Figure 2. Two groups of similarity analysis between ASR and AVSR transcriptions. In both groups, an increase in the similarity of recognition transcriptions is observed as the training dropout rate increases. 
The similarity is measured by relative CER (in \\(\\%\\) ), where the ASR transcription replaces the ground truth." + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.544, + 0.89, + 0.665 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.67, + 0.892, + 0.74 + ], + "angle": 0, + "content": "Figure 3. Similarity matrices of intermediate representations between ASR and different AVSR settings. As training dropout rates increase, the diagonal lines become brighter, indicating closer proximity between the multimodal and the unimodal distributions of the latent decisive subspace in AVSR." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.748, + 0.791, + 0.762 + ], + "angle": 0, + "content": "missing robustness as detailed in Section 3." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.764, + 0.892, + 0.854 + ], + "angle": 0, + "content": "- We propose Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) to enhance robustness against missing video and avoid performance degradation with complete input. For entirely missing modalities, adapters are adopted to dynamically switch decision bias to the specific modality as detailed in Section 5." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "- We achieve top AVSR performances on MISP2021 and MISP2022 datasets while maintaining robustness against missing video frames as detailed in Section 7." + }, + { + "type": "list", + "bbox": [ + 0.498, + 0.764, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27446" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.09, + 0.852, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.294, + 0.895, + 0.351 + ], + "angle": 0, + "content": "Figure 4. An illustration of the Modality Bias Hypothesis (MBH). In the left subplot, the task-relevant component (shaded part) of the latent representations consists of \\( Z^{sa} \\), \\( Z^{sv} \\) and \\( Z^g \\), representing audio-specific, visual-specific decision features and modality-general decisive features respectively. The corresponding proportions are denoted by \\( \\alpha \\), \\( \\beta \\), and \\( \\gamma \\). The right subplot shows a dynamic process of decisive bias with an increasing training dropout rate. Dropout leads to a consistent modality bias on audio, regardless of the extent of the missing." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.357, + 0.373, + 0.373 + ], + "angle": 0, + "content": "2. Dropout-Induced Modality Bias" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.384, + 0.474, + 0.656 + ], + "angle": 0, + "content": "We investigate the contradictory phenomenon 1 by examining the character error rate (CER) across five Mandarin AVSR systems varying training dropout rates (from 0.0 to 0.7) and testing video missing rates (from 0.0 to 1.0). As shown in Figure 1, two trends are observed: (1) in terms of absolute CER, the model trained with a higher dropout rate deteriorate more on no-missing complete multimodal data and slightly missing video frames, but it performs better on severely and entirely missing video frames; and (2) in term relative performance, the CER degradation curve of the AVSR model trained with a higher dropout rate tends to converge to the unimodal ASR recognition curve. 
We further ensure whether the similarity of performance degradation curves directly corresponds to the recognition transcription similarity of ASR and AVSR in Figure 2. As we expected, an increase in training dropout rate leads to higher transcription similarity between AVSR and ASR across different test settings." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.472, + 0.901 + ], + "angle": 0, + "content": "To understand this, we investigate the discrepancy in decisive patterns of ASR and each AVSR. We aim to quantify the divergence between latent decision distributions of these models by measuring the distance of intermediate representation samples. Through random sampling of complete audio-visual data batches, we generate intermediate layer representations using the encoder of ASR or AVSR trained at different dropout rates. Figure 3 illustrates cosine distance-based similarity matrices for the intermediate representations between ASR and different AVSR configurations. The diagonal elements in each subplot represent the similarity between intermediate representations from the same inputs. Notably, with an increase training dropout rate, these diagonal lines brighten, signifying a rise in intermediate representation similarity. This suggests closer proximity of the AVSR multimodal distribution in the la" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.358, + 0.892, + 0.373 + ], + "angle": 0, + "content": "tent decisive subspace to the unimodal distribution of ASR." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.373, + 0.893, + 0.706 + ], + "angle": 0, + "content": "Through the aforementioned three experiments, we have discovered that increasing the training dropout rate on video data leads to increased similarity between AVSR and ASR in the performance degradation curves, recognition results, and intermediate representation subspace distribution. The findings reveal the significant impact of dropout in introducing effectively perturbs the distribution of multimodal training data. It leads to a shift from multimodal joint distribution to unimodal distribution, resulting in a decision bias towards audio during the decision-making process, as reflected in the output similarity of ASR. We refer to this phenomenon induced by dropout as dropout-induced modality bias. Although dropout-induced bias enhances the robustness of missing video data to some extent, we emphasize that it contradicts the primary design of AVSR as a robust application in noisy environments with supplementary visual cues. The introduction of artificial noise (padding frames) in video data induces the model to converge toward trivial solutions, leading to an excessive dependence on the audio modality. This over-reliance, in turn, leads to a degradation in performance when presented with complete multimodal input in a noisy environment." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.719, + 0.807, + 0.737 + ], + "angle": 0, + "content": "3. Modality Bias Hypothesis (MBH)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.744, + 0.894, + 0.805 + ], + "angle": 0, + "content": "In this section, we propose the Modality Bias Hypothesis (MBH) based on the Modality Bias Venn diagram (MBVD) to systematically describe the relationship between modality bias and robustness to missing modality." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Modality Bias Venn Diagram As shown in Figure 4 on the left, the MBVD depicts the components of the latent decisive feature of multimodal systems in the form of a Venn Diagram. It is a variant of the Modality Venn Diagram (MVD) employed in multimodal knowledge distillation [28]. Without loss of generality, we take AVSR as an" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "27447" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.485 + ], + "angle": 0, + "content": "example and define \\(\\mathcal{X}^a\\), \\(\\mathcal{X}^v\\), and \\(\\mathcal{V}\\) as the original feature space of audio, video and label space, respectively. The decisive feature \\(z\\), commonly a form of intermediate layer representation, consists of two modality components \\(z^a\\) (blue circles) and \\(z^v\\) (green circle). We denote \\(I(\\cdot)\\) as mutual information and \\(I(\\cdot|\\cdot)\\) as conditional mutual information. The task-relevant decisive feature \\(z^u\\) (\\(I(z,y)\\)) is depicted by the shaded region and can be further divided into three components. \\(z^g\\) (\\(I(z^a,z^v,y)\\)) represents modality-general decisive features, while \\(z^{sa}\\) (\\(I(z^u,z^a|z^g)\\)) and \\(z^{sv}\\) (\\(I(z^u,z^v|z^g)\\)) represent modality-specific decisive features. We denote their proportions in \\(z^u\\) as \\(\\alpha\\), \\(\\beta\\), and \\(\\gamma\\), respectively. These features collectively contribute to determining the final task output \\(\\hat{y}\\). For AVSR, a higher \\(\\alpha\\) represents a greater decision bias of the model on the audio modality, focusing more on speech than lip movements. A larger \\(\\gamma\\) indicates a model's inclination towards modality synergy by maximizing the mutual information between modalities for decision-making, as in some modality-balanced models [26, 27]. Furthermore, \\(z^u\\) is generated by the original features \\(x^a,x^v\\) as \\(g(x^a,x^v;\\phi)\\), where \\(g(\\phi)\\) can be seen as a neural network-based transfer such as an encoder with parameters \\(\\phi\\). Therefore, the decision process of the multimodal system can be decomposed into two steps, following the Bayesian process: the MBVD hidden decisive feature generation step and the decision step:" + }, + { + "type": "equation", + "bbox": [ + 0.129, + 0.494, + 0.47, + 0.512 + ], + "angle": 0, + "content": "\\[\nP \\left(y \\mid x ^ {a}, x ^ {v}\\right) = P \\left(y \\mid z ^ {\\mu}\\right) P \\left(z ^ {\\mu} \\mid x ^ {a}, x ^ {v}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.522, + 0.47, + 0.795 + ], + "angle": 0, + "content": "Modality Bias Hypothesis Based on MBVD, we give a systematic description of the relationship between modality bias and robustness to missing modality in the view of MBH. As shown in Figure 4 on the right, by applying dropout with different rates \\( k_{i} \\in [0,1] \\) on video training data, the original video feature space \\( \\mathcal{X}^v \\) can be split into a series of subsets \\( \\{\\mathcal{X}_{k_1}^v, \\mathcal{X}_{k_2}^v, \\dots, \\mathcal{X}_{k_n}^v\\} \\). The samples from space \\( \\mathcal{X}^a \\times \\mathcal{X}_{k_i}^v \\) are denoted as dyads \\( (x^a, x_{k_i}^v) \\). 
Compared to the model trained on complete multimodal datas \\( (x^a, x_{0.0}^v) \\), the model trained on data pairs \\( (x^a, x_\\theta^v) \\) with a video dropout rate \\( \\theta_{train} \\in (0.0,1.0) \\) exhibits a greater decision bias on audio modality with larger \\( \\alpha \\), smaller \\( \\beta \\), and \\( \\gamma \\). As \\( \\theta \\) approaches 1.0, the task-relevant decisive feature \\( z_u \\) becomes steadily dominated by the audio-specific decisive feature \\( z_a \\), resulting in a transformation from a bimodal distribution in the latent representation subspace to a unimodal one. The decision pattern of the multimodal model shifts from \\( p(y|z_u) \\) to \\( p(y|z_a) \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.47, + 0.901 + ], + "angle": 0, + "content": "During the inference stage, these multimodal models display different modality biases. For the model trained on complete multimodal data or dropout on audio with a larger \\(\\gamma\\), they tend to search general information shared among modalities. This hypothesis effectively explains the observed experimental phenomena in previous studies. For modality-biased models, such as Multimodal Senti" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.273 + ], + "angle": 0, + "content": "ment Analysis (MSA) [22] dominated by text, Multimodal Speech Enhancement (MSE) [29] dominated by audio, as well as AVSR dominated by audio [21, 23, 30], it has been observed that applying dropout on the primary modality helps alleviate modality bias and brings about slight improvements when dealing with complete input. On the other hand, the AVSR model with larger \\(\\alpha\\) and smaller \\(\\gamma\\) values tends to focus more on speech and neglect complementary information from lip movements. When dealing with partially or completely missing video data, the model with larger \\(\\alpha\\) shows its robustness, which aligns well with the aforementioned experimental observations." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.284, + 0.895, + 0.32 + ], + "angle": 0, + "content": "4. Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.327, + 0.895, + 0.553 + ], + "angle": 0, + "content": "For the robustness training of modality-bias systems, it is crucial to avoid dropout-induced modality bias on the primary modality. Dropout indeed alleviates the OOD problem to some extent but encourages multimodal models to pursue trivial solutions at the same time. Ideal robust multimodal models are expected to achieve two goals: (1) learn to extract mutual information across modalities rather than relying on a certain modality when facing complete paired input, and (2) learn to complement information from the other modality and utilize context information from adjacent frames. To prevent excessive modality bias caused by dropout, we propose a novel Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) framework to constrain the distribution of the multimodal feature space during the robustness training phase." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.554, + 0.895, + 0.84 + ], + "angle": 0, + "content": "Unlike traditional knowledge distillation methods, firstly, the teacher model is trained on the complete multimodal data pairs, while the student model is trained on missing video data. 
The teacher model is relatively unbiased with a higher proportion of modality-general decisive features \\( z^g \\) in the MBVD space. During the training process of the student model, the teacher model serves as an anchor point, preventing the student model from shifting towards a unimodal distribution on the audio modality. Note that the difference between teacher and student models in our method is modality bias varies, rather than size, architecture as in common KD methods [31-34]. Additionally, distillation occurs at the hidden layer rather than the logistic outputs, aiming to minimize the distances between decision distribution samples of the teacher and student models and further constrain the intermediate representation subspace distribution of the student model. In practice, we take the knowledge from the intermediate representation of the cross-modal encoder layers." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Here, we adopt the symbol definitions from Section 3 and provide a formal description of MDA-KD. For a naturally modal-biased multimodal system, the data samples from original feature space \\(\\mathcal{X}^a\\times \\mathcal{X}_{k_i}^v\\times \\mathcal{Y}\\) can be de" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "27448" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.094, + 0.168, + 0.11 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.127, + 0.111, + 0.167, + 0.121 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.142, + 0.123, + 0.155, + 0.133 + ], + "angle": 0, + "content": "#" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.098, + 0.254, + 0.107 + ], + "angle": 0, + "content": "Module with adapter" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.111, + 0.264, + 0.119 + ], + "angle": 0, + "content": "Cross-attention module" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.123, + 0.27, + 0.131 + ], + "angle": 0, + "content": "Element-wise summation" + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.134, + 0.164, + 0.143 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.135, + 0.307, + 0.144 + ], + "angle": 0, + "content": "Data flow when activating adapters" + }, + { + "type": "image_caption", + "bbox": [ + 0.129, + 0.159, + 0.229, + 0.18 + ], + "angle": 0, + "content": "Teacher AVSR Model (Complete data input)" + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.169, + 0.361, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.442, + 0.096, + 0.724, + 0.107 + ], + "angle": 0, + "content": "Audiovisual Speech Recognition Network Architecture" + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.108, + 0.846, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.439, + 0.28, + 0.524, + 0.289 + ], + "angle": 0, + "content": "Lip Video Frames" + }, + { + "type": "image", + "bbox": [ + 0.385, + 0.291, + 0.588, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.626, + 0.28, + 0.734, + 0.289 + ], + "angle": 0, + "content": "Noisy Audio Waveform" + }, + { + "type": "image", + "bbox": [ + 0.597, + 0.291, + 0.843, + 0.331 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.346, + 0.892, + 0.415 + ], + "angle": 0, + "content": "Figure 5. 
Overall framework of the proposed AVSR system. We address challenging real-world scenarios involving missing video frames and noisy speech with an overlap rate exceeding \\(40\\%\\) during both the training and testing stages. In MDA-KD, latent knowledge is sampled from the latent distribution of the teacher model with complete data input. This latent knowledge serves as an anchor point to prevent dropout-induced modality bias during the robustness training of the student network. For entirely missing video input, the MS-Adapter is activated to enable a dynamic decision switch." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.423, + 0.47, + 0.544 + ], + "angle": 0, + "content": "noted as triples \\((x^{a}, x_{k_{i}}^{v}, y)\\). For simplicity, we denote \\(x_{0.0}^{v}\\) as \\(x^{v}\\). The teacher model \\(T e(\\phi)\\) is first trained on complete multimodal data \\((x^{a}, x^{v}, y)\\) model with parameters \\(\\phi\\), and the model's decision process can be formulated as \\(P_{t e}(y \\mid x^{a}, x^{v})\\) in a Bayesian decision problem. We assume that the teacher model is a neural network \\(g(\\phi)\\) and it is trained by minimizing the following loss function, a form of multitask learning." + }, + { + "type": "equation", + "bbox": [ + 0.144, + 0.559, + 0.469, + 0.581 + ], + "angle": 0, + "content": "\\[\nT e (\\phi) = \\min _ {\\phi} \\mathcal {L} _ {\\mathrm {M L T}} (g (x ^ {a}, x ^ {v}; \\phi), y), \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.097, + 0.584, + 0.469, + 0.62 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {M L T}} \\left(x ^ {a}, x ^ {v}; \\phi\\right) = \\lambda \\log P _ {\\mathrm {C T C}} \\left(y \\mid x ^ {a}, x ^ {v}\\right) \\tag {3} \\\\ + (1 - \\lambda) \\log P _ {\\mathrm {A t t}} \\left(y _ {i} \\mid x ^ {a}, x ^ {v}\\right), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.624, + 0.469, + 0.805 + ], + "angle": 0, + "content": "where the tunable parameter \\(\\lambda \\in [0,1]\\) is used to balance the sequence-level Connectionist Temporal Classification (CTC) loss and the frame-wise Cross Entropy (CE) loss, which serve as the standard end-to-end ASR training objectives. During the training of the student model, the dropout strategy is applied to the secondary modality \\(v\\), while the teacher model is frozen with complete multimodal data as input. It is important to note that the student and teacher models have the same network architecture. From the perspective of MBVD, the whole decision process of the multimodal model can be divided into hidden feature generation step and decision step." 
+ }, + { + "type": "equation", + "bbox": [ + 0.094, + 0.827, + 0.469, + 0.845 + ], + "angle": 0, + "content": "\\[\nP _ {s t} \\left(y \\mid x ^ {a}, x _ {k _ {i}} ^ {v}\\right) = P _ {s t} \\left(y \\mid z ^ {\\mu}\\right) P _ {s t} \\left(z ^ {\\mu} \\mid x ^ {a}, x _ {k _ {i}} ^ {v}\\right), \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.102, + 0.851, + 0.469, + 0.867 + ], + "angle": 0, + "content": "\\[\nP _ {t e} \\left(y \\mid x ^ {a}, x ^ {v}\\right) = P _ {t e} \\left(y \\mid z ^ {\\mu}\\right) P _ {t e} \\left(z ^ {\\mu} \\mid x ^ {a}, x ^ {v}\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "where \\(z^{\\mu} \\in \\mathbb{R}^{d\\mu}\\) represents the combined representation of modality-specific decisive features \\(z^{sa} \\in \\mathbb{R}^{da}\\), \\(z^{sv} \\in\\)" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.423, + 0.891, + 0.468 + ], + "angle": 0, + "content": "\\(\\mathbb{R}^{dv}\\), and modality-general decisive features \\(z^g \\in \\mathbb{R}^{dg}\\). The tuple \\((z^{sa}, z^{sv}, z^g)\\) represents a sample drawn from the MBVD hidden features space, denoted as \\(\\mathcal{Z}^{sa} \\times \\mathcal{Z}^{sv} \\times \\mathcal{Z}^g\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.469, + 0.891, + 0.574 + ], + "angle": 0, + "content": "initialized on the parameter of the teacher model, we introduce an additional loss term to constrain the dynamic process of the student model's MBVD feature distribution in robust training. The distance between batch samples from the student and the teacher model is used to approximate the difference of distribution, which serves as a form of frame-level knowledge distillation." + }, + { + "type": "equation", + "bbox": [ + 0.552, + 0.581, + 0.891, + 0.638 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {K D}} \\left(x ^ {a}, x ^ {v}, x _ {k} ^ {v}; \\phi_ {t e}, \\phi_ {s t}\\right) = \\mathrm {K L} \\left(S _ {t e}, S _ {s t}\\right), \\\\ S _ {t e} = \\sigma_ {T} \\left(\\operatorname {S a m p l e} \\left(P _ {t e} \\left(z ^ {\\mu} \\mid x ^ {a}, x ^ {v}\\right)\\right)\\right), \\tag {6} \\\\ S _ {s t} = \\sigma_ {T} \\left(\\operatorname {S a m p l e} \\left(P _ {s t} \\left(z ^ {\\mu} \\mid x ^ {a}, x _ {k _ {i}} ^ {v}\\right)\\right)\\right), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.901 + ], + "angle": 0, + "content": "where \\(\\sigma_T(x)\\) denotes the SoftMax function with temperature \\(T\\) and Sample represents the sample function. This distribution approximation serves two main purposes. Firstly, during training, when the student network encounters a missing modality feature \\(x_{k_i}^v\\), the convergence of the student's decisive feature \\(z^u = g(x^a,x_{k_i}^v;\\phi_{st})\\) towards the teacher's decisive feature \\(z^{u} = g(x^{a},x^{v};\\phi_{te})\\) encourages the utilization of contextual information from \\(x_{k_i}^v\\). Additionally, with the dual cross-attention design, the process complements the information extracted from \\(x^a\\), effectively addressing the condition of missing frames and promoting out-of-distribution generality. On the other hand, the KD loss is used to minimize the distance between the distributions of the teacher and student models, preventing the student model from converging to trivial solutions. 
Subsequently, we train the student model jointly with a weighted sum of the standard training loss and distillation loss:" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27449" + } + ], + [ + { + "type": "equation", + "bbox": [ + 0.095, + 0.121, + 0.468, + 0.155 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {M L T}} \\left(x ^ {a}, x ^ {v}, x _ {k} ^ {v}; \\phi_ {t e}, \\phi_ {s t}\\right) = \\beta \\mathcal {L} _ {\\mathrm {K D}} \\left(x ^ {a}, x ^ {v}, x _ {k} ^ {v}; \\phi_ {t e}, \\phi_ {s t}\\right) \\\\ + (1 - \\beta) \\mathcal {L} _ {\\mathrm {M L T}} \\left(x ^ {a}, x _ {k} ^ {v}; \\phi_ {s t}\\right). \\tag {7} \\\\ \\end{array}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.167, + 0.446, + 0.184 + ], + "angle": 0, + "content": "5. Modality-Specific Adapter (MS-Adapter)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.192, + 0.473, + 0.495 + ], + "angle": 0, + "content": "As illustrated in Figure 4 on the right, when facing severely or entirely missing video data, we consider it unreliable to continue employing a synergistic decision-making strategy like MDA-KD with relatively high values of \\(\\gamma\\) and \\(\\beta\\). Padding frames lack sufficient contextual information and may introduce noise. Therefore, in such scenarios, a dynamic switch in decision strategy from \\(P(y|z^u)\\) to \\(P(y|z^a)\\) is necessary as a complement to MDA-KD. In view of the success of adapters applied in foundation model fine-tuning [35-38], we attempt to extend it to address the modality missing issue in multimodal models. For clarity, we refer to this extension as Modality-Specific Adapter (MS-Adapter). Specifically, LORA [39] is adopted to self-attention layers in the audio branch, marked with a dashed box in Figure 5. These adapters perform residual-style feature blending with the original pre-trained features. The residual weight could be represented as low-rank matrices \\(\\Delta W \\in \\mathbb{R}^{d \\times d}\\), and it could be decomposed into a pair of fan-in and fan-out linear layers with weights \\(A \\in \\mathbb{R}^{r \\times d}\\) and \\(B \\in \\mathbb{R}^{d \\times r}\\) (\\(r \\ll d\\)). The reparametrization operation can be formulated below." + }, + { + "type": "equation", + "bbox": [ + 0.137, + 0.505, + 0.469, + 0.521 + ], + "angle": 0, + "content": "\\[\nH _ {o} = H _ {i} \\left(W _ {0} + \\Delta W\\right) = H _ {i} \\left(W _ {0} + B A\\right) \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.531, + 0.47, + 0.713 + ], + "angle": 0, + "content": "By activating the MS-Adapter, we can dynamically switch the decision-making pattern by activating the adapters. We highlight two advantages of the MS-Adapter. First, a substantial amount of unpaired unimodal training data and data augmentation techniques could be used in the training process of the adapters. Second, the adapter training process provides an opportunity to modify the computation pathway. As illustrated in Figure 5 with dashed arrows, in both training and inference stage with audio-only input, the computation flow of the video branch will be directly cut off, and the modality fusion cross-attention module will be skipped to reduce computational costs." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.725, + 0.273, + 0.743 + ], + "angle": 0, + "content": "6. 
Experiment Settings" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Dataset We conduct our experiments on MISP2021 [24] and MISP2022 [40]. These two open-source datasets present a large-scale audio-visual corpus recorded in real-life home TV scenarios with multiple groups of speakers chatting simultaneously. Multiple microphone arrays and cameras are used to collect far/middle/near-field audio and far/middle-field video. Compared to the carefully recorded videos in LRS2 [1] and LRS3 [2] from BBC interviews and TED talks, MISP datasets offer static shooting perspectives with diverse resolutions, including naturally blurred" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.228 + ], + "angle": 0, + "content": "and obstructed frames. The videos are accompanied by various background noises and high speech overlap rates (42% in training set and 49% in test set). Compared oracle segment-level AVSR task in MISP201, MISP2022 presents a more challenging task of session-level AVSR without oracle speaker diarization results. To avoid limitations associated with noise simulation, all experiments are evaluated exclusively on far-field data, which aligns well with common in-car, office meeting, or smart home scenarios." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.234, + 0.893, + 0.43 + ], + "angle": 0, + "content": "Implementation Detail We strictly adhere to the approaches outlined in [18] for model training and network architectures. We initialize the AVSR model with two pretrained unimodal models and fine-tune it in an end-to-end manner. As shown in Figure 5, the AVSR model is a dual-branch network where \\( N = 3 \\), \\( M = 9 \\) and \\( K = 6 \\). For the loss function in Equation 3, we set \\( \\lambda \\) to 0.7 and CTC loss consists of the same weighted intermediate CTC [41] losses in 3, 6, 9, 12 layers. In Equation 4, we use 0.1 for \\( \\beta \\). We follow [18] to establish two baselines A0 and AV0 trained on complete modality data with dropout techniques. AV0 is fine-tuned based on A0 and a pre-trained ResNet-18 encoder with a 3D-CNN head. 3" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.438, + 0.893, + 0.725 + ], + "angle": 0, + "content": "Dropout Settings Similar to [19], we evaluate the robustness to missing video modality with various dropout methods and rates: Segment Dropout, Utterance Dropout, and Interval Dropout. Testing involves dropout rates from 0.0 to 1.0 in 0.25 intervals. Results from the three dropout methods are averaged at each rate to obtain overall dropout results. When conducting ablation studies, segments with naturally missing video frames (17%) are excluded from the test set, ensuring a consistent and controlled video missing rate. In our method, during training, A certain proportion of sample is assigned a random dropout method from the above three methods and an extra one from [21] with an optimized dropout rate. In both training and testing stages, we pad the missing video frame pixels with zeros instead of using interpolation or repetition methods. We conduct a hyper-parameter search over the training dropout rate and found that 0.5 is optimal for our method (when \\( D_{p}rod \\) is 0.5). This rate implies that half of the video frames in a selected sample are padded with zeros. 3" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.739, + 0.803, + 0.756 + ], + "angle": 0, + "content": "7. 
Experiments and Result Analysis" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.763, + 0.874, + 0.78 + ], + "angle": 0, + "content": "7.1. Overall Comparison of Experiment Settings" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.787, + 0.892, + 0.879 + ], + "angle": 0, + "content": "In Table 1, we conduct key parameter analysis and abolation study of the proposed methods on the MISP2022 dataset with oracle speaker diarization results. We first explore the impact of dropout probability in training videos. In contrast to AV1, AV2 introduces half of the complete data pairs. As a result, it mitigates dropout-induced modality" + }, + { + "type": "page_footnote", + "bbox": [ + 0.516, + 0.887, + 0.736, + 0.901 + ], + "angle": 0, + "content": "3 More details can be found in Appendix." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27450" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.107, + 0.09, + 0.865, + 0.251 + ], + "angle": 0, + "content": "
ModelTraining settingsTest dropout rate
DropoutDprobInit.MDA-KDMS-Adapter0.000.250.500.751.00
A0X0.0RandomXX25.1325.1325.1325.1325.13
AV0X0.0A0XX21.1423.7725.5725.8726.65
AV11.0A0XX23.2623.6824.2724.9525.91
AV20.5A0XX21.7222.5623.3724.4625.64
AV30.5AV0XX21.5322.4723.6524.5525.90
AV40.5AV0X21.3822.1823.2024.4025.70
AV50.5AV0X21.1121.7722.7824.0225.45
AV60.5AV021.1121.7722.7824.0224.94
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.256, + 0.892, + 0.284 + ], + "angle": 0, + "content": "Table 1. An overall comparison in CER (%) of different system configurations. Different from the dropout rate, \\( D_{prob} \\) represents the proportion of data with missing frames in the training set. Init. refers to the network initialization method." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.29, + 0.48, + 0.422 + ], + "angle": 0, + "content": "
Insert partRankDAParams(MB)CER(%)
Encoder32×4.5025.35
Encoder324.5025.08
En&Decoder329.0025.20
Encoder649.0025.08
En&Decoder6418.0025.05
Encoder12818.0025.01
En&Decoder12836.0024.94
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.426, + 0.471, + 0.455 + ], + "angle": 0, + "content": "Table 2. Performance analysis of MS-Adapter. DA means data augmentation, including speed perturbation and utterance concat." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.462, + 0.47, + 0.688 + ], + "angle": 0, + "content": "bias to some extent, since a higher proportion of complete data tend to encourages the model to learn general information across modalities. This finding aligns with previous research [19], highlighting the superiority of utterance dropout over random frame dropout (the former means a larger \\( D_{prob} \\)). Next, AV3 is trained based on AV0, which means the subsequent optimized processing starts from a relatively stable convergence state with complete input. In the robust training stage, the balanced state tends to be disrupted when trained on incomplete modality pairs, searching for a new optimization coverage range. However, when trained on complete data pairs, the scenario is reversed. Thus, while AV3 outperforms AV2 with low test missing rates, it lags behind when facing severe video absence, illustrating a tug-of-war dynamic without clear guidance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Next, we validate the effectiveness of MDA-KD. Compared with AV3, AV5 demonstrates superior performance for both complete and missing video modality inputs. AV4 successfully achieves our goal of enhancing robustness without any performance degradation on complete input (21.11% vs. 21.14%). This implies that the teacher model AV0 provides an explicitly optimized target in robustness training. It effectively constrains the distribution shift to the audio modality, preventing excessive modality bias caused by dropout. Furthermore, in AV4, we restrict the flow of audio data into the video branch within the dual cross-attention module. Consequently, a performance drop is observed across all test suites, highlighting the effectiveness of MDA-KD in leveraging the dual cross-attention mod" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.29, + 0.89, + 0.379 + ], + "angle": 0, + "content": "
MethodTest dropout rate
0.000.250.500.751.00
Cascade Utt [19]22.5423.8925.2326.0528.15
AV Dropout Utt [21]22.0023.3725.3526.2126.78
Dropout Utt [20]22.0823.2124.5625.0825.46
Ours21.1121.7722.7824.0224.94
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.383, + 0.878, + 0.398 + ], + "angle": 0, + "content": "Table 3. A \\( \\mathrm{{CER}}\\left( \\% \\right) \\) comparison with other dropout methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.405, + 0.892, + 0.527 + ], + "angle": 0, + "content": "ule to extract modality-general information from audio for complementing missing information. Subsequently, we integrate MS-Adapters into the audio branch in AV6 based on AV5. Consequently, the performance with audio-only input improves to a \\(24.94\\%\\) CER, surpassing A0 for the first time \\((24.94\\%\\) vs. \\(25.13\\%)\\). These results show the effectiveness of MS-Adapters by dynamically switching to the decision patterns on audio modality with audio-only input." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.538, + 0.738, + 0.556 + ], + "angle": 0, + "content": "7.2. Validation of MS-Adapter" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.563, + 0.892, + 0.789 + ], + "angle": 0, + "content": "We further explore three key factors in MS-Adapter adaptation: data augmentation, insert part and rank dimension. In Table 2, we observe a decrease in CER from \\(25.45\\%\\) (AV4) to \\(25.35\\%\\), and it further improves to \\(25.08\\%\\) with data augmentation doubling audio training data. These results suggest that the adapter adaptation effectively enhances the robustness of AVSR with completely missing video, requiring only an additional 4.50MB in parameters. It provides an opportunity to apply data augmentation that is effective for unimodal model training and to use extra unpaired data. Next, increasing the ranks and the quantity of adapters results in further performance gains at the expense of a larger parameter. The best performance, achieving \\(24.94\\%\\), is shown in the bottom row and attained with the adapter inserted in both encoder and decoder blocks." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.801, + 0.887, + 0.818 + ], + "angle": 0, + "content": "7.3. Comparisons with Other Dropout Techniques" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.892, + 0.903 + ], + "angle": 0, + "content": "As shown in Table 3, we compare our proposed framework with three widely used dropout techniques [19-21]. Cascade Utt employs a separable cascade structure, where an AV model is superimposed on an audio-only model. Inputs are then routed through either the audio-only path or" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.958 + ], + "angle": 0, + "content": "27451" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.086, + 0.089, + 0.887, + 0.252 + ], + "angle": 0, + "content": "
BenchmarkSystemTraining DataBackboneObj. FunctionCER / cpCER(%)
AV
MISP2021SJTU [42]300 hoursLRW-1000ConformerED + SE34.02
NIO [43]3300 hoursLRW-1000 [4]TransformerED25.07
USTC [18]500 hoursw/o extra dataConformerED24.58
Ours1000 hoursw/o extra dataConformerED + InterCTC21.53
MISP2022NIO [44]3300 hoursLRW-1000ConformerED29.58
XMU [45]2100 hoursLRW-1000ConformerED + InterCTC31.88
NPU [46]1300 hoursw/o extra dataE-BranchformerED + InterCTC29.13
Ours1000 hoursw/o extra dataConformerED + InterCTC28.06
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.255, + 0.895, + 0.3 + ], + "angle": 0, + "content": "Table 4. A Comparison of the state-of-the-art systems. InterCTC refers to Intermediate CTC loss [41], the ED loss is formulated in Equation (3) and SE represents the mean square error loss. We use evaluate the performance using the concatenated minimum-permutation character error rate (cpCER) [47] metric for the session-level AVSR task." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.306, + 0.473, + 0.457 + ], + "angle": 0, + "content": "the AV path with a probability of \\( p_1 \\). AV Dropout Utt randomly drops either the entire video or the entire audio segments with a probability of \\( p_2 \\). Dropout Utt exclusively drops the video segments with a probability of \\( p_3 \\). We adopt the optimal dropout settings from [19], where \\( p_1 = 0.25 \\), \\( p_2 = 0.25 \\), and \\( p_3 = 0.5 \\). For Cascade Utt, we follow [19] to build the network and maintain comparable parameters numbers. As a result, our proposed methods outperforms the other three techniques in all test suites and does not cause performance degradation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.465, + 0.445, + 0.482 + ], + "angle": 0, + "content": "7.4. Comparisons with State-of-the-art Systems" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.488, + 0.473, + 0.73 + ], + "angle": 0, + "content": "Finally, we compare our system with the state-of-the-art systems on the MISP2021 and MISP2022 challenges[18, 42-45, 48] as shown in Table 4. With Recognizer Output Voting Error Reduction (ROVER) [49], we rescore the output transcripts of A0, AV0, and A6 mentioned in Table 1. In the MISP2021 utterance-level AVSR challenge with oracle speaker diarization results, our system outperforms the previous SOTA system by achieving an absolute CER reduction of \\(3.05\\%\\) from \\(24.58\\%\\) to \\(21.53\\%\\). Our top-performing system, AV6, attains a CER of \\(22.13\\%\\). Moving to the MISP2022 session-level AVSR challenge, we build our diarization system closely adhering to [50]. We secure a ROVER cpCER score of \\(28.06\\%\\) and obtain the best system score with a cpCER of \\(28.55\\%\\). When oracle segmentations are utilized, our system achieves a ROVER CER score of \\(21.80\\%\\) and the best model score of \\(21.53\\%\\) in CER." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.74, + 0.228, + 0.755 + ], + "angle": 0, + "content": "8. Related Works" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Modality Missing in Multimodal Learning The prevalent issue of missing modalities in multimodal applications has prompted research that specifically targets severe modality absences. Generative models [51, 52] and meta-learning predict missing modalities using available or few-shot paired samples. Balanced models utilize joint multimodal representations [53-55]. Models addressing modality bias employ data augmentation methods like modality dropout [19, 22] to tackle out-of-distribution challenges." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.306, + 0.892, + 0.351 + ], + "angle": 0, + "content": "For AVSR, we prioritize efficiency and opt for dropout due to its plug-and-play nature and lightweight implementation. More discussion could be found in Appendix." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.354, + 0.893, + 0.686 + ], + "angle": 0, + "content": "Video Modality Robustness in AVSR To enhance performance on low-resolution videos, visual extractors are commonly pre-trained on relatively high-quality videos with isolated words [5] or acoustic pseudo-labeling classification tasks [18]. Addressing situations involving corruption, Hong et al. [17] have designed an explicit scoring module to identify reliable streams and effectively manage input scenarios. Regarding the issue of missing video frames, most researchers have applied dropout techniques to enhance missing robustness [19-23]. In classical dropout methods, frame level dropout is utilized in [23] and utterance-level dropout is applied in AV-Hubert [21]. As a recent work focusing on this issue, Chang et al. [19] unify test suites of missing videos. However, the proposed binary evaluation metric overly emphasizes relative robustness trends, neglecting absolute performance. Compared to the methods mentioned earlier, we explore the problem of missing video frames from the perspective of modality bias. Leveraging classical techniques and simple designs, our approach achieves both performance and robustness without introducing additional inference time. It adapts to various scenarios of frame absence through a unified model." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.696, + 0.62, + 0.71 + ], + "angle": 0, + "content": "9. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.887 + ], + "angle": 0, + "content": "In this work, we discover and analyze the essence of dropout-induced modality bias. Based on these findings, we proposed MBH to provide a systematic description of the relationship between modality bias and missing robustness in multimodal systems. Consequently, we propose a new multimodal distribution approximation with knowledge distillation approach to deal with missing video frames for AVSR. Furthermore, we apply adapters to handle videos with both severe and complete missing rates. For future work, we intend to validate our findings in this study across a wide range of multimodal applications beyond AVSR." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "27452" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Joon Son Chung, Andrew Senior, Oriol Vinyals, and Andrew Zisserman. Lip reading sentences in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6447-6456, 2017. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.47, + 0.214 + ], + "angle": 0, + "content": "[2] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. LRS3-TED: a large-scale dataset for visual speech recognition. arXiv preprint arXiv:1809.00496, 2018. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.217, + 0.471, + 0.314 + ], + "angle": 0, + "content": "[3] Hang Chen, Jun Du, Yusheng Dai, Chin Hui Lee, Sabato Marco Siniscalchi, Shinji Watanabe, Odette Scharenborg, Jingdong Chen, Bao Cai Yin, and Jia Pan. Audio-visual speech recognition in misp2021 challenge: Dataset release and deep analysis. 
In Proceedings of the Annual Conference of the International Speech Communication Association, IN-TERSPEECH, volume 2022, pages 1766–1770, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.316, + 0.471, + 0.399 + ], + "angle": 0, + "content": "[4] Shuang Yang, Yuanhang Zhang, Dalu Feng, Mingmin Yang, Chenhao Wang, Jingyun Xiao, Keyu Long, Shiguang Shan, and Xilin Chen. Lrw-1000: A naturally-distributed large-scale benchmark for lip reading in the wild. In 2019 14th IEEE international conference on automatic face & gesture recognition (FG 2019), pages 1-8. IEEE, 2019. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.402, + 0.471, + 0.469 + ], + "angle": 0, + "content": "[5] Pingchuan Ma, Stavros Petridis, and Maja Pantic. End-to-end audio-visual speech recognition with conformers. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 7613-7617. IEEE, 2021. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.472, + 0.47, + 0.527 + ], + "angle": 0, + "content": "[6] Xichen Pan, Peiyu Chen, Yichen Gong, Helong Zhou, Xinbing Wang, and Zhouhan Lin. Leveraging unimodal self-supervised learning for multimodal audio-visual speech recognition, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.53, + 0.471, + 0.613 + ], + "angle": 0, + "content": "[7] Chen Chen, Yuchen Hu, Qiang Zhang, Heqing Zou, Beier Zhu, and Eng Siong Chng. Leveraging modality-specific representations for audio-visual speech recognition via reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pages 12607–12615, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.615, + 0.472, + 0.671 + ], + "angle": 0, + "content": "[8] Bo Xu, Cheng Lu, Yandong Guo, and Jacob Wang. Discriminative multi-modality speech recognition. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, pages 14433-14442, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.673, + 0.47, + 0.756 + ], + "angle": 0, + "content": "[9] Jianwei Yu, Shi-Xiong Zhang, Jian Wu, Shahram Ghorbani, Bo Wu, Shiyin Kang, Shansong Liu, Xunying Liu, Helen Meng, and Dong Yu. Audio-visual recognition of overlapped speech for the lrs2 dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6984-6988. IEEE, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.758, + 0.47, + 0.813 + ], + "angle": 0, + "content": "[10] Joanna Hong, Minsu Kim, Daehun Yoo, and Yong Man Ro. Visual context-driven audio feature enhancement for robust end-to-end audio-visual speech recognition. arXiv preprint arXiv:2207.06020, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.47, + 0.87 + ], + "angle": 0, + "content": "[11] Alexandros Haliassos, Pingchuan Ma, Rodrigo Mira, Stavros Petridis, and Maja Pantic. Jointly learning visual and auditory speech representations from raw data. arXiv preprint arXiv:2212.06246, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[12] Pingchuan Ma, Alexandros Haliassos, Adriana Fernandez-Lopez, Honglie Chen, Stavros Petridis, and Maja Pantic." 
+ }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.148 + ], + "angle": 0, + "content": "Auto-AVSR: Audio-visual speech recognition with automatic labels. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.151, + 0.892, + 0.206 + ], + "angle": 0, + "content": "[13] George Sterpu, Christian Saam, and Naomi Harte. Attention-based audio-visual fusion for robust automatic speech recognition. In Proceedings of the 20th ACM International conference on Multimodal Interaction, pages 111–115, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.207, + 0.892, + 0.262 + ], + "angle": 0, + "content": "[14] George Sterpu, Christian Saam, and Naomi Harte. How to teach DNNs to pay attention to the visual modality in speech recognition. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 28:1052-1064, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.264, + 0.892, + 0.319 + ], + "angle": 0, + "content": "[15] Yuchen Hu, Ruizhe Li, Chen Chen, Heqing Zou, Qiushi Zhu, and Eng Siong Chng. Cross-Modal Global Interaction and Local Alignment for Audio-Visual Speech Recognition. arXiv preprint arXiv:2305.09212, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.321, + 0.892, + 0.376 + ], + "angle": 0, + "content": "[16] Triantafyllos Afouras, Joon Son Chung, Andrew Senior, Oriol Vinyals, and Andrew Zisserman. Deep audio-visual speech recognition. IEEE transactions on pattern analysis and machine intelligence, 44(12):8717-8727, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.378, + 0.892, + 0.459 + ], + "angle": 0, + "content": "[17] Joanna Hong, Minsu Kim, Jeongsoo Choi, and Yong Man Ro. Watch or Listen: Robust Audio-Visual Speech Recognition with Visual Corruption Modeling and Reliability Scoring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18783–18794, 2023. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.462, + 0.892, + 0.545 + ], + "angle": 0, + "content": "[18] Yusheng Dai, Hang Chen, Jun Du, Xiaofei Ding, Ning Ding, Feijun Jiang, and Chin-Hui Lee. Improving Audio-Visual Speech Recognition by Lip-Subword Correlation Based Visual Pre-training and Cross-Modal Fusion Encoder. In 2023 IEEE International Conference on Multimedia and Expo (ICME), pages 2627–2632. IEEE, 2023. 1, 6, 8, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.547, + 0.892, + 0.615 + ], + "angle": 0, + "content": "[19] Oscar Chang, Otavio de Pinho Forin Braga, Hank Liao, Dmitriy Dima Serdyuk, and Olivier Siohan. On robustness to missing video for audiovisual speech recognition. Transactions on Machine Learning Research (TMLR), 2022. 1, 6, 7, 8, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.617, + 0.892, + 0.7 + ], + "angle": 0, + "content": "[20] Takaki Makino, Hank Liao, Yannis Assael, Brendan Shillingford, Basilio Garcia, Otavio Braga, and Olivier Siohan. Recurrent neural network transducer for audio-visual speech recognition. In 2019 IEEE automatic speech recognition and understanding workshop (ASRU), pages 905–912. IEEE, 2019. 1, 7, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.702, + 0.892, + 0.744 + ], + "angle": 0, + "content": "[21] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. 
Learning audio-visual speech representation by masked multimodal cluster prediction, 2022. 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.746, + 0.892, + 0.787 + ], + "angle": 0, + "content": "[22] Devamanyu Hazarika, Yingting Li, Bo Cheng, Shuai Zhao, Roger Zimmermann, and Soujanya Poria. Analyzing modality robustness in multimodal sentiment analysis, 2022. 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.789, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[23] Shiliang Zhang, Ming Lei, Bin Ma, and Lei Xie. Robust audio-visual speech recognition using bimodal DFSMN with multi-condition training and dropout regularization. In ICASSP 2019-2019 IEEE international conference on acoustics, speech and signal processing (ICASSP), pages 6570-6574. IEEE, 2019. 1, 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[24] Hang Chen, Hengshun Zhou, Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Marco Siniscalchi," + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27453" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.47, + 0.176 + ], + "angle": 0, + "content": "Odette Scharenborg, Di-Yuan Liu, Bao-Cai Yin, Jia Pan, Jian-Qing Gao, and Cong Liu. The First Multimodal Information Based Speech Processing (Misp) Challenge: Data, Tasks, Baselines And Results. In ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 9266-9270, 2022. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.47, + 0.247 + ], + "angle": 0, + "content": "[25] Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Marco Siniscalchi, and Odette Scharenborg. Multimodal Information Based Speech Processing (MISP) Challenge 2022. https://mispchallenge.github.io/mispchallenge2022/, 2022. Accessed: 2023-06-26.1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.249, + 0.471, + 0.345 + ], + "angle": 0, + "content": "[26] Jinming Zhao, Ruichen Li, and Qin Jin. Missing modality imagination network for emotion recognition with uncertain missing modalities. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2608-2618, 2021. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.347, + 0.471, + 0.416 + ], + "angle": 0, + "content": "[27] Mengmeng Ma, Jian Ren, Long Zhao, Davide Testuggine, and Xi Peng. Are multimodal transformers robust to missing modality? In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18177-18186, 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.418, + 0.471, + 0.46 + ], + "angle": 0, + "content": "[28] Zihui Xue, Zhengqi Gao, Sucheng Ren, and Hang Zhao. The modality focusing hypothesis: Towards understanding cross-modal knowledge distillation, 2022. 3, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.462, + 0.47, + 0.516 + ], + "angle": 0, + "content": "[29] Hang Chen, Jun Du, Yu Hu, Li-Rong Dai, Bao-Cai Yin, and Chin-Hui Lee. Correlating subword articulation with lip shapes for embedding aware audio-visual speech enhancement. Neural Networks, 143:171–182, 2021. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.519, + 0.47, + 0.587 + ], + "angle": 0, + "content": "[30] Pan Zhou, Wenwen Yang, Wei Chen, Yanfeng Wang, and Jia Jia. Modality attention for end-to-end audio-visual speech recognition. In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6565-6569. IEEE, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.59, + 0.47, + 0.659 + ], + "angle": 0, + "content": "[31] Xianing Chen, Qiong Cao, Yujie Zhong, Jing Zhang, Shenghua Gao, and Dacheng Tao. Dearkd: data-efficient early knowledge distillation for vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12052-12062, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.661, + 0.47, + 0.744 + ], + "angle": 0, + "content": "[32] Francisco Rivera Valverde, Juana Valeria Hurtado, and Abhinav Valada. There is more than meets the eye: Self-supervised multi-object detection and tracking with sound by distilling multimodal knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11612-11621, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.47, + 0.801 + ], + "angle": 0, + "content": "[33] Zihui Xue, Sucheng Ren, Zhengqi Gao, and Hang Zhao. Multimodal knowledge expansion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 854-863, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.47, + 0.871 + ], + "angle": 0, + "content": "[34] Baoyun Peng, Xiao Jin, Jiaheng Liu, Dongsheng Li, Yichao Wu, Yu Liu, Shunfeng Zhou, and Zhaoning Zhang. Correlation congruence for knowledge distillation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5007-5016, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.874, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[35] Renrui Zhang, Rongyao Fang, Wei Zhang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.133 + ], + "angle": 0, + "content": "Tip-adapter: Training-free clip-adapter for better vision-language modeling. arXiv preprint arXiv:2111.03930, 2021.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.135, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[36] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.205, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[37] Sylvestre-Alvise Rebuffi, Hakan Bilen, and Andrea Vedaldi. Learning multiple visual domains with residual adapters. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.248, + 0.892, + 0.314 + ], + "angle": 0, + "content": "[38] Peng Gao, Shijie Geng, Renrui Zhang, Teli Ma, Rongyao Fang, Yongfeng Zhang, Hongsheng Li, and Yu Qiao. Clip-adapter: Better vision-language models with feature adapters. International Journal of Computer Vision, pages 1–15, 2023. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.316, + 0.892, + 0.37 + ], + "angle": 0, + "content": "[39] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.372, + 0.892, + 0.469 + ], + "angle": 0, + "content": "[40] Zhe Wang, Shilong Wu, Hang Chen, Mao-Kui He, Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Siniscalchi, Odette Scharenborg, et al. The multimodal information based speech processing (misp) 2022 challenge: Audio-visual diarization and recognition. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.47, + 0.892, + 0.537 + ], + "angle": 0, + "content": "[41] Jaesong Lee and Shinji Watanabe. Intermediate loss regularization for ctc-based speech recognition. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6224-6228. IEEE, 2021. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.539, + 0.892, + 0.622 + ], + "angle": 0, + "content": "[42] Wei Wang, Xun Gong, Yifei Wu, Zhikai Zhou, Chenda Li, Wangyou Zhang, Bing Han, and Yanmin Qian. The sjtu system for multimodal information based speech processing challenge 2021. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 9261-9265. IEEE, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.623, + 0.892, + 0.676 + ], + "angle": 0, + "content": "[43] Gaopeng Xu, Song Yang, Wei Li, et al. Channel-Wise AV-Fusion Attention for Multi-Channel Audio-Visual Speech Recognition. In Proc. ICASSP 2022, pages 9251–9255. IEEE, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.678, + 0.892, + 0.747 + ], + "angle": 0, + "content": "[44] Sang Wang Gaopeng Xu, Xianliang Wang et al. The NIO system for audio-visual diarization and recognition in MISP challenge 2022. https://mispchallenge.github.io/mispchallenge2022/papers/task2/Track2_NIO.pdf, 2022.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.748, + 0.892, + 0.817 + ], + "angle": 0, + "content": "[45] Tao Li, Haodong Zhou, Jie Wang, Qingyang Hong, and Lin Li. The XMU System for Audio-Visual Diarization and Recognition in MISP Challenge 2022. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.818, + 0.892, + 0.871 + ], + "angle": 0, + "content": "[46] He Wang, Pengcheng Guo, Pan Zhou, and Lei Xie. Mlcaavsr: Multi-layer cross attention fusion based audio-visual speech recognition. arXiv preprint arXiv:2401.03424, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[47] Shinji Watanabe, Michael Mandel, Jon Barker, Emmanuel Vincent, Ashish Arora, Xuankai Chang, Sanjeev Khudan-" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "27454" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "pur, Vimal Manohar, Daniel Povey, Desh Raj, et al. 
Chime-6 challenge: Tackling multispeaker speech recognition for unsegmented recordings. arXiv preprint arXiv:2004.09249, 2020.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.151, + 0.47, + 0.233 + ], + "angle": 0, + "content": "[48] Pengcheng Guo, He Wang, Bingshen Mu, Ao Zhang, and Peikun Chen. The NPU-ASLP System for Audio-Visual Speech Recognition in MISP 2022 Challenge. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.236, + 0.47, + 0.292 + ], + "angle": 0, + "content": "[49] Jonathan G Fiscus. A post-processing system to yield reduced word error rates: Recognizer output voting error reduction (ROVER). In Proc. asrU 1997, pages 347-354. IEEE, 1997. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.295, + 0.471, + 0.366 + ], + "angle": 0, + "content": "[50] Ming Cheng, Haoxu Wang, Ziteng Wang, Qiang Fu, and Ming Li. The whu-alibaba audio-visual speaker diarization system for the misp 2022 challenge. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.368, + 0.469, + 0.424 + ], + "angle": 0, + "content": "[51] Qiuling Suo, Weida Zhong, Fenglong Ma, Ye Yuan, Jing Gao, and Aidong Zhang. Metric Learning on Healthcare Data with Incomplete Modalities. In IJCAI, volume 3534, page 3540, 2019. 8, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.426, + 0.469, + 0.496 + ], + "angle": 0, + "content": "[52] Lei Cai, Zhengyang Wang, Hongyang Gao, Dinggang Shen, and Shuiwang Ji. Deep adversarial learning for multimodality missing data completion. In Proceedings of the 24th ACM SIGKDD international conference on knowledge discovery & data mining, pages 1158-1166, 2018. 8, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.499, + 0.469, + 0.554 + ], + "angle": 0, + "content": "[53] Zilong Wang, Zhaohong Wan, and Xiaojun Wan. Transmodality: An end2end fusion method with transformer for multimodal sentiment analysis. In Proceedings of The Web Conference 2020, pages 2514-2520, 2020. 8, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.556, + 0.469, + 0.626 + ], + "angle": 0, + "content": "[54] Hai Pham, Paul Pu Liang, Thomas Manzini, Louis-Philippe Morency, and Barnabás Póczos. Found in translation: Learning robust joint representations by cyclic translations between modalities. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 6892–6899, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.629, + 0.469, + 0.697 + ], + "angle": 0, + "content": "[55] Jiale Li, Hang Dai, Hao Han, and Yong Ding. Mseg3d: Multi-modal 3d semantic segmentation for autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21694-21704, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.701, + 0.469, + 0.755 + ], + "angle": 0, + "content": "[56] A Varga, HJM Steeneken, et al. Noisex-92: A database and an experiment to study the effect of additive noise on speech recognition systems. Speech Commun, 12(3):247–253, 1993. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.758, + 0.469, + 0.841 + ], + "angle": 0, + "content": "[57] Lukas Drude, Jahn Heymann, Christoph Boeddeker, and Reinhold Haeb-Umbach. 
Nara-wpe: A python package for weighted prediction error dereverberation in numpy and tensorflow for online and offline processing. In Speech Communication; 13th ITG-Symposium, pages 1-5. VDE, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[58] Christoph Boeddecker, Jens Heitkaemper, Joerg Schmalenstroeer, et al. Front-end processing for the CHiME-5 dinner party scenario. In Proc. CHiME 2018, pages 35–40, 2018. 1, 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[59] Desh Raj, Daniel Povey, and Sanjeev Khudanpur. GPU-accelerated guided source separation for meeting transcription, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[60] Mengmeng Ma, Jian Ren, Long Zhao, Sergey Tulyakov, Cathy Wu, and Xi Peng. Smil: Multimodal learning with severely missing modality. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 2302-2310, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.206, + 0.892, + 0.274 + ], + "angle": 0, + "content": "[61] Itai Gat, Idan Schwartz, Alexander Schwing, and Tamir Hazan. Removing bias in multi-modal classifiers: Regularization by maximizing functional entropies. Advances in Neural Information Processing Systems, 33:3197-3208, 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.277, + 0.892, + 0.359 + ], + "angle": 0, + "content": "[62] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.361, + 0.892, + 0.429 + ], + "angle": 0, + "content": "[63] Yangyang Guo, Liqiang Nie, Harry Cheng, Zhiyong Cheng, Mohan Kankanhalli, and Alberto Del Bimbo. On modality bias recognition and reduction. ACM Transactions on Multimedia Computing, Communications and Applications, 19(3):1-22, 2023. 
4" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27455" + } + ] +] \ No newline at end of file diff --git a/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/6897fec6-4bb7-4167-a28e-16a34134af6a_origin.pdf b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/6897fec6-4bb7-4167-a28e-16a34134af6a_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f44b575a4c9201efc40a926ad9e58978e7b63a14 --- /dev/null +++ b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/6897fec6-4bb7-4167-a28e-16a34134af6a_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c78595e2f43a685e6887cad407640e44f7e52035d013430e3cbe29b74a33380 +size 930998 diff --git a/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/full.md b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3f0b13513b3b34ff74d9bb134ed961c6b74b9c95 --- /dev/null +++ b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/full.md @@ -0,0 +1,295 @@ +# A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition + +Yusheng Dai†, Hang Chen†, Jun Du†*, Ruoyu Wang†, Shihao Chen†, Haotian Wang†, Chin-Hui Lee‡ +† University of Science and Technology of China, Hefei, China +‡ Georgia Institute of Technology, Atlanta, America +jundu@ustc.edu.cn + +# Abstract + +Advanced Audio-Visual Speech Recognition (AVSR) systems have been observed to be sensitive to missing video frames, performing even worse than single-modality models. While applying the common dropout techniques to the video modality enhances robustness to missing frames, it simultaneously results in a performance loss when dealing with complete data input. In this study, we delve into this contrasting phenomenon through the lens of modality bias and uncover that an excessive modality bias towards the audio modality induced by dropout constitutes the fundamental cause. Next, we present the Modality Bias Hypothesis (MBH) to systematically describe the relationship between the modality bias and the robustness against missing modality in multimodal systems. Building on these findings, we propose a novel Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) framework to reduce over-reliance on the audio modality, maintaining performance and robustness simultaneously. Finally, to address an entirely missing modality, we adopt adapters to dynamically switch decision strategies. The effectiveness of our proposed approach is evaluated through comprehensive experiments on the MISP2021 and MISP2022 datasets. Our code is available at https://github.com/dalision/ModalBiasAVSR. + +# 1. Introduction + +Audio-Visual Speech Recognition (AVSR) is a multimodal application inspired by human speech perception. 
It outperforms single-modality models by incorporating noise-invariant complementary information from visual cues, especially in noisy environments. Driven by increasingly large open-source datasets and models [1-4], AVSR has achieved significant advancements across various benchmarks with a simple end-to-end design [5, 6].

Recent research on AVSR focuses on more challenging real-life scenarios. Techniques such as reinforcement learning [7] and carefully designed fusion architectures [8-10] are used to accommodate varying noise levels and overlapping speech. Self-supervised learning [11] and automatic labeling techniques [12] are applied when audio-visual training pairs are insufficient. Meanwhile, various synchronization modules have been developed for audio-visual alignment [13-15]. However, restricted by the available open-source datasets [1, 2, 16], most studies assume that each video is recorded in relatively high quality, without blurring, corruption, or loss. Moreover, there is growing evidence that current advanced AVSR systems are highly susceptible to perturbations of the video modality [17, 18], suffering significant performance degradation and even performing worse than single-modality models [19, 20].

Missing video modality is a crucial and common problem for AVSR applied in real-life scenarios [1, 17, 19, 20]. It arises from various causes, including losses induced by network latency or hardware limitations, as well as errors in lip movement tracking due to occlusion and side-facing poses. Most researchers apply dropout techniques to the video training data to improve robustness against missing modalities [19-23]. Dropout has been demonstrated to effectively mitigate the out-of-distribution (OOD) issue and alleviate performance degradation without additional inference cost or complex modules. However, it introduces new challenges in real-life scenarios with low-quality input. In our early experiments on the MISP datasets [24, 25], a contradictory phenomenon can be observed in Figure 1: while applying the dropout strategy to video training data enhances the robustness against missing video modality, it also leads to performance degradation when dealing with complete data input. On the other hand, all AVSR systems consistently lag behind unimodal ASR when facing completely missing video.

We attempt to analyze the reasons behind the above-mentioned phenomenon from the perspective of modality bias. Existing multimodal applications can be categorized into two types: (1) modality-balanced systems, in which each modality contributes relatively equally to the model decision, such as Multimodal Emotion Recognition (MER) [26] and Hate Speech Detection (HSD) [27]; and (2) modality-biased systems that over-rely on a certain modality that contains more task-related information. AVSR is a typical modality-biased system dominated by audio. Therefore, an intuitive insight suggests that although dropout on the video modality could address the OOD problem between the training and inference stages, it may exacerbate the modality bias towards audio, which in turn yields robustness to missing video input.

In this paper, we first verify this intuitive hypothesis in Section 2 by quantitatively analyzing the differences between AVSR and unimodal automatic speech recognition (ASR). The results uncover that the modality bias essentially represents a shift from a multimodal to a unimodal distribution on the audio modality in the latent representation space.
Next, in Section 3, we extend our findings to more general multimodal applications and propose the Modality Bias Hypothesis (MBH) to systematically describe the relationship between modality bias and robustness to missing modality. In Sections 4 and 5, we pursue two objectives: improving the robustness of AVSR without degradation on complete input, and ensuring that AVSR consistently outperforms ASR when faced with severely or entirely missing video. To this end, we present Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD), in which the robust student model leverages hidden knowledge extracted by a relatively unbiased teacher model to prevent the distribution of task-relevant representations from shifting to a unimodal distribution. The method is observed to enhance missing robustness by learning complementary information from the other modality and utilizing context information from adjacent frames. For situations where video is severely or entirely missing, adapters are added to the modality-specific branch to dynamically switch the decision bias towards modality-specific representations. The key contributions can be summarized as follows:

- We investigate dropout-induced modality bias and uncover that it fundamentally manifests as a shift from a multimodal to a unimodal distribution on the audio modality in the hidden representation subspace, as detailed in Section 2.
- We propose the Modality Bias Hypothesis (MBH) to systematically describe the decision-making process influenced by modality bias in a multimodal system, as well as the relationship between modality bias and modality missing robustness, as detailed in Section 3.
- We propose Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) to enhance robustness against missing video and avoid performance degradation with complete input. For entirely missing modalities, adapters are adopted to dynamically switch the decision bias to the specific modality, as detailed in Section 5.
- We achieve top AVSR performances on the MISP2021 and MISP2022 datasets while maintaining robustness against missing video frames, as detailed in Section 7.

![](images/6a0267eae5b2c781ec1afd17d26ab22fc95c8dbb2c438d4226566d8f42616102.jpg)
Figure 1. CER (in %) degradation curves of AVSR trained with different dropout rates on video frames. Compared with the baseline AVSR without dropout (in red), the other AVSR systems perform better with missing input but worse with complete data input. As the training dropout rate increases, the CER curve of AVSR gradually converges to that of ASR (dotted line).

![](images/10d69b3367aba09ae0b39b2e4ecfa79114bfddc6aecf2be02a65bf1238c9ed9d.jpg)
Figure 2. Two groups of similarity analysis between ASR and AVSR transcriptions. In both groups, an increase in the similarity of recognition transcriptions is observed as the training dropout rate increases. The similarity is measured by relative CER (in $\%$ ), where the ASR transcription replaces the ground truth.

![](images/545f39a42791b5bb0533696dab1c6818d4580dc726561b65356f3f6773430bdf.jpg)
Figure 3. Similarity matrices of intermediate representations between ASR and different AVSR settings. As training dropout rates increase, the diagonal lines become brighter, indicating closer proximity between the multimodal and the unimodal distributions of the latent decisive subspace in AVSR.

![](images/5c5202081d9db7ac9cea6d81d4169310a37b96165cb383e093b282056bce0291.jpg)
Figure 4. 
An illustration of the Modality Bias Hypothesis (MBH). In the left subplot, the task-relevant component (shaded part) of the latent representations consists of $z^{sa}$, $z^{sv}$, and $z^g$, representing audio-specific, visual-specific, and modality-general decisive features, respectively. The corresponding proportions are denoted by $\alpha$, $\beta$, and $\gamma$. The right subplot shows the dynamic process of decision bias with an increasing training dropout rate. Dropout leads to a consistent modality bias towards audio, regardless of the extent of the missing video.

# 2. Dropout-Induced Modality Bias

We investigate the aforementioned contradictory phenomenon by examining the character error rate (CER) across five Mandarin AVSR systems trained with varying video dropout rates (from 0.0 to 0.7) and tested with varying video missing rates (from 0.0 to 1.0). As shown in Figure 1, two trends are observed: (1) in terms of absolute CER, the model trained with a higher dropout rate deteriorates more on complete multimodal data and slightly missing video frames, but performs better on severely and entirely missing video frames; and (2) in terms of relative performance, the CER degradation curve of the AVSR model trained with a higher dropout rate tends to converge to the unimodal ASR recognition curve. We further examine whether the similarity of the performance degradation curves directly corresponds to the similarity of the recognition transcriptions of ASR and AVSR in Figure 2. As expected, an increase in the training dropout rate leads to higher transcription similarity between AVSR and ASR across different test settings.

To understand this, we investigate the discrepancy in decisive patterns between ASR and each AVSR. We aim to quantify the divergence between the latent decision distributions of these models by measuring the distance between intermediate representation samples. Through random sampling of complete audio-visual data batches, we generate intermediate layer representations using the encoder of ASR or of AVSR trained at different dropout rates. Figure 3 illustrates cosine distance-based similarity matrices for the intermediate representations between ASR and different AVSR configurations. The diagonal elements in each subplot represent the similarity between intermediate representations from the same inputs. Notably, with an increasing training dropout rate, these diagonal lines brighten, signifying a rise in intermediate representation similarity. This suggests closer proximity of the AVSR multimodal distribution in the latent decisive subspace to the unimodal distribution of ASR.

Through the aforementioned three experiments, we have discovered that increasing the training dropout rate on video data leads to increased similarity between AVSR and ASR in the performance degradation curves, the recognition results, and the intermediate representation subspace distribution. The findings reveal that the perturbation introduced by dropout effectively reshapes the distribution of the multimodal training data. It leads to a shift from the multimodal joint distribution to a unimodal distribution, resulting in a decision bias towards audio during the decision-making process, as reflected in the output similarity to ASR. We refer to this phenomenon induced by dropout as dropout-induced modality bias. Although dropout-induced bias enhances the robustness to missing video data to some extent, we emphasize that it contradicts the primary design of AVSR as a robust application in noisy environments with supplementary visual cues.
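Throughout this analysis, "dropout" on the video stream simply means replacing selected lip frames with zero padding during training. The following minimal sketch illustrates the idea; the tensor layout and function name are hypothetical, and the segment-, utterance-, and interval-level variants used later in Section 6 differ only in how the padded positions are chosen.

```python
import torch


def pad_video_frames(video: torch.Tensor, drop_rate: float) -> torch.Tensor:
    """Replace a random contiguous segment of lip frames with zero padding.

    video: tensor of shape (T, H, W), the lip-region frames of one utterance.
    drop_rate: fraction of frames to replace; 0.0 keeps the input intact.
    """
    num_frames = video.size(0)
    num_drop = int(round(num_frames * drop_rate))
    if num_drop == 0:
        return video
    start = torch.randint(0, num_frames - num_drop + 1, (1,)).item()
    padded = video.clone()
    padded[start:start + num_drop] = 0.0  # padding frames carry no lip information
    return padded
```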
The introduction of artificial noise (padding frames) in video data induces the model to converge toward trivial solutions, leading to an excessive dependence on the audio modality. This over-reliance, in turn, leads to a degradation in performance when presented with complete multimodal input in a noisy environment.

# 3. Modality Bias Hypothesis (MBH)

In this section, we propose the Modality Bias Hypothesis (MBH) based on the Modality Bias Venn Diagram (MBVD) to systematically describe the relationship between modality bias and robustness to missing modality.

Modality Bias Venn Diagram As shown in Figure 4 on the left, the MBVD depicts the components of the latent decisive feature of multimodal systems in the form of a Venn diagram. It is a variant of the Modality Venn Diagram (MVD) employed in multimodal knowledge distillation [28]. Without loss of generality, we take AVSR as an example and define $\mathcal{X}^a$, $\mathcal{X}^v$, and $\mathcal{Y}$ as the original feature spaces of audio and video and the label space, respectively. The decisive feature $z$, commonly a form of intermediate layer representation, consists of two modality components $z^a$ (blue circle) and $z^v$ (green circle). We denote $I(\cdot,\cdot)$ as mutual information and $I(\cdot,\cdot\mid\cdot)$ as conditional mutual information. The task-relevant decisive feature $z^u$ ($I(z,y)$) is depicted by the shaded region and can be further divided into three components: $z^g$ ($I(z^a,z^v,y)$) represents modality-general decisive features, while $z^{sa}$ ($I(z^u,z^a|z^g)$) and $z^{sv}$ ($I(z^u,z^v|z^g)$) represent modality-specific decisive features. We denote the proportions of $z^{sa}$, $z^{sv}$, and $z^g$ in $z^u$ as $\alpha$, $\beta$, and $\gamma$, respectively. These features collectively contribute to determining the final task output $\hat{y}$. For AVSR, a higher $\alpha$ represents a greater decision bias of the model on the audio modality, focusing more on speech than on lip movements. A larger $\gamma$ indicates a model's inclination towards modality synergy by maximizing the mutual information between modalities for decision-making, as in some modality-balanced models [26, 27]. Furthermore, $z^u$ is generated from the original features $x^a, x^v$ as $g(x^a,x^v;\phi)$, where $g(\phi)$ can be seen as a neural network-based transformation, such as an encoder with parameters $\phi$. Therefore, the decision process of the multimodal system can be decomposed into two steps, following the Bayesian process: the MBVD hidden decisive feature generation step and the decision step:

$$
P \left(y \mid x ^ {a}, x ^ {v}\right) = P \left(y \mid z ^ {u}\right) P \left(z ^ {u} \mid x ^ {a}, x ^ {v}\right) \tag {1}
$$

Modality Bias Hypothesis Based on the MBVD, we give a systematic description of the relationship between modality bias and robustness to missing modality from the view of the MBH. As shown in Figure 4 on the right, by applying dropout with different rates $k_{i} \in [0,1]$ to the video training data, the original video feature space $\mathcal{X}^v$ can be split into a series of subsets $\{\mathcal{X}_{k_1}^v, \mathcal{X}_{k_2}^v, \dots, \mathcal{X}_{k_n}^v\}$. The samples from the space $\mathcal{X}^a \times \mathcal{X}_{k_i}^v$ are denoted as dyads $(x^a, x_{k_i}^v)$. Compared to the model trained on complete multimodal data $(x^a, x_{0.0}^v)$, the model trained on data pairs $(x^a, x_\theta^v)$ with a video dropout rate $\theta_{train} \in (0.0,1.0)$ exhibits a greater decision bias on the audio modality, with a larger $\alpha$ and smaller $\beta$ and $\gamma$.
As $\theta$ approaches 1.0, the task-relevant decisive feature $z^u$ becomes steadily dominated by the audio-specific decisive feature $z^a$, resulting in a transformation from a bimodal distribution in the latent representation subspace to a unimodal one. The decision pattern of the multimodal model shifts from $P(y|z^u)$ to $P(y|z^a)$.

During the inference stage, these multimodal models display different modality biases. A model trained on complete multimodal data, or with dropout applied to the audio modality, has a larger $\gamma$ and tends to seek general information shared across modalities. This hypothesis effectively explains the experimental phenomena observed in previous studies. For modality-biased models, such as Multimodal Sentiment Analysis (MSA) [22] dominated by text, Multimodal Speech Enhancement (MSE) [29] dominated by audio, as well as AVSR dominated by audio [21, 23, 30], it has been observed that applying dropout to the primary modality helps alleviate modality bias and brings slight improvements when dealing with complete input. On the other hand, an AVSR model with a larger $\alpha$ and smaller $\gamma$ tends to focus more on speech and neglect complementary information from lip movements. When dealing with partially or completely missing video data, the model with a larger $\alpha$ shows its robustness, which aligns well with the aforementioned experimental observations.

# 4. Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD)

For the robustness training of modality-biased systems, it is crucial to avoid dropout-induced modality bias on the primary modality. Dropout indeed alleviates the OOD problem to some extent, but it also encourages multimodal models to pursue trivial solutions. Ideal robust multimodal models are expected to achieve two goals: (1) learn to extract mutual information across modalities rather than relying on a single modality when facing complete paired input, and (2) learn to complement information from the other modality and utilize context information from adjacent frames. To prevent excessive modality bias caused by dropout, we propose a novel Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) framework to constrain the distribution of the multimodal feature space during the robustness training phase.

Unlike traditional knowledge distillation methods, the teacher model is first trained on complete multimodal data pairs, while the student model is trained on missing video data. The teacher model is relatively unbiased, with a higher proportion of modality-general decisive features $z^g$ in the MBVD space. During the training of the student model, the teacher model serves as an anchor point, preventing the student model from shifting towards a unimodal distribution on the audio modality. Note that, unlike common KD methods [31-34] in which the teacher and student differ in size or architecture, in our method they differ only in modality bias. Additionally, distillation occurs at the hidden layers rather than on the output logits, aiming to minimize the distance between decision distribution samples of the teacher and student models and to further constrain the intermediate representation subspace distribution of the student model. In practice, we take the knowledge from the intermediate representations of the cross-modal encoder layers.

Here, we adopt the symbol definitions from Section 3 and provide a formal description of MDA-KD.
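Before the formal description, the following sketch previews one MDA-KD training step under the assumptions above: a frozen teacher trained on complete pairs, and a student of the same architecture fed dropout-corrupted video. The module and method names (`intermediate_repr`, `standard_loss`) are hypothetical placeholders, so this is an illustration of the idea rather than the exact implementation.

```python
import torch
import torch.nn.functional as F


def mda_kd_step(teacher, student, audio, video, video_dropped, targets,
                beta: float = 0.1, temperature: float = 1.0) -> torch.Tensor:
    """One MDA-KD training step (sketch): distill intermediate representations."""
    with torch.no_grad():
        z_teacher = teacher.intermediate_repr(audio, video)      # anchor from complete input
    z_student = student.intermediate_repr(audio, video_dropped)  # student sees padded video

    # Frame-level distillation: KL divergence between temperature-softened
    # representation samples of teacher and student (cf. Eq. (6)).
    p_teacher = F.softmax(z_teacher / temperature, dim=-1)
    log_p_student = F.log_softmax(z_student / temperature, dim=-1)
    kd_loss = F.kl_div(log_p_student, p_teacher, reduction="batchmean")

    # Standard multitask (hybrid CTC/attention) loss on the student (cf. Eq. (3)).
    mlt_loss = student.standard_loss(audio, video_dropped, targets)

    # Weighted combination of distillation and task losses (cf. Eq. (7)).
    return beta * kd_loss + (1.0 - beta) * mlt_loss
```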
![](images/2347c63917e0f5e204c25d112e973d1dbe14d5be0e8c0cc6666d68101e21c453.jpg)

![](images/2a6b94d043398449cf5959309e9f5beba5e85c745f34a5952de8bc2ab581c9e9.jpg)

Module with adapter; Cross-attention module; Element-wise summation

![](images/46eff9de648e3d26883a52fc7df7cc6cc55883095d0a243925718fe9ae73fb23.jpg)

Data flow when activating adapters

![](images/f4dd81b5bbe68a5613e660802a40ddd40a1d85187e579743fd61f0ebdd1f90b0.jpg)

Teacher AVSR Model (complete data input)

![](images/5787d642549b4f3a2ef5c4d4e97105f287a027a219b92484233511a129808ad6.jpg)

Audiovisual Speech Recognition Network Architecture

![](images/1bb738a5a29126ea7c344cb6c593778f81c6c8b0dd7d625608b41c88a2a330b9.jpg)

Lip Video Frames

![](images/810312020fab927186c92f5ac20058a5aad783bc615ad0562d567a0e6a688c7b.jpg)

Noisy Audio Waveform

Figure 5. Overall framework of the proposed AVSR system. We address challenging real-world scenarios involving missing video frames and noisy speech with an overlap rate exceeding $40\%$ during both the training and testing stages. In MDA-KD, latent knowledge is sampled from the latent distribution of the teacher model with complete data input. This latent knowledge serves as an anchor point to prevent dropout-induced modality bias during the robustness training of the student network. For entirely missing video input, the MS-Adapter is activated to enable a dynamic decision switch.

For a naturally modality-biased multimodal system, the data samples from the original feature space $\mathcal{X}^a \times \mathcal{X}_{k_i}^v \times \mathcal{Y}$ can be denoted as triples $(x^{a}, x_{k_{i}}^{v}, y)$. For simplicity, we denote $x_{0.0}^{v}$ as $x^{v}$. The teacher model $Te(\phi)$, with parameters $\phi$, is first trained on the complete multimodal data $(x^{a}, x^{v}, y)$, and its decision process can be formulated as $P_{te}(y \mid x^{a}, x^{v})$ in a Bayesian decision problem. We assume that the teacher model is a neural network $g(\phi)$ trained by minimizing the following loss function, a form of multitask learning:

$$
Te(\phi) = \min_{\phi} \mathcal{L}_{\mathrm{MLT}}\left(g\left(x^{a}, x^{v}; \phi\right), y\right), \tag{2}
$$

$$
\mathcal{L}_{\mathrm{MLT}}\left(x^{a}, x^{v}; \phi\right) = -\lambda \log P_{\mathrm{CTC}}\left(y \mid x^{a}, x^{v}\right) - (1-\lambda) \log P_{\mathrm{Att}}\left(y_{i} \mid x^{a}, x^{v}\right), \tag{3}
$$

where the tunable parameter $\lambda \in [0,1]$ balances the sequence-level Connectionist Temporal Classification (CTC) loss and the frame-wise Cross Entropy (CE) loss, which serve as the standard end-to-end ASR training objectives. During the training of the student model, the dropout strategy is applied to the secondary modality $v$, while the teacher model is frozen with complete multimodal data as input. It is important to note that the student and teacher models share the same network architecture. From the perspective of the MBVD, the whole decision process of the multimodal model can be divided into a hidden feature generation step and a decision step:
$$
P_{st}\left(y \mid x^{a}, x_{k_{i}}^{v}\right) = P_{st}\left(y \mid z^{u}\right) P_{st}\left(z^{u} \mid x^{a}, x_{k_{i}}^{v}\right), \tag{4}
$$

$$
P_{te}\left(y \mid x^{a}, x^{v}\right) = P_{te}\left(y \mid z^{u}\right) P_{te}\left(z^{u} \mid x^{a}, x^{v}\right), \tag{5}
$$

where $z^{u} \in \mathbb{R}^{d_u}$ represents the combined representation of the modality-specific decisive features $z^{sa} \in \mathbb{R}^{d_a}$ and $z^{sv} \in \mathbb{R}^{d_v}$ and the modality-general decisive features $z^g \in \mathbb{R}^{d_g}$. The tuple $(z^{sa}, z^{sv}, z^g)$ represents a sample drawn from the MBVD hidden feature space, denoted as $\mathcal{Z}^{sa} \times \mathcal{Z}^{sv} \times \mathcal{Z}^g$.

With the student model initialized with the parameters of the teacher model, we introduce an additional loss term to constrain the evolution of the student model's MBVD feature distribution during robust training. The distance between batch samples from the student and teacher models is used to approximate the difference between their distributions, which serves as a form of frame-level knowledge distillation:

$$
\begin{aligned}
\mathcal{L}_{\mathrm{KD}}\left(x^{a}, x^{v}, x_{k_i}^{v}; \phi_{te}, \phi_{st}\right) &= \mathrm{KL}\left(S_{te}, S_{st}\right), \\
S_{te} &= \sigma_{T}\left(\operatorname{Sample}\left(P_{te}\left(z^{u} \mid x^{a}, x^{v}\right)\right)\right), \\
S_{st} &= \sigma_{T}\left(\operatorname{Sample}\left(P_{st}\left(z^{u} \mid x^{a}, x_{k_{i}}^{v}\right)\right)\right),
\end{aligned} \tag{6}
$$

where $\sigma_T(\cdot)$ denotes the softmax function with temperature $T$ and $\operatorname{Sample}$ represents the sampling function. This distribution approximation serves two main purposes. Firstly, during training, when the student network encounters a missing-modality feature $x_{k_i}^v$, the convergence of the student's decisive feature $z^u = g(x^a, x_{k_i}^v; \phi_{st})$ towards the teacher's decisive feature $z^{u} = g(x^{a}, x^{v}; \phi_{te})$ encourages the utilization of contextual information from $x_{k_i}^v$. Additionally, with the dual cross-attention design, the process complements the information extracted from $x^a$, effectively addressing the condition of missing frames and promoting out-of-distribution generalization. On the other hand, the KD loss minimizes the distance between the distributions of the teacher and student models, preventing the student model from converging to trivial solutions. Subsequently, we train the student model jointly with a weighted sum of the standard training loss and the distillation loss:

$$
\mathcal{L}\left(x^{a}, x^{v}, x_{k_i}^{v}; \phi_{te}, \phi_{st}\right) = \beta \mathcal{L}_{\mathrm{KD}}\left(x^{a}, x^{v}, x_{k_i}^{v}; \phi_{te}, \phi_{st}\right) + (1-\beta) \mathcal{L}_{\mathrm{MLT}}\left(x^{a}, x_{k_i}^{v}; \phi_{st}\right). \tag{7}
$$

# 5. Modality-Specific Adapter (MS-Adapter)

As illustrated in Figure 4 on the right, when facing severely or entirely missing video data, we consider it unreliable to continue employing a synergistic decision-making strategy like MDA-KD with relatively high values of $\gamma$ and $\beta$. Padding frames lack sufficient contextual information and may introduce noise.
Therefore, in such scenarios, a dynamic switch of the decision strategy from $P(y|z^u)$ to $P(y|z^a)$ is necessary as a complement to MDA-KD. In view of the success of adapters in foundation model fine-tuning [35-38], we extend them to address the missing-modality issue in multimodal models. For clarity, we refer to this extension as the Modality-Specific Adapter (MS-Adapter). Specifically, LoRA [39] is applied to the self-attention layers in the audio branch, marked with a dashed box in Figure 5. These adapters perform residual-style feature blending with the original pre-trained features. The residual weight can be represented as a low-rank matrix $\Delta W \in \mathbb{R}^{d \times d}$, which can be decomposed into a pair of fan-in and fan-out linear layers with weights $A \in \mathbb{R}^{r \times d}$ and $B \in \mathbb{R}^{d \times r}$ ($r \ll d$). The reparametrization operation can be formulated as follows:

$$
H_{o} = H_{i}\left(W_{0} + \Delta W\right) = H_{i}\left(W_{0} + BA\right) \tag{8}
$$

By activating the MS-Adapter, we can dynamically switch the decision-making pattern. We highlight two advantages of the MS-Adapter. First, a substantial amount of unpaired unimodal training data and data augmentation techniques can be used when training the adapters. Second, the adapter training process provides an opportunity to modify the computation pathway. As illustrated in Figure 5 with dashed arrows, in both the training and inference stages with audio-only input, the computation flow of the video branch is cut off directly, and the modality-fusion cross-attention module is skipped to reduce computational cost.

# 6. Experiment Settings

Dataset We conduct our experiments on MISP2021 [24] and MISP2022 [40]. These two open-source datasets present a large-scale audio-visual corpus recorded in real-life home-TV scenarios with multiple groups of speakers chatting simultaneously. Multiple microphone arrays and cameras are used to collect far/middle/near-field audio and far/middle-field video. Compared to the carefully recorded videos in LRS2 [1] and LRS3 [2] from BBC interviews and TED talks, the MISP datasets offer static shooting perspectives with diverse resolutions, including naturally blurred and obstructed frames. The videos are accompanied by various background noises and high speech overlap rates (42% in the training set and 49% in the test set). Compared to the oracle segment-level AVSR task in MISP2021, MISP2022 presents a more challenging session-level AVSR task without oracle speaker diarization results. To avoid the limitations associated with noise simulation, all experiments are evaluated exclusively on far-field data, which aligns well with common in-car, office-meeting, or smart-home scenarios.

Implementation Detail We strictly adhere to the approaches outlined in [18] for model training and network architectures. We initialize the AVSR model with two pretrained unimodal models and fine-tune it in an end-to-end manner. As shown in Figure 5, the AVSR model is a dual-branch network where $N = 3$, $M = 9$ and $K = 6$. For the loss function in Equation 3, we set $\lambda$ to 0.7, and the CTC loss consists of identically weighted intermediate CTC [41] losses at layers 3, 6, 9, and 12. In Equation 7, we use 0.1 for $\beta$. We follow [18] to establish two baselines, A0 and AV0, trained on complete-modality data without dropout techniques. AV0 is fine-tuned based on A0 and a pre-trained ResNet-18 encoder with a 3D-CNN head.
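To make the MDA-KD objective of Equations 6 and 7 concrete, the following is a minimal PyTorch-style sketch (our own illustration under stated assumptions, not the released implementation): intermediate representations sampled from the frozen teacher (complete input) and the trainable student (dropout input) are passed through a temperature softmax and compared with a KL divergence, and the result is combined with the standard multitask loss using the weight $\beta = 0.1$ mentioned above. Tensor layouts, the default temperature, and function names are assumptions made for illustration only.

```python
import torch
import torch.nn.functional as F

def mda_kd_loss(h_teacher: torch.Tensor,
                h_student: torch.Tensor,
                temperature: float = 1.0) -> torch.Tensor:
    """Frame-level KD term of Eq. (6): KL(S_te, S_st) between
    temperature-softmaxed intermediate representations.
    h_teacher, h_student: (batch, frames, dim) hidden states sampled from
    the cross-modal encoder of the frozen teacher / trainable student."""
    s_te = F.softmax(h_teacher / temperature, dim=-1)
    log_s_st = F.log_softmax(h_student / temperature, dim=-1)
    # KL(teacher || student); 'batchmean' averages over the batch dimension.
    return F.kl_div(log_s_st, s_te, reduction="batchmean")

def total_loss(l_mlt: torch.Tensor, l_kd: torch.Tensor,
               beta: float = 0.1) -> torch.Tensor:
    """Joint objective of Eq. (7): weighted sum of the distillation loss
    and the CTC/attention multitask loss computed on the dropout input."""
    return beta * l_kd + (1.0 - beta) * l_mlt
```

In the framework described above, the teacher is kept frozen and fed complete audio-visual pairs (its hidden states would typically be computed under `torch.no_grad()`), while the student receives the dropout-perturbed pairs, so only the student parameters are updated by this objective.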
Dropout Settings Similar to [19], we evaluate the robustness to the missing video modality with various dropout methods and rates: Segment Dropout, Utterance Dropout, and Interval Dropout. Testing involves dropout rates from 0.0 to 1.0 in 0.25 intervals. Results from the three dropout methods are averaged at each rate to obtain the overall dropout results. When conducting ablation studies, segments with naturally missing video frames (17%) are excluded from the test set, ensuring a consistent and controlled video missing rate. In our method, during training, a certain proportion of samples is assigned a dropout method chosen at random from the three methods above and an additional one from [21], with an optimized dropout rate. In both the training and testing stages, we pad the missing video frame pixels with zeros instead of using interpolation or repetition. We conduct a hyper-parameter search over the training dropout rate and find that 0.5 is optimal for our method (with $D_{prob}$ set to 0.5). This rate implies that half of the video frames in a selected sample are padded with zeros.

# 7. Experiments and Result Analysis

# 7.1. Overall Comparison of Experiment Settings

In Table 1, we conduct a key parameter analysis and an ablation study of the proposed methods on the MISP2022 dataset with oracle speaker diarization results. We first explore the impact of the dropout probability for training videos.
| Model | Dropout | $D_{prob}$ | Init. | MDA-KD | MS-Adapter | 0.00 | 0.25 | 0.50 | 0.75 | 1.00 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| A0 | ✗ | 0.0 | Random | ✗ | ✗ | 25.13 | 25.13 | 25.13 | 25.13 | 25.13 |
| AV0 | ✗ | 0.0 | A0 | ✗ | ✗ | 21.14 | 23.77 | 25.57 | 25.87 | 26.65 |
| AV1 | ✓ | 1.0 | A0 | ✗ | ✗ | 23.26 | 23.68 | 24.27 | 24.95 | 25.91 |
| AV2 | ✓ | 0.5 | A0 | ✗ | ✗ | 21.72 | 22.56 | 23.37 | 24.46 | 25.64 |
| AV3 | ✓ | 0.5 | AV0 | ✗ | ✗ | 21.53 | 22.47 | 23.65 | 24.55 | 25.90 |
| AV4 | ✓ | 0.5 | AV0 | ✓ | ✗ | 21.38 | 22.18 | 23.20 | 24.40 | 25.70 |
| AV5 | ✓ | 0.5 | AV0 | ✓ | ✗ | 21.11 | 21.77 | 22.78 | 24.02 | 25.45 |
| AV6 | ✓ | 0.5 | AV0 | ✓ | ✓ | 21.11 | 21.77 | 22.78 | 24.02 | 24.94 |

Table 1. An overall comparison in CER (%) of different system configurations. The last five columns report the CER at test dropout rates from 0.00 to 1.00. Different from the dropout rate, $D_{prob}$ represents the proportion of data with missing frames in the training set. Init. refers to the network initialization method. AV4 applies MDA-KD with the flow of audio data into the video branch of the dual cross-attention module restricted (see Section 7.1).
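The per-rate columns in Table 1 (and in Table 3 below) are obtained with the protocol of the Dropout Settings paragraph: each test dropout rate in {0.0, 0.25, 0.5, 0.75, 1.0} is applied with Segment, Utterance, and Interval Dropout, and the resulting CERs are averaged per rate. The sketch below is only our simplified reading of those three patterns (a contiguous block, the whole clip with some probability, and evenly spaced frames, respectively); the exact definitions follow [19], and all names here are hypothetical.

```python
import torch

def segment_dropout(v: torch.Tensor, rate: float) -> torch.Tensor:
    # Zero a contiguous block covering `rate` of the frames (illustrative).
    t = v.shape[0]
    n = int(round(rate * t))
    out = v.clone()
    if n > 0:
        start = torch.randint(0, t - n + 1, (1,)).item()
        out[start:start + n] = 0.0
    return out

def utterance_dropout(v: torch.Tensor, rate: float) -> torch.Tensor:
    # Zero the entire clip with probability `rate` (illustrative).
    return torch.zeros_like(v) if torch.rand(1).item() < rate else v.clone()

def interval_dropout(v: torch.Tensor, rate: float) -> torch.Tensor:
    # Zero evenly spaced frames so that roughly `rate` of them are missing.
    t = v.shape[0]
    out = v.clone()
    idx = torch.linspace(0, t - 1, int(round(rate * t))).long()
    out[idx] = 0.0
    return out

# Evaluation protocol sketch: average the three methods at each test rate.
rates = [0.0, 0.25, 0.5, 0.75, 1.0]
methods = [segment_dropout, utterance_dropout, interval_dropout]
# for r in rates:
#     cer_r = mean(evaluate(model, apply(m, r)) for m in methods)
```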
| Insert part | Rank | DA | Params (MB) | CER (%) |
| --- | --- | --- | --- | --- |
| Encoder | 32 | ✗ | 4.50 | 25.35 |
| Encoder | 32 | ✓ | 4.50 | 25.08 |
| En&Decoder | 32 | ✓ | 9.00 | 25.20 |
| Encoder | 64 | ✓ | 9.00 | 25.08 |
| En&Decoder | 64 | ✓ | 18.00 | 25.05 |
| Encoder | 128 | ✓ | 18.00 | 25.01 |
| En&Decoder | 128 | ✓ | 36.00 | 24.94 |
In contrast to AV1, AV2 keeps half of the training data pairs complete. As a result, it mitigates dropout-induced modality bias to some extent, since a higher proportion of complete data tends to encourage the model to learn general information across modalities. This finding aligns with previous research [19], highlighting the superiority of utterance dropout over random frame dropout (the former corresponds to a larger $D_{prob}$). Next, AV3 is trained based on AV0, which means the subsequent optimization starts from a relatively stable convergence state obtained with complete input. In the robust training stage, this balanced state tends to be disrupted when training on incomplete modality pairs, as the model searches for a new convergence region; when training on complete data pairs, the scenario is reversed. Thus, while AV3 outperforms AV2 at low test missing rates, it lags behind when facing severe video absence, illustrating a tug-of-war dynamic without clear guidance.

Next, we validate the effectiveness of MDA-KD. Compared with AV3, AV5 demonstrates superior performance for both complete and missing video modality inputs. AV5 successfully achieves our goal of enhancing robustness without any performance degradation on complete input (21.11% vs. 21.14%). This implies that the teacher model AV0 provides an explicitly optimized target in robustness training. It effectively constrains the distribution shift toward the audio modality, preventing excessive modality bias caused by dropout.

Table 2. Performance analysis of the MS-Adapter. DA means data augmentation, including speed perturbation and utterance concatenation.
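To relate the rank and parameter counts in Table 2 to the MS-Adapter reparametrization of Equation 8, here is a minimal, hedged sketch of a LoRA-style residual on a frozen linear projection; when the adapter is deactivated the layer reduces to the original pre-trained weights, which is what enables the dynamic decision switch described in Section 5. The class name, argument names, and initialization are our own; the actual MS-Adapter follows LoRA [39] inside the audio-branch self-attention layers.

```python
import torch
import torch.nn as nn

class MSAdapterLinear(nn.Module):
    """Illustrative sketch of Eq. (8): the output of the frozen pre-trained
    projection W_0 plus a trainable low-rank residual (rank r << d)."""

    def __init__(self, base: nn.Linear, rank: int = 32):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad = False                    # keep W_0 frozen
        self.A = nn.Parameter(torch.randn(rank, base.in_features) * 0.01)  # fan-in
        self.B = nn.Parameter(torch.zeros(base.out_features, rank))        # fan-out
        self.active = False                            # the dynamic decision switch

    def forward(self, h: torch.Tensor) -> torch.Tensor:
        out = self.base(h)                             # frozen projection W_0
        if self.active:                                # audio-only decision pattern
            out = out + h @ self.A.t() @ self.B.t()    # low-rank residual B A
        return out
```

Setting `active = True` applies the audio-specific residual, while setting it back to `False` (or merging the residual into the frozen weight) restores the original audio-visual parameters, so the decision pattern can be switched without duplicating the backbone.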
| Method | 0.00 | 0.25 | 0.50 | 0.75 | 1.00 |
| --- | --- | --- | --- | --- | --- |
| Cascade Utt [19] | 22.54 | 23.89 | 25.23 | 26.05 | 28.15 |
| AV Dropout Utt [21] | 22.00 | 23.37 | 25.35 | 26.21 | 26.78 |
| Dropout Utt [20] | 22.08 | 23.21 | 24.56 | 25.08 | 25.46 |
| Ours | 21.11 | 21.77 | 22.78 | 24.02 | 24.94 |
Table 3. A CER (%) comparison with other dropout methods at test dropout rates from 0.00 to 1.00.

Furthermore, in AV4, we restrict the flow of audio data into the video branch within the dual cross-attention module. Consequently, a performance drop is observed across all test suites, highlighting the effectiveness of MDA-KD in leveraging the dual cross-attention module to extract modality-general information from audio to complement missing information. Subsequently, in AV6 we integrate MS-Adapters into the audio branch based on AV5. Consequently, the performance with audio-only input improves to a 24.94% CER, surpassing A0 for the first time (24.94% vs. 25.13%). These results demonstrate the effectiveness of MS-Adapters in dynamically switching to an audio-dominated decision pattern for audio-only input.

# 7.2. Validation of MS-Adapter

We further explore three key factors in MS-Adapter adaptation: data augmentation (DA), insert part, and rank dimension. In Table 2, we observe a decrease in CER from 25.45% (AV5) to 25.35%, which further improves to 25.08% with data augmentation doubling the audio training data. These results suggest that the adapter adaptation effectively enhances the robustness of AVSR with completely missing video, requiring only an additional 4.50 MB of parameters. It also provides an opportunity to apply data augmentation that is effective for unimodal model training and to use extra unpaired data. Next, increasing the rank and the number of adapters yields further performance gains at the expense of a larger parameter count. The best performance of 24.94%, shown in the bottom row, is attained with adapters inserted in both the encoder and decoder blocks.

# 7.3. Comparisons with Other Dropout Techniques

As shown in Table 3, we compare our proposed framework with three widely used dropout techniques [19-21]. Cascade Utt employs a separable cascade structure, where an AV model is superimposed on an audio-only model.
| Benchmark | System | Training data (A) | Training data (V) | Backbone | Obj. Function | CER / cpCER (%) |
| --- | --- | --- | --- | --- | --- | --- |
| MISP2021 | SJTU [42] | 300 hours | LRW-1000 | Conformer | ED + SE | 34.02 |
| | NIO [43] | 3300 hours | LRW-1000 [4] | Transformer | ED | 25.07 |
| | USTC [18] | 500 hours | w/o extra data | Conformer | ED | 24.58 |
| | Ours | 1000 hours | w/o extra data | Conformer | ED + InterCTC | 21.53 |
| MISP2022 | NIO [44] | 3300 hours | LRW-1000 | Conformer | ED | 29.58 |
| | XMU [45] | 2100 hours | LRW-1000 | Conformer | ED + InterCTC | 31.88 |
| | NPU [46] | 1300 hours | w/o extra data | E-Branchformer | ED + InterCTC | 29.13 |
| | Ours | 1000 hours | w/o extra data | Conformer | ED + InterCTC | 28.06 |
Table 4. A comparison with state-of-the-art systems. InterCTC refers to the intermediate CTC loss [41], the ED loss is formulated in Equation (3), and SE represents the mean squared error loss. We evaluate the session-level AVSR task using the concatenated minimum-permutation character error rate (cpCER) [47].

Inputs are then routed through either the audio-only path or the AV path with a probability of $p_1$. AV Dropout Utt randomly drops either the entire video or the entire audio segment with a probability of $p_2$. Dropout Utt exclusively drops the video segments with a probability of $p_3$. We adopt the optimal dropout settings from [19], where $p_1 = 0.25$, $p_2 = 0.25$, and $p_3 = 0.5$. For Cascade Utt, we follow [19] to build the network and maintain a comparable parameter count. As a result, our proposed method outperforms the other three techniques in all test suites and causes no performance degradation.

# 7.4. Comparisons with State-of-the-art Systems

Finally, we compare our system with the state-of-the-art systems on the MISP2021 and MISP2022 challenges [18, 42-45, 48], as shown in Table 4. With Recognizer Output Voting Error Reduction (ROVER) [49], we rescore the output transcripts of A0, AV0, and AV6 from Table 1. In the MISP2021 utterance-level AVSR challenge with oracle speaker diarization results, our system outperforms the previous SOTA system by achieving an absolute CER reduction of 3.05%, from 24.58% to 21.53%. Our top-performing single system, AV6, attains a CER of 22.13%. Moving to the MISP2022 session-level AVSR challenge, we build our diarization system closely adhering to [50]. We secure a ROVER cpCER of 28.06%, and the best single system obtains a cpCER of 28.55%. When oracle segmentations are utilized, our system achieves a ROVER CER of 21.80% and a best single-model CER of 21.53%.

# 8. Related Works

Modality Missing in Multimodal Learning The prevalent issue of missing modalities in multimodal applications has prompted research that specifically targets severe modality absence. Generative models [51, 52] and meta-learning predict missing modalities using available or few-shot paired samples. Balanced models utilize joint multimodal representations [53-55]. Models addressing modality bias employ data augmentation methods such as modality dropout [19, 22] to tackle out-of-distribution challenges. For AVSR, we prioritize efficiency and opt for dropout due to its plug-and-play nature and lightweight implementation. More discussion can be found in the Appendix.

Video Modality Robustness in AVSR To enhance performance on low-resolution videos, visual extractors are commonly pre-trained on relatively high-quality videos with isolated words [5] or with acoustic pseudo-labeling classification tasks [18]. Addressing situations involving corruption, Hong et al. [17] designed an explicit scoring module to identify reliable streams and effectively handle corrupted inputs. Regarding the issue of missing video frames, most researchers have applied dropout techniques to enhance missing robustness [19-23]. Among classical dropout methods, frame-level dropout is utilized in [23] and utterance-level dropout is applied in AV-HuBERT [21]. As a recent work focusing on this issue, Chang et al. [19] unify the test suites for missing video. However, the proposed binary evaluation metric overly emphasizes relative robustness trends, neglecting absolute performance.
Compared to the methods mentioned earlier, we explore the problem of missing video frames from the perspective of modality bias. Leveraging classical techniques and simple designs, our approach achieves both performance and robustness without introducing additional inference time, and it adapts to various scenarios of frame absence with a unified model.

# 9. Conclusion

In this work, we discover and analyze the essence of dropout-induced modality bias. Based on these findings, we propose the MBH to provide a systematic description of the relationship between modality bias and missing-modality robustness in multimodal systems. Consequently, we propose a new multimodal distribution approximation with knowledge distillation approach to deal with missing video frames for AVSR. Furthermore, we apply adapters to handle severely and entirely missing video. For future work, we intend to validate our findings across a wide range of multimodal applications beyond AVSR.

# References

[1] Joon Son Chung, Andrew Senior, Oriol Vinyals, and Andrew Zisserman. Lip reading sentences in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6447-6456, 2017. 1, 6
[2] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. LRS3-TED: a large-scale dataset for visual speech recognition. arXiv preprint arXiv:1809.00496, 2018. 1, 6
[3] Hang Chen, Jun Du, Yusheng Dai, Chin Hui Lee, Sabato Marco Siniscalchi, Shinji Watanabe, Odette Scharenborg, Jingdong Chen, Bao Cai Yin, and Jia Pan. Audio-visual speech recognition in misp2021 challenge: Dataset release and deep analysis. In Proceedings of the Annual Conference of the International Speech Communication Association, INTERSPEECH, volume 2022, pages 1766-1770, 2022. 3
[4] Shuang Yang, Yuanhang Zhang, Dalu Feng, Mingmin Yang, Chenhao Wang, Jingyun Xiao, Keyu Long, Shiguang Shan, and Xilin Chen. Lrw-1000: A naturally-distributed large-scale benchmark for lip reading in the wild. In 2019 14th IEEE international conference on automatic face & gesture recognition (FG 2019), pages 1-8. IEEE, 2019. 1, 8
[5] Pingchuan Ma, Stavros Petridis, and Maja Pantic. End-to-end audio-visual speech recognition with conformers. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 7613-7617. IEEE, 2021. 1, 8
[6] Xichen Pan, Peiyu Chen, Yichen Gong, Helong Zhou, Xinbing Wang, and Zhouhan Lin. Leveraging unimodal self-supervised learning for multimodal audio-visual speech recognition, 2022. 1
[7] Chen Chen, Yuchen Hu, Qiang Zhang, Heqing Zou, Beier Zhu, and Eng Siong Chng. Leveraging modality-specific representations for audio-visual speech recognition via reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pages 12607-12615, 2023. 1
[8] Bo Xu, Cheng Lu, Yandong Guo, and Jacob Wang. Discriminative multi-modality speech recognition. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, pages 14433-14442, 2020. 1
[9] Jianwei Yu, Shi-Xiong Zhang, Jian Wu, Shahram Ghorbani, Bo Wu, Shiyin Kang, Shansong Liu, Xunying Liu, Helen Meng, and Dong Yu. Audio-visual recognition of overlapped speech for the lrs2 dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6984-6988. IEEE, 2020.
[10] Joanna Hong, Minsu Kim, Daehun Yoo, and Yong Man Ro.
Visual context-driven audio feature enhancement for robust end-to-end audio-visual speech recognition. arXiv preprint arXiv:2207.06020, 2022. 1 +[11] Alexandros Haliassos, Pingchuan Ma, Rodrigo Mira, Stavros Petridis, and Maja Pantic. Jointly learning visual and auditory speech representations from raw data. arXiv preprint arXiv:2212.06246, 2022. 1 +[12] Pingchuan Ma, Alexandros Haliassos, Adriana Fernandez-Lopez, Honglie Chen, Stavros Petridis, and Maja Pantic. + +Auto-AVSR: Audio-visual speech recognition with automatic labels. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2023. 1 +[13] George Sterpu, Christian Saam, and Naomi Harte. Attention-based audio-visual fusion for robust automatic speech recognition. In Proceedings of the 20th ACM International conference on Multimodal Interaction, pages 111–115, 2018. 1 +[14] George Sterpu, Christian Saam, and Naomi Harte. How to teach DNNs to pay attention to the visual modality in speech recognition. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 28:1052-1064, 2020. +[15] Yuchen Hu, Ruizhe Li, Chen Chen, Heqing Zou, Qiushi Zhu, and Eng Siong Chng. Cross-Modal Global Interaction and Local Alignment for Audio-Visual Speech Recognition. arXiv preprint arXiv:2305.09212, 2023. 1 +[16] Triantafyllos Afouras, Joon Son Chung, Andrew Senior, Oriol Vinyals, and Andrew Zisserman. Deep audio-visual speech recognition. IEEE transactions on pattern analysis and machine intelligence, 44(12):8717-8727, 2018. 1 +[17] Joanna Hong, Minsu Kim, Jeongsoo Choi, and Yong Man Ro. Watch or Listen: Robust Audio-Visual Speech Recognition with Visual Corruption Modeling and Reliability Scoring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18783–18794, 2023. 1, 8 +[18] Yusheng Dai, Hang Chen, Jun Du, Xiaofei Ding, Ning Ding, Feijun Jiang, and Chin-Hui Lee. Improving Audio-Visual Speech Recognition by Lip-Subword Correlation Based Visual Pre-training and Cross-Modal Fusion Encoder. In 2023 IEEE International Conference on Multimedia and Expo (ICME), pages 2627–2632. IEEE, 2023. 1, 6, 8, 3 +[19] Oscar Chang, Otavio de Pinho Forin Braga, Hank Liao, Dmitriy Dima Serdyuk, and Olivier Siohan. On robustness to missing video for audiovisual speech recognition. Transactions on Machine Learning Research (TMLR), 2022. 1, 6, 7, 8, 3, 4 +[20] Takaki Makino, Hank Liao, Yannis Assael, Brendan Shillingford, Basilio Garcia, Otavio Braga, and Olivier Siohan. Recurrent neural network transducer for audio-visual speech recognition. In 2019 IEEE automatic speech recognition and understanding workshop (ASRU), pages 905–912. IEEE, 2019. 1, 7, 4 +[21] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. Learning audio-visual speech representation by masked multimodal cluster prediction, 2022. 4, 6, 7, 8 +[22] Devamanyu Hazarika, Yingting Li, Bo Cheng, Shuai Zhao, Roger Zimmermann, and Soujanya Poria. Analyzing modality robustness in multimodal sentiment analysis, 2022. 4, 8 +[23] Shiliang Zhang, Ming Lei, Bin Ma, and Lei Xie. Robust audio-visual speech recognition using bimodal DFSMN with multi-condition training and dropout regularization. In ICASSP 2019-2019 IEEE international conference on acoustics, speech and signal processing (ICASSP), pages 6570-6574. IEEE, 2019. 
1, 4, 8 +[24] Hang Chen, Hengshun Zhou, Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Marco Siniscalchi, + +Odette Scharenborg, Di-Yuan Liu, Bao-Cai Yin, Jia Pan, Jian-Qing Gao, and Cong Liu. The First Multimodal Information Based Speech Processing (Misp) Challenge: Data, Tasks, Baselines And Results. In ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 9266-9270, 2022. 1, 6 +[25] Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Marco Siniscalchi, and Odette Scharenborg. Multimodal Information Based Speech Processing (MISP) Challenge 2022. https://mispchallenge.github.io/mispchallenge2022/, 2022. Accessed: 2023-06-26.1 +[26] Jinming Zhao, Ruichen Li, and Qin Jin. Missing modality imagination network for emotion recognition with uncertain missing modalities. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2608-2618, 2021. 2, 4 +[27] Mengmeng Ma, Jian Ren, Long Zhao, Davide Testuggine, and Xi Peng. Are multimodal transformers robust to missing modality? In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18177-18186, 2022. 2, 4 +[28] Zihui Xue, Zhengqi Gao, Sucheng Ren, and Hang Zhao. The modality focusing hypothesis: Towards understanding cross-modal knowledge distillation, 2022. 3, 1 +[29] Hang Chen, Jun Du, Yu Hu, Li-Rong Dai, Bao-Cai Yin, and Chin-Hui Lee. Correlating subword articulation with lip shapes for embedding aware audio-visual speech enhancement. Neural Networks, 143:171–182, 2021. 4 +[30] Pan Zhou, Wenwen Yang, Wei Chen, Yanfeng Wang, and Jia Jia. Modality attention for end-to-end audio-visual speech recognition. In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6565-6569. IEEE, 2019. 4 +[31] Xianing Chen, Qiong Cao, Yujie Zhong, Jing Zhang, Shenghua Gao, and Dacheng Tao. Dearkd: data-efficient early knowledge distillation for vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12052-12062, 2022. 4 +[32] Francisco Rivera Valverde, Juana Valeria Hurtado, and Abhinav Valada. There is more than meets the eye: Self-supervised multi-object detection and tracking with sound by distilling multimodal knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11612-11621, 2021. +[33] Zihui Xue, Sucheng Ren, Zhengqi Gao, and Hang Zhao. Multimodal knowledge expansion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 854-863, 2021. +[34] Baoyun Peng, Xiao Jin, Jiaheng Liu, Dongsheng Li, Yichao Wu, Yu Liu, Shunfeng Zhou, and Zhaoning Zhang. Correlation congruence for knowledge distillation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5007-5016, 2019. 4 +[35] Renrui Zhang, Rongyao Fang, Wei Zhang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li. + +Tip-adapter: Training-free clip-adapter for better vision-language modeling. arXiv preprint arXiv:2111.03930, 2021.6 +[36] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 
+[37] Sylvestre-Alvise Rebuffi, Hakan Bilen, and Andrea Vedaldi. Learning multiple visual domains with residual adapters. Advances in neural information processing systems, 30, 2017. +[38] Peng Gao, Shijie Geng, Renrui Zhang, Teli Ma, Rongyao Fang, Yongfeng Zhang, Hongsheng Li, and Yu Qiao. Clip-adapter: Better vision-language models with feature adapters. International Journal of Computer Vision, pages 1–15, 2023. 6 +[39] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models, 2021. 6 +[40] Zhe Wang, Shilong Wu, Hang Chen, Mao-Kui He, Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Siniscalchi, Odette Scharenborg, et al. The multimodal information based speech processing (misp) 2022 challenge: Audio-visual diarization and recognition. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2023. 6 +[41] Jaesong Lee and Shinji Watanabe. Intermediate loss regularization for ctc-based speech recognition. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6224-6228. IEEE, 2021. 6, 8 +[42] Wei Wang, Xun Gong, Yifei Wu, Zhikai Zhou, Chenda Li, Wangyou Zhang, Bing Han, and Yanmin Qian. The sjtu system for multimodal information based speech processing challenge 2021. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 9261-9265. IEEE, 2022. 8 +[43] Gaopeng Xu, Song Yang, Wei Li, et al. Channel-Wise AV-Fusion Attention for Multi-Channel Audio-Visual Speech Recognition. In Proc. ICASSP 2022, pages 9251–9255. IEEE, 2022. 8 +[44] Sang Wang Gaopeng Xu, Xianliang Wang et al. The NIO system for audio-visual diarization and recognition in MISP challenge 2022. https://mispchallenge.github.io/mispchallenge2022/papers/task2/Track2_NIO.pdf, 2022.8 +[45] Tao Li, Haodong Zhou, Jie Wang, Qingyang Hong, and Lin Li. The XMU System for Audio-Visual Diarization and Recognition in MISP Challenge 2022. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8 +[46] He Wang, Pengcheng Guo, Pan Zhou, and Lei Xie. Mlcaavsr: Multi-layer cross attention fusion based audio-visual speech recognition. arXiv preprint arXiv:2401.03424, 2024. 8 +[47] Shinji Watanabe, Michael Mandel, Jon Barker, Emmanuel Vincent, Ashish Arora, Xuankai Chang, Sanjeev Khudan- + +pur, Vimal Manohar, Daniel Povey, Desh Raj, et al. Chime-6 challenge: Tackling multispeaker speech recognition for unsegmented recordings. arXiv preprint arXiv:2004.09249, 2020.8 +[48] Pengcheng Guo, He Wang, Bingshen Mu, Ao Zhang, and Peikun Chen. The NPU-ASLP System for Audio-Visual Speech Recognition in MISP 2022 Challenge. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8 +[49] Jonathan G Fiscus. A post-processing system to yield reduced word error rates: Recognizer output voting error reduction (ROVER). In Proc. asrU 1997, pages 347-354. IEEE, 1997. 8 +[50] Ming Cheng, Haoxu Wang, Ziteng Wang, Qiang Fu, and Ming Li. The whu-alibaba audio-visual speaker diarization system for the misp 2022 challenge. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8 +[51] Qiuling Suo, Weida Zhong, Fenglong Ma, Ye Yuan, Jing Gao, and Aidong Zhang. 
Metric Learning on Healthcare Data with Incomplete Modalities. In IJCAI, volume 3534, page 3540, 2019. 8, 4 +[52] Lei Cai, Zhengyang Wang, Hongyang Gao, Dinggang Shen, and Shuiwang Ji. Deep adversarial learning for multimodality missing data completion. In Proceedings of the 24th ACM SIGKDD international conference on knowledge discovery & data mining, pages 1158-1166, 2018. 8, 4 +[53] Zilong Wang, Zhaohong Wan, and Xiaojun Wan. Transmodality: An end2end fusion method with transformer for multimodal sentiment analysis. In Proceedings of The Web Conference 2020, pages 2514-2520, 2020. 8, 4 +[54] Hai Pham, Paul Pu Liang, Thomas Manzini, Louis-Philippe Morency, and Barnabás Póczos. Found in translation: Learning robust joint representations by cyclic translations between modalities. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 6892–6899, 2019. 4 +[55] Jiale Li, Hang Dai, Hao Han, and Yong Ding. Mseg3d: Multi-modal 3d semantic segmentation for autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21694-21704, 2023. 8 +[56] A Varga, HJM Steeneken, et al. Noisex-92: A database and an experiment to study the effect of additive noise on speech recognition systems. Speech Commun, 12(3):247–253, 1993. 1 +[57] Lukas Drude, Jahn Heymann, Christoph Boeddeker, and Reinhold Haeb-Umbach. Nara-wpe: A python package for weighted prediction error dereverberation in numpy and tensorflow for online and offline processing. In Speech Communication; 13th ITG-Symposium, pages 1-5. VDE, 2018. 1 +[58] Christoph Boeddecker, Jens Heitkaemper, Joerg Schmalenstroeer, et al. Front-end processing for the CHiME-5 dinner party scenario. In Proc. CHiME 2018, pages 35–40, 2018. 1, 3 + +[59] Desh Raj, Daniel Povey, and Sanjeev Khudanpur. GPU-accelerated guided source separation for meeting transcription, 2022. 3 +[60] Mengmeng Ma, Jian Ren, Long Zhao, Sergey Tulyakov, Cathy Wu, and Xi Peng. Smil: Multimodal learning with severely missing modality. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 2302-2310, 2021. 4 +[61] Itai Gat, Idan Schwartz, Alexander Schwing, and Tamir Hazan. Removing bias in multi-modal classifiers: Regularization by maximizing functional entropies. Advances in Neural Information Processing Systems, 33:3197-3208, 2020. 4 +[62] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017. 4 +[63] Yangyang Guo, Liqiang Nie, Harry Cheng, Zhiyong Cheng, Mohan Kankanhalli, and Alberto Del Bimbo. On modality bias recognition and reduction. ACM Transactions on Multimedia Computing, Communications and Applications, 19(3):1-22, 2023. 
4 \ No newline at end of file diff --git a/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/images.zip b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..4c93fcb484c9ad1f257d6d86e4021e1467ec2ccc --- /dev/null +++ b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac057b402c5e03086333c31866502bd327c6d1e1010f7d9c15cb924dfc985419 +size 484341 diff --git a/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/layout.json b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d5394b7a60700dfbd63b8cc9f25133a278d339c0 --- /dev/null +++ b/2024/A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition/layout.json @@ -0,0 +1,9124 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 65, + 102, + 531, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 102, + 531, + 140 + ], + "spans": [ + { + "bbox": [ + 65, + 102, + 531, + 140 + ], + "type": "text", + "content": "A Study of Dropout-Induced Modality Bias on Robustness to Missing Video Frames for Audio-Visual Speech Recognition" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 170, + 536, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 170, + 536, + 230 + ], + "spans": [ + { + "bbox": [ + 55, + 170, + 536, + 230 + ], + "type": "text", + "content": "Yusheng Dai†, Hang Chen†, Jun Du†*, Ruoyu Wang†, Shihao Chen†, Haotian Wang†, Chin-Hui Lee‡ \n† University of Science and Technology of China, Hefei, China \n‡ Georgia Institute of Technology, Atlanta, America \njundu@ustc.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 252, + 192, + 265 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 252, + 192, + 265 + ], + "spans": [ + { + "bbox": [ + 143, + 252, + 192, + 265 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 278, + 290, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 278, + 290, + 566 + ], + "spans": [ + { + "bbox": [ + 47, + 278, + 290, + 566 + ], + "type": "text", + "content": "Advanced Audio-Visual Speech Recognition (AVSR) systems have been observed to be sensitive to missing video frames, performing even worse than single-modality models. While applying the common dropout techniques to the video modality enhances robustness to missing frames, it simultaneously results in a performance loss when dealing with complete data input. In this study, we delve into this contrasting phenomenon through the lens of modality bias and uncover that an excessive modality bias towards the audio modality induced by dropout constitutes the fundamental cause. Next, we present the Modality Bias Hypothesis (MBH) to systematically describe the relationship between the modality bias and the robustness against missing modality in multimodal systems. 
Building on these findings, we propose a novel Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) framework to reduce over-reliance on the audio modality, maintaining performance and robustness simultaneously. Finally, to address an entirely missing modality, we adopt adapters to dynamically switch decision strategies. The effectiveness of our proposed approach is evaluated through comprehensive experiments on the MISP2021 and MISP2022 datasets. Our code is available at https://github.com/dalision/ModalBiasAVSR." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 579, + 128, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 579, + 128, + 593 + ], + "spans": [ + { + "bbox": [ + 47, + 579, + 128, + 593 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 600, + 287, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 600, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 600, + 287, + 685 + ], + "type": "text", + "content": "Audio-Visual Speech Recognition (AVSR) is a multimodal application inspired by human speech perception. It outperforms single-modality models by incorporating noise-invariant complementary information from visual cues, especially in noisy environments. Driven by increasingly large open-source datasets and models [1-4], AVSR has achieved significant advancements across various bench" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 254, + 491, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 254, + 491, + 266 + ], + "spans": [ + { + "bbox": [ + 306, + 254, + 491, + 266 + ], + "type": "text", + "content": "marks with a simple end-to-end design [5, 6]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 267, + 546, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 267, + 546, + 458 + ], + "spans": [ + { + "bbox": [ + 304, + 267, + 546, + 458 + ], + "type": "text", + "content": "Recent research on AVSR focuses on more challenging real-life scenarios. Techniques such as reinforcement learning [7] and carefully designed fusion architecture [8-10] are used to accommodate varying noise levels and overlapping speech. Self-supervised learning [11] and automatic labeling techniques [12] are applied facing insufficient audiovisual pairs. Meanwhile, various synchronization modules have been developed for audio-visual alignment.[13-15]. However, restricted to the open-source datasets [1, 2, 16], most studies often assume that each video is recorded in relatively high quality, without blurring, corruption, or loss. Moreover, there is growing evidence to suggest that current advanced AVSR systems are highly susceptible to perturbations in video modality [17, 18], resulting in significant performance degradation even perform worse than single-modality models [19, 20]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 460, + 547, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 547, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 547, + 664 + ], + "type": "text", + "content": "Missing video modality is a crucial and common problem for AVSR applied in real-life scenarios [1, 17, 19, 20]. It arises from various causes, including losses induced by network latency or hardware limitations, as well as errors in lip movement tracking due to occlusion and side-face. 
Most researchers utilize dropout techniques on video training data to improve robustness against missing modalities [19-23]. It has been demonstrated to effectively mitigate the out-of-distribution (OOD) issue and alleviate performance degradation without additional inference consumption or complex modules. However, it leads to new challenges on real-life scenarios with low-quality input. In our early experiments on MISP datasets [24, 25], a contradictory phenomenon could be observed in Figure 1: while applying the dropout strategy to video training data enhance the robustness against missing video modality, it also leads to performance degradation when dealing with complete data input." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 306, + 673, + 547, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 673, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 673, + 547, + 713 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 306, + 673, + 547, + 713 + ], + "type": "text", + "content": "Distinguished from the classic dropout that randomly deactivates nodes during neural network training, dropout in this paper specifically refers to a data augmentation technique that partially or entirely replaces original video frames with padding frames." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 693, + 287, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 693, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 693, + 287, + 712 + ], + "type": "text", + "content": "*Corresponding author. This work was supported by the National Natural Science Foundation of China under Grant No. 62171427." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "27445" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": "On the other hand, all AVSR systems consistently lag behind unimodal ASR when facing completely missing video." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 98, + 286, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 98, + 286, + 277 + ], + "spans": [ + { + "bbox": [ + 46, + 98, + 286, + 277 + ], + "type": "text", + "content": "We attempt to analyze the reasons behind the above-mentioned phenomenon from the perspective of modality bias. Existing multimodal applications can be categorized into two types: (1) modality-balanced systems, in which each modality contributes relatively equally to the model decision, such as Multimodal Emotion Recognition (MER) [26] and Hate Speech Detection (HSD) [27]; (2) modality-biased systems that over-relies on certain modality that contains more task-related information. AVSR is a typical modality-biased system dominated by audio. Therefore, an intuitive insight suggests that although dropout on the video modality could address the OOD problem between the training and inference stages, it may exacerbate the modality bias on audio, subsequently demonstrating robustness towards missing video input." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 279, + 286, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 279, + 286, + 613 + ], + "spans": [ + { + "bbox": [ + 46, + 279, + 286, + 613 + ], + "type": "text", + "content": "In this paper, we first verify this intuitive hypothesis in Section 2 by quantitatively analyzing the differences between AVSR and unimodal automatic speech recognition (ASR). The results uncover that the modality bias essentially represents a shift from a multimodal to a unimodal distribution on audio modality in latent representation space. Next in Section 3, we extend our findings to more general multimodal applications and propose the Modality Bias Hypothesis (MBH) to systematically describe the relationship between modality bias and robustness to missing modality. In Sections 4 and 5, we are committed to achieving two objectives: improving the robustness of AVSR without degradation with complete input, and ensuring that AVSR consistently outperforms ASR when faced with severe or complete video missing. To this end, we present Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD), in which the robust student model leverages hidden knowledge extracted by a relatively unbiased teacher model to prevent the distribution of task-relevant representations from transferring into a unimodal distribution. The method is observed to enhance missing robustness through the learning of complementary information from the other modality and utilizing context information from adjacent frames. For video severely or entirely missing situations, adapters are adopted to the modality-specific branch to dynamically switch decision bias dominated by modality-specific representations. The key contributions can be summarized as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 616, + 286, + 714 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 46, + 616, + 286, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 616, + 286, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 616, + 286, + 664 + ], + "type": "text", + "content": "- We investigate dropout-induced modality bias and uncover that it fundamentally manifests as a shift from a multimodal to a unimodal distribution of audio modality in the hidden representation subspace as detailed in Section 2." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 665, + 286, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 286, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 286, + 714 + ], + "type": "text", + "content": "- We propose using the Modality Bias Hypothesis (MBH) to systematically describe the decision-making process influenced by modal bias in a multimodal system, as well as the relationship between modal bias and modality" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 314, + 72, + 536, + 188 + ], + "blocks": [ + { + "bbox": [ + 314, + 72, + 536, + 188 + ], + "lines": [ + { + "bbox": [ + 314, + 72, + 536, + 188 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 536, + 188 + ], + "type": "image", + "image_path": "6a0267eae5b2c781ec1afd17d26ab22fc95c8dbb2c438d4226566d8f42616102.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 193, + 545, + 259 + ], + "lines": [ + { + "bbox": [ + 304, + 193, + 545, + 259 + ], + "spans": [ + { + "bbox": [ + 304, + 193, + 545, + 259 + ], + "type": "text", + "content": "Figure 1. CER (in %) degradation curves of AVSR trained with different dropout rates on video frames. Compared with the baseline AVSR without dropout (in red), other AVSR systems perform better with missing input but worse with complete data input. As the training dropout rate increases, the CER curve of AVSR gradually converges to that of ASR (dotted line)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 309, + 272, + 539, + 357 + ], + "blocks": [ + { + "bbox": [ + 309, + 272, + 539, + 357 + ], + "lines": [ + { + "bbox": [ + 309, + 272, + 539, + 357 + ], + "spans": [ + { + "bbox": [ + 309, + 272, + 539, + 357 + ], + "type": "image", + "image_path": "10d69b3367aba09ae0b39b2e4ecfa79114bfddc6aecf2be02a65bf1238c9ed9d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 362, + 545, + 418 + ], + "lines": [ + { + "bbox": [ + 304, + 362, + 545, + 418 + ], + "spans": [ + { + "bbox": [ + 304, + 362, + 545, + 418 + ], + "type": "text", + "content": "Figure 2. Two groups of similarity analysis between ASR and AVSR transcriptions. In both groups, an increase in the similarity of recognition transcriptions is observed as the training dropout rate increases. The similarity is measured by relative CER (in " + }, + { + "bbox": [ + 304, + 362, + 545, + 418 + ], + "type": "inline_equation", + "content": "\\%" + }, + { + "bbox": [ + 304, + 362, + 545, + 418 + ], + "type": "text", + "content": " ), where the ASR transcription replaces the ground truth." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 307, + 430, + 544, + 526 + ], + "blocks": [ + { + "bbox": [ + 307, + 430, + 544, + 526 + ], + "lines": [ + { + "bbox": [ + 307, + 430, + 544, + 526 + ], + "spans": [ + { + "bbox": [ + 307, + 430, + 544, + 526 + ], + "type": "image", + "image_path": "545f39a42791b5bb0533696dab1c6818d4580dc726561b65356f3f6773430bdf.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 530, + 545, + 586 + ], + "lines": [ + { + "bbox": [ + 304, + 530, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 304, + 530, + 545, + 586 + ], + "type": "text", + "content": "Figure 3. 
Similarity matrices of intermediate representations between ASR and different AVSR settings. As training dropout rates increase, the diagonal lines become brighter, indicating closer proximity between the multimodal and the unimodal distributions of the latent decisive subspace in AVSR." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 592, + 484, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 592, + 484, + 603 + ], + "spans": [ + { + "bbox": [ + 306, + 592, + 484, + 603 + ], + "type": "text", + "content": "missing robustness as detailed in Section 3." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 304, + 605, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 676 + ], + "type": "text", + "content": "- We propose Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) to enhance robustness against missing video and avoid performance degradation with complete input. For entirely missing modalities, adapters are adopted to dynamically switch decision bias to the specific modality as detailed in Section 5." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "- We achieve top AVSR performances on MISP2021 and MISP2022 datasets while maintaining robustness against missing video frames as detailed in Section 7." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27446" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 71, + 521, + 228 + ], + "blocks": [ + { + "bbox": [ + 72, + 71, + 521, + 228 + ], + "lines": [ + { + "bbox": [ + 72, + 71, + 521, + 228 + ], + "spans": [ + { + "bbox": [ + 72, + 71, + 521, + 228 + ], + "type": "image", + "image_path": "5c5202081d9db7ac9cea6d81d4169310a37b96165cb383e093b282056bce0291.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "lines": [ + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "spans": [ + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "text", + "content": "Figure 4. An illustration of the Modality Bias Hypothesis (MBH). 
In the left subplot, the task-relevant component (shaded part) of the latent representations consists of " + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "inline_equation", + "content": "Z^{sa}" + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "inline_equation", + "content": "Z^{sv}" + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "inline_equation", + "content": "Z^g" + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "text", + "content": ", representing audio-specific, visual-specific decision features and modality-general decisive features respectively. The corresponding proportions are denoted by " + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 232, + 547, + 277 + ], + "type": "text", + "content": ". The right subplot shows a dynamic process of decisive bias with an increasing training dropout rate. Dropout leads to a consistent modality bias on audio, regardless of the extent of the missing." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 282, + 228, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 282, + 228, + 295 + ], + "spans": [ + { + "bbox": [ + 47, + 282, + 228, + 295 + ], + "type": "text", + "content": "2. Dropout-Induced Modality Bias" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 304, + 290, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 304, + 290, + 519 + ], + "spans": [ + { + "bbox": [ + 46, + 304, + 290, + 519 + ], + "type": "text", + "content": "We investigate the contradictory phenomenon 1 by examining the character error rate (CER) across five Mandarin AVSR systems varying training dropout rates (from 0.0 to 0.7) and testing video missing rates (from 0.0 to 1.0). As shown in Figure 1, two trends are observed: (1) in terms of absolute CER, the model trained with a higher dropout rate deteriorate more on no-missing complete multimodal data and slightly missing video frames, but it performs better on severely and entirely missing video frames; and (2) in term relative performance, the CER degradation curve of the AVSR model trained with a higher dropout rate tends to converge to the unimodal ASR recognition curve. We further ensure whether the similarity of performance degradation curves directly corresponds to the recognition transcription similarity of ASR and AVSR in Figure 2. As we expected, an increase in training dropout rate leads to higher transcription similarity between AVSR and ASR across different test settings." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": "To understand this, we investigate the discrepancy in decisive patterns of ASR and each AVSR. We aim to quantify the divergence between latent decision distributions of these models by measuring the distance of intermediate representation samples. Through random sampling of complete audio-visual data batches, we generate intermediate layer representations using the encoder of ASR or AVSR trained at different dropout rates. Figure 3 illustrates cosine distance-based similarity matrices for the intermediate representations between ASR and different AVSR configurations. The diagonal elements in each subplot represent the similarity between intermediate representations from the same inputs. Notably, with an increase training dropout rate, these diagonal lines brighten, signifying a rise in intermediate representation similarity. This suggests closer proximity of the AVSR multimodal distribution in the la" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 283, + 545, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 283, + 545, + 295 + ], + "spans": [ + { + "bbox": [ + 305, + 283, + 545, + 295 + ], + "type": "text", + "content": "tent decisive subspace to the unimodal distribution of ASR." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 295, + 546, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 295, + 546, + 559 + ], + "spans": [ + { + "bbox": [ + 304, + 295, + 546, + 559 + ], + "type": "text", + "content": "Through the aforementioned three experiments, we have discovered that increasing the training dropout rate on video data leads to increased similarity between AVSR and ASR in the performance degradation curves, recognition results, and intermediate representation subspace distribution. The findings reveal the significant impact of dropout in introducing effectively perturbs the distribution of multimodal training data. It leads to a shift from multimodal joint distribution to unimodal distribution, resulting in a decision bias towards audio during the decision-making process, as reflected in the output similarity of ASR. We refer to this phenomenon induced by dropout as dropout-induced modality bias. Although dropout-induced bias enhances the robustness of missing video data to some extent, we emphasize that it contradicts the primary design of AVSR as a robust application in noisy environments with supplementary visual cues. The introduction of artificial noise (padding frames) in video data induces the model to converge toward trivial solutions, leading to an excessive dependence on the audio modality. This over-reliance, in turn, leads to a degradation in performance when presented with complete multimodal input in a noisy environment." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 569, + 493, + 583 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 569, + 493, + 583 + ], + "spans": [ + { + "bbox": [ + 305, + 569, + 493, + 583 + ], + "type": "text", + "content": "3. 
Modality Bias Hypothesis (MBH)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 589, + 547, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 589, + 547, + 637 + ], + "spans": [ + { + "bbox": [ + 304, + 589, + 547, + 637 + ], + "type": "text", + "content": "In this section, we propose the Modality Bias Hypothesis (MBH) based on the Modality Bias Venn diagram (MBVD) to systematically describe the relationship between modality bias and robustness to missing modality." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 641, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 713 + ], + "type": "text", + "content": "Modality Bias Venn Diagram As shown in Figure 4 on the left, the MBVD depicts the components of the latent decisive feature of multimodal systems in the form of a Venn Diagram. It is a variant of the Modality Venn Diagram (MVD) employed in multimodal knowledge distillation [28]. Without loss of generality, we take AVSR as an" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "27447" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": "example and define " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^a" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^v" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " as the original feature space of audio, video and label space, respectively. The decisive feature " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ", commonly a form of intermediate layer representation, consists of two modality components " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "z^a" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " (blue circles) and " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "z^v" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " (green circle). 
We denote " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "I(\\cdot)" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " as mutual information and " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "I(\\cdot|\\cdot)" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " as conditional mutual information. The task-relevant decisive feature " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "z^u" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "I(z,y)" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ") is depicted by the shaded region and can be further divided into three components. " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "z^g" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "I(z^a,z^v,y)" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ") represents modality-general decisive features, while " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "z^{sa}" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "I(z^u,z^a|z^g)" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ") and " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "z^{sv}" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "I(z^u,z^v|z^g)" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ") represent modality-specific decisive features. We denote their proportions in " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "z^u" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ", respectively. These features collectively contribute to determining the final task output " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "\\hat{y}" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ". 
For AVSR, a higher " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " represents a greater decision bias of the model on the audio modality, focusing more on speech than lip movements. A larger " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " indicates a model's inclination towards modality synergy by maximizing the mutual information between modalities for decision-making, as in some modality-balanced models [26, 27]. Furthermore, " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "z^u" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " is generated by the original features " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "x^a,x^v" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "g(x^a,x^v;\\phi)" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "g(\\phi)" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": " can be seen as a neural network-based transfer such as an encoder with parameters " + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 47, + 72, + 289, + 384 + ], + "type": "text", + "content": ". Therefore, the decision process of the multimodal system can be decomposed into two steps, following the Bayesian process: the MBVD hidden decisive feature generation step and the decision step:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 78, + 391, + 287, + 405 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 391, + 287, + 405 + ], + "spans": [ + { + "bbox": [ + 78, + 391, + 287, + 405 + ], + "type": "interline_equation", + "content": "P \\left(y \\mid x ^ {a}, x ^ {v}\\right) = P \\left(y \\mid z ^ {\\mu}\\right) P \\left(z ^ {\\mu} \\mid x ^ {a}, x ^ {v}\\right) \\tag {1}", + "image_path": "6dc1f75d25bca313ef301e151fad616446543005ab35a9f6b737b3c8acbae9a0.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "spans": [ + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": "Modality Bias Hypothesis Based on MBVD, we give a systematic description of the relationship between modality bias and robustness to missing modality in the view of MBH. 
As shown in Figure 4 on the right, by applying dropout with different rates " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "k_{i} \\in [0,1]" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": " on video training data, the original video feature space " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^v" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": " can be split into a series of subsets " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{X}_{k_1}^v, \\mathcal{X}_{k_2}^v, \\dots, \\mathcal{X}_{k_n}^v\\}" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": ". The samples from space " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^a \\times \\mathcal{X}_{k_i}^v" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": " are denoted as dyads " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "(x^a, x_{k_i}^v)" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": ". Compared to the model trained on complete multimodal datas " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "(x^a, x_{0.0}^v)" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": ", the model trained on data pairs " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "(x^a, x_\\theta^v)" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": " with a video dropout rate " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "\\theta_{train} \\in (0.0,1.0)" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": " exhibits a greater decision bias on audio modality with larger " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": ", smaller " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": ". As " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": " approaches 1.0, the task-relevant decisive feature " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "z_u" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": " becomes steadily dominated by the audio-specific decisive feature " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "z_a" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": ", resulting in a transformation from a bimodal distribution in the latent representation subspace to a unimodal one. 
The decision pattern of the multimodal model shifts from " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "p(y|z_u)" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "inline_equation", + "content": "p(y|z_a)" + }, + { + "bbox": [ + 47, + 413, + 287, + 629 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 630, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 287, + 713 + ], + "type": "text", + "content": "During the inference stage, these multimodal models display different modality biases. For the model trained on complete multimodal data or dropout on audio with a larger " + }, + { + "bbox": [ + 46, + 630, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 630, + 287, + 713 + ], + "type": "text", + "content": ", they tend to search general information shared among modalities. This hypothesis effectively explains the observed experimental phenomena in previous studies. For modality-biased models, such as Multimodal Senti" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "type": "text", + "content": "ment Analysis (MSA) [22] dominated by text, Multimodal Speech Enhancement (MSE) [29] dominated by audio, as well as AVSR dominated by audio [21, 23, 30], it has been observed that applying dropout on the primary modality helps alleviate modality bias and brings about slight improvements when dealing with complete input. On the other hand, the AVSR model with larger " + }, + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "type": "text", + "content": " and smaller " + }, + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "type": "text", + "content": " values tends to focus more on speech and neglect complementary information from lip movements. When dealing with partially or completely missing video data, the model with larger " + }, + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 72, + 547, + 216 + ], + "type": "text", + "content": " shows its robustness, which aligns well with the aforementioned experimental observations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 224, + 547, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 224, + 547, + 253 + ], + "spans": [ + { + "bbox": [ + 305, + 224, + 547, + 253 + ], + "type": "text", + "content": "4. Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 258, + 547, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 258, + 547, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 258, + 547, + 437 + ], + "type": "text", + "content": "For the robustness training of modality-bias systems, it is crucial to avoid dropout-induced modality bias on the primary modality. 
Dropout indeed alleviates the OOD problem to some extent but encourages multimodal models to pursue trivial solutions at the same time. Ideal robust multimodal models are expected to achieve two goals: (1) learn to extract mutual information across modalities rather than relying on a certain modality when facing complete paired input, and (2) learn to complement information from the other modality and utilize context information from adjacent frames. To prevent excessive modality bias caused by dropout, we propose a novel Multimodal Distribution Approximation with Knowledge Distillation (MDA-KD) framework to constrain the distribution of the multimodal feature space during the robustness training phase." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 438, + 547, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 438, + 547, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 438, + 547, + 665 + ], + "type": "text", + "content": "Unlike traditional knowledge distillation methods, firstly, the teacher model is trained on the complete multimodal data pairs, while the student model is trained on missing video data. The teacher model is relatively unbiased with a higher proportion of modality-general decisive features " + }, + { + "bbox": [ + 304, + 438, + 547, + 665 + ], + "type": "inline_equation", + "content": "z^g" + }, + { + "bbox": [ + 304, + 438, + 547, + 665 + ], + "type": "text", + "content": " in the MBVD space. During the training process of the student model, the teacher model serves as an anchor point, preventing the student model from shifting towards a unimodal distribution on the audio modality. Note that the difference between teacher and student models in our method is modality bias varies, rather than size, architecture as in common KD methods [31-34]. Additionally, distillation occurs at the hidden layer rather than the logistic outputs, aiming to minimize the distances between decision distribution samples of the teacher and student models and further constrain the intermediate representation subspace distribution of the student model. In practice, we take the knowledge from the intermediate representation of the cross-modal encoder layers." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "Here, we adopt the symbol definitions from Section 3 and provide a formal description of MDA-KD. 
For a naturally modal-biased multimodal system, the data samples from original feature space " + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^a\\times \\mathcal{X}_{k_i}^v\\times \\mathcal{Y}" + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": " can be de" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "27448" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 74, + 102, + 87 + ], + "blocks": [ + { + "bbox": [ + 72, + 74, + 102, + 87 + ], + "lines": [ + { + "bbox": [ + 72, + 74, + 102, + 87 + ], + "spans": [ + { + "bbox": [ + 72, + 74, + 102, + 87 + ], + "type": "image", + "image_path": "2347c63917e0f5e204c25d112e973d1dbe14d5be0e8c0cc6666d68101e21c453.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 77, + 87, + 102, + 95 + ], + "blocks": [ + { + "bbox": [ + 77, + 87, + 102, + 95 + ], + "lines": [ + { + "bbox": [ + 77, + 87, + 102, + 95 + ], + "spans": [ + { + "bbox": [ + 77, + 87, + 102, + 95 + ], + "type": "image", + "image_path": "2a6b94d043398449cf5959309e9f5beba5e85c745f34a5952de8bc2ab581c9e9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 86, + 97, + 94, + 105 + ], + "lines": [ + { + "bbox": [ + 86, + 97, + 94, + 105 + ], + "spans": [ + { + "bbox": [ + 86, + 97, + 94, + 105 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 77, + 155, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 77, + 155, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 77, + 155, + 84 + ], + "type": "text", + "content": "Module with adapter" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 87, + 161, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 87, + 161, + 94 + ], + "spans": [ + { + "bbox": [ + 107, + 87, + 161, + 94 + ], + "type": "text", + "content": "Cross-attention module" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 97, + 165, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 97, + 165, + 103 + ], + "spans": [ + { + "bbox": [ + 107, + 97, + 165, + 103 + ], + "type": "text", + "content": "Element-wise summation" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 78, + 106, + 100, + 113 + ], + "blocks": [ + { + "bbox": [ + 78, + 106, + 100, + 113 + ], + "lines": [ + { + "bbox": [ + 78, + 106, + 100, + 113 + ], + "spans": [ + { + "bbox": [ + 78, + 106, + 100, + 113 + ], + "type": "image", + "image_path": "46eff9de648e3d26883a52fc7df7cc6cc55883095d0a243925718fe9ae73fb23.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 106, + 187, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 187, + 114 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 187, + 114 + ], + "type": "text", + "content": "Data flow when activating adapters" + } + ] + } + ], + "index": 7 + }, + { + 
"type": "image", + "bbox": [ + 78, + 133, + 220, + 258 + ], + "blocks": [ + { + "bbox": [ + 78, + 125, + 140, + 142 + ], + "lines": [ + { + "bbox": [ + 78, + 125, + 140, + 142 + ], + "spans": [ + { + "bbox": [ + 78, + 125, + 140, + 142 + ], + "type": "text", + "content": "Teacher AVSR Model (Complete data input)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 78, + 133, + 220, + 258 + ], + "lines": [ + { + "bbox": [ + 78, + 133, + 220, + 258 + ], + "spans": [ + { + "bbox": [ + 78, + 133, + 220, + 258 + ], + "type": "image", + "image_path": "f4dd81b5bbe68a5613e660802a40ddd40a1d85187e579743fd61f0ebdd1f90b0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 232, + 85, + 517, + 217 + ], + "blocks": [ + { + "bbox": [ + 270, + 76, + 443, + 84 + ], + "lines": [ + { + "bbox": [ + 270, + 76, + 443, + 84 + ], + "spans": [ + { + "bbox": [ + 270, + 76, + 443, + 84 + ], + "type": "text", + "content": "Audiovisual Speech Recognition Network Architecture" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 232, + 85, + 517, + 217 + ], + "lines": [ + { + "bbox": [ + 232, + 85, + 517, + 217 + ], + "spans": [ + { + "bbox": [ + 232, + 85, + 517, + 217 + ], + "type": "image", + "image_path": "5787d642549b4f3a2ef5c4d4e97105f287a027a219b92484233511a129808ad6.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 235, + 230, + 359, + 263 + ], + "blocks": [ + { + "bbox": [ + 268, + 221, + 320, + 228 + ], + "lines": [ + { + "bbox": [ + 268, + 221, + 320, + 228 + ], + "spans": [ + { + "bbox": [ + 268, + 221, + 320, + 228 + ], + "type": "text", + "content": "Lip Video Frames" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 235, + 230, + 359, + 263 + ], + "lines": [ + { + "bbox": [ + 235, + 230, + 359, + 263 + ], + "spans": [ + { + "bbox": [ + 235, + 230, + 359, + 263 + ], + "type": "image", + "image_path": "1bb738a5a29126ea7c344cb6c593778f81c6c8b0dd7d625608b41c88a2a330b9.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 274, + 545, + 328 + ], + "lines": [ + { + "bbox": [ + 46, + 274, + 545, + 328 + ], + "spans": [ + { + "bbox": [ + 46, + 274, + 545, + 328 + ], + "type": "text", + "content": "Figure 5. Overall framework of the proposed AVSR system. We address challenging real-world scenarios involving missing video frames and noisy speech with an overlap rate exceeding " + }, + { + "bbox": [ + 46, + 274, + 545, + 328 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 46, + 274, + 545, + 328 + ], + "type": "text", + "content": " during both the training and testing stages. In MDA-KD, latent knowledge is sampled from the latent distribution of the teacher model with complete data input. This latent knowledge serves as an anchor point to prevent dropout-induced modality bias during the robustness training of the student network. For entirely missing video input, the MS-Adapter is activated to enable a dynamic decision switch." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 365, + 230, + 515, + 262 + ], + "blocks": [ + { + "bbox": [ + 383, + 221, + 449, + 228 + ], + "lines": [ + { + "bbox": [ + 383, + 221, + 449, + 228 + ], + "spans": [ + { + "bbox": [ + 383, + 221, + 449, + 228 + ], + "type": "text", + "content": "Noisy Audio Waveform" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 365, + 230, + 515, + 262 + ], + "lines": [ + { + "bbox": [ + 365, + 230, + 515, + 262 + ], + "spans": [ + { + "bbox": [ + 365, + 230, + 515, + 262 + ], + "type": "image", + "image_path": "810312020fab927186c92f5ac20058a5aad783bc615ad0562d567a0e6a688c7b.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "text", + "content": "noted as triples " + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "inline_equation", + "content": "(x^{a}, x_{k_{i}}^{v}, y)" + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "text", + "content": ". For simplicity, we denote " + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "inline_equation", + "content": "x_{0.0}^{v}" + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "inline_equation", + "content": "x^{v}" + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "text", + "content": ". The teacher model " + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "inline_equation", + "content": "T e(\\phi)" + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "text", + "content": " is first trained on complete multimodal data " + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "inline_equation", + "content": "(x^{a}, x^{v}, y)" + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "text", + "content": " model with parameters " + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "text", + "content": ", and the model's decision process can be formulated as " + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "inline_equation", + "content": "P_{t e}(y \\mid x^{a}, x^{v})" + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "text", + "content": " in a Bayesian decision problem. We assume that the teacher model is a neural network " + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "inline_equation", + "content": "g(\\phi)" + }, + { + "bbox": [ + 47, + 335, + 287, + 430 + ], + "type": "text", + "content": " and it is trained by minimizing the following loss function, a form of multitask learning." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 88, + 442, + 287, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 442, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 88, + 442, + 287, + 460 + ], + "type": "interline_equation", + "content": "T e (\\phi) = \\min _ {\\phi} \\mathcal {L} _ {\\mathrm {M L T}} (g (x ^ {a}, x ^ {v}; \\phi), y), \\tag {2}", + "image_path": "5e678c479072804407d42fd3b1355e2dfebdc5c692ddd2a78458f724c537b90b.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 59, + 462, + 287, + 491 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 462, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 59, + 462, + 287, + 491 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {M L T}} \\left(x ^ {a}, x ^ {v}; \\phi\\right) = \\lambda \\log P _ {\\mathrm {C T C}} \\left(y \\mid x ^ {a}, x ^ {v}\\right) \\tag {3} \\\\ + (1 - \\lambda) \\log P _ {\\mathrm {A t t}} \\left(y _ {i} \\mid x ^ {a}, x ^ {v}\\right), \\\\ \\end{array}", + "image_path": "cca42a4b419e002660fcbef6fc58d1da614c827b5433a7080fa4b0209e263caf.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 46, + 494, + 287, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 494, + 287, + 637 + ], + "spans": [ + { + "bbox": [ + 46, + 494, + 287, + 637 + ], + "type": "text", + "content": "where the tunable parameter " + }, + { + "bbox": [ + 46, + 494, + 287, + 637 + ], + "type": "inline_equation", + "content": "\\lambda \\in [0,1]" + }, + { + "bbox": [ + 46, + 494, + 287, + 637 + ], + "type": "text", + "content": " is used to balance the sequence-level Connectionist Temporal Classification (CTC) loss and the frame-wise Cross Entropy (CE) loss, which serve as the standard end-to-end ASR training objectives. During the training of the student model, the dropout strategy is applied to the secondary modality " + }, + { + "bbox": [ + 46, + 494, + 287, + 637 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 46, + 494, + 287, + 637 + ], + "type": "text", + "content": ", while the teacher model is frozen with complete multimodal data as input. It is important to note that the student and teacher models have the same network architecture. From the perspective of MBVD, the whole decision process of the multimodal model can be divided into hidden feature generation step and decision step." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 57, + 654, + 287, + 669 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 654, + 287, + 669 + ], + "spans": [ + { + "bbox": [ + 57, + 654, + 287, + 669 + ], + "type": "interline_equation", + "content": "P _ {s t} \\left(y \\mid x ^ {a}, x _ {k _ {i}} ^ {v}\\right) = P _ {s t} \\left(y \\mid z ^ {\\mu}\\right) P _ {s t} \\left(z ^ {\\mu} \\mid x ^ {a}, x _ {k _ {i}} ^ {v}\\right), \\tag {4}", + "image_path": "4e6173f7d48f82cb2f4538ddbb5c84daa44184039be4b7b8fdef75d96e2c84f7.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 62, + 673, + 287, + 686 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 673, + 287, + 686 + ], + "spans": [ + { + "bbox": [ + 62, + 673, + 287, + 686 + ], + "type": "interline_equation", + "content": "P _ {t e} \\left(y \\mid x ^ {a}, x ^ {v}\\right) = P _ {t e} \\left(y \\mid z ^ {\\mu}\\right) P _ {t e} \\left(z ^ {\\mu} \\mid x ^ {a}, x ^ {v}\\right), \\tag {5}", + "image_path": "42141ee0fe04b56908f4192ddb91f4dac9c22675128ac351cc8c18b5a778626a.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "inline_equation", + "content": "z^{\\mu} \\in \\mathbb{R}^{d\\mu}" + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": " represents the combined representation of modality-specific decisive features " + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "inline_equation", + "content": "z^{sa} \\in \\mathbb{R}^{da}" + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "inline_equation", + "content": "z^{sv} \\in" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 335, + 545, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 335, + 545, + 370 + ], + "spans": [ + { + "bbox": [ + 305, + 335, + 545, + 370 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{dv}" + }, + { + "bbox": [ + 305, + 335, + 545, + 370 + ], + "type": "text", + "content": ", and modality-general decisive features " + }, + { + "bbox": [ + 305, + 335, + 545, + 370 + ], + "type": "inline_equation", + "content": "z^g \\in \\mathbb{R}^{dg}" + }, + { + "bbox": [ + 305, + 335, + 545, + 370 + ], + "type": "text", + "content": ". The tuple " + }, + { + "bbox": [ + 305, + 335, + 545, + 370 + ], + "type": "inline_equation", + "content": "(z^{sa}, z^{sv}, z^g)" + }, + { + "bbox": [ + 305, + 335, + 545, + 370 + ], + "type": "text", + "content": " represents a sample drawn from the MBVD hidden features space, denoted as " + }, + { + "bbox": [ + 305, + 335, + 545, + 370 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}^{sa} \\times \\mathcal{Z}^{sv} \\times \\mathcal{Z}^g" + }, + { + "bbox": [ + 305, + 335, + 545, + 370 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 371, + 545, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 371, + 545, + 454 + ], + "spans": [ + { + "bbox": [ + 304, + 371, + 545, + 454 + ], + "type": "text", + "content": "initialized on the parameter of the teacher model, we introduce an additional loss term to constrain the dynamic process of the student model's MBVD feature distribution in robust training. The distance between batch samples from the student and the teacher model is used to approximate the difference of distribution, which serves as a form of frame-level knowledge distillation." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 337, + 460, + 545, + 505 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 460, + 545, + 505 + ], + "spans": [ + { + "bbox": [ + 337, + 460, + 545, + 505 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {K D}} \\left(x ^ {a}, x ^ {v}, x _ {k} ^ {v}; \\phi_ {t e}, \\phi_ {s t}\\right) = \\mathrm {K L} \\left(S _ {t e}, S _ {s t}\\right), \\\\ S _ {t e} = \\sigma_ {T} \\left(\\operatorname {S a m p l e} \\left(P _ {t e} \\left(z ^ {\\mu} \\mid x ^ {a}, x ^ {v}\\right)\\right)\\right), \\tag {6} \\\\ S _ {s t} = \\sigma_ {T} \\left(\\operatorname {S a m p l e} \\left(P _ {s t} \\left(z ^ {\\mu} \\mid x ^ {a}, x _ {k _ {i}} ^ {v}\\right)\\right)\\right), \\\\ \\end{array}", + "image_path": "c25d9904394ba5b4ad659b792cce65c742a3e737f2dd387f08b01a4e2446d855.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\sigma_T(x)" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": " denotes the SoftMax function with temperature " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": " and Sample represents the sample function. This distribution approximation serves two main purposes. Firstly, during training, when the student network encounters a missing modality feature " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "x_{k_i}^v" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": ", the convergence of the student's decisive feature " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "z^u = g(x^a,x_{k_i}^v;\\phi_{st})" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": " towards the teacher's decisive feature " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "z^{u} = g(x^{a},x^{v};\\phi_{te})" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": " encourages the utilization of contextual information from " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "x_{k_i}^v" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": ". 
Additionally, with the dual cross-attention design, the process complements the information extracted from " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "x^a" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": ", effectively addressing the condition of missing frames and promoting out-of-distribution generality. On the other hand, the KD loss is used to minimize the distance between the distributions of the teacher and student models, preventing the student model from converging to trivial solutions. Subsequently, we train the student model jointly with a weighted sum of the standard training loss and distillation loss:" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27449" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 58, + 95, + 286, + 122 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 95, + 286, + 122 + ], + "spans": [ + { + "bbox": [ + 58, + 95, + 286, + 122 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {M L T}} \\left(x ^ {a}, x ^ {v}, x _ {k} ^ {v}; \\phi_ {t e}, \\phi_ {s t}\\right) = \\beta \\mathcal {L} _ {\\mathrm {K D}} \\left(x ^ {a}, x ^ {v}, x _ {k} ^ {v}; \\phi_ {t e}, \\phi_ {s t}\\right) \\\\ + (1 - \\beta) \\mathcal {L} _ {\\mathrm {M L T}} \\left(x ^ {a}, x _ {k} ^ {v}; \\phi_ {s t}\\right). \\tag {7} \\\\ \\end{array}", + "image_path": "3241ef630f45f27a64c4042ed046d87e320eaae0444cef0356687758bb97139f.jpg" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 132, + 272, + 145 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 132, + 272, + 145 + ], + "spans": [ + { + "bbox": [ + 47, + 132, + 272, + 145 + ], + "type": "text", + "content": "5. Modality-Specific Adapter (MS-Adapter)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "spans": [ + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "text", + "content": "As illustrated in Figure 4 on the right, when facing severely or entirely missing video data, we consider it unreliable to continue employing a synergistic decision-making strategy like MDA-KD with relatively high values of " + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "text", + "content": ". Padding frames lack sufficient contextual information and may introduce noise. 
Therefore, in such scenarios, a dynamic switch in decision strategy from " + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "inline_equation", + "content": "P(y|z^u)" + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "inline_equation", + "content": "P(y|z^a)" + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "text", + "content": " is necessary as a complement to MDA-KD. In view of the success of adapters applied in foundation model fine-tuning [35-38], we attempt to extend it to address the modality missing issue in multimodal models. For clarity, we refer to this extension as Modality-Specific Adapter (MS-Adapter). Specifically, LORA [39] is adopted to self-attention layers in the audio branch, marked with a dashed box in Figure 5. These adapters perform residual-style feature blending with the original pre-trained features. The residual weight could be represented as low-rank matrices " + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "inline_equation", + "content": "\\Delta W \\in \\mathbb{R}^{d \\times d}" + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "text", + "content": ", and it could be decomposed into a pair of fan-in and fan-out linear layers with weights " + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "inline_equation", + "content": "A \\in \\mathbb{R}^{r \\times d}" + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "inline_equation", + "content": "B \\in \\mathbb{R}^{d \\times r}" + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "inline_equation", + "content": "r \\ll d" + }, + { + "bbox": [ + 46, + 152, + 289, + 392 + ], + "type": "text", + "content": "). The reparametrization operation can be formulated below." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 399, + 287, + 412 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 399, + 287, + 412 + ], + "spans": [ + { + "bbox": [ + 83, + 399, + 287, + 412 + ], + "type": "interline_equation", + "content": "H _ {o} = H _ {i} \\left(W _ {0} + \\Delta W\\right) = H _ {i} \\left(W _ {0} + B A\\right) \\tag {8}", + "image_path": "45277e566df66199d60db9ae10046a9e76200f8d8277d45a117be9fc9d3a0c69.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 420, + 287, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 420, + 287, + 564 + ], + "spans": [ + { + "bbox": [ + 46, + 420, + 287, + 564 + ], + "type": "text", + "content": "By activating the MS-Adapter, we can dynamically switch the decision-making pattern by activating the adapters. We highlight two advantages of the MS-Adapter. First, a substantial amount of unpaired unimodal training data and data augmentation techniques could be used in the training process of the adapters. Second, the adapter training process provides an opportunity to modify the computation pathway. As illustrated in Figure 5 with dashed arrows, in both training and inference stage with audio-only input, the computation flow of the video branch will be directly cut off, and the modality fusion cross-attention module will be skipped to reduce computational costs." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 574, + 167, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 574, + 167, + 588 + ], + "spans": [ + { + "bbox": [ + 47, + 574, + 167, + 588 + ], + "type": "text", + "content": "6. Experiment Settings" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 594, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 287, + 713 + ], + "type": "text", + "content": "Dataset We conduct our experiments on MISP2021 [24] and MISP2022 [40]. These two open-source datasets present a large-scale audio-visual corpus recorded in real-life home TV scenarios with multiple groups of speakers chatting simultaneously. Multiple microphone arrays and cameras are used to collect far/middle/near-field audio and far/middle-field video. Compared to the carefully recorded videos in LRS2 [1] and LRS3 [2] from BBC interviews and TED talks, MISP datasets offer static shooting perspectives with diverse resolutions, including naturally blurred" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": "and obstructed frames. The videos are accompanied by various background noises and high speech overlap rates (42% in training set and 49% in test set). Compared oracle segment-level AVSR task in MISP201, MISP2022 presents a more challenging task of session-level AVSR without oracle speaker diarization results. To avoid limitations associated with noise simulation, all experiments are evaluated exclusively on far-field data, which aligns well with common in-car, office meeting, or smart home scenarios." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "spans": [ + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "text", + "content": "Implementation Detail We strictly adhere to the approaches outlined in [18] for model training and network architectures. We initialize the AVSR model with two pretrained unimodal models and fine-tune it in an end-to-end manner. As shown in Figure 5, the AVSR model is a dual-branch network where " + }, + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "inline_equation", + "content": "N = 3" + }, + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "inline_equation", + "content": "M = 9" + }, + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "inline_equation", + "content": "K = 6" + }, + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "text", + "content": ". For the loss function in Equation 3, we set " + }, + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "text", + "content": " to 0.7 and CTC loss consists of the same weighted intermediate CTC [41] losses in 3, 6, 9, 12 layers. 
In Equation 4, we use 0.1 for " + }, + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 185, + 546, + 340 + ], + "type": "text", + "content": ". We follow [18] to establish two baselines A0 and AV0 trained on complete modality data with dropout techniques. AV0 is fine-tuned based on A0 and a pre-trained ResNet-18 encoder with a 3D-CNN head. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 346, + 546, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 346, + 546, + 574 + ], + "spans": [ + { + "bbox": [ + 304, + 346, + 546, + 574 + ], + "type": "text", + "content": "Dropout Settings Similar to [19], we evaluate the robustness to missing video modality with various dropout methods and rates: Segment Dropout, Utterance Dropout, and Interval Dropout. Testing involves dropout rates from 0.0 to 1.0 in 0.25 intervals. Results from the three dropout methods are averaged at each rate to obtain overall dropout results. When conducting ablation studies, segments with naturally missing video frames (17%) are excluded from the test set, ensuring a consistent and controlled video missing rate. In our method, during training, A certain proportion of sample is assigned a random dropout method from the above three methods and an extra one from [21] with an optimized dropout rate. In both training and testing stages, we pad the missing video frame pixels with zeros instead of using interpolation or repetition methods. We conduct a hyper-parameter search over the training dropout rate and found that 0.5 is optimal for our method (when " + }, + { + "bbox": [ + 304, + 346, + 546, + 574 + ], + "type": "inline_equation", + "content": "D_{p}rod" + }, + { + "bbox": [ + 304, + 346, + 546, + 574 + ], + "type": "text", + "content": " is 0.5). This rate implies that half of the video frames in a selected sample are padded with zeros. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 585, + 491, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 585, + 491, + 598 + ], + "spans": [ + { + "bbox": [ + 306, + 585, + 491, + 598 + ], + "type": "text", + "content": "7. Experiments and Result Analysis" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 604, + 534, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 604, + 534, + 617 + ], + "spans": [ + { + "bbox": [ + 306, + 604, + 534, + 617 + ], + "type": "text", + "content": "7.1. Overall Comparison of Experiment Settings" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 623, + 545, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 623, + 545, + 696 + ], + "spans": [ + { + "bbox": [ + 304, + 623, + 545, + 696 + ], + "type": "text", + "content": "In Table 1, we conduct key parameter analysis and abolation study of the proposed methods on the MISP2022 dataset with oracle speaker diarization results. We first explore the impact of dropout probability in training videos. In contrast to AV1, AV2 introduces half of the complete data pairs. As a result, it mitigates dropout-induced modality" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 315, + 702, + 450, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 702, + 450, + 713 + ], + "spans": [ + { + "bbox": [ + 315, + 702, + 450, + 713 + ], + "type": "text", + "content": "3 More details can be found in Appendix." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27450" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 65, + 71, + 529, + 198 + ], + "blocks": [ + { + "bbox": [ + 65, + 71, + 529, + 198 + ], + "lines": [ + { + "bbox": [ + 65, + 71, + 529, + 198 + ], + "spans": [ + { + "bbox": [ + 65, + 71, + 529, + 198 + ], + "type": "table", + "html": "
ModelTraining settingsTest dropout rate
DropoutDprobInit.MDA-KDMS-Adapter0.000.250.500.751.00
A0X0.0RandomXX25.1325.1325.1325.1325.13
AV0X0.0A0XX21.1423.7725.5725.8726.65
AV11.0A0XX23.2623.6824.2724.9525.91
AV20.5A0XX21.7222.5623.3724.4625.64
AV30.5AV0XX21.5322.4723.6524.5525.90
AV40.5AV0X21.3822.1823.2024.4025.70
AV50.5AV0X21.1121.7722.7824.0225.45
AV60.5AV021.1121.7722.7824.0224.94
", + "image_path": "9bdfa766bc6e6748e4f369844e88dee81d23a798fe6169eb019a78d35befc6fe.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 48, + 229, + 293, + 334 + ], + "blocks": [ + { + "bbox": [ + 46, + 202, + 545, + 224 + ], + "lines": [ + { + "bbox": [ + 46, + 202, + 545, + 224 + ], + "spans": [ + { + "bbox": [ + 46, + 202, + 545, + 224 + ], + "type": "text", + "content": "Table 1. An overall comparison in CER (%) of different system configurations. Different from the dropout rate, " + }, + { + "bbox": [ + 46, + 202, + 545, + 224 + ], + "type": "inline_equation", + "content": "D_{prob}" + }, + { + "bbox": [ + 46, + 202, + 545, + 224 + ], + "type": "text", + "content": " represents the proportion of data with missing frames in the training set. Init. refers to the network initialization method." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 229, + 293, + 334 + ], + "lines": [ + { + "bbox": [ + 48, + 229, + 293, + 334 + ], + "spans": [ + { + "bbox": [ + 48, + 229, + 293, + 334 + ], + "type": "table", + "html": "
Insert partRankDAParams (MB)CER (%)
Encoder32×4.5025.35
Encoder324.5025.08
En&Decoder329.0025.20
Encoder649.0025.08
En&Decoder6418.0025.05
Encoder12818.0025.01
En&Decoder12836.0024.94
", + "image_path": "acf157ca44a6174a1f56b56dfdf6347f5a1d635c5482aca6e15bd997c8421118.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 365, + 287, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 287, + 544 + ], + "type": "text", + "content": "bias to some extent, since a higher proportion of complete data tend to encourages the model to learn general information across modalities. This finding aligns with previous research [19], highlighting the superiority of utterance dropout over random frame dropout (the former means a larger " + }, + { + "bbox": [ + 46, + 365, + 287, + 544 + ], + "type": "inline_equation", + "content": "D_{prob}" + }, + { + "bbox": [ + 46, + 365, + 287, + 544 + ], + "type": "text", + "content": "). Next, AV3 is trained based on AV0, which means the subsequent optimized processing starts from a relatively stable convergence state with complete input. In the robust training stage, the balanced state tends to be disrupted when trained on incomplete modality pairs, searching for a new optimization coverage range. However, when trained on complete data pairs, the scenario is reversed. Thus, while AV3 outperforms AV2 with low test missing rates, it lags behind when facing severe video absence, illustrating a tug-of-war dynamic without clear guidance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "type": "text", + "content": "Next, we validate the effectiveness of MDA-KD. Compared with AV3, AV5 demonstrates superior performance for both complete and missing video modality inputs. AV4 successfully achieves our goal of enhancing robustness without any performance degradation on complete input (21.11% vs. 21.14%). This implies that the teacher model AV0 provides an explicitly optimized target in robustness training. It effectively constrains the distribution shift to the audio modality, preventing excessive modality bias caused by dropout. Furthermore, in AV4, we restrict the flow of audio data into the video branch within the dual cross-attention module. Consequently, a performance drop is observed across all test suites, highlighting the effectiveness of MDA-KD in leveraging the dual cross-attention mod" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 309, + 229, + 544, + 300 + ], + "blocks": [ + { + "bbox": [ + 46, + 337, + 288, + 360 + ], + "lines": [ + { + "bbox": [ + 46, + 337, + 288, + 360 + ], + "spans": [ + { + "bbox": [ + 46, + 337, + 288, + 360 + ], + "type": "text", + "content": "Table 2. Performance analysis of MS-Adapter. DA means data augmentation, including speed perturbation and utterance concat." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 229, + 544, + 300 + ], + "lines": [ + { + "bbox": [ + 309, + 229, + 544, + 300 + ], + "spans": [ + { + "bbox": [ + 309, + 229, + 544, + 300 + ], + "type": "table", + "html": "
MethodTest dropout rate
0.000.250.500.751.00
Cascade Utt [19]22.5423.8925.2326.0528.15
AV Dropout Utt [21]22.0023.3725.3526.2126.78
Dropout Utt [20]22.0823.2124.5625.0825.46
Ours21.1121.7722.7824.0224.94
", + "image_path": "a80268e4e499d91018298121427ea2c65bb3f92c9098d88e3fed3a8632c9d956.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 303, + 537, + 315 + ], + "lines": [ + { + "bbox": [ + 313, + 303, + 537, + 315 + ], + "spans": [ + { + "bbox": [ + 313, + 303, + 537, + 315 + ], + "type": "text", + "content": "Table 3. A " + }, + { + "bbox": [ + 313, + 303, + 537, + 315 + ], + "type": "inline_equation", + "content": "\\mathrm{{CER}}\\left( \\% \\right)" + }, + { + "bbox": [ + 313, + 303, + 537, + 315 + ], + "type": "text", + "content": " comparison with other dropout methods." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 320, + 545, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 320, + 545, + 417 + ], + "spans": [ + { + "bbox": [ + 304, + 320, + 545, + 417 + ], + "type": "text", + "content": "ule to extract modality-general information from audio for complementing missing information. Subsequently, we integrate MS-Adapters into the audio branch in AV6 based on AV5. Consequently, the performance with audio-only input improves to a " + }, + { + "bbox": [ + 304, + 320, + 545, + 417 + ], + "type": "inline_equation", + "content": "24.94\\%" + }, + { + "bbox": [ + 304, + 320, + 545, + 417 + ], + "type": "text", + "content": " CER, surpassing A0 for the first time " + }, + { + "bbox": [ + 304, + 320, + 545, + 417 + ], + "type": "inline_equation", + "content": "(24.94\\%" + }, + { + "bbox": [ + 304, + 320, + 545, + 417 + ], + "type": "text", + "content": " vs. " + }, + { + "bbox": [ + 304, + 320, + 545, + 417 + ], + "type": "inline_equation", + "content": "25.13\\%)" + }, + { + "bbox": [ + 304, + 320, + 545, + 417 + ], + "type": "text", + "content": ". These results show the effectiveness of MS-Adapters by dynamically switching to the decision patterns on audio modality with audio-only input." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 426, + 451, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 426, + 451, + 440 + ], + "spans": [ + { + "bbox": [ + 305, + 426, + 451, + 440 + ], + "type": "text", + "content": "7.2. Validation of MS-Adapter" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 445, + 545, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 445, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 304, + 445, + 545, + 624 + ], + "type": "text", + "content": "We further explore three key factors in MS-Adapter adaptation: data augmentation, insert part and rank dimension. In Table 2, we observe a decrease in CER from " + }, + { + "bbox": [ + 304, + 445, + 545, + 624 + ], + "type": "inline_equation", + "content": "25.45\\%" + }, + { + "bbox": [ + 304, + 445, + 545, + 624 + ], + "type": "text", + "content": " (AV4) to " + }, + { + "bbox": [ + 304, + 445, + 545, + 624 + ], + "type": "inline_equation", + "content": "25.35\\%" + }, + { + "bbox": [ + 304, + 445, + 545, + 624 + ], + "type": "text", + "content": ", and it further improves to " + }, + { + "bbox": [ + 304, + 445, + 545, + 624 + ], + "type": "inline_equation", + "content": "25.08\\%" + }, + { + "bbox": [ + 304, + 445, + 545, + 624 + ], + "type": "text", + "content": " with data augmentation doubling audio training data. These results suggest that the adapter adaptation effectively enhances the robustness of AVSR with completely missing video, requiring only an additional 4.50MB in parameters. 
It provides an opportunity to apply data augmentation that is effective for unimodal model training and to use extra unpaired data. Next, increasing the ranks and the quantity of adapters results in further performance gains at the expense of a larger parameter. The best performance, achieving " + }, + { + "bbox": [ + 304, + 445, + 545, + 624 + ], + "type": "inline_equation", + "content": "24.94\\%" + }, + { + "bbox": [ + 304, + 445, + 545, + 624 + ], + "type": "text", + "content": ", is shown in the bottom row and attained with the adapter inserted in both encoder and decoder blocks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 634, + 542, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 634, + 542, + 647 + ], + "spans": [ + { + "bbox": [ + 305, + 634, + 542, + 647 + ], + "type": "text", + "content": "7.3. Comparisons with Other Dropout Techniques" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 653, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 545, + 715 + ], + "type": "text", + "content": "As shown in Table 3, we compare our proposed framework with three widely used dropout techniques [19-21]. Cascade Utt employs a separable cascade structure, where an AV model is superimposed on an audio-only model. Inputs are then routed through either the audio-only path or" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "text", + "content": "27451" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 70, + 542, + 199 + ], + "blocks": [ + { + "bbox": [ + 52, + 70, + 542, + 199 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 542, + 199 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 542, + 199 + ], + "type": "table", + "html": "
BenchmarkSystemTraining DataBackboneObj. FunctionCER / cpCER (%)
AV
MISP2021SJTU [42]300 hoursLRW-1000ConformerED + SE34.02
NIO [43]3300 hoursLRW-1000 [4]TransformerED25.07
USTC [18]500 hoursw/o extra dataConformerED24.58
Ours1000 hoursw/o extra dataConformerED + InterCTC21.53
MISP2022NIO [44]3300 hoursLRW-1000ConformerED29.58
XMU [45]2100 hoursLRW-1000ConformerED + InterCTC31.88
NPU [46]1300 hoursw/o extra dataE-BranchformerED + InterCTC29.13
Ours1000 hoursw/o extra dataConformerED + InterCTC28.06
", + "image_path": "52b50756d210d0fe6c1d4e081bf92307e11000f8ea459cb0e2a7ca24ed771e5f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 201, + 547, + 237 + ], + "lines": [ + { + "bbox": [ + 46, + 201, + 547, + 237 + ], + "spans": [ + { + "bbox": [ + 46, + 201, + 547, + 237 + ], + "type": "text", + "content": "Table 4. A Comparison of the state-of-the-art systems. InterCTC refers to Intermediate CTC loss [41], the ED loss is formulated in Equation (3) and SE represents the mean square error loss. We use evaluate the performance using the concatenated minimum-permutation character error rate (cpCER) [47] metric for the session-level AVSR task." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "spans": [ + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "text", + "content": "the AV path with a probability of " + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "inline_equation", + "content": "p_1" + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "text", + "content": ". AV Dropout Utt randomly drops either the entire video or the entire audio segments with a probability of " + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "inline_equation", + "content": "p_2" + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "text", + "content": ". Dropout Utt exclusively drops the video segments with a probability of " + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "inline_equation", + "content": "p_3" + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "text", + "content": ". We adopt the optimal dropout settings from [19], where " + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "inline_equation", + "content": "p_1 = 0.25" + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "inline_equation", + "content": "p_2 = 0.25" + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "inline_equation", + "content": "p_3 = 0.5" + }, + { + "bbox": [ + 46, + 242, + 289, + 361 + ], + "type": "text", + "content": ". For Cascade Utt, we follow [19] to build the network and maintain comparable parameters numbers. As a result, our proposed methods outperforms the other three techniques in all test suites and does not cause performance degradation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 368, + 272, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 368, + 272, + 381 + ], + "spans": [ + { + "bbox": [ + 47, + 368, + 272, + 381 + ], + "type": "text", + "content": "7.4. Comparisons with State-of-the-art Systems" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "spans": [ + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "text", + "content": "Finally, we compare our system with the state-of-the-art systems on the MISP2021 and MISP2022 challenges[18, 42-45, 48] as shown in Table 4. With Recognizer Output Voting Error Reduction (ROVER) [49], we rescore the output transcripts of A0, AV0, and A6 mentioned in Table 1. 
In the MISP2021 utterance-level AVSR challenge with oracle speaker diarization results, our system outperforms the previous SOTA system by achieving an absolute CER reduction of " + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "inline_equation", + "content": "3.05\\%" + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "inline_equation", + "content": "24.58\\%" + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "inline_equation", + "content": "21.53\\%" + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "text", + "content": ". Our top-performing system, AV6, attains a CER of " + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "inline_equation", + "content": "22.13\\%" + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "text", + "content": ". Moving to the MISP2022 session-level AVSR challenge, we build our diarization system closely adhering to [50]. We secure a ROVER cpCER score of " + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "inline_equation", + "content": "28.06\\%" + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "text", + "content": " and obtain the best system score with a cpCER of " + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "inline_equation", + "content": "28.55\\%" + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "text", + "content": ". When oracle segmentations are utilized, our system achieves a ROVER CER score of " + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "inline_equation", + "content": "21.80\\%" + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "text", + "content": " and the best model score of " + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "inline_equation", + "content": "21.53\\%" + }, + { + "bbox": [ + 46, + 386, + 289, + 578 + ], + "type": "text", + "content": " in CER." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 586, + 139, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 586, + 139, + 597 + ], + "spans": [ + { + "bbox": [ + 47, + 586, + 139, + 597 + ], + "type": "text", + "content": "8. Related Works" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "type": "text", + "content": "Modality Missing in Multimodal Learning The prevalent issue of missing modalities in multimodal applications has prompted research that specifically targets severe modality absences. Generative models [51, 52] and meta-learning predict missing modalities using available or few-shot paired samples. Balanced models utilize joint multimodal representations [53-55]. Models addressing modality bias employ data augmentation methods like modality dropout [19, 22] to tackle out-of-distribution challenges." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 242, + 545, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 242, + 545, + 277 + ], + "spans": [ + { + "bbox": [ + 304, + 242, + 545, + 277 + ], + "type": "text", + "content": "For AVSR, we prioritize efficiency and opt for dropout due to its plug-and-play nature and lightweight implementation. 
More discussion could be found in Appendix." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 280, + 546, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 280, + 546, + 543 + ], + "spans": [ + { + "bbox": [ + 304, + 280, + 546, + 543 + ], + "type": "text", + "content": "Video Modality Robustness in AVSR To enhance performance on low-resolution videos, visual extractors are commonly pre-trained on relatively high-quality videos with isolated words [5] or acoustic pseudo-labeling classification tasks [18]. Addressing situations involving corruption, Hong et al. [17] have designed an explicit scoring module to identify reliable streams and effectively manage input scenarios. Regarding the issue of missing video frames, most researchers have applied dropout techniques to enhance missing robustness [19-23]. In classical dropout methods, frame level dropout is utilized in [23] and utterance-level dropout is applied in AV-Hubert [21]. As a recent work focusing on this issue, Chang et al. [19] unify test suites of missing videos. However, the proposed binary evaluation metric overly emphasizes relative robustness trends, neglecting absolute performance. Compared to the methods mentioned earlier, we explore the problem of missing video frames from the perspective of modality bias. Leveraging classical techniques and simple designs, our approach achieves both performance and robustness without introducing additional inference time. It adapts to various scenarios of frame absence through a unified model." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 551, + 379, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 551, + 379, + 562 + ], + "spans": [ + { + "bbox": [ + 306, + 551, + 379, + 562 + ], + "type": "text", + "content": "9. Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 570, + 547, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 702 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 702 + ], + "type": "text", + "content": "In this work, we discover and analyze the essence of dropout-induced modality bias. Based on these findings, we proposed MBH to provide a systematic description of the relationship between modality bias and missing robustness in multimodal systems. Consequently, we propose a new multimodal distribution approximation with knowledge distillation approach to deal with missing video frames for AVSR. Furthermore, we apply adapters to handle videos with both severe and complete missing rates. For future work, we intend to validate our findings in this study across a wide range of multimodal applications beyond AVSR." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "27452" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Joon Son Chung, Andrew Senior, Oriol Vinyals, and Andrew Zisserman. Lip reading sentences in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6447-6456, 2017. 1, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 287, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 287, + 169 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 287, + 169 + ], + "type": "text", + "content": "[2] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. LRS3-TED: a large-scale dataset for visual speech recognition. arXiv preprint arXiv:1809.00496, 2018. 1, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 171, + 288, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 171, + 288, + 248 + ], + "spans": [ + { + "bbox": [ + 53, + 171, + 288, + 248 + ], + "type": "text", + "content": "[3] Hang Chen, Jun Du, Yusheng Dai, Chin Hui Lee, Sabato Marco Siniscalchi, Shinji Watanabe, Odette Scharenborg, Jingdong Chen, Bao Cai Yin, and Jia Pan. Audio-visual speech recognition in misp2021 challenge: Dataset release and deep analysis. In Proceedings of the Annual Conference of the International Speech Communication Association, IN-TERSPEECH, volume 2022, pages 1766–1770, 2022. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 250, + 288, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 250, + 288, + 316 + ], + "spans": [ + { + "bbox": [ + 53, + 250, + 288, + 316 + ], + "type": "text", + "content": "[4] Shuang Yang, Yuanhang Zhang, Dalu Feng, Mingmin Yang, Chenhao Wang, Jingyun Xiao, Keyu Long, Shiguang Shan, and Xilin Chen. Lrw-1000: A naturally-distributed large-scale benchmark for lip reading in the wild. In 2019 14th IEEE international conference on automatic face & gesture recognition (FG 2019), pages 1-8. IEEE, 2019. 1, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 318, + 288, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 318, + 288, + 371 + ], + "spans": [ + { + "bbox": [ + 53, + 318, + 288, + 371 + ], + "type": "text", + "content": "[5] Pingchuan Ma, Stavros Petridis, and Maja Pantic. End-to-end audio-visual speech recognition with conformers. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 7613-7617. IEEE, 2021. 
1, 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 373, + 287, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 373, + 287, + 417 + ], + "spans": [ + { + "bbox": [ + 53, + 373, + 287, + 417 + ], + "type": "text", + "content": "[6] Xichen Pan, Peiyu Chen, Yichen Gong, Helong Zhou, Xinbing Wang, and Zhouhan Lin. Leveraging unimodal self-supervised learning for multimodal audio-visual speech recognition, 2022. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 419, + 288, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 419, + 288, + 485 + ], + "spans": [ + { + "bbox": [ + 53, + 419, + 288, + 485 + ], + "type": "text", + "content": "[7] Chen Chen, Yuchen Hu, Qiang Zhang, Heqing Zou, Beier Zhu, and Eng Siong Chng. Leveraging modality-specific representations for audio-visual speech recognition via reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pages 12607–12615, 2023. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 487, + 288, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 487, + 288, + 531 + ], + "spans": [ + { + "bbox": [ + 53, + 487, + 288, + 531 + ], + "type": "text", + "content": "[8] Bo Xu, Cheng Lu, Yandong Guo, and Jacob Wang. Discriminative multi-modality speech recognition. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, pages 14433-14442, 2020. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 533, + 287, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 533, + 287, + 598 + ], + "spans": [ + { + "bbox": [ + 53, + 533, + 287, + 598 + ], + "type": "text", + "content": "[9] Jianwei Yu, Shi-Xiong Zhang, Jian Wu, Shahram Ghorbani, Bo Wu, Shiyin Kang, Shansong Liu, Xunying Liu, Helen Meng, and Dong Yu. Audio-visual recognition of overlapped speech for the lrs2 dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6984-6988. IEEE, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 600, + 287, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 600, + 287, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 600, + 287, + 643 + ], + "type": "text", + "content": "[10] Joanna Hong, Minsu Kim, Daehun Yoo, and Yong Man Ro. Visual context-driven audio feature enhancement for robust end-to-end audio-visual speech recognition. arXiv preprint arXiv:2207.06020, 2022. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "type": "text", + "content": "[11] Alexandros Haliassos, Pingchuan Ma, Rodrigo Mira, Stavros Petridis, and Maja Pantic. Jointly learning visual and auditory speech representations from raw data. arXiv preprint arXiv:2212.06246, 2022. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[12] Pingchuan Ma, Alexandros Haliassos, Adriana Fernandez-Lopez, Honglie Chen, Stavros Petridis, and Maja Pantic." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 117 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 117 + ], + "type": "text", + "content": "Auto-AVSR: Audio-visual speech recognition with automatic labels. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2023. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 119, + 545, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 119, + 545, + 163 + ], + "spans": [ + { + "bbox": [ + 308, + 119, + 545, + 163 + ], + "type": "text", + "content": "[13] George Sterpu, Christian Saam, and Naomi Harte. Attention-based audio-visual fusion for robust automatic speech recognition. In Proceedings of the 20th ACM International conference on Multimodal Interaction, pages 111–115, 2018. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 163, + 545, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 163, + 545, + 207 + ], + "spans": [ + { + "bbox": [ + 308, + 163, + 545, + 207 + ], + "type": "text", + "content": "[14] George Sterpu, Christian Saam, and Naomi Harte. How to teach DNNs to pay attention to the visual modality in speech recognition. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 28:1052-1064, 2020." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 209, + 545, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 209, + 545, + 252 + ], + "spans": [ + { + "bbox": [ + 308, + 209, + 545, + 252 + ], + "type": "text", + "content": "[15] Yuchen Hu, Ruizhe Li, Chen Chen, Heqing Zou, Qiushi Zhu, and Eng Siong Chng. Cross-Modal Global Interaction and Local Alignment for Audio-Visual Speech Recognition. arXiv preprint arXiv:2305.09212, 2023. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 254, + 545, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 254, + 545, + 297 + ], + "spans": [ + { + "bbox": [ + 308, + 254, + 545, + 297 + ], + "type": "text", + "content": "[16] Triantafyllos Afouras, Joon Son Chung, Andrew Senior, Oriol Vinyals, and Andrew Zisserman. Deep audio-visual speech recognition. IEEE transactions on pattern analysis and machine intelligence, 44(12):8717-8727, 2018. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 299, + 545, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 299, + 545, + 363 + ], + "spans": [ + { + "bbox": [ + 308, + 299, + 545, + 363 + ], + "type": "text", + "content": "[17] Joanna Hong, Minsu Kim, Jeongsoo Choi, and Yong Man Ro. Watch or Listen: Robust Audio-Visual Speech Recognition with Visual Corruption Modeling and Reliability Scoring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18783–18794, 2023. 1, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 365, + 545, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 365, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 308, + 365, + 545, + 431 + ], + "type": "text", + "content": "[18] Yusheng Dai, Hang Chen, Jun Du, Xiaofei Ding, Ning Ding, Feijun Jiang, and Chin-Hui Lee. 
Improving Audio-Visual Speech Recognition by Lip-Subword Correlation Based Visual Pre-training and Cross-Modal Fusion Encoder. In 2023 IEEE International Conference on Multimedia and Expo (ICME), pages 2627–2632. IEEE, 2023. 1, 6, 8, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 433, + 545, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 433, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 308, + 433, + 545, + 487 + ], + "type": "text", + "content": "[19] Oscar Chang, Otavio de Pinho Forin Braga, Hank Liao, Dmitriy Dima Serdyuk, and Olivier Siohan. On robustness to missing video for audiovisual speech recognition. Transactions on Machine Learning Research (TMLR), 2022. 1, 6, 7, 8, 3, 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 488, + 545, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 488, + 545, + 554 + ], + "spans": [ + { + "bbox": [ + 308, + 488, + 545, + 554 + ], + "type": "text", + "content": "[20] Takaki Makino, Hank Liao, Yannis Assael, Brendan Shillingford, Basilio Garcia, Otavio Braga, and Olivier Siohan. Recurrent neural network transducer for audio-visual speech recognition. In 2019 IEEE automatic speech recognition and understanding workshop (ASRU), pages 905–912. IEEE, 2019. 1, 7, 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 555, + 545, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 555, + 545, + 589 + ], + "spans": [ + { + "bbox": [ + 308, + 555, + 545, + 589 + ], + "type": "text", + "content": "[21] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. Learning audio-visual speech representation by masked multimodal cluster prediction, 2022. 4, 6, 7, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 590, + 545, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 590, + 545, + 623 + ], + "spans": [ + { + "bbox": [ + 308, + 590, + 545, + 623 + ], + "type": "text", + "content": "[22] Devamanyu Hazarika, Yingting Li, Bo Cheng, Shuai Zhao, Roger Zimmermann, and Soujanya Poria. Analyzing modality robustness in multimodal sentiment analysis, 2022. 4, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 624, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 545, + 689 + ], + "type": "text", + "content": "[23] Shiliang Zhang, Ming Lei, Bin Ma, and Lei Xie. Robust audio-visual speech recognition using bimodal DFSMN with multi-condition training and dropout regularization. In ICASSP 2019-2019 IEEE international conference on acoustics, speech and signal processing (ICASSP), pages 6570-6574. IEEE, 2019. 
1, 4, 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "text", + "content": "[24] Hang Chen, Hengshun Zhou, Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Marco Siniscalchi," + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27453" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 139 + ], + "type": "text", + "content": "Odette Scharenborg, Di-Yuan Liu, Bao-Cai Yin, Jia Pan, Jian-Qing Gao, and Cong Liu. The First Multimodal Information Based Speech Processing (Misp) Challenge: Data, Tasks, Baselines And Results. In ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 9266-9270, 2022. 1, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 141, + 287, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 195 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 195 + ], + "type": "text", + "content": "[25] Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Marco Siniscalchi, and Odette Scharenborg. Multimodal Information Based Speech Processing (MISP) Challenge 2022. https://mispchallenge.github.io/mispchallenge2022/, 2022. Accessed: 2023-06-26.1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 197, + 288, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 197, + 288, + 273 + ], + "spans": [ + { + "bbox": [ + 48, + 197, + 288, + 273 + ], + "type": "text", + "content": "[26] Jinming Zhao, Ruichen Li, and Qin Jin. Missing modality imagination network for emotion recognition with uncertain missing modalities. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2608-2618, 2021. 2, 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 274, + 288, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 274, + 288, + 329 + ], + "spans": [ + { + "bbox": [ + 48, + 274, + 288, + 329 + ], + "type": "text", + "content": "[27] Mengmeng Ma, Jian Ren, Long Zhao, Davide Testuggine, and Xi Peng. Are multimodal transformers robust to missing modality? In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18177-18186, 2022. 2, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 331, + 288, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 331, + 288, + 364 + ], + "spans": [ + { + "bbox": [ + 48, + 331, + 288, + 364 + ], + "type": "text", + "content": "[28] Zihui Xue, Zhengqi Gao, Sucheng Ren, and Hang Zhao. 
The modality focusing hypothesis: Towards understanding cross-modal knowledge distillation, 2022. 3, 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 365, + 287, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 287, + 408 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 287, + 408 + ], + "type": "text", + "content": "[29] Hang Chen, Jun Du, Yu Hu, Li-Rong Dai, Bao-Cai Yin, and Chin-Hui Lee. Correlating subword articulation with lip shapes for embedding aware audio-visual speech enhancement. Neural Networks, 143:171–182, 2021. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 411, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 411, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 48, + 411, + 287, + 464 + ], + "type": "text", + "content": "[30] Pan Zhou, Wenwen Yang, Wei Chen, Yanfeng Wang, and Jia Jia. Modality attention for end-to-end audio-visual speech recognition. In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6565-6569. IEEE, 2019. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 467, + 287, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 467, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 48, + 467, + 287, + 521 + ], + "type": "text", + "content": "[31] Xianing Chen, Qiong Cao, Yujie Zhong, Jing Zhang, Shenghua Gao, and Dacheng Tao. Dearkd: data-efficient early knowledge distillation for vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12052-12062, 2022. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 523, + 287, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 523, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 48, + 523, + 287, + 589 + ], + "type": "text", + "content": "[32] Francisco Rivera Valverde, Juana Valeria Hurtado, and Abhinav Valada. There is more than meets the eye: Self-supervised multi-object detection and tracking with sound by distilling multimodal knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11612-11621, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 590, + 287, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 634 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 634 + ], + "type": "text", + "content": "[33] Zihui Xue, Sucheng Ren, Zhengqi Gao, and Hang Zhao. Multimodal knowledge expansion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 854-863, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "text", + "content": "[34] Baoyun Peng, Xiao Jin, Jiaheng Liu, Dongsheng Li, Yichao Wu, Yu Liu, Shunfeng Zhou, and Zhaoning Zhang. Correlation congruence for knowledge distillation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5007-5016, 2019. 
4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "type": "text", + "content": "[35] Renrui Zhang, Rongyao Fang, Wei Zhang, Peng Gao, Kunchang Li, Jifeng Dai, Yu Qiao, and Hongsheng Li." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 105 + ], + "type": "text", + "content": "Tip-adapter: Training-free clip-adapter for better vision-language modeling. arXiv preprint arXiv:2111.03930, 2021.6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 106, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 106, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 308, + 106, + 545, + 161 + ], + "type": "text", + "content": "[36] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 162, + 545, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 162, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 308, + 162, + 545, + 195 + ], + "type": "text", + "content": "[37] Sylvestre-Alvise Rebuffi, Hakan Bilen, and Andrea Vedaldi. Learning multiple visual domains with residual adapters. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 196, + 545, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 196, + 545, + 248 + ], + "spans": [ + { + "bbox": [ + 308, + 196, + 545, + 248 + ], + "type": "text", + "content": "[38] Peng Gao, Shijie Geng, Renrui Zhang, Teli Ma, Rongyao Fang, Yongfeng Zhang, Hongsheng Li, and Yu Qiao. Clip-adapter: Better vision-language models with feature adapters. International Journal of Computer Vision, pages 1–15, 2023. 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 250, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 250, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 308, + 250, + 545, + 293 + ], + "type": "text", + "content": "[39] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models, 2021. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 294, + 545, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 294, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 308, + 294, + 545, + 371 + ], + "type": "text", + "content": "[40] Zhe Wang, Shilong Wu, Hang Chen, Mao-Kui He, Jun Du, Chin-Hui Lee, Jingdong Chen, Shinji Watanabe, Sabato Siniscalchi, Odette Scharenborg, et al. The multimodal information based speech processing (misp) 2022 challenge: Audio-visual diarization and recognition. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2023. 
6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 372, + 545, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 372, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 308, + 372, + 545, + 425 + ], + "type": "text", + "content": "[41] Jaesong Lee and Shinji Watanabe. Intermediate loss regularization for ctc-based speech recognition. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6224-6228. IEEE, 2021. 6, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 426, + 545, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 426, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 308, + 426, + 545, + 492 + ], + "type": "text", + "content": "[42] Wei Wang, Xun Gong, Yifei Wu, Zhikai Zhou, Chenda Li, Wangyou Zhang, Bing Han, and Yanmin Qian. The sjtu system for multimodal information based speech processing challenge 2021. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 9261-9265. IEEE, 2022. 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 493, + 545, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 493, + 545, + 535 + ], + "spans": [ + { + "bbox": [ + 308, + 493, + 545, + 535 + ], + "type": "text", + "content": "[43] Gaopeng Xu, Song Yang, Wei Li, et al. Channel-Wise AV-Fusion Attention for Multi-Channel Audio-Visual Speech Recognition. In Proc. ICASSP 2022, pages 9251–9255. IEEE, 2022. 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 536, + 545, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 536, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 308, + 536, + 545, + 591 + ], + "type": "text", + "content": "[44] Sang Wang Gaopeng Xu, Xianliang Wang et al. The NIO system for audio-visual diarization and recognition in MISP challenge 2022. https://mispchallenge.github.io/mispchallenge2022/papers/task2/Track2_NIO.pdf, 2022.8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 592, + 545, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 592, + 545, + 647 + ], + "spans": [ + { + "bbox": [ + 308, + 592, + 545, + 647 + ], + "type": "text", + "content": "[45] Tao Li, Haodong Zhou, Jie Wang, Qingyang Hong, and Lin Li. The XMU System for Audio-Visual Diarization and Recognition in MISP Challenge 2022. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "type": "text", + "content": "[46] He Wang, Pengcheng Guo, Pan Zhou, and Lei Xie. Mlcaavsr: Multi-layer cross attention fusion based audio-visual speech recognition. arXiv preprint arXiv:2401.03424, 2024. 
8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "text", + "content": "[47] Shinji Watanabe, Michael Mandel, Jon Barker, Emmanuel Vincent, Ashish Arora, Xuankai Chang, Sanjeev Khudan-" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "27454" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "text", + "content": "pur, Vimal Manohar, Daniel Povey, Desh Raj, et al. Chime-6 challenge: Tackling multispeaker speech recognition for unsegmented recordings. arXiv preprint arXiv:2004.09249, 2020.8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 119, + 287, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 119, + 287, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 119, + 287, + 184 + ], + "type": "text", + "content": "[48] Pengcheng Guo, He Wang, Bingshen Mu, Ao Zhang, and Peikun Chen. The NPU-ASLP System for Audio-Visual Speech Recognition in MISP 2022 Challenge. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 186, + 287, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 231 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 231 + ], + "type": "text", + "content": "[49] Jonathan G Fiscus. A post-processing system to yield reduced word error rates: Recognizer output voting error reduction (ROVER). In Proc. asrU 1997, pages 347-354. IEEE, 1997. 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 233, + 288, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 233, + 288, + 289 + ], + "spans": [ + { + "bbox": [ + 48, + 233, + 288, + 289 + ], + "type": "text", + "content": "[50] Ming Cheng, Haoxu Wang, Ziteng Wang, Qiang Fu, and Ming Li. The whu-alibaba audio-visual speaker diarization system for the misp 2022 challenge. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-2. IEEE, 2023. 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 291, + 287, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 291, + 287, + 335 + ], + "spans": [ + { + "bbox": [ + 48, + 291, + 287, + 335 + ], + "type": "text", + "content": "[51] Qiuling Suo, Weida Zhong, Fenglong Ma, Ye Yuan, Jing Gao, and Aidong Zhang. Metric Learning on Healthcare Data with Incomplete Modalities. In IJCAI, volume 3534, page 3540, 2019. 
8, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 337, + 287, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 337, + 287, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 337, + 287, + 392 + ], + "type": "text", + "content": "[52] Lei Cai, Zhengyang Wang, Hongyang Gao, Dinggang Shen, and Shuiwang Ji. Deep adversarial learning for multimodality missing data completion. In Proceedings of the 24th ACM SIGKDD international conference on knowledge discovery & data mining, pages 1158-1166, 2018. 8, 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 395, + 287, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 395, + 287, + 438 + ], + "spans": [ + { + "bbox": [ + 48, + 395, + 287, + 438 + ], + "type": "text", + "content": "[53] Zilong Wang, Zhaohong Wan, and Xiaojun Wan. Transmodality: An end2end fusion method with transformer for multimodal sentiment analysis. In Proceedings of The Web Conference 2020, pages 2514-2520, 2020. 8, 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 440, + 287, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 440, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 48, + 440, + 287, + 495 + ], + "type": "text", + "content": "[54] Hai Pham, Paul Pu Liang, Thomas Manzini, Louis-Philippe Morency, and Barnabás Póczos. Found in translation: Learning robust joint representations by cyclic translations between modalities. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 6892–6899, 2019. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 498, + 287, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 498, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 48, + 498, + 287, + 552 + ], + "type": "text", + "content": "[55] Jiale Li, Hang Dai, Hao Han, and Yong Ding. Mseg3d: Multi-modal 3d semantic segmentation for autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21694-21704, 2023. 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 555, + 287, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 287, + 597 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 287, + 597 + ], + "type": "text", + "content": "[56] A Varga, HJM Steeneken, et al. Noisex-92: A database and an experiment to study the effect of additive noise on speech recognition systems. Speech Commun, 12(3):247–253, 1993. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 600, + 287, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 600, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 48, + 600, + 287, + 666 + ], + "type": "text", + "content": "[57] Lukas Drude, Jahn Heymann, Christoph Boeddeker, and Reinhold Haeb-Umbach. Nara-wpe: A python package for weighted prediction error dereverberation in numpy and tensorflow for online and offline processing. In Speech Communication; 13th ITG-Symposium, pages 1-5. VDE, 2018. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "text", + "content": "[58] Christoph Boeddecker, Jens Heitkaemper, Joerg Schmalenstroeer, et al. Front-end processing for the CHiME-5 dinner party scenario. In Proc. CHiME 2018, pages 35–40, 2018. 
1, 3" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 339 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[59] Desh Raj, Daniel Povey, and Sanjeev Khudanpur. GPU-accelerated guided source separation for meeting transcription, 2022. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "type": "text", + "content": "[60] Mengmeng Ma, Jian Ren, Long Zhao, Sergey Tulyakov, Cathy Wu, and Xi Peng. Smil: Multimodal learning with severely missing modality. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 2302-2310, 2021. 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "text", + "content": "[61] Itai Gat, Idan Schwartz, Alexander Schwing, and Tamir Hazan. Removing bias in multi-modal classifiers: Regularization by maximizing functional entropies. Advances in Neural Information Processing Systems, 33:3197-3208, 2020. 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 219, + 545, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 219, + 545, + 284 + ], + "spans": [ + { + "bbox": [ + 307, + 219, + 545, + 284 + ], + "type": "text", + "content": "[62] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017. 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 285, + 545, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 285, + 545, + 339 + ], + "spans": [ + { + "bbox": [ + 308, + 285, + 545, + 339 + ], + "type": "text", + "content": "[63] Yangyang Guo, Liqiang Nie, Harry Cheng, Zhiyong Cheng, Mohan Kankanhalli, and Alberto Del Bimbo. On modality bias recognition and reduction. ACM Transactions on Multimedia Computing, Communications and Applications, 19(3):1-22, 2023. 
4" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27455" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/02a10508-95ff-4550-b14e-3121c8c91065_content_list.json b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/02a10508-95ff-4550-b14e-3121c8c91065_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..0473d48caedb9a26d5ea760cc1a800463bc0abe1 --- /dev/null +++ b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/02a10508-95ff-4550-b14e-3121c8c91065_content_list.json @@ -0,0 +1,1739 @@ +[ + { + "type": "text", + "text": "A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion *", + "text_level": 1, + "bbox": [ + 261, + 130, + 715, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Feng Yu† \nUniversity of Minnesota \nfyu@umn.edu", + "bbox": [ + 147, + 202, + 333, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Teng Zhang \nUniversity of Central Florida \nteng.zhang@ucf.edu", + "bbox": [ + 375, + 203, + 594, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Gilad Lerman ‡ \nUniversity of Minnesota \nlerman@umn.edu", + "bbox": [ + 638, + 202, + 823, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 236, + 291, + 310, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present the subspace-constrained Tyler's estimator (STE) designed for recovering a low-dimensional subspace within a dataset that may be highly corrupted with outliers. STE is a fusion of the Tyler's M-estimator (TME) and a variant of the fast median subspace. Our theoretical analysis suggests that, under a common inlier-outlier model, STE can effectively recover the underlying subspace, even when it contains a smaller fraction of inliers relative to other methods in the field of robust subspace recovery. We apply STE in the context of Structure from Motion (SfM) in two ways: for robust estimation of the fundamental matrix and for the removal of outlying cameras, enhancing the robustness of the SfM pipeline. Numerical experiments confirm the state-of-the-art performance of our method in these applications. This research makes significant contributions to the field of robust subspace recovery, particularly in the context of computer vision and 3D reconstruction.", + "bbox": [ + 75, + 323, + 473, + 565 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 595, + 202, + 612 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In many applications, data has been collected in large quantities and dimensions. It is a common practice to represent such data within a low-dimensional subspace that preserves its essential information. Principal Component Analysis (PCA) is frequently employed to identify this subspace. 
However, PCA faces challenges when dealing with data contaminated by outliers. Consequently, the field of Robust Subspace Recovery (RSR) aims to develop a framework for outlier-robust PCA. RSR is particularly relevant to problems in computer vision, such as fundamental matrix estimation, which involves recovering a hidden subspace associated with \"good correspondence pairs\" among highly corrupted measurements.", + "bbox": [ + 75, + 622, + 470, + 804 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Various algorithms have been proposed to address RSR, employing methods such as projection pursuit [1, 5, 14, 23, 29, 35, 38], subspace energy minimization (in particular least absolute", + "bbox": [ + 76, + 805, + 470, + 849 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "deviations and its relaxations) [9, 24, 27, 33, 34, 42, 43, 51], robust covariance estimation [50], filtering-based methods [4, 8, 49] and exhaustive subspace search methods [10, 16]. An in-depth exploration and comprehensive overview of robust subspace recovery and its diverse algorithms can be found in [25].", + "bbox": [ + 500, + 292, + 893, + 368 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Methods based on robust covariance estimators, such as the Tyler's M-estimator (TME), offer additional useful information on the shape of the data within the subspace, similarly to PCA in the non-robust setting. They also offer maximum-likelihood interpretation, which is missing in many other methods. Application of the TME [47] to RSR has been shown to be successful on basic benchmarks [25, 50]. Moreover, under a model of inliers in a general position on a subspace and outliers in general position in the complement of the subspace, TME was shown to recover the subspace within a desirable fraction of inliers [50]. Below this fraction it was proved to be Small Set Expansion (SSE) hard to solve the RSR problem [16].", + "bbox": [ + 496, + 369, + 893, + 551 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "One may still succeed with solving the RSR problem with a computationally efficient algorithm when the fraction of inliers is lower than the one required by [16], considering a more restricted data model or violating other assumptions made in [16]. For example, some special results in this direction are discussed in [32]. Also, [33] proposes the generalized haystack model of inliers and outliers to demonstrate the possibility of handling lower fractions of inliers by an RSR algorithm. This model extends the limited standard haystack model [27], where basic methods (such as PCA filtering) can easily work with low fractions of outliers. Nevertheless, it is unclear how practical the above theoretical ideas are for applied settings.", + "bbox": [ + 496, + 551, + 895, + 733 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "One practical setting that requires a fraction of inliers significantly lower than the one stated in [16] arises in the problem of robust fundamental (or essential) matrix estimation. The fundamental matrix encompasses the epipolar geometry of two views in stereo vision systems. It is typically computed using point correspondences between the two projected images. This computation requires finding an 8-dimensional subspace within a 9-dimensional ambient space. 
In this setting, the theoretical framework of [16] requires that the fraction of inliers be at least $8/9 \\approx 88.9\\%$ , which is clearly unreasonable to require.", + "bbox": [ + 496, + 733, + 893, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To date, the RANdom Sample Consensus (RANSAC)", + "bbox": [ + 519, + 885, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*This work was supported by NSF DMS awards 2124913 and 2318926.", + "bbox": [ + 94, + 862, + 457, + 875 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Supplementary code: https://github.com/alexfengg/STE", + "bbox": [ + 96, + 875, + 379, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$\\ddagger$ Corresponding author. All authors equally contributed.", + "bbox": [ + 96, + 887, + 375, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "14575", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "method [10] is the only RSR method that has been highly successful in addressing this nontrivial scenario, gaining widespread popularity in computer vision. RANSAC is an iterative method that randomly selects minimal subsets of the data and fits models, in particular subspaces, to identify the best consensus set, that is, the set in most agreement with the hypothesized model. There are numerous approaches proposed to improve RANSAC, especially for this particular application, including locally optimized RANSAC (LO-RANSAC, [6]), maximum likelihood estimator RANSAC (MLESAC) [45]), degeneracy-check enabled RANSAC (DEGENSAC) [7]) and M-estimator guided RANSAC (MAGSAC) [2]). A near recovery theory for a variant of RANSAC under some assumptions on the outliers was suggested in [32]. Nevertheless, in general, RANSAC is rather slow and its application to higher-dimensional problems is intractable.", + "bbox": [ + 75, + 90, + 472, + 316 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This work introduces a novel RSR algorithm that is guaranteed to robustly handle a lower fraction of outliers than the theoretical threshold proposed by [16], under special settings. Our basic idea is to adapt Tyler's M-Estimator to utilize the information of the underlying $d$ -dimensional subspace, while avoiding estimation of the full covariance. By using less degrees of freedom we obtain a more accurate subspace estimator than the one obtained by TME with improved computational complexity. We show that STE is a fusion of the Tyler's M-estimator (TME) and a variant of the fast median subspace (FMS) [24] that aims to minimize a subspace-based $\\ell_0$ energy.", + "bbox": [ + 75, + 319, + 472, + 487 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our theory shows that our proposed subspace-constrained Tyler's estimator (STE) algorithm can effectively recover the underlying subspace, even when it contains a smaller fraction of inliers relative to other methods. 
We obtain this nontrivial achievement first in a generic setting, where we establish when an initial estimator for STE is sufficiently well-conditioned to guarantee the desired robustness of STE. We then assume the asymptotic generalized haystack model and show that under this model, TME itself is a well-conditioned initial estimator for STE, and that unlike TME, STE with this initialization can deal with a lower fraction of inliers than the theoretical threshold specified in [16].", + "bbox": [ + 75, + 488, + 472, + 670 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We demonstrate competitive performance in robust fundamental matrix estimation, relying solely on subspace information without additional methods for handling degenerate scenarios, in contrast to [7, 12, 37]. We also propose a potential application of RSR for removing bad cameras in order to enhance the SfM pipeline and show competitive performance of STE. This is a completely new idea and it may require additional exploration to make it practical. Nevertheless, it offers a very different testbed where $N = D$ is very large and RANSAC is generally intractable.", + "bbox": [ + 75, + 671, + 472, + 823 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The rest of the paper is organized as follows: §2 introduces the STE framework, §3 establishes theoretical guarantees of STE, §4 applies STE to two different problems in SfM, demonstrating its competitive performance relative to existing algorithms, and §5 provides conclusions and future directions.", + "bbox": [ + 75, + 824, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. The STE Algorithm", + "text_level": 1, + "bbox": [ + 498, + 89, + 684, + 107 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We present our proposed STE. We review basic notation in §2.1 and Tyler's original estimator in §2.2. We describe our method in §2.3, its computational complexity in §2.4, its algorithmic choices in §2.5 and an interpretation for it as a fusion of TME and FMS with $p = 0$ in §2.6.", + "bbox": [ + 496, + 114, + 893, + 191 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Notation", + "text_level": 1, + "bbox": [ + 500, + 199, + 604, + 214 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We use bold upper and lower case letters for matrices and column vectors, respectively. Let $\\mathbf{I}_k$ denote the identity matrix in $\\mathbb{R}^{k\\times k}$ , where if $k$ is obvious we just write $\\mathbf{I}$ . For a matrix $\\mathbf{A}$ , we denote by $\\operatorname{tr}(\\mathbf{A})$ and $\\operatorname{Im}(\\mathbf{A})$ the trace and image (i.e., column space) of $\\mathbf{A}$ . We denote by $S_{+}(D)$ and $S_{++}(D)$ the sets of positive semidefinite and definite matrices in $\\mathbb{R}^{D\\times D}$ , respectively. We denote by $O(D,d)$ the set of semi-orthogonal $D\\times d$ matrices, i.e., $\\mathbf{U}\\in O(D,d)$ if and only if $\\mathbf{U}\\in \\mathbb{R}^{D\\times d}$ and $\\mathbf{U}^{\\top}\\mathbf{U} = \\mathbf{I}_{d}$ . We refer to linear $d$ -dimensional subspaces as $d$ -subspaces. For a $d$ -subspace $L$ , we denote by $\\mathbf{P}_L$ the $D\\times D$ matrix representing the orthogonal projector onto $L$ . We also arbitrarily fix $\\mathbf{U}_L$ in $O(D,d)$ such that $\\mathbf{U}_L\\mathbf{U}_L^\\top = \\mathbf{P}_L$ (such $\\mathbf{U}_L$ is determined up to right multiplication by an orthogonal matrix in $O(d,d)$ ). 
Throughout the paper, $\\mathcal{X} = \\{\\pmb{x}_i\\}_{i = 1}^N\\subset \\mathbb{R}^D$ is assumed to be a given centered dataset, that is, $\\sum_{i = 1}^{N}\\pmb{x}_i = \\mathbf{0}$ .", + "bbox": [ + 496, + 220, + 893, + 452 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Tyler's Estimator and its Application to RSR", + "text_level": 1, + "bbox": [ + 498, + 458, + 880, + 474 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Tyler's M-estimator (TME) [47] robustly estimates the covariance $\\pmb{\\Sigma}^{*}$ of the dataset $\\mathcal{X} = \\{\\pmb{x}_i\\}_{i=1}^N \\subset \\mathbb{R}^D$ by minimizing", + "bbox": [ + 496, + 481, + 893, + 513 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {D}{N} \\sum_ {i = 1} ^ {N} \\log \\left(\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {\\Sigma} ^ {- 1} \\boldsymbol {x} _ {i}\\right) + \\operatorname {l o g d e t} (\\boldsymbol {\\Sigma}) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 583, + 521, + 893, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "over $\\pmb{\\Sigma} \\in S_{++}(D)$ such that $\\mathrm{tr}(\\pmb{\\Sigma}) = 1$ . The cost function in (1) can be motivated by writing the maximum likelihood of the multivariate $t$ -distribution and letting its degrees of freedom parameter, $\\nu$ , approach zero [31]. This cost function is invariant to dilations of $\\pmb{\\Sigma}$ , and the constraint on $\\mathrm{tr}(\\pmb{\\Sigma})$ , whose value can be arbitrarily chosen, fixes a scale. TME also applies to scenarios where the covariance matrix does not exist. In such cases, TME estimates the shape (or scatter matrix) of the distribution, which is defined up to an arbitrary scale. More direct interpretations of TME as a maximum likelihood estimator can be found in [11, 46]. When $D$ is fixed and $N$ approaches infinity, TME is the \"most robust\" estimator of the shape matrix for data i.i.d. sampled from a continuous elliptical distribution [47] in a minimax sense, that is, as a minimizer of the maximal variance.", + "bbox": [ + 496, + 569, + 893, + 780 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Tyler [47] proposed the following iterative formula for computing TME:", + "bbox": [ + 496, + 781, + 893, + 811 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\Sigma} ^ {(k)} = \\sum_ {i = 1} ^ {N} \\frac {\\boldsymbol {x} _ {i} \\boldsymbol {x} _ {i} ^ {\\top}}{\\boldsymbol {x} _ {i} ^ {\\top} (\\boldsymbol {\\Sigma} ^ {(k - 1)}) ^ {- 1} \\boldsymbol {x} _ {i}} / \\operatorname {t r} \\left(\\sum_ {i = 1} ^ {N} \\frac {\\boldsymbol {x} _ {i} \\boldsymbol {x} _ {i} ^ {\\top}}{\\boldsymbol {x} _ {i} ^ {\\top} (\\boldsymbol {\\Sigma} ^ {(k - 1)}) ^ {- 1} \\boldsymbol {x} _ {i}}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 506, + 819, + 883, + 861 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Kent and Tyler [22] proved that if any $d$ -subspace of $\\mathbb{R}^D$ , where $1 \\leq d \\leq D - 1$ , contains fewer than $Nd / D$ data points, then", + "bbox": [ + 498, + 869, + 893, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "14576", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the above iterative procedure converges to TME. 
Linear rate of convergence was proved for the regularized TME in [15] and for TME in [13].", + "bbox": [ + 75, + 90, + 468, + 136 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "One can apply the TME estimator to solve the RSR problem with a given dimension $d$ by forming the subspace spanned by the top $d$ eigenvectors of TME. Zhang [50] proved that as long as there are more than $Nd / D$ inliers lying on a subspace, and the projected coordinates of these inliers on the $d$ -subspace and the projected coordinates of the outliers on the $(D - d)$ -dimensional orthogonal complement of the subspace are in general position, then TME recovers this subspace. Zhang [50] also showed that in this setting the above iterative formula converges (note that the condition of convergence in [22] does not apply in this case). The above lower bound of $Nd / D$ on the number of inliers coincides with the general bound for the noiseless RSR problem, beyond which the problem becomes Small Set-Expansion (SSE) hard [16].", + "bbox": [ + 75, + 137, + 468, + 347 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Numerical experiments in [50] and [25] indicated state-of-the-art accuracy of TME compared to other RSR algorithms in various settings. The computational complexity of TME is of order $O(K(ND^2 + D^3))$ , where $K$ is the number of iterations. On the other hand, the cost of faster RSR algorithms is of order $O(KNDd)$ [24, 25, 33].", + "bbox": [ + 75, + 347, + 470, + 439 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Motivation and Formulation of STE", + "text_level": 1, + "bbox": [ + 76, + 448, + 388, + 463 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We aim to use more cleverly the $d$ -subspace information within the TME framework to form an RSR algorithm, instead of first estimating the full covariance. By using less degrees of freedom we can obtain a more accurate subspace estimator, especially when the fraction of outliers can be large. Furthermore, our idea allows us to improve the computational cost to become state-of-the-art for high-dimensional settings.", + "bbox": [ + 75, + 472, + 468, + 577 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Many RSR algorithms can be formulated as minimizing a best orthogonal projector onto a $d$ -subspace [24, 25, 27, 33, 51]. We are going to do something similar, but unlike using an orthogonal projector, we will still use information from TME to get the shape of the data on the projected subspace. We will make the rest of the eigenvalues (i.e., bottom $D - d$ ones) equal and shrink them by a parameter $0 < \\gamma < 1$ . We thus use a regularized version of a reduced-dimension covariance matrix. This parameter $\\gamma$ plays a role in our theoretical estimates. Making $\\gamma$ smaller helps with better subspace recovery, whereas making $\\gamma$ bigger enhances the well-conditioning of the estimator.", + "bbox": [ + 75, + 579, + 468, + 743 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Following these basic ideas, we formulate our method, STE. For simplicity, we utilize covariance matrices and their inverses. 
Since these covariance matrices are essentially $d$ -dimensional and include an additional simple regularizing component, our overall computations can be expressed in terms of the computation of the top $d$ singular values of an $N \\times D$ matrix (see §2.4).", + "bbox": [ + 75, + 744, + 468, + 834 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "At iteration $k$ we follow a similar step to that of TME:", + "bbox": [ + 96, + 835, + 437, + 849 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Z} ^ {(k)} := \\sum_ {i = 1} ^ {N} \\boldsymbol {x} _ {i} \\boldsymbol {x} _ {i} ^ {\\top} / (\\boldsymbol {x} _ {i} ^ {\\top} (\\boldsymbol {\\Sigma} ^ {(k - 1)}) ^ {- 1} \\boldsymbol {x} _ {i}).\n$$\n", + "text_format": "latex", + "bbox": [ + 147, + 862, + 395, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We compute the eigenvalues $\\{\\sigma_i\\}_{i = 1}^D$ of $\\mathbf{Z}^{(k)}$ and replace each of the bottom $(D - d)$ of them with $\\gamma \\cdot \\sigma_{d + 1,D}$ , where", + "bbox": [ + 498, + 90, + 890, + 122 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma_ {d + 1, D} := \\frac {1}{D - d} \\sum_ {i = d + 1} ^ {D} \\sigma_ {i}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 612, + 132, + 890, + 172 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We also compute the eigenvectors of $\\mathbf{Z}^{(k)}$ and form the matrix $\\boldsymbol{\\Sigma}^{(k)}$ with the same eigenvectors as those of $\\mathbf{Z}^{(k)}$ and the modified eigenvalues, scaled to have trace 1. We iteratively repeat this procedure until the two estimators are sufficiently close. Algorithm 1 summarizes this procedure. Note that it is invariant to scaling of the data, similarly to TME.", + "bbox": [ + 496, + 186, + 890, + 277 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Algorithm 1 STE: Subspace-Constrained Tyler's Estimator", + "text_level": 1, + "bbox": [ + 500, + 291, + 872, + 306 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1: Input: $\\mathbf{X} = [\\pmb{x}_1, \\dots, \\pmb{x}_N] \\in \\mathbb{R}^{D \\times N}$ : centered data matrix, $d$ : subspace dimension, $K$ : maximum number of iterations, $\\tau, \\gamma$ : parameters.", + "2: Output: $L$ : $d$ -subspace in $\\mathbb{R}^D$", + "3: $\\pmb{\\Sigma}^{(0)} = \\mathbf{I}_D / D$", + "4: for $k = 1,2,\\ldots$ do", + "5: $\\mathbf{Z}^{(k)}\\gets \\sum_{i = 1}^{N}\\pmb {x}_i\\pmb {x}_i^\\top /\\left(\\pmb {x}_i^\\top (\\pmb{\\Sigma}^{(k - 1)})^{-1}\\pmb {x}_i\\right)$", + "6: $[\\mathbf{U}^{(k)},\\mathbf{S}^{(k)},\\bar{\\mathbf{U}}^{(k)}]\\gets \\mathrm{EVD}(\\mathbf{Z}^{(k)})$", + "7: $\\sigma_{i}\\gets [\\mathbf{S}^{(k)}]_{ii}$ and $\\sigma_{d + 1,D}\\leftarrow \\sum_{i = d + 1}^{D}\\sigma_{i} / (D - d)$", + "8: $\\widetilde{\\mathbf{S}}^{(k)}\\gets \\mathrm{diag}(\\sigma_1,\\dots ,\\sigma_d,\\gamma \\cdot \\sigma_{d + 1,D},\\dots ,\\gamma \\cdot \\sigma_{d + 1,D}),$", + "9: $\\pmb{\\Sigma}^{(k)}\\gets \\mathbf{U}^{(k)}\\widetilde{\\mathbf{S}}^{(k)}(\\mathbf{U}^{(k)})^{\\top} / \\mathrm{tr}\\bigl (\\mathbf{U}^{(k)}\\widetilde{\\mathbf{S}}^{(k)}(\\mathbf{U}^{(k)})^{\\top}\\bigr)$", + "10: Stop if $k \\geq K$ or $\\| \\pmb{\\Sigma}^{(k)} - \\pmb{\\Sigma}^{(k-1)} \\|_F < \\tau$ .", + "11: end for", + "12: $L = \\operatorname{Span}$ of the first $d$ columns of $\\mathbf{U}^{(k)}$" + ], + "bbox": [ + 504, + 311, + 893, + 532 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.4. 
Computational Complexity", + "text_level": 1, + "bbox": [ + 500, + 561, + 743, + 578 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Setting $w_{i}^{(k)} = (\\pmb{x}_{i}^{\\top}(\\pmb{\\Sigma}^{(k - 1)})^{-1}\\pmb{x}_{i})^{-1}$ , we can express $\\mathbf{Z}^{(k)}$ as $\\mathbf{Z}^{(k)} = \\widetilde{\\mathbf{X}}\\widetilde{\\mathbf{X}}^{\\top}$ , where $\\widetilde{\\mathbf{X}} = [(w_1^{(k)})^{1 / 2}\\pmb{x}_1,\\dots,(w_N^{(k)})^{1 / 2}\\pmb{x}_N]$ . With some abuse of notation we denote by $\\sigma_{1},\\ldots ,\\sigma_{D}$ the eigenvalues of $\\pmb{\\Sigma}^{(k - 1)}$ (and not $\\pmb{\\Sigma}^{(k)}$ ). Since they are scaled to have trace 1, $\\sigma_{d + 1,D} = (1 - \\sum_{j = 1}^{d}\\sigma_{j}) / (D - d)$ . We thus only need the top $d$ eigenvectors and top $d$ eigenvalues of $\\pmb{\\Sigma}^{(k - 1)}$ to update $\\widetilde{w}_i^{(k)}$ . Therefore, the complexity of STE can be of order $O(KNDd)$ if a special fast algorithm is utilized for computing only the top $d$ eigenvectors.", + "bbox": [ + 496, + 584, + 890, + 732 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.5. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 741, + 715, + 757 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "STE depends on the parameters $K$ , $\\tau$ and $\\gamma$ and the initialization of $\\pmb{\\Sigma}^{(0)}$ . The first two parameters are rather standard in iterative procedures and do not raise any concern.", + "bbox": [ + 496, + 763, + 890, + 809 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our theory sheds some light on possible choices of $\\gamma$ and in particular it indicates that the algorithm can be more sensitive to choices of $\\gamma$ when the quantity defined later in (3) is relatively small. In this case, it may be beneficial to try several values of $\\gamma$ . We propose here a constructive way of doing it. We first form a sequence of $0 < \\gamma \\leq 1$ , e.g., $\\gamma_{k} = 1 / k, k = 1,\\dots,m$ . In order to", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "14577", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "determine the best choice of $\\gamma$ , we compute the distance of each data point $\\mathbf{x}$ to each subspace $L_{k}$ , corresponding to the choice of $\\gamma_{k}$ , where $\\mathrm{dist}(\\mathbf{x}, L_{k}) = \\| \\mathbf{x} - \\mathbf{P}_{L_{k}} \\mathbf{x} \\|$ . We set a threshold $\\zeta$ , obtained by the median among all points and all subspaces and for each subspace, $L_{k}$ , we count the number of the inliers with distance below this threshold. The best $\\gamma_{k}$ is determined according to the subspace yielding the largest number of inliers. We describe this procedure in Algorithm 2.", + "bbox": [ + 75, + 90, + 472, + 210 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For simplicity, we initialize with $\\boldsymbol{\\Sigma}^{(0)} = \\mathbf{I}_D / D$ and note that with this choice $\\boldsymbol{\\Sigma}^{(1)}$ reflects the trimmed covariance matrix and thus reflects the PCA subspace. One can also initialize with TME or other subspaces (see §3 where the theory of STE is discussed). 
One can further try several initialization (with possible random components) and use a strategy similar to Algorithm 2 to choose the best one.", + "bbox": [ + 75, + 212, + 468, + 316 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "At last, we remark that when computing $\\mathbf{Z}^{(k)}$ we want to ensure that $\\pmb{x}_i^\\top (\\pmb{\\Sigma}^{(k-1)})^{-1}\\pmb{x}_i$ cannot be zero and we thus add the arbitrarily small number $10^{-15}$ to this value.", + "bbox": [ + 76, + 316, + 468, + 364 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2 Estimating best $\\gamma$ for STE" + ], + "code_body": "1: Input: $\\mathbf{X} = [\\pmb{x}_1, \\dots, \\pmb{x}_N] \\in \\mathbb{R}^{D \\times N}$ : centered data matrix, $d$ : subspace dimension, $\\{\\gamma_1, \\dots, \\gamma_m\\}$ : a set of pre-selected $\\gamma$ 's. \n2: Output: $\\gamma^*$ : optimal $\\gamma$ among $\\{\\gamma_1, \\dots, \\gamma_m\\}$ \n3: for $j = 1, 2, \\dots, m$ do \n4: $L^{(j)} \\gets \\mathrm{STE}(\\mathbf{X}, d, \\gamma_j)$ \n5: $\\mathcal{D}^{(j)} \\gets \\{\\mathrm{dist}(\\pmb{x}_i, L^{(j)}) | \\pmb{x}_i \\in \\mathcal{X}\\}$ . \n6: end for \n7: Set $\\zeta = \\mathrm{median}(\\{\\mathcal{D}^{(1)}, \\dots, \\mathcal{D}^{(m)}\\})$ \n8: $j^* = \\operatorname{argmax}_{1 \\leq j \\leq m} |\\mathcal{D}^{(j)}| < \\zeta$ \n9: $\\gamma^* = \\gamma_j^*$", + "bbox": [ + 86, + 400, + 470, + 554 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.6. STE fuses TME and a Variant of FMS", + "text_level": 1, + "bbox": [ + 76, + 574, + 406, + 589 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "STE is formally similar to both TME and FMS. Indeed, at each iteration these algorithms essentially compute $\\boldsymbol{\\Sigma}^{(k + 1)} = \\sum_{i = 1}^{N}w_{i}\\pmb{x}_{i}\\pmb{x}_{i}^{\\top}$ , where $w_{i}\\equiv w_{i}\\bigl (\\boldsymbol{\\Sigma}^{(k)}\\bigr)$ . We summarize the formal weights for FMS (with any choice of $p$ for minimizing an $\\ell_p$ energy in [24]), TME and STE. We ignore an additional scaling constant for TME and STE, obtained by dividing $w_{i}\\pmb{x}_{i}\\pmb{x}_{i}^{\\top}$ above by its trace, and a regularization parameter $\\delta$ for FMS. We express these formulas using the eigenvalues $\\sigma_1,\\ldots ,\\sigma_D$ and eigenvectors $\\pmb{u}_1,\\dots ,\\pmb{u}_D$ of the weighted sample covariance, $\\sum_{i = 1}^{N}w_{i}\\pmb{x}_{i}\\pmb{x}_{i}^{\\top}$ for each method and $\\beta \\coloneqq \\gamma \\cdot \\sigma_{d + 1,D}$ (see (2)) as follows:", + "bbox": [ + 75, + 598, + 468, + 765 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} w _ {i} ^ {\\mathrm {F M S}} = \\frac {1}{\\left(\\sum_ {j = d + 1} ^ {D} \\left(\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {u} _ {j}\\right) ^ {2}\\right) ^ {(2 - p) / 2}}, \\\\ w _ {i} ^ {\\mathrm {T M E}} = \\frac {1}{\\sum_ {j = 1} ^ {D} \\sigma_ {j} ^ {- 1} (\\pmb {x} _ {i} ^ {\\top} \\pmb {u} _ {j}) ^ {2}}, \\\\ w _ {i} ^ {\\mathrm {S T E}} = \\frac {1}{\\sum_ {j = 1} ^ {d} \\sigma_ {j} ^ {- 1} (\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {u} _ {j}) ^ {2} + \\beta^ {- 1} \\sum_ {j = d + 1} ^ {D} (\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {u} _ {j}) ^ {2}}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 101, + 776, + 444, + 898 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "These weights aim to mitigate the impact of outliers in different ways. 
For FMS, $\\sum_{j=d+1}^{D}(\\boldsymbol{x}_i^\\top\\boldsymbol{u}_j)^2$ is the squared distance of a data point $\\boldsymbol{x}_i$ to the subspace $L$ . Thus for $p < 2$ , $w_i^{\\mathrm{FMS}}$ is smaller for \"subspace-outliers\", where the robustness to such outliers increases when $p \\geq 0$ decreases.", + "bbox": [ + 498, + 90, + 893, + 166 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The weights of TME are inversely proportional to the squared Mahalanobis distance of $\\pmb{x}_i$ to the empirical distribution. They mitigate the effect of \"covariance-outliers\". If the dataset is concentrated on a $k$ -subspace where $k < d$ , then TME can provide smaller weights to points lying away from this subspace, unlike FMS that does not distinguish between points within the larger $d$ -subspace.", + "bbox": [ + 496, + 167, + 893, + 271 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We note that the weights of STE fuse the above two weights. Within a $d$ -subspace, they use the shape of the data. They can thus avoid outliers within this $d$ -subspace. Within the orthogonal component of this subspace, they use a term proportional to that of FMS with $p = 0$ . We remark that such $\\ell_0$ minimization has a clear interpretation for RSR, though it is generally hard to guarantee. Indeed, [24] has no guarantees for FMS with $p = 0$ . It can also yield unwanted spurious stationary points [26].", + "bbox": [ + 496, + 272, + 893, + 393 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Theory", + "text_level": 1, + "bbox": [ + 500, + 405, + 581, + 422 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We review a theoretical guarantee for STE, whose proof is given in [28]. It requires some conditions and we verify they hold with high probability under the asymptotic generalized haystack model. We assume a noiseless inliers-outliers RSR model. Let $L_{*}$ denote the underlying $d$ -subspace in $\\mathbb{R}^{D}$ , $\\mathcal{X}_{in} = \\mathcal{X} \\cap L_{*}$ and $\\mathcal{X}_{out} = \\mathcal{X} \\setminus \\mathcal{X}_{in}$ be the set of inliers and outliers, respectively, and $n_1 = |\\mathcal{X}_{in}|$ and $n_0 = |\\mathcal{X}_{out}|$ be the number of inliers and outliers. Our first assumption is a mild one on how well-conditioned the inliers are in $L_{*}$ (compare e.g., other assumptions in [25, 32]).", + "bbox": [ + 498, + 430, + 890, + 566 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Assumption 1: Any $k$ -subspace of $L_{*}$ , $1 \\leq k \\leq d$ , contains at most $n_1 k / d$ points.", + "bbox": [ + 498, + 566, + 890, + 595 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Motivation for Assumption 2: The ratio of inliers per outliers, $n_1 / n_0$ , in RSR is often referred to as the SNR (signal-to-noise ratio) [25, 32, 33]. The smaller it is, the best the subspace recovery is. We define the dimension-scaled SNR (DS-SNR) as the SNR obtained when scaling $n_1$ and $n_0$ by their respective dimensions (of $L_*$ and $L_*^\\perp$ ):", + "bbox": [ + 496, + 597, + 890, + 686 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {D S} - \\mathrm {S N R} := \\frac {n _ {1} / d}{n _ {0} / (D - d)}. 
\\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 614, + 695, + 890, + 727 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Zhang [50] showed that exact recovery by TME is guaranteed whenever DS-SNR $>1$ (assuming general position assumptions on the inliers and outliers) and Hardt and Moitra [16] showed that when considering general datasets with general position assumptions on the inliers and outliers, the RSR problem is SSE hard if the DS-SNR is lower than 1. We aim to show that under the following weaker generic condition, STE can obtain exact recovery with DS-SNR, strictly lower than 1.", + "bbox": [ + 496, + 734, + 890, + 854 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Assumption 2: DS-SNR $> \\gamma$ , where $\\gamma < 1$ is the STE parameter.", + "bbox": [ + 500, + 854, + 893, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our last assumption requires a sufficiently good initialization for STE, but also implicitly involves additional hidden", + "bbox": [ + 500, + 869, + 890, + 898 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "14578", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "assumptions on the inliers and outliers. This is expected, since Assumption 1 does not require anything from the outliers and also has a very weak requirement from the inliers. To formulate the new assumption we define below some some basic condition numbers for good initialization (which are more complicated than the one for initialization by PCA suggested by [33] and [32]) and also quantities similar to the ones used to guarantee landscape stability in the theory of RSR [25, 27, 33, 51].", + "bbox": [ + 76, + 90, + 468, + 210 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Definitions required for Assumption 3: Recall that $\\pmb{\\Sigma}^{(0)}$ denotes the initial value in Algorithm 1, and denote", + "bbox": [ + 76, + 210, + 468, + 242 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\Sigma} _ {L _ {1}, L _ {2}} ^ {(0)} = \\mathbf {U} _ {L _ {1}} ^ {\\top} \\boldsymbol {\\Sigma} ^ {(0)} \\mathbf {U} _ {L _ {2}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 250, + 352, + 270 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We define the following condition number", + "bbox": [ + 76, + 276, + 344, + 291 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\kappa_ {1} = \\frac {\\sigma_ {d} \\left(\\boldsymbol {\\Sigma} _ {L _ {*} , L _ {*}} ^ {(0)} - \\boldsymbol {\\Sigma} _ {L _ {*} , L _ {*} ^ {\\perp}} ^ {(0)} \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*}} ^ {(0) - 1} \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*}} ^ {(0)}\\right)}{\\sigma_ {1} \\left(\\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*}} ^ {(0)}\\right)}.\n$$\n", + "text_format": "latex", + "bbox": [ + 124, + 297, + 419, + 349 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To get a better intuition to this primary quantity of Assumption 3, we first express the initial estimator $\\Sigma^{(0)}$ , using basis vectors for $L_{*}$ and $L_{*}^{\\perp}$ , as a $2\\times 2$ block matrix", + "bbox": [ + 76, + 356, + 468, + 402 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left( \\begin{array}{c c} \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*}} ^ {(0)} & \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*} ^ {\\perp}} ^ {(0)} \\\\ \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*}} ^ {(0)} & 
\\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*} ^ {\\perp}} ^ {(0)} \\end{array} \\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 407, + 349, + 452 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Defining $\\Sigma' = \\Sigma_{L_*, L_*}^{(0)} \\Sigma_{L_*^\\perp, L_*^\\perp}^{(0) - 1} \\Sigma_{L_*^\\perp, L_*}^{(0)}$ , we decompose this block matrix as", + "bbox": [ + 76, + 459, + 468, + 493 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left( \\begin{array}{c c} \\boldsymbol {\\Sigma} ^ {\\prime} & \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*}} ^ {(0)} \\\\ \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*}} ^ {(0)} & \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*}} ^ {(0) ^ {\\perp}}, \\end{array} \\right) + \\left( \\begin{array}{c c} \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*}} ^ {(0)} - \\boldsymbol {\\Sigma} ^ {\\prime} & 0 \\\\ 0 & 0 \\end{array} \\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 124, + 498, + 423, + 542 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We note that the numerator of $\\kappa_{1}$ is the $d$ -th eigenvalue of the second matrix in the above sum. We show in [28] that this eigenvalue is positive if $\\pmb{\\Sigma}^{(0)}$ is positive definite, which can be easily enforced. The condition number is thus the ratio between the smallest positive eigenvalue of the second matrix of the sum and the largest eigenvalue of the component of the first matrix associated with $L_{*}^{\\perp}$ . Therefore, $\\kappa_{1}$ expresses a ratio between a quantifier of a $d$ -dimensional component of $\\pmb{\\Sigma}^{(0)}$ , associated with $L_{*}$ , and a quantifier of the projection onto $L_{*}^{\\perp}$ of a full rank component of $\\pmb{\\Sigma}^{(0)}$ .", + "bbox": [ + 76, + 547, + 468, + 698 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We also define $\\Sigma_{in,*}$ as the TME solution to the set of the projected inliers $\\{\\mathbf{U}_{L^*}\\pmb {x}|\\pmb {x}\\in \\mathcal{X}_{in}\\} \\subset \\mathbb{R}^d$ and the following two condition numbers", + "bbox": [ + 76, + 699, + 468, + 743 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\kappa_ {2} = \\frac {\\sigma_ {1} \\left(\\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*} ^ {\\perp}} ^ {(0)}\\right)}{\\sigma_ {D} \\left(\\boldsymbol {\\Sigma} ^ {(0)}\\right)} \\text {a n d} \\kappa_ {i n} = \\frac {\\sigma_ {1} \\left(\\boldsymbol {\\Sigma} _ {i n , *}\\right)}{\\sigma_ {d} \\left(\\boldsymbol {\\Sigma} _ {i n , *}\\right)}.\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 750, + 411, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We note that $\\kappa_{in}$ is analogous to the condition number in (25) of [32], where we replace the sample covariance by the TME estimator. 
An analog to the alignment of outliers statistic [27, 33] for STE is", + "bbox": [ + 76, + 797, + 468, + 858 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {A} = \\left\\| \\sum_ {\\mathbf {x} \\in \\mathcal {X} _ {o u t}} \\frac {\\mathbf {x x} ^ {\\top}}{\\| \\mathbf {U} _ {L _ {*} ^ {\\perp}} \\mathbf {x} \\| ^ {2}} \\right\\|.\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 863, + 356, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "An analog to the stability statistic [27, 33] for STE is", + "bbox": [ + 500, + 90, + 830, + 106 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{S} = \\sigma_{d + 1,D}\\Bigl(\\sum_{\\mathbf{x}\\in \\mathcal{X}}\\frac{\\mathbf{x}\\mathbf{x}^{\\top}}{\\| \\mathbf{x}\\|^{2}}\\Bigr),\n$$\n", + "text_format": "latex", + "bbox": [ + 612, + 116, + 777, + 155 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\sigma_{d + 1,D}(\\mathbf{X})$ was defined in (2).", + "bbox": [ + 498, + 167, + 736, + 181 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Assumption 3: There exists $C = C(\\gamma, \\mathrm{DS-SNR}) > 0$ such that", + "bbox": [ + 500, + 181, + 885, + 196 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\kappa_ {1} \\geq C \\frac {d \\kappa_ {i n} \\mathcal {A}}{n _ {1}} \\left(\\kappa_ {i n} + \\frac {\\mathcal {A}}{\\frac {n _ {1}}{d} - \\gamma \\frac {n _ {0}}{D - d}} + \\frac {\\kappa_ {2} \\mathcal {A}}{\\gamma \\mathcal {S}} (1 + \\kappa_ {i n})\\right). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 516, + 209, + 890, + 250 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The exact technical requirement on $C$ is specified in [28]. In general, the larger the RHS of (4), the more restricted the choice of $\\Sigma^{(0)}$ is. In particular, when $\\kappa_1 = \\infty$ , the definition of $\\kappa_1$ implies that $\\mathrm{Im}(\\Sigma^{(0)}) = L_*$ , so the subspace is already recovered by the initial estimate. Therefore, reducing the lower bound of $\\kappa_1$ may allow some flexibility, so a marginally suboptimal initialization could still work out. In [28], we show that under the asymptotic generalized haystack model, Assumption 3 can be interpreted as an upper bound on the largest principal angle between the initial and ground truth subspaces.", + "bbox": [ + 496, + 258, + 890, + 410 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Generic Theory: The next theorem suggests that under assumptions 1-3, STE nicely converges to an estimator that recovers $L_{*}$ . The main significance of this theory is that its assumptions can allow DS-SNR lower than 1 for special instances of datasets (for which the assumptions hold), unlike the general recovery theories of [16] and [50].", + "bbox": [ + 496, + 410, + 890, + 501 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Theorem 1. Under assumptions 1-3, the sequence $\\pmb{\\Sigma}^{(k)}$ generated by STE converges to $\\mathbf{U}_{L_{*}}\\pmb{\\Sigma}_{in,*}\\mathbf{U}_{L_{*}}^{\\top}$ , the TME solution for the set of inliers $\\mathcal{X}_{in}$ . 
In addition, let $L^{(k)}$ be the subspace spanned by the top $d$ eigenvectors of $\\pmb{\\Sigma}^{(k)}$ , then the angle between $L^{(k)}$ and $L_{*}$ , $\\angle (L^{(k)},L_{*}) = \\cos^{-1}(\\| \\mathbf{U}_{L^{(k)}}^{\\top}\\mathbf{U}_{L_{*}}\\|)$ , converges $r$ -linearly to zero.", + "bbox": [ + 496, + 510, + 893, + 603 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We discuss insights of this theory on choices of the algorithms and further verify the above stated advantage of STE over TME assuming a common probabilistic model.", + "bbox": [ + 496, + 613, + 890, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Choice of $\\gamma$ for subspace recovery: In order to avoid too large lower bound for $\\kappa_{1}$ in (4), which we motivated above, it is good to find $\\epsilon_{1}$ and $\\epsilon_{2} > 0$ , such that $\\gamma$ lies in $(\\epsilon_{1},\\mathrm{DS - SNR} - \\epsilon_{2})$ (to notice this, observe the terms involving $\\gamma$ in the denominators of the last two additive terms in (4)). We thus note that if the DS-SNR is expected to be sufficiently larger than 1, we can use, e.g., $\\gamma = 0.5$ , but when the DS-SNR can be close to 1 or lower (e.g., in fundamental matrix estimation), it is advisable to choose small values of $\\gamma$ according to Algorithm 2 and their sizes may depend on the expected value of the DS-SNR.", + "bbox": [ + 496, + 659, + 890, + 809 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Possible ways of Initialization: If one expects an initial estimated subspace $\\hat{L}$ to have a sufficiently small angle $\\theta$ with $L_{*}$ , where $\\theta = \\angle (\\hat{L}, L_{*})$ , then for $\\boldsymbol{\\Sigma}^{(0)} \\coloneqq \\Pi_{\\hat{L}} + \\epsilon \\mathbf{I}$ it can be shown that $\\kappa_{1} > O(1 / (\\epsilon + \\theta))$ and $\\kappa_{2} < O(1 + \\frac{\\theta}{\\epsilon})$ . Thus one may use a trusted RSR method, e.g., FMS. As discussed in §2.5, the choice $\\boldsymbol{\\Sigma}^{(0)} = \\mathbf{I}$ (or a scaled version of it) corresponds to $\\hat{L}$ being the", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "14579", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "PCA subspace (obtained at iteration 1). Also, using the TME solution for $\\pmb{\\Sigma}^{(0)}$ corresponds to using the TME subspace as $\\hat{L}$", + "bbox": [ + 76, + 90, + 468, + 119 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theory under a probabilistic model: We show that under a common probabilistic model, the assumptions of Theorem 1, where $\\pmb{\\Sigma}^{(0)}$ is obtained by TME, hold. Moreover, we show that STE (initialized by TME) can recover the correct subspace in situations with DS-SNR $< 1$ , whereas TME cannot recover the underlying subspace in such cases. We follow [33] and study the Generalized Haystack Model, though for simplicity, we assume Gaussian instead of sub-Gaussian distributions and an asymptotic setting. We assume $n_1$ inliers i.i.d. sampled from a Gaussian distribution $N(0,\\pmb{\\Sigma}^{(in)} / d)$ , where $\\pmb{\\Sigma}^{(in)} \\in S_{+}(D)$ and $L_{*} = \\mathrm{Im}(\\pmb{\\Sigma}^{(in)})$ (so $\\pmb{\\Sigma}^{(in)}$ has $d$ nonzero eigenvalues), and $n_0$ outliers are i.i.d. sampled from a Gaussian distribution $N(0,\\pmb{\\Sigma}^{(out)} / D)$ , where $\\pmb{\\Sigma}^{(out)} / D \\in S_{++}(D)$ . 
We define the following condition numbers of inliers (in $L_{*}$ ) and outliers:", + "bbox": [ + 76, + 121, + 470, + 333 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\kappa_ {i n} = \\frac {\\sigma_ {1} (\\pmb {\\Sigma} ^ {(i n)})}{\\sigma_ {d} (\\pmb {\\Sigma} ^ {(i n)})} \\mathrm {a n d} \\kappa_ {o u t} = \\frac {\\sigma_ {1} (\\pmb {\\Sigma} ^ {(o u t)})}{\\sigma_ {D} (\\pmb {\\Sigma} ^ {(o u t)})}.\n$$\n", + "text_format": "latex", + "bbox": [ + 132, + 344, + 413, + 380 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Clearly, Assumption 1 holds under this model, and Assumption 2 constrains some of its parameters. Our next theorem shows that Assumption 3 holds under this model when the initial estimate $\\Sigma^{(out)}$ for STE is obtained by TME. It also shows that in this case STE can solve the RSR problem even when DS-SNR $< 1$ , unlike TME. For simplicity, we formulate the theory for the asymptotic case, where $N \\to \\infty$ and the theorem holds almost surely. It is possible to formulate it for a very large $N$ with high probability, but it requires stating complicated constants depending on various parameters.", + "bbox": [ + 75, + 390, + 468, + 542 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 2. Assume data generated from the above generalized haystack model. Assume further that for $0 < \\mu < 1$ , which can be arbitrarily small, $d < (1 - \\mu)D - 2$ . Then, for any chosen $0 < c_{0} < 1$ , which is a lower bound for $\\gamma$ , there exists $\\eta \\coloneqq \\eta (\\kappa_{in},\\kappa_{out},c_0,\\mu) < 1$ such that if DS-SNR $\\geq \\eta$ and $\\pmb{\\Sigma}^{(0)}$ is obtained by TME, then Assumption 3 for $\\pmb{\\Sigma}^{(0)}$ is satisfied with $c_{0} < \\gamma < \\eta - c_{0}$ almost surely as $N \\to \\infty$ . Consequently, the output of the STE algorithm, initialized by TME and with the choice of $c_{0} < \\gamma < \\eta - c_{0}$ , recovers $L_{*}$ . On the other hand, if $\\pmb{\\Sigma}_{L_{*},L_{*}^{\\perp}}^{(out)} \\neq 0$ and DS-SNR $< 1$ , then the top $d$ eigenvectors of TME do not recover $L_{*}$ .", + "bbox": [ + 76, + 551, + 472, + 723 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "There are three different regimes that the theorem covers. When DS-SNR $\\geq 1$ , both TME+STE (i.e., STE initialized by TME) and TME solve the RSR problem. When $\\eta \\leq$ DSSNR $< 1$ , TME+STE solves the RSR problem and TME generally fails. When $\\gamma \\leq$ DS-SNR $< \\eta$ , TME+STE might also fail, but STE with extremely good initialization (that satisfies Assumption 3) can still solve the problem.", + "bbox": [ + 75, + 734, + 468, + 839 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To get a basic idea of the dependence of $\\eta$ on its parameters, we remark that $\\eta \\to 1$ if either $c_0 \\to 0$ , $\\kappa_{in} \\to \\infty$ , $\\kappa_{out} \\to \\infty$ or $\\mu \\to 0$ , where the parameter $\\mu$ is somewhat artificial and might be removed with a tighter proof. Therefore, successful", + "bbox": [ + 75, + 839, + 470, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "performance of TME+STE requires a DS-SNR that is close to 1 when $\\gamma$ is close to either 0 or $\\eta$ (so that $c_{0}$ is very small) or when either the inlier or outlier distribution is highly non-symmetric, that is, either $\\kappa_{in}$ or $\\kappa_{out}$ is large.", + "bbox": [ + 496, + 90, + 893, + 152 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. 
Applications to Structure from Motion", + "text_level": 1, + "bbox": [ + 498, + 165, + 830, + 183 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We apply STE to problems relevant to SfM: robust estimation of fundamental matrices (see §4.1), and initial screening of undesirable cameras (see §4.2).", + "bbox": [ + 496, + 191, + 893, + 236 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Robust Fundamental Matrix Estimation", + "text_level": 1, + "bbox": [ + 498, + 246, + 844, + 262 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Fundamental matrix estimation from noisy and inexact keypoint matches is a core computer vision problem. It provides a challenging setting for applying RSR methods.", + "bbox": [ + 496, + 268, + 892, + 315 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We review this setting as follows. Let $(\\pmb{x},\\pmb{x}^{\\prime})\\in \\mathbb{R}^{3}\\times \\mathbb{R}^{3}$ be a correspondence pair of two points in different images that are projections of the same 3D point in the scene, where $\\pmb{x}$ and $\\pmb{x}^{\\prime}$ are expressed by homogeneous coordinates of planar points. The fundamental matrix $\\mathbf{F}\\in \\mathbb{R}^{3\\times 3}$ relates these corresponding points and the epipolar lines they lie on as follows: $\\pmb{x}^{\\prime \\top}\\mathbf{F}\\pmb{x} = 0$ [17], or equivalently,", + "bbox": [ + 496, + 315, + 893, + 421 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {v e c} (\\mathbf {F}) \\cdot \\operatorname {v e c} \\left(\\boldsymbol {x x} ^ {\\prime \\top}\\right) = 0. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 431, + 892, + 450 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathrm{vec}(\\cdot)$ denotes the vectorized form of a matrix. Therefore, ideally, the set of all vectors in $\\mathbb{R}^9$ of the form $\\mathrm{vec}(\\pmb{x}\\pmb{x}'^\\top)$ , where $(\\pmb{x},\\pmb{x}') \\in \\mathbb{R}^3 \\times \\mathbb{R}^3$ is a correspondence pair, lies on an 8-subspace in $\\mathbb{R}^9$ and its orthogonal complement yields the fundamental matrix. In practice, the measurements of correspondence pairs can be highly corrupted due to poor matching. Moreover, some choices of correspondence pairs and the corruption mechanism may lead to concentration on low-dimensional subspaces within the desired 8-subspace. Furthermore, the corruption mechanism can lead to nontrivial settings of outliers. Lastly, since $d = 8$ and $D = 9$ , the theoretical threshold of [16] translates to having the fraction of inliers among all data points be at least $8/9 \\approx 88.9\\%$ .", + "bbox": [ + 496, + 462, + 893, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Therefore, this application is often a very challenging setting for direct RSR methods. The best performing RSR methods to date for fundamental matrix estimation are variants of RANSAC [10]. RANSAC avoids any subspace-modeling assumptions, but estimates the subspace based on testing myriads of samples, each having 7 or 8 point correspondences [17].", + "bbox": [ + 496, + 643, + 893, + 734 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We test the performance of STE in estimating the fundamental matrix on the Photo Tourism database [41], where the image correspondences are obtained by SIFT feature similarities [30]. We compare STE with the following 3 top RSR performers according to [25]: FMS [24], spherical FMS (SFMS) [24] and TME [47, 50]. 
We also compared with vanilla RANSAC [10] and two of its specialized extensions, which are state-of-the-art performers for estimating fundamental matrices: locally optimized RANSAC (LO-RANSAC) [6] and degeneracy-check enabled RANSAC (DEGENSAC) [7]. For the RSR methods we used codes from the supplementary material of [25]", + "bbox": [ + 496, + 734, + 893, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "14580", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/953fa5e2e69cf2e35af18f9c70ab4200aff08d22e492096e141a8ffa13c5bacd.jpg", + "image_caption": [ + "Figure 1. Median (relative) rotation errors obtained by seven algorithms for the 14 datasets of Photo Tourism." + ], + "image_footnote": [], + "bbox": [ + 117, + 99, + 851, + 277 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "with their default options. We further used the Python package pydegensac for implementing LO-RANSAC and DEGENSAC with the inlier threshold $\\eta = 0.75$ . For STE, we used Algorithm 2 to estimate the best $\\gamma$ from $\\{(2i)^{-1}\\}_{i=1}^{5}$ .", + "bbox": [ + 75, + 330, + 467, + 391 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We measure the accuracy of the results according to the median and mean errors of relative rotation and direction vectors directly obtained by the fundamental matrices for each method. For computing these errors, we compared with ground-truth values provided by [41, 48]. Figure 1 describes the result of the mean errors for relative rotation per dataset of Photo Tourism, where the other three errors and $\\mathrm{mAA}(10^{\\circ})$ are in the supplemental material. STE is significantly better than top RSR performers (TME, FMS and SFMS). Overall, it appears that STE performs better than vanilla RANSAC, except for the Ellis Island and Vienna Cathedral datasets, where RANSAC outperforms STE. STE is still competitive when compared with LO-RANSAC and DEGENSAC, except for Notre Dame and the latter two datasets.", + "bbox": [ + 75, + 392, + 470, + 589 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Initial Camera Removal for SfM", + "text_level": 1, + "bbox": [ + 76, + 602, + 362, + 617 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We propose a novel application of RSR for SfM and test STE for this application. Even though our framework is not sufficiently practical at this point, it allows testing STE in a different setting where $N = D$ is very large and $d = 6$ . Our idea is to use RSR within the SfM pipeline right after estimating the fundamental matrices, in order to remove some cameras that result in inaccurate estimated fundamental matrices. The hope is that eventually such methods may reduce corruption and speed up the costly later computationally intensive stages of the global SfM pipeline.", + "bbox": [ + 75, + 627, + 468, + 763 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "There are two main reasons to question such a process. One may first question the gain in improving accuracy. Indeed, since the rest of the pipeline already identifies corrupted pairwise measurements, this process may not improve accuracy and may even harm it as it removes whole cameras and not pairs of cameras. That is, it is possible that a camera, which results in bad pairwise measurement, also contributes to some other accurate pairwise estimates that can improve the overall accuracy. The second concern is in terms of speed. 
In general,", + "bbox": [ + 75, + 765, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the removal of cameras may result in higher or comparable speed. Indeed, the LUD global pipeline [36], which we follow, examines the parallel rigidity of the viewing graph and extracts the maximal parallel rigid subgraph. Thus earlier removal of cameras may worsen the parallel rigidity of the graph and increase the computation due to the need of finding a maximal parallel rigid subgraph. For example, [40] removes cameras in an earlier stage of the LUD pipeline, but results in higher computational cost than the LUD pipeline. Therefore, improvement of speed for the LUD pipeline by removing cameras is generally non-trivial. Moreover, currently we use scale factors obtained by first running LUD, so we do not get a real speed improvement. Nevertheless, the proposed method is insightful whenever it may indicate clear improvement in accuracy for a dataset, since one can then infer that the current pipeline is not effective enough in handling corrupted measurements, which can be easily recognized by a simple method. Furthermore, improvement in \"speed\" can be indicative of maintaining parallel rigidity.", + "bbox": [ + 496, + 330, + 893, + 602 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our RSR formulation is based on a fundamental observation by Sengupta et al. [39] on the low-rank of the $n$ -view essential (or fundamental) matrix. The $n$ -view essential matrix $\\mathbf{E}$ of size $3n \\times 3n$ is formed by stacking all $\\binom{n}{2}$ essential matrices, while being appropriately scaled. That is, the $ij$ -th block of $\\mathbf{E}$ is the essential matrix for the $i$ -th and $j$ -th cameras, where each $\\mathbf{E}_{ij}$ is scaled by a factor $\\lambda_{ij}$ in accordance with the global coordinate system (see [20, 21, 39]). It was noticed in [39] that $\\mathbf{E}$ has rank 6. Moreover, [39] characterized the set of $n$ -view essential matrices whose camera centers are not all collinear by the satisfaction of a few algebraic conditions, where the major one is $\\mathrm{rank}(\\mathbf{E}) = 6$ . Further explanation appears in [20].", + "bbox": [ + 496, + 607, + 893, + 790 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We propose a straightforward application of RSR, utilizing these ideas to initially eliminate cameras that introduce significant corruption to the essential matrices. For this purpose, we compute the essential matrices (by computing first the fundamental matrices and then using the known camera calibration) and scale each matrix according to the factor obtained by the LUD pipeline [36] (note that this is the initial", + "bbox": [ + 496, + 795, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "14581", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a2dd0f25f68613bf4d91113af58c600e73c8bbcd18228539c6ad0a04d559561c.jpg", + "image_caption": [ + "Figure 2. Mean (absolute) rotation errors (in degrees, left) and mean translation errors (in degrees, right) of LUD and four RSR methods used to initially screen bad cameras within LUD applied to the 14 datasets of Photo Tourism." 
+ ], + "image_footnote": [], + "bbox": [ + 76, + 97, + 480, + 258 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f4c5770f5c5861e10f2f07e8c7ced3cebc2f423254ac4d0b257d8aabdf79219c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 493, + 98, + 893, + 258 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "scaling applied in [20, 21, 39] before applying a non-convex and nontrivial optimization procedure that refines such scales). Using these appropriately scaled essential matrices, we form the $n$ -view essential matrix $\\mathbf{E}$ of size $3n\\times 3n$ . We denote the $3n\\times 3$ column blocks of $\\mathbf{E}$ by $\\mathbf{E}_{:,1},\\dots,\\mathbf{E}_{:,n}$ (since $\\mathbf{E}$ is symmetric they are the same as the row blocks transposed). We treat $\\mathbf{E}$ as a data matrix with $D = N = 3n$ , where the columns of $\\mathbf{E}$ are the data points. We apply RSR with $d = 6$ , recover a $d$ -dimensional robust subspace and identify the outlying columns whose distance is largest from this subspace. To avoid heuristic methods for the cutoff of outliers we assume a fixed percentage of $20\\%$ outlying columns. If a column block, $\\mathbf{E}_{:,i}$ contains an outlying column, we remove its corresponding camera $i$ . Consequently, a smaller percentage of cameras (about $10 - 15\\%$ ) will be eliminated.", + "bbox": [ + 75, + 323, + 472, + 535 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We use the Photo Tourism database [41] with precomputed pairwise image correspondences provided by [39] (they were obtained by thresholding SIFT feature similarities). To compute scale factors for the essential matrices we use the output of the LUD pipeline [36] as follows (following an idea proposed in [39] for initializing these values): Given the essential matrix for cameras $i$ and $j$ computed at an early stage of our pipeline, $\\mathbf{E}_{ij}$ , and the one obtained by the full LUD pipeline, $\\mathbf{E}_{ij}^{\\mathrm{LUD}}$ , the scaling factor is $\\lambda_{ij} = \\langle \\mathbf{E}_{ij}, [\\mathbf{E}_{ij}^{\\mathrm{LUD}}] \\rangle / \\| [\\mathbf{E}_{ij}^{\\mathrm{LUD}}] \\|_F^2$ . Since many values of $\\mathbf{E}_{ij}$ are missing, we also apply matrix completion.", + "bbox": [ + 75, + 536, + 468, + 688 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We compare the LUD pipeline with the LUD pipeline combined with the filtering processes achieved by STE, FMS, SFMS, and TME. For STE we fix $\\gamma = 1/3$ , though any other value we tried yielded the same result. We report both mean and median errors of rotations and translations and runtime of the standard LUD and the RSR+LUD methods with initial screening of cameras. Figure 2 shows the mean rotation and translation errors, where the rest of the figures and a summarizing table are in the supplementary material. In general, STE demonstrates slightly higher accuracy compared to other RSR methods. Improved accuracy is particularly notable when matrix completion is not utilized, as demonstrated in the supplementary material. We observe that LUD+STE generally improves the estimation of camera parameters (both rotations and translations) over LUD.", + "bbox": [ + 75, + 688, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The improvement of LUD+STE is noticeable in Roman Forum and Gendarmenmarkt. In the supplementary material we show further improvement for Gendarmenmarkt with the removal of $45\\%$ outlying columns. 
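A minimal sketch of this screening step (our restructuring with illustrative names; the released pipeline, the matrix-completion step, and the exact scaling convention may differ): pairwise essential matrices are rescaled using the LUD-based factor \(\lambda_{ij}\), stacked into the \(3n \times 3n\) matrix \(\mathbf{E}\), an RSR method with \(d = 6\) is run on the columns of \(\mathbf{E}\), and any camera whose 3-column block contains one of the 20% most outlying columns is dropped.

```python
# Sketch (illustrative, not the released pipeline) of the camera-screening step described
# above. The robust subspace step is abstracted as a callable so STE, FMS, etc. can be
# plugged in; a plain SVD stand-in is included only to keep the sketch runnable.
import numpy as np

def scale_factor(E_ij, E_ij_lud):
    """lambda_ij = <E_ij, E_ij^LUD> / ||E_ij^LUD||_F^2 (Frobenius inner product)."""
    return np.vdot(E_ij_lud, E_ij) / np.vdot(E_ij_lud, E_ij_lud)

def build_nview_essential(E_blocks, E_blocks_lud, n):
    """E_blocks[(i, j)]: 3x3 essential matrix for cameras i < j.
    Missing pairs are left as zeros; that is where matrix completion would enter."""
    E = np.zeros((3 * n, 3 * n))
    for (i, j), E_ij in E_blocks.items():
        lam = scale_factor(E_ij, E_blocks_lud[(i, j)])
        # Rescale the early-stage estimate to the global (LUD) scale; the exact
        # convention (divide vs. multiply) is an assumption of this sketch.
        block = E_ij / lam if lam != 0 else E_ij
        E[3 * i:3 * i + 3, 3 * j:3 * j + 3] = block
        E[3 * j:3 * j + 3, 3 * i:3 * i + 3] = block.T   # the n-view matrix is symmetric
    return E

def cameras_to_remove(E, rsr_subspace, d=6, outlier_frac=0.20):
    """rsr_subspace(E, d) -> (3n x d) orthonormal basis of the robust subspace."""
    U = rsr_subspace(E, d)
    dist = np.linalg.norm(E - U @ (U.T @ E), axis=0)    # column distances to the subspace
    cutoff = np.quantile(dist, 1.0 - outlier_frac)
    bad_cols = np.where(dist >= cutoff)[0]
    return sorted(set(bad_cols // 3))                   # camera index owning each column

# Non-robust stand-in for the RSR step, for testing the plumbing only.
svd_subspace = lambda E, d: np.linalg.svd(E)[0][:, :d]
```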
While the resulting errors are still large, their improvement shows some potential in dealing with difficult SfM structure by initially removing cameras in a way that may help eliminate some scene ambiguities, which are prevalent in Gendarmenmarkt. In terms of runtime, both LUD+STE and LUD+SFMS demonstrate significant improvements, where LUD+SFMS is even faster than LUD+STE. While this does not yet imply faster handling of the datasets (as we use initial scaling factors obtained by LUD), it indicates the efficiency of the removal of outliers in maintaining parallel rigidity.", + "bbox": [ + 496, + 323, + 893, + 521 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 539, + 619, + 555 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We introduce STE, a meticulously crafted adaptation of TME designed to address challenges within RSR. Theoretical guarantees demonstrate its ability to recover the true underlying subspace reliably, even with a smaller fraction of inliers compared to the well-known theoretical threshold. Under the generalized haystack model, we show that this initialization can be chosen as TME itself, leading to improved handling of a smaller fraction of inliers compared to TME. Our exploration extends to practical applications, where STE proves effective in two 3D vision tasks: robust fundamental matrix estimation and screening of bad cameras for improved SfM.", + "bbox": [ + 496, + 566, + 893, + 733 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Several avenues for future research include: $\\bullet$ Exploring adaptations of other robust covariance estimation methods to RSR. $\\bullet$ Studying effective initialization for STE both in theory and in practice. $\\bullet$ In-depth theoretical exploration of the optimal choice of the parameter $\\gamma$ . $\\bullet$ Study of alternative ways of adapting TME to RSR problems. $\\bullet$ Improving STE for fundamental matrix estimation following ideas similar to those in [7, 12, 37] for addressing challenging degeneracies. $\\bullet$ Enhancing our initial idea of initial removal of bad cameras, specifically attempting to use it to rectify challenging scene ambiguities. $\\bullet$ Testing our methods for SfM using more recent feature matching algorithms.", + "bbox": [ + 496, + 734, + 893, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "14582", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 169, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Larry P. Ammann. Robust singular value decompositions: A new approach to projection pursuit. Journal of the American Statistical Association, 88(422):pp. 505-514, 1993. 1", + "[2] Daniel Barath, Jiri Matas, and Jana Noskova. MAGSAC: marginalizing sample consensus. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10197-10205, 2019. 2", + "[3] Jian-Feng Cai, Emmanuel J. Candès, and Zuowei Shen. A singular value thresholding algorithm for matrix completion. SIAM Journal on optimization, 20(4):1956-1982, 2010. 17", + "[4] Yeshwanth Cherapanamjeri, Prateek Jain, and Praneeth Netrapalli. Thresholding based outlier robust PCA. In Conference on Learning Theory, pages 593-628. PMLR, 2017. 1", + "[5] Vartan Choulakian. $L_{1}$ -norm projection pursuit principal component analysis. 
Computational Statistics & Data Analysis, 50(6):1441-1451, 2006. 1", + "[6] Ondrej Chum, Jií Matas, and Josef Kittler. Locally optimized RANSAC. In Pattern Recognition: 25th DAGM Symposium, Magdeburg, Germany, September 10-12, 2003. Proceedings 25, pages 236-243. Springer, 2003. 2, 6, 13", + "[7] Ondrej Chum, Tomas Werner, and Jiri Matas. Two-view geometry estimation unaffected by a dominant plane. In 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), pages 772-779. IEEE, 2005. 2, 6, 8, 13", + "[8] Ilias Diakonikolas, Gautam Kamath, Daniel M. Kane, Jerry Li, Ankur Moitra, and Alistair Stewart. Robustly learning a gaussian: Getting optimal error, efficiently. In Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms, page 2683-2702, USA, 2018. Society for Industrial and Applied Mathematics. 1", + "[9] Chris Ding, Ding Zhou, Xiaofeng He, and Hongyuan Zha. R1-PCA: rotational invariant $L_{1}$ -norm principal component analysis for robust subspace factorization. In ICML '06: Proceedings of the 23rd international conference on Machine learning, pages 281-288, New York, NY, USA, 2006. ACM. 1", + "[10] Martin A. Fischler and Robert C. Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 1, 2, 6, 13", + "[11] Gabriel Frahm and Uwe Jaekel. A generalization of Tyler's M-estimators to the case of incomplete data. Computational Statistics & Data Analysis, 54(2):374-393, 2010. 2", + "[12] J-M Frahm and Marc Pollefeys. RANSAC for (quasi-) degenerate data (QDEGSAC). In 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06), pages 453-460. IEEE, 2006. 2, 8", + "[13] William Cole Franks and Ankur Moitra. Rigorous guarantees for Tyler's M-estimator via quantum expansion. In Conference on Learning Theory, pages 1601–1632. PMLR, 2020. 3", + "[14] Jerome H. Friedman and John W. Tukey. A projection pursuit algorithm for exploratory data analysis. IEEE Transactions on Computers, C-23(9):881-890, 1974. 1", + "[15] John Goes, Gilad Lerman, and Boaz Nadler. Robust sparse covariance estimation by thresholding Tyler's M-estimator. The Annals of Statistics, 48(1):86 - 110, 2020. 3" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] Moritz Hardt and Ankur Moitra. Algorithms and hardness for robust subspace recovery. In Conference on Learning Theory, pages 354-375. PMLR, 2013. 1, 2, 3, 4, 5, 6", + "[17] Richard Hartley and Andrew Zisserman. Multiple view geometry in computer vision. Cambridge university press, 2003. 6, 11, 12, 13", + "[18] Yuhe Jin, Dmytro Mishkin, Anastasiia Mishchuk, Jiri Matas, Pascal Fua, Kwang Moo Yi, and Eduard Trulls. Image matching across wide baselines: From paper to practice. International Journal of Computer Vision, 129(2):517-547, 2021. 13", + "[19] Arman Karimian and Roberto Tron. Essential matrix estimation using convex relaxations in orthogonal space. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17142-17152, 2023. 15", + "[20] Yoni Kasten, Amnon Geifman, Meirav Galun, and Ronen Basri. Algebraic characterization of essential matrices and their averaging in multiview settings. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5895-5903, 2019. 
7, 8", + "[21] Yoni Kasten, Amnon Geifman, Meirav Galun, and Ronen Basri. Gpsfm: Global projective sfm using algebraic constraints on multi-view fundamental matrices. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3264-3272, 2019. 7, 8", + "[22] John T. Kent and David E. Tyler. Maximum likelihood estimation for the wrapped Cauchy distribution. Journal of Applied Statistics, 15(2):247-254, 1988. 2, 3", + "[23] Nojun Kwak. Principal component analysis based on $L_{1}$ -norm maximization. IEEE transactions on pattern analysis and machine intelligence, 30(9):1672-1680, 2008. 1", + "[24] Gilad Lerman and Tyler Maunu. Fast, robust and non-convex subspace recovery. Information and Inference: A Journal of the IMA, 7(2):277–336, 2018. 1, 2, 3, 4, 6, 13", + "[25] Gilad Lerman and Tyler Maunu. An overview of robust subspace recovery. Proceedings of the IEEE, 106(8):1380-1410, 2018. 1, 3, 4, 5, 6", + "[26] Gilad Lerman and Teng Zhang. $l_{p}$ -recovery of the most significant subspace among multiple subspaces with outliers. Constr. Approx., 40(3):329-385, 2014. 4", + "[27] Gilad Lerman, Michael B. McCoy, Joel A. Tropp, and Teng Zhang. Robust computation of linear models by convex relaxation. Found. Comput. Math., 15(2):363-410, 2015. 1, 3, 5", + "[28] Gilad Lerman, Feng Yu, and Teng Zhang. Theoretical guarantees for the subspace-constrained Tyler's estimator, 2024. 4, 5, 11", + "[29] Guoying Li and Zhonglian Chen. Projection-pursuit approach to robust dispersion matrices and principal components: primary theory and monte carlo. Journal of the American Statistical Association, 80(391):759-766, 1985. 1", + "[30] David G. Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60:91-110, 2004. 6", + "[31] Ricardo A. Maronna and Víctor J. Yohai. Robust estimation of multivariate location and scatter. Wiley StatsRef: Statistics Reference Online, pages 1-12, 2014. 2", + "[32] Tyler Maunu and Gilad Lerman. Robust subspace recovery with adversarial outliers, 2019. 1, 2, 4, 5" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "14583", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[33] Tyler Maunu, Teng Zhang, and Gilad Lerman. A well-tempered landscape for non-convex robust subspace recovery. J. Mach. Learn. Res., 20(1):1348–1406, 2019. 1, 3, 4, 5, 6", + "[34] Tyler Maunu, Chenyu Yu, and Gilad Lerman. Stochastic and private nonconvex outlier-robust PCAs. In Proceedings of Mathematical and Scientific Machine Learning, pages 173–188. PMLR, 2022. 1", + "[35] Michael McCoy and Joel A. Tropp. Two proposals for robust PCA using semidefinite programming. Electronic Journal of Statistics, 5(none):1123 - 1160, 2011. 1", + "[36] Onur Ozyesil and Amit Singer. Robust camera location estimation by convex programming. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2674-2683, 2015. 7, 8", + "[37] Rahul Raguram, Ondrej Chum, Marc Pollefeys, Jiri Matas, and Jan-Michael Frahm. USAC: A universal framework for random sample consensus. IEEE transactions on pattern analysis and machine intelligence, 35(8):2022-2038, 2012. 2, 8", + "[38] Elvezio M. Ronchetti and Peter J. Huber. Robust statistics. John Wiley & Sons Hoboken, NJ, USA, 2009. 1", + "[39] Soumyadip Sengupta, Tal Amir, Meirav Galun, Tom Goldstein, David W Jacobs, Amit Singer, and Ronen Basri. 
A new rank constraint on multi-view fundamental matrices, and its application to camera location recovery. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4798-4806, 2017. 7, 8, 16", + "[40] Yunpeng Shi, Shaohan Li, Tyler Mauno, and Gilad Lerman. Scalable cluster-consistency statistics for robust multi-object matching. In International Conference on 3D Vision, 3DV 2021, London, United Kingdom, December 1-3, 2021, pages 352-360. IEEE, 2021. 7", + "[41] Noah Snavely, Steven M. Seitz, and Richard Szeliski. Photo tourism: exploring photo collections in 3d. In ACM siggraph 2006 papers, pages 835-846. Association for Computing Machinery, 2006. 6, 7, 8", + "[42] Nathan Srebro and Tommi Jaakkola. Weighted low-rank approximations. In Proceedings of the 20th international conference on machine learning (ICML-03), pages 720–727, 2003. 1", + "[43] Jacob Steinhardt, Moses Charikar, and Gregory Valiant. Resilience: A criterion for learning in the presence of arbitrary outliers. In 9th Innovations in Theoretical Computer Science Conference, ITCS 2018, January 11-14, 2018, Cambridge, MA, USA, pages 45:1-45:21. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2018. 1", + "[44] Ben Tordoff and David W. Murray. Guided sampling and consensus for motion estimation. In Computer Vision—ECCV 2002: 7th European Conference on Computer Vision Copenhagen, Denmark, May 28–31, 2002 Proceedings, Part I 7, pages 82–96. Springer, 2002. 15", + "[45] Philip H.S. Torr and Andrew Zisserman. MLESAC: A new robust estimator with application to estimating image geometry. Computer vision and image understanding, 78(1):138-156, 2000. 2", + "[46] David E. Tyler. Statistical analysis for the angular central gaussian distribution on the sphere. Biometrika, 74(3):579-589, 1987. 2", + "[47] David E. Tyler. A distribution-free m-estimator of multivariate scatter. The Annals of Statistics, pages 234–251, 1987. 1, 2, 6, 13" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[48] Kyle Wilson and Noah Snavely. Robust global translations with 1dsfm. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part III 13, pages 61-75. Springer, 2014. 7", + "[49] Huan Xu, Constantine Caramanis, and Sujay Sanghavi. Robust PCA via outlier pursuit. Advances in neural information processing systems, 23, 2010. 1", + "[50] Teng Zhang. Robust subspace recovery by Tyler's M-estimator. Information and Inference: A Journal of the IMA, 5(1):1-21, 2016. 1, 3, 4, 5, 6, 13", + "[51] Teng Zhang and Gilad Lerman. A novel M-estimator for robust PCA. The Journal of Machine Learning Research, 15(1): 749–808, 2014. 
1, 3, 5" + ], + "bbox": [ + 501, + 90, + 893, + 273 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "14584", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/02a10508-95ff-4550-b14e-3121c8c91065_model.json b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/02a10508-95ff-4550-b14e-3121c8c91065_model.json new file mode 100644 index 0000000000000000000000000000000000000000..de4f3c27fcb5f661ccdb0840ad38efe6f9487ec8 --- /dev/null +++ b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/02a10508-95ff-4550-b14e-3121c8c91065_model.json @@ -0,0 +1,2376 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.131, + 0.717, + 0.175 + ], + "angle": 0, + "content": "A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion *" + }, + { + "type": "text", + "bbox": [ + 0.148, + 0.203, + 0.334, + 0.258 + ], + "angle": 0, + "content": "Feng Yu† \nUniversity of Minnesota \nfyu@umn.edu" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.204, + 0.596, + 0.257 + ], + "angle": 0, + "content": "Teng Zhang \nUniversity of Central Florida \nteng.zhang@ucf.edu" + }, + { + "type": "text", + "bbox": [ + 0.639, + 0.203, + 0.824, + 0.256 + ], + "angle": 0, + "content": "Gilad Lerman ‡ \nUniversity of Minnesota \nlerman@umn.edu" + }, + { + "type": "title", + "bbox": [ + 0.237, + 0.292, + 0.312, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.474, + 0.566 + ], + "angle": 0, + "content": "We present the subspace-constrained Tyler's estimator (STE) designed for recovering a low-dimensional subspace within a dataset that may be highly corrupted with outliers. STE is a fusion of the Tyler's M-estimator (TME) and a variant of the fast median subspace. Our theoretical analysis suggests that, under a common inlier-outlier model, STE can effectively recover the underlying subspace, even when it contains a smaller fraction of inliers relative to other methods in the field of robust subspace recovery. We apply STE in the context of Structure from Motion (SfM) in two ways: for robust estimation of the fundamental matrix and for the removal of outlying cameras, enhancing the robustness of the SfM pipeline. Numerical experiments confirm the state-of-the-art performance of our method in these applications. This research makes significant contributions to the field of robust subspace recovery, particularly in the context of computer vision and 3D reconstruction." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.597, + 0.203, + 0.613 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.623, + 0.471, + 0.805 + ], + "angle": 0, + "content": "In many applications, data has been collected in large quantities and dimensions. 
It is a common practice to represent such data within a low-dimensional subspace that preserves its essential information. Principal Component Analysis (PCA) is frequently employed to identify this subspace. However, PCA faces challenges when dealing with data contaminated by outliers. Consequently, the field of Robust Subspace Recovery (RSR) aims to develop a framework for outlier-robust PCA. RSR is particularly relevant to problems in computer vision, such as fundamental matrix estimation, which involves recovering a hidden subspace associated with \"good correspondence pairs\" among highly corrupted measurements." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.806, + 0.471, + 0.851 + ], + "angle": 0, + "content": "Various algorithms have been proposed to address RSR, employing methods such as projection pursuit [1, 5, 14, 23, 29, 35, 38], subspace energy minimization (in particular least absolute" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.293, + 0.895, + 0.369 + ], + "angle": 0, + "content": "deviations and its relaxations) [9, 24, 27, 33, 34, 42, 43, 51], robust covariance estimation [50], filtering-based methods [4, 8, 49] and exhaustive subspace search methods [10, 16]. An in-depth exploration and comprehensive overview of robust subspace recovery and its diverse algorithms can be found in [25]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.37, + 0.895, + 0.552 + ], + "angle": 0, + "content": "Methods based on robust covariance estimators, such as the Tyler's M-estimator (TME), offer additional useful information on the shape of the data within the subspace, similarly to PCA in the non-robust setting. They also offer maximum-likelihood interpretation, which is missing in many other methods. Application of the TME [47] to RSR has been shown to be successful on basic benchmarks [25, 50]. Moreover, under a model of inliers in a general position on a subspace and outliers in general position in the complement of the subspace, TME was shown to recover the subspace within a desirable fraction of inliers [50]. Below this fraction it was proved to be Small Set Expansion (SSE) hard to solve the RSR problem [16]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.552, + 0.896, + 0.734 + ], + "angle": 0, + "content": "One may still succeed with solving the RSR problem with a computationally efficient algorithm when the fraction of inliers is lower than the one required by [16], considering a more restricted data model or violating other assumptions made in [16]. For example, some special results in this direction are discussed in [32]. Also, [33] proposes the generalized haystack model of inliers and outliers to demonstrate the possibility of handling lower fractions of inliers by an RSR algorithm. This model extends the limited standard haystack model [27], where basic methods (such as PCA filtering) can easily work with low fractions of outliers. Nevertheless, it is unclear how practical the above theoretical ideas are for applied settings." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.734, + 0.895, + 0.885 + ], + "angle": 0, + "content": "One practical setting that requires a fraction of inliers significantly lower than the one stated in [16] arises in the problem of robust fundamental (or essential) matrix estimation. The fundamental matrix encompasses the epipolar geometry of two views in stereo vision systems. It is typically computed using point correspondences between the two projected images. 
This computation requires finding an 8-dimensional subspace within a 9-dimensional ambient space. In this setting, the theoretical framework of [16] requires that the fraction of inliers be at least \\(8/9 \\approx 88.9\\%\\), which is clearly unreasonable to require." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.886, + 0.895, + 0.902 + ], + "angle": 0, + "content": "To date, the RANdom Sample Consensus (RANSAC)" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.863, + 0.458, + 0.876 + ], + "angle": 0, + "content": "*This work was supported by NSF DMS awards 2124913 and 2318926." + }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.876, + 0.38, + 0.888 + ], + "angle": 0, + "content": "† Supplementary code: https://github.com/alexfengg/STE" + }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.888, + 0.376, + 0.9 + ], + "angle": 0, + "content": "\\(\\ddagger\\) Corresponding author. All authors equally contributed." + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.863, + 0.458, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "14575" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.473, + 0.318 + ], + "angle": 0, + "content": "method [10] is the only RSR method that has been highly successful in addressing this nontrivial scenario, gaining widespread popularity in computer vision. RANSAC is an iterative method that randomly selects minimal subsets of the data and fits models, in particular subspaces, to identify the best consensus set, that is, the set in most agreement with the hypothesized model. There are numerous approaches proposed to improve RANSAC, especially for this particular application, including locally optimized RANSAC (LO-RANSAC, [6]), maximum likelihood estimator RANSAC (MLESAC) [45]), degeneracy-check enabled RANSAC (DEGENSAC) [7]) and M-estimator guided RANSAC (MAGSAC) [2]). A near recovery theory for a variant of RANSAC under some assumptions on the outliers was suggested in [32]. Nevertheless, in general, RANSAC is rather slow and its application to higher-dimensional problems is intractable." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.32, + 0.473, + 0.488 + ], + "angle": 0, + "content": "This work introduces a novel RSR algorithm that is guaranteed to robustly handle a lower fraction of outliers than the theoretical threshold proposed by [16], under special settings. Our basic idea is to adapt Tyler's M-Estimator to utilize the information of the underlying \\(d\\)-dimensional subspace, while avoiding estimation of the full covariance. By using less degrees of freedom we obtain a more accurate subspace estimator than the one obtained by TME with improved computational complexity. We show that STE is a fusion of the Tyler's M-estimator (TME) and a variant of the fast median subspace (FMS) [24] that aims to minimize a subspace-based \\(\\ell_0\\) energy." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.489, + 0.473, + 0.671 + ], + "angle": 0, + "content": "Our theory shows that our proposed subspace-constrained Tyler's estimator (STE) algorithm can effectively recover the underlying subspace, even when it contains a smaller fraction of inliers relative to other methods. We obtain this nontrivial achievement first in a generic setting, where we establish when an initial estimator for STE is sufficiently well-conditioned to guarantee the desired robustness of STE. 
We then assume the asymptotic generalized haystack model and show that under this model, TME itself is a well-conditioned initial estimator for STE, and that unlike TME, STE with this initialization can deal with a lower fraction of inliers than the theoretical threshold specified in [16]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.672, + 0.473, + 0.824 + ], + "angle": 0, + "content": "We demonstrate competitive performance in robust fundamental matrix estimation, relying solely on subspace information without additional methods for handling degenerate scenarios, in contrast to [7, 12, 37]. We also propose a potential application of RSR for removing bad cameras in order to enhance the SfM pipeline and show competitive performance of STE. This is a completely new idea and it may require additional exploration to make it practical. Nevertheless, it offers a very different testbed where \\( N = D \\) is very large and RANSAC is generally intractable." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.473, + 0.903 + ], + "angle": 0, + "content": "The rest of the paper is organized as follows: §2 introduces the STE framework, §3 establishes theoretical guarantees of STE, §4 applies STE to two different problems in SfM, demonstrating its competitive performance relative to existing algorithms, and §5 provides conclusions and future directions." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.09, + 0.685, + 0.108 + ], + "angle": 0, + "content": "2. The STE Algorithm" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.115, + 0.895, + 0.192 + ], + "angle": 0, + "content": "We present our proposed STE. We review basic notation in §2.1 and Tyler's original estimator in §2.2. We describe our method in §2.3, its computational complexity in §2.4, its algorithmic choices in §2.5 and an interpretation for it as a fusion of TME and FMS with \\( p = 0 \\) in §2.6." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.2, + 0.605, + 0.215 + ], + "angle": 0, + "content": "2.1. Notation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.222, + 0.895, + 0.453 + ], + "angle": 0, + "content": "We use bold upper and lower case letters for matrices and column vectors, respectively. Let \\(\\mathbf{I}_k\\) denote the identity matrix in \\(\\mathbb{R}^{k\\times k}\\), where if \\(k\\) is obvious we just write \\(\\mathbf{I}\\). For a matrix \\(\\mathbf{A}\\), we denote by \\(\\operatorname{tr}(\\mathbf{A})\\) and \\(\\operatorname{Im}(\\mathbf{A})\\) the trace and image (i.e., column space) of \\(\\mathbf{A}\\). We denote by \\(S_{+}(D)\\) and \\(S_{++}(D)\\) the sets of positive semidefinite and definite matrices in \\(\\mathbb{R}^{D\\times D}\\), respectively. We denote by \\(O(D,d)\\) the set of semi-orthogonal \\(D\\times d\\) matrices, i.e., \\(\\mathbf{U}\\in O(D,d)\\) if and only if \\(\\mathbf{U}\\in \\mathbb{R}^{D\\times d}\\) and \\(\\mathbf{U}^{\\top}\\mathbf{U} = \\mathbf{I}_{d}\\). We refer to linear \\(d\\)-dimensional subspaces as \\(d\\)-subspaces. For a \\(d\\)-subspace \\(L\\), we denote by \\(\\mathbf{P}_L\\) the \\(D\\times D\\) matrix representing the orthogonal projector onto \\(L\\). We also arbitrarily fix \\(\\mathbf{U}_L\\) in \\(O(D,d)\\) such that \\(\\mathbf{U}_L\\mathbf{U}_L^\\top = \\mathbf{P}_L\\) (such \\(\\mathbf{U}_L\\) is determined up to right multiplication by an orthogonal matrix in \\(O(d,d)\\)). 
Throughout the paper, \\(\\mathcal{X} = \\{\\pmb{x}_i\\}_{i = 1}^N\\subset \\mathbb{R}^D\\) is assumed to be a given centered dataset, that is, \\(\\sum_{i = 1}^{N}\\pmb{x}_i = \\mathbf{0}\\)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.459, + 0.882, + 0.475 + ], + "angle": 0, + "content": "2.2. Tyler's Estimator and its Application to RSR" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.482, + 0.895, + 0.514 + ], + "angle": 0, + "content": "Tyler's M-estimator (TME) [47] robustly estimates the covariance \\(\\pmb{\\Sigma}^{*}\\) of the dataset \\(\\mathcal{X} = \\{\\pmb{x}_i\\}_{i=1}^N \\subset \\mathbb{R}^D\\) by minimizing" + }, + { + "type": "equation", + "bbox": [ + 0.584, + 0.522, + 0.895, + 0.562 + ], + "angle": 0, + "content": "\\[\n\\frac {D}{N} \\sum_ {i = 1} ^ {N} \\log \\left(\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {\\Sigma} ^ {- 1} \\boldsymbol {x} _ {i}\\right) + \\operatorname {l o g d e t} (\\boldsymbol {\\Sigma}) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.57, + 0.895, + 0.781 + ], + "angle": 0, + "content": "over \\(\\pmb{\\Sigma} \\in S_{++}(D)\\) such that \\(\\mathrm{tr}(\\pmb{\\Sigma}) = 1\\). The cost function in (1) can be motivated by writing the maximum likelihood of the multivariate \\(t\\)-distribution and letting its degrees of freedom parameter, \\(\\nu\\), approach zero [31]. This cost function is invariant to dilations of \\(\\pmb{\\Sigma}\\), and the constraint on \\(\\mathrm{tr}(\\pmb{\\Sigma})\\), whose value can be arbitrarily chosen, fixes a scale. TME also applies to scenarios where the covariance matrix does not exist. In such cases, TME estimates the shape (or scatter matrix) of the distribution, which is defined up to an arbitrary scale. More direct interpretations of TME as a maximum likelihood estimator can be found in [11, 46]. When \\(D\\) is fixed and \\(N\\) approaches infinity, TME is the \"most robust\" estimator of the shape matrix for data i.i.d. sampled from a continuous elliptical distribution [47] in a minimax sense, that is, as a minimizer of the maximal variance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.782, + 0.895, + 0.813 + ], + "angle": 0, + "content": "Tyler [47] proposed the following iterative formula for computing TME:" + }, + { + "type": "equation", + "bbox": [ + 0.507, + 0.82, + 0.885, + 0.862 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\Sigma} ^ {(k)} = \\sum_ {i = 1} ^ {N} \\frac {\\boldsymbol {x} _ {i} \\boldsymbol {x} _ {i} ^ {\\top}}{\\boldsymbol {x} _ {i} ^ {\\top} (\\boldsymbol {\\Sigma} ^ {(k - 1)}) ^ {- 1} \\boldsymbol {x} _ {i}} / \\operatorname {t r} \\left(\\sum_ {i = 1} ^ {N} \\frac {\\boldsymbol {x} _ {i} \\boldsymbol {x} _ {i} ^ {\\top}}{\\boldsymbol {x} _ {i} ^ {\\top} (\\boldsymbol {\\Sigma} ^ {(k - 1)}) ^ {- 1} \\boldsymbol {x} _ {i}}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.87, + 0.895, + 0.902 + ], + "angle": 0, + "content": "Kent and Tyler [22] proved that if any \\(d\\)-subspace of \\(\\mathbb{R}^D\\), where \\(1 \\leq d \\leq D - 1\\), contains fewer than \\(Nd / D\\) data points, then" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "14576" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.137 + ], + "angle": 0, + "content": "the above iterative procedure converges to TME. Linear rate of convergence was proved for the regularized TME in [15] and for TME in [13]." 
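A compact numpy sketch of this fixed-point iteration follows (the stopping rule and the tiny regularizer guarding against a zero denominator are our own implementation choices, not part of Tyler's formulation):

```python
# TME fixed-point iteration: Sigma <- sum_i x_i x_i^T / (x_i^T Sigma^{-1} x_i),
# renormalized to trace one. The eps guard and stopping rule are choices of this sketch.
import numpy as np

def tme(X, max_iter=100, tol=1e-10, eps=1e-15):
    """X: D x N centered data matrix. Returns a trace-one scatter (shape) estimate."""
    D, N = X.shape
    Sigma = np.eye(D) / D
    for _ in range(max_iter):
        # w_i = 1 / (x_i^T Sigma^{-1} x_i), computed column-wise.
        w = 1.0 / (np.einsum('dn,dn->n', X, np.linalg.solve(Sigma, X)) + eps)
        Z = (X * w) @ X.T                    # sum_i w_i x_i x_i^T
        Sigma_new = Z / np.trace(Z)          # fix the scale: tr(Sigma) = 1
        if np.linalg.norm(Sigma_new - Sigma) < tol:
            return Sigma_new
        Sigma = Sigma_new
    return Sigma

# Toy usage: elliptical data in R^5; the output estimates the shape matrix up to scale.
rng = np.random.default_rng(1)
A = rng.standard_normal((5, 5))
X = A @ rng.standard_normal((5, 2000))
print(np.isclose(np.trace(tme(X)), 1.0))
```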
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.138, + 0.47, + 0.348 + ], + "angle": 0, + "content": "One can apply the TME estimator to solve the RSR problem with a given dimension \\( d \\) by forming the subspace spanned by the top \\( d \\) eigenvectors of TME. Zhang [50] proved that as long as there are more than \\( Nd / D \\) inliers lying on a subspace, and the projected coordinates of these inliers on the \\( d \\)-subspace and the projected coordinates of the outliers on the \\( (D - d) \\)-dimensional orthogonal complement of the subspace are in general position, then TME recovers this subspace. Zhang [50] also showed that in this setting the above iterative formula converges (note that the condition of convergence in [22] does not apply in this case). The above lower bound of \\( Nd / D \\) on the number of inliers coincides with the general bound for the noiseless RSR problem, beyond which the problem becomes Small Set-Expansion (SSE) hard [16]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.348, + 0.471, + 0.44 + ], + "angle": 0, + "content": "Numerical experiments in [50] and [25] indicated state-of-the-art accuracy of TME compared to other RSR algorithms in various settings. The computational complexity of TME is of order \\( O(K(ND^2 + D^3)) \\), where \\( K \\) is the number of iterations. On the other hand, the cost of faster RSR algorithms is of order \\( O(KNDd) \\) [24, 25, 33]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.449, + 0.39, + 0.464 + ], + "angle": 0, + "content": "2.3. Motivation and Formulation of STE" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.473, + 0.469, + 0.578 + ], + "angle": 0, + "content": "We aim to use more cleverly the \\(d\\)-subspace information within the TME framework to form an RSR algorithm, instead of first estimating the full covariance. By using less degrees of freedom we can obtain a more accurate subspace estimator, especially when the fraction of outliers can be large. Furthermore, our idea allows us to improve the computational cost to become state-of-the-art for high-dimensional settings." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.58, + 0.47, + 0.744 + ], + "angle": 0, + "content": "Many RSR algorithms can be formulated as minimizing a best orthogonal projector onto a \\(d\\)-subspace [24, 25, 27, 33, 51]. We are going to do something similar, but unlike using an orthogonal projector, we will still use information from TME to get the shape of the data on the projected subspace. We will make the rest of the eigenvalues (i.e., bottom \\(D - d\\) ones) equal and shrink them by a parameter \\(0 < \\gamma < 1\\). We thus use a regularized version of a reduced-dimension covariance matrix. This parameter \\(\\gamma\\) plays a role in our theoretical estimates. Making \\(\\gamma\\) smaller helps with better subspace recovery, whereas making \\(\\gamma\\) bigger enhances the well-conditioning of the estimator." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.746, + 0.47, + 0.835 + ], + "angle": 0, + "content": "Following these basic ideas, we formulate our method, STE. For simplicity, we utilize covariance matrices and their inverses. Since these covariance matrices are essentially \\(d\\)-dimensional and include an additional simple regularizing component, our overall computations can be expressed in terms of the computation of the top \\(d\\) singular values of an \\(N \\times D\\) matrix (see §2.4)." 
+ }, + { + "type": "text", + "bbox": [ + 0.097, + 0.836, + 0.438, + 0.851 + ], + "angle": 0, + "content": "At iteration \\(k\\) we follow a similar step to that of TME:" + }, + { + "type": "equation", + "bbox": [ + 0.148, + 0.863, + 0.397, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Z} ^ {(k)} := \\sum_ {i = 1} ^ {N} \\boldsymbol {x} _ {i} \\boldsymbol {x} _ {i} ^ {\\top} / (\\boldsymbol {x} _ {i} ^ {\\top} (\\boldsymbol {\\Sigma} ^ {(k - 1)}) ^ {- 1} \\boldsymbol {x} _ {i}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.091, + 0.892, + 0.123 + ], + "angle": 0, + "content": "We compute the eigenvalues \\(\\{\\sigma_i\\}_{i = 1}^D\\) of \\(\\mathbf{Z}^{(k)}\\) and replace each of the bottom \\((D - d)\\) of them with \\(\\gamma \\cdot \\sigma_{d + 1,D}\\), where" + }, + { + "type": "equation", + "bbox": [ + 0.613, + 0.133, + 0.892, + 0.174 + ], + "angle": 0, + "content": "\\[\n\\sigma_ {d + 1, D} := \\frac {1}{D - d} \\sum_ {i = d + 1} ^ {D} \\sigma_ {i}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.187, + 0.892, + 0.278 + ], + "angle": 0, + "content": "We also compute the eigenvectors of \\(\\mathbf{Z}^{(k)}\\) and form the matrix \\(\\boldsymbol{\\Sigma}^{(k)}\\) with the same eigenvectors as those of \\(\\mathbf{Z}^{(k)}\\) and the modified eigenvalues, scaled to have trace 1. We iteratively repeat this procedure until the two estimators are sufficiently close. Algorithm 1 summarizes this procedure. Note that it is invariant to scaling of the data, similarly to TME." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.292, + 0.873, + 0.307 + ], + "angle": 0, + "content": "Algorithm 1 STE: Subspace-Constrained Tyler's Estimator" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.312, + 0.894, + 0.358 + ], + "angle": 0, + "content": "1: Input: \\(\\mathbf{X} = [\\pmb{x}_1, \\dots, \\pmb{x}_N] \\in \\mathbb{R}^{D \\times N}\\): centered data matrix, \\(d\\): subspace dimension, \\(K\\): maximum number of iterations, \\(\\tau, \\gamma\\): parameters." 
+ }, + { + "type": "text", + "bbox": [ + 0.511, + 0.359, + 0.72, + 0.373 + ], + "angle": 0, + "content": "2: Output: \\(L\\): \\(d\\)-subspace in \\(\\mathbb{R}^D\\)" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.373, + 0.619, + 0.389 + ], + "angle": 0, + "content": "3: \\(\\pmb{\\Sigma}^{(0)} = \\mathbf{I}_D / D\\)" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.389, + 0.638, + 0.404 + ], + "angle": 0, + "content": "4: for \\( k = 1,2,\\ldots \\) do" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.404, + 0.801, + 0.42 + ], + "angle": 0, + "content": "5: \\(\\mathbf{Z}^{(k)}\\gets \\sum_{i = 1}^{N}\\pmb {x}_i\\pmb {x}_i^\\top /\\left(\\pmb {x}_i^\\top (\\pmb{\\Sigma}^{(k - 1)})^{-1}\\pmb {x}_i\\right)\\)" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.419, + 0.746, + 0.435 + ], + "angle": 0, + "content": "6: \\([\\mathbf{U}^{(k)},\\mathbf{S}^{(k)},\\bar{\\mathbf{U}}^{(k)}]\\gets \\mathrm{EVD}(\\mathbf{Z}^{(k)})\\)" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.434, + 0.845, + 0.452 + ], + "angle": 0, + "content": "7: \\(\\sigma_{i}\\gets [\\mathbf{S}^{(k)}]_{ii}\\) and \\(\\sigma_{d + 1,D}\\leftarrow \\sum_{i = d + 1}^{D}\\sigma_{i} / (D - d)\\)" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.452, + 0.839, + 0.47 + ], + "angle": 0, + "content": "8: \\(\\widetilde{\\mathbf{S}}^{(k)}\\gets \\mathrm{diag}(\\sigma_1,\\dots ,\\sigma_d,\\gamma \\cdot \\sigma_{d + 1,D},\\dots ,\\gamma \\cdot \\sigma_{d + 1,D}),\\)" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.47, + 0.855, + 0.488 + ], + "angle": 0, + "content": "9: \\(\\pmb{\\Sigma}^{(k)}\\gets \\mathbf{U}^{(k)}\\widetilde{\\mathbf{S}}^{(k)}(\\mathbf{U}^{(k)})^{\\top} / \\mathrm{tr}\\bigl (\\mathbf{U}^{(k)}\\widetilde{\\mathbf{S}}^{(k)}(\\mathbf{U}^{(k)})^{\\top}\\bigr)\\)" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.488, + 0.795, + 0.504 + ], + "angle": 0, + "content": "10: Stop if \\( k \\geq K \\) or \\( \\| \\pmb{\\Sigma}^{(k)} - \\pmb{\\Sigma}^{(k-1)} \\|_F < \\tau \\)." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.505, + 0.581, + 0.517 + ], + "angle": 0, + "content": "11: end for" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.518, + 0.779, + 0.534 + ], + "angle": 0, + "content": "12: \\(L = \\operatorname{Span}\\) of the first \\(d\\) columns of \\(\\mathbf{U}^{(k)}\\)" + }, + { + "type": "list", + "bbox": [ + 0.506, + 0.312, + 0.894, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.563, + 0.745, + 0.579 + ], + "angle": 0, + "content": "2.4. Computational Complexity" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.585, + 0.892, + 0.733 + ], + "angle": 0, + "content": "Setting \\(w_{i}^{(k)} = (\\pmb{x}_{i}^{\\top}(\\pmb{\\Sigma}^{(k - 1)})^{-1}\\pmb{x}_{i})^{-1}\\), we can express \\(\\mathbf{Z}^{(k)}\\) as \\(\\mathbf{Z}^{(k)} = \\widetilde{\\mathbf{X}}\\widetilde{\\mathbf{X}}^{\\top}\\), where \\(\\widetilde{\\mathbf{X}} = [(w_1^{(k)})^{1 / 2}\\pmb{x}_1,\\dots,(w_N^{(k)})^{1 / 2}\\pmb{x}_N]\\). With some abuse of notation we denote by \\(\\sigma_{1},\\ldots ,\\sigma_{D}\\) the eigenvalues of \\(\\pmb{\\Sigma}^{(k - 1)}\\) (and not \\(\\pmb{\\Sigma}^{(k)}\\)). Since they are scaled to have trace 1, \\(\\sigma_{d + 1,D} = (1 - \\sum_{j = 1}^{d}\\sigma_{j}) / (D - d)\\). We thus only need the top \\(d\\) eigenvectors and top \\(d\\) eigenvalues of \\(\\pmb{\\Sigma}^{(k - 1)}\\) to update \\(\\widetilde{w}_i^{(k)}\\). 
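The following numpy sketch restates Algorithm 1 (our restructuring, for illustration; step numbers refer to the listing above). As noted in the complexity discussion, only the top \(d\) eigenpairs of the weighted covariance are really needed, since the remaining eigenvalues are all set to the same value; a full `eigh` is used here only for brevity.

```python
# Sketch of Algorithm 1 (STE): form the TME-style weighted covariance, flatten its bottom
# D - d eigenvalues to gamma * sigma_{d+1,D}, renormalize to trace one, and iterate.
import numpy as np

def ste(X, d, gamma=0.5, max_iter=100, tol=1e-10, eps=1e-15):
    """X: D x N centered data matrix. Returns (D x d subspace basis, scatter Sigma)."""
    D, N = X.shape
    Sigma = np.eye(D) / D                                   # step 3
    for _ in range(max_iter):
        w = 1.0 / (np.einsum('dn,dn->n', X, np.linalg.solve(Sigma, X)) + eps)
        Z = (X * w) @ X.T                                   # step 5
        evals, evecs = np.linalg.eigh(Z)                    # step 6
        evals, evecs = evals[::-1], evecs[:, ::-1]          # sort descending
        tail_mean = evals[d:].mean()                        # sigma_{d+1,D}, step 7
        new_evals = np.concatenate([evals[:d],
                                    np.full(D - d, gamma * tail_mean)])  # step 8
        Sigma_new = (evecs * new_evals) @ evecs.T
        Sigma_new /= np.trace(Sigma_new)                    # step 9
        done = np.linalg.norm(Sigma_new - Sigma) < tol      # step 10
        Sigma = Sigma_new
        if done:
            break
    return evecs[:, :d], Sigma                              # step 12

# Toy usage: an 8-subspace in R^9 with an 80% inlier fraction (below the 8/9 threshold).
rng = np.random.default_rng(2)
U_star = np.linalg.qr(rng.standard_normal((9, 8)))[0]
X = np.hstack([U_star @ rng.standard_normal((8, 800)), rng.standard_normal((9, 200))])
U_hat, _ = ste(X - X.mean(axis=1, keepdims=True), d=8, gamma=0.2)
print(np.linalg.norm(U_hat @ U_hat.T - U_star @ U_star.T))  # small if recovery succeeds
```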
Therefore, the complexity of STE can be of order \\(O(KNDd)\\) if a special fast algorithm is utilized for computing only the top \\(d\\) eigenvectors." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.742, + 0.716, + 0.758 + ], + "angle": 0, + "content": "2.5. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.891, + 0.81 + ], + "angle": 0, + "content": "STE depends on the parameters \\(K\\), \\(\\tau\\) and \\(\\gamma\\) and the initialization of \\(\\pmb{\\Sigma}^{(0)}\\). The first two parameters are rather standard in iterative procedures and do not raise any concern." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Our theory sheds some light on possible choices of \\(\\gamma\\) and in particular it indicates that the algorithm can be more sensitive to choices of \\(\\gamma\\) when the quantity defined later in (3) is relatively small. In this case, it may be beneficial to try several values of \\(\\gamma\\). We propose here a constructive way of doing it. We first form a sequence of \\(0 < \\gamma \\leq 1\\), e.g., \\(\\gamma_{k} = 1 / k, k = 1,\\dots,m\\). In order to" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "14577" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.212 + ], + "angle": 0, + "content": "determine the best choice of \\(\\gamma\\), we compute the distance of each data point \\(\\mathbf{x}\\) to each subspace \\(L_{k}\\), corresponding to the choice of \\(\\gamma_{k}\\), where \\(\\mathrm{dist}(\\mathbf{x}, L_{k}) = \\| \\mathbf{x} - \\mathbf{P}_{L_{k}} \\mathbf{x} \\|\\). We set a threshold \\(\\zeta\\), obtained by the median among all points and all subspaces and for each subspace, \\(L_{k}\\), we count the number of the inliers with distance below this threshold. The best \\(\\gamma_{k}\\) is determined according to the subspace yielding the largest number of inliers. We describe this procedure in Algorithm 2." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.213, + 0.47, + 0.317 + ], + "angle": 0, + "content": "For simplicity, we initialize with \\(\\boldsymbol{\\Sigma}^{(0)} = \\mathbf{I}_D / D\\) and note that with this choice \\(\\boldsymbol{\\Sigma}^{(1)}\\) reflects the trimmed covariance matrix and thus reflects the PCA subspace. One can also initialize with TME or other subspaces (see §3 where the theory of STE is discussed). One can further try several initialization (with possible random components) and use a strategy similar to Algorithm 2 to choose the best one." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.318, + 0.47, + 0.365 + ], + "angle": 0, + "content": "At last, we remark that when computing \\(\\mathbf{Z}^{(k)}\\) we want to ensure that \\(\\pmb{x}_i^\\top (\\pmb{\\Sigma}^{(k-1)})^{-1}\\pmb{x}_i\\) cannot be zero and we thus add the arbitrarily small number \\(10^{-15}\\) to this value." + }, + { + "type": "code_caption", + "bbox": [ + 0.08, + 0.38, + 0.329, + 0.395 + ], + "angle": 0, + "content": "Algorithm 2 Estimating best \\(\\gamma\\) for STE" + }, + { + "type": "algorithm", + "bbox": [ + 0.088, + 0.401, + 0.472, + 0.555 + ], + "angle": 0, + "content": "1: Input: \\(\\mathbf{X} = [\\pmb{x}_1, \\dots, \\pmb{x}_N] \\in \\mathbb{R}^{D \\times N}\\): centered data matrix, \\(d\\): subspace dimension, \\(\\{\\gamma_1, \\dots, \\gamma_m\\}\\): a set of pre-selected \\(\\gamma\\)'s. 
\n2: Output: \\(\\gamma^*\\): optimal \\(\\gamma\\) among \\(\\{\\gamma_1, \\dots, \\gamma_m\\}\\) \n3: for \\(j = 1, 2, \\dots, m\\) do \n4: \\(L^{(j)} \\gets \\mathrm{STE}(\\mathbf{X}, d, \\gamma_j)\\) \n5: \\(\\mathcal{D}^{(j)} \\gets \\{\\mathrm{dist}(\\pmb{x}_i, L^{(j)}) | \\pmb{x}_i \\in \\mathcal{X}\\}\\). \n6: end for \n7: Set \\(\\zeta = \\mathrm{median}(\\{\\mathcal{D}^{(1)}, \\dots, \\mathcal{D}^{(m)}\\})\\) \n8: \\(j^* = \\operatorname{argmax}_{1 \\leq j \\leq m} |\\mathcal{D}^{(j)}| < \\zeta\\) \n9: \\(\\gamma^* = \\gamma_j^*\\)" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.575, + 0.407, + 0.59 + ], + "angle": 0, + "content": "2.6. STE fuses TME and a Variant of FMS" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.47, + 0.766 + ], + "angle": 0, + "content": "STE is formally similar to both TME and FMS. Indeed, at each iteration these algorithms essentially compute \\(\\boldsymbol{\\Sigma}^{(k + 1)} = \\sum_{i = 1}^{N}w_{i}\\pmb{x}_{i}\\pmb{x}_{i}^{\\top}\\), where \\(w_{i}\\equiv w_{i}\\bigl (\\boldsymbol{\\Sigma}^{(k)}\\bigr)\\). We summarize the formal weights for FMS (with any choice of \\(p\\) for minimizing an \\(\\ell_p\\) energy in [24]), TME and STE. We ignore an additional scaling constant for TME and STE, obtained by dividing \\(w_{i}\\pmb{x}_{i}\\pmb{x}_{i}^{\\top}\\) above by its trace, and a regularization parameter \\(\\delta\\) for FMS. We express these formulas using the eigenvalues \\(\\sigma_1,\\ldots ,\\sigma_D\\) and eigenvectors \\(\\pmb{u}_1,\\dots ,\\pmb{u}_D\\) of the weighted sample covariance, \\(\\sum_{i = 1}^{N}w_{i}\\pmb{x}_{i}\\pmb{x}_{i}^{\\top}\\) for each method and \\(\\beta \\coloneqq \\gamma \\cdot \\sigma_{d + 1,D}\\) (see (2)) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.102, + 0.777, + 0.446, + 0.899 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} w _ {i} ^ {\\mathrm {F M S}} = \\frac {1}{\\left(\\sum_ {j = d + 1} ^ {D} \\left(\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {u} _ {j}\\right) ^ {2}\\right) ^ {(2 - p) / 2}}, \\\\ w _ {i} ^ {\\mathrm {T M E}} = \\frac {1}{\\sum_ {j = 1} ^ {D} \\sigma_ {j} ^ {- 1} (\\pmb {x} _ {i} ^ {\\top} \\pmb {u} _ {j}) ^ {2}}, \\\\ w _ {i} ^ {\\mathrm {S T E}} = \\frac {1}{\\sum_ {j = 1} ^ {d} \\sigma_ {j} ^ {- 1} (\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {u} _ {j}) ^ {2} + \\beta^ {- 1} \\sum_ {j = d + 1} ^ {D} (\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {u} _ {j}) ^ {2}}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.894, + 0.167 + ], + "angle": 0, + "content": "These weights aim to mitigate the impact of outliers in different ways. For FMS, \\(\\sum_{j=d+1}^{D}(\\boldsymbol{x}_i^\\top\\boldsymbol{u}_j)^2\\) is the squared distance of a data point \\(\\boldsymbol{x}_i\\) to the subspace \\(L\\). Thus for \\(p < 2\\), \\(w_i^{\\mathrm{FMS}}\\) is smaller for \"subspace-outliers\", where the robustness to such outliers increases when \\(p \\geq 0\\) decreases." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.168, + 0.894, + 0.272 + ], + "angle": 0, + "content": "The weights of TME are inversely proportional to the squared Mahalanobis distance of \\( \\pmb{x}_i \\) to the empirical distribution. They mitigate the effect of \"covariance-outliers\". If the dataset is concentrated on a \\( k \\)-subspace where \\( k < d \\), then TME can provide smaller weights to points lying away from this subspace, unlike FMS that does not distinguish between points within the larger \\( d \\)-subspace." 
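Referring back to Algorithm 2, here is a short sketch of the \(\gamma\)-selection rule (our paraphrase): run STE once per candidate \(\gamma\), pool all point-to-subspace distances, threshold at their overall median, and keep the candidate whose subspace captures the most points below that threshold.

```python
# Sketch of Algorithm 2: choose gamma by counting, for each candidate subspace, how many
# points fall below the median of all pooled point-to-subspace distances.
import numpy as np

def choose_gamma(X, d, gammas, subspace_fn):
    """X: D x N data; subspace_fn(X, d, gamma) -> D x d orthonormal basis (e.g., STE)."""
    dists = []
    for g in gammas:
        U = subspace_fn(X, d, g)                          # L^(j), step 4
        residual = X - U @ (U.T @ X)
        dists.append(np.linalg.norm(residual, axis=0))    # dist(x_i, L^(j)), step 5
    dists = np.stack(dists)                               # m x N
    zeta = np.median(dists)                               # pooled median, step 7
    counts = (dists < zeta).sum(axis=1)                   # points below threshold, step 8
    return gammas[int(np.argmax(counts))]                 # step 9

# Candidate grid used in the fundamental-matrix experiments: {1/2, 1/4, 1/6, 1/8, 1/10}.
gammas = [1.0 / (2 * i) for i in range(1, 6)]
```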
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.273, + 0.894, + 0.395 + ], + "angle": 0, + "content": "We note that the weights of STE fuse the above two weights. Within a \\(d\\)-subspace, they use the shape of the data. They can thus avoid outliers within this \\(d\\)-subspace. Within the orthogonal component of this subspace, they use a term proportional to that of FMS with \\(p = 0\\). We remark that such \\(\\ell_0\\) minimization has a clear interpretation for RSR, though it is generally hard to guarantee. Indeed, [24] has no guarantees for FMS with \\(p = 0\\). It can also yield unwanted spurious stationary points [26]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.406, + 0.582, + 0.423 + ], + "angle": 0, + "content": "3. Theory" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.431, + 0.892, + 0.567 + ], + "angle": 0, + "content": "We review a theoretical guarantee for STE, whose proof is given in [28]. It requires some conditions and we verify they hold with high probability under the asymptotic generalized haystack model. We assume a noiseless inliers-outliers RSR model. Let \\( L_{*} \\) denote the underlying \\( d \\)-subspace in \\( \\mathbb{R}^{D} \\), \\( \\mathcal{X}_{in} = \\mathcal{X} \\cap L_{*} \\) and \\( \\mathcal{X}_{out} = \\mathcal{X} \\setminus \\mathcal{X}_{in} \\) be the set of inliers and outliers, respectively, and \\( n_1 = |\\mathcal{X}_{in}| \\) and \\( n_0 = |\\mathcal{X}_{out}| \\) be the number of inliers and outliers. Our first assumption is a mild one on how well-conditioned the inliers are in \\( L_{*} \\) (compare e.g., other assumptions in [25, 32])." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.568, + 0.892, + 0.597 + ], + "angle": 0, + "content": "Assumption 1: Any \\(k\\)-subspace of \\(L_{*}\\), \\(1 \\leq k \\leq d\\), contains at most \\(n_1 k / d\\) points." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.892, + 0.688 + ], + "angle": 0, + "content": "Motivation for Assumption 2: The ratio of inliers per outliers, \\( n_1 / n_0 \\), in RSR is often referred to as the SNR (signal-to-noise ratio) [25, 32, 33]. The smaller it is, the best the subspace recovery is. We define the dimension-scaled SNR (DS-SNR) as the SNR obtained when scaling \\( n_1 \\) and \\( n_0 \\) by their respective dimensions (of \\( L_* \\) and \\( L_*^\\perp \\)):" + }, + { + "type": "equation", + "bbox": [ + 0.615, + 0.696, + 0.892, + 0.728 + ], + "angle": 0, + "content": "\\[\n\\mathrm {D S} - \\mathrm {S N R} := \\frac {n _ {1} / d}{n _ {0} / (D - d)}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.855 + ], + "angle": 0, + "content": "Zhang [50] showed that exact recovery by TME is guaranteed whenever DS-SNR \\(>1\\) (assuming general position assumptions on the inliers and outliers) and Hardt and Moitra [16] showed that when considering general datasets with general position assumptions on the inliers and outliers, the RSR problem is SSE hard if the DS-SNR is lower than 1. We aim to show that under the following weaker generic condition, STE can obtain exact recovery with DS-SNR, strictly lower than 1." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.856, + 0.894, + 0.871 + ], + "angle": 0, + "content": "Assumption 2: DS-SNR \\( > \\gamma \\), where \\( \\gamma < 1 \\) is the STE parameter." 
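A worked instance of Assumption 2 (our illustration): for fundamental matrix estimation, \(d = 8\) and \(D = 9\), so
\[
\mathrm{DS\text{-}SNR} = \frac{n_1/8}{n_0/1} > \gamma
\quad\Longleftrightarrow\quad
\frac{n_1}{n_1 + n_0} > \frac{8\gamma}{8\gamma + 1}.
\]
Taking \(\gamma = 1/5\) therefore only asks for an inlier fraction above \(1.6/2.6 \approx 61.5\%\), whereas the DS-SNR \(> 1\) threshold corresponds to \(8/9 \approx 88.9\%\); exact recovery at such lower fractions of course also requires assumptions 1 and 3 to hold.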
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Our last assumption requires a sufficiently good initialization for STE, but also implicitly involves additional hidden" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "14578" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.211 + ], + "angle": 0, + "content": "assumptions on the inliers and outliers. This is expected, since Assumption 1 does not require anything from the outliers and also has a very weak requirement from the inliers. To formulate the new assumption we define below some some basic condition numbers for good initialization (which are more complicated than the one for initialization by PCA suggested by [33] and [32]) and also quantities similar to the ones used to guarantee landscape stability in the theory of RSR [25, 27, 33, 51]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.212, + 0.469, + 0.243 + ], + "angle": 0, + "content": "Definitions required for Assumption 3: Recall that \\(\\pmb{\\Sigma}^{(0)}\\) denotes the initial value in Algorithm 1, and denote" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.25, + 0.353, + 0.271 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\Sigma} _ {L _ {1}, L _ {2}} ^ {(0)} = \\mathbf {U} _ {L _ {1}} ^ {\\top} \\boldsymbol {\\Sigma} ^ {(0)} \\mathbf {U} _ {L _ {2}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.277, + 0.345, + 0.292 + ], + "angle": 0, + "content": "We define the following condition number" + }, + { + "type": "equation", + "bbox": [ + 0.125, + 0.298, + 0.421, + 0.351 + ], + "angle": 0, + "content": "\\[\n\\kappa_ {1} = \\frac {\\sigma_ {d} \\left(\\boldsymbol {\\Sigma} _ {L _ {*} , L _ {*}} ^ {(0)} - \\boldsymbol {\\Sigma} _ {L _ {*} , L _ {*} ^ {\\perp}} ^ {(0)} \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*}} ^ {(0) - 1} \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*}} ^ {(0)}\\right)}{\\sigma_ {1} \\left(\\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*}} ^ {(0)}\\right)}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.357, + 0.469, + 0.403 + ], + "angle": 0, + "content": "To get a better intuition to this primary quantity of Assumption 3, we first express the initial estimator \\(\\Sigma^{(0)}\\), using basis vectors for \\(L_{*}\\) and \\(L_{*}^{\\perp}\\), as a \\(2\\times 2\\) block matrix" + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.409, + 0.35, + 0.453 + ], + "angle": 0, + "content": "\\[\n\\left( \\begin{array}{c c} \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*}} ^ {(0)} & \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*} ^ {\\perp}} ^ {(0)} \\\\ \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*}} ^ {(0)} & \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*} ^ {\\perp}} ^ {(0)} \\end{array} \\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.46, + 0.469, + 0.494 + ], + "angle": 0, + "content": "Defining \\(\\Sigma' = \\Sigma_{L_*, L_*}^{(0)} \\Sigma_{L_*^\\perp, L_*^\\perp}^{(0) - 1} \\Sigma_{L_*^\\perp, L_*}^{(0)}\\), we decompose this block matrix as" + }, + { + "type": "equation", + "bbox": [ + 0.125, + 0.499, + 0.424, + 0.543 + ], + "angle": 0, + "content": "\\[\n\\left( \\begin{array}{c c} \\boldsymbol {\\Sigma} ^ {\\prime} & \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*}} ^ {(0)} \\\\ \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*}} ^ {(0)} & \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*}} ^ {(0) ^ {\\perp}}, \\end{array} 
\\right) + \\left( \\begin{array}{c c} \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*}} ^ {(0)} - \\boldsymbol {\\Sigma} ^ {\\prime} & 0 \\\\ 0 & 0 \\end{array} \\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.549, + 0.469, + 0.699 + ], + "angle": 0, + "content": "We note that the numerator of \\(\\kappa_{1}\\) is the \\(d\\)-th eigenvalue of the second matrix in the above sum. We show in [28] that this eigenvalue is positive if \\(\\pmb{\\Sigma}^{(0)}\\) is positive definite, which can be easily enforced. The condition number is thus the ratio between the smallest positive eigenvalue of the second matrix of the sum and the largest eigenvalue of the component of the first matrix associated with \\(L_{*}^{\\perp}\\). Therefore, \\(\\kappa_{1}\\) expresses a ratio between a quantifier of a \\(d\\)-dimensional component of \\(\\pmb{\\Sigma}^{(0)}\\), associated with \\(L_{*}\\), and a quantifier of the projection onto \\(L_{*}^{\\perp}\\) of a full rank component of \\(\\pmb{\\Sigma}^{(0)}\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.7, + 0.469, + 0.744 + ], + "angle": 0, + "content": "We also define \\(\\Sigma_{in,*}\\) as the TME solution to the set of the projected inliers \\(\\{\\mathbf{U}_{L^*}\\pmb {x}|\\pmb {x}\\in \\mathcal{X}_{in}\\} \\subset \\mathbb{R}^d\\) and the following two condition numbers" + }, + { + "type": "equation", + "bbox": [ + 0.134, + 0.75, + 0.412, + 0.793 + ], + "angle": 0, + "content": "\\[\n\\kappa_ {2} = \\frac {\\sigma_ {1} \\left(\\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*} ^ {\\perp}} ^ {(0)}\\right)}{\\sigma_ {D} \\left(\\boldsymbol {\\Sigma} ^ {(0)}\\right)} \\text {a n d} \\kappa_ {i n} = \\frac {\\sigma_ {1} \\left(\\boldsymbol {\\Sigma} _ {i n , *}\\right)}{\\sigma_ {d} \\left(\\boldsymbol {\\Sigma} _ {i n , *}\\right)}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.798, + 0.469, + 0.859 + ], + "angle": 0, + "content": "We note that \\(\\kappa_{in}\\) is analogous to the condition number in (25) of [32], where we replace the sample covariance by the TME estimator. An analog to the alignment of outliers statistic [27, 33] for STE is" + }, + { + "type": "equation", + "bbox": [ + 0.19, + 0.864, + 0.357, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\mathcal {A} = \\left\\| \\sum_ {\\mathbf {x} \\in \\mathcal {X} _ {o u t}} \\frac {\\mathbf {x x} ^ {\\top}}{\\| \\mathbf {U} _ {L _ {*} ^ {\\perp}} \\mathbf {x} \\| ^ {2}} \\right\\|.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.831, + 0.107 + ], + "angle": 0, + "content": "An analog to the stability statistic [27, 33] for STE is" + }, + { + "type": "equation", + "bbox": [ + 0.614, + 0.117, + 0.778, + 0.156 + ], + "angle": 0, + "content": "\\[\n\\mathcal{S} = \\sigma_{d + 1,D}\\Bigl(\\sum_{\\mathbf{x}\\in \\mathcal{X}}\\frac{\\mathbf{x}\\mathbf{x}^{\\top}}{\\| \\mathbf{x}\\|^{2}}\\Bigr),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.168, + 0.738, + 0.182 + ], + "angle": 0, + "content": "where \\(\\sigma_{d + 1,D}(\\mathbf{X})\\) was defined in (2)." 
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.183, + 0.887, + 0.198 + ], + "angle": 0, + "content": "Assumption 3: There exists \\( C = C(\\gamma, \\mathrm{DS-SNR}) > 0 \\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.517, + 0.21, + 0.892, + 0.25 + ], + "angle": 0, + "content": "\\[\n\\kappa_ {1} \\geq C \\frac {d \\kappa_ {i n} \\mathcal {A}}{n _ {1}} \\left(\\kappa_ {i n} + \\frac {\\mathcal {A}}{\\frac {n _ {1}}{d} - \\gamma \\frac {n _ {0}}{D - d}} + \\frac {\\kappa_ {2} \\mathcal {A}}{\\gamma \\mathcal {S}} (1 + \\kappa_ {i n})\\right). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.26, + 0.892, + 0.411 + ], + "angle": 0, + "content": "The exact technical requirement on \\( C \\) is specified in [28]. In general, the larger the RHS of (4), the more restricted the choice of \\( \\Sigma^{(0)} \\) is. In particular, when \\( \\kappa_1 = \\infty \\), the definition of \\( \\kappa_1 \\) implies that \\( \\mathrm{Im}(\\Sigma^{(0)}) = L_* \\), so the subspace is already recovered by the initial estimate. Therefore, reducing the lower bound of \\( \\kappa_1 \\) may allow some flexibility, so a marginally suboptimal initialization could still work out. In [28], we show that under the asymptotic generalized haystack model, Assumption 3 can be interpreted as an upper bound on the largest principal angle between the initial and ground truth subspaces." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.411, + 0.892, + 0.502 + ], + "angle": 0, + "content": "Generic Theory: The next theorem suggests that under assumptions 1-3, STE nicely converges to an estimator that recovers \\( L_{*} \\). The main significance of this theory is that its assumptions can allow DS-SNR lower than 1 for special instances of datasets (for which the assumptions hold), unlike the general recovery theories of [16] and [50]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.511, + 0.894, + 0.604 + ], + "angle": 0, + "content": "Theorem 1. Under assumptions 1-3, the sequence \\(\\pmb{\\Sigma}^{(k)}\\) generated by STE converges to \\(\\mathbf{U}_{L_{*}}\\pmb{\\Sigma}_{in,*}\\mathbf{U}_{L_{*}}^{\\top}\\), the TME solution for the set of inliers \\(\\mathcal{X}_{in}\\). In addition, let \\(L^{(k)}\\) be the subspace spanned by the top \\(d\\) eigenvectors of \\(\\pmb{\\Sigma}^{(k)}\\), then the angle between \\(L^{(k)}\\) and \\(L_{*}\\), \\(\\angle (L^{(k)},L_{*}) = \\cos^{-1}(\\| \\mathbf{U}_{L^{(k)}}^{\\top}\\mathbf{U}_{L_{*}}\\|)\\), converges \\(r\\)-linearly to zero." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.614, + 0.892, + 0.659 + ], + "angle": 0, + "content": "We discuss insights of this theory on choices of the algorithms and further verify the above stated advantage of STE over TME assuming a common probabilistic model." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.892, + 0.81 + ], + "angle": 0, + "content": "Choice of \\(\\gamma\\) for subspace recovery: In order to avoid too large lower bound for \\(\\kappa_{1}\\) in (4), which we motivated above, it is good to find \\(\\epsilon_{1}\\) and \\(\\epsilon_{2} > 0\\), such that \\(\\gamma\\) lies in \\((\\epsilon_{1},\\mathrm{DS - SNR} - \\epsilon_{2})\\) (to notice this, observe the terms involving \\(\\gamma\\) in the denominators of the last two additive terms in (4)). 
We thus note that if the DS-SNR is expected to be sufficiently larger than 1, we can use, e.g., \\(\\gamma = 0.5\\), but when the DS-SNR can be close to 1 or lower (e.g., in fundamental matrix estimation), it is advisable to choose small values of \\(\\gamma\\) according to Algorithm 2 and their sizes may depend on the expected value of the DS-SNR." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Possible ways of Initialization: If one expects an initial estimated subspace \\(\\hat{L}\\) to have a sufficiently small angle \\(\\theta\\) with \\(L_{*}\\), where \\(\\theta = \\angle (\\hat{L}, L_{*})\\), then for \\(\\boldsymbol{\\Sigma}^{(0)} \\coloneqq \\Pi_{\\hat{L}} + \\epsilon \\mathbf{I}\\) it can be shown that \\(\\kappa_{1} > O(1 / (\\epsilon + \\theta))\\) and \\(\\kappa_{2} < O(1 + \\frac{\\theta}{\\epsilon})\\). Thus one may use a trusted RSR method, e.g., FMS. As discussed in §2.5, the choice \\(\\boldsymbol{\\Sigma}^{(0)} = \\mathbf{I}\\) (or a scaled version of it) corresponds to \\(\\hat{L}\\) being the" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "14579" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "PCA subspace (obtained at iteration 1). Also, using the TME solution for \\(\\pmb{\\Sigma}^{(0)}\\) corresponds to using the TME subspace as \\(\\hat{L}\\)" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.122, + 0.471, + 0.334 + ], + "angle": 0, + "content": "Theory under a probabilistic model: We show that under a common probabilistic model, the assumptions of Theorem 1, where \\(\\pmb{\\Sigma}^{(0)}\\) is obtained by TME, hold. Moreover, we show that STE (initialized by TME) can recover the correct subspace in situations with DS-SNR \\(< 1\\), whereas TME cannot recover the underlying subspace in such cases. We follow [33] and study the Generalized Haystack Model, though for simplicity, we assume Gaussian instead of sub-Gaussian distributions and an asymptotic setting. We assume \\(n_1\\) inliers i.i.d. sampled from a Gaussian distribution \\(N(0,\\pmb{\\Sigma}^{(in)} / d)\\), where \\(\\pmb{\\Sigma}^{(in)} \\in S_{+}(D)\\) and \\(L_{*} = \\mathrm{Im}(\\pmb{\\Sigma}^{(in)})\\) (so \\(\\pmb{\\Sigma}^{(in)}\\) has \\(d\\) nonzero eigenvalues), and \\(n_0\\) outliers are i.i.d. sampled from a Gaussian distribution \\(N(0,\\pmb{\\Sigma}^{(out)} / D)\\), where \\(\\pmb{\\Sigma}^{(out)} / D \\in S_{++}(D)\\). We define the following condition numbers of inliers (in \\(L_{*}\\)) and outliers:" + }, + { + "type": "equation", + "bbox": [ + 0.133, + 0.345, + 0.414, + 0.381 + ], + "angle": 0, + "content": "\\[\n\\kappa_ {i n} = \\frac {\\sigma_ {1} (\\pmb {\\Sigma} ^ {(i n)})}{\\sigma_ {d} (\\pmb {\\Sigma} ^ {(i n)})} \\mathrm {a n d} \\kappa_ {o u t} = \\frac {\\sigma_ {1} (\\pmb {\\Sigma} ^ {(o u t)})}{\\sigma_ {D} (\\pmb {\\Sigma} ^ {(o u t)})}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.391, + 0.47, + 0.543 + ], + "angle": 0, + "content": "Clearly, Assumption 1 holds under this model, and Assumption 2 constrains some of its parameters. Our next theorem shows that Assumption 3 holds under this model when the initial estimate \\(\\Sigma^{(out)}\\) for STE is obtained by TME. It also shows that in this case STE can solve the RSR problem even when DS-SNR \\(< 1\\), unlike TME. 
For simplicity, we formulate the theory for the asymptotic case, where \\(N \\to \\infty\\) and the theorem holds almost surely. It is possible to formulate it for a very large \\(N\\) with high probability, but it requires stating complicated constants depending on various parameters." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.553, + 0.473, + 0.724 + ], + "angle": 0, + "content": "Theorem 2. Assume data generated from the above generalized haystack model. Assume further that for \\(0 < \\mu < 1\\), which can be arbitrarily small, \\(d < (1 - \\mu)D - 2\\). Then, for any chosen \\(0 < c_{0} < 1\\), which is a lower bound for \\(\\gamma\\), there exists \\(\\eta \\coloneqq \\eta (\\kappa_{in},\\kappa_{out},c_0,\\mu) < 1\\) such that if DS-SNR \\(\\geq \\eta\\) and \\(\\pmb{\\Sigma}^{(0)}\\) is obtained by TME, then Assumption 3 for \\(\\pmb{\\Sigma}^{(0)}\\) is satisfied with \\(c_{0} < \\gamma < \\eta - c_{0}\\) almost surely as \\(N \\to \\infty\\). Consequently, the output of the STE algorithm, initialized by TME and with the choice of \\(c_{0} < \\gamma < \\eta - c_{0}\\), recovers \\(L_{*}\\). On the other hand, if \\(\\pmb{\\Sigma}_{L_{*},L_{*}^{\\perp}}^{(out)} \\neq 0\\) and DS-SNR \\(< 1\\), then the top \\(d\\) eigenvectors of TME do not recover \\(L_{*}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.84 + ], + "angle": 0, + "content": "There are three different regimes that the theorem covers. When DS-SNR \\(\\geq 1\\), both TME+STE (i.e., STE initialized by TME) and TME solve the RSR problem. When \\(\\eta \\leq\\) DSSNR \\(< 1\\), TME+STE solves the RSR problem and TME generally fails. When \\(\\gamma \\leq\\) DS-SNR \\(< \\eta\\), TME+STE might also fail, but STE with extremely good initialization (that satisfies Assumption 3) can still solve the problem." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.902 + ], + "angle": 0, + "content": "To get a basic idea of the dependence of \\(\\eta\\) on its parameters, we remark that \\(\\eta \\to 1\\) if either \\(c_0 \\to 0\\), \\(\\kappa_{in} \\to \\infty\\), \\(\\kappa_{out} \\to \\infty\\) or \\(\\mu \\to 0\\), where the parameter \\(\\mu\\) is somewhat artificial and might be removed with a tighter proof. Therefore, successful" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.894, + 0.153 + ], + "angle": 0, + "content": "performance of TME+STE requires a DS-SNR that is close to 1 when \\(\\gamma\\) is close to either 0 or \\(\\eta\\) (so that \\(c_{0}\\) is very small) or when either the inlier or outlier distribution is highly non-symmetric, that is, either \\(\\kappa_{in}\\) or \\(\\kappa_{out}\\) is large." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.166, + 0.831, + 0.184 + ], + "angle": 0, + "content": "4. Applications to Structure from Motion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.192, + 0.894, + 0.237 + ], + "angle": 0, + "content": "We apply STE to problems relevant to SfM: robust estimation of fundamental matrices (see §4.1), and initial screening of undesirable cameras (see §4.2)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.247, + 0.845, + 0.263 + ], + "angle": 0, + "content": "4.1. Robust Fundamental Matrix Estimation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.27, + 0.893, + 0.316 + ], + "angle": 0, + "content": "Fundamental matrix estimation from noisy and inexact keypoint matches is a core computer vision problem. It provides a challenging setting for applying RSR methods." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.316, + 0.894, + 0.422 + ], + "angle": 0, + "content": "We review this setting as follows. Let \\((\\pmb{x},\\pmb{x}^{\\prime})\\in \\mathbb{R}^{3}\\times \\mathbb{R}^{3}\\) be a correspondence pair of two points in different images that are projections of the same 3D point in the scene, where \\(\\pmb{x}\\) and \\(\\pmb{x}^{\\prime}\\) are expressed by homogeneous coordinates of planar points. The fundamental matrix \\(\\mathbf{F}\\in \\mathbb{R}^{3\\times 3}\\) relates these corresponding points and the epipolar lines they lie on as follows: \\(\\pmb{x}^{\\prime \\top}\\mathbf{F}\\pmb{x} = 0\\) [17], or equivalently," + }, + { + "type": "equation", + "bbox": [ + 0.619, + 0.433, + 0.893, + 0.451 + ], + "angle": 0, + "content": "\\[\n\\operatorname {v e c} (\\mathbf {F}) \\cdot \\operatorname {v e c} \\left(\\boldsymbol {x x} ^ {\\prime \\top}\\right) = 0. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.463, + 0.895, + 0.644 + ], + "angle": 0, + "content": "where \\(\\mathrm{vec}(\\cdot)\\) denotes the vectorized form of a matrix. Therefore, ideally, the set of all vectors in \\(\\mathbb{R}^9\\) of the form \\(\\mathrm{vec}(\\pmb{x}\\pmb{x}'^\\top)\\), where \\((\\pmb{x},\\pmb{x}') \\in \\mathbb{R}^3 \\times \\mathbb{R}^3\\) is a correspondence pair, lies on an 8-subspace in \\(\\mathbb{R}^9\\) and its orthogonal complement yields the fundamental matrix. In practice, the measurements of correspondence pairs can be highly corrupted due to poor matching. Moreover, some choices of correspondence pairs and the corruption mechanism may lead to concentration on low-dimensional subspaces within the desired 8-subspace. Furthermore, the corruption mechanism can lead to nontrivial settings of outliers. Lastly, since \\(d = 8\\) and \\(D = 9\\), the theoretical threshold of [16] translates to having the fraction of inliers among all data points be at least \\(8/9 \\approx 88.9\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.894, + 0.735 + ], + "angle": 0, + "content": "Therefore, this application is often a very challenging setting for direct RSR methods. The best performing RSR methods to date for fundamental matrix estimation are variants of RANSAC [10]. RANSAC avoids any subspace-modeling assumptions, but estimates the subspace based on testing myriads of samples, each having 7 or 8 point correspondences [17]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.895, + 0.902 + ], + "angle": 0, + "content": "We test the performance of STE in estimating the fundamental matrix on the Photo Tourism database [41], where the image correspondences are obtained by SIFT feature similarities [30]. We compare STE with the following 3 top RSR performers according to [25]: FMS [24], spherical FMS (SFMS) [24] and TME [47, 50]. We also compared with vanilla RANSAC [10] and two of its specialized extensions, which are state-of-the-art performers for estimating fundamental matrices: locally optimized RANSAC (LO-RANSAC) [6] and degeneracy-check enabled RANSAC (DEGENSAC) [7]. For the RSR methods we used codes from the supplementary material of [25]" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "14580" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.1, + 0.852, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.179, + 0.29, + 0.79, + 0.304 + ], + "angle": 0, + "content": "Figure 1. 
Median (relative) rotation errors obtained by seven algorithms for the 14 datasets of Photo Tourism." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.331, + 0.468, + 0.392 + ], + "angle": 0, + "content": "with their default options. We further used the Python package pydegensac for implementing LO-RANSAC and DEGENSAC with the inlier threshold \\(\\eta = 0.75\\). For STE, we used Algorithm 2 to estimate the best \\(\\gamma\\) from \\(\\{(2i)^{-1}\\}_{i=1}^{5}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.393, + 0.471, + 0.59 + ], + "angle": 0, + "content": "We measure the accuracy of the results according to the median and mean errors of relative rotation and direction vectors directly obtained by the fundamental matrices for each method. For computing these errors, we compared with ground-truth values provided by [41, 48]. Figure 1 describes the result of the mean errors for relative rotation per dataset of Photo Tourism, where the other three errors and \\(\\mathrm{mAA}(10^{\\circ})\\) are in the supplemental material. STE is significantly better than top RSR performers (TME, FMS and SFMS). Overall, it appears that STE performs better than vanilla RANSAC, except for the Ellis Island and Vienna Cathedral datasets, where RANSAC outperforms STE. STE is still competitive when compared with LO-RANSAC and DEGENSAC, except for Notre Dame and the latter two datasets." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.603, + 0.363, + 0.618 + ], + "angle": 0, + "content": "4.2. Initial Camera Removal for SfM" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.628, + 0.47, + 0.764 + ], + "angle": 0, + "content": "We propose a novel application of RSR for SfM and test STE for this application. Even though our framework is not sufficiently practical at this point, it allows testing STE in a different setting where \\( N = D \\) is very large and \\( d = 6 \\). Our idea is to use RSR within the SfM pipeline right after estimating the fundamental matrices, in order to remove some cameras that result in inaccurate estimated fundamental matrices. The hope is that eventually such methods may reduce corruption and speed up the costly later computationally intensive stages of the global SfM pipeline." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.766, + 0.471, + 0.902 + ], + "angle": 0, + "content": "There are two main reasons to question such a process. One may first question the gain in improving accuracy. Indeed, since the rest of the pipeline already identifies corrupted pairwise measurements, this process may not improve accuracy and may even harm it as it removes whole cameras and not pairs of cameras. That is, it is possible that a camera, which results in bad pairwise measurement, also contributes to some other accurate pairwise estimates that can improve the overall accuracy. The second concern is in terms of speed. In general," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.331, + 0.895, + 0.603 + ], + "angle": 0, + "content": "the removal of cameras may result in higher or comparable speed. Indeed, the LUD global pipeline [36], which we follow, examines the parallel rigidity of the viewing graph and extracts the maximal parallel rigid subgraph. Thus earlier removal of cameras may worsen the parallel rigidity of the graph and increase the computation due to the need of finding a maximal parallel rigid subgraph. For example, [40] removes cameras in an earlier stage of the LUD pipeline, but results in higher computational cost than the LUD pipeline. 
Therefore, improvement of speed for the LUD pipeline by removing cameras is generally non-trivial. Moreover, currently we use scale factors obtained by first running LUD, so we do not get a real speed improvement. Nevertheless, the proposed method is insightful whenever it may indicate clear improvement in accuracy for a dataset, since one can then infer that the current pipeline is not effective enough in handling corrupted measurements, which can be easily recognized by a simple method. Furthermore, improvement in \"speed\" can be indicative of maintaining parallel rigidity." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.608, + 0.895, + 0.791 + ], + "angle": 0, + "content": "Our RSR formulation is based on a fundamental observation by Sengupta et al. [39] on the low-rank of the \\( n \\)-view essential (or fundamental) matrix. The \\( n \\)-view essential matrix \\( \\mathbf{E} \\) of size \\( 3n \\times 3n \\) is formed by stacking all \\( \\binom{n}{2} \\) essential matrices, while being appropriately scaled. That is, the \\( ij \\)-th block of \\( \\mathbf{E} \\) is the essential matrix for the \\( i \\)-th and \\( j \\)-th cameras, where each \\( \\mathbf{E}_{ij} \\) is scaled by a factor \\( \\lambda_{ij} \\) in accordance with the global coordinate system (see [20, 21, 39]). It was noticed in [39] that \\( \\mathbf{E} \\) has rank 6. Moreover, [39] characterized the set of \\( n \\)-view essential matrices whose camera centers are not all collinear by the satisfaction of a few algebraic conditions, where the major one is \\( \\mathrm{rank}(\\mathbf{E}) = 6 \\). Further explanation appears in [20]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.895, + 0.903 + ], + "angle": 0, + "content": "We propose a straightforward application of RSR, utilizing these ideas to initially eliminate cameras that introduce significant corruption to the essential matrices. For this purpose, we compute the essential matrices (by computing first the fundamental matrices and then using the known camera calibration) and scale each matrix according to the factor obtained by the LUD pipeline [36] (note that this is the initial" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.956 + ], + "angle": 0, + "content": "14581" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.098, + 0.481, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.099, + 0.895, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.269, + 0.893, + 0.299 + ], + "angle": 0, + "content": "Figure 2. Mean (absolute) rotation errors (in degrees, left) and mean translation errors (in degrees, right) of LUD and four RSR methods used to initially screen bad cameras within LUD applied to the 14 datasets of Photo Tourism." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.473, + 0.536 + ], + "angle": 0, + "content": "scaling applied in [20, 21, 39] before applying a non-convex and nontrivial optimization procedure that refines such scales). Using these appropriately scaled essential matrices, we form the \\(n\\)-view essential matrix \\(\\mathbf{E}\\) of size \\(3n\\times 3n\\). We denote the \\(3n\\times 3\\) column blocks of \\(\\mathbf{E}\\) by \\(\\mathbf{E}_{:,1},\\dots,\\mathbf{E}_{:,n}\\) (since \\(\\mathbf{E}\\) is symmetric they are the same as the row blocks transposed). 
We treat \\(\\mathbf{E}\\) as a data matrix with \\(D = N = 3n\\), where the columns of \\(\\mathbf{E}\\) are the data points. We apply RSR with \\(d = 6\\), recover a \\(d\\)-dimensional robust subspace and identify the outlying columns whose distance is largest from this subspace. To avoid heuristic methods for the cutoff of outliers we assume a fixed percentage of \\(20\\%\\) outlying columns. If a column block, \\(\\mathbf{E}_{:,i}\\) contains an outlying column, we remove its corresponding camera \\(i\\). Consequently, a smaller percentage of cameras (about \\(10 - 15\\%\\)) will be eliminated." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.537, + 0.47, + 0.689 + ], + "angle": 0, + "content": "We use the Photo Tourism database [41] with precomputed pairwise image correspondences provided by [39] (they were obtained by thresholding SIFT feature similarities). To compute scale factors for the essential matrices we use the output of the LUD pipeline [36] as follows (following an idea proposed in [39] for initializing these values): Given the essential matrix for cameras \\(i\\) and \\(j\\) computed at an early stage of our pipeline, \\(\\mathbf{E}_{ij}\\), and the one obtained by the full LUD pipeline, \\(\\mathbf{E}_{ij}^{\\mathrm{LUD}}\\), the scaling factor is \\(\\lambda_{ij} = \\langle \\mathbf{E}_{ij}, [\\mathbf{E}_{ij}^{\\mathrm{LUD}}] \\rangle / \\| [\\mathbf{E}_{ij}^{\\mathrm{LUD}}] \\|_F^2\\). Since many values of \\(\\mathbf{E}_{ij}\\) are missing, we also apply matrix completion." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.471, + 0.901 + ], + "angle": 0, + "content": "We compare the LUD pipeline with the LUD pipeline combined with the filtering processes achieved by STE, FMS, SFMS, and TME. For STE we fix \\(\\gamma = 1/3\\), though any other value we tried yielded the same result. We report both mean and median errors of rotations and translations and runtime of the standard LUD and the RSR+LUD methods with initial screening of cameras. Figure 2 shows the mean rotation and translation errors, where the rest of the figures and a summarizing table are in the supplementary material. In general, STE demonstrates slightly higher accuracy compared to other RSR methods. Improved accuracy is particularly notable when matrix completion is not utilized, as demonstrated in the supplementary material. We observe that LUD+STE generally improves the estimation of camera parameters (both rotations and translations) over LUD." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.324, + 0.895, + 0.522 + ], + "angle": 0, + "content": "The improvement of LUD+STE is noticeable in Roman Forum and Gendarmenmarkt. In the supplementary material we show further improvement for Gendarmenmarkt with the removal of \\(45\\%\\) outlying columns. While the resulting errors are still large, their improvement shows some potential in dealing with difficult SfM structure by initially removing cameras in a way that may help eliminate some scene ambiguities, which are prevalent in Gendarmenmarkt. In terms of runtime, both LUD+STE and LUD+SFMS demonstrate significant improvements, where LUD+SFMS is even faster than LUD+STE. While this does not yet imply faster handling of the datasets (as we use initial scaling factors obtained by LUD), it indicates the efficiency of the removal of outliers in maintaining parallel rigidity." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.54, + 0.62, + 0.556 + ], + "angle": 0, + "content": "5. 
Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.894, + 0.734 + ], + "angle": 0, + "content": "We introduce STE, a meticulously crafted adaptation of TME designed to address challenges within RSR. Theoretical guarantees demonstrate its ability to recover the true underlying subspace reliably, even with a smaller fraction of inliers compared to the well-known theoretical threshold. Under the generalized haystack model, we show that this initialization can be chosen as TME itself, leading to improved handling of a smaller fraction of inliers compared to TME. Our exploration extends to practical applications, where STE proves effective in two 3D vision tasks: robust fundamental matrix estimation and screening of bad cameras for improved SfM." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Several avenues for future research include: \\(\\bullet\\) Exploring adaptations of other robust covariance estimation methods to RSR. \\(\\bullet\\) Studying effective initialization for STE both in theory and in practice. \\(\\bullet\\) In-depth theoretical exploration of the optimal choice of the parameter \\(\\gamma\\). \\(\\bullet\\) Study of alternative ways of adapting TME to RSR problems. \\(\\bullet\\) Improving STE for fundamental matrix estimation following ideas similar to those in [7, 12, 37] for addressing challenging degeneracies. \\(\\bullet\\) Enhancing our initial idea of initial removal of bad cameras, specifically attempting to use it to rectify challenging scene ambiguities. \\(\\bullet\\) Testing our methods for SfM using more recent feature matching algorithms." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "14582" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.17, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.157 + ], + "angle": 0, + "content": "[1] Larry P. Ammann. Robust singular value decompositions: A new approach to projection pursuit. Journal of the American Statistical Association, 88(422):pp. 505-514, 1993. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.472, + 0.214 + ], + "angle": 0, + "content": "[2] Daniel Barath, Jiri Matas, and Jana Noskova. MAGSAC: marginalizing sample consensus. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10197-10205, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.216, + 0.471, + 0.257 + ], + "angle": 0, + "content": "[3] Jian-Feng Cai, Emmanuel J. Candès, and Zuowei Shen. A singular value thresholding algorithm for matrix completion. SIAM Journal on optimization, 20(4):1956-1982, 2010. 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.259, + 0.471, + 0.3 + ], + "angle": 0, + "content": "[4] Yeshwanth Cherapanamjeri, Prateek Jain, and Praneeth Netrapalli. Thresholding based outlier robust PCA. In Conference on Learning Theory, pages 593-628. PMLR, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.302, + 0.471, + 0.343 + ], + "angle": 0, + "content": "[5] Vartan Choulakian. \\(L_{1}\\)-norm projection pursuit principal component analysis. Computational Statistics & Data Analysis, 50(6):1441-1451, 2006. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.346, + 0.471, + 0.401 + ], + "angle": 0, + "content": "[6] Ondrej Chum, Jií Matas, and Josef Kittler. Locally optimized RANSAC. 
In Pattern Recognition: 25th DAGM Symposium, Magdeburg, Germany, September 10-12, 2003. Proceedings 25, pages 236-243. Springer, 2003. 2, 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.403, + 0.471, + 0.459 + ], + "angle": 0, + "content": "[7] Ondrej Chum, Tomas Werner, and Jiri Matas. Two-view geometry estimation unaffected by a dominant plane. In 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), pages 772-779. IEEE, 2005. 2, 6, 8, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.46, + 0.471, + 0.543 + ], + "angle": 0, + "content": "[8] Ilias Diakonikolas, Gautam Kamath, Daniel M. Kane, Jerry Li, Ankur Moitra, and Alistair Stewart. Robustly learning a gaussian: Getting optimal error, efficiently. In Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms, page 2683-2702, USA, 2018. Society for Industrial and Applied Mathematics. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.545, + 0.471, + 0.613 + ], + "angle": 0, + "content": "[9] Chris Ding, Ding Zhou, Xiaofeng He, and Hongyuan Zha. R1-PCA: rotational invariant \\( L_{1} \\)-norm principal component analysis for robust subspace factorization. In ICML '06: Proceedings of the 23rd international conference on Machine learning, pages 281-288, New York, NY, USA, 2006. ACM. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.616, + 0.471, + 0.671 + ], + "angle": 0, + "content": "[10] Martin A. Fischler and Robert C. Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 1, 2, 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.673, + 0.471, + 0.714 + ], + "angle": 0, + "content": "[11] Gabriel Frahm and Uwe Jaekel. A generalization of Tyler's M-estimators to the case of incomplete data. Computational Statistics & Data Analysis, 54(2):374-393, 2010. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.716, + 0.471, + 0.771 + ], + "angle": 0, + "content": "[12] J-M Frahm and Marc Pollefeys. RANSAC for (quasi-) degenerate data (QDEGSAC). In 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06), pages 453-460. IEEE, 2006. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.773, + 0.471, + 0.815 + ], + "angle": 0, + "content": "[13] William Cole Franks and Ankur Moitra. Rigorous guarantees for Tyler's M-estimator via quantum expansion. In Conference on Learning Theory, pages 1601–1632. PMLR, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.817, + 0.471, + 0.857 + ], + "angle": 0, + "content": "[14] Jerome H. Friedman and John W. Tukey. A projection pursuit algorithm for exploratory data analysis. IEEE Transactions on Computers, C-23(9):881-890, 1974. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.859, + 0.471, + 0.901 + ], + "angle": 0, + "content": "[15] John Goes, Gilad Lerman, and Boaz Nadler. Robust sparse covariance estimation by thresholding Tyler's M-estimator. The Annals of Statistics, 48(1):86 - 110, 2020. 3" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.135 + ], + "angle": 0, + "content": "[16] Moritz Hardt and Ankur Moitra. Algorithms and hardness for robust subspace recovery. In Conference on Learning Theory, pages 354-375. PMLR, 2013. 
1, 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.137, + 0.894, + 0.176 + ], + "angle": 0, + "content": "[17] Richard Hartley and Andrew Zisserman. Multiple view geometry in computer vision. Cambridge university press, 2003. 6, 11, 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.18, + 0.894, + 0.235 + ], + "angle": 0, + "content": "[18] Yuhe Jin, Dmytro Mishkin, Anastasiia Mishchuk, Jiri Matas, Pascal Fua, Kwang Moo Yi, and Eduard Trulls. Image matching across wide baselines: From paper to practice. International Journal of Computer Vision, 129(2):517-547, 2021. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.238, + 0.894, + 0.292 + ], + "angle": 0, + "content": "[19] Arman Karimian and Roberto Tron. Essential matrix estimation using convex relaxations in orthogonal space. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17142-17152, 2023. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.295, + 0.894, + 0.363 + ], + "angle": 0, + "content": "[20] Yoni Kasten, Amnon Geifman, Meirav Galun, and Ronen Basri. Algebraic characterization of essential matrices and their averaging in multiview settings. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5895-5903, 2019. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.366, + 0.894, + 0.436 + ], + "angle": 0, + "content": "[21] Yoni Kasten, Amnon Geifman, Meirav Galun, and Ronen Basri. Gpsfm: Global projective sfm using algebraic constraints on multi-view fundamental matrices. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3264-3272, 2019. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.438, + 0.894, + 0.478 + ], + "angle": 0, + "content": "[22] John T. Kent and David E. Tyler. Maximum likelihood estimation for the wrapped Cauchy distribution. Journal of Applied Statistics, 15(2):247-254, 1988. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.481, + 0.894, + 0.522 + ], + "angle": 0, + "content": "[23] Nojun Kwak. Principal component analysis based on \\( L_{1} \\)-norm maximization. IEEE transactions on pattern analysis and machine intelligence, 30(9):1672-1680, 2008. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.525, + 0.894, + 0.565 + ], + "angle": 0, + "content": "[24] Gilad Lerman and Tyler Maunu. Fast, robust and non-convex subspace recovery. Information and Inference: A Journal of the IMA, 7(2):277–336, 2018. 1, 2, 3, 4, 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.568, + 0.894, + 0.609 + ], + "angle": 0, + "content": "[25] Gilad Lerman and Tyler Maunu. An overview of robust subspace recovery. Proceedings of the IEEE, 106(8):1380-1410, 2018. 1, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.611, + 0.894, + 0.653 + ], + "angle": 0, + "content": "[26] Gilad Lerman and Teng Zhang. \\(l_{p}\\)-recovery of the most significant subspace among multiple subspaces with outliers. Constr. Approx., 40(3):329-385, 2014. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.655, + 0.894, + 0.697 + ], + "angle": 0, + "content": "[27] Gilad Lerman, Michael B. McCoy, Joel A. Tropp, and Teng Zhang. Robust computation of linear models by convex relaxation. Found. Comput. Math., 15(2):363-410, 2015. 1, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.699, + 0.894, + 0.726 + ], + "angle": 0, + "content": "[28] Gilad Lerman, Feng Yu, and Teng Zhang. 
Theoretical guarantees for the subspace-constrained Tyler's estimator, 2024. 4, 5, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.728, + 0.894, + 0.784 + ], + "angle": 0, + "content": "[29] Guoying Li and Zhonglian Chen. Projection-pursuit approach to robust dispersion matrices and principal components: primary theory and monte carlo. Journal of the American Statistical Association, 80(391):759-766, 1985. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.786, + 0.894, + 0.826 + ], + "angle": 0, + "content": "[30] David G. Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60:91-110, 2004. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.829, + 0.894, + 0.871 + ], + "angle": 0, + "content": "[31] Ricardo A. Maronna and Víctor J. Yohai. Robust estimation of multivariate location and scatter. Wiley StatsRef: Statistics Reference Online, pages 1-12, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[32] Tyler Maunu and Gilad Lerman. Robust subspace recovery with adversarial outliers, 2019. 1, 2, 4, 5" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "14583" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "[33] Tyler Maunu, Teng Zhang, and Gilad Lerman. A well-tempered landscape for non-convex robust subspace recovery. J. Mach. Learn. Res., 20(1):1348–1406, 2019. 1, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.472, + 0.192 + ], + "angle": 0, + "content": "[34] Tyler Maunu, Chenyu Yu, and Gilad Lerman. Stochastic and private nonconvex outlier-robust PCAs. In Proceedings of Mathematical and Scientific Machine Learning, pages 173–188. PMLR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.194, + 0.472, + 0.235 + ], + "angle": 0, + "content": "[35] Michael McCoy and Joel A. Tropp. Two proposals for robust PCA using semidefinite programming. Electronic Journal of Statistics, 5(none):1123 - 1160, 2011. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.238, + 0.471, + 0.293 + ], + "angle": 0, + "content": "[36] Onur Ozyesil and Amit Singer. Robust camera location estimation by convex programming. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2674-2683, 2015. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.296, + 0.471, + 0.351 + ], + "angle": 0, + "content": "[37] Rahul Raguram, Ondrej Chum, Marc Pollefeys, Jiri Matas, and Jan-Michael Frahm. USAC: A universal framework for random sample consensus. IEEE transactions on pattern analysis and machine intelligence, 35(8):2022-2038, 2012. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.354, + 0.47, + 0.381 + ], + "angle": 0, + "content": "[38] Elvezio M. Ronchetti and Peter J. Huber. Robust statistics. John Wiley & Sons Hoboken, NJ, USA, 2009. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.384, + 0.471, + 0.467 + ], + "angle": 0, + "content": "[39] Soumyadip Sengupta, Tal Amir, Meirav Galun, Tom Goldstein, David W Jacobs, Amit Singer, and Ronen Basri. A new rank constraint on multi-view fundamental matrices, and its application to camera location recovery. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4798-4806, 2017. 7, 8, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.469, + 0.471, + 0.537 + ], + "angle": 0, + "content": "[40] Yunpeng Shi, Shaohan Li, Tyler Mauno, and Gilad Lerman. Scalable cluster-consistency statistics for robust multi-object matching. In International Conference on 3D Vision, 3DV 2021, London, United Kingdom, December 1-3, 2021, pages 352-360. IEEE, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.541, + 0.47, + 0.596 + ], + "angle": 0, + "content": "[41] Noah Snavely, Steven M. Seitz, and Richard Szeliski. Photo tourism: exploring photo collections in 3d. In ACM siggraph 2006 papers, pages 835-846. Association for Computing Machinery, 2006. 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.599, + 0.471, + 0.64 + ], + "angle": 0, + "content": "[42] Nathan Srebro and Tommi Jaakkola. Weighted low-rank approximations. In Proceedings of the 20th international conference on machine learning (ICML-03), pages 720–727, 2003. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.643, + 0.471, + 0.724 + ], + "angle": 0, + "content": "[43] Jacob Steinhardt, Moses Charikar, and Gregory Valiant. Resilience: A criterion for learning in the presence of arbitrary outliers. In 9th Innovations in Theoretical Computer Science Conference, ITCS 2018, January 11-14, 2018, Cambridge, MA, USA, pages 45:1-45:21. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.728, + 0.471, + 0.797 + ], + "angle": 0, + "content": "[44] Ben Tordoff and David W. Murray. Guided sampling and consensus for motion estimation. In Computer Vision—ECCV 2002: 7th European Conference on Computer Vision Copenhagen, Denmark, May 28–31, 2002 Proceedings, Part I 7, pages 82–96. Springer, 2002. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.8, + 0.471, + 0.841 + ], + "angle": 0, + "content": "[45] Philip H.S. Torr and Andrew Zisserman. MLESAC: A new robust estimator with application to estimating image geometry. Computer vision and image understanding, 78(1):138-156, 2000. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.843, + 0.47, + 0.871 + ], + "angle": 0, + "content": "[46] David E. Tyler. Statistical analysis for the angular central gaussian distribution on the sphere. Biometrika, 74(3):579-589, 1987. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[47] David E. Tyler. A distribution-free m-estimator of multivariate scatter. The Annals of Statistics, pages 234–251, 1987. 1, 2, 6, 13" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.092, + 0.893, + 0.148 + ], + "angle": 0, + "content": "[48] Kyle Wilson and Noah Snavely. Robust global translations with 1dsfm. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part III 13, pages 61-75. Springer, 2014. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.893, + 0.191 + ], + "angle": 0, + "content": "[49] Huan Xu, Constantine Caramanis, and Sujay Sanghavi. Robust PCA via outlier pursuit. Advances in neural information processing systems, 23, 2010. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.193, + 0.894, + 0.233 + ], + "angle": 0, + "content": "[50] Teng Zhang. 
Robust subspace recovery by Tyler's M-estimator. Information and Inference: A Journal of the IMA, 5(1):1-21, 2016. 1, 3, 4, 5, 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.894, + 0.275 + ], + "angle": 0, + "content": "[51] Teng Zhang and Gilad Lerman. A novel M-estimator for robust PCA. The Journal of Machine Learning Research, 15(1): 749–808, 2014. 1, 3, 5" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.092, + 0.894, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "14584" + } + ] +] \ No newline at end of file diff --git a/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/02a10508-95ff-4550-b14e-3121c8c91065_origin.pdf b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/02a10508-95ff-4550-b14e-3121c8c91065_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8787533c4315112754e54013e366dc1be4efd852 --- /dev/null +++ b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/02a10508-95ff-4550-b14e-3121c8c91065_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fff56cababb139ae97900b121d1dfed344998342e0d8c05e70b13c6bdfc27e6e +size 1083125 diff --git a/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/full.md b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/full.md new file mode 100644 index 0000000000000000000000000000000000000000..a3ffe6f891e717d4381f4b98cd24759a9bd1a231 --- /dev/null +++ b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/full.md @@ -0,0 +1,370 @@ +# A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion * + +Feng Yu† +University of Minnesota +fyu@umn.edu + +Teng Zhang +University of Central Florida +teng.zhang@ucf.edu + +Gilad Lerman ‡ +University of Minnesota +lerman@umn.edu + +# Abstract + +We present the subspace-constrained Tyler's estimator (STE) designed for recovering a low-dimensional subspace within a dataset that may be highly corrupted with outliers. STE is a fusion of the Tyler's M-estimator (TME) and a variant of the fast median subspace. Our theoretical analysis suggests that, under a common inlier-outlier model, STE can effectively recover the underlying subspace, even when it contains a smaller fraction of inliers relative to other methods in the field of robust subspace recovery. We apply STE in the context of Structure from Motion (SfM) in two ways: for robust estimation of the fundamental matrix and for the removal of outlying cameras, enhancing the robustness of the SfM pipeline. Numerical experiments confirm the state-of-the-art performance of our method in these applications. This research makes significant contributions to the field of robust subspace recovery, particularly in the context of computer vision and 3D reconstruction. + +# 1. Introduction + +In many applications, data has been collected in large quantities and dimensions. It is a common practice to represent such data within a low-dimensional subspace that preserves its essential information. Principal Component Analysis (PCA) is frequently employed to identify this subspace. However, PCA faces challenges when dealing with data contaminated by outliers. 
Consequently, the field of Robust Subspace Recovery (RSR) aims to develop a framework for outlier-robust PCA. RSR is particularly relevant to problems in computer vision, such as fundamental matrix estimation, which involves recovering a hidden subspace associated with "good correspondence pairs" among highly corrupted measurements. + +Various algorithms have been proposed to address RSR, employing methods such as projection pursuit [1, 5, 14, 23, 29, 35, 38], subspace energy minimization (in particular least absolute + +deviations and its relaxations) [9, 24, 27, 33, 34, 42, 43, 51], robust covariance estimation [50], filtering-based methods [4, 8, 49] and exhaustive subspace search methods [10, 16]. An in-depth exploration and comprehensive overview of robust subspace recovery and its diverse algorithms can be found in [25]. + +Methods based on robust covariance estimators, such as the Tyler's M-estimator (TME), offer additional useful information on the shape of the data within the subspace, similarly to PCA in the non-robust setting. They also offer maximum-likelihood interpretation, which is missing in many other methods. Application of the TME [47] to RSR has been shown to be successful on basic benchmarks [25, 50]. Moreover, under a model of inliers in a general position on a subspace and outliers in general position in the complement of the subspace, TME was shown to recover the subspace within a desirable fraction of inliers [50]. Below this fraction it was proved to be Small Set Expansion (SSE) hard to solve the RSR problem [16]. + +One may still succeed with solving the RSR problem with a computationally efficient algorithm when the fraction of inliers is lower than the one required by [16], considering a more restricted data model or violating other assumptions made in [16]. For example, some special results in this direction are discussed in [32]. Also, [33] proposes the generalized haystack model of inliers and outliers to demonstrate the possibility of handling lower fractions of inliers by an RSR algorithm. This model extends the limited standard haystack model [27], where basic methods (such as PCA filtering) can easily work with low fractions of outliers. Nevertheless, it is unclear how practical the above theoretical ideas are for applied settings. + +One practical setting that requires a fraction of inliers significantly lower than the one stated in [16] arises in the problem of robust fundamental (or essential) matrix estimation. The fundamental matrix encompasses the epipolar geometry of two views in stereo vision systems. It is typically computed using point correspondences between the two projected images. This computation requires finding an 8-dimensional subspace within a 9-dimensional ambient space. In this setting, the theoretical framework of [16] requires that the fraction of inliers be at least $8/9 \approx 88.9\%$ , which is clearly unreasonable to require. + +To date, the RANdom Sample Consensus (RANSAC) + +method [10] is the only RSR method that has been highly successful in addressing this nontrivial scenario, gaining widespread popularity in computer vision. RANSAC is an iterative method that randomly selects minimal subsets of the data and fits models, in particular subspaces, to identify the best consensus set, that is, the set in most agreement with the hypothesized model. 
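To make the consensus idea concrete, a minimal sketch of vanilla RANSAC adapted to fitting a $d$-subspace through the origin may look as follows (an illustration only; the number of trials and the inlier threshold `tol` are illustrative parameters):

```python
import numpy as np

def ransac_subspace(X, d, n_trials=1000, tol=1e-2, rng=None):
    """Vanilla RANSAC for a d-subspace: X is the D x N centered data matrix."""
    rng = np.random.default_rng(rng)
    D, N = X.shape
    best_basis, best_count = None, -1
    for _ in range(n_trials):
        # Hypothesize a subspace from a minimal sample of d data points.
        sample = X[:, rng.choice(N, size=d, replace=False)]
        Q, _ = np.linalg.qr(sample)                     # orthonormal basis of the sample's span
        # Consensus set: points whose distance to the hypothesized subspace is below tol.
        resid = np.linalg.norm(X - Q @ (Q.T @ X), axis=0)
        count = int((resid < tol).sum())
        if count > best_count:
            best_basis, best_count = Q, count
    return best_basis, best_count
```

Each trial costs $O(NDd)$, and the number of trials needed grows quickly with the fraction of outliers and with $d$, which is why such sampling schemes become intractable in higher dimensions.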
There are numerous approaches proposed to improve RANSAC, especially for this particular application, including locally optimized RANSAC (LO-RANSAC) [6], maximum likelihood estimator RANSAC (MLESAC) [45], degeneracy-check enabled RANSAC (DEGENSAC) [7] and M-estimator guided RANSAC (MAGSAC) [2]. A near-recovery theory for a variant of RANSAC under some assumptions on the outliers was suggested in [32]. Nevertheless, in general, RANSAC is rather slow and its application to higher-dimensional problems is intractable.

This work introduces a novel RSR algorithm that is guaranteed to robustly handle a lower fraction of inliers than the theoretical threshold proposed by [16], under special settings. Our basic idea is to adapt Tyler's M-estimator to utilize the information of the underlying $d$-dimensional subspace, while avoiding estimation of the full covariance. By using fewer degrees of freedom we obtain a subspace estimator that is more accurate than the one obtained by TME and has improved computational complexity. We show that STE is a fusion of the Tyler's M-estimator (TME) and a variant of the fast median subspace (FMS) [24] that aims to minimize a subspace-based $\ell_0$ energy.

Our theory shows that our proposed subspace-constrained Tyler's estimator (STE) algorithm can effectively recover the underlying subspace, even when it contains a smaller fraction of inliers relative to other methods. We obtain this nontrivial achievement first in a generic setting, where we establish when an initial estimator for STE is sufficiently well-conditioned to guarantee the desired robustness of STE. We then assume the asymptotic generalized haystack model and show that under this model, TME itself is a well-conditioned initial estimator for STE, and that unlike TME, STE with this initialization can deal with a lower fraction of inliers than the theoretical threshold specified in [16].

We demonstrate competitive performance in robust fundamental matrix estimation, relying solely on subspace information without additional methods for handling degenerate scenarios, in contrast to [7, 12, 37]. We also propose a potential application of RSR for removing bad cameras in order to enhance the SfM pipeline and show competitive performance of STE. This is a completely new idea and it may require additional exploration to make it practical. Nevertheless, it offers a very different testbed where $N = D$ is very large and RANSAC is generally intractable.

The rest of the paper is organized as follows: §2 introduces the STE framework, §3 establishes theoretical guarantees of STE, §4 applies STE to two different problems in SfM, demonstrating its competitive performance relative to existing algorithms, and §5 provides conclusions and future directions.

# 2. The STE Algorithm

We present our proposed STE. We review basic notation in §2.1 and Tyler's original estimator in §2.2. We describe our method in §2.3, its computational complexity in §2.4, its algorithmic choices in §2.5, and an interpretation of it as a fusion of TME and FMS with $p = 0$ in §2.6.

# 2.1. Notation

We use bold upper and lower case letters for matrices and column vectors, respectively. Let $\mathbf{I}_k$ denote the identity matrix in $\mathbb{R}^{k\times k}$; when $k$ is obvious we simply write $\mathbf{I}$. For a matrix $\mathbf{A}$, we denote by $\operatorname{tr}(\mathbf{A})$ and $\operatorname{Im}(\mathbf{A})$ the trace and image (i.e., column space) of $\mathbf{A}$.
We denote by $S_{+}(D)$ and $S_{++}(D)$ the sets of positive semidefinite and definite matrices in $\mathbb{R}^{D\times D}$ , respectively. We denote by $O(D,d)$ the set of semi-orthogonal $D\times d$ matrices, i.e., $\mathbf{U}\in O(D,d)$ if and only if $\mathbf{U}\in \mathbb{R}^{D\times d}$ and $\mathbf{U}^{\top}\mathbf{U} = \mathbf{I}_{d}$ . We refer to linear $d$ -dimensional subspaces as $d$ -subspaces. For a $d$ -subspace $L$ , we denote by $\mathbf{P}_L$ the $D\times D$ matrix representing the orthogonal projector onto $L$ . We also arbitrarily fix $\mathbf{U}_L$ in $O(D,d)$ such that $\mathbf{U}_L\mathbf{U}_L^\top = \mathbf{P}_L$ (such $\mathbf{U}_L$ is determined up to right multiplication by an orthogonal matrix in $O(d,d)$ ). Throughout the paper, $\mathcal{X} = \{\pmb{x}_i\}_{i = 1}^N\subset \mathbb{R}^D$ is assumed to be a given centered dataset, that is, $\sum_{i = 1}^{N}\pmb{x}_i = \mathbf{0}$ . + +# 2.2. Tyler's Estimator and its Application to RSR + +Tyler's M-estimator (TME) [47] robustly estimates the covariance $\pmb{\Sigma}^{*}$ of the dataset $\mathcal{X} = \{\pmb{x}_i\}_{i=1}^N \subset \mathbb{R}^D$ by minimizing + +$$ +\frac {D}{N} \sum_ {i = 1} ^ {N} \log \left(\boldsymbol {x} _ {i} ^ {\top} \boldsymbol {\Sigma} ^ {- 1} \boldsymbol {x} _ {i}\right) + \operatorname {l o g d e t} (\boldsymbol {\Sigma}) \tag {1} +$$ + +over $\pmb{\Sigma} \in S_{++}(D)$ such that $\mathrm{tr}(\pmb{\Sigma}) = 1$ . The cost function in (1) can be motivated by writing the maximum likelihood of the multivariate $t$ -distribution and letting its degrees of freedom parameter, $\nu$ , approach zero [31]. This cost function is invariant to dilations of $\pmb{\Sigma}$ , and the constraint on $\mathrm{tr}(\pmb{\Sigma})$ , whose value can be arbitrarily chosen, fixes a scale. TME also applies to scenarios where the covariance matrix does not exist. In such cases, TME estimates the shape (or scatter matrix) of the distribution, which is defined up to an arbitrary scale. More direct interpretations of TME as a maximum likelihood estimator can be found in [11, 46]. When $D$ is fixed and $N$ approaches infinity, TME is the "most robust" estimator of the shape matrix for data i.i.d. sampled from a continuous elliptical distribution [47] in a minimax sense, that is, as a minimizer of the maximal variance. + +Tyler [47] proposed the following iterative formula for computing TME: + +$$ +\boldsymbol {\Sigma} ^ {(k)} = \sum_ {i = 1} ^ {N} \frac {\boldsymbol {x} _ {i} \boldsymbol {x} _ {i} ^ {\top}}{\boldsymbol {x} _ {i} ^ {\top} (\boldsymbol {\Sigma} ^ {(k - 1)}) ^ {- 1} \boldsymbol {x} _ {i}} / \operatorname {t r} \left(\sum_ {i = 1} ^ {N} \frac {\boldsymbol {x} _ {i} \boldsymbol {x} _ {i} ^ {\top}}{\boldsymbol {x} _ {i} ^ {\top} (\boldsymbol {\Sigma} ^ {(k - 1)}) ^ {- 1} \boldsymbol {x} _ {i}}\right). +$$ + +Kent and Tyler [22] proved that if any $d$ -subspace of $\mathbb{R}^D$ , where $1 \leq d \leq D - 1$ , contains fewer than $Nd / D$ data points, then + +the above iterative procedure converges to TME. Linear rate of convergence was proved for the regularized TME in [15] and for TME in [13]. + +One can apply the TME estimator to solve the RSR problem with a given dimension $d$ by forming the subspace spanned by the top $d$ eigenvectors of TME. 
Zhang [50] proved that as long as there are more than $Nd / D$ inliers lying on a subspace, and the projected coordinates of these inliers on the $d$-subspace and the projected coordinates of the outliers on the $(D - d)$-dimensional orthogonal complement of the subspace are in general position, TME recovers this subspace. Zhang [50] also showed that in this setting the above iterative formula converges (note that the condition of convergence in [22] does not apply in this case). The above lower bound of $Nd / D$ on the number of inliers coincides with the general bound for the noiseless RSR problem, below which the problem becomes Small Set Expansion (SSE) hard [16]. + +Numerical experiments in [50] and [25] indicated state-of-the-art accuracy of TME compared to other RSR algorithms in various settings. The computational complexity of TME is of order $O(K(ND^2 + D^3))$, where $K$ is the number of iterations. On the other hand, the cost of faster RSR algorithms is of order $O(KNDd)$ [24, 25, 33]. + +# 2.3. Motivation and Formulation of STE + +We aim to use the $d$-subspace information more cleverly within the TME framework to form an RSR algorithm, instead of first estimating the full covariance. By using fewer degrees of freedom we can obtain a more accurate subspace estimator, especially when the fraction of outliers is large. Furthermore, our idea allows us to improve the computational cost to become state-of-the-art for high-dimensional settings. + +Many RSR algorithms can be formulated as minimizing a robust energy over orthogonal projectors onto $d$-subspaces [24, 25, 27, 33, 51]. We follow a similar approach, but instead of using only an orthogonal projector, we still use information from TME to capture the shape of the data within the subspace. We make the remaining eigenvalues (i.e., the bottom $D - d$ ones) equal and shrink them by a parameter $0 < \gamma < 1$. We thus use a regularized version of a reduced-dimension covariance matrix. This parameter $\gamma$ plays a role in our theoretical estimates. Making $\gamma$ smaller helps with subspace recovery, whereas making $\gamma$ larger improves the conditioning of the estimator. + +Following these basic ideas, we formulate our method, STE. For simplicity, we utilize covariance matrices and their inverses. Since these covariance matrices are essentially $d$-dimensional and include an additional simple regularizing component, our overall computations can be expressed in terms of the computation of the top $d$ singular values and vectors of an $N \times D$ matrix (see §2.4). + +At iteration $k$ we follow a similar step to that of TME: + +$$ \mathbf{Z}^{(k)} := \sum_{i=1}^{N} \boldsymbol{x}_i\boldsymbol{x}_i^{\top} / \left(\boldsymbol{x}_i^{\top}(\boldsymbol{\Sigma}^{(k-1)})^{-1}\boldsymbol{x}_i\right). $$ + +We compute the eigenvalues $\{\sigma_i\}_{i = 1}^D$ of $\mathbf{Z}^{(k)}$ and replace each of the bottom $(D - d)$ of them with $\gamma \cdot \sigma_{d + 1,D}$, where + +$$ \sigma_{d+1,D} := \frac{1}{D-d}\sum_{i=d+1}^{D}\sigma_i. \tag{2} $$ + +We also compute the eigenvectors of $\mathbf{Z}^{(k)}$ and form the matrix $\boldsymbol{\Sigma}^{(k)}$ with the same eigenvectors as those of $\mathbf{Z}^{(k)}$ and the modified eigenvalues, scaled to have trace 1. We iteratively repeat this procedure until two consecutive estimates are sufficiently close. Algorithm 1 summarizes this procedure. Note that it is invariant to scaling of the data, similarly to TME.
+ +# Algorithm 1 STE: Subspace-Constrained Tyler's Estimator + +1: Input: $\mathbf{X} = [\pmb{x}_1, \dots, \pmb{x}_N] \in \mathbb{R}^{D \times N}$: centered data matrix, $d$: subspace dimension, $K$: maximum number of iterations, $\tau, \gamma$: parameters. +2: Output: $L$: $d$-subspace in $\mathbb{R}^D$ +3: $\pmb{\Sigma}^{(0)} = \mathbf{I}_D / D$ +4: for $k = 1,2,\ldots$ do +5: $\mathbf{Z}^{(k)}\gets \sum_{i = 1}^{N}\pmb {x}_i\pmb {x}_i^\top /\left(\pmb {x}_i^\top (\pmb{\Sigma}^{(k - 1)})^{-1}\pmb {x}_i\right)$ +6: $[\mathbf{U}^{(k)},\mathbf{S}^{(k)},\bar{\mathbf{U}}^{(k)}]\gets \mathrm{EVD}(\mathbf{Z}^{(k)})$ +7: $\sigma_{i}\gets [\mathbf{S}^{(k)}]_{ii}$ and $\sigma_{d + 1,D}\leftarrow \sum_{i = d + 1}^{D}\sigma_{i} / (D - d)$ +8: $\widetilde{\mathbf{S}}^{(k)}\gets \mathrm{diag}(\sigma_1,\dots ,\sigma_d,\gamma \cdot \sigma_{d + 1,D},\dots ,\gamma \cdot \sigma_{d + 1,D})$ +9: $\pmb{\Sigma}^{(k)}\gets \mathbf{U}^{(k)}\widetilde{\mathbf{S}}^{(k)}(\mathbf{U}^{(k)})^{\top} / \mathrm{tr}\bigl (\mathbf{U}^{(k)}\widetilde{\mathbf{S}}^{(k)}(\mathbf{U}^{(k)})^{\top}\bigr)$ +10: Stop if $k \geq K$ or $\| \pmb{\Sigma}^{(k)} - \pmb{\Sigma}^{(k-1)} \|_F < \tau$. +11: end for +12: $L = \operatorname{Span}$ of the first $d$ columns of $\mathbf{U}^{(k)}$ + +# 2.4. Computational Complexity + +Setting $w_{i}^{(k)} = (\pmb{x}_{i}^{\top}(\pmb{\Sigma}^{(k - 1)})^{-1}\pmb{x}_{i})^{-1}$, we can express $\mathbf{Z}^{(k)}$ as $\mathbf{Z}^{(k)} = \widetilde{\mathbf{X}}\widetilde{\mathbf{X}}^{\top}$, where $\widetilde{\mathbf{X}} = [(w_1^{(k)})^{1 / 2}\pmb{x}_1,\dots,(w_N^{(k)})^{1 / 2}\pmb{x}_N]$. With some abuse of notation we denote by $\sigma_{1},\ldots ,\sigma_{D}$ the eigenvalues of $\pmb{\Sigma}^{(k - 1)}$ (and not $\pmb{\Sigma}^{(k)}$). Since they are scaled to have trace 1, $\sigma_{d + 1,D} = (1 - \sum_{j = 1}^{d}\sigma_{j}) / (D - d)$. We thus only need the top $d$ eigenvectors and top $d$ eigenvalues of $\pmb{\Sigma}^{(k - 1)}$ to update $w_i^{(k)}$. Therefore, the complexity of STE can be of order $O(KNDd)$ if a special fast algorithm is utilized for computing only the top $d$ eigenvectors. + +# 2.5. Implementation Details + +STE depends on the parameters $K$, $\tau$ and $\gamma$ and the initialization of $\pmb{\Sigma}^{(0)}$. The first two parameters are rather standard in iterative procedures and do not raise any concern. + +Our theory sheds some light on possible choices of $\gamma$ and in particular it indicates that the algorithm can be more sensitive to choices of $\gamma$ when the quantity defined later in (3) is relatively small. In this case, it may be beneficial to try several values of $\gamma$. We propose here a constructive way of doing it. We first form a sequence of values $0 < \gamma \leq 1$, e.g., $\gamma_{k} = 1 / k$, $k = 1,\dots,m$. In order to determine the best choice of $\gamma$, we compute the distance of each data point $\mathbf{x}$ to each subspace $L_{k}$, corresponding to the choice of $\gamma_{k}$, where $\mathrm{dist}(\mathbf{x}, L_{k}) = \| \mathbf{x} - \mathbf{P}_{L_{k}} \mathbf{x} \|$. We set a threshold $\zeta$ as the median distance among all points and all subspaces, and for each subspace $L_{k}$ we count the number of points with distance below this threshold. The best $\gamma_{k}$ is determined according to the subspace yielding the largest number of such points. We describe this procedure in Algorithm 2.
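+
+Before turning to initialization, the following minimal NumPy sketch mirrors Algorithm 1; for clarity it uses a full eigendecomposition rather than the top-$d$ computation of §2.4, and all variable names are our own. Wrapping it in a loop over candidate values of $\gamma$ and keeping the subspace with the most points below the median distance gives the selection procedure of Algorithm 2.
+
+```python
+import numpy as np
+
+def ste(X, d, gamma, max_iter=100, tol=1e-10, eps=1e-15):
+    """Sketch of Algorithm 1 (STE). X: (D, N) centered data matrix,
+    d: subspace dimension, gamma: shrinkage parameter in (0, 1).
+    Returns an orthonormal (D, d) basis of the recovered subspace
+    and the final scatter estimate."""
+    D, N = X.shape
+    Sigma = np.eye(D) / D                                    # step 3
+    U = np.eye(D)
+    for _ in range(max_iter):
+        inv_Sigma = np.linalg.inv(Sigma)
+        w = 1.0 / (np.einsum('in,ij,jn->n', X, inv_Sigma, X) + eps)
+        Z = (X * w) @ X.T                                    # step 5
+        s, U = np.linalg.eigh(Z)                             # step 6
+        s, U = s[::-1], U[:, ::-1]                           # eigenvalues in descending order
+        tail = s[d:].mean()                                  # sigma_{d+1,D}, step 7
+        s_mod = np.concatenate([s[:d], gamma * tail * np.ones(D - d)])  # step 8
+        Sigma_new = (U * s_mod) @ U.T
+        Sigma_new /= np.trace(Sigma_new)                     # step 9
+        if np.linalg.norm(Sigma_new - Sigma, 'fro') < tol:   # step 10
+            Sigma = Sigma_new
+            break
+        Sigma = Sigma_new
+    return U[:, :d], Sigma
+```
+
+A faster variant would compute only the top-$d$ eigenpairs of the weighted data matrix $\widetilde{\mathbf{X}}$ of §2.4 to reach the $O(KNDd)$ cost.
+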
+ +For simplicity, we initialize with $\boldsymbol{\Sigma}^{(0)} = \mathbf{I}_D / D$ and note that with this choice $\boldsymbol{\Sigma}^{(1)}$ reflects the trimmed covariance matrix and thus the PCA subspace. One can also initialize with TME or other subspaces (see §3 where the theory of STE is discussed). One can further try several initializations (with possible random components) and use a strategy similar to Algorithm 2 to choose the best one. + +Lastly, we remark that when computing $\mathbf{Z}^{(k)}$ we need to ensure that $\pmb{x}_i^\top (\pmb{\Sigma}^{(k-1)})^{-1}\pmb{x}_i$ is nonzero, and we thus add the small constant $10^{-15}$ to this value. + +Algorithm 2 Estimating best $\gamma$ for STE +1: Input: $\mathbf{X} = [\pmb{x}_1, \dots, \pmb{x}_N] \in \mathbb{R}^{D \times N}$: centered data matrix, $d$: subspace dimension, $\{\gamma_1, \dots, \gamma_m\}$: a set of pre-selected $\gamma$'s. +2: Output: $\gamma^*$: optimal $\gamma$ among $\{\gamma_1, \dots, \gamma_m\}$ +3: for $j = 1, 2, \dots, m$ do +4: $L^{(j)} \gets \mathrm{STE}(\mathbf{X}, d, \gamma_j)$ +5: $\mathcal{D}^{(j)} \gets \{\mathrm{dist}(\pmb{x}_i, L^{(j)}) \mid \pmb{x}_i \in \mathcal{X}\}$ +6: end for +7: Set $\zeta = \mathrm{median}(\mathcal{D}^{(1)} \cup \dots \cup \mathcal{D}^{(m)})$ +8: $j^* = \operatorname{argmax}_{1 \leq j \leq m} \left|\{\delta \in \mathcal{D}^{(j)} : \delta < \zeta\}\right|$ +9: $\gamma^* = \gamma_{j^*}$ + +# 2.6. STE fuses TME and a Variant of FMS + +STE is formally similar to both TME and FMS. Indeed, at each iteration these algorithms essentially compute $\boldsymbol{\Sigma}^{(k + 1)} = \sum_{i = 1}^{N}w_{i}\pmb{x}_{i}\pmb{x}_{i}^{\top}$, where $w_{i}\equiv w_{i}\bigl(\boldsymbol{\Sigma}^{(k)}\bigr)$. We summarize the formal weights for FMS (with any choice of $p$ for minimizing an $\ell_p$ energy in [24]), TME and STE. We ignore an additional scaling constant for TME and STE, obtained by dividing $w_{i}\pmb{x}_{i}\pmb{x}_{i}^{\top}$ above by its trace, and a regularization parameter $\delta$ for FMS. We express these formulas using the eigenvalues $\sigma_1,\ldots ,\sigma_D$ and eigenvectors $\pmb{u}_1,\dots ,\pmb{u}_D$ of the weighted sample covariance, $\sum_{i = 1}^{N}w_{i}\pmb{x}_{i}\pmb{x}_{i}^{\top}$, for each method and $\beta \coloneqq \gamma \cdot \sigma_{d + 1,D}$ (see (2)) as follows: + +$$ \begin{array}{l} w_i^{\mathrm{FMS}} = \frac{1}{\left(\sum_{j=d+1}^{D}\left(\boldsymbol{x}_i^{\top}\boldsymbol{u}_j\right)^2\right)^{(2-p)/2}}, \\ w_i^{\mathrm{TME}} = \frac{1}{\sum_{j=1}^{D}\sigma_j^{-1}(\boldsymbol{x}_i^{\top}\boldsymbol{u}_j)^2}, \\ w_i^{\mathrm{STE}} = \frac{1}{\sum_{j=1}^{d}\sigma_j^{-1}(\boldsymbol{x}_i^{\top}\boldsymbol{u}_j)^2 + \beta^{-1}\sum_{j=d+1}^{D}(\boldsymbol{x}_i^{\top}\boldsymbol{u}_j)^2}. \end{array} $$ + +These weights aim to mitigate the impact of outliers in different ways. For FMS, $\sum_{j=d+1}^{D}(\boldsymbol{x}_i^\top\boldsymbol{u}_j)^2$ is the squared distance of a data point $\boldsymbol{x}_i$ to the subspace $L$. Thus for $p < 2$, $w_i^{\mathrm{FMS}}$ is smaller for "subspace-outliers", where the robustness to such outliers increases as $p$ decreases toward $0$. + +The weights of TME are inversely proportional to the squared Mahalanobis distance of $\pmb{x}_i$ to the empirical distribution. They mitigate the effect of "covariance-outliers".
If the dataset is concentrated on a $k$-subspace where $k < d$, then TME can provide smaller weights to points lying away from this subspace, unlike FMS, which does not distinguish between points within the larger $d$-subspace. + +We note that the weights of STE fuse the above two weights. Within a $d$-subspace, they use the shape of the data. They can thus avoid outliers within this $d$-subspace. Within the orthogonal complement of this subspace, they use a term proportional to that of FMS with $p = 0$. We remark that such $\ell_0$ minimization has a clear interpretation for RSR, though it is generally hard to guarantee. Indeed, [24] has no guarantees for FMS with $p = 0$. It can also yield unwanted spurious stationary points [26]. + +# 3. Theory + +We review a theoretical guarantee for STE, whose proof is given in [28]. It requires some conditions and we verify that they hold with high probability under the asymptotic generalized haystack model. We assume a noiseless inliers-outliers RSR model. Let $L_{*}$ denote the underlying $d$-subspace in $\mathbb{R}^{D}$, $\mathcal{X}_{in} = \mathcal{X} \cap L_{*}$ and $\mathcal{X}_{out} = \mathcal{X} \setminus \mathcal{X}_{in}$ be the sets of inliers and outliers, respectively, and $n_1 = |\mathcal{X}_{in}|$ and $n_0 = |\mathcal{X}_{out}|$ be the numbers of inliers and outliers. Our first assumption is a mild one on how well-conditioned the inliers are in $L_{*}$ (compare, e.g., with other assumptions in [25, 32]). + +Assumption 1: Any $k$-subspace of $L_{*}$, $1 \leq k \leq d$, contains at most $n_1 k / d$ points. + +Motivation for Assumption 2: The ratio of inliers to outliers, $n_1 / n_0$, in RSR is often referred to as the SNR (signal-to-noise ratio) [25, 32, 33]. The smaller it is, the harder the subspace recovery problem is. We define the dimension-scaled SNR (DS-SNR) as the SNR obtained when scaling $n_1$ and $n_0$ by their respective dimensions (of $L_*$ and $L_*^\perp$): + +$$ \text{DS-SNR} := \frac{n_1 / d}{n_0 / (D - d)}. \tag{3} $$ + +Zhang [50] showed that exact recovery by TME is guaranteed whenever DS-SNR $>1$ (under general position assumptions on the inliers and outliers) and Hardt and Moitra [16] showed that, for general datasets satisfying such general position assumptions, the RSR problem is SSE hard if the DS-SNR is lower than 1. We aim to show that under the following weaker generic condition, STE can obtain exact recovery with a DS-SNR strictly lower than 1. + +Assumption 2: DS-SNR $> \gamma$, where $\gamma < 1$ is the STE parameter. + +Our last assumption requires a sufficiently good initialization for STE, but also implicitly involves additional hidden assumptions on the inliers and outliers. This is expected, since Assumption 1 does not require anything from the outliers and imposes only a very weak requirement on the inliers. To formulate the new assumption we define below some basic condition numbers for good initialization (which are more complicated than the one for initialization by PCA suggested by [33] and [32]) and also quantities similar to the ones used to guarantee landscape stability in the theory of RSR [25, 27, 33, 51]. + +Definitions required for Assumption 3: Recall that $\pmb{\Sigma}^{(0)}$ denotes the initial value in Algorithm 1, and denote + +$$ \boldsymbol{\Sigma}_{L_1,L_2}^{(0)} = \mathbf{U}_{L_1}^{\top}\boldsymbol{\Sigma}^{(0)}\mathbf{U}_{L_2}. $$ + +We define the following condition number + +$$ \kappa_1 = \frac{\sigma_d\left(\boldsymbol{\Sigma}_{L_*,L_*}^{(0)} - \boldsymbol{\Sigma}_{L_*,L_*^{\perp}}^{(0)}\boldsymbol{\Sigma}_{L_*^{\perp},L_*^{\perp}}^{(0)-1}\boldsymbol{\Sigma}_{L_*^{\perp},L_*}^{(0)}\right)}{\sigma_1\left(\boldsymbol{\Sigma}_{L_*^{\perp},L_*^{\perp}}^{(0)}\right)}. $$ + +To gain better intuition for this primary quantity of Assumption 3, we first express the initial estimator $\boldsymbol{\Sigma}^{(0)}$, using basis vectors for $L_{*}$ and $L_{*}^{\perp}$, as a $2\times 2$ block matrix + +$$ \left(\begin{array}{cc} \boldsymbol{\Sigma}_{L_*,L_*}^{(0)} & \boldsymbol{\Sigma}_{L_*,L_*^{\perp}}^{(0)} \\ \boldsymbol{\Sigma}_{L_*^{\perp},L_*}^{(0)} & \boldsymbol{\Sigma}_{L_*^{\perp},L_*^{\perp}}^{(0)} \end{array}\right). $$ + +Defining $\boldsymbol{\Sigma}' = \boldsymbol{\Sigma}_{L_*,L_*^{\perp}}^{(0)}\boldsymbol{\Sigma}_{L_*^{\perp},L_*^{\perp}}^{(0)-1}\boldsymbol{\Sigma}_{L_*^{\perp},L_*}^{(0)}$, we decompose this block matrix as + +$$ \left(\begin{array}{cc} \boldsymbol{\Sigma}' & \boldsymbol{\Sigma}_{L_*,L_*^{\perp}}^{(0)} \\ \boldsymbol{\Sigma}_{L_*^{\perp},L_*}^{(0)} & \boldsymbol{\Sigma}_{L_*^{\perp},L_*^{\perp}}^{(0)} \end{array}\right) + \left(\begin{array}{cc} \boldsymbol{\Sigma}_{L_*,L_*}^{(0)} - \boldsymbol{\Sigma}' & \mathbf{0} \\ \mathbf{0} & \mathbf{0} \end{array}\right). $$ + +We note that the numerator of $\kappa_{1}$ is the $d$-th eigenvalue of the second matrix in the above sum. We show in [28] that this eigenvalue is positive if $\pmb{\Sigma}^{(0)}$ is positive definite, which can be easily enforced. The condition number is thus the ratio between the smallest positive eigenvalue of the second matrix of the sum and the largest eigenvalue of the component of the first matrix associated with $L_{*}^{\perp}$. Therefore, $\kappa_{1}$ expresses a ratio between a quantifier of a $d$-dimensional component of $\pmb{\Sigma}^{(0)}$, associated with $L_{*}$, and a quantifier of the projection onto $L_{*}^{\perp}$ of a full-rank component of $\pmb{\Sigma}^{(0)}$. + +We also define $\boldsymbol{\Sigma}_{in,*}$ as the TME solution for the set of projected inliers $\{\mathbf{U}_{L_*}^{\top}\pmb{x} \mid \pmb{x}\in \mathcal{X}_{in}\} \subset \mathbb{R}^d$ and the following two condition numbers + +$$ \kappa_2 = \frac{\sigma_1\left(\boldsymbol{\Sigma}_{L_*^{\perp},L_*^{\perp}}^{(0)}\right)}{\sigma_D\left(\boldsymbol{\Sigma}^{(0)}\right)} \quad\text{and}\quad \kappa_{in} = \frac{\sigma_1\left(\boldsymbol{\Sigma}_{in,*}\right)}{\sigma_d\left(\boldsymbol{\Sigma}_{in,*}\right)}. $$ + +We note that $\kappa_{in}$ is analogous to the condition number in (25) of [32], where we replace the sample covariance by the TME estimator. An analog to the alignment of outliers statistic [27, 33] for STE is + +$$ \mathcal{A} = \left\| \sum_{\mathbf{x}\in \mathcal{X}_{out}} \frac{\mathbf{x}\mathbf{x}^{\top}}{\| \mathbf{U}_{L_*^{\perp}}^{\top}\mathbf{x}\|^{2}} \right\|. $$ + +An analog to the stability statistic [27, 33] for STE is + +$$ \mathcal{S} = \sigma_{d + 1,D}\Bigl(\sum_{\mathbf{x}\in \mathcal{X}}\frac{\mathbf{x}\mathbf{x}^{\top}}{\| \mathbf{x}\|^{2}}\Bigr), $$ + +where $\sigma_{d + 1,D}(\cdot)$ denotes the average of the bottom $D - d$ eigenvalues of its argument, as in (2).
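+
+For a synthetic experiment in which the ground-truth subspace and the inlier/outlier split are known, these quantities can be evaluated directly. The following NumPy sketch (our own helper, not part of the paper's code) does so from an initialization $\boldsymbol{\Sigma}^{(0)}$ and an orthonormal basis of $L_*$; it omits $\kappa_{in}$, which additionally requires the TME fit of the projected inliers.
+
+```python
+import numpy as np
+
+def ste_theory_stats(Sigma0, U_star, X_in, X_out):
+    """Evaluate kappa_1, kappa_2, the alignment A and the stability S for
+    synthetic data where the ground-truth basis U_star (D x d) and the
+    inlier/outlier split are known.  All names here are ours."""
+    D, d = U_star.shape
+    # Orthonormal basis of L_*^perp from the projector onto the complement.
+    U_perp = np.linalg.svd(np.eye(D) - U_star @ U_star.T)[0][:, :D - d]
+    S11 = U_star.T @ Sigma0 @ U_star            # Sigma^(0)_{L*, L*}
+    S12 = U_star.T @ Sigma0 @ U_perp            # Sigma^(0)_{L*, L*^perp}
+    S22 = U_perp.T @ Sigma0 @ U_perp            # Sigma^(0)_{L*^perp, L*^perp}
+    schur = S11 - S12 @ np.linalg.solve(S22, S12.T)
+    kappa1 = np.linalg.eigvalsh(schur)[0] / np.linalg.eigvalsh(S22)[-1]
+    kappa2 = np.linalg.eigvalsh(S22)[-1] / np.linalg.eigvalsh(Sigma0)[0]
+    # Alignment of outliers: spectral norm of sum_x x x^T / ||U_perp^T x||^2.
+    w = 1.0 / np.sum((U_perp.T @ X_out) ** 2, axis=0)
+    A = np.linalg.norm((X_out * w) @ X_out.T, 2)
+    # Stability: average of the bottom D-d eigenvalues of sum_x x x^T / ||x||^2.
+    X = np.hstack([X_in, X_out])
+    Xn = X / np.linalg.norm(X, axis=0)
+    S = np.linalg.eigvalsh(Xn @ Xn.T)[:D - d].mean()
+    return kappa1, kappa2, A, S
+```
+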
+ +Assumption 3: There exists $C = C(\gamma, \mathrm{DS-SNR}) > 0$ such that + +$$ +\kappa_ {1} \geq C \frac {d \kappa_ {i n} \mathcal {A}}{n _ {1}} \left(\kappa_ {i n} + \frac {\mathcal {A}}{\frac {n _ {1}}{d} - \gamma \frac {n _ {0}}{D - d}} + \frac {\kappa_ {2} \mathcal {A}}{\gamma \mathcal {S}} (1 + \kappa_ {i n})\right). \tag {4} +$$ + +The exact technical requirement on $C$ is specified in [28]. In general, the larger the RHS of (4), the more restricted the choice of $\Sigma^{(0)}$ is. In particular, when $\kappa_1 = \infty$ , the definition of $\kappa_1$ implies that $\mathrm{Im}(\Sigma^{(0)}) = L_*$ , so the subspace is already recovered by the initial estimate. Therefore, reducing the lower bound of $\kappa_1$ may allow some flexibility, so a marginally suboptimal initialization could still work out. In [28], we show that under the asymptotic generalized haystack model, Assumption 3 can be interpreted as an upper bound on the largest principal angle between the initial and ground truth subspaces. + +Generic Theory: The next theorem suggests that under assumptions 1-3, STE nicely converges to an estimator that recovers $L_{*}$ . The main significance of this theory is that its assumptions can allow DS-SNR lower than 1 for special instances of datasets (for which the assumptions hold), unlike the general recovery theories of [16] and [50]. + +Theorem 1. Under assumptions 1-3, the sequence $\pmb{\Sigma}^{(k)}$ generated by STE converges to $\mathbf{U}_{L_{*}}\pmb{\Sigma}_{in,*}\mathbf{U}_{L_{*}}^{\top}$ , the TME solution for the set of inliers $\mathcal{X}_{in}$ . In addition, let $L^{(k)}$ be the subspace spanned by the top $d$ eigenvectors of $\pmb{\Sigma}^{(k)}$ , then the angle between $L^{(k)}$ and $L_{*}$ , $\angle (L^{(k)},L_{*}) = \cos^{-1}(\| \mathbf{U}_{L^{(k)}}^{\top}\mathbf{U}_{L_{*}}\|)$ , converges $r$ -linearly to zero. + +We discuss insights of this theory on choices of the algorithms and further verify the above stated advantage of STE over TME assuming a common probabilistic model. + +Choice of $\gamma$ for subspace recovery: In order to avoid too large lower bound for $\kappa_{1}$ in (4), which we motivated above, it is good to find $\epsilon_{1}$ and $\epsilon_{2} > 0$ , such that $\gamma$ lies in $(\epsilon_{1},\mathrm{DS - SNR} - \epsilon_{2})$ (to notice this, observe the terms involving $\gamma$ in the denominators of the last two additive terms in (4)). We thus note that if the DS-SNR is expected to be sufficiently larger than 1, we can use, e.g., $\gamma = 0.5$ , but when the DS-SNR can be close to 1 or lower (e.g., in fundamental matrix estimation), it is advisable to choose small values of $\gamma$ according to Algorithm 2 and their sizes may depend on the expected value of the DS-SNR. + +Possible ways of Initialization: If one expects an initial estimated subspace $\hat{L}$ to have a sufficiently small angle $\theta$ with $L_{*}$ , where $\theta = \angle (\hat{L}, L_{*})$ , then for $\boldsymbol{\Sigma}^{(0)} \coloneqq \Pi_{\hat{L}} + \epsilon \mathbf{I}$ it can be shown that $\kappa_{1} > O(1 / (\epsilon + \theta))$ and $\kappa_{2} < O(1 + \frac{\theta}{\epsilon})$ . Thus one may use a trusted RSR method, e.g., FMS. As discussed in §2.5, the choice $\boldsymbol{\Sigma}^{(0)} = \mathbf{I}$ (or a scaled version of it) corresponds to $\hat{L}$ being the + +PCA subspace (obtained at iteration 1). 
Also, using the TME solution for $\pmb{\Sigma}^{(0)}$ corresponds to using the TME subspace as $\hat{L}$. + +Theory under a probabilistic model: We show that under a common probabilistic model, the assumptions of Theorem 1, where $\pmb{\Sigma}^{(0)}$ is obtained by TME, hold. Moreover, we show that STE (initialized by TME) can recover the correct subspace in situations with DS-SNR $< 1$, whereas TME cannot recover the underlying subspace in such cases. We follow [33] and study the Generalized Haystack Model, though for simplicity, we assume Gaussian instead of sub-Gaussian distributions and an asymptotic setting. We assume that $n_1$ inliers are i.i.d. sampled from a Gaussian distribution $N(0,\pmb{\Sigma}^{(in)} / d)$, where $\pmb{\Sigma}^{(in)} \in S_{+}(D)$ and $L_{*} = \mathrm{Im}(\pmb{\Sigma}^{(in)})$ (so $\pmb{\Sigma}^{(in)}$ has $d$ nonzero eigenvalues), and $n_0$ outliers are i.i.d. sampled from a Gaussian distribution $N(0,\pmb{\Sigma}^{(out)} / D)$, where $\pmb{\Sigma}^{(out)} / D \in S_{++}(D)$. We define the following condition numbers of inliers (in $L_{*}$) and outliers: + +$$ \kappa_{in} = \frac{\sigma_1(\pmb{\Sigma}^{(in)})}{\sigma_d(\pmb{\Sigma}^{(in)})} \quad\text{and}\quad \kappa_{out} = \frac{\sigma_1(\pmb{\Sigma}^{(out)})}{\sigma_D(\pmb{\Sigma}^{(out)})}. $$ + +Clearly, Assumption 1 holds under this model, and Assumption 2 constrains some of its parameters. Our next theorem shows that Assumption 3 holds under this model when the initial estimate $\pmb{\Sigma}^{(0)}$ for STE is obtained by TME. It also shows that in this case STE can solve the RSR problem even when DS-SNR $< 1$, unlike TME. For simplicity, we formulate the theory for the asymptotic case, where $N \to \infty$ and the theorem holds almost surely. It is possible to formulate it for a very large $N$ with high probability, but this requires stating complicated constants depending on various parameters. + +Theorem 2. Assume data generated from the above generalized haystack model. Assume further that for $0 < \mu < 1$, which can be arbitrarily small, $d < (1 - \mu)D - 2$. Then, for any chosen $0 < c_{0} < 1$, which is a lower bound for $\gamma$, there exists $\eta \coloneqq \eta (\kappa_{in},\kappa_{out},c_0,\mu) < 1$ such that if DS-SNR $\geq \eta$ and $\pmb{\Sigma}^{(0)}$ is obtained by TME, then Assumption 3 for $\pmb{\Sigma}^{(0)}$ is satisfied with $c_{0} < \gamma < \eta - c_{0}$ almost surely as $N \to \infty$. Consequently, the output of the STE algorithm, initialized by TME and with the choice of $c_{0} < \gamma < \eta - c_{0}$, recovers $L_{*}$. On the other hand, if $\pmb{\Sigma}_{L_{*},L_{*}^{\perp}}^{(out)} \neq 0$ and DS-SNR $< 1$, then the top $d$ eigenvectors of TME do not recover $L_{*}$. + +There are three different regimes that the theorem covers. When DS-SNR $\geq 1$, both TME+STE (i.e., STE initialized by TME) and TME solve the RSR problem. When $\eta \leq$ DS-SNR $< 1$, TME+STE solves the RSR problem and TME generally fails. When $\gamma \leq$ DS-SNR $< \eta$, TME+STE might also fail, but STE with an extremely good initialization (that satisfies Assumption 3) can still solve the problem. + +To get a basic idea of the dependence of $\eta$ on its parameters, we remark that $\eta \to 1$ if either $c_0 \to 0$, $\kappa_{in} \to \infty$, $\kappa_{out} \to \infty$ or $\mu \to 0$, where the parameter $\mu$ is somewhat artificial and might be removed with a tighter proof.
Therefore, successful performance of TME+STE requires a DS-SNR that is close to 1 when $\gamma$ is close to either 0 or $\eta$ (so that $c_{0}$ is very small) or when either the inlier or outlier distribution is highly non-symmetric, that is, either $\kappa_{in}$ or $\kappa_{out}$ is large. + +# 4. Applications to Structure from Motion + +We apply STE to problems relevant to SfM: robust estimation of fundamental matrices (see §4.1), and initial screening of undesirable cameras (see §4.2). + +# 4.1. Robust Fundamental Matrix Estimation + +Fundamental matrix estimation from noisy and inexact keypoint matches is a core computer vision problem. It provides a challenging setting for applying RSR methods. + +We review this setting as follows. Let $(\pmb{x},\pmb{x}^{\prime})\in \mathbb{R}^{3}\times \mathbb{R}^{3}$ be a correspondence pair of two points in different images that are projections of the same 3D point in the scene, where $\pmb{x}$ and $\pmb{x}^{\prime}$ are expressed by homogeneous coordinates of planar points. The fundamental matrix $\mathbf{F}\in \mathbb{R}^{3\times 3}$ relates these corresponding points and the epipolar lines they lie on as follows: $\pmb{x}^{\prime \top}\mathbf{F}\pmb{x} = 0$ [17], or equivalently, + +$$ \operatorname{vec}(\mathbf{F}) \cdot \operatorname{vec}\left(\pmb{x}\pmb{x}^{\prime\top}\right) = 0, \tag{5} $$ + +where $\mathrm{vec}(\cdot)$ denotes the vectorized form of a matrix. Therefore, ideally, the set of all vectors in $\mathbb{R}^9$ of the form $\mathrm{vec}(\pmb{x}\pmb{x}'^\top)$, where $(\pmb{x},\pmb{x}') \in \mathbb{R}^3 \times \mathbb{R}^3$ is a correspondence pair, lies on an 8-subspace in $\mathbb{R}^9$ and its orthogonal complement yields the fundamental matrix. In practice, the measurements of correspondence pairs can be highly corrupted due to poor matching. Moreover, some choices of correspondence pairs and the corruption mechanism may lead to concentration on low-dimensional subspaces within the desired 8-subspace. Furthermore, the corruption mechanism can lead to nontrivial settings of outliers. Lastly, since $d = 8$ and $D = 9$, the theoretical threshold of [16] translates to having the fraction of inliers among all data points be at least $8/9 \approx 88.9\%$. + +Therefore, this application is often a very challenging setting for direct RSR methods. The best performing RSR methods to date for fundamental matrix estimation are variants of RANSAC [10]. RANSAC avoids any subspace-modeling assumptions, but estimates the subspace based on testing myriads of samples, each having 7 or 8 point correspondences [17]. + +We test the performance of STE in estimating the fundamental matrix on the Photo Tourism database [41], where the image correspondences are obtained by SIFT feature similarities [30]. We compare STE with the following three top RSR performers according to [25]: FMS [24], spherical FMS (SFMS) [24] and TME [47, 50]. We also compared with vanilla RANSAC [10] and two of its specialized extensions, which are state-of-the-art performers for estimating fundamental matrices: locally optimized RANSAC (LO-RANSAC) [6] and degeneracy-check enabled RANSAC (DEGENSAC) [7]. For the RSR methods we used codes from the supplementary material of [25] with their default options. We further used the Python package pydegensac for implementing LO-RANSAC and DEGENSAC with the inlier threshold $\eta = 0.75$. For STE, we used Algorithm 2 to estimate the best $\gamma$ from $\{(2i)^{-1}\}_{i=1}^{5}$. + +![](images/953fa5e2e69cf2e35af18f9c70ab4200aff08d22e492096e141a8ffa13c5bacd.jpg) +Figure 1. Median (relative) rotation errors obtained by seven algorithms for the 14 datasets of Photo Tourism. + +We measure the accuracy of the results according to the median and mean errors of the relative rotations and direction vectors directly obtained from the fundamental matrices for each method. For computing these errors, we compared with ground-truth values provided by [41, 48]. Figure 1 presents the mean relative rotation errors per dataset of Photo Tourism; the other three error metrics and $\mathrm{mAA}(10^{\circ})$ are in the supplementary material. STE is significantly more accurate than the top RSR performers (TME, FMS and SFMS). Overall, it appears that STE performs better than vanilla RANSAC, except for the Ellis Island and Vienna Cathedral datasets, where RANSAC outperforms STE. STE is still competitive when compared with LO-RANSAC and DEGENSAC, except for Notre Dame and the latter two datasets. + +# 4.2. Initial Camera Removal for SfM + +We propose a novel application of RSR for SfM and test STE for this application. Even though our framework is not sufficiently practical at this point, it allows testing STE in a different setting where $N = D$ is very large and $d = 6$. Our idea is to use RSR within the SfM pipeline right after estimating the fundamental matrices, in order to remove some cameras that result in inaccurate estimated fundamental matrices. The hope is that eventually such methods may reduce corruption and speed up the later, computationally intensive stages of the global SfM pipeline. + +There are two main reasons to question such a process. The first concerns the gain in accuracy. Indeed, since the rest of the pipeline already identifies corrupted pairwise measurements, this process may not improve accuracy and may even harm it as it removes whole cameras and not pairs of cameras. That is, a camera that yields some bad pairwise measurements may also contribute other accurate pairwise estimates that improve the overall accuracy. The second concern is speed. In general, the removal of cameras does not necessarily result in higher speed. Indeed, the LUD global pipeline [36], which we follow, examines the parallel rigidity of the viewing graph and extracts the maximal parallel rigid subgraph. Thus earlier removal of cameras may worsen the parallel rigidity of the graph and increase the computation needed to find a maximal parallel rigid subgraph. For example, [40] removes cameras in an earlier stage of the LUD pipeline, but results in higher computational cost than the LUD pipeline. Therefore, improvement of speed for the LUD pipeline by removing cameras is generally non-trivial. Moreover, currently we use scale factors obtained by first running LUD, so we do not get a real speed improvement. Nevertheless, the proposed method is insightful whenever it indicates a clear improvement in accuracy for a dataset, since one can then infer that the current pipeline does not handle effectively corrupted measurements that a simple method can easily recognize. Furthermore, improvement in "speed" can be indicative of maintaining parallel rigidity. + +Our RSR formulation is based on a fundamental observation by Sengupta et al. [39] on the low rank of the $n$-view essential (or fundamental) matrix.
The $n$-view essential matrix $\mathbf{E}$ of size $3n \times 3n$ is formed by stacking all $\binom{n}{2}$ essential matrices, while being appropriately scaled. That is, the $ij$-th block of $\mathbf{E}$ is the essential matrix for the $i$-th and $j$-th cameras, where each $\mathbf{E}_{ij}$ is scaled by a factor $\lambda_{ij}$ in accordance with the global coordinate system (see [20, 21, 39]). It was noticed in [39] that $\mathbf{E}$ has rank 6. Moreover, [39] characterized the set of $n$-view essential matrices whose camera centers are not all collinear by the satisfaction of a few algebraic conditions, where the major one is $\mathrm{rank}(\mathbf{E}) = 6$. Further explanation appears in [20]. + +We propose a straightforward application of RSR, utilizing these ideas to initially eliminate cameras that introduce significant corruption to the essential matrices. For this purpose, we compute the essential matrices (by first computing the fundamental matrices and then using the known camera calibration) and scale each matrix according to the factor obtained by the LUD pipeline [36] (note that this is the initial scaling applied in [20, 21, 39] before applying a non-convex and nontrivial optimization procedure that refines such scales). Using these appropriately scaled essential matrices, we form the $n$-view essential matrix $\mathbf{E}$ of size $3n\times 3n$. We denote the $3n\times 3$ column blocks of $\mathbf{E}$ by $\mathbf{E}_{:,1},\dots,\mathbf{E}_{:,n}$ (since $\mathbf{E}$ is symmetric they are the same as the row blocks transposed). We treat $\mathbf{E}$ as a data matrix with $D = N = 3n$, where the columns of $\mathbf{E}$ are the data points. We apply RSR with $d = 6$, recover a $d$-dimensional robust subspace and identify the outlying columns whose distance is largest from this subspace. To avoid heuristic methods for the cutoff of outliers we assume a fixed percentage of $20\%$ outlying columns. If a column block $\mathbf{E}_{:,i}$ contains an outlying column, we remove its corresponding camera $i$. Consequently, a smaller percentage of cameras (about $10 - 15\%$) is eliminated. + +![](images/a2dd0f25f68613bf4d91113af58c600e73c8bbcd18228539c6ad0a04d559561c.jpg) +Figure 2. Mean (absolute) rotation errors (in degrees, left) and mean translation errors (in degrees, right) of LUD and four RSR methods used to initially screen bad cameras within LUD applied to the 14 datasets of Photo Tourism. + +![](images/f4c5770f5c5861e10f2f07e8c7ced3cebc2f423254ac4d0b257d8aabdf79219c.jpg) + +We use the Photo Tourism database [41] with precomputed pairwise image correspondences provided by [39] (they were obtained by thresholding SIFT feature similarities). To compute scale factors for the essential matrices we use the output of the LUD pipeline [36] as follows (following an idea proposed in [39] for initializing these values): Given the essential matrix for cameras $i$ and $j$ computed at an early stage of our pipeline, $\mathbf{E}_{ij}$, and the one obtained by the full LUD pipeline, $\mathbf{E}_{ij}^{\mathrm{LUD}}$, the scaling factor is $\lambda_{ij} = \langle \mathbf{E}_{ij}, \mathbf{E}_{ij}^{\mathrm{LUD}} \rangle / \| \mathbf{E}_{ij}^{\mathrm{LUD}} \|_F^2$. Since many values of $\mathbf{E}_{ij}$ are missing, we also apply matrix completion. + +We compare the plain LUD pipeline with the LUD pipeline combined with the initial filtering performed by STE, FMS, SFMS and TME. For STE we fix $\gamma = 1/3$, though any other value we tried yielded the same result.
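+
+The screening step described above can be summarized in a few lines. The following sketch assumes the scaled essential matrices have already been assembled into $\mathbf{E}$ and that an RSR routine (e.g., the STE sketch of §2.3) has produced an orthonormal basis of the recovered 6-dimensional subspace; the helper name and everything beyond the fixed $20\%$ cutoff stated above are our own illustrative choices.
+
+```python
+import numpy as np
+
+def flag_bad_cameras(E, U, outlier_frac=0.20):
+    """E: (3n x 3n) scaled n-view essential matrix; U: orthonormal (3n x 6)
+    basis of the subspace recovered by an RSR method run on the columns of E.
+    Returns the indices of cameras owning the most outlying columns."""
+    # Distance of every column of E to the recovered subspace.
+    resid = np.linalg.norm(E - U @ (U.T @ E), axis=0)
+    k = int(outlier_frac * E.shape[1])
+    bad_cols = np.argsort(resid)[-k:]                    # the most outlying columns
+    bad_cams = sorted({int(c) // 3 for c in bad_cols})   # camera i owns columns 3i..3i+2
+    return bad_cams
+```
+
+The flagged cameras would then be excluded before running the remaining stages of the LUD pipeline.
+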
We report both mean and median errors of rotations and translations and runtime of the standard LUD and the RSR+LUD methods with initial screening of cameras. Figure 2 shows the mean rotation and translation errors, where the rest of the figures and a summarizing table are in the supplementary material. In general, STE demonstrates slightly higher accuracy compared to other RSR methods. Improved accuracy is particularly notable when matrix completion is not utilized, as demonstrated in the supplementary material. We observe that LUD+STE generally improves the estimation of camera parameters (both rotations and translations) over LUD. + +The improvement of LUD+STE is noticeable in Roman Forum and Gendarmenmarkt. In the supplementary material we show further improvement for Gendarmenmarkt with the removal of $45\%$ outlying columns. While the resulting errors are still large, their improvement shows some potential in dealing with difficult SfM structure by initially removing cameras in a way that may help eliminate some scene ambiguities, which are prevalent in Gendarmenmarkt. In terms of runtime, both LUD+STE and LUD+SFMS demonstrate significant improvements, where LUD+SFMS is even faster than LUD+STE. While this does not yet imply faster handling of the datasets (as we use initial scaling factors obtained by LUD), it indicates the efficiency of the removal of outliers in maintaining parallel rigidity. + +# 5. Conclusions + +We introduce STE, a meticulously crafted adaptation of TME designed to address challenges within RSR. Theoretical guarantees demonstrate its ability to recover the true underlying subspace reliably, even with a smaller fraction of inliers compared to the well-known theoretical threshold. Under the generalized haystack model, we show that this initialization can be chosen as TME itself, leading to improved handling of a smaller fraction of inliers compared to TME. Our exploration extends to practical applications, where STE proves effective in two 3D vision tasks: robust fundamental matrix estimation and screening of bad cameras for improved SfM. + +Several avenues for future research include: $\bullet$ Exploring adaptations of other robust covariance estimation methods to RSR. $\bullet$ Studying effective initialization for STE both in theory and in practice. $\bullet$ In-depth theoretical exploration of the optimal choice of the parameter $\gamma$ . $\bullet$ Study of alternative ways of adapting TME to RSR problems. $\bullet$ Improving STE for fundamental matrix estimation following ideas similar to those in [7, 12, 37] for addressing challenging degeneracies. $\bullet$ Enhancing our initial idea of initial removal of bad cameras, specifically attempting to use it to rectify challenging scene ambiguities. $\bullet$ Testing our methods for SfM using more recent feature matching algorithms. + +# References + +[1] Larry P. Ammann. Robust singular value decompositions: A new approach to projection pursuit. Journal of the American Statistical Association, 88(422):pp. 505-514, 1993. 1 +[2] Daniel Barath, Jiri Matas, and Jana Noskova. MAGSAC: marginalizing sample consensus. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10197-10205, 2019. 2 +[3] Jian-Feng Cai, Emmanuel J. Candès, and Zuowei Shen. A singular value thresholding algorithm for matrix completion. SIAM Journal on optimization, 20(4):1956-1982, 2010. 17 +[4] Yeshwanth Cherapanamjeri, Prateek Jain, and Praneeth Netrapalli. Thresholding based outlier robust PCA. 
In Conference on Learning Theory, pages 593-628. PMLR, 2017. 1 +[5] Vartan Choulakian. $L_{1}$ -norm projection pursuit principal component analysis. Computational Statistics & Data Analysis, 50(6):1441-1451, 2006. 1 +[6] Ondrej Chum, Jií Matas, and Josef Kittler. Locally optimized RANSAC. In Pattern Recognition: 25th DAGM Symposium, Magdeburg, Germany, September 10-12, 2003. Proceedings 25, pages 236-243. Springer, 2003. 2, 6, 13 +[7] Ondrej Chum, Tomas Werner, and Jiri Matas. Two-view geometry estimation unaffected by a dominant plane. In 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), pages 772-779. IEEE, 2005. 2, 6, 8, 13 +[8] Ilias Diakonikolas, Gautam Kamath, Daniel M. Kane, Jerry Li, Ankur Moitra, and Alistair Stewart. Robustly learning a gaussian: Getting optimal error, efficiently. In Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms, page 2683-2702, USA, 2018. Society for Industrial and Applied Mathematics. 1 +[9] Chris Ding, Ding Zhou, Xiaofeng He, and Hongyuan Zha. R1-PCA: rotational invariant $L_{1}$ -norm principal component analysis for robust subspace factorization. In ICML '06: Proceedings of the 23rd international conference on Machine learning, pages 281-288, New York, NY, USA, 2006. ACM. 1 +[10] Martin A. Fischler and Robert C. Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 1, 2, 6, 13 +[11] Gabriel Frahm and Uwe Jaekel. A generalization of Tyler's M-estimators to the case of incomplete data. Computational Statistics & Data Analysis, 54(2):374-393, 2010. 2 +[12] J-M Frahm and Marc Pollefeys. RANSAC for (quasi-) degenerate data (QDEGSAC). In 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06), pages 453-460. IEEE, 2006. 2, 8 +[13] William Cole Franks and Ankur Moitra. Rigorous guarantees for Tyler's M-estimator via quantum expansion. In Conference on Learning Theory, pages 1601–1632. PMLR, 2020. 3 +[14] Jerome H. Friedman and John W. Tukey. A projection pursuit algorithm for exploratory data analysis. IEEE Transactions on Computers, C-23(9):881-890, 1974. 1 +[15] John Goes, Gilad Lerman, and Boaz Nadler. Robust sparse covariance estimation by thresholding Tyler's M-estimator. The Annals of Statistics, 48(1):86 - 110, 2020. 3 + +[16] Moritz Hardt and Ankur Moitra. Algorithms and hardness for robust subspace recovery. In Conference on Learning Theory, pages 354-375. PMLR, 2013. 1, 2, 3, 4, 5, 6 +[17] Richard Hartley and Andrew Zisserman. Multiple view geometry in computer vision. Cambridge university press, 2003. 6, 11, 12, 13 +[18] Yuhe Jin, Dmytro Mishkin, Anastasiia Mishchuk, Jiri Matas, Pascal Fua, Kwang Moo Yi, and Eduard Trulls. Image matching across wide baselines: From paper to practice. International Journal of Computer Vision, 129(2):517-547, 2021. 13 +[19] Arman Karimian and Roberto Tron. Essential matrix estimation using convex relaxations in orthogonal space. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17142-17152, 2023. 15 +[20] Yoni Kasten, Amnon Geifman, Meirav Galun, and Ronen Basri. Algebraic characterization of essential matrices and their averaging in multiview settings. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5895-5903, 2019. 7, 8 +[21] Yoni Kasten, Amnon Geifman, Meirav Galun, and Ronen Basri. 
Gpsfm: Global projective sfm using algebraic constraints on multi-view fundamental matrices. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3264-3272, 2019. 7, 8 +[22] John T. Kent and David E. Tyler. Maximum likelihood estimation for the wrapped Cauchy distribution. Journal of Applied Statistics, 15(2):247-254, 1988. 2, 3 +[23] Nojun Kwak. Principal component analysis based on $L_{1}$ -norm maximization. IEEE transactions on pattern analysis and machine intelligence, 30(9):1672-1680, 2008. 1 +[24] Gilad Lerman and Tyler Maunu. Fast, robust and non-convex subspace recovery. Information and Inference: A Journal of the IMA, 7(2):277–336, 2018. 1, 2, 3, 4, 6, 13 +[25] Gilad Lerman and Tyler Maunu. An overview of robust subspace recovery. Proceedings of the IEEE, 106(8):1380-1410, 2018. 1, 3, 4, 5, 6 +[26] Gilad Lerman and Teng Zhang. $l_{p}$ -recovery of the most significant subspace among multiple subspaces with outliers. Constr. Approx., 40(3):329-385, 2014. 4 +[27] Gilad Lerman, Michael B. McCoy, Joel A. Tropp, and Teng Zhang. Robust computation of linear models by convex relaxation. Found. Comput. Math., 15(2):363-410, 2015. 1, 3, 5 +[28] Gilad Lerman, Feng Yu, and Teng Zhang. Theoretical guarantees for the subspace-constrained Tyler's estimator, 2024. 4, 5, 11 +[29] Guoying Li and Zhonglian Chen. Projection-pursuit approach to robust dispersion matrices and principal components: primary theory and monte carlo. Journal of the American Statistical Association, 80(391):759-766, 1985. 1 +[30] David G. Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60:91-110, 2004. 6 +[31] Ricardo A. Maronna and Víctor J. Yohai. Robust estimation of multivariate location and scatter. Wiley StatsRef: Statistics Reference Online, pages 1-12, 2014. 2 +[32] Tyler Maunu and Gilad Lerman. Robust subspace recovery with adversarial outliers, 2019. 1, 2, 4, 5 + +[33] Tyler Maunu, Teng Zhang, and Gilad Lerman. A well-tempered landscape for non-convex robust subspace recovery. J. Mach. Learn. Res., 20(1):1348–1406, 2019. 1, 3, 4, 5, 6 +[34] Tyler Maunu, Chenyu Yu, and Gilad Lerman. Stochastic and private nonconvex outlier-robust PCAs. In Proceedings of Mathematical and Scientific Machine Learning, pages 173–188. PMLR, 2022. 1 +[35] Michael McCoy and Joel A. Tropp. Two proposals for robust PCA using semidefinite programming. Electronic Journal of Statistics, 5(none):1123 - 1160, 2011. 1 +[36] Onur Ozyesil and Amit Singer. Robust camera location estimation by convex programming. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2674-2683, 2015. 7, 8 +[37] Rahul Raguram, Ondrej Chum, Marc Pollefeys, Jiri Matas, and Jan-Michael Frahm. USAC: A universal framework for random sample consensus. IEEE transactions on pattern analysis and machine intelligence, 35(8):2022-2038, 2012. 2, 8 +[38] Elvezio M. Ronchetti and Peter J. Huber. Robust statistics. John Wiley & Sons Hoboken, NJ, USA, 2009. 1 +[39] Soumyadip Sengupta, Tal Amir, Meirav Galun, Tom Goldstein, David W Jacobs, Amit Singer, and Ronen Basri. A new rank constraint on multi-view fundamental matrices, and its application to camera location recovery. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4798-4806, 2017. 7, 8, 16 +[40] Yunpeng Shi, Shaohan Li, Tyler Mauno, and Gilad Lerman. Scalable cluster-consistency statistics for robust multi-object matching. 
In International Conference on 3D Vision, 3DV 2021, London, United Kingdom, December 1-3, 2021, pages 352-360. IEEE, 2021. 7 +[41] Noah Snavely, Steven M. Seitz, and Richard Szeliski. Photo tourism: exploring photo collections in 3d. In ACM siggraph 2006 papers, pages 835-846. Association for Computing Machinery, 2006. 6, 7, 8 +[42] Nathan Srebro and Tommi Jaakkola. Weighted low-rank approximations. In Proceedings of the 20th international conference on machine learning (ICML-03), pages 720–727, 2003. 1 +[43] Jacob Steinhardt, Moses Charikar, and Gregory Valiant. Resilience: A criterion for learning in the presence of arbitrary outliers. In 9th Innovations in Theoretical Computer Science Conference, ITCS 2018, January 11-14, 2018, Cambridge, MA, USA, pages 45:1-45:21. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2018. 1 +[44] Ben Tordoff and David W. Murray. Guided sampling and consensus for motion estimation. In Computer Vision—ECCV 2002: 7th European Conference on Computer Vision Copenhagen, Denmark, May 28–31, 2002 Proceedings, Part I 7, pages 82–96. Springer, 2002. 15 +[45] Philip H.S. Torr and Andrew Zisserman. MLESAC: A new robust estimator with application to estimating image geometry. Computer vision and image understanding, 78(1):138-156, 2000. 2 +[46] David E. Tyler. Statistical analysis for the angular central gaussian distribution on the sphere. Biometrika, 74(3):579-589, 1987. 2 +[47] David E. Tyler. A distribution-free m-estimator of multivariate scatter. The Annals of Statistics, pages 234–251, 1987. 1, 2, 6, 13 + +[48] Kyle Wilson and Noah Snavely. Robust global translations with 1dsfm. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part III 13, pages 61-75. Springer, 2014. 7 +[49] Huan Xu, Constantine Caramanis, and Sujay Sanghavi. Robust PCA via outlier pursuit. Advances in neural information processing systems, 23, 2010. 1 +[50] Teng Zhang. Robust subspace recovery by Tyler's M-estimator. Information and Inference: A Journal of the IMA, 5(1):1-21, 2016. 1, 3, 4, 5, 6, 13 +[51] Teng Zhang and Gilad Lerman. A novel M-estimator for robust PCA. The Journal of Machine Learning Research, 15(1): 749–808, 2014. 
1, 3, 5 \ No newline at end of file diff --git a/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/images.zip b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..0aaa61e6077379f246768801dd36f610d6738204 --- /dev/null +++ b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:191cba62e8e0bfb35b132d5991f47faa35823135dc9f4768b3be49f327098aa7 +size 220036 diff --git a/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/layout.json b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..fe8fa0853cd9a8874424e5e3afaefa39ed22f841 --- /dev/null +++ b/2024/A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion/layout.json @@ -0,0 +1,14365 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 160, + 103, + 438, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 103, + 438, + 138 + ], + "spans": [ + { + "bbox": [ + 160, + 103, + 438, + 138 + ], + "type": "text", + "content": "A Subspace-Constrained Tyler's Estimator and its Applications to Structure from Motion *" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 90, + 160, + 204, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 160, + 204, + 204 + ], + "spans": [ + { + "bbox": [ + 90, + 160, + 204, + 204 + ], + "type": "text", + "content": "Feng Yu† \nUniversity of Minnesota \nfyu@umn.edu" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 230, + 161, + 364, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 161, + 364, + 203 + ], + "spans": [ + { + "bbox": [ + 230, + 161, + 364, + 203 + ], + "type": "text", + "content": "Teng Zhang \nUniversity of Central Florida \nteng.zhang@ucf.edu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 391, + 160, + 504, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 160, + 504, + 202 + ], + "spans": [ + { + "bbox": [ + 391, + 160, + 504, + 202 + ], + "type": "text", + "content": "Gilad Lerman ‡ \nUniversity of Minnesota \nlerman@umn.edu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 145, + 231, + 190, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 231, + 190, + 243 + ], + "spans": [ + { + "bbox": [ + 145, + 231, + 190, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 256, + 290, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 256, + 290, + 448 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 290, + 448 + ], + "type": "text", + "content": "We present the subspace-constrained Tyler's estimator (STE) designed for recovering a low-dimensional subspace within a dataset that may be highly corrupted with outliers. STE is a fusion of the Tyler's M-estimator (TME) and a variant of the fast median subspace. Our theoretical analysis suggests that, under a common inlier-outlier model, STE can effectively recover the underlying subspace, even when it contains a smaller fraction of inliers relative to other methods in the field of robust subspace recovery. 
We apply STE in the context of Structure from Motion (SfM) in two ways: for robust estimation of the fundamental matrix and for the removal of outlying cameras, enhancing the robustness of the SfM pipeline. Numerical experiments confirm the state-of-the-art performance of our method in these applications. This research makes significant contributions to the field of robust subspace recovery, particularly in the context of computer vision and 3D reconstruction." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 472, + 124, + 485 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 472, + 124, + 485 + ], + "spans": [ + { + "bbox": [ + 47, + 472, + 124, + 485 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 493, + 288, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 493, + 288, + 637 + ], + "spans": [ + { + "bbox": [ + 46, + 493, + 288, + 637 + ], + "type": "text", + "content": "In many applications, data has been collected in large quantities and dimensions. It is a common practice to represent such data within a low-dimensional subspace that preserves its essential information. Principal Component Analysis (PCA) is frequently employed to identify this subspace. However, PCA faces challenges when dealing with data contaminated by outliers. Consequently, the field of Robust Subspace Recovery (RSR) aims to develop a framework for outlier-robust PCA. RSR is particularly relevant to problems in computer vision, such as fundamental matrix estimation, which involves recovering a hidden subspace associated with \"good correspondence pairs\" among highly corrupted measurements." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 638, + 288, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 638, + 288, + 673 + ], + "spans": [ + { + "bbox": [ + 47, + 638, + 288, + 673 + ], + "type": "text", + "content": "Various algorithms have been proposed to address RSR, employing methods such as projection pursuit [1, 5, 14, 23, 29, 35, 38], subspace energy minimization (in particular least absolute" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 232, + 547, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 232, + 547, + 292 + ], + "spans": [ + { + "bbox": [ + 306, + 232, + 547, + 292 + ], + "type": "text", + "content": "deviations and its relaxations) [9, 24, 27, 33, 34, 42, 43, 51], robust covariance estimation [50], filtering-based methods [4, 8, 49] and exhaustive subspace search methods [10, 16]. An in-depth exploration and comprehensive overview of robust subspace recovery and its diverse algorithms can be found in [25]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "content": "Methods based on robust covariance estimators, such as the Tyler's M-estimator (TME), offer additional useful information on the shape of the data within the subspace, similarly to PCA in the non-robust setting. They also offer maximum-likelihood interpretation, which is missing in many other methods. Application of the TME [47] to RSR has been shown to be successful on basic benchmarks [25, 50]. 
Moreover, under a model of inliers in a general position on a subspace and outliers in general position in the complement of the subspace, TME was shown to recover the subspace within a desirable fraction of inliers [50]. Below this fraction it was proved to be Small Set Expansion (SSE) hard to solve the RSR problem [16]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 437, + 548, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 548, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 548, + 581 + ], + "type": "text", + "content": "One may still succeed with solving the RSR problem with a computationally efficient algorithm when the fraction of inliers is lower than the one required by [16], considering a more restricted data model or violating other assumptions made in [16]. For example, some special results in this direction are discussed in [32]. Also, [33] proposes the generalized haystack model of inliers and outliers to demonstrate the possibility of handling lower fractions of inliers by an RSR algorithm. This model extends the limited standard haystack model [27], where basic methods (such as PCA filtering) can easily work with low fractions of outliers. Nevertheless, it is unclear how practical the above theoretical ideas are for applied settings." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 581, + 547, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 547, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 547, + 700 + ], + "type": "text", + "content": "One practical setting that requires a fraction of inliers significantly lower than the one stated in [16] arises in the problem of robust fundamental (or essential) matrix estimation. The fundamental matrix encompasses the epipolar geometry of two views in stereo vision systems. It is typically computed using point correspondences between the two projected images. This computation requires finding an 8-dimensional subspace within a 9-dimensional ambient space. In this setting, the theoretical framework of [16] requires that the fraction of inliers be at least " + }, + { + "bbox": [ + 304, + 581, + 547, + 700 + ], + "type": "inline_equation", + "content": "8/9 \\approx 88.9\\%" + }, + { + "bbox": [ + 304, + 581, + 547, + 700 + ], + "type": "text", + "content": ", which is clearly unreasonable to require." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 318, + 701, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 701, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 318, + 701, + 547, + 714 + ], + "type": "text", + "content": "To date, the RANdom Sample Consensus (RANSAC)" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 683, + 280, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 683, + 280, + 693 + ], + "spans": [ + { + "bbox": [ + 58, + 683, + 280, + 693 + ], + "type": "text", + "content": "*This work was supported by NSF DMS awards 2124913 and 2318926." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 59, + 693, + 232, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 693, + 232, + 703 + ], + "spans": [ + { + "bbox": [ + 59, + 693, + 232, + 703 + ], + "type": "text", + "content": "† Supplementary code: https://github.com/alexfengg/STE" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 59, + 703, + 230, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 230, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 230, + 712 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 59, + 703, + 230, + 712 + ], + "type": "text", + "content": " Corresponding author. All authors equally contributed." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14575" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 251 + ], + "type": "text", + "content": "method [10] is the only RSR method that has been highly successful in addressing this nontrivial scenario, gaining widespread popularity in computer vision. RANSAC is an iterative method that randomly selects minimal subsets of the data and fits models, in particular subspaces, to identify the best consensus set, that is, the set in most agreement with the hypothesized model. There are numerous approaches proposed to improve RANSAC, especially for this particular application, including locally optimized RANSAC (LO-RANSAC, [6]), maximum likelihood estimator RANSAC (MLESAC) [45]), degeneracy-check enabled RANSAC (DEGENSAC) [7]) and M-estimator guided RANSAC (MAGSAC) [2]). A near recovery theory for a variant of RANSAC under some assumptions on the outliers was suggested in [32]. Nevertheless, in general, RANSAC is rather slow and its application to higher-dimensional problems is intractable." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 253, + 289, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 253, + 289, + 386 + ], + "spans": [ + { + "bbox": [ + 46, + 253, + 289, + 386 + ], + "type": "text", + "content": "This work introduces a novel RSR algorithm that is guaranteed to robustly handle a lower fraction of outliers than the theoretical threshold proposed by [16], under special settings. Our basic idea is to adapt Tyler's M-Estimator to utilize the information of the underlying " + }, + { + "bbox": [ + 46, + 253, + 289, + 386 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 253, + 289, + 386 + ], + "type": "text", + "content": "-dimensional subspace, while avoiding estimation of the full covariance. 
By using fewer degrees of freedom we obtain a more accurate subspace estimator than the one obtained by TME, with improved computational complexity. We show that STE is a fusion of Tyler's M-estimator (TME) and a variant of the fast median subspace (FMS) [24] that aims to minimize a subspace-based " + }, + { + "bbox": [ + 46, + 253, + 289, + 386 + ], + "type": "inline_equation", + "content": "\ell_0" + }, + { + "bbox": [ + 46, + 253, + 289, + 386 + ], + "type": "text", + "content": " energy." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 387, + 289, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 387, + 289, + 531 + ], + "spans": [ + { + "bbox": [ + 46, + 387, + 289, + 531 + ], + "type": "text", + "content": "Our theory shows that our proposed subspace-constrained Tyler's estimator (STE) algorithm can effectively recover the underlying subspace, even when the data contains a smaller fraction of inliers than other methods require. We obtain this nontrivial achievement first in a generic setting, where we establish when an initial estimator for STE is sufficiently well-conditioned to guarantee the desired robustness of STE. We then assume the asymptotic generalized haystack model and show that under this model, TME itself is a well-conditioned initial estimator for STE, and that unlike TME, STE with this initialization can deal with a lower fraction of inliers than the theoretical threshold specified in [16]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 532, + 289, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 532, + 289, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 532, + 289, + 652 + ], + "type": "text", + "content": "We demonstrate competitive performance in robust fundamental matrix estimation, relying solely on subspace information without additional methods for handling degenerate scenarios, in contrast to [7, 12, 37]. We also propose a potential application of RSR for removing bad cameras in order to enhance the SfM pipeline and show competitive performance of STE. This is a completely new idea and it may require additional exploration to make it practical. Nevertheless, it offers a very different testbed where " + }, + { + "bbox": [ + 46, + 532, + 289, + 652 + ], + "type": "inline_equation", + "content": "N = D" + }, + { + "bbox": [ + 46, + 532, + 289, + 652 + ], + "type": "text", + "content": " is very large and RANSAC is generally intractable." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 653, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 289, + 715 + ], + "type": "text", + "content": "The rest of the paper is organized as follows: §2 introduces the STE framework, §3 establishes theoretical guarantees of STE, §4 applies STE to two different problems in SfM, demonstrating its competitive performance relative to existing algorithms, and §5 provides conclusions and future directions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 71, + 419, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 71, + 419, + 85 + ], + "spans": [ + { + "bbox": [ + 305, + 71, + 419, + 85 + ], + "type": "text", + "content": "2. 
The STE Algorithm" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 91, + 547, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 547, + 152 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 547, + 152 + ], + "type": "text", + "content": "We present our proposed STE. We review basic notation in §2.1 and Tyler's original estimator in §2.2. We describe our method in §2.3, its computational complexity in §2.4, its algorithmic choices in §2.5 and an interpretation for it as a fusion of TME and FMS with " + }, + { + "bbox": [ + 304, + 91, + 547, + 152 + ], + "type": "inline_equation", + "content": "p = 0" + }, + { + "bbox": [ + 304, + 91, + 547, + 152 + ], + "type": "text", + "content": " in §2.6." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 158, + 370, + 170 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 158, + 370, + 170 + ], + "spans": [ + { + "bbox": [ + 306, + 158, + 370, + 170 + ], + "type": "text", + "content": "2.1. Notation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "spans": [ + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": "We use bold upper and lower case letters for matrices and column vectors, respectively. Let " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_k" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " denote the identity matrix in " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{k\\times k}" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": ", where if " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " is obvious we just write " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{I}" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": ". For a matrix " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": ", we denote by " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\operatorname{tr}(\\mathbf{A})" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\operatorname{Im}(\\mathbf{A})" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " the trace and image (i.e., column space) of " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": ". 
We denote by " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "S_{+}(D)" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "S_{++}(D)" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " the sets of positive semidefinite and definite matrices in " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{D\\times D}" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": ", respectively. We denote by " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "O(D,d)" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " the set of semi-orthogonal " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "D\\times d" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " matrices, i.e., " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{U}\\in O(D,d)" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " if and only if " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{U}\\in \\mathbb{R}^{D\\times d}" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{U}^{\\top}\\mathbf{U} = \\mathbf{I}_{d}" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": ". We refer to linear " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": "-dimensional subspaces as " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": "-subspaces. For a " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": "-subspace " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": ", we denote by " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_L" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " the " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "D\\times D" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " matrix representing the orthogonal projector onto " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": ". 
We also arbitrarily fix " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{U}_L" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "O(D,d)" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{U}_L\\mathbf{U}_L^\\top = \\mathbf{P}_L" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " (such " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{U}_L" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " is determined up to right multiplication by an orthogonal matrix in " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "O(d,d)" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": "). Throughout the paper, " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = \\{\\pmb{x}_i\\}_{i = 1}^N\\subset \\mathbb{R}^D" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": " is assumed to be a given centered dataset, that is, " + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "inline_equation", + "content": "\\sum_{i = 1}^{N}\\pmb{x}_i = \\mathbf{0}" + }, + { + "bbox": [ + 304, + 175, + 547, + 358 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 363, + 539, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 363, + 539, + 376 + ], + "spans": [ + { + "bbox": [ + 305, + 363, + 539, + 376 + ], + "type": "text", + "content": "2.2. 
Tyler's Estimator and its Application to RSR" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 381, + 547, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 381, + 547, + 407 + ], + "spans": [ + { + "bbox": [ + 304, + 381, + 547, + 407 + ], + "type": "text", + "content": "Tyler's M-estimator (TME) [47] robustly estimates the covariance " + }, + { + "bbox": [ + 304, + 381, + 547, + 407 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{*}" + }, + { + "bbox": [ + 304, + 381, + 547, + 407 + ], + "type": "text", + "content": " of the dataset " + }, + { + "bbox": [ + 304, + 381, + 547, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = \\{\\pmb{x}_i\\}_{i=1}^N \\subset \\mathbb{R}^D" + }, + { + "bbox": [ + 304, + 381, + 547, + 407 + ], + "type": "text", + "content": " by minimizing" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 357, + 413, + 547, + 445 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 413, + 547, + 445 + ], + "spans": [ + { + "bbox": [ + 357, + 413, + 547, + 445 + ], + "type": "interline_equation", + "content": "\\frac {D}{N} \\sum_ {i = 1} ^ {N} \\log \\left(\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {\\Sigma} ^ {- 1} \\boldsymbol {x} _ {i}\\right) + \\operatorname {l o g d e t} (\\boldsymbol {\\Sigma}) \\tag {1}", + "image_path": "039e0d9d228a12b8da067ea2c135b99286426e97c9e36c71ac648c08eaff2537.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "spans": [ + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "text", + "content": "over " + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma} \\in S_{++}(D)" + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "inline_equation", + "content": "\\mathrm{tr}(\\pmb{\\Sigma}) = 1" + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "text", + "content": ". The cost function in (1) can be motivated by writing the maximum likelihood of the multivariate " + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "text", + "content": "-distribution and letting its degrees of freedom parameter, " + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "inline_equation", + "content": "\\nu" + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "text", + "content": ", approach zero [31]. This cost function is invariant to dilations of " + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}" + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "text", + "content": ", and the constraint on " + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "inline_equation", + "content": "\\mathrm{tr}(\\pmb{\\Sigma})" + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "text", + "content": ", whose value can be arbitrarily chosen, fixes a scale. TME also applies to scenarios where the covariance matrix does not exist. In such cases, TME estimates the shape (or scatter matrix) of the distribution, which is defined up to an arbitrary scale. 
More direct interpretations of TME as a maximum likelihood estimator can be found in [11, 46]. When " + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "text", + "content": " is fixed and " + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 451, + 547, + 618 + ], + "type": "text", + "content": " approaches infinity, TME is the \"most robust\" estimator of the shape matrix for data i.i.d. sampled from a continuous elliptical distribution [47] in a minimax sense, that is, as a minimizer of the maximal variance." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 619, + 547, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 619, + 547, + 643 + ], + "spans": [ + { + "bbox": [ + 304, + 619, + 547, + 643 + ], + "type": "text", + "content": "Tyler [47] proposed the following iterative formula for computing TME:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 310, + 649, + 541, + 682 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 649, + 541, + 682 + ], + "spans": [ + { + "bbox": [ + 310, + 649, + 541, + 682 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\Sigma} ^ {(k)} = \\sum_ {i = 1} ^ {N} \\frac {\\boldsymbol {x} _ {i} \\boldsymbol {x} _ {i} ^ {\\top}}{\\boldsymbol {x} _ {i} ^ {\\top} (\\boldsymbol {\\Sigma} ^ {(k - 1)}) ^ {- 1} \\boldsymbol {x} _ {i}} / \\operatorname {t r} \\left(\\sum_ {i = 1} ^ {N} \\frac {\\boldsymbol {x} _ {i} \\boldsymbol {x} _ {i} ^ {\\top}}{\\boldsymbol {x} _ {i} ^ {\\top} (\\boldsymbol {\\Sigma} ^ {(k - 1)}) ^ {- 1} \\boldsymbol {x} _ {i}}\\right).", + "image_path": "e83fd51a2e483b8654d53f158212e3d9bc2956c035b50267d39461c981009b3e.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "text", + "content": "Kent and Tyler [22] proved that if any " + }, + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "text", + "content": "-subspace of " + }, + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^D" + }, + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "inline_equation", + "content": "1 \\leq d \\leq D - 1" + }, + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "text", + "content": ", contains fewer than " + }, + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "inline_equation", + "content": "Nd / D" + }, + { + "bbox": [ + 305, + 689, + 547, + 714 + ], + "type": "text", + "content": " data points, then" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "14576" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "content": "the above iterative procedure converges to TME. Linear rate of convergence was proved for the regularized TME in [15] and for TME in [13]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "spans": [ + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "text", + "content": "One can apply the TME estimator to solve the RSR problem with a given dimension " + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "text", + "content": " by forming the subspace spanned by the top " + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "text", + "content": " eigenvectors of TME. Zhang [50] proved that as long as there are more than " + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "inline_equation", + "content": "Nd / D" + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "text", + "content": " inliers lying on a subspace, and the projected coordinates of these inliers on the " + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "text", + "content": "-subspace and the projected coordinates of the outliers on the " + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "inline_equation", + "content": "(D - d)" + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "text", + "content": "-dimensional orthogonal complement of the subspace are in general position, then TME recovers this subspace. Zhang [50] also showed that in this setting the above iterative formula converges (note that the condition of convergence in [22] does not apply in this case). The above lower bound of " + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "inline_equation", + "content": "Nd / D" + }, + { + "bbox": [ + 46, + 109, + 287, + 275 + ], + "type": "text", + "content": " on the number of inliers coincides with the general bound for the noiseless RSR problem, beyond which the problem becomes Small Set-Expansion (SSE) hard [16]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 275, + 288, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 275, + 288, + 348 + ], + "spans": [ + { + "bbox": [ + 46, + 275, + 288, + 348 + ], + "type": "text", + "content": "Numerical experiments in [50] and [25] indicated state-of-the-art accuracy of TME compared to other RSR algorithms in various settings. The computational complexity of TME is of order " + }, + { + "bbox": [ + 46, + 275, + 288, + 348 + ], + "type": "inline_equation", + "content": "O(K(ND^2 + D^3))" + }, + { + "bbox": [ + 46, + 275, + 288, + 348 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 275, + 288, + 348 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 275, + 288, + 348 + ], + "type": "text", + "content": " is the number of iterations. 
On the other hand, the cost of faster RSR algorithms is of order " + }, + { + "bbox": [ + 46, + 275, + 288, + 348 + ], + "type": "inline_equation", + "content": "O(KNDd)" + }, + { + "bbox": [ + 46, + 275, + 288, + 348 + ], + "type": "text", + "content": " [24, 25, 33]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 355, + 238, + 367 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 355, + 238, + 367 + ], + "spans": [ + { + "bbox": [ + 47, + 355, + 238, + 367 + ], + "type": "text", + "content": "2.3. Motivation and Formulation of STE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 374, + 287, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 374, + 287, + 457 + ], + "spans": [ + { + "bbox": [ + 46, + 374, + 287, + 457 + ], + "type": "text", + "content": "We aim to use more cleverly the " + }, + { + "bbox": [ + 46, + 374, + 287, + 457 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 374, + 287, + 457 + ], + "type": "text", + "content": "-subspace information within the TME framework to form an RSR algorithm, instead of first estimating the full covariance. By using less degrees of freedom we can obtain a more accurate subspace estimator, especially when the fraction of outliers can be large. Furthermore, our idea allows us to improve the computational cost to become state-of-the-art for high-dimensional settings." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "text", + "content": "Many RSR algorithms can be formulated as minimizing a best orthogonal projector onto a " + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "text", + "content": "-subspace [24, 25, 27, 33, 51]. We are going to do something similar, but unlike using an orthogonal projector, we will still use information from TME to get the shape of the data on the projected subspace. We will make the rest of the eigenvalues (i.e., bottom " + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "inline_equation", + "content": "D - d" + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "text", + "content": " ones) equal and shrink them by a parameter " + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "inline_equation", + "content": "0 < \\gamma < 1" + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "text", + "content": ". We thus use a regularized version of a reduced-dimension covariance matrix. This parameter " + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "text", + "content": " plays a role in our theoretical estimates. Making " + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "text", + "content": " smaller helps with better subspace recovery, whereas making " + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 459, + 287, + 589 + ], + "type": "text", + "content": " bigger enhances the well-conditioning of the estimator." 
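To make the eigenvalue-shrinkage idea just described concrete before the formal description that follows, here is a minimal NumPy sketch of a single STE-style update of the scatter matrix. It is our own illustration under the stated assumptions (centered data, trace-one scatter); the function and variable names are illustrative and are not taken from the paper's supplementary code.

```python
import numpy as np

def ste_update(X, Sigma, d, gamma):
    """One illustrative STE-style update (a sketch, not the authors' released code).

    X     : (D, N) centered data matrix
    Sigma : (D, D) current scatter estimate with trace 1
    d     : target subspace dimension
    gamma : shrinkage factor in (0, 1) for the bottom D - d eigenvalues
    """
    # Tyler-style weights 1 / (x_i^T Sigma^{-1} x_i), slightly stabilized.
    w = 1.0 / (np.einsum('in,in->n', X, np.linalg.solve(Sigma, X)) + 1e-15)
    Z = (X * w) @ X.T                            # weighted sample covariance
    evals, evecs = np.linalg.eigh(Z)             # ascending eigenvalues
    evals, evecs = evals[::-1], evecs[:, ::-1]   # reorder to descending
    evals[d:] = gamma * evals[d:].mean()         # equalize and shrink the bottom D - d
    Sigma_new = (evecs * evals) @ evecs.T        # rebuild U diag(eigenvalues) U^T
    return Sigma_new / np.trace(Sigma_new)       # rescale to trace 1
```

Iterating such an update until the scatter stabilizes and returning the span of its top d eigenvectors is the structure of the procedure formalized below.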
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 590, + 287, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 590, + 287, + 661 + ], + "spans": [ + { + "bbox": [ + 46, + 590, + 287, + 661 + ], + "type": "text", + "content": "Following these basic ideas, we formulate our method, STE. For simplicity, we utilize covariance matrices and their inverses. Since these covariance matrices are essentially " + }, + { + "bbox": [ + 46, + 590, + 287, + 661 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 590, + 287, + 661 + ], + "type": "text", + "content": "-dimensional and include an additional simple regularizing component, our overall computations can be expressed in terms of the computation of the top " + }, + { + "bbox": [ + 46, + 590, + 287, + 661 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 590, + 287, + 661 + ], + "type": "text", + "content": " singular values of an " + }, + { + "bbox": [ + 46, + 590, + 287, + 661 + ], + "type": "inline_equation", + "content": "N \\times D" + }, + { + "bbox": [ + 46, + 590, + 287, + 661 + ], + "type": "text", + "content": " matrix (see §2.4)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 59, + 662, + 268, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 662, + 268, + 673 + ], + "spans": [ + { + "bbox": [ + 59, + 662, + 268, + 673 + ], + "type": "text", + "content": "At iteration " + }, + { + "bbox": [ + 59, + 662, + 268, + 673 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 59, + 662, + 268, + 673 + ], + "type": "text", + "content": " we follow a similar step to that of TME:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 90, + 683, + 242, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 683, + 242, + 715 + ], + "spans": [ + { + "bbox": [ + 90, + 683, + 242, + 715 + ], + "type": "interline_equation", + "content": "\\mathbf {Z} ^ {(k)} := \\sum_ {i = 1} ^ {N} \\boldsymbol {x} _ {i} \\boldsymbol {x} _ {i} ^ {\\top} / (\\boldsymbol {x} _ {i} ^ {\\top} (\\boldsymbol {\\Sigma} ^ {(k - 1)}) ^ {- 1} \\boldsymbol {x} _ {i}).", + "image_path": "8b8e2ceea755078a4354d41003d434cf689117f59b7f268ab9733ae695275755.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 72, + 545, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 97 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 97 + ], + "type": "text", + "content": "We compute the eigenvalues " + }, + { + "bbox": [ + 305, + 72, + 545, + 97 + ], + "type": "inline_equation", + "content": "\\{\\sigma_i\\}_{i = 1}^D" + }, + { + "bbox": [ + 305, + 72, + 545, + 97 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 305, + 72, + 545, + 97 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}^{(k)}" + }, + { + "bbox": [ + 305, + 72, + 545, + 97 + ], + "type": "text", + "content": " and replace each of the bottom " + }, + { + "bbox": [ + 305, + 72, + 545, + 97 + ], + "type": "inline_equation", + "content": "(D - d)" + }, + { + "bbox": [ + 305, + 72, + 545, + 97 + ], + "type": "text", + "content": " of them with " + }, + { + "bbox": [ + 305, + 72, + 545, + 97 + ], + "type": "inline_equation", + "content": "\\gamma \\cdot \\sigma_{d + 1,D}" + }, + { + "bbox": [ + 305, + 72, + 545, + 97 + ], + "type": "text", + "content": ", where" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 375, + 105, + 545, + 137 + ], + "type": 
"interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 105, + 545, + 137 + ], + "spans": [ + { + "bbox": [ + 375, + 105, + 545, + 137 + ], + "type": "interline_equation", + "content": "\\sigma_ {d + 1, D} := \\frac {1}{D - d} \\sum_ {i = d + 1} ^ {D} \\sigma_ {i}. \\tag {2}", + "image_path": "fc4e26c13d114b60bb0ed60cb0791b7a3817d9884559e3ca32208f9a0404e78c.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 148, + 545, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 148, + 545, + 220 + ], + "spans": [ + { + "bbox": [ + 304, + 148, + 545, + 220 + ], + "type": "text", + "content": "We also compute the eigenvectors of " + }, + { + "bbox": [ + 304, + 148, + 545, + 220 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}^{(k)}" + }, + { + "bbox": [ + 304, + 148, + 545, + 220 + ], + "type": "text", + "content": " and form the matrix " + }, + { + "bbox": [ + 304, + 148, + 545, + 220 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}^{(k)}" + }, + { + "bbox": [ + 304, + 148, + 545, + 220 + ], + "type": "text", + "content": " with the same eigenvectors as those of " + }, + { + "bbox": [ + 304, + 148, + 545, + 220 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}^{(k)}" + }, + { + "bbox": [ + 304, + 148, + 545, + 220 + ], + "type": "text", + "content": " and the modified eigenvalues, scaled to have trace 1. We iteratively repeat this procedure until the two estimators are sufficiently close. Algorithm 1 summarizes this procedure. Note that it is invariant to scaling of the data, similarly to TME." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 231, + 534, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 231, + 534, + 243 + ], + "spans": [ + { + "bbox": [ + 306, + 231, + 534, + 243 + ], + "type": "text", + "content": "Algorithm 1 STE: Subspace-Constrained Tyler's Estimator" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 247, + 547, + 422 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 312, + 247, + 547, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 247, + 547, + 283 + ], + "spans": [ + { + "bbox": [ + 312, + 247, + 547, + 283 + ], + "type": "text", + "content": "1: Input: " + }, + { + "bbox": [ + 312, + 247, + 547, + 283 + ], + "type": "inline_equation", + "content": "\\mathbf{X} = [\\pmb{x}_1, \\dots, \\pmb{x}_N] \\in \\mathbb{R}^{D \\times N}" + }, + { + "bbox": [ + 312, + 247, + 547, + 283 + ], + "type": "text", + "content": ": centered data matrix, " + }, + { + "bbox": [ + 312, + 247, + 547, + 283 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 312, + 247, + 547, + 283 + ], + "type": "text", + "content": ": subspace dimension, " + }, + { + "bbox": [ + 312, + 247, + 547, + 283 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 312, + 247, + 547, + 283 + ], + "type": "text", + "content": ": maximum number of iterations, " + }, + { + "bbox": [ + 312, + 247, + 547, + 283 + ], + "type": "inline_equation", + "content": "\\tau, \\gamma" + }, + { + "bbox": [ + 312, + 247, + 547, + 283 + ], + "type": "text", + "content": ": parameters." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 312, + 284, + 440, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 284, + 440, + 295 + ], + "spans": [ + { + "bbox": [ + 312, + 284, + 440, + 295 + ], + "type": "text", + "content": "2: Output: " + }, + { + "bbox": [ + 312, + 284, + 440, + 295 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 312, + 284, + 440, + 295 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 312, + 284, + 440, + 295 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 312, + 284, + 440, + 295 + ], + "type": "text", + "content": "-subspace in " + }, + { + "bbox": [ + 312, + 284, + 440, + 295 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^D" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 312, + 295, + 378, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 295, + 378, + 308 + ], + "spans": [ + { + "bbox": [ + 312, + 295, + 378, + 308 + ], + "type": "text", + "content": "3: " + }, + { + "bbox": [ + 312, + 295, + 378, + 308 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(0)} = \\mathbf{I}_D / D" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 308, + 390, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 308, + 390, + 319 + ], + "spans": [ + { + "bbox": [ + 312, + 308, + 390, + 319 + ], + "type": "text", + "content": "4: for " + }, + { + "bbox": [ + 312, + 308, + 390, + 319 + ], + "type": "inline_equation", + "content": "k = 1,2,\\ldots" + }, + { + "bbox": [ + 312, + 308, + 390, + 319 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 319, + 490, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 319, + 490, + 332 + ], + "spans": [ + { + "bbox": [ + 312, + 319, + 490, + 332 + ], + "type": "text", + "content": "5: " + }, + { + "bbox": [ + 312, + 319, + 490, + 332 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}^{(k)}\\gets \\sum_{i = 1}^{N}\\pmb {x}_i\\pmb {x}_i^\\top /\\left(\\pmb {x}_i^\\top (\\pmb{\\Sigma}^{(k - 1)})^{-1}\\pmb {x}_i\\right)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 331, + 456, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 331, + 456, + 344 + ], + "spans": [ + { + "bbox": [ + 312, + 331, + 456, + 344 + ], + "type": "text", + "content": "6: " + }, + { + "bbox": [ + 312, + 331, + 456, + 344 + ], + "type": "inline_equation", + "content": "[\\mathbf{U}^{(k)},\\mathbf{S}^{(k)},\\bar{\\mathbf{U}}^{(k)}]\\gets \\mathrm{EVD}(\\mathbf{Z}^{(k)})" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 343, + 517, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 343, + 517, + 357 + ], + "spans": [ + { + "bbox": [ + 312, + 343, + 517, + 357 + ], + "type": "text", + "content": "7: " + }, + { + "bbox": [ + 312, + 343, + 517, + 357 + ], + "type": "inline_equation", + "content": "\\sigma_{i}\\gets [\\mathbf{S}^{(k)}]_{ii}" + }, + { + "bbox": [ + 312, + 343, + 517, + 357 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 312, + 343, + 517, + 357 + ], + "type": "inline_equation", + "content": "\\sigma_{d + 1,D}\\leftarrow \\sum_{i = d + 1}^{D}\\sigma_{i} / (D - d)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 357, + 513, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 357, + 513, + 372 + ], + "spans": [ + { + "bbox": [ + 
312, + 357, + 513, + 372 + ], + "type": "text", + "content": "8: " + }, + { + "bbox": [ + 312, + 357, + 513, + 372 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathbf{S}}^{(k)}\\gets \\mathrm{diag}(\\sigma_1,\\dots ,\\sigma_d,\\gamma \\cdot \\sigma_{d + 1,D},\\dots ,\\gamma \\cdot \\sigma_{d + 1,D})," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 372, + 523, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 372, + 523, + 386 + ], + "spans": [ + { + "bbox": [ + 312, + 372, + 523, + 386 + ], + "type": "text", + "content": "9: " + }, + { + "bbox": [ + 312, + 372, + 523, + 386 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(k)}\\gets \\mathbf{U}^{(k)}\\widetilde{\\mathbf{S}}^{(k)}(\\mathbf{U}^{(k)})^{\\top} / \\mathrm{tr}\\bigl (\\mathbf{U}^{(k)}\\widetilde{\\mathbf{S}}^{(k)}(\\mathbf{U}^{(k)})^{\\top}\\bigr)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 309, + 386, + 486, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 386, + 486, + 399 + ], + "spans": [ + { + "bbox": [ + 309, + 386, + 486, + 399 + ], + "type": "text", + "content": "10: Stop if " + }, + { + "bbox": [ + 309, + 386, + 486, + 399 + ], + "type": "inline_equation", + "content": "k \\geq K" + }, + { + "bbox": [ + 309, + 386, + 486, + 399 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 309, + 386, + 486, + 399 + ], + "type": "inline_equation", + "content": "\\| \\pmb{\\Sigma}^{(k)} - \\pmb{\\Sigma}^{(k-1)} \\|_F < \\tau" + }, + { + "bbox": [ + 309, + 386, + 486, + 399 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 309, + 399, + 355, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 399, + 355, + 409 + ], + "spans": [ + { + "bbox": [ + 309, + 399, + 355, + 409 + ], + "type": "text", + "content": "11: end for" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 309, + 410, + 476, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 410, + 476, + 422 + ], + "spans": [ + { + "bbox": [ + 309, + 410, + 476, + 422 + ], + "type": "text", + "content": "12: " + }, + { + "bbox": [ + 309, + 410, + 476, + 422 + ], + "type": "inline_equation", + "content": "L = \\operatorname{Span}" + }, + { + "bbox": [ + 309, + 410, + 476, + 422 + ], + "type": "text", + "content": " of the first " + }, + { + "bbox": [ + 309, + 410, + 476, + 422 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 309, + 410, + 476, + 422 + ], + "type": "text", + "content": " columns of " + }, + { + "bbox": [ + 309, + 410, + 476, + 422 + ], + "type": "inline_equation", + "content": "\\mathbf{U}^{(k)}" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 445, + 455, + 458 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 445, + 455, + 458 + ], + "spans": [ + { + "bbox": [ + 306, + 445, + 455, + 458 + ], + "type": "text", + "content": "2.4. 
Computational Complexity" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "spans": [ + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": "Setting " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "w_{i}^{(k)} = (\\pmb{x}_{i}^{\\top}(\\pmb{\\Sigma}^{(k - 1)})^{-1}\\pmb{x}_{i})^{-1}" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": ", we can express " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}^{(k)}" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}^{(k)} = \\widetilde{\\mathbf{X}}\\widetilde{\\mathbf{X}}^{\\top}" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathbf{X}} = [(w_1^{(k)})^{1 / 2}\\pmb{x}_1,\\dots,(w_N^{(k)})^{1 / 2}\\pmb{x}_N]" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": ". With some abuse of notation we denote by " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\sigma_{1},\\ldots ,\\sigma_{D}" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": " the eigenvalues of " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(k - 1)}" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": " (and not " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(k)}" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": "). Since they are scaled to have trace 1, " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\sigma_{d + 1,D} = (1 - \\sum_{j = 1}^{d}\\sigma_{j}) / (D - d)" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": ". We thus only need the top " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": " eigenvectors and top " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": " eigenvalues of " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(k - 1)}" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": " to update " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\widetilde{w}_i^{(k)}" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": ". 
Therefore, the complexity of STE can be of order " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "O(KNDd)" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": " if a special fast algorithm is utilized for computing only the top " + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 463, + 545, + 580 + ], + "type": "text", + "content": " eigenvectors." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 306, + 587, + 438, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 587, + 438, + 600 + ], + "spans": [ + { + "bbox": [ + 306, + 587, + 438, + 600 + ], + "type": "text", + "content": "2.5. Implementation Details" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 304, + 605, + 545, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 641 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 641 + ], + "type": "text", + "content": "STE depends on the parameters " + }, + { + "bbox": [ + 304, + 605, + 545, + 641 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 605, + 545, + 641 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 605, + 545, + 641 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 605, + 545, + 641 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 605, + 545, + 641 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 605, + 545, + 641 + ], + "type": "text", + "content": " and the initialization of " + }, + { + "bbox": [ + 304, + 605, + 545, + 641 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(0)}" + }, + { + "bbox": [ + 304, + 605, + 545, + 641 + ], + "type": "text", + "content": ". The first two parameters are rather standard in iterative procedures and do not raise any concern." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "Our theory sheds some light on possible choices of " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": " and in particular it indicates that the algorithm can be more sensitive to choices of " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": " when the quantity defined later in (3) is relatively small. In this case, it may be beneficial to try several values of " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": ". We propose here a constructive way of doing it. 
We first form a sequence of " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "0 < \\gamma \\leq 1" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": ", e.g., " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\gamma_{k} = 1 / k, k = 1,\\dots,m" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": ". In order to" + } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14577" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": "determine the best choice of " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ", we compute the distance of each data point " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": " to each subspace " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "L_{k}" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ", corresponding to the choice of " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\gamma_{k}" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\mathrm{dist}(\\mathbf{x}, L_{k}) = \\| \\mathbf{x} - \\mathbf{P}_{L_{k}} \\mathbf{x} \\|" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ". We set a threshold " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ", obtained by the median among all points and all subspaces and for each subspace, " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "L_{k}" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": ", we count the number of the inliers with distance below this threshold. The best " + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\gamma_{k}" + }, + { + "bbox": [ + 46, + 72, + 289, + 167 + ], + "type": "text", + "content": " is determined according to the subspace yielding the largest number of inliers. We describe this procedure in Algorithm 2." 
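As a complement to the prose above, the following is a small Python sketch of this inlier-counting selection rule; Algorithm 2 below states the same procedure as pseudocode. The helper `ste_subspace` is a placeholder we assume for any routine that returns an orthonormal basis of the subspace recovered with a given gamma; it is not a function from the released code.

```python
import numpy as np

def select_gamma(X, d, gammas, ste_subspace):
    """Choose gamma by counting points close to each candidate subspace (sketch).

    X            : (D, N) centered data matrix
    d            : subspace dimension
    gammas       : candidate values, e.g. [1.0 / k for k in range(1, m + 1)]
    ste_subspace : callable (X, d, gamma) -> (D, d) orthonormal basis of L_k
    """
    dists = []
    for g in gammas:
        U = ste_subspace(X, d, g)               # basis of the subspace for this gamma
        residual = X - U @ (U.T @ X)            # x - P_L x for every data point
        dists.append(np.linalg.norm(residual, axis=0))
    dists = np.stack(dists)                     # shape (m, N): all points, all subspaces
    zeta = np.median(dists)                     # one threshold over all points and subspaces
    inlier_counts = (dists < zeta).sum(axis=1)  # inliers per candidate subspace
    return gammas[int(np.argmax(inlier_counts))]
```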
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 168, + 287, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 168, + 287, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 168, + 287, + 251 + ], + "type": "text", + "content": "For simplicity, we initialize with " + }, + { + "bbox": [ + 46, + 168, + 287, + 251 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}^{(0)} = \\mathbf{I}_D / D" + }, + { + "bbox": [ + 46, + 168, + 287, + 251 + ], + "type": "text", + "content": " and note that with this choice " + }, + { + "bbox": [ + 46, + 168, + 287, + 251 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}^{(1)}" + }, + { + "bbox": [ + 46, + 168, + 287, + 251 + ], + "type": "text", + "content": " reflects the trimmed covariance matrix and thus reflects the PCA subspace. One can also initialize with TME or other subspaces (see §3 where the theory of STE is discussed). One can further try several initialization (with possible random components) and use a strategy similar to Algorithm 2 to choose the best one." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 251, + 287, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 287, + 289 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 287, + 289 + ], + "type": "text", + "content": "At last, we remark that when computing " + }, + { + "bbox": [ + 47, + 251, + 287, + 289 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}^{(k)}" + }, + { + "bbox": [ + 47, + 251, + 287, + 289 + ], + "type": "text", + "content": " we want to ensure that " + }, + { + "bbox": [ + 47, + 251, + 287, + 289 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i^\\top (\\pmb{\\Sigma}^{(k-1)})^{-1}\\pmb{x}_i" + }, + { + "bbox": [ + 47, + 251, + 287, + 289 + ], + "type": "text", + "content": " cannot be zero and we thus add the arbitrarily small number " + }, + { + "bbox": [ + 47, + 251, + 287, + 289 + ], + "type": "inline_equation", + "content": "10^{-15}" + }, + { + "bbox": [ + 47, + 251, + 287, + 289 + ], + "type": "text", + "content": " to this value." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "code", + "bbox": [ + 53, + 317, + 288, + 439 + ], + "blocks": [ + { + "bbox": [ + 48, + 300, + 201, + 312 + ], + "lines": [ + { + "bbox": [ + 48, + 300, + 201, + 312 + ], + "spans": [ + { + "bbox": [ + 48, + 300, + 201, + 312 + ], + "type": "text", + "content": "Algorithm 2 Estimating best " + }, + { + "bbox": [ + 48, + 300, + 201, + 312 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 48, + 300, + 201, + 312 + ], + "type": "text", + "content": " for STE" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "lines": [ + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "spans": [ + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": "1: Input: " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "\\mathbf{X} = [\\pmb{x}_1, \\dots, \\pmb{x}_N] \\in \\mathbb{R}^{D \\times N}" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": ": centered data matrix, " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": ": subspace dimension, " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "\\{\\gamma_1, \\dots, \\gamma_m\\}" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": ": a set of pre-selected " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": "'s. \n2: Output: " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "\\gamma^*" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": ": optimal " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": " among " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "\\{\\gamma_1, \\dots, \\gamma_m\\}" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": " \n3: for " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "j = 1, 2, \\dots, m" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": " do \n4: " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "L^{(j)} \\gets \\mathrm{STE}(\\mathbf{X}, d, \\gamma_j)" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": " \n5: " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "\\mathcal{D}^{(j)} \\gets \\{\\mathrm{dist}(\\pmb{x}_i, L^{(j)}) | \\pmb{x}_i \\in \\mathcal{X}\\}" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": ". 
\n6: end for \n7: Set " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "\\zeta = \\mathrm{median}(\\{\\mathcal{D}^{(1)}, \\dots, \\mathcal{D}^{(m)}\\})" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": " \n8: " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "j^* = \\operatorname{argmax}_{1 \\leq j \\leq m} |\\mathcal{D}^{(j)}| < \\zeta" + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "text", + "content": " \n9: " + }, + { + "bbox": [ + 53, + 317, + 288, + 439 + ], + "type": "inline_equation", + "content": "\\gamma^* = \\gamma_j^*" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "algorithm" + }, + { + "bbox": [ + 47, + 455, + 249, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 455, + 249, + 467 + ], + "spans": [ + { + "bbox": [ + 47, + 455, + 249, + 467 + ], + "type": "text", + "content": "2.6. STE fuses TME and a Variant of FMS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "content": "STE is formally similar to both TME and FMS. Indeed, at each iteration these algorithms essentially compute " + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}^{(k + 1)} = \\sum_{i = 1}^{N}w_{i}\\pmb{x}_{i}\\pmb{x}_{i}^{\\top}" + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "inline_equation", + "content": "w_{i}\\equiv w_{i}\\bigl (\\boldsymbol{\\Sigma}^{(k)}\\bigr)" + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "content": ". We summarize the formal weights for FMS (with any choice of " + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "content": " for minimizing an " + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "content": " energy in [24]), TME and STE. We ignore an additional scaling constant for TME and STE, obtained by dividing " + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "inline_equation", + "content": "w_{i}\\pmb{x}_{i}\\pmb{x}_{i}^{\\top}" + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "content": " above by its trace, and a regularization parameter " + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "content": " for FMS. 
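The following sketch mirrors Algorithm 2 above, treating the STE routine itself as a black-box callable. Step 8 of the listing is read here as "pick the γ whose residual set has the most entries below the pooled median ζ"; that reading, the pca_stub stand-in, and all names are assumptions of this sketch.

```python
import numpy as np

def select_gamma(X, d, gammas, ste):
    """Sketch of Algorithm 2: choose the best gamma for STE.

    X      : (D, N) centered data matrix.
    d      : target subspace dimension.
    gammas : sequence of candidate gamma values.
    ste    : callable ste(X, d, gamma) -> (D, d) orthonormal basis of the
             estimated subspace (stand-in for the paper's STE routine).
    """
    residuals = []
    for g in gammas:
        U = ste(X, d, g)                                   # line 4: L^(j)
        proj = U @ (U.T @ X)                               # projection onto L^(j)
        residuals.append(np.linalg.norm(X - proj, axis=0)) # line 5: D^(j)
    zeta = np.median(np.concatenate(residuals))            # line 7: pooled median
    counts = [np.sum(dist < zeta) for dist in residuals]   # line 8, as read above
    return gammas[int(np.argmax(counts))]                  # line 9: gamma^*

# example with a placeholder "STE" that simply returns the top-d PCA basis
def pca_stub(X, d, gamma):
    U, _, _ = np.linalg.svd(X, full_matrices=False)
    return U[:, :d]

rng = np.random.default_rng(0)
X = rng.standard_normal((9, 200))
best = select_gamma(X - X.mean(axis=1, keepdims=True), d=8,
                    gammas=[0.1, 0.2, 0.5], ste=pca_stub)
```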
We express these formulas using the eigenvalues " + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "inline_equation", + "content": "\\sigma_1,\\ldots ,\\sigma_D" + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "content": " and eigenvectors " + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "inline_equation", + "content": "\\pmb{u}_1,\\dots ,\\pmb{u}_D" + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "content": " of the weighted sample covariance, " + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "inline_equation", + "content": "\\sum_{i = 1}^{N}w_{i}\\pmb{x}_{i}\\pmb{x}_{i}^{\\top}" + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "content": " for each method and " + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "inline_equation", + "content": "\\beta \\coloneqq \\gamma \\cdot \\sigma_{d + 1,D}" + }, + { + "bbox": [ + 46, + 474, + 287, + 606 + ], + "type": "text", + "content": " (see (2)) as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 615, + 272, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 615, + 272, + 712 + ], + "spans": [ + { + "bbox": [ + 62, + 615, + 272, + 712 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} w _ {i} ^ {\\mathrm {F M S}} = \\frac {1}{\\left(\\sum_ {j = d + 1} ^ {D} \\left(\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {u} _ {j}\\right) ^ {2}\\right) ^ {(2 - p) / 2}}, \\\\ w _ {i} ^ {\\mathrm {T M E}} = \\frac {1}{\\sum_ {j = 1} ^ {D} \\sigma_ {j} ^ {- 1} (\\pmb {x} _ {i} ^ {\\top} \\pmb {u} _ {j}) ^ {2}}, \\\\ w _ {i} ^ {\\mathrm {S T E}} = \\frac {1}{\\sum_ {j = 1} ^ {d} \\sigma_ {j} ^ {- 1} (\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {u} _ {j}) ^ {2} + \\beta^ {- 1} \\sum_ {j = d + 1} ^ {D} (\\boldsymbol {x} _ {i} ^ {\\top} \\boldsymbol {u} _ {j}) ^ {2}}. \\\\ \\end{array}", + "image_path": "3bb2a6e488005dcbddebcbd79cdaa094a7948941fe6cd3094f5e60d017bdaedf.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "text", + "content": "These weights aim to mitigate the impact of outliers in different ways. For FMS, " + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "inline_equation", + "content": "\\sum_{j=d+1}^{D}(\\boldsymbol{x}_i^\\top\\boldsymbol{u}_j)^2" + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "text", + "content": " is the squared distance of a data point " + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_i" + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "text", + "content": " to the subspace " + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "text", + "content": ". 
Thus for " + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "inline_equation", + "content": "p < 2" + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "inline_equation", + "content": "w_i^{\\mathrm{FMS}}" + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "text", + "content": " is smaller for \"subspace-outliers\", where the robustness to such outliers increases when " + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "inline_equation", + "content": "p \\geq 0" + }, + { + "bbox": [ + 305, + 72, + 547, + 132 + ], + "type": "text", + "content": " decreases." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 133, + 547, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 133, + 547, + 215 + ], + "spans": [ + { + "bbox": [ + 304, + 133, + 547, + 215 + ], + "type": "text", + "content": "The weights of TME are inversely proportional to the squared Mahalanobis distance of " + }, + { + "bbox": [ + 304, + 133, + 547, + 215 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i" + }, + { + "bbox": [ + 304, + 133, + 547, + 215 + ], + "type": "text", + "content": " to the empirical distribution. They mitigate the effect of \"covariance-outliers\". If the dataset is concentrated on a " + }, + { + "bbox": [ + 304, + 133, + 547, + 215 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 133, + 547, + 215 + ], + "type": "text", + "content": "-subspace where " + }, + { + "bbox": [ + 304, + 133, + 547, + 215 + ], + "type": "inline_equation", + "content": "k < d" + }, + { + "bbox": [ + 304, + 133, + 547, + 215 + ], + "type": "text", + "content": ", then TME can provide smaller weights to points lying away from this subspace, unlike FMS that does not distinguish between points within the larger " + }, + { + "bbox": [ + 304, + 133, + 547, + 215 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 133, + 547, + 215 + ], + "type": "text", + "content": "-subspace." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "spans": [ + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "text", + "content": "We note that the weights of STE fuse the above two weights. Within a " + }, + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "text", + "content": "-subspace, they use the shape of the data. They can thus avoid outliers within this " + }, + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "text", + "content": "-subspace. Within the orthogonal component of this subspace, they use a term proportional to that of FMS with " + }, + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "inline_equation", + "content": "p = 0" + }, + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "text", + "content": ". We remark that such " + }, + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "inline_equation", + "content": "\\ell_0" + }, + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "text", + "content": " minimization has a clear interpretation for RSR, though it is generally hard to guarantee. 
Indeed, [24] has no guarantees for FMS with " + }, + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "inline_equation", + "content": "p = 0" + }, + { + "bbox": [ + 304, + 216, + 547, + 312 + ], + "type": "text", + "content": ". It can also yield unwanted spurious stationary points [26]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 321, + 356, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 321, + 356, + 335 + ], + "spans": [ + { + "bbox": [ + 306, + 321, + 356, + 335 + ], + "type": "text", + "content": "3. Theory" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "text", + "content": "We review a theoretical guarantee for STE, whose proof is given in [28]. It requires some conditions and we verify they hold with high probability under the asymptotic generalized haystack model. We assume a noiseless inliers-outliers RSR model. Let " + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "inline_equation", + "content": "L_{*}" + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "text", + "content": " denote the underlying " + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "text", + "content": "-subspace in " + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{D}" + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "inline_equation", + "content": "\\mathcal{X}_{in} = \\mathcal{X} \\cap L_{*}" + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "inline_equation", + "content": "\\mathcal{X}_{out} = \\mathcal{X} \\setminus \\mathcal{X}_{in}" + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "text", + "content": " be the set of inliers and outliers, respectively, and " + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "inline_equation", + "content": "n_1 = |\\mathcal{X}_{in}|" + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "inline_equation", + "content": "n_0 = |\\mathcal{X}_{out}|" + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "text", + "content": " be the number of inliers and outliers. Our first assumption is a mild one on how well-conditioned the inliers are in " + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "inline_equation", + "content": "L_{*}" + }, + { + "bbox": [ + 305, + 341, + 545, + 449 + ], + "type": "text", + "content": " (compare e.g., other assumptions in [25, 32])." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 449, + 545, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 449, + 545, + 472 + ], + "spans": [ + { + "bbox": [ + 305, + 449, + 545, + 472 + ], + "type": "text", + "content": "Assumption 1: Any " + }, + { + "bbox": [ + 305, + 449, + 545, + 472 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 305, + 449, + 545, + 472 + ], + "type": "text", + "content": "-subspace of " + }, + { + "bbox": [ + 305, + 449, + 545, + 472 + ], + "type": "inline_equation", + "content": "L_{*}" + }, + { + "bbox": [ + 305, + 449, + 545, + 472 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 449, + 545, + 472 + ], + "type": "inline_equation", + "content": "1 \\leq k \\leq d" + }, + { + "bbox": [ + 305, + 449, + 545, + 472 + ], + "type": "text", + "content": ", contains at most " + }, + { + "bbox": [ + 305, + 449, + 545, + 472 + ], + "type": "inline_equation", + "content": "n_1 k / d" + }, + { + "bbox": [ + 305, + 449, + 545, + 472 + ], + "type": "text", + "content": " points." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "text", + "content": "Motivation for Assumption 2: The ratio of inliers per outliers, " + }, + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "inline_equation", + "content": "n_1 / n_0" + }, + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "text", + "content": ", in RSR is often referred to as the SNR (signal-to-noise ratio) [25, 32, 33]. The smaller it is, the best the subspace recovery is. We define the dimension-scaled SNR (DS-SNR) as the SNR obtained when scaling " + }, + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "inline_equation", + "content": "n_1" + }, + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "inline_equation", + "content": "n_0" + }, + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "text", + "content": " by their respective dimensions (of " + }, + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "inline_equation", + "content": "L_*" + }, + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "inline_equation", + "content": "L_*^\\perp" + }, + { + "bbox": [ + 304, + 473, + 545, + 544 + ], + "type": "text", + "content": "):" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 376, + 551, + 545, + 576 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 551, + 545, + 576 + ], + "spans": [ + { + "bbox": [ + 376, + 551, + 545, + 576 + ], + "type": "interline_equation", + "content": "\\mathrm {D S} - \\mathrm {S N R} := \\frac {n _ {1} / d}{n _ {0} / (D - d)}. 
\\tag {3}", + "image_path": "94cae400ea3de8ba939dd86ecfd0c6ddb4648dc27439775669cc03c2b40d29d6.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 582, + 545, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 677 + ], + "type": "text", + "content": "Zhang [50] showed that exact recovery by TME is guaranteed whenever DS-SNR " + }, + { + "bbox": [ + 304, + 582, + 545, + 677 + ], + "type": "inline_equation", + "content": ">1" + }, + { + "bbox": [ + 304, + 582, + 545, + 677 + ], + "type": "text", + "content": " (assuming general position assumptions on the inliers and outliers) and Hardt and Moitra [16] showed that when considering general datasets with general position assumptions on the inliers and outliers, the RSR problem is SSE hard if the DS-SNR is lower than 1. We aim to show that under the following weaker generic condition, STE can obtain exact recovery with DS-SNR, strictly lower than 1." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 677, + 547, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 677, + 547, + 689 + ], + "spans": [ + { + "bbox": [ + 306, + 677, + 547, + 689 + ], + "type": "text", + "content": "Assumption 2: DS-SNR " + }, + { + "bbox": [ + 306, + 677, + 547, + 689 + ], + "type": "inline_equation", + "content": "> \\gamma" + }, + { + "bbox": [ + 306, + 677, + 547, + 689 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 306, + 677, + 547, + 689 + ], + "type": "inline_equation", + "content": "\\gamma < 1" + }, + { + "bbox": [ + 306, + 677, + 547, + 689 + ], + "type": "text", + "content": " is the STE parameter." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "type": "text", + "content": "Our last assumption requires a sufficiently good initialization for STE, but also implicitly involves additional hidden" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14578" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 167 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 167 + ], + "type": "text", + "content": "assumptions on the inliers and outliers. This is expected, since Assumption 1 does not require anything from the outliers and also has a very weak requirement from the inliers. To formulate the new assumption we define below some some basic condition numbers for good initialization (which are more complicated than the one for initialization by PCA suggested by [33] and [32]) and also quantities similar to the ones used to guarantee landscape stability in the theory of RSR [25, 27, 33, 51]." 
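Equation (3) is easy to evaluate; the numbers below are purely illustrative and use the d = 8, D = 9 geometry of the fundamental-matrix setting in §4.1 to show how the DS-SNR can drop below 1 even when inliers far outnumber outliers.

```python
def ds_snr(n1: int, n0: int, d: int, D: int) -> float:
    """Dimension-scaled SNR from (3): (n1 / d) / (n0 / (D - d))."""
    return (n1 / d) / (n0 / (D - d))

# With d = 8 inside D = 9, four times more inliers than outliers still gives
# DS-SNR below 1, the regime where Assumption 2 forces a small gamma:
print(ds_snr(n1=400, n0=100, d=8, D=9))   # 0.5, while the plain SNR n1/n0 = 4.0
```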
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 167, + 287, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 167, + 287, + 192 + ], + "spans": [ + { + "bbox": [ + 47, + 167, + 287, + 192 + ], + "type": "text", + "content": "Definitions required for Assumption 3: Recall that " + }, + { + "bbox": [ + 47, + 167, + 287, + 192 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(0)}" + }, + { + "bbox": [ + 47, + 167, + 287, + 192 + ], + "type": "text", + "content": " denotes the initial value in Algorithm 1, and denote" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 117, + 198, + 216, + 214 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 198, + 216, + 214 + ], + "spans": [ + { + "bbox": [ + 117, + 198, + 216, + 214 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\Sigma} _ {L _ {1}, L _ {2}} ^ {(0)} = \\mathbf {U} _ {L _ {1}} ^ {\\top} \\boldsymbol {\\Sigma} ^ {(0)} \\mathbf {U} _ {L _ {2}}.", + "image_path": "12ab4a792e382c31f53ff88d6c45b5c1624a0316c99f8e40e0de19b291e27df8.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 219, + 211, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 219, + 211, + 231 + ], + "spans": [ + { + "bbox": [ + 47, + 219, + 211, + 231 + ], + "type": "text", + "content": "We define the following condition number" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 76, + 236, + 257, + 277 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 236, + 257, + 277 + ], + "spans": [ + { + "bbox": [ + 76, + 236, + 257, + 277 + ], + "type": "interline_equation", + "content": "\\kappa_ {1} = \\frac {\\sigma_ {d} \\left(\\boldsymbol {\\Sigma} _ {L _ {*} , L _ {*}} ^ {(0)} - \\boldsymbol {\\Sigma} _ {L _ {*} , L _ {*} ^ {\\perp}} ^ {(0)} \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*}} ^ {(0) - 1} \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*}} ^ {(0)}\\right)}{\\sigma_ {1} \\left(\\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*}} ^ {(0)}\\right)}.", + "image_path": "1f1031fee04ecc8ea3e4c92617ace8d5a0b975fdf96982584f8df8b9af95e5e7.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 282, + 287, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 282, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 282, + 287, + 319 + ], + "type": "text", + "content": "To get a better intuition to this primary quantity of Assumption 3, we first express the initial estimator " + }, + { + "bbox": [ + 47, + 282, + 287, + 319 + ], + "type": "inline_equation", + "content": "\\Sigma^{(0)}" + }, + { + "bbox": [ + 47, + 282, + 287, + 319 + ], + "type": "text", + "content": ", using basis vectors for " + }, + { + "bbox": [ + 47, + 282, + 287, + 319 + ], + "type": "inline_equation", + "content": "L_{*}" + }, + { + "bbox": [ + 47, + 282, + 287, + 319 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 282, + 287, + 319 + ], + "type": "inline_equation", + "content": "L_{*}^{\\perp}" + }, + { + "bbox": [ + 47, + 282, + 287, + 319 + ], + "type": "text", + "content": ", as a " + }, + { + "bbox": [ + 47, + 282, + 287, + 319 + ], + "type": "inline_equation", + "content": "2\\times 2" + }, + { + "bbox": [ + 47, + 282, + 287, + 319 + ], + "type": "text", + "content": " block matrix" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 323, + 214, + 358 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 
121, + 323, + 214, + 358 + ], + "spans": [ + { + "bbox": [ + 121, + 323, + 214, + 358 + ], + "type": "interline_equation", + "content": "\\left( \\begin{array}{c c} \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*}} ^ {(0)} & \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*} ^ {\\perp}} ^ {(0)} \\\\ \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*}} ^ {(0)} & \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*} ^ {\\perp}} ^ {(0)} \\end{array} \\right).", + "image_path": "184248b33e38bd80185814f4b306a45e0400cef66c0bceaedfab71b81760ed02.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 364, + 287, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 364, + 287, + 391 + ], + "spans": [ + { + "bbox": [ + 47, + 364, + 287, + 391 + ], + "type": "text", + "content": "Defining " + }, + { + "bbox": [ + 47, + 364, + 287, + 391 + ], + "type": "inline_equation", + "content": "\\Sigma' = \\Sigma_{L_*, L_*}^{(0)} \\Sigma_{L_*^\\perp, L_*^\\perp}^{(0) - 1} \\Sigma_{L_*^\\perp, L_*}^{(0)}" + }, + { + "bbox": [ + 47, + 364, + 287, + 391 + ], + "type": "text", + "content": ", we decompose this block matrix as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 76, + 395, + 259, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 395, + 259, + 430 + ], + "spans": [ + { + "bbox": [ + 76, + 395, + 259, + 430 + ], + "type": "interline_equation", + "content": "\\left( \\begin{array}{c c} \\boldsymbol {\\Sigma} ^ {\\prime} & \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*}} ^ {(0)} \\\\ \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*}} ^ {(0)} & \\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp}, L _ {*}} ^ {(0) ^ {\\perp}}, \\end{array} \\right) + \\left( \\begin{array}{c c} \\boldsymbol {\\Sigma} _ {L _ {*}, L _ {*}} ^ {(0)} - \\boldsymbol {\\Sigma} ^ {\\prime} & 0 \\\\ 0 & 0 \\end{array} \\right).", + "image_path": "2fdf858f541cd9a42bc7b055b5490a8c4b0077f01f358d10cf03455b9ec974c4.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "spans": [ + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "content": "We note that the numerator of " + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "inline_equation", + "content": "\\kappa_{1}" + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "content": "-th eigenvalue of the second matrix in the above sum. We show in [28] that this eigenvalue is positive if " + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(0)}" + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "content": " is positive definite, which can be easily enforced. The condition number is thus the ratio between the smallest positive eigenvalue of the second matrix of the sum and the largest eigenvalue of the component of the first matrix associated with " + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "inline_equation", + "content": "L_{*}^{\\perp}" + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "content": ". 
Therefore, " + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "inline_equation", + "content": "\\kappa_{1}" + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "content": " expresses a ratio between a quantifier of a " + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "content": "-dimensional component of " + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(0)}" + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "content": ", associated with " + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "inline_equation", + "content": "L_{*}" + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "content": ", and a quantifier of the projection onto " + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "inline_equation", + "content": "L_{*}^{\\perp}" + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "content": " of a full rank component of " + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(0)}" + }, + { + "bbox": [ + 47, + 434, + 287, + 553 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 554, + 287, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 554, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 47, + 554, + 287, + 589 + ], + "type": "text", + "content": "We also define " + }, + { + "bbox": [ + 47, + 554, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\Sigma_{in,*}" + }, + { + "bbox": [ + 47, + 554, + 287, + 589 + ], + "type": "text", + "content": " as the TME solution to the set of the projected inliers " + }, + { + "bbox": [ + 47, + 554, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{U}_{L^*}\\pmb {x}|\\pmb {x}\\in \\mathcal{X}_{in}\\} \\subset \\mathbb{R}^d" + }, + { + "bbox": [ + 47, + 554, + 287, + 589 + ], + "type": "text", + "content": " and the following two condition numbers" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 82, + 594, + 252, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 594, + 252, + 628 + ], + "spans": [ + { + "bbox": [ + 82, + 594, + 252, + 628 + ], + "type": "interline_equation", + "content": "\\kappa_ {2} = \\frac {\\sigma_ {1} \\left(\\boldsymbol {\\Sigma} _ {L _ {*} ^ {\\perp} , L _ {*} ^ {\\perp}} ^ {(0)}\\right)}{\\sigma_ {D} \\left(\\boldsymbol {\\Sigma} ^ {(0)}\\right)} \\text {a n d} \\kappa_ {i n} = \\frac {\\sigma_ {1} \\left(\\boldsymbol {\\Sigma} _ {i n , *}\\right)}{\\sigma_ {d} \\left(\\boldsymbol {\\Sigma} _ {i n , *}\\right)}.", + "image_path": "9a4e2069083afc190e7a0a514f43c5ca18a45c201574fadd462b18f2b6a6f1b6.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 632, + 287, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 632, + 287, + 680 + ], + "spans": [ + { + "bbox": [ + 47, + 632, + 287, + 680 + ], + "type": "text", + "content": "We note that " + }, + { + "bbox": [ + 47, + 632, + 287, + 680 + ], + "type": "inline_equation", + "content": "\\kappa_{in}" + }, + { + "bbox": [ + 47, + 632, + 287, + 680 + ], + "type": "text", + "content": " is analogous to the condition number in (25) of [32], where we replace the sample covariance by the TME estimator. 
An analog to the alignment of outliers statistic [27, 33] for STE is" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 116, + 684, + 218, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 684, + 218, + 715 + ], + "spans": [ + { + "bbox": [ + 116, + 684, + 218, + 715 + ], + "type": "interline_equation", + "content": "\\mathcal {A} = \\left\\| \\sum_ {\\mathbf {x} \\in \\mathcal {X} _ {o u t}} \\frac {\\mathbf {x x} ^ {\\top}}{\\| \\mathbf {U} _ {L _ {*} ^ {\\perp}} \\mathbf {x} \\| ^ {2}} \\right\\|.", + "image_path": "a48653615591312a7d6e5ab26eb3f30afe9345683f6d829e9a80b1566f24f3fe.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 72, + 508, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 508, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 508, + 84 + ], + "type": "text", + "content": "An analog to the stability statistic [27, 33] for STE is" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 375, + 92, + 476, + 123 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 92, + 476, + 123 + ], + "spans": [ + { + "bbox": [ + 375, + 92, + 476, + 123 + ], + "type": "interline_equation", + "content": "\\mathcal{S} = \\sigma_{d + 1,D}\\Bigl(\\sum_{\\mathbf{x}\\in \\mathcal{X}}\\frac{\\mathbf{x}\\mathbf{x}^{\\top}}{\\| \\mathbf{x}\\|^{2}}\\Bigr),", + "image_path": "5ec5e420bcf4712a35d1e567f43d2da673c4ca8bd6060ce9b341cf17d39c398e.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 133, + 451, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 133, + 451, + 144 + ], + "spans": [ + { + "bbox": [ + 305, + 133, + 451, + 144 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 133, + 451, + 144 + ], + "type": "inline_equation", + "content": "\\sigma_{d + 1,D}(\\mathbf{X})" + }, + { + "bbox": [ + 305, + 133, + 451, + 144 + ], + "type": "text", + "content": " was defined in (2)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 144, + 542, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 144, + 542, + 156 + ], + "spans": [ + { + "bbox": [ + 306, + 144, + 542, + 156 + ], + "type": "text", + "content": "Assumption 3: There exists " + }, + { + "bbox": [ + 306, + 144, + 542, + 156 + ], + "type": "inline_equation", + "content": "C = C(\\gamma, \\mathrm{DS-SNR}) > 0" + }, + { + "bbox": [ + 306, + 144, + 542, + 156 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 166, + 545, + 198 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 166, + 545, + 198 + ], + "spans": [ + { + "bbox": [ + 316, + 166, + 545, + 198 + ], + "type": "interline_equation", + "content": "\\kappa_ {1} \\geq C \\frac {d \\kappa_ {i n} \\mathcal {A}}{n _ {1}} \\left(\\kappa_ {i n} + \\frac {\\mathcal {A}}{\\frac {n _ {1}}{d} - \\gamma \\frac {n _ {0}}{D - d}} + \\frac {\\kappa_ {2} \\mathcal {A}}{\\gamma \\mathcal {S}} (1 + \\kappa_ {i n})\\right). 
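A sketch of the alignment statistic A and the stability statistic S defined above. "U_L x" is taken to mean the coordinate projection U^T x, consistent with the earlier block notation, and σ_{d+1,D} is approximated by the average of the D−d smallest eigenvalues; both are assumptions of this sketch (the paper's (2) gives the exact definition).

```python
import numpy as np

def alignment_and_stability(X_out, X_all, U_perp, d):
    """Alignment statistic A and stability statistic S from the displays above.

    X_out  : (D, n0) outliers, X_all : (D, N) all data points (as columns).
    U_perp : (D, D-d) orthonormal basis of L_*^perp.
    """
    D = X_all.shape[0]
    # A = || sum_{x in X_out} x x^T / ||U_perp^T x||^2 ||   (spectral norm)
    res = np.linalg.norm(U_perp.T @ X_out, axis=0) ** 2
    A = np.linalg.norm((X_out / res) @ X_out.T, 2)
    # S = sigma_{d+1,D}( sum_{x in X} x x^T / ||x||^2 )
    M = (X_all / np.linalg.norm(X_all, axis=0) ** 2) @ X_all.T
    eig = np.sort(np.linalg.eigvalsh(M))      # ascending eigenvalues
    S = eig[: D - d].mean()                   # average of the D-d smallest ones
    return A, S
```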
\\tag {4}", + "image_path": "605df5e67cebc61ece7a3efe74edf886295477f3ede894181fa071e93c29a89f.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "spans": [ + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "text", + "content": "The exact technical requirement on " + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "text", + "content": " is specified in [28]. In general, the larger the RHS of (4), the more restricted the choice of " + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "inline_equation", + "content": "\\Sigma^{(0)}" + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "text", + "content": " is. In particular, when " + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "inline_equation", + "content": "\\kappa_1 = \\infty" + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "text", + "content": ", the definition of " + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "inline_equation", + "content": "\\kappa_1" + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "text", + "content": " implies that " + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "inline_equation", + "content": "\\mathrm{Im}(\\Sigma^{(0)}) = L_*" + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "text", + "content": ", so the subspace is already recovered by the initial estimate. Therefore, reducing the lower bound of " + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "inline_equation", + "content": "\\kappa_1" + }, + { + "bbox": [ + 304, + 205, + 545, + 325 + ], + "type": "text", + "content": " may allow some flexibility, so a marginally suboptimal initialization could still work out. In [28], we show that under the asymptotic generalized haystack model, Assumption 3 can be interpreted as an upper bound on the largest principal angle between the initial and ground truth subspaces." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 325, + 545, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 325, + 545, + 397 + ], + "spans": [ + { + "bbox": [ + 304, + 325, + 545, + 397 + ], + "type": "text", + "content": "Generic Theory: The next theorem suggests that under assumptions 1-3, STE nicely converges to an estimator that recovers " + }, + { + "bbox": [ + 304, + 325, + 545, + 397 + ], + "type": "inline_equation", + "content": "L_{*}" + }, + { + "bbox": [ + 304, + 325, + 545, + 397 + ], + "type": "text", + "content": ". The main significance of this theory is that its assumptions can allow DS-SNR lower than 1 for special instances of datasets (for which the assumptions hold), unlike the general recovery theories of [16] and [50]." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "spans": [ + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "content": "Theorem 1. 
Under assumptions 1-3, the sequence " + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(k)}" + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "content": " generated by STE converges to " + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "inline_equation", + "content": "\\mathbf{U}_{L_{*}}\\pmb{\\Sigma}_{in,*}\\mathbf{U}_{L_{*}}^{\\top}" + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "content": ", the TME solution for the set of inliers " + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "inline_equation", + "content": "\\mathcal{X}_{in}" + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "content": ". In addition, let " + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "inline_equation", + "content": "L^{(k)}" + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "content": " be the subspace spanned by the top " + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "content": " eigenvectors of " + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(k)}" + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "content": ", then the angle between " + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "inline_equation", + "content": "L^{(k)}" + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "inline_equation", + "content": "L_{*}" + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "inline_equation", + "content": "\\angle (L^{(k)},L_{*}) = \\cos^{-1}(\\| \\mathbf{U}_{L^{(k)}}^{\\top}\\mathbf{U}_{L_{*}}\\|)" + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "content": ", converges " + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 404, + 547, + 478 + ], + "type": "text", + "content": "-linearly to zero." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 486, + 545, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 486, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 304, + 486, + 545, + 521 + ], + "type": "text", + "content": "We discuss insights of this theory on choices of the algorithms and further verify the above stated advantage of STE over TME assuming a common probabilistic model." 
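A short helper for monitoring the convergence claimed in Theorem 1: extract the top-d eigenspace of Σ^(k) and evaluate the angle ∠(L^(k), L*) exactly as written in the theorem. The clipping before arccos is a numerical safeguard added here.

```python
import numpy as np

def top_d_subspace(Sigma, d):
    """Orthonormal basis of the span of the top-d eigenvectors of Sigma."""
    w, U = np.linalg.eigh(Sigma)
    return U[:, np.argsort(w)[::-1][:d]]

def subspace_angle(U1, U2):
    """angle(L1, L2) = arccos(||U1^T U2||), as written in Theorem 1."""
    c = np.clip(np.linalg.norm(U1.T @ U2, 2), -1.0, 1.0)
    return float(np.arccos(c))

# e.g., track subspace_angle(top_d_subspace(Sigma_k, d), U_star) over the STE
# iterations; under Assumptions 1-3 it converges r-linearly to zero.
```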
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "text", + "content": "Choice of " + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "text", + "content": " for subspace recovery: In order to avoid too large lower bound for " + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "inline_equation", + "content": "\\kappa_{1}" + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "text", + "content": " in (4), which we motivated above, it is good to find " + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "inline_equation", + "content": "\\epsilon_{1}" + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "inline_equation", + "content": "\\epsilon_{2} > 0" + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "text", + "content": " lies in " + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "inline_equation", + "content": "(\\epsilon_{1},\\mathrm{DS - SNR} - \\epsilon_{2})" + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "text", + "content": " (to notice this, observe the terms involving " + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "text", + "content": " in the denominators of the last two additive terms in (4)). We thus note that if the DS-SNR is expected to be sufficiently larger than 1, we can use, e.g., " + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "inline_equation", + "content": "\\gamma = 0.5" + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "text", + "content": ", but when the DS-SNR can be close to 1 or lower (e.g., in fundamental matrix estimation), it is advisable to choose small values of " + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 522, + 545, + 641 + ], + "type": "text", + "content": " according to Algorithm 2 and their sizes may depend on the expected value of the DS-SNR." 
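A tiny helper reflecting the guidance above on choosing γ: a single moderate value when the DS-SNR is expected to be comfortably above 1, and a grid of small values, to be passed to an Algorithm 2 style selection (e.g. the select_gamma sketch earlier), otherwise. The specific threshold and grid values are illustrative assumptions, not recommendations from the paper.

```python
def candidate_gammas(expected_ds_snr: float):
    """Illustrative candidate gamma grids for the two regimes discussed above."""
    if expected_ds_snr > 1.5:
        return [0.5]                       # DS-SNR comfortably above 1
    # DS-SNR near or below 1 (e.g. fundamental matrix estimation):
    # keep gamma below the expected DS-SNR and let Algorithm 2 pick.
    return [g for g in (0.05, 0.1, 0.2, 0.3) if g < expected_ds_snr]
```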
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "Possible ways of Initialization: If one expects an initial estimated subspace " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\hat{L}" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": " to have a sufficiently small angle " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "L_{*}" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\theta = \\angle (\\hat{L}, L_{*})" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": ", then for " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}^{(0)} \\coloneqq \\Pi_{\\hat{L}} + \\epsilon \\mathbf{I}" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": " it can be shown that " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\kappa_{1} > O(1 / (\\epsilon + \\theta))" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\kappa_{2} < O(1 + \\frac{\\theta}{\\epsilon})" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": ". Thus one may use a trusted RSR method, e.g., FMS. As discussed in §2.5, the choice " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}^{(0)} = \\mathbf{I}" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": " (or a scaled version of it) corresponds to " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\hat{L}" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": " being the" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14579" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "type": "text", + "content": "PCA subspace (obtained at iteration 1). 
Also, using the TME solution for " + }, + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(0)}" + }, + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "type": "text", + "content": " corresponds to using the TME subspace as " + }, + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "type": "inline_equation", + "content": "\\hat{L}" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "spans": [ + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": "Theory under a probabilistic model: We show that under a common probabilistic model, the assumptions of Theorem 1, where " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(0)}" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": " is obtained by TME, hold. Moreover, we show that STE (initialized by TME) can recover the correct subspace in situations with DS-SNR " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "< 1" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": ", whereas TME cannot recover the underlying subspace in such cases. We follow [33] and study the Generalized Haystack Model, though for simplicity, we assume Gaussian instead of sub-Gaussian distributions and an asymptotic setting. We assume " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "n_1" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": " inliers i.i.d. sampled from a Gaussian distribution " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "N(0,\\pmb{\\Sigma}^{(in)} / d)" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(in)} \\in S_{+}(D)" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "L_{*} = \\mathrm{Im}(\\pmb{\\Sigma}^{(in)})" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": " (so " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(in)}" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": " has " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": " nonzero eigenvalues), and " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "n_0" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": " outliers are i.i.d. sampled from a Gaussian distribution " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "N(0,\\pmb{\\Sigma}^{(out)} / D)" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(out)} / D \\in S_{++}(D)" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": ". 
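The initialization Σ^(0) = Π_{L̂} + εI discussed above is straightforward to form from any orthonormal basis of an initial subspace guess (from FMS, TME, or PCA); the ε value in this sketch is an arbitrary illustrative choice.

```python
import numpy as np

def init_from_subspace(U_hat: np.ndarray, eps: float = 1e-3) -> np.ndarray:
    """Sigma^(0) = Pi_{L_hat} + eps * I for an initial subspace estimate.

    U_hat : (D, d) orthonormal basis of the initial guess L_hat
            (e.g. the FMS subspace or the top-d eigenvectors of TME).
    eps   : small positive constant keeping Sigma^(0) full rank.
    """
    D = U_hat.shape[0]
    return U_hat @ U_hat.T + eps * np.eye(D)
```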
We define the following condition numbers of inliers (in " + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "inline_equation", + "content": "L_{*}" + }, + { + "bbox": [ + 47, + 96, + 288, + 264 + ], + "type": "text", + "content": ") and outliers:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 81, + 273, + 253, + 301 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 273, + 253, + 301 + ], + "spans": [ + { + "bbox": [ + 81, + 273, + 253, + 301 + ], + "type": "interline_equation", + "content": "\\kappa_ {i n} = \\frac {\\sigma_ {1} (\\pmb {\\Sigma} ^ {(i n)})}{\\sigma_ {d} (\\pmb {\\Sigma} ^ {(i n)})} \\mathrm {a n d} \\kappa_ {o u t} = \\frac {\\sigma_ {1} (\\pmb {\\Sigma} ^ {(o u t)})}{\\sigma_ {D} (\\pmb {\\Sigma} ^ {(o u t)})}.", + "image_path": "39dbc279c190b7bd3f0b2596594c254c712912a32b69c203a3eaf844990700fe.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 309, + 287, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 309, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 46, + 309, + 287, + 430 + ], + "type": "text", + "content": "Clearly, Assumption 1 holds under this model, and Assumption 2 constrains some of its parameters. Our next theorem shows that Assumption 3 holds under this model when the initial estimate " + }, + { + "bbox": [ + 46, + 309, + 287, + 430 + ], + "type": "inline_equation", + "content": "\\Sigma^{(out)}" + }, + { + "bbox": [ + 46, + 309, + 287, + 430 + ], + "type": "text", + "content": " for STE is obtained by TME. It also shows that in this case STE can solve the RSR problem even when DS-SNR " + }, + { + "bbox": [ + 46, + 309, + 287, + 430 + ], + "type": "inline_equation", + "content": "< 1" + }, + { + "bbox": [ + 46, + 309, + 287, + 430 + ], + "type": "text", + "content": ", unlike TME. For simplicity, we formulate the theory for the asymptotic case, where " + }, + { + "bbox": [ + 46, + 309, + 287, + 430 + ], + "type": "inline_equation", + "content": "N \\to \\infty" + }, + { + "bbox": [ + 46, + 309, + 287, + 430 + ], + "type": "text", + "content": " and the theorem holds almost surely. It is possible to formulate it for a very large " + }, + { + "bbox": [ + 46, + 309, + 287, + 430 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 309, + 287, + 430 + ], + "type": "text", + "content": " with high probability, but it requires stating complicated constants depending on various parameters." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "spans": [ + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": "Theorem 2. Assume data generated from the above generalized haystack model. Assume further that for " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "0 < \\mu < 1" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": ", which can be arbitrarily small, " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "d < (1 - \\mu)D - 2" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": ". 
Then, for any chosen " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "0 < c_{0} < 1" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": ", which is a lower bound for " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": ", there exists " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "\\eta \\coloneqq \\eta (\\kappa_{in},\\kappa_{out},c_0,\\mu) < 1" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": " such that if DS-SNR " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "\\geq \\eta" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(0)}" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": " is obtained by TME, then Assumption 3 for " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}^{(0)}" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": " is satisfied with " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "c_{0} < \\gamma < \\eta - c_{0}" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": " almost surely as " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "N \\to \\infty" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": ". Consequently, the output of the STE algorithm, initialized by TME and with the choice of " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "c_{0} < \\gamma < \\eta - c_{0}" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": ", recovers " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "L_{*}" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": ". On the other hand, if " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_{L_{*},L_{*}^{\\perp}}^{(out)} \\neq 0" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": " and DS-SNR " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "< 1" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": ", then the top " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": " eigenvectors of TME do not recover " + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "inline_equation", + "content": "L_{*}" + }, + { + "bbox": [ + 47, + 437, + 289, + 573 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "text", + "content": "There are three different regimes that the theorem covers. 
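A toy sampler for the generalized haystack model described above, using diagonal covariances so that L* is the span of the first d coordinate axes; this simplification, the chosen eigenvalue ranges, and the function name are assumptions of the sketch. It also reports κ_in, κ_out and the DS-SNR of the generated instance.

```python
import numpy as np

def haystack_sample(n1, n0, d, D, seed=0):
    """Toy instance of the generalized haystack model (diagonal covariances)."""
    rng = np.random.default_rng(seed)
    sig_in = np.zeros(D); sig_in[:d] = np.linspace(1.0, 2.0, d)   # rank-d Sigma^(in)
    sig_out = np.linspace(1.0, 3.0, D)                            # full-rank Sigma^(out)
    inliers = rng.multivariate_normal(np.zeros(D), np.diag(sig_in) / d, n1).T
    outliers = rng.multivariate_normal(np.zeros(D), np.diag(sig_out) / D, n0).T
    kappa_in = sig_in[:d].max() / sig_in[:d].min()   # sigma_1 / sigma_d of Sigma^(in)
    kappa_out = sig_out.max() / sig_out.min()        # sigma_1 / sigma_D of Sigma^(out)
    ds_snr = (n1 / d) / (n0 / (D - d))               # equation (3)
    return np.hstack([inliers, outliers]), kappa_in, kappa_out, ds_snr
```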
When DS-SNR " + }, + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\geq 1" + }, + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "text", + "content": ", both TME+STE (i.e., STE initialized by TME) and TME solve the RSR problem. When " + }, + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\eta \\leq" + }, + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "text", + "content": " DSSNR " + }, + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "inline_equation", + "content": "< 1" + }, + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "text", + "content": ", TME+STE solves the RSR problem and TME generally fails. When " + }, + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\gamma \\leq" + }, + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "text", + "content": " DS-SNR " + }, + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "inline_equation", + "content": "< \\eta" + }, + { + "bbox": [ + 46, + 582, + 287, + 665 + ], + "type": "text", + "content": ", TME+STE might also fail, but STE with extremely good initialization (that satisfies Assumption 3) can still solve the problem." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": "To get a basic idea of the dependence of " + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": " on its parameters, we remark that " + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\eta \\to 1" + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": " if either " + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "inline_equation", + "content": "c_0 \\to 0" + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\kappa_{in} \\to \\infty" + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\kappa_{out} \\to \\infty" + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mu \\to 0" + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": ", where the parameter " + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": " is somewhat artificial and might be removed with a tighter proof. 
Therefore, successful" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "text", + "content": "performance of TME+STE requires a DS-SNR that is close to 1 when " + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "text", + "content": " is close to either 0 or " + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "text", + "content": " (so that " + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "inline_equation", + "content": "c_{0}" + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "text", + "content": " is very small) or when either the inlier or outlier distribution is highly non-symmetric, that is, either " + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "inline_equation", + "content": "\\kappa_{in}" + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "inline_equation", + "content": "\\kappa_{out}" + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "text", + "content": " is large." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 131, + 508, + 145 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 131, + 508, + 145 + ], + "spans": [ + { + "bbox": [ + 305, + 131, + 508, + 145 + ], + "type": "text", + "content": "4. Applications to Structure from Motion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 152, + 547, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 152, + 547, + 187 + ], + "spans": [ + { + "bbox": [ + 304, + 152, + 547, + 187 + ], + "type": "text", + "content": "We apply STE to problems relevant to SfM: robust estimation of fundamental matrices (see §4.1), and initial screening of undesirable cameras (see §4.2)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 195, + 517, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 195, + 517, + 208 + ], + "spans": [ + { + "bbox": [ + 305, + 195, + 517, + 208 + ], + "type": "text", + "content": "4.1. Robust Fundamental Matrix Estimation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 213, + 546, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 213, + 546, + 250 + ], + "spans": [ + { + "bbox": [ + 304, + 213, + 546, + 250 + ], + "type": "text", + "content": "Fundamental matrix estimation from noisy and inexact keypoint matches is a core computer vision problem. It provides a challenging setting for applying RSR methods." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "spans": [ + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "text", + "content": "We review this setting as follows. 
Let " + }, + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "inline_equation", + "content": "(\\pmb{x},\\pmb{x}^{\\prime})\\in \\mathbb{R}^{3}\\times \\mathbb{R}^{3}" + }, + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "text", + "content": " be a correspondence pair of two points in different images that are projections of the same 3D point in the scene, where " + }, + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "inline_equation", + "content": "\\pmb{x}^{\\prime}" + }, + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "text", + "content": " are expressed by homogeneous coordinates of planar points. The fundamental matrix " + }, + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "inline_equation", + "content": "\\mathbf{F}\\in \\mathbb{R}^{3\\times 3}" + }, + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "text", + "content": " relates these corresponding points and the epipolar lines they lie on as follows: " + }, + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "inline_equation", + "content": "\\pmb{x}^{\\prime \\top}\\mathbf{F}\\pmb{x} = 0" + }, + { + "bbox": [ + 304, + 250, + 547, + 334 + ], + "type": "text", + "content": " [17], or equivalently," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 378, + 342, + 546, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 342, + 546, + 357 + ], + "spans": [ + { + "bbox": [ + 378, + 342, + 546, + 357 + ], + "type": "interline_equation", + "content": "\\operatorname {v e c} (\\mathbf {F}) \\cdot \\operatorname {v e c} \\left(\\boldsymbol {x x} ^ {\\prime \\top}\\right) = 0. \\tag {5}", + "image_path": "76ca7027db84bbf03473879b4105775110c1788dabb84d34b03c573ab15b4041.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "inline_equation", + "content": "\\mathrm{vec}(\\cdot)" + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "text", + "content": " denotes the vectorized form of a matrix. Therefore, ideally, the set of all vectors in " + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^9" + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "text", + "content": " of the form " + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "inline_equation", + "content": "\\mathrm{vec}(\\pmb{x}\\pmb{x}'^\\top)" + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "inline_equation", + "content": "(\\pmb{x},\\pmb{x}') \\in \\mathbb{R}^3 \\times \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "text", + "content": " is a correspondence pair, lies on an 8-subspace in " + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^9" + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "text", + "content": " and its orthogonal complement yields the fundamental matrix. 
In practice, the measurements of correspondence pairs can be highly corrupted due to poor matching. Moreover, some choices of correspondence pairs and the corruption mechanism may lead to concentration on low-dimensional subspaces within the desired 8-subspace. Furthermore, the corruption mechanism can lead to nontrivial settings of outliers. Lastly, since " + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "inline_equation", + "content": "d = 8" + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "inline_equation", + "content": "D = 9" + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "text", + "content": ", the theoretical threshold of [16] translates to having the fraction of inliers among all data points be at least " + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "inline_equation", + "content": "8/9 \\approx 88.9\\%" + }, + { + "bbox": [ + 304, + 366, + 547, + 510 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 510, + 547, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 547, + 582 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 547, + 582 + ], + "type": "text", + "content": "Therefore, this application is often a very challenging setting for direct RSR methods. The best performing RSR methods to date for fundamental matrix estimation are variants of RANSAC [10]. RANSAC avoids any subspace-modeling assumptions, but estimates the subspace based on testing myriads of samples, each having 7 or 8 point correspondences [17]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 582, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 547, + 714 + ], + "type": "text", + "content": "We test the performance of STE in estimating the fundamental matrix on the Photo Tourism database [41], where the image correspondences are obtained by SIFT feature similarities [30]. We compare STE with the following 3 top RSR performers according to [25]: FMS [24], spherical FMS (SFMS) [24] and TME [47, 50]. We also compared with vanilla RANSAC [10] and two of its specialized extensions, which are state-of-the-art performers for estimating fundamental matrices: locally optimized RANSAC (LO-RANSAC) [6] and degeneracy-check enabled RANSAC (DEGENSAC) [7]. 
For the RSR methods we used codes from the supplementary material of [25]" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "14580" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 79, + 521, + 220 + ], + "blocks": [ + { + "bbox": [ + 72, + 79, + 521, + 220 + ], + "lines": [ + { + "bbox": [ + 72, + 79, + 521, + 220 + ], + "spans": [ + { + "bbox": [ + 72, + 79, + 521, + 220 + ], + "type": "image", + "image_path": "953fa5e2e69cf2e35af18f9c70ab4200aff08d22e492096e141a8ffa13c5bacd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 109, + 229, + 483, + 240 + ], + "lines": [ + { + "bbox": [ + 109, + 229, + 483, + 240 + ], + "spans": [ + { + "bbox": [ + 109, + 229, + 483, + 240 + ], + "type": "text", + "content": "Figure 1. Median (relative) rotation errors obtained by seven algorithms for the 14 datasets of Photo Tourism." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 262, + 286, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 262, + 286, + 310 + ], + "spans": [ + { + "bbox": [ + 46, + 262, + 286, + 310 + ], + "type": "text", + "content": "with their default options. We further used the Python package pydegensac for implementing LO-RANSAC and DEGENSAC with the inlier threshold " + }, + { + "bbox": [ + 46, + 262, + 286, + 310 + ], + "type": "inline_equation", + "content": "\\eta = 0.75" + }, + { + "bbox": [ + 46, + 262, + 286, + 310 + ], + "type": "text", + "content": ". For STE, we used Algorithm 2 to estimate the best " + }, + { + "bbox": [ + 46, + 262, + 286, + 310 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 262, + 286, + 310 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 46, + 262, + 286, + 310 + ], + "type": "inline_equation", + "content": "\\{(2i)^{-1}\\}_{i=1}^{5}" + }, + { + "bbox": [ + 46, + 262, + 286, + 310 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 311, + 288, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 311, + 288, + 467 + ], + "spans": [ + { + "bbox": [ + 46, + 311, + 288, + 467 + ], + "type": "text", + "content": "We measure the accuracy of the results according to the median and mean errors of relative rotation and direction vectors directly obtained by the fundamental matrices for each method. For computing these errors, we compared with ground-truth values provided by [41, 48]. Figure 1 describes the result of the mean errors for relative rotation per dataset of Photo Tourism, where the other three errors and " + }, + { + "bbox": [ + 46, + 311, + 288, + 467 + ], + "type": "inline_equation", + "content": "\\mathrm{mAA}(10^{\\circ})" + }, + { + "bbox": [ + 46, + 311, + 288, + 467 + ], + "type": "text", + "content": " are in the supplemental material. STE is significantly better than top RSR performers (TME, FMS and SFMS). Overall, it appears that STE performs better than vanilla RANSAC, except for the Ellis Island and Vienna Cathedral datasets, where RANSAC outperforms STE. 
STE is still competitive when compared with LO-RANSAC and DEGENSAC, except for Notre Dame and the latter two datasets." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 477, + 222, + 489 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 477, + 222, + 489 + ], + "spans": [ + { + "bbox": [ + 47, + 477, + 222, + 489 + ], + "type": "text", + "content": "4.2. Initial Camera Removal for SfM" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 497, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 497, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 497, + 287, + 605 + ], + "type": "text", + "content": "We propose a novel application of RSR for SfM and test STE for this application. Even though our framework is not sufficiently practical at this point, it allows testing STE in a different setting where " + }, + { + "bbox": [ + 46, + 497, + 287, + 605 + ], + "type": "inline_equation", + "content": "N = D" + }, + { + "bbox": [ + 46, + 497, + 287, + 605 + ], + "type": "text", + "content": " is very large and " + }, + { + "bbox": [ + 46, + 497, + 287, + 605 + ], + "type": "inline_equation", + "content": "d = 6" + }, + { + "bbox": [ + 46, + 497, + 287, + 605 + ], + "type": "text", + "content": ". Our idea is to use RSR within the SfM pipeline right after estimating the fundamental matrices, in order to remove some cameras that result in inaccurate estimated fundamental matrices. The hope is that eventually such methods may reduce corruption and speed up the costly later computationally intensive stages of the global SfM pipeline." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 606, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 606, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 606, + 288, + 714 + ], + "type": "text", + "content": "There are two main reasons to question such a process. One may first question the gain in improving accuracy. Indeed, since the rest of the pipeline already identifies corrupted pairwise measurements, this process may not improve accuracy and may even harm it as it removes whole cameras and not pairs of cameras. That is, it is possible that a camera, which results in bad pairwise measurement, also contributes to some other accurate pairwise estimates that can improve the overall accuracy. The second concern is in terms of speed. In general," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 262, + 547, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 262, + 547, + 477 + ], + "spans": [ + { + "bbox": [ + 304, + 262, + 547, + 477 + ], + "type": "text", + "content": "the removal of cameras may result in higher or comparable speed. Indeed, the LUD global pipeline [36], which we follow, examines the parallel rigidity of the viewing graph and extracts the maximal parallel rigid subgraph. Thus earlier removal of cameras may worsen the parallel rigidity of the graph and increase the computation due to the need of finding a maximal parallel rigid subgraph. For example, [40] removes cameras in an earlier stage of the LUD pipeline, but results in higher computational cost than the LUD pipeline. Therefore, improvement of speed for the LUD pipeline by removing cameras is generally non-trivial. Moreover, currently we use scale factors obtained by first running LUD, so we do not get a real speed improvement. 
Nevertheless, the proposed method is insightful whenever it may indicate clear improvement in accuracy for a dataset, since one can then infer that the current pipeline is not effective enough in handling corrupted measurements, which can be easily recognized by a simple method. Furthermore, improvement in \"speed\" can be indicative of maintaining parallel rigidity." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "spans": [ + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": "Our RSR formulation is based on a fundamental observation by Sengupta et al. [39] on the low-rank of the " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": "-view essential (or fundamental) matrix. The " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": "-view essential matrix " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "3n \\times 3n" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": " is formed by stacking all " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "\\binom{n}{2}" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": " essential matrices, while being appropriately scaled. That is, the " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "ij" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": "-th block of " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": " is the essential matrix for the " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": "-th and " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": "-th cameras, where each " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_{ij}" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": " is scaled by a factor " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "\\lambda_{ij}" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": " in accordance with the global coordinate system (see [20, 21, 39]). It was noticed in [39] that " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": " has rank 6. 
Moreover, [39] characterized the set of " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": "-view essential matrices whose camera centers are not all collinear by the satisfaction of a few algebraic conditions, where the major one is " + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "inline_equation", + "content": "\\mathrm{rank}(\\mathbf{E}) = 6" + }, + { + "bbox": [ + 304, + 481, + 547, + 626 + ], + "type": "text", + "content": ". Further explanation appears in [20]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 547, + 715 + ], + "type": "text", + "content": "We propose a straightforward application of RSR, utilizing these ideas to initially eliminate cameras that introduce significant corruption to the essential matrices. For this purpose, we compute the essential matrices (by computing first the fundamental matrices and then using the known camera calibration) and scale each matrix according to the factor obtained by the LUD pipeline [36] (note that this is the initial" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14581" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 77, + 294, + 205 + ], + "blocks": [ + { + "bbox": [ + 47, + 77, + 294, + 205 + ], + "lines": [ + { + "bbox": [ + 47, + 77, + 294, + 205 + ], + "spans": [ + { + "bbox": [ + 47, + 77, + 294, + 205 + ], + "type": "image", + "image_path": "a2dd0f25f68613bf4d91113af58c600e73c8bbcd18228539c6ad0a04d559561c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 213, + 546, + 236 + ], + "lines": [ + { + "bbox": [ + 46, + 213, + 546, + 236 + ], + "spans": [ + { + "bbox": [ + 46, + 213, + 546, + 236 + ], + "type": "text", + "content": "Figure 2. Mean (absolute) rotation errors (in degrees, left) and mean translation errors (in degrees, right) of LUD and four RSR methods used to initially screen bad cameras within LUD applied to the 14 datasets of Photo Tourism." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 302, + 78, + 547, + 205 + ], + "blocks": [ + { + "bbox": [ + 302, + 78, + 547, + 205 + ], + "lines": [ + { + "bbox": [ + 302, + 78, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 302, + 78, + 547, + 205 + ], + "type": "image", + "image_path": "f4c5770f5c5861e10f2f07e8c7ced3cebc2f423254ac4d0b257d8aabdf79219c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": "scaling applied in [20, 21, 39] before applying a non-convex and nontrivial optimization procedure that refines such scales). 
Using these appropriately scaled essential matrices, we form the " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": "-view essential matrix " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "3n\\times 3n" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": ". We denote the " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "3n\\times 3" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": " column blocks of " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_{:,1},\\dots,\\mathbf{E}_{:,n}" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": " (since " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": " is symmetric they are the same as the row blocks transposed). We treat " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": " as a data matrix with " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "D = N = 3n" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": ", where the columns of " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": " are the data points. We apply RSR with " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "d = 6" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": ", recover a " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": "-dimensional robust subspace and identify the outlying columns whose distance is largest from this subspace. To avoid heuristic methods for the cutoff of outliers we assume a fixed percentage of " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": " outlying columns. If a column block, " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_{:,i}" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": " contains an outlying column, we remove its corresponding camera " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": ". 
Consequently, a smaller percentage of cameras (about " + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "inline_equation", + "content": "10 - 15\\%" + }, + { + "bbox": [ + 46, + 256, + 289, + 424 + ], + "type": "text", + "content": ") will be eliminated." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "spans": [ + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "text", + "content": "We use the Photo Tourism database [41] with precomputed pairwise image correspondences provided by [39] (they were obtained by thresholding SIFT feature similarities). To compute scale factors for the essential matrices we use the output of the LUD pipeline [36] as follows (following an idea proposed in [39] for initializing these values): Given the essential matrix for cameras " + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "text", + "content": " computed at an early stage of our pipeline, " + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_{ij}" + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "text", + "content": ", and the one obtained by the full LUD pipeline, " + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_{ij}^{\\mathrm{LUD}}" + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "text", + "content": ", the scaling factor is " + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "inline_equation", + "content": "\\lambda_{ij} = \\langle \\mathbf{E}_{ij}, [\\mathbf{E}_{ij}^{\\mathrm{LUD}}] \\rangle / \\| [\\mathbf{E}_{ij}^{\\mathrm{LUD}}] \\|_F^2" + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "text", + "content": ". Since many values of " + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_{ij}" + }, + { + "bbox": [ + 46, + 425, + 287, + 545 + ], + "type": "text", + "content": " are missing, we also apply matrix completion." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "type": "text", + "content": "We compare the LUD pipeline with the LUD pipeline combined with the filtering processes achieved by STE, FMS, SFMS, and TME. For STE we fix " + }, + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\gamma = 1/3" + }, + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "type": "text", + "content": ", though any other value we tried yielded the same result. We report both mean and median errors of rotations and translations and runtime of the standard LUD and the RSR+LUD methods with initial screening of cameras. Figure 2 shows the mean rotation and translation errors, where the rest of the figures and a summarizing table are in the supplementary material. In general, STE demonstrates slightly higher accuracy compared to other RSR methods. 
Improved accuracy is particularly notable when matrix completion is not utilized, as demonstrated in the supplementary material. We observe that LUD+STE generally improves the estimation of camera parameters (both rotations and translations) over LUD." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 256, + 547, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 256, + 547, + 413 + ], + "spans": [ + { + "bbox": [ + 304, + 256, + 547, + 413 + ], + "type": "text", + "content": "The improvement of LUD+STE is noticeable in Roman Forum and Gendarmenmarkt. In the supplementary material we show further improvement for Gendarmenmarkt with the removal of " + }, + { + "bbox": [ + 304, + 256, + 547, + 413 + ], + "type": "inline_equation", + "content": "45\\%" + }, + { + "bbox": [ + 304, + 256, + 547, + 413 + ], + "type": "text", + "content": " outlying columns. While the resulting errors are still large, their improvement shows some potential in dealing with difficult SfM structure by initially removing cameras in a way that may help eliminate some scene ambiguities, which are prevalent in Gendarmenmarkt. In terms of runtime, both LUD+STE and LUD+SFMS demonstrate significant improvements, where LUD+SFMS is even faster than LUD+STE. While this does not yet imply faster handling of the datasets (as we use initial scaling factors obtained by LUD), it indicates the efficiency of the removal of outliers in maintaining parallel rigidity." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 427, + 379, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 427, + 379, + 440 + ], + "spans": [ + { + "bbox": [ + 306, + 427, + 379, + 440 + ], + "type": "text", + "content": "5. Conclusions" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 449, + 547, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 547, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 547, + 581 + ], + "type": "text", + "content": "We introduce STE, a meticulously crafted adaptation of TME designed to address challenges within RSR. Theoretical guarantees demonstrate its ability to recover the true underlying subspace reliably, even with a smaller fraction of inliers compared to the well-known theoretical threshold. Under the generalized haystack model, we show that this initialization can be chosen as TME itself, leading to improved handling of a smaller fraction of inliers compared to TME. Our exploration extends to practical applications, where STE proves effective in two 3D vision tasks: robust fundamental matrix estimation and screening of bad cameras for improved SfM." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "content": "Several avenues for future research include: " + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "content": " Exploring adaptations of other robust covariance estimation methods to RSR. " + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "content": " Studying effective initialization for STE both in theory and in practice. 
" + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "content": " In-depth theoretical exploration of the optimal choice of the parameter " + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "content": " Study of alternative ways of adapting TME to RSR problems. " + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "content": " Improving STE for fundamental matrix estimation following ideas similar to those in [7, 12, 37] for addressing challenging degeneracies. " + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "content": " Enhancing our initial idea of initial removal of bad cameras, specifically attempting to use it to rectify challenging scene ambiguities. " + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 304, + 582, + 547, + 713 + ], + "type": "text", + "content": " Testing our methods for SfM using more recent feature matching algorithms." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14582" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 104, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 104, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 104, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "text", + "content": "[1] Larry P. Ammann. Robust singular value decompositions: A new approach to projection pursuit. Journal of the American Statistical Association, 88(422):pp. 505-514, 1993. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 288, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 288, + 169 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 288, + 169 + ], + "type": "text", + "content": "[2] Daniel Barath, Jiri Matas, and Jana Noskova. MAGSAC: marginalizing sample consensus. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10197-10205, 2019. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 171, + 288, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 171, + 288, + 203 + ], + "spans": [ + { + "bbox": [ + 53, + 171, + 288, + 203 + ], + "type": "text", + "content": "[3] Jian-Feng Cai, Emmanuel J. Candès, and Zuowei Shen. A singular value thresholding algorithm for matrix completion. SIAM Journal on optimization, 20(4):1956-1982, 2010. 17" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 205, + 288, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 288, + 237 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 288, + 237 + ], + "type": "text", + "content": "[4] Yeshwanth Cherapanamjeri, Prateek Jain, and Praneeth Netrapalli. Thresholding based outlier robust PCA. In Conference on Learning Theory, pages 593-628. PMLR, 2017. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 239, + 288, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 239, + 288, + 271 + ], + "spans": [ + { + "bbox": [ + 53, + 239, + 288, + 271 + ], + "type": "text", + "content": "[5] Vartan Choulakian. " + }, + { + "bbox": [ + 53, + 239, + 288, + 271 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 53, + 239, + 288, + 271 + ], + "type": "text", + "content": "-norm projection pursuit principal component analysis. Computational Statistics & Data Analysis, 50(6):1441-1451, 2006. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 274, + 288, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 274, + 288, + 317 + ], + "spans": [ + { + "bbox": [ + 53, + 274, + 288, + 317 + ], + "type": "text", + "content": "[6] Ondrej Chum, Jií Matas, and Josef Kittler. Locally optimized RANSAC. In Pattern Recognition: 25th DAGM Symposium, Magdeburg, Germany, September 10-12, 2003. Proceedings 25, pages 236-243. Springer, 2003. 2, 6, 13" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 319, + 288, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 319, + 288, + 363 + ], + "spans": [ + { + "bbox": [ + 53, + 319, + 288, + 363 + ], + "type": "text", + "content": "[7] Ondrej Chum, Tomas Werner, and Jiri Matas. Two-view geometry estimation unaffected by a dominant plane. In 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), pages 772-779. IEEE, 2005. 2, 6, 8, 13" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 364, + 288, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 364, + 288, + 430 + ], + "spans": [ + { + "bbox": [ + 53, + 364, + 288, + 430 + ], + "type": "text", + "content": "[8] Ilias Diakonikolas, Gautam Kamath, Daniel M. Kane, Jerry Li, Ankur Moitra, and Alistair Stewart. Robustly learning a gaussian: Getting optimal error, efficiently. In Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms, page 2683-2702, USA, 2018. Society for Industrial and Applied Mathematics. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 431, + 288, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 431, + 288, + 485 + ], + "spans": [ + { + "bbox": [ + 53, + 431, + 288, + 485 + ], + "type": "text", + "content": "[9] Chris Ding, Ding Zhou, Xiaofeng He, and Hongyuan Zha. 
R1-PCA: rotational invariant " + }, + { + "bbox": [ + 53, + 431, + 288, + 485 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 53, + 431, + 288, + 485 + ], + "type": "text", + "content": "-norm principal component analysis for robust subspace factorization. In ICML '06: Proceedings of the 23rd international conference on Machine learning, pages 281-288, New York, NY, USA, 2006. ACM. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 487, + 288, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 487, + 288, + 531 + ], + "spans": [ + { + "bbox": [ + 48, + 487, + 288, + 531 + ], + "type": "text", + "content": "[10] Martin A. Fischler and Robert C. Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 1, 2, 6, 13" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 533, + 288, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 533, + 288, + 565 + ], + "spans": [ + { + "bbox": [ + 48, + 533, + 288, + 565 + ], + "type": "text", + "content": "[11] Gabriel Frahm and Uwe Jaekel. A generalization of Tyler's M-estimators to the case of incomplete data. Computational Statistics & Data Analysis, 54(2):374-393, 2010. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 567, + 288, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 288, + 610 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 288, + 610 + ], + "type": "text", + "content": "[12] J-M Frahm and Marc Pollefeys. RANSAC for (quasi-) degenerate data (QDEGSAC). In 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06), pages 453-460. IEEE, 2006. 2, 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 612, + 288, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 612, + 288, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 612, + 288, + 645 + ], + "type": "text", + "content": "[13] William Cole Franks and Ankur Moitra. Rigorous guarantees for Tyler's M-estimator via quantum expansion. In Conference on Learning Theory, pages 1601–1632. PMLR, 2020. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 647, + 288, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 678 + ], + "type": "text", + "content": "[14] Jerome H. Friedman and John W. Tukey. A projection pursuit algorithm for exploratory data analysis. IEEE Transactions on Computers, C-23(9):881-890, 1974. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 680, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 288, + 713 + ], + "type": "text", + "content": "[15] John Goes, Gilad Lerman, and Boaz Nadler. Robust sparse covariance estimation by thresholding Tyler's M-estimator. The Annals of Statistics, 48(1):86 - 110, 2020. 
3" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "type": "text", + "content": "[16] Moritz Hardt and Ankur Moitra. Algorithms and hardness for robust subspace recovery. In Conference on Learning Theory, pages 354-375. PMLR, 2013. 1, 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 108, + 547, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 108, + 547, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 108, + 547, + 139 + ], + "type": "text", + "content": "[17] Richard Hartley and Andrew Zisserman. Multiple view geometry in computer vision. Cambridge university press, 2003. 6, 11, 12, 13" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 142, + 547, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 142, + 547, + 186 + ], + "spans": [ + { + "bbox": [ + 307, + 142, + 547, + 186 + ], + "type": "text", + "content": "[18] Yuhe Jin, Dmytro Mishkin, Anastasiia Mishchuk, Jiri Matas, Pascal Fua, Kwang Moo Yi, and Eduard Trulls. Image matching across wide baselines: From paper to practice. International Journal of Computer Vision, 129(2):517-547, 2021. 13" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 188, + 547, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 188, + 547, + 231 + ], + "spans": [ + { + "bbox": [ + 307, + 188, + 547, + 231 + ], + "type": "text", + "content": "[19] Arman Karimian and Roberto Tron. Essential matrix estimation using convex relaxations in orthogonal space. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17142-17152, 2023. 15" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 233, + 547, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 233, + 547, + 287 + ], + "spans": [ + { + "bbox": [ + 307, + 233, + 547, + 287 + ], + "type": "text", + "content": "[20] Yoni Kasten, Amnon Geifman, Meirav Galun, and Ronen Basri. Algebraic characterization of essential matrices and their averaging in multiview settings. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5895-5903, 2019. 7, 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 289, + 547, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 289, + 547, + 345 + ], + "spans": [ + { + "bbox": [ + 307, + 289, + 547, + 345 + ], + "type": "text", + "content": "[21] Yoni Kasten, Amnon Geifman, Meirav Galun, and Ronen Basri. Gpsfm: Global projective sfm using algebraic constraints on multi-view fundamental matrices. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3264-3272, 2019. 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 346, + 547, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 346, + 547, + 378 + ], + "spans": [ + { + "bbox": [ + 307, + 346, + 547, + 378 + ], + "type": "text", + "content": "[22] John T. Kent and David E. Tyler. Maximum likelihood estimation for the wrapped Cauchy distribution. Journal of Applied Statistics, 15(2):247-254, 1988. 
2, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 380, + 547, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 380, + 547, + 413 + ], + "spans": [ + { + "bbox": [ + 307, + 380, + 547, + 413 + ], + "type": "text", + "content": "[23] Nojun Kwak. Principal component analysis based on " + }, + { + "bbox": [ + 307, + 380, + 547, + 413 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 307, + 380, + 547, + 413 + ], + "type": "text", + "content": "-norm maximization. IEEE transactions on pattern analysis and machine intelligence, 30(9):1672-1680, 2008. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 415, + 547, + 447 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 415, + 547, + 447 + ], + "spans": [ + { + "bbox": [ + 307, + 415, + 547, + 447 + ], + "type": "text", + "content": "[24] Gilad Lerman and Tyler Maunu. Fast, robust and non-convex subspace recovery. Information and Inference: A Journal of the IMA, 7(2):277–336, 2018. 1, 2, 3, 4, 6, 13" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 449, + 547, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 449, + 547, + 482 + ], + "spans": [ + { + "bbox": [ + 307, + 449, + 547, + 482 + ], + "type": "text", + "content": "[25] Gilad Lerman and Tyler Maunu. An overview of robust subspace recovery. Proceedings of the IEEE, 106(8):1380-1410, 2018. 1, 3, 4, 5, 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 483, + 547, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 483, + 547, + 517 + ], + "spans": [ + { + "bbox": [ + 307, + 483, + 547, + 517 + ], + "type": "text", + "content": "[26] Gilad Lerman and Teng Zhang. " + }, + { + "bbox": [ + 307, + 483, + 547, + 517 + ], + "type": "inline_equation", + "content": "l_{p}" + }, + { + "bbox": [ + 307, + 483, + 547, + 517 + ], + "type": "text", + "content": "-recovery of the most significant subspace among multiple subspaces with outliers. Constr. Approx., 40(3):329-385, 2014. 4" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 518, + 547, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 518, + 547, + 552 + ], + "spans": [ + { + "bbox": [ + 307, + 518, + 547, + 552 + ], + "type": "text", + "content": "[27] Gilad Lerman, Michael B. McCoy, Joel A. Tropp, and Teng Zhang. Robust computation of linear models by convex relaxation. Found. Comput. Math., 15(2):363-410, 2015. 1, 3, 5" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 553, + 547, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 553, + 547, + 574 + ], + "spans": [ + { + "bbox": [ + 307, + 553, + 547, + 574 + ], + "type": "text", + "content": "[28] Gilad Lerman, Feng Yu, and Teng Zhang. Theoretical guarantees for the subspace-constrained Tyler's estimator, 2024. 4, 5, 11" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 576, + 547, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 576, + 547, + 620 + ], + "spans": [ + { + "bbox": [ + 307, + 576, + 547, + 620 + ], + "type": "text", + "content": "[29] Guoying Li and Zhonglian Chen. Projection-pursuit approach to robust dispersion matrices and principal components: primary theory and monte carlo. Journal of the American Statistical Association, 80(391):759-766, 1985. 
1" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 622, + 547, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 622, + 547, + 654 + ], + "spans": [ + { + "bbox": [ + 307, + 622, + 547, + 654 + ], + "type": "text", + "content": "[30] David G. Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60:91-110, 2004. 6" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 656, + 547, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 656, + 547, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 656, + 547, + 689 + ], + "type": "text", + "content": "[31] Ricardo A. Maronna and Víctor J. Yohai. Robust estimation of multivariate location and scatter. Wiley StatsRef: Statistics Reference Online, pages 1-12, 2014. 2" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "type": "text", + "content": "[32] Tyler Maunu and Gilad Lerman. Robust subspace recovery with adversarial outliers, 2019. 1, 2, 4, 5" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14583" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[33] Tyler Maunu, Teng Zhang, and Gilad Lerman. A well-tempered landscape for non-convex robust subspace recovery. J. Mach. Learn. Res., 20(1):1348–1406, 2019. 1, 3, 4, 5, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 288, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 288, + 152 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 288, + 152 + ], + "type": "text", + "content": "[34] Tyler Maunu, Chenyu Yu, and Gilad Lerman. Stochastic and private nonconvex outlier-robust PCAs. In Proceedings of Mathematical and Scientific Machine Learning, pages 173–188. PMLR, 2022. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 153, + 288, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 153, + 288, + 186 + ], + "spans": [ + { + "bbox": [ + 48, + 153, + 288, + 186 + ], + "type": "text", + "content": "[35] Michael McCoy and Joel A. Tropp. Two proposals for robust PCA using semidefinite programming. Electronic Journal of Statistics, 5(none):1123 - 1160, 2011. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 188, + 288, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 188, + 288, + 232 + ], + "spans": [ + { + "bbox": [ + 48, + 188, + 288, + 232 + ], + "type": "text", + "content": "[36] Onur Ozyesil and Amit Singer. Robust camera location estimation by convex programming. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2674-2683, 2015. 7, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 234, + 288, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 234, + 288, + 277 + ], + "spans": [ + { + "bbox": [ + 48, + 234, + 288, + 277 + ], + "type": "text", + "content": "[37] Rahul Raguram, Ondrej Chum, Marc Pollefeys, Jiri Matas, and Jan-Michael Frahm. USAC: A universal framework for random sample consensus. IEEE transactions on pattern analysis and machine intelligence, 35(8):2022-2038, 2012. 2, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 280, + 287, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 280, + 287, + 301 + ], + "spans": [ + { + "bbox": [ + 48, + 280, + 287, + 301 + ], + "type": "text", + "content": "[38] Elvezio M. Ronchetti and Peter J. Huber. Robust statistics. John Wiley & Sons Hoboken, NJ, USA, 2009. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 304, + 288, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 304, + 288, + 369 + ], + "spans": [ + { + "bbox": [ + 48, + 304, + 288, + 369 + ], + "type": "text", + "content": "[39] Soumyadip Sengupta, Tal Amir, Meirav Galun, Tom Goldstein, David W Jacobs, Amit Singer, and Ronen Basri. A new rank constraint on multi-view fundamental matrices, and its application to camera location recovery. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4798-4806, 2017. 7, 8, 16" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 371, + 288, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 371, + 288, + 425 + ], + "spans": [ + { + "bbox": [ + 48, + 371, + 288, + 425 + ], + "type": "text", + "content": "[40] Yunpeng Shi, Shaohan Li, Tyler Mauno, and Gilad Lerman. Scalable cluster-consistency statistics for robust multi-object matching. In International Conference on 3D Vision, 3DV 2021, London, United Kingdom, December 1-3, 2021, pages 352-360. IEEE, 2021. 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 428, + 287, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 428, + 287, + 472 + ], + "spans": [ + { + "bbox": [ + 48, + 428, + 287, + 472 + ], + "type": "text", + "content": "[41] Noah Snavely, Steven M. Seitz, and Richard Szeliski. Photo tourism: exploring photo collections in 3d. In ACM siggraph 2006 papers, pages 835-846. Association for Computing Machinery, 2006. 6, 7, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 474, + 288, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 474, + 288, + 506 + ], + "spans": [ + { + "bbox": [ + 48, + 474, + 288, + 506 + ], + "type": "text", + "content": "[42] Nathan Srebro and Tommi Jaakkola. Weighted low-rank approximations. In Proceedings of the 20th international conference on machine learning (ICML-03), pages 720–727, 2003. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 509, + 288, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 509, + 288, + 573 + ], + "spans": [ + { + "bbox": [ + 48, + 509, + 288, + 573 + ], + "type": "text", + "content": "[43] Jacob Steinhardt, Moses Charikar, and Gregory Valiant. Resilience: A criterion for learning in the presence of arbitrary outliers. 
In 9th Innovations in Theoretical Computer Science Conference, ITCS 2018, January 11-14, 2018, Cambridge, MA, USA, pages 45:1-45:21. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2018. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 576, + 288, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 576, + 288, + 631 + ], + "spans": [ + { + "bbox": [ + 48, + 576, + 288, + 631 + ], + "type": "text", + "content": "[44] Ben Tordoff and David W. Murray. Guided sampling and consensus for motion estimation. In Computer Vision—ECCV 2002: 7th European Conference on Computer Vision Copenhagen, Denmark, May 28–31, 2002 Proceedings, Part I 7, pages 82–96. Springer, 2002. 15" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 633, + 288, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 633, + 288, + 666 + ], + "spans": [ + { + "bbox": [ + 48, + 633, + 288, + 666 + ], + "type": "text", + "content": "[45] Philip H.S. Torr and Andrew Zisserman. MLESAC: A new robust estimator with application to estimating image geometry. Computer vision and image understanding, 78(1):138-156, 2000. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 667, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 667, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 667, + 287, + 689 + ], + "type": "text", + "content": "[46] David E. Tyler. Statistical analysis for the angular central gaussian distribution on the sphere. Biometrika, 74(3):579-589, 1987. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[47] David E. Tyler. A distribution-free m-estimator of multivariate scatter. The Annals of Statistics, pages 234–251, 1987. 1, 2, 6, 13" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 72, + 547, + 217 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 307, + 72, + 546, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 72, + 546, + 117 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 546, + 117 + ], + "type": "text", + "content": "[48] Kyle Wilson and Noah Snavely. Robust global translations with 1dsfm. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part III 13, pages 61-75. Springer, 2014. 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 118, + 546, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 546, + 151 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 546, + 151 + ], + "type": "text", + "content": "[49] Huan Xu, Constantine Caramanis, and Sujay Sanghavi. Robust PCA via outlier pursuit. Advances in neural information processing systems, 23, 2010. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 152, + 547, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 547, + 184 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 547, + 184 + ], + "type": "text", + "content": "[50] Teng Zhang. Robust subspace recovery by Tyler's M-estimator. Information and Inference: A Journal of the IMA, 5(1):1-21, 2016. 
1, 3, 4, 5, 6, 13" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 186, + 547, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 547, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 547, + 217 + ], + "type": "text", + "content": "[51] Teng Zhang and Gilad Lerman. A novel M-estimator for robust PCA. The Journal of Machine Learning Research, 15(1): 749–808, 2014. 1, 3, 5" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "14584" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/c36c78d9-fdc0-45ae-af17-5820282f52ff_content_list.json b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/c36c78d9-fdc0-45ae-af17-5820282f52ff_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3e811f58c7d9ba9bd5c3b2815b15b2d0a37e5e77 --- /dev/null +++ b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/c36c78d9-fdc0-45ae-af17-5820282f52ff_content_list.json @@ -0,0 +1,2025 @@ +[ + { + "type": "text", + "text": "A Theory of Joint Light and Heat Transport for Lambertian Scenes", + "text_level": 1, + "bbox": [ + 143, + 130, + 825, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mani Ramanagopal, Sriram Narayanan, Aswin C. Sankaranarayanan, and Srinivasa G. Narasimhan \nCarnegie Mellon University, Pittsburgh, PA 15213, USA", + "bbox": [ + 94, + 180, + 875, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{manikans, snochurn, saswin, srinivas}@andrew.cmu.edu", + "bbox": [ + 248, + 219, + 718, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present a novel theory that establishes the relationship between light transport in visible and thermal infrared, and heat transport in solids. We show that heat generated due to light absorption can be estimated by modeling heat transport using a thermal camera. For situations where heat conduction is negligible, we analytically solve the heat transport equation to derive a simple expression relating the change in thermal image intensity to the absorbed light intensity and heat capacity of the material. Next, we prove that intrinsic image decomposition for Lambertian scenes becomes a well-posed problem if one has access to the absorbed light. Our theory generalizes to arbitrary shapes and unstructured illumination. Our theory is based on applying energy conservation principle at each pixel independently. We validate our theory using real-world experiments on diffuse objects made of different materials that exhibit both direct and global components (inter-reflections) of light transport under unknown complex lighting.", + "bbox": [ + 75, + 300, + 473, + 574 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 602, + 209, + 618 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Printed on paper, this text appears black because the ink does not reflect much light. So what happens to the light striking the ink? It gets absorbed and converted into heat, thereby disappearing from the visible light transport system. Starting from the early works in 1970s [3, 20, 23, 27, 31, 35], decades of research[6, 18, 33] have attempted to separate surface reflectance and shading from images by modeling shapes[30], illuminations [8] and their interactions [13]. However, in the general case, decomposing light transport is fundamentally an ill-posed problem, thus requiring handcrafted [23] or learned priors[1, 2, 4, 9]. But what if we can somehow observe the light lost to absorption?", + "bbox": [ + 75, + 628, + 468, + 809 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Analogous to light transport, heat transport models the generation and flow of heat through a medium and its exchange with the surrounding [5, 34]. In the heat transport system, the heat generated due to light absorption is no different from any other type of heat generation. While heat itself cannot be seen, all objects radiate infrared light based", + "bbox": [ + 75, + 810, + 470, + 902 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/101b9b9dd3a8c77d7a15d69579d103cedba345fb0d2a532e33858b0126cf88eb.jpg", + "image_caption": [ + "Visible Image" + ], + "image_footnote": [], + "bbox": [ + 506, + 267, + 625, + 367 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a1fed00393212e0b5cc1e749154ad7da16465dde422cecdfa5550ffd64deb2ac.jpg", + "image_caption": [ + "Temperature rise at $t = 0.1s$" + ], + "image_footnote": [], + "bbox": [ + 635, + 267, + 754, + 366 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/bca02789b03b7020d76bbb3d95fe2b9bfbeba95b23822089fb44a73b4eb102c6.jpg", + "image_caption": [ + "Temperature rise at $t = 3s$" + ], + "image_footnote": [], + "bbox": [ + 767, + 268, + 885, + 368 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c28b9b618c4df3af25d97017329873094f1efedc8297f351a6c8d30859bf859f.jpg", + "image_caption": [ + "Experiment Setup" + ], + "image_footnote": [], + "bbox": [ + 506, + 380, + 624, + 478 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f3582e4f64d8b962870cd0bc918c3d3db4f7284f946330b978bf0a23f7296008.jpg", + "image_caption": [ + "Albedo" + ], + "image_footnote": [], + "bbox": [ + 633, + 380, + 754, + 478 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/54ace4d89a4bea2f3bcc26dda08d1fbb78d21a99916af79f07cb6601bae05f91.jpg", + "image_caption": [ + "Shading", + "Figure 1. The visible image (brightened for visualization) captures the reflected light, which is the product of albedo and shading at that pixel. The absorbed light gets converted into heat and raises the temperature, which can be observed using a co-located thermal camera. The illumination is turned on at $t = 0$ . The temperature rise at $t = 0.1s$ and $t = 3s$ are shown using turbo colormap. Our novel theory establishes the relationship between light and heat transport and provides an analytical solution to compute albedo and shading for complex shapes and unknown illumination." 
+ ], + "image_footnote": [], + "bbox": [ + 766, + 380, + 885, + 478 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "on their surface temperature, and that can be measured using a thermal camera [34]. By modeling heat transport, we make the first attempt to estimate the intensity of light absorbed by an object, thus establishing the connection between light and heat transport.", + "bbox": [ + 496, + 642, + 893, + 717 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We develop a novel theory that proves having access to absorbed light turns single view intrinsic image decomposition into a pixel-wise well-posed problem, even for arbitrary shape and illumination. Our key insight is that all the complexities of the reflected light transport are also present in the absorbed light, in the same functional form but simply scaled by the complement of the albedo. Consider the color chart seen in Fig. 1. The amount of irradiance due to the line light is approximately equal for the black and white patches. While the visible image records a low intensity for the black patch, the corresponding increase in intensity in the thermal images is high, and vice versa. Leveraging the", + "bbox": [ + 496, + 719, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "11924", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "principle of energy conservation, the sum of reflected light and absorbed light at each scene point must equal its irradiance, which is also called as shading. Similarly, we can compute the ratio of reflected light to irradiance, which is also called as surface reflectance or albedo.", + "bbox": [ + 75, + 90, + 470, + 165 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A key ingredient in our approach is the ability to estimate the intensity of heat generated due to light absorption. In the general case, estimating it requires solving the heat transport equation which does not have an analytical solution for unknown shapes [5, 34]. However, in the absence of heat conduction, we show that the analytical solution to the heat transport equation for a constant source is a transient response that follows a 2-parameter exponential curve. Therefore, the source intensity can be estimated with as little as three frames from a thermal video. In practice, conduction occurs in all real-world objects albeit to a smaller degree in insulators and regions with low temperature gradient. Therefore, we limit the influence of conduction by focusing on the transient response of each pixel immediately after turning on light. A key limitation of our approach is that we require the system to be at thermal equilibrium before the light is turned on and other sources of heat generation, if any, remain constant. 
This is required to ensure the rise in temperature is only due to the absorbed light.", + "bbox": [ + 75, + 167, + 468, + 455 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Prior works in computational thermal imaging have studied the thermal transient response of objects to heating. Dashpute et al. [11] heat planar objects using a laser and capture a 1 min long thermal video to estimate its thermal diffusivity and emissivity. Of most relevance to our work, Tanaka et al. [32] heat objects using infrared lamps and record a 10 mins long thermal video. They decompose these videos using curve fitting into ambient, specular, diffuse and global components, where the latter two are assumed to be exponential curves. But this decomposition is akin to direct-global separation which is different from intrinsic image decomposition. Also, they use the extracted diffuse component as input to a photometric stereo algorithm. Note that their estimated \"albedo\" corresponds to absorptivity in the infrared spectrum and their photometric stereo is limited to distant point light sources at known directions (separate video for each direction). In contrast, our theory establishes and exploits the causal relationship between light and heat transport. And we apply our theory to albedo-shading separation in the visible spectrum for arbitrary unknown illumination using a single 4 sec thermal video and a single visible image.", + "bbox": [ + 75, + 458, + 468, + 789 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Several works in vision [17, 36, 37] and robotics [22, 25] fuse spatial features from the visible and thermal images in order to improve robustness of downstream tasks, such as object detection [24], to lighting and weather conditions. However, these methods do not reason about the relationship between the two spectrums from a physics perspective.", + "bbox": [ + 75, + 792, + 468, + 883 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We validate our theory through real world experiments", + "bbox": [ + 96, + 885, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "using a co-located setup of a visible and thermal camera. Our target objects, even though diffuse, are made of different materials, contain direct and global light transport (inter-reflections), low and high spatial frequency and unstructured illumination, all of which are unknown.", + "bbox": [ + 496, + 90, + 890, + 167 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Joint Light and Heat Transport", + "text_level": 1, + "bbox": [ + 498, + 181, + 790, + 199 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we briefly introduce the relationship between light and heat transport. While light energy is carried via photons, heat is thermal energy exchanged via molecular vibrations. Visible light (VIS, $0.4 - 0.7\\mu \\mathrm{m}$ ) transport can model the light scattered by the scene from a source towards the camera. The light absorbed by the scene gets converted to heat which is then exchanged via conduction, convection, retention (i.e. increase in temperature) and radiation, and is governed by the heat transport equation. 
Similar to VIS transport, Longwave Infrared light (LWIR, $8 - 14\\mu \\mathrm{m}$ ) transport can be used to model the radiation emitted by objects, based on their temperature, towards a thermal camera.", + "bbox": [ + 496, + 207, + 890, + 387 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our first contribution is an algorithm, described in Sec. 3, for estimating the intensity of absorbed light using only a thermal video. This involves two steps: 1) inferring temperatures using LWIR light transport, and 2) inferring source intensity using heat transport equation. As all objects in the scene constantly exchange heat, it is hard to disambiguate heat generated by light absorption from other sources of heat at equilibrium. However, if we disturb the equilibrium by turning on the visible light at a known time, then the resulting rise in temperature allows us to estimate heat generated only due to our illumination.", + "bbox": [ + 496, + 388, + 890, + 553 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our second contribution is a novel theory, described in Sec. 4, that decomposes VIS transport for arbitrary shapes and illumination. We derive simple analytical expressions for albedo and shading using a visible image and the absorbed light intensity estimated from a thermal video captured by a co-located thermal camera.", + "bbox": [ + 496, + 555, + 890, + 643 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Estimating Absorbed Light Intensity", + "text_level": 1, + "bbox": [ + 498, + 659, + 831, + 676 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Consider a scene initially at thermal equilibrium. At a time $t_1$ , the illumination, which is constant with time, is turned on and a thermal video is captured. We assume the illumination is focused primarily at the target scene and therefore the temperature of the surrounding remains constant. Our objective is to estimate the spatially varying absorbed light (heat source) intensity using a single thermal video.", + "bbox": [ + 496, + 685, + 890, + 790 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1. Thermal Images to Temperature Changes", + "text_level": 1, + "bbox": [ + 498, + 800, + 856, + 816 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In LWIR light transport, all surfaces including the camera and the scene emit (and reflect) radiation. 
The pixel intensity in the $n^{th}$ frame $I_{n}(\\mathbf{p})$ of a thermal video can be written as:", + "bbox": [ + 496, + 824, + 890, + 882 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nI _ {n} (\\mathbf {p}) = \\alpha U \\left(T _ {n} (\\mathbf {x})\\right) + U _ {s}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 602, + 885, + 890, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "11925", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $T_{n}(\\mathbf{x})$ is the temperature at time $t_n, \\alpha$ is the effective emissivity, $U_{s}$ denotes the radiation from the surrounding, and $U(T)$ is a non-linear function that approximates the integral of the Planck radiation law.", + "bbox": [ + 76, + 90, + 467, + 150 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For a small range around $T_*$ , $U(T)$ can be linearly approximated as:", + "bbox": [ + 76, + 151, + 467, + 181 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nU (T) = k _ {1} \\left(T - T _ {*}\\right) + k _ {2}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 194, + 467, + 210 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $k_{1}$ and $k_{2}$ are camera-specific constants that depend on $T_{*}$ . We refer the reader to Appendix A for more details. Combining Eq. (1) and Eq. (2), we get", + "bbox": [ + 76, + 224, + 467, + 268 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nI _ {n} (\\mathbf {p}) - I _ {m} (\\mathbf {p}) = k _ {1} \\alpha \\left(T _ {n} (\\mathbf {x}) - T _ {m} (\\mathbf {x})\\right). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 282, + 467, + 299 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The above equation shows that change in pixel intensity is linearly related to change in scene temperature. Note that commonly used thermal cameras are uncooled microbolometers that exhibit thermal inertia [28, 29], where the measured intensities have a small delay with respect to changes in the scene. This effect is ignored for the purposes of this paper.", + "bbox": [ + 76, + 311, + 467, + 416 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Heat Transport Equation without Conduction", + "text_level": 1, + "bbox": [ + 76, + 426, + 464, + 443 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Consider an infinitesimal volume at a scene point with area $\\delta_A$ and depth $\\delta_z$ . The heat transport equation at that point can be written as [5, 34]:", + "bbox": [ + 76, + 450, + 467, + 496 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} C _ {v} \\delta_ {A} \\delta_ {z} \\frac {\\partial T}{\\partial t} = \\kappa \\delta_ {A} \\delta_ {z} \\Delta T + \\delta_ {A} h _ {c} (T _ {s} - T) + \\\\ \\delta_ {A} \\sigma \\epsilon \\left(T _ {s} ^ {4} - T ^ {4}\\right) + \\delta_ {A} S, \\tag {4} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 120, + 505, + 467, + 555 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $C_v$ is the volumetric heat capacity, $T$ is the temperature, $\\kappa$ is the thermal conductivity, $\\Delta$ denotes the laplacian operator at that point, $h_c$ is the convection coefficient, $T_s$ is the surrounding temperature, $\\sigma$ is the Stefan-Boltzmann constant, $\\epsilon$ is the surface emissivity, and $S$ is the intensity of heat generated via light absorption. 
Note that all the terms are expressed in units of W. For an opaque Lambertian scene, all the light absorption happens near the surface.", + "bbox": [ + 76, + 566, + 467, + 686 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Note that the magnitude of heat conduction is proportional to the local temperature laplacian. Analytically solving Eq. (4) requires knowing the shape since the laplacian operator depends on the local curvature. Ignoring conduction, makes the heat equation pixel-wise independent and lends itself to an analytical solution independent of shape. Moreover, many real-world materials, such as paints, plastics, paper and wood, have low thermal conductivity. As the object is initially at equilibrium, local temperature laplacians start at zero and increase with time if and only if neighboring pixels have different material properties and/or receive different amounts of light. Therefore, we consider a short thermal video immediately after light is turned on when conduction can be ignored.", + "bbox": [ + 75, + 688, + 467, + 898 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Dividing by area and ignoring conduction, Eq. (4) is:", + "bbox": [ + 519, + 90, + 867, + 106 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nC _ {v} \\delta_ {z} \\frac {\\partial T}{\\partial t} = h _ {c} \\left(T _ {s} - T\\right) + \\sigma \\epsilon \\left(T _ {s} ^ {4} - T ^ {4}\\right) + S. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 542, + 114, + 888, + 143 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Since temperature rise due to light absorption is typically small ( $\\leq 15\\mathrm{K}$ within 4 sec in our experiments), we linearize the radiation term around a nominal temperature $T_{*}$ to get", + "bbox": [ + 498, + 152, + 888, + 198 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma \\epsilon \\left(T _ {s} ^ {4} - T ^ {4}\\right) \\approx 4 \\sigma \\epsilon T _ {*} ^ {3} \\left(T _ {s} - T\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 583, + 207, + 888, + 224 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where the absolute error due to linearization is $\\leq 4\\%$ . This simplifies Eq. (5) to", + "bbox": [ + 498, + 234, + 888, + 265 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nH \\frac {\\partial T}{\\partial t} + P T = S + P T _ {s}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 273, + 888, + 304 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $H = C_v\\delta_z$ and $P = (h_c + 4\\sigma \\epsilon T_*^3)$", + "bbox": [ + 498, + 311, + 781, + 329 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Analytical Solution", + "text_level": 1, + "bbox": [ + 500, + 337, + 683, + 353 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Solving Eq. (7) at a single pixel (refer Appendix B for derivation), we get", + "bbox": [ + 498, + 359, + 888, + 390 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nT _ {n} - T _ {1} = \\left(\\frac {S}{P} + T _ {s} - T _ {1}\\right) \\left(1 - e ^ {- \\frac {P}{H} \\left(t _ {n} - t _ {1}\\right)}\\right). \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 400, + 888, + 433 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Since we assume the system is initially at thermal equilibrium, we can set $T_{s} = T_{1}$ . Now, substituting Eq. 
(3) into the above equation, we get", + "bbox": [ + 498, + 441, + 888, + 487 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nI _ {n} - I _ {1} = \\frac {S k _ {1} \\alpha}{P} \\left(1 - e ^ {- \\frac {P}{H} \\left(t _ {n} - t _ {1}\\right)}\\right) \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 571, + 496, + 888, + 526 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Therefore, given a thermal video $\\{I_1,\\dots ,I_n\\}$ and corresponding time stamps $\\{t_1,\\ldots ,t_n\\}$ , we use gradient descent for curve fitting at each pixel independently:", + "bbox": [ + 498, + 534, + 888, + 580 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nI _ {n} - I _ {1} = c _ {1} \\left(1 - e ^ {- \\frac {t _ {n} - t _ {1}}{c _ {2}}}\\right). \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 589, + 888, + 609 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.4. Recovering S from Curve Fitting", + "text_level": 1, + "bbox": [ + 500, + 619, + 785, + 636 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The results of curve fitting provide $c_{1}$ and $c_{2}$ at each pixel. From Eq. (9) and Eq. (10), note that $c_{1} = \\frac{Sk_{1}\\alpha}{P}$ . Recovering $S$ from $c_{1}$ would require knowledge of $k_{1}, \\alpha$ and $P$ , where $P$ depends on $h_c, \\epsilon$ and $T_*$ . In theory, all these quantities could vary per-pixel. However, the spatial variation in $S$ , which depends on albedo in visible spectrum and illumination, is much greater than that of others. In this paper, we assume the quantity $\\beta = \\frac{k_1\\alpha}{P}$ is common for all pixels such that $\\beta$ is the constant of proportionality between $S$ and $c_{1}$ . Note that Lambertian scenes typically correspond to rough surfaces which have high emissivity. Also, it is known that most paints have similarly high emissivity values of $> 0.9$ irrespective of their albedo in the visible spectrum [34]. As the object is initially at equilibrium, we can assume $T_*$ , and hence $k_{1}$ , is common for all pixels. In the absence of wind, we reasonably assume convection, if it exists at all, to be uniform throughout the scene.", + "bbox": [ + 498, + 643, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "11926", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/1fc2277ea64133f1ced5f0c83399055f9a5f3295ba714c3e28b256a9f4c4099e.jpg", + "table_caption": [ + "Table 1. Various lighting configurations typically modeled in shape-from-intensity problems. It is trivial to verify the equations for spatially varying albedo $\\rho (x)$ and shading $\\eta (x)$ remains the same irrespective of the complexity of shape or illumination when estimates of both image irradiance $I_{v}(x)$ and absorbed light intensity $\\tilde{S} (x)$ are available. Here, $\\gamma$ is the camera gain, $\\beta$ is the unknown scale factor in the estimation of $\\tilde{S} (x),\\zeta = \\frac{\\gamma}{\\beta}$ is the relative scale factor, $E$ is the source intensity, s is the light source direction, n is the surface normal, $\\omega$ is light source direction for extended source, $\\eta$ is the shading term and $\\eta^{*}$ is the scene irradiance. Inter-reflections are modeled as spatially varying source intensities. 
All the above cases can be extended to model cast and attached shadows using a shadowing function $W(x)$ without changing the expressions for $\\rho$ and $\\eta$" + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Illumination</td><td>Image Irradiance Iv(x)</td><td>Estimated Absorbed Light S̃(x)</td><td>Albedo ρ(x)</td><td>Shading η(x)</td></tr>
<tr><td>Far source</td><td>γρ(x)/π E(s·n(x))</td><td>β(1 - ρ(x)) E(s·n(x))</td><td rowspan='6'>πIv(x)/(πIv(x)+ζS̃(x))</td><td rowspan='6'>πIv(x)+ζS̃(x)</td></tr>
<tr><td>Multiple sources</td><td>γρ(x)/π ∑_l E_l(s_l·n(x))</td><td>β(1 - ρ(x)) ∑_l E_l(s_l·n(x))</td></tr>
<tr><td>Near sources</td><td>γρ(x)/π ∑_l E_l(s_l(x)·n(x))</td><td>β(1 - ρ(x)) ∑_l E_l(s_l(x)·n(x))</td></tr>
<tr><td>Extended sources</td><td>γρ(x)/π ∫_ω E(ω)(ω·n(x))</td><td>β(1 - ρ(x)) ∫_ω E(ω)(ω·n(x))</td></tr>
<tr><td>Inter-reflections</td><td>γρ(x)/π ∫_ω E(x,ω)(ω·n(x))</td><td>β(1 - ρ(x)) ∫_ω E(x,ω)(ω·n(x))</td></tr>
<tr><td>General illumination</td><td>ρ(x)/π η(x)</td><td>β(1 - ρ(x)) η(x)/γ</td></tr></table>
", + "bbox": [ + 76, + 175, + 893, + 309 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Albedo-Shading Separation", + "text_level": 1, + "bbox": [ + 76, + 330, + 334, + 349 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Consider an opaque Lambertian scene imaged by a camera from a fixed view. We assume that the camera is sensitive to all the wavelengths present in the light sources i.e. we primarily consider LEDs or CFL bulbs when using visible cameras. We first consider the case where the albedo and the camera response are independent of wavelength in Sec. 4.1 and then extend our theory to wavelength-dependent albedo functions in Sec. 4.2. The words image and camera correspond to visible spectrum in this section.", + "bbox": [ + 75, + 357, + 470, + 494 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Grayscale Albedo and Camera Response", + "text_level": 1, + "bbox": [ + 76, + 501, + 426, + 517 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The image intensity $I_{v}$ , which is proportional to the power received by the camera per unit area, at a pixel $\\mathbf{p}(\\mathbf{x})$ focused at a scene point $\\mathbf{x}$ is:", + "bbox": [ + 75, + 523, + 468, + 570 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nI _ {v} (\\mathbf {p} (\\mathbf {x})) = \\frac {\\rho (\\mathbf {x})}{\\pi} \\eta (\\mathbf {x}), \\text {s . t .} \\eta (\\mathbf {x}) \\equiv \\gamma \\eta^ {*} (\\mathbf {x}) \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 114, + 577, + 468, + 606 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\rho (\\mathbf{x})$ and $\\eta (\\mathbf{x})$ are the spatially varying albedo and shading, $\\gamma >0$ is the camera gain representing the optics and sensor electronics in the camera, and $\\eta^{*}(\\mathbf{x})$ is the true scene irradiance received by $\\mathbf{x}$ . Note that we do not restrict the lighting geometry in any way and the shading $\\eta (\\mathbf{x})$ term is unstructured. In the rest of the paper, we use $\\mathbf{p}$ in place of $\\mathbf{p}(\\mathbf{x})$ .", + "bbox": [ + 75, + 612, + 468, + 718 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The pixel value in an image describes the energy reflected towards the camera by a scene point. Since the surface is opaque, there is no transmission and the remaining energy gets absorbed and is converted into heat. Recall from Sec. 3 that $S(\\mathbf{x})$ denotes the power absorbed per unit area, i.e. intensity, by $\\mathbf{x}$ . Let $\\tilde{S}(\\mathbf{x})$ be proportional to it, and is given by:", + "bbox": [ + 73, + 718, + 468, + 824 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {S} (\\mathbf {x}) = \\beta S (\\mathbf {x}), \\text {s . t .} S (\\mathbf {x}) = (1 - \\rho (\\mathbf {x})) \\eta^ {*} (\\mathbf {x}). \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 829, + 468, + 849 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During operation, light fixtures also generate some thermal energy which increases its temperature and thereby increasing its blackbody radiation. However, the magnitude of this", + "bbox": [ + 75, + 854, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "additional heat generated at $\\mathbf{x}$ is negligible and hence ignored in this paper. 
Next, we can express $\\tilde{S}$ using shading as", + "bbox": [ + 498, + 333, + 890, + 378 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {S} (\\mathbf {x}) = \\beta (1 - \\rho (\\mathbf {x})) \\frac {\\eta (\\mathbf {x})}{\\gamma} = \\frac {(1 - \\rho (\\mathbf {x})) \\eta (\\mathbf {x})}{\\zeta}, \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 526, + 383, + 890, + 417 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\zeta = \\frac{\\gamma}{\\beta}$ is the relative scale factor.", + "bbox": [ + 498, + 422, + 764, + 440 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the trivial case where $\\mathbf{x}$ receives no light (neither direct nor global illumination), the shading term $\\eta (\\mathbf{x}) = 0$ and the albedo cannot be estimated. Whenever $\\eta (\\mathbf{x}) > 0$ , we can re-write Eqs. (11) and (13) as", + "bbox": [ + 498, + 439, + 893, + 500 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\pi I _ {v} (\\mathbf {p}) \\frac {1}{\\eta (\\mathbf {x})} - \\rho (\\mathbf {x}) = 0 \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 609, + 506, + 890, + 537 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\zeta \\tilde {S} (\\mathbf {x}) \\frac {1}{\\eta (\\mathbf {x})} + \\rho (\\mathbf {x}) = 1 \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 540, + 890, + 571 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Solving the above system of equations, we get:", + "bbox": [ + 498, + 579, + 812, + 594 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\eta (\\mathbf {x}) = \\pi I _ {v} (\\mathbf {p}) + \\zeta \\tilde {S} (\\mathbf {x}) \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 606, + 601, + 890, + 619 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\rho (\\mathbf {x}) = \\frac {\\pi I _ {v} (\\mathbf {p})}{\\pi I _ {v} (\\mathbf {p}) + \\zeta \\tilde {S} (\\mathbf {x})}. \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 622, + 890, + 657 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "If $I_{v}(\\mathbf{p})$ , $\\tilde{S}(\\mathbf{x})$ and $\\zeta$ are known, the above equations provide a direct method to compute spatially varying albedo and shading components for complex shapes and arbitrary illumination. To emphasize its applicability further, Table 1 lists several types of lighting conditions typically modeled in shape-from-intensity problems and demonstrates that Eqs. (16), (17) hold in all cases.", + "bbox": [ + 496, + 665, + 890, + 773 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Towards General Albedo Functions", + "text_level": 1, + "bbox": [ + 498, + 780, + 808, + 794 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Let the camera have $K$ channels with known spectral responses $\\Gamma_{k}(\\lambda)$ . Recall that each wavelength present in the light sources must fall within at least one of the channels. 
The image irradiance at $\\mathbf{p}$ in channel $k$ can be written as:", + "bbox": [ + 496, + 803, + 890, + 864 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nI _ {v} ^ {k} (\\mathbf {p}) = \\gamma \\int_ {\\lambda} \\int_ {\\Omega} \\frac {\\rho (\\mathbf {x} , \\lambda)}{\\pi} \\Gamma_ {k} (\\lambda) L (\\mathbf {x}, \\lambda , \\omega) d \\omega d \\lambda , \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 871, + 890, + 905 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "11927", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\rho (\\mathbf{x},\\lambda)$ is the diffuse albedo as a function of wavelength, $L(\\mathbf{x},\\lambda ,\\omega)$ is the spectral radiance at $\\mathbf{x}$ , and $\\omega$ denotes the direction along the outer hemisphere. The corresponding estimate of absorbed power per unit area is:", + "bbox": [ + 76, + 90, + 468, + 151 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {S} (\\mathbf {x}) = \\beta \\int_ {\\lambda} \\int_ {\\Omega} (1 - \\rho (\\mathbf {x}, \\lambda)) L (\\mathbf {x}, \\lambda , \\omega) d \\omega d \\lambda , \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 102, + 157, + 468, + 191 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Shading at a point $\\mathbf{x}$ is influenced by the emission spectrum of the light sources, the relative geometry between $\\mathbf{x}$ and the light sources, and the albedo of other points in the scene due to inter-reflections. While this general case remains an open problem, in the rest of this section we ignore inter-reflections and assume all light sources have a common emission spectrum $l(\\lambda)$ i.e.", + "bbox": [ + 76, + 196, + 468, + 301 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {\\Omega} L (\\mathbf {x}, \\lambda , \\omega) d \\omega = \\eta^ {*} (\\mathbf {x}) l (\\lambda). \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 168, + 309, + 468, + 340 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that, the illumination is still arbitrary in terms of their locations, sizes and angular radiant intensity functions. Substituting Eq. (20) into Eq. (18) and Eq. (19), we can write", + "bbox": [ + 76, + 345, + 468, + 405 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} I _ {v} ^ {k} (\\mathbf {p}) = \\int_ {\\lambda} \\frac {\\rho (\\mathbf {x} , \\lambda)}{\\pi} \\Gamma_ {k} (\\lambda) \\eta (\\mathbf {x}) l (\\lambda) d \\lambda , (21) \\\\ \\tilde {S} (\\mathbf {x}) = \\frac {\\int_ {\\lambda} \\eta (\\mathbf {x}) l (\\lambda) d \\lambda - \\int_ {\\lambda} \\rho (\\mathbf {x} , \\lambda) \\eta (\\mathbf {x}) l (\\lambda) d \\lambda}{\\zeta}. (22) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 410, + 468, + 478 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As a continuous-valued function of wavelength, the diffuse albedo $\\rho (\\mathbf{x},\\lambda)$ is infinite-dimensional, which requires further assumptions to enable tractable computations. We rely on a body of work [10, 14-16] that shows that reflectance spectra lie close to a low-dimensional subspace. 
Denoting the basis for this subspace as $\\Phi_{\\rho}(\\lambda) = \\{\\tilde{\\rho}_1(\\lambda),\\dots ,\\tilde{\\rho}_M(\\lambda)\\}$ , we can express the diffuse albedo as [21]:", + "bbox": [ + 76, + 484, + 468, + 604 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\rho (\\mathbf {x}, \\lambda) = \\sum_ {m = 1} ^ {M} \\tilde {\\rho} _ {m} (\\lambda) a _ {\\mathbf {x}, m} = \\Phi_ {\\rho} (\\lambda) \\mathbf {a} _ {\\mathbf {x}} \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 140, + 611, + 468, + 652 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{a}_{\\mathbf{x}}\\in \\mathbb{R}^{M}$ are the unknown coefficients of interest. This simplifies Eq. (21) and Eq. (22) into", + "bbox": [ + 76, + 660, + 467, + 691 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nI _ {v} ^ {k} (\\mathbf {p}) = \\eta (\\mathbf {x}) \\mathbf {E} _ {k} ^ {T} \\frac {\\mathbf {a} _ {\\mathbf {x}}}{\\pi}, \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 178, + 696, + 468, + 724 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {S} (\\mathbf {x}) = \\frac {\\eta (\\mathbf {x}) \\left(L - \\mathbf {F} ^ {T} \\mathbf {a} _ {\\mathbf {x}}\\right)}{\\zeta}, \\tag {25}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 724, + 468, + 758 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{E}_k$ , $\\mathbf{F}$ and $L$ can be computed a priori as follows:", + "bbox": [ + 76, + 765, + 467, + 780 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {E} _ {\\mathbf {k}} [ i ] = \\int_ {\\lambda} l (\\lambda) \\Gamma_ {k} (\\lambda) \\tilde {\\rho} _ {i} (\\lambda) d \\lambda , (26) \\\\ \\mathbf {F} [ i ] = \\int_ {\\lambda} l (\\lambda) \\tilde {\\rho} _ {i} (\\lambda) d \\lambda , (27) \\\\ L = \\int_ {\\lambda} l (\\lambda) d \\lambda . (28) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 797, + 468, + 898 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Whenever $\\eta (\\mathbf{x}) > 0$ , Eqs. (24) and (25) can be written as", + "bbox": [ + 500, + 90, + 890, + 106 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\pi I _ {v} ^ {k} (\\mathbf {p}) \\xi (\\mathbf {x}) - \\mathbf {E} _ {k} ^ {T} \\mathbf {a} _ {\\mathbf {x}} = 0, \\forall k (29) \\\\ \\zeta \\tilde {S} (\\mathbf {x}) \\xi (\\mathbf {x}) + \\mathbf {F} ^ {T} \\mathbf {a} _ {\\mathbf {x}} = L, (30) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 593, + 132, + 890, + 171 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\xi (\\mathbf{x}) = 1 / \\eta (\\mathbf{x})$ . Note that we have a system of $K + 1$ linear equations with $M + 1$ unknowns, namely $\\mathbf{a}_{\\mathbf{x}}\\in \\mathbb{R}^{M}$ and $\\xi (\\mathbf{x})$ . Therefore, whenever $K\\geq M$ , the system of equations can be solved to obtain albedo and shading (reciprocal of $\\xi (\\mathbf{x})$ ) at each pixel independently for complex shapes and illumination. Specifically, we use non-negative least squares solver for this problem. For most vision applications, which use a 3-channel RGB camera, we choose a corresponding basis set $\\Phi_{\\rho}$ with $M = 3$ . Our theory could be used with multispectral cameras with more channels when higher fidelity in albedo is desired. While the above derivation relies on Eq. 
(20), it is still practically useful in many real-world scenes where inter-reflections exist as we will show in Sec. 5.", + "bbox": [ + 496, + 184, + 890, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the special case of a monochrome camera capturing a scene where albedo depends on wavelength, the shading at each pixel can be expressed as a weighted sum of $I$ and $S$ irrespective of the emission spectrum of the light source. We refer the reader to Appendix C for more details.", + "bbox": [ + 496, + 396, + 890, + 470 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experimental Results", + "text_level": 1, + "bbox": [ + 500, + 486, + 705, + 503 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To validate our theory, we perform experiments on several complex scenes with challenging illumination. Our scenes are mostly diffuse, but contain noticeable non-Lambertian features that test the practical utility of our theory to real-world objects. Our emphasis is on estimating the absorbed light intensity and performing albedo-shading separation.", + "bbox": [ + 496, + 512, + 890, + 603 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Hardware Details: Our imaging system consists of an IDS UI-3130CP color camera with $600 \\times 800$ resolution fitted with an 8mm Tamron lens, a FLIR Boson thermal camera having $\\leq 50\\mathrm{mK}$ NETD with $512 \\times 640$ resolution fitted with an $18\\mathrm{mm}$ ( $24^{\\circ}$ HFOV) integrated lens and a BSP-DI-25-2 gold dichroic beamsplitter from ISP Optics. The cameras are coarsely aligned using an optic stage and a homography is used for fine alignment. We use LED lights from Advanced Illumination, namely a high intensity line light (LL167G96-WHI), a large spot light (SL-S100150M-WHI) and two small spot lights (SL-S050075M-WHI). The relative emission spectrum of the lighting and the spectral response of the color filter array in the visible camera were obtained from their technical datasheets, see Fig. 2.", + "bbox": [ + 496, + 623, + 890, + 835 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Data Capture and Preprocessing: The visible camera was radiometrically calibrated [12]. To capture the full dynamic range of the illumination, we acquired a stack of 15", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "11928", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f1d4c0d4ec26f8e7fee03197ee99785321cd600478e261a2dddd363388ea4883.jpg", + "image_caption": [ + "(a) Imaging System" + ], + "image_footnote": [], + "bbox": [ + 84, + 88, + 267, + 186 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b76c355e89b035504019c0b5c0411c973af1556c03c0e7d7e0c5897d251854ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 274, + 88, + 460, + 191 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ec679174dfd5ba476b1b5565957e9900a46a69d21467cd7a810a404bb2bfa6fb.jpg", + "image_caption": [ + "(b) Experimental Setup", + "(c) Spectral properties of light and CFA", + "Figure 2. Our imaging system consists of a visible camera and a thermal camera colocated using a gold dichroic beamsplitter. The light sources are placed close to the target scene so that the rise in temperature due to light absorption is detectable in the thermal camera. All the light sources have the same emission spectrum. 
The relative emission spectrum of the white LED and the quantum efficiency curves of the Color Filter Array are obtained from the corresponding datasheets." + ], + "image_footnote": [], + "bbox": [ + 86, + 212, + 459, + 333 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "images in BayerRG format with a geometric progression of exposures that span $0.05\\mathrm{ms}$ to over $180\\mathrm{ms}$ . All the images were demosaiced using gradient-corrected linear interpolation [26] and subsequently quantized to 8-bit images. The resulting LDR images were composited into a single linear HDR image using the previously estimated camera response function. Finally, this image is warped into the perspective of the thermal camera using a homography.", + "bbox": [ + 75, + 488, + 468, + 608 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The thermal camera is allowed to reach steady state operating temperature after powering on, which can take up to 30mins. The entire experimental setup including the target object is allowed to reach thermal equilibrium before beginning data collection. A flat field correction is performed a few seconds prior to turning on the light and a thermal video is recorded. The thermal camera is operated in the high gain state and the raw 16-bit data is captured at $60\\mathrm{Hz}$ . Both the thermal images and the warped HDR image were downsized $4\\times$ using local mean computation to suppress noise and alleviate errors in co-location.", + "bbox": [ + 75, + 609, + 468, + 773 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation Details: We manually identify the first frame when light was turned on and use the pixel-wise median of the preceding frames as the initial frame $I_{1}$ . We use 200 frames since light was turned on for fitting the 2-parameter curve. We implement the curve fitting using gradient descent in PyTorch. We consider a 3 dimensional basis set for albedo with $\\tilde{\\rho}_b(\\lambda) = \\mathbb{I}[400\\mathrm{nm}\\leq \\lambda < 530\\mathrm{nm}]$ ,", + "bbox": [ + 75, + 794, + 468, + 902 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/14d514e0ab82c0a77f70916765292b0c25ec48a0367dc05481b04fb977b951d2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 92, + 759, + 282 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a1703a5c43210ea2f119a94c3be21e3429608753fb1611508ecd48597ed3c521.jpg", + "image_caption": [ + "Figure 3. The first column shows change in intensity at $0.1s$ and at $3s$ after turning on the lights. Note that we process each pixel independently. Two points of significance are marked in blue and red respectively. The middle column shows the curve fitting results for the highlighted points. The input intensities are shown as dots and the estimated intensities are shown as a continuous curves. The dashed lines correspond to the steady-state intensity that would be reached and equals $c_{1}$ from Eq. (10). The last column shows $c_{1}$ for each pixel as a 2D image and a histogram of $c_{2}$ ." + ], + "image_footnote": [], + "bbox": [ + 763, + 93, + 885, + 284 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$\\tilde{\\rho}_g(\\lambda) = \\mathbb{I}[530\\mathrm{nm}\\leq \\lambda < 620\\mathrm{nm}]$ , $\\tilde{\\rho}_r(\\lambda) = \\mathbb{I}[620\\mathrm{nm}\\leq \\lambda < 1100\\mathrm{nm}]$ , where $\\mathbb{I}[\\cdot ]$ is the indicator function. Please refer to Appendix D for details of $\\mathbf{E}_k$ , $\\mathbf{F}$ and $L$ . 
The relative factor $\\zeta$ can be calibrated for a co-located imaging system with a color chart under controlled lighting. Alternatively, $\\zeta$ can be treated as a hyper-parameter and tuned using cross-validation.", + "bbox": [ + 496, + 428, + 890, + 532 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Heat Source Estimation results", + "text_level": 1, + "bbox": [ + 500, + 542, + 772, + 556 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 3 shows the result of our curve fitting for the wooden blocks scene. The estimated constants $c_{2}$ , which is proportional to heat capacity, appear like white noise and its corresponding histogram plot resembles a Gaussian distribution. This could be due to high levels of noise in thermal videos as well as similar magnitudes of spatial variation in $P$ and $H$ . On the other hand, the estimated per-pixel constants $c_{1}$ have visual similarity to a shading image, although noisy.", + "bbox": [ + 496, + 566, + 890, + 688 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Albedo-Shading separation results", + "text_level": 1, + "bbox": [ + 500, + 696, + 802, + 712 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quantitative Evaluation: For comparison, we chose two methods: (i) the classical, even if dated, Retinex algorithm [23], which is well-suited for the color chart scene. (ii) Ordinal Shading [7], a SOTA learning-based approach which requires a large training dataset. We use the pretrained model here. We use the scale-invariant Mean Squared Error (si-MSE) from [19] as our metric. It is hard to obtain ground truth albedo and shading for general scenes under unknown lighting. And publicly available datasets do not have co-located thermal videos. Therefore, we first evaluate albedo using the color chart under 4 different illuminations. As shown in Fig. 4, our albedo estimates are", + "bbox": [ + 496, + 719, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "11929", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e54b0bed28940c1da2f4742f7d969de2b28839f5e6987cc382d872e0254bbdb9.jpg", + "image_caption": [ + "Figure 4. The first column is the mean value across colors (Ours: 0.020, Retinex: 0.034, Ordinal Shading: 0.080)." + ], + "image_footnote": [], + "bbox": [ + 80, + 89, + 467, + 170 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/da4288e7f27ce649a8b54c015ffcf88b8cbace8b0dde12496e0f68d23619fbe5.jpg", + "image_caption": [ + "Figure 5. Our method operates per-pixel while other methods use hand-crafted or learnt spatial priors. Note the residual albedo in their estimated shading (images brightened for visualization)." + ], + "image_footnote": [], + "bbox": [ + 89, + 217, + 459, + 357 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/0babdced361e533b830845241e6801e159cb203d03ed76c12f6930124ebacf41.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td></td><td>Ours</td><td>RGB-Retinex</td><td>Ordinal Shading</td></tr>
<tr><td>Albedo</td><td>0.084</td><td>0.253</td><td>0.399</td></tr>
<tr><td>Shading</td><td>0.0005</td><td>0.0030</td><td>0.0080</td></tr></table>
", + "bbox": [ + 84, + 417, + 460, + 467 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. si-MSE values for Albedo and Shading using pseudo ground truth data obtained for the painted mask scene.", + "bbox": [ + 76, + 477, + 467, + 506 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3b6c77f5e2321cc61d72420270513ed39686e7763a1a31541994e090a8f41085.jpg", + "image_caption": [ + "Figure 6. (a) Curve fitting results of the same pixel for different video lengths. (b) Albedo error (against color chart ground truth) vs. length of input video." + ], + "image_footnote": [], + "bbox": [ + 78, + 513, + 467, + 599 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "significantly better than the other methods.", + "bbox": [ + 76, + 664, + 359, + 678 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Next, we obtain pseudo-ground truth similar to [19] i.e. with a scene painted white (ground truth shading) and re-painted with texture. Fig. 5 and Tab. 2 illustrate that our method outperforms SOTA methods both qualitatively and quantitatively.", + "bbox": [ + 75, + 680, + 467, + 755 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation on length of video: Longer duration leads to higher spatial thermal gradients that induce more conduction while shorter duration has lower signal to noise ratio. As seen in Fig. 6a, the curve deviates further away from the initial measurements when using longer videos. The accuracy of the fit correlates with the accuracy of albedo estimate for the color chart (see Fig. 6b). Our experiments use 200 frames which corresponds to the green plots.", + "bbox": [ + 75, + 779, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Qualitative evaluation: Figure 7 summarizes the albedo shading separation results for the four target scenes. As shown in the first two rows, we are given a HDR image from the visible camera and the corresponding absorbed light intensity is estimated from a thermal video using curve fitting as discussed earlier. And the last two rows show results that validate Eqs. (30) which are derived for general functions of albedo and camera response with wavelength.", + "bbox": [ + 496, + 90, + 890, + 210 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In the first scene, the interior of a mask is painted with white and black acrylic paints and the line light is directed at the portion of the image painted white. As highlighted in the callout, the concave portion corresponding to nose appears flat in the estimated albedo image for both the monochrome and RGB cases. Note that the temperature of the background wall does not raise sufficiently in all of the scenes, which makes it challenging for our approach. The thick wall would also have a high heat capacity which exacerbates the challenge. In the second scene, a cardboard sheet with printing on one side is folded to resemble the shape of W. The inner V groove would have inter-reflections while the outer faces are convex.", + "bbox": [ + 496, + 212, + 890, + 406 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In the third scene, a collection of solid colored wooden blocks are stacked into a complex geometry with both cast and attached shadows. This result indirectly shows that ignoring heat conduction for solid objects still allows one to recover the absorbed light intensity precisely. In the final scene, we use a stack of disks made of soft plastic. 
Different patterns are embossed onto the circumference of the disk. As highlighted in the callout, the shape information corresponding to the embossing is correctly separated into the shading term while the albedo term appears flat. These results demonstrate the broad applicability of our theory to everyday scenes with complex shapes and illumination.", + "bbox": [ + 496, + 407, + 890, + 589 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Grayscale approximation: Fig. 8 shows the estimated albedo and shading using grayscale approximation (Eqs. (16) and (17)). Recall that the grayscale approximation does not require knowledge of the emission spectrum of the light sources and the estimated shading is similar to that using Eqs (30). The monochrome image is approximated by taking the mean value across color channels. Corresponding results for all the scenes are provided in Appendix E.", + "bbox": [ + 496, + 606, + 890, + 728 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 739, + 617, + 753 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This paper studies the theoretical connection between light transport in visible spectrum, heat transport in solids and light transport in the thermal infrared spectrum. We proved that having an estimate of absorbed light turns single image intrinsic image decomposition into a well-posed problem for arbitrary shape and illumination for lambertian scenes. To estimate absorbed light, we derive an analytical expression for surfaces with negligible heat conduction by modeling heat transport immediately after turning on illumination.", + "bbox": [ + 496, + 763, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "11930", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/748958ee1572981de832371edede1391bb1170343ed8ef197ff522880b248763.jpg", + "image_caption": [ + "Mask Interior" + ], + "image_footnote": [], + "bbox": [ + 114, + 109, + 320, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4e656e075a1fe093ebb4783dd16ff7451ffc74806375925846c3f5ccc66ab178.jpg", + "image_caption": [ + "W-Cardboard" + ], + "image_footnote": [], + "bbox": [ + 330, + 109, + 509, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/24473dc35157725c072e77362f948c7e610aa888c288fa0cbd01765990685544.jpg", + "image_caption": [ + "Wooden Blocks" + ], + "image_footnote": [], + "bbox": [ + 519, + 109, + 700, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/59403cee9a05a9d1b5f000a02153f471102c5d9d04da1a6c5747e15ecfb83c8b.jpg", + "image_caption": [ + "Soft Toys" + ], + "image_footnote": [], + "bbox": [ + 710, + 111, + 893, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fa75343ba09fc64648728a7764a4a8010a8060fa86089e6dabe7fddde88b8897.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 140, + 228, + 320, + 339 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d54763d13abba4ef5a1087bbc8f29013ed9bb6e751fe53569cfd864d59913795.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 228, + 509, + 339 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4e8ebc5500c6d741f4aecd220a1d4e17cd4211a255027a793baff16ba3173f63.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 228, + 700, + 339 + ], + "page_idx": 7 + }, + { + "type": "image", + 
"img_path": "images/aa8901c22158f65472bc64582400167696fcac57b92866e1c41d70fa17670608.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 710, + 228, + 892, + 339 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c2ecb21f35cde1e9af35ff684b2d79c03a800cd5f25d3e72b40e88291fcb2b5f.jpg", + "image_caption": [ + "RGB Intrinsic Image Decomposition Input" + ], + "image_footnote": [], + "bbox": [ + 114, + 344, + 320, + 454 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d31d8e94c7be6eb65300047a9764c4cb7d5b8be03c7268a97a301159848d3906.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 343, + 509, + 454 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5019005503701ca292aaacb0ba9709ad400d1a452bc2d4c0c0495cfef8e551aa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 343, + 700, + 455 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4e5642cb804e94c47e5f5663aa97b3882d2736a95d70c7d0d3f5c5a3d0f7b85c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 710, + 344, + 892, + 455 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d1f4ac47963f96280d53698a210a512ed88435543e72ed4d8f0a8862a09fdd1c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 116, + 464, + 320, + 573 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8a74eea425aad3e8e20075c4c5061b772f76a7a6dd0c76304a031aeb3dd58a97.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 464, + 509, + 573 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4f00928a1256f4043eb97a011313e612a83b072fc9c5c321c0f47acf372292bc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 521, + 464, + 700, + 573 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5890a8cb26d8397bc5329cd9f2f4d468603b7e2e0b618f281581ef259eca8fbe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 710, + 464, + 892, + 573 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/51a8753d21f4a0b6f7943684a6d624e1749425dd6e510f62df44b2da8b099270.jpg", + "image_caption": [ + "Figure 7. The first row shows the HDR visible image (brightened for visualization). Note that the colorchart is not an input to our method. The second row shows the estimated heat source intensity (turbo colormap) obtained using the method in Sec. 3. The last two rows correspond to solving Eqs. (30) using non-negative least squares method. The estimated albedo is clipped to the range [0, 1]. The callouts for the visible image, heat source intensity, and shading are normalized individually to aid visualization.", + "Figure 8. Albedo-Shading result for the soft toys scene using the grayscale approximation." + ], + "image_footnote": [], + "bbox": [ + 81, + 651, + 468, + 782 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Experiments showed that albedo and shading can be measured from a single view given a visible image and a short thermal video from a co-located imaging system.", + "bbox": [ + 75, + 854, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Just like we have shown an example of how modeling heat transport can help solve challenges in visible light transport, we believe research in visible light transport can help Infrared Thermography by improving accuracy of temperature measurement or observing heat transfer within inhomogeneous surfaces. 
Extending our theory to the full light transport, including general BRDFs, translucent materials and subsurface scattering are just a few of the exciting new directions that this research opens up.", + "bbox": [ + 496, + 654, + 893, + 791 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 811, + 668, + 829 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was partly supported by NSF grants IIS-2107236, CCF-1730147, and NSF-NIFA AI Institute for Resilient Agriculture. The authors would like to thank Mark Sheinin for helpful discussions.", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "11931", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jonathan T Barron and Jitendra Malik. Shape, illumination, and reflectance from shading. IEEE transactions on pattern analysis and machine intelligence, 37(8):1670-1687, 2014. 1", + "[2] Anil S Baslamisli, Hoang-An Le, and Theo Gevers. Cnn based learning using reflection and retina models for intrinsic image decomposition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6674-6683, 2018. 1", + "[3] Peter N Belhumeur, David J Kriegman, and Alan L Yuille. The bas-relief ambiguity. International journal of computer vision, 35(1):33-44, 1999. 1", + "[4] Sean Bell, Kavita Bala, and Noah Snavely. Intrinsic images in the wild. ACM Transactions on Graphics (TOG), 33(4): 1-12, 2014. 1", + "[5] Theodore L. Bergman. Introduction to Heat Transfer. Wiley, 2011. 1, 2, 3", + "[6] Nicolas Bonneel, Balazs Kovacs, Sylvain Paris, and Kavita Bala. Intrinsic decompositions for image editing. In Computer Graphics Forum, pages 593-609. Wiley Online Library, 2017. 1", + "[7] Chris Careaga and Yaqiz Aksoy. Intrinsic image decomposition via ordinal shading. ACM Transactions on Graphics, 43 (1):1-24, 2023. 6", + "[8] Robert Carroll, Ravi Ramamoorthi, and Maneesh Agrawala. Illumination decomposition for material recoloring with consistent interreflections. ACM Trans. Graph., 30(4):43, 2011. 1", + "[9] Jason Chang, Randi Cabezas, and John W Fisher III. Bayesian nonparametric intrinsic image decomposition. In European conference on computer vision, pages 704-719. Springer, 2014. 1", + "[10] Hamilton Y Chong, Steven J Gortler, and Todd Zickler. The von kries hypothesis and a basis for color constancy. In 2007 IEEE 11th International Conference on Computer Vision, pages 1-8. IEEE, 2007. 5", + "[11] Aniket Dashpute, Vishwanath Saragadam, Emma Alexander, Florian Willomitzer, Aggelos Katsaggelos, Ashok Veeraraghavan, and Oliver Cossairt. Thermal spread functions (tsf): Physics-guided material classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1641-1650, 2023. 2", + "[12] Paul E. Debevec and Jitendra Malik. Recovering high dynamic range radiance maps from photographs. In Proceedings of the 24th Annual Conference on Computer Graphics and Interactive Techniques, page 369-378, USA, 1997. ACM Press/Addison-Wesley Publishing Co. 5", + "[13] Sylvain Duchéne, Clement Riant, Gaurav Chaurasia, Jorge Lopez-Moreno, Pierre-Yves Laffont, Stefan Popov, Adrien Bousseau, and George Drettakis. 
Multi-view intrinsic images of outdoors scenes with an application to relighting. ACM Transactions on Graphics, page 16, 2015. 1", + "[14] Graham D Finlayson, Mark S Drew, and Brian V Funt. Color constancy: enhancing von kries adaption via sensor transformations. In Human Vision, Visual Processing, and Digital Display IV, pages 473-484, 1993. 5" + ], + "bbox": [ + 78, + 114, + 468, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Graham D Finlayson, Mark S Drew, and Brian V Funt. Diagonal transforms suffice for color constancy. In IEEE International Conference on Computer Vision, pages 164-171, 1993.", + "[16] Graham Fyffe, Xueming Yu, and Paul Debevec. Single-shot photometric stereo by spectral multiplexing. In IEEE International Conference on Computational Photography (ICCP), 2011. 5", + "[17] Rikke Gade and Thomas B Moeslund. Thermal cameras and applications: a survey. Machine vision and applications, 25: 245-262, 2014. 2", + "[18] Elena Garces, Carlos Rodriguez-Pardo, Dan Casas, and Jorge Lopez-Moreno. A survey on intrinsic images: Delving deep into lambert and beyond. International Journal of Computer Vision, 130(3):836-868, 2022. 1", + "[19] Roger Grosse, Micah K Johnson, Edward H Adelson, and William T Freeman. Ground truth dataset and baseline evaluations for intrinsic image algorithms. In 2009 IEEE 12th International Conference on Computer Vision, pages 2335-2342. IEEE, 2009. 6, 7", + "[20] Berthold KP Horn and Michael J Brooks. Shape from shading. MIT press, 1989. 1", + "[21] Zhuo Hui, Kalyan Sunkavalli, Sunil Hadap, and Aswin C Sankaranarayanan. Illuminant spectra-based source separation using flash photography. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6209-6218, 2018. 5", + "[22] Soonmin Hwang, Jaesik Park, Namil Kim, Yukyung Choi, and In So Kweon. Multispectral pedestrian detection: Benchmark dataset and baseline. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1037-1045, 2015. 2", + "[23] Edwin H Land and John J McCann. Lightness and retinae theory. Josa, 61(1):1-11, 1971. 1, 6", + "[24] Qiao Liu, Zhenyu He, Xin Li, and Yuan Zheng. Ptb-tir: A thermal infrared pedestrian tracking benchmark. IEEE Transactions on Multimedia, 22(3):666-675, 2019. 2", + "[25] Yawen Lu and Guoyu Lu. Superthermal: Matching thermal as visible through thermal feature exploration. IEEE Robotics and Automation Letters, 6(2):2690-2697, 2021. 2", + "[26] Henrique S Malvar, Li-wei He, and Ross Cutler. High-quality linear interpolation for demosaicing of bayer-patterned color images. In 2004 IEEE International Conference on Acoustics, Speech, and Signal Processing, pages iii-485. IEEE, 2004. 6", + "[27] Shree K Nayar, Katsushi Ikeuchi, and Takeo Kanade. Shape from interreflections. International Journal of Computer Vision, 6:173-195, 1991. 1", + "[28] Manikandasriram Srinivasan Ramanagopal, Zixu Zhang, Ram Vasudevan, and Matthew Johnson-Roberson. Pixelwise motion deblurring of thermal videos. arXiv preprint arXiv:2006.04973, 2020. 3", + "[29] Vishwanath Saragadam, Akshit Dave, Ashok Veeraraghavan, and Richard G. Baraniuk. Thermal image processing via physics-inspired deep networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops, pages 4057-4065, 2021. 
3" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "11932", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[30] Steven M Seitz, Yasuyuki Matsushita, and Kiriakos N Kutulakos. A theory of inverse light transport. In Tenth IEEE International Conference on Computer Vision (ICCV'05) Volume 1, pages 1440-1447. IEEE, 2005. 1", + "[31] Steven A Shafer. Using color to separate reflection components. Color Research & Application, 10(4):210-218, 1985. 1", + "[32] Kenichiro Tanaka, Nobuhiro Ikeya, Tsuyoshi Takatani, Hiroyuki Kubo, Takuya Funatomi, Vijay Ravi, Achuta Kadambi, and Yasuhiro Mukaigawa. Time-resolved far infrared light transport decomposition for thermal photometric stereo. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(6):2075-2085, 2019. 2", + "[33] Ayush Tewari, Ohad Fried, Justus Thies, Vincent Sitzmann, Stephen Lombardi, Kalyan Sunkavalli, Ricardo MartinBrualla, Tomas Simon, Jason Saragih, Matthias Nießner, et al. State of the art on neural rendering. In Computer Graphics Forum, pages 701-727. Wiley Online Library, 2020. 1", + "[34] Michael Vollmer and Klaus-Peter Mollmann. Fundamentals of Infrared Thermal Imaging, chapter 1, pages 1-106. John Wiley & Sons, Ltd, 2017. 1, 2, 3", + "[35] Robert J Woodham. Photometric method for determining surface orientation from multiple images. Optical engineering, 19(1):139-144, 1980. 1", + "[36] Pengyu Zhang, Jie Zhao, Dong Wang, Huchuan Lu, and Xiang Ruan. Visible-thermal uav tracking: A large-scale benchmark and new baseline. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8886–8895, 2022. 2", + "[37] Xingchen Zhang, Ping Ye, and Gang Xiao. Vifb: A visible and infrared image fusion benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 104-105, 2020. 2" + ], + "bbox": [ + 78, + 90, + 468, + 571 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "11933", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/c36c78d9-fdc0-45ae-af17-5820282f52ff_model.json b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/c36c78d9-fdc0-45ae-af17-5820282f52ff_model.json new file mode 100644 index 0000000000000000000000000000000000000000..dbc6de67fc3b0734b9bd833b0e6c1e07594b5f16 --- /dev/null +++ b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/c36c78d9-fdc0-45ae-af17-5820282f52ff_model.json @@ -0,0 +1,2508 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.144, + 0.131, + 0.826, + 0.152 + ], + "angle": 0, + "content": "A Theory of Joint Light and Heat Transport for Lambertian Scenes" + }, + { + "type": "text", + "bbox": [ + 0.095, + 0.181, + 0.877, + 0.217 + ], + "angle": 0, + "content": "Mani Ramanagopal, Sriram Narayanan, Aswin C. Sankaranarayanan, and Srinivasa G. 
Narasimhan \nCarnegie Mellon University, Pittsburgh, PA 15213, USA" + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.22, + 0.719, + 0.234 + ], + "angle": 0, + "content": "{manikans, snochurn, saswin, srinivas}@andrew.cmu.edu" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.301, + 0.474, + 0.575 + ], + "angle": 0, + "content": "We present a novel theory that establishes the relationship between light transport in visible and thermal infrared, and heat transport in solids. We show that heat generated due to light absorption can be estimated by modeling heat transport using a thermal camera. For situations where heat conduction is negligible, we analytically solve the heat transport equation to derive a simple expression relating the change in thermal image intensity to the absorbed light intensity and heat capacity of the material. Next, we prove that intrinsic image decomposition for Lambertian scenes becomes a well-posed problem if one has access to the absorbed light. Our theory generalizes to arbitrary shapes and unstructured illumination. Our theory is based on applying energy conservation principle at each pixel independently. We validate our theory using real-world experiments on diffuse objects made of different materials that exhibit both direct and global components (inter-reflections) of light transport under unknown complex lighting." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.603, + 0.21, + 0.619 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.629, + 0.47, + 0.81 + ], + "angle": 0, + "content": "Printed on paper, this text appears black because the ink does not reflect much light. So what happens to the light striking the ink? It gets absorbed and converted into heat, thereby disappearing from the visible light transport system. Starting from the early works in 1970s [3, 20, 23, 27, 31, 35], decades of research[6, 18, 33] have attempted to separate surface reflectance and shading from images by modeling shapes[30], illuminations [8] and their interactions [13]. However, in the general case, decomposing light transport is fundamentally an ill-posed problem, thus requiring handcrafted [23] or learned priors[1, 2, 4, 9]. But what if we can somehow observe the light lost to absorption?" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Analogous to light transport, heat transport models the generation and flow of heat through a medium and its exchange with the surrounding [5, 34]. In the heat transport system, the heat generated due to light absorption is no different from any other type of heat generation. 
While heat itself cannot be seen, all objects radiate infrared light based" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.268, + 0.626, + 0.368 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.532, + 0.369, + 0.6, + 0.38 + ], + "angle": 0, + "content": "Visible Image" + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.268, + 0.755, + 0.367 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.635, + 0.368, + 0.759, + 0.378 + ], + "angle": 0, + "content": "Temperature rise at \\(t = 0.1s\\)" + }, + { + "type": "image", + "bbox": [ + 0.768, + 0.27, + 0.887, + 0.369 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.769, + 0.37, + 0.886, + 0.379 + ], + "angle": 0, + "content": "Temperature rise at \\(t = 3s\\)" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.381, + 0.625, + 0.479 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.522, + 0.481, + 0.611, + 0.492 + ], + "angle": 0, + "content": "Experiment Setup" + }, + { + "type": "image", + "bbox": [ + 0.635, + 0.381, + 0.755, + 0.479 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.675, + 0.481, + 0.712, + 0.491 + ], + "angle": 0, + "content": "Albedo" + }, + { + "type": "image", + "bbox": [ + 0.767, + 0.381, + 0.887, + 0.479 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.806, + 0.481, + 0.848, + 0.492 + ], + "angle": 0, + "content": "Shading" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.505, + 0.895, + 0.631 + ], + "angle": 0, + "content": "Figure 1. The visible image (brightened for visualization) captures the reflected light, which is the product of albedo and shading at that pixel. The absorbed light gets converted into heat and raises the temperature, which can be observed using a co-located thermal camera. The illumination is turned on at \\( t = 0 \\). The temperature rise at \\( t = 0.1s \\) and \\( t = 3s \\) are shown using turbo colormap. Our novel theory establishes the relationship between light and heat transport and provides an analytical solution to compute albedo and shading for complex shapes and unknown illumination." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.643, + 0.894, + 0.718 + ], + "angle": 0, + "content": "on their surface temperature, and that can be measured using a thermal camera [34]. By modeling heat transport, we make the first attempt to estimate the intensity of light absorbed by an object, thus establishing the connection between light and heat transport." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.902 + ], + "angle": 0, + "content": "We develop a novel theory that proves having access to absorbed light turns single view intrinsic image decomposition into a pixel-wise well-posed problem, even for arbitrary shape and illumination. Our key insight is that all the complexities of the reflected light transport are also present in the absorbed light, in the same functional form but simply scaled by the complement of the albedo. Consider the color chart seen in Fig. 1. The amount of irradiance due to the line light is approximately equal for the black and white patches. While the visible image records a low intensity for the black patch, the corresponding increase in intensity in the thermal images is high, and vice versa. 
Leveraging the" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "11924" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.166 + ], + "angle": 0, + "content": "principle of energy conservation, the sum of reflected light and absorbed light at each scene point must equal its irradiance, which is also called as shading. Similarly, we can compute the ratio of reflected light to irradiance, which is also called as surface reflectance or albedo." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.169, + 0.47, + 0.456 + ], + "angle": 0, + "content": "A key ingredient in our approach is the ability to estimate the intensity of heat generated due to light absorption. In the general case, estimating it requires solving the heat transport equation which does not have an analytical solution for unknown shapes [5, 34]. However, in the absence of heat conduction, we show that the analytical solution to the heat transport equation for a constant source is a transient response that follows a 2-parameter exponential curve. Therefore, the source intensity can be estimated with as little as three frames from a thermal video. In practice, conduction occurs in all real-world objects albeit to a smaller degree in insulators and regions with low temperature gradient. Therefore, we limit the influence of conduction by focusing on the transient response of each pixel immediately after turning on light. A key limitation of our approach is that we require the system to be at thermal equilibrium before the light is turned on and other sources of heat generation, if any, remain constant. This is required to ensure the rise in temperature is only due to the absorbed light." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.459, + 0.47, + 0.79 + ], + "angle": 0, + "content": "Prior works in computational thermal imaging have studied the thermal transient response of objects to heating. Dashpute et al. [11] heat planar objects using a laser and capture a 1 min long thermal video to estimate its thermal diffusivity and emissivity. Of most relevance to our work, Tanaka et al. [32] heat objects using infrared lamps and record a 10 mins long thermal video. They decompose these videos using curve fitting into ambient, specular, diffuse and global components, where the latter two are assumed to be exponential curves. But this decomposition is akin to direct-global separation which is different from intrinsic image decomposition. Also, they use the extracted diffuse component as input to a photometric stereo algorithm. Note that their estimated \"albedo\" corresponds to absorptivity in the infrared spectrum and their photometric stereo is limited to distant point light sources at known directions (separate video for each direction). In contrast, our theory establishes and exploits the causal relationship between light and heat transport. And we apply our theory to albedo-shading separation in the visible spectrum for arbitrary unknown illumination using a single 4 sec thermal video and a single visible image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.793, + 0.469, + 0.884 + ], + "angle": 0, + "content": "Several works in vision [17, 36, 37] and robotics [22, 25] fuse spatial features from the visible and thermal images in order to improve robustness of downstream tasks, such as object detection [24], to lighting and weather conditions. 
However, these methods do not reason about the relationship between the two spectrums from a physics perspective." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.469, + 0.901 + ], + "angle": 0, + "content": "We validate our theory through real world experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.168 + ], + "angle": 0, + "content": "using a co-located setup of a visible and thermal camera. Our target objects, even though diffuse, are made of different materials, contain direct and global light transport (inter-reflections), low and high spatial frequency and unstructured illumination, all of which are unknown." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.182, + 0.791, + 0.2 + ], + "angle": 0, + "content": "2. Joint Light and Heat Transport" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.208, + 0.892, + 0.388 + ], + "angle": 0, + "content": "In this section, we briefly introduce the relationship between light and heat transport. While light energy is carried via photons, heat is thermal energy exchanged via molecular vibrations. Visible light (VIS, \\(0.4 - 0.7\\mu \\mathrm{m}\\)) transport can model the light scattered by the scene from a source towards the camera. The light absorbed by the scene gets converted to heat which is then exchanged via conduction, convection, retention (i.e. increase in temperature) and radiation, and is governed by the heat transport equation. Similar to VIS transport, Longwave Infrared light (LWIR, \\(8 - 14\\mu \\mathrm{m}\\)) transport can be used to model the radiation emitted by objects, based on their temperature, towards a thermal camera." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.389, + 0.892, + 0.554 + ], + "angle": 0, + "content": "Our first contribution is an algorithm, described in Sec. 3, for estimating the intensity of absorbed light using only a thermal video. This involves two steps: 1) inferring temperatures using LWIR light transport, and 2) inferring source intensity using heat transport equation. As all objects in the scene constantly exchange heat, it is hard to disambiguate heat generated by light absorption from other sources of heat at equilibrium. However, if we disturb the equilibrium by turning on the visible light at a known time, then the resulting rise in temperature allows us to estimate heat generated only due to our illumination." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.556, + 0.892, + 0.645 + ], + "angle": 0, + "content": "Our second contribution is a novel theory, described in Sec. 4, that decomposes VIS transport for arbitrary shapes and illumination. We derive simple analytical expressions for albedo and shading using a visible image and the absorbed light intensity estimated from a thermal video captured by a co-located thermal camera." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.66, + 0.833, + 0.678 + ], + "angle": 0, + "content": "3. Estimating Absorbed Light Intensity" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.686, + 0.892, + 0.791 + ], + "angle": 0, + "content": "Consider a scene initially at thermal equilibrium. At a time \\( t_1 \\), the illumination, which is constant with time, is turned on and a thermal video is captured. We assume the illumination is focused primarily at the target scene and therefore the temperature of the surrounding remains constant. Our objective is to estimate the spatially varying absorbed light (heat source) intensity using a single thermal video." 
+ }, + { + "type": "title", + "bbox": [ + 0.499, + 0.801, + 0.857, + 0.818 + ], + "angle": 0, + "content": "3.1. Thermal Images to Temperature Changes" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.892, + 0.883 + ], + "angle": 0, + "content": "In LWIR light transport, all surfaces including the camera and the scene emit (and reflect) radiation. The pixel intensity in the \\( n^{th} \\) frame \\( I_{n}(\\mathbf{p}) \\) of a thermal video can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.603, + 0.886, + 0.892, + 0.902 + ], + "angle": 0, + "content": "\\[\nI _ {n} (\\mathbf {p}) = \\alpha U \\left(T _ {n} (\\mathbf {x})\\right) + U _ {s}, \\tag {1}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11925" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.468, + 0.151 + ], + "angle": 0, + "content": "where \\( T_{n}(\\mathbf{x}) \\) is the temperature at time \\( t_n, \\alpha \\) is the effective emissivity, \\( U_{s} \\) denotes the radiation from the surrounding, and \\( U(T) \\) is a non-linear function that approximates the integral of the Planck radiation law." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.152, + 0.468, + 0.182 + ], + "angle": 0, + "content": "For a small range around \\( T_* \\), \\( U(T) \\) can be linearly approximated as:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.195, + 0.468, + 0.212 + ], + "angle": 0, + "content": "\\[\nU (T) = k _ {1} \\left(T - T _ {*}\\right) + k _ {2}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.225, + 0.468, + 0.27 + ], + "angle": 0, + "content": "where \\( k_{1} \\) and \\( k_{2} \\) are camera-specific constants that depend on \\( T_{*} \\). We refer the reader to Appendix A for more details. Combining Eq. (1) and Eq. (2), we get" + }, + { + "type": "equation", + "bbox": [ + 0.135, + 0.283, + 0.468, + 0.3 + ], + "angle": 0, + "content": "\\[\nI _ {n} (\\mathbf {p}) - I _ {m} (\\mathbf {p}) = k _ {1} \\alpha \\left(T _ {n} (\\mathbf {x}) - T _ {m} (\\mathbf {x})\\right). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.312, + 0.468, + 0.417 + ], + "angle": 0, + "content": "The above equation shows that change in pixel intensity is linearly related to change in scene temperature. Note that commonly used thermal cameras are uncooled microbolometers that exhibit thermal inertia [28, 29], where the measured intensities have a small delay with respect to changes in the scene. This effect is ignored for the purposes of this paper." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.428, + 0.465, + 0.444 + ], + "angle": 0, + "content": "3.2. Heat Transport Equation without Conduction" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.451, + 0.468, + 0.497 + ], + "angle": 0, + "content": "Consider an infinitesimal volume at a scene point with area \\(\\delta_A\\) and depth \\(\\delta_z\\). 
The heat transport equation at that point can be written as [5, 34]:" + }, + { + "type": "equation", + "bbox": [ + 0.122, + 0.506, + 0.468, + 0.556 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} C _ {v} \\delta_ {A} \\delta_ {z} \\frac {\\partial T}{\\partial t} = \\kappa \\delta_ {A} \\delta_ {z} \\Delta T + \\delta_ {A} h _ {c} (T _ {s} - T) + \\\\ \\delta_ {A} \\sigma \\epsilon \\left(T _ {s} ^ {4} - T ^ {4}\\right) + \\delta_ {A} S, \\tag {4} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.568, + 0.468, + 0.688 + ], + "angle": 0, + "content": "where \\( C_v \\) is the volumetric heat capacity, \\( T \\) is the temperature, \\( \\kappa \\) is the thermal conductivity, \\( \\Delta \\) denotes the laplacian operator at that point, \\( h_c \\) is the convection coefficient, \\( T_s \\) is the surrounding temperature, \\( \\sigma \\) is the Stefan-Boltzmann constant, \\( \\epsilon \\) is the surface emissivity, and \\( S \\) is the intensity of heat generated via light absorption. Note that all the terms are expressed in units of W. For an opaque Lambertian scene, all the light absorption happens near the surface." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.468, + 0.9 + ], + "angle": 0, + "content": "Note that the magnitude of heat conduction is proportional to the local temperature laplacian. Analytically solving Eq. (4) requires knowing the shape since the laplacian operator depends on the local curvature. Ignoring conduction, makes the heat equation pixel-wise independent and lends itself to an analytical solution independent of shape. Moreover, many real-world materials, such as paints, plastics, paper and wood, have low thermal conductivity. As the object is initially at equilibrium, local temperature laplacians start at zero and increase with time if and only if neighboring pixels have different material properties and/or receive different amounts of light. Therefore, we consider a short thermal video immediately after light is turned on when conduction can be ignored." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.092, + 0.869, + 0.107 + ], + "angle": 0, + "content": "Dividing by area and ignoring conduction, Eq. (4) is:" + }, + { + "type": "equation", + "bbox": [ + 0.543, + 0.115, + 0.89, + 0.145 + ], + "angle": 0, + "content": "\\[\nC _ {v} \\delta_ {z} \\frac {\\partial T}{\\partial t} = h _ {c} \\left(T _ {s} - T\\right) + \\sigma \\epsilon \\left(T _ {s} ^ {4} - T ^ {4}\\right) + S. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.153, + 0.89, + 0.199 + ], + "angle": 0, + "content": "Since temperature rise due to light absorption is typically small (\\(\\leq 15\\mathrm{K}\\) within 4 sec in our experiments), we linearize the radiation term around a nominal temperature \\(T_{*}\\) to get" + }, + { + "type": "equation", + "bbox": [ + 0.584, + 0.208, + 0.89, + 0.226 + ], + "angle": 0, + "content": "\\[\n\\sigma \\epsilon \\left(T _ {s} ^ {4} - T ^ {4}\\right) \\approx 4 \\sigma \\epsilon T _ {*} ^ {3} \\left(T _ {s} - T\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.235, + 0.89, + 0.266 + ], + "angle": 0, + "content": "where the absolute error due to linearization is \\(\\leq 4\\%\\). This simplifies Eq. 
(5) to" + }, + { + "type": "equation", + "bbox": [ + 0.608, + 0.275, + 0.89, + 0.305 + ], + "angle": 0, + "content": "\\[\nH \\frac {\\partial T}{\\partial t} + P T = S + P T _ {s}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.313, + 0.782, + 0.33 + ], + "angle": 0, + "content": "where \\(H = C_v\\delta_z\\) and \\(P = (h_c + 4\\sigma \\epsilon T_*^3)\\)" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.338, + 0.684, + 0.354 + ], + "angle": 0, + "content": "3.3. Analytical Solution" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.361, + 0.89, + 0.391 + ], + "angle": 0, + "content": "Solving Eq. (7) at a single pixel (refer Appendix B for derivation), we get" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.401, + 0.89, + 0.434 + ], + "angle": 0, + "content": "\\[\nT _ {n} - T _ {1} = \\left(\\frac {S}{P} + T _ {s} - T _ {1}\\right) \\left(1 - e ^ {- \\frac {P}{H} \\left(t _ {n} - t _ {1}\\right)}\\right). \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.443, + 0.89, + 0.488 + ], + "angle": 0, + "content": "Since we assume the system is initially at thermal equilibrium, we can set \\( T_{s} = T_{1} \\). Now, substituting Eq. (3) into the above equation, we get" + }, + { + "type": "equation", + "bbox": [ + 0.573, + 0.497, + 0.89, + 0.527 + ], + "angle": 0, + "content": "\\[\nI _ {n} - I _ {1} = \\frac {S k _ {1} \\alpha}{P} \\left(1 - e ^ {- \\frac {P}{H} \\left(t _ {n} - t _ {1}\\right)}\\right) \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.535, + 0.89, + 0.581 + ], + "angle": 0, + "content": "Therefore, given a thermal video \\(\\{I_1,\\dots ,I_n\\}\\) and corresponding time stamps \\(\\{t_1,\\ldots ,t_n\\}\\), we use gradient descent for curve fitting at each pixel independently:" + }, + { + "type": "equation", + "bbox": [ + 0.6, + 0.59, + 0.89, + 0.611 + ], + "angle": 0, + "content": "\\[\nI _ {n} - I _ {1} = c _ {1} \\left(1 - e ^ {- \\frac {t _ {n} - t _ {1}}{c _ {2}}}\\right). \\tag {10}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.621, + 0.787, + 0.637 + ], + "angle": 0, + "content": "3.4. Recovering S from Curve Fitting" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.644, + 0.892, + 0.901 + ], + "angle": 0, + "content": "The results of curve fitting provide \\( c_{1} \\) and \\( c_{2} \\) at each pixel. From Eq. (9) and Eq. (10), note that \\( c_{1} = \\frac{Sk_{1}\\alpha}{P} \\). Recovering \\( S \\) from \\( c_{1} \\) would require knowledge of \\( k_{1}, \\alpha \\) and \\( P \\), where \\( P \\) depends on \\( h_c, \\epsilon \\) and \\( T_* \\). In theory, all these quantities could vary per-pixel. However, the spatial variation in \\( S \\), which depends on albedo in visible spectrum and illumination, is much greater than that of others. In this paper, we assume the quantity \\( \\beta = \\frac{k_1\\alpha}{P} \\) is common for all pixels such that \\( \\beta \\) is the constant of proportionality between \\( S \\) and \\( c_{1} \\). Note that Lambertian scenes typically correspond to rough surfaces which have high emissivity. Also, it is known that most paints have similarly high emissivity values of \\( > 0.9 \\) irrespective of their albedo in the visible spectrum [34]. As the object is initially at equilibrium, we can assume \\( T_* \\), and hence \\( k_{1} \\), is common for all pixels. In the absence of wind, we reasonably assume convection, if it exists at all, to be uniform throughout the scene." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11926" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.09, + 0.895, + 0.166 + ], + "angle": 0, + "content": "Table 1. Various lighting configurations typically modeled in shape-from-intensity problems. It is trivial to verify the equations for spatially varying albedo \\(\\rho (x)\\) and shading \\(\\eta (x)\\) remains the same irrespective of the complexity of shape or illumination when estimates of both image irradiance \\(I_{v}(x)\\) and absorbed light intensity \\(\\tilde{S} (x)\\) are available. Here, \\(\\gamma\\) is the camera gain, \\(\\beta\\) is the unknown scale factor in the estimation of \\(\\tilde{S} (x),\\zeta = \\frac{\\gamma}{\\beta}\\) is the relative scale factor, \\(E\\) is the source intensity, s is the light source direction, n is the surface normal, \\(\\omega\\) is light source direction for extended source, \\(\\eta\\) is the shading term and \\(\\eta^{*}\\) is the scene irradiance. Inter-reflections are modeled as spatially varying source intensities. All the above cases can be extended to model cast and attached shadows using a shadowing function \\(W(x)\\) without changing the expressions for \\(\\rho\\) and \\(\\eta\\)" + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.176, + 0.894, + 0.31 + ], + "angle": 0, + "content": "
Illumination | Image Irradiance Iv(x) | Estimated Absorbed Light S̃(x) | Albedo ρ(x) | Shading η(x)
Far source | γρ(x)/π E(s·n(x)) | β(1 - ρ(x)) E(s·n(x))
Multiple sources | γρ(x)/π ∑_l E_l(s_l·n(x)) | β(1 - ρ(x)) ∑_l E_l(s_l·n(x))
Near sources | γρ(x)/π ∑_l E_l(s_l(x)·n(x)) | β(1 - ρ(x)) ∑_l E_l(s_l(x)·n(x)) | πIv(x)/(πIv(x)+ζS̃(x)) | πIv(x)+ζS̃(x)
Extended Sources | γρ(x)/π ∫_ω E(ω)(ω·n(x)) | β(1 - ρ(x)) ∫_ω E(ω)(ω·n(x))
Inter-reflections | γρ(x)/π ∫_ω E(x,ω)(ω·n(x)) | β(1 - ρ(x)) ∫_ω E(x,ω)(ω·n(x))
General illumination | ρ(x)/π η(x) | β(1 - ρ(x)) η(x)/γ
" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.332, + 0.336, + 0.35 + ], + "angle": 0, + "content": "4. Albedo-Shading Separation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.358, + 0.471, + 0.495 + ], + "angle": 0, + "content": "Consider an opaque Lambertian scene imaged by a camera from a fixed view. We assume that the camera is sensitive to all the wavelengths present in the light sources i.e. we primarily consider LEDs or CFL bulbs when using visible cameras. We first consider the case where the albedo and the camera response are independent of wavelength in Sec. 4.1 and then extend our theory to wavelength-dependent albedo functions in Sec. 4.2. The words image and camera correspond to visible spectrum in this section." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.502, + 0.427, + 0.518 + ], + "angle": 0, + "content": "4.1. Grayscale Albedo and Camera Response" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.525, + 0.47, + 0.571 + ], + "angle": 0, + "content": "The image intensity \\( I_{v} \\), which is proportional to the power received by the camera per unit area, at a pixel \\( \\mathbf{p}(\\mathbf{x}) \\) focused at a scene point \\( \\mathbf{x} \\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.115, + 0.578, + 0.47, + 0.607 + ], + "angle": 0, + "content": "\\[\nI _ {v} (\\mathbf {p} (\\mathbf {x})) = \\frac {\\rho (\\mathbf {x})}{\\pi} \\eta (\\mathbf {x}), \\text {s . t .} \\eta (\\mathbf {x}) \\equiv \\gamma \\eta^ {*} (\\mathbf {x}) \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.613, + 0.47, + 0.719 + ], + "angle": 0, + "content": "where \\(\\rho (\\mathbf{x})\\) and \\(\\eta (\\mathbf{x})\\) are the spatially varying albedo and shading, \\(\\gamma >0\\) is the camera gain representing the optics and sensor electronics in the camera, and \\(\\eta^{*}(\\mathbf{x})\\) is the true scene irradiance received by \\(\\mathbf{x}\\). Note that we do not restrict the lighting geometry in any way and the shading \\(\\eta (\\mathbf{x})\\) term is unstructured. In the rest of the paper, we use \\(\\mathbf{p}\\) in place of \\(\\mathbf{p}(\\mathbf{x})\\)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.719, + 0.47, + 0.825 + ], + "angle": 0, + "content": "The pixel value in an image describes the energy reflected towards the camera by a scene point. Since the surface is opaque, there is no transmission and the remaining energy gets absorbed and is converted into heat. Recall from Sec. 3 that \\( S(\\mathbf{x}) \\) denotes the power absorbed per unit area, i.e. intensity, by \\( \\mathbf{x} \\). Let \\( \\tilde{S}(\\mathbf{x}) \\) be proportional to it, and is given by:" + }, + { + "type": "equation", + "bbox": [ + 0.105, + 0.83, + 0.47, + 0.85 + ], + "angle": 0, + "content": "\\[\n\\tilde {S} (\\mathbf {x}) = \\beta S (\\mathbf {x}), \\text {s . t .} S (\\mathbf {x}) = (1 - \\rho (\\mathbf {x})) \\eta^ {*} (\\mathbf {x}). \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.902 + ], + "angle": 0, + "content": "During operation, light fixtures also generate some thermal energy which increases its temperature and thereby increasing its blackbody radiation. However, the magnitude of this" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.334, + 0.892, + 0.379 + ], + "angle": 0, + "content": "additional heat generated at \\(\\mathbf{x}\\) is negligible and hence ignored in this paper. 
Next, we can express \\(\\tilde{S}\\) using shading as" + }, + { + "type": "equation", + "bbox": [ + 0.527, + 0.384, + 0.892, + 0.419 + ], + "angle": 0, + "content": "\\[\n\\tilde {S} (\\mathbf {x}) = \\beta (1 - \\rho (\\mathbf {x})) \\frac {\\eta (\\mathbf {x})}{\\gamma} = \\frac {(1 - \\rho (\\mathbf {x})) \\eta (\\mathbf {x})}{\\zeta}, \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.424, + 0.766, + 0.441 + ], + "angle": 0, + "content": "where \\(\\zeta = \\frac{\\gamma}{\\beta}\\) is the relative scale factor." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.44, + 0.894, + 0.501 + ], + "angle": 0, + "content": "In the trivial case where \\(\\mathbf{x}\\) receives no light (neither direct nor global illumination), the shading term \\(\\eta (\\mathbf{x}) = 0\\) and the albedo cannot be estimated. Whenever \\(\\eta (\\mathbf{x}) > 0\\), we can re-write Eqs. (11) and (13) as" + }, + { + "type": "equation", + "bbox": [ + 0.611, + 0.507, + 0.892, + 0.539 + ], + "angle": 0, + "content": "\\[\n\\pi I _ {v} (\\mathbf {p}) \\frac {1}{\\eta (\\mathbf {x})} - \\rho (\\mathbf {x}) = 0 \\tag {14}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.541, + 0.892, + 0.573 + ], + "angle": 0, + "content": "\\[\n\\zeta \\tilde {S} (\\mathbf {x}) \\frac {1}{\\eta (\\mathbf {x})} + \\rho (\\mathbf {x}) = 1 \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.58, + 0.813, + 0.595 + ], + "angle": 0, + "content": "Solving the above system of equations, we get:" + }, + { + "type": "equation", + "bbox": [ + 0.607, + 0.602, + 0.892, + 0.621 + ], + "angle": 0, + "content": "\\[\n\\eta (\\mathbf {x}) = \\pi I _ {v} (\\mathbf {p}) + \\zeta \\tilde {S} (\\mathbf {x}) \\tag {16}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.608, + 0.623, + 0.892, + 0.658 + ], + "angle": 0, + "content": "\\[\n\\rho (\\mathbf {x}) = \\frac {\\pi I _ {v} (\\mathbf {p})}{\\pi I _ {v} (\\mathbf {p}) + \\zeta \\tilde {S} (\\mathbf {x})}. \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.666, + 0.892, + 0.774 + ], + "angle": 0, + "content": "If \\( I_{v}(\\mathbf{p}) \\), \\( \\tilde{S}(\\mathbf{x}) \\) and \\( \\zeta \\) are known, the above equations provide a direct method to compute spatially varying albedo and shading components for complex shapes and arbitrary illumination. To emphasize its applicability further, Table 1 lists several types of lighting conditions typically modeled in shape-from-intensity problems and demonstrates that Eqs. (16), (17) hold in all cases." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.781, + 0.809, + 0.795 + ], + "angle": 0, + "content": "4.2. Towards General Albedo Functions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.804, + 0.892, + 0.865 + ], + "angle": 0, + "content": "Let the camera have \\(K\\) channels with known spectral responses \\(\\Gamma_{k}(\\lambda)\\). Recall that each wavelength present in the light sources must fall within at least one of the channels. 
The image irradiance at \\(\\mathbf{p}\\) in channel \\(k\\) can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.872, + 0.892, + 0.906 + ], + "angle": 0, + "content": "\\[\nI _ {v} ^ {k} (\\mathbf {p}) = \\gamma \\int_ {\\lambda} \\int_ {\\Omega} \\frac {\\rho (\\mathbf {x} , \\lambda)}{\\pi} \\Gamma_ {k} (\\lambda) L (\\mathbf {x}, \\lambda , \\omega) d \\omega d \\lambda , \\tag {18}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11927" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "where \\(\\rho (\\mathbf{x},\\lambda)\\) is the diffuse albedo as a function of wavelength, \\(L(\\mathbf{x},\\lambda ,\\omega)\\) is the spectral radiance at \\(\\mathbf{x}\\), and \\(\\omega\\) denotes the direction along the outer hemisphere. The corresponding estimate of absorbed power per unit area is:" + }, + { + "type": "equation", + "bbox": [ + 0.104, + 0.159, + 0.47, + 0.192 + ], + "angle": 0, + "content": "\\[\n\\tilde {S} (\\mathbf {x}) = \\beta \\int_ {\\lambda} \\int_ {\\Omega} (1 - \\rho (\\mathbf {x}, \\lambda)) L (\\mathbf {x}, \\lambda , \\omega) d \\omega d \\lambda , \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.197, + 0.469, + 0.303 + ], + "angle": 0, + "content": "Shading at a point \\( \\mathbf{x} \\) is influenced by the emission spectrum of the light sources, the relative geometry between \\( \\mathbf{x} \\) and the light sources, and the albedo of other points in the scene due to inter-reflections. While this general case remains an open problem, in the rest of this section we ignore inter-reflections and assume all light sources have a common emission spectrum \\( l(\\lambda) \\) i.e." + }, + { + "type": "equation", + "bbox": [ + 0.169, + 0.31, + 0.469, + 0.342 + ], + "angle": 0, + "content": "\\[\n\\int_ {\\Omega} L (\\mathbf {x}, \\lambda , \\omega) d \\omega = \\eta^ {*} (\\mathbf {x}) l (\\lambda). \\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.346, + 0.47, + 0.406 + ], + "angle": 0, + "content": "Note that, the illumination is still arbitrary in terms of their locations, sizes and angular radiant intensity functions. Substituting Eq. (20) into Eq. (18) and Eq. (19), we can write" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.411, + 0.469, + 0.479 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} I _ {v} ^ {k} (\\mathbf {p}) = \\int_ {\\lambda} \\frac {\\rho (\\mathbf {x} , \\lambda)}{\\pi} \\Gamma_ {k} (\\lambda) \\eta (\\mathbf {x}) l (\\lambda) d \\lambda , (21) \\\\ \\tilde {S} (\\mathbf {x}) = \\frac {\\int_ {\\lambda} \\eta (\\mathbf {x}) l (\\lambda) d \\lambda - \\int_ {\\lambda} \\rho (\\mathbf {x} , \\lambda) \\eta (\\mathbf {x}) l (\\lambda) d \\lambda}{\\zeta}. (22) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.485, + 0.469, + 0.606 + ], + "angle": 0, + "content": "As a continuous-valued function of wavelength, the diffuse albedo \\(\\rho (\\mathbf{x},\\lambda)\\) is infinite-dimensional, which requires further assumptions to enable tractable computations. We rely on a body of work [10, 14-16] that shows that reflectance spectra lie close to a low-dimensional subspace. 
Denoting the basis for this subspace as \\(\\Phi_{\\rho}(\\lambda) = \\{\\tilde{\\rho}_1(\\lambda),\\dots ,\\tilde{\\rho}_M(\\lambda)\\}\\), we can express the diffuse albedo as [21]:" + }, + { + "type": "equation", + "bbox": [ + 0.141, + 0.612, + 0.469, + 0.654 + ], + "angle": 0, + "content": "\\[\n\\rho (\\mathbf {x}, \\lambda) = \\sum_ {m = 1} ^ {M} \\tilde {\\rho} _ {m} (\\lambda) a _ {\\mathbf {x}, m} = \\Phi_ {\\rho} (\\lambda) \\mathbf {a} _ {\\mathbf {x}} \\tag {23}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.661, + 0.468, + 0.692 + ], + "angle": 0, + "content": "where \\(\\mathbf{a}_{\\mathbf{x}}\\in \\mathbb{R}^{M}\\) are the unknown coefficients of interest. This simplifies Eq. (21) and Eq. (22) into" + }, + { + "type": "equation", + "bbox": [ + 0.179, + 0.697, + 0.469, + 0.725 + ], + "angle": 0, + "content": "\\[\nI _ {v} ^ {k} (\\mathbf {p}) = \\eta (\\mathbf {x}) \\mathbf {E} _ {k} ^ {T} \\frac {\\mathbf {a} _ {\\mathbf {x}}}{\\pi}, \\tag {24}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.726, + 0.469, + 0.759 + ], + "angle": 0, + "content": "\\[\n\\tilde {S} (\\mathbf {x}) = \\frac {\\eta (\\mathbf {x}) \\left(L - \\mathbf {F} ^ {T} \\mathbf {a} _ {\\mathbf {x}}\\right)}{\\zeta}, \\tag {25}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.766, + 0.468, + 0.781 + ], + "angle": 0, + "content": "where \\(\\mathbf{E}_k\\), \\(\\mathbf{F}\\) and \\(L\\) can be computed a priori as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.798, + 0.469, + 0.899 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {E} _ {\\mathbf {k}} [ i ] = \\int_ {\\lambda} l (\\lambda) \\Gamma_ {k} (\\lambda) \\tilde {\\rho} _ {i} (\\lambda) d \\lambda , (26) \\\\ \\mathbf {F} [ i ] = \\int_ {\\lambda} l (\\lambda) \\tilde {\\rho} _ {i} (\\lambda) d \\lambda , (27) \\\\ L = \\int_ {\\lambda} l (\\lambda) d \\lambda . (28) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.892, + 0.107 + ], + "angle": 0, + "content": "Whenever \\(\\eta (\\mathbf{x}) > 0\\), Eqs. (24) and (25) can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.594, + 0.133, + 0.891, + 0.172 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\pi I _ {v} ^ {k} (\\mathbf {p}) \\xi (\\mathbf {x}) - \\mathbf {E} _ {k} ^ {T} \\mathbf {a} _ {\\mathbf {x}} = 0, \\forall k (29) \\\\ \\zeta \\tilde {S} (\\mathbf {x}) \\xi (\\mathbf {x}) + \\mathbf {F} ^ {T} \\mathbf {a} _ {\\mathbf {x}} = L, (30) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.185, + 0.892, + 0.395 + ], + "angle": 0, + "content": "where \\(\\xi (\\mathbf{x}) = 1 / \\eta (\\mathbf{x})\\). Note that we have a system of \\(K + 1\\) linear equations with \\(M + 1\\) unknowns, namely \\(\\mathbf{a}_{\\mathbf{x}}\\in \\mathbb{R}^{M}\\) and \\(\\xi (\\mathbf{x})\\). Therefore, whenever \\(K\\geq M\\), the system of equations can be solved to obtain albedo and shading (reciprocal of \\(\\xi (\\mathbf{x})\\)) at each pixel independently for complex shapes and illumination. Specifically, we use non-negative least squares solver for this problem. For most vision applications, which use a 3-channel RGB camera, we choose a corresponding basis set \\(\\Phi_{\\rho}\\) with \\(M = 3\\). Our theory could be used with multispectral cameras with more channels when higher fidelity in albedo is desired. While the above derivation relies on Eq. 
(20), it is still practically useful in many real-world scenes where inter-reflections exist as we will show in Sec. 5." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.397, + 0.892, + 0.472 + ], + "angle": 0, + "content": "In the special case of a monochrome camera capturing a scene where albedo depends on wavelength, the shading at each pixel can be expressed as a weighted sum of \\( I \\) and \\( S \\) irrespective of the emission spectrum of the light source. We refer the reader to Appendix C for more details." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.487, + 0.706, + 0.504 + ], + "angle": 0, + "content": "5. Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.513, + 0.892, + 0.604 + ], + "angle": 0, + "content": "To validate our theory, we perform experiments on several complex scenes with challenging illumination. Our scenes are mostly diffuse, but contain noticeable non-Lambertian features that test the practical utility of our theory to real-world objects. Our emphasis is on estimating the absorbed light intensity and performing albedo-shading separation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.624, + 0.892, + 0.836 + ], + "angle": 0, + "content": "Hardware Details: Our imaging system consists of an IDS UI-3130CP color camera with \\(600 \\times 800\\) resolution fitted with an 8mm Tamron lens, a FLIR Boson thermal camera having \\(\\leq 50\\mathrm{mK}\\) NETD with \\(512 \\times 640\\) resolution fitted with an \\(18\\mathrm{mm}\\) (\\(24^{\\circ}\\) HFOV) integrated lens and a BSP-DI-25-2 gold dichroic beamsplitter from ISP Optics. The cameras are coarsely aligned using an optic stage and a homography is used for fine alignment. We use LED lights from Advanced Illumination, namely a high intensity line light (LL167G96-WHI), a large spot light (SL-S100150M-WHI) and two small spot lights (SL-S050075M-WHI). The relative emission spectrum of the lighting and the spectral response of the color filter array in the visible camera were obtained from their technical datasheets, see Fig. 2." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Data Capture and Preprocessing: The visible camera was radiometrically calibrated [12]. To capture the full dynamic range of the illumination, we acquired a stack of 15" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11928" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.089, + 0.268, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.125, + 0.195, + 0.23, + 0.207 + ], + "angle": 0, + "content": "(a) Imaging System" + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.089, + 0.462, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.308, + 0.195, + 0.433, + 0.207 + ], + "angle": 0, + "content": "(b) Experimental Setup" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.213, + 0.46, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.169, + 0.344, + 0.378, + 0.356 + ], + "angle": 0, + "content": "(c) Spectral properties of light and CFA" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.368, + 0.47, + 0.48 + ], + "angle": 0, + "content": "Figure 2. Our imaging system consists of a visible camera and a thermal camera colocated using a gold dichroic beamsplitter. 
The light sources are placed close to the target scene so that the rise in temperature due to light absorption is detectable in the thermal camera. All the light sources have the same emission spectrum. The relative emission spectrum of the white LED and the quantum efficiency curves of the Color Filter Array are obtained from the corresponding datasheets." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.489, + 0.47, + 0.609 + ], + "angle": 0, + "content": "images in BayerRG format with a geometric progression of exposures that span \\(0.05\\mathrm{ms}\\) to over \\(180\\mathrm{ms}\\). All the images were demosaiced using gradient-corrected linear interpolation [26] and subsequently quantized to 8-bit images. The resulting LDR images were composited into a single linear HDR image using the previously estimated camera response function. Finally, this image is warped into the perspective of the thermal camera using a homography." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.61, + 0.47, + 0.775 + ], + "angle": 0, + "content": "The thermal camera is allowed to reach steady state operating temperature after powering on, which can take up to 30mins. The entire experimental setup including the target object is allowed to reach thermal equilibrium before beginning data collection. A flat field correction is performed a few seconds prior to turning on the light and a thermal video is recorded. The thermal camera is operated in the high gain state and the raw 16-bit data is captured at \\(60\\mathrm{Hz}\\). Both the thermal images and the warped HDR image were downsized \\(4\\times\\) using local mean computation to suppress noise and alleviate errors in co-location." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Implementation Details: We manually identify the first frame when light was turned on and use the pixel-wise median of the preceding frames as the initial frame \\( I_{1} \\). We use 200 frames since light was turned on for fitting the 2-parameter curve. We implement the curve fitting using gradient descent in PyTorch. We consider a 3 dimensional basis set for albedo with \\( \\tilde{\\rho}_b(\\lambda) = \\mathbb{I}[400\\mathrm{nm}\\leq \\lambda < 530\\mathrm{nm}] \\)," + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.093, + 0.761, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.764, + 0.094, + 0.887, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.297, + 0.892, + 0.422 + ], + "angle": 0, + "content": "Figure 3. The first column shows change in intensity at \\(0.1s\\) and at \\(3s\\) after turning on the lights. Note that we process each pixel independently. Two points of significance are marked in blue and red respectively. The middle column shows the curve fitting results for the highlighted points. The input intensities are shown as dots and the estimated intensities are shown as a continuous curves. The dashed lines correspond to the steady-state intensity that would be reached and equals \\(c_{1}\\) from Eq. (10). The last column shows \\(c_{1}\\) for each pixel as a 2D image and a histogram of \\(c_{2}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.429, + 0.892, + 0.533 + ], + "angle": 0, + "content": "\\(\\tilde{\\rho}_g(\\lambda) = \\mathbb{I}[530\\mathrm{nm}\\leq \\lambda < 620\\mathrm{nm}]\\), \\(\\tilde{\\rho}_r(\\lambda) = \\mathbb{I}[620\\mathrm{nm}\\leq \\lambda < 1100\\mathrm{nm}]\\), where \\(\\mathbb{I}[\\cdot ]\\) is the indicator function. Please refer to Appendix D for details of \\(\\mathbf{E}_k\\), \\(\\mathbf{F}\\) and \\(L\\). The relative factor \\(\\zeta\\) can be calibrated for a co-located imaging system with a color chart under controlled lighting. Alternatively, \\(\\zeta\\) can be treated as a hyper-parameter and tuned using cross-validation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.544, + 0.774, + 0.558 + ], + "angle": 0, + "content": "5.1. Heat Source Estimation results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.892, + 0.689 + ], + "angle": 0, + "content": "Figure 3 shows the result of our curve fitting for the wooden blocks scene. The estimated constants \\( c_{2} \\), which is proportional to heat capacity, appear like white noise and its corresponding histogram plot resembles a Gaussian distribution. This could be due to high levels of noise in thermal videos as well as similar magnitudes of spatial variation in \\( P \\) and \\( H \\). On the other hand, the estimated per-pixel constants \\( c_{1} \\) have visual similarity to a shading image, although noisy." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.697, + 0.803, + 0.713 + ], + "angle": 0, + "content": "5.2. Albedo-Shading separation results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Quantitative Evaluation: For comparison, we chose two methods: (i) the classical, even if dated, Retinex algorithm [23], which is well-suited for the color chart scene. (ii) Ordinal Shading [7], a SOTA learning-based approach which requires a large training dataset. We use the pretrained model here. We use the scale-invariant Mean Squared Error (si-MSE) from [19] as our metric. It is hard to obtain ground truth albedo and shading for general scenes under unknown lighting. And publicly available datasets do not have co-located thermal videos. Therefore, we first evaluate albedo using the color chart under 4 different illuminations. As shown in Fig. 4, our albedo estimates are" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11929" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.09, + 0.468, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.183, + 0.468, + 0.21 + ], + "angle": 0, + "content": "Figure 4. The first column is the mean value across colors (Ours: 0.020, Retinex: 0.034, Ordinal Shading: 0.080)." + }, + { + "type": "image", + "bbox": [ + 0.09, + 0.218, + 0.46, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.37, + 0.468, + 0.41 + ], + "angle": 0, + "content": "Figure 5. Our method operates per-pixel while other methods use hand-crafted or learnt spatial priors. Note the residual albedo in their estimated shading (images brightened for visualization)." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.418, + 0.462, + 0.468 + ], + "angle": 0, + "content": "
OursRGB-RetinexOrdinal Shading
Albedo0.0840.2530.399
Shading0.00050.00300.0080
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.478, + 0.468, + 0.507 + ], + "angle": 0, + "content": "Table 2. si-MSE values for Albedo and Shading using pseudo ground truth data obtained for the painted mask scene." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.514, + 0.468, + 0.6 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.612, + 0.468, + 0.655 + ], + "angle": 0, + "content": "Figure 6. (a) Curve fitting results of the same pixel for different video lengths. (b) Albedo error (against color chart ground truth) vs. length of input video." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.665, + 0.36, + 0.679 + ], + "angle": 0, + "content": "significantly better than the other methods." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.681, + 0.468, + 0.756 + ], + "angle": 0, + "content": "Next, we obtain pseudo-ground truth similar to [19] i.e. with a scene painted white (ground truth shading) and re-painted with texture. Fig. 5 and Tab. 2 illustrate that our method outperforms SOTA methods both qualitatively and quantitatively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Ablation on length of video: Longer duration leads to higher spatial thermal gradients that induce more conduction while shorter duration has lower signal to noise ratio. As seen in Fig. 6a, the curve deviates further away from the initial measurements when using longer videos. The accuracy of the fit correlates with the accuracy of albedo estimate for the color chart (see Fig. 6b). Our experiments use 200 frames which corresponds to the green plots." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.212 + ], + "angle": 0, + "content": "Qualitative evaluation: Figure 7 summarizes the albedo shading separation results for the four target scenes. As shown in the first two rows, we are given a HDR image from the visible camera and the corresponding absorbed light intensity is estimated from a thermal video using curve fitting as discussed earlier. And the last two rows show results that validate Eqs. (30) which are derived for general functions of albedo and camera response with wavelength." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.213, + 0.892, + 0.407 + ], + "angle": 0, + "content": "In the first scene, the interior of a mask is painted with white and black acrylic paints and the line light is directed at the portion of the image painted white. As highlighted in the callout, the concave portion corresponding to nose appears flat in the estimated albedo image for both the monochrome and RGB cases. Note that the temperature of the background wall does not raise sufficiently in all of the scenes, which makes it challenging for our approach. The thick wall would also have a high heat capacity which exacerbates the challenge. In the second scene, a cardboard sheet with printing on one side is folded to resemble the shape of W. The inner V groove would have inter-reflections while the outer faces are convex." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.409, + 0.892, + 0.59 + ], + "angle": 0, + "content": "In the third scene, a collection of solid colored wooden blocks are stacked into a complex geometry with both cast and attached shadows. This result indirectly shows that ignoring heat conduction for solid objects still allows one to recover the absorbed light intensity precisely. In the final scene, we use a stack of disks made of soft plastic. 
Different patterns are embossed onto the circumference of the disk. As highlighted in the callout, the shape information corresponding to the embossing is correctly separated into the shading term while the albedo term appears flat. These results demonstrate the broad applicability of our theory to everyday scenes with complex shapes and illumination." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.607, + 0.892, + 0.729 + ], + "angle": 0, + "content": "Grayscale approximation: Fig. 8 shows the estimated albedo and shading using grayscale approximation (Eqs. (16) and (17)). Recall that the grayscale approximation does not require knowledge of the emission spectrum of the light sources and the estimated shading is similar to that using Eqs (30). The monochrome image is approximated by taking the mean value across color channels. Corresponding results for all the scenes are provided in Appendix E." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.74, + 0.618, + 0.755 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This paper studies the theoretical connection between light transport in visible spectrum, heat transport in solids and light transport in the thermal infrared spectrum. We proved that having an estimate of absorbed light turns single image intrinsic image decomposition into a well-posed problem for arbitrary shape and illumination for lambertian scenes. To estimate absorbed light, we derive an analytical expression for surfaces with negligible heat conduction by modeling heat transport immediately after turning on illumination." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11930" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.086, + 0.212, + 0.104, + 0.55 + ], + "angle": 270, + "content": "RGB Intrinsic Image Decomposition Input" + }, + { + "type": "image_caption", + "bbox": [ + 0.188, + 0.098, + 0.274, + 0.109 + ], + "angle": 0, + "content": "Mask Interior" + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.111, + 0.321, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.378, + 0.097, + 0.464, + 0.109 + ], + "angle": 0, + "content": "W-Cardboard" + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.111, + 0.511, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.562, + 0.097, + 0.661, + 0.109 + ], + "angle": 0, + "content": "Wooden Blocks" + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.111, + 0.702, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.771, + 0.097, + 0.831, + 0.111 + ], + "angle": 0, + "content": "Soft Toys" + }, + { + "type": "image", + "bbox": [ + 0.712, + 0.112, + 0.894, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.142, + 0.229, + 0.321, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.229, + 0.511, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.229, + 0.702, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.712, + 0.229, + 0.893, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.345, + 0.321, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.344, + 0.511, + 
0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.344, + 0.702, + 0.456 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.712, + 0.345, + 0.893, + 0.456 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.465, + 0.321, + 0.574 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.465, + 0.511, + 0.574 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.465, + 0.702, + 0.574 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.712, + 0.465, + 0.893, + 0.574 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.59, + 0.893, + 0.649 + ], + "angle": 0, + "content": "Figure 7. The first row shows the HDR visible image (brightened for visualization). Note that the colorchart is not an input to our method. The second row shows the estimated heat source intensity (turbo colormap) obtained using the method in Sec. 3. The last two rows correspond to solving Eqs. (30) using non-negative least squares method. The estimated albedo is clipped to the range [0, 1]. The callouts for the visible image, heat source intensity, and shading are normalized individually to aid visualization." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.652, + 0.47, + 0.784 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.793, + 0.47, + 0.822 + ], + "angle": 0, + "content": "Figure 8. Albedo-Shading result for the soft toys scene using the grayscale approximation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Experiments showed that albedo and shading can be measured from a single view given a visible image and a short thermal video from a co-located imaging system." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.655, + 0.895, + 0.792 + ], + "angle": 0, + "content": "Just like we have shown an example of how modeling heat transport can help solve challenges in visible light transport, we believe research in visible light transport can help Infrared Thermography by improving accuracy of temperature measurement or observing heat transfer within inhomogeneous surfaces. Extending our theory to the full light transport, including general BRDFs, translucent materials and subsurface scattering are just a few of the exciting new directions that this research opens up." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.813, + 0.669, + 0.83 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.903 + ], + "angle": 0, + "content": "This work was partly supported by NSF grants IIS-2107236, CCF-1730147, and NSF-NIFA AI Institute for Resilient Agriculture. The authors would like to thank Mark Sheinin for helpful discussions." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "11931" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.169 + ], + "angle": 0, + "content": "[1] Jonathan T Barron and Jitendra Malik. Shape, illumination, and reflectance from shading. IEEE transactions on pattern analysis and machine intelligence, 37(8):1670-1687, 2014. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.172, + 0.47, + 0.24 + ], + "angle": 0, + "content": "[2] Anil S Baslamisli, Hoang-An Le, and Theo Gevers. Cnn based learning using reflection and retina models for intrinsic image decomposition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6674-6683, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.242, + 0.47, + 0.282 + ], + "angle": 0, + "content": "[3] Peter N Belhumeur, David J Kriegman, and Alan L Yuille. The bas-relief ambiguity. International journal of computer vision, 35(1):33-44, 1999. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.284, + 0.469, + 0.324 + ], + "angle": 0, + "content": "[4] Sean Bell, Kavita Bala, and Noah Snavely. Intrinsic images in the wild. ACM Transactions on Graphics (TOG), 33(4): 1-12, 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.327, + 0.469, + 0.353 + ], + "angle": 0, + "content": "[5] Theodore L. Bergman. Introduction to Heat Transfer. Wiley, 2011. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.355, + 0.469, + 0.409 + ], + "angle": 0, + "content": "[6] Nicolas Bonneel, Balazs Kovacs, Sylvain Paris, and Kavita Bala. Intrinsic decompositions for image editing. In Computer Graphics Forum, pages 593-609. Wiley Online Library, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.411, + 0.469, + 0.451 + ], + "angle": 0, + "content": "[7] Chris Careaga and Yaqiz Aksoy. Intrinsic image decomposition via ordinal shading. ACM Transactions on Graphics, 43 (1):1-24, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.453, + 0.469, + 0.507 + ], + "angle": 0, + "content": "[8] Robert Carroll, Ravi Ramamoorthi, and Maneesh Agrawala. Illumination decomposition for material recoloring with consistent interreflections. ACM Trans. Graph., 30(4):43, 2011. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.509, + 0.469, + 0.564 + ], + "angle": 0, + "content": "[9] Jason Chang, Randi Cabezas, and John W Fisher III. Bayesian nonparametric intrinsic image decomposition. In European conference on computer vision, pages 704-719. Springer, 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.566, + 0.469, + 0.62 + ], + "angle": 0, + "content": "[10] Hamilton Y Chong, Steven J Gortler, and Todd Zickler. The von kries hypothesis and a basis for color constancy. In 2007 IEEE 11th International Conference on Computer Vision, pages 1-8. IEEE, 2007. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.622, + 0.469, + 0.704 + ], + "angle": 0, + "content": "[11] Aniket Dashpute, Vishwanath Saragadam, Emma Alexander, Florian Willomitzer, Aggelos Katsaggelos, Ashok Veeraraghavan, and Oliver Cossairt. Thermal spread functions (tsf): Physics-guided material classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1641-1650, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.706, + 0.469, + 0.774 + ], + "angle": 0, + "content": "[12] Paul E. Debevec and Jitendra Malik. Recovering high dynamic range radiance maps from photographs. In Proceedings of the 24th Annual Conference on Computer Graphics and Interactive Techniques, page 369-378, USA, 1997. ACM Press/Addison-Wesley Publishing Co. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.469, + 0.844 + ], + "angle": 0, + "content": "[13] Sylvain Duchéne, Clement Riant, Gaurav Chaurasia, Jorge Lopez-Moreno, Pierre-Yves Laffont, Stefan Popov, Adrien Bousseau, and George Drettakis. Multi-view intrinsic images of outdoors scenes with an application to relighting. ACM Transactions on Graphics, page 16, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[14] Graham D Finlayson, Mark S Drew, and Brian V Funt. Color constancy: enhancing von kries adaption via sensor transformations. In Human Vision, Visual Processing, and Digital Display IV, pages 473-484, 1993. 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.146 + ], + "angle": 0, + "content": "[15] Graham D Finlayson, Mark S Drew, and Brian V Funt. Diagonal transforms suffice for color constancy. In IEEE International Conference on Computer Vision, pages 164-171, 1993." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[16] Graham Fyffe, Xueming Yu, and Paul Debevec. Single-shot photometric stereo by spectral multiplexing. In IEEE International Conference on Computational Photography (ICCP), 2011. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.206, + 0.892, + 0.246 + ], + "angle": 0, + "content": "[17] Rikke Gade and Thomas B Moeslund. Thermal cameras and applications: a survey. Machine vision and applications, 25: 245-262, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.892, + 0.304 + ], + "angle": 0, + "content": "[18] Elena Garces, Carlos Rodriguez-Pardo, Dan Casas, and Jorge Lopez-Moreno. A survey on intrinsic images: Delving deep into lambert and beyond. International Journal of Computer Vision, 130(3):836-868, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.892, + 0.374 + ], + "angle": 0, + "content": "[19] Roger Grosse, Micah K Johnson, Edward H Adelson, and William T Freeman. Ground truth dataset and baseline evaluations for intrinsic image algorithms. In 2009 IEEE 12th International Conference on Computer Vision, pages 2335-2342. IEEE, 2009. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.376, + 0.892, + 0.404 + ], + "angle": 0, + "content": "[20] Berthold KP Horn and Michael J Brooks. Shape from shading. MIT press, 1989. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.406, + 0.892, + 0.473 + ], + "angle": 0, + "content": "[21] Zhuo Hui, Kalyan Sunkavalli, Sunil Hadap, and Aswin C Sankaranarayanan. Illuminant spectra-based source separation using flash photography. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6209-6218, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.476, + 0.892, + 0.545 + ], + "angle": 0, + "content": "[22] Soonmin Hwang, Jaesik Park, Namil Kim, Yukyung Choi, and In So Kweon. Multispectral pedestrian detection: Benchmark dataset and baseline. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1037-1045, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.547, + 0.892, + 0.574 + ], + "angle": 0, + "content": "[23] Edwin H Land and John J McCann. Lightness and retinae theory. Josa, 61(1):1-11, 1971. 
1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.576, + 0.892, + 0.617 + ], + "angle": 0, + "content": "[24] Qiao Liu, Zhenyu He, Xin Li, and Yuan Zheng. Ptb-tir: A thermal infrared pedestrian tracking benchmark. IEEE Transactions on Multimedia, 22(3):666-675, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.619, + 0.892, + 0.659 + ], + "angle": 0, + "content": "[25] Yawen Lu and Guoyu Lu. Superthermal: Matching thermal as visible through thermal feature exploration. IEEE Robotics and Automation Letters, 6(2):2690-2697, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.661, + 0.892, + 0.73 + ], + "angle": 0, + "content": "[26] Henrique S Malvar, Li-wei He, and Ross Cutler. High-quality linear interpolation for demosaicing of bayer-patterned color images. In 2004 IEEE International Conference on Acoustics, Speech, and Signal Processing, pages iii-485. IEEE, 2004. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.732, + 0.892, + 0.772 + ], + "angle": 0, + "content": "[27] Shree K Nayar, Katsushi Ikeuchi, and Takeo Kanade. Shape from interreflections. International Journal of Computer Vision, 6:173-195, 1991. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.774, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[28] Manikandasriram Srinivasan Ramanagopal, Zixu Zhang, Ram Vasudevan, and Matthew Johnson-Roberson. Pixelwise motion deblurring of thermal videos. arXiv preprint arXiv:2006.04973, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.832, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[29] Vishwanath Saragadam, Akshit Dave, Ashok Veeraraghavan, and Richard G. Baraniuk. Thermal image processing via physics-inspired deep networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops, pages 4057-4065, 2021. 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "11932" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[30] Steven M Seitz, Yasuyuki Matsushita, and Kiriakos N Kutulakos. A theory of inverse light transport. In Tenth IEEE International Conference on Computer Vision (ICCV'05) Volume 1, pages 1440-1447. IEEE, 2005. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.468, + 0.188 + ], + "angle": 0, + "content": "[31] Steven A Shafer. Using color to separate reflection components. Color Research & Application, 10(4):210-218, 1985. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.192, + 0.468, + 0.274 + ], + "angle": 0, + "content": "[32] Kenichiro Tanaka, Nobuhiro Ikeya, Tsuyoshi Takatani, Hiroyuki Kubo, Takuya Funatomi, Vijay Ravi, Achuta Kadambi, and Yasuhiro Mukaigawa. Time-resolved far infrared light transport decomposition for thermal photometric stereo. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(6):2075-2085, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.277, + 0.468, + 0.357 + ], + "angle": 0, + "content": "[33] Ayush Tewari, Ohad Fried, Justus Thies, Vincent Sitzmann, Stephen Lombardi, Kalyan Sunkavalli, Ricardo MartinBrualla, Tomas Simon, Jason Saragih, Matthias Nießner, et al. State of the art on neural rendering. In Computer Graphics Forum, pages 701-727. Wiley Online Library, 2020. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.361, + 0.468, + 0.401 + ], + "angle": 0, + "content": "[34] Michael Vollmer and Klaus-Peter Mollmann. Fundamentals of Infrared Thermal Imaging, chapter 1, pages 1-106. John Wiley & Sons, Ltd, 2017. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.404, + 0.468, + 0.443 + ], + "angle": 0, + "content": "[35] Robert J Woodham. Photometric method for determining surface orientation from multiple images. Optical engineering, 19(1):139-144, 1980. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.446, + 0.468, + 0.514 + ], + "angle": 0, + "content": "[36] Pengyu Zhang, Jie Zhao, Dong Wang, Huchuan Lu, and Xiang Ruan. Visible-thermal uav tracking: A large-scale benchmark and new baseline. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8886–8895, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.517, + 0.468, + 0.572 + ], + "angle": 0, + "content": "[37] Xingchen Zhang, Ping Ye, and Gang Xiao. Vifb: A visible and infrared image fusion benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 104-105, 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.572 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.518, + 0.956 + ], + "angle": 0, + "content": "11933" + } + ] +] \ No newline at end of file diff --git a/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/c36c78d9-fdc0-45ae-af17-5820282f52ff_origin.pdf b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/c36c78d9-fdc0-45ae-af17-5820282f52ff_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..be6a25350c8e86c18e59ffc165f74cc303dd06ad --- /dev/null +++ b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/c36c78d9-fdc0-45ae-af17-5820282f52ff_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe973fe74e9a271b514ea3768bd0e776c224683aa38890a7f0dd45831e986b4a +size 3530182 diff --git a/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/full.md b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/full.md new file mode 100644 index 0000000000000000000000000000000000000000..c46692ed1a2dd8221d0cf685db40ccf6baa95356 --- /dev/null +++ b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/full.md @@ -0,0 +1,418 @@ +# A Theory of Joint Light and Heat Transport for Lambertian Scenes + +Mani Ramanagopal, Sriram Narayanan, Aswin C. Sankaranarayanan, and Srinivasa G. Narasimhan +Carnegie Mellon University, Pittsburgh, PA 15213, USA + +{manikans, snochurn, saswin, srinivas}@andrew.cmu.edu + +# Abstract + +We present a novel theory that establishes the relationship between light transport in visible and thermal infrared, and heat transport in solids. We show that heat generated due to light absorption can be estimated by modeling heat transport using a thermal camera. For situations where heat conduction is negligible, we analytically solve the heat transport equation to derive a simple expression relating the change in thermal image intensity to the absorbed light intensity and heat capacity of the material. Next, we prove that intrinsic image decomposition for Lambertian scenes becomes a well-posed problem if one has access to the absorbed light. Our theory generalizes to arbitrary shapes and unstructured illumination. 
Our theory is based on applying energy conservation principle at each pixel independently. We validate our theory using real-world experiments on diffuse objects made of different materials that exhibit both direct and global components (inter-reflections) of light transport under unknown complex lighting. + +# 1. Introduction + +Printed on paper, this text appears black because the ink does not reflect much light. So what happens to the light striking the ink? It gets absorbed and converted into heat, thereby disappearing from the visible light transport system. Starting from the early works in 1970s [3, 20, 23, 27, 31, 35], decades of research[6, 18, 33] have attempted to separate surface reflectance and shading from images by modeling shapes[30], illuminations [8] and their interactions [13]. However, in the general case, decomposing light transport is fundamentally an ill-posed problem, thus requiring handcrafted [23] or learned priors[1, 2, 4, 9]. But what if we can somehow observe the light lost to absorption? + +Analogous to light transport, heat transport models the generation and flow of heat through a medium and its exchange with the surrounding [5, 34]. In the heat transport system, the heat generated due to light absorption is no different from any other type of heat generation. While heat itself cannot be seen, all objects radiate infrared light based + +![](images/101b9b9dd3a8c77d7a15d69579d103cedba345fb0d2a532e33858b0126cf88eb.jpg) +Visible Image + +![](images/a1fed00393212e0b5cc1e749154ad7da16465dde422cecdfa5550ffd64deb2ac.jpg) +Temperature rise at $t = 0.1s$ + +![](images/bca02789b03b7020d76bbb3d95fe2b9bfbeba95b23822089fb44a73b4eb102c6.jpg) +Temperature rise at $t = 3s$ + +![](images/c28b9b618c4df3af25d97017329873094f1efedc8297f351a6c8d30859bf859f.jpg) +Experiment Setup + +![](images/f3582e4f64d8b962870cd0bc918c3d3db4f7284f946330b978bf0a23f7296008.jpg) +Albedo + +![](images/54ace4d89a4bea2f3bcc26dda08d1fbb78d21a99916af79f07cb6601bae05f91.jpg) +Shading +Figure 1. The visible image (brightened for visualization) captures the reflected light, which is the product of albedo and shading at that pixel. The absorbed light gets converted into heat and raises the temperature, which can be observed using a co-located thermal camera. The illumination is turned on at $t = 0$ . The temperature rise at $t = 0.1s$ and $t = 3s$ are shown using turbo colormap. Our novel theory establishes the relationship between light and heat transport and provides an analytical solution to compute albedo and shading for complex shapes and unknown illumination. + +on their surface temperature, and that can be measured using a thermal camera [34]. By modeling heat transport, we make the first attempt to estimate the intensity of light absorbed by an object, thus establishing the connection between light and heat transport. + +We develop a novel theory that proves having access to absorbed light turns single view intrinsic image decomposition into a pixel-wise well-posed problem, even for arbitrary shape and illumination. Our key insight is that all the complexities of the reflected light transport are also present in the absorbed light, in the same functional form but simply scaled by the complement of the albedo. Consider the color chart seen in Fig. 1. The amount of irradiance due to the line light is approximately equal for the black and white patches. 
While the visible image records a low intensity for the black patch, the corresponding increase in intensity in the thermal images is high, and vice versa. Leveraging the + +principle of energy conservation, the sum of reflected light and absorbed light at each scene point must equal its irradiance, which is also called as shading. Similarly, we can compute the ratio of reflected light to irradiance, which is also called as surface reflectance or albedo. + +A key ingredient in our approach is the ability to estimate the intensity of heat generated due to light absorption. In the general case, estimating it requires solving the heat transport equation which does not have an analytical solution for unknown shapes [5, 34]. However, in the absence of heat conduction, we show that the analytical solution to the heat transport equation for a constant source is a transient response that follows a 2-parameter exponential curve. Therefore, the source intensity can be estimated with as little as three frames from a thermal video. In practice, conduction occurs in all real-world objects albeit to a smaller degree in insulators and regions with low temperature gradient. Therefore, we limit the influence of conduction by focusing on the transient response of each pixel immediately after turning on light. A key limitation of our approach is that we require the system to be at thermal equilibrium before the light is turned on and other sources of heat generation, if any, remain constant. This is required to ensure the rise in temperature is only due to the absorbed light. + +Prior works in computational thermal imaging have studied the thermal transient response of objects to heating. Dashpute et al. [11] heat planar objects using a laser and capture a 1 min long thermal video to estimate its thermal diffusivity and emissivity. Of most relevance to our work, Tanaka et al. [32] heat objects using infrared lamps and record a 10 mins long thermal video. They decompose these videos using curve fitting into ambient, specular, diffuse and global components, where the latter two are assumed to be exponential curves. But this decomposition is akin to direct-global separation which is different from intrinsic image decomposition. Also, they use the extracted diffuse component as input to a photometric stereo algorithm. Note that their estimated "albedo" corresponds to absorptivity in the infrared spectrum and their photometric stereo is limited to distant point light sources at known directions (separate video for each direction). In contrast, our theory establishes and exploits the causal relationship between light and heat transport. And we apply our theory to albedo-shading separation in the visible spectrum for arbitrary unknown illumination using a single 4 sec thermal video and a single visible image. + +Several works in vision [17, 36, 37] and robotics [22, 25] fuse spatial features from the visible and thermal images in order to improve robustness of downstream tasks, such as object detection [24], to lighting and weather conditions. However, these methods do not reason about the relationship between the two spectrums from a physics perspective. + +We validate our theory through real world experiments + +using a co-located setup of a visible and thermal camera. Our target objects, even though diffuse, are made of different materials, contain direct and global light transport (inter-reflections), low and high spatial frequency and unstructured illumination, all of which are unknown. + +# 2. 
Joint Light and Heat Transport + +In this section, we briefly introduce the relationship between light and heat transport. While light energy is carried via photons, heat is thermal energy exchanged via molecular vibrations. Visible light (VIS, $0.4 - 0.7\mu \mathrm{m}$ ) transport can model the light scattered by the scene from a source towards the camera. The light absorbed by the scene gets converted to heat which is then exchanged via conduction, convection, retention (i.e. increase in temperature) and radiation, and is governed by the heat transport equation. Similar to VIS transport, Longwave Infrared light (LWIR, $8 - 14\mu \mathrm{m}$ ) transport can be used to model the radiation emitted by objects, based on their temperature, towards a thermal camera. + +Our first contribution is an algorithm, described in Sec. 3, for estimating the intensity of absorbed light using only a thermal video. This involves two steps: 1) inferring temperatures using LWIR light transport, and 2) inferring source intensity using heat transport equation. As all objects in the scene constantly exchange heat, it is hard to disambiguate heat generated by light absorption from other sources of heat at equilibrium. However, if we disturb the equilibrium by turning on the visible light at a known time, then the resulting rise in temperature allows us to estimate heat generated only due to our illumination. + +Our second contribution is a novel theory, described in Sec. 4, that decomposes VIS transport for arbitrary shapes and illumination. We derive simple analytical expressions for albedo and shading using a visible image and the absorbed light intensity estimated from a thermal video captured by a co-located thermal camera. + +# 3. Estimating Absorbed Light Intensity + +Consider a scene initially at thermal equilibrium. At a time $t_1$ , the illumination, which is constant with time, is turned on and a thermal video is captured. We assume the illumination is focused primarily at the target scene and therefore the temperature of the surrounding remains constant. Our objective is to estimate the spatially varying absorbed light (heat source) intensity using a single thermal video. + +# 3.1. Thermal Images to Temperature Changes + +In LWIR light transport, all surfaces including the camera and the scene emit (and reflect) radiation. The pixel intensity in the $n^{th}$ frame $I_{n}(\mathbf{p})$ of a thermal video can be written as: + +$$ +I _ {n} (\mathbf {p}) = \alpha U \left(T _ {n} (\mathbf {x})\right) + U _ {s}, \tag {1} +$$ + +where $T_{n}(\mathbf{x})$ is the temperature at time $t_n, \alpha$ is the effective emissivity, $U_{s}$ denotes the radiation from the surrounding, and $U(T)$ is a non-linear function that approximates the integral of the Planck radiation law. + +For a small range around $T_*$ , $U(T)$ can be linearly approximated as: + +$$ +U (T) = k _ {1} \left(T - T _ {*}\right) + k _ {2}, \tag {2} +$$ + +where $k_{1}$ and $k_{2}$ are camera-specific constants that depend on $T_{*}$ . We refer the reader to Appendix A for more details. Combining Eq. (1) and Eq. (2), we get + +$$ +I _ {n} (\mathbf {p}) - I _ {m} (\mathbf {p}) = k _ {1} \alpha \left(T _ {n} (\mathbf {x}) - T _ {m} (\mathbf {x})\right). \tag {3} +$$ + +The above equation shows that change in pixel intensity is linearly related to change in scene temperature. 
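
In practice, Eq. (3) means the signal we work with is simply the per-pixel intensity change of the thermal video relative to a pre-illumination reference frame. A minimal sketch of this step (array names are illustrative, not part of any released code) is:

```python
import numpy as np

def thermal_intensity_change(frames: np.ndarray, light_on_idx: int) -> np.ndarray:
    """Per-pixel change signal I_n - I_1 from a registered thermal video.

    frames       : (N, H, W) stack of raw 16-bit thermal intensities
    light_on_idx : index of the first frame after the illumination is switched on
    Returns a (N - light_on_idx, H, W) stack that, by Eq. (3), is proportional to
    the temperature rise T_n - T_1 up to the unknown factor k_1 * alpha.
    """
    frames = frames.astype(np.float64)
    # A temporal median over the pre-illumination frames gives a low-noise I_1.
    I1 = np.median(frames[:light_on_idx], axis=0)
    return frames[light_on_idx:] - I1
```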
Note that commonly used thermal cameras are uncooled microbolometers that exhibit thermal inertia [28, 29], where the measured intensities have a small delay with respect to changes in the scene. This effect is ignored for the purposes of this paper. + +# 3.2. Heat Transport Equation without Conduction + +Consider an infinitesimal volume at a scene point with area $\delta_A$ and depth $\delta_z$ . The heat transport equation at that point can be written as [5, 34]: + +$$ +\begin{array}{l} C _ {v} \delta_ {A} \delta_ {z} \frac {\partial T}{\partial t} = \kappa \delta_ {A} \delta_ {z} \Delta T + \delta_ {A} h _ {c} (T _ {s} - T) + \\ \delta_ {A} \sigma \epsilon \left(T _ {s} ^ {4} - T ^ {4}\right) + \delta_ {A} S, \tag {4} \\ \end{array} +$$ + +where $C_v$ is the volumetric heat capacity, $T$ is the temperature, $\kappa$ is the thermal conductivity, $\Delta$ denotes the laplacian operator at that point, $h_c$ is the convection coefficient, $T_s$ is the surrounding temperature, $\sigma$ is the Stefan-Boltzmann constant, $\epsilon$ is the surface emissivity, and $S$ is the intensity of heat generated via light absorption. Note that all the terms are expressed in units of W. For an opaque Lambertian scene, all the light absorption happens near the surface. + +Note that the magnitude of heat conduction is proportional to the local temperature laplacian. Analytically solving Eq. (4) requires knowing the shape since the laplacian operator depends on the local curvature. Ignoring conduction, makes the heat equation pixel-wise independent and lends itself to an analytical solution independent of shape. Moreover, many real-world materials, such as paints, plastics, paper and wood, have low thermal conductivity. As the object is initially at equilibrium, local temperature laplacians start at zero and increase with time if and only if neighboring pixels have different material properties and/or receive different amounts of light. Therefore, we consider a short thermal video immediately after light is turned on when conduction can be ignored. + +Dividing by area and ignoring conduction, Eq. (4) is: + +$$ +C _ {v} \delta_ {z} \frac {\partial T}{\partial t} = h _ {c} \left(T _ {s} - T\right) + \sigma \epsilon \left(T _ {s} ^ {4} - T ^ {4}\right) + S. \tag {5} +$$ + +Since temperature rise due to light absorption is typically small ( $\leq 15\mathrm{K}$ within 4 sec in our experiments), we linearize the radiation term around a nominal temperature $T_{*}$ to get + +$$ +\sigma \epsilon \left(T _ {s} ^ {4} - T ^ {4}\right) \approx 4 \sigma \epsilon T _ {*} ^ {3} \left(T _ {s} - T\right), \tag {6} +$$ + +where the absolute error due to linearization is $\leq 4\%$ . This simplifies Eq. (5) to + +$$ +H \frac {\partial T}{\partial t} + P T = S + P T _ {s}, \tag {7} +$$ + +where $H = C_v\delta_z$ and $P = (h_c + 4\sigma \epsilon T_*^3)$ + +# 3.3. Analytical Solution + +Solving Eq. (7) at a single pixel (refer Appendix B for derivation), we get + +$$ +T _ {n} - T _ {1} = \left(\frac {S}{P} + T _ {s} - T _ {1}\right) \left(1 - e ^ {- \frac {P}{H} \left(t _ {n} - t _ {1}\right)}\right). \tag {8} +$$ + +Since we assume the system is initially at thermal equilibrium, we can set $T_{s} = T_{1}$ . Now, substituting Eq. 
(3) into the above equation, we get + +$$ +I _ {n} - I _ {1} = \frac {S k _ {1} \alpha}{P} \left(1 - e ^ {- \frac {P}{H} \left(t _ {n} - t _ {1}\right)}\right) \tag {9} +$$ + +Therefore, given a thermal video $\{I_1,\dots ,I_n\}$ and corresponding time stamps $\{t_1,\ldots ,t_n\}$ , we use gradient descent for curve fitting at each pixel independently: + +$$ +I _ {n} - I _ {1} = c _ {1} \left(1 - e ^ {- \frac {t _ {n} - t _ {1}}{c _ {2}}}\right). \tag {10} +$$ + +# 3.4. Recovering S from Curve Fitting + +The results of curve fitting provide $c_{1}$ and $c_{2}$ at each pixel. From Eq. (9) and Eq. (10), note that $c_{1} = \frac{Sk_{1}\alpha}{P}$ . Recovering $S$ from $c_{1}$ would require knowledge of $k_{1}, \alpha$ and $P$ , where $P$ depends on $h_c, \epsilon$ and $T_*$ . In theory, all these quantities could vary per-pixel. However, the spatial variation in $S$ , which depends on albedo in visible spectrum and illumination, is much greater than that of others. In this paper, we assume the quantity $\beta = \frac{k_1\alpha}{P}$ is common for all pixels such that $\beta$ is the constant of proportionality between $S$ and $c_{1}$ . Note that Lambertian scenes typically correspond to rough surfaces which have high emissivity. Also, it is known that most paints have similarly high emissivity values of $> 0.9$ irrespective of their albedo in the visible spectrum [34]. As the object is initially at equilibrium, we can assume $T_*$ , and hence $k_{1}$ , is common for all pixels. In the absence of wind, we reasonably assume convection, if it exists at all, to be uniform throughout the scene. + +Table 1. Various lighting configurations typically modeled in shape-from-intensity problems. It is trivial to verify the equations for spatially varying albedo $\rho (x)$ and shading $\eta (x)$ remains the same irrespective of the complexity of shape or illumination when estimates of both image irradiance $I_{v}(x)$ and absorbed light intensity $\tilde{S} (x)$ are available. Here, $\gamma$ is the camera gain, $\beta$ is the unknown scale factor in the estimation of $\tilde{S} (x),\zeta = \frac{\gamma}{\beta}$ is the relative scale factor, $E$ is the source intensity, s is the light source direction, n is the surface normal, $\omega$ is light source direction for extended source, $\eta$ is the shading term and $\eta^{*}$ is the scene irradiance. Inter-reflections are modeled as spatially varying source intensities. All the above cases can be extended to model cast and attached shadows using a shadowing function $W(x)$ without changing the expressions for $\rho$ and $\eta$ + +

| Illumination | Image Irradiance $I_v(x)$ | Estimated Absorbed Light $\tilde{S}(x)$ | Albedo $\rho(x)$ | Shading $\eta(x)$ |
| --- | --- | --- | --- | --- |
| Far source | $\gamma \frac{\rho(x)}{\pi} E\,(\mathbf{s} \cdot \mathbf{n}(x))$ | $\beta (1 - \rho(x))\, E\,(\mathbf{s} \cdot \mathbf{n}(x))$ | $\frac{\pi I_v(x)}{\pi I_v(x) + \zeta \tilde{S}(x)}$ | $\pi I_v(x) + \zeta \tilde{S}(x)$ |
| Multiple sources | $\gamma \frac{\rho(x)}{\pi} \sum_l E_l\,(\mathbf{s}_l \cdot \mathbf{n}(x))$ | $\beta (1 - \rho(x)) \sum_l E_l\,(\mathbf{s}_l \cdot \mathbf{n}(x))$ | $\frac{\pi I_v(x)}{\pi I_v(x) + \zeta \tilde{S}(x)}$ | $\pi I_v(x) + \zeta \tilde{S}(x)$ |
| Near sources | $\gamma \frac{\rho(x)}{\pi} \sum_l E_l\,(\mathbf{s}_l(x) \cdot \mathbf{n}(x))$ | $\beta (1 - \rho(x)) \sum_l E_l\,(\mathbf{s}_l(x) \cdot \mathbf{n}(x))$ | $\frac{\pi I_v(x)}{\pi I_v(x) + \zeta \tilde{S}(x)}$ | $\pi I_v(x) + \zeta \tilde{S}(x)$ |
| Extended Sources | $\gamma \frac{\rho(x)}{\pi} \int_{\omega} E(\omega)\,(\omega \cdot \mathbf{n}(x))$ | $\beta (1 - \rho(x)) \int_{\omega} E(\omega)\,(\omega \cdot \mathbf{n}(x))$ | $\frac{\pi I_v(x)}{\pi I_v(x) + \zeta \tilde{S}(x)}$ | $\pi I_v(x) + \zeta \tilde{S}(x)$ |
| Inter-reflections | $\gamma \frac{\rho(x)}{\pi} \int_{\omega} E(x, \omega)\,(\omega \cdot \mathbf{n}(x))$ | $\beta (1 - \rho(x)) \int_{\omega} E(x, \omega)\,(\omega \cdot \mathbf{n}(x))$ | $\frac{\pi I_v(x)}{\pi I_v(x) + \zeta \tilde{S}(x)}$ | $\pi I_v(x) + \zeta \tilde{S}(x)$ |
| General illumination | $\frac{\rho(x)}{\pi} \eta(x)$ | $\beta (1 - \rho(x)) \frac{\eta(x)}{\gamma}$ | $\frac{\pi I_v(x)}{\pi I_v(x) + \zeta \tilde{S}(x)}$ | $\pi I_v(x) + \zeta \tilde{S}(x)$ |
+ +# 4. Albedo-Shading Separation + +Consider an opaque Lambertian scene imaged by a camera from a fixed view. We assume that the camera is sensitive to all the wavelengths present in the light sources i.e. we primarily consider LEDs or CFL bulbs when using visible cameras. We first consider the case where the albedo and the camera response are independent of wavelength in Sec. 4.1 and then extend our theory to wavelength-dependent albedo functions in Sec. 4.2. The words image and camera correspond to visible spectrum in this section. + +# 4.1. Grayscale Albedo and Camera Response + +The image intensity $I_{v}$ , which is proportional to the power received by the camera per unit area, at a pixel $\mathbf{p}(\mathbf{x})$ focused at a scene point $\mathbf{x}$ is: + +$$ +I _ {v} (\mathbf {p} (\mathbf {x})) = \frac {\rho (\mathbf {x})}{\pi} \eta (\mathbf {x}), \text {s . t .} \eta (\mathbf {x}) \equiv \gamma \eta^ {*} (\mathbf {x}) \tag {11} +$$ + +where $\rho (\mathbf{x})$ and $\eta (\mathbf{x})$ are the spatially varying albedo and shading, $\gamma >0$ is the camera gain representing the optics and sensor electronics in the camera, and $\eta^{*}(\mathbf{x})$ is the true scene irradiance received by $\mathbf{x}$ . Note that we do not restrict the lighting geometry in any way and the shading $\eta (\mathbf{x})$ term is unstructured. In the rest of the paper, we use $\mathbf{p}$ in place of $\mathbf{p}(\mathbf{x})$ . + +The pixel value in an image describes the energy reflected towards the camera by a scene point. Since the surface is opaque, there is no transmission and the remaining energy gets absorbed and is converted into heat. Recall from Sec. 3 that $S(\mathbf{x})$ denotes the power absorbed per unit area, i.e. intensity, by $\mathbf{x}$ . Let $\tilde{S}(\mathbf{x})$ be proportional to it, and is given by: + +$$ +\tilde {S} (\mathbf {x}) = \beta S (\mathbf {x}), \text {s . t .} S (\mathbf {x}) = (1 - \rho (\mathbf {x})) \eta^ {*} (\mathbf {x}). \tag {12} +$$ + +During operation, light fixtures also generate some thermal energy which increases its temperature and thereby increasing its blackbody radiation. However, the magnitude of this + +additional heat generated at $\mathbf{x}$ is negligible and hence ignored in this paper. Next, we can express $\tilde{S}$ using shading as + +$$ +\tilde {S} (\mathbf {x}) = \beta (1 - \rho (\mathbf {x})) \frac {\eta (\mathbf {x})}{\gamma} = \frac {(1 - \rho (\mathbf {x})) \eta (\mathbf {x})}{\zeta}, \tag {13} +$$ + +where $\zeta = \frac{\gamma}{\beta}$ is the relative scale factor. + +In the trivial case where $\mathbf{x}$ receives no light (neither direct nor global illumination), the shading term $\eta (\mathbf{x}) = 0$ and the albedo cannot be estimated. Whenever $\eta (\mathbf{x}) > 0$ , we can re-write Eqs. (11) and (13) as + +$$ +\pi I _ {v} (\mathbf {p}) \frac {1}{\eta (\mathbf {x})} - \rho (\mathbf {x}) = 0 \tag {14} +$$ + +$$ +\zeta \tilde {S} (\mathbf {x}) \frac {1}{\eta (\mathbf {x})} + \rho (\mathbf {x}) = 1 \tag {15} +$$ + +Solving the above system of equations, we get: + +$$ +\eta (\mathbf {x}) = \pi I _ {v} (\mathbf {p}) + \zeta \tilde {S} (\mathbf {x}) \tag {16} +$$ + +$$ +\rho (\mathbf {x}) = \frac {\pi I _ {v} (\mathbf {p})}{\pi I _ {v} (\mathbf {p}) + \zeta \tilde {S} (\mathbf {x})}. \tag {17} +$$ + +If $I_{v}(\mathbf{p})$ , $\tilde{S}(\mathbf{x})$ and $\zeta$ are known, the above equations provide a direct method to compute spatially varying albedo and shading components for complex shapes and arbitrary illumination. 
To emphasize its applicability further, Table 1 lists several types of lighting conditions typically modeled in shape-from-intensity problems and demonstrates that Eqs. (16), (17) hold in all cases. + +# 4.2. Towards General Albedo Functions + +Let the camera have $K$ channels with known spectral responses $\Gamma_{k}(\lambda)$ . Recall that each wavelength present in the light sources must fall within at least one of the channels. The image irradiance at $\mathbf{p}$ in channel $k$ can be written as: + +$$ +I _ {v} ^ {k} (\mathbf {p}) = \gamma \int_ {\lambda} \int_ {\Omega} \frac {\rho (\mathbf {x} , \lambda)}{\pi} \Gamma_ {k} (\lambda) L (\mathbf {x}, \lambda , \omega) d \omega d \lambda , \tag {18} +$$ + +where $\rho (\mathbf{x},\lambda)$ is the diffuse albedo as a function of wavelength, $L(\mathbf{x},\lambda ,\omega)$ is the spectral radiance at $\mathbf{x}$ , and $\omega$ denotes the direction along the outer hemisphere. The corresponding estimate of absorbed power per unit area is: + +$$ +\tilde {S} (\mathbf {x}) = \beta \int_ {\lambda} \int_ {\Omega} (1 - \rho (\mathbf {x}, \lambda)) L (\mathbf {x}, \lambda , \omega) d \omega d \lambda , \tag {19} +$$ + +Shading at a point $\mathbf{x}$ is influenced by the emission spectrum of the light sources, the relative geometry between $\mathbf{x}$ and the light sources, and the albedo of other points in the scene due to inter-reflections. While this general case remains an open problem, in the rest of this section we ignore inter-reflections and assume all light sources have a common emission spectrum $l(\lambda)$ i.e. + +$$ +\int_ {\Omega} L (\mathbf {x}, \lambda , \omega) d \omega = \eta^ {*} (\mathbf {x}) l (\lambda). \tag {20} +$$ + +Note that, the illumination is still arbitrary in terms of their locations, sizes and angular radiant intensity functions. Substituting Eq. (20) into Eq. (18) and Eq. (19), we can write + +$$ +\begin{array}{l} I _ {v} ^ {k} (\mathbf {p}) = \int_ {\lambda} \frac {\rho (\mathbf {x} , \lambda)}{\pi} \Gamma_ {k} (\lambda) \eta (\mathbf {x}) l (\lambda) d \lambda , (21) \\ \tilde {S} (\mathbf {x}) = \frac {\int_ {\lambda} \eta (\mathbf {x}) l (\lambda) d \lambda - \int_ {\lambda} \rho (\mathbf {x} , \lambda) \eta (\mathbf {x}) l (\lambda) d \lambda}{\zeta}. (22) \\ \end{array} +$$ + +As a continuous-valued function of wavelength, the diffuse albedo $\rho (\mathbf{x},\lambda)$ is infinite-dimensional, which requires further assumptions to enable tractable computations. We rely on a body of work [10, 14-16] that shows that reflectance spectra lie close to a low-dimensional subspace. Denoting the basis for this subspace as $\Phi_{\rho}(\lambda) = \{\tilde{\rho}_1(\lambda),\dots ,\tilde{\rho}_M(\lambda)\}$ , we can express the diffuse albedo as [21]: + +$$ +\rho (\mathbf {x}, \lambda) = \sum_ {m = 1} ^ {M} \tilde {\rho} _ {m} (\lambda) a _ {\mathbf {x}, m} = \Phi_ {\rho} (\lambda) \mathbf {a} _ {\mathbf {x}} \tag {23} +$$ + +where $\mathbf{a}_{\mathbf{x}}\in \mathbb{R}^{M}$ are the unknown coefficients of interest. This simplifies Eq. (21) and Eq. 
(22) into + +$$ +I _ {v} ^ {k} (\mathbf {p}) = \eta (\mathbf {x}) \mathbf {E} _ {k} ^ {T} \frac {\mathbf {a} _ {\mathbf {x}}}{\pi}, \tag {24} +$$ + +$$ +\tilde {S} (\mathbf {x}) = \frac {\eta (\mathbf {x}) \left(L - \mathbf {F} ^ {T} \mathbf {a} _ {\mathbf {x}}\right)}{\zeta}, \tag {25} +$$ + +where $\mathbf{E}_k$ , $\mathbf{F}$ and $L$ can be computed a priori as follows: + +$$ +\begin{array}{l} \mathbf {E} _ {\mathbf {k}} [ i ] = \int_ {\lambda} l (\lambda) \Gamma_ {k} (\lambda) \tilde {\rho} _ {i} (\lambda) d \lambda , (26) \\ \mathbf {F} [ i ] = \int_ {\lambda} l (\lambda) \tilde {\rho} _ {i} (\lambda) d \lambda , (27) \\ L = \int_ {\lambda} l (\lambda) d \lambda . (28) \\ \end{array} +$$ + +Whenever $\eta (\mathbf{x}) > 0$ , Eqs. (24) and (25) can be written as + +$$ +\begin{array}{l} \pi I _ {v} ^ {k} (\mathbf {p}) \xi (\mathbf {x}) - \mathbf {E} _ {k} ^ {T} \mathbf {a} _ {\mathbf {x}} = 0, \forall k (29) \\ \zeta \tilde {S} (\mathbf {x}) \xi (\mathbf {x}) + \mathbf {F} ^ {T} \mathbf {a} _ {\mathbf {x}} = L, (30) \\ \end{array} +$$ + +where $\xi (\mathbf{x}) = 1 / \eta (\mathbf{x})$ . Note that we have a system of $K + 1$ linear equations with $M + 1$ unknowns, namely $\mathbf{a}_{\mathbf{x}}\in \mathbb{R}^{M}$ and $\xi (\mathbf{x})$ . Therefore, whenever $K\geq M$ , the system of equations can be solved to obtain albedo and shading (reciprocal of $\xi (\mathbf{x})$ ) at each pixel independently for complex shapes and illumination. Specifically, we use non-negative least squares solver for this problem. For most vision applications, which use a 3-channel RGB camera, we choose a corresponding basis set $\Phi_{\rho}$ with $M = 3$ . Our theory could be used with multispectral cameras with more channels when higher fidelity in albedo is desired. While the above derivation relies on Eq. (20), it is still practically useful in many real-world scenes where inter-reflections exist as we will show in Sec. 5. + +In the special case of a monochrome camera capturing a scene where albedo depends on wavelength, the shading at each pixel can be expressed as a weighted sum of $I$ and $S$ irrespective of the emission spectrum of the light source. We refer the reader to Appendix C for more details. + +# 5. Experimental Results + +To validate our theory, we perform experiments on several complex scenes with challenging illumination. Our scenes are mostly diffuse, but contain noticeable non-Lambertian features that test the practical utility of our theory to real-world objects. Our emphasis is on estimating the absorbed light intensity and performing albedo-shading separation. + +Hardware Details: Our imaging system consists of an IDS UI-3130CP color camera with $600 \times 800$ resolution fitted with an 8mm Tamron lens, a FLIR Boson thermal camera having $\leq 50\mathrm{mK}$ NETD with $512 \times 640$ resolution fitted with an $18\mathrm{mm}$ ( $24^{\circ}$ HFOV) integrated lens and a BSP-DI-25-2 gold dichroic beamsplitter from ISP Optics. The cameras are coarsely aligned using an optic stage and a homography is used for fine alignment. We use LED lights from Advanced Illumination, namely a high intensity line light (LL167G96-WHI), a large spot light (SL-S100150M-WHI) and two small spot lights (SL-S050075M-WHI). The relative emission spectrum of the lighting and the spectral response of the color filter array in the visible camera were obtained from their technical datasheets, see Fig. 2. + +Data Capture and Preprocessing: The visible camera was radiometrically calibrated [12]. 
Data Capture and Preprocessing: The visible camera was radiometrically calibrated [12]. To capture the full dynamic range of the illumination, we acquired a stack of 15 images in BayerRG format with a geometric progression of exposures spanning $0.05\mathrm{ms}$ to over $180\mathrm{ms}$. All the images were demosaiced using gradient-corrected linear interpolation [26] and subsequently quantized to 8 bits. The resulting LDR images were composited into a single linear HDR image using the previously estimated camera response function. Finally, this image is warped into the perspective of the thermal camera using a homography.

![](images/f1d4c0d4ec26f8e7fee03197ee99785321cd600478e261a2dddd363388ea4883.jpg)
(a) Imaging System

![](images/b76c355e89b035504019c0b5c0411c973af1556c03c0e7d7e0c5897d251854ca.jpg)
(b) Experimental Setup

![](images/ec679174dfd5ba476b1b5565957e9900a46a69d21467cd7a810a404bb2bfa6fb.jpg)
(c) Spectral properties of light and CFA

Figure 2. Our imaging system consists of a visible camera and a thermal camera co-located using a gold dichroic beamsplitter. The light sources are placed close to the target scene so that the rise in temperature due to light absorption is detectable in the thermal camera. All the light sources have the same emission spectrum. The relative emission spectrum of the white LED and the quantum efficiency curves of the color filter array are obtained from the corresponding datasheets.

The thermal camera is allowed to reach its steady-state operating temperature after powering on, which can take up to 30 minutes. The entire experimental setup, including the target object, is allowed to reach thermal equilibrium before beginning data collection. A flat-field correction is performed a few seconds prior to turning on the light, and a thermal video is recorded. The thermal camera is operated in the high-gain state, and the raw 16-bit data is captured at $60\mathrm{Hz}$. Both the thermal images and the warped HDR image were downsized $4\times$ using local mean computation to suppress noise and alleviate errors in co-location.

Implementation Details: We manually identify the first frame in which the light was turned on and use the pixel-wise median of the preceding frames as the initial frame $I_{1}$. We use the 200 frames after the light was turned on for fitting the 2-parameter curve of Eq. (10). We implement the curve fitting using gradient descent in PyTorch. We consider a 3-dimensional basis set for albedo with $\tilde{\rho}_b(\lambda) = \mathbb{I}[400\mathrm{nm}\leq \lambda < 530\mathrm{nm}]$, $\tilde{\rho}_g(\lambda) = \mathbb{I}[530\mathrm{nm}\leq \lambda < 620\mathrm{nm}]$, $\tilde{\rho}_r(\lambda) = \mathbb{I}[620\mathrm{nm}\leq \lambda < 1100\mathrm{nm}]$, where $\mathbb{I}[\cdot]$ is the indicator function.

![](images/14d514e0ab82c0a77f70916765292b0c25ec48a0367dc05481b04fb977b951d2.jpg)

![](images/a1703a5c43210ea2f119a94c3be21e3429608753fb1611508ecd48597ed3c521.jpg)
Figure 3. The first column shows the change in intensity at $0.1s$ and at $3s$ after turning on the lights. Note that we process each pixel independently. Two points of significance are marked in blue and red, respectively. The middle column shows the curve fitting results for the highlighted points. The input intensities are shown as dots and the estimated intensities as continuous curves. The dashed lines correspond to the steady-state intensity that would be reached, which equals $c_{1}$ from Eq. (10). The last column shows $c_{1}$ for each pixel as a 2D image and a histogram of $c_{2}$.
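A minimal PyTorch sketch of this per-pixel two-parameter fit (Eq. (10)) is given below; it is an illustration under the stated model, with hyper-parameters and names chosen for exposition rather than taken from the paper's implementation.

```python
import torch

def fit_transient(frames, times, iters=2000, lr=0.05):
    """Fit I_n - I_1 = c1 * (1 - exp(-(t_n - t_1) / c2)) at every pixel.

    frames : (N, H, W) float tensor of thermal intensities after light onset
    times  : (N,) float tensor of frame timestamps in seconds
    Returns per-pixel maps of c1 (steady-state rise) and c2 (time constant).
    """
    dI = frames - frames[0:1]                    # I_n - I_1
    dt = (times - times[0]).view(-1, 1, 1)       # t_n - t_1
    H, W = frames.shape[1:]
    c1 = torch.full((H, W), dI.max().item(), requires_grad=True)
    c2 = torch.full((H, W), 1.0, requires_grad=True)
    optim = torch.optim.Adam([c1, c2], lr=lr)
    for _ in range(iters):
        optim.zero_grad()
        pred = c1 * (1.0 - torch.exp(-dt / c2.clamp(min=1e-3)))
        loss = ((pred - dI) ** 2).mean()         # per-pixel least squares
        loss.backward()
        optim.step()
    return c1.detach(), c2.detach()
```

Here the recovered $c_1$ map is the steady-state intensity rise, which is proportional to the absorbed light intensity (Eq. (9)), and $c_2$ is the per-pixel time constant.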
Please refer to Appendix D for details of $\mathbf{E}_k$, $\mathbf{F}$, and $L$. The relative factor $\zeta$ can be calibrated for a co-located imaging system with a color chart under controlled lighting. Alternatively, $\zeta$ can be treated as a hyper-parameter and tuned using cross-validation.

# 5.1. Heat Source Estimation Results

Figure 3 shows the result of our curve fitting for the wooden blocks scene. The estimated per-pixel constants $c_{2}$, which are proportional to the heat capacity, appear like white noise, and the corresponding histogram resembles a Gaussian distribution. This could be due to high levels of noise in thermal videos as well as similar magnitudes of spatial variation in $P$ and $H$. On the other hand, the estimated per-pixel constants $c_{1}$ resemble a shading image, although noisy.

# 5.2. Albedo-Shading Separation Results

Quantitative Evaluation: For comparison, we chose two methods: (i) the classical, even if dated, Retinex algorithm [23], which is well-suited for the color chart scene; and (ii) Ordinal Shading [7], a SOTA learning-based approach that requires a large training dataset, for which we use the pretrained model. We use the scale-invariant Mean Squared Error (si-MSE) from [19] as our metric. It is hard to obtain ground-truth albedo and shading for general scenes under unknown lighting, and publicly available datasets do not include co-located thermal videos. Therefore, we first evaluate albedo using the color chart under four different illuminations. As shown in Fig. 4, our albedo estimates are significantly better than those of the other methods.

![](images/e54b0bed28940c1da2f4742f7d969de2b28839f5e6987cc382d872e0254bbdb9.jpg)
Figure 4. The first column is the mean value across colors (Ours: 0.020, Retinex: 0.034, Ordinal Shading: 0.080).

![](images/da4288e7f27ce649a8b54c015ffcf88b8cbace8b0dde12496e0f68d23619fbe5.jpg)
Figure 5. Our method operates per-pixel while other methods use hand-crafted or learnt spatial priors. Note the residual albedo in their estimated shading (images brightened for visualization).
| | Ours | RGB-Retinex | Ordinal Shading |
| --- | --- | --- | --- |
| Albedo | 0.084 | 0.253 | 0.399 |
| Shading | 0.0005 | 0.0030 | 0.0080 |
Table 2. si-MSE values for albedo and shading using pseudo-ground-truth data obtained for the painted mask scene.

![](images/3b6c77f5e2321cc61d72420270513ed39686e7763a1a31541994e090a8f41085.jpg)
Figure 6. (a) Curve fitting results of the same pixel for different video lengths. (b) Albedo error (against color chart ground truth) vs. length of input video.

Next, we obtain pseudo-ground truth similar to [19], i.e., with the scene first painted white (ground-truth shading) and then re-painted with texture. Fig. 5 and Tab. 2 illustrate that our method outperforms SOTA methods both qualitatively and quantitatively.

Ablation on length of video: A longer duration leads to higher spatial thermal gradients that induce more conduction, while a shorter duration has a lower signal-to-noise ratio. As seen in Fig. 6a, the curve deviates further away from the initial measurements when using longer videos. The accuracy of the fit correlates with the accuracy of the albedo estimate for the color chart (see Fig. 6b). Our experiments use 200 frames, which corresponds to the green plots.

Qualitative evaluation: Figure 7 summarizes the albedo-shading separation results for the four target scenes. As shown in the first two rows, we are given an HDR image from the visible camera, and the corresponding absorbed light intensity is estimated from a thermal video using curve fitting as discussed earlier. The last two rows show results that validate Eqs. (29) and (30), which are derived for albedo and camera response that are general functions of wavelength.

In the first scene, the interior of a mask is painted with white and black acrylic paints, and the line light is directed at the portion of the image painted white. As highlighted in the callout, the concave portion corresponding to the nose appears flat in the estimated albedo image for both the monochrome and RGB cases. Note that the temperature of the background wall does not rise sufficiently in any of the scenes, which makes it challenging for our approach. The thick wall would also have a high heat capacity, which exacerbates the challenge. In the second scene, a cardboard sheet with printing on one side is folded to resemble the shape of a 'W'. The inner V-groove would exhibit inter-reflections, while the outer faces are convex.

In the third scene, a collection of solid-colored wooden blocks is stacked into a complex geometry with both cast and attached shadows. This result indirectly shows that ignoring heat conduction for solid objects still allows one to recover the absorbed light intensity precisely. In the final scene, we use a stack of disks made of soft plastic. Different patterns are embossed onto the circumference of each disk. As highlighted in the callout, the shape information corresponding to the embossing is correctly separated into the shading term, while the albedo term appears flat. These results demonstrate the broad applicability of our theory to everyday scenes with complex shapes and illumination.

Grayscale approximation: Fig. 8 shows the estimated albedo and shading using the grayscale approximation (Eqs. (16) and (17)). Recall that the grayscale approximation does not require knowledge of the emission spectrum of the light sources, and the estimated shading is similar to that obtained using Eqs. (29) and (30). The monochrome image is approximated by taking the mean value across color channels. Corresponding results for all the scenes are provided in Appendix E.

# 6.
Conclusion + +This paper studies the theoretical connection between light transport in visible spectrum, heat transport in solids and light transport in the thermal infrared spectrum. We proved that having an estimate of absorbed light turns single image intrinsic image decomposition into a well-posed problem for arbitrary shape and illumination for lambertian scenes. To estimate absorbed light, we derive an analytical expression for surfaces with negligible heat conduction by modeling heat transport immediately after turning on illumination. + +![](images/748958ee1572981de832371edede1391bb1170343ed8ef197ff522880b248763.jpg) +Mask Interior + +![](images/4e656e075a1fe093ebb4783dd16ff7451ffc74806375925846c3f5ccc66ab178.jpg) +W-Cardboard + +![](images/24473dc35157725c072e77362f948c7e610aa888c288fa0cbd01765990685544.jpg) +Wooden Blocks + +![](images/59403cee9a05a9d1b5f000a02153f471102c5d9d04da1a6c5747e15ecfb83c8b.jpg) +Soft Toys + +![](images/fa75343ba09fc64648728a7764a4a8010a8060fa86089e6dabe7fddde88b8897.jpg) + +![](images/d54763d13abba4ef5a1087bbc8f29013ed9bb6e751fe53569cfd864d59913795.jpg) + +![](images/4e8ebc5500c6d741f4aecd220a1d4e17cd4211a255027a793baff16ba3173f63.jpg) + +![](images/aa8901c22158f65472bc64582400167696fcac57b92866e1c41d70fa17670608.jpg) + +![](images/c2ecb21f35cde1e9af35ff684b2d79c03a800cd5f25d3e72b40e88291fcb2b5f.jpg) +RGB Intrinsic Image Decomposition Input + +![](images/d31d8e94c7be6eb65300047a9764c4cb7d5b8be03c7268a97a301159848d3906.jpg) + +![](images/5019005503701ca292aaacb0ba9709ad400d1a452bc2d4c0c0495cfef8e551aa.jpg) + +![](images/4e5642cb804e94c47e5f5663aa97b3882d2736a95d70c7d0d3f5c5a3d0f7b85c.jpg) + +![](images/d1f4ac47963f96280d53698a210a512ed88435543e72ed4d8f0a8862a09fdd1c.jpg) + +![](images/8a74eea425aad3e8e20075c4c5061b772f76a7a6dd0c76304a031aeb3dd58a97.jpg) + +![](images/4f00928a1256f4043eb97a011313e612a83b072fc9c5c321c0f47acf372292bc.jpg) + +![](images/5890a8cb26d8397bc5329cd9f2f4d468603b7e2e0b618f281581ef259eca8fbe.jpg) + +![](images/51a8753d21f4a0b6f7943684a6d624e1749425dd6e510f62df44b2da8b099270.jpg) +Figure 7. The first row shows the HDR visible image (brightened for visualization). Note that the colorchart is not an input to our method. The second row shows the estimated heat source intensity (turbo colormap) obtained using the method in Sec. 3. The last two rows correspond to solving Eqs. (30) using non-negative least squares method. The estimated albedo is clipped to the range [0, 1]. The callouts for the visible image, heat source intensity, and shading are normalized individually to aid visualization. +Figure 8. Albedo-Shading result for the soft toys scene using the grayscale approximation. + +Experiments showed that albedo and shading can be measured from a single view given a visible image and a short thermal video from a co-located imaging system. + +Just like we have shown an example of how modeling heat transport can help solve challenges in visible light transport, we believe research in visible light transport can help Infrared Thermography by improving accuracy of temperature measurement or observing heat transfer within inhomogeneous surfaces. Extending our theory to the full light transport, including general BRDFs, translucent materials and subsurface scattering are just a few of the exciting new directions that this research opens up. + +# Acknowledgements + +This work was partly supported by NSF grants IIS-2107236, CCF-1730147, and NSF-NIFA AI Institute for Resilient Agriculture. 
The authors would like to thank Mark Sheinin for helpful discussions. + +# References + +[1] Jonathan T Barron and Jitendra Malik. Shape, illumination, and reflectance from shading. IEEE transactions on pattern analysis and machine intelligence, 37(8):1670-1687, 2014. 1 +[2] Anil S Baslamisli, Hoang-An Le, and Theo Gevers. Cnn based learning using reflection and retina models for intrinsic image decomposition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6674-6683, 2018. 1 +[3] Peter N Belhumeur, David J Kriegman, and Alan L Yuille. The bas-relief ambiguity. International journal of computer vision, 35(1):33-44, 1999. 1 +[4] Sean Bell, Kavita Bala, and Noah Snavely. Intrinsic images in the wild. ACM Transactions on Graphics (TOG), 33(4): 1-12, 2014. 1 +[5] Theodore L. Bergman. Introduction to Heat Transfer. Wiley, 2011. 1, 2, 3 +[6] Nicolas Bonneel, Balazs Kovacs, Sylvain Paris, and Kavita Bala. Intrinsic decompositions for image editing. In Computer Graphics Forum, pages 593-609. Wiley Online Library, 2017. 1 +[7] Chris Careaga and Yaqiz Aksoy. Intrinsic image decomposition via ordinal shading. ACM Transactions on Graphics, 43 (1):1-24, 2023. 6 +[8] Robert Carroll, Ravi Ramamoorthi, and Maneesh Agrawala. Illumination decomposition for material recoloring with consistent interreflections. ACM Trans. Graph., 30(4):43, 2011. 1 +[9] Jason Chang, Randi Cabezas, and John W Fisher III. Bayesian nonparametric intrinsic image decomposition. In European conference on computer vision, pages 704-719. Springer, 2014. 1 +[10] Hamilton Y Chong, Steven J Gortler, and Todd Zickler. The von kries hypothesis and a basis for color constancy. In 2007 IEEE 11th International Conference on Computer Vision, pages 1-8. IEEE, 2007. 5 +[11] Aniket Dashpute, Vishwanath Saragadam, Emma Alexander, Florian Willomitzer, Aggelos Katsaggelos, Ashok Veeraraghavan, and Oliver Cossairt. Thermal spread functions (tsf): Physics-guided material classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1641-1650, 2023. 2 +[12] Paul E. Debevec and Jitendra Malik. Recovering high dynamic range radiance maps from photographs. In Proceedings of the 24th Annual Conference on Computer Graphics and Interactive Techniques, page 369-378, USA, 1997. ACM Press/Addison-Wesley Publishing Co. 5 +[13] Sylvain Duchéne, Clement Riant, Gaurav Chaurasia, Jorge Lopez-Moreno, Pierre-Yves Laffont, Stefan Popov, Adrien Bousseau, and George Drettakis. Multi-view intrinsic images of outdoors scenes with an application to relighting. ACM Transactions on Graphics, page 16, 2015. 1 +[14] Graham D Finlayson, Mark S Drew, and Brian V Funt. Color constancy: enhancing von kries adaption via sensor transformations. In Human Vision, Visual Processing, and Digital Display IV, pages 473-484, 1993. 5 + +[15] Graham D Finlayson, Mark S Drew, and Brian V Funt. Diagonal transforms suffice for color constancy. In IEEE International Conference on Computer Vision, pages 164-171, 1993. +[16] Graham Fyffe, Xueming Yu, and Paul Debevec. Single-shot photometric stereo by spectral multiplexing. In IEEE International Conference on Computational Photography (ICCP), 2011. 5 +[17] Rikke Gade and Thomas B Moeslund. Thermal cameras and applications: a survey. Machine vision and applications, 25: 245-262, 2014. 2 +[18] Elena Garces, Carlos Rodriguez-Pardo, Dan Casas, and Jorge Lopez-Moreno. A survey on intrinsic images: Delving deep into lambert and beyond. 
International Journal of Computer Vision, 130(3):836-868, 2022. 1 +[19] Roger Grosse, Micah K Johnson, Edward H Adelson, and William T Freeman. Ground truth dataset and baseline evaluations for intrinsic image algorithms. In 2009 IEEE 12th International Conference on Computer Vision, pages 2335-2342. IEEE, 2009. 6, 7 +[20] Berthold KP Horn and Michael J Brooks. Shape from shading. MIT press, 1989. 1 +[21] Zhuo Hui, Kalyan Sunkavalli, Sunil Hadap, and Aswin C Sankaranarayanan. Illuminant spectra-based source separation using flash photography. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6209-6218, 2018. 5 +[22] Soonmin Hwang, Jaesik Park, Namil Kim, Yukyung Choi, and In So Kweon. Multispectral pedestrian detection: Benchmark dataset and baseline. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1037-1045, 2015. 2 +[23] Edwin H Land and John J McCann. Lightness and retinae theory. Josa, 61(1):1-11, 1971. 1, 6 +[24] Qiao Liu, Zhenyu He, Xin Li, and Yuan Zheng. Ptb-tir: A thermal infrared pedestrian tracking benchmark. IEEE Transactions on Multimedia, 22(3):666-675, 2019. 2 +[25] Yawen Lu and Guoyu Lu. Superthermal: Matching thermal as visible through thermal feature exploration. IEEE Robotics and Automation Letters, 6(2):2690-2697, 2021. 2 +[26] Henrique S Malvar, Li-wei He, and Ross Cutler. High-quality linear interpolation for demosaicing of bayer-patterned color images. In 2004 IEEE International Conference on Acoustics, Speech, and Signal Processing, pages iii-485. IEEE, 2004. 6 +[27] Shree K Nayar, Katsushi Ikeuchi, and Takeo Kanade. Shape from interreflections. International Journal of Computer Vision, 6:173-195, 1991. 1 +[28] Manikandasriram Srinivasan Ramanagopal, Zixu Zhang, Ram Vasudevan, and Matthew Johnson-Roberson. Pixelwise motion deblurring of thermal videos. arXiv preprint arXiv:2006.04973, 2020. 3 +[29] Vishwanath Saragadam, Akshit Dave, Ashok Veeraraghavan, and Richard G. Baraniuk. Thermal image processing via physics-inspired deep networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops, pages 4057-4065, 2021. 3 + +[30] Steven M Seitz, Yasuyuki Matsushita, and Kiriakos N Kutulakos. A theory of inverse light transport. In Tenth IEEE International Conference on Computer Vision (ICCV'05) Volume 1, pages 1440-1447. IEEE, 2005. 1 +[31] Steven A Shafer. Using color to separate reflection components. Color Research & Application, 10(4):210-218, 1985. 1 +[32] Kenichiro Tanaka, Nobuhiro Ikeya, Tsuyoshi Takatani, Hiroyuki Kubo, Takuya Funatomi, Vijay Ravi, Achuta Kadambi, and Yasuhiro Mukaigawa. Time-resolved far infrared light transport decomposition for thermal photometric stereo. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(6):2075-2085, 2019. 2 +[33] Ayush Tewari, Ohad Fried, Justus Thies, Vincent Sitzmann, Stephen Lombardi, Kalyan Sunkavalli, Ricardo MartinBrualla, Tomas Simon, Jason Saragih, Matthias Nießner, et al. State of the art on neural rendering. In Computer Graphics Forum, pages 701-727. Wiley Online Library, 2020. 1 +[34] Michael Vollmer and Klaus-Peter Mollmann. Fundamentals of Infrared Thermal Imaging, chapter 1, pages 1-106. John Wiley & Sons, Ltd, 2017. 1, 2, 3 +[35] Robert J Woodham. Photometric method for determining surface orientation from multiple images. Optical engineering, 19(1):139-144, 1980. 1 +[36] Pengyu Zhang, Jie Zhao, Dong Wang, Huchuan Lu, and Xiang Ruan. 
Visible-thermal uav tracking: A large-scale benchmark and new baseline. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8886–8895, 2022. 2 +[37] Xingchen Zhang, Ping Ye, and Gang Xiao. Vifb: A visible and infrared image fusion benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 104-105, 2020. 2 \ No newline at end of file diff --git a/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/images.zip b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..f22a259f9e606ef7f4aafe7b551c444eb3e52605 --- /dev/null +++ b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a89445668f53a20cd3247ab3c92e4b79ec99f893096141fe12c24dbfcd3ffd97 +size 664040 diff --git a/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/layout.json b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b0d41c908b546fe3c1f77e44dc751764e9b939e2 --- /dev/null +++ b/2024/A Theory of Joint Light and Heat Transport for Lambertian Scenes/layout.json @@ -0,0 +1,10891 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 88, + 103, + 505, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 103, + 505, + 120 + ], + "spans": [ + { + "bbox": [ + 88, + 103, + 505, + 120 + ], + "type": "text", + "content": "A Theory of Joint Light and Heat Transport for Lambertian Scenes" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 58, + 143, + 536, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 143, + 536, + 171 + ], + "spans": [ + { + "bbox": [ + 58, + 143, + 536, + 171 + ], + "type": "text", + "content": "Mani Ramanagopal, Sriram Narayanan, Aswin C. Sankaranarayanan, and Srinivasa G. Narasimhan \nCarnegie Mellon University, Pittsburgh, PA 15213, USA" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 152, + 174, + 440, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 174, + 440, + 185 + ], + "spans": [ + { + "bbox": [ + 152, + 174, + 440, + 185 + ], + "type": "text", + "content": "{manikans, snochurn, saswin, srinivas}@andrew.cmu.edu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 238, + 290, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 238, + 290, + 455 + ], + "spans": [ + { + "bbox": [ + 46, + 238, + 290, + 455 + ], + "type": "text", + "content": "We present a novel theory that establishes the relationship between light transport in visible and thermal infrared, and heat transport in solids. We show that heat generated due to light absorption can be estimated by modeling heat transport using a thermal camera. For situations where heat conduction is negligible, we analytically solve the heat transport equation to derive a simple expression relating the change in thermal image intensity to the absorbed light intensity and heat capacity of the material. 
Next, we prove that intrinsic image decomposition for Lambertian scenes becomes a well-posed problem if one has access to the absorbed light. Our theory generalizes to arbitrary shapes and unstructured illumination. Our theory is based on applying energy conservation principle at each pixel independently. We validate our theory using real-world experiments on diffuse objects made of different materials that exhibit both direct and global components (inter-reflections) of light transport under unknown complex lighting." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 477, + 128, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 477, + 128, + 490 + ], + "spans": [ + { + "bbox": [ + 47, + 477, + 128, + 490 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 498, + 287, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 287, + 641 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 287, + 641 + ], + "type": "text", + "content": "Printed on paper, this text appears black because the ink does not reflect much light. So what happens to the light striking the ink? It gets absorbed and converted into heat, thereby disappearing from the visible light transport system. Starting from the early works in 1970s [3, 20, 23, 27, 31, 35], decades of research[6, 18, 33] have attempted to separate surface reflectance and shading from images by modeling shapes[30], illuminations [8] and their interactions [13]. However, in the general case, decomposing light transport is fundamentally an ill-posed problem, thus requiring handcrafted [23] or learned priors[1, 2, 4, 9]. But what if we can somehow observe the light lost to absorption?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "content": "Analogous to light transport, heat transport models the generation and flow of heat through a medium and its exchange with the surrounding [5, 34]. In the heat transport system, the heat generated due to light absorption is no different from any other type of heat generation. 
While heat itself cannot be seen, all objects radiate infrared light based" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 212, + 383, + 291 + ], + "blocks": [ + { + "bbox": [ + 310, + 212, + 383, + 291 + ], + "lines": [ + { + "bbox": [ + 310, + 212, + 383, + 291 + ], + "spans": [ + { + "bbox": [ + 310, + 212, + 383, + 291 + ], + "type": "image", + "image_path": "101b9b9dd3a8c77d7a15d69579d103cedba345fb0d2a532e33858b0126cf88eb.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 325, + 292, + 367, + 300 + ], + "lines": [ + { + "bbox": [ + 325, + 292, + 367, + 300 + ], + "spans": [ + { + "bbox": [ + 325, + 292, + 367, + 300 + ], + "type": "text", + "content": "Visible Image" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 389, + 212, + 462, + 290 + ], + "blocks": [ + { + "bbox": [ + 389, + 212, + 462, + 290 + ], + "lines": [ + { + "bbox": [ + 389, + 212, + 462, + 290 + ], + "spans": [ + { + "bbox": [ + 389, + 212, + 462, + 290 + ], + "type": "image", + "image_path": "a1fed00393212e0b5cc1e749154ad7da16465dde422cecdfa5550ffd64deb2ac.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 388, + 291, + 464, + 299 + ], + "lines": [ + { + "bbox": [ + 388, + 291, + 464, + 299 + ], + "spans": [ + { + "bbox": [ + 388, + 291, + 464, + 299 + ], + "type": "text", + "content": "Temperature rise at " + }, + { + "bbox": [ + 388, + 291, + 464, + 299 + ], + "type": "inline_equation", + "content": "t = 0.1s" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 470, + 213, + 542, + 292 + ], + "blocks": [ + { + "bbox": [ + 470, + 213, + 542, + 292 + ], + "lines": [ + { + "bbox": [ + 470, + 213, + 542, + 292 + ], + "spans": [ + { + "bbox": [ + 470, + 213, + 542, + 292 + ], + "type": "image", + "image_path": "bca02789b03b7020d76bbb3d95fe2b9bfbeba95b23822089fb44a73b4eb102c6.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 470, + 293, + 542, + 300 + ], + "lines": [ + { + "bbox": [ + 470, + 293, + 542, + 300 + ], + "spans": [ + { + "bbox": [ + 470, + 293, + 542, + 300 + ], + "type": "text", + "content": "Temperature rise at " + }, + { + "bbox": [ + 470, + 293, + 542, + 300 + ], + "type": "inline_equation", + "content": "t = 3s" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 310, + 301, + 382, + 379 + ], + "blocks": [ + { + "bbox": [ + 310, + 301, + 382, + 379 + ], + "lines": [ + { + "bbox": [ + 310, + 301, + 382, + 379 + ], + "spans": [ + { + "bbox": [ + 310, + 301, + 382, + 379 + ], + "type": "image", + "image_path": "c28b9b618c4df3af25d97017329873094f1efedc8297f351a6c8d30859bf859f.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 319, + 380, + 373, + 389 + ], + "lines": [ + { + "bbox": [ + 319, + 380, + 373, + 389 + ], + "spans": [ + { + "bbox": [ + 319, + 380, + 373, + 389 + ], + "type": "text", + "content": "Experiment Setup" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 388, + 301, + 462, + 379 + ], + "blocks": [ + { + "bbox": [ + 388, + 301, + 462, + 379 + ], + "lines": [ + { + "bbox": [ + 388, + 301, + 462, + 379 + ], + "spans": [ + { + "bbox": [ + 
388, + 301, + 462, + 379 + ], + "type": "image", + "image_path": "f3582e4f64d8b962870cd0bc918c3d3db4f7284f946330b978bf0a23f7296008.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 413, + 380, + 435, + 388 + ], + "lines": [ + { + "bbox": [ + 413, + 380, + 435, + 388 + ], + "spans": [ + { + "bbox": [ + 413, + 380, + 435, + 388 + ], + "type": "text", + "content": "Albedo" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 469, + 301, + 542, + 379 + ], + "blocks": [ + { + "bbox": [ + 469, + 301, + 542, + 379 + ], + "lines": [ + { + "bbox": [ + 469, + 301, + 542, + 379 + ], + "spans": [ + { + "bbox": [ + 469, + 301, + 542, + 379 + ], + "type": "image", + "image_path": "54ace4d89a4bea2f3bcc26dda08d1fbb78d21a99916af79f07cb6601bae05f91.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 493, + 380, + 518, + 389 + ], + "lines": [ + { + "bbox": [ + 493, + 380, + 518, + 389 + ], + "spans": [ + { + "bbox": [ + 493, + 380, + 518, + 389 + ], + "type": "text", + "content": "Shading" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 399, + 547, + 499 + ], + "lines": [ + { + "bbox": [ + 304, + 399, + 547, + 499 + ], + "spans": [ + { + "bbox": [ + 304, + 399, + 547, + 499 + ], + "type": "text", + "content": "Figure 1. The visible image (brightened for visualization) captures the reflected light, which is the product of albedo and shading at that pixel. The absorbed light gets converted into heat and raises the temperature, which can be observed using a co-located thermal camera. The illumination is turned on at " + }, + { + "bbox": [ + 304, + 399, + 547, + 499 + ], + "type": "inline_equation", + "content": "t = 0" + }, + { + "bbox": [ + 304, + 399, + 547, + 499 + ], + "type": "text", + "content": ". The temperature rise at " + }, + { + "bbox": [ + 304, + 399, + 547, + 499 + ], + "type": "inline_equation", + "content": "t = 0.1s" + }, + { + "bbox": [ + 304, + 399, + 547, + 499 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 399, + 547, + 499 + ], + "type": "inline_equation", + "content": "t = 3s" + }, + { + "bbox": [ + 304, + 399, + 547, + 499 + ], + "type": "text", + "content": " are shown using turbo colormap. Our novel theory establishes the relationship between light and heat transport and provides an analytical solution to compute albedo and shading for complex shapes and unknown illumination." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 509, + 547, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 509, + 547, + 568 + ], + "spans": [ + { + "bbox": [ + 304, + 509, + 547, + 568 + ], + "type": "text", + "content": "on their surface temperature, and that can be measured using a thermal camera [34]. By modeling heat transport, we make the first attempt to estimate the intensity of light absorbed by an object, thus establishing the connection between light and heat transport." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "type": "text", + "content": "We develop a novel theory that proves having access to absorbed light turns single view intrinsic image decomposition into a pixel-wise well-posed problem, even for arbitrary shape and illumination. Our key insight is that all the complexities of the reflected light transport are also present in the absorbed light, in the same functional form but simply scaled by the complement of the albedo. Consider the color chart seen in Fig. 1. The amount of irradiance due to the line light is approximately equal for the black and white patches. While the visible image records a low intensity for the black patch, the corresponding increase in intensity in the thermal images is high, and vice versa. Leveraging the" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "11924" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 131 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 131 + ], + "type": "text", + "content": "principle of energy conservation, the sum of reflected light and absorbed light at each scene point must equal its irradiance, which is also called as shading. Similarly, we can compute the ratio of reflected light to irradiance, which is also called as surface reflectance or albedo." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 133, + 287, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 133, + 287, + 361 + ], + "spans": [ + { + "bbox": [ + 46, + 133, + 287, + 361 + ], + "type": "text", + "content": "A key ingredient in our approach is the ability to estimate the intensity of heat generated due to light absorption. In the general case, estimating it requires solving the heat transport equation which does not have an analytical solution for unknown shapes [5, 34]. However, in the absence of heat conduction, we show that the analytical solution to the heat transport equation for a constant source is a transient response that follows a 2-parameter exponential curve. Therefore, the source intensity can be estimated with as little as three frames from a thermal video. 
In practice, conduction occurs in all real-world objects albeit to a smaller degree in insulators and regions with low temperature gradient. Therefore, we limit the influence of conduction by focusing on the transient response of each pixel immediately after turning on light. A key limitation of our approach is that we require the system to be at thermal equilibrium before the light is turned on and other sources of heat generation, if any, remain constant. This is required to ensure the rise in temperature is only due to the absorbed light." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 363, + 287, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 363, + 287, + 625 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 287, + 625 + ], + "type": "text", + "content": "Prior works in computational thermal imaging have studied the thermal transient response of objects to heating. Dashpute et al. [11] heat planar objects using a laser and capture a 1 min long thermal video to estimate its thermal diffusivity and emissivity. Of most relevance to our work, Tanaka et al. [32] heat objects using infrared lamps and record a 10 mins long thermal video. They decompose these videos using curve fitting into ambient, specular, diffuse and global components, where the latter two are assumed to be exponential curves. But this decomposition is akin to direct-global separation which is different from intrinsic image decomposition. Also, they use the extracted diffuse component as input to a photometric stereo algorithm. Note that their estimated \"albedo\" corresponds to absorptivity in the infrared spectrum and their photometric stereo is limited to distant point light sources at known directions (separate video for each direction). In contrast, our theory establishes and exploits the causal relationship between light and heat transport. And we apply our theory to albedo-shading separation in the visible spectrum for arbitrary unknown illumination using a single 4 sec thermal video and a single visible image." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 628, + 287, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 628, + 287, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 628, + 287, + 700 + ], + "type": "text", + "content": "Several works in vision [17, 36, 37] and robotics [22, 25] fuse spatial features from the visible and thermal images in order to improve robustness of downstream tasks, such as object detection [24], to lighting and weather conditions. However, these methods do not reason about the relationship between the two spectrums from a physics perspective." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 701, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 287, + 713 + ], + "type": "text", + "content": "We validate our theory through real world experiments" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "content": "using a co-located setup of a visible and thermal camera. Our target objects, even though diffuse, are made of different materials, contain direct and global light transport (inter-reflections), low and high spatial frequency and unstructured illumination, all of which are unknown." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 144, + 484, + 158 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 144, + 484, + 158 + ], + "spans": [ + { + "bbox": [ + 305, + 144, + 484, + 158 + ], + "type": "text", + "content": "2. Joint Light and Heat Transport" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 164, + 545, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 164, + 545, + 307 + ], + "spans": [ + { + "bbox": [ + 304, + 164, + 545, + 307 + ], + "type": "text", + "content": "In this section, we briefly introduce the relationship between light and heat transport. While light energy is carried via photons, heat is thermal energy exchanged via molecular vibrations. Visible light (VIS, " + }, + { + "bbox": [ + 304, + 164, + 545, + 307 + ], + "type": "inline_equation", + "content": "0.4 - 0.7\\mu \\mathrm{m}" + }, + { + "bbox": [ + 304, + 164, + 545, + 307 + ], + "type": "text", + "content": ") transport can model the light scattered by the scene from a source towards the camera. The light absorbed by the scene gets converted to heat which is then exchanged via conduction, convection, retention (i.e. increase in temperature) and radiation, and is governed by the heat transport equation. Similar to VIS transport, Longwave Infrared light (LWIR, " + }, + { + "bbox": [ + 304, + 164, + 545, + 307 + ], + "type": "inline_equation", + "content": "8 - 14\\mu \\mathrm{m}" + }, + { + "bbox": [ + 304, + 164, + 545, + 307 + ], + "type": "text", + "content": ") transport can be used to model the radiation emitted by objects, based on their temperature, towards a thermal camera." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 308, + 545, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 308, + 545, + 438 + ], + "spans": [ + { + "bbox": [ + 304, + 308, + 545, + 438 + ], + "type": "text", + "content": "Our first contribution is an algorithm, described in Sec. 3, for estimating the intensity of absorbed light using only a thermal video. This involves two steps: 1) inferring temperatures using LWIR light transport, and 2) inferring source intensity using heat transport equation. As all objects in the scene constantly exchange heat, it is hard to disambiguate heat generated by light absorption from other sources of heat at equilibrium. However, if we disturb the equilibrium by turning on the visible light at a known time, then the resulting rise in temperature allows us to estimate heat generated only due to our illumination." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 440, + 545, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 440, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 304, + 440, + 545, + 510 + ], + "type": "text", + "content": "Our second contribution is a novel theory, described in Sec. 4, that decomposes VIS transport for arbitrary shapes and illumination. We derive simple analytical expressions for albedo and shading using a visible image and the absorbed light intensity estimated from a thermal video captured by a co-located thermal camera." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 522, + 509, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 522, + 509, + 536 + ], + "spans": [ + { + "bbox": [ + 305, + 522, + 509, + 536 + ], + "type": "text", + "content": "3. 
Estimating Absorbed Light Intensity" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 543, + 545, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 543, + 545, + 626 + ], + "spans": [ + { + "bbox": [ + 304, + 543, + 545, + 626 + ], + "type": "text", + "content": "Consider a scene initially at thermal equilibrium. At a time " + }, + { + "bbox": [ + 304, + 543, + 545, + 626 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 304, + 543, + 545, + 626 + ], + "type": "text", + "content": ", the illumination, which is constant with time, is turned on and a thermal video is captured. We assume the illumination is focused primarily at the target scene and therefore the temperature of the surrounding remains constant. Our objective is to estimate the spatially varying absorbed light (heat source) intensity using a single thermal video." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 634, + 524, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 634, + 524, + 647 + ], + "spans": [ + { + "bbox": [ + 305, + 634, + 524, + 647 + ], + "type": "text", + "content": "3.1. Thermal Images to Temperature Changes" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 653, + 545, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 545, + 699 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 545, + 699 + ], + "type": "text", + "content": "In LWIR light transport, all surfaces including the camera and the scene emit (and reflect) radiation. The pixel intensity in the " + }, + { + "bbox": [ + 304, + 653, + 545, + 699 + ], + "type": "inline_equation", + "content": "n^{th}" + }, + { + "bbox": [ + 304, + 653, + 545, + 699 + ], + "type": "text", + "content": " frame " + }, + { + "bbox": [ + 304, + 653, + 545, + 699 + ], + "type": "inline_equation", + "content": "I_{n}(\\mathbf{p})" + }, + { + "bbox": [ + 304, + 653, + 545, + 699 + ], + "type": "text", + "content": " of a thermal video can be written as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 369, + 701, + 545, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 701, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 369, + 701, + 545, + 714 + ], + "type": "interline_equation", + "content": "I _ {n} (\\mathbf {p}) = \\alpha U \\left(T _ {n} (\\mathbf {x})\\right) + U _ {s}, \\tag {1}", + "image_path": "894d75a4db16215f34227162b88e443275597629adc62fcb930d931870ba6c58.jpg" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "11925" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 286, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 286, + 119 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 286, + 119 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 72, + 286, + 119 + ], + "type": "inline_equation", + "content": "T_{n}(\\mathbf{x})" + }, + { + "bbox": [ + 47, + 72, + 286, + 119 + ], + "type": "text", + "content": " is the temperature at time " + }, + { + "bbox": [ + 47, + 72, + 286, + 119 + ], + "type": "inline_equation", + "content": "t_n, \\alpha" + }, + { 
+ "bbox": [ + 47, + 72, + 286, + 119 + ], + "type": "text", + "content": " is the effective emissivity, " + }, + { + "bbox": [ + 47, + 72, + 286, + 119 + ], + "type": "inline_equation", + "content": "U_{s}" + }, + { + "bbox": [ + 47, + 72, + 286, + 119 + ], + "type": "text", + "content": " denotes the radiation from the surrounding, and " + }, + { + "bbox": [ + 47, + 72, + 286, + 119 + ], + "type": "inline_equation", + "content": "U(T)" + }, + { + "bbox": [ + 47, + 72, + 286, + 119 + ], + "type": "text", + "content": " is a non-linear function that approximates the integral of the Planck radiation law." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 120, + 286, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 120, + 286, + 144 + ], + "spans": [ + { + "bbox": [ + 47, + 120, + 286, + 144 + ], + "type": "text", + "content": "For a small range around " + }, + { + "bbox": [ + 47, + 120, + 286, + 144 + ], + "type": "inline_equation", + "content": "T_*" + }, + { + "bbox": [ + 47, + 120, + 286, + 144 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 120, + 286, + 144 + ], + "type": "inline_equation", + "content": "U(T)" + }, + { + "bbox": [ + 47, + 120, + 286, + 144 + ], + "type": "text", + "content": " can be linearly approximated as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 154, + 286, + 167 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 154, + 286, + 167 + ], + "spans": [ + { + "bbox": [ + 111, + 154, + 286, + 167 + ], + "type": "interline_equation", + "content": "U (T) = k _ {1} \\left(T - T _ {*}\\right) + k _ {2}, \\tag {2}", + "image_path": "080eadc976093e2e367d96ee3913e5011497a8e7c6e6af27d15bdc574e7127dc.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 178, + 286, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 178, + 286, + 213 + ], + "spans": [ + { + "bbox": [ + 47, + 178, + 286, + 213 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 178, + 286, + 213 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 47, + 178, + 286, + 213 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 178, + 286, + 213 + ], + "type": "inline_equation", + "content": "k_{2}" + }, + { + "bbox": [ + 47, + 178, + 286, + 213 + ], + "type": "text", + "content": " are camera-specific constants that depend on " + }, + { + "bbox": [ + 47, + 178, + 286, + 213 + ], + "type": "inline_equation", + "content": "T_{*}" + }, + { + "bbox": [ + 47, + 178, + 286, + 213 + ], + "type": "text", + "content": ". We refer the reader to Appendix A for more details. Combining Eq. (1) and Eq. (2), we get" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "spans": [ + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "type": "interline_equation", + "content": "I _ {n} (\\mathbf {p}) - I _ {m} (\\mathbf {p}) = k _ {1} \\alpha \\left(T _ {n} (\\mathbf {x}) - T _ {m} (\\mathbf {x})\\right). 
\\tag {3}", + "image_path": "04b3e62021f5277d28cc61713437f99cb3aff5790b4f28e762d14f87083f0db2.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 247, + 286, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 247, + 286, + 330 + ], + "spans": [ + { + "bbox": [ + 47, + 247, + 286, + 330 + ], + "type": "text", + "content": "The above equation shows that change in pixel intensity is linearly related to change in scene temperature. Note that commonly used thermal cameras are uncooled microbolometers that exhibit thermal inertia [28, 29], where the measured intensities have a small delay with respect to changes in the scene. This effect is ignored for the purposes of this paper." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 338, + 284, + 351 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 338, + 284, + 351 + ], + "spans": [ + { + "bbox": [ + 47, + 338, + 284, + 351 + ], + "type": "text", + "content": "3.2. Heat Transport Equation without Conduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 357, + 286, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 357, + 286, + 393 + ], + "spans": [ + { + "bbox": [ + 47, + 357, + 286, + 393 + ], + "type": "text", + "content": "Consider an infinitesimal volume at a scene point with area " + }, + { + "bbox": [ + 47, + 357, + 286, + 393 + ], + "type": "inline_equation", + "content": "\\delta_A" + }, + { + "bbox": [ + 47, + 357, + 286, + 393 + ], + "type": "text", + "content": " and depth " + }, + { + "bbox": [ + 47, + 357, + 286, + 393 + ], + "type": "inline_equation", + "content": "\\delta_z" + }, + { + "bbox": [ + 47, + 357, + 286, + 393 + ], + "type": "text", + "content": ". The heat transport equation at that point can be written as [5, 34]:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 74, + 400, + 286, + 440 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 400, + 286, + 440 + ], + "spans": [ + { + "bbox": [ + 74, + 400, + 286, + 440 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} C _ {v} \\delta_ {A} \\delta_ {z} \\frac {\\partial T}{\\partial t} = \\kappa \\delta_ {A} \\delta_ {z} \\Delta T + \\delta_ {A} h _ {c} (T _ {s} - T) + \\\\ \\delta_ {A} \\sigma \\epsilon \\left(T _ {s} ^ {4} - T ^ {4}\\right) + \\delta_ {A} S, \\tag {4} \\\\ \\end{array}", + "image_path": "30fc42d5aa3a721b17004dadf93deda5ef125598c440a72a73f7b4d87e66ae4e.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "spans": [ + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "inline_equation", + "content": "C_v" + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "text", + "content": " is the volumetric heat capacity, " + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "text", + "content": " is the temperature, " + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "text", + "content": " is the thermal conductivity, " + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 
47, + 449, + 286, + 544 + ], + "type": "text", + "content": " denotes the laplacian operator at that point, " + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "inline_equation", + "content": "h_c" + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "text", + "content": " is the convection coefficient, " + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "inline_equation", + "content": "T_s" + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "text", + "content": " is the surrounding temperature, " + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "text", + "content": " is the Stefan-Boltzmann constant, " + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "text", + "content": " is the surface emissivity, and " + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 47, + 449, + 286, + 544 + ], + "type": "text", + "content": " is the intensity of heat generated via light absorption. Note that all the terms are expressed in units of W. For an opaque Lambertian scene, all the light absorption happens near the surface." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 545, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 286, + 712 + ], + "type": "text", + "content": "Note that the magnitude of heat conduction is proportional to the local temperature laplacian. Analytically solving Eq. (4) requires knowing the shape since the laplacian operator depends on the local curvature. Ignoring conduction, makes the heat equation pixel-wise independent and lends itself to an analytical solution independent of shape. Moreover, many real-world materials, such as paints, plastics, paper and wood, have low thermal conductivity. As the object is initially at equilibrium, local temperature laplacians start at zero and increase with time if and only if neighboring pixels have different material properties and/or receive different amounts of light. Therefore, we consider a short thermal video immediately after light is turned on when conduction can be ignored." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 318, + 72, + 531, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 72, + 531, + 84 + ], + "spans": [ + { + "bbox": [ + 318, + 72, + 531, + 84 + ], + "type": "text", + "content": "Dividing by area and ignoring conduction, Eq. (4) is:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 332, + 91, + 544, + 114 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 91, + 544, + 114 + ], + "spans": [ + { + "bbox": [ + 332, + 91, + 544, + 114 + ], + "type": "interline_equation", + "content": "C _ {v} \\delta_ {z} \\frac {\\partial T}{\\partial t} = h _ {c} \\left(T _ {s} - T\\right) + \\sigma \\epsilon \\left(T _ {s} ^ {4} - T ^ {4}\\right) + S. 
\\tag {5}", + "image_path": "a0c3f62cf5f48241a4a2ff59b360165dc33ca21cedef203c6bd56043690d0747.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 121, + 544, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 121, + 544, + 157 + ], + "spans": [ + { + "bbox": [ + 305, + 121, + 544, + 157 + ], + "type": "text", + "content": "Since temperature rise due to light absorption is typically small (" + }, + { + "bbox": [ + 305, + 121, + 544, + 157 + ], + "type": "inline_equation", + "content": "\\leq 15\\mathrm{K}" + }, + { + "bbox": [ + 305, + 121, + 544, + 157 + ], + "type": "text", + "content": " within 4 sec in our experiments), we linearize the radiation term around a nominal temperature " + }, + { + "bbox": [ + 305, + 121, + 544, + 157 + ], + "type": "inline_equation", + "content": "T_{*}" + }, + { + "bbox": [ + 305, + 121, + 544, + 157 + ], + "type": "text", + "content": " to get" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 357, + 164, + 544, + 178 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 164, + 544, + 178 + ], + "spans": [ + { + "bbox": [ + 357, + 164, + 544, + 178 + ], + "type": "interline_equation", + "content": "\\sigma \\epsilon \\left(T _ {s} ^ {4} - T ^ {4}\\right) \\approx 4 \\sigma \\epsilon T _ {*} ^ {3} \\left(T _ {s} - T\\right), \\tag {6}", + "image_path": "e8f90d490fdaa22c162cec94f1ea5078b11bd0d82d0135c1e5499dd6673b120f.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 186, + 544, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 186, + 544, + 210 + ], + "spans": [ + { + "bbox": [ + 305, + 186, + 544, + 210 + ], + "type": "text", + "content": "where the absolute error due to linearization is " + }, + { + "bbox": [ + 305, + 186, + 544, + 210 + ], + "type": "inline_equation", + "content": "\\leq 4\\%" + }, + { + "bbox": [ + 305, + 186, + 544, + 210 + ], + "type": "text", + "content": ". This simplifies Eq. (5) to" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 372, + 217, + 544, + 241 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 217, + 544, + 241 + ], + "spans": [ + { + "bbox": [ + 372, + 217, + 544, + 241 + ], + "type": "interline_equation", + "content": "H \\frac {\\partial T}{\\partial t} + P T = S + P T _ {s}, \\tag {7}", + "image_path": "c6fbc2d1f160fd25812711b5bac23ac1d2f394da00ed6eef9cb6465f035d87a9.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 247, + 478, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 247, + 478, + 261 + ], + "spans": [ + { + "bbox": [ + 305, + 247, + 478, + 261 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 247, + 478, + 261 + ], + "type": "inline_equation", + "content": "H = C_v\\delta_z" + }, + { + "bbox": [ + 305, + 247, + 478, + 261 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 247, + 478, + 261 + ], + "type": "inline_equation", + "content": "P = (h_c + 4\\sigma \\epsilon T_*^3)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 267, + 418, + 280 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 267, + 418, + 280 + ], + "spans": [ + { + "bbox": [ + 306, + 267, + 418, + 280 + ], + "type": "text", + "content": "3.3. 
Analytical Solution" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 285, + 544, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 285, + 544, + 309 + ], + "spans": [ + { + "bbox": [ + 305, + 285, + 544, + 309 + ], + "type": "text", + "content": "Solving Eq. (7) at a single pixel (refer Appendix B for derivation), we get" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 318, + 317, + 544, + 343 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 317, + 544, + 343 + ], + "spans": [ + { + "bbox": [ + 318, + 317, + 544, + 343 + ], + "type": "interline_equation", + "content": "T _ {n} - T _ {1} = \\left(\\frac {S}{P} + T _ {s} - T _ {1}\\right) \\left(1 - e ^ {- \\frac {P}{H} \\left(t _ {n} - t _ {1}\\right)}\\right). \\tag {8}", + "image_path": "abb082fc9efb75ed4420b6cd4bece6cdbbbcc79c44a841ce66f4a64b4384ada0.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 350, + 544, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 350, + 544, + 386 + ], + "spans": [ + { + "bbox": [ + 305, + 350, + 544, + 386 + ], + "type": "text", + "content": "Since we assume the system is initially at thermal equilibrium, we can set " + }, + { + "bbox": [ + 305, + 350, + 544, + 386 + ], + "type": "inline_equation", + "content": "T_{s} = T_{1}" + }, + { + "bbox": [ + 305, + 350, + 544, + 386 + ], + "type": "text", + "content": ". Now, substituting Eq. (3) into the above equation, we get" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 350, + 393, + 544, + 417 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 393, + 544, + 417 + ], + "spans": [ + { + "bbox": [ + 350, + 393, + 544, + 417 + ], + "type": "interline_equation", + "content": "I _ {n} - I _ {1} = \\frac {S k _ {1} \\alpha}{P} \\left(1 - e ^ {- \\frac {P}{H} \\left(t _ {n} - t _ {1}\\right)}\\right) \\tag {9}", + "image_path": "13b152310cf23fa4bf9d8610f1657d075a3fff6f70489140be87de5400ad1c90.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 423, + 544, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 423, + 544, + 460 + ], + "spans": [ + { + "bbox": [ + 305, + 423, + 544, + 460 + ], + "type": "text", + "content": "Therefore, given a thermal video " + }, + { + "bbox": [ + 305, + 423, + 544, + 460 + ], + "type": "inline_equation", + "content": "\\{I_1,\\dots ,I_n\\}" + }, + { + "bbox": [ + 305, + 423, + 544, + 460 + ], + "type": "text", + "content": " and corresponding time stamps " + }, + { + "bbox": [ + 305, + 423, + 544, + 460 + ], + "type": "inline_equation", + "content": "\\{t_1,\\ldots ,t_n\\}" + }, + { + "bbox": [ + 305, + 423, + 544, + 460 + ], + "type": "text", + "content": ", we use gradient descent for curve fitting at each pixel independently:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 367, + 467, + 544, + 483 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 467, + 544, + 483 + ], + "spans": [ + { + "bbox": [ + 367, + 467, + 544, + 483 + ], + "type": "interline_equation", + "content": "I _ {n} - I _ {1} = c _ {1} \\left(1 - e ^ {- \\frac {t _ {n} - t _ {1}}{c _ {2}}}\\right). 
\\tag {10}", + "image_path": "3a1a3735d4b1eb09f2e97a3be3607784a546a1cf9a90a3cdd4ded1a4f9184e94.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 306, + 491, + 481, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 491, + 481, + 504 + ], + "spans": [ + { + "bbox": [ + 306, + 491, + 481, + 504 + ], + "type": "text", + "content": "3.4. Recovering S from Curve Fitting" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": "The results of curve fitting provide " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "c_{1}" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "c_{2}" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": " at each pixel. From Eq. (9) and Eq. (10), note that " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "c_{1} = \\frac{Sk_{1}\\alpha}{P}" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": ". Recovering " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "c_{1}" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": " would require knowledge of " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "k_{1}, \\alpha" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": " depends on " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "h_c, \\epsilon" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "T_*" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": ". In theory, all these quantities could vary per-pixel. However, the spatial variation in " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": ", which depends on albedo in visible spectrum and illumination, is much greater than that of others. 
In this paper, we assume the quantity " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\beta = \\frac{k_1\\alpha}{P}" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": " is common for all pixels such that " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": " is the constant of proportionality between " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "c_{1}" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": ". Note that Lambertian scenes typically correspond to rough surfaces which have high emissivity. Also, it is known that most paints have similarly high emissivity values of " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "> 0.9" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": " irrespective of their albedo in the visible spectrum [34]. As the object is initially at equilibrium, we can assume " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "T_*" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": ", and hence " + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 305, + 510, + 545, + 713 + ], + "type": "text", + "content": ", is common for all pixels. In the absence of wind, we reasonably assume convection, if it exists at all, to be uniform throughout the scene." + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11926" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 139, + 547, + 245 + ], + "blocks": [ + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "lines": [ + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "spans": [ + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": "Table 1. Various lighting configurations typically modeled in shape-from-intensity problems. 
It is trivial to verify the equations for spatially varying albedo " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "\\rho (x)" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " and shading " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "\\eta (x)" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " remains the same irrespective of the complexity of shape or illumination when estimates of both image irradiance " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "I_{v}(x)" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " and absorbed light intensity " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "\\tilde{S} (x)" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " are available. Here, " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " is the camera gain, " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " is the unknown scale factor in the estimation of " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "\\tilde{S} (x),\\zeta = \\frac{\\gamma}{\\beta}" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " is the relative scale factor, " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " is the source intensity, s is the light source direction, n is the surface normal, " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " is light source direction for extended source, " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " is the shading term and " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "\\eta^{*}" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " is the scene irradiance. Inter-reflections are modeled as spatially varying source intensities. 
All the above cases can be extended to model cast and attached shadows using a shadowing function " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "W(x)" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " without changing the expressions for " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 71, + 547, + 131 + ], + "type": "inline_equation", + "content": "\\eta" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 139, + 547, + 245 + ], + "lines": [ + { + "bbox": [ + 47, + 139, + 547, + 245 + ], + "spans": [ + { + "bbox": [ + 47, + 139, + 547, + 245 + ], + "type": "table", + "html": "
<table><tr><td>Illumination</td><td>Image Irradiance Iv(x)</td><td>Estimated Absorbed Light S̃(x)</td><td>Albedo ρ(x)</td><td>Shading η(x)</td></tr>
<tr><td>Far source</td><td>γρ(x)/π E(s·n(x))</td><td>β(1 - ρ(x)) E(s·n(x))</td><td rowspan="6">πIv(x) / (πIv(x) + ζS̃(x))</td><td rowspan="6">πIv(x) + ζS̃(x)</td></tr>
<tr><td>Multiple sources</td><td>γρ(x)/π ∑_l E_l(s_l·n(x))</td><td>β(1 - ρ(x)) ∑_l E_l(s_l·n(x))</td></tr>
<tr><td>Near sources</td><td>γρ(x)/π ∑_l E_l(s_l(x)·n(x))</td><td>β(1 - ρ(x)) ∑_l E_l(s_l(x)·n(x))</td></tr>
<tr><td>Extended Sources</td><td>γρ(x)/π ∫ω E(ω)(ω·n(x))</td><td>β(1 - ρ(x)) ∫ω E(ω)(ω·n(x))</td></tr>
<tr><td>Inter-reflections</td><td>γρ(x)/π ∫ω E(x,ω)(ω·n(x))</td><td>β(1 - ρ(x)) ∫ω E(x,ω)(ω·n(x))</td></tr>
<tr><td>General illumination</td><td>ρ(x)/π η(x)</td><td>β(1 - ρ(x)) η(x)/γ</td></tr></table>
", + "image_path": "1fc2277ea64133f1ced5f0c83399055f9a5f3295ba714c3e28b256a9f4c4099e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 262, + 205, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 262, + 205, + 277 + ], + "spans": [ + { + "bbox": [ + 47, + 262, + 205, + 277 + ], + "type": "text", + "content": "4. Albedo-Shading Separation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 283, + 288, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 283, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 46, + 283, + 288, + 392 + ], + "type": "text", + "content": "Consider an opaque Lambertian scene imaged by a camera from a fixed view. We assume that the camera is sensitive to all the wavelengths present in the light sources i.e. we primarily consider LEDs or CFL bulbs when using visible cameras. We first consider the case where the albedo and the camera response are independent of wavelength in Sec. 4.1 and then extend our theory to wavelength-dependent albedo functions in Sec. 4.2. The words image and camera correspond to visible spectrum in this section." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 397, + 261, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 397, + 261, + 410 + ], + "spans": [ + { + "bbox": [ + 47, + 397, + 261, + 410 + ], + "type": "text", + "content": "4.1. Grayscale Albedo and Camera Response" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 415, + 287, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 415, + 287, + 452 + ], + "spans": [ + { + "bbox": [ + 46, + 415, + 287, + 452 + ], + "type": "text", + "content": "The image intensity " + }, + { + "bbox": [ + 46, + 415, + 287, + 452 + ], + "type": "inline_equation", + "content": "I_{v}" + }, + { + "bbox": [ + 46, + 415, + 287, + 452 + ], + "type": "text", + "content": ", which is proportional to the power received by the camera per unit area, at a pixel " + }, + { + "bbox": [ + 46, + 415, + 287, + 452 + ], + "type": "inline_equation", + "content": "\\mathbf{p}(\\mathbf{x})" + }, + { + "bbox": [ + 46, + 415, + 287, + 452 + ], + "type": "text", + "content": " focused at a scene point " + }, + { + "bbox": [ + 46, + 415, + 287, + 452 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 415, + 287, + 452 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 457, + 287, + 480 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 457, + 287, + 480 + ], + "spans": [ + { + "bbox": [ + 70, + 457, + 287, + 480 + ], + "type": "interline_equation", + "content": "I _ {v} (\\mathbf {p} (\\mathbf {x})) = \\frac {\\rho (\\mathbf {x})}{\\pi} \\eta (\\mathbf {x}), \\text {s . 
t .} \\eta (\\mathbf {x}) \\equiv \\gamma \\eta^ {*} (\\mathbf {x}) \\tag {11}", + "image_path": "dc1f79e971c0a3e969403c720a7c0fb04427183043f51f76659c2ca97ecff770.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "spans": [ + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\rho (\\mathbf{x})" + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\eta (\\mathbf{x})" + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "text", + "content": " are the spatially varying albedo and shading, " + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\gamma >0" + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "text", + "content": " is the camera gain representing the optics and sensor electronics in the camera, and " + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\eta^{*}(\\mathbf{x})" + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "text", + "content": " is the true scene irradiance received by " + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "text", + "content": ". Note that we do not restrict the lighting geometry in any way and the shading " + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\eta (\\mathbf{x})" + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "text", + "content": " term is unstructured. In the rest of the paper, we use " + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\mathbf{p}" + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "text", + "content": " in place of " + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\mathbf{p}(\\mathbf{x})" + }, + { + "bbox": [ + 46, + 485, + 287, + 569 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 569, + 287, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 569, + 287, + 653 + ], + "spans": [ + { + "bbox": [ + 45, + 569, + 287, + 653 + ], + "type": "text", + "content": "The pixel value in an image describes the energy reflected towards the camera by a scene point. Since the surface is opaque, there is no transmission and the remaining energy gets absorbed and is converted into heat. Recall from Sec. 3 that " + }, + { + "bbox": [ + 45, + 569, + 287, + 653 + ], + "type": "inline_equation", + "content": "S(\\mathbf{x})" + }, + { + "bbox": [ + 45, + 569, + 287, + 653 + ], + "type": "text", + "content": " denotes the power absorbed per unit area, i.e. intensity, by " + }, + { + "bbox": [ + 45, + 569, + 287, + 653 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 45, + 569, + 287, + 653 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 45, + 569, + 287, + 653 + ], + "type": "inline_equation", + "content": "\\tilde{S}(\\mathbf{x})" + }, + { + "bbox": [ + 45, + 569, + 287, + 653 + ], + "type": "text", + "content": " be proportional to it, and is given by:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 64, + 657, + 287, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 657, + 287, + 673 + ], + "spans": [ + { + "bbox": [ + 64, + 657, + 287, + 673 + ], + "type": "interline_equation", + "content": "\\tilde {S} (\\mathbf {x}) = \\beta S (\\mathbf {x}), \\text {s . t .} S (\\mathbf {x}) = (1 - \\rho (\\mathbf {x})) \\eta^ {*} (\\mathbf {x}). \\tag {12}", + "image_path": "168958c4dc47ced9aac6127419a13d7113ebf1297b08d285c89e9b0eafa9580c.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 677, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 714 + ], + "type": "text", + "content": "During operation, light fixtures also generate some thermal energy which increases its temperature and thereby increasing its blackbody radiation. However, the magnitude of this" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 264, + 545, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 264, + 545, + 300 + ], + "spans": [ + { + "bbox": [ + 305, + 264, + 545, + 300 + ], + "type": "text", + "content": "additional heat generated at " + }, + { + "bbox": [ + 305, + 264, + 545, + 300 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 305, + 264, + 545, + 300 + ], + "type": "text", + "content": " is negligible and hence ignored in this paper. Next, we can express " + }, + { + "bbox": [ + 305, + 264, + 545, + 300 + ], + "type": "inline_equation", + "content": "\\tilde{S}" + }, + { + "bbox": [ + 305, + 264, + 545, + 300 + ], + "type": "text", + "content": " using shading as" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 322, + 304, + 545, + 331 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 304, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 322, + 304, + 545, + 331 + ], + "type": "interline_equation", + "content": "\\tilde {S} (\\mathbf {x}) = \\beta (1 - \\rho (\\mathbf {x})) \\frac {\\eta (\\mathbf {x})}{\\gamma} = \\frac {(1 - \\rho (\\mathbf {x})) \\eta (\\mathbf {x})}{\\zeta}, \\tag {13}", + "image_path": "c18c9aa2e20fd163f926080c6c3d8e3f3945aae78ee61614fccd4f70f77c3cf3.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 335, + 468, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 335, + 468, + 349 + ], + "spans": [ + { + "bbox": [ + 305, + 335, + 468, + 349 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 335, + 468, + 349 + ], + "type": "inline_equation", + "content": "\\zeta = \\frac{\\gamma}{\\beta}" + }, + { + "bbox": [ + 305, + 335, + 468, + 349 + ], + "type": "text", + "content": " is the relative scale factor." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 348, + 547, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 348, + 547, + 396 + ], + "spans": [ + { + "bbox": [ + 305, + 348, + 547, + 396 + ], + "type": "text", + "content": "In the trivial case where " + }, + { + "bbox": [ + 305, + 348, + 547, + 396 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 305, + 348, + 547, + 396 + ], + "type": "text", + "content": " receives no light (neither direct nor global illumination), the shading term " + }, + { + "bbox": [ + 305, + 348, + 547, + 396 + ], + "type": "inline_equation", + "content": "\\eta (\\mathbf{x}) = 0" + }, + { + "bbox": [ + 305, + 348, + 547, + 396 + ], + "type": "text", + "content": " and the albedo cannot be estimated. Whenever " + }, + { + "bbox": [ + 305, + 348, + 547, + 396 + ], + "type": "inline_equation", + "content": "\\eta (\\mathbf{x}) > 0" + }, + { + "bbox": [ + 305, + 348, + 547, + 396 + ], + "type": "text", + "content": ", we can re-write Eqs. (11) and (13) as" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 373, + 401, + 545, + 426 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 401, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 373, + 401, + 545, + 426 + ], + "type": "interline_equation", + "content": "\\pi I _ {v} (\\mathbf {p}) \\frac {1}{\\eta (\\mathbf {x})} - \\rho (\\mathbf {x}) = 0 \\tag {14}", + "image_path": "e21389bc03b91cba1bec2ca423c59c62b75548de6d8046b4955e138a6f1c9045.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 378, + 428, + 545, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 428, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 378, + 428, + 545, + 453 + ], + "type": "interline_equation", + "content": "\\zeta \\tilde {S} (\\mathbf {x}) \\frac {1}{\\eta (\\mathbf {x})} + \\rho (\\mathbf {x}) = 1 \\tag {15}", + "image_path": "3e20594ac5c3bf875afa5ce5625d905dbc05385c6aec4e601312eb47ddbc39d6.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 459, + 497, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 459, + 497, + 471 + ], + "spans": [ + { + "bbox": [ + 305, + 459, + 497, + 471 + ], + "type": "text", + "content": "Solving the above system of equations, we get:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 371, + 476, + 545, + 491 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 371, + 476, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 371, + 476, + 545, + 491 + ], + "type": "interline_equation", + "content": "\\eta (\\mathbf {x}) = \\pi I _ {v} (\\mathbf {p}) + \\zeta \\tilde {S} (\\mathbf {x}) \\tag {16}", + "image_path": "cdf8f69ee81928217b8e668565d5989457cae5f2b67690f014b2dde162f80e72.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 372, + 493, + 545, + 521 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 493, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 372, + 493, + 545, + 521 + ], + "type": "interline_equation", + "content": "\\rho (\\mathbf {x}) = \\frac {\\pi I _ {v} (\\mathbf {p})}{\\pi I _ {v} (\\mathbf {p}) + \\zeta \\tilde {S} (\\mathbf {x})}. 
\\tag {17}", + "image_path": "17c28f4cae3ddda914d1ef3dbab738906a21ca21aca9f88bfb9c76b749af2079.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 527, + 545, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 527, + 545, + 613 + ], + "spans": [ + { + "bbox": [ + 304, + 527, + 545, + 613 + ], + "type": "text", + "content": "If " + }, + { + "bbox": [ + 304, + 527, + 545, + 613 + ], + "type": "inline_equation", + "content": "I_{v}(\\mathbf{p})" + }, + { + "bbox": [ + 304, + 527, + 545, + 613 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 527, + 545, + 613 + ], + "type": "inline_equation", + "content": "\\tilde{S}(\\mathbf{x})" + }, + { + "bbox": [ + 304, + 527, + 545, + 613 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 527, + 545, + 613 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 304, + 527, + 545, + 613 + ], + "type": "text", + "content": " are known, the above equations provide a direct method to compute spatially varying albedo and shading components for complex shapes and arbitrary illumination. To emphasize its applicability further, Table 1 lists several types of lighting conditions typically modeled in shape-from-intensity problems and demonstrates that Eqs. (16), (17) hold in all cases." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 618, + 495, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 618, + 495, + 629 + ], + "spans": [ + { + "bbox": [ + 305, + 618, + 495, + 629 + ], + "type": "text", + "content": "4.2. Towards General Albedo Functions" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "text", + "content": "Let the camera have " + }, + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "text", + "content": " channels with known spectral responses " + }, + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "inline_equation", + "content": "\\Gamma_{k}(\\lambda)" + }, + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "text", + "content": ". Recall that each wavelength present in the light sources must fall within at least one of the channels. 
The image irradiance at " + }, + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "inline_equation", + "content": "\\mathbf{p}" + }, + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "text", + "content": " in channel " + }, + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "text", + "content": " can be written as:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 318, + 690, + 545, + 717 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 690, + 545, + 717 + ], + "spans": [ + { + "bbox": [ + 318, + 690, + 545, + 717 + ], + "type": "interline_equation", + "content": "I _ {v} ^ {k} (\\mathbf {p}) = \\gamma \\int_ {\\lambda} \\int_ {\\Omega} \\frac {\\rho (\\mathbf {x} , \\lambda)}{\\pi} \\Gamma_ {k} (\\lambda) L (\\mathbf {x}, \\lambda , \\omega) d \\omega d \\lambda , \\tag {18}", + "image_path": "1f7398f445bf4fbbd06323c3e38655eebff3dcd58d434912d0d6e9ac10c7000d.jpg" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "11927" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\rho (\\mathbf{x},\\lambda)" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " is the diffuse albedo as a function of wavelength, " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "L(\\mathbf{x},\\lambda ,\\omega)" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " is the spectral radiance at " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " denotes the direction along the outer hemisphere. 
The corresponding estimate of absorbed power per unit area is:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 63, + 125, + 287, + 152 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 125, + 287, + 152 + ], + "spans": [ + { + "bbox": [ + 63, + 125, + 287, + 152 + ], + "type": "interline_equation", + "content": "\\tilde {S} (\\mathbf {x}) = \\beta \\int_ {\\lambda} \\int_ {\\Omega} (1 - \\rho (\\mathbf {x}, \\lambda)) L (\\mathbf {x}, \\lambda , \\omega) d \\omega d \\lambda , \\tag {19}", + "image_path": "1bac3245f42481fe21f471f1da57747ebbc6e0630f31efa756b2000b1d9b114b.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 156, + 287, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 156, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 47, + 156, + 287, + 239 + ], + "type": "text", + "content": "Shading at a point " + }, + { + "bbox": [ + 47, + 156, + 287, + 239 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 47, + 156, + 287, + 239 + ], + "type": "text", + "content": " is influenced by the emission spectrum of the light sources, the relative geometry between " + }, + { + "bbox": [ + 47, + 156, + 287, + 239 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 47, + 156, + 287, + 239 + ], + "type": "text", + "content": " and the light sources, and the albedo of other points in the scene due to inter-reflections. While this general case remains an open problem, in the rest of this section we ignore inter-reflections and assume all light sources have a common emission spectrum " + }, + { + "bbox": [ + 47, + 156, + 287, + 239 + ], + "type": "inline_equation", + "content": "l(\\lambda)" + }, + { + "bbox": [ + 47, + 156, + 287, + 239 + ], + "type": "text", + "content": " i.e." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 103, + 245, + 287, + 270 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 245, + 287, + 270 + ], + "spans": [ + { + "bbox": [ + 103, + 245, + 287, + 270 + ], + "type": "interline_equation", + "content": "\\int_ {\\Omega} L (\\mathbf {x}, \\lambda , \\omega) d \\omega = \\eta^ {*} (\\mathbf {x}) l (\\lambda). \\tag {20}", + "image_path": "08e891e61ecc2440b06fac21bdaba08fb69b6bfbfdfc4cd86b52a8b25b15092a.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 274, + 287, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 274, + 287, + 321 + ], + "spans": [ + { + "bbox": [ + 47, + 274, + 287, + 321 + ], + "type": "text", + "content": "Note that, the illumination is still arbitrary in terms of their locations, sizes and angular radiant intensity functions. Substituting Eq. (20) into Eq. (18) and Eq. (19), we can write" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 325, + 287, + 379 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 325, + 287, + 379 + ], + "spans": [ + { + "bbox": [ + 56, + 325, + 287, + 379 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} I _ {v} ^ {k} (\\mathbf {p}) = \\int_ {\\lambda} \\frac {\\rho (\\mathbf {x} , \\lambda)}{\\pi} \\Gamma_ {k} (\\lambda) \\eta (\\mathbf {x}) l (\\lambda) d \\lambda , (21) \\\\ \\tilde {S} (\\mathbf {x}) = \\frac {\\int_ {\\lambda} \\eta (\\mathbf {x}) l (\\lambda) d \\lambda - \\int_ {\\lambda} \\rho (\\mathbf {x} , \\lambda) \\eta (\\mathbf {x}) l (\\lambda) d \\lambda}{\\zeta}. 
(22) \\\\ \\end{array}", + "image_path": "b97684f3385d0f9534212c256f77d401fe57e21129631536e8cd76d0184bc352.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 384, + 287, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 384, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 47, + 384, + 287, + 479 + ], + "type": "text", + "content": "As a continuous-valued function of wavelength, the diffuse albedo " + }, + { + "bbox": [ + 47, + 384, + 287, + 479 + ], + "type": "inline_equation", + "content": "\\rho (\\mathbf{x},\\lambda)" + }, + { + "bbox": [ + 47, + 384, + 287, + 479 + ], + "type": "text", + "content": " is infinite-dimensional, which requires further assumptions to enable tractable computations. We rely on a body of work [10, 14-16] that shows that reflectance spectra lie close to a low-dimensional subspace. Denoting the basis for this subspace as " + }, + { + "bbox": [ + 47, + 384, + 287, + 479 + ], + "type": "inline_equation", + "content": "\\Phi_{\\rho}(\\lambda) = \\{\\tilde{\\rho}_1(\\lambda),\\dots ,\\tilde{\\rho}_M(\\lambda)\\}" + }, + { + "bbox": [ + 47, + 384, + 287, + 479 + ], + "type": "text", + "content": ", we can express the diffuse albedo as [21]:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 86, + 484, + 287, + 517 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 484, + 287, + 517 + ], + "spans": [ + { + "bbox": [ + 86, + 484, + 287, + 517 + ], + "type": "interline_equation", + "content": "\\rho (\\mathbf {x}, \\lambda) = \\sum_ {m = 1} ^ {M} \\tilde {\\rho} _ {m} (\\lambda) a _ {\\mathbf {x}, m} = \\Phi_ {\\rho} (\\lambda) \\mathbf {a} _ {\\mathbf {x}} \\tag {23}", + "image_path": "b4193ae4c5d334b26752eaed396e31130c70c1a6942b0f563b30e4fadb52532f.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 523, + 286, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 523, + 286, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 523, + 286, + 548 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 523, + 286, + 548 + ], + "type": "inline_equation", + "content": "\\mathbf{a}_{\\mathbf{x}}\\in \\mathbb{R}^{M}" + }, + { + "bbox": [ + 47, + 523, + 286, + 548 + ], + "type": "text", + "content": " are the unknown coefficients of interest. This simplifies Eq. (21) and Eq. 
(22) into" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 109, + 552, + 287, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 552, + 287, + 574 + ], + "spans": [ + { + "bbox": [ + 109, + 552, + 287, + 574 + ], + "type": "interline_equation", + "content": "I _ {v} ^ {k} (\\mathbf {p}) = \\eta (\\mathbf {x}) \\mathbf {E} _ {k} ^ {T} \\frac {\\mathbf {a} _ {\\mathbf {x}}}{\\pi}, \\tag {24}", + "image_path": "9c6d3325d17c767696d2bbce58c1649e108fe8bec3a1456e9d0a0e725040cf20.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 113, + 574, + 287, + 601 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 574, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 113, + 574, + 287, + 601 + ], + "type": "interline_equation", + "content": "\\tilde {S} (\\mathbf {x}) = \\frac {\\eta (\\mathbf {x}) \\left(L - \\mathbf {F} ^ {T} \\mathbf {a} _ {\\mathbf {x}}\\right)}{\\zeta}, \\tag {25}", + "image_path": "abc2d8514a477e9f06dca30f9cb479a2880777a6ffb1932cae72beceaeadc8ec.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 606, + 286, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 606, + 286, + 618 + ], + "spans": [ + { + "bbox": [ + 47, + 606, + 286, + 618 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 606, + 286, + 618 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_k" + }, + { + "bbox": [ + 47, + 606, + 286, + 618 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 606, + 286, + 618 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 47, + 606, + 286, + 618 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 606, + 286, + 618 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 47, + 606, + 286, + 618 + ], + "type": "text", + "content": " can be computed a priori as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 102, + 632, + 287, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 632, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 102, + 632, + 287, + 712 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {E} _ {\\mathbf {k}} [ i ] = \\int_ {\\lambda} l (\\lambda) \\Gamma_ {k} (\\lambda) \\tilde {\\rho} _ {i} (\\lambda) d \\lambda , (26) \\\\ \\mathbf {F} [ i ] = \\int_ {\\lambda} l (\\lambda) \\tilde {\\rho} _ {i} (\\lambda) d \\lambda , (27) \\\\ L = \\int_ {\\lambda} l (\\lambda) d \\lambda . (28) \\\\ \\end{array}", + "image_path": "f049f2eca0a38280cfa2c28d83949cf56ae11b530651744fad61cde15fc2e6f2.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 72, + 545, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 545, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 545, + 84 + ], + "type": "text", + "content": "Whenever " + }, + { + "bbox": [ + 306, + 72, + 545, + 84 + ], + "type": "inline_equation", + "content": "\\eta (\\mathbf{x}) > 0" + }, + { + "bbox": [ + 306, + 72, + 545, + 84 + ], + "type": "text", + "content": ", Eqs. 
(24) and (25) can be written as" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 363, + 105, + 545, + 136 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 105, + 545, + 136 + ], + "spans": [ + { + "bbox": [ + 363, + 105, + 545, + 136 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\pi I _ {v} ^ {k} (\\mathbf {p}) \\xi (\\mathbf {x}) - \\mathbf {E} _ {k} ^ {T} \\mathbf {a} _ {\\mathbf {x}} = 0, \\forall k (29) \\\\ \\zeta \\tilde {S} (\\mathbf {x}) \\xi (\\mathbf {x}) + \\mathbf {F} ^ {T} \\mathbf {a} _ {\\mathbf {x}} = L, (30) \\\\ \\end{array}", + "image_path": "4e3d99bdc02ee78b3e29ebbc2ef1f014a74698a87d9cb27cc6054b0f5538d70f.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "spans": [ + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\xi (\\mathbf{x}) = 1 / \\eta (\\mathbf{x})" + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "text", + "content": ". Note that we have a system of " + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "inline_equation", + "content": "K + 1" + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "text", + "content": " linear equations with " + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "inline_equation", + "content": "M + 1" + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "text", + "content": " unknowns, namely " + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\mathbf{a}_{\\mathbf{x}}\\in \\mathbb{R}^{M}" + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\xi (\\mathbf{x})" + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "text", + "content": ". Therefore, whenever " + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "inline_equation", + "content": "K\\geq M" + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "text", + "content": ", the system of equations can be solved to obtain albedo and shading (reciprocal of " + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\xi (\\mathbf{x})" + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "text", + "content": ") at each pixel independently for complex shapes and illumination. Specifically, we use non-negative least squares solver for this problem. For most vision applications, which use a 3-channel RGB camera, we choose a corresponding basis set " + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\Phi_{\\rho}" + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "inline_equation", + "content": "M = 3" + }, + { + "bbox": [ + 304, + 146, + 545, + 312 + ], + "type": "text", + "content": ". Our theory could be used with multispectral cameras with more channels when higher fidelity in albedo is desired. While the above derivation relies on Eq. (20), it is still practically useful in many real-world scenes where inter-reflections exist as we will show in Sec. 5." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 314, + 545, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 314, + 545, + 373 + ], + "spans": [ + { + "bbox": [ + 304, + 314, + 545, + 373 + ], + "type": "text", + "content": "In the special case of a monochrome camera capturing a scene where albedo depends on wavelength, the shading at each pixel can be expressed as a weighted sum of " + }, + { + "bbox": [ + 304, + 314, + 545, + 373 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 314, + 545, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 314, + 545, + 373 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 314, + 545, + 373 + ], + "type": "text", + "content": " irrespective of the emission spectrum of the light source. We refer the reader to Appendix C for more details." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 385, + 432, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 385, + 432, + 399 + ], + "spans": [ + { + "bbox": [ + 306, + 385, + 432, + 399 + ], + "type": "text", + "content": "5. Experimental Results" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 406, + 545, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 406, + 545, + 478 + ], + "spans": [ + { + "bbox": [ + 304, + 406, + 545, + 478 + ], + "type": "text", + "content": "To validate our theory, we perform experiments on several complex scenes with challenging illumination. Our scenes are mostly diffuse, but contain noticeable non-Lambertian features that test the practical utility of our theory to real-world objects. Our emphasis is on estimating the absorbed light intensity and performing albedo-shading separation." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "text", + "content": "Hardware Details: Our imaging system consists of an IDS UI-3130CP color camera with " + }, + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "inline_equation", + "content": "600 \\times 800" + }, + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "text", + "content": " resolution fitted with an 8mm Tamron lens, a FLIR Boson thermal camera having " + }, + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "inline_equation", + "content": "\\leq 50\\mathrm{mK}" + }, + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "text", + "content": " NETD with " + }, + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "inline_equation", + "content": "512 \\times 640" + }, + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "text", + "content": " resolution fitted with an " + }, + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "inline_equation", + "content": "18\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "inline_equation", + "content": "24^{\\circ}" + }, + { + "bbox": [ + 304, + 494, + 545, + 662 + ], + "type": "text", + "content": " HFOV) integrated lens and a BSP-DI-25-2 gold dichroic beamsplitter from ISP Optics. The cameras are coarsely aligned using an optic stage and a homography is used for fine alignment. 
We use LED lights from Advanced Illumination, namely a high intensity line light (LL167G96-WHI), a large spot light (SL-S100150M-WHI) and two small spot lights (SL-S050075M-WHI). The relative emission spectrum of the lighting and the spectral response of the color filter array in the visible camera were obtained from their technical datasheets, see Fig. 2." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "Data Capture and Preprocessing: The visible camera was radiometrically calibrated [12]. To capture the full dynamic range of the illumination, we acquired a stack of 15" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11928" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 70, + 164, + 148 + ], + "blocks": [ + { + "bbox": [ + 52, + 70, + 164, + 148 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 164, + 148 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 164, + 148 + ], + "type": "image", + "image_path": "f1d4c0d4ec26f8e7fee03197ee99785321cd600478e261a2dddd363388ea4883.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 76, + 154, + 140, + 163 + ], + "lines": [ + { + "bbox": [ + 76, + 154, + 140, + 163 + ], + "spans": [ + { + "bbox": [ + 76, + 154, + 140, + 163 + ], + "type": "text", + "content": "(a) Imaging System" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 168, + 70, + 282, + 152 + ], + "blocks": [ + { + "bbox": [ + 168, + 70, + 282, + 152 + ], + "lines": [ + { + "bbox": [ + 168, + 70, + 282, + 152 + ], + "spans": [ + { + "bbox": [ + 168, + 70, + 282, + 152 + ], + "type": "image", + "image_path": "b76c355e89b035504019c0b5c0411c973af1556c03c0e7d7e0c5897d251854ca.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 53, + 168, + 281, + 264 + ], + "blocks": [ + { + "bbox": [ + 188, + 154, + 264, + 163 + ], + "lines": [ + { + "bbox": [ + 188, + 154, + 264, + 163 + ], + "spans": [ + { + "bbox": [ + 188, + 154, + 264, + 163 + ], + "type": "text", + "content": "(b) Experimental Setup" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 53, + 168, + 281, + 264 + ], + "lines": [ + { + "bbox": [ + 53, + 168, + 281, + 264 + ], + "spans": [ + { + "bbox": [ + 53, + 168, + 281, + 264 + ], + "type": "image", + "image_path": "ec679174dfd5ba476b1b5565957e9900a46a69d21467cd7a810a404bb2bfa6fb.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 103, + 272, + 231, + 281 + ], + "lines": [ + { + "bbox": [ + 103, + 272, + 231, + 281 + ], + "spans": [ + { + "bbox": [ + 103, + 272, + 231, + 281 + ], + "type": "text", + "content": "(c) Spectral properties of light and CFA" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 291, + 287, + 380 + ], + "lines": [ + { + "bbox": [ + 46, + 291, 
+ 287, + 380 + ], + "spans": [ + { + "bbox": [ + 46, + 291, + 287, + 380 + ], + "type": "text", + "content": "Figure 2. Our imaging system consists of a visible camera and a thermal camera colocated using a gold dichroic beamsplitter. The light sources are placed close to the target scene so that the rise in temperature due to light absorption is detectable in the thermal camera. All the light sources have the same emission spectrum. The relative emission spectrum of the white LED and the quantum efficiency curves of the Color Filter Array are obtained from the corresponding datasheets." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 387, + 287, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 387, + 287, + 482 + ], + "spans": [ + { + "bbox": [ + 46, + 387, + 287, + 482 + ], + "type": "text", + "content": "images in BayerRG format with a geometric progression of exposures that span " + }, + { + "bbox": [ + 46, + 387, + 287, + 482 + ], + "type": "inline_equation", + "content": "0.05\\mathrm{ms}" + }, + { + "bbox": [ + 46, + 387, + 287, + 482 + ], + "type": "text", + "content": " to over " + }, + { + "bbox": [ + 46, + 387, + 287, + 482 + ], + "type": "inline_equation", + "content": "180\\mathrm{ms}" + }, + { + "bbox": [ + 46, + 387, + 287, + 482 + ], + "type": "text", + "content": ". All the images were demosaiced using gradient-corrected linear interpolation [26] and subsequently quantized to 8-bit images. The resulting LDR images were composited into a single linear HDR image using the previously estimated camera response function. Finally, this image is warped into the perspective of the thermal camera using a homography." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 483, + 287, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 483, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 46, + 483, + 287, + 613 + ], + "type": "text", + "content": "The thermal camera is allowed to reach steady state operating temperature after powering on, which can take up to 30mins. The entire experimental setup including the target object is allowed to reach thermal equilibrium before beginning data collection. A flat field correction is performed a few seconds prior to turning on the light and a thermal video is recorded. The thermal camera is operated in the high gain state and the raw 16-bit data is captured at " + }, + { + "bbox": [ + 46, + 483, + 287, + 613 + ], + "type": "inline_equation", + "content": "60\\mathrm{Hz}" + }, + { + "bbox": [ + 46, + 483, + 287, + 613 + ], + "type": "text", + "content": ". Both the thermal images and the warped HDR image were downsized " + }, + { + "bbox": [ + 46, + 483, + 287, + 613 + ], + "type": "inline_equation", + "content": "4\\times" + }, + { + "bbox": [ + 46, + 483, + 287, + 613 + ], + "type": "text", + "content": " using local mean computation to suppress noise and alleviate errors in co-location." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "content": "Implementation Details: We manually identify the first frame when light was turned on and use the pixel-wise median of the preceding frames as the initial frame " + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "inline_equation", + "content": "I_{1}" + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "content": ". We use 200 frames since light was turned on for fitting the 2-parameter curve. We implement the curve fitting using gradient descent in PyTorch. We consider a 3 dimensional basis set for albedo with " + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\tilde{\\rho}_b(\\lambda) = \\mathbb{I}[400\\mathrm{nm}\\leq \\lambda < 530\\mathrm{nm}]" + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 73, + 465, + 224 + ], + "blocks": [ + { + "bbox": [ + 310, + 73, + 465, + 224 + ], + "lines": [ + { + "bbox": [ + 310, + 73, + 465, + 224 + ], + "spans": [ + { + "bbox": [ + 310, + 73, + 465, + 224 + ], + "type": "image", + "image_path": "14d514e0ab82c0a77f70916765292b0c25ec48a0367dc05481b04fb977b951d2.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 467, + 74, + 542, + 225 + ], + "blocks": [ + { + "bbox": [ + 467, + 74, + 542, + 225 + ], + "lines": [ + { + "bbox": [ + 467, + 74, + 542, + 225 + ], + "spans": [ + { + "bbox": [ + 467, + 74, + 542, + 225 + ], + "type": "image", + "image_path": "a1703a5c43210ea2f119a94c3be21e3429608753fb1611508ecd48597ed3c521.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "lines": [ + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "spans": [ + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "type": "text", + "content": "Figure 3. The first column shows change in intensity at " + }, + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "type": "inline_equation", + "content": "0.1s" + }, + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "type": "text", + "content": " and at " + }, + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "type": "inline_equation", + "content": "3s" + }, + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "type": "text", + "content": " after turning on the lights. Note that we process each pixel independently. Two points of significance are marked in blue and red respectively. The middle column shows the curve fitting results for the highlighted points. The input intensities are shown as dots and the estimated intensities are shown as a continuous curves. The dashed lines correspond to the steady-state intensity that would be reached and equals " + }, + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "type": "inline_equation", + "content": "c_{1}" + }, + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "type": "text", + "content": " from Eq. (10). 
The last column shows " + }, + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "type": "inline_equation", + "content": "c_{1}" + }, + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "type": "text", + "content": " for each pixel as a 2D image and a histogram of " + }, + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "type": "inline_equation", + "content": "c_{2}" + }, + { + "bbox": [ + 304, + 235, + 545, + 334 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "spans": [ + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "inline_equation", + "content": "\\tilde{\\rho}_g(\\lambda) = \\mathbb{I}[530\\mathrm{nm}\\leq \\lambda < 620\\mathrm{nm}]" + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "inline_equation", + "content": "\\tilde{\\rho}_r(\\lambda) = \\mathbb{I}[620\\mathrm{nm}\\leq \\lambda < 1100\\mathrm{nm}]" + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "inline_equation", + "content": "\\mathbb{I}[\\cdot ]" + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "text", + "content": " is the indicator function. Please refer to Appendix D for details of " + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_k" + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "text", + "content": ". The relative factor " + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "text", + "content": " can be calibrated for a co-located imaging system with a color chart under controlled lighting. Alternatively, " + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 304, + 339, + 545, + 422 + ], + "type": "text", + "content": " can be treated as a hyper-parameter and tuned using cross-validation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 430, + 473, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 430, + 473, + 441 + ], + "spans": [ + { + "bbox": [ + 306, + 430, + 473, + 441 + ], + "type": "text", + "content": "5.1. Heat Source Estimation results" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 449, + 545, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 545, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 545, + 545 + ], + "type": "text", + "content": "Figure 3 shows the result of our curve fitting for the wooden blocks scene. 
The estimated constants " + }, + { + "bbox": [ + 304, + 449, + 545, + 545 + ], + "type": "inline_equation", + "content": "c_{2}" + }, + { + "bbox": [ + 304, + 449, + 545, + 545 + ], + "type": "text", + "content": ", which is proportional to heat capacity, appear like white noise and its corresponding histogram plot resembles a Gaussian distribution. This could be due to high levels of noise in thermal videos as well as similar magnitudes of spatial variation in " + }, + { + "bbox": [ + 304, + 449, + 545, + 545 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 304, + 449, + 545, + 545 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 449, + 545, + 545 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 304, + 449, + 545, + 545 + ], + "type": "text", + "content": ". On the other hand, the estimated per-pixel constants " + }, + { + "bbox": [ + 304, + 449, + 545, + 545 + ], + "type": "inline_equation", + "content": "c_{1}" + }, + { + "bbox": [ + 304, + 449, + 545, + 545 + ], + "type": "text", + "content": " have visual similarity to a shading image, although noisy." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 552, + 491, + 564 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 552, + 491, + 564 + ], + "spans": [ + { + "bbox": [ + 306, + 552, + 491, + 564 + ], + "type": "text", + "content": "5.2. Albedo-Shading separation results" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": "Quantitative Evaluation: For comparison, we chose two methods: (i) the classical, even if dated, Retinex algorithm [23], which is well-suited for the color chart scene. (ii) Ordinal Shading [7], a SOTA learning-based approach which requires a large training dataset. We use the pretrained model here. We use the scale-invariant Mean Squared Error (si-MSE) from [19] as our metric. It is hard to obtain ground truth albedo and shading for general scenes under unknown lighting. And publicly available datasets do not have co-located thermal videos. Therefore, we first evaluate albedo using the color chart under 4 different illuminations. As shown in Fig. 4, our albedo estimates are" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "11929" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 71, + 286, + 135 + ], + "blocks": [ + { + "bbox": [ + 49, + 71, + 286, + 135 + ], + "lines": [ + { + "bbox": [ + 49, + 71, + 286, + 135 + ], + "spans": [ + { + "bbox": [ + 49, + 71, + 286, + 135 + ], + "type": "image", + "image_path": "e54b0bed28940c1da2f4742f7d969de2b28839f5e6987cc382d872e0254bbdb9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 144, + 286, + 166 + ], + "lines": [ + { + "bbox": [ + 47, + 144, + 286, + 166 + ], + "spans": [ + { + "bbox": [ + 47, + 144, + 286, + 166 + ], + "type": "text", + "content": "Figure 4. 
The first column is the mean value across colors (Ours: 0.020, Retinex: 0.034, Ordinal Shading: 0.080)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 55, + 172, + 281, + 283 + ], + "blocks": [ + { + "bbox": [ + 55, + 172, + 281, + 283 + ], + "lines": [ + { + "bbox": [ + 55, + 172, + 281, + 283 + ], + "spans": [ + { + "bbox": [ + 55, + 172, + 281, + 283 + ], + "type": "image", + "image_path": "da4288e7f27ce649a8b54c015ffcf88b8cbace8b0dde12496e0f68d23619fbe5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 293, + 286, + 324 + ], + "lines": [ + { + "bbox": [ + 46, + 293, + 286, + 324 + ], + "spans": [ + { + "bbox": [ + 46, + 293, + 286, + 324 + ], + "type": "text", + "content": "Figure 5. Our method operates per-pixel while other methods use hand-crafted or learnt spatial priors. Note the residual albedo in their estimated shading (images brightened for visualization)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 52, + 331, + 282, + 370 + ], + "blocks": [ + { + "bbox": [ + 52, + 331, + 282, + 370 + ], + "lines": [ + { + "bbox": [ + 52, + 331, + 282, + 370 + ], + "spans": [ + { + "bbox": [ + 52, + 331, + 282, + 370 + ], + "type": "table", + "html": "
<table><tr><td></td><td>Ours</td><td>RGB-Retinex</td><td>Ordinal Shading</td></tr><tr><td>Albedo</td><td>0.084</td><td>0.253</td><td>0.399</td></tr><tr><td>Shading</td><td>0.0005</td><td>0.0030</td><td>0.0080</td></tr></table>
", + "image_path": "0babdced361e533b830845241e6801e159cb203d03ed76c12f6930124ebacf41.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 378, + 286, + 401 + ], + "lines": [ + { + "bbox": [ + 47, + 378, + 286, + 401 + ], + "spans": [ + { + "bbox": [ + 47, + 378, + 286, + 401 + ], + "type": "text", + "content": "Table 2. si-MSE values for Albedo and Shading using pseudo ground truth data obtained for the painted mask scene." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 48, + 407, + 286, + 475 + ], + "blocks": [ + { + "bbox": [ + 48, + 407, + 286, + 475 + ], + "lines": [ + { + "bbox": [ + 48, + 407, + 286, + 475 + ], + "spans": [ + { + "bbox": [ + 48, + 407, + 286, + 475 + ], + "type": "image", + "image_path": "3b6c77f5e2321cc61d72420270513ed39686e7763a1a31541994e090a8f41085.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 484, + 286, + 518 + ], + "lines": [ + { + "bbox": [ + 46, + 484, + 286, + 518 + ], + "spans": [ + { + "bbox": [ + 46, + 484, + 286, + 518 + ], + "type": "text", + "content": "Figure 6. (a) Curve fitting results of the same pixel for different video lengths. (b) Albedo error (against color chart ground truth) vs. length of input video." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 526, + 220, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 526, + 220, + 537 + ], + "spans": [ + { + "bbox": [ + 47, + 526, + 220, + 537 + ], + "type": "text", + "content": "significantly better than the other methods." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 539, + 286, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 539, + 286, + 598 + ], + "spans": [ + { + "bbox": [ + 46, + 539, + 286, + 598 + ], + "type": "text", + "content": "Next, we obtain pseudo-ground truth similar to [19] i.e. with a scene painted white (ground truth shading) and re-painted with texture. Fig. 5 and Tab. 2 illustrate that our method outperforms SOTA methods both qualitatively and quantitatively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "content": "Ablation on length of video: Longer duration leads to higher spatial thermal gradients that induce more conduction while shorter duration has lower signal to noise ratio. As seen in Fig. 6a, the curve deviates further away from the initial measurements when using longer videos. The accuracy of the fit correlates with the accuracy of albedo estimate for the color chart (see Fig. 6b). Our experiments use 200 frames which corresponds to the green plots." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "content": "Qualitative evaluation: Figure 7 summarizes the albedo shading separation results for the four target scenes. As shown in the first two rows, we are given a HDR image from the visible camera and the corresponding absorbed light intensity is estimated from a thermal video using curve fitting as discussed earlier. 
And the last two rows show results that validate Eqs. (30) which are derived for general functions of albedo and camera response with wavelength." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 168, + 545, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 168, + 545, + 322 + ], + "spans": [ + { + "bbox": [ + 304, + 168, + 545, + 322 + ], + "type": "text", + "content": "In the first scene, the interior of a mask is painted with white and black acrylic paints and the line light is directed at the portion of the image painted white. As highlighted in the callout, the concave portion corresponding to nose appears flat in the estimated albedo image for both the monochrome and RGB cases. Note that the temperature of the background wall does not raise sufficiently in all of the scenes, which makes it challenging for our approach. The thick wall would also have a high heat capacity which exacerbates the challenge. In the second scene, a cardboard sheet with printing on one side is folded to resemble the shape of W. The inner V groove would have inter-reflections while the outer faces are convex." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 323, + 545, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 323, + 545, + 467 + ], + "spans": [ + { + "bbox": [ + 304, + 323, + 545, + 467 + ], + "type": "text", + "content": "In the third scene, a collection of solid colored wooden blocks are stacked into a complex geometry with both cast and attached shadows. This result indirectly shows that ignoring heat conduction for solid objects still allows one to recover the absorbed light intensity precisely. In the final scene, we use a stack of disks made of soft plastic. Different patterns are embossed onto the circumference of the disk. As highlighted in the callout, the shape information corresponding to the embossing is correctly separated into the shading term while the albedo term appears flat. These results demonstrate the broad applicability of our theory to everyday scenes with complex shapes and illumination." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 480, + 545, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 480, + 545, + 577 + ], + "spans": [ + { + "bbox": [ + 304, + 480, + 545, + 577 + ], + "type": "text", + "content": "Grayscale approximation: Fig. 8 shows the estimated albedo and shading using grayscale approximation (Eqs. (16) and (17)). Recall that the grayscale approximation does not require knowledge of the emission spectrum of the light sources and the estimated shading is similar to that using Eqs (30). The monochrome image is approximated by taking the mean value across color channels. Corresponding results for all the scenes are provided in Appendix E." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 586, + 378, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 586, + 378, + 597 + ], + "spans": [ + { + "bbox": [ + 306, + 586, + 378, + 597 + ], + "type": "text", + "content": "6. 
Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "type": "text", + "content": "This paper studies the theoretical connection between light transport in visible spectrum, heat transport in solids and light transport in the thermal infrared spectrum. We proved that having an estimate of absorbed light turns single image intrinsic image decomposition into a well-posed problem for arbitrary shape and illumination for lambertian scenes. To estimate absorbed light, we derive an analytical expression for surfaces with negligible heat conduction by modeling heat transport immediately after turning on illumination." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11930" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 87, + 196, + 175 + ], + "blocks": [ + { + "bbox": [ + 115, + 77, + 167, + 86 + ], + "lines": [ + { + "bbox": [ + 115, + 77, + 167, + 86 + ], + "spans": [ + { + "bbox": [ + 115, + 77, + 167, + 86 + ], + "type": "text", + "content": "Mask Interior" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 70, + 87, + 196, + 175 + ], + "lines": [ + { + "bbox": [ + 70, + 87, + 196, + 175 + ], + "spans": [ + { + "bbox": [ + 70, + 87, + 196, + 175 + ], + "type": "image", + "image_path": "748958ee1572981de832371edede1391bb1170343ed8ef197ff522880b248763.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 202, + 87, + 312, + 175 + ], + "blocks": [ + { + "bbox": [ + 231, + 76, + 283, + 86 + ], + "lines": [ + { + "bbox": [ + 231, + 76, + 283, + 86 + ], + "spans": [ + { + "bbox": [ + 231, + 76, + 283, + 86 + ], + "type": "text", + "content": "W-Cardboard" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 202, + 87, + 312, + 175 + ], + "lines": [ + { + "bbox": [ + 202, + 87, + 312, + 175 + ], + "spans": [ + { + "bbox": [ + 202, + 87, + 312, + 175 + ], + "type": "image", + "image_path": "4e656e075a1fe093ebb4783dd16ff7451ffc74806375925846c3f5ccc66ab178.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 318, + 87, + 429, + 175 + ], + "blocks": [ + { + "bbox": [ + 343, + 76, + 404, + 86 + ], + "lines": [ + { + "bbox": [ + 343, + 76, + 404, + 86 + ], + "spans": [ + { + "bbox": [ + 343, + 76, + 404, + 86 + ], + "type": "text", + "content": "Wooden Blocks" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 318, + 87, + 429, + 175 + ], + "lines": [ + { + "bbox": [ + 318, + 87, + 429, + 175 + ], + "spans": [ + { + "bbox": [ + 318, + 87, + 429, + 175 + ], + "type": "image", + "image_path": "24473dc35157725c072e77362f948c7e610aa888c288fa0cbd01765990685544.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 435, + 88, + 547, + 175 + ], + "blocks": [ + { + "bbox": [ + 471, + 
76, + 508, + 87 + ], + "lines": [ + { + "bbox": [ + 471, + 76, + 508, + 87 + ], + "spans": [ + { + "bbox": [ + 471, + 76, + 508, + 87 + ], + "type": "text", + "content": "Soft Toys" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 435, + 88, + 547, + 175 + ], + "lines": [ + { + "bbox": [ + 435, + 88, + 547, + 175 + ], + "spans": [ + { + "bbox": [ + 435, + 88, + 547, + 175 + ], + "type": "image", + "image_path": "59403cee9a05a9d1b5f000a02153f471102c5d9d04da1a6c5747e15ecfb83c8b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 86, + 181, + 196, + 269 + ], + "blocks": [ + { + "bbox": [ + 86, + 181, + 196, + 269 + ], + "lines": [ + { + "bbox": [ + 86, + 181, + 196, + 269 + ], + "spans": [ + { + "bbox": [ + 86, + 181, + 196, + 269 + ], + "type": "image", + "image_path": "fa75343ba09fc64648728a7764a4a8010a8060fa86089e6dabe7fddde88b8897.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 203, + 181, + 312, + 269 + ], + "blocks": [ + { + "bbox": [ + 203, + 181, + 312, + 269 + ], + "lines": [ + { + "bbox": [ + 203, + 181, + 312, + 269 + ], + "spans": [ + { + "bbox": [ + 203, + 181, + 312, + 269 + ], + "type": "image", + "image_path": "d54763d13abba4ef5a1087bbc8f29013ed9bb6e751fe53569cfd864d59913795.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 318, + 181, + 429, + 269 + ], + "blocks": [ + { + "bbox": [ + 318, + 181, + 429, + 269 + ], + "lines": [ + { + "bbox": [ + 318, + 181, + 429, + 269 + ], + "spans": [ + { + "bbox": [ + 318, + 181, + 429, + 269 + ], + "type": "image", + "image_path": "4e8ebc5500c6d741f4aecd220a1d4e17cd4211a255027a793baff16ba3173f63.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 435, + 181, + 546, + 269 + ], + "blocks": [ + { + "bbox": [ + 435, + 181, + 546, + 269 + ], + "lines": [ + { + "bbox": [ + 435, + 181, + 546, + 269 + ], + "spans": [ + { + "bbox": [ + 435, + 181, + 546, + 269 + ], + "type": "image", + "image_path": "aa8901c22158f65472bc64582400167696fcac57b92866e1c41d70fa17670608.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 70, + 273, + 196, + 360 + ], + "blocks": [ + { + "bbox": [ + 52, + 167, + 63, + 435 + ], + "lines": [ + { + "bbox": [ + 52, + 167, + 63, + 435 + ], + "spans": [ + { + "bbox": [ + 52, + 167, + 63, + 435 + ], + "type": "text", + "content": "RGB Intrinsic Image Decomposition Input" + } + ] + } + ], + "index": 0, + "angle": 270, + "type": "image_caption" + }, + { + "bbox": [ + 70, + 273, + 196, + 360 + ], + "lines": [ + { + "bbox": [ + 70, + 273, + 196, + 360 + ], + "spans": [ + { + "bbox": [ + 70, + 273, + 196, + 360 + ], + "type": "image", + "image_path": "c2ecb21f35cde1e9af35ff684b2d79c03a800cd5f25d3e72b40e88291fcb2b5f.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 203, + 272, + 312, + 360 + ], + "blocks": [ + { + "bbox": [ + 203, + 272, + 312, + 360 + ], + "lines": [ + { + "bbox": [ + 203, + 272, + 312, + 360 + ], + "spans": [ + { + "bbox": [ + 203, + 272, + 312, + 360 + ], + "type": "image", + "image_path": 
"d31d8e94c7be6eb65300047a9764c4cb7d5b8be03c7268a97a301159848d3906.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 318, + 272, + 429, + 361 + ], + "blocks": [ + { + "bbox": [ + 318, + 272, + 429, + 361 + ], + "lines": [ + { + "bbox": [ + 318, + 272, + 429, + 361 + ], + "spans": [ + { + "bbox": [ + 318, + 272, + 429, + 361 + ], + "type": "image", + "image_path": "5019005503701ca292aaacb0ba9709ad400d1a452bc2d4c0c0495cfef8e551aa.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 435, + 273, + 546, + 361 + ], + "blocks": [ + { + "bbox": [ + 435, + 273, + 546, + 361 + ], + "lines": [ + { + "bbox": [ + 435, + 273, + 546, + 361 + ], + "spans": [ + { + "bbox": [ + 435, + 273, + 546, + 361 + ], + "type": "image", + "image_path": "4e5642cb804e94c47e5f5663aa97b3882d2736a95d70c7d0d3f5c5a3d0f7b85c.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 71, + 368, + 196, + 454 + ], + "blocks": [ + { + "bbox": [ + 71, + 368, + 196, + 454 + ], + "lines": [ + { + "bbox": [ + 71, + 368, + 196, + 454 + ], + "spans": [ + { + "bbox": [ + 71, + 368, + 196, + 454 + ], + "type": "image", + "image_path": "d1f4ac47963f96280d53698a210a512ed88435543e72ed4d8f0a8862a09fdd1c.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 203, + 368, + 312, + 454 + ], + "blocks": [ + { + "bbox": [ + 203, + 368, + 312, + 454 + ], + "lines": [ + { + "bbox": [ + 203, + 368, + 312, + 454 + ], + "spans": [ + { + "bbox": [ + 203, + 368, + 312, + 454 + ], + "type": "image", + "image_path": "8a74eea425aad3e8e20075c4c5061b772f76a7a6dd0c76304a031aeb3dd58a97.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 319, + 368, + 429, + 454 + ], + "blocks": [ + { + "bbox": [ + 319, + 368, + 429, + 454 + ], + "lines": [ + { + "bbox": [ + 319, + 368, + 429, + 454 + ], + "spans": [ + { + "bbox": [ + 319, + 368, + 429, + 454 + ], + "type": "image", + "image_path": "4f00928a1256f4043eb97a011313e612a83b072fc9c5c321c0f47acf372292bc.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 435, + 368, + 546, + 454 + ], + "blocks": [ + { + "bbox": [ + 435, + 368, + 546, + 454 + ], + "lines": [ + { + "bbox": [ + 435, + 368, + 546, + 454 + ], + "spans": [ + { + "bbox": [ + 435, + 368, + 546, + 454 + ], + "type": "image", + "image_path": "5890a8cb26d8397bc5329cd9f2f4d468603b7e2e0b618f281581ef259eca8fbe.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 50, + 516, + 287, + 620 + ], + "blocks": [ + { + "bbox": [ + 46, + 467, + 546, + 514 + ], + "lines": [ + { + "bbox": [ + 46, + 467, + 546, + 514 + ], + "spans": [ + { + "bbox": [ + 46, + 467, + 546, + 514 + ], + "type": "text", + "content": "Figure 7. The first row shows the HDR visible image (brightened for visualization). Note that the colorchart is not an input to our method. The second row shows the estimated heat source intensity (turbo colormap) obtained using the method in Sec. 3. The last two rows correspond to solving Eqs. (30) using non-negative least squares method. 
The estimated albedo is clipped to the range [0, 1]. The callouts for the visible image, heat source intensity, and shading are normalized individually to aid visualization." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 50, + 516, + 287, + 620 + ], + "lines": [ + { + "bbox": [ + 50, + 516, + 287, + 620 + ], + "spans": [ + { + "bbox": [ + 50, + 516, + 287, + 620 + ], + "type": "image", + "image_path": "51a8753d21f4a0b6f7943684a6d624e1749425dd6e510f62df44b2da8b099270.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 628, + 287, + 651 + ], + "lines": [ + { + "bbox": [ + 46, + 628, + 287, + 651 + ], + "spans": [ + { + "bbox": [ + 46, + 628, + 287, + 651 + ], + "type": "text", + "content": "Figure 8. Albedo-Shading result for the soft toys scene using the grayscale approximation." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": "Experiments showed that albedo and shading can be measured from a single view given a visible image and a short thermal video from a co-located imaging system." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 518, + 547, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 518, + 547, + 627 + ], + "spans": [ + { + "bbox": [ + 304, + 518, + 547, + 627 + ], + "type": "text", + "content": "Just like we have shown an example of how modeling heat transport can help solve challenges in visible light transport, we believe research in visible light transport can help Infrared Thermography by improving accuracy of temperature measurement or observing heat transfer within inhomogeneous surfaces. Extending our theory to the full light transport, including general BRDFs, translucent materials and subsurface scattering are just a few of the exciting new directions that this research opens up." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 306, + 643, + 409, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 643, + 409, + 657 + ], + "spans": [ + { + "bbox": [ + 306, + 643, + 409, + 657 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "This work was partly supported by NSF grants IIS-2107236, CCF-1730147, and NSF-NIFA AI Institute for Resilient Agriculture. The authors would like to thank Mark Sheinin for helpful discussions." 
+ } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11931" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "text", + "content": "[1] Jonathan T Barron and Jitendra Malik. Shape, illumination, and reflectance from shading. IEEE transactions on pattern analysis and machine intelligence, 37(8):1670-1687, 2014. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 136, + 287, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 136, + 287, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 136, + 287, + 190 + ], + "type": "text", + "content": "[2] Anil S Baslamisli, Hoang-An Le, and Theo Gevers. Cnn based learning using reflection and retina models for intrinsic image decomposition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6674-6683, 2018. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 191, + 287, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 191, + 287, + 223 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 287, + 223 + ], + "type": "text", + "content": "[3] Peter N Belhumeur, David J Kriegman, and Alan L Yuille. The bas-relief ambiguity. International journal of computer vision, 35(1):33-44, 1999. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 224, + 287, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 287, + 256 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 287, + 256 + ], + "type": "text", + "content": "[4] Sean Bell, Kavita Bala, and Noah Snavely. Intrinsic images in the wild. ACM Transactions on Graphics (TOG), 33(4): 1-12, 2014. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 258, + 287, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 287, + 279 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 287, + 279 + ], + "type": "text", + "content": "[5] Theodore L. Bergman. Introduction to Heat Transfer. Wiley, 2011. 1, 2, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 281, + 287, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 281, + 287, + 323 + ], + "spans": [ + { + "bbox": [ + 53, + 281, + 287, + 323 + ], + "type": "text", + "content": "[6] Nicolas Bonneel, Balazs Kovacs, Sylvain Paris, and Kavita Bala. Intrinsic decompositions for image editing. In Computer Graphics Forum, pages 593-609. Wiley Online Library, 2017. 
1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 325, + 287, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 325, + 287, + 357 + ], + "spans": [ + { + "bbox": [ + 53, + 325, + 287, + 357 + ], + "type": "text", + "content": "[7] Chris Careaga and Yaqiz Aksoy. Intrinsic image decomposition via ordinal shading. ACM Transactions on Graphics, 43 (1):1-24, 2023. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 358, + 287, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 358, + 287, + 401 + ], + "spans": [ + { + "bbox": [ + 53, + 358, + 287, + 401 + ], + "type": "text", + "content": "[8] Robert Carroll, Ravi Ramamoorthi, and Maneesh Agrawala. Illumination decomposition for material recoloring with consistent interreflections. ACM Trans. Graph., 30(4):43, 2011. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 403, + 287, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 403, + 287, + 446 + ], + "spans": [ + { + "bbox": [ + 53, + 403, + 287, + 446 + ], + "type": "text", + "content": "[9] Jason Chang, Randi Cabezas, and John W Fisher III. Bayesian nonparametric intrinsic image decomposition. In European conference on computer vision, pages 704-719. Springer, 2014. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 448, + 287, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 448, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 48, + 448, + 287, + 491 + ], + "type": "text", + "content": "[10] Hamilton Y Chong, Steven J Gortler, and Todd Zickler. The von kries hypothesis and a basis for color constancy. In 2007 IEEE 11th International Conference on Computer Vision, pages 1-8. IEEE, 2007. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 492, + 287, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 492, + 287, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 492, + 287, + 557 + ], + "type": "text", + "content": "[11] Aniket Dashpute, Vishwanath Saragadam, Emma Alexander, Florian Willomitzer, Aggelos Katsaggelos, Ashok Veeraraghavan, and Oliver Cossairt. Thermal spread functions (tsf): Physics-guided material classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1641-1650, 2023. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 559, + 287, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 287, + 613 + ], + "type": "text", + "content": "[12] Paul E. Debevec and Jitendra Malik. Recovering high dynamic range radiance maps from photographs. In Proceedings of the 24th Annual Conference on Computer Graphics and Interactive Techniques, page 369-378, USA, 1997. ACM Press/Addison-Wesley Publishing Co. 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "type": "text", + "content": "[13] Sylvain Duchéne, Clement Riant, Gaurav Chaurasia, Jorge Lopez-Moreno, Pierre-Yves Laffont, Stefan Popov, Adrien Bousseau, and George Drettakis. Multi-view intrinsic images of outdoors scenes with an application to relighting. ACM Transactions on Graphics, page 16, 2015. 
1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "text", + "content": "[14] Graham D Finlayson, Mark S Drew, and Brian V Funt. Color constancy: enhancing von kries adaption via sensor transformations. In Human Vision, Visual Processing, and Digital Display IV, pages 473-484, 1993. 5" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "type": "text", + "content": "[15] Graham D Finlayson, Mark S Drew, and Brian V Funt. Diagonal transforms suffice for color constancy. In IEEE International Conference on Computer Vision, pages 164-171, 1993." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "type": "text", + "content": "[16] Graham Fyffe, Xueming Yu, and Paul Debevec. Single-shot photometric stereo by spectral multiplexing. In IEEE International Conference on Computational Photography (ICCP), 2011. 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 163, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 194 + ], + "type": "text", + "content": "[17] Rikke Gade and Thomas B Moeslund. Thermal cameras and applications: a survey. Machine vision and applications, 25: 245-262, 2014. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 197, + 545, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 545, + 240 + ], + "type": "text", + "content": "[18] Elena Garces, Carlos Rodriguez-Pardo, Dan Casas, and Jorge Lopez-Moreno. A survey on intrinsic images: Delving deep into lambert and beyond. International Journal of Computer Vision, 130(3):836-868, 2022. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "type": "text", + "content": "[19] Roger Grosse, Micah K Johnson, Edward H Adelson, and William T Freeman. Ground truth dataset and baseline evaluations for intrinsic image algorithms. In 2009 IEEE 12th International Conference on Computer Vision, pages 2335-2342. IEEE, 2009. 6, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 297, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 297, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 297, + 545, + 319 + ], + "type": "text", + "content": "[20] Berthold KP Horn and Michael J Brooks. Shape from shading. MIT press, 1989. 
1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 321, + 545, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 321, + 545, + 374 + ], + "spans": [ + { + "bbox": [ + 307, + 321, + 545, + 374 + ], + "type": "text", + "content": "[21] Zhuo Hui, Kalyan Sunkavalli, Sunil Hadap, and Aswin C Sankaranarayanan. Illuminant spectra-based source separation using flash photography. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6209-6218, 2018. 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 376, + 545, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 376, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 545, + 431 + ], + "type": "text", + "content": "[22] Soonmin Hwang, Jaesik Park, Namil Kim, Yukyung Choi, and In So Kweon. Multispectral pedestrian detection: Benchmark dataset and baseline. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1037-1045, 2015. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 433, + 545, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 433, + 545, + 454 + ], + "spans": [ + { + "bbox": [ + 307, + 433, + 545, + 454 + ], + "type": "text", + "content": "[23] Edwin H Land and John J McCann. Lightness and retinae theory. Josa, 61(1):1-11, 1971. 1, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 456, + 545, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 545, + 488 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 545, + 488 + ], + "type": "text", + "content": "[24] Qiao Liu, Zhenyu He, Xin Li, and Yuan Zheng. Ptb-tir: A thermal infrared pedestrian tracking benchmark. IEEE Transactions on Multimedia, 22(3):666-675, 2019. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 490, + 545, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 490, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 307, + 490, + 545, + 521 + ], + "type": "text", + "content": "[25] Yawen Lu and Guoyu Lu. Superthermal: Matching thermal as visible through thermal feature exploration. IEEE Robotics and Automation Letters, 6(2):2690-2697, 2021. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 523, + 545, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 523, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 307, + 523, + 545, + 578 + ], + "type": "text", + "content": "[26] Henrique S Malvar, Li-wei He, and Ross Cutler. High-quality linear interpolation for demosaicing of bayer-patterned color images. In 2004 IEEE International Conference on Acoustics, Speech, and Signal Processing, pages iii-485. IEEE, 2004. 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 579, + 545, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 579, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 307, + 579, + 545, + 611 + ], + "type": "text", + "content": "[27] Shree K Nayar, Katsushi Ikeuchi, and Takeo Kanade. Shape from interreflections. International Journal of Computer Vision, 6:173-195, 1991. 
1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 613, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 656 + ], + "type": "text", + "content": "[28] Manikandasriram Srinivasan Ramanagopal, Zixu Zhang, Ram Vasudevan, and Matthew Johnson-Roberson. Pixelwise motion deblurring of thermal videos. arXiv preprint arXiv:2006.04973, 2020. 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 658, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 713 + ], + "type": "text", + "content": "[29] Vishwanath Saragadam, Akshit Dave, Ashok Veeraraghavan, and Richard G. Baraniuk. Thermal image processing via physics-inspired deep networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops, pages 4057-4065, 2021. 3" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "11932" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 453 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[30] Steven M Seitz, Yasuyuki Matsushita, and Kiriakos N Kutulakos. A theory of inverse light transport. In Tenth IEEE International Conference on Computer Vision (ICCV'05) Volume 1, pages 1440-1447. IEEE, 2005. 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 286, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 286, + 148 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 286, + 148 + ], + "type": "text", + "content": "[31] Steven A Shafer. Using color to separate reflection components. Color Research & Application, 10(4):210-218, 1985. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 152, + 286, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 152, + 286, + 217 + ], + "spans": [ + { + "bbox": [ + 49, + 152, + 286, + 217 + ], + "type": "text", + "content": "[32] Kenichiro Tanaka, Nobuhiro Ikeya, Tsuyoshi Takatani, Hiroyuki Kubo, Takuya Funatomi, Vijay Ravi, Achuta Kadambi, and Yasuhiro Mukaigawa. Time-resolved far infrared light transport decomposition for thermal photometric stereo. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(6):2075-2085, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 219, + 286, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 219, + 286, + 282 + ], + "spans": [ + { + "bbox": [ + 49, + 219, + 286, + 282 + ], + "type": "text", + "content": "[33] Ayush Tewari, Ohad Fried, Justus Thies, Vincent Sitzmann, Stephen Lombardi, Kalyan Sunkavalli, Ricardo MartinBrualla, Tomas Simon, Jason Saragih, Matthias Nießner, et al. State of the art on neural rendering. In Computer Graphics Forum, pages 701-727. 
Wiley Online Library, 2020. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 285, + 286, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 285, + 286, + 317 + ], + "spans": [ + { + "bbox": [ + 49, + 285, + 286, + 317 + ], + "type": "text", + "content": "[34] Michael Vollmer and Klaus-Peter Mollmann. Fundamentals of Infrared Thermal Imaging, chapter 1, pages 1-106. John Wiley & Sons, Ltd, 2017. 1, 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 319, + 286, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 319, + 286, + 350 + ], + "spans": [ + { + "bbox": [ + 49, + 319, + 286, + 350 + ], + "type": "text", + "content": "[35] Robert J Woodham. Photometric method for determining surface orientation from multiple images. Optical engineering, 19(1):139-144, 1980. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 353, + 286, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 353, + 286, + 407 + ], + "spans": [ + { + "bbox": [ + 49, + 353, + 286, + 407 + ], + "type": "text", + "content": "[36] Pengyu Zhang, Jie Zhao, Dong Wang, Huchuan Lu, and Xiang Ruan. Visible-thermal uav tracking: A large-scale benchmark and new baseline. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8886–8895, 2022. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 409, + 286, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 409, + 286, + 453 + ], + "spans": [ + { + "bbox": [ + 49, + 409, + 286, + 453 + ], + "type": "text", + "content": "[37] Xingchen Zhang, Ping Ye, and Gang Xiao. Vifb: A visible and infrared image fusion benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 104-105, 2020. 2" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "11933" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file